#ifndef _RTDM_DRIVER_H
#define _RTDM_DRIVER_H

#ifndef __KERNEL__
#error This header is for kernel space usage only. \
       You are likely looking for rtdm/rtdm.h...
#endif /* !__KERNEL__ */
#include <asm/atomic.h>
#include <linux/list.h>

#include <nucleus/xenomai.h>
#include <nucleus/heap.h>
#include <nucleus/synch.h>
#include <nucleus/assert.h>

#include <asm-generic/xenomai/pci_ids.h>
#ifndef CONFIG_XENO_OPT_DEBUG_RTDM
#define CONFIG_XENO_OPT_DEBUG_RTDM	0
#endif /* !CONFIG_XENO_OPT_DEBUG_RTDM */

typedef struct xnselector rtdm_selector_t;
/* Device flags */

/* Only a single instance of the device can be requested by an application. */
#define RTDM_EXCLUSIVE			0x0001

/* The device is addressed via a clear-text name. */
#define RTDM_NAMED_DEVICE		0x0010

/* The device is addressed via a combination of protocol ID and socket type. */
#define RTDM_PROTOCOL_DEVICE		0x0020

/* Mask selecting the device type. */
#define RTDM_DEVICE_TYPE_MASK		0x00F0
/* Context flags (bit numbers in context_flags) */

/* Set by RTDM if the device instance was created in non-real-time context. */
#define RTDM_CREATED_IN_NRT		0

/* Set by RTDM when the device is being closed. */
#define RTDM_CLOSING			1

/* Lowest bit number a driver may use freely for its own context flags. */
#define RTDM_USER_CONTEXT_FLAG		8
/* Driver versioning */

/* Version of struct rtdm_device */
#define RTDM_DEVICE_STRUCT_VER		5

/* Version of struct rtdm_dev_context */
#define RTDM_CONTEXT_STRUCT_VER		3

#define RTDM_SECURE_DEVICE		0x80000000

/* Build a driver version code from major, minor, and patch numbers. */
#define RTDM_DRIVER_VER(major, minor, patch) \
	(((major & 0xFF) << 16) | ((minor & 0xFF) << 8) | (patch & 0xFF))

/* Extract the individual fields from a driver version code. */
#define RTDM_DRIVER_MAJOR_VER(ver)	(((ver) >> 16) & 0xFF)
#define RTDM_DRIVER_MINOR_VER(ver)	(((ver) >> 8) & 0xFF)
#define RTDM_DRIVER_PATCH_VER(ver)	((ver) & 0xFF)
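/*
 * Usage sketch (hypothetical driver): encode the driver revision once with
 * RTDM_DRIVER_VER() and recover its components with the accessor macros.
 */
#if 0	/* example only */
#define MY_DRIVER_VERSION	RTDM_DRIVER_VER(1, 2, 3)	/* -> 0x010203 */
/* RTDM_DRIVER_MAJOR_VER(MY_DRIVER_VERSION) == 1,
 * RTDM_DRIVER_MINOR_VER(MY_DRIVER_VERSION) == 2,
 * RTDM_DRIVER_PATCH_VER(MY_DRIVER_VERSION) == 3 */
#endif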
/* Named device open handler */
typedef int (*rtdm_open_handler_t)(struct rtdm_dev_context *context,
				   rtdm_user_info_t *user_info, int oflag);

/* Socket creation handler for protocol devices */
typedef int (*rtdm_socket_handler_t)(struct rtdm_dev_context *context,
				     rtdm_user_info_t *user_info, int protocol);

/* Close handler */
typedef int (*rtdm_close_handler_t)(struct rtdm_dev_context *context,
				    rtdm_user_info_t *user_info);

/* IOCTL handler */
typedef int (*rtdm_ioctl_handler_t)(struct rtdm_dev_context *context,
				    rtdm_user_info_t *user_info,
				    unsigned int request, void __user *arg);

/* Select binding handler */
typedef int (*rtdm_select_bind_handler_t)(struct rtdm_dev_context *context,
					  rtdm_selector_t *selector,
					  enum rtdm_selecttype type,
					  unsigned fd_index);

/* Read handler */
typedef ssize_t (*rtdm_read_handler_t)(struct rtdm_dev_context *context,
					rtdm_user_info_t *user_info,
					void *buf, size_t nbyte);

/* Write handler */
typedef ssize_t (*rtdm_write_handler_t)(struct rtdm_dev_context *context,
					 rtdm_user_info_t *user_info,
					 const void *buf, size_t nbyte);

/* Receive message handler */
typedef ssize_t (*rtdm_recvmsg_handler_t)(struct rtdm_dev_context *context,
					   rtdm_user_info_t *user_info,
					   struct msghdr *msg, int flags);

/* Transmit message handler */
typedef ssize_t (*rtdm_sendmsg_handler_t)(struct rtdm_dev_context *context,
					   rtdm_user_info_t *user_info,
					   const struct msghdr *msg, int flags);

typedef int (*rtdm_rt_handler_t)(rtdm_user_info_t *user_info, void *arg);
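/*
 * Handler sketch (illustrative only): a minimal ioctl handler matching the
 * rtdm_ioctl_handler_t prototype above. struct my_ctx and MY_IOCTL_RESET are
 * hypothetical names, not part of this header.
 */
#if 0	/* example only */
static int my_ioctl(struct rtdm_dev_context *context,
		    rtdm_user_info_t *user_info,
		    unsigned int request, void __user *arg)
{
	struct my_ctx *ctx = (struct my_ctx *)context->dev_private;

	switch (request) {
	case MY_IOCTL_RESET:
		ctx->count = 0;
		return 0;
	default:
		return -ENOTTY;
	}
}
#endif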
/* Reserved for internal use by the RTDM core. */
struct rtdm_devctx_reserved {
	struct list_head cleanup;
	/* ... */
};

struct rtdm_dev_reserved {
	struct list_head entry;
	/* ... */
};
/* vfile (/proc) support, handled internally by RTDM: */
#ifdef CONFIG_XENO_OPT_VFILE
	struct xnvfile_directory vfroot;
	struct xnvfile_regular info_vfile;
#endif /* CONFIG_XENO_OPT_VFILE */
#define rtdm_open		rt_dev_open
#define rtdm_socket		rt_dev_socket
#define rtdm_close		rt_dev_close
#define rtdm_ioctl		rt_dev_ioctl
#define rtdm_read		rt_dev_read
#define rtdm_write		rt_dev_write
#define rtdm_recvmsg		rt_dev_recvmsg
#define rtdm_recv		rt_dev_recv
#define rtdm_recvfrom		rt_dev_recvfrom
#define rtdm_sendmsg		rt_dev_sendmsg
#define rtdm_send		rt_dev_send
#define rtdm_sendto		rt_dev_sendto
#define rtdm_bind		rt_dev_bind
#define rtdm_listen		rt_dev_listen
#define rtdm_accept		rt_dev_accept
#define rtdm_getsockopt		rt_dev_getsockopt
#define rtdm_setsockopt		rt_dev_setsockopt
#define rtdm_getsockname	rt_dev_getsockname
#define rtdm_getpeername	rt_dev_getpeername
#define rtdm_shutdown		rt_dev_shutdown
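/*
 * Usage sketch: the rtdm_* aliases above let kernel code act as a client of
 * another RTDM device through the rt_dev_* call interface. "mydev0" is a
 * hypothetical device name.
 */
#if 0	/* example only */
static void kernel_client_demo(void)
{
	int fd = rtdm_open("mydev0", 0);

	if (fd >= 0) {
		rtdm_write(fd, "hello", 5);
		rtdm_close(fd);
	}
}
#endif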
#define CONTEXT_IS_LOCKED(context) \
	(atomic_read(&(context)->close_lock_count) > 1 || \
	 (test_bit(RTDM_CLOSING, &(context)->context_flags) && \
	  atomic_read(&(context)->close_lock_count) > 0))

static inline void rtdm_context_lock(struct rtdm_dev_context *context)
{
	XENO_ASSERT(RTDM, CONTEXT_IS_LOCKED(context),
		    /* just warn if context was a dangling pointer */);
	atomic_inc(&context->close_lock_count);
}

extern int rtdm_apc;

static inline void rtdm_context_unlock(struct rtdm_dev_context *context)
{
	XENO_ASSERT(RTDM, CONTEXT_IS_LOCKED(context),
		    /* just warn if context was a dangling pointer */);
	smp_mb__before_atomic_dec();
	if (unlikely(atomic_dec_and_test(&context->close_lock_count)))
		rthal_apc_schedule(rtdm_apc);
}
extern struct xntbase *rtdm_tbase;

/* Read the current RTDM clock value (nanoseconds). */
static inline nanosecs_abs_t rtdm_clock_read(void)
{
	return xntbase_ticks2ns(rtdm_tbase, xntbase_get_jiffies(rtdm_tbase));
}
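/*
 * Usage sketch: measuring elapsed time in a driver with rtdm_clock_read().
 */
#if 0	/* example only */
static void measure_demo(void)
{
	nanosecs_abs_t t0 = rtdm_clock_read();

	/* ... work to be timed ... */
	rtdm_printk("took %lld ns\n",
		    (long long)(rtdm_clock_read() - t0));
}
#endif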
#ifdef DOXYGEN_CPP /* beautified pseudo-code shown in the docs */
#define RTDM_EXECUTE_ATOMICALLY(code_block) \
	{ <ENTER_ATOMIC_SECTION> code_block; <LEAVE_ATOMIC_SECTION> }
#else /* the real implementation */
#define RTDM_EXECUTE_ATOMICALLY(code_block)		\
{							\
	spl_t __rtdm_s;					\
							\
	xnlock_get_irqsave(&nklock, __rtdm_s);		\
	__xnpod_lock_sched();				\
	code_block;					\
	__xnpod_unlock_sched();				\
	xnlock_put_irqrestore(&nklock, __rtdm_s);	\
}
#endif
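/*
 * Usage sketch: updating driver state atomically against the Xenomai
 * scheduler and other CPUs. ctx->pending is a hypothetical counter.
 */
#if 0	/* example only */
	RTDM_EXECUTE_ATOMICALLY(
		if (ctx->pending)
			ctx->pending--;
	);
#endif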
/* Lock variable initializer */
#define RTDM_LOCK_UNLOCKED	RTHAL_SPIN_LOCK_UNLOCKED

/* Dynamic lock initialization */
#define rtdm_lock_init(lock)	rthal_spin_lock_init(lock)

/* Acquire lock from non-preemptible contexts */
#ifdef DOXYGEN_CPP /* simplified form shown in the docs */
#define rtdm_lock_get(lock)	rthal_spin_lock(lock)
#else /* the real implementation */
#define rtdm_lock_get(lock)					\
	do {							\
		XENO_BUGON(RTDM, !rthal_local_irq_disabled());	\
		rthal_spin_lock(lock);				\
		__xnpod_lock_sched();				\
	} while (0)
#endif

/* Release lock without restoring the interrupt state */
#define rtdm_lock_put(lock)			\
	do {					\
		rthal_spin_unlock(lock);	\
		__xnpod_unlock_sched();		\
	} while (0)

/* Acquire lock, disabling preemption and local interrupts */
#define rtdm_lock_get_irqsave(lock, context)		\
	do {						\
		rthal_spin_lock_irqsave(lock, context);	\
		__xnpod_lock_sched();			\
	} while (0)

/* Release lock, restoring preemption and the local interrupt state */
#define rtdm_lock_put_irqrestore(lock, context)	\
	do {						\
		rthal_spin_unlock(lock);		\
		__xnpod_unlock_sched();			\
		rthal_local_irq_restore(context);	\
	} while (0)

/* Disable / restore the local interrupt line only */
#define rtdm_lock_irqsave(context)	rthal_local_irq_save(context)
#define rtdm_lock_irqrestore(context)	rthal_local_irq_restore(context)
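/*
 * Usage sketch: protecting driver data shared between IRQ and task context
 * with the lock services above. my_lock and my_counter are hypothetical.
 */
#if 0	/* example only */
static rtdm_lock_t my_lock;	/* set up once with rtdm_lock_init(&my_lock) */
static int my_counter;

static void update_counter(int delta)
{
	rtdm_lockctx_t lock_ctx;

	rtdm_lock_get_irqsave(&my_lock, lock_ctx);
	my_counter += delta;
	rtdm_lock_put_irqrestore(&my_lock, lock_ctx);
}
#endif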
typedef xnintr_t rtdm_irq_t;

/* Interrupt registration flags */
#define RTDM_IRQTYPE_SHARED	XN_ISR_SHARED	/* enable IRQ-sharing with other drivers */
#define RTDM_IRQTYPE_EDGE	XN_ISR_EDGE	/* mark IRQ as edge-triggered */

/* Return values of interrupt handlers */
#define RTDM_IRQ_NONE		XN_ISR_NONE	/* unhandled interrupt */
#define RTDM_IRQ_HANDLED	XN_ISR_HANDLED	/* interrupt handled */
#define rtdm_irq_get_arg(irq_handle, type)	((type *)irq_handle->cookie)

int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
		     rtdm_irq_handler_t handler, unsigned long flags,
		     const char *device_name, void *arg);

static inline int rtdm_irq_free(rtdm_irq_t *irq_handle)
{
	XENO_ASSERT(RTDM, xnpod_root_p(), return -EPERM;);
	return xnintr_detach(irq_handle);
}
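/*
 * Usage sketch: a minimal interrupt handler and its registration. The device
 * structure and the my_hw_* helpers are hypothetical.
 */
#if 0	/* example only */
static int my_isr(rtdm_irq_t *irq_handle)
{
	struct my_dev *dev = rtdm_irq_get_arg(irq_handle, struct my_dev);

	if (!my_hw_irq_pending(dev))
		return RTDM_IRQ_NONE;		/* not ours (shared line) */

	my_hw_ack_irq(dev);
	rtdm_event_signal(&dev->rx_event);	/* wake up a waiting task */
	return RTDM_IRQ_HANDLED;
}

/* registration, e.g. in the open handler:
 *   rtdm_irq_request(&dev->irq_handle, dev->irq_no, my_isr,
 *                    RTDM_IRQTYPE_SHARED, "my_dev", dev);
 */
#endif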
typedef unsigned rtdm_nrtsig_t;

/* rtdm_nrtsig_init(): allocate a virtual IRQ and install the non-RT handler */
	*nrt_sig = rthal_alloc_virq();
	rthal_virtualize_irq(rthal_root_domain, *nrt_sig, handler, arg, NULL,
			     /* mode mask elided */);

/* rtdm_nrtsig_destroy(): detach the handler and release the virtual IRQ */
	rthal_virtualize_irq(rthal_root_domain, *nrt_sig, NULL, NULL, NULL, 0);
	rthal_free_virq(*nrt_sig);

/* rtdm_nrtsig_pend(): trigger the signal; the handler runs in Linux context */
	rthal_trigger_irq(*nrt_sig);
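/*
 * Usage sketch: deferring non-real-time work (here just a printk) from RT
 * context via a non-RT signal. Names are hypothetical, and the handler
 * signature is assumed to take the signal handle plus the argument passed
 * at init time.
 */
#if 0	/* example only */
static rtdm_nrtsig_t my_nrt_sig;

static void my_nrt_handler(rtdm_nrtsig_t nrt_sig, void *arg)
{
	printk("deferred to Linux context: %s\n", (const char *)arg);
}

/* setup:   rtdm_nrtsig_init(&my_nrt_sig, my_nrt_handler, "hello");
 * from RT: rtdm_nrtsig_pend(&my_nrt_sig);
 * cleanup: rtdm_nrtsig_destroy(&my_nrt_sig);
 */
#endif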
typedef xntimer_t rtdm_timer_t;

#define rtdm_timer_init(timer, handler, name)		\
({							\
	xntimer_init((timer), rtdm_tbase, handler);	\
	xntimer_set_name((timer), (name));		\
	0; /* success */				\
})

/* rtdm_timer_start(): expiry date and interval are converted to timebase ticks */
	return xntimer_start(timer, xntbase_ns2ticks_ceil(rtdm_tbase, expiry),
			     xntbase_ns2ticks_ceil(rtdm_tbase, interval),
			     (xntmode_t)mode);
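/*
 * Usage sketch: a periodic timer firing every millisecond. my_timer and
 * my_timer_handler are hypothetical.
 */
#if 0	/* example only */
static rtdm_timer_t my_timer;

static void my_timer_handler(rtdm_timer_t *timer)
{
	/* runs in real-time interrupt context */
}

/* setup:
 *   rtdm_timer_init(&my_timer, my_timer_handler, "my_timer");
 *   rtdm_timer_start(&my_timer, 1000000, 1000000, RTDM_TIMERMODE_RELATIVE);
 */
#endif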
typedef xnthread_t rtdm_task_t;

/* Task priority range of the RT scheduling class */
#define RTDM_TASK_LOWEST_PRIORITY	XNSCHED_LOW_PRIO
#define RTDM_TASK_HIGHEST_PRIORITY	XNSCHED_HIGH_PRIO

/* Relative priority adjustments */
#define RTDM_TASK_RAISE_PRIORITY	(+1)
#define RTDM_TASK_LOWER_PRIORITY	(-1)

int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode);

/* rtdm_task_set_priority() maps the priority onto the RT scheduling class: */
	union xnsched_policy_param param = { .rt = { .prio = priority } };

/* rtdm_task_set_period() converts the period into ticks of the task's time base: */
	xntbase_ns2ticks_ceil(xnthread_time_base(task), period));

/* rtdm_task_current() returns the handle of the calling task: */
	return xnpod_current_thread();

/* rtdm_task_wait_period() may only be called from blockable task context: */
	XENO_ASSERT(RTDM, !xnpod_unblockable_p(), return -EPERM;);

/* rtdm_task_sleep(), rtdm_task_sleep_abs() and rtdm_task_sleep_until() call
 * the common sleep helper with the respective timing mode: */
	return __rtdm_task_sleep(delay, XN_RELATIVE);
	return __rtdm_task_sleep(wakeup_date, (xntmode_t)mode);
	return __rtdm_task_sleep(wakeup_time, XN_REALTIME);
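/*
 * Usage sketch: a driver worker task polling hardware once per millisecond.
 * Names are hypothetical; rtdm_task_init() is part of the RTDM task API.
 */
#if 0	/* example only */
static rtdm_task_t my_task;

static void my_task_proc(void *arg)
{
	struct my_dev *dev = arg;

	for (;;) {
		if (rtdm_task_sleep(1000000) < 0)	/* 1 ms, aborts on task destruction */
			break;
		my_poll_hardware(dev);
	}
}

/* creation, e.g. in the open handler:
 *   rtdm_task_init(&my_task, "my_worker", my_task_proc, dev,
 *                  RTDM_TASK_HIGHEST_PRIORITY, 0);
 */
#endif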
typedef struct rtdm_event {
	xnsynch_t synch_base;
	DECLARE_XNSELECT(select_block);
} rtdm_event_t;

#define RTDM_EVENT_PENDING	XNSYNCH_SPARE1

#ifdef CONFIG_XENO_OPT_RTDM_SELECT
int rtdm_event_select_bind(rtdm_event_t *event, rtdm_selector_t *selector,
			   enum rtdm_selecttype type, unsigned fd_index);
#else /* !CONFIG_XENO_OPT_RTDM_SELECT */
#define rtdm_event_select_bind(e, s, t, i)	({ (void)(e); -EBADF; })
#endif /* CONFIG_XENO_OPT_RTDM_SELECT */

int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
			 rtdm_toseq_t *timeout_seq);

void __rtdm_synch_flush(xnsynch_t *synch, unsigned long reason);

static inline void rtdm_event_pulse(rtdm_event_t *event)
{
	trace_mark(xn_rtdm, event_pulse, "event %p", event);
	__rtdm_synch_flush(&event->synch_base, 0);
}

static inline void rtdm_event_destroy(rtdm_event_t *event)
{
	trace_mark(xn_rtdm, event_destroy, "event %p", event);
	__rtdm_synch_flush(&event->synch_base, XNRMID);
}
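/*
 * Usage sketch: blocking a reader until the ISR signals data availability.
 * dev->rx_event is hypothetical; see the IRQ example above for the signaling
 * side.
 */
#if 0	/* example only */
	int ret;

	/* in the device setup path: */
	rtdm_event_init(&dev->rx_event, 0);

	/* in the read handler (RT context): */
	ret = rtdm_event_timedwait(&dev->rx_event, 1000000 /* 1 ms */, NULL);
	if (ret == 0)
		/* data is available, copy it out */;
	else if (ret == -ETIMEDOUT)
		/* no data within the timeout */;
#endif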
typedef struct rtdm_sem {
	unsigned long value;
	xnsynch_t synch_base;
	DECLARE_XNSELECT(select_block);
} rtdm_sem_t;

#ifdef CONFIG_XENO_OPT_RTDM_SELECT
int rtdm_sem_select_bind(rtdm_sem_t *sem, rtdm_selector_t *selector,
			 enum rtdm_selecttype type, unsigned fd_index);
#else /* !CONFIG_XENO_OPT_RTDM_SELECT */
#define rtdm_sem_select_bind(s, se, t, i)	({ (void)(s); -EBADF; })
#endif /* CONFIG_XENO_OPT_RTDM_SELECT */

int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
		       rtdm_toseq_t *timeout_seq);

static inline void rtdm_sem_destroy(rtdm_sem_t *sem)
{
	__rtdm_synch_flush(&sem->synch_base, XNRMID);
}
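/*
 * Usage sketch: a counting semaphore tracking received messages. dev->rx_sem
 * and the surrounding code are hypothetical.
 */
#if 0	/* example only */
	rtdm_sem_init(&dev->rx_sem, 0);			/* setup */
	rtdm_sem_up(&dev->rx_sem);			/* from the ISR, per message */
	ret = rtdm_sem_timeddown(&dev->rx_sem, timeout, NULL); /* consumer side */
#endif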
typedef struct rtdm_mutex {
	xnsynch_t synch_base;
} rtdm_mutex_t;

int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
			 rtdm_toseq_t *timeout_seq);

static inline void rtdm_mutex_unlock(rtdm_mutex_t *mutex)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return;);
	trace_mark(xn_rtdm, mutex_unlock, "mutex %p", mutex);
	/* ... release the underlying synch object and reschedule if needed ... */
}

static inline void rtdm_mutex_destroy(rtdm_mutex_t *mutex)
{
	trace_mark(xn_rtdm, mutex_destroy, "mutex %p", mutex);
	__rtdm_synch_flush(&mutex->synch_base, XNRMID);
}
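/*
 * Usage sketch: serializing slow-path configuration between RT tasks.
 * dev->cfg_lock is hypothetical.
 */
#if 0	/* example only */
	rtdm_mutex_init(&dev->cfg_lock);

	rtdm_mutex_lock(&dev->cfg_lock);
	/* ... update configuration ... */
	rtdm_mutex_unlock(&dev->cfg_lock);
#endif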
#define rtdm_printk(format, ...)	printk(format, ##__VA_ARGS__)

struct rtdm_ratelimit_state {
	rtdm_lock_t	lock;		/* protect the state */
	nanosecs_abs_t	interval;
	int		burst;
	/* ... internal bookkeeping ... */
};

int rtdm_ratelimit(struct rtdm_ratelimit_state *rs, const char *func);

#define DEFINE_RTDM_RATELIMIT_STATE(name, interval_init, burst_init)	\
	struct rtdm_ratelimit_state name = {				\
		.lock		= RTDM_LOCK_UNLOCKED,			\
		.interval	= interval_init,			\
		.burst		= burst_init,				\
	}

/* We rate-limit to 10 messages every 5 seconds by default */
#define DEF_RTDM_RATELIMIT_INTERVAL	5000000000LL
#define DEF_RTDM_RATELIMIT_BURST	10

#define rtdm_printk_ratelimited(fmt, ...)  ({				\
	static DEFINE_RTDM_RATELIMIT_STATE(_rs,				\
					   DEF_RTDM_RATELIMIT_INTERVAL,	\
					   DEF_RTDM_RATELIMIT_BURST);	\
									\
	if (rtdm_ratelimit(&_rs, __func__))				\
		printk(fmt, ##__VA_ARGS__);				\
})
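/*
 * Usage sketch: avoid flooding the kernel log from a high-frequency error
 * path. MY_ERR_OVERRUN and status are hypothetical.
 */
#if 0	/* example only */
	if (status & MY_ERR_OVERRUN)
		rtdm_printk_ratelimited("mydev: RX overrun (status=%x)\n",
					status);
#endif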
/* Allocate a memory block from the Xenomai system heap (RT-safe) */
static inline void *rtdm_malloc(size_t size)
{
	return xnmalloc(size);
}
#ifdef CONFIG_XENO_OPT_PERVASIVE

/* Map a kernel memory range into the address space of the calling user process */
int rtdm_mmap_to_user(rtdm_user_info_t *user_info,
		      void *src_addr, size_t len, int prot, void **pptr,
		      struct vm_operations_struct *vm_ops,
		      void *vm_private_data);

/* Map physical I/O memory into the address space of the calling user process */
int rtdm_iomap_to_user(rtdm_user_info_t *user_info,
		       phys_addr_t src_addr, size_t len, int prot, void **pptr,
		       struct vm_operations_struct *vm_ops,
		       void *vm_private_data);

/* Unmap a region established by the mapping services above */
int rtdm_munmap(rtdm_user_info_t *user_info, void *ptr, size_t len);
/* Check if read access to a user-space memory block is safe */
static inline int rtdm_read_user_ok(rtdm_user_info_t *user_info,
				    const void __user *ptr, size_t size)
{
	return access_rok(ptr, size);
}

/* Check if read/write access to a user-space memory block is safe */
static inline int rtdm_rw_user_ok(rtdm_user_info_t *user_info,
				  const void __user *ptr, size_t size)
{
	return access_wok(ptr, size);
}

/* Copy a user-space memory block to kernel space (access rights not checked) */
static inline int rtdm_copy_from_user(rtdm_user_info_t *user_info,
				      void *dst, const void __user *src,
				      size_t size)
{
	return __xn_copy_from_user(dst, src, size) ? -EFAULT : 0;
}

/* Check access rights, then copy a user-space memory block to kernel space */
static inline int rtdm_safe_copy_from_user(rtdm_user_info_t *user_info,
					   void *dst, const void __user *src,
					   size_t size)
{
	return (!access_rok(src, size) ||
		__xn_copy_from_user(dst, src, size)) ? -EFAULT : 0;
}

/* Copy a kernel memory block to user space (access rights not checked) */
static inline int rtdm_copy_to_user(rtdm_user_info_t *user_info,
				    void __user *dst, const void *src,
				    size_t size)
{
	return __xn_copy_to_user(dst, src, size) ? -EFAULT : 0;
}

/* Check access rights, then copy a kernel memory block to user space */
static inline int rtdm_safe_copy_to_user(rtdm_user_info_t *user_info,
					 void __user *dst, const void *src,
					 size_t size)
{
	return (!access_wok(dst, size) ||
		__xn_copy_to_user(dst, src, size)) ? -EFAULT : 0;
}

/* Copy a NUL-terminated string from user space */
static inline ssize_t rtdm_strncpy_from_user(rtdm_user_info_t *user_info,
					     char *dst,
					     const char __user *src, size_t count)
{
	if (unlikely(!access_rok(src, 1)))
		return -EFAULT;
	return __xn_strncpy_from_user(dst, src, count);
}
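/*
 * Usage sketch: fetching an ioctl argument from user space, honoring the
 * calling context. struct my_config and the enclosing handler are
 * hypothetical.
 */
#if 0	/* example only */
	struct my_config cfg;

	if (user_info) {
		/* call originates from user space: validate and copy */
		if (rtdm_safe_copy_from_user(user_info, &cfg, arg, sizeof(cfg)))
			return -EFAULT;
	} else {
		/* kernel-space caller passed a kernel pointer */
		memcpy(&cfg, arg, sizeof(cfg));
	}
#endif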
/* Test whether the caller is capable of real-time execution */
static inline int rtdm_rt_capable(rtdm_user_info_t *user_info)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return 0;);

	return (user_info ? xnshadow_thread(user_info) != NULL
			  : !xnpod_root_p());
}
#else /* !CONFIG_XENO_OPT_PERVASIVE */

#define rtdm_mmap_to_user(...)		({ -ENOSYS; })
#define rtdm_munmap(...)		({ -ENOSYS; })
#define rtdm_read_user_ok(...)		({ 0; })
#define rtdm_rw_user_ok(...)		({ 0; })
#define rtdm_copy_from_user(...)	({ -ENOSYS; })
#define rtdm_safe_copy_from_user(...)	({ -ENOSYS; })
#define rtdm_copy_to_user(...)		({ -ENOSYS; })
#define rtdm_safe_copy_to_user(...)	({ -ENOSYS; })
#define rtdm_strncpy_from_user(...)	({ -ENOSYS; })
static inline int rtdm_rt_capable(rtdm_user_info_t *user_info)
{
	XENO_ASSERT(RTDM, !xnpod_asynch_p(), return 0;);

	return !xnpod_root_p();
}

#endif /* CONFIG_XENO_OPT_PERVASIVE */
/* Test whether the caller currently executes in real-time (primary) context */
static inline int rtdm_in_rt_context(void)
{
	return (rthal_current_domain != rthal_root_domain);
}
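/*
 * Usage sketch: taking a non-real-time fallback path when called from
 * secondary/Linux context. my_nrt_setup() and my_rt_fast_path() are
 * hypothetical.
 */
#if 0	/* example only */
	if (!rtdm_in_rt_context()) {
		/* e.g. defer hardware setup that may sleep */
		return my_nrt_setup(dev);
	}
	return my_rt_fast_path(dev);
#endif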
/* Execute a handler in real-time context on behalf of the calling task */
int rtdm_exec_in_rt(struct rtdm_dev_context *context,
		    rtdm_user_info_t *user_info, void *arg,
		    rtdm_rt_handler_t handler);