1 /******************************************************************************
\r
3 * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
\r
5 * This program is free software; you can redistribute it and/or modify it
\r
6 * under the terms of version 2 of the GNU General Public License as
\r
7 * published by the Free Software Foundation.
\r
9 * This program is distributed in the hope that it will be useful, but WITHOUT
\r
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
\r
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
\r
14 * You should have received a copy of the GNU General Public License along with
\r
15 * this program; if not, write to the Free Software Foundation, Inc.,
\r
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
\r
19 ******************************************************************************/
\r
20 #ifndef __OSDEP_BSD_SERVICE_H_
\r
21 #define __OSDEP_BSD_SERVICE_H_
\r
24 #include <sys/cdefs.h>
\r
25 #include <sys/types.h>
\r
26 #include <sys/systm.h>
\r
27 #include <sys/param.h>
\r
28 #include <sys/sockio.h>
\r
29 #include <sys/sysctl.h>
\r
30 #include <sys/lock.h>
\r
31 #include <sys/mutex.h>
\r
32 #include <sys/mbuf.h>
\r
33 #include <sys/kernel.h>
\r
34 #include <sys/socket.h>
\r
35 #include <sys/systm.h>
\r
36 #include <sys/malloc.h>
\r
37 #include <sys/module.h>
\r
38 #include <sys/bus.h>
\r
39 #include <sys/endian.h>
\r
40 #include <sys/kdb.h>
\r
41 #include <sys/kthread.h>
\r
42 #include <sys/malloc.h>
\r
43 #include <sys/time.h>
\r
44 #include <machine/atomic.h>
\r
45 #include <machine/bus.h>
\r
46 #include <machine/resource.h>
\r
47 #include <sys/rman.h>
\r
49 #include <net/bpf.h>
\r
51 #include <net/if_arp.h>
\r
52 #include <net/ethernet.h>
\r
53 #include <net/if_dl.h>
\r
54 #include <net/if_media.h>
\r
55 #include <net/if_types.h>
\r
56 #include <net/route.h>
\r
59 #include <netinet/in.h>
\r
60 #include <netinet/in_systm.h>
\r
61 #include <netinet/in_var.h>
\r
62 #include <netinet/if_ether.h>
\r
63 #include <if_ether.h>
\r
65 #include <net80211/ieee80211_var.h>
\r
66 #include <net80211/ieee80211_regdomain.h>
\r
67 #include <net80211/ieee80211_radiotap.h>
\r
68 #include <net80211/ieee80211_ratectl.h>
\r
70 #include <dev/usb/usb.h>
\r
71 #include <dev/usb/usbdi.h>
\r
72 #include "usbdevs.h"
\r
74 #define USB_DEBUG_VAR rum_debug
\r
75 #include <dev/usb/usb_debug.h>
\r
77 #if 1 //Baron porting from linux, it's all temp solution, needs to check again
\r
78 #include <sys/sema.h>
\r
79 #include <sys/pcpu.h> /* XXX for PCPU_GET */
\r
80 // typedef struct semaphore _sema;
\r
81 typedef struct sema _sema;
\r
82 // typedef spinlock_t _lock;
\r
83 typedef struct mtx _lock;
\r
84 typedef struct mtx _mutex;
\r
85 typedef struct timer_list _timer;
\r
87 struct list_head *next, *prev;
\r
90 struct list_head queue;
\r
94 //typedef struct sk_buff _pkt;
\r
95 typedef struct mbuf _pkt;
\r
96 typedef struct mbuf _buffer;
\r
98 typedef struct __queue _queue;
\r
99 typedef struct list_head _list;
\r
100 typedef int _OS_STATUS;
\r
101 //typedef u32 _irqL;
\r
102 typedef unsigned long _irqL;
\r
103 typedef struct ifnet * _nic_hdl;
\r
105 typedef pid_t _thread_hdl_;
\r
106 // typedef struct thread _thread_hdl_;
\r
107 typedef void thread_return;
\r
108 typedef void* thread_context;
\r
110 //#define thread_exit() complete_and_exit(NULL, 0)
\r
112 #define thread_exit() do{printf("%s", "RTKTHREAD_exit");}while(0)
\r
114 typedef void timer_hdl_return;
\r
115 typedef void* timer_hdl_context;
\r
116 typedef struct work_struct _workitem;
\r
118 #define KERNEL_VERSION(a,b,c) (((a) << 16) + ((b) << 8) + (c))
\r
119 /* emulate a modern version */
\r
120 #define LINUX_VERSION_CODE KERNEL_VERSION(2, 6, 35)
\r
/* Linux wireless extensions are not supported in this BSD port; the value
 * is parenthesized so the negative constant expands safely inside
 * arithmetic/comparison expressions (e.g. `x - WIRELESS_EXT`). */
#define WIRELESS_EXT (-1)
\r
124 #define spin_lock_irqsave mtx_lock_irqsave
\r
125 #define spin_lock_bh mtx_lock_irqsave
\r
126 #define mtx_lock_irqsave(lock, x) mtx_lock(lock)//{local_irq_save((x)); mtx_lock_spin((lock));}
\r
127 //#define IFT_RTW 0xf9 //ifnet allocate type for RTW
\r
128 #define free_netdev if_free
\r
129 #define LIST_CONTAINOR(ptr, type, member) \
\r
130 ((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
\r
/*
 * container_of(): recover the address of the enclosing structure 't'
 * from a pointer 'p' to its member 'n'.
 * The original expansion subtracted a member address directly from 'p'
 * without casting either side, which is invalid pointer arithmetic and
 * yields the wrong pointer type; go through (char *) and the member
 * offset instead, and parenthesize the whole expansion.
 */
#define container_of(p,t,n) ((t *)((char *)(p) - (size_t)(&((t *)0)->n)))
\r
133 * Linux timers are emulated using FreeBSD callout functions
\r
134 * (and taskqueue functionality).
\r
136 * Currently no timer stats functionality.
\r
138 * See (linux_compat) processes.c
\r
141 struct timer_list {
\r
143 /* FreeBSD callout related fields */
\r
144 struct callout callout;
\r
147 void (*function)(void*);
\r
152 struct workqueue_struct;
\r
153 struct work_struct;
\r
154 typedef void (*work_func_t)(struct work_struct *work);
\r
155 /* Values for the state of an item of work (work_struct) */
\r
156 typedef enum work_state {
\r
157 WORK_STATE_UNSET = 0,
\r
158 WORK_STATE_CALLOUT_PENDING = 1,
\r
159 WORK_STATE_TASK_PENDING = 2,
\r
160 WORK_STATE_WORK_CANCELLED = 3
\r
163 struct work_struct {
\r
164 struct task task; /* FreeBSD task */
\r
165 work_state_t state; /* the pending or otherwise state of work. */
\r
168 #define spin_unlock_irqrestore mtx_unlock_irqrestore
\r
169 #define spin_unlock_bh mtx_unlock_irqrestore
\r
/* Map the Linux-style unlock onto a plain FreeBSD mtx_unlock(); the saved
 * interrupt-flags argument 'x' is unused here.  No trailing semicolon in
 * the expansion, so the macro acts as a single statement and remains safe
 * inside un-braced if/else bodies (call sites supply the semicolon). */
#define mtx_unlock_irqrestore(lock,x) mtx_unlock(lock)
\r
171 extern void _rtw_spinlock_init(_lock *plock);
\r
173 //modify private structure to match freebsd
\r
174 #define BITS_PER_LONG 32
\r
177 #if BITS_PER_LONG != 64 && !defined(CONFIG_KTIME_SCALAR)
\r
179 #ifdef __BIG_ENDIAN
\r
187 #define kmemcheck_bitfield_begin(name)
\r
188 #define kmemcheck_bitfield_end(name)
\r
189 #define CHECKSUM_NONE 0
\r
190 typedef unsigned char *sk_buff_data_t;
\r
191 typedef union ktime ktime_t; /* Kill this */
\r
193 void rtw_mtx_lock(_lock *plock);
\r
195 void rtw_mtx_unlock(_lock *plock);
\r
198 * struct sk_buff - socket buffer
\r
199 * @next: Next buffer in list
\r
200 * @prev: Previous buffer in list
\r
201 * @sk: Socket we are owned by
\r
202 * @tstamp: Time we arrived
\r
203 * @dev: Device we arrived on/are leaving by
\r
204 * @transport_header: Transport layer header
\r
205 * @network_header: Network layer header
\r
206 * @mac_header: Link layer header
\r
207 * @_skb_refdst: destination entry (with norefcount bit)
\r
208 * @sp: the security path, used for xfrm
\r
209 * @cb: Control buffer. Free for use by every layer. Put private vars here
\r
210 * @len: Length of actual data
\r
211 * @data_len: Data length
\r
212 * @mac_len: Length of link layer header
\r
213 * @hdr_len: writable header length of cloned skb
\r
214 * @csum: Checksum (must include start/offset pair)
\r
215 * @csum_start: Offset from skb->head where checksumming should start
\r
216 * @csum_offset: Offset from csum_start where checksum should be stored
\r
217 * @local_df: allow local fragmentation
\r
218 * @cloned: Head may be cloned (check refcnt to be sure)
\r
219 * @nohdr: Payload reference only, must not modify header
\r
220 * @pkt_type: Packet class
\r
221 * @fclone: skbuff clone status
\r
222 * @ip_summed: Driver fed us an IP checksum
\r
223 * @priority: Packet queueing priority
\r
224 * @users: User count - see {datagram,tcp}.c
\r
225 * @protocol: Packet protocol from driver
\r
226 * @truesize: Buffer size
\r
227 * @head: Head of buffer
\r
228 * @data: Data head pointer
\r
229 * @tail: Tail pointer
\r
230 * @end: End pointer
\r
231 * @destructor: Destruct function
\r
232 * @mark: Generic packet mark
\r
233 * @nfct: Associated connection, if any
\r
234 * @ipvs_property: skbuff is owned by ipvs
\r
235 * @peeked: this packet has been seen already, so stats have been
\r
236 * done for it, don't do them again
\r
237 * @nf_trace: netfilter packet trace flag
\r
238 * @nfctinfo: Relationship of this skb to the connection
\r
239 * @nfct_reasm: netfilter conntrack re-assembly pointer
\r
240 * @nf_bridge: Saved data about a bridged frame - see br_netfilter.c
\r
241 * @skb_iif: ifindex of device we arrived on
\r
242 * @rxhash: the packet hash computed on receive
\r
243 * @queue_mapping: Queue mapping for multiqueue devices
\r
244 * @tc_index: Traffic control index
\r
245 * @tc_verd: traffic control verdict
\r
246 * @ndisc_nodetype: router type (from link layer)
\r
247 * @dma_cookie: a cookie to one of several possible DMA operations
\r
248 * done by skb DMA functions
\r
249 * @secmark: security marking
\r
250 * @vlan_tci: vlan tag control information
\r
254 /* These two members must be first. */
\r
255 struct sk_buff *next;
\r
256 struct sk_buff *prev;
\r
261 //struct net_device *dev;
\r
265 * This is the control buffer. It is free to use for every
\r
266 * layer. Please put your private variables there. If you
\r
267 * want to keep them across layers you have to do a skb_clone()
\r
268 * first. This is owned by whoever has the skb queued ATM.
\r
270 char cb[48] __aligned(8);
\r
272 unsigned long _skb_refdst;
\r
274 struct sec_path *sp;
\r
288 kmemcheck_bitfield_begin(flags1);
\r
299 kmemcheck_bitfield_end(flags1);
\r
302 void (*destructor)(struct sk_buff *skb);
\r
303 #if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
\r
304 struct nf_conntrack *nfct;
\r
305 struct sk_buff *nfct_reasm;
\r
307 #ifdef CONFIG_BRIDGE_NETFILTER
\r
308 struct nf_bridge_info *nf_bridge;
\r
312 #ifdef CONFIG_NET_SCHED
\r
313 u16 tc_index; /* traffic control index */
\r
314 #ifdef CONFIG_NET_CLS_ACT
\r
315 u16 tc_verd; /* traffic control verdict */
\r
321 kmemcheck_bitfield_begin(flags2);
\r
322 u16 queue_mapping:16;
\r
323 #ifdef CONFIG_IPV6_NDISC_NODETYPE
\r
324 u8 ndisc_nodetype:2,
\r
325 deliver_no_wcard:1;
\r
327 u8 deliver_no_wcard:1;
\r
329 kmemcheck_bitfield_end(flags2);
\r
331 /* 0/14 bit hole */
\r
333 #ifdef CONFIG_NET_DMA
\r
334 dma_cookie_t dma_cookie;
\r
336 #ifdef CONFIG_NETWORK_SECMARK
\r
346 sk_buff_data_t transport_header;
\r
347 sk_buff_data_t network_header;
\r
348 sk_buff_data_t mac_header;
\r
349 /* These elements must be at the end, see alloc_skb() for details. */
\r
350 sk_buff_data_t tail;
\r
351 sk_buff_data_t end;
\r
352 unsigned char *head,
\r
354 unsigned int truesize;
\r
357 struct sk_buff_head {
\r
358 /* These two members must be first. */
\r
359 struct sk_buff *next;
\r
360 struct sk_buff *prev;
\r
/* Return the tail pointer of an skb.  The argument is parenthesized so the
 * macro also expands correctly for non-identifier arguments such as
 * skb_tail_pointer(&skb) or skb_tail_pointer(get_skb()). */
#define skb_tail_pointer(skb) ((skb)->tail)
\r
366 static inline unsigned char *skb_put(struct sk_buff *skb, unsigned int len)
\r
368 unsigned char *tmp = skb_tail_pointer(skb);
\r
369 //SKB_LINEAR_ASSERT(skb);
\r
375 static inline unsigned char *__skb_pull(struct sk_buff *skb, unsigned int len)
\r
378 if(skb->len < skb->data_len)
\r
379 printf("%s(),%d,error!\n",__FUNCTION__,__LINE__);
\r
380 return skb->data += len;
\r
382 static inline unsigned char *skb_pull(struct sk_buff *skb, unsigned int len)
\r
384 #ifdef PLATFORM_FREEBSD
\r
385 return __skb_pull(skb, len);
\r
387 return unlikely(len > skb->len) ? NULL : __skb_pull(skb, len);
\r
388 #endif //PLATFORM_FREEBSD
\r
390 static inline u32 skb_queue_len(const struct sk_buff_head *list_)
\r
392 return list_->qlen;
\r
394 static inline void __skb_insert(struct sk_buff *newsk,
\r
395 struct sk_buff *prev, struct sk_buff *next,
\r
396 struct sk_buff_head *list)
\r
398 newsk->next = next;
\r
399 newsk->prev = prev;
\r
400 next->prev = prev->next = newsk;
\r
403 static inline void __skb_queue_before(struct sk_buff_head *list,
\r
404 struct sk_buff *next,
\r
405 struct sk_buff *newsk)
\r
407 __skb_insert(newsk, next->prev, next, list);
\r
409 static inline void skb_queue_tail(struct sk_buff_head *list,
\r
410 struct sk_buff *newsk)
\r
412 mtx_lock(&list->lock);
\r
413 __skb_queue_before(list, (struct sk_buff *)list, newsk);
\r
414 mtx_unlock(&list->lock);
\r
416 static inline struct sk_buff *skb_peek(struct sk_buff_head *list_)
\r
418 struct sk_buff *list = ((struct sk_buff *)list_)->next;
\r
419 if (list == (struct sk_buff *)list_)
\r
423 static inline void __skb_unlink(struct sk_buff *skb, struct sk_buff_head *list)
\r
425 struct sk_buff *next, *prev;
\r
430 skb->next = skb->prev = NULL;
\r
435 static inline struct sk_buff *skb_dequeue(struct sk_buff_head *list)
\r
437 mtx_lock(&list->lock);
\r
439 struct sk_buff *skb = skb_peek(list);
\r
441 __skb_unlink(skb, list);
\r
443 mtx_unlock(&list->lock);
\r
447 static inline void skb_reserve(struct sk_buff *skb, int len)
\r
452 static inline void __skb_queue_head_init(struct sk_buff_head *list)
\r
454 list->prev = list->next = (struct sk_buff *)list;
\r
458 * This function creates a split out lock class for each invocation;
\r
459 * this is needed for now since a whole lot of users of the skb-queue
\r
460 * infrastructure in drivers have different locking usage (in hardirq)
\r
461 * than the networking core (in softirq only). In the long run either the
\r
462 * network layer or drivers should need annotation to consolidate the
\r
463 * main types of usage into 3 classes.
\r
465 static inline void skb_queue_head_init(struct sk_buff_head *list)
\r
467 _rtw_spinlock_init(&list->lock);
\r
468 __skb_queue_head_init(list);
\r
470 unsigned long copy_from_user(void *to, const void *from, unsigned long n);
\r
471 unsigned long copy_to_user(void *to, const void *from, unsigned long n);
\r
472 struct sk_buff * dev_alloc_skb(unsigned int size);
\r
473 struct sk_buff *skb_clone(const struct sk_buff *skb);
\r
474 void dev_kfree_skb_any(struct sk_buff *skb);
\r
475 #endif //Baron porting from linux, it's all temp solution, needs to check again
\r
478 #if 1 // kenny add Linux compatibility code for Linux USB driver
\r
479 #include <dev/usb/usb_compat_linux.h>
\r
481 #define __init // __attribute ((constructor))
\r
482 #define __exit // __attribute ((destructor))
\r
485 * Definitions for module_init and module_exit macros.
\r
487 * These macros will use the SYSINIT framework to call a specified
\r
488 * function (with no arguments) on module loading or unloading.
\r
492 void module_init_exit_wrapper(void *arg);
\r
494 #define module_init(initfn) \
\r
495 SYSINIT(mod_init_ ## initfn, \
\r
496 SI_SUB_KLD, SI_ORDER_FIRST, \
\r
497 module_init_exit_wrapper, initfn)
\r
499 #define module_exit(exitfn) \
\r
500 SYSUNINIT(mod_exit_ ## exitfn, \
\r
501 SI_SUB_KLD, SI_ORDER_ANY, \
\r
502 module_init_exit_wrapper, exitfn)
\r
505 * The usb_register and usb_deregister functions are used to register
\r
506 * usb drivers with the usb subsystem.
\r
508 int usb_register(struct usb_driver *driver);
\r
509 int usb_deregister(struct usb_driver *driver);
\r
512 * usb_get_dev and usb_put_dev - increment/decrement the reference count
\r
513 * of the usb device structure.
\r
515 * Original body of usb_get_dev:
\r
518 * get_device(&dev->dev);
\r
521 * Reference counts are not currently used in this compatibility
\r
522 * layer. So these functions will do nothing.
\r
524 static inline struct usb_device *
\r
525 usb_get_dev(struct usb_device *dev)
\r
530 static inline void
\r
531 usb_put_dev(struct usb_device *dev)
\r
537 // rtw_usb_compat_linux
\r
538 int rtw_usb_submit_urb(struct urb *urb, uint16_t mem_flags);
\r
539 int rtw_usb_unlink_urb(struct urb *urb);
\r
540 int rtw_usb_clear_halt(struct usb_device *dev, struct usb_host_endpoint *uhe);
\r
541 int rtw_usb_control_msg(struct usb_device *dev, struct usb_host_endpoint *uhe,
\r
542 uint8_t request, uint8_t requesttype,
\r
543 uint16_t value, uint16_t index, void *data,
\r
544 uint16_t size, usb_timeout_t timeout);
\r
545 int rtw_usb_set_interface(struct usb_device *dev, uint8_t iface_no, uint8_t alt_index);
\r
546 int rtw_usb_setup_endpoint(struct usb_device *dev,
\r
547 struct usb_host_endpoint *uhe, usb_size_t bufsize);
\r
548 struct urb *rtw_usb_alloc_urb(uint16_t iso_packets, uint16_t mem_flags);
\r
549 struct usb_host_endpoint *rtw_usb_find_host_endpoint(struct usb_device *dev, uint8_t type, uint8_t ep);
\r
550 struct usb_host_interface *rtw_usb_altnum_to_altsetting(const struct usb_interface *intf, uint8_t alt_index);
\r
551 struct usb_interface *rtw_usb_ifnum_to_if(struct usb_device *dev, uint8_t iface_no);
\r
552 void *rtw_usbd_get_intfdata(struct usb_interface *intf);
\r
553 void rtw_usb_linux_register(void *arg);
\r
554 void rtw_usb_linux_deregister(void *arg);
\r
555 void rtw_usb_linux_free_device(struct usb_device *dev);
\r
556 void rtw_usb_free_urb(struct urb *urb);
\r
557 void rtw_usb_init_urb(struct urb *urb);
\r
558 void rtw_usb_kill_urb(struct urb *urb);
\r
559 void rtw_usb_set_intfdata(struct usb_interface *intf, void *data);
\r
560 void rtw_usb_fill_bulk_urb(struct urb *urb, struct usb_device *udev,
\r
561 struct usb_host_endpoint *uhe, void *buf,
\r
562 int length, usb_complete_t callback, void *arg);
\r
563 int rtw_usb_bulk_msg(struct usb_device *udev, struct usb_host_endpoint *uhe,
\r
564 void *data, int len, uint16_t *pactlen, usb_timeout_t timeout);
\r
565 void *usb_get_intfdata(struct usb_interface *intf);
\r
566 int usb_linux_init_endpoints(struct usb_device *udev);
\r
570 typedef struct urb * PURB;
\r
572 typedef unsigned gfp_t;
\r
573 #define __GFP_WAIT ((gfp_t)0x10u) /* Can wait and reschedule? */
\r
574 #define __GFP_HIGH ((gfp_t)0x20u) /* Should access emergency pools? */
\r
575 #define __GFP_IO ((gfp_t)0x40u) /* Can start physical IO? */
\r
576 #define __GFP_FS ((gfp_t)0x80u) /* Can call down to low-level FS? */
\r
577 #define __GFP_COLD ((gfp_t)0x100u) /* Cache-cold page required */
\r
578 #define __GFP_NOWARN ((gfp_t)0x200u) /* Suppress page allocation failure warning */
\r
579 #define __GFP_REPEAT ((gfp_t)0x400u) /* Retry the allocation. Might fail */
\r
580 #define __GFP_NOFAIL ((gfp_t)0x800u) /* Retry for ever. Cannot fail */
\r
581 #define __GFP_NORETRY ((gfp_t)0x1000u)/* Do not retry. Might fail */
\r
582 #define __GFP_NO_GROW ((gfp_t)0x2000u)/* Slab internal usage */
\r
583 #define __GFP_COMP ((gfp_t)0x4000u)/* Add compound page metadata */
\r
584 #define __GFP_ZERO ((gfp_t)0x8000u)/* Return zeroed page on success */
\r
585 #define __GFP_NOMEMALLOC ((gfp_t)0x10000u) /* Don't use emergency reserves */
\r
586 #define __GFP_HARDWALL ((gfp_t)0x20000u) /* Enforce hardwall cpuset memory allocs */
\r
588 /* This equals 0, but use constants in case they ever change */
\r
589 #define GFP_NOWAIT (GFP_ATOMIC & ~__GFP_HIGH)
\r
590 /* GFP_ATOMIC means both !wait (__GFP_WAIT not set) and use emergency pool */
\r
591 #define GFP_ATOMIC (__GFP_HIGH)
\r
592 #define GFP_NOIO (__GFP_WAIT)
\r
593 #define GFP_NOFS (__GFP_WAIT | __GFP_IO)
\r
594 #define GFP_KERNEL (__GFP_WAIT | __GFP_IO | __GFP_FS)
\r
595 #define GFP_USER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL)
\r
596 #define GFP_HIGHUSER (__GFP_WAIT | __GFP_IO | __GFP_FS | __GFP_HARDWALL | \
\r
600 #endif // kenny add Linux compatibility code for Linux USB
\r
602 __inline static _list *get_next(_list *list)
\r
607 __inline static _list *get_list_head(_queue *queue)
\r
609 return (&(queue->queue));
\r
613 #define LIST_CONTAINOR(ptr, type, member) \
\r
614 ((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
\r
617 __inline static void _enter_critical(_lock *plock, _irqL *pirqL)
\r
619 spin_lock_irqsave(plock, *pirqL);
\r
622 __inline static void _exit_critical(_lock *plock, _irqL *pirqL)
\r
624 spin_unlock_irqrestore(plock, *pirqL);
\r
627 __inline static void _enter_critical_ex(_lock *plock, _irqL *pirqL)
\r
629 spin_lock_irqsave(plock, *pirqL);
\r
632 __inline static void _exit_critical_ex(_lock *plock, _irqL *pirqL)
\r
634 spin_unlock_irqrestore(plock, *pirqL);
\r
637 __inline static void _enter_critical_bh(_lock *plock, _irqL *pirqL)
\r
639 spin_lock_bh(plock, *pirqL);
\r
642 __inline static void _exit_critical_bh(_lock *plock, _irqL *pirqL)
\r
644 spin_unlock_bh(plock, *pirqL);
\r
647 __inline static void _enter_critical_mutex(_mutex *pmutex, _irqL *pirqL)
\r
655 __inline static void _exit_critical_mutex(_mutex *pmutex, _irqL *pirqL)
\r
658 mtx_unlock(pmutex);
\r
661 static inline void __list_del(struct list_head * prev, struct list_head * next)
\r
666 static inline void INIT_LIST_HEAD(struct list_head *list)
\r
671 __inline static void rtw_list_delete(_list *plist)
\r
673 __list_del(plist->prev, plist->next);
\r
674 INIT_LIST_HEAD(plist);
\r
677 __inline static void _init_timer(_timer *ptimer,_nic_hdl padapter,void *pfunc,void* cntx)
\r
679 ptimer->function = pfunc;
\r
680 ptimer->arg = cntx;
\r
681 callout_init(&ptimer->callout, CALLOUT_MPSAFE);
\r
684 __inline static void _set_timer(_timer *ptimer,u32 delay_time)
\r
686 // mod_timer(ptimer , (jiffies+(delay_time*HZ/1000)));
\r
687 if(ptimer->function && ptimer->arg){
\r
688 rtw_mtx_lock(NULL);
\r
689 callout_reset(&ptimer->callout, delay_time,ptimer->function, ptimer->arg);
\r
690 rtw_mtx_unlock(NULL);
\r
694 __inline static void _cancel_timer(_timer *ptimer,u8 *bcancelled)
\r
696 // del_timer_sync(ptimer);
\r
697 // *bcancelled= _TRUE;//TRUE ==1; FALSE==0
\r
698 rtw_mtx_lock(NULL);
\r
699 callout_drain(&ptimer->callout);
\r
700 rtw_mtx_unlock(NULL);
\r
703 __inline static void _init_workitem(_workitem *pwork, void *pfunc, PVOID cntx)
\r
705 printf("%s Not implement yet! \n",__FUNCTION__);
\r
708 __inline static void _set_workitem(_workitem *pwork)
\r
710 printf("%s Not implement yet! \n",__FUNCTION__);
\r
711 // schedule_work(pwork);
\r
715 // Global Mutex: can only be used at PASSIVE level.
\r
718 #define ACQUIRE_GLOBAL_MUTEX(_MutexCounter) \
\r
722 #define RELEASE_GLOBAL_MUTEX(_MutexCounter) \
\r
726 #define ATOMIC_INIT(i) { (i) }
\r
728 static __inline void thread_enter(char *name);
\r
730 //Atomic integer operations
\r
731 typedef uint32_t ATOMIC_T ;
\r
733 #define rtw_netdev_priv(netdev) (((struct ifnet *)netdev)->if_softc)
\r
735 #define rtw_free_netdev(netdev) if_free((netdev))
\r
737 #define NDEV_FMT "%s"
\r
738 #define NDEV_ARG(ndev) ""
\r
739 #define ADPT_FMT "%s"
\r
740 #define ADPT_ARG(adapter) ""
\r
741 #define FUNC_NDEV_FMT "%s"
\r
742 #define FUNC_NDEV_ARG(ndev) __func__
\r
743 #define FUNC_ADPT_FMT "%s"
\r
744 #define FUNC_ADPT_ARG(adapter) __func__
\r
746 #define STRUCT_PACKED
\r