1 /******************************************************************************
\r
3 * Copyright(c) 2007 - 2013 Realtek Corporation. All rights reserved.
\r
5 * This program is free software; you can redistribute it and/or modify it
\r
6 * under the terms of version 2 of the GNU General Public License as
\r
7 * published by the Free Software Foundation.
\r
9 * This program is distributed in the hope that it will be useful, but WITHOUT
\r
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
\r
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
* more details.
*
* You should have received a copy of the GNU General Public License along with
\r
15 * this program; if not, write to the Free Software Foundation, Inc.,
\r
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
\r
19 ******************************************************************************/
\r
20 #ifndef __OSDEP_LINUX_SERVICE_H_
\r
21 #define __OSDEP_LINUX_SERVICE_H_
\r
23 #include <linux/version.h>
\r
24 #include <linux/spinlock.h>
\r
25 #include <linux/compiler.h>
\r
26 #include <linux/kernel.h>
\r
27 #include <linux/errno.h>
\r
28 #include <linux/init.h>
\r
29 #include <linux/slab.h>
\r
30 #include <linux/module.h>
\r
31 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,5))
\r
32 #include <linux/kref.h>
\r
34 //#include <linux/smp_lock.h>
\r
35 #include <linux/netdevice.h>
\r
36 #include <linux/skbuff.h>
\r
37 #include <linux/circ_buf.h>
\r
38 #include <asm/uaccess.h>
\r
39 #include <asm/byteorder.h>
\r
40 #include <asm/atomic.h>
\r
42 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26))
\r
43 #include <asm/semaphore.h>
\r
45 #include <linux/semaphore.h>
\r
47 #include <linux/sem.h>
\r
48 #include <linux/sched.h>
\r
49 #include <linux/etherdevice.h>
\r
50 #include <linux/wireless.h>
\r
51 #include <net/iw_handler.h>
\r
52 #include <linux/if_arp.h>
\r
53 #include <linux/rtnetlink.h>
\r
54 #include <linux/delay.h>
\r
55 #include <linux/interrupt.h> // for struct tasklet_struct
\r
56 #include <linux/ip.h>
\r
57 #include <linux/kthread.h>
\r
58 #include <linux/list.h>
\r
59 #include <linux/vmalloc.h>
\r
61 #if (LINUX_VERSION_CODE <= KERNEL_VERSION(2,5,41))
\r
62 #include <linux/tqueue.h>
\r
65 #ifdef RTK_DMP_PLATFORM
\r
66 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,6,12))
\r
67 #include <linux/pageremap.h>
\r
72 #ifdef CONFIG_IOCTL_CFG80211
\r
73 // #include <linux/ieee80211.h>
\r
74 #include <net/ieee80211_radiotap.h>
\r
75 #include <net/cfg80211.h>
\r
76 #endif //CONFIG_IOCTL_CFG80211
\r
78 #ifdef CONFIG_TCP_CSUM_OFFLOAD_TX
\r
79 #include <linux/in.h>
\r
80 #include <linux/udp.h>
\r
83 #ifdef CONFIG_HAS_EARLYSUSPEND
\r
84 #include <linux/earlysuspend.h>
\r
85 #endif //CONFIG_HAS_EARLYSUSPEND
\r
87 #include <linux/fs.h>
\r
89 #ifdef CONFIG_USB_HCI
\r
90 #include <linux/usb.h>
\r
91 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21))
\r
92 #include <linux/usb_ch9.h>
\r
94 #include <linux/usb/ch9.h>
\r
#ifdef CONFIG_USB_HCI
/* Shorthand for a pointer to a USB request block. */
typedef struct urb *PURB;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,22))
#ifdef CONFIG_USB_SUSPEND
/* Kernel-side USB selective suspend is available; enable driver autosuspend. */
#define CONFIG_AUTOSUSPEND	1
#endif
#endif
#endif /* CONFIG_USB_HCI */
\r
107 typedef struct semaphore _sema;
\r
108 typedef spinlock_t _lock;
\r
109 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
\r
110 typedef struct mutex _mutex;
\r
112 typedef struct semaphore _mutex;
\r
114 typedef struct timer_list _timer;
\r
117 struct list_head queue;
\r
121 typedef struct sk_buff _pkt;
\r
122 typedef unsigned char _buffer;
\r
124 typedef struct __queue _queue;
\r
125 typedef struct list_head _list;
\r
126 typedef int _OS_STATUS;
\r
127 //typedef u32 _irqL;
\r
128 typedef unsigned long _irqL;
\r
129 typedef struct net_device * _nic_hdl;
\r
131 typedef void* _thread_hdl_;
\r
132 typedef int thread_return;
\r
133 typedef void* thread_context;
\r
135 #define thread_exit() complete_and_exit(NULL, 0)
\r
137 typedef void timer_hdl_return;
\r
138 typedef void* timer_hdl_context;
\r
140 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41))
\r
141 typedef struct work_struct _workitem;
\r
143 typedef struct tq_struct _workitem;
\r
146 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
\r
147 #define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
\r
150 #if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,22))
\r
151 // Porting from linux kernel, for compatible with old kernel.
\r
152 static inline unsigned char *skb_tail_pointer(const struct sk_buff *skb)
\r
157 static inline void skb_reset_tail_pointer(struct sk_buff *skb)
\r
159 skb->tail = skb->data;
\r
162 static inline void skb_set_tail_pointer(struct sk_buff *skb, const int offset)
\r
164 skb->tail = skb->data + offset;
\r
167 static inline unsigned char *skb_end_pointer(const struct sk_buff *skb)
\r
173 __inline static _list *get_next(_list *list)
\r
178 __inline static _list *get_list_head(_queue *queue)
\r
180 return (&(queue->queue));
\r
/* container_of equivalent: recover the enclosing `type` from a pointer to
 * its `member` field (offset computed via the null-pointer idiom). */
#define LIST_CONTAINOR(ptr, type, member) \
	((type *)((char *)(ptr)-(SIZE_T)(&((type *)0)->member)))
\r
188 __inline static void _enter_critical(_lock *plock, _irqL *pirqL)
\r
190 spin_lock_irqsave(plock, *pirqL);
\r
193 __inline static void _exit_critical(_lock *plock, _irqL *pirqL)
\r
195 spin_unlock_irqrestore(plock, *pirqL);
\r
198 __inline static void _enter_critical_ex(_lock *plock, _irqL *pirqL)
\r
200 spin_lock_irqsave(plock, *pirqL);
\r
203 __inline static void _exit_critical_ex(_lock *plock, _irqL *pirqL)
\r
205 spin_unlock_irqrestore(plock, *pirqL);
\r
208 __inline static void _enter_critical_bh(_lock *plock, _irqL *pirqL)
\r
210 spin_lock_bh(plock);
\r
213 __inline static void _exit_critical_bh(_lock *plock, _irqL *pirqL)
\r
215 spin_unlock_bh(plock);
\r
218 __inline static int _enter_critical_mutex(_mutex *pmutex, _irqL *pirqL)
\r
221 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
\r
222 //mutex_lock(pmutex);
\r
223 ret = mutex_lock_interruptible(pmutex);
\r
225 ret = down_interruptible(pmutex);
\r
231 __inline static void _exit_critical_mutex(_mutex *pmutex, _irqL *pirqL)
\r
233 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,37))
\r
234 mutex_unlock(pmutex);
\r
240 __inline static void rtw_list_delete(_list *plist)
\r
242 list_del_init(plist);
\r
/* Canonical parameter list for driver timer callbacks (matches the
 * context pointer installed by _init_timer). */
#define RTW_TIMER_HDL_ARGS void *FunctionContext
\r
247 __inline static void _init_timer(_timer *ptimer,_nic_hdl nic_hdl,void *pfunc,void* cntx)
\r
249 //setup_timer(ptimer, pfunc,(u32)cntx);
\r
250 ptimer->function = pfunc;
\r
251 ptimer->data = (unsigned long)cntx;
\r
252 init_timer(ptimer);
\r
255 __inline static void _set_timer(_timer *ptimer,u32 delay_time)
\r
257 mod_timer(ptimer , (jiffies+(delay_time*HZ/1000)));
\r
260 __inline static void _cancel_timer(_timer *ptimer,u8 *bcancelled)
\r
262 del_timer_sync(ptimer);
\r
263 *bcancelled= _TRUE;//TRUE ==1; FALSE==0
\r
267 __inline static void _init_workitem(_workitem *pwork, void *pfunc, PVOID cntx)
\r
269 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20))
\r
270 INIT_WORK(pwork, pfunc);
\r
271 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41))
\r
272 INIT_WORK(pwork, pfunc,pwork);
\r
274 INIT_TQUEUE(pwork, pfunc,pwork);
\r
278 __inline static void _set_workitem(_workitem *pwork)
\r
280 #if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41))
\r
281 schedule_work(pwork);
\r
283 schedule_task(pwork);
\r
287 __inline static void _cancel_workitem_sync(_workitem *pwork)
\r
289 #if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,22))
\r
290 cancel_work_sync(pwork);
\r
291 #elif (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,41))
\r
292 flush_scheduled_work();
\r
294 flush_scheduled_tasks();
\r
// Global Mutex: can only be used at PASSIVE level.
// Implemented as an atomic counter spun on with a sleep, so it must not be
// used from atomic context.
#define ACQUIRE_GLOBAL_MUTEX(_MutexCounter)                          \
do {                                                                 \
	while (atomic_inc_return((atomic_t *)&(_MutexCounter)) != 1) {   \
		atomic_dec((atomic_t *)&(_MutexCounter));                    \
		msleep(10);                                                  \
	}                                                                \
} while (0)

#define RELEASE_GLOBAL_MUTEX(_MutexCounter)                          \
do {                                                                 \
	atomic_dec((atomic_t *)&(_MutexCounter));                        \
} while (0)
\r
315 static inline int rtw_netif_queue_stopped(struct net_device *pnetdev)
\r
317 #if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
\r
318 return (netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 0)) &&
\r
319 netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 1)) &&
\r
320 netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 2)) &&
\r
321 netif_tx_queue_stopped(netdev_get_tx_queue(pnetdev, 3)) );
\r
323 return netif_queue_stopped(pnetdev);
\r
327 static inline void rtw_netif_wake_queue(struct net_device *pnetdev)
\r
329 #if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
\r
330 netif_tx_wake_all_queues(pnetdev);
\r
332 netif_wake_queue(pnetdev);
\r
336 static inline void rtw_netif_start_queue(struct net_device *pnetdev)
\r
338 #if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
\r
339 netif_tx_start_all_queues(pnetdev);
\r
341 netif_start_queue(pnetdev);
\r
345 static inline void rtw_netif_stop_queue(struct net_device *pnetdev)
\r
347 #if (LINUX_VERSION_CODE>=KERNEL_VERSION(2,6,35))
\r
348 netif_tx_stop_all_queues(pnetdev);
\r
350 netif_stop_queue(pnetdev);
\r
/* Concatenate src1 and src2 into dst (capacity dst_len, including the NUL),
 * truncating if necessary. Fix: snprintf returns the length that WOULD have
 * been written, so after truncation `len` could exceed dst_len, making
 * `dst_len - len` negative — which snprintf receives as a huge size_t
 * (undefined behavior). Clamp `len` to keep the second call in bounds. */
static inline void rtw_merge_string(char *dst, int dst_len, char *src1, char *src2)
{
	int len = 0;

	if (dst == NULL || dst_len <= 0)
		return;

	len += snprintf(dst + len, dst_len - len, "%s", src1);
	if (len >= dst_len)
		len = dst_len - 1; /* first part was truncated; dst is full */
	len += snprintf(dst + len, dst_len - len, "%s", src2);
}
\r
361 #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
\r
362 #define rtw_signal_process(pid, sig) kill_pid(find_vpid((pid)),(sig), 1)
\r
363 #else //(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
\r
364 #define rtw_signal_process(pid, sig) kill_proc((pid), (sig), 1)
\r
365 #endif //(LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,27))
\r
368 // Suspend lock prevent system from going suspend
\r
369 #ifdef CONFIG_WAKELOCK
\r
370 #include <linux/wakelock.h>
\r
371 #elif defined(CONFIG_ANDROID_POWER)
\r
372 #include <linux/android_power.h>
\r
375 // limitation of path length
\r
376 #define PATH_LENGTH_MAX PATH_MAX
\r
//Atomic integer operations
#define ATOMIC_T atomic_t

/* Retrieve the driver-private pointer hung off a net_device via the
 * rtw_netdev_priv_indicator stored in netdev_priv(). */
#define rtw_netdev_priv(netdev) (((struct rtw_netdev_priv_indicator *)netdev_priv(netdev))->priv)

/* printf-style format/argument helpers for logging a net_device or
 * adapter identity, optionally prefixed with the calling function name. */
#define NDEV_FMT "%s"
#define NDEV_ARG(ndev) ndev->name
#define ADPT_FMT "%s"
#define ADPT_ARG(adapter) adapter->pnetdev->name
#define FUNC_NDEV_FMT "%s(%s)"
#define FUNC_NDEV_ARG(ndev) __func__, ndev->name
#define FUNC_ADPT_FMT "%s(%s)"
#define FUNC_ADPT_ARG(adapter) __func__, adapter->pnetdev->name
\r
392 struct rtw_netdev_priv_indicator {
\r
/* Allocate an ether netdev whose priv indicator points at an existing
 * private area (`old_priv`), or at a freshly allocated one. */
struct net_device *rtw_alloc_etherdev_with_old_priv(int sizeof_priv, void *old_priv);
extern struct net_device *rtw_alloc_etherdev(int sizeof_priv);

#define STRUCT_PACKED __attribute__ ((packed))
\r