2 * Copyright (c) 2010 - 2013 Espressif Systems.
5 * - interface for other driver or kernel
12 #include <net/cfg80211.h>
13 #include <linux/skbuff.h>
14 #include <linux/bitops.h>
15 #include <linux/version.h>
16 #include <linux/mmc/card.h>
17 #include <linux/mmc/mmc.h>
18 #include <linux/mmc/host.h>
19 #include <linux/mmc/sdio_func.h>
20 #include <linux/mmc/sdio_ids.h>
21 #include <linux/mmc/sdio.h>
22 #include <linux/mmc/sd.h>
23 #include <linux/completion.h>
26 #include "esp_debug.h"
31 extern void register_ext_gpio_ops(struct esp_ext_gpio_ops *ops);
32 extern void unregister_ext_gpio_ops(void);
/*
 * Callback table registered with the core driver via
 * register_ext_gpio_ops() in ext_gpio_init(); each entry maps a generic
 * external-GPIO operation onto the local implementation in this file.
 */
34 static struct esp_ext_gpio_ops ext_gpio_ops = {
35 .gpio_request = ext_gpio_request, /* gpio_request gpio_no from 0x0 to 0xf*/
36 .gpio_release = ext_gpio_release, /* gpio_release */
37 .gpio_set_mode = ext_gpio_set_mode, /* gpio_set_mode, data is irq_func of irq_mode , default level of output_mode */
38 .gpio_get_mode = ext_gpio_get_mode, /* gpio_get_mode, current mode */
39 .gpio_set_state = ext_gpio_set_output_state, /* only output state, high level or low level */
40 .gpio_get_state = ext_gpio_get_state, /* current state */
41 .irq_ack = ext_irq_ack, /* ack interrupt */
/*
 * Module-wide state.  gpio_list and intr_mask_reg are only modified
 * while ext_mutex_lock is held (see the accessor functions below).
 */
47 static struct esp_pub *ext_epub = NULL; /* ESP device handle; every entry point requires it non-NULL with sip state SIP_RUN */
49 static u16 intr_mask_reg = 0x0000; /* bit n set => GPIO n is configured as an interrupt source */
50 struct workqueue_struct *ext_irq_wkq = NULL; /* single-threaded wq created in ext_gpio_init() */
51 struct work_struct ext_irq_work; /* runs esp_tx_work() to dispatch GPIO irq handlers */
52 static struct mutex ext_mutex_lock; /* serializes all ext_gpio_* entry points */
/*
 * Per-pin bookkeeping for the 16 external GPIOs:
 * { pin number, current EXT_GPIO_MODE_*, cached output state, irq handler }.
 * All pins start disabled/idle with no handler installed.
 */
54 static struct ext_gpio_info gpio_list[EXT_GPIO_MAX_NUM] = {
55 { 0, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
56 { 1, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
57 { 2, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
58 { 3, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
59 { 4, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
60 { 5, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
61 { 6, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
62 { 7, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
63 { 8, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
64 { 9, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
65 {10, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
66 {11, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
67 {12, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
68 {13, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
69 {14, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
70 {15, EXT_GPIO_MODE_DISABLE, EXT_GPIO_STATE_IDLE, NULL},
/*
 * Ring buffer of pending GPIO interrupt-status words, filled by
 * ext_gpio_int_process() and drained by esp_tx_work().  Guarded by its
 * own spin_lock member (initializer body not visible in this view).
 */
73 static struct pending_intr_list_info esp_pending_intr_list = {
/*
 * ext_gpio_get_int_mask_reg() - report which pins are interrupt sources.
 * Presumably returns intr_mask_reg (bit n set => GPIO n is an irq
 * source) — function body not visible in this view, confirm against the
 * full source.
 */
79 u16 ext_gpio_get_int_mask_reg(void)
/*
 * ext_gpio_request() - claim one external GPIO pin for exclusive use.
 * @gpio_no: pin index, 0 .. EXT_GPIO_MAX_NUM-1.
 *
 * Returns -ENOTRECOVERABLE when the ESP device is not running
 * (sip state != SIP_RUN); also rejects out-of-range pins and pins whose
 * mode is not EXT_GPIO_MODE_DISABLE (already claimed) — the return
 * values of those two error paths are not visible in this view.  On
 * success the pin is parked in EXT_GPIO_MODE_MAX until
 * ext_gpio_set_mode() configures it.
 */
84 int ext_gpio_request(int gpio_no)
/* Bail out early (before taking the mutex) if the chip is not up. */
86 if (ext_epub == NULL || ext_epub->sip == NULL ||
87 atomic_read(&ext_epub->sip->state) != SIP_RUN) {
88 esp_dbg(ESP_DBG_ERROR, "%s esp state is not ok\n", __func__);
89 return -ENOTRECOVERABLE;
92 mutex_lock(&ext_mutex_lock);
94 if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
95 mutex_unlock(&ext_mutex_lock);
96 esp_dbg(ESP_DBG_ERROR, "%s unkown gpio num\n", __func__);
/* Refuse to hand out a pin that is already in some active mode. */
100 if (gpio_list[gpio_no].gpio_mode != EXT_GPIO_MODE_DISABLE) {
101 mutex_unlock(&ext_mutex_lock);
102 esp_dbg(ESP_DBG_ERROR, "%s gpio is already in used by other\n", __func__);
/* Mark the pin as claimed-but-unconfigured. */
105 gpio_list[gpio_no].gpio_mode = EXT_GPIO_MODE_MAX;
106 mutex_unlock(&ext_mutex_lock);
110 EXPORT_SYMBOL(ext_gpio_request);
/*
 * ext_gpio_release() - return a pin to the free pool.
 * @gpio_no: pin index, 0 .. EXT_GPIO_MAX_NUM-1.
 *
 * Tells the chip to disable the pin (sif_config_gpio_mode() under the
 * bus lock), then clears the local bookkeeping and the pin's bit in
 * intr_mask_reg.  Returns -ENOTRECOVERABLE when the device is not in
 * SIP_RUN; other error-path return values are outside this view.
 */
112 int ext_gpio_release(int gpio_no)
116 if (ext_epub == NULL || ext_epub->sip == NULL ||
117 atomic_read(&ext_epub->sip->state) != SIP_RUN) {
118 esp_dbg(ESP_DBG_ERROR, "%s esp state is not ok\n", __func__);
119 return -ENOTRECOVERABLE;
122 mutex_lock(&ext_mutex_lock);
124 if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
125 mutex_unlock(&ext_mutex_lock);
126 esp_dbg(ESP_DBG_ERROR, "%s unkown gpio num\n", __func__);
/* Push "disabled" to the hardware before touching local state. */
129 sif_lock_bus(ext_epub);
130 ret = sif_config_gpio_mode(ext_epub, (u8)gpio_no, EXT_GPIO_MODE_DISABLE);
131 sif_unlock_bus(ext_epub);
/* NOTE(review): the 'if (ret)' guard for this error path is not visible in this view. */
133 esp_dbg(ESP_DBG_ERROR, "%s gpio release error\n", __func__);
134 mutex_unlock(&ext_mutex_lock);
/* Hardware accepted the change: free the pin and drop it from the irq mask. */
138 gpio_list[gpio_no].gpio_mode = EXT_GPIO_MODE_DISABLE;
139 gpio_list[gpio_no].gpio_state = EXT_GPIO_STATE_IDLE;
140 gpio_list[gpio_no].irq_handler = NULL;
141 intr_mask_reg &= ~(1<<gpio_no);
143 mutex_unlock(&ext_mutex_lock);
147 EXPORT_SYMBOL(ext_gpio_release);
/*
 * ext_gpio_set_mode() - configure a previously requested pin.
 * @gpio_no: pin index, 0 .. EXT_GPIO_MAX_NUM-1.
 * @mode:    EXT_GPIO_MODE_* value; must lie strictly between
 *           EXT_GPIO_MODE_OOB and EXT_GPIO_MODE_MAX.
 * @data:    for the four interrupt modes, the ext_irq_handler_t
 *           callback; for EXT_GPIO_MODE_OUTPUT, pointer to an int
 *           default level (normalized to 0/1).
 *
 * The pin must already be claimed (mode != EXT_GPIO_MODE_DISABLE).
 * Local state is snapshotted into backup_info and restored on any
 * failure, so a failed call leaves the pin unchanged.  On success the
 * mode byte (with the default output level folded into bit 4) is
 * written to the chip via sif_config_gpio_mode() under the bus lock,
 * and interrupt modes set the pin's bit in intr_mask_reg.
 * Returns -ENOTRECOVERABLE on bad device state or unclaimed pin; the
 * other error-path return values are outside this view.
 */
149 int ext_gpio_set_mode(int gpio_no, int mode, void *data)
153 struct ext_gpio_info backup_info;
155 if (ext_epub == NULL || ext_epub->sip == NULL ||
156 atomic_read(&ext_epub->sip->state) != SIP_RUN) {
157 esp_dbg(ESP_DBG_LOG, "%s esp state is not ok\n", __func__);
158 return -ENOTRECOVERABLE;
161 mutex_lock(&ext_mutex_lock);
163 if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
164 mutex_unlock(&ext_mutex_lock);
165 esp_dbg(ESP_DBG_ERROR, "%s unkown gpio num\n", __func__);
169 if (gpio_list[gpio_no].gpio_mode == EXT_GPIO_MODE_DISABLE) {
170 mutex_unlock(&ext_mutex_lock);
171 esp_dbg(ESP_DBG_ERROR, "%s gpio is not in occupy, please request gpio\n", __func__);
172 return -ENOTRECOVERABLE;
175 if (mode <= EXT_GPIO_MODE_OOB || mode >= EXT_GPIO_MODE_MAX) {
176 mutex_unlock(&ext_mutex_lock);
177 esp_dbg(ESP_DBG_ERROR, "%s gpio mode unknown\n", __func__);
/* Snapshot current pin state so every failure path below can roll back. */
181 memcpy(&backup_info, &gpio_list[gpio_no], sizeof(struct ext_gpio_info));
183 gpio_list[gpio_no].gpio_mode = mode;
184 gpio_mode = (u8)mode;
187 case EXT_GPIO_MODE_INTR_POSEDGE:
188 case EXT_GPIO_MODE_INTR_NEGEDGE:
189 case EXT_GPIO_MODE_INTR_LOLEVEL:
190 case EXT_GPIO_MODE_INTR_HILEVEL:
/* NOTE(review): the 'data == NULL' guard for this rollback path is not visible in this view. */
192 memcpy(&gpio_list[gpio_no], &backup_info, sizeof(struct ext_gpio_info));
193 esp_dbg(ESP_DBG_ERROR, "%s irq_handler is NULL\n", __func__);
194 mutex_unlock(&ext_mutex_lock);
/* Install the handler and mark this pin as an interrupt source. */
197 gpio_list[gpio_no].irq_handler = (ext_irq_handler_t)data;
198 intr_mask_reg |= (1<<gpio_no);
200 case EXT_GPIO_MODE_OUTPUT:
202 memcpy(&gpio_list[gpio_no], &backup_info, sizeof(struct ext_gpio_info));
203 esp_dbg(ESP_DBG_ERROR, "%s output default value is NULL\n", __func__);
204 mutex_unlock(&ext_mutex_lock);
/* Normalize the caller's default level to 0/1 and fold it into bit 4 of the mode byte. */
207 *(int *)data = (*(int *)data == 0 ? 0 : 1);
208 gpio_mode = (u8)(((*(int *)data)<<4) | gpio_mode);
/* Non-interrupt modes carry no handler and must not appear in the irq mask. */
210 gpio_list[gpio_no].irq_handler = NULL;
211 intr_mask_reg &= ~(1<<gpio_no);
/* Commit the new mode to the hardware. */
215 sif_lock_bus(ext_epub);
216 ret = sif_config_gpio_mode(ext_epub, (u8)gpio_no, gpio_mode);
217 sif_unlock_bus(ext_epub);
/* On hardware failure, roll local state back to the snapshot. */
219 memcpy(&gpio_list[gpio_no], &backup_info, sizeof(struct ext_gpio_info));
220 esp_dbg(ESP_DBG_ERROR, "%s gpio set error\n", __func__);
221 mutex_unlock(&ext_mutex_lock);
225 mutex_unlock(&ext_mutex_lock);
228 EXPORT_SYMBOL(ext_gpio_set_mode);
/*
 * ext_gpio_get_mode() - report the cached EXT_GPIO_MODE_* of a pin.
 * @gpio_no: pin index, 0 .. EXT_GPIO_MAX_NUM-1.
 *
 * Reads gpio_list under the mutex.  Returns -ENOTRECOVERABLE when the
 * device is not running; the final 'return gpio_mode' and the bad-index
 * return value are outside this view.
 */
230 int ext_gpio_get_mode(int gpio_no)
234 if (ext_epub == NULL || ext_epub->sip == NULL ||
235 atomic_read(&ext_epub->sip->state) != SIP_RUN) {
236 esp_dbg(ESP_DBG_LOG, "%s esp state is not ok\n", __func__);
237 return -ENOTRECOVERABLE;
240 mutex_lock(&ext_mutex_lock);
242 if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
243 esp_dbg(ESP_DBG_ERROR, "%s unkown gpio num\n", __func__);
244 mutex_unlock(&ext_mutex_lock);
248 gpio_mode = gpio_list[gpio_no].gpio_mode;
250 mutex_unlock(&ext_mutex_lock);
254 EXPORT_SYMBOL(ext_gpio_get_mode);
/*
 * ext_gpio_set_output_state() - drive an output pin high or low.
 * @gpio_no: pin index, 0 .. EXT_GPIO_MAX_NUM-1; must currently be in
 *           EXT_GPIO_MODE_OUTPUT.
 * @state:   EXT_GPIO_STATE_LOW or EXT_GPIO_STATE_HIGH.
 *
 * Writes the level via sif_set_gpio_output(mask = 1<<gpio_no,
 * value = state<<gpio_no) under the bus lock and caches it in
 * gpio_list.  Returns -ENOTRECOVERABLE on bad device state or bad
 * @state; other error-path return values are outside this view.
 */
257 int ext_gpio_set_output_state(int gpio_no, int state)
261 if (ext_epub == NULL || ext_epub->sip == NULL ||
262 atomic_read(&ext_epub->sip->state) != SIP_RUN) {
263 esp_dbg(ESP_DBG_LOG, "%s esp state is not ok\n", __func__);
264 return -ENOTRECOVERABLE;
267 mutex_lock(&ext_mutex_lock);
269 if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
270 mutex_unlock(&ext_mutex_lock);
271 esp_dbg(ESP_DBG_ERROR, "%s unkown gpio num\n", __func__);
275 if (gpio_list[gpio_no].gpio_mode != EXT_GPIO_MODE_OUTPUT) {
276 mutex_unlock(&ext_mutex_lock);
277 esp_dbg(ESP_DBG_ERROR, "%s gpio is not in output state, please request gpio or set output state\n", __func__);
281 if (state != EXT_GPIO_STATE_LOW && state != EXT_GPIO_STATE_HIGH) {
282 mutex_unlock(&ext_mutex_lock);
283 esp_dbg(ESP_DBG_ERROR, "%s gpio state unknown\n", __func__);
284 return -ENOTRECOVERABLE;
/* Write the new level to the chip. */
287 sif_lock_bus(ext_epub);
288 ret = sif_set_gpio_output(ext_epub, 1<<gpio_no, state<<gpio_no);
289 sif_unlock_bus(ext_epub);
291 esp_dbg(ESP_DBG_ERROR, "%s gpio state set error\n", __func__);
292 mutex_unlock(&ext_mutex_lock);
/* Cache the level so ext_gpio_get_state() can answer without a bus read. */
295 gpio_list[gpio_no].gpio_state = state;
297 mutex_unlock(&ext_mutex_lock);
301 EXPORT_SYMBOL(ext_gpio_set_output_state);
/*
 * ext_gpio_get_state() - read a pin's current level.
 * @gpio_no: pin index, 0 .. EXT_GPIO_MAX_NUM-1; pin must be in
 *           OUTPUT or INPUT mode.
 *
 * Output pins report the cached gpio_state; input pins are read from
 * the chip via sif_get_gpio_input() under the bus lock.  Returns 1 or 0
 * for the pin's level, -ENOTRECOVERABLE when the device is not running
 * (other error-path return values are outside this view).
 *
 * NOTE(review): for the OUTPUT branch, 'state' holds the cached
 * EXT_GPIO_STATE_* value, yet the final return tests it against the
 * per-pin bit (1<<gpio_no) — this looks wrong for gpio_no > 0; verify
 * against the full source / hardware behavior.
 */
303 int ext_gpio_get_state(int gpio_no)
309 if (ext_epub == NULL || ext_epub->sip == NULL ||
310 atomic_read(&ext_epub->sip->state) != SIP_RUN) {
311 esp_dbg(ESP_DBG_LOG, "%s esp state is not ok\n", __func__);
312 return -ENOTRECOVERABLE;
315 mutex_lock(&ext_mutex_lock);
317 if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
318 esp_dbg(ESP_DBG_ERROR, "%s unkown gpio num\n", __func__);
319 mutex_unlock(&ext_mutex_lock);
323 if (gpio_list[gpio_no].gpio_mode == EXT_GPIO_MODE_OUTPUT) {
324 state = gpio_list[gpio_no].gpio_state;
325 } else if (gpio_list[gpio_no].gpio_mode == EXT_GPIO_MODE_INPUT) {
/* Input pins: fetch the live level from the chip. */
326 sif_lock_bus(ext_epub);
327 ret = sif_get_gpio_input(ext_epub, &mask, &state);
328 sif_unlock_bus(ext_epub);
330 esp_dbg(ESP_DBG_ERROR, "%s get gpio_input state error\n", __func__);
331 mutex_unlock(&ext_mutex_lock);
335 esp_dbg(ESP_DBG_ERROR, "%s gpio_state is not input or output\n", __func__);
336 mutex_unlock(&ext_mutex_lock);
339 mutex_unlock(&ext_mutex_lock);
/* Collapse the pin's bit to a plain 0/1. */
341 return (state & (1<<gpio_no)) ? 1 : 0;
343 EXPORT_SYMBOL(ext_gpio_get_state);
/*
 * ext_irq_ack() - acknowledge an interrupt on an external GPIO pin.
 * @gpio_no: pin index, 0 .. EXT_GPIO_MAX_NUM-1; must be configured in
 *           one of the four EXT_GPIO_MODE_INTR_* modes.
 *
 * The ack is issued through sif_set_gpio_output(ext_epub, 0x00,
 * 1<<gpio_no) under the bus lock.  Returns -ENOTRECOVERABLE on bad
 * device state or when the pin is not in an interrupt mode; other
 * error-path return values are outside this view.
 */
345 int ext_irq_ack(int gpio_no)
349 if (ext_epub == NULL || ext_epub->sip == NULL ||
350 atomic_read(&ext_epub->sip->state) != SIP_RUN) {
351 esp_dbg(ESP_DBG_LOG, "%s esp state is not ok\n", __func__);
352 return -ENOTRECOVERABLE;
355 mutex_lock(&ext_mutex_lock);
356 if (gpio_no >= EXT_GPIO_MAX_NUM || gpio_no < 0) {
357 esp_dbg(ESP_DBG_ERROR, "%s unkown gpio num\n", __func__);
358 mutex_unlock(&ext_mutex_lock);
/* Only pins configured as interrupt sources may be acked. */
362 if (gpio_list[gpio_no].gpio_mode != EXT_GPIO_MODE_INTR_POSEDGE
363 && gpio_list[gpio_no].gpio_mode != EXT_GPIO_MODE_INTR_NEGEDGE
364 && gpio_list[gpio_no].gpio_mode != EXT_GPIO_MODE_INTR_LOLEVEL
365 && gpio_list[gpio_no].gpio_mode != EXT_GPIO_MODE_INTR_HILEVEL) {
366 esp_dbg(ESP_DBG_ERROR, "%s gpio mode is not intr mode\n", __func__);
367 mutex_unlock(&ext_mutex_lock);
368 return -ENOTRECOVERABLE;
371 sif_lock_bus(ext_epub);
372 ret = sif_set_gpio_output(ext_epub, 0x00, 1<<gpio_no);
373 sif_unlock_bus(ext_epub);
375 esp_dbg(ESP_DBG_ERROR, "%s gpio intr ack error\n", __func__);
376 mutex_unlock(&ext_mutex_lock);
380 mutex_unlock(&ext_mutex_lock);
383 EXPORT_SYMBOL(ext_irq_ack);
/*
 * show_status() - debug helper: dump every slot of the pending-interrupt
 * ring buffer plus its start/end cursors and element count.  Logged at
 * ESP_DBG_ERROR so it is visible at the default debug level.
 */
385 void show_status(void)
388 for (i = 0; i < MAX_PENDING_INTR_LIST;i++)
389 esp_dbg(ESP_DBG_ERROR, "status[%d] = [0x%04x]\n", i, esp_pending_intr_list.pending_intr_list[i]);
391 esp_dbg(ESP_DBG_ERROR, "start_pos[%d]\n",esp_pending_intr_list.start_pos);
392 esp_dbg(ESP_DBG_ERROR, "end_pos[%d]\n",esp_pending_intr_list.end_pos);
393 esp_dbg(ESP_DBG_ERROR, "curr_num[%d]\n",esp_pending_intr_list.curr_num);
/*
 * esp_tx_work() - workqueue handler that dispatches external GPIO irqs.
 * (Despite the name, this is the ext-irq dispatch work queued on
 * ext_irq_wkq, not a transmit path.)
 *
 * Pops one interrupt-status word from the head of the ring buffer under
 * the spinlock, then — outside the lock — calls the registered
 * irq_handler of every pin whose status bit is set.  If more entries
 * arrived meanwhile, the work re-queues itself.
 */
396 void esp_tx_work(struct work_struct *work)
399 u16 tmp_intr_status_reg;
401 esp_dbg(ESP_DBG_TRACE, "%s enter\n", __func__);
403 spin_lock(&esp_pending_intr_list.spin_lock);
/* Dequeue one status word and advance the ring's read cursor. */
405 tmp_intr_status_reg = esp_pending_intr_list.pending_intr_list[esp_pending_intr_list.start_pos];
407 esp_pending_intr_list.pending_intr_list[esp_pending_intr_list.start_pos] = 0x0000;
408 esp_pending_intr_list.start_pos = (esp_pending_intr_list.start_pos + 1) % MAX_PENDING_INTR_LIST;
409 esp_pending_intr_list.curr_num--;
411 spin_unlock(&esp_pending_intr_list.spin_lock);
/* Run handlers without holding the spinlock — they may sleep or take the bus. */
413 for (i = 0; i < EXT_GPIO_MAX_NUM; i++) {
414 if (tmp_intr_status_reg & (1<<i) && (gpio_list[i].irq_handler))
415 gpio_list[i].irq_handler();
/* More interrupts queued while we were dispatching: run again. */
418 spin_lock(&esp_pending_intr_list.spin_lock);
419 if (esp_pending_intr_list.curr_num > 0)
420 queue_work(ext_irq_wkq, &ext_irq_work);
421 spin_unlock(&esp_pending_intr_list.spin_lock);
/*
 * ext_gpio_int_process() - record a GPIO interrupt-status word from the
 * chip and schedule its dispatch.
 * @value: per-pin interrupt status bits.
 *
 * Waits while the ring buffer is full (the wait-loop body is not
 * visible in this view), then appends @value at end_pos under the
 * spinlock and queues ext_irq_work on ext_irq_wkq.
 *
 * NOTE(review): curr_num is read in the full-buffer wait loop without
 * holding esp_pending_intr_list.spin_lock — confirm how the full source
 * synchronizes this check against the consumer.
 */
424 void ext_gpio_int_process(u16 value) {
428 esp_dbg(ESP_DBG_TRACE, "%s enter\n", __func__);
430 /* intr cycle queue is full, wait */
431 while (esp_pending_intr_list.curr_num >= MAX_PENDING_INTR_LIST)
436 spin_lock(&esp_pending_intr_list.spin_lock);
/* Enqueue the status word and advance the ring's write cursor. */
438 esp_pending_intr_list.pending_intr_list[esp_pending_intr_list.end_pos] = value;
439 esp_pending_intr_list.end_pos = (esp_pending_intr_list.end_pos + 1) % MAX_PENDING_INTR_LIST;
440 esp_pending_intr_list.curr_num++;
442 queue_work(ext_irq_wkq, &ext_irq_work);
444 spin_unlock(&esp_pending_intr_list.spin_lock);
/*
 * ext_gpio_init() - set up the external-GPIO subsystem.
 * @epub: ESP device handle.
 *
 * Creates the single-threaded irq workqueue, initializes the dispatch
 * work item and the module mutex, and registers ext_gpio_ops with the
 * core driver.  Error return value for workqueue-creation failure is
 * outside this view.
 *
 * NOTE(review): the assignment of ext_epub from @epub is not visible in
 * this view — confirm it precedes the NULL check below.
 */
447 int ext_gpio_init(struct esp_pub *epub)
449 esp_dbg(ESP_DBG_ERROR, "%s enter\n", __func__);
451 ext_irq_wkq = create_singlethread_workqueue("esp_ext_irq_wkq");
452 if (ext_irq_wkq == NULL) {
453 esp_dbg(ESP_DBG_ERROR, "%s create workqueue error\n", __func__);
457 INIT_WORK(&ext_irq_work, esp_tx_work);
458 mutex_init(&ext_mutex_lock);
462 if (ext_epub == NULL)
/* Expose the ops table so other drivers can drive these GPIOs. */
466 register_ext_gpio_ops(&ext_gpio_ops);
/*
 * ext_gpio_deinit() - tear down the external-GPIO subsystem:
 * unregister the ops table, flush any in-flight dispatch work, and
 * destroy the workqueue created in ext_gpio_init().
 */
472 void ext_gpio_deinit(void)
474 esp_dbg(ESP_DBG_ERROR, "%s enter\n", __func__);
477 unregister_ext_gpio_ops();
/* Wait for a running esp_tx_work() to finish before the wq goes away. */
480 cancel_work_sync(&ext_irq_work);
483 destroy_workqueue(ext_irq_wkq);
487 #endif /* USE_EXT_GPIO */