dfe28d329fadc7ca2109139a9fb25915ce4dddb4
[firefly-linux-kernel-4.4.55.git] / drivers / usb / dwc_otg_310 / dwc_otg_pcd.c
1 /* ==========================================================================
2  * $File: //dwh/usb_iip/dev/software/otg/linux/drivers/dwc_otg_pcd.c $
3  * $Revision: #104 $
4  * $Date: 2012/12/21 $
5  * $Change: 2131568 $
6  *
7  * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
8  * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
9  * otherwise expressly agreed to in writing between Synopsys and you.
10  *
11  * The Software IS NOT an item of Licensed Software or Licensed Product under
12  * any End User Software License Agreement or Agreement for Licensed Product
13  * with Synopsys or any supplement thereto. You are permitted to use and
14  * redistribute this Software in source and binary forms, with or without
15  * modification, provided that redistributions of source code must retain this
16  * notice. You may not view, use, disclose, copy or distribute this file or
17  * any information contained herein except pursuant to this license grant from
18  * Synopsys. If you do not agree with this notice, including the disclaimer
19  * below, then you are not authorized to use the Software.
20  *
21  * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
22  * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
23  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
24  * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
25  * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
26  * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
27  * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
28  * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
29  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
30  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
31  * DAMAGE.
32  * ========================================================================== */
33 #ifndef DWC_HOST_ONLY
34
35 /** @file
36  * This file implements PCD Core. All code in this file is portable and doesn't
37  * use any OS specific functions.
38  * PCD Core provides Interface, defined in <code><dwc_otg_pcd_if.h></code>
39  * header file, which can be used to implement OS specific PCD interface.
40  *
41  * An important function of the PCD is managing interrupts generated
42  * by the DWC_otg controller. The implementation of the DWC_otg device
43  * mode interrupt service routines is in dwc_otg_pcd_intr.c.
44  *
45  * @todo Add Device Mode test modes (Test J mode, Test K mode, etc).
46  * @todo Does it work when the request size is greater than DEPTSIZ
47  * transfer size
48  *
49  */
50
51 #include "dwc_otg_pcd.h"
52
53 #ifdef DWC_UTE_CFI
54 #include "dwc_otg_cfi.h"
55
56 extern int init_cfi(cfiobject_t * cfiobj);
57 #endif
58
59 /**
60  * Choose endpoint from ep arrays using usb_ep structure.
61  */
62 static dwc_otg_pcd_ep_t *get_ep_from_handle(dwc_otg_pcd_t * pcd, void *handle)
63 {
64         int i;
65         if (pcd->ep0.priv == handle) {
66                 return &pcd->ep0;
67         }
68         for (i = 0; i < MAX_EPS_CHANNELS - 1; i++) {
69                 if (pcd->in_ep[i].priv == handle)
70                         return &pcd->in_ep[i];
71                 if (pcd->out_ep[i].priv == handle)
72                         return &pcd->out_ep[i];
73         }
74
75         return NULL;
76 }
77
78 /**
79  * This function completes a request.  It call's the request call back.
80  */
81 void dwc_otg_request_done(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req,
82                           int32_t status)
83 {
84         unsigned stopped = ep->stopped;
85
86         DWC_DEBUGPL(DBG_PCDV, "%s(ep %p req %p)\n", __func__, ep, req);
87         DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
88
89         /* don't modify queue heads during completion callback */
90         ep->stopped = 1;
91         /* spin_unlock/spin_lock now done in fops->complete() */
92         ep->pcd->fops->complete(ep->pcd, ep->priv, req->priv, status,
93                                 req->actual);
94
95         if (ep->pcd->request_pending > 0) {
96                 --ep->pcd->request_pending;
97         }
98
99         ep->stopped = stopped;
100         DWC_FREE(req);
101 }
102
103 /**
104  * This function terminates all the requsts in the EP request queue.
105  */
106 void dwc_otg_request_nuke(dwc_otg_pcd_ep_t * ep)
107 {
108         dwc_otg_pcd_request_t *req;
109
110         ep->stopped = 1;
111
112         /* called with irqs blocked?? */
113         while (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
114                 req = DWC_CIRCLEQ_FIRST(&ep->queue);
115                 dwc_otg_request_done(ep, req, -DWC_E_SHUTDOWN);
116         }
117 }
118
119 void dwc_otg_pcd_start(dwc_otg_pcd_t * pcd,
120                        const struct dwc_otg_pcd_function_ops *fops)
121 {
122         pcd->fops = fops;
123 }
124
125 /**
126  * PCD Callback function for initializing the PCD when switching to
127  * device mode.
128  *
129  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
130  */
131 static int32_t dwc_otg_pcd_start_cb(void *p)
132 {
133         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
134         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
135
136         /*
137          * Initialized the Core for Device mode.
138          */
139         if (dwc_otg_is_device_mode(core_if)) {
140                 //dwc_otg_core_dev_init(core_if);
141                 /* Set core_if's lock pointer to the pcd->lock */
142                 core_if->lock = pcd->lock;
143         }
144         return 1;
145 }
146
147 /** CFI-specific buffer allocation function for EP */
148 #ifdef DWC_UTE_CFI
149 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
150                               size_t buflen, int flags)
151 {
152         dwc_otg_pcd_ep_t *ep;
153         ep = get_ep_from_handle(pcd, pep);
154         if (!ep) {
155                 DWC_WARN("bad ep\n");
156                 return -DWC_E_INVALID;
157         }
158
159         return pcd->cfi->ops.ep_alloc_buf(pcd->cfi, pcd, ep, addr, buflen,
160                                           flags);
161 }
162 #else
163 uint8_t *cfiw_ep_alloc_buffer(dwc_otg_pcd_t * pcd, void *pep, dwc_dma_t * addr,
164                               size_t buflen, int flags);
165 #endif
166
167 /**
168  * PCD Callback function for notifying the PCD when resuming from
169  * suspend.
170  *
171  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
172  */
173 static int32_t dwc_otg_pcd_resume_cb(void *p)
174 {
175         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
176
177         if (pcd->fops->resume) {
178                 pcd->fops->resume(pcd);
179         }
180
181         /* Stop the SRP timeout timer. */
182         if ((GET_CORE_IF(pcd)->core_params->phy_type != DWC_PHY_TYPE_PARAM_FS)
183             || (!GET_CORE_IF(pcd)->core_params->i2c_enable)) {
184                 if (GET_CORE_IF(pcd)->srp_timer_started) {
185                         GET_CORE_IF(pcd)->srp_timer_started = 0;
186                         DWC_TIMER_CANCEL(GET_CORE_IF(pcd)->srp_timer);
187                 }
188         }
189         return 1;
190 }
191
192 /**
193  * PCD Callback function for notifying the PCD device is suspended.
194  *
195  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
196  */
197 static int32_t dwc_otg_pcd_suspend_cb(void *p)
198 {
199         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
200
201         if (pcd->fops->suspend) {
202                 DWC_SPINUNLOCK(pcd->lock);
203                 pcd->fops->suspend(pcd);
204                 DWC_SPINLOCK(pcd->lock);
205         }
206
207         return 1;
208 }
209
210 /**
211  * PCD Callback function for stopping the PCD when switching to Host
212  * mode.
213  *
214  * @param p void pointer to the <code>dwc_otg_pcd_t</code>
215  */
216 static int32_t dwc_otg_pcd_stop_cb(void *p)
217 {
218         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) p;
219         extern void dwc_otg_pcd_stop(dwc_otg_pcd_t * _pcd);
220
221         dwc_otg_pcd_stop(pcd);
222         return 1;
223 }
224
/**
 * PCD Callback structure for handling mode switching.
 *
 * Registered with the CIL; the core layer invokes these hooks on
 * device/host mode transitions and power-state changes.
 */
static dwc_otg_cil_callbacks_t pcd_callbacks = {
	.start = dwc_otg_pcd_start_cb,		/* switch to device mode */
	.stop = dwc_otg_pcd_stop_cb,		/* switch to host mode */
	.suspend = dwc_otg_pcd_suspend_cb,	/* bus suspend */
	.resume_wakeup = dwc_otg_pcd_resume_cb,	/* resume from suspend */
	.p = 0,			/* Set at registration */
};
235
236 /**
237  * This function allocates a DMA Descriptor chain for the Endpoint
238  * buffer to be used for a transfer to/from the specified endpoint.
239  */
240 dwc_otg_dev_dma_desc_t *dwc_otg_ep_alloc_desc_chain(dwc_dma_t * dma_desc_addr,
241                                                     uint32_t count)
242 {
243         return DWC_DMA_ALLOC_ATOMIC(count * sizeof(dwc_otg_dev_dma_desc_t),
244                                     dma_desc_addr);
245 }
246
247 /**
248  * This function frees a DMA Descriptor chain that was allocated by ep_alloc_desc.
249  */
250 void dwc_otg_ep_free_desc_chain(dwc_otg_dev_dma_desc_t * desc_addr,
251                                 uint32_t dma_desc_addr, uint32_t count)
252 {
253         DWC_DMA_FREE(count * sizeof(dwc_otg_dev_dma_desc_t), desc_addr,
254                      dma_desc_addr);
255 }
256
257 #ifdef DWC_EN_ISOC
258
259 /**
260  * This function initializes a descriptor chain for Isochronous transfer
261  *
262  * @param core_if Programming view of DWC_otg controller.
263  * @param dwc_ep The EP to start the transfer on.
264  *
265  */
266 void dwc_otg_iso_ep_start_ddma_transfer(dwc_otg_core_if_t * core_if,
267                                         dwc_ep_t * dwc_ep)
268 {
269
270         dsts_data_t dsts = {.d32 = 0 };
271         depctl_data_t depctl = {.d32 = 0 };
272         volatile uint32_t *addr;
273         int i, j;
274         uint32_t len;
275
276         if (dwc_ep->is_in)
277                 dwc_ep->desc_cnt = dwc_ep->buf_proc_intrvl / dwc_ep->bInterval;
278         else
279                 dwc_ep->desc_cnt =
280                     dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
281                     dwc_ep->bInterval;
282
283         /** Allocate descriptors for double buffering */
284         dwc_ep->iso_desc_addr =
285             dwc_otg_ep_alloc_desc_chain(&dwc_ep->iso_dma_desc_addr,
286                                         dwc_ep->desc_cnt * 2);
287         if (dwc_ep->desc_addr) {
288                 DWC_WARN("%s, can't allocate DMA descriptor chain\n", __func__);
289                 return;
290         }
291
292         dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
293
294         /** ISO OUT EP */
295         if (dwc_ep->is_in == 0) {
296                 dev_dma_desc_sts_t sts = {.d32 = 0 };
297                 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
298                 dma_addr_t dma_ad;
299                 uint32_t data_per_desc;
300                 dwc_otg_dev_out_ep_regs_t *out_regs =
301                     core_if->dev_if->out_ep_regs[dwc_ep->num];
302                 int offset;
303
304                 addr = &core_if->dev_if->out_ep_regs[dwc_ep->num]->doepctl;
305                 dma_ad = (dma_addr_t) DWC_READ_REG32(&(out_regs->doepdma));
306
307                 /** Buffer 0 descriptors setup */
308                 dma_ad = dwc_ep->dma_addr0;
309
310                 sts.b_iso_out.bs = BS_HOST_READY;
311                 sts.b_iso_out.rxsts = 0;
312                 sts.b_iso_out.l = 0;
313                 sts.b_iso_out.sp = 0;
314                 sts.b_iso_out.ioc = 0;
315                 sts.b_iso_out.pid = 0;
316                 sts.b_iso_out.framenum = 0;
317
318                 offset = 0;
319                 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
320                      i += dwc_ep->pkt_per_frm) {
321
322                         for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
323                                 uint32_t len = (j + 1) * dwc_ep->maxpacket;
324                                 if (len > dwc_ep->data_per_frame)
325                                         data_per_desc =
326                                             dwc_ep->data_per_frame -
327                                             j * dwc_ep->maxpacket;
328                                 else
329                                         data_per_desc = dwc_ep->maxpacket;
330                                 len = data_per_desc % 4;
331                                 if (len)
332                                         data_per_desc += 4 - len;
333
334                                 sts.b_iso_out.rxbytes = data_per_desc;
335                                 dma_desc->buf = dma_ad;
336                                 dma_desc->status.d32 = sts.d32;
337
338                                 offset += data_per_desc;
339                                 dma_desc++;
340                                 dma_ad += data_per_desc;
341                         }
342                 }
343
344                 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
345                         uint32_t len = (j + 1) * dwc_ep->maxpacket;
346                         if (len > dwc_ep->data_per_frame)
347                                 data_per_desc =
348                                     dwc_ep->data_per_frame -
349                                     j * dwc_ep->maxpacket;
350                         else
351                                 data_per_desc = dwc_ep->maxpacket;
352                         len = data_per_desc % 4;
353                         if (len)
354                                 data_per_desc += 4 - len;
355                         sts.b_iso_out.rxbytes = data_per_desc;
356                         dma_desc->buf = dma_ad;
357                         dma_desc->status.d32 = sts.d32;
358
359                         offset += data_per_desc;
360                         dma_desc++;
361                         dma_ad += data_per_desc;
362                 }
363
364                 sts.b_iso_out.ioc = 1;
365                 len = (j + 1) * dwc_ep->maxpacket;
366                 if (len > dwc_ep->data_per_frame)
367                         data_per_desc =
368                             dwc_ep->data_per_frame - j * dwc_ep->maxpacket;
369                 else
370                         data_per_desc = dwc_ep->maxpacket;
371                 len = data_per_desc % 4;
372                 if (len)
373                         data_per_desc += 4 - len;
374                 sts.b_iso_out.rxbytes = data_per_desc;
375
376                 dma_desc->buf = dma_ad;
377                 dma_desc->status.d32 = sts.d32;
378                 dma_desc++;
379
380                 /** Buffer 1 descriptors setup */
381                 sts.b_iso_out.ioc = 0;
382                 dma_ad = dwc_ep->dma_addr1;
383
384                 offset = 0;
385                 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
386                      i += dwc_ep->pkt_per_frm) {
387                         for (j = 0; j < dwc_ep->pkt_per_frm; ++j) {
388                                 uint32_t len = (j + 1) * dwc_ep->maxpacket;
389                                 if (len > dwc_ep->data_per_frame)
390                                         data_per_desc =
391                                             dwc_ep->data_per_frame -
392                                             j * dwc_ep->maxpacket;
393                                 else
394                                         data_per_desc = dwc_ep->maxpacket;
395                                 len = data_per_desc % 4;
396                                 if (len)
397                                         data_per_desc += 4 - len;
398
399                                 data_per_desc =
400                                     sts.b_iso_out.rxbytes = data_per_desc;
401                                 dma_desc->buf = dma_ad;
402                                 dma_desc->status.d32 = sts.d32;
403
404                                 offset += data_per_desc;
405                                 dma_desc++;
406                                 dma_ad += data_per_desc;
407                         }
408                 }
409                 for (j = 0; j < dwc_ep->pkt_per_frm - 1; ++j) {
410                         data_per_desc =
411                             ((j + 1) * dwc_ep->maxpacket >
412                              dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
413                             j * dwc_ep->maxpacket : dwc_ep->maxpacket;
414                         data_per_desc +=
415                             (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
416                         sts.b_iso_out.rxbytes = data_per_desc;
417                         dma_desc->buf = dma_ad;
418                         dma_desc->status.d32 = sts.d32;
419
420                         offset += data_per_desc;
421                         dma_desc++;
422                         dma_ad += data_per_desc;
423                 }
424
425                 sts.b_iso_out.ioc = 1;
426                 sts.b_iso_out.l = 1;
427                 data_per_desc =
428                     ((j + 1) * dwc_ep->maxpacket >
429                      dwc_ep->data_per_frame) ? dwc_ep->data_per_frame -
430                     j * dwc_ep->maxpacket : dwc_ep->maxpacket;
431                 data_per_desc +=
432                     (data_per_desc % 4) ? (4 - data_per_desc % 4) : 0;
433                 sts.b_iso_out.rxbytes = data_per_desc;
434
435                 dma_desc->buf = dma_ad;
436                 dma_desc->status.d32 = sts.d32;
437
438                 dwc_ep->next_frame = 0;
439
440                 /** Write dma_ad into DOEPDMA register */
441                 DWC_WRITE_REG32(&(out_regs->doepdma),
442                                 (uint32_t) dwc_ep->iso_dma_desc_addr);
443
444         }
445         /** ISO IN EP */
446         else {
447                 dev_dma_desc_sts_t sts = {.d32 = 0 };
448                 dwc_otg_dev_dma_desc_t *dma_desc = dwc_ep->iso_desc_addr;
449                 dma_addr_t dma_ad;
450                 dwc_otg_dev_in_ep_regs_t *in_regs =
451                     core_if->dev_if->in_ep_regs[dwc_ep->num];
452                 unsigned int frmnumber;
453                 fifosize_data_t txfifosize, rxfifosize;
454
455                 txfifosize.d32 =
456                     DWC_READ_REG32(&core_if->dev_if->in_ep_regs[dwc_ep->num]->
457                                    dtxfsts);
458                 rxfifosize.d32 =
459                     DWC_READ_REG32(&core_if->core_global_regs->grxfsiz);
460
461                 addr = &core_if->dev_if->in_ep_regs[dwc_ep->num]->diepctl;
462
463                 dma_ad = dwc_ep->dma_addr0;
464
465                 dsts.d32 =
466                     DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
467
468                 sts.b_iso_in.bs = BS_HOST_READY;
469                 sts.b_iso_in.txsts = 0;
470                 sts.b_iso_in.sp =
471                     (dwc_ep->data_per_frame % dwc_ep->maxpacket) ? 1 : 0;
472                 sts.b_iso_in.ioc = 0;
473                 sts.b_iso_in.pid = dwc_ep->pkt_per_frm;
474
475                 frmnumber = dwc_ep->next_frame;
476
477                 sts.b_iso_in.framenum = frmnumber;
478                 sts.b_iso_in.txbytes = dwc_ep->data_per_frame;
479                 sts.b_iso_in.l = 0;
480
481                 /** Buffer 0 descriptors setup */
482                 for (i = 0; i < dwc_ep->desc_cnt - 1; i++) {
483                         dma_desc->buf = dma_ad;
484                         dma_desc->status.d32 = sts.d32;
485                         dma_desc++;
486
487                         dma_ad += dwc_ep->data_per_frame;
488                         sts.b_iso_in.framenum += dwc_ep->bInterval;
489                 }
490
491                 sts.b_iso_in.ioc = 1;
492                 dma_desc->buf = dma_ad;
493                 dma_desc->status.d32 = sts.d32;
494                 ++dma_desc;
495
496                 /** Buffer 1 descriptors setup */
497                 sts.b_iso_in.ioc = 0;
498                 dma_ad = dwc_ep->dma_addr1;
499
500                 for (i = 0; i < dwc_ep->desc_cnt - dwc_ep->pkt_per_frm;
501                      i += dwc_ep->pkt_per_frm) {
502                         dma_desc->buf = dma_ad;
503                         dma_desc->status.d32 = sts.d32;
504                         dma_desc++;
505
506                         dma_ad += dwc_ep->data_per_frame;
507                         sts.b_iso_in.framenum += dwc_ep->bInterval;
508
509                         sts.b_iso_in.ioc = 0;
510                 }
511                 sts.b_iso_in.ioc = 1;
512                 sts.b_iso_in.l = 1;
513
514                 dma_desc->buf = dma_ad;
515                 dma_desc->status.d32 = sts.d32;
516
517                 dwc_ep->next_frame = sts.b_iso_in.framenum + dwc_ep->bInterval;
518
519                 /** Write dma_ad into diepdma register */
520                 DWC_WRITE_REG32(&(in_regs->diepdma),
521                                 (uint32_t) dwc_ep->iso_dma_desc_addr);
522         }
523         /** Enable endpoint, clear nak  */
524         depctl.d32 = 0;
525         depctl.b.epena = 1;
526         depctl.b.usbactep = 1;
527         depctl.b.cnak = 1;
528
529         DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
530         depctl.d32 = DWC_READ_REG32(addr);
531 }
532
/**
 * This function initializes a buffer-DMA Isochronous transfer: it
 * programs the transfer size, packet count, and DMA address for the
 * active double buffer, then enables the endpoint.
 *
 * Only valid in buffer-DMA mode; returns without touching the hardware
 * in slave mode or descriptor-DMA mode.
 *
 * @param core_if Programming view of DWC_otg controller.
 * @param ep The EP to start the transfer on.
 *
 */
void dwc_otg_iso_ep_start_buf_transfer(dwc_otg_core_if_t * core_if,
				       dwc_ep_t * ep)
{
	depctl_data_t depctl = {.d32 = 0 };
	volatile uint32_t *addr;

	/* Select the control register of the IN or OUT endpoint. */
	if (ep->is_in) {
		addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
	} else {
		addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
	}

	/* Bail out unless the core is in plain buffer-DMA mode. */
	if (core_if->dma_enable == 0 || core_if->dma_desc_enable != 0) {
		return;
	} else {
		deptsiz_data_t deptsiz = {.d32 = 0 };

		/* Total bytes transferred per buffer-processing interval. */
		ep->xfer_len =
		    ep->data_per_frame * ep->buf_proc_intrvl / ep->bInterval;
		/* Packet count = ceil(xfer_len / maxpacket). */
		ep->pkt_cnt =
		    (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
		ep->xfer_count = 0;
		/* Double buffering: pick buffer 0 or 1 for this interval. */
		ep->xfer_buff =
		    (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
		ep->dma_addr =
		    (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;

		if (ep->is_in) {
			/* Program the transfer size and packet count
			 *	as follows: xfersize = N * maxpacket +
			 *	short_packet pktcnt = N + (short_packet
			 *	exist ? 1 : 0) 
			 */
			deptsiz.b.mc = ep->pkt_per_frm;
			deptsiz.b.xfersize = ep->xfer_len;
			deptsiz.b.pktcnt =
			    (ep->xfer_len - 1 + ep->maxpacket) / ep->maxpacket;
			DWC_WRITE_REG32(&core_if->dev_if->in_ep_regs[ep->num]->
					dieptsiz, deptsiz.d32);

			/* Write the DMA register */
			DWC_WRITE_REG32(&
					(core_if->dev_if->in_ep_regs[ep->num]->
					 diepdma), (uint32_t) ep->dma_addr);

		} else {
			/* OUT: transfer size rounded up to whole packets. */
			deptsiz.b.pktcnt =
			    (ep->xfer_len + (ep->maxpacket - 1)) /
			    ep->maxpacket;
			deptsiz.b.xfersize = deptsiz.b.pktcnt * ep->maxpacket;

			DWC_WRITE_REG32(&core_if->dev_if->out_ep_regs[ep->num]->
					doeptsiz, deptsiz.d32);

			/* Write the DMA register */
			DWC_WRITE_REG32(&
					(core_if->dev_if->out_ep_regs[ep->num]->
					 doepdma), (uint32_t) ep->dma_addr);

		}
		/** Enable endpoint, clear nak  */
		depctl.d32 = 0;
		depctl.b.epena = 1;
		depctl.b.cnak = 1;

		DWC_MODIFY_REG32(addr, depctl.d32, depctl.d32);
	}
}
608
609 /**
610  * This function does the setup for a data transfer for an EP and
611  * starts the transfer. For an IN transfer, the packets will be
612  * loaded into the appropriate Tx FIFO in the ISR. For OUT transfers,
613  * the packets are unloaded from the Rx FIFO in the ISR.
614  *
615  * @param core_if Programming view of DWC_otg controller.
616  * @param ep The EP to start the transfer on.
617  */
618
619 static void dwc_otg_iso_ep_start_transfer(dwc_otg_core_if_t * core_if,
620                                           dwc_ep_t * ep)
621 {
622         if (core_if->dma_enable) {
623                 if (core_if->dma_desc_enable) {
624                         if (ep->is_in) {
625                                 ep->desc_cnt = ep->pkt_cnt / ep->pkt_per_frm;
626                         } else {
627                                 ep->desc_cnt = ep->pkt_cnt;
628                         }
629                         dwc_otg_iso_ep_start_ddma_transfer(core_if, ep);
630                 } else {
631                         if (core_if->pti_enh_enable) {
632                                 dwc_otg_iso_ep_start_buf_transfer(core_if, ep);
633                         } else {
634                                 ep->cur_pkt_addr =
635                                     (ep->proc_buf_num) ? ep->xfer_buff1 : ep->
636                                     xfer_buff0;
637                                 ep->cur_pkt_dma_addr =
638                                     (ep->proc_buf_num) ? ep->dma_addr1 : ep->
639                                     dma_addr0;
640                                 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
641                         }
642                 }
643         } else {
644                 ep->cur_pkt_addr =
645                     (ep->proc_buf_num) ? ep->xfer_buff1 : ep->xfer_buff0;
646                 ep->cur_pkt_dma_addr =
647                     (ep->proc_buf_num) ? ep->dma_addr1 : ep->dma_addr0;
648                 dwc_otg_iso_ep_start_frm_transfer(core_if, ep);
649         }
650 }
651
652 /**
653  * This function stops transfer for an EP and
654  * resets the ep's variables. 
655  *
656  * @param core_if Programming view of DWC_otg controller.
657  * @param ep The EP to start the transfer on.
658  */
659
660 void dwc_otg_iso_ep_stop_transfer(dwc_otg_core_if_t * core_if, dwc_ep_t * ep)
661 {
662         depctl_data_t depctl = {.d32 = 0 };
663         volatile uint32_t *addr;
664
665         if (ep->is_in == 1) {
666                 addr = &core_if->dev_if->in_ep_regs[ep->num]->diepctl;
667         } else {
668                 addr = &core_if->dev_if->out_ep_regs[ep->num]->doepctl;
669         }
670
671         /* disable the ep */
672         depctl.d32 = DWC_READ_REG32(addr);
673
674         depctl.b.epdis = 1;
675         depctl.b.snak = 1;
676
677         DWC_WRITE_REG32(addr, depctl.d32);
678
679         if (core_if->dma_desc_enable &&
680             ep->iso_desc_addr && ep->iso_dma_desc_addr) {
681                 dwc_otg_ep_free_desc_chain(ep->iso_desc_addr,
682                                            ep->iso_dma_desc_addr,
683                                            ep->desc_cnt * 2);
684         }
685
686         /* reset varibales */
687         ep->dma_addr0 = 0;
688         ep->dma_addr1 = 0;
689         ep->xfer_buff0 = 0;
690         ep->xfer_buff1 = 0;
691         ep->data_per_frame = 0;
692         ep->data_pattern_frame = 0;
693         ep->sync_frame = 0;
694         ep->buf_proc_intrvl = 0;
695         ep->bInterval = 0;
696         ep->proc_buf_num = 0;
697         ep->pkt_per_frm = 0;
698         ep->pkt_per_frm = 0;
699         ep->desc_cnt = 0;
700         ep->iso_desc_addr = 0;
701         ep->iso_dma_desc_addr = 0;
702 }
703
/**
 * Starts an Isochronous transfer session on @ep_handle using two
 * alternating buffers (@buf0/@dma0 and @buf1/@dma1), computing the
 * per-frame packet layout, the first frame number, and allocating the
 * per-packet info array before kicking off the transfer.
 *
 * @return 0 on success, -DWC_E_INVALID for a bad endpoint,
 *         -DWC_E_NO_MEMORY when the packet-info array cannot be
 *         allocated.
 */
int dwc_otg_pcd_iso_ep_start(dwc_otg_pcd_t * pcd, void *ep_handle,
			     uint8_t * buf0, uint8_t * buf1, dwc_dma_t dma0,
			     dwc_dma_t dma1, int sync_frame, int dp_frame,
			     int data_per_frame, int start_frame,
			     int buf_proc_intrvl, void *req_handle,
			     int atomic_alloc)
{
	dwc_otg_pcd_ep_t *ep;
	dwc_irqflags_t flags = 0;
	dwc_ep_t *dwc_ep;
	int32_t frm_data;
	dsts_data_t dsts;
	dwc_otg_core_if_t *core_if;

	ep = get_ep_from_handle(pcd, ep_handle);

	/* ISO streaming is rejected on EP0 and unconfigured endpoints. */
	if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
		DWC_WARN("bad ep\n");
		return -DWC_E_INVALID;
	}

	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
	core_if = GET_CORE_IF(pcd);
	dwc_ep = &ep->dwc_ep;

	/* NOTE(review): only warns and then proceeds to overwrite the
	 * in-progress session state — confirm this is intentional. */
	if (ep->iso_req_handle) {
		DWC_WARN("ISO request in progress\n");
	}

	dwc_ep->dma_addr0 = dma0;
	dwc_ep->dma_addr1 = dma1;

	dwc_ep->xfer_buff0 = buf0;
	dwc_ep->xfer_buff1 = buf1;

	dwc_ep->data_per_frame = data_per_frame;

	/** @todo - pattern data support is to be implemented in the future */
	dwc_ep->data_pattern_frame = dp_frame;
	dwc_ep->sync_frame = sync_frame;

	dwc_ep->buf_proc_intrvl = buf_proc_intrvl;

	/* Descriptor bInterval is log2-encoded: interval in (micro)frames. */
	dwc_ep->bInterval = 1 << (ep->desc->bInterval - 1);

	dwc_ep->proc_buf_num = 0;

	/* Packets per frame = ceil(data_per_frame / maxpacket). */
	dwc_ep->pkt_per_frm = 0;
	frm_data = ep->dwc_ep.data_per_frame;
	while (frm_data > 0) {
		dwc_ep->pkt_per_frm++;
		frm_data -= ep->dwc_ep.maxpacket;
	}

	dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);

	if (start_frame == -1) {
		/* Auto start: next frame after the current SOF, rounded up
		 * to the endpoint's interval boundary. */
		dwc_ep->next_frame = dsts.b.soffn + 1;
		if (dwc_ep->bInterval != 1) {
			dwc_ep->next_frame =
			    dwc_ep->next_frame + (dwc_ep->bInterval - 1 -
						  dwc_ep->next_frame %
						  dwc_ep->bInterval);
		}
	} else {
		dwc_ep->next_frame = start_frame;
	}

	/* Total packet count for one buffer-processing interval. */
	if (!core_if->pti_enh_enable) {
		dwc_ep->pkt_cnt =
		    dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
		    dwc_ep->bInterval;
	} else {
		dwc_ep->pkt_cnt =
		    (dwc_ep->data_per_frame *
		     (dwc_ep->buf_proc_intrvl / dwc_ep->bInterval)
		     - 1 + dwc_ep->maxpacket) / dwc_ep->maxpacket;
	}

	if (core_if->dma_desc_enable) {
		dwc_ep->desc_cnt =
		    dwc_ep->buf_proc_intrvl * dwc_ep->pkt_per_frm /
		    dwc_ep->bInterval;
	}

	/* Per-packet status array; freed in dwc_otg_pcd_iso_ep_stop(). */
	if (atomic_alloc) {
		dwc_ep->pkt_info =
		    DWC_ALLOC_ATOMIC(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
	} else {
		dwc_ep->pkt_info =
		    DWC_ALLOC(sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
	}
	if (!dwc_ep->pkt_info) {
		DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
		return -DWC_E_NO_MEMORY;
	}
	if (core_if->pti_enh_enable) {
		dwc_memset(dwc_ep->pkt_info, 0,
			   sizeof(iso_pkt_info_t) * dwc_ep->pkt_cnt);
	}

	dwc_ep->cur_pkt = 0;
	ep->iso_req_handle = req_handle;

	/* Lock released before touching the hardware to start the
	 * transfer (mirrors the driver's other start paths). */
	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
	dwc_otg_iso_ep_start_transfer(core_if, dwc_ep);
	return 0;
}
812
813 int dwc_otg_pcd_iso_ep_stop(dwc_otg_pcd_t * pcd, void *ep_handle,
814                             void *req_handle)
815 {
816         dwc_irqflags_t flags = 0;
817         dwc_otg_pcd_ep_t *ep;
818         dwc_ep_t *dwc_ep;
819
820         ep = get_ep_from_handle(pcd, ep_handle);
821         if (!ep || !ep->desc || ep->dwc_ep.num == 0) {
822                 DWC_WARN("bad ep\n");
823                 return -DWC_E_INVALID;
824         }
825         dwc_ep = &ep->dwc_ep;
826
827         dwc_otg_iso_ep_stop_transfer(GET_CORE_IF(pcd), dwc_ep);
828
829         DWC_FREE(dwc_ep->pkt_info);
830         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
831         if (ep->iso_req_handle != req_handle) {
832                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
833                 return -DWC_E_INVALID;
834         }
835
836         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
837
838         ep->iso_req_handle = 0;
839         return 0;
840 }
841
842 /**
843  * This function is used for perodical data exchnage between PCD and gadget drivers.
844  * for Isochronous EPs
845  *
846  *      - Every time a sync period completes this function is called to
847  *        perform data exchange between PCD and gadget
848  */
849 void dwc_otg_iso_buffer_done(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * ep,
850                              void *req_handle)
851 {
852         int i;
853         dwc_ep_t *dwc_ep;
854
855         dwc_ep = &ep->dwc_ep;
856
857         DWC_SPINUNLOCK(ep->pcd->lock);
858         pcd->fops->isoc_complete(pcd, ep->priv, ep->iso_req_handle,
859                                  dwc_ep->proc_buf_num ^ 0x1);
860         DWC_SPINLOCK(ep->pcd->lock);
861
862         for (i = 0; i < dwc_ep->pkt_cnt; ++i) {
863                 dwc_ep->pkt_info[i].status = 0;
864                 dwc_ep->pkt_info[i].offset = 0;
865                 dwc_ep->pkt_info[i].length = 0;
866         }
867 }
868
869 int dwc_otg_pcd_get_iso_packet_count(dwc_otg_pcd_t * pcd, void *ep_handle,
870                                      void *iso_req_handle)
871 {
872         dwc_otg_pcd_ep_t *ep;
873         dwc_ep_t *dwc_ep;
874
875         ep = get_ep_from_handle(pcd, ep_handle);
876         if (!ep->desc || ep->dwc_ep.num == 0) {
877                 DWC_WARN("bad ep\n");
878                 return -DWC_E_INVALID;
879         }
880         dwc_ep = &ep->dwc_ep;
881
882         return dwc_ep->pkt_cnt;
883 }
884
885 void dwc_otg_pcd_get_iso_packet_params(dwc_otg_pcd_t * pcd, void *ep_handle,
886                                        void *iso_req_handle, int packet,
887                                        int *status, int *actual, int *offset)
888 {
889         dwc_otg_pcd_ep_t *ep;
890         dwc_ep_t *dwc_ep;
891
892         ep = get_ep_from_handle(pcd, ep_handle);
893         if (!ep)
894                 DWC_WARN("bad ep\n");
895
896         dwc_ep = &ep->dwc_ep;
897
898         *status = dwc_ep->pkt_info[packet].status;
899         *actual = dwc_ep->pkt_info[packet].length;
900         *offset = dwc_ep->pkt_info[packet].offset;
901 }
902
903 #endif /* DWC_EN_ISOC */
904
905 static void dwc_otg_pcd_init_ep(dwc_otg_pcd_t * pcd, dwc_otg_pcd_ep_t * pcd_ep,
906                                 uint32_t is_in, uint32_t ep_num)
907 {
908         /* Init EP structure */
909         pcd_ep->desc = 0;
910         pcd_ep->pcd = pcd;
911         pcd_ep->stopped = 1;
912         pcd_ep->queue_sof = 0;
913
914         /* Init DWC ep structure */
915         pcd_ep->dwc_ep.is_in = is_in;
916         pcd_ep->dwc_ep.num = ep_num;
917         pcd_ep->dwc_ep.active = 0;
918         pcd_ep->dwc_ep.tx_fifo_num = 0;
919         /* Control until ep is actvated */
920         pcd_ep->dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
921         pcd_ep->dwc_ep.maxpacket = MAX_PACKET_SIZE;
922         pcd_ep->dwc_ep.dma_addr = 0;
923         pcd_ep->dwc_ep.start_xfer_buff = 0;
924         pcd_ep->dwc_ep.xfer_buff = 0;
925         pcd_ep->dwc_ep.xfer_len = 0;
926         pcd_ep->dwc_ep.xfer_count = 0;
927         pcd_ep->dwc_ep.sent_zlp = 0;
928         pcd_ep->dwc_ep.total_len = 0;
929         pcd_ep->dwc_ep.desc_addr = 0;
930         pcd_ep->dwc_ep.dma_desc_addr = 0;
931         DWC_CIRCLEQ_INIT(&pcd_ep->queue);
932 }
933
/**
 * Initialize ep's
 *
 * Resets EP0 plus every IN and OUT endpoint the hardware implements,
 * walking HWCFG1 to find them.  Finally puts EP0 into the
 * disconnected control state.
 */
static void dwc_otg_pcd_reinit(dwc_otg_pcd_t * pcd)
{
	int i;
	uint32_t hwcfg1;
	dwc_otg_pcd_ep_t *ep;
	int in_ep_cntr, out_ep_cntr;
	uint32_t num_in_eps = (GET_CORE_IF(pcd))->dev_if->num_in_eps;
	uint32_t num_out_eps = (GET_CORE_IF(pcd))->dev_if->num_out_eps;
	/**
	 * Initialize the EP0 structure.
	 */
	ep = &pcd->ep0;
	dwc_otg_pcd_init_ep(pcd, ep, 0, 0);

	/* HWCFG1 holds 2 bits per endpoint; the loop tests one bit per
	 * EP and shifts by 2 each iteration.  NOTE(review): assumes a
	 * cleared bit means the direction is implemented for that EP -
	 * confirm against the DWC_otg databook HWCFG1 description. */
	in_ep_cntr = 0;
	hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 3;
	for (i = 1; in_ep_cntr < num_in_eps; i++) {
		if ((hwcfg1 & 0x1) == 0) {
			dwc_otg_pcd_ep_t *ep = &pcd->in_ep[in_ep_cntr];
			in_ep_cntr++;
			/**
			 * @todo NGS: Add direction to EP, based on contents
			 * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
			 * sprintf(";r
			 */
			dwc_otg_pcd_init_ep(pcd, ep, 1 /* IN */ , i);

			DWC_CIRCLEQ_INIT(&ep->queue);
		}
		hwcfg1 >>= 2;
	}

	/* Same walk for OUT endpoints, starting one bit earlier. */
	out_ep_cntr = 0;
	hwcfg1 = (GET_CORE_IF(pcd))->hwcfg1.d32 >> 2;
	for (i = 1; out_ep_cntr < num_out_eps; i++) {
		if ((hwcfg1 & 0x1) == 0) {
			dwc_otg_pcd_ep_t *ep = &pcd->out_ep[out_ep_cntr];
			out_ep_cntr++;
			/**
			 * @todo NGS: Add direction to EP, based on contents
			 * of HWCFG1.  Need a copy of HWCFG1 in pcd structure?
			 * sprintf(";r
			 */
			dwc_otg_pcd_init_ep(pcd, ep, 0 /* OUT */ , i);
			DWC_CIRCLEQ_INIT(&ep->queue);
		}
		hwcfg1 >>= 2;
	}

	/* EP0 starts disconnected, as a control EP with its own mps. */
	pcd->ep0state = EP0_DISCONNECT;
	pcd->ep0.dwc_ep.maxpacket = MAX_EP0_SIZE;
	pcd->ep0.dwc_ep.type = DWC_OTG_EP_TYPE_CONTROL;
}
990
/**
 * This function is called when the SRP timer expires. The SRP should
 * complete within 6 seconds.
 *
 * Behavior depends on the session state captured from GOTGCTL:
 *  - ADP enabled and no B-session: power down / re-arm the PMU and
 *    restart ADP probing.
 *  - ADP enabled with a valid B-session: bring the core up as a
 *    B-peripheral.
 *  - FS PHY with I2C: either signal resume_wakeup on SRP success, or
 *    report failure and clear the session request.
 *  - Otherwise: just clear a pending session request, or log GOTGCTL.
 */
static void srp_timeout(void *ptr)
{
	gotgctl_data_t gotgctl;
	dwc_otg_core_if_t *core_if = (dwc_otg_core_if_t *) ptr;
	volatile uint32_t *addr = &core_if->core_global_regs->gotgctl;

	gotgctl.d32 = DWC_READ_REG32(addr);

	/* The timer has fired; mark it no longer pending. */
	core_if->srp_timer_started = 0;

	if (core_if->adp_enable) {
		if (gotgctl.b.bsesvld == 0) {
			gpwrdn_data_t gpwrdn = {.d32 = 0 };
			DWC_PRINTF("SRP Timeout BSESSVLD = 0\n");
			/* Power off the core */
			if (core_if->power_down == 2) {
				gpwrdn.b.pwrdnswtch = 1;
				DWC_MODIFY_REG32(&core_if->
						 core_global_regs->gpwrdn,
						 gpwrdn.d32, 0);
			}

			/* Re-enable the PMU and its interrupt, then go back
			 * to ADP probing for a new session. */
			gpwrdn.d32 = 0;
			gpwrdn.b.pmuintsel = 1;
			gpwrdn.b.pmuactv = 1;
			DWC_MODIFY_REG32(&core_if->core_global_regs->gpwrdn, 0,
					 gpwrdn.d32);
			dwc_otg_adp_probe_start(core_if);
		} else {
			/* B-session became valid: start as B-peripheral. */
			DWC_PRINTF("SRP Timeout BSESSVLD = 1\n");
			core_if->op_state = B_PERIPHERAL;
			dwc_otg_core_init(core_if);
			dwc_otg_enable_global_interrupts(core_if);
			cil_pcd_start(core_if);
		}
	}

	if ((core_if->core_params->phy_type == DWC_PHY_TYPE_PARAM_FS) &&
	    (core_if->core_params->i2c_enable)) {
		DWC_PRINTF("SRP Timeout\n");

		if ((core_if->srp_success) && (gotgctl.b.bsesvld)) {
			if (core_if->pcd_cb && core_if->pcd_cb->resume_wakeup) {
				core_if->pcd_cb->resume_wakeup(core_if->pcd_cb->p);
			}

			/* Clear Session Request */
			gotgctl.d32 = 0;
			gotgctl.b.sesreq = 1;
			DWC_MODIFY_REG32(&core_if->core_global_regs->gotgctl,
					 gotgctl.d32, 0);

			core_if->srp_success = 0;
		} else {
			/* SRP failed: clear SESREQ in the full register
			 * image read at entry. */
			__DWC_ERROR("Device not connected/responding\n");
			gotgctl.b.sesreq = 0;
			DWC_WRITE_REG32(addr, gotgctl.d32);
		}
	} else if (gotgctl.b.sesreq) {
		DWC_PRINTF("SRP Timeout\n");

		__DWC_ERROR("Device not connected/responding\n");
		gotgctl.b.sesreq = 0;
		DWC_WRITE_REG32(addr, gotgctl.d32);
	} else {
		DWC_PRINTF(" SRP GOTGCTL=%0x\n", gotgctl.d32);
	}
}
1063
1064 /**
1065  * Tasklet
1066  *
1067  */
1068 extern void start_next_request(dwc_otg_pcd_ep_t * ep);
1069
1070 static void start_xfer_tasklet_func(void *data)
1071 {
1072         dwc_otg_pcd_t *pcd = (dwc_otg_pcd_t *) data;
1073         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1074
1075         int i;
1076         depctl_data_t diepctl;
1077
1078         DWC_DEBUGPL(DBG_PCDV, "Start xfer tasklet\n");
1079
1080         diepctl.d32 = DWC_READ_REG32(&core_if->dev_if->in_ep_regs[0]->diepctl);
1081
1082         if (pcd->ep0.queue_sof) {
1083                 pcd->ep0.queue_sof = 0;
1084                 start_next_request(&pcd->ep0);
1085                 // break;
1086         }
1087
1088         for (i = 0; i < core_if->dev_if->num_in_eps; i++) {
1089                 depctl_data_t diepctl;
1090                 diepctl.d32 =
1091                     DWC_READ_REG32(&core_if->dev_if->in_ep_regs[i]->diepctl);
1092
1093                 if (pcd->in_ep[i].queue_sof) {
1094                         pcd->in_ep[i].queue_sof = 0;
1095                         start_next_request(&pcd->in_ep[i]);
1096                         // break;
1097                 }
1098         }
1099
1100         return;
1101 }
1102
1103 /**
1104  * This function initialized the PCD portion of the driver.
1105  *
1106  */
1107 dwc_otg_pcd_t *dwc_otg_pcd_init(dwc_otg_core_if_t * core_if)
1108 {
1109         dwc_otg_pcd_t *pcd = NULL;
1110         dwc_otg_dev_if_t *dev_if;
1111         int i;
1112
1113         /*
1114          * Allocate PCD structure
1115          */
1116         pcd = DWC_ALLOC(sizeof(dwc_otg_pcd_t));
1117
1118         if (pcd == NULL) {
1119                 return NULL;
1120         }
1121
1122         pcd->lock = DWC_SPINLOCK_ALLOC();
1123         if (!pcd->lock) {
1124                 DWC_ERROR("Could not allocate lock for pcd");
1125                 DWC_FREE(pcd);
1126                 return NULL;
1127         }
1128         /* Set core_if's lock pointer to hcd->lock */
1129         core_if->lock = pcd->lock;
1130         pcd->core_if = core_if;
1131
1132         dev_if = core_if->dev_if;
1133         dev_if->isoc_ep = NULL;
1134
1135         if (core_if->hwcfg4.b.ded_fifo_en) {
1136                 DWC_PRINTF("Dedicated Tx FIFOs mode\n");
1137         } else {
1138                 DWC_PRINTF("Shared Tx FIFO mode\n");
1139         }
1140
1141         /*
1142          * Initialized the Core for Device mode here if there is nod ADP support. 
1143          * Otherwise it will be done later in dwc_otg_adp_start routine.
1144          */
1145         //if (dwc_otg_is_device_mode(core_if) /*&& !core_if->adp_enable */ ) {
1146         //      dwc_otg_core_dev_init(core_if);
1147         //}
1148
1149         /*
1150          * Register the PCD Callbacks.
1151          */
1152         dwc_otg_cil_register_pcd_callbacks(core_if, &pcd_callbacks, pcd);
1153
1154         /*
1155          * Initialize the DMA buffer for SETUP packets
1156          */
1157         if (GET_CORE_IF(pcd)->dma_enable) {
1158                 pcd->setup_pkt =
1159                     DWC_DMA_ALLOC_ATOMIC(sizeof(*pcd->setup_pkt) * 5,
1160                                   &pcd->setup_pkt_dma_handle);
1161                 if (pcd->setup_pkt == NULL) {
1162                         DWC_FREE(pcd);
1163                         return NULL;
1164                 }
1165
1166                 pcd->status_buf =
1167                     DWC_DMA_ALLOC_ATOMIC(sizeof(uint16_t),
1168                                   &pcd->status_buf_dma_handle);
1169                 if (pcd->status_buf == NULL) {
1170                         DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
1171                                      pcd->setup_pkt, pcd->setup_pkt_dma_handle);
1172                         DWC_FREE(pcd);
1173                         return NULL;
1174                 }
1175
1176                 if (GET_CORE_IF(pcd)->dma_desc_enable) {
1177                         dev_if->setup_desc_addr[0] =
1178                             dwc_otg_ep_alloc_desc_chain
1179                             (&dev_if->dma_setup_desc_addr[0], 1);
1180                         dev_if->setup_desc_addr[1] =
1181                             dwc_otg_ep_alloc_desc_chain
1182                             (&dev_if->dma_setup_desc_addr[1], 1);
1183                         dev_if->in_desc_addr =
1184                             dwc_otg_ep_alloc_desc_chain
1185                             (&dev_if->dma_in_desc_addr, 1);
1186                         dev_if->out_desc_addr =
1187                             dwc_otg_ep_alloc_desc_chain
1188                             (&dev_if->dma_out_desc_addr, 1);
1189                         pcd->data_terminated = 0;
1190
1191                         if (dev_if->setup_desc_addr[0] == 0
1192                             || dev_if->setup_desc_addr[1] == 0
1193                             || dev_if->in_desc_addr == 0
1194                             || dev_if->out_desc_addr == 0) {
1195
1196                                 if (dev_if->out_desc_addr)
1197                                         dwc_otg_ep_free_desc_chain
1198                                             (dev_if->out_desc_addr,
1199                                              dev_if->dma_out_desc_addr, 1);
1200                                 if (dev_if->in_desc_addr)
1201                                         dwc_otg_ep_free_desc_chain
1202                                             (dev_if->in_desc_addr,
1203                                              dev_if->dma_in_desc_addr, 1);
1204                                 if (dev_if->setup_desc_addr[1])
1205                                         dwc_otg_ep_free_desc_chain
1206                                             (dev_if->setup_desc_addr[1],
1207                                              dev_if->dma_setup_desc_addr[1], 1);
1208                                 if (dev_if->setup_desc_addr[0])
1209                                         dwc_otg_ep_free_desc_chain
1210                                             (dev_if->setup_desc_addr[0],
1211                                              dev_if->dma_setup_desc_addr[0], 1);
1212
1213                                 DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5,
1214                                              pcd->setup_pkt,
1215                                              pcd->setup_pkt_dma_handle);
1216                                 DWC_DMA_FREE(sizeof(*pcd->status_buf),
1217                                              pcd->status_buf,
1218                                              pcd->status_buf_dma_handle);
1219
1220                                 DWC_FREE(pcd);
1221
1222                                 return NULL;
1223                         }
1224                 }
1225         } else {
1226                 pcd->setup_pkt = DWC_ALLOC(sizeof(*pcd->setup_pkt) * 5);
1227                 if (pcd->setup_pkt == NULL) {
1228                         DWC_FREE(pcd);
1229                         return NULL;
1230                 }
1231
1232                 pcd->status_buf = DWC_ALLOC(sizeof(uint16_t));
1233                 if (pcd->status_buf == NULL) {
1234                         DWC_FREE(pcd->setup_pkt);
1235                         DWC_FREE(pcd);
1236                         return NULL;
1237                 }
1238         }
1239
1240         dwc_otg_pcd_reinit(pcd);
1241
1242         /* Allocate the cfi object for the PCD */
1243 #ifdef DWC_UTE_CFI
1244         pcd->cfi = DWC_ALLOC(sizeof(cfiobject_t));
1245         if (NULL == pcd->cfi)
1246                 goto fail;
1247         if (init_cfi(pcd->cfi)) {
1248                 CFI_INFO("%s: Failed to init the CFI object\n", __func__);
1249                 goto fail;
1250         }
1251 #endif
1252
1253         /* Initialize tasklets */
1254         pcd->start_xfer_tasklet = DWC_TASK_ALLOC("xfer_tasklet",
1255                                                  start_xfer_tasklet_func, pcd);
1256         pcd->test_mode_tasklet = DWC_TASK_ALLOC("test_mode_tasklet",
1257                                                 do_test_mode, pcd);
1258
1259         /* Initialize SRP timer */
1260         core_if->srp_timer = DWC_TIMER_ALLOC("SRP TIMER", srp_timeout, core_if);
1261
1262         if (core_if->core_params->dev_out_nak) {
1263                 /** 
1264                 * Initialize xfer timeout timer. Implemented for
1265                 * 2.93a feature "Device DDMA OUT NAK Enhancement"
1266                 */
1267                 for (i = 0; i < MAX_EPS_CHANNELS; i++) {
1268                         pcd->core_if->ep_xfer_timer[i] =
1269                             DWC_TIMER_ALLOC("ep timer", ep_xfer_timeout,
1270                                             &pcd->core_if->ep_xfer_info[i]);
1271                 }
1272         }
1273
1274         return pcd;
1275 #ifdef DWC_UTE_CFI
1276 fail:
1277 #endif
1278         if (pcd->setup_pkt)
1279                 DWC_FREE(pcd->setup_pkt);
1280         if (pcd->status_buf)
1281                 DWC_FREE(pcd->status_buf);
1282 #ifdef DWC_UTE_CFI
1283         if (pcd->cfi)
1284                 DWC_FREE(pcd->cfi);
1285 #endif
1286         if (pcd)
1287                 DWC_FREE(pcd);
1288         return NULL;
1289
1290 }
1291
/**
 * Remove PCD specific data
 *
 * Tears down what dwc_otg_pcd_init() created: per-EP transfer-timeout
 * timers, the SETUP/status buffers, the EP0 DMA descriptor chains,
 * the shared spinlock and the tasklets, then frees the PCD itself.
 */
void dwc_otg_pcd_remove(dwc_otg_pcd_t * pcd)
{
	dwc_otg_dev_if_t *dev_if = GET_CORE_IF(pcd)->dev_if;
	int i;

	/* Cancel per-EP timers first so no callback fires while the
	 * structures below are being freed. */
	if (pcd->core_if->core_params->dev_out_nak) {
		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
			DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[i]);
			pcd->core_if->ep_xfer_info[i].state = 0;
		}
	}

	/* Free the SETUP/status buffers with the allocator that created
	 * them (DMA-coherent vs. regular heap). */
	if (GET_CORE_IF(pcd)->dma_enable) {
		DWC_DMA_FREE(sizeof(*pcd->setup_pkt) * 5, pcd->setup_pkt,
			     pcd->setup_pkt_dma_handle);
		DWC_DMA_FREE(sizeof(uint16_t), pcd->status_buf,
			     pcd->status_buf_dma_handle);
		if (GET_CORE_IF(pcd)->dma_desc_enable) {
			dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[0],
						   dev_if->dma_setup_desc_addr
						   [0], 1);
			dwc_otg_ep_free_desc_chain(dev_if->setup_desc_addr[1],
						   dev_if->dma_setup_desc_addr
						   [1], 1);
			dwc_otg_ep_free_desc_chain(dev_if->in_desc_addr,
						   dev_if->dma_in_desc_addr, 1);
			dwc_otg_ep_free_desc_chain(dev_if->out_desc_addr,
						   dev_if->dma_out_desc_addr,
						   1);
		}
	} else {
		DWC_FREE(pcd->setup_pkt);
		DWC_FREE(pcd->status_buf);
	}
	DWC_SPINLOCK_FREE(pcd->lock);
	/* Set core_if's lock pointer to NULL */
	pcd->core_if->lock = NULL;

	DWC_TASK_FREE(pcd->start_xfer_tasklet);
	DWC_TASK_FREE(pcd->test_mode_tasklet);
	if (pcd->core_if->core_params->dev_out_nak) {
		for (i = 0; i < MAX_EPS_CHANNELS; i++) {
			if (pcd->core_if->ep_xfer_timer[i]) {
				DWC_TIMER_FREE(pcd->core_if->ep_xfer_timer[i]);
			}
		}
	}

/* Release the CFI object's dynamic memory */
#ifdef DWC_UTE_CFI
	if (pcd->cfi->ops.release) {
		pcd->cfi->ops.release(pcd->cfi);
	}
	/* NOTE(review): pcd->cfi itself (allocated in dwc_otg_pcd_init)
	 * is never freed here - possible leak; confirm whether
	 * ops.release() frees the object before adding a DWC_FREE. */
#endif

	DWC_FREE(pcd);
}
1351
1352 /**
1353  * Returns whether registered pcd is dual speed or not
1354  */
1355 uint32_t dwc_otg_pcd_is_dualspeed(dwc_otg_pcd_t * pcd)
1356 {
1357         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1358
1359         if ((core_if->core_params->speed == DWC_SPEED_PARAM_FULL) ||
1360             ((core_if->hwcfg2.b.hs_phy_type == 2) &&
1361              (core_if->hwcfg2.b.fs_phy_type == 1) &&
1362              (core_if->core_params->ulpi_fs_ls))) {
1363                 return 0;
1364         }
1365
1366         return 1;
1367 }
1368
1369 /**
1370  * Returns whether registered pcd is OTG capable or not
1371  */
1372 uint32_t dwc_otg_pcd_is_otg(dwc_otg_pcd_t * pcd)
1373 {
1374         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
1375         gusbcfg_data_t usbcfg = {.d32 = 0 };
1376         uint32_t retval = 0;
1377
1378         usbcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->gusbcfg);
1379 #if LINUX_VERSION_CODE < KERNEL_VERSION(3,6,0)
1380         if (!usbcfg.b.srpcap || !usbcfg.b.hnpcap)
1381                 return 0;
1382         else 
1383                 return 1;
1384 # else
1385         if (!usbcfg.b.srpcap)
1386                 return 0;
1387         else 
1388                 retval |= 1;
1389
1390         if (usbcfg.b.hnpcap)
1391                 retval |= 2;
1392         
1393         if (core_if->adp_enable) 
1394                 retval |= 4;
1395 #endif
1396
1397         return retval;
1398 }
1399
1400 /**
1401  * This function assigns periodic Tx FIFO to an periodic EP
1402  * in shared Tx FIFO mode
1403  */
1404 static uint32_t assign_tx_fifo(dwc_otg_core_if_t * core_if)
1405 {
1406         uint32_t TxMsk = 1;
1407         int i;
1408
1409         for (i = 0; i < core_if->hwcfg4.b.num_in_eps; ++i) {
1410                 if ((TxMsk & core_if->tx_msk) == 0) {
1411                         core_if->tx_msk |= TxMsk;
1412                         return i + 1;
1413                 }
1414                 TxMsk <<= 1;
1415         }
1416         return 0;
1417 }
1418
1419 /**
1420  * This function assigns periodic Tx FIFO to an periodic EP
1421  * in shared Tx FIFO mode
1422  */
1423 static uint32_t assign_perio_tx_fifo(dwc_otg_core_if_t * core_if)
1424 {
1425         uint32_t PerTxMsk = 1;
1426         int i;
1427         for (i = 0; i < core_if->hwcfg4.b.num_dev_perio_in_ep; ++i) {
1428                 if ((PerTxMsk & core_if->p_tx_msk) == 0) {
1429                         core_if->p_tx_msk |= PerTxMsk;
1430                         return i + 1;
1431                 }
1432                 PerTxMsk <<= 1;
1433         }
1434         return 0;
1435 }
1436
1437 /**
1438  * This function releases periodic Tx FIFO
1439  * in shared Tx FIFO mode
1440  */
1441 static void release_perio_tx_fifo(dwc_otg_core_if_t * core_if,
1442                                   uint32_t fifo_num)
1443 {
1444         core_if->p_tx_msk =
1445             (core_if->p_tx_msk & (1 << (fifo_num - 1))) ^ core_if->p_tx_msk;
1446 }
1447
1448 /**
1449  * This function releases periodic Tx FIFO
1450  * in shared Tx FIFO mode
1451  */
1452 static void release_tx_fifo(dwc_otg_core_if_t * core_if, uint32_t fifo_num)
1453 {
1454         core_if->tx_msk =
1455             (core_if->tx_msk & (1 << (fifo_num - 1))) ^ core_if->tx_msk;
1456 }
1457
/**
 * This function is being called from gadget to enable a PCD endpoint.
 *
 * @param pcd     Programming view of the device controller.
 * @param ep_desc Raw USB endpoint descriptor bytes (cast to
 *                usb_endpoint_descriptor_t); NULL selects EP0 and is
 *                rejected.
 * @param usb_ep  Opaque gadget-side endpoint handle, stored in ep->priv.
 *
 * Returns 0 on success, -DWC_E_INVALID for a NULL/bad descriptor or
 * unknown address, -DWC_E_SHUTDOWN if DMA descriptor allocation fails.
 */
int dwc_otg_pcd_ep_enable(dwc_otg_pcd_t * pcd,
			  const uint8_t * ep_desc, void *usb_ep)
{
	int num, dir;
	dwc_otg_pcd_ep_t *ep = NULL;
	const usb_endpoint_descriptor_t *desc;
	dwc_irqflags_t flags;
//	fifosize_data_t dptxfsiz = {.d32 = 0 };
//	gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
//	gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
	int retval = 0;
	int i, epcount;

	desc = (const usb_endpoint_descriptor_t *)ep_desc;

	/* A NULL descriptor means EP0, which cannot be (re)enabled
	 * through this path. */
	if (!desc) {
		pcd->ep0.priv = usb_ep;
		ep = &pcd->ep0;
		retval = -DWC_E_INVALID;
		goto out;
	}

	num = UE_GET_ADDR(desc->bEndpointAddress);
	dir = UE_GET_DIR(desc->bEndpointAddress);

	if (!desc->wMaxPacketSize) {
		DWC_WARN("bad maxpacketsize\n");
		retval = -DWC_E_INVALID;
		goto out;
	}

	/* Look up the PCD endpoint matching the descriptor's number
	 * and direction. */
	if (dir == UE_DIR_IN) {
		epcount = pcd->core_if->dev_if->num_in_eps;
		for (i = 0; i < epcount; i++) {
			if (num == pcd->in_ep[i].dwc_ep.num) {
				ep = &pcd->in_ep[i];
				break;
			}
		}
	} else {
		epcount = pcd->core_if->dev_if->num_out_eps;
		for (i = 0; i < epcount; i++) {
			if (num == pcd->out_ep[i].dwc_ep.num) {
				ep = &pcd->out_ep[i];
				break;
			}
		}
	}

	if (!ep) {
		DWC_WARN("bad address\n");
		retval = -DWC_E_INVALID;
		goto out;
	}

	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);

	ep->desc = desc;
	ep->priv = usb_ep;

	/*
	 * Activate the EP
	 */
	ep->stopped = 0;

	ep->dwc_ep.is_in = (dir == UE_DIR_IN);
	ep->dwc_ep.maxpacket = UGETW(desc->wMaxPacketSize);

	ep->dwc_ep.type = desc->bmAttributes & UE_XFERTYPE;

	/* Tx FIFO assignment: shared-FIFO cores give periodic FIFOs
	 * only to ISOC IN EPs; dedicated-FIFO cores give every IN EP
	 * its own FIFO. */
	if (ep->dwc_ep.is_in) {
		if (!GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
			ep->dwc_ep.tx_fifo_num = 0;

			if (ep->dwc_ep.type == UE_ISOCHRONOUS) {
				/*
				 * if ISOC EP then assign a Periodic Tx FIFO.
				 */
				ep->dwc_ep.tx_fifo_num =
				    assign_perio_tx_fifo(GET_CORE_IF(pcd));
			}
		} else {
			/*
			 * if Dedicated FIFOs mode is on then assign a Tx FIFO.
			 */
			ep->dwc_ep.tx_fifo_num =
			    assign_tx_fifo(GET_CORE_IF(pcd));
		}

		/* Calculating EP info controller base address */
		#if 0
		if (ep->dwc_ep.tx_fifo_num
		    && GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
			gdfifocfg.d32 =
			    DWC_READ_REG32(&GET_CORE_IF(pcd)->
					   core_global_regs->gdfifocfg);
			gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;
			dptxfsiz.d32 =
			    (DWC_READ_REG32
			     (&GET_CORE_IF(pcd)->core_global_regs->
			      dtxfsiz[ep->dwc_ep.tx_fifo_num - 1]) >> 16);
			gdfifocfg.b.epinfobase =
			    gdfifocfgbase.d32 + dptxfsiz.d32;
			if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
				DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
						core_global_regs->gdfifocfg,
						gdfifocfg.d32);
			}
		}
		#endif
	}
	/* Set initial data PID. */
	if (ep->dwc_ep.type == UE_BULK) {
		ep->dwc_ep.data_pid_start = 0;
	}

	/* Alloc DMA Descriptors */
	if (GET_CORE_IF(pcd)->dma_desc_enable) {
#ifndef DWC_UTE_PER_IO
		if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
#endif
			ep->dwc_ep.desc_addr =
			    dwc_otg_ep_alloc_desc_chain(&ep->
							dwc_ep.dma_desc_addr,
							MAX_DMA_DESC_CNT);
			if (!ep->dwc_ep.desc_addr) {
				DWC_WARN("%s, can't allocate DMA descriptor\n",
					 __func__);
				retval = -DWC_E_SHUTDOWN;
				DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
				goto out;
			}
#ifndef DWC_UTE_PER_IO
		}
#endif
	}

	DWC_DEBUGPL(DBG_PCD, "Activate %s: type=%d, mps=%d desc=%p\n",
		    (ep->dwc_ep.is_in ? "IN" : "OUT"),
		    ep->dwc_ep.type, ep->dwc_ep.maxpacket, ep->desc);
#ifdef DWC_UTE_PER_IO
	/* NOTE(review): computed for every EP type here, even though
	 * bInterval is only meaningful for periodic EPs - confirm
	 * intent for the DWC_UTE_PER_IO build. */
	ep->dwc_ep.xiso_bInterval = 1 << (ep->desc->bInterval - 1);
#endif
	if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
		/* bInterval is log2-encoded (2^(bInterval-1) frames). */
		ep->dwc_ep.bInterval = 1 << (ep->desc->bInterval - 1);
		ep->dwc_ep.frame_num = 0xFFFFFFFF;
	}

	dwc_otg_ep_activate(GET_CORE_IF(pcd), &ep->dwc_ep);

#ifdef DWC_UTE_CFI
	if (pcd->cfi->ops.ep_enable) {
		pcd->cfi->ops.ep_enable(pcd->cfi, pcd, ep);
	}
#endif

	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);

out:
	return retval;
}
1623
/**
 * This function is called from the gadget driver
 * to disable a PCD endpoint.
 */
int dwc_otg_pcd_ep_disable(dwc_otg_pcd_t * pcd, void *ep_handle)
{
        dwc_otg_pcd_ep_t *ep;
        dwc_irqflags_t flags;
        dwc_otg_dev_dma_desc_t *desc_addr;
        dwc_dma_t dma_desc_addr;
        /* gdfifocfg/gdfifocfgbase are only consumed by the disabled #if 0
         * EPInfo-base rebalancing block below; the register read is kept
         * for when that code is re-enabled. */
        gdfifocfg_data_t gdfifocfgbase = {.d32 = 0 };
        gdfifocfg_data_t gdfifocfg = {.d32 = 0 };
//      fifosize_data_t dptxfsiz = {.d32 = 0 };

        ep = get_ep_from_handle(pcd, ep_handle);

        /* An endpoint with no descriptor was never enabled (or is EP0). */
        if (!ep || !ep->desc) {
                DWC_DEBUGPL(DBG_PCD, "bad ep address\n");
                return -DWC_E_INVALID;
        }

        DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);

        /* Complete every queued request with -SHUTDOWN before tearing
         * down the endpoint. */
        dwc_otg_request_nuke(ep);

        dwc_otg_ep_deactivate(GET_CORE_IF(pcd), &ep->dwc_ep);
        if (pcd->core_if->core_params->dev_out_nak) {
                /* Stop the per-EP transfer watchdog used in dev-out-NAK mode. */
                DWC_TIMER_CANCEL(pcd->core_if->ep_xfer_timer[ep->dwc_ep.num]);
                pcd->core_if->ep_xfer_info[ep->dwc_ep.num].state = 0;
        }
        /* Clearing desc marks the EP as disabled for future queue calls. */
        ep->desc = NULL;
        ep->stopped = 1;

        gdfifocfg.d32 =
            DWC_READ_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg);
        gdfifocfgbase.d32 = gdfifocfg.d32 >> 16;

        if (ep->dwc_ep.is_in) {
                if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
                        /* Flush the Tx FIFO */
                        dwc_otg_flush_tx_fifo(GET_CORE_IF(pcd),
                                              ep->dwc_ep.tx_fifo_num);
                }
                /* Return the dedicated Tx FIFO to the allocator pools. */
                release_perio_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
                release_tx_fifo(GET_CORE_IF(pcd), ep->dwc_ep.tx_fifo_num);
                #if 0
                if (GET_CORE_IF(pcd)->en_multiple_tx_fifo) {
                        /* Decreasing EPinfo Base Addr */
                        dptxfsiz.d32 =
                            (DWC_READ_REG32
                             (&GET_CORE_IF(pcd)->
                                core_global_regs->dtxfsiz[ep->dwc_ep.tx_fifo_num-1]) >> 16);
                        gdfifocfg.b.epinfobase = gdfifocfgbase.d32 - dptxfsiz.d32;
                        if (GET_CORE_IF(pcd)->snpsid <= OTG_CORE_REV_2_94a) {
                                DWC_WRITE_REG32(&GET_CORE_IF(pcd)->core_global_regs->gdfifocfg,
                                                gdfifocfg.d32);
                        }
                }
                #endif
        }

        /* Free DMA Descriptors */
        if (GET_CORE_IF(pcd)->dma_desc_enable) {
                if (ep->dwc_ep.type != UE_ISOCHRONOUS) {
                        /* Snapshot the chain pointers so they can be freed
                         * after the lock is released. */
                        desc_addr = ep->dwc_ep.desc_addr;
                        dma_desc_addr = ep->dwc_ep.dma_desc_addr;

                        /* Cannot call dma_free_coherent() with IRQs disabled */
                        DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
                        dwc_otg_ep_free_desc_chain(desc_addr, dma_desc_addr,
                                                   MAX_DMA_DESC_CNT);

                        goto out_unlocked;
                }
        }
        DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);

out_unlocked:
        DWC_DEBUGPL(DBG_PCD, "%d %s disabled\n", ep->dwc_ep.num,
                    ep->dwc_ep.is_in ? "IN" : "OUT");
        return 0;

}
1707
1708 /******************************************************************************/
1709 #ifdef DWC_UTE_PER_IO
1710
1711 /**
1712  * Free the request and its extended parts
1713  *
1714  */
1715 void dwc_pcd_xiso_ereq_free(dwc_otg_pcd_ep_t * ep, dwc_otg_pcd_request_t * req)
1716 {
1717         DWC_FREE(req->ext_req.per_io_frame_descs);
1718         DWC_FREE(req);
1719 }
1720
1721 /**
1722  * Start the next request in the endpoint's queue.
1723  *
1724  */
int dwc_otg_pcd_xiso_start_next_request(dwc_otg_pcd_t * pcd,
                                        dwc_otg_pcd_ep_t * ep)
{
        int i;
        dwc_otg_pcd_request_t *req = NULL;
        dwc_ep_t *dwcep = NULL;
        struct dwc_iso_xreq_port *ereq = NULL;
        struct dwc_iso_pkt_desc_port *ddesc_iso;
        uint16_t nat;
        depctl_data_t diepctl;  /* reused for both DIEPCTL and DOEPCTL writes */

        dwcep = &ep->dwc_ep;

        /* Only one transfer may be in flight per endpoint; silently back
         * off if one is already active. */
        if (dwcep->xiso_active_xfers > 0) {
#if 0   //Disable this to decrease s/w overhead that is crucial for Isoc transfers
                DWC_WARN("There are currently active transfers for EP%d \
                                (active=%d; queued=%d)", dwcep->num, dwcep->xiso_active_xfers, 
                                dwcep->xiso_queued_xfers);
#endif
                return 0;
        }

        /* Bits 12:11 of wMaxPacketSize = additional (high-bandwidth)
         * transactions per microframe; used below as the starting PID. */
        nat = UGETW(ep->desc->wMaxPacketSize);
        nat = (nat >> 11) & 0x03;

        if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
                req = DWC_CIRCLEQ_FIRST(&ep->queue);
                ereq = &req->ext_req;
                ep->stopped = 0;

                /* Get the frame number */
                dwcep->xiso_frame_num =
                    dwc_otg_get_frame_number(GET_CORE_IF(pcd));
                DWC_DEBUG("FRM_NUM=%d", dwcep->xiso_frame_num);

                ddesc_iso = ereq->per_io_frame_descs;

                if (dwcep->is_in) {
                        /* Setup DMA Descriptor chain for IN Isoc request */
                        for (i = 0; i < ereq->pio_pkt_count; i++) {
                                //if ((i % (nat + 1)) == 0)
                                /* First descriptor targets the current frame;
                                 * subsequent ones advance by bInterval
                                 * (14-bit frame counter wrap).
                                 * NOTE(review): the OUT branch below advances
                                 * the frame number for the first descriptor
                                 * too — confirm the asymmetry is intended. */
                                if (i > 0)
                                        dwcep->xiso_frame_num =
                                            (dwcep->xiso_bInterval +
                                             dwcep->xiso_frame_num) & 0x3FFF;
                                dwcep->desc_addr[i].buf =
                                    req->dma + ddesc_iso[i].offset;
                                dwcep->desc_addr[i].status.b_iso_in.txbytes =
                                    ddesc_iso[i].length;
                                dwcep->desc_addr[i].status.b_iso_in.framenum =
                                    dwcep->xiso_frame_num;
                                dwcep->desc_addr[i].status.b_iso_in.bs =
                                    BS_HOST_READY;
                                dwcep->desc_addr[i].status.b_iso_in.txsts = 0;
                                /* Short packet flag when the frame is not a
                                 * whole multiple of maxpacket. */
                                dwcep->desc_addr[i].status.b_iso_in.sp =
                                    (ddesc_iso[i].length %
                                     dwcep->maxpacket) ? 1 : 0;
                                dwcep->desc_addr[i].status.b_iso_in.ioc = 0;
                                dwcep->desc_addr[i].status.b_iso_in.pid = nat + 1;
                                dwcep->desc_addr[i].status.b_iso_in.l = 0;

                                /* Process the last descriptor */
                                if (i == ereq->pio_pkt_count - 1) {
                                        /* Interrupt-on-complete + list end */
                                        dwcep->desc_addr[i].status.b_iso_in.ioc = 1;
                                        dwcep->desc_addr[i].status.b_iso_in.l = 1;
                                }
                        }

                        /* Setup and start the transfer for this endpoint */
                        dwcep->xiso_active_xfers++;
                        DWC_WRITE_REG32(&GET_CORE_IF(pcd)->dev_if->
                                        in_ep_regs[dwcep->num]->diepdma,
                                        dwcep->dma_desc_addr);
                        diepctl.d32 = 0;
                        diepctl.b.epena = 1;
                        diepctl.b.cnak = 1;
                        DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->dev_if->
                                         in_ep_regs[dwcep->num]->diepctl, 0,
                                         diepctl.d32);
                } else {
                        /* Setup DMA Descriptor chain for OUT Isoc request */
                        for (i = 0; i < ereq->pio_pkt_count; i++) {
                                //if ((i % (nat + 1)) == 0)
                                dwcep->xiso_frame_num = (dwcep->xiso_bInterval + 
                                                                                dwcep->xiso_frame_num) & 0x3FFF;
                                dwcep->desc_addr[i].buf =
                                    req->dma + ddesc_iso[i].offset;
                                dwcep->desc_addr[i].status.b_iso_out.rxbytes =
                                    ddesc_iso[i].length;
                                dwcep->desc_addr[i].status.b_iso_out.framenum =
                                    dwcep->xiso_frame_num;
                                dwcep->desc_addr[i].status.b_iso_out.bs =
                                    BS_HOST_READY;
                                dwcep->desc_addr[i].status.b_iso_out.rxsts = 0;
                                dwcep->desc_addr[i].status.b_iso_out.sp =
                                    (ddesc_iso[i].length %
                                     dwcep->maxpacket) ? 1 : 0;
                                dwcep->desc_addr[i].status.b_iso_out.ioc = 0;
                                dwcep->desc_addr[i].status.b_iso_out.pid = nat + 1;
                                dwcep->desc_addr[i].status.b_iso_out.l = 0;
                                
                                /* Process the last descriptor */
                                if (i == ereq->pio_pkt_count - 1) {
                                        dwcep->desc_addr[i].status.b_iso_out.ioc = 1;
                                        dwcep->desc_addr[i].status.b_iso_out.l = 1;
                                }                       
                        }
                        
                        /* Setup and start the transfer for this endpoint */
                        dwcep->xiso_active_xfers++;
                        DWC_WRITE_REG32(&GET_CORE_IF(pcd)->
                                        dev_if->out_ep_regs[dwcep->num]->
                                        doepdma, dwcep->dma_desc_addr);
                        diepctl.d32 = 0;
                        diepctl.b.epena = 1;
                        diepctl.b.cnak = 1;
                        DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
                                         dev_if->out_ep_regs[dwcep->num]->
                                         doepctl, 0, diepctl.d32);
                }

        } else {
                /* Nothing queued: mark the endpoint idle. */
                ep->stopped = 1;
        }

        return 0;
}
1852
1853 /**
1854  *      - Remove the request from the queue
1855  */
void complete_xiso_ep(dwc_otg_pcd_ep_t * ep)
{
        dwc_otg_pcd_request_t *req = NULL;
        struct dwc_iso_xreq_port *ereq = NULL;
        struct dwc_iso_pkt_desc_port *ddesc_iso = NULL;
        dwc_ep_t *dwcep = NULL;
        int i;

        /* Called with ep->pcd->lock held (it is dropped around the gadget
         * completion callback below). */
        //DWC_DEBUG();
        dwcep = &ep->dwc_ep;

        /* Get the first pending request from the queue */
        if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
                req = DWC_CIRCLEQ_FIRST(&ep->queue);
                if (!req) {
                        DWC_PRINTF("complete_ep 0x%p, req = NULL!\n", ep);
                        return;
                }
                dwcep->xiso_active_xfers--;
                dwcep->xiso_queued_xfers--;
                /* Remove this request from the queue */
                DWC_CIRCLEQ_REMOVE_INIT(&ep->queue, req, queue_entry);
        } else {
                DWC_PRINTF("complete_ep 0x%p, ep->queue empty!\n", ep);
                return;
        }

        ep->stopped = 1;
        ereq = &req->ext_req;
        ddesc_iso = ereq->per_io_frame_descs;

        /* Going negative means a completion arrived with no transfer
         * accounted as active — report the bookkeeping bug. */
        if (dwcep->xiso_active_xfers < 0) {
                DWC_WARN("EP#%d (xiso_active_xfers=%d)", dwcep->num,
                         dwcep->xiso_active_xfers);
        }

        /* Fill the Isoc descs of portable extended req from dma descriptors */
        for (i = 0; i < ereq->pio_pkt_count; i++) {
                if (dwcep->is_in) {     /* IN endpoints */
                        /* txbytes is decremented by HW; residual = untransmitted. */
                        ddesc_iso[i].actual_length = ddesc_iso[i].length -
                            dwcep->desc_addr[i].status.b_iso_in.txbytes;
                        ddesc_iso[i].status =
                            dwcep->desc_addr[i].status.b_iso_in.txsts;
                } else {        /* OUT endpoints */
                        ddesc_iso[i].actual_length = ddesc_iso[i].length -
                            dwcep->desc_addr[i].status.b_iso_out.rxbytes;
                        ddesc_iso[i].status =
                            dwcep->desc_addr[i].status.b_iso_out.rxsts;
                }
        }

        /* Drop the lock: the gadget callback may queue new requests and
         * must not run with the PCD lock held. */
        DWC_SPINUNLOCK(ep->pcd->lock);

        /* Call the completion function in the non-portable logic */
        ep->pcd->fops->xisoc_complete(ep->pcd, ep->priv, req->priv, 0,
                                      &req->ext_req);

        DWC_SPINLOCK(ep->pcd->lock);

        /* Free the request - specific freeing needed for extended request object */
        dwc_pcd_xiso_ereq_free(ep, req);

        /* Start the next request */
        dwc_otg_pcd_xiso_start_next_request(ep->pcd, ep);

        return;
}
1923
1924 /**
1925  * Create and initialize the Isoc pkt descriptors of the extended request.
1926  *
1927  */
1928 static int dwc_otg_pcd_xiso_create_pkt_descs(dwc_otg_pcd_request_t * req,
1929                                              void *ereq_nonport,
1930                                              int atomic_alloc)
1931 {
1932         struct dwc_iso_xreq_port *ereq = NULL;
1933         struct dwc_iso_xreq_port *req_mapped = NULL;
1934         struct dwc_iso_pkt_desc_port *ipds = NULL;      /* To be created in this function */
1935         uint32_t pkt_count;
1936         int i;
1937
1938         ereq = &req->ext_req;
1939         req_mapped = (struct dwc_iso_xreq_port *)ereq_nonport;
1940         pkt_count = req_mapped->pio_pkt_count;
1941
1942         /* Create the isoc descs */
1943         if (atomic_alloc) {
1944                 ipds = DWC_ALLOC_ATOMIC(sizeof(*ipds) * pkt_count);
1945         } else {
1946                 ipds = DWC_ALLOC(sizeof(*ipds) * pkt_count);
1947         }
1948
1949         if (!ipds) {
1950                 DWC_ERROR("Failed to allocate isoc descriptors");
1951                 return -DWC_E_NO_MEMORY;
1952         }
1953
1954         /* Initialize the extended request fields */
1955         ereq->per_io_frame_descs = ipds;
1956         ereq->error_count = 0;
1957         ereq->pio_alloc_pkt_count = pkt_count;
1958         ereq->pio_pkt_count = pkt_count;
1959         ereq->tr_sub_flags = req_mapped->tr_sub_flags;
1960
1961         /* Init the Isoc descriptors */
1962         for (i = 0; i < pkt_count; i++) {
1963                 ipds[i].length = req_mapped->per_io_frame_descs[i].length;
1964                 ipds[i].offset = req_mapped->per_io_frame_descs[i].offset;
1965                 ipds[i].status = req_mapped->per_io_frame_descs[i].status;      /* 0 */
1966                 ipds[i].actual_length =
1967                     req_mapped->per_io_frame_descs[i].actual_length;
1968         }
1969
1970         return 0;
1971 }
1972
1973 static void prn_ext_request(struct dwc_iso_xreq_port *ereq)
1974 {
1975         struct dwc_iso_pkt_desc_port *xfd = NULL;
1976         int i;
1977
1978         DWC_DEBUG("per_io_frame_descs=%p", ereq->per_io_frame_descs);
1979         DWC_DEBUG("tr_sub_flags=%d", ereq->tr_sub_flags);
1980         DWC_DEBUG("error_count=%d", ereq->error_count);
1981         DWC_DEBUG("pio_alloc_pkt_count=%d", ereq->pio_alloc_pkt_count);
1982         DWC_DEBUG("pio_pkt_count=%d", ereq->pio_pkt_count);
1983         DWC_DEBUG("res=%d", ereq->res);
1984
1985         for (i = 0; i < ereq->pio_pkt_count; i++) {
1986                 xfd = &ereq->per_io_frame_descs[0];
1987                 DWC_DEBUG("FD #%d", i);
1988
1989                 DWC_DEBUG("xfd->actual_length=%d", xfd->actual_length);
1990                 DWC_DEBUG("xfd->length=%d", xfd->length);
1991                 DWC_DEBUG("xfd->offset=%d", xfd->offset);
1992                 DWC_DEBUG("xfd->status=%d", xfd->status);
1993         }
1994 }
1995
1996 /**
1997  *
1998  */
1999 int dwc_otg_pcd_xiso_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
2000                               uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
2001                               int zero, void *req_handle, int atomic_alloc,
2002                               void *ereq_nonport)
2003 {
2004         dwc_otg_pcd_request_t *req = NULL;
2005         dwc_otg_pcd_ep_t *ep;
2006         dwc_irqflags_t flags;
2007         int res;
2008
2009         ep = get_ep_from_handle(pcd, ep_handle);
2010         if (!ep) {
2011                 DWC_WARN("bad ep\n");
2012                 return -DWC_E_INVALID;
2013         }
2014
2015         /* We support this extension only for DDMA mode */
2016         if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC)
2017                 if (!GET_CORE_IF(pcd)->dma_desc_enable)
2018                         return -DWC_E_INVALID;
2019
2020         /* Create a dwc_otg_pcd_request_t object */
2021         if (atomic_alloc) {
2022                 req = DWC_ALLOC_ATOMIC(sizeof(*req));
2023         } else {
2024                 req = DWC_ALLOC(sizeof(*req));
2025         }
2026
2027         if (!req) {
2028                 return -DWC_E_NO_MEMORY;
2029         }
2030
2031         /* Create the Isoc descs for this request which shall be the exact match
2032          * of the structure sent to us from the non-portable logic */
2033         res =
2034             dwc_otg_pcd_xiso_create_pkt_descs(req, ereq_nonport, atomic_alloc);
2035         if (res) {
2036                 DWC_WARN("Failed to init the Isoc descriptors");
2037                 DWC_FREE(req);
2038                 return res;
2039         }
2040
2041         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2042
2043         DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
2044         req->buf = buf;
2045         req->dma = dma_buf;
2046         req->length = buflen;
2047         req->sent_zlp = zero;
2048         req->priv = req_handle;
2049
2050         //DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2051         ep->dwc_ep.dma_addr = dma_buf;
2052         ep->dwc_ep.start_xfer_buff = buf;
2053         ep->dwc_ep.xfer_buff = buf;
2054         ep->dwc_ep.xfer_len = 0;
2055         ep->dwc_ep.xfer_count = 0;
2056         ep->dwc_ep.sent_zlp = 0;
2057         ep->dwc_ep.total_len = buflen;
2058
2059         /* Add this request to the tail */
2060         DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
2061         ep->dwc_ep.xiso_queued_xfers++;
2062
2063 //DWC_DEBUG("CP_0");
2064 //DWC_DEBUG("req->ext_req.tr_sub_flags=%d", req->ext_req.tr_sub_flags);
2065 //prn_ext_request((struct dwc_iso_xreq_port *) ereq_nonport);
2066 //prn_ext_request(&req->ext_req);
2067
2068         //DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2069
2070         /* If the req->status == ASAP  then check if there is any active transfer
2071          * for this endpoint. If no active transfers, then get the first entry
2072          * from the queue and start that transfer
2073          */
2074         if (req->ext_req.tr_sub_flags == DWC_EREQ_TF_ASAP) {
2075                 res = dwc_otg_pcd_xiso_start_next_request(pcd, ep);
2076                 if (res) {
2077                         DWC_WARN("Failed to start the next Isoc transfer");
2078                         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2079                         DWC_FREE(req);
2080                         return res;
2081                 }
2082         }
2083
2084         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2085         return 0;
2086 }
2087
2088 #endif
2089 /* END ifdef DWC_UTE_PER_IO ***************************************************/
/**
 * Queue a request on a PCD endpoint and, when the endpoint queue is empty
 * and the endpoint is not stopped, start the transfer immediately.
 * For ISOC endpoints the request is only queued; the transfer is started
 * later from the IN/OUT token interrupt handlers.  Returns 0 on success
 * or a negative DWC error code.
 */
int dwc_otg_pcd_ep_queue(dwc_otg_pcd_t * pcd, void *ep_handle,
                         uint8_t * buf, dwc_dma_t dma_buf, uint32_t buflen,
                         int zero, void *req_handle, int atomic_alloc)
{
        dwc_irqflags_t flags;
        dwc_otg_pcd_request_t *req;
        dwc_otg_pcd_ep_t *ep;
        uint32_t max_transfer;

        /* EP0 has no descriptor but is always valid. */
        ep = get_ep_from_handle(pcd, ep_handle);
        if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
                DWC_WARN("bad ep\n");
                return -DWC_E_INVALID;
        }

        if (atomic_alloc) {
                req = DWC_ALLOC_ATOMIC(sizeof(*req));
        } else {
                req = DWC_ALLOC(sizeof(*req));
        }

        if (!req) {
                return -DWC_E_NO_MEMORY;
        }
        DWC_CIRCLEQ_INIT_ENTRY(req, queue_entry);
        if (!GET_CORE_IF(pcd)->core_params->opt) {
                if (ep->dwc_ep.num != 0) {
                        DWC_ERROR("queue req %p, len %d buf %p\n",
                                  req_handle, buflen, buf);
                }
        }

        req->buf = buf;
        req->dma = dma_buf;
        req->length = buflen;
        req->sent_zlp = zero;
        req->priv = req_handle;
        req->dw_align_buf = NULL;
        /* Buffer-DMA mode requires 4-byte alignment: bounce through an
         * aligned buffer when the caller's DMA address is misaligned.
         * NOTE(review): the allocation result is not checked here; if it
         * fails, the unaligned dma_buf is used below — confirm intended. */
        if ((dma_buf & 0x3) && GET_CORE_IF(pcd)->dma_enable
            && !GET_CORE_IF(pcd)->dma_desc_enable)
                req->dw_align_buf = DWC_DMA_ALLOC(buflen,
                                                  &req->dw_align_buf_dma);
        DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);

        /*
         * After adding request to the queue for IN ISOC wait for In Token Received
         * when TX FIFO is empty interrupt and for OUT ISOC wait for OUT Token 
         * Received when EP is disabled interrupt to obtain starting microframe
         * (odd/even) start transfer
         */
        if (ep->dwc_ep.type == DWC_OTG_EP_TYPE_ISOC) {
                /* req is always non-NULL here (checked above). */
                if (req != 0) {
                        depctl_data_t depctl = {.d32 =
                                    DWC_READ_REG32(&pcd->core_if->dev_if->
                                                   in_ep_regs[ep->dwc_ep.num]->
                                                   diepctl) };
                        ++pcd->request_pending;

                        DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
                        if (ep->dwc_ep.is_in) {
                                /* Clear NAK so the core answers the next
                                 * IN token. */
                                depctl.b.cnak = 1;
                                DWC_WRITE_REG32(&pcd->core_if->dev_if->
                                                in_ep_regs[ep->dwc_ep.num]->
                                                diepctl, depctl.d32);
                        }

                        DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
                }
                return 0;
        }

        /*
         * For EP0 IN without premature status, zlp is required?
         */
        if (ep->dwc_ep.num == 0 && ep->dwc_ep.is_in) {
                DWC_DEBUGPL(DBG_PCDV, "%d-OUT ZLP\n", ep->dwc_ep.num);
                //_req->zero = 1;
        }

        /* Start the transfer */
        if (DWC_CIRCLEQ_EMPTY(&ep->queue) && !ep->stopped) {
                /* EP0 Transfer? */
                if (ep->dwc_ep.num == 0) {
                        /* Drive the control-transfer state machine. */
                        switch (pcd->ep0state) {
                        case EP0_IN_DATA_PHASE:
                                DWC_DEBUGPL(DBG_PCD,
                                            "%s ep0: EP0_IN_DATA_PHASE\n",
                                            __func__);
                                break;

                        case EP0_OUT_DATA_PHASE:
                                DWC_DEBUGPL(DBG_PCD,
                                            "%s ep0: EP0_OUT_DATA_PHASE\n",
                                            __func__);
                                if (pcd->request_config) {
                                        /* Complete STATUS PHASE */
                                        ep->dwc_ep.is_in = 1;
                                        pcd->ep0state = EP0_IN_STATUS_PHASE;
                                }
                                break;

                        case EP0_IN_STATUS_PHASE:
                                DWC_DEBUGPL(DBG_PCD,
                                            "%s ep0: EP0_IN_STATUS_PHASE\n",
                                            __func__);
                                break;

                        default:
                                DWC_DEBUGPL(DBG_ANY, "ep0: odd state %d\n",
                                            pcd->ep0state);
                                DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
                                return -DWC_E_SHUTDOWN;
                        }

                        ep->dwc_ep.dma_addr = dma_buf;
                        ep->dwc_ep.start_xfer_buff = buf;
                        ep->dwc_ep.xfer_buff = buf;
                        ep->dwc_ep.xfer_len = buflen;
                        ep->dwc_ep.xfer_count = 0;
                        ep->dwc_ep.sent_zlp = 0;
                        ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;

                        /* Caller asked for a trailing ZLP on exact multiples
                         * of maxpacket. */
                        if (zero) {
                                if ((ep->dwc_ep.xfer_len %
                                     ep->dwc_ep.maxpacket == 0)
                                    && (ep->dwc_ep.xfer_len != 0)) {
                                        ep->dwc_ep.sent_zlp = 1;
                                }

                        }

                        dwc_otg_ep0_start_transfer(GET_CORE_IF(pcd),
                                                   &ep->dwc_ep);
                }               // non-ep0 endpoints
                else {
#ifdef DWC_UTE_CFI
                        if (ep->dwc_ep.buff_mode != BM_STANDARD) {
                                /* store the request length */
                                ep->dwc_ep.cfi_req_len = buflen;
                                pcd->cfi->ops.build_descriptors(pcd->cfi, pcd,
                                                                ep, req);
                        } else {
#endif
                                max_transfer =
                                    GET_CORE_IF(ep->pcd)->core_params->
                                    max_transfer_size;

                                /* Setup and start the Transfer */
                                if (req->dw_align_buf) {
                                        /* IN: stage the payload into the
                                         * aligned bounce buffer first. */
                                        if (ep->dwc_ep.is_in)
                                                dwc_memcpy(req->dw_align_buf,
                                                           buf, buflen);
                                        ep->dwc_ep.dma_addr =
                                            req->dw_align_buf_dma;
                                        ep->dwc_ep.start_xfer_buff =
                                            req->dw_align_buf;
                                        ep->dwc_ep.xfer_buff =
                                            req->dw_align_buf;
                                } else {
                                        ep->dwc_ep.dma_addr = dma_buf;
                                        ep->dwc_ep.start_xfer_buff = buf;
                                        ep->dwc_ep.xfer_buff = buf;
                                }
                                ep->dwc_ep.xfer_len = 0;
                                ep->dwc_ep.xfer_count = 0;
                                ep->dwc_ep.sent_zlp = 0;
                                ep->dwc_ep.total_len = buflen;

                                /* Clamp per-transfer size to core limits;
                                 * DDMA additionally caps it, and OUT must be
                                 * a multiple of 4. */
                                ep->dwc_ep.maxxfer = max_transfer;
                                if (GET_CORE_IF(pcd)->dma_desc_enable) {
                                        uint32_t out_max_xfer =
                                            DDMA_MAX_TRANSFER_SIZE -
                                            (DDMA_MAX_TRANSFER_SIZE % 4);
                                        if (ep->dwc_ep.is_in) {
                                                if (ep->dwc_ep.maxxfer >
                                                    DDMA_MAX_TRANSFER_SIZE) {
                                                        ep->dwc_ep.maxxfer =
                                                            DDMA_MAX_TRANSFER_SIZE;
                                                }
                                        } else {
                                                if (ep->dwc_ep.maxxfer >
                                                    out_max_xfer) {
                                                        ep->dwc_ep.maxxfer =
                                                            out_max_xfer;
                                                }
                                        }
                                }
                                /* Multi-part transfers must split on a
                                 * maxpacket boundary. */
                                if (ep->dwc_ep.maxxfer < ep->dwc_ep.total_len) {
                                        ep->dwc_ep.maxxfer -=
                                            (ep->dwc_ep.maxxfer %
                                             ep->dwc_ep.maxpacket);
                                }

                                if (zero) {
                                        if ((ep->dwc_ep.total_len %
                                             ep->dwc_ep.maxpacket == 0)
                                            && (ep->dwc_ep.total_len != 0)) {
                                                ep->dwc_ep.sent_zlp = 1;
                                        }
                                }
#ifdef DWC_UTE_CFI
                        }
#endif
                        dwc_otg_ep_start_transfer(GET_CORE_IF(pcd),
                                                  &ep->dwc_ep);
                }
        }

        /* req is always non-NULL here (checked above). */
        if (req != 0) {
                ++pcd->request_pending;
                DWC_CIRCLEQ_INSERT_TAIL(&ep->queue, req, queue_entry);
                /* Slave mode with a stopped IN endpoint: rely on the
                 * "IN token received with TxFIFO empty" interrupt to
                 * restart the transfer. */
                if (ep->dwc_ep.is_in && ep->stopped
                    && !(GET_CORE_IF(pcd)->dma_enable)) {
                        /** @todo NGS Create a function for this. */
                        diepmsk_data_t diepmsk = {.d32 = 0 };
                        diepmsk.b.intktxfemp = 1;
                        if (GET_CORE_IF(pcd)->multiproc_int_enable) {
                                DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
                                                 dev_if->dev_global_regs->diepeachintmsk
                                                 [ep->dwc_ep.num], 0,
                                                 diepmsk.d32);
                        } else {
                                DWC_MODIFY_REG32(&GET_CORE_IF(pcd)->
                                                 dev_if->dev_global_regs->
                                                 diepmsk, 0, diepmsk.d32);
                        }

                }
        }
        DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);

        return 0;
}
2323
2324 int dwc_otg_pcd_ep_dequeue(dwc_otg_pcd_t * pcd, void *ep_handle,
2325                            void *req_handle)
2326 {
2327         dwc_irqflags_t flags;
2328         dwc_otg_pcd_request_t *req;
2329         dwc_otg_pcd_ep_t *ep;
2330
2331         ep = get_ep_from_handle(pcd, ep_handle);
2332         if (!ep || (!ep->desc && ep->dwc_ep.num != 0)) {
2333                 DWC_WARN("bad argument\n");
2334                 return -DWC_E_INVALID;
2335         }
2336
2337         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2338
2339         /* make sure it's actually queued on this endpoint */
2340         DWC_CIRCLEQ_FOREACH(req, &ep->queue, queue_entry) {
2341                 if (req->priv == (void *)req_handle) {
2342                         break;
2343                 }
2344         }
2345
2346         if (req->priv != (void *)req_handle) {
2347                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2348                 return -DWC_E_INVALID;
2349         }
2350
2351         if (!DWC_CIRCLEQ_EMPTY_ENTRY(req, queue_entry)) {
2352                 dwc_otg_request_done(ep, req, -DWC_E_RESTART);
2353         } else {
2354                 req = NULL;
2355         }
2356
2357         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2358
2359         return req ? 0 : -DWC_E_SHUTDOWN;
2360
2361 }
2362
/**
 * Set or clear the halt (STALL) condition on an endpoint.
 *
 * @param pcd       the pcd structure.
 * @param ep_handle opaque endpoint handle.
 * @param value     0 - clear halt and the stall_clear_flag;
 *                  1 - set halt (STALL the endpoint);
 *                  2 - clear only the stall_clear_flag;
 *                  3 - set stall_clear_flag, then set halt (wedge-like
 *                      behavior: value 3 jumps to the same stall path as
 *                      value 1 via the "stall" label).
 *
 * @return 0 on success, -DWC_E_INVALID for a bad endpoint (isochronous
 *         endpoints cannot be halted), -DWC_E_AGAIN if a transfer is in
 *         progress or data is still in the TX FIFO.
 */
int dwc_otg_pcd_ep_halt(dwc_otg_pcd_t * pcd, void *ep_handle, int value)
{
	dwc_otg_pcd_ep_t *ep;
	dwc_irqflags_t flags;
	int retval = 0;

	ep = get_ep_from_handle(pcd, ep_handle);

	/* Reject unknown endpoints, unconfigured non-EP0 endpoints, and
	 * isochronous endpoints (ISO endpoints do not support halt). */
	if (!ep || (!ep->desc && ep != &pcd->ep0) ||
	    (ep->desc && (ep->desc->bmAttributes == UE_ISOCHRONOUS))) {
		DWC_WARN("%s, bad ep\n", __func__);
		return -DWC_E_INVALID;
	}

	DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
	if (!DWC_CIRCLEQ_EMPTY(&ep->queue)) {
		/* Cannot change halt state while requests are pending. */
		DWC_WARN("%d %s XFer In process\n", ep->dwc_ep.num,
			 ep->dwc_ep.is_in ? "IN" : "OUT");
		retval = -DWC_E_AGAIN;
	} else if (value == 0) {
	    ep->dwc_ep.stall_clear_flag = 0;
		dwc_otg_ep_clear_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
	} else if (value == 1) {
	stall:	/* value == 3 also lands here after setting stall_clear_flag */
		if (ep->dwc_ep.is_in == 1 && GET_CORE_IF(pcd)->dma_desc_enable) {
			dtxfsts_data_t txstatus;
			fifosize_data_t txfifosize;

			/* In descriptor-DMA mode, only stall an IN endpoint
			 * once its TX FIFO has fully drained. */
			txfifosize.d32 =
			    DWC_READ_REG32(&GET_CORE_IF(pcd)->
					   core_global_regs->dtxfsiz[ep->dwc_ep.
								     tx_fifo_num]);
			txstatus.d32 =
			    DWC_READ_REG32(&GET_CORE_IF(pcd)->
					   dev_if->in_ep_regs[ep->dwc_ep.num]->
					   dtxfsts);

			if (txstatus.b.txfspcavail < txfifosize.b.depth) {
				DWC_WARN("%s() Data In Tx Fifo\n", __func__);
				retval = -DWC_E_AGAIN;
			} else {
				if (ep->dwc_ep.num == 0) {
					pcd->ep0state = EP0_STALL;
				}

				ep->stopped = 1;
				dwc_otg_ep_set_stall(GET_CORE_IF(pcd),
						     &ep->dwc_ep);
			}
		} else {
			if (ep->dwc_ep.num == 0) {
				pcd->ep0state = EP0_STALL;
			}

			ep->stopped = 1;
			dwc_otg_ep_set_stall(GET_CORE_IF(pcd), &ep->dwc_ep);
		}
	} else if (value == 2) {
		ep->dwc_ep.stall_clear_flag = 0;
	} else if (value == 3) {
		ep->dwc_ep.stall_clear_flag = 1;
		goto stall;
	}

	DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);

	return retval;
}
2431
2432 /**
2433  * This function initiates remote wakeup of the host from suspend state.
2434  */
2435 void dwc_otg_pcd_rem_wkup_from_suspend(dwc_otg_pcd_t * pcd, int set)
2436 {
2437         dctl_data_t dctl = { 0 };
2438         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2439         dsts_data_t dsts;
2440
2441         dsts.d32 = DWC_READ_REG32(&core_if->dev_if->dev_global_regs->dsts);
2442         if (!dsts.b.suspsts) {
2443                 DWC_WARN("Remote wakeup while is not in suspend state\n");
2444         }
2445         /* Check if DEVICE_REMOTE_WAKEUP feature enabled */
2446         if (pcd->remote_wakeup_enable) {
2447                 if (set) {
2448
2449                         if (core_if->adp_enable) {
2450                                 gpwrdn_data_t gpwrdn;
2451
2452                                 dwc_otg_adp_probe_stop(core_if);
2453
2454                                 /* Mask SRP detected interrupt from Power Down Logic */
2455                                 gpwrdn.d32 = 0;
2456                                 gpwrdn.b.srp_det_msk = 1;
2457                                 DWC_MODIFY_REG32(&core_if->
2458                                                  core_global_regs->gpwrdn,
2459                                                  gpwrdn.d32, 0);
2460
2461                                 /* Disable Power Down Logic */
2462                                 gpwrdn.d32 = 0;
2463                                 gpwrdn.b.pmuactv = 1;
2464                                 DWC_MODIFY_REG32(&core_if->
2465                                                  core_global_regs->gpwrdn,
2466                                                  gpwrdn.d32, 0);
2467
2468                                 /*
2469                                  * Initialize the Core for Device mode.
2470                                  */
2471                                 core_if->op_state = B_PERIPHERAL;
2472                                 dwc_otg_core_init(core_if);
2473                                 dwc_otg_enable_global_interrupts(core_if);
2474                                 cil_pcd_start(core_if);
2475
2476                                 dwc_otg_initiate_srp(core_if);
2477                         }
2478
2479                         dctl.b.rmtwkupsig = 1;
2480                         DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
2481                                          dctl, 0, dctl.d32);
2482                         DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
2483
2484                         dwc_mdelay(2);
2485                         DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->
2486                                          dctl, dctl.d32, 0);
2487                         DWC_DEBUGPL(DBG_PCD, "Clear Remote Wakeup\n");
2488                 }
2489         } else {
2490                 DWC_DEBUGPL(DBG_PCD, "Remote Wakeup is disabled\n");
2491         }
2492 }
2493
#ifdef CONFIG_USB_DWC_OTG_LPM
/**
 * This function initiates remote wakeup of the host from L1 sleep state.
 *
 * Bails out unless the port is in L1 sleep, the host permits remote
 * wakeup, and the core reports that resume signaling is allowed.  It
 * then disables UTMI sleep / L1 clock gating and, if @set is non-zero,
 * triggers RmtWkUpSig (the hardware clears the bit itself).
 *
 * @param pcd the pcd structure.
 * @param set non-zero to drive the wakeup signaling after ungating.
 */
void dwc_otg_pcd_rem_wkup_from_sleep(dwc_otg_pcd_t * pcd, int set)
{
	glpmcfg_data_t lpmcfg;
	pcgcctl_data_t pcgcctl = {.d32 = 0 };

	dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);

	lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);

	/* Check if we are in L1 state */
	if (!lpmcfg.b.prt_sleep_sts) {
		DWC_DEBUGPL(DBG_PCD, "Device is not in sleep state\n");
		return;
	}

	/* Check if host allows remote wakeup */
	if (!lpmcfg.b.rem_wkup_en) {
		DWC_DEBUGPL(DBG_PCD, "Host does not allow remote wakeup\n");
		return;
	}

	/* Check if Resume OK */
	if (!lpmcfg.b.sleep_state_resumeok) {
		DWC_DEBUGPL(DBG_PCD, "Sleep state resume is not OK\n");
		return;
	}

	/* Re-read GLPMCFG and turn off UTMI sleep before signaling. */
	lpmcfg.d32 = DWC_READ_REG32(&core_if->core_global_regs->glpmcfg);
	lpmcfg.b.en_utmi_sleep = 0;
	lpmcfg.b.hird_thres &= (~(1 << 4));

	/* Clear Enbl_L1Gating bit. */
	pcgcctl.b.enbl_sleep_gating = 1;
	DWC_MODIFY_REG32(core_if->pcgcctl, pcgcctl.d32,0);

	DWC_WRITE_REG32(&core_if->core_global_regs->glpmcfg, lpmcfg.d32);

	if (set) {
		dctl_data_t dctl = {.d32 = 0 };
		dctl.b.rmtwkupsig = 1;
		/* Set RmtWkUpSig bit to start remote wakup signaling.
		 * Hardware will automatically clear this bit.
		 */
		DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl,
				 0, dctl.d32);
		DWC_DEBUGPL(DBG_PCD, "Set Remote Wakeup\n");
	}

}
#endif
2548
2549 /**
2550  * Performs remote wakeup.
2551  */
2552 void dwc_otg_pcd_remote_wakeup(dwc_otg_pcd_t * pcd, int set)
2553 {
2554         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2555         dwc_irqflags_t flags;
2556         if (dwc_otg_is_device_mode(core_if)) {
2557                 DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2558 #ifdef CONFIG_USB_DWC_OTG_LPM
2559                 if (core_if->lx_state == DWC_OTG_L1) {
2560                         dwc_otg_pcd_rem_wkup_from_sleep(pcd, set);
2561                 } else {
2562 #endif
2563                         dwc_otg_pcd_rem_wkup_from_suspend(pcd, set);
2564 #ifdef CONFIG_USB_DWC_OTG_LPM
2565                 }
2566 #endif
2567                 DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2568         }
2569         return;
2570 }
2571
2572 void dwc_otg_pcd_disconnect_us(dwc_otg_pcd_t * pcd, int no_of_usecs)
2573 {
2574         dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2575         dctl_data_t dctl = { 0 };
2576
2577         if (dwc_otg_is_device_mode(core_if)) {
2578                 dctl.b.sftdiscon = 1;
2579                 DWC_PRINTF("Soft disconnect for %d useconds\n",no_of_usecs);
2580                 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, 0, dctl.d32);
2581                 dwc_udelay(no_of_usecs);
2582                 DWC_MODIFY_REG32(&core_if->dev_if->dev_global_regs->dctl, dctl.d32,0);
2583                 
2584         } else{
2585                 DWC_PRINTF("NOT SUPPORTED IN HOST MODE\n");
2586         }
2587         return;
2588
2589 }
2590
2591 int dwc_otg_pcd_wakeup(dwc_otg_pcd_t * pcd)
2592 {
2593         dsts_data_t dsts;
2594         gotgctl_data_t gotgctl;
2595
2596         /*
2597          * This function starts the Protocol if no session is in progress. If
2598          * a session is already in progress, but the device is suspended,
2599          * remote wakeup signaling is started.
2600          */
2601
2602         /* Check if valid session */
2603         gotgctl.d32 =
2604             DWC_READ_REG32(&(GET_CORE_IF(pcd)->core_global_regs->gotgctl));
2605         if (gotgctl.b.bsesvld) {
2606                 /* Check if suspend state */
2607                 dsts.d32 =
2608                     DWC_READ_REG32(&
2609                                    (GET_CORE_IF(pcd)->dev_if->
2610                                     dev_global_regs->dsts));
2611                 if (dsts.b.suspsts) {
2612                         dwc_otg_pcd_remote_wakeup(pcd, 1);
2613                 }
2614         } else {
2615                 dwc_otg_pcd_initiate_srp(pcd);
2616         }
2617
2618         return 0;
2619
2620 }
2621
2622
2623 /**
2624  * Implement Soft-Connect and Soft-Disconnect function
2625  */
2626
2627 void dwc_otg_pcd_pullup_enable(dwc_otg_pcd_t * pcd)
2628 {
2629     if(pcd);
2630         DWC_MODIFY_REG32( &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl),2,0 );
2631 }
2632
2633 void dwc_otg_pcd_pullup_disable(dwc_otg_pcd_t * pcd)
2634 {
2635     if(pcd);
2636         DWC_MODIFY_REG32( &(GET_CORE_IF(pcd)->dev_if->dev_global_regs->dctl),0,2 );
2637 }
2638
2639 void dwc_pcd_reset(dwc_otg_pcd_t *pcd)
2640 {
2641     dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
2642     dwc_otg_disable_global_interrupts(core_if);
2643         dwc_otg_core_init(core_if);
2644     dwc_otg_pcd_reinit(pcd);
2645     dwc_otg_core_dev_init(core_if);
2646     dwc_otg_enable_global_interrupts(core_if);
2647 }
2648
2649 /**
2650  * Start the SRP timer to detect when the SRP does not complete within
2651  * 6 seconds.
2652  *
2653  * @param pcd the pcd structure.
2654  */
2655 void dwc_otg_pcd_initiate_srp(dwc_otg_pcd_t * pcd)
2656 {
2657         dwc_irqflags_t flags;
2658         DWC_SPINLOCK_IRQSAVE(pcd->lock, &flags);
2659         dwc_otg_initiate_srp(GET_CORE_IF(pcd));
2660         DWC_SPINUNLOCK_IRQRESTORE(pcd->lock, flags);
2661 }
2662
2663 int dwc_otg_pcd_get_frame_number(dwc_otg_pcd_t * pcd)
2664 {
2665         return dwc_otg_get_frame_number(GET_CORE_IF(pcd));
2666 }
2667
2668 int dwc_otg_pcd_is_lpm_enabled(dwc_otg_pcd_t * pcd)
2669 {
2670         return GET_CORE_IF(pcd)->core_params->lpm_enable;
2671 }
2672
2673 int dwc_otg_pcd_is_besl_enabled(dwc_otg_pcd_t * pcd)
2674 {
2675         return GET_CORE_IF(pcd)->core_params->besl_enable;
2676 }
2677
2678 int dwc_otg_pcd_get_param_baseline_besl(dwc_otg_pcd_t * pcd)
2679 {
2680         return GET_CORE_IF(pcd)->core_params->baseline_besl;
2681 }
2682
2683 int dwc_otg_pcd_get_param_deep_besl(dwc_otg_pcd_t * pcd)
2684 {
2685         return GET_CORE_IF(pcd)->core_params->deep_besl;
2686 }
2687
2688 uint32_t get_b_hnp_enable(dwc_otg_pcd_t * pcd)
2689 {
2690         return pcd->b_hnp_enable;
2691 }
2692
2693 uint32_t get_a_hnp_support(dwc_otg_pcd_t * pcd)
2694 {
2695         return pcd->a_hnp_support;
2696 }
2697
2698 uint32_t get_a_alt_hnp_support(dwc_otg_pcd_t * pcd)
2699 {
2700         return pcd->a_alt_hnp_support;
2701 }
2702
2703 int dwc_otg_pcd_get_rmwkup_enable(dwc_otg_pcd_t * pcd)
2704 {
2705         return pcd->remote_wakeup_enable;
2706 }
2707
2708 #endif /* DWC_HOST_ONLY */