1 /* ==========================================================================
2 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
3 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
4 * otherwise expressly agreed to in writing between Synopsys and you.
6 * The Software IS NOT an item of Licensed Software or Licensed Product under
7 * any End User Software License Agreement or Agreement for Licensed Product
8 * with Synopsys or any supplement thereto. You are permitted to use and
9 * redistribute this Software in source and binary forms, with or without
10 * modification, provided that redistributions of source code must retain this
11 * notice. You may not view, use, disclose, copy or distribute this file or
12 * any information contained herein except pursuant to this license grant from
13 * Synopsys. If you do not agree with this notice, including the disclaimer
14 * below, then you are not authorized to use the Software.
16 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
27 * ========================================================================== */
 * This file contains most of the CFI (Core Feature Interface)
 * implementation for the OTG.
37 #include "dwc_otg_pcd.h"
38 #include "dwc_otg_cfi.h"
40 /** This definition should actually migrate to the Portability Library */
41 #define DWC_CONSTANT_CPU_TO_LE16(x) (x)
43 extern dwc_otg_pcd_ep_t *get_ep_by_addr(dwc_otg_pcd_t *pcd, u16 wIndex);
45 static int cfi_core_features_buf(uint8_t *buf, uint16_t buflen);
46 static int cfi_get_feature_value(uint8_t *buf, uint16_t buflen,
47 struct dwc_otg_pcd *pcd,
48 struct cfi_usb_ctrlrequest *ctrl_req);
49 static int cfi_set_feature_value(struct dwc_otg_pcd *pcd);
50 static int cfi_ep_get_sg_val(uint8_t *buf, struct dwc_otg_pcd *pcd,
51 struct cfi_usb_ctrlrequest *req);
52 static int cfi_ep_get_concat_val(uint8_t *buf, struct dwc_otg_pcd *pcd,
53 struct cfi_usb_ctrlrequest *req);
54 static int cfi_ep_get_align_val(uint8_t *buf, struct dwc_otg_pcd *pcd,
55 struct cfi_usb_ctrlrequest *req);
56 static int cfi_preproc_reset(struct dwc_otg_pcd *pcd,
57 struct cfi_usb_ctrlrequest *req);
58 static void cfi_free_ep_bs_dyn_data(cfi_ep_t *cfiep);
60 static uint16_t get_dfifo_size(dwc_otg_core_if_t *core_if);
61 static int32_t get_rxfifo_size(dwc_otg_core_if_t *core_if, uint16_t wValue);
62 static int32_t get_txfifo_size(struct dwc_otg_pcd *pcd, uint16_t wValue);
64 static uint8_t resize_fifos(dwc_otg_core_if_t *core_if);
66 /** This is the header of the all features descriptor */
67 static cfi_all_features_header_t all_props_desc_header = {
68 .wVersion = DWC_CONSTANT_CPU_TO_LE16(0x100),
69 .wCoreID = DWC_CONSTANT_CPU_TO_LE16(CFI_CORE_ID_OTG),
70 .wNumFeatures = DWC_CONSTANT_CPU_TO_LE16(9),
73 /** This is an array of statically allocated feature descriptors */
74 static cfi_feature_desc_header_t prop_descs[] = {
78 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_DMA_MODE),
79 .bmAttributes = CFI_FEATURE_ATTR_RW,
80 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(1),
83 /* FT_ID_DMA_BUFFER_SETUP */
85 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_DMA_BUFFER_SETUP),
86 .bmAttributes = CFI_FEATURE_ATTR_RW,
87 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(6),
90 /* FT_ID_DMA_BUFF_ALIGN */
92 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_DMA_BUFF_ALIGN),
93 .bmAttributes = CFI_FEATURE_ATTR_RW,
94 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(2),
97 /* FT_ID_DMA_CONCAT_SETUP */
99 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_DMA_CONCAT_SETUP),
100 .bmAttributes = CFI_FEATURE_ATTR_RW,
101 /* .wDataLength = DWC_CONSTANT_CPU_TO_LE16(6), */
104 /* FT_ID_DMA_CIRCULAR */
106 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_DMA_CIRCULAR),
107 .bmAttributes = CFI_FEATURE_ATTR_RW,
108 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(6),
111 /* FT_ID_THRESHOLD_SETUP */
113 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_THRESHOLD_SETUP),
114 .bmAttributes = CFI_FEATURE_ATTR_RW,
115 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(6),
118 /* FT_ID_DFIFO_DEPTH */
120 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_DFIFO_DEPTH),
121 .bmAttributes = CFI_FEATURE_ATTR_RO,
122 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(2),
125 /* FT_ID_TX_FIFO_DEPTH */
127 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_TX_FIFO_DEPTH),
128 .bmAttributes = CFI_FEATURE_ATTR_RW,
129 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(2),
132 /* FT_ID_RX_FIFO_DEPTH */
134 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_RX_FIFO_DEPTH),
135 .bmAttributes = CFI_FEATURE_ATTR_RW,
136 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(2),
140 /** The table of feature names */
141 cfi_string_t prop_name_table[] = {
142 {FT_ID_DMA_MODE, "dma_mode"},
143 {FT_ID_DMA_BUFFER_SETUP, "buffer_setup"},
144 {FT_ID_DMA_BUFF_ALIGN, "buffer_align"},
145 {FT_ID_DMA_CONCAT_SETUP, "concat_setup"},
146 {FT_ID_DMA_CIRCULAR, "buffer_circular"},
147 {FT_ID_THRESHOLD_SETUP, "threshold_setup"},
148 {FT_ID_DFIFO_DEPTH, "dfifo_depth"},
149 {FT_ID_TX_FIFO_DEPTH, "txfifo_depth"},
150 {FT_ID_RX_FIFO_DEPTH, "rxfifo_depth"},
154 /************************************************************************/
 * Returns the name of the feature by its ID,
 * or NULL if no feature ID matches.
161 const uint8_t *get_prop_name(uint16_t prop_id, int *len)
166 for (pstr = prop_name_table; pstr && pstr->s; pstr++) {
167 if (pstr->id == prop_id) {
168 *len = DWC_STRLEN(pstr->s);
176 * This function handles all CFI specific control requests.
178 * Return a negative value to stall the DCE.
180 int cfi_setup(struct dwc_otg_pcd *pcd, struct cfi_usb_ctrlrequest *ctrl)
183 dwc_otg_pcd_ep_t *ep = NULL;
184 cfiobject_t *cfi = pcd->cfi;
185 struct dwc_otg_core_if *coreif = GET_CORE_IF(pcd);
186 uint16_t wLen = DWC_LE16_TO_CPU(&ctrl->wLength);
187 uint16_t wValue = DWC_LE16_TO_CPU(&ctrl->wValue);
188 uint16_t wIndex = DWC_LE16_TO_CPU(&ctrl->wIndex);
189 uint32_t regaddr = 0;
192 /* Save this Control Request in the CFI object.
193 * The data field will be assigned in the data
194 * stage completion CB function.
196 cfi->ctrl_req = *ctrl;
197 cfi->ctrl_req.data = NULL;
199 cfi->need_gadget_att = 0;
200 cfi->need_status_in_complete = 0;
202 switch (ctrl->bRequest) {
203 case VEN_CORE_GET_FEATURES:
204 retval = cfi_core_features_buf(cfi->buf_in.buf, CFI_IN_BUF_LEN);
206 /* dump_msg(cfi->buf_in.buf, retval); */
209 retval = min((uint16_t) retval, wLen);
210 /* Transfer this buffer to the host
211 * through the EP0-IN EP
213 ep->dwc_ep.dma_addr = cfi->buf_in.addr;
214 ep->dwc_ep.start_xfer_buff = cfi->buf_in.buf;
215 ep->dwc_ep.xfer_buff = cfi->buf_in.buf;
216 ep->dwc_ep.xfer_len = retval;
217 ep->dwc_ep.xfer_count = 0;
218 ep->dwc_ep.sent_zlp = 0;
219 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
221 pcd->ep0_pending = 1;
222 dwc_otg_ep0_start_transfer(coreif, &ep->dwc_ep);
227 case VEN_CORE_GET_FEATURE:
228 CFI_INFO("VEN_CORE_GET_FEATURE\n");
229 retval = cfi_get_feature_value(cfi->buf_in.buf, CFI_IN_BUF_LEN,
234 retval = min((uint16_t) retval, wLen);
235 /* Transfer this buffer to the host
236 * through the EP0-IN EP
238 ep->dwc_ep.dma_addr = cfi->buf_in.addr;
239 ep->dwc_ep.start_xfer_buff = cfi->buf_in.buf;
240 ep->dwc_ep.xfer_buff = cfi->buf_in.buf;
241 ep->dwc_ep.xfer_len = retval;
242 ep->dwc_ep.xfer_count = 0;
243 ep->dwc_ep.sent_zlp = 0;
244 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
246 pcd->ep0_pending = 1;
247 dwc_otg_ep0_start_transfer(coreif, &ep->dwc_ep);
249 CFI_INFO("VEN_CORE_GET_FEATURE=%d\n", retval);
250 dump_msg(cfi->buf_in.buf, retval);
253 case VEN_CORE_SET_FEATURE:
254 CFI_INFO("VEN_CORE_SET_FEATURE\n");
255 /* Set up an XFER to get the data stage of the control request,
256 * which is the new value of the feature to be modified.
259 ep->dwc_ep.is_in = 0;
260 ep->dwc_ep.dma_addr = cfi->buf_out.addr;
261 ep->dwc_ep.start_xfer_buff = cfi->buf_out.buf;
262 ep->dwc_ep.xfer_buff = cfi->buf_out.buf;
263 ep->dwc_ep.xfer_len = wLen;
264 ep->dwc_ep.xfer_count = 0;
265 ep->dwc_ep.sent_zlp = 0;
266 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
268 pcd->ep0_pending = 1;
269 /* Read the control write's data stage */
270 dwc_otg_ep0_start_transfer(coreif, &ep->dwc_ep);
274 case VEN_CORE_RESET_FEATURES:
275 CFI_INFO("VEN_CORE_RESET_FEATURES\n");
276 cfi->need_gadget_att = 1;
277 cfi->need_status_in_complete = 1;
278 retval = cfi_preproc_reset(pcd, ctrl);
279 CFI_INFO("VEN_CORE_RESET_FEATURES = (%d)\n", retval);
282 case VEN_CORE_ACTIVATE_FEATURES:
283 CFI_INFO("VEN_CORE_ACTIVATE_FEATURES\n");
286 case VEN_CORE_READ_REGISTER:
287 CFI_INFO("VEN_CORE_READ_REGISTER\n");
288 /* wValue optionally contains the HI WORD of
289 * the register offset and wIndex contains
290 * the LOW WORD of the register offset
293 /* @TODO - MAS - fix the access to the base field */
295 /* regaddr = (uint32_t) pcd->otg_dev->os_dep.base; */
296 /* GET_CORE_IF(pcd)->co */
299 regaddr = (wValue << 16) | wIndex;
302 /* Read a 32-bit value of the memory at the regaddr */
303 regval = DWC_READ_REG32((uint32_t *) regaddr);
306 dwc_memcpy(cfi->buf_in.buf, ®val, sizeof(uint32_t));
307 ep->dwc_ep.is_in = 1;
308 ep->dwc_ep.dma_addr = cfi->buf_in.addr;
309 ep->dwc_ep.start_xfer_buff = cfi->buf_in.buf;
310 ep->dwc_ep.xfer_buff = cfi->buf_in.buf;
311 ep->dwc_ep.xfer_len = wLen;
312 ep->dwc_ep.xfer_count = 0;
313 ep->dwc_ep.sent_zlp = 0;
314 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
316 pcd->ep0_pending = 1;
317 dwc_otg_ep0_start_transfer(coreif, &ep->dwc_ep);
318 cfi->need_gadget_att = 0;
322 case VEN_CORE_WRITE_REGISTER:
323 CFI_INFO("VEN_CORE_WRITE_REGISTER\n");
324 /* Set up an XFER to get the data stage of the control request,
325 * which is the new value of the register to be modified.
328 ep->dwc_ep.is_in = 0;
329 ep->dwc_ep.dma_addr = cfi->buf_out.addr;
330 ep->dwc_ep.start_xfer_buff = cfi->buf_out.buf;
331 ep->dwc_ep.xfer_buff = cfi->buf_out.buf;
332 ep->dwc_ep.xfer_len = wLen;
333 ep->dwc_ep.xfer_count = 0;
334 ep->dwc_ep.sent_zlp = 0;
335 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
337 pcd->ep0_pending = 1;
338 /* Read the control write's data stage */
339 dwc_otg_ep0_start_transfer(coreif, &ep->dwc_ep);
344 retval = -DWC_E_NOT_SUPPORTED;
352 * This function prepares the core features descriptors and copies its
353 * raw representation into the buffer <buf>.
355 * The buffer structure is as follows:
356 * all_features_header (8 bytes)
357 * features_#1 (8 bytes + feature name string length)
358 * features_#2 (8 bytes + feature name string length)
360 * features_#n - where n=the total count of feature descriptors
362 static int cfi_core_features_buf(uint8_t *buf, uint16_t buflen)
364 cfi_feature_desc_header_t *prop_hdr = prop_descs;
365 cfi_feature_desc_header_t *prop;
366 cfi_all_features_header_t *all_props_hdr = &all_props_desc_header;
367 cfi_all_features_header_t *tmp;
368 uint8_t *tmpbuf = buf;
369 const uint8_t *pname = NULL;
370 int i, j, namelen = 0, totlen;
372 /* Prepare and copy the core features into the buffer */
373 CFI_INFO("%s:\n", __func__);
375 tmp = (cfi_all_features_header_t *) tmpbuf;
376 *tmp = *all_props_hdr;
377 tmpbuf += CFI_ALL_FEATURES_HDR_LEN;
379 j = sizeof(prop_descs) / sizeof(cfi_all_features_header_t);
380 for (i = 0; i < j; i++, prop_hdr++) {
381 pname = get_prop_name(prop_hdr->wFeatureID, &namelen);
382 prop = (cfi_feature_desc_header_t *) tmpbuf;
385 prop->bNameLen = namelen;
387 DWC_CONSTANT_CPU_TO_LE16(CFI_FEATURE_DESC_HDR_LEN +
390 tmpbuf += CFI_FEATURE_DESC_HDR_LEN;
391 dwc_memcpy(tmpbuf, pname, namelen);
395 totlen = tmpbuf - buf;
398 tmp = (cfi_all_features_header_t *) buf;
399 tmp->wTotalLen = DWC_CONSTANT_CPU_TO_LE16(totlen);
406 * This function releases all the dynamic memory in the CFI object.
408 static void cfi_release(cfiobject_t *cfiobj)
411 dwc_list_link_t *tmp;
413 CFI_INFO("%s\n", __func__);
415 if (cfiobj->buf_in.buf) {
416 DWC_DMA_FREE(CFI_IN_BUF_LEN, cfiobj->buf_in.buf,
417 cfiobj->buf_in.addr);
418 cfiobj->buf_in.buf = NULL;
421 if (cfiobj->buf_out.buf) {
422 DWC_DMA_FREE(CFI_OUT_BUF_LEN, cfiobj->buf_out.buf,
423 cfiobj->buf_out.addr);
424 cfiobj->buf_out.buf = NULL;
427 /* Free the Buffer Setup values for each EP */
428 /* list_for_each_entry(cfiep, &cfiobj->active_eps, lh) { */
429 DWC_LIST_FOREACH(tmp, &cfiobj->active_eps) {
430 cfiep = DWC_LIST_ENTRY(tmp, struct cfi_ep, lh);
431 cfi_free_ep_bs_dyn_data(cfiep);
436 * This function frees the dynamically allocated EP buffer setup data.
438 static void cfi_free_ep_bs_dyn_data(cfi_ep_t *cfiep)
441 DWC_FREE(cfiep->bm_sg);
445 if (cfiep->bm_align) {
446 DWC_FREE(cfiep->bm_align);
447 cfiep->bm_align = NULL;
450 if (cfiep->bm_concat) {
451 if (NULL != cfiep->bm_concat->wTxBytes) {
452 DWC_FREE(cfiep->bm_concat->wTxBytes);
453 cfiep->bm_concat->wTxBytes = NULL;
455 DWC_FREE(cfiep->bm_concat);
456 cfiep->bm_concat = NULL;
461 * This function initializes the default values of the features
462 * for a specific endpoint and should be called only once when
463 * the EP is enabled first time.
465 static int cfi_ep_init_defaults(struct dwc_otg_pcd *pcd, cfi_ep_t *cfiep)
469 cfiep->bm_sg = DWC_ALLOC(sizeof(ddma_sg_buffer_setup_t));
470 if (NULL == cfiep->bm_sg) {
471 CFI_INFO("Failed to allocate memory for SG feature value\n");
472 return -DWC_E_NO_MEMORY;
474 dwc_memset(cfiep->bm_sg, 0, sizeof(ddma_sg_buffer_setup_t));
476 /* For the Concatenation feature's default value we do not allocate
477 * memory for the wTxBytes field - it will be done in the
478 * set_feature_value request handler.
480 cfiep->bm_concat = DWC_ALLOC(sizeof(ddma_concat_buffer_setup_t));
481 if (NULL == cfiep->bm_concat) {
483 ("Failed to allocate memory for
484 CONCATENATION feature value\n");
485 DWC_FREE(cfiep->bm_sg);
486 return -DWC_E_NO_MEMORY;
488 dwc_memset(cfiep->bm_concat, 0, sizeof(ddma_concat_buffer_setup_t));
490 cfiep->bm_align = DWC_ALLOC(sizeof(ddma_align_buffer_setup_t));
491 if (NULL == cfiep->bm_align) {
493 ("Failed to allocate memory for Alignment feature value\n");
494 DWC_FREE(cfiep->bm_sg);
495 DWC_FREE(cfiep->bm_concat);
496 return -DWC_E_NO_MEMORY;
498 dwc_memset(cfiep->bm_align, 0, sizeof(ddma_align_buffer_setup_t));
504 * The callback function that notifies the CFI on the activation of
505 * an endpoint in the PCD. The following steps are done in this function:
507 * Create a dynamically allocated cfi_ep_t object (a CFI wrapper to the PCD's
509 * Create MAX_DMA_DESCS_PER_EP count DMA Descriptors for the EP
510 * Set the Buffer Mode to standard
511 * Initialize the default values for all EP modes (SG, Circular, Concat, Align)
512 * Add the cfi_ep_t object to the list of active endpoints in the CFI object
514 static int cfi_ep_enable(struct cfiobject *cfi, struct dwc_otg_pcd *pcd,
515 struct dwc_otg_pcd_ep *ep)
518 int retval = -DWC_E_NOT_SUPPORTED;
520 CFI_INFO("%s: epname=%s; epnum=0x%02x\n", __func__,
521 "EP_" /*ep->ep.name */ , ep->desc->bEndpointAddress);
522 /* MAS - Check whether this endpoint already is in the list */
523 cfiep = get_cfi_ep_by_pcd_ep(cfi, ep);
526 /* Allocate a cfi_ep_t object */
527 cfiep = DWC_ALLOC(sizeof(cfi_ep_t));
530 ("Unable to allocate memory for <cfiep> in function %s\n",
532 return -DWC_E_NO_MEMORY;
534 dwc_memset(cfiep, 0, sizeof(cfi_ep_t));
536 /* Save the dwc_otg_pcd_ep pointer in the cfiep object */
539 /* Allocate the DMA Descriptors chain of
540 * MAX_DMA_DESCS_PER_EP count */
542 DWC_DMA_ALLOC(MAX_DMA_DESCS_PER_EP *
543 sizeof(dwc_otg_dma_desc_t),
544 &ep->dwc_ep.descs_dma_addr);
546 if (NULL == ep->dwc_ep.descs) {
548 return -DWC_E_NO_MEMORY;
551 DWC_LIST_INIT(&cfiep->lh);
553 /* Set the buffer mode to BM_STANDARD. It will be modified
554 * when building descriptors for a specific buffer mode */
555 ep->dwc_ep.buff_mode = BM_STANDARD;
557 /* Create and initialize the default values
558 * for this EP's Buffer modes */
559 retval = cfi_ep_init_defaults(pcd, cfiep);
563 /* Add the cfi_ep_t object to the CFI object's
564 * list of active endpoints */
565 DWC_LIST_INSERT_TAIL(&cfi->active_eps, &cfiep->lh);
567 } else { /* The sought EP already is in the list */
568 CFI_INFO("%s: The sought EP already is in the list\n",
576 * This function is called when the data stage of a 3-stage Control Write request
580 static int cfi_ctrl_write_complete(struct cfiobject *cfi,
581 struct dwc_otg_pcd *pcd)
583 uint32_t addr, reg_value;
584 uint16_t wIndex, wValue;
586 uint8_t *buf = cfi->buf_out.buf;
587 /* struct usb_ctrlrequest *ctrl_req = &cfi->ctrl_req_saved; */
588 struct cfi_usb_ctrlrequest *ctrl_req = &cfi->ctrl_req;
589 int retval = -DWC_E_NOT_SUPPORTED;
591 CFI_INFO("%s\n", __func__);
593 bRequest = ctrl_req->bRequest;
594 wIndex = DWC_CONSTANT_CPU_TO_LE16(ctrl_req->wIndex);
595 wValue = DWC_CONSTANT_CPU_TO_LE16(ctrl_req->wValue);
598 * Save the pointer to the data stage in the ctrl_req's <data> field.
599 * The request should be already saved in the command stage by now.
601 ctrl_req->data = cfi->buf_out.buf;
602 cfi->need_status_in_complete = 0;
603 cfi->need_gadget_att = 0;
606 case VEN_CORE_WRITE_REGISTER:
607 /* The buffer contains raw data of
608 * the new value for the register */
609 reg_value = *((uint32_t *) buf);
612 /* addr = (uint32_t) pcd->otg_dev->os_dep.base; */
615 addr = (wValue << 16) | wIndex;
618 /* writel(reg_value, addr); */
621 cfi->need_status_in_complete = 1;
624 case VEN_CORE_SET_FEATURE:
625 /* The buffer contains raw data of
626 * the new value of the feature */
627 retval = cfi_set_feature_value(pcd);
631 cfi->need_status_in_complete = 1;
642 * This function builds the DMA descriptors for the SG buffer mode.
644 static void cfi_build_sg_descs(struct cfiobject *cfi, cfi_ep_t *cfiep,
645 dwc_otg_pcd_request_t *req)
647 struct dwc_otg_pcd_ep *ep = cfiep->ep;
648 ddma_sg_buffer_setup_t *sgval = cfiep->bm_sg;
649 struct dwc_otg_dma_desc *desc = cfiep->ep->dwc_ep.descs;
650 struct dwc_otg_dma_desc *desc_last = cfiep->ep->dwc_ep.descs;
651 dma_addr_t buff_addr = req->dma;
653 uint32_t txsize, off;
655 txsize = sgval->wSize;
656 off = sgval->bOffset;
658 /* CFI_INFO("%s: %s TXSIZE=0x%08x; OFFSET=0x%08x\n",
659 __func__, cfiep->ep->ep.name, txsize, off);*/
661 for (i = 0; i < sgval->bCount; i++) {
662 desc->status.b.bs = BS_HOST_BUSY;
663 desc->buf = buff_addr;
664 desc->status.b.l = 0;
665 desc->status.b.ioc = 0;
666 desc->status.b.sp = 0;
667 desc->status.b.bytes = txsize;
668 desc->status.b.bs = BS_HOST_READY;
670 /* Set the next address of the buffer */
671 buff_addr += txsize + off;
676 /* Set the last, ioc and sp bits on the Last DMA Descriptor */
677 desc_last->status.b.l = 1;
678 desc_last->status.b.ioc = 1;
679 desc_last->status.b.sp = ep->dwc_ep.sent_zlp;
680 /* Save the last DMA descriptor pointer */
681 cfiep->dma_desc_last = desc_last;
682 cfiep->desc_count = sgval->bCount;
686 * This function builds the DMA descriptors for the Concatenation buffer mode.
688 static void cfi_build_concat_descs(struct cfiobject *cfi, cfi_ep_t *cfiep,
689 dwc_otg_pcd_request_t *req)
691 struct dwc_otg_pcd_ep *ep = cfiep->ep;
692 ddma_concat_buffer_setup_t *concatval = cfiep->bm_concat;
693 struct dwc_otg_dma_desc *desc = cfiep->ep->dwc_ep.descs;
694 struct dwc_otg_dma_desc *desc_last = cfiep->ep->dwc_ep.descs;
695 dma_addr_t buff_addr = req->dma;
699 txsize = concatval->wTxBytes;
701 for (i = 0; i < concatval->hdr.bDescCount; i++) {
702 desc->buf = buff_addr;
703 desc->status.b.bs = BS_HOST_BUSY;
704 desc->status.b.l = 0;
705 desc->status.b.ioc = 0;
706 desc->status.b.sp = 0;
707 desc->status.b.bytes = *txsize;
708 desc->status.b.bs = BS_HOST_READY;
711 /* Set the next address of the buffer */
712 buff_addr += UGETW(ep->desc->wMaxPacketSize);
717 /* Set the last, ioc and sp bits on the Last DMA Descriptor */
718 desc_last->status.b.l = 1;
719 desc_last->status.b.ioc = 1;
720 desc_last->status.b.sp = ep->dwc_ep.sent_zlp;
721 cfiep->dma_desc_last = desc_last;
722 cfiep->desc_count = concatval->hdr.bDescCount;
726 * This function builds the DMA descriptors for the Circular buffer mode
728 static void cfi_build_circ_descs(struct cfiobject *cfi, cfi_ep_t *cfiep,
729 dwc_otg_pcd_request_t *req)
731 /* @todo: MAS - add implementation when
732 * this feature needs to be tested */
736 * This function builds the DMA descriptors for the Alignment buffer mode
738 static void cfi_build_align_descs(struct cfiobject *cfi, cfi_ep_t *cfiep,
739 dwc_otg_pcd_request_t *req)
741 struct dwc_otg_pcd_ep *ep = cfiep->ep;
742 ddma_align_buffer_setup_t *alignval = cfiep->bm_align;
743 struct dwc_otg_dma_desc *desc = cfiep->ep->dwc_ep.descs;
744 dma_addr_t buff_addr = req->dma;
746 desc->status.b.bs = BS_HOST_BUSY;
747 desc->status.b.l = 1;
748 desc->status.b.ioc = 1;
749 desc->status.b.sp = ep->dwc_ep.sent_zlp;
750 desc->status.b.bytes = req->length;
751 /* Adjust the buffer alignment */
752 desc->buf = (buff_addr + alignval->bAlign);
753 desc->status.b.bs = BS_HOST_READY;
754 cfiep->dma_desc_last = desc;
755 cfiep->desc_count = 1;
759 * This function builds the DMA descriptors chain for different modes of the
760 * buffer setup of an endpoint.
762 static void cfi_build_descriptors(struct cfiobject *cfi,
763 struct dwc_otg_pcd *pcd,
764 struct dwc_otg_pcd_ep *ep,
765 dwc_otg_pcd_request_t *req)
769 /* Get the cfiep by the dwc_otg_pcd_ep */
770 cfiep = get_cfi_ep_by_pcd_ep(cfi, ep);
772 CFI_INFO("%s: Unable to find a matching active endpoint\n",
777 cfiep->xfer_len = req->length;
779 /* Iterate through all the DMA descriptors */
780 switch (cfiep->ep->dwc_ep.buff_mode) {
782 cfi_build_sg_descs(cfi, cfiep, req);
786 cfi_build_concat_descs(cfi, cfiep, req);
790 cfi_build_circ_descs(cfi, cfiep, req);
794 cfi_build_align_descs(cfi, cfiep, req);
803 * Allocate DMA buffer for different Buffer modes.
805 static void *cfi_ep_alloc_buf(struct cfiobject *cfi, struct dwc_otg_pcd *pcd,
806 struct dwc_otg_pcd_ep *ep, dma_addr_t *dma,
807 unsigned size, gfp_t flags)
809 return DWC_DMA_ALLOC(size, dma);
813 * This function initializes the CFI object.
815 int init_cfi(cfiobject_t *cfiobj)
817 CFI_INFO("%s\n", __func__);
819 /* Allocate a buffer for IN XFERs */
821 DWC_DMA_ALLOC(CFI_IN_BUF_LEN, &cfiobj->buf_in.addr);
822 if (NULL == cfiobj->buf_in.buf) {
823 CFI_INFO("Unable to allocate buffer for INs\n");
824 return -DWC_E_NO_MEMORY;
827 /* Allocate a buffer for OUT XFERs */
828 cfiobj->buf_out.buf =
829 DWC_DMA_ALLOC(CFI_OUT_BUF_LEN, &cfiobj->buf_out.addr);
830 if (NULL == cfiobj->buf_out.buf) {
831 CFI_INFO("Unable to allocate buffer for OUT\n");
832 return -DWC_E_NO_MEMORY;
835 /* Initialize the callback function pointers */
836 cfiobj->ops.release = cfi_release;
837 cfiobj->ops.ep_enable = cfi_ep_enable;
838 cfiobj->ops.ctrl_write_complete = cfi_ctrl_write_complete;
839 cfiobj->ops.build_descriptors = cfi_build_descriptors;
840 cfiobj->ops.ep_alloc_buf = cfi_ep_alloc_buf;
842 /* Initialize the list of active endpoints in the CFI object */
843 DWC_LIST_INIT(&cfiobj->active_eps);
849 * This function reads the required feature's current value into the buffer
851 * @retval: Returns negative as error, or the data length of the feature
853 static int cfi_get_feature_value(uint8_t *buf, uint16_t buflen,
854 struct dwc_otg_pcd *pcd,
855 struct cfi_usb_ctrlrequest *ctrl_req)
857 int retval = -DWC_E_NOT_SUPPORTED;
858 struct dwc_otg_core_if *coreif = GET_CORE_IF(pcd);
859 uint16_t dfifo, rxfifo, txfifo;
861 switch (ctrl_req->wIndex) {
862 /* Whether the DDMA is enabled or not */
864 *buf = (coreif->dma_enable && coreif->dma_desc_enable) ? 1 : 0;
868 case FT_ID_DMA_BUFFER_SETUP:
869 retval = cfi_ep_get_sg_val(buf, pcd, ctrl_req);
872 case FT_ID_DMA_BUFF_ALIGN:
873 retval = cfi_ep_get_align_val(buf, pcd, ctrl_req);
876 case FT_ID_DMA_CONCAT_SETUP:
877 retval = cfi_ep_get_concat_val(buf, pcd, ctrl_req);
880 case FT_ID_DMA_CIRCULAR:
881 CFI_INFO("GetFeature value (FT_ID_DMA_CIRCULAR)\n");
884 case FT_ID_THRESHOLD_SETUP:
885 CFI_INFO("GetFeature value (FT_ID_THRESHOLD_SETUP)\n");
888 case FT_ID_DFIFO_DEPTH:
889 dfifo = get_dfifo_size(coreif);
890 *((uint16_t *) buf) = dfifo;
891 retval = sizeof(uint16_t);
894 case FT_ID_TX_FIFO_DEPTH:
895 retval = get_txfifo_size(pcd, ctrl_req->wValue);
898 *((uint16_t *) buf) = txfifo;
899 retval = sizeof(uint16_t);
903 case FT_ID_RX_FIFO_DEPTH:
904 retval = get_rxfifo_size(coreif, ctrl_req->wValue);
907 *((uint16_t *) buf) = rxfifo;
908 retval = sizeof(uint16_t);
917 * This function resets the SG for the specified EP to its default value
919 static int cfi_reset_sg_val(cfi_ep_t *cfiep)
921 dwc_memset(cfiep->bm_sg, 0, sizeof(ddma_sg_buffer_setup_t));
926 * This function resets the Alignment for the specified EP to its default value
928 static int cfi_reset_align_val(cfi_ep_t *cfiep)
930 dwc_memset(cfiep->bm_sg, 0, sizeof(ddma_sg_buffer_setup_t));
935 * This function resets the Concatenation for the specified EP to its default value
936 * This function will also set the value of the wTxBytes field to NULL after
937 * freeing the memory previously allocated for this field.
939 static int cfi_reset_concat_val(cfi_ep_t *cfiep)
941 /* First we need to free the wTxBytes field */
942 if (cfiep->bm_concat->wTxBytes) {
943 DWC_FREE(cfiep->bm_concat->wTxBytes);
944 cfiep->bm_concat->wTxBytes = NULL;
947 dwc_memset(cfiep->bm_concat, 0, sizeof(ddma_concat_buffer_setup_t));
952 * This function resets all the buffer setups of the specified endpoint
954 static int cfi_ep_reset_all_setup_vals(cfi_ep_t *cfiep)
956 cfi_reset_sg_val(cfiep);
957 cfi_reset_align_val(cfiep);
958 cfi_reset_concat_val(cfiep);
962 static int cfi_handle_reset_fifo_val(struct dwc_otg_pcd *pcd, uint8_t ep_addr,
963 uint8_t rx_rst, uint8_t tx_rst)
965 int retval = -DWC_E_INVALID;
968 dwc_otg_pcd_ep_t *ep = NULL;
969 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
970 dwc_otg_core_params_t *params = GET_CORE_IF(pcd)->core_params;
973 rx_siz = params->dev_rx_fifo_size;
974 params->dev_rx_fifo_size = GET_CORE_IF(pcd)->init_rxfsiz;
981 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
983 core_if->core_params->dev_tx_fifo_size[i];
984 core_if->core_params->dev_tx_fifo_size[i] =
985 core_if->init_txfsiz[i];
989 ep = get_ep_by_addr(pcd, ep_addr);
993 ("%s: Unable to get the endpoint addr=0x%02x\n",
995 return -DWC_E_INVALID;
999 params->dev_tx_fifo_size[ep->dwc_ep.tx_fifo_num -
1001 params->dev_tx_fifo_size[ep->dwc_ep.tx_fifo_num - 1] =
1002 GET_CORE_IF(pcd)->init_txfsiz[ep->dwc_ep.
1007 if (resize_fifos(GET_CORE_IF(pcd))) {
1011 ("%s: Error resetting the feature Reset All(FIFO size)\n",
1014 params->dev_rx_fifo_size = rx_siz;
1019 for (i = 0; i < core_if->hwcfg4.b.num_in_eps;
1021 core_if->core_params->
1022 dev_tx_fifo_size[i] = tx_siz[i];
1025 params->dev_tx_fifo_size[ep->dwc_ep.
1030 retval = -DWC_E_INVALID;
1035 static int cfi_handle_reset_all(struct dwc_otg_pcd *pcd, uint8_t addr)
1039 cfiobject_t *cfi = pcd->cfi;
1040 dwc_list_link_t *tmp;
1042 retval = cfi_handle_reset_fifo_val(pcd, addr, 1, 1);
1046 /* If the EP address is known then
1047 * reset the features for only that EP */
1049 cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
1050 if (NULL == cfiep) {
1051 CFI_INFO("%s: Error getting the EP address 0x%02x\n",
1053 return -DWC_E_INVALID;
1055 retval = cfi_ep_reset_all_setup_vals(cfiep);
1056 cfiep->ep->dwc_ep.buff_mode = BM_STANDARD;
1058 /* Otherwise (wValue == 0), reset all features of all EP's */
1060 /* Traverse all the active EP's and
1061 * reset the feature(s) value(s) */
1062 /* list_for_each_entry(cfiep, &cfi->active_eps, lh) { */
1063 DWC_LIST_FOREACH(tmp, &cfi->active_eps) {
1064 cfiep = DWC_LIST_ENTRY(tmp, struct cfi_ep, lh);
1065 retval = cfi_ep_reset_all_setup_vals(cfiep);
1066 cfiep->ep->dwc_ep.buff_mode = BM_STANDARD;
1069 ("%s: Error resetting the feature Reset All\n",
1078 static int cfi_handle_reset_dma_buff_setup(struct dwc_otg_pcd *pcd,
1083 cfiobject_t *cfi = pcd->cfi;
1084 dwc_list_link_t *tmp;
1086 /* If the EP address is known then reset
1087 * the features for only that EP */
1090 cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
1091 if (NULL == cfiep) {
1092 CFI_INFO("%s: Error getting the EP address 0x%02x\n",
1094 return -DWC_E_INVALID;
1096 retval = cfi_reset_sg_val(cfiep);
1098 /* Otherwise (wValue == 0), reset all features of all EP's */
1100 /* Traverse all the active EP's and
1101 * reset the feature(s) value(s) */
1102 /* list_for_each_entry(cfiep, &cfi->active_eps, lh) { */
1103 DWC_LIST_FOREACH(tmp, &cfi->active_eps) {
1104 cfiep = DWC_LIST_ENTRY(tmp, struct cfi_ep, lh);
1105 retval = cfi_reset_sg_val(cfiep);
1108 ("%s: Error resetting the feature Buffer Setup\n",
1117 static int cfi_handle_reset_concat_val(struct dwc_otg_pcd *pcd, uint8_t addr)
1121 cfiobject_t *cfi = pcd->cfi;
1122 dwc_list_link_t *tmp;
1124 /* If the EP address is known then
1125 * reset the features for only that EP */
1127 cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
1128 if (NULL == cfiep) {
1129 CFI_INFO("%s: Error getting the EP address 0x%02x\n",
1131 return -DWC_E_INVALID;
1133 retval = cfi_reset_concat_val(cfiep);
1135 /* Otherwise (wValue == 0), reset all features of all EP's */
1137 /* Traverse all the active EP's and
1138 * reset the feature(s) value(s) */
1139 /* list_for_each_entry(cfiep, &cfi->active_eps, lh) { */
1140 DWC_LIST_FOREACH(tmp, &cfi->active_eps) {
1141 cfiep = DWC_LIST_ENTRY(tmp, struct cfi_ep, lh);
1142 retval = cfi_reset_concat_val(cfiep);
1145 ("%s: Error resetting the feature Concatenation Value\n",
1154 static int cfi_handle_reset_align_val(struct dwc_otg_pcd *pcd, uint8_t addr)
1158 cfiobject_t *cfi = pcd->cfi;
1159 dwc_list_link_t *tmp;
1161 /* If the EP address is known then reset the features for only that EP */
1163 cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
1164 if (NULL == cfiep) {
1165 CFI_INFO("%s: Error getting the EP address 0x%02x\n",
1167 return -DWC_E_INVALID;
1169 retval = cfi_reset_align_val(cfiep);
1171 /* Otherwise (wValue == 0), reset all features of all EP's */
1173 /* Traverse all the active EP's and reset the feature(s) value(s) */
1174 /* list_for_each_entry(cfiep, &cfi->active_eps, lh) { */
1175 DWC_LIST_FOREACH(tmp, &cfi->active_eps) {
1176 cfiep = DWC_LIST_ENTRY(tmp, struct cfi_ep, lh);
1177 retval = cfi_reset_align_val(cfiep);
1180 ("%s: Error resetting the feature Aliignment Value\n",
1190 static int cfi_preproc_reset(struct dwc_otg_pcd *pcd,
1191 struct cfi_usb_ctrlrequest *req)
1195 switch (req->wIndex) {
1197 /* Reset all features */
1198 retval = cfi_handle_reset_all(pcd, req->wValue & 0xff);
1201 case FT_ID_DMA_BUFFER_SETUP:
1202 /* Reset the SG buffer setup */
1204 cfi_handle_reset_dma_buff_setup(pcd, req->wValue & 0xff);
1207 case FT_ID_DMA_CONCAT_SETUP:
1208 /* Reset the Concatenation buffer setup */
1209 retval = cfi_handle_reset_concat_val(pcd, req->wValue & 0xff);
1212 case FT_ID_DMA_BUFF_ALIGN:
1213 /* Reset the Alignment buffer setup */
1214 retval = cfi_handle_reset_align_val(pcd, req->wValue & 0xff);
1217 case FT_ID_TX_FIFO_DEPTH:
1219 cfi_handle_reset_fifo_val(pcd, req->wValue & 0xff, 0, 1);
1220 pcd->cfi->need_gadget_att = 0;
1223 case FT_ID_RX_FIFO_DEPTH:
1224 retval = cfi_handle_reset_fifo_val(pcd, 0, 1, 0);
1225 pcd->cfi->need_gadget_att = 0;
1234 * This function sets a new value for the SG buffer setup.
/**
 * Parses a ddma_sg_buffer_setup_t from buf, validates the descriptor count
 * (1..MAX_DMA_DESCS_PER_EP) and transfer size, resolves the IN and OUT
 * endpoints named in the setup, then switches both endpoints to BM_SG
 * buffer mode and stores a copy of the setup on each.
 *
 * @param buf Raw control-request data holding a ddma_sg_buffer_setup_t
 * @param pcd The PCD object whose endpoints are configured
 * @return 0 on success (return lines lost in capture — TODO confirm),
 *         -DWC_E_INVALID on validation failure
 */
1236 static int cfi_ep_set_sg_val(uint8_t *buf, struct dwc_otg_pcd *pcd)
1238 uint8_t inaddr, outaddr;
1239 cfi_ep_t *epin, *epout;
1240 ddma_sg_buffer_setup_t *psgval;
1241 uint32_t desccount, size;
1243 CFI_INFO("%s\n", __func__);
1245 psgval = (ddma_sg_buffer_setup_t *) buf;
1246 desccount = (uint32_t) psgval->bCount;
1247 size = (uint32_t) psgval->wSize;
1250 /* Check the DMA descriptor count */
1250 if ((desccount > MAX_DMA_DESCS_PER_EP) || (desccount == 0)) {
1252 ("%s: The count of DMA Descriptors should be between 1 and %d\n",
1253 __func__, MAX_DMA_DESCS_PER_EP);
1254 return -DWC_E_INVALID;
/* NOTE(review): the size check below rejects size == 0 per its message;
 * the condition line itself was lost in capture */
1257 /* Check the DMA descriptor count */
1261 CFI_INFO("%s: The transfer size should be at least 1 byte\n",
1264 return -DWC_E_INVALID;
1268 inaddr = psgval->bInEndpointAddress;
1269 outaddr = psgval->bOutEndpointAddress;
1271 epin = get_cfi_ep_by_addr(pcd->cfi, inaddr);
1272 epout = get_cfi_ep_by_addr(pcd->cfi, outaddr);
1274 if (NULL == epin || NULL == epout) {
1276 ("%s: Unable to get the endpoints inaddr=0x%02x outaddr=0x%02x\n",
1277 __func__, inaddr, outaddr);
1278 return -DWC_E_INVALID;
/* Apply the setup symmetrically to the IN and OUT endpoints */
1281 epin->ep->dwc_ep.buff_mode = BM_SG;
1282 dwc_memcpy(epin->bm_sg, psgval, sizeof(ddma_sg_buffer_setup_t));
1284 epout->ep->dwc_ep.buff_mode = BM_SG;
1285 dwc_memcpy(epout->bm_sg, psgval, sizeof(ddma_sg_buffer_setup_t));
1291 * This function sets a new value for the buffer Alignment setup.
/**
 * Parses a ddma_align_buffer_setup_t from buf, resolves the endpoint it
 * names, switches that endpoint to BM_ALIGN buffer mode and stores a copy
 * of the alignment setup on it.
 *
 * @param buf Raw control-request data holding a ddma_align_buffer_setup_t
 * @param pcd The PCD object whose endpoint is configured
 * @return -DWC_E_INVALID if the endpoint cannot be resolved; success
 *         return lines lost in capture — TODO confirm
 */
1293 static int cfi_ep_set_alignment_val(uint8_t *buf, struct dwc_otg_pcd *pcd)
1297 ddma_align_buffer_setup_t *palignval;
1299 palignval = (ddma_align_buffer_setup_t *) buf;
1300 addr = palignval->bEndpointAddress;
1302 ep = get_cfi_ep_by_addr(pcd->cfi, addr);
1305 CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
1307 return -DWC_E_INVALID;
1310 ep->ep->dwc_ep.buff_mode = BM_ALIGN;
1311 dwc_memcpy(ep->bm_align, palignval, sizeof(ddma_align_buffer_setup_t));
1317 * This function sets a new value for the Concatenation buffer setup.
/**
 * Parses a concatenation-buffer setup (header + wTxBytes array) from buf,
 * validates the descriptor count and that every wTxSize entry is <= the
 * endpoint's wMaxPacketSize, then switches the endpoint to BM_CONCAT mode,
 * stores the header, and (re)allocates/copies the wTxBytes array.
 *
 * @param buf Raw control-request data: header followed by uint16_t sizes
 * @param pcd The PCD object whose endpoint is configured
 * @return -DWC_E_INVALID / -DWC_E_NO_MEMORY on failure; success return
 *         lines lost in capture — TODO confirm
 */
1319 static int cfi_ep_set_concat_val(uint8_t *buf, struct dwc_otg_pcd *pcd)
1323 struct _ddma_concat_buffer_setup_hdr *pConcatValHdr;
1329 pConcatValHdr = (struct _ddma_concat_buffer_setup_hdr *)buf;
1330 desccount = (uint32_t) pConcatValHdr->bDescCount;
/* The wTxSize values immediately follow the fixed-size header */
1331 pVals = (uint16_t *) (buf + BS_CONCAT_VAL_HDR_LEN);
1333 /* Check the DMA descriptor count */
1334 if (desccount > MAX_DMA_DESCS_PER_EP) {
1335 CFI_INFO("%s: Maximum DMA Descriptor count should be %d\n",
1336 __func__, MAX_DMA_DESCS_PER_EP);
1337 return -DWC_E_INVALID;
1340 addr = pConcatValHdr->bEndpointAddress;
1341 ep = get_cfi_ep_by_addr(pcd->cfi, addr);
1343 CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
1345 return -DWC_E_INVALID;
1348 mps = UGETW(ep->ep->desc->wMaxPacketSize);
/* Debug dump of the requested per-descriptor sizes */
1351 for (i = 0; i < desccount; i++) {
1352 CFI_INFO("%s: wTxSize[%d]=0x%04x\n", __func__, i, pVals[i]);
1354 CFI_INFO("%s: epname=%s; mps=%d\n", __func__, ep->ep->ep.name, mps);
1357 /* Check the wTxSizes to be less than or equal to the mps */
1358 for (i = 0; i < desccount; i++) {
1359 if (pVals[i] > mps) {
1361 ("%s: ERROR - the wTxSize[%d] should be <= MPS (wTxSize=%d)\n",
1362 __func__, i, pVals[i]);
1363 return -DWC_E_INVALID;
1367 ep->ep->dwc_ep.buff_mode = BM_CONCAT;
1368 dwc_memcpy(ep->bm_concat, pConcatValHdr, BS_CONCAT_VAL_HDR_LEN);
1370 /* Free the previously allocated storage for the wTxBytes */
1371 if (ep->bm_concat->wTxBytes) {
1372 DWC_FREE(ep->bm_concat->wTxBytes);
1375 /* Allocate a new storage for the wTxBytes field */
1376 ep->bm_concat->wTxBytes =
1377 DWC_ALLOC(sizeof(uint16_t) * pConcatValHdr->bDescCount);
1378 if (NULL == ep->bm_concat->wTxBytes) {
1379 CFI_INFO("%s: Unable to allocate memory\n", __func__);
1380 return -DWC_E_NO_MEMORY;
1383 /* Copy the new values into the wTxBytes field */
1384 dwc_memcpy(ep->bm_concat->wTxBytes, buf + BS_CONCAT_VAL_HDR_LEN,
1385 sizeof(uint16_t) * pConcatValHdr->bDescCount);
1391 * This function calculates the total of all FIFO sizes
1393 * @param core_if Programming view of DWC_otg controller
1395 * @return The total of data FIFO sizes.
1398 static uint16_t get_dfifo_size(dwc_otg_core_if_t *core_if)
1400 dwc_otg_core_params_t *params = core_if->core_params;
1401 uint16_t dfifo_total = 0;
1404 /* The shared RxFIFO size */
/* Start from the shared Rx FIFO plus the non-periodic Tx FIFO
 * (the "dfifo_total =" half of this statement was lost in capture) */
1406 params->dev_rx_fifo_size + params->dev_nperio_tx_fifo_size;
1408 /* Add up each TxFIFO size to the total */
1409 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++)
1410 dfifo_total += params->dev_tx_fifo_size[i];
1416 * This function returns Rx FIFO size
1418 * @param core_if Programming view of DWC_otg controller
1420 * @return The total of data FIFO sizes.
/**
 * The high byte of wValue selects which value is reported: one selector
 * returns the power-on Rx FIFO size capped at 32768, another the currently
 * configured dev_rx_fifo_size; anything else is invalid. The case labels
 * were lost in capture — TODO confirm selector values against the header.
 */
1423 static int32_t get_rxfifo_size(dwc_otg_core_if_t *core_if, uint16_t wValue)
1425 switch (wValue >> 8) {
/* Hardware maximum (power-on value), clamped to 32768 */
1427 return (core_if->pwron_rxfsiz <
1428 32768) ? core_if->pwron_rxfsiz : 32768;
/* Currently configured value */
1431 return core_if->core_params->dev_rx_fifo_size;
1434 return -DWC_E_INVALID;
1440 * This function returns Tx FIFO size for IN EP
1442 * @param core_if Programming view of DWC_otg controller
1444 * @return The total of data FIFO sizes.
/**
 * Resolves the IN endpoint addressed by the low byte of wValue and, based
 * on the high byte, returns either its power-on Tx FIFO size (clamped to
 * 768) or its currently configured dev_tx_fifo_size. OUT endpoints have no
 * Tx FIFO and are rejected.
 */
1447 static int32_t get_txfifo_size(struct dwc_otg_pcd *pcd, uint16_t wValue)
1449 dwc_otg_pcd_ep_t *ep;
1451 ep = get_ep_by_addr(pcd, wValue & 0xff);
1454 CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
1455 __func__, wValue & 0xff);
1456 return -DWC_E_INVALID;
/* Only IN endpoints own a Tx FIFO */
1459 if (!ep->dwc_ep.is_in) {
1461 ("%s: No Tx FIFO assingned to the Out endpoint addr=0x%02x\n",
1462 __func__, wValue & 0xff);
1463 return -DWC_E_INVALID;
1466 switch (wValue >> 8) {
/* Power-on (hardware) size, clamped to 768; tx_fifo_num is 1-based,
 * the pwron_txfsiz array is 0-based */
1468 return (GET_CORE_IF(pcd)->pwron_txfsiz
1469 [ep->dwc_ep.tx_fifo_num - 1] <
1470 768) ? GET_CORE_IF(pcd)->pwron_txfsiz[ep->dwc_ep.
/* Currently configured value (continuation lines lost in capture) */
1475 return GET_CORE_IF(pcd)->core_params->dev_tx_fifo_size[ep->
1480 return -DWC_E_INVALID;
1486 * This function checks if the submitted combination of
1487 * device mode FIFO sizes is possible or not.
1489 * @param core_if Programming view of DWC_otg controller
1491 * @return 1 if possible, 0 otherwise.
/**
 * Three validation passes (failure returns lost in capture):
 *  1. the sum of all FIFO sizes must fit in total_fifo_size;
 *  2. each size must be within its architectural min/max
 *     (Rx and non-periodic Tx: 16..32768, per-EP Tx: 4..768);
 *  3. each FIFO must also fit within its power-on (hardware) size,
 *     tracked via the running start_addr accumulation.
 */
1494 static uint8_t check_fifo_sizes(dwc_otg_core_if_t *core_if)
1496 uint16_t dfifo_actual = 0;
1497 dwc_otg_core_params_t *params = core_if->core_params;
1498 uint16_t start_addr = 0;
/* Pass 1: total must not exceed the core's total FIFO RAM
 * (the "dfifo_actual =" half of this statement was lost in capture) */
1502 params->dev_rx_fifo_size + params->dev_nperio_tx_fifo_size;
1504 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++)
1505 dfifo_actual += params->dev_tx_fifo_size[i];
1507 if (dfifo_actual > core_if->total_fifo_size)
/* Pass 2: per-FIFO architectural bounds */
1510 if (params->dev_rx_fifo_size > 32768 || params->dev_rx_fifo_size < 16)
1513 if (params->dev_nperio_tx_fifo_size > 32768
1514 || params->dev_nperio_tx_fifo_size < 16)
1517 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
1519 if (params->dev_tx_fifo_size[i] > 768
1520 || params->dev_tx_fifo_size[i] < 4)
/* Pass 3: each FIFO must fit its power-on hardware allocation */
1524 if (params->dev_rx_fifo_size > core_if->pwron_rxfsiz)
1526 start_addr = params->dev_rx_fifo_size;
1528 if (params->dev_nperio_tx_fifo_size > core_if->pwron_gnptxfsiz)
1530 start_addr += params->dev_nperio_tx_fifo_size;
1532 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
1534 if (params->dev_tx_fifo_size[i] > core_if->pwron_txfsiz[i])
1536 start_addr += params->dev_tx_fifo_size[i];
1543 * This function resizes Device mode FIFOs
1545 * @param core_if Programming view of DWC_otg controller
1547 * @return 1 if successful, 0 otherwise
/**
 * Flow: validate the requested sizes (check_fifo_sizes), back up the
 * current grxfsiz/gnptxfsiz/dtxfsiz register values, program the new
 * sizes laying FIFOs out contiguously from address 0, read the registers
 * back to verify the core accepted them, restore the backups on mismatch,
 * and finally flush all Tx FIFOs and the Rx FIFO. Register writes are
 * order-sensitive; do not reorder.
 */
1550 static uint8_t resize_fifos(dwc_otg_core_if_t *core_if)
1553 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
1554 dwc_otg_core_params_t *params = core_if->core_params;
1555 uint32_t rx_fifo_size;
1556 fifosize_data_t nptxfifosize;
1557 fifosize_data_t txfifosize[15];
/* Backups for rollback if the core rejects the new values */
1559 uint32_t rx_fsz_bak;
1560 uint32_t nptxfsz_bak;
1561 uint32_t txfsz_bak[15];
1563 uint16_t start_address;
/* Reject impossible size combinations up front */
1566 if (!check_fifo_sizes(core_if))
1569 /* Configure data FIFO sizes */
/* Dynamic FIFO sizing must be supported by hardware AND enabled */
1570 if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
1571 rx_fsz_bak = DWC_READ_REG32(&global_regs->grxfsiz);
1572 rx_fifo_size = params->dev_rx_fifo_size;
1573 DWC_WRITE_REG32(&global_regs->grxfsiz, rx_fifo_size);
1576 * Tx FIFOs These FIFOs are numbered from 1 to 15.
1577 * Indexes of the FIFO size module parameters in the
1578 * dev_tx_fifo_size array and the FIFO size registers in
1579 * the dtxfsiz array run from 0 to 14.
1582 /* Non-periodic Tx FIFO */
1583 nptxfsz_bak = DWC_READ_REG32(&global_regs->gnptxfsiz);
1584 nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
/* FIFOs are packed back-to-back: np Tx FIFO starts right after Rx */
1585 start_address = params->dev_rx_fifo_size;
1586 nptxfifosize.b.startaddr = start_address;
1588 DWC_WRITE_REG32(&global_regs->gnptxfsiz, nptxfifosize.d32);
1590 start_address += nptxfifosize.b.depth;
1592 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
1593 txfsz_bak[i] = DWC_READ_REG32(&global_regs->dtxfsiz[i]);
1595 txfifosize[i].b.depth = params->dev_tx_fifo_size[i];
1596 txfifosize[i].b.startaddr = start_address;
1597 DWC_WRITE_REG32(&global_regs->dtxfsiz[i],
1600 start_address += txfifosize[i].b.depth;
1603 /** Check if register values are set correctly */
1604 if (rx_fifo_size != DWC_READ_REG32(&global_regs->grxfsiz))
1607 if (nptxfifosize.d32 != DWC_READ_REG32(&global_regs->gnptxfsiz))
1610 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
1611 if (txfifosize[i].d32 !=
1612 DWC_READ_REG32(&global_regs->dtxfsiz[i])) {
1617 /** If register values are not set correctly, reset old values */
1619 DWC_WRITE_REG32(&global_regs->grxfsiz, rx_fsz_bak);
1621 /* Non-periodic Tx FIFO */
1622 DWC_WRITE_REG32(&global_regs->gnptxfsiz, nptxfsz_bak);
1624 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
1625 DWC_WRITE_REG32(&global_regs->dtxfsiz[i],
1633 /* Flush the FIFOs */
1634 dwc_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs */
1635 dwc_otg_flush_rx_fifo(core_if);
1641 * This function sets a new value for the buffer Alignment setup.
/**
 * NOTE(review): the doc line above is a copy/paste error — this function
 * actually sets a new Tx FIFO depth for one IN endpoint: it saves the old
 * dev_tx_fifo_size entry, writes the requested depth, attempts the resize
 * via resize_fifos(), and rolls the parameter back if the resize fails.
 *
 * NOTE(review): the debug CFI_INFO at line 1660 dereferences ep before
 * the NULL check at 1664 (the intervening lines were lost in capture —
 * verify ordering against the full file; as shown this is a potential
 * NULL dereference).
 */
1643 static int cfi_ep_set_tx_fifo_val(uint8_t *buf, dwc_otg_pcd_t *pcd)
1649 dwc_otg_pcd_ep_t *ep;
1650 dwc_otg_core_params_t *params = GET_CORE_IF(pcd)->core_params;
1651 tx_fifo_size_setup_t *ptxfifoval;
1653 ptxfifoval = (tx_fifo_size_setup_t *) buf;
1654 ep_addr = ptxfifoval->bEndpointAddress;
1655 size = ptxfifoval->wDepth;
1657 ep = get_ep_by_addr(pcd, ep_addr);
1660 ("%s: Set Tx FIFO size: endpoint addr=0x%02x, depth=%d, FIFO Num=%d\n",
1661 __func__, ep_addr, size, ep->dwc_ep.tx_fifo_num);
1664 CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
1666 return -DWC_E_INVALID;
/* Save the old depth so we can roll back if the resize is rejected;
 * tx_fifo_num is 1-based, the parameter array is 0-based */
1669 fsiz = params->dev_tx_fifo_size[ep->dwc_ep.tx_fifo_num - 1];
1670 params->dev_tx_fifo_size[ep->dwc_ep.tx_fifo_num - 1] = size;
1672 if (resize_fifos(GET_CORE_IF(pcd))) {
1676 ("%s: Error setting the feature Tx FIFO Size for EP%d\n",
/* Rollback on failure */
1678 params->dev_tx_fifo_size[ep->dwc_ep.tx_fifo_num - 1] = fsiz;
1679 retval = -DWC_E_INVALID;
1686 * This function sets a new value for the buffer Alignment setup.
/**
 * NOTE(review): the doc line above is a copy/paste error — this function
 * actually sets a new depth for the shared Rx FIFO: it saves the old
 * dev_rx_fifo_size, writes the requested depth, attempts the resize via
 * resize_fifos(), and rolls the parameter back if the resize fails.
 */
1688 static int cfi_set_rx_fifo_val(uint8_t *buf, dwc_otg_pcd_t *pcd)
1693 dwc_otg_core_params_t *params = GET_CORE_IF(pcd)->core_params;
1694 rx_fifo_size_setup_t *prxfifoval;
1696 prxfifoval = (rx_fifo_size_setup_t *) buf;
1697 size = prxfifoval->wDepth;
/* Save the old depth so we can roll back if the resize is rejected */
1699 fsiz = params->dev_rx_fifo_size;
1700 params->dev_rx_fifo_size = size;
1702 if (resize_fifos(GET_CORE_IF(pcd))) {
1705 CFI_INFO("%s: Error setting the feature Rx FIFO Size\n",
/* Rollback on failure */
1707 params->dev_rx_fifo_size = fsiz;
1708 retval = -DWC_E_INVALID;
1715 * This function reads the SG of an EP's buffer setup into the buffer buf
/**
 * @param buf Destination buffer for the stored SG setup
 * @param pcd The PCD object; its cfi context holds the per-EP setups
 * @param req The control request; low byte of wValue addresses the EP
 * @return BS_SG_VAL_DESC_LEN (bytes copied) on success, -DWC_E_INVALID
 *         if the endpoint address is zero or cannot be resolved
 */
1717 static int cfi_ep_get_sg_val(uint8_t *buf, struct dwc_otg_pcd *pcd,
1718 struct cfi_usb_ctrlrequest *req)
1720 int retval = -DWC_E_INVALID;
1724 /* The Low Byte of the wValue contains
1725 * a non-zero address of the endpoint */
1726 addr = req->wValue & 0xFF;
1727 if (addr == 0) /* The address should be non-zero */
1730 ep = get_cfi_ep_by_addr(pcd->cfi, addr);
1732 CFI_INFO("%s: Unable to get the endpoint address(0x%02x)\n",
1737 dwc_memcpy(buf, ep->bm_sg, BS_SG_VAL_DESC_LEN);
1738 retval = BS_SG_VAL_DESC_LEN;
1743 * This function reads the Concatenation value of an EP's buffer mode into
1746 static int cfi_ep_get_concat_val(uint8_t *buf, struct dwc_otg_pcd *pcd,
1747 struct cfi_usb_ctrlrequest *req)
1749 int retval = -DWC_E_INVALID;
1754 /* The Low Byte of the wValue contains
1755 * a non-zero address of the endpoint */
1756 addr = req->wValue & 0xFF;
1757 if (addr == 0) /* The address should be non-zero */
1760 ep = get_cfi_ep_by_addr(pcd->cfi, addr);
1762 CFI_INFO("%s: Unable to get the endpoint address(0x%02x)\n",
1767 /* Copy the header to the buffer */
1768 dwc_memcpy(buf, ep->bm_concat, BS_CONCAT_VAL_HDR_LEN);
1769 /* Advance the buffer pointer by the header size */
1770 buf += BS_CONCAT_VAL_HDR_LEN;
1772 desc_count = ep->bm_concat->hdr.bDescCount;
1773 /* Copy alll the wTxBytes to the buffer */
1774 dwc_memcpy(buf, ep->bm_concat->wTxBytes, sizeof(uid16_t) * desc_count);
1776 retval = BS_CONCAT_VAL_HDR_LEN + sizeof(uid16_t) * desc_count;
1781 * This function reads the buffer Alignment value of an EP's buffer mode into
1784 * @return The total number of bytes copied to the buffer or negative error code.
/**
 * @param buf Destination buffer for the stored alignment setup header
 * @param pcd The PCD object; its cfi context holds the per-EP setups
 * @param req The control request; low byte of wValue addresses the EP
 */
1786 static int cfi_ep_get_align_val(uint8_t *buf, struct dwc_otg_pcd *pcd,
1787 struct cfi_usb_ctrlrequest *req)
1789 int retval = -DWC_E_INVALID;
1793 /* The Low Byte of the wValue contains a non-zero address of the endpoint */
1794 addr = req->wValue & 0xFF;
1795 if (addr == 0) /* The address should be non-zero */
1798 ep = get_cfi_ep_by_addr(pcd->cfi, addr);
1800 CFI_INFO("%s: Unable to get the endpoint address(0x%02x)\n",
1805 dwc_memcpy(buf, ep->bm_align, BS_ALIGN_VAL_HDR_LEN);
1806 retval = BS_ALIGN_VAL_HDR_LEN;
1812 * This function sets a new value for the specified feature
1814 * @param pcd A pointer to the PCD object
1816 * @return 0 if successful, negative error code otherwise to stall the DCE.
/**
 * Top-level SET_FEATURE handler: decodes the buffered CFI control request
 * and dispatches to the per-feature setter. need_gadget_att records
 * whether the gadget driver must be notified afterwards: 1 for the DMA
 * buffer-mode features, 0 for the FIFO-depth features (handled entirely
 * in the core). The switch expression line and the closing
 * default/return lines were lost in this capture.
 */
1818 static int cfi_set_feature_value(struct dwc_otg_pcd *pcd)
1820 int retval = -DWC_E_NOT_SUPPORTED;
1821 uint16_t wIndex, wValue;
1823 struct dwc_otg_core_if *coreif;
1824 cfiobject_t *cfi = pcd->cfi;
1825 struct cfi_usb_ctrlrequest *ctrl_req;
1827 ctrl_req = &cfi->ctrl_req;
/* The request payload was buffered by the data stage */
1829 buf = pcd->cfi->ctrl_req.data;
1831 coreif = GET_CORE_IF(pcd);
1832 bRequest = ctrl_req->bRequest;
1833 wIndex = DWC_CONSTANT_CPU_TO_LE16(ctrl_req->wIndex);
1834 wValue = DWC_CONSTANT_CPU_TO_LE16(ctrl_req->wValue);
1836 /* See which feature is to be modified */
1838 case FT_ID_DMA_BUFFER_SETUP:
1839 /* Modify the feature */
1840 retval = cfi_ep_set_sg_val(buf, pcd);
1844 /* And send this request to the gadget */
1845 cfi->need_gadget_att = 1;
1848 case FT_ID_DMA_BUFF_ALIGN:
1849 retval = cfi_ep_set_alignment_val(buf, pcd);
1852 cfi->need_gadget_att = 1;
1855 case FT_ID_DMA_CONCAT_SETUP:
1856 /* Modify the feature */
1857 retval = cfi_ep_set_concat_val(buf, pcd);
1860 cfi->need_gadget_att = 1;
/* The following three features are recognized but not implemented
 * here beyond the trace output */
1863 case FT_ID_DMA_CIRCULAR:
1864 CFI_INFO("FT_ID_DMA_CIRCULAR\n");
1867 case FT_ID_THRESHOLD_SETUP:
1868 CFI_INFO("FT_ID_THRESHOLD_SETUP\n");
1871 case FT_ID_DFIFO_DEPTH:
1872 CFI_INFO("FT_ID_DFIFO_DEPTH\n");
1875 case FT_ID_TX_FIFO_DEPTH:
1876 CFI_INFO("FT_ID_TX_FIFO_DEPTH\n");
1877 retval = cfi_ep_set_tx_fifo_val(buf, pcd);
1880 cfi->need_gadget_att = 0;
1883 case FT_ID_RX_FIFO_DEPTH:
1884 CFI_INFO("FT_ID_RX_FIFO_DEPTH\n");
1885 retval = cfi_set_rx_fifo_val(buf, pcd);
1888 cfi->need_gadget_att = 0;
1895 #endif /* DWC_UTE_CFI */