1 /* ==========================================================================
2 * Synopsys HS OTG Linux Software Driver and documentation (hereinafter,
3 * "Software") is an Unsupported proprietary work of Synopsys, Inc. unless
4 * otherwise expressly agreed to in writing between Synopsys and you.
6 * The Software IS NOT an item of Licensed Software or Licensed Product under
7 * any End User Software License Agreement or Agreement for Licensed Product
8 * with Synopsys or any supplement thereto. You are permitted to use and
9 * redistribute this Software in source and binary forms, with or without
10 * modification, provided that redistributions of source code must retain this
11 * notice. You may not view, use, disclose, copy or distribute this file or
12 * any information contained herein except pursuant to this license grant from
13 * Synopsys. If you do not agree with this notice, including the disclaimer
14 * below, then you are not authorized to use the Software.
16 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS" BASIS
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS BE LIABLE FOR ANY DIRECT,
20 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
21 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
22 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
23 * CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
27 * ========================================================================== */
31 * This file contains the most of the CFI(Core Feature Interface)
32 * implementation for the OTG.
37 #include "dwc_otg_pcd.h"
38 #include "dwc_otg_cfi.h"
40 /** This definition should actually migrate to the Portability Library */
41 #define DWC_CONSTANT_CPU_TO_LE16(x) (x)
43 extern dwc_otg_pcd_ep_t *get_ep_by_addr(dwc_otg_pcd_t * pcd, u16 wIndex);
45 static int cfi_core_features_buf(uint8_t * buf, uint16_t buflen);
46 static int cfi_get_feature_value(uint8_t * buf, uint16_t buflen,
47 struct dwc_otg_pcd *pcd,
48 struct cfi_usb_ctrlrequest *ctrl_req);
49 static int cfi_set_feature_value(struct dwc_otg_pcd *pcd);
50 static int cfi_ep_get_sg_val(uint8_t * buf, struct dwc_otg_pcd *pcd,
51 struct cfi_usb_ctrlrequest *req);
52 static int cfi_ep_get_concat_val(uint8_t * buf, struct dwc_otg_pcd *pcd,
53 struct cfi_usb_ctrlrequest *req);
54 static int cfi_ep_get_align_val(uint8_t * buf, struct dwc_otg_pcd *pcd,
55 struct cfi_usb_ctrlrequest *req);
56 static int cfi_preproc_reset(struct dwc_otg_pcd *pcd,
57 struct cfi_usb_ctrlrequest *req);
58 static void cfi_free_ep_bs_dyn_data(cfi_ep_t * cfiep);
60 static uint16_t get_dfifo_size(dwc_otg_core_if_t * core_if);
61 static int32_t get_rxfifo_size(dwc_otg_core_if_t * core_if, uint16_t wValue);
62 static int32_t get_txfifo_size(struct dwc_otg_pcd *pcd, uint16_t wValue);
64 static uint8_t resize_fifos(dwc_otg_core_if_t * core_if);
66 /** This is the header of the all features descriptor */
/* Header prepended to the GET_FEATURES response buffer.
 * Advertises CFI v1.00, the OTG core ID, and 9 feature descriptors —
 * this count must stay in sync with the prop_descs[] table below.
 * NOTE(review): the closing "};" is not visible in this chunk. */
67 static cfi_all_features_header_t all_props_desc_header = {
68 .wVersion = DWC_CONSTANT_CPU_TO_LE16(0x100),
69 .wCoreID = DWC_CONSTANT_CPU_TO_LE16(CFI_CORE_ID_OTG),
70 .wNumFeatures = DWC_CONSTANT_CPU_TO_LE16(9),
73 /** This is an array of statically allocated feature descriptors */
/* Static table of the 9 feature descriptor headers returned by
 * VEN_CORE_GET_FEATURES. Each entry carries a feature ID, RO/RW attribute
 * and the byte length of the feature's data payload.
 * NOTE(review): per-entry braces and the terminating "};" are not visible
 * in this chunk; the FT_ID_DMA_CONCAT_SETUP entry's wDataLength line is
 * commented out in the original — presumably its length is variable. */
74 static cfi_feature_desc_header_t prop_descs[] = {
78 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_DMA_MODE),
79 .bmAttributes = CFI_FEATURE_ATTR_RW,
80 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(1),
83 /* FT_ID_DMA_BUFFER_SETUP */
85 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_DMA_BUFFER_SETUP),
86 .bmAttributes = CFI_FEATURE_ATTR_RW,
87 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(6),
90 /* FT_ID_DMA_BUFF_ALIGN */
92 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_DMA_BUFF_ALIGN),
93 .bmAttributes = CFI_FEATURE_ATTR_RW,
94 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(2),
97 /* FT_ID_DMA_CONCAT_SETUP */
99 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_DMA_CONCAT_SETUP),
100 .bmAttributes = CFI_FEATURE_ATTR_RW,
101 //.wDataLength = DWC_CONSTANT_CPU_TO_LE16(6),
104 /* FT_ID_DMA_CIRCULAR */
106 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_DMA_CIRCULAR),
107 .bmAttributes = CFI_FEATURE_ATTR_RW,
108 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(6),
111 /* FT_ID_THRESHOLD_SETUP */
113 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_THRESHOLD_SETUP),
114 .bmAttributes = CFI_FEATURE_ATTR_RW,
115 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(6),
118 /* FT_ID_DFIFO_DEPTH */
120 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_DFIFO_DEPTH),
121 .bmAttributes = CFI_FEATURE_ATTR_RO,
122 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(2),
125 /* FT_ID_TX_FIFO_DEPTH */
127 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_TX_FIFO_DEPTH),
128 .bmAttributes = CFI_FEATURE_ATTR_RW,
129 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(2),
132 /* FT_ID_RX_FIFO_DEPTH */
134 .wFeatureID = DWC_CONSTANT_CPU_TO_LE16(FT_ID_RX_FIFO_DEPTH),
135 .bmAttributes = CFI_FEATURE_ATTR_RW,
136 .wDataLength = DWC_CONSTANT_CPU_TO_LE16(2),
140 /** The table of feature names */
/* Maps each feature ID to its human-readable name; consumed by
 * get_prop_name() when building the GET_FEATURES response.
 * NOTE(review): the terminating entry / "};" is not visible in this chunk —
 * get_prop_name() iterates until a NULL string, so a sentinel is expected. */
141 cfi_string_t prop_name_table[] = {
142 {FT_ID_DMA_MODE, "dma_mode"},
143 {FT_ID_DMA_BUFFER_SETUP, "buffer_setup"},
144 {FT_ID_DMA_BUFF_ALIGN, "buffer_align"},
145 {FT_ID_DMA_CONCAT_SETUP, "concat_setup"},
146 {FT_ID_DMA_CIRCULAR, "buffer_circular"},
147 {FT_ID_THRESHOLD_SETUP, "threshold_setup"},
148 {FT_ID_DFIFO_DEPTH, "dfifo_depth"},
149 {FT_ID_TX_FIFO_DEPTH, "txfifo_depth"},
150 {FT_ID_RX_FIFO_DEPTH, "rxfifo_depth"},
154 /************************************************************************/
157 * Returns the name of the feature by its ID
158 * or NULL if no featute ID matches.
/* Linear search of prop_name_table for prop_id; on a match stores the
 * name's length into *len. The matching "return" and the not-found path
 * (presumably returning NULL, per the comment above) are not visible in
 * this chunk. */
161 const uint8_t *get_prop_name(uint16_t prop_id, int *len)
166 for (pstr = prop_name_table; pstr && pstr->s; pstr++) {
167 if (pstr->id == prop_id) {
168 *len = DWC_STRLEN(pstr->s);
176 * This function handles all CFI specific control requests.
178 * Return a negative value to stall the DCE.
/* Dispatches all CFI vendor-specific EP0 control requests.
 * Saves the request into the CFI object (data stage pointer filled in later
 * by the write-complete callback), then per bRequest either starts an
 * EP0-IN transfer with the response (GET_*), arms an EP0-OUT transfer to
 * receive the data stage (SET_FEATURE / WRITE_REGISTER), or handles the
 * request immediately (RESET_FEATURES). A negative return stalls EP0.
 * NOTE(review): `ep` is initialized to NULL and then dereferenced in every
 * case; the assignment (presumably ep = &pcd->ep0) must be on lines missing
 * from this chunk — confirm against the full source. */
180 int cfi_setup(struct dwc_otg_pcd *pcd, struct cfi_usb_ctrlrequest *ctrl)
183 dwc_otg_pcd_ep_t *ep = NULL;
184 cfiobject_t *cfi = pcd->cfi;
185 struct dwc_otg_core_if *coreif = GET_CORE_IF(pcd);
186 uint16_t wLen = DWC_LE16_TO_CPU(&ctrl->wLength);
187 uint16_t wValue = DWC_LE16_TO_CPU(&ctrl->wValue);
188 uint16_t wIndex = DWC_LE16_TO_CPU(&ctrl->wIndex);
189 uint32_t regaddr = 0;
192 /* Save this Control Request in the CFI object.
193 * The data field will be assigned in the data stage completion CB function.
195 cfi->ctrl_req = *ctrl;
196 cfi->ctrl_req.data = NULL;
198 cfi->need_gadget_att = 0;
199 cfi->need_status_in_complete = 0;
201 switch (ctrl->bRequest) {
202 case VEN_CORE_GET_FEATURES:
203 retval = cfi_core_features_buf(cfi->buf_in.buf, CFI_IN_BUF_LEN);
205 //dump_msg(cfi->buf_in.buf, retval);
/* Clamp the response to the host-requested wLength. */
208 retval = min((uint16_t) retval, wLen);
209 /* Transfer this buffer to the host through the EP0-IN EP */
210 ep->dwc_ep.dma_addr = cfi->buf_in.addr;
211 ep->dwc_ep.start_xfer_buff = cfi->buf_in.buf;
212 ep->dwc_ep.xfer_buff = cfi->buf_in.buf;
213 ep->dwc_ep.xfer_len = retval;
214 ep->dwc_ep.xfer_count = 0;
215 ep->dwc_ep.sent_zlp = 0;
216 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
218 pcd->ep0_pending = 1;
219 dwc_otg_ep0_start_transfer(coreif, &ep->dwc_ep);
224 case VEN_CORE_GET_FEATURE:
225 CFI_INFO("VEN_CORE_GET_FEATURE\n");
226 retval = cfi_get_feature_value(cfi->buf_in.buf, CFI_IN_BUF_LEN,
231 retval = min((uint16_t) retval, wLen);
232 /* Transfer this buffer to the host through the EP0-IN EP */
233 ep->dwc_ep.dma_addr = cfi->buf_in.addr;
234 ep->dwc_ep.start_xfer_buff = cfi->buf_in.buf;
235 ep->dwc_ep.xfer_buff = cfi->buf_in.buf;
236 ep->dwc_ep.xfer_len = retval;
237 ep->dwc_ep.xfer_count = 0;
238 ep->dwc_ep.sent_zlp = 0;
239 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
241 pcd->ep0_pending = 1;
242 dwc_otg_ep0_start_transfer(coreif, &ep->dwc_ep);
244 CFI_INFO("VEN_CORE_GET_FEATURE=%d\n", retval);
245 dump_msg(cfi->buf_in.buf, retval);
248 case VEN_CORE_SET_FEATURE:
249 CFI_INFO("VEN_CORE_SET_FEATURE\n");
250 /* Set up an XFER to get the data stage of the control request,
251 * which is the new value of the feature to be modified.
254 ep->dwc_ep.is_in = 0;
255 ep->dwc_ep.dma_addr = cfi->buf_out.addr;
256 ep->dwc_ep.start_xfer_buff = cfi->buf_out.buf;
257 ep->dwc_ep.xfer_buff = cfi->buf_out.buf;
258 ep->dwc_ep.xfer_len = wLen;
259 ep->dwc_ep.xfer_count = 0;
260 ep->dwc_ep.sent_zlp = 0;
261 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
263 pcd->ep0_pending = 1;
264 /* Read the control write's data stage */
265 dwc_otg_ep0_start_transfer(coreif, &ep->dwc_ep);
269 case VEN_CORE_RESET_FEATURES:
270 CFI_INFO("VEN_CORE_RESET_FEATURES\n");
271 cfi->need_gadget_att = 1;
272 cfi->need_status_in_complete = 1;
273 retval = cfi_preproc_reset(pcd, ctrl);
274 CFI_INFO("VEN_CORE_RESET_FEATURES = (%d)\n", retval);
277 case VEN_CORE_ACTIVATE_FEATURES:
278 CFI_INFO("VEN_CORE_ACTIVATE_FEATURES\n");
281 case VEN_CORE_READ_REGISTER:
282 CFI_INFO("VEN_CORE_READ_REGISTER\n");
283 /* wValue optionally contains the HI WORD of the register offset and
284 * wIndex contains the LOW WORD of the register offset
287 /* @TODO - MAS - fix the access to the base field */
289 //regaddr = (uint32_t) pcd->otg_dev->os_dep.base;
290 //GET_CORE_IF(pcd)->co
/* NOTE(review): the address is built entirely from host-supplied
 * wValue/wIndex and dereferenced directly below — an unvalidated,
 * host-controlled memory read. The @TODO above suggests it should be
 * rebased onto the controller's register base; confirm before use. */
293 regaddr = (wValue << 16) | wIndex;
296 /* Read a 32-bit value of the memory at the regaddr */
297 regval = DWC_READ_REG32((uint32_t *) regaddr);
300 dwc_memcpy(cfi->buf_in.buf, &regval, sizeof(uint32_t));
301 ep->dwc_ep.is_in = 1;
302 ep->dwc_ep.dma_addr = cfi->buf_in.addr;
303 ep->dwc_ep.start_xfer_buff = cfi->buf_in.buf;
304 ep->dwc_ep.xfer_buff = cfi->buf_in.buf;
305 ep->dwc_ep.xfer_len = wLen;
306 ep->dwc_ep.xfer_count = 0;
307 ep->dwc_ep.sent_zlp = 0;
308 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
310 pcd->ep0_pending = 1;
311 dwc_otg_ep0_start_transfer(coreif, &ep->dwc_ep);
312 cfi->need_gadget_att = 0;
316 case VEN_CORE_WRITE_REGISTER:
317 CFI_INFO("VEN_CORE_WRITE_REGISTER\n");
318 /* Set up an XFER to get the data stage of the control request,
319 * which is the new value of the register to be modified.
322 ep->dwc_ep.is_in = 0;
323 ep->dwc_ep.dma_addr = cfi->buf_out.addr;
324 ep->dwc_ep.start_xfer_buff = cfi->buf_out.buf;
325 ep->dwc_ep.xfer_buff = cfi->buf_out.buf;
326 ep->dwc_ep.xfer_len = wLen;
327 ep->dwc_ep.xfer_count = 0;
328 ep->dwc_ep.sent_zlp = 0;
329 ep->dwc_ep.total_len = ep->dwc_ep.xfer_len;
331 pcd->ep0_pending = 1;
332 /* Read the control write's data stage */
333 dwc_otg_ep0_start_transfer(coreif, &ep->dwc_ep);
/* Unknown bRequest: stall EP0. */
338 retval = -DWC_E_NOT_SUPPORTED;
346 * This function prepares the core features descriptors and copies its
347 * raw representation into the buffer <buf>.
349 * The buffer structure is as follows:
350 * all_features_header (8 bytes)
351 * features_#1 (8 bytes + feature name string length)
352 * features_#2 (8 bytes + feature name string length)
354 * features_#n - where n=the total count of feature descriptors
/* Serializes the all-features header plus every feature descriptor (header
 * followed by its name string) into buf, then patches wTotalLen in the
 * leading header. Returns the total byte length written (return statement
 * not visible in this chunk).
 * NOTE(review): the element count `j` divides sizeof(prop_descs) by
 * sizeof(cfi_all_features_header_t), not sizeof(cfi_feature_desc_header_t)
 * (the actual element type). This only works if the two structs happen to
 * be the same size — verify, this looks like a latent bug.
 * NOTE(review): `buflen` is never checked against the bytes written —
 * caller must guarantee CFI_IN_BUF_LEN is large enough. */
356 static int cfi_core_features_buf(uint8_t * buf, uint16_t buflen)
358 cfi_feature_desc_header_t *prop_hdr = prop_descs;
359 cfi_feature_desc_header_t *prop;
360 cfi_all_features_header_t *all_props_hdr = &all_props_desc_header;
361 cfi_all_features_header_t *tmp;
362 uint8_t *tmpbuf = buf;
363 const uint8_t *pname = NULL;
364 int i, j, namelen = 0, totlen;
366 /* Prepare and copy the core features into the buffer */
367 CFI_INFO("%s:\n", __func__);
369 tmp = (cfi_all_features_header_t *) tmpbuf;
370 *tmp = *all_props_hdr;
371 tmpbuf += CFI_ALL_FEATURES_HDR_LEN;
373 j = sizeof(prop_descs) / sizeof(cfi_all_features_header_t);
374 for (i = 0; i < j; i++, prop_hdr++) {
375 pname = get_prop_name(prop_hdr->wFeatureID, &namelen);
376 prop = (cfi_feature_desc_header_t *) tmpbuf;
379 prop->bNameLen = namelen;
381 DWC_CONSTANT_CPU_TO_LE16(CFI_FEATURE_DESC_HDR_LEN +
384 tmpbuf += CFI_FEATURE_DESC_HDR_LEN;
385 dwc_memcpy(tmpbuf, pname, namelen);
389 totlen = tmpbuf - buf;
/* Back-patch the total length into the leading header. */
392 tmp = (cfi_all_features_header_t *) buf;
393 tmp->wTotalLen = DWC_CONSTANT_CPU_TO_LE16(totlen);
400 * This function releases all the dynamic memory in the CFI object.
/* Teardown callback: frees the IN/OUT DMA buffers and each active EP's
 * dynamically-allocated buffer-setup data. Pointers are NULLed after free
 * to guard against double-free on repeated release. */
402 static void cfi_release(cfiobject_t * cfiobj)
405 dwc_list_link_t *tmp;
407 CFI_INFO("%s\n", __func__);
409 if (cfiobj->buf_in.buf) {
410 DWC_DMA_FREE(CFI_IN_BUF_LEN, cfiobj->buf_in.buf,
411 cfiobj->buf_in.addr);
412 cfiobj->buf_in.buf = NULL;
415 if (cfiobj->buf_out.buf) {
416 DWC_DMA_FREE(CFI_OUT_BUF_LEN, cfiobj->buf_out.buf,
417 cfiobj->buf_out.addr);
418 cfiobj->buf_out.buf = NULL;
421 /* Free the Buffer Setup values for each EP */
422 //list_for_each_entry(cfiep, &cfiobj->active_eps, lh) {
423 DWC_LIST_FOREACH(tmp, &cfiobj->active_eps) {
424 cfiep = DWC_LIST_ENTRY(tmp, struct cfi_ep, lh);
425 cfi_free_ep_bs_dyn_data(cfiep);
430 * This function frees the dynamically allocated EP buffer setup data.
/* Frees the per-EP buffer-mode setup allocations: the SG setup, the
 * alignment setup, and the concatenation setup (including its separately
 * allocated wTxBytes array, freed first to avoid a leak). */
432 static void cfi_free_ep_bs_dyn_data(cfi_ep_t * cfiep)
435 DWC_FREE(cfiep->bm_sg);
439 if (cfiep->bm_align) {
440 DWC_FREE(cfiep->bm_align);
441 cfiep->bm_align = NULL;
444 if (cfiep->bm_concat) {
445 if (NULL != cfiep->bm_concat->wTxBytes) {
446 DWC_FREE(cfiep->bm_concat->wTxBytes);
447 cfiep->bm_concat->wTxBytes = NULL;
449 DWC_FREE(cfiep->bm_concat);
450 cfiep->bm_concat = NULL;
455 * This function initializes the default values of the features
456 * for a specific endpoint and should be called only once when
457 * the EP is enabled first time.
/* Allocates and zero-fills the default buffer-mode setup structures
 * (SG, concatenation, alignment) for one endpoint. On any allocation
 * failure the previously allocated structures are freed and
 * -DWC_E_NO_MEMORY is returned. The concat setup's wTxBytes array is
 * deliberately NOT allocated here — it is sized later in
 * set_feature_value when the host supplies the descriptor count. */
459 static int cfi_ep_init_defaults(struct dwc_otg_pcd *pcd, cfi_ep_t * cfiep)
463 cfiep->bm_sg = DWC_ALLOC(sizeof(ddma_sg_buffer_setup_t));
464 if (NULL == cfiep->bm_sg) {
465 CFI_INFO("Failed to allocate memory for SG feature value\n");
466 return -DWC_E_NO_MEMORY;
468 dwc_memset(cfiep->bm_sg, 0, sizeof(ddma_sg_buffer_setup_t));
470 /* For the Concatenation feature's default value we do not allocate
471 * memory for the wTxBytes field - it will be done in the set_feature_value
474 cfiep->bm_concat = DWC_ALLOC(sizeof(ddma_concat_buffer_setup_t));
475 if (NULL == cfiep->bm_concat) {
477 ("Failed to allocate memory for CONCATENATION feature value\n");
478 DWC_FREE(cfiep->bm_sg);
479 return -DWC_E_NO_MEMORY;
481 dwc_memset(cfiep->bm_concat, 0, sizeof(ddma_concat_buffer_setup_t));
483 cfiep->bm_align = DWC_ALLOC(sizeof(ddma_align_buffer_setup_t));
484 if (NULL == cfiep->bm_align) {
486 ("Failed to allocate memory for Alignment feature value\n");
487 DWC_FREE(cfiep->bm_sg);
488 DWC_FREE(cfiep->bm_concat);
489 return -DWC_E_NO_MEMORY;
491 dwc_memset(cfiep->bm_align, 0, sizeof(ddma_align_buffer_setup_t));
497 * The callback function that notifies the CFI on the activation of
498 * an endpoint in the PCD. The following steps are done in this function:
500 * Create a dynamically allocated cfi_ep_t object (a CFI wrapper to the PCD's
502 * Create MAX_DMA_DESCS_PER_EP count DMA Descriptors for the EP
503 * Set the Buffer Mode to standard
504 * Initialize the default values for all EP modes (SG, Circular, Concat, Align)
505 * Add the cfi_ep_t object to the list of active endpoints in the CFI object
/* EP-activation callback. If the PCD endpoint is not already tracked,
 * allocates a cfi_ep_t wrapper, a chain of MAX_DMA_DESCS_PER_EP DMA
 * descriptors, sets the buffer mode to BM_STANDARD, initializes the
 * per-mode default values, and appends the wrapper to cfi->active_eps.
 * Returns -DWC_E_NO_MEMORY on allocation failure.
 * NOTE(review): the cfiep allocated at 520 does not appear to be freed on
 * the descriptor-allocation failure path (540) — possible leak; the
 * missing lines of this chunk may or may not handle it. */
507 static int cfi_ep_enable(struct cfiobject *cfi, struct dwc_otg_pcd *pcd,
508 struct dwc_otg_pcd_ep *ep)
511 int retval = -DWC_E_NOT_SUPPORTED;
513 CFI_INFO("%s: epname=%s; epnum=0x%02x\n", __func__,
514 "EP_" /*ep->ep.name */ , ep->desc->bEndpointAddress);
515 /* MAS - Check whether this endpoint already is in the list */
516 cfiep = get_cfi_ep_by_pcd_ep(cfi, ep);
519 /* Allocate a cfi_ep_t object */
520 cfiep = DWC_ALLOC(sizeof(cfi_ep_t));
523 ("Unable to allocate memory for <cfiep> in function %s\n",
525 return -DWC_E_NO_MEMORY;
527 dwc_memset(cfiep, 0, sizeof(cfi_ep_t));
529 /* Save the dwc_otg_pcd_ep pointer in the cfiep object */
532 /* Allocate the DMA Descriptors chain of MAX_DMA_DESCS_PER_EP count */
534 DWC_DMA_ALLOC(MAX_DMA_DESCS_PER_EP *
535 sizeof(dwc_otg_dma_desc_t),
536 &ep->dwc_ep.descs_dma_addr);
538 if (NULL == ep->dwc_ep.descs) {
540 return -DWC_E_NO_MEMORY;
543 DWC_LIST_INIT(&cfiep->lh);
545 /* Set the buffer mode to BM_STANDARD. It will be modified
546 * when building descriptors for a specific buffer mode */
547 ep->dwc_ep.buff_mode = BM_STANDARD;
549 /* Create and initialize the default values for this EP's Buffer modes */
550 if ((retval = cfi_ep_init_defaults(pcd, cfiep)) < 0)
553 /* Add the cfi_ep_t object to the CFI object's list of active endpoints */
554 DWC_LIST_INSERT_TAIL(&cfi->active_eps, &cfiep->lh);
556 } else { /* The sought EP already is in the list */
557 CFI_INFO("%s: The sought EP already is in the list\n",
565 * This function is called when the data stage of a 3-stage Control Write request
/* Data-stage-complete callback for 3-stage Control Write requests.
 * Attaches the received OUT buffer to the saved control request, then
 * applies it: for WRITE_REGISTER the raw 32-bit value is (nominally)
 * written to the address built from wValue/wIndex; for SET_FEATURE the
 * feature value is applied via cfi_set_feature_value().
 * NOTE(review): wIndex/wValue are run through DWC_CONSTANT_CPU_TO_LE16
 * (a no-op here) where a LE16->CPU conversion seems intended — compare
 * with cfi_setup(), which uses DWC_LE16_TO_CPU. Harmless only on
 * little-endian hosts.
 * NOTE(review): the actual register write (606) is commented out, so
 * WRITE_REGISTER currently computes `addr`/`reg_value` and does nothing
 * with them. */
569 static int cfi_ctrl_write_complete(struct cfiobject *cfi,
570 struct dwc_otg_pcd *pcd)
572 uint32_t addr, reg_value;
573 uint16_t wIndex, wValue;
575 uint8_t *buf = cfi->buf_out.buf;
576 //struct usb_ctrlrequest *ctrl_req = &cfi->ctrl_req_saved;
577 struct cfi_usb_ctrlrequest *ctrl_req = &cfi->ctrl_req;
578 int retval = -DWC_E_NOT_SUPPORTED;
580 CFI_INFO("%s\n", __func__);
582 bRequest = ctrl_req->bRequest;
583 wIndex = DWC_CONSTANT_CPU_TO_LE16(ctrl_req->wIndex);
584 wValue = DWC_CONSTANT_CPU_TO_LE16(ctrl_req->wValue);
587 * Save the pointer to the data stage in the ctrl_req's <data> field.
588 * The request should be already saved in the command stage by now.
590 ctrl_req->data = cfi->buf_out.buf;
591 cfi->need_status_in_complete = 0;
592 cfi->need_gadget_att = 0;
595 case VEN_CORE_WRITE_REGISTER:
596 /* The buffer contains raw data of the new value for the register */
597 reg_value = *((uint32_t *) buf);
600 //addr = (uint32_t) pcd->otg_dev->os_dep.base;
603 addr = (wValue << 16) | wIndex;
606 //writel(reg_value, addr);
609 cfi->need_status_in_complete = 1;
612 case VEN_CORE_SET_FEATURE:
613 /* The buffer contains raw data of the new value of the feature */
614 retval = cfi_set_feature_value(pcd);
618 cfi->need_status_in_complete = 1;
629 * This function builds the DMA descriptors for the SG buffer mode.
/* Builds the DMA descriptor chain for Scatter/Gather buffer mode:
 * bCount descriptors, each covering wSize bytes, with consecutive buffer
 * addresses separated by (wSize + bOffset). Descriptors are marked
 * BS_HOST_BUSY while being filled, then BS_HOST_READY. The last descriptor
 * gets the L (last) and IOC bits, and SP mirrors the EP's sent_zlp.
 * NOTE(review): the `desc++` / `desc_last` advance inside the loop is on
 * lines missing from this chunk — as shown, desc_last would always point
 * at the first descriptor. */
631 static void cfi_build_sg_descs(struct cfiobject *cfi, cfi_ep_t * cfiep,
632 dwc_otg_pcd_request_t * req)
634 struct dwc_otg_pcd_ep *ep = cfiep->ep;
635 ddma_sg_buffer_setup_t *sgval = cfiep->bm_sg;
636 struct dwc_otg_dma_desc *desc = cfiep->ep->dwc_ep.descs;
637 struct dwc_otg_dma_desc *desc_last = cfiep->ep->dwc_ep.descs;
638 dma_addr_t buff_addr = req->dma;
640 uint32_t txsize, off;
642 txsize = sgval->wSize;
643 off = sgval->bOffset;
645 // CFI_INFO("%s: %s TXSIZE=0x%08x; OFFSET=0x%08x\n",
646 // __func__, cfiep->ep->ep.name, txsize, off);
648 for (i = 0; i < sgval->bCount; i++) {
649 desc->status.b.bs = BS_HOST_BUSY;
650 desc->buf = buff_addr;
651 desc->status.b.l = 0;
652 desc->status.b.ioc = 0;
653 desc->status.b.sp = 0;
654 desc->status.b.bytes = txsize;
655 desc->status.b.bs = BS_HOST_READY;
657 /* Set the next address of the buffer */
658 buff_addr += txsize + off;
663 /* Set the last, ioc and sp bits on the Last DMA Descriptor */
664 desc_last->status.b.l = 1;
665 desc_last->status.b.ioc = 1;
666 desc_last->status.b.sp = ep->dwc_ep.sent_zlp;
667 /* Save the last DMA descriptor pointer */
668 cfiep->dma_desc_last = desc_last;
669 cfiep->desc_count = sgval->bCount;
673 * This function builds the DMA descriptors for the Concatenation buffer mode.
/* Builds the DMA descriptor chain for Concatenation buffer mode:
 * hdr.bDescCount descriptors whose individual byte counts come from the
 * wTxBytes array, with each buffer address advanced by the EP's
 * wMaxPacketSize. Last descriptor gets L/IOC and SP = sent_zlp.
 * NOTE(review): as in cfi_build_sg_descs, the `desc++`/`txsize++` and
 * desc_last advance are on lines not visible in this chunk. */
675 static void cfi_build_concat_descs(struct cfiobject *cfi, cfi_ep_t * cfiep,
676 dwc_otg_pcd_request_t * req)
678 struct dwc_otg_pcd_ep *ep = cfiep->ep;
679 ddma_concat_buffer_setup_t *concatval = cfiep->bm_concat;
680 struct dwc_otg_dma_desc *desc = cfiep->ep->dwc_ep.descs;
681 struct dwc_otg_dma_desc *desc_last = cfiep->ep->dwc_ep.descs;
682 dma_addr_t buff_addr = req->dma;
686 txsize = concatval->wTxBytes;
688 for (i = 0; i < concatval->hdr.bDescCount; i++) {
689 desc->buf = buff_addr;
690 desc->status.b.bs = BS_HOST_BUSY;
691 desc->status.b.l = 0;
692 desc->status.b.ioc = 0;
693 desc->status.b.sp = 0;
694 desc->status.b.bytes = *txsize;
695 desc->status.b.bs = BS_HOST_READY;
698 /* Set the next address of the buffer */
699 buff_addr += UGETW(ep->desc->wMaxPacketSize);
704 /* Set the last, ioc and sp bits on the Last DMA Descriptor */
705 desc_last->status.b.l = 1;
706 desc_last->status.b.ioc = 1;
707 desc_last->status.b.sp = ep->dwc_ep.sent_zlp;
708 cfiep->dma_desc_last = desc_last;
709 cfiep->desc_count = concatval->hdr.bDescCount;
713 * This function builds the DMA descriptors for the Circular buffer mode
/* Circular buffer mode descriptor builder — intentionally an empty stub;
 * the @todo below marks it as pending implementation. */
715 static void cfi_build_circ_descs(struct cfiobject *cfi, cfi_ep_t * cfiep,
716 dwc_otg_pcd_request_t * req)
718 /* @todo: MAS - add implementation when this feature needs to be tested */
722 * This function builds the DMA descriptors for the Alignment buffer mode
/* Builds the single DMA descriptor for Alignment buffer mode: the whole
 * request in one descriptor, with the buffer address offset by bAlign.
 * BS_HOST_BUSY is set while the fields are filled, BS_HOST_READY last so
 * the core never sees a half-written descriptor. */
724 static void cfi_build_align_descs(struct cfiobject *cfi, cfi_ep_t * cfiep,
725 dwc_otg_pcd_request_t * req)
727 struct dwc_otg_pcd_ep *ep = cfiep->ep;
728 ddma_align_buffer_setup_t *alignval = cfiep->bm_align;
729 struct dwc_otg_dma_desc *desc = cfiep->ep->dwc_ep.descs;
730 dma_addr_t buff_addr = req->dma;
732 desc->status.b.bs = BS_HOST_BUSY;
733 desc->status.b.l = 1;
734 desc->status.b.ioc = 1;
735 desc->status.b.sp = ep->dwc_ep.sent_zlp;
736 desc->status.b.bytes = req->length;
737 /* Adjust the buffer alignment */
738 desc->buf = (buff_addr + alignval->bAlign);
739 desc->status.b.bs = BS_HOST_READY;
740 cfiep->dma_desc_last = desc;
741 cfiep->desc_count = 1;
745 * This function builds the DMA descriptors chain for different modes of the
746 * buffer setup of an endpoint.
/* Dispatch: looks up the CFI wrapper for the given PCD endpoint, records
 * the transfer length, and calls the builder matching the EP's current
 * buffer mode (SG / concat / circular / align). Case labels for the
 * buff_mode values fall on lines not visible in this chunk. */
748 static void cfi_build_descriptors(struct cfiobject *cfi,
749 struct dwc_otg_pcd *pcd,
750 struct dwc_otg_pcd_ep *ep,
751 dwc_otg_pcd_request_t * req)
755 /* Get the cfiep by the dwc_otg_pcd_ep */
756 cfiep = get_cfi_ep_by_pcd_ep(cfi, ep);
758 CFI_INFO("%s: Unable to find a matching active endpoint\n",
763 cfiep->xfer_len = req->length;
765 /* Iterate through all the DMA descriptors */
766 switch (cfiep->ep->dwc_ep.buff_mode) {
768 cfi_build_sg_descs(cfi, cfiep, req);
772 cfi_build_concat_descs(cfi, cfiep, req);
776 cfi_build_circ_descs(cfi, cfiep, req);
780 cfi_build_align_descs(cfi, cfiep, req);
789 * Allocate DMA buffer for different Buffer modes.
/* Thin wrapper over DWC_DMA_ALLOC for per-EP buffer allocation; the cfi,
 * pcd, ep and flags parameters are unused — kept to match the
 * ops.ep_alloc_buf callback signature installed in init_cfi(). */
791 static void *cfi_ep_alloc_buf(struct cfiobject *cfi, struct dwc_otg_pcd *pcd,
792 struct dwc_otg_pcd_ep *ep, dma_addr_t * dma,
793 unsigned size, gfp_t flags)
795 return DWC_DMA_ALLOC(size, dma);
799 * This function initializes the CFI object.
/* One-time CFI object initialization: allocates the EP0 IN and OUT DMA
 * buffers, installs the ops callbacks, and initializes the active-EP list.
 * Returns 0 on success (return not visible in this chunk) or
 * -DWC_E_NO_MEMORY.
 * NOTE(review): if the OUT buffer allocation fails, the already-allocated
 * IN buffer is not freed here — acceptable only if the caller invokes
 * cfi_release() on failure; confirm. */
801 int init_cfi(cfiobject_t * cfiobj)
803 CFI_INFO("%s\n", __func__);
805 /* Allocate a buffer for IN XFERs */
807 DWC_DMA_ALLOC(CFI_IN_BUF_LEN, &cfiobj->buf_in.addr);
808 if (NULL == cfiobj->buf_in.buf) {
809 CFI_INFO("Unable to allocate buffer for INs\n");
810 return -DWC_E_NO_MEMORY;
813 /* Allocate a buffer for OUT XFERs */
814 cfiobj->buf_out.buf =
815 DWC_DMA_ALLOC(CFI_OUT_BUF_LEN, &cfiobj->buf_out.addr);
816 if (NULL == cfiobj->buf_out.buf) {
817 CFI_INFO("Unable to allocate buffer for OUT\n");
818 return -DWC_E_NO_MEMORY;
821 /* Initialize the callback function pointers */
822 cfiobj->ops.release = cfi_release;
823 cfiobj->ops.ep_enable = cfi_ep_enable;
824 cfiobj->ops.ctrl_write_complete = cfi_ctrl_write_complete;
825 cfiobj->ops.build_descriptors = cfi_build_descriptors;
826 cfiobj->ops.ep_alloc_buf = cfi_ep_alloc_buf;
828 /* Initialize the list of active endpoints in the CFI object */
829 DWC_LIST_INIT(&cfiobj->active_eps);
835 * This function reads the required feature's current value into the buffer
837 * @retval: Returns negative as error, or the data length of the feature
/* Reads the current value of the feature selected by ctrl_req->wIndex into
 * buf. Returns the payload length on success or a negative error.
 * NOTE(review): in the TX/RX FIFO cases the get_*fifo_size() result is
 * assigned to retval, yet the uninitialized locals txfifo/rxfifo are what
 * get stored into buf — the assignments (and error checks) must be on
 * lines missing from this chunk; as shown this would store garbage. */
839 static int cfi_get_feature_value(uint8_t * buf, uint16_t buflen,
840 struct dwc_otg_pcd *pcd,
841 struct cfi_usb_ctrlrequest *ctrl_req)
843 int retval = -DWC_E_NOT_SUPPORTED;
844 struct dwc_otg_core_if *coreif = GET_CORE_IF(pcd);
845 uint16_t dfifo, rxfifo, txfifo;
847 switch (ctrl_req->wIndex) {
848 /* Whether the DDMA is enabled or not */
850 *buf = (coreif->dma_enable && coreif->dma_desc_enable) ? 1 : 0;
854 case FT_ID_DMA_BUFFER_SETUP:
855 retval = cfi_ep_get_sg_val(buf, pcd, ctrl_req);
858 case FT_ID_DMA_BUFF_ALIGN:
859 retval = cfi_ep_get_align_val(buf, pcd, ctrl_req);
862 case FT_ID_DMA_CONCAT_SETUP:
863 retval = cfi_ep_get_concat_val(buf, pcd, ctrl_req);
866 case FT_ID_DMA_CIRCULAR:
867 CFI_INFO("GetFeature value (FT_ID_DMA_CIRCULAR)\n");
870 case FT_ID_THRESHOLD_SETUP:
871 CFI_INFO("GetFeature value (FT_ID_THRESHOLD_SETUP)\n");
874 case FT_ID_DFIFO_DEPTH:
875 dfifo = get_dfifo_size(coreif);
876 *((uint16_t *) buf) = dfifo;
877 retval = sizeof(uint16_t);
880 case FT_ID_TX_FIFO_DEPTH:
881 retval = get_txfifo_size(pcd, ctrl_req->wValue);
884 *((uint16_t *) buf) = txfifo;
885 retval = sizeof(uint16_t);
889 case FT_ID_RX_FIFO_DEPTH:
890 retval = get_rxfifo_size(coreif, ctrl_req->wValue);
893 *((uint16_t *) buf) = rxfifo;
894 retval = sizeof(uint16_t);
903 * This function resets the SG for the specified EP to its default value
/* Resets the EP's Scatter/Gather setup to its all-zero default. */
905 static int cfi_reset_sg_val(cfi_ep_t * cfiep)
907 dwc_memset(cfiep->bm_sg, 0, sizeof(ddma_sg_buffer_setup_t));
912 * This function resets the Alignment for the specified EP to its default value
/* Resets the EP's Alignment setup to its default value.
 * NOTE(review): this clears cfiep->bm_sg with sizeof(ddma_sg_buffer_setup_t)
 * — apparently copy-pasted from cfi_reset_sg_val. It should almost
 * certainly clear cfiep->bm_align with sizeof(ddma_align_buffer_setup_t);
 * as written the alignment value survives a reset and the SG value is
 * wiped twice. Confirm and fix against the full source. */
914 static int cfi_reset_align_val(cfi_ep_t * cfiep)
916 dwc_memset(cfiep->bm_sg, 0, sizeof(ddma_sg_buffer_setup_t));
921 * This function resets the Concatenation for the specified EP to its default value
922 * This function will also set the value of the wTxBytes field to NULL after
923 * freeing the memory previously allocated for this field.
/* Resets the EP's Concatenation setup: frees the dynamically sized
 * wTxBytes array first (NULLing it to prevent double-free), then zeroes
 * the setup structure itself. */
925 static int cfi_reset_concat_val(cfi_ep_t * cfiep)
927 /* First we need to free the wTxBytes field */
928 if (cfiep->bm_concat->wTxBytes) {
929 DWC_FREE(cfiep->bm_concat->wTxBytes);
930 cfiep->bm_concat->wTxBytes = NULL;
933 dwc_memset(cfiep->bm_concat, 0, sizeof(ddma_concat_buffer_setup_t));
938 * This function resets all the buffer setups of the specified endpoint
/* Convenience: resets SG, Alignment and Concatenation setups of one EP.
 * Return values of the individual resets are ignored. */
940 static int cfi_ep_reset_all_setup_vals(cfi_ep_t * cfiep)
942 cfi_reset_sg_val(cfiep);
943 cfi_reset_align_val(cfiep);
944 cfi_reset_concat_val(cfiep);
/* Restores FIFO sizes to their power-on values and re-applies them via
 * resize_fifos(). rx_rst / tx_rst select which direction(s) to restore;
 * when a TX reset targets a specific EP (ep_addr != 0, presumably), only
 * that EP's tx_fifo is restored, otherwise all IN EPs. On resize failure
 * the saved previous sizes are rolled back and -DWC_E_INVALID is returned.
 * NOTE(review): many control-flow lines (the if/else structure, tx_siz
 * declarations, rollback loop body) are missing from this chunk — the
 * description above is inferred and should be verified against the full
 * source. */
948 static int cfi_handle_reset_fifo_val(struct dwc_otg_pcd *pcd, uint8_t ep_addr,
949 uint8_t rx_rst, uint8_t tx_rst)
951 int retval = -DWC_E_INVALID;
954 dwc_otg_pcd_ep_t *ep = NULL;
955 dwc_otg_core_if_t *core_if = GET_CORE_IF(pcd);
956 dwc_otg_core_params_t *params = GET_CORE_IF(pcd)->core_params;
/* Save current RX FIFO size, then restore the initial one. */
959 rx_siz = params->dev_rx_fifo_size;
960 params->dev_rx_fifo_size = GET_CORE_IF(pcd)->init_rxfsiz;
/* Restore the initial TX FIFO size of every IN endpoint. */
967 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
969 core_if->core_params->dev_tx_fifo_size[i];
970 core_if->core_params->dev_tx_fifo_size[i] =
971 core_if->init_txfsiz[i];
975 ep = get_ep_by_addr(pcd, ep_addr);
979 ("%s: Unable to get the endpoint addr=0x%02x\n",
981 return -DWC_E_INVALID;
/* Single-EP TX reset: tx_fifo_num is 1-based, hence the -1 index. */
985 params->dev_tx_fifo_size[ep->dwc_ep.tx_fifo_num -
987 params->dev_tx_fifo_size[ep->dwc_ep.tx_fifo_num - 1] =
988 GET_CORE_IF(pcd)->init_txfsiz[ep->
994 if (resize_fifos(GET_CORE_IF(pcd))) {
998 ("%s: Error resetting the feature Reset All(FIFO size)\n",
/* Resize failed: roll back the saved sizes. */
1001 params->dev_rx_fifo_size = rx_siz;
1007 for (i = 0; i < core_if->hwcfg4.b.num_in_eps;
1010 core_params->dev_tx_fifo_size[i] =
1014 params->dev_tx_fifo_size[ep->
1015 dwc_ep.tx_fifo_num -
1019 retval = -DWC_E_INVALID;
/* Handles the "Reset All features" request: first restores both FIFO
 * sizes, then resets all buffer-setup values — for one EP when addr is
 * nonzero, otherwise for every active EP — and returns each EP to
 * BM_STANDARD buffer mode. */
1024 static int cfi_handle_reset_all(struct dwc_otg_pcd *pcd, uint8_t addr)
1028 cfiobject_t *cfi = pcd->cfi;
1029 dwc_list_link_t *tmp;
1031 retval = cfi_handle_reset_fifo_val(pcd, addr, 1, 1);
1036 /* If the EP address is known then reset the features for only that EP */
1038 cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
1039 if (NULL == cfiep) {
1040 CFI_INFO("%s: Error getting the EP address 0x%02x\n",
1042 return -DWC_E_INVALID;
1044 retval = cfi_ep_reset_all_setup_vals(cfiep);
1045 cfiep->ep->dwc_ep.buff_mode = BM_STANDARD;
1047 /* Otherwise (wValue == 0), reset all features of all EP's */
1049 /* Traverse all the active EP's and reset the feature(s) value(s) */
1050 //list_for_each_entry(cfiep, &cfi->active_eps, lh) {
1051 DWC_LIST_FOREACH(tmp, &cfi->active_eps) {
1052 cfiep = DWC_LIST_ENTRY(tmp, struct cfi_ep, lh);
1053 retval = cfi_ep_reset_all_setup_vals(cfiep);
1054 cfiep->ep->dwc_ep.buff_mode = BM_STANDARD;
1057 ("%s: Error resetting the feature Reset All\n",
/* Resets the SG (DMA buffer setup) feature — for the single EP at `addr`
 * when known, otherwise for every active EP. Same one-vs-all pattern as
 * cfi_handle_reset_all(). */
1066 static int cfi_handle_reset_dma_buff_setup(struct dwc_otg_pcd *pcd,
1071 cfiobject_t *cfi = pcd->cfi;
1072 dwc_list_link_t *tmp;
1074 /* If the EP address is known then reset the features for only that EP */
1076 cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
1077 if (NULL == cfiep) {
1078 CFI_INFO("%s: Error getting the EP address 0x%02x\n",
1080 return -DWC_E_INVALID;
1082 retval = cfi_reset_sg_val(cfiep);
1084 /* Otherwise (wValue == 0), reset all features of all EP's */
1086 /* Traverse all the active EP's and reset the feature(s) value(s) */
1087 //list_for_each_entry(cfiep, &cfi->active_eps, lh) {
1088 DWC_LIST_FOREACH(tmp, &cfi->active_eps) {
1089 cfiep = DWC_LIST_ENTRY(tmp, struct cfi_ep, lh);
1090 retval = cfi_reset_sg_val(cfiep);
1093 ("%s: Error resetting the feature Buffer Setup\n",
/* Resets the Concatenation feature — single EP when `addr` resolves,
 * otherwise all active EPs. Mirrors cfi_handle_reset_dma_buff_setup(). */
1102 static int cfi_handle_reset_concat_val(struct dwc_otg_pcd *pcd, uint8_t addr)
1106 cfiobject_t *cfi = pcd->cfi;
1107 dwc_list_link_t *tmp;
1109 /* If the EP address is known then reset the features for only that EP */
1111 cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
1112 if (NULL == cfiep) {
1113 CFI_INFO("%s: Error getting the EP address 0x%02x\n",
1115 return -DWC_E_INVALID;
1117 retval = cfi_reset_concat_val(cfiep);
1119 /* Otherwise (wValue == 0), reset all features of all EP's */
1121 /* Traverse all the active EP's and reset the feature(s) value(s) */
1122 //list_for_each_entry(cfiep, &cfi->active_eps, lh) {
1123 DWC_LIST_FOREACH(tmp, &cfi->active_eps) {
1124 cfiep = DWC_LIST_ENTRY(tmp, struct cfi_ep, lh);
1125 retval = cfi_reset_concat_val(cfiep);
1128 ("%s: Error resetting the feature Concatenation Value\n",
/* Resets the Alignment feature — single EP when `addr` resolves, otherwise
 * all active EPs. NOTE(review): the runtime error string on line 1163
 * contains a typo ("Aliignment"); also cfi_reset_align_val() itself
 * appears to clear the wrong structure (see note at its definition). */
1137 static int cfi_handle_reset_align_val(struct dwc_otg_pcd *pcd, uint8_t addr)
1141 cfiobject_t *cfi = pcd->cfi;
1142 dwc_list_link_t *tmp;
1144 /* If the EP address is known then reset the features for only that EP */
1146 cfiep = get_cfi_ep_by_addr(pcd->cfi, addr);
1147 if (NULL == cfiep) {
1148 CFI_INFO("%s: Error getting the EP address 0x%02x\n",
1150 return -DWC_E_INVALID;
1152 retval = cfi_reset_align_val(cfiep);
1154 /* Otherwise (wValue == 0), reset all features of all EP's */
1156 /* Traverse all the active EP's and reset the feature(s) value(s) */
1157 //list_for_each_entry(cfiep, &cfi->active_eps, lh) {
1158 DWC_LIST_FOREACH(tmp, &cfi->active_eps) {
1159 cfiep = DWC_LIST_ENTRY(tmp, struct cfi_ep, lh);
1160 retval = cfi_reset_align_val(cfiep);
1163 ("%s: Error resetting the feature Aliignment Value\n",
/* VEN_CORE_RESET_FEATURES front-end: dispatches on wIndex (the feature ID)
 * to the matching reset handler; the low byte of wValue selects a single
 * EP address (0 = all EPs). FIFO-depth resets clear need_gadget_att since
 * no gadget re-attach is required for them. */
1173 static int cfi_preproc_reset(struct dwc_otg_pcd *pcd,
1174 struct cfi_usb_ctrlrequest *req)
1178 switch (req->wIndex) {
1180 /* Reset all features */
1181 retval = cfi_handle_reset_all(pcd, req->wValue & 0xff);
1184 case FT_ID_DMA_BUFFER_SETUP:
1185 /* Reset the SG buffer setup */
1187 cfi_handle_reset_dma_buff_setup(pcd, req->wValue & 0xff);
1190 case FT_ID_DMA_CONCAT_SETUP:
1191 /* Reset the Concatenation buffer setup */
1192 retval = cfi_handle_reset_concat_val(pcd, req->wValue & 0xff);
1195 case FT_ID_DMA_BUFF_ALIGN:
1196 /* Reset the Alignment buffer setup */
1197 retval = cfi_handle_reset_align_val(pcd, req->wValue & 0xff);
1200 case FT_ID_TX_FIFO_DEPTH:
1202 cfi_handle_reset_fifo_val(pcd, req->wValue & 0xff, 0, 1)#;
1203 pcd->cfi->need_gadget_att = 0;
1206 case FT_ID_RX_FIFO_DEPTH:
1207 retval = cfi_handle_reset_fifo_val(pcd, 0, 1, 0);
1208 pcd->cfi->need_gadget_att = 0;
1217 * This function sets a new value for the SG buffer setup.
/* Applies a new Scatter/Gather buffer setup received from the host.
 * Validates the descriptor count (1..MAX_DMA_DESCS_PER_EP) and the
 * transfer size, resolves both the IN and OUT endpoints named in the
 * setup, then switches both to BM_SG and stores a copy of the setup in
 * each. Returns -DWC_E_INVALID on any validation/lookup failure.
 * NOTE(review): `buf` comes straight from the EP0 OUT data stage —
 * host-controlled; the visible checks cover bCount and wSize only. */
1219 static int cfi_ep_set_sg_val(uint8_t * buf, struct dwc_otg_pcd *pcd)
1221 uint8_t inaddr, outaddr;
1222 cfi_ep_t *epin, *epout;
1223 ddma_sg_buffer_setup_t *psgval;
1224 uint32_t desccount, size;
1226 CFI_INFO("%s\n", __func__);
1228 psgval = (ddma_sg_buffer_setup_t *) buf;
1229 desccount = (uint32_t) psgval->bCount;
1230 size = (uint32_t) psgval->wSize;
1232 /* Check the DMA descriptor count */
1233 if ((desccount > MAX_DMA_DESCS_PER_EP) || (desccount == 0)) {
1235 ("%s: The count of DMA Descriptors should be between 1 and %d\n",
1236 __func__, MAX_DMA_DESCS_PER_EP);
1237 return -DWC_E_INVALID;
1240 /* Check the DMA descriptor count */
1244 CFI_INFO("%s: The transfer size should be at least 1 byte\n",
1247 return -DWC_E_INVALID;
1251 inaddr = psgval->bInEndpointAddress;
1252 outaddr = psgval->bOutEndpointAddress;
1254 epin = get_cfi_ep_by_addr(pcd->cfi, inaddr);
1255 epout = get_cfi_ep_by_addr(pcd->cfi, outaddr);
1257 if (NULL == epin || NULL == epout) {
1259 ("%s: Unable to get the endpoints inaddr=0x%02x outaddr=0x%02x\n",
1260 __func__, inaddr, outaddr);
1261 return -DWC_E_INVALID;
1264 epin->ep->dwc_ep.buff_mode = BM_SG;
1265 dwc_memcpy(epin->bm_sg, psgval, sizeof(ddma_sg_buffer_setup_t));
1267 epout->ep->dwc_ep.buff_mode = BM_SG;
1268 dwc_memcpy(epout->bm_sg, psgval, sizeof(ddma_sg_buffer_setup_t));
1274 * This function sets a new value for the buffer Alignment setup.
1276 static int cfi_ep_set_alignment_val(uint8_t * buf, struct dwc_otg_pcd *pcd)
1280 ddma_align_buffer_setup_t *palignval;
1282 palignval = (ddma_align_buffer_setup_t *) buf;
1283 addr = palignval->bEndpointAddress;
1285 ep = get_cfi_ep_by_addr(pcd->cfi, addr);
1288 CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
1290 return -DWC_E_INVALID;
1293 ep->ep->dwc_ep.buff_mode = BM_ALIGN;
1294 dwc_memcpy(ep->bm_align, palignval, sizeof(ddma_align_buffer_setup_t));
1300 * This function sets a new value for the Concatenation buffer setup.
1302 static int cfi_ep_set_concat_val(uint8_t * buf, struct dwc_otg_pcd *pcd)
1306 struct _ddma_concat_buffer_setup_hdr *pConcatValHdr;
1312 pConcatValHdr = (struct _ddma_concat_buffer_setup_hdr *)buf;
1313 desccount = (uint32_t) pConcatValHdr->bDescCount;
1314 pVals = (uint16_t *) (buf + BS_CONCAT_VAL_HDR_LEN);
1316 /* Check the DMA descriptor count */
1317 if (desccount > MAX_DMA_DESCS_PER_EP) {
1318 CFI_INFO("%s: Maximum DMA Descriptor count should be %d\n",
1319 __func__, MAX_DMA_DESCS_PER_EP);
1320 return -DWC_E_INVALID;
1323 addr = pConcatValHdr->bEndpointAddress;
1324 ep = get_cfi_ep_by_addr(pcd->cfi, addr);
1326 CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
1328 return -DWC_E_INVALID;
1331 mps = UGETW(ep->ep->desc->wMaxPacketSize);
1334 for (i = 0; i < desccount; i++) {
1335 CFI_INFO("%s: wTxSize[%d]=0x%04x\n", __func__, i, pVals[i]);
1337 CFI_INFO("%s: epname=%s; mps=%d\n", __func__, ep->ep->ep.name, mps);
1340 /* Check the wTxSizes to be less than or equal to the mps */
1341 for (i = 0; i < desccount; i++) {
1342 if (pVals[i] > mps) {
1344 ("%s: ERROR - the wTxSize[%d] should be <= MPS (wTxSize=%d)\n",
1345 __func__, i, pVals[i]);
1346 return -DWC_E_INVALID;
1350 ep->ep->dwc_ep.buff_mode = BM_CONCAT;
1351 dwc_memcpy(ep->bm_concat, pConcatValHdr, BS_CONCAT_VAL_HDR_LEN);
1353 /* Free the previously allocated storage for the wTxBytes */
1354 if (ep->bm_concat->wTxBytes) {
1355 DWC_FREE(ep->bm_concat->wTxBytes);
1358 /* Allocate a new storage for the wTxBytes field */
1359 ep->bm_concat->wTxBytes =
1360 DWC_ALLOC(sizeof(uint16_t) * pConcatValHdr->bDescCount);
1361 if (NULL == ep->bm_concat->wTxBytes) {
1362 CFI_INFO("%s: Unable to allocate memory\n", __func__);
1363 return -DWC_E_NO_MEMORY;
1366 /* Copy the new values into the wTxBytes filed */
1367 dwc_memcpy(ep->bm_concat->wTxBytes, buf + BS_CONCAT_VAL_HDR_LEN,
1368 sizeof(uint16_t) * pConcatValHdr->bDescCount);
1374 * This function calculates the total of all FIFO sizes
1376 * @param core_if Programming view of DWC_otg controller
1378 * @return The total of data FIFO sizes.
1381 static uint16_t get_dfifo_size(dwc_otg_core_if_t * core_if)
1383 dwc_otg_core_params_t *params = core_if->core_params;
1384 uint16_t dfifo_total = 0;
1387 /* The shared RxFIFO size */
1389 params->dev_rx_fifo_size + params->dev_nperio_tx_fifo_size;
1391 /* Add up each TxFIFO size to the total */
1392 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
1393 dfifo_total += params->dev_tx_fifo_size[i];
1400 * This function returns Rx FIFO size
1402 * @param core_if Programming view of DWC_otg controller
1404 * @return The total of data FIFO sizes.
1407 static int32_t get_rxfifo_size(dwc_otg_core_if_t * core_if, uint16_t wValue)
1409 switch (wValue >> 8) {
1411 return (core_if->pwron_rxfsiz <
1412 32768) ? core_if->pwron_rxfsiz : 32768;
1415 return core_if->core_params->dev_rx_fifo_size;
1418 return -DWC_E_INVALID;
1424 * This function returns Tx FIFO size for IN EP
1426 * @param core_if Programming view of DWC_otg controller
1428 * @return The total of data FIFO sizes.
1431 static int32_t get_txfifo_size(struct dwc_otg_pcd *pcd, uint16_t wValue)
1433 dwc_otg_pcd_ep_t *ep;
1435 ep = get_ep_by_addr(pcd, wValue & 0xff);
1438 CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
1439 __func__, wValue & 0xff);
1440 return -DWC_E_INVALID;
1443 if (!ep->dwc_ep.is_in) {
1445 ("%s: No Tx FIFO assingned to the Out endpoint addr=0x%02x\n",
1446 __func__, wValue & 0xff);
1447 return -DWC_E_INVALID;
1450 switch (wValue >> 8) {
1452 return (GET_CORE_IF(pcd)->pwron_txfsiz
1453 [ep->dwc_ep.tx_fifo_num - 1] <
1454 768) ? GET_CORE_IF(pcd)->pwron_txfsiz[ep->
1459 return GET_CORE_IF(pcd)->core_params->
1460 dev_tx_fifo_size[ep->dwc_ep.num - 1];
1463 return -DWC_E_INVALID;
1469 * This function checks if the submitted combination of
1470 * device mode FIFO sizes is possible or not.
1472 * @param core_if Programming view of DWC_otg controller
1474 * @return 1 if possible, 0 otherwise.
1477 static uint8_t check_fifo_sizes(dwc_otg_core_if_t * core_if)
1479 uint16_t dfifo_actual = 0;
1480 dwc_otg_core_params_t *params = core_if->core_params;
1481 uint16_t start_addr = 0;
1485 params->dev_rx_fifo_size + params->dev_nperio_tx_fifo_size;
1487 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
1488 dfifo_actual += params->dev_tx_fifo_size[i];
1491 if (dfifo_actual > core_if->total_fifo_size) {
1495 if (params->dev_rx_fifo_size > 32768 || params->dev_rx_fifo_size < 16)
1498 if (params->dev_nperio_tx_fifo_size > 32768
1499 || params->dev_nperio_tx_fifo_size < 16)
1502 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
1504 if (params->dev_tx_fifo_size[i] > 768
1505 || params->dev_tx_fifo_size[i] < 4)
1509 if (params->dev_rx_fifo_size > core_if->pwron_rxfsiz)
1511 start_addr = params->dev_rx_fifo_size;
1513 if (params->dev_nperio_tx_fifo_size > core_if->pwron_gnptxfsiz)
1515 start_addr += params->dev_nperio_tx_fifo_size;
1517 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
1519 if (params->dev_tx_fifo_size[i] > core_if->pwron_txfsiz[i])
1521 start_addr += params->dev_tx_fifo_size[i];
1528 * This function resizes Device mode FIFOs
1530 * @param core_if Programming view of DWC_otg controller
1532 * @return 1 if successful, 0 otherwise
1535 static uint8_t resize_fifos(dwc_otg_core_if_t * core_if)
1538 dwc_otg_core_global_regs_t *global_regs = core_if->core_global_regs;
1539 dwc_otg_core_params_t *params = core_if->core_params;
1540 uint32_t rx_fifo_size;
1541 fifosize_data_t nptxfifosize;
1542 fifosize_data_t txfifosize[15];
1544 uint32_t rx_fsz_bak;
1545 uint32_t nptxfsz_bak;
1546 uint32_t txfsz_bak[15];
1548 uint16_t start_address;
1551 if (!check_fifo_sizes(core_if)) {
1555 /* Configure data FIFO sizes */
1556 if (core_if->hwcfg2.b.dynamic_fifo && params->enable_dynamic_fifo) {
1557 rx_fsz_bak = DWC_READ_REG32(&global_regs->grxfsiz);
1558 rx_fifo_size = params->dev_rx_fifo_size;
1559 DWC_WRITE_REG32(&global_regs->grxfsiz, rx_fifo_size);
1562 * Tx FIFOs These FIFOs are numbered from 1 to 15.
1563 * Indexes of the FIFO size module parameters in the
1564 * dev_tx_fifo_size array and the FIFO size registers in
1565 * the dtxfsiz array run from 0 to 14.
1568 /* Non-periodic Tx FIFO */
1569 nptxfsz_bak = DWC_READ_REG32(&global_regs->gnptxfsiz);
1570 nptxfifosize.b.depth = params->dev_nperio_tx_fifo_size;
1571 start_address = params->dev_rx_fifo_size;
1572 nptxfifosize.b.startaddr = start_address;
1574 DWC_WRITE_REG32(&global_regs->gnptxfsiz, nptxfifosize.d32);
1576 start_address += nptxfifosize.b.depth;
1578 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
1579 txfsz_bak[i] = DWC_READ_REG32(&global_regs->dtxfsiz[i]);
1581 txfifosize[i].b.depth = params->dev_tx_fifo_size[i];
1582 txfifosize[i].b.startaddr = start_address;
1583 DWC_WRITE_REG32(&global_regs->dtxfsiz[i],
1586 start_address += txfifosize[i].b.depth;
1589 /** Check if register values are set correctly */
1590 if (rx_fifo_size != DWC_READ_REG32(&global_regs->grxfsiz)) {
1594 if (nptxfifosize.d32 != DWC_READ_REG32(&global_regs->gnptxfsiz)) {
1598 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
1599 if (txfifosize[i].d32 !=
1600 DWC_READ_REG32(&global_regs->dtxfsiz[i])) {
1605 /** If register values are not set correctly, reset old values */
1607 DWC_WRITE_REG32(&global_regs->grxfsiz, rx_fsz_bak);
1609 /* Non-periodic Tx FIFO */
1610 DWC_WRITE_REG32(&global_regs->gnptxfsiz, nptxfsz_bak);
1612 for (i = 0; i < core_if->hwcfg4.b.num_in_eps; i++) {
1613 DWC_WRITE_REG32(&global_regs->dtxfsiz[i],
1621 /* Flush the FIFOs */
1622 dwc_otg_flush_tx_fifo(core_if, 0x10); /* all Tx FIFOs */
1623 dwc_otg_flush_rx_fifo(core_if);
1629 * This function sets a new value for the buffer Alignment setup.
1631 static int cfi_ep_set_tx_fifo_val(uint8_t * buf, dwc_otg_pcd_t * pcd)
1637 dwc_otg_pcd_ep_t *ep;
1638 dwc_otg_core_params_t *params = GET_CORE_IF(pcd)->core_params;
1639 tx_fifo_size_setup_t *ptxfifoval;
1641 ptxfifoval = (tx_fifo_size_setup_t *) buf;
1642 ep_addr = ptxfifoval->bEndpointAddress;
1643 size = ptxfifoval->wDepth;
1645 ep = get_ep_by_addr(pcd, ep_addr);
1648 ("%s: Set Tx FIFO size: endpoint addr=0x%02x, depth=%d, FIFO Num=%d\n",
1649 __func__, ep_addr, size, ep->dwc_ep.tx_fifo_num);
1652 CFI_INFO("%s: Unable to get the endpoint addr=0x%02x\n",
1654 return -DWC_E_INVALID;
1657 fsiz = params->dev_tx_fifo_size[ep->dwc_ep.tx_fifo_num - 1];
1658 params->dev_tx_fifo_size[ep->dwc_ep.tx_fifo_num - 1] = size;
1660 if (resize_fifos(GET_CORE_IF(pcd))) {
1664 ("%s: Error setting the feature Tx FIFO Size for EP%d\n",
1666 params->dev_tx_fifo_size[ep->dwc_ep.tx_fifo_num - 1] = fsiz;
1667 retval = -DWC_E_INVALID;
1674 * This function sets a new value for the buffer Alignment setup.
1676 static int cfi_set_rx_fifo_val(uint8_t * buf, dwc_otg_pcd_t * pcd)
1681 dwc_otg_core_params_t *params = GET_CORE_IF(pcd)->core_params;
1682 rx_fifo_size_setup_t *prxfifoval;
1684 prxfifoval = (rx_fifo_size_setup_t *) buf;
1685 size = prxfifoval->wDepth;
1687 fsiz = params->dev_rx_fifo_size;
1688 params->dev_rx_fifo_size = size;
1690 if (resize_fifos(GET_CORE_IF(pcd))) {
1693 CFI_INFO("%s: Error setting the feature Rx FIFO Size\n",
1695 params->dev_rx_fifo_size = fsiz;
1696 retval = -DWC_E_INVALID;
1703 * This function reads the SG of an EP's buffer setup into the buffer buf
1705 static int cfi_ep_get_sg_val(uint8_t * buf, struct dwc_otg_pcd *pcd,
1706 struct cfi_usb_ctrlrequest *req)
1708 int retval = -DWC_E_INVALID;
1712 /* The Low Byte of the wValue contains a non-zero address of the endpoint */
1713 addr = req->wValue & 0xFF;
1714 if (addr == 0) /* The address should be non-zero */
1717 ep = get_cfi_ep_by_addr(pcd->cfi, addr);
1719 CFI_INFO("%s: Unable to get the endpoint address(0x%02x)\n",
1724 dwc_memcpy(buf, ep->bm_sg, BS_SG_VAL_DESC_LEN);
1725 retval = BS_SG_VAL_DESC_LEN;
1730 * This function reads the Concatenation value of an EP's buffer mode into
1733 static int cfi_ep_get_concat_val(uint8_t * buf, struct dwc_otg_pcd *pcd,
1734 struct cfi_usb_ctrlrequest *req)
1736 int retval = -DWC_E_INVALID;
1741 /* The Low Byte of the wValue contains a non-zero address of the endpoint */
1742 addr = req->wValue & 0xFF;
1743 if (addr == 0) /* The address should be non-zero */
1746 ep = get_cfi_ep_by_addr(pcd->cfi, addr);
1748 CFI_INFO("%s: Unable to get the endpoint address(0x%02x)\n",
1753 /* Copy the header to the buffer */
1754 dwc_memcpy(buf, ep->bm_concat, BS_CONCAT_VAL_HDR_LEN);
1755 /* Advance the buffer pointer by the header size */
1756 buf += BS_CONCAT_VAL_HDR_LEN;
1758 desc_count = ep->bm_concat->hdr.bDescCount;
1759 /* Copy alll the wTxBytes to the buffer */
1760 dwc_memcpy(buf, ep->bm_concat->wTxBytes, sizeof(uid16_t) * desc_count);
1762 retval = BS_CONCAT_VAL_HDR_LEN + sizeof(uid16_t) * desc_count;
1767 * This function reads the buffer Alignment value of an EP's buffer mode into
1770 * @return The total number of bytes copied to the buffer or negative error code.
1772 static int cfi_ep_get_align_val(uint8_t * buf, struct dwc_otg_pcd *pcd,
1773 struct cfi_usb_ctrlrequest *req)
1775 int retval = -DWC_E_INVALID;
1779 /* The Low Byte of the wValue contains a non-zero address of the endpoint */
1780 addr = req->wValue & 0xFF;
1781 if (addr == 0) /* The address should be non-zero */
1784 ep = get_cfi_ep_by_addr(pcd->cfi, addr);
1786 CFI_INFO("%s: Unable to get the endpoint address(0x%02x)\n",
1791 dwc_memcpy(buf, ep->bm_align, BS_ALIGN_VAL_HDR_LEN);
1792 retval = BS_ALIGN_VAL_HDR_LEN;
1798 * This function sets a new value for the specified feature
1800 * @param pcd A pointer to the PCD object
1802 * @return 0 if successful, negative error code otherwise to stall the DCE.
1804 static int cfi_set_feature_value(struct dwc_otg_pcd *pcd)
1806 int retval = -DWC_E_NOT_SUPPORTED;
1807 uint16_t wIndex, wValue;
1809 struct dwc_otg_core_if *coreif;
1810 cfiobject_t *cfi = pcd->cfi;
1811 struct cfi_usb_ctrlrequest *ctrl_req;
1813 ctrl_req = &cfi->ctrl_req;
1815 buf = pcd->cfi->ctrl_req.data;
1817 coreif = GET_CORE_IF(pcd);
1818 bRequest = ctrl_req->bRequest;
1819 wIndex = DWC_CONSTANT_CPU_TO_LE16(ctrl_req->wIndex);
1820 wValue = DWC_CONSTANT_CPU_TO_LE16(ctrl_req->wValue);
1822 /* See which feature is to be modified */
1824 case FT_ID_DMA_BUFFER_SETUP:
1825 /* Modify the feature */
1826 if ((retval = cfi_ep_set_sg_val(buf, pcd)) < 0)
1829 /* And send this request to the gadget */
1830 cfi->need_gadget_att = 1;
1833 case FT_ID_DMA_BUFF_ALIGN:
1834 if ((retval = cfi_ep_set_alignment_val(buf, pcd)) < 0)
1836 cfi->need_gadget_att = 1;
1839 case FT_ID_DMA_CONCAT_SETUP:
1840 /* Modify the feature */
1841 if ((retval = cfi_ep_set_concat_val(buf, pcd)) < 0)
1843 cfi->need_gadget_att = 1;
1846 case FT_ID_DMA_CIRCULAR:
1847 CFI_INFO("FT_ID_DMA_CIRCULAR\n");
1850 case FT_ID_THRESHOLD_SETUP:
1851 CFI_INFO("FT_ID_THRESHOLD_SETUP\n");
1854 case FT_ID_DFIFO_DEPTH:
1855 CFI_INFO("FT_ID_DFIFO_DEPTH\n");
1858 case FT_ID_TX_FIFO_DEPTH:
1859 CFI_INFO("FT_ID_TX_FIFO_DEPTH\n");
1860 if ((retval = cfi_ep_set_tx_fifo_val(buf, pcd)) < 0)
1862 cfi->need_gadget_att = 0;
1865 case FT_ID_RX_FIFO_DEPTH:
1866 CFI_INFO("FT_ID_RX_FIFO_DEPTH\n");
1867 if ((retval = cfi_set_rx_fifo_val(buf, pcd)) < 0)
1869 cfi->need_gadget_att = 0;
1876 #endif //DWC_UTE_CFI