path: root/firmware/target/arm/tms320dm320/sansa-connect/tnetv105_cppi.c
author    Tomasz Moń <desowin@gmail.com>    2021-06-05 09:22:27 +0200
committer Tomasz Moń <desowin@gmail.com>    2021-06-06 07:57:38 +0000
commit    474293a12b6152041404378abd932ac495e5e18d (patch)
tree      752c777a4b326d40216da1868e5c7aedaccfa7bd /firmware/target/arm/tms320dm320/sansa-connect/tnetv105_cppi.c
parent    77603c344dd4946d0319688c3b58bf9e1507d9aa (diff)
Sansa Connect: Initial TNETV105 driver port

Port the USB driver from the Sansa Connect Linux kernel sources. The device
successfully enumerates and responds to SCSI commands, but actual disk access
does not work. The SCSI response sent to the host indicates that both the
internal storage and the microSD card are not present.

Change-Id: Ic6c07da12382c15c0b069f23a75f7df9765b7525
Diffstat (limited to 'firmware/target/arm/tms320dm320/sansa-connect/tnetv105_cppi.c')
-rw-r--r--  firmware/target/arm/tms320dm320/sansa-connect/tnetv105_cppi.c  1044
1 file changed, 1044 insertions, 0 deletions
diff --git a/firmware/target/arm/tms320dm320/sansa-connect/tnetv105_cppi.c b/firmware/target/arm/tms320dm320/sansa-connect/tnetv105_cppi.c
new file mode 100644
index 0000000000..93477bed9e
--- /dev/null
+++ b/firmware/target/arm/tms320dm320/sansa-connect/tnetv105_cppi.c
@@ -0,0 +1,1044 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id: $
 *
 * Copyright (C) 2021 by Tomasz Moń
 * Copied with minor modifications from Sansa Connect Linux driver
 * Copyright (c) 2005,2006 Zermatt Systems, Inc.
 * Written by: Ben Bostwick
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/

#include <string.h>
#include "config.h"
#include "system.h"
#include "kernel.h"
#include "panic.h"
#include "logf.h"
#include "tnetv105_usb_drv.h"
#include "tnetv105_cppi.h"

/* This file is pretty much directly copied from the Sansa Connect
 * Linux kernel source code. This is because the functionality is
 * nicely separated from the actual kernel-specific code and CPPI seems
 * complex (at least without access to the datasheet).
 *
 * The only non-cosmetic change was changing the dynamic allocations
 * to static allocations.
 *
 * It seems that the only way to get an interrupt on non-control endpoint
 * activity is to use the CPPI. This sounds like a plausible explanation
 * for the fake DMA buffers mentioned in the CPPI code.
 */

/* Translate Linux consistent_sync() to Rockbox functions */
#define DMA_TO_DEVICE   commit_discard_dcache_range
#define DMA_FROM_DEVICE discard_dcache_range
#define consistent_sync(ptr, size, func) func(ptr, size)
/* Rockbox TMS320DM320 crt0.S maps everything to itself */
#define __virt_to_phys(ptr) ptr
#define __phys_to_virt(ptr) ptr
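
/* Added commentary (not in the original driver): with the translation
 * macros above, a Linux-style call such as
 *
 *     consistent_sync(tcb, sizeof(*tcb), DMA_TO_DEVICE);
 *
 * expands to the Rockbox cache call
 *
 *     commit_discard_dcache_range(tcb, sizeof(*tcb));
 *
 * i.e. clean and invalidate before the device reads the memory, while
 * DMA_FROM_DEVICE maps to discard_dcache_range(), invalidating before
 * the CPU reads what the device wrote. */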

// CPPI functions
#define CB_SOF_BIT       (1<<31)
#define CB_EOF_BIT       (1<<30)
#define CB_OWNERSHIP_BIT (1<<29)
#define CB_EOQ_BIT       (1<<28)
#define CB_ZLP_GARBAGE   (1<<23)
#define CB_SIZE_MASK     0x0000ffff
#define CB_OFFSET_MASK   0xffff0000
#define TEARDOWN_VAL     0xfffffffc

#define MAX_BUF_SIZE 512

#define CPPI_DMA_RX_BUF_SIZE (MAX_BUF_SIZE * CPPI_RX_NUM_BUFS)

static uint8_t *dma_recv_buf[CPPI_NUM_CHANNELS];
static uint8_t ch0_rx_buf[CPPI_DMA_RX_BUF_SIZE];
static uint8_t ch1_rx_buf[CPPI_DMA_RX_BUF_SIZE];

#if USB_CPPI_LOGGING
#define cppi_log_event0(msg) usb_log_event(msg, LOG_EVENT_D0, 0, 0, 0, 0)
#define cppi_log_event1(msg, data0) usb_log_event(msg, LOG_EVENT_D1, data0, 0, 0, 0)
#define cppi_log_event2(msg, data0, data1) usb_log_event(msg, LOG_EVENT_D2, data0, data1, 0, 0)
#define cppi_log_event3(msg, data0, data1, data2) usb_log_event(msg, LOG_EVENT_D3, data0, data1, data2, 0)
#define cppi_log_event4(msg, data0, data1, data2, data3) usb_log_event(msg, LOG_EVENT_D4, data0, data1, data2, data3)
#else
#define cppi_log_event0(x)
#define cppi_log_event1(x, y)
#define cppi_log_event2(x, y, z)
#define cppi_log_event3(x, y, z, w)
#define cppi_log_event4(x, y, z, w, u)
#endif

/*
 * This function processes transmit interrupts. It traverses the
 * transmit buffer queue, detecting sent data buffers.
 *
 * @return number of bytes sent, negative on error.
 */
int tnetv_cppi_tx_int(struct cppi_info *cppi, int ch)
{
    cppi_tcb *CurrentTcb, *LastTcbProcessed;
    uint32_t TxFrameStatus;
    cppi_txcntl *pTxCtl = &cppi->tx_ctl[ch];
    int bytes_sent = 0;

    cppi_log_event1("[cppi]TxInt ch", ch);

    CurrentTcb = pTxCtl->TxActQueueHead;

    if (CurrentTcb == 0)
    {
        cppi_log_event0("[cppi] tx int: no current tcb");
        return -1;
    }

    // sync up the tcb from memory
    consistent_sync(CurrentTcb, sizeof(*CurrentTcb), DMA_FROM_DEVICE);

    TxFrameStatus = CurrentTcb->mode;
    LastTcbProcessed = NULL;

    cppi_log_event3("[cppi] int tcb status", (uint32_t) CurrentTcb, TxFrameStatus, CurrentTcb->Off_BLen);

    while (CurrentTcb && (TxFrameStatus & CB_OWNERSHIP_BIT) == 0)
    {
        cppi_log_event3("[cppi] tx int: tcb (mode) (len)", (uint32_t) CurrentTcb, CurrentTcb->mode, CurrentTcb->Off_BLen);

        // calculate the amount of bytes sent.
        // don't count the fake ZLP byte
        if (CurrentTcb->Off_BLen > 0x1)
        {
            bytes_sent += CurrentTcb->Off_BLen & 0xFFFF;
        }

        if (CurrentTcb->mode & CB_EOQ_BIT)
        {
            if (CurrentTcb->Next)
            {
                cppi_log_event0(" [cppi] misqueue!");

                // Misqueued packet
                tnetv_usb_reg_write(TNETV_DMA_TX_STATE(ch, TNETV_CPPI_TX_WORD_HDP), CurrentTcb->HNext);
            }
            else
            {
                cppi_log_event0("[cppi] eoq");

                /* Tx End of Queue */
                pTxCtl->TxActive = 0;
            }
        }

        cppi_log_event1("[cppi]SendComplete: ", CurrentTcb->Off_BLen & 0xFFFF);

        // Write the completion pointer
        tnetv_usb_reg_write(TNETV_DMA_TX_CMPL(ch), __dma_to_vlynq_phys(CurrentTcb->dma_handle));

        LastTcbProcessed = CurrentTcb;
        CurrentTcb = CurrentTcb->Next;

        // clean up TCB fields
        LastTcbProcessed->HNext = 0;
        LastTcbProcessed->Next = 0;
        LastTcbProcessed->BufPtr = 0;
        LastTcbProcessed->Off_BLen = 0;
        LastTcbProcessed->mode = 0;
        LastTcbProcessed->Eop = 0;

        /* Push Tcb(s) back onto the list */
        if (pTxCtl->TcbPool)
        {
            LastTcbProcessed->Next = pTxCtl->TcbPool->Next;
            pTxCtl->TcbPool->Next = LastTcbProcessed;
        }
        else
        {
            pTxCtl->TcbPool = LastTcbProcessed;
        }

        consistent_sync(LastTcbProcessed, sizeof(*LastTcbProcessed), DMA_TO_DEVICE);

        // get the status of the next packet
        if (CurrentTcb)
        {
            // sync up the tcb from memory
            consistent_sync(CurrentTcb, sizeof(*CurrentTcb), DMA_FROM_DEVICE);

            TxFrameStatus = CurrentTcb->mode;
        }
    }

    pTxCtl->TxActQueueHead = CurrentTcb;

    if (!LastTcbProcessed)
    {
        cppi_log_event1(" [cppi]No Tx packets serviced on int! ch", ch);
        return -1;
    }

    return bytes_sent;
}
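
/* Hypothetical usage sketch (not part of the original driver): a TX DMA
 * interrupt handler in tnetv105_usb_drv.c would be expected to call
 * tnetv_cppi_tx_int() and complete the pending IN transfer with the
 * returned byte count. handle_tx_dma_irq() and ep_from_ch() are
 * illustrative names, not real functions. */
#if 0
static void handle_tx_dma_irq(struct cppi_info *cppi, int ch)
{
    int sent = tnetv_cppi_tx_int(cppi, ch);

    if (sent >= 0)
    {
        /* report completion of 'sent' bytes on this channel's IN endpoint */
        usb_core_transfer_complete(ep_from_ch(ch), USB_DIR_IN, 0, sent);
    }
    /* sent < 0: spurious interrupt, nothing was on the queue */
}
#endif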

int tnetv_cppi_flush_tx_queue(struct cppi_info *cppi, int ch)
{
    cppi_txcntl *pTxCtl = &cppi->tx_ctl[ch];
    cppi_tcb *tcb, *next_tcb;

    tcb = pTxCtl->TxActQueueHead;

    cppi_log_event1("[cppi] flush TX ", (uint32_t) pTxCtl->TxActQueueHead);

    while (tcb)
    {
        tcb->mode = 0;
        tcb->BufPtr = 0;
        tcb->Off_BLen = 0;
        tcb->Eop = 0;
        tcb->HNext = 0;

        next_tcb = tcb->Next;

        tcb->Next = pTxCtl->TcbPool;
        pTxCtl->TcbPool = tcb;

        tcb = next_tcb;
    }

    pTxCtl->TxActQueueHead = 0;
    pTxCtl->TxActQueueTail = 0;
    pTxCtl->TxActive = 0;

    return 0;
}

/**
 * @ingroup CPHAL_Functions
 * This function transmits the data in the supplied buffer using available
 * transmit buffer descriptors. More information on the use of the Mode
 * parameter is available in the module-specific appendices. Note: The OS
 * should not call Send() for a channel that has been requested to be
 * torn down.
 */
int tnetv_cppi_send(struct cppi_info *cppi, int ch, dma_addr_t buf, unsigned length, int send_zlp)
{
    cppi_txcntl *pTxCtl;
    cppi_tcb *first_tcb;
    cppi_tcb *tcb;
    int queued_len;
    dma_addr_t buf_to_send;
    dma_addr_t buf_ptr;
    int total_len = length;
    int pktlen;

    pTxCtl = &cppi->tx_ctl[ch];

    if (length == 0)
    {
        cppi_log_event0("[cppi] len = 0, nothing to send");
        return -1;
    }

    // no send buffers.. try again later
    if (!pTxCtl->TcbPool)
    {
        cppi_log_event0("[cppi] out of cppi buffers");
        return -1;
    }

    // only send 1 packet at a time
    if (pTxCtl->TxActQueueHead || pTxCtl->TxActive)
    {
        cppi_log_event0("[cppi] already sending!");
        return -1;
    }

    buf_to_send = buf;

    // usb_requests can have a 32 bit length, but CPPI DMA fragments
    // have a (64k - 1) limit. Split the usb_request up into fragments here.
    first_tcb = pTxCtl->TcbPool;
    tcb = first_tcb;

    cppi_log_event4("[cppi]cppi_send (buf) (len) (pool) (dma)", (uint32_t) buf_to_send, total_len, (uint32_t) first_tcb, first_tcb->dma_handle);

    queued_len = 0;

    do
    {
        buf_ptr = buf_to_send + queued_len;
        tcb->BufPtr = __dma_to_vlynq_phys(buf_ptr);
        tcb->HNext = 0;

        // can't transfer more than 64k-1 bytes in 1 CPPI transfer
        // need to queue up transfers if it's greater than that
        pktlen = ((total_len - queued_len) > CPPI_MAX_FRAG) ? CPPI_MAX_FRAG : (total_len - queued_len);
        tcb->Off_BLen = pktlen;
        tcb->mode = (CB_OWNERSHIP_BIT | CB_SOF_BIT | CB_EOF_BIT | pktlen);

        queued_len += pktlen;

        if (queued_len < total_len)
        {
            tcb->HNext = __dma_to_vlynq_phys(((cppi_tcb *) tcb->Next)->dma_handle);

            // write out the buffer to memory
            consistent_sync(tcb, sizeof(*tcb), DMA_TO_DEVICE);

            cppi_log_event4("[cppi] q tcb", (uint32_t) tcb, ((uint32_t *) tcb)[0], ((uint32_t *) tcb)[1], ((uint32_t *) tcb)[2]);
            cppi_log_event4("[cppi] ", ((uint32_t *) tcb)[3], ((uint32_t *) tcb)[4], ((uint32_t *) tcb)[5], ((uint32_t *) tcb)[6]);

            tcb = tcb->Next;
        }
    } while (queued_len < total_len);

    /* In the Tx Interrupt handler, we will need to know which TCB is EOP,
       so we can save that information in the SOP */
    first_tcb->Eop = tcb;

    // set the secret ZLP bit if necessary, this will be a completely separate packet
    if (send_zlp)
    {
#if defined(AUTO_ZLP) && AUTO_ZLP
        // add an extra buffer at the end to hold the ZLP
        tcb->HNext = __dma_to_vlynq_phys(((cppi_tcb *) tcb->Next)->dma_handle);

        // write out the buffer to memory
        consistent_sync(tcb, sizeof(*tcb), DMA_TO_DEVICE);

        tcb = tcb->Next;

        /* In the Tx Interrupt handler, we will need to know which TCB is EOP,
           so we can save that information in the SOP */
        first_tcb->Eop = tcb;
#endif

        buf_ptr = buf_to_send + queued_len;
        tcb->BufPtr = __dma_to_vlynq_phys(buf_ptr); // not used, but can't be zero
        tcb->HNext = 0;
        tcb->Off_BLen = 0x1; // device will send (((len - 1) / maxpacket) + 1) ZLPs
        tcb->mode = (CB_SOF_BIT | CB_EOF_BIT | CB_OWNERSHIP_BIT | CB_ZLP_GARBAGE | 0x1); // send 1 ZLP
        tcb->Eop = tcb;

        cppi_log_event0("[cppi] Send ZLP!");
    }

    pTxCtl->TcbPool = tcb->Next;

    tcb->Next = 0;
    tcb->HNext = 0;

    // write out the buffer to memory
    consistent_sync(tcb, sizeof(*tcb), DMA_TO_DEVICE);

    cppi_log_event4("[cppi] q tcb", (uint32_t) tcb, ((uint32_t *) tcb)[0], ((uint32_t *) tcb)[1], ((uint32_t *) tcb)[2]);
    cppi_log_event4("[cppi] ", ((uint32_t *) tcb)[3], ((uint32_t *) tcb)[4], ((uint32_t *) tcb)[5], ((uint32_t *) tcb)[6]);

    cppi_log_event4("[cppi] send queued (ptr) (len) (ftcb, ltcb)", (uint32_t) tcb->BufPtr, tcb->Off_BLen, (uint32_t) first_tcb, (uint32_t) tcb);

    /* put it on the queue */
    pTxCtl->TxActQueueHead = first_tcb;
    pTxCtl->TxActQueueTail = tcb;

    cppi_log_event3("[cppi] setting state (head) (virt) (next)", (uint32_t) first_tcb, __dma_to_vlynq_phys(first_tcb->dma_handle), (uint32_t) first_tcb->HNext);

    /* write CPPI TX HDP - cache is cleaned above */
    tnetv_usb_reg_write(TNETV_DMA_TX_STATE(ch, TNETV_CPPI_TX_WORD_HDP), __dma_to_vlynq_phys(first_tcb->dma_handle));

    pTxCtl->TxActive = 1;

    return 0;
}
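
/* Hypothetical usage sketch (not part of the original driver): queueing an
 * IN transfer. The caller owns cache maintenance of the payload buffer;
 * send_zlp asks for a trailing zero-length packet when the length is an
 * exact multiple of the endpoint's max packet size. queue_in_transfer()
 * is an illustrative name. */
#if 0
static int queue_in_transfer(struct cppi_info *cppi, int ch,
                             void *buf, int len, int maxpacket)
{
    /* make the payload visible to the DMA engine before queueing it */
    consistent_sync(buf, len, DMA_TO_DEVICE);

    return tnetv_cppi_send(cppi, ch, (dma_addr_t) __virt_to_phys(buf),
                           len, (len % maxpacket) == 0);
}
#endif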

/*
 * This function allocates transmit buffer descriptors (internal CPHAL function).
 * It creates a high priority transmit queue by default for a single Tx
 * channel. If QoS is enabled for the given CPHAL device, this function
 * will also allocate a low priority transmit queue.
 *
 * @return 0 OK, Non-Zero Not OK
 */
int tnetv_cppi_init_tcb(struct cppi_info *cppi, int ch)
{
    int i, num;
    cppi_tcb *pTcb = 0;
    char *AllTcb;
    int tcbSize;
    cppi_txcntl *pTxCtl = &cppi->tx_ctl[ch];

    num = pTxCtl->TxNumBuffers;
    tcbSize = (sizeof(cppi_tcb) + 0xf) & ~0xf;

    cppi_log_event4("[cppi] init_tcb (ch) (num) (dma) (tcbsz)", ch, num, pTxCtl->tcb_start_dma_addr, tcbSize);

    if (pTxCtl->TxNumBuffers == 0)
    {
        return -1;
    }

    /* if the memory has already been allocated, simply reuse it! */
    AllTcb = pTxCtl->TcbStart;

    // now reinitialize the TCB pool
    pTxCtl->TcbPool = 0;
    for (i = 0; i < num; i++)
    {
        pTcb = (cppi_tcb *)(AllTcb + (i * tcbSize));
        pTcb->dma_handle = pTxCtl->tcb_start_dma_addr + (i * tcbSize);

        pTcb->BufPtr = 0;
        pTcb->mode = 0;
        pTcb->HNext = 0;
        pTcb->Off_BLen = 0;
        pTcb->Eop = 0;

        pTcb->Next = (void *) pTxCtl->TcbPool;

        pTxCtl->TcbPool = pTcb;
    }

    cppi_log_event2(" [cppi]TcbPool", (uint32_t) pTxCtl->TcbPool, pTxCtl->TcbPool->dma_handle);

#if USB_CPPI_LOGGING
    {
        // BEN DEBUG
        cppi_tcb *first_tcb = pTxCtl->TcbPool;
        cppi_log_event4("[cppi] init tcb", (uint32_t) first_tcb, ((uint32_t *) first_tcb)[0], ((uint32_t *) first_tcb)[1], ((uint32_t *) first_tcb)[2]);
        cppi_log_event4("[cppi] ", ((uint32_t *) first_tcb)[3], ((uint32_t *) first_tcb)[4], ((uint32_t *) first_tcb)[5], ((uint32_t *) first_tcb)[6]);
    }
#endif

    return 0;
}
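
/* Added commentary (not in the original source): the tcbSize computation
 * above is a standard align-up, rounding sizeof(cppi_tcb) to the next
 * multiple of 16 so each descriptor starts on a 16-byte boundary. For
 * example, assuming sizeof(cppi_tcb) == 28, (28 + 0xf) & ~0xf == 32, so
 * descriptor i sits at TcbStart + i*32 with bus address
 * tcb_start_dma_addr + i*32. */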

// BEN DEBUG
void tnetv_cppi_dump_info(struct cppi_info *cppi)
{
    int ch;
    cppi_rxcntl *pRxCtl;
    cppi_txcntl *pTxCtl;
    cppi_tcb *tcb;
    cppi_rcb *rcb;

    logf("CPPI struct:\n");
    logf("Buf mem: %x Buf size: %d int: %x %x\n\n", (uint32_t) cppi->dma_mem, cppi->dma_size, tnetv_usb_reg_read(TNETV_USB_RX_INT_STATUS), tnetv_usb_reg_read(DM320_VLYNQ_INTST));

    for (ch = 0; ch < CPPI_NUM_CHANNELS; ch++)
    {
        pRxCtl = &cppi->rx_ctl[ch];
        pTxCtl = &cppi->tx_ctl[ch];

        logf("ch: %d\n", ch);
        logf(" rx_numbufs: %d active %d free_buf_cnt %d\n", pRxCtl->RxNumBuffers, pRxCtl->RxActive, tnetv_usb_reg_read(TNETV_USB_RX_FREE_BUF_CNT(ch)));
        logf(" q_cnt %d head %x tail %x\n", pRxCtl->RxActQueueCount, (uint32_t) pRxCtl->RxActQueueHead, (uint32_t) pRxCtl->RxActQueueTail);
        logf(" fake_head: %x fake_tail: %x\n", (uint32_t) pRxCtl->RxFakeRcvHead, (uint32_t) pRxCtl->RxFakeRcvTail);

        rcb = (cppi_rcb *) pRxCtl->RcbStart;
        do
        {
            if (!rcb)
                break;

            logf(" Rcb: %x\n", (uint32_t) rcb);
            logf(" HNext %x BufPtr %x Off_BLen %x mode %x\n", rcb->HNext, rcb->BufPtr, rcb->Off_BLen, rcb->mode);
            logf(" Next %x Eop %x dma_handle %x fake_bytes %x\n", (uint32_t) rcb->Next, (uint32_t) rcb->Eop, rcb->dma_handle, rcb->fake_bytes);
            rcb = rcb->Next;

        } while (rcb && rcb != (cppi_rcb *) pRxCtl->RcbStart);

        logf("\n");
        logf(" tx_numbufs: %d active %d\n", pTxCtl->TxNumBuffers, pTxCtl->TxActive);
        logf(" q_cnt %d head %x tail %x\n", pTxCtl->TxActQueueCount, (uint32_t) pTxCtl->TxActQueueHead, (uint32_t) pTxCtl->TxActQueueTail);

        tcb = (cppi_tcb *) pTxCtl->TcbPool;
        do
        {
            if (!tcb)
                break;

            logf(" Tcb (pool): %x\n", (uint32_t) tcb);
            logf(" HNext %x BufPtr %x Off_BLen %x mode %x\n", tcb->HNext, tcb->BufPtr, tcb->Off_BLen, tcb->mode);
            logf(" Next %x Eop %x dma_handle %x\n", (uint32_t) tcb->Next, (uint32_t) tcb->Eop, tcb->dma_handle);
            tcb = tcb->Next;

        } while (tcb && tcb != (cppi_tcb *) pTxCtl->TcbPool);

        tcb = (cppi_tcb *) pTxCtl->TxActQueueHead;
        do
        {
            if (!tcb)
                break;

            logf(" Tcb (act): %x\n", (uint32_t) tcb);
            logf(" HNext %x BufPtr %x Off_BLen %x mode %x\n", tcb->HNext, tcb->BufPtr, tcb->Off_BLen, tcb->mode);
            logf(" Next %x Eop %x dma_handle %x\n", (uint32_t) tcb->Next, (uint32_t) tcb->Eop, tcb->dma_handle);
            tcb = tcb->Next;

        } while (tcb && tcb != (cppi_tcb *) pTxCtl->TxActQueueTail);
    }
}

/**
 * This function is called to indicate to the CPHAL that the upper layer
 * software has finished processing the receive data (given to it by
 * osReceive()). The CPHAL will then return the appropriate receive buffers
 * and buffer descriptors to the available pool.
 */
int tnetv_cppi_rx_return(struct cppi_info *cppi, int ch, cppi_rcb *done_rcb)
{
    cppi_rxcntl *pRxCtl = &cppi->rx_ctl[ch];
    cppi_rcb *curRcb, *lastRcb, *endRcb;
    int num_bufs = 0;

    if (!done_rcb)
        return -1;

    //cppi_log_event3("[cppi] rx_return (last) (first) bufinq", (uint32_t) done_rcb, (uint32_t) done_rcb->Eop, tnetv_usb_reg_read(TNETV_USB_RX_FREE_BUF_CNT(ch)));

    curRcb = done_rcb;
    endRcb = done_rcb->Eop;
    do
    {
        curRcb->mode = CB_OWNERSHIP_BIT;
        curRcb->Off_BLen = MAX_BUF_SIZE;
        curRcb->Eop = 0;

        pRxCtl->RxActQueueCount++;
        num_bufs++;

        lastRcb = curRcb;
        curRcb = lastRcb->Next;

        consistent_sync(lastRcb, sizeof(*lastRcb), DMA_TO_DEVICE);

    } while (lastRcb != endRcb);

    cppi_log_event1("[cppi] rx_return done", num_bufs);

    // let the hardware know about the buffer(s)
    tnetv_usb_reg_write(TNETV_USB_RX_FREE_BUF_CNT(ch), num_bufs);

    return 0;
}
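
/* Usage note (added commentary): done_rcb->Eop must point at the last RCB
 * of the chain being returned before this is called; the loop above walks
 * from done_rcb to Eop re-arming each descriptor. Both callers in this
 * file follow that pattern:
 *
 *     sop_rcb->Eop = last_rcb;
 *     tnetv_cppi_rx_return(cppi, ch, sop_rcb);
 */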

int tnetv_cppi_rx_int_recv(struct cppi_info *cppi, int ch, int *buf_size, void *buf, int maxpacket)
{
    cppi_rxcntl *pRxCtl = &cppi->rx_ctl[ch];
    cppi_rcb *CurrentRcb, *LastRcb = 0, *SopRcb;
    uint8_t *cur_buf_data_addr;
    int cur_buf_bytes;
    int copy_buf_size = *buf_size;
    int ret = -EAGAIN;

    *buf_size = 0;

    CurrentRcb = pRxCtl->RxFakeRcvHead;
    if (!CurrentRcb)
    {
        cppi_log_event2("[cppi] rx_int recv: nothing in q", tnetv_usb_reg_read(TNETV_USB_RX_INT_STATUS), tnetv_usb_reg_read(DM320_VLYNQ_INTST));
        return -1;
    }

    cppi_log_event1("[cppi] rx_int recv (ch)", ch);
    cppi_log_event4(" [cppi] recv - Processing SOP descriptor fb hd tl", (uint32_t) CurrentRcb, CurrentRcb->fake_bytes, (uint32_t) pRxCtl->RxFakeRcvHead, (uint32_t) pRxCtl->RxFakeRcvTail);

    SopRcb = CurrentRcb;
    LastRcb = 0;

    do
    {
        // convert from vlynq phys to virt
        cur_buf_data_addr = (uint8_t *) __vlynq_phys_to_dma(CurrentRcb->BufPtr);
        cur_buf_data_addr = (uint8_t *) __phys_to_virt(cur_buf_data_addr);
        cur_buf_bytes = (CurrentRcb->mode) & CB_SIZE_MASK;

        // make sure we don't overflow the buffer
        if (cur_buf_bytes > copy_buf_size)
        {
            ret = 0;
            break;
        }

        // BEN - packet can be ZLP
        if (cur_buf_bytes)
        {
            consistent_sync(cur_buf_data_addr, MAX_BUF_SIZE, DMA_FROM_DEVICE);

            memcpy((buf + *buf_size), cur_buf_data_addr, cur_buf_bytes);

            copy_buf_size -= cur_buf_bytes;
            *buf_size += cur_buf_bytes;
            CurrentRcb->fake_bytes -= cur_buf_bytes;
        }
        else
        {
            CurrentRcb->fake_bytes = 0;
        }

        cppi_log_event4(" [cppi] bytes totrcvd amtleft fake", cur_buf_bytes, *buf_size, copy_buf_size, CurrentRcb->fake_bytes);

        LastRcb = CurrentRcb;
        CurrentRcb = LastRcb->Next;

        // sync out fake bytes info
        consistent_sync(LastRcb, sizeof(*LastRcb), DMA_TO_DEVICE);

        // make sure each packet is processed individually
        if (cur_buf_bytes < maxpacket)
        {
            ret = 0;
            break;
        }

    } while (LastRcb != pRxCtl->RxFakeRcvTail && CurrentRcb->fake_bytes && copy_buf_size > 0);

    // make sure that the CurrentRcb isn't in the cache
    consistent_sync(CurrentRcb, sizeof(*CurrentRcb), DMA_FROM_DEVICE);

    if (copy_buf_size == 0)
    {
        ret = 0;
    }

    if (LastRcb)
    {
        SopRcb->Eop = LastRcb;

        cppi_log_event3(" [cppi] rcv end", *buf_size, (uint32_t) CurrentRcb, (uint32_t) SopRcb->Eop);

        if (LastRcb == pRxCtl->RxFakeRcvTail)
        {
            pRxCtl->RxFakeRcvHead = 0;
            pRxCtl->RxFakeRcvTail = 0;
        }
        else
        {
            pRxCtl->RxFakeRcvHead = CurrentRcb;
        }

        cppi_log_event1(" [cppi] st rx return", ch);
        cppi_log_event2(" rcv fake hd tl", (uint32_t) pRxCtl->RxFakeRcvHead, (uint32_t) pRxCtl->RxFakeRcvTail);

        // all done, clean up the RCBs
        tnetv_cppi_rx_return(cppi, ch, SopRcb);
    }

    return ret;
}
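
/* Hypothetical usage sketch (not part of the original driver): draining a
 * completed OUT transfer into a request buffer. On entry the size argument
 * holds the room available; on exit it holds the bytes copied. A return of
 * 0 from tnetv_cppi_rx_int_recv() means a complete (possibly short) packet
 * was consumed, -EAGAIN that more data is still expected.
 * drain_out_endpoint() is an illustrative name. */
#if 0
static int drain_out_endpoint(struct cppi_info *cppi, int ch,
                              void *req_buf, int room, int maxpacket)
{
    int len = room;

    if (tnetv_cppi_rx_int_recv(cppi, ch, &len, req_buf, maxpacket) == 0)
        return len;   /* bytes received; RCBs already recycled */

    return -1;        /* nothing complete yet, try again on next rx int */
}
#endif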

/*
 * This function processes receive interrupts. It traverses the receive
 * buffer queue, extracting the data and passing it to the upper layer
 * software via osReceive(). It handles all error conditions and fragments
 * without valid data by immediately returning the RCBs to the RCB pool.
 */
int tnetv_cppi_rx_int(struct cppi_info *cppi, int ch)
{
    cppi_rxcntl *pRxCtl = &cppi->rx_ctl[ch];
    cppi_rcb *CurrentRcb, *LastRcb = 0, *SopRcb;
    uint32_t RxBufStatus, PacketsServiced;
    int TotalFrags;

    cppi_log_event1("[cppi] rx_int (ch)", ch);

    CurrentRcb = pRxCtl->RxActQueueHead;

    if (!CurrentRcb)
    {
        cppi_log_event1("[cppi] rx_int no bufs!", (uint32_t) CurrentRcb);
        return -1;
    }

    // invalidate the cache for all of the buffers
    consistent_sync(pRxCtl->RcbStart, sizeof(cppi_rcb) * CPPI_RX_NUM_BUFS, DMA_FROM_DEVICE);

    RxBufStatus = CurrentRcb->mode;
    PacketsServiced = 0;

    cppi_log_event4("[cppi] currentrcb, mode numleft fake", (uint32_t) CurrentRcb, CurrentRcb->mode, pRxCtl->RxActQueueCount, CurrentRcb->fake_bytes);
    cppi_log_event4("[cppi]", ((uint32_t *) CurrentRcb)[0], ((uint32_t *) CurrentRcb)[1], ((uint32_t *) CurrentRcb)[2], ((uint32_t *) CurrentRcb)[3]);

    while (((RxBufStatus & CB_OWNERSHIP_BIT) == 0) && (pRxCtl->RxActQueueCount > 0))
    {
        cppi_log_event2(" [cppi]Processing SOP descriptor st", (uint32_t) CurrentRcb, RxBufStatus);

        SopRcb = CurrentRcb;

        TotalFrags = 0;

        do
        {
            TotalFrags++;
            PacketsServiced++;

            // Write the completion pointer
            tnetv_usb_reg_write(TNETV_DMA_RX_CMPL(ch), __dma_to_vlynq_phys(CurrentRcb->dma_handle));

            CurrentRcb->fake_bytes = (CurrentRcb->mode) & 0xFFFF;

            // BEN - make sure this gets marked!
            if (!CurrentRcb->fake_bytes || (CurrentRcb->mode & CB_ZLP_GARBAGE))
            {
                CurrentRcb->mode &= 0xFFFF0000;
                CurrentRcb->fake_bytes = 0x10000;
            }

            cppi_log_event1(" fake_bytes:", CurrentRcb->fake_bytes);

            RxBufStatus = CurrentRcb->mode;
            LastRcb = CurrentRcb;
            CurrentRcb = LastRcb->Next;

            // sync the fake_bytes value back to mem
            consistent_sync(LastRcb, sizeof(*LastRcb), DMA_TO_DEVICE);

        } while (((CurrentRcb->mode & CB_OWNERSHIP_BIT) == 0) && ((RxBufStatus & CB_EOF_BIT) == 0));

        SopRcb->Eop = LastRcb;

        pRxCtl->RxActQueueHead = CurrentRcb;
        pRxCtl->RxActQueueCount -= TotalFrags;

        if (LastRcb->mode & CB_EOQ_BIT)
        {
            if (CurrentRcb)
            {
                cppi_log_event1(" [cppi] rcv done q next", LastRcb->HNext);
                tnetv_usb_reg_write(TNETV_DMA_RX_STATE(ch, TNETV_CPPI_RX_WORD_HDP), LastRcb->HNext);
            }
            else
            {
                cppi_log_event0(" [cppi] rcv done");

                pRxCtl->RxActive = 0;
            }
        }

        // BEN - add to the list of buffers we need to deal with
        if (!pRxCtl->RxFakeRcvHead)
        {
            pRxCtl->RxFakeRcvHead = SopRcb;
            pRxCtl->RxFakeRcvTail = SopRcb->Eop;
        }
        else
        {
            pRxCtl->RxFakeRcvTail = SopRcb->Eop;
        }

        // make sure we have enough buffers
        cppi_log_event1(" nextrcb", CurrentRcb->mode);

        if (CurrentRcb)
        {
            // continue the loop
            RxBufStatus = CurrentRcb->mode;
        }

    } /* while */

    cppi_log_event2("[cppi] fake hd tl", (uint32_t) pRxCtl->RxFakeRcvHead, (uint32_t) pRxCtl->RxFakeRcvTail);

    // sync out all buffers before leaving
    consistent_sync(pRxCtl->RcbStart, (CPPI_RX_NUM_BUFS * sizeof(cppi_rcb)), DMA_FROM_DEVICE);

    return PacketsServiced;
}

static void tnetv_cppi_rx_queue_init(struct cppi_info *cppi, int ch, dma_addr_t buf, unsigned length)
{
    cppi_rxcntl *pRxCtl = &cppi->rx_ctl[ch];
    cppi_rcb *rcb, *first_rcb;
    unsigned int queued_len = 0;
    int rcblen;
    int num_frags = 0;
    dma_addr_t buf_ptr;

    if (length == 0)
    {
        cppi_log_event0("[cppi] len = 0, nothing to recv");
        return;
    }

    // usb_requests can have a 32 bit length, but CPPI DMA fragments
    // have a (64k - 1) limit. Split the usb_request up into fragments here.
    first_rcb = pRxCtl->RcbPool;
    rcb = first_rcb;

    cppi_log_event2("[cppi] Rx queue add: head len", (uint32_t) first_rcb, length);

    while (queued_len < length)
    {
        buf_ptr = buf + queued_len;
        rcb->BufPtr = __dma_to_vlynq_phys(buf_ptr);

        rcb->HNext = 0;
        rcb->mode = CB_OWNERSHIP_BIT;

        rcblen = ((length - queued_len) > MAX_BUF_SIZE) ? MAX_BUF_SIZE : (length - queued_len);
        rcb->Off_BLen = rcblen;

        queued_len += rcblen;
        if (queued_len < length)
        {
            rcb->HNext = __dma_to_vlynq_phys(((cppi_rcb *) (rcb->Next))->dma_handle);
            rcb = rcb->Next;
        }

        num_frags++;
    }

    pRxCtl->RcbPool = rcb->Next;
    rcb->Next = 0;

    cppi_log_event4("[cppi] Adding Rcb (dma) (paddr) (buf)", (uint32_t) rcb, rcb->dma_handle, __dma_to_vlynq_phys(rcb->dma_handle), (uint32_t) rcb->BufPtr);
    cppi_log_event4("[cppi] Next HNext (len) of (total)", (uint32_t) rcb->Next, rcb->HNext, queued_len, length);

    pRxCtl->RxActQueueCount += num_frags;

    cppi_log_event4("[cppi] rx queued (ptr) (len) (ftcb, ltcb)", (uint32_t) rcb->BufPtr, rcb->Off_BLen, (uint32_t) first_rcb, (uint32_t) rcb);
    cppi_log_event2(" [cppi] mode num_frags", rcb->mode, num_frags);

    pRxCtl->RxActQueueHead = first_rcb;
    pRxCtl->RxActQueueTail = rcb;

    cppi_log_event2("[cppi] setting rx (head) (virt)", (uint32_t) first_rcb, __dma_to_vlynq_phys(first_rcb->dma_handle));
    cppi_log_event4("[cppi] ", ((uint32_t *) first_rcb)[0], ((uint32_t *) first_rcb)[1], ((uint32_t *) first_rcb)[2], ((uint32_t *) first_rcb)[3]);

    // make this into a circular buffer so we never get caught with
    // no free buffers left
    rcb->Next = pRxCtl->RxActQueueHead;
    rcb->HNext = (uint32_t) (__dma_to_vlynq_phys(pRxCtl->RxActQueueHead->dma_handle));
}

int tnetv_cppi_rx_queue_add(struct cppi_info *cppi, int ch, dma_addr_t buf, unsigned length)
{
    cppi_rxcntl *pRxCtl = &cppi->rx_ctl[ch];
    unsigned int cur_bufs;

    (void)buf;
    (void)length;

    cur_bufs = tnetv_usb_reg_read(TNETV_USB_RX_FREE_BUF_CNT(ch));

    if (!pRxCtl->RxActive)
    {
        cppi_log_event0("[cppi] queue add - not active");

        pRxCtl->RcbPool = (cppi_rcb *) pRxCtl->RcbStart;

        // add all the buffers to the active (circular) queue
        tnetv_cppi_rx_queue_init(cppi, ch, (dma_addr_t) __virt_to_phys(dma_recv_buf[ch]), (MAX_BUF_SIZE * pRxCtl->RxNumBuffers));

        /* write Rx Queue Head Descriptor Pointer */
        tnetv_usb_reg_write(TNETV_DMA_RX_STATE(ch, TNETV_CPPI_RX_WORD_HDP), __dma_to_vlynq_phys(pRxCtl->RxActQueueHead->dma_handle));

        pRxCtl->RxActive = 1;

        // sync out all buffers before starting
        consistent_sync(pRxCtl->RcbStart, (CPPI_RX_NUM_BUFS * sizeof(cppi_rcb)), DMA_TO_DEVICE);

        // sync out temp rx buffer
        consistent_sync(dma_recv_buf[ch], CPPI_DMA_RX_BUF_SIZE, DMA_FROM_DEVICE);

        if (cur_bufs < pRxCtl->RxActQueueCount)
        {
            // let the hardware know about the buffer(s)
            tnetv_usb_reg_write(TNETV_USB_RX_FREE_BUF_CNT(ch), pRxCtl->RxActQueueCount - cur_bufs);
        }
    }

    cppi_log_event3("[cppi] rx add: (cur_bufs) (avail_bufs) (now)", cur_bufs, pRxCtl->RxActQueueCount, tnetv_usb_reg_read(TNETV_USB_RX_FREE_BUF_CNT(ch)));

    return 0;
}

int tnetv_cppi_flush_rx_queue(struct cppi_info *cppi, int ch)
{
    cppi_rxcntl *pRxCtl = &cppi->rx_ctl[ch];
    cppi_rcb *rcb;
    int num_bufs;

    cppi_log_event1("[cppi] flush RX ", (uint32_t) pRxCtl->RxActQueueHead);

    // flush out any pending receives
    tnetv_cppi_rx_int(cppi, ch);

    // now discard all received data
    rcb = pRxCtl->RxFakeRcvHead;

    if (rcb)
    {
        rcb->Eop = pRxCtl->RxFakeRcvTail;

        // clean up any unreceived RCBs
        tnetv_cppi_rx_return(cppi, ch, rcb);
    }

    pRxCtl->RxFakeRcvHead = 0;
    pRxCtl->RxFakeRcvTail = 0;

    pRxCtl->RxActive = 0;

    // drain the HW free buffer count
    num_bufs = tnetv_usb_reg_read(TNETV_USB_RX_FREE_BUF_CNT(ch));
    tnetv_usb_reg_write(TNETV_USB_RX_FREE_BUF_CNT(ch), -num_bufs);

    cppi_log_event2("[cppi] flush RX queue done (freed) act: ", num_bufs, (uint32_t) pRxCtl->RxActQueueCount);

    return 0;
}

/*
 * This function allocates receive buffer descriptors (internal CPHAL function).
 * After allocation, the function 'queues' (gives to the hardware) the newly
 * created receive buffers to enable packet reception.
 *
 * @param ch Channel number.
 *
 * @return 0 OK, Non-Zero Not OK
 */
int tnetv_cppi_init_rcb(struct cppi_info *cppi, int ch)
{
    int i, num;
    cppi_rcb *pRcb;
    char *AllRcb;
    int rcbSize;
    cppi_rxcntl *pRxCtl = &cppi->rx_ctl[ch];

    num = pRxCtl->RxNumBuffers;
    rcbSize = (sizeof(cppi_rcb) + 0xf) & ~0xf;

    cppi_log_event2("[cppi] init_rcb ch num", ch, num);

    if (pRxCtl->RxNumBuffers == 0)
    {
        return -1;
    }

    /* if the memory has already been allocated, simply reuse it! */
    AllRcb = pRxCtl->RcbStart;

    // now reinitialize the RCB pool
    pRxCtl->RcbPool = 0;
    for (i = (num - 1); i >= 0; i--)
    {
        pRcb = (cppi_rcb *)(AllRcb + (i * rcbSize));

        pRcb->dma_handle = pRxCtl->rcb_start_dma_addr + (i * rcbSize);

        pRcb->BufPtr = 0;
        pRcb->mode = 0;
        pRcb->HNext = 0;
        pRcb->Next = (void *) pRxCtl->RcbPool;
        pRcb->Off_BLen = 0;
        pRcb->Eop = 0;
        pRcb->fake_bytes = 0;

        pRxCtl->RcbPool = pRcb;
    }

    cppi_log_event2(" [cppi]RcbPool (dma)", (uint32_t) pRxCtl->RcbPool, pRxCtl->RcbPool->dma_handle);

    pRxCtl->RxActQueueCount = 0;
    pRxCtl->RxActQueueHead = 0;
    pRxCtl->RxActive = 0;

    pRxCtl->RxFakeRcvHead = 0;
    pRxCtl->RxFakeRcvTail = 0;

    return 0;
}

static uint8_t ch_buf_cnt[][2] = {
    {CPPI_RX_NUM_BUFS, 2},  // ch0: bulk out/in
    {CPPI_RX_NUM_BUFS, 2},  // ch1: bulk out/in
    {0, 2},                 // ch2: interrupt
    {0, 2}                  // ch3: interrupt
};
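
/* Added commentary (not in the original source): per the table above,
 * channels 0 and 1 each get CPPI_RX_NUM_BUFS receive descriptors plus 2
 * transmit descriptors, while the interrupt channels get 2 transmit
 * descriptors each. The total checked against CPPI_INFO_MEM_SIZE in
 * tnetv_cppi_init() below is therefore:
 *
 *     2 * CPPI_RX_NUM_BUFS * sizeof(cppi_rcb) + 8 * sizeof(cppi_tcb)
 */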

void tnetv_cppi_init(struct cppi_info *cppi)
{
    int ch;
    uint8_t *alloc_ptr;
    int ch_mem_size[CPPI_NUM_CHANNELS];

    // wipe cppi memory
    memset(cppi, 0, sizeof(*cppi));

    // find out how much memory we need to allocate
    cppi->dma_size = 0;
    for (ch = 0; ch < CPPI_NUM_CHANNELS; ch++)
    {
        ch_mem_size[ch] = (ch_buf_cnt[ch][0] * sizeof(cppi_rcb)) + (ch_buf_cnt[ch][1] * sizeof(cppi_tcb));
        cppi->dma_size += ch_mem_size[ch];
    }

    // the DMA-able memory is statically allocated, so just sanity check the size
    if (cppi->dma_size != CPPI_INFO_MEM_SIZE)
    {
        panicf("Invalid dma size expected %d got %d", cppi->dma_size, CPPI_INFO_MEM_SIZE);
    }
    cppi->dma_handle = (dma_addr_t) __virt_to_phys(cppi->dma_mem);

    memset(cppi->dma_mem, 0, cppi->dma_size);

    cppi_log_event2("[cppi] all CBs sz mem", cppi->dma_size, (uint32_t) cppi->dma_mem);

    // now set up the pointers
    alloc_ptr = cppi->dma_mem;
    for (ch = 0; ch < CPPI_NUM_CHANNELS; ch++)
    {
        cppi->rx_ctl[ch].RxNumBuffers = ch_buf_cnt[ch][0];
        cppi->rx_ctl[ch].RcbStart = alloc_ptr;
        cppi->rx_ctl[ch].rcb_start_dma_addr = (dma_addr_t) __virt_to_phys(alloc_ptr);
        alloc_ptr += (ch_buf_cnt[ch][0] * sizeof(cppi_rcb));

        cppi->tx_ctl[ch].TxNumBuffers = ch_buf_cnt[ch][1];
        cppi->tx_ctl[ch].TcbStart = alloc_ptr;
        cppi->tx_ctl[ch].tcb_start_dma_addr = (dma_addr_t) __virt_to_phys(alloc_ptr);
        alloc_ptr += (ch_buf_cnt[ch][1] * sizeof(cppi_tcb));

        cppi_log_event3("[cppi] alloc bufs: ch dmarcb dmatcb", ch, cppi->rx_ctl[ch].rcb_start_dma_addr, cppi->tx_ctl[ch].tcb_start_dma_addr);

        // set up receive buffer
        if (ch_buf_cnt[ch][0])
        {
            dma_recv_buf[ch] = (ch == 0) ? ch0_rx_buf : ((ch == 1) ? ch1_rx_buf : 0);
            cppi_log_event3("[cppi] Alloc fake DMA buf ch", ch, (uint32_t) dma_recv_buf[ch], (uint32_t) __virt_to_phys(dma_recv_buf[ch]));
        }
        else
        {
            dma_recv_buf[ch] = 0;
        }
    }
}
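
/* Hypothetical bring-up sketch (ordering inferred from this file, not a
 * verbatim copy of the driver): carve the descriptor memory, build the
 * per-channel pools, then arm reception on the bulk channels. */
#if 0
static void cppi_bring_up(struct cppi_info *cppi)
{
    int ch;

    tnetv_cppi_init(cppi);              /* partition descriptor memory */

    for (ch = 0; ch < CPPI_NUM_CHANNELS; ch++)
    {
        tnetv_cppi_init_rcb(cppi, ch);  /* RX pool (no-op for TX-only channels) */
        tnetv_cppi_init_tcb(cppi, ch);  /* TX pool */
    }

    /* buf/length are ignored: the circular RX ring is built internally
       from the static dma_recv_buf[] fake DMA buffers */
    tnetv_cppi_rx_queue_add(cppi, 0, 0, 0);
    tnetv_cppi_rx_queue_add(cppi, 1, 0, 0);
}
#endif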

void tnetv_cppi_cleanup(struct cppi_info *cppi)
{
    cppi_log_event0("wipe cppi mem");

    // wipe cppi memory
    memset(cppi, 0, sizeof(*cppi));
}