author    Cástor Muñoz <cmvidal@gmail.com>  2014-12-06 18:33:11 +0100
committer Cástor Muñoz <cmvidal@gmail.com>  2015-10-07 06:15:03 +0200
commit    d6ee2c9eafbf5381695d1c7eb01801855c85222b (patch)
tree      a6ae8fb8deeebfece6f110a0e44643fa8194662e /firmware/target/arm/s5l8702
parent    609cde94689b20098be1c36cc5157514b6dd63a4 (diff)
iPod Classic: introduce PL080 DMA controller driver
Motivation: this driver began as a set of functions to help test and
experiment with different DMA configurations. Handling LLIs and DMA
registers dispersed throughout the code is cumbersome, time consuming,
and error prone. Later, I decided to adapt an old DMA queue driver
written in the past for a similar (scatter-gather) controller; all
task/queue code is based on that old driver. Finally, some cleanup was
done and the dmac_ch_get_info() function was added to complete
Rockbox's needs.

Description:

- Generic: can be used by other targets that include the same
  controller, and is not difficult to adapt to other similar
  controllers if necessary.

- Easy to experiment and compare results using different setups and/or
  queue algorithms: multi-controller, and fully configurable from a
  single place. All task and LLI management is done by the driver; the
  user only has to allocate them (statically).

- Two queue modes:

  QUEUE_NORMAL: each task in the queue is launched using a new DMA
  transfer once the previous task has finished.

  QUEUE_LINK: when a task is queued, it is linked with the last queued
  task, creating a single continuous DMA transfer. New tasks must be
  queued while the channel is running, otherwise the continuous DMA
  transfer will be broken.

  On Classic, QUEUE_LINK mode is needed for continuous I2S transfers;
  QUEUE_NORMAL is used for the LCD and could be useful in the future
  for I2C or UART (non-blocking serial debug) if necessary.

- Robust DMA transfer progress info (peak meter); needs final testing,
  see below.

Technical details about DMA progress:

There are comments in the code describing the method actually used
(the "sequence" method), which reads progress without halting the DMA
transfer. Although the datasheet does not recommend doing this, the
sequence method seems to be robust: I ran tests calling
dmac_ch_get_info() millions of times and the results were always as
expected (tests were done at a 2:1 CPU/AHB clock ratio; no other
ratios were tried, but the sequence method will probably work for any
typical ratio).

This controller allows halting the transfer and draining the DMAC
FIFO; DMA requests are ignored while the DMA channel is halted. This
method is not suitable for playback because the FIFO is never drained
to the I2S peripheral (which raises the DMA requests). It probably
works for capture, where the FIFO is drained to memory before halting.

Another option is to disable (stop) the playback channel. When the
channel is disabled, all FIFO data is lost. It is unknown how much the
FIFO was filled when it was cleared; the SRCADDR counter includes the
lost data, so the only useful information is LINK and COUNT, which is
the same information available when using the sequence method. At that
point we must proceed in the same way as in the sequence method, and
in addition the playback channel must be relaunched (configure +
start) after calculating the real SRCADDR. This stop+relaunch method
should work, but it is a bit complicated and not valid for all
peripheral FIFO configurations (depending on the stream rate).
Moreover, due to the way the COUNT register is implemented in HW, I
suspect this method will fail when source and destination bus widths
don't match. More importantly, it is not easy to guarantee that no
sample is lost here or there; with the sequence method we can always
be sure that playback is correct.

Change-Id: Ib12a1e2992e2b6da4fc68431128c793a21b4b540
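For orientation, here is a minimal usage sketch of the API introduced
by this file. The struct fields are the ones referenced by pl080.c;
the concrete values (register base, peripheral request numbers, buffer
sizes, bus widths, FIFO address) are illustrative assumptions only,
since the real definitions live in pl080.h and the target code, not in
this diff:

    #include "pl080.h"

    /* the user statically allocates LLI and task buffers; sizes are
       assumed to be powers of two so they match the *_mask fields */
    static struct dmac_lli lli_buf[16] CACHEALIGN_ATTR;
    static struct dmac_tsk tsk_buf[4];

    static struct dmac dmac0 = {
        .baddr = 0x38200000,         /* hypothetical DMAC register base */
        .m1 = 1, .m2 = 1,            /* AHB master config (assumed) */
    };

    static struct dmac_ch_cfg ch_cfg = {
        .srcperi = DMAC_PERI_NONE,   /* source is memory */
        .dstperi = 5,                /* hypothetical peripheral request */
        .swidth = 1, .dwidth = 1,    /* log2(bytes): halfword transfers */
        .sbsize = 0, .dbsize = 0,    /* burst sizes (encoded) */
        .sbus = 1, .dbus = 1,        /* AHB master used for each side */
        .sinc = 1, .dinc = 0,        /* increment source, fixed dest */
        .prot = 0,
        .lli_xfer_max_count = 0xfff, /* PL080 COUNT field is 12 bits */
    };

    static struct dmac_ch ch = {
        .dmac = &dmac0,
        .prio = 1,                   /* channel number, 0 = top priority */
        .queue_mode = QUEUE_NORMAL,  /* one DMA transfer per queued task */
        .cb_fn = NULL,               /* optional per-task callback */
        .tskbuf = tsk_buf,  .tskbuf_mask = 3,
        .llibuf = lli_buf,  .llibuf_mask = 15,
        .llibuf_bus = 0,             /* AHB master for LLI fetches */
    };

    void example(void *buf, size_t size)
    {
        dmac_open(&dmac0);
        dmac_ch_init(&ch, &ch_cfg);

        /* width == stride selects plain 1D->1D mode; the driver then
           splits the transfer into LLIs of lli_xfer_max_count units */
        void *fifo = (void*)0x3cc00040;  /* hypothetical FIFO address */
        dmac_ch_queue_2d(&ch, buf, fifo, size, size, size, NULL);

        /* read progress without stopping the channel (sequence method) */
        size_t bytes, t_bytes;
        dmac_ch_get_info(&ch, &bytes, &t_bytes);
    }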
Diffstat (limited to 'firmware/target/arm/s5l8702')
-rw-r--r--  firmware/target/arm/s5l8702/pl080.c  580
1 file changed, 580 insertions, 0 deletions
diff --git a/firmware/target/arm/s5l8702/pl080.c b/firmware/target/arm/s5l8702/pl080.c
new file mode 100644
index 0000000000..d7b3a3173f
--- /dev/null
+++ b/firmware/target/arm/s5l8702/pl080.c
@@ -0,0 +1,580 @@
/***************************************************************************
 *             __________               __   ___.
 *   Open      \______   \ ____   ____ |  | _\_ |__   _______  ___
 *   Source     |       _//  _ \_/ ___\|  |/ /| __ \ /  _ \  \/  /
 *   Jukebox    |    |   (  <_> )  \___|    < | \_\ (  <_> > <  <
 *   Firmware   |____|_  /\____/ \___  >__|_ \|___  /\____/__/\_ \
 *                     \/            \/     \/    \/            \/
 * $Id$
 *
 * Copyright (C) 2014 by Cástor Muñoz
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version 2
 * of the License, or (at your option) any later version.
 *
 * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY
 * KIND, either express or implied.
 *
 ****************************************************************************/
#include "config.h"
#include <stddef.h>
#include "system.h"
#include "pl080.h"
#include "panic.h"

/*
 * ARM PrimeCell PL080 Multiple Master DMA controller
 */

/*#define PANIC_DEBUG*/
#ifdef PANIC_DEBUG
void dmac_ch_panicf(const char *fn, struct dmac_ch* ch)
{
    char *err = NULL;

    if (!ch)
        err = "NULL channel";
    else if (!ch->dmac)
        err = "NULL ch->dmac";
    else if (ch->dmac->ch_l[ch->prio] != ch)
        err = "not initialized channel";

    if (err)
        panicf("%s(): <%d> %s", fn, ch ? (int)ch->prio : -1, err);
}
#define PANIC_DEBUG_CHANNEL(ch) dmac_ch_panicf(__func__,(ch))
#else
#define PANIC_DEBUG_CHANNEL(ch) {}
#endif

/* task helpers: 'tasks_queued' and 'tasks_done' are free-running
   counters masked onto the circular task buffer (so buffer sizes
   must be powers of two); the queue is empty when both counters
   are equal */
static inline struct dmac_tsk *dmac_ch_tsk_by_idx(
        struct dmac_ch *ch, uint32_t idx)
{
    return ch->tskbuf + (idx & ch->tskbuf_mask);
}
#define CH_TSK_TOP(ch)  dmac_ch_tsk_by_idx((ch), (ch)->tasks_queued)
#define CH_TSK_TAIL(ch) dmac_ch_tsk_by_idx((ch), (ch)->tasks_done)

static inline bool dmac_ch_task_queue_empty(struct dmac_ch *ch)
{
    return (ch->tasks_done == ch->tasks_queued);
}

/* enable/disable DMA controller */
static inline void dmac_hw_enable(struct dmac *dmac)
{
    DMACCONFIG(dmac->baddr) |= DMACCONFIG_E_BIT;
}

static inline void dmac_hw_disable(struct dmac *dmac)
{
    DMACCONFIG(dmac->baddr) &= ~DMACCONFIG_E_BIT;
}

/* enable/disable DMA channel */
static inline void dmac_ch_enable(struct dmac_ch *ch)
{
    DMACCxCONFIG(ch->baddr) |= DMACCxCONFIG_E_BIT;
}

static inline void dmac_ch_disable(struct dmac_ch *ch)
{
    uint32_t baddr = ch->baddr;

    /* Disable the channel; the FIFO is cleared after the
       current AHB transfer completes */
    DMACCxCONFIG(baddr) &= ~DMACCxCONFIG_E_BIT;
    /* Wait for it to go inactive */
    while (DMACCxCONFIG(baddr) & DMACCxCONFIG_A_BIT);
}

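/* Kept for reference, not currently used: per the commit message,
   halting drains the channel FIFO, which may suit capture (data is
   drained to memory) but not playback, where the FIFO contents would
   never reach the I2S peripheral. dmac_ch_get_info() reads progress
   with the non-halting sequence method instead. */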
#if 0
static void dmac_ch_halt(struct dmac_ch *ch)
{
    uint32_t baddr = ch->baddr;

    /* Halt the channel: subsequent DMA requests are ignored
       and the contents of the FIFO are drained */
    DMACCxCONFIG(baddr) |= DMACCxCONFIG_H_BIT;
    /* Wait for it to go inactive */
    while (DMACCxCONFIG(baddr) & DMACCxCONFIG_A_BIT);
    /* Disable channel and restore Halt bit */
    DMACCxCONFIG(baddr) &= ~(DMACCxCONFIG_H_BIT | DMACCxCONFIG_E_BIT);
}
#endif

/* launch next task in queue */
static void ICODE_ATTR dmac_ch_run(struct dmac_ch *ch)
{
    struct dmac *dmac = ch->dmac;

    if (!dmac->ch_run_status)
        dmac_hw_enable(dmac);
    dmac->ch_run_status |= (1 << ch->prio);

    /* Clear any pending interrupts leftover from previous operation */
    /*DMACINTTCCLR(dmac->baddr) = (1 << ch->prio);*/ /* not needed */

    /* copy whole LLI to HW registers */
    *DMACCxLLI(ch->baddr) = *(CH_TSK_TAIL(ch)->start_lli);

    dmac_ch_enable(ch);
}

static void ICODE_ATTR dmac_ch_abort(struct dmac_ch* ch)
{
    struct dmac *dmac = ch->dmac;

    dmac_ch_disable(ch);

    /* Clear any pending interrupt */
    DMACINTTCCLR(dmac->baddr) = (1 << ch->prio);

    dmac->ch_run_status &= ~(1 << ch->prio);
    if (!dmac->ch_run_status)
        dmac_hw_disable(dmac);
}

/* ISR */
static inline void dmac_ch_callback(struct dmac_ch *ch)
{
    PANIC_DEBUG_CHANNEL(ch);

    /* backup current task cb_data */
    void *cb_data = CH_TSK_TAIL(ch)->cb_data;

    /* mark current task as finished (resources can be reused) */
    ch->tasks_done++;

    /* launch next DMA task */
    if (ch->queue_mode == QUEUE_NORMAL)
        if (!dmac_ch_task_queue_empty(ch))
            dmac_ch_run(ch);

    /* run user callback, new tasks could be launched/queued here */
    if (ch->cb_fn)
        ch->cb_fn(cb_data);

    /* disable DMA channel if there are no running tasks */
    if (dmac_ch_task_queue_empty(ch))
        dmac_ch_abort(ch);
}

void ICODE_ATTR dmac_callback(struct dmac *dmac)
{
    #ifdef PANIC_DEBUG
    if (!dmac)
        panicf("dmac_callback(): NULL dmac");
    #endif

    unsigned int ch_n;
    uint32_t baddr = dmac->baddr;
    uint32_t intsts = DMACINTSTS(baddr);

    /* Lowest channel index is serviced first */
    for (ch_n = 0; ch_n < DMAC_CH_COUNT; ch_n++) {
        if ((intsts & (1 << ch_n))) {
            if (DMACINTERRSTS(baddr) & (1 << ch_n))
                panicf("DMA ch%d: HW error", ch_n);

            /* clear terminal count interrupt */
            DMACINTTCCLR(baddr) = (1 << ch_n);

            dmac_ch_callback(dmac->ch_l[ch_n]);
        }
    }
}

/*
 * API
 */
void dmac_open(struct dmac *dmac)
{
    uint32_t baddr = dmac->baddr;
    int ch_n;

    dmac_hw_enable(dmac);

    DMACCONFIG(baddr) = ((dmac->m1 & DMACCONFIG_M1_MSK) << DMACCONFIG_M1_POS)
                      | ((dmac->m2 & DMACCONFIG_M2_MSK) << DMACCONFIG_M2_POS);

    for (ch_n = 0; ch_n < DMAC_CH_COUNT; ch_n++) {
        DMACCxCONFIG(DMAC_CH_BASE(baddr, ch_n)) = 0; /* disable channel */
        dmac->ch_l[ch_n] = NULL;
    }
    dmac->ch_run_status = 0;

    /* clear channel interrupts */
    DMACINTTCCLR(baddr) = 0xff;
    DMACINTERRCLR(baddr) = 0xff;

    dmac_hw_disable(dmac);
}

void dmac_ch_init(struct dmac_ch *ch, struct dmac_ch_cfg *cfg)
{
    #ifdef PANIC_DEBUG
    if (!ch)
        panicf("%s(): NULL channel", __func__);
    else if (!ch->dmac)
        panicf("%s(): NULL ch->dmac", __func__);
    else if (ch->dmac->ch_l[ch->prio])
        panicf("%s(): channel %d already initialized", __func__, ch->prio);
    uint32_t align_mask = (1 << MIN(cfg->swidth, cfg->dwidth)) - 1;
    /* use 'cfg' here: ch->cfg is not assigned yet */
    if (cfg->lli_xfer_max_count & align_mask)
        panicf("%s(): bad bus width: sw=%u dw=%u max_cnt=%u", __func__,
                cfg->swidth, cfg->dwidth, cfg->lli_xfer_max_count);
    #endif

    struct dmac *dmac = ch->dmac;
    int ch_n = ch->prio;

    dmac->ch_l[ch_n] = ch;

    ch->baddr = DMAC_CH_BASE(dmac->baddr, ch_n);
    ch->llibuf_top = ch->llibuf;
    ch->tasks_queued = 0;
    ch->tasks_done = 0;
    ch->cfg = cfg;

    ch->control =
        ((cfg->sbsize & DMACCxCONTROL_SBSIZE_MSK) << DMACCxCONTROL_SBSIZE_POS) |
        ((cfg->dbsize & DMACCxCONTROL_DBSIZE_MSK) << DMACCxCONTROL_DBSIZE_POS) |
        ((cfg->swidth & DMACCxCONTROL_SWIDTH_MSK) << DMACCxCONTROL_SWIDTH_POS) |
        ((cfg->dwidth & DMACCxCONTROL_DWIDTH_MSK) << DMACCxCONTROL_DWIDTH_POS) |
        ((cfg->sbus & DMACCxCONTROL_S_MSK) << DMACCxCONTROL_S_POS) |
        ((cfg->dbus & DMACCxCONTROL_D_MSK) << DMACCxCONTROL_D_POS) |
        ((cfg->sinc & DMACCxCONTROL_SI_MSK) << DMACCxCONTROL_SI_POS) |
        ((cfg->dinc & DMACCxCONTROL_DI_MSK) << DMACCxCONTROL_DI_POS) |
        ((cfg->prot & DMACCxCONTROL_PROT_MSK) << DMACCxCONTROL_PROT_POS);

    /* flow control notes:
     * - currently only master modes are supported (FLOWCNTRL_x_DMA).
     * - must use DMAC_PERI_NONE when srcperi and/or dstperi are memory.
     */
    uint32_t flowcntrl = (((cfg->srcperi != DMAC_PERI_NONE) << 1) |
            (cfg->dstperi != DMAC_PERI_NONE)) << DMACCxCONFIG_FLOWCNTRL_POS;

    DMACCxCONFIG(ch->baddr) =
        ((cfg->srcperi & DMACCxCONFIG_SRCPERI_MSK) << DMACCxCONFIG_SRCPERI_POS) |
        ((cfg->dstperi & DMACCxCONFIG_DESTPERI_MSK) << DMACCxCONFIG_DESTPERI_POS) |
        flowcntrl | DMACCxCONFIG_IE_BIT | DMACCxCONFIG_ITC_BIT;
}

void dmac_ch_lock_int(struct dmac_ch *ch)
{
    PANIC_DEBUG_CHANNEL(ch);

    int flags = disable_irq_save();
    DMACCxCONFIG(ch->baddr) &= ~DMACCxCONFIG_ITC_BIT;
    restore_irq(flags);
}

void dmac_ch_unlock_int(struct dmac_ch *ch)
{
    PANIC_DEBUG_CHANNEL(ch);

    int flags = disable_irq_save();
    DMACCxCONFIG(ch->baddr) |= DMACCxCONFIG_ITC_BIT;
    restore_irq(flags);
}

/* 2D->1D DMA transfers ('stride' applies to the source address,
 * matching the use of 'srcinc'/'dstinc' below):
 *
 *  srcaddr:            aaaaaaaaaaa.....
 *  srcaddr + stride:   bbbbbbbbbbb.....
 *  srcaddr + 2*stride: ccccccc.........
 *                      <-  stride   ->
 *                      <- width ->
 *
 *  dstaddr:            aaaaaaaaaaabbbbbbbbbbbccccccc
 *                      <-          size           ->
 *                      <- width -><- width -><- r ->
 *
 * 1D->1D DMA transfers:
 *
 *  If 'width' == 'stride', 'lli_xfer_max_count' is used as the LLI count.
 *
 * Queue modes:
 *
 *  QUEUE_NORMAL: each task in the queue is launched using a new
 *   DMA transfer once the previous task is finished.
 *
 *  QUEUE_LINK: when a task is queued, it is linked with the last
 *   queued task, creating a single continuous DMA transfer. New
 *   tasks must be queued while the channel is running, otherwise
 *   the continuous DMA transfer will be broken.
 *
 * Misc notes:
 *
 *  Arguments 'size', 'width' and 'stride' are in bytes.
 *
 *  The maximum supported 'width' depends on the bus 'swidth' size:
 *   maximum width = DMAC_LLI_MAX_COUNT << swidth
 *
 *  The user must supply 'srcaddr', 'dstaddr', 'width', 'size', 'stride'
 *  and 'lli_xfer_max_count' aligned to the configured source and
 *  destination bus widths, otherwise transfers will be internally
 *  aligned by the DMA hardware.
 */
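/* Worked example with hypothetical values: sending a 200x80 pixel
 * window of a 320-pixel-wide 16bpp framebuffer to an LCD FIFO
 * (sinc=1, dinc=0) would use width = 200*2 = 400 bytes per line,
 * stride = 320*2 = 640 bytes between line starts, and
 * size = 80*400 bytes:
 *
 *   dmac_ch_queue_2d(ch, win_addr, lcd_fifo, 80*400, 400, 640, NULL);
 *
 * This builds 80 LLIs, each transferring one 400-byte line, with the
 * source address advancing 640 bytes from one line to the next.
 */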
#define LLI_COUNT(lli) ((lli)->control & DMACCxCONTROL_COUNT_MSK)
#define LNK2LLI(link)  ((struct dmac_lli*) ((link) & ~3))

static inline void drain_write_buffer(void)
{
    asm volatile (
        "mcr p15, 0, %0, c7, c10, 4\n"
        : : "r"(0));
}

static inline void clean_dcache_line(void volatile *addr)
{
    asm volatile (
        "mcr p15, 0, %0, c7, c10, 1\n" /* clean d-cache line by MVA */
        : : "r"((uint32_t)addr & ~(CACHEALIGN_SIZE - 1)));
}

void ICODE_ATTR dmac_ch_queue_2d(
        struct dmac_ch *ch, void *srcaddr, void *dstaddr,
        size_t size, size_t width, size_t stride, void *cb_data)
{
    #ifdef PANIC_DEBUG
    PANIC_DEBUG_CHANNEL(ch);
    uint32_t align = (1 << MIN(ch->cfg->swidth, ch->cfg->dwidth)) - 1;
    if (((uint32_t)srcaddr | (uint32_t)dstaddr | size | width | stride) & align)
        panicf("dmac_ch_queue_2d(): %d,%p,%p,%u,%u,%u: bad alignment?",
                ch->prio, srcaddr, dstaddr, size, width, stride);
    #endif

    struct dmac_tsk *tsk;
    unsigned int srcinc, dstinc;
    uint32_t control, llibuf_idx;
    struct dmac_lli volatile *lli, *next_lli;

    /* get and fill new task */
    tsk = CH_TSK_TOP(ch);
    tsk->start_lli = ch->llibuf_top;
    tsk->size = size;
    tsk->cb_data = cb_data;

    /* use maximum LLI transfer count for 1D->1D transfers */
    if (width == stride)
        width = stride = ch->cfg->lli_xfer_max_count << ch->cfg->swidth;

    srcinc = (ch->cfg->sinc) ? stride : 0;
    dstinc = (ch->cfg->dinc) ? width : 0;

    /* convert bytes to source bus width units */
    size >>= ch->cfg->swidth;
    width >>= ch->cfg->swidth;

    /* fill LLI circular buffer */
    control = ch->control | width;
    lli = ch->llibuf_top;
    llibuf_idx = lli - ch->llibuf;

    while (1)
    {
        llibuf_idx = (llibuf_idx + 1) & ch->llibuf_mask;
        next_lli = ch->llibuf + llibuf_idx;

        lli->srcaddr = srcaddr;
        lli->dstaddr = dstaddr;

        if (size <= width)
            break;

        lli->link = (uint32_t)next_lli | ch->llibuf_bus;
        lli->control = control;

        srcaddr += srcinc;
        dstaddr += dstinc;
        size -= width;

        /* clean dcache after completing a line */
        if (((uint32_t)next_lli & (CACHEALIGN_SIZE - 1)) == 0)
            clean_dcache_line(lli);

        lli = next_lli;
    }
    /* last LLI: end the chain and enable terminal count interrupt */
    lli->link = 0;
    lli->control = ch->control | size | DMACCxCONTROL_I_BIT;
    clean_dcache_line(lli);
    drain_write_buffer();

    tsk->end_lli = lli;

    /* The code above is not protected against IRQs: it is fine to
       enter the DMA interrupt handler while an application is
       queuing a task, but applications must protect themselves
       against concurrent queuing. */

    int flags = disable_irq_save();

    ch->llibuf_top = next_lli;

    /* queue new task, launch it if it is the only queued task */
    if (ch->tasks_done == ch->tasks_queued++)
    {
        dmac_ch_run(ch);
    }
    else if (ch->queue_mode == QUEUE_LINK)
    {
        uint32_t baddr = ch->baddr;
        uint32_t link, hw_link;

        link = (uint32_t)tsk->start_lli | ch->llibuf_bus;
        hw_link = DMACCxLINK(baddr);

        /* if it is a direct HW link, do it ASAP */
        if (!hw_link) {
            DMACCxLINK(baddr) = link;
            /* check if the link was successful */
            link = DMACCxLINK(baddr); /* dummy read for delay */
            if (!(DMACCxCONFIG(baddr) & DMACCxCONFIG_E_BIT))
                panicf("DMA ch%d: link error", ch->prio);
        }

        /* Locate the LLI where the new task must be linked. Link it even
           if it was a direct HW link, dmac_ch_get_info() needs it. */
        lli = dmac_ch_tsk_by_idx(ch, ch->tasks_queued-2)->end_lli;
        lli->link = link;
        clean_dcache_line(lli);
        drain_write_buffer();

        /* If the updated LLI was loaded by the HW while it was being
           modified, verify that the HW link is correct. */
        if (LNK2LLI(hw_link) == lli) {
            uint32_t cur_hw_link = DMACCxLINK(baddr);
            if ((cur_hw_link != hw_link) && (cur_hw_link != link))
                DMACCxLINK(baddr) = link;
        }
    }

    restore_irq(flags);
}

void dmac_ch_stop(struct dmac_ch* ch)
{
    PANIC_DEBUG_CHANNEL(ch);

    int flags = disable_irq_save();
    dmac_ch_abort(ch);
    ch->tasks_done = ch->tasks_queued; /* clear queue */
    restore_irq(flags);
}

bool dmac_ch_running(struct dmac_ch *ch)
{
    PANIC_DEBUG_CHANNEL(ch);

    int flags = disable_irq_save();
    bool running = !dmac_ch_task_queue_empty(ch);
    restore_irq(flags);
    return running;
}

/* Returns the source or destination address of the current LLI
   transfer, the remaining bytes for the current task, and the
   total remaining bytes for all queued tasks. */
void *dmac_ch_get_info(struct dmac_ch *ch, size_t *bytes, size_t *t_bytes)
{
    PANIC_DEBUG_CHANNEL(ch);

    void *cur_addr = NULL;
    size_t count = 0, t_count = 0;

    int flags = disable_irq_save();

    if (!dmac_ch_task_queue_empty(ch))
    {
        struct dmac_lli volatile *cur_lli;
        struct dmac_tsk *tsk;
        uint32_t cur_task; /* index */
        uint32_t baddr = ch->baddr;

        /* Read DMA transfer progress:
         *
         * The recommended procedure (stop channel -> read progress ->
         * relaunch channel) is problematic for real time transfers,
         * especially when fast sample rates are combined with small
         * peripheral FIFOs.
         *
         * An experimental method is used instead. It is based on the
         * results observed when reading the LLI registers at the
         * instant they are being updated by the HW (using s5l8702,
         * 2:1 CPU/AHB clock ratio):
         *  - SRCADDR may return erroneous/corrupted data
         *  - LINK and COUNT always return valid data
         *  - it seems that the HW internally updates LINK and COUNT
         *    'atomically', which means that reading twice using the
         *    sequence LINK1->COUNT1->LINK2->COUNT2:
         *     if LINK1 == LINK2 then COUNT1 is consistent with LINK1
         *     if LINK1 != LINK2 then COUNT2 is consistent with LINK2
         */
        uint32_t link, link2, control, control2;

        /* HW read sequence */
        link = DMACCxLINK(baddr);
        control = DMACCxCONTROL(baddr);
        link2 = DMACCxLINK(baddr);
        control2 = DMACCxCONTROL(baddr);

        if (link != link2) {
            link = link2;
            control = control2;
        }

        count = control & DMACCxCONTROL_COUNT_MSK; /* HW count */

        cur_task = ch->tasks_done;

        /* In QUEUE_LINK mode, when the task has just finished and is
         * waiting to enter the interrupt handler, the HW data just
         * read may correspond to the next linked task. Check for this
         * and update the real cur_task accordingly.
         */
        struct dmac_lli *next_start_lli = LNK2LLI(
                dmac_ch_tsk_by_idx(ch, cur_task)->end_lli->link);
        if (next_start_lli && (next_start_lli->link == link))
            cur_task++;

        tsk = dmac_ch_tsk_by_idx(ch, cur_task);

        /* the current LLI is the one preceding the linked (next)
           LLI in the circular buffer */
        cur_lli = (link) ? ch->llibuf + (ch->llibuf_mask &
                (LNK2LLI(link) - ch->llibuf - 1)) : tsk->end_lli;

        /* Calculate the current address: choose the destination
         * address when destination increment is set (usually MEMMEM
         * or PERIMEM transfers), otherwise use the source address
         * (usually MEMPERI transfers).
         */
        void *start_addr;
        if (ch->control & (1 << DMACCxCONTROL_DI_POS)) {
            cur_addr = cur_lli->dstaddr;
            start_addr = tsk->start_lli->dstaddr;
        }
        else {
            cur_addr = cur_lli->srcaddr;
            start_addr = tsk->start_lli->srcaddr;
        }
        cur_addr += (LLI_COUNT(cur_lli) - count) << ch->cfg->swidth;

        /* calculate bytes for current task */
        count = tsk->size - (cur_addr - start_addr);

        /* count bytes for the remaining tasks */
        if (t_bytes)
            while (++cur_task != ch->tasks_queued)
                t_count += dmac_ch_tsk_by_idx(ch, cur_task)->size;
    }

    restore_irq(flags);

    if (bytes) *bytes = count;
    if (t_bytes) *t_bytes = count + t_count;

    return cur_addr;
}