author    Michael Sevakis <jethead71@rockbox.org>    2010-05-27 23:14:39 +0000
committer Michael Sevakis <jethead71@rockbox.org>    2010-05-27 23:14:39 +0000
commit    25ebd9832dcc61709571abf9705066b6a83f2038 (patch)
tree      1610a13a0cface77c0dcc905f6b5e7032cbc6e30
parent    07ba7461516350ca929c9f2d617ea2284c2ccddc (diff)
Gigabeat S PCM: There's no reason to touch any hardware registers in order to lock out PCM callbacks.
git-svn-id: svn://svn.rockbox.org/rockbox/trunk@26340 a1c6a512-1295-4272-9138-f99709370657
-rw-r--r--  firmware/target/arm/imx31/gigabeat-s/pcm-gigabeat-s.c | 63
1 file changed, 39 insertions(+), 24 deletions(-)
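What the patch boils down to: pcm_play_lock()/pcm_rec_lock() no longer clear the SSI DMA-request bits, they only bump a counter, and the matching unlock runs any DMA callback that the completion handler deferred while the lock was held. The excerpt below condenses the playback side from the hunks that follow; the struct and the lock/unlock bodies come from the patch itself, while disable_irq_save()/restore_irq()/play_dma_callback() are reduced to stand-in stubs purely so the fragment is self-contained (in the driver they are real firmware/SDMA routines). The interrupt-side counterpart is sketched after the diff.

/* Stand-ins for routines the real driver gets from the firmware and the
 * SDMA layer; stubbed here only to keep the excerpt self-contained. */
static int disable_irq_save(void) { return 0; }
static void restore_irq(int oldstatus) { (void)oldstatus; }
static void play_dma_callback(void) { /* buffer-completion work goes here */ }

struct dma_data
{
    int locked;            /* lock nesting count */
    int callback_pending;  /* set by the DMA callback while locked */
    int state;             /* nonzero while a transfer is active */
};

/* Initialize to an unlocked, stopped state */
static struct dma_data dma_play_data =
{
    .locked = 0,
    .callback_pending = 0,
    .state = 0
};

void pcm_play_lock(void)
{
    /* Interrupt handlers run to completion before lower-priority modes
     * resume, so incrementing the count is enough; no register access. */
    ++dma_play_data.locked;
}

void pcm_play_unlock(void)
{
    if (--dma_play_data.locked == 0 && dma_play_data.state != 0)
    {
        /* Atomically claim any callback deferred while we were locked */
        int oldstatus = disable_irq_save();
        int pending = dma_play_data.callback_pending;
        dma_play_data.callback_pending = 0;
        restore_irq(oldstatus);

        if (pending != 0)
            play_dma_callback();   /* restart the callback/DMA chain */
    }
}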
diff --git a/firmware/target/arm/imx31/gigabeat-s/pcm-gigabeat-s.c b/firmware/target/arm/imx31/gigabeat-s/pcm-gigabeat-s.c
index 2c65c70360..cf68eb0fe0 100644
--- a/firmware/target/arm/imx31/gigabeat-s/pcm-gigabeat-s.c
+++ b/firmware/target/arm/imx31/gigabeat-s/pcm-gigabeat-s.c
@@ -35,6 +35,23 @@
 static struct buffer_descriptor dma_play_bd NOCACHEBSS_ATTR;
 static struct channel_descriptor dma_play_cd NOCACHEBSS_ATTR;
 
+/* The pcm locking relies on the fact that interrupt handlers run to
+ * completion before lower-priority modes proceed. We don't have to touch
+ * hardware registers. Disabling the SDMA interrupt would disable DMA
+ * callbacks system-wide, and that is not desirable.
+ *
+ * Lock explanation [++.locked]:
+ * Trivial, just increment .locked.
+ *
+ * Unlock explanation [if (--.locked == 0 && .state != 0)]:
+ * If an interrupt occurred and saw .locked as nonzero, we'll get a pending
+ * callback and it will have taken no action other than to set the flag to
+ * the value of .state. If it saw zero for .locked, it will have proceeded
+ * normally into the pcm callbacks. If the cb set the pending flag, it has
+ * to be called to kickstart the callback mechanism and DMA. If the unlock
+ * came after a stop, we won't be in the block and DMA will be off. If
+ * we're still doing transfers, cb will see 0 for .locked and, if pending,
+ * it won't be called by DMA again. */
 struct dma_data
 {
     int locked;
@@ -44,7 +61,7 @@ struct dma_data
 
 static struct dma_data dma_play_data =
 {
-    /* Initialize to a locked, stopped state */
+    /* Initialize to an unlocked, stopped state */
     .locked = 0,
     .callback_pending = 0,
     .state = 0
@@ -81,8 +98,7 @@ static void play_dma_callback(void)
 
 void pcm_play_lock(void)
 {
-    if (++dma_play_data.locked == 1)
-        imx31_regclr32(&SSI_SIER2, SSI_SIER_TDMAE);
+    ++dma_play_data.locked;
 }
 
 void pcm_play_unlock(void)
@@ -92,12 +108,8 @@ void pcm_play_unlock(void)
         int oldstatus = disable_irq_save();
         int pending = dma_play_data.callback_pending;
         dma_play_data.callback_pending = 0;
-        SSI_SIER2 |= SSI_SIER_TDMAE;
         restore_irq(oldstatus);
 
-        /* Should an interrupt be forced instead? The upper pcm layer can
-         * call producer's callback in thread context so technically this is
-         * acceptable. */
         if (pending != 0)
             play_dma_callback();
     }
@@ -238,21 +250,24 @@ static void play_start_pcm(void)
     SSI_STX0_2 = 0;
     SSI_STX0_2 = 0;
 
-    SSI_SCR2 |= SSI_SCR_TE;                    /* Start transmitting */
+    SSI_SIER2 |= SSI_SIER_TDMAE;               /* Enable DMA req. */
+    SSI_SCR2 |= SSI_SCR_TE;                    /* Start transmitting */
 }
 
 static void play_stop_pcm(void)
 {
+    SSI_SIER2 &= ~SSI_SIER_TDMAE;              /* Disable DMA req. */
+
+    /* Set state before pending to prevent race with interrupt */
+    dma_play_data.state = 0;
+
     /* Wait for FIFO to empty */
     while (SSI_SFCSR_TFCNT0 & SSI_SFCSR2);
 
-    /* Disable transmission */
-    SSI_STCR2 &= ~SSI_STCR_TFEN0;
-    SSI_SCR2 &= ~(SSI_SCR_TE | SSI_SCR_SSIEN);
+    SSI_STCR2 &= ~SSI_STCR_TFEN0;              /* Disable TX */
+    SSI_SCR2 &= ~(SSI_SCR_TE | SSI_SCR_SSIEN); /* Disable transmission, SSI */
 
-    /* Set state before pending to prevent race with interrupt */
-    /* Do not enable DMA requests on unlock */
-    dma_play_data.state = 0;
+    /* Clear any pending callback */
     dma_play_data.callback_pending = 0;
 }
 
@@ -361,7 +376,7 @@ static struct channel_descriptor dma_rec_cd NOCACHEBSS_ATTR;
 
 static struct dma_data dma_rec_data =
 {
-    /* Initialize to a locked, stopped state */
+    /* Initialize to an unlocked, stopped state */
     .locked = 0,
     .callback_pending = 0,
     .state = 0
@@ -402,8 +417,7 @@ static void rec_dma_callback(void)
 
 void pcm_rec_lock(void)
 {
-    if (++dma_rec_data.locked == 1)
-        imx31_regclr32(&SSI_SIER1, SSI_SIER_RDMAE);
+    ++dma_rec_data.locked;
 }
 
 void pcm_rec_unlock(void)
@@ -413,12 +427,8 @@ void pcm_rec_unlock(void)
         int oldstatus = disable_irq_save();
         int pending = dma_rec_data.callback_pending;
         dma_rec_data.callback_pending = 0;
-        SSI_SIER1 |= SSI_SIER_RDMAE;
         restore_irq(oldstatus);
 
-        /* Should an interrupt be forced instead? The upper pcm layer can
-         * call consumer's callback in thread context so technically this is
-         * acceptable. */
         if (pending != 0)
             rec_dma_callback();
     }
@@ -426,6 +436,11 @@ void pcm_rec_unlock(void)
 
 void pcm_rec_dma_stop(void)
 {
+    SSI_SIER1 &= ~SSI_SIER_RDMAE;  /* Disable DMA req. */
+
+    /* Set state before pending to prevent race with interrupt */
+    dma_rec_data.state = 0;
+
     /* Stop receiving data */
     sdma_channel_stop(DMA_REC_CH_NUM);
 
@@ -434,9 +449,7 @@ void pcm_rec_dma_stop(void)
     SSI_SCR1 &= ~SSI_SCR_RE;       /* Disable RX */
     SSI_SRCR1 &= ~SSI_SRCR_RFEN0;  /* Disable RX FIFO */
 
-    /* Set state before pending to prevent race with interrupt */
-    /* Do not enable DMA requests on unlock */
-    dma_rec_data.state = 0;
+    /* Clear any pending callback */
     dma_rec_data.callback_pending = 0;
 }
 
@@ -466,6 +479,8 @@ void pcm_rec_dma_start(void *addr, size_t size)
 
     /* Enable receive */
     SSI_SCR1 |= SSI_SCR_RE;
+    SSI_SIER1 |= SSI_SIER_RDMAE;   /* Enable DMA req. */
+
    sdma_channel_run(DMA_REC_CH_NUM);
 }
 
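The SDMA completion handler itself is untouched by this patch, so the behaviour the new header comment assigns to it (record .state into the pending flag when the lock is held, otherwise go straight into the pcm callbacks) can only be guessed at here. The fragment below reuses the names from the excerpt above the diff; the function name play_dma_interrupt and its exact control flow are illustrative, not taken from the file.

/* Hypothetical sketch of the completion-handler side of the handshake
 * described in the new header comment; illustrative only. */
static void play_dma_interrupt(void)
{
    if (dma_play_data.locked != 0)
    {
        /* Lock held: take no action other than remembering whether a
         * transfer is still active, so pcm_play_unlock() knows it must
         * run the callback later. */
        dma_play_data.callback_pending = dma_play_data.state;
        return;
    }

    /* Not locked: proceed normally into the PCM callback path */
    play_dma_callback();
}

Read this way, it is also clear why play_stop_pcm() and pcm_rec_dma_stop() now zero .state before .callback_pending: a completion that lands between the two stores copies an already-zeroed .state, so no stale pending flag survives the stop to trigger a spurious callback on the next unlock.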