Diffstat (limited to 'firmware/target')
-rw-r--r--  firmware/target/arm/imx31/gigabeat-s/mc13783-gigabeat-s.c   15
-rw-r--r--  firmware/target/arm/imx31/mc13783-imx31.c                  278
-rw-r--r--  firmware/target/arm/imx31/spi-imx31.c                       27
3 files changed, 128 insertions, 192 deletions
diff --git a/firmware/target/arm/imx31/gigabeat-s/mc13783-gigabeat-s.c b/firmware/target/arm/imx31/gigabeat-s/mc13783-gigabeat-s.c
index e0745a5b8b..6d992388f2 100644
--- a/firmware/target/arm/imx31/gigabeat-s/mc13783-gigabeat-s.c
+++ b/firmware/target/arm/imx31/gigabeat-s/mc13783-gigabeat-s.c
@@ -56,33 +56,28 @@ const struct mc13783_event mc13783_events[MC13783_NUM_EVENTS] =
 {
     [MC13783_ADCDONE_EVENT] = /* ADC conversion complete */
     {
-        .set = MC13783_EVENT_SET0,
-        .mask = MC13783_ADCDONEM,
+        .int_id = MC13783_INT_ID_ADCDONE,
         .callback = adc_done,
     },
     [MC13783_ONOFD1_EVENT] = /* Power button */
     {
-        .set = MC13783_EVENT_SET1,
-        .mask = MC13783_ONOFD1M,
+        .int_id = MC13783_INT_ID_ONOFD1,
         .callback = button_power_event,
     },
     [MC13783_SE1_EVENT] = /* Main charger detection */
     {
-        .set = MC13783_EVENT_SET0,
-        .mask = MC13783_SE1M,
+        .int_id = MC13783_INT_ID_SE1,
         .callback = charger_main_detect_event,
     },
     [MC13783_USB_EVENT] = /* USB insertion/USB charger detection */
     {
-        .set = MC13783_EVENT_SET0,
-        .mask = MC13783_USBM,
+        .int_id = MC13783_INT_ID_USB,
         .callback = usb_connect_event,
     },
 #ifdef HAVE_HEADPHONE_DETECTION
     [MC13783_ONOFD2_EVENT] = /* Headphone jack */
     {
-        .set = MC13783_EVENT_SET1,
-        .mask = MC13783_ONOFD2M,
+        .int_id = MC13783_INT_ID_ONOFD2,
         .callback = headphone_detect_event,
     },
 #endif
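
Note on the change above: the per-event .set/.mask pair is collapsed into a single .int_id, and the driver (see mc13783-imx31.c below) recovers the register set with int_id / MC13783_INT_ID_SET_DIV and the bit mask with 1 << (int_id & MC13783_INT_ID_NUM_MASK). A minimal sketch of how such an encoding could be laid out follows; the concrete constant values and id assignments are assumptions for illustration, not taken from this commit.

    #include <stdint.h>

    /* Assumed layout: each MC13783 interrupt status register covers 32 interrupts,
       so dividing by 32 selects the set and the low five bits give the bit position. */
    #define MC13783_INT_ID_SET_DIV   32     /* assumption */
    #define MC13783_INT_ID_NUM_MASK  0x1f   /* assumption */

    /* Hypothetical id values, purely for illustration */
    #define MC13783_INT_ID_ADCDONE   2              /* set 0, bit 2 */
    #define MC13783_INT_ID_ONOFD1    (32 + 3)       /* set 1, bit 3 */

    static inline unsigned int int_id_set(unsigned int int_id)
    {
        return int_id / MC13783_INT_ID_SET_DIV;     /* which status/mask register pair */
    }

    static inline uint32_t int_id_bit(unsigned int int_id)
    {
        return (uint32_t)1 << (int_id & MC13783_INT_ID_NUM_MASK);  /* bit inside that register */
    }
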
diff --git a/firmware/target/arm/imx31/mc13783-imx31.c b/firmware/target/arm/imx31/mc13783-imx31.c
index 094fbaa58b..268c33a549 100644
--- a/firmware/target/arm/imx31/mc13783-imx31.c
+++ b/firmware/target/arm/imx31/mc13783-imx31.c
@@ -34,10 +34,6 @@ static int mc13783_thread_stack[DEFAULT_STACK_SIZE/sizeof(int)];
 static const char * const mc13783_thread_name = "pmic";
 static struct semaphore mc13783_svc_wake;
 
-/* Synchronous thread communication objects */
-static struct mutex mc13783_spi_mutex;
-static struct semaphore mc13783_spi_complete;
-
 /* Tracking for which interrupts are enabled */
 static uint32_t pmic_int_enabled[2] =
     { 0x00000000, 0x00000000 };
@@ -50,32 +46,27 @@ static const unsigned char pmic_ints_regs[2] =
 
 static volatile unsigned int mc13783_thread_id = 0;
 
-static void mc13783_xfer_complete_cb(struct spi_transfer_desc *trans);
-
-/* Transfer descriptor for synchronous reads and writes */
-static struct spi_transfer_desc mc13783_transfer =
+/* Extend the basic SPI transfer descriptor with our own fields */
+struct mc13783_transfer_desc
 {
-    .node = &mc13783_spi,
-    .txbuf = NULL,
-    .rxbuf = NULL,
-    .count = 0,
-    .callback = mc13783_xfer_complete_cb,
-    .next = NULL,
+    struct spi_transfer_desc xfer;
+    union
+    {
+        struct semaphore sema;
+        uint32_t data;
+    };
 };
 
 /* Called when a transfer is finished and data is ready/written */
 static void mc13783_xfer_complete_cb(struct spi_transfer_desc *xfer)
 {
-    if (xfer->count != 0)
-        return;
-
-    semaphore_release(&mc13783_spi_complete);
+    semaphore_release(&((struct mc13783_transfer_desc *)xfer)->sema);
 }
 
-static inline bool wait_for_transfer_complete(void)
+static inline bool wait_for_transfer_complete(struct mc13783_transfer_desc *xfer)
 {
-    return semaphore_wait(&mc13783_spi_complete, HZ*2)
-        == OBJ_WAIT_SUCCEEDED && mc13783_transfer.count == 0;
+    return semaphore_wait(&xfer->sema, TIMEOUT_BLOCK)
+        == OBJ_WAIT_SUCCEEDED && xfer->xfer.count == 0;
 }
 
 static void mc13783_interrupt_thread(void)
@@ -114,15 +105,14 @@ static void mc13783_interrupt_thread(void)
         /* .count is surely expected to be > 0 */
         do
         {
-            enum mc13783_event_sets set = event->set;
+            unsigned int set = event->int_id / MC13783_INT_ID_SET_DIV;
             uint32_t pnd = pending[set];
-            uint32_t mask = event->mask;
+            uint32_t mask = 1 << (event->int_id & MC13783_INT_ID_NUM_MASK);
 
             if (pnd & mask)
             {
                 event->callback();
-                pnd &= ~mask;
-                pending[set] = pnd;
+                pending[set] = pnd & ~mask;
             }
 
             if ((pending[0] | pending[1]) == 0)
@@ -147,9 +137,6 @@ void INIT_ATTR mc13783_init(void)
 {
     /* Serial interface must have been initialized first! */
     semaphore_init(&mc13783_svc_wake, 1, 0);
-    mutex_init(&mc13783_spi_mutex);
-
-    semaphore_init(&mc13783_spi_complete, 1, 0);
 
     /* Enable the PMIC SPI module */
     spi_enable_module(&mc13783_spi);
@@ -183,205 +170,169 @@ void mc13783_close(void)
 bool mc13783_enable_event(enum mc13783_event_ids id)
 {
     const struct mc13783_event * const event = &mc13783_events[id];
-    int set = event->set;
-    uint32_t mask = event->mask;
-
-    mutex_lock(&mc13783_spi_mutex);
+    unsigned int set = event->int_id / MC13783_INT_ID_SET_DIV;
+    uint32_t mask = 1 << (event->int_id & MC13783_INT_ID_NUM_MASK);
 
     pmic_int_enabled[set] |= mask;
     mc13783_clear(pmic_intm_regs[set], mask);
 
-    mutex_unlock(&mc13783_spi_mutex);
-
     return true;
 }
 
 void mc13783_disable_event(enum mc13783_event_ids id)
 {
     const struct mc13783_event * const event = &mc13783_events[id];
-    int set = event->set;
-    uint32_t mask = event->mask;
-
-    mutex_lock(&mc13783_spi_mutex);
+    unsigned int set = event->int_id / MC13783_INT_ID_SET_DIV;
+    uint32_t mask = 1 << (event->int_id & MC13783_INT_ID_NUM_MASK);
 
     pmic_int_enabled[set] &= ~mask;
     mc13783_set(pmic_intm_regs[set], mask);
-
-    mutex_unlock(&mc13783_spi_mutex);
 }
 
-uint32_t mc13783_set(unsigned address, uint32_t bits)
+static inline bool mc13783_transfer(struct spi_transfer_desc *xfer,
+                                    uint32_t *txbuf,
+                                    uint32_t *rxbuf,
+                                    int count,
+                                    spi_transfer_cb_fn_type callback)
 {
-    uint32_t data;
-
-    mutex_lock(&mc13783_spi_mutex);
-
-    data = mc13783_read(address);
-
-    if (data != MC13783_DATA_ERROR)
-        mc13783_write(address, data | bits);
+    xfer->node = &mc13783_spi;
+    xfer->txbuf = txbuf;
+    xfer->rxbuf = rxbuf;
+    xfer->count = count;
+    xfer->callback = callback;
+    xfer->next = NULL;
 
-    mutex_unlock(&mc13783_spi_mutex);
+    return spi_transfer(xfer);
+}
 
-    return data;
+uint32_t mc13783_set(unsigned address, uint32_t bits)
+{
+    return mc13783_write_masked(address, bits, bits);
 }
 
 uint32_t mc13783_clear(unsigned address, uint32_t bits)
 {
-    uint32_t data;
-
-    mutex_lock(&mc13783_spi_mutex);
-
-    data = mc13783_read(address);
-
-    if (data != MC13783_DATA_ERROR)
-        mc13783_write(address, data & ~bits);
-
-    mutex_unlock(&mc13783_spi_mutex);
-
-    return data;
+    return mc13783_write_masked(address, 0, bits);
 }
 
-int mc13783_write(unsigned address, uint32_t data)
+/* Called when the first transfer of mc13783_write_masked is complete */
+static void mc13783_write_masked_cb(struct spi_transfer_desc *xfer)
 {
-    uint32_t packet;
-    int i;
+    struct mc13783_transfer_desc *desc = (struct mc13783_transfer_desc *)xfer;
+    uint32_t *packets = desc->xfer.rxbuf; /* Will have been advanced by 1 */
+    packets[0] |= packets[-1] & ~desc->data;
+}
 
+uint32_t mc13783_write_masked(unsigned address, uint32_t data, uint32_t mask)
+{
     if (address >= MC13783_NUM_REGS)
-        return -1;
+        return MC13783_DATA_ERROR;
+
+    mask &= 0xffffff;
 
-    packet = (1 << 31) | (address << 25) | (data & 0xffffff);
+    uint32_t packets[2] =
+    {
+        address << 25,
+        (1 << 31) | (address << 25) | (data & mask)
+    };
 
-    mutex_lock(&mc13783_spi_mutex);
+    struct mc13783_transfer_desc xfers[2];
+    xfers[0].data = mask;
+    semaphore_init(&xfers[1].sema, 1, 0);
 
-    mc13783_transfer.txbuf = &packet;
-    mc13783_transfer.rxbuf = NULL;
-    mc13783_transfer.count = 1;
+    unsigned long cpsr = disable_irq_save();
 
-    i = -1;
+    /* Queue up two transfers in a row */
+    bool ok = mc13783_transfer(&xfers[0].xfer, &packets[0], &packets[0], 1,
+                               mc13783_write_masked_cb) &&
+              mc13783_transfer(&xfers[1].xfer, &packets[1], NULL, 1,
+                               mc13783_xfer_complete_cb);
 
-    if (spi_transfer(&mc13783_transfer) && wait_for_transfer_complete())
-        i = 1 - mc13783_transfer.count;
+    restore_irq(cpsr);
 
-    mutex_unlock(&mc13783_spi_mutex);
+    if (ok && wait_for_transfer_complete(&xfers[1]))
+        return packets[0];
 
-    return i;
+    return MC13783_DATA_ERROR;
 }
 
-uint32_t mc13783_write_masked(unsigned address, uint32_t data, uint32_t mask)
+uint32_t mc13783_read(unsigned address)
 {
-    uint32_t old;
+    if (address >= MC13783_NUM_REGS)
+        return MC13783_DATA_ERROR;
 
-    mutex_lock(&mc13783_spi_mutex);
+    uint32_t packet = address << 25;
 
-    old = mc13783_read(address);
+    struct mc13783_transfer_desc xfer;
+    semaphore_init(&xfer.sema, 1, 0);
 
-    if (old != MC13783_DATA_ERROR)
+    if (mc13783_transfer(&xfer.xfer, &packet, &packet, 1,
+                         mc13783_xfer_complete_cb) &&
+        wait_for_transfer_complete(&xfer))
     {
-        data = (old & ~mask) | (data & mask);
-
-        if (mc13783_write(address, data) != 1)
-            old = MC13783_DATA_ERROR;
+        return packet;
     }
 
-    mutex_unlock(&mc13783_spi_mutex);
-
-    return old;
+    return MC13783_DATA_ERROR;
 }
 
-uint32_t mc13783_read(unsigned address)
+int mc13783_write(unsigned address, uint32_t data)
 {
-    uint32_t packet;
-
     if (address >= MC13783_NUM_REGS)
-        return MC13783_DATA_ERROR;
-
-    packet = address << 25;
-
-    mutex_lock(&mc13783_spi_mutex);
+        return -1;
 
-    mc13783_transfer.txbuf = &packet;
-    mc13783_transfer.rxbuf = &packet;
-    mc13783_transfer.count = 1;
+    uint32_t packet = (1 << 31) | (address << 25) | (data & 0xffffff);
 
-    if (!spi_transfer(&mc13783_transfer) || !wait_for_transfer_complete())
-        packet = MC13783_DATA_ERROR;
+    struct mc13783_transfer_desc xfer;
+    semaphore_init(&xfer.sema, 1, 0);
 
-    mutex_unlock(&mc13783_spi_mutex);
+    if (mc13783_transfer(&xfer.xfer, &packet, NULL, 1,
+                         mc13783_xfer_complete_cb) &&
+        wait_for_transfer_complete(&xfer))
+    {
+        return 1 - xfer.xfer.count;
+    }
 
-    return packet;
+    return -1;
 }
 
 int mc13783_read_regs(const unsigned char *regs, uint32_t *buffer,
                       int count)
 {
-    int i;
+    struct mc13783_transfer_desc xfer;
+    semaphore_init(&xfer.sema, 1, 0);
 
-    for (i = 0; i < count; i++)
+    if (mc13783_read_async(&xfer.xfer, regs, buffer, count,
+                           mc13783_xfer_complete_cb) &&
+        wait_for_transfer_complete(&xfer))
     {
-        unsigned reg = regs[i];
-
-        if (reg >= MC13783_NUM_REGS)
-            return -1;
-
-        buffer[i] = reg << 25;
+        return count - xfer.xfer.count;
     }
 
-    mutex_lock(&mc13783_spi_mutex);
-
-    mc13783_transfer.txbuf = buffer;
-    mc13783_transfer.rxbuf = buffer;
-    mc13783_transfer.count = count;
-
-    i = -1;
-
-    if (spi_transfer(&mc13783_transfer) && wait_for_transfer_complete())
-        i = count - mc13783_transfer.count;
-
-    mutex_unlock(&mc13783_spi_mutex);
-
-    return i;
+    return -1;
 }
 
 int mc13783_write_regs(const unsigned char *regs, uint32_t *buffer,
                        int count)
 {
-    int i;
+    struct mc13783_transfer_desc xfer;
+    semaphore_init(&xfer.sema, 1, 0);
 
-    for (i = 0; i < count; i++)
+    if (mc13783_write_async(&xfer.xfer, regs, buffer, count,
+                            mc13783_xfer_complete_cb) &&
+        wait_for_transfer_complete(&xfer))
     {
-        unsigned reg = regs[i];
-
-        if (reg >= MC13783_NUM_REGS)
-            return -1;
-
-        buffer[i] = (1 << 31) | (reg << 25) | (buffer[i] & 0xffffff);
+        return count - xfer.xfer.count;
     }
 
-    mutex_lock(&mc13783_spi_mutex);
-
-    mc13783_transfer.txbuf = buffer;
-    mc13783_transfer.rxbuf = NULL;
-    mc13783_transfer.count = count;
-
-    i = -1;
-
-    if (spi_transfer(&mc13783_transfer) && wait_for_transfer_complete())
-        i = count - mc13783_transfer.count;
-
-    mutex_unlock(&mc13783_spi_mutex);
-
-    return i;
+    return -1;
 }
 
-#if 0 /* Not needed right now */
 bool mc13783_read_async(struct spi_transfer_desc *xfer,
                         const unsigned char *regs, uint32_t *buffer,
                         int count, spi_transfer_cb_fn_type callback)
 {
-    int i;
-
-    for (i = 0; i < count; i++)
+    for (int i = 0; i < count; i++)
     {
         unsigned reg = regs[i];
 
@@ -391,24 +342,14 @@ bool mc13783_read_async(struct spi_transfer_desc *xfer,
         buffer[i] = reg << 25;
     }
 
-    xfer->node = &mc13783_spi;
-    xfer->txbuf = buffer;
-    xfer->rxbuf = buffer;
-    xfer->count = count;
-    xfer->callback = callback;
-    xfer->next = NULL;
-
-    return spi_transfer(xfer);
+    return mc13783_transfer(xfer, buffer, buffer, count, callback);
 }
-#endif
 
 bool mc13783_write_async(struct spi_transfer_desc *xfer,
                          const unsigned char *regs, uint32_t *buffer,
                          int count, spi_transfer_cb_fn_type callback)
 {
-    int i;
-
-    for (i = 0; i < count; i++)
+    for (int i = 0; i < count; i++)
     {
         unsigned reg = regs[i];
 
@@ -418,12 +359,5 @@ bool mc13783_write_async(struct spi_transfer_desc *xfer,
         buffer[i] = (1 << 31) | (reg << 25) | (buffer[i] & 0xffffff);
     }
 
-    xfer->node = &mc13783_spi;
-    xfer->txbuf = buffer;
-    xfer->rxbuf = NULL;
-    xfer->count = count;
-    xfer->callback = callback;
-    xfer->next = NULL;
-
-    return spi_transfer(xfer);
+    return mc13783_transfer(xfer, buffer, NULL, count, callback);
 }
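
Note on the change above: every synchronous call now stacks its own mc13783_transfer_desc with a private semaphore instead of sharing one global descriptor behind a mutex, and read-modify-write is done by queueing two chained SPI transfers with IRQs disabled so nothing can slip in between them. A short caller-side sketch under those assumptions (the register number and bit values are hypothetical; the functions and MC13783_DATA_ERROR are the ones defined above):

    #include <stdint.h>

    /* Hypothetical example: set bit 3 and clear bit 7 of register 13 in one
       read-modify-write round trip, then read the register back. */
    static void example_pmic_access(void)
    {
        const unsigned reg = 13;                          /* hypothetical register */
        uint32_t old = mc13783_write_masked(reg, 1 << 3, (1 << 3) | (1 << 7));

        if (old == MC13783_DATA_ERROR)
            return;                                       /* bad address or SPI failure */

        uint32_t now = mc13783_read(reg);                 /* blocks on the per-call semaphore */
        (void)old;
        (void)now;
    }
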
diff --git a/firmware/target/arm/imx31/spi-imx31.c b/firmware/target/arm/imx31/spi-imx31.c
index 7fcf94ce90..ea3d2f8d77 100644
--- a/firmware/target/arm/imx31/spi-imx31.c
+++ b/firmware/target/arm/imx31/spi-imx31.c
@@ -170,7 +170,10 @@ static bool start_transfer(struct spi_module_desc * const desc,
     unsigned long intreg;
 
     if (!spi_set_context(desc, xfer))
+    {
+        xfer->count = -1;
         return false;
+    }
 
     base[CONREG] |= CSPI_CONREG_EN; /* Enable module */
 
@@ -249,8 +252,18 @@ static void spi_interrupt(enum spi_module_number spi)
     if (xfer->count > 0)
     {
         /* Data to transmit - fill TXFIFO or write until exhausted. */
-        if (tx_fill_fifo(desc, base, xfer) != 0)
-            return;
+        int remaining = tx_fill_fifo(desc, base, xfer);
+
+        /* If transfer completed because TXFIFO ran out of data, resume it or
+           else it will not finish. */
+        if (!(base[CONREG] & CSPI_CONREG_XCH))
+        {
+            base[STATREG] = CSPI_STATREG_TC;
+            base[CONREG] |= CSPI_CONREG_XCH;
+        }
+
+        if (remaining > 0)
+            return; /* Still more after this */
 
         /* Out of data - stop TX interrupts, enable TC interrupt. */
         intreg &= ~CSPI_INTREG_THEN;
@@ -263,7 +276,6 @@ static void spi_interrupt(enum spi_module_number spi)
         /* Outbound transfer is complete. */
         intreg &= ~CSPI_INTREG_TCEN;
         base[INTREG] = intreg;
-        base[STATREG] = CSPI_STATREG_TC; /* Ack 'complete' */
     }
 
     if (intreg != 0)
@@ -276,8 +288,6 @@ static void spi_interrupt(enum spi_module_number spi)
         spi_transfer_cb_fn_type callback = xfer->callback;
         xfer->next = NULL;
 
-        base[CONREG] &= ~CSPI_CONREG_EN; /* Disable module */
-
         if (next == xfer)
         {
             /* Last job on queue */
@@ -287,6 +297,8 @@ static void spi_interrupt(enum spi_module_number spi)
             callback(xfer);
 
             /* Callback may have restarted transfers. */
+            if (desc->head == NULL)
+                base[CONREG] &= ~CSPI_CONREG_EN; /* Disable module */
         }
         else
         {
@@ -299,7 +311,6 @@ static void spi_interrupt(enum spi_module_number spi)
             if (!start_transfer(desc, next))
             {
                 xfer = next;
-                xfer->count = -1;
                 continue; /* Failed: try next */
             }
         }
@@ -416,10 +427,6 @@ bool spi_transfer(struct spi_transfer_desc *xfer)
             desc->tail = xfer;
             xfer->next = xfer; /* First, self-reference terminate */
         }
-        else
-        {
-            xfer->count = -1; /* Signal error */
-        }
     }
     else
     {