Diffstat (limited to 'firmware/target/arm/imx31/mc13783-imx31.c')
-rw-r--r--  firmware/target/arm/imx31/mc13783-imx31.c | 219
1 file changed, 118 insertions, 101 deletions
diff --git a/firmware/target/arm/imx31/mc13783-imx31.c b/firmware/target/arm/imx31/mc13783-imx31.c
index 627048fa54..6bf0b878f4 100644
--- a/firmware/target/arm/imx31/mc13783-imx31.c
+++ b/firmware/target/arm/imx31/mc13783-imx31.c
@@ -20,9 +20,9 @@
  ****************************************************************************/
 #include "system.h"
 #include "cpu.h"
-#include "gpio-imx31.h"
-#include "mc13783.h"
+#define DEFINE_MC13783_VECTOR_TABLE
 #include "mc13783-target.h"
+#include "gpio-target.h"
 #include "debug.h"
 #include "kernel.h"
 
@@ -32,21 +32,42 @@ struct mc13783_transfer_desc
     struct spi_transfer_desc xfer;
     union
     {
+        /* Pick _either_ data or semaphore */
         struct semaphore sema;
-        uint32_t data;
+        uint32_t data[4];
     };
 };
 
-extern const struct mc13783_event mc13783_events[MC13783_NUM_EVENTS];
-extern struct spi_node mc13783_spi;
+static uint32_t pmic_int_enb[2];                  /* Enabled ints */
+static uint32_t pmic_int_sense_enb[2];            /* Enabled sense reading */
+static struct mc13783_transfer_desc int_xfers[2]; /* ISR transfer descriptor */
+static const struct mc13783_event *current_event; /* Current event in callback */
+static bool int_restore;                          /* Prevent SPI callback from
+                                                     unmasking GPIO interrupt
+                                                     (lockout) */
 
-static uint32_t pmic_int_enb[2];       /* Enabled ints */
-static uint32_t pmic_int_sense_enb[2]; /* Enabled sense reading */
-static uint32_t int_pnd_buf[2];        /* Pending ints */
-static uint32_t int_data_buf[4];       /* ISR data buffer */
-static struct spi_transfer_desc int_xfers[2]; /* ISR transfer descriptor */
-static bool restore_event = true; /* Protect SPI callback from unmasking GPIO
-                                     interrupt (lockout) */
+static const struct mc13783_event * event_from_id(enum mc13783_int_ids id)
+{
+    for (unsigned int i = 0; i < mc13783_event_vector_tbl_len; i++)
+    {
+        if (mc13783_event_vector_tbl[i].id == id)
+            return &mc13783_event_vector_tbl[i];
+    }
+
+    return NULL;
+}
+
+/* Called when a transfer is finished and data is ready/written */
+static void mc13783_xfer_complete_cb(struct spi_transfer_desc *xfer)
+{
+    semaphore_release(&((struct mc13783_transfer_desc *)xfer)->sema);
+}
+
+static inline bool wait_for_transfer_complete(struct mc13783_transfer_desc *xfer)
+{
+    return semaphore_wait(&xfer->sema, TIMEOUT_BLOCK)
+            == OBJ_WAIT_SUCCEEDED && xfer->xfer.count == 0;
+}
 
 static inline bool mc13783_transfer(struct spi_transfer_desc *xfer,
                                     uint32_t *txbuf,
@@ -64,90 +85,96 @@ static inline bool mc13783_transfer(struct spi_transfer_desc *xfer,
     return spi_transfer(xfer);
 }
 
-/* Called when a transfer is finished and data is ready/written */
-static void mc13783_xfer_complete_cb(struct spi_transfer_desc *xfer)
+static inline void sync_transfer_init(struct mc13783_transfer_desc *xfer)
 {
-    semaphore_release(&((struct mc13783_transfer_desc *)xfer)->sema);
+    semaphore_init(&xfer->sema, 1, 0);
 }
 
-static inline bool wait_for_transfer_complete(struct mc13783_transfer_desc *xfer)
+static inline bool mc13783_sync_transfer(struct mc13783_transfer_desc *xfer,
+                                         uint32_t *txbuf,
+                                         uint32_t *rxbuf,
+                                         int count)
 {
-    return semaphore_wait(&xfer->sema, TIMEOUT_BLOCK)
-            == OBJ_WAIT_SUCCEEDED && xfer->xfer.count == 0;
+    sync_transfer_init(xfer);
+    return mc13783_transfer(&xfer->xfer, txbuf, rxbuf, count, mc13783_xfer_complete_cb);
 }
 
 /* Efficient interrupt status and acking */
 static void mc13783_int_svc_complete_callback(struct spi_transfer_desc *xfer)
 {
+    struct mc13783_transfer_desc *desc1 = (struct mc13783_transfer_desc *)xfer;
+    uint32_t pnd0 = desc1->data[0], pnd1 = desc1->data[1];
+
     /* Restore PMIC interrupt events */
-    if (restore_event)
-        bitset32(&MC13783_GPIO_IMR, 1ul << MC13783_GPIO_LINE);
+    if (int_restore)
+        gpio_int_enable(MC13783_EVENT_ID);
 
     /* Call handlers */
-    for (
-        const struct mc13783_event *event = mc13783_events;
-        int_pnd_buf[0] | int_pnd_buf[1];
-        event++
-    )
+    const struct mc13783_event *event = mc13783_event_vector_tbl;
+    while (pnd0 | pnd1)
     {
-        unsigned int set = event->int_id / MC13783_INT_ID_SET_DIV;
-        uint32_t pnd = int_pnd_buf[set];
-        uint32_t mask = 1 << (event->int_id & MC13783_INT_ID_NUM_MASK);
+        uint32_t id = event->id;
+        uint32_t set = id / 32;
+        uint32_t bit = 1 << (id % 32);
 
-        if (pnd & mask)
+        uint32_t pnd = set == 0 ? pnd0 : pnd1;
+        if (pnd & bit)
         {
+            current_event = event;
             event->callback();
-            int_pnd_buf[set] = pnd & ~mask;
+            set == 0 ? (pnd0 &= ~bit) : (pnd1 &= ~bit);
         }
-    }
 
-    (void)xfer;
+        event++;
+    }
 }
 
 static void mc13783_int_svc_callback(struct spi_transfer_desc *xfer)
 {
     /* Only clear interrupts with handlers */
-    int_pnd_buf[0] &= pmic_int_enb[0];
-    int_pnd_buf[1] &= pmic_int_enb[1];
+    struct mc13783_transfer_desc *desc0 = (struct mc13783_transfer_desc *)xfer;
+    struct mc13783_transfer_desc *desc1 = &int_xfers[1];
 
-    /* Only read sense if enabled interrupts have them enabled */
-    if ((int_pnd_buf[0] & pmic_int_sense_enb[0]) ||
-        (int_pnd_buf[1] & pmic_int_sense_enb[1]))
-    {
-        int_data_buf[2] = MC13783_INTERRUPT_SENSE0 << 25;
-        int_data_buf[3] = MC13783_INTERRUPT_SENSE1 << 25;
-        int_xfers[1].rxbuf = int_data_buf;
-        int_xfers[1].count = 4;
-    }
+    uint32_t pnd0 = desc0->data[0] & pmic_int_enb[0];
+    uint32_t pnd1 = desc0->data[1] & pmic_int_enb[1];
+
+    desc1->data[0] = pnd0;
+    desc1->data[1] = pnd1;
 
     /* Setup the write packets with status(es) to clear */
-    int_data_buf[0] = (1 << 31) | (MC13783_INTERRUPT_STATUS0 << 25)
-                      | int_pnd_buf[0];
-    int_data_buf[1] = (1 << 31) | (MC13783_INTERRUPT_STATUS1 << 25)
-                      | int_pnd_buf[1];
-    (void)xfer;
+    desc0->data[0] = (1 << 31) | (MC13783_INTERRUPT_STATUS0 << 25) | pnd0;
+    desc0->data[1] = (1 << 31) | (MC13783_INTERRUPT_STATUS1 << 25) | pnd1;
+
+    /* Only read sense if any pending interrupts have them enabled */
+    if ((pnd0 & pmic_int_sense_enb[0]) || (pnd1 & pmic_int_sense_enb[1]))
+    {
+        desc0->data[2] = MC13783_INTERRUPT_SENSE0 << 25;
+        desc0->data[3] = MC13783_INTERRUPT_SENSE1 << 25;
+        desc1->xfer.rxbuf = desc0->data;
+        desc1->xfer.count = 4;
+    }
 }
 
 /* GPIO interrupt handler for mc13783 */
-void mc13783_event(void)
+void INT_MC13783(void)
 {
     /* Mask the interrupt (unmasked after final read services it). */
-    bitclr32(&MC13783_GPIO_IMR, 1ul << MC13783_GPIO_LINE);
-    MC13783_GPIO_ISR = (1ul << MC13783_GPIO_LINE);
+    gpio_int_disable(MC13783_EVENT_ID);
+    gpio_int_clear(MC13783_EVENT_ID);
 
     /* Setup the read packets */
-    int_pnd_buf[0] = MC13783_INTERRUPT_STATUS0 << 25;
-    int_pnd_buf[1] = MC13783_INTERRUPT_STATUS1 << 25;
+    int_xfers[0].data[0] = MC13783_INTERRUPT_STATUS0 << 25;
+    int_xfers[0].data[1] = MC13783_INTERRUPT_STATUS1 << 25;
 
     unsigned long cpsr = disable_irq_save();
 
     /* Do these without intervening transfers */
-    if (mc13783_transfer(&int_xfers[0], int_pnd_buf, int_pnd_buf, 2,
-                         mc13783_int_svc_callback))
+    if (mc13783_transfer(&int_xfers[0].xfer, int_xfers[0].data,
+                         int_xfers[0].data, 2, mc13783_int_svc_callback))
     {
         /* Start this provisionally and fill-in actual values during the
           first transfer's callback - set whatever could be known */
-        mc13783_transfer(&int_xfers[1], int_data_buf, NULL, 2,
+        mc13783_transfer(&int_xfers[1].xfer, int_xfers[0].data, NULL, 2,
                          mc13783_int_svc_complete_callback);
     }
 
@@ -166,43 +193,45 @@ void INIT_ATTR mc13783_init(void)
     mc13783_write(MC13783_INTERRUPT_MASK0, 0xffffff);
     mc13783_write(MC13783_INTERRUPT_MASK1, 0xffffff);
 
-    MC13783_GPIO_ISR = (1ul << MC13783_GPIO_LINE);
-    gpio_enable_event(MC13783_EVENT_ID);
+    gpio_int_clear(MC13783_EVENT_ID);
+    gpio_enable_event(MC13783_EVENT_ID, true);
 }
 
 void mc13783_close(void)
 {
-    restore_event = false;
-    gpio_disable_event(MC13783_EVENT_ID);
+    int_restore = false;
+    gpio_int_disable(MC13783_EVENT_ID);
+    gpio_enable_event(MC13783_EVENT_ID, false);
     spi_enable_node(&mc13783_spi, false);
 }
 
-void mc13783_enable_event(enum mc13783_event_ids id, bool enable)
+void mc13783_enable_event(enum mc13783_int_ids id, bool enable)
 {
     static const unsigned char pmic_intm_regs[2] =
         { MC13783_INTERRUPT_MASK0, MC13783_INTERRUPT_MASK1 };
 
-    const struct mc13783_event * const event = &mc13783_events[id];
-    unsigned int set = event->int_id / MC13783_INT_ID_SET_DIV;
-    uint32_t mask = 1 << (event->int_id & MC13783_INT_ID_NUM_MASK);
+    const struct mc13783_event * const event = event_from_id(id);
+    if (event == NULL)
+        return;
+
+    unsigned int set = id / 32;
+    uint32_t mask = 1 << (id % 32);
+    uint32_t bit = enable ? mask : 0;
 
     /* Mask GPIO while changing bits around */
-    restore_event = false;
-    bitclr32(&MC13783_GPIO_IMR, 1ul << MC13783_GPIO_LINE);
-    mc13783_write_masked(pmic_intm_regs[set],
-                         enable ? 0 : mask, mask);
-    bitmod32(&pmic_int_enb[set], enable ? mask : 0, mask);
-    bitmod32(&pmic_int_sense_enb[set], enable ? event->sense : 0,
-             event->sense);
-    restore_event = true;
-    bitset32(&MC13783_GPIO_IMR, 1ul << MC13783_GPIO_LINE);
+    int_restore = false;
+    gpio_int_disable(MC13783_EVENT_ID);
+    mc13783_write_masked(pmic_intm_regs[set], bit ^ mask, mask);
+    bitmod32(&pmic_int_sense_enb[set], event->sense ? bit : 0, mask);
+    bitmod32(&pmic_int_enb[set], bit, mask);
+    int_restore = true;
+    gpio_int_enable(MC13783_EVENT_ID);
 }
 
-uint32_t mc13783_event_sense(enum mc13783_event_ids id)
+uint32_t mc13783_event_sense(void)
 {
-    const struct mc13783_event * const event = &mc13783_events[id];
-    unsigned int set = event->int_id / MC13783_INT_ID_SET_DIV;
-    return int_data_buf[2 + set] & event->sense;
+    const struct mc13783_event *event = current_event;
+    return int_xfers[0].data[2 + event->id / 32] & event->sense;
 }
 
 uint32_t mc13783_set(unsigned address, uint32_t bits)
@@ -219,8 +248,7 @@ uint32_t mc13783_clear(unsigned address, uint32_t bits)
 static void mc13783_write_masked_cb(struct spi_transfer_desc *xfer)
 {
     struct mc13783_transfer_desc *desc = (struct mc13783_transfer_desc *)xfer;
-    uint32_t *packets = desc->xfer.rxbuf; /* Will have been advanced by 1 */
-    packets[0] |= packets[-1] & ~desc->data;
+    desc->data[1] |= desc->data[0] & desc->data[2]; /* & ~mask */
 }
 
 uint32_t mc13783_write_masked(unsigned address, uint32_t data, uint32_t mask)
@@ -230,28 +258,23 @@ uint32_t mc13783_write_masked(unsigned address, uint32_t data, uint32_t mask)
 
     mask &= 0xffffff;
 
-    uint32_t packets[2] =
-    {
-        address << 25,
-        (1 << 31) | (address << 25) | (data & mask)
-    };
-
     struct mc13783_transfer_desc xfers[2];
-    xfers[0].data = mask;
-    semaphore_init(&xfers[1].sema, 1, 0);
+    xfers[0].data[0] = address << 25;
+    xfers[0].data[1] = (1 << 31) | (address << 25) | (data & mask);
+    xfers[0].data[2] = ~mask;
 
     unsigned long cpsr = disable_irq_save();
 
     /* Queue up two transfers in a row */
-    bool ok = mc13783_transfer(&xfers[0].xfer, &packets[0], &packets[0], 1,
+    bool ok = mc13783_transfer(&xfers[0].xfer,
+                               &xfers[0].data[0], &xfers[0].data[0], 1,
                                mc13783_write_masked_cb) &&
-              mc13783_transfer(&xfers[1].xfer, &packets[1], NULL, 1,
-                               mc13783_xfer_complete_cb);
+              mc13783_sync_transfer(&xfers[1], &xfers[0].data[1], NULL, 1);
 
     restore_irq(cpsr);
 
     if (ok && wait_for_transfer_complete(&xfers[1]))
-        return packets[0];
+        return xfers[0].data[0];
 
     return MC13783_DATA_ERROR;
 }
@@ -264,10 +287,7 @@ uint32_t mc13783_read(unsigned address)
     uint32_t packet = address << 25;
 
     struct mc13783_transfer_desc xfer;
-    semaphore_init(&xfer.sema, 1, 0);
-
-    if (mc13783_transfer(&xfer.xfer, &packet, &packet, 1,
-                         mc13783_xfer_complete_cb) &&
+    if (mc13783_sync_transfer(&xfer, &packet, &packet, 1) &&
         wait_for_transfer_complete(&xfer))
     {
         return packet;
@@ -284,10 +304,7 @@ int mc13783_write(unsigned address, uint32_t data)
     uint32_t packet = (1 << 31) | (address << 25) | (data & 0xffffff);
 
     struct mc13783_transfer_desc xfer;
-    semaphore_init(&xfer.sema, 1, 0);
-
-    if (mc13783_transfer(&xfer.xfer, &packet, NULL, 1,
-                         mc13783_xfer_complete_cb) &&
+    if (mc13783_sync_transfer(&xfer, &packet, NULL, 1) &&
        wait_for_transfer_complete(&xfer))
     {
         return 1 - xfer.xfer.count;
@@ -300,7 +317,7 @@ int mc13783_read_regs(const unsigned char *regs, uint32_t *buffer,
                       int count)
 {
     struct mc13783_transfer_desc xfer;
-    semaphore_init(&xfer.sema, 1, 0);
+    sync_transfer_init(&xfer);
 
     if (mc13783_read_async(&xfer.xfer, regs, buffer, count,
                            mc13783_xfer_complete_cb) &&
@@ -316,7 +333,7 @@ int mc13783_write_regs(const unsigned char *regs, uint32_t *buffer,
                        int count)
 {
     struct mc13783_transfer_desc xfer;
-    semaphore_init(&xfer.sema, 1, 0);
+    sync_transfer_init(&xfer);
 
     if (mc13783_write_async(&xfer.xfer, regs, buffer, count,
                             mc13783_xfer_complete_cb) &&