diff options
Diffstat (limited to 'firmware/common/disk_cache.c')
-rw-r--r-- | firmware/common/disk_cache.c | 343 |
1 file changed, 343 insertions, 0 deletions
diff --git a/firmware/common/disk_cache.c b/firmware/common/disk_cache.c new file mode 100644 index 0000000000..0e842e7796 --- /dev/null +++ b/firmware/common/disk_cache.c | |||
@@ -0,0 +1,343 @@ | |||
1 | /*************************************************************************** | ||
2 | * __________ __ ___. | ||
3 | * Open \______ \ ____ ____ | | _\_ |__ _______ ___ | ||
4 | * Source | _// _ \_/ ___\| |/ /| __ \ / _ \ \/ / | ||
5 | * Jukebox | | ( <_> ) \___| < | \_\ ( <_> > < < | ||
6 | * Firmware |____|_ /\____/ \___ >__|_ \|___ /\____/__/\_ \ | ||
7 | * \/ \/ \/ \/ \/ | ||
8 | * $Id$ | ||
9 | * | ||
10 | * Copyright (C) 2014 by Michael Sevakis | ||
11 | * | ||
12 | * This program is free software; you can redistribute it and/or | ||
13 | * modify it under the terms of the GNU General Public License | ||
14 | * as published by the Free Software Foundation; either version 2 | ||
15 | * of the License, or (at your option) any later version. | ||
16 | * | ||
17 | * This software is distributed on an "AS IS" basis, WITHOUT WARRANTY OF ANY | ||
18 | * KIND, either express or implied. | ||
19 | * | ||
20 | ****************************************************************************/ | ||
21 | #include "config.h" | ||
22 | #include "debug.h" | ||
23 | #include "system.h" | ||
24 | #include "linked_list.h" | ||
25 | #include "disk_cache.h" | ||
26 | #include "fat.h" /* for SECTOR_SIZE */ | ||
27 | #include "bitarray.h" | ||
28 | |||
29 | /* Cache: LRU cache with separately-chained hashtable | ||
30 | * | ||
31 | * Each entry of the map is the mapped location of the hashed sector value | ||
32 | * where each bit in each map entry indicates which corresponding cache | ||
33 | * entries are occupied by sector values that collide in that map entry. | ||
34 | * | ||
35 | * Each volume is given its own bit map. | ||
36 | * | ||
37 | * To probe for a specific key, each bit in the map entry must be examined, | ||
38 | * its position used as an index into the cache_entry array and the actual | ||
39 | * sector information compared for that cache entry. If the search exhausts | ||
40 | * all bits, the sector is not cached. | ||
41 | * | ||
42 | * To avoid long chains, the map entry count should be much greater than the | ||
43 | * number of cache entries. Since the cache is an LRU design, no buffer entry | ||
44 | * in the array is intrinsically associated with any particular sector number | ||
45 | * or volume. | ||
46 | * | ||
47 | * Example 6-sector cache with 8-entry map: | ||
48 | * cache entry 543210 | ||
49 | * cache map 100000 <- sector number hashes into map | ||
50 | * 000000 | ||
51 | * 000100 | ||
52 | * 000000 | ||
53 | * 010000 | ||
54 | * 000000 | ||
55 | * 001001 <- collision | ||
56 | * 000000 | ||
57 | * volume map 111101 <- entry usage by the volume (OR of all map entries) | ||
58 | */ | ||
59 | |||
enum dce_flags /* flags for each cache entry */
{
    DCE_INUSE = 0x01, /* entry in use and valid */
    DCE_DIRTY = 0x02, /* entry is dirty in need of writeback */
    DCE_BUF = 0x04, /* entry is being used as a general buffer */
                    /* (set by dc_get_buffer while the entry is detached
                        from the LRU list) */
};
66 | |||
struct disk_cache_entry
{
    struct lldc_node node; /* LRU list links; must remain the first member
                              so NODE_DCE()/DCE_LRU() may cast a node
                              pointer back to the entry */
    unsigned char flags; /* entry flags (enum dce_flags; 0 = unused) */
#ifdef HAVE_MULTIVOLUME
    unsigned char volume; /* volume of sector */
#endif
    unsigned long sector; /* cached disk sector number */
};
76 | |||
77 | BITARRAY_TYPE_DECLARE(cache_map_entry_t, cache_map, DC_NUM_ENTRIES) | ||
78 | |||
79 | static inline unsigned int map_sector(unsigned long sector) | ||
80 | { | ||
81 | /* keep sector hash simple for now */ | ||
82 | return sector % DC_MAP_NUM_ENTRIES; | ||
83 | } | ||
84 | |||
static struct lldc_head cache_lru; /* LRU cache list (head = LRU item) */
static struct disk_cache_entry cache_entry[DC_NUM_ENTRIES]; /* entry metadata,
                                    parallel to cache_buffer by index */
static cache_map_entry_t cache_map_entry[NUM_VOLUMES][DC_MAP_NUM_ENTRIES];
                                 /* per-volume hash buckets (see diagram) */
static cache_map_entry_t cache_vol_map[NUM_VOLUMES] IBSS_ATTR;
                                 /* per-volume OR of all its buckets */
static uint8_t cache_buffer[DC_NUM_ENTRIES][DC_CACHE_BUFSIZE] CACHEALIGN_ATTR;
                                 /* the actual sector data */
struct mutex disk_cache_mutex SHAREDBSS_ATTR; /* non-static: shared with
                                                 cache clients */

/* per-volume map accessors; IF_MV_VOL collapses to volume 0 on
   single-volume builds */
#define CACHE_MAP_ENTRY(volume, mapnum) \
    cache_map_entry[IF_MV_VOL(volume)][mapnum]
#define CACHE_VOL_MAP(volume) \
    cache_vol_map[IF_MV_VOL(volume)]

/* list-node <-> entry conversions (rely on node being the first member) */
#define DCE_LRU() ((struct disk_cache_entry *)cache_lru.head)
#define DCE_NEXT(fce) ((struct disk_cache_entry *)(fce)->node.next)
#define NODE_DCE(node) ((struct disk_cache_entry *)(node))

/* get the cache index from a pointer to a buffer */
#define DCIDX_FROM_BUF(buf) \
    ((uint8_t (*)[DC_CACHE_BUFSIZE])(buf) - cache_buffer)

/* get the cache index from a pointer to an entry */
#define DCIDX_FROM_DCE(dce) \
    ((dce) - cache_entry)
107 | |||
108 | /* set the in-use bit in the map */ | ||
109 | static inline void cache_bitmap_set_bit(int volume, unsigned int mapnum, | ||
110 | unsigned int bitnum) | ||
111 | { | ||
112 | cache_map_set_bit(&CACHE_MAP_ENTRY(volume, mapnum), bitnum); | ||
113 | cache_map_set_bit(&CACHE_VOL_MAP(volume), bitnum); | ||
114 | (void)volume; | ||
115 | } | ||
116 | |||
117 | /* clear the in-use bit in the map */ | ||
118 | static inline void cache_bitmap_clear_bit(int volume, unsigned int mapnum, | ||
119 | unsigned int bitnum) | ||
120 | { | ||
121 | cache_map_clear_bit(&CACHE_MAP_ENTRY(volume, mapnum), bitnum); | ||
122 | cache_map_clear_bit(&CACHE_VOL_MAP(volume), bitnum); | ||
123 | (void)volume; | ||
124 | } | ||
125 | |||
126 | /* make entry MRU by moving it to the list tail */ | ||
127 | static inline void touch_cache_entry(struct disk_cache_entry *which) | ||
128 | { | ||
129 | struct lldc_node *lru = cache_lru.head; | ||
130 | struct lldc_node *node = &which->node; | ||
131 | |||
132 | if (node == lru->prev) /* already MRU */ | ||
133 | ; /**/ | ||
134 | else if (node == lru) /* is the LRU? just rotate list */ | ||
135 | cache_lru.head = lru->next; | ||
136 | else /* somewhere else; move it */ | ||
137 | { | ||
138 | lldc_remove(&cache_lru, node); | ||
139 | lldc_insert_last(&cache_lru, node); | ||
140 | } | ||
141 | } | ||
142 | |||
143 | /* remove LRU entry from the cache list to use as a buffer */ | ||
144 | static struct disk_cache_entry * cache_remove_lru_entry(void) | ||
145 | { | ||
146 | struct lldc_node *lru = cache_lru.head; | ||
147 | |||
148 | /* at least one is reserved for client */ | ||
149 | if (lru == lru->next) | ||
150 | return NULL; | ||
151 | |||
152 | /* remove it; next-LRU becomes the LRU */ | ||
153 | lldc_remove(&cache_lru, lru); | ||
154 | return NODE_DCE(lru); | ||
155 | } | ||
156 | |||
157 | /* return entry to the cache list and set it LRU */ | ||
158 | static void cache_return_lru_entry(struct disk_cache_entry *fce) | ||
159 | { | ||
160 | lldc_insert_first(&cache_lru, &fce->node); | ||
161 | } | ||
162 | |||
/* discard the entry's data and mark it unused */
static inline void cache_discard_entry(struct disk_cache_entry *dce,
                                       unsigned int index)
{
    /* drop the entry's bit from its hash bucket and the volume map;
       IF_MV_VOL keeps dce->volume from being referenced on single-volume
       builds, where the struct has no such field */
    cache_bitmap_clear_bit(IF_MV_VOL(dce->volume), map_sector(dce->sector),
                           index);
    dce->flags = 0;
}
171 | |||
172 | /* search the cache for the specified sector, returning a buffer, either | ||
173 | to the specified sector, if it exists, or a new/evicted entry that must | ||
174 | be filled */ | ||
175 | void * dc_cache_probe(IF_MV(int volume,) unsigned long sector, | ||
176 | unsigned int *flagsp) | ||
177 | { | ||
178 | unsigned int mapnum = map_sector(sector); | ||
179 | |||
180 | FOR_EACH_BITARRAY_SET_BIT(&CACHE_MAP_ENTRY(volume, mapnum), index) | ||
181 | { | ||
182 | struct disk_cache_entry *dce = &cache_entry[index]; | ||
183 | |||
184 | if (dce->sector == sector) | ||
185 | { | ||
186 | *flagsp = DCE_INUSE; | ||
187 | touch_cache_entry(dce); | ||
188 | return cache_buffer[index]; | ||
189 | } | ||
190 | } | ||
191 | |||
192 | /* sector not found so the LRU is the victim */ | ||
193 | struct disk_cache_entry *dce = DCE_LRU(); | ||
194 | cache_lru.head = dce->node.next; | ||
195 | |||
196 | unsigned int index = DCIDX_FROM_DCE(dce); | ||
197 | void *buf = cache_buffer[index]; | ||
198 | unsigned int old_flags = dce->flags; | ||
199 | |||
200 | if (old_flags) | ||
201 | { | ||
202 | int old_volume = IF_MV_VOL(dce->volume); | ||
203 | unsigned long sector = dce->sector; | ||
204 | unsigned int old_mapnum = map_sector(sector); | ||
205 | |||
206 | if (old_flags & DCE_DIRTY) | ||
207 | dc_writeback_callback(IF_MV(old_volume,) sector, buf); | ||
208 | |||
209 | if (mapnum == old_mapnum IF_MV( && volume == old_volume )) | ||
210 | goto finish_setup; | ||
211 | |||
212 | cache_bitmap_clear_bit(old_volume, old_mapnum, index); | ||
213 | } | ||
214 | |||
215 | cache_bitmap_set_bit(IF_MV_VOL(volume), mapnum, index); | ||
216 | |||
217 | finish_setup: | ||
218 | dce->flags = DCE_INUSE; | ||
219 | #ifdef HAVE_MULTIVOLUME | ||
220 | dce->volume = volume; | ||
221 | #endif | ||
222 | dce->sector = sector; | ||
223 | |||
224 | *flagsp = 0; | ||
225 | return buf; | ||
226 | } | ||
227 | |||
228 | /* mark in-use cache entry as dirty by buffer */ | ||
229 | void dc_dirty_buf(void *buf) | ||
230 | { | ||
231 | unsigned int index = DCIDX_FROM_BUF(buf); | ||
232 | |||
233 | if (index >= DC_NUM_ENTRIES) | ||
234 | return; | ||
235 | |||
236 | /* dirt remains, sticky until flushed */ | ||
237 | struct disk_cache_entry *fce = &cache_entry[index]; | ||
238 | if (fce->flags & DCE_INUSE) | ||
239 | fce->flags |= DCE_DIRTY; | ||
240 | } | ||
241 | |||
242 | /* discard in-use cache entry by buffer */ | ||
243 | void dc_discard_buf(void *buf) | ||
244 | { | ||
245 | unsigned int index = DCIDX_FROM_BUF(buf); | ||
246 | |||
247 | if (index >= DC_NUM_ENTRIES) | ||
248 | return; | ||
249 | |||
250 | struct disk_cache_entry *dce = &cache_entry[index]; | ||
251 | if (dce->flags & DCE_INUSE) | ||
252 | cache_discard_entry(dce, index); | ||
253 | } | ||
254 | |||
/* commit all dirty cache entries to storage for a specified volume */
void dc_commit_all(IF_MV_NONVOID(int volume))
{
    DEBUGF("dc_commit_all()\n");

    /* the volume map is the OR of all of the volume's hash buckets, so
       this visits every cache entry the volume occupies;
       NOTE(review): no locking here -- presumably the caller holds
       disk_cache_mutex; confirm */
    FOR_EACH_BITARRAY_SET_BIT(&CACHE_VOL_MAP(volume), index)
    {
        struct disk_cache_entry *dce = &cache_entry[index];
        unsigned int flags = dce->flags;

        if (flags & DCE_DIRTY)
        {
            /* write the sector back and clear only the dirty bit; the
               entry itself stays cached */
            dc_writeback_callback(IF_MV(volume,) dce->sector,
                                  cache_buffer[index]);
            dce->flags = flags & ~DCE_DIRTY;
        }
    }
}
273 | |||
/* discard all cache entries from the specified volume */
void dc_discard_all(IF_MV_NONVOID(int volume))
{
    DEBUGF("dc_discard_all()\n");

    /* dirty data is dropped, not written back;
       NOTE(review): locking presumably the caller's job -- confirm */
    FOR_EACH_BITARRAY_SET_BIT(&CACHE_VOL_MAP(volume), index)
        cache_discard_entry(&cache_entry[index], index);
}
282 | |||
283 | /* expropriate a buffer from the cache */ | ||
284 | void * dc_get_buffer(void) | ||
285 | { | ||
286 | dc_lock_cache(); | ||
287 | |||
288 | void *buf = NULL; | ||
289 | struct disk_cache_entry *dce = cache_remove_lru_entry(); | ||
290 | |||
291 | if (dce) | ||
292 | { | ||
293 | unsigned int index = DCIDX_FROM_DCE(dce); | ||
294 | unsigned int flags = dce->flags; | ||
295 | |||
296 | buf = cache_buffer[index]; | ||
297 | |||
298 | if (flags) | ||
299 | { | ||
300 | /* must first commit this sector if dirty */ | ||
301 | if (flags & DCE_DIRTY) | ||
302 | dc_writeback_callback(IF_MV(dce->volume,) dce->sector, buf); | ||
303 | |||
304 | cache_discard_entry(dce, index); | ||
305 | } | ||
306 | |||
307 | dce->flags = DCE_BUF; | ||
308 | } | ||
309 | /* cache is out of buffers */ | ||
310 | |||
311 | dc_unlock_cache(); | ||
312 | return buf; | ||
313 | } | ||
314 | |||
315 | /* return buffer to the cache by buffer */ | ||
316 | void dc_release_buffer(void *buf) | ||
317 | { | ||
318 | unsigned int index = DCIDX_FROM_BUF(buf); | ||
319 | |||
320 | if (index >= DC_NUM_ENTRIES) | ||
321 | return; | ||
322 | |||
323 | dc_lock_cache(); | ||
324 | |||
325 | struct disk_cache_entry *dce = &cache_entry[index]; | ||
326 | |||
327 | if (dce->flags & DCE_BUF) | ||
328 | { | ||
329 | dce->flags = 0; | ||
330 | cache_return_lru_entry(dce); | ||
331 | } | ||
332 | |||
333 | dc_unlock_cache(); | ||
334 | } | ||
335 | |||
336 | /* one-time init at startup */ | ||
337 | void dc_init(void) | ||
338 | { | ||
339 | mutex_init(&disk_cache_mutex); | ||
340 | lldc_init(&cache_lru); | ||
341 | for (unsigned int i = 0; i < DC_NUM_ENTRIES; i++) | ||
342 | lldc_insert_last(&cache_lru, &cache_entry[i].node); | ||
343 | } | ||