001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/ksynch.h>
020 #include <sos/hash.h>
021 #include <sos/kmem_slab.h>
022 #include <sos/kmalloc.h>
023 #include <sos/ksynch.h>
024 #include <sos/assert.h>
025 #include <sos/list.h>
026
027 #include "blkcache.h"
028
029
030
031
032
033
034
035
036
037
038
039
040
041
042
043
044
045
046
047
048
049
/*
 * One cached disk block: its contents, its identity (index on the
 * device), and the bookkeeping needed by the cache (state, lock,
 * hash linkage, LRU list linkage).
 */
struct sos_block_cache_entry
{
  /* Index of the block on the underlying device (hash key) */
  sos_luoffset_t block_index;

  /* Kernel-mapped buffer holding the block's data (allocated from
     the per-cache slab, one block_size each) */
  sos_vaddr_t block_contents;

  /* FREE: unused / SYNC: identical to on-disk copy / DIRTY: modified
     in memory, not yet written back */
  enum { ENTRY_FREE, ENTRY_SYNC, ENTRY_DIRTY } state;

  /* Per-entry mutex: held by the thread currently using the block
     (taken in retrieve, released in release/flush paths) */
  struct sos_kmutex lock;

  /* Linkage inside the cache's block_index -> entry hash table */
  struct sos_hash_linkage hlink;

  /* Linkage inside exactly one of the free/sync/dirty LRU lists */
  struct sos_block_cache_entry *prev, *next;
};
070
071
072
073 static struct sos_kslab_cache * cache_of_bkcache_entries;
074
075
/*
 * A block cache instance, one per block device. Entries live in the
 * hash table (for lookup by block index) and on exactly one of the
 * three lists below (for replacement policy).
 */
struct sos_block_cache
{
  /* Slab cache the block_contents buffers are allocated from
     (object size == device block size) */
  struct sos_kslab_cache * slab_cache;

  /* block_index -> entry lookup table */
  struct sos_hash_table * lookup_table;

  /* Device read/write callbacks and the opaque cookie passed back
     to them */
  struct sos_blockdev_operations * operations;
  void * blkdev_instance_custom_data;

  /* Entries not caching anything (candidates for immediate reuse) */
  struct sos_block_cache_entry * free_list;
  /* Entries identical to their on-disk copy (head = most recently
     used; reclaimed from the tail) */
  struct sos_block_cache_entry * sync_list;
  /* Entries modified in memory, awaiting write-back (head = most
     recently used; flushed from the tail) */
  struct sos_block_cache_entry * dirty_list;

};
096
097
098 sos_ret_t sos_blkcache_subsystem_setup()
099 {
100 cache_of_bkcache_entries
101 = sos_kmem_cache_create("blkcache_entry",
102 sizeof(struct sos_block_cache_entry),
103 1, 0,
104 SOS_KSLAB_CREATE_MAP | SOS_KSLAB_CREATE_ZERO);
105 if (NULL == cache_of_bkcache_entries)
106 return -SOS_ENOMEM;
107 return SOS_OK;
108 }
109
110
111 struct sos_block_cache *
112 sos_blkcache_new_cache(void * blockdev_instance_custom_data,
113 sos_size_t block_size,
114 sos_count_t cache_size_in_blocks,
115 struct sos_blockdev_operations * blockdev_ops)
116 {
117 sos_count_t idx;
118 struct sos_block_cache * blkcache;
119
120 SOS_ASSERT_FATAL(block_size > 0);
121 SOS_ASSERT_FATAL(cache_size_in_blocks > 0);
122
123 blkcache
124 = (struct sos_block_cache*) sos_kmalloc(sizeof(struct sos_block_cache), 0);
125 if (NULL == blkcache)
126 return NULL;
127
128 blkcache->blkdev_instance_custom_data = blockdev_instance_custom_data;
129 blkcache->operations = blockdev_ops;
130
131
132 blkcache->lookup_table = sos_hash_create("blkcache",
133 struct sos_block_cache_entry,
134 sos_hash_ui64,
135 sos_hash_key_eq_ui64,
136 17, block_index, hlink);
137 if (NULL == blkcache->lookup_table)
138 {
139 sos_kfree((sos_vaddr_t)blkcache);
140 return NULL;
141 }
142
143
144 blkcache->slab_cache = sos_kmem_cache_create("blkcache", block_size,
145 4, 0,
146 SOS_KSLAB_CREATE_MAP
147 | SOS_KSLAB_CREATE_ZERO);
148 if (NULL == blkcache->slab_cache)
149 {
150 sos_hash_dispose(blkcache->lookup_table);
151 sos_kfree((sos_vaddr_t)blkcache);
152 return NULL;
153 }
154
155
156
157 for (idx = 0 ; idx < cache_size_in_blocks ; idx ++)
158 {
159 struct sos_block_cache_entry * bkcache_entry;
160 sos_vaddr_t bkcache_contents;
161
162 bkcache_entry
163 = (struct sos_block_cache_entry*)sos_kmem_cache_alloc(cache_of_bkcache_entries,
164 0);
165 if (NULL == bkcache_entry)
166 {
167 sos_blkcache_delete_cache(blkcache);
168 return NULL;
169 }
170
171 bkcache_contents = sos_kmem_cache_alloc(blkcache->slab_cache, 0);
172 if (NULL == (void*)bkcache_contents)
173 {
174 sos_kmem_cache_free((sos_vaddr_t)bkcache_entry);
175 sos_blkcache_delete_cache(blkcache);
176 return NULL;
177 }
178
179
180 bkcache_entry->block_contents = bkcache_contents;
181 bkcache_entry->state = ENTRY_FREE;
182 SOS_ASSERT_FATAL(SOS_OK == sos_kmutex_init(& bkcache_entry->lock,
183 "bkcache_lock",
184 SOS_KWQ_ORDER_FIFO));
185
186
187 list_add_head(blkcache->free_list, bkcache_entry);
188 }
189
190 return blkcache;
191 }
192
193
194
195 static sos_ret_t blkcache_collapse_list(struct sos_block_cache_entry ** l)
196 {
197 while (! list_is_empty(*l))
198 {
199 struct sos_block_cache_entry * entry = list_pop_head(*l);
200 if (NULL == entry)
201 break;
202
203
204
205 SOS_ASSERT_FATAL(SOS_OK == sos_kmutex_dispose(& entry->lock));
206
207 if (NULL != (void*)entry->block_contents)
208 sos_kmem_cache_free(entry->block_contents);
209
210 sos_kmem_cache_free((sos_vaddr_t)entry);
211 }
212
213 return SOS_OK;
214 }
215
216
217 sos_ret_t
218 sos_blkcache_delete_cache(struct sos_block_cache * bc)
219 {
220 sos_blkcache_flush(bc);
221
222 sos_hash_dispose(bc->lookup_table);
223 blkcache_collapse_list(& bc->sync_list);
224 blkcache_collapse_list(& bc->free_list);
225
226 sos_kfree((sos_vaddr_t)bc);
227
228 return SOS_OK;
229 }
230
231
232
233
/*
 * Write a DIRTY entry back to disk and move it to the sync list (or
 * to the free list when @mark_as_free, which also removes it from
 * the lookup table).
 *
 * Caller MUST hold entry->lock; entry MUST be in state ENTRY_DIRTY.
 *
 * @return The write_block() status (SOS_OK on success).
 */
static sos_ret_t blkcache_flush_entry(struct sos_block_cache * bc,
                                      struct sos_block_cache_entry * entry,
                                      sos_bool_t mark_as_free)
{
  sos_ret_t retval = SOS_OK;

  SOS_ASSERT_FATAL(TRUE == sos_kmutex_owned_by_me(& entry->lock));
  SOS_ASSERT_FATAL(entry->state == ENTRY_DIRTY);

  /* The list/state transition is done BEFORE the (potentially
     blocking) write_block() call: other threads scanning the lists
     while we sleep must not pick this entry as a flush candidate
     again. The entry is still protected by its lock. */
  if (mark_as_free)
    {
      list_delete(bc->dirty_list, entry);
      entry->state = ENTRY_FREE;
      sos_hash_remove(bc->lookup_table, entry);
      list_add_head(bc->free_list, entry);
    }
  else
    {
      list_delete(bc->dirty_list, entry);
      entry->state = ENTRY_SYNC;
      list_add_head(bc->sync_list, entry);
    }

  retval = bc->operations->write_block(bc->blkdev_instance_custom_data,
                                       entry->block_contents,
                                       entry->block_index);
  if (SOS_OK != retval)
    {
      /* On failure the entry has already been moved to the sync/free
         list even though the on-disk copy is stale: undoing that
         transition safely (and retrying or reporting) is not
         implemented, hence the fatal error below. */
      SOS_FATAL_ERROR("Not implemented yet: we don't support failed disk access (see comments in source file)");
    }

  return retval;
}
275
276
/*
 * Look the block @block_index up in the cache, loading it from disk
 * when absent (unless the access is write-only, in which case the
 * block is not read and is immediately marked dirty).
 *
 * On success the returned entry is LOCKED by the calling thread and
 * *block_contents points to its data buffer; the caller must hand it
 * back with sos_blkcache_release_block(). Returns NULL (and a NULL
 * *block_contents) on read error.
 *
 * NOTE(review): the lock acquisitions below may block; after each
 * wake-up the code re-checks its assumptions because another thread
 * may have recycled the entry meanwhile.
 */
struct sos_block_cache_entry *
sos_blkcache_retrieve_block(struct sos_block_cache * bc,
                            sos_luoffset_t block_index,
                            sos_blkcache_access_type_t access_type,
                            sos_vaddr_t */*out*/ block_contents)
{
  struct sos_block_cache_entry * entry = NULL;
  *block_contents = (sos_vaddr_t)NULL;

  /* Loop until we either own a matching cached entry (returned from
     inside the loop) or own a FREE entry to recycle (break). */
  while (TRUE)
    {
      /* Fast path: the block may already be cached */
      entry = sos_hash_lookup(bc->lookup_table,
                              & block_index);
      if (NULL != entry)
        {
          /* May block; the entry can change under us meanwhile */
          sos_kmutex_lock(& entry->lock, NULL);

          /* Re-check: while we slept the entry may have been
             recycled for another block, or freed */
          if ((ENTRY_FREE != entry->state)
              && (entry->block_index == block_index))
            {
              /* A write-only access dirties a clean entry: move it
                 to the dirty list right away */
              if ( (access_type == SOS_BLKCACHE_WRITE_ONLY)
                   && (ENTRY_SYNC == entry->state) )
                {
                  list_delete(bc->sync_list, entry);
                  entry->state = ENTRY_DIRTY;
                  list_add_head(bc->dirty_list, entry);
                }

              *block_contents = entry->block_contents;
              return entry;
            }

          /* The entry no longer caches our block: release it and
             retry the whole lookup */
          sos_kmutex_unlock(& entry->lock);

          continue;
        }

      /* Block not cached: pick a victim entry to recycle.
         Preference order: FREE entry, then least-recently-used SYNC
         entry, then least-recently-used DIRTY entry (which must be
         flushed first). */
      if (! list_is_empty(bc->free_list))
        {
          entry = list_get_head(bc->free_list);
          sos_kmutex_lock(& entry->lock, NULL);

          /* Re-check after the possible sleep: the entry may have
             been taken, or the block cached by someone else */
          if ((ENTRY_FREE != entry->state)
              || (NULL != sos_hash_lookup(bc->lookup_table, & block_index)))
            {
              sos_kmutex_unlock(& entry->lock);
              continue;
            }

          /* Got a free entry, locked by us */
          break;
        }

      /* No free entry: recycle the LRU clean entry (tail of the
         sync list) */
      if (! list_is_empty(bc->sync_list))
        {
          entry = list_get_tail(bc->sync_list);
          sos_kmutex_lock(& entry->lock, NULL);

          /* Re-check after the possible sleep */
          if ((ENTRY_SYNC != entry->state)
              || (NULL != sos_hash_lookup(bc->lookup_table, & block_index)))
            {
              sos_kmutex_unlock(& entry->lock);
              continue;
            }

          /* Detach it from its old block and move it to the free
             list (we still hold its lock) */
          list_delete(bc->sync_list, entry);
          sos_hash_remove(bc->lookup_table, entry);
          entry->state = ENTRY_FREE;
          list_add_head(bc->free_list, entry);

          break;
        }

      /* Last resort: flush the LRU dirty entry; on success
         blkcache_flush_entry(…, TRUE) moves it to the free list */
      SOS_ASSERT_FATAL(! list_is_empty(bc->dirty_list));
      entry = list_get_tail(bc->dirty_list);

      sos_kmutex_lock(& entry->lock, NULL);

      /* Re-check after the possible sleep */
      if ((ENTRY_DIRTY == entry->state)
          && (NULL == sos_hash_lookup(bc->lookup_table, & block_index)))
        {
          if (SOS_OK == blkcache_flush_entry(bc, entry, TRUE))
            /* Entry is now FREE, on the free list, locked by us */
            break;
        }

      sos_kmutex_unlock(& entry->lock);

      /* Nothing usable this round: retry from the top */
    }

  /* Here: entry is locked by us, FREE, on the free list, and the
     block is not in the lookup table. Bind it to our block. */
  list_delete(bc->free_list, entry);

  entry->block_index = block_index;

  if (SOS_OK != sos_hash_insert(bc->lookup_table, entry))
    SOS_FATAL_ERROR("Unexpected hash collision");

  /* Write-only: no need to read the block from disk, mark it dirty
     straight away */
  if (access_type == SOS_BLKCACHE_WRITE_ONLY)
    {
      entry->state = ENTRY_DIRTY;
      list_add_head(bc->dirty_list, entry);
    }
  /* Otherwise fetch the contents from the device */
  else
    {
      entry->state = ENTRY_SYNC;
      list_add_head(bc->sync_list, entry);

      if (SOS_OK
          != bc->operations->read_block(bc->blkdev_instance_custom_data,
                                        entry->block_contents,
                                        entry->block_index))
        {
          /* Read failure: give the entry back and report the error */
          list_delete(bc->sync_list, entry);
          sos_hash_remove(bc->lookup_table, entry);
          entry->state = ENTRY_FREE;
          list_add_head(bc->free_list, entry);
          sos_kmutex_unlock(& entry->lock);
          return NULL;
        }
    }

  *block_contents = entry->block_contents;
  return entry;
}
450
451
452 sos_ret_t
453 sos_blkcache_release_block(struct sos_block_cache * bc,
454 struct sos_block_cache_entry * entry,
455 sos_bool_t is_dirty,
456 sos_bool_t force_flush)
457 {
458
459 if (ENTRY_SYNC == entry->state)
460 {
461 list_delete(bc->sync_list, entry);
462
463 if (is_dirty)
464 entry->state = ENTRY_DIRTY;
465 }
466 else
467 list_delete(bc->dirty_list, entry);
468
469 if (ENTRY_SYNC == entry->state)
470 list_add_head(bc->sync_list, entry);
471 else
472 list_add_head(bc->dirty_list, entry);
473
474 if ( (ENTRY_DIRTY == entry->state) && force_flush)
475 blkcache_flush_entry(bc, entry, FALSE);
476
477 sos_kmutex_unlock(& entry->lock);
478 return SOS_OK;
479 }
480
481
482 sos_ret_t
483 sos_blkcache_flush(struct sos_block_cache * bc)
484 {
485 struct sos_block_cache_entry * entry;
486 while (NULL != (entry = list_get_head(bc->dirty_list)) )
487 {
488 sos_ret_t retval = SOS_OK;
489
490 sos_kmutex_lock(& entry->lock, NULL);
491 if (ENTRY_DIRTY == entry->state)
492 retval = blkcache_flush_entry(bc, entry, FALSE);
493 sos_kmutex_unlock(& entry->lock);
494
495 if (SOS_OK != retval)
496 return retval;
497 }
498
499 return SOS_OK;
500 }