001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/list.h>
020 #include <sos/assert.h>
021 #include <sos/kmalloc.h>
022 #include <sos/uaccess.h>
023 #include <sos/thread.h>
024 #include <sos/time.h>
025 #include <sos/blkcache.h>
026 #include <sos/fs_pagecache.h>
027 #include <sos/umem_vmm.h>
028 #include <sos/physmem.h>
029 #include <hwcore/paging.h>
030 #include <drivers/devices.h>
031
032 #include "blkdev.h"
033
/*
 * In-kernel descriptor of one registered block device: either a whole
 * disk or a partition of one.
 */
struct sos_blockdev_instance
{
  /* Monotonically-increasing identifier, unique among all instances
     ever registered (allows sos_blockdev_sync_all_devices to resume a
     list walk after possibly blocking) */
  sos_ui64_t uid;

  /* Size of one hardware block, in bytes */
  sos_size_t block_size;

  /* Capacity of this device, expressed in blocks of block_size bytes */
  sos_lcount_t number_of_blocks;

  /* For a partition: the disk it lives on, and the ABSOLUTE index of
     its first block on that disk. For a whole disk,
     parent_blockdev is NULL and index_of_first_block is 0 */
  struct sos_blockdev_instance *parent_blockdev;
  sos_luoffset_t index_of_first_block;

  /* (device class, device instance) pair identifying this device */
  struct sos_fs_dev_id_t dev_id;

  /* Driver-provided callbacks (low-level read/write/ioctl) */
  struct sos_blockdev_operations * operations;

  /* Block cache used by ordinary read/write; a partition SHARES this
     cache with its parent disk (see sos_blockdev_register_partition) */
  struct sos_block_cache * blk_cache;

  /* Page cache backing mmap'd accesses to this device; private to
     each instance, never shared with the parent */
  struct sos_fs_pagecache * map_cache;

  /* Descriptor that lets this device be mmap'd into user space */
  struct sos_umem_vmm_mapped_resource mapres;

  /* Opaque driver data handed back through the operations */
  void * custom_data;

  /* Reference counter: the registration list owns the initial
     reference, so ref_cnt never drops below 1 while registered */
  sos_count_t ref_cnt;
  struct sos_blockdev_instance *next, *prev;
};
092
093
094
095 static struct sos_blockdev_instance *registered_blockdev_instances;
096
097
098
099 static sos_ui64_t last_fs_instance_uid;
100
101
102
103 static sos_ret_t
104 blockdev_sync_dirty_page(sos_luoffset_t offset,
105 sos_vaddr_t dirty_page,
106 void * custom_data);
107
108 static sos_ret_t
109 blockdev_helper_new_opened_file(struct sos_fs_node * this,
110 const struct sos_process * owner,
111 sos_ui32_t open_flags,
112 struct sos_fs_opened_file ** result_of);
113 static sos_ret_t
114 blockdev_helper_close_opened_file(struct sos_fs_node * this,
115 struct sos_fs_opened_file * of);
116
117 static sos_ret_t
118 duplicate_opened_blockdev(struct sos_fs_opened_file *this,
119 const struct sos_process * for_owner,
120 struct sos_fs_opened_file **result_of);
121
122 static sos_ret_t blockdev_new_mapping(struct sos_umem_vmm_vr *);
123
124 static struct sos_fs_ops_opened_file blockdev_ops_opened_file;
125 static struct sos_fs_ops_opened_blockdev blockdev_ops_opened_blockdev;
126
127
128
129 static void bdflush_thread(void * unused) __attribute__((noreturn));
130 static void bdflush_thread(void * unused)
131 {
132 while (1)
133 {
134 struct sos_time t = (struct sos_time) { .sec=30, .nanosec=0 };
135 sos_thread_sleep(&t);
136 sos_blockdev_sync_all_devices();
137 }
138 }
139
140
141 sos_ret_t sos_blockdev_subsystem_setup()
142 {
143 sos_ret_t retval;
144 last_fs_instance_uid = 42;
145
146 retval = sos_blkcache_subsystem_setup();
147 if (SOS_OK != retval)
148 return retval;
149
150
151 if (NULL == sos_create_kernel_thread("bdflush",
152 bdflush_thread,
153 NULL,
154 SOS_SCHED_PRIO_TS_LOWEST))
155 return -SOS_ENOMEM;
156
157 return SOS_OK;
158 }
159
160
161
162
163 static sos_ret_t
164 blockdev_use_instance(struct sos_blockdev_instance * blockdev)
165 {
166 SOS_ASSERT_FATAL(blockdev->ref_cnt > 0);
167 blockdev->ref_cnt ++;
168 return SOS_OK;
169 }
170
171
172
173
/*
 * Drop a reference taken with blockdev_use_instance() /
 * sos_blockdev_ref_instance().
 *
 * The registration list itself holds the very first reference, so the
 * counter must stay strictly above 1 here: only
 * sos_blockdev_unregister_device() may take the count down to the
 * point where the instance is destroyed.
 */
sos_ret_t
sos_blockdev_release_instance(struct sos_blockdev_instance * blockdev)
{
  SOS_ASSERT_FATAL(blockdev->ref_cnt > 1);
  blockdev->ref_cnt --;
  return SOS_OK;
}
181
182
183
184
185
186
187 static struct sos_blockdev_instance*
188 lookup_blockdev_instance(sos_ui32_t device_class, sos_ui32_t device_instance)
189 {
190 struct sos_blockdev_instance * blockdev;
191 int nb;
192
193 list_foreach (registered_blockdev_instances, blockdev, nb)
194 {
195 if (blockdev->dev_id.device_class != device_class)
196 continue;
197 if (blockdev->dev_id.device_instance != device_instance)
198 continue;
199
200 return blockdev;
201 }
202
203 return NULL;
204 }
205
206
/*
 * Register a whole disk as a new block device.
 *
 * Allocates the instance descriptor, a private block cache of
 * blkcache_size_in_blocks entries and a private page cache for mmap,
 * then links the instance into the global registration list.
 *
 * @return SOS_OK on success; -SOS_EBUSY if (class, instance) is
 *   already registered; -SOS_EINVAL on zero block size / block count;
 *   -SOS_ENOMEM when an allocation fails (everything already
 *   allocated is rolled back).
 */
sos_ret_t
sos_blockdev_register_disk (sos_ui32_t device_class,
                            sos_ui32_t device_instance,
                            sos_size_t block_size,
                            sos_lcount_t number_of_blocks,
                            sos_count_t blkcache_size_in_blocks,
                            struct sos_blockdev_operations * blockdev_ops,
                            void * blockdev_instance_custom_data)
{
  struct sos_blockdev_instance * blockdev;

  /* Refuse to register the same (class, instance) pair twice */
  blockdev = lookup_blockdev_instance(device_class, device_instance);
  if (NULL != blockdev)
    return -SOS_EBUSY;

  if (block_size <= 0)
    return -SOS_EINVAL;
  if (number_of_blocks <= 0)
    return -SOS_EINVAL;

  blockdev = (struct sos_blockdev_instance*)
    sos_kmalloc(sizeof(struct sos_blockdev_instance), 0);
  if (NULL == blockdev)
    return -SOS_ENOMEM;

  /* Private block cache for ordinary read/write traffic */
  blockdev->blk_cache = sos_blkcache_new_cache(blockdev_instance_custom_data,
                                               block_size,
                                               blkcache_size_in_blocks,
                                               blockdev_ops);
  if (NULL == blockdev->blk_cache)
    {
      sos_kfree((sos_vaddr_t) blockdev);
      return -SOS_ENOMEM;
    }

  /* Private page cache backing mmap'd accesses */
  blockdev->map_cache
    = sos_fs_pagecache_new_cache((sos_fs_pagecache_sync_function_t)
                                 blockdev_sync_dirty_page,
                                 (void*)blockdev);
  if (NULL == blockdev->map_cache)
    {
      sos_blkcache_delete_cache(blockdev->blk_cache);
      sos_kfree((sos_vaddr_t) blockdev);
      return -SOS_ENOMEM;
    }

  /* Identity */
  blockdev->dev_id.device_class = device_class;
  blockdev->dev_id.device_instance = device_instance;

  /* Geometry: a whole disk has no parent and starts at block 0 */
  blockdev->block_size = block_size;
  blockdev->number_of_blocks = number_of_blocks;
  blockdev->parent_blockdev = NULL;
  blockdev->index_of_first_block = 0;

  /* Driver hooks and opaque driver state */
  blockdev->operations = blockdev_ops;
  blockdev->custom_data = blockdev_instance_custom_data;

  /* Describe the device as an mmap-able resource */
  blockdev->mapres.allowed_access_rights = SOS_VM_MAP_PROT_READ
    | SOS_VM_MAP_PROT_WRITE
    | SOS_VM_MAP_PROT_EXEC;
  blockdev->mapres.flags = 0;
  list_init(blockdev->mapres.list_vr);
  blockdev->mapres.custom_data = (void*)blockdev;
  blockdev->mapres.mmap = blockdev_new_mapping;

  /* Initial reference: owned by the registration list */
  blockdev->ref_cnt = 1;

  blockdev->uid = last_fs_instance_uid ++;
  list_add_tail(registered_blockdev_instances, blockdev);

  return SOS_OK;
}
285
286
287 sos_ret_t
288 sos_blockdev_register_partition(sos_ui32_t device_class,
289 sos_ui32_t device_instance,
290 struct sos_blockdev_instance * parent_bd,
291 sos_luoffset_t index_of_first_block,
292 sos_lcount_t number_of_blocks,
293 void * blockdev_instance_custom_data)
294 {
295 struct sos_blockdev_instance * blockdev;
296
297 if (NULL == parent_bd)
298 return -SOS_EINVAL;
299
300
301 if (index_of_first_block + number_of_blocks >
302 parent_bd->number_of_blocks)
303 return -SOS_EINVAL;
304
305 blockdev = lookup_blockdev_instance(device_class, device_instance);
306 if (NULL != blockdev)
307 return -SOS_EBUSY;
308
309 blockdev = (struct sos_blockdev_instance*)
310 sos_kmalloc(sizeof(struct sos_blockdev_instance), 0);
311 if (NULL == blockdev)
312 return -SOS_ENOMEM;
313
314
315 blockdev->map_cache
316 = sos_fs_pagecache_new_cache((sos_fs_pagecache_sync_function_t)
317 blockdev_sync_dirty_page,
318 (void*)blockdev);
319 if (NULL == blockdev->map_cache)
320 {
321 sos_kfree((sos_vaddr_t) blockdev);
322 return -SOS_ENOMEM;
323 }
324
325
326 blockdev_use_instance(parent_bd);
327
328
329 blockdev->dev_id.device_class = device_class;
330 blockdev->dev_id.device_instance = device_instance;
331
332
333 blockdev->block_size = parent_bd->block_size;
334 blockdev->number_of_blocks = number_of_blocks;
335 blockdev->parent_blockdev = parent_bd;
336 blockdev->index_of_first_block
337 = parent_bd->index_of_first_block + index_of_first_block;
338
339
340 blockdev->operations = parent_bd->operations;
341 blockdev->blk_cache = parent_bd->blk_cache;
342 blockdev->custom_data = blockdev_instance_custom_data;
343
344
345 blockdev->mapres.allowed_access_rights = SOS_VM_MAP_PROT_READ
346 | SOS_VM_MAP_PROT_WRITE
347 | SOS_VM_MAP_PROT_EXEC;
348 blockdev->mapres.flags = 0;
349 list_init(blockdev->mapres.list_vr);
350 blockdev->mapres.custom_data = (void*)blockdev;
351 blockdev->mapres.mmap = blockdev_new_mapping;
352
353 blockdev->ref_cnt = 1;
354
355 blockdev->uid = last_fs_instance_uid ++;
356 list_add_tail(registered_blockdev_instances, blockdev);
357
358 return SOS_OK;
359 }
360
361
/*
 * Remove a device from the registration list and destroy it.
 *
 * Only allowed when nobody but the registration list holds a
 * reference (ref_cnt == 1). A partition releases its parent's
 * reference; a whole disk destroys the block cache it owns (a
 * partition must NOT, since it shares the parent's cache).
 *
 * @return SOS_OK on success, -SOS_ENODEV if the device is unknown,
 *   -SOS_EBUSY if it is still in use.
 */
sos_ret_t sos_blockdev_unregister_device (sos_ui32_t device_class,
                                          sos_ui32_t device_instance)
{
  struct sos_blockdev_instance * blockdev;

  blockdev = lookup_blockdev_instance(device_class, device_instance);
  if (NULL == blockdev)
    return -SOS_ENODEV;

  if (blockdev->ref_cnt != 1)
    return -SOS_EBUSY;

  /* The page cache is always private to the instance */
  sos_fs_pagecache_delete_cache(blockdev->map_cache);

  if (NULL != blockdev->parent_blockdev)
    {
      /* Partition: give back the reference taken at registration */
      blockdev->parent_blockdev->ref_cnt --;
    }
  else
    {
      /* Whole disk: the block cache belongs to us */
      sos_blkcache_delete_cache(blockdev->blk_cache);
    }

  list_delete(registered_blockdev_instances, blockdev);
  return sos_kfree((sos_vaddr_t)blockdev);
}
391
392
393
394
395
/*
 * Copy up to *len bytes from the device into buff_addr (kernel or
 * user address), starting at byte offset_in_device.
 *
 * Each loop iteration first tries the mmap page cache (unless
 * bypass_pagecache), then falls back to the block cache, advancing by
 * at most one block at a time. On return *len holds the number of
 * bytes actually transferred (possibly short on error or end of
 * device); the function itself always returns SOS_OK.
 */
static sos_ret_t
blockdev_generic_read(struct sos_blockdev_instance * blockdev,
                      sos_luoffset_t offset_in_device,
                      sos_genaddr_t buff_addr,
                      sos_size_t * len,
                      sos_bool_t bypass_pagecache)
{
  sos_size_t rdbytes = 0;

  while (rdbytes < *len)
    {
      sos_ret_t retval;
      sos_size_t offset_in_block, wrbytes;

      /* Device-relative block holding the current offset */
      sos_luoffset_t block_id
        = offset_in_device / blockdev->block_size;
      SOS_GENADDR_DECL(gaddr, buff_addr.is_user, buff_addr.addr + rdbytes);

      /* Stop (short read) at end of device */
      if (block_id >= blockdev->number_of_blocks)
        break;

      wrbytes = *len - rdbytes;

      /* First chance: serve the data from the mmap page cache */
      if (! bypass_pagecache)
        retval = sos_fs_pagecache_read(blockdev->map_cache,
                                       offset_in_device,
                                       gaddr,
                                       & wrbytes);
      else
        retval = -SOS_ENOENT;

      if (-SOS_ENOENT != retval)
        {
          /* Page cache answered (fully or partially) */
          rdbytes += wrbytes;
          offset_in_device += wrbytes;

          if (SOS_OK == retval)
            continue;
          else
            break;
        }

      /* Page cache miss: go through the block cache. Translate to an
         absolute block index on the underlying disk */
      block_id += blockdev->index_of_first_block;

      /* Pin the block's data in the cache (reads it in if needed) */
      sos_vaddr_t block_data;
      struct sos_block_cache_entry * bkcache_entry
        = sos_blkcache_retrieve_block(blockdev->blk_cache,
                                      block_id,
                                      SOS_BLKCACHE_READ_ONLY,
                                      & block_data);
      if (NULL == bkcache_entry)
        break;

      /* Copy at most the remainder of this one block */
      offset_in_block
        = offset_in_device % blockdev->block_size;
      wrbytes
        = blockdev->block_size - offset_in_block;
      if (*len - rdbytes < wrbytes)
        wrbytes = *len - rdbytes;

      retval = sos_memcpy_generic_to(gaddr,
                                     block_data + offset_in_block,
                                     wrbytes);

      /* Unpin the block: not modified, no synchronous flush */
      sos_blkcache_release_block(blockdev->blk_cache,
                                 bkcache_entry,
                                 FALSE, FALSE);
      /* retval is the number of bytes copied (may be short on a
         faulting user address) */
      if (retval > 0)
        {
          rdbytes += retval;
          offset_in_device += retval;
        }
      if (retval != (sos_ret_t)wrbytes)
        break;
    }

  *len = rdbytes;
  return SOS_OK;
}
483
484
485
486
487
/*
 * Copy up to *len bytes from buff_addr (kernel or user address) to
 * the device, starting at byte offset_in_device.
 *
 * Mirror image of blockdev_generic_read: each iteration first tries
 * the mmap page cache (unless bypass_pagecache), then falls back to
 * the block cache, one block at a time. When synchronous_write is
 * TRUE every block is flushed to the device before moving on. On
 * return *len holds the number of bytes actually written; the
 * function itself always returns SOS_OK.
 */
static sos_ret_t
blockdev_generic_write(struct sos_blockdev_instance * blockdev,
                       sos_luoffset_t offset_in_device,
                       sos_genaddr_t buff_addr,
                       sos_size_t * len,
                       sos_bool_t synchronous_write,
                       sos_bool_t bypass_pagecache)
{
  sos_size_t wrbytes = 0;

  while (wrbytes < *len)
    {
      sos_ret_t retval;
      sos_size_t offset_in_block, usrbytes;
      sos_blkcache_access_type_t access_type;

      /* Device-relative block holding the current offset */
      sos_luoffset_t block_id
        = offset_in_device / blockdev->block_size;
      SOS_GENADDR_DECL(gaddr, buff_addr.is_user, buff_addr.addr + wrbytes);

      /* Stop (short write) at end of device */
      if (block_id >= blockdev->number_of_blocks)
        break;

      usrbytes = *len - wrbytes;

      /* First chance: write through the mmap page cache */
      if (! bypass_pagecache)
        retval = sos_fs_pagecache_write(blockdev->map_cache,
                                        offset_in_device,
                                        gaddr,
                                        & usrbytes,
                                        synchronous_write);
      else
        retval = -SOS_ENOENT;

      if (-SOS_ENOENT != retval)
        {
          /* Page cache accepted the data (fully or partially) */
          wrbytes += usrbytes;
          offset_in_device += usrbytes;

          if (SOS_OK == retval)
            continue;
          else
            break;
        }

      /* Page cache miss: go through the block cache. Translate to an
         absolute block index on the underlying disk */
      block_id += blockdev->index_of_first_block;

      /* Write at most the remainder of this one block */
      offset_in_block
        = offset_in_device % blockdev->block_size;
      usrbytes
        = blockdev->block_size - offset_in_block;
      if (*len - wrbytes < usrbytes)
        usrbytes = *len - wrbytes;

      if (usrbytes != blockdev->block_size)
        /* Partial block: its current contents must be read in first */
        access_type = SOS_BLKCACHE_READ_WRITE;
      else
        /* Whole block overwritten: no need to read it from disk */
        access_type = SOS_BLKCACHE_WRITE_ONLY;

      /* Pin the block's data in the cache */
      sos_vaddr_t block_data;
      struct sos_block_cache_entry * bkcache_entry
        = sos_blkcache_retrieve_block(blockdev->blk_cache,
                                      block_id, access_type,
                                      & block_data);
      if (NULL == bkcache_entry)
        break;

      /* retval is the number of bytes copied (may be short on a
         faulting user address) */
      retval = sos_memcpy_generic_from(block_data + offset_in_block,
                                       gaddr,
                                       usrbytes);

      /* Unpin the block, marked dirty; flush now if synchronous */
      sos_blkcache_release_block(blockdev->blk_cache,
                                 bkcache_entry,
                                 TRUE,
                                 synchronous_write);

      if (retval > 0)
        {
          wrbytes += retval;
          offset_in_device += retval;
        }
      if (retval != (sos_ret_t)usrbytes)
        break;
    }

  *len = wrbytes;
  return SOS_OK;
}
588
589
590 struct sos_blockdev_instance *
591 sos_blockdev_ref_instance(sos_ui32_t device_class,
592 sos_ui32_t device_instance)
593 {
594 struct sos_blockdev_instance * blockdev;
595 blockdev = lookup_blockdev_instance(device_class,
596 device_instance);
597 if (NULL == blockdev)
598 return NULL;
599
600 blockdev_use_instance(blockdev);
601 return blockdev;
602 }
603
604
605 sos_ret_t sos_blockdev_kernel_read(struct sos_blockdev_instance * blockdev,
606 sos_luoffset_t offset,
607 sos_vaddr_t dest_buf,
608 sos_size_t * len)
609 {
610 sos_ret_t retval;
611 SOS_GENADDR_DECL(gaddr, FALSE, dest_buf);
612
613 blockdev_use_instance(blockdev);
614 retval = blockdev_generic_read(blockdev, offset, gaddr, len, FALSE);
615 sos_blockdev_release_instance(blockdev);
616
617 return retval;
618 }
619
620
621 sos_ret_t sos_blockdev_kernel_write(struct sos_blockdev_instance * blockdev,
622 sos_luoffset_t offset,
623 sos_vaddr_t src_buf,
624 sos_size_t * len)
625 {
626 sos_ret_t retval;
627 SOS_GENADDR_DECL(gaddr, FALSE, src_buf);
628
629 blockdev_use_instance(blockdev);
630 retval = blockdev_generic_write(blockdev, offset, gaddr, len, FALSE, FALSE);
631 sos_blockdev_release_instance(blockdev);
632
633 return retval;
634 }
635
636
637
638
639 static sos_ret_t
640 blockdev_sync_dirty_page(sos_luoffset_t offset,
641 sos_vaddr_t dirty_page,
642 void * custom_data)
643 {
644 sos_ret_t retval;
645 struct sos_blockdev_instance * blockdev
646 = (struct sos_blockdev_instance*) custom_data;
647 sos_size_t len = SOS_PAGE_SIZE;
648
649 SOS_GENADDR_DECL(gaddr, FALSE, dirty_page);
650
651 blockdev_use_instance(blockdev);
652 retval = blockdev_generic_write(blockdev, offset, gaddr, &len, FALSE, TRUE);
653 sos_blockdev_release_instance(blockdev);
654
655 if (SOS_OK != retval)
656 return retval;
657
658 if (SOS_PAGE_SIZE != len)
659 return -SOS_EIO;
660
661 return SOS_OK;
662 }
663
664
665 sos_ret_t sos_blockdev_sync(struct sos_blockdev_instance * blockdev)
666 {
667 sos_ret_t retval_bc, retval_pc;
668
669 blockdev_use_instance(blockdev);
670 retval_pc = sos_fs_pagecache_sync(blockdev->map_cache);
671 retval_bc = sos_blkcache_flush(blockdev->blk_cache);
672 sos_blockdev_release_instance(blockdev);
673
674 if (SOS_OK == retval_bc)
675 return retval_pc;
676 return retval_bc;
677 }
678
679
/*
 * Flush every registered block device.
 *
 * sos_blockdev_sync() may block, and while we sleep the registration
 * list can change under us. So instead of walking the list directly,
 * each pass scans from the head for the first device whose uid is
 * greater than the last one synced (uids grow monotonically with
 * registration order), syncs it, and restarts. The walk terminates
 * when no device with a larger uid remains.
 */
sos_ret_t sos_blockdev_sync_all_devices()
{
  int dummy = 0;
  sos_ui64_t uid = 0;  /* uid of the last device synced so far */

  while (1)
    {
      struct sos_blockdev_instance *blockdev;
      int nbd;

      /* Find the next not-yet-synced device (uid > last synced) */
      list_foreach_forward(registered_blockdev_instances, blockdev, nbd)
        {
          if (blockdev->uid <= uid)
            continue;

          uid = blockdev->uid;
          sos_blockdev_sync(blockdev);

          /* The sync may have blocked and the list may have changed:
             restart the scan from the list head */
          goto lookup_next_bd;
        }

      /* Every device has been visited */
      break;

    lookup_next_bd:
      /* Label needs a statement after it in C89-style code */
      dummy ++;
    }

  return SOS_OK;
}
720
721
722
723
724
725
/*
 * FS helper: attach the block device matching the fs_node's dev_id to
 * the node, and install the blockdev open/close hooks.
 *
 * NOTE(review): if the device is not registered, block_device is set
 * to NULL here and the error surfaces later, when
 * blockdev_helper_new_opened_file returns -SOS_ENODEV.
 */
sos_ret_t sos_blockdev_helper_ref_new_fsnode(struct sos_fs_node * this)
{
  struct sos_blockdev_instance * blockdev;

  /* Takes a reference on the instance, released in
     sos_blockdev_helper_release_fsnode */
  blockdev = sos_blockdev_ref_instance(this->dev_id.device_class,
                                       this->dev_id.device_instance);
  this->block_device = blockdev;
  this->new_opened_file = blockdev_helper_new_opened_file;
  this->close_opened_file = blockdev_helper_close_opened_file;
  return SOS_OK;
}
737
738
739 sos_ret_t sos_blockdev_helper_release_fsnode(struct sos_fs_node * this)
740 {
741 if (NULL != this->block_device)
742 return sos_blockdev_release_instance(this->block_device);
743
744 return SOS_OK;
745 }
746
747
748 sos_ret_t sos_blockdev_helper_sync_fsnode(struct sos_fs_node * this)
749 {
750 if (NULL != this->block_device)
751 return sos_blockdev_sync(this->block_device);
752
753 return SOS_OK;
754 }
755
756
757 static sos_ret_t
758 blockdev_helper_new_opened_file(struct sos_fs_node * this,
759 const struct sos_process * owner,
760 sos_ui32_t open_flags,
761 struct sos_fs_opened_file ** result_of)
762 {
763
764 struct sos_blockdev_instance * blockdev = this->block_device;
765 if (NULL == blockdev)
766 return -SOS_ENODEV;
767
768
769 *result_of = (struct sos_fs_opened_file*)
770 sos_kmalloc(sizeof(struct sos_fs_opened_file), 0);
771 if (NULL == *result_of)
772 return -SOS_ENOMEM;
773
774 memset(*result_of, 0x0, sizeof(struct sos_fs_opened_file));
775
776
777 (*result_of)->owner = owner;
778 (*result_of)->open_flags = open_flags;
779 (*result_of)->ops_file = & blockdev_ops_opened_file;
780 (*result_of)->ops_blockdev = & blockdev_ops_opened_blockdev;
781
782
783 (*result_of)->duplicate = duplicate_opened_blockdev;
784
785 return SOS_OK;
786 }
787
788
/*
 * fs_node hook: release an opened-file descriptor created by
 * blockdev_helper_new_opened_file. No blockdev-specific state to tear
 * down: just give back the memory.
 */
static sos_ret_t
blockdev_helper_close_opened_file(struct sos_fs_node * this,
                                  struct sos_fs_opened_file * of)
{
  return sos_kfree((sos_vaddr_t) of);
}
795
796
797 static sos_ret_t
798 duplicate_opened_blockdev(struct sos_fs_opened_file *this,
799 const struct sos_process * for_owner,
800 struct sos_fs_opened_file **result_of)
801 {
802
803 *result_of = (struct sos_fs_opened_file*)
804 sos_kmalloc(sizeof(struct sos_fs_opened_file), 0);
805 if (NULL == *result_of)
806 return -SOS_ENOMEM;
807
808 memcpy(*result_of, this, sizeof(struct sos_fs_opened_file));
809 (*result_of)->owner = for_owner;
810 (*result_of)->direntry = NULL;
811
812 return SOS_OK;
813 }
814
815
/*
 * Accessor: return the opaque driver data registered with this
 * instance (the custom_data passed at registration time).
 */
void *
sos_blockdev_get_instance_custom_data(struct sos_blockdev_instance * blockdev)
{
  return blockdev->custom_data;
}
821
822
823
824
825
826
/*
 * seek() on an opened block device: move the file position relative
 * to the start (SET), the current position (CUR) or the device size
 * (END). The resulting position must stay inside [0, device size].
 *
 * On error the position is unchanged and *result_position reports the
 * current (old) position; on success both are set to the new one.
 */
static sos_ret_t blockdev_wrap_seek(struct sos_fs_opened_file *this,
                                    sos_lsoffset_t offset,
                                    sos_seek_whence_t whence,
                                    sos_lsoffset_t * result_position)
{
  struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);

  /* Base of the seek, and total device size in bytes */
  sos_lsoffset_t ref_offs;
  sos_lsoffset_t dev_size
    = fsnode->block_device->block_size
    * fsnode->block_device->number_of_blocks;

  /* Report the unchanged position on the early-error paths */
  *result_position = this->position;
  switch (whence)
    {
    case SOS_SEEK_SET:
      ref_offs = 0;
      break;

    case SOS_SEEK_CUR:
      ref_offs = this->position;
      break;

    case SOS_SEEK_END:
      ref_offs = dev_size;
      break;

    default:
      return -SOS_EINVAL;
    }

  /* Would land before the start of the device */
  if (offset < -ref_offs)
    return -SOS_EINVAL;

  /* Would land past the end of the device */
  else if (ref_offs + offset > dev_size)
    return -SOS_EINVAL;

  this->position = ref_offs + offset;
  *result_position = this->position;
  return SOS_OK;
}
872
873
874 static sos_ret_t blockdev_wrap_read(struct sos_fs_opened_file *this,
875 sos_uaddr_t dest_buf,
876 sos_size_t * len)
877 {
878 struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
879 struct sos_blockdev_instance * blockdev = fsnode->block_device;
880
881 SOS_GENADDR_DECL(gaddr, TRUE, dest_buf);
882 sos_ret_t retval = blockdev_generic_read(blockdev, this->position,
883 gaddr, len, FALSE);
884 this->position += *len;
885 return retval;
886 }
887
888
889 static sos_ret_t blockdev_wrap_write(struct sos_fs_opened_file *this,
890 sos_uaddr_t src_buf,
891 sos_size_t * len)
892 {
893 struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
894 struct sos_blockdev_instance * blockdev = fsnode->block_device;
895
896 SOS_GENADDR_DECL(gaddr, TRUE, src_buf);
897 sos_ret_t retval
898 = blockdev_generic_write(blockdev, this->position,
899 gaddr, len,
900 this->open_flags & SOS_FS_OPEN_SYNC,
901 FALSE);
902 this->position += *len;
903 return retval;
904 }
905
906
907 static sos_ret_t blockdev_wrap_mmap(struct sos_fs_opened_file *this,
908 sos_uaddr_t *uaddr, sos_size_t size,
909 sos_ui32_t access_rights,
910 sos_ui32_t flags,
911 sos_luoffset_t offset)
912 {
913 struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
914 struct sos_blockdev_instance * blockdev = fsnode->block_device;
915
916 if (! SOS_IS_PAGE_ALIGNED(offset))
917 return -SOS_EINVAL;
918
919 return sos_umem_vmm_map(sos_process_get_address_space(this->owner),
920 uaddr, size, access_rights,
921 flags, & blockdev->mapres, offset);
922 }
923
924
925
/*
 * fcntl() on an opened block device: not implemented — no fcntl
 * commands are supported on raw block devices.
 */
static sos_ret_t blockdev_wrap_fcntl(struct sos_fs_opened_file *this,
                                     int req_id,
                                     sos_ui32_t req_arg)
{
  return -SOS_ENOSYS;
}
932
933
934 static sos_ret_t blockdev_wrap_ioctl(struct sos_fs_opened_file *this,
935 int req_id,
936 sos_ui32_t req_arg)
937 {
938 struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
939 struct sos_blockdev_instance * blockdev = fsnode->block_device;
940
941 if (req_id == SOS_IOCTL_BLOCKDEV_SYNC)
942 return sos_blockdev_sync(blockdev);
943
944 if (blockdev->operations->ioctl)
945 return blockdev->operations->ioctl(this, req_id, req_arg);
946
947 return -SOS_ENOSYS;
948 }
949
950
/* Method table plugged into every opened blockdev file (see
   blockdev_helper_new_opened_file). No .ioctl here: ioctl lives in
   the dedicated blockdev table below */
static struct sos_fs_ops_opened_file blockdev_ops_opened_file
  = (struct sos_fs_ops_opened_file) {
    .seek  = blockdev_wrap_seek,
    .read  = blockdev_wrap_read,
    .write = blockdev_wrap_write,
    .mmap  = blockdev_wrap_mmap,
    .fcntl = blockdev_wrap_fcntl
  };

/* Blockdev-specific methods of an opened file */
static struct sos_fs_ops_opened_blockdev blockdev_ops_opened_blockdev
  = (struct sos_fs_ops_opened_blockdev) {
    .ioctl = blockdev_wrap_ioctl
  };
965
966
967
968
969
970
971 inline static struct sos_blockdev_instance *
972 get_blockdev_of_vr(struct sos_umem_vmm_vr * vr)
973 {
974 struct sos_umem_vmm_mapped_resource *mr
975 = sos_umem_vmm_get_mapped_resource_of_vr(vr);
976
977 return (struct sos_blockdev_instance *)mr->custom_data;
978 }
979
980
/*
 * VR callback: a new virtual region now references this device —
 * take a reference on the instance (released in blockdev_map_unref).
 */
static void blockdev_map_ref(struct sos_umem_vmm_vr * vr)
{
  blockdev_use_instance(get_blockdev_of_vr(vr));
}
986
987
/*
 * VR callback: a virtual region mapping this device is being torn
 * down.
 *
 * For a SHARED mapping, each currently-mapped page holds a reference
 * on its page-cache entry (taken in blockdev_map_page_in): walk the
 * region, propagate the hardware dirty bit into the page cache, and
 * drop those references. Private mappings used throw-away kernel
 * pages, so there is nothing to release per page. Finally drop the
 * instance reference taken in blockdev_map_ref.
 */
static void blockdev_map_unref(struct sos_umem_vmm_vr * vr)
{
  struct sos_blockdev_instance * blockdev = get_blockdev_of_vr(vr);
  sos_uaddr_t vr_start = sos_umem_vmm_get_start_of_vr(vr);
  sos_uaddr_t vr_size = sos_umem_vmm_get_size_of_vr(vr);
  sos_luoffset_t start_offset = sos_umem_vmm_get_offset_in_resource(vr);
  sos_ui32_t vr_flags = sos_umem_vmm_get_flags_of_vr(vr);

  if (vr_flags & SOS_VR_MAP_SHARED)
    {
      sos_uaddr_t uaddr;
      /* Visit every page of the region */
      for (uaddr = vr_start ;
           uaddr < vr_start + vr_size ;
           uaddr += SOS_PAGE_SIZE)
        {
          sos_paddr_t paddr;
          sos_luoffset_t blockdev_offset;
          paddr = sos_paging_get_paddr(uaddr);
          if (! paddr)
            /* Page never faulted in: nothing to release */
            continue;

          /* Byte offset of this page inside the device */
          blockdev_offset = SOS_PAGE_ALIGN_INF(uaddr) - vr_start;
          blockdev_offset += start_offset;

          /* Carry the MMU dirty bit over to the page cache so the
             data is eventually written back */
          if (sos_paging_is_dirty(paddr))
            sos_fs_pagecache_set_dirty(blockdev->map_cache, blockdev_offset,
                                       FALSE);

          SOS_ASSERT_FATAL(SOS_OK ==
                           sos_fs_pagecache_unref_page(blockdev->map_cache,
                                                       blockdev_offset));
        }
    }

  sos_blockdev_release_instance(blockdev);
}
1029
1030
1031 static sos_ret_t blockdev_map_page_in(struct sos_umem_vmm_vr * vr,
1032 sos_uaddr_t uaddr,
1033 sos_bool_t write_access)
1034 {
1035 struct sos_blockdev_instance * blockdev = get_blockdev_of_vr(vr);
1036 sos_uaddr_t vr_start = sos_umem_vmm_get_start_of_vr(vr);
1037 sos_uaddr_t vr_size = sos_umem_vmm_get_size_of_vr(vr);
1038 sos_luoffset_t start_offset = sos_umem_vmm_get_offset_in_resource(vr);
1039 sos_luoffset_t offset_in_device;
1040
1041 sos_bool_t read_from_blkcache = TRUE;
1042
1043 sos_vaddr_t kernel_page;
1044
1045 struct sos_fs_pagecache_entry * pagecache_entry = NULL;
1046 sos_ret_t retval;
1047
1048 SOS_ASSERT_FATAL(vr_size > 0);
1049
1050
1051 offset_in_device = uaddr - vr_start + sizeof(int) - 1;
1052 offset_in_device += start_offset;
1053 offset_in_device /= blockdev->block_size;
1054 if (offset_in_device >= blockdev->number_of_blocks)
1055 return -SOS_EFAULT;
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065 offset_in_device = SOS_PAGE_ALIGN_INF(uaddr) - vr_start;;
1066 offset_in_device += start_offset;
1067
1068 if (sos_umem_vmm_get_flags_of_vr(vr) & SOS_VR_MAP_SHARED)
1069 {
1070
1071 sos_bool_t newly_allocated;
1072 pagecache_entry = sos_fs_pagecache_ref_page(blockdev->map_cache,
1073 offset_in_device,
1074 & kernel_page,
1075 & newly_allocated);
1076 if (! pagecache_entry)
1077 return -SOS_EFAULT;
1078
1079 if (! newly_allocated)
1080 read_from_blkcache = FALSE;
1081 }
1082 else
1083 {
1084
1085 kernel_page = sos_kmem_vmm_alloc(1, SOS_KMEM_VMM_MAP);
1086 if ((sos_vaddr_t)NULL == kernel_page)
1087 return -SOS_EFAULT;
1088 }
1089
1090 retval = SOS_OK;
1091
1092
1093 if (read_from_blkcache)
1094 {
1095 SOS_GENADDR_DECL(gaddr, FALSE, kernel_page);
1096 sos_size_t rdlen = SOS_PAGE_SIZE;
1097 retval = blockdev_generic_read(blockdev,
1098 offset_in_device,
1099 gaddr,
1100 & rdlen,
1101 TRUE);
1102 if (SOS_PAGE_SIZE != rdlen)
1103 retval = -SOS_EIO;
1104 }
1105
1106
1107 if ((sos_paddr_t)NULL != sos_paging_get_paddr(uaddr))
1108 retval = -SOS_EBUSY;
1109
1110 if (SOS_OK == retval)
1111 {
1112 sos_paddr_t ppage_paddr = sos_paging_get_paddr(kernel_page);
1113 if (0 != ppage_paddr)
1114 retval = sos_paging_map(ppage_paddr,
1115 SOS_PAGE_ALIGN_INF(uaddr),
1116 TRUE,
1117 sos_umem_vmm_get_prot_of_vr(vr));
1118 else
1119 retval = -SOS_EFAULT;
1120 }
1121
1122
1123
1124 if (NULL != pagecache_entry)
1125 {
1126 sos_fs_pagecache_unlock_page(blockdev->map_cache,
1127 pagecache_entry,
1128 (SOS_OK != retval));
1129 }
1130
1131 else
1132 sos_kmem_vmm_free((sos_vaddr_t)kernel_page);
1133
1134 return retval;
1135 }
1136
1137
1138
1139
1140
1141 static sos_ret_t blockdev_map_sync_page(struct sos_umem_vmm_vr * vr,
1142 sos_uaddr_t page_uaddr,
1143 sos_ui32_t flags)
1144 {
1145 struct sos_blockdev_instance * blockdev = get_blockdev_of_vr(vr);
1146 sos_uaddr_t vr_start = sos_umem_vmm_get_start_of_vr(vr);
1147 sos_luoffset_t start_offset = sos_umem_vmm_get_offset_in_resource(vr);
1148 sos_luoffset_t offset;
1149
1150
1151 offset = page_uaddr - vr_start;
1152 offset += start_offset;
1153 if (offset >= blockdev->number_of_blocks*blockdev->block_size)
1154 return -SOS_EFAULT;
1155
1156
1157 sos_paging_set_dirty(page_uaddr, FALSE);
1158
1159
1160 return sos_fs_pagecache_set_dirty(blockdev->map_cache,
1161 offset,
1162 flags & SOS_MSYNC_SYNC);
1163 }
1164
1165
/* Callbacks installed on every virtual region mapping a block device
   (see blockdev_new_mapping) */
static struct sos_umem_vmm_vr_ops blockdev_map_ops
  = (struct sos_umem_vmm_vr_ops){
    .ref       = blockdev_map_ref,
    .unref     = blockdev_map_unref,
    .page_in   = blockdev_map_page_in,
    .sync_page = blockdev_map_sync_page
  };
1173
1174
1175
1176
1177 static sos_ret_t blockdev_new_mapping(struct sos_umem_vmm_vr * vr)
1178 {
1179 struct sos_blockdev_instance * blockdev = get_blockdev_of_vr(vr);
1180 sos_luoffset_t start_offset = sos_umem_vmm_get_offset_in_resource(vr);
1181 sos_size_t map_size = sos_umem_vmm_get_size_of_vr(vr);
1182 sos_luoffset_t stop_offset;
1183 sos_luoffset_t block_index;
1184
1185 if (map_size <= 0)
1186 return -SOS_EINVAL;
1187
1188
1189 stop_offset = start_offset + map_size - 1;
1190 block_index = stop_offset / blockdev->block_size;
1191 if (block_index >= blockdev->number_of_blocks)
1192 return -SOS_EINVAL;
1193
1194 return sos_umem_vmm_set_ops_of_vr(vr, &blockdev_map_ops);
1195 }
1196