Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 #include <sos/assert.h> 019 #include <sos/assert.h>
020 #include <sos/kmalloc.h> 020 #include <sos/kmalloc.h>
021 #include <sos/physmem.h> 021 #include <sos/physmem.h>
022 #include <hwcore/paging.h> 022 #include <hwcore/paging.h>
023 #include <sos/kmem_slab.h> 023 #include <sos/kmem_slab.h>
024 #include <sos/list.h> 024 #include <sos/list.h>
025 #include <hwcore/paging.h> 025 #include <hwcore/paging.h>
>> 026 #include <drivers/devices.h>
>> 027 #include <sos/kmem_vmm.h>
>> 028 #include <sos/uaccess.h>
>> 029 #include <sos/chardev.h>
026 030
027 #include "mem.h" 031 #include "mem.h"
028 032
029 033
030 034
031 035
032 036
/*
 * Reference-counted descriptor for a region of kernel virtual memory or
 * physical memory remapped into a user address space.  One instance is
 * allocated per mmap() of /dev/kmem or /dev/physmem and released when the
 * last virtual region (VR) referring to it is unreferenced.
 */
struct kernel_remapped_resource
{
  int ref_cnt;                             /* number of VRs mapping this resource */
  struct sos_umem_vmm_mapped_resource mr;  /* embedded umem_vmm resource descriptor;
                                              mr.custom_data points back at this struct */
};
038 042
039 043
040 044
041 045
042 static void resource_ref(struct sos_umem_vmm_v 046 static void resource_ref(struct sos_umem_vmm_vr * vr)
043 { 047 {
044 048
045 struct kernel_remapped_resource * resource; 049 struct kernel_remapped_resource * resource;
046 resource 050 resource
047 = (struct kernel_remapped_resource*) 051 = (struct kernel_remapped_resource*)
048 sos_umem_vmm_get_mapped_resource_of_vr(vr) 052 sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
049 053
050 054
051 resource->ref_cnt ++; 055 resource->ref_cnt ++;
052 } 056 }
053 057
054 058
055 059
056 060
057 static void resource_unref(struct sos_umem_vmm 061 static void resource_unref(struct sos_umem_vmm_vr * vr)
058 { 062 {
059 063
060 struct kernel_remapped_resource * resource; 064 struct kernel_remapped_resource * resource;
061 resource 065 resource
062 = (struct kernel_remapped_resource*) 066 = (struct kernel_remapped_resource*)
063 sos_umem_vmm_get_mapped_resource_of_vr(vr) 067 sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
064 068
065 069
066 SOS_ASSERT_FATAL(resource->ref_cnt > 0); 070 SOS_ASSERT_FATAL(resource->ref_cnt > 0);
067 resource->ref_cnt --; 071 resource->ref_cnt --;
068 072
069 073
070 if (resource->ref_cnt == 0) 074 if (resource->ref_cnt == 0)
071 sos_kfree((sos_vaddr_t)resource); 075 sos_kfree((sos_vaddr_t)resource);
072 } 076 }
073 077
074 078
075 079
076 080
077 static sos_ret_t kmem_page_in(struct sos_umem_ 081 static sos_ret_t kmem_page_in(struct sos_umem_vmm_vr * vr,
078 sos_uaddr_t uadd 082 sos_uaddr_t uaddr,
079 sos_bool_t write 083 sos_bool_t write_access)
080 { 084 {
081 sos_vaddr_t vaddr; 085 sos_vaddr_t vaddr;
082 sos_ret_t retval = SOS_OK; 086 sos_ret_t retval = SOS_OK;
083 sos_paddr_t ppage_paddr; 087 sos_paddr_t ppage_paddr;
084 088
085 089
086 vaddr = uaddr - sos_umem_vmm_get_start_of_vr 090 vaddr = uaddr - sos_umem_vmm_get_start_of_vr(vr)
087 + sos_umem_vmm_get_offset_in_resource(vr); 091 + sos_umem_vmm_get_offset_in_resource(vr);
088 092
089 093
090 if (vaddr >= SOS_PAGING_BASE_USER_ADDRESS) 094 if (vaddr >= SOS_PAGING_BASE_USER_ADDRESS)
091 return -SOS_EFAULT; 095 return -SOS_EFAULT;
092 096
093 097
094 ppage_paddr = sos_paging_get_paddr(SOS_PAGE_ 098 ppage_paddr = sos_paging_get_paddr(SOS_PAGE_ALIGN_INF(vaddr));
095 099
096 100
097 if (! ppage_paddr) 101 if (! ppage_paddr)
098 return -SOS_EFAULT; 102 return -SOS_EFAULT;
099 103
100 104
101 retval = sos_paging_map(ppage_paddr, 105 retval = sos_paging_map(ppage_paddr,
102 SOS_PAGE_ALIGN_INF(u 106 SOS_PAGE_ALIGN_INF(uaddr),
103 TRUE, 107 TRUE,
104 sos_umem_vmm_get_pro 108 sos_umem_vmm_get_prot_of_vr(vr));
105 109
106 return retval; 110 return retval;
107 } 111 }
108 112
109 113
110 114
/* VR operations for /dev/kmem mappings: shared refcounting callbacks plus
   the kmem-specific demand-paging handler. */
static struct sos_umem_vmm_vr_ops kmem_ops = (struct sos_umem_vmm_vr_ops)
{
  .ref     = resource_ref,
  .unref   = resource_unref,
  .page_in = kmem_page_in,
};
117 121
118 122
119 123
120 static sos_ret_t kmem_mmap(struct sos_umem_vmm 124 static sos_ret_t kmem_mmap(struct sos_umem_vmm_vr *vr)
121 { 125 {
122 return sos_umem_vmm_set_ops_of_vr(vr, &kmem_ 126 return sos_umem_vmm_set_ops_of_vr(vr, &kmem_ops);
123 } 127 }
124 128
125 129
126 130
127 131
>> 132 static
128 sos_ret_t sos_dev_kmem_map(struct sos_umem_vmm 133 sos_ret_t sos_dev_kmem_map(struct sos_umem_vmm_as * dest_as,
129 sos_uaddr_t *uaddr, 134 sos_uaddr_t *uaddr,
130 sos_size_t size, 135 sos_size_t size,
131 sos_vaddr_t offset, 136 sos_vaddr_t offset,
132 sos_ui32_t access_r 137 sos_ui32_t access_rights,
133 sos_ui32_t flags) 138 sos_ui32_t flags)
134 { 139 {
135 sos_ret_t retval; 140 sos_ret_t retval;
136 struct kernel_remapped_resource * kmem_resou 141 struct kernel_remapped_resource * kmem_resource;
137 142
138 143
139 kmem_resource 144 kmem_resource
140 = (struct kernel_remapped_resource*) sos_k 145 = (struct kernel_remapped_resource*) sos_kmalloc(sizeof(*kmem_resource),
141 146 0);
142 if (! kmem_resource) 147 if (! kmem_resource)
143 return -SOS_ENOMEM; 148 return -SOS_ENOMEM;
144 149
145 memset(kmem_resource, 0x0, sizeof(*kmem_reso 150 memset(kmem_resource, 0x0, sizeof(*kmem_resource));
146 kmem_resource->mr.allowed_access_rights 151 kmem_resource->mr.allowed_access_rights
147 = SOS_VM_MAP_PROT_READ 152 = SOS_VM_MAP_PROT_READ
148 | SOS_VM_MAP_PROT_WRITE 153 | SOS_VM_MAP_PROT_WRITE
149 | SOS_VM_MAP_PROT_EXEC; 154 | SOS_VM_MAP_PROT_EXEC;
150 kmem_resource->mr.custom_data = kmem_reso 155 kmem_resource->mr.custom_data = kmem_resource;
151 kmem_resource->mr.mmap = kmem_mmap 156 kmem_resource->mr.mmap = kmem_mmap;
152 157
153 158
154 retval = sos_umem_vmm_map(dest_as, uaddr, si 159 retval = sos_umem_vmm_map(dest_as, uaddr, size,
155 access_rights, fla 160 access_rights, flags,
156 & kmem_resource->m 161 & kmem_resource->mr, offset);
157 if (SOS_OK != retval) 162 if (SOS_OK != retval)
158 { 163 {
159 sos_kfree((sos_vaddr_t)kmem_resource); 164 sos_kfree((sos_vaddr_t)kmem_resource);
160 return retval; 165 return retval;
161 } 166 }
162 167
163 return SOS_OK; 168 return SOS_OK;
164 } 169 }
165 170
166 171
167 172
168 173
169 static sos_ret_t physmem_page_in(struct sos_um 174 static sos_ret_t physmem_page_in(struct sos_umem_vmm_vr * vr,
170 sos_uaddr_t u 175 sos_uaddr_t uaddr,
171 sos_bool_t wr 176 sos_bool_t write_access)
172 { 177 {
173 sos_ret_t retval = SOS_OK; 178 sos_ret_t retval = SOS_OK;
174 sos_paddr_t ppage_paddr; 179 sos_paddr_t ppage_paddr;
175 180
176 181
177 ppage_paddr = uaddr - sos_umem_vmm_get_start 182 ppage_paddr = uaddr - sos_umem_vmm_get_start_of_vr(vr)
178 + sos_umem_vmm_get_offset_in_resource(vr); 183 + sos_umem_vmm_get_offset_in_resource(vr);
179 184
180 185
181 retval = sos_paging_map(SOS_PAGE_ALIGN_INF(p 186 retval = sos_paging_map(SOS_PAGE_ALIGN_INF(ppage_paddr),
182 SOS_PAGE_ALIGN_INF(u 187 SOS_PAGE_ALIGN_INF(uaddr),
183 TRUE, 188 TRUE,
184 sos_umem_vmm_get_pro 189 sos_umem_vmm_get_prot_of_vr(vr));
185 return retval; 190 return retval;
186 } 191 }
187 192
188 193
189 194
/* VR operations for /dev/physmem mappings: same refcounting callbacks as
   kmem, but with the physical-memory demand-paging handler. */
static struct sos_umem_vmm_vr_ops physmem_ops = (struct sos_umem_vmm_vr_ops)
{
  .ref     = resource_ref,
  .unref   = resource_unref,
  .page_in = physmem_page_in,
};
196 201
197 202
198 203
199 static sos_ret_t physmem_mmap(struct sos_umem_ 204 static sos_ret_t physmem_mmap(struct sos_umem_vmm_vr *vr)
200 { 205 {
201 return sos_umem_vmm_set_ops_of_vr(vr, &physm 206 return sos_umem_vmm_set_ops_of_vr(vr, &physmem_ops);
202 } 207 }
203 208
204 209
205 210
206 211
>> 212 static
207 sos_ret_t sos_dev_physmem_map(struct sos_umem_ 213 sos_ret_t sos_dev_physmem_map(struct sos_umem_vmm_as * dest_as,
208 sos_uaddr_t *uad 214 sos_uaddr_t *uaddr,
209 sos_size_t size, 215 sos_size_t size,
210 sos_paddr_t offs 216 sos_paddr_t offset,
211 sos_ui32_t acces 217 sos_ui32_t access_rights,
212 sos_ui32_t flags 218 sos_ui32_t flags)
213 { 219 {
214 sos_ret_t retval; 220 sos_ret_t retval;
215 struct kernel_remapped_resource * physmem_re 221 struct kernel_remapped_resource * physmem_resource;
216 222
217 physmem_resource 223 physmem_resource
218 = (struct kernel_remapped_resource*) sos_k 224 = (struct kernel_remapped_resource*) sos_kmalloc(sizeof(*physmem_resource),
219 225 0);
220 if (! physmem_resource) 226 if (! physmem_resource)
221 return -SOS_ENOMEM; 227 return -SOS_ENOMEM;
222 228
223 memset(physmem_resource, 0x0, sizeof(*physme 229 memset(physmem_resource, 0x0, sizeof(*physmem_resource));
224 physmem_resource->mr.allowed_access_rights 230 physmem_resource->mr.allowed_access_rights
225 = SOS_VM_MAP_PROT_READ 231 = SOS_VM_MAP_PROT_READ
226 | SOS_VM_MAP_PROT_WRITE 232 | SOS_VM_MAP_PROT_WRITE
227 | SOS_VM_MAP_PROT_EXEC; 233 | SOS_VM_MAP_PROT_EXEC;
228 physmem_resource->mr.custom_data = physme 234 physmem_resource->mr.custom_data = physmem_resource;
229 physmem_resource->mr.mmap = physme 235 physmem_resource->mr.mmap = physmem_mmap;
230 236
231 retval = sos_umem_vmm_map(dest_as, uaddr, si 237 retval = sos_umem_vmm_map(dest_as, uaddr, size,
232 access_rights, fla 238 access_rights, flags,
233 & physmem_resource 239 & physmem_resource->mr, offset);
234 if (SOS_OK != retval) 240 if (SOS_OK != retval)
235 { 241 {
236 sos_kfree((sos_vaddr_t)physmem_resource) 242 sos_kfree((sos_vaddr_t)physmem_resource);
237 return retval; 243 return retval;
238 } 244 }
239 245
240 return SOS_OK; 246 return SOS_OK;
>> 247 }
>> 248
>> 249
>> 250
>> 251
>> 252
>> 253
>> 254
>> 255
/* Retrieve the device "size" (one past the largest legal offset) that
   dev_mem_fs_open() stashed in the FS node's custom_data field. */
#define GET_DEV_SIZE(fsnode) \
  ((sos_size_t)(fsnode)->custom_data)
>> 258
>> 259 static sos_ret_t dev_mem_fs_open(struct sos_fs_node * fsnode,
>> 260 struct sos_fs_opened_file * of,
>> 261 void * chardev_class_custom_data)
>> 262 {
>> 263
>> 264
>> 265 switch (fsnode->dev_id.device_instance)
>> 266 {
>> 267
>> 268 case SOS_CHARDEV_KMEM_MINOR:
>> 269 fsnode->custom_data = (void*)SOS_PAGING_BASE_USER_ADDRESS;
>> 270 return SOS_OK;
>> 271 break;
>> 272
>> 273
>> 274 case SOS_CHARDEV_PHYSMEM_MINOR:
>> 275 {
>> 276 sos_size_t ram_pages = 0;
>> 277 sos_physmem_get_state(& ram_pages, NULL);
>> 278 fsnode->custom_data = (void*)(ram_pages << SOS_PAGE_SHIFT);
>> 279 }
>> 280 return SOS_OK;
>> 281 break;
>> 282
>> 283 default:
>> 284 break;
>> 285 }
>> 286
>> 287 return -SOS_ENODEV;
>> 288 }
>> 289
>> 290
>> 291 static sos_ret_t dev_mem_fs_seek(struct sos_fs_opened_file *this,
>> 292 sos_lsoffset_t offset,
>> 293 sos_seek_whence_t whence,
>> 294 sos_lsoffset_t * result_position)
>> 295 {
>> 296
>> 297 struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
>> 298
>> 299
>> 300 sos_lsoffset_t ref_offs;
>> 301 sos_lsoffset_t dev_size = GET_DEV_SIZE(fsnode);
>> 302
>> 303 *result_position = this->position;
>> 304 switch (whence)
>> 305 {
>> 306 case SOS_SEEK_SET:
>> 307 ref_offs = 0;
>> 308 break;
>> 309
>> 310 case SOS_SEEK_CUR:
>> 311 ref_offs = this->position;
>> 312 break;
>> 313
>> 314 case SOS_SEEK_END:
>> 315 ref_offs = dev_size;
>> 316 break;
>> 317
>> 318 default:
>> 319 return -SOS_EINVAL;
>> 320 }
>> 321
>> 322
>> 323 if (offset < -ref_offs)
>> 324 return -SOS_EINVAL;
>> 325
>> 326
>> 327 else if (ref_offs + offset > dev_size)
>> 328 return -SOS_EINVAL;
>> 329
>> 330 this->position = ref_offs + offset;
>> 331 *result_position = this->position;
>> 332 return SOS_OK;
>> 333 }
>> 334
>> 335
/* Direction selector shared by the read and write entry points. */
typedef enum { DO_READ, DO_WRITE } dev_mem_access_type_t;

/*
 * Common transfer engine for read()/write() on /dev/kmem and
 * /dev/physmem.  Copies up to *len bytes between the user buffer
 * @user_buf and the device, page by page, starting at the opened file's
 * current position.
 *
 * On return, *len holds the number of bytes actually transferred and the
 * file position is advanced by that amount.  A short transfer (page not
 * mapped, copy fault, ...) ends the loop silently and returns SOS_OK
 * with the partial count; only a failed scratch-page allocation returns
 * -SOS_ENOMEM.
 */
static sos_ret_t dev_mem_fs_access(struct sos_fs_opened_file *this,
                                   sos_uaddr_t user_buf,
                                   sos_size_t * len,
                                   dev_mem_access_type_t access_type)
{
  struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
  sos_vaddr_t physmem_transfer_kernel_page = 0;
  sos_uoffset_t offs;
  sos_size_t accesslen = 0;   /* total bytes transferred so far */

  /* Clamp the request so it does not run past the end of the device. */
  if (this->position + *len >= GET_DEV_SIZE(fsnode))
    *len = GET_DEV_SIZE(fsnode) - this->position;

  /* Nothing to do (at or past EOF). */
  if (*len <= 0)
    return SOS_OK;

  /* /dev/physmem: physical pages are not directly addressable, so
     reserve one kernel virtual page used as a movable window onto the
     physical page being transferred. */
  if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
    {
      physmem_transfer_kernel_page = sos_kmem_vmm_alloc(1, 0);
      if (! physmem_transfer_kernel_page)
        return -SOS_ENOMEM;
    }

  /* Walk the range one page at a time. */
  offs = this->position;
  while (offs < this->position + *len)
    {
      sos_uoffset_t page_boundary = SOS_PAGE_ALIGN_INF(offs);
      sos_vaddr_t page_vaddr;            /* kernel address to copy from/to */
      sos_uoffset_t offset_in_page;
      sos_uoffset_t accesslen_in_page;   /* bytes handled in this iteration */
      sos_ret_t retval;

      /* Select the kernel-side source/destination page. */
      if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
        {
          /* Map the physical page under the scratch window for the
             duration of this iteration. */
          retval = sos_paging_map(page_boundary,
                                  physmem_transfer_kernel_page,
                                  FALSE,
                                  (access_type==DO_WRITE)?
                                  SOS_VM_MAP_PROT_WRITE
                                  :SOS_VM_MAP_PROT_READ);
          if (SOS_OK != retval)
            break;

          page_vaddr = physmem_transfer_kernel_page;
        }
      /* /dev/kmem: stop at the first kernel page that is not mapped. */
      else if (! sos_kmem_vmm_is_valid_vaddr(page_boundary))
        break;
      else
        page_vaddr = page_boundary;

      /* Bytes available in this page, capped by the remaining request. */
      offset_in_page = offs - page_boundary;
      accesslen_in_page = SOS_PAGE_SIZE - offset_in_page;
      if (accesslen + accesslen_in_page > *len)
        accesslen_in_page = *len - accesslen;

      /* Do the user<->kernel copy; retval is the byte count or < 0. */
      if (access_type==DO_WRITE)
        retval = sos_memcpy_from_user(page_vaddr + offset_in_page,
                                      user_buf + accesslen,
                                      accesslen_in_page);
      else
        retval = sos_memcpy_to_user(user_buf + accesslen,
                                    page_vaddr + offset_in_page,
                                    accesslen_in_page);

      /* Release the scratch window before inspecting the result, so the
         unmap happens on every path. */
      if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
        sos_paging_unmap(physmem_transfer_kernel_page);

      /* Copy faulted outright: stop with what we have. */
      if (retval < 0)
        break;

      accesslen += retval;

      /* Partial copy inside the page: user buffer fault, stop here. */
      if (retval < (sos_ret_t)accesslen_in_page)
        break;

      /* Advance to the next page boundary. */
      offs = page_boundary + SOS_PAGE_SIZE;
    }

  /* Free the /dev/physmem scratch window, if any. */
  if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
    sos_kmem_vmm_free(physmem_transfer_kernel_page);

  /* Report the actual transfer size and advance the file position. */
  *len = accesslen;
  this->position += accesslen;
  return SOS_OK;
}
>> 437
>> 438
>> 439 static sos_ret_t dev_mem_fs_read(struct sos_fs_opened_file *this,
>> 440 sos_uaddr_t dest_buf,
>> 441 sos_size_t * len)
>> 442 {
>> 443 return dev_mem_fs_access(this, dest_buf, len, DO_READ);
>> 444 }
>> 445
>> 446
>> 447 static sos_ret_t dev_mem_fs_write(struct sos_fs_opened_file *this,
>> 448 sos_uaddr_t src_buf,
>> 449 sos_size_t * len)
>> 450 {
>> 451 return dev_mem_fs_access(this, src_buf, len, DO_WRITE);
>> 452 }
>> 453
>> 454
>> 455 static sos_ret_t dev_mem_fs_mmap(struct sos_fs_opened_file *this,
>> 456 sos_uaddr_t *uaddr, sos_size_t size,
>> 457 sos_ui32_t access_rights,
>> 458 sos_ui32_t flags,
>> 459 sos_luoffset_t offset)
>> 460 {
>> 461 struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
>> 462 if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
>> 463 return sos_dev_physmem_map(sos_process_get_address_space(this->owner),
>> 464 uaddr, size, offset, access_rights, flags);
>> 465
>> 466 return sos_dev_kmem_map(sos_process_get_address_space(this->owner),
>> 467 uaddr, size, offset, access_rights, flags);
>> 468 }
>> 469
>> 470
/* Character-device operations for the whole mem class (/dev/kmem and
   /dev/physmem share them; open() discriminates on the minor number).
   close/fcntl/ioctl are intentionally unsupported (NULL). */
static struct sos_chardev_ops dev_mem_fs_ops
  = (struct sos_chardev_ops) {
      .open  = dev_mem_fs_open,
      .close = NULL,
      .seek  = dev_mem_fs_seek,
      .read  = dev_mem_fs_read,
      .write = dev_mem_fs_write,
      .mmap  = dev_mem_fs_mmap,
      .fcntl = NULL,
      .ioctl = NULL
    };
>> 482
>> 483
>> 484 sos_ret_t sos_dev_mem_chardev_setup()
>> 485 {
>> 486 return sos_chardev_register_class(SOS_CHARDEV_MEM_MAJOR,
>> 487 & dev_mem_fs_ops,
>> 488 NULL);
241 } 489 }