
/* Copyright (C) 2005 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/

#include <sos/assert.h>
#include <sos/kmalloc.h>
#include <sos/klibc.h>           /* memset() */
#include <sos/physmem.h>
#include <hwcore/paging.h>
#include <sos/kmem_slab.h>
#include <sos/list.h>
#include <drivers/devices.h>
#include <sos/kmem_vmm.h>
#include <sos/uaccess.h>
#include <sos/chardev.h>

#include "mem.h"


/**
 * A mapped mem/kmem resource
 */
struct kernel_remapped_resource
{
  int ref_cnt;
  struct sos_umem_vmm_mapped_resource mr;
};
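/* One such descriptor is allocated per call to sos_dev_kmem_map() /
   sos_dev_physmem_map() below. ref_cnt counts the virtual regions that
   currently reference it (see resource_ref/resource_unref); the descriptor
   is freed when the last of these regions is removed from its address
   space. */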


/** Called after the virtual region has been inserted inside its
    address space */
static void resource_ref(struct sos_umem_vmm_vr * vr)
{
  /* Retrieve the mem/kmem structure associated with the mapped resource */
  struct kernel_remapped_resource * resource;
  resource
    = (struct kernel_remapped_resource*)
    sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* Increment ref counter */
  resource->ref_cnt ++;
}


/** Called when the virtual region is removed from its address
    space */
static void resource_unref(struct sos_umem_vmm_vr * vr)
{
  /* Retrieve the mem/kmem structure associated with the mapped resource */
  struct kernel_remapped_resource * resource;
  resource
    = (struct kernel_remapped_resource*)
    sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* Decrement ref counter */
  SOS_ASSERT_FATAL(resource->ref_cnt > 0);
  resource->ref_cnt --;

  /* Free the resource if it becomes unused */
  if (resource->ref_cnt == 0)
    sos_kfree((sos_vaddr_t)resource);
}


/** MOST IMPORTANT callback! Called when a thread page faults on the
    resource's mapping */
static sos_ret_t kmem_page_in(struct sos_umem_vmm_vr * vr,
                              sos_uaddr_t uaddr,
                              sos_bool_t write_access)
{
  sos_vaddr_t vaddr;
  sos_ret_t retval = SOS_OK;
  sos_paddr_t ppage_paddr;

  /* Compute address of kernel page */
  vaddr = uaddr - sos_umem_vmm_get_start_of_vr(vr)
    + sos_umem_vmm_get_offset_in_resource(vr);

  /* Don't allow demand paging of non-kernel pages */
  if (vaddr >= SOS_PAGING_BASE_USER_ADDRESS)
    return -SOS_EFAULT;

  /* Lookup physical kernel page */
  ppage_paddr = sos_paging_get_paddr(SOS_PAGE_ALIGN_INF(vaddr));

  /* Cannot access unmapped kernel pages */
  if (! ppage_paddr)
    return -SOS_EFAULT;

  /* Remap it in user space */
  retval = sos_paging_map(ppage_paddr,
                          SOS_PAGE_ALIGN_INF(uaddr),
                          TRUE,
                          sos_umem_vmm_get_prot_of_vr(vr));

  return retval;
}
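/* For example (hypothetical numbers): if the region was mapped at user
   address 0x40000000 with an offset of 0x200000 into the kmem resource, a
   fault at uaddr 0x40001234 translates to kernel vaddr 0x00201234.  The
   page 0x00201000 containing it must lie below SOS_PAGING_BASE_USER_ADDRESS
   and already be mapped in kernel space, otherwise -SOS_EFAULT is
   returned. */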


/** The callbacks for a mapped kmem resource */
static struct sos_umem_vmm_vr_ops kmem_ops = (struct sos_umem_vmm_vr_ops)
{
  .ref     = resource_ref,
  .unref   = resource_unref,
  .page_in = kmem_page_in,
};


/** The callback that gets called when the resource gets mapped */
static sos_ret_t kmem_mmap(struct sos_umem_vmm_vr *vr)
{
  return sos_umem_vmm_set_ops_of_vr(vr, &kmem_ops);
}


/** The function responsible for mapping the /dev/kmem resource in
    user space */
static
sos_ret_t sos_dev_kmem_map(struct sos_umem_vmm_as * dest_as,
                           sos_uaddr_t *uaddr,
                           sos_size_t size,
                           sos_vaddr_t offset,
                           sos_ui32_t access_rights,
                           sos_ui32_t flags)
{
  sos_ret_t retval;
  struct kernel_remapped_resource * kmem_resource;

  /* Allocate a new "descriptor" for the resource */
  kmem_resource
    = (struct kernel_remapped_resource*) sos_kmalloc(sizeof(*kmem_resource),
                                                     0);
  if (! kmem_resource)
    return -SOS_ENOMEM;

  memset(kmem_resource, 0x0, sizeof(*kmem_resource));
  kmem_resource->mr.allowed_access_rights
    = SOS_VM_MAP_PROT_READ
    | SOS_VM_MAP_PROT_WRITE
    | SOS_VM_MAP_PROT_EXEC;
  kmem_resource->mr.custom_data    = kmem_resource;
  kmem_resource->mr.mmap           = kmem_mmap;

  /* Map it in user space */
  retval = sos_umem_vmm_map(dest_as, uaddr, size,
                            access_rights, flags,
                            & kmem_resource->mr, offset);
  if (SOS_OK != retval)
    {
      sos_kfree((sos_vaddr_t)kmem_resource);
      return retval;
    }

  return SOS_OK;
}
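/* On failure the descriptor is freed right here; on success its lifetime is
   handed over to the reference counting performed by resource_ref() /
   resource_unref() above. */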


/** MOST IMPORTANT callback! Called when a thread page faults on the
    resource's mapping */
static sos_ret_t physmem_page_in(struct sos_umem_vmm_vr * vr,
                                 sos_uaddr_t uaddr,
                                 sos_bool_t write_access)
{
  sos_ret_t retval = SOS_OK;
  sos_paddr_t ppage_paddr;

  /* Compute the physical address of the faulting page */
  ppage_paddr = uaddr - sos_umem_vmm_get_start_of_vr(vr)
    + sos_umem_vmm_get_offset_in_resource(vr);

  /* Remap page in user space */
  retval = sos_paging_map(SOS_PAGE_ALIGN_INF(ppage_paddr),
                          SOS_PAGE_ALIGN_INF(uaddr),
                          TRUE,
                          sos_umem_vmm_get_prot_of_vr(vr));
  return retval;
}
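/* Unlike kmem_page_in() above, the offset within the resource is used
   directly as a physical address: no lookup through the kernel page tables
   and no upper-bound check is performed here, the faulting frame is simply
   mapped into user space as-is. */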


/** The callbacks for a mapped physmem resource */
static struct sos_umem_vmm_vr_ops physmem_ops = (struct sos_umem_vmm_vr_ops)
{
  .ref     = resource_ref,
  .unref   = resource_unref,
  .page_in = physmem_page_in,
};


/** The callback that gets called when the resource gets mapped */
static sos_ret_t physmem_mmap(struct sos_umem_vmm_vr *vr)
{
  return sos_umem_vmm_set_ops_of_vr(vr, &physmem_ops);
}


/** The function responsible for mapping the /dev/mem resource in
    user space */
static
sos_ret_t sos_dev_physmem_map(struct sos_umem_vmm_as * dest_as,
                              sos_uaddr_t *uaddr,
                              sos_size_t size,
                              sos_paddr_t offset,
                              sos_ui32_t access_rights,
                              sos_ui32_t flags)
{
  sos_ret_t retval;
  struct kernel_remapped_resource * physmem_resource;

  physmem_resource
    = (struct kernel_remapped_resource*) sos_kmalloc(sizeof(*physmem_resource),
                                                     0);
  if (! physmem_resource)
    return -SOS_ENOMEM;

  memset(physmem_resource, 0x0, sizeof(*physmem_resource));
  physmem_resource->mr.allowed_access_rights
    = SOS_VM_MAP_PROT_READ
    | SOS_VM_MAP_PROT_WRITE
    | SOS_VM_MAP_PROT_EXEC;
  physmem_resource->mr.custom_data    = physmem_resource;
  physmem_resource->mr.mmap           = physmem_mmap;

  retval = sos_umem_vmm_map(dest_as, uaddr, size,
                            access_rights, flags,
                            & physmem_resource->mr, offset);
  if (SOS_OK != retval)
    {
      sos_kfree((sos_vaddr_t)physmem_resource);
      return retval;
    }

  return SOS_OK;
}


/*
 * /dev/mem and /dev/kmem character device operations
 *
 * The "custom_data" field of the FS node is used to store the total
 * size of the device, in bytes (see dev_mem_fs_open below)
 */
#define GET_DEV_SIZE(fsnode) \
  ((sos_size_t)(fsnode)->custom_data)
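/* For example (hypothetical figures): with 4kB pages and 32MB of RAM,
   GET_DEV_SIZE() yields 0x2000000 for the /dev/mem node, while for
   /dev/kmem it yields SOS_PAGING_BASE_USER_ADDRESS, i.e. the amount of
   virtual address space occupied by the kernel mapping. */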

static sos_ret_t dev_mem_fs_open(struct sos_fs_node        * fsnode,
                                 struct sos_fs_opened_file * of,
                                 void * chardev_class_custom_data)
{
  /* Make sure the device is supported by this driver and compute its
     "size" (use the custom_data field to store it) */
  switch (fsnode->dev_id.device_instance)
    {
      /* /dev/kmem extends up to the end of the kernel mapping */
    case SOS_CHARDEV_KMEM_MINOR:
      fsnode->custom_data = (void*)SOS_PAGING_BASE_USER_ADDRESS;
      return SOS_OK;
      break;

      /* /dev/mem extends up to the end of physical memory */
    case SOS_CHARDEV_PHYSMEM_MINOR:
      {
        sos_size_t ram_pages = 0;
        sos_physmem_get_state(& ram_pages, NULL);
        fsnode->custom_data = (void*)(ram_pages << SOS_PAGE_SHIFT);
      }
      return SOS_OK;
      break;

    default:
      break;
    }

  return -SOS_ENODEV;
}
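/* The size recorded here is what GET_DEV_SIZE() returns in
   dev_mem_fs_seek() and dev_mem_fs_access() below to bound seeks, reads
   and writes. */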


static sos_ret_t dev_mem_fs_seek(struct sos_fs_opened_file *this,
                                 sos_lsoffset_t offset,
                                 sos_seek_whence_t whence,
                                 /* out */ sos_lsoffset_t * result_position)
{
  /* Retrieve the FS node backing this opened file */
  struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);

  /* Artificially update the position in the "file" */
  sos_lsoffset_t ref_offs;
  sos_lsoffset_t dev_size = GET_DEV_SIZE(fsnode);

  *result_position = this->position;
  switch (whence)
    {
    case SOS_SEEK_SET:
      ref_offs = 0;
      break;

    case SOS_SEEK_CUR:
      ref_offs = this->position;
      break;

    case SOS_SEEK_END:
      ref_offs = dev_size;
      break;

    default:
      return -SOS_EINVAL;
    }

  /* Forbid accesses "before" the start of the device */
  if (offset < -ref_offs)
    return -SOS_EINVAL;

  /* Forbid accesses "after" the end of the device */
  else if (ref_offs + offset > dev_size)
    return -SOS_EINVAL;

  this->position = ref_offs + offset;
  *result_position = this->position;
  return SOS_OK;
}
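/* Example (hypothetical values): on a /dev/mem node of size 0x2000000, a
   seek of -0x1000 relative to SOS_SEEK_END sets the position to 0x1FFF000,
   whereas a seek of -0x1000 relative to SOS_SEEK_CUR with a current
   position of 0 is rejected with -SOS_EINVAL because it would move before
   the start of the device. */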


typedef enum { DO_READ, DO_WRITE } dev_mem_access_type_t;
static sos_ret_t dev_mem_fs_access(struct sos_fs_opened_file *this,
                                   sos_uaddr_t user_buf,
                                   sos_size_t * /* in/out */len,
                                   dev_mem_access_type_t access_type)
{
  struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
  sos_vaddr_t   physmem_transfer_kernel_page = 0; /* Used for /dev/mem only */
  sos_uoffset_t offs;
  sos_size_t    accesslen = 0;

  /* Readjust copy length to match the size of the device */
  if (this->position + *len >= GET_DEV_SIZE(fsnode))
    *len = GET_DEV_SIZE(fsnode) - this->position;

  /* Ignore zero-size requests */
  if (*len <= 0)
    return SOS_OK;

  /* For the /dev/mem device, prepare a kernel page used to map the physical
     pages before transferring to user space */
  if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
    {
      physmem_transfer_kernel_page = sos_kmem_vmm_alloc(1, 0);
      if (! physmem_transfer_kernel_page)
        return -SOS_ENOMEM;
    }

  /* Try to copy the data in page-size chunks */
  offs = this->position;
  while (offs < this->position + *len)
    {
      /* Retrieve page address of data in kernel memory */
      sos_uoffset_t page_boundary = SOS_PAGE_ALIGN_INF(offs);
      sos_vaddr_t   page_vaddr;
      sos_uoffset_t offset_in_page;
      sos_uoffset_t accesslen_in_page;
      sos_ret_t retval;

      /* For the /dev/mem device, the physical page must first be mapped
         into kernel memory */
      if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
        {
          retval = sos_paging_map(page_boundary,
                                  physmem_transfer_kernel_page,
                                  FALSE,
                                  (access_type==DO_WRITE)?
                                    SOS_VM_MAP_PROT_WRITE
                                    :SOS_VM_MAP_PROT_READ);
          if (SOS_OK != retval)
            break;

          page_vaddr = physmem_transfer_kernel_page;
        }
      /* For /dev/kmem device, the page should already be in kernel space */
      else if (! sos_kmem_vmm_is_valid_vaddr(page_boundary))
        break; /* No: page is not mapped in kernel space! */
      else
        page_vaddr = page_boundary; /* Yes, page is mapped */

      /* Now copy the data between kernel and user space */
      offset_in_page = offs - page_boundary;
      accesslen_in_page  = SOS_PAGE_SIZE - offset_in_page;
      if (accesslen + accesslen_in_page > *len)
        accesslen_in_page = *len - accesslen;

      if (access_type==DO_WRITE)
        retval = sos_memcpy_from_user(page_vaddr + offset_in_page,
                                      user_buf + accesslen,
                                      accesslen_in_page);
      else
        retval = sos_memcpy_to_user(user_buf + accesslen,
                                    page_vaddr + offset_in_page,
                                    accesslen_in_page);

      /* Now, for /dev/mem, unmap the page from kernel */
      if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
        sos_paging_unmap(physmem_transfer_kernel_page);

      /* Stop here if the copy failed */
      if (retval < 0)
        break;

      accesslen += retval;
      /* If transfer was interrupted, stop here */
      if (retval < (sos_ret_t)accesslen_in_page)
        break;

      /* Go on to next page */
      offs = page_boundary + SOS_PAGE_SIZE;
    }

  /* Release the temporary page for physical mem transfers */
  if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
    sos_kmem_vmm_free(physmem_transfer_kernel_page);

  /* Update the position in the "file" */
  *len = accesslen;
  this->position += accesslen;
  return SOS_OK;
}
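/* The loop above handles at most one page per iteration: for /dev/mem the
   target physical frame is temporarily mapped at
   physmem_transfer_kernel_page, for /dev/kmem the kernel virtual page is
   used directly.  A short or failed copy (retval < accesslen_in_page)
   terminates the transfer early; the number of bytes actually transferred
   is reported back through *len. */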


static sos_ret_t dev_mem_fs_read(struct sos_fs_opened_file *this,
                                 sos_uaddr_t dest_buf,
                                 sos_size_t * /* in/out */len)
{
  return dev_mem_fs_access(this, dest_buf, len, DO_READ);
}


static sos_ret_t dev_mem_fs_write(struct sos_fs_opened_file *this,
                                  sos_uaddr_t src_buf,
                                  sos_size_t * /* in/out */len)
{
  return dev_mem_fs_access(this, src_buf, len, DO_WRITE);
}


static sos_ret_t dev_mem_fs_mmap(struct sos_fs_opened_file *this,
                                 sos_uaddr_t *uaddr, sos_size_t size,
                                 sos_ui32_t access_rights,
                                 sos_ui32_t flags,
                                 sos_luoffset_t offset)
{
  struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
  if (SOS_CHARDEV_PHYSMEM_MINOR == fsnode->dev_id.device_instance)
    return sos_dev_physmem_map(sos_process_get_address_space(this->owner),
                               uaddr, size, offset, access_rights, flags);

  return sos_dev_kmem_map(sos_process_get_address_space(this->owner),
                          uaddr, size, offset, access_rights, flags);
}


static struct sos_chardev_ops dev_mem_fs_ops
  = (struct sos_chardev_ops) {
    .open  = dev_mem_fs_open,
    .close = NULL,
    .seek  = dev_mem_fs_seek,
    .read  = dev_mem_fs_read,
    .write = dev_mem_fs_write,
    .mmap  = dev_mem_fs_mmap,
    .fcntl = NULL,
    .ioctl = NULL
  };


sos_ret_t sos_dev_mem_chardev_setup(void)
{
  return sos_chardev_register_class(SOS_CHARDEV_MEM_MAJOR,
                                    & dev_mem_fs_ops,
                                    NULL);
}
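
/* Hypothetical user-space sketch (not part of this driver): assuming the
   device nodes /dev/mem and /dev/kmem exist and that SOS exposes the usual
   open/lseek/read/mmap calls to user programs, physical memory could be
   inspected with something like:

     int fd = open("/dev/mem", O_RDONLY);
     lseek(fd, 0xB8000, SEEK_SET);      // e.g. the VGA text buffer
     read(fd, buf, 80 * 25 * 2);        // served by dev_mem_fs_read()
     ...
     void * p = mmap(NULL, 4096, PROT_READ, MAP_SHARED, fd, 0xB8000);
                                        // faults handled by physmem_page_in()
*/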
