SimpleOS

LXR

Navigation



Site hébergé par : enix

The LXR Cross Referencer for SOS

[ source navigation ]
[ diff markup ]
[ identifier search ]
[ general search ]
 
 
Article: [ 1 ] [ 2 ] [ 3 ] [ 4 ] [ 5 ] [ 6 ] [ 6.5 ] [ 7 ] [ 7.5 ] [ 8 ] [ 9 ] [ 9.5 ]

001 /* Copyright (C) 2005 David Decotigny
002 
003    This program is free software; you can redistribute it and/or
004    modify it under the terms of the GNU General Public License
005    as published by the Free Software Foundation; either version 2
006    of the License, or (at your option) any later version.
007    
008    This program is distributed in the hope that it will be useful,
009    but WITHOUT ANY WARRANTY; without even the implied warranty of
010    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
011    GNU General Public License for more details.
012    
013    You should have received a copy of the GNU General Public License
014    along with this program; if not, write to the Free Software
015    Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
016    USA. 
017 */
018 
019 #include <sos/assert.h>
020 #include <sos/kmalloc.h>
021 #include <sos/physmem.h>
022 #include <hwcore/paging.h>
023 #include <sos/kmem_slab.h>
024 #include <sos/list.h>
025 #include <hwcore/paging.h>
026 #include <sos/uaccess.h>
027 #include <sos/chardev.h>
028 #include <drivers/devices.h>
029 
030 #include "zero.h"
031 
032 
/**
 * A mapped page for a shared mapping of /dev/zero
 *
 * Records which page of the resource (page_id) is backed by which
 * physical page (ppage_paddr), so that every VR sharing the same
 * /dev/zero resource maps the same physical page.
 */
struct zero_mapped_page
{
  sos_uoffset_t page_id;      /**< Page offset inside the mapped resource */
  sos_paddr_t   ppage_paddr;  /**< Physical address of the backing page */

  /** Linkage inside zero_mapped_resource::list_mapped_pages */
  struct zero_mapped_page *prev, *next;
};
/** The Slab cache of shared mapped pages */
struct sos_kslab_cache * cache_of_zero_mapped_pages;
045 
046 
/**
 * A mapped /dev/zero resource
 *
 * One instance is allocated per sos_dev_zero_map() call; it is shared
 * by all the VRs that map it and freed when the last one goes away
 * (see zero_ref/zero_unref).
 */
struct zero_mapped_resource
{
  int ref_cnt;  /**< Number of VRs referencing this resource */

  /**
   * For shared mappings: the list of shared pages mapped inside one
   * or multiple VRs
   */
  struct zero_mapped_page *list_mapped_pages;

  /** The generic "mapped resource" descriptor handed to the umem_vmm
      subsystem (mr.custom_data points back to this structure) */
  struct sos_umem_vmm_mapped_resource mr;
};
062 
063 
/** Forward declaration: the FS operation for the /dev/zero character
    device */
static struct sos_chardev_ops dev_zero_fs_ops;


/** Helper function to insert the given physical page in the list of
    physical pages used for shared anonymous mappings */
static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
                                           sos_paddr_t ppage_paddr,
                                           sos_uoffset_t page_id);


/** Helper function to look up the physical page backing the given
    page of a shared anonymous mapping, if any */
static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
                                             sos_uoffset_t page_id);
080 
081 
082 sos_ret_t sos_dev_zero_subsystem_setup()
083 {
084   sos_ret_t retval;
085 
086   cache_of_zero_mapped_pages =
087     sos_kmem_cache_create("shared anonymous mappings",
088                           sizeof(struct zero_mapped_page),
089                           1, 0,
090                           SOS_KSLAB_CREATE_MAP | SOS_KSLAB_CREATE_ZERO);
091   if (! cache_of_zero_mapped_pages)
092     return -SOS_ENOMEM;
093 
094   retval = sos_chardev_register_class(SOS_CHARDEV_ZERO_MAJOR,
095                                       & dev_zero_fs_ops,
096                                       NULL);
097   if (SOS_OK != retval)
098     {
099       sos_kmem_cache_destroy(cache_of_zero_mapped_pages);
100       return retval;
101     }
102 
103   return SOS_OK;
104 }
105 
106 
107 /** Called after the virtual region has been inserted inside its
108     address space */
109 static void zero_ref(struct sos_umem_vmm_vr * vr)
110 {
111   /* Retrieve the 'zero' structure associated with the mapped resource */
112   struct zero_mapped_resource * zero_resource;
113   zero_resource
114     = (struct zero_mapped_resource*)
115     sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
116   
117   /* Increment ref counter */
118   zero_resource->ref_cnt ++;
119 }
120 
121 
/** Called when the virtual region is removed from its address
    space: drop one reference and destroy the resource when the last
    VR referencing it disappears */
static void zero_unref(struct sos_umem_vmm_vr * vr)
{
  /* Retrieve the 'zero' structure associated with the mapped resource */
  struct zero_mapped_resource * zero_resource;
  zero_resource
    = (struct zero_mapped_resource*)
    sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* Decrement ref counter */
  SOS_ASSERT_FATAL(zero_resource->ref_cnt > 0);
  zero_resource->ref_cnt --;

  /* Free the resource if it becomes unused */
  if (zero_resource->ref_cnt == 0)
    {
      /* Delete the list of anonymous shared mappings */
      struct zero_mapped_page *zmp;
      list_collapse(zero_resource->list_mapped_pages, zmp)
        {
          /* Unreference the underlying physical page */
          sos_physmem_unref_physpage(zmp->ppage_paddr);
          /* NOTE(review): zmp was allocated from
             cache_of_zero_mapped_pages (see insert_anonymous_physpage)
             yet is released with sos_kfree — presumably sos_kfree
             resolves the owning slab from the address; confirm this
             matches the kmalloc/kmem_slab implementation */
          sos_kfree((sos_vaddr_t)zmp);
        }

      sos_kfree((sos_vaddr_t)zero_resource);
    }
}
151 
152 
/** MOST IMPORTANT callback ! Called when a thread page faults on the
    resource's mapping: supply a physical page for the faulting
    address (demand paging, with copy-on-write for read faults) */
static sos_ret_t zero_page_in(struct sos_umem_vmm_vr * vr,
                              sos_uaddr_t uaddr,
                              sos_bool_t write_access)
{
  sos_ret_t retval = SOS_OK;
  sos_paddr_t ppage_paddr;
  sos_uoffset_t required_page_id;
  struct zero_mapped_resource * zero_resource;
  sos_ui32_t vr_prot, vr_flags;

  /* Retrieve the 'zero' structure associated with the mapped resource */
  zero_resource
    = (struct zero_mapped_resource*)
    sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* Retrieve access rights/flags of the VR */
  vr_prot  = sos_umem_vmm_get_prot_of_vr(vr);
  vr_flags = sos_umem_vmm_get_flags_of_vr(vr);

  /* Identifies the page in the mapping that's being paged-in: the
     offset of the faulting page relative to the start of the VR,
     shifted by the VR's offset inside the resource */
  required_page_id = SOS_PAGE_ALIGN_INF(uaddr)
    - sos_umem_vmm_get_start_of_vr(vr)
    + sos_umem_vmm_get_offset_in_resource(vr);

  /* For shared mappings, check if there is a page already mapping the
     required address: all VRs sharing the resource must see the same
     physical page */
  if (vr_flags & SOS_VR_MAP_SHARED)
    {
      ppage_paddr = lookup_anonymous_physpage(zero_resource, required_page_id);
      if (NULL != (void*)ppage_paddr)
        {
          /* Re-map the already-existing shared page at the faulting
             address with the VR's protection */
          retval = sos_paging_map(ppage_paddr,
                                  SOS_PAGE_ALIGN_INF(uaddr),
                                  TRUE,
                                  vr_prot);

          return retval;
        }
    }

  /* For write accesses, directly maps a new page. For read accesses,
     simply map in the zero_page (and wait for COW to handle the next
     write accesses) */
  if (write_access)
    {
      /* Allocate a new page for the virtual address */
      ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
      if (! ppage_paddr)
        return -SOS_ENOMEM;

      retval = sos_paging_map(ppage_paddr,
                              SOS_PAGE_ALIGN_INF(uaddr),
                              TRUE,
                              vr_prot);
      if (SOS_OK != retval)
        {
          /* Mapping failed: give back the freshly-allocated page */
          sos_physmem_unref_physpage(ppage_paddr);
          return retval;
        }
      
      /* /dev/zero contract: a freshly faulted-in page reads as zeroes */
      memset((void*)SOS_PAGE_ALIGN_INF(uaddr), 0x0, SOS_PAGE_SIZE);

      /* For shared mappings, add the page in the list of shared
         mapped pages */
      if (vr_flags & SOS_VR_MAP_SHARED)
        /* NOTE(review): the return value is ignored — on -SOS_ENOMEM
           the page silently stays private to this mapping; confirm
           this best-effort behavior is intended */
        insert_anonymous_physpage(zero_resource, ppage_paddr,
                                  required_page_id);

      /* Drop our temporary reference: the page remains referenced by
         the page tables (and, for shared mappings, by the list) */
      sos_physmem_unref_physpage(ppage_paddr);
    }
  else
    {
      /* Map-in the zero page in READ ONLY whatever the access_rights
         or the type (shared/private) of the VR to activate COW */
      retval = sos_paging_map(sos_zero_physpage,
                              SOS_PAGE_ALIGN_INF(uaddr),
                              TRUE,
                              SOS_VM_MAP_PROT_READ);
    }

  return retval;
}
237 
238 
239 /** The callbacks for a mapped /dev/zero resource */
240 static struct sos_umem_vmm_vr_ops zero_ops = (struct sos_umem_vmm_vr_ops)
241 {
242   .ref     = zero_ref,
243   .unref   = zero_unref,
244   .page_in = zero_page_in,
245   .unmap   = NULL
246 };
247 
248 
249 /** The callback that gets called when the resource gets mapped */
250 static sos_ret_t zero_mmap(struct sos_umem_vmm_vr *vr)
251 {
252   return sos_umem_vmm_set_ops_of_vr(vr, &zero_ops);
253 }
254 
255 
256 /** The function responsible for mapping the /dev/zero resource in
257     user space */
258 sos_ret_t sos_dev_zero_map(struct sos_umem_vmm_as * dest_as,
259                            sos_uaddr_t *uaddr,
260                            sos_size_t size,
261                            sos_ui32_t access_rights,
262                            sos_ui32_t flags)
263 {
264   sos_ret_t retval;
265   struct zero_mapped_resource * zero_resource;
266 
267   zero_resource
268     = (struct zero_mapped_resource*) sos_kmalloc(sizeof(*zero_resource), 0);
269   if (! zero_resource)
270     return -SOS_ENOMEM;
271 
272   memset(zero_resource, 0x0, sizeof(*zero_resource));
273   zero_resource->mr.allowed_access_rights 
274     = SOS_VM_MAP_PROT_READ
275     | SOS_VM_MAP_PROT_WRITE
276     | SOS_VM_MAP_PROT_EXEC;
277   zero_resource->mr.flags         |= SOS_MAPPED_RESOURCE_ANONYMOUS;
278   zero_resource->mr.custom_data    = zero_resource;
279   zero_resource->mr.mmap           = zero_mmap;
280 
281   retval = sos_umem_vmm_map(dest_as, uaddr, size,
282                             access_rights, flags,
283                             &zero_resource->mr, 0);
284   if (SOS_OK != retval)
285     {
286       sos_kfree((sos_vaddr_t)zero_resource);
287       return retval;
288     }
289 
290   return SOS_OK;
291 }
292 
293 
294 static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
295                                            sos_paddr_t ppage_paddr,
296                                            sos_uoffset_t page_id)
297 {
298   struct zero_mapped_page * zmp
299     = (struct zero_mapped_page*)sos_kmem_cache_alloc(cache_of_zero_mapped_pages,
300                                                      0);
301   if (! zmp)
302     return -SOS_ENOMEM;
303 
304   zmp->page_id     = page_id;
305   zmp->ppage_paddr = ppage_paddr;
306 
307   list_add_head(mr->list_mapped_pages, zmp);
308   sos_physmem_ref_physpage_at(ppage_paddr);
309   return SOS_OK;
310 }
311 
312 
313 static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
314                                              sos_uoffset_t page_id)
315 {
316   struct zero_mapped_page * zmp;
317   int nb_elts;
318 
319   list_foreach_forward(mr->list_mapped_pages, zmp, nb_elts)
320     {
321       if (zmp->page_id == page_id)
322         return zmp->ppage_paddr;
323     }
324   
325   return (sos_paddr_t)NULL;
326 }
327 
328 /*
329  * /dev/zero character device FS operations
330  */
331 
332 static sos_ret_t dev_zero_fs_open(struct sos_fs_node        * fsnode,
333                                   struct sos_fs_opened_file * of,
334                                   void * chardev_class_custom_data)
335 {
336   /* Make sure the device instance is known to the driver */
337   if ( (SOS_CHARDEV_NULL_MINOR != fsnode->dev_id.device_instance)
338        && (SOS_CHARDEV_ZERO_MINOR != fsnode->dev_id.device_instance) )
339     return -SOS_ENODEV;
340 
341   return SOS_OK;
342 }
343 
344 
345 static sos_ret_t dev_zero_fs_seek(struct sos_fs_opened_file *this,
346                                   sos_lsoffset_t offset,
347                                   sos_seek_whence_t whence,
348                                   /* out */ sos_lsoffset_t * result_position)
349 {
350   /* Artificiallly update the position in the "file" */
351   sos_lsoffset_t ref_offs;
352 
353   *result_position = this->position;
354   switch (whence)
355     {
356     case SOS_SEEK_SET:
357       ref_offs = 0;
358       break;
359 
360     case SOS_SEEK_CUR:
361       ref_offs = this->position;
362       break;
363 
364     case SOS_SEEK_END:
365       return -SOS_ENOSUP;
366       break;
367 
368     default:
369       return -SOS_EINVAL;
370     }
371 
372   if (offset < -ref_offs)
373     return -SOS_EINVAL;
374   
375   this->position = ref_offs + offset;
376   *result_position = this->position;
377   return SOS_OK;
378 }
379 
380 
/** read() callback.
    /dev/null: always reports end-of-file (*len = 0).
    /dev/zero: fills the user buffer with zeroes, page-sized chunk by
    chunk, and reports through *len the number of bytes actually
    copied (which may be short if the user buffer is partly
    inaccessible). */
static sos_ret_t dev_zero_fs_read(struct sos_fs_opened_file *this,
                                  sos_uaddr_t dest_buf,
                                  sos_size_t * /* in/out */len)
{
  struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
  sos_size_t offs, rdlen;

  /* Reading /dev/null returns immediately */
  if (SOS_CHARDEV_NULL_MINOR == fsnode->dev_id.device_instance)
    {
      *len = 0;
      return SOS_OK;
    }

  /* ZERO the destination buffer using the zero page (by page_size
     increments) */
  for (rdlen = offs = 0 ; offs < *len ; offs += SOS_PAGE_SIZE)
    {
      sos_ret_t retval;
      sos_size_t memcpy_len = SOS_PAGE_SIZE;
      /* The last chunk may be shorter than a full page */
      if (offs + memcpy_len > *len)
        memcpy_len = *len - offs;
      
      retval = sos_memcpy_to_user(dest_buf + offs, sos_zero_kernelpage,
                                  memcpy_len);
      /* Copy to user space failed outright: report what was copied
         so far rather than an error */
      if (retval < 0)
        break;

      rdlen += retval;
      /* Partial copy (eg destination page not accessible): stop */
      if (retval != (sos_ret_t)memcpy_len)
        break;
    }

  /* Artificially update the position in the "file" */
  *len = rdlen;
  this->position += rdlen;
  return SOS_OK;
}
419 
420 
421 static sos_ret_t dev_zero_fs_write(struct sos_fs_opened_file *this,
422                                    sos_uaddr_t src_buf,
423                                    sos_size_t * /* in/out */len)
424 {
425   /* Artificiallly update the position in the "file" */
426   this->position += *len;
427   return SOS_OK;
428 }
429 
430 
431 static sos_ret_t dev_zero_fs_mmap(struct sos_fs_opened_file *this,
432                                   sos_uaddr_t *uaddr, sos_size_t size,
433                                   sos_ui32_t access_rights,
434                                   sos_ui32_t flags,
435                                   sos_luoffset_t offset)
436 {
437   return sos_dev_zero_map(sos_process_get_address_space(this->owner),
438                           uaddr, size, access_rights, flags);
439 }
440 
441 
442 static struct sos_chardev_ops dev_zero_fs_ops
443   = (struct sos_chardev_ops) {
444     .open  = dev_zero_fs_open,
445     .close = NULL,
446     .seek  = dev_zero_fs_seek,
447     .read  = dev_zero_fs_read,
448     .write = dev_zero_fs_write,
449     .mmap  = dev_zero_fs_mmap,
450     .fcntl = NULL,
451     .ioctl = NULL
452   };

source navigation ] diff markup ] identifier search ] general search ]