/* Copyright (C) 2005 David Decotigny

   This program is free software; you can redistribute it and/or
   modify it under the terms of the GNU General Public License
   as published by the Free Software Foundation; either version 2
   of the License, or (at your option) any later version.

   This program is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with this program; if not, write to the Free Software
   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307,
   USA.
*/

#include <sos/assert.h>
#include <sos/kmalloc.h>
#include <sos/physmem.h>
#include <hwcore/paging.h>
#include <sos/kmem_slab.h>
#include <sos/list.h>

#include "zero.h"


/**
 * A mapped page for a shared mapping of /dev/zero
 */
struct zero_mapped_page
{
  sos_uoffset_t page_id;
  sos_paddr_t   ppage_paddr;

  struct zero_mapped_page *prev, *next;
};
/** The Slab cache of shared mapped pages */
struct sos_kslab_cache * cache_of_zero_mapped_pages;


/**
 * A mapped /dev/zero resource
 */
struct zero_mapped_resource
{
  int ref_cnt;

  /**
   * For shared mappings: the list of shared pages mapped inside one
   * or multiple VRs
   */
  struct zero_mapped_page *list_mapped_pages;

  struct sos_umem_vmm_mapped_resource mr;
};
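
/* Note: the generic mapped-resource descriptor 'mr' is embedded in the
   structure above. sos_dev_zero_map() below points mr.custom_data back at
   the enclosing zero_mapped_resource, which is how the zero_ref/zero_unref/
   zero_page_in callbacks recover it from a VR. */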


/** Helper function to insert the given physical page in the list of
    physical pages used for shared anonymous mappings */
static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
                                           sos_paddr_t ppage_paddr,
                                           sos_uoffset_t page_id);


/** Helper function to look up, in the list of physical pages used for
    shared anonymous mappings, the one mapping the given page of the
    resource */
static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
                                             sos_uoffset_t page_id);


sos_ret_t sos_dev_zero_subsystem_setup()
{
  cache_of_zero_mapped_pages =
    sos_kmem_cache_create("shared anonymous mappings",
                          sizeof(struct zero_mapped_page),
                          1, 0,
                          SOS_KSLAB_CREATE_MAP | SOS_KSLAB_CREATE_ZERO);
  if (! cache_of_zero_mapped_pages)
    return -SOS_ENOMEM;

  return SOS_OK;
}
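
/*
 * Usage sketch (hypothetical call site, not part of this file): the kernel
 * initialization code is expected to call sos_dev_zero_subsystem_setup()
 * once, before the first /dev/zero mapping is created, along the lines of:
 *
 *   if (SOS_OK != sos_dev_zero_subsystem_setup())
 *     sos_display_fatal_error("Cannot set up the /dev/zero subsystem");
 *
 * Only the setup function and SOS_OK come from this file;
 * sos_display_fatal_error() is assumed here to be the kernel's panic helper.
 */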


/** Called after the virtual region has been inserted inside its
    address space */
static void zero_ref(struct sos_umem_vmm_vr * vr)
{
  /* Retrieve the 'zero' structure associated with the mapped resource */
  struct zero_mapped_resource * zero_resource;
  zero_resource
    = (struct zero_mapped_resource*)
    sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* Increment ref counter */
  zero_resource->ref_cnt ++;
}


/** Called when the virtual region is removed from its address
    space */
static void zero_unref(struct sos_umem_vmm_vr * vr)
{
  /* Retrieve the 'zero' structure associated with the mapped resource */
  struct zero_mapped_resource * zero_resource;
  zero_resource
    = (struct zero_mapped_resource*)
    sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* Decrement ref counter */
  SOS_ASSERT_FATAL(zero_resource->ref_cnt > 0);
  zero_resource->ref_cnt --;

  /* Free the resource if it becomes unused */
  if (zero_resource->ref_cnt == 0)
    {
      /* Delete the list of anonymous shared mappings */
      struct zero_mapped_page *zmp;
      list_collapse(zero_resource->list_mapped_pages, zmp)
        {
          /* No need to free the underlying physical pages, since they
             should have been unmapped just before this unref is
             called */
          sos_kfree((sos_vaddr_t)zmp);
        }

      sos_kfree((sos_vaddr_t)zero_resource);
    }
}


/** MOST IMPORTANT callback! Called when a thread page faults on the
    resource's mapping */
static sos_ret_t zero_page_in(struct sos_umem_vmm_vr * vr,
                              sos_uaddr_t uaddr,
                              sos_bool_t write_access)
{
  sos_ret_t retval = SOS_OK;
  sos_paddr_t ppage_paddr;
  sos_uoffset_t required_page_id;
  struct zero_mapped_resource * zero_resource;
  sos_ui32_t vr_prot, vr_flags;

  /* Retrieve the 'zero' structure associated with the mapped resource */
  zero_resource
    = (struct zero_mapped_resource*)
    sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* Retrieve access rights/flags of the VR */
  vr_prot  = sos_umem_vmm_get_prot_of_vr(vr);
  vr_flags = sos_umem_vmm_get_flags_of_vr(vr);

  /* Identify the page in the mapping that is being paged in */
  required_page_id = SOS_PAGE_ALIGN_INF(uaddr)
    - sos_umem_vmm_get_start_of_vr(vr)
    + sos_umem_vmm_get_offset_in_resource(vr);

  /* For shared mappings, check whether a page already maps the
     required address */
  if (vr_flags & SOS_VR_MAP_SHARED)
    {
      ppage_paddr = lookup_anonymous_physpage(zero_resource, required_page_id);
      if (NULL != (void*)ppage_paddr)
        {
          retval = sos_paging_map(ppage_paddr,
                                  SOS_PAGE_ALIGN_INF(uaddr),
                                  TRUE,
                                  vr_prot);
          return retval;
        }
    }

  /* For write accesses, directly map a new page. For read accesses,
     simply map in the zero page (and let COW handle the subsequent
     write accesses) */
  if (write_access)
    {
      /* Allocate a new page for the virtual address */
      ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
      if (! ppage_paddr)
        return -SOS_ENOMEM;

      retval = sos_paging_map(ppage_paddr,
                              SOS_PAGE_ALIGN_INF(uaddr),
                              TRUE,
                              vr_prot);
      if (SOS_OK != retval)
        {
          sos_physmem_unref_physpage(ppage_paddr);
          return retval;
        }

      memset((void*)SOS_PAGE_ALIGN_INF(uaddr), 0x0, SOS_PAGE_SIZE);

      /* For shared mappings, add the page to the list of shared
         mapped pages */
      if (vr_flags & SOS_VR_MAP_SHARED)
        insert_anonymous_physpage(zero_resource, ppage_paddr,
                                  required_page_id);

      sos_physmem_unref_physpage(ppage_paddr);
    }
  else
    {
      /* Map in the zero page READ ONLY, whatever the access rights
         or the type (shared/private) of the VR, in order to activate
         COW */
      retval = sos_paging_map(sos_zero_page,
                              SOS_PAGE_ALIGN_INF(uaddr),
                              TRUE,
                              SOS_VM_MAP_PROT_READ);
    }

  return retval;
}
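
/*
 * The callback above yields the following fault sequence for a page that
 * is read before being written:
 *   1. read fault  -> the global sos_zero_page is mapped read-only at the
 *                     faulting address, whatever the VR protections;
 *   2. write fault -> expected to be resolved as copy-on-write, as the
 *                     comments above indicate; that logic lives outside
 *                     this file.
 * A page that is written first skips step 1: a fresh, zero-filled physical
 * page is allocated and mapped with the VR protections directly in this
 * callback (and recorded in the shared-page list for shared mappings).
 */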


/** The callbacks for a mapped /dev/zero resource */
static struct sos_umem_vmm_vr_ops zero_ops =
{
  .ref     = zero_ref,
  .unref   = zero_unref,
  .page_in = zero_page_in,
  .unmap   = NULL
};


/** The callback that gets called when the resource gets mapped */
static sos_ret_t zero_mmap(struct sos_umem_vmm_vr *vr)
{
  return sos_umem_vmm_set_ops_of_vr(vr, &zero_ops);
}


/** The function responsible for mapping the /dev/zero resource in
    user space */
sos_ret_t sos_dev_zero_map(struct sos_umem_vmm_as * dest_as,
                           sos_uaddr_t *uaddr,
                           sos_size_t size,
                           sos_ui32_t access_rights,
                           sos_ui32_t flags)
{
  sos_ret_t retval;
  struct zero_mapped_resource * zero_resource;

  zero_resource
    = (struct zero_mapped_resource*) sos_kmalloc(sizeof(*zero_resource), 0);
  if (! zero_resource)
    return -SOS_ENOMEM;

  memset(zero_resource, 0x0, sizeof(*zero_resource));
  zero_resource->mr.allowed_access_rights
    = SOS_VM_MAP_PROT_READ
    | SOS_VM_MAP_PROT_WRITE
    | SOS_VM_MAP_PROT_EXEC;
  zero_resource->mr.flags         |= SOS_MAPPED_RESOURCE_ANONYMOUS;
  zero_resource->mr.custom_data    = zero_resource;
  zero_resource->mr.mmap           = zero_mmap;

  retval = sos_umem_vmm_map(dest_as, uaddr, size,
                            access_rights, flags,
                            &zero_resource->mr, 0);
  if (SOS_OK != retval)
    {
      sos_kfree((sos_vaddr_t)zero_resource);
      return retval;
    }

  return SOS_OK;
}
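
/*
 * Usage sketch (hypothetical caller, not part of this file): mapping one
 * page of /dev/zero read/write, shared between the VRs that map it. The
 * address space 'my_as' is an assumption, as is the convention that a zero
 * address hint lets the kernel choose where to place the mapping; the
 * function, constants and flag come from this file and its headers.
 *
 *   sos_uaddr_t uaddr  = 0;
 *   sos_ret_t   retval = sos_dev_zero_map(my_as, &uaddr, SOS_PAGE_SIZE,
 *                                         SOS_VM_MAP_PROT_READ
 *                                         | SOS_VM_MAP_PROT_WRITE,
 *                                         SOS_VR_MAP_SHARED);
 *   if (SOS_OK != retval)
 *     return retval;
 */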


static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
                                           sos_paddr_t ppage_paddr,
                                           sos_uoffset_t page_id)
{
  struct zero_mapped_page * zmp
    = (struct zero_mapped_page*)sos_kmem_cache_alloc(cache_of_zero_mapped_pages,
                                                     0);
  if (! zmp)
    return -SOS_ENOMEM;

  zmp->page_id     = page_id;
  zmp->ppage_paddr = ppage_paddr;

  list_add_head(mr->list_mapped_pages, zmp);
  return SOS_OK;
}


static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
                                             sos_uoffset_t page_id)
{
  struct zero_mapped_page * zmp;
  int nb_elts;

  list_foreach_forward(mr->list_mapped_pages, zmp, nb_elts)
    {
      if (zmp->page_id == page_id)
        return zmp->ppage_paddr;
    }

  return (sos_paddr_t)NULL;
}
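
/*
 * Note: lookup_anonymous_physpage() walks the whole list of shared pages,
 * so each fault on a shared /dev/zero mapping costs O(n) in the number of
 * pages already mapped; acceptable for a teaching kernel, though a structure
 * indexed by page_id would scale better for large shared mappings.
 */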
