Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 #include <sos/assert.h> 019 #include <sos/assert.h>
020 #include <sos/kmalloc.h> 020 #include <sos/kmalloc.h>
021 #include <sos/physmem.h> 021 #include <sos/physmem.h>
022 #include <hwcore/paging.h> 022 #include <hwcore/paging.h>
023 #include <sos/kmem_slab.h> 023 #include <sos/kmem_slab.h>
024 #include <sos/list.h> 024 #include <sos/list.h>
025 #include <hwcore/paging.h> 025 #include <hwcore/paging.h>
026 #include <sos/uaccess.h> <<
027 #include <sos/chardev.h> <<
028 #include <drivers/devices.h> <<
029 026
030 #include "zero.h" 027 #include "zero.h"
031 028
032 029
033 030
034 031
035 032
/**
 * Descriptor of one physical page kept for a shared anonymous
 * mapping: associates the page's offset inside the resource
 * (page_id) with the physical frame holding its contents.
 */
struct zero_mapped_page
{
  sos_uoffset_t page_id;      /* Page-aligned offset inside the resource */
  sos_paddr_t   ppage_paddr;  /* Physical frame backing that offset */

  /* Linkage inside a zero_mapped_resource's list_mapped_pages list */
  struct zero_mapped_page *prev, *next;
};
043 040
/* Slab cache from which zero_mapped_page descriptors are allocated;
   created by sos_dev_zero_subsystem_setup() below */
struct sos_kslab_cache * cache_of_zero_mapped_pages;
045 042
046 043
047 044
048 045
049 046
/**
 * State shared by all VRs mapping the same /dev/zero resource:
 * a reference count plus, for shared mappings, the list of physical
 * pages already allocated on write faults.
 */
struct zero_mapped_resource
{
  int ref_cnt;  /* Number of VRs currently referencing this resource
                   (see zero_ref/zero_unref) */

  /* Physical pages backing the mapping; only populated when the
     mapping is SOS_VR_MAP_SHARED (see zero_page_in) */
  struct zero_mapped_page *list_mapped_pages;

  /* Generic mapped-resource descriptor handed to umem_vmm; its
     custom_data field points back to this structure */
  struct sos_umem_vmm_mapped_resource mr;
};
062 059
063 060
064 <<
065 <<
066 static struct sos_chardev_ops dev_zero_fs_ops; <<
067 <<
068 <<
069 061
070 062
/* Record the physical page backing offset page_id of a shared
   anonymous resource (takes an extra reference on the frame);
   definition at the bottom of the file */
static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
					   sos_paddr_t ppage_paddr,
					   sos_uoffset_t page_id);

/* Retrieve the physical page recorded for page_id, or NULL when the
   offset has not been touched yet; definition at the bottom of the file */
static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
					     sos_uoffset_t page_id);
080 072
081 073
082 sos_ret_t sos_dev_zero_subsystem_setup() 074 sos_ret_t sos_dev_zero_subsystem_setup()
083 { 075 {
084 sos_ret_t retval; <<
085 <<
086 cache_of_zero_mapped_pages = 076 cache_of_zero_mapped_pages =
087 sos_kmem_cache_create("shared anonymous ma 077 sos_kmem_cache_create("shared anonymous mappings",
088 sizeof(struct zero_m 078 sizeof(struct zero_mapped_page),
089 1, 0, 079 1, 0,
090 SOS_KSLAB_CREATE_MAP 080 SOS_KSLAB_CREATE_MAP | SOS_KSLAB_CREATE_ZERO);
091 if (! cache_of_zero_mapped_pages) 081 if (! cache_of_zero_mapped_pages)
092 return -SOS_ENOMEM; 082 return -SOS_ENOMEM;
093 083
094 retval = sos_chardev_register_class(SOS_CHAR <<
095 & dev_ze <<
096 NULL); <<
097 if (SOS_OK != retval) <<
098 { <<
099 sos_kmem_cache_destroy(cache_of_zero_map <<
100 return retval; <<
101 } <<
102 <<
103 return SOS_OK; 084 return SOS_OK;
104 } 085 }
105 086
106 087
107 088
108 089
109 static void zero_ref(struct sos_umem_vmm_vr * 090 static void zero_ref(struct sos_umem_vmm_vr * vr)
110 { 091 {
111 092
112 struct zero_mapped_resource * zero_resource; 093 struct zero_mapped_resource * zero_resource;
113 zero_resource 094 zero_resource
114 = (struct zero_mapped_resource*) 095 = (struct zero_mapped_resource*)
115 sos_umem_vmm_get_mapped_resource_of_vr(vr) 096 sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
116 097
117 098
118 zero_resource->ref_cnt ++; 099 zero_resource->ref_cnt ++;
119 } 100 }
120 101
121 102
122 103
123 104
124 static void zero_unref(struct sos_umem_vmm_vr 105 static void zero_unref(struct sos_umem_vmm_vr * vr)
125 { 106 {
126 107
127 struct zero_mapped_resource * zero_resource; 108 struct zero_mapped_resource * zero_resource;
128 zero_resource 109 zero_resource
129 = (struct zero_mapped_resource*) 110 = (struct zero_mapped_resource*)
130 sos_umem_vmm_get_mapped_resource_of_vr(vr) 111 sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
131 112
132 113
133 SOS_ASSERT_FATAL(zero_resource->ref_cnt > 0) 114 SOS_ASSERT_FATAL(zero_resource->ref_cnt > 0);
134 zero_resource->ref_cnt --; 115 zero_resource->ref_cnt --;
135 116
136 117
137 if (zero_resource->ref_cnt == 0) 118 if (zero_resource->ref_cnt == 0)
138 { 119 {
139 120
140 struct zero_mapped_page *zmp; 121 struct zero_mapped_page *zmp;
141 list_collapse(zero_resource->list_mapped 122 list_collapse(zero_resource->list_mapped_pages, zmp)
142 { 123 {
143 124
144 sos_physmem_unref_physpage(zmp->ppag 125 sos_physmem_unref_physpage(zmp->ppage_paddr);
145 sos_kfree((sos_vaddr_t)zmp); 126 sos_kfree((sos_vaddr_t)zmp);
146 } 127 }
147 128
148 sos_kfree((sos_vaddr_t)zero_resource); 129 sos_kfree((sos_vaddr_t)zero_resource);
149 } 130 }
150 } 131 }
151 132
152 133
153 134
154 135
/*
 * vr_ops "page_in" callback: handle a page fault at uaddr inside a
 * /dev/zero mapping by providing a physical page.
 *
 * - Shared mapping, page already allocated: remap the recorded frame.
 * - Write fault: allocate a fresh frame, map it, zero it through the
 *   new user mapping, and (for shared mappings) remember it so other
 *   VRs fault in the same frame.
 * - Read fault on a private/untouched page: map the global zero page
 *   read-only (a later write fault will allocate a real frame).
 */
static sos_ret_t zero_page_in(struct sos_umem_vmm_vr * vr,
			      sos_uaddr_t uaddr,
			      sos_bool_t write_access)
{
  sos_ret_t retval = SOS_OK;
  sos_paddr_t ppage_paddr;
  sos_uoffset_t required_page_id;
  struct zero_mapped_resource * zero_resource;
  sos_ui32_t vr_prot, vr_flags;

  /* Retrieve the resource descriptor attached to this VR */
  zero_resource
    = (struct zero_mapped_resource*)
    sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  vr_prot  = sos_umem_vmm_get_prot_of_vr(vr);
  vr_flags = sos_umem_vmm_get_flags_of_vr(vr);

  /* Offset of the faulting page inside the resource */
  required_page_id = SOS_PAGE_ALIGN_INF(uaddr)
    - sos_umem_vmm_get_start_of_vr(vr)
    + sos_umem_vmm_get_offset_in_resource(vr);

  /* Shared mapping: maybe another VR already allocated this page */
  if (vr_flags & SOS_VR_MAP_SHARED)
    {
      ppage_paddr = lookup_anonymous_physpage(zero_resource, required_page_id);
      if (NULL != (void*)ppage_paddr)
	{
	  /* Reuse the existing frame with the VR's full protections */
	  retval = sos_paging_map(ppage_paddr,
				  SOS_PAGE_ALIGN_INF(uaddr),
				  TRUE,
				  vr_prot);
	  return retval;
	}
    }

  if (write_access)
    {
      /* Allocate a fresh physical page (refcount now 1) */
      ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
      if (! ppage_paddr)
	return -SOS_ENOMEM;

      retval = sos_paging_map(ppage_paddr,
			      SOS_PAGE_ALIGN_INF(uaddr),
			      TRUE,
			      vr_prot);
      if (SOS_OK != retval)
	{
	  /* Mapping failed: release the frame we just allocated */
	  sos_physmem_unref_physpage(ppage_paddr);
	  return retval;
	}

      /* Zero the page through the freshly established user mapping */
      memset((void*)SOS_PAGE_ALIGN_INF(uaddr), 0x0, SOS_PAGE_SIZE);

      /* Shared mapping: remember the frame (takes its own reference) */
      if (vr_flags & SOS_VR_MAP_SHARED)
	insert_anonymous_physpage(zero_resource, ppage_paddr,
				  required_page_id);

      /* Drop our allocation reference; the page table (and, for shared
	 mappings, the list) keep the frame alive */
      sos_physmem_unref_physpage(ppage_paddr);
    }
  else
    {
      /* Read fault: map the system-wide zero page read-only.
	 NOTE(review): assumes sos_zero_page is the global pre-zeroed
	 frame declared elsewhere — confirm against the paging code */
      retval = sos_paging_map(sos_zero_page,
			      SOS_PAGE_ALIGN_INF(uaddr),
			      TRUE,
			      SOS_VM_MAP_PROT_READ);
    }

  return retval;
}
237 218
238 219
239 220
240 static struct sos_umem_vmm_vr_ops zero_ops = ( 221 static struct sos_umem_vmm_vr_ops zero_ops = (struct sos_umem_vmm_vr_ops)
241 { 222 {
242 .ref = zero_ref, 223 .ref = zero_ref,
243 .unref = zero_unref, 224 .unref = zero_unref,
244 .page_in = zero_page_in, 225 .page_in = zero_page_in,
245 .unmap = NULL 226 .unmap = NULL
246 }; 227 };
247 228
248 229
249 230
250 static sos_ret_t zero_mmap(struct sos_umem_vmm 231 static sos_ret_t zero_mmap(struct sos_umem_vmm_vr *vr)
251 { 232 {
252 return sos_umem_vmm_set_ops_of_vr(vr, &zero_ 233 return sos_umem_vmm_set_ops_of_vr(vr, &zero_ops);
253 } 234 }
254 235
255 236
256 237
257 238
258 sos_ret_t sos_dev_zero_map(struct sos_umem_vmm 239 sos_ret_t sos_dev_zero_map(struct sos_umem_vmm_as * dest_as,
259 sos_uaddr_t *uaddr, 240 sos_uaddr_t *uaddr,
260 sos_size_t size, 241 sos_size_t size,
261 sos_ui32_t access_r 242 sos_ui32_t access_rights,
262 sos_ui32_t flags) 243 sos_ui32_t flags)
263 { 244 {
264 sos_ret_t retval; 245 sos_ret_t retval;
265 struct zero_mapped_resource * zero_resource; 246 struct zero_mapped_resource * zero_resource;
266 247
267 zero_resource 248 zero_resource
268 = (struct zero_mapped_resource*) sos_kmall 249 = (struct zero_mapped_resource*) sos_kmalloc(sizeof(*zero_resource), 0);
269 if (! zero_resource) 250 if (! zero_resource)
270 return -SOS_ENOMEM; 251 return -SOS_ENOMEM;
271 252
272 memset(zero_resource, 0x0, sizeof(*zero_reso 253 memset(zero_resource, 0x0, sizeof(*zero_resource));
273 zero_resource->mr.allowed_access_rights 254 zero_resource->mr.allowed_access_rights
274 = SOS_VM_MAP_PROT_READ 255 = SOS_VM_MAP_PROT_READ
275 | SOS_VM_MAP_PROT_WRITE 256 | SOS_VM_MAP_PROT_WRITE
276 | SOS_VM_MAP_PROT_EXEC; 257 | SOS_VM_MAP_PROT_EXEC;
277 zero_resource->mr.flags |= SOS_MAPPE 258 zero_resource->mr.flags |= SOS_MAPPED_RESOURCE_ANONYMOUS;
278 zero_resource->mr.custom_data = zero_reso 259 zero_resource->mr.custom_data = zero_resource;
279 zero_resource->mr.mmap = zero_mmap 260 zero_resource->mr.mmap = zero_mmap;
280 261
281 retval = sos_umem_vmm_map(dest_as, uaddr, si 262 retval = sos_umem_vmm_map(dest_as, uaddr, size,
282 access_rights, fla 263 access_rights, flags,
283 &zero_resource->mr 264 &zero_resource->mr, 0);
284 if (SOS_OK != retval) 265 if (SOS_OK != retval)
285 { 266 {
286 sos_kfree((sos_vaddr_t)zero_resource); 267 sos_kfree((sos_vaddr_t)zero_resource);
287 return retval; 268 return retval;
288 } 269 }
289 270
290 return SOS_OK; 271 return SOS_OK;
291 } 272 }
292 273
293 274
294 static sos_ret_t insert_anonymous_physpage(str 275 static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
295 sos 276 sos_paddr_t ppage_paddr,
296 sos 277 sos_uoffset_t page_id)
297 { 278 {
298 struct zero_mapped_page * zmp 279 struct zero_mapped_page * zmp
299 = (struct zero_mapped_page*)sos_kmem_cache 280 = (struct zero_mapped_page*)sos_kmem_cache_alloc(cache_of_zero_mapped_pages,
300 281 0);
301 if (! zmp) 282 if (! zmp)
302 return -SOS_ENOMEM; 283 return -SOS_ENOMEM;
303 284
304 zmp->page_id = page_id; 285 zmp->page_id = page_id;
305 zmp->ppage_paddr = ppage_paddr; 286 zmp->ppage_paddr = ppage_paddr;
306 287
307 list_add_head(mr->list_mapped_pages, zmp); 288 list_add_head(mr->list_mapped_pages, zmp);
308 sos_physmem_ref_physpage_at(ppage_paddr); 289 sos_physmem_ref_physpage_at(ppage_paddr);
309 return SOS_OK; 290 return SOS_OK;
310 } 291 }
311 292
312 293
313 static sos_paddr_t lookup_anonymous_physpage(s 294 static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
314 so 295 sos_uoffset_t page_id)
315 { 296 {
316 struct zero_mapped_page * zmp; 297 struct zero_mapped_page * zmp;
317 int nb_elts; 298 int nb_elts;
318 299
319 list_foreach_forward(mr->list_mapped_pages, 300 list_foreach_forward(mr->list_mapped_pages, zmp, nb_elts)
320 { 301 {
321 if (zmp->page_id == page_id) 302 if (zmp->page_id == page_id)
322 return zmp->ppage_paddr; 303 return zmp->ppage_paddr;
323 } 304 }
324 305
325 return (sos_paddr_t)NULL; 306 return (sos_paddr_t)NULL;
326 } 307 }
327 <<
328 <<
329 <<
330 <<
331 <<
332 static sos_ret_t dev_zero_fs_open(struct sos_f <<
333 struct sos_f <<
334 void * chard <<
335 { <<
336 <<
337 if ( (SOS_CHARDEV_NULL_MINOR != fsnode->dev_ <<
338 && (SOS_CHARDEV_ZERO_MINOR != fsnode->d <<
339 return -SOS_ENODEV; <<
340 <<
341 return SOS_OK; <<
342 } <<
343 <<
344 <<
345 static sos_ret_t dev_zero_fs_seek(struct sos_f <<
346 sos_lsoffset <<
347 sos_seek_whe <<
348 so <<
349 { <<
350 <<
351 sos_lsoffset_t ref_offs; <<
352 <<
353 *result_position = this->position; <<
354 switch (whence) <<
355 { <<
356 case SOS_SEEK_SET: <<
357 ref_offs = 0; <<
358 break; <<
359 <<
360 case SOS_SEEK_CUR: <<
361 ref_offs = this->position; <<
362 break; <<
363 <<
364 case SOS_SEEK_END: <<
365 return -SOS_ENOSUP; <<
366 break; <<
367 <<
368 default: <<
369 return -SOS_EINVAL; <<
370 } <<
371 <<
372 if (offset < -ref_offs) <<
373 return -SOS_EINVAL; <<
374 <<
375 this->position = ref_offs + offset; <<
376 *result_position = this->position; <<
377 return SOS_OK; <<
378 } <<
379 <<
380 <<
381 static sos_ret_t dev_zero_fs_read(struct sos_f <<
382 sos_uaddr_t <<
383 sos_size_t * <<
384 { <<
385 struct sos_fs_node * fsnode = sos_fs_nscache <<
386 sos_size_t offs, rdlen; <<
387 <<
388 <<
389 if (SOS_CHARDEV_NULL_MINOR == fsnode->dev_id <<
390 { <<
391 *len = 0; <<
392 return SOS_OK; <<
393 } <<
394 <<
395 <<
396 <<
397 for (rdlen = offs = 0 ; offs < *len ; offs + <<
398 { <<
399 sos_size_t retval, memcpy_len = SOS_PAGE <<
400 if (offs + memcpy_len > *len) <<
401 memcpy_len = *len - offs; <<
402 <<
403 retval = sos_memcpy_to_user(dest_buf + o <<
404 memcpy_len); <<
405 if (retval < 0) <<
406 break; <<
407 <<
408 rdlen += retval; <<
409 if (retval != memcpy_len) <<
410 break; <<
411 } <<
412 <<
413 <<
414 *len = rdlen; <<
415 this->position += rdlen; <<
416 return SOS_OK; <<
417 } <<
418 <<
419 <<
420 static sos_ret_t dev_zero_fs_write(struct sos_ <<
421 sos_uaddr_t <<
422 sos_size_t <<
423 { <<
424 <<
425 this->position += *len; <<
426 return SOS_OK; <<
427 } <<
428 <<
429 <<
430 static sos_ret_t dev_zero_fs_mmap(struct sos_f <<
431 sos_uaddr_t <<
432 sos_ui32_t a <<
433 sos_ui32_t f <<
434 sos_luoffset <<
435 { <<
436 return sos_dev_zero_map(sos_process_get_addr <<
437 uaddr, size, access_ <<
438 } <<
439 <<
440 <<
441 static struct sos_chardev_ops dev_zero_fs_ops <<
442 = (struct sos_chardev_ops) { <<
443 .open = dev_zero_fs_open, <<
444 .close = NULL, <<
445 .seek = dev_zero_fs_seek, <<
446 .read = dev_zero_fs_read, <<
447 .write = dev_zero_fs_write, <<
448 .mmap = dev_zero_fs_mmap, <<
449 .fcntl = NULL, <<
450 .ioctl = NULL <<
451 }; <<