001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/assert.h>
020 #include <sos/kmalloc.h>
021 #include <sos/physmem.h>
022 #include <hwcore/paging.h>
023 #include <sos/kmem_slab.h>
024 #include <sos/list.h>
025 #include <hwcore/paging.h>
026 #include <sos/uaccess.h>
027 #include <sos/chardev.h>
028 #include <drivers/devices.h>
029
030 #include "zero.h"
031
032
033
034
035
/*
 * One physical page shared by the MAP_SHARED mappings of a /dev/zero
 * resource, keyed by its (page-aligned) offset inside the resource.
 * Linked into zero_mapped_resource::list_mapped_pages; allocated from
 * cache_of_zero_mapped_pages.
 */
struct zero_mapped_page
{
sos_uoffset_t page_id;    /* Page-aligned byte offset of the page inside the resource */
sos_paddr_t ppage_paddr;  /* Physical address of the backing page frame */

/* Linkage for the resource's doubly-linked list (see sos/list.h) */
struct zero_mapped_page *prev, *next;
};
043
044 struct sos_kslab_cache * cache_of_zero_mapped_pages;
045
046
047
048
049
/*
 * Per-mapping state of a /dev/zero (anonymous) mapped resource.
 * Allocated in sos_dev_zero_map(); reference-counted by the virtual
 * regions that map it (zero_ref/zero_unref) and freed when the last
 * one goes away.
 */
struct zero_mapped_resource
{
int ref_cnt;  /* Number of VRs currently mapping this resource */

/* Pages materialized for the MAP_SHARED mappings of this resource;
   private (copy-on-write style) pages are not tracked here */
struct zero_mapped_page *list_mapped_pages;

/* Generic mapped-resource descriptor handed to sos_umem_vmm_map() */
struct sos_umem_vmm_mapped_resource mr;
};
062
063
064
065
066 static struct sos_chardev_ops dev_zero_fs_ops;
067
068
069
070
/* Record (and take a reference on) a shared anonymous physical page
   under the given page-aligned offset in the resource */
static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
sos_paddr_t ppage_paddr,
sos_uoffset_t page_id);


/* Retrieve the physical page recorded at the given page-aligned
   offset, or NULL when none has been inserted yet */
static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
sos_uoffset_t page_id);
080
081
082 sos_ret_t sos_dev_zero_subsystem_setup()
083 {
084 sos_ret_t retval;
085
086 cache_of_zero_mapped_pages =
087 sos_kmem_cache_create("shared anonymous mappings",
088 sizeof(struct zero_mapped_page),
089 1, 0,
090 SOS_KSLAB_CREATE_MAP | SOS_KSLAB_CREATE_ZERO);
091 if (! cache_of_zero_mapped_pages)
092 return -SOS_ENOMEM;
093
094 retval = sos_chardev_register_class(SOS_CHARDEV_ZERO_MAJOR,
095 & dev_zero_fs_ops,
096 NULL);
097 if (SOS_OK != retval)
098 {
099 sos_kmem_cache_destroy(cache_of_zero_mapped_pages);
100 return retval;
101 }
102
103 return SOS_OK;
104 }
105
106
107
108
109 static void zero_ref(struct sos_umem_vmm_vr * vr)
110 {
111
112 struct zero_mapped_resource * zero_resource;
113 zero_resource
114 = (struct zero_mapped_resource*)
115 sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
116
117
118 zero_resource->ref_cnt ++;
119 }
120
121
122
123
124 static void zero_unref(struct sos_umem_vmm_vr * vr)
125 {
126
127 struct zero_mapped_resource * zero_resource;
128 zero_resource
129 = (struct zero_mapped_resource*)
130 sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
131
132
133 SOS_ASSERT_FATAL(zero_resource->ref_cnt > 0);
134 zero_resource->ref_cnt --;
135
136
137 if (zero_resource->ref_cnt == 0)
138 {
139
140 struct zero_mapped_page *zmp;
141 list_collapse(zero_resource->list_mapped_pages, zmp)
142 {
143
144 sos_physmem_unref_physpage(zmp->ppage_paddr);
145 sos_kfree((sos_vaddr_t)zmp);
146 }
147
148 sos_kfree((sos_vaddr_t)zero_resource);
149 }
150 }
151
152
153
154
/*
 * VR operation: demand page-in handler for /dev/zero mappings, called
 * on a page fault at uaddr inside the VR.
 *
 * Strategy:
 *  - MAP_SHARED: if another mapping already materialized this page,
 *    map the recorded physical frame at uaddr and return;
 *  - write fault: allocate a fresh frame, map it with the VR's
 *    protection, zero it through the new user mapping, and (when
 *    MAP_SHARED) record it so sibling mappings find it;
 *  - read fault: map the global zero page read-only — the first write
 *    will fault again and take the allocation branch above.
 *
 * Returns SOS_OK, -SOS_ENOMEM, or the error of sos_paging_map().
 */
static sos_ret_t zero_page_in(struct sos_umem_vmm_vr * vr,
sos_uaddr_t uaddr,
sos_bool_t write_access)
{
sos_ret_t retval = SOS_OK;
sos_paddr_t ppage_paddr;
sos_uoffset_t required_page_id;
struct zero_mapped_resource * zero_resource;
sos_ui32_t vr_prot, vr_flags;

/* Retrieve the per-resource state attached at map time */
zero_resource
= (struct zero_mapped_resource*)
sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

vr_prot = sos_umem_vmm_get_prot_of_vr(vr);
vr_flags = sos_umem_vmm_get_flags_of_vr(vr);

/* Page-aligned byte offset of the faulting page inside the resource */
required_page_id = SOS_PAGE_ALIGN_INF(uaddr)
- sos_umem_vmm_get_start_of_vr(vr)
+ sos_umem_vmm_get_offset_in_resource(vr);

/* Shared mapping: reuse the frame another VR may already have
   materialized at this offset */
if (vr_flags & SOS_VR_MAP_SHARED)
{
ppage_paddr = lookup_anonymous_physpage(zero_resource, required_page_id);
if (NULL != (void*)ppage_paddr)
{
retval = sos_paging_map(ppage_paddr,
SOS_PAGE_ALIGN_INF(uaddr),
TRUE,
vr_prot);

return retval;
}
}

if (write_access)
{
/* Allocate a fresh physical frame (FALSE: not pre-zeroed — it is
   cleared below through the user mapping) */
ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
if (! ppage_paddr)
return -SOS_ENOMEM;

retval = sos_paging_map(ppage_paddr,
SOS_PAGE_ALIGN_INF(uaddr),
TRUE,
vr_prot);
if (SOS_OK != retval)
{
sos_physmem_unref_physpage(ppage_paddr);
return retval;
}

/* Zero the frame through its freshly-established user mapping */
memset((void*)SOS_PAGE_ALIGN_INF(uaddr), 0x0, SOS_PAGE_SIZE);

/* Shared mapping: record the frame (the insert takes its own
   reference on it).
   NOTE(review): an -ENOMEM failure of the insertion is silently
   ignored here — confirm this best-effort behavior is intended */
if (vr_flags & SOS_VR_MAP_SHARED)
insert_anonymous_physpage(zero_resource, ppage_paddr,
required_page_id);

/* Drop our allocation reference; the frame presumably remains
   referenced by the MMU mapping established above (and, when
   shared, by the list entry) — standard SOS paging behavior */
sos_physmem_unref_physpage(ppage_paddr);
}
else
{
/* Read fault: map the global zero page read-only regardless of
   vr_prot; the first write faults again and allocates a real
   frame above */
retval = sos_paging_map(sos_zero_physpage,
SOS_PAGE_ALIGN_INF(uaddr),
TRUE,
SOS_VM_MAP_PROT_READ);
}

return retval;
}
237
238
239
240 static struct sos_umem_vmm_vr_ops zero_ops = (struct sos_umem_vmm_vr_ops)
241 {
242 .ref = zero_ref,
243 .unref = zero_unref,
244 .page_in = zero_page_in,
245 .unmap = NULL
246 };
247
248
249
250 static sos_ret_t zero_mmap(struct sos_umem_vmm_vr *vr)
251 {
252 return sos_umem_vmm_set_ops_of_vr(vr, &zero_ops);
253 }
254
255
256
257
258 sos_ret_t sos_dev_zero_map(struct sos_umem_vmm_as * dest_as,
259 sos_uaddr_t *uaddr,
260 sos_size_t size,
261 sos_ui32_t access_rights,
262 sos_ui32_t flags)
263 {
264 sos_ret_t retval;
265 struct zero_mapped_resource * zero_resource;
266
267 zero_resource
268 = (struct zero_mapped_resource*) sos_kmalloc(sizeof(*zero_resource), 0);
269 if (! zero_resource)
270 return -SOS_ENOMEM;
271
272 memset(zero_resource, 0x0, sizeof(*zero_resource));
273 zero_resource->mr.allowed_access_rights
274 = SOS_VM_MAP_PROT_READ
275 | SOS_VM_MAP_PROT_WRITE
276 | SOS_VM_MAP_PROT_EXEC;
277 zero_resource->mr.flags |= SOS_MAPPED_RESOURCE_ANONYMOUS;
278 zero_resource->mr.custom_data = zero_resource;
279 zero_resource->mr.mmap = zero_mmap;
280
281 retval = sos_umem_vmm_map(dest_as, uaddr, size,
282 access_rights, flags,
283 &zero_resource->mr, 0);
284 if (SOS_OK != retval)
285 {
286 sos_kfree((sos_vaddr_t)zero_resource);
287 return retval;
288 }
289
290 return SOS_OK;
291 }
292
293
294 static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
295 sos_paddr_t ppage_paddr,
296 sos_uoffset_t page_id)
297 {
298 struct zero_mapped_page * zmp
299 = (struct zero_mapped_page*)sos_kmem_cache_alloc(cache_of_zero_mapped_pages,
300 0);
301 if (! zmp)
302 return -SOS_ENOMEM;
303
304 zmp->page_id = page_id;
305 zmp->ppage_paddr = ppage_paddr;
306
307 list_add_head(mr->list_mapped_pages, zmp);
308 sos_physmem_ref_physpage_at(ppage_paddr);
309 return SOS_OK;
310 }
311
312
313 static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
314 sos_uoffset_t page_id)
315 {
316 struct zero_mapped_page * zmp;
317 int nb_elts;
318
319 list_foreach_forward(mr->list_mapped_pages, zmp, nb_elts)
320 {
321 if (zmp->page_id == page_id)
322 return zmp->ppage_paddr;
323 }
324
325 return (sos_paddr_t)NULL;
326 }
327
328
329
330
331
332 static sos_ret_t dev_zero_fs_open(struct sos_fs_node * fsnode,
333 struct sos_fs_opened_file * of,
334 void * chardev_class_custom_data)
335 {
336
337 if ( (SOS_CHARDEV_NULL_MINOR != fsnode->dev_id.device_instance)
338 && (SOS_CHARDEV_ZERO_MINOR != fsnode->dev_id.device_instance) )
339 return -SOS_ENODEV;
340
341 return SOS_OK;
342 }
343
344
345 static sos_ret_t dev_zero_fs_seek(struct sos_fs_opened_file *this,
346 sos_lsoffset_t offset,
347 sos_seek_whence_t whence,
348 sos_lsoffset_t * result_position)
349 {
350
351 sos_lsoffset_t ref_offs;
352
353 *result_position = this->position;
354 switch (whence)
355 {
356 case SOS_SEEK_SET:
357 ref_offs = 0;
358 break;
359
360 case SOS_SEEK_CUR:
361 ref_offs = this->position;
362 break;
363
364 case SOS_SEEK_END:
365 return -SOS_ENOSUP;
366 break;
367
368 default:
369 return -SOS_EINVAL;
370 }
371
372 if (offset < -ref_offs)
373 return -SOS_EINVAL;
374
375 this->position = ref_offs + offset;
376 *result_position = this->position;
377 return SOS_OK;
378 }
379
380
381 static sos_ret_t dev_zero_fs_read(struct sos_fs_opened_file *this,
382 sos_uaddr_t dest_buf,
383 sos_size_t * len)
384 {
385 struct sos_fs_node * fsnode = sos_fs_nscache_get_fs_node(this->direntry);
386 sos_size_t offs, rdlen;
387
388
389 if (SOS_CHARDEV_NULL_MINOR == fsnode->dev_id.device_instance)
390 {
391 *len = 0;
392 return SOS_OK;
393 }
394
395
396
397 for (rdlen = offs = 0 ; offs < *len ; offs += SOS_PAGE_SIZE)
398 {
399 sos_ret_t retval;
400 sos_size_t memcpy_len = SOS_PAGE_SIZE;
401 if (offs + memcpy_len > *len)
402 memcpy_len = *len - offs;
403
404 retval = sos_memcpy_to_user(dest_buf + offs, sos_zero_kernelpage,
405 memcpy_len);
406 if (retval < 0)
407 break;
408
409 rdlen += retval;
410 if (retval != (sos_ret_t)memcpy_len)
411 break;
412 }
413
414
415 *len = rdlen;
416 this->position += rdlen;
417 return SOS_OK;
418 }
419
420
421 static sos_ret_t dev_zero_fs_write(struct sos_fs_opened_file *this,
422 sos_uaddr_t src_buf,
423 sos_size_t * len)
424 {
425
426 this->position += *len;
427 return SOS_OK;
428 }
429
430
431 static sos_ret_t dev_zero_fs_mmap(struct sos_fs_opened_file *this,
432 sos_uaddr_t *uaddr, sos_size_t size,
433 sos_ui32_t access_rights,
434 sos_ui32_t flags,
435 sos_luoffset_t offset)
436 {
437 return sos_dev_zero_map(sos_process_get_address_space(this->owner),
438 uaddr, size, access_rights, flags);
439 }
440
441
442 static struct sos_chardev_ops dev_zero_fs_ops
443 = (struct sos_chardev_ops) {
444 .open = dev_zero_fs_open,
445 .close = NULL,
446 .seek = dev_zero_fs_seek,
447 .read = dev_zero_fs_read,
448 .write = dev_zero_fs_write,
449 .mmap = dev_zero_fs_mmap,
450 .fcntl = NULL,
451 .ioctl = NULL
452 };