001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/assert.h>
020 #include <sos/kmalloc.h>
021 #include <sos/physmem.h>
022 #include <hwcore/paging.h>
023 #include <sos/kmem_slab.h>
024 #include <sos/list.h>
025 #include <hwcore/paging.h>
026
027 #include "zero.h"
028
029
030
031
032
/*
 * Descriptor of one physical page currently backing a SHARED
 * anonymous ("/dev/zero") mapping: associates the page offset inside
 * the resource with the physical address of the backing frame.
 * Allocated from cache_of_zero_mapped_pages.
 */
struct zero_mapped_page
{
  sos_uoffset_t page_id;     /* page offset inside the resource */
  sos_paddr_t ppage_paddr;   /* physical address of the backing frame */

  /* Linkage in zero_mapped_resource::list_mapped_pages */
  struct zero_mapped_page *prev, *next;
};
040
041 struct sos_kslab_cache * cache_of_zero_mapped_pages;
042
043
044
045
046
/*
 * State of one /dev/zero mapped resource, allocated per call to
 * sos_dev_zero_map() and freed when its last VR drops its reference
 * (see zero_unref).
 */
struct zero_mapped_resource
{
  int ref_cnt;  /* number of VRs currently referencing this resource */

  /* Physical pages already allocated for SHARED mappings, so that
     every mapper of the resource sees the same contents */
  struct zero_mapped_page *list_mapped_pages;

  /* Generic descriptor handed to the umem_vmm layer; its custom_data
     field points back to this structure */
  struct sos_umem_vmm_mapped_resource mr;
};
059
060
061
062
/*
 * Helper: record that physical page 'ppage_paddr' now backs offset
 * 'page_id' of the shared anonymous resource 'mr'.
 */
static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
                                           sos_paddr_t ppage_paddr,
                                           sos_uoffset_t page_id);

/*
 * Helper: return the physical page backing offset 'page_id' of the
 * resource 'mr', or NULL when none has been recorded yet.
 */
static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
                                             sos_uoffset_t page_id);
072
073
074 sos_ret_t sos_dev_zero_subsystem_setup()
075 {
076 cache_of_zero_mapped_pages =
077 sos_kmem_cache_create("shared anonymous mappings",
078 sizeof(struct zero_mapped_page),
079 1, 0,
080 SOS_KSLAB_CREATE_MAP | SOS_KSLAB_CREATE_ZERO);
081 if (! cache_of_zero_mapped_pages)
082 return -SOS_ENOMEM;
083
084 return SOS_OK;
085 }
086
087
088
089
090 static void zero_ref(struct sos_umem_vmm_vr * vr)
091 {
092
093 struct zero_mapped_resource * zero_resource;
094 zero_resource
095 = (struct zero_mapped_resource*)
096 sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
097
098
099 zero_resource->ref_cnt ++;
100 }
101
102
103
104
/*
 * VR "unref" callback: a VR stopped referencing the /dev/zero
 * resource. Drops one reference; when the count reaches zero, frees
 * every page descriptor recorded for shared mappings and the resource
 * descriptor itself.
 */
static void zero_unref(struct sos_umem_vmm_vr * vr)
{
  /* Retrieve the zero_mapped_resource attached to this VR */
  struct zero_mapped_resource * zero_resource;
  zero_resource
    = (struct zero_mapped_resource*)
      sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  /* An unref without a matching ref would be a bug in the VMM layer */
  SOS_ASSERT_FATAL(zero_resource->ref_cnt > 0);
  zero_resource->ref_cnt --;

  /* Last reference gone: tear the resource down */
  if (zero_resource->ref_cnt == 0)
    {
      /* Pop every descriptor off the list and free it.
         NOTE(review): only the descriptor is freed here — the physical
         frame's refcount was already released in zero_page_in after
         mapping, presumably leaving the paging layer as the remaining
         owner; confirm against the physmem/paging code. */
      struct zero_mapped_page *zmp;
      list_collapse(zero_resource->list_mapped_pages, zmp)
        {
          sos_kfree((sos_vaddr_t)zmp);
        }

      sos_kfree((sos_vaddr_t)zero_resource);
    }
}
133
134
135
136
/*
 * VR "page_in" (demand-fault) callback for /dev/zero mappings.
 *
 * Called when the user touches 'uaddr' inside 'vr' and no page is
 * mapped there yet:
 *  - shared mapping whose page was already allocated: remap the
 *    existing frame;
 *  - write access: allocate a fresh frame, map it, zero it, and (for
 *    shared mappings) record it so future faulting mappers find it;
 *  - read access: map the global zero page read-only, deferring real
 *    allocation to the first write fault.
 *
 * @return SOS_OK on success, -SOS_ENOMEM when no frame is available,
 *         or the error returned by sos_paging_map.
 */
static sos_ret_t zero_page_in(struct sos_umem_vmm_vr * vr,
                              sos_uaddr_t uaddr,
                              sos_bool_t write_access)
{
  sos_ret_t retval = SOS_OK;
  sos_paddr_t ppage_paddr;
  sos_uoffset_t required_page_id;
  struct zero_mapped_resource * zero_resource;
  sos_ui32_t vr_prot, vr_flags;

  /* Retrieve the resource behind this VR */
  zero_resource
    = (struct zero_mapped_resource*)
      sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;

  vr_prot = sos_umem_vmm_get_prot_of_vr(vr);
  vr_flags = sos_umem_vmm_get_flags_of_vr(vr);

  /* Page offset of the faulting address inside the resource */
  required_page_id = SOS_PAGE_ALIGN_INF(uaddr)
                     - sos_umem_vmm_get_start_of_vr(vr)
                     + sos_umem_vmm_get_offset_in_resource(vr);

  /* For a shared mapping, another mapper may already have allocated
     the page: reuse it so all mappers see the same contents */
  if (vr_flags & SOS_VR_MAP_SHARED)
    {
      ppage_paddr = lookup_anonymous_physpage(zero_resource, required_page_id);
      if (NULL != (void*)ppage_paddr)
        {
          retval = sos_paging_map(ppage_paddr,
                                  SOS_PAGE_ALIGN_INF(uaddr),
                                  TRUE,
                                  vr_prot);
          return retval;
        }
    }

  if (write_access)
    {
      /* Allocate a fresh physical frame for this page */
      ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
      if (! ppage_paddr)
        return -SOS_ENOMEM;

      retval = sos_paging_map(ppage_paddr,
                              SOS_PAGE_ALIGN_INF(uaddr),
                              TRUE,
                              vr_prot);
      if (SOS_OK != retval)
        {
          /* Mapping failed: give the frame back */
          sos_physmem_unref_physpage(ppage_paddr);
          return retval;
        }

      /* Zero the page through its new user-space mapping */
      memset((void*)SOS_PAGE_ALIGN_INF(uaddr), 0x0, SOS_PAGE_SIZE);

      /* Shared mappings remember the frame so other mappers reuse it */
      if (vr_flags & SOS_VR_MAP_SHARED)
        insert_anonymous_physpage(zero_resource, ppage_paddr,
                                  required_page_id);

      /* Drop our allocation reference; the paging mapping above took
         its own reference to the frame */
      sos_physmem_unref_physpage(ppage_paddr);
    }
  else
    {
      /* Read fault: map the shared all-zeroes page read-only; the
         first write will fault again and allocate a private frame */
      retval = sos_paging_map(sos_zero_page,
                              SOS_PAGE_ALIGN_INF(uaddr),
                              TRUE,
                              SOS_VM_MAP_PROT_READ);
    }

  return retval;
}
218
219
220
221 static struct sos_umem_vmm_vr_ops zero_ops = (struct sos_umem_vmm_vr_ops)
222 {
223 .ref = zero_ref,
224 .unref = zero_unref,
225 .page_in = zero_page_in,
226 .unmap = NULL
227 };
228
229
230
231 static sos_ret_t zero_mmap(struct sos_umem_vmm_vr *vr)
232 {
233 return sos_umem_vmm_set_ops_of_vr(vr, &zero_ops);
234 }
235
236
237
238
239 sos_ret_t sos_dev_zero_map(struct sos_umem_vmm_as * dest_as,
240 sos_uaddr_t *uaddr,
241 sos_size_t size,
242 sos_ui32_t access_rights,
243 sos_ui32_t flags)
244 {
245 sos_ret_t retval;
246 struct zero_mapped_resource * zero_resource;
247
248 zero_resource
249 = (struct zero_mapped_resource*) sos_kmalloc(sizeof(*zero_resource), 0);
250 if (! zero_resource)
251 return -SOS_ENOMEM;
252
253 memset(zero_resource, 0x0, sizeof(*zero_resource));
254 zero_resource->mr.allowed_access_rights
255 = SOS_VM_MAP_PROT_READ
256 | SOS_VM_MAP_PROT_WRITE
257 | SOS_VM_MAP_PROT_EXEC;
258 zero_resource->mr.flags |= SOS_MAPPED_RESOURCE_ANONYMOUS;
259 zero_resource->mr.custom_data = zero_resource;
260 zero_resource->mr.mmap = zero_mmap;
261
262 retval = sos_umem_vmm_map(dest_as, uaddr, size,
263 access_rights, flags,
264 &zero_resource->mr, 0);
265 if (SOS_OK != retval)
266 {
267 sos_kfree((sos_vaddr_t)zero_resource);
268 return retval;
269 }
270
271 return SOS_OK;
272 }
273
274
275 static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
276 sos_paddr_t ppage_paddr,
277 sos_uoffset_t page_id)
278 {
279 struct zero_mapped_page * zmp
280 = (struct zero_mapped_page*)sos_kmem_cache_alloc(cache_of_zero_mapped_pages,
281 0);
282 if (! zmp)
283 return -SOS_ENOMEM;
284
285 zmp->page_id = page_id;
286 zmp->ppage_paddr = ppage_paddr;
287
288 list_add_head(mr->list_mapped_pages, zmp);
289 return SOS_OK;
290 }
291
292
293 static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
294 sos_uoffset_t page_id)
295 {
296 struct zero_mapped_page * zmp;
297 int nb_elts;
298
299 list_foreach_forward(mr->list_mapped_pages, zmp, nb_elts)
300 {
301 if (zmp->page_id == page_id)
302 return zmp->ppage_paddr;
303 }
304
305 return (sos_paddr_t)NULL;
306 }