Diff markup
001 001
002 002
003 003
004 004
005 005
006 006
007 007
008 008
009 009
010 010
011 011
012 012
013 013
014 014
015 015
016 016
017 017
018 018
019 #include <sos/assert.h> 019 #include <sos/assert.h>
020 #include <sos/kmalloc.h> 020 #include <sos/kmalloc.h>
021 #include <sos/physmem.h> 021 #include <sos/physmem.h>
022 #include <hwcore/paging.h> 022 #include <hwcore/paging.h>
023 #include <sos/kmem_slab.h> 023 #include <sos/kmem_slab.h>
024 #include <sos/list.h> 024 #include <sos/list.h>
025 #include <hwcore/paging.h> 025 #include <hwcore/paging.h>
026 026
027 #include "zero.h" 027 #include "zero.h"
028 028
029 029
030 030
031 031
032 032
/*
 * Descriptor of one physical page backing a shared anonymous mapping:
 * it links the page-aligned offset of the page inside the resource to
 * the physical page that holds its contents.
 */
struct zero_mapped_page
{
  sos_uoffset_t page_id;     /* Page-aligned offset inside the resource */
  sos_paddr_t ppage_paddr;   /* Physical page backing that offset */

  struct zero_mapped_page *prev, *next; /* Linkage for the list.h macros */
};
040 040
/* Slab cache from which struct zero_mapped_page descriptors are
   allocated (created by sos_dev_zero_subsystem_setup). */
struct sos_kslab_cache * cache_of_zero_mapped_pages;
042 042
043 043
044 044
045 045
046 046
/*
 * Per-mapping state of a /dev/zero resource: a reference count (one per
 * virtual region mapping it), the list of shared anonymous pages already
 * materialized, and the generic mapped-resource descriptor handed to the
 * umem_vmm subsystem.
 */
struct zero_mapped_resource
{
  int ref_cnt; /* Number of VRs currently referencing this resource */

  /* Shared anonymous pages already allocated for this resource
     (only used for SOS_VR_MAP_SHARED mappings). */
  struct zero_mapped_page *list_mapped_pages;

  struct sos_umem_vmm_mapped_resource mr; /* Generic resource descriptor */
};
059 059
060 060
061 061
062 062
/* Record that page 'page_id' of the resource is backed by physical page
   'ppage_paddr'.  Returns -SOS_ENOMEM when the descriptor cannot be
   allocated. */
static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
                                           sos_paddr_t ppage_paddr,
                                           sos_uoffset_t page_id);

/* Retrieve the physical page backing 'page_id' inside the resource, or
   NULL when no page has been inserted for that offset yet. */
static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
                                             sos_uoffset_t page_id);
073 073
074 sos_ret_t sos_dev_zero_subsystem_setup() 074 sos_ret_t sos_dev_zero_subsystem_setup()
075 { 075 {
076 cache_of_zero_mapped_pages = 076 cache_of_zero_mapped_pages =
077 sos_kmem_cache_create("shared anonymous ma 077 sos_kmem_cache_create("shared anonymous mappings",
078 sizeof(struct zero_m 078 sizeof(struct zero_mapped_page),
079 1, 0, 079 1, 0,
080 SOS_KSLAB_CREATE_MAP 080 SOS_KSLAB_CREATE_MAP | SOS_KSLAB_CREATE_ZERO);
081 if (! cache_of_zero_mapped_pages) 081 if (! cache_of_zero_mapped_pages)
082 return -SOS_ENOMEM; 082 return -SOS_ENOMEM;
083 083
084 return SOS_OK; 084 return SOS_OK;
085 } 085 }
086 086
087 087
088 088
089 089
090 static void zero_ref(struct sos_umem_vmm_vr * 090 static void zero_ref(struct sos_umem_vmm_vr * vr)
091 { 091 {
092 092
093 struct zero_mapped_resource * zero_resource; 093 struct zero_mapped_resource * zero_resource;
094 zero_resource 094 zero_resource
095 = (struct zero_mapped_resource*) 095 = (struct zero_mapped_resource*)
096 sos_umem_vmm_get_mapped_resource_of_vr(vr) 096 sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
097 097
098 098
099 zero_resource->ref_cnt ++; 099 zero_resource->ref_cnt ++;
100 } 100 }
101 101
102 102
103 103
104 104
105 static void zero_unref(struct sos_umem_vmm_vr 105 static void zero_unref(struct sos_umem_vmm_vr * vr)
106 { 106 {
107 107
108 struct zero_mapped_resource * zero_resource; 108 struct zero_mapped_resource * zero_resource;
109 zero_resource 109 zero_resource
110 = (struct zero_mapped_resource*) 110 = (struct zero_mapped_resource*)
111 sos_umem_vmm_get_mapped_resource_of_vr(vr) 111 sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
112 !! 112
113 113
114 SOS_ASSERT_FATAL(zero_resource->ref_cnt > 0) 114 SOS_ASSERT_FATAL(zero_resource->ref_cnt > 0);
115 zero_resource->ref_cnt --; 115 zero_resource->ref_cnt --;
116 116
117 117
118 if (zero_resource->ref_cnt == 0) 118 if (zero_resource->ref_cnt == 0)
119 { 119 {
120 120
121 struct zero_mapped_page *zmp; 121 struct zero_mapped_page *zmp;
122 list_collapse(zero_resource->list_mapped 122 list_collapse(zero_resource->list_mapped_pages, zmp)
123 { 123 {
124 !! 124
125 sos_physmem_unref_physpage(zmp->ppag !! 125
>> 126
126 sos_kfree((sos_vaddr_t)zmp); 127 sos_kfree((sos_vaddr_t)zmp);
127 } 128 }
128 129
129 sos_kfree((sos_vaddr_t)zero_resource); 130 sos_kfree((sos_vaddr_t)zero_resource);
130 } 131 }
131 } 132 }
132 133
133 134
134 135
135 136
136 static sos_ret_t zero_page_in(struct sos_umem_ 137 static sos_ret_t zero_page_in(struct sos_umem_vmm_vr * vr,
137 sos_uaddr_t uadd 138 sos_uaddr_t uaddr,
138 sos_bool_t write 139 sos_bool_t write_access)
139 { 140 {
140 sos_ret_t retval = SOS_OK; 141 sos_ret_t retval = SOS_OK;
141 sos_paddr_t ppage_paddr; 142 sos_paddr_t ppage_paddr;
142 sos_uoffset_t required_page_id; 143 sos_uoffset_t required_page_id;
143 struct zero_mapped_resource * zero_resource; 144 struct zero_mapped_resource * zero_resource;
144 sos_ui32_t vr_prot, vr_flags; 145 sos_ui32_t vr_prot, vr_flags;
145 146
146 147
147 zero_resource 148 zero_resource
148 = (struct zero_mapped_resource*) 149 = (struct zero_mapped_resource*)
149 sos_umem_vmm_get_mapped_resource_of_vr(vr) 150 sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
150 151
151 152
152 vr_prot = sos_umem_vmm_get_prot_of_vr(vr); 153 vr_prot = sos_umem_vmm_get_prot_of_vr(vr);
153 vr_flags = sos_umem_vmm_get_flags_of_vr(vr); 154 vr_flags = sos_umem_vmm_get_flags_of_vr(vr);
154 155
155 156
156 required_page_id = SOS_PAGE_ALIGN_INF(uaddr) 157 required_page_id = SOS_PAGE_ALIGN_INF(uaddr)
157 - sos_umem_vmm_get_start_of_vr(vr) 158 - sos_umem_vmm_get_start_of_vr(vr)
158 + sos_umem_vmm_get_offset_in_resource(vr); 159 + sos_umem_vmm_get_offset_in_resource(vr);
159 160
160 161
161 162
162 if (vr_flags & SOS_VR_MAP_SHARED) 163 if (vr_flags & SOS_VR_MAP_SHARED)
163 { 164 {
164 ppage_paddr = lookup_anonymous_physpage( 165 ppage_paddr = lookup_anonymous_physpage(zero_resource, required_page_id);
165 if (NULL != (void*)ppage_paddr) 166 if (NULL != (void*)ppage_paddr)
166 { 167 {
167 retval = sos_paging_map(ppage_paddr, 168 retval = sos_paging_map(ppage_paddr,
168 SOS_PAGE_ALI 169 SOS_PAGE_ALIGN_INF(uaddr),
169 TRUE, 170 TRUE,
170 vr_prot); 171 vr_prot);
171 <<
172 return retval; 172 return retval;
173 } 173 }
174 } 174 }
175 175
176 176
177 177
178 178
179 if (write_access) 179 if (write_access)
180 { 180 {
181 181
182 ppage_paddr = sos_physmem_ref_physpage_n 182 ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
183 if (! ppage_paddr) 183 if (! ppage_paddr)
184 return -SOS_ENOMEM; 184 return -SOS_ENOMEM;
185 !! 185
186 retval = sos_paging_map(ppage_paddr, 186 retval = sos_paging_map(ppage_paddr,
187 SOS_PAGE_ALIGN_I 187 SOS_PAGE_ALIGN_INF(uaddr),
188 TRUE, 188 TRUE,
189 vr_prot); 189 vr_prot);
190 if (SOS_OK != retval) 190 if (SOS_OK != retval)
191 { 191 {
192 sos_physmem_unref_physpage(ppage_pad 192 sos_physmem_unref_physpage(ppage_paddr);
193 return retval; 193 return retval;
194 } 194 }
195 195
196 memset((void*)SOS_PAGE_ALIGN_INF(uaddr), 196 memset((void*)SOS_PAGE_ALIGN_INF(uaddr), 0x0, SOS_PAGE_SIZE);
197 197
198 198
199 199
200 if (vr_flags & SOS_VR_MAP_SHARED) 200 if (vr_flags & SOS_VR_MAP_SHARED)
201 insert_anonymous_physpage(zero_resourc 201 insert_anonymous_physpage(zero_resource, ppage_paddr,
202 required_pag 202 required_page_id);
203 203
204 sos_physmem_unref_physpage(ppage_paddr); 204 sos_physmem_unref_physpage(ppage_paddr);
205 } 205 }
206 else 206 else
207 { 207 {
208 208
209 209
210 retval = sos_paging_map(sos_zero_page, 210 retval = sos_paging_map(sos_zero_page,
211 SOS_PAGE_ALIGN_I 211 SOS_PAGE_ALIGN_INF(uaddr),
212 TRUE, 212 TRUE,
213 SOS_VM_MAP_PROT_ 213 SOS_VM_MAP_PROT_READ);
214 } 214 }
215 215
216 return retval; 216 return retval;
217 } 217 }
218 218
219 219
220 220
221 static struct sos_umem_vmm_vr_ops zero_ops = ( 221 static struct sos_umem_vmm_vr_ops zero_ops = (struct sos_umem_vmm_vr_ops)
222 { 222 {
223 .ref = zero_ref, 223 .ref = zero_ref,
224 .unref = zero_unref, 224 .unref = zero_unref,
225 .page_in = zero_page_in, 225 .page_in = zero_page_in,
226 .unmap = NULL 226 .unmap = NULL
227 }; 227 };
228 228
229 229
230 230
231 static sos_ret_t zero_mmap(struct sos_umem_vmm 231 static sos_ret_t zero_mmap(struct sos_umem_vmm_vr *vr)
232 { 232 {
233 return sos_umem_vmm_set_ops_of_vr(vr, &zero_ 233 return sos_umem_vmm_set_ops_of_vr(vr, &zero_ops);
234 } 234 }
235 235
236 236
237 237
238 238
239 sos_ret_t sos_dev_zero_map(struct sos_umem_vmm 239 sos_ret_t sos_dev_zero_map(struct sos_umem_vmm_as * dest_as,
240 sos_uaddr_t *uaddr, 240 sos_uaddr_t *uaddr,
241 sos_size_t size, 241 sos_size_t size,
242 sos_ui32_t access_r 242 sos_ui32_t access_rights,
243 sos_ui32_t flags) 243 sos_ui32_t flags)
244 { 244 {
245 sos_ret_t retval; 245 sos_ret_t retval;
246 struct zero_mapped_resource * zero_resource; 246 struct zero_mapped_resource * zero_resource;
247 247
248 zero_resource 248 zero_resource
249 = (struct zero_mapped_resource*) sos_kmall 249 = (struct zero_mapped_resource*) sos_kmalloc(sizeof(*zero_resource), 0);
250 if (! zero_resource) 250 if (! zero_resource)
251 return -SOS_ENOMEM; 251 return -SOS_ENOMEM;
252 252
253 memset(zero_resource, 0x0, sizeof(*zero_reso 253 memset(zero_resource, 0x0, sizeof(*zero_resource));
254 zero_resource->mr.allowed_access_rights 254 zero_resource->mr.allowed_access_rights
255 = SOS_VM_MAP_PROT_READ 255 = SOS_VM_MAP_PROT_READ
256 | SOS_VM_MAP_PROT_WRITE 256 | SOS_VM_MAP_PROT_WRITE
257 | SOS_VM_MAP_PROT_EXEC; 257 | SOS_VM_MAP_PROT_EXEC;
258 zero_resource->mr.flags |= SOS_MAPPE 258 zero_resource->mr.flags |= SOS_MAPPED_RESOURCE_ANONYMOUS;
259 zero_resource->mr.custom_data = zero_reso 259 zero_resource->mr.custom_data = zero_resource;
260 zero_resource->mr.mmap = zero_mmap 260 zero_resource->mr.mmap = zero_mmap;
261 261
262 retval = sos_umem_vmm_map(dest_as, uaddr, si 262 retval = sos_umem_vmm_map(dest_as, uaddr, size,
263 access_rights, fla 263 access_rights, flags,
264 &zero_resource->mr 264 &zero_resource->mr, 0);
265 if (SOS_OK != retval) 265 if (SOS_OK != retval)
266 { 266 {
267 sos_kfree((sos_vaddr_t)zero_resource); 267 sos_kfree((sos_vaddr_t)zero_resource);
268 return retval; 268 return retval;
269 } 269 }
270 270
271 return SOS_OK; 271 return SOS_OK;
272 } 272 }
273 273
274 274
275 static sos_ret_t insert_anonymous_physpage(str 275 static sos_ret_t insert_anonymous_physpage(struct zero_mapped_resource *mr,
276 sos 276 sos_paddr_t ppage_paddr,
277 sos 277 sos_uoffset_t page_id)
278 { 278 {
279 struct zero_mapped_page * zmp 279 struct zero_mapped_page * zmp
280 = (struct zero_mapped_page*)sos_kmem_cache 280 = (struct zero_mapped_page*)sos_kmem_cache_alloc(cache_of_zero_mapped_pages,
281 281 0);
282 if (! zmp) 282 if (! zmp)
283 return -SOS_ENOMEM; 283 return -SOS_ENOMEM;
284 284
285 zmp->page_id = page_id; 285 zmp->page_id = page_id;
286 zmp->ppage_paddr = ppage_paddr; 286 zmp->ppage_paddr = ppage_paddr;
287 287
288 list_add_head(mr->list_mapped_pages, zmp); 288 list_add_head(mr->list_mapped_pages, zmp);
289 sos_physmem_ref_physpage_at(ppage_paddr); <<
290 return SOS_OK; 289 return SOS_OK;
291 } 290 }
292 291
293 292
294 static sos_paddr_t lookup_anonymous_physpage(s 293 static sos_paddr_t lookup_anonymous_physpage(struct zero_mapped_resource *mr,
295 so 294 sos_uoffset_t page_id)
296 { 295 {
297 struct zero_mapped_page * zmp; 296 struct zero_mapped_page * zmp;
298 int nb_elts; 297 int nb_elts;
299 298
300 list_foreach_forward(mr->list_mapped_pages, 299 list_foreach_forward(mr->list_mapped_pages, zmp, nb_elts)
301 { 300 {
302 if (zmp->page_id == page_id) 301 if (zmp->page_id == page_id)
303 return zmp->ppage_paddr; 302 return zmp->ppage_paddr;
304 } 303 }
305 304
306 return (sos_paddr_t)NULL; 305 return (sos_paddr_t)NULL;
307 } 306 }