001
002
003
004
005
006
007
008
009
010
011
012
013
014
015
016
017
018
019 #include <sos/kmalloc.h>
020 #include <sos/assert.h>
021 #include <sos/physmem.h>
022 #include <drivers/bochs.h>
023 #include <hwcore/paging.h>
024 #include <drivers/zero.h>
025
026 #include "binfmt_elf32.h"
027
028
029
030
031
032
/*
 * Descriptor of one user program whose (ELF) image is embedded in the
 * kernel image.  A table of these entries is provided by the linker
 * (see _userprogs_table below).
 */
struct userprog_entry
{
  const char *name;         /* NUL-terminated program name */
  sos_vaddr_t bottom_vaddr; /* kernel vaddr of the first byte of the image */
  sos_vaddr_t top_vaddr;    /* kernel vaddr one past the last byte */
};
039
040
041
042
043
044
/*
 * Symbol defined by the linker script: start of the array of
 * userprog_entry descriptors compiled into the kernel image.  The
 * array is terminated by an entry whose name field is NULL (see
 * lookup_userprog()).
 */
extern char _userprogs_table;
046
047
048
049
050
051
/*
 * A "mapped resource" (in the sos_umem_vmm sense) backed by an ELF
 * program image embedded in the kernel.  Reference-counted: freed by
 * elf32prog_unref() when the last virtual region using it goes away.
 */
struct elf32_mapped_program
{
  sos_vaddr_t vaddr;  /* kernel vaddr of the start of the ELF image */
  sos_size_t  size;   /* size of the image, in bytes */
  int ref_cnt;        /* number of VRs currently referencing us */

  /* Resource descriptor handed to the umem_vmm subsystem */
  struct sos_umem_vmm_mapped_resource mr;
};
060
061
062
063
064 static void elf32prog_ref(struct sos_umem_vmm_vr * vr)
065 {
066 struct elf32_mapped_program * elf32prog_resource;
067 elf32prog_resource = (struct elf32_mapped_program*) sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
068
069 elf32prog_resource->ref_cnt ++;
070 }
071
072
073
074
075 static void elf32prog_unref(struct sos_umem_vmm_vr * vr)
076 {
077 struct elf32_mapped_program * elf32prog_resource;
078 elf32prog_resource
079 = (struct elf32_mapped_program*)
080 sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
081
082 elf32prog_resource->ref_cnt --;
083 SOS_ASSERT_FATAL(elf32prog_resource->ref_cnt >= 0);
084
085
086 if (elf32prog_resource->ref_cnt == 0)
087 sos_kfree((sos_vaddr_t)elf32prog_resource);
088 }
089
090
091
092 static sos_ret_t elf32prog_page_in(struct sos_umem_vmm_vr * vr,
093 sos_uaddr_t uaddr,
094 sos_bool_t write_access)
095 {
096 struct elf32_mapped_program * elf32prog_resource;
097 sos_ret_t retval = SOS_OK;
098 sos_paddr_t ppage_paddr;
099 sos_uaddr_t upage_uaddr = SOS_PAGE_ALIGN_INF(uaddr);
100 sos_uoffset_t offset_in_prog;
101 sos_size_t size_to_copy;
102 sos_ui32_t access_rights = sos_umem_vmm_get_prot_of_vr(vr);
103
104 elf32prog_resource
105 = (struct elf32_mapped_program*)
106 sos_umem_vmm_get_mapped_resource_of_vr(vr)->custom_data;
107
108
109
110 offset_in_prog = upage_uaddr - sos_umem_vmm_get_start_of_vr(vr)
111 + sos_umem_vmm_get_offset_in_resource(vr);
112 size_to_copy = SOS_PAGE_SIZE;
113 if (offset_in_prog + size_to_copy > elf32prog_resource->size)
114 size_to_copy = elf32prog_resource->size - offset_in_prog;
115
116
117
118 ppage_paddr = sos_physmem_ref_physpage_new(FALSE);
119 if (! ppage_paddr)
120 return -SOS_ENOMEM;
121
122
123
124 retval = sos_paging_map(ppage_paddr,
125 upage_uaddr,
126 TRUE,
127 access_rights | SOS_VM_MAP_PROT_WRITE);
128 SOS_ASSERT_FATAL(SOS_OK == retval);
129 sos_physmem_unref_physpage(ppage_paddr);
130
131
132 memcpy((void*)upage_uaddr,
133 (void*)elf32prog_resource->vaddr + offset_in_prog,
134 size_to_copy);
135 if (size_to_copy < SOS_PAGE_SIZE)
136 memset((void*)(upage_uaddr + size_to_copy), 0x0,
137 SOS_PAGE_SIZE - size_to_copy);
138
139
140 if (! (access_rights & SOS_VM_MAP_PROT_WRITE))
141 return sos_paging_set_prot(upage_uaddr,
142 access_rights & ~SOS_VM_MAP_PROT_WRITE);
143
144 return retval;
145 }
146
147
148 static struct sos_umem_vmm_vr_ops elf32prog_ops = (struct sos_umem_vmm_vr_ops)
149 {
150 .ref = elf32prog_ref,
151 .unref = elf32prog_unref,
152 .page_in = elf32prog_page_in,
153 .unmap = NULL
154 };
155
156
157 static sos_ret_t elf32prog_mmap(struct sos_umem_vmm_vr *vr)
158 {
159 return sos_umem_vmm_set_ops_of_vr(vr, &elf32prog_ops);
160 }
161
162
163
164
165
166
167
168
169
170
/* Find the embedded user program with the given name, or NULL if not
   found (defined at the end of this file). */
static struct userprog_entry * lookup_userprog(const char *name);
172
173
174 sos_uaddr_t sos_binfmt_elf32_map(struct sos_umem_vmm_as * dest_as,
175 const char * progname)
176 {
177 int i;
178
179
180
181
182
183 typedef unsigned long Elf32_Addr;
184 typedef unsigned long Elf32_Word;
185 typedef unsigned short Elf32_Half;
186 typedef unsigned long Elf32_Off;
187 typedef signed long Elf32_Sword;
188
189
190
191 #define EI_NIDENT 16
192 typedef struct {
193 unsigned char e_ident[EI_NIDENT];
194 Elf32_Half e_type;
195 Elf32_Half e_machine;
196 Elf32_Word e_version;
197 Elf32_Addr e_entry;
198 Elf32_Off e_phoff;
199 Elf32_Off e_shoff;
200 Elf32_Word e_flags;
201 Elf32_Half e_ehsize;
202 Elf32_Half e_phentsize;
203 Elf32_Half e_phnum;
204 Elf32_Half e_shentsize;
205 Elf32_Half e_shnum;
206 Elf32_Half e_shstrndx;
207 } __attribute__((packed)) Elf32_Ehdr_t;
208
209
210 #define ELFMAG0 0x7f
211 #define ELFMAG1 'E'
212 #define ELFMAG2 'L'
213 #define ELFMAG3 'F'
214
215
216 #define EI_MAG0 0
217 #define EI_MAG1 1
218 #define EI_MAG2 2
219 #define EI_MAG3 3
220 #define EI_CLASS 4
221 #define EI_DATA 5
222 #define EI_VERSION 6
223 #define EI_PAD 7
224
225
226 #define ELFCLASSNONE 0
227 #define ELFCLASS32 1
228 #define ELFCLASS64 2
229
230
231 #define ELFDATANONE 0
232 #define ELFDATA2LSB 1
233 #define ELFDATA2MSB 2
234
235
236 #define ET_NONE 0
237 #define ET_REL 1
238 #define ET_EXEC 2
239 #define ET_DYN 3
240 #define ET_CORE 4
241 #define ET_LOPROC 0xff00
242 #define ET_HIPROC 0xffff
243
244
245 #define EM_NONE 0
246 #define EM_M32 1
247 #define EM_SPARC 2
248 #define EM_386 3
249 #define EM_68K 4
250 #define EM_88K 5
251 #define EM_860 7
252 #define EM_MIPS 8
253
254
255 #define EV_NONE 0
256 #define EV_CURRENT 1
257
258 typedef struct {
259 Elf32_Word p_type;
260 Elf32_Off p_offset;
261 Elf32_Addr p_vaddr;
262 Elf32_Addr p_paddr;
263 Elf32_Word p_filesz;
264 Elf32_Word p_memsz;
265 Elf32_Word p_flags;
266 Elf32_Word p_align;
267 } __attribute__((packed)) Elf32_Phdr_t;
268
269
270 #define PT_NULL 0
271 #define PT_LOAD 1
272 #define PT_DYNAMIC 2
273 #define PT_INTERP 3
274 #define PT_NOTE 4
275 #define PT_SHLIB 5
276 #define PT_PHDR 6
277 #define PT_LOPROC 0x70000000
278 #define PT_HIPROC 0x7fffffff
279
280
281 #define PF_X 1
282 #define PF_W 2
283 #define PF_R 4
284
285
286 Elf32_Ehdr_t *elf_hdr;
287 Elf32_Phdr_t *elf_phdrs;
288
289 struct elf32_mapped_program * mapped_prog;
290 struct userprog_entry * prog;
291 sos_uaddr_t prog_top_user_address = 0;
292
293 mapped_prog
294 = (struct elf32_mapped_program*)
295 sos_kmalloc(sizeof(struct elf32_mapped_program), 0);
296 if (! mapped_prog)
297 return -SOS_ENOMEM;
298
299 prog = lookup_userprog(progname);
300 if (! prog)
301 {
302 sos_kfree((sos_vaddr_t)mapped_prog);
303 return 0;
304 }
305
306
307 memset(mapped_prog, 0x0, sizeof(*mapped_prog));
308 mapped_prog->mr.custom_data = mapped_prog;
309 mapped_prog->mr.mmap = elf32prog_mmap;
310 mapped_prog->mr.allowed_access_rights
311 = SOS_VM_MAP_PROT_READ
312 | SOS_VM_MAP_PROT_WRITE
313 | SOS_VM_MAP_PROT_EXEC;
314 mapped_prog->vaddr = prog->bottom_vaddr;
315 mapped_prog->size = prog->top_vaddr - prog->bottom_vaddr;
316
317 elf_hdr = (Elf32_Ehdr_t*) prog->bottom_vaddr;
318
319
320
321 if (prog->bottom_vaddr + sizeof(Elf32_Ehdr_t) > prog->top_vaddr)
322 {
323 sos_bochs_printf("ELF prog %s: incorrect header\n", prog->name);
324 return 0;
325 }
326
327
328 #define ELF_CHECK(hdr,field,expected_value) \
329 ({ if ((hdr)->field != (expected_value)) \
330 { \
331 sos_bochs_printf("ELF prog %s: for %s, expected %x, got %x\n", \
332 prog->name, \
333 #field, \
334 (unsigned)(expected_value), \
335 (unsigned)(hdr)->field); \
336 return 0; \
337 } \
338 })
339
340 ELF_CHECK(elf_hdr, e_ident[EI_MAG0], ELFMAG0);
341 ELF_CHECK(elf_hdr, e_ident[EI_MAG1], ELFMAG1);
342 ELF_CHECK(elf_hdr, e_ident[EI_MAG2], ELFMAG2);
343 ELF_CHECK(elf_hdr, e_ident[EI_MAG3], ELFMAG3);
344 ELF_CHECK(elf_hdr, e_ident[EI_CLASS], ELFCLASS32);
345 ELF_CHECK(elf_hdr, e_ident[EI_DATA], ELFDATA2LSB);
346 ELF_CHECK(elf_hdr, e_type, ET_EXEC);
347 ELF_CHECK(elf_hdr, e_version, EV_CURRENT);
348
349
350 elf_phdrs = (Elf32_Phdr_t*) (prog->bottom_vaddr + elf_hdr->e_phoff);
351
352
353
354 for (i = 0 ; i < elf_hdr->e_phnum ; i++)
355 {
356 sos_ui32_t prot_flags;
357 sos_uaddr_t uaddr;
358
359
360 if (elf_phdrs[i].p_type != PT_LOAD)
361 {
362 if (elf_phdrs[i].p_memsz != 0)
363 {
364 sos_display_fatal_error("ELF: non-empty non-LOAD segments not supported yet");
365 }
366 continue;
367 }
368
369 if (elf_phdrs[i].p_vaddr < SOS_PAGING_BASE_USER_ADDRESS)
370 {
371 sos_display_fatal_error("User program has an incorrect address");
372 }
373
374 prot_flags = 0;
375 if (elf_phdrs[i].p_flags & SOS_VM_MAP_PROT_READ)
376 prot_flags |= SOS_VM_MAP_PROT_READ;
377 if (elf_phdrs[i].p_flags & SOS_VM_MAP_PROT_WRITE)
378 prot_flags |= SOS_VM_MAP_PROT_WRITE;
379 if (elf_phdrs[i].p_flags & SOS_VM_MAP_PROT_EXEC)
380 prot_flags |= SOS_VM_MAP_PROT_EXEC;
381
382 uaddr = elf_phdrs[i].p_vaddr;
383 SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(uaddr));
384
385
386
387 SOS_ASSERT_FATAL(SOS_OK
388 == sos_umem_vmm_map(dest_as, &uaddr,
389 SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_filesz),
390 prot_flags,
391 SOS_VR_MAP_FIXED,
392 & mapped_prog->mr,
393 elf_phdrs[i].p_offset));
394
395
396 uaddr += SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_filesz);
397 if (SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_filesz)
398 < SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_memsz))
399 SOS_ASSERT_FATAL(SOS_OK
400 == sos_dev_zero_map(dest_as, &uaddr,
401 SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_memsz)
402 - SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_filesz),
403 prot_flags,
404 SOS_VR_MAP_FIXED));
405
406 if (prog_top_user_address
407 < uaddr + SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_memsz))
408 prog_top_user_address
409 = uaddr + SOS_PAGE_ALIGN_SUP(elf_phdrs[i].p_memsz);
410 }
411
412
413 sos_umem_vmm_init_heap(dest_as, prog_top_user_address);
414
415 return elf_hdr->e_entry;
416 }
417
418
419
420
421
422 static struct userprog_entry * lookup_userprog(const char *name)
423 {
424 struct userprog_entry *prog;
425
426 if (! name)
427 return NULL;
428
429
430
431 for (prog = (struct userprog_entry*) & _userprogs_table ;
432 prog && (prog->name != NULL) ;
433 prog++)
434 {
435 if (0 == strcmp(name, prog->name))
436
437 return prog;
438 }
439
440 return NULL;
441 }