| ../sos-code-article4/sos/kmem_slab.c (1970-01-01 01:00:00.000000000 +0100) |
| ../sos-code-article5/sos/kmem_slab.c (2004-12-18 21:12:14.000000000 +0100) |
| | /* Copyright (C) 2000 Thomas Petazzoni |
| | Copyright (C) 2004 David Decotigny |
| | |
| | This program is free software; you can redistribute it and/or |
| | modify it under the terms of the GNU General Public License |
| | as published by the Free Software Foundation; either version 2 |
| | of the License, or (at your option) any later version. |
| | |
| | This program is distributed in the hope that it will be useful, |
| | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| | GNU General Public License for more details. |
| | |
| | You should have received a copy of the GNU General Public License |
| | along with this program; if not, write to the Free Software |
| | Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| | USA. |
| | */ |
| | #include <sos/macros.h> |
| | #include <sos/klibc.h> |
| | #include <sos/list.h> |
| | #include <sos/assert.h> |
| | #include <hwcore/paging.h> |
| | #include <sos/physmem.h> |
| | #include <sos/kmem_vmm.h> |
| | |
| | #include "kmem_slab.h" |
| | |
| | /* Dimensioning constants */ |
| | #define NB_PAGES_IN_SLAB_OF_CACHES 1 |
| | #define NB_PAGES_IN_SLAB_OF_RANGES 1 |
| | |
| | /** The structure of a slab cache */ |
| | struct sos_kslab_cache |
| | { |
| | char *name; |
| | |
| | /* Immutable characteristics of this cache */ |
| | sos_size_t original_obj_size; /* asked object size */ |
| | sos_size_t alloc_obj_size; /* actual object size, taking the |
| | alignment constraints into account */ |
| | sos_count_t nb_objects_per_slab; |
| | sos_count_t nb_pages_per_slab; |
| | sos_count_t min_free_objects; |
| | |
| | /* slab cache flags */ |
| | // #define SOS_KSLAB_CREATE_MAP (1<<0) /* See kmem_slab.h */ |
| | // #define SOS_KSLAB_CREATE_ZERO (1<<1) /* " " " " " " " " */ |
| | #define ON_SLAB (1<<31) /* struct sos_kslab is included inside the slab */ |
| | sos_ui32_t flags; |
| | |
| | /* Supervision data (updated at run-time) */ |
| | sos_count_t nb_free_objects; |
| | |
| | /* The lists of slabs owned by this cache */ |
| | struct sos_kslab *slab_list; /* head = non full, tail = full */ |
| | |
| | /* The caches are linked together on the kslab_cache_list */ |
| | struct sos_kslab_cache *prev, *next; |
| | }; |
| | |
| | |
| | /** The structure of a slab */ |
| | struct sos_kslab |
| | { |
| | /** Number of free objects on this slab */ |
| | sos_count_t nb_free; |
| | |
| | /** The list of these free objects */ |
| | struct sos_kslab_free_object *free; |
| | |
| | /** The address of the associated range structure */ |
| | struct sos_kmem_range *range; |
| | |
| | /** Virtual start address of this range */ |
| | sos_vaddr_t first_object; |
| | |
| | /** Slab cache owning this slab */ |
| | struct sos_kslab_cache *cache; |
| | |
| | /** Links to the other slabs managed by the same cache */ |
| | struct sos_kslab *prev, *next; |
| | }; |
| | |
| | |
| | /** The structure of the free objects in the slab */ |
| | struct sos_kslab_free_object |
| | { |
| | struct sos_kslab_free_object *prev, *next; |
| | }; |
| | |
| | /** The cache of slab caches */ |
| | static struct sos_kslab_cache *cache_of_struct_kslab_cache; |
| | |
| | /** The cache of slab structures for non-ON_SLAB caches */ |
| | static struct sos_kslab_cache *cache_of_struct_kslab; |
| | |
| | /** The list of slab caches */ |
| | static struct sos_kslab_cache *kslab_cache_list; |
| | |
| | /* Helper function to initialize a cache structure */ |
| | static sos_ret_t |
| | cache_initialize(/*out*/struct sos_kslab_cache *the_cache, |
| | const char* name, |
| | sos_size_t obj_size, |
| | sos_count_t pages_per_slab, |
| | sos_count_t min_free_objs, |
| | sos_ui32_t cache_flags) |
| | { |
| | unsigned int space_left; |
| | sos_size_t alloc_obj_size; |
| | |
| | if (obj_size <= 0) |
| | return -SOS_EINVAL; |
| | |
| | /* Default allocation size is the requested one */ |
| | alloc_obj_size = obj_size; |
| | |
| | /* Make sure the requested size is large enough to store a |
| | free_object structure */ |
| | if (alloc_obj_size < sizeof(struct sos_kslab_free_object)) |
| | alloc_obj_size = sizeof(struct sos_kslab_free_object); |
| | |
| | /* Align the resulting object size on a 4-byte boundary */ |
| | alloc_obj_size = SOS_ALIGN_SUP(alloc_obj_size, sizeof(int)); |
| | |
| | /* Make sure supplied number of pages per slab is consistent with |
| | actual allocated object size */ |
| | if (alloc_obj_size > pages_per_slab*SOS_PAGE_SIZE) |
| | return -SOS_EINVAL; |
| | |
| | /* Refuse too large slabs */ |
| | if (pages_per_slab > MAX_PAGES_PER_SLAB) |
| | return -SOS_ENOMEM; |
| | |
| | /* Fills in the cache structure */ |
| | memset(the_cache, 0x0, sizeof(struct sos_kslab_cache)); |
| | the_cache->name = (char*)name; |
| | the_cache->flags = cache_flags; |
| | the_cache->original_obj_size = obj_size; |
| | the_cache->alloc_obj_size = alloc_obj_size; |
| | the_cache->min_free_objects = min_free_objs; |
| | the_cache->nb_pages_per_slab = pages_per_slab; |
| | |
| | /* Small-size objects => the slab structure is allocated directly in |
| | the slab */ |
| | if(alloc_obj_size <= sizeof(struct sos_kslab)) |
| | the_cache->flags |= ON_SLAB; |
| | |
| | /* |
| | * Compute the space left once the maximum number of objects |
| | * have been allocated in the slab |
| | */ |
| | space_left = the_cache->nb_pages_per_slab*SOS_PAGE_SIZE; |
| | if(the_cache->flags & ON_SLAB) |
| | space_left -= sizeof(struct sos_kslab); |
| | the_cache->nb_objects_per_slab = space_left / alloc_obj_size; |
| | space_left -= the_cache->nb_objects_per_slab*alloc_obj_size; |
| | |
| | /* Make sure a single slab is large enough to contain the minimum |
| | number of objects requested */ |
| | if (the_cache->nb_objects_per_slab < min_free_objs) |
| | return -SOS_EINVAL; |
| | |
| | /* If there is still enough room for both the objects and the slab |
| | structure, then make the slab structure ON_SLAB */ |
| | if (space_left >= sizeof(struct sos_kslab)) |
| | the_cache->flags |= ON_SLAB; |
| | |
| | return SOS_OK; |
| | } |
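To make the sizing arithmetic in cache_initialize() concrete, here is an illustrative calculation (the figures assume SOS_PAGE_SIZE == 4096, 32-bit pointers and no structure padding, so that sizeof(struct sos_kslab) == 28; the exact values depend on the build): for a cache of 16-byte objects with one page per slab, alloc_obj_size stays at 16, the cache is flagged ON_SLAB since 16 <= 28, the space left for objects is 4096 - 28 = 4068 bytes, hence 254 objects per slab and 4 bytes of slack.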
| | |
| | |
| | /** Helper function to add a new slab for the given cache. */ |
| | static sos_ret_t |
| | cache_add_slab(struct sos_kslab_cache *kslab_cache, |
| | sos_vaddr_t vaddr_slab, |
| | struct sos_kslab *slab) |
| | { |
| | int i; |
| | |
| | /* Setup the slab structure */ |
| | memset(slab, 0x0, sizeof(struct sos_kslab)); |
| | slab->cache = kslab_cache; |
| | |
| | /* Establish the address of the first free object */ |
| | slab->first_object = vaddr_slab; |
| | |
| | /* Account for this new slab in the cache */ |
| | slab->nb_free = kslab_cache->nb_objects_per_slab; |
| | kslab_cache->nb_free_objects += slab->nb_free; |
| | |
| | /* Build the list of free objects */ |
| | for (i = 0 ; i < kslab_cache->nb_objects_per_slab ; i++) |
| | { |
| | sos_vaddr_t obj_vaddr; |
| | |
| | /* Set object's address */ |
| | obj_vaddr = slab->first_object + i*kslab_cache->alloc_obj_size; |
| | |
| | /* Add it to the list of free objects */ |
| | list_add_tail(slab->free, |
| | (struct sos_kslab_free_object *)obj_vaddr); |
| | } |
| | |
| | /* Add the slab to the cache's slab list: add it at the head of the |
| | list since this slab is not full */ |
| | list_add_head(kslab_cache->slab_list, slab); |
| | |
| | return SOS_OK; |
| | } |
| | |
| | |
| | /** Helper function to allocate a new slab for the given kslab_cache */ |
| | static sos_ret_t |
| | cache_grow(struct sos_kslab_cache *kslab_cache, |
| | sos_ui32_t alloc_flags) |
| | { |
| | sos_ui32_t range_alloc_flags; |
| | |
| | struct sos_kmem_range *new_range; |
| | sos_vaddr_t new_range_start; |
| | |
| | struct sos_kslab *new_slab; |
| | |
| | /* |
| | * Setup the flags for the range allocation |
| | */ |
| | range_alloc_flags = 0; |
| | |
| | /* Atomic ? */ |
| | if (alloc_flags & SOS_KSLAB_ALLOC_ATOMIC) |
| | range_alloc_flags |= SOS_KMEM_VMM_ATOMIC; |
| | |
| | /* Need physical mapping NOW ? */ |
| | if (kslab_cache->flags & (SOS_KSLAB_CREATE_MAP |
| | | SOS_KSLAB_CREATE_ZERO)) |
| | range_alloc_flags |= SOS_KMEM_VMM_MAP; |
| | |
| | /* Allocate the range */ |
| | new_range = sos_kmem_vmm_new_range(kslab_cache->nb_pages_per_slab, |
| | range_alloc_flags, |
| | & new_range_start); |
| | if (! new_range) |
| | return -SOS_ENOMEM; |
| | |
| | /* Allocate the slab structure */ |
| | if (kslab_cache->flags & ON_SLAB) |
| | { |
| | /* Slab structure is ON the slab: simply set its address to the |
| | end of the range */ |
| | sos_vaddr_t slab_vaddr |
| | = new_range_start + kslab_cache->nb_pages_per_slab*SOS_PAGE_SIZE |
| | - sizeof(struct sos_kslab); |
| | new_slab = (struct sos_kslab*)slab_vaddr; |
| | } |
| | else |
| | { |
| | /* Slab structure is OFF the slab: allocate it from the cache of |
| | slab structures */ |
| | sos_vaddr_t slab_vaddr |
| | = sos_kmem_cache_alloc(cache_of_struct_kslab, |
| | alloc_flags); |
| | if (! slab_vaddr) |
| | { |
| | sos_kmem_vmm_del_range(new_range); |
| | return -SOS_ENOMEM; |
| | } |
| | new_slab = (struct sos_kslab*)slab_vaddr; |
| | } |
| | |
| | cache_add_slab(kslab_cache, new_range_start, new_slab); |
| | new_slab->range = new_range; |
| | |
| | /* Set the backlink from range to this slab */ |
| | sos_kmem_vmm_set_slab(new_range, new_slab); |
| | |
| | return SOS_OK; |
| | } |
| | |
| | |
| | /** |
| | * Helper function to release a slab |
| | * |
| | * The corresponding range is always deleted, except when the |
| | * @param must_del_range_now is FALSE. This happens only when the |
| | * function gets called from sos_kmem_cache_release_struct_range(), in |
| | * order to avoid large recursions. |
| | */ |
| | static sos_ret_t |
| | cache_release_slab(struct sos_kslab *slab, |
| | sos_bool_t must_del_range_now) |
| | { |
| | struct sos_kslab_cache *kslab_cache = slab->cache; |
| | struct sos_kmem_range *range = slab->range; |
| | |
| | SOS_ASSERT_FATAL(kslab_cache != NULL); |
| | SOS_ASSERT_FATAL(range != NULL); |
| | SOS_ASSERT_FATAL(slab->nb_free == slab->cache->nb_objects_per_slab); |
| | |
| | /* First, remove the slab from the slabs' list of the cache */ |
| | list_delete(kslab_cache->slab_list, slab); |
| | slab->cache->nb_free_objects -= slab->nb_free; |
| | |
| | /* Release the slab structure if it is OFF slab */ |
| | if (! (slab->cache->flags & ON_SLAB)) |
| | sos_kmem_cache_free((sos_vaddr_t)slab); |
| | |
| | /* Ok, the range is not bound to any slab anymore */ |
| | sos_kmem_vmm_set_slab(range, NULL); |
| | |
| | /* Always delete the range now, unless we are told not to do so (see |
| | sos_kmem_cache_release_struct_range() below) */ |
| | if (must_del_range_now) |
| | return sos_kmem_vmm_del_range(range); |
| | |
| | return SOS_OK; |
| | } |
| | |
| | |
| | /** |
| | * Helper function to create the initial cache of caches, with a very |
| | * first slab in it, so that new cache structures can be simply allocated. |
| | * @return the cache structure for the cache of caches |
| | */ |
| | static struct sos_kslab_cache * |
| | create_cache_of_caches(sos_vaddr_t vaddr_first_slab_of_caches, |
| | int nb_pages) |
| | { |
| | /* The preliminary cache structure we need in order to allocate the |
| | first slab in the cache of caches (allocated on the stack !) */ |
| | struct sos_kslab_cache fake_cache_of_caches; |
| | |
| | /* The real cache structure for the cache of caches */ |
| | struct sos_kslab_cache *real_cache_of_caches; |
| | |
| | /* The kslab structure for this very first slab */ |
| | struct sos_kslab *slab_of_caches; |
| | |
| | /* Init the cache structure for the cache of caches */ |
| | if (cache_initialize(& fake_cache_of_caches, |
| | "Caches", sizeof(struct sos_kslab_cache), |
| | nb_pages, 0, SOS_KSLAB_CREATE_MAP | ON_SLAB)) |
| | /* Something wrong with the parameters */ |
| | return NULL; |
| | |
| | memset((void*)vaddr_first_slab_of_caches, 0x0, nb_pages*SOS_PAGE_SIZE); |
| | |
| | /* Add the pages for the 1st slab of caches */ |
| | slab_of_caches = (struct sos_kslab*)(vaddr_first_slab_of_caches |
| | + nb_pages*SOS_PAGE_SIZE |
| | - sizeof(struct sos_kslab)); |
| | |
| | /* Add the abovementioned 1st slab to the cache of caches */ |
| | cache_add_slab(& fake_cache_of_caches, |
| | vaddr_first_slab_of_caches, |
| | slab_of_caches); |
| | |
| | /* Now we allocate a cache structure, which will be the real cache |
| | of caches, i.e. a cache structure allocated INSIDE the cache of |
| | caches, not on the stack */ |
| | real_cache_of_caches |
| | = (struct sos_kslab_cache*) sos_kmem_cache_alloc(& fake_cache_of_caches, |
| | 0); |
| | /* We initialize it */ |
| | memcpy(real_cache_of_caches, & fake_cache_of_caches, |
| | sizeof(struct sos_kslab_cache)); |
| | /* We need to update the slab's 'cache' field */ |
| | slab_of_caches->cache = real_cache_of_caches; |
| | |
| | /* Add the cache to the list of slab caches */ |
| | list_add_tail(kslab_cache_list, real_cache_of_caches); |
| | |
| | return real_cache_of_caches; |
| | } |
| | |
| | |
| | /** |
| | * Helper function to create the initial cache of ranges, with a very |
| | * first slab in it, so that new kmem_range structures can be simply |
| | * allocated. |
| | * @return the cache of kmem_range |
| | */ |
| | static struct sos_kslab_cache * |
| | create_cache_of_ranges(sos_vaddr_t vaddr_first_slab_of_ranges, |
| | sos_size_t sizeof_struct_range, |
| | int nb_pages) |
| | { |
| | /* The cache structure for the cache of kmem_range */ |
| | struct sos_kslab_cache *cache_of_ranges; |
| | |
| | /* The kslab structure for the very first slab of ranges */ |
| | struct sos_kslab *slab_of_ranges; |
| | |
| | cache_of_ranges = (struct sos_kslab_cache*) |
| | sos_kmem_cache_alloc(cache_of_struct_kslab_cache, |
| | 0); |
| | if (! cache_of_ranges) |
| | return NULL; |
| | |
| | /* Init the cache structure for the cache of ranges with min objects |
| | per slab = 2 !!! */ |
| | if (cache_initialize(cache_of_ranges, |
| | "struct kmem_range", |
| | sizeof_struct_range, |
| | nb_pages, 2, SOS_KSLAB_CREATE_MAP | ON_SLAB)) |
| | /* Something wrong with the parameters */ |
| | return NULL; |
| | |
| | /* Add the cache to the list of slab caches */ |
| | list_add_tail(kslab_cache_list, cache_of_ranges); |
| | |
| | /* |
| | * Add the first slab for this cache |
| | */ |
| | memset((void*)vaddr_first_slab_of_ranges, 0x0, nb_pages*SOS_PAGE_SIZE); |
| | |
| | /* Add the pages for the 1st slab of ranges */ |
| | slab_of_ranges = (struct sos_kslab*)(vaddr_first_slab_of_ranges |
| | + nb_pages*SOS_PAGE_SIZE |
| | - sizeof(struct sos_kslab)); |
| | |
| | cache_add_slab(cache_of_ranges, |
| | vaddr_first_slab_of_ranges, |
| | slab_of_ranges); |
| | |
| | return cache_of_ranges; |
| | } |
| | |
| | |
| | struct sos_kslab_cache * |
| | sos_kmem_cache_setup_prepare(sos_vaddr_t kernel_core_base, |
| | sos_vaddr_t kernel_core_top, |
| | sos_size_t sizeof_struct_range, |
| | /* results */ |
| | struct sos_kslab **first_struct_slab_of_caches, |
| | sos_vaddr_t *first_slab_of_caches_base, |
| | sos_count_t *first_slab_of_caches_nb_pages, |
| | struct sos_kslab **first_struct_slab_of_ranges, |
| | sos_vaddr_t *first_slab_of_ranges_base, |
| | sos_count_t *first_slab_of_ranges_nb_pages) |
| | { |
| | int i; |
| | sos_ret_t retval; |
| | sos_vaddr_t vaddr; |
| | |
| | /* The cache of ranges we are about to allocate */ |
| | struct sos_kslab_cache *cache_of_ranges; |
| | |
| | /* In the beginning, there isn't any cache yet */ |
| | kslab_cache_list = NULL; |
| | cache_of_struct_kslab = NULL; |
| | cache_of_struct_kslab_cache = NULL; |
| | |
| | /* |
| | * Create the cache of caches, initialised with 1 allocated slab |
| | */ |
| | |
| | /* Allocate the pages needed for the 1st slab of caches, and map them |
| | in kernel space, right after the kernel */ |
| | *first_slab_of_caches_base = SOS_PAGE_ALIGN_SUP(kernel_core_top); |
| | for (i = 0, vaddr = *first_slab_of_caches_base ; |
| | i < NB_PAGES_IN_SLAB_OF_CACHES ; |
| | i++, vaddr += SOS_PAGE_SIZE) |
| | { |
| | sos_paddr_t ppage_paddr; |
| | |
| | ppage_paddr |
| | = sos_physmem_ref_physpage_new(FALSE); |
| | SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL); |
| | |
| | retval = sos_paging_map(ppage_paddr, vaddr, |
| | FALSE, |
| | SOS_VM_MAP_ATOMIC |
| | | SOS_VM_MAP_PROT_READ |
| | | SOS_VM_MAP_PROT_WRITE); |
| | SOS_ASSERT_FATAL(retval == SOS_OK); |
| | |
| | retval = sos_physmem_unref_physpage(ppage_paddr); |
| | SOS_ASSERT_FATAL(retval == FALSE); |
| | } |
| | |
| | /* Create the cache of caches */ |
| | *first_slab_of_caches_nb_pages = NB_PAGES_IN_SLAB_OF_CACHES; |
| | cache_of_struct_kslab_cache |
| | = create_cache_of_caches(*first_slab_of_caches_base, |
| | NB_PAGES_IN_SLAB_OF_CACHES); |
| | SOS_ASSERT_FATAL(cache_of_struct_kslab_cache != NULL); |
| | |
| | /* Retrieve the slab that should have been allocated */ |
| | *first_struct_slab_of_caches |
| | = list_get_head(cache_of_struct_kslab_cache->slab_list); |
| | |
| | |
| | /* |
| | * Create the cache of ranges, initialised with 1 allocated slab |
| | */ |
| | *first_slab_of_ranges_base = vaddr; |
| | /* Allocate the 1st slab */ |
| | for (i = 0, vaddr = *first_slab_of_ranges_base ; |
| | i < NB_PAGES_IN_SLAB_OF_RANGES ; |
| | i++, vaddr += SOS_PAGE_SIZE) |
| | { |
| | sos_paddr_t ppage_paddr; |
| | |
| | ppage_paddr |
| | = sos_physmem_ref_physpage_new(FALSE); |
| | SOS_ASSERT_FATAL(ppage_paddr != (sos_paddr_t)NULL); |
| | |
| | retval = sos_paging_map(ppage_paddr, vaddr, |
| | FALSE, |
| | SOS_VM_MAP_ATOMIC |
| | | SOS_VM_MAP_PROT_READ |
| | | SOS_VM_MAP_PROT_WRITE); |
| | SOS_ASSERT_FATAL(retval == SOS_OK); |
| | |
| | retval = sos_physmem_unref_physpage(ppage_paddr); |
| | SOS_ASSERT_FATAL(retval == FALSE); |
| | } |
| | |
| | /* Create the cache of ranges */ |
| | *first_slab_of_ranges_nb_pages = NB_PAGES_IN_SLAB_OF_RANGES; |
| | cache_of_ranges = create_cache_of_ranges(*first_slab_of_ranges_base, |
| | sizeof_struct_range, |
| | NB_PAGES_IN_SLAB_OF_RANGES); |
| | SOS_ASSERT_FATAL(cache_of_ranges != NULL); |
| | |
| | /* Retrieve the slab that should have been allocated */ |
| | *first_struct_slab_of_ranges |
| | = list_get_head(cache_of_ranges->slab_list); |
| | |
| | /* |
| | * Create the cache of slabs, without any allocated slab yet |
| | */ |
| | cache_of_struct_kslab |
| | = sos_kmem_cache_create("off-slab slab structures", |
| | sizeof(struct sos_kslab), |
| | 1, |
| | 0, |
| | SOS_KSLAB_CREATE_MAP); |
| | SOS_ASSERT_FATAL(cache_of_struct_kslab != NULL); |
| | |
| | return cache_of_ranges; |
| | } |
| | |
| | |
| | sos_ret_t |
| | sos_kmem_cache_setup_commit(struct sos_kslab *first_struct_slab_of_caches, |
| | struct sos_kmem_range *first_range_of_caches, |
| | struct sos_kslab *first_struct_slab_of_ranges, |
| | struct sos_kmem_range *first_range_of_ranges) |
| | { |
| | first_struct_slab_of_caches->range = first_range_of_caches; |
| | first_struct_slab_of_ranges->range = first_range_of_ranges; |
| | return SOS_OK; |
| | } |
| | |
| | |
| | struct sos_kslab_cache * |
| | sos_kmem_cache_create(const char* name, |
| | sos_size_t obj_size, |
| | sos_count_t pages_per_slab, |
| | sos_count_t min_free_objs, |
| | sos_ui32_t cache_flags) |
| | { |
| | struct sos_kslab_cache *new_cache; |
| | |
| | /* Allocate the new cache */ |
| | new_cache = (struct sos_kslab_cache*) |
| | sos_kmem_cache_alloc(cache_of_struct_kslab_cache, |
| | 0/* NOT ATOMIC */); |
| | if (! new_cache) |
| | return NULL; |
| | |
| | if (cache_initialize(new_cache, name, obj_size, |
| | pages_per_slab, min_free_objs, |
| | cache_flags)) |
| | { |
| | /* Something was wrong */ |
| | sos_kmem_cache_free((sos_vaddr_t)new_cache); |
| | return NULL; |
| | } |
| | |
| | /* Add the cache to the list of slab caches */ |
| | list_add_tail(kslab_cache_list, new_cache); |
| | |
| | /* if the min_free_objs is set, pre-allocate a slab */ |
| | if (min_free_objs) |
| | { |
| | if (cache_grow(new_cache, 0 /* Not atomic */) != SOS_OK) |
| | { |
| | sos_kmem_cache_destroy(new_cache); |
| | return NULL; /* Not enough memory */ |
| | } |
| | } |
| | |
| | return new_cache; |
| | } |
| | |
| | |
| | sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache) |
| | { |
| | int nb_slabs; |
| | struct sos_kslab *slab; |
| | |
| | if (! kslab_cache) |
| | return -SOS_EINVAL; |
| | |
| | /* Refuse to destroy the cache if there are any objects still |
| | allocated */ |
| | list_foreach(kslab_cache->slab_list, slab, nb_slabs) |
| | { |
| | if (slab->nb_free != kslab_cache->nb_objects_per_slab) |
| | return -SOS_EBUSY; |
| | } |
| | |
| | /* Remove all the slabs */ |
| | while ((slab = list_get_head(kslab_cache->slab_list)) != NULL) |
| | { |
| | cache_release_slab(slab, TRUE); |
| | } |
| | |
| | /* Remove the cache */ |
| | return sos_kmem_cache_free((sos_vaddr_t)kslab_cache); |
| | } |
| | |
| | |
| | sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache, |
| | sos_ui32_t alloc_flags) |
| | { |
| | sos_vaddr_t obj_vaddr; |
| | struct sos_kslab * slab_head; |
| | #define ALLOC_RET return |
| | |
| | /* If the slab at the head of the slabs' list has no free object, |
| | then the other slabs don't either => need to allocate a new |
| | slab */ |
| | if ((! kslab_cache->slab_list) |
| | || (! list_get_head(kslab_cache->slab_list)->free)) |
| | { |
| | if (cache_grow(kslab_cache, alloc_flags) != SOS_OK) |
| | /* Not enough memory or blocking alloc */ |
| | ALLOC_RET( (sos_vaddr_t)NULL); |
| | } |
| | |
| | /* Here: we are sure that list_get_head(kslab_cache->slab_list) |
| | exists *AND* that list_get_head(kslab_cache->slab_list)->free is |
| | NOT NULL */ |
| | slab_head = list_get_head(kslab_cache->slab_list); |
| | SOS_ASSERT_FATAL(slab_head != NULL); |
| | |
| | /* Allocate the object at the head of the slab at the head of the |
| | slabs' list */ |
| | obj_vaddr = (sos_vaddr_t)list_pop_head(slab_head->free); |
| | slab_head->nb_free --; |
| | kslab_cache->nb_free_objects --; |
| | |
| | /* If needed, reset object's contents */ |
| | if (kslab_cache->flags & SOS_KSLAB_CREATE_ZERO) |
| | memset((void*)obj_vaddr, 0x0, kslab_cache->alloc_obj_size); |
| | |
| | /* Slab is now full ? */ |
| | if (slab_head->free == NULL) |
| | { |
| | /* Transfer it to the tail of the slabs' list */ |
| | struct sos_kslab *slab; |
| | slab = list_pop_head(kslab_cache->slab_list); |
| | list_add_tail(kslab_cache->slab_list, slab); |
| | } |
| | |
| | /* |
| | * For caches that require a minimum amount of free objects left, |
| | * allocate a slab if needed. |
| | * |
| | * Notice the "== min_free_objects - 1": we do not test |
| | * "< min_free_objects" because, for the cache of kmem structures, |
| | * that would lead to a chicken-and-egg problem: cache_grow below |
| | * calls cache_alloc again for the kmem_vmm cache, which re-enters |
| | * the present function for the very same cache. If the test were |
| | * "< min_free_objects", cache_grow would then be triggered again |
| | * and again for the kmem_vmm cache, until we reach the bottom of |
| | * our stack (infinite recursion). By testing precisely "==", |
| | * cache_grow is only called the first time. |
| | */ |
| | if ((kslab_cache->min_free_objects > 0) |
| | && (kslab_cache->nb_free_objects == (kslab_cache->min_free_objects - 1))) |
| | { |
| | /* No: allocate a new slab now */ |
| | if (cache_grow(kslab_cache, alloc_flags) != SOS_OK) |
| | { |
| | /* Not enough free memory or blocking alloc => undo the |
| | allocation */ |
| | sos_kmem_cache_free(obj_vaddr); |
| | ALLOC_RET( (sos_vaddr_t)NULL); |
| | } |
| | } |
| | |
| | ALLOC_RET(obj_vaddr); |
| | } |
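For instance, with min_free_objects == 2 (the value used for the cache of ranges), the refill above triggers exactly when the number of free range objects drops from 2 to 1: the remaining reserved object can then serve the allocation that sos_kmem_vmm_new_range() may perform on behalf of cache_grow, and since that inner allocation makes the free counter reach 0 (not min_free_objects - 1), it does not re-trigger cache_grow, which is how the chicken-and-egg cycle is broken.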
| | |
| | |
| | /** |
| | * Helper function to free the object located at the given address. |
| | * |
| | * @param empty_slab (output) gets the address of the slab to release |
| | * if removing the object causes the slab to become empty, or NULL |
| | * otherwise. |
| | */ |
| | inline static |
| | sos_ret_t |
| | free_object(sos_vaddr_t vaddr, |
| | struct sos_kslab ** empty_slab) |
| | { |
| | struct sos_kslab_cache *kslab_cache; |
| | |
| | /* Lookup the slab containing the object in the slabs' list */ |
| | struct sos_kslab *slab = sos_kmem_vmm_resolve_slab(vaddr); |
| | |
| | /* By default, consider that the slab will not become empty */ |
| | *empty_slab = NULL; |
| | |
| | /* Did not find the slab */ |
| | if (! slab) |
| | return -SOS_EINVAL; |
| | |
| | SOS_ASSERT_FATAL(slab->cache); |
| | kslab_cache = slab->cache; |
| | |
| | /* |
| | * Check whether the address really could mark the start of an actual |
| | * allocated object |
| | */ |
| | /* Address multiple of an object's size ? */ |
| | if (( (vaddr - slab->first_object) |
| | % kslab_cache->alloc_obj_size) != 0) |
| | return -SOS_EINVAL; |
| | /* Address not too large ? */ |
| | if (( (vaddr - slab->first_object) |
| | / kslab_cache->alloc_obj_size) >= kslab_cache->nb_objects_per_slab) |
| | return -SOS_EINVAL; |
| | |
| | /* |
| | * Ok: we now release the object |
| | */ |
| | |
| | /* The object belongs to a full slab => the slab will not be full |
| | any more => move it to the head of the slabs' list */ |
| | if (! slab->free) |
| | { |
| | list_delete(kslab_cache->slab_list, slab); |
| | list_add_head(kslab_cache->slab_list, slab); |
| | } |
| | |
| | /* Release the object */ |
| | list_add_head(slab->free, (struct sos_kslab_free_object*)vaddr); |
| | slab->nb_free++; |
| | kslab_cache->nb_free_objects++; |
| | SOS_ASSERT_FATAL(slab->nb_free <= slab->cache->nb_objects_per_slab); |
| | |
| | /* Cause the slab to be released if it becomes empty, and if we are |
| | allowed to do it */ |
| | if ((slab->nb_free >= kslab_cache->nb_objects_per_slab) |
| | && (kslab_cache->nb_free_objects - slab->nb_free |
| | >= kslab_cache->min_free_objects)) |
| | { |
| | *empty_slab = slab; |
| | } |
| | |
| | return SOS_OK; |
| | } |
| | |
| | |
| | sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr) |
| | { |
| | sos_ret_t retval; |
| | struct sos_kslab *empty_slab; |
| | |
| | /* Remove the object from the slab */ |
| | retval = free_object(vaddr, & empty_slab); |
| | if (retval != SOS_OK) |
| | return retval; |
| | |
| | /* Remove the slab and the underlying range if needed */ |
| | if (empty_slab != NULL) |
| | return cache_release_slab(empty_slab, TRUE); |
| | |
| | return SOS_OK; |
| | } |
| | |
| | |
| | struct sos_kmem_range * |
| | sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range) |
| | { |
| | sos_ret_t retval; |
| | struct sos_kslab *empty_slab; |
| | |
| | /* Remove the object from the slab */ |
| | retval = free_object((sos_vaddr_t)the_range, & empty_slab); |
| | if (retval != SOS_OK) |
| | return NULL; |
| | |
| | /* Remove the slab BUT NOT the underlying range if needed */ |
| | if (empty_slab != NULL) |
| | { |
| | struct sos_kmem_range *empty_range = empty_slab->range; |
| | SOS_ASSERT_FATAL(cache_release_slab(empty_slab, FALSE) == SOS_OK); |
| | SOS_ASSERT_FATAL(empty_range != NULL); |
| | return empty_range; |
| | } |
| | |
| | return NULL; |
| | } |
| | |
| | |
| ../sos-code-article4/sos/kmem_slab.h (1970-01-01 01:00:00.000000000 +0100) |
| ../sos-code-article5/sos/kmem_slab.h (2004-12-18 21:12:14.000000000 +0100) |
| | /* Copyright (C) 2000 Thomas Petazzoni |
| | Copyright (C) 2004 David Decotigny |
| | |
| | This program is free software; you can redistribute it and/or |
| | modify it under the terms of the GNU General Public License |
| | as published by the Free Software Foundation; either version 2 |
| | of the License, or (at your option) any later version. |
| | |
| | This program is distributed in the hope that it will be useful, |
| | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| | GNU General Public License for more details. |
| | |
| | You should have received a copy of the GNU General Public License |
| | along with this program; if not, write to the Free Software |
| | Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| | USA. |
| | */ |
| | #ifndef _SOS_KMEM_SLAB_H_ |
| | #define _SOS_KMEM_SLAB_H_ |
| | |
| | /** |
| | * @file kmem_slab.h |
| | * |
| | * Kernel Memory Allocator based on Bonwick's slab allocator (Solaris |
| | * 2.4, Linux 2.4). This allocator achieves a good memory utilization |
| | * ratio (memory effectively used / memory requested), i.e. limited |
| | * fragmentation, while elegantly handling cache-effect considerations |
| | * (TLB locality through the notion of "cache" of slabs, and the |
| | * dcache utilization through the notion of cache colouring to |
| | * decrease the conflicts in the dcache for accesses to different data |
| | * in the same cache). |
| | * |
| | * This allocator relies on the range allocator (kmem_vmm.h) to |
| | * allocate the slabs, which itself relies on the slab allocator to |
| | * allocate its "range" data structures, thus leading to a |
| | * chicken-and-egg problem. We solve this problem by introducing the |
| | * notion of "min_free_objs" for the slab caches, in order for the cache |
| | * of ranges to always have enough ranges in reserve to complete the |
| | * range allocation before being urged to allocate a new slab of |
| | * ranges, which would require the allocation of a new range. |
| | * |
| | * Compared to Bonwick's recommendations, we don't handle ctor/dtor |
| | * routines on the objects, so that we can alter the objects once they |
| | * are set free. Thus, the list of free objects is stored in the free |
| | * objects themselves, not alongside the objects (this also implies that |
| | * the SOS_KSLAB_CREATE_MAP flag below is meaningless). We also don't |
| | * implement the cache colouring (trivial to add, but we omit it for |
| | * readability reasons), and the only alignment constraint we respect |
| | * is that allocated objects are aligned on a 4B boundary: for other |
| | * alignment constraints, the user must integrate them in the |
| | * "object_size" parameter to "sos_kmem_cache_create()". |
| | * |
| | * References : |
| | * - J. Bonwick's paper, "The slab allocator: An object-caching kernel |
| | * memory allocator", In USENIX Summer 1994 Technical Conference |
| | * - The bible, aka "Unix internals : the new frontiers" (section |
| | * 12.10), Uresh Vahalia, Prentice Hall 1996, ISBN 0131019082 |
| | * - "The Linux slab allocator", B. Fitzgibbons, |
| | * http://www.cc.gatech.edu/people/home/bradf/cs7001/proj2/ |
| | * - The Kos, http://kos.enix.org/ |
| | */ |
| | #include <sos/types.h> |
| | #include <sos/errno.h> |
| | |
| | /** Opaque data structure that defines a Cache of slabs */ |
| | struct sos_kslab_cache; |
| | |
| | /** Opaque data structure that defines a slab. Exported only to |
| | kmem_vmm.h */ |
| | struct sos_kslab; |
| | |
| | #include "kmem_vmm.h" |
| | |
| | |
| | /** The maximum allowed pages for each slab */ |
| | #define MAX_PAGES_PER_SLAB 32 /* 128 kB */ |
| | |
| | |
| | /** |
| | * Initialize the slab cache of slab caches, and prepare the cache of |
| | * kmem_range for kmem_vmm. |
| | * |
| | * @param kernel_core_base The virtual address of the first byte used |
| | * by the kernel code/data |
| | * |
| | * @param kernel_core_top The virtual address of the first byte after |
| | * the kernel code/data. |
| | * |
| | * @param sizeof_struct_range the size of the objects (aka "struct |
| | * sos_kmem_vmm_ranges") to be allocated in the cache of ranges |
| | * |
| | * @param first_struct_slab_of_caches (output value) the virtual |
| | * address of the first slab structure that gets allocated for the |
| | * cache of caches. The function actually allocates the first slab of |
| | * the cache of caches by hand, because of a chicken-and-egg problem. |
| | * The address of this slab is used by the kmem_vmm_setup routine to |
| | * finalize the allocation of the slab, in order for it to behave like |
| | * a real slab afterwards. |
| | * |
| | * @param first_slab_of_caches_base (output value) the virtual address |
| | * of the slab associated to the slab structure. |
| | * |
| | * @param first_slab_of_caches_nb_pages (output value) the number of |
| | * (virtual) pages used by the first slab of the cache of caches. |
| | * |
| | * @param first_struct_slab_of_ranges (output value) the virtual address |
| | * of the first slab that gets allocated for the cache of ranges. Same |
| | * explanation as above. |
| | * |
| | * @param first_slab_of_ranges_base (output value) the virtual address |
| | * of the slab associated to the slab structure. |
| | * |
| | * @param first_slab_of_ranges_nb_pages (output value) the number of |
| | * (virtual) pages used by the first slab of the cache of ranges. |
| | * |
| | * @return the cache of kmem_range, immediately usable |
| | */ |
| | struct sos_kslab_cache * |
| | sos_kmem_cache_setup_prepare(sos_vaddr_t kernel_core_base, |
| | sos_vaddr_t kernel_core_top, |
| | sos_size_t sizeof_struct_range, |
| | /* results */ |
| | struct sos_kslab **first_struct_slab_of_caches, |
| | sos_vaddr_t *first_slab_of_caches_base, |
| | sos_count_t *first_slab_of_caches_nb_pages, |
| | struct sos_kslab **first_struct_slab_of_ranges, |
| | sos_vaddr_t *first_slab_of_ranges_base, |
| | sos_count_t *first_slab_of_ranges_nb_pages); |
| | |
| | /** |
| | * Update the configuration of the cache subsystem once the vmm |
| | * subsystem has been fully initialized |
| | */ |
| | sos_ret_t |
| | sos_kmem_cache_setup_commit(struct sos_kslab *first_struct_slab_of_caches, |
| | struct sos_kmem_range *first_range_of_caches, |
| | struct sos_kslab *first_struct_slab_of_ranges, |
| | struct sos_kmem_range *first_range_of_ranges); |
| | |
| | |
| | /* |
| | * Flags for sos_kmem_cache_create() |
| | */ |
| | /** The slabs should be initially mapped in physical memory */ |
| | #define SOS_KSLAB_CREATE_MAP (1<<0) |
| | /** The object should always be set to zero at allocation (implies |
| | SOS_KSLAB_CREATE_MAP) */ |
| | #define SOS_KSLAB_CREATE_ZERO (1<<1) |
| | |
| | /** |
| | * @note this function MAY block (involved allocations are not atomic) |
| | * @param name must remain valid during the whole cache's life |
| | * (shallow copy) ! |
| | * @param cache_flags An or-ed combination of the SOS_KSLAB_CREATE_* flags |
| | */ |
| | struct sos_kslab_cache * |
| | sos_kmem_cache_create(const char* name, |
| | sos_size_t object_size, |
| | sos_count_t pages_per_slab, |
| | sos_count_t min_free_objects, |
| | sos_ui32_t cache_flags); |
| | |
| | sos_ret_t sos_kmem_cache_destroy(struct sos_kslab_cache *kslab_cache); |
| | |
| | |
| | /* |
| | * Flags for sos_kmem_cache_alloc() |
| | */ |
| | /** Allocation should either succeed or fail, without blocking */ |
| | #define SOS_KSLAB_ALLOC_ATOMIC (1<<0) |
| | |
| | /** |
| | * Allocate an object from the given cache. |
| | * |
| | * @param alloc_flags An or-ed combination of the SOS_KSLAB_ALLOC_* flags |
| | */ |
| | sos_vaddr_t sos_kmem_cache_alloc(struct sos_kslab_cache *kslab_cache, |
| | sos_ui32_t alloc_flags); |
| | |
| | |
| | /** |
| | * Free an object (assumed to be already allocated and not already |
| | * free) at the given virtual address. |
| | */ |
| | sos_ret_t sos_kmem_cache_free(sos_vaddr_t vaddr); |
| | |
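As a usage sketch of the cache API declared above (not part of the SOS sources; the subsystem, cache name and object type below are hypothetical):

/* Hypothetical client of the slab cache API (illustration only). */
struct my_obj { int id; struct my_obj *prev, *next; };

static struct sos_kslab_cache *my_obj_cache;

sos_ret_t my_subsystem_setup(void)
{
  /* 1 page per slab, no reserved free objects, zero-fill on allocation */
  my_obj_cache = sos_kmem_cache_create("my_obj", sizeof(struct my_obj),
                                       1, 0,
                                       SOS_KSLAB_CREATE_MAP
                                       | SOS_KSLAB_CREATE_ZERO);
  if (! my_obj_cache)
    return -SOS_ENOMEM;
  return SOS_OK;
}

void my_subsystem_demo(void)
{
  struct my_obj *obj
    = (struct my_obj*) sos_kmem_cache_alloc(my_obj_cache, 0 /* may block */);
  if (obj)
    sos_kmem_cache_free((sos_vaddr_t)obj);
}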
| | |
| | /* |
| | * Function reserved to kmem_vmm.c. Does almost everything |
| | * sos_kmem_cache_free() does, except that it never calls |
| | * sos_kmem_vmm_del_range(), even when it would need to. This is aimed |
| | * at avoiding large recursions when a range is freed with |
| | * sos_kmem_vmm_del_range(). |
| | * |
| | * @param the_range The range structure to free |
| | * |
| | * @return NULL when the range containing 'the_range' still contains |
| | * other ranges, or the address of the range which owned 'the_range' |
| | * if it becomes empty. |
| | */ |
| | struct sos_kmem_range * |
| | sos_kmem_cache_release_struct_range(struct sos_kmem_range *the_range); |
| | |
| | |
| | #endif /* _SOS_KMEM_SLAB_H_ */ |
| | |
| ../sos-code-article4/sos/kmem_vmm.c (1970-01-01 01:00:00.000000000 +0100) |
| ../sos-code-article5/sos/kmem_vmm.c (2004-12-18 21:12:14.000000000 +0100) |
| | /* Copyright (C) 2000 Thomas Petazzoni |
| | Copyright (C) 2004 David Decotigny |
| | |
| | This program is free software; you can redistribute it and/or |
| | modify it under the terms of the GNU General Public License |
| | as published by the Free Software Foundation; either version 2 |
| | of the License, or (at your option) any later version. |
| | |
| | This program is distributed in the hope that it will be useful, |
| | but WITHOUT ANY WARRANTY; without even the implied warranty of |
| | MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| | GNU General Public License for more details. |
| | |
| | You should have received a copy of the GNU General Public License |
| | along with this program; if not, write to the Free Software |
| | Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, |
| | USA. |
| | */ |
| | |
| | #include <sos/list.h> |
| | #include <sos/physmem.h> |
| | #include <hwcore/paging.h> |
| | #include <sos/assert.h> |
| | |
| | #include "kmem_vmm.h" |
| | |
| | /** The structure of a range of kernel-space virtual addresses */ |
| | struct sos_kmem_range |
| | { |
| | sos_vaddr_t base_vaddr; |
| | sos_count_t nb_pages; |
| | |
| | /* The slab owning this range, or NULL */ |
| | struct sos_kslab *slab; |
| | |
| | struct sos_kmem_range *prev, *next; |
| | }; |
| | const int sizeof_struct_sos_kmem_range = sizeof(struct sos_kmem_range); |
| | |
| | /** The ranges are SORTED in (strictly) ascending base addresses */ |
| | static struct sos_kmem_range *kmem_free_range_list, *kmem_used_range_list; |
| | |
| | /** The slab cache for the kmem ranges */ |
| | static struct sos_kslab_cache *kmem_range_cache; |
| | |
| | |
| | |
| | /** Helper function to get the closest preceding or containing |
| | range for the given virtual address */ |
| | static struct sos_kmem_range * |
| | get_closest_preceding_kmem_range(struct sos_kmem_range *the_list, |
| | sos_vaddr_t vaddr) |
| | { |
| | int nb_elements; |
| | struct sos_kmem_range *a_range, *ret_range; |
| | |
| | /* The kmem_range list is kept SORTED, so we exit as soon as we reach |
| | a range whose base address is > vaddr */ |
| | ret_range = NULL; |
| | list_foreach(the_list, a_range, nb_elements) |
| | { |
| | if (vaddr < a_range->base_vaddr) |
| | return ret_range; |
| | ret_range = a_range; |
| | } |
| | |
| | /* This will always be the LAST range in the kmem area */ |
| | return ret_range; |
| | } |
| | |
| | |
| | /** |
| | * Helper function to lookup a free range large enough to hold nb_pages |
| | * pages (first fit) |
| | */ |
| | static struct sos_kmem_range *find_suitable_free_range(sos_count_t nb_pages) |
| | { |
| | int nb_elements; |
| | struct sos_kmem_range *r; |
| | |
| | list_foreach(kmem_free_range_list, r, nb_elements) |
| | { |
| | if (r->nb_pages >= nb_pages) |
| | return r; |
| | } |
| | |
| | return NULL; |
| | } |
| | |
| | |
| | /** |
| | * Helper function to add a_range in the_list, in strictly ascending order. |
| | * |
| | * @return The (possibly) new head of the_list |
| | */ |
| | static struct sos_kmem_range *insert_range(struct sos_kmem_range *the_list, |
| | struct sos_kmem_range *a_range) |
| | { |
| | struct sos_kmem_range *prec_used; |
| | |
| | /** Look for any preceding range */ |
| | prec_used = get_closest_preceding_kmem_range(the_list, |
| | a_range->base_vaddr); |
| | /** insert a_range /after/ this prec_used */ |
| | if (prec_used != NULL) |
| | list_insert_after(the_list, prec_used, a_range); |
| | else /* Insert at the beginning of the list */ |
| | list_add_head(the_list, a_range); |
| | |
| | return the_list; |
| | } |
| | |
| | |
| | /** |
| | * Helper function to retrieve the range owning the given vaddr, by |
| | * scanning the physical memory first if vaddr is mapped in RAM |
| | */ |
| | static struct sos_kmem_range *lookup_range(sos_vaddr_t vaddr) |
| | { |
| | struct sos_kmem_range *range; |
| | |
| | /* First: try to retrieve the physical page mapped at this address */ |
| | sos_paddr_t ppage_paddr = SOS_PAGE_ALIGN_INF(sos_paging_get_paddr(vaddr)); |
| | if (ppage_paddr) |
| | { |
| | range = sos_physmem_get_kmem_range(ppage_paddr); |
| | |
| | /* If a page is mapped at this address, it is EXPECTED that it |
| | is really associated with a range */ |
| | SOS_ASSERT_FATAL(range != NULL); |
| | } |
| | |
| | /* Otherwise scan the list of used ranges, looking for the range |
| | owning the address */ |
| | else |
| | { |
| | range = get_closest_preceding_kmem_range(kmem_used_range_list, |
| | vaddr); |
| | /* Not found */ |
| | if (! range) |
| | return NULL; |
| | |
| | /* vaddr not covered by this range */ |
| | if ( (vaddr < range->base_vaddr) |
| | || (vaddr >= (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE)) ) |
| | return NULL; |
| | } |
| | |
| | return range; |
| | } |
| | |
| | |
| | /** |
| | * Helper function for sos_kmem_vmm_setup() to initialize a new range |
| | * that maps a given area as free or as already used. |
| | * This function either succeeds or halts the whole system. |
| | */ |
| | static struct sos_kmem_range * |
| | create_range(sos_bool_t is_free, |
| | sos_vaddr_t base_vaddr, |
| | sos_vaddr_t top_vaddr, |
| | struct sos_kslab *associated_slab) |
| | { |
| | struct sos_kmem_range *range; |
| | |
| | SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(base_vaddr)); |
| | SOS_ASSERT_FATAL(SOS_IS_PAGE_ALIGNED(top_vaddr)); |
| | |
| | if ((top_vaddr - base_vaddr) < SOS_PAGE_SIZE) |
| | return NULL; |
| | |
| | range = (struct sos_kmem_range*)sos_kmem_cache_alloc(kmem_range_cache, |
| | SOS_KSLAB_ALLOC_ATOMIC); |
| | SOS_ASSERT_FATAL(range != NULL); |
| | |
| | range->base_vaddr = base_vaddr; |
| | range->nb_pages = (top_vaddr - base_vaddr) / SOS_PAGE_SIZE; |
| | |
| | if (is_free) |
| | { |
| | list_add_tail(kmem_free_range_list, |
| | range); |
| | } |
| | else |
| | { |
| | sos_vaddr_t vaddr; |
| | range->slab = associated_slab; |
| | list_add_tail(kmem_used_range_list, |
| | range); |
| | |
| | /* Ok, set the range owner for the pages in this range */ |
| | for (vaddr = base_vaddr ; |
| | vaddr < top_vaddr ; |
| | vaddr += SOS_PAGE_SIZE) |
| | { |
| | sos_paddr_t ppage_paddr = sos_paging_get_paddr(vaddr); |
| | SOS_ASSERT_FATAL((void*)ppage_paddr != NULL); |
| | sos_physmem_set_kmem_range(ppage_paddr, range); |
| | } |
| | } |
| | |
| | return range; |
| | } |
| | |
| | |
| | sos_ret_t sos_kmem_vmm_setup(sos_vaddr_t kernel_core_base, |
| | sos_vaddr_t kernel_core_top, |
| | sos_vaddr_t bootstrap_stack_bottom_vaddr, |
| | sos_vaddr_t bootstrap_stack_top_vaddr) |
| | { |
| | struct sos_kslab *first_struct_slab_of_caches, |
| | *first_struct_slab_of_ranges; |
| | sos_vaddr_t first_slab_of_caches_base, |
| | first_slab_of_caches_nb_pages, |
| | first_slab_of_ranges_base, |
| | first_slab_of_ranges_nb_pages; |
| | struct sos_kmem_range *first_range_of_caches, |
| | *first_range_of_ranges; |
| | |
| | list_init(kmem_free_range_list); |
| | list_init(kmem_used_range_list); |
| | |
| | kmem_range_cache |
| | = sos_kmem_cache_setup_prepare(kernel_core_base, |
| | kernel_core_top, |
| | sizeof(struct sos_kmem_range), |
| | & first_struct_slab_of_caches, |
| | & first_slab_of_caches_base, |
| | & first_slab_of_caches_nb_pages, |
| | & first_struct_slab_of_ranges, |
| | & first_slab_of_ranges_base, |
| | & first_slab_of_ranges_nb_pages); |
| | SOS_ASSERT_FATAL(kmem_range_cache != NULL); |
| | |
| | /* Mark virtual addresses 16kB - Video as FREE */ |
| | create_range(TRUE, |
| | SOS_KMEM_VMM_BASE, |
| | SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START), |
| | NULL); |
| | |
| | /* Mark virtual addresses in Video hardware mapping as NOT FREE */ |
| | create_range(FALSE, |
| | SOS_PAGE_ALIGN_INF(BIOS_N_VIDEO_START), |
| | SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END), |
| | NULL); |
| | |
| | /* Mark virtual addresses Video - Kernel as FREE */ |
| | create_range(TRUE, |
| | SOS_PAGE_ALIGN_SUP(BIOS_N_VIDEO_END), |
| | SOS_PAGE_ALIGN_INF(kernel_core_base), |
| | NULL); |
| | |
| | /* Mark virtual addresses in Kernel code/data up to the bootstrap stack |
| | as NOT FREE */ |
| | create_range(FALSE, |
| | SOS_PAGE_ALIGN_INF(kernel_core_base), |
| | bootstrap_stack_bottom_vaddr, |
| | NULL); |
| | |
| | /* Mark virtual addresses in the bootstrap stack as NOT FREE too, |
| | but in another vmm region in order to be un-allocated later */ |
| | create_range(FALSE, |
| | bootstrap_stack_bottom_vaddr, |
| | bootstrap_stack_top_vaddr, |
| | NULL); |
| | |
| | /* Mark the remaining virtual addresses in Kernel code/data after |
| | the bootstrap stack as NOT FREE */ |
| | create_range(FALSE, |
| | bootstrap_stack_top_vaddr, |
| | SOS_PAGE_ALIGN_SUP(kernel_core_top), |
| | NULL); |
| | |
| | /* Mark virtual addresses in the first slab of the cache of caches |
| | as NOT FREE */ |
| | SOS_ASSERT_FATAL(SOS_PAGE_ALIGN_SUP(kernel_core_top) |
| | == first_slab_of_caches_base); |
| | SOS_ASSERT_FATAL(first_struct_slab_of_caches != NULL); |
| | first_range_of_caches |
| | = create_range(FALSE, |
| | first_slab_of_caches_base, |
| | first_slab_of_caches_base |
| | + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE, |
| | first_struct_slab_of_caches); |
| | |
| | /* Mark virtual addresses in the first slab of the cache of ranges |
| | as NOT FREE */ |
| | SOS_ASSERT_FATAL((first_slab_of_caches_base |
| | + first_slab_of_caches_nb_pages*SOS_PAGE_SIZE) |
| | == first_slab_of_ranges_base); |
| | SOS_ASSERT_FATAL(first_struct_slab_of_ranges != NULL); |
| | first_range_of_ranges |
| | = create_range(FALSE, |
| | first_slab_of_ranges_base, |
| | first_slab_of_ranges_base |
| | + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE, |
| | first_struct_slab_of_ranges); |
| | |
| | /* Mark virtual addresses after these slabs as FREE */ |
| | create_range(TRUE, |
| | first_slab_of_ranges_base |
| | + first_slab_of_ranges_nb_pages*SOS_PAGE_SIZE, |
| | SOS_KMEM_VMM_TOP, |
| | NULL); |
| | |
| | /* Update the cache subsystem so that the artificially-created |
| | caches of caches and ranges really behave like *normal* caches (ie |
| | those allocated by the normal slab API) */ |
| | sos_kmem_cache_setup_commit(first_struct_slab_of_caches, |
| | first_range_of_caches, |
| | first_struct_slab_of_ranges, |
| | first_range_of_ranges); |
| | |
| | return SOS_OK; |
| | } |
| | |
| | |
| | /** |
| | * Allocate a new kernel area spanning one or multiple pages. |
| | * |
| | * @return a new range structure |
| | */ |
| | struct sos_kmem_range *sos_kmem_vmm_new_range(sos_count_t nb_pages, |
| | sos_ui32_t flags, |
| | sos_vaddr_t * range_start) |
| | { |
| | struct sos_kmem_range *free_range, *new_range; |
| | |
| | if (nb_pages <= 0) |
| | return NULL; |
| | |
| | /* Find a suitable free range large enough to hold nb_pages pages */ |
| | free_range = find_suitable_free_range(nb_pages); |
| | if (free_range == NULL) |
| | return NULL; |
| | |
| | /* If range has exactly the requested size, just move it to the |
| | "used" list */ |
| | if(free_range->nb_pages == nb_pages) |
| | { |
| | list_delete(kmem_free_range_list, free_range); |
| | kmem_used_range_list = insert_range(kmem_used_range_list, |
| | free_range); |
| | /* The new_range is exactly the free_range */ |
| | new_range = free_range; |
| | } |
| | |
| | /* Otherwise the range is bigger than the requested size: split it. |
| | This involves reducing its size, and allocating a new range, which |
| | is going to be added to the "used" list */ |
| | else |
| | { |
| | /* free_range split in { new_range | free_range } */ |
| | new_range = (struct sos_kmem_range*) |
| | sos_kmem_cache_alloc(kmem_range_cache, |
| | (flags & SOS_KMEM_VMM_ATOMIC)? |
| | SOS_KSLAB_ALLOC_ATOMIC:0); |
| | if (! new_range) |
| | return NULL; |
| | |
| | new_range->base_vaddr = free_range->base_vaddr; |
| | new_range->nb_pages = nb_pages; |
| | free_range->base_vaddr += nb_pages*SOS_PAGE_SIZE; |
| | free_range->nb_pages -= nb_pages; |
| | |
| | /* free_range is still at the same place in the list */ |
| | /* insert new_range in the used list */ |
| | kmem_used_range_list = insert_range(kmem_used_range_list, |
| | new_range); |
| | } |
| | |
| | /* By default, the range is not associated with any slab */ |
| | new_range->slab = NULL; |
| | |
| | /* If mapping of physical pages is needed, map them now */ |
| | if (flags & SOS_KMEM_VMM_MAP) |
| | { |
| | int i; |
| | for (i = 0 ; i < nb_pages ; i ++) |
| | { |
| | /* Get a new physical page */ |
| | sos_paddr_t ppage_paddr |
| | = sos_physmem_ref_physpage_new(! (flags & SOS_KMEM_VMM_ATOMIC)); |
| | |
| | /* Map the page in kernel space */ |
| | if (ppage_paddr) |
| | { |
| | if (sos_paging_map(ppage_paddr, |
| | new_range->base_vaddr |
| | + i * SOS_PAGE_SIZE, |
| | FALSE /* Not a user page */, |
| | ((flags & SOS_KMEM_VMM_ATOMIC)? |
| | SOS_VM_MAP_ATOMIC:0) |
| | | SOS_VM_MAP_PROT_READ |
| | | SOS_VM_MAP_PROT_WRITE)) |
| | { |
| | /* Failed => force unallocation, see below */ |
| | sos_physmem_unref_physpage(ppage_paddr); |
| | ppage_paddr = (sos_paddr_t)NULL; |
| | } |
| | else |
| | { |
| | /* Success : page can be unreferenced since it is |
| | now mapped */ |
| | sos_physmem_unref_physpage(ppage_paddr); |
| | } |
| | } |
| | |
| | /* Undo the allocation if failed to allocate or map a new page */ |
| | if (! ppage_paddr) |
| | { |
| | sos_kmem_vmm_del_range(new_range); |
| | return NULL; |
| | } |
| | |
| | /* Ok, set the range owner for this page */ |
| | sos_physmem_set_kmem_range(ppage_paddr, new_range); |
| | } |
| | } |
| | |
| | /* Otherwise we need a correct page fault handler to support |
| | deferred mapping (aka demand paging) of ranges */ |
| | else |
| | SOS_ASSERT_FATAL(! "No demand paging yet"); |
| | |
| | if (range_start) |
| | *range_start = new_range->base_vaddr; |
| | |
| | return new_range; |
| | } |
| | |
| | |
| | sos_ret_t sos_kmem_vmm_del_range(struct sos_kmem_range *range) |
| | { |
| | int i; |
| | struct sos_kmem_range *ranges_to_free; |
| | list_init(ranges_to_free); |
| | |
| | SOS_ASSERT_FATAL(range != NULL); |
| | SOS_ASSERT_FATAL(range->slab == NULL); |
| | |
| | /* Remove the range from the 'USED' list now */ |
| | list_delete(kmem_used_range_list, range); |
| | |
| | /* |
| | * The following do..while() loop is here to avoid an indirect |
| | * recursion: if we called kmem_cache_free() directly from inside the |
| | * current function, we would risk re-entering the current function |
| | * (sos_kmem_vmm_del_range()) again, which may cause problems if it |
| | * in turn calls kmem_slab again and sos_kmem_vmm_del_range again, |
| | * and again and again. This may happen while freeing ranges of |
| | * struct sos_kslab... |
| | * |
| | * To avoid this, we choose to call a special function of kmem_slab |
| | * doing almost the same as sos_kmem_cache_free(), but which does |
| | * NOT call us (i.e. sos_kmem_vmm_del_range()): instead WE add the |
| | * range that is to be freed to a list, and the do..while() loop is |
| | * here to process this list! The recursion is thus replaced by |
| | * classical iteration. |
| | */ |
| | do |
| | { |
| | /* Ok, we got the range. Now, insert this range in the free list */ |
| | kmem_free_range_list = insert_range(kmem_free_range_list, range); |
| | |
| | /* Unmap the physical pages */ |
| | for (i = 0 ; i < range->nb_pages ; i ++) |
| | { |
| | /* This will work even if no page is mapped at this address */ |
| | sos_paging_unmap(range->base_vaddr + i*SOS_PAGE_SIZE); |
| | } |
| | |
| | /* If possible, coalesce it with the prev/next free ranges (there is |
| | always a valid prev/next link since the list is circular). Note: |
| | the tests below will lead to correct behaviour even if the list |
| | is limited to the 'range' singleton, at least as long as the |
| | range is not zero-sized */ |
| | /* Merge with preceding one ? */ |
| | if (range->prev->base_vaddr + range->prev->nb_pages*SOS_PAGE_SIZE |
| | == range->base_vaddr) |
| | { |
| | struct sos_kmem_range *empty_range_of_ranges = NULL; |
| | struct sos_kmem_range *prec_free = range->prev; |
| | |
| | /* Merge them */ |
| | prec_free->nb_pages += range->nb_pages; |
| | list_delete(kmem_free_range_list, range); |
| | |
| | /* Mark the range as free. This may cause the slab owning |
| | the range to become empty */ |
| | empty_range_of_ranges = |
| | sos_kmem_cache_release_struct_range(range); |
| | |
| | /* If this causes the slab owning the range to become empty, |
| | add the range corresponding to the slab at the end of the |
| | list of the ranges to be freed: it will be actually freed |
| | in one of the next iterations of the do{} loop. */ |
| | if (empty_range_of_ranges != NULL) |
| | { |
| | list_delete(kmem_used_range_list, empty_range_of_ranges); |
| | list_add_tail(ranges_to_free, empty_range_of_ranges); |
| | } |
| | |
| | /* Set range to the beginning of this coalesced area */ |
| | range = prec_free; |
| | } |
| | |
| | /* Merge with next one ? [NO 'else' since range may be the result of |
| | the merge above] */ |
| | if (range->base_vaddr + range->nb_pages*SOS_PAGE_SIZE |
| | == range->next->base_vaddr) |
| | { |
| | struct sos_kmem_range *empty_range_of_ranges = NULL; |
| | struct sos_kmem_range *next_range = range->next; |
| | |
| | /* Merge them */ |
| | range->nb_pages += next_range->nb_pages; |
| | list_delete(kmem_free_range_list, next_range); |
| | |
| | /* Mark the next_range as free. This may cause the slab |
| | owning the next_range to become empty */ |
| | empty_range_of_ranges = |
| | sos_kmem_cache_release_struct_range(next_range); |
| | |
| | /* If this causes the slab owning the next_range to become |
| | empty, add the range corresponding to the slab at the end |
| | of the list of the ranges to be freed: it will be |
| | actually freed in one of the next iterations of the |
| | do{} loop. */ |
| | if (empty_range_of_ranges != NULL) |
| | { |
| | list_delete(kmem_used_range_list, empty_range_of_ranges); |
| | list_add_tail(ranges_to_free, empty_range_of_ranges); |
| | } |
| | } |
| | |
| | |
| | /* If deleting the range(s) caused one or more range(s) to be |
| | freed, get the next one to free */ |
| | if (list_is_empty(ranges_to_free)) |
| | range = NULL; /* No range left to free */ |
| | else |
| | range = list_pop_head(ranges_to_free); |
| | |
| | } |
| | /* Stop when there is no range left to be freed for now */ |
| | while (range != NULL); |
| | |
| | return SOS_OK; |
| | } |
| | |
| | |
| | sos_vaddr_t sos_kmem_vmm_alloc(sos_count_t nb_pages, |
| | sos_ui32_t flags) |
| | { |
| | struct sos_kmem_range *range |
| | = sos_kmem_vmm_new_range(nb_pages, |
| | flags, |
| | NULL); |
| | if (! range) |
| | return (sos_vaddr_t)NULL; |
| | |
| | return range->base_vaddr; |
| | } |
| | |
| | |
| | sos_ret_t sos_kmem_vmm_free(sos_vaddr_t vaddr) |
| | { |
| | struct sos_kmem_range *range = lookup_range(vaddr); |
| | |
| | /* We expect that the given address is the base address of the |
| | range */ |
| | if (!range || (range->base_vaddr != vaddr)) |
| | return -SOS_EINVAL; |
| | |
| | /* We expect that this range is not held by any cache */ |
| | if (range->slab != NULL) |
| | return -SOS_EBUSY; |
| | |
| | return sos_kmem_vmm_del_range(range); |
| | } |
| | |
| | |
| | sos_ret_t sos_kmem_vmm_set_slab(struct sos_kmem_range *range, |
| | struct sos_kslab *slab) |
| | { |
| | if (! range) |
| | return -SOS_EINVAL; |
| | |
| | range->slab = slab; |
| | return SOS_OK; |
| | } |
| | |
| | struct sos_kslab * sos_kmem_vmm_resolve_slab(sos_vaddr_t vaddr) |
| | { |
| | struct sos_kmem_range *range = lookup_range(vaddr); |
| | if (! range) |
| | return NULL; |
| | |
| | return range->slab; |
| | } |
| | |
| | |
| | sos_bool_t sos_kmem_vmm_is_valid_vaddr(sos_vaddr_t vaddr) |
| | { |
| | struct sos_kmem_range *range = lookup_range(vaddr); |
| | return (range != NULL); |
| | } |
| | |
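As a usage sketch of the kmem_vmm interface above (not part of the SOS sources; the function name is hypothetical), a kernel caller would typically allocate and release a mapped area like this:

/* Hypothetical caller of the kmem_vmm API (illustration only). */
void kmem_vmm_demo(void)
{
  /* Allocate 4 kernel pages, mapped in physical memory right away;
     this call may block since SOS_KMEM_VMM_ATOMIC is not passed */
  sos_vaddr_t area = sos_kmem_vmm_alloc(4, SOS_KMEM_VMM_MAP);
  if (! area)
    return;

  /* The area behaves like ordinary kernel memory */
  memset((void*)area, 0x0, 4*SOS_PAGE_SIZE);

  /* Release both the pages and the underlying range structure */
  sos_kmem_vmm_free(area);
}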
| ../sos-code-article4/sos/main.c (2004-12-18 21:12:11.000000000 +0100) |
| ../sos-code-article5/sos/main.c (2004-12-18 21:12:14.000000000 +0100) |
| #include <sos/list.h> | #include <sos/list.h> |
| #include <sos/physmem.h> | #include <sos/physmem.h> |
| #include <hwcore/paging.h> | #include <hwcore/paging.h> |
| #include <sos/list.h> | #include <sos/kmem_vmm.h> |
| | #include <sos/kmalloc.h> |
| #include <sos/assert.h> | #include <sos/assert.h> |
| #include <drivers/x86_videomem.h> | #include <drivers/x86_videomem.h> |
|
|
|
| clock_count); | clock_count); |
| clock_count++; | clock_count++; |
| } | } |
| | struct digit |
| | { |
| | struct digit *prev, *next; |
| | char value; |
| | }; |
| | |
| | /* Representation of a big (positive) integer: Most Significant Digit |
| | (MSD) is the HEAD of the list. Least Significant Digit (LSD) is the |
| | TAIL of the list */ |
| | typedef struct digit * big_number_t; |
| | |
| | |
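For example, with this representation the integer 503 is stored as the three-digit list 5 -> 0 -> 3, with the 5 at the head of the list (MSD) and the 3 at its tail (LSD).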
| /* Page fault exception handler */ | /* Add a new digit after the LSD */ |
| static void pgflt_ex(int exid) | void bn_push_lsd(big_number_t * bn, char value) |
| sos_bochs_printf("Got page fault\n"); | struct digit *d; |
| sos_x86_videomem_printf(10, 30, | d = (struct digit*) sos_kmalloc(sizeof(struct digit), 0); |
| SOS_X86_VIDEO_FG_LTRED | SOS_X86_VIDEO_BG_BLUE, | SOS_ASSERT_FATAL(d != NULL); |
| "Got EXPECTED (?) Page fault ! But where ???"); | d->value = value; |
| for (;;) ; | list_add_tail(*bn, d); |
| | |
| static void test_paging(sos_vaddr_t sos_kernel_core_top_vaddr) | |
| | /* Add a new digit before the MSD */ |
| | void bn_push_msd(big_number_t * bn, char value) |
| /* The (linear) address of the page holding the code we are | struct digit *d; |
| currently executing */ | d = (struct digit*) sos_kmalloc(sizeof(struct digit), 0); |
| sos_vaddr_t vpage_code = SOS_PAGE_ALIGN_INF(test_paging); | SOS_ASSERT_FATAL(d != NULL); |
| | d->value = value; |
| | list_add_head(*bn, d); |
| | } |
| /* The new physical page that will hold the code */ | |
| sos_paddr_t ppage_new; | |
| /* Where this page will be mapped temporarily in order to copy the | /* Construct a big integer from a (machine) integer */ |
| code into it: right after the kernel code/data */ | big_number_t bn_new(unsigned long int i) |
| sos_vaddr_t vpage_tmp = sos_kernel_core_top_vaddr; | { |
| | big_number_t retval; |
| unsigned i; | list_init(retval); |
| | do |
| | { |
| | bn_push_msd(&retval, i%10); |
| | i /= 10; |
| | } |
| | while (i != 0); |
| /* Bind the page fault exception to one of our routines */ | return retval; |
| sos_exception_set_routine(SOS_EXCEPT_PAGE_FAULT, | } |
| pgflt_ex); | |
| /* | |
| * Test 1: move the page where we execute the code elsewhere in | |
| * physical memory | |
| */ | |
| sos_x86_videomem_printf(4, 0, | |
| SOS_X86_VIDEO_FG_LTGREEN | SOS_X86_VIDEO_BG_BLUE, | |
| "Moving current code elsewhere in physical memory:"); | |
| | /* Create a new big integer from another big integer */ |
| | big_number_t bn_copy(const big_number_t bn) |
| | { |
| | big_number_t retval; |
| | int nb_elts; |
| | struct digit *d; |
| | |
| /* Allocate a new physical page */ | list_init(retval); |
| ppage_new = sos_physmem_ref_physpage_new(FALSE); | list_foreach(bn, d, nb_elts) |
| if (! ppage_new) | |
| /* STOP ! No memory left */ | bn_push_lsd(&retval, d->value); |
| sos_x86_videomem_putstring(20, 0, | |
| SOS_X86_VIDEO_FG_LTRED | |
| | SOS_X86_VIDEO_BG_BLUE, | |
| "test_paging : Cannot allocate page"); | |
| return; | |
| | |
| sos_x86_videomem_printf(5, 0, | return retval; |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, | } |
| "Hello from the address 0x%x in physical memory", | |
| sos_paging_get_paddr(vpage_code)); | |
| sos_x86_videomem_printf(6, 0, | |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, | |
| "Transfer vpage 0x%x: ppage 0x%x -> 0x%x (tmp vpage 0x%x)", | |
| vpage_code, | |
| sos_paging_get_paddr(vpage_code), | |
| ppage_new, | |
| (unsigned)vpage_tmp); | |
| | |
| /* Map the page somewhere (right after the kernel mapping) in order | |
| to copy the code we are currently executing */ | |
| sos_paging_map(ppage_new, vpage_tmp, | |
| FALSE, | |
| SOS_VM_MAP_ATOMIC | |
| | SOS_VM_MAP_PROT_READ | |
| | SOS_VM_MAP_PROT_WRITE); | |
| | |
| /* Ok, the new page is referenced by the mapping, we can release our | |
| reference to it */ | |
| sos_physmem_unref_physpage(ppage_new); | |
| | |
| /* Copy the contents of the current page of code to this new page | |
| mapping */ | |
| memcpy((void*)vpage_tmp, | |
| (void*)vpage_code, | |
| SOS_PAGE_SIZE); | |
| | |
| /* Transfer the mapping of the current page of code to this new page */ | |
| sos_paging_map(ppage_new, vpage_code, | |
| FALSE, | |
| SOS_VM_MAP_ATOMIC | |
| | SOS_VM_MAP_PROT_READ | |
| | SOS_VM_MAP_PROT_WRITE); | |
| | |
| /* Ok, here we are: we have changed the physical page that holds the | 
| code we are executing ;). However, this new page is mapped at 2 | |
| virtual addresses: | |
| - vpage_tmp | |
| - vpage_code | |
| We can safely unmap it from sos_kernel_core_top_vaddr, while | |
| still keeping the vpage_code mapping */ | |
| sos_paging_unmap(vpage_tmp); | |
| sos_x86_videomem_printf(7, 0, | /* Free the memory used by a big integer */ |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, | void bn_del(big_number_t * bn) |
| "Hello from the address 0x%x in physical memory", | { |
| sos_paging_get_paddr(vpage_code)); | struct digit *d; |
| sos_x86_videomem_printf(9, 0, | list_collapse(*bn, d) |
| SOS_X86_VIDEO_FG_LTGREEN | SOS_X86_VIDEO_BG_BLUE, | { |
| "Provoking a page fault:"); | sos_kfree((sos_vaddr_t)d); |
| | } |
| | } |
| | |
| | |
| | /* Shift left a big integer: bn := bn*10^shift */ |
| | void bn_shift(big_number_t *bn, int shift) |
| | { |
| | for ( ; shift > 0 ; shift --) |
| | { |
| | bn_push_lsd(bn, 0); |
| | } |
| | } |
| | |
| | |
| | /* Dump the big integer in bochs */ |
| | void bn_print_bochs(const big_number_t bn) |
| | { |
| | int nb_elts; |
| | const struct digit *d; |
| | |
| | if (list_is_empty(bn)) |
| | sos_bochs_printf("0"); |
| | else |
| | list_foreach(bn, d, nb_elts) |
| | sos_bochs_printf("%d", d->value); |
| | } |
| | |
| | /* Dump the big integer on the console */ |
| | void bn_print_console(unsigned char row, unsigned char col, |
| | unsigned char attribute, |
| | const big_number_t bn, |
| | int nb_decimals) |
| | { |
| | if (list_is_empty(bn)) |
| | sos_x86_videomem_printf(row, col, attribute, "0"); |
| | else |
| | { |
| | int nb_elts; |
| | const struct digit *d; |
| | unsigned char x = col; |
| | |
| | list_foreach(bn, d, nb_elts) |
| | { |
| | if (nb_elts == 0) |
| | { |
| | sos_x86_videomem_printf(row, x, attribute, "%d.", d->value); |
| | x += 2; |
| | } |
| | else if (nb_elts < nb_decimals) |
| | { |
| | sos_x86_videomem_printf(row, x, attribute, "%d", d->value); |
| | x ++; |
| | } |
| | } |
| | |
| | sos_x86_videomem_printf(row, x, attribute, " . 10^{%d} ", nb_elts-1); |
| | } |
| | } |
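For example (assuming, as the final printf suggests, that list_foreach leaves nb_elts equal to the total number of digits): printing the four-digit number 5535 with nb_decimals = 3 emits "5.53 . 10^{3}", i.e. roughly 5.53 x 10^3: the MSD followed by a dot, then nb_decimals - 1 further digits, then an exponent equal to the digit count minus one.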
| /* | |
| * Test 2: make sure the #PF handler works | |
| */ | |
| /* Scan part of the kernel up to a page fault. This page fault | /* Result is the addition of 2 big integers */ |
| should occur on the first page unmapped after the kernel area, | big_number_t bn_add (const big_number_t bn1, const big_number_t bn2) |
| which is exactly the page we temporarily mapped/unmapped | { |
| (vpage_tmp) above to move the kernel code we are executing */ | big_number_t retval; |
| for (i = vpage_code ; /* none */ ; i += SOS_PAGE_SIZE) | const struct digit *d1, *d2; |
| { | sos_bool_t bn1_end = FALSE, bn2_end = FALSE; |
| unsigned *pint = (unsigned *)SOS_PAGE_ALIGN_INF(i); | char carry = 0; |
| sos_bochs_printf("Test vaddr 0x%x : val=", (unsigned)pint); | |
| sos_x86_videomem_printf(10, 0, | list_init(retval); |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, | d1 = list_get_tail(bn1); |
| "Test vaddr 0x%x : val= ", | bn1_end = list_is_empty(bn1); |
| (unsigned)pint); | d2 = list_get_tail(bn2); |
| sos_bochs_printf("0x%x\n", *pint); | bn2_end = list_is_empty(bn2); |
| sos_x86_videomem_printf(10, 30, | do |
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, | { |
| "0x%x ", *pint); | if (! bn1_end) |
| } | carry += d1->value; |
| | if (! bn2_end) |
| /* BAD ! Did not get the page fault... */ | carry += d2->value; |
| sos_x86_videomem_printf(20, 0, | |
| SOS_X86_VIDEO_FG_LTRED | SOS_X86_VIDEO_BG_BLUE, | bn_push_msd(&retval, carry % 10); |
| "We should have had a #PF at vaddr 0x%x !", | carry /= 10; |
| vpage_tmp); | |
| | if (! bn1_end) |
| | d1 = d1->prev; |
| | if (! bn2_end) |
| | d2 = d2->prev; |
| | if (d1 == list_get_tail(bn1)) |
| | bn1_end = TRUE; |
| | if (d2 == list_get_tail(bn2)) |
| | bn2_end = TRUE; |
| | } |
| | while (!bn1_end || !bn2_end); |
| | |
| | if (carry > 0) |
| | { |
| | bn_push_msd(&retval, carry); |
| | } |
| | |
| | return retval; |
| | |
| | |
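Worked example (illustration only): adding 57 and 68, the loop starts at the two LSDs. First 7 + 8 = 15, so digit 5 is pushed as the new MSD of the result and the carry becomes 1; then 5 + 6 + 1 = 12, so digit 2 is pushed and the carry is again 1; both input lists are now exhausted, the remaining carry 1 is pushed last, and the result reads 1 -> 2 -> 5 from MSD to LSD, i.e. 125.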
| | /* Result is the multiplication of a big integer by a single digit */ |
| | big_number_t bn_muli (const big_number_t bn, char digit) |
| | { |
| | big_number_t retval; |
| | int nb_elts; |
| | char carry = 0; |
| | const struct digit *d; |
| | |
| | list_init(retval); |
| | list_foreach_backward(bn, d, nb_elts) |
| | { |
| | carry += d->value * digit; |
| | bn_push_msd(&retval, carry % 10); |
| | carry /= 10; |
| | } |
| | |
| | if (carry > 0) |
| | { |
| | bn_push_msd(&retval, carry); |
| | } |
| | |
| | return retval; |
| | } |
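Worked example: bn_muli applied to 123 and the digit 4 walks the list backward from the LSD. 3 x 4 = 12 yields digit 2 and carry 1; 2 x 4 + 1 = 9 yields digit 9 and carry 0; 1 x 4 = 4 yields digit 4 with no carry left, so the result is 4 -> 9 -> 2, i.e. 492.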
| | |
| | |
| | /* Result is the multiplication of 2 big integers */ |
| | big_number_t bn_mult(const big_number_t bn1, const big_number_t bn2) |
| | { |
| | int shift = 0; |
| | big_number_t retval; |
| | int nb_elts; |
| | struct digit *d; |
| | |
| | list_init(retval); |
| | list_foreach_backward(bn2, d, nb_elts) |
| | { |
| | big_number_t retmult = bn_muli(bn1, d->value); |
| | big_number_t old_retval = retval; |
| | bn_shift(& retmult, shift); |
| | retval = bn_add(old_retval, retmult); |
| | bn_del(& retmult); |
| | bn_del(& old_retval); |
| | shift ++; |
| | } |
| | |
| | return retval; |
| | } |
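bn_mult is thus schoolbook long multiplication: each digit of bn2, taken from the LSD upward, multiplies bn1 with bn_muli, the partial product is shifted to the digit's position with bn_shift, and the running total is rebuilt with bn_add while the intermediate big numbers are released with bn_del. For instance, 123 x 45 is computed as 123 x 5 + (123 x 4) x 10 = 615 + 4920 = 5535.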
| | |
| | |
| | /* Result is the factorial of an integer */ |
| | big_number_t bn_fact(unsigned long int v) |
| | { |
| | unsigned long int i; |
| | big_number_t retval = bn_new(1); |
| | for (i = 1 ; i <= v ; i++) |
| | { |
| | big_number_t I = bn_new(i); |
| | big_number_t tmp = bn_mult(retval, I); |
| | sos_x86_videomem_printf(4, 0, |
| | SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_LTGREEN, |
| | "%d! = ", (int)i); |
| | bn_print_console(4, 8, SOS_X86_VIDEO_BG_BLUE | SOS_X86_VIDEO_FG_WHITE, |
| | tmp, 55); |
| | bn_del(& I); |
| | bn_del(& retval); |
| | retval = tmp; |
| | } |
| | |
| | return retval; |
| | } |
| | |
| | |
| | void bn_test() |
| | { |
| | big_number_t bn = bn_fact(1000); |
| | sos_bochs_printf("1000! = "); |
| | bn_print_bochs(bn); |
| | sos_bochs_printf("\n"); |
| | |
| | } |
| | |
| | |
| | |
| /* The C entry point of our operating system */ | /* The C entry point of our operating system */ |
| void sos_main(unsigned long magic, unsigned long addr) | void sos_main(unsigned long magic, unsigned long addr) |
| { | { |
|
|
|
| SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, | SOS_X86_VIDEO_FG_YELLOW | SOS_X86_VIDEO_BG_BLUE, |
| "Paged-memory mode is activated"); | "Paged-memory mode is activated"); |
| | |
| test_paging(sos_kernel_core_top_paddr); | |
| | /* |
| | * Setup kernel virtual memory allocator |
| | */ |
| | |
| | if (sos_kmem_vmm_setup(sos_kernel_core_base_paddr, |
| | sos_kernel_core_top_paddr, |
| | bootstrap_stack_bottom, |
| | bootstrap_stack_bottom + bootstrap_stack_size)) |
| | sos_bochs_printf("Could not setup the Kernel virtual space allocator\n"); |
| | |
| | if (sos_kmalloc_setup()) |
| | sos_bochs_printf("Could not setup the Kmalloc subsystem\n"); |
| | |
| | /* Run some kmalloc tests */ |
| | bn_test(); |
| /* | /* |
| * Enabling the HW interrupts here, this will make the timer HW | * Enabling the HW interrupts here, this will make the timer HW |
| * interrupt call our clk_it handler | * interrupt call our clk_it handler |
| */ | */ |
| asm volatile ("sti\n"); | asm volatile ("sti\n"); |
| | |
| /* An operating system never ends */ | /* An operating system never ends */ |
| for (;;) | for (;;) |
| continue; | continue; |
| | |