diff options
author | John Koleszar <jkoleszar@google.com> | 2012-07-13 15:21:29 -0700 |
---|---|---|
committer | John Koleszar <jkoleszar@google.com> | 2012-07-17 11:46:03 -0700 |
commit | c6b9039fd94aede59ac1086a379955137fc8e1b8 (patch) | |
tree | f9b20b2ca2114fe9303c8226bb3b368568fd5509 /vpx_mem | |
parent | 8697c6e454e02c6cf644daa9d29fabd07e846f18 (diff) | |
download | libvpx-c6b9039fd94aede59ac1086a379955137fc8e1b8.tar libvpx-c6b9039fd94aede59ac1086a379955137fc8e1b8.tar.gz libvpx-c6b9039fd94aede59ac1086a379955137fc8e1b8.tar.bz2 libvpx-c6b9039fd94aede59ac1086a379955137fc8e1b8.zip |
Restyle code
Approximate the Google style guide[1] so that there's a written
document to follow and tools to check compliance[2].
[1]: http://google-styleguide.googlecode.com/svn/trunk/cppguide.xml
[2]: http://google-styleguide.googlecode.com/svn/trunk/cpplint/cpplint.py
Change-Id: Idf40e3d8dddcc72150f6af127b13e5dab838685f
Diffstat (limited to 'vpx_mem')
-rw-r--r-- | vpx_mem/include/vpx_mem_intrnl.h | 20 | ||||
-rw-r--r-- | vpx_mem/include/vpx_mem_tracker.h | 287 | ||||
-rw-r--r-- | vpx_mem/memory_manager/hmm_alloc.c | 64 | ||||
-rw-r--r-- | vpx_mem/memory_manager/hmm_base.c | 501 | ||||
-rw-r--r-- | vpx_mem/memory_manager/hmm_dflt_abort.c | 33 | ||||
-rw-r--r-- | vpx_mem/memory_manager/hmm_grow.c | 41 | ||||
-rw-r--r-- | vpx_mem/memory_manager/hmm_largest.c | 61 | ||||
-rw-r--r-- | vpx_mem/memory_manager/hmm_resize.c | 131 | ||||
-rw-r--r-- | vpx_mem/memory_manager/hmm_shrink.c | 136 | ||||
-rw-r--r-- | vpx_mem/memory_manager/hmm_true.c | 19 | ||||
-rw-r--r-- | vpx_mem/memory_manager/include/cavl_if.h | 57 | ||||
-rw-r--r-- | vpx_mem/memory_manager/include/cavl_impl.h | 1579 | ||||
-rw-r--r-- | vpx_mem/memory_manager/include/heapmm.h | 77 | ||||
-rw-r--r-- | vpx_mem/memory_manager/include/hmm_cnfg.h | 10 | ||||
-rw-r--r-- | vpx_mem/memory_manager/include/hmm_intrnl.h | 64 | ||||
-rw-r--r-- | vpx_mem/vpx_mem.c | 807 | ||||
-rw-r--r-- | vpx_mem/vpx_mem.h | 140 | ||||
-rw-r--r-- | vpx_mem/vpx_mem_tracker.c | 710 |
18 files changed, 2219 insertions, 2518 deletions
diff --git a/vpx_mem/include/vpx_mem_intrnl.h b/vpx_mem/include/vpx_mem_intrnl.h index 6e261ba7f..0f58cfc8f 100644 --- a/vpx_mem/include/vpx_mem_intrnl.h +++ b/vpx_mem/include/vpx_mem_intrnl.h @@ -47,8 +47,8 @@ vpx_memcpy, _memset, and _memmove*/ #ifndef DEFAULT_ALIGNMENT # if defined(VXWORKS) # define DEFAULT_ALIGNMENT 32 /*default addr alignment to use in - calls to vpx_* functions other - than vpx_memalign*/ +calls to vpx_* functions other +than vpx_memalign*/ # else # define DEFAULT_ALIGNMENT 1 # endif @@ -60,9 +60,9 @@ vpx_memcpy, _memset, and _memmove*/ #if CONFIG_MEM_TRACKER # define TRY_BOUNDS_CHECK 1 /*when set to 1 pads each allocation, - integrity can be checked using - vpx_memory_tracker_check_integrity - or on free by defining*/ +integrity can be checked using +vpx_memory_tracker_check_integrity +or on free by defining*/ /*TRY_BOUNDS_CHECK_ON_FREE*/ #else # define TRY_BOUNDS_CHECK 0 @@ -70,13 +70,13 @@ vpx_memcpy, _memset, and _memmove*/ #if TRY_BOUNDS_CHECK # define TRY_BOUNDS_CHECK_ON_FREE 0 /*checks mem integrity on every - free, very expensive*/ +free, very expensive*/ # define BOUNDS_CHECK_VALUE 0xdeadbeef /*value stored before/after ea. - mem addr for bounds checking*/ +mem addr for bounds checking*/ # define BOUNDS_CHECK_PAD_SIZE 32 /*size of the padding before and - after ea allocation to be filled - with BOUNDS_CHECK_VALUE. - this should be a multiple of 4*/ +after ea allocation to be filled +with BOUNDS_CHECK_VALUE. +this should be a multiple of 4*/ #else # define BOUNDS_CHECK_VALUE 0 # define BOUNDS_CHECK_PAD_SIZE 0 diff --git a/vpx_mem/include/vpx_mem_tracker.h b/vpx_mem/include/vpx_mem_tracker.h index ef2b29b07..3be0d2ddb 100644 --- a/vpx_mem/include/vpx_mem_tracker.h +++ b/vpx_mem/include/vpx_mem_tracker.h @@ -23,158 +23,157 @@ #include <stdarg.h> -struct mem_block -{ - size_t addr; - unsigned int size, - line; - char *file; - struct mem_block *prev, - * next; - - int padded; // This mem_block has padding for integrity checks. 
- // As of right now, this should only be 0 if - // using vpx_mem_alloc to allocate cache memory. - // 2005-01-11 tjf +struct mem_block { + size_t addr; + unsigned int size, + line; + char *file; + struct mem_block *prev, + * next; + + int padded; // This mem_block has padding for integrity checks. + // As of right now, this should only be 0 if + // using vpx_mem_alloc to allocate cache memory. + // 2005-01-11 tjf }; #if defined(__cplusplus) extern "C" { #endif - /* - vpx_memory_tracker_init(int padding_size, int pad_value) - padding_size - the size of the padding before and after each mem addr. - Values > 0 indicate that integrity checks can be performed - by inspecting these areas. - pad_value - the initial value within the padding area before and after - each mem addr. - - Initializes the memory tracker interface. Should be called before any - other calls to the memory tracker. - */ - int vpx_memory_tracker_init(int padding_size, int pad_value); - - /* - vpx_memory_tracker_destroy() - Deinitializes the memory tracker interface - */ - void vpx_memory_tracker_destroy(); - - /* - vpx_memory_tracker_add(size_t addr, unsigned int size, - char * file, unsigned int line) - addr - memory address to be added to list - size - size of addr - file - the file addr was referenced from - line - the line in file addr was referenced from - Adds memory address addr, it's size, file and line it came from - to the memory tracker allocation table - */ - void vpx_memory_tracker_add(size_t addr, unsigned int size, - char *file, unsigned int line, - int padded); - - /* - vpx_memory_tracker_add(size_t addr, unsigned int size, char * file, unsigned int line) - addr - memory address to be added to be removed - padded - if 0, disables bounds checking on this memory block even if bounds - checking is enabled. 
(for example, when allocating cache memory, we still want - to check for memory leaks, but we do not waste cache space for bounds check padding) - Removes the specified address from the memory tracker's allocation - table - Return: - 0: on success - -1: if memory allocation table's mutex could not be locked - -2: if the addr was not found in the list - */ - int vpx_memory_tracker_remove(size_t addr); - - /* - vpx_memory_tracker_find(unsigned int addr) - addr - address to be found in the memory tracker's - allocation table - Return: - If found, pointer to the memory block that matches addr - NULL otherwise - */ - struct mem_block *vpx_memory_tracker_find(size_t addr); - - /* - vpx_memory_tracker_dump() - Dumps the current contents of the memory - tracker allocation table - */ - void vpx_memory_tracker_dump(); - - /* - vpx_memory_tracker_check_integrity() - If a padding_size was provided to vpx_memory_tracker_init() - This function will verify that the region before and after each - memory address contains the specified pad_value. Should the check - fail, the filename and line of the check will be printed out. - */ - void vpx_memory_tracker_check_integrity(char *file, unsigned int line); - - /* - vpx_memory_tracker_set_log_type - type - value representing the logging type to use - option - type specific option. This will be interpreted differently - based on the type. - Sets the logging type for the memory tracker. - Values currently supported: - 0: if option is NULL, log to stderr, otherwise interpret option as a - filename and attempt to open it. 
- 1: Use output_debug_string (WIN32 only), option ignored - Return: - 0: on success - -1: if the logging type could not be set, because the value was invalid - or because a file could not be opened - */ - int vpx_memory_tracker_set_log_type(int type, char *option); - - /* - vpx_memory_tracker_set_log_func - userdata - ptr to be passed to the supplied logfunc, can be NULL - logfunc - the logging function to be used to output data from - vpx_memory_track_dump/check_integrity - Sets a logging function to be used by the memory tracker. - Return: - 0: on success - -1: if the logging type could not be set because logfunc was NULL - */ - int vpx_memory_tracker_set_log_func(void *userdata, - void(*logfunc)(void *userdata, - const char *fmt, va_list args)); - - /* Wrappers to standard library functions. */ - typedef void*(* mem_track_malloc_func)(size_t); - typedef void*(* mem_track_calloc_func)(size_t, size_t); - typedef void*(* mem_track_realloc_func)(void *, size_t); - typedef void (* mem_track_free_func)(void *); - typedef void*(* mem_track_memcpy_func)(void *, const void *, size_t); - typedef void*(* mem_track_memset_func)(void *, int, size_t); - typedef void*(* mem_track_memmove_func)(void *, const void *, size_t); - - /* - vpx_memory_tracker_set_functions - - Sets the function pointers for the standard library functions. - - Return: - 0: on success - -1: if the use global function pointers is not set. - */ - int vpx_memory_tracker_set_functions(mem_track_malloc_func g_malloc_l - , mem_track_calloc_func g_calloc_l - , mem_track_realloc_func g_realloc_l - , mem_track_free_func g_free_l - , mem_track_memcpy_func g_memcpy_l - , mem_track_memset_func g_memset_l - , mem_track_memmove_func g_memmove_l); + /* + vpx_memory_tracker_init(int padding_size, int pad_value) + padding_size - the size of the padding before and after each mem addr. + Values > 0 indicate that integrity checks can be performed + by inspecting these areas. 
+ pad_value - the initial value within the padding area before and after + each mem addr. + + Initializes the memory tracker interface. Should be called before any + other calls to the memory tracker. + */ + int vpx_memory_tracker_init(int padding_size, int pad_value); + + /* + vpx_memory_tracker_destroy() + Deinitializes the memory tracker interface + */ + void vpx_memory_tracker_destroy(); + + /* + vpx_memory_tracker_add(size_t addr, unsigned int size, + char * file, unsigned int line) + addr - memory address to be added to list + size - size of addr + file - the file addr was referenced from + line - the line in file addr was referenced from + Adds memory address addr, it's size, file and line it came from + to the memory tracker allocation table + */ + void vpx_memory_tracker_add(size_t addr, unsigned int size, + char *file, unsigned int line, + int padded); + + /* + vpx_memory_tracker_add(size_t addr, unsigned int size, char * file, unsigned int line) + addr - memory address to be added to be removed + padded - if 0, disables bounds checking on this memory block even if bounds + checking is enabled. 
(for example, when allocating cache memory, we still want + to check for memory leaks, but we do not waste cache space for bounds check padding) + Removes the specified address from the memory tracker's allocation + table + Return: + 0: on success + -1: if memory allocation table's mutex could not be locked + -2: if the addr was not found in the list + */ + int vpx_memory_tracker_remove(size_t addr); + + /* + vpx_memory_tracker_find(unsigned int addr) + addr - address to be found in the memory tracker's + allocation table + Return: + If found, pointer to the memory block that matches addr + NULL otherwise + */ + struct mem_block *vpx_memory_tracker_find(size_t addr); + + /* + vpx_memory_tracker_dump() + Dumps the current contents of the memory + tracker allocation table + */ + void vpx_memory_tracker_dump(); + + /* + vpx_memory_tracker_check_integrity() + If a padding_size was provided to vpx_memory_tracker_init() + This function will verify that the region before and after each + memory address contains the specified pad_value. Should the check + fail, the filename and line of the check will be printed out. + */ + void vpx_memory_tracker_check_integrity(char *file, unsigned int line); + + /* + vpx_memory_tracker_set_log_type + type - value representing the logging type to use + option - type specific option. This will be interpreted differently + based on the type. + Sets the logging type for the memory tracker. + Values currently supported: + 0: if option is NULL, log to stderr, otherwise interpret option as a + filename and attempt to open it. 
+ 1: Use output_debug_string (WIN32 only), option ignored + Return: + 0: on success + -1: if the logging type could not be set, because the value was invalid + or because a file could not be opened + */ + int vpx_memory_tracker_set_log_type(int type, char *option); + + /* + vpx_memory_tracker_set_log_func + userdata - ptr to be passed to the supplied logfunc, can be NULL + logfunc - the logging function to be used to output data from + vpx_memory_track_dump/check_integrity + Sets a logging function to be used by the memory tracker. + Return: + 0: on success + -1: if the logging type could not be set because logfunc was NULL + */ + int vpx_memory_tracker_set_log_func(void *userdata, + void(*logfunc)(void *userdata, + const char *fmt, va_list args)); + + /* Wrappers to standard library functions. */ + typedef void *(* mem_track_malloc_func)(size_t); + typedef void *(* mem_track_calloc_func)(size_t, size_t); + typedef void *(* mem_track_realloc_func)(void *, size_t); + typedef void (* mem_track_free_func)(void *); + typedef void *(* mem_track_memcpy_func)(void *, const void *, size_t); + typedef void *(* mem_track_memset_func)(void *, int, size_t); + typedef void *(* mem_track_memmove_func)(void *, const void *, size_t); + + /* + vpx_memory_tracker_set_functions + + Sets the function pointers for the standard library functions. + + Return: + 0: on success + -1: if the use global function pointers is not set. 
+ */ + int vpx_memory_tracker_set_functions(mem_track_malloc_func g_malloc_l +, mem_track_calloc_func g_calloc_l +, mem_track_realloc_func g_realloc_l +, mem_track_free_func g_free_l +, mem_track_memcpy_func g_memcpy_l +, mem_track_memset_func g_memset_l +, mem_track_memmove_func g_memmove_l); #if defined(__cplusplus) } #endif -#endif //__VPX_MEM_TRACKER_H__ +#endif // __VPX_MEM_TRACKER_H__ diff --git a/vpx_mem/memory_manager/hmm_alloc.c b/vpx_mem/memory_manager/hmm_alloc.c index 22c4a54ee..ab3562dfb 100644 --- a/vpx_mem/memory_manager/hmm_alloc.c +++ b/vpx_mem/memory_manager/hmm_alloc.c @@ -15,46 +15,44 @@ #include "hmm_intrnl.h" -void *U(alloc)(U(descriptor) *desc, U(size_aau) n) -{ +void *U(alloc)(U(descriptor) *desc, U(size_aau) n) { #ifdef HMM_AUDIT_FAIL - if (desc->avl_tree_root) - AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root)) + if (desc->avl_tree_root) + AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root)) #endif - if (desc->last_freed) - { + if (desc->last_freed) { #ifdef HMM_AUDIT_FAIL - AUDIT_BLOCK(desc->last_freed) + AUDIT_BLOCK(desc->last_freed) #endif - U(into_free_collection)(desc, (head_record *)(desc->last_freed)); + U(into_free_collection)(desc, (head_record *)(desc->last_freed)); - desc->last_freed = 0; - } - - /* Add space for block header. */ - n += HEAD_AAUS; - - /* Convert n from number of address alignment units to block alignment - ** units. */ - n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT); - - if (n < MIN_BLOCK_BAUS) - n = MIN_BLOCK_BAUS; - - { - /* Search for the first node of the bin containing the smallest - ** block big enough to satisfy request. */ - ptr_record *ptr_rec_ptr = - U(avl_search)( - (U(avl_avl) *) & (desc->avl_tree_root), (U(size_bau)) n, - AVL_GREATER_EQUAL); - - /* If an approprate bin is found, satisfy the allocation request, - ** otherwise return null pointer. */ - return(ptr_rec_ptr ? - U(alloc_from_bin)(desc, ptr_rec_ptr, (U(size_bau)) n) : 0); + desc->last_freed = 0; } + + /* Add space for block header. 
*/ + n += HEAD_AAUS; + + /* Convert n from number of address alignment units to block alignment + ** units. */ + n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT); + + if (n < MIN_BLOCK_BAUS) + n = MIN_BLOCK_BAUS; + + { + /* Search for the first node of the bin containing the smallest + ** block big enough to satisfy request. */ + ptr_record *ptr_rec_ptr = + U(avl_search)( + (U(avl_avl) *) & (desc->avl_tree_root), (U(size_bau)) n, + AVL_GREATER_EQUAL); + + /* If an approprate bin is found, satisfy the allocation request, + ** otherwise return null pointer. */ + return(ptr_rec_ptr ? + U(alloc_from_bin)(desc, ptr_rec_ptr, (U(size_bau)) n) : 0); + } } diff --git a/vpx_mem/memory_manager/hmm_base.c b/vpx_mem/memory_manager/hmm_base.c index ad1da032e..0eff59d20 100644 --- a/vpx_mem/memory_manager/hmm_base.c +++ b/vpx_mem/memory_manager/hmm_base.c @@ -15,58 +15,53 @@ #include "hmm_intrnl.h" -void U(init)(U(descriptor) *desc) -{ - desc->avl_tree_root = 0; - desc->last_freed = 0; +void U(init)(U(descriptor) *desc) { + desc->avl_tree_root = 0; + desc->last_freed = 0; } /* Remove a free block from a bin's doubly-linked list when it is not, ** the first block in the bin. */ void U(dll_remove)( - /* Pointer to pointer record in the block to be removed. */ - ptr_record *to_remove) -{ - to_remove->prev->next = to_remove->next; + /* Pointer to pointer record in the block to be removed. */ + ptr_record *to_remove) { + to_remove->prev->next = to_remove->next; - if (to_remove->next) - to_remove->next->prev = to_remove->prev; + if (to_remove->next) + to_remove->next->prev = to_remove->prev; } /* Put a block into the free collection of a heap. */ void U(into_free_collection)( - /* Pointer to heap descriptor. */ - U(descriptor) *desc, - /* Pointer to head record of block. 
*/ - head_record *head_ptr) -{ - ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr); - - ptr_record *bin_front_ptr = - U(avl_insert)((U(avl_avl) *) & (desc->avl_tree_root), ptr_rec_ptr); - - if (bin_front_ptr != ptr_rec_ptr) - { - /* The block was not inserted into the AVL tree because there is - ** already a bin for the size of the block. */ - - MARK_SUCCESSIVE_BLOCK_IN_FREE_BIN(head_ptr) - ptr_rec_ptr->self = ptr_rec_ptr; - - /* Make the block the new second block in the bin's doubly-linked - ** list. */ - ptr_rec_ptr->prev = bin_front_ptr; - ptr_rec_ptr->next = bin_front_ptr->next; - bin_front_ptr->next = ptr_rec_ptr; - - if (ptr_rec_ptr->next) - ptr_rec_ptr->next->prev = ptr_rec_ptr; - } - else - /* Block is first block in new bin. */ - ptr_rec_ptr->next = 0; + /* Pointer to heap descriptor. */ + U(descriptor) *desc, + /* Pointer to head record of block. */ + head_record *head_ptr) { + ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr); + + ptr_record *bin_front_ptr = + U(avl_insert)((U(avl_avl) *) & (desc->avl_tree_root), ptr_rec_ptr); + + if (bin_front_ptr != ptr_rec_ptr) { + /* The block was not inserted into the AVL tree because there is + ** already a bin for the size of the block. */ + + MARK_SUCCESSIVE_BLOCK_IN_FREE_BIN(head_ptr) + ptr_rec_ptr->self = ptr_rec_ptr; + + /* Make the block the new second block in the bin's doubly-linked + ** list. */ + ptr_rec_ptr->prev = bin_front_ptr; + ptr_rec_ptr->next = bin_front_ptr->next; + bin_front_ptr->next = ptr_rec_ptr; + + if (ptr_rec_ptr->next) + ptr_rec_ptr->next->prev = ptr_rec_ptr; + } else + /* Block is first block in new bin. */ + ptr_rec_ptr->next = 0; } /* Allocate a block from a given bin. Returns a pointer to the payload @@ -74,268 +69,245 @@ void U(into_free_collection)( ** to calling this function. */ void *U(alloc_from_bin)( - /* Pointer to heap descriptor. */ - U(descriptor) *desc, - /* Pointer to pointer record of first block in bin. 
*/ - ptr_record *bin_front_ptr, - /* Number of BAUs needed in the allocated block. If the block taken - ** from the bin is significantly larger than the number of BAUs needed, - ** the "extra" BAUs are split off to form a new free block. */ - U(size_bau) n_baus) -{ - head_record *head_ptr; - U(size_bau) rem_baus; - - if (bin_front_ptr->next) - { - /* There are multiple blocks in this bin. Use the 2nd block in - ** the bin to avoid needless change to the AVL tree. - */ - - ptr_record *ptr_rec_ptr = bin_front_ptr->next; - head_ptr = PTR_REC_TO_HEAD(ptr_rec_ptr); + /* Pointer to heap descriptor. */ + U(descriptor) *desc, + /* Pointer to pointer record of first block in bin. */ + ptr_record *bin_front_ptr, + /* Number of BAUs needed in the allocated block. If the block taken + ** from the bin is significantly larger than the number of BAUs needed, + ** the "extra" BAUs are split off to form a new free block. */ + U(size_bau) n_baus) { + head_record *head_ptr; + U(size_bau) rem_baus; + + if (bin_front_ptr->next) { + /* There are multiple blocks in this bin. Use the 2nd block in + ** the bin to avoid needless change to the AVL tree. + */ + + ptr_record *ptr_rec_ptr = bin_front_ptr->next; + head_ptr = PTR_REC_TO_HEAD(ptr_rec_ptr); #ifdef AUDIT_FAIL - AUDIT_BLOCK(head_ptr) + AUDIT_BLOCK(head_ptr) #endif - U(dll_remove)(ptr_rec_ptr); - } - else - { - /* There is only one block in the bin, so it has to be removed - ** from the AVL tree. - */ + U(dll_remove)(ptr_rec_ptr); + } else { + /* There is only one block in the bin, so it has to be removed + ** from the AVL tree. 
+ */ - head_ptr = PTR_REC_TO_HEAD(bin_front_ptr); + head_ptr = PTR_REC_TO_HEAD(bin_front_ptr); - U(avl_remove)( - (U(avl_avl) *) &(desc->avl_tree_root), BLOCK_BAUS(head_ptr)); - } + U(avl_remove)( + (U(avl_avl) *) & (desc->avl_tree_root), BLOCK_BAUS(head_ptr)); + } - MARK_BLOCK_ALLOCATED(head_ptr) + MARK_BLOCK_ALLOCATED(head_ptr) - rem_baus = BLOCK_BAUS(head_ptr) - n_baus; + rem_baus = BLOCK_BAUS(head_ptr) - n_baus; - if (rem_baus >= MIN_BLOCK_BAUS) - { - /* Since there are enough "extra" BAUs, split them off to form - ** a new free block. - */ + if (rem_baus >= MIN_BLOCK_BAUS) { + /* Since there are enough "extra" BAUs, split them off to form + ** a new free block. + */ - head_record *rem_head_ptr = - (head_record *) BAUS_FORWARD(head_ptr, n_baus); + head_record *rem_head_ptr = + (head_record *) BAUS_FORWARD(head_ptr, n_baus); - /* Change the next block's header to reflect the fact that the - ** block preceeding it is now smaller. - */ - SET_PREV_BLOCK_BAUS( - BAUS_FORWARD(head_ptr, head_ptr->block_size), rem_baus) + /* Change the next block's header to reflect the fact that the + ** block preceeding it is now smaller. + */ + SET_PREV_BLOCK_BAUS( + BAUS_FORWARD(head_ptr, head_ptr->block_size), rem_baus) - head_ptr->block_size = n_baus; + head_ptr->block_size = n_baus; - rem_head_ptr->previous_block_size = n_baus; - rem_head_ptr->block_size = rem_baus; + rem_head_ptr->previous_block_size = n_baus; + rem_head_ptr->block_size = rem_baus; - desc->last_freed = rem_head_ptr; - } + desc->last_freed = rem_head_ptr; + } - return(HEAD_TO_PTR_REC(head_ptr)); + return(HEAD_TO_PTR_REC(head_ptr)); } /* Take a block out of the free collection. */ void U(out_of_free_collection)( - /* Descriptor of heap that block is in. */ - U(descriptor) *desc, - /* Pointer to head of block to take out of free collection. 
*/ - head_record *head_ptr) -{ - ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr); - - if (ptr_rec_ptr->self == ptr_rec_ptr) - /* Block is not the front block in its bin, so all we have to - ** do is take it out of the bin's doubly-linked list. */ - U(dll_remove)(ptr_rec_ptr); + /* Descriptor of heap that block is in. */ + U(descriptor) *desc, + /* Pointer to head of block to take out of free collection. */ + head_record *head_ptr) { + ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr); + + if (ptr_rec_ptr->self == ptr_rec_ptr) + /* Block is not the front block in its bin, so all we have to + ** do is take it out of the bin's doubly-linked list. */ + U(dll_remove)(ptr_rec_ptr); + else { + ptr_record *next = ptr_rec_ptr->next; + + if (next) + /* Block is the front block in its bin, and there is at least + ** one other block in the bin. Substitute the next block for + ** the front block. */ + U(avl_subst)((U(avl_avl) *) & (desc->avl_tree_root), next); else - { - ptr_record *next = ptr_rec_ptr->next; - - if (next) - /* Block is the front block in its bin, and there is at least - ** one other block in the bin. Substitute the next block for - ** the front block. */ - U(avl_subst)((U(avl_avl) *) &(desc->avl_tree_root), next); - else - /* Block is the front block in its bin, but there is no other - ** block in the bin. Eliminate the bin. */ - U(avl_remove)( - (U(avl_avl) *) &(desc->avl_tree_root), BLOCK_BAUS(head_ptr)); - } + /* Block is the front block in its bin, but there is no other + ** block in the bin. Eliminate the bin. */ + U(avl_remove)( + (U(avl_avl) *) & (desc->avl_tree_root), BLOCK_BAUS(head_ptr)); + } } -void U(free)(U(descriptor) *desc, void *payload_ptr) -{ - /* Flags if coalesce with adjacent block. */ - int coalesce; +void U(free)(U(descriptor) *desc, void *payload_ptr) { + /* Flags if coalesce with adjacent block. 
*/ + int coalesce; - head_record *fwd_head_ptr; - head_record *free_head_ptr = PTR_REC_TO_HEAD(payload_ptr); + head_record *fwd_head_ptr; + head_record *free_head_ptr = PTR_REC_TO_HEAD(payload_ptr); - desc->num_baus_can_shrink = 0; + desc->num_baus_can_shrink = 0; #ifdef HMM_AUDIT_FAIL - AUDIT_BLOCK(free_head_ptr) + AUDIT_BLOCK(free_head_ptr) - /* Make sure not freeing an already free block. */ - if (!IS_BLOCK_ALLOCATED(free_head_ptr)) - HMM_AUDIT_FAIL + /* Make sure not freeing an already free block. */ + if (!IS_BLOCK_ALLOCATED(free_head_ptr)) + HMM_AUDIT_FAIL - if (desc->avl_tree_root) - /* Audit root block in AVL tree. */ - AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root)) + if (desc->avl_tree_root) + /* Audit root block in AVL tree. */ + AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root)) #endif - fwd_head_ptr = - (head_record *) BAUS_FORWARD(free_head_ptr, free_head_ptr->block_size); + fwd_head_ptr = + (head_record *) BAUS_FORWARD(free_head_ptr, free_head_ptr->block_size); - if (free_head_ptr->previous_block_size) - { - /* Coalesce with backward block if possible. */ + if (free_head_ptr->previous_block_size) { + /* Coalesce with backward block if possible. 
*/ - head_record *bkwd_head_ptr = - (head_record *) BAUS_BACKWARD( - free_head_ptr, free_head_ptr->previous_block_size); + head_record *bkwd_head_ptr = + (head_record *) BAUS_BACKWARD( + free_head_ptr, free_head_ptr->previous_block_size); #ifdef HMM_AUDIT_FAIL - AUDIT_BLOCK(bkwd_head_ptr) + AUDIT_BLOCK(bkwd_head_ptr) #endif - if (bkwd_head_ptr == (head_record *)(desc->last_freed)) - { - desc->last_freed = 0; - coalesce = 1; - } - else if (IS_BLOCK_ALLOCATED(bkwd_head_ptr)) - coalesce = 0; - else - { - U(out_of_free_collection)(desc, bkwd_head_ptr); - coalesce = 1; - } - - if (coalesce) - { - bkwd_head_ptr->block_size += free_head_ptr->block_size; - SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(bkwd_head_ptr)) - free_head_ptr = bkwd_head_ptr; - } + if (bkwd_head_ptr == (head_record *)(desc->last_freed)) { + desc->last_freed = 0; + coalesce = 1; + } else if (IS_BLOCK_ALLOCATED(bkwd_head_ptr)) + coalesce = 0; + else { + U(out_of_free_collection)(desc, bkwd_head_ptr); + coalesce = 1; } - if (fwd_head_ptr->block_size == 0) - { - /* Block to be freed is last block before dummy end-of-chunk block. */ - desc->end_of_shrinkable_chunk = - BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS); - desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr); - - if (PREV_BLOCK_BAUS(free_head_ptr) == 0) - /* Free block is the entire chunk, so shrinking can eliminate - ** entire chunk including dummy end block. */ - desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS; + if (coalesce) { + bkwd_head_ptr->block_size += free_head_ptr->block_size; + SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(bkwd_head_ptr)) + free_head_ptr = bkwd_head_ptr; } - else - { - /* Coalesce with forward block if possible. */ + } + + if (fwd_head_ptr->block_size == 0) { + /* Block to be freed is last block before dummy end-of-chunk block. 
*/ + desc->end_of_shrinkable_chunk = + BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS); + desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr); + + if (PREV_BLOCK_BAUS(free_head_ptr) == 0) + /* Free block is the entire chunk, so shrinking can eliminate + ** entire chunk including dummy end block. */ + desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS; + } else { + /* Coalesce with forward block if possible. */ #ifdef HMM_AUDIT_FAIL - AUDIT_BLOCK(fwd_head_ptr) + AUDIT_BLOCK(fwd_head_ptr) #endif - if (fwd_head_ptr == (head_record *)(desc->last_freed)) - { - desc->last_freed = 0; - coalesce = 1; - } - else if (IS_BLOCK_ALLOCATED(fwd_head_ptr)) - coalesce = 0; - else - { - U(out_of_free_collection)(desc, fwd_head_ptr); - coalesce = 1; - } - - if (coalesce) - { - free_head_ptr->block_size += fwd_head_ptr->block_size; - - fwd_head_ptr = - (head_record *) BAUS_FORWARD( - fwd_head_ptr, BLOCK_BAUS(fwd_head_ptr)); - - SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(free_head_ptr)) - - if (fwd_head_ptr->block_size == 0) - { - /* Coalesced block to be freed is last block before dummy - ** end-of-chunk block. */ - desc->end_of_shrinkable_chunk = - BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS); - desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr); - - if (PREV_BLOCK_BAUS(free_head_ptr) == 0) - /* Free block is the entire chunk, so shrinking can - ** eliminate entire chunk including dummy end block. 
*/ - desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS; - } - } + if (fwd_head_ptr == (head_record *)(desc->last_freed)) { + desc->last_freed = 0; + coalesce = 1; + } else if (IS_BLOCK_ALLOCATED(fwd_head_ptr)) + coalesce = 0; + else { + U(out_of_free_collection)(desc, fwd_head_ptr); + coalesce = 1; + } + + if (coalesce) { + free_head_ptr->block_size += fwd_head_ptr->block_size; + + fwd_head_ptr = + (head_record *) BAUS_FORWARD( + fwd_head_ptr, BLOCK_BAUS(fwd_head_ptr)); + + SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(free_head_ptr)) + + if (fwd_head_ptr->block_size == 0) { + /* Coalesced block to be freed is last block before dummy + ** end-of-chunk block. */ + desc->end_of_shrinkable_chunk = + BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS); + desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr); + + if (PREV_BLOCK_BAUS(free_head_ptr) == 0) + /* Free block is the entire chunk, so shrinking can + ** eliminate entire chunk including dummy end block. */ + desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS; + } } + } - if (desc->last_freed) - { - /* There is a last freed block, but it is not adjacent to the - ** block being freed by this call to free, so put the last - ** freed block into the free collection. - */ + if (desc->last_freed) { + /* There is a last freed block, but it is not adjacent to the + ** block being freed by this call to free, so put the last + ** freed block into the free collection. + */ #ifdef HMM_AUDIT_FAIL - AUDIT_BLOCK(desc->last_freed) + AUDIT_BLOCK(desc->last_freed) #endif - U(into_free_collection)(desc, (head_record *)(desc->last_freed)); - } + U(into_free_collection)(desc, (head_record *)(desc->last_freed)); + } - desc->last_freed = free_head_ptr; + desc->last_freed = free_head_ptr; } -void U(new_chunk)(U(descriptor) *desc, void *start, U(size_bau) n_baus) -{ +void U(new_chunk)(U(descriptor) *desc, void *start, U(size_bau) n_baus) { #ifdef HMM_AUDIT_FAIL - if (desc->avl_tree_root) - /* Audit root block in AVL tree. 
*/ - AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root)) + if (desc->avl_tree_root) + /* Audit root block in AVL tree. */ + AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root)) #endif #undef HEAD_PTR #define HEAD_PTR ((head_record *) start) - /* Make the chunk one big free block followed by a dummy end block. - */ + /* Make the chunk one big free block followed by a dummy end block. + */ - n_baus -= DUMMY_END_BLOCK_BAUS; + n_baus -= DUMMY_END_BLOCK_BAUS; - HEAD_PTR->previous_block_size = 0; - HEAD_PTR->block_size = n_baus; + HEAD_PTR->previous_block_size = 0; + HEAD_PTR->block_size = n_baus; - U(into_free_collection)(desc, HEAD_PTR); + U(into_free_collection)(desc, HEAD_PTR); - /* Set up the dummy end block. */ - start = BAUS_FORWARD(start, n_baus); - HEAD_PTR->previous_block_size = n_baus; - HEAD_PTR->block_size = 0; + /* Set up the dummy end block. */ + start = BAUS_FORWARD(start, n_baus); + HEAD_PTR->previous_block_size = n_baus; + HEAD_PTR->block_size = 0; #undef HEAD_PTR } @@ -345,12 +317,11 @@ void U(new_chunk)(U(descriptor) *desc, void *start, U(size_bau) n_baus) /* Function that does audit fail actions defined my preprocessor symbol, ** and returns a dummy integer value. */ -int U(audit_block_fail_dummy_return)(void) -{ - HMM_AUDIT_FAIL +int U(audit_block_fail_dummy_return)(void) { + HMM_AUDIT_FAIL - /* Dummy return. */ - return(0); + /* Dummy return. */ + return(0); } #endif @@ -372,9 +343,9 @@ int U(audit_block_fail_dummy_return)(void) */ #define AVL_GET_LESS(H, ACCESS) \ - (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->self) + (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->self) #define AVL_GET_GREATER(H, ACCESS) \ - (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->prev) + (((ACCESS) ? 
AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->prev) #else @@ -396,39 +367,39 @@ int U(audit_block_fail_dummy_return)(void) */ #define AVL_GET_BALANCE_FACTOR(H) \ - ((((head_record *) (PTR_REC_TO_HEAD(H)))->block_size & \ - HIGH_BIT_BAU_SIZE) ? \ - (((head_record *) (PTR_REC_TO_HEAD(H)))->previous_block_size & \ - HIGH_BIT_BAU_SIZE ? 0 : -1) : 1) + ((((head_record *) (PTR_REC_TO_HEAD(H)))->block_size & \ + HIGH_BIT_BAU_SIZE) ? \ + (((head_record *) (PTR_REC_TO_HEAD(H)))->previous_block_size & \ + HIGH_BIT_BAU_SIZE ? 0 : -1) : 1) #define AVL_SET_BALANCE_FACTOR(H, BF) \ - { \ - register head_record *p = \ - (head_record *) PTR_REC_TO_HEAD(H); \ - register int bal_f = (BF); \ - \ - if (bal_f <= 0) \ - p->block_size |= HIGH_BIT_BAU_SIZE; \ - else \ - p->block_size &= ~HIGH_BIT_BAU_SIZE; \ - if (bal_f >= 0) \ - p->previous_block_size |= HIGH_BIT_BAU_SIZE; \ - else \ - p->previous_block_size &= ~HIGH_BIT_BAU_SIZE; \ - } + { \ + register head_record *p = \ + (head_record *) PTR_REC_TO_HEAD(H); \ + register int bal_f = (BF); \ + \ + if (bal_f <= 0) \ + p->block_size |= HIGH_BIT_BAU_SIZE; \ + else \ + p->block_size &= ~HIGH_BIT_BAU_SIZE; \ + if (bal_f >= 0) \ + p->previous_block_size |= HIGH_BIT_BAU_SIZE; \ + else \ + p->previous_block_size &= ~HIGH_BIT_BAU_SIZE; \ + } #define COMPARE_KEY_KEY(K1, K2) ((K1) == (K2) ? 0 : ((K1) > (K2) ? 
1 : -1)) #define AVL_COMPARE_KEY_NODE(K, H) \ - COMPARE_KEY_KEY(K, BLOCK_BAUS(PTR_REC_TO_HEAD(H))) + COMPARE_KEY_KEY(K, BLOCK_BAUS(PTR_REC_TO_HEAD(H))) #define AVL_COMPARE_NODE_NODE(H1, H2) \ - COMPARE_KEY_KEY(BLOCK_BAUS(PTR_REC_TO_HEAD(H1)), \ - BLOCK_BAUS(PTR_REC_TO_HEAD(H2))) + COMPARE_KEY_KEY(BLOCK_BAUS(PTR_REC_TO_HEAD(H1)), \ + BLOCK_BAUS(PTR_REC_TO_HEAD(H2))) #define AVL_NULL ((ptr_record *) 0) #define AVL_IMPL_MASK \ - ( AVL_IMPL_INSERT | AVL_IMPL_SEARCH | AVL_IMPL_REMOVE | AVL_IMPL_SUBST ) + ( AVL_IMPL_INSERT | AVL_IMPL_SEARCH | AVL_IMPL_REMOVE | AVL_IMPL_SUBST ) #include "cavl_impl.h" diff --git a/vpx_mem/memory_manager/hmm_dflt_abort.c b/vpx_mem/memory_manager/hmm_dflt_abort.c index d92435cfa..51c3cc27a 100644 --- a/vpx_mem/memory_manager/hmm_dflt_abort.c +++ b/vpx_mem/memory_manager/hmm_dflt_abort.c @@ -29,26 +29,25 @@ static int entered = 0; /* Print abort message, file and line. Terminate execution. */ -void hmm_dflt_abort(const char *file, const char *line) -{ - /* Avoid use of printf(), which is more likely to use heap. */ +void hmm_dflt_abort(const char *file, const char *line) { + /* Avoid use of printf(), which is more likely to use heap. */ - if (entered) + if (entered) - /* The standard I/O functions called a heap function and caused - ** an indirect recursive call to this function. So we'll have - ** to just exit without printing a message. */ - while (1); + /* The standard I/O functions called a heap function and caused + ** an indirect recursive call to this function. So we'll have + ** to just exit without printing a message. 
*/ + while (1); - entered = 1; + entered = 1; - fputs("\n_abort - Heap corruption\n" "File: ", stderr); - fputs(file, stderr); - fputs(" Line: ", stderr); - fputs(line, stderr); - fputs("\n\n", stderr); - fputs("hmm_dflt_abort: while(1)!!!\n", stderr); - fflush(stderr); + fputs("\n_abort - Heap corruption\n" "File: ", stderr); + fputs(file, stderr); + fputs(" Line: ", stderr); + fputs(line, stderr); + fputs("\n\n", stderr); + fputs("hmm_dflt_abort: while(1)!!!\n", stderr); + fflush(stderr); - while (1); + while (1); } diff --git a/vpx_mem/memory_manager/hmm_grow.c b/vpx_mem/memory_manager/hmm_grow.c index 9a4b6e416..0e8637374 100644 --- a/vpx_mem/memory_manager/hmm_grow.c +++ b/vpx_mem/memory_manager/hmm_grow.c @@ -15,36 +15,35 @@ #include "hmm_intrnl.h" -void U(grow_chunk)(U(descriptor) *desc, void *end, U(size_bau) n_baus) -{ +void U(grow_chunk)(U(descriptor) *desc, void *end, U(size_bau) n_baus) { #undef HEAD_PTR #define HEAD_PTR ((head_record *) end) - end = BAUS_BACKWARD(end, DUMMY_END_BLOCK_BAUS); + end = BAUS_BACKWARD(end, DUMMY_END_BLOCK_BAUS); #ifdef HMM_AUDIT_FAIL - if (HEAD_PTR->block_size != 0) - /* Chunk does not have valid dummy end block. */ - HMM_AUDIT_FAIL + if (HEAD_PTR->block_size != 0) + /* Chunk does not have valid dummy end block. */ + HMM_AUDIT_FAIL #endif - /* Create a new block that absorbs the old dummy end block. */ - HEAD_PTR->block_size = n_baus; - - /* Set up the new dummy end block. */ - { - head_record *dummy = (head_record *) BAUS_FORWARD(end, n_baus); - dummy->previous_block_size = n_baus; - dummy->block_size = 0; - } - - /* Simply free the new block, allowing it to coalesce with any - ** free block at that was the last block in the chunk prior to - ** growth. - */ - U(free)(desc, HEAD_TO_PTR_REC(end)); + /* Create a new block that absorbs the old dummy end block. */ + HEAD_PTR->block_size = n_baus; + + /* Set up the new dummy end block. 
*/ + { + head_record *dummy = (head_record *) BAUS_FORWARD(end, n_baus); + dummy->previous_block_size = n_baus; + dummy->block_size = 0; + } + + /* Simply free the new block, allowing it to coalesce with any + ** free block at that was the last block in the chunk prior to + ** growth. + */ + U(free)(desc, HEAD_TO_PTR_REC(end)); #undef HEAD_PTR } diff --git a/vpx_mem/memory_manager/hmm_largest.c b/vpx_mem/memory_manager/hmm_largest.c index c3c6f2c42..192758df9 100644 --- a/vpx_mem/memory_manager/hmm_largest.c +++ b/vpx_mem/memory_manager/hmm_largest.c @@ -15,46 +15,43 @@ #include "hmm_intrnl.h" -U(size_aau) U(largest_available)(U(descriptor) *desc) -{ - U(size_bau) largest; - - if (!(desc->avl_tree_root)) - largest = 0; - else - { +U(size_aau) U(largest_available)(U(descriptor) *desc) { + U(size_bau) largest; + + if (!(desc->avl_tree_root)) + largest = 0; + else { #ifdef HMM_AUDIT_FAIL - /* Audit root block in AVL tree. */ - AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root)) + /* Audit root block in AVL tree. */ + AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root)) #endif - largest = - BLOCK_BAUS( - PTR_REC_TO_HEAD( - U(avl_search)( - (U(avl_avl) *) & (desc->avl_tree_root), - (U(size_bau)) ~(U(size_bau)) 0, AVL_LESS))); - } + largest = + BLOCK_BAUS( + PTR_REC_TO_HEAD( + U(avl_search)( + (U(avl_avl) *) & (desc->avl_tree_root), + (U(size_bau)) ~(U(size_bau)) 0, AVL_LESS))); + } - if (desc->last_freed) - { - /* Size of last freed block. */ - register U(size_bau) lf_size; + if (desc->last_freed) { + /* Size of last freed block. */ + register U(size_bau) lf_size; #ifdef HMM_AUDIT_FAIL - AUDIT_BLOCK(desc->last_freed) + AUDIT_BLOCK(desc->last_freed) #endif - lf_size = BLOCK_BAUS(desc->last_freed); + lf_size = BLOCK_BAUS(desc->last_freed); - if (lf_size > largest) - largest = lf_size; - } + if (lf_size > largest) + largest = lf_size; + } - /* Convert largest size to AAUs and subract head size leaving payload - ** size. - */ - return(largest ? 
- ((largest * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) - HEAD_AAUS) : - 0); + /* Convert largest size to AAUs and subract head size leaving payload + ** size. + */ + return(largest ? + ((largest * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) - HEAD_AAUS) : + 0); } diff --git a/vpx_mem/memory_manager/hmm_resize.c b/vpx_mem/memory_manager/hmm_resize.c index f90da9692..baa5a8f9e 100644 --- a/vpx_mem/memory_manager/hmm_resize.c +++ b/vpx_mem/memory_manager/hmm_resize.c @@ -15,105 +15,100 @@ #include "hmm_intrnl.h" -int U(resize)(U(descriptor) *desc, void *mem, U(size_aau) n) -{ - U(size_aau) i; - head_record *next_head_ptr; - head_record *head_ptr = PTR_REC_TO_HEAD(mem); +int U(resize)(U(descriptor) *desc, void *mem, U(size_aau) n) { + U(size_aau) i; + head_record *next_head_ptr; + head_record *head_ptr = PTR_REC_TO_HEAD(mem); - /* Flag. */ - int next_block_free; + /* Flag. */ + int next_block_free; - /* Convert n from desired block size in AAUs to BAUs. */ - n += HEAD_AAUS; - n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT); + /* Convert n from desired block size in AAUs to BAUs. 
*/ + n += HEAD_AAUS; + n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT); - if (n < MIN_BLOCK_BAUS) - n = MIN_BLOCK_BAUS; + if (n < MIN_BLOCK_BAUS) + n = MIN_BLOCK_BAUS; #ifdef HMM_AUDIT_FAIL - AUDIT_BLOCK(head_ptr) + AUDIT_BLOCK(head_ptr) - if (!IS_BLOCK_ALLOCATED(head_ptr)) - HMM_AUDIT_FAIL + if (!IS_BLOCK_ALLOCATED(head_ptr)) + HMM_AUDIT_FAIL - if (desc->avl_tree_root) - AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root)) + if (desc->avl_tree_root) + AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root)) #endif - i = head_ptr->block_size; + i = head_ptr->block_size; - next_head_ptr = - (head_record *) BAUS_FORWARD(head_ptr, head_ptr->block_size); + next_head_ptr = + (head_record *) BAUS_FORWARD(head_ptr, head_ptr->block_size); - next_block_free = - (next_head_ptr == desc->last_freed) || - !IS_BLOCK_ALLOCATED(next_head_ptr); + next_block_free = + (next_head_ptr == desc->last_freed) || + !IS_BLOCK_ALLOCATED(next_head_ptr); - if (next_block_free) - /* Block can expand into next free block. */ - i += BLOCK_BAUS(next_head_ptr); + if (next_block_free) + /* Block can expand into next free block. */ + i += BLOCK_BAUS(next_head_ptr); - if (n > i) - /* Not enough room for block to expand. */ - return(-1); + if (n > i) + /* Not enough room for block to expand. */ + return(-1); - if (next_block_free) - { + if (next_block_free) { #ifdef HMM_AUDIT_FAIL - AUDIT_BLOCK(next_head_ptr) + AUDIT_BLOCK(next_head_ptr) #endif - if (next_head_ptr == desc->last_freed) - desc->last_freed = 0; - else - U(out_of_free_collection)(desc, next_head_ptr); + if (next_head_ptr == desc->last_freed) + desc->last_freed = 0; + else + U(out_of_free_collection)(desc, next_head_ptr); - next_head_ptr = - (head_record *) BAUS_FORWARD(head_ptr, (U(size_bau)) i); - } + next_head_ptr = + (head_record *) BAUS_FORWARD(head_ptr, (U(size_bau)) i); + } - /* Set i to number of "extra" BAUs. */ - i -= n; + /* Set i to number of "extra" BAUs. 
*/ + i -= n; - if (i < MIN_BLOCK_BAUS) - /* Not enough extra BAUs to be a block on their own, so just keep them - ** in the block being resized. - */ - { - n += i; - i = n; - } - else - { - /* There are enough "leftover" BAUs in the next block to - ** form a remainder block. */ + if (i < MIN_BLOCK_BAUS) + /* Not enough extra BAUs to be a block on their own, so just keep them + ** in the block being resized. + */ + { + n += i; + i = n; + } else { + /* There are enough "leftover" BAUs in the next block to + ** form a remainder block. */ - head_record *rem_head_ptr; + head_record *rem_head_ptr; - rem_head_ptr = (head_record *) BAUS_FORWARD(head_ptr, n); + rem_head_ptr = (head_record *) BAUS_FORWARD(head_ptr, n); - rem_head_ptr->previous_block_size = (U(size_bau)) n; - rem_head_ptr->block_size = (U(size_bau)) i; + rem_head_ptr->previous_block_size = (U(size_bau)) n; + rem_head_ptr->block_size = (U(size_bau)) i; - if (desc->last_freed) - { + if (desc->last_freed) { #ifdef HMM_AUDIT_FAIL - AUDIT_BLOCK(desc->last_freed) + AUDIT_BLOCK(desc->last_freed) #endif - U(into_free_collection)(desc, (head_record *)(desc->last_freed)); + U(into_free_collection)(desc, (head_record *)(desc->last_freed)); - desc->last_freed = 0; - } - - desc->last_freed = rem_head_ptr; + desc->last_freed = 0; } - head_ptr->block_size = (U(size_bau)) n; - next_head_ptr->previous_block_size = (U(size_bau)) i; + desc->last_freed = rem_head_ptr; + } + + head_ptr->block_size = (U(size_bau)) n; + next_head_ptr->previous_block_size = (U(size_bau)) i; - return(0); + return(0); } diff --git a/vpx_mem/memory_manager/hmm_shrink.c b/vpx_mem/memory_manager/hmm_shrink.c index 78fe268ba..f80aeead7 100644 --- a/vpx_mem/memory_manager/hmm_shrink.c +++ b/vpx_mem/memory_manager/hmm_shrink.c @@ -15,97 +15,89 @@ #include "hmm_intrnl.h" -void U(shrink_chunk)(U(descriptor) *desc, U(size_bau) n_baus_to_shrink) -{ - head_record *dummy_end_block = (head_record *) - BAUS_BACKWARD(desc->end_of_shrinkable_chunk, 
DUMMY_END_BLOCK_BAUS); +void U(shrink_chunk)(U(descriptor) *desc, U(size_bau) n_baus_to_shrink) { + head_record *dummy_end_block = (head_record *) + BAUS_BACKWARD(desc->end_of_shrinkable_chunk, DUMMY_END_BLOCK_BAUS); #ifdef HMM_AUDIT_FAIL - if (dummy_end_block->block_size != 0) - /* Chunk does not have valid dummy end block. */ - HMM_AUDIT_FAIL + if (dummy_end_block->block_size != 0) + /* Chunk does not have valid dummy end block. */ + HMM_AUDIT_FAIL #endif - if (n_baus_to_shrink) - { - head_record *last_block = (head_record *) - BAUS_BACKWARD( - dummy_end_block, dummy_end_block->previous_block_size); + if (n_baus_to_shrink) { + head_record *last_block = (head_record *) + BAUS_BACKWARD( + dummy_end_block, dummy_end_block->previous_block_size); #ifdef HMM_AUDIT_FAIL - AUDIT_BLOCK(last_block) + AUDIT_BLOCK(last_block) #endif - if (last_block == desc->last_freed) - { - U(size_bau) bs = BLOCK_BAUS(last_block); - - /* Chunk will not be shrunk out of existence if - ** 1. There is at least one allocated block in the chunk - ** and the amount to shrink is exactly the size of the - ** last block, OR - ** 2. After the last block is shrunk, there will be enough - ** BAUs left in it to form a minimal size block. */ - int chunk_will_survive = - (PREV_BLOCK_BAUS(last_block) && (n_baus_to_shrink == bs)) || - (n_baus_to_shrink <= (U(size_bau))(bs - MIN_BLOCK_BAUS)); - - if (chunk_will_survive || - (!PREV_BLOCK_BAUS(last_block) && - (n_baus_to_shrink == - (U(size_bau))(bs + DUMMY_END_BLOCK_BAUS)))) - { - desc->last_freed = 0; - - if (chunk_will_survive) - { - bs -= n_baus_to_shrink; - - if (bs) - { - /* The last (non-dummy) block was not completely - ** eliminated by the shrink. */ - - last_block->block_size = bs; - - /* Create new dummy end record. 
- */ - dummy_end_block = - (head_record *) BAUS_FORWARD(last_block, bs); - dummy_end_block->previous_block_size = bs; - dummy_end_block->block_size = 0; + if (last_block == desc->last_freed) { + U(size_bau) bs = BLOCK_BAUS(last_block); + + /* Chunk will not be shrunk out of existence if + ** 1. There is at least one allocated block in the chunk + ** and the amount to shrink is exactly the size of the + ** last block, OR + ** 2. After the last block is shrunk, there will be enough + ** BAUs left in it to form a minimal size block. */ + int chunk_will_survive = + (PREV_BLOCK_BAUS(last_block) && (n_baus_to_shrink == bs)) || + (n_baus_to_shrink <= (U(size_bau))(bs - MIN_BLOCK_BAUS)); + + if (chunk_will_survive || + (!PREV_BLOCK_BAUS(last_block) && + (n_baus_to_shrink == + (U(size_bau))(bs + DUMMY_END_BLOCK_BAUS)))) { + desc->last_freed = 0; + + if (chunk_will_survive) { + bs -= n_baus_to_shrink; + + if (bs) { + /* The last (non-dummy) block was not completely + ** eliminated by the shrink. */ + + last_block->block_size = bs; + + /* Create new dummy end record. + */ + dummy_end_block = + (head_record *) BAUS_FORWARD(last_block, bs); + dummy_end_block->previous_block_size = bs; + dummy_end_block->block_size = 0; #ifdef HMM_AUDIT_FAIL - if (desc->avl_tree_root) - AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root)) + if (desc->avl_tree_root) + AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root)) #endif - U(into_free_collection)(desc, last_block); - } - else - { - /* The last (non-dummy) block was completely - ** eliminated by the shrink. Make its head - ** the new dummy end block. - */ - last_block->block_size = 0; - last_block->previous_block_size &= ~HIGH_BIT_BAU_SIZE; - } - } - } + U(into_free_collection)(desc, last_block); + } else { + /* The last (non-dummy) block was completely + ** eliminated by the shrink. Make its head + ** the new dummy end block. 
+ */ + last_block->block_size = 0; + last_block->previous_block_size &= ~HIGH_BIT_BAU_SIZE; + } + } + } #ifdef HMM_AUDIT_FAIL - else - HMM_AUDIT_FAIL + else + HMM_AUDIT_FAIL #endif - } + } #ifdef HMM_AUDIT_FAIL - else - HMM_AUDIT_FAIL + else + HMM_AUDIT_FAIL #endif - } + } } diff --git a/vpx_mem/memory_manager/hmm_true.c b/vpx_mem/memory_manager/hmm_true.c index 3f7be8f70..4428c3e34 100644 --- a/vpx_mem/memory_manager/hmm_true.c +++ b/vpx_mem/memory_manager/hmm_true.c @@ -15,18 +15,17 @@ #include "hmm_intrnl.h" -U(size_aau) U(true_size)(void *payload_ptr) -{ - register head_record *head_ptr = PTR_REC_TO_HEAD(payload_ptr); +U(size_aau) U(true_size)(void *payload_ptr) { + register head_record *head_ptr = PTR_REC_TO_HEAD(payload_ptr); #ifdef HMM_AUDIT_FAIL - AUDIT_BLOCK(head_ptr) + AUDIT_BLOCK(head_ptr) #endif - /* Convert block size from BAUs to AAUs. Subtract head size, leaving - ** payload size. - */ - return( - (BLOCK_BAUS(head_ptr) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) - - HEAD_AAUS); + /* Convert block size from BAUs to AAUs. Subtract head size, leaving + ** payload size. 
+ */ + return( + (BLOCK_BAUS(head_ptr) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) - + HEAD_AAUS); } diff --git a/vpx_mem/memory_manager/include/cavl_if.h b/vpx_mem/memory_manager/include/cavl_if.h index 1b2c9b738..ec6e525b7 100644 --- a/vpx_mem/memory_manager/include/cavl_if.h +++ b/vpx_mem/memory_manager/include/cavl_if.h @@ -32,13 +32,12 @@ #ifndef AVL_SEARCH_TYPE_DEFINED_ #define AVL_SEARCH_TYPE_DEFINED_ -typedef enum -{ - AVL_EQUAL = 1, - AVL_LESS = 2, - AVL_GREATER = 4, - AVL_LESS_EQUAL = AVL_EQUAL | AVL_LESS, - AVL_GREATER_EQUAL = AVL_EQUAL | AVL_GREATER +typedef enum { + AVL_EQUAL = 1, + AVL_LESS = 2, + AVL_GREATER = 4, + AVL_LESS_EQUAL = AVL_EQUAL | AVL_LESS, + AVL_GREATER_EQUAL = AVL_EQUAL | AVL_GREATER } avl_search_type; @@ -75,15 +74,14 @@ avl_search_type; #endif -typedef struct -{ +typedef struct { #ifdef AVL_INSIDE_STRUCT - AVL_INSIDE_STRUCT + AVL_INSIDE_STRUCT #endif - AVL_HANDLE root; + AVL_HANDLE root; } L_(avl); @@ -108,7 +106,7 @@ L_SC AVL_HANDLE L_(subst)(L_(avl) *tree, AVL_HANDLE new_node); #ifdef AVL_BUILD_ITER_TYPE L_SC int L_(build)( - L_(avl) *tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes); + L_(avl) *tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes); #endif @@ -153,7 +151,7 @@ L_SC int L_(build)( /* Maximum depth may be more than number of bits in a long. */ #define L_BIT_ARR_DEFN(NAME) \ - unsigned long NAME[((AVL_MAX_DEPTH) + L_LONG_BIT - 1) / L_LONG_BIT]; + unsigned long NAME[((AVL_MAX_DEPTH) + L_LONG_BIT - 1) / L_LONG_BIT]; #else @@ -164,29 +162,28 @@ L_SC int L_(build)( #endif /* Iterator structure. */ -typedef struct -{ - /* Tree being iterated over. */ - L_(avl) *tree_; - - /* Records a path into the tree. If bit n is true, indicates - ** take greater branch from the nth node in the path, otherwise - ** take the less branch. bit 0 gives branch from root, and - ** so on. */ - L_BIT_ARR_DEFN(branch) - - /* Zero-based depth of path into tree. */ - unsigned depth; - - /* Handles of nodes in path from root to current node (returned by *). 
*/ - AVL_HANDLE path_h[(AVL_MAX_DEPTH) - 1]; +typedef struct { + /* Tree being iterated over. */ + L_(avl) *tree_; + + /* Records a path into the tree. If bit n is true, indicates + ** take greater branch from the nth node in the path, otherwise + ** take the less branch. bit 0 gives branch from root, and + ** so on. */ + L_BIT_ARR_DEFN(branch) + + /* Zero-based depth of path into tree. */ + unsigned depth; + + /* Handles of nodes in path from root to current node (returned by *). */ + AVL_HANDLE path_h[(AVL_MAX_DEPTH) - 1]; } L_(iter); /* Iterator function prototypes. */ L_SC void L_(start_iter)( - L_(avl) *tree, L_(iter) *iter, AVL_KEY k, avl_search_type st); + L_(avl) *tree, L_(iter) *iter, AVL_KEY k, avl_search_type st); L_SC void L_(start_iter_least)(L_(avl) *tree, L_(iter) *iter); diff --git a/vpx_mem/memory_manager/include/cavl_impl.h b/vpx_mem/memory_manager/include/cavl_impl.h index 5e165dd4d..cf7deb7fb 100644 --- a/vpx_mem/memory_manager/include/cavl_impl.h +++ b/vpx_mem/memory_manager/include/cavl_impl.h @@ -110,16 +110,16 @@ #define L_BIT_ARR_DEFN(NAME) unsigned long NAME[L_BIT_ARR_LONGS]; #define L_BIT_ARR_VAL(BIT_ARR, BIT_NUM) \ - ((BIT_ARR)[(BIT_NUM) / L_LONG_BIT] & (1L << ((BIT_NUM) % L_LONG_BIT))) + ((BIT_ARR)[(BIT_NUM) / L_LONG_BIT] & (1L << ((BIT_NUM) % L_LONG_BIT))) #define L_BIT_ARR_0(BIT_ARR, BIT_NUM) \ - (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] &= ~(1L << ((BIT_NUM) % L_LONG_BIT)); + (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] &= ~(1L << ((BIT_NUM) % L_LONG_BIT)); #define L_BIT_ARR_1(BIT_ARR, BIT_NUM) \ - (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] |= 1L << ((BIT_NUM) % L_LONG_BIT); + (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] |= 1L << ((BIT_NUM) % L_LONG_BIT); #define L_BIT_ARR_ALL(BIT_ARR, BIT_VAL) \ - { int i = L_BIT_ARR_LONGS; do (BIT_ARR)[--i] = 0L - (BIT_VAL); while(i); } + { int i = L_BIT_ARR_LONGS; do (BIT_ARR)[--i] = 0L - (BIT_VAL); while(i); } #else /* The bit array can definitely fit in one long */ @@ -138,7 +138,7 @@ #ifdef AVL_READ_ERRORS_HAPPEN #define 
L_CHECK_READ_ERROR(ERROR_RETURN) \ - { if (AVL_READ_ERROR) return(ERROR_RETURN); } + { if (AVL_READ_ERROR) return(ERROR_RETURN); } #else @@ -179,18 +179,16 @@ #if (L_IMPL_MASK & AVL_IMPL_INIT) -L_SC void L_(init)(L_(avl) *l_tree) -{ - l_tree->root = AVL_NULL; +L_SC void L_(init)(L_(avl) *l_tree) { + l_tree->root = AVL_NULL; } #endif #if (L_IMPL_MASK & AVL_IMPL_IS_EMPTY) -L_SC int L_(is_empty)(L_(avl) *l_tree) -{ - return(l_tree->root == AVL_NULL); +L_SC int L_(is_empty)(L_(avl) *l_tree) { + return(l_tree->root == AVL_NULL); } #endif @@ -201,358 +199,305 @@ L_SC int L_(is_empty)(L_(avl) *l_tree) /* Balances subtree, returns handle of root node of subtree after balancing. */ -L_SC AVL_HANDLE L_(balance)(L_BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h) -{ - AVL_HANDLE deep_h; +L_SC AVL_HANDLE L_(balance)(L_BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h) { + AVL_HANDLE deep_h; - /* Either the "greater than" or the "less than" subtree of - ** this node has to be 2 levels deeper (or else it wouldn't - ** need balancing). - */ - if (AVL_GET_BALANCE_FACTOR(bal_h) > 0) - { - /* "Greater than" subtree is deeper. */ + /* Either the "greater than" or the "less than" subtree of + ** this node has to be 2 levels deeper (or else it wouldn't + ** need balancing). + */ + if (AVL_GET_BALANCE_FACTOR(bal_h) > 0) { + /* "Greater than" subtree is deeper. 
*/ - deep_h = AVL_GET_GREATER(bal_h, 1); + deep_h = AVL_GET_GREATER(bal_h, 1); - L_CHECK_READ_ERROR(AVL_NULL) + L_CHECK_READ_ERROR(AVL_NULL) - if (AVL_GET_BALANCE_FACTOR(deep_h) < 0) - { - int bf; - - AVL_HANDLE old_h = bal_h; - bal_h = AVL_GET_LESS(deep_h, 1); - L_CHECK_READ_ERROR(AVL_NULL) - AVL_SET_GREATER(old_h, AVL_GET_LESS(bal_h, 1)) - AVL_SET_LESS(deep_h, AVL_GET_GREATER(bal_h, 1)) - AVL_SET_LESS(bal_h, old_h) - AVL_SET_GREATER(bal_h, deep_h) - - bf = AVL_GET_BALANCE_FACTOR(bal_h); - - if (bf != 0) - { - if (bf > 0) - { - AVL_SET_BALANCE_FACTOR(old_h, -1) - AVL_SET_BALANCE_FACTOR(deep_h, 0) - } - else - { - AVL_SET_BALANCE_FACTOR(deep_h, 1) - AVL_SET_BALANCE_FACTOR(old_h, 0) - } - - AVL_SET_BALANCE_FACTOR(bal_h, 0) - } - else - { - AVL_SET_BALANCE_FACTOR(old_h, 0) - AVL_SET_BALANCE_FACTOR(deep_h, 0) - } - } - else - { - AVL_SET_GREATER(bal_h, AVL_GET_LESS(deep_h, 0)) - AVL_SET_LESS(deep_h, bal_h) - - if (AVL_GET_BALANCE_FACTOR(deep_h) == 0) - { - AVL_SET_BALANCE_FACTOR(deep_h, -1) - AVL_SET_BALANCE_FACTOR(bal_h, 1) - } - else - { - AVL_SET_BALANCE_FACTOR(deep_h, 0) - AVL_SET_BALANCE_FACTOR(bal_h, 0) - } - - bal_h = deep_h; + if (AVL_GET_BALANCE_FACTOR(deep_h) < 0) { + int bf; + + AVL_HANDLE old_h = bal_h; + bal_h = AVL_GET_LESS(deep_h, 1); + L_CHECK_READ_ERROR(AVL_NULL) + AVL_SET_GREATER(old_h, AVL_GET_LESS(bal_h, 1)) + AVL_SET_LESS(deep_h, AVL_GET_GREATER(bal_h, 1)) + AVL_SET_LESS(bal_h, old_h) + AVL_SET_GREATER(bal_h, deep_h) + + bf = AVL_GET_BALANCE_FACTOR(bal_h); + + if (bf != 0) { + if (bf > 0) { + AVL_SET_BALANCE_FACTOR(old_h, -1) + AVL_SET_BALANCE_FACTOR(deep_h, 0) + } else { + AVL_SET_BALANCE_FACTOR(deep_h, 1) + AVL_SET_BALANCE_FACTOR(old_h, 0) } + + AVL_SET_BALANCE_FACTOR(bal_h, 0) + } else { + AVL_SET_BALANCE_FACTOR(old_h, 0) + AVL_SET_BALANCE_FACTOR(deep_h, 0) + } + } else { + AVL_SET_GREATER(bal_h, AVL_GET_LESS(deep_h, 0)) + AVL_SET_LESS(deep_h, bal_h) + + if (AVL_GET_BALANCE_FACTOR(deep_h) == 0) { + AVL_SET_BALANCE_FACTOR(deep_h, -1) + 
AVL_SET_BALANCE_FACTOR(bal_h, 1) + } else { + AVL_SET_BALANCE_FACTOR(deep_h, 0) + AVL_SET_BALANCE_FACTOR(bal_h, 0) + } + + bal_h = deep_h; } - else - { - /* "Less than" subtree is deeper. */ + } else { + /* "Less than" subtree is deeper. */ - deep_h = AVL_GET_LESS(bal_h, 1); - L_CHECK_READ_ERROR(AVL_NULL) + deep_h = AVL_GET_LESS(bal_h, 1); + L_CHECK_READ_ERROR(AVL_NULL) - if (AVL_GET_BALANCE_FACTOR(deep_h) > 0) - { - int bf; - AVL_HANDLE old_h = bal_h; - bal_h = AVL_GET_GREATER(deep_h, 1); - L_CHECK_READ_ERROR(AVL_NULL) - AVL_SET_LESS(old_h, AVL_GET_GREATER(bal_h, 0)) - AVL_SET_GREATER(deep_h, AVL_GET_LESS(bal_h, 0)) - AVL_SET_GREATER(bal_h, old_h) - AVL_SET_LESS(bal_h, deep_h) - - bf = AVL_GET_BALANCE_FACTOR(bal_h); - - if (bf != 0) - { - if (bf < 0) - { - AVL_SET_BALANCE_FACTOR(old_h, 1) - AVL_SET_BALANCE_FACTOR(deep_h, 0) - } - else - { - AVL_SET_BALANCE_FACTOR(deep_h, -1) - AVL_SET_BALANCE_FACTOR(old_h, 0) - } - - AVL_SET_BALANCE_FACTOR(bal_h, 0) - } - else - { - AVL_SET_BALANCE_FACTOR(old_h, 0) - AVL_SET_BALANCE_FACTOR(deep_h, 0) - } - } - else - { - AVL_SET_LESS(bal_h, AVL_GET_GREATER(deep_h, 0)) - AVL_SET_GREATER(deep_h, bal_h) - - if (AVL_GET_BALANCE_FACTOR(deep_h) == 0) - { - AVL_SET_BALANCE_FACTOR(deep_h, 1) - AVL_SET_BALANCE_FACTOR(bal_h, -1) - } - else - { - AVL_SET_BALANCE_FACTOR(deep_h, 0) - AVL_SET_BALANCE_FACTOR(bal_h, 0) - } - - bal_h = deep_h; + if (AVL_GET_BALANCE_FACTOR(deep_h) > 0) { + int bf; + AVL_HANDLE old_h = bal_h; + bal_h = AVL_GET_GREATER(deep_h, 1); + L_CHECK_READ_ERROR(AVL_NULL) + AVL_SET_LESS(old_h, AVL_GET_GREATER(bal_h, 0)) + AVL_SET_GREATER(deep_h, AVL_GET_LESS(bal_h, 0)) + AVL_SET_GREATER(bal_h, old_h) + AVL_SET_LESS(bal_h, deep_h) + + bf = AVL_GET_BALANCE_FACTOR(bal_h); + + if (bf != 0) { + if (bf < 0) { + AVL_SET_BALANCE_FACTOR(old_h, 1) + AVL_SET_BALANCE_FACTOR(deep_h, 0) + } else { + AVL_SET_BALANCE_FACTOR(deep_h, -1) + AVL_SET_BALANCE_FACTOR(old_h, 0) } + + AVL_SET_BALANCE_FACTOR(bal_h, 0) + } else { + 
AVL_SET_BALANCE_FACTOR(old_h, 0) + AVL_SET_BALANCE_FACTOR(deep_h, 0) + } + } else { + AVL_SET_LESS(bal_h, AVL_GET_GREATER(deep_h, 0)) + AVL_SET_GREATER(deep_h, bal_h) + + if (AVL_GET_BALANCE_FACTOR(deep_h) == 0) { + AVL_SET_BALANCE_FACTOR(deep_h, 1) + AVL_SET_BALANCE_FACTOR(bal_h, -1) + } else { + AVL_SET_BALANCE_FACTOR(deep_h, 0) + AVL_SET_BALANCE_FACTOR(bal_h, 0) + } + + bal_h = deep_h; } + } - return(bal_h); + return(bal_h); } -L_SC AVL_HANDLE L_(insert)(L_(avl) *l_tree, AVL_HANDLE h) -{ - AVL_SET_LESS(h, AVL_NULL) - AVL_SET_GREATER(h, AVL_NULL) - AVL_SET_BALANCE_FACTOR(h, 0) +L_SC AVL_HANDLE L_(insert)(L_(avl) *l_tree, AVL_HANDLE h) { + AVL_SET_LESS(h, AVL_NULL) + AVL_SET_GREATER(h, AVL_NULL) + AVL_SET_BALANCE_FACTOR(h, 0) - if (l_tree->root == AVL_NULL) - l_tree->root = h; - else - { - /* Last unbalanced node encountered in search for insertion point. */ - AVL_HANDLE unbal = AVL_NULL; - /* Parent of last unbalanced node. */ - AVL_HANDLE parent_unbal = AVL_NULL; - /* Balance factor of last unbalanced node. */ - int unbal_bf; - - /* Zero-based depth in tree. */ - unsigned depth = 0, unbal_depth = 0; - - /* Records a path into the tree. If bit n is true, indicates - ** take greater branch from the nth node in the path, otherwise - ** take the less branch. bit 0 gives branch from root, and - ** so on. */ - L_BIT_ARR_DEFN(branch) - - AVL_HANDLE hh = l_tree->root; - AVL_HANDLE parent = AVL_NULL; - int cmp; - - do - { - if (AVL_GET_BALANCE_FACTOR(hh) != 0) - { - unbal = hh; - parent_unbal = parent; - unbal_depth = depth; - } - - cmp = AVL_COMPARE_NODE_NODE(h, hh); - - if (cmp == 0) - /* Duplicate key. 
*/ - return(hh); - - parent = hh; - - if (cmp > 0) - { - hh = AVL_GET_GREATER(hh, 1); - L_BIT_ARR_1(branch, depth) - } - else - { - hh = AVL_GET_LESS(hh, 1); - L_BIT_ARR_0(branch, depth) - } - - L_CHECK_READ_ERROR(AVL_NULL) - depth++; - } - while (hh != AVL_NULL); + if (l_tree->root == AVL_NULL) + l_tree->root = h; + else { + /* Last unbalanced node encountered in search for insertion point. */ + AVL_HANDLE unbal = AVL_NULL; + /* Parent of last unbalanced node. */ + AVL_HANDLE parent_unbal = AVL_NULL; + /* Balance factor of last unbalanced node. */ + int unbal_bf; - /* Add node to insert as leaf of tree. */ - if (cmp < 0) - AVL_SET_LESS(parent, h) - else - AVL_SET_GREATER(parent, h) + /* Zero-based depth in tree. */ + unsigned depth = 0, unbal_depth = 0; - depth = unbal_depth; + /* Records a path into the tree. If bit n is true, indicates + ** take greater branch from the nth node in the path, otherwise + ** take the less branch. bit 0 gives branch from root, and + ** so on. */ + L_BIT_ARR_DEFN(branch) - if (unbal == AVL_NULL) - hh = l_tree->root; - else - { - cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1; - depth++; - unbal_bf = AVL_GET_BALANCE_FACTOR(unbal); - - if (cmp < 0) - unbal_bf--; - else /* cmp > 0 */ - unbal_bf++; - - hh = cmp < 0 ? AVL_GET_LESS(unbal, 1) : AVL_GET_GREATER(unbal, 1); - L_CHECK_READ_ERROR(AVL_NULL) - - if ((unbal_bf != -2) && (unbal_bf != 2)) - { - /* No rebalancing of tree is necessary. */ - AVL_SET_BALANCE_FACTOR(unbal, unbal_bf) - unbal = AVL_NULL; - } - } + AVL_HANDLE hh = l_tree->root; + AVL_HANDLE parent = AVL_NULL; + int cmp; - if (hh != AVL_NULL) - while (h != hh) - { - cmp = L_BIT_ARR_VAL(branch, depth) ? 
1 : -1; - depth++; - - if (cmp < 0) - { - AVL_SET_BALANCE_FACTOR(hh, -1) - hh = AVL_GET_LESS(hh, 1); - } - else /* cmp > 0 */ - { - AVL_SET_BALANCE_FACTOR(hh, 1) - hh = AVL_GET_GREATER(hh, 1); - } - - L_CHECK_READ_ERROR(AVL_NULL) - } - - if (unbal != AVL_NULL) - { - unbal = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX unbal); - L_CHECK_READ_ERROR(AVL_NULL) - - if (parent_unbal == AVL_NULL) - l_tree->root = unbal; - else - { - depth = unbal_depth - 1; - cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1; - - if (cmp < 0) - AVL_SET_LESS(parent_unbal, unbal) - else /* cmp > 0 */ - AVL_SET_GREATER(parent_unbal, unbal) - } - } + do { + if (AVL_GET_BALANCE_FACTOR(hh) != 0) { + unbal = hh; + parent_unbal = parent; + unbal_depth = depth; + } - } + cmp = AVL_COMPARE_NODE_NODE(h, hh); - return(h); -} + if (cmp == 0) + /* Duplicate key. */ + return(hh); -#endif + parent = hh; -#if (L_IMPL_MASK & AVL_IMPL_SEARCH) + if (cmp > 0) { + hh = AVL_GET_GREATER(hh, 1); + L_BIT_ARR_1(branch, depth) + } else { + hh = AVL_GET_LESS(hh, 1); + L_BIT_ARR_0(branch, depth) + } + + L_CHECK_READ_ERROR(AVL_NULL) + depth++; + } while (hh != AVL_NULL); + + /* Add node to insert as leaf of tree. */ + if (cmp < 0) + AVL_SET_LESS(parent, h) + else + AVL_SET_GREATER(parent, h) + + depth = unbal_depth; + + if (unbal == AVL_NULL) + hh = l_tree->root; + else { + cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1; + depth++; + unbal_bf = AVL_GET_BALANCE_FACTOR(unbal); + + if (cmp < 0) + unbal_bf--; + else /* cmp > 0 */ + unbal_bf++; + + hh = cmp < 0 ? AVL_GET_LESS(unbal, 1) : AVL_GET_GREATER(unbal, 1); + L_CHECK_READ_ERROR(AVL_NULL) + + if ((unbal_bf != -2) && (unbal_bf != 2)) { + /* No rebalancing of tree is necessary. 
*/ + AVL_SET_BALANCE_FACTOR(unbal, unbal_bf) + unbal = AVL_NULL; + } + } -L_SC AVL_HANDLE L_(search)(L_(avl) *l_tree, AVL_KEY k, avl_search_type st) -{ - int cmp, target_cmp; - AVL_HANDLE match_h = AVL_NULL; - AVL_HANDLE h = l_tree->root; + if (hh != AVL_NULL) + while (h != hh) { + cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1; + depth++; - if (st & AVL_LESS) - target_cmp = 1; - else if (st & AVL_GREATER) - target_cmp = -1; - else - target_cmp = 0; + if (cmp < 0) { + AVL_SET_BALANCE_FACTOR(hh, -1) + hh = AVL_GET_LESS(hh, 1); + } else { /* cmp > 0 */ + AVL_SET_BALANCE_FACTOR(hh, 1) + hh = AVL_GET_GREATER(hh, 1); + } - while (h != AVL_NULL) - { - cmp = AVL_COMPARE_KEY_NODE(k, h); + L_CHECK_READ_ERROR(AVL_NULL) + } - if (cmp == 0) - { - if (st & AVL_EQUAL) - { - match_h = h; - break; - } + if (unbal != AVL_NULL) { + unbal = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX unbal); + L_CHECK_READ_ERROR(AVL_NULL) - cmp = -target_cmp; - } - else if (target_cmp != 0) - if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT)) - /* cmp and target_cmp are both positive or both negative. */ - match_h = h; + if (parent_unbal == AVL_NULL) + l_tree->root = unbal; + else { + depth = unbal_depth - 1; + cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1; - h = cmp < 0 ? 
AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1); - L_CHECK_READ_ERROR(AVL_NULL) + if (cmp < 0) + AVL_SET_LESS(parent_unbal, unbal) + else /* cmp > 0 */ + AVL_SET_GREATER(parent_unbal, unbal) + } } - return(match_h); + } + + return(h); +} + +#endif + +#if (L_IMPL_MASK & AVL_IMPL_SEARCH) + +L_SC AVL_HANDLE L_(search)(L_(avl) *l_tree, AVL_KEY k, avl_search_type st) { + int cmp, target_cmp; + AVL_HANDLE match_h = AVL_NULL; + AVL_HANDLE h = l_tree->root; + + if (st & AVL_LESS) + target_cmp = 1; + else if (st & AVL_GREATER) + target_cmp = -1; + else + target_cmp = 0; + + while (h != AVL_NULL) { + cmp = AVL_COMPARE_KEY_NODE(k, h); + + if (cmp == 0) { + if (st & AVL_EQUAL) { + match_h = h; + break; + } + + cmp = -target_cmp; + } else if (target_cmp != 0) + if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT)) + /* cmp and target_cmp are both positive or both negative. */ + match_h = h; + + h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1); + L_CHECK_READ_ERROR(AVL_NULL) + } + + return(match_h); } #endif #if (L_IMPL_MASK & AVL_IMPL_SEARCH_LEAST) -L_SC AVL_HANDLE L_(search_least)(L_(avl) *l_tree) -{ - AVL_HANDLE h = l_tree->root; - AVL_HANDLE parent = AVL_NULL; +L_SC AVL_HANDLE L_(search_least)(L_(avl) *l_tree) { + AVL_HANDLE h = l_tree->root; + AVL_HANDLE parent = AVL_NULL; - while (h != AVL_NULL) - { - parent = h; - h = AVL_GET_LESS(h, 1); - L_CHECK_READ_ERROR(AVL_NULL) - } + while (h != AVL_NULL) { + parent = h; + h = AVL_GET_LESS(h, 1); + L_CHECK_READ_ERROR(AVL_NULL) + } - return(parent); + return(parent); } #endif #if (L_IMPL_MASK & AVL_IMPL_SEARCH_GREATEST) -L_SC AVL_HANDLE L_(search_greatest)(L_(avl) *l_tree) -{ - AVL_HANDLE h = l_tree->root; - AVL_HANDLE parent = AVL_NULL; +L_SC AVL_HANDLE L_(search_greatest)(L_(avl) *l_tree) { + AVL_HANDLE h = l_tree->root; + AVL_HANDLE parent = AVL_NULL; - while (h != AVL_NULL) - { - parent = h; - h = AVL_GET_GREATER(h, 1); - L_CHECK_READ_ERROR(AVL_NULL) - } + while (h != AVL_NULL) { + parent = h; + h = AVL_GET_GREATER(h, 1); + 
L_CHECK_READ_ERROR(AVL_NULL) + } - return(parent); + return(parent); } #endif @@ -564,284 +509,253 @@ L_SC AVL_HANDLE L_(search_greatest)(L_(avl) *l_tree) */ L_SC AVL_HANDLE L_(balance)(L_BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h); -L_SC AVL_HANDLE L_(remove)(L_(avl) *l_tree, AVL_KEY k) -{ - /* Zero-based depth in tree. */ - unsigned depth = 0, rm_depth; - - /* Records a path into the tree. If bit n is true, indicates - ** take greater branch from the nth node in the path, otherwise - ** take the less branch. bit 0 gives branch from root, and - ** so on. */ - L_BIT_ARR_DEFN(branch) - - AVL_HANDLE h = l_tree->root; - AVL_HANDLE parent = AVL_NULL; - AVL_HANDLE child; - AVL_HANDLE path; - int cmp, cmp_shortened_sub_with_path; - int reduced_depth; - int bf; - AVL_HANDLE rm; - AVL_HANDLE parent_rm; - - for (; ;) - { - if (h == AVL_NULL) - /* No node in tree with given key. */ - return(AVL_NULL); - - cmp = AVL_COMPARE_KEY_NODE(k, h); +L_SC AVL_HANDLE L_(remove)(L_(avl) *l_tree, AVL_KEY k) { + /* Zero-based depth in tree. */ + unsigned depth = 0, rm_depth; + + /* Records a path into the tree. If bit n is true, indicates + ** take greater branch from the nth node in the path, otherwise + ** take the less branch. bit 0 gives branch from root, and + ** so on. */ + L_BIT_ARR_DEFN(branch) + + AVL_HANDLE h = l_tree->root; + AVL_HANDLE parent = AVL_NULL; + AVL_HANDLE child; + AVL_HANDLE path; + int cmp, cmp_shortened_sub_with_path; + int reduced_depth; + int bf; + AVL_HANDLE rm; + AVL_HANDLE parent_rm; + + for (;;) { + if (h == AVL_NULL) + /* No node in tree with given key. */ + return(AVL_NULL); - if (cmp == 0) - /* Found node to remove. */ - break; + cmp = AVL_COMPARE_KEY_NODE(k, h); - parent = h; + if (cmp == 0) + /* Found node to remove. 
*/ + break; - if (cmp > 0) - { - h = AVL_GET_GREATER(h, 1); - L_BIT_ARR_1(branch, depth) - } - else - { - h = AVL_GET_LESS(h, 1); - L_BIT_ARR_0(branch, depth) - } + parent = h; - L_CHECK_READ_ERROR(AVL_NULL) - depth++; - cmp_shortened_sub_with_path = cmp; + if (cmp > 0) { + h = AVL_GET_GREATER(h, 1); + L_BIT_ARR_1(branch, depth) + } else { + h = AVL_GET_LESS(h, 1); + L_BIT_ARR_0(branch, depth) } - rm = h; - parent_rm = parent; - rm_depth = depth; - - /* If the node to remove is not a leaf node, we need to get a - ** leaf node, or a node with a single leaf as its child, to put - ** in the place of the node to remove. We will get the greatest - ** node in the less subtree (of the node to remove), or the least - ** node in the greater subtree. We take the leaf node from the - ** deeper subtree, if there is one. */ - - if (AVL_GET_BALANCE_FACTOR(h) < 0) - { + L_CHECK_READ_ERROR(AVL_NULL) + depth++; + cmp_shortened_sub_with_path = cmp; + } + + rm = h; + parent_rm = parent; + rm_depth = depth; + + /* If the node to remove is not a leaf node, we need to get a + ** leaf node, or a node with a single leaf as its child, to put + ** in the place of the node to remove. We will get the greatest + ** node in the less subtree (of the node to remove), or the least + ** node in the greater subtree. We take the leaf node from the + ** deeper subtree, if there is one. 
*/ + + if (AVL_GET_BALANCE_FACTOR(h) < 0) { + child = AVL_GET_LESS(h, 1); + L_BIT_ARR_0(branch, depth) + cmp = -1; + } else { + child = AVL_GET_GREATER(h, 1); + L_BIT_ARR_1(branch, depth) + cmp = 1; + } + + L_CHECK_READ_ERROR(AVL_NULL) + depth++; + + if (child != AVL_NULL) { + cmp = -cmp; + + do { + parent = h; + h = child; + + if (cmp < 0) { child = AVL_GET_LESS(h, 1); L_BIT_ARR_0(branch, depth) - cmp = -1; - } - else - { + } else { child = AVL_GET_GREATER(h, 1); L_BIT_ARR_1(branch, depth) - cmp = 1; - } + } - L_CHECK_READ_ERROR(AVL_NULL) - depth++; + L_CHECK_READ_ERROR(AVL_NULL) + depth++; + } while (child != AVL_NULL); - if (child != AVL_NULL) - { - cmp = -cmp; - - do - { - parent = h; - h = child; - - if (cmp < 0) - { - child = AVL_GET_LESS(h, 1); - L_BIT_ARR_0(branch, depth) - } - else - { - child = AVL_GET_GREATER(h, 1); - L_BIT_ARR_1(branch, depth) - } - - L_CHECK_READ_ERROR(AVL_NULL) - depth++; - } - while (child != AVL_NULL); + if (parent == rm) + /* Only went through do loop once. Deleted node will be replaced + ** in the tree structure by one of its immediate children. */ + cmp_shortened_sub_with_path = -cmp; + else + cmp_shortened_sub_with_path = cmp; + + /* Get the handle of the opposite child, which may not be null. */ + child = cmp > 0 ? AVL_GET_LESS(h, 0) : AVL_GET_GREATER(h, 0); + } - if (parent == rm) - /* Only went through do loop once. Deleted node will be replaced - ** in the tree structure by one of its immediate children. */ - cmp_shortened_sub_with_path = -cmp; + if (parent == AVL_NULL) + /* There were only 1 or 2 nodes in this tree. */ + l_tree->root = child; + else if (cmp_shortened_sub_with_path < 0) + AVL_SET_LESS(parent, child) + else + AVL_SET_GREATER(parent, child) + + /* "path" is the parent of the subtree being eliminated or reduced + ** from a depth of 2 to 1. If "path" is the node to be removed, we + ** set path to the node we're about to poke into the position of the + ** node to be removed. */ + path = parent == rm ? 
h : parent; + + if (h != rm) { + /* Poke in the replacement for the node to be removed. */ + AVL_SET_LESS(h, AVL_GET_LESS(rm, 0)) + AVL_SET_GREATER(h, AVL_GET_GREATER(rm, 0)) + AVL_SET_BALANCE_FACTOR(h, AVL_GET_BALANCE_FACTOR(rm)) + + if (parent_rm == AVL_NULL) + l_tree->root = h; + else { + depth = rm_depth - 1; + + if (L_BIT_ARR_VAL(branch, depth)) + AVL_SET_GREATER(parent_rm, h) else - cmp_shortened_sub_with_path = cmp; + AVL_SET_LESS(parent_rm, h) + } + } - /* Get the handle of the opposite child, which may not be null. */ - child = cmp > 0 ? AVL_GET_LESS(h, 0) : AVL_GET_GREATER(h, 0); - } + if (path != AVL_NULL) { + /* Create a temporary linked list from the parent of the path node + ** to the root node. */ + h = l_tree->root; + parent = AVL_NULL; + depth = 0; - if (parent == AVL_NULL) - /* There were only 1 or 2 nodes in this tree. */ - l_tree->root = child; - else if (cmp_shortened_sub_with_path < 0) - AVL_SET_LESS(parent, child) - else - AVL_SET_GREATER(parent, child) - - /* "path" is the parent of the subtree being eliminated or reduced - ** from a depth of 2 to 1. If "path" is the node to be removed, we - ** set path to the node we're about to poke into the position of the - ** node to be removed. */ - path = parent == rm ? h : parent; - - if (h != rm) - { - /* Poke in the replacement for the node to be removed. 
*/ - AVL_SET_LESS(h, AVL_GET_LESS(rm, 0)) - AVL_SET_GREATER(h, AVL_GET_GREATER(rm, 0)) - AVL_SET_BALANCE_FACTOR(h, AVL_GET_BALANCE_FACTOR(rm)) - - if (parent_rm == AVL_NULL) - l_tree->root = h; - else - { - depth = rm_depth - 1; - - if (L_BIT_ARR_VAL(branch, depth)) - AVL_SET_GREATER(parent_rm, h) - else - AVL_SET_LESS(parent_rm, h) - } + while (h != path) { + if (L_BIT_ARR_VAL(branch, depth)) { + child = AVL_GET_GREATER(h, 1); + AVL_SET_GREATER(h, parent) + } else { + child = AVL_GET_LESS(h, 1); + AVL_SET_LESS(h, parent) + } + + L_CHECK_READ_ERROR(AVL_NULL) + depth++; + parent = h; + h = child; } - if (path != AVL_NULL) - { - /* Create a temporary linked list from the parent of the path node - ** to the root node. */ - h = l_tree->root; - parent = AVL_NULL; - depth = 0; - - while (h != path) - { - if (L_BIT_ARR_VAL(branch, depth)) - { - child = AVL_GET_GREATER(h, 1); - AVL_SET_GREATER(h, parent) - } - else - { - child = AVL_GET_LESS(h, 1); - AVL_SET_LESS(h, parent) - } - - L_CHECK_READ_ERROR(AVL_NULL) - depth++; - parent = h; - h = child; - } + /* Climb from the path node to the root node using the linked + ** list, restoring the tree structure and rebalancing as necessary. + */ + reduced_depth = 1; + cmp = cmp_shortened_sub_with_path; - /* Climb from the path node to the root node using the linked - ** list, restoring the tree structure and rebalancing as necessary. - */ - reduced_depth = 1; - cmp = cmp_shortened_sub_with_path; - - for (; ;) - { - if (reduced_depth) - { - bf = AVL_GET_BALANCE_FACTOR(h); - - if (cmp < 0) - bf++; - else /* cmp > 0 */ - bf--; - - if ((bf == -2) || (bf == 2)) - { - h = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX h); - L_CHECK_READ_ERROR(AVL_NULL) - bf = AVL_GET_BALANCE_FACTOR(h); - } - else - AVL_SET_BALANCE_FACTOR(h, bf) - reduced_depth = (bf == 0); - } - - if (parent == AVL_NULL) - break; - - child = h; - h = parent; - depth--; - cmp = L_BIT_ARR_VAL(branch, depth) ? 
1 : -1; - - if (cmp < 0) - { - parent = AVL_GET_LESS(h, 1); - AVL_SET_LESS(h, child) - } - else - { - parent = AVL_GET_GREATER(h, 1); - AVL_SET_GREATER(h, child) - } - - L_CHECK_READ_ERROR(AVL_NULL) - } + for (;;) { + if (reduced_depth) { + bf = AVL_GET_BALANCE_FACTOR(h); + + if (cmp < 0) + bf++; + else /* cmp > 0 */ + bf--; + + if ((bf == -2) || (bf == 2)) { + h = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX h); + L_CHECK_READ_ERROR(AVL_NULL) + bf = AVL_GET_BALANCE_FACTOR(h); + } else + AVL_SET_BALANCE_FACTOR(h, bf) + reduced_depth = (bf == 0); + } + + if (parent == AVL_NULL) + break; + + child = h; + h = parent; + depth--; + cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1; + + if (cmp < 0) { + parent = AVL_GET_LESS(h, 1); + AVL_SET_LESS(h, child) + } else { + parent = AVL_GET_GREATER(h, 1); + AVL_SET_GREATER(h, child) + } - l_tree->root = h; + L_CHECK_READ_ERROR(AVL_NULL) } - return(rm); + l_tree->root = h; + } + + return(rm); } #endif #if (L_IMPL_MASK & AVL_IMPL_SUBST) -L_SC AVL_HANDLE L_(subst)(L_(avl) *l_tree, AVL_HANDLE new_node) -{ - AVL_HANDLE h = l_tree->root; - AVL_HANDLE parent = AVL_NULL; - int cmp, last_cmp; - - /* Search for node already in tree with same key. */ - for (; ;) - { - if (h == AVL_NULL) - /* No node in tree with same key as new node. */ - return(AVL_NULL); +L_SC AVL_HANDLE L_(subst)(L_(avl) *l_tree, AVL_HANDLE new_node) { + AVL_HANDLE h = l_tree->root; + AVL_HANDLE parent = AVL_NULL; + int cmp, last_cmp; - cmp = AVL_COMPARE_NODE_NODE(new_node, h); - - if (cmp == 0) - /* Found the node to substitute new one for. */ - break; + /* Search for node already in tree with same key. */ + for (;;) { + if (h == AVL_NULL) + /* No node in tree with same key as new node. */ + return(AVL_NULL); - last_cmp = cmp; - parent = h; - h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1); - L_CHECK_READ_ERROR(AVL_NULL) - } + cmp = AVL_COMPARE_NODE_NODE(new_node, h); - /* Copy tree housekeeping fields from node in tree to new node. 
*/ - AVL_SET_LESS(new_node, AVL_GET_LESS(h, 0)) - AVL_SET_GREATER(new_node, AVL_GET_GREATER(h, 0)) - AVL_SET_BALANCE_FACTOR(new_node, AVL_GET_BALANCE_FACTOR(h)) + if (cmp == 0) + /* Found the node to substitute new one for. */ + break; - if (parent == AVL_NULL) - /* New node is also new root. */ - l_tree->root = new_node; - else - { - /* Make parent point to new node. */ - if (last_cmp < 0) - AVL_SET_LESS(parent, new_node) - else - AVL_SET_GREATER(parent, new_node) - } - - return(h); + last_cmp = cmp; + parent = h; + h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1); + L_CHECK_READ_ERROR(AVL_NULL) + } + + /* Copy tree housekeeping fields from node in tree to new node. */ + AVL_SET_LESS(new_node, AVL_GET_LESS(h, 0)) + AVL_SET_GREATER(new_node, AVL_GET_GREATER(h, 0)) + AVL_SET_BALANCE_FACTOR(new_node, AVL_GET_BALANCE_FACTOR(h)) + + if (parent == AVL_NULL) + /* New node is also new root. */ + l_tree->root = new_node; + else { + /* Make parent point to new node. */ + if (last_cmp < 0) + AVL_SET_LESS(parent, new_node) + else + AVL_SET_GREATER(parent, new_node) + } + + return(h); } #endif @@ -851,144 +765,136 @@ L_SC AVL_HANDLE L_(subst)(L_(avl) *l_tree, AVL_HANDLE new_node) #if (L_IMPL_MASK & AVL_IMPL_BUILD) L_SC int L_(build)( - L_(avl) *l_tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes) -{ - /* Gives path to subtree being built. If bit n is false, branch - ** less from the node at depth n, if true branch greater. */ - L_BIT_ARR_DEFN(branch) - - /* If bit n is true, then for the current subtree at depth n, its - ** greater subtree has one more node than its less subtree. */ - L_BIT_ARR_DEFN(rem) - - /* Depth of root node of current subtree. */ - unsigned depth = 0; + L_(avl) *l_tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes) { + /* Gives path to subtree being built. If bit n is false, branch + ** less from the node at depth n, if true branch greater. 
*/ + L_BIT_ARR_DEFN(branch) + + /* If bit n is true, then for the current subtree at depth n, its + ** greater subtree has one more node than its less subtree. */ + L_BIT_ARR_DEFN(rem) + + /* Depth of root node of current subtree. */ + unsigned depth = 0; + + /* Number of nodes in current subtree. */ + L_SIZE num_sub = num_nodes; + + /* The algorithm relies on a stack of nodes whose less subtree has + ** been built, but whose greater subtree has not yet been built. + ** The stack is implemented as linked list. The nodes are linked + ** together by having the "greater" handle of a node set to the + ** next node in the list. "less_parent" is the handle of the first + ** node in the list. */ + AVL_HANDLE less_parent = AVL_NULL; + + /* h is root of current subtree, child is one of its children. */ + AVL_HANDLE h; + AVL_HANDLE child; + + if (num_nodes == 0) { + l_tree->root = AVL_NULL; + return(1); + } - /* Number of nodes in current subtree. */ - L_SIZE num_sub = num_nodes; + for (;;) { + while (num_sub > 2) { + /* Subtract one for root of subtree. */ + num_sub--; - /* The algorithm relies on a stack of nodes whose less subtree has - ** been built, but whose greater subtree has not yet been built. - ** The stack is implemented as linked list. The nodes are linked - ** together by having the "greater" handle of a node set to the - ** next node in the list. "less_parent" is the handle of the first - ** node in the list. */ - AVL_HANDLE less_parent = AVL_NULL; + if (num_sub & 1) + L_BIT_ARR_1(rem, depth) + else + L_BIT_ARR_0(rem, depth) + L_BIT_ARR_0(branch, depth) + depth++; - /* h is root of current subtree, child is one of its children. */ - AVL_HANDLE h; - AVL_HANDLE child; + num_sub >>= 1; + } - if (num_nodes == 0) - { - l_tree->root = AVL_NULL; - return(1); + if (num_sub == 2) { + /* Build a subtree with two nodes, slanting to greater. 
+ ** I arbitrarily chose to always have the extra node in the + ** greater subtree when there is an odd number of nodes to + ** split between the two subtrees. */ + + h = AVL_BUILD_ITER_VAL(p); + L_CHECK_READ_ERROR(0) + AVL_BUILD_ITER_INCR(p) + child = AVL_BUILD_ITER_VAL(p); + L_CHECK_READ_ERROR(0) + AVL_BUILD_ITER_INCR(p) + AVL_SET_LESS(child, AVL_NULL) + AVL_SET_GREATER(child, AVL_NULL) + AVL_SET_BALANCE_FACTOR(child, 0) + AVL_SET_GREATER(h, child) + AVL_SET_LESS(h, AVL_NULL) + AVL_SET_BALANCE_FACTOR(h, 1) + } else { /* num_sub == 1 */ + /* Build a subtree with one node. */ + + h = AVL_BUILD_ITER_VAL(p); + L_CHECK_READ_ERROR(0) + AVL_BUILD_ITER_INCR(p) + AVL_SET_LESS(h, AVL_NULL) + AVL_SET_GREATER(h, AVL_NULL) + AVL_SET_BALANCE_FACTOR(h, 0) } - for (; ;) - { - while (num_sub > 2) - { - /* Subtract one for root of subtree. */ - num_sub--; - - if (num_sub & 1) - L_BIT_ARR_1(rem, depth) - else - L_BIT_ARR_0(rem, depth) - L_BIT_ARR_0(branch, depth) - depth++; - - num_sub >>= 1; + while (depth) { + depth--; + + if (!L_BIT_ARR_VAL(branch, depth)) + /* We've completed a less subtree. */ + break; + + /* We've completed a greater subtree, so attach it to + ** its parent (that is less than it). We pop the parent + ** off the stack of less parents. */ + child = h; + h = less_parent; + less_parent = AVL_GET_GREATER(h, 1); + L_CHECK_READ_ERROR(0) + AVL_SET_GREATER(h, child) + /* num_sub = 2 * (num_sub - rem[depth]) + rem[depth] + 1 */ + num_sub <<= 1; + num_sub += L_BIT_ARR_VAL(rem, depth) ? 0 : 1; + + if (num_sub & (num_sub - 1)) + /* num_sub is not a power of 2. */ + AVL_SET_BALANCE_FACTOR(h, 0) + else + /* num_sub is a power of 2. */ + AVL_SET_BALANCE_FACTOR(h, 1) } - if (num_sub == 2) - { - /* Build a subtree with two nodes, slanting to greater. - ** I arbitrarily chose to always have the extra node in the - ** greater subtree when there is an odd number of nodes to - ** split between the two subtrees. 
*/ - - h = AVL_BUILD_ITER_VAL(p); - L_CHECK_READ_ERROR(0) - AVL_BUILD_ITER_INCR(p) - child = AVL_BUILD_ITER_VAL(p); - L_CHECK_READ_ERROR(0) - AVL_BUILD_ITER_INCR(p) - AVL_SET_LESS(child, AVL_NULL) - AVL_SET_GREATER(child, AVL_NULL) - AVL_SET_BALANCE_FACTOR(child, 0) - AVL_SET_GREATER(h, child) - AVL_SET_LESS(h, AVL_NULL) - AVL_SET_BALANCE_FACTOR(h, 1) - } - else /* num_sub == 1 */ - { - /* Build a subtree with one node. */ - - h = AVL_BUILD_ITER_VAL(p); - L_CHECK_READ_ERROR(0) - AVL_BUILD_ITER_INCR(p) - AVL_SET_LESS(h, AVL_NULL) - AVL_SET_GREATER(h, AVL_NULL) - AVL_SET_BALANCE_FACTOR(h, 0) - } + if (num_sub == num_nodes) + /* We've completed the full tree. */ + break; - while (depth) - { - depth--; - - if (!L_BIT_ARR_VAL(branch, depth)) - /* We've completed a less subtree. */ - break; - - /* We've completed a greater subtree, so attach it to - ** its parent (that is less than it). We pop the parent - ** off the stack of less parents. */ - child = h; - h = less_parent; - less_parent = AVL_GET_GREATER(h, 1); - L_CHECK_READ_ERROR(0) - AVL_SET_GREATER(h, child) - /* num_sub = 2 * (num_sub - rem[depth]) + rem[depth] + 1 */ - num_sub <<= 1; - num_sub += L_BIT_ARR_VAL(rem, depth) ? 0 : 1; - - if (num_sub & (num_sub - 1)) - /* num_sub is not a power of 2. */ - AVL_SET_BALANCE_FACTOR(h, 0) - else - /* num_sub is a power of 2. */ - AVL_SET_BALANCE_FACTOR(h, 1) - } - - if (num_sub == num_nodes) - /* We've completed the full tree. */ - break; - - /* The subtree we've completed is the less subtree of the - ** next node in the sequence. */ - - child = h; - h = AVL_BUILD_ITER_VAL(p); - L_CHECK_READ_ERROR(0) - AVL_BUILD_ITER_INCR(p) - AVL_SET_LESS(h, child) + /* The subtree we've completed is the less subtree of the + ** next node in the sequence. */ - /* Put h into stack of less parents. 
*/ - AVL_SET_GREATER(h, less_parent) - less_parent = h; + child = h; + h = AVL_BUILD_ITER_VAL(p); + L_CHECK_READ_ERROR(0) + AVL_BUILD_ITER_INCR(p) + AVL_SET_LESS(h, child) - /* Proceed to creating greater than subtree of h. */ - L_BIT_ARR_1(branch, depth) - num_sub += L_BIT_ARR_VAL(rem, depth) ? 1 : 0; - depth++; + /* Put h into stack of less parents. */ + AVL_SET_GREATER(h, less_parent) + less_parent = h; - } /* end for ( ; ; ) */ + /* Proceed to creating greater than subtree of h. */ + L_BIT_ARR_1(branch, depth) + num_sub += L_BIT_ARR_VAL(rem, depth) ? 1 : 0; + depth++; - l_tree->root = h; + } /* end for (;; ) */ - return(1); + l_tree->root = h; + + return(1); } #endif @@ -1001,9 +907,8 @@ L_SC int L_(build)( ** invalid. (Depth is zero-base.) It's not necessary to initialize ** iterators prior to passing them to the "start" function. */ -L_SC void L_(init_iter)(L_(iter) *iter) -{ - iter->depth = ~0; +L_SC void L_(init_iter)(L_(iter) *iter) { + iter->depth = ~0; } #endif @@ -1011,7 +916,7 @@ L_SC void L_(init_iter)(L_(iter) *iter) #ifdef AVL_READ_ERRORS_HAPPEN #define L_CHECK_READ_ERROR_INV_DEPTH \ - { if (AVL_READ_ERROR) { iter->depth = ~0; return; } } + { if (AVL_READ_ERROR) { iter->depth = ~0; return; } } #else @@ -1022,174 +927,157 @@ L_SC void L_(init_iter)(L_(iter) *iter) #if (L_IMPL_MASK & AVL_IMPL_START_ITER) L_SC void L_(start_iter)( - L_(avl) *l_tree, L_(iter) *iter, AVL_KEY k, avl_search_type st) -{ - AVL_HANDLE h = l_tree->root; - unsigned d = 0; - int cmp, target_cmp; - - /* Save the tree that we're going to iterate through in a - ** member variable. */ - iter->tree_ = l_tree; - - iter->depth = ~0; + L_(avl) *l_tree, L_(iter) *iter, AVL_KEY k, avl_search_type st) { + AVL_HANDLE h = l_tree->root; + unsigned d = 0; + int cmp, target_cmp; + + /* Save the tree that we're going to iterate through in a + ** member variable. */ + iter->tree_ = l_tree; + + iter->depth = ~0; + + if (h == AVL_NULL) + /* Tree is empty. 
*/ + return; + + if (st & AVL_LESS) + /* Key can be greater than key of starting node. */ + target_cmp = 1; + else if (st & AVL_GREATER) + /* Key can be less than key of starting node. */ + target_cmp = -1; + else + /* Key must be same as key of starting node. */ + target_cmp = 0; + + for (;;) { + cmp = AVL_COMPARE_KEY_NODE(k, h); + + if (cmp == 0) { + if (st & AVL_EQUAL) { + /* Equal node was sought and found as starting node. */ + iter->depth = d; + break; + } + + cmp = -target_cmp; + } else if (target_cmp != 0) + if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT)) + /* cmp and target_cmp are both negative or both positive. */ + iter->depth = d; + + h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1); + L_CHECK_READ_ERROR_INV_DEPTH if (h == AVL_NULL) - /* Tree is empty. */ - return; - - if (st & AVL_LESS) - /* Key can be greater than key of starting node. */ - target_cmp = 1; - else if (st & AVL_GREATER) - /* Key can be less than key of starting node. */ - target_cmp = -1; - else - /* Key must be same as key of starting node. */ - target_cmp = 0; - - for (; ;) - { - cmp = AVL_COMPARE_KEY_NODE(k, h); - - if (cmp == 0) - { - if (st & AVL_EQUAL) - { - /* Equal node was sought and found as starting node. */ - iter->depth = d; - break; - } - - cmp = -target_cmp; - } - else if (target_cmp != 0) - if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT)) - /* cmp and target_cmp are both negative or both positive. */ - iter->depth = d; - - h = cmp < 0 ? 
AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1); - L_CHECK_READ_ERROR_INV_DEPTH - - if (h == AVL_NULL) - break; - - if (cmp > 0) - L_BIT_ARR_1(iter->branch, d) - else - L_BIT_ARR_0(iter->branch, d) - iter->path_h[d++] = h; - } + break; + + if (cmp > 0) + L_BIT_ARR_1(iter->branch, d) + else + L_BIT_ARR_0(iter->branch, d) + iter->path_h[d++] = h; + } } #endif #if (L_IMPL_MASK & AVL_IMPL_START_ITER_LEAST) -L_SC void L_(start_iter_least)(L_(avl) *l_tree, L_(iter) *iter) -{ - AVL_HANDLE h = l_tree->root; +L_SC void L_(start_iter_least)(L_(avl) *l_tree, L_(iter) *iter) { + AVL_HANDLE h = l_tree->root; - iter->tree_ = l_tree; + iter->tree_ = l_tree; - iter->depth = ~0; + iter->depth = ~0; - L_BIT_ARR_ALL(iter->branch, 0) + L_BIT_ARR_ALL(iter->branch, 0) - while (h != AVL_NULL) - { - if (iter->depth != ~0) - iter->path_h[iter->depth] = h; + while (h != AVL_NULL) { + if (iter->depth != ~0) + iter->path_h[iter->depth] = h; - iter->depth++; - h = AVL_GET_LESS(h, 1); - L_CHECK_READ_ERROR_INV_DEPTH - } + iter->depth++; + h = AVL_GET_LESS(h, 1); + L_CHECK_READ_ERROR_INV_DEPTH + } } #endif #if (L_IMPL_MASK & AVL_IMPL_START_ITER_GREATEST) -L_SC void L_(start_iter_greatest)(L_(avl) *l_tree, L_(iter) *iter) -{ - AVL_HANDLE h = l_tree->root; +L_SC void L_(start_iter_greatest)(L_(avl) *l_tree, L_(iter) *iter) { + AVL_HANDLE h = l_tree->root; - iter->tree_ = l_tree; + iter->tree_ = l_tree; - iter->depth = ~0; + iter->depth = ~0; - L_BIT_ARR_ALL(iter->branch, 1) + L_BIT_ARR_ALL(iter->branch, 1) - while (h != AVL_NULL) - { - if (iter->depth != ~0) - iter->path_h[iter->depth] = h; + while (h != AVL_NULL) { + if (iter->depth != ~0) + iter->path_h[iter->depth] = h; - iter->depth++; - h = AVL_GET_GREATER(h, 1); - L_CHECK_READ_ERROR_INV_DEPTH - } + iter->depth++; + h = AVL_GET_GREATER(h, 1); + L_CHECK_READ_ERROR_INV_DEPTH + } } #endif #if (L_IMPL_MASK & AVL_IMPL_GET_ITER) -L_SC AVL_HANDLE L_(get_iter)(L_(iter) *iter) -{ - if (iter->depth == ~0) - return(AVL_NULL); +L_SC AVL_HANDLE 
L_(get_iter)(L_(iter) *iter) { + if (iter->depth == ~0) + return(AVL_NULL); - return(iter->depth == 0 ? - iter->tree_->root : iter->path_h[iter->depth - 1]); + return(iter->depth == 0 ? + iter->tree_->root : iter->path_h[iter->depth - 1]); } #endif #if (L_IMPL_MASK & AVL_IMPL_INCR_ITER) -L_SC void L_(incr_iter)(L_(iter) *iter) -{ +L_SC void L_(incr_iter)(L_(iter) *iter) { #define l_tree (iter->tree_) - if (iter->depth != ~0) - { - AVL_HANDLE h = - AVL_GET_GREATER((iter->depth == 0 ? - iter->tree_->root : iter->path_h[iter->depth - 1]), 1); - L_CHECK_READ_ERROR_INV_DEPTH + if (iter->depth != ~0) { + AVL_HANDLE h = + AVL_GET_GREATER((iter->depth == 0 ? + iter->tree_->root : iter->path_h[iter->depth - 1]), 1); + L_CHECK_READ_ERROR_INV_DEPTH - if (h == AVL_NULL) - do - { - if (iter->depth == 0) - { - iter->depth = ~0; - break; - } - - iter->depth--; - } - while (L_BIT_ARR_VAL(iter->branch, iter->depth)); - else - { - L_BIT_ARR_1(iter->branch, iter->depth) - iter->path_h[iter->depth++] = h; + if (h == AVL_NULL) + do { + if (iter->depth == 0) { + iter->depth = ~0; + break; + } - for (; ;) - { - h = AVL_GET_LESS(h, 1); - L_CHECK_READ_ERROR_INV_DEPTH + iter->depth--; + } while (L_BIT_ARR_VAL(iter->branch, iter->depth)); + else { + L_BIT_ARR_1(iter->branch, iter->depth) + iter->path_h[iter->depth++] = h; - if (h == AVL_NULL) - break; + for (;;) { + h = AVL_GET_LESS(h, 1); + L_CHECK_READ_ERROR_INV_DEPTH - L_BIT_ARR_0(iter->branch, iter->depth) - iter->path_h[iter->depth++] = h; - } - } + if (h == AVL_NULL) + break; + + L_BIT_ARR_0(iter->branch, iter->depth) + iter->path_h[iter->depth++] = h; + } } + } #undef l_tree } @@ -1198,47 +1086,40 @@ L_SC void L_(incr_iter)(L_(iter) *iter) #if (L_IMPL_MASK & AVL_IMPL_DECR_ITER) -L_SC void L_(decr_iter)(L_(iter) *iter) -{ +L_SC void L_(decr_iter)(L_(iter) *iter) { #define l_tree (iter->tree_) - if (iter->depth != ~0) - { - AVL_HANDLE h = - AVL_GET_LESS((iter->depth == 0 ? 
- iter->tree_->root : iter->path_h[iter->depth - 1]), 1); - L_CHECK_READ_ERROR_INV_DEPTH + if (iter->depth != ~0) { + AVL_HANDLE h = + AVL_GET_LESS((iter->depth == 0 ? + iter->tree_->root : iter->path_h[iter->depth - 1]), 1); + L_CHECK_READ_ERROR_INV_DEPTH - if (h == AVL_NULL) - do - { - if (iter->depth == 0) - { - iter->depth = ~0; - break; - } - - iter->depth--; - } - while (!L_BIT_ARR_VAL(iter->branch, iter->depth)); - else - { - L_BIT_ARR_0(iter->branch, iter->depth) - iter->path_h[iter->depth++] = h; + if (h == AVL_NULL) + do { + if (iter->depth == 0) { + iter->depth = ~0; + break; + } - for (; ;) - { - h = AVL_GET_GREATER(h, 1); - L_CHECK_READ_ERROR_INV_DEPTH + iter->depth--; + } while (!L_BIT_ARR_VAL(iter->branch, iter->depth)); + else { + L_BIT_ARR_0(iter->branch, iter->depth) + iter->path_h[iter->depth++] = h; - if (h == AVL_NULL) - break; + for (;;) { + h = AVL_GET_GREATER(h, 1); + L_CHECK_READ_ERROR_INV_DEPTH - L_BIT_ARR_1(iter->branch, iter->depth) - iter->path_h[iter->depth++] = h; - } - } + if (h == AVL_NULL) + break; + + L_BIT_ARR_1(iter->branch, iter->depth) + iter->path_h[iter->depth++] = h; + } } + } #undef l_tree } diff --git a/vpx_mem/memory_manager/include/heapmm.h b/vpx_mem/memory_manager/include/heapmm.h index 33004cadc..4934c2d8a 100644 --- a/vpx_mem/memory_manager/include/heapmm.h +++ b/vpx_mem/memory_manager/include/heapmm.h @@ -81,30 +81,29 @@ #include "hmm_cnfg.h" /* Heap descriptor. */ -typedef struct HMM_UNIQUE(structure) -{ - /* private: */ - - /* Pointer to (payload of) root node in AVL tree. This field should - ** really be the AVL tree descriptor (type avl_avl). But (in the - ** instantiation of the AVL tree generic package used in package) the - ** AVL tree descriptor simply contains a pointer to the root. So, - ** whenever a pointer to the AVL tree descriptor is needed, I use the - ** cast: - ** - ** (avl_avl *) &(heap_desc->avl_tree_root) - ** - ** (where heap_desc is a pointer to a heap descriptor). 
This trick - ** allows me to avoid including cavl_if.h in this external header. */ - void *avl_tree_root; - - /* Pointer to first byte of last block freed, after any coalescing. */ - void *last_freed; - - /* public: */ - - HMM_UNIQUE(size_bau) num_baus_can_shrink; - void *end_of_shrinkable_chunk; +typedef struct HMM_UNIQUE(structure) { + /* private: */ + + /* Pointer to (payload of) root node in AVL tree. This field should + ** really be the AVL tree descriptor (type avl_avl). But (in the + ** instantiation of the AVL tree generic package used in package) the + ** AVL tree descriptor simply contains a pointer to the root. So, + ** whenever a pointer to the AVL tree descriptor is needed, I use the + ** cast: + ** + ** (avl_avl *) &(heap_desc->avl_tree_root) + ** + ** (where heap_desc is a pointer to a heap descriptor). This trick + ** allows me to avoid including cavl_if.h in this external header. */ + void *avl_tree_root; + + /* Pointer to first byte of last block freed, after any coalescing. 
*/ + void *last_freed; + + /* public: */ + + HMM_UNIQUE(size_bau) num_baus_can_shrink; + void *end_of_shrinkable_chunk; } HMM_UNIQUE(descriptor); @@ -113,41 +112,41 @@ HMM_UNIQUE(descriptor); void HMM_UNIQUE(init)(HMM_UNIQUE(descriptor) *desc); void *HMM_UNIQUE(alloc)( - HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) num_addr_align_units); + HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) num_addr_align_units); /* NOT YET IMPLEMENTED */ void *HMM_UNIQUE(greedy_alloc)( - HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) needed_addr_align_units, - HMM_UNIQUE(size_aau) coveted_addr_align_units); + HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) needed_addr_align_units, + HMM_UNIQUE(size_aau) coveted_addr_align_units); int HMM_UNIQUE(resize)( - HMM_UNIQUE(descriptor) *desc, void *mem, - HMM_UNIQUE(size_aau) num_addr_align_units); + HMM_UNIQUE(descriptor) *desc, void *mem, + HMM_UNIQUE(size_aau) num_addr_align_units); /* NOT YET IMPLEMENTED */ int HMM_UNIQUE(greedy_resize)( - HMM_UNIQUE(descriptor) *desc, void *mem, - HMM_UNIQUE(size_aau) needed_addr_align_units, - HMM_UNIQUE(size_aau) coveted_addr_align_units); + HMM_UNIQUE(descriptor) *desc, void *mem, + HMM_UNIQUE(size_aau) needed_addr_align_units, + HMM_UNIQUE(size_aau) coveted_addr_align_units); void HMM_UNIQUE(free)(HMM_UNIQUE(descriptor) *desc, void *mem); HMM_UNIQUE(size_aau) HMM_UNIQUE(true_size)(void *mem); HMM_UNIQUE(size_aau) HMM_UNIQUE(largest_available)( - HMM_UNIQUE(descriptor) *desc); + HMM_UNIQUE(descriptor) *desc); void HMM_UNIQUE(new_chunk)( - HMM_UNIQUE(descriptor) *desc, void *start_of_chunk, - HMM_UNIQUE(size_bau) num_block_align_units); + HMM_UNIQUE(descriptor) *desc, void *start_of_chunk, + HMM_UNIQUE(size_bau) num_block_align_units); void HMM_UNIQUE(grow_chunk)( - HMM_UNIQUE(descriptor) *desc, void *end_of_chunk, - HMM_UNIQUE(size_bau) num_block_align_units); + HMM_UNIQUE(descriptor) *desc, void *end_of_chunk, + HMM_UNIQUE(size_bau) num_block_align_units); /* NOT YET IMPLEMENTED */ 
void HMM_UNIQUE(shrink_chunk)( - HMM_UNIQUE(descriptor) *desc, - HMM_UNIQUE(size_bau) num_block_align_units); + HMM_UNIQUE(descriptor) *desc, + HMM_UNIQUE(size_bau) num_block_align_units); #endif /* defined HMM_PROCESS */ diff --git a/vpx_mem/memory_manager/include/hmm_cnfg.h b/vpx_mem/memory_manager/include/hmm_cnfg.h index 30b9f5045..2c3391d7b 100644 --- a/vpx_mem/memory_manager/include/hmm_cnfg.h +++ b/vpx_mem/memory_manager/include/hmm_cnfg.h @@ -45,8 +45,8 @@ #define HMM_UNIQUE(BASE) hmm_ ## BASE /* Number of bytes in an Address Alignment Unit (AAU). */ -//fwg -//#define HMM_ADDR_ALIGN_UNIT sizeof(int) +// fwg +// #define HMM_ADDR_ALIGN_UNIT sizeof(int) #define HMM_ADDR_ALIGN_UNIT 32 /* Number of AAUs in a Block Alignment Unit (BAU). */ @@ -65,7 +65,7 @@ void hmm_dflt_abort(const char *, const char *); ** statement. If you remove the definition of this macro, no self-auditing ** will be performed. */ #define HMM_AUDIT_FAIL \ - hmm_dflt_abort(__FILE__, HMM_SYM_TO_STRING(__LINE__)); + hmm_dflt_abort(__FILE__, HMM_SYM_TO_STRING(__LINE__)); #elif HMM_CNFG_NUM == 0 @@ -90,8 +90,8 @@ extern const char *HMM_UNIQUE(fail_file); extern unsigned HMM_UNIQUE(fail_line); #define HMM_AUDIT_FAIL \ - { HMM_UNIQUE(fail_file) = __FILE__; HMM_UNIQUE(fail_line) = __LINE__; \ - longjmp(HMM_UNIQUE(jmp_buf), 1); } + { HMM_UNIQUE(fail_file) = __FILE__; HMM_UNIQUE(fail_line) = __LINE__; \ + longjmp(HMM_UNIQUE(jmp_buf), 1); } #elif HMM_CNFG_NUM == 1 diff --git a/vpx_mem/memory_manager/include/hmm_intrnl.h b/vpx_mem/memory_manager/include/hmm_intrnl.h index 5d62abc59..27cefe449 100644 --- a/vpx_mem/memory_manager/include/hmm_intrnl.h +++ b/vpx_mem/memory_manager/include/hmm_intrnl.h @@ -26,34 +26,32 @@ /* Mask of high bit of variable of size_bau type. */ #define HIGH_BIT_BAU_SIZE \ - ((U(size_bau)) ~ (((U(size_bau)) ~ (U(size_bau)) 0) >> 1)) + ((U(size_bau)) ~ (((U(size_bau)) ~ (U(size_bau)) 0) >> 1)) /* Add a given number of AAUs to pointer. 
*/ #define AAUS_FORWARD(PTR, AAU_OFFSET) \ - (((char *) (PTR)) + ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT))) + (((char *) (PTR)) + ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT))) /* Subtract a given number of AAUs from pointer. */ #define AAUS_BACKWARD(PTR, AAU_OFFSET) \ - (((char *) (PTR)) - ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT))) + (((char *) (PTR)) - ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT))) /* Add a given number of BAUs to a pointer. */ #define BAUS_FORWARD(PTR, BAU_OFFSET) \ - AAUS_FORWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) + AAUS_FORWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) /* Subtract a given number of BAUs to a pointer. */ #define BAUS_BACKWARD(PTR, BAU_OFFSET) \ - AAUS_BACKWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) + AAUS_BACKWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) -typedef struct head_struct -{ - /* Sizes in Block Alignment Units. */ - HMM_UNIQUE(size_bau) previous_block_size, block_size; +typedef struct head_struct { + /* Sizes in Block Alignment Units. */ + HMM_UNIQUE(size_bau) previous_block_size, block_size; } head_record; -typedef struct ptr_struct -{ - struct ptr_struct *self, *prev, *next; +typedef struct ptr_struct { + struct ptr_struct *self, *prev, *next; } ptr_record; @@ -71,50 +69,50 @@ ptr_record; /* Minimum number of BAUs in a block (allowing room for the pointer record. */ #define MIN_BLOCK_BAUS \ - DIV_ROUND_UP(HEAD_AAUS + PTR_RECORD_AAUS, HMM_BLOCK_ALIGN_UNIT) + DIV_ROUND_UP(HEAD_AAUS + PTR_RECORD_AAUS, HMM_BLOCK_ALIGN_UNIT) /* Return number of BAUs in block (masking off high bit containing block ** status). */ #define BLOCK_BAUS(HEAD_PTR) \ - (((head_record *) (HEAD_PTR))->block_size & ~HIGH_BIT_BAU_SIZE) + (((head_record *) (HEAD_PTR))->block_size & ~HIGH_BIT_BAU_SIZE) /* Return number of BAUs in previous block (masking off high bit containing ** block status). 
*/ #define PREV_BLOCK_BAUS(HEAD_PTR) \ - (((head_record *) (HEAD_PTR))->previous_block_size & ~HIGH_BIT_BAU_SIZE) + (((head_record *) (HEAD_PTR))->previous_block_size & ~HIGH_BIT_BAU_SIZE) /* Set number of BAUs in previous block, preserving high bit containing ** block status. */ #define SET_PREV_BLOCK_BAUS(HEAD_PTR, N_BAUS) \ - { register head_record *h_ptr = (head_record *) (HEAD_PTR); \ - h_ptr->previous_block_size &= HIGH_BIT_BAU_SIZE; \ - h_ptr->previous_block_size |= (N_BAUS); } + { register head_record *h_ptr = (head_record *) (HEAD_PTR); \ + h_ptr->previous_block_size &= HIGH_BIT_BAU_SIZE; \ + h_ptr->previous_block_size |= (N_BAUS); } /* Convert pointer to pointer record of block to pointer to block's head ** record. */ #define PTR_REC_TO_HEAD(PTR_REC_PTR) \ - ((head_record *) AAUS_BACKWARD(PTR_REC_PTR, HEAD_AAUS)) + ((head_record *) AAUS_BACKWARD(PTR_REC_PTR, HEAD_AAUS)) /* Convert pointer to block head to pointer to block's pointer record. */ #define HEAD_TO_PTR_REC(HEAD_PTR) \ - ((ptr_record *) AAUS_FORWARD(HEAD_PTR, HEAD_AAUS)) + ((ptr_record *) AAUS_FORWARD(HEAD_PTR, HEAD_AAUS)) /* Returns non-zero if block is allocated. */ #define IS_BLOCK_ALLOCATED(HEAD_PTR) \ - (((((head_record *) (HEAD_PTR))->block_size | \ - ((head_record *) (HEAD_PTR))->previous_block_size) & \ - HIGH_BIT_BAU_SIZE) == 0) + (((((head_record *) (HEAD_PTR))->block_size | \ + ((head_record *) (HEAD_PTR))->previous_block_size) & \ + HIGH_BIT_BAU_SIZE) == 0) #define MARK_BLOCK_ALLOCATED(HEAD_PTR) \ - { register head_record *h_ptr = (head_record *) (HEAD_PTR); \ - h_ptr->block_size &= ~HIGH_BIT_BAU_SIZE; \ - h_ptr->previous_block_size &= ~HIGH_BIT_BAU_SIZE; } + { register head_record *h_ptr = (head_record *) (HEAD_PTR); \ + h_ptr->block_size &= ~HIGH_BIT_BAU_SIZE; \ + h_ptr->previous_block_size &= ~HIGH_BIT_BAU_SIZE; } /* Mark a block as free when it is not the first block in a bin (and ** therefore not a node in the AVL tree). 
*/ #define MARK_SUCCESSIVE_BLOCK_IN_FREE_BIN(HEAD_PTR) \ - { register head_record *h_ptr = (head_record *) (HEAD_PTR); \ - h_ptr->block_size |= HIGH_BIT_BAU_SIZE; } + { register head_record *h_ptr = (head_record *) (HEAD_PTR); \ + h_ptr->block_size |= HIGH_BIT_BAU_SIZE; } /* Prototypes for internal functions implemented in one file and called in ** another. @@ -125,7 +123,7 @@ void U(into_free_collection)(U(descriptor) *desc, head_record *head_ptr); void U(out_of_free_collection)(U(descriptor) *desc, head_record *head_ptr); void *U(alloc_from_bin)( - U(descriptor) *desc, ptr_record *bin_front_ptr, U(size_bau) n_baus); + U(descriptor) *desc, ptr_record *bin_front_ptr, U(size_bau) n_baus); #ifdef HMM_AUDIT_FAIL @@ -137,12 +135,12 @@ int U(audit_block_fail_dummy_return)(void); /* Auditing a block consists of checking that the size in its head ** matches the previous block size in the head of the next block. */ #define AUDIT_BLOCK_AS_EXPR(HEAD_PTR) \ - ((BLOCK_BAUS(HEAD_PTR) == \ - PREV_BLOCK_BAUS(BAUS_FORWARD(HEAD_PTR, BLOCK_BAUS(HEAD_PTR)))) ? \ - 0 : U(audit_block_fail_dummy_return)()) + ((BLOCK_BAUS(HEAD_PTR) == \ + PREV_BLOCK_BAUS(BAUS_FORWARD(HEAD_PTR, BLOCK_BAUS(HEAD_PTR)))) ? 
\ + 0 : U(audit_block_fail_dummy_return)()) #define AUDIT_BLOCK(HEAD_PTR) \ - { void *h_ptr = (HEAD_PTR); AUDIT_BLOCK_AS_EXPR(h_ptr); } + { void *h_ptr = (HEAD_PTR); AUDIT_BLOCK_AS_EXPR(h_ptr); } #endif diff --git a/vpx_mem/vpx_mem.c b/vpx_mem/vpx_mem.c index eade43222..059248bab 100644 --- a/vpx_mem/vpx_mem.c +++ b/vpx_mem/vpx_mem.c @@ -51,15 +51,14 @@ static void *vpx_mm_realloc(void *memblk, size_t size); #endif /*CONFIG_MEM_MANAGER*/ #if USE_GLOBAL_FUNCTION_POINTERS -struct GLOBAL_FUNC_POINTERS -{ - g_malloc_func g_malloc; - g_calloc_func g_calloc; - g_realloc_func g_realloc; - g_free_func g_free; - g_memcpy_func g_memcpy; - g_memset_func g_memset; - g_memmove_func g_memmove; +struct GLOBAL_FUNC_POINTERS { + g_malloc_func g_malloc; + g_calloc_func g_calloc; + g_realloc_func g_realloc; + g_free_func g_free; + g_memcpy_func g_memcpy; + g_memset_func g_memset; + g_memmove_func g_memmove; } *g_func = NULL; # define VPX_MALLOC_L g_func->g_malloc @@ -77,346 +76,314 @@ struct GLOBAL_FUNC_POINTERS # define VPX_MEMMOVE_L memmove #endif /* USE_GLOBAL_FUNCTION_POINTERS */ -unsigned int vpx_mem_get_version() -{ - unsigned int ver = ((unsigned int)(unsigned char)VPX_MEM_VERSION_CHIEF << 24 | - (unsigned int)(unsigned char)VPX_MEM_VERSION_MAJOR << 16 | - (unsigned int)(unsigned char)VPX_MEM_VERSION_MINOR << 8 | - (unsigned int)(unsigned char)VPX_MEM_VERSION_PATCH); - return ver; +unsigned int vpx_mem_get_version() { + unsigned int ver = ((unsigned int)(unsigned char)VPX_MEM_VERSION_CHIEF << 24 | + (unsigned int)(unsigned char)VPX_MEM_VERSION_MAJOR << 16 | + (unsigned int)(unsigned char)VPX_MEM_VERSION_MINOR << 8 | + (unsigned int)(unsigned char)VPX_MEM_VERSION_PATCH); + return ver; } -int vpx_mem_set_heap_size(size_t size) -{ - int ret = -1; +int vpx_mem_set_heap_size(size_t size) { + int ret = -1; #if CONFIG_MEM_MANAGER #if MM_DYNAMIC_MEMORY - if (!g_mng_memory_allocated && size) - { - g_mm_memory_size = size; - ret = 0; - } - else - ret = -3; + if (!g_mng_memory_allocated 
&& size) { + g_mm_memory_size = size; + ret = 0; + } else + ret = -3; #else - ret = -2; + ret = -2; #endif #else - (void)size; + (void)size; #endif - return ret; + return ret; } -void *vpx_memalign(size_t align, size_t size) -{ - void *addr, - * x = NULL; +void *vpx_memalign(size_t align, size_t size) { + void *addr, + * x = NULL; #if CONFIG_MEM_MANAGER - int number_aau; + int number_aau; - if (vpx_mm_create_heap_memory() < 0) - { - _P(printf("[vpx][mm] ERROR vpx_memalign() Couldn't create memory for Heap.\n");) - } + if (vpx_mm_create_heap_memory() < 0) { + _P(printf("[vpx][mm] ERROR vpx_memalign() Couldn't create memory for Heap.\n");) + } - number_aau = ((size + align - 1 + ADDRESS_STORAGE_SIZE) >> - SHIFT_HMM_ADDR_ALIGN_UNIT) + 1; + number_aau = ((size + align - 1 + ADDRESS_STORAGE_SIZE) >> + SHIFT_HMM_ADDR_ALIGN_UNIT) + 1; - addr = hmm_alloc(&hmm_d, number_aau); + addr = hmm_alloc(&hmm_d, number_aau); #else - addr = VPX_MALLOC_L(size + align - 1 + ADDRESS_STORAGE_SIZE); + addr = VPX_MALLOC_L(size + align - 1 + ADDRESS_STORAGE_SIZE); #endif /*CONFIG_MEM_MANAGER*/ - if (addr) - { - x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align); - /* save the actual malloc address */ - ((size_t *)x)[-1] = (size_t)addr; - } + if (addr) { + x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align); + /* save the actual malloc address */ + ((size_t *)x)[-1] = (size_t)addr; + } - return x; + return x; } -void *vpx_malloc(size_t size) -{ - return vpx_memalign(DEFAULT_ALIGNMENT, size); +void *vpx_malloc(size_t size) { + return vpx_memalign(DEFAULT_ALIGNMENT, size); } -void *vpx_calloc(size_t num, size_t size) -{ - void *x; +void *vpx_calloc(size_t num, size_t size) { + void *x; - x = vpx_memalign(DEFAULT_ALIGNMENT, num * size); + x = vpx_memalign(DEFAULT_ALIGNMENT, num * size); - if (x) - VPX_MEMSET_L(x, 0, num * size); + if (x) + VPX_MEMSET_L(x, 0, num * size); - return x; + return x; } -void *vpx_realloc(void *memblk, size_t size) -{ - void 
*addr, - * new_addr = NULL; - int align = DEFAULT_ALIGNMENT; - - /* - The realloc() function changes the size of the object pointed to by - ptr to the size specified by size, and returns a pointer to the - possibly moved block. The contents are unchanged up to the lesser - of the new and old sizes. If ptr is null, realloc() behaves like - malloc() for the specified size. If size is zero (0) and ptr is - not a null pointer, the object pointed to is freed. - */ - if (!memblk) - new_addr = vpx_malloc(size); - else if (!size) - vpx_free(memblk); - else - { - addr = (void *)(((size_t *)memblk)[-1]); - memblk = NULL; +void *vpx_realloc(void *memblk, size_t size) { + void *addr, + * new_addr = NULL; + int align = DEFAULT_ALIGNMENT; + + /* + The realloc() function changes the size of the object pointed to by + ptr to the size specified by size, and returns a pointer to the + possibly moved block. The contents are unchanged up to the lesser + of the new and old sizes. If ptr is null, realloc() behaves like + malloc() for the specified size. If size is zero (0) and ptr is + not a null pointer, the object pointed to is freed. 
+ */ + if (!memblk) + new_addr = vpx_malloc(size); + else if (!size) + vpx_free(memblk); + else { + addr = (void *)(((size_t *)memblk)[-1]); + memblk = NULL; #if CONFIG_MEM_MANAGER - new_addr = vpx_mm_realloc(addr, size + align + ADDRESS_STORAGE_SIZE); + new_addr = vpx_mm_realloc(addr, size + align + ADDRESS_STORAGE_SIZE); #else - new_addr = VPX_REALLOC_L(addr, size + align + ADDRESS_STORAGE_SIZE); + new_addr = VPX_REALLOC_L(addr, size + align + ADDRESS_STORAGE_SIZE); #endif - if (new_addr) - { - addr = new_addr; - new_addr = (void *)(((size_t) - ((unsigned char *)new_addr + ADDRESS_STORAGE_SIZE) + (align - 1)) & - (size_t) - align); - /* save the actual malloc address */ - ((size_t *)new_addr)[-1] = (size_t)addr; - } + if (new_addr) { + addr = new_addr; + new_addr = (void *)(((size_t) + ((unsigned char *)new_addr + ADDRESS_STORAGE_SIZE) + (align - 1)) & + (size_t) - align); + /* save the actual malloc address */ + ((size_t *)new_addr)[-1] = (size_t)addr; } + } - return new_addr; + return new_addr; } -void vpx_free(void *memblk) -{ - if (memblk) - { - void *addr = (void *)(((size_t *)memblk)[-1]); +void vpx_free(void *memblk) { + if (memblk) { + void *addr = (void *)(((size_t *)memblk)[-1]); #if CONFIG_MEM_MANAGER - hmm_free(&hmm_d, addr); + hmm_free(&hmm_d, addr); #else - VPX_FREE_L(addr); + VPX_FREE_L(addr); #endif - } + } } #if CONFIG_MEM_TRACKER -void *xvpx_memalign(size_t align, size_t size, char *file, int line) -{ +void *xvpx_memalign(size_t align, size_t size, char *file, int line) { #if TRY_BOUNDS_CHECK - unsigned char *x_bounds; + unsigned char *x_bounds; #endif - void *x; + void *x; - if (g_alloc_count == 0) - { + if (g_alloc_count == 0) { #if TRY_BOUNDS_CHECK - int i_rv = vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE); + int i_rv = vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE); #else - int i_rv = vpx_memory_tracker_init(0, 0); + int i_rv = vpx_memory_tracker_init(0, 0); #endif - if (i_rv < 0) - { - 
_P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");) - } + if (i_rv < 0) { + _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");) } + } #if TRY_BOUNDS_CHECK - { - int i; - unsigned int tempme = BOUNDS_CHECK_VALUE; - - x_bounds = vpx_memalign(align, size + (BOUNDS_CHECK_PAD_SIZE * 2)); - - if (x_bounds) - { - /*we're aligning the address twice here but to keep things - consistent we want to have the padding come before the stored - address so no matter what free function gets called we will - attempt to free the correct address*/ - x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]); - x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE, - (int)align); - /* save the actual malloc address */ - ((size_t *)x)[-1] = (size_t)x_bounds; - - for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int)) - { - VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int)); - VPX_MEMCPY_L((unsigned char *)x + size + i, - &tempme, sizeof(unsigned int)); - } - } - else - x = NULL; - } + { + int i; + unsigned int tempme = BOUNDS_CHECK_VALUE; + + x_bounds = vpx_memalign(align, size + (BOUNDS_CHECK_PAD_SIZE * 2)); + + if (x_bounds) { + /*we're aligning the address twice here but to keep things + consistent we want to have the padding come before the stored + address so no matter what free function gets called we will + attempt to free the correct address*/ + x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]); + x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE, + (int)align); + /* save the actual malloc address */ + ((size_t *)x)[-1] = (size_t)x_bounds; + + for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int)) { + VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int)); + VPX_MEMCPY_L((unsigned char *)x + size + i, + &tempme, sizeof(unsigned int)); + } + } else + x = NULL; + } #else - x = vpx_memalign(align, size); + x = vpx_memalign(align, size); #endif 
/*TRY_BOUNDS_CHECK*/ - g_alloc_count++; + g_alloc_count++; - vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1); + vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1); - return x; + return x; } -void *xvpx_malloc(size_t size, char *file, int line) -{ - return xvpx_memalign(DEFAULT_ALIGNMENT, size, file, line); +void *xvpx_malloc(size_t size, char *file, int line) { + return xvpx_memalign(DEFAULT_ALIGNMENT, size, file, line); } -void *xvpx_calloc(size_t num, size_t size, char *file, int line) -{ - void *x = xvpx_memalign(DEFAULT_ALIGNMENT, num * size, file, line); +void *xvpx_calloc(size_t num, size_t size, char *file, int line) { + void *x = xvpx_memalign(DEFAULT_ALIGNMENT, num * size, file, line); - if (x) - VPX_MEMSET_L(x, 0, num * size); + if (x) + VPX_MEMSET_L(x, 0, num * size); - return x; + return x; } -void *xvpx_realloc(void *memblk, size_t size, char *file, int line) -{ - struct mem_block *p = NULL; - int orig_size = 0, - orig_line = 0; - char *orig_file = NULL; +void *xvpx_realloc(void *memblk, size_t size, char *file, int line) { + struct mem_block *p = NULL; + int orig_size = 0, + orig_line = 0; + char *orig_file = NULL; #if TRY_BOUNDS_CHECK - unsigned char *x_bounds = memblk ? - (unsigned char *)(((size_t *)memblk)[-1]) : - NULL; + unsigned char *x_bounds = memblk ? 
+ (unsigned char *)(((size_t *)memblk)[-1]) : + NULL; #endif - void *x; + void *x; - if (g_alloc_count == 0) - { + if (g_alloc_count == 0) { #if TRY_BOUNDS_CHECK - if (!vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE)) + if (!vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE)) #else - if (!vpx_memory_tracker_init(0, 0)) + if (!vpx_memory_tracker_init(0, 0)) #endif - { - _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");) - } - } - - if ((p = vpx_memory_tracker_find((size_t)memblk))) { - orig_size = p->size; - orig_file = p->file; - orig_line = p->line; + _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");) } + } + + if ((p = vpx_memory_tracker_find((size_t)memblk))) { + orig_size = p->size; + orig_file = p->file; + orig_line = p->line; + } #if TRY_BOUNDS_CHECK_ON_FREE - vpx_memory_tracker_check_integrity(file, line); + vpx_memory_tracker_check_integrity(file, line); #endif - /* have to do this regardless of success, because - * the memory that does get realloc'd may change - * the bounds values of this block - */ - vpx_memory_tracker_remove((size_t)memblk); + /* have to do this regardless of success, because + * the memory that does get realloc'd may change + * the bounds values of this block + */ + vpx_memory_tracker_remove((size_t)memblk); #if TRY_BOUNDS_CHECK - { - int i; - unsigned int tempme = BOUNDS_CHECK_VALUE; - - x_bounds = vpx_realloc(memblk, size + (BOUNDS_CHECK_PAD_SIZE * 2)); - - if (x_bounds) - { - x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]); - x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE, - (int)DEFAULT_ALIGNMENT); - /* save the actual malloc address */ - ((size_t *)x)[-1] = (size_t)x_bounds; - - for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int)) - { - VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int)); - VPX_MEMCPY_L((unsigned char *)x + size + i, - &tempme, sizeof(unsigned int)); - } - } - else 
- x = NULL; - } + { + int i; + unsigned int tempme = BOUNDS_CHECK_VALUE; + + x_bounds = vpx_realloc(memblk, size + (BOUNDS_CHECK_PAD_SIZE * 2)); + + if (x_bounds) { + x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]); + x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE, + (int)DEFAULT_ALIGNMENT); + /* save the actual malloc address */ + ((size_t *)x)[-1] = (size_t)x_bounds; + + for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int)) { + VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int)); + VPX_MEMCPY_L((unsigned char *)x + size + i, + &tempme, sizeof(unsigned int)); + } + } else + x = NULL; + } #else - x = vpx_realloc(memblk, size); + x = vpx_realloc(memblk, size); #endif /*TRY_BOUNDS_CHECK*/ - if (!memblk) ++g_alloc_count; + if (!memblk) ++g_alloc_count; - if (x) - vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1); - else - vpx_memory_tracker_add((size_t)memblk, orig_size, orig_file, orig_line, 1); + if (x) + vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1); + else + vpx_memory_tracker_add((size_t)memblk, orig_size, orig_file, orig_line, 1); - return x; + return x; } -void xvpx_free(void *p_address, char *file, int line) -{ +void xvpx_free(void *p_address, char *file, int line) { #if TRY_BOUNDS_CHECK - unsigned char *p_bounds_address = (unsigned char *)p_address; - /*p_bounds_address -= BOUNDS_CHECK_PAD_SIZE;*/ + unsigned char *p_bounds_address = (unsigned char *)p_address; + /*p_bounds_address -= BOUNDS_CHECK_PAD_SIZE;*/ #endif #if !TRY_BOUNDS_CHECK_ON_FREE - (void)file; - (void)line; + (void)file; + (void)line; #endif - if (p_address) - { + if (p_address) { #if TRY_BOUNDS_CHECK_ON_FREE - vpx_memory_tracker_check_integrity(file, line); + vpx_memory_tracker_check_integrity(file, line); #endif - /* if the addr isn't found in the list, assume it was allocated via - * vpx_ calls not xvpx_, therefore it does not contain any padding - */ - if (vpx_memory_tracker_remove((size_t)p_address) 
== -2) - { - p_bounds_address = p_address; - _P(fprintf(stderr, "[vpx_mem][xvpx_free] addr: %p not found in" - " list; freed from file:%s" - " line:%d\n", p_address, file, line)); - } - else - --g_alloc_count; + /* if the addr isn't found in the list, assume it was allocated via + * vpx_ calls not xvpx_, therefore it does not contain any padding + */ + if (vpx_memory_tracker_remove((size_t)p_address) == -2) { + p_bounds_address = p_address; + _P(fprintf(stderr, "[vpx_mem][xvpx_free] addr: %p not found in" + " list; freed from file:%s" + " line:%d\n", p_address, file, line)); + } else + --g_alloc_count; #if TRY_BOUNDS_CHECK - vpx_free(p_bounds_address); + vpx_free(p_bounds_address); #else - vpx_free(p_address); + vpx_free(p_address); #endif - if (!g_alloc_count) - vpx_memory_tracker_destroy(); - } + if (!g_alloc_count) + vpx_memory_tracker_destroy(); + } } #endif /*CONFIG_MEM_TRACKER*/ @@ -426,297 +393,265 @@ void xvpx_free(void *p_address, char *file, int line) #include <task_lib.h> /*for task_delay()*/ /* This function is only used to get a stack trace of the player object so we can se where we are having a problem. 
*/ -static int get_my_tt(int task) -{ - tt(task); +static int get_my_tt(int task) { + tt(task); - return 0; + return 0; } -static void vx_sleep(int msec) -{ - int ticks_to_sleep = 0; +static void vx_sleep(int msec) { + int ticks_to_sleep = 0; - if (msec) - { - int msec_per_tick = 1000 / sys_clk_rate_get(); + if (msec) { + int msec_per_tick = 1000 / sys_clk_rate_get(); - if (msec < msec_per_tick) - ticks_to_sleep++; - else - ticks_to_sleep = msec / msec_per_tick; - } + if (msec < msec_per_tick) + ticks_to_sleep++; + else + ticks_to_sleep = msec / msec_per_tick; + } - task_delay(ticks_to_sleep); + task_delay(ticks_to_sleep); } #endif #endif -void *vpx_memcpy(void *dest, const void *source, size_t length) -{ +void *vpx_memcpy(void *dest, const void *source, size_t length) { #if CONFIG_MEM_CHECKS - if (((int)dest < 0x4000) || ((int)source < 0x4000)) - { - _P(printf("WARNING: vpx_memcpy dest:0x%x source:0x%x len:%d\n", (int)dest, (int)source, length);) + if (((int)dest < 0x4000) || ((int)source < 0x4000)) { + _P(printf("WARNING: vpx_memcpy dest:0x%x source:0x%x len:%d\n", (int)dest, (int)source, length);) #if defined(VXWORKS) - sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0); + sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0); - vx_sleep(10000); + vx_sleep(10000); #endif - } + } #endif - return VPX_MEMCPY_L(dest, source, length); + return VPX_MEMCPY_L(dest, source, length); } -void *vpx_memset(void *dest, int val, size_t length) -{ +void *vpx_memset(void *dest, int val, size_t length) { #if CONFIG_MEM_CHECKS - if ((int)dest < 0x4000) - { - _P(printf("WARNING: vpx_memset dest:0x%x val:%d len:%d\n", (int)dest, val, length);) + if ((int)dest < 0x4000) { + _P(printf("WARNING: vpx_memset dest:0x%x val:%d len:%d\n", (int)dest, val, length);) #if defined(VXWORKS) - sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0); + sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0); - vx_sleep(10000); + vx_sleep(10000); #endif - } + } #endif - return VPX_MEMSET_L(dest, val, 
length); + return VPX_MEMSET_L(dest, val, length); } -void *vpx_memmove(void *dest, const void *src, size_t count) -{ +void *vpx_memmove(void *dest, const void *src, size_t count) { #if CONFIG_MEM_CHECKS - if (((int)dest < 0x4000) || ((int)src < 0x4000)) - { - _P(printf("WARNING: vpx_memmove dest:0x%x src:0x%x count:%d\n", (int)dest, (int)src, count);) + if (((int)dest < 0x4000) || ((int)src < 0x4000)) { + _P(printf("WARNING: vpx_memmove dest:0x%x src:0x%x count:%d\n", (int)dest, (int)src, count);) #if defined(VXWORKS) - sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0); + sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0); - vx_sleep(10000); + vx_sleep(10000); #endif - } + } #endif - return VPX_MEMMOVE_L(dest, src, count); + return VPX_MEMMOVE_L(dest, src, count); } #if CONFIG_MEM_MANAGER -static int vpx_mm_create_heap_memory() -{ - int i_rv = 0; +static int vpx_mm_create_heap_memory() { + int i_rv = 0; - if (!g_mng_memory_allocated) - { + if (!g_mng_memory_allocated) { #if MM_DYNAMIC_MEMORY - g_p_mng_memory_raw = - (unsigned char *)malloc(g_mm_memory_size + HMM_ADDR_ALIGN_UNIT); - - if (g_p_mng_memory_raw) - { - g_p_mng_memory = (unsigned char *)((((unsigned int)g_p_mng_memory_raw) + - HMM_ADDR_ALIGN_UNIT - 1) & - -(int)HMM_ADDR_ALIGN_UNIT); - - _P(printf("[vpx][mm] total memory size:%d g_p_mng_memory_raw:0x%x g_p_mng_memory:0x%x\n" - , g_mm_memory_size + HMM_ADDR_ALIGN_UNIT - , (unsigned int)g_p_mng_memory_raw - , (unsigned int)g_p_mng_memory);) - } - else - { - _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n" - , g_mm_memory_size);) - - i_rv = -1; - } + g_p_mng_memory_raw = + (unsigned char *)malloc(g_mm_memory_size + HMM_ADDR_ALIGN_UNIT); + + if (g_p_mng_memory_raw) { + g_p_mng_memory = (unsigned char *)((((unsigned int)g_p_mng_memory_raw) + + HMM_ADDR_ALIGN_UNIT - 1) & + -(int)HMM_ADDR_ALIGN_UNIT); + + _P(printf("[vpx][mm] total memory size:%d g_p_mng_memory_raw:0x%x g_p_mng_memory:0x%x\n" +, g_mm_memory_size + 
HMM_ADDR_ALIGN_UNIT +, (unsigned int)g_p_mng_memory_raw +, (unsigned int)g_p_mng_memory);) + } else { + _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n" +, g_mm_memory_size);) + + i_rv = -1; + } - if (g_p_mng_memory) + if (g_p_mng_memory) #endif - { - int chunk_size = 0; + { + int chunk_size = 0; - g_mng_memory_allocated = 1; + g_mng_memory_allocated = 1; - hmm_init(&hmm_d); + hmm_init(&hmm_d); - chunk_size = g_mm_memory_size >> SHIFT_HMM_ADDR_ALIGN_UNIT; + chunk_size = g_mm_memory_size >> SHIFT_HMM_ADDR_ALIGN_UNIT; - chunk_size -= DUMMY_END_BLOCK_BAUS; + chunk_size -= DUMMY_END_BLOCK_BAUS; - _P(printf("[vpx][mm] memory size:%d for vpx memory manager. g_p_mng_memory:0x%x chunk_size:%d\n" - , g_mm_memory_size - , (unsigned int)g_p_mng_memory - , chunk_size);) + _P(printf("[vpx][mm] memory size:%d for vpx memory manager. g_p_mng_memory:0x%x chunk_size:%d\n" +, g_mm_memory_size +, (unsigned int)g_p_mng_memory +, chunk_size);) - hmm_new_chunk(&hmm_d, (void *)g_p_mng_memory, chunk_size); - } + hmm_new_chunk(&hmm_d, (void *)g_p_mng_memory, chunk_size); + } #if MM_DYNAMIC_MEMORY - else - { - _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n" - , g_mm_memory_size);) + else { + _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n" +, g_mm_memory_size);) - i_rv = -1; - } + i_rv = -1; + } #endif - } + } - return i_rv; + return i_rv; } -static void *vpx_mm_realloc(void *memblk, size_t size) -{ - void *p_ret = NULL; +static void *vpx_mm_realloc(void *memblk, size_t size) { + void *p_ret = NULL; - if (vpx_mm_create_heap_memory() < 0) - { - _P(printf("[vpx][mm] ERROR vpx_mm_realloc() Couldn't create memory for Heap.\n");) - } - else - { - int i_rv = 0; - int old_num_aaus; - int new_num_aaus; + if (vpx_mm_create_heap_memory() < 0) { + _P(printf("[vpx][mm] ERROR vpx_mm_realloc() Couldn't create memory for Heap.\n");) + } else { + int i_rv = 0; + int old_num_aaus; + int new_num_aaus; + + old_num_aaus = 
hmm_true_size(memblk); + new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1; + + if (old_num_aaus == new_num_aaus) { + p_ret = memblk; + } else { + i_rv = hmm_resize(&hmm_d, memblk, new_num_aaus); + + if (i_rv == 0) { + p_ret = memblk; + } else { + /* Error. Try to malloc and then copy data. */ + void *p_from_malloc; - old_num_aaus = hmm_true_size(memblk); new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1; + p_from_malloc = hmm_alloc(&hmm_d, new_num_aaus); - if (old_num_aaus == new_num_aaus) - { - p_ret = memblk; - } - else - { - i_rv = hmm_resize(&hmm_d, memblk, new_num_aaus); - - if (i_rv == 0) - { - p_ret = memblk; - } - else - { - /* Error. Try to malloc and then copy data. */ - void *p_from_malloc; - - new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1; - p_from_malloc = hmm_alloc(&hmm_d, new_num_aaus); - - if (p_from_malloc) - { - vpx_memcpy(p_from_malloc, memblk, size); - hmm_free(&hmm_d, memblk); - - p_ret = p_from_malloc; - } - } + if (p_from_malloc) { + vpx_memcpy(p_from_malloc, memblk, size); + hmm_free(&hmm_d, memblk); + + p_ret = p_from_malloc; } + } } + } - return p_ret; + return p_ret; } #endif /*CONFIG_MEM_MANAGER*/ #if USE_GLOBAL_FUNCTION_POINTERS # if CONFIG_MEM_TRACKER extern int vpx_memory_tracker_set_functions(g_malloc_func g_malloc_l - , g_calloc_func g_calloc_l - , g_realloc_func g_realloc_l - , g_free_func g_free_l - , g_memcpy_func g_memcpy_l - , g_memset_func g_memset_l - , g_memmove_func g_memmove_l); +, g_calloc_func g_calloc_l +, g_realloc_func g_realloc_l +, g_free_func g_free_l +, g_memcpy_func g_memcpy_l +, g_memset_func g_memset_l +, g_memmove_func g_memmove_l); # endif #endif /*USE_GLOBAL_FUNCTION_POINTERS*/ int vpx_mem_set_functions(g_malloc_func g_malloc_l - , g_calloc_func g_calloc_l - , g_realloc_func g_realloc_l - , g_free_func g_free_l - , g_memcpy_func g_memcpy_l - , g_memset_func g_memset_l - , g_memmove_func g_memmove_l) -{ +, g_calloc_func g_calloc_l +, g_realloc_func g_realloc_l +, g_free_func g_free_l +, 
g_memcpy_func g_memcpy_l +, g_memset_func g_memset_l +, g_memmove_func g_memmove_l) { #if USE_GLOBAL_FUNCTION_POINTERS - /* If use global functions is turned on then the - application must set the global functions before - it does anything else or vpx_mem will have - unpredictable results. */ - if (!g_func) - { - g_func = (struct GLOBAL_FUNC_POINTERS *) - g_malloc_l(sizeof(struct GLOBAL_FUNC_POINTERS)); + /* If use global functions is turned on then the + application must set the global functions before + it does anything else or vpx_mem will have + unpredictable results. */ + if (!g_func) { + g_func = (struct GLOBAL_FUNC_POINTERS *) + g_malloc_l(sizeof(struct GLOBAL_FUNC_POINTERS)); - if (!g_func) - { - return -1; - } + if (!g_func) { + return -1; } + } #if CONFIG_MEM_TRACKER - { - int rv = 0; - rv = vpx_memory_tracker_set_functions(g_malloc_l - , g_calloc_l - , g_realloc_l - , g_free_l - , g_memcpy_l - , g_memset_l - , g_memmove_l); - - if (rv < 0) - { - return rv; - } + { + int rv = 0; + rv = vpx_memory_tracker_set_functions(g_malloc_l +, g_calloc_l +, g_realloc_l +, g_free_l +, g_memcpy_l +, g_memset_l +, g_memmove_l); + + if (rv < 0) { + return rv; } + } #endif - g_func->g_malloc = g_malloc_l; - g_func->g_calloc = g_calloc_l; - g_func->g_realloc = g_realloc_l; - g_func->g_free = g_free_l; - g_func->g_memcpy = g_memcpy_l; - g_func->g_memset = g_memset_l; - g_func->g_memmove = g_memmove_l; + g_func->g_malloc = g_malloc_l; + g_func->g_calloc = g_calloc_l; + g_func->g_realloc = g_realloc_l; + g_func->g_free = g_free_l; + g_func->g_memcpy = g_memcpy_l; + g_func->g_memset = g_memset_l; + g_func->g_memmove = g_memmove_l; - return 0; + return 0; #else - (void)g_malloc_l; - (void)g_calloc_l; - (void)g_realloc_l; - (void)g_free_l; - (void)g_memcpy_l; - (void)g_memset_l; - (void)g_memmove_l; - return -1; + (void)g_malloc_l; + (void)g_calloc_l; + (void)g_realloc_l; + (void)g_free_l; + (void)g_memcpy_l; + (void)g_memset_l; + (void)g_memmove_l; + return -1; #endif } -int 
vpx_mem_unset_functions() -{ +int vpx_mem_unset_functions() { #if USE_GLOBAL_FUNCTION_POINTERS - if (g_func) - { - g_free_func temp_free = g_func->g_free; - temp_free(g_func); - g_func = NULL; - } + if (g_func) { + g_free_func temp_free = g_func->g_free; + temp_free(g_func); + g_func = NULL; + } #endif - return 0; + return 0; } diff --git a/vpx_mem/vpx_mem.h b/vpx_mem/vpx_mem.h index 749eaa42e..6ee5f5d7e 100644 --- a/vpx_mem/vpx_mem.h +++ b/vpx_mem/vpx_mem.h @@ -30,11 +30,11 @@ #endif #ifndef VPX_CHECK_MEM_FUNCTIONS # define VPX_CHECK_MEM_FUNCTIONS 0 /* enable basic safety checks in _memcpy, - _memset, and _memmove */ +_memset, and _memmove */ #endif #ifndef REPLACE_BUILTIN_FUNCTIONS # define REPLACE_BUILTIN_FUNCTIONS 0 /* replace builtin functions with their - vpx_ equivalents */ +vpx_ equivalents */ #endif #include <stdlib.h> @@ -44,60 +44,60 @@ extern "C" { #endif - /* - vpx_mem_get_version() - provided for runtime version checking. Returns an unsigned int of the form - CHIEF | MAJOR | MINOR | PATCH, where the chief version number is the high - order byte. 
- */ - unsigned int vpx_mem_get_version(void); - - /* - vpx_mem_set_heap_size(size_t size) - size - size in bytes for the memory manager to allocate for its heap - Sets the memory manager's initial heap size - Return: - 0: on success - -1: if memory manager calls have not been included in the vpx_mem lib - -2: if the memory manager has been compiled to use static memory - -3: if the memory manager has already allocated its heap - */ - int vpx_mem_set_heap_size(size_t size); - - void *vpx_memalign(size_t align, size_t size); - void *vpx_malloc(size_t size); - void *vpx_calloc(size_t num, size_t size); - void *vpx_realloc(void *memblk, size_t size); - void vpx_free(void *memblk); - - void *vpx_memcpy(void *dest, const void *src, size_t length); - void *vpx_memset(void *dest, int val, size_t length); - void *vpx_memmove(void *dest, const void *src, size_t count); - - /* special memory functions */ - void *vpx_mem_alloc(int id, size_t size, size_t align); - void vpx_mem_free(int id, void *mem, size_t size); - - /* Wrappers to standard library functions. */ - typedef void*(* g_malloc_func)(size_t); - typedef void*(* g_calloc_func)(size_t, size_t); - typedef void*(* g_realloc_func)(void *, size_t); - typedef void (* g_free_func)(void *); - typedef void*(* g_memcpy_func)(void *, const void *, size_t); - typedef void*(* g_memset_func)(void *, int, size_t); - typedef void*(* g_memmove_func)(void *, const void *, size_t); - - int vpx_mem_set_functions(g_malloc_func g_malloc_l - , g_calloc_func g_calloc_l - , g_realloc_func g_realloc_l - , g_free_func g_free_l - , g_memcpy_func g_memcpy_l - , g_memset_func g_memset_l - , g_memmove_func g_memmove_l); - int vpx_mem_unset_functions(void); - - - /* some defines for backward compatibility */ + /* + vpx_mem_get_version() + provided for runtime version checking. Returns an unsigned int of the form + CHIEF | MAJOR | MINOR | PATCH, where the chief version number is the high + order byte. 
+ */ + unsigned int vpx_mem_get_version(void); + + /* + vpx_mem_set_heap_size(size_t size) + size - size in bytes for the memory manager to allocate for its heap + Sets the memory manager's initial heap size + Return: + 0: on success + -1: if memory manager calls have not been included in the vpx_mem lib + -2: if the memory manager has been compiled to use static memory + -3: if the memory manager has already allocated its heap + */ + int vpx_mem_set_heap_size(size_t size); + + void *vpx_memalign(size_t align, size_t size); + void *vpx_malloc(size_t size); + void *vpx_calloc(size_t num, size_t size); + void *vpx_realloc(void *memblk, size_t size); + void vpx_free(void *memblk); + + void *vpx_memcpy(void *dest, const void *src, size_t length); + void *vpx_memset(void *dest, int val, size_t length); + void *vpx_memmove(void *dest, const void *src, size_t count); + + /* special memory functions */ + void *vpx_mem_alloc(int id, size_t size, size_t align); + void vpx_mem_free(int id, void *mem, size_t size); + + /* Wrappers to standard library functions. 
*/ + typedef void *(* g_malloc_func)(size_t); + typedef void *(* g_calloc_func)(size_t, size_t); + typedef void *(* g_realloc_func)(void *, size_t); + typedef void (* g_free_func)(void *); + typedef void *(* g_memcpy_func)(void *, const void *, size_t); + typedef void *(* g_memset_func)(void *, int, size_t); + typedef void *(* g_memmove_func)(void *, const void *, size_t); + + int vpx_mem_set_functions(g_malloc_func g_malloc_l +, g_calloc_func g_calloc_l +, g_realloc_func g_realloc_l +, g_free_func g_free_l +, g_memcpy_func g_memcpy_l +, g_memset_func g_memset_l +, g_memmove_func g_memmove_l); + int vpx_mem_unset_functions(void); + + + /* some defines for backward compatibility */ #define DMEM_GENERAL 0 #define duck_memalign(X,Y,Z) vpx_memalign(X,Y) @@ -124,13 +124,13 @@ extern "C" { #if CONFIG_MEM_TRACKER #include <stdarg.h> - /*from vpx_mem/vpx_mem_tracker.c*/ - extern void vpx_memory_tracker_dump(); - extern void vpx_memory_tracker_check_integrity(char *file, unsigned int line); - extern int vpx_memory_tracker_set_log_type(int type, char *option); - extern int vpx_memory_tracker_set_log_func(void *userdata, - void(*logfunc)(void *userdata, - const char *fmt, va_list args)); + /*from vpx_mem/vpx_mem_tracker.c*/ + extern void vpx_memory_tracker_dump(); + extern void vpx_memory_tracker_check_integrity(char *file, unsigned int line); + extern int vpx_memory_tracker_set_log_type(int type, char *option); + extern int vpx_memory_tracker_set_log_func(void *userdata, + void(*logfunc)(void *userdata, + const char *fmt, va_list args)); # ifndef __VPX_MEM_C__ # define vpx_memalign(align, size) xvpx_memalign((align), (size), __FILE__, __LINE__) # define vpx_malloc(size) xvpx_malloc((size), __FILE__, __LINE__) @@ -142,13 +142,13 @@ extern "C" { # define vpx_mem_free(id,mem,size) xvpx_mem_free(id, mem, size, __FILE__, __LINE__) # endif - void *xvpx_memalign(size_t align, size_t size, char *file, int line); - void *xvpx_malloc(size_t size, char *file, int line); - void 
*xvpx_calloc(size_t num, size_t size, char *file, int line); - void *xvpx_realloc(void *memblk, size_t size, char *file, int line); - void xvpx_free(void *memblk, char *file, int line); - void *xvpx_mem_alloc(int id, size_t size, size_t align, char *file, int line); - void xvpx_mem_free(int id, void *mem, size_t size, char *file, int line); + void *xvpx_memalign(size_t align, size_t size, char *file, int line); + void *xvpx_malloc(size_t size, char *file, int line); + void *xvpx_calloc(size_t num, size_t size, char *file, int line); + void *xvpx_realloc(void *memblk, size_t size, char *file, int line); + void xvpx_free(void *memblk, char *file, int line); + void *xvpx_mem_alloc(int id, size_t size, size_t align, char *file, int line); + void xvpx_mem_free(int id, void *mem, size_t size, char *file, int line); #else # ifndef __VPX_MEM_C__ diff --git a/vpx_mem/vpx_mem_tracker.c b/vpx_mem/vpx_mem_tracker.c index 9e8623a9a..5b2103b55 100644 --- a/vpx_mem/vpx_mem_tracker.c +++ b/vpx_mem/vpx_mem_tracker.c @@ -40,20 +40,20 @@ #include <stdio.h> #include <stdlib.h> -#include <string.h> //VXWORKS doesn't have a malloc/memory.h file, -//this should pull in malloc,free,etc. +#include <string.h> // VXWORKS doesn't have a malloc/memory.h file, +// this should pull in malloc,free,etc. #include <stdarg.h> #include "include/vpx_mem_tracker.h" -#undef vpx_malloc //undefine any vpx_mem macros that may affect calls to -#undef vpx_free //memory functions in this file +#undef vpx_malloc // undefine any vpx_mem macros that may affect calls to +#undef vpx_free // memory functions in this file #undef vpx_memcpy #undef vpx_memset #ifndef USE_GLOBAL_FUNCTION_POINTERS -# define USE_GLOBAL_FUNCTION_POINTERS 0 //use function pointers instead of compiled functions. +# define USE_GLOBAL_FUNCTION_POINTERS 0 // use function pointers instead of compiled functions. 
#endif #if USE_GLOBAL_FUNCTION_POINTERS @@ -94,39 +94,37 @@ static int memory_tracker_unlock_mutex(); #endif #ifndef VPX_NO_GLOBALS -struct memory_tracker -{ - struct mem_block *head, - * tail; - int len, - totalsize; - unsigned int current_allocated, - max_allocated; +struct memory_tracker { + struct mem_block *head, + * tail; + int len, + totalsize; + unsigned int current_allocated, + max_allocated; #if HAVE_PTHREAD_H - pthread_mutex_t mutex; + pthread_mutex_t mutex; #elif defined(WIN32) || defined(_WIN32_WCE) - HANDLE mutex; + HANDLE mutex; #elif defined(VXWORKS) - SEM_ID mutex; + SEM_ID mutex; #elif defined(NO_MUTEX) #else #error "No mutex type defined for this platform!" #endif - int padding_size, - pad_value; + int padding_size, + pad_value; }; -static struct memory_tracker memtrack; //our global memory allocation list -static int g_b_mem_tracker_inited = 0; //indicates whether the global list has -//been initialized (1:yes/0:no) -static struct -{ - FILE *file; - int type; - void (*func)(void *userdata, const char *fmt, va_list args); - void *userdata; +static struct memory_tracker memtrack; // our global memory allocation list +static int g_b_mem_tracker_inited = 0; // indicates whether the global list has +// been initialized (1:yes/0:no) +static struct { + FILE *file; + int type; + void (*func)(void *userdata, const char *fmt, va_list args); + void *userdata; } g_logging = {NULL, 0, NULL, NULL}; #else # include "vpx_global_handling.h" @@ -157,60 +155,54 @@ extern void *vpx_memset(void *dest, int val, size_t length); Initializes global memory tracker structure Allocates the head of the list */ -int vpx_memory_tracker_init(int padding_size, int pad_value) -{ - if (!g_b_mem_tracker_inited) - { - if ((memtrack.head = (struct mem_block *) - MEM_TRACK_MALLOC(sizeof(struct mem_block)))) - { - int ret; +int vpx_memory_tracker_init(int padding_size, int pad_value) { + if (!g_b_mem_tracker_inited) { + if ((memtrack.head = (struct mem_block *) + 
MEM_TRACK_MALLOC(sizeof(struct mem_block)))) { + int ret; - MEM_TRACK_MEMSET(memtrack.head, 0, sizeof(struct mem_block)); + MEM_TRACK_MEMSET(memtrack.head, 0, sizeof(struct mem_block)); - memtrack.tail = memtrack.head; + memtrack.tail = memtrack.head; - memtrack.current_allocated = 0; - memtrack.max_allocated = 0; + memtrack.current_allocated = 0; + memtrack.max_allocated = 0; - memtrack.padding_size = padding_size; - memtrack.pad_value = pad_value; + memtrack.padding_size = padding_size; + memtrack.pad_value = pad_value; #if HAVE_PTHREAD_H - ret = pthread_mutex_init(&memtrack.mutex, - NULL); /*mutex attributes (NULL=default)*/ + ret = pthread_mutex_init(&memtrack.mutex, + NULL); /*mutex attributes (NULL=default)*/ #elif defined(WIN32) || defined(_WIN32_WCE) - memtrack.mutex = CreateMutex(NULL, /*security attributes*/ - FALSE, /*we don't want initial ownership*/ - NULL); /*mutex name*/ - ret = !memtrack.mutex; + memtrack.mutex = CreateMutex(NULL, /*security attributes*/ + FALSE, /*we don't want initial ownership*/ + NULL); /*mutex name*/ + ret = !memtrack.mutex; #elif defined(VXWORKS) - memtrack.mutex = sem_bcreate(SEM_Q_FIFO, /*SEM_Q_FIFO non-priority based mutex*/ - SEM_FULL); /*SEM_FULL initial state is unlocked*/ - ret = !memtrack.mutex; + memtrack.mutex = sem_bcreate(SEM_Q_FIFO, /*SEM_Q_FIFO non-priority based mutex*/ + SEM_FULL); /*SEM_FULL initial state is unlocked*/ + ret = !memtrack.mutex; #elif defined(NO_MUTEX) - ret = 0; + ret = 0; #endif - if (ret) - { - memtrack_log("vpx_memory_tracker_init: Error creating mutex!\n"); - - MEM_TRACK_FREE(memtrack.head); - memtrack.head = NULL; - } - else - { - memtrack_log("Memory Tracker init'd, v."vpx_mem_tracker_version" pad_size:%d pad_val:0x%x %d\n" - , padding_size - , pad_value - , pad_value); - g_b_mem_tracker_inited = 1; - } - } + if (ret) { + memtrack_log("vpx_memory_tracker_init: Error creating mutex!\n"); + + MEM_TRACK_FREE(memtrack.head); + memtrack.head = NULL; + } else { + memtrack_log("Memory Tracker 
init'd, v."vpx_mem_tracker_version" pad_size:%d pad_val:0x%x %d\n" +, padding_size +, pad_value +, pad_value); + g_b_mem_tracker_inited = 1; + } } + } - return g_b_mem_tracker_inited; + return g_b_mem_tracker_inited; } /* @@ -218,39 +210,35 @@ int vpx_memory_tracker_init(int padding_size, int pad_value) If our global struct was initialized zeros out all its members, frees memory and destroys it's mutex */ -void vpx_memory_tracker_destroy() -{ - if (!memory_tracker_lock_mutex()) - { - struct mem_block *p = memtrack.head, - * p2 = memtrack.head; +void vpx_memory_tracker_destroy() { + if (!memory_tracker_lock_mutex()) { + struct mem_block *p = memtrack.head, + * p2 = memtrack.head; - memory_tracker_dump(); + memory_tracker_dump(); - while (p) - { - p2 = p; - p = p->next; + while (p) { + p2 = p; + p = p->next; - MEM_TRACK_FREE(p2); - } + MEM_TRACK_FREE(p2); + } - memtrack.head = NULL; - memtrack.tail = NULL; - memtrack.len = 0; - memtrack.current_allocated = 0; - memtrack.max_allocated = 0; + memtrack.head = NULL; + memtrack.tail = NULL; + memtrack.len = 0; + memtrack.current_allocated = 0; + memtrack.max_allocated = 0; - if (!g_logging.type && g_logging.file && g_logging.file != stderr) - { - fclose(g_logging.file); - g_logging.file = NULL; - } + if (!g_logging.type && g_logging.file && g_logging.file != stderr) { + fclose(g_logging.file); + g_logging.file = NULL; + } - memory_tracker_unlock_mutex(); + memory_tracker_unlock_mutex(); - g_b_mem_tracker_inited = 0; - } + g_b_mem_tracker_inited = 0; + } } /* @@ -265,9 +253,8 @@ void vpx_memory_tracker_destroy() */ void vpx_memory_tracker_add(size_t addr, unsigned int size, char *file, unsigned int line, - int padded) -{ - memory_tracker_add(addr, size, file, line, padded); + int padded) { + memory_tracker_add(addr, size, file, line, padded); } /* @@ -278,9 +265,8 @@ void vpx_memory_tracker_add(size_t addr, unsigned int size, Return: Same as described for memory_tracker_remove */ -int vpx_memory_tracker_remove(size_t addr) 
-{ - return memory_tracker_remove(addr); +int vpx_memory_tracker_remove(size_t addr) { + return memory_tracker_remove(addr); } /* @@ -290,17 +276,15 @@ int vpx_memory_tracker_remove(size_t addr) If found, pointer to the memory block that matches addr NULL otherwise */ -struct mem_block *vpx_memory_tracker_find(size_t addr) -{ - struct mem_block *p = NULL; - - if (!memory_tracker_lock_mutex()) - { - p = memory_tracker_find(addr); - memory_tracker_unlock_mutex(); - } +struct mem_block *vpx_memory_tracker_find(size_t addr) { + struct mem_block *p = NULL; - return p; + if (!memory_tracker_lock_mutex()) { + p = memory_tracker_find(addr); + memory_tracker_unlock_mutex(); + } + + return p; } /* @@ -309,13 +293,11 @@ struct mem_block *vpx_memory_tracker_find(size_t addr) library function to dump the current contents of the global memory allocation list */ -void vpx_memory_tracker_dump() -{ - if (!memory_tracker_lock_mutex()) - { - memory_tracker_dump(); - memory_tracker_unlock_mutex(); - } +void vpx_memory_tracker_dump() { + if (!memory_tracker_lock_mutex()) { + memory_tracker_dump(); + memory_tracker_unlock_mutex(); + } } /* @@ -326,13 +308,11 @@ void vpx_memory_tracker_dump() integrity check function to inspect every address in the global memory allocation list */ -void vpx_memory_tracker_check_integrity(char *file, unsigned int line) -{ - if (!memory_tracker_lock_mutex()) - { - memory_tracker_check_integrity(file, line); - memory_tracker_unlock_mutex(); - } +void vpx_memory_tracker_check_integrity(char *file, unsigned int line) { + if (!memory_tracker_lock_mutex()) { + memory_tracker_check_integrity(file, line); + memory_tracker_unlock_mutex(); + } } /* @@ -344,43 +324,38 @@ void vpx_memory_tracker_check_integrity(char *file, unsigned int line) -1: if the logging type could not be set, because the value was invalid or because a file could not be opened */ -int vpx_memory_tracker_set_log_type(int type, char *option) -{ - int ret = -1; +int 
vpx_memory_tracker_set_log_type(int type, char *option) { + int ret = -1; - switch (type) - { + switch (type) { case 0: - g_logging.type = 0; + g_logging.type = 0; - if (!option) - { - g_logging.file = stderr; - ret = 0; - } - else - { - if ((g_logging.file = fopen((char *)option, "w"))) - ret = 0; - } + if (!option) { + g_logging.file = stderr; + ret = 0; + } else { + if ((g_logging.file = fopen((char *)option, "w"))) + ret = 0; + } - break; + break; #if defined(WIN32) && !defined(_WIN32_WCE) case 1: - g_logging.type = type; - ret = 0; - break; + g_logging.type = type; + ret = 0; + break; #endif default: - break; - } + break; + } - //output the version to the new logging destination - if (!ret) - memtrack_log("Memory Tracker logging initialized, " - "Memory Tracker v."vpx_mem_tracker_version"\n"); + // output the version to the new logging destination + if (!ret) + memtrack_log("Memory Tracker logging initialized, " + "Memory Tracker v."vpx_mem_tracker_version"\n"); - return ret; + return ret; } /* @@ -392,24 +367,22 @@ int vpx_memory_tracker_set_log_type(int type, char *option) */ int vpx_memory_tracker_set_log_func(void *userdata, void(*logfunc)(void *userdata, - const char *fmt, va_list args)) -{ - int ret = -1; - - if (logfunc) - { - g_logging.type = -1; - g_logging.userdata = userdata; - g_logging.func = logfunc; - ret = 0; - } - - //output the version to the new logging destination - if (!ret) - memtrack_log("Memory Tracker logging initialized, " - "Memory Tracker v."vpx_mem_tracker_version"\n"); - - return ret; + const char *fmt, va_list args)) { + int ret = -1; + + if (logfunc) { + g_logging.type = -1; + g_logging.userdata = userdata; + g_logging.func = logfunc; + ret = 0; + } + + // output the version to the new logging destination + if (!ret) + memtrack_log("Memory Tracker logging initialized, " + "Memory Tracker v."vpx_mem_tracker_version"\n"); + + return ret; } /* @@ -425,79 +398,73 @@ int vpx_memory_tracker_set_log_func(void *userdata, * */ -static 
void memtrack_log(const char *fmt, ...) -{ - va_list list; +static void memtrack_log(const char *fmt, ...) { + va_list list; - va_start(list, fmt); + va_start(list, fmt); - switch (g_logging.type) - { + switch (g_logging.type) { case -1: - if (g_logging.func) - g_logging.func(g_logging.userdata, fmt, list); + if (g_logging.func) + g_logging.func(g_logging.userdata, fmt, list); - break; + break; case 0: - if (g_logging.file) - { - vfprintf(g_logging.file, fmt, list); - fflush(g_logging.file); - } + if (g_logging.file) { + vfprintf(g_logging.file, fmt, list); + fflush(g_logging.file); + } - break; + break; #if defined(WIN32) && !defined(_WIN32_WCE) - case 1: - { - char temp[1024]; - _vsnprintf(temp, sizeof(temp) / sizeof(char) - 1, fmt, list); - OutputDebugString(temp); + case 1: { + char temp[1024]; + _vsnprintf(temp, sizeof(temp) / sizeof(char) - 1, fmt, list); + OutputDebugString(temp); } break; #endif default: - break; - } + break; + } - va_end(list); + va_end(list); } /* memory_tracker_dump() Dumps the current contents of the global memory allocation list */ -static void memory_tracker_dump() -{ - int i = 0; - struct mem_block *p = (memtrack.head ? memtrack.head->next : NULL); +static void memory_tracker_dump() { + int i = 0; + struct mem_block *p = (memtrack.head ? 
memtrack.head->next : NULL); - memtrack_log("\n_currently Allocated= %d; Max allocated= %d\n", - memtrack.current_allocated, memtrack.max_allocated); + memtrack_log("\n_currently Allocated= %d; Max allocated= %d\n", + memtrack.current_allocated, memtrack.max_allocated); - while (p) - { + while (p) { #if defined(WIN32) && !defined(_WIN32_WCE) - /*when using outputdebugstring, output filenames so they - can be clicked to be opened in visual studio*/ - if (g_logging.type == 1) - memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file:\n" - " %s(%d):\n", i, - p->addr, i, p->size, - p->file, p->line); - else + /*when using outputdebugstring, output filenames so they + can be clicked to be opened in visual studio*/ + if (g_logging.type == 1) + memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file:\n" + " %s(%d):\n", i, + p->addr, i, p->size, + p->file, p->line); + else #endif - memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file: %s, line: %d\n", i, - p->addr, i, p->size, - p->file, p->line); + memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file: %s, line: %d\n", i, + p->addr, i, p->size, + p->file, p->line); - p = p->next; - ++i; - } + p = p->next; + ++i; + } - memtrack_log("\n"); + memtrack_log("\n"); } /* @@ -508,55 +475,49 @@ static void memory_tracker_dump() this function will check ea. addr in the list verifying that addr-padding_size and addr+padding_size is filled with pad_value */ -static void memory_tracker_check_integrity(char *file, unsigned int line) -{ - if (memtrack.padding_size) - { - int i, - index = 0; - unsigned char *p_show_me, - * p_show_me2; - unsigned int tempme = memtrack.pad_value, - dead1, - dead2; - unsigned char *x_bounds; - struct mem_block *p = memtrack.head->next; - - while (p) - { - //x_bounds = (unsigned char*)p->addr; - //back up VPX_BYTE_ALIGNMENT - //x_bounds -= memtrack.padding_size; - - if (p->padded) // can the bounds be checked? 
- { - /*yes, move to the address that was actually allocated - by the vpx_* calls*/ - x_bounds = (unsigned char *)(((size_t *)p->addr)[-1]); - - for (i = 0; i < memtrack.padding_size; i += sizeof(unsigned int)) - { - p_show_me = (x_bounds + i); - p_show_me2 = (unsigned char *)(p->addr + p->size + i); - - MEM_TRACK_MEMCPY(&dead1, p_show_me, sizeof(unsigned int)); - MEM_TRACK_MEMCPY(&dead2, p_show_me2, sizeof(unsigned int)); - - if ((dead1 != tempme) || (dead2 != tempme)) - { - memtrack_log("\n[vpx_mem integrity check failed]:\n" - " index[%d,%d] {%s:%d} addr=0x%x, size=%d," - " file: %s, line: %d c0:0x%x c1:0x%x\n", - index, i, file, line, p->addr, p->size, p->file, - p->line, dead1, dead2); - } - } - } - - ++index; - p = p->next; +static void memory_tracker_check_integrity(char *file, unsigned int line) { + if (memtrack.padding_size) { + int i, + index = 0; + unsigned char *p_show_me, + * p_show_me2; + unsigned int tempme = memtrack.pad_value, + dead1, + dead2; + unsigned char *x_bounds; + struct mem_block *p = memtrack.head->next; + + while (p) { + // x_bounds = (unsigned char*)p->addr; + // back up VPX_BYTE_ALIGNMENT + // x_bounds -= memtrack.padding_size; + + if (p->padded) { // can the bounds be checked? 
+ /*yes, move to the address that was actually allocated + by the vpx_* calls*/ + x_bounds = (unsigned char *)(((size_t *)p->addr)[-1]); + + for (i = 0; i < memtrack.padding_size; i += sizeof(unsigned int)) { + p_show_me = (x_bounds + i); + p_show_me2 = (unsigned char *)(p->addr + p->size + i); + + MEM_TRACK_MEMCPY(&dead1, p_show_me, sizeof(unsigned int)); + MEM_TRACK_MEMCPY(&dead2, p_show_me2, sizeof(unsigned int)); + + if ((dead1 != tempme) || (dead2 != tempme)) { + memtrack_log("\n[vpx_mem integrity check failed]:\n" + " index[%d,%d] {%s:%d} addr=0x%x, size=%d," + " file: %s, line: %d c0:0x%x c1:0x%x\n", + index, i, file, line, p->addr, p->size, p->file, + p->line, dead1, dead2); + } } + } + + ++index; + p = p->next; } + } } /* @@ -568,43 +529,38 @@ static void memory_tracker_check_integrity(char *file, unsigned int line) */ void memory_tracker_add(size_t addr, unsigned int size, char *file, unsigned int line, - int padded) -{ - if (!memory_tracker_lock_mutex()) - { - struct mem_block *p; + int padded) { + if (!memory_tracker_lock_mutex()) { + struct mem_block *p; - p = MEM_TRACK_MALLOC(sizeof(struct mem_block)); + p = MEM_TRACK_MALLOC(sizeof(struct mem_block)); - if (p) - { - p->prev = memtrack.tail; - p->prev->next = p; - p->addr = addr; - p->size = size; - p->line = line; - p->file = file; - p->padded = padded; - p->next = NULL; + if (p) { + p->prev = memtrack.tail; + p->prev->next = p; + p->addr = addr; + p->size = size; + p->line = line; + p->file = file; + p->padded = padded; + p->next = NULL; - memtrack.tail = p; + memtrack.tail = p; - memtrack.current_allocated += size; + memtrack.current_allocated += size; - if (memtrack.current_allocated > memtrack.max_allocated) - memtrack.max_allocated = memtrack.current_allocated; + if (memtrack.current_allocated > memtrack.max_allocated) + memtrack.max_allocated = memtrack.current_allocated; - //memtrack_log("memory_tracker_add: added addr=0x%.8x\n", addr); + // memtrack_log("memory_tracker_add: added 
addr=0x%.8x\n", addr); - memory_tracker_unlock_mutex(); - } - else - { - memtrack_log("memory_tracker_add: error allocating memory!\n"); - memory_tracker_unlock_mutex(); - vpx_memory_tracker_destroy(); - } + memory_tracker_unlock_mutex(); + } else { + memtrack_log("memory_tracker_add: error allocating memory!\n"); + memory_tracker_unlock_mutex(); + vpx_memory_tracker_destroy(); } + } } /* @@ -617,41 +573,36 @@ void memory_tracker_add(size_t addr, unsigned int size, -1: if the mutex could not be locked -2: if the addr was not found in the list */ -int memory_tracker_remove(size_t addr) -{ - int ret = -1; - - if (!memory_tracker_lock_mutex()) - { - struct mem_block *p; +int memory_tracker_remove(size_t addr) { + int ret = -1; - if ((p = memory_tracker_find(addr))) - { - memtrack.current_allocated -= p->size; + if (!memory_tracker_lock_mutex()) { + struct mem_block *p; - p->prev->next = p->next; + if ((p = memory_tracker_find(addr))) { + memtrack.current_allocated -= p->size; - if (p->next) - p->next->prev = p->prev; - else - memtrack.tail = p->prev; + p->prev->next = p->next; - ret = 0; - MEM_TRACK_FREE(p); - } - else - { - if (addr) - memtrack_log("memory_tracker_remove(): addr not found in list," - " 0x%.8x\n", addr); + if (p->next) + p->next->prev = p->prev; + else + memtrack.tail = p->prev; - ret = -2; - } + ret = 0; + MEM_TRACK_FREE(p); + } else { + if (addr) + memtrack_log("memory_tracker_remove(): addr not found in list," + " 0x%.8x\n", addr); - memory_tracker_unlock_mutex(); + ret = -2; } - return ret; + memory_tracker_unlock_mutex(); + } + + return ret; } /* @@ -662,19 +613,17 @@ int memory_tracker_remove(size_t addr) the need for repeated locking and unlocking as in Remove Returns: pointer to the mem block if found, NULL otherwise */ -static struct mem_block *memory_tracker_find(size_t addr) -{ - struct mem_block *p = NULL; +static struct mem_block *memory_tracker_find(size_t addr) { + struct mem_block *p = NULL; - if (memtrack.head) - { - p = 
memtrack.head->next; + if (memtrack.head) { + p = memtrack.head->next; - while (p && (p->addr != addr)) - p = p->next; - } + while (p && (p->addr != addr)) + p = p->next; + } - return p; + return p; } @@ -687,28 +636,25 @@ static struct mem_block *memory_tracker_find(size_t addr) <0: Failure, either the mutex was not initialized or the call to lock the mutex failed */ -static int memory_tracker_lock_mutex() -{ - int ret = -1; +static int memory_tracker_lock_mutex() { + int ret = -1; - if (g_b_mem_tracker_inited) - { + if (g_b_mem_tracker_inited) { #if HAVE_PTHREAD_H - ret = pthread_mutex_lock(&memtrack.mutex); + ret = pthread_mutex_lock(&memtrack.mutex); #elif defined(WIN32) || defined(_WIN32_WCE) - ret = WaitForSingleObject(memtrack.mutex, INFINITE); + ret = WaitForSingleObject(memtrack.mutex, INFINITE); #elif defined(VXWORKS) - ret = sem_take(memtrack.mutex, WAIT_FOREVER); + ret = sem_take(memtrack.mutex, WAIT_FOREVER); #endif - if (ret) - { - memtrack_log("memory_tracker_lock_mutex: mutex lock failed\n"); - } + if (ret) { + memtrack_log("memory_tracker_lock_mutex: mutex lock failed\n"); } + } - return ret; + return ret; } /* @@ -719,28 +665,25 @@ static int memory_tracker_lock_mutex() <0: Failure, either the mutex was not initialized or the call to unlock the mutex failed */ -static int memory_tracker_unlock_mutex() -{ - int ret = -1; +static int memory_tracker_unlock_mutex() { + int ret = -1; - if (g_b_mem_tracker_inited) - { + if (g_b_mem_tracker_inited) { #if HAVE_PTHREAD_H - ret = pthread_mutex_unlock(&memtrack.mutex); + ret = pthread_mutex_unlock(&memtrack.mutex); #elif defined(WIN32) || defined(_WIN32_WCE) - ret = !ReleaseMutex(memtrack.mutex); + ret = !ReleaseMutex(memtrack.mutex); #elif defined(VXWORKS) - ret = sem_give(memtrack.mutex); + ret = sem_give(memtrack.mutex); #endif - if (ret) - { - memtrack_log("memory_tracker_unlock_mutex: mutex unlock failed\n"); - } + if (ret) { + memtrack_log("memory_tracker_unlock_mutex: mutex unlock failed\n"); } + } - 
return ret; + return ret; } #endif @@ -754,45 +697,44 @@ static int memory_tracker_unlock_mutex() -1: if the use global function pointers is not set. */ int vpx_memory_tracker_set_functions(mem_track_malloc_func g_malloc_l - , mem_track_calloc_func g_calloc_l - , mem_track_realloc_func g_realloc_l - , mem_track_free_func g_free_l - , mem_track_memcpy_func g_memcpy_l - , mem_track_memset_func g_memset_l - , mem_track_memmove_func g_memmove_l) -{ +, mem_track_calloc_func g_calloc_l +, mem_track_realloc_func g_realloc_l +, mem_track_free_func g_free_l +, mem_track_memcpy_func g_memcpy_l +, mem_track_memset_func g_memset_l +, mem_track_memmove_func g_memmove_l) { #if USE_GLOBAL_FUNCTION_POINTERS - if (g_malloc_l) - g_malloc = g_malloc_l; + if (g_malloc_l) + g_malloc = g_malloc_l; - if (g_calloc_l) - g_calloc = g_calloc_l; + if (g_calloc_l) + g_calloc = g_calloc_l; - if (g_realloc_l) - g_realloc = g_realloc_l; + if (g_realloc_l) + g_realloc = g_realloc_l; - if (g_free_l) - g_free = g_free_l; + if (g_free_l) + g_free = g_free_l; - if (g_memcpy_l) - g_memcpy = g_memcpy_l; + if (g_memcpy_l) + g_memcpy = g_memcpy_l; - if (g_memset_l) - g_memset = g_memset_l; + if (g_memset_l) + g_memset = g_memset_l; - if (g_memmove_l) - g_memmove = g_memmove_l; + if (g_memmove_l) + g_memmove = g_memmove_l; - return 0; + return 0; #else - (void)g_malloc_l; - (void)g_calloc_l; - (void)g_realloc_l; - (void)g_free_l; - (void)g_memcpy_l; - (void)g_memset_l; - (void)g_memmove_l; - return -1; + (void)g_malloc_l; + (void)g_calloc_l; + (void)g_realloc_l; + (void)g_free_l; + (void)g_memcpy_l; + (void)g_memset_l; + (void)g_memmove_l; + return -1; #endif } |