summaryrefslogtreecommitdiff
path: root/vpx_mem
diff options
context:
space:
mode:
authorJohn Koleszar <jkoleszar@google.com>2010-05-18 11:58:33 -0400
committerJohn Koleszar <jkoleszar@google.com>2010-05-18 11:58:33 -0400
commit0ea50ce9cb4b65eee6afa1d041fe8beb5abda667 (patch)
tree1f3b9019f28bc56fd3156f96e5a9653a983ee61b /vpx_mem
downloadlibvpx-0ea50ce9cb4b65eee6afa1d041fe8beb5abda667.tar
libvpx-0ea50ce9cb4b65eee6afa1d041fe8beb5abda667.tar.gz
libvpx-0ea50ce9cb4b65eee6afa1d041fe8beb5abda667.tar.bz2
libvpx-0ea50ce9cb4b65eee6afa1d041fe8beb5abda667.zip
Initial WebM release
Diffstat (limited to 'vpx_mem')
-rw-r--r--vpx_mem/include/nds/vpx_mem_nds.h29
-rw-r--r--vpx_mem/include/vpx_mem_intrnl.h98
-rw-r--r--vpx_mem/include/vpx_mem_tracker.h179
-rw-r--r--vpx_mem/intel_linux/vpx_mem.c950
-rw-r--r--vpx_mem/intel_linux/vpx_mem_tracker.c811
-rw-r--r--vpx_mem/memory_manager/hmm_alloc.c59
-rw-r--r--vpx_mem/memory_manager/hmm_base.c433
-rw-r--r--vpx_mem/memory_manager/hmm_dflt_abort.c53
-rw-r--r--vpx_mem/memory_manager/hmm_grow.c49
-rw-r--r--vpx_mem/memory_manager/hmm_largest.c59
-rw-r--r--vpx_mem/memory_manager/hmm_resize.c118
-rw-r--r--vpx_mem/memory_manager/hmm_shrink.c110
-rw-r--r--vpx_mem/memory_manager/hmm_true.c31
-rw-r--r--vpx_mem/memory_manager/include/cavl_if.h226
-rw-r--r--vpx_mem/memory_manager/include/cavl_impl.h1266
-rw-r--r--vpx_mem/memory_manager/include/heapmm.h152
-rw-r--r--vpx_mem/memory_manager/include/hmm_cnfg.h115
-rw-r--r--vpx_mem/memory_manager/include/hmm_intrnl.h160
-rw-r--r--vpx_mem/nds/vpx_mem_nds.c65
-rw-r--r--vpx_mem/ti_c6x/vpx_mem_ti_6cx.c115
-rw-r--r--vpx_mem/vpx_mem.c719
-rw-r--r--vpx_mem/vpx_mem.h178
-rw-r--r--vpx_mem/vpx_mem.mk22
-rw-r--r--vpx_mem/vpx_mem_tracker.c822
24 files changed, 6819 insertions, 0 deletions
diff --git a/vpx_mem/include/nds/vpx_mem_nds.h b/vpx_mem/include/nds/vpx_mem_nds.h
new file mode 100644
index 000000000..c33240398
--- /dev/null
+++ b/vpx_mem/include/nds/vpx_mem_nds.h
@@ -0,0 +1,29 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+#ifndef __VPX_MEM_NDS_H__
+#define __VPX_MEM_NDS_H__
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+#include <nitro.h>
+#include <nitro/os.h>
+
+ void *vpx_mem_nds_alloc(osarena_id id, osheap_handle handle, size_t size, size_t align);
+ void vpx_mem_nds_free(osarena_id id, osheap_handle handle, void *mem);
+ int vpx_nds_alloc_heap(osarena_id id, u32 size);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /*__VPX_MEM_NDS_H__*/
diff --git a/vpx_mem/include/vpx_mem_intrnl.h b/vpx_mem/include/vpx_mem_intrnl.h
new file mode 100644
index 000000000..3b68d8615
--- /dev/null
+++ b/vpx_mem/include/vpx_mem_intrnl.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+#ifndef __VPX_MEM_INTRNL_H__
+#define __VPX_MEM_INTRNL_H__
+#include "vpx_ports/config.h"
+
+#ifndef CONFIG_MEM_MANAGER
+# if defined(VXWORKS)
+# define CONFIG_MEM_MANAGER 1 //include heap manager functionality,
+//default: enabled on vxworks
+# else
+# define CONFIG_MEM_MANAGER 0 //include heap manager functionality
+# endif
+#endif /*CONFIG_MEM_MANAGER*/
+
+#ifndef CONFIG_MEM_TRACKER
+# define CONFIG_MEM_TRACKER 1 //include xvpx_* calls in the lib
+#endif
+
+#ifndef CONFIG_MEM_CHECKS
+# define CONFIG_MEM_CHECKS 0 //include some basic safety checks in
+//vpx_memcpy, _memset, and _memmove
+#endif
+
+#ifndef USE_GLOBAL_FUNCTION_POINTERS
+# define USE_GLOBAL_FUNCTION_POINTERS 0 //use function pointers instead of compiled functions.
+#endif
+
+#if CONFIG_MEM_TRACKER
+# include "vpx_mem_tracker.h"
+# if VPX_MEM_TRACKER_VERSION_CHIEF != 2 || VPX_MEM_TRACKER_VERSION_MAJOR != 5
+# error "vpx_mem requires memory tracker version 2.5 to track memory usage"
+# endif
+#endif
+
+#define ADDRESS_STORAGE_SIZE sizeof(size_t)
+
+#ifndef DEFAULT_ALIGNMENT
+# if defined(VXWORKS)
+# define DEFAULT_ALIGNMENT 32 //default addr alignment to use in
+//calls to vpx_* functions other
+//than vpx_memalign
+# else
+# define DEFAULT_ALIGNMENT 1
+# endif
+#endif
+
+#if DEFAULT_ALIGNMENT < 1
+# error "DEFAULT_ALIGNMENT must be >= 1!"
+#endif
+
+#if CONFIG_MEM_TRACKER
+# define TRY_BOUNDS_CHECK 1 //when set to 1 pads each allocation,
+//integrity can be checked using
+//vpx_memory_tracker_check_integrity
+//or on free by defining
+//TRY_BOUNDS_CHECK_ON_FREE
+#else
+# define TRY_BOUNDS_CHECK 0
+#endif /*CONFIG_MEM_TRACKER*/
+
+#if TRY_BOUNDS_CHECK
+# define TRY_BOUNDS_CHECK_ON_FREE 0 //checks mem integrity on every
+//free, very expensive
+# define BOUNDS_CHECK_VALUE 0xdeadbeef //value stored before/after ea.
+//mem addr for bounds checking
+# define BOUNDS_CHECK_PAD_SIZE 32 //size of the padding before and
+//after ea allocation to be filled
+//with BOUNDS_CHECK_VALUE.
+//this should be a multiple of 4
+#else
+# define BOUNDS_CHECK_VALUE 0
+# define BOUNDS_CHECK_PAD_SIZE 0
+#endif /*TRY_BOUNDS_CHECK*/
+
+#ifndef REMOVE_PRINTFS
+# define REMOVE_PRINTFS 0
+#endif
+
+/* Should probably use a vpx_mem logger function. */
+#if REMOVE_PRINTFS
+# define _P(x)
+#else
+# define _P(x) x
+#endif
+
+/*returns an addr aligned to the byte boundary specified by align*/
+#define align_addr(addr,align) (void*)(((size_t)(addr) + ((align) - 1)) & (size_t)-(align))
+
+#endif /*__VPX_MEM_INTRNL_H__*/
diff --git a/vpx_mem/include/vpx_mem_tracker.h b/vpx_mem/include/vpx_mem_tracker.h
new file mode 100644
index 000000000..ab85d19c4
--- /dev/null
+++ b/vpx_mem/include/vpx_mem_tracker.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+#ifndef __VPX_MEM_TRACKER_H__
+#define __VPX_MEM_TRACKER_H__
+
+/* vpx_mem_tracker version info */
+#define vpx_mem_tracker_version "2.5.1.1"
+
+#define VPX_MEM_TRACKER_VERSION_CHIEF 2
+#define VPX_MEM_TRACKER_VERSION_MAJOR 5
+#define VPX_MEM_TRACKER_VERSION_MINOR 1
+#define VPX_MEM_TRACKER_VERSION_PATCH 1
+/* END - vpx_mem_tracker version info */
+
+#include <stdarg.h>
+
/*
 * One entry in the memory tracker's doubly-linked list of live
 * allocations. Stored per tracked address by vpx_memory_tracker_add().
 */
struct mem_block
{
    size_t addr;               /* tracked address, stored as an integer */
    unsigned int size,         /* allocation size in bytes */
                 line;         /* source line of the allocation site */
    char *file;                /* source file of the allocation site */
    struct mem_block *prev,    /* doubly-linked list neighbors */
            * next;

    int padded; /* This mem_block has padding for integrity checks.
                   As of right now, this should only be 0 if
                   using vpx_mem_alloc to allocate cache memory.
                   2005-01-11 tjf */
};
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+ /*
+ vpx_memory_tracker_init(int padding_size, int pad_value)
+ padding_size - the size of the padding before and after each mem addr.
+ Values > 0 indicate that integrity checks can be performed
+ by inspecting these areas.
+ pad_value - the initial value within the padding area before and after
+ each mem addr.
+
+ Initializes the memory tracker interface. Should be called before any
+ other calls to the memory tracker.
+ */
+ int vpx_memory_tracker_init(int padding_size, int pad_value);
+
+ /*
+ vpx_memory_tracker_destroy()
+ Deinitializes the memory tracker interface
+ */
+ void vpx_memory_tracker_destroy();
+
+ /*
+ vpx_memory_tracker_add(size_t addr, unsigned int size,
+ char * file, unsigned int line)
+ addr - memory address to be added to list
+ size - size of addr
+ file - the file addr was referenced from
+ line - the line in file addr was referenced from
+ Adds memory address addr, its size, and the file and line it came from
+ to the memory tracker allocation table
+ */
+ void vpx_memory_tracker_add(size_t addr, unsigned int size,
+ char *file, unsigned int line,
+ int padded);
+
+ /*
+ vpx_memory_tracker_remove(size_t addr)
+ addr - memory address to be removed
+ padded - if 0, disables bounds checking on this memory block even if bounds
+ checking is enabled. (for example, when allocating cache memory, we still want
+ to check for memory leaks, but we do not waste cache space for bounds check padding)
+ Removes the specified address from the memory tracker's allocation
+ table
+ Return:
+ 0: on success
+ -1: if memory allocation table's mutex could not be locked
+ -2: if the addr was not found in the list
+ */
+ int vpx_memory_tracker_remove(size_t addr);
+
+ /*
+ vpx_memory_tracker_find(unsigned int addr)
+ addr - address to be found in the memory tracker's
+ allocation table
+ Return:
+ If found, pointer to the memory block that matches addr
+ NULL otherwise
+ */
+ struct mem_block *vpx_memory_tracker_find(size_t addr);
+
+ /*
+ vpx_memory_tracker_dump()
+ Dumps the current contents of the memory
+ tracker allocation table
+ */
+ void vpx_memory_tracker_dump();
+
+ /*
+ vpx_memory_tracker_check_integrity()
+ If a padding_size was provided to vpx_memory_tracker_init()
+ This function will verify that the region before and after each
+ memory address contains the specified pad_value. Should the check
+ fail, the filename and line of the check will be printed out.
+ */
+ void vpx_memory_tracker_check_integrity(char *file, unsigned int line);
+
+ /*
+ vpx_memory_tracker_set_log_type
+ type - value representing the logging type to use
+ option - type specific option. This will be interpreted differently
+ based on the type.
+ Sets the logging type for the memory tracker.
+ Values currently supported:
+ 0: if option is NULL, log to stderr, otherwise interpret option as a
+ filename and attempt to open it.
+ 1: Use output_debug_string (WIN32 only), option ignored
+ Return:
+ 0: on success
+ -1: if the logging type could not be set, because the value was invalid
+ or because a file could not be opened
+ */
+ int vpx_memory_tracker_set_log_type(int type, char *option);
+
+ /*
+ vpx_memory_tracker_set_log_func
+ userdata - ptr to be passed to the supplied logfunc, can be NULL
+ logfunc - the logging function to be used to output data from
+ vpx_memory_track_dump/check_integrity
+ Sets a logging function to be used by the memory tracker.
+ Return:
+ 0: on success
+ -1: if the logging type could not be set because logfunc was NULL
+ */
+ int vpx_memory_tracker_set_log_func(void *userdata,
+ void(*logfunc)(void *userdata,
+ const char *fmt, va_list args));
+
+ /* Wrappers to standard library functions. */
+ typedef void*(* mem_track_malloc_func)(size_t);
+ typedef void*(* mem_track_calloc_func)(size_t, size_t);
+ typedef void*(* mem_track_realloc_func)(void *, size_t);
+ typedef void (* mem_track_free_func)(void *);
+ typedef void*(* mem_track_memcpy_func)(void *, const void *, size_t);
+ typedef void*(* mem_track_memset_func)(void *, int, size_t);
+ typedef void*(* mem_track_memmove_func)(void *, const void *, size_t);
+
+ /*
+ vpx_memory_tracker_set_functions
+
+ Sets the function pointers for the standard library functions.
+
+ Return:
+ 0: on success
+ -1: if the use global function pointers is not set.
+ */
+ int vpx_memory_tracker_set_functions(mem_track_malloc_func g_malloc_l
+ , mem_track_calloc_func g_calloc_l
+ , mem_track_realloc_func g_realloc_l
+ , mem_track_free_func g_free_l
+ , mem_track_memcpy_func g_memcpy_l
+ , mem_track_memset_func g_memset_l
+ , mem_track_memmove_func g_memmove_l);
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif //__VPX_MEM_TRACKER_H__
diff --git a/vpx_mem/intel_linux/vpx_mem.c b/vpx_mem/intel_linux/vpx_mem.c
new file mode 100644
index 000000000..002e407ef
--- /dev/null
+++ b/vpx_mem/intel_linux/vpx_mem.c
@@ -0,0 +1,950 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+#define __VPX_MEM_C__
+
+#include "vpx_mem.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+#ifndef CONFIG_MEM_MANAGER
+# if defined(VXWORKS)
+# define CONFIG_MEM_MANAGER 1 //include heap manager functionality,
+//default: enabled on vxworks
+# else
+# define CONFIG_MEM_MANAGER 0 //include heap manager functionality
+# endif
+#endif
+
+#ifndef CONFIG_MEM_TRACKER
+# define CONFIG_MEM_TRACKER 1 //include xvpx_* calls in the lib
+#endif
+
+#ifndef CONFIG_MEM_CHECKS
+# define CONFIG_MEM_CHECKS 0 //include some basic safety checks in
+//vpx_memcpy, _memset, and _memmove
+#endif
+
+#ifndef USE_GLOBAL_FUNCTION_POINTERS
+# define USE_GLOBAL_FUNCTION_POINTERS 0 //use function pointers instead of compiled functions.
+#endif
+
+#if CONFIG_MEM_TRACKER
+# include "vpx_mem_tracker.h"
+# if VPX_MEM_TRACKER_VERSION_CHIEF != 2 || VPX_MEM_TRACKER_VERSION_MAJOR != 5
+# error "vpx_mem requires memory tracker version 2.5 to track memory usage"
+# endif
+#endif
+
+#define ADDRESS_STORAGE_SIZE sizeof(size_t)
+
+#ifndef DEFAULT_ALIGNMENT
+# if defined(VXWORKS)
+# define DEFAULT_ALIGNMENT 32 //default addr alignment to use in
+//calls to vpx_* functions other
+//than vpx_memalign
+# else
+# define DEFAULT_ALIGNMENT 1
+# endif
+#endif
+
+#if DEFAULT_ALIGNMENT < 1
+# error "DEFAULT_ALIGNMENT must be >= 1!"
+#endif
+
+#if CONFIG_MEM_TRACKER
+# define TRY_BOUNDS_CHECK 1 //when set to 1 pads each allocation,
+//integrity can be checked using
+//vpx_memory_tracker_check_integrity
+//or on free by defining
+//TRY_BOUNDS_CHECK_ON_FREE
+static unsigned long g_alloc_count = 0;
+
+#else
+# define TRY_BOUNDS_CHECK 0
+#endif
+
+#if TRY_BOUNDS_CHECK
+# define TRY_BOUNDS_CHECK_ON_FREE 0 //checks mem integrity on every
+//free, very expensive
+# define BOUNDS_CHECK_VALUE 0xdeadbeef //value stored before/after ea.
+//mem addr for bounds checking
+# define BOUNDS_CHECK_PAD_SIZE 32 //size of the padding before and
+//after ea allocation to be filled
+//with BOUNDS_CHECK_VALUE.
+//this should be a multiple of 4
+#else
+# define BOUNDS_CHECK_VALUE 0
+# define BOUNDS_CHECK_PAD_SIZE 0
+#endif
+
+#if CONFIG_MEM_MANAGER
+# include "heapmm.h"
+# include "hmm_intrnl.h"
+
+# define SHIFT_HMM_ADDR_ALIGN_UNIT 5
+# define TOTAL_MEMORY_TO_ALLOCATE 20971520 // 20 * 1024 * 1024
+
+# define MM_DYNAMIC_MEMORY 1
+# if MM_DYNAMIC_MEMORY
+static unsigned char *g_p_mng_memory_raw = NULL;
+static unsigned char *g_p_mng_memory = NULL;
+# else
+static unsigned char g_p_mng_memory[TOTAL_MEMORY_TO_ALLOCATE];
+# endif
+
+static size_t g_mm_memory_size = TOTAL_MEMORY_TO_ALLOCATE;
+
+static hmm_descriptor hmm_d;
+static int g_mng_memory_allocated = 0;
+
+static int vpx_mm_create_heap_memory();
+static void *vpx_mm_realloc(void *memblk, size_t size);
+#endif //CONFIG_MEM_MANAGER
+
+#if USE_GLOBAL_FUNCTION_POINTERS
+
+struct GLOBAL_FUNC_POINTERS
+{
+ g_malloc_func g_malloc;
+ g_calloc_func g_calloc;
+ g_realloc_func g_realloc;
+ g_free_func g_free;
+ g_memcpy_func g_memcpy;
+ g_memset_func g_memset;
+ g_memmove_func g_memmove;
+};
+struct GLOBAL_FUNC_POINTERS *g_func = 0;
+
+# define VPX_MALLOC_L g_func->g_malloc
+# define VPX_REALLOC_L g_func->g_realloc
+# define VPX_FREE_L g_func->g_free
+# define VPX_MEMCPY_L g_func->g_memcpy
+# define VPX_MEMSET_L g_func->g_memset
+# define VPX_MEMMOVE_L g_func->g_memmove
+
+#else
+# define VPX_MALLOC_L malloc
+# define VPX_REALLOC_L realloc
+# define VPX_FREE_L free
+# define VPX_MEMCPY_L memcpy
+# define VPX_MEMSET_L memset
+# define VPX_MEMMOVE_L memmove
+#endif // USE_GLOBAL_FUNCTION_POINTERS
+
+/* Should probably use a vpx_mem logger function. */
+#define __REMOVE_PRINTFS
+#ifdef __REMOVE_PRINTFS
+#define _P(x)
+#else
+#define _P(x) x
+#endif
+
+/*returns an addr aligned to the byte boundary specified by align*/
+#define align_addr(addr,align) \
+ (void*)(((size_t)(addr) + ((align) - 1)) & (size_t)-(align))
+
+unsigned int vpx_mem_get_version()
+{
+ unsigned int ver = ((unsigned int)(unsigned char)VPX_MEM_VERSION_CHIEF << 24 |
+ (unsigned int)(unsigned char)VPX_MEM_VERSION_MAJOR << 16 |
+ (unsigned int)(unsigned char)VPX_MEM_VERSION_MINOR << 8 |
+ (unsigned int)(unsigned char)VPX_MEM_VERSION_PATCH);
+ return ver;
+}
+
+int vpx_mem_set_heap_size(size_t size)
+{
+ int ret = -1;
+
+#if CONFIG_MEM_MANAGER
+#if MM_DYNAMIC_MEMORY
+
+ if (!g_mng_memory_allocated && size)
+ {
+ g_mm_memory_size = size;
+ ret = 0;
+ }
+ else
+ ret = -3;
+
+#else
+ ret = -2;
+#endif
+#else
+ (void)size;
+#endif
+
+ return ret;
+}
+
/*
 * Allocate size bytes aligned to the byte boundary given by align.
 *
 * The raw allocation is oversized by (align - 1) slack bytes plus
 * ADDRESS_STORAGE_SIZE bytes so that the word immediately before the
 * returned pointer can hold the raw (unaligned) address, which
 * vpx_free()/vpx_realloc() later recover via ((size_t *)x)[-1].
 *
 * NOTE(review): align_addr() assumes align is a power of two; that is
 * not validated here.
 *
 * Returns the aligned pointer, or NULL on failure.
 */
void *vpx_memalign(size_t align, size_t size)
{
    void *addr,
         * x = NULL;

#if CONFIG_MEM_MANAGER
    int number_aau;

    if (vpx_mm_create_heap_memory() < 0)
    {
        _P(printf("[vpx][mm] ERROR vpx_memalign() Couldn't create memory for Heap.\n");)
    }

    /* Convert the byte count (payload + slack + stored-address word)
       into hmm address-alignment units, rounding up. */
    number_aau = ((size + align - 1 + ADDRESS_STORAGE_SIZE) >>
                  SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;

    addr = hmm_alloc(&hmm_d, number_aau);
#else
    addr = VPX_MALLOC_L(size + align - 1 + ADDRESS_STORAGE_SIZE);
#endif //CONFIG_MEM_MANAGER

    if (addr)
    {
        x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
        /* save the actual malloc address */
        ((size_t *)x)[-1] = (size_t)addr;
    }

    return x;
}
+
+void *vpx_malloc(size_t size)
+{
+ return vpx_memalign(DEFAULT_ALIGNMENT, size);
+}
+
+void *vpx_calloc(size_t num, size_t size)
+{
+ void *x;
+
+ x = vpx_memalign(DEFAULT_ALIGNMENT, num * size);
+
+ if (x)
+ VPX_MEMSET_L(x, 0, num * size);
+
+ return x;
+}
+
/*
 * Aligned realloc with standard realloc() semantics (see comment in the
 * body). The hidden word stored just before memblk holds the raw
 * allocation address.
 *
 * NOTE(review): the moved block is re-aligned to DEFAULT_ALIGNMENT, not
 * to the alignment it was originally allocated with — blocks from
 * vpx_memalign() with a larger alignment may lose it here; confirm no
 * caller relies on that.
 */
void *vpx_realloc(void *memblk, size_t size)
{
    void *addr,
         * new_addr = NULL;
    int align = DEFAULT_ALIGNMENT;

    /*
    The realloc() function changes the size of the object pointed to by
    ptr to the size specified by size, and returns a pointer to the
    possibly moved block. The contents are unchanged up to the lesser
    of the new and old sizes. If ptr is null, realloc() behaves like
    malloc() for the specified size. If size is zero (0) and ptr is
    not a null pointer, the object pointed to is freed.
    */
    if (!memblk)
        new_addr = vpx_malloc(size);
    else if (!size)
        vpx_free(memblk);
    else
    {
        /* Recover the raw allocation address saved by vpx_memalign(). */
        addr = (void *)(((size_t *)memblk)[-1]);
        memblk = NULL;

#if CONFIG_MEM_MANAGER
        new_addr = vpx_mm_realloc(addr, size + align + ADDRESS_STORAGE_SIZE);
#else
        new_addr = VPX_REALLOC_L(addr, size + align + ADDRESS_STORAGE_SIZE);
#endif

        if (new_addr)
        {
            /* Re-align past the stored-address word and stash the raw
               address again, mirroring vpx_memalign(). */
            addr = new_addr;
            new_addr = (void *)(((size_t)
                                 ((unsigned char *)new_addr + ADDRESS_STORAGE_SIZE) + (align - 1)) &
                                (size_t) - align);
            /* save the actual malloc address */
            ((size_t *)new_addr)[-1] = (size_t)addr;
        }
    }

    return new_addr;
}
+
+void vpx_free(void *memblk)
+{
+ if (memblk)
+ {
+ void *addr = (void *)(((size_t *)memblk)[-1]);
+#if CONFIG_MEM_MANAGER
+ hmm_free(&hmm_d, addr);
+#else
+ VPX_FREE_L(addr);
+#endif
+ }
+}
+
/*
 * Platform cache/segment allocator. On DM642 and uClinux targets this
 * forwards to the platform's mem_alloc(); on every other target it is a
 * stub that always returns NULL.
 */
void *vpx_mem_alloc(int id, size_t size, size_t align)
{
#if defined CHIP_DM642 || defined __uClinux__
    void *mem = (void *)mem_alloc(id, size, align);

    if (!mem)
    {
        /* NOTE(review): the format prints mem for "id=%p" and passes
           size_t values to %u — specifier/argument mismatch on LP64
           targets; presumably only built for 32-bit — confirm. */
        _P(fprintf(stderr,
                   "\n"
                   "*********************************************************\n"
                   "WARNING: mem_alloc returned 0 for id=%p size=%u align=%u.\n"
                   "*********************************************************\n",
                   mem, size, align));
        // should no longer need this. Softier says it's fixed. 2005-01-21 tjf
        //#if defined __uClinux__
        //while(1)usleep(1000000);
        //#endif
    }

#if defined __uClinux__
    else if (mem == (void *)0xFFFFFFFF)
    {
        /* uClinux mem_alloc signals out-of-memory with an all-ones
           address rather than NULL; normalize it to NULL. */
        mem = (void *)0;

        _P(fprintf(stderr,
                   "\n"
                   "******************************************************\n"
                   "ERROR: mem_alloc id=%p size=%u align=%u OUT OF MEMORY.\n"
                   "******************************************************\n",
                   mem, size, align));
    }

#endif // __uClinux__

    return mem;
#else
    (void)id;
    (void)size;
    (void)align;
    return (void *)0;
#endif
}
+
/*
 * Platform cache/segment deallocator, the counterpart of
 * vpx_mem_alloc(). On DM642 and uClinux targets this forwards to the
 * platform's mem_free(); on every other target it is a no-op stub.
 * Passing mem == NULL only logs a warning before forwarding.
 */
void vpx_mem_free(int id, void *mem, size_t size)
{
#if defined CHIP_DM642 || defined __uClinux__

    if (!mem)
    {
        /* NOTE(review): id (int) is printed with %p and size (size_t)
           with %u — specifier/argument mismatch on LP64 targets;
           presumably only built for 32-bit — confirm. */
        _P(fprintf(stderr,
                   "\n"
                   "**************************************\n"
                   "WARNING: 0 being free'd id=%p size=%u.\n"
                   "**************************************\n",
                   id, size));

        // should no longer need this. Softier says it's fixed. 2005-01-21 tjf
        //#if defined __uClinux__
        //while(1)usleep(1000000);
        //#endif
    }

    mem_free(id, mem, size);
#else
    (void)id;
    (void)mem;
    (void)size;
#endif
}
+
+
+#if CONFIG_MEM_TRACKER
+
+void *xvpx_mem_alloc(int id, size_t size, size_t align, char *file, int line)
+{
+ void *mem = vpx_mem_alloc(id, size, align);
+
+ vpx_memory_tracker_add((size_t)mem, size, file, line, 0);
+
+ return mem;
+}
+
+void xvpx_mem_free(int id, void *mem, size_t size, char *file, int line)
+{
+ if (vpx_memory_tracker_remove((size_t)mem) == -2)
+ {
+#if REMOVE_PRINTFS
+ (void)file;
+ (void)line;
+#endif
+ _P(fprintf(stderr, "[vpx_mem][xvpx_mem_free] addr: %p (id=%p size=%u) "
+ "not found in list; freed from file:%s"
+ " line:%d\n", mem, id, size, file, line));
+ }
+
+ vpx_mem_free(id, mem, size);
+}
+
/*
 * Tracked, bounds-checked variant of vpx_memalign().
 *
 * Layout when TRY_BOUNDS_CHECK is on:
 *   [pad][stored raw addr][payload][pad]
 * Both pad regions are BOUNDS_CHECK_PAD_SIZE bytes filled with
 * BOUNDS_CHECK_VALUE so vpx_memory_tracker_check_integrity() can detect
 * over/underruns. The first tracked allocation lazily initializes the
 * tracker.
 */
void *xvpx_memalign(size_t align, size_t size, char *file, int line)
{
#if TRY_BOUNDS_CHECK
    unsigned char *x_bounds;
#endif

    void *x;

    if (g_alloc_count == 0)
    {
        /* Lazy one-time tracker initialization. */
#if TRY_BOUNDS_CHECK
        int i_rv = vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE);
#else
        int i_rv = vpx_memory_tracker_init(0, 0);
#endif

        if (i_rv < 0)
        {
            _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");)
        }
    }

#if TRY_BOUNDS_CHECK
    {
        int i;
        unsigned int tempme = BOUNDS_CHECK_VALUE;

        /* Oversize the allocation by one pad region on each side. */
        x_bounds = vpx_memalign(align, size + (BOUNDS_CHECK_PAD_SIZE * 2));

        if (x_bounds)
        {
            /*we're aligning the address twice here but to keep things
            consistent we want to have the padding come before the stored
            address so no matter what free function gets called we will
            attempt to free the correct address*/
            x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]);
            x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE,
                           (int)align);
            /* save the actual malloc address */
            ((size_t *)x)[-1] = (size_t)x_bounds;

            /* Stamp the guard words before and after the payload. */
            for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int))
            {
                VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int));
                VPX_MEMCPY_L((unsigned char *)x + size + i,
                             &tempme, sizeof(unsigned int));
            }
        }
        else
            x = NULL;
    }
#else
    x = vpx_memalign(align, size);
#endif //TRY_BOUNDS_CHECK

    /* NOTE(review): the count is incremented and the address recorded
       even when the allocation failed (x == NULL) — confirm intended. */
    g_alloc_count++;

    vpx_memory_tracker_add((size_t)x, size, file, line, 1);

    return x;
}
+
+void *xvpx_malloc(size_t size, char *file, int line)
+{
+ return xvpx_memalign(DEFAULT_ALIGNMENT, size, file, line);
+}
+
+void *xvpx_calloc(size_t num, size_t size, char *file, int line)
+{
+ void *x = xvpx_memalign(DEFAULT_ALIGNMENT, num * size, file, line);
+
+ if (x)
+ VPX_MEMSET_L(x, 0, num * size);
+
+ return x;
+}
+
+void *xvpx_realloc(void *memblk, size_t size, char *file, int line)
+{
+ struct mem_block *p = NULL;
+ int orig_size = 0,
+ orig_line = 0;
+ char *orig_file = NULL;
+
+#if TRY_BOUNDS_CHECK
+ unsigned char *x_bounds = memblk ?
+ (unsigned char *)(((size_t *)memblk)[-1]) :
+ NULL;
+#endif
+
+ void *x;
+
+ if (g_alloc_count == 0)
+ {
+#if TRY_BOUNDS_CHECK
+
+ if (!vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE))
+#else
+ if (!vpx_memory_tracker_init(0, 0))
+#endif
+ {
+ _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");)
+ }
+ }
+
+ if (p = vpx_memory_tracker_find((size_t)memblk))
+ {
+ orig_size = p->size;
+ orig_file = p->file;
+ orig_line = p->line;
+ }
+
+#if TRY_BOUNDS_CHECK_ON_FREE
+ vpx_memory_tracker_check_integrity(file, line);
+#endif
+
+ //have to do this regardless of success, because
+ //the memory that does get realloc'd may change
+ //the bounds values of this block
+ vpx_memory_tracker_remove((size_t)memblk);
+
+#if TRY_BOUNDS_CHECK
+ {
+ int i;
+ unsigned int tempme = BOUNDS_CHECK_VALUE;
+
+ x_bounds = vpx_realloc(memblk, size + (BOUNDS_CHECK_PAD_SIZE * 2));
+
+ if (x_bounds)
+ {
+ x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]);
+ x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE,
+ (int)DEFAULT_ALIGNMENT);
+ /* save the actual malloc address */
+ ((size_t *)x)[-1] = (size_t)x_bounds;
+
+ for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int))
+ {
+ VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int));
+ VPX_MEMCPY_L((unsigned char *)x + size + i,
+ &tempme, sizeof(unsigned int));
+ }
+ }
+ else
+ x = NULL;
+ }
+#else
+ x = vpx_realloc(memblk, size);
+#endif //TRY_BOUNDS_CHECK
+
+ if (x)
+ vpx_memory_tracker_add((size_t)x, size, file, line, 1);
+ else
+ vpx_memory_tracker_add((size_t)memblk, orig_size, orig_file, orig_line, 1);
+
+ return x;
+}
+
/*
 * Tracked free. Removes p_address from the tracker, then releases it.
 * Addresses not found in the tracker are assumed to come from the plain
 * vpx_* allocators (no padding) and are freed as-is. When the last
 * tracked block is freed, the tracker itself is torn down.
 */
void xvpx_free(void *p_address, char *file, int line)
{
#if TRY_BOUNDS_CHECK
    unsigned char *p_bounds_address = (unsigned char *)p_address;
    //p_bounds_address -= BOUNDS_CHECK_PAD_SIZE;
#endif

#if !TRY_BOUNDS_CHECK_ON_FREE
    (void)file;
    (void)line;
#endif

    if (p_address)
    {
#if TRY_BOUNDS_CHECK_ON_FREE
        vpx_memory_tracker_check_integrity(file, line);
#endif

        //if the addr isn't found in the list, assume it was allocated via
        //vpx_ calls not xvpx_, therefore it does not contain any padding
        if (vpx_memory_tracker_remove((size_t)p_address) == -2)
        {
            p_bounds_address = p_address;
            _P(fprintf(stderr, "[vpx_mem][xvpx_free] addr: %p not found in"
                       " list; freed from file:%s"
                       " line:%d\n", p_address, file, line));
        }
        else
            --g_alloc_count;

#if TRY_BOUNDS_CHECK
        vpx_free(p_bounds_address);
#else
        vpx_free(p_address);
#endif

        /* Last tracked block gone: shut the tracker down. */
        if (!g_alloc_count)
            vpx_memory_tracker_destroy();
    }
}
+
+#endif /*CONFIG_MEM_TRACKER*/
+
#if CONFIG_MEM_CHECKS
#if defined(VXWORKS)
#include <task_lib.h> //for task_delay()
/* Dump a stack trace of the given task via the VxWorks tt() routine;
   used only to diagnose where a bad-pointer warning originated. */
static int get_my_tt(int task)
{
    tt(task);

    return 0;
}

/* Sleep for approximately msec milliseconds by converting to system
   clock ticks (a sub-tick interval rounds up to one tick). */
static void vx_sleep(int msec)
{
    int ticks_to_sleep = 0;

    if (msec)
    {
        int msec_per_tick = 1000 / sys_clk_rate_get();

        if (msec < msec_per_tick)
            ticks_to_sleep++;
        else
            ticks_to_sleep = msec / msec_per_tick;
    }

    task_delay(ticks_to_sleep);
}
#endif
#endif
+
+void *vpx_memcpy(void *dest, const void *source, size_t length)
+{
+#if CONFIG_MEM_CHECKS
+
+ if (((int)dest < 0x4000) || ((int)source < 0x4000))
+ {
+ _P(printf("WARNING: vpx_memcpy dest:0x%x source:0x%x len:%d\n", (int)dest, (int)source, length);)
+
+#if defined(VXWORKS)
+ sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
+
+ vx_sleep(10000);
+#endif
+ }
+
+#endif
+
+ return VPX_MEMCPY_L(dest, source, length);
+}
+
+void *vpx_memset(void *dest, int val, size_t length)
+{
+#if CONFIG_MEM_CHECKS
+
+ if ((int)dest < 0x4000)
+ {
+ _P(printf("WARNING: vpx_memset dest:0x%x val:%d len:%d\n", (int)dest, val, length);)
+
+#if defined(VXWORKS)
+ sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
+
+ vx_sleep(10000);
+#endif
+ }
+
+#endif
+
+ return VPX_MEMSET_L(dest, val, length);
+}
+
+void *vpx_memmove(void *dest, const void *src, size_t count)
+{
+#if CONFIG_MEM_CHECKS
+
+ if (((int)dest < 0x4000) || ((int)src < 0x4000))
+ {
+ _P(printf("WARNING: vpx_memmove dest:0x%x src:0x%x count:%d\n", (int)dest, (int)src, count);)
+
+#if defined(VXWORKS)
+ sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
+
+ vx_sleep(10000);
+#endif
+ }
+
+#endif
+
+ return VPX_MEMMOVE_L(dest, src, count);
+}
+
+#if CONFIG_MEM_MANAGER
+
/*
 * Lazily create the backing heap for the built-in memory manager.
 * Safe to call repeatedly; only the first successful call does work.
 *
 * Return: 0 on success (or if the heap already exists), -1 if the
 * backing store could not be allocated.
 */
static int vpx_mm_create_heap_memory()
{
    int i_rv = 0;

    if (!g_mng_memory_allocated)
    {
#if MM_DYNAMIC_MEMORY
        /* Over-allocate by one alignment unit so the managed region can
           be aligned to HMM_ADDR_ALIGN_UNIT. */
        g_p_mng_memory_raw =
            (unsigned char *)malloc(g_mm_memory_size + HMM_ADDR_ALIGN_UNIT);

        if (g_p_mng_memory_raw)
        {
            /* NOTE(review): the pointer is rounded up through an
               unsigned int cast, which truncates on 64-bit targets —
               presumably this path only runs on 32-bit platforms;
               confirm before porting. */
            g_p_mng_memory = (unsigned char *)((((unsigned int)g_p_mng_memory_raw) +
                                                HMM_ADDR_ALIGN_UNIT - 1) &
                                               -(int)HMM_ADDR_ALIGN_UNIT);

            _P(printf("[vpx][mm] total memory size:%d g_p_mng_memory_raw:0x%x g_p_mng_memory:0x%x\n"
                      , g_mm_memory_size + HMM_ADDR_ALIGN_UNIT
                      , (unsigned int)g_p_mng_memory_raw
                      , (unsigned int)g_p_mng_memory);)
        }
        else
        {
            _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n"
                      , g_mm_memory_size);)

            i_rv = -1;
        }

        if (g_p_mng_memory)
#endif
        {
            int chunk_size = 0;

            g_mng_memory_allocated = 1;

            hmm_init(&hmm_d);

            /* Managed chunk size in address-alignment units, minus the
               manager's dummy end block. */
            chunk_size = g_mm_memory_size >> SHIFT_HMM_ADDR_ALIGN_UNIT;

            chunk_size -= DUMMY_END_BLOCK_BAUS;

            _P(printf("[vpx][mm] memory size:%d for vpx memory manager. g_p_mng_memory:0x%x chunk_size:%d\n"
                      , g_mm_memory_size
                      , (unsigned int)g_p_mng_memory
                      , chunk_size);)

            hmm_new_chunk(&hmm_d, (void *)g_p_mng_memory, chunk_size);
        }

#if MM_DYNAMIC_MEMORY
        else
        {
            _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n"
                      , g_mm_memory_size);)

            i_rv = -1;
        }

#endif
    }

    return i_rv;
}
+
/*
 * Memory-manager-backed realloc used by vpx_realloc() when
 * CONFIG_MEM_MANAGER is enabled.
 *
 * Strategy, in order: keep the block (same size in alignment units),
 * resize it in place via hmm_resize(), and finally allocate a new
 * block, copy, and free the old one. Returns NULL on failure, leaving
 * memblk untouched.
 */
static void *vpx_mm_realloc(void *memblk, size_t size)
{
    void *p_ret = NULL;

    if (vpx_mm_create_heap_memory() < 0)
    {
        _P(printf("[vpx][mm] ERROR vpx_mm_realloc() Couldn't create memory for Heap.\n");)
    }
    else
    {
        int i_rv = 0;
        int old_num_aaus;
        int new_num_aaus;

        /* Compare sizes in hmm address-alignment units. */
        old_num_aaus = hmm_true_size(memblk);
        new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;

        if (old_num_aaus == new_num_aaus)
        {
            p_ret = memblk;
        }
        else
        {
            i_rv = hmm_resize(&hmm_d, memblk, new_num_aaus);

            if (i_rv == 0)
            {
                p_ret = memblk;
            }
            else
            {
                /* Error. Try to malloc and then copy data. */
                void *p_from_malloc;

                new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
                p_from_malloc = hmm_alloc(&hmm_d, new_num_aaus);

                if (p_from_malloc)
                {
                    /* NOTE(review): copies `size` bytes even when
                       growing, i.e. reads past the old payload; this
                       relies on hmm's unit rounding — confirm. */
                    vpx_memcpy(p_from_malloc, memblk, size);
                    hmm_free(&hmm_d, memblk);

                    p_ret = p_from_malloc;
                }
            }
        }
    }

    return p_ret;
}
+#endif //CONFIG_MEM_MANAGER
+
+#if USE_GLOBAL_FUNCTION_POINTERS
+# if CONFIG_MEM_TRACKER
+extern int vpx_memory_tracker_set_functions(g_malloc_func g_malloc_l
+ , g_calloc_func g_calloc_l
+ , g_realloc_func g_realloc_l
+ , g_free_func g_free_l
+ , g_memcpy_func g_memcpy_l
+ , g_memset_func g_memset_l
+ , g_memmove_func g_memmove_l);
+# endif
+#endif
+int vpx_mem_set_functions(g_malloc_func g_malloc_l
+ , g_calloc_func g_calloc_l
+ , g_realloc_func g_realloc_l
+ , g_free_func g_free_l
+ , g_memcpy_func g_memcpy_l
+ , g_memset_func g_memset_l
+ , g_memmove_func g_memmove_l)
+{
+#if USE_GLOBAL_FUNCTION_POINTERS
+
+ /* If use global functions is turned on then the
+ application must set the global functions before
+ it does anything else or vpx_mem will have
+ unpredictable results. */
+ if (!g_func)
+ {
+ g_func = (struct GLOBAL_FUNC_POINTERS *)g_malloc_l(sizeof(struct GLOBAL_FUNC_POINTERS));
+
+ if (!g_func)
+ {
+ return -1;
+ }
+ }
+
+#if CONFIG_MEM_TRACKER
+ {
+ int rv = 0;
+ rv = vpx_memory_tracker_set_functions(g_malloc_l
+ , g_calloc_l
+ , g_realloc_l
+ , g_free_l
+ , g_memcpy_l
+ , g_memset_l
+ , g_memmove_l);
+
+ if (rv < 0)
+ {
+ return rv;
+ }
+ }
+#endif
+
+ if (g_malloc_l)
+ g_func->g_malloc = g_malloc_l;
+ else
+ g_func->g_malloc = 0;
+
+ if (g_calloc_l)
+ g_func->g_calloc = g_calloc_l;
+ else
+ g_func->g_calloc = 0;
+
+ if (g_realloc_l)
+ g_func->g_realloc = g_realloc_l;
+ else
+ g_func->g_realloc = 0;
+
+ if (g_free_l)
+ g_func->g_free = g_free_l;
+ else
+ g_func->g_free = 0;
+
+ if (g_memcpy_l)
+ g_func->g_memcpy = g_memcpy_l;
+ else
+ g_func->g_memcpy = 0;
+
+ if (g_memset_l)
+ g_func->g_memset = g_memset_l;
+ else
+ g_func->g_memset = 0;
+
+ if (g_memmove_l)
+ g_func->g_memmove = g_memmove_l;
+ else
+ g_func->g_memmove = 0;
+
+ return 0;
+#else
+ (void)g_malloc_l;
+ (void)g_calloc_l;
+ (void)g_realloc_l;
+ (void)g_free_l;
+ (void)g_memcpy_l;
+ (void)g_memset_l;
+ (void)g_memmove_l;
+ return -1;
+#endif
+}
+
/*
 * Release the global function-pointer table, using the free routine it
 * holds (the table was allocated with the matching malloc). No-op when
 * global function pointers are disabled or were never set.
 * Always returns 0.
 */
int vpx_mem_unset_functions()
{
#if USE_GLOBAL_FUNCTION_POINTERS

    if (g_func)
    {
        g_free_func release = g_func->g_free;

        release(g_func);
        g_func = 0;
    }

#endif
    return 0;
}
+
+#ifdef _INTEL_LINUX
/*
 * Replacement for the Intel compiler runtime's _intel_fast_memcpy,
 * referenced by ICC-compiled objects when the Intel runtime library is
 * not linked in.
 *
 * Fix: the previous hand-rolled byte-at-a-time loop (which also cast
 * away the const of src) defeated the purpose of a "fast" memcpy;
 * delegate to memcpy, which compilers expand to an optimized copy.
 * As with memcpy, the regions must not overlap.
 */
void *_intel_fast_memcpy(void *dest, const void *src, size_t count)
{
    memcpy(dest, src, count);
    return dest;
}
+
/*
 * Replacement for the Intel compiler runtime's _intel_fast_memset:
 * fill count bytes at dest with the byte value c; returns dest, with
 * memset semantics.
 */
void *_intel_fast_memset(void *dest, int c, size_t count)
{
    void *ret = dest;

    memset(ret, c, count);
    return ret;
}
+
/*
 * Replacement for the Intel compiler runtime's _VEC_memzero: zero
 * count bytes at dest. The c parameter exists only for signature
 * compatibility and is ignored — this routine always writes zeros.
 * Returns dest.
 */
void *_VEC_memzero(void *dest, int c, size_t count)
{
    memset(dest, 0, count);
    return dest;
}
+
+#endif //_ICC
diff --git a/vpx_mem/intel_linux/vpx_mem_tracker.c b/vpx_mem/intel_linux/vpx_mem_tracker.c
new file mode 100644
index 000000000..fa023e348
--- /dev/null
+++ b/vpx_mem/intel_linux/vpx_mem_tracker.c
@@ -0,0 +1,811 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/*
+ vpx_mem_tracker.c
+
+ jwz 2003-09-30:
+    Stores a list of addresses, their size, and the file and line they came from.
+ All exposed lib functions are prefaced by vpx_ and allow the global list
+ to be thread safe.
+ Current supported platforms are:
+ Linux, Win32, win_ce and vx_works
+ Further support can be added by defining the platform specific mutex
+ in the memory_tracker struct as well as calls to create/destroy/lock/unlock
+ the mutex in vpx_memory_tracker_init/Destroy and memory_tracker_lock_mutex/unlock_mutex
+*/
+
+#define NO_MUTEX
+
+#if defined(__uClinux__)
+# include <lddk.h>
+#endif
+
+#if defined(LINUX) || defined(__uClinux__)
+# include <pthread.h>
+#elif defined(WIN32) || defined(_WIN32_WCE)
+# define WIN32_LEAN_AND_MEAN
+# include <windows.h>
+# include <winbase.h>
+#elif defined(VXWORKS)
+# include <sem_lib.h>
+#elif defined(NDS_NITRO)
+# include <nitro.h>
+# include <nitro/os.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h> //VXWORKS doesn't have a malloc/memory.h file,
+//this should pull in malloc,free,etc.
+#include <stdarg.h>
+
+#include "vpx_mem_tracker.h"
+
+#undef vpx_malloc //undefine any vpx_mem macros that may affect calls to
+#undef vpx_free //memory functions in this file
+#undef vpx_memcpy
+#undef vpx_memset
+
+
+#ifndef USE_GLOBAL_FUNCTION_POINTERS
+# define USE_GLOBAL_FUNCTION_POINTERS 0 //use function pointers instead of compiled functions.
+#endif
+
+#if USE_GLOBAL_FUNCTION_POINTERS
+static mem_track_malloc_func g_malloc = malloc;
+static mem_track_calloc_func g_calloc = calloc;
+static mem_track_realloc_func g_realloc = realloc;
+static mem_track_free_func g_free = free;
+static mem_track_memcpy_func g_memcpy = memcpy;
+static mem_track_memset_func g_memset = memset;
+static mem_track_memmove_func g_memmove = memmove;
+# define MEM_TRACK_MALLOC g_malloc
+# define MEM_TRACK_FREE g_free
+# define MEM_TRACK_MEMCPY g_memcpy
+# define MEM_TRACK_MEMSET g_memset
+#else
+# define MEM_TRACK_MALLOC vpx_malloc
+# define MEM_TRACK_FREE vpx_free
+# define MEM_TRACK_MEMCPY vpx_memcpy
+# define MEM_TRACK_MEMSET vpx_memset
+#endif // USE_GLOBAL_FUNCTION_POINTERS
+
+
+/* Global bookkeeping for all tracked allocations. */
+struct memory_tracker
+{
+    /* Doubly-linked list of tracked blocks.  head is a sentinel node
+    ** allocated by vpx_memory_tracker_init(); tail points at the newest
+    ** entry (new blocks are appended in memory_tracker_add). */
+    struct mem_block *head,
+                     *tail;
+    int len,        /* NOTE(review): reset by destroy but never updated by
+                    ** add/remove in this file — confirm intended use */
+        totalsize;  /* NOTE(review): never referenced in this file */
+    unsigned int current_allocated, /* bytes currently outstanding */
+                 max_allocated;     /* high-water mark of current_allocated */
+
+    /* Platform-specific mutex guarding the list (NO_MUTEX disables it). */
+#if defined(LINUX) || defined(__uClinux__)
+    pthread_mutex_t mutex;
+#elif defined(WIN32) || defined(_WIN32_WCE)
+    HANDLE mutex;
+#elif defined(VXWORKS)
+    SEM_ID mutex;
+#elif defined(NDS_NITRO)
+    OSMutex mutex;
+#elif defined(NO_MUTEX)
+#else
+#error "No mutex type defined for this platform!"
+#endif
+
+    int padding_size, /* bytes of guard padding checked around each block (0 = none) */
+        pad_value;    /* byte pattern expected throughout the guard padding */
+};
+
+/* prototypes for internal library functions */
+static void memtrack_log(const char *fmt, ...);
+static void memory_tracker_dump();
+static void memory_tracker_check_integrity(char *file, unsigned int line);
+static void memory_tracker_add(size_t addr, unsigned int size,
+ char *file, unsigned int line,
+ int padded);
+static int memory_tracker_remove(size_t addr);
+static struct mem_block *memory_tracker_find(size_t addr);
+
+#if defined(NO_MUTEX)
+# define memory_tracker_lock_mutex() (!g_b_mem_tracker_inited)
+# define memory_tracker_unlock_mutex()
+#else
+static int memory_tracker_lock_mutex();
+static int memory_tracker_unlock_mutex();
+#endif
+
+static struct memory_tracker memtrack; //our global memory allocation list
+static int g_b_mem_tracker_inited = 0; //indicates whether the global list has
+//been initialized (1:yes/0:no)
+static struct
+{
+ FILE *file;
+ int type;
+ void (*func)(void *userdata, const char *fmt, va_list args);
+ void *userdata;
+} g_logging = {0};
+
+extern void *vpx_malloc(size_t size);
+extern void vpx_free(void *memblk);
+extern void *vpx_memcpy(void *dest, const void *src, size_t length);
+extern void *vpx_memset(void *dest, int val, size_t length);
+
+/*
+ *
+ * Exposed library functions
+ *
+*/
+
+/*
+ vpx_memory_tracker_init(int padding_size, int pad_value)
+ padding_size - the size of the padding before and after each mem addr.
+ Values > 0 indicate that integrity checks can be performed
+ by inspecting these areas.
+ pad_value - the initial value within the padding area before and after
+ each mem addr.
+
+ Initializes global memory tracker structure
+ Allocates the head of the list
+*/
+int vpx_memory_tracker_init(int padding_size, int pad_value)
+{
+    if (!g_b_mem_tracker_inited)
+    {
+        /* Extra parentheses: the assignment inside the condition is
+        ** intentional (allocate the sentinel head node and test it). */
+        if ((memtrack.head = (struct mem_block *)MEM_TRACK_MALLOC(sizeof(struct mem_block))))
+        {
+            /* Initialize to failure so a build where no platform branch
+            ** below matches cannot read an indeterminate value. */
+            int ret = -1;
+
+            MEM_TRACK_MEMSET(memtrack.head, 0, sizeof(struct mem_block));
+
+            memtrack.tail = memtrack.head;
+
+            memtrack.current_allocated = 0;
+            memtrack.max_allocated = 0;
+
+            memtrack.padding_size = padding_size;
+            memtrack.pad_value = pad_value;
+
+#if defined(LINUX) || defined(__uClinux__)
+            ret = pthread_mutex_init(&memtrack.mutex,
+                                     NULL); /*mutex attributes (NULL=default)*/
+#elif defined(WIN32) || defined(_WIN32_WCE)
+            /* NOTE(review): the Win32 API spells this CreateMutex — confirm
+            ** this lowercased name resolves in this build. */
+            memtrack.mutex = create_mutex(NULL, /*security attributes*/
+                                          FALSE, /*we don't want initial ownership*/
+                                          NULL); /*mutex name*/
+            ret = !memtrack.mutex;
+#elif defined(VXWORKS)
+            memtrack.mutex = sem_bcreate(SEM_Q_FIFO, /*SEM_Q_FIFO non-priority based mutex*/
+                                         SEM_FULL); /*SEM_FULL initial state is unlocked*/
+            ret = !memtrack.mutex;
+#elif defined(NDS_NITRO)
+            os_init_mutex(&memtrack.mutex);
+            ret = 0;
+#elif defined(NO_MUTEX)
+            ret = 0;
+#endif
+
+            if (ret)
+            {
+                /* Mutex creation failed: release the sentinel so a later
+                ** init attempt starts from a clean state. */
+                memtrack_log("vpx_memory_tracker_init: Error creating mutex!\n");
+
+                MEM_TRACK_FREE(memtrack.head);
+                memtrack.head = NULL;
+            }
+            else
+            {
+                memtrack_log("Memory Tracker init'd, v."vpx_mem_tracker_version" pad_size:%d pad_val:0x%x %d\n"
+                             , padding_size
+                             , pad_value
+                             , pad_value);
+                g_b_mem_tracker_inited = 1;
+            }
+        }
+    }
+
+    return g_b_mem_tracker_inited;
+}
+
+/*
+ vpx_memory_tracker_destroy()
+ If our global struct was initialized zeros out all its members,
+    frees memory and destroys its mutex
+*/
+void vpx_memory_tracker_destroy()
+{
+    if (!memory_tracker_lock_mutex())
+    {
+        struct mem_block *cur = memtrack.head;
+
+        /* Report anything still outstanding before tearing down. */
+        memory_tracker_dump();
+
+        /* Free every node, sentinel head included. */
+        while (cur)
+        {
+            struct mem_block *next = cur->next;
+
+            MEM_TRACK_FREE(cur);
+            cur = next;
+        }
+
+        memtrack.head = NULL;
+        memtrack.tail = NULL;
+        memtrack.len = 0;
+        memtrack.current_allocated = 0;
+        memtrack.max_allocated = 0;
+
+        if ((g_logging.type == 0) && (g_logging.file != 0)) //&& (g_logging.file != stderr) )
+        {
+#if !defined(NDS_NITRO)
+            fclose(g_logging.file);
+#endif
+            g_logging.file = NULL;
+        }
+
+        memory_tracker_unlock_mutex();
+
+        g_b_mem_tracker_inited = 0;
+    }
+}
+
+/*
+ vpx_memory_tracker_add(size_t addr, unsigned int size,
+ char * file, unsigned int line)
+ addr - memory address to be added to list
+ size - size of addr
+ file - the file addr was referenced from
+ line - the line in file addr was referenced from
+    Adds memory address addr, its size, and the file and line it came from
+ to the global list via the thread safe internal library function
+*/
+void vpx_memory_tracker_add(size_t addr, unsigned int size,
+                            char *file, unsigned int line,
+                            int padded)
+{
+    /* Thin public wrapper: the internal function performs the locking. */
+    memory_tracker_add(addr, size, file, line, padded);
+}
+
+/*
+ vpx_memory_tracker_remove(size_t addr)
+ addr - memory address to be removed from list
+ Removes addr from the global list via the thread safe
+ internal remove function
+ Return:
+ Same as described for memory_tracker_remove
+*/
+int vpx_memory_tracker_remove(size_t addr)
+{
+    /* Thin public wrapper: the internal function performs the locking. */
+    return memory_tracker_remove(addr);
+}
+
+/*
+ vpx_memory_tracker_find(size_t addr)
+ addr - address to be found in list
+ Return:
+ If found, pointer to the memory block that matches addr
+ NULL otherwise
+*/
+struct mem_block *vpx_memory_tracker_find(size_t addr)
+{
+    struct mem_block *found;
+
+    if (memory_tracker_lock_mutex())
+        return NULL;   /* tracker not initialized or lock failed */
+
+    found = memory_tracker_find(addr);
+    memory_tracker_unlock_mutex();
+
+    return found;
+}
+
+/*
+ vpx_memory_tracker_dump()
+ Locks the memory tracker's mutex and calls the internal
+ library function to dump the current contents of the
+ global memory allocation list
+*/
+void vpx_memory_tracker_dump()
+{
+    if (memory_tracker_lock_mutex())
+        return;   /* tracker not initialized or lock failed */
+
+    memory_tracker_dump();
+    memory_tracker_unlock_mutex();
+}
+
+/*
+ vpx_memory_tracker_check_integrity(char* file, unsigned int line)
+ file - The file name where the check was placed
+ line - The line in file where the check was placed
+ Locks the memory tracker's mutex and calls the internal
+ integrity check function to inspect every address in the global
+ memory allocation list
+*/
+void vpx_memory_tracker_check_integrity(char *file, unsigned int line)
+{
+    if (memory_tracker_lock_mutex())
+        return;   /* tracker not initialized or lock failed */
+
+    memory_tracker_check_integrity(file, line);
+    memory_tracker_unlock_mutex();
+}
+
+/*
+ vpx_memory_tracker_set_log_type
+ Sets the logging type for the memory tracker. Based on the value it will
+ direct its output to the appropriate place.
+ Return:
+ 0: on success
+ -1: if the logging type could not be set, because the value was invalid
+ or because a file could not be opened
+*/
+int vpx_memory_tracker_set_log_type(int type, char *option)
+{
+    int ret = -1;
+
+    switch (type)
+    {
+    case 0: /* log to a FILE* (option = path, or NULL for no file) */
+        g_logging.type = 0;
+
+        if (!option)
+        {
+            // g_logging.file = stderr;
+            ret = 0;
+        }
+#if !defined(NDS_NITRO)
+        else
+        {
+            /* Extra parentheses: the assignment inside the condition is
+            ** intentional (open the log file and test the handle). */
+            if ((g_logging.file = fopen((char *)option, "w")))
+                ret = 0;
+        }
+#endif
+        break;
+#if defined(WIN32) && !defined(_WIN32_WCE)
+    case 1: /* log to the debugger output window; no file handle needed */
+        g_logging.type = type;
+        ret = 0;
+        break;
+#endif
+    default:
+        break;
+    }
+
+    //output the version to the new logging destination
+    if (!ret)
+        memtrack_log("Memory Tracker logging initialized, "
+                     "Memory Tracker v."vpx_mem_tracker_version"\n");
+
+    return ret;
+}
+
+/*
+ vpx_memory_tracker_set_log_func
+ Sets a logging function to be used by the memory tracker.
+ Return:
+ 0: on success
+ -1: if the logging type could not be set because logfunc was NULL
+*/
+int vpx_memory_tracker_set_log_func(void *userdata,
+                                    void(*logfunc)(void *userdata,
+                                                   const char *fmt, va_list args))
+{
+    if (!logfunc)
+        return -1;   /* nothing to install */
+
+    /* type -1 routes memtrack_log() through the user callback. */
+    g_logging.type = -1;
+    g_logging.userdata = userdata;
+    g_logging.func = logfunc;
+
+    //output the version to the new logging destination
+    memtrack_log("Memory Tracker logging initialized, "
+                 "Memory Tracker v."vpx_mem_tracker_version"\n");
+
+    return 0;
+}
+
+/*
+ *
+ * END - Exposed library functions
+ *
+*/
+
+
+/*
+ *
+ * Internal library functions
+ *
+*/
+
+/* printf-style logger: routes the message to whichever sink was configured
+** via vpx_memory_tracker_set_log_type()/_set_log_func(). */
+static void memtrack_log(const char *fmt, ...)
+{
+    va_list list;
+
+    va_start(list, fmt);
+
+    switch (g_logging.type)
+    {
+    case -1: /* user-supplied callback */
+
+        if (g_logging.func)
+            g_logging.func(g_logging.userdata, fmt, list);
+
+        break;
+    case 0: /* FILE* sink; output is dropped if no file was opened */
+
+        if (g_logging.file)
+        {
+            vfprintf(g_logging.file, fmt, list);
+            fflush(g_logging.file);
+        }
+
+        break;
+#if defined(WIN32) && !defined(_WIN32_WCE)
+    case 1: /* debugger output window */
+    {
+        char temp[1024];
+        /* NOTE(review): _vsnprintf does not NUL-terminate on truncation and
+        ** temp is not explicitly terminated here — confirm messages stay
+        ** under the buffer size.  Win32 API spells this OutputDebugString. */
+        _vsnprintf(temp, sizeof(temp) / sizeof(char) - 1, fmt, list);
+        output_debug_string(temp);
+    }
+    break;
+#endif
+    default:
+        break;
+    }
+
+    va_end(list);
+}
+
+/*
+ memory_tracker_dump()
+ Dumps the current contents of the global memory allocation list
+*/
+static void memory_tracker_dump()
+{
+    int idx = 0;
+    struct mem_block *blk = memtrack.head ? memtrack.head->next : NULL;
+
+    memtrack_log("\n_currently Allocated= %d; Max allocated= %d\n",
+                 memtrack.current_allocated, memtrack.max_allocated);
+
+    /* Walk every tracked block (the sentinel head is skipped above). */
+    for (; blk; blk = blk->next, ++idx)
+    {
+#if defined(WIN32) && !defined(_WIN32_WCE)
+
+        /*when using outputdebugstring, output filenames so they
+        can be clicked to be opened in visual studio*/
+        if (g_logging.type == 1)
+            memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file:\n"
+                         " %s(%d):\n", idx,
+                         blk->addr, idx, blk->size,
+                         blk->file, blk->line);
+        else
+#endif
+            memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file: %s, line: %d\n", idx,
+                         blk->addr, idx, blk->size,
+                         blk->file, blk->line);
+    }
+
+    memtrack_log("\n");
+}
+
+/*
+    memory_tracker_check_integrity(char* file, unsigned int line)
+ file - the file name where the check was placed
+ line - the line in file where the check was placed
+ If a padding_size was supplied to vpx_memory_tracker_init()
+ this function will check ea. addr in the list verifying that
+ addr-padding_size and addr+padding_size is filled with pad_value
+*/
+static void memory_tracker_check_integrity(char *file, unsigned int line)
+{
+    /* Nothing to verify unless guard padding was requested at init time. */
+    if (memtrack.padding_size)
+    {
+        int i,
+            index = 0;
+        unsigned char *p_show_me,
+                      *p_show_me2;
+        unsigned int tempme = memtrack.pad_value,
+                     dead1,
+                     dead2;
+        unsigned char *x_bounds;
+        struct mem_block *p = memtrack.head->next;
+
+        while (p)
+        {
+            //x_bounds = (unsigned char*)p->addr;
+            //back up VPX_BYTE_ALIGNMENT
+            //x_bounds -= memtrack.padding_size;
+
+            if (p->padded) // can the bounds be checked?
+            {
+                /*yes, move to the address that was actually allocated
+                by the vpx_* calls*/
+                /* The allocator stored that base address in the word just
+                ** before the payload pointer. */
+                x_bounds = (unsigned char *)(((size_t *)p->addr)[-1]);
+
+                /* Compare both guard regions, one unsigned int at a time;
+                ** every word must still equal the original pad fill value. */
+                for (i = 0; i < memtrack.padding_size; i += sizeof(unsigned int))
+                {
+                    p_show_me = (x_bounds + i);                            /* leading guard */
+                    p_show_me2 = (unsigned char *)(p->addr + p->size + i); /* trailing guard */
+
+                    MEM_TRACK_MEMCPY(&dead1, p_show_me, sizeof(unsigned int));
+                    MEM_TRACK_MEMCPY(&dead2, p_show_me2, sizeof(unsigned int));
+
+                    if ((dead1 != tempme) || (dead2 != tempme))
+                    {
+                        memtrack_log("\n[vpx_mem integrity check failed]:\n"
+                                     " index[%d] {%s:%d} addr=0x%x, size=%d,"
+                                     " file: %s, line: %d c0:0x%x c1:0x%x\n",
+                                     index, file, line, p->addr, p->size, p->file,
+                                     p->line, dead1, dead2);
+                    }
+                }
+            }
+
+            ++index;
+            p = p->next;
+        }
+    }
+}
+
+/*
+ memory_tracker_add(size_t addr, unsigned int size,
+ char * file, unsigned int line)
+    Adds an address (addr), its size, and the file and line number to our list.
+ Adjusts the total bytes allocated and max bytes allocated if necessary.
+ If memory cannot be allocated the list will be destroyed.
+*/
+/* Internal, lock-taking insert.  Declared static in the prototype above;
+** the definition is made static too so the declarations agree and the
+** symbol keeps internal linkage explicitly. */
+static void memory_tracker_add(size_t addr, unsigned int size,
+                               char *file, unsigned int line,
+                               int padded)
+{
+    if (!memory_tracker_lock_mutex())
+    {
+        struct mem_block *p;
+
+        p = MEM_TRACK_MALLOC(sizeof(struct mem_block));
+
+        if (p)
+        {
+            /* Append after the current tail; the sentinel head guarantees
+            ** p->prev is never NULL here. */
+            p->prev = memtrack.tail;
+            p->prev->next = p;
+            p->addr = addr;
+            p->size = size;
+            p->line = line;
+            p->file = file;
+            p->padded = padded;
+            p->next = NULL;
+
+            memtrack.tail = p;
+
+            memtrack.current_allocated += size;
+
+            if (memtrack.current_allocated > memtrack.max_allocated)
+                memtrack.max_allocated = memtrack.current_allocated;
+
+            //memtrack_log("memory_tracker_add: added addr=0x%.8x\n", addr);
+
+            memory_tracker_unlock_mutex();
+        }
+        else
+        {
+            /* Could not even track the allocation: tear the tracker down.
+            ** Unlock first — vpx_memory_tracker_destroy re-acquires the
+            ** mutex itself. */
+            memtrack_log("memory_tracker_add: error allocating memory!\n");
+            memory_tracker_unlock_mutex();
+            vpx_memory_tracker_destroy();
+        }
+    }
+}
+
+/*
+ memory_tracker_remove(size_t addr)
+ Removes an address and its corresponding size (if they exist)
+ from the memory tracker list and adjusts the current number
+ of bytes allocated.
+ Return:
+ 0: on success
+ -1: if the mutex could not be locked
+ -2: if the addr was not found in the list
+*/
+/* Internal, lock-taking removal.  Declared static in the prototype above;
+** the definition is made static to match. */
+static int memory_tracker_remove(size_t addr)
+{
+    int ret = -1;
+
+    if (!memory_tracker_lock_mutex())
+    {
+        struct mem_block *p;
+
+        /* Extra parentheses: the assignment inside the condition is
+        ** intentional (look up the entry and test the result). */
+        if ((p = memory_tracker_find(addr)))
+        {
+            memtrack.current_allocated -= p->size;
+
+            /* Unlink; p->prev always exists thanks to the sentinel head. */
+            p->prev->next = p->next;
+
+            if (p->next)
+                p->next->prev = p->prev;
+            else
+                memtrack.tail = p->prev;
+
+            ret = 0;
+            MEM_TRACK_FREE(p);
+        }
+        else
+        {
+            memtrack_log("memory_tracker_remove(): addr not found in list, 0x%.8x\n", addr);
+            ret = -2;
+        }
+
+        memory_tracker_unlock_mutex();
+    }
+
+    return ret;
+}
+
+/*
+ memory_tracker_find(size_t addr)
+ Finds an address in our addrs list
+ NOTE: the mutex MUST be locked in the other internal
+ functions before calling this one. This avoids
+ the need for repeated locking and unlocking as in Remove
+ Returns: pointer to the mem block if found, NULL otherwise
+*/
+static struct mem_block *memory_tracker_find(size_t addr)
+{
+    struct mem_block *cur = NULL;
+
+    /* Real entries start at head->next; head itself is a sentinel. */
+    if (memtrack.head)
+        for (cur = memtrack.head->next; cur && cur->addr != addr; cur = cur->next)
+            ;
+
+    return cur;
+}
+
+
+#if !defined(NO_MUTEX)
+/*
+ memory_tracker_lock_mutex()
+ Locks the memory tracker mutex with a platform specific call
+ Returns:
+ 0: Success
+ <0: Failure, either the mutex was not initialized
+ or the call to lock the mutex failed
+*/
+static int memory_tracker_lock_mutex()
+{
+    int ret = -1;
+
+    /* Never touch the mutex before vpx_memory_tracker_init() created it. */
+    if (g_b_mem_tracker_inited)
+    {
+
+#if defined(LINUX) || defined(__uClinux__)
+        ret = pthread_mutex_lock(&memtrack.mutex);
+#elif defined(WIN32) || defined(_WIN32_WCE)
+        /* WAIT_OBJECT_0 is 0, so ret == 0 on a clean acquisition. */
+        ret = WaitForSingleObject(memtrack.mutex, INFINITE);
+#elif defined(VXWORKS)
+        ret = sem_take(memtrack.mutex, WAIT_FOREVER);
+#elif defined(NDS_NITRO)
+        os_lock_mutex(&memtrack.mutex);
+        ret = 0;
+#endif
+
+        if (ret)
+        {
+            memtrack_log("memory_tracker_lock_mutex: mutex lock failed\n");
+        }
+    }
+
+    return ret;
+}
+
+/*
+ memory_tracker_unlock_mutex()
+ Unlocks the memory tracker mutex with a platform specific call
+ Returns:
+ 0: Success
+ <0: Failure, either the mutex was not initialized
+ or the call to unlock the mutex failed
+*/
+static int memory_tracker_unlock_mutex()
+{
+    int ret = -1;
+
+    /* Never touch the mutex before vpx_memory_tracker_init() created it. */
+    if (g_b_mem_tracker_inited)
+    {
+
+#if defined(LINUX) || defined(__uClinux__)
+        ret = pthread_mutex_unlock(&memtrack.mutex);
+#elif defined(WIN32) || defined(_WIN32_WCE)
+        /* NOTE(review): the Win32 API spells this ReleaseMutex — confirm
+        ** this lowercased name resolves in this build. */
+        ret = !release_mutex(memtrack.mutex);
+#elif defined(VXWORKS)
+        ret = sem_give(memtrack.mutex);
+#elif defined(NDS_NITRO)
+        os_unlock_mutex(&memtrack.mutex);
+        ret = 0;
+#endif
+
+        if (ret)
+        {
+            memtrack_log("memory_tracker_unlock_mutex: mutex unlock failed\n");
+        }
+    }
+
+    return ret;
+}
+#endif
+
+/*
+ vpx_memory_tracker_set_functions
+
+ Sets the function pointers for the standard library functions.
+
+ Return:
+ 0: on success
+ -1: if the use global function pointers is not set.
+*/
+int vpx_memory_tracker_set_functions(mem_track_malloc_func g_malloc_l
+                                     , mem_track_calloc_func g_calloc_l
+                                     , mem_track_realloc_func g_realloc_l
+                                     , mem_track_free_func g_free_l
+                                     , mem_track_memcpy_func g_memcpy_l
+                                     , mem_track_memset_func g_memset_l
+                                     , mem_track_memmove_func g_memmove_l)
+{
+#if USE_GLOBAL_FUNCTION_POINTERS
+
+    /* Install each replacement only when one was supplied; a null
+    ** argument leaves the current function pointer in place. */
+    if (g_malloc_l)  g_malloc  = g_malloc_l;
+
+    if (g_calloc_l)  g_calloc  = g_calloc_l;
+
+    if (g_realloc_l) g_realloc = g_realloc_l;
+
+    if (g_free_l)    g_free    = g_free_l;
+
+    if (g_memcpy_l)  g_memcpy  = g_memcpy_l;
+
+    if (g_memset_l)  g_memset  = g_memset_l;
+
+    if (g_memmove_l) g_memmove = g_memmove_l;
+
+    return 0;
+#else
+    (void)g_malloc_l;
+    (void)g_calloc_l;
+    (void)g_realloc_l;
+    (void)g_free_l;
+    (void)g_memcpy_l;
+    (void)g_memset_l;
+    (void)g_memmove_l;
+    return -1;
+#endif
+}
diff --git a/vpx_mem/memory_manager/hmm_alloc.c b/vpx_mem/memory_manager/hmm_alloc.c
new file mode 100644
index 000000000..9abd81ee4
--- /dev/null
+++ b/vpx_mem/memory_manager/hmm_alloc.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* This code is in the public domain.
+** Version: 1.1 Author: Walt Karas
+*/
+
+#include "hmm_intrnl.h"
+
+void *U(alloc)(U(descriptor) *desc, U(size_aau) n)
+{
+#ifdef HMM_AUDIT_FAIL
+
+    /* Audit the root free block (if any) before touching the heap. */
+    if (desc->avl_tree_root)
+        AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+#endif
+
+    /* The block released by the most recent free() is held out of the
+    ** free collection; fold it back in before searching. */
+    if (desc->last_freed)
+    {
+#ifdef HMM_AUDIT_FAIL
+        AUDIT_BLOCK(desc->last_freed)
+#endif
+
+        U(into_free_collection)(desc, (head_record *)(desc->last_freed));
+
+        desc->last_freed = 0;
+    }
+
+    /* Add space for block header. */
+    n += HEAD_AAUS;
+
+    /* Convert n from number of address alignment units to block alignment
+    ** units. */
+    n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT);
+
+    if (n < MIN_BLOCK_BAUS)
+        n = MIN_BLOCK_BAUS;
+
+    {
+        /* Search for the first node of the bin containing the smallest
+        ** block big enough to satisfy request. */
+        ptr_record *ptr_rec_ptr =
+            U(avl_search)(
+                (U(avl_avl) *) & (desc->avl_tree_root), (U(size_bau)) n,
+                AVL_GREATER_EQUAL);
+
+        /* If an appropriate bin is found, satisfy the allocation request,
+        ** otherwise return null pointer. */
+        return(ptr_rec_ptr ?
+               U(alloc_from_bin)(desc, ptr_rec_ptr, (U(size_bau)) n) : 0);
+    }
+}
diff --git a/vpx_mem/memory_manager/hmm_base.c b/vpx_mem/memory_manager/hmm_base.c
new file mode 100644
index 000000000..0cacc3f8f
--- /dev/null
+++ b/vpx_mem/memory_manager/hmm_base.c
@@ -0,0 +1,433 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* This code is in the public domain.
+** Version: 1.1 Author: Walt Karas
+*/
+
+#include "hmm_intrnl.h"
+
+void U(init)(U(descriptor) *desc)
+{
+    /* Start with an empty free-block tree and no deferred free block. */
+    desc->last_freed = 0;
+    desc->avl_tree_root = 0;
+}
+
+/* Remove a free block from a bin's doubly-linked list when it is not,
+** the first block in the bin.
+*/
+/* Unlink a free block from its bin's doubly-linked list.  Only valid for
+** blocks that are not the first block in the bin (so ->prev exists). */
+void U(dll_remove)(
+    /* Pointer to pointer record in the block to be removed. */
+    ptr_record *to_remove)
+{
+    ptr_record *nxt = to_remove->next;
+
+    to_remove->prev->next = nxt;
+
+    if (nxt)
+        nxt->prev = to_remove->prev;
+}
+
+/* Put a block into the free collection of a heap.
+*/
+void U(into_free_collection)(
+    /* Pointer to heap descriptor. */
+    U(descriptor) *desc,
+    /* Pointer to head record of block. */
+    head_record *head_ptr)
+{
+    ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr);
+
+    ptr_record *bin_front_ptr =
+        U(avl_insert)((U(avl_avl) *) & (desc->avl_tree_root), ptr_rec_ptr);
+
+    if (bin_front_ptr != ptr_rec_ptr)
+    {
+        /* The block was not inserted into the AVL tree because there is
+        ** already a bin for the size of the block. */
+
+        MARK_SUCCESSIVE_BLOCK_IN_FREE_BIN(head_ptr)
+        /* self pointing at itself marks a non-front bin member; this is
+        ** what out_of_free_collection() tests. */
+        ptr_rec_ptr->self = ptr_rec_ptr;
+
+        /* Make the block the new second block in the bin's doubly-linked
+        ** list. */
+        ptr_rec_ptr->prev = bin_front_ptr;
+        ptr_rec_ptr->next = bin_front_ptr->next;
+        bin_front_ptr->next = ptr_rec_ptr;
+
+        if (ptr_rec_ptr->next)
+            ptr_rec_ptr->next->prev = ptr_rec_ptr;
+    }
+    else
+        /* Block is first block in new bin. */
+        ptr_rec_ptr->next = 0;
+}
+
+/* Allocate a block from a given bin. Returns a pointer to the payload
+** of the removed block. The "last freed" pointer must be null prior
+** to calling this function.
+*/
+void *U(alloc_from_bin)(
+    /* Pointer to heap descriptor. */
+    U(descriptor) *desc,
+    /* Pointer to pointer record of first block in bin. */
+    ptr_record *bin_front_ptr,
+    /* Number of BAUs needed in the allocated block. If the block taken
+    ** from the bin is significantly larger than the number of BAUs needed,
+    ** the "extra" BAUs are split off to form a new free block. */
+    U(size_bau) n_baus)
+{
+    head_record *head_ptr;
+    U(size_bau) rem_baus;
+
+    if (bin_front_ptr->next)
+    {
+        /* There are multiple blocks in this bin. Use the 2nd block in
+        ** the bin to avoid needless change to the AVL tree.
+        */
+
+        ptr_record *ptr_rec_ptr = bin_front_ptr->next;
+        head_ptr = PTR_REC_TO_HEAD(ptr_rec_ptr);
+
+/* NOTE(review): every other audit guard in this file tests HMM_AUDIT_FAIL;
+** "AUDIT_FAIL" here looks like a typo that silently disables this audit —
+** confirm against the HMM configuration headers. */
+#ifdef AUDIT_FAIL
+        AUDIT_BLOCK(head_ptr)
+#endif
+
+        U(dll_remove)(ptr_rec_ptr);
+    }
+    else
+    {
+        /* There is only one block in the bin, so it has to be removed
+        ** from the AVL tree.
+        */
+
+        head_ptr = PTR_REC_TO_HEAD(bin_front_ptr);
+
+        U(avl_remove)(
+            (U(avl_avl) *) &(desc->avl_tree_root), BLOCK_BAUS(head_ptr));
+    }
+
+    MARK_BLOCK_ALLOCATED(head_ptr)
+
+    rem_baus = BLOCK_BAUS(head_ptr) - n_baus;
+
+    if (rem_baus >= MIN_BLOCK_BAUS)
+    {
+        /* Since there are enough "extra" BAUs, split them off to form
+        ** a new free block.
+        */
+
+        head_record *rem_head_ptr =
+            (head_record *) BAUS_FORWARD(head_ptr, n_baus);
+
+        /* Change the next block's header to reflect the fact that the
+        ** block preceding it is now smaller.
+        */
+        SET_PREV_BLOCK_BAUS(
+            BAUS_FORWARD(head_ptr, head_ptr->block_size), rem_baus)
+
+        head_ptr->block_size = n_baus;
+
+        rem_head_ptr->previous_block_size = n_baus;
+        rem_head_ptr->block_size = rem_baus;
+
+        /* Defer the remainder as the "last freed" block rather than
+        ** inserting it into the free collection immediately. */
+        desc->last_freed = rem_head_ptr;
+    }
+
+    return(HEAD_TO_PTR_REC(head_ptr));
+}
+
+/* Take a block out of the free collection.
+*/
+void U(out_of_free_collection)(
+    /* Descriptor of heap that block is in. */
+    U(descriptor) *desc,
+    /* Pointer to head of block to take out of free collection. */
+    head_record *head_ptr)
+{
+    ptr_record *ptr_rec_ptr = HEAD_TO_PTR_REC(head_ptr);
+
+    /* self == itself marks a non-front bin member (set in
+    ** into_free_collection). */
+    if (ptr_rec_ptr->self == ptr_rec_ptr)
+        /* Block is not the front block in its bin, so all we have to
+        ** do is take it out of the bin's doubly-linked list. */
+        U(dll_remove)(ptr_rec_ptr);
+    else
+    {
+        ptr_record *next = ptr_rec_ptr->next;
+
+        if (next)
+            /* Block is the front block in its bin, and there is at least
+            ** one other block in the bin. Substitute the next block for
+            ** the front block. */
+            U(avl_subst)((U(avl_avl) *) &(desc->avl_tree_root), next);
+        else
+            /* Block is the front block in its bin, but there is no other
+            ** block in the bin. Eliminate the bin. */
+            U(avl_remove)(
+                (U(avl_avl) *) &(desc->avl_tree_root), BLOCK_BAUS(head_ptr));
+    }
+}
+
+/* Free a block, coalescing with free neighbors and updating the
+** descriptor's shrinkability bookkeeping. */
+void U(free)(U(descriptor) *desc, void *payload_ptr)
+{
+    /* Flags if coalesce with adjacent block. */
+    int coalesce;
+
+    head_record *fwd_head_ptr;
+    head_record *free_head_ptr = PTR_REC_TO_HEAD(payload_ptr);
+
+    desc->num_baus_can_shrink = 0;
+
+#ifdef HMM_AUDIT_FAIL
+
+    AUDIT_BLOCK(free_head_ptr)
+
+    /* Make sure not freeing an already free block. */
+    if (!IS_BLOCK_ALLOCATED(free_head_ptr))
+        HMM_AUDIT_FAIL
+
+    if (desc->avl_tree_root)
+        /* Audit root block in AVL tree. */
+        AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+
+#endif
+
+    fwd_head_ptr =
+        (head_record *) BAUS_FORWARD(free_head_ptr, free_head_ptr->block_size);
+
+    if (free_head_ptr->previous_block_size)
+    {
+        /* Coalesce with backward block if possible. */
+
+        head_record *bkwd_head_ptr =
+            (head_record *) BAUS_BACKWARD(
+                free_head_ptr, free_head_ptr->previous_block_size);
+
+#ifdef HMM_AUDIT_FAIL
+        AUDIT_BLOCK(bkwd_head_ptr)
+#endif
+
+        if (bkwd_head_ptr == (head_record *)(desc->last_freed))
+        {
+            /* The deferred last-freed block is our neighbor: absorb it. */
+            desc->last_freed = 0;
+            coalesce = 1;
+        }
+        else if (IS_BLOCK_ALLOCATED(bkwd_head_ptr))
+            coalesce = 0;
+        else
+        {
+            U(out_of_free_collection)(desc, bkwd_head_ptr);
+            coalesce = 1;
+        }
+
+        if (coalesce)
+        {
+            bkwd_head_ptr->block_size += free_head_ptr->block_size;
+            SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(bkwd_head_ptr))
+            free_head_ptr = bkwd_head_ptr;
+        }
+    }
+
+    if (fwd_head_ptr->block_size == 0)
+    {
+        /* Block to be freed is last block before dummy end-of-chunk block. */
+        desc->end_of_shrinkable_chunk =
+            BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS);
+        desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr);
+
+        if (PREV_BLOCK_BAUS(free_head_ptr) == 0)
+            /* Free block is the entire chunk, so shrinking can eliminate
+            ** entire chunk including dummy end block. */
+            desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS;
+    }
+    else
+    {
+        /* Coalesce with forward block if possible. */
+
+#ifdef HMM_AUDIT_FAIL
+        AUDIT_BLOCK(fwd_head_ptr)
+#endif
+
+        if (fwd_head_ptr == (head_record *)(desc->last_freed))
+        {
+            desc->last_freed = 0;
+            coalesce = 1;
+        }
+        else if (IS_BLOCK_ALLOCATED(fwd_head_ptr))
+            coalesce = 0;
+        else
+        {
+            U(out_of_free_collection)(desc, fwd_head_ptr);
+            coalesce = 1;
+        }
+
+        if (coalesce)
+        {
+            free_head_ptr->block_size += fwd_head_ptr->block_size;
+
+            fwd_head_ptr =
+                (head_record *) BAUS_FORWARD(
+                    fwd_head_ptr, BLOCK_BAUS(fwd_head_ptr));
+
+            SET_PREV_BLOCK_BAUS(fwd_head_ptr, BLOCK_BAUS(free_head_ptr))
+
+            if (fwd_head_ptr->block_size == 0)
+            {
+                /* Coalesced block to be freed is last block before dummy
+                ** end-of-chunk block. */
+                desc->end_of_shrinkable_chunk =
+                    BAUS_FORWARD(fwd_head_ptr, DUMMY_END_BLOCK_BAUS);
+                desc->num_baus_can_shrink = BLOCK_BAUS(free_head_ptr);
+
+                if (PREV_BLOCK_BAUS(free_head_ptr) == 0)
+                    /* Free block is the entire chunk, so shrinking can
+                    ** eliminate entire chunk including dummy end block. */
+                    desc->num_baus_can_shrink += DUMMY_END_BLOCK_BAUS;
+            }
+        }
+    }
+
+    if (desc->last_freed)
+    {
+        /* There is a last freed block, but it is not adjacent to the
+        ** block being freed by this call to free, so put the last
+        ** freed block into the free collection.
+        */
+
+#ifdef HMM_AUDIT_FAIL
+        AUDIT_BLOCK(desc->last_freed)
+#endif
+
+        U(into_free_collection)(desc, (head_record *)(desc->last_freed));
+    }
+
+    /* Defer this block: the next free() may coalesce with it, and the
+    ** next alloc() folds it into the free collection. */
+    desc->last_freed = free_head_ptr;
+}
+
+/* Add a fresh chunk of n_baus BAUs at start to the heap. */
+void U(new_chunk)(U(descriptor) *desc, void *start, U(size_bau) n_baus)
+{
+#ifdef HMM_AUDIT_FAIL
+
+    if (desc->avl_tree_root)
+        /* Audit root block in AVL tree. */
+        AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+#endif
+
+#undef HEAD_PTR
+#define HEAD_PTR ((head_record *) start)
+
+    /* Make the chunk one big free block followed by a dummy end block.
+    */
+
+    n_baus -= DUMMY_END_BLOCK_BAUS;
+
+    HEAD_PTR->previous_block_size = 0;
+    HEAD_PTR->block_size = n_baus;
+
+    U(into_free_collection)(desc, HEAD_PTR);
+
+    /* Set up the dummy end block.  (HEAD_PTR expands to the local
+    ** variable start, so after advancing start it names the dummy
+    ** end block.) */
+    start = BAUS_FORWARD(start, n_baus);
+    HEAD_PTR->previous_block_size = n_baus;
+    HEAD_PTR->block_size = 0;
+
+#undef HEAD_PTR
+}
+
+#ifdef HMM_AUDIT_FAIL
+
+/* Function that does the audit fail actions defined by the preprocessor symbol,
+** and returns a dummy integer value.
+*/
+int U(audit_block_fail_dummy_return)(void)
+{
+    /* Expands to the configured audit-failure action (may not return). */
+    HMM_AUDIT_FAIL
+
+    /* Dummy return. */
+    return(0);
+}
+
+#endif
+
+/* AVL Tree instantiation. */
+
+#ifdef HMM_AUDIT_FAIL
+
+/* The AVL tree generic package passes an ACCESS of 1 when it "touches"
+** a child node for the first time during a particular operation. I use
+** this feature to audit only one time (per operation) the free blocks
+** that are tree nodes. Since the root node is not a child node, it has
+** to be audited directly.
+*/
+
+/* The pain you feel while reading these macros will not be in vain. It
+** will remove all doubt from your mind that C++ inline functions are
+** a very good thing.
+*/
+
+#define AVL_GET_LESS(H, ACCESS) \
+ (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->self)
+#define AVL_GET_GREATER(H, ACCESS) \
+ (((ACCESS) ? AUDIT_BLOCK_AS_EXPR(PTR_REC_TO_HEAD(H)) : 0), (H)->prev)
+
+#else
+
+#define AVL_GET_LESS(H, ACCESS) ((H)->self)
+#define AVL_GET_GREATER(H, ACCESS) ((H)->prev)
+
+#endif
+
+#define AVL_SET_LESS(H, LH) (H)->self = (LH);
+#define AVL_SET_GREATER(H, GH) (H)->prev = (GH);
+
+/* high bit of high bit of
+** block_size previous_block_size balance factor
+** ----------- ------------------- --------------
+** 0 0 n/a (block allocated)
+** 0 1 1
+** 1 0 -1
+** 1 1 0
+*/
+
+#define AVL_GET_BALANCE_FACTOR(H) \
+ ((((head_record *) (PTR_REC_TO_HEAD(H)))->block_size & \
+ HIGH_BIT_BAU_SIZE) ? \
+ (((head_record *) (PTR_REC_TO_HEAD(H)))->previous_block_size & \
+ HIGH_BIT_BAU_SIZE ? 0 : -1) : 1)
+
+#define AVL_SET_BALANCE_FACTOR(H, BF) \
+ { \
+ register head_record *p = \
+ (head_record *) PTR_REC_TO_HEAD(H); \
+ register int bal_f = (BF); \
+ \
+ if (bal_f <= 0) \
+ p->block_size |= HIGH_BIT_BAU_SIZE; \
+ else \
+ p->block_size &= ~HIGH_BIT_BAU_SIZE; \
+ if (bal_f >= 0) \
+ p->previous_block_size |= HIGH_BIT_BAU_SIZE; \
+ else \
+ p->previous_block_size &= ~HIGH_BIT_BAU_SIZE; \
+ }
+
+#define COMPARE_KEY_KEY(K1, K2) ((K1) == (K2) ? 0 : ((K1) > (K2) ? 1 : -1))
+
+#define AVL_COMPARE_KEY_NODE(K, H) \
+ COMPARE_KEY_KEY(K, BLOCK_BAUS(PTR_REC_TO_HEAD(H)))
+
+#define AVL_COMPARE_NODE_NODE(H1, H2) \
+ COMPARE_KEY_KEY(BLOCK_BAUS(PTR_REC_TO_HEAD(H1)), \
+ BLOCK_BAUS(PTR_REC_TO_HEAD(H2)))
+
+#define AVL_NULL ((ptr_record *) 0)
+
+#define AVL_IMPL_MASK \
+ ( AVL_IMPL_INSERT | AVL_IMPL_SEARCH | AVL_IMPL_REMOVE | AVL_IMPL_SUBST )
+
+#include "cavl_impl.h"
diff --git a/vpx_mem/memory_manager/hmm_dflt_abort.c b/vpx_mem/memory_manager/hmm_dflt_abort.c
new file mode 100644
index 000000000..dc59f5507
--- /dev/null
+++ b/vpx_mem/memory_manager/hmm_dflt_abort.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* This code is in the public domain.
+** Version: 1.1 Author: Walt Karas
+*/
+
+/* The function in this file performs default actions if self-auditing
+** finds heap corruption. Don't rely on this code to handle the
+** case where HMM is being used to implement the malloc and free standard
+** library functions. Rewrite the function if necessary to avoid using
+** I/O and execution termination functions that call malloc or free.
+** In Unix, for example, you would replace the fputs calls with calls
+** to the write system call using file handle number 2.
+*/
+#include "hmm_intrnl.h"
+#include <stdio.h>
+#include <stdlib.h>
+
+static int entered = 0;
+
+/* Print abort message, file and line. Terminate execution.
+*/
+void hmm_dflt_abort(const char *file, const char *line)
+{
+ /* Avoid use of printf(), which is more likely to use heap. */
+
+ if (entered)
+
+ /* The standard I/O functions called a heap function and caused
+ ** an indirect recursive call to this function. So we'll have
+ ** to just exit without printing a message. */
+ while (1);
+
+ entered = 1;
+
+ fputs("\n_abort - Heap corruption\n" "File: ", stderr);
+ fputs(file, stderr);
+ fputs(" Line: ", stderr);
+ fputs(line, stderr);
+ fputs("\n\n", stderr);
+ fputs("hmm_dflt_abort: while(1)!!!\n", stderr);
+ fflush(stderr);
+
+ while (1);
+}
diff --git a/vpx_mem/memory_manager/hmm_grow.c b/vpx_mem/memory_manager/hmm_grow.c
new file mode 100644
index 000000000..79d75a74b
--- /dev/null
+++ b/vpx_mem/memory_manager/hmm_grow.c
@@ -0,0 +1,49 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* This code is in the public domain.
+** Version: 1.1 Author: Walt Karas
+*/
+
+#include "hmm_intrnl.h"
+
+void U(grow_chunk)(U(descriptor) *desc, void *end, U(size_bau) n_baus)
+{
+#undef HEAD_PTR
+#define HEAD_PTR ((head_record *) end)
+
+ end = BAUS_BACKWARD(end, DUMMY_END_BLOCK_BAUS);
+
+#ifdef HMM_AUDIT_FAIL
+
+ if (HEAD_PTR->block_size != 0)
+ /* Chunk does not have valid dummy end block. */
+ HMM_AUDIT_FAIL
+
+#endif
+
+ /* Create a new block that absorbs the old dummy end block. */
+ HEAD_PTR->block_size = n_baus;
+
+ /* Set up the new dummy end block. */
+ {
+ head_record *dummy = (head_record *) BAUS_FORWARD(end, n_baus);
+ dummy->previous_block_size = n_baus;
+ dummy->block_size = 0;
+ }
+
+    /* Simply free the new block, allowing it to coalesce with any
+    ** free block that was the last block in the chunk prior to
+    ** growth.
+    */
+ U(free)(desc, HEAD_TO_PTR_REC(end));
+
+#undef HEAD_PTR
+}
diff --git a/vpx_mem/memory_manager/hmm_largest.c b/vpx_mem/memory_manager/hmm_largest.c
new file mode 100644
index 000000000..5ebe398e0
--- /dev/null
+++ b/vpx_mem/memory_manager/hmm_largest.c
@@ -0,0 +1,59 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* This code is in the public domain.
+** Version: 1.1 Author: Walt Karas
+*/
+
+#include "hmm_intrnl.h"
+
+U(size_aau) U(largest_available)(U(descriptor) *desc)
+{
+ U(size_bau) largest;
+
+ if (!(desc->avl_tree_root))
+ largest = 0;
+ else
+ {
+#ifdef HMM_AUDIT_FAIL
+ /* Audit root block in AVL tree. */
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+#endif
+
+ largest =
+ BLOCK_BAUS(
+ PTR_REC_TO_HEAD(
+ U(avl_search)(
+ (U(avl_avl) *) & (desc->avl_tree_root),
+ (U(size_bau)) ~(U(size_bau)) 0, AVL_LESS)));
+ }
+
+ if (desc->last_freed)
+ {
+ /* Size of last freed block. */
+ register U(size_bau) lf_size;
+
+#ifdef HMM_AUDIT_FAIL
+ AUDIT_BLOCK(desc->last_freed)
+#endif
+
+ lf_size = BLOCK_BAUS(desc->last_freed);
+
+ if (lf_size > largest)
+ largest = lf_size;
+ }
+
+    /* Convert largest size to AAUs and subtract head size leaving payload
+ ** size.
+ */
+ return(largest ?
+ ((largest * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) - HEAD_AAUS) :
+ 0);
+}
diff --git a/vpx_mem/memory_manager/hmm_resize.c b/vpx_mem/memory_manager/hmm_resize.c
new file mode 100644
index 000000000..6e3f2f041
--- /dev/null
+++ b/vpx_mem/memory_manager/hmm_resize.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* This code is in the public domain.
+** Version: 1.1 Author: Walt Karas
+*/
+
+#include "hmm_intrnl.h"
+
+int U(resize)(U(descriptor) *desc, void *mem, U(size_aau) n)
+{
+ U(size_aau) i;
+ head_record *next_head_ptr;
+ head_record *head_ptr = PTR_REC_TO_HEAD(mem);
+
+ /* Flag. */
+ int next_block_free;
+
+ /* Convert n from desired block size in AAUs to BAUs. */
+ n += HEAD_AAUS;
+ n = DIV_ROUND_UP(n, HMM_BLOCK_ALIGN_UNIT);
+
+ if (n < MIN_BLOCK_BAUS)
+ n = MIN_BLOCK_BAUS;
+
+#ifdef HMM_AUDIT_FAIL
+
+ AUDIT_BLOCK(head_ptr)
+
+ if (!IS_BLOCK_ALLOCATED(head_ptr))
+ HMM_AUDIT_FAIL
+
+ if (desc->avl_tree_root)
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+
+#endif
+
+ i = head_ptr->block_size;
+
+ next_head_ptr =
+ (head_record *) BAUS_FORWARD(head_ptr, head_ptr->block_size);
+
+ next_block_free =
+ (next_head_ptr == desc->last_freed) ||
+ !IS_BLOCK_ALLOCATED(next_head_ptr);
+
+ if (next_block_free)
+ /* Block can expand into next free block. */
+ i += BLOCK_BAUS(next_head_ptr);
+
+ if (n > i)
+ /* Not enough room for block to expand. */
+ return(-1);
+
+ if (next_block_free)
+ {
+#ifdef HMM_AUDIT_FAIL
+ AUDIT_BLOCK(next_head_ptr)
+#endif
+
+ if (next_head_ptr == desc->last_freed)
+ desc->last_freed = 0;
+ else
+ U(out_of_free_collection)(desc, next_head_ptr);
+
+ next_head_ptr =
+ (head_record *) BAUS_FORWARD(head_ptr, (U(size_bau)) i);
+ }
+
+ /* Set i to number of "extra" BAUs. */
+ i -= n;
+
+ if (i < MIN_BLOCK_BAUS)
+ /* Not enough extra BAUs to be a block on their own, so just keep them
+ ** in the block being resized.
+ */
+ {
+ n += i;
+ i = n;
+ }
+ else
+ {
+ /* There are enough "leftover" BAUs in the next block to
+ ** form a remainder block. */
+
+ head_record *rem_head_ptr;
+
+ rem_head_ptr = (head_record *) BAUS_FORWARD(head_ptr, n);
+
+ rem_head_ptr->previous_block_size = (U(size_bau)) n;
+ rem_head_ptr->block_size = (U(size_bau)) i;
+
+ if (desc->last_freed)
+ {
+#ifdef HMM_AUDIT_FAIL
+ AUDIT_BLOCK(desc->last_freed)
+#endif
+
+ U(into_free_collection)(desc, (head_record *)(desc->last_freed));
+
+ desc->last_freed = 0;
+ }
+
+ desc->last_freed = rem_head_ptr;
+ }
+
+ head_ptr->block_size = (U(size_bau)) n;
+ next_head_ptr->previous_block_size = (U(size_bau)) i;
+
+ return(0);
+}
diff --git a/vpx_mem/memory_manager/hmm_shrink.c b/vpx_mem/memory_manager/hmm_shrink.c
new file mode 100644
index 000000000..5ef9b233f
--- /dev/null
+++ b/vpx_mem/memory_manager/hmm_shrink.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* This code is in the public domain.
+** Version: 1.1 Author: Walt Karas
+*/
+
+#include "hmm_intrnl.h"
+
+void U(shrink_chunk)(U(descriptor) *desc, U(size_bau) n_baus_to_shrink)
+{
+ head_record *dummy_end_block = (head_record *)
+ BAUS_BACKWARD(desc->end_of_shrinkable_chunk, DUMMY_END_BLOCK_BAUS);
+
+#ifdef HMM_AUDIT_FAIL
+
+ if (dummy_end_block->block_size != 0)
+ /* Chunk does not have valid dummy end block. */
+ HMM_AUDIT_FAIL
+
+#endif
+
+ if (n_baus_to_shrink)
+ {
+ head_record *last_block = (head_record *)
+ BAUS_BACKWARD(
+ dummy_end_block, dummy_end_block->previous_block_size);
+
+#ifdef HMM_AUDIT_FAIL
+ AUDIT_BLOCK(last_block)
+#endif
+
+ if (last_block == desc->last_freed)
+ {
+ U(size_bau) bs = BLOCK_BAUS(last_block);
+
+ /* Chunk will not be shrunk out of existence if
+ ** 1. There is at least one allocated block in the chunk
+ ** and the amount to shrink is exactly the size of the
+ ** last block, OR
+ ** 2. After the last block is shrunk, there will be enough
+ ** BAUs left in it to form a minimal size block. */
+ int chunk_will_survive =
+ (PREV_BLOCK_BAUS(last_block) && (n_baus_to_shrink == bs)) ||
+ (n_baus_to_shrink <= (U(size_bau))(bs - MIN_BLOCK_BAUS));
+
+ if (chunk_will_survive ||
+ (!PREV_BLOCK_BAUS(last_block) &&
+ (n_baus_to_shrink ==
+ (U(size_bau))(bs + DUMMY_END_BLOCK_BAUS))))
+ {
+ desc->last_freed = 0;
+
+ if (chunk_will_survive)
+ {
+ bs -= n_baus_to_shrink;
+
+ if (bs)
+ {
+ /* The last (non-dummy) block was not completely
+ ** eliminated by the shrink. */
+
+ last_block->block_size = bs;
+
+ /* Create new dummy end record.
+ */
+ dummy_end_block =
+ (head_record *) BAUS_FORWARD(last_block, bs);
+ dummy_end_block->previous_block_size = bs;
+ dummy_end_block->block_size = 0;
+
+#ifdef HMM_AUDIT_FAIL
+
+ if (desc->avl_tree_root)
+ AUDIT_BLOCK(PTR_REC_TO_HEAD(desc->avl_tree_root))
+#endif
+
+ U(into_free_collection)(desc, last_block);
+ }
+ else
+ {
+ /* The last (non-dummy) block was completely
+ ** eliminated by the shrink. Make its head
+ ** the new dummy end block.
+ */
+ last_block->block_size = 0;
+ last_block->previous_block_size &= ~HIGH_BIT_BAU_SIZE;
+ }
+ }
+ }
+
+#ifdef HMM_AUDIT_FAIL
+ else
+ HMM_AUDIT_FAIL
+#endif
+ }
+
+#ifdef HMM_AUDIT_FAIL
+ else
+ HMM_AUDIT_FAIL
+#endif
+ }
+}
diff --git a/vpx_mem/memory_manager/hmm_true.c b/vpx_mem/memory_manager/hmm_true.c
new file mode 100644
index 000000000..41103c89e
--- /dev/null
+++ b/vpx_mem/memory_manager/hmm_true.c
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* This code is in the public domain.
+** Version: 1.1 Author: Walt Karas
+*/
+
+#include "hmm_intrnl.h"
+
+U(size_aau) U(true_size)(void *payload_ptr)
+{
+ register head_record *head_ptr = PTR_REC_TO_HEAD(payload_ptr);
+
+#ifdef HMM_AUDIT_FAIL
+ AUDIT_BLOCK(head_ptr)
+#endif
+
+ /* Convert block size from BAUs to AAUs. Subtract head size, leaving
+ ** payload size.
+ */
+ return(
+ (BLOCK_BAUS(head_ptr) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT)) -
+ HEAD_AAUS);
+}
diff --git a/vpx_mem/memory_manager/include/cavl_if.h b/vpx_mem/memory_manager/include/cavl_if.h
new file mode 100644
index 000000000..e2733ef2f
--- /dev/null
+++ b/vpx_mem/memory_manager/include/cavl_if.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* Abstract AVL Tree Generic C Package.
+** Interface generation header file.
+**
+** This code is in the public domain. See cavl_tree.html for interface
+** documentation.
+**
+** Version: 1.5 Author: Walt Karas
+*/
+
+/* This header contains the definition of CHAR_BIT (number of bits in a
+** char). */
+#include <limits.h>
+
+#undef L_
+#undef L_EST_LONG_BIT
+#undef L_SIZE
+#undef L_SC
+#undef L_LONG_BIT
+#undef L_BIT_ARR_DEFN
+
+#ifndef AVL_SEARCH_TYPE_DEFINED_
+#define AVL_SEARCH_TYPE_DEFINED_
+
+typedef enum
+{
+ AVL_EQUAL = 1,
+ AVL_LESS = 2,
+ AVL_GREATER = 4,
+ AVL_LESS_EQUAL = AVL_EQUAL | AVL_LESS,
+ AVL_GREATER_EQUAL = AVL_EQUAL | AVL_GREATER
+}
+avl_search_type;
+
+#endif
+
+#ifdef AVL_UNIQUE
+
+#define L_ AVL_UNIQUE
+
+#else
+
+#define L_(X) X
+
+#endif
+
+/* Determine storage class for function prototypes. */
+#ifdef AVL_PRIVATE
+
+#define L_SC static
+
+#else
+
+#define L_SC extern
+
+#endif
+
+#ifdef AVL_SIZE
+
+#define L_SIZE AVL_SIZE
+
+#else
+
+#define L_SIZE unsigned long
+
+#endif
+
+typedef struct
+{
+#ifdef AVL_INSIDE_STRUCT
+
+ AVL_INSIDE_STRUCT
+
+#endif
+
+ AVL_HANDLE root;
+}
+L_(avl);
+
+/* Function prototypes. */
+
+L_SC void L_(init)(L_(avl) *tree);
+
+L_SC int L_(is_empty)(L_(avl) *tree);
+
+L_SC AVL_HANDLE L_(insert)(L_(avl) *tree, AVL_HANDLE h);
+
+L_SC AVL_HANDLE L_(search)(L_(avl) *tree, AVL_KEY k, avl_search_type st);
+
+L_SC AVL_HANDLE L_(search_least)(L_(avl) *tree);
+
+L_SC AVL_HANDLE L_(search_greatest)(L_(avl) *tree);
+
+L_SC AVL_HANDLE L_(remove)(L_(avl) *tree, AVL_KEY k);
+
+L_SC AVL_HANDLE L_(subst)(L_(avl) *tree, AVL_HANDLE new_node);
+
+#ifdef AVL_BUILD_ITER_TYPE
+
+L_SC int L_(build)(
+ L_(avl) *tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes);
+
+#endif
+
+/* ANSI C/ISO C++ require that a long have at least 32 bits. Set
+** L_EST_LONG_BIT to be the greatest multiple of 8 in the range
+** 32 - 64 (inclusive) that is less than or equal to the number of
+** bits in a long.
+*/
+
+#if (((LONG_MAX >> 31) >> 7) == 0)
+
+#define L_EST_LONG_BIT 32
+
+#elif (((LONG_MAX >> 31) >> 15) == 0)
+
+#define L_EST_LONG_BIT 40
+
+#elif (((LONG_MAX >> 31) >> 23) == 0)
+
+#define L_EST_LONG_BIT 48
+
+#elif (((LONG_MAX >> 31) >> 31) == 0)
+
+#define L_EST_LONG_BIT 56
+
+#else
+
+#define L_EST_LONG_BIT 64
+
+#endif
+
+/* Number of bits in a long. */
+#define L_LONG_BIT (sizeof(long) * CHAR_BIT)
+
+/* The macro L_BIT_ARR_DEFN defines a bit array whose index is a (0-based)
+** node depth. The definition depends on whether the maximum depth is more
+** or less than the number of bits in a single long.
+*/
+
+#if ((AVL_MAX_DEPTH) > L_EST_LONG_BIT)
+
+/* Maximum depth may be more than number of bits in a long. */
+
+#define L_BIT_ARR_DEFN(NAME) \
+ unsigned long NAME[((AVL_MAX_DEPTH) + L_LONG_BIT - 1) / L_LONG_BIT];
+
+#else
+
+/* Maximum depth is definitely less than number of bits in a long. */
+
+#define L_BIT_ARR_DEFN(NAME) unsigned long NAME;
+
+#endif
+
+/* Iterator structure. */
+typedef struct
+{
+ /* Tree being iterated over. */
+ L_(avl) *tree_;
+
+ /* Records a path into the tree. If bit n is true, indicates
+ ** take greater branch from the nth node in the path, otherwise
+ ** take the less branch. bit 0 gives branch from root, and
+ ** so on. */
+ L_BIT_ARR_DEFN(branch)
+
+ /* Zero-based depth of path into tree. */
+ unsigned depth;
+
+ /* Handles of nodes in path from root to current node (returned by *). */
+ AVL_HANDLE path_h[(AVL_MAX_DEPTH) - 1];
+}
+L_(iter);
+
+/* Iterator function prototypes. */
+
+L_SC void L_(start_iter)(
+ L_(avl) *tree, L_(iter) *iter, AVL_KEY k, avl_search_type st);
+
+L_SC void L_(start_iter_least)(L_(avl) *tree, L_(iter) *iter);
+
+L_SC void L_(start_iter_greatest)(L_(avl) *tree, L_(iter) *iter);
+
+L_SC AVL_HANDLE L_(get_iter)(L_(iter) *iter);
+
+L_SC void L_(incr_iter)(L_(iter) *iter);
+
+L_SC void L_(decr_iter)(L_(iter) *iter);
+
+L_SC void L_(init_iter)(L_(iter) *iter);
+
+#define AVL_IMPL_INIT 1
+#define AVL_IMPL_IS_EMPTY (1 << 1)
+#define AVL_IMPL_INSERT (1 << 2)
+#define AVL_IMPL_SEARCH (1 << 3)
+#define AVL_IMPL_SEARCH_LEAST (1 << 4)
+#define AVL_IMPL_SEARCH_GREATEST (1 << 5)
+#define AVL_IMPL_REMOVE (1 << 6)
+#define AVL_IMPL_BUILD (1 << 7)
+#define AVL_IMPL_START_ITER (1 << 8)
+#define AVL_IMPL_START_ITER_LEAST (1 << 9)
+#define AVL_IMPL_START_ITER_GREATEST (1 << 10)
+#define AVL_IMPL_GET_ITER (1 << 11)
+#define AVL_IMPL_INCR_ITER (1 << 12)
+#define AVL_IMPL_DECR_ITER (1 << 13)
+#define AVL_IMPL_INIT_ITER (1 << 14)
+#define AVL_IMPL_SUBST (1 << 15)
+
+#define AVL_IMPL_ALL (~0)
+
+#undef L_
+#undef L_EST_LONG_BIT
+#undef L_SIZE
+#undef L_SC
+#undef L_LONG_BIT
+#undef L_BIT_ARR_DEFN
diff --git a/vpx_mem/memory_manager/include/cavl_impl.h b/vpx_mem/memory_manager/include/cavl_impl.h
new file mode 100644
index 000000000..267bc7312
--- /dev/null
+++ b/vpx_mem/memory_manager/include/cavl_impl.h
@@ -0,0 +1,1266 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* Abstract AVL Tree Generic C Package.
+** Implementation generation header file.
+**
+** This code is in the public domain. See cavl_tree.html for interface
+** documentation.
+**
+** Version: 1.5 Author: Walt Karas
+*/
+
+#undef L_
+#undef L_EST_LONG_BIT
+#undef L_SIZE
+#undef l_tree
+#undef L_MASK_HIGH_BIT
+#undef L_LONG_BIT
+#undef L_BIT_ARR_DEFN
+#undef L_BIT_ARR_VAL
+#undef L_BIT_ARR_0
+#undef L_BIT_ARR_1
+#undef L_BIT_ARR_ALL
+#undef L_BIT_ARR_LONGS
+#undef L_IMPL_MASK
+#undef L_CHECK_READ_ERROR
+#undef L_CHECK_READ_ERROR_INV_DEPTH
+#undef L_SC
+#undef L_BALANCE_PARAM_PREFIX
+
+#ifdef AVL_UNIQUE
+
+#define L_ AVL_UNIQUE
+
+#else
+
+#define L_(X) X
+
+#endif
+
+/* Determine correct storage class for functions */
+#ifdef AVL_PRIVATE
+
+#define L_SC static
+
+#else
+
+#define L_SC
+
+#endif
+
+#ifdef AVL_SIZE
+
+#define L_SIZE AVL_SIZE
+
+#else
+
+#define L_SIZE unsigned long
+
+#endif
+
+#define L_MASK_HIGH_BIT ((int) ~ ((~ (unsigned) 0) >> 1))
+
+/* ANSI C/ISO C++ require that a long have at least 32 bits. Set
+** L_EST_LONG_BIT to be the greatest multiple of 8 in the range
+** 32 - 64 (inclusive) that is less than or equal to the number of
+** bits in a long.
+*/
+
+#if (((LONG_MAX >> 31) >> 7) == 0)
+
+#define L_EST_LONG_BIT 32
+
+#elif (((LONG_MAX >> 31) >> 15) == 0)
+
+#define L_EST_LONG_BIT 40
+
+#elif (((LONG_MAX >> 31) >> 23) == 0)
+
+#define L_EST_LONG_BIT 48
+
+#elif (((LONG_MAX >> 31) >> 31) == 0)
+
+#define L_EST_LONG_BIT 56
+
+#else
+
+#define L_EST_LONG_BIT 64
+
+#endif
+
+#define L_LONG_BIT (sizeof(long) * CHAR_BIT)
+
+#if ((AVL_MAX_DEPTH) > L_EST_LONG_BIT)
+
+/* The maximum depth may be greater than the number of bits in a long,
+** so multiple longs are needed to hold a bit array indexed by node
+** depth. */
+
+#define L_BIT_ARR_LONGS (((AVL_MAX_DEPTH) + L_LONG_BIT - 1) / L_LONG_BIT)
+
+#define L_BIT_ARR_DEFN(NAME) unsigned long NAME[L_BIT_ARR_LONGS];
+
+#define L_BIT_ARR_VAL(BIT_ARR, BIT_NUM) \
+ ((BIT_ARR)[(BIT_NUM) / L_LONG_BIT] & (1L << ((BIT_NUM) % L_LONG_BIT)))
+
+#define L_BIT_ARR_0(BIT_ARR, BIT_NUM) \
+ (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] &= ~(1L << ((BIT_NUM) % L_LONG_BIT));
+
+#define L_BIT_ARR_1(BIT_ARR, BIT_NUM) \
+ (BIT_ARR)[(BIT_NUM) / L_LONG_BIT] |= 1L << ((BIT_NUM) % L_LONG_BIT);
+
+#define L_BIT_ARR_ALL(BIT_ARR, BIT_VAL) \
+ { int i = L_BIT_ARR_LONGS; do (BIT_ARR)[--i] = 0L - (BIT_VAL); while(i); }
+
+#else /* The bit array can definitely fit in one long */
+
+#define L_BIT_ARR_DEFN(NAME) unsigned long NAME;
+
+#define L_BIT_ARR_VAL(BIT_ARR, BIT_NUM) ((BIT_ARR) & (1L << (BIT_NUM)))
+
+#define L_BIT_ARR_0(BIT_ARR, BIT_NUM) (BIT_ARR) &= ~(1L << (BIT_NUM));
+
+#define L_BIT_ARR_1(BIT_ARR, BIT_NUM) (BIT_ARR) |= 1L << (BIT_NUM);
+
+#define L_BIT_ARR_ALL(BIT_ARR, BIT_VAL) (BIT_ARR) = 0L - (BIT_VAL);
+
+#endif
+
+#ifdef AVL_READ_ERRORS_HAPPEN
+
+#define L_CHECK_READ_ERROR(ERROR_RETURN) \
+ { if (AVL_READ_ERROR) return(ERROR_RETURN); }
+
+#else
+
+#define L_CHECK_READ_ERROR(ERROR_RETURN)
+
+#endif
+
+/* The presumed reason that an instantiation places additional fields
+** inside the AVL tree structure is that the SET_ and GET_ macros
+** need these fields. The "balance" function does not explicitly use
+** any fields in the AVL tree structure, so only pass an AVL tree
+** structure pointer to "balance" if it has instantiation-specific
+** fields that are (presumably) needed by the SET_/GET_ calls within
+** "balance".
+*/
+#ifdef AVL_INSIDE_STRUCT
+
+#define L_BALANCE_PARAM_CALL_PREFIX l_tree,
+#define L_BALANCE_PARAM_DECL_PREFIX L_(avl) *l_tree,
+
+#else
+
+#define L_BALANCE_PARAM_CALL_PREFIX
+#define L_BALANCE_PARAM_DECL_PREFIX
+
+#endif
+
+#ifdef AVL_IMPL_MASK
+
+#define L_IMPL_MASK (AVL_IMPL_MASK)
+
+#else
+
+/* Define all functions. */
+#define L_IMPL_MASK AVL_IMPL_ALL
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_INIT)
+
+L_SC void L_(init)(L_(avl) *l_tree)
+{
+ l_tree->root = AVL_NULL;
+}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_IS_EMPTY)
+
+L_SC int L_(is_empty)(L_(avl) *l_tree)
+{
+ return(l_tree->root == AVL_NULL);
+}
+
+#endif
+
+/* Put the private balance function in the same compilation module as
+** the insert function. */
+#if (L_IMPL_MASK & AVL_IMPL_INSERT)
+
+/* Balances subtree, returns handle of root node of subtree after balancing.
+*/
+L_SC AVL_HANDLE L_(balance)(L_BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h)
+{
+ AVL_HANDLE deep_h;
+
+ /* Either the "greater than" or the "less than" subtree of
+ ** this node has to be 2 levels deeper (or else it wouldn't
+ ** need balancing).
+ */
+ if (AVL_GET_BALANCE_FACTOR(bal_h) > 0)
+ {
+ /* "Greater than" subtree is deeper. */
+
+ deep_h = AVL_GET_GREATER(bal_h, 1);
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+
+ if (AVL_GET_BALANCE_FACTOR(deep_h) < 0)
+ {
+ int bf;
+
+ AVL_HANDLE old_h = bal_h;
+ bal_h = AVL_GET_LESS(deep_h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ AVL_SET_GREATER(old_h, AVL_GET_LESS(bal_h, 1))
+ AVL_SET_LESS(deep_h, AVL_GET_GREATER(bal_h, 1))
+ AVL_SET_LESS(bal_h, old_h)
+ AVL_SET_GREATER(bal_h, deep_h)
+
+ bf = AVL_GET_BALANCE_FACTOR(bal_h);
+
+ if (bf != 0)
+ {
+ if (bf > 0)
+ {
+ AVL_SET_BALANCE_FACTOR(old_h, -1)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ }
+ else
+ {
+ AVL_SET_BALANCE_FACTOR(deep_h, 1)
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
+ }
+
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ }
+ else
+ {
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ }
+ }
+ else
+ {
+ AVL_SET_GREATER(bal_h, AVL_GET_LESS(deep_h, 0))
+ AVL_SET_LESS(deep_h, bal_h)
+
+ if (AVL_GET_BALANCE_FACTOR(deep_h) == 0)
+ {
+ AVL_SET_BALANCE_FACTOR(deep_h, -1)
+ AVL_SET_BALANCE_FACTOR(bal_h, 1)
+ }
+ else
+ {
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ }
+
+ bal_h = deep_h;
+ }
+ }
+ else
+ {
+ /* "Less than" subtree is deeper. */
+
+ deep_h = AVL_GET_LESS(bal_h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+
+ if (AVL_GET_BALANCE_FACTOR(deep_h) > 0)
+ {
+ int bf;
+ AVL_HANDLE old_h = bal_h;
+ bal_h = AVL_GET_GREATER(deep_h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ AVL_SET_LESS(old_h, AVL_GET_GREATER(bal_h, 0))
+ AVL_SET_GREATER(deep_h, AVL_GET_LESS(bal_h, 0))
+ AVL_SET_GREATER(bal_h, old_h)
+ AVL_SET_LESS(bal_h, deep_h)
+
+ bf = AVL_GET_BALANCE_FACTOR(bal_h);
+
+ if (bf != 0)
+ {
+ if (bf < 0)
+ {
+ AVL_SET_BALANCE_FACTOR(old_h, 1)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ }
+ else
+ {
+ AVL_SET_BALANCE_FACTOR(deep_h, -1)
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
+ }
+
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ }
+ else
+ {
+ AVL_SET_BALANCE_FACTOR(old_h, 0)
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ }
+ }
+ else
+ {
+ AVL_SET_LESS(bal_h, AVL_GET_GREATER(deep_h, 0))
+ AVL_SET_GREATER(deep_h, bal_h)
+
+ if (AVL_GET_BALANCE_FACTOR(deep_h) == 0)
+ {
+ AVL_SET_BALANCE_FACTOR(deep_h, 1)
+ AVL_SET_BALANCE_FACTOR(bal_h, -1)
+ }
+ else
+ {
+ AVL_SET_BALANCE_FACTOR(deep_h, 0)
+ AVL_SET_BALANCE_FACTOR(bal_h, 0)
+ }
+
+ bal_h = deep_h;
+ }
+ }
+
+ return(bal_h);
+}
+
+L_SC AVL_HANDLE L_(insert)(L_(avl) *l_tree, AVL_HANDLE h)
+{
+ AVL_SET_LESS(h, AVL_NULL)
+ AVL_SET_GREATER(h, AVL_NULL)
+ AVL_SET_BALANCE_FACTOR(h, 0)
+
+ if (l_tree->root == AVL_NULL)
+ l_tree->root = h;
+ else
+ {
+ /* Last unbalanced node encountered in search for insertion point. */
+ AVL_HANDLE unbal = AVL_NULL;
+ /* Parent of last unbalanced node. */
+ AVL_HANDLE parent_unbal = AVL_NULL;
+ /* Balance factor of last unbalanced node. */
+ int unbal_bf;
+
+ /* Zero-based depth in tree. */
+ unsigned depth = 0, unbal_depth = 0;
+
+ /* Records a path into the tree. If bit n is true, indicates
+ ** take greater branch from the nth node in the path, otherwise
+ ** take the less branch. bit 0 gives branch from root, and
+ ** so on. */
+ L_BIT_ARR_DEFN(branch)
+
+ AVL_HANDLE hh = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+ int cmp;
+
+ do
+ {
+ if (AVL_GET_BALANCE_FACTOR(hh) != 0)
+ {
+ unbal = hh;
+ parent_unbal = parent;
+ unbal_depth = depth;
+ }
+
+ cmp = AVL_COMPARE_NODE_NODE(h, hh);
+
+ if (cmp == 0)
+ /* Duplicate key. */
+ return(hh);
+
+ parent = hh;
+
+ if (cmp > 0)
+ {
+ hh = AVL_GET_GREATER(hh, 1);
+ L_BIT_ARR_1(branch, depth)
+ }
+ else
+ {
+ hh = AVL_GET_LESS(hh, 1);
+ L_BIT_ARR_0(branch, depth)
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ }
+ while (hh != AVL_NULL);
+
+ /* Add node to insert as leaf of tree. */
+ if (cmp < 0)
+ AVL_SET_LESS(parent, h)
+ else
+ AVL_SET_GREATER(parent, h)
+
+ depth = unbal_depth;
+
+ if (unbal == AVL_NULL)
+ hh = l_tree->root;
+ else
+ {
+ cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
+ depth++;
+ unbal_bf = AVL_GET_BALANCE_FACTOR(unbal);
+
+ if (cmp < 0)
+ unbal_bf--;
+ else /* cmp > 0 */
+ unbal_bf++;
+
+ hh = cmp < 0 ? AVL_GET_LESS(unbal, 1) : AVL_GET_GREATER(unbal, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+
+ if ((unbal_bf != -2) && (unbal_bf != 2))
+ {
+ /* No rebalancing of tree is necessary. */
+ AVL_SET_BALANCE_FACTOR(unbal, unbal_bf)
+ unbal = AVL_NULL;
+ }
+ }
+
+ if (hh != AVL_NULL)
+ while (h != hh)
+ {
+ cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
+ depth++;
+
+ if (cmp < 0)
+ {
+ AVL_SET_BALANCE_FACTOR(hh, -1)
+ hh = AVL_GET_LESS(hh, 1);
+ }
+ else /* cmp > 0 */
+ {
+ AVL_SET_BALANCE_FACTOR(hh, 1)
+ hh = AVL_GET_GREATER(hh, 1);
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ if (unbal != AVL_NULL)
+ {
+ unbal = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX unbal);
+ L_CHECK_READ_ERROR(AVL_NULL)
+
+ if (parent_unbal == AVL_NULL)
+ l_tree->root = unbal;
+ else
+ {
+ depth = unbal_depth - 1;
+ cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
+
+ if (cmp < 0)
+ AVL_SET_LESS(parent_unbal, unbal)
+ else /* cmp > 0 */
+ AVL_SET_GREATER(parent_unbal, unbal)
+ }
+ }
+
+ }
+
+ return(h);
+}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_SEARCH)
+
+L_SC AVL_HANDLE L_(search)(L_(avl) *l_tree, AVL_KEY k, avl_search_type st)
+{
+ int cmp, target_cmp;
+ AVL_HANDLE match_h = AVL_NULL;
+ AVL_HANDLE h = l_tree->root;
+
+ if (st & AVL_LESS)
+ target_cmp = 1;
+ else if (st & AVL_GREATER)
+ target_cmp = -1;
+ else
+ target_cmp = 0;
+
+ while (h != AVL_NULL)
+ {
+ cmp = AVL_COMPARE_KEY_NODE(k, h);
+
+ if (cmp == 0)
+ {
+ if (st & AVL_EQUAL)
+ {
+ match_h = h;
+ break;
+ }
+
+ cmp = -target_cmp;
+ }
+ else if (target_cmp != 0)
+ if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT))
+ /* cmp and target_cmp are both positive or both negative. */
+ match_h = h;
+
+ h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ return(match_h);
+}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_SEARCH_LEAST)
+
+L_SC AVL_HANDLE L_(search_least)(L_(avl) *l_tree)
+{
+ AVL_HANDLE h = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+
+ while (h != AVL_NULL)
+ {
+ parent = h;
+ h = AVL_GET_LESS(h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ return(parent);
+}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_SEARCH_GREATEST)
+
+L_SC AVL_HANDLE L_(search_greatest)(L_(avl) *l_tree)
+{
+ AVL_HANDLE h = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+
+ while (h != AVL_NULL)
+ {
+ parent = h;
+ h = AVL_GET_GREATER(h, 1);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ return(parent);
+}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_REMOVE)
+
+/* Prototype of balance function (called by remove) in case not in
+** same compilation unit.
+*/
+L_SC AVL_HANDLE L_(balance)(L_BALANCE_PARAM_DECL_PREFIX AVL_HANDLE bal_h);
+
+L_SC AVL_HANDLE L_(remove)(L_(avl) *l_tree, AVL_KEY k)
+{
+ /* Zero-based depth in tree. */
+ unsigned depth = 0, rm_depth;
+
+ /* Records a path into the tree. If bit n is true, indicates
+ ** take greater branch from the nth node in the path, otherwise
+ ** take the less branch. bit 0 gives branch from root, and
+ ** so on. */
+ L_BIT_ARR_DEFN(branch)
+
+ AVL_HANDLE h = l_tree->root;
+ AVL_HANDLE parent = AVL_NULL;
+ AVL_HANDLE child;
+ AVL_HANDLE path;
+ int cmp, cmp_shortened_sub_with_path;
+ int reduced_depth;
+ int bf;
+ AVL_HANDLE rm;
+ AVL_HANDLE parent_rm;
+
+ for (; ;)
+ {
+ if (h == AVL_NULL)
+ /* No node in tree with given key. */
+ return(AVL_NULL);
+
+ cmp = AVL_COMPARE_KEY_NODE(k, h);
+
+ if (cmp == 0)
+ /* Found node to remove. */
+ break;
+
+ parent = h;
+
+ if (cmp > 0)
+ {
+ h = AVL_GET_GREATER(h, 1);
+ L_BIT_ARR_1(branch, depth)
+ }
+ else
+ {
+ h = AVL_GET_LESS(h, 1);
+ L_BIT_ARR_0(branch, depth)
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ cmp_shortened_sub_with_path = cmp;
+ }
+
+ rm = h;
+ parent_rm = parent;
+ rm_depth = depth;
+
+ /* If the node to remove is not a leaf node, we need to get a
+ ** leaf node, or a node with a single leaf as its child, to put
+ ** in the place of the node to remove. We will get the greatest
+ ** node in the less subtree (of the node to remove), or the least
+ ** node in the greater subtree. We take the leaf node from the
+ ** deeper subtree, if there is one. */
+
+ if (AVL_GET_BALANCE_FACTOR(h) < 0)
+ {
+ child = AVL_GET_LESS(h, 1);
+ L_BIT_ARR_0(branch, depth)
+ cmp = -1;
+ }
+ else
+ {
+ child = AVL_GET_GREATER(h, 1);
+ L_BIT_ARR_1(branch, depth)
+ cmp = 1;
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+
+ if (child != AVL_NULL)
+ {
+ cmp = -cmp;
+
+ do
+ {
+ parent = h;
+ h = child;
+
+ if (cmp < 0)
+ {
+ child = AVL_GET_LESS(h, 1);
+ L_BIT_ARR_0(branch, depth)
+ }
+ else
+ {
+ child = AVL_GET_GREATER(h, 1);
+ L_BIT_ARR_1(branch, depth)
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ }
+ while (child != AVL_NULL);
+
+ if (parent == rm)
+ /* Only went through do loop once. Deleted node will be replaced
+ ** in the tree structure by one of its immediate children. */
+ cmp_shortened_sub_with_path = -cmp;
+ else
+ cmp_shortened_sub_with_path = cmp;
+
+ /* Get the handle of the opposite child, which may not be null. */
+ child = cmp > 0 ? AVL_GET_LESS(h, 0) : AVL_GET_GREATER(h, 0);
+ }
+
+ if (parent == AVL_NULL)
+ /* There were only 1 or 2 nodes in this tree. */
+ l_tree->root = child;
+ else if (cmp_shortened_sub_with_path < 0)
+ AVL_SET_LESS(parent, child)
+ else
+ AVL_SET_GREATER(parent, child)
+
+ /* "path" is the parent of the subtree being eliminated or reduced
+ ** from a depth of 2 to 1. If "path" is the node to be removed, we
+ ** set path to the node we're about to poke into the position of the
+ ** node to be removed. */
+ path = parent == rm ? h : parent;
+
+ if (h != rm)
+ {
+ /* Poke in the replacement for the node to be removed. */
+ AVL_SET_LESS(h, AVL_GET_LESS(rm, 0))
+ AVL_SET_GREATER(h, AVL_GET_GREATER(rm, 0))
+ AVL_SET_BALANCE_FACTOR(h, AVL_GET_BALANCE_FACTOR(rm))
+
+ if (parent_rm == AVL_NULL)
+ l_tree->root = h;
+ else
+ {
+ depth = rm_depth - 1;
+
+ if (L_BIT_ARR_VAL(branch, depth))
+ AVL_SET_GREATER(parent_rm, h)
+ else
+ AVL_SET_LESS(parent_rm, h)
+ }
+ }
+
+ if (path != AVL_NULL)
+ {
+ /* Create a temporary linked list from the parent of the path node
+ ** to the root node. */
+ h = l_tree->root;
+ parent = AVL_NULL;
+ depth = 0;
+
+ while (h != path)
+ {
+ if (L_BIT_ARR_VAL(branch, depth))
+ {
+ child = AVL_GET_GREATER(h, 1);
+ AVL_SET_GREATER(h, parent)
+ }
+ else
+ {
+ child = AVL_GET_LESS(h, 1);
+ AVL_SET_LESS(h, parent)
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ depth++;
+ parent = h;
+ h = child;
+ }
+
+ /* Climb from the path node to the root node using the linked
+ ** list, restoring the tree structure and rebalancing as necessary.
+ */
+ reduced_depth = 1;
+ cmp = cmp_shortened_sub_with_path;
+
+ for (; ;)
+ {
+ if (reduced_depth)
+ {
+ bf = AVL_GET_BALANCE_FACTOR(h);
+
+ if (cmp < 0)
+ bf++;
+ else /* cmp > 0 */
+ bf--;
+
+ if ((bf == -2) || (bf == 2))
+ {
+ h = L_(balance)(L_BALANCE_PARAM_CALL_PREFIX h);
+ L_CHECK_READ_ERROR(AVL_NULL)
+ bf = AVL_GET_BALANCE_FACTOR(h);
+ }
+ else
+ AVL_SET_BALANCE_FACTOR(h, bf)
+ reduced_depth = (bf == 0);
+ }
+
+ if (parent == AVL_NULL)
+ break;
+
+ child = h;
+ h = parent;
+ depth--;
+ cmp = L_BIT_ARR_VAL(branch, depth) ? 1 : -1;
+
+ if (cmp < 0)
+ {
+ parent = AVL_GET_LESS(h, 1);
+ AVL_SET_LESS(h, child)
+ }
+ else
+ {
+ parent = AVL_GET_GREATER(h, 1);
+ AVL_SET_GREATER(h, child)
+ }
+
+ L_CHECK_READ_ERROR(AVL_NULL)
+ }
+
+ l_tree->root = h;
+ }
+
+ return(rm);
+}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_SUBST)
+
/* Substitute "new_node" for an existing node with an equal key, without
** rebalancing.  Returns the replaced node, or AVL_NULL if no node with a
** matching key exists.  The new node inherits the old node's children and
** balance factor, so the tree's shape is unchanged. */
L_SC AVL_HANDLE L_(subst)(L_(avl) *l_tree, AVL_HANDLE new_node)
{
    AVL_HANDLE h = l_tree->root;
    AVL_HANDLE parent = AVL_NULL;
    int cmp, last_cmp;

    /* Search for node already in tree with same key. */
    for (; ;)
    {
        if (h == AVL_NULL)
            /* No node in tree with same key as new node. */
            return(AVL_NULL);

        cmp = AVL_COMPARE_NODE_NODE(new_node, h);

        if (cmp == 0)
            /* Found the node to substitute new one for. */
            break;

        /* last_cmp remembers which side of "parent" we descended to.  It
        ** is only read below when parent != AVL_NULL, so it is always
        ** assigned before use. */
        last_cmp = cmp;
        parent = h;
        h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
        L_CHECK_READ_ERROR(AVL_NULL)
    }

    /* Copy tree housekeeping fields from node in tree to new node. */
    AVL_SET_LESS(new_node, AVL_GET_LESS(h, 0))
    AVL_SET_GREATER(new_node, AVL_GET_GREATER(h, 0))
    AVL_SET_BALANCE_FACTOR(new_node, AVL_GET_BALANCE_FACTOR(h))

    if (parent == AVL_NULL)
        /* New node is also new root. */
        l_tree->root = new_node;
    else
    {
        /* Make parent point to new node. */
        if (last_cmp < 0)
            AVL_SET_LESS(parent, new_node)
        else
            AVL_SET_GREATER(parent, new_node)
    }

    return(h);
}
+
+#endif
+
+#ifdef AVL_BUILD_ITER_TYPE
+
+#if (L_IMPL_MASK & AVL_IMPL_BUILD)
+
/* Build a balanced tree of "num_nodes" nodes taken in order from the
** iterator "p".  For the result to be a valid search tree the iterator
** must deliver the nodes in ascending key order (this is not checked
** here).  Returns 1 on success, 0 on read error.  Subtrees are built
** bottom-up in a single pass with no rebalancing, using two O(depth)
** bit arrays as auxiliary storage. */
L_SC int L_(build)(
    L_(avl) *l_tree, AVL_BUILD_ITER_TYPE p, L_SIZE num_nodes)
{
    /* Gives path to subtree being built.  If bit n is false, branch
    ** less from the node at depth n, if true branch greater. */
    L_BIT_ARR_DEFN(branch)

    /* If bit n is true, then for the current subtree at depth n, its
    ** greater subtree has one more node than its less subtree. */
    L_BIT_ARR_DEFN(rem)

    /* Depth of root node of current subtree. */
    unsigned depth = 0;

    /* Number of nodes in current subtree. */
    L_SIZE num_sub = num_nodes;

    /* The algorithm relies on a stack of nodes whose less subtree has
    ** been built, but whose greater subtree has not yet been built.
    ** The stack is implemented as linked list.  The nodes are linked
    ** together by having the "greater" handle of a node set to the
    ** next node in the list.  "less_parent" is the handle of the first
    ** node in the list. */
    AVL_HANDLE less_parent = AVL_NULL;

    /* h is root of current subtree, child is one of its children. */
    AVL_HANDLE h;
    AVL_HANDLE child;

    if (num_nodes == 0)
    {
        l_tree->root = AVL_NULL;
        return(1);
    }

    for (; ;)
    {
        /* Descend to the leftmost unbuilt subtree, halving the node
        ** count at each level and recording the split remainders. */
        while (num_sub > 2)
        {
            /* Subtract one for root of subtree. */
            num_sub--;

            if (num_sub & 1)
                L_BIT_ARR_1(rem, depth)
            else
                L_BIT_ARR_0(rem, depth)
            L_BIT_ARR_0(branch, depth)
            depth++;

            num_sub >>= 1;
        }

        if (num_sub == 2)
        {
            /* Build a subtree with two nodes, slanting to greater.
            ** I arbitrarily chose to always have the extra node in the
            ** greater subtree when there is an odd number of nodes to
            ** split between the two subtrees. */

            h = AVL_BUILD_ITER_VAL(p);
            L_CHECK_READ_ERROR(0)
            AVL_BUILD_ITER_INCR(p)
            child = AVL_BUILD_ITER_VAL(p);
            L_CHECK_READ_ERROR(0)
            AVL_BUILD_ITER_INCR(p)
            AVL_SET_LESS(child, AVL_NULL)
            AVL_SET_GREATER(child, AVL_NULL)
            AVL_SET_BALANCE_FACTOR(child, 0)
            AVL_SET_GREATER(h, child)
            AVL_SET_LESS(h, AVL_NULL)
            AVL_SET_BALANCE_FACTOR(h, 1)
        }
        else /* num_sub == 1 */
        {
            /* Build a subtree with one node. */

            h = AVL_BUILD_ITER_VAL(p);
            L_CHECK_READ_ERROR(0)
            AVL_BUILD_ITER_INCR(p)
            AVL_SET_LESS(h, AVL_NULL)
            AVL_SET_GREATER(h, AVL_NULL)
            AVL_SET_BALANCE_FACTOR(h, 0)
        }

        while (depth)
        {
            depth--;

            if (!L_BIT_ARR_VAL(branch, depth))
                /* We've completed a less subtree. */
                break;

            /* We've completed a greater subtree, so attach it to
            ** its parent (that is less than it).  We pop the parent
            ** off the stack of less parents. */
            child = h;
            h = less_parent;
            less_parent = AVL_GET_GREATER(h, 1);
            L_CHECK_READ_ERROR(0)
            AVL_SET_GREATER(h, child)
            /* num_sub = 2 * (num_sub - rem[depth]) + rem[depth] + 1 */
            num_sub <<= 1;
            num_sub += L_BIT_ARR_VAL(rem, depth) ? 0 : 1;

            if (num_sub & (num_sub - 1))
                /* num_sub is not a power of 2. */
                AVL_SET_BALANCE_FACTOR(h, 0)
            else
                /* num_sub is a power of 2. */
                AVL_SET_BALANCE_FACTOR(h, 1)
        }

        if (num_sub == num_nodes)
            /* We've completed the full tree. */
            break;

        /* The subtree we've completed is the less subtree of the
        ** next node in the sequence. */

        child = h;
        h = AVL_BUILD_ITER_VAL(p);
        L_CHECK_READ_ERROR(0)
        AVL_BUILD_ITER_INCR(p)
        AVL_SET_LESS(h, child)

        /* Put h into stack of less parents. */
        AVL_SET_GREATER(h, less_parent)
        less_parent = h;

        /* Proceed to creating greater than subtree of h. */
        L_BIT_ARR_1(branch, depth)
        num_sub += L_BIT_ARR_VAL(rem, depth) ? 1 : 0;
        depth++;

    } /* end for ( ; ; ) */

    l_tree->root = h;

    return(1);
}
+
+#endif
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_INIT_ITER)
+
/* Initialize depth to invalid value, to indicate iterator is
** invalid.  (Depth is zero-base.)  It's not necessary to initialize
** iterators prior to passing them to the "start" function.
*/
L_SC void L_(init_iter)(L_(iter) *iter)
{
    /* ~0 (all bits set) is the sentinel for "no current node". */
    iter->depth = ~0;
}
+
+#endif
+
+#ifdef AVL_READ_ERRORS_HAPPEN
+
+#define L_CHECK_READ_ERROR_INV_DEPTH \
+ { if (AVL_READ_ERROR) { iter->depth = ~0; return; } }
+
+#else
+
+#define L_CHECK_READ_ERROR_INV_DEPTH
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_START_ITER)
+
/* Position the iterator on a starting node selected by key "k" and
** search type "st" (combination of the AVL_LESS / AVL_EQUAL /
** AVL_GREATER flags).  If no node satisfies the search, the iterator is
** left invalid (depth == ~0). */
L_SC void L_(start_iter)(
    L_(avl) *l_tree, L_(iter) *iter, AVL_KEY k, avl_search_type st)
{
    AVL_HANDLE h = l_tree->root;
    unsigned d = 0;
    int cmp, target_cmp;

    /* Save the tree that we're going to iterate through in a
    ** member variable. */
    iter->tree_ = l_tree;

    /* Invalid until a satisfactory node is found. */
    iter->depth = ~0;

    if (h == AVL_NULL)
        /* Tree is empty. */
        return;

    if (st & AVL_LESS)
        /* Key can be greater than key of starting node. */
        target_cmp = 1;
    else if (st & AVL_GREATER)
        /* Key can be less than key of starting node. */
        target_cmp = -1;
    else
        /* Key must be same as key of starting node. */
        target_cmp = 0;

    for (; ;)
    {
        cmp = AVL_COMPARE_KEY_NODE(k, h);

        if (cmp == 0)
        {
            if (st & AVL_EQUAL)
            {
                /* Equal node was sought and found as starting node. */
                iter->depth = d;
                break;
            }

            /* An exactly-equal node is not acceptable; continue the
            ** descent as if the key compared on the acceptable side. */
            cmp = -target_cmp;
        }
        else if (target_cmp != 0)
            if (!((cmp ^ target_cmp) & L_MASK_HIGH_BIT))
                /* cmp and target_cmp are both negative or both positive,
                ** so this node is acceptable; remember it as the best
                ** candidate found so far. */
                iter->depth = d;

        h = cmp < 0 ? AVL_GET_LESS(h, 1) : AVL_GET_GREATER(h, 1);
        L_CHECK_READ_ERROR_INV_DEPTH

        if (h == AVL_NULL)
            break;

        if (cmp > 0)
            L_BIT_ARR_1(iter->branch, d)
        else
            L_BIT_ARR_0(iter->branch, d)
        iter->path_h[d++] = h;
    }
}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_START_ITER_LEAST)
+
/* Position the iterator on the least (leftmost) node in the tree.
** Leaves the iterator invalid (depth == ~0) if the tree is empty. */
L_SC void L_(start_iter_least)(L_(avl) *l_tree, L_(iter) *iter)
{
    AVL_HANDLE h = l_tree->root;

    iter->tree_ = l_tree;

    iter->depth = ~0;

    /* Path to the least node branches less at every level. */
    L_BIT_ARR_ALL(iter->branch, 0)

    while (h != AVL_NULL)
    {
        if (iter->depth != ~0)
            /* path_h stores only nodes below the root; on the first
            ** pass (depth still ~0) the root itself is not stored. */
            iter->path_h[iter->depth] = h;

        /* First increment wraps depth from ~0 (invalid) to 0. */
        iter->depth++;
        h = AVL_GET_LESS(h, 1);
        L_CHECK_READ_ERROR_INV_DEPTH
    }
}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_START_ITER_GREATEST)
+
/* Position the iterator on the greatest (rightmost) node in the tree.
** Leaves the iterator invalid (depth == ~0) if the tree is empty. */
L_SC void L_(start_iter_greatest)(L_(avl) *l_tree, L_(iter) *iter)
{
    AVL_HANDLE h = l_tree->root;

    iter->tree_ = l_tree;

    iter->depth = ~0;

    /* Path to the greatest node branches greater at every level. */
    L_BIT_ARR_ALL(iter->branch, 1)

    while (h != AVL_NULL)
    {
        if (iter->depth != ~0)
            /* path_h stores only nodes below the root; on the first
            ** pass (depth still ~0) the root itself is not stored. */
            iter->path_h[iter->depth] = h;

        /* First increment wraps depth from ~0 (invalid) to 0. */
        iter->depth++;
        h = AVL_GET_GREATER(h, 1);
        L_CHECK_READ_ERROR_INV_DEPTH
    }
}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_GET_ITER)
+
+L_SC AVL_HANDLE L_(get_iter)(L_(iter) *iter)
+{
+ if (iter->depth == ~0)
+ return(AVL_NULL);
+
+ return(iter->depth == 0 ?
+ iter->tree_->root : iter->path_h[iter->depth - 1]);
+}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_INCR_ITER)
+
/* Advance the iterator to the in-order successor of its current node.
** The iterator becomes invalid (depth == ~0) when advanced past the
** greatest node.  Does nothing if the iterator is already invalid. */
L_SC void L_(incr_iter)(L_(iter) *iter)
{
#define l_tree (iter->tree_)

    if (iter->depth != ~0)
    {
        /* Fetch the greater child of the current node (depth 0 means
        ** the current node is the root, which is not kept in path_h). */
        AVL_HANDLE h =
            AVL_GET_GREATER((iter->depth == 0 ?
                             iter->tree_->root : iter->path_h[iter->depth - 1]), 1);
        L_CHECK_READ_ERROR_INV_DEPTH

        if (h == AVL_NULL)
            /* No greater subtree: climb until we pop off a node that was
            ** reached through a "greater" branch; stopping on a "less"
            ** branch leaves us on the successor. */
            do
            {
                if (iter->depth == 0)
                {
                    /* Climbed past the root -- no successor exists. */
                    iter->depth = ~0;
                    break;
                }

                iter->depth--;
            }
            while (L_BIT_ARR_VAL(iter->branch, iter->depth));
        else
        {
            /* Successor is the least node of the greater subtree: step
            ** greater once, then descend less as far as possible. */
            L_BIT_ARR_1(iter->branch, iter->depth)
            iter->path_h[iter->depth++] = h;

            for (; ;)
            {
                h = AVL_GET_LESS(h, 1);
                L_CHECK_READ_ERROR_INV_DEPTH

                if (h == AVL_NULL)
                    break;

                L_BIT_ARR_0(iter->branch, iter->depth)
                iter->path_h[iter->depth++] = h;
            }
        }
    }

#undef l_tree
}
+
+#endif
+
+#if (L_IMPL_MASK & AVL_IMPL_DECR_ITER)
+
/* Move the iterator to the in-order predecessor of its current node
** (mirror image of incr_iter).  The iterator becomes invalid
** (depth == ~0) when moved before the least node.  Does nothing if the
** iterator is already invalid. */
L_SC void L_(decr_iter)(L_(iter) *iter)
{
#define l_tree (iter->tree_)

    if (iter->depth != ~0)
    {
        /* Fetch the less child of the current node (depth 0 means the
        ** current node is the root, which is not kept in path_h). */
        AVL_HANDLE h =
            AVL_GET_LESS((iter->depth == 0 ?
                          iter->tree_->root : iter->path_h[iter->depth - 1]), 1);
        L_CHECK_READ_ERROR_INV_DEPTH

        if (h == AVL_NULL)
            /* No less subtree: climb until we pop off a node that was
            ** reached through a "less" branch; stopping on a "greater"
            ** branch leaves us on the predecessor. */
            do
            {
                if (iter->depth == 0)
                {
                    /* Climbed past the root -- no predecessor exists. */
                    iter->depth = ~0;
                    break;
                }

                iter->depth--;
            }
            while (!L_BIT_ARR_VAL(iter->branch, iter->depth));
        else
        {
            /* Predecessor is the greatest node of the less subtree: step
            ** less once, then descend greater as far as possible. */
            L_BIT_ARR_0(iter->branch, iter->depth)
            iter->path_h[iter->depth++] = h;

            for (; ;)
            {
                h = AVL_GET_GREATER(h, 1);
                L_CHECK_READ_ERROR_INV_DEPTH

                if (h == AVL_NULL)
                    break;

                L_BIT_ARR_1(iter->branch, iter->depth)
                iter->path_h[iter->depth++] = h;
            }
        }
    }

#undef l_tree
}
+
+#endif
+
+/* Tidy up the preprocessor symbol name space. */
+#undef L_
+#undef L_EST_LONG_BIT
+#undef L_SIZE
+#undef L_MASK_HIGH_BIT
+#undef L_LONG_BIT
+#undef L_BIT_ARR_DEFN
+#undef L_BIT_ARR_VAL
+#undef L_BIT_ARR_0
+#undef L_BIT_ARR_1
+#undef L_BIT_ARR_ALL
+#undef L_CHECK_READ_ERROR
+#undef L_CHECK_READ_ERROR_INV_DEPTH
+#undef L_BIT_ARR_LONGS
+#undef L_IMPL_MASK
+#undef L_CHECK_READ_ERROR
+#undef L_CHECK_READ_ERROR_INV_DEPTH
+#undef L_SC
+#undef L_BALANCE_PARAM_CALL_PREFIX
+#undef L_BALANCE_PARAM_DECL_PREFIX
diff --git a/vpx_mem/memory_manager/include/heapmm.h b/vpx_mem/memory_manager/include/heapmm.h
new file mode 100644
index 000000000..933e30dd7
--- /dev/null
+++ b/vpx_mem/memory_manager/include/heapmm.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* This code is in the public domain.
+** Version: 1.1 Author: Walt Karas
+*/
+
+/* External header file for Heap Memory Manager. See documentation in
+** heapmm.html.
+*/
+
+#undef HMM_PROCESS
+
+/* Include once per configuration in a particular translation unit. */
+
+#ifndef HMM_CNFG_NUM
+
+/* Default configuration. */
+
+#ifndef HMM_INC_CNFG_DFLT
+#define HMM_INC_CNFG_DFLT
+#define HMM_PROCESS
+#endif
+
+#elif HMM_CNFG_NUM == 0
+
+/* Test configuration. */
+
+#ifndef HMM_INC_CNFG_0
+#define HMM_INC_CNFG_0
+#define HMM_PROCESS
+#endif
+
+#elif HMM_CNFG_NUM == 1
+
+#ifndef HMM_INC_CNFG_1
+#define HMM_INC_CNFG_1
+#define HMM_PROCESS
+#endif
+
+#elif HMM_CNFG_NUM == 2
+
+#ifndef HMM_INC_CNFG_2
+#define HMM_INC_CNFG_2
+#define HMM_PROCESS
+#endif
+
+#elif HMM_CNFG_NUM == 3
+
+#ifndef HMM_INC_CNFG_3
+#define HMM_INC_CNFG_3
+#define HMM_PROCESS
+#endif
+
+#elif HMM_CNFG_NUM == 4
+
+#ifndef HMM_INC_CNFG_4
+#define HMM_INC_CNFG_4
+#define HMM_PROCESS
+#endif
+
+#elif HMM_CNFG_NUM == 5
+
+#ifndef HMM_INC_CNFG_5
+#define HMM_INC_CNFG_5
+#define HMM_PROCESS
+#endif
+
+#endif
+
+#ifdef HMM_PROCESS
+
+#include "hmm_cnfg.h"
+
/* Heap descriptor.  One instance describes one independently-managed
** heap; it is initialized by HMM_UNIQUE(init) and passed to every other
** entry point declared below. */
typedef struct HMM_UNIQUE(structure)
{
    /* private: */

    /* Pointer to (payload of) root node in AVL tree.  This field should
    ** really be the AVL tree descriptor (type avl_avl).  But (in the
    ** instantiation of the AVL tree generic package used in this
    ** package) the AVL tree descriptor simply contains a pointer to the
    ** root.  So, whenever a pointer to the AVL tree descriptor is
    ** needed, I use the cast:
    **
    **   (avl_avl *) &(heap_desc->avl_tree_root)
    **
    ** (where heap_desc is a pointer to a heap descriptor).  This trick
    ** allows me to avoid including cavl_if.h in this external header. */
    void *avl_tree_root;

    /* Pointer to first byte of last block freed, after any coalescing. */
    void *last_freed;

    /* public: */

    /* NOTE(review): these two fields appear to track how many BAUs the
    ** tail of the last chunk can be shrunk by, and where that shrinkable
    ** chunk ends -- confirm against the shrink-chunk implementation. */
    HMM_UNIQUE(size_bau) num_baus_can_shrink;
    void *end_of_shrinkable_chunk;
}
HMM_UNIQUE(descriptor);
+
+/* Prototypes for externally-callable functions. */
+
+void HMM_UNIQUE(init)(HMM_UNIQUE(descriptor) *desc);
+
+void *HMM_UNIQUE(alloc)(
+ HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) num_addr_align_units);
+
+/* NOT YET IMPLEMENTED */
+void *HMM_UNIQUE(greedy_alloc)(
+ HMM_UNIQUE(descriptor) *desc, HMM_UNIQUE(size_aau) needed_addr_align_units,
+ HMM_UNIQUE(size_aau) coveted_addr_align_units);
+
+int HMM_UNIQUE(resize)(
+ HMM_UNIQUE(descriptor) *desc, void *mem,
+ HMM_UNIQUE(size_aau) num_addr_align_units);
+
+/* NOT YET IMPLEMENTED */
+int HMM_UNIQUE(greedy_resize)(
+ HMM_UNIQUE(descriptor) *desc, void *mem,
+ HMM_UNIQUE(size_aau) needed_addr_align_units,
+ HMM_UNIQUE(size_aau) coveted_addr_align_units);
+
+void HMM_UNIQUE(free)(HMM_UNIQUE(descriptor) *desc, void *mem);
+
+HMM_UNIQUE(size_aau) HMM_UNIQUE(true_size)(void *mem);
+
+HMM_UNIQUE(size_aau) HMM_UNIQUE(largest_available)(
+ HMM_UNIQUE(descriptor) *desc);
+
+void HMM_UNIQUE(new_chunk)(
+ HMM_UNIQUE(descriptor) *desc, void *start_of_chunk,
+ HMM_UNIQUE(size_bau) num_block_align_units);
+
+void HMM_UNIQUE(grow_chunk)(
+ HMM_UNIQUE(descriptor) *desc, void *end_of_chunk,
+ HMM_UNIQUE(size_bau) num_block_align_units);
+
+/* NOT YET IMPLEMENTED */
+void HMM_UNIQUE(shrink_chunk)(
+ HMM_UNIQUE(descriptor) *desc,
+ HMM_UNIQUE(size_bau) num_block_align_units);
+
+#endif /* defined HMM_PROCESS */
diff --git a/vpx_mem/memory_manager/include/hmm_cnfg.h b/vpx_mem/memory_manager/include/hmm_cnfg.h
new file mode 100644
index 000000000..86e4e9fa8
--- /dev/null
+++ b/vpx_mem/memory_manager/include/hmm_cnfg.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* This code is in the public domain.
+** Version: 1.1 Author: Walt Karas
+*/
+
+/* Configure Heap Memory Manager for processor architecture, compiler,
+** and desired performance characteristics. This file is included
+** by heapmm.h, so these definitions can be used by code external to
+** HMM. You can change the default configuration, and/or create alternate
+** configuration(s).
+*/
+
+/* To allow for multiple configurations of HMM to be used in the same
+** compilation unit, undefine all preprocessor symbols that will be
+** defined below.
+*/
#undef HMM_ADDR_ALIGN_UNIT
#undef HMM_BLOCK_ALIGN_UNIT
#undef HMM_UNIQUE
#undef HMM_DESC_PARAM
#undef HMM_SYM_TO_STRING
#undef HMM_SYM_TO_STRING_2
#undef HMM_AUDIT_FAIL

/* Turn X into a string after one macro expansion pass of X.  Two
** DIFFERENT macro names are required: the outer macro expands its
** argument (so e.g. __LINE__ becomes the actual line number) before the
** inner macro stringizes it.  The original code defined the same name
** twice, so the second definition shadowed the first and
** HMM_SYM_TO_STRING(__LINE__) produced the literal string "__LINE__".
** This trick works with both GCC and Visual C++. */
#define HMM_SYM_TO_STRING_2(X) #X
#define HMM_SYM_TO_STRING(X) HMM_SYM_TO_STRING_2(X)
+
+#ifndef HMM_CNFG_NUM
+
+/* Default configuration. */
+
+/* Use hmm_ prefix to avoid identifier conflicts. */
+#define HMM_UNIQUE(BASE) hmm_ ## BASE
+
+/* Number of bytes in an Address Alignment Unit (AAU). */
+//fwg
+//#define HMM_ADDR_ALIGN_UNIT sizeof(int)
+#define HMM_ADDR_ALIGN_UNIT 32
+
+/* Number of AAUs in a Block Alignment Unit (BAU). */
+#define HMM_BLOCK_ALIGN_UNIT 1
+
+/* Type of unsigned integer big enough to hold the size of a Block in AAUs. */
+typedef unsigned long HMM_UNIQUE(size_aau);
+
+/* Type of unsigned integer big enough to hold the size of a Block/Chunk
+** in BAUs. The high bit will be robbed. */
+typedef unsigned long HMM_UNIQUE(size_bau);
+
+void hmm_dflt_abort(const char *, const char *);
+
+/* Actions upon a self-audit failure. Must expand to a single complete
+** statement. If you remove the definition of this macro, no self-auditing
+** will be performed. */
+#define HMM_AUDIT_FAIL \
+ hmm_dflt_abort(__FILE__, HMM_SYM_TO_STRING(__LINE__));
+
+#elif HMM_CNFG_NUM == 0
+
+/* Definitions for testing. */
+
+#define HMM_UNIQUE(BASE) thmm_ ## BASE
+
+#define HMM_ADDR_ALIGN_UNIT sizeof(int)
+
+#define HMM_BLOCK_ALIGN_UNIT 3
+
+typedef unsigned HMM_UNIQUE(size_aau);
+
+typedef unsigned short HMM_UNIQUE(size_bau);
+
+/* Under this test setup, a long jump is done if there is a self-audit
+** failure.
+*/
+
+extern jmp_buf HMM_UNIQUE(jmp_buf);
+extern const char *HMM_UNIQUE(fail_file);
+extern unsigned HMM_UNIQUE(fail_line);
+
+#define HMM_AUDIT_FAIL \
+ { HMM_UNIQUE(fail_file) = __FILE__; HMM_UNIQUE(fail_line) = __LINE__; \
+ longjmp(HMM_UNIQUE(jmp_buf), 1); }
+
+#elif HMM_CNFG_NUM == 1
+
+/* Put configuration 1 definitions here (if there is a configuration 1). */
+
+#elif HMM_CNFG_NUM == 2
+
+/* Put configuration 2 definitions here. */
+
+#elif HMM_CNFG_NUM == 3
+
+/* Put configuration 3 definitions here. */
+
+#elif HMM_CNFG_NUM == 4
+
+/* Put configuration 4 definitions here. */
+
+#elif HMM_CNFG_NUM == 5
+
+/* Put configuration 5 definitions here. */
+
+#endif
diff --git a/vpx_mem/memory_manager/include/hmm_intrnl.h b/vpx_mem/memory_manager/include/hmm_intrnl.h
new file mode 100644
index 000000000..6e2be08fc
--- /dev/null
+++ b/vpx_mem/memory_manager/include/hmm_intrnl.h
@@ -0,0 +1,160 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/* This code is in the public domain.
+** Version: 1.1 Author: Walt Karas
+*/
+
+#ifndef HMM_INTRNL_H_
+#define HMM_INTRNL_H_
+
+#ifdef __uClinux__
+# include <lddk.h>
+#endif
+
+#include "heapmm.h"
+
+#define U(BASE) HMM_UNIQUE(BASE)
+
+/* Mask of high bit of variable of size_bau type. */
+#define HIGH_BIT_BAU_SIZE \
+ ((U(size_bau)) ~ (((U(size_bau)) ~ (U(size_bau)) 0) >> 1))
+
+/* Add a given number of AAUs to pointer. */
+#define AAUS_FORWARD(PTR, AAU_OFFSET) \
+ (((char *) (PTR)) + ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT)))
+
+/* Subtract a given number of AAUs from pointer. */
+#define AAUS_BACKWARD(PTR, AAU_OFFSET) \
+ (((char *) (PTR)) - ((AAU_OFFSET) * ((U(size_aau)) HMM_ADDR_ALIGN_UNIT)))
+
+/* Add a given number of BAUs to a pointer. */
+#define BAUS_FORWARD(PTR, BAU_OFFSET) \
+ AAUS_FORWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT))
+
/* Subtract a given number of BAUs from a pointer. */
+#define BAUS_BACKWARD(PTR, BAU_OFFSET) \
+ AAUS_BACKWARD((PTR), (BAU_OFFSET) * ((U(size_aau)) HMM_BLOCK_ALIGN_UNIT))
+
/* Block header stored at the start of every block.  Sizes are in Block
** Alignment Units (BAUs).  The high bit of each size field doubles as a
** block-status flag (see HIGH_BIT_BAU_SIZE, IS_BLOCK_ALLOCATED and
** BLOCK_BAUS below), so sizes must be masked before use. */
typedef struct head_struct
{
    /* Sizes in Block Alignment Units. */
    HMM_UNIQUE(size_bau) previous_block_size, block_size;
}
head_record;
+
/* Pointer record embedded in a free block, located HEAD_AAUS after the
** block head (see HEAD_TO_PTR_REC / PTR_REC_TO_HEAD).  NOTE(review):
** prev/next presumably link free blocks into a list and "self" is used
** by the list/AVL code -- the exact semantics are defined in the
** implementation files, not visible in this header. */
typedef struct ptr_struct
{
    struct ptr_struct *self, *prev, *next;
}
ptr_record;
+
+/* Divide and round up any fraction to the next whole number. */
+#define DIV_ROUND_UP(NUMER, DENOM) (((NUMER) + (DENOM) - 1) / (DENOM))
+
+/* Number of AAUs in a block head. */
+#define HEAD_AAUS DIV_ROUND_UP(sizeof(head_record), HMM_ADDR_ALIGN_UNIT)
+
+/* Number of AAUs in a block pointer record. */
+#define PTR_RECORD_AAUS DIV_ROUND_UP(sizeof(ptr_record), HMM_ADDR_ALIGN_UNIT)
+
+/* Number of BAUs in a dummy end record (at end of chunk). */
+#define DUMMY_END_BLOCK_BAUS DIV_ROUND_UP(HEAD_AAUS, HMM_BLOCK_ALIGN_UNIT)
+
/* Minimum number of BAUs in a block (allowing room for the pointer record). */
+#define MIN_BLOCK_BAUS \
+ DIV_ROUND_UP(HEAD_AAUS + PTR_RECORD_AAUS, HMM_BLOCK_ALIGN_UNIT)
+
+/* Return number of BAUs in block (masking off high bit containing block
+** status). */
+#define BLOCK_BAUS(HEAD_PTR) \
+ (((head_record *) (HEAD_PTR))->block_size & ~HIGH_BIT_BAU_SIZE)
+
+/* Return number of BAUs in previous block (masking off high bit containing
+** block status). */
+#define PREV_BLOCK_BAUS(HEAD_PTR) \
+ (((head_record *) (HEAD_PTR))->previous_block_size & ~HIGH_BIT_BAU_SIZE)
+
+/* Set number of BAUs in previous block, preserving high bit containing
+** block status. */
+#define SET_PREV_BLOCK_BAUS(HEAD_PTR, N_BAUS) \
+ { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
+ h_ptr->previous_block_size &= HIGH_BIT_BAU_SIZE; \
+ h_ptr->previous_block_size |= (N_BAUS); }
+
+/* Convert pointer to pointer record of block to pointer to block's head
+** record. */
+#define PTR_REC_TO_HEAD(PTR_REC_PTR) \
+ ((head_record *) AAUS_BACKWARD(PTR_REC_PTR, HEAD_AAUS))
+
+/* Convert pointer to block head to pointer to block's pointer record. */
+#define HEAD_TO_PTR_REC(HEAD_PTR) \
+ ((ptr_record *) AAUS_FORWARD(HEAD_PTR, HEAD_AAUS))
+
+/* Returns non-zero if block is allocated. */
+#define IS_BLOCK_ALLOCATED(HEAD_PTR) \
+ (((((head_record *) (HEAD_PTR))->block_size | \
+ ((head_record *) (HEAD_PTR))->previous_block_size) & \
+ HIGH_BIT_BAU_SIZE) == 0)
+
+#define MARK_BLOCK_ALLOCATED(HEAD_PTR) \
+ { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
+ h_ptr->block_size &= ~HIGH_BIT_BAU_SIZE; \
+ h_ptr->previous_block_size &= ~HIGH_BIT_BAU_SIZE; }
+
+/* Mark a block as free when it is not the first block in a bin (and
+** therefore not a node in the AVL tree). */
+#define MARK_SUCCESSIVE_BLOCK_IN_FREE_BIN(HEAD_PTR) \
+ { register head_record *h_ptr = (head_record *) (HEAD_PTR); \
+ h_ptr->block_size |= HIGH_BIT_BAU_SIZE; }
+
+/* Prototypes for internal functions implemented in one file and called in
+** another.
+*/
+
+void U(into_free_collection)(U(descriptor) *desc, head_record *head_ptr);
+
+void U(out_of_free_collection)(U(descriptor) *desc, head_record *head_ptr);
+
+void *U(alloc_from_bin)(
+ U(descriptor) *desc, ptr_record *bin_front_ptr, U(size_bau) n_baus);
+
+#ifdef HMM_AUDIT_FAIL
+
+/* Simply contains a reference to the HMM_AUDIT_FAIL macro and a
+** dummy return. */
+int U(audit_block_fail_dummy_return)(void);
+
+
+/* Auditing a block consists of checking that the size in its head
+** matches the previous block size in the head of the next block. */
+#define AUDIT_BLOCK_AS_EXPR(HEAD_PTR) \
+ ((BLOCK_BAUS(HEAD_PTR) == \
+ PREV_BLOCK_BAUS(BAUS_FORWARD(HEAD_PTR, BLOCK_BAUS(HEAD_PTR)))) ? \
+ 0 : U(audit_block_fail_dummy_return)())
+
+#define AUDIT_BLOCK(HEAD_PTR) \
+ { void *h_ptr = (HEAD_PTR); AUDIT_BLOCK_AS_EXPR(h_ptr); }
+
+#endif
+
+/* Interface to AVL tree generic package instantiation. */
+
+#define AVL_UNIQUE(BASE) U(avl_ ## BASE)
+
+#define AVL_HANDLE ptr_record *
+
+#define AVL_KEY U(size_bau)
+
+#define AVL_MAX_DEPTH 64
+
+#include "cavl_if.h"
+
+#endif /* Include once. */
diff --git a/vpx_mem/nds/vpx_mem_nds.c b/vpx_mem/nds/vpx_mem_nds.c
new file mode 100644
index 000000000..f2a3043b2
--- /dev/null
+++ b/vpx_mem/nds/vpx_mem_nds.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+#define __VPX_MEM_C__
+#include "vpx_mem.h"
+#include <nitro.h>
+#include "vpx_mem_intrnl.h"
+
+// Allocate memory from the Arena specified by id. Align it to
+// the value specified by align.
+void *vpx_mem_nds_alloc(osarena_id id, osheap_handle handle, size_t size, size_t align)
+{
+ void *addr,
+ * x = NULL;
+
+ addr = os_alloc_from_heap((osarena_id) id, handle,
+ size + align - 1 + ADDRESS_STORAGE_SIZE);
+
+ if (addr)
+ {
+ x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
+
+ // save the actual malloc address
+ ((size_t *)x)[-1] = (size_t)addr;
+ }
+
+ return x;
+}
+
+// Free them memory allocated by vpx_mem_nds_alloc
+void vpx_mem_nds_free(osarena_id id, osheap_handle handle, void *mem)
+{
+ if (mem)
+ {
+ void *addr = (void *)(((size_t *)mem)[-1]);
+ os_free_to_heap(id, handle, addr);
+ }
+}
+
+int vpx_nds_alloc_heap(osarena_id id, u32 size)
+{
+ osheap_handle arena_handle;
+ void *nstart;
+ void *heap_start;
+
+ nstart = os_init_alloc(id, os_get_arena_lo(id), os_get_arena_hi(id), 1);
+ os_set_arena_lo(id, nstart);
+
+ heap_start = os_alloc_from_arena_lo(id, size, 32);
+ arena_handle = os_create_heap(id, heap_start, (void *)((u32)heap_start + size));
+
+ if (os_check_heap(id, arena_handle) == -1)
+ return -1; //ERROR: DTCM heap is not consistent
+
+ (void)os_set_current_heap(id, arena_handle);
+
+ return arena_handle;
+}
diff --git a/vpx_mem/ti_c6x/vpx_mem_ti_6cx.c b/vpx_mem/ti_c6x/vpx_mem_ti_6cx.c
new file mode 100644
index 000000000..6501855c0
--- /dev/null
+++ b/vpx_mem/ti_c6x/vpx_mem_ti_6cx.c
@@ -0,0 +1,115 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+#define __VPX_MEM_C__
+
+#include "..\include\vpx_mem.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "..\include\vpx_mem_intrnl.h"
+
/* Allocate "size" bytes aligned to "align" from the memory pool given
** by "id".  Returns NULL on failure.  On targets with neither
** CHIP_DM642 nor __uClinux__ defined this is a stub that always
** returns NULL. */
void *vpx_mem_alloc(int id, size_t size, size_t align)
{
#if defined CHIP_DM642 || defined __uClinux__
    void *mem = (void *)mem_alloc(id, size, align);

    if (!mem)
    {
        /* Bug fix: the original passed 'mem' (known to be NULL here) for
        ** the id conversion, and used %p/%u with mismatched argument
        ** types (undefined behavior).  Print the pool id with %d and
        ** cast the size_t values to match %u. */
        _P(fprintf(stderr,
                   "\n"
                   "*********************************************************\n"
                   "WARNING: mem_alloc returned 0 for id=%d size=%u align=%u.\n"
                   "*********************************************************\n",
                   id, (unsigned int)size, (unsigned int)align));
        // should no longer need this. Softier says it's fixed. 2005-01-21 tjf
        //#if defined __uClinux__
        //while(1)usleep(1000000);
        //#endif
    }

#if defined __uClinux__
    else if (mem == (void *)0xFFFFFFFF)
    {
        /* mem_alloc signals out of memory/error with an all-ones
        ** pointer; normalize it to NULL for callers. */
        mem = (void *)0;

        _P(fprintf(stderr,
                   "\n"
                   "******************************************************\n"
                   "ERROR: mem_alloc id=%d size=%u align=%u OUT OF MEMORY.\n"
                   "******************************************************\n",
                   id, (unsigned int)size, (unsigned int)align));
    }

#endif // __uClinux__

    return mem;
#else
    (void)id;
    (void)size;
    (void)align;
    return (void *)0;
#endif
}
+
/* Return "mem" (of "size" bytes) to pool "id", logging a warning when
** asked to free a null pointer.  On targets with neither CHIP_DM642 nor
** __uClinux__ defined this is a no-op stub. */
void vpx_mem_free(int id, void *mem, size_t size)
{
#if defined CHIP_DM642 || defined __uClinux__

    if (!mem)
    {
        /* Bug fix: 'id' is an int, so printing it with %p was undefined
        ** behavior; use %d, and cast the size_t to match %u. */
        _P(fprintf(stderr,
                   "\n"
                   "**************************************\n"
                   "WARNING: 0 being free'd id=%d size=%u.\n"
                   "**************************************\n",
                   id, (unsigned int)size));

        // should no longer need this. Softier says it's fixed. 2005-01-21 tjf
        //#if defined __uClinux__
        //while(1)usleep(1000000);
        //#endif
    }

    mem_free(id, mem, size);
#else
    (void)id;
    (void)mem;
    (void)size;
#endif
}
+
+#if CONFIG_MEM_TRACKER
/* Tracked variant of vpx_mem_alloc: records each allocation with the
** memory tracker together with the file/line of the call site. */
void *xvpx_mem_alloc(int id, size_t size, size_t align, char *file, int line)
{
    void *ptr = vpx_mem_alloc(id, size, align);

    vpx_memory_tracker_add((size_t)ptr, size, file, line, 0);

    return ptr;
}
+
/* Tracked variant of vpx_mem_free: removes the allocation record before
** releasing the memory, warning when the address was never tracked
** (vpx_memory_tracker_remove returns -2). */
void xvpx_mem_free(int id, void *mem, size_t size, char *file, int line)
{
    if (vpx_memory_tracker_remove((size_t)mem) == -2)
    {
#if REMOVE_PRINTFS
        (void)file;
        (void)line;
#endif
        /* Bug fix: 'id' is an int (printing it with %p was undefined
        ** behavior) and 'size' is a size_t (plain %u mismatched it);
        ** use %d for the id and cast the size to match %u. */
        _P(fprintf(stderr, "[vpx_mem][xvpx_mem_free] addr: %p (id=%d size=%u) "
                   "not found in list; freed from file:%s"
                   " line:%d\n", mem, id, (unsigned int)size, file, line));
    }

    vpx_mem_free(id, mem, size);
}
+#endif /*CONFIG_MEM_TRACKER*/
diff --git a/vpx_mem/vpx_mem.c b/vpx_mem/vpx_mem.c
new file mode 100644
index 000000000..f6b1a3550
--- /dev/null
+++ b/vpx_mem/vpx_mem.c
@@ -0,0 +1,719 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+#define __VPX_MEM_C__
+
+#include "vpx_mem.h"
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include "include/vpx_mem_intrnl.h"
+
+#if CONFIG_MEM_TRACKER
+#ifndef VPX_NO_GLOBALS
+static unsigned long g_alloc_count = 0;
+#else
+#include "vpx_global_handling.h"
+#define g_alloc_count vpxglobalm(vpxmem,g_alloc_count)
+#endif
+#endif
+
+#if CONFIG_MEM_MANAGER
+# include "heapmm.h"
+# include "hmm_intrnl.h"
+
+# define SHIFT_HMM_ADDR_ALIGN_UNIT 5
+# define TOTAL_MEMORY_TO_ALLOCATE 20971520 // 20 * 1024 * 1024
+
+# define MM_DYNAMIC_MEMORY 1
+# if MM_DYNAMIC_MEMORY
+static unsigned char *g_p_mng_memory_raw = NULL;
+static unsigned char *g_p_mng_memory = NULL;
+# else
+static unsigned char g_p_mng_memory[TOTAL_MEMORY_TO_ALLOCATE];
+# endif
+
+static size_t g_mm_memory_size = TOTAL_MEMORY_TO_ALLOCATE;
+
+static hmm_descriptor hmm_d;
+static int g_mng_memory_allocated = 0;
+
+static int vpx_mm_create_heap_memory();
+static void *vpx_mm_realloc(void *memblk, size_t size);
+#endif //CONFIG_MEM_MANAGER
+
+#if USE_GLOBAL_FUNCTION_POINTERS
+struct GLOBAL_FUNC_POINTERS
+{
+ g_malloc_func g_malloc;
+ g_calloc_func g_calloc;
+ g_realloc_func g_realloc;
+ g_free_func g_free;
+ g_memcpy_func g_memcpy;
+ g_memset_func g_memset;
+ g_memmove_func g_memmove;
+} *g_func = NULL;
+
+# define VPX_MALLOC_L g_func->g_malloc
+# define VPX_REALLOC_L g_func->g_realloc
+# define VPX_FREE_L g_func->g_free
+# define VPX_MEMCPY_L g_func->g_memcpy
+# define VPX_MEMSET_L g_func->g_memset
+# define VPX_MEMMOVE_L g_func->g_memmove
+#else
+# define VPX_MALLOC_L malloc
+# define VPX_REALLOC_L realloc
+# define VPX_FREE_L free
+# define VPX_MEMCPY_L memcpy
+# define VPX_MEMSET_L memset
+# define VPX_MEMMOVE_L memmove
+#endif // USE_GLOBAL_FUNCTION_POINTERS
+
+/* Returns the packed library version CHIEF<<24 | MAJOR<<16 | MINOR<<8 | PATCH,
+   chief version in the high-order byte (matches the header's description).
+   Definition now uses (void) to match the prototype in vpx_mem.h. */
+unsigned int vpx_mem_get_version(void)
+{
+    unsigned int ver = ((unsigned int)(unsigned char)VPX_MEM_VERSION_CHIEF << 24 |
+                        (unsigned int)(unsigned char)VPX_MEM_VERSION_MAJOR << 16 |
+                        (unsigned int)(unsigned char)VPX_MEM_VERSION_MINOR << 8 |
+                        (unsigned int)(unsigned char)VPX_MEM_VERSION_PATCH);
+    return ver;
+}
+
+/* Sets the memory manager's heap size before the heap is created.
+   Returns 0 on success; -1 if the memory manager is compiled out;
+   -2 if it uses static memory; -3 if the heap already exists or size==0
+   (see the matching contract in vpx_mem.h). */
+int vpx_mem_set_heap_size(size_t size)
+{
+    int ret = -1;
+
+#if CONFIG_MEM_MANAGER
+#if MM_DYNAMIC_MEMORY
+
+    if (!g_mng_memory_allocated && size)
+    {
+        g_mm_memory_size = size;
+        ret = 0;
+    }
+    else
+        ret = -3;
+
+#else
+    ret = -2;
+#endif
+#else
+    (void)size;
+#endif
+
+    return ret;
+}
+
+/* Aligned allocation. Over-allocates by align-1 plus ADDRESS_STORAGE_SIZE,
+   returns an address aligned to 'align', and stashes the raw allocation
+   pointer in the word immediately before the returned address so
+   vpx_free()/vpx_realloc() can recover it. Returns NULL on failure. */
+void *vpx_memalign(size_t align, size_t size)
+{
+    void *addr,
+         * x = NULL;
+
+#if CONFIG_MEM_MANAGER
+    int number_aau;
+
+    if (vpx_mm_create_heap_memory() < 0)
+    {
+        _P(printf("[vpx][mm] ERROR vpx_memalign() Couldn't create memory for Heap.\n");)
+    }
+
+    /* convert byte count to heap allocation-alignment units, +1 for slack */
+    number_aau = ((size + align - 1 + ADDRESS_STORAGE_SIZE) >>
+                  SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
+
+    addr = hmm_alloc(&hmm_d, number_aau);
+#else
+    addr = VPX_MALLOC_L(size + align - 1 + ADDRESS_STORAGE_SIZE);
+#endif //CONFIG_MEM_MANAGER
+
+    if (addr)
+    {
+        x = align_addr((unsigned char *)addr + ADDRESS_STORAGE_SIZE, (int)align);
+        /* save the actual malloc address */
+        ((size_t *)x)[-1] = (size_t)addr;
+    }
+
+    return x;
+}
+
+/* malloc() replacement: vpx_memalign() at the library's default alignment. */
+void *vpx_malloc(size_t size)
+{
+    return vpx_memalign(DEFAULT_ALIGNMENT, size);
+}
+
+/* calloc() replacement: allocates num*size bytes at the default alignment
+   and zero-fills them. Returns NULL on failure or if num*size would
+   overflow size_t (matching calloc(3) semantics, which must not return a
+   buffer shorter than requested). */
+void *vpx_calloc(size_t num, size_t size)
+{
+    void *x;
+
+    /* reject requests where num * size overflows size_t */
+    if (num && size > (size_t)-1 / num)
+        return NULL;
+
+    x = vpx_memalign(DEFAULT_ALIGNMENT, num * size);
+
+    if (x)
+        VPX_MEMSET_L(x, 0, num * size);
+
+    return x;
+}
+
+/* realloc() replacement aware of the hidden stored-address word written by
+   vpx_memalign(). Recovers the raw allocation pointer from memblk[-1],
+   reallocates, then re-aligns and re-stores the raw pointer. Returns NULL
+   when memblk is freed (size==0) or on allocation failure. */
+void *vpx_realloc(void *memblk, size_t size)
+{
+    void *addr,
+         * new_addr = NULL;
+    int align = DEFAULT_ALIGNMENT;
+
+    /*
+    The realloc() function changes the size of the object pointed to by
+    ptr to the size specified by size, and returns a pointer to the
+    possibly moved block. The contents are unchanged up to the lesser
+    of the new and old sizes. If ptr is null, realloc() behaves like
+    malloc() for the specified size. If size is zero (0) and ptr is
+    not a null pointer, the object pointed to is freed.
+    */
+    if (!memblk)
+        new_addr = vpx_malloc(size);
+    else if (!size)
+        vpx_free(memblk);
+    else
+    {
+        /* recover the raw allocation pointer stashed before memblk */
+        addr = (void *)(((size_t *)memblk)[-1]);
+        memblk = NULL;
+
+#if CONFIG_MEM_MANAGER
+        new_addr = vpx_mm_realloc(addr, size + align + ADDRESS_STORAGE_SIZE);
+#else
+        new_addr = VPX_REALLOC_L(addr, size + align + ADDRESS_STORAGE_SIZE);
+#endif
+
+        if (new_addr)
+        {
+            addr = new_addr;
+            /* re-align past the storage word and round up to 'align' */
+            new_addr = (void *)(((size_t)
+                                 ((unsigned char *)new_addr + ADDRESS_STORAGE_SIZE) + (align - 1)) &
+                                (size_t) - align);
+            /* save the actual malloc address */
+            ((size_t *)new_addr)[-1] = (size_t)addr;
+        }
+    }
+
+    return new_addr;
+}
+
+/* free() replacement: recovers the raw allocation pointer stored one word
+   before memblk by vpx_memalign() and releases it. NULL is a no-op. */
+void vpx_free(void *memblk)
+{
+    if (memblk)
+    {
+        void *addr = (void *)(((size_t *)memblk)[-1]);
+#if CONFIG_MEM_MANAGER
+        hmm_free(&hmm_d, addr);
+#else
+        VPX_FREE_L(addr);
+#endif
+    }
+}
+
+#if CONFIG_MEM_TRACKER
+/* Tracking variant of vpx_memalign(). Lazily initializes the memory tracker
+   on the first allocation. With TRY_BOUNDS_CHECK, allocates an extra pad of
+   BOUNDS_CHECK_PAD_SIZE bytes before and after the payload, fills both pads
+   with BOUNDS_CHECK_VALUE so later integrity checks can detect overruns, and
+   re-derives the aligned user pointer so the front pad precedes the stored
+   raw address. Registers the result (padded=1) and returns it, or NULL. */
+void *xvpx_memalign(size_t align, size_t size, char *file, int line)
+{
+#if TRY_BOUNDS_CHECK
+    unsigned char *x_bounds;
+#endif
+
+    void *x;
+
+    if (g_alloc_count == 0)
+    {
+#if TRY_BOUNDS_CHECK
+        int i_rv = vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE);
+#else
+        int i_rv = vpx_memory_tracker_init(0, 0);
+#endif
+
+        if (i_rv < 0)
+        {
+            _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");)
+        }
+    }
+
+#if TRY_BOUNDS_CHECK
+    {
+        int i;
+        unsigned int tempme = BOUNDS_CHECK_VALUE;
+
+        x_bounds = vpx_memalign(align, size + (BOUNDS_CHECK_PAD_SIZE * 2));
+
+        if (x_bounds)
+        {
+            /*we're aligning the address twice here but to keep things
+              consistent we want to have the padding come before the stored
+              address so no matter what free function gets called we will
+              attempt to free the correct address*/
+            x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]);
+            x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE,
+                           (int)align);
+            /* save the actual malloc address */
+            ((size_t *)x)[-1] = (size_t)x_bounds;
+
+            /* stamp the guard pattern into the front and back pads */
+            for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int))
+            {
+                VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int));
+                VPX_MEMCPY_L((unsigned char *)x + size + i,
+                             &tempme, sizeof(unsigned int));
+            }
+        }
+        else
+            x = NULL;
+    }
+#else
+    x = vpx_memalign(align, size);
+#endif //TRY_BOUNDS_CHECK
+
+    g_alloc_count++;
+
+    vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1);
+
+    return x;
+}
+
+/* Tracking malloc(): xvpx_memalign() at the default alignment. */
+void *xvpx_malloc(size_t size, char *file, int line)
+{
+    return xvpx_memalign(DEFAULT_ALIGNMENT, size, file, line);
+}
+
+/* Tracking calloc(): allocate num*size bytes and zero-fill.
+   NOTE(review): num * size is not checked for size_t overflow here --
+   confirm whether callers can pass attacker-controlled counts. */
+void *xvpx_calloc(size_t num, size_t size, char *file, int line)
+{
+    void *x = xvpx_memalign(DEFAULT_ALIGNMENT, num * size, file, line);
+
+    if (x)
+        VPX_MEMSET_L(x, 0, num * size);
+
+    return x;
+}
+
+/* Tracking realloc(). Saves the original tracker entry (size/file/line) so
+   it can be restored if the reallocation fails, removes the entry before
+   reallocating (the move may rewrite this block's guard pads), then performs
+   the padded reallocation and re-registers the result. */
+void *xvpx_realloc(void *memblk, size_t size, char *file, int line)
+{
+    struct mem_block *p = NULL;
+    int orig_size = 0,
+        orig_line = 0;
+    char *orig_file = NULL;
+
+#if TRY_BOUNDS_CHECK
+    /* raw allocation pointer stashed one word before the user pointer */
+    unsigned char *x_bounds = memblk ?
+                              (unsigned char *)(((size_t *)memblk)[-1]) :
+                              NULL;
+#endif
+
+    void *x;
+
+    if (g_alloc_count == 0)
+    {
+#if TRY_BOUNDS_CHECK
+
+        if (!vpx_memory_tracker_init(BOUNDS_CHECK_PAD_SIZE, BOUNDS_CHECK_VALUE))
+#else
+        if (!vpx_memory_tracker_init(0, 0))
+#endif
+        {
+            _P(printf("ERROR xvpx_malloc MEM_TRACK_USAGE error vpx_memory_tracker_init().\n");)
+        }
+    }
+
+    /* remember the existing entry so it can be re-added on failure */
+    if ((p = vpx_memory_tracker_find((size_t)memblk)))
+    {
+        orig_size = p->size;
+        orig_file = p->file;
+        orig_line = p->line;
+    }
+
+#if TRY_BOUNDS_CHECK_ON_FREE
+    vpx_memory_tracker_check_integrity(file, line);
+#endif
+
+    //have to do this regardless of success, because
+    //the memory that does get realloc'd may change
+    //the bounds values of this block
+    vpx_memory_tracker_remove((size_t)memblk);
+
+#if TRY_BOUNDS_CHECK
+    {
+        int i;
+        unsigned int tempme = BOUNDS_CHECK_VALUE;
+
+        x_bounds = vpx_realloc(memblk, size + (BOUNDS_CHECK_PAD_SIZE * 2));
+
+        if (x_bounds)
+        {
+            x_bounds = (unsigned char *)(((size_t *)x_bounds)[-1]);
+            x = align_addr(x_bounds + BOUNDS_CHECK_PAD_SIZE + ADDRESS_STORAGE_SIZE,
+                           (int)DEFAULT_ALIGNMENT);
+            /* save the actual malloc address */
+            ((size_t *)x)[-1] = (size_t)x_bounds;
+
+            /* re-stamp the guard pattern around the (possibly moved) block */
+            for (i = 0; i < BOUNDS_CHECK_PAD_SIZE; i += sizeof(unsigned int))
+            {
+                VPX_MEMCPY_L(x_bounds + i, &tempme, sizeof(unsigned int));
+                VPX_MEMCPY_L((unsigned char *)x + size + i,
+                             &tempme, sizeof(unsigned int));
+            }
+        }
+        else
+            x = NULL;
+    }
+#else
+    x = vpx_realloc(memblk, size);
+#endif //TRY_BOUNDS_CHECK
+
+    /* a NULL memblk behaves like malloc, so it creates a new allocation */
+    if (!memblk) ++g_alloc_count;
+
+    if (x)
+        vpx_memory_tracker_add((size_t)x, (unsigned int)size, file, line, 1);
+    else
+        vpx_memory_tracker_add((size_t)memblk, orig_size, orig_file, orig_line, 1);
+
+    return x;
+}
+
+/* Tracking free(). Removes the address from the tracker; an unregistered
+   address (-2) is assumed to come from the non-tracking vpx_ allocators and
+   is freed without padding adjustment. Destroys the tracker when the last
+   tracked allocation is released.
+   NOTE(review): the assignment to p_bounds_address in the -2 branch is
+   unconditional but the variable only exists when TRY_BOUNDS_CHECK is
+   enabled -- confirm this file is never built with TRY_BOUNDS_CHECK 0. */
+void xvpx_free(void *p_address, char *file, int line)
+{
+#if TRY_BOUNDS_CHECK
+    unsigned char *p_bounds_address = (unsigned char *)p_address;
+    //p_bounds_address -= BOUNDS_CHECK_PAD_SIZE;
+#endif
+
+#if !TRY_BOUNDS_CHECK_ON_FREE
+    (void)file;
+    (void)line;
+#endif
+
+    if (p_address)
+    {
+#if TRY_BOUNDS_CHECK_ON_FREE
+        vpx_memory_tracker_check_integrity(file, line);
+#endif
+
+        //if the addr isn't found in the list, assume it was allocated via
+        //vpx_ calls not xvpx_, therefore it does not contain any padding
+        if (vpx_memory_tracker_remove((size_t)p_address) == -2)
+        {
+            p_bounds_address = p_address;
+            _P(fprintf(stderr, "[vpx_mem][xvpx_free] addr: %p not found in"
+                       " list; freed from file:%s"
+                       " line:%d\n", p_address, file, line));
+        }
+        else
+            --g_alloc_count;
+
+#if TRY_BOUNDS_CHECK
+        vpx_free(p_bounds_address);
+#else
+        vpx_free(p_address);
+#endif
+
+        if (!g_alloc_count)
+            vpx_memory_tracker_destroy();
+    }
+}
+
+#endif /*CONFIG_MEM_TRACKER*/
+
+#if CONFIG_MEM_CHECKS
+#if defined(VXWORKS)
+#include <task_lib.h> //for task_delay()
+/* This function is only used to get a stack trace of the player
+object so we can see where we are having a problem. */
+static int get_my_tt(int task)
+{
+    tt(task);
+
+    return 0;
+}
+
+/* Sleep for approximately msec milliseconds by converting to VxWorks
+   clock ticks (rounding any nonzero duration up to at least one tick). */
+static void vx_sleep(int msec)
+{
+    int ticks_to_sleep = 0;
+
+    if (msec)
+    {
+        int msec_per_tick = 1000 / sys_clk_rate_get();
+
+        if (msec < msec_per_tick)
+            ticks_to_sleep++;
+        else
+            ticks_to_sleep = msec / msec_per_tick;
+    }
+
+    task_delay(ticks_to_sleep);
+}
+#endif
+#endif
+
+/* memcpy() wrapper. With CONFIG_MEM_CHECKS, warns when either pointer lies
+   in low memory (< 0x4000, likely a NULL-ish address) and on VxWorks dumps
+   a stack trace and stalls so the fault can be inspected.
+   NOTE(review): the (int) casts on pointers truncate on LP64 targets --
+   presumably these checked builds are 32-bit only; confirm. */
+void *vpx_memcpy(void *dest, const void *source, size_t length)
+{
+#if CONFIG_MEM_CHECKS
+
+    if (((int)dest < 0x4000) || ((int)source < 0x4000))
+    {
+        _P(printf("WARNING: vpx_memcpy dest:0x%x source:0x%x len:%d\n", (int)dest, (int)source, length);)
+
+#if defined(VXWORKS)
+        sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
+
+        vx_sleep(10000);
+#endif
+    }
+
+#endif
+
+    return VPX_MEMCPY_L(dest, source, length);
+}
+
+/* memset() wrapper; same low-address sanity check as vpx_memcpy(). */
+void *vpx_memset(void *dest, int val, size_t length)
+{
+#if CONFIG_MEM_CHECKS
+
+    if ((int)dest < 0x4000)
+    {
+        _P(printf("WARNING: vpx_memset dest:0x%x val:%d len:%d\n", (int)dest, val, length);)
+
+#if defined(VXWORKS)
+        sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
+
+        vx_sleep(10000);
+#endif
+    }
+
+#endif
+
+    return VPX_MEMSET_L(dest, val, length);
+}
+
+/* memmove() wrapper; same low-address sanity check as vpx_memcpy(). */
+void *vpx_memmove(void *dest, const void *src, size_t count)
+{
+#if CONFIG_MEM_CHECKS
+
+    if (((int)dest < 0x4000) || ((int)src < 0x4000))
+    {
+        _P(printf("WARNING: vpx_memmove dest:0x%x src:0x%x count:%d\n", (int)dest, (int)src, count);)
+
+#if defined(VXWORKS)
+        sp(get_my_tt, task_id_self(), 0, 0, 0, 0, 0, 0, 0, 0);
+
+        vx_sleep(10000);
+#endif
+    }
+
+#endif
+
+    return VPX_MEMMOVE_L(dest, src, count);
+}
+
+#if CONFIG_MEM_MANAGER
+
+/* Lazily creates the heap backing the memory manager: allocates (or uses the
+   static buffer for) g_mm_memory_size bytes, aligns the base to
+   HMM_ADDR_ALIGN_UNIT, initializes the hmm descriptor and donates one chunk.
+   Idempotent after the first successful call. Returns 0 on success, -1 if
+   the raw allocation failed.
+   NOTE(review): the (unsigned int) casts in the alignment math and printfs
+   truncate pointers on 64-bit targets -- confirm intended platforms. */
+static int vpx_mm_create_heap_memory()
+{
+    int i_rv = 0;
+
+    if (!g_mng_memory_allocated)
+    {
+#if MM_DYNAMIC_MEMORY
+        g_p_mng_memory_raw =
+            (unsigned char *)malloc(g_mm_memory_size + HMM_ADDR_ALIGN_UNIT);
+
+        if (g_p_mng_memory_raw)
+        {
+            /* round the raw base up to the heap's alignment unit */
+            g_p_mng_memory = (unsigned char *)((((unsigned int)g_p_mng_memory_raw) +
+                                                HMM_ADDR_ALIGN_UNIT - 1) &
+                                               -(int)HMM_ADDR_ALIGN_UNIT);
+
+            _P(printf("[vpx][mm] total memory size:%d g_p_mng_memory_raw:0x%x g_p_mng_memory:0x%x\n"
+                      , g_mm_memory_size + HMM_ADDR_ALIGN_UNIT
+                      , (unsigned int)g_p_mng_memory_raw
+                      , (unsigned int)g_p_mng_memory);)
+        }
+        else
+        {
+            _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n"
+                      , g_mm_memory_size);)
+
+            i_rv = -1;
+        }
+
+        if (g_p_mng_memory)
+#endif
+        {
+            int chunk_size = 0;
+
+            g_mng_memory_allocated = 1;
+
+            hmm_init(&hmm_d);
+
+            /* chunk size in allocation-alignment units, minus the heap's
+               reserved dummy end block */
+            chunk_size = g_mm_memory_size >> SHIFT_HMM_ADDR_ALIGN_UNIT;
+
+            chunk_size -= DUMMY_END_BLOCK_BAUS;
+
+            _P(printf("[vpx][mm] memory size:%d for vpx memory manager. g_p_mng_memory:0x%x chunk_size:%d\n"
+                      , g_mm_memory_size
+                      , (unsigned int)g_p_mng_memory
+                      , chunk_size);)
+
+            hmm_new_chunk(&hmm_d, (void *)g_p_mng_memory, chunk_size);
+        }
+
+#if MM_DYNAMIC_MEMORY
+        else
+        {
+            _P(printf("[vpx][mm] Couldn't allocate memory:%d for vpx memory manager.\n"
+                      , g_mm_memory_size);)
+
+            i_rv = -1;
+        }
+
+#endif
+    }
+
+    return i_rv;
+}
+
+/* Heap-manager realloc. Tries, in order: keep the block if its size in
+   allocation units is unchanged; grow/shrink in place via hmm_resize();
+   otherwise allocate a new block, copy, and free the old one. Returns NULL
+   if every strategy fails (the original block is then still valid).
+   NOTE(review): the fallback copies 'size' (the NEW size) bytes out of the
+   old block, which over-reads when growing -- confirm hmm block slack
+   makes this safe, or copy min(old, new) instead. */
+static void *vpx_mm_realloc(void *memblk, size_t size)
+{
+    void *p_ret = NULL;
+
+    if (vpx_mm_create_heap_memory() < 0)
+    {
+        _P(printf("[vpx][mm] ERROR vpx_mm_realloc() Couldn't create memory for Heap.\n");)
+    }
+    else
+    {
+        int i_rv = 0;
+        int old_num_aaus;
+        int new_num_aaus;
+
+        old_num_aaus = hmm_true_size(memblk);
+        new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
+
+        if (old_num_aaus == new_num_aaus)
+        {
+            p_ret = memblk;
+        }
+        else
+        {
+            i_rv = hmm_resize(&hmm_d, memblk, new_num_aaus);
+
+            if (i_rv == 0)
+            {
+                p_ret = memblk;
+            }
+            else
+            {
+                /* Error. Try to malloc and then copy data. */
+                void *p_from_malloc;
+
+                new_num_aaus = (size >> SHIFT_HMM_ADDR_ALIGN_UNIT) + 1;
+                p_from_malloc = hmm_alloc(&hmm_d, new_num_aaus);
+
+                if (p_from_malloc)
+                {
+                    vpx_memcpy(p_from_malloc, memblk, size);
+                    hmm_free(&hmm_d, memblk);
+
+                    p_ret = p_from_malloc;
+                }
+            }
+        }
+    }
+
+    return p_ret;
+}
+#endif //CONFIG_MEM_MANAGER
+
+#if USE_GLOBAL_FUNCTION_POINTERS
+# if CONFIG_MEM_TRACKER
+extern int vpx_memory_tracker_set_functions(g_malloc_func g_malloc_l
+ , g_calloc_func g_calloc_l
+ , g_realloc_func g_realloc_l
+ , g_free_func g_free_l
+ , g_memcpy_func g_memcpy_l
+ , g_memset_func g_memset_l
+ , g_memmove_func g_memmove_l);
+# endif
+#endif //USE_GLOBAL_FUNCTION_POINTERS
+/* Installs application-supplied allocation/copy functions into the global
+   function-pointer table (allocated here on first use via the caller's
+   malloc), and forwards them to the memory tracker when it is compiled in.
+   Returns 0 on success; -1 if the table allocation fails or the library was
+   built without USE_GLOBAL_FUNCTION_POINTERS; the tracker's negative error
+   code if its registration fails. */
+int vpx_mem_set_functions(g_malloc_func g_malloc_l
+                          , g_calloc_func g_calloc_l
+                          , g_realloc_func g_realloc_l
+                          , g_free_func g_free_l
+                          , g_memcpy_func g_memcpy_l
+                          , g_memset_func g_memset_l
+                          , g_memmove_func g_memmove_l)
+{
+#if USE_GLOBAL_FUNCTION_POINTERS
+
+    /* If use global functions is turned on then the
+       application must set the global functions before
+       it does anything else or vpx_mem will have
+       unpredictable results. */
+    if (!g_func)
+    {
+        g_func = (struct GLOBAL_FUNC_POINTERS *)
+                 g_malloc_l(sizeof(struct GLOBAL_FUNC_POINTERS));
+
+        if (!g_func)
+        {
+            return -1;
+        }
+    }
+
+#if CONFIG_MEM_TRACKER
+    {
+        int rv = 0;
+        rv = vpx_memory_tracker_set_functions(g_malloc_l
+                                              , g_calloc_l
+                                              , g_realloc_l
+                                              , g_free_l
+                                              , g_memcpy_l
+                                              , g_memset_l
+                                              , g_memmove_l);
+
+        if (rv < 0)
+        {
+            return rv;
+        }
+    }
+#endif
+
+    g_func->g_malloc = g_malloc_l;
+    g_func->g_calloc = g_calloc_l;
+    g_func->g_realloc = g_realloc_l;
+    g_func->g_free = g_free_l;
+    g_func->g_memcpy = g_memcpy_l;
+    g_func->g_memset = g_memset_l;
+    g_func->g_memmove = g_memmove_l;
+
+    return 0;
+#else
+    (void)g_malloc_l;
+    (void)g_calloc_l;
+    (void)g_realloc_l;
+    (void)g_free_l;
+    (void)g_memcpy_l;
+    (void)g_memset_l;
+    (void)g_memmove_l;
+    return -1;
+#endif
+}
+
+/* Releases the global function-pointer table (using the free function it
+   holds) and resets it so vpx_mem_set_functions() can be called again.
+   Always returns 0. Definition now uses (void) to match the prototype in
+   vpx_mem.h. */
+int vpx_mem_unset_functions(void)
+{
+#if USE_GLOBAL_FUNCTION_POINTERS
+
+    if (g_func)
+    {
+        /* grab the free function before freeing the table that holds it */
+        g_free_func temp_free = g_func->g_free;
+        temp_free(g_func);
+        g_func = NULL;
+    }
+
+#endif
+    return 0;
+}
diff --git a/vpx_mem/vpx_mem.h b/vpx_mem/vpx_mem.h
new file mode 100644
index 000000000..6ccb9be55
--- /dev/null
+++ b/vpx_mem/vpx_mem.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+#ifndef __VPX_MEM_H__
+#define __VPX_MEM_H__
+
+#if defined(__uClinux__)
+# include <lddk.h>
+#endif
+
+/* vpx_mem version info */
+#define vpx_mem_version "2.2.1.5"
+
+#define VPX_MEM_VERSION_CHIEF 2
+#define VPX_MEM_VERSION_MAJOR 2
+#define VPX_MEM_VERSION_MINOR 1
+#define VPX_MEM_VERSION_PATCH 5
+/* end - vpx_mem version info */
+
+#ifndef VPX_TRACK_MEM_USAGE
+# define VPX_TRACK_MEM_USAGE 0 //enable memory tracking/integrity checks
+#endif
+#ifndef VPX_CHECK_MEM_FUNCTIONS
+# define VPX_CHECK_MEM_FUNCTIONS 0 //enable basic safety checks in _memcpy,
+//_memset, and _memmove
+#endif
+#ifndef REPLACE_BUILTIN_FUNCTIONS
+# define REPLACE_BUILTIN_FUNCTIONS 0 //replace builtin functions with their
+//vpx_ equivalents
+#endif
+
+#include <stdlib.h>
+#include <stddef.h>
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+ /*
+ vpx_mem_get_version()
+ provided for runtime version checking. Returns an unsigned int of the form
+ CHIEF | MAJOR | MINOR | PATCH, where the chief version number is the high
+ order byte.
+ */
+ unsigned int vpx_mem_get_version(void);
+
+ /*
+ vpx_mem_set_heap_size(size_t size)
+ size - size in bytes for the memory manager to allocate for its heap
+ Sets the memory manager's initial heap size
+ Return:
+ 0: on success
+ -1: if memory manager calls have not been included in the vpx_mem lib
+ -2: if the memory manager has been compiled to use static memory
+ -3: if the memory manager has already allocated its heap
+ */
+ int vpx_mem_set_heap_size(size_t size);
+
+ void *vpx_memalign(size_t align, size_t size);
+ void *vpx_malloc(size_t size);
+ void *vpx_calloc(size_t num, size_t size);
+ void *vpx_realloc(void *memblk, size_t size);
+ void vpx_free(void *memblk);
+
+ void *vpx_memcpy(void *dest, const void *src, size_t length);
+ void *vpx_memset(void *dest, int val, size_t length);
+ void *vpx_memmove(void *dest, const void *src, size_t count);
+
+// special memory functions
+ void *vpx_mem_alloc(int id, size_t size, size_t align);
+ void vpx_mem_free(int id, void *mem, size_t size);
+
+ /* Wrappers to standard library functions. */
+ typedef void*(* g_malloc_func)(size_t);
+ typedef void*(* g_calloc_func)(size_t, size_t);
+ typedef void*(* g_realloc_func)(void *, size_t);
+ typedef void (* g_free_func)(void *);
+ typedef void*(* g_memcpy_func)(void *, const void *, size_t);
+ typedef void*(* g_memset_func)(void *, int, size_t);
+ typedef void*(* g_memmove_func)(void *, const void *, size_t);
+
+ int vpx_mem_set_functions(g_malloc_func g_malloc_l
+ , g_calloc_func g_calloc_l
+ , g_realloc_func g_realloc_l
+ , g_free_func g_free_l
+ , g_memcpy_func g_memcpy_l
+ , g_memset_func g_memset_l
+ , g_memmove_func g_memmove_l);
+ int vpx_mem_unset_functions(void);
+
+
+ /* some defines for backward compatibility */
+#define DMEM_GENERAL 0
+
+#define duck_memalign(X,Y,Z) vpx_memalign(X,Y)
+#define duck_malloc(X,Y) vpx_malloc(X)
+#define duck_calloc(X,Y,Z) vpx_calloc(X,Y)
+#define duck_realloc vpx_realloc
+#define duck_free vpx_free
+#define duck_memcpy vpx_memcpy
+#define duck_memmove vpx_memmove
+#define duck_memset vpx_memset
+
+#if REPLACE_BUILTIN_FUNCTIONS
+# ifndef __VPX_MEM_C__
+# define memalign vpx_memalign
+# define malloc vpx_malloc
+# define calloc vpx_calloc
+# define realloc vpx_realloc
+# define free vpx_free
+# define memcpy vpx_memcpy
+# define memmove vpx_memmove
+# define memset vpx_memset
+# endif
+#endif
+
+#if CONFIG_MEM_TRACKER
+#include <stdarg.h>
+ /*from vpx_mem/vpx_mem_tracker.c*/
+ extern void vpx_memory_tracker_dump();
+ extern void vpx_memory_tracker_check_integrity(char *file, unsigned int line);
+ extern int vpx_memory_tracker_set_log_type(int type, char *option);
+ extern int vpx_memory_tracker_set_log_func(void *userdata,
+ void(*logfunc)(void *userdata,
+ const char *fmt, va_list args));
+# ifndef __VPX_MEM_C__
+# define vpx_memalign(align, size) xvpx_memalign((align), (size), __FILE__, __LINE__)
+# define vpx_malloc(size) xvpx_malloc((size), __FILE__, __LINE__)
+# define vpx_calloc(num, size) xvpx_calloc(num, size, __FILE__, __LINE__)
+# define vpx_realloc(addr, size) xvpx_realloc(addr, size, __FILE__, __LINE__)
+# define vpx_free(addr) xvpx_free(addr, __FILE__, __LINE__)
+# define vpx_memory_tracker_check_integrity() vpx_memory_tracker_check_integrity(__FILE__, __LINE__)
+# define vpx_mem_alloc(id,size,align) xvpx_mem_alloc(id, size, align, __FILE__, __LINE__)
+# define vpx_mem_free(id,mem,size) xvpx_mem_free(id, mem, size, __FILE__, __LINE__)
+# endif
+
+ void *xvpx_memalign(size_t align, size_t size, char *file, int line);
+ void *xvpx_malloc(size_t size, char *file, int line);
+ void *xvpx_calloc(size_t num, size_t size, char *file, int line);
+ void *xvpx_realloc(void *memblk, size_t size, char *file, int line);
+ void xvpx_free(void *memblk, char *file, int line);
+ void *xvpx_mem_alloc(int id, size_t size, size_t align, char *file, int line);
+ void xvpx_mem_free(int id, void *mem, size_t size, char *file, int line);
+
+#else
+# ifndef __VPX_MEM_C__
+# define vpx_memory_tracker_dump()
+# define vpx_memory_tracker_check_integrity()
+# define vpx_memory_tracker_set_log_type(t,o) 0
+# define vpx_memory_tracker_set_log_func(u,f) 0
+# endif
+#endif
+
+#if !VPX_CHECK_MEM_FUNCTIONS
+# ifndef __VPX_MEM_C__
+# include <string.h>
+# define vpx_memcpy memcpy
+# define vpx_memset memset
+# define vpx_memmove memmove
+# endif
+#endif
+
+#ifdef VPX_MEM_PLTFRM
+# include VPX_MEM_PLTFRM
+#endif
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* __VPX_MEM_H__ */
diff --git a/vpx_mem/vpx_mem.mk b/vpx_mem/vpx_mem.mk
new file mode 100644
index 000000000..4663c5a91
--- /dev/null
+++ b/vpx_mem/vpx_mem.mk
@@ -0,0 +1,22 @@
+MEM_SRCS-yes += vpx_mem.mk
+MEM_SRCS-yes += vpx_mem.c
+MEM_SRCS-yes += vpx_mem.h
+MEM_SRCS-yes += include/vpx_mem_intrnl.h
+
+MEM_SRCS-$(CONFIG_MEM_TRACKER) += vpx_mem_tracker.c
+MEM_SRCS-$(CONFIG_MEM_TRACKER) += include/vpx_mem_tracker.h
+
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/hmm_true.c
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/hmm_resize.c
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/hmm_shrink.c
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/hmm_largest.c
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/hmm_dflt_abort.c
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/hmm_base.c
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/include
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/include/hmm_intrnl.h
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/include/cavl_if.h
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/include/hmm_cnfg.h
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/include/heapmm.h
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/include/cavl_impl.h
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/hmm_grow.c
+MEM_SRCS-$(CONFIG_MEM_MANAGER) += memory_manager/hmm_alloc.c
diff --git a/vpx_mem/vpx_mem_tracker.c b/vpx_mem/vpx_mem_tracker.c
new file mode 100644
index 000000000..4427e27fc
--- /dev/null
+++ b/vpx_mem/vpx_mem_tracker.c
@@ -0,0 +1,822 @@
+/*
+ * Copyright (c) 2010 The VP8 project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license and patent
+ * grant that can be found in the LICENSE file in the root of the source
+ * tree. All contributing project authors may be found in the AUTHORS
+ * file in the root of the source tree.
+ */
+
+
+/*
+ vpx_mem_tracker.c
+
+ jwz 2003-09-30:
+ Stores a list of addresses, their sizes, and the file and line they came from.
+ All exposed lib functions are prefaced by vpx_ and allow the global list
+ to be thread safe.
+ Current supported platforms are:
+ Linux, Win32, win_ce and vx_works
+ Further support can be added by defining the platform specific mutex
+ in the memory_tracker struct as well as calls to create/destroy/lock/unlock
+ the mutex in vpx_memory_tracker_init/Destroy and memory_tracker_lock_mutex/unlock_mutex
+*/
+#include "vpx_ports/config.h"
+
+#if defined(__uClinux__)
+# include <lddk.h>
+#endif
+
+#if HAVE_PTHREAD_H
+# include <pthread.h>
+#elif defined(WIN32) || defined(_WIN32_WCE)
+# define WIN32_LEAN_AND_MEAN
+# include <windows.h>
+# include <winbase.h>
+#elif defined(VXWORKS)
+# include <sem_lib.h>
+#elif defined(NDS_NITRO)
+# include <nitro.h>
+# include <nitro/os.h>
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h> //VXWORKS doesn't have a malloc/memory.h file,
+//this should pull in malloc,free,etc.
+#include <stdarg.h>
+
+#include "include/vpx_mem_tracker.h"
+
+#undef vpx_malloc //undefine any vpx_mem macros that may affect calls to
+#undef vpx_free //memory functions in this file
+#undef vpx_memcpy
+#undef vpx_memset
+
+
+#ifndef USE_GLOBAL_FUNCTION_POINTERS
+# define USE_GLOBAL_FUNCTION_POINTERS 0 //use function pointers instead of compiled functions.
+#endif
+
+#if USE_GLOBAL_FUNCTION_POINTERS
+static mem_track_malloc_func g_malloc = malloc;
+static mem_track_calloc_func g_calloc = calloc;
+static mem_track_realloc_func g_realloc = realloc;
+static mem_track_free_func g_free = free;
+static mem_track_memcpy_func g_memcpy = memcpy;
+static mem_track_memset_func g_memset = memset;
+static mem_track_memmove_func g_memmove = memmove;
+# define MEM_TRACK_MALLOC g_malloc
+# define MEM_TRACK_FREE g_free
+# define MEM_TRACK_MEMCPY g_memcpy
+# define MEM_TRACK_MEMSET g_memset
+#else
+# define MEM_TRACK_MALLOC vpx_malloc
+# define MEM_TRACK_FREE vpx_free
+# define MEM_TRACK_MEMCPY vpx_memcpy
+# define MEM_TRACK_MEMSET vpx_memset
+#endif // USE_GLOBAL_FUNCTION_POINTERS
+
+/* prototypes for internal library functions */
+static void memtrack_log(const char *fmt, ...);
+static void memory_tracker_dump();
+static void memory_tracker_check_integrity(char *file, unsigned int line);
+static void memory_tracker_add(size_t addr, unsigned int size,
+ char *file, unsigned int line,
+ int padded);
+static int memory_tracker_remove(size_t addr);
+static struct mem_block *memory_tracker_find(size_t addr);
+
+#if defined(NO_MUTEX)
+# define memory_tracker_lock_mutex() (!g_b_mem_tracker_inited)
+# define memory_tracker_unlock_mutex()
+#else
+static int memory_tracker_lock_mutex();
+static int memory_tracker_unlock_mutex();
+#endif
+
+#ifndef VPX_NO_GLOBALS
+struct memory_tracker
+{
+ struct mem_block *head,
+ * tail;
+ int len,
+ totalsize;
+ unsigned int current_allocated,
+ max_allocated;
+
+#if HAVE_PTHREAD_H
+ pthread_mutex_t mutex;
+#elif defined(WIN32) || defined(_WIN32_WCE)
+ HANDLE mutex;
+#elif defined(VXWORKS)
+ SEM_ID mutex;
+#elif defined(NDS_NITRO)
+ OSMutex mutex;
+#elif defined(NO_MUTEX)
+#else
+#error "No mutex type defined for this platform!"
+#endif
+
+ int padding_size,
+ pad_value;
+};
+
+static struct memory_tracker memtrack; //our global memory allocation list
+static int g_b_mem_tracker_inited = 0; //indicates whether the global list has
+//been initialized (1:yes/0:no)
+static struct
+{
+ FILE *file;
+ int type;
+ void (*func)(void *userdata, const char *fmt, va_list args);
+ void *userdata;
+} g_logging = {NULL, 0, NULL, NULL};
+#else
+# include "vpx_global_handling.h"
+#define g_b_mem_tracker_inited vpxglobalm(vpxmem,g_b_mem_tracker_inited)
+#define g_logging vpxglobalm(vpxmem,g_logging)
+#define memtrack vpxglobalm(vpxmem,memtrack)
+#endif // #ifndef VPX_NO_GLOBALS
+
+extern void *vpx_malloc(size_t size);
+extern void vpx_free(void *memblk);
+extern void *vpx_memcpy(void *dest, const void *src, size_t length);
+extern void *vpx_memset(void *dest, int val, size_t length);
+
+/*
+ *
+ * Exposed library functions
+ *
+*/
+
+/*
+ vpx_memory_tracker_init(int padding_size, int pad_value)
+ padding_size - the size of the padding before and after each mem addr.
+ Values > 0 indicate that integrity checks can be performed
+ by inspecting these areas.
+ pad_value - the initial value within the padding area before and after
+ each mem addr.
+
+ Initializes global memory tracker structure
+ Allocates the head of the list
+*/
+/* Allocates the tracker's sentinel head node, zeroes the counters, records
+   the padding configuration, and creates the platform mutex. Idempotent:
+   returns immediately when already initialized. Returns 1 when the tracker
+   is (now) initialized, 0 on failure. */
+int vpx_memory_tracker_init(int padding_size, int pad_value)
+{
+    if (!g_b_mem_tracker_inited)
+    {
+        if ((memtrack.head = (struct mem_block *)
+             MEM_TRACK_MALLOC(sizeof(struct mem_block))))
+        {
+            int ret;
+
+            MEM_TRACK_MEMSET(memtrack.head, 0, sizeof(struct mem_block));
+
+            memtrack.tail = memtrack.head;
+
+            memtrack.current_allocated = 0;
+            memtrack.max_allocated = 0;
+
+            memtrack.padding_size = padding_size;
+            memtrack.pad_value = pad_value;
+
+#if HAVE_PTHREAD_H
+            ret = pthread_mutex_init(&memtrack.mutex,
+                                     NULL); /*mutex attributes (NULL=default)*/
+#elif defined(WIN32) || defined(_WIN32_WCE)
+            memtrack.mutex = CreateMutex(NULL, /*security attributes*/
+                                         FALSE, /*we don't want initial ownership*/
+                                         NULL); /*mutex name*/
+            ret = !memtrack.mutex;
+#elif defined(VXWORKS)
+            memtrack.mutex = sem_bcreate(SEM_Q_FIFO, /*SEM_Q_FIFO non-priority based mutex*/
+                                         SEM_FULL); /*SEM_FULL initial state is unlocked*/
+            ret = !memtrack.mutex;
+#elif defined(NDS_NITRO)
+            os_init_mutex(&memtrack.mutex);
+            ret = 0;
+#elif defined(NO_MUTEX)
+            ret = 0;
+#endif
+
+            if (ret)
+            {
+                memtrack_log("vpx_memory_tracker_init: Error creating mutex!\n");
+
+                MEM_TRACK_FREE(memtrack.head);
+                memtrack.head = NULL;
+            }
+            else
+            {
+                memtrack_log("Memory Tracker init'd, v."vpx_mem_tracker_version" pad_size:%d pad_val:0x%x %d\n"
+                             , padding_size
+                             , pad_value
+                             , pad_value);
+                g_b_mem_tracker_inited = 1;
+            }
+        }
+    }
+
+    return g_b_mem_tracker_inited;
+}
+
+/*
+ vpx_memory_tracker_destroy()
+ If our global struct was initialized, zeros out all of its members,
+ frees memory and destroys its mutex
+*/
+/* Dumps any remaining (leaked) entries, frees every node including the
+   sentinel head, resets all counters, closes a tracker-owned log file,
+   and marks the tracker uninitialized. No-op if the mutex can't be taken. */
+void vpx_memory_tracker_destroy()
+{
+    if (!memory_tracker_lock_mutex())
+    {
+        struct mem_block *p = memtrack.head,
+                              * p2 = memtrack.head;
+
+        memory_tracker_dump();
+
+        while (p)
+        {
+            p2 = p;
+            p = p->next;
+
+            MEM_TRACK_FREE(p2);
+        }
+
+        memtrack.head = NULL;
+        memtrack.tail = NULL;
+        memtrack.len = 0;
+        memtrack.current_allocated = 0;
+        memtrack.max_allocated = 0;
+
+        /* only close files the tracker opened itself (type 0, not stderr) */
+        if (!g_logging.type && g_logging.file && g_logging.file != stderr)
+        {
+#if !defined(NDS_NITRO)
+            fclose(g_logging.file);
+#endif
+            g_logging.file = NULL;
+        }
+
+        memory_tracker_unlock_mutex();
+
+        g_b_mem_tracker_inited = 0;
+    }
+}
+
+/*
+ vpx_memory_tracker_add(size_t addr, unsigned int size,
+ char * file, unsigned int line)
+ addr - memory address to be added to list
+ size - size of addr
+ file - the file addr was referenced from
+ line - the line in file addr was referenced from
+ Adds memory address addr, its size, and the file and line it came from
+ to the global list via the thread safe internal library function
+*/
+/* Public wrapper: delegates to the thread-safe internal add routine. */
+void vpx_memory_tracker_add(size_t addr, unsigned int size,
+                            char *file, unsigned int line,
+                            int padded)
+{
+    memory_tracker_add(addr, size, file, line, padded);
+}
+
+/*
+ vpx_memory_tracker_remove(size_t addr)
+ addr - memory address to be removed from list
+ Removes addr from the global list via the thread safe
+ internal remove function
+ Return:
+ Same as described for memory_tracker_remove
+*/
+/* Public wrapper: delegates to the thread-safe internal remove routine. */
+int vpx_memory_tracker_remove(size_t addr)
+{
+    return memory_tracker_remove(addr);
+}
+
+/*
+ vpx_memory_tracker_find(size_t addr)
+ addr - address to be found in list
+ Return:
+ If found, pointer to the memory block that matches addr
+ NULL otherwise
+*/
+/* Looks up addr in the global list under the tracker mutex.
+   Returns the matching mem_block, or NULL if absent or the lock fails. */
+struct mem_block *vpx_memory_tracker_find(size_t addr)
+{
+    struct mem_block *p = NULL;
+
+    if (!memory_tracker_lock_mutex())
+    {
+        p = memory_tracker_find(addr);
+        memory_tracker_unlock_mutex();
+    }
+
+    return p;
+}
+
+/*
+ vpx_memory_tracker_dump()
+ Locks the memory tracker's mutex and calls the internal
+ library function to dump the current contents of the
+ global memory allocation list
+*/
+/* Dumps the current allocation list under the tracker mutex. */
+void vpx_memory_tracker_dump()
+{
+    if (!memory_tracker_lock_mutex())
+    {
+        memory_tracker_dump();
+        memory_tracker_unlock_mutex();
+    }
+}
+
/*
    vpx_memory_tracker_check_integrity(char* file, unsigned int line)
    file - the file name where the check was placed
    line - the line in file where the check was placed
    Thread-safe wrapper: takes the tracker mutex and runs the internal
    integrity check over every address in the global allocation list.
    Silently does nothing if the mutex cannot be locked.
*/
void vpx_memory_tracker_check_integrity(char *file, unsigned int line)
{
    if (memory_tracker_lock_mutex() == 0)
    {
        memory_tracker_check_integrity(file, line);
        memory_tracker_unlock_mutex();
    }
}
+
+/*
+ vpx_memory_tracker_set_log_type
+ Sets the logging type for the memory tracker. Based on the value it will
+ direct its output to the appropriate place.
+ Return:
+ 0: on success
+ -1: if the logging type could not be set, because the value was invalid
+ or because a file could not be opened
+*/
+int vpx_memory_tracker_set_log_type(int type, char *option)
+{
+ int ret = -1;
+
+ switch (type)
+ {
+ case 0:
+ g_logging.type = 0;
+
+ if (!option)
+ {
+ g_logging.file = stderr;
+ ret = 0;
+ }
+
+#if !defined(NDS_NITRO)
+ else
+ {
+ if ((g_logging.file = fopen((char *)option, "w")))
+ ret = 0;
+ }
+
+#endif
+ break;
+#if defined(WIN32) && !defined(_WIN32_WCE)
+ case 1:
+ g_logging.type = type;
+ ret = 0;
+ break;
+#endif
+ default:
+ break;
+ }
+
+ //output the version to the new logging destination
+ if (!ret)
+ memtrack_log("Memory Tracker logging initialized, "
+ "Memory Tracker v."vpx_mem_tracker_version"\n");
+
+ return ret;
+}
+
+/*
+ vpx_memory_tracker_set_log_func
+ Sets a logging function to be used by the memory tracker.
+ Return:
+ 0: on success
+ -1: if the logging type could not be set because logfunc was NULL
+*/
+int vpx_memory_tracker_set_log_func(void *userdata,
+ void(*logfunc)(void *userdata,
+ const char *fmt, va_list args))
+{
+ int ret = -1;
+
+ if (logfunc)
+ {
+ g_logging.type = -1;
+ g_logging.userdata = userdata;
+ g_logging.func = logfunc;
+ ret = 0;
+ }
+
+ //output the version to the new logging destination
+ if (!ret)
+ memtrack_log("Memory Tracker logging initialized, "
+ "Memory Tracker v."vpx_mem_tracker_version"\n");
+
+ return ret;
+}
+
+/*
+ *
+ * END - Exposed library functions
+ *
+*/
+
+
+/*
+ *
+ * Internal library functions
+ *
+*/
+
+static void memtrack_log(const char *fmt, ...)
+{
+ va_list list;
+
+ va_start(list, fmt);
+
+ switch (g_logging.type)
+ {
+ case -1:
+
+ if (g_logging.func)
+ g_logging.func(g_logging.userdata, fmt, list);
+
+ break;
+ case 0:
+
+ if (g_logging.file)
+ {
+ vfprintf(g_logging.file, fmt, list);
+ fflush(g_logging.file);
+ }
+
+ break;
+#if defined(WIN32) && !defined(_WIN32_WCE)
+ case 1:
+ {
+ char temp[1024];
+ _vsnprintf(temp, sizeof(temp) / sizeof(char) - 1, fmt, list);
+ OutputDebugString(temp);
+ }
+ break;
+#endif
+ default:
+ break;
+ }
+
+ va_end(list);
+}
+
+/*
+ memory_tracker_dump()
+ Dumps the current contents of the global memory allocation list
+*/
+static void memory_tracker_dump()
+{
+ int i = 0;
+ struct mem_block *p = (memtrack.head ? memtrack.head->next : NULL);
+
+ memtrack_log("\n_currently Allocated= %d; Max allocated= %d\n",
+ memtrack.current_allocated, memtrack.max_allocated);
+
+ while (p)
+ {
+#if defined(WIN32) && !defined(_WIN32_WCE)
+
+ /*when using outputdebugstring, output filenames so they
+ can be clicked to be opened in visual studio*/
+ if (g_logging.type == 1)
+ memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file:\n"
+ " %s(%d):\n", i,
+ p->addr, i, p->size,
+ p->file, p->line);
+ else
+#endif
+ memtrack_log("memblocks[%d].addr= 0x%.8x, memblocks[%d].size= %d, file: %s, line: %d\n", i,
+ p->addr, i, p->size,
+ p->file, p->line);
+
+#ifdef NDS_NITRO
+
+ if (!(i % 20)) os_sleep(500);
+
+#endif
+
+ p = p->next;
+ ++i;
+ }
+
+ memtrack_log("\n");
+}
+
/*
    memory_tracker_check_integrity(char* file, unsigned int file)
    file - the file name where the check was placed
    line - the line in file where the check was placed
    If a padding_size was supplied to vpx_memory_tracker_init()
    this function will check ea. addr in the list verifying that
    addr-padding_size and addr+padding_size is filled with pad_value
    NOTE(review): callers must hold the tracker mutex; this function
    does not lock it itself.
*/
static void memory_tracker_check_integrity(char *file, unsigned int line)
{
    /* no-op unless integrity padding was configured at init time */
    if (memtrack.padding_size)
    {
        int i,
            index = 0;
        unsigned char *p_show_me,
                 * p_show_me2;
        unsigned int tempme = memtrack.pad_value,
                     dead1,
                     dead2;
        unsigned char *x_bounds;
        /* memtrack.head is a dummy node; real entries start at head->next.
           Assumes head is non-NULL, i.e. the tracker was initialized. */
        struct mem_block *p = memtrack.head->next;

        while (p)
        {
            //x_bounds = (unsigned char*)p->addr;
            //back up VPX_BYTE_ALIGNMENT
            //x_bounds -= memtrack.padding_size;

            if (p->padded) // can the bounds be checked?
            {
                /*yes, move to the address that was actually allocated
                by the vpx_* calls*/
                /* NOTE(review): relies on the vpx_mem allocator stashing the
                   raw allocation pointer in the word immediately before the
                   user address -- confirm against vpx_mem.c's layout */
                x_bounds = (unsigned char *)(((size_t *)p->addr)[-1]);

                /* scan the front pad (from the raw base) and the rear pad
                   (just past the user region) one word at a time */
                for (i = 0; i < memtrack.padding_size; i += sizeof(unsigned int))
                {
                    p_show_me = (x_bounds + i);
                    p_show_me2 = (unsigned char *)(p->addr + p->size + i);

                    /* copy through MEM_TRACK_MEMCPY to avoid unaligned reads */
                    MEM_TRACK_MEMCPY(&dead1, p_show_me, sizeof(unsigned int));
                    MEM_TRACK_MEMCPY(&dead2, p_show_me2, sizeof(unsigned int));

                    /* any mismatch means something wrote outside its block */
                    if ((dead1 != tempme) || (dead2 != tempme))
                    {
                        memtrack_log("\n[vpx_mem integrity check failed]:\n"
                                     "    index[%d,%d] {%s:%d} addr=0x%x, size=%d,"
                                     " file: %s, line: %d c0:0x%x c1:0x%x\n",
                                     index, i, file, line, p->addr, p->size, p->file,
                                     p->line, dead1, dead2);
                    }
                }
            }

            ++index;
            p = p->next;
        }
    }
}
+
+/*
+ memory_tracker_add(size_t addr, unsigned int size,
+ char * file, unsigned int line)
+ Adds an address (addr), it's size, file and line number to our list.
+ Adjusts the total bytes allocated and max bytes allocated if necessary.
+ If memory cannot be allocated the list will be destroyed.
+*/
+void memory_tracker_add(size_t addr, unsigned int size,
+ char *file, unsigned int line,
+ int padded)
+{
+ if (!memory_tracker_lock_mutex())
+ {
+ struct mem_block *p;
+
+ p = MEM_TRACK_MALLOC(sizeof(struct mem_block));
+
+ if (p)
+ {
+ p->prev = memtrack.tail;
+ p->prev->next = p;
+ p->addr = addr;
+ p->size = size;
+ p->line = line;
+ p->file = file;
+ p->padded = padded;
+ p->next = NULL;
+
+ memtrack.tail = p;
+
+ memtrack.current_allocated += size;
+
+ if (memtrack.current_allocated > memtrack.max_allocated)
+ memtrack.max_allocated = memtrack.current_allocated;
+
+ //memtrack_log("memory_tracker_add: added addr=0x%.8x\n", addr);
+
+ memory_tracker_unlock_mutex();
+ }
+ else
+ {
+ memtrack_log("memory_tracker_add: error allocating memory!\n");
+ memory_tracker_unlock_mutex();
+ vpx_memory_tracker_destroy();
+ }
+ }
+}
+
+/*
+ memory_tracker_remove(size_t addr)
+ Removes an address and its corresponding size (if they exist)
+ from the memory tracker list and adjusts the current number
+ of bytes allocated.
+ Return:
+ 0: on success
+ -1: if the mutex could not be locked
+ -2: if the addr was not found in the list
+*/
+int memory_tracker_remove(size_t addr)
+{
+ int ret = -1;
+
+ if (!memory_tracker_lock_mutex())
+ {
+ struct mem_block *p;
+
+ if ((p = memory_tracker_find(addr)))
+ {
+ memtrack.current_allocated -= p->size;
+
+ p->prev->next = p->next;
+
+ if (p->next)
+ p->next->prev = p->prev;
+ else
+ memtrack.tail = p->prev;
+
+ ret = 0;
+ MEM_TRACK_FREE(p);
+ }
+ else
+ {
+ if (addr)
+ memtrack_log("memory_tracker_remove(): addr not found in list,"
+ " 0x%.8x\n", addr);
+
+ ret = -2;
+ }
+
+ memory_tracker_unlock_mutex();
+ }
+
+ return ret;
+}
+
+/*
+ memory_tracker_find(size_t addr)
+ Finds an address in our addrs list
+ NOTE: the mutex MUST be locked in the other internal
+ functions before calling this one. This avoids
+ the need for repeated locking and unlocking as in Remove
+ Returns: pointer to the mem block if found, NULL otherwise
+*/
+static struct mem_block *memory_tracker_find(size_t addr)
+{
+ struct mem_block *p = NULL;
+
+ if (memtrack.head)
+ {
+ p = memtrack.head->next;
+
+ while (p && (p->addr != addr))
+ p = p->next;
+ }
+
+ return p;
+}
+
+
+#if !defined(NO_MUTEX)
/*
    memory_tracker_lock_mutex()
    Locks the memory tracker mutex with a platform specific call
    (pthreads, Win32, VxWorks, or NDS Nitro, chosen at compile time).
    Fails immediately if the tracker has not been initialized.
    Returns:
        0: Success
       <0: Failure, either the mutex was not initialized
           or the call to lock the mutex failed
    NOTE(review): if none of the platform macros below match, ret stays
    -1 and locking always fails -- every caller then becomes a no-op.
*/
static int memory_tracker_lock_mutex()
{
    int ret = -1;

    if (g_b_mem_tracker_inited)
    {

#if HAVE_PTHREAD_H
        ret = pthread_mutex_lock(&memtrack.mutex);
#elif defined(WIN32) || defined(_WIN32_WCE)
        /* WAIT_OBJECT_0 is 0, so a zero return means the lock was taken */
        ret = WaitForSingleObject(memtrack.mutex, INFINITE);
#elif defined(VXWORKS)
        ret = sem_take(memtrack.mutex, WAIT_FOREVER);
#elif defined(NDS_NITRO)
        /* os_lock_mutex reports no status; assume success */
        os_lock_mutex(&memtrack.mutex);
        ret = 0;
#endif

        if (ret)
        {
            memtrack_log("memory_tracker_lock_mutex: mutex lock failed\n");
        }
    }

    return ret;
}
+
/*
    memory_tracker_unlock_mutex()
    Unlocks the memory tracker mutex with a platform specific call
    (pthreads, Win32, VxWorks, or NDS Nitro, chosen at compile time).
    Returns:
        0: Success
       <0: Failure, either the mutex was not initialized
           or the call to unlock the mutex failed
*/
static int memory_tracker_unlock_mutex()
{
    int ret = -1;

    if (g_b_mem_tracker_inited)
    {

#if HAVE_PTHREAD_H
        ret = pthread_mutex_unlock(&memtrack.mutex);
#elif defined(WIN32) || defined(_WIN32_WCE)
        /* ReleaseMutex returns nonzero on success; invert to 0-on-success */
        ret = !ReleaseMutex(memtrack.mutex);
#elif defined(VXWORKS)
        ret = sem_give(memtrack.mutex);
#elif defined(NDS_NITRO)
        /* os_unlock_mutex reports no status; assume success */
        os_unlock_mutex(&memtrack.mutex);
        ret = 0;
#endif

        if (ret)
        {
            memtrack_log("memory_tracker_unlock_mutex: mutex unlock failed\n");
        }
    }

    return ret;
}
+#endif
+
/*
    vpx_memory_tracker_set_functions

    Sets the function pointers for the standard library functions
    (malloc/calloc/realloc/free/memcpy/memset/memmove) used internally
    by the tracker. Any argument passed as NULL leaves the corresponding
    pointer unchanged, so callers may override a subset.

    Return:
        0: on success
       -1: if the use global function pointers is not set
           (USE_GLOBAL_FUNCTION_POINTERS compiled out; all arguments
           are ignored in that configuration).
*/
int vpx_memory_tracker_set_functions(mem_track_malloc_func g_malloc_l
                                     , mem_track_calloc_func g_calloc_l
                                     , mem_track_realloc_func g_realloc_l
                                     , mem_track_free_func g_free_l
                                     , mem_track_memcpy_func g_memcpy_l
                                     , mem_track_memset_func g_memset_l
                                     , mem_track_memmove_func g_memmove_l)
{
#if USE_GLOBAL_FUNCTION_POINTERS

    /* NULL means "keep the current function" for each slot */
    if (g_malloc_l)
        g_malloc = g_malloc_l;

    if (g_calloc_l)
        g_calloc = g_calloc_l;

    if (g_realloc_l)
        g_realloc = g_realloc_l;

    if (g_free_l)
        g_free = g_free_l;

    if (g_memcpy_l)
        g_memcpy = g_memcpy_l;

    if (g_memset_l)
        g_memset = g_memset_l;

    if (g_memmove_l)
        g_memmove = g_memmove_l;

    return 0;
#else
    /* feature compiled out: silence unused-parameter warnings */
    (void)g_malloc_l;
    (void)g_calloc_l;
    (void)g_realloc_l;
    (void)g_free_l;
    (void)g_memcpy_l;
    (void)g_memset_l;
    (void)g_memmove_l;
    return -1;
#endif
}