author     Yunqing Wang <yunqingwang@google.com>  2011-04-27 13:40:39 -0400
committer  Yunqing Wang <yunqingwang@google.com>  2011-04-27 13:53:28 -0400
commit     5abafcc38115eab70d77109d26be151205fd2172 (patch)
tree       fe5e26ecc731c36915476488a30addaecdece6bd /vp8
parent     0da77a840b79c8037272687ba5fa8c3e01885572 (diff)
Use insertion sort instead of quick sort
Insertion sort performs better than quicksort for sorting small arrays. In real-time encoding (speed=-5), a test on the test set showed a 1.7% performance gain with 0% PSNR change on average.

Change-Id: Ie02eaa6fed662866a937299194c590d41b25bc3d
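For context, the arrays being sorted here are tiny: at most eight SAD values in vp8_cal_sad() and a handful of motion-vector components in vp8_mv_pred(). At that size, insertion sort's recursion-free inner loop beats quicksort's partitioning overhead, and its O(n^2) worst case never matters. A minimal, self-contained sketch of the new call pattern in vp8_mv_pred() (insertsortmv is copied from the diff below; the motion-vector values are made up for illustration):

    #include <stdio.h>

    /* Ascending insertion sort, as introduced by this patch. */
    static void insertsortmv(int arr[], int len)
    {
        int i, j, k;

        for (i = 1; i <= len - 1; i++)
        {
            for (j = 0; j < i; j++)
            {
                if (arr[j] > arr[i])
                {
                    int temp = arr[i];

                    /* Shift arr[j..i-1] up one slot, then drop arr[i] into place. */
                    for (k = i; k > j; k--)
                        arr[k] = arr[k - 1];

                    arr[j] = temp;
                }
            }
        }
    }

    int main(void)
    {
        /* Hypothetical row/col components of nearby motion vectors. */
        int mvx[5] = { 4, -2, 0, 7, -2 };
        int mvy[5] = { 1, 1, -3, 2, 0 };
        int vcnt = 5;

        /* New call pattern: pass the element count, not index bounds. */
        insertsortmv(mvx, vcnt);
        insertsortmv(mvy, vcnt);

        /* The median of each component becomes the predicted MV,
         * exactly as vp8_mv_pred() does after this change. */
        printf("pred mv = (%d, %d)\n", mvx[vcnt / 2], mvy[vcnt / 2]);
        return 0;
    }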
Diffstat (limited to 'vp8')
-rw-r--r--  vp8/encoder/rdopt.c  125
1 file changed, 47 insertions(+), 78 deletions(-)
diff --git a/vp8/encoder/rdopt.c b/vp8/encoder/rdopt.c
index e99d6f0d1..d4294648c 100644
--- a/vp8/encoder/rdopt.c
+++ b/vp8/encoder/rdopt.c
@@ -1430,86 +1430,55 @@ static int vp8_rd_pick_best_mbsegmentation(VP8_COMP *cpi, MACROBLOCK *x,
     return bsi.segment_rd;
 }
 
-static void swap(int *x,int *y)
+static void insertsortmv(int arr[], int len)
 {
-    int tmp;
+    int i, j, k;
 
-    tmp = *x;
-    *x = *y;
-    *y = tmp;
-}
+    for ( i = 1 ; i <= len-1 ; i++ )
+    {
+        for ( j = 0 ; j < i ; j++ )
+        {
+            if ( arr[j] > arr[i] )
+            {
+                int temp;
 
-static void quicksortmv(int arr[],int left, int right)
-{
-    int lidx,ridx,pivot;
-
-    lidx = left;
-    ridx = right;
-
-    if( left < right)
-    {
-        pivot = (left + right)/2;
-
-        while(lidx <=pivot && ridx >=pivot)
-        {
-            while(arr[lidx] < arr[pivot] && lidx <= pivot)
-                lidx++;
-            while(arr[ridx] > arr[pivot] && ridx >= pivot)
-                ridx--;
-            swap(&arr[lidx], &arr[ridx]);
-            lidx++;
-            ridx--;
-            if(lidx-1 == pivot)
-            {
-                ridx++;
-                pivot = ridx;
-            }
-            else if(ridx+1 == pivot)
-            {
-                lidx--;
-                pivot = lidx;
-            }
-        }
-        quicksortmv(arr, left, pivot - 1);
-        quicksortmv(arr, pivot + 1, right);
-    }
+                temp = arr[i];
+
+                for ( k = i; k > j; k-- )
+                    arr[k] = arr[k - 1];
+
+                arr[j] = temp;
+            }
+        }
+    }
 }
 
-static void quicksortsad(int arr[],int idx[], int left, int right)
+static void insertsortsad(int arr[], int idx[], int len)
 {
-    int lidx,ridx,pivot;
-
-    lidx = left;
-    ridx = right;
-
-    if( left < right)
-    {
-        pivot = (left + right)/2;
-
-        while(lidx <=pivot && ridx >=pivot)
-        {
-            while(arr[lidx] < arr[pivot] && lidx <= pivot)
-                lidx++;
-            while(arr[ridx] > arr[pivot] && ridx >= pivot)
-                ridx--;
-            swap(&arr[lidx], &arr[ridx]);
-            swap(&idx[lidx], &idx[ridx]);
-            lidx++;
-            ridx--;
-            if(lidx-1 == pivot)
-            {
-                ridx++;
-                pivot = ridx;
-            }
-            else if(ridx+1 == pivot)
-            {
-                lidx--;
-                pivot = lidx;
-            }
-        }
-        quicksortsad(arr, idx, left, pivot - 1);
-        quicksortsad(arr, idx, pivot + 1, right);
-    }
+    int i, j, k;
+
+    for ( i = 1 ; i <= len-1 ; i++ )
+    {
+        for ( j = 0 ; j < i ; j++ )
+        {
+            if ( arr[j] > arr[i] )
+            {
+                int temp, tempi;
+
+                temp = arr[i];
+                tempi = idx[i];
+
+                for ( k = i; k > j; k-- )
+                {
+                    arr[k] = arr[k - 1];
+                    idx[k] = idx[k - 1];
+                }
+
+                arr[j] = temp;
+                idx[j] = tempi;
+            }
+        }
+    }
 }
 
 //The improved MV prediction
@@ -1645,8 +1614,8 @@ void vp8_mv_pred
         mvy[i] = near_mvs[i].as_mv.col;
     }
 
-    quicksortmv (mvx, 0, vcnt-1);
-    quicksortmv (mvy, 0, vcnt-1);
+    insertsortmv(mvx, vcnt);
+    insertsortmv(mvy, vcnt);
 
     mv.as_mv.row = mvx[vcnt/2];
     mv.as_mv.col = mvy[vcnt/2];
@@ -1709,10 +1678,10 @@ void vp8_cal_sad(VP8_COMP *cpi, MACROBLOCKD *xd, MACROBLOCK *x, int recon_yoffse
     if(cpi->common.last_frame_type != KEY_FRAME)
     {
-        quicksortsad(near_sad, near_sadidx, 0, 7);
+        insertsortsad(near_sad, near_sadidx, 8);
     }else
     {
-        quicksortsad(near_sad, near_sadidx, 0, 2);
+        insertsortsad(near_sad, near_sadidx, 3);
     }
 }
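The keyed variant deserves the same treatment: in vp8_cal_sad() the companion near_sadidx[] array records which candidate produced each SAD, so after sorting, near_sadidx[0] names the best candidate. Note that the new length argument replaces quicksort's inclusive index bounds (0..7 becomes 8, 0..2 becomes 3). A self-contained sketch, with the function body mirroring the diff above and made-up SAD values:

    #include <stdio.h>

    /* Sort arr[] ascending and apply the same permutation to idx[],
     * mirroring insertsortsad() from this patch. */
    static void insertsortsad(int arr[], int idx[], int len)
    {
        int i, j, k;

        for (i = 1; i <= len - 1; i++)
        {
            for (j = 0; j < i; j++)
            {
                if (arr[j] > arr[i])
                {
                    int temp = arr[i], tempi = idx[i];

                    /* Shift both arrays up one slot together. */
                    for (k = i; k > j; k--)
                    {
                        arr[k] = arr[k - 1];
                        idx[k] = idx[k - 1];
                    }

                    arr[j] = temp;
                    idx[j] = tempi;
                }
            }
        }
    }

    int main(void)
    {
        /* Hypothetical SADs for eight candidate predictors. */
        int near_sad[8]    = { 900, 120, 450, 450, 80, 700, 300, 210 };
        int near_sadidx[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
        int i;

        insertsortsad(near_sad, near_sadidx, 8);

        /* near_sadidx[] now ranks candidates best-first:
         * candidate 4 (SAD 80) comes out on top. */
        for (i = 0; i < 8; i++)
            printf("sad=%3d candidate=%d\n", near_sad[i], near_sadidx[i]);

        return 0;
    }

Because the comparison is strict (arr[j] > arr[i]), equal SADs keep their original candidate order, which the recursive quicksort did not guarantee.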