#ifndef FFMPEG_DSPUTIL_H
#define FFMPEG_DSPUTIL_H

#include "avcodec.h"

typedef short DCTELEM;
typedef int DWTELEM;
typedef short IDWTELEM;

/* forward DCTs */
void fdct_ifast (DCTELEM *data);
void fdct_ifast248 (DCTELEM *data);
void ff_jpeg_fdct_islow (DCTELEM *data);
void ff_fdct248_islow (DCTELEM *data);

/* inverse DCTs (full and reduced-size variants) */
void j_rev_dct (DCTELEM *data);
void j_rev_dct4 (DCTELEM *data);
void j_rev_dct2 (DCTELEM *data);
void j_rev_dct1 (DCTELEM *data);
void ff_wmv2_idct_c(DCTELEM *data);

/* x86 forward DCTs */
void ff_fdct_mmx(DCTELEM *block);
void ff_fdct_mmx2(DCTELEM *block);
void ff_fdct_sse2(DCTELEM *block);

/* H.264 IDCTs, C reference versions */
void ff_h264_idct8_add_c(uint8_t *dst, DCTELEM *block, int stride);
void ff_h264_idct_add_c(uint8_t *dst, DCTELEM *block, int stride);
void ff_h264_idct8_dc_add_c(uint8_t *dst, DCTELEM *block, int stride);
void ff_h264_idct_dc_add_c(uint8_t *dst, DCTELEM *block, int stride);
void ff_h264_lowres_idct_add_c(uint8_t *dst, int stride, DCTELEM *block);
void ff_h264_lowres_idct_put_c(uint8_t *dst, int stride, DCTELEM *block);

/* dst[i*step] = src0[i]*src1[i] + src2[i] + src3 for i in [0, blocksize) */
void ff_vector_fmul_add_add_c(float *dst, const float *src0, const float *src1,
                              const float *src2, int src3, int blocksize, int step);
void ff_float_to_int16_c(int16_t *dst, const float *src, int len);

/* encoding scan patterns */
extern const uint8_t ff_alternate_horizontal_scan[64];
extern const uint8_t ff_alternate_vertical_scan[64];
extern const uint8_t ff_zigzag_direct[64];
extern const uint8_t ff_zigzag248_direct[64];

#define MAX_NEG_CROP 1024

extern uint32_t ff_squareTbl[512];
extern uint8_t ff_cropTbl[256 + 2 * MAX_NEG_CROP];

/* VP3 DSP functions */
void ff_vp3_idct_c(DCTELEM *block);
void ff_vp3_idct_put_c(uint8_t *dest, int line_size, DCTELEM *block);
void ff_vp3_idct_add_c(uint8_t *dest, int line_size, DCTELEM *block);

/* plane copy and 2:1, 4:1, 8:1 planar shrink helpers */
void ff_img_copy_plane(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
void ff_shrink22(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
void ff_shrink44(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);
void ff_shrink88(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);

/* global motion compensation, C version */
void ff_gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
              int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height);
/* motion compensation and pixel operation function pointer types */
typedef void (*op_pixels_func)(uint8_t *block, const uint8_t *pixels, int line_size, int h);
typedef void (*tpel_mc_func)(uint8_t *block, const uint8_t *pixels, int line_size, int w, int h);
typedef void (*qpel_mc_func)(uint8_t *dst, uint8_t *src, int stride);
typedef void (*h264_chroma_mc_func)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x, int y);
typedef void (*h264_weight_func)(uint8_t *block, int stride, int log2_denom, int weight, int offset);
typedef void (*h264_biweight_func)(uint8_t *dst, uint8_t *src, int stride, int log2_denom, int weightd, int weights, int offset);

#define DEF_OLD_QPEL(name)\
void ff_put_ ## name (uint8_t *dst, uint8_t *src, int stride);\
void ff_put_no_rnd_ ## name (uint8_t *dst, uint8_t *src, int stride);\
void ff_avg_ ## name (uint8_t *dst, uint8_t *src, int stride);

DEF_OLD_QPEL(qpel16_mc11_old_c)
DEF_OLD_QPEL(qpel16_mc31_old_c)
DEF_OLD_QPEL(qpel16_mc12_old_c)
DEF_OLD_QPEL(qpel16_mc32_old_c)
DEF_OLD_QPEL(qpel16_mc13_old_c)
DEF_OLD_QPEL(qpel16_mc33_old_c)
DEF_OLD_QPEL(qpel8_mc11_old_c)
DEF_OLD_QPEL(qpel8_mc31_old_c)
DEF_OLD_QPEL(qpel8_mc12_old_c)
DEF_OLD_QPEL(qpel8_mc32_old_c)
DEF_OLD_QPEL(qpel8_mc13_old_c)
DEF_OLD_QPEL(qpel8_mc33_old_c)

/* defines a(block, pixels, line_size, h) that applies the width-n function b
 * twice, side by side, covering 2*n pixels per row */
#define CALL_2X_PIXELS(a, b, n)\
static void a(uint8_t *block, const uint8_t *pixels, int line_size, int h){\
    b(block  , pixels  , line_size, h);\
    b(block+n, pixels+n, line_size, h);\
}
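/*
 * Example (illustrative): CALL_2X_PIXELS builds a 16-pixel-wide operation from
 * an 8-pixel-wide one by invoking it twice with an 8-byte offset. The name
 * put_pixels16_example is hypothetical; an 8-wide routine with the
 * op_pixels_func signature (such as put_pixels8_c in dsputil.c) is assumed to
 * be in scope.
 *
 * @code
 * CALL_2X_PIXELS(put_pixels16_example, put_pixels8_c, 8)
 * // put_pixels16_example(dst, src, stride, 16) now copies a 16x16 block.
 * @endcode
 */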
/* motion estimation comparison function; h is limited to {width/2, width, 2*width}
 * but is never larger than 16 and never smaller than 4 */
typedef int (*me_cmp_func)(void *s, uint8_t *blk1, uint8_t *blk2, int line_size, int h);

typedef struct slice_buffer_s slice_buffer;
/**
 * DSPContext: table of DSP function pointers, filled with C or CPU-optimized
 * implementations by dsputil_init().
 */
typedef struct DSPContext {
    /* pixel ops : interface with DCT */
    void (*get_pixels)(DCTELEM *block, const uint8_t *pixels, int line_size);
    void (*diff_pixels)(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride);
    void (*put_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size);
    void (*put_signed_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size);
    void (*add_pixels_clamped)(const DCTELEM *block, uint8_t *pixels, int line_size);
    void (*add_pixels8)(uint8_t *pixels, DCTELEM *block, int line_size);
    void (*add_pixels4)(uint8_t *pixels, DCTELEM *block, int line_size);
    int (*sum_abs_dctelem)(DCTELEM *block);
    /* translational global motion compensation */
    void (*gmc1)(uint8_t *dst, uint8_t *src, int srcStride, int h, int x16, int y16, int rounder);
    /* global motion compensation */
    void (*gmc)(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy,
                int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height);
    void (*clear_blocks)(DCTELEM *blocks);
    int (*pix_sum)(uint8_t *pix, int line_size);
    int (*pix_norm1)(uint8_t *pix, int line_size);

    /* motion estimation comparison functions */
    me_cmp_func sad[5];
    me_cmp_func sse[5];
    me_cmp_func hadamard8_diff[5];
    me_cmp_func dct_sad[5];
    me_cmp_func quant_psnr[5];
    me_cmp_func bit[5];
    me_cmp_func rd[5];
    me_cmp_func vsad[5];
    me_cmp_func vsse[5];
    me_cmp_func nsse[5];
    me_cmp_func w53[5];
    me_cmp_func w97[5];
    me_cmp_func dct_max[5];
    me_cmp_func dct264_sad[5];

    me_cmp_func me_pre_cmp[5];
    me_cmp_func me_cmp[5];
    me_cmp_func me_sub_cmp[5];
    me_cmp_func mb_cmp[5];
    me_cmp_func ildct_cmp[5];
    me_cmp_func frame_skip_cmp[5];

    int (*ssd_int8_vs_int16)(const int8_t *pix1, const int16_t *pix2,
                             int size);
    /**
     * Halfpel motion compensation with rounding (a+b+1)>>1.
     * Indexed by block width ([0] = 16 pixels wide, [1] = 8, [2] = 4, [3] = 2)
     * and halfpel position (xhalfpel + 2*yhalfpel).
     * @param block destination into which the result is stored
     * @param pixels source
     * @param line_size number of bytes in a horizontal line of block
     * @param h height
     */
    op_pixels_func put_pixels_tab[4][4];

    /**
     * Halfpel motion compensation with rounding (a+b+1)>>1, averaging with the
     * existing destination. Same indexing as put_pixels_tab.
     */
    op_pixels_func avg_pixels_tab[4][4];

    /**
     * Halfpel motion compensation without rounding (a+b)>>1.
     * Same indexing as put_pixels_tab.
     */
    op_pixels_func put_no_rnd_pixels_tab[4][4];

    /**
     * Halfpel motion compensation without rounding (a+b)>>1, averaging with
     * the existing destination. Same indexing as put_pixels_tab.
     */
    op_pixels_func avg_no_rnd_pixels_tab[4][4];

    void (*put_no_rnd_pixels_l2[2])(uint8_t *block, const uint8_t *a, const uint8_t *b, int line_size, int h);

    /* thirdpel motion compensation, indexed by xthirdpel + 4*ythirdpel */
    tpel_mc_func put_tpel_pixels_tab[11];
    tpel_mc_func avg_tpel_pixels_tab[11];

    qpel_mc_func put_qpel_pixels_tab[2][16];
    qpel_mc_func avg_qpel_pixels_tab[2][16];
    qpel_mc_func put_no_rnd_qpel_pixels_tab[2][16];
    qpel_mc_func avg_no_rnd_qpel_pixels_tab[2][16];
    qpel_mc_func put_mspel_pixels_tab[8];

    /* H.264 chroma MC, indexed by block width ([0] = 8, [1] = 4, [2] = 2) */
    h264_chroma_mc_func put_h264_chroma_pixels_tab[3];
    h264_chroma_mc_func put_no_rnd_h264_chroma_pixels_tab[3];
    h264_chroma_mc_func avg_h264_chroma_pixels_tab[3];

    qpel_mc_func put_h264_qpel_pixels_tab[4][16];
    qpel_mc_func avg_h264_qpel_pixels_tab[4][16];

    qpel_mc_func put_2tap_qpel_pixels_tab[4][16];
    qpel_mc_func avg_2tap_qpel_pixels_tab[4][16];

    h264_weight_func weight_h264_pixels_tab[10];
    h264_biweight_func biweight_h264_pixels_tab[10];

    /* CAVS (AVS video) specific */
    qpel_mc_func put_cavs_qpel_pixels_tab[2][16];
    qpel_mc_func avg_cavs_qpel_pixels_tab[2][16];
    void (*cavs_filter_lv)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
    void (*cavs_filter_lh)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
    void (*cavs_filter_cv)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
    void (*cavs_filter_ch)(uint8_t *pix, int stride, int alpha, int beta, int tc, int bs1, int bs2);
    void (*cavs_idct8_add)(uint8_t *dst, DCTELEM *block, int stride);

    me_cmp_func pix_abs[2][4];
    /* huffyuv specific */
    void (*add_bytes)(uint8_t *dst, uint8_t *src, int w);
    void (*diff_bytes)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w);
    /* subtract huffyuv's variant of median prediction; note, this might read from src1[-1] and src2[-1] */
    void (*sub_hfyu_median_prediction)(uint8_t *dst, uint8_t *src1, uint8_t *src2, int w, int *left, int *left_top);
    void (*bswap_buf)(uint32_t *dst, const uint32_t *src, int w);

    /* H.264 in-loop deblocking filters */
    void (*h264_v_loop_filter_luma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
    void (*h264_h_loop_filter_luma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
    void (*h264_v_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
    void (*h264_h_loop_filter_chroma)(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0);
    void (*h264_v_loop_filter_chroma_intra)(uint8_t *pix, int stride, int alpha, int beta);
    void (*h264_h_loop_filter_chroma_intra)(uint8_t *pix, int stride, int alpha, int beta);

    void (*h264_loop_filter_strength)(int16_t bS[2][4][4], uint8_t nnz[40], int8_t ref[2][40], int16_t mv[2][40][2],
                                      int bidir, int edges, int step, int mask_mv0, int mask_mv1);

    void (*h263_v_loop_filter)(uint8_t *src, int stride, int qscale);
    void (*h263_h_loop_filter)(uint8_t *src, int stride, int qscale);

    void (*h261_loop_filter)(uint8_t *src, int stride);

    void (*x8_v_loop_filter)(uint8_t *src, int stride, int qscale);
    void (*x8_h_loop_filter)(uint8_t *src, int stride, int qscale);

    void (*vorbis_inverse_coupling)(float *mag, float *ang, int blocksize);

    void (*flac_compute_autocorr)(const int32_t *data, int len, int lag, double *autoc);

    void (*vector_fmul)(float *dst, const float *src, int len);
    void (*vector_fmul_reverse)(float *dst, const float *src0, const float *src1, int len);
    /* dst[i*step] = src0[i]*src1[i] + src2[i] + src3 */
    void (*vector_fmul_add_add)(float *dst, const float *src0, const float *src1, const float *src2, int src3, int len, int step);

    void (*float_to_int16)(int16_t *dst, const float *src, int len);

    /* (I)DCT */
    void (*fdct)(DCTELEM *block);
    void (*fdct248)(DCTELEM *block);

    void (*idct)(DCTELEM *block);

    /**
     * block -> idct -> clip to unsigned 8 bit -> dest.
     * @param line_size size in bytes of a horizontal line of dest
     */
    void (*idct_put)(uint8_t *dest, int line_size, DCTELEM *block);

    /**
     * block -> idct -> add dest -> clip to unsigned 8 bit -> dest.
     * @param line_size size in bytes of a horizontal line of dest
     */
    void (*idct_add)(uint8_t *dest, int line_size, DCTELEM *block);

    /**
     * IDCT input permutation.
     * Several optimized IDCTs need a permuted input (relative to the normal
     * order of the reference IDCT). The permutation must be applied before
     * idct_put/idct_add; normally it can be merged with the zigzag/alternate
     * scan reordering.
     */
    uint8_t idct_permutation[64];
    int idct_permutation_type;
#define FF_NO_IDCT_PERM 1
#define FF_LIBMPEG2_IDCT_PERM 2
#define FF_SIMPLE_IDCT_PERM 3
#define FF_TRANSPOSE_IDCT_PERM 4
#define FF_PARTTRANS_IDCT_PERM 5

    int (*try_8x8basis)(int16_t rem[64], int16_t weight[64], int16_t basis[64], int scale);
    void (*add_8x8basis)(int16_t rem[64], int16_t basis[64], int scale);
#define BASIS_SHIFT 16
#define RECON_SHIFT 6

    /* H.264 transforms */
    void (*h264_idct_add)(uint8_t *dst, DCTELEM *block, int stride);
    void (*h264_idct8_add)(uint8_t *dst, DCTELEM *block, int stride);
    void (*h264_idct_dc_add)(uint8_t *dst, DCTELEM *block, int stride);
    void (*h264_idct8_dc_add)(uint8_t *dst, DCTELEM *block, int stride);
    void (*h264_dct)(DCTELEM block[4][4]);

    /* snow wavelet */
    void (*vertical_compose97i)(IDWTELEM *b0, IDWTELEM *b1, IDWTELEM *b2, IDWTELEM *b3, IDWTELEM *b4, IDWTELEM *b5, int width);
    void (*horizontal_compose97i)(IDWTELEM *b, int width);
    void (*inner_add_yblock)(const uint8_t *obmc, const int obmc_stride, uint8_t **block, int b_w, int b_h, int src_x, int src_y, int src_stride, slice_buffer *sb, int add, uint8_t *dst8);

    void (*prefetch)(void *mem, int stride, int h);

    void (*shrink[4])(uint8_t *dst, int dst_wrap, const uint8_t *src, int src_wrap, int width, int height);

    /* VC-1 specific */
    void (*vc1_inv_trans_8x8)(DCTELEM *b);
    void (*vc1_inv_trans_8x4)(uint8_t *dest, int line_size, DCTELEM *block);
    void (*vc1_inv_trans_4x8)(uint8_t *dest, int line_size, DCTELEM *block);
    void (*vc1_inv_trans_4x4)(uint8_t *dest, int line_size, DCTELEM *block);
    void (*vc1_v_overlap)(uint8_t *src, int stride);
    void (*vc1_h_overlap)(uint8_t *src, int stride);

    /* put 8x8 block with bicubic interpolation and quarterpel precision;
     * the last argument is actually the round value, not the height */
    op_pixels_func put_vc1_mspel_pixels_tab[16];

    /* intrax8 */
    void (*x8_spatial_compensation[12])(uint8_t *src, uint8_t *dst, int linesize);
    void (*x8_setup_spatial_compensation)(uint8_t *src, uint8_t *dst, int linesize,
                                          int *range, int *sum, int edges);
} DSPContext;
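/*
 * Call-through sketch (illustrative): once dsputil_init() has filled the
 * context, callers reach the selected implementation through the function
 * pointers. With the halfpel tables indexed by block width ([0] = 16 pixels
 * wide) and halfpel position (xhalfpel + 2*yhalfpel), a 16-wide copy at a
 * horizontal halfpel offset looks like:
 *
 * @code
 * dsp.put_pixels_tab[0][1](dst, src, stride, 16);
 * @endcode
 */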
void dsputil_static_init(void);
void dsputil_init(DSPContext* p, AVCodecContext *avctx);

int ff_check_alignment(void);
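/*
 * Minimal setup sketch (illustrative): initialize a DSPContext and call
 * through it. 'avctx' is assumed to be an already configured AVCodecContext;
 * 'blocks', 'dest' and 'line_size' are assumed to be provided by the caller,
 * with 'blocks' being a suitably aligned array of 6*64 DCTELEMs.
 *
 * @code
 * DSPContext dsp;
 * dsputil_static_init();                 // build static tables (crop/square tables)
 * dsputil_init(&dsp, avctx);             // select C or CPU-optimized implementations
 * dsp.clear_blocks(blocks);              // zero six 8x8 blocks
 * dsp.idct_put(dest, line_size, blocks); // IDCT the first block into dest
 * emms_c();                              // required before float code if MMX was used
 * @endcode
 */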
/* permute block according to permutation, using the given scantable;
 * last is the index of the last non-zero coefficient in scantable order */
void ff_block_permute(DCTELEM *block, uint8_t *permutation, const uint8_t *scantable, int last);

void ff_set_cmp(DSPContext* c, me_cmp_func *cmp, int type);

#define BYTE_VEC32(c) ((c)*0x01010101UL)

/* byte-wise average of the four packed bytes in a and b, rounding halves up */
static inline uint32_t rnd_avg32(uint32_t a, uint32_t b)
{
    return (a | b) - (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
}

/* byte-wise average of the four packed bytes in a and b, rounding halves down */
static inline uint32_t no_rnd_avg32(uint32_t a, uint32_t b)
{
    return (a & b) + (((a ^ b) & ~BYTE_VEC32(0x01)) >> 1);
}
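/*
 * Worked example (illustrative), averaging packed byte values 1 and 2:
 *   rnd_avg32(0x01010101, 0x02020202)
 *     = (a | b) - (((a ^ b) & 0xFEFEFEFE) >> 1)
 *     = 0x03030303 - 0x01010101 = 0x02020202    // (1+2+1)>>1 = 2 per byte
 *   no_rnd_avg32(0x01010101, 0x02020202)
 *     = (a & b) + (((a ^ b) & 0xFEFEFEFE) >> 1)
 *     = 0x00000000 + 0x01010101 = 0x01010101    // (1+2)>>1 = 1 per byte
 * Clearing the low bit of every byte (the ~BYTE_VEC32(0x01) mask) before the
 * shift keeps bits from spilling into the neighbouring byte lane.
 */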
static inline int get_penalty_factor(int lambda, int lambda2, int type){
    switch(type&0xFF){
    default:
    case FF_CMP_SAD:
        return lambda>>FF_LAMBDA_SHIFT;
    case FF_CMP_DCT:
        return (3*lambda)>>(FF_LAMBDA_SHIFT+1);
    case FF_CMP_W53:
        return (4*lambda)>>(FF_LAMBDA_SHIFT);
    case FF_CMP_W97:
        return (2*lambda)>>(FF_LAMBDA_SHIFT);
    case FF_CMP_SATD:
    case FF_CMP_DCT264:
        return (2*lambda)>>FF_LAMBDA_SHIFT;
    case FF_CMP_RD:
    case FF_CMP_PSNR:
    case FF_CMP_SSE:
    case FF_CMP_NSSE:
        return lambda2>>FF_LAMBDA_SHIFT;
    case FF_CMP_BIT:
        return 1;
    }
}

/**
 * Empty MMX state.
 * Must be called between any DSP function that may use MMX and any
 * float/double code. Redefined below on MMX-capable builds; a no-op otherwise.
 */
#define emms_c()
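/*
 * Usage sketch (illustrative): MMX DSP routines leave the x87 FPU in MMX
 * state, so emms_c() has to be called before any floating-point arithmetic
 * that follows them. 'dsp', 'dest', 'line_size', 'block' and 'phase' are
 * assumed to exist in the caller.
 *
 * @code
 * dsp.idct_put(dest, line_size, block);  // may be an MMX implementation
 * emms_c();                              // leave MMX state
 * x = sin(phase);                        // x87 floating point is safe again
 * @endcode
 */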
/* returns the detected CPU feature flags (MM_* bits) */
int mm_support(void);

void dsputil_init_alpha(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_armv4l(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_bfin(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_mlib(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_mmi(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_mmx(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_ppc(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_sh4(DSPContext* c, AVCodecContext *avctx);
void dsputil_init_vis(DSPContext* c, AVCodecContext *avctx);

#define DECLARE_ALIGNED_16(t, v) DECLARE_ALIGNED(16, t, v)
#if defined(HAVE_MMX)

#undef emms_c

#define MM_MMX      0x0001 /* standard MMX */
#define MM_3DNOW    0x0004 /* AMD 3DNOW */
#define MM_MMXEXT   0x0002 /* SSE integer functions or AMD MMX ext */
#define MM_SSE      0x0008 /* SSE functions */
#define MM_SSE2     0x0010 /* SSE2 functions */
#define MM_3DNOWEXT 0x0020 /* AMD 3DNowExt */
#define MM_SSE3     0x0040 /* SSE3 functions */
#define MM_SSSE3    0x0080 /* SSSE3 functions */

extern int mm_flags;

void add_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
void put_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);
void put_signed_pixels_clamped_mmx(const DCTELEM *block, uint8_t *pixels, int line_size);

static inline void emms(void)
{
    __asm __volatile ("emms;":::"memory");
}

#define emms_c() \
{\
    if (mm_flags & MM_MMX)\
        emms();\
}
#define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(8, t, v)

#define STRIDE_ALIGN 8

void dsputil_init_pix_mmx(DSPContext* c, AVCodecContext *avctx);

#elif defined(ARCH_ARMV4L)

#define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(4, t, v)
#define STRIDE_ALIGN 4

#define MM_IWMMXT 0x0100

extern int mm_flags;

#elif defined(HAVE_MLIB)

#define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(8, t, v)
#define STRIDE_ALIGN 8

#elif defined(HAVE_VIS)

#define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(8, t, v)
#define STRIDE_ALIGN 8

#elif defined(ARCH_ALPHA)

#define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(8, t, v)
#define STRIDE_ALIGN 8

#elif defined(ARCH_POWERPC)

#define MM_ALTIVEC 0x0001

extern int mm_flags;

#define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(16, t, v)
#define STRIDE_ALIGN 16

#elif defined(HAVE_MMI)

#define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(16, t, v)
#define STRIDE_ALIGN 16

#elif defined(ARCH_SH4)

#define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(8, t, v)
#define STRIDE_ALIGN 8

#elif defined(ARCH_BFIN)

#define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(8, t, v)
#define STRIDE_ALIGN 8

#else

#define DECLARE_ALIGNED_8(t, v) DECLARE_ALIGNED(8, t, v)
#define STRIDE_ALIGN 8

#endif
void get_psnr(uint8_t *orig_image[3], uint8_t *coded_image[3],
              int orig_linesize[3], int coded_linesize,
              AVCodecContext *avctx);

/* FFT computation */

typedef float FFTSample;

struct MDCTContext;

typedef struct FFTComplex {
    FFTSample re, im;
} FFTComplex;
typedef struct FFTContext {
    int nbits;
    int inverse;
    uint16_t *revtab;
    FFTComplex *exptab;
    FFTComplex *exptab1; /* only used by SSE code */
    void (*fft_calc)(struct FFTContext *s, FFTComplex *z);
    void (*imdct_calc)(struct MDCTContext *s, FFTSample *output,
                       const FFTSample *input, FFTSample *tmp);
} FFTContext;

int ff_fft_init(FFTContext *s, int nbits, int inverse);
void ff_fft_permute(FFTContext *s, FFTComplex *z);
void ff_fft_calc_c(FFTContext *s, FFTComplex *z);
void ff_fft_calc_sse(FFTContext *s, FFTComplex *z);
void ff_fft_calc_3dn(FFTContext *s, FFTComplex *z);
void ff_fft_calc_3dn2(FFTContext *s, FFTComplex *z);
void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);

static inline void ff_fft_calc(FFTContext *s, FFTComplex *z)
{
    s->fft_calc(s, z);
}
void ff_fft_end(FFTContext *s);
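/*
 * Minimal FFT usage sketch (illustrative): a forward complex FFT on 1<<nbits
 * points. 'data' is assumed to be an array of 8 FFTComplex values; error
 * handling is omitted.
 *
 * @code
 * FFTContext fft;
 * ff_fft_init(&fft, 3, 0);     // 2^3 = 8 points, forward transform
 * ff_fft_permute(&fft, data);  // reorder input as the transform expects
 * ff_fft_calc(&fft, data);     // in-place FFT
 * ff_fft_end(&fft);
 * @endcode
 */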
typedef struct MDCTContext {
    int n;     /* size of MDCT (i.e. number of input data * 2) */
    int nbits; /* n = 2^nbits */
    /* pre/post rotation tables */
    FFTSample *tcos;
    FFTSample *tsin;
    FFTContext fft;
} MDCTContext;

/**
 * Generate a Kaiser-Bessel Derived Window.
 * @param window pointer to half window
 * @param alpha  kernel window alpha parameter
 * @param n      size of half window
 */
void ff_kbd_window_init(float *window, float alpha, int n);

int ff_mdct_init(MDCTContext *s, int nbits, int inverse);
void ff_imdct_calc(MDCTContext *s, FFTSample *output,
                   const FFTSample *input, FFTSample *tmp);
void ff_imdct_calc_3dn2(MDCTContext *s, FFTSample *output,
                        const FFTSample *input, FFTSample *tmp);
void ff_imdct_calc_sse(MDCTContext *s, FFTSample *output,
                       const FFTSample *input, FFTSample *tmp);
void ff_mdct_calc(MDCTContext *s, FFTSample *out,
                  const FFTSample *input, FFTSample *tmp);
void ff_mdct_end(MDCTContext *s);
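/*
 * Minimal MDCT usage sketch (illustrative): a 512-point inverse MDCT. Buffer
 * sizes are the caller's responsibility; 'input', 'output' and the scratch
 * buffer 'tmp' are assumed to be FFTSample arrays large enough for the
 * transform, and windowing/overlap-add is left to the caller.
 *
 * @code
 * MDCTContext mdct;
 * ff_mdct_init(&mdct, 9, 1);                 // n = 2^9 = 512, inverse transform
 * ff_imdct_calc(&mdct, output, input, tmp);  // input: n/2 coefficients, output: n samples
 * ff_mdct_end(&mdct);
 * @endcode
 */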
/* build a 16-pixel-wide compare function from an 8-pixel-wide one */
#define WRAPPER8_16(name8, name16)\
static int name16(void *s, uint8_t *dst, uint8_t *src, int stride, int h){\
    return name8(s, dst , src , stride, h)\
          +name8(s, dst+8 , src+8 , stride, h);\
}

#define WRAPPER8_16_SQ(name8, name16)\
static int name16(void *s, uint8_t *dst, uint8_t *src, int stride, int h){\
    int score=0;\
    score +=name8(s, dst , src , stride, 8);\
    score +=name8(s, dst+8 , src+8 , stride, 8);\
    if(h==16){\
        dst += 8*stride;\
        src += 8*stride;\
        score +=name8(s, dst , src , stride, 8);\
        score +=name8(s, dst+8 , src+8 , stride, 8);\
    }\
    return score;\
}
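/*
 * Example (illustrative): turning an 8x8 comparison routine into a 16x16 one.
 * Both names are hypothetical; my_cmp8x8 is assumed to be an existing function
 * with the me_cmp_func shape.
 *
 * @code
 * WRAPPER8_16_SQ(my_cmp8x8, my_cmp16_example)
 * // my_cmp16_example(s, dst, src, stride, 16) sums my_cmp8x8 over the four 8x8 quadrants.
 * @endcode
 */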
static inline void copy_block2(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        AV_WN16(dst   , AV_RN16(src   ));
        dst+=dstStride;
        src+=srcStride;
    }
}
static inline void copy_block4(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        AV_WN32(dst   , AV_RN32(src   ));
        dst+=dstStride;
        src+=srcStride;
    }
}
static inline void copy_block8(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        AV_WN32(dst   , AV_RN32(src   ));
        AV_WN32(dst+4 , AV_RN32(src+4 ));
        dst+=dstStride;
        src+=srcStride;
    }
}
static inline void copy_block9(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        AV_WN32(dst   , AV_RN32(src   ));
        AV_WN32(dst+4 , AV_RN32(src+4 ));
        dst[8]= src[8];
        dst+=dstStride;
        src+=srcStride;
    }
}
static inline void copy_block16(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        AV_WN32(dst   , AV_RN32(src   ));
        AV_WN32(dst+4 , AV_RN32(src+4 ));
        AV_WN32(dst+8 , AV_RN32(src+8 ));
        AV_WN32(dst+12, AV_RN32(src+12));
        dst+=dstStride;
        src+=srcStride;
    }
}
static inline void copy_block17(uint8_t *dst, uint8_t *src, int dstStride, int srcStride, int h)
{
    int i;
    for(i=0; i<h; i++)
    {
        AV_WN32(dst   , AV_RN32(src   ));
        AV_WN32(dst+4 , AV_RN32(src+4 ));
        AV_WN32(dst+8 , AV_RN32(src+8 ));
        AV_WN32(dst+12, AV_RN32(src+12));
        dst[16]= src[16];
        dst+=dstStride;
        src+=srcStride;
    }
}
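/*
 * Usage sketch (illustrative): the copy_blockN helpers copy an N-pixel-wide,
 * h-row-tall block between buffers with independent strides, e.g. staging a
 * 17x17 source patch into a tightly packed temporary buffer before
 * interpolation. 'src' and 'src_stride' are assumed to come from the caller.
 *
 * @code
 * uint8_t tmp[17*17];
 * copy_block17(tmp, src, 17, src_stride, 17);
 * @endcode
 */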
#endif /* FFMPEG_DSPUTIL_H */