yorick-mpeg-0.1/0000755000076500001440000000000011254317044013166 5ustar frigautusersyorick-mpeg-0.1/config.h0000644000076500001440000000064311254260253014606 0ustar frigautusers/* if the code is really written in portable ANSI C, * this file is unnecessary */ /* include inttypes.h if not present, should get rid of this */ #undef EMULATE_INTTYPES #undef EMULATE_FAST_INT /* used in common.h to define unaligned32, almost certainly should eliminate */ #define ARCH_X86 1 /* used in mem.c, almost certainly should eliminate */ #undef HAVE_OSX #define HAVE_MALLOC_H 1 #define HAVE_MEMALIGN 1 yorick-mpeg-0.1/CVS/0000755000076500001440000000000010741674566013640 5ustar frigautusersyorick-mpeg-0.1/CVS/Entries0000644000076500001440000000042711254260253015157 0ustar frigautusers/Makefile/1.1.1.1/Sat Oct 27 22:18:21 2007// /README/1.1.1.1/Sat Oct 27 22:18:21 2007// /mpeg.i/1.1.1.1/Sat Oct 27 22:18:21 2007// /mpgtest.i/1.1.1.1/Sat Oct 27 22:18:21 2007// /ympeg.c/1.1.1.1/Sat Oct 27 22:18:21 2007// D/libavcodec//// /config.h/1.4/Fri Jan 11 14:12:11 2008// yorick-mpeg-0.1/CVS/Repository0000644000076500001440000000001411254260253015715 0ustar frigautusersyorick-mpeg yorick-mpeg-0.1/CVS/Root0000644000076500001440000000007011254260253014463 0ustar frigautusers:ext:frigaut@yorick.cvs.sourceforge.net:/cvsroot/yorick yorick-mpeg-0.1/libavcodec/0000755000076500001440000000000011254317044015261 5ustar frigautusersyorick-mpeg-0.1/libavcodec/avcodec.h0000644000076500001440000004637511254260253017054 0ustar frigautusers#ifndef AVCODEC_H #define AVCODEC_H /** * @file avcodec.h * external api header. */ #ifdef __cplusplus extern "C" { #endif #include "common.h" #include /* size_t */ #define FFMPEG_VERSION_INT 0x000409 #define FFMPEG_VERSION "0.4.9-pre1" #define LIBAVCODEC_VERSION_INT FFMPEG_VERSION_INT #define LIBAVCODEC_VERSION FFMPEG_VERSION #define AV_NOPTS_VALUE int64_t_C(0x8000000000000000) #define AV_TIME_BASE 1000000 typedef struct AVRational{ int num; int den; } AVRational; #define av_q2d(a) ((double)(a).num/(double)(a).den) enum CodecID { CODEC_ID_NONE, CODEC_ID_MPEG1VIDEO }; enum CodecType { CODEC_TYPE_UNKNOWN = -1, CODEC_TYPE_VIDEO }; /** * Pixel format. Notes: * * PIX_FMT_RGBA32 is handled in an endian-specific manner. A RGBA * color is put together as: * (A << 24) | (R << 16) | (G << 8) | B * This is stored as BGRA on little endian CPU architectures and ARGB on * big endian CPUs. * * When the pixel format is palettized RGB (PIX_FMT_PAL8), the palettized * image data is stored in AVFrame.data[0]. The palette is transported in * AVFrame.data[1] and, is 1024 bytes long (256 4-byte entries) and is * formatted the same as in PIX_FMT_RGBA32 described above (i.e., it is * also endian-specific). Note also that the individual RGB palette * components stored in AVFrame.data[1] should be in the range 0..255. * This is important as many custom PAL8 video codecs that were designed * to run on the IBM VGA graphics adapter use 6-bit palette components. */ enum PixelFormat { PIX_FMT_YUV420P, PIX_FMT_YUV422, /* unused, but keeps numbering same as ffmpeg */ PIX_FMT_RGB24, PIX_FMT_NB }; /* motion estimation type, EPZS by default */ enum Motion_Est_ID { ME_ZERO = 1, ME_FULL, ME_LOG, ME_PHODS, ME_EPZS, ME_X1 }; #define FF_MAX_B_FRAMES 8 /** * Codec uses get_buffer() for allocating buffers. 
* direct rendering method 1 */ /** codec has a non zero delay and needs to be feeded with NULL at the end to get the delayed data */ #define CODEC_CAP_DELAY 0x0020 /* the following defines might change, so dont expect compatibility if u use them */ #define MB_TYPE_INTRA4x4 0x0001 #define FF_COMMON_FRAME \ /**\ * pointer to the picture planes.\ * this might be different from the first allocated byte\ * - encoding: \ * - decoding: \ */\ uint8_t *data[4];\ int linesize[4];\ /**\ * pointer to the first allocated byte of the picture. can be used in get_buffer/release_buffer\ * this isnt used by lavc unless the default get/release_buffer() is used\ * - encoding: \ * - decoding: \ */\ uint8_t *base[4];\ /**\ * 1 -> keyframe, 0-> not\ * - encoding: set by lavc\ * - decoding: set by lavc\ */\ int key_frame;\ \ /**\ * picture type of the frame, see ?_TYPE below.\ * - encoding: set by lavc for coded_picture (and set by user for input)\ * - decoding: set by lavc\ */\ int pict_type;\ \ /**\ * presentation timestamp in AV_TIME_BASE (=micro seconds currently) (time when frame should be shown to user)\ * if AV_NOPTS_VALUE then the frame_rate will be used as reference\ * - encoding: MUST be set by user\ * - decoding: set by lavc\ */\ int64_t pts;\ \ /**\ * picture number in bitstream order.\ * - encoding: set by\ * - decoding: set by lavc\ */\ int coded_picture_number;\ /**\ * picture number in display order.\ * - encoding: set by\ * - decoding: set by lavc\ */\ int display_picture_number;\ \ /**\ * quality (between 1 (good) and FF_LAMBDA_MAX (bad)) \ * - encoding: set by lavc for coded_picture (and set by user for input)\ * - decoding: set by lavc\ */\ int quality; \ \ /**\ * buffer age (1->was last buffer and dint change, 2->..., ...).\ * set to INT_MAX if the buffer has not been used yet \ * - encoding: unused\ * - decoding: MUST be set by get_buffer()\ */\ int age;\ \ /**\ * is this picture used as reference\ * - encoding: unused\ * - decoding: set by lavc (before get_buffer() call))\ */\ int reference;\ \ /**\ * Motion vector table\ * - encoding: set by user\ * - decoding: set by lavc\ */\ int16_t (*motion_val[2])[2];\ \ /**\ * Macroblock type table\ * mb_type_base + mb_width + 2\ * - encoding: set by user\ * - decoding: set by lavc\ */\ uint32_t *mb_type;\ \ /**\ * type of the buffer (to keep track of who has to dealloc data[*])\ * - encoding: set by the one who allocs it\ * - decoding: set by the one who allocs it\ * Note: user allocated (direct rendering) & internal buffers can not coexist currently\ */\ int type;\ \ /**\ * \ */\ int qscale_type;\ \ /**\ * Motion referece frame index\ * - encoding: set by user\ * - decoding: set by lavc\ */\ int8_t *ref_index[2]; #define FF_BUFFER_TYPE_INTERNAL 1 #define FF_BUFFER_TYPE_USER 2 /*/< Direct rendering buffers (image is (de)allocated by user)*/ #define FF_BUFFER_TYPE_SHARED 4 /*/< buffer from somewher else, dont dealloc image (data/base), all other tables are not shared*/ #define FF_BUFFER_TYPE_COPY 8 /*/< just a (modified) copy of some other buffer, dont dealloc anything*/ #define FF_I_TYPE 1 /* Intra*/ #define FF_P_TYPE 2 /* Predicted*/ #define FF_B_TYPE 3 /* Bi-dir predicted*/ /** * Audio Video Frame. */ typedef struct AVFrame { FF_COMMON_FRAME } AVFrame; #define DEFAULT_FRAME_RATE_BASE 1001000 /** * Used by av_log */ typedef struct AVCLASS AVClass; struct AVCLASS { const char* class_name; const char* (*item_name)(void*); /* actually passing a pointer to an AVCodecContext or AVFormatContext, which begin with an AVClass. 
Needed because av_log is in libavcodec and has no visibility of AVIn/OutputFormat */ }; /** * main external api structure. */ typedef struct AVCodecContext { /** * Info on struct for av_log * - set by avcodec_alloc_context */ AVClass *av_class; /** * the average bitrate. * - encoding: set by user. unused for constant quantizer encoding * - decoding: set by lavc. 0 or some bitrate if this info is available in the stream */ int bit_rate; /** * number of bits the bitstream is allowed to diverge from the reference. * the reference can be CBR (for CBR pass1) or VBR (for pass2) * - encoding: set by user. unused for constant quantizer encoding * - decoding: unused */ int bit_rate_tolerance; /** * CODEC_FLAG_*. * - encoding: set by user. * - decoding: set by user. */ int flags; /* video only */ /** * frames per sec multiplied by frame_rate_base. * for variable fps this is the precission, so if the timestamps * can be specified in msec precssion then this is 1000*frame_rate_base * - encoding: MUST be set by user * - decoding: set by lavc. 0 or the frame_rate if available */ int frame_rate; /** * width / height. * - encoding: MUST be set by user. * - decoding: set by user if known, codec should override / dynamically change if needed */ int width, height; #define FF_ASPECT_EXTENDED 15 /** * the number of pictures in a group of pitures, or 0 for intra_only. * - encoding: set by user. * - decoding: unused */ int gop_size; /** * pixel format, see PIX_FMT_xxx. * - encoding: FIXME: used by ffmpeg to decide whether an pix_fmt * conversion is in order. This only works for * codecs with one supported pix_fmt, we should * do something for a generic case as well. * - decoding: set by lavc. */ enum PixelFormat pix_fmt; /* the following data should not be initialized */ int frame_size; /*/< in samples, initialized when calling 'init' */ int frame_number; /*/< audio or video frame number */ int real_pict_num; /*/< returns the real picture number of previous encoded frame */ /** * number of frames the decoded output will be delayed relative to * the encoded input. * - encoding: set by lavc. * - decoding: unused */ int delay; /** * minimum quantizer. * - encoding: set by user. * - decoding: unused */ int qmin; /** * maximum quantizer. * - encoding: set by user. * - decoding: unused */ int qmax; /** * maximum quantizer difference etween frames. * - encoding: set by user. * - decoding: unused */ int max_qdiff; /** * maximum number of b frames between non b frames. * note: the output will be delayed by max_b_frames+1 relative to the input * - encoding: set by user. * - decoding: unused */ int max_b_frames; /** * qscale factor between ip and b frames. * - encoding: set by user. * - decoding: unused */ float b_quant_factor; /** obsolete FIXME remove */ int b_frame_strategy; struct AVCodec *codec; void *priv_data; /* statistics, used for 2-pass encoding */ int mv_bits; int header_bits; int i_tex_bits; int p_tex_bits; int i_count; int p_count; int skip_count; int misc_bits; /** * number of bits used for the previously encoded frame. * - encoding: set by lavc * - decoding: unused */ int frame_bits; char codec_name[32]; enum CodecType codec_type; /* see CODEC_TYPE_xxx */ enum CodecID codec_id; /* see CODEC_ID_xxx */ /** * qscale offset between ip and b frames. * if > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset) * if < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset) * - encoding: set by user. 
* - decoding: unused */ float b_quant_offset; /** * called at the beginning of each frame to get a buffer for it. * if pic.reference is set then the frame will be read later by lavc * avcodec_align_dimensions() should be used to find the required width and * height, as they normally need to be rounded up to the next multiple of 16 * - encoding: unused * - decoding: set by lavc, user can override */ int (*get_buffer)(struct AVCodecContext *c, AVFrame *pic); /** * called to release buffers which where allocated with get_buffer. * a released buffer can be reused in get_buffer() * pic.data[*] must be set to NULL * - encoding: unused * - decoding: set by lavc, user can override */ void (*release_buffer)(struct AVCodecContext *c, AVFrame *pic); /** * maximum bitrate. * - encoding: set by user. * - decoding: unused */ int rc_max_rate; /** * minimum bitrate. * - encoding: set by user. * - decoding: unused */ int rc_min_rate; /** * decoder bitstream buffer size. * - encoding: set by user. * - decoding: unused */ int rc_buffer_size; float rc_buffer_aggressivity; /** * qscale factor between p and i frames. * if > 0 then the last p frame quantizer will be used (q= lastp_q*factor+offset) * if < 0 then normal ratecontrol will be done (q= -normal_q*factor+offset) * - encoding: set by user. * - decoding: unused */ float i_quant_factor; /** * qscale offset between p and i frames. * - encoding: set by user. * - decoding: unused */ float i_quant_offset; /** * initial complexity for pass1 ratecontrol. * - encoding: set by user. * - decoding: unused */ float rc_initial_cplx; /** * dct algorithm, see FF_DCT_* below. * - encoding: set by user * - decoding: unused */ int dct_algo; #define FF_DCT_AUTO 0 #define FF_DCT_FASTINT 1 #define FF_DCT_INT 2 #define FF_DCT_MMX 3 #define FF_DCT_MLIB 4 #define FF_DCT_ALTIVEC 5 #define FF_DCT_FAAN 6 /** * slice count. * - encoding: set by lavc * - decoding: set by user (or 0) */ int slice_count; /** * slice offsets in the frame in bytes. * - encoding: set/allocated by lavc * - decoding: set/allocated by user (or NULL) */ int *slice_offset; /** * bits per sample/pixel from the demuxer (needed for huffyuv). * - encoding: set by lavc * - decoding: set by user */ int bits_per_sample; /** * sample aspect ratio (0 if unknown). * numerator and denominator must be relative prime and smaller then 256 for some video standards * - encoding: set by user. * - decoding: set by lavc. */ AVRational sample_aspect_ratio; /** * the picture in the bitstream. * - encoding: set by lavc * - decoding: set by lavc */ AVFrame *coded_frame; #define FF_CMP_SAD 0 #define FF_CMP_VSAD 8 /** * frame_rate_base. * for variable fps this is 1 * - encoding: set by user. * - decoding: set by lavc. * @todo move this after frame_rate */ int frame_rate_base; /** * intra quantizer bias. * - encoding: set by user. * - decoding: unused */ int intra_quant_bias; #define FF_DEFAULT_QUANT_BIAS 999999 /** * inter quantizer bias. * - encoding: set by user. * - decoding: unused */ int inter_quant_bias; /** * internal_buffer count. * Dont touch, used by lavc default_get_buffer() */ int internal_buffer_count; /** * internal_buffers. * Dont touch, used by lavc default_get_buffer() */ void *internal_buffer; #define FF_LAMBDA_SHIFT 7 #define FF_LAMBDA_SCALE (1< #define AV_LOG_QUIET -1 #define AV_LOG_ERROR 0 #define AV_LOG_INFO 1 #define AV_LOG_DEBUG 2 #ifdef __GNUC__ extern void av_log(void*, int level, const char *fmt, ...) 
__attribute__ ((__format__ (__printf__, 3, 4))); #else extern void av_log(void*, int level, const char *fmt, ...); #endif extern void av_vlog(void*, int level, const char *fmt, va_list); extern int av_log_get_level(void); extern void av_log_set_level(int); extern void av_log_set_callback(void (*)(void*, int, const char*, va_list)); /* endian macros */ #if !defined(BE_16) || !defined(BE_32) || !defined(LE_16) || !defined(LE_32) #define BE_16(x) ((((uint8_t*)(x))[0] << 8) | ((uint8_t*)(x))[1]) #define BE_32(x) ((((uint8_t*)(x))[0] << 24) | \ (((uint8_t*)(x))[1] << 16) | \ (((uint8_t*)(x))[2] << 8) | \ ((uint8_t*)(x))[3]) #define LE_16(x) ((((uint8_t*)(x))[1] << 8) | ((uint8_t*)(x))[0]) #define LE_32(x) ((((uint8_t*)(x))[3] << 24) | \ (((uint8_t*)(x))[2] << 16) | \ (((uint8_t*)(x))[1] << 8) | \ ((uint8_t*)(x))[0]) #endif #ifdef __cplusplus } #endif #endif /* AVCODEC_H */ yorick-mpeg-0.1/libavcodec/bswap.h0000644000076500001440000000124711254260253016551 0ustar frigautusers/** * @file bswap.h * byte swap. */ #ifndef __BSWAP_H__ #define __BSWAP_H__ #ifdef HAVE_BYTESWAP_H #include #else #define bswap_16(x) (((x) & 0x00ff) << 8 | ((x) & 0xff00) >> 8) /* code from bits/byteswap.h (C) 1997, 1998 Free Software Foundation, Inc.*/ #define bswap_32(x) \ ((((x) & 0xff000000) >> 24) | (((x) & 0x00ff0000) >> 8) | \ (((x) & 0x0000ff00) << 8) | (((x) & 0x000000ff) << 24)) #endif /* be2me ... BigEndian to MachineEndian*/ /* le2me ... LittleEndian to MachineEndian*/ #ifdef WORDS_BIGENDIAN #define be2me_16(x) (x) #define be2me_32(x) (x) #else #define be2me_16(x) bswap_16(x) #define be2me_32(x) bswap_32(x) #endif #endif yorick-mpeg-0.1/libavcodec/common.c0000644000076500001440000000664611254260253016730 0ustar frigautusers/* * Common bit i/o utils * Copyright (c) 2000, 2001 Fabrice Bellard. * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * alternative bitstream reader & writer by Michael Niedermayer */ /** * @file common.c * common internal api. 
*/ #include "avcodec.h" const uint8_t ff_sqrt_tab[128]={ 0, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 4, 4, 4, 4, 4, 4, 4, 4, 4, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 9, 10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,10,11,11,11,11, 11,11,11 }; const uint8_t ff_log2_tab[256]={ 0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4, 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 }; void align_put_bits(PutBitContext *s) { #ifdef ALT_BITSTREAM_WRITER put_bits(s,( - s->index) & 7,0); #else put_bits(s,s->bit_left & 7,0); #endif } int64_t ff_gcd(int64_t a, int64_t b){ if(b) return ff_gcd(b, a%b); else return a; } #ifndef ALT_BITSTREAM_WRITER void put_bits(PutBitContext *s, int n, unsigned int value) { unsigned int bit_buf; int bit_left; assert(n == 32 || value < (1U << n)); bit_buf = s->bit_buf; bit_left = s->bit_left; /* XXX: optimize */ if (n < bit_left) { bit_buf = (bit_buf<> (n - bit_left); *(uint32_t *)s->buf_ptr = be2me_32(bit_buf); /*printf("bitbuf = %08x\n", bit_buf);*/ s->buf_ptr+=4; bit_left+=32 - n; bit_buf = value; } s->bit_buf = bit_buf; s->bit_left = bit_left; } #else void put_bits(PutBitContext *s, int n, unsigned int value) { # ifdef ALIGNED_BITSTREAM_WRITER int index= s->index; uint32_t *ptr= ((uint32_t *)s->buf)+(index>>5); value<<= 32-n; ptr[0] |= be2me_32(value>>(index&31)); ptr[1] = be2me_32(value<<(32-(index&31))); index+= n; s->index= index; # else /*ALIGNED_BITSTREAM_WRITER*/ int index= s->index; uint32_t *ptr= (uint32_t*)(((uint8_t *)s->buf)+(index>>3)); ptr[0] |= be2me_32(value<<(32-n-(index&7) )); ptr[1] = 0; index+= n; s->index= index; # endif /*!ALIGNED_BITSTREAM_WRITER*/ } #endif yorick-mpeg-0.1/libavcodec/common.h0000644000076500001440000001417511254260253016731 0ustar frigautusers/** * @file common.h * common internal api header. 
*/ #ifndef COMMON_H #define COMMON_H #if defined(WIN32) && !defined(__MINGW32__) && !defined(__CYGWIN__) # define CONFIG_WIN32 #endif /*#define ALT_BITSTREAM_WRITER*/ /*#define ALIGNED_BITSTREAM_WRITER*/ #ifdef HAVE_AV_CONFIG_H /* only include the following when compiling package */ # include "config.h" # include # include # include # include # include # ifndef __BEOS__ # include # else # include "berrno.h" # endif # include # include #endif /* HAVE_AV_CONFIG_H */ #ifndef M_PI #define M_PI 3.14159265358979323846 #endif #ifndef EMULATE_INTTYPES # include #else typedef signed char int8_t; typedef signed short int16_t; typedef signed int int32_t; typedef unsigned char uint8_t; typedef unsigned short uint16_t; typedef unsigned int uint32_t; # ifdef CONFIG_WIN32 typedef signed __int64 int64_t; typedef unsigned __int64 uint64_t; # else /* other OS */ typedef signed long long int64_t; typedef unsigned long long uint64_t; # endif /* other OS */ #endif /* HAVE_INTTYPES_H */ #ifdef EMULATE_FAST_INT /* note that we don't emulate 64bit ints */ typedef signed char int_fast8_t; typedef signed int int_fast16_t; typedef signed int int_fast32_t; typedef unsigned char uint_fast8_t; typedef unsigned int uint_fast16_t; typedef unsigned int uint_fast32_t; #endif #ifndef INT_BIT # if INT_MAX != 2147483647 # define INT_BIT 64 # else # define INT_BIT 32 # endif #endif #ifdef CONFIG_WIN32 /* windows */ # if !defined(__MINGW32__) && !defined(__CYGWIN__) # define int64_t_C(c) (c ## i64) # define uint64_t_C(c) (c ## i64) # ifdef HAVE_AV_CONFIG_H # define inline __inline # endif # else # define int64_t_C(c) (c ## LL) # define uint64_t_C(c) (c ## ULL) # endif /* __MINGW32__ */ # ifdef HAVE_AV_CONFIG_H # ifdef _DEBUG # define DEBUG # endif # define snprintf _snprintf # define vsnprintf _vsnprintf # endif #elif defined (CONFIG_OS2) /* OS/2 EMX */ # ifndef int64_t_C # define int64_t_C(c) (c ## LL) # define uint64_t_C(c) (c ## ULL) # endif # ifdef HAVE_AV_CONFIG_H # include # endif /* HAVE_AV_CONFIG_H */ #else /* unix */ # ifndef int64_t_C # if LONG_MAX > 2147483647 # define int64_t_C(c) (c ## L) # define uint64_t_C(c) (c ## UL) # else # define int64_t_C(c) (c ## LL) # define uint64_t_C(c) (c ## ULL) # endif # endif # ifdef HAVE_AV_CONFIG_H # ifdef USE_FASTMEMCPY # include "fastmemcpy.h" # endif # endif /* HAVE_AV_CONFIG_H */ #endif /* !CONFIG_WIN32 && !CONFIG_OS2 */ #ifndef INT64_MAX # define INT64_MAX int64_t_C(0x7FFFFFFFFFFFFFFF) #endif #ifdef HAVE_AV_CONFIG_H /* this ifdef extends to bottom of file */ #include "bswap.h" #ifndef DEBUG # define NDEBUG #endif #include /* assume b>0 */ #define ROUNDED_DIV(a,b) (((a)>0 ? (a) + ((b)>>1) : (a) - ((b)>>1))/(b)) #define ABS(a) ((a) >= 0 ? (a) : (-(a))) #define FFMAX(a,b) ((a) > (b) ? (a) : (b)) #define FFMIN(a,b) ((a) > (b) ? (b) : (a)) #define NEG_SSR32(a,s) ((( int32_t)(a))>>(32-(s))) #define NEG_USR32(a,s) (((uint32_t)(a))>>(32-(s))) /* bit output */ /* buf and buf_end must be present and used by every alternative writer. */ typedef struct PutBitContext { #ifdef ALT_BITSTREAM_WRITER uint8_t *buf, *buf_end; int index; #else uint32_t bit_buf; int bit_left; uint8_t *buf, *buf_ptr, *buf_end; #endif } PutBitContext; /* return the number of bits output */ #ifdef ALT_BITSTREAM_WRITER # define put_bits_count(s) ((s)->index) #else # define put_bits_count(s) (((s)->buf_ptr-(s)->buf)*8 + 32 - (s)->bit_left) #endif void align_put_bits(PutBitContext *s); /* used to avoid missaligned exceptions on some archs (alpha, ...) 
*/ #ifdef ARCH_X86 /*#if 1*/ # define unaligned32(a) (*(uint32_t*)(a)) #else # ifdef __GNUC__ static inline uint32_t unaligned32(const void *v) { struct Unaligned { uint32_t i; } __attribute__((packed)); return ((const struct Unaligned *) v)->i; } # elif defined(__DECC) # define unaligned32(a) (*(__unaligned uint32_t*)(a)) # else # define unaligned32(a) (*(uint32_t*)(a)) # endif #endif /*!ARCH_X86*/ extern void put_bits(PutBitContext *s, int n, unsigned int value); #ifdef ALT_BITSTREAM_WRITER # define pbBufPtr(s) ((s)->buf + ((s)->index>>3)) #else # define pbBufPtr(s) ((s)->buf_ptr) #endif /** * * PutBitContext must be flushed & aligned to a byte boundary before calling this. */ #ifdef ALT_BITSTREAM_WRITER /* assert((put_bits_count(s)&7)==0); */ # define skip_put_bytes(s,n) ((s)->index += (n)<<3), \ FIXME may need some cleaning of the buffer #else /* assert((put_bits_count(s)&7)==0); */ /* assert(s->bit_left==32); */ # define skip_put_bytes(s,n) ((s)->buf_ptr += (n)) #endif /* misc math functions */ extern const uint8_t ff_log2_tab[256]; #define av_log2_16bit(v) (((v)&0xff00)? 8+ff_log2_tab[(unsigned int)(v)>>8] :\ ff_log2_tab[v]) #define av_log2(v) (((v)&0xffff0000)? \ 16+av_log2_16bit((unsigned int)(v)>>16) : av_log2_16bit(v)) /* median of 3 */ #define mid_pred(a,b,c) \ (((a)>(b))? (((c)>(b))? (((c)>(a))?(a):(c)) : (b)) :\ (((b)>(c))? (((c)>(a))? (c):(a)) : (b))) #define clip(a,amin,amax) (((int)(a)<(int)(amin))?(int)(amin):\ (((int)(a)>(int)(amax))?(int)(amax):(int)(a))) /* math */ extern const uint8_t ff_sqrt_tab[128]; int64_t ff_gcd(int64_t a, int64_t b); #define COPY3_IF_LT(x,y,a,b,c,d) if((y)<(x)){(x)=(y);(a)=(b);(c)=(d);} /* avoid usage of various functions */ #define malloc please_use_av_malloc #define free please_use_av_free #define realloc please_use_av_realloc #define time time_is_forbidden_due_to_security_issues #define rand rand_is_forbidden_due_to_state_trashing #define srand srand_is_forbidden_due_to_state_trashing #if !(defined(LIBAVFORMAT_BUILD) || defined(_FRAMEHOOK_H)) # define printf please_use_av_log # define fprintf please_use_av_log #endif #define CHECKED_ALLOCZ(p, size)\ {\ p= av_mallocz(size);\ if(p==NULL && (size)!=0){\ perror("malloc");\ goto fail;\ }\ } #endif /* HAVE_AV_CONFIG_H */ #endif /* COMMON_H */ yorick-mpeg-0.1/libavcodec/CVS/0000755000076500001440000000000010741554772015727 5ustar frigautusersyorick-mpeg-0.1/libavcodec/CVS/Entries0000644000076500001440000000171411254260253017252 0ustar frigautusers/Makefile/1.1.1.1/Sat Oct 27 22:18:24 2007// /README/1.1.1.1/Sat Oct 27 22:18:21 2007// /avcodec.h/1.1.1.1/Sat Oct 27 22:18:22 2007// /bswap.h/1.1.1.1/Sat Oct 27 22:18:26 2007// /common.c/1.1.1.1/Sat Oct 27 22:18:22 2007// /common.h/1.1.1.1/Sat Oct 27 22:18:27 2007// /dsputil.c/1.1.1.1/Sat Oct 27 22:18:24 2007// /dsputil.h/1.1.1.1/Sat Oct 27 22:18:24 2007// /imgconvert.c/1.1.1.1/Sat Oct 27 22:18:24 2007// /integer.c/1.1.1.1/Sat Oct 27 22:18:21 2007// /integer.h/1.1.1.1/Sat Oct 27 22:18:24 2007// /jfdctint.c/1.1.1.1/Sat Oct 27 22:18:23 2007// /motion_est.c/1.1.1.1/Sat Oct 27 22:18:23 2007// /mpeg12.c/1.1.1.1/Sat Oct 27 22:18:26 2007// /mpegvideo.c/1.1.1.1/Sat Oct 27 22:18:27 2007// /mpegvideo.h/1.1.1.1/Sat Oct 27 22:18:22 2007// /ratecontrol.c/1.1.1.1/Sat Oct 27 22:18:27 2007// /simple_idct.c/1.1.1.1/Sat Oct 27 22:18:25 2007// /simple_idct.h/1.1.1.1/Sat Oct 27 22:18:26 2007// /utils.c/1.1.1.1/Sat Oct 27 22:18:24 2007// /mem.c/1.3/Fri Jan 11 02:52:02 2008// D yorick-mpeg-0.1/libavcodec/CVS/Repository0000644000076500001440000000002711254260253020014 0ustar 
frigautusersyorick-mpeg/libavcodec yorick-mpeg-0.1/libavcodec/CVS/Root0000644000076500001440000000007011254260253016556 0ustar frigautusers:ext:frigaut@yorick.cvs.sourceforge.net:/cvsroot/yorick yorick-mpeg-0.1/libavcodec/dsputil.c0000644000076500001440000005311411254260253017114 0ustar frigautusers/* * DSP utils * Copyright (c) 2000, 2001 Fabrice Bellard. * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * gmc & q-pel & 32/64 bit based MC by Michael Niedermayer */ /** * @file dsputil.c * DSP utils */ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #include "simple_idct.h" uint8_t cropTbl[256 + 2 * MAX_NEG_CROP]; uint32_t squareTbl[512]; const uint8_t ff_zigzag_direct[64] = { 0, 1, 8, 16, 9, 2, 3, 10, 17, 24, 32, 25, 18, 11, 4, 5, 12, 19, 26, 33, 40, 48, 41, 34, 27, 20, 13, 6, 7, 14, 21, 28, 35, 42, 49, 56, 57, 50, 43, 36, 29, 22, 15, 23, 30, 37, 44, 51, 58, 59, 52, 45, 38, 31, 39, 46, 53, 60, 61, 54, 47, 55, 62, 63 }; const uint8_t ff_alternate_horizontal_scan[64] = { 0, 1, 2, 3, 8, 9, 16, 17, 10, 11, 4, 5, 6, 7, 15, 14, 13, 12, 19, 18, 24, 25, 32, 33, 26, 27, 20, 21, 22, 23, 28, 29, 30, 31, 34, 35, 40, 41, 48, 49, 42, 43, 36, 37, 38, 39, 44, 45, 46, 47, 50, 51, 56, 57, 58, 59, 52, 53, 54, 55, 60, 61, 62, 63, }; const uint8_t ff_alternate_vertical_scan[64] = { 0, 8, 16, 24, 1, 9, 2, 10, 17, 25, 32, 40, 48, 56, 57, 49, 41, 33, 26, 18, 3, 11, 4, 12, 19, 27, 34, 42, 50, 58, 35, 43, 51, 59, 20, 28, 5, 13, 6, 14, 21, 29, 36, 44, 52, 60, 37, 45, 53, 61, 22, 30, 7, 15, 23, 31, 38, 46, 54, 62, 39, 47, 55, 63, }; /* Input permutation for the simple_idct_mmx */ static const uint8_t simple_mmx_permutation[64]={ 0x00, 0x08, 0x04, 0x09, 0x01, 0x0C, 0x05, 0x0D, 0x10, 0x18, 0x14, 0x19, 0x11, 0x1C, 0x15, 0x1D, 0x20, 0x28, 0x24, 0x29, 0x21, 0x2C, 0x25, 0x2D, 0x12, 0x1A, 0x16, 0x1B, 0x13, 0x1E, 0x17, 0x1F, 0x02, 0x0A, 0x06, 0x0B, 0x03, 0x0E, 0x07, 0x0F, 0x30, 0x38, 0x34, 0x39, 0x31, 0x3C, 0x35, 0x3D, 0x22, 0x2A, 0x26, 0x2B, 0x23, 0x2E, 0x27, 0x2F, 0x32, 0x3A, 0x36, 0x3B, 0x33, 0x3E, 0x37, 0x3F, }; static int pix_sum_c(uint8_t * pix, int line_size) { int s, i, j; s = 0; for (i = 0; i < 16; i++) { for (j = 0; j < 16; j += 8) { s += pix[0]; s += pix[1]; s += pix[2]; s += pix[3]; s += pix[4]; s += pix[5]; s += pix[6]; s += pix[7]; pix += 8; } pix += line_size - 16; } return s; } static int pix_norm1_c(uint8_t * pix, int line_size) { int s, i, j; uint32_t *sq = squareTbl + 256; s = 0; for (i = 0; i < 16; i++) { for (j = 0; j < 16; j += 8) { #if LONG_MAX > 2147483647 register uint64_t x=*(uint64_t*)pix; s += sq[x&0xff]; s += sq[(x>>8)&0xff]; s += sq[(x>>16)&0xff]; s += sq[(x>>24)&0xff]; s += sq[(x>>32)&0xff]; s += sq[(x>>40)&0xff]; s += sq[(x>>48)&0xff]; s += sq[(x>>56)&0xff]; #else register uint32_t x=*(uint32_t*)pix; s += sq[x&0xff]; s += sq[(x>>8)&0xff]; s += sq[(x>>16)&0xff]; s += 
sq[(x>>24)&0xff]; x=*(uint32_t*)(pix+4); s += sq[x&0xff]; s += sq[(x>>8)&0xff]; s += sq[(x>>16)&0xff]; s += sq[(x>>24)&0xff]; #endif pix += 8; } pix += line_size - 16; } return s; } int sse8_c(void *v, uint8_t * pix1, uint8_t * pix2, int line_size, int h) { int s, i; uint32_t *sq = squareTbl + 256; s = 0; for (i = 0; i < h; i++) { s += sq[(int)pix1[0] - (int)pix2[0]]; s += sq[(int)pix1[1] - (int)pix2[1]]; s += sq[(int)pix1[2] - (int)pix2[2]]; s += sq[(int)pix1[3] - (int)pix2[3]]; s += sq[(int)pix1[4] - (int)pix2[4]]; s += sq[(int)pix1[5] - (int)pix2[5]]; s += sq[(int)pix1[6] - (int)pix2[6]]; s += sq[(int)pix1[7] - (int)pix2[7]]; pix1 += line_size; pix2 += line_size; } return s; } int sse16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; uint32_t *sq = squareTbl + 256; s = 0; for (i = 0; i < h; i++) { s += sq[(int)pix1[ 0] - (int)pix2[ 0]]; s += sq[(int)pix1[ 1] - (int)pix2[ 1]]; s += sq[(int)pix1[ 2] - (int)pix2[ 2]]; s += sq[(int)pix1[ 3] - (int)pix2[ 3]]; s += sq[(int)pix1[ 4] - (int)pix2[ 4]]; s += sq[(int)pix1[ 5] - (int)pix2[ 5]]; s += sq[(int)pix1[ 6] - (int)pix2[ 6]]; s += sq[(int)pix1[ 7] - (int)pix2[ 7]]; s += sq[(int)pix1[ 8] - (int)pix2[ 8]]; s += sq[(int)pix1[ 9] - (int)pix2[ 9]]; s += sq[(int)pix1[10] - (int)pix2[10]]; s += sq[(int)pix1[11] - (int)pix2[11]]; s += sq[(int)pix1[12] - (int)pix2[12]]; s += sq[(int)pix1[13] - (int)pix2[13]]; s += sq[(int)pix1[14] - (int)pix2[14]]; s += sq[(int)pix1[15] - (int)pix2[15]]; pix1 += line_size; pix2 += line_size; } return s; } static void get_pixels_c(DCTELEM *block, const uint8_t *pixels, int line_size) { int i; /* read the pixels */ for(i=0;i<8;i++) { block[0] = pixels[0]; block[1] = pixels[1]; block[2] = pixels[2]; block[3] = pixels[3]; block[4] = pixels[4]; block[5] = pixels[5]; block[6] = pixels[6]; block[7] = pixels[7]; pixels += line_size; block += 8; } } static void diff_pixels_c(DCTELEM *block, const uint8_t *s1, const uint8_t *s2, int stride){ int i; /* read the pixels */ for(i=0;i<8;i++) { block[0] = (int)s1[0] - (int)s2[0]; block[1] = (int)s1[1] - (int)s2[1]; block[2] = (int)s1[2] - (int)s2[2]; block[3] = (int)s1[3] - (int)s2[3]; block[4] = (int)s1[4] - (int)s2[4]; block[5] = (int)s1[5] - (int)s2[5]; block[6] = (int)s1[6] - (int)s2[6]; block[7] = (int)s1[7] - (int)s2[7]; s1 += stride; s2 += stride; block += 8; } } #define avg2(a,b) ((a+b+1)>>1) #define avg4(a,b,c,d) ((a+b+c+d+2)>>2) static void gmc1_c(uint8_t *dst, uint8_t *src, int stride, int h, int x16, int y16, int rounder) { const int A=(16-x16)*(16-y16); const int B=( x16)*(16-y16); const int C=(16-x16)*( y16); const int D=( x16)*( y16); int i; for(i=0; i>8; dst[1]= (A*src[1] + B*src[2] + C*src[stride+1] + D*src[stride+2] + rounder)>>8; dst[2]= (A*src[2] + B*src[3] + C*src[stride+2] + D*src[stride+3] + rounder)>>8; dst[3]= (A*src[3] + B*src[4] + C*src[stride+3] + D*src[stride+4] + rounder)>>8; dst[4]= (A*src[4] + B*src[5] + C*src[stride+4] + D*src[stride+5] + rounder)>>8; dst[5]= (A*src[5] + B*src[6] + C*src[stride+5] + D*src[stride+6] + rounder)>>8; dst[6]= (A*src[6] + B*src[7] + C*src[stride+6] + D*src[stride+7] + rounder)>>8; dst[7]= (A*src[7] + B*src[8] + C*src[stride+7] + D*src[stride+8] + rounder)>>8; dst+= stride; src+= stride; } } static void gmc_c(uint8_t *dst, uint8_t *src, int stride, int h, int ox, int oy, int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height) { int y, vx, vy; const int s= 1<>16; src_y= vy>>16; frac_x= src_x&(s-1); frac_y= src_y&(s-1); src_x>>=shift; src_y>>=shift; if((unsigned)src_x < width){ 
if((unsigned)src_y < height){ index= src_x + src_y*stride; dst[y*stride + x]= ( ( src[index ]*(s-frac_x) + src[index +1]* frac_x )*(s-frac_y) + ( src[index+stride ]*(s-frac_x) + src[index+stride+1]* frac_x )* frac_y + r)>>(shift*2); }else{ index= src_x + clip(src_y, 0, height)*stride; dst[y*stride + x]= ( ( src[index ]*(s-frac_x) + src[index +1]* frac_x )*s + r)>>(shift*2); } }else{ if((unsigned)src_y < height){ index= clip(src_x, 0, width) + src_y*stride; dst[y*stride + x]= ( ( src[index ]*(s-frac_y) + src[index+stride ]* frac_y )*s + r)>>(shift*2); }else{ index= clip(src_x, 0, width) + clip(src_y, 0, height)*stride; dst[y*stride + x]= src[index ]; } } vx+= dxx; vy+= dyx; } ox += dxy; oy += dyy; } } int pix_abs16_c(void *v, uint8_t *pix1, uint8_t *pix2, int line_size, int h) { int s, i; s = 0; for(i=0;i>2)\ + ((b&0xFCFCFCFCUL)>>2);\ uint32_t l1,h1;\ \ pixels+=line_size;\ for(i=0; i>2)\ + ((b&0xFCFCFCFCUL)>>2);\ OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\ pixels+=line_size;\ block +=line_size;\ a= LD32(pixels );\ b= LD32(pixels+1);\ l0= (a&0x03030303UL)\ + (b&0x03030303UL)\ + 0x02020202UL;\ h0= ((a&0xFCFCFCFCUL)>>2)\ + ((b&0xFCFCFCFCUL)>>2);\ OP(*((uint32_t*)block), h0+h1+(((l0+l1)>>2)&0x0F0F0F0FUL));\ pixels+=line_size;\ block +=line_size;\ }\ pixels+=4-line_size*(h+1);\ block +=4-line_size*h;\ }\ }\ \ CALL_2X_PIXELS(OPNAME ## _pixels16_c , OPNAME ## _pixels8_c , 8)\ CALL_2X_PIXELS(OPNAME ## _pixels16_x2_c , OPNAME ## _pixels8_x2_c , 8)\ CALL_2X_PIXELS(OPNAME ## _pixels16_y2_c , OPNAME ## _pixels8_y2_c , 8)\ CALL_2X_PIXELS(OPNAME ## _pixels16_xy2_c, OPNAME ## _pixels8_xy2_c, 8)\ #define op_avg(a, b) a = rnd_avg32(a, b) #define op_put(a, b) a = b PIXOP2(avg, op_avg) PIXOP2(put, op_put) #undef op_avg #undef op_put /** * memset(blocks, 0, sizeof(DCTELEM)*6*64) */ static void clear_blocks_c(DCTELEM *blocks) { memset(blocks, 0, sizeof(DCTELEM)*6*64); } /* init static data */ void dsputil_static_init(void) { int i; for(i=0;i<256;i++) cropTbl[i + MAX_NEG_CROP] = i; for(i=0;iget_pixels = get_pixels_c; c->diff_pixels = diff_pixels_c; c->gmc1 = gmc1_c; c->gmc = gmc_c; c->clear_blocks = clear_blocks_c; c->pix_sum = pix_sum_c; c->pix_norm1 = pix_norm1_c; /* TODO [0] 16 [1] 8 */ c->pix_abs[0][0] = pix_abs16_c; c->pix_abs[0][1] = pix_abs16_x2_c; c->pix_abs[0][2] = pix_abs16_y2_c; c->pix_abs[0][3] = pix_abs16_xy2_c; c->pix_abs[1][0] = pix_abs8_c; c->pix_abs[1][1] = pix_abs8_x2_c; c->pix_abs[1][2] = pix_abs8_y2_c; c->pix_abs[1][3] = pix_abs8_xy2_c; #define dspfunc(PFX, IDX, NUM) \ c->PFX ## _pixels_tab[IDX][0] = PFX ## _pixels ## NUM ## _c; \ c->PFX ## _pixels_tab[IDX][1] = PFX ## _pixels ## NUM ## _x2_c; \ c->PFX ## _pixels_tab[IDX][2] = PFX ## _pixels ## NUM ## _y2_c; \ c->PFX ## _pixels_tab[IDX][3] = PFX ## _pixels ## NUM ## _xy2_c dspfunc(put, 0, 16); dspfunc(put, 1, 8); dspfunc(avg, 0, 16); dspfunc(avg, 1, 8); #undef dspfunc } yorick-mpeg-0.1/libavcodec/dsputil.h0000644000076500001440000001225611254260253017123 0ustar frigautusers/* * DSP utils * Copyright (c) 2000, 2001, 2002 Fabrice Bellard. * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** * @file dsputil.h * DSP utils. */ #ifndef DSPUTIL_H #define DSPUTIL_H #include "common.h" #include "avcodec.h" typedef short DCTELEM; extern void ff_jpeg_fdct_islow(DCTELEM *data); /* encoding scans */ extern const uint8_t ff_alternate_horizontal_scan[64]; extern const uint8_t ff_alternate_vertical_scan[64]; extern const uint8_t ff_zigzag_direct[64]; extern const uint8_t ff_zigzag248_direct[64]; /* pixel operations */ #define MAX_NEG_CROP 1024 /* temporary */ extern uint32_t squareTbl[512]; extern uint8_t cropTbl[256 + 2 * MAX_NEG_CROP]; /* add and put pixel (decoding) */ /* blocksizes for op_pixels_func are 8x4,8x8 16x8 16x16*/ /*h for op_pixels_func is limited to {width/2, width} but never larger than 16 and never smaller then 4*/ typedef void (*op_pixels_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int h); typedef void (*tpel_mc_func)(uint8_t *block/*align width (8 or 16)*/, const uint8_t *pixels/*align 1*/, int line_size, int w, int h); typedef void (*qpel_mc_func)(uint8_t *dst/*align width (8 or 16)*/, uint8_t *src/*align 1*/, int stride); typedef void (*h264_chroma_mc_func)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x, int y); /* motion estimation */ /* h is limited to {width/2, width, 2*width} but never larger than 16 and never smaller then 2*/ /* allthough currently h<4 is not used as functions with width <8 are not used and neither implemented*/ typedef int (*me_cmp_func)(void /*MpegEncContext*/ *s, uint8_t *blk1/*align width (8 or 16)*/, uint8_t *blk2/*align 1*/, int line_size, int h); /** * DSPContext. */ typedef struct DSPContext { /* pixel ops : interface with DCT */ void (*get_pixels)(DCTELEM *block/*align 16*/, const uint8_t *pixels/*align 8*/, int line_size); void (*diff_pixels)(DCTELEM *block/*align 16*/, const uint8_t *s1/*align 8*/, const uint8_t *s2/*align 8*/, int stride); /** * translational global motion compensation. */ void (*gmc1)(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int srcStride, int h, int x16, int y16, int rounder); /** * global motion compensation. */ void (*gmc )(uint8_t *dst/*align 8*/, uint8_t *src/*align 1*/, int stride, int h, int ox, int oy, int dxx, int dxy, int dyx, int dyy, int shift, int r, int width, int height); void (*clear_blocks)(DCTELEM *blocks/*align 16*/); int (*pix_sum)(uint8_t * pix, int line_size); int (*pix_norm1)(uint8_t * pix, int line_size); /** * Halfpel motion compensation with rounding (a+b+1)>>1. * this is an array[4][4] of motion compensation funcions for 4 * horizontal blocksizes (8,16) and the 4 halfpel positions
* *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ] * @param block destination where the result is stored * @param pixels source * @param line_size number of bytes in a horizontal line of block * @param h height */ op_pixels_func put_pixels_tab[2][4]; /** * Halfpel motion compensation with rounding (a+b+1)>>1. * This is an array[4][4] of motion compensation functions for 4 * horizontal blocksizes (8,16) and the 4 halfpel positions
* *pixels_tab[ 0->16xH 1->8xH ][ xhalfpel + 2*yhalfpel ] * @param block destination into which the result is averaged (a+b+1)>>1 * @param pixels source * @param line_size number of bytes in a horizontal line of block * @param h height */ op_pixels_func avg_pixels_tab[2][4]; me_cmp_func pix_abs[2][4]; } DSPContext; extern int sse16_c(void *s, uint8_t *blk1,uint8_t *blk2, int line_size, int h); extern int sse8_c(void *s, uint8_t *blk1,uint8_t *blk2, int line_size, int h); extern int pix_abs16_c(void *s, uint8_t *blk1,uint8_t *blk2, int line_size, int h); extern int pix_abs8_c(void *s, uint8_t *blk1,uint8_t *blk2, int line_size, int h); void dsputil_static_init(void); void dsputil_init(DSPContext* p, AVCodecContext *avctx); #define BYTE_VEC32(c) ((c)*0x01010101UL) #define rnd_avg32(a,b) (((a)|(b)) - ((((a)^(b)) & ~BYTE_VEC32(0x01)) >> 1)) #define __align8 #ifdef __GNUC__ struct unaligned_32 { uint32_t l; } __attribute__((packed)); #define LD32(a) (((const struct unaligned_32 *) (a))->l) #else /*#define LD32(a) (*((uint32_t*)(a)))*/ #define LD32(a) unaligned32(a) #endif #undef lrintf #define lrintf(x) ((long)(0.5+(x))) #endif yorick-mpeg-0.1/libavcodec/imgconvert.c0000644000076500001440000001677611254260253017622 0ustar frigautusers/* * Misc image convertion routines * Copyright (c) 2001, 2002, 2003 Fabrice Bellard. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** * @file imgconvert.c * Misc image convertion routines. */ /* TODO: * - write 'ffimg' program to test all the image related stuff * - move all api to slice based system * - integrate deinterlacing, postprocessing and scaling in the conversion process */ #include "avcodec.h" #include "dsputil.h" #define FF_COLOR_RGB 0 /* RGB color space */ #define FF_COLOR_GRAY 1 /* gray color space */ #define FF_COLOR_YUV 2 /* YUV color space. 16 <= Y <= 235, 16 <= U, V <= 240 */ #define FF_COLOR_YUV_JPEG 3 /* YUV color space. 
0 <= Y <= 255, 0 <= U, V <= 255 */ #define FF_PIXEL_PLANAR 0 /* each channel has one component in AVPicture */ #define FF_PIXEL_PACKED 1 /* only one components containing all the channels */ #define FF_PIXEL_PALETTE 2 /* one components containing indexes for a palette */ typedef struct PixFmtInfo { const char *name; uint8_t nb_channels; /* number of channels (including alpha) */ uint8_t color_type; /* color type (see FF_COLOR_xxx constants) */ uint8_t pixel_type; /* pixel storage type (see FF_PIXEL_xxx constants) */ uint8_t is_alpha : 1; /* true if alpha can be specified */ uint8_t x_chroma_shift; /* X chroma subsampling factor is 2 ^ shift */ uint8_t y_chroma_shift; /* Y chroma subsampling factor is 2 ^ shift */ uint8_t depth; /* bit depth of the color components */ } PixFmtInfo; /* this table gives more information about formats */ static PixFmtInfo pix_fmt_info[PIX_FMT_NB] = { /* YUV formats */ { "yuv420p", 3, FF_COLOR_YUV, FF_PIXEL_PLANAR, 0, 1, 1, 8 }, { "yuv422", 1, FF_COLOR_YUV, FF_PIXEL_PACKED, 0, 1, 0, 8 }, /* unused */ /* RGB formats */ { "rgb24", 3, FF_COLOR_RGB, FF_PIXEL_PACKED, 0, 0, 0, 8 } }; void avcodec_get_chroma_sub_sample(int pix_fmt, int *h_shift, int *v_shift) { *h_shift = pix_fmt_info[pix_fmt].x_chroma_shift; *v_shift = pix_fmt_info[pix_fmt].y_chroma_shift; } /* Picture field are filled with 'ptr' addresses. Also return size */ int avpicture_fill(AVPicture *picture, uint8_t *ptr, int pix_fmt, int width, int height) { int size, w2, h2, size2; PixFmtInfo *pinfo; pinfo = &pix_fmt_info[pix_fmt]; size = width * height; switch(pix_fmt) { case PIX_FMT_YUV420P: w2 = (width + (1 << pinfo->x_chroma_shift) - 1) >> pinfo->x_chroma_shift; h2 = (height + (1 << pinfo->y_chroma_shift) - 1) >> pinfo->y_chroma_shift; size2 = w2 * h2; picture->data[0] = ptr; picture->data[1] = picture->data[0] + size; picture->data[2] = picture->data[1] + size2; picture->linesize[0] = width; picture->linesize[1] = w2; picture->linesize[2] = w2; return size + 2 * size2; case PIX_FMT_RGB24: picture->data[0] = ptr; picture->data[1] = NULL; picture->data[2] = NULL; picture->linesize[0] = width * 3; return size * 3; default: picture->data[0] = NULL; picture->data[1] = NULL; picture->data[2] = NULL; picture->data[3] = NULL; return -1; } } int avpicture_get_size(int pix_fmt, int width, int height) { AVPicture dummy_pict; return avpicture_fill(&dummy_pict, NULL, pix_fmt, width, height); } #define SCALEBITS 10 #define ONE_HALF (1 << (SCALEBITS - 1)) #define FIX(x) ((int) ((x) * (1<> SCALEBITS) #define RGB_TO_U_CCIR(r1, g1, b1, shift)\ (((- FIX(0.16874*224.0/255.0) * r1 - FIX(0.33126*224.0/255.0) * g1 + \ FIX(0.50000*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128) #define RGB_TO_V_CCIR(r1, g1, b1, shift)\ (((FIX(0.50000*224.0/255.0) * r1 - FIX(0.41869*224.0/255.0) * g1 - \ FIX(0.08131*224.0/255.0) * b1 + (ONE_HALF << shift) - 1) >> (SCALEBITS + shift)) + 128) #undef RGB_IN #define RGB_IN(r, g, b, s)\ {\ r = (s)[0];\ g = (s)[1];\ b = (s)[2];\ } #undef BPP #define BPP 3 static void rgb24_to_yuv420p(AVPicture *dst, const AVPicture *src, int width, int height); static void rgb24_to_yuv420p(AVPicture *dst, const AVPicture *src, int width, int height) { int wrap, wrap3, width2; int r, g, b, r1, g1, b1, w; uint8_t *lum, *cb, *cr; const uint8_t *p; lum = dst->data[0]; cb = dst->data[1]; cr = dst->data[2]; width2 = (width + 1) >> 1; wrap = dst->linesize[0]; wrap3 = src->linesize[0]; p = src->data[0]; for (; height>=2 ; height-=2) { for(w = width ; w>=2 ; w-=2) { RGB_IN(r, g, b, p); r1 = r; g1 = 
g; b1 = b; lum[0] = RGB_TO_Y_CCIR(r, g, b); RGB_IN(r, g, b, p + BPP); r1 += r; g1 += g; b1 += b; lum[1] = RGB_TO_Y_CCIR(r, g, b); p += wrap3; lum += wrap; RGB_IN(r, g, b, p); r1 += r; g1 += g; b1 += b; lum[0] = RGB_TO_Y_CCIR(r, g, b); RGB_IN(r, g, b, p + BPP); r1 += r; g1 += g; b1 += b; lum[1] = RGB_TO_Y_CCIR(r, g, b); cb[0] = RGB_TO_U_CCIR(r1, g1, b1, 2); cr[0] = RGB_TO_V_CCIR(r1, g1, b1, 2); cb++; cr++; p += -wrap3 + 2 * BPP; lum += -wrap + 2; } if (w) { RGB_IN(r, g, b, p); r1 = r; g1 = g; b1 = b; lum[0] = RGB_TO_Y_CCIR(r, g, b); p += wrap3; lum += wrap; RGB_IN(r, g, b, p); r1 += r; g1 += g; b1 += b; lum[0] = RGB_TO_Y_CCIR(r, g, b); cb[0] = RGB_TO_U_CCIR(r1, g1, b1, 1); cr[0] = RGB_TO_V_CCIR(r1, g1, b1, 1); cb++; cr++; p += -wrap3 + BPP; lum += -wrap + 1; } p += wrap3 + (wrap3 - width * BPP); lum += wrap + (wrap - width); cb += dst->linesize[1] - width2; cr += dst->linesize[2] - width2; } /* handle odd height */ if (height) { for(w = width; w >= 2; w -= 2) { RGB_IN(r, g, b, p); r1 = r; g1 = g; b1 = b; lum[0] = RGB_TO_Y_CCIR(r, g, b); RGB_IN(r, g, b, p + BPP); r1 += r; g1 += g; b1 += b; lum[1] = RGB_TO_Y_CCIR(r, g, b); cb[0] = RGB_TO_U_CCIR(r1, g1, b1, 1); cr[0] = RGB_TO_V_CCIR(r1, g1, b1, 1); cb++; cr++; p += 2 * BPP; lum += 2; } if (w) { RGB_IN(r, g, b, p); lum[0] = RGB_TO_Y_CCIR(r, g, b); cb[0] = RGB_TO_U_CCIR(r, g, b, 0); cr[0] = RGB_TO_V_CCIR(r, g, b, 0); } } } /* XXX: always use linesize. Return -1 if not supported */ int img_convert(AVPicture *dst, int dst_pix_fmt, const AVPicture *src, int src_pix_fmt, int src_width, int src_height) { if (src_pix_fmt!=PIX_FMT_RGB24 || dst_pix_fmt!=PIX_FMT_YUV420P) return -1; if (src_width <= 0 || src_height <= 0) return 0; rgb24_to_yuv420p(dst, src, src_width, src_height); return 0; } #undef FIX yorick-mpeg-0.1/libavcodec/integer.c0000644000076500001440000001213111254260253017057 0ustar frigautusers/* * arbitrary precision integers * Copyright (c) 2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /** * @file integer.c * arbitrary precision integers. 
* @author Michael Niedermayer */ #include "common.h" #include "integer.h" AVInteger av_add_i(AVInteger a, AVInteger b){ int i, carry=0; for(i=0; i>16) + a.v[i] + b.v[i]; a.v[i]= carry; } return a; } AVInteger av_sub_i(AVInteger a, AVInteger b){ int i, carry=0; for(i=0; i>16) + a.v[i] - b.v[i]; a.v[i]= carry; } return a; } int av_log2_i(AVInteger a){ int i; for(i=AV_INTEGER_SIZE-1; i>=0; i--){ if(a.v[i]) return av_log2_16bit(a.v[i]) + 16*i; } return -1; } AVInteger av_mul_i(AVInteger a, AVInteger b){ AVInteger out; int i, j; int na= (av_log2_i(a)+16) >> 4; int nb= (av_log2_i(b)+16) >> 4; memset(&out, 0, sizeof(out)); for(i=0; i>16) + out.v[j] + a.v[i]*b.v[j-i]; out.v[j]= carry; } } return out; } int av_cmp_i(AVInteger a, AVInteger b){ int i; int v= (int16_t)a.v[AV_INTEGER_SIZE-1] - (int16_t)b.v[AV_INTEGER_SIZE-1]; if(v) return (v>>16)|1; for(i=AV_INTEGER_SIZE-2; i>=0; i--){ int v= a.v[i] - b.v[i]; if(v) return (v>>16)|1; } return 0; } AVInteger av_shr_i(AVInteger a, int s){ AVInteger out; int i; for(i=0; i>4); unsigned int v=0; if(index+1=0) v = a.v[index+1]<<16; if(index =0) v+= a.v[index ]; out.v[i]= v >> (s&15); } return out; } AVInteger av_mod_i(AVInteger *quot, AVInteger a, AVInteger b){ int i= av_log2_i(a) - av_log2_i(b); AVInteger quot_temp; if(!quot) quot = "_temp; assert((int16_t)a[AV_INTEGER_SIZE-1] >= 0 && (int16_t)b[AV_INTEGER_SIZE-1] >= 0); assert(av_log2(b)>=0); if(i > 0) b= av_shr_i(b, -i); memset(quot, 0, sizeof(AVInteger)); while(i-- >= 0){ *quot= av_shr_i(*quot, -1); if(av_cmp_i(a, b) >= 0){ a= av_sub_i(a, b); quot->v[0] += 1; } b= av_shr_i(b, 1); } return a; } AVInteger av_div_i(AVInteger a, AVInteger b){ AVInteger quot; av_mod_i(", a, b); return quot; } AVInteger av_int2i(int64_t a){ AVInteger out; int i; for(i=0; i>=16; } return out; } int64_t av_i2int(AVInteger a){ int i; int64_t out=(int8_t)a.v[AV_INTEGER_SIZE-1]; for(i= AV_INTEGER_SIZE-2; i>=0; i--){ out = (out<<16) + a.v[i]; } return out; } #if 0 #undef NDEBUG #include const uint8_t ff_log2_tab[256]={ 0,0,1,1,2,2,2,2,3,3,3,3,3,3,3,3,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4,4, 5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5,5, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6,6, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7, 7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7,7 }; main(){ int64_t a,b; for(a=7; a<256*256*256; a+=13215){ for(b=3; b<256*256*256; b+=27118){ AVInteger ai= av_int2i(a); AVInteger bi= av_int2i(b); assert(av_i2int(ai) == a); assert(av_i2int(bi) == b); assert(av_i2int(av_add_i(ai,bi)) == a+b); assert(av_i2int(av_sub_i(ai,bi)) == a-b); assert(av_i2int(av_mul_i(ai,bi)) == a*b); assert(av_i2int(av_shr_i(ai, 9)) == a>>9); assert(av_i2int(av_shr_i(ai,-9)) == a<<9); assert(av_i2int(av_shr_i(ai, 17)) == a>>17); assert(av_i2int(av_shr_i(ai,-17)) == a<<17); assert(av_log2_i(ai) == av_log2(a)); assert(av_i2int(av_div_i(ai,bi)) == a/b); } } } #endif yorick-mpeg-0.1/libavcodec/integer.h0000644000076500001440000000277311254260253017077 0ustar frigautusers/* * arbitrary precision integers * Copyright (c) 2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later 
version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * */ /** * @file integer.h * arbitrary precision integers * @author Michael Niedermayer */ #ifndef INTEGER_H #define INTEGER_H #define AV_INTEGER_SIZE 8 typedef struct AVInteger{ uint16_t v[AV_INTEGER_SIZE]; } AVInteger; AVInteger av_add_i(AVInteger a, AVInteger b); AVInteger av_sub_i(AVInteger a, AVInteger b); int av_log2_i(AVInteger a); AVInteger av_mul_i(AVInteger a, AVInteger b); int av_cmp_i(AVInteger a, AVInteger b); AVInteger av_shr_i(AVInteger a, int s); AVInteger av_mod_i(AVInteger *quot, AVInteger a, AVInteger b); AVInteger av_div_i(AVInteger a, AVInteger b); AVInteger av_int2i(int64_t a); int64_t av_i2int(AVInteger a); #endif /* INTEGER_H*/ yorick-mpeg-0.1/libavcodec/jfdctint.c0000644000076500001440000002625611254260253017244 0ustar frigautusers/* * jfdctint.c * * Copyright (C) 1991-1996, Thomas G. Lane. * This file is part of the Independent JPEG Group's software. * For conditions of distribution and use, see the accompanying README file. * * This file contains a slow-but-accurate integer implementation of the * forward DCT (Discrete Cosine Transform). * * A 2-D DCT can be done by 1-D DCT on each row followed by 1-D DCT * on each column. Direct algorithms are also available, but they are * much more complex and seem not to be any faster when reduced to code. * * This implementation is based on an algorithm described in * C. Loeffler, A. Ligtenberg and G. Moschytz, "Practical Fast 1-D DCT * Algorithms with 11 Multiplications", Proc. Int'l. Conf. on Acoustics, * Speech, and Signal Processing 1989 (ICASSP '89), pp. 988-991. * The primary algorithm described there uses 11 multiplies and 29 adds. * We use their alternate method with 12 multiplies and 32 adds. * The advantage of this method is that no data path contains more than one * multiplication; this allows a very simple and accurate implementation in * scaled fixed-point arithmetic, with a minimal number of shifts. */ /** * @file jfdctint.c * Independent JPEG Group's slow & accurate dct. */ #include #include #include "common.h" #include "dsputil.h" #define DCTSIZE 8 #define BITS_IN_JSAMPLE 8 #define RIGHT_SHIFT(x, n) ((x) >> (n)) #define MULTIPLY16C16(var,const) ((var)*(const)) #if 1 /*def USE_ACCURATE_ROUNDING*/ #define DESCALE(x,n) RIGHT_SHIFT((x) + (1 << ((n) - 1)), n) #else #define DESCALE(x,n) RIGHT_SHIFT(x, n) #endif /* * This module is specialized to the case DCTSIZE = 8. */ #if DCTSIZE != 8 Sorry, this code only copes with 8x8 DCTs. /* deliberate syntax err */ #endif /* * The poop on this scaling stuff is as follows: * * Each 1-D DCT step produces outputs which are a factor of sqrt(N) * larger than the true DCT outputs. The final outputs are therefore * a factor of N larger than desired; since N=8 this can be cured by * a simple right shift at the end of the algorithm. The advantage of * this arrangement is that we save two multiplications per 1-D DCT, * because the y0 and y4 outputs need not be divided by sqrt(N). * In the IJG code, this factor of 8 is removed by the quantization step * (in jcdctmgr.c), NOT in this module. 
* * We have to do addition and subtraction of the integer inputs, which * is no problem, and multiplication by fractional constants, which is * a problem to do in integer arithmetic. We multiply all the constants * by CONST_SCALE and convert them to integer constants (thus retaining * CONST_BITS bits of precision in the constants). After doing a * multiplication we have to divide the product by CONST_SCALE, with proper * rounding, to produce the correct output. This division can be done * cheaply as a right shift of CONST_BITS bits. We postpone shifting * as long as possible so that partial sums can be added together with * full fractional precision. * * The outputs of the first pass are scaled up by PASS1_BITS bits so that * they are represented to better-than-integral precision. These outputs * require BITS_IN_JSAMPLE + PASS1_BITS + 3 bits; this fits in a 16-bit word * with the recommended scaling. (For 12-bit sample data, the intermediate * array is int32_t anyway.) * * To avoid overflow of the 32-bit intermediate results in pass 2, we must * have BITS_IN_JSAMPLE + CONST_BITS + PASS1_BITS <= 26. Error analysis * shows that the values given below are the most effective. */ #if BITS_IN_JSAMPLE == 8 #define CONST_BITS 13 #define PASS1_BITS 4 /* set this to 2 if 16x16 multiplies are faster */ #else #define CONST_BITS 13 #define PASS1_BITS 1 /* lose a little precision to avoid overflow */ #endif /* Some C compilers fail to reduce "FIX(constant)" at compile time, thus * causing a lot of useless floating-point operations at run time. * To get around this we use the following pre-calculated constants. * If you change CONST_BITS you may want to add appropriate values. * (With a reasonable C compiler, you can just rely on the FIX() macro...) */ #if CONST_BITS == 13 #define FIX_0_298631336 ((int32_t) 2446) /* FIX(0.298631336) */ #define FIX_0_390180644 ((int32_t) 3196) /* FIX(0.390180644) */ #define FIX_0_541196100 ((int32_t) 4433) /* FIX(0.541196100) */ #define FIX_0_765366865 ((int32_t) 6270) /* FIX(0.765366865) */ #define FIX_0_899976223 ((int32_t) 7373) /* FIX(0.899976223) */ #define FIX_1_175875602 ((int32_t) 9633) /* FIX(1.175875602) */ #define FIX_1_501321110 ((int32_t) 12299) /* FIX(1.501321110) */ #define FIX_1_847759065 ((int32_t) 15137) /* FIX(1.847759065) */ #define FIX_1_961570560 ((int32_t) 16069) /* FIX(1.961570560) */ #define FIX_2_053119869 ((int32_t) 16819) /* FIX(2.053119869) */ #define FIX_2_562915447 ((int32_t) 20995) /* FIX(2.562915447) */ #define FIX_3_072711026 ((int32_t) 25172) /* FIX(3.072711026) */ #else #define FIX_0_298631336 FIX(0.298631336) #define FIX_0_390180644 FIX(0.390180644) #define FIX_0_541196100 FIX(0.541196100) #define FIX_0_765366865 FIX(0.765366865) #define FIX_0_899976223 FIX(0.899976223) #define FIX_1_175875602 FIX(1.175875602) #define FIX_1_501321110 FIX(1.501321110) #define FIX_1_847759065 FIX(1.847759065) #define FIX_1_961570560 FIX(1.961570560) #define FIX_2_053119869 FIX(2.053119869) #define FIX_2_562915447 FIX(2.562915447) #define FIX_3_072711026 FIX(3.072711026) #endif /* Multiply an int32_t variable by an int32_t constant to yield an int32_t result. * For 8-bit samples with the recommended scaling, all the variable * and constant values involved are no more than 16 bits wide, so a * 16x16->32 bit multiply can be used instead of a full 32x32 multiply. * For 12-bit samples, a full 32-bit multiplication will be needed. 
*/ #if BITS_IN_JSAMPLE == 8 && CONST_BITS<=13 && PASS1_BITS<=2 #define MULTIPLY(var,const) MULTIPLY16C16(var,const) #else #define MULTIPLY(var,const) ((var) * (const)) #endif /* * Perform the forward DCT on one block of samples. */ void ff_jpeg_fdct_islow (DCTELEM * data) { int_fast32_t tmp0, tmp1, tmp2, tmp3, tmp4, tmp5, tmp6, tmp7; int_fast32_t tmp10, tmp11, tmp12, tmp13; int_fast32_t z1, z2, z3, z4, z5; DCTELEM *dataptr; int ctr; /* Pass 1: process rows. */ /* Note results are scaled up by sqrt(8) compared to a true DCT; */ /* furthermore, we scale the results by 2**PASS1_BITS. */ dataptr = data; for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { tmp0 = dataptr[0] + dataptr[7]; tmp7 = dataptr[0] - dataptr[7]; tmp1 = dataptr[1] + dataptr[6]; tmp6 = dataptr[1] - dataptr[6]; tmp2 = dataptr[2] + dataptr[5]; tmp5 = dataptr[2] - dataptr[5]; tmp3 = dataptr[3] + dataptr[4]; tmp4 = dataptr[3] - dataptr[4]; /* Even part per LL&M figure 1 --- note that published figure is faulty; * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". */ tmp10 = tmp0 + tmp3; tmp13 = tmp0 - tmp3; tmp11 = tmp1 + tmp2; tmp12 = tmp1 - tmp2; dataptr[0] = (DCTELEM) ((tmp10 + tmp11) << PASS1_BITS); dataptr[4] = (DCTELEM) ((tmp10 - tmp11) << PASS1_BITS); z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); dataptr[2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), CONST_BITS-PASS1_BITS); dataptr[6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), CONST_BITS-PASS1_BITS); /* Odd part per figure 8 --- note paper omits factor of sqrt(2). * cK represents cos(K*pi/16). * i0..i3 in the paper are tmp4..tmp7 here. */ z1 = tmp4 + tmp7; z2 = tmp5 + tmp6; z3 = tmp4 + tmp6; z4 = tmp5 + tmp7; z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */ tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ z3 += z5; z4 += z5; dataptr[7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS-PASS1_BITS); dataptr[5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS-PASS1_BITS); dataptr[3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS-PASS1_BITS); dataptr[1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS-PASS1_BITS); dataptr += DCTSIZE; /* advance pointer to next row */ } /* Pass 2: process columns. * We remove the PASS1_BITS scaling, but leave the results scaled up * by an overall factor of 8. */ dataptr = data; for (ctr = DCTSIZE-1; ctr >= 0; ctr--) { tmp0 = dataptr[DCTSIZE*0] + dataptr[DCTSIZE*7]; tmp7 = dataptr[DCTSIZE*0] - dataptr[DCTSIZE*7]; tmp1 = dataptr[DCTSIZE*1] + dataptr[DCTSIZE*6]; tmp6 = dataptr[DCTSIZE*1] - dataptr[DCTSIZE*6]; tmp2 = dataptr[DCTSIZE*2] + dataptr[DCTSIZE*5]; tmp5 = dataptr[DCTSIZE*2] - dataptr[DCTSIZE*5]; tmp3 = dataptr[DCTSIZE*3] + dataptr[DCTSIZE*4]; tmp4 = dataptr[DCTSIZE*3] - dataptr[DCTSIZE*4]; /* Even part per LL&M figure 1 --- note that published figure is faulty; * rotator "sqrt(2)*c1" should be "sqrt(2)*c6". 
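 * The shared rotation term z1 = (tmp12 + tmp13) * FIX_0_541196100 computed
 * just below lets the c2 and c6 outputs (dataptr[2] and dataptr[6]) each be
 * finished with a single additional multiply, which is part of how the 1-D
 * pass stays at the 12 multiplies quoted in the header comment.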
*/ tmp10 = tmp0 + tmp3; tmp13 = tmp0 - tmp3; tmp11 = tmp1 + tmp2; tmp12 = tmp1 - tmp2; dataptr[DCTSIZE*0] = (DCTELEM) DESCALE(tmp10 + tmp11, PASS1_BITS); dataptr[DCTSIZE*4] = (DCTELEM) DESCALE(tmp10 - tmp11, PASS1_BITS); z1 = MULTIPLY(tmp12 + tmp13, FIX_0_541196100); dataptr[DCTSIZE*2] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp13, FIX_0_765366865), CONST_BITS+PASS1_BITS); dataptr[DCTSIZE*6] = (DCTELEM) DESCALE(z1 + MULTIPLY(tmp12, - FIX_1_847759065), CONST_BITS+PASS1_BITS); /* Odd part per figure 8 --- note paper omits factor of sqrt(2). * cK represents cos(K*pi/16). * i0..i3 in the paper are tmp4..tmp7 here. */ z1 = tmp4 + tmp7; z2 = tmp5 + tmp6; z3 = tmp4 + tmp6; z4 = tmp5 + tmp7; z5 = MULTIPLY(z3 + z4, FIX_1_175875602); /* sqrt(2) * c3 */ tmp4 = MULTIPLY(tmp4, FIX_0_298631336); /* sqrt(2) * (-c1+c3+c5-c7) */ tmp5 = MULTIPLY(tmp5, FIX_2_053119869); /* sqrt(2) * ( c1+c3-c5+c7) */ tmp6 = MULTIPLY(tmp6, FIX_3_072711026); /* sqrt(2) * ( c1+c3+c5-c7) */ tmp7 = MULTIPLY(tmp7, FIX_1_501321110); /* sqrt(2) * ( c1+c3-c5-c7) */ z1 = MULTIPLY(z1, - FIX_0_899976223); /* sqrt(2) * (c7-c3) */ z2 = MULTIPLY(z2, - FIX_2_562915447); /* sqrt(2) * (-c1-c3) */ z3 = MULTIPLY(z3, - FIX_1_961570560); /* sqrt(2) * (-c3-c5) */ z4 = MULTIPLY(z4, - FIX_0_390180644); /* sqrt(2) * (c5-c3) */ z3 += z5; z4 += z5; dataptr[DCTSIZE*7] = (DCTELEM) DESCALE(tmp4 + z1 + z3, CONST_BITS+PASS1_BITS); dataptr[DCTSIZE*5] = (DCTELEM) DESCALE(tmp5 + z2 + z4, CONST_BITS+PASS1_BITS); dataptr[DCTSIZE*3] = (DCTELEM) DESCALE(tmp6 + z2 + z3, CONST_BITS+PASS1_BITS); dataptr[DCTSIZE*1] = (DCTELEM) DESCALE(tmp7 + z1 + z4, CONST_BITS+PASS1_BITS); dataptr++; /* advance pointer to next column */ } } yorick-mpeg-0.1/libavcodec/Makefile0000644000076500001440000000326111254260733016725 0ustar frigautusers# # This bears essentially no resemblance to the original, # which bore the following notice: # libavcodec Makefile # (c) 2000-2003 Fabrice Bellard # MAKE=make CC=gcc AR=ar RANLIB=ranlib STRIP=strip OPTFLAGS=-O3 -g -ansi -pedantic -Wall -Wstrict-prototypes -Wmissing-prototypes -Wmissing-declarations LIBPREF=lib LIBSUF=.a # following is necessary on Linux systems to avoid SELinux problems # if you link this to a shared library (like mpeg.so) # see http://people.redhat.com/drepper/textrelocs.html PICFLAG=-fPIC # NOTE: -I.. is needed to include config.h CFLAGS=$(OPTFLAGS) $(PICFLAG) -DHAVE_AV_CONFIG_H -I.. 
-D_FILE_OFFSET_BITS=64 -D_LARGEFILE_SOURCE -D_GNU_SOURCE OBJS= common.o utils.o mem.o dsputil.o mpegvideo.o mpeg12.o imgconvert.o \ ratecontrol.o motion_est.o integer.o simple_idct.o jfdctint.o ASM_OBJS= LIB= $(LIBPREF)avcodec$(LIBSUF) all: $(LIB) $(LIB): $(OBJS) rm -f $@ $(AR) rc $@ $(OBJS) $(RANLIB) $@ # common.h->../config.h bswap.h # avcodec.h->common.h # dsputil.h->common.h avcodec.h # mpegvideo.h->dsputil.h AVCODEC_H=avcodec.h common.h bswap.h ../config.h common.o: $(AVCODEC_H) utils.o: integer.h mpegvideo.h dsputil.h $(AVCODEC_H) mem.o: $(AVCODEC_H) dsputil.o: simple_idct.h dsputil.h $(AVCODEC_H) mpegvideo.o: mpegvideo.h simple_idct.h dsputil.h $(AVCODEC_H) imgconvert.o: dsputil.h $(AVCODEC_H) mpeg12.o: mpegvideo.h dsputil.h $(AVCODEC_H) ratecontrol.o: mpegvideo.h dsputil.h $(AVCODEC_H) motion_est.o: mpegvideo.h dsputil.h $(AVCODEC_H) integer.o: integer.h common.h bswap.h ../config.h simple_idct.o: simple_idct.h dsputil.h $(AVCODEC_H) jfdctint.o: dsputil.h $(AVCODEC_H) clean: rm -f *.o *.d *~ .depend $(LIB) *.so distclean: clean rm -f Makefile.bak .depend yorick-mpeg-0.1/libavcodec/mem.c0000644000076500001440000000673511254260253016215 0ustar frigautusers/* * default memory allocator for libavcodec * Copyright (c) 2002 Fabrice Bellard. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** * @file mem.c * default memory allocator for libavcodec. */ #include "avcodec.h" /* here we can use OS dependant allocation functions */ #undef malloc #undef free #undef realloc #ifdef HAVE_MALLOC_H #ifdef HAVE_OSX #include #define memalign(align,size) malloc (size) #else #include #endif #endif /* you can redefine av_malloc and av_free in your project to use your memory allocator. You do not need to suppress this file because the linker will do it automatically */ /** * Memory allocation of size byte with alignment suitable for all * memory accesses (including vectors if available on the * CPU). av_malloc(0) must return a non NULL pointer. */ void *av_malloc(unsigned int size) { void *ptr; #ifdef MEMALIGN_HACK int diff; ptr = malloc(size+16+1); diff= ((-(int)ptr - 1)&15) + 1; ptr += diff; ((char*)ptr)[-1]= diff; #elif defined (HAVE_MEMALIGN) ptr = memalign(16,size); /* Why 64? Indeed, we should align it: on 4 for 386 on 16 for 486 on 32 for 586, PPro - k6-III on 64 for K7 (maybe for P3 too). Because L1 and L2 caches are aligned on those values. But I don't want to code such logic here! */ /* Why 16? because some cpus need alignment, for example SSE2 on P4, & most RISC cpus it will just trigger an exception and the unaligned load will be done in the exception handler or it will just segfault (SSE2 on P4) Why not larger? because i didnt see a difference in benchmarks ... 
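      (Illustrative note on the MEMALIGN_HACK branch above: diff always lands
      in the range 1..16, so there is at least one byte in front of the
      returned pointer in which to stash it; e.g. if malloc() returns an
      address ending in ...3, then diff = ((-3 - 1) & 15) + 1 = 13 and
      ptr + 13 ends in ...0, i.e. is 16-byte aligned.)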
*/ /* benchmarks with p3 memalign(64)+1 3071,3051,3032 memalign(64)+2 3051,3032,3041 memalign(64)+4 2911,2896,2915 memalign(64)+8 2545,2554,2550 memalign(64)+16 2543,2572,2563 memalign(64)+32 2546,2545,2571 memalign(64)+64 2570,2533,2558 btw, malloc seems to do 8 byte alignment by default here */ #else ptr = malloc(size); #endif return ptr; } /** * av_realloc semantics (same as glibc): if ptr is NULL and size > 0, * identical to malloc(size). If size is zero, it is identical to * free(ptr) and NULL is returned. */ void *av_realloc(void *ptr, unsigned int size) { #ifdef MEMALIGN_HACK /*FIXME this isnt aligned correctly though it probably isnt needed*/ int diff= ptr ? ((char*)ptr)[-1] : 0; return realloc(ptr - diff, size + diff) + diff; #else return realloc(ptr, size); #endif } /* NOTE: ptr = NULL is explicetly allowed */ void av_free(void *ptr) { /* XXX: this test should not be needed on most libcs */ if (ptr) #ifdef MEMALIGN_HACK free(ptr - ((char*)ptr)[-1]); #else free(ptr); #endif } yorick-mpeg-0.1/libavcodec/motion_est.c0000644000076500001440000005270511254260253017615 0ustar frigautusers/* * Motion estimation * Copyright (c) 2000,2001 Fabrice Bellard. * Copyright (c) 2002-2004 Michael Niedermayer * * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * new Motion Estimation (X1/EPZS) by Michael Niedermayer */ /** * @file motion_est.c * Motion estimation. 
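 *
 * (Rough roadmap of the EPZS-style search implemented below, phrased
 * informally: epzs_motion_search() seeds the search with the left, top,
 * top-right and median predictors (P_LEFT, P_TOP, P_TOPRIGHT, P_MEDIAN)
 * plus the scaled co-located vector from the previous frame, keeps the
 * candidate with the lowest SAD-plus-vector-cost, then refines it with a
 * small diamond pattern; a macroblock whose neighbouring predictors are all
 * zero and whose zero-vector SAD is already under 256 is skipped early via
 * c->skip. sad_hpel_motion_search() then adds the half-pel refinement.)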
*/ #include #include #include #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #undef NDEBUG #include #define SQ(a) ((a)*(a)) #define P_LEFT P[1] #define P_TOP P[2] #define P_TOPRIGHT P[3] #define P_MEDIAN P[4] #define P_MV1 P[9] #define init_ref(init_ref2)\ {\ int i, offset[3];\ offset[0] = (mb_y<<4)*c->stride + (mb_x<<4);\ offset[1] = offset[2] = (mb_y<<3)*c->uvstride + (mb_x<<3);\ for(i=0; i<3; i++){\ c->src[0][i]= s->new_picture.data[i] + offset[i];\ c->ref[0][i]= s->last_picture.data[i] + offset[i];\ init_ref2\ }\ } #define init_ref2 c->ref[2][i]= s->next_picture.data[i] + offset[i]; #define cmp(x,y) pix_abs16_c(s, c->src[0][0], c->ref[ref_index][0] + (x) + (y)*c->stride, c->stride, 16) #define CHECK_MV(x,y)\ {\ const int key= ((y)<me; int best[2]={0, 0}; int d, dmin; int map_generation; const int penalty_factor= c->penalty_factor; const int ref_mv_stride= s->mb_stride; /*pass as arg FIXME*/ const int ref_mv_xy= s->mb_x + s->mb_y*ref_mv_stride; /*add to last_mv beforepassing FIXME*/ uint32_t * const score_map= c->score_map; const int xmin= c->xmin; const int ymin= c->ymin; const int xmax= c->xmax; const int ymax= c->ymax; uint8_t *mv_penalty= c->current_mv_penalty; const int pred_x= c->pred_x; const int pred_y= c->pred_y; uint32_t *map= c->map; const int shift= 1; c->map_generation+= 1<<(ME_MAP_MV_BITS*2); if(c->map_generation==0){ c->map_generation= 1<<(ME_MAP_MV_BITS*2); memset(c->map, 0, sizeof(uint32_t)*ME_MAP_SIZE); } map_generation= c->map_generation; dmin= cmp(0, 0); map[0]= map_generation; score_map[0]= dmin; /* first line */ if (s->first_slice_line) { CHECK_MV(P_LEFT[0]>>shift, P_LEFT[1]>>shift) CHECK_CLIPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16, (last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16) }else{ if(dmin<256 && ( P_LEFT[0] |P_LEFT[1] |P_TOP[0] |P_TOP[1] |P_TOPRIGHT[0]|P_TOPRIGHT[1])==0){ *mx_ptr= 0; *my_ptr= 0; c->skip=1; return dmin; } CHECK_MV(P_MEDIAN[0]>>shift, P_MEDIAN[1]>>shift) if(dmin>256*2){ CHECK_CLIPED_MV((last_mv[ref_mv_xy][0]*ref_mv_scale + (1<<15))>>16, (last_mv[ref_mv_xy][1]*ref_mv_scale + (1<<15))>>16) CHECK_MV(P_LEFT[0] >>shift, P_LEFT[1] >>shift) CHECK_MV(P_TOP[0] >>shift, P_TOP[1] >>shift) CHECK_MV(P_TOPRIGHT[0]>>shift, P_TOPRIGHT[1]>>shift) } } if(dmin>256*4){ CHECK_CLIPED_MV((last_mv[ref_mv_xy+1][0]*ref_mv_scale + (1<<15))>>16, (last_mv[ref_mv_xy+1][1]*ref_mv_scale + (1<<15))>>16) if(s->mb_y+1end_mb_y) /*FIXME replace at least with last_slice_line*/ CHECK_CLIPED_MV((last_mv[ref_mv_xy+ref_mv_stride][0]*ref_mv_scale + (1<<15))>>16, (last_mv[ref_mv_xy+ref_mv_stride][1]*ref_mv_scale + (1<<15))>>16) } /* dmin= diamond_search(s, best, dmin, ref_index, penalty_factor); */ { int next_dir=-1; { /* ensure that the best point is in the MAP as h/qpel refinement needs it */ const int key= (best[1]<xmin) CHECK_MV_DIR(x-1, y , 0) if(dir!=3 && y>ymin) CHECK_MV_DIR(x , y-1, 1) if(dir!=0 && xlambda>>FF_LAMBDA_SHIFT) void ff_init_me(MpegEncContext *s) { MotionEstContext * const c= &s->me; c->avctx= s->avctx; c->flags = 0; if(s->linesize){ c->stride = s->linesize; c->uvstride= s->uvlinesize; }else{ c->stride = 16*s->mb_width + 32; c->uvstride= 8*s->mb_width + 16; } c->temp= c->scratchpad; } #define Z_THRESHOLD 256 #define CHECK_SAD_HALF_MV(suffix, x, y) \ {\ d= s->dsp.pix_abs[0][(x?1:0)+(y?2:0)](NULL, pix, ptr+((x)>>1), stride, 16);\ d += (mv_penalty[pen_x + x] + mv_penalty[pen_y + y])*penalty_factor;\ COPY3_IF_LT(dminh, d, dx, x, dy, y)\ } static int sad_hpel_motion_search(MpegEncContext * s, int *mx_ptr, int *my_ptr, int dmin, int 
ref_index) { MotionEstContext * const c= &s->me; const int penalty_factor= c->sub_penalty_factor; int mx, my, dminh; uint8_t *pix, *ptr; int stride= c->stride; uint32_t * const score_map= c->score_map; const int xmin= c->xmin; const int ymin= c->ymin; const int xmax= c->xmax; const int ymax= c->ymax; uint8_t *mv_penalty= c->current_mv_penalty; const int pred_x= c->pred_x; const int pred_y= c->pred_y; if(c->skip){ *mx_ptr = 0; *my_ptr = 0; return dmin; } pix = c->src[0][0]; mx = *mx_ptr; my = *my_ptr; ptr = c->ref[ref_index][0] + (my * stride) + mx; dminh = dmin; if (mx > xmin && mx < xmax && my > ymin && my < ymax) { int dx=0, dy=0; int d, pen_x, pen_y; const int index= (my<mb_x + s->mb_y*s->mb_stride; s->p_mv_table[xy][0] = mx; s->p_mv_table[xy][1] = my; /* has allready been set to the 4 MV if 4MV is done */ if(mv4){ int mot_xy= s->block_index[0]; s->current_picture.motion_val[0][mot_xy ][0]= mx; s->current_picture.motion_val[0][mot_xy ][1]= my; s->current_picture.motion_val[0][mot_xy+1][0]= mx; s->current_picture.motion_val[0][mot_xy+1][1]= my; mot_xy += s->b8_stride; s->current_picture.motion_val[0][mot_xy ][0]= mx; s->current_picture.motion_val[0][mot_xy ][1]= my; s->current_picture.motion_val[0][mot_xy+1][0]= mx; s->current_picture.motion_val[0][mot_xy+1][1]= my; } } static int ff_sqrt(int a) { int ret=0, s, ret_sq=0; if(a<128) return ff_sqrt_tab[a]; for(s=15 ; s>=0 ; s--){ int b= ret_sq + (1<<(s<<1)) + (ret<<(s+1)); if(b<=a){ ret_sq=b; ret+= 1<xmin = - 16*mb_x;\ c->ymin = - 16*mb_y;\ c->xmax = - 16*mb_x + s->mb_width *16 - 16;\ c->ymax = - 16*mb_y + s->mb_height*16 - 16 void ff_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y) { MotionEstContext * const c= &s->me; uint8_t *pix, *ppix; int sum, varc, vard, mx, my, dmin; int P[10][2]; const int shift= 1; int mb_type=0; Picture * const pic= &s->current_picture; init_ref(); assert(s->linesize == c->stride); assert(s->uvlinesize == c->uvstride); c->penalty_factor = get_penalty_factor(s); c->sub_penalty_factor= get_penalty_factor(s); c->mb_penalty_factor = get_penalty_factor(s); c->current_mv_penalty= c->mv_penalty[s->f_code] + MAX_MV; get_limits; c->skip=0; /* intra / predictive decision */ pix = c->src[0][0]; sum = s->dsp.pix_sum(pix, s->linesize); varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8; pic->mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8; pic->mb_var [s->mb_stride * mb_y + mb_x] = varc; c->mb_var_sum_temp += varc; { const int mot_stride = s->b8_stride; const int mot_xy = s->block_index[0]; P_LEFT[0] = s->current_picture.motion_val[0][mot_xy - 1][0]; P_LEFT[1] = s->current_picture.motion_val[0][mot_xy - 1][1]; if(P_LEFT[0] > (c->xmax<xmax<first_slice_line) { P_TOP[0] = s->current_picture.motion_val[0][mot_xy - mot_stride ][0]; P_TOP[1] = s->current_picture.motion_val[0][mot_xy - mot_stride ][1]; P_TOPRIGHT[0] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][0]; P_TOPRIGHT[1] = s->current_picture.motion_val[0][mot_xy - mot_stride + 2][1]; if(P_TOP[1] > (c->ymax<ymax<xmin<xmin< (c->ymax<ymax<pred_x= P_LEFT[0]; c->pred_y= P_LEFT[1]; }else{ c->pred_x= P_LEFT[0]; c->pred_y= P_LEFT[1]; } } dmin = epzs_motion_search(s, &mx, &my, P, 0, s->p_mv_table, (1<<16)>>shift); /* At this point (mx,my) are full-pell and the relative displacement */ ppix = c->ref[0][0] + (my * s->linesize) + mx; vard = (sse16_c(NULL, pix, ppix, s->linesize, 16) + 128)>>8; pic->mc_mb_var[s->mb_stride * mb_y + mb_x] = vard; c->mc_mb_var_sum_temp += vard; if(mb_type){ if (vard <= 64 || vard < varc) 
c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc); else c->scene_change_score+= s->qscale; if(mb_type == CANDIDATE_MB_TYPE_INTER){ sad_hpel_motion_search(s, &mx, &my, dmin, 0); set_p_mv_tables(s, mx, my, 1); }else{ mx <<=shift; my <<=shift; } }else{ int intra_score, i; mb_type= CANDIDATE_MB_TYPE_INTER; dmin= sad_hpel_motion_search(s, &mx, &my, dmin, 0); set_p_mv_tables(s, mx, my, 1); /* get intra luma score */ { int mean= (sum+128)>>8; mean*= 0x01010101; for(i=0; i<16; i++){ *(uint32_t*)(&c->scratchpad[i*s->linesize+ 0]) = mean; *(uint32_t*)(&c->scratchpad[i*s->linesize+ 4]) = mean; *(uint32_t*)(&c->scratchpad[i*s->linesize+ 8]) = mean; *(uint32_t*)(&c->scratchpad[i*s->linesize+12]) = mean; } intra_score= pix_abs16_c(s, c->scratchpad, pix, s->linesize, 16); } intra_score += c->mb_penalty_factor*16; if(intra_score < dmin){ mb_type= CANDIDATE_MB_TYPE_INTRA; s->current_picture.mb_type[mb_y*s->mb_stride + mb_x]= CANDIDATE_MB_TYPE_INTRA; /*FIXME cleanup*/ }else s->current_picture.mb_type[mb_y*s->mb_stride + mb_x]= 0; if (vard <= 64 || vard < varc) { /*FIXME*/ c->scene_change_score+= ff_sqrt(vard) - ff_sqrt(varc); }else{ c->scene_change_score+= s->qscale; } } s->mb_type[mb_y*s->mb_stride + mb_x]= mb_type; } static int ff_estimate_motion_b(MpegEncContext * s, int mb_x, int mb_y, int16_t (*mv_table)[2], int ref_index, int f_code) { MotionEstContext * const c= &s->me; int mx, my, dmin; int P[10][2]; const int shift= 1; const int mot_stride = s->mb_stride; const int mot_xy = mb_y*mot_stride + mb_x; uint8_t * const mv_penalty= c->mv_penalty[f_code] + MAX_MV; int mv_scale; c->penalty_factor = get_penalty_factor(s); c->sub_penalty_factor= get_penalty_factor(s); c->mb_penalty_factor = get_penalty_factor(s); c->current_mv_penalty= mv_penalty; get_limits; P_LEFT[0] = mv_table[mot_xy - 1][0]; P_LEFT[1] = mv_table[mot_xy - 1][1]; if(P_LEFT[0] > (c->xmax<xmax<first_slice_line) { P_TOP[0] = mv_table[mot_xy - mot_stride ][0]; P_TOP[1] = mv_table[mot_xy - mot_stride ][1]; P_TOPRIGHT[0] = mv_table[mot_xy - mot_stride + 1 ][0]; P_TOPRIGHT[1] = mv_table[mot_xy - mot_stride + 1 ][1]; if(P_TOP[1] > (c->ymax<ymax<xmin<xmin< (c->ymax<ymax<pred_x= P_LEFT[0]; c->pred_y= P_LEFT[1]; if(mv_table == s->b_forw_mv_table){ mv_scale= (s->pb_time<<16) / (s->pp_time<pb_time - s->pp_time)<<16) / (s->pp_time<p_mv_table, mv_scale); dmin= sad_hpel_motion_search(s, &mx, &my, dmin, ref_index); mv_table[mot_xy][0]= mx; mv_table[mot_xy][1]= my; return dmin; } /* refine the bidir vectors in hq mode and return the score in both lq & hq mode*/ static int bidir_refine(MpegEncContext * s, int mb_x, int mb_y) { const int mot_stride = s->mb_stride; const int xy = mb_y *mot_stride + mb_x; int fbmin; int pred_fx= s->b_bidir_forw_mv_table[xy-1][0]; int pred_fy= s->b_bidir_forw_mv_table[xy-1][1]; int pred_bx= s->b_bidir_back_mv_table[xy-1][0]; int pred_by= s->b_bidir_back_mv_table[xy-1][1]; int motion_fx= s->b_bidir_forw_mv_table[xy][0]= s->b_forw_mv_table[xy][0]; int motion_fy= s->b_bidir_forw_mv_table[xy][1]= s->b_forw_mv_table[xy][1]; int motion_bx= s->b_bidir_back_mv_table[xy][0]= s->b_back_mv_table[xy][0]; int motion_by= s->b_bidir_back_mv_table[xy][1]= s->b_back_mv_table[xy][1]; /*FIXME do refinement and add flag*/ { /*FIXME optimize?*/ /*FIXME better f_code prediction (max mv & distance)*/ /*FIXME pointers*/ MotionEstContext * const c= &s->me; uint8_t * const mv_penalty= c->mv_penalty[s->f_code] + MAX_MV; /* f_code of the prev frame*/ int stride= c->stride; uint8_t *dest_y = c->scratchpad; uint8_t *ptr; int dxy; int src_x, src_y; uint8_t 
**src_data= c->src[0]; uint8_t **ref_data= c->ref[0]; uint8_t **ref2_data= c->ref[2]; dxy = ((motion_fy & 1) << 1) | (motion_fx & 1); src_x = motion_fx >> 1; src_y = motion_fy >> 1; ptr = ref_data[0] + (src_y * stride) + src_x; s->dsp.put_pixels_tab[0][dxy](dest_y , ptr , stride, 16); dxy = ((motion_by & 1) << 1) | (motion_bx & 1); src_x = motion_bx >> 1; src_y = motion_by >> 1; ptr = ref2_data[0] + (src_y * stride) + src_x; s->dsp.avg_pixels_tab[0][dxy](dest_y , ptr , stride, 16); fbmin = (mv_penalty[motion_fx-pred_fx] + mv_penalty[motion_fy-pred_fy])* c->mb_penalty_factor + (mv_penalty[motion_bx-pred_bx] + mv_penalty[motion_by-pred_by])* c->mb_penalty_factor + pix_abs16_c(s, src_data[0], dest_y, stride, 16); /*FIXME new_pic*/ } return fbmin; } void ff_estimate_b_frame_motion(MpegEncContext * s, int mb_x, int mb_y) { MotionEstContext * const c= &s->me; const int penalty_factor= c->mb_penalty_factor; int fmin, bmin, dmin, fbmin, bimin, fimin; int type=0; init_ref(init_ref2); get_limits; c->skip=0; dmin= INT_MAX; /*FIXME penalty stuff for non mpeg4*/ c->skip=0; fmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_forw_mv_table, 0, s->f_code) + 3*penalty_factor; c->skip=0; bmin= ff_estimate_motion_b(s, mb_x, mb_y, s->b_back_mv_table, 2, s->b_code) + 2*penalty_factor; c->skip=0; fbmin= bidir_refine(s, mb_x, mb_y) + penalty_factor; fimin= bimin= INT_MAX; { int score= fmin; type = CANDIDATE_MB_TYPE_FORWARD; if (dmin <= score){ score = dmin; type = CANDIDATE_MB_TYPE_DIRECT; } if(bmin>16; c->mc_mb_var_sum_temp += score; s->current_picture.mc_mb_var[mb_y*s->mb_stride + mb_x] = score; /*FIXME use SSE*/ } s->mb_type[mb_y*s->mb_stride + mb_x]= type; } /* find best f_code for ME which do unlimited searches */ int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type) { int score[8]; int i, y; uint8_t * fcode_tab= s->fcode_tab; int best_fcode=-1; int best_score=-10000000; for(i=0; i<8; i++) score[i]= s->mb_num*(8-i); for(y=0; ymb_height; y++){ int x; int xy= y*s->mb_stride; for(x=0; xmb_width; x++){ if(s->mb_type[xy] & type){ int fcode= FFMAX(fcode_tab[mv_table[xy][0] + MAX_MV], fcode_tab[mv_table[xy][1] + MAX_MV]); int j; for(j=0; jpict_type==B_TYPE || s->current_picture.mc_mb_var[xy] < s->current_picture.mb_var[xy]) score[j]-= 170; } } xy++; } } for(i=1; i<8; i++){ if(score[i] > best_score){ best_score= score[i]; best_fcode= i; } } return best_fcode; } /** * * @param truncate 1 for truncation, 0 for using intra */ void ff_fix_long_mvs(MpegEncContext * s, int16_t (*mv_table)[2], int f_code, int type, int truncate) { int y, h_range, v_range; /* RAL: 8 in MPEG-1, 16 in MPEG-4*/ int range = (8 << f_code); h_range= range; v_range= range; /* clip / convert to intra 16x16 type MVs */ for(y=0; ymb_height; y++){ int x; int xy= y*s->mb_stride; for(x=0; xmb_width; x++){ if (s->mb_type[xy] & type){ /* RAL: "type" test added...*/ if( mv_table[xy][0] >=h_range || mv_table[xy][0] <-h_range || mv_table[xy][1] >=v_range || mv_table[xy][1] <-v_range){ if(truncate){ if (mv_table[xy][0] > h_range-1) mv_table[xy][0]= h_range-1; else if(mv_table[xy][0] < -h_range ) mv_table[xy][0]= -h_range; if (mv_table[xy][1] > v_range-1) mv_table[xy][1]= v_range-1; else if(mv_table[xy][1] < -v_range ) mv_table[xy][1]= -v_range; }else{ s->mb_type[xy] &= ~type; s->mb_type[xy] |= CANDIDATE_MB_TYPE_INTRA; mv_table[xy][0]= mv_table[xy][1]= 0; } } } xy++; } } } yorick-mpeg-0.1/libavcodec/mpeg12.c0000644000076500001440000006435711254260253016536 0ustar frigautusers/* * MPEG1 codec / MPEG2 decoder * Copyright (c) 2000,2001 Fabrice 
Bellard. * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** * @file mpeg12.c * MPEG1/2 codec */ /*#define DEBUG*/ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" const int16_t ff_mpeg1_default_intra_matrix[64] = { 8, 16, 19, 22, 26, 27, 29, 34, 16, 16, 22, 24, 27, 29, 34, 37, 19, 22, 26, 27, 29, 34, 34, 38, 22, 22, 26, 27, 29, 34, 37, 40, 22, 26, 27, 29, 32, 35, 40, 48, 26, 27, 29, 32, 35, 40, 48, 58, 26, 27, 29, 34, 38, 46, 56, 69, 27, 29, 35, 38, 46, 56, 69, 83 }; const int16_t ff_mpeg1_default_non_intra_matrix[64] = { 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, }; static const uint16_t vlc_dc_lum_code[12] = { 0x4, 0x0, 0x1, 0x5, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe, 0x1ff, }; static const unsigned char vlc_dc_lum_bits[12] = { 3, 2, 2, 3, 3, 4, 5, 6, 7, 8, 9, 9, }; const uint16_t vlc_dc_chroma_code[12] = { 0x0, 0x1, 0x2, 0x6, 0xe, 0x1e, 0x3e, 0x7e, 0xfe, 0x1fe, 0x3fe, 0x3ff, }; const unsigned char vlc_dc_chroma_bits[12] = { 2, 2, 2, 3, 4, 5, 6, 7, 8, 9, 10, 10, }; static const uint16_t mpeg1_vlc[113][2] = { { 0x3, 2 }, { 0x4, 4 }, { 0x5, 5 }, { 0x6, 7 }, { 0x26, 8 }, { 0x21, 8 }, { 0xa, 10 }, { 0x1d, 12 }, { 0x18, 12 }, { 0x13, 12 }, { 0x10, 12 }, { 0x1a, 13 }, { 0x19, 13 }, { 0x18, 13 }, { 0x17, 13 }, { 0x1f, 14 }, { 0x1e, 14 }, { 0x1d, 14 }, { 0x1c, 14 }, { 0x1b, 14 }, { 0x1a, 14 }, { 0x19, 14 }, { 0x18, 14 }, { 0x17, 14 }, { 0x16, 14 }, { 0x15, 14 }, { 0x14, 14 }, { 0x13, 14 }, { 0x12, 14 }, { 0x11, 14 }, { 0x10, 14 }, { 0x18, 15 }, { 0x17, 15 }, { 0x16, 15 }, { 0x15, 15 }, { 0x14, 15 }, { 0x13, 15 }, { 0x12, 15 }, { 0x11, 15 }, { 0x10, 15 }, { 0x3, 3 }, { 0x6, 6 }, { 0x25, 8 }, { 0xc, 10 }, { 0x1b, 12 }, { 0x16, 13 }, { 0x15, 13 }, { 0x1f, 15 }, { 0x1e, 15 }, { 0x1d, 15 }, { 0x1c, 15 }, { 0x1b, 15 }, { 0x1a, 15 }, { 0x19, 15 }, { 0x13, 16 }, { 0x12, 16 }, { 0x11, 16 }, { 0x10, 16 }, { 0x5, 4 }, { 0x4, 7 }, { 0xb, 10 }, { 0x14, 12 }, { 0x14, 13 }, { 0x7, 5 }, { 0x24, 8 }, { 0x1c, 12 }, { 0x13, 13 }, { 0x6, 5 }, { 0xf, 10 }, { 0x12, 12 }, { 0x7, 6 }, { 0x9, 10 }, { 0x12, 13 }, { 0x5, 6 }, { 0x1e, 12 }, { 0x14, 16 }, { 0x4, 6 }, { 0x15, 12 }, { 0x7, 7 }, { 0x11, 12 }, { 0x5, 7 }, { 0x11, 13 }, { 0x27, 8 }, { 0x10, 13 }, { 0x23, 8 }, { 0x1a, 16 }, { 0x22, 8 }, { 0x19, 16 }, { 0x20, 8 }, { 0x18, 16 }, { 0xe, 10 }, { 0x17, 16 }, { 0xd, 10 }, { 0x16, 16 }, { 0x8, 10 }, { 0x15, 16 }, { 0x1f, 12 }, { 0x1a, 12 }, { 0x19, 12 }, { 0x17, 12 }, { 0x16, 12 }, { 0x1f, 13 }, { 0x1e, 13 }, { 0x1d, 13 }, { 0x1c, 13 }, { 0x1b, 13 }, { 0x1f, 16 }, { 0x1e, 16 }, { 0x1d, 16 }, { 0x1c, 16 }, { 0x1b, 16 }, { 0x1, 6 }, /* escape */ { 0x2, 2 }, /* EOB */ }; static const 
int8_t mpeg1_level[111] = { 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 1, 2, 3, 4, 5, 1, 2, 3, 4, 1, 2, 3, 1, 2, 3, 1, 2, 3, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, }; static const int8_t mpeg1_run[111] = { 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3, 4, 4, 4, 5, 5, 5, 6, 6, 6, 7, 7, 8, 8, 9, 9, 10, 10, 11, 11, 12, 12, 13, 13, 14, 14, 15, 15, 16, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, }; static RLTable rl_mpeg1 = { 111, 111, mpeg1_vlc, mpeg1_run, mpeg1_level, }; static const uint8_t mbAddrIncrTable[36][2] = { {0x1, 1}, {0x3, 3}, {0x2, 3}, {0x3, 4}, {0x2, 4}, {0x3, 5}, {0x2, 5}, {0x7, 7}, {0x6, 7}, {0xb, 8}, {0xa, 8}, {0x9, 8}, {0x8, 8}, {0x7, 8}, {0x6, 8}, {0x17, 10}, {0x16, 10}, {0x15, 10}, {0x14, 10}, {0x13, 10}, {0x12, 10}, {0x23, 11}, {0x22, 11}, {0x21, 11}, {0x20, 11}, {0x1f, 11}, {0x1e, 11}, {0x1d, 11}, {0x1c, 11}, {0x1b, 11}, {0x1a, 11}, {0x19, 11}, {0x18, 11}, {0x8, 11}, /* escape */ {0xf, 11}, /* stuffing */ {0x0, 8}, /* end (and 15 more 0 bits should follow) */ }; static const uint8_t mbPatTable[64][2] = { {0x1, 9}, {0xb, 5}, {0x9, 5}, {0xd, 6}, {0xd, 4}, {0x17, 7}, {0x13, 7}, {0x1f, 8}, {0xc, 4}, {0x16, 7}, {0x12, 7}, {0x1e, 8}, {0x13, 5}, {0x1b, 8}, {0x17, 8}, {0x13, 8}, {0xb, 4}, {0x15, 7}, {0x11, 7}, {0x1d, 8}, {0x11, 5}, {0x19, 8}, {0x15, 8}, {0x11, 8}, {0xf, 6}, {0xf, 8}, {0xd, 8}, {0x3, 9}, {0xf, 5}, {0xb, 8}, {0x7, 8}, {0x7, 9}, {0xa, 4}, {0x14, 7}, {0x10, 7}, {0x1c, 8}, {0xe, 6}, {0xe, 8}, {0xc, 8}, {0x2, 9}, {0x10, 5}, {0x18, 8}, {0x14, 8}, {0x10, 8}, {0xe, 5}, {0xa, 8}, {0x6, 8}, {0x6, 9}, {0x12, 5}, {0x1a, 8}, {0x16, 8}, {0x12, 8}, {0xd, 5}, {0x9, 8}, {0x5, 8}, {0x5, 9}, {0xc, 5}, {0x8, 8}, {0x4, 8}, {0x4, 9}, {0x7, 3}, {0xa, 5}, {0x8, 5}, {0xc, 6} }; static const uint8_t mbMotionVectorTable[17][2] = { { 0x1, 1 }, { 0x1, 2 }, { 0x1, 3 }, { 0x1, 4 }, { 0x3, 6 }, { 0x5, 7 }, { 0x4, 7 }, { 0x3, 7 }, { 0xb, 9 }, { 0xa, 9 }, { 0x9, 9 }, { 0x11, 10 }, { 0x10, 10 }, { 0xf, 10 }, { 0xe, 10 }, { 0xd, 10 }, { 0xc, 10 }, }; static const AVRational frame_rate_tab[] = { { 0, 0}, {24000, 1001}, { 24, 1}, { 25, 1}, {30000, 1001}, { 30, 1}, { 50, 1}, {60000, 1001}, { 60, 1}, /* Xing's 15fps: (9)*/ { 15, 1}, /* libmpeg3's "Unofficial economy rates": (10-13)*/ { 5, 1}, { 10, 1}, { 12, 1}, { 15, 1}, { 0, 0}, }; uint8_t ff_mpeg1_dc_scale_table[128]={ /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31*/ 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, 8, }; static const float mpeg1_aspect[16]={ 0.0000, 1.0000, 0.6735, 0.7031, 0.7615, 0.8055, 0.8437, 0.8935, 0.9157, 0.9815, 1.0255, 1.0695, 1.0950, 1.1575, 1.2015, }; /* Start codes. 
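 * Each one is the 24-bit prefix 0x000001 followed by a single identifying
 * byte: 0xb3 sequence header, 0xb8 group of pictures, 0x00 picture,
 * 0x01..0xaf slices, 0xb5 extension, 0xb2 user data, 0xb7 sequence end.
 * put_header() below emits them as two aligned 16-bit halves.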
*/ #define SEQ_END_CODE 0x000001b7 #define SEQ_START_CODE 0x000001b3 #define GOP_START_CODE 0x000001b8 #define PICTURE_START_CODE 0x00000100 #define SLICE_MIN_START_CODE 0x00000101 #define SLICE_MAX_START_CODE 0x000001af #define EXT_START_CODE 0x000001b5 #define USER_START_CODE 0x000001b2 static void mpeg1_encode_block(MpegEncContext *s, DCTELEM *block, int n); static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code); /* RAL: f_code parameter added*/ static uint8_t (*mv_penalty)[MAX_MV*2+1]= NULL; static uint8_t fcode_tab[MAX_MV*2+1]; static uint32_t uni_mpeg1_ac_vlc_bits[64*64*2]; static uint8_t uni_mpeg1_ac_vlc_len [64*64*2]; /* simple include everything table for dc, first byte is bits number next 3 are code*/ static uint32_t mpeg1_lum_dc_uni[512]; static uint32_t mpeg1_chr_dc_uni[512]; static uint8_t mpeg1_index_run[2][64]; static int8_t mpeg1_max_level[2][64]; static void init_uni_ac_vlc(RLTable *rl, uint32_t *uni_ac_vlc_bits, uint8_t *uni_ac_vlc_len) { int i; for(i=0; i<128; i++){ int level= i-64; int run; for(run=0; run<64; run++){ int len, bits, code; int alevel= ABS(level); int sign= (level>>31)&1; if (alevel > rl->max_level[0][run]) code= 111; /*rl->n*/ else code= rl->index_run[0][run] + alevel - 1; if (code < 111 /* rl->n */) { /* store the vlc & sign at once */ len= mpeg1_vlc[code][1]+1; bits= (mpeg1_vlc[code][0]<<1) + sign; } else { len= mpeg1_vlc[111/*rl->n*/][1]+6; bits= mpeg1_vlc[111/*rl->n*/][0]<<6; bits|= run; if (alevel < 128) { bits<<=8; len+=8; bits|= level & 0xff; } else { bits<<=16; len+=16; bits|= level & 0xff; if (level < 0) { bits|= 0x8001 + level + 255; } else { bits|= level & 0xffff; } } } uni_ac_vlc_bits[UNI_AC_ENC_INDEX(run, i)]= bits; uni_ac_vlc_len [UNI_AC_ENC_INDEX(run, i)]= len; } } } static int find_frame_rate_index(MpegEncContext *s) { int i; int64_t dmin= INT64_MAX; int64_t d; for(i=1;i<14;i++) { int64_t n0= int64_t_C(1001)/frame_rate_tab[i].den * frame_rate_tab[i].num * s->avctx->frame_rate_base; int64_t n1= int64_t_C(1001) * s->avctx->frame_rate; d = ABS(n0 - n1); if(d < dmin){ dmin=d; s->frame_rate_index= i; } } if(dmin) return -1; else return 0; } static int encode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; if(MPV_encode_init(avctx) < 0) return -1; if(find_frame_rate_index(s) < 0){ av_log(avctx, AV_LOG_ERROR, "MPEG1/2 doesnt support %d/%d fps\n", avctx->frame_rate, avctx->frame_rate_base); return -1; } return 0; } static void put_header(MpegEncContext *s, int header) { align_put_bits(&s->pb); put_bits(&s->pb, 16, header>>16); put_bits(&s->pb, 16, header&0xFFFF); } /* put sequence header if needed */ static void mpeg1_encode_sequence_header(MpegEncContext *s) { unsigned int vbv_buffer_size; unsigned int fps, v; int i; uint64_t time_code; float best_aspect_error= 1E10; float aspect_ratio= av_q2d(s->avctx->sample_aspect_ratio); int constraint_parameter_flag; if(aspect_ratio==0.0) aspect_ratio= 1.0; /*pixel aspect 1:1 (VGA)*/ if (s->current_picture.key_frame) { AVRational framerate= frame_rate_tab[s->frame_rate_index]; int aspect_ratio_info = 1; /* mpeg1 header repeated every gop */ put_header(s, SEQ_START_CODE); put_bits(&s->pb, 12, s->width); put_bits(&s->pb, 12, s->height); for(i=1; i<15; i++){ float error= aspect_ratio; error-= 1.0/mpeg1_aspect[i]; error= ABS(error); if(error < best_aspect_error){ best_aspect_error= error; aspect_ratio_info= i; } } put_bits(&s->pb, 4, aspect_ratio_info); put_bits(&s->pb, 4, s->frame_rate_index); if(s->avctx->rc_max_rate){ v = (s->avctx->rc_max_rate + 399) / 400; if (v > 
0x3ffff) v = 0x3ffff; }else{ v= 0x3FFFF; } if(s->avctx->rc_buffer_size) vbv_buffer_size = s->avctx->rc_buffer_size; else /* VBV calculation: Scaled so that a VCD has the proper VBV size of 40 kilobytes */ vbv_buffer_size = (( 20 * s->bit_rate) / (1151929 / 2)) * 8 * 1024; vbv_buffer_size= (vbv_buffer_size + 16383) / 16384; put_bits(&s->pb, 18, v & 0x3FFFF); put_bits(&s->pb, 1, 1); /* marker */ put_bits(&s->pb, 10, vbv_buffer_size & 0x3FF); constraint_parameter_flag= s->width <= 768 && s->height <= 576 && s->mb_width * s->mb_height <= 396 && s->mb_width * s->mb_height * framerate.num <= framerate.den*396*25 && framerate.num <= framerate.den*30 && vbv_buffer_size <= 20 && v <= 1856000/400; put_bits(&s->pb, 1, constraint_parameter_flag); ff_write_quant_matrix(&s->pb, s->avctx->intra_matrix); ff_write_quant_matrix(&s->pb, s->avctx->inter_matrix); put_header(s, GOP_START_CODE); put_bits(&s->pb, 1, 0); /* do drop frame */ /* time code : we must convert from the real frame rate to a fake mpeg frame rate in case of low frame rate */ fps = (framerate.num + framerate.den/2)/ framerate.den; time_code = s->current_picture_ptr->coded_picture_number; s->gop_picture_number = time_code; put_bits(&s->pb, 5, (uint32_t)((time_code / (fps * 3600)) % 24)); put_bits(&s->pb, 6, (uint32_t)((time_code / (fps * 60)) % 60)); put_bits(&s->pb, 1, 1); put_bits(&s->pb, 6, (uint32_t)((time_code / fps) % 60)); put_bits(&s->pb, 6, (uint32_t)((time_code % fps))); put_bits(&s->pb, 1, 0); put_bits(&s->pb, 1, 0); /* broken link */ } } #define encode_mb_skip_run(run) \ while (run >= 33) {\ put_bits(&s->pb, 11, 0x008);\ run -= 33;\ }\ put_bits(&s->pb, mbAddrIncrTable[run][1], mbAddrIncrTable[run][0]) void ff_mpeg1_encode_slice_header(MpegEncContext *s) { put_header(s, SLICE_MIN_START_CODE + s->mb_y); put_bits(&s->pb, 5, s->qscale); /* quantizer scale */ put_bits(&s->pb, 1, 0); /* slice extra information */ } void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number) { mpeg1_encode_sequence_header(s); /* mpeg1 picture header */ put_header(s, PICTURE_START_CODE); /* temporal reference */ /* RAL: s->picture_number instead of s->fake_picture_number*/ put_bits(&s->pb, 10, (s->picture_number - s->gop_picture_number) & 0x3ff); put_bits(&s->pb, 3, s->pict_type); s->vbv_delay_ptr= s->pb.buf + put_bits_count(&s->pb)/8; put_bits(&s->pb, 16, 0xFFFF); /* vbv_delay */ /* RAL: Forward f_code also needed for B frames*/ if (s->pict_type == P_TYPE || s->pict_type == B_TYPE) { put_bits(&s->pb, 1, 0); /* half pel coordinates */ put_bits(&s->pb, 3, s->f_code); /* forward_f_code */ } /* RAL: Backward f_code necessary for B frames*/ if (s->pict_type == B_TYPE) { put_bits(&s->pb, 1, 0); /* half pel coordinates */ put_bits(&s->pb, 3, s->b_code); /* backward_f_code */ } put_bits(&s->pb, 1, 0); /* extra bit picture */ s->mb_y=0; ff_mpeg1_encode_slice_header(s); } #define get_bits_diff(s) (last=(s)->last_bits, ((s)->last_bits=put_bits_count(&(s)->pb)-last)) void mpeg1_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y) { int i, cbp, last; const int mb_x = s->mb_x; const int mb_y = s->mb_y; const int first_mb= mb_x == s->resync_mb_x && mb_y == s->resync_mb_y; /* compute cbp */ cbp = 0; for(i=0;i<6;i++) { if (s->block_last_index[i] >= 0) cbp |= 1 << (5 - i); } if (cbp == 0 && !first_mb && (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1)) && ((s->pict_type == P_TYPE && (motion_x | motion_y) == 0) || (s->pict_type == B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? 
((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) | ((s->mv_dir & MV_DIR_BACKWARD) ? ((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) { s->mb_skip_run++; s->qscale -= s->dquant; s->skip_count++; s->misc_bits++; s->last_bits++; if(s->pict_type == P_TYPE){ s->last_mv[0][1][0]= s->last_mv[0][0][0]= s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0; } } else { if(first_mb){ assert(s->mb_skip_run == 0); encode_mb_skip_run(s->mb_x); }else{ encode_mb_skip_run(s->mb_skip_run); } if (s->pict_type == I_TYPE) { if(s->dquant && cbp){ put_bits(&s->pb, 2, 1); /* macroblock_type : macroblock_quant = 1 */ put_bits(&s->pb, 5, s->qscale); }else{ put_bits(&s->pb, 1, 1); /* macroblock_type : macroblock_quant = 0 */ s->qscale -= s->dquant; } s->misc_bits+= get_bits_diff(s); s->i_count++; } else if (s->mb_intra) { if(s->dquant && cbp){ put_bits(&s->pb, 6, 0x01); put_bits(&s->pb, 5, s->qscale); }else{ put_bits(&s->pb, 5, 0x03); s->qscale -= s->dquant; } s->misc_bits+= get_bits_diff(s); s->i_count++; memset(s->last_mv, 0, sizeof(s->last_mv)); } else if (s->pict_type == P_TYPE) { if (cbp != 0) { if ((motion_x|motion_y) == 0) { if(s->dquant){ put_bits(&s->pb, 5, 1); /* macroblock_pattern & quant */ put_bits(&s->pb, 5, s->qscale); }else{ put_bits(&s->pb, 2, 1); /* macroblock_pattern only */ } s->misc_bits+= get_bits_diff(s); } else { if(s->dquant){ put_bits(&s->pb, 5, 2); /* motion + cbp */ put_bits(&s->pb, 5, s->qscale); }else{ put_bits(&s->pb, 1, 1); /* motion + cbp */ } s->misc_bits+= get_bits_diff(s); mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); /* RAL: f_code parameter added*/ mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); /* RAL: f_code parameter added*/ s->mv_bits+= get_bits_diff(s); } } else { put_bits(&s->pb, 3, 1); /* motion only */ s->misc_bits+= get_bits_diff(s); mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); /* RAL: f_code parameter added*/ mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); /* RAL: f_code parameter added*/ s->qscale -= s->dquant; s->mv_bits+= get_bits_diff(s); } s->last_mv[0][1][0]= s->last_mv[0][0][0]= motion_x; s->last_mv[0][1][1]= s->last_mv[0][0][1]= motion_y; if(cbp) put_bits(&s->pb, mbPatTable[cbp][1], mbPatTable[cbp][0]); s->f_count++; } else{ static const int mb_type_len[4]={0,3,4,2}; /*bak,for,bi*/ if (cbp){ /* With coded bloc pattern*/ if (s->dquant) { if(s->mv_dir == MV_DIR_FORWARD) put_bits(&s->pb, 6, 3); else put_bits(&s->pb, mb_type_len[s->mv_dir]+3, 2); put_bits(&s->pb, 5, s->qscale); } else { put_bits(&s->pb, mb_type_len[s->mv_dir], 3); } }else{ /* No coded bloc pattern*/ put_bits(&s->pb, mb_type_len[s->mv_dir], 2); s->qscale -= s->dquant; } s->misc_bits += get_bits_diff(s); if (s->mv_dir&MV_DIR_FORWARD){ mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code); mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code); s->last_mv[0][0][0]=s->last_mv[0][1][0]= s->mv[0][0][0]; s->last_mv[0][0][1]=s->last_mv[0][1][1]= s->mv[0][0][1]; s->f_count++; } if (s->mv_dir&MV_DIR_BACKWARD){ mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code); mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code); s->last_mv[1][0][0]=s->last_mv[1][1][0]= s->mv[1][0][0]; s->last_mv[1][0][1]=s->last_mv[1][1][1]= s->mv[1][0][1]; s->b_count++; } s->mv_bits += get_bits_diff(s); if(cbp) put_bits(&s->pb, mbPatTable[cbp][1], mbPatTable[cbp][0]); } for(i=0;i<6;i++) { if (cbp & (1 << (5 - i))) { 
mpeg1_encode_block(s, block[i], i); } } s->mb_skip_run = 0; if(s->mb_intra) s->i_tex_bits+= get_bits_diff(s); else s->p_tex_bits+= get_bits_diff(s); } } /* RAL: Parameter added: f_or_b_code*/ static void mpeg1_encode_motion(MpegEncContext *s, int val, int f_or_b_code) { int code, bit_size, l, bits, range, sign; if (val == 0) { /* zero vector */ code = 0; put_bits(&s->pb, mbMotionVectorTable[0][1], mbMotionVectorTable[0][0]); } else { bit_size = f_or_b_code - 1; range = 1 << bit_size; /* modulo encoding */ l= INT_BIT - 5 - bit_size; val= (val<>l; if (val >= 0) { val--; code = (val >> bit_size) + 1; bits = val & (range - 1); sign = 0; } else { val = -val; val--; code = (val >> bit_size) + 1; bits = val & (range - 1); sign = 1; } assert(code > 0 && code <= 16); put_bits(&s->pb, mbMotionVectorTable[code][1], mbMotionVectorTable[code][0]); put_bits(&s->pb, 1, sign); if (bit_size > 0) { put_bits(&s->pb, bit_size, bits); } } } void ff_mpeg1_encode_init(MpegEncContext *s) { static int done=0; if(!done){ int f_code; int mv; int i; done=1; init_rl(&rl_mpeg1); for(i=0; i<64; i++) { mpeg1_max_level[0][i]= rl_mpeg1.max_level[0][i]; mpeg1_index_run[0][i]= rl_mpeg1.index_run[0][i]; } init_uni_ac_vlc(&rl_mpeg1, uni_mpeg1_ac_vlc_bits, uni_mpeg1_ac_vlc_len); /* build unified dc encoding tables */ for(i=-255; i<256; i++) { int adiff, index; int bits, code; int diff=i; adiff = ABS(diff); if(diff<0) diff--; index = av_log2(2*adiff); bits= vlc_dc_lum_bits[index] + index; code= (vlc_dc_lum_code[index]<> bit_size) + 1; if(code<17){ len= mbMotionVectorTable[code][1] + 1 + bit_size; }else{ len= mbMotionVectorTable[16][1] + 2 + bit_size; } } mv_penalty[f_code][mv+MAX_MV]= len; } } for(f_code=MAX_FCODE; f_code>0; f_code--){ for(mv=-(8<me.mv_penalty= mv_penalty; s->fcode_tab= fcode_tab; s->min_qcoeff=-255; s->max_qcoeff= 255; s->intra_ac_vlc_length= s->inter_ac_vlc_length= s->intra_ac_vlc_last_length= s->inter_ac_vlc_last_length= uni_mpeg1_ac_vlc_len; } static void mpeg1_encode_block(MpegEncContext *s, DCTELEM *block, int n) { int alevel, level, last_non_zero, dc, diff, i, j, run, last_index, sign; int code, component; last_index = s->block_last_index[n]; /* DC coef */ if (s->mb_intra) { component = (n <= 3 ? 
0 : n - 4 + 1); dc = block[0]; /* overflow is impossible */ diff = dc - s->last_dc[component]; if(((unsigned) (diff+255)) >= 511){ int index; if(diff<0){ index= av_log2_16bit(-2*diff); diff--; }else{ index= av_log2_16bit(2*diff); } if (component == 0) { put_bits(&s->pb, vlc_dc_lum_bits[index] + index, (vlc_dc_lum_code[index]<pb, vlc_dc_chroma_bits[index] + index, (vlc_dc_chroma_code[index]<pb, mpeg1_lum_dc_uni[diff+255]&0xFF, mpeg1_lum_dc_uni[diff+255]>>8); } else { put_bits(&s->pb, mpeg1_chr_dc_uni[diff+255]&0xFF, mpeg1_chr_dc_uni[diff+255]>>8); } } s->last_dc[component] = dc; i = 1; } else { /* encode the first coefficient : needs to be done here because it is handled slightly differently */ level = block[0]; if (abs(level) == 1) { code = ((uint32_t)level >> 31); /* the sign bit */ put_bits(&s->pb, 2, code | 0x02); i = 1; } else { i = 0; last_non_zero = -1; goto next_coef; } } /* now quantify & encode AC coefs */ last_non_zero = i - 1; for(;i<=last_index;i++) { j = s->intra_scantable.permutated[i]; level = block[j]; next_coef: if (level != 0) { run = i - last_non_zero - 1; alevel= level; sign= alevel>>31; alevel= (alevel^sign)-sign; sign&=1; if (alevel <= mpeg1_max_level[0][run]){ code= mpeg1_index_run[0][run] + alevel - 1; /* store the vlc & sign at once */ put_bits(&s->pb, mpeg1_vlc[code][1]+1, (mpeg1_vlc[code][0]<<1) + sign); } else { /* escape seems to be pretty rare <5% so i dont optimize it */ put_bits(&s->pb, mpeg1_vlc[111/*rl->n*/][1], mpeg1_vlc[111/*rl->n*/][0]); /* escape: only clip in this case */ put_bits(&s->pb, 6, run); if (alevel < 128) { put_bits(&s->pb, 8, level & 0xff); } else { if (level < 0) { put_bits(&s->pb, 16, 0x8001 + level + 255); } else { put_bits(&s->pb, 16, level & 0xffff); } } } last_non_zero = i; } } /* end of block */ put_bits(&s->pb, 2, 0x2); } AVCodec mpeg1video_encoder = { "mpeg1video", CODEC_TYPE_VIDEO, CODEC_ID_MPEG1VIDEO, sizeof(MpegEncContext), encode_init, MPV_encode_picture, MPV_encode_end, 0, CODEC_CAP_DELAY, 0, 0, frame_rate_tab+1, 0 }; yorick-mpeg-0.1/libavcodec/mpegvideo.c0000644000076500001440000016536111254260253017417 0ustar frigautusers/* * The simplest mpeg encoder (well, it was the simplest!) * Copyright (c) 2000,2001 Fabrice Bellard. * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA * * 4MV & hq & b-frame encoding stuff by Michael Niedermayer */ /** * @file mpegvideo.c * The simplest mpeg encoder (well, it was the simplest!). 
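 *
 * (Informal note on the quantizer setup below: convert_matrix() precomputes
 * qmat[qscale][i] as approximately 2^QMAT_SHIFT / (qscale * quant_matrix[i]),
 * so the forward quantizer can use a multiply and a right shift per
 * coefficient instead of an integer division; the comment inside it works
 * out the value range that makes this safe.)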
*/ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #include "simple_idct.h" #include static void encode_picture(MpegEncContext *s, int picture_number); static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, DCTELEM *block, int n, int qscale); static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow); static const uint8_t ff_default_chroma_qscale_table[32]={ /* 0 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31*/ 0, 1, 2, 3, 4, 5, 6, 7, 8, 9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29,30,31 }; static uint8_t (*default_mv_penalty)[MAX_MV*2+1]=NULL; static uint8_t default_fcode_tab[MAX_MV*2+1]; static void convert_matrix(DSPContext *dsp, int (*qmat)[64], uint16_t (*qmat16)[2][64], const uint16_t *quant_matrix, int bias, int qmin, int qmax) { int qscale; for(qscale=qmin; qscale<=qmax; qscale++){ int i; for(i=0;i<64;i++) { /* We can safely suppose that 16 <= quant_matrix[i] <= 255 So 16 <= qscale * quant_matrix[i] <= 7905 so (1<<19) / 16 >= (1<<19) / (qscale * quant_matrix[i]) >= (1<<19) / 7905 so 32768 >= (1<<19) / (qscale * quant_matrix[i]) >= 67 */ qmat[qscale][i] = (int)((uint64_t_C(1) << QMAT_SHIFT) / (qscale * quant_matrix[i])); qmat16[qscale][0][i] = (1 << QMAT_SHIFT_MMX) / (qscale * quant_matrix[i]); if(qmat16[qscale][0][i]==0 || qmat16[qscale][0][i]==128*256) qmat16[qscale][0][i]=128*256-1; qmat16[qscale][1][i]= ROUNDED_DIV(bias<<(16-QUANT_BIAS_SHIFT), qmat16[qscale][0][i]); } } } void ff_init_scantable(ScanTable *st, const uint8_t *src_scantable) { int i; int end; st->scantable= src_scantable; for(i=0; i<64; i++){ int j; j = src_scantable[i]; st->permutated[i] = j; /*#ifdef ARCH_POWERPC st->inverse[j] = i; #endif*/ } end=-1; for(i=0; i<64; i++){ int j; j = st->permutated[i]; if(j>end) end=j; st->raster_end[i]= end; } } void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix) { int i; if(matrix){ put_bits(pb, 1, 1); for(i=0;i<64;i++) { put_bits(pb, 8, matrix[ ff_zigzag_direct[i] ]); } }else put_bits(pb, 1, 0); } /* init common dct for both encoder and decoder */ int DCT_common_init(MpegEncContext *s) { ff_init_scantable(&s->inter_scantable , ff_zigzag_direct); ff_init_scantable(&s->intra_scantable , ff_zigzag_direct); ff_init_scantable(&s->intra_h_scantable, ff_alternate_horizontal_scan); ff_init_scantable(&s->intra_v_scantable, ff_alternate_vertical_scan); return 0; } static void copy_picture(Picture *dst, Picture *src) { *dst = *src; dst->type= FF_BUFFER_TYPE_COPY; } static void copy_picture_attributes(MpegEncContext *s, AVFrame *dst, AVFrame *src) { dst->pict_type = src->pict_type; dst->quality = src->quality; dst->coded_picture_number = src->coded_picture_number; dst->display_picture_number = src->display_picture_number; dst->pts = src->pts; } /** * allocates a Picture * The pixels are allocated/set by calling get_buffer() if shared=0 */ static int alloc_picture(MpegEncContext *s, Picture *pic, int shared) { const int big_mb_num= s->mb_stride*(s->mb_height+1) + 1; /*the +1 is needed so memset(,,stride*height) doesnt sig11*/ const int mb_array_size= s->mb_stride*s->mb_height; const int b8_array_size= s->b8_stride*s->mb_height*2; int i; if(shared){ assert(pic->data[0]); assert(pic->type == 0 || pic->type == FF_BUFFER_TYPE_SHARED); pic->type= FF_BUFFER_TYPE_SHARED; }else{ int r; assert(!pic->data[0]); r= s->avctx->get_buffer(s->avctx, (AVFrame*)pic); if(r<0 || !pic->age || 
!pic->type || !pic->data[0]){ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (%d %d %d %p)\n", r, pic->age, pic->type, pic->data[0]); return -1; } if(s->linesize && (s->linesize != pic->linesize[0] || s->uvlinesize != pic->linesize[1])){ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (stride changed)\n"); return -1; } if(pic->linesize[1] != pic->linesize[2]){ av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed (uv stride missmatch)\n"); return -1; } s->linesize = pic->linesize[0]; s->uvlinesize= pic->linesize[1]; } CHECKED_ALLOCZ(pic->mb_var , mb_array_size * sizeof(int16_t)) CHECKED_ALLOCZ(pic->mc_mb_var, mb_array_size * sizeof(int16_t)) CHECKED_ALLOCZ(pic->mb_mean , mb_array_size * sizeof(int8_t)) CHECKED_ALLOCZ(pic->mb_type_base , big_mb_num * sizeof(uint32_t)) pic->mb_type= pic->mb_type_base + s->mb_stride+1; for(i=0; i<2; i++){ CHECKED_ALLOCZ(pic->motion_val_base[i], 2 * (b8_array_size+2) * sizeof(int16_t)) pic->motion_val[i]= pic->motion_val_base[i]+2; CHECKED_ALLOCZ(pic->ref_index[i], b8_array_size * sizeof(uint8_t)) } /*it might be nicer if the application would keep track of these but it would require a API change*/ memmove(s->prev_pict_types+1, s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE-1); s->prev_pict_types[0]= s->pict_type; if(pic->age < PREV_PICT_TYPES_BUFFER_SIZE && s->prev_pict_types[pic->age] == B_TYPE) pic->age= INT_MAX; /* skiped MBs in b frames are quite rare in mpeg1/2 and its a bit tricky to skip them anyway*/ return 0; fail: /*for the CHECKED_ALLOCZ macro*/ return -1; } /** * deallocates a picture */ static void free_picture(MpegEncContext *s, Picture *pic) { int i; if(pic->data[0] && pic->type!=FF_BUFFER_TYPE_SHARED){ s->avctx->release_buffer(s->avctx, (AVFrame*)pic); } av_freep(&pic->mb_var); av_freep(&pic->mc_mb_var); av_freep(&pic->mb_mean); av_freep(&pic->mb_type_base); pic->mb_type= NULL; for(i=0; i<2; i++){ av_freep(&pic->motion_val_base[i]); av_freep(&pic->ref_index[i]); } if(pic->type == FF_BUFFER_TYPE_SHARED){ for(i=0; i<4; i++){ pic->base[i]= pic->data[i]= NULL; } pic->type= 0; } } static int init_duplicate_context(MpegEncContext *s, MpegEncContext *base) { /* edge emu needs blocksize + filter length - 1 */ CHECKED_ALLOCZ(s->allocated_edge_emu_buffer, (s->width+64)*2*17*2); /*(width + edge + align)*interlaced*MBsize*tolerance*/ s->edge_emu_buffer= s->allocated_edge_emu_buffer + (s->width+64)*2*17; /*FIXME should be linesize instead of s->width*2 but that isnt known before get_buffer()*/ CHECKED_ALLOCZ(s->me.scratchpad, (s->width+64)*4*16*2*sizeof(uint8_t)) s->rd_scratchpad= s->me.scratchpad; s->b_scratchpad= s->me.scratchpad; CHECKED_ALLOCZ(s->me.map , ME_MAP_SIZE*sizeof(uint32_t)) CHECKED_ALLOCZ(s->me.score_map, ME_MAP_SIZE*sizeof(uint32_t)) CHECKED_ALLOCZ(s->blocks, 64*12*2 * sizeof(DCTELEM)) s->block= s->blocks[0]; return 0; fail: return -1; /*free() through MPV_common_end()*/ } static void free_duplicate_context(MpegEncContext *s){ if(s==NULL) return; av_freep(&s->allocated_edge_emu_buffer); s->edge_emu_buffer= NULL; av_freep(&s->me.scratchpad); s->rd_scratchpad= s->b_scratchpad= NULL; av_freep(&s->me.map); av_freep(&s->me.score_map); av_freep(&s->blocks); s->block= NULL; } /** * sets the given MpegEncContext to common defaults (same for encoding and decoding). * the changed fields will not depend upon the prior state of the MpegEncContext. 
*/ static void MPV_common_defaults(MpegEncContext *s) { s->y_dc_scale_table= s->c_dc_scale_table= ff_mpeg1_dc_scale_table; s->chroma_qscale_table= ff_default_chroma_qscale_table; s->coded_picture_number = 0; s->picture_number = 0; s->input_picture_number = 0; s->picture_in_gop_number = 0; s->f_code = 1; s->b_code = 1; } /** * sets the given MpegEncContext to defaults for encoding. * the changed fields will not depend upon the prior state of the MpegEncContext. */ static void MPV_encode_defaults(MpegEncContext *s){ static int done=0; MPV_common_defaults(s); if(!done){ int i; done=1; default_mv_penalty= av_mallocz( sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1) ); memset(default_mv_penalty, 0, sizeof(uint8_t)*(MAX_FCODE+1)*(2*MAX_MV+1)); memset(default_fcode_tab , 0, sizeof(uint8_t)*(2*MAX_MV+1)); for(i=-16; i<16; i++){ default_fcode_tab[i + MAX_MV]= 1; } } s->me.mv_penalty= default_mv_penalty; s->fcode_tab= default_fcode_tab; } /** * init common structure for both encoder and decoder. * this assumes that some variables like width/height are already set */ int MPV_common_init(MpegEncContext *s) { int y_size, c_size, yc_size, mb_array_size, mv_table_size, x, y; dsputil_init(&s->dsp, s->avctx); DCT_common_init(s); s->flags= s->avctx->flags; s->flags2= s->avctx->flags2; s->mb_width = (s->width + 15) / 16; s->mb_height = (s->height + 15) / 16; s->mb_stride = s->mb_width + 1; s->b8_stride = s->mb_width*2 + 1; mb_array_size= s->mb_height * s->mb_stride; mv_table_size= (s->mb_height+2) * s->mb_stride + 1; /* set chroma shifts */ avcodec_get_chroma_sub_sample(s->avctx->pix_fmt,&(s->chroma_x_shift), &(s->chroma_y_shift) ); /* set default edge pos, will be overriden in decode_header if needed */ s->h_edge_pos= s->mb_width*16; s->v_edge_pos= s->mb_height*16; s->mb_num = s->mb_width * s->mb_height; s->block_wrap[0]= s->block_wrap[1]= s->block_wrap[2]= s->block_wrap[3]= s->b8_stride; s->block_wrap[4]= s->block_wrap[5]= s->mb_stride; y_size = s->b8_stride * (2 * s->mb_height + 1); c_size = s->mb_stride * (s->mb_height + 1); yc_size = y_size + 2 * c_size; s->avctx->coded_frame= (AVFrame*)&s->current_picture; CHECKED_ALLOCZ(s->mb_index2xy, (s->mb_num+1)*sizeof(int)) /*error ressilience code looks cleaner with this*/ for(y=0; ymb_height; y++){ for(x=0; xmb_width; x++){ s->mb_index2xy[ x + y*s->mb_width ] = x + y*s->mb_stride; } } s->mb_index2xy[ s->mb_height*s->mb_width ] = (s->mb_height-1)*s->mb_stride + s->mb_width; /*FIXME really needed?*/ /* Allocate MV tables */ CHECKED_ALLOCZ(s->p_mv_table_base , mv_table_size * 2 * sizeof(int16_t)) CHECKED_ALLOCZ(s->b_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t)) CHECKED_ALLOCZ(s->b_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t)) CHECKED_ALLOCZ(s->b_bidir_forw_mv_table_base , mv_table_size * 2 * sizeof(int16_t)) CHECKED_ALLOCZ(s->b_bidir_back_mv_table_base , mv_table_size * 2 * sizeof(int16_t)) CHECKED_ALLOCZ(s->b_direct_mv_table_base , mv_table_size * 2 * sizeof(int16_t)) s->p_mv_table = s->p_mv_table_base + s->mb_stride + 1; s->b_forw_mv_table = s->b_forw_mv_table_base + s->mb_stride + 1; s->b_back_mv_table = s->b_back_mv_table_base + s->mb_stride + 1; s->b_bidir_forw_mv_table= s->b_bidir_forw_mv_table_base + s->mb_stride + 1; s->b_bidir_back_mv_table= s->b_bidir_back_mv_table_base + s->mb_stride + 1; s->b_direct_mv_table = s->b_direct_mv_table_base + s->mb_stride + 1; /* Allocate MB type table */ CHECKED_ALLOCZ(s->mb_type , mb_array_size * sizeof(uint16_t)) /*needed for encoding*/ CHECKED_ALLOCZ(s->lambda_table, mb_array_size * sizeof(int)) 
CHECKED_ALLOCZ(s->q_intra_matrix, 64*32 * sizeof(int)) CHECKED_ALLOCZ(s->q_inter_matrix, 64*32 * sizeof(int)) CHECKED_ALLOCZ(s->q_intra_matrix16, 64*32*2 * sizeof(uint16_t)) CHECKED_ALLOCZ(s->q_inter_matrix16, 64*32*2 * sizeof(uint16_t)) CHECKED_ALLOCZ(s->input_picture, MAX_PICTURE_COUNT * sizeof(Picture*)) CHECKED_ALLOCZ(s->reordered_input_picture, MAX_PICTURE_COUNT * sizeof(Picture*)) CHECKED_ALLOCZ(s->picture, MAX_PICTURE_COUNT * sizeof(Picture)) CHECKED_ALLOCZ(s->prev_pict_types, PREV_PICT_TYPES_BUFFER_SIZE); s->parse_context.state= -1; s->context_initialized = 1; s->thread_context[0]= s; if(init_duplicate_context(s->thread_context[0], s) < 0) goto fail; s->thread_context[0]->start_mb_y= 0; s->thread_context[0]->end_mb_y = s->mb_height; return 0; fail: MPV_common_end(s); return -1; } /* init common structure for both encoder and decoder */ void MPV_common_end(MpegEncContext *s) { int i, j, k; free_duplicate_context(s->thread_context[0]); av_freep(&s->parse_context.buffer); s->parse_context.buffer_size=0; av_freep(&s->mb_type); av_freep(&s->p_mv_table_base); av_freep(&s->b_forw_mv_table_base); av_freep(&s->b_back_mv_table_base); av_freep(&s->b_bidir_forw_mv_table_base); av_freep(&s->b_bidir_back_mv_table_base); av_freep(&s->b_direct_mv_table_base); s->p_mv_table= NULL; s->b_forw_mv_table= NULL; s->b_back_mv_table= NULL; s->b_bidir_forw_mv_table= NULL; s->b_bidir_back_mv_table= NULL; s->b_direct_mv_table= NULL; for(i=0; i<2; i++){ for(j=0; j<2; j++){ for(k=0; k<2; k++){ av_freep(&s->b_field_mv_table_base[i][j][k]); s->b_field_mv_table[i][j][k]=NULL; } av_freep(&s->b_field_select_table[i][j]); av_freep(&s->p_field_mv_table_base[i][j]); s->p_field_mv_table[i][j]=NULL; } av_freep(&s->p_field_select_table[i]); } av_freep(&s->prev_pict_types); av_freep(&s->mb_index2xy); av_freep(&s->lambda_table); av_freep(&s->q_intra_matrix); av_freep(&s->q_inter_matrix); av_freep(&s->q_intra_matrix16); av_freep(&s->q_inter_matrix16); av_freep(&s->input_picture); av_freep(&s->reordered_input_picture); if(s->picture){ for(i=0; ipicture[i]); } } av_freep(&s->picture); s->context_initialized = 0; s->last_picture_ptr= s->next_picture_ptr= s->current_picture_ptr= NULL; for(i=0; i<3; i++) av_freep(&s->visualization_buffer[i]); } /* init video encoder */ int MPV_encode_init(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; int i, dummy; int chroma_h_shift, chroma_v_shift; MPV_encode_defaults(s); avctx->pix_fmt = PIX_FMT_YUV420P; /* FIXME*/ s->bit_rate = avctx->bit_rate; s->width = avctx->width; s->height = avctx->height; if(avctx->gop_size > 600){ av_log(avctx, AV_LOG_ERROR, "Warning keyframe interval too large! 
reducing it ...\n"); avctx->gop_size=600; } s->gop_size = avctx->gop_size; s->avctx = avctx; s->flags= avctx->flags; s->flags2= avctx->flags2; s->max_b_frames= avctx->max_b_frames; s->codec_id= avctx->codec->id; s->intra_dc_precision= avctx->intra_dc_precision; if (s->gop_size <= 1) { s->intra_only = 1; s->gop_size = 12; } else { s->intra_only = 0; } if(avctx->rc_max_rate && !avctx->rc_buffer_size){ av_log(avctx, AV_LOG_ERROR, "a vbv buffer size is needed, for encoding with a maximum bitrate\n"); return -1; } if(avctx->rc_min_rate && avctx->rc_max_rate != avctx->rc_min_rate){ av_log(avctx, AV_LOG_INFO, "Warning min_rate > 0 but min_rate != max_rate isnt recommanded!\n"); } if(avctx->rc_min_rate && avctx->rc_min_rate > avctx->bit_rate){ av_log(avctx, AV_LOG_INFO, "bitrate below min bitrate\n"); return -1; } if(avctx->rc_max_rate && avctx->rc_max_rate < avctx->bit_rate){ av_log(avctx, AV_LOG_INFO, "bitrate above max bitrate\n"); return -1; } if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && int64_t_C(90000) * (avctx->rc_buffer_size-1) > s->avctx->rc_max_rate*int64_t_C(0xFFFF)){ av_log(avctx, AV_LOG_INFO, "Warning vbv_delay will be set to 0xFFFF (=VBR) as the specified vbv buffer is too large for the given bitrate!\n"); } i= ff_gcd(avctx->frame_rate, avctx->frame_rate_base); if(i > 1){ av_log(avctx, AV_LOG_INFO, "removing common factors from framerate\n"); avctx->frame_rate /= i; avctx->frame_rate_base /= i; /* return -1;*/ } { s->intra_quant_bias= 3<<(QUANT_BIAS_SHIFT-3); /*(a + x*3/8)/x*/ s->inter_quant_bias= 0; } if(avctx->intra_quant_bias != FF_DEFAULT_QUANT_BIAS) s->intra_quant_bias= avctx->intra_quant_bias; if(avctx->inter_quant_bias != FF_DEFAULT_QUANT_BIAS) s->inter_quant_bias= avctx->inter_quant_bias; avcodec_get_chroma_sub_sample(avctx->pix_fmt, &chroma_h_shift, &chroma_v_shift); av_reduce(&s->time_increment_resolution, &dummy, s->avctx->frame_rate, s->avctx->frame_rate_base, (1<<16)-1); s->out_format = FMT_MPEG1; avctx->delay= (s->max_b_frames + 1); /* init */ if (MPV_common_init(s) < 0) return -1; ff_mpeg1_encode_init(s); /* init q matrix */ for(i=0;i<64;i++) { s->intra_matrix[i] = ff_mpeg1_default_intra_matrix[i]; s->inter_matrix[i] = ff_mpeg1_default_non_intra_matrix[i]; if(s->avctx->intra_matrix) s->intra_matrix[i] = s->avctx->intra_matrix[i]; if(s->avctx->inter_matrix) s->inter_matrix[i] = s->avctx->inter_matrix[i]; } /* precompute matrix */ /* for mjpeg, we do include qscale in the matrix */ { convert_matrix(&s->dsp, s->q_intra_matrix, s->q_intra_matrix16, s->intra_matrix, s->intra_quant_bias, 1, 31); convert_matrix(&s->dsp, s->q_inter_matrix, s->q_inter_matrix16, s->inter_matrix, s->inter_quant_bias, 1, 31); } if(ff_rate_control_init(s) < 0) return -1; return 0; } int MPV_encode_end(AVCodecContext *avctx) { MpegEncContext *s = avctx->priv_data; ff_rate_control_uninit(s); MPV_common_end(s); return 0; } void init_rl(RLTable *rl) { int8_t max_level[MAX_RUN+1], max_run[MAX_LEVEL+1]; uint8_t index_run[MAX_RUN+1]; int last, run, level, start, end, i; /* compute max_level[], max_run[] and index_run[] */ for(last=0;last<2;last++) { if (last == 0) { start = 0; end = rl->last; } else { start = rl->last; end = rl->n; } memset(max_level, 0, MAX_RUN + 1); memset(max_run, 0, MAX_LEVEL + 1); memset(index_run, rl->n, MAX_RUN + 1); for(i=start;itable_run[i]; level = rl->table_level[i]; if (index_run[run] == rl->n) index_run[run] = i; if (level > max_level[run]) max_level[run] = level; if (run > max_run[level]) max_run[level] = run; } rl->max_level[last] = 
av_malloc(MAX_RUN + 1); memcpy(rl->max_level[last], max_level, MAX_RUN + 1); rl->max_run[last] = av_malloc(MAX_LEVEL + 1); memcpy(rl->max_run[last], max_run, MAX_LEVEL + 1); rl->index_run[last] = av_malloc(MAX_RUN + 1); memcpy(rl->index_run[last], index_run, MAX_RUN + 1); } } int ff_find_unused_picture(MpegEncContext *s, int shared){ int i; if(shared){ for(i=0; ipicture[i].data[0]==NULL && s->picture[i].type==0) return i; } }else{ for(i=0; ipicture[i].data[0]==NULL && s->picture[i].type!=0) return i; /*FIXME*/ } for(i=0; ipicture[i].data[0]==NULL) return i; } } assert(0); return -1; } /** * generic function for encode/decode called after coding/decoding the header and before a frame is coded/decoded */ int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx) { assert(s->last_picture_ptr==NULL); /* mark&release old frames */ if (s->pict_type != B_TYPE && s->last_picture_ptr && s->last_picture_ptr != s->next_picture_ptr && s->last_picture_ptr->data[0]) { avctx->release_buffer(avctx, (AVFrame*)s->last_picture_ptr); } alloc: s->current_picture_ptr->pict_type= s->pict_type; s->current_picture_ptr->key_frame= s->pict_type == I_TYPE; copy_picture(&s->current_picture, s->current_picture_ptr); { if (s->pict_type != B_TYPE) { s->last_picture_ptr= s->next_picture_ptr; if(!s->dropable) s->next_picture_ptr= s->current_picture_ptr; } /* av_log(s->avctx, AV_LOG_DEBUG, "L%p N%p C%p L%p N%p C%p type:%d drop:%d\n", s->last_picture_ptr, s->next_picture_ptr,s->current_picture_ptr, s->last_picture_ptr ? s->last_picture_ptr->data[0] : NULL, s->next_picture_ptr ? s->next_picture_ptr->data[0] : NULL, s->current_picture_ptr ? s->current_picture_ptr->data[0] : NULL, s->pict_type, s->dropable);*/ if(s->last_picture_ptr) copy_picture(&s->last_picture, s->last_picture_ptr); if(s->next_picture_ptr) copy_picture(&s->next_picture, s->next_picture_ptr); if(s->pict_type != I_TYPE && (s->last_picture_ptr==NULL || s->last_picture_ptr->data[0]==NULL)){ av_log(avctx, AV_LOG_ERROR, "warning: first frame is no keyframe\n"); assert(s->pict_type != B_TYPE); /*these should have been dropped if we dont have a reference*/ goto alloc; } assert(s->pict_type == I_TYPE || (s->last_picture_ptr && s->last_picture_ptr->data[0])); } return 0; } /* generic function for encode/decode called after a frame has been coded/decoded */ void MPV_frame_end(MpegEncContext *s) { int i; /* draw edge for correct motion prediction if outside */ s->last_pict_type = s->pict_type; if(s->pict_type!=B_TYPE){ s->last_non_b_pict_type= s->pict_type; } /* release non refernce frames */ for(i=0; ipicture[i].data[0] && !s->picture[i].reference /*&& s->picture[i].type!=FF_BUFFER_TYPE_SHARED*/){ s->avctx->release_buffer(s->avctx, (AVFrame*)&s->picture[i]); } } } static int get_sae(uint8_t *src, int ref, int stride) { int x,y; int acc=0; for(y=0; y<16; y++){ for(x=0; x<16; x++){ acc+= ABS(src[x+y*stride] - ref); } } return acc; } static int get_intra_count(MpegEncContext *s, uint8_t *src, uint8_t *ref, int stride) { int x, y, w, h; int acc=0; w= s->width &~15; h= s->height&~15; for(y=0; ydsp.pix_sum(src + offset, stride) + 128)>>8; int sae = get_sae(src + offset, mean, stride); acc+= sae + 500 < sad; } } return acc; } static int load_input_picture(MpegEncContext *s, AVFrame *pic_arg) { AVFrame *pic=NULL; int i; const int encoding_delay= s->max_b_frames; int direct=1; if(pic_arg){ if(encoding_delay) direct=0; if(pic_arg->linesize[0] != s->linesize) direct=0; if(pic_arg->linesize[1] != s->uvlinesize) direct=0; if(pic_arg->linesize[2] != s->uvlinesize) direct=0; 
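        /* Two paths from here: if no reordering delay is needed and the
         * caller's strides already match the encoder's, the AVFrame planes
         * are referenced directly (the shared-buffer case handled later in
         * select_input_picture()); otherwise the picture is copied into an
         * internal buffer, row by row when the strides differ. */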
if(direct){ i= ff_find_unused_picture(s, 1); pic= (AVFrame*)&s->picture[i]; pic->reference= 3; for(i=0; i<4; i++){ pic->data[i]= pic_arg->data[i]; pic->linesize[i]= pic_arg->linesize[i]; } alloc_picture(s, (Picture*)pic, 1); }else{ int offset= 16; i= ff_find_unused_picture(s, 0); pic= (AVFrame*)&s->picture[i]; pic->reference= 3; alloc_picture(s, (Picture*)pic, 0); if( pic->data[0] + offset == pic_arg->data[0] && pic->data[1] + offset == pic_arg->data[1] && pic->data[2] + offset == pic_arg->data[2]){ /* empty*/ }else{ int h_chroma_shift, v_chroma_shift; avcodec_get_chroma_sub_sample(s->avctx->pix_fmt, &h_chroma_shift, &v_chroma_shift); for(i=0; i<3; i++){ int src_stride= pic_arg->linesize[i]; int dst_stride= i ? s->uvlinesize : s->linesize; int h_shift= i ? h_chroma_shift : 0; int v_shift= i ? v_chroma_shift : 0; int w= s->width >>h_shift; int h= s->height>>v_shift; uint8_t *src= pic_arg->data[i]; uint8_t *dst= pic->data[i] + offset; if(src_stride==dst_stride) memcpy(dst, src, src_stride*h); else{ while(h--){ memcpy(dst, src, w); dst += dst_stride; src += src_stride; } } } } } copy_picture_attributes(s, pic, pic_arg); pic->display_picture_number= s->input_picture_number++; if(pic->pts != AV_NOPTS_VALUE){ s->user_specified_pts= pic->pts; }else{ if(s->user_specified_pts){ pic->pts= s->user_specified_pts + AV_TIME_BASE*(int64_t)s->avctx->frame_rate_base / s->avctx->frame_rate; av_log(s->avctx, AV_LOG_INFO, "Warning: AVFrame.pts=? trying to guess (%Ld)\n", pic->pts); }else{ pic->pts= av_rescale(pic->display_picture_number*(int64_t)s->avctx->frame_rate_base, AV_TIME_BASE, s->avctx->frame_rate); } } } /* shift buffer entries */ for(i=1; iencoding_delay+1*/; i++) s->input_picture[i-1]= s->input_picture[i]; s->input_picture[encoding_delay]= (Picture*)pic; return 0; } static void select_input_picture(MpegEncContext *s) { int i; for(i=1; ireordered_input_picture[i-1]= s->reordered_input_picture[i]; s->reordered_input_picture[MAX_PICTURE_COUNT-1]= NULL; /* set next picture types & ordering */ if(s->reordered_input_picture[0]==NULL && s->input_picture[0]){ if(/*s->picture_in_gop_number >= s->gop_size ||*/ s->next_picture_ptr==NULL || s->intra_only){ s->reordered_input_picture[0]= s->input_picture[0]; s->reordered_input_picture[0]->pict_type= I_TYPE; s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; }else{ int b_frames; if(s->input_picture[0]->pict_type){ /* user selected pict_type */ for(b_frames=0; b_framesmax_b_frames+1; b_frames++){ if(s->input_picture[b_frames]->pict_type!=B_TYPE) break; } if(b_frames > s->max_b_frames){ av_log(s->avctx, AV_LOG_ERROR, "warning, too many bframes in a row\n"); b_frames = s->max_b_frames; } }else if(s->avctx->b_frame_strategy==0){ b_frames= s->max_b_frames; while(b_frames && !s->input_picture[b_frames]) b_frames--; }else if(s->avctx->b_frame_strategy==1){ for(i=1; imax_b_frames+1; i++){ if(s->input_picture[i] && s->input_picture[i]->b_frame_score==0){ s->input_picture[i]->b_frame_score= get_intra_count(s, s->input_picture[i ]->data[0], s->input_picture[i-1]->data[0], s->linesize) + 1; } } for(i=0; imax_b_frames; i++){ if(s->input_picture[i]==NULL || s->input_picture[i]->b_frame_score - 1 > s->mb_num/40) break; } b_frames= FFMAX(0, i-1); /* reset scores */ for(i=0; iinput_picture[i]->b_frame_score=0; } }else{ av_log(s->avctx, AV_LOG_ERROR, "illegal b frame strategy\n"); b_frames=0; } if(s->picture_in_gop_number + b_frames >= s->gop_size){ s->input_picture[b_frames]->pict_type= I_TYPE; } s->reordered_input_picture[0]= 
s->input_picture[b_frames]; if(s->reordered_input_picture[0]->pict_type != I_TYPE) s->reordered_input_picture[0]->pict_type= P_TYPE; s->reordered_input_picture[0]->coded_picture_number= s->coded_picture_number++; for(i=0; ireordered_input_picture[i+1]= s->input_picture[i]; s->reordered_input_picture[i+1]->pict_type= B_TYPE; s->reordered_input_picture[i+1]->coded_picture_number= s->coded_picture_number++; } } } if(s->reordered_input_picture[0]){ s->reordered_input_picture[0]->reference= s->reordered_input_picture[0]->pict_type!=B_TYPE ? 3 : 0; copy_picture(&s->new_picture, s->reordered_input_picture[0]); if(s->reordered_input_picture[0]->type == FF_BUFFER_TYPE_SHARED){ /* input is a shared pix, so we cant modifiy it -> alloc a new one & ensure that the shared one is reuseable*/ int i= ff_find_unused_picture(s, 0); Picture *pic= &s->picture[i]; /* mark us unused / free shared pic */ for(i=0; i<4; i++) s->reordered_input_picture[0]->data[i]= NULL; s->reordered_input_picture[0]->type= 0; pic->reference = s->reordered_input_picture[0]->reference; alloc_picture(s, pic, 0); copy_picture_attributes(s, (AVFrame*)pic, (AVFrame*)s->reordered_input_picture[0]); s->current_picture_ptr= pic; }else{ /* input is not a shared pix -> reuse buffer for current_pix*/ assert( s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_USER || s->reordered_input_picture[0]->type==FF_BUFFER_TYPE_INTERNAL); s->current_picture_ptr= s->reordered_input_picture[0]; for(i=0; i<4; i++){ s->new_picture.data[i]+=16; } } copy_picture(&s->current_picture, s->current_picture_ptr); s->picture_number= s->new_picture.display_picture_number; }else{ memset(&s->new_picture, 0, sizeof(Picture)); } } #ifdef ALT_BITSTREAM_WRITER # define init_put_bits(s, buffer, buffer_size)\ (s)->buf = (buffer);\ (s)->buf_end = (s)->buf + (buffer_size);\ (s)->index=0;\ ((uint32_t*)((s)->buf))[0]=0 #else # define init_put_bits(s, buffer, buffer_size)\ (s)->buf = (buffer);\ (s)->buf_end = (s)->buf + (buffer_size);\ (s)->buf_ptr = (s)->buf;\ (s)->bit_left=32;\ (s)->bit_buf=0 #endif /* pad the end of the output stream with zeros */ #ifdef ALT_BITSTREAM_WRITER # define flush_put_bits(s) align_put_bits(s) #else # define flush_put_bits(s)\ (s)->bit_buf<<= (s)->bit_left;\ while ((s)->bit_left < 32) {\ /* XXX: should test end of buffer */\ *(s)->buf_ptr++=(s)->bit_buf >> 24;\ (s)->bit_buf<<=8;\ (s)->bit_left+=8;\ }\ (s)->bit_left=32;\ (s)->bit_buf=0 #endif int MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data) { MpegEncContext *s = avctx->priv_data; AVFrame *pic_arg = data; int stuffing_count; int start_y= s->thread_context[0]->start_mb_y; int end_y= s->thread_context[0]-> end_mb_y; int h= s->mb_height; uint8_t *start= buf + buf_size*start_y/h; uint8_t *end = buf + buf_size* end_y/h; init_put_bits(&s->thread_context[0]->pb, start, end - start); s->picture_in_gop_number++; load_input_picture(s, pic_arg); select_input_picture(s); /* output? 
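       Nothing may come out of the first few calls: while the B-frame
       reordering delay is still being filled, select_input_picture() leaves
       new_picture.data[0] NULL and the else branch below returns 0 bytes.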
*/ if(s->new_picture.data[0]){ s->pict_type= s->new_picture.pict_type; MPV_frame_start(s, avctx); encode_picture(s, s->picture_number); avctx->real_pict_num = s->picture_number; avctx->header_bits = s->header_bits; avctx->mv_bits = s->mv_bits; avctx->misc_bits = s->misc_bits; avctx->i_tex_bits = s->i_tex_bits; avctx->p_tex_bits = s->p_tex_bits; avctx->i_count = s->i_count; avctx->p_count = s->mb_num - s->i_count - s->skip_count; /*FIXME f/b_count in avctx*/ avctx->skip_count = s->skip_count; MPV_frame_end(s); flush_put_bits(&s->pb); s->frame_bits = put_bits_count(&s->pb); stuffing_count= ff_vbv_update(s, s->frame_bits); if(stuffing_count){ while(stuffing_count--){ put_bits(&s->pb, 8, 0); } flush_put_bits(&s->pb); s->frame_bits = put_bits_count(&s->pb); } /* update mpeg1/2 vbv_delay for CBR */ if(s->avctx->rc_max_rate && s->avctx->rc_min_rate == s->avctx->rc_max_rate && int64_t_C(90000)*(avctx->rc_buffer_size-1) <= s->avctx->rc_max_rate*int64_t_C(0xFFFF)){ int vbv_delay; vbv_delay= lrintf(90000 * s->rc_context.buffer_index / s->avctx->rc_max_rate); assert(vbv_delay < 0xFFFF); s->vbv_delay_ptr[0] &= 0xF8; s->vbv_delay_ptr[0] |= vbv_delay>>13; s->vbv_delay_ptr[1] = vbv_delay>>5; s->vbv_delay_ptr[2] &= 0x07; s->vbv_delay_ptr[2] |= vbv_delay<<3; } s->total_bits += s->frame_bits; avctx->frame_bits = s->frame_bits; }else{ assert((pbBufPtr(&s->pb) == s->pb.buf)); s->frame_bits=0; } assert((s->frame_bits&7)==0); return s->frame_bits/8; } /** * Copies a rectangular area of samples to a temporary buffer and replicates the boarder samples. * @param buf destination buffer * @param src source buffer * @param linesize number of bytes between 2 vertically adjacent samples in both the source and destination buffers * @param block_w width of block * @param block_h height of block * @param src_x x coordinate of the top left sample of the block in the source buffer * @param src_y y coordinate of the top left sample of the block in the source buffer * @param w width of the source buffer * @param h height of the source buffer */ void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h, int src_x, int src_y, int w, int h) { int x, y; int start_y, start_x, end_y, end_x; if(src_y>= h){ src+= (h-1-src_y)*linesize; src_y=h-1; }else if(src_y<=-block_h){ src+= (1-block_h-src_y)*linesize; src_y=1-block_h; } if(src_x>= w){ src+= (w-1-src_x); src_x=w-1; }else if(src_x<=-block_w){ src+= (1-block_w-src_x); src_x=1-block_w; } start_y= FFMAX(0, -src_y); start_x= FFMAX(0, -src_x); end_y= FFMIN(block_h, h-src_y); end_x= FFMIN(block_w, w-src_x); /* copy existing part*/ for(y=start_y; yv_edge_pos; linesize = s->current_picture.linesize[0]; uvlinesize = s->current_picture.linesize[1]; dxy = ((motion_y & 1) << 1) | (motion_x & 1); src_x = s->mb_x* 16 + (motion_x >> 1); src_y =(s->mb_y<<4) + (motion_y >> 1); if(s->chroma_y_shift){ mx = motion_x / 2; my = motion_y / 2; uvdxy = ((my & 1) << 1) | (mx & 1); uvsrc_x = s->mb_x* 8 + (mx >> 1); uvsrc_y = (s->mb_y<<3) + (my >> 1); } else { if(s->chroma_x_shift){ /*Chroma422*/ mx = motion_x / 2; uvdxy = ((motion_y & 1) << 1) | (mx & 1); uvsrc_x = s->mb_x* 8 + (mx >> 1); uvsrc_y = src_y; } else { /*Chroma444*/ uvdxy = dxy; uvsrc_x = src_x; uvsrc_y = src_y; } } ptr_y = ref_picture[0] + src_y * linesize + src_x; ptr_cb = ref_picture[1] + uvsrc_y * uvlinesize + uvsrc_x; ptr_cr = ref_picture[2] + uvsrc_y * uvlinesize + uvsrc_x; if( (unsigned)src_x > s->h_edge_pos - (motion_x&1) - 16 || (unsigned)src_y > v_edge_pos - (motion_y&1) - 16){ 
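        /* The unsigned casts above fold two range checks into one compare:
         * a negative src_x/src_y wraps around to a huge unsigned value, so
         * "(unsigned)v > limit" also catches v < 0. */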
av_log(s->avctx,AV_LOG_DEBUG,"MPEG motion vector out of boundary\n"); return; } pix_op[0][dxy](dest_y, ptr_y, linesize, 16); pix_op[s->chroma_x_shift][uvdxy](dest_cb, ptr_cb, uvlinesize, 16 >> s->chroma_y_shift); pix_op[s->chroma_x_shift][uvdxy](dest_cr, ptr_cr, uvlinesize, 16 >> s->chroma_y_shift); } /* put block[] to dest[] */ #define put_dct(i, dest, line_size, qscale) \ dct_unquantize_mpeg1_intra_c(s, block[i], (i), (qscale));\ simple_idct_put((dest), (line_size), block[i]) #define add_dequant_dct(i, dest, line_size, qscale) \ if (s->block_last_index[i] >= 0) {\ dct_unquantize_mpeg1_inter_c(s, block[i], (i), (qscale));\ simple_idct_add((dest), (line_size), block[i]);\ } /* generic function called after a macroblock has been parsed by the decoder or after it has been encoded by the encoder. Important variables used: s->mb_intra : true if intra macroblock s->mv_dir : motion vector direction s->mv : motion vector */ void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]) { /* update DC predictors for P macroblocks */ if (!s->mb_intra) { s->last_dc[0] = s->last_dc[1] = s->last_dc[2] = 128 << s->intra_dc_precision; } if (!(s->intra_only || s->pict_type==B_TYPE)) { /*FIXME precalc*/ uint8_t *dest_y, *dest_cb, *dest_cr; int dct_linesize, dct_offset; const int linesize= s->current_picture.linesize[0]; /*not s->linesize as this woulnd be wrong for field pics*/ const int uvlinesize= s->current_picture.linesize[1]; dct_linesize = linesize; dct_offset = linesize*8; dest_y= s->dest[0]; dest_cb= s->dest[1]; dest_cr= s->dest[2]; if (!s->mb_intra) { /* motion handling */ /* add dct residue */ add_dequant_dct(0, dest_y, dct_linesize, s->qscale); add_dequant_dct(1, dest_y + 8, dct_linesize, s->qscale); add_dequant_dct(2, dest_y + dct_offset, dct_linesize, s->qscale); add_dequant_dct(3, dest_y + dct_offset + 8, dct_linesize, s->qscale); add_dequant_dct(4, dest_cb, uvlinesize, s->chroma_qscale); add_dequant_dct(5, dest_cr, uvlinesize, s->chroma_qscale); } else { /* dct only in intra block */ put_dct(0, dest_y, dct_linesize, s->qscale); put_dct(1, dest_y + 8, dct_linesize, s->qscale); put_dct(2, dest_y + dct_offset, dct_linesize, s->qscale); put_dct(3, dest_y + dct_offset + 8, dct_linesize, s->qscale); put_dct(4, dest_cb, uvlinesize, s->chroma_qscale); put_dct(5, dest_cr, uvlinesize, s->chroma_qscale); } } } void ff_init_block_index(MpegEncContext *s){ /*FIXME maybe rename*/ const int linesize= s->current_picture.linesize[0]; /*not s->linesize as this woulnd be wrong for field pics*/ const int uvlinesize= s->current_picture.linesize[1]; s->block_index[0]= s->b8_stride*(s->mb_y*2 ) - 2 + s->mb_x*2; s->block_index[1]= s->b8_stride*(s->mb_y*2 ) - 1 + s->mb_x*2; s->block_index[2]= s->b8_stride*(s->mb_y*2 + 1) - 2 + s->mb_x*2; s->block_index[3]= s->b8_stride*(s->mb_y*2 + 1) - 1 + s->mb_x*2; s->block_index[4]= s->mb_stride*(s->mb_y + 1) + s->b8_stride*s->mb_height*2 + s->mb_x - 1; s->block_index[5]= s->mb_stride*(s->mb_y + s->mb_height + 2) + s->b8_stride*s->mb_height*2 + s->mb_x - 1; /*block_index is not used by mpeg2, so it is not affected by chroma_format*/ s->dest[0] = s->current_picture.data[0] + (s->mb_x - 1)*16; s->dest[1] = s->current_picture.data[1] + (s->mb_x - 1)*(16 >> s->chroma_x_shift); s->dest[2] = s->current_picture.data[2] + (s->mb_x - 1)*(16 >> s->chroma_x_shift); s->dest[0] += s->mb_y * linesize * 16; s->dest[1] += s->mb_y * uvlinesize * (16 >> s->chroma_y_shift); s->dest[2] += s->mb_y * uvlinesize * (16 >> s->chroma_y_shift); } static void encode_mb(MpegEncContext *s, int motion_x, int 
motion_y) { const int mb_x= s->mb_x; const int mb_y= s->mb_y; int i; int skip_dct[6]; int dct_offset = s->linesize*8; /*default for progressive frames*/ uint8_t *ptr_y, *ptr_cb, *ptr_cr; int wrap_y, wrap_c; for(i=0; i<6; i++) skip_dct[i]=0; wrap_y = s->linesize; wrap_c = s->uvlinesize; ptr_y = s->new_picture.data[0] + (mb_y * 16 * wrap_y) + mb_x * 16; ptr_cb = s->new_picture.data[1] + (mb_y * 8 * wrap_c) + mb_x * 8; ptr_cr = s->new_picture.data[2] + (mb_y * 8 * wrap_c) + mb_x * 8; if(mb_x*16+16 > s->width || mb_y*16+16 > s->height){ ff_emulated_edge_mc(s->edge_emu_buffer , ptr_y , wrap_y,16,16,mb_x*16,mb_y*16, s->width , s->height); ptr_y= s->edge_emu_buffer; ff_emulated_edge_mc(s->edge_emu_buffer+18*wrap_y , ptr_cb, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); ptr_cb= s->edge_emu_buffer+18*wrap_y; ff_emulated_edge_mc(s->edge_emu_buffer+18*wrap_y+9, ptr_cr, wrap_c, 8, 8, mb_x*8, mb_y*8, s->width>>1, s->height>>1); ptr_cr= s->edge_emu_buffer+18*wrap_y+9; } if (s->mb_intra) { s->dsp.get_pixels(s->block[0], ptr_y , wrap_y); s->dsp.get_pixels(s->block[1], ptr_y + 8, wrap_y); s->dsp.get_pixels(s->block[2], ptr_y + dct_offset , wrap_y); s->dsp.get_pixels(s->block[3], ptr_y + dct_offset + 8, wrap_y); s->dsp.get_pixels(s->block[4], ptr_cb, wrap_c); s->dsp.get_pixels(s->block[5], ptr_cr, wrap_c); }else{ op_pixels_func (*op_pix)[4]; uint8_t *dest_y, *dest_cb, *dest_cr; dest_y = s->dest[0]; dest_cb = s->dest[1]; dest_cr = s->dest[2]; op_pix = s->dsp.put_pixels_tab; if (s->mv_dir & MV_DIR_FORWARD) { mpeg_motion(s, dest_y, dest_cb, dest_cr, s->last_picture.data, op_pix, s->mv[0][0][0], s->mv[0][0][1]); op_pix = s->dsp.avg_pixels_tab; } if (s->mv_dir & MV_DIR_BACKWARD) { mpeg_motion(s, dest_y, dest_cb, dest_cr, s->next_picture.data, op_pix, s->mv[1][0][0], s->mv[1][0][1]); } s->dsp.diff_pixels(s->block[0], ptr_y , dest_y , wrap_y); s->dsp.diff_pixels(s->block[1], ptr_y + 8, dest_y + 8, wrap_y); s->dsp.diff_pixels(s->block[2], ptr_y + dct_offset , dest_y + dct_offset , wrap_y); s->dsp.diff_pixels(s->block[3], ptr_y + dct_offset + 8, dest_y + dct_offset + 8, wrap_y); { s->dsp.diff_pixels(s->block[4], ptr_cb, dest_cb, wrap_c); s->dsp.diff_pixels(s->block[5], ptr_cr, dest_cr, wrap_c); } /* pre quantization */ if(s->current_picture.mc_mb_var[s->mb_stride*mb_y+ mb_x] < 2*s->qscale*s->qscale){ /*FIXME optimize*/ if(pix_abs8_c(NULL, ptr_y , dest_y , wrap_y, 8) < 20*s->qscale) skip_dct[0]= 1; if(pix_abs8_c(NULL, ptr_y + 8, dest_y + 8, wrap_y, 8) < 20*s->qscale) skip_dct[1]= 1; if(pix_abs8_c(NULL, ptr_y +dct_offset , dest_y +dct_offset , wrap_y, 8) < 20*s->qscale) skip_dct[2]= 1; if(pix_abs8_c(NULL, ptr_y +dct_offset+ 8, dest_y +dct_offset+ 8, wrap_y, 8) < 20*s->qscale) skip_dct[3]= 1; if(pix_abs8_c(NULL, ptr_cb , dest_cb , wrap_c, 8) < 20*s->qscale) skip_dct[4]= 1; if(pix_abs8_c(NULL, ptr_cr , dest_cr , wrap_c, 8) < 20*s->qscale) skip_dct[5]= 1; } } /* DCT & quantize */ assert(s->qscale==8); for(i=0;i<6;i++) { if(!skip_dct[i]){ int overflow; s->block_last_index[i] = dct_quantize_c(s, s->block[i], i, s->qscale, &overflow); /* FIXME we could decide to change to quantizer instead of clipping*/ /* JS: I don't think that would be a good idea it could lower quality instead*/ /* of improve it. 
Just INTRADC clipping deserves changes in quantizer*/ if (overflow) { int icc; DCTELEM *block = s->block[i]; int last_index = s->block_last_index[i]; const int maxlevel= s->max_qcoeff; const int minlevel= s->min_qcoeff; overflow=0; if(s->mb_intra){ icc=1; /*skip clipping of intra dc*/ }else icc=0; for(;icc<=last_index; icc++){ const int j= s->intra_scantable.permutated[icc]; int level = block[j]; if (level>maxlevel){ level=maxlevel; overflow++; }else if(levelavctx, AV_LOG_INFO, "warning, cliping %d dct coefficents to %d..%d\n", overflow, minlevel, maxlevel); } }else s->block_last_index[i]= -1; } /* huffman encode */ mpeg1_encode_mb(s, s->block, motion_x, motion_y); } void ff_mpeg_flush(AVCodecContext *avctx) { int i; MpegEncContext *s = avctx->priv_data; if(s==NULL || s->picture==NULL) return; for(i=0; ipicture[i].data[0] && ( s->picture[i].type == FF_BUFFER_TYPE_INTERNAL || s->picture[i].type == FF_BUFFER_TYPE_USER)) avctx->release_buffer(avctx, (AVFrame*)&s->picture[i]); } s->current_picture_ptr = s->last_picture_ptr = s->next_picture_ptr = NULL; s->parse_context.state= -1; s->parse_context.frame_start_found= 0; s->parse_context.overread= 0; s->parse_context.overread_index= 0; s->parse_context.index= 0; s->parse_context.last_index= 0; } void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length) { const uint16_t *srcw= (uint16_t*)src; int words= length>>4; int bits= length&15; int i; if(length==0) return; if(words < 16){ for(i=0; i>(16-bits)); } static int sse(MpegEncContext *s, uint8_t *src1, uint8_t *src2, int w, int h, int stride) { uint32_t *sq = squareTbl + 256; int acc=0; int x,y; if(w==16 && h==16) return sse16_c(NULL, src1, src2, stride, 16); else if(w==8 && h==8) return sse8_c(NULL, src1, src2, stride, 8); for(y=0; y=0); return acc; } static int estimate_motion_thread(AVCodecContext *c, void *arg) { MpegEncContext *s= arg; s->first_slice_line=1; for(s->mb_y= s->start_mb_y; s->mb_y < s->end_mb_y; s->mb_y++) { s->mb_x=0; /*for block init below*/ ff_init_block_index(s); for(s->mb_x=0; s->mb_x < s->mb_width; s->mb_x++) { s->block_index[0]+=2; s->block_index[1]+=2; s->block_index[2]+=2; s->block_index[3]+=2; /* compute motion vector & mb_type and store in context */ if(s->pict_type==B_TYPE) ff_estimate_b_frame_motion(s, s->mb_x, s->mb_y); else ff_estimate_p_frame_motion(s, s->mb_x, s->mb_y); } s->first_slice_line=0; } return 0; } static int mb_var_thread(AVCodecContext *c, void *arg) { MpegEncContext *s= arg; int mb_x, mb_y; for(mb_y=s->start_mb_y; mb_y < s->end_mb_y; mb_y++) { for(mb_x=0; mb_x < s->mb_width; mb_x++) { int xx = mb_x * 16; int yy = mb_y * 16; uint8_t *pix = s->new_picture.data[0] + (yy * s->linesize) + xx; int varc; int sum = s->dsp.pix_sum(pix, s->linesize); varc = (s->dsp.pix_norm1(pix, s->linesize) - (((unsigned)(sum*sum))>>8) + 500 + 128)>>8; s->current_picture.mb_var [s->mb_stride * mb_y + mb_x] = varc; s->current_picture.mb_mean[s->mb_stride * mb_y + mb_x] = (sum+128)>>8; s->me.mb_var_sum_temp += varc; } } return 0; } /** * set qscale and update qscale dependant variables. 
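 * qscale is clamped to 1..31 (the 5-bit MPEG-1 quantiser_scale range), and
 * chroma_qscale, y_dc_scale and c_dc_scale are refreshed from their
 * per-qscale lookup tables so they can never go stale.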
*/ void ff_set_qscale(MpegEncContext * s, int qscale) { if (qscale < 1) qscale = 1; else if (qscale > 31) qscale = 31; s->qscale = qscale; s->chroma_qscale= s->chroma_qscale_table[qscale]; s->y_dc_scale= s->y_dc_scale_table[ qscale ]; s->c_dc_scale= s->c_dc_scale_table[ s->chroma_qscale ]; } static int encode_thread(AVCodecContext *c, void *arg) { MpegEncContext *s= arg; int mb_x, mb_y; int i; uint8_t bit_buf[2][3000]; uint8_t bit_buf2[2][3000]; uint8_t bit_buf_tex[2][3000]; PutBitContext pb[2], pb2[2], tex_pb[2]; for(i=0; i<2; i++){ init_put_bits(&pb [i], bit_buf [i], 3000); init_put_bits(&pb2 [i], bit_buf2 [i], 3000); init_put_bits(&tex_pb[i], bit_buf_tex[i], 3000); } s->last_bits= put_bits_count(&s->pb); s->mv_bits=0; s->misc_bits=0; s->i_tex_bits=0; s->p_tex_bits=0; s->i_count=0; s->f_count=0; s->b_count=0; s->skip_count=0; for(i=0; i<3; i++){ /* init last dc values */ /* note: quant matrix value (8) is implied here */ s->last_dc[i] = 128 << s->intra_dc_precision; } s->mb_skip_run = 0; memset(s->last_mv, 0, sizeof(s->last_mv)); s->last_mv_dir = 0; s->resync_mb_x=0; s->resync_mb_y=0; s->first_slice_line = 1; for(mb_y= s->start_mb_y; mb_y < s->end_mb_y; mb_y++) { s->mb_x=0; s->mb_y= mb_y; ff_set_qscale(s, s->qscale); ff_init_block_index(s); for(mb_x=0; mb_x < s->mb_width; mb_x++) { const int xy= mb_y*s->mb_stride + mb_x; int mb_type= s->mb_type[xy]; s->mb_x = mb_x; s->block_index[0]+=2; s->block_index[1]+=2; s->block_index[2]+=2; s->block_index[3]+=2; s->block_index[4]++; s->block_index[5]++; s->dest[0]+= 16; s->dest[1]+= 8; s->dest[2]+= 8; /* write gob / video packet header */ if( (s->resync_mb_x == s->mb_x) && s->resync_mb_y+1 == s->mb_y){ s->first_slice_line=0; } s->dquant=0; /*only for QP_RD*/ { int motion_x, motion_y; /* only one MB-Type possible*/ switch(mb_type){ case CANDIDATE_MB_TYPE_INTRA: s->mv_dir = 0; s->mb_intra= 1; motion_x= s->mv[0][0][0] = 0; motion_y= s->mv[0][0][1] = 0; break; case CANDIDATE_MB_TYPE_INTER: s->mv_dir = MV_DIR_FORWARD; s->mb_intra= 0; motion_x= s->mv[0][0][0] = s->p_mv_table[xy][0]; motion_y= s->mv[0][0][1] = s->p_mv_table[xy][1]; break; case CANDIDATE_MB_TYPE_BIDIR: s->mv_dir = MV_DIR_FORWARD | MV_DIR_BACKWARD; s->mb_intra= 0; motion_x=0; motion_y=0; s->mv[0][0][0] = s->b_bidir_forw_mv_table[xy][0]; s->mv[0][0][1] = s->b_bidir_forw_mv_table[xy][1]; s->mv[1][0][0] = s->b_bidir_back_mv_table[xy][0]; s->mv[1][0][1] = s->b_bidir_back_mv_table[xy][1]; break; case CANDIDATE_MB_TYPE_BACKWARD: s->mv_dir = MV_DIR_BACKWARD; s->mb_intra= 0; motion_x= s->mv[1][0][0] = s->b_back_mv_table[xy][0]; motion_y= s->mv[1][0][1] = s->b_back_mv_table[xy][1]; break; case CANDIDATE_MB_TYPE_FORWARD: s->mv_dir = MV_DIR_FORWARD; s->mb_intra= 0; motion_x= s->mv[0][0][0] = s->b_forw_mv_table[xy][0]; motion_y= s->mv[0][0][1] = s->b_forw_mv_table[xy][1]; /* printf(" %d %d ", motion_x, motion_y);*/ break; default: motion_x=motion_y=0; /*gcc warning fix*/ av_log(s->avctx, AV_LOG_ERROR, "illegal MB type\n"); } encode_mb(s, motion_x, motion_y); /* RAL: Update last macrobloc type*/ s->last_mv_dir = s->mv_dir; MPV_decode_mb(s, s->block); } /* clean the MV table in IPS frames for direct mode in B frames */ if(s->mb_intra /* && I,P_TYPE */){ s->p_mv_table[xy][0]=0; s->p_mv_table[xy][1]=0; } } } align_put_bits(&s->pb); flush_put_bits(&s->pb); return 0; } /* must be called before writing the header */ void ff_set_mpeg4_time(MpegEncContext * s, int picture_number) { assert(s->current_picture_ptr->pts != AV_NOPTS_VALUE); s->time= (s->current_picture_ptr->pts*s->time_increment_resolution + 
AV_TIME_BASE/2)/AV_TIME_BASE; if(s->pict_type==B_TYPE){ s->pb_time= s->pp_time - (s->last_non_b_time - s->time); }else{ s->pp_time= s->time - s->last_non_b_time; s->last_non_b_time= s->time; } } static void encode_picture(MpegEncContext *s, int picture_number) { int i; int bits; s->picture_number = picture_number; /* Reset the average MB variance */ s->me.mb_var_sum_temp = s->me.mc_mb_var_sum_temp = 0; ff_set_mpeg4_time(s, s->picture_number); /*FIXME rename and use has_b_frames or similar*/ s->me.scene_change_score=0; s->mb_intra=0; /*for the rate distoration & bit compare functions*/ ff_init_me(s); /* Estimate motion for every MB */ if(s->pict_type != I_TYPE){ estimate_motion_thread(s->avctx, s->thread_context[0]); }else /* if(s->pict_type == I_TYPE) */{ /* I-Frame */ for(i=0; imb_stride*s->mb_height; i++) s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA; /* finding spatial complexity for I-frame rate control */ mb_var_thread(s->avctx, s->thread_context[0]); } s->current_picture.mc_mb_var_sum= s->current_picture_ptr->mc_mb_var_sum= s->me.mc_mb_var_sum_temp; s->current_picture. mb_var_sum= s->current_picture_ptr-> mb_var_sum= s->me. mb_var_sum_temp; if(s->me.scene_change_score > 0 && s->pict_type == P_TYPE){ s->pict_type= I_TYPE; for(i=0; imb_stride*s->mb_height; i++) s->mb_type[i]= CANDIDATE_MB_TYPE_INTRA; /*printf("Scene change detected, encoding as I Frame %d %d\n", s->current_picture.mb_var_sum, s->current_picture.mc_mb_var_sum);*/ } if(s->pict_type==P_TYPE) { s->f_code= ff_get_best_fcode(s, s->p_mv_table, CANDIDATE_MB_TYPE_INTER); ff_fix_long_mvs(s, s->p_mv_table, s->f_code, CANDIDATE_MB_TYPE_INTER, 0); } if(s->pict_type==B_TYPE) { int a, b; a = ff_get_best_fcode(s, s->b_forw_mv_table, CANDIDATE_MB_TYPE_FORWARD); b = ff_get_best_fcode(s, s->b_bidir_forw_mv_table, CANDIDATE_MB_TYPE_BIDIR); s->f_code = FFMAX(a, b); a = ff_get_best_fcode(s, s->b_back_mv_table, CANDIDATE_MB_TYPE_BACKWARD); b = ff_get_best_fcode(s, s->b_bidir_back_mv_table, CANDIDATE_MB_TYPE_BIDIR); s->b_code = FFMAX(a, b); ff_fix_long_mvs(s, s->b_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_FORWARD, 1); ff_fix_long_mvs(s, s->b_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BACKWARD, 1); ff_fix_long_mvs(s, s->b_bidir_forw_mv_table, s->f_code, CANDIDATE_MB_TYPE_BIDIR, 1); ff_fix_long_mvs(s, s->b_bidir_back_mv_table, s->b_code, CANDIDATE_MB_TYPE_BIDIR, 1); } s->current_picture.quality = ff_rate_estimate_qscale(s); /*FIXME pic_ptr*/ s->lambda= s->current_picture.quality; s->qscale= (s->lambda*139 + FF_LAMBDA_SCALE*64) >> (FF_LAMBDA_SHIFT + 7); s->qscale= clip(s->qscale, s->avctx->qmin, s->avctx->qmax); s->lambda2= (s->lambda*s->lambda + FF_LAMBDA_SCALE/2) >> FF_LAMBDA_SHIFT; if(s->qscale < 3 && s->max_qcoeff<=128 && s->pict_type==I_TYPE) s->qscale= 3; /*reduce cliping problems*/ /*FIXME var duplication*/ s->current_picture.key_frame= s->pict_type == I_TYPE; /*FIXME pic_ptr*/ s->current_picture.pict_type= s->pict_type; if(s->current_picture.key_frame) s->picture_in_gop_number=0; s->last_bits= put_bits_count(&s->pb); mpeg1_encode_picture_header(s, picture_number); bits= put_bits_count(&s->pb); s->header_bits= bits - s->last_bits; encode_thread(s->avctx, s->thread_context[0]); } static int dct_quantize_c(MpegEncContext *s, DCTELEM *block, int n, int qscale, int *overflow) { int i, j, level, last_non_zero, q, start_i; const int *qmat; const uint8_t *scantable= s->intra_scantable.scantable; int bias; int max=0; unsigned int threshold1, threshold2; ff_jpeg_fdct_islow(block); if (s->mb_intra) { if (n < 4) q = s->y_dc_scale; else q = 
s->c_dc_scale; q = q << 3; /* note: block[0] is assumed to be positive */ block[0] = (block[0] + (q >> 1)) / q; start_i = 1; last_non_zero = 0; qmat = s->q_intra_matrix[qscale]; bias= s->intra_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT); } else { start_i = 0; last_non_zero = -1; qmat = s->q_inter_matrix[qscale]; bias= s->inter_quant_bias<<(QMAT_SHIFT - QUANT_BIAS_SHIFT); } threshold1= (1<=start_i;i--) { j = scantable[i]; level = block[j] * qmat[j]; if(((unsigned)(level+threshold1))>threshold2){ last_non_zero = i; break; }else{ block[j]=0; } } for(i=start_i; i<=last_non_zero; i++) { j = scantable[i]; level = block[j] * qmat[j]; if(((unsigned)(level+threshold1))>threshold2){ if(level>0){ level= (bias + level)>>QMAT_SHIFT; block[j]= level; }else{ level= (bias - level)>>QMAT_SHIFT; block[j]= -level; } max |=level; }else{ block[j]=0; } } *overflow= s->max_qcoeff < max; /*overflow might have happend*/ return last_non_zero; } static void dct_unquantize_mpeg1_intra_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, nCoeffs; const uint16_t *quant_matrix; nCoeffs= s->block_last_index[n]; if (n < 4) block[0] = block[0] * s->y_dc_scale; else block[0] = block[0] * s->c_dc_scale; /* XXX: only mpeg1 */ quant_matrix = s->intra_matrix; for(i=1;i<=nCoeffs;i++) { int j= s->intra_scantable.permutated[i]; level = block[j]; if (level) { if (level < 0) { level = -level; level = (int)(level * qscale * quant_matrix[j]) >> 3; level = (level - 1) | 1; level = -level; } else { level = (int)(level * qscale * quant_matrix[j]) >> 3; level = (level - 1) | 1; } block[j] = level; } } } static void dct_unquantize_mpeg1_inter_c(MpegEncContext *s, DCTELEM *block, int n, int qscale) { int i, level, nCoeffs; const uint16_t *quant_matrix; nCoeffs= s->block_last_index[n]; quant_matrix = s->inter_matrix; for(i=0; i<=nCoeffs; i++) { int j= s->intra_scantable.permutated[i]; level = block[j]; if (level) { if (level < 0) { level = -level; level = (((level << 1) + 1) * qscale * ((int) (quant_matrix[j]))) >> 4; level = (level - 1) | 1; level = -level; } else { level = (((level << 1) + 1) * qscale * ((int) (quant_matrix[j]))) >> 4; level = (level - 1) | 1; } block[j] = level; } } } yorick-mpeg-0.1/libavcodec/mpegvideo.h0000644000076500001440000005153611254260253017422 0ustar frigautusers/* * Generic DCT based hybrid video encoder * Copyright (c) 2000, 2001, 2002 Fabrice Bellard. * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** * @file mpegvideo.h * mpegvideo header. 
*/ #ifndef AVCODEC_MPEGVIDEO_H #define AVCODEC_MPEGVIDEO_H #include "dsputil.h" #define FRAME_SKIPED 100 /*/< return value for header parsers if frame is not coded*/ enum OutputFormat { FMT_MPEG1 }; #define EDGE_WIDTH 16 #define MPEG_BUF_SIZE (16 * 1024) #define QMAT_SHIFT_MMX 16 #define QMAT_SHIFT 22 #define MAX_FCODE 7 #define MAX_MV 2048 #define MAX_PICTURE_COUNT 15 #define ME_MAP_SIZE 64 #define ME_MAP_SHIFT 3 #define ME_MAP_MV_BITS 11 /* run length table */ #define MAX_RUN 64 #define MAX_LEVEL 64 #define I_TYPE FF_I_TYPE /*/< Intra*/ #define P_TYPE FF_P_TYPE /*/< Predicted*/ #define B_TYPE FF_B_TYPE /*/< Bi-dir predicted*/ typedef struct Predictor{ double coeff; double count; double decay; } Predictor; typedef struct RateControlEntry{ int pict_type; float qscale; int mv_bits; int i_tex_bits; int p_tex_bits; int misc_bits; uint64_t expected_bits; int new_pict_type; float new_qscale; int mc_mb_var_sum; int mb_var_sum; int i_count; int f_code; int b_code; }RateControlEntry; /** * rate control context. */ typedef struct RateControlContext{ FILE *stats_file; int num_entries; /*/< number of RateControlEntries */ RateControlEntry *entry; double buffer_index; /*/< amount of bits in the video/audio buffer */ Predictor pred[5]; double short_term_qsum; /*/< sum of recent qscales */ double short_term_qcount; /*/< count of recent qscales */ double pass1_rc_eq_output_sum;/*/< sum of the output of the rc equation, this is used for normalization */ double pass1_wanted_bits; /*/< bits which should have been outputed by the pass1 code (including complexity init) */ double last_qscale; double last_qscale_for[5]; /*/< last qscale for a specific pict type, used for max_diff & ipb factor stuff */ int last_mc_mb_var_sum; int last_mb_var_sum; uint64_t i_cplx_sum[5]; uint64_t p_cplx_sum[5]; uint64_t mv_bits_sum[5]; uint64_t qscale_sum[5]; int frame_count[5]; int last_non_b_pict_type; }RateControlContext; /** * Scantable. */ typedef struct ScanTable{ const uint8_t *scantable; uint8_t permutated[64]; uint8_t raster_end[64]; /*#ifdef ARCH_POWERPC*/ /** Used by dct_quantise_alitvec to find last-non-zero */ /* uint8_t __align8 inverse[64]; */ /*#endif*/ } ScanTable; /** * Picture. */ typedef struct Picture{ FF_COMMON_FRAME /** * halfpel luma planes. */ uint8_t *interpolated[3]; int16_t (*motion_val_base[2])[2]; uint32_t *mb_type_base; #define MB_TYPE_INTRA MB_TYPE_INTRA4x4 /*default mb_type if theres just one type*/ int mb_var_sum; /*/< sum of MB variance for current frame */ int mc_mb_var_sum; /*/< motion compensated MB variance for current frame */ uint16_t *mb_var; /*/< Table for MB variances */ uint16_t *mc_mb_var; /*/< Table for motion compensated MB variances */ uint8_t *mb_mean; /*/< Table for MB luminance */ int b_frame_score; /* */ } Picture; typedef struct ParseContext{ uint8_t *buffer; int index; int last_index; int buffer_size; uint32_t state; /*/< contains the last few bytes in MSB order*/ int frame_start_found; int overread; /*/< the number of bytes which where irreversibly read from the next frame*/ int overread_index; /*/< the index into ParseContext.buffer of the overreaded bytes*/ } ParseContext; struct MpegEncContext; /** * Motion estimation context. 
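 * Bookkeeping for the motion search: scratch buffers, the map/score_map
 * hash used to avoid re-evaluating the same candidate vector, the search
 * window (xmin..ymax, pred_x/pred_y) and the mv_penalty table giving the
 * bit cost of each motion-vector difference.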
*/ typedef struct MotionEstContext{ AVCodecContext *avctx; int skip; /* set if ME is skiped for the current MB */ uint8_t *scratchpad; /* data area for the me algo, so that the ME doesnt need to malloc/free */ uint8_t *best_mb; uint8_t *temp; uint32_t *map; /* map to avoid duplicate evaluations */ uint32_t *score_map; /* map to store the scores */ int map_generation; int penalty_factor; int sub_penalty_factor; int mb_penalty_factor; int flags; int xmin; int xmax; int ymin; int ymax; int pred_x; int pred_y; uint8_t *src[4][4]; uint8_t *ref[4][4]; int stride; int uvstride; /* temp variables for picture complexity calculation */ int mc_mb_var_sum_temp; int mb_var_sum_temp; int scene_change_score; uint8_t (*mv_penalty)[MAX_MV*2+1]; /* bits needed to encode a MV */ uint8_t *current_mv_penalty; }MotionEstContext; /** * MpegEncContext. */ typedef struct MpegEncContext { struct AVCodecContext *avctx; /* the following parameters must be initialized before encoding */ int width, height;/*/< picture size. must be a multiple of 16 */ int gop_size; int intra_only; /*/< if true, only intra pictures are generated */ int bit_rate; /*/< wanted bit rate */ enum OutputFormat out_format; /*/< output format */ int codec_id; /* see CODEC_ID_xxx */ int flags; /*/< AVCodecContext.flags (HQ, MV4, ...) */ int flags2; /*/< AVCodecContext.flags2*/ int max_b_frames; /*/< max number of b-frames for encoding */ /* the following fields are managed internally by the encoder */ /** bit output */ PutBitContext pb; /* sequence parameters */ int context_initialized; int input_picture_number; /*/< used to set pic->display_picture_number, shouldnt be used for/by anything else*/ int coded_picture_number; /*/< used to set pic->coded_picture_number, shouldnt be used for/by anything else*/ int picture_number; /*FIXME remove, unclear definition*/ int picture_in_gop_number; /*/< 0-> first pic in gop, ... */ int b_frames_since_non_b; /*/< used for encoding, relative to not yet reordered input */ int64_t user_specified_pts;/*/< last non zero pts from AVFrame which was passed into avcodec_encode_video()*/ int mb_width, mb_height; /*/< number of MBs horizontally & vertically */ int mb_stride; /*/< mb_width+1 used for some arrays to allow simple addressng of left & top MBs withoutt sig11*/ int b8_stride; /*/< 2*mb_width+1 used for some 8x8 block arrays to allow simple addressng*/ int h_edge_pos, v_edge_pos;/*/< horizontal / vertical position of the right/bottom edge (pixel replicateion)*/ int mb_num; /*/< number of MBs of a picture */ int linesize; /*/< line size, in bytes, may be different from width */ int uvlinesize; /*/< line size, for chroma in bytes, may be different from width */ Picture *picture; /*/< main picture buffer */ Picture **input_picture; /*/< next pictures on display order for encoding*/ Picture **reordered_input_picture; /*/< pointer to the next pictures in codedorder for encoding*/ int start_mb_y; /*/< start mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)*/ int end_mb_y; /*/< end mb_y of this thread (so current thread should process start_mb_y <= row < end_mb_y)*/ struct MpegEncContext *thread_context[1]; /** * copy of the previous picture structure. * note, linesize & data, might not match the previous picture (for field pictures) */ Picture last_picture; /** * copy of the next picture structure. * note, linesize & data, might not match the next picture (for field pictures) */ Picture next_picture; /** * copy of the source picture structure for encoding. 
* note, linesize & data, might not match the source picture (for field pictures) */ Picture new_picture; /** * copy of the current picture structure. * note, linesize & data, might not match the current picture (for field pictures) */ Picture current_picture; /*/< buffer to store the decompressed current picture */ Picture *last_picture_ptr; /*/< pointer to the previous picture.*/ Picture *next_picture_ptr; /*/< pointer to the next picture (for bidir pred) */ Picture *current_picture_ptr; /*/< pointer to the current picture*/ uint8_t *visualization_buffer[3]; /*< temporary buffer vor MV visualization*/ int last_dc[3]; /*/< last DC values for MPEG1 */ int y_dc_scale, c_dc_scale; uint8_t *y_dc_scale_table; /*/< qscale -> y_dc_scale table */ uint8_t *c_dc_scale_table; /*/< qscale -> c_dc_scale table */ const uint8_t *chroma_qscale_table; /*/< qscale -> chroma_qscale (h263)*/ uint8_t *prev_pict_types; /*/< previous picture types in bitstream order, used for mb skip */ #define PREV_PICT_TYPES_BUFFER_SIZE 256 uint8_t *allocated_edge_emu_buffer; uint8_t *edge_emu_buffer; /*/< points into the middle of allocated_edge_emu_buffer*/ uint8_t *rd_scratchpad; /*/< scartchpad for rate distortion mb decission*/ uint8_t *b_scratchpad; /*/< scratchpad used for writing into write only buffers*/ int qscale; /*/< QP */ int chroma_qscale; /*/< chroma QP */ int lambda; /*/< lagrange multipler used in rate distortion*/ int lambda2; /*/< (lambda*lambda) >> FF_LAMBDA_SHIFT */ int *lambda_table; int dquant; /*/< qscale difference to prev qscale */ int pict_type; /*/< I_TYPE, P_TYPE, B_TYPE, ... */ int last_pict_type; /*FIXME removes*/ int last_non_b_pict_type; /*/< used for mpeg4 gmc b-frames & ratecontrol */ int dropable; int frame_rate_index; int frame_rate_ext_n; /*/< MPEG-2 specific framerate modificators (numerator)*/ int frame_rate_ext_d; /*/< MPEG-2 specific framerate modificators (denominator)*/ int decode; /*/< if 0 then decoding will be skiped (for encoding b frames for example)*/ DSPContext dsp; /*/< pointers for accelerated dsp fucntions */ int f_code; /*/< forward MV resolution */ int b_code; /*/< backward MV resolution for B Frames (mpeg4) */ int16_t (*p_mv_table_base)[2]; int16_t (*b_forw_mv_table_base)[2]; int16_t (*b_back_mv_table_base)[2]; int16_t (*b_bidir_forw_mv_table_base)[2]; int16_t (*b_bidir_back_mv_table_base)[2]; int16_t (*b_direct_mv_table_base)[2]; int16_t (*p_field_mv_table_base[2][2])[2]; int16_t (*b_field_mv_table_base[2][2][2])[2]; int16_t (*p_mv_table)[2]; /*/< MV table (1MV per MB) p-frame encoding */ int16_t (*b_forw_mv_table)[2]; /*/< MV table (1MV per MB) forward mode b-frame encoding */ int16_t (*b_back_mv_table)[2]; /*/< MV table (1MV per MB) backward mode b-frame encoding */ int16_t (*b_bidir_forw_mv_table)[2]; /*/< MV table (1MV per MB) bidir mode b-frame encoding */ int16_t (*b_bidir_back_mv_table)[2]; /*/< MV table (1MV per MB) bidir mode b-frame encoding */ int16_t (*b_direct_mv_table)[2]; /*/< MV table (1MV per MB) direct mode b-frame encoding */ int16_t (*p_field_mv_table[2][2])[2]; /*/< MV table (2MV per MB) interlaced p-frame encoding*/ int16_t (*b_field_mv_table[2][2][2])[2];/*/< MV table (4MV per MB) interlaced b-frame encoding*/ uint8_t (*p_field_select_table[2]); uint8_t (*b_field_select_table[2][2]); int mv_dir; #define MV_DIR_BACKWARD 1 #define MV_DIR_FORWARD 2 #define MV_DIRECT 4 /*/< bidirectional mode where the difference equals the MV of the last P/S/I-Frame (mpeg4)*/ /**motion vectors for a macroblock first coordinate : 0 = forward 1 = backward second " : 
depend on type third " : 0 = x, 1 = y */ int mv[2][4][2]; int field_select[2][2]; int last_mv[2][2][2]; /*/< last MV, used for MV prediction in MPEG1 & B-frame MPEG4 */ uint8_t *fcode_tab; /*/< smallest fcode needed for each MV */ MotionEstContext me; /* macroblock layer */ int mb_x, mb_y; int mb_skip_run; int mb_intra; uint16_t *mb_type; /*/< Table for candidate MB types for encoding*/ #define CANDIDATE_MB_TYPE_INTRA 0x01 #define CANDIDATE_MB_TYPE_INTER 0x02 #define CANDIDATE_MB_TYPE_INTER4V 0x04 #define CANDIDATE_MB_TYPE_SKIPED 0x08 #define CANDIDATE_MB_TYPE_DIRECT 0x10 #define CANDIDATE_MB_TYPE_FORWARD 0x20 #define CANDIDATE_MB_TYPE_BACKWARD 0x40 #define CANDIDATE_MB_TYPE_BIDIR 0x80 #define CANDIDATE_MB_TYPE_INTER_I 0x100 #define CANDIDATE_MB_TYPE_FORWARD_I 0x200 #define CANDIDATE_MB_TYPE_BACKWARD_I 0x400 #define CANDIDATE_MB_TYPE_BIDIR_I 0x800 int block_index[6]; /*/< index to current MB in block based arrays with edges*/ int block_wrap[6]; uint8_t *dest[3]; int *mb_index2xy; /*/< mb_index -> mb_x + mb_y*mb_stride*/ /** matrix transmitted in the bitstream */ uint16_t intra_matrix[64]; uint16_t chroma_intra_matrix[64]; uint16_t inter_matrix[64]; uint16_t chroma_inter_matrix[64]; #define QUANT_BIAS_SHIFT 8 int intra_quant_bias; /*/< bias for the quantizer */ int inter_quant_bias; /*/< bias for the quantizer */ int min_qcoeff; /*/< minimum encodable coefficient */ int max_qcoeff; /*/< maximum encodable coefficient */ int ac_esc_length; /*/< num of bits needed to encode the longest esc */ uint8_t *intra_ac_vlc_length; uint8_t *intra_ac_vlc_last_length; uint8_t *inter_ac_vlc_length; uint8_t *inter_ac_vlc_last_length; uint8_t *luma_dc_vlc_length; uint8_t *chroma_dc_vlc_length; #define UNI_AC_ENC_INDEX(run,level) ((run)*128 + (level)) int coded_score[6]; /** precomputed matrix (combine qscale and DCT renorm) */ int (*q_intra_matrix)[64]; int (*q_inter_matrix)[64]; /** identical to the above but for MMX & these are not permutated, second 64 entries are bias*/ uint16_t (*q_intra_matrix16)[2][64]; uint16_t (*q_inter_matrix16)[2][64]; int block_last_index[12]; /*/< last non zero coefficient in block*/ /* scantables */ ScanTable __align8 intra_scantable; ScanTable intra_h_scantable; ScanTable intra_v_scantable; ScanTable inter_scantable; /*/< if inter == intra then intra should be used to reduce tha cache usage*/ void *opaque; /*/< private data for the user*/ /* bit rate control */ int64_t wanted_bits; int64_t total_bits; int frame_bits; /*/< bits used for the current frame */ RateControlContext rc_context; /*/< contains stuff only accessed in ratecontrol.c*/ /* statistics, used for 2-pass encoding */ int mv_bits; int header_bits; int i_tex_bits; int p_tex_bits; int i_count; int f_count; int b_count; int skip_count; int misc_bits; /*/< cbp, mb_type*/ int last_bits; /*/< temp var used for calculating the above vars*/ int resync_mb_x; /*/< x position of last resync marker */ int resync_mb_y; /*/< y position of last resync marker */ int mb_num_left; /*/< number of MBs left in this video packet (for partitioned Slices only)*/ int next_p_frame_damaged; /*/< set if the next p frame is damaged, to avoid showing trashed b frames */ ParseContext parse_context; /* mpeg4 specific */ int time_increment_resolution; int64_t time; /* time of current frame */ uint16_t pp_time; /* time distance between the last 2 p,s,i frames */ uint16_t pb_time; /* time distance between the last b and p,s,i frame */ int64_t last_non_b_time; PutBitContext tex_pb; /*/< used for data partitioned VOPs */ PutBitContext pb2; /*/< used 
for data partitioned VOPs */ int t_frame; /*/< time distance of first I -> B, used for interlaced b frames */ /* lavc specific stuff, used to workaround bugs in libavcodec */ int ffmpeg_version; int lavc_build; /* MSMPEG4 specific */ int first_slice_line; /*/< used in mpeg4 too to handle resync markers */ int inter_intra_pred; int mspel; /* Mpeg1 specific */ int gop_picture_number; /*/< index of the first picture of a GOP based on fake_pic_num & mpeg1 specific */ int last_mv_dir; /*/< last mv_dir, used for b frame encoding */ int broken_link; /*/< no_output_of_prior_pics_flag*/ uint8_t *vbv_delay_ptr; /*/< pointer to vbv_delay in the bitstream */ int intra_dc_precision; int chroma_x_shift;/*depend on pix_format, that depend on chroma_format*/ int chroma_y_shift; int first_slice; DCTELEM (*block)[64]; /*/< points to one of the following blocks */ DCTELEM (*blocks)[6][64]; /* for HQ mode we need to keep the best block*/ } MpegEncContext; int DCT_common_init(MpegEncContext *s); int MPV_common_init(MpegEncContext *s); void MPV_common_end(MpegEncContext *s); void MPV_decode_mb(MpegEncContext *s, DCTELEM block[12][64]); int MPV_frame_start(MpegEncContext *s, AVCodecContext *avctx); void MPV_frame_end(MpegEncContext *s); int MPV_encode_init(AVCodecContext *avctx); int MPV_encode_end(AVCodecContext *avctx); int MPV_encode_picture(AVCodecContext *avctx, unsigned char *buf, int buf_size, void *data); void ff_copy_bits(PutBitContext *pb, uint8_t *src, int length); void ff_init_scantable(ScanTable *st, const uint8_t *src_scantable); void ff_draw_horiz_band(MpegEncContext *s, int y, int h); void ff_emulated_edge_mc(uint8_t *buf, uint8_t *src, int linesize, int block_w, int block_h, int src_x, int src_y, int w, int h); #define END_NOT_FOUND -100 int ff_combine_frame(ParseContext *pc, int next, uint8_t **buf, int *buf_size); void ff_mpeg_flush(AVCodecContext *avctx); void ff_write_quant_matrix(PutBitContext *pb, uint16_t *matrix); int ff_find_unused_picture(MpegEncContext *s, int shared); void ff_er_frame_start(MpegEncContext *s); void ff_er_frame_end(MpegEncContext *s); void ff_er_add_slice(MpegEncContext *s, int startx, int starty, int endx, int endy, int status); void ff_init_block_index(MpegEncContext *s); /* motion_est.c */ void ff_estimate_p_frame_motion(MpegEncContext * s, int mb_x, int mb_y); void ff_estimate_b_frame_motion(MpegEncContext * s, int mb_x, int mb_y); int ff_get_best_fcode(MpegEncContext * s, int16_t (*mv_table)[2], int type); void ff_fix_long_mvs(MpegEncContext * s, int16_t (*mv_table)[2], int f_code, int type, int truncate); void ff_init_me(MpegEncContext *s); /* mpeg12.c */ extern const int16_t ff_mpeg1_default_intra_matrix[64]; extern const int16_t ff_mpeg1_default_non_intra_matrix[64]; extern uint8_t ff_mpeg1_dc_scale_table[128]; void mpeg1_encode_picture_header(MpegEncContext *s, int picture_number); void mpeg1_encode_mb(MpegEncContext *s, DCTELEM block[6][64], int motion_x, int motion_y); void ff_mpeg1_encode_init(MpegEncContext *s); void ff_mpeg1_encode_slice_header(MpegEncContext *s); int ff_mpeg1_find_frame_end(ParseContext *pc, const uint8_t *buf,int buf_size); /** RLTable. 
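 * Run/level coding table for DCT coefficients: table_vlc/table_run/table_level
 * list the (run, level) pairs that have dedicated codes, and init_rl()
 * derives the max_level[], max_run[] and index_run[] lookup arrays from them
 * for the encoder.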
*/ typedef struct RLTable { int n; /*/< number of entries of table_vlc minus 1 */ int last; /*/< number of values for last = 0 */ const uint16_t (*table_vlc)[2]; const int8_t *table_run; const int8_t *table_level; uint8_t *index_run[2]; /*/< encoding only */ int8_t *max_level[2]; /*/< encoding & decoding */ int8_t *max_run[2]; /*/< encoding & decoding */ } RLTable; void init_rl(RLTable *rl); void ff_set_mpeg4_time(MpegEncContext * s, int picture_number); void ff_set_qscale(MpegEncContext * s, int qscale); /* rate control */ int ff_rate_control_init(MpegEncContext *s); float ff_rate_estimate_qscale(MpegEncContext *s); void ff_write_pass1_stats(MpegEncContext *s); void ff_rate_control_uninit(MpegEncContext *s); double ff_eval(char *s, double *const_value, const char **const_name, double (**func1)(void *, double), const char **func1_name, double (**func2)(void *, double, double), char **func2_name, void *opaque); int ff_vbv_update(MpegEncContext *s, int frame_size); #endif /* AVCODEC_MPEGVIDEO_H */ yorick-mpeg-0.1/libavcodec/ratecontrol.c0000644000076500001440000002773211254260253017773 0ustar frigautusers/* * Rate control for video encoders * * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** * @file ratecontrol.c * Rate control for video encoders. 
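 *
 * Added note: a brief sketch of the model used below.  The expressions are
 * an illustrative restatement of get_qscale() and bits2qp() in this file,
 * not new code:
 *
 *   complexity  = sqrt((i_tex_bits + p_tex_bits) * qscale)
 *   wanted_bits = complexity * rate_factor + 1
 *   qscale'     = qscale * (i_tex_bits + p_tex_bits + 1) / wanted_bits
 *
 * i.e. the texture bits and quantizer of previous frames predict how many
 * bits a frame of similar complexity needs, and the quantizer is rescaled so
 * the predicted size matches the requested bit rate; I/B-frame factors,
 * qmin/qmax clipping and the VBV buffer limits are applied afterwards.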
*/ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #include #undef NDEBUG /* allways check asserts, the speed effect is far too small to disable them*/ #include #ifndef M_E #define M_E 2.718281828 #endif static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_factor, int frame_num); int ff_rate_control_init(MpegEncContext *s) { RateControlContext *rcc= &s->rc_context; int i; for(i=0; i<5; i++){ rcc->pred[i].coeff= FF_QP2LAMBDA * 7.0; rcc->pred[i].count= 1.0; rcc->pred[i].decay= 0.4; rcc->i_cplx_sum [i]= rcc->p_cplx_sum [i]= rcc->mv_bits_sum[i]= rcc->qscale_sum [i]= rcc->frame_count[i]= 1; /* 1 is better cuz of 1/0 and such*/ rcc->last_qscale_for[i]=FF_QP2LAMBDA * 5; } rcc->buffer_index= s->avctx->rc_initial_buffer_occupancy; rcc->short_term_qsum=0.001; rcc->short_term_qcount=0.001; rcc->pass1_rc_eq_output_sum= 0.001; rcc->pass1_wanted_bits=0.001; /* init stuff with the user specified complexity */ if(s->avctx->rc_initial_cplx){ for(i=0; i<60*30; i++){ double bits= s->avctx->rc_initial_cplx * (i/10000.0 + 1.0)*s->mb_num; RateControlEntry rce; double q; if (i%((s->gop_size+3)/4)==0) rce.pict_type= I_TYPE; else if(i%(s->max_b_frames+1)) rce.pict_type= B_TYPE; else rce.pict_type= P_TYPE; rce.new_pict_type= rce.pict_type; rce.mc_mb_var_sum= bits*s->mb_num/100000; rce.mb_var_sum = s->mb_num; rce.qscale = FF_QP2LAMBDA * 2; rce.f_code = 2; rce.b_code = 1; rce.misc_bits= 1; if(s->pict_type== I_TYPE){ rce.i_count = s->mb_num; rce.i_tex_bits= bits; rce.p_tex_bits= 0; rce.mv_bits= 0; }else{ rce.i_count = 0; /*FIXME we do know this approx*/ rce.i_tex_bits= 0; rce.p_tex_bits= bits*0.9; rce.mv_bits= bits*0.1; } rcc->i_cplx_sum [rce.pict_type] += rce.i_tex_bits*rce.qscale; rcc->p_cplx_sum [rce.pict_type] += rce.p_tex_bits*rce.qscale; rcc->mv_bits_sum[rce.pict_type] += rce.mv_bits; rcc->frame_count[rce.pict_type] ++; bits= rce.i_tex_bits + rce.p_tex_bits; q= get_qscale(s, &rce, rcc->pass1_wanted_bits/rcc->pass1_rc_eq_output_sum, i); rcc->pass1_wanted_bits+= s->bit_rate/(s->avctx->frame_rate / (double)s->avctx->frame_rate_base); } } return 0; } void ff_rate_control_uninit(MpegEncContext *s) { RateControlContext *rcc= &s->rc_context; av_freep(&rcc->entry); } #define qp2bits(qp) \ (rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits+1)/ (qp)) #define bits2qp(bits) \ (rce->qscale * (double)(rce->i_tex_bits + rce->p_tex_bits+1)/ (bits)) int ff_vbv_update(MpegEncContext *s, int frame_size){ RateControlContext *rcc= &s->rc_context; const double fps= (double)s->avctx->frame_rate / (double)s->avctx->frame_rate_base; const int buffer_size= s->avctx->rc_buffer_size; const double min_rate= s->avctx->rc_min_rate/fps; const double max_rate= s->avctx->rc_max_rate/fps; /*printf("%d %f %d %f %f\n", buffer_size, rcc->buffer_index, frame_size, min_rate, max_rate);*/ if(buffer_size){ int left; rcc->buffer_index-= frame_size; if(rcc->buffer_index < 0){ av_log(s->avctx, AV_LOG_ERROR, "rc buffer underflow\n"); rcc->buffer_index= 0; } left= buffer_size - rcc->buffer_index - 1; rcc->buffer_index += clip(left, min_rate, max_rate); if(rcc->buffer_index > buffer_size){ int stuffing= ceil((rcc->buffer_index - buffer_size)/8); rcc->buffer_index -= 8*stuffing; return stuffing; } } return 0; } /** * modifies the bitrate curve from pass1 for one frame */ static double get_qscale(MpegEncContext *s, RateControlEntry *rce, double rate_factor, int frame_num) { RateControlContext *rcc= &s->rc_context; double q, bits; const int pict_type= rce->new_pict_type; /* this is rc_eq="tex^qComp" with 
qComp=0.5 */ bits = sqrt((rce->i_tex_bits + rce->p_tex_bits)*(double)rce->qscale); rcc->pass1_rc_eq_output_sum+= bits; bits*=rate_factor; if(bits<0.0) bits=0.0; bits+= 1.0; /*avoid 1/0 issues*/ q= bits2qp(bits); /* I/B difference */ if (pict_type==I_TYPE && s->avctx->i_quant_factor<0.0) q= -q*s->avctx->i_quant_factor + s->avctx->i_quant_offset; else if(pict_type==B_TYPE && s->avctx->b_quant_factor<0.0) q= -q*s->avctx->b_quant_factor + s->avctx->b_quant_offset; return q; } static double get_diff_limited_q(MpegEncContext *s, RateControlEntry *rce, double q){ RateControlContext *rcc= &s->rc_context; AVCodecContext *a= s->avctx; const int pict_type= rce->new_pict_type; const double last_p_q = rcc->last_qscale_for[P_TYPE]; const double last_non_b_q= rcc->last_qscale_for[rcc->last_non_b_pict_type]; if (pict_type==I_TYPE && (a->i_quant_factor>0.0 || rcc->last_non_b_pict_type==P_TYPE)) q= last_p_q *ABS(a->i_quant_factor) + a->i_quant_offset; else if(pict_type==B_TYPE && a->b_quant_factor>0.0) q= last_non_b_q* a->b_quant_factor + a->b_quant_offset; /* last qscale / qdiff stuff */ if(rcc->last_non_b_pict_type==pict_type || pict_type!=I_TYPE){ double last_q= rcc->last_qscale_for[pict_type]; const int maxdiff= FF_QP2LAMBDA * a->max_qdiff; if (q > last_q + maxdiff) q= last_q + maxdiff; else if(q < last_q - maxdiff) q= last_q - maxdiff; } rcc->last_qscale_for[pict_type]= q; /*Note we cant do that after blurring*/ if(pict_type!=B_TYPE) rcc->last_non_b_pict_type= pict_type; return q; } /** * gets the qmin & qmax for pict_type */ static void get_qminmax(int *qmin_ret, int *qmax_ret, MpegEncContext *s, int pict_type){ int qmin= s->avctx->lmin; int qmax= s->avctx->lmax; assert(qmin <= qmax); if(pict_type==B_TYPE){ qmin= (int)(qmin*ABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5); qmax= (int)(qmax*ABS(s->avctx->b_quant_factor)+s->avctx->b_quant_offset + 0.5); }else if(pict_type==I_TYPE){ qmin= (int)(qmin*ABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5); qmax= (int)(qmax*ABS(s->avctx->i_quant_factor)+s->avctx->i_quant_offset + 0.5); } qmin= clip(qmin, 1, FF_LAMBDA_MAX); qmax= clip(qmax, 1, FF_LAMBDA_MAX); if(qmaxrc_context; int qmin, qmax; double bits; const int pict_type= rce->new_pict_type; const double buffer_size= s->avctx->rc_buffer_size; const double fps= (double)s->avctx->frame_rate / (double)s->avctx->frame_rate_base; const double min_rate= s->avctx->rc_min_rate / fps; const double max_rate= s->avctx->rc_max_rate / fps; get_qminmax(&qmin, &qmax, s, pict_type); bits= qp2bits(q); /*printf("q:%f\n", q);*/ /* buffer overflow/underflow protection */ if(buffer_size){ double expected_size= rcc->buffer_index; double q_limit; if(min_rate){ double d= 2*(buffer_size - expected_size)/buffer_size; if(d>1.0) d=1.0; else if(d<0.0001) d=0.0001; q*= pow(d, 1.0/s->avctx->rc_buffer_aggressivity); q_limit= bits2qp(FFMAX((min_rate - buffer_size + rcc->buffer_index)*3, 1)); if (q > q_limit) q= q_limit; } if(max_rate){ double d= 2*expected_size/buffer_size; if(d>1.0) d=1.0; else if(d<0.0001) d=0.0001; q/= pow(d, 1.0/s->avctx->rc_buffer_aggressivity); q_limit= bits2qp(FFMAX(rcc->buffer_index/3, 1)); if(q < q_limit) q= q_limit; } } if (qqmax) q=qmax; return q; } /*----------------------------------*/ /* 1 Pass Code*/ static double predict_size(Predictor *p, double q, double var) { return p->coeff*var / (q*p->count); } static void update_predictor(Predictor *p, double q, double var, double size) { double new_coeff= size*q / (var + 1); if(var<10) return; p->count*= p->decay; p->coeff*= p->decay; 
p->count++; p->coeff+= new_coeff; } float ff_rate_estimate_qscale(MpegEncContext *s) { float q; int qmin, qmax; float br_compensation; double diff; double fps; int picture_number= s->picture_number; int64_t wanted_bits; RateControlContext *rcc= &s->rc_context; AVCodecContext *a= s->avctx; RateControlEntry local_rce, *rce; double bits; double rate_factor; int var; const int pict_type= s->pict_type; Picture * const pic= &s->current_picture; get_qminmax(&qmin, &qmax, s, pict_type); fps= (double)s->avctx->frame_rate / (double)s->avctx->frame_rate_base; /*printf("input_pic_num:%d pic_num:%d frame_rate:%d\n", s->input_picture_number, s->picture_number, s->frame_rate);*/ /* update predictors */ if(picture_number>2){ const int last_var= s->last_pict_type == I_TYPE ? rcc->last_mb_var_sum : rcc->last_mc_mb_var_sum; update_predictor(&rcc->pred[s->last_pict_type], rcc->last_qscale, sqrt(last_var), s->frame_bits); } rce= &local_rce; wanted_bits= (uint64_t)(s->bit_rate*(double)picture_number/fps); diff= s->total_bits - wanted_bits; br_compensation= (a->bit_rate_tolerance - diff)/a->bit_rate_tolerance; if(br_compensation<=0.0) br_compensation=0.001; var= pict_type == I_TYPE ? pic->mb_var_sum : pic->mc_mb_var_sum; rce->pict_type= rce->new_pict_type= pict_type; rce->mc_mb_var_sum= pic->mc_mb_var_sum; rce->mb_var_sum = pic-> mb_var_sum; rce->qscale = FF_QP2LAMBDA * 2; rce->f_code = s->f_code; rce->b_code = s->b_code; rce->misc_bits= 1; bits= predict_size(&rcc->pred[pict_type], rce->qscale, sqrt(var)); if(pict_type== I_TYPE){ rce->i_count = s->mb_num; rce->i_tex_bits= bits; rce->p_tex_bits= 0; rce->mv_bits= 0; }else{ rce->i_count = 0; /*FIXME we do know this approx*/ rce->i_tex_bits= 0; rce->p_tex_bits= bits*0.9; rce->mv_bits= bits*0.1; } rcc->i_cplx_sum [pict_type] += rce->i_tex_bits*rce->qscale; rcc->p_cplx_sum [pict_type] += rce->p_tex_bits*rce->qscale; rcc->mv_bits_sum[pict_type] += rce->mv_bits; rcc->frame_count[pict_type] ++; bits= rce->i_tex_bits + rce->p_tex_bits; rate_factor= rcc->pass1_wanted_bits/rcc->pass1_rc_eq_output_sum * br_compensation; q= get_qscale(s, rce, rate_factor, picture_number); assert(q>0.0); q= get_diff_limited_q(s, rce, q); assert(q>0.0); q= modify_qscale(s, rce, q, picture_number); rcc->pass1_wanted_bits+= s->bit_rate/fps; assert(q>0.0); if (qqmax) q=qmax; q= (int)(q + 0.5); rcc->last_qscale= q; rcc->last_mc_mb_var_sum= pic->mc_mb_var_sum; rcc->last_mb_var_sum= pic->mb_var_sum; return q; } yorick-mpeg-0.1/libavcodec/README0000644000076500001440000000355411254260253016147 0ustar frigautusersThis code began as part of the ffmpeg package. I have left the copyright notices and source file names unchanged, but they bear only a limited resemblance to the original. Mostly, I have ripped out coding that was intended to optimize the original for speed, and to support anything other than encoding MPEG-1 format video, which is to say almost all of ffmpeg is missing. My goals have been completely orthogonal to those of the ffmpeg authors. I have to support yorick on a wide variety of platforms, and I was unable to get ffmpeg to build on many of them. I'm sure if I had been willing to do things like build gcc in order to be able to build ffmpeg, I might have been able to coax it to build on a few more systems. I only have a passing interest in performance; anything within a factor of two of what ffmpeg achieves is easily good enough. However, I need the code to be absolutely portable and stable. 
The interface to ffmpeg was still changing as of 0.4.9-pre1, enough to require rewriting any code which called it in order to support newer versions (to allow for changes in structs and so on). Eventually, I would like to move away from the ffmpeg avcodec.h API to something much smaller, but I haven't attempted that yet. Originally, I intended to make it so that the caller (ympeg.c in this case) would have the option of linking against the real ffmpeg shared library instead of this stripped version, but even that limited portability is impossible when the avcodec.h API changes incompatibly as new versions are released. In its current state, this library builds and works correctly on pretty much the same platforms where I can get ffmpeg to compile and work, so I haven't really gained anything at all by the exercise, except for independence from changes in ffmpeg on those platforms. In short, this still doesn't work, and needs lots more work. Dave Munro 27/Oct/2007 yorick-mpeg-0.1/libavcodec/simple_idct.c0000644000076500001440000002370611254260253017730 0ustar frigautusers/* * Simple IDCT * * Copyright (c) 2001 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** * @file simple_idct.c * simple idct in C.
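 *
 * Added note for orientation: the transform computed here is the standard
 * 8x8 inverse DCT,
 *
 *   f(x,y) = 1/4 * sum_u sum_v C(u) C(v) F(u,v)
 *            * cos((2x+1) u pi / 16) * cos((2y+1) v pi / 16),
 *   with C(0) = 1/sqrt(2) and C(k) = 1 for k > 0,
 *
 * evaluated separably: idctRowCondDC() performs the eight 1-D row
 * transforms and the idctSparseCol routines the eight column transforms,
 * using the fixed-point constants W1..W7 (cos(i*pi/16)*sqrt(2)*2^14)
 * defined below.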
*/ /* based upon some outcommented c code from mpeg2dec (idct_mmx.c written by Aaron Holtzman ) */ #include "avcodec.h" #include "dsputil.h" #include "simple_idct.h" #if 0 #define W1 2841 /* 2048*sqrt (2)*cos (1*pi/16) */ #define W2 2676 /* 2048*sqrt (2)*cos (2*pi/16) */ #define W3 2408 /* 2048*sqrt (2)*cos (3*pi/16) */ #define W4 2048 /* 2048*sqrt (2)*cos (4*pi/16) */ #define W5 1609 /* 2048*sqrt (2)*cos (5*pi/16) */ #define W6 1108 /* 2048*sqrt (2)*cos (6*pi/16) */ #define W7 565 /* 2048*sqrt (2)*cos (7*pi/16) */ #define ROW_SHIFT 8 #define COL_SHIFT 17 #else #define W1 22725 /*cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5*/ #define W2 21407 /*cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5*/ #define W3 19266 /*cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5*/ #define W4 16383 /*cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5*/ #define W5 12873 /*cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5*/ #define W6 8867 /*cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5*/ #define W7 4520 /*cos(i*M_PI/16)*sqrt(2)*(1<<14) + 0.5*/ #define ROW_SHIFT 11 #define COL_SHIFT 20 /* 6*/ #endif /*#if defined(ARCH_POWERPC_405)*/ #if 0 /* signed 16x16 -> 32 multiply add accumulate */ #define MAC16(rt, ra, rb) \ asm ("maclhw %0, %2, %3" : "=r" (rt) : "0" (rt), "r" (ra), "r" (rb)); /* signed 16x16 -> 32 multiply */ #define MUL16(rt, ra, rb) \ asm ("mullhw %0, %1, %2" : "=r" (rt) : "r" (ra), "r" (rb)); #else /* signed 16x16 -> 32 multiply add accumulate */ #define MAC16(rt, ra, rb) rt += (ra) * (rb) /* signed 16x16 -> 32 multiply */ #define MUL16(rt, ra, rb) rt = (ra) * (rb) #endif static void idctRowCondDC(DCTELEM * row) { int a0, a1, a2, a3, b0, b1, b2, b3; #ifdef FAST_64BIT uint64_t temp; #else uint32_t temp; #endif #ifdef FAST_64BIT # ifdef WORDS_BIGENDIAN # define ROW0_MASK 0xffff000000000000LL # else # define ROW0_MASK 0xffffLL # endif if(sizeof(DCTELEM)==2){ if ( ((((uint64_t *)row)[0] & ~ROW0_MASK) | ((uint64_t *)row)[1]) == 0) { temp = (row[0] << 3) & 0xffff; temp += temp << 16; temp += temp << 32; ((uint64_t *)row)[0] = temp; ((uint64_t *)row)[1] = temp; return; } }else{ if (!(row[1]|row[2]|row[3]|row[4]|row[5]|row[6]|row[7])) { row[0]=row[1]=row[2]=row[3]=row[4]=row[5]=row[6]=row[7]= row[0] << 3; return; } } #else if(sizeof(DCTELEM)==2){ if (!(((uint32_t*)row)[1] | ((uint32_t*)row)[2] | ((uint32_t*)row)[3] | row[1])) { temp = (row[0] << 3) & 0xffff; temp += temp << 16; ((uint32_t*)row)[0]=((uint32_t*)row)[1] = ((uint32_t*)row)[2]=((uint32_t*)row)[3] = temp; return; } }else{ if (!(row[1]|row[2]|row[3]|row[4]|row[5]|row[6]|row[7])) { row[0]=row[1]=row[2]=row[3]=row[4]=row[5]=row[6]=row[7]= row[0] << 3; return; } } #endif a0 = (W4 * row[0]) + (1 << (ROW_SHIFT - 1)); a1 = a0; a2 = a0; a3 = a0; /* no need to optimize : gcc does it */ a0 += W2 * row[2]; a1 += W6 * row[2]; a2 -= W6 * row[2]; a3 -= W2 * row[2]; MUL16(b0, W1, row[1]); MAC16(b0, W3, row[3]); MUL16(b1, W3, row[1]); MAC16(b1, -W7, row[3]); MUL16(b2, W5, row[1]); MAC16(b2, -W1, row[3]); MUL16(b3, W7, row[1]); MAC16(b3, -W5, row[3]); #ifdef FAST_64BIT temp = ((uint64_t*)row)[1]; #else temp = ((uint32_t*)row)[2] | ((uint32_t*)row)[3]; #endif if (temp != 0) { a0 += W4*row[4] + W6*row[6]; a1 += - W4*row[4] - W2*row[6]; a2 += - W4*row[4] + W2*row[6]; a3 += W4*row[4] - W6*row[6]; MAC16(b0, W5, row[5]); MAC16(b0, W7, row[7]); MAC16(b1, -W1, row[5]); MAC16(b1, -W5, row[7]); MAC16(b2, W7, row[5]); MAC16(b2, W3, row[7]); MAC16(b3, W3, row[5]); MAC16(b3, -W1, row[7]); } row[0] = (a0 + b0) >> ROW_SHIFT; row[7] = (a0 - b0) >> ROW_SHIFT; row[1] = (a1 + b1) >> ROW_SHIFT; row[6] = (a1 - b1) >> ROW_SHIFT; row[2] = (a2 + b2) >> 
ROW_SHIFT; row[5] = (a2 - b2) >> ROW_SHIFT; row[3] = (a3 + b3) >> ROW_SHIFT; row[4] = (a3 - b3) >> ROW_SHIFT; } static void idctSparseColPut(uint8_t *dest, int line_size, DCTELEM * col) { int a0, a1, a2, a3, b0, b1, b2, b3; uint8_t *cm = cropTbl + MAX_NEG_CROP; /* XXX: I did that only to give same values as previous code */ a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4)); a1 = a0; a2 = a0; a3 = a0; a0 += + W2*col[8*2]; a1 += + W6*col[8*2]; a2 += - W6*col[8*2]; a3 += - W2*col[8*2]; MUL16(b0, W1, col[8*1]); MUL16(b1, W3, col[8*1]); MUL16(b2, W5, col[8*1]); MUL16(b3, W7, col[8*1]); MAC16(b0, + W3, col[8*3]); MAC16(b1, - W7, col[8*3]); MAC16(b2, - W1, col[8*3]); MAC16(b3, - W5, col[8*3]); if(col[8*4]){ a0 += + W4*col[8*4]; a1 += - W4*col[8*4]; a2 += - W4*col[8*4]; a3 += + W4*col[8*4]; } if (col[8*5]) { MAC16(b0, + W5, col[8*5]); MAC16(b1, - W1, col[8*5]); MAC16(b2, + W7, col[8*5]); MAC16(b3, + W3, col[8*5]); } if(col[8*6]){ a0 += + W6*col[8*6]; a1 += - W2*col[8*6]; a2 += + W2*col[8*6]; a3 += - W6*col[8*6]; } if (col[8*7]) { MAC16(b0, + W7, col[8*7]); MAC16(b1, - W5, col[8*7]); MAC16(b2, + W3, col[8*7]); MAC16(b3, - W1, col[8*7]); } dest[0] = cm[(a0 + b0) >> COL_SHIFT]; dest += line_size; dest[0] = cm[(a1 + b1) >> COL_SHIFT]; dest += line_size; dest[0] = cm[(a2 + b2) >> COL_SHIFT]; dest += line_size; dest[0] = cm[(a3 + b3) >> COL_SHIFT]; dest += line_size; dest[0] = cm[(a3 - b3) >> COL_SHIFT]; dest += line_size; dest[0] = cm[(a2 - b2) >> COL_SHIFT]; dest += line_size; dest[0] = cm[(a1 - b1) >> COL_SHIFT]; dest += line_size; dest[0] = cm[(a0 - b0) >> COL_SHIFT]; } static void idctSparseColAdd(uint8_t *dest, int line_size, DCTELEM * col) { int a0, a1, a2, a3, b0, b1, b2, b3; uint8_t *cm = cropTbl + MAX_NEG_CROP; /* XXX: I did that only to give same values as previous code */ a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4)); a1 = a0; a2 = a0; a3 = a0; a0 += + W2*col[8*2]; a1 += + W6*col[8*2]; a2 += - W6*col[8*2]; a3 += - W2*col[8*2]; MUL16(b0, W1, col[8*1]); MUL16(b1, W3, col[8*1]); MUL16(b2, W5, col[8*1]); MUL16(b3, W7, col[8*1]); MAC16(b0, + W3, col[8*3]); MAC16(b1, - W7, col[8*3]); MAC16(b2, - W1, col[8*3]); MAC16(b3, - W5, col[8*3]); if(col[8*4]){ a0 += + W4*col[8*4]; a1 += - W4*col[8*4]; a2 += - W4*col[8*4]; a3 += + W4*col[8*4]; } if (col[8*5]) { MAC16(b0, + W5, col[8*5]); MAC16(b1, - W1, col[8*5]); MAC16(b2, + W7, col[8*5]); MAC16(b3, + W3, col[8*5]); } if(col[8*6]){ a0 += + W6*col[8*6]; a1 += - W2*col[8*6]; a2 += + W2*col[8*6]; a3 += - W6*col[8*6]; } if (col[8*7]) { MAC16(b0, + W7, col[8*7]); MAC16(b1, - W5, col[8*7]); MAC16(b2, + W3, col[8*7]); MAC16(b3, - W1, col[8*7]); } dest[0] = cm[(int)dest[0] + ((a0 + b0) >> COL_SHIFT)]; dest += line_size; dest[0] = cm[(int)dest[0] + ((a1 + b1) >> COL_SHIFT)]; dest += line_size; dest[0] = cm[(int)dest[0] + ((a2 + b2) >> COL_SHIFT)]; dest += line_size; dest[0] = cm[(int)dest[0] + ((a3 + b3) >> COL_SHIFT)]; dest += line_size; dest[0] = cm[(int)dest[0] + ((a3 - b3) >> COL_SHIFT)]; dest += line_size; dest[0] = cm[(int)dest[0] + ((a2 - b2) >> COL_SHIFT)]; dest += line_size; dest[0] = cm[(int)dest[0] + ((a1 - b1) >> COL_SHIFT)]; dest += line_size; dest[0] = cm[(int)dest[0] + ((a0 - b0) >> COL_SHIFT)]; } static void idctSparseCol(DCTELEM * col) { int a0, a1, a2, a3, b0, b1, b2, b3; /* XXX: I did that only to give same values as previous code */ a0 = W4 * (col[8*0] + ((1<<(COL_SHIFT-1))/W4)); a1 = a0; a2 = a0; a3 = a0; a0 += + W2*col[8*2]; a1 += + W6*col[8*2]; a2 += - W6*col[8*2]; a3 += - W2*col[8*2]; MUL16(b0, W1, col[8*1]); MUL16(b1, W3, col[8*1]); MUL16(b2, 
W5, col[8*1]); MUL16(b3, W7, col[8*1]); MAC16(b0, + W3, col[8*3]); MAC16(b1, - W7, col[8*3]); MAC16(b2, - W1, col[8*3]); MAC16(b3, - W5, col[8*3]); if(col[8*4]){ a0 += + W4*col[8*4]; a1 += - W4*col[8*4]; a2 += - W4*col[8*4]; a3 += + W4*col[8*4]; } if (col[8*5]) { MAC16(b0, + W5, col[8*5]); MAC16(b1, - W1, col[8*5]); MAC16(b2, + W7, col[8*5]); MAC16(b3, + W3, col[8*5]); } if(col[8*6]){ a0 += + W6*col[8*6]; a1 += - W2*col[8*6]; a2 += + W2*col[8*6]; a3 += - W6*col[8*6]; } if (col[8*7]) { MAC16(b0, + W7, col[8*7]); MAC16(b1, - W5, col[8*7]); MAC16(b2, + W3, col[8*7]); MAC16(b3, - W1, col[8*7]); } col[0 ] = ((a0 + b0) >> COL_SHIFT); col[8 ] = ((a1 + b1) >> COL_SHIFT); col[16] = ((a2 + b2) >> COL_SHIFT); col[24] = ((a3 + b3) >> COL_SHIFT); col[32] = ((a3 - b3) >> COL_SHIFT); col[40] = ((a2 - b2) >> COL_SHIFT); col[48] = ((a1 - b1) >> COL_SHIFT); col[56] = ((a0 - b0) >> COL_SHIFT); } void simple_idct_put(uint8_t *dest, int line_size, DCTELEM *block) { int i; for(i=0; i<8; i++) idctRowCondDC(block + i*8); for(i=0; i<8; i++) idctSparseColPut(dest + i, line_size, block + i); } void simple_idct_add(uint8_t *dest, int line_size, DCTELEM *block) { int i; for(i=0; i<8; i++) idctRowCondDC(block + i*8); for(i=0; i<8; i++) idctSparseColAdd(dest + i, line_size, block + i); } yorick-mpeg-0.1/libavcodec/simple_idct.h0000644000076500001440000000177011254260253017732 0ustar frigautusers/* * Simple IDCT * * Copyright (c) 2001 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** * @file simple_idct.h * simple idct header. */ void simple_idct_put(uint8_t *dest, int line_size, DCTELEM *block); void simple_idct_add(uint8_t *dest, int line_size, DCTELEM *block); yorick-mpeg-0.1/libavcodec/utils.c0000644000076500001440000002673411254260253016600 0ustar frigautusers/* * utils for libavcodec * Copyright (c) 2001 Fabrice Bellard. * Copyright (c) 2003 Michel Bardiaux for the av_log API * Copyright (c) 2002-2004 Michael Niedermayer * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ /** * @file utils.c * utils. 
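 *
 * Usage sketch for two helpers defined in this file that are easy to
 * misread (added note; the numeric values are illustrative only):
 *
 *   av_rescale(a, b, c)   returns a*b/c rounded to nearest without
 *                         intermediate 64-bit overflow, e.g.
 *                         av_rescale(90000, 1001, 30000) == 3003;
 *   av_reduce(&num, &den, 1001000, 24000, INT_MAX)
 *                         reduces the fraction to num=1001, den=24 and
 *                         returns 1 because the reduction is exact.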
*/ #include "avcodec.h" #include "dsputil.h" #include "mpegvideo.h" #include "integer.h" #include #include static void avcodec_default_free_buffers(AVCodecContext *s); void *av_mallocz(unsigned int size) { void *ptr; ptr = av_malloc(size); if (!ptr) return NULL; memset(ptr, 0, size); return ptr; } /** * realloc which does nothing if the block is large enough */ void *av_fast_realloc(void *ptr, unsigned int *size, unsigned int min_size) { if(min_size < *size) return ptr; *size= 17*min_size/16 + 32; return av_realloc(ptr, *size); } /** * Frees memory and sets the pointer to NULL. * @param arg pointer to the pointer which should be freed */ void av_freep(void *arg) { void **ptr= (void**)arg; av_free(*ptr); *ptr = NULL; } /* encoder management */ AVCodec *first_avcodec; void register_avcodec(AVCodec *format) { AVCodec **p; p = &first_avcodec; while (*p != NULL) p = &(*p)->next; *p = format; format->next = NULL; } typedef struct InternalBuffer{ int last_pic_num; uint8_t *base[4]; uint8_t *data[4]; int linesize[4]; }InternalBuffer; #define INTERNAL_BUFFER_SIZE 32 #define ALIGN(x, a) (((x)+(a)-1)&~((a)-1)) void avcodec_align_dimensions(AVCodecContext *s, int *width, int *height){ int w_align= 1; int h_align= 1; switch(s->pix_fmt){ case PIX_FMT_YUV420P: w_align= 16; /*FIXME check for non mpeg style codecs and use less alignment*/ h_align= 16; break; default: w_align= 1; h_align= 1; break; } *width = ALIGN(*width , w_align); *height= ALIGN(*height, h_align); } int avcodec_default_get_buffer(AVCodecContext *s, AVFrame *pic){ int i; int w= s->width; int h= s->height; InternalBuffer *buf; int *picture_number; assert(pic->data[0]==NULL); assert(INTERNAL_BUFFER_SIZE > s->internal_buffer_count); if(s->internal_buffer==NULL){ s->internal_buffer= av_mallocz(INTERNAL_BUFFER_SIZE*sizeof(InternalBuffer)); } buf= &((InternalBuffer*)s->internal_buffer)[s->internal_buffer_count]; picture_number= &(((InternalBuffer*)s->internal_buffer)[INTERNAL_BUFFER_SIZE-1]).last_pic_num; /*FIXME ugly hack*/ (*picture_number)++; if(buf->base[0]){ pic->age= *picture_number - buf->last_pic_num; buf->last_pic_num= *picture_number; }else{ int h_chroma_shift, v_chroma_shift; int s_align, pixel_size; avcodec_get_chroma_sub_sample(s->pix_fmt, &h_chroma_shift, &v_chroma_shift); switch(s->pix_fmt){ case PIX_FMT_YUV422: pixel_size=2; break; case PIX_FMT_RGB24: pixel_size=3; break; default: pixel_size=1; } avcodec_align_dimensions(s, &w, &h); /*#if defined(ARCH_POWERPC)*/ #if 0 s_align= 16; #else s_align= 8; #endif w+= EDGE_WIDTH*2; h+= EDGE_WIDTH*2; buf->last_pic_num= -256*256*256*64; for(i=0; i<3; i++){ const int h_shift= i==0 ? 0 : h_chroma_shift; const int v_shift= i==0 ? 
0 : v_chroma_shift; /*FIXME next ensures that linesize= 2^x uvlinesize, thats needed because some MC code assumes it*/ buf->linesize[i]= ALIGN(pixel_size*w>>h_shift, s_align<<(h_chroma_shift-h_shift)); buf->base[i]= av_mallocz((buf->linesize[i]*h>>v_shift)+16); /*FIXME 16*/ if(buf->base[i]==NULL) return -1; memset(buf->base[i], 128, buf->linesize[i]*h>>v_shift); buf->data[i] = buf->base[i] + ALIGN((buf->linesize[i]*EDGE_WIDTH>>v_shift) + (EDGE_WIDTH>>h_shift), s_align); } pic->age= 256*256*256*64; } pic->type= FF_BUFFER_TYPE_INTERNAL; for(i=0; i<4; i++){ pic->base[i]= buf->base[i]; pic->data[i]= buf->data[i]; pic->linesize[i]= buf->linesize[i]; } s->internal_buffer_count++; return 0; } void avcodec_default_release_buffer(AVCodecContext *s, AVFrame *pic){ int i; InternalBuffer *buf, *last, temp; assert(pic->type==FF_BUFFER_TYPE_INTERNAL); assert(s->internal_buffer_count); buf = NULL; /* avoids warning */ for(i=0; iinternal_buffer_count; i++){ /*just 3-5 checks so is not worth to optimize*/ buf= &((InternalBuffer*)s->internal_buffer)[i]; if(buf->data[0] == pic->data[0]) break; } assert(i < s->internal_buffer_count); s->internal_buffer_count--; last = &((InternalBuffer*)s->internal_buffer)[s->internal_buffer_count]; temp= *buf; *buf= *last; *last= temp; for(i=0; i<3; i++){ pic->data[i]=NULL; /* pic->base[i]=NULL;*/ } } static const char* context_to_name(void* ptr) { AVCodecContext *avc= ptr; if(avc && avc->codec && avc->codec->name) return avc->codec->name; else return "NULL"; } static AVClass av_codec_context_class = { "AVCodecContext", context_to_name }; void avcodec_get_context_defaults(AVCodecContext *s) { memset(s, 0, sizeof(AVCodecContext)); s->av_class= &av_codec_context_class; s->bit_rate= 800*1000; s->bit_rate_tolerance= s->bit_rate*10; s->qmin= 2; s->qmax= 31; s->max_qdiff= 3; s->b_quant_factor=1.25; s->b_quant_offset=1.25; s->i_quant_factor=-0.8; s->i_quant_offset=0.0; s->frame_rate_base= 1; s->frame_rate = 25; s->gop_size= 50; s->get_buffer= avcodec_default_get_buffer; s->release_buffer= avcodec_default_release_buffer; s->lmin= FF_QP2LAMBDA * s->qmin; s->lmax= FF_QP2LAMBDA * s->qmax; s->sample_aspect_ratio.num= 0; s->sample_aspect_ratio.den= 1; s->intra_quant_bias= FF_DEFAULT_QUANT_BIAS; s->inter_quant_bias= FF_DEFAULT_QUANT_BIAS; } /** * allocates a AVCodecContext and set it to defaults. * this can be deallocated by simply calling free() */ AVCodecContext *avcodec_alloc_context(void) { AVCodecContext *avctx= av_malloc(sizeof(AVCodecContext)); if(avctx==NULL) return NULL; avcodec_get_context_defaults(avctx); return avctx; } void avcodec_get_frame_defaults(AVFrame *pic) { memset(pic, 0, sizeof(AVFrame)); pic->pts= AV_NOPTS_VALUE; } /** * allocates a AVPFrame and set it to defaults. 
* this can be deallocated by simply calling free() */ AVFrame *avcodec_alloc_frame(void) { AVFrame *pic= av_malloc(sizeof(AVFrame)); if(pic==NULL) return NULL; avcodec_get_frame_defaults(pic); return pic; } int avcodec_open(AVCodecContext *avctx, AVCodec *codec) { int ret; if(avctx->codec) return -1; avctx->codec = codec; avctx->codec_id = codec->id; avctx->frame_number = 0; if (codec->priv_data_size > 0) { avctx->priv_data = av_mallocz(codec->priv_data_size); if (!avctx->priv_data) return -ENOMEM; } else { avctx->priv_data = NULL; } ret = avctx->codec->init(avctx); if (ret < 0) { av_freep(&avctx->priv_data); return ret; } return 0; } int avcodec_encode_video(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVFrame *pict) { if((avctx->codec->capabilities & CODEC_CAP_DELAY) || pict){ int ret = avctx->codec->encode(avctx, buf, buf_size, (void *)pict); avctx->frame_number++; return ret; }else return 0; } int avcodec_close(AVCodecContext *avctx) { if (avctx->codec->close) avctx->codec->close(avctx); avcodec_default_free_buffers(avctx); av_freep(&avctx->priv_data); avctx->codec = NULL; return 0; } AVCodec *avcodec_find_encoder(enum CodecID id) { AVCodec *p; p = first_avcodec; while (p) { if (p->encode != NULL && p->id == id) return p; p = p->next; } return NULL; } unsigned avcodec_version( void ) { return LIBAVCODEC_VERSION_INT; } /* must be called before any other functions */ void avcodec_init(void) { static int inited = 0; if (inited != 0) return; inited = 1; dsputil_static_init(); } static void avcodec_default_free_buffers(AVCodecContext *s) { int i, j; if(s->internal_buffer==NULL) return; for(i=0; iinternal_buffer)[i]; for(j=0; j<4; j++){ av_freep(&buf->base[j]); buf->data[j]= NULL; } } av_freep(&s->internal_buffer); s->internal_buffer_count=0; } int av_reduce(int *dst_nom, int *dst_den, int64_t nom, int64_t den, int64_t max){ int exact=1, sign=0; int64_t gcd; assert(den != 0); if(den < 0) return av_reduce(dst_nom, dst_den, -nom, -den, max); sign= nom < 0; nom= ABS(nom); gcd = ff_gcd(nom, den); nom /= gcd; den /= gcd; if(nom > max || den > max){ AVRational a0={0,1}, a1={1,0}; exact=0; for(;;){ int64_t x= nom / den; int64_t a2n= x*a1.num + a0.num; int64_t a2d= x*a1.den + a0.den; if(a2n > max || a2d > max) break; nom %= den; a0= a1; a1.num= a2n; a1.den= a2d; if(nom==0) break; x= nom; nom=den; den=x; } nom= a1.num; den= a1.den; } assert(ff_gcd(nom, den) == 1); *dst_nom = sign ? -nom : nom; *dst_den = den; return exact; } int64_t av_rescale(int64_t a, int64_t b, int64_t c) { AVInteger ai, ci; assert(c > 0); assert(b >=0); if(a<0) return -av_rescale(-a, b, c); if(b<=INT_MAX && c<=INT_MAX){ if(a<=INT_MAX) return (a * b + c/2)/c; else return a/c*b + (a%c*b + c/2)/c; } ai= av_mul_i(av_int2i(a), av_int2i(b)); ci= av_int2i(c); ai= av_add_i(ai, av_shr_i(ci,1)); return av_i2int(av_div_i(ai, ci)); } /* av_log API */ static int av_log_level = AV_LOG_DEBUG; static void av_log_default_callback(void* ptr, int level, const char* fmt, va_list vl) { static int print_prefix=1; AVClass* avc= ptr ? *(AVClass**)ptr : NULL; if(level>av_log_level) return; #undef fprintf if(print_prefix && avc) { fprintf(stderr, "[%s @ %p]", avc->item_name(ptr), avc); } #define fprintf please_use_av_log print_prefix= strstr(fmt, "\n") != NULL; vfprintf(stderr, fmt, vl); } static void (*av_log_callback)(void*, int, const char*, va_list) = av_log_default_callback; void av_log(void* avcl, int level, const char *fmt, ...) 
{ va_list vl; va_start(vl, fmt); av_vlog(avcl, level, fmt, vl); va_end(vl); } void av_vlog(void* avcl, int level, const char *fmt, va_list vl) { av_log_callback(avcl, level, fmt, vl); } int av_log_get_level(void) { return av_log_level; } void av_log_set_level(int level) { av_log_level = level; } void av_log_set_callback(void (*callback)(void*, int, const char*, va_list)) { av_log_callback = callback; } yorick-mpeg-0.1/Makefile0000644000076500001440000001025611254261005014625 0ustar frigautusers# these values filled in by yorick -batch make.i Y_MAKEDIR=/usr/lib/yorick/2.1 Y_EXE=/usr/lib/yorick/2.1/bin/yorick Y_EXE_PKGS= Y_EXE_HOME=/usr/lib/yorick/2.1 Y_EXE_SITE=/usr/share/yorick/2.1 # ----------------------------------------------------- optimization flags # options for make command line, e.g.- make COPT=-g TGT=exe COPT=$(COPT_DEFAULT) TGT=$(DEFAULT_TGT) # ------------------------------------------------ macros for this package PKG_NAME=mpeg PKG_I=mpeg.i OBJS=ympeg.o # change to give the executable a name other than yorick PKG_EXENAME=yorick # PKG_DEPLIBS=-Lsomedir -lsomelib for dependencies of this package PKG_DEPLIBS=-Llibavcodec -lavcodec # set compiler (or rarely loader) flags specific to this package PKG_CFLAGS=-Ilibavcodec PKG_LDFLAGS= # list of additional package names you want in PKG_EXENAME # (typically Y_EXE_PKGS should be first here) EXTRA_PKGS=$(Y_EXE_PKGS) # list of additional files for clean PKG_CLEAN=test.mpg # autoload file for this package, if any PKG_I_START= # non-pkg.i include files for this package, if any PKG_I_EXTRA=mpgtest.i # -------------------------------- standard targets and rules (in Makepkg) # set macros Makepkg uses in target and dependency names # DLL_TARGETS, LIB_TARGETS, EXE_TARGETS # are any additional targets (defined below) prerequisite to # the plugin library, archive library, and executable, respectively PKG_I_DEPS=$(PKG_I) Y_DISTMAKE=distmake include $(Y_MAKEDIR)/Make.cfg include $(Y_MAKEDIR)/Makepkg include $(Y_MAKEDIR)/Make$(TGT) # override macros Makepkg sets for rules and other macros # Y_HOME and Y_SITE in Make.cfg may not be correct (e.g.- relocatable) Y_HOME=$(Y_EXE_HOME) Y_SITE=$(Y_EXE_SITE) # reduce chance of yorick-1.5 corrupting this Makefile MAKE_TEMPLATE = protect-against-1.5 # ------------------------------------- targets and rules for this package AVCODEC_H=libavcodec/avcodec.h libavcodec/common.h libavcodec/bswap.h ympeg.o: libavcodec/libavcodec.a $(AVCODEC_H) clean:: cd libavcodec; $(MAKE) clean libavcodec/libavcodec.a: cd libavcodec; $(MAKE) # simple example: #myfunc.o: myapi.h # more complex example (also consider using PKG_CFLAGS above): #myfunc.o: myapi.h myfunc.c # $(CC) $(CPPFLAGS) $(CFLAGS) -DMY_SWITCH -o $@ -c myfunc.c # -------------------------------------------------------- pkg_mngr # for the binary package production (add full path to lib*.a below): PKG_DEPLIBS_STATIC=-lm PKG_ARCH = $(OSTYPE)-$(MACHTYPE) # or linux or windows PKG_VERSION = $(shell (awk '{if ($$1=="Version:") print $$2}' $(PKG_NAME).info)) # .info might not exist, in which case he line above will exit in error. 
# packages or devel_pkgs: PKG_DEST_URL = packages package: $(MAKE) $(LD_DLL) -o $(PKG_NAME).so $(OBJS) ywrap.o $(PKG_DEPLIBS_STATIC) $(DLL_DEF) mkdir -p binaries/$(PKG_NAME)/dist/y_home/lib mkdir -p binaries/$(PKG_NAME)/dist/y_site/i mkdir -p binaries/$(PKG_NAME)/dist/y_site/i0 cp -p $(PKG_I) binaries/$(PKG_NAME)/dist/y_site/i0/ cp -p $(PKG_I_EXTRA) binaries/$(PKG_NAME)/dist/y_site/i/ cp -p $(PKG_NAME).so binaries/$(PKG_NAME)/dist/y_home/lib/ if test -n "$(PKG_I_START)"; then cp -p $(PKG_I_START) \ binaries/$(PKG_NAME)/dist/y_home/i-start/; fi cat $(PKG_NAME).info | sed -e 's/OS:/OS: $(PKG_ARCH)/' > tmp.info mv tmp.info binaries/$(PKG_NAME)/$(PKG_NAME).info cd binaries; tar zcvf $(PKG_NAME)-$(PKG_VERSION)-$(PKG_ARCH).tgz $(PKG_NAME) distbin: if test -f "binaries/$(PKG_NAME)-$(PKG_VERSION)-$(PKG_ARCH).tgz" ; then \ ncftpput -f $(HOME)/.ncftp/maumae www/yorick/$(PKG_DEST_URL)/$(PKG_ARCH)/tarballs/ \ binaries/$(PKG_NAME)-$(PKG_VERSION)-$(PKG_ARCH).tgz; fi if test -f "binaries/$(PKG_NAME)/$(PKG_NAME).info" ; then \ ncftpput -f $(HOME)/.ncftp/maumae www/yorick/$(PKG_DEST_URL)/$(PKG_ARCH)/info/ \ binaries/$(PKG_NAME)/$(PKG_NAME).info; fi distsrc: make clean; rm -rf binaries cd ..; tar --exclude binaries --exclude .svn -zcvf \ $(PKG_NAME)-$(PKG_VERSION)-src.tgz yorick-$(PKG_NAME)-$(PKG_VERSION);\ ncftpput -f $(HOME)/.ncftp/maumae www/yorick/$(PKG_DEST_URL)/src/ \ $(PKG_NAME)-$(PKG_VERSION)-src.tgz ncftpput -f $(HOME)/.ncftp/maumae www/yorick/contrib/ \ ../$(PKG_NAME)-$(PKG_VERSION)-src.tgz # -------------------------------------------------------- end of Makefile yorick-mpeg-0.1/mpeg.i0000644000076500001440000000670511254316764014310 0ustar frigautusers/* * mpeg.i -- $Id: mpeg.i,v 1.1.1.1 2007/10/27 22:18:21 dhmunro Exp $ * yorick interface to mpeg movie encoding * */ plug_in, "mpeg"; extern mpeg_create; /* DOCUMENT mpeg = mpeg_create(filename) * or mpeg = mpeg_create(filename, params) * * Create an mpeg-1 movie file FILENAME. Write frames with mpeg_write, * close with mpeg_close. The return value is an mpeg encoder object. * * If given, PARAMS is [bit_rate, frame_rate, gop_size, max_b_frames] * which default to [ 400000, 25, 10, 1 ] * The rates are per second, the gop_size is the number of frames * before an I-frame is emitted, and max_b_frames is the largest * number of consecutive B-frames. (The third kind of frame is the * P-frame; generally, the encoder emits B-frames until it is forced * to emit a P-frame by max_b_frames, or an I-frame by gop_size. The * smaller these numbers, the higher quality the movie, but the lower * the compression.) Any of the four PARAMS values may be zero to * get the default value, except for max_b_frames, which should be <0 * to get the default value. * * SEE ALSO: mpeg_write, mpeg_close, mpeg_movie */ extern mpeg_write; /* DOCUMENT mpeg_write, mpeg, rgb * * Write a frame RGB into the mpeg file corresponding to the MPEG * encoder returned by mpeg_create. RGB is a 3-by-width-by-height * array of char. Every frame must have the same width and height. * To finish the movie and close the file, call mpeg_close. * * Note that you may have trouble rendering the resulting mpeg * file if the image width and height are note multiples of 8. * * SEE ALSO: mpeg_create, mpeg_close, mpeg_movie */ func mpeg_close(&mpeg) /* DOCUMENT mpeg_close, mpeg * * Close the mpeg file corresponding to the MPEG encoder. Actually, * this merely destroys the reference to the encoder; the file will * remain open until the final reference is destroyed. 
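 *
 * A typical calling sequence looks like this (an illustrative sketch only;
 * make_frame stands in for whatever produces each 3-by-width-by-height
 * char array, for example rgb_read after plotting a frame):
 *
 *   mpeg = mpeg_create("test.mpg");
 *   for (i=1 ; i<=nframes ; i++) {
 *     rgb = make_frame(i);       // placeholder for your own frame source
 *     mpeg_write, mpeg, rgb;
 *   }
 *   mpeg_close, mpeg;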
* * SEE ALSO: mpeg_create, mpeg_write, mpeg_movie */ { mpeg = []; } func mpeg_movie(filename, draw_frame,time_limit,min_interframe,bracket_time) /* DOCUMENT mpeg_movie, filename, draw_frame * or mpeg_movie, filename, draw_frame, time_limit * or mpeg_movie, filename, draw_frame, time_limit, min_interframe * * An extension of the movie function (#include "movie.i") that generates * an mpeg file FILENAME. The other arguments are the same as the movie * function (which see). The draw_frame function is: * * func draw_frame(i) * { * // Input argument i is the frame number. * // draw_frame should return non-zero if there are more * // frames in this movie. A zero return will stop the * // movie. * // draw_frame must NOT include any fma command if the * // making_movie variable is set (movie sets this variable * // before calling draw_frame) * } * * SEE ALSO: movie, mpeg_create, mpeg_write, mpeg_close */ { require, "movie.i"; _mpeg_movie_mpeg = mpeg_create(filename, [0, 0, 16, 2]); fma = _mpeg_movie_fma; _mpeg_movie_count = 0; return movie(draw_frame, time_limit, min_interframe, bracket_time); } if (is_void(_mpeg_movie_fma0)) _mpeg_movie_fma0 = fma; func _mpeg_movie_fma { /* movie function does one fma before first draw_frame, skip it */ if (_mpeg_movie_count++) { rgb = rgb_read(); /* trim image until divisible by 8 */ n = dimsof(rgb)(3:4) & 7; if (anyof(n)) rgb = rgb(,n(1)/2+1:-(n(1)+1)/2,n(2)/2+1:-(n(2)+1)/2); mpeg_write, _mpeg_movie_mpeg, rgb; } _mpeg_movie_fma0; } yorick-mpeg-0.1/mpeg.info0000644000076500001440000000124611254260253014775 0ustar frigautusersPackage: mpeg Kind: plugin Version: 0.1 Revision: 1 Description: mpeg output License: BSD, GPL for libavcodec Author: David Munro Maintainer: Francois Rigaut OS: Depends: yorick(>=1.6.02) Source: http://www.maumae.net/yorick/packages/%o/tarballs/mpeg-%v-%o.tgz Source-MD5: Source-Directory: contrib/yorick-mpeg DocFiles: README Homepage: http://www.maumae.net/yorick/doc/plugins.php DescDetail: << This is a standalone mpeg movie encoder for yorick, intended to replace the mpeg encoder in the yorick-z package, which requires the ffmpeg package. yorick-mpeg builds a stripped down version of libavcodec. << DescUsage: << - << DescPort: << << yorick-mpeg-0.1/mpgtest.i0000644000076500001440000000066111254260253015025 0ustar frigautusers/* * mpgtest.i -- $Id: mpgtest.i,v 1.1.1.1 2007/10/27 22:18:21 dhmunro Exp $ * test yorick mpeg encoder */ require, "mpeg.i"; require, "movie.i"; if (is_void(_orig_movie)) _orig_movie = movie; require, "demo2.i"; func mpgtest(void) { _mpgtest_name = "test.mpg"; movie = _mpgtest_movie; demo2, 3; } func _mpgtest_movie(__f, __a, __b, __c) { movie = _orig_movie; return mpeg_movie(_mpgtest_name, __f, __a, __b, __c); } yorick-mpeg-0.1/README0000644000076500001440000000131711254260253014047 0ustar frigautusersThis is a standalone mpeg movie encoder for yorick, intended to replace the mpeg encoder in the yorick-z package, which requires the ffmpeg package. The libavcodec directory contains a version of ffmpeg 0.4.9-pre1 which has been stripped of everything except MPEG-1 encoding. See the libavcodec subdirectory README for more, and the ffmpeg web pages: http://ffmpeg.mplayerhq.hu/ http://sourceforge.net/projects/ffmpeg/ This package is not very complete, and you will probably have to configure libavcodec/Makefile by hand. It can be made to run on both Linux and Mac OS X platforms, but I have had much less success elsewhere. In particular, it does not seem to run correctly under Windows or on AIX SP machines. 
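For orientation, here is the sequence of libavcodec calls that the yorick
glue in ympeg.c (the next file) drives, condensed into one C routine.  This
is a sketch only: the name encode_demo, the buffer size and the encoding
parameters are arbitrary, error checking is omitted, and the RGB-to-YUV
conversion that ympeg.c performs with img_convert() is reduced to a comment.

#include <stdio.h>
#include "avcodec.h"

int encode_demo(const char *filename, int width, int height, int nframes)
{
  FILE *f = fopen(filename, "w");
  AVCodec *codec;
  AVCodecContext *c;
  AVFrame *frame;
  uint8_t *inbuf, *outbuf, endcode[4] = { 0x00, 0x00, 0x01, 0xb7 };
  int i, n, insize, outsize = 200000;

  avcodec_init();
  register_avcodec(&mpeg1video_encoder);
  codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO);

  c = avcodec_alloc_context();
  c->bit_rate = 400000;
  c->frame_rate = 24;        /* frames per second, frame_rate_base stays 1 */
  c->gop_size = 10;          /* at most 10 frames between I-frames */
  c->max_b_frames = 1;
  c->width = width;          /* multiples of 16 are safest */
  c->height = height;

  frame = avcodec_alloc_frame();
  insize = avpicture_get_size(PIX_FMT_YUV420P, width, height);
  inbuf = av_malloc(insize);
  outbuf = av_malloc(outsize);
  avpicture_fill((AVPicture *)frame, inbuf, PIX_FMT_YUV420P, width, height);

  avcodec_open(c, codec);

  for (i=0 ; i<nframes ; i++) {
    /* ...fill the Y, U, V planes behind frame->data[0..2] here, e.g. by
     * img_convert()-ing an RGB24 picture as ympeg.c does... */
    n = avcodec_encode_video(c, outbuf, outsize, frame);
    if (n > 0) fwrite(outbuf, 1, n, f);
  }
  /* flush the delayed frames, then append the MPEG sequence end code */
  while ((n = avcodec_encode_video(c, outbuf, outsize, 0)) > 0)
    fwrite(outbuf, 1, n, f);
  fwrite(endcode, 1, 4, f);
  fclose(f);

  avcodec_close(c);
  av_free(outbuf);
  av_free(inbuf);
  av_free(frame);
  av_free(c);
  return 0;
}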
yorick-mpeg-0.1/ympeg.c0000644000076500001440000003126111254260253014455 0ustar frigautusers/* * ympeg.c -- $Id: ympeg.c,v 1.1.1.1 2007/10/27 22:18:21 dhmunro Exp $ * mpeg encoding interface for yorick */ #include "ydata.h" #include "yio.h" #include "defmem.h" #include "pstdlib.h" #include /* default parameter values */ #define YMPG_BIT_RATE 400000 #define YMPG_FRAME_RATE 24 #define YMPG_GOP_SIZE 10 #define YMPG_MAX_B_FRAMES 1 #ifdef YAVC_SHARED /* -DYAVC_SHARED to dynamically link libavcodec using dlopen */ # include "yio.h" # include "yavcodec.h" # define YAVC_(f) 0 #else /* -UYAVC_SHARED to statically link libavcodec */ # include "avcodec.h" # define YAVC_(f) f #endif static AVCodec *yavc_encoder = YAVC_(&mpeg1video_encoder); static unsigned int (*yavc_version)(void) = YAVC_(avcodec_version); static void (*yavc_init)(void) = YAVC_(avcodec_init); static void (*yavc_register)(AVCodec *format) = YAVC_(register_avcodec); static AVCodec *(*yavc_find_encoder)(enum CodecID id) = YAVC_(avcodec_find_encoder); static AVCodecContext *(*yavc_alloc_context)(void) = YAVC_(avcodec_alloc_context); static AVFrame *(*yavc_alloc_frame)(void) = YAVC_(avcodec_alloc_frame); static int (*yavc_open)(AVCodecContext *avctx, AVCodec *codec) = YAVC_(avcodec_open); static int (*yavc_encode_video)(AVCodecContext *avctx, uint8_t *buf, int buf_size, const AVFrame *pict) = YAVC_(avcodec_encode_video); static int (*yavc_close)(AVCodecContext *avctx) = YAVC_(avcodec_close); static void *(*yavc_malloc)(unsigned int size) = YAVC_(av_malloc); static void (*yavc_free)(void *ptr) = YAVC_(av_free); static int (*yavc_fill)(AVPicture *picture, uint8_t *ptr, int pix_fmt, int width, int height) = YAVC_(avpicture_fill); static int (*yavc_get_size)(int pix_fmt, int width, int height) = YAVC_(avpicture_get_size); static int (*yavc_convert)(AVPicture *dst, int dst_pix_fmt, AVPicture *src, int pix_fmt, int width, int height) = YAVC_(img_convert); static int yavc_bld_version = LIBAVCODEC_VERSION_INT; static int yavc_lib_version = -1; /*--------------------------------------------------------------------------*/ extern BuiltIn Y_mpeg_create, Y_mpeg_write; typedef struct ympg_stream ympg_stream; typedef struct ympg_block ympg_block; /* implement zlib state as a foreign yorick data type */ struct ympg_stream { int references; /* reference counter */ Operations *ops; /* virtual function table */ FILE *f; AVCodecContext *c; AVCodec *codec; uint8_t *in, *out; AVFrame *frame; long nout, nframes; int width, height, outsize; }; extern ympg_stream *ympg_create(char *filename, long *params); extern void ympg_free(void *ympg); /* ******* Use Unref(ympg) ******* */ extern Operations ympg_ops; extern PromoteOp PromXX; extern UnaryOp ToAnyX, NegateX, ComplementX, NotX, TrueX; extern BinaryOp AddX, SubtractX, MultiplyX, DivideX, ModuloX, PowerX; extern BinaryOp EqualX, NotEqualX, GreaterX, GreaterEQX; extern BinaryOp ShiftLX, ShiftRX, OrX, AndX, XorX; extern BinaryOp AssignX, MatMultX; extern UnaryOp EvalX, SetupX, PrintX; extern MemberOp GetMemberX; static UnaryOp ympg_print; Operations ympg_ops = { &ympg_free, T_OPAQUE, 0, T_STRING, "zlib_stream", {&PromXX, &PromXX, &PromXX, &PromXX, &PromXX, &PromXX, &PromXX, &PromXX}, &ToAnyX, &ToAnyX, &ToAnyX, &ToAnyX, &ToAnyX, &ToAnyX, &ToAnyX, &NegateX, &ComplementX, &NotX, &TrueX, &AddX, &SubtractX, &MultiplyX, &DivideX, &ModuloX, &PowerX, &EqualX, &NotEqualX, &GreaterX, &GreaterEQX, &ShiftLX, &ShiftRX, &OrX, &AndX, &XorX, &AssignX, &EvalX, &SetupX, &GetMemberX, &MatMultX, &ympg_print }; 
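/* Added note: when YAVC_SHARED is defined, the yavc_* pointers above start
 * out NULL and are filled in at runtime by ympg_link() near the end of this
 * file, using yorick's portability layer (p_dlopen).  The routine below is
 * an illustrative sketch of that resolution loop written against the plain
 * POSIX dlfcn API instead: resolve_avcodec() and struct sym are made-up
 * names, the real table (ympg_symadd) also records whether each symbol is
 * data or a function, and nothing here is used by the plugin itself. */
#include <dlfcn.h>

struct sym { const char *name; void **paddr; };

static int
resolve_avcodec(const char *libname, struct sym *tab, int nsyms)
{
  /* load the shared library, then store the address of each named
   * symbol through the corresponding pointer in the table */
  void *dll = dlopen(libname, RTLD_NOW);
  int i;
  if (!dll) return -1;                  /* library not found */
  for (i=0 ; i<nsyms ; i++) {
    *tab[i].paddr = dlsym(dll, tab[i].name);
    if (!*tab[i].paddr) return -(i+2);  /* symbol missing from this build */
  }
  return 0;
}
/* usage sketch:
 *   struct sym tab[] = { {"avcodec_open", (void **)&yavc_open},
 *                        {"avcodec_encode_video", (void **)&yavc_encode_video} };
 *   if (resolve_avcodec("libavcodec.so", tab, 2)) handle_the_failure();
 */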
/*--------------------------------------------------------------------------*/ /* Set up a block allocator which grabs space for 32 ympg_stream objects * at a time. Since ympg_stream contains an ops pointer, the alignment * of a ympg_stream must be at least as strict as a void*. */ static MemryBlock ympg_mblock = {0, 0, sizeof(ympg_stream), 32*sizeof(ympg_stream)}; static int ympg_initialized = 0; static void ympg_link(void); ympg_stream * ympg_create(char *filename, long *params) { char *name = p_native(filename); FILE *f = (name && name[0])? fopen(name, "w") : 0; ympg_stream *ympg = 0; p_free(name); if (f) { AVCodec *codec; if (params && (params[0]<0 || params[1]<0 || params[2]<0)) YError("mpeg_create: bad parameter list dimensions or values"); if (ympg_initialized != 1) { if (!yavc_convert) ympg_link(); yavc_lib_version = yavc_version(); yavc_init(); yavc_register(yavc_encoder); ympg_initialized = 1; } codec = yavc_find_encoder(CODEC_ID_MPEG1VIDEO); if (codec) { ympg = NextUnit(&ympg_mblock); ympg->references = 0; ympg->ops = &ympg_ops; ympg->f = f; ympg->c = yavc_alloc_context(); /* ffmpeg 0.4.8 bit_rate was first item in AVCodecContext */ if (yavc_lib_version < 0x000409) /* ffmpeg 0.4.8 bit_rate was first item in AVCodecContext */ ympg->c = (void *)&ympg->c->bit_rate; ympg->codec = codec; ympg->frame = yavc_alloc_frame(); ympg->in = ympg->out = 0; ympg->width = ympg->height = ympg->outsize = 0; ympg->nout = ympg->nframes = 0; if (!ympg->c || !ympg->frame) { if (ympg->c) yavc_free(ympg->c); if (ympg->frame) yavc_free(ympg->frame); FreeUnit(&ympg_mblock, ympg); ympg = 0; YError("mpeg_create: yavc_alloc_context or alloc_frame failed"); } else { ympg->c->bit_rate = (params && params[0])? params[0] : YMPG_BIT_RATE; ympg->c->frame_rate = (params && params[1])? params[1] : YMPG_FRAME_RATE; /* note c->frame_rate_base=1 by default, unnecessary for mpeg1? */ ympg->c->gop_size = (params && params[2])? params[2] : YMPG_GOP_SIZE; ympg->c->max_b_frames = (params && params[3]>=0)? params[3] : YMPG_MAX_B_FRAMES; } } else { YError("mpeg_create: failed to find MPEG1VIDEO encoder"); } } else { YError("mpeg_create: fopen failed to create mpeg output file"); } return ympg; } void ympg_free(void *ympgv) /* ******* Use Unref(ympg) ******* */ { ympg_stream *ympg = ympgv; /* get the delayed frames */ if (ympg->f && ympg->nframes) { if (ympg->nout) for (;;) { ympg->nout = yavc_encode_video(ympg->c, ympg->out, ympg->outsize, 0); if (!ympg->nout) break; fwrite(ympg->out, 1, ympg->nout, ympg->f); } /* add sequence end code to mpeg file */ ympg->out[0] = 0x00; ympg->out[1] = 0x00; ympg->out[2] = 0x01; ympg->out[3] = 0xb7; fwrite(ympg->out, 1, 4, ympg->f); } if (ympg->f) fclose(ympg->f); ympg->f = 0; if (ympg->c) { if (!ympg->codec) yavc_close(ympg->c); yavc_free(ympg->c); } ympg->c = 0; if (ympg->out) yavc_free(ympg->out); ympg->out = 0; if (ympg->in) yavc_free(ympg->in); ympg->in = 0; if (ympg->frame) yavc_free(ympg->frame); ympg->frame = 0; FreeUnit(&ympg_mblock, ympg); } /* ARGSUSED */ static void ympg_print(Operand *op) { /* ympg_stream *yzs = op->value; */ ForceNewline(); PrintFunc("mpeg encoder object"); ForceNewline(); } /*--------------------------------------------------------------------------*/ void Y_mpeg_create(int nArgs) { char *filename = (nArgs>=1 && nArgs<=2)? 
YGetString(sp-nArgs+1) : 0; long bad_params[4] = { -1, -1, -1, -1 }; long *params = 0; if (nArgs == 2) { Dimension *dims = 0; params = YGet_L(sp-nArgs+2, 1, &dims); if (!dims || dims->next || dims->number!=4) params = bad_params; } PushDataBlock(ympg_create(filename, params)); } void Y_mpeg_write(int nArgs) { Operand op; Symbol *stack = sp-nArgs+1; int ndims; Dimension *dims; long idims[3]; uint8_t *image; AVPicture img; int width, height, wyuv, hyuv; ympg_stream *ympg; if (nArgs != 2) YError("mpeg_write takes at exactly 2 arguments"); if (!stack->ops) YError("mpeg_write takes no keywords"); stack->ops->FormOperand(stack, &op); if (op.ops != &ympg_ops) YError("mpeg_write: first argument must be an mpeg encoder object"); ympg = op.value; image = (uint8_t *)YGet_C(stack+1, 0, &dims); ndims = YGet_dims(dims, idims, 3); width = idims[1]; height = idims[2]; if (ndims!=3 || idims[0]!=3 || width<8 || height<8) YError("mpeg_write: image not rgb or too small"); wyuv = (width+7) & ~7; hyuv = (height+7) & ~7; if (ympg->codec) { int size = yavc_get_size(PIX_FMT_YUV420P, wyuv, hyuv); ympg->in = yavc_malloc(size); ympg->outsize = size>100512? size+512 : 100512; ympg->out = yavc_malloc(ympg->outsize); if (!ympg->in || !ympg->out) YError("mpeg_write: av_malloc memory manager failed"); /* note: ffmpeg source routinely casts AVFrame* to AVPicture* */ yavc_fill((AVPicture*)ympg->frame, ympg->in, PIX_FMT_YUV420P, wyuv, hyuv); /* set picture size */ ympg->c->width = wyuv; ympg->c->height = hyuv; if (yavc_open(ympg->c, ympg->codec) < 0) YError("mpeg_create: avcodec_open failed"); ympg->codec = 0; } else if (wyuv!=ympg->c->width || hyuv!=ympg->c->height) { YError("mpeg_write: image dimensions differ from previous frame"); } yavc_fill(&img, image, PIX_FMT_RGB24, width, height); /* note: ffmpeg source routinely casts AVFrame* to AVPicture* */ if (yavc_convert((AVPicture*)ympg->frame, PIX_FMT_YUV420P, &img, PIX_FMT_RGB24, width, height) < 0) YError("mpeg_write: avcodec RGB24 --> YUV420P converter missing"); ympg->nout = yavc_encode_video(ympg->c, ympg->out, ympg->outsize, ympg->frame); while (ympg->nout==ympg->outsize) { fwrite(ympg->out, 1, ympg->nout, ympg->f); ympg->nout = yavc_encode_video(ympg->c, ympg->out, ympg->outsize, 0); } if (ympg->nout) fwrite(ympg->out, 1, ympg->nout, ympg->f); ympg->nframes++; } #ifdef YAVC_SHARED #define NSYMS 15 static struct symadd_t { char *name; int is_data; void *paddr; } ympg_symadd[NSYMS] = {{"mpeg1video_encoder", 1, &yavc_encoder}, {"avcodec_version", 0, &yavc_version}, {"avcodec_init", 0, &yavc_init}, {"register_avcodec", 0, &yavc_register}, {"avcodec_find_encoder", 0, &yavc_find_encoder}, {"avcodec_alloc_context", 0, &yavc_alloc_context}, {"avcodec_alloc_frame", 0, &yavc_alloc_frame}, {"avcodec_open", 0, &yavc_open}, {"avcodec_encode_video", 0, &yavc_encode_video}, {"avcodec_close", 0, &yavc_close}, {"av_malloc", 0, &yavc_malloc}, {"av_free", 0, &yavc_free}, {"avpicture_fill", 0, &yavc_fill}, {"avpicture_get_size", 0, &yavc_get_size}, /* img_convert must be final entry */ {"img_convert", 0, &yavc_convert}}; static void ympg_link(void) { if (!ympg_initialized) { char *yavc_path[] = { 0, 0, "libavcodec", "/lib/libavcodec", "/usr/lib/libavcodec", "/usr/local/lib/libavcodec", "/sw/lib/libavcodec", 0 }; char **yavc_name = yavc_path; char *yavc_env = Ygetenv("Y_LIBAVCODEC"); void *dll = 0; /* look for libavcodec first at name in Y_LIBAVCODEC environment * variable (not including .so or other extension), then Y_HOME/lib, * then as simply "libavcodec" (current working directory?), 
* then in system places /lib, /usr/lib, /usr/local/lib */ if (yavc_env && yavc_env[0]) yavc_path[0] = yavc_env; else yavc_name++; if (yHomeDir && yHomeDir[0]) { char *yhscan = yHomeDir; while (yhscan[1]) yhscan++; yavc_path[1] = p_strncat(yHomeDir, (yhscan[0]=='/')? "lib/libavcodec" : "/lib/libavcodec", 0); } else { yavc_name++; } for ( ; *yavc_name ; yavc_name++) { dll = p_dlopen(*yavc_name); if (dll) { int i, mask; for (i=0,mask=1 ; i Url: http://yorick.sourceforge.net BuildRoot: %{_tmppath}/%{name}-%{version}-%{release}-buildroot Requires: yorick >= 2.1 %description This is a standalone mpeg movie encoder for yorick, intended to replace the mpeg encoder in the yorick-z package, which requires the ffmpeg package. yorick-mpeg builds a stripped down version of libavcodec. %prep %setup -q %build yorick -batch make.i make clean make %install rm -rf $RPM_BUILD_ROOT mkdir -p $RPM_BUILD_ROOT/usr/lib/yorick/lib mkdir -p $RPM_BUILD_ROOT/usr/lib/yorick/i0 mkdir -p $RPM_BUILD_ROOT/usr/lib/yorick/i mkdir -p $RPM_BUILD_ROOT/usr/share/doc/%{name} mkdir -p $RPM_BUILD_ROOT/usr/lib/yorick/packages/installed install -m 755 mpeg.so $RPM_BUILD_ROOT/usr/lib/yorick/lib install -m 644 mpeg.i $RPM_BUILD_ROOT/usr/lib/yorick/i0 install -m 644 mpgtest.i $RPM_BUILD_ROOT/usr/lib/yorick/i install -m 644 README $RPM_BUILD_ROOT/usr/share/doc/%{name} install -m 644 mpeg.info $RPM_BUILD_ROOT/usr/lib/yorick/packages/installed %clean rm -rf $RPM_BUILD_ROOT %files %defattr(-,root,root) /usr/lib/yorick/lib /usr/lib/yorick/i0 /usr/lib/yorick/i /usr/share/doc/%{name} /usr/lib/yorick/packages/installed/* %changelog * Fri Jan 11 2008 - initial release