From 090768194fdbd611b995572f424c6fae690eb329 Mon Sep 17 00:00:00 2001
From: Mohamed Tarek
Date: Sat, 17 Jul 2010 07:46:38 +0000
Subject: Enable ff_copy_bits in ffmpeg_bitstream.c and add put_bits.h and intreadwrite.h to codeclib.

git-svn-id: svn://svn.rockbox.org/rockbox/trunk@27452 a1c6a512-1295-4272-9138-f99709370657
---
 apps/codecs/lib/ffmpeg_bitstream.c    |   8 +-
 apps/codecs/lib/ffmpeg_get_bits.h     |  11 +-
 apps/codecs/lib/ffmpeg_intreadwrite.h | 484 ++++++++++++++++++++++++++++++++++
 apps/codecs/lib/ffmpeg_put_bits.h     | 323 +++++++++++++++++++++++
 4 files changed, 813 insertions(+), 13 deletions(-)
 create mode 100644 apps/codecs/lib/ffmpeg_intreadwrite.h
 create mode 100644 apps/codecs/lib/ffmpeg_put_bits.h

diff --git a/apps/codecs/lib/ffmpeg_bitstream.c b/apps/codecs/lib/ffmpeg_bitstream.c
index be0a3a11f6..542f2671e5 100644
--- a/apps/codecs/lib/ffmpeg_bitstream.c
+++ b/apps/codecs/lib/ffmpeg_bitstream.c
@@ -30,7 +30,8 @@
 
 //#include "avcodec.h"
 #include "ffmpeg_get_bits.h"
-//#include "put_bits.h"
+#include "ffmpeg_put_bits.h"
+#include "ffmpeg_intreadwrite.h"
 
 #define av_log(...)
 
@@ -65,6 +66,7 @@ void ff_put_string(PutBitContext *pb, const char *string, int terminate_string)
     if(terminate_string)
         put_bits(pb, 8, 0);
 }
+#endif
 
 void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
 {
@@ -74,7 +76,7 @@ void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
 
     if(length==0) return;
 
-    if(CONFIG_SMALL || words < 16 || put_bits_count(pb)&7){
+    if(words < 16 || put_bits_count(pb)&7){
         for(i=0; i<words; i++) put_bits(pb, 16, AV_RB16(src + 2*i));
     }else{
         for(i=0; put_bits_count(pb)&31; i++)
@@ -86,7 +88,7 @@ void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length)
 
     put_bits(pb, bits, AV_RB16(src + 2*words)>>(16-bits));
 }
-#endif
+
 
 /* VLC decoding */
 //#define DEBUG_VLC

diff --git a/apps/codecs/lib/ffmpeg_get_bits.h b/apps/codecs/lib/ffmpeg_get_bits.h
index 1a461e9e25..8fce395a4d 100644
--- a/apps/codecs/lib/ffmpeg_get_bits.h
+++ b/apps/codecs/lib/ffmpeg_get_bits.h
@@ -28,6 +28,7 @@
 #include
 #include
+#include "ffmpeg_intreadwrite.h"
 //#include
 //#include "libavutil/bswap.h"
 //#include "libavutil/common.h"
@@ -56,16 +57,6 @@
 #define av_const __attribute__((const))
 #define av_always_inline inline __attribute__((always_inline))
 
-/* Coldfire cpu's support unaligned long reads */
-#ifdef CPU_COLDFIRE
-#define AV_RB32(x) (*(const uint32_t*)(x))
-#else
-/* The following define is taken from libavutil/intreadwrite.h */
-#define AV_RB32(x) ((((const uint8_t*)(x))[0] << 24) | \
-                    (((const uint8_t*)(x))[1] << 16) | \
-                    (((const uint8_t*)(x))[2] <<  8) | \
-                     ((const uint8_t*)(x))[3])
-#endif
 
 /* The following is taken from mathops.h */
 #ifndef sign_extend
diff --git a/apps/codecs/lib/ffmpeg_intreadwrite.h b/apps/codecs/lib/ffmpeg_intreadwrite.h
new file mode 100644
index 0000000000..ca718106d1
--- /dev/null
+++ b/apps/codecs/lib/ffmpeg_intreadwrite.h
@@ -0,0 +1,484 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +#ifndef AVUTIL_INTREADWRITE_H +#define AVUTIL_INTREADWRITE_H + +#include +/* + * Arch-specific headers can provide any combination of + * AV_[RW][BLN](16|24|32|64) and AV_(COPY|SWAP|ZERO)(64|128) macros. + * Preprocessor symbols must be defined, even if these are implemented + * as inline functions. + */ + +/* + * Map AV_RNXX <-> AV_R[BL]XX for all variants provided by per-arch headers. + */ +#define HAVE_BIGENDIAN 0 +#if HAVE_BIGENDIAN + +# if defined(AV_RN16) && !defined(AV_RB16) +# define AV_RB16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RB16) +# define AV_RN16(p) AV_RB16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WB16) +# define AV_WB16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WB16) +# define AV_WN16(p, v) AV_WB16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RB24) +# define AV_RB24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RB24) +# define AV_RN24(p) AV_RB24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WB24) +# define AV_WB24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WB24) +# define AV_WN24(p, v) AV_WB24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RB32) +# define AV_RB32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RB32) +# define AV_RN32(p) AV_RB32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WB32) +# define AV_WB32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WB32) +# define AV_WN32(p, v) AV_WB32(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RB64) +# define AV_RB64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RB64) +# define AV_RN64(p) AV_RB64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WB64) +# define AV_WB64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WB64) +# define AV_WN64(p, v) AV_WB64(p, v) +# endif + +#else /* HAVE_BIGENDIAN */ + +# if defined(AV_RN16) && !defined(AV_RL16) +# define AV_RL16(p) AV_RN16(p) +# elif !defined(AV_RN16) && defined(AV_RL16) +# define AV_RN16(p) AV_RL16(p) +# endif + +# if defined(AV_WN16) && !defined(AV_WL16) +# define AV_WL16(p, v) AV_WN16(p, v) +# elif !defined(AV_WN16) && defined(AV_WL16) +# define AV_WN16(p, v) AV_WL16(p, v) +# endif + +# if defined(AV_RN24) && !defined(AV_RL24) +# define AV_RL24(p) AV_RN24(p) +# elif !defined(AV_RN24) && defined(AV_RL24) +# define AV_RN24(p) AV_RL24(p) +# endif + +# if defined(AV_WN24) && !defined(AV_WL24) +# define AV_WL24(p, v) AV_WN24(p, v) +# elif !defined(AV_WN24) && defined(AV_WL24) +# define AV_WN24(p, v) AV_WL24(p, v) +# endif + +# if defined(AV_RN32) && !defined(AV_RL32) +# define AV_RL32(p) AV_RN32(p) +# elif !defined(AV_RN32) && defined(AV_RL32) +# define AV_RN32(p) AV_RL32(p) +# endif + +# if defined(AV_WN32) && !defined(AV_WL32) +# define AV_WL32(p, v) AV_WN32(p, v) +# elif !defined(AV_WN32) && defined(AV_WL32) +# define AV_WN32(p, v) AV_WL32(p, v) +# endif + +# if defined(AV_RN64) && !defined(AV_RL64) +# define AV_RL64(p) AV_RN64(p) +# elif !defined(AV_RN64) && defined(AV_RL64) +# define AV_RN64(p) AV_RL64(p) +# endif + +# if defined(AV_WN64) && !defined(AV_WL64) +# define AV_WL64(p, v) AV_WN64(p, v) +# elif !defined(AV_WN64) && defined(AV_WL64) +# define AV_WN64(p, v) AV_WL64(p, v) +# endif + +#endif /* !HAVE_BIGENDIAN */ + +#define HAVE_ATTRIBUTE_PACKED 0 +#define HAVE_FAST_UNALIGNED 0 +/* + * Define 
AV_[RW]N helper macros to simplify definitions not provided + * by per-arch headers. + */ + +#if HAVE_ATTRIBUTE_PACKED + +union unaligned_64 { uint64_t l; } __attribute__((packed)) av_alias; +union unaligned_32 { uint32_t l; } __attribute__((packed)) av_alias; +union unaligned_16 { uint16_t l; } __attribute__((packed)) av_alias; + +# define AV_RN(s, p) (((const union unaligned_##s *) (p))->l) +# define AV_WN(s, p, v) ((((union unaligned_##s *) (p))->l) = (v)) + +#elif defined(__DECC) + +# define AV_RN(s, p) (*((const __unaligned uint##s##_t*)(p))) +# define AV_WN(s, p, v) (*((__unaligned uint##s##_t*)(p)) = (v)) + +#elif HAVE_FAST_UNALIGNED + +# define AV_RN(s, p) (((const av_alias##s*)(p))->u##s) +# define AV_WN(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#else + +#ifndef AV_RB16 +# define AV_RB16(x) \ + ((((const uint8_t*)(x))[0] << 8) | \ + ((const uint8_t*)(x))[1]) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, d) do { \ + ((uint8_t*)(p))[1] = (d); \ + ((uint8_t*)(p))[0] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RL16 +# define AV_RL16(x) \ + ((((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, d) do { \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + } while(0) +#endif + +#ifndef AV_RB32 +/* Coldfire cpu's support unaligned long reads */ +#ifdef CPU_COLDFIRE +#define AV_RB32(x) (*(const uint32_t*)(x)) +#else +# define AV_RB32(x) \ + ((((const uint8_t*)(x))[0] << 24) | \ + (((const uint8_t*)(x))[1] << 16) | \ + (((const uint8_t*)(x))[2] << 8) | \ + ((const uint8_t*)(x))[3]) +#endif +#endif +#ifndef AV_WB32 +# define AV_WB32(p, d) do { \ + ((uint8_t*)(p))[3] = (d); \ + ((uint8_t*)(p))[2] = (d)>>8; \ + ((uint8_t*)(p))[1] = (d)>>16; \ + ((uint8_t*)(p))[0] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RL32 +# define AV_RL32(x) \ + ((((const uint8_t*)(x))[3] << 24) | \ + (((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, d) do { \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + ((uint8_t*)(p))[3] = (d)>>24; \ + } while(0) +#endif + +#ifndef AV_RB64 +# define AV_RB64(x) \ + (((uint64_t)((const uint8_t*)(x))[0] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 8) | \ + (uint64_t)((const uint8_t*)(x))[7]) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, d) do { \ + ((uint8_t*)(p))[7] = (d); \ + ((uint8_t*)(p))[6] = (d)>>8; \ + ((uint8_t*)(p))[5] = (d)>>16; \ + ((uint8_t*)(p))[4] = (d)>>24; \ + ((uint8_t*)(p))[3] = (d)>>32; \ + ((uint8_t*)(p))[2] = (d)>>40; \ + ((uint8_t*)(p))[1] = (d)>>48; \ + ((uint8_t*)(p))[0] = (d)>>56; \ + } while(0) +#endif + +#ifndef AV_RL64 +# define AV_RL64(x) \ + (((uint64_t)((const uint8_t*)(x))[7] << 56) | \ + ((uint64_t)((const uint8_t*)(x))[6] << 48) | \ + ((uint64_t)((const uint8_t*)(x))[5] << 40) | \ + ((uint64_t)((const uint8_t*)(x))[4] << 32) | \ + ((uint64_t)((const uint8_t*)(x))[3] << 24) | \ + ((uint64_t)((const uint8_t*)(x))[2] << 16) | \ + ((uint64_t)((const uint8_t*)(x))[1] << 8) | \ + (uint64_t)((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, d) do { \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + 
((uint8_t*)(p))[3] = (d)>>24; \ + ((uint8_t*)(p))[4] = (d)>>32; \ + ((uint8_t*)(p))[5] = (d)>>40; \ + ((uint8_t*)(p))[6] = (d)>>48; \ + ((uint8_t*)(p))[7] = (d)>>56; \ + } while(0) +#endif + +#if HAVE_BIGENDIAN +# define AV_RN(s, p) AV_RB##s(p) +# define AV_WN(s, p, v) AV_WB##s(p, v) +#else +# define AV_RN(s, p) AV_RL##s(p) +# define AV_WN(s, p, v) AV_WL##s(p, v) +#endif + +#endif /* HAVE_FAST_UNALIGNED */ + +#ifndef AV_RN16 +# define AV_RN16(p) AV_RN(16, p) +#endif + +#ifndef AV_RN32 +# define AV_RN32(p) AV_RN(32, p) +#endif + +#ifndef AV_RN64 +# define AV_RN64(p) AV_RN(64, p) +#endif + +#ifndef AV_WN16 +# define AV_WN16(p, v) AV_WN(16, p, v) +#endif + +#ifndef AV_WN32 +# define AV_WN32(p, v) AV_WN(32, p, v) +#endif + +#ifndef AV_WN64 +# define AV_WN64(p, v) AV_WN(64, p, v) +#endif + +#if HAVE_BIGENDIAN +# define AV_RB(s, p) AV_RN##s(p) +# define AV_WB(s, p, v) AV_WN##s(p, v) +# define AV_RL(s, p) bswap_##s(AV_RN##s(p)) +# define AV_WL(s, p, v) AV_WN##s(p, bswap_##s(v)) +#else +# define AV_RB(s, p) bswap_##s(AV_RN##s(p)) +# define AV_WB(s, p, v) AV_WN##s(p, bswap_##s(v)) +# define AV_RL(s, p) AV_RN##s(p) +# define AV_WL(s, p, v) AV_WN##s(p, v) +#endif + +#define AV_RB8(x) (((const uint8_t*)(x))[0]) +#define AV_WB8(p, d) do { ((uint8_t*)(p))[0] = (d); } while(0) + +#define AV_RL8(x) AV_RB8(x) +#define AV_WL8(p, d) AV_WB8(p, d) + +#ifndef AV_RB16 +# define AV_RB16(p) AV_RB(16, p) +#endif +#ifndef AV_WB16 +# define AV_WB16(p, v) AV_WB(16, p, v) +#endif + +#ifndef AV_RL16 +# define AV_RL16(p) AV_RL(16, p) +#endif +#ifndef AV_WL16 +# define AV_WL16(p, v) AV_WL(16, p, v) +#endif + +#ifndef AV_RB32 +# define AV_RB32(p) AV_RB(32, p) +#endif +#ifndef AV_WB32 +# define AV_WB32(p, v) AV_WB(32, p, v) +#endif + +#ifndef AV_RL32 +# define AV_RL32(p) AV_RL(32, p) +#endif +#ifndef AV_WL32 +# define AV_WL32(p, v) AV_WL(32, p, v) +#endif + +#ifndef AV_RB64 +# define AV_RB64(p) AV_RB(64, p) +#endif +#ifndef AV_WB64 +# define AV_WB64(p, v) AV_WB(64, p, v) +#endif + +#ifndef AV_RL64 +# define AV_RL64(p) AV_RL(64, p) +#endif +#ifndef AV_WL64 +# define AV_WL64(p, v) AV_WL(64, p, v) +#endif + +#ifndef AV_RB24 +# define AV_RB24(x) \ + ((((const uint8_t*)(x))[0] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[2]) +#endif +#ifndef AV_WB24 +# define AV_WB24(p, d) do { \ + ((uint8_t*)(p))[2] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[0] = (d)>>16; \ + } while(0) +#endif + +#ifndef AV_RL24 +# define AV_RL24(x) \ + ((((const uint8_t*)(x))[2] << 16) | \ + (((const uint8_t*)(x))[1] << 8) | \ + ((const uint8_t*)(x))[0]) +#endif +#ifndef AV_WL24 +# define AV_WL24(p, d) do { \ + ((uint8_t*)(p))[0] = (d); \ + ((uint8_t*)(p))[1] = (d)>>8; \ + ((uint8_t*)(p))[2] = (d)>>16; \ + } while(0) +#endif + +/* + * The AV_[RW]NA macros access naturally aligned data + * in a type-safe way. + */ + +#define AV_RNA(s, p) (((const av_alias##s*)(p))->u##s) +#define AV_WNA(s, p, v) (((av_alias##s*)(p))->u##s = (v)) + +#ifndef AV_RN16A +# define AV_RN16A(p) AV_RNA(16, p) +#endif + +#ifndef AV_RN32A +# define AV_RN32A(p) AV_RNA(32, p) +#endif + +#ifndef AV_RN64A +# define AV_RN64A(p) AV_RNA(64, p) +#endif + +#ifndef AV_WN16A +# define AV_WN16A(p, v) AV_WNA(16, p, v) +#endif + +#ifndef AV_WN32A +# define AV_WN32A(p, v) AV_WNA(32, p, v) +#endif + +#ifndef AV_WN64A +# define AV_WN64A(p, v) AV_WNA(64, p, v) +#endif + +/* Parameters for AV_COPY*, AV_SWAP*, AV_ZERO* must be + * naturally aligned. They may be implemented using MMX, + * so emms_c() must be called before using any float code + * afterwards. 
+ */ + +#define AV_COPY(n, d, s) \ + (((av_alias##n*)(d))->u##n = ((const av_alias##n*)(s))->u##n) + +#ifndef AV_COPY16 +# define AV_COPY16(d, s) AV_COPY(16, d, s) +#endif + +#ifndef AV_COPY32 +# define AV_COPY32(d, s) AV_COPY(32, d, s) +#endif + +#ifndef AV_COPY64 +# define AV_COPY64(d, s) AV_COPY(64, d, s) +#endif + +#ifndef AV_COPY128 +# define AV_COPY128(d, s) \ + do { \ + AV_COPY64(d, s); \ + AV_COPY64((char*)(d)+8, (char*)(s)+8); \ + } while(0) +#endif + +#define AV_SWAP(n, a, b) FFSWAP(av_alias##n, *(av_alias##n*)(a), *(av_alias##n*)(b)) + +#ifndef AV_SWAP64 +# define AV_SWAP64(a, b) AV_SWAP(64, a, b) +#endif + +#define AV_ZERO(n, d) (((av_alias##n*)(d))->u##n = 0) + +#ifndef AV_ZERO16 +# define AV_ZERO16(d) AV_ZERO(16, d) +#endif + +#ifndef AV_ZERO32 +# define AV_ZERO32(d) AV_ZERO(32, d) +#endif + +#ifndef AV_ZERO64 +# define AV_ZERO64(d) AV_ZERO(64, d) +#endif + +#ifndef AV_ZERO128 +# define AV_ZERO128(d) \ + do { \ + AV_ZERO64(d); \ + AV_ZERO64((char*)(d)+8); \ + } while(0) +#endif + +#endif /* AVUTIL_INTREADWRITE_H */ diff --git a/apps/codecs/lib/ffmpeg_put_bits.h b/apps/codecs/lib/ffmpeg_put_bits.h new file mode 100644 index 0000000000..38db55fe18 --- /dev/null +++ b/apps/codecs/lib/ffmpeg_put_bits.h @@ -0,0 +1,323 @@ +/* + * copyright (c) 2004 Michael Niedermayer + * + * This file is part of FFmpeg. + * + * FFmpeg is free software; you can redistribute it and/or + * modify it under the terms of the GNU Lesser General Public + * License as published by the Free Software Foundation; either + * version 2.1 of the License, or (at your option) any later version. + * + * FFmpeg is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * Lesser General Public License for more details. + * + * You should have received a copy of the GNU Lesser General Public + * License along with FFmpeg; if not, write to the Free Software + * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA + */ + +/** + * @file libavcodec/put_bits.h + * bitstream writer API + */ + +#ifndef AVCODEC_PUT_BITS_H +#define AVCODEC_PUT_BITS_H + +#include +#include +#include "ffmpeg_bswap.h" +#include "ffmpeg_intreadwrite.h" + +#define av_log(...) +#define HAVE_FAST_UNALIGNED 0 + +/* buf and buf_end must be present and used by every alternative writer. */ +typedef struct PutBitContext { +#ifdef ALT_BITSTREAM_WRITER + uint8_t *buf, *buf_end; + int index; +#else + uint32_t bit_buf; + int bit_left; + uint8_t *buf, *buf_ptr, *buf_end; +#endif + int size_in_bits; +} PutBitContext; + +/** + * Initializes the PutBitContext s. + * + * @param buffer the buffer where to put bits + * @param buffer_size the size in bytes of buffer + */ +static inline void init_put_bits(PutBitContext *s, uint8_t *buffer, int buffer_size) +{ + if(buffer_size < 0) { + buffer_size = 0; + buffer = NULL; + } + + s->size_in_bits= 8*buffer_size; + s->buf = buffer; + s->buf_end = s->buf + buffer_size; +#ifdef ALT_BITSTREAM_WRITER + s->index=0; + ((uint32_t*)(s->buf))[0]=0; +// memset(buffer, 0, buffer_size); +#else + s->buf_ptr = s->buf; + s->bit_left=32; + s->bit_buf=0; +#endif +} + +/** + * Returns the total number of bits written to the bitstream. + */ +static inline int put_bits_count(PutBitContext *s) +{ +#ifdef ALT_BITSTREAM_WRITER + return s->index; +#else + return (s->buf_ptr - s->buf) * 8 + 32 - s->bit_left; +#endif +} + +/** + * Pads the end of the output stream with zeros. 
+ */ +static inline void flush_put_bits(PutBitContext *s) +{ +#ifdef ALT_BITSTREAM_WRITER + align_put_bits(s); +#else +#ifndef BITSTREAM_WRITER_LE + s->bit_buf<<= s->bit_left; +#endif + while (s->bit_left < 32) { + /* XXX: should test end of buffer */ +#ifdef BITSTREAM_WRITER_LE + *s->buf_ptr++=s->bit_buf; + s->bit_buf>>=8; +#else + *s->buf_ptr++=s->bit_buf >> 24; + s->bit_buf<<=8; +#endif + s->bit_left+=8; + } + s->bit_left=32; + s->bit_buf=0; +#endif +} + +#if defined(ALT_BITSTREAM_WRITER) || defined(BITSTREAM_WRITER_LE) +#define align_put_bits align_put_bits_unsupported_here +#define ff_put_string ff_put_string_unsupported_here +#define ff_copy_bits ff_copy_bits_unsupported_here +#else +/** + * Pads the bitstream with zeros up to the next byte boundary. + */ +void align_put_bits(PutBitContext *s); + +/** + * Puts the string string in the bitstream. + * + * @param terminate_string 0-terminates the written string if value is 1 + */ +void ff_put_string(PutBitContext *pb, const char *string, int terminate_string); + +/** + * Copies the content of src to the bitstream. + * + * @param length the number of bits of src to copy + */ +void ff_copy_bits(PutBitContext *pb, const uint8_t *src, int length); +#endif + +/** + * Writes up to 31 bits into a bitstream. + * Use put_bits32 to write 32 bits. + */ +static inline void put_bits(PutBitContext *s, int n, unsigned int value) +#ifndef ALT_BITSTREAM_WRITER +{ + unsigned int bit_buf; + int bit_left; + + // printf("put_bits=%d %x\n", n, value); + //assert(n <= 31 && value < (1U << n)); + + bit_buf = s->bit_buf; + bit_left = s->bit_left; + + // printf("n=%d value=%x cnt=%d buf=%x\n", n, value, bit_cnt, bit_buf); + /* XXX: optimize */ +#ifdef BITSTREAM_WRITER_LE + bit_buf |= value << (32 - bit_left); + if (n >= bit_left) { +#if !HAVE_FAST_UNALIGNED + if (3 & (intptr_t) s->buf_ptr) { + AV_WL32(s->buf_ptr, bit_buf); + } else +#endif + *(uint32_t *)s->buf_ptr = le2me_32(bit_buf); + s->buf_ptr+=4; + bit_buf = (bit_left==32)?0:value >> bit_left; + bit_left+=32; + } + bit_left-=n; +#else + if (n < bit_left) { + bit_buf = (bit_buf<> (n - bit_left); +#if !HAVE_FAST_UNALIGNED + if (3 & (intptr_t) s->buf_ptr) { + AV_WB32(s->buf_ptr, bit_buf); + } else +#endif + *(uint32_t *)s->buf_ptr = be2me_32(bit_buf); + //printf("bitbuf = %08x\n", bit_buf); + s->buf_ptr+=4; + bit_left+=32 - n; + bit_buf = value; + } +#endif + + s->bit_buf = bit_buf; + s->bit_left = bit_left; +} +#else /* ALT_BITSTREAM_WRITER defined */ +{ +# ifdef ALIGNED_BITSTREAM_WRITER +# if ARCH_X86 + __asm__ volatile( + "movl %0, %%ecx \n\t" + "xorl %%eax, %%eax \n\t" + "shrdl %%cl, %1, %%eax \n\t" + "shrl %%cl, %1 \n\t" + "movl %0, %%ecx \n\t" + "shrl $3, %%ecx \n\t" + "andl $0xFFFFFFFC, %%ecx \n\t" + "bswapl %1 \n\t" + "orl %1, (%2, %%ecx) \n\t" + "bswapl %%eax \n\t" + "addl %3, %0 \n\t" + "movl %%eax, 4(%2, %%ecx) \n\t" + : "=&r" (s->index), "=&r" (value) + : "r" (s->buf), "r" (n), "0" (s->index), "1" (value<<(-n)) + : "%eax", "%ecx" + ); +# else + int index= s->index; + uint32_t *ptr= ((uint32_t *)s->buf)+(index>>5); + + value<<= 32-n; + + ptr[0] |= be2me_32(value>>(index&31)); + ptr[1] = be2me_32(value<<(32-(index&31))); +//if(n>24) printf("%d %d\n", n, value); + index+= n; + s->index= index; +# endif +# else //ALIGNED_BITSTREAM_WRITER +# if ARCH_X86 + __asm__ volatile( + "movl $7, %%ecx \n\t" + "andl %0, %%ecx \n\t" + "addl %3, %%ecx \n\t" + "negl %%ecx \n\t" + "shll %%cl, %1 \n\t" + "bswapl %1 \n\t" + "movl %0, %%ecx \n\t" + "shrl $3, %%ecx \n\t" + "orl %1, (%%ecx, %2) \n\t" + "addl %3, %0 \n\t" + 
"movl $0, 4(%%ecx, %2) \n\t" + : "=&r" (s->index), "=&r" (value) + : "r" (s->buf), "r" (n), "0" (s->index), "1" (value) + : "%ecx" + ); +# else + int index= s->index; + uint32_t *ptr= (uint32_t*)(((uint8_t *)s->buf)+(index>>3)); + + ptr[0] |= be2me_32(value<<(32-n-(index&7) )); + ptr[1] = 0; +//if(n>24) printf("%d %d\n", n, value); + index+= n; + s->index= index; +# endif +# endif //!ALIGNED_BITSTREAM_WRITER +} +#endif + +static inline void put_sbits(PutBitContext *pb, int n, int32_t value) +{ + //assert(n >= 0 && n <= 31); + + put_bits(pb, n, value & ((1<buf + (s->index>>3); +#else + return s->buf_ptr; +#endif +} + +/** + * Skips the given number of bytes. + * PutBitContext must be flushed & aligned to a byte boundary before calling this. + */ +static inline void skip_put_bytes(PutBitContext *s, int n) +{ + //assert((put_bits_count(s)&7)==0); +#ifdef ALT_BITSTREAM_WRITER + FIXME may need some cleaning of the buffer + s->index += n<<3; +#else + //assert(s->bit_left==32); + s->buf_ptr += n; +#endif +} + +/** + * Skips the given number of bits. + * Must only be used if the actual values in the bitstream do not matter. + * If n is 0 the behavior is undefined. + */ +static inline void skip_put_bits(PutBitContext *s, int n) +{ +#ifdef ALT_BITSTREAM_WRITER + s->index += n; +#else + s->bit_left -= n; + s->buf_ptr-= 4*(s->bit_left>>5); + s->bit_left &= 31; +#endif +} + +/** + * Changes the end of the buffer. + * + * @param size the new size in bytes of the buffer where to put bits + */ +static inline void set_put_bits_buffer_size(PutBitContext *s, int size) +{ + s->buf_end= s->buf + size; +} + +#endif /* AVCODEC_PUT_BITS_H */ -- cgit v1.2.3