path: root/ffmpeg/libavcodec/huffyuvenc.c
author     Tim Redfern <tim@eclectronics.org>    2013-12-29 12:19:38 +0000
committer  Tim Redfern <tim@eclectronics.org>    2013-12-29 12:19:38 +0000
commit     f7813a5324be39d13ab536c245d15dfc602a7849 (patch)
tree       fad99148b88823d34a5df2f0a25881a002eb291b /ffmpeg/libavcodec/huffyuvenc.c
parent     b7a5a477b8ff4d4e3028b9dfb9a9df0a41463f92 (diff)
basic type mechanism working
Diffstat (limited to 'ffmpeg/libavcodec/huffyuvenc.c')
-rw-r--r--  ffmpeg/libavcodec/huffyuvenc.c  152
1 file changed, 87 insertions, 65 deletions
diff --git a/ffmpeg/libavcodec/huffyuvenc.c b/ffmpeg/libavcodec/huffyuvenc.c
index 95dcb88..3a55d54 100644
--- a/ffmpeg/libavcodec/huffyuvenc.c
+++ b/ffmpeg/libavcodec/huffyuvenc.c
@@ -56,14 +56,16 @@ static inline int sub_left_prediction(HYuvContext *s, uint8_t *dst,
static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
const uint8_t *src, int w,
- int *red, int *green, int *blue, int *alpha)
+ int *red, int *green, int *blue,
+ int *alpha)
{
int i;
- int r,g,b,a;
+ int r, g, b, a;
r = *red;
g = *green;
b = *blue;
a = *alpha;
+
for (i = 0; i < FFMIN(w, 4); i++) {
const int rt = src[i * 4 + R];
const int gt = src[i * 4 + G];
@@ -87,29 +89,32 @@ static inline void sub_left_prediction_bgr32(HYuvContext *s, uint8_t *dst,
*alpha = src[(w - 1) * 4 + A];
}
-static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst, const uint8_t *src, int w, int *red, int *green, int *blue){
+static inline void sub_left_prediction_rgb24(HYuvContext *s, uint8_t *dst,
+ const uint8_t *src, int w,
+ int *red, int *green, int *blue)
+{
int i;
- int r,g,b;
+ int r, g, b;
r = *red;
g = *green;
b = *blue;
- for (i = 0; i < FFMIN(w,16); i++) {
- const int rt = src[i*3 + 0];
- const int gt = src[i*3 + 1];
- const int bt = src[i*3 + 2];
- dst[i*3 + 0] = rt - r;
- dst[i*3 + 1] = gt - g;
- dst[i*3 + 2] = bt - b;
+ for (i = 0; i < FFMIN(w, 16); i++) {
+ const int rt = src[i * 3 + 0];
+ const int gt = src[i * 3 + 1];
+ const int bt = src[i * 3 + 2];
+ dst[i * 3 + 0] = rt - r;
+ dst[i * 3 + 1] = gt - g;
+ dst[i * 3 + 2] = bt - b;
r = rt;
g = gt;
b = bt;
}
- s->dsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w*3 - 48);
+ s->dsp.diff_bytes(dst + 48, src + 48, src + 48 - 3, w * 3 - 48);
- *red = src[(w - 1)*3 + 0];
- *green = src[(w - 1)*3 + 1];
- *blue = src[(w - 1)*3 + 2];
+ *red = src[(w - 1) * 3 + 0];
+ *green = src[(w - 1) * 3 + 1];
+ *blue = src[(w - 1) * 3 + 2];
}
static int store_table(HYuvContext *s, const uint8_t *len, uint8_t *buf)
@@ -151,13 +156,18 @@ static av_cold int encode_init(AVCodecContext *avctx)
}
s->version = 2;
- avctx->coded_frame = &s->picture;
+ avctx->coded_frame = av_frame_alloc();
+ if (!avctx->coded_frame)
+ return AVERROR(ENOMEM);
+
+ avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+ avctx->coded_frame->key_frame = 1;
switch (avctx->pix_fmt) {
case AV_PIX_FMT_YUV420P:
case AV_PIX_FMT_YUV422P:
if (s->width & 1) {
- av_log(avctx, AV_LOG_ERROR, "width must be even for this colorspace\n");
+ av_log(avctx, AV_LOG_ERROR, "Width must be even for this colorspace.\n");
return AVERROR(EINVAL);
}
s->bitstream_bpp = avctx->pix_fmt == AV_PIX_FMT_YUV420P ? 12 : 16;
@@ -384,43 +394,48 @@ static inline int encode_bgra_bitstream(HYuvContext *s, int count, int planes)
{
int i;
- if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < 4*planes*count) {
+ if (s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb) >> 3) <
+ 4 * planes * count) {
av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
return -1;
}
-#define LOAD3\
- int g = s->temp[0][planes==3 ? 3*i + 1 : 4*i + G];\
- int b = (s->temp[0][planes==3 ? 3*i + 2 : 4*i + B] - g) & 0xff;\
- int r = (s->temp[0][planes==3 ? 3*i + 0 : 4*i + R] - g) & 0xff;\
- int a = s->temp[0][planes*i + A];
-#define STAT3\
- s->stats[0][b]++;\
- s->stats[1][g]++;\
- s->stats[2][r]++;\
- if(planes==4) s->stats[2][a]++;
-#define WRITE3\
- put_bits(&s->pb, s->len[1][g], s->bits[1][g]);\
- put_bits(&s->pb, s->len[0][b], s->bits[0][b]);\
- put_bits(&s->pb, s->len[2][r], s->bits[2][r]);\
- if(planes==4) put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
+#define LOAD_GBRA \
+ int g = s->temp[0][planes == 3 ? 3 * i + 1 : 4 * i + G]; \
+ int b = (s->temp[0][planes == 3 ? 3 * i + 2 : 4 * i + B] - g) & 0xFF; \
+ int r = (s->temp[0][planes == 3 ? 3 * i + 0 : 4 * i + R] - g) & 0xFF; \
+ int a = s->temp[0][planes * i + A];
+
+#define STAT_BGRA \
+ s->stats[0][b]++; \
+ s->stats[1][g]++; \
+ s->stats[2][r]++; \
+ if (planes == 4) \
+ s->stats[2][a]++;
+
+#define WRITE_GBRA \
+ put_bits(&s->pb, s->len[1][g], s->bits[1][g]); \
+ put_bits(&s->pb, s->len[0][b], s->bits[0][b]); \
+ put_bits(&s->pb, s->len[2][r], s->bits[2][r]); \
+ if (planes == 4) \
+ put_bits(&s->pb, s->len[2][a], s->bits[2][a]);
if ((s->flags & CODEC_FLAG_PASS1) &&
(s->avctx->flags2 & CODEC_FLAG2_NO_OUTPUT)) {
for (i = 0; i < count; i++) {
- LOAD3;
- STAT3;
+ LOAD_GBRA;
+ STAT_BGRA;
}
} else if (s->context || (s->flags & CODEC_FLAG_PASS1)) {
for (i = 0; i < count; i++) {
- LOAD3;
- STAT3;
- WRITE3;
+ LOAD_GBRA;
+ STAT_BGRA;
+ WRITE_GBRA;
}
} else {
for (i = 0; i < count; i++) {
- LOAD3;
- WRITE3;
+ LOAD_GBRA;
+ WRITE_GBRA;
}
}
return 0;
@@ -436,16 +451,12 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
const int fake_ystride = s->interlaced ? pict->linesize[0]*2 : pict->linesize[0];
const int fake_ustride = s->interlaced ? pict->linesize[1]*2 : pict->linesize[1];
const int fake_vstride = s->interlaced ? pict->linesize[2]*2 : pict->linesize[2];
- AVFrame * const p = &s->picture;
+ const AVFrame * const p = pict;
int i, j, size = 0, ret;
if ((ret = ff_alloc_packet2(avctx, pkt, width * height * 3 * 4 + FF_MIN_BUFFER_SIZE)) < 0)
return ret;
- *p = *pict;
- p->pict_type = AV_PICTURE_TYPE_I;
- p->key_frame = 1;
-
if (s->context) {
for (i = 0; i < 3; i++) {
ff_huff_gen_len_table(s->len[i], s->stats[i]);
@@ -578,41 +589,48 @@ static int encode_frame(AVCodecContext *avctx, AVPacket *pkt,
put_bits(&s->pb, 8, leftg = data[G]);
put_bits(&s->pb, 8, leftb = data[B]);
- sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1, &leftr, &leftg, &leftb, &lefta);
+ sub_left_prediction_bgr32(s, s->temp[0], data + 4, width - 1,
+ &leftr, &leftg, &leftb, &lefta);
encode_bgra_bitstream(s, width - 1, 4);
for (y = 1; y < s->height; y++) {
uint8_t *dst = data + y*stride;
if (s->predictor == PLANE && s->interlaced < y) {
s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width * 4);
- sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb, &lefta);
+ sub_left_prediction_bgr32(s, s->temp[0], s->temp[1], width,
+ &leftr, &leftg, &leftb, &lefta);
} else {
- sub_left_prediction_bgr32(s, s->temp[0], dst, width, &leftr, &leftg, &leftb, &lefta);
+ sub_left_prediction_bgr32(s, s->temp[0], dst, width,
+ &leftr, &leftg, &leftb, &lefta);
}
encode_bgra_bitstream(s, width, 4);
}
- }else if(avctx->pix_fmt == AV_PIX_FMT_RGB24){
- uint8_t *data = p->data[0] + (height-1)*p->linesize[0];
+ } else if (avctx->pix_fmt == AV_PIX_FMT_RGB24) {
+ uint8_t *data = p->data[0] + (height - 1) * p->linesize[0];
const int stride = -p->linesize[0];
const int fake_stride = -fake_ystride;
int y;
int leftr, leftg, leftb;
- put_bits(&s->pb, 8, leftr= data[0]);
- put_bits(&s->pb, 8, leftg= data[1]);
- put_bits(&s->pb, 8, leftb= data[2]);
+ put_bits(&s->pb, 8, leftr = data[0]);
+ put_bits(&s->pb, 8, leftg = data[1]);
+ put_bits(&s->pb, 8, leftb = data[2]);
put_bits(&s->pb, 8, 0);
- sub_left_prediction_rgb24(s, s->temp[0], data+3, width-1, &leftr, &leftg, &leftb);
+ sub_left_prediction_rgb24(s, s->temp[0], data + 3, width - 1,
+ &leftr, &leftg, &leftb);
encode_bgra_bitstream(s, width-1, 3);
- for(y=1; y<s->height; y++){
- uint8_t *dst = data + y*stride;
- if(s->predictor == PLANE && s->interlaced < y){
- s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride, width*3);
- sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width, &leftr, &leftg, &leftb);
- }else{
- sub_left_prediction_rgb24(s, s->temp[0], dst, width, &leftr, &leftg, &leftb);
+ for (y = 1; y < s->height; y++) {
+ uint8_t *dst = data + y * stride;
+ if (s->predictor == PLANE && s->interlaced < y) {
+ s->dsp.diff_bytes(s->temp[1], dst, dst - fake_stride,
+ width * 3);
+ sub_left_prediction_rgb24(s, s->temp[0], s->temp[1], width,
+ &leftr, &leftg, &leftb);
+ } else {
+ sub_left_prediction_rgb24(s, s->temp[0], dst, width,
+ &leftr, &leftg, &leftb);
}
encode_bgra_bitstream(s, width, 3);
}
@@ -664,12 +682,15 @@ static av_cold int encode_end(AVCodecContext *avctx)
av_freep(&avctx->extradata);
av_freep(&avctx->stats_out);
+ av_frame_free(&avctx->coded_frame);
+
return 0;
}
#if CONFIG_HUFFYUV_ENCODER
AVCodec ff_huffyuv_encoder = {
.name = "huffyuv",
+ .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_HUFFYUV,
.priv_data_size = sizeof(HYuvContext),
@@ -677,15 +698,16 @@ AVCodec ff_huffyuv_encoder = {
.encode2 = encode_frame,
.close = encode_end,
.pix_fmts = (const enum AVPixelFormat[]){
- AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
+ AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
+ AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
},
- .long_name = NULL_IF_CONFIG_SMALL("Huffyuv / HuffYUV"),
};
#endif
#if CONFIG_FFVHUFF_ENCODER
AVCodec ff_ffvhuff_encoder = {
.name = "ffvhuff",
+ .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
.type = AVMEDIA_TYPE_VIDEO,
.id = AV_CODEC_ID_FFVHUFF,
.priv_data_size = sizeof(HYuvContext),
@@ -693,8 +715,8 @@ AVCodec ff_ffvhuff_encoder = {
.encode2 = encode_frame,
.close = encode_end,
.pix_fmts = (const enum AVPixelFormat[]){
- AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24, AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
+ AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_RGB24,
+ AV_PIX_FMT_RGB32, AV_PIX_FMT_NONE
},
- .long_name = NULL_IF_CONFIG_SMALL("Huffyuv FFmpeg variant"),
};
#endif