| author | Tim Redfern <tim@eclectronics.org> | 2013-12-29 12:19:38 +0000 |
|---|---|---|
| committer | Tim Redfern <tim@eclectronics.org> | 2013-12-29 12:19:38 +0000 |
| commit | f7813a5324be39d13ab536c245d15dfc602a7849 (patch) | |
| tree | fad99148b88823d34a5df2f0a25881a002eb291b /ffmpeg/libavcodec/ljpegenc.c | |
| parent | b7a5a477b8ff4d4e3028b9dfb9a9df0a41463f92 (diff) | |
basic type mechanism working
Diffstat (limited to 'ffmpeg/libavcodec/ljpegenc.c')
| -rw-r--r-- | ffmpeg/libavcodec/ljpegenc.c | 406 |
|---|---|---|

1 file changed, 248 insertions, 158 deletions
```diff
diff --git a/ffmpeg/libavcodec/ljpegenc.c b/ffmpeg/libavcodec/ljpegenc.c
index 76c3cb9..35b82fd 100644
--- a/ffmpeg/libavcodec/ljpegenc.c
+++ b/ffmpeg/libavcodec/ljpegenc.c
@@ -30,207 +30,297 @@
  * lossless JPEG encoder.
  */
 
+#include "libavutil/frame.h"
+#include "libavutil/mem.h"
+#include "libavutil/pixdesc.h"
+
 #include "avcodec.h"
+#include "dsputil.h"
 #include "internal.h"
 #include "mpegvideo.h"
 #include "mjpeg.h"
 #include "mjpegenc.h"
 
+typedef struct LJpegEncContext {
+    DSPContext dsp;
+    ScanTable scantable;
+    uint16_t matrix[64];
 
-static int encode_picture_lossless(AVCodecContext *avctx, AVPacket *pkt,
-                                   const AVFrame *pict, int *got_packet)
-{
-    MpegEncContext * const s = avctx->priv_data;
-    MJpegContext * const m = s->mjpeg_ctx;
-    const int width= s->width;
-    const int height= s->height;
-    AVFrame * const p = &s->current_picture.f;
-    const int predictor= avctx->prediction_method+1;
-    const int mb_width  = (width  + s->mjpeg_hsample[0] - 1) / s->mjpeg_hsample[0];
-    const int mb_height = (height + s->mjpeg_vsample[0] - 1) / s->mjpeg_vsample[0];
-    int ret, max_pkt_size = FF_MIN_BUFFER_SIZE;
-
-    if (avctx->pix_fmt == AV_PIX_FMT_BGRA)
-        max_pkt_size += width * height * 3 * 4;
-    else {
-        max_pkt_size += mb_width * mb_height * 3 * 4
-                        * s->mjpeg_hsample[0] * s->mjpeg_vsample[0];
-    }
+    int vsample[3];
+    int hsample[3];
 
-    if (!s->edge_emu_buffer &&
-        (ret = ff_mpv_frame_size_alloc(s, pict->linesize[0])) < 0) {
-        av_log(avctx, AV_LOG_ERROR, "failed to allocate context scratch buffers.\n");
-        return ret;
-    }
+    uint16_t huff_code_dc_luminance[12];
+    uint16_t huff_code_dc_chrominance[12];
+    uint8_t  huff_size_dc_luminance[12];
+    uint8_t  huff_size_dc_chrominance[12];
 
-    if ((ret = ff_alloc_packet2(avctx, pkt, max_pkt_size)) < 0)
-        return ret;
+    uint16_t (*scratch)[4];
+} LJpegEncContext;
 
-    init_put_bits(&s->pb, pkt->data, pkt->size);
+static int ljpeg_encode_bgr(AVCodecContext *avctx, PutBitContext *pb,
+                            const AVFrame *frame)
+{
+    LJpegEncContext *s    = avctx->priv_data;
+    const int width       = frame->width;
+    const int height      = frame->height;
+    const int linesize    = frame->linesize[0];
+    uint16_t (*buffer)[4] = s->scratch;
+    const int predictor   = avctx->prediction_method+1;
+    int left[3], top[3], topleft[3];
+    int x, y, i;
+
+    for (i = 0; i < 3; i++)
+        buffer[0][i] = 1 << (9 - 1);
+
+    for (y = 0; y < height; y++) {
+        const int modified_predictor = y ? predictor : 1;
+        uint8_t *ptr = frame->data[0] + (linesize * y);
+
+        if (pb->buf_end - pb->buf - (put_bits_count(pb) >> 3) < width * 3 * 4) {
+            av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
+            return -1;
+        }
 
-    av_frame_unref(p);
-    ret = av_frame_ref(p, pict);
-    if (ret < 0)
-        return ret;
-    p->pict_type= AV_PICTURE_TYPE_I;
-    p->key_frame= 1;
+        for (i = 0; i < 3; i++)
+            top[i]= left[i]= topleft[i]= buffer[0][i];
+
+        for (x = 0; x < width; x++) {
+            if(avctx->pix_fmt == AV_PIX_FMT_BGR24){
+                buffer[x][1] =  ptr[3 * x + 0] - ptr[3 * x + 1] + 0x100;
+                buffer[x][2] =  ptr[3 * x + 2] - ptr[3 * x + 1] + 0x100;
+                buffer[x][0] = (ptr[3 * x + 0] + 2 * ptr[3 * x + 1] + ptr[3 * x + 2]) >> 2;
+            }else{
+                buffer[x][1] =  ptr[4 * x + 0] - ptr[4 * x + 1] + 0x100;
+                buffer[x][2] =  ptr[4 * x + 2] - ptr[4 * x + 1] + 0x100;
+                buffer[x][0] = (ptr[4 * x + 0] + 2 * ptr[4 * x + 1] + ptr[4 * x + 2]) >> 2;
+            }
 
-    ff_mjpeg_encode_picture_header(s);
+            for (i = 0; i < 3; i++) {
+                int pred, diff;
 
-    s->header_bits= put_bits_count(&s->pb);
+                PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);
 
-    if(avctx->pix_fmt == AV_PIX_FMT_BGR0
-        || avctx->pix_fmt == AV_PIX_FMT_BGRA
-        || avctx->pix_fmt == AV_PIX_FMT_BGR24){
-        int x, y, i;
-        const int linesize= p->linesize[0];
-        uint16_t (*buffer)[4]= (void *) s->rd_scratchpad;
-        int left[3], top[3], topleft[3];
-
-        for(i=0; i<3; i++){
-            buffer[0][i]= 1 << (9 - 1);
-        }
+                topleft[i] = top[i];
+                top[i]     = buffer[x+1][i];
 
-        for(y = 0; y < height; y++) {
-            const int modified_predictor= y ? predictor : 1;
-            uint8_t *ptr = p->data[0] + (linesize * y);
+                left[i]    = buffer[x][i];
 
-            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < width*3*4){
-                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
-                return -1;
-            }
+                diff = ((left[i] - pred + 0x100) & 0x1FF) - 0x100;
 
-            for(i=0; i<3; i++){
-                top[i]= left[i]= topleft[i]= buffer[0][i];
+                if (i == 0)
+                    ff_mjpeg_encode_dc(pb, diff, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
+                else
+                    ff_mjpeg_encode_dc(pb, diff, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
             }
-            for(x = 0; x < width; x++) {
-                if(avctx->pix_fmt == AV_PIX_FMT_BGR24){
-                    buffer[x][1] = ptr[3*x+0] - ptr[3*x+1] + 0x100;
-                    buffer[x][2] = ptr[3*x+2] - ptr[3*x+1] + 0x100;
-                    buffer[x][0] = (ptr[3*x+0] + 2*ptr[3*x+1] + ptr[3*x+2])>>2;
-                }else{
-                    buffer[x][1] = ptr[4*x+0] - ptr[4*x+1] + 0x100;
-                    buffer[x][2] = ptr[4*x+2] - ptr[4*x+1] + 0x100;
-                    buffer[x][0] = (ptr[4*x+0] + 2*ptr[4*x+1] + ptr[4*x+2])>>2;
-                }
-
-                for(i=0;i<3;i++) {
-                    int pred, diff;
-
-                    PREDICT(pred, topleft[i], top[i], left[i], modified_predictor);
-
-                    topleft[i]= top[i];
-                    top[i]= buffer[x+1][i];
+        }
+    }
 
-                    left[i]= buffer[x][i];
+    return 0;
+}
 
-                    diff= ((left[i] - pred + 0x100)&0x1FF) - 0x100;
+static inline void ljpeg_encode_yuv_mb(LJpegEncContext *s, PutBitContext *pb,
+                                       const AVFrame *frame, int predictor,
+                                       int mb_x, int mb_y)
+{
+    int i;
+
+    if (mb_x == 0 || mb_y == 0) {
+        for (i = 0; i < 3; i++) {
+            uint8_t *ptr;
+            int x, y, h, v, linesize;
+            h = s->hsample[i];
+            v = s->vsample[i];
+            linesize = frame->linesize[i];
+
+            for (y = 0; y < v; y++) {
+                for (x = 0; x < h; x++) {
+                    int pred;
+
+                    ptr = frame->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+                    if (y == 0 && mb_y == 0) {
+                        if (x == 0 && mb_x == 0)
+                            pred = 128;
+                        else
+                            pred = ptr[-1];
+                    } else {
+                        if (x == 0 && mb_x == 0) {
+                            pred = ptr[-linesize];
+                        } else {
+                            PREDICT(pred, ptr[-linesize - 1], ptr[-linesize],
+                                    ptr[-1], predictor);
+                        }
+                    }
 
-                    if(i==0)
-                        ff_mjpeg_encode_dc(s, diff, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
+                    if (i == 0)
+                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
                     else
-                        ff_mjpeg_encode_dc(s, diff, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
+                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
                 }
             }
         }
-    }else{
-        int mb_x, mb_y, i;
-
-        for(mb_y = 0; mb_y < mb_height; mb_y++) {
-            if(s->pb.buf_end - s->pb.buf - (put_bits_count(&s->pb)>>3) < mb_width * 4 * 3 * s->mjpeg_hsample[0] * s->mjpeg_vsample[0]){
-                av_log(s->avctx, AV_LOG_ERROR, "encoded frame too large\n");
-                return -1;
-            }
-            for(mb_x = 0; mb_x < mb_width; mb_x++) {
-                if(mb_x==0 || mb_y==0){
-                    for(i=0;i<3;i++) {
-                        uint8_t *ptr;
-                        int x, y, h, v, linesize;
-                        h = s->mjpeg_hsample[i];
-                        v = s->mjpeg_vsample[i];
-                        linesize= p->linesize[i];
-
-                        for(y=0; y<v; y++){
-                            for(x=0; x<h; x++){
-                                int pred;
-
-                                ptr = p->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
-                                if(y==0 && mb_y==0){
-                                    if(x==0 && mb_x==0){
-                                        pred= 128;
-                                    }else{
-                                        pred= ptr[-1];
-                                    }
-                                }else{
-                                    if(x==0 && mb_x==0){
-                                        pred= ptr[-linesize];
-                                    }else{
-                                        PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
-                                    }
-                                }
-
-                                if(i==0)
-                                    ff_mjpeg_encode_dc(s, *ptr - pred, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
-                                else
-                                    ff_mjpeg_encode_dc(s, *ptr - pred, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
-                            }
-                        }
-                    }
-                }else{
-                    for(i=0;i<3;i++) {
-                        uint8_t *ptr;
-                        int x, y, h, v, linesize;
-                        h = s->mjpeg_hsample[i];
-                        v = s->mjpeg_vsample[i];
-                        linesize= p->linesize[i];
-
-                        for(y=0; y<v; y++){
-                            for(x=0; x<h; x++){
-                                int pred;
-
-                                ptr = p->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
-                                PREDICT(pred, ptr[-linesize-1], ptr[-linesize], ptr[-1], predictor);
-
-                                if(i==0)
-                                    ff_mjpeg_encode_dc(s, *ptr - pred, m->huff_size_dc_luminance, m->huff_code_dc_luminance); //FIXME ugly
-                                else
-                                    ff_mjpeg_encode_dc(s, *ptr - pred, m->huff_size_dc_chrominance, m->huff_code_dc_chrominance);
-                            }
-                        }
-                    }
+    } else {
+        for (i = 0; i < 3; i++) {
+            uint8_t *ptr;
+            int x, y, h, v, linesize;
+            h = s->hsample[i];
+            v = s->vsample[i];
+            linesize = frame->linesize[i];
+
+            for (y = 0; y < v; y++) {
+                for (x = 0; x < h; x++) {
+                    int pred;
+
+                    ptr = frame->data[i] + (linesize * (v * mb_y + y)) + (h * mb_x + x); //FIXME optimize this crap
+                    PREDICT(pred, ptr[-linesize - 1], ptr[-linesize], ptr[-1], predictor);
+
+                    if (i == 0)
+                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_luminance, s->huff_code_dc_luminance); //FIXME ugly
+                    else
+                        ff_mjpeg_encode_dc(pb, *ptr - pred, s->huff_size_dc_chrominance, s->huff_code_dc_chrominance);
                 }
             }
         }
     }
+}
+
+static int ljpeg_encode_yuv(AVCodecContext *avctx, PutBitContext *pb,
+                            const AVFrame *frame)
+{
+    const int predictor = avctx->prediction_method + 1;
+    LJpegEncContext *s  = avctx->priv_data;
+    const int mb_width  = (avctx->width  + s->hsample[0] - 1) / s->hsample[0];
+    const int mb_height = (avctx->height + s->vsample[0] - 1) / s->vsample[0];
+    int mb_x, mb_y;
+
+    for (mb_y = 0; mb_y < mb_height; mb_y++) {
+        if (pb->buf_end - pb->buf - (put_bits_count(pb) >> 3) <
+            mb_width * 4 * 3 * s->hsample[0] * s->vsample[0]) {
+            av_log(avctx, AV_LOG_ERROR, "encoded frame too large\n");
+            return -1;
+        }
+
+        for (mb_x = 0; mb_x < mb_width; mb_x++)
+            ljpeg_encode_yuv_mb(s, pb, frame, predictor, mb_x, mb_y);
+    }
+
+    return 0;
+}
+
+static int ljpeg_encode_frame(AVCodecContext *avctx, AVPacket *pkt,
+                              const AVFrame *pict, int *got_packet)
+{
+    LJpegEncContext *s = avctx->priv_data;
+    PutBitContext pb;
+    const int width  = avctx->width;
+    const int height = avctx->height;
+    const int mb_width  = (width  + s->hsample[0] - 1) / s->hsample[0];
+    const int mb_height = (height + s->vsample[0] - 1) / s->vsample[0];
+    int max_pkt_size = FF_MIN_BUFFER_SIZE;
+    int ret, header_bits;
+
+    if(    avctx->pix_fmt == AV_PIX_FMT_BGR0
+        || avctx->pix_fmt == AV_PIX_FMT_BGRA
+        || avctx->pix_fmt == AV_PIX_FMT_BGR24)
+        max_pkt_size += width * height * 3 * 4;
+    else {
+        max_pkt_size += mb_width * mb_height * 3 * 4
+                        * s->hsample[0] * s->vsample[0];
+    }
+
+    if ((ret = ff_alloc_packet2(avctx, pkt, max_pkt_size)) < 0)
+        return ret;
+
+    init_put_bits(&pb, pkt->data, pkt->size);
+
+    ff_mjpeg_encode_picture_header(avctx, &pb, &s->scantable,
+                                   s->matrix);
+
+    header_bits = put_bits_count(&pb);
+
+    if(    avctx->pix_fmt == AV_PIX_FMT_BGR0
+        || avctx->pix_fmt == AV_PIX_FMT_BGRA
+        || avctx->pix_fmt == AV_PIX_FMT_BGR24)
+        ret = ljpeg_encode_bgr(avctx, &pb, pict);
+    else
+        ret = ljpeg_encode_yuv(avctx, &pb, pict);
+    if (ret < 0)
+        return ret;
 
     emms_c();
 
-    av_assert0(s->esc_pos == s->header_bits >> 3);
-    ff_mjpeg_encode_stuffing(s);
-    ff_mjpeg_encode_picture_trailer(s);
-    s->picture_number++;
-    flush_put_bits(&s->pb);
-    pkt->size   = put_bits_ptr(&s->pb) - s->pb.buf;
+    ff_mjpeg_escape_FF(&pb, header_bits >> 3);
+    ff_mjpeg_encode_picture_trailer(&pb, header_bits);
+
+    flush_put_bits(&pb);
+    pkt->size   = put_bits_ptr(&pb) - pb.buf;
     pkt->flags |= AV_PKT_FLAG_KEY;
     *got_packet = 1;
 
     return 0;
-//    return (put_bits_count(&f->pb)+7)/8;
 }
 
+static av_cold int ljpeg_encode_close(AVCodecContext *avctx)
+{
+    LJpegEncContext *s = avctx->priv_data;
+
+    av_frame_free(&avctx->coded_frame);
+    av_freep(&s->scratch);
+
+    return 0;
+}
+
+static av_cold int ljpeg_encode_init(AVCodecContext *avctx)
+{
+    LJpegEncContext *s = avctx->priv_data;
+
+    if ((avctx->pix_fmt == AV_PIX_FMT_YUV420P ||
+         avctx->pix_fmt == AV_PIX_FMT_YUV422P ||
+         avctx->pix_fmt == AV_PIX_FMT_YUV444P) &&
+        avctx->strict_std_compliance > FF_COMPLIANCE_UNOFFICIAL) {
+        av_log(avctx, AV_LOG_ERROR,
+               "Limited range YUV is non-standard, set strict_std_compliance to "
+               "at least unofficial to use it.\n");
+        return AVERROR(EINVAL);
+    }
+
+    avctx->coded_frame = av_frame_alloc();
+    if (!avctx->coded_frame)
+        return AVERROR(ENOMEM);
 
-AVCodec ff_ljpeg_encoder = { //FIXME avoid MPV_* lossless JPEG should not need them
+    avctx->coded_frame->pict_type = AV_PICTURE_TYPE_I;
+    avctx->coded_frame->key_frame = 1;
+
+    s->scratch = av_malloc_array(avctx->width + 1, sizeof(*s->scratch));
+
+    ff_dsputil_init(&s->dsp, avctx);
+    ff_init_scantable(s->dsp.idct_permutation, &s->scantable, ff_zigzag_direct);
+
+    ff_mjpeg_init_hvsample(avctx, s->hsample, s->vsample);
+
+    ff_mjpeg_build_huffman_codes(s->huff_size_dc_luminance,
+                                 s->huff_code_dc_luminance,
+                                 avpriv_mjpeg_bits_dc_luminance,
+                                 avpriv_mjpeg_val_dc);
+    ff_mjpeg_build_huffman_codes(s->huff_size_dc_chrominance,
+                                 s->huff_code_dc_chrominance,
+                                 avpriv_mjpeg_bits_dc_chrominance,
+                                 avpriv_mjpeg_val_dc);
+
+    return 0;
+}
+
+AVCodec ff_ljpeg_encoder = {
     .name           = "ljpeg",
+    .long_name      = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
     .type           = AVMEDIA_TYPE_VIDEO,
     .id             = AV_CODEC_ID_LJPEG,
-    .priv_data_size = sizeof(MpegEncContext),
-    .init           = ff_MPV_encode_init,
-    .encode2        = encode_picture_lossless,
-    .close          = ff_MPV_encode_end,
+    .priv_data_size = sizeof(LJpegEncContext),
+    .init           = ljpeg_encode_init,
+    .encode2        = ljpeg_encode_frame,
+    .close          = ljpeg_encode_close,
     .pix_fmts       = (const enum AVPixelFormat[]){
-        AV_PIX_FMT_BGR24, AV_PIX_FMT_BGRA, AV_PIX_FMT_BGR0,
+        AV_PIX_FMT_BGR24   , AV_PIX_FMT_BGRA    , AV_PIX_FMT_BGR0,
         AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ422P,
-        AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUV422P,
+        AV_PIX_FMT_YUV420P , AV_PIX_FMT_YUV444P , AV_PIX_FMT_YUV422P,
         AV_PIX_FMT_NONE},
-    .long_name      = NULL_IF_CONFIG_SMALL("Lossless JPEG"),
 };
```
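For context: the per-sample work done in both `ljpeg_encode_bgr` and `ljpeg_encode_yuv_mb` above is plain lossless-JPEG DPCM — pick a predictor from the causal neighbours (left, top, top-left), then Huffman-code the 9-bit-wrapped difference. The sketch below is a minimal standalone illustration of that step, not part of the patch; the function and variable names are made up for the example, and it assumes the standard predictor table from ITU-T T.81 (Table H.1), which is what FFmpeg's `PREDICT` macro is understood to implement. The `0x100`/`0x1FF` wrap is taken directly from the diff above.

```c
#include <stdint.h>
#include <stdio.h>

/* Hypothetical illustration of the lossless-JPEG prediction step used by the
 * encoder above: predictor modes 1..7 from ITU-T T.81 Table H.1. Mode 1 is
 * what the patch forces on the first image line (modified_predictor). */
static int predict(int topleft, int top, int left, int mode)
{
    switch (mode) {
    case 1:  return left;
    case 2:  return top;
    case 3:  return topleft;
    case 4:  return left + top - topleft;
    case 5:  return left + ((top - topleft) >> 1);
    case 6:  return top  + ((left - topleft) >> 1);
    default: return (left + top) >> 1;            /* mode 7 */
    }
}

int main(void)
{
    /* Causal neighbourhood of one 8-bit sample: top row, then current row. */
    uint8_t topleft = 100, top = 104;
    uint8_t left    = 101, cur = 106;

    int pred = predict(topleft, top, left, 4);    /* predictor 4: left+top-topleft */

    /* Same 9-bit wrap the encoder applies before Huffman-coding the difference. */
    int diff = ((cur - pred + 0x100) & 0x1FF) - 0x100;

    printf("pred=%d diff=%d reconstructed=%d\n",
           pred, diff, (pred + diff) & 0xFF);     /* a decoder inverts by adding */
    return 0;
}
```

The wrap keeps the coded difference within a signed 9-bit range so the decoder can reconstruct the sample exactly by adding `diff` back to the same prediction, which is why the scheme stays lossless.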
