guix/gnu/packages/patches/libquicktime-ffmpeg.patch

Submitted By: Ken Moffat <ken at linuxfromscratch dot org>
Date: 2018-05-12
Initial Package Version: 1.2.4
Upstream Status: Defunct
Origin: Gentoo
Description: Accumulated fixes, up to and including building with ffmpeg-4.
Taken from Gentoo: their libav-9.patch, ffmpeg2.patch, CVE-2016-2399.patch,
ffmpeg29.patch and ffmpeg4.patch, plus seds over lqt_ffmpeg.c, video.c and
audio.c in plugins/ffmpeg/ to change CODEC_ID to AV_CODEC_ID.
Build-tested only.
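As an illustration only (not part of the patch proper): where renaming every
call site is not practical, the FF_/CODEC_ -> AV_ macro transition can also be
bridged with a small compatibility header, in the spirit of the #ifndef guard
this patch adds for AVCODEC_MAX_AUDIO_FRAME_SIZE in audio.c. A minimal sketch,
assuming an older libavcodec whose headers still ship only the unprefixed
macro names; sources already converted to the new spelling then build against
both old and new headers:

/* compat-ffmpeg.h -- sketch only, not shipped by libquicktime or FFmpeg.
   Map the AV_-prefixed macro names back onto their pre-rename equivalents
   when the installed libavcodec predates the rename; on a current
   libavcodec the AV_ names already exist and these blocks are skipped. */
#include <libavcodec/avcodec.h>

#ifndef AV_INPUT_BUFFER_PADDING_SIZE
#define AV_INPUT_BUFFER_PADDING_SIZE FF_INPUT_BUFFER_PADDING_SIZE
#endif

#ifndef AV_CODEC_FLAG_GLOBAL_HEADER
#define AV_CODEC_FLAG_GLOBAL_HEADER CODEC_FLAG_GLOBAL_HEADER
#endif

#ifndef AV_CODEC_FLAG_QSCALE
#define AV_CODEC_FLAG_QSCALE CODEC_FLAG_QSCALE
#endif

/* The CODEC_ID_* -> AV_CODEC_ID_* rename cannot be handled this way: the
   AV_CODEC_ID_* names are enum values, not macros, so #ifndef never sees
   them -- which is why the sed over lqt_ffmpeg.c, video.c and audio.c is
   done instead. */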
diff -Naur a/plugins/ffmpeg/audio.c b/plugins/ffmpeg/audio.c
--- a/plugins/ffmpeg/audio.c 2012-03-29 20:44:28.000000000 +0100
+++ b/plugins/ffmpeg/audio.c 2018-05-11 23:15:21.057985300 +0100
@@ -45,6 +45,11 @@
#define ENCODE_AUDIO 1
#endif
+#ifndef AVCODEC_MAX_AUDIO_FRAME_SIZE
+/* from libavcodec/avcodec.h dated Dec 23 2012 */
+#define AVCODEC_MAX_AUDIO_FRAME_SIZE 192000 // 1 second of 48khz 32bit audio
+#endif
+
/* The following code was ported from gmerlin_avdecoder (http://gmerlin.sourceforge.net) */
/* MPEG Audio header parsing code */
@@ -540,7 +545,7 @@
#if DECODE_AUDIO3 || DECODE_AUDIO4
codec->pkt.data = codec->chunk_buffer;
- codec->pkt.size = packet_size + FF_INPUT_BUFFER_PADDING_SIZE;
+ codec->pkt.size = packet_size + AV_INPUT_BUFFER_PADDING_SIZE;
#if DECODE_AUDIO4
frame_bytes = avcodec_decode_audio4(codec->avctx, &f,
@@ -578,7 +583,7 @@
(codec->sample_buffer_end - codec->sample_buffer_start)],
&bytes_decoded,
codec->chunk_buffer,
- packet_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ packet_size + AV_INPUT_BUFFER_PADDING_SIZE);
if(frame_bytes < 0)
{
lqt_log(file, LQT_LOG_ERROR, LOG_DOMAIN, "avcodec_decode_audio2 error");
@@ -626,7 +631,7 @@
{
/* If the codec is mp3, make sure to decode the very last frame */
- if((codec->avctx->codec_id == CODEC_ID_MP3) &&
+ if((codec->avctx->codec_id == AV_CODEC_ID_MP3) &&
(codec->bytes_in_chunk_buffer >= 4))
{
if(!mpa_decode_header(&mph, codec->chunk_buffer, (const mpa_header*)0))
@@ -640,13 +645,13 @@
return 0;
}
- if(codec->chunk_buffer_alloc < mph.frame_bytes + FF_INPUT_BUFFER_PADDING_SIZE)
+ if(codec->chunk_buffer_alloc < mph.frame_bytes + AV_INPUT_BUFFER_PADDING_SIZE)
{
- codec->chunk_buffer_alloc = mph.frame_bytes + FF_INPUT_BUFFER_PADDING_SIZE;
+ codec->chunk_buffer_alloc = mph.frame_bytes + AV_INPUT_BUFFER_PADDING_SIZE;
codec->chunk_buffer = realloc(codec->chunk_buffer, codec->chunk_buffer_alloc);
}
memset(codec->chunk_buffer + codec->bytes_in_chunk_buffer, 0,
- mph.frame_bytes - codec->bytes_in_chunk_buffer + FF_INPUT_BUFFER_PADDING_SIZE);
+ mph.frame_bytes - codec->bytes_in_chunk_buffer + AV_INPUT_BUFFER_PADDING_SIZE);
num_samples = mph.samples_per_frame;
codec->bytes_in_chunk_buffer = mph.frame_bytes;
}
@@ -690,12 +695,12 @@
{
- /* BIG NOTE: We pass extra FF_INPUT_BUFFER_PADDING_SIZE for the buffer size
+ /* BIG NOTE: We pass extra AV_INPUT_BUFFER_PADDING_SIZE for the buffer size
because we know, that lqt_read_audio_chunk allocates 16 extra bytes for us */
/* Some really broken mp3 files have the header bytes split across 2 chunks */
- if(codec->avctx->codec_id == CODEC_ID_MP3)
+ if(codec->avctx->codec_id == AV_CODEC_ID_MP3)
{
if(codec->bytes_in_chunk_buffer < 4)
{
@@ -756,7 +761,7 @@
#if DECODE_AUDIO3 || DECODE_AUDIO4
codec->pkt.data = &codec->chunk_buffer[bytes_used];
- codec->pkt.size = codec->bytes_in_chunk_buffer + FF_INPUT_BUFFER_PADDING_SIZE;
+ codec->pkt.size = codec->bytes_in_chunk_buffer + AV_INPUT_BUFFER_PADDING_SIZE;
#if DECODE_AUDIO4
@@ -793,7 +798,7 @@
(codec->sample_buffer_end - codec->sample_buffer_start)],
&bytes_decoded,
&codec->chunk_buffer[bytes_used],
- codec->bytes_in_chunk_buffer + FF_INPUT_BUFFER_PADDING_SIZE);
+ codec->bytes_in_chunk_buffer + AV_INPUT_BUFFER_PADDING_SIZE);
#endif
if(frame_bytes < 0)
{
@@ -806,7 +811,7 @@
if(bytes_decoded < 0)
{
- if(codec->avctx->codec_id == CODEC_ID_MP3)
+ if(codec->avctx->codec_id == AV_CODEC_ID_MP3)
{
/* For mp3, bytes_decoded < 0 means, that the frame should be muted */
memset(&codec->sample_buffer[track_map->channels * (codec->sample_buffer_end -
@@ -833,7 +838,7 @@
}
}
- /* This happens because ffmpeg adds FF_INPUT_BUFFER_PADDING_SIZE to the bytes returned */
+ /* This happens because ffmpeg adds AV_INPUT_BUFFER_PADDING_SIZE to the bytes returned */
if(codec->bytes_in_chunk_buffer < 0)
codec->bytes_in_chunk_buffer = 0;
@@ -866,8 +871,8 @@
quicktime_audio_map_t *track_map = &file->atracks[track];
quicktime_ffmpeg_audio_codec_t *codec = track_map->codec->priv;
- if((codec->decoder->id == CODEC_ID_MP2) ||
- (codec->decoder->id == CODEC_ID_MP3))
+ if((codec->decoder->id == AV_CODEC_ID_MP2) ||
+ (codec->decoder->id == AV_CODEC_ID_MP3))
{
mpa_header h;
uint32_t header;
@@ -909,7 +914,7 @@
else
track_map->ci.bitrate = h.bitrate;
}
- else if(codec->decoder->id == CODEC_ID_AC3)
+ else if(codec->decoder->id == AV_CODEC_ID_AC3)
{
a52_header h;
uint8_t * ptr;
@@ -986,7 +991,7 @@
#endif
/* Some codecs need extra stuff */
- if(codec->decoder->id == CODEC_ID_ALAC)
+ if(codec->decoder->id == AV_CODEC_ID_ALAC)
{
header = quicktime_wave_get_user_atom(track_map->track, "alac", &header_len);
if(header)
@@ -995,7 +1000,7 @@
codec->avctx->extradata_size = header_len;
}
}
- if(codec->decoder->id == CODEC_ID_QDM2)
+ if(codec->decoder->id == AV_CODEC_ID_QDM2)
{
header = quicktime_wave_get_user_atom(track_map->track, "QDCA", &header_len);
if(header)
@@ -1261,7 +1266,7 @@
pkt.data = codec->chunk_buffer;
pkt.size = codec->chunk_buffer_alloc;
- avcodec_get_frame_defaults(&f);
+ av_frame_unref(&f);
f.nb_samples = codec->avctx->frame_size;
avcodec_fill_audio_frame(&f, channels, codec->avctx->sample_fmt,
@@ -1495,9 +1500,9 @@
codec_base->decode_audio = lqt_ffmpeg_decode_audio;
codec_base->set_parameter = set_parameter;
- if((decoder->id == CODEC_ID_MP3) || (decoder->id == CODEC_ID_MP2))
+ if((decoder->id == AV_CODEC_ID_MP3) || (decoder->id == AV_CODEC_ID_MP2))
codec_base->read_packet = read_packet_mpa;
- else if(decoder->id == CODEC_ID_AC3)
+ else if(decoder->id == AV_CODEC_ID_AC3)
{
codec_base->write_packet = write_packet_ac3;
codec_base->read_packet = read_packet_ac3;
diff -Naur a/plugins/ffmpeg/ffmpeg.h b/plugins/ffmpeg/ffmpeg.h
--- a/plugins/ffmpeg/ffmpeg.h 2012-02-15 19:48:30.000000000 +0000
+++ b/plugins/ffmpeg/ffmpeg.h 2018-05-11 23:10:24.204992468 +0100
@@ -27,6 +27,7 @@
#include <quicktime/qtprivate.h>
#include AVCODEC_HEADER
+#include <libavutil/mem.h>
void quicktime_init_video_codec_ffmpeg(quicktime_codec_t * codec,
quicktime_video_map_t *vtrack,
diff -Naur a/plugins/ffmpeg/lqt_ffmpeg.c b/plugins/ffmpeg/lqt_ffmpeg.c
--- a/plugins/ffmpeg/lqt_ffmpeg.c 2012-02-15 19:48:30.000000000 +0000
+++ b/plugins/ffmpeg/lqt_ffmpeg.c 2018-05-11 23:15:07.583985628 +0100
@@ -386,7 +386,7 @@
struct CODECIDMAP codecidmap_v[] =
{
{
- .id = CODEC_ID_MPEG1VIDEO,
+ .id = AV_CODEC_ID_MPEG1VIDEO,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -397,7 +397,7 @@
.wav_ids = { LQT_WAV_ID_NONE }
},
{
- .id = CODEC_ID_MPEG4,
+ .id = AV_CODEC_ID_MPEG4,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -415,7 +415,7 @@
.compression_id = LQT_COMPRESSION_MPEG4_ASP,
},
{
- .id = CODEC_ID_MSMPEG4V1,
+ .id = AV_CODEC_ID_MSMPEG4V1,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -426,7 +426,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_MSMPEG4V2,
+ .id = AV_CODEC_ID_MSMPEG4V2,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -437,7 +437,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_MSMPEG4V3,
+ .id = AV_CODEC_ID_MSMPEG4V3,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -453,7 +453,7 @@
.do_encode = 1,
},
{
- .id = CODEC_ID_MSMPEG4V3,
+ .id = AV_CODEC_ID_MSMPEG4V3,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -468,7 +468,7 @@
},
#if 0
{
- .id = CODEC_ID_WMV1,
+ .id = AV_CODEC_ID_WMV1,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -481,7 +481,7 @@
},
#endif
{
- .id = CODEC_ID_H263,
+ .id = AV_CODEC_ID_H263,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -493,7 +493,7 @@
.compatibility_flags = LQT_FILE_QT_OLD | LQT_FILE_QT | LQT_FILE_MP4 | LQT_FILE_3GP,
},
{
- .id = CODEC_ID_H263,
+ .id = AV_CODEC_ID_H263,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -508,7 +508,7 @@
.do_encode = 1,
},
{
- .id = CODEC_ID_H264,
+ .id = AV_CODEC_ID_H264,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -519,7 +519,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_H263P,
+ .id = AV_CODEC_ID_H263P,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -533,7 +533,7 @@
.do_encode = 1,
},
{
- .id = CODEC_ID_H263I,
+ .id = AV_CODEC_ID_H263I,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -544,7 +544,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_SVQ1,
+ .id = AV_CODEC_ID_SVQ1,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -555,7 +555,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_SVQ3,
+ .id = AV_CODEC_ID_SVQ3,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -566,7 +566,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_MJPEG,
+ .id = AV_CODEC_ID_MJPEG,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -580,7 +580,7 @@
.do_encode = 1,
},
{
- .id = CODEC_ID_MJPEGB,
+ .id = AV_CODEC_ID_MJPEGB,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -594,7 +594,7 @@
},
#if LIBAVCODEC_BUILD >= 3346688
{
- .id = CODEC_ID_TARGA,
+ .id = AV_CODEC_ID_TARGA,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -606,7 +606,7 @@
#endif
#if LIBAVCODEC_BUILD >= 3347456
{
- .id = CODEC_ID_TIFF,
+ .id = AV_CODEC_ID_TIFF,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -617,7 +617,7 @@
},
#endif
{
- .id = CODEC_ID_8BPS,
+ .id = AV_CODEC_ID_8BPS,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -627,7 +627,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_INDEO3,
+ .id = AV_CODEC_ID_INDEO3,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -638,7 +638,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_RPZA,
+ .id = AV_CODEC_ID_RPZA,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -648,7 +648,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_SMC,
+ .id = AV_CODEC_ID_SMC,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -658,7 +658,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_CINEPAK,
+ .id = AV_CODEC_ID_CINEPAK,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -669,7 +669,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_CYUV,
+ .id = AV_CODEC_ID_CYUV,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -680,7 +680,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_QTRLE,
+ .id = AV_CODEC_ID_QTRLE,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -693,7 +693,7 @@
.encoding_colormodels = (int[]){ BC_RGB888, BC_RGBA8888, LQT_COLORMODEL_NONE },
},
{
- .id = CODEC_ID_MSRLE,
+ .id = AV_CODEC_ID_MSRLE,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -703,7 +703,7 @@
.wav_ids = { LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_DVVIDEO,
+ .id = AV_CODEC_ID_DVVIDEO,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -719,7 +719,7 @@
.image_sizes = image_sizes_dv,
},
{
- .id = CODEC_ID_DVVIDEO,
+ .id = AV_CODEC_ID_DVVIDEO,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -735,7 +735,7 @@
.image_sizes = image_sizes_dv,
},
{
- .id = CODEC_ID_DVVIDEO,
+ .id = AV_CODEC_ID_DVVIDEO,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -751,7 +751,7 @@
},
/* DVCPRO HD (decoding only for now) */
{
- .id = CODEC_ID_DVVIDEO,
+ .id = AV_CODEC_ID_DVVIDEO,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -772,7 +772,7 @@
// .do_encode = 1
},
{
- .id = CODEC_ID_FFVHUFF,
+ .id = AV_CODEC_ID_FFVHUFF,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -785,7 +785,7 @@
.do_encode = 1
},
{
- .id = CODEC_ID_FFV1,
+ .id = AV_CODEC_ID_FFV1,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -801,7 +801,7 @@
},
#if LIBAVCODEC_BUILD >= 3352576
{
- .id = CODEC_ID_DNXHD,
+ .id = AV_CODEC_ID_DNXHD,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -817,7 +817,7 @@
},
#endif
{
- .id = CODEC_ID_MPEG2VIDEO,
+ .id = AV_CODEC_ID_MPEG2VIDEO,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -839,7 +839,7 @@
struct CODECIDMAP codecidmap_a[] =
{
{
- .id = CODEC_ID_MP3,
+ .id = AV_CODEC_ID_MP3,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -851,7 +851,7 @@
.wav_ids = { 0x50, 0x55, LQT_WAV_ID_NONE },
},
{
- .id = CODEC_ID_MP2,
+ .id = AV_CODEC_ID_MP2,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -866,7 +866,7 @@
.compression_id = LQT_COMPRESSION_MP2,
},
{
- .id = CODEC_ID_AC3,
+ .id = AV_CODEC_ID_AC3,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -881,7 +881,7 @@
.compression_id = LQT_COMPRESSION_AC3,
},
{
- .id = CODEC_ID_QDM2,
+ .id = AV_CODEC_ID_QDM2,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -896,7 +896,7 @@
#if 1
/* Doesn't work as long as audio chunks are not split into VBR "Samples" */
{
- .id = CODEC_ID_ALAC,
+ .id = AV_CODEC_ID_ALAC,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -909,7 +909,7 @@
#if 1
/* Sounds ugly */
{
- .id = CODEC_ID_ADPCM_MS,
+ .id = AV_CODEC_ID_ADPCM_MS,
.index = -1,
.encoder = NULL,
.decoder = NULL,
@@ -922,7 +922,7 @@
#if 1
/* Sounds ugly */
{
- .id = CODEC_ID_ADPCM_IMA_WAV,
+ .id = AV_CODEC_ID_ADPCM_IMA_WAV,
.index = -1,
.encoder = NULL,
.decoder = NULL,
diff -Naur a/plugins/ffmpeg/params.c b/plugins/ffmpeg/params.c
--- a/plugins/ffmpeg/params.c 2012-03-07 14:10:41.000000000 +0000
+++ b/plugins/ffmpeg/params.c 2018-05-11 23:11:59.803990160 +0100
@@ -101,6 +101,17 @@
} \
}
+#define PARAM_DICT_INT(name, dict_name) \
+ { \
+ if(!strcasecmp(name, key)) \
+ { \
+ char buf[128]; \
+ snprintf(buf, sizeof(buf), "%d", *(int*)value); \
+ av_dict_set(options, dict_name, buf, 0); \
+ found = 1; \
+ } \
+ }
+
#define PARAM_DICT_FLAG(name, dict_name) \
{ \
if(!strcasecmp(name, key)) \
@@ -113,16 +124,6 @@
}
-enum_t me_method[] =
- {
- { "Zero", ME_ZERO },
- { "Phods", ME_PHODS },
- { "Log", ME_LOG },
- { "X1", ME_X1 },
- { "Epzs", ME_EPZS },
- { "Full", ME_FULL }
- };
-
enum_t prediction_method[] =
{
{ "Left", FF_PRED_LEFT },
@@ -152,15 +153,6 @@
{ "Rate distoration", FF_MB_DECISION_RD }
};
-enum_t coder_type[] =
- {
- { "VLC", FF_CODER_TYPE_VLC },
- { "Arithmetic", FF_CODER_TYPE_AC },
- { "Raw", FF_CODER_TYPE_RAW },
- { "RLE", FF_CODER_TYPE_RLE },
- { "Deflate", FF_CODER_TYPE_DEFLATE },
- };
-
#define PARAM_ENUM(name, var, arr) \
if(!strcasecmp(key, name)) \
{ \
@@ -192,7 +184,7 @@
PARAM_INT_SCALE("ff_bit_rate_audio",bit_rate,1000);
PARAM_INT_SCALE("ff_bit_rate_video",bit_rate,1000);
PARAM_INT_SCALE("ff_bit_rate_tolerance",bit_rate_tolerance,1000);
- PARAM_ENUM("ff_me_method",me_method,me_method);
+ PARAM_DICT_INT("ff_me_method","motion-est");
PARAM_INT("ff_gop_size",gop_size);
PARAM_FLOAT("ff_qcompress",qcompress);
PARAM_FLOAT("ff_qblur",qblur);
@@ -202,17 +194,23 @@
PARAM_INT("ff_max_b_frames",max_b_frames);
PARAM_FLOAT("ff_b_quant_factor",b_quant_factor);
PARAM_INT("ff_b_frame_strategy",b_frame_strategy);
+
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ PARAM_DICT_INT("ff_luma_elim_threshold","luma_elim_threshold");
+ PARAM_DICT_INT("ff_chroma_elim_threshold","chroma_elim_threshold");
+#else
PARAM_INT("ff_luma_elim_threshold",luma_elim_threshold);
PARAM_INT("ff_chroma_elim_threshold",chroma_elim_threshold);
+#endif
+
PARAM_INT("ff_strict_std_compliance",strict_std_compliance);
PARAM_QP2LAMBDA("ff_b_quant_offset",b_quant_offset);
PARAM_INT("ff_rc_min_rate",rc_min_rate);
PARAM_INT("ff_rc_max_rate",rc_max_rate);
PARAM_INT_SCALE("ff_rc_buffer_size",rc_buffer_size,1000);
- PARAM_FLOAT("ff_rc_buffer_aggressivity",rc_buffer_aggressivity);
PARAM_FLOAT("ff_i_quant_factor",i_quant_factor);
PARAM_QP2LAMBDA("ff_i_quant_offset",i_quant_offset);
- PARAM_FLOAT("ff_rc_initial_cplx",rc_initial_cplx);
+ PARAM_DICT_INT("ff_rc_initial_cplx","rc_init_cplx");
PARAM_FLOAT("ff_lumi_masking",lumi_masking);
PARAM_FLOAT("ff_temporal_cplx_masking",temporal_cplx_masking);
PARAM_FLOAT("ff_spatial_cplx_masking",spatial_cplx_masking);
@@ -237,58 +235,68 @@
PARAM_INT("ff_me_range",me_range);
PARAM_ENUM("ff_mb_decision",mb_decision,mb_decision);
PARAM_INT("ff_scenechange_threshold",scenechange_threshold);
- PARAM_QP2LAMBDA("ff_lmin", lmin);
- PARAM_QP2LAMBDA("ff_lmax", lmax);
+ PARAM_DICT_INT("ff_lmin", "lmin");
+ PARAM_DICT_INT("ff_lmax", "lmax");
PARAM_INT("ff_noise_reduction",noise_reduction);
PARAM_INT_SCALE("ff_rc_initial_buffer_occupancy",rc_initial_buffer_occupancy,1000);
+
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ PARAM_DICT_INT("ff_inter_threshold","inter_threshold");
+ PARAM_DICT_INT("ff_quantizer_noise_shaping","quantizer_noise_shaping");
+#else
PARAM_INT("ff_inter_threshold",inter_threshold);
PARAM_INT("ff_quantizer_noise_shaping",quantizer_noise_shaping);
+#endif
+
PARAM_INT("ff_thread_count",thread_count);
- PARAM_INT("ff_me_threshold",me_threshold);
- PARAM_INT("ff_mb_threshold",mb_threshold);
PARAM_INT("ff_nsse_weight",nsse_weight);
- PARAM_FLOAT("ff_border_masking",border_masking);
+ PARAM_DICT_INT("ff_border_masking","border_mask");
PARAM_QP2LAMBDA("ff_mb_lmin", mb_lmin);
PARAM_QP2LAMBDA("ff_mb_lmax", mb_lmax);
PARAM_INT("ff_me_penalty_compensation",me_penalty_compensation);
PARAM_INT("ff_bidir_refine",bidir_refine);
PARAM_INT("ff_brd_scale",brd_scale);
- PARAM_INT("ff_scenechange_factor",scenechange_factor);
- PARAM_FLAG("ff_flag_qscale",CODEC_FLAG_QSCALE);
- PARAM_FLAG("ff_flag_4mv",CODEC_FLAG_4MV);
- PARAM_FLAG("ff_flag_qpel",CODEC_FLAG_QPEL);
- PARAM_FLAG("ff_flag_gmc",CODEC_FLAG_GMC);
- PARAM_FLAG("ff_flag_mv0",CODEC_FLAG_MV0);
+ PARAM_FLAG("ff_flag_qscale",AV_CODEC_FLAG_QSCALE);
+ PARAM_FLAG("ff_flag_4mv",AV_CODEC_FLAG_4MV);
+ PARAM_FLAG("ff_flag_qpel",AV_CODEC_FLAG_QPEL);
+ PARAM_DICT_FLAG("ff_flag_gmc","gmc");
+ PARAM_DICT_FLAG("ff_flag_mv0","mpv_flags");
// PARAM_FLAG("ff_flag_part",CODEC_FLAG_PART); // Unused
- PARAM_FLAG("ff_flag_gray",CODEC_FLAG_GRAY);
- PARAM_FLAG("ff_flag_emu_edge",CODEC_FLAG_EMU_EDGE);
- PARAM_FLAG("ff_flag_normalize_aqp",CODEC_FLAG_NORMALIZE_AQP);
+ PARAM_FLAG("ff_flag_gray",AV_CODEC_FLAG_GRAY);
+ PARAM_DICT_FLAG("ff_flag_normalize_aqp","naq");
// PARAM_FLAG("ff_flag_alt_scan",CODEC_FLAG_ALT_SCAN); // Unused
#if LIBAVCODEC_VERSION_INT < ((52<<16)+(0<<8)+0)
PARAM_FLAG("ff_flag_trellis_quant",CODEC_FLAG_TRELLIS_QUANT);
#else
PARAM_INT("ff_trellis",trellis);
#endif
- PARAM_FLAG("ff_flag_bitexact",CODEC_FLAG_BITEXACT);
- PARAM_FLAG("ff_flag_ac_pred",CODEC_FLAG_AC_PRED);
+ PARAM_FLAG("ff_flag_bitexact",AV_CODEC_FLAG_BITEXACT);
+ PARAM_FLAG("ff_flag_ac_pred",AV_CODEC_FLAG_AC_PRED);
// PARAM_FLAG("ff_flag_h263p_umv",CODEC_FLAG_H263P_UMV); // Unused
- PARAM_FLAG("ff_flag_cbp_rd",CODEC_FLAG_CBP_RD);
- PARAM_FLAG("ff_flag_qp_rd",CODEC_FLAG_QP_RD);
+
+#if LIBAVCODEC_VERSION_MAJOR >= 55
+ PARAM_DICT_FLAG("ff_flag_cbp_rd","cbp_rd");
+ PARAM_DICT_FLAG("ff_flag_qp_rd","qp_rd");
+ PARAM_DICT_FLAG("ff_flag2_strict_gop","strict_gop");
+#else
+ PARAM_FLAG("ff_flag_cbp_rd",AV_CODEC_FLAG_CBP_RD);
+ PARAM_FLAG("ff_flag_qp_rd",AV_CODEC_FLAG_QP_RD);
+ PARAM_FLAG2("ff_flag2_strict_gop",AV_CODEC_FLAG2_STRICT_GOP);
+#endif
#if LIBAVCODEC_VERSION_MAJOR >= 54
PARAM_DICT_FLAG("ff_flag_h263p_aiv", "aiv");
PARAM_DICT_FLAG("ff_flag_obmc","obmc");
PARAM_DICT_FLAG("ff_flag_h263p_slice_struct","structured_slices");
#else
- PARAM_FLAG("ff_flag_h263p_aiv",CODEC_FLAG_H263P_AIV);
- PARAM_FLAG("ff_flag_obmc",CODEC_FLAG_OBMC);
- PARAM_FLAG("ff_flag_h263p_slice_struct",CODEC_FLAG_H263P_SLICE_STRUCT);
+ PARAM_FLAG("ff_flag_h263p_aiv",AV_CODEC_FLAG_H263P_AIV);
+ PARAM_FLAG("ff_flag_obmc",AV_CODEC_FLAG_OBMC);
+ PARAM_FLAG("ff_flag_h263p_slice_struct",AV_CODEC_FLAG_H263P_SLICE_STRUCT);
#endif
- PARAM_FLAG("ff_flag_loop_filter",CODEC_FLAG_LOOP_FILTER);
- PARAM_FLAG("ff_flag_closed_gop",CODEC_FLAG_CLOSED_GOP);
- PARAM_FLAG2("ff_flag2_fast",CODEC_FLAG2_FAST);
- PARAM_FLAG2("ff_flag2_strict_gop",CODEC_FLAG2_STRICT_GOP);
- PARAM_ENUM("ff_coder_type",coder_type,coder_type);
+ PARAM_FLAG("ff_flag_loop_filter",AV_CODEC_FLAG_LOOP_FILTER);
+ PARAM_FLAG("ff_flag_closed_gop",AV_CODEC_FLAG_CLOSED_GOP);
+ PARAM_FLAG2("ff_flag2_fast",AV_CODEC_FLAG2_FAST);
+ PARAM_DICT_INT("ff_coder_type","coder");
}
diff -Naur a/plugins/ffmpeg/params.h b/plugins/ffmpeg/params.h
--- a/plugins/ffmpeg/params.h 2011-05-11 16:13:39.000000000 +0100
+++ b/plugins/ffmpeg/params.h 2018-05-11 23:11:59.803990160 +0100
@@ -149,7 +149,7 @@
.type = LQT_PARAMETER_INT, \
.val_default = { .val_int = 0 }, \
.val_min = { .val_int = 0 }, \
- .val_max = { .val_int = FF_MAX_B_FRAMES }, \
+ .val_max = { .val_int = 16 }, \
.help_string = TRS("Maximum number of B-frames between non B-frames") \
}
diff -Naur a/plugins/ffmpeg/video.c b/plugins/ffmpeg/video.c
--- a/plugins/ffmpeg/video.c 2012-02-25 19:46:56.000000000 +0000
+++ b/plugins/ffmpeg/video.c 2018-05-11 23:15:15.697985432 +0100
@@ -37,10 +37,10 @@
#endif
-#ifdef PIX_FMT_YUV422P10
-#define PIX_FMT_YUV422P10_OR_DUMMY PIX_FMT_YUV422P10
+#ifdef AV_PIX_FMT_YUV422P10
+#define AV_PIX_FMT_YUV422P10_OR_DUMMY AV_PIX_FMT_YUV422P10
#else
-#define PIX_FMT_YUV422P10_OR_DUMMY -1234
+#define AV_PIX_FMT_YUV422P10_OR_DUMMY -1234
#endif
#if LIBAVCODEC_VERSION_INT >= ((54<<16)|(1<<8)|0)
@@ -90,9 +90,9 @@
int imx_bitrate;
int imx_strip_vbi;
- /* In some cases FFMpeg would report something like PIX_FMT_YUV422P, while
- we would like to treat it as PIX_FMT_YUVJ422P. It's only used for decoding */
- enum PixelFormat reinterpret_pix_fmt;
+ /* In some cases FFMpeg would report something like AV_PIX_FMT_YUV422P, while
+ we would like to treat it as AV_PIX_FMT_YUVJ422P. It's only used for decoding */
+ enum AVPixelFormat reinterpret_pix_fmt;
int is_imx;
int y_offset;
@@ -137,42 +137,42 @@
static const struct
{
- enum PixelFormat ffmpeg_id;
+ enum AVPixelFormat ffmpeg_id;
int lqt_id;
int exact;
}
colormodels[] =
{
- { PIX_FMT_YUV420P, BC_YUV420P, 1 }, ///< Planar YUV 4:2:0 (1 Cr & Cb sample per 2x2 Y samples)
+ { AV_PIX_FMT_YUV420P, BC_YUV420P, 1 }, ///< Planar YUV 4:2:0 (1 Cr & Cb sample per 2x2 Y samples)
#if LIBAVUTIL_VERSION_INT < (50<<16)
- { PIX_FMT_YUV422, BC_YUV422, 1 },
+ { AV_PIX_FMT_YUV422, BC_YUV422, 1 },
#else
- { PIX_FMT_YUYV422, BC_YUV422, 1 },
+ { AV_PIX_FMT_YUYV422, BC_YUV422, 1 },
#endif
- { PIX_FMT_RGB24, BC_RGB888, 1 }, ///< Packed pixel, 3 bytes per pixel, RGBRGB...
- { PIX_FMT_BGR24, BC_BGR888, 1 }, ///< Packed pixel, 3 bytes per pixel, BGRBGR...
- { PIX_FMT_YUV422P, BC_YUV422P, 1 }, ///< Planar YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
- { PIX_FMT_YUV444P, BC_YUV444P, 1 }, ///< Planar YUV 4:4:4 (1 Cr & Cb sample per 1x1 Y samples)
- { PIX_FMT_YUV411P, BC_YUV411P, 1 }, ///< Planar YUV 4:1:1 (1 Cr & Cb sample per 4x1 Y samples)
- { PIX_FMT_YUV422P16, BC_YUV422P16, 1 }, ///< Planar 16 bit YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
-#ifdef PIX_FMT_YUV422P10
- { PIX_FMT_YUV422P10, BC_YUV422P10, 1 }, ///< 10 bit samples in uint16_t containers, planar 4:2:2
-#endif
- { PIX_FMT_RGB565, BC_RGB565, 1 }, ///< always stored in cpu endianness
- { PIX_FMT_YUVJ420P, BC_YUVJ420P, 1 }, ///< Planar YUV 4:2:0 full scale (jpeg)
- { PIX_FMT_YUVJ422P, BC_YUVJ422P, 1 }, ///< Planar YUV 4:2:2 full scale (jpeg)
- { PIX_FMT_YUVJ444P, BC_YUVJ444P, 1 }, ///< Planar YUV 4:4:4 full scale (jpeg)
+ { AV_PIX_FMT_RGB24, BC_RGB888, 1 }, ///< Packed pixel, 3 bytes per pixel, RGBRGB...
+ { AV_PIX_FMT_BGR24, BC_BGR888, 1 }, ///< Packed pixel, 3 bytes per pixel, BGRBGR...
+ { AV_PIX_FMT_YUV422P, BC_YUV422P, 1 }, ///< Planar YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
+ { AV_PIX_FMT_YUV444P, BC_YUV444P, 1 }, ///< Planar YUV 4:4:4 (1 Cr & Cb sample per 1x1 Y samples)
+ { AV_PIX_FMT_YUV411P, BC_YUV411P, 1 }, ///< Planar YUV 4:1:1 (1 Cr & Cb sample per 4x1 Y samples)
+ { AV_PIX_FMT_YUV422P16, BC_YUV422P16, 1 }, ///< Planar 16 bit YUV 4:2:2 (1 Cr & Cb sample per 2x1 Y samples)
+#ifdef AV_PIX_FMT_YUV422P10
+ { AV_PIX_FMT_YUV422P10, BC_YUV422P10, 1 }, ///< 10 bit samples in uint16_t containers, planar 4:2:2
+#endif
+ { AV_PIX_FMT_RGB565, BC_RGB565, 1 }, ///< always stored in cpu endianness
+ { AV_PIX_FMT_YUVJ420P, BC_YUVJ420P, 1 }, ///< Planar YUV 4:2:0 full scale (jpeg)
+ { AV_PIX_FMT_YUVJ422P, BC_YUVJ422P, 1 }, ///< Planar YUV 4:2:2 full scale (jpeg)
+ { AV_PIX_FMT_YUVJ444P, BC_YUVJ444P, 1 }, ///< Planar YUV 4:4:4 full scale (jpeg)
#if LIBAVUTIL_VERSION_INT < (50<<16)
- { PIX_FMT_RGBA32, BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
+ { AV_PIX_FMT_RGBA32, BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
#else
- { PIX_FMT_RGB32, BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
+ { AV_PIX_FMT_RGB32, BC_RGBA8888, 0 }, ///< Packed pixel, 4 bytes per pixel, BGRABGRA...
#endif
- { PIX_FMT_RGB555, BC_RGB888, 0 }, ///< always stored in cpu endianness, most significant bit to 1
- { PIX_FMT_GRAY8, BC_RGB888, 0 },
- { PIX_FMT_MONOWHITE, BC_RGB888, 0 }, ///< 0 is white
- { PIX_FMT_MONOBLACK, BC_RGB888, 0 }, ///< 0 is black
- { PIX_FMT_PAL8, BC_RGB888, 0 }, ///< 8 bit with RGBA palette
- { PIX_FMT_YUV410P, BC_YUV420P, 0 }, ///< Planar YUV 4:1:0 (1 Cr & Cb sample per 4x4 Y samples)
+ { AV_PIX_FMT_RGB555, BC_RGB888, 0 }, ///< always stored in cpu endianness, most significant bit to 1
+ { AV_PIX_FMT_GRAY8, BC_RGB888, 0 },
+ { AV_PIX_FMT_MONOWHITE, BC_RGB888, 0 }, ///< 0 is white
+ { AV_PIX_FMT_MONOBLACK, BC_RGB888, 0 }, ///< 0 is black
+ { AV_PIX_FMT_PAL8, BC_RGB888, 0 }, ///< 8 bit with RGBA palette
+ { AV_PIX_FMT_YUV410P, BC_YUV420P, 0 }, ///< Planar YUV 4:1:0 (1 Cr & Cb sample per 4x4 Y samples)
};
static const struct
@@ -343,16 +343,16 @@
if (!codec->pix_fmts)
return 0;
- for (i = 0; codec->pix_fmts[i] != PIX_FMT_NONE; ++i)
+ for (i = 0; codec->pix_fmts[i] != AV_PIX_FMT_NONE; ++i)
{
- if (codec->pix_fmts[i] == PIX_FMT_YUV422P10_OR_DUMMY)
+ if (codec->pix_fmts[i] == AV_PIX_FMT_YUV422P10_OR_DUMMY)
return 1;
}
return 0;
}
-static enum PixelFormat lqt_ffmpeg_get_ffmpeg_colormodel(int id)
+static enum AVPixelFormat lqt_ffmpeg_get_ffmpeg_colormodel(int id)
{
int i;
@@ -361,10 +361,10 @@
if(colormodels[i].lqt_id == id)
return colormodels[i].ffmpeg_id;
}
- return PIX_FMT_NB;
+ return AV_PIX_FMT_NB;
}
-static int lqt_ffmpeg_get_lqt_colormodel(enum PixelFormat id, int * exact)
+static int lqt_ffmpeg_get_lqt_colormodel(enum AVPixelFormat id, int * exact)
{
int i;
@@ -400,26 +400,26 @@
codec->reinterpret_pix_fmt = codec->avctx->pix_fmt;
/* First we try codec-specific colormodel matching. */
- if(codec->decoder->id == CODEC_ID_DNXHD)
+ if(codec->decoder->id == AV_CODEC_ID_DNXHD)
{
- /* FFMpeg supports PIX_FMT_YUV422P and PIX_FMT_YUV422P10 for DNxHD, which
- we sometimes interpret as PIX_FMT_YUVJ422P and PIX_FMT_YUVJ422P10. */
- if (codec->avctx->pix_fmt == PIX_FMT_YUV422P || codec->avctx->pix_fmt == PIX_FMT_YUV422P10_OR_DUMMY)
+ /* FFMpeg supports AV_PIX_FMT_YUV422P and AV_PIX_FMT_YUV422P10 for DNxHD, which
+ we sometimes interpret as AV_PIX_FMT_YUVJ422P and AV_PIX_FMT_YUVJ422P10. */
+ if (codec->avctx->pix_fmt == AV_PIX_FMT_YUV422P || codec->avctx->pix_fmt == AV_PIX_FMT_YUV422P10_OR_DUMMY)
{
- int p10 = (codec->avctx->pix_fmt == PIX_FMT_YUV422P10_OR_DUMMY);
+ int p10 = (codec->avctx->pix_fmt == AV_PIX_FMT_YUV422P10_OR_DUMMY);
*exact = 1;
if (lqt_ffmpeg_get_avid_yuv_range(vtrack->track) == AVID_FULL_YUV_RANGE)
{
vtrack->stream_cmodel = p10 ? BC_YUVJ422P10 : BC_YUVJ422P;
- codec->reinterpret_pix_fmt = p10 ? PIX_FMT_YUV422P10_OR_DUMMY : PIX_FMT_YUVJ422P;
- // Note: reinterpret_pix_fmt should really be PIX_FMT_YUVJ422P10, except
+ codec->reinterpret_pix_fmt = p10 ? AV_PIX_FMT_YUV422P10_OR_DUMMY : AV_PIX_FMT_YUVJ422P;
+ // Note: reinterpret_pix_fmt should really be AV_PIX_FMT_YUVJ422P10, except
// there is no such colormodel in FFMpeg. Fortunately, it's not a problem
// in this case, as reinterpret_pix_fmt is only used when *exact == 0.
}
else
{
vtrack->stream_cmodel = p10 ? BC_YUV422P10 : BC_YUV422P;
- codec->reinterpret_pix_fmt = p10 ? PIX_FMT_YUV422P10_OR_DUMMY : PIX_FMT_YUV422P;
+ codec->reinterpret_pix_fmt = p10 ? AV_PIX_FMT_YUV422P10_OR_DUMMY : AV_PIX_FMT_YUV422P;
}
return;
}
@@ -438,16 +438,16 @@
quicktime_ffmpeg_video_codec_t *codec = vtrack->codec->priv;
codec->avctx->pix_fmt = lqt_ffmpeg_get_ffmpeg_colormodel(vtrack->stream_cmodel);
- if (codec->encoder->id == CODEC_ID_DNXHD)
+ if (codec->encoder->id == AV_CODEC_ID_DNXHD)
{
- /* FFMpeg's DNxHD encoder only supports PIX_FMT_YUV422P and PIX_FMT_YUV422P10
- and doesn't know anything about PIX_FMT_YUVJ422P and PIX_FMT_YUVJ422P10
+ /* FFMpeg's DNxHD encoder only supports AV_PIX_FMT_YUV422P and AV_PIX_FMT_YUV422P10
+ and doesn't know anything about AV_PIX_FMT_YUVJ422P and AV_PIX_FMT_YUVJ422P10
(in fact, the latter doesn't even exist) */
- codec->avctx->pix_fmt = PIX_FMT_YUV422P;
+ codec->avctx->pix_fmt = AV_PIX_FMT_YUV422P;
if (vtrack->stream_cmodel == BC_YUV422P10 || vtrack->stream_cmodel == BC_YUVJ422P10)
{
if (lqt_tenbit_dnxhd_supported(codec->encoder))
- codec->avctx->pix_fmt = PIX_FMT_YUV422P10_OR_DUMMY;
+ codec->avctx->pix_fmt = AV_PIX_FMT_YUV422P10_OR_DUMMY;
}
}
}
@@ -458,7 +458,7 @@
/* From avcodec.h: */
/*
- * PIX_FMT_RGBA32 is handled in an endian-specific manner. A RGBA
+ * AV_PIX_FMT_RGBA32 is handled in an endian-specific manner. A RGBA
* color is put together as:
* (A << 24) | (R << 16) | (G << 8) | B
* This is stored as BGRA on little endian CPU architectures and ARGB on
@@ -530,7 +530,7 @@
*/
static void convert_image_decode(quicktime_ffmpeg_video_codec_t *codec,
- AVFrame * in_frame, enum PixelFormat in_format,
+ AVFrame * in_frame, enum AVPixelFormat in_format,
unsigned char ** out_frame, int out_format,
int width, int height, int row_span, int row_span_uv)
{
@@ -547,9 +547,9 @@
* RGBA format like in ffmpeg??
*/
#if LIBAVUTIL_VERSION_INT < (50<<16)
- if((in_format == PIX_FMT_RGBA32) && (out_format == BC_RGBA8888))
+ if((in_format == AV_PIX_FMT_RGBA32) && (out_format == BC_RGBA8888))
#else
- if((in_format == PIX_FMT_RGB32) && (out_format == BC_RGBA8888))
+ if((in_format == AV_PIX_FMT_RGB32) && (out_format == BC_RGBA8888))
#endif
{
convert_image_decode_rgba(in_frame, out_frame, width, height, codec->y_offset);
@@ -728,13 +728,13 @@
/* Set extradata: It's done differently for each codec */
- if(codec->decoder->id == CODEC_ID_SVQ3)
+ if(codec->decoder->id == AV_CODEC_ID_SVQ3)
{
extradata = trak->mdia.minf.stbl.stsd.table[0].table_raw + 4;
extradata_size = trak->mdia.minf.stbl.stsd.table[0].table_raw_size - 4;
}
- else if(codec->decoder->id == CODEC_ID_H264)
+ else if(codec->decoder->id == AV_CODEC_ID_H264)
{
user_atom = quicktime_stsd_get_user_atom(trak, "avcC", &user_atom_len);
@@ -753,7 +753,7 @@
}
}
- else if(codec->decoder->id == CODEC_ID_MPEG4)
+ else if(codec->decoder->id == AV_CODEC_ID_MPEG4)
{
if(trak->mdia.minf.stbl.stsd.table[0].has_esds)
{
@@ -781,7 +781,7 @@
if(extradata)
{
codec->extradata =
- calloc(1, extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
+ calloc(1, extradata_size + AV_INPUT_BUFFER_PADDING_SIZE);
memcpy(codec->extradata, extradata, extradata_size);
codec->avctx->extradata_size = extradata_size;
codec->avctx->extradata = codec->extradata;
@@ -829,7 +829,7 @@
if(avcodec_open2(codec->avctx, codec->decoder, NULL) != 0)
return -1;
#endif
- codec->frame = avcodec_alloc_frame();
+ codec->frame = av_frame_alloc();
vtrack->stream_cmodel = LQT_COLORMODEL_NONE;
codec->initialized = 1;
}
@@ -929,10 +929,10 @@
#ifdef HAVE_LIBSWSCALE
#if LIBAVUTIL_VERSION_INT < (50<<16)
- if(!((codec->avctx->pix_fmt == PIX_FMT_RGBA32) &&
+ if(!((codec->avctx->pix_fmt == AV_PIX_FMT_RGBA32) &&
(vtrack->stream_cmodel == BC_RGBA8888)))
#else
- if(!((codec->avctx->pix_fmt == PIX_FMT_RGB32) &&
+ if(!((codec->avctx->pix_fmt == AV_PIX_FMT_RGB32) &&
(vtrack->stream_cmodel == BC_RGBA8888)))
#endif
{
@@ -947,15 +947,15 @@
}
#endif
}
- if(codec->decoder->id == CODEC_ID_DVVIDEO)
+ if(codec->decoder->id == AV_CODEC_ID_DVVIDEO)
{
if(vtrack->stream_cmodel == BC_YUV420P)
vtrack->chroma_placement = LQT_CHROMA_PLACEMENT_DVPAL;
vtrack->interlace_mode = LQT_INTERLACE_BOTTOM_FIRST;
vtrack->ci.id = LQT_COMPRESSION_DV;
}
- else if((codec->decoder->id == CODEC_ID_MPEG4) ||
- (codec->decoder->id == CODEC_ID_H264))
+ else if((codec->decoder->id == AV_CODEC_ID_MPEG4) ||
+ (codec->decoder->id == AV_CODEC_ID_H264))
{
if(vtrack->stream_cmodel == BC_YUV420P)
vtrack->chroma_placement = LQT_CHROMA_PLACEMENT_MPEG2;
@@ -1140,8 +1140,8 @@
codec->avctx->qmin = 1;
codec->avctx->qmax = 3;
codec->avctx->rtp_payload_size = 1; // ??
- codec->avctx->rc_buffer_aggressivity = 0.25;
- codec->avctx->flags |= CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_LOW_DELAY;
+ av_dict_set(&codec->options, "rc_buf_aggressivity", "0.25", 0);
+ codec->avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT|AV_CODEC_FLAG_LOW_DELAY;
#if (LIBAVCODEC_VERSION_MAJOR < 54)
codec->avctx->flags2 |= CODEC_FLAG2_INTRA_VLC|CODEC_FLAG2_NON_LINEAR_QUANT;
@@ -1299,13 +1299,13 @@
{
if(vtrack->stream_cmodel == BC_YUV420P)
{
- if(codec->encoder->id == CODEC_ID_MPEG4)
+ if(codec->encoder->id == AV_CODEC_ID_MPEG4)
{
vtrack->chroma_placement = LQT_CHROMA_PLACEMENT_MPEG2;
/* enable interlaced encoding */
vtrack->interlace_mode = LQT_INTERLACE_NONE;
}
- else if(codec->encoder->id == CODEC_ID_DVVIDEO)
+ else if(codec->encoder->id == AV_CODEC_ID_DVVIDEO)
{
vtrack->chroma_placement = LQT_CHROMA_PLACEMENT_DVPAL;
}
@@ -1318,7 +1318,7 @@
if(!codec->initialized)
{
- codec->frame = avcodec_alloc_frame();
+ codec->frame = av_frame_alloc();
/* time_base is 1/framerate for constant framerate */
@@ -1328,7 +1328,7 @@
// codec->avctx->time_base.den = 1;
// codec->avctx->time_base.num = lqt_video_time_scale(file, track);
- if(codec->avctx->flags & CODEC_FLAG_QSCALE)
+ if(codec->avctx->flags & AV_CODEC_FLAG_QSCALE)
codec->avctx->global_quality = codec->qscale;
codec->avctx->width = width;
@@ -1340,11 +1340,11 @@
codec->avctx->sample_aspect_ratio.num = pixel_width;
codec->avctx->sample_aspect_ratio.den = pixel_height;
/* Use global headers for mp4v */
- if(codec->encoder->id == CODEC_ID_MPEG4)
+ if(codec->encoder->id == AV_CODEC_ID_MPEG4)
{
if(!(file->file_type & (LQT_FILE_AVI|LQT_FILE_AVI_ODML)))
{
- codec->avctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ codec->avctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
codec->write_global_header = 1;
}
@@ -1360,16 +1360,16 @@
{
lqt_log(file, LQT_LOG_INFO, LOG_DOMAIN, "Enabling interlaced encoding");
codec->avctx->flags |=
- (CODEC_FLAG_INTERLACED_DCT|CODEC_FLAG_INTERLACED_ME|CODEC_FLAG_ALT_SCAN);
+ (AV_CODEC_FLAG_INTERLACED_DCT|AV_CODEC_FLAG_INTERLACED_ME|AV_CODEC_FLAG_ALT_SCAN);
}
#endif
}
- else if((codec->encoder->id == CODEC_ID_MSMPEG4V3) && (trak->strl) &&
+ else if((codec->encoder->id == AV_CODEC_ID_MSMPEG4V3) && (trak->strl) &&
!strncmp(trak->strl->strf.bh.biCompression, "DIV3", 4))
{
strncpy(trak->strl->strh.fccHandler, "div3", 4);
}
- else if((codec->encoder->id == CODEC_ID_H263) &&
+ else if((codec->encoder->id == AV_CODEC_ID_H263) &&
(file->file_type & (LQT_FILE_MP4|LQT_FILE_3GP)))
{
uint8_t d263_data[] =
@@ -1383,34 +1383,34 @@
strncpy(trak->mdia.minf.stbl.stsd.table[0].format,
"s263", 4);
}
- else if(codec->encoder->id == CODEC_ID_FFVHUFF)
+ else if(codec->encoder->id == AV_CODEC_ID_FFVHUFF)
{
if(!(file->file_type & (LQT_FILE_AVI|LQT_FILE_AVI_ODML)))
{
- codec->avctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
+ codec->avctx->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
codec->write_global_header = 1;
}
}
- else if(codec->encoder->id == CODEC_ID_QTRLE)
+ else if(codec->encoder->id == AV_CODEC_ID_QTRLE)
{
if(vtrack->stream_cmodel == BC_RGBA8888)
{
/* Libquicktime doesn't natively support a color model equivalent
- to PIX_FMT_ARGB, which is required for QTRLE with alpha channel.
+ to AV_PIX_FMT_ARGB, which is required for QTRLE with alpha channel.
So, we use BC_RGBA8888 and do ad hoc conversion below. */
- codec->avctx->pix_fmt = PIX_FMT_ARGB;
+ codec->avctx->pix_fmt = AV_PIX_FMT_ARGB;
vtrack->track->mdia.minf.stbl.stsd.table[0].depth = 32;
}
}
- else if(codec->encoder->id == CODEC_ID_DVVIDEO)
+ else if(codec->encoder->id == AV_CODEC_ID_DVVIDEO)
{
set_dv_fourcc(width, height, vtrack->stream_cmodel, trak);
}
- else if(codec->encoder->id == CODEC_ID_DNXHD)
+ else if(codec->encoder->id == AV_CODEC_ID_DNXHD)
{
if(vtrack->interlace_mode != LQT_INTERLACE_NONE)
{
- codec->avctx->flags |= CODEC_FLAG_INTERLACED_DCT;
+ codec->avctx->flags |= AV_CODEC_FLAG_INTERLACED_DCT;
}
}
else if(codec->is_imx)
@@ -1422,7 +1422,7 @@
if(codec->pass == 1)
{
codec->stats_file = fopen(codec->stats_filename, "w");
- codec->avctx->flags |= CODEC_FLAG_PASS1;
+ codec->avctx->flags |= AV_CODEC_FLAG_PASS1;
}
else if(codec->pass == codec->total_passes)
{
@@ -1438,7 +1438,7 @@
fclose(codec->stats_file);
codec->stats_file = (FILE*)0;
- codec->avctx->flags |= CODEC_FLAG_PASS2;
+ codec->avctx->flags |= AV_CODEC_FLAG_PASS2;
}
}
/* Open codec */
@@ -1467,7 +1467,7 @@
}
// codec->lqt_colormodel = ffmepg_2_lqt(codec->com.ffcodec_enc);
- if(codec->y_offset != 0 || codec->avctx->pix_fmt == PIX_FMT_ARGB)
+ if(codec->y_offset != 0 || codec->avctx->pix_fmt == AV_PIX_FMT_ARGB)
{
if(!codec->tmp_rows)
{
@@ -1492,7 +1492,7 @@
vtrack->stream_cmodel,
0, 0, 0, codec->y_offset);
}
- else if(codec->avctx->pix_fmt == PIX_FMT_ARGB)
+ else if(codec->avctx->pix_fmt == AV_PIX_FMT_ARGB)
{
convert_rgba_to_argb(row_pointers[0], vtrack->stream_row_span,
codec->tmp_rows[0], codec->tmp_row_span,
@@ -1516,7 +1516,7 @@
}
codec->frame->pts = vtrack->timestamp;
- if(codec->avctx->flags & CODEC_FLAG_QSCALE)
+ if(codec->avctx->flags & AV_CODEC_FLAG_QSCALE)
codec->frame->quality = codec->qscale;
#ifdef DO_INTERLACE
if(vtrack->interlace_mode != LQT_INTERLACE_NONE)
@@ -1558,12 +1558,12 @@
#endif
- if(!was_initialized && codec->encoder->id == CODEC_ID_DNXHD)
+ if(!was_initialized && codec->encoder->id == AV_CODEC_ID_DNXHD)
setup_avid_atoms(file, vtrack, codec->buffer, bytes_encoded);
if(bytes_encoded)
{
- if (pts == AV_NOPTS_VALUE || (codec->encoder->id == CODEC_ID_DNXHD && pts == 0))
+ if (pts == AV_NOPTS_VALUE || (codec->encoder->id == AV_CODEC_ID_DNXHD && pts == 0))
{
/* Some codecs don't bother generating presentation timestamps.
FFMpeg's DNxHD encoder doesn't even bother to set it to AV_NOPTS_VALUE. */
@@ -1590,17 +1590,16 @@
if(codec->write_global_header && !codec->global_header_written)
{
- if(codec->encoder->id == CODEC_ID_FFVHUFF)
+ if(codec->encoder->id == AV_CODEC_ID_FFVHUFF)
{
quicktime_user_atoms_add_atom(&trak->mdia.minf.stbl.stsd.table[0].user_atoms,
"glbl",
codec->avctx->extradata, codec->avctx->extradata_size );
}
- else if(codec->encoder->id == CODEC_ID_MPEG4)
+ else if(codec->encoder->id == AV_CODEC_ID_MPEG4)
{
int advanced = 0;
- if(codec->avctx->max_b_frames ||
- (codec->avctx->flags & (CODEC_FLAG_QPEL|CODEC_FLAG_GMC)))
+ if(codec->avctx->max_b_frames)
advanced = 1;
setup_header_mpeg4(file, track, codec->avctx->extradata,
@@ -1903,18 +1902,18 @@
codec_base->encode_video = lqt_ffmpeg_encode_video;
codec_base->set_pass = set_pass_ffmpeg;
- if(encoder->id == CODEC_ID_MPEG4)
+ if(encoder->id == AV_CODEC_ID_MPEG4)
{
codec_base->writes_compressed = writes_compressed_mpeg4;
codec_base->init_compressed = init_compressed_mpeg4;
codec_base->write_packet = write_packet_mpeg4;
}
- else if(encoder->id == CODEC_ID_MPEG2VIDEO)
+ else if(encoder->id == AV_CODEC_ID_MPEG2VIDEO)
{
codec_base->writes_compressed = writes_compressed_imx;
codec_base->init_compressed = init_compressed_imx;
}
- else if(encoder->id == CODEC_ID_DVVIDEO)
+ else if(encoder->id == AV_CODEC_ID_DVVIDEO)
{
codec_base->init_compressed = init_compressed_dv;
}
@@ -1922,7 +1921,7 @@
}
if(decoder)
{
- if(decoder->id == CODEC_ID_H264)
+ if(decoder->id == AV_CODEC_ID_H264)
codec_base->read_packet = read_packet_h264;
codec_base->decode_video = lqt_ffmpeg_decode_video;
}
diff -Naur a/src/util.c b/src/util.c
--- a/src/util.c 2011-01-07 14:40:47.000000000 +0000
+++ b/src/util.c 2018-05-11 23:11:04.367991499 +0100
@@ -340,9 +340,14 @@
void quicktime_read_pascal(quicktime_t *file, char *data)
{
- char len = quicktime_read_char(file);
- quicktime_read_data(file, (uint8_t*)data, len);
- data[(int)len] = 0;
+ int len = quicktime_read_char(file);
+ if ((len > 0) && (len < 256)) {
+ /* data[] is expected to be 256 bytes long */
+ quicktime_read_data(file, (uint8_t*)data, len);
+ data[len] = 0;
+ } else {
+ data[0] = 0;
+ }
}
void quicktime_write_pascal(quicktime_t *file, char *data)