mirror of https://git.ffmpeg.org/ffmpeg.git
synced 2025-12-06 14:59:59 +01:00
Compare commits
23 Commits
| SHA1 |
|---|
| 10506de9ad |
| af3fccfeff |
| ec1b5216fc |
| 066ff02621 |
| 398a70309e |
| acd13f1255 |
| 1c98cf4ddd |
| 63c1e291ef |
| 7ebc27e1fa |
| bc5777bdab |
| 7d23ccac8d |
| 2f04b78b95 |
| fd05e20650 |
| a163384467 |
| b9875b7583 |
| ebc1c49e41 |
| 6feec11e48 |
| 67126555fc |
| 041231fcd6 |
| 765fb1f224 |
| 5060a615c7 |
| 1665ac6a44 |
| 3c7e973430 |
Changelog
@@ -42,6 +42,7 @@ version 4.1:
 - xstack filter
 - pcm vidc decoder and encoder
 - (a)graphmonitor filter
+- yadif_cuda filter


 version 4.0:
RELEASE_NOTES (new file, 15 lines)
@@ -0,0 +1,15 @@
┌─────────────────────────────────────────────┐
│ RELEASE NOTES for FFmpeg 4.1 "al-Khwarizmi" │
└─────────────────────────────────────────────┘

The FFmpeg Project proudly presents FFmpeg 4.1 "al-Khwarizmi", about 6
months after the release of FFmpeg 4.0.

A complete Changelog is available at the root of the project, and the
complete Git history on https://git.ffmpeg.org/gitweb/ffmpeg.git

We hope you will like this release as much as we enjoyed working on it, and
as usual, if you have any questions about it, or any FFmpeg related topic,
feel free to join us on the #ffmpeg IRC channel (on irc.freenode.net) or ask
on the mailing-lists.
configure (vendored, 4 changes)
@@ -2957,6 +2957,7 @@ h264_rkmpp_decoder_deps="rkmpp"
 h264_rkmpp_decoder_select="h264_mp4toannexb_bsf"
 h264_vaapi_encoder_select="cbs_h264 vaapi_encode"
 h264_v4l2m2m_decoder_deps="v4l2_m2m h264_v4l2_m2m"
+h264_v4l2m2m_decoder_select="h264_mp4toannexb_bsf"
 h264_v4l2m2m_encoder_deps="v4l2_m2m h264_v4l2_m2m"
 hevc_amf_encoder_deps="amf"
 hevc_cuvid_decoder_deps="cuvid"
@@ -2971,6 +2972,7 @@ hevc_rkmpp_decoder_select="hevc_mp4toannexb_bsf"
 hevc_vaapi_encoder_deps="VAEncPictureParameterBufferHEVC"
 hevc_vaapi_encoder_select="cbs_h265 vaapi_encode"
 hevc_v4l2m2m_decoder_deps="v4l2_m2m hevc_v4l2_m2m"
+hevc_v4l2m2m_decoder_select="hevc_mp4toannexb_bsf"
 hevc_v4l2m2m_encoder_deps="v4l2_m2m hevc_v4l2_m2m"
 mjpeg_cuvid_decoder_deps="cuvid"
 mjpeg_qsv_encoder_deps="libmfx"
@@ -3180,6 +3182,7 @@ image2_alias_pix_demuxer_select="image2_demuxer"
 image2_brender_pix_demuxer_select="image2_demuxer"
 ipod_muxer_select="mov_muxer"
 ismv_muxer_select="mov_muxer"
+ivf_muxer_select="av1_metadata_bsf vp9_superframe_bsf"
 matroska_audio_muxer_select="matroska_muxer"
 matroska_demuxer_select="iso_media riffdec"
 matroska_demuxer_suggest="bzlib lzo zlib"
@@ -3481,6 +3484,7 @@ zscale_filter_deps="libzimg const_nan"
 scale_vaapi_filter_deps="vaapi"
 vpp_qsv_filter_deps="libmfx"
 vpp_qsv_filter_select="qsvvpp"
+yadif_cuda_filter_deps="cuda_sdk"

 # examples
 avio_dir_cmd_deps="avformat avutil"
doc/Doxyfile
@@ -38,7 +38,7 @@ PROJECT_NAME = FFmpeg
 # could be handy for archiving the generated documentation or if some version
 # control system is used.

-PROJECT_NUMBER =
+PROJECT_NUMBER = 4.1

 # Using the PROJECT_BRIEF tag one can provide an optional one line description
 # for a project that appears at the top of each page and should give viewer a
@@ -17943,6 +17943,64 @@ filter").
|
||||
It accepts the following parameters:
|
||||
|
||||
|
||||
@table @option
|
||||
|
||||
@item mode
|
||||
The interlacing mode to adopt. It accepts one of the following values:
|
||||
|
||||
@table @option
|
||||
@item 0, send_frame
|
||||
Output one frame for each frame.
|
||||
@item 1, send_field
|
||||
Output one frame for each field.
|
||||
@item 2, send_frame_nospatial
|
||||
Like @code{send_frame}, but it skips the spatial interlacing check.
|
||||
@item 3, send_field_nospatial
|
||||
Like @code{send_field}, but it skips the spatial interlacing check.
|
||||
@end table
|
||||
|
||||
The default value is @code{send_frame}.
|
||||
|
||||
@item parity
|
||||
The picture field parity assumed for the input interlaced video. It accepts one
|
||||
of the following values:
|
||||
|
||||
@table @option
|
||||
@item 0, tff
|
||||
Assume the top field is first.
|
||||
@item 1, bff
|
||||
Assume the bottom field is first.
|
||||
@item -1, auto
|
||||
Enable automatic detection of field parity.
|
||||
@end table
|
||||
|
||||
The default value is @code{auto}.
|
||||
If the interlacing is unknown or the decoder does not export this information,
|
||||
top field first will be assumed.
|
||||
|
||||
@item deint
|
||||
Specify which frames to deinterlace. Accept one of the following
|
||||
values:
|
||||
|
||||
@table @option
|
||||
@item 0, all
|
||||
Deinterlace all frames.
|
||||
@item 1, interlaced
|
||||
Only deinterlace frames marked as interlaced.
|
||||
@end table
|
||||
|
||||
The default value is @code{all}.
|
||||
@end table
|
||||
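(Editor's illustration, not part of the commit: a typical invocation of the software yadif filter with the options documented above; file names are arbitrary.)

    ffmpeg -i interlaced.ts -vf yadif=mode=send_field:parity=auto:deint=interlaced progressive.mp4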
@section yadif_cuda

Deinterlace the input video using the @ref{yadif} algorithm, but implemented
in CUDA so that it can work as part of a GPU accelerated pipeline with nvdec
and/or nvenc.

It accepts the following parameters:


@table @option

@item mode
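(Editor's illustration, not part of the commit: a full-GPU deinterlacing pipeline as enabled by this release might look like the following, assuming a build with NVDEC, NVENC and the CUDA SDK; exact hwaccel option names can vary by build.)

    ffmpeg -hwaccel cuda -hwaccel_output_format cuda -i interlaced.ts \
           -vf yadif_cuda=mode=send_field:parity=auto:deint=all \
           -c:v h264_nvenc progressive.mp4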
libavcodec/cavsdec.c
@@ -591,14 +591,21 @@ static int decode_residual_block(AVSContext *h, GetBitContext *gb,
 }

-static inline void decode_residual_chroma(AVSContext *h)
+static inline int decode_residual_chroma(AVSContext *h)
 {
-    if (h->cbp & (1 << 4))
-        decode_residual_block(h, &h->gb, chroma_dec, 0,
-                              ff_cavs_chroma_qp[h->qp], h->cu, h->c_stride);
-    if (h->cbp & (1 << 5))
-        decode_residual_block(h, &h->gb, chroma_dec, 0,
-                              ff_cavs_chroma_qp[h->qp], h->cv, h->c_stride);
+    if (h->cbp & (1 << 4)) {
+        int ret = decode_residual_block(h, &h->gb, chroma_dec, 0,
+                                        ff_cavs_chroma_qp[h->qp], h->cu, h->c_stride);
+        if (ret < 0)
+            return ret;
+    }
+    if (h->cbp & (1 << 5)) {
+        int ret = decode_residual_block(h, &h->gb, chroma_dec, 0,
+                                        ff_cavs_chroma_qp[h->qp], h->cv, h->c_stride);
+        if (ret < 0)
+            return ret;
+    }
+    return 0;
 }

 static inline int decode_residual_inter(AVSContext *h)
@@ -649,6 +656,7 @@ static int decode_mb_i(AVSContext *h, int cbp_code)
     uint8_t top[18];
     uint8_t *left = NULL;
     uint8_t *d;
+    int ret;

     ff_cavs_init_mb(h);

@@ -692,8 +700,11 @@ static int decode_mb_i(AVSContext *h, int cbp_code)
         ff_cavs_load_intra_pred_luma(h, top, &left, block);
         h->intra_pred_l[h->pred_mode_Y[scan3x3[block]]]
             (d, top, left, h->l_stride);
-        if (h->cbp & (1<<block))
-            decode_residual_block(h, gb, intra_dec, 1, h->qp, d, h->l_stride);
+        if (h->cbp & (1<<block)) {
+            ret = decode_residual_block(h, gb, intra_dec, 1, h->qp, d, h->l_stride);
+            if (ret < 0)
+                return ret;
+        }
     }

     /* chroma intra prediction */
@@ -703,7 +714,9 @@ static int decode_mb_i(AVSContext *h, int cbp_code)
     h->intra_pred_c[pred_mode_uv](h->cv, &h->top_border_v[h->mbx * 10],
                                   h->left_border_v, h->c_stride);

-    decode_residual_chroma(h);
+    ret = decode_residual_chroma(h);
+    if (ret < 0)
+        return ret;
     ff_cavs_filter(h, I_8X8);
     set_mv_intra(h);
     return 0;
libavcodec/cbs_av1.c
@@ -996,7 +996,10 @@ static int cbs_av1_read_unit(CodedBitstreamContext *ctx,
     case AV1_OBU_REDUNDANT_FRAME_HEADER:
         {
             err = cbs_av1_read_frame_header_obu(ctx, &gbc,
-                                                &obu->obu.frame_header);
+                                                &obu->obu.frame_header,
+                                                obu->header.obu_type ==
+                                                AV1_OBU_REDUNDANT_FRAME_HEADER,
+                                                unit->data_ref);
             if (err < 0)
                 return err;
         }
@@ -1016,7 +1019,8 @@ static int cbs_av1_read_unit(CodedBitstreamContext *ctx,
         break;
     case AV1_OBU_FRAME:
         {
-            err = cbs_av1_read_frame_obu(ctx, &gbc, &obu->obu.frame);
+            err = cbs_av1_read_frame_obu(ctx, &gbc, &obu->obu.frame,
+                                         unit->data_ref);
             if (err < 0)
                 return err;

@@ -1124,7 +1128,10 @@ static int cbs_av1_write_obu(CodedBitstreamContext *ctx,
     case AV1_OBU_REDUNDANT_FRAME_HEADER:
         {
             err = cbs_av1_write_frame_header_obu(ctx, pbc,
-                                                 &obu->obu.frame_header);
+                                                 &obu->obu.frame_header,
+                                                 obu->header.obu_type ==
+                                                 AV1_OBU_REDUNDANT_FRAME_HEADER,
+                                                 NULL);
             if (err < 0)
                 return err;
         }
@@ -1141,7 +1148,7 @@ static int cbs_av1_write_obu(CodedBitstreamContext *ctx,
         break;
     case AV1_OBU_FRAME:
         {
-            err = cbs_av1_write_frame_obu(ctx, pbc, &obu->obu.frame);
+            err = cbs_av1_write_frame_obu(ctx, pbc, &obu->obu.frame, NULL);
             if (err < 0)
                 return err;

@@ -1179,7 +1186,7 @@ static int cbs_av1_write_obu(CodedBitstreamContext *ctx,
         if (err < 0)
             return err;
         end_pos = put_bits_count(pbc);
-        obu->obu_size = (end_pos - start_pos + 7) / 8;
+        obu->obu_size = header_size = (end_pos - start_pos + 7) / 8;
     } else {
         // Empty OBU.
         obu->obu_size = 0;
@@ -1302,6 +1309,7 @@ static void cbs_av1_close(CodedBitstreamContext *ctx)
     CodedBitstreamAV1Context *priv = ctx->priv_data;

     av_buffer_unref(&priv->sequence_header_ref);
+    av_buffer_unref(&priv->frame_header_ref);

     av_freep(&priv->write_buffer);
 }
libavcodec/cbs_av1.h
@@ -87,8 +87,8 @@ typedef struct AV1RawSequenceHeader {
     uint8_t seq_level_idx[AV1_MAX_OPERATING_POINTS];
     uint8_t seq_tier[AV1_MAX_OPERATING_POINTS];
     uint8_t decoder_model_present_for_this_op[AV1_MAX_OPERATING_POINTS];
-    uint8_t  decoder_buffer_delay[AV1_MAX_OPERATING_POINTS];
-    uint8_t  encoder_buffer_delay[AV1_MAX_OPERATING_POINTS];
+    uint32_t decoder_buffer_delay[AV1_MAX_OPERATING_POINTS];
+    uint32_t encoder_buffer_delay[AV1_MAX_OPERATING_POINTS];
     uint8_t low_delay_mode_flag[AV1_MAX_OPERATING_POINTS];
     uint8_t initial_display_delay_present_for_this_op[AV1_MAX_OPERATING_POINTS];
     uint8_t initial_display_delay_minus_1[AV1_MAX_OPERATING_POINTS];
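(Editor's note on the uint8_t to uint32_t change, inferred from the AV1 specification since the commit message did not survive extraction: the buffer_delay fields are coded with a configurable width of up to 32 bits, so uint8_t storage would truncate them. A hedged sketch of the corresponding read, with hypothetical variable names:)

    // buffer_delay_length_minus_1 is a 5-bit field, so n ranges over 1..32.
    int n = buffer_delay_length_minus_1 + 1;
    uint32_t decoder_buffer_delay = get_bits_long(gb, n); // needs 32-bit storage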
@@ -399,7 +399,10 @@ typedef struct CodedBitstreamAV1Context {
     AV1RawSequenceHeader *sequence_header;
     AVBufferRef          *sequence_header_ref;

-    int seen_frame_header;
+    int          seen_frame_header;
+    AVBufferRef *frame_header_ref;
+    uint8_t     *frame_header;
+    size_t       frame_header_size;

     int temporal_id;
     int spatial_id;
libavcodec/cbs_av1_syntax_template.c
@@ -1463,24 +1463,90 @@ static int FUNC(uncompressed_header)(CodedBitstreamContext *ctx, RWContext *rw,
 }

 static int FUNC(frame_header_obu)(CodedBitstreamContext *ctx, RWContext *rw,
-                                  AV1RawFrameHeader *current)
+                                  AV1RawFrameHeader *current, int redundant,
+                                  AVBufferRef *rw_buffer_ref)
 {
     CodedBitstreamAV1Context *priv = ctx->priv_data;
-    int err;
-
-    HEADER("Frame Header");
+    int start_pos, fh_bits, fh_bytes, err;
+    uint8_t *fh_start;

     if (priv->seen_frame_header) {
-        // Nothing to do.
+        if (!redundant) {
+            av_log(ctx->log_ctx, AV_LOG_ERROR, "Invalid repeated "
+                   "frame header OBU.\n");
+            return AVERROR_INVALIDDATA;
+        } else {
+            GetBitContext fh;
+            size_t i, b;
+            uint32_t val;
+
+            HEADER("Redundant Frame Header");
+
+            av_assert0(priv->frame_header_ref && priv->frame_header);
+
+            init_get_bits(&fh, priv->frame_header,
+                          priv->frame_header_size);
+            for (i = 0; i < priv->frame_header_size; i += 8) {
+                b = FFMIN(priv->frame_header_size - i, 8);
+                val = get_bits(&fh, b);
+                xf(b, frame_header_copy[i],
+                   val, val, val, 1, i / 8);
+            }
+        }
     } else {
-        priv->seen_frame_header = 1;
+        if (redundant)
+            HEADER("Redundant Frame Header (used as Frame Header)");
+        else
+            HEADER("Frame Header");
+
+#ifdef READ
+        start_pos = get_bits_count(rw);
+#else
+        start_pos = put_bits_count(rw);
+#endif

         CHECK(FUNC(uncompressed_header)(ctx, rw, current));

         if (current->show_existing_frame) {
             priv->seen_frame_header = 0;
         } else {
             priv->seen_frame_header = 1;
+
+            av_buffer_unref(&priv->frame_header_ref);
+
+#ifdef READ
+            fh_bits  = get_bits_count(rw) - start_pos;
+            fh_start = (uint8_t*)rw->buffer + start_pos / 8;
+#else
+            // Need to flush the bitwriter so that we can copy its output,
+            // but use a copy so we don't affect the caller's structure.
+            {
+                PutBitContext tmp = *rw;
+                flush_put_bits(&tmp);
+            }
+
+            fh_bits  = put_bits_count(rw) - start_pos;
+            fh_start = rw->buf + start_pos / 8;
+#endif
+            fh_bytes = (fh_bits + 7) / 8;
+
+            priv->frame_header_size = fh_bits;
+
+            if (rw_buffer_ref) {
+                priv->frame_header_ref = av_buffer_ref(rw_buffer_ref);
+                if (!priv->frame_header_ref)
+                    return AVERROR(ENOMEM);
+                priv->frame_header = fh_start;
+            } else {
+                priv->frame_header_ref =
+                    av_buffer_alloc(fh_bytes + AV_INPUT_BUFFER_PADDING_SIZE);
+                if (!priv->frame_header_ref)
+                    return AVERROR(ENOMEM);
+                priv->frame_header = priv->frame_header_ref->data;
+                memcpy(priv->frame_header, fh_start, fh_bytes);
+            }
+        }
     }
 }
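(Editor's sketch, restating the bit-writer trick used above rather than adding anything new: flushing a shallow copy of the PutBitContext forces any bits still buffered in the writer out to the shared byte buffer, so those bytes can be copied, while the caller's writer position is left untouched.)

    PutBitContext tmp = *rw;  // shallow copy; shares rw's output buffer
    flush_put_bits(&tmp);     // pad and write pending bits via the copy only
    // rw->buf now holds (put_bits_count(rw) + 7) / 8 valid bytes to copy from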
@@ -1524,11 +1590,13 @@ static int FUNC(tile_group_obu)(CodedBitstreamContext *ctx, RWContext *rw,
 }

 static int FUNC(frame_obu)(CodedBitstreamContext *ctx, RWContext *rw,
-                           AV1RawFrame *current)
+                           AV1RawFrame *current,
+                           AVBufferRef *rw_buffer_ref)
 {
     int err;

-    CHECK(FUNC(frame_header_obu)(ctx, rw, &current->header));
+    CHECK(FUNC(frame_header_obu)(ctx, rw, &current->header,
+                                 0, rw_buffer_ref));

     CHECK(FUNC(byte_alignment)(ctx, rw));
libavcodec/mpeg4videodec.c
@@ -3056,6 +3056,7 @@ static int decode_studio_vop_header(Mpeg4DecContext *ctx, GetBitContext *gb)
     if (get_bits_left(gb) <= 32)
         return 0;

+    s->partitioned_frame = 0;
     s->decode_mb = mpeg4_decode_studio_mb;

     decode_smpte_tc(ctx, gb);
libavcodec/mpegaudio_parser.c
@@ -101,7 +101,7 @@ static int mpegaudio_parse(AVCodecParserContext *s1,
                            "MP3ADU full parser");
                 *poutbuf = NULL;
                 *poutbuf_size = 0;
-                return 0; /* parsers must not return error codes */
+                return buf_size; /* parsers must not return error codes */
             }

             break;
libavcodec/nvdec.c
@@ -601,7 +601,11 @@ int ff_nvdec_frame_params(AVCodecContext *avctx,
     frames_ctx->format            = AV_PIX_FMT_CUDA;
     frames_ctx->width             = (avctx->coded_width  + 1) & ~1;
     frames_ctx->height            = (avctx->coded_height + 1) & ~1;
-    frames_ctx->initial_pool_size = dpb_size;
+    /*
+     * We add two extra frames to the pool to account for deinterlacing filters
+     * holding onto their frames.
+     */
+    frames_ctx->initial_pool_size = dpb_size + 2;

     frames_ctx->free = nvdec_free_dummy;
     frames_ctx->pool = av_buffer_pool_init(0, nvdec_alloc_dummy);
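(Editor's note, spelling out the arithmetic behind the comment in the change: a temporal deinterlacer such as yadif_cuda holds references to a prev/cur/next triple, two of which are frames the decoder has already released, hence two extra surfaces beyond the codec's own DPB requirement.)

    /* Illustrative sizing only: */
    int pool_size = dpb_size + 2;  /* DPB + the two frames a deinterlacer still holds */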
libavcodec/prosumer.c
@@ -57,27 +57,25 @@ static int decompress(GetByteContext *gb, int size, PutByteContext *pb, const ui
     b = lut[2 * idx];

     while (1) {
-        if (bytestream2_get_bytes_left_p(pb) <= 0)
+        if (bytestream2_get_bytes_left_p(pb) <= 0 || bytestream2_get_eof(pb))
             return 0;
-        if (((b & 0xFF00u) != 0x8000u) || (b & 0xFFu)) {
+        if ((b & 0xFF00u) != 0x8000u || (b & 0xFFu)) {
             if ((b & 0xFF00u) != 0x8000u) {
                 bytestream2_put_le16(pb, b);
-            } else if (b & 0xFFu) {
+            } else {
                 idx = 0;
                 for (int i = 0; i < (b & 0xFFu); i++)
                     bytestream2_put_le32(pb, 0);
             }
             c = b >> 16;
             if (c & 0xFF00u) {
-                c = (((c >> 8) & 0xFFu) | (c & 0xFF00)) & 0xF00F;
                 fill = lut[2 * idx + 1];
-                if ((c & 0xFF00u) == 0x1000) {
+                if ((c & 0xF000u) == 0x1000) {
                     bytestream2_put_le16(pb, fill);
-                    c &= 0xFFFF00FFu;
                 } else {
                     bytestream2_put_le32(pb, fill);
-                    c &= 0xFFFF00FFu;
                 }
+                c = (c >> 8) & 0x0Fu;
             }
             while (c) {
                 a <<= 4;
libavfilter/Makefile
@@ -407,7 +407,8 @@ OBJS-$(CONFIG_WAVEFORM_FILTER)               += vf_waveform.o
 OBJS-$(CONFIG_WEAVE_FILTER)                  += vf_weave.o
 OBJS-$(CONFIG_XBR_FILTER)                    += vf_xbr.o
 OBJS-$(CONFIG_XSTACK_FILTER)                 += vf_stack.o framesync.o
-OBJS-$(CONFIG_YADIF_FILTER)                  += vf_yadif.o
+OBJS-$(CONFIG_YADIF_FILTER)                  += vf_yadif.o yadif_common.o
+OBJS-$(CONFIG_YADIF_CUDA_FILTER)             += vf_yadif_cuda.o vf_yadif_cuda.ptx.o yadif_common.o
 OBJS-$(CONFIG_ZMQ_FILTER)                    += f_zmq.o
 OBJS-$(CONFIG_ZOOMPAN_FILTER)                += vf_zoompan.o
 OBJS-$(CONFIG_ZSCALE_FILTER)                 += vf_zscale.o
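(Editor's note, describing FFmpeg's CUDA build mechanism as understood, not anything stated in this diff: vf_yadif_cuda.ptx.o is produced by compiling the .cu kernels below to PTX text with nvcc and wrapping that text in a generated C array, which cuModuleLoadData() then JIT-compiles at runtime. A hypothetical sketch of the generated file:)

    /* Hypothetical shape of the generated vf_yadif_cuda.ptx.c: */
    char vf_yadif_cuda_ptx[] =
        "// PTX assembly emitted by nvcc --ptx vf_yadif_cuda.cu ...";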
libavfilter/allfilters.c
@@ -389,6 +389,7 @@ extern AVFilter ff_vf_weave;
 extern AVFilter ff_vf_xbr;
 extern AVFilter ff_vf_xstack;
 extern AVFilter ff_vf_yadif;
+extern AVFilter ff_vf_yadif_cuda;
 extern AVFilter ff_vf_zmq;
 extern AVFilter ff_vf_zoompan;
 extern AVFilter ff_vf_zscale;
libavfilter/version.h
@@ -31,7 +31,7 @@
 #define LIBAVFILTER_VERSION_MAJOR   7
 #define LIBAVFILTER_VERSION_MINOR  40
-#define LIBAVFILTER_VERSION_MICRO 100
+#define LIBAVFILTER_VERSION_MICRO 101

 #define LIBAVFILTER_VERSION_INT AV_VERSION_INT(LIBAVFILTER_VERSION_MAJOR, \
                                                LIBAVFILTER_VERSION_MINOR, \
@@ -22,7 +22,6 @@
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/cpu.h"
|
||||
#include "libavutil/common.h"
|
||||
#include "libavutil/opt.h"
|
||||
#include "libavutil/pixdesc.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "avfilter.h"
|
||||
@@ -254,166 +253,6 @@ static void filter(AVFilterContext *ctx, AVFrame *dstpic,
|
||||
emms_c();
|
||||
}
|
||||
|
||||
static int return_frame(AVFilterContext *ctx, int is_second)
|
||||
{
|
||||
YADIFContext *yadif = ctx->priv;
|
||||
AVFilterLink *link = ctx->outputs[0];
|
||||
int tff, ret;
|
||||
|
||||
if (yadif->parity == -1) {
|
||||
tff = yadif->cur->interlaced_frame ?
|
||||
yadif->cur->top_field_first : 1;
|
||||
} else {
|
||||
tff = yadif->parity ^ 1;
|
||||
}
|
||||
|
||||
if (is_second) {
|
||||
yadif->out = ff_get_video_buffer(link, link->w, link->h);
|
||||
if (!yadif->out)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
av_frame_copy_props(yadif->out, yadif->cur);
|
||||
yadif->out->interlaced_frame = 0;
|
||||
}
|
||||
|
||||
filter(ctx, yadif->out, tff ^ !is_second, tff);
|
||||
|
||||
if (is_second) {
|
||||
int64_t cur_pts = yadif->cur->pts;
|
||||
int64_t next_pts = yadif->next->pts;
|
||||
|
||||
if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
|
||||
yadif->out->pts = cur_pts + next_pts;
|
||||
} else {
|
||||
yadif->out->pts = AV_NOPTS_VALUE;
|
||||
}
|
||||
}
|
||||
ret = ff_filter_frame(ctx->outputs[0], yadif->out);
|
||||
|
||||
yadif->frame_pending = (yadif->mode&1) && !is_second;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int checkstride(YADIFContext *yadif, const AVFrame *a, const AVFrame *b)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < yadif->csp->nb_components; i++)
|
||||
if (a->linesize[i] != b->linesize[i])
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void fixstride(AVFilterLink *link, AVFrame *f)
|
||||
{
|
||||
AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height);
|
||||
if(!dst)
|
||||
return;
|
||||
av_frame_copy_props(dst, f);
|
||||
av_image_copy(dst->data, dst->linesize,
|
||||
(const uint8_t **)f->data, f->linesize,
|
||||
dst->format, dst->width, dst->height);
|
||||
av_frame_unref(f);
|
||||
av_frame_move_ref(f, dst);
|
||||
av_frame_free(&dst);
|
||||
}
|
||||
|
||||
static int filter_frame(AVFilterLink *link, AVFrame *frame)
|
||||
{
|
||||
AVFilterContext *ctx = link->dst;
|
||||
YADIFContext *yadif = ctx->priv;
|
||||
|
||||
av_assert0(frame);
|
||||
|
||||
if (yadif->frame_pending)
|
||||
return_frame(ctx, 1);
|
||||
|
||||
if (yadif->prev)
|
||||
av_frame_free(&yadif->prev);
|
||||
yadif->prev = yadif->cur;
|
||||
yadif->cur = yadif->next;
|
||||
yadif->next = frame;
|
||||
|
||||
if (!yadif->cur &&
|
||||
!(yadif->cur = av_frame_clone(yadif->next)))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if (checkstride(yadif, yadif->next, yadif->cur)) {
|
||||
av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n");
|
||||
fixstride(link, yadif->next);
|
||||
}
|
||||
if (checkstride(yadif, yadif->next, yadif->cur))
|
||||
fixstride(link, yadif->cur);
|
||||
if (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))
|
||||
fixstride(link, yadif->prev);
|
||||
if (checkstride(yadif, yadif->next, yadif->cur) || (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))) {
|
||||
av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!yadif->prev)
|
||||
return 0;
|
||||
|
||||
if ((yadif->deint && !yadif->cur->interlaced_frame) ||
|
||||
ctx->is_disabled ||
|
||||
(yadif->deint && !yadif->prev->interlaced_frame && yadif->prev->repeat_pict) ||
|
||||
(yadif->deint && !yadif->next->interlaced_frame && yadif->next->repeat_pict)
|
||||
) {
|
||||
yadif->out = av_frame_clone(yadif->cur);
|
||||
if (!yadif->out)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
av_frame_free(&yadif->prev);
|
||||
if (yadif->out->pts != AV_NOPTS_VALUE)
|
||||
yadif->out->pts *= 2;
|
||||
return ff_filter_frame(ctx->outputs[0], yadif->out);
|
||||
}
|
||||
|
||||
yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
|
||||
if (!yadif->out)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
av_frame_copy_props(yadif->out, yadif->cur);
|
||||
yadif->out->interlaced_frame = 0;
|
||||
|
||||
if (yadif->out->pts != AV_NOPTS_VALUE)
|
||||
yadif->out->pts *= 2;
|
||||
|
||||
return return_frame(ctx, 0);
|
||||
}
|
||||
|
||||
static int request_frame(AVFilterLink *link)
|
||||
{
|
||||
AVFilterContext *ctx = link->src;
|
||||
YADIFContext *yadif = ctx->priv;
|
||||
int ret;
|
||||
|
||||
if (yadif->frame_pending) {
|
||||
return_frame(ctx, 1);
|
||||
return 0;
|
||||
}
|
||||
|
||||
if (yadif->eof)
|
||||
return AVERROR_EOF;
|
||||
|
||||
ret = ff_request_frame(ctx->inputs[0]);
|
||||
|
||||
if (ret == AVERROR_EOF && yadif->cur) {
|
||||
AVFrame *next = av_frame_clone(yadif->next);
|
||||
|
||||
if (!next)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
next->pts = yadif->next->pts * 2 - yadif->cur->pts;
|
||||
|
||||
filter_frame(ctx->inputs[0], next);
|
||||
yadif->eof = 1;
|
||||
} else if (ret < 0) {
|
||||
return ret;
|
||||
}
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static av_cold void uninit(AVFilterContext *ctx)
|
||||
{
|
||||
YADIFContext *yadif = ctx->priv;
|
||||
@@ -492,6 +331,7 @@ static int config_props(AVFilterLink *link)
|
||||
}
|
||||
|
||||
s->csp = av_pix_fmt_desc_get(link->format);
|
||||
s->filter = filter;
|
||||
if (s->csp->comp[0].depth > 8) {
|
||||
s->filter_line = filter_line_c_16bit;
|
||||
s->filter_edges = filter_edges_16bit;
|
||||
@@ -507,37 +347,19 @@ static int config_props(AVFilterLink *link)
|
||||
}
|
||||
|
||||
|
||||
#define OFFSET(x) offsetof(YADIFContext, x)
|
||||
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
|
||||
|
||||
#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }
|
||||
|
||||
static const AVOption yadif_options[] = {
|
||||
{ "mode", "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"},
|
||||
CONST("send_frame", "send one frame for each frame", YADIF_MODE_SEND_FRAME, "mode"),
|
||||
CONST("send_field", "send one frame for each field", YADIF_MODE_SEND_FIELD, "mode"),
|
||||
CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"),
|
||||
CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"),
|
||||
|
||||
{ "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
|
||||
CONST("tff", "assume top field first", YADIF_PARITY_TFF, "parity"),
|
||||
CONST("bff", "assume bottom field first", YADIF_PARITY_BFF, "parity"),
|
||||
CONST("auto", "auto detect parity", YADIF_PARITY_AUTO, "parity"),
|
||||
|
||||
{ "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" },
|
||||
CONST("all", "deinterlace all frames", YADIF_DEINT_ALL, "deint"),
|
||||
CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED, "deint"),
|
||||
|
||||
{ NULL }
|
||||
static const AVClass yadif_class = {
|
||||
.class_name = "yadif",
|
||||
.item_name = av_default_item_name,
|
||||
.option = ff_yadif_options,
|
||||
.version = LIBAVUTIL_VERSION_INT,
|
||||
.category = AV_CLASS_CATEGORY_FILTER,
|
||||
};
|
||||
|
||||
AVFILTER_DEFINE_CLASS(yadif);
|
||||
|
||||
static const AVFilterPad avfilter_vf_yadif_inputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.filter_frame = filter_frame,
|
||||
.filter_frame = ff_yadif_filter_frame,
|
||||
},
|
||||
{ NULL }
|
||||
};
|
||||
@@ -546,7 +368,7 @@ static const AVFilterPad avfilter_vf_yadif_outputs[] = {
|
||||
{
|
||||
.name = "default",
|
||||
.type = AVMEDIA_TYPE_VIDEO,
|
||||
.request_frame = request_frame,
|
||||
.request_frame = ff_yadif_request_frame,
|
||||
.config_props = config_props,
|
||||
},
|
||||
{ NULL }
|
||||
|
||||
libavfilter/vf_yadif_cuda.c (new file, 426 lines)
@@ -0,0 +1,426 @@
/*
 * Copyright (C) 2018 Philip Langdale <philipl@overt.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include <cuda.h>
#include "libavutil/avassert.h"
#include "libavutil/hwcontext_cuda.h"
#include "internal.h"
#include "yadif.h"

extern char vf_yadif_cuda_ptx[];

typedef struct DeintCUDAContext {
    YADIFContext yadif;

    AVCUDADeviceContext *hwctx;
    AVBufferRef         *device_ref;
    AVBufferRef         *input_frames_ref;
    AVHWFramesContext   *input_frames;

    CUcontext   cu_ctx;
    CUstream    stream;
    CUmodule    cu_module;
    CUfunction  cu_func_uchar;
    CUfunction  cu_func_uchar2;
    CUfunction  cu_func_ushort;
    CUfunction  cu_func_ushort2;
} DeintCUDAContext;

#define DIV_UP(a, b) ( ((a) + (b) - 1) / (b) )
#define ALIGN_UP(a, b) (((a) + (b) - 1) & ~((b) - 1))
#define BLOCKX 32
#define BLOCKY 16

static int check_cu(AVFilterContext *avctx, CUresult err, const char *func)
{
    const char *err_name;
    const char *err_string;

    av_log(avctx, AV_LOG_TRACE, "Calling %s\n", func);

    if (err == CUDA_SUCCESS)
        return 0;

    cuGetErrorName(err, &err_name);
    cuGetErrorString(err, &err_string);

    av_log(avctx, AV_LOG_ERROR, "%s failed", func);
    if (err_name && err_string)
        av_log(avctx, AV_LOG_ERROR, " -> %s: %s", err_name, err_string);
    av_log(avctx, AV_LOG_ERROR, "\n");

    return AVERROR_EXTERNAL;
}

#define CHECK_CU(x) check_cu(ctx, (x), #x)

static CUresult call_kernel(AVFilterContext *ctx, CUfunction func,
                            CUdeviceptr prev, CUdeviceptr cur, CUdeviceptr next,
                            CUarray_format format, int channels,
                            int src_width,  // Width is pixels per channel
                            int src_height, // Height is pixels per channel
                            int src_pitch,  // Pitch is bytes
                            CUdeviceptr dst,
                            int dst_width,  // Width is pixels per channel
                            int dst_height, // Height is pixels per channel
                            int dst_pitch,  // Pitch is pixels per channel
                            int parity, int tff)
{
    DeintCUDAContext *s = ctx->priv;
    CUtexObject tex_prev = 0, tex_cur = 0, tex_next = 0;
    CUresult err;
    int skip_spatial_check = s->yadif.mode & 2;

    void *args[] = { &dst, &tex_prev, &tex_cur, &tex_next,
                     &dst_width, &dst_height, &dst_pitch,
                     &src_width, &src_height, &parity, &tff,
                     &skip_spatial_check };

    CUDA_TEXTURE_DESC tex_desc = {
        .filterMode = CU_TR_FILTER_MODE_POINT,
        .flags = CU_TRSF_READ_AS_INTEGER,
    };

    CUDA_RESOURCE_DESC res_desc = {
        .resType = CU_RESOURCE_TYPE_PITCH2D,
        .res.pitch2D.format = format,
        .res.pitch2D.numChannels = channels,
        .res.pitch2D.width = src_width,
        .res.pitch2D.height = src_height,
        .res.pitch2D.pitchInBytes = src_pitch,
    };

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)prev;
    err = CHECK_CU(cuTexObjectCreate(&tex_prev, &res_desc, &tex_desc, NULL));
    if (err != CUDA_SUCCESS) {
        goto exit;
    }

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)cur;
    err = CHECK_CU(cuTexObjectCreate(&tex_cur, &res_desc, &tex_desc, NULL));
    if (err != CUDA_SUCCESS) {
        goto exit;
    }

    res_desc.res.pitch2D.devPtr = (CUdeviceptr)next;
    err = CHECK_CU(cuTexObjectCreate(&tex_next, &res_desc, &tex_desc, NULL));
    if (err != CUDA_SUCCESS) {
        goto exit;
    }

    err = CHECK_CU(cuLaunchKernel(func,
                                  DIV_UP(dst_width, BLOCKX), DIV_UP(dst_height, BLOCKY), 1,
                                  BLOCKX, BLOCKY, 1,
                                  0, s->stream, args, NULL));

exit:
    if (tex_prev)
        CHECK_CU(cuTexObjectDestroy(tex_prev));
    if (tex_cur)
        CHECK_CU(cuTexObjectDestroy(tex_cur));
    if (tex_next)
        CHECK_CU(cuTexObjectDestroy(tex_next));

    return err;
}

static void filter(AVFilterContext *ctx, AVFrame *dst,
                   int parity, int tff)
{
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;
    CUcontext dummy;
    CUresult err;
    int i;

    err = CHECK_CU(cuCtxPushCurrent(s->cu_ctx));
    if (err != CUDA_SUCCESS) {
        goto exit;
    }

    for (i = 0; i < y->csp->nb_components; i++) {
        CUfunction func;
        CUarray_format format;
        int pixel_size, channels;
        const AVComponentDescriptor *comp = &y->csp->comp[i];

        if (comp->plane < i) {
            // We process planes as a whole, so don't reprocess
            // them for additional components
            continue;
        }

        pixel_size = (comp->depth + comp->shift) / 8;
        channels = comp->step / pixel_size;
        if (pixel_size > 2 || channels > 2) {
            av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n", y->csp->name);
            goto exit;
        }
        switch (pixel_size) {
        case 1:
            func = channels == 1 ? s->cu_func_uchar : s->cu_func_uchar2;
            format = CU_AD_FORMAT_UNSIGNED_INT8;
            break;
        case 2:
            func = channels == 1 ? s->cu_func_ushort : s->cu_func_ushort2;
            format = CU_AD_FORMAT_UNSIGNED_INT16;
            break;
        default:
            av_log(ctx, AV_LOG_ERROR, "Unsupported pixel format: %s\n", y->csp->name);
            goto exit;
        }
        av_log(ctx, AV_LOG_TRACE,
               "Deinterlacing plane %d: pixel_size: %d channels: %d\n",
               comp->plane, pixel_size, channels);
        call_kernel(ctx, func,
                    (CUdeviceptr)y->prev->data[i],
                    (CUdeviceptr)y->cur->data[i],
                    (CUdeviceptr)y->next->data[i],
                    format, channels,
                    AV_CEIL_RSHIFT(y->cur->width, i ? y->csp->log2_chroma_w : 0),
                    AV_CEIL_RSHIFT(y->cur->height, i ? y->csp->log2_chroma_h : 0),
                    y->cur->linesize[i],
                    (CUdeviceptr)dst->data[i],
                    AV_CEIL_RSHIFT(dst->width, i ? y->csp->log2_chroma_w : 0),
                    AV_CEIL_RSHIFT(dst->height, i ? y->csp->log2_chroma_h : 0),
                    dst->linesize[i] / comp->step,
                    parity, tff);
    }

    err = CHECK_CU(cuStreamSynchronize(s->stream));
    if (err != CUDA_SUCCESS) {
        goto exit;
    }

exit:
    CHECK_CU(cuCtxPopCurrent(&dummy));
    return;
}

static av_cold void deint_cuda_uninit(AVFilterContext *ctx)
{
    CUcontext dummy;
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;

    if (s->cu_module) {
        CHECK_CU(cuCtxPushCurrent(s->cu_ctx));
        CHECK_CU(cuModuleUnload(s->cu_module));
        CHECK_CU(cuCtxPopCurrent(&dummy));
    }

    av_frame_free(&y->prev);
    av_frame_free(&y->cur);
    av_frame_free(&y->next);

    av_buffer_unref(&s->device_ref);
    s->hwctx = NULL;
    av_buffer_unref(&s->input_frames_ref);
    s->input_frames = NULL;
}

static int deint_cuda_query_formats(AVFilterContext *ctx)
{
    enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_CUDA, AV_PIX_FMT_NONE,
    };
    int ret;

    if ((ret = ff_formats_ref(ff_make_format_list(pix_fmts),
                              &ctx->inputs[0]->out_formats)) < 0)
        return ret;
    if ((ret = ff_formats_ref(ff_make_format_list(pix_fmts),
                              &ctx->outputs[0]->in_formats)) < 0)
        return ret;

    return 0;
}

static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DeintCUDAContext *s = ctx->priv;

    if (!inlink->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "A hardware frames reference is "
               "required to associate the processing device.\n");
        return AVERROR(EINVAL);
    }

    s->input_frames_ref = av_buffer_ref(inlink->hw_frames_ctx);
    if (!s->input_frames_ref) {
        av_log(ctx, AV_LOG_ERROR, "A input frames reference create "
               "failed.\n");
        return AVERROR(ENOMEM);
    }
    s->input_frames = (AVHWFramesContext*)s->input_frames_ref->data;

    return 0;
}

static int config_output(AVFilterLink *link)
{
    AVHWFramesContext *output_frames;
    AVFilterContext *ctx = link->src;
    DeintCUDAContext *s = ctx->priv;
    YADIFContext *y = &s->yadif;
    int ret = 0;
    CUcontext dummy;
    CUresult err;

    av_assert0(s->input_frames);
    s->device_ref = av_buffer_ref(s->input_frames->device_ref);
    if (!s->device_ref) {
        av_log(ctx, AV_LOG_ERROR, "A device reference create "
               "failed.\n");
        return AVERROR(ENOMEM);
    }
    s->hwctx = ((AVHWDeviceContext*)s->device_ref->data)->hwctx;
    s->cu_ctx = s->hwctx->cuda_ctx;
    s->stream = s->hwctx->stream;

    link->hw_frames_ctx = av_hwframe_ctx_alloc(s->device_ref);
    if (!link->hw_frames_ctx) {
        av_log(ctx, AV_LOG_ERROR, "Failed to create HW frame context "
               "for output.\n");
        ret = AVERROR(ENOMEM);
        goto exit;
    }

    output_frames = (AVHWFramesContext*)link->hw_frames_ctx->data;

    output_frames->format    = AV_PIX_FMT_CUDA;
    output_frames->sw_format = s->input_frames->sw_format;
    output_frames->width     = ctx->inputs[0]->w;
    output_frames->height    = ctx->inputs[0]->h;

    output_frames->initial_pool_size = 4;

    ret = ff_filter_init_hw_frames(ctx, link, 10);
    if (ret < 0)
        goto exit;

    ret = av_hwframe_ctx_init(link->hw_frames_ctx);
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Failed to initialise CUDA frame "
               "context for output: %d\n", ret);
        goto exit;
    }

    link->time_base.num = ctx->inputs[0]->time_base.num;
    link->time_base.den = ctx->inputs[0]->time_base.den * 2;
    link->w             = ctx->inputs[0]->w;
    link->h             = ctx->inputs[0]->h;

    if (y->mode & 1)
        link->frame_rate = av_mul_q(ctx->inputs[0]->frame_rate,
                                    (AVRational){2, 1});

    if (link->w < 3 || link->h < 3) {
        av_log(ctx, AV_LOG_ERROR, "Video of less than 3 columns or lines is not supported\n");
        ret = AVERROR(EINVAL);
        goto exit;
    }

    y->csp = av_pix_fmt_desc_get(output_frames->sw_format);
    y->filter = filter;

    err = CHECK_CU(cuCtxPushCurrent(s->cu_ctx));
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_EXTERNAL;
        goto exit;
    }

    err = CHECK_CU(cuModuleLoadData(&s->cu_module, vf_yadif_cuda_ptx));
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_INVALIDDATA;
        goto exit;
    }

    err = CHECK_CU(cuModuleGetFunction(&s->cu_func_uchar, s->cu_module, "yadif_uchar"));
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_INVALIDDATA;
        goto exit;
    }

    err = CHECK_CU(cuModuleGetFunction(&s->cu_func_uchar2, s->cu_module, "yadif_uchar2"));
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_INVALIDDATA;
        goto exit;
    }

    err = CHECK_CU(cuModuleGetFunction(&s->cu_func_ushort, s->cu_module, "yadif_ushort"));
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_INVALIDDATA;
        goto exit;
    }

    err = CHECK_CU(cuModuleGetFunction(&s->cu_func_ushort2, s->cu_module, "yadif_ushort2"));
    if (err != CUDA_SUCCESS) {
        ret = AVERROR_INVALIDDATA;
        goto exit;
    }

exit:
    CHECK_CU(cuCtxPopCurrent(&dummy));

    return ret;
}

static const AVClass yadif_cuda_class = {
    .class_name = "yadif_cuda",
    .item_name  = av_default_item_name,
    .option     = ff_yadif_options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_FILTER,
};

static const AVFilterPad deint_cuda_inputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .filter_frame  = ff_yadif_filter_frame,
        .config_props  = config_input,
    },
    { NULL }
};

static const AVFilterPad deint_cuda_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_VIDEO,
        .request_frame = ff_yadif_request_frame,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_vf_yadif_cuda = {
    .name           = "yadif_cuda",
    .description    = NULL_IF_CONFIG_SMALL("Deinterlace CUDA frames"),
    .priv_size      = sizeof(DeintCUDAContext),
    .priv_class     = &yadif_cuda_class,
    .uninit         = deint_cuda_uninit,
    .query_formats  = deint_cuda_query_formats,
    .inputs         = deint_cuda_inputs,
    .outputs        = deint_cuda_outputs,
    .flags          = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .flags_internal = FF_FILTER_FLAG_HWFRAME_AWARE,
};
libavfilter/vf_yadif_cuda.cu (new file, 299 lines)
@@ -0,0 +1,299 @@
/*
 * Copyright (C) 2018 Philip Langdale <philipl@overt.org>
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

template<typename T>
__inline__ __device__ T spatial_predictor(T a, T b, T c, T d, T e, T f, T g,
                                          T h, T i, T j, T k, T l, T m, T n)
{
    int spatial_pred = (d + k)/2;
    int spatial_score = abs(c - j) + abs(d - k) + abs(e - l);

    int score = abs(b - k) + abs(c - l) + abs(d - m);
    if (score < spatial_score) {
        spatial_pred = (c + l)/2;
        spatial_score = score;
        score = abs(a - l) + abs(b - m) + abs(c - n);
        if (score < spatial_score) {
            spatial_pred = (b + m)/2;
            spatial_score = score;
        }
    }
    score = abs(d - i) + abs(e - j) + abs(f - k);
    if (score < spatial_score) {
        spatial_pred = (e + j)/2;
        spatial_score = score;
        score = abs(e - h) + abs(f - i) + abs(g - j);
        if (score < spatial_score) {
            spatial_pred = (f + i)/2;
            spatial_score = score;
        }
    }
    return spatial_pred;
}

__inline__ __device__ int max3(int a, int b, int c)
{
    int x = max(a, b);
    return max(x, c);
}

__inline__ __device__ int min3(int a, int b, int c)
{
    int x = min(a, b);
    return min(x, c);
}

template<typename T>
__inline__ __device__ T temporal_predictor(T A, T B, T C, T D, T E, T F,
                                           T G, T H, T I, T J, T K, T L,
                                           T spatial_pred, bool skip_check)
{
    int p0 = (C + H) / 2;
    int p1 = F;
    int p2 = (D + I) / 2;
    int p3 = G;
    int p4 = (E + J) / 2;

    int tdiff0 = abs(D - I);
    int tdiff1 = (abs(A - F) + abs(B - G)) / 2;
    int tdiff2 = (abs(K - F) + abs(G - L)) / 2;

    int diff = max3(tdiff0, tdiff1, tdiff2);

    if (!skip_check) {
        int maxi = max3(p2 - p3, p2 - p1, min(p0 - p1, p4 - p3));
        int mini = min3(p2 - p3, p2 - p1, max(p0 - p1, p4 - p3));
        diff = max3(diff, mini, -maxi);
    }

    if (spatial_pred > p2 + diff) {
        spatial_pred = p2 + diff;
    }
    if (spatial_pred < p2 - diff) {
        spatial_pred = p2 - diff;
    }

    return spatial_pred;
}

template<typename T>
__inline__ __device__ void yadif_single(T *dst,
                                        cudaTextureObject_t prev,
                                        cudaTextureObject_t cur,
                                        cudaTextureObject_t next,
                                        int dst_width, int dst_height, int dst_pitch,
                                        int src_width, int src_height,
                                        int parity, int tff, bool skip_spatial_check)
{
    // Identify location
    int xo = blockIdx.x * blockDim.x + threadIdx.x;
    int yo = blockIdx.y * blockDim.y + threadIdx.y;

    if (xo >= dst_width || yo >= dst_height) {
        return;
    }

    // Don't modify the primary field
    if (yo % 2 == parity) {
        dst[yo*dst_pitch+xo] = tex2D<T>(cur, xo, yo);
        return;
    }

    // Calculate spatial prediction
    T a = tex2D<T>(cur, xo - 3, yo - 1);
    T b = tex2D<T>(cur, xo - 2, yo - 1);
    T c = tex2D<T>(cur, xo - 1, yo - 1);
    T d = tex2D<T>(cur, xo - 0, yo - 1);
    T e = tex2D<T>(cur, xo + 1, yo - 1);
    T f = tex2D<T>(cur, xo + 2, yo - 1);
    T g = tex2D<T>(cur, xo + 3, yo - 1);

    T h = tex2D<T>(cur, xo - 3, yo + 1);
    T i = tex2D<T>(cur, xo - 2, yo + 1);
    T j = tex2D<T>(cur, xo - 1, yo + 1);
    T k = tex2D<T>(cur, xo - 0, yo + 1);
    T l = tex2D<T>(cur, xo + 1, yo + 1);
    T m = tex2D<T>(cur, xo + 2, yo + 1);
    T n = tex2D<T>(cur, xo + 3, yo + 1);

    T spatial_pred =
        spatial_predictor(a, b, c, d, e, f, g, h, i, j, k, l, m, n);

    // Calculate temporal prediction
    int is_second_field = !(parity ^ tff);

    cudaTextureObject_t prev2 = prev;
    cudaTextureObject_t prev1 = is_second_field ? cur : prev;
    cudaTextureObject_t next1 = is_second_field ? next : cur;
    cudaTextureObject_t next2 = next;

    T A = tex2D<T>(prev2, xo, yo - 1);
    T B = tex2D<T>(prev2, xo, yo + 1);
    T C = tex2D<T>(prev1, xo, yo - 2);
    T D = tex2D<T>(prev1, xo, yo + 0);
    T E = tex2D<T>(prev1, xo, yo + 2);
    T F = tex2D<T>(cur, xo, yo - 1);
    T G = tex2D<T>(cur, xo, yo + 1);
    T H = tex2D<T>(next1, xo, yo - 2);
    T I = tex2D<T>(next1, xo, yo + 0);
    T J = tex2D<T>(next1, xo, yo + 2);
    T K = tex2D<T>(next2, xo, yo - 1);
    T L = tex2D<T>(next2, xo, yo + 1);

    spatial_pred = temporal_predictor(A, B, C, D, E, F, G, H, I, J, K, L,
                                      spatial_pred, skip_spatial_check);

    dst[yo*dst_pitch+xo] = spatial_pred;
}

template <typename T>
__inline__ __device__ void yadif_double(T *dst,
                                        cudaTextureObject_t prev,
                                        cudaTextureObject_t cur,
                                        cudaTextureObject_t next,
                                        int dst_width, int dst_height, int dst_pitch,
                                        int src_width, int src_height,
                                        int parity, int tff, bool skip_spatial_check)
{
    int xo = blockIdx.x * blockDim.x + threadIdx.x;
    int yo = blockIdx.y * blockDim.y + threadIdx.y;

    if (xo >= dst_width || yo >= dst_height) {
        return;
    }

    if (yo % 2 == parity) {
        // Don't modify the primary field
        dst[yo*dst_pitch+xo] = tex2D<T>(cur, xo, yo);
        return;
    }

    T a = tex2D<T>(cur, xo - 3, yo - 1);
    T b = tex2D<T>(cur, xo - 2, yo - 1);
    T c = tex2D<T>(cur, xo - 1, yo - 1);
    T d = tex2D<T>(cur, xo - 0, yo - 1);
    T e = tex2D<T>(cur, xo + 1, yo - 1);
    T f = tex2D<T>(cur, xo + 2, yo - 1);
    T g = tex2D<T>(cur, xo + 3, yo - 1);

    T h = tex2D<T>(cur, xo - 3, yo + 1);
    T i = tex2D<T>(cur, xo - 2, yo + 1);
    T j = tex2D<T>(cur, xo - 1, yo + 1);
    T k = tex2D<T>(cur, xo - 0, yo + 1);
    T l = tex2D<T>(cur, xo + 1, yo + 1);
    T m = tex2D<T>(cur, xo + 2, yo + 1);
    T n = tex2D<T>(cur, xo + 3, yo + 1);

    T spatial_pred;
    spatial_pred.x =
        spatial_predictor(a.x, b.x, c.x, d.x, e.x, f.x, g.x, h.x, i.x, j.x, k.x, l.x, m.x, n.x);
    spatial_pred.y =
        spatial_predictor(a.y, b.y, c.y, d.y, e.y, f.y, g.y, h.y, i.y, j.y, k.y, l.y, m.y, n.y);

    // Calculate temporal prediction
    int is_second_field = !(parity ^ tff);

    cudaTextureObject_t prev2 = prev;
    cudaTextureObject_t prev1 = is_second_field ? cur : prev;
    cudaTextureObject_t next1 = is_second_field ? next : cur;
    cudaTextureObject_t next2 = next;

    T A = tex2D<T>(prev2, xo, yo - 1);
    T B = tex2D<T>(prev2, xo, yo + 1);
    T C = tex2D<T>(prev1, xo, yo - 2);
    T D = tex2D<T>(prev1, xo, yo + 0);
    T E = tex2D<T>(prev1, xo, yo + 2);
    T F = tex2D<T>(cur, xo, yo - 1);
    T G = tex2D<T>(cur, xo, yo + 1);
    T H = tex2D<T>(next1, xo, yo - 2);
    T I = tex2D<T>(next1, xo, yo + 0);
    T J = tex2D<T>(next1, xo, yo + 2);
    T K = tex2D<T>(next2, xo, yo - 1);
    T L = tex2D<T>(next2, xo, yo + 1);

    spatial_pred.x =
        temporal_predictor(A.x, B.x, C.x, D.x, E.x, F.x, G.x, H.x, I.x, J.x, K.x, L.x,
                           spatial_pred.x, skip_spatial_check);
    spatial_pred.y =
        temporal_predictor(A.y, B.y, C.y, D.y, E.y, F.y, G.y, H.y, I.y, J.y, K.y, L.y,
                           spatial_pred.y, skip_spatial_check);

    dst[yo*dst_pitch+xo] = spatial_pred;
}

extern "C" {

__global__ void yadif_uchar(unsigned char *dst,
                            cudaTextureObject_t prev,
                            cudaTextureObject_t cur,
                            cudaTextureObject_t next,
                            int dst_width, int dst_height, int dst_pitch,
                            int src_width, int src_height,
                            int parity, int tff, bool skip_spatial_check)
{
    yadif_single(dst, prev, cur, next,
                 dst_width, dst_height, dst_pitch,
                 src_width, src_height,
                 parity, tff, skip_spatial_check);
}

__global__ void yadif_ushort(unsigned short *dst,
                             cudaTextureObject_t prev,
                             cudaTextureObject_t cur,
                             cudaTextureObject_t next,
                             int dst_width, int dst_height, int dst_pitch,
                             int src_width, int src_height,
                             int parity, int tff, bool skip_spatial_check)
{
    yadif_single(dst, prev, cur, next,
                 dst_width, dst_height, dst_pitch,
                 src_width, src_height,
                 parity, tff, skip_spatial_check);
}

__global__ void yadif_uchar2(uchar2 *dst,
                             cudaTextureObject_t prev,
                             cudaTextureObject_t cur,
                             cudaTextureObject_t next,
                             int dst_width, int dst_height, int dst_pitch,
                             int src_width, int src_height,
                             int parity, int tff, bool skip_spatial_check)
{
    yadif_double(dst, prev, cur, next,
                 dst_width, dst_height, dst_pitch,
                 src_width, src_height,
                 parity, tff, skip_spatial_check);
}

__global__ void yadif_ushort2(ushort2 *dst,
                              cudaTextureObject_t prev,
                              cudaTextureObject_t cur,
                              cudaTextureObject_t next,
                              int dst_width, int dst_height, int dst_pitch,
                              int src_width, int src_height,
                              int parity, int tff, bool skip_spatial_check)
{
    yadif_double(dst, prev, cur, next,
                 dst_width, dst_height, dst_pitch,
                 src_width, src_height,
                 parity, tff, skip_spatial_check);
}

} /* extern "C" */
libavfilter/yadif.h
@@ -19,6 +19,7 @@
 #ifndef AVFILTER_YADIF_H
 #define AVFILTER_YADIF_H

+#include "libavutil/opt.h"
 #include "libavutil/pixdesc.h"
 #include "avfilter.h"

@@ -54,6 +55,8 @@ typedef struct YADIFContext {
     AVFrame *prev;
     AVFrame *out;

+    void (*filter)(AVFilterContext *ctx, AVFrame *dstpic, int parity, int tff);
+
     /**
      * Required alignment for filter_line
      */
@@ -71,4 +74,10 @@ typedef struct YADIFContext {

 void ff_yadif_init_x86(YADIFContext *yadif);

+int ff_yadif_filter_frame(AVFilterLink *link, AVFrame *frame);
+
+int ff_yadif_request_frame(AVFilterLink *link);
+
+extern const AVOption ff_yadif_options[];
+
 #endif /* AVFILTER_YADIF_H */
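(Editor's note on the refactoring above, summarizing only what the diff itself shows: yadif_common.c below owns frame queuing, PTS doubling and the shared mode/parity/deint options, and dispatches the per-plane pixel work through the new YADIFContext.filter callback; each backend installs its own implementation during link configuration, as in:)

    /* From the backends' config functions, per the diff: */
    s->csp    = av_pix_fmt_desc_get(link->format);
    s->filter = filter;   /* backend-specific filter() does the pixel work */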
209
libavfilter/yadif_common.c
Normal file
209
libavfilter/yadif_common.c
Normal file
@@ -0,0 +1,209 @@
|
||||
/*
|
||||
* Copyright (C) 2006-2011 Michael Niedermayer <michaelni@gmx.at>
|
||||
* 2010 James Darnley <james.darnley@gmail.com>
|
||||
|
||||
* This file is part of FFmpeg.
|
||||
*
|
||||
* FFmpeg is free software; you can redistribute it and/or
|
||||
* modify it under the terms of the GNU Lesser General Public
|
||||
* License as published by the Free Software Foundation; either
|
||||
* version 2.1 of the License, or (at your option) any later version.
|
||||
*
|
||||
* FFmpeg is distributed in the hope that it will be useful,
|
||||
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
||||
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
||||
* Lesser General Public License for more details.
|
||||
*
|
||||
* You should have received a copy of the GNU Lesser General Public
|
||||
* License along with FFmpeg; if not, write to the Free Software
|
||||
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
||||
*/
|
||||
|
||||
#include "libavutil/avassert.h"
|
||||
#include "libavutil/imgutils.h"
|
||||
#include "internal.h"
|
||||
#include "yadif.h"
|
||||
|
||||
static int return_frame(AVFilterContext *ctx, int is_second)
|
||||
{
|
||||
YADIFContext *yadif = ctx->priv;
|
||||
AVFilterLink *link = ctx->outputs[0];
|
||||
int tff, ret;
|
||||
|
||||
if (yadif->parity == -1) {
|
||||
tff = yadif->cur->interlaced_frame ?
|
||||
yadif->cur->top_field_first : 1;
|
||||
} else {
|
||||
tff = yadif->parity ^ 1;
|
||||
}
|
||||
|
||||
if (is_second) {
|
||||
yadif->out = ff_get_video_buffer(link, link->w, link->h);
|
||||
if (!yadif->out)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
av_frame_copy_props(yadif->out, yadif->cur);
|
||||
yadif->out->interlaced_frame = 0;
|
||||
}
|
||||
|
||||
yadif->filter(ctx, yadif->out, tff ^ !is_second, tff);
|
||||
|
||||
if (is_second) {
|
||||
int64_t cur_pts = yadif->cur->pts;
|
||||
int64_t next_pts = yadif->next->pts;
|
||||
|
||||
if (next_pts != AV_NOPTS_VALUE && cur_pts != AV_NOPTS_VALUE) {
|
||||
yadif->out->pts = cur_pts + next_pts;
|
||||
} else {
|
||||
yadif->out->pts = AV_NOPTS_VALUE;
|
||||
}
|
||||
}
|
||||
ret = ff_filter_frame(ctx->outputs[0], yadif->out);
|
||||
|
||||
yadif->frame_pending = (yadif->mode&1) && !is_second;
|
||||
return ret;
|
||||
}
|
||||
|
||||
static int checkstride(YADIFContext *yadif, const AVFrame *a, const AVFrame *b)
|
||||
{
|
||||
int i;
|
||||
for (i = 0; i < yadif->csp->nb_components; i++)
|
||||
if (a->linesize[i] != b->linesize[i])
|
||||
return 1;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static void fixstride(AVFilterLink *link, AVFrame *f)
|
||||
{
|
||||
AVFrame *dst = ff_default_get_video_buffer(link, f->width, f->height);
|
||||
if(!dst)
|
||||
return;
|
||||
av_frame_copy_props(dst, f);
|
||||
av_image_copy(dst->data, dst->linesize,
|
||||
(const uint8_t **)f->data, f->linesize,
|
||||
dst->format, dst->width, dst->height);
|
||||
av_frame_unref(f);
|
||||
av_frame_move_ref(f, dst);
|
||||
av_frame_free(&dst);
|
||||
}
|
||||
|
||||
int ff_yadif_filter_frame(AVFilterLink *link, AVFrame *frame)
|
||||
{
|
||||
AVFilterContext *ctx = link->dst;
|
||||
YADIFContext *yadif = ctx->priv;
|
||||
|
||||
av_assert0(frame);
|
||||
|
||||
if (yadif->frame_pending)
|
||||
return_frame(ctx, 1);
|
||||
|
||||
if (yadif->prev)
|
||||
av_frame_free(&yadif->prev);
|
||||
yadif->prev = yadif->cur;
|
||||
yadif->cur = yadif->next;
|
||||
yadif->next = frame;
|
||||
|
||||
if (!yadif->cur &&
|
||||
!(yadif->cur = av_frame_clone(yadif->next)))
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
if (checkstride(yadif, yadif->next, yadif->cur)) {
|
||||
av_log(ctx, AV_LOG_VERBOSE, "Reallocating frame due to differing stride\n");
|
||||
fixstride(link, yadif->next);
|
||||
}
|
||||
if (checkstride(yadif, yadif->next, yadif->cur))
|
||||
fixstride(link, yadif->cur);
|
||||
if (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))
|
||||
fixstride(link, yadif->prev);
|
||||
if (checkstride(yadif, yadif->next, yadif->cur) || (yadif->prev && checkstride(yadif, yadif->next, yadif->prev))) {
|
||||
av_log(ctx, AV_LOG_ERROR, "Failed to reallocate frame\n");
|
||||
return -1;
|
||||
}
|
||||
|
||||
if (!yadif->prev)
|
||||
return 0;
|
||||
|
||||
if ((yadif->deint && !yadif->cur->interlaced_frame) ||
|
||||
ctx->is_disabled ||
|
||||
(yadif->deint && !yadif->prev->interlaced_frame && yadif->prev->repeat_pict) ||
|
||||
(yadif->deint && !yadif->next->interlaced_frame && yadif->next->repeat_pict)
|
||||
) {
|
||||
yadif->out = av_frame_clone(yadif->cur);
|
||||
if (!yadif->out)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
av_frame_free(&yadif->prev);
|
||||
if (yadif->out->pts != AV_NOPTS_VALUE)
|
||||
yadif->out->pts *= 2;
|
||||
return ff_filter_frame(ctx->outputs[0], yadif->out);
|
||||
}
|
||||
|
||||
yadif->out = ff_get_video_buffer(ctx->outputs[0], link->w, link->h);
|
||||
if (!yadif->out)
|
||||
return AVERROR(ENOMEM);
|
||||
|
||||
av_frame_copy_props(yadif->out, yadif->cur);
|
||||
yadif->out->interlaced_frame = 0;
|
||||
|
||||
if (yadif->out->pts != AV_NOPTS_VALUE)
|
||||
yadif->out->pts *= 2;
|
||||
|
||||
return return_frame(ctx, 0);
|
||||
}

int ff_yadif_request_frame(AVFilterLink *link)
{
    AVFilterContext *ctx = link->src;
    YADIFContext *yadif = ctx->priv;
    int ret;

    if (yadif->frame_pending) {
        return_frame(ctx, 1);
        return 0;
    }

    if (yadif->eof)
        return AVERROR_EOF;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && yadif->cur) {
        AVFrame *next = av_frame_clone(yadif->next);

        if (!next)
            return AVERROR(ENOMEM);

        next->pts = yadif->next->pts * 2 - yadif->cur->pts;

        ff_yadif_filter_frame(ctx->inputs[0], next);
        yadif->eof = 1;
    } else if (ret < 0) {
        return ret;
    }

    return 0;
}
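
At EOF, ff_yadif_request_frame() manufactures one final input: a clone of next whose pts linearly extrapolates the last frame interval (next->pts * 2 - cur->pts; e.g. cur = 100 and next = 140 give 180). Feeding it back through ff_yadif_filter_frame() gives the last real frame a "next" reference, so it can still be deinterlaced and emitted before AVERROR_EOF is returned on later calls.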

#define OFFSET(x) offsetof(YADIFContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

#define CONST(name, help, val, unit) { name, help, 0, AV_OPT_TYPE_CONST, {.i64=val}, INT_MIN, INT_MAX, FLAGS, unit }

const AVOption ff_yadif_options[] = {
    { "mode",   "specify the interlacing mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=YADIF_MODE_SEND_FRAME}, 0, 3, FLAGS, "mode"},
    CONST("send_frame",           "send one frame for each frame",                                     YADIF_MODE_SEND_FRAME,           "mode"),
    CONST("send_field",           "send one frame for each field",                                     YADIF_MODE_SEND_FIELD,           "mode"),
    CONST("send_frame_nospatial", "send one frame for each frame, but skip spatial interlacing check", YADIF_MODE_SEND_FRAME_NOSPATIAL, "mode"),
    CONST("send_field_nospatial", "send one frame for each field, but skip spatial interlacing check", YADIF_MODE_SEND_FIELD_NOSPATIAL, "mode"),

    { "parity", "specify the assumed picture field parity", OFFSET(parity), AV_OPT_TYPE_INT, {.i64=YADIF_PARITY_AUTO}, -1, 1, FLAGS, "parity" },
    CONST("tff",  "assume top field first",    YADIF_PARITY_TFF,  "parity"),
    CONST("bff",  "assume bottom field first", YADIF_PARITY_BFF,  "parity"),
    CONST("auto", "auto detect parity",        YADIF_PARITY_AUTO, "parity"),

    { "deint", "specify which frames to deinterlace", OFFSET(deint), AV_OPT_TYPE_INT, {.i64=YADIF_DEINT_ALL}, 0, 1, FLAGS, "deint" },
    CONST("all",        "deinterlace all frames",                       YADIF_DEINT_ALL,        "deint"),
    CONST("interlaced", "only deinterlace frames marked as interlaced", YADIF_DEINT_INTERLACED, "deint"),

    { NULL }
};
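
As a usage sketch (assuming an already-allocated AVFilterGraph; the instance name "deint" is arbitrary), these options are normally reached through the public libavfilter API, which parses the option string against ff_yadif_options via the filter's priv_class:

#include <libavfilter/avfilter.h>

static int make_yadif(AVFilterGraph *graph, AVFilterContext **out)
{
    const AVFilter *yadif = avfilter_get_by_name("yadif");
    if (!yadif)
        return AVERROR_FILTER_NOT_FOUND;
    /* Exercises all three options declared above. */
    return avfilter_graph_create_filter(out, yadif, "deint",
                                        "mode=send_field:parity=auto:deint=interlaced",
                                        NULL, graph);
}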

@@ -513,7 +513,7 @@ static int ftp_features(FTPContext *s)
     static const char *feat_command        = "FEAT\r\n";
     static const char *enable_utf8_command = "OPTS UTF8 ON\r\n";
     static const int feat_codes[] = {211, 0};
-    static const int opts_codes[] = {200, 451, 0};
+    static const int opts_codes[] = {200, 202, 451, 0};
 
     av_freep(&s->features);
     if (ftp_send_command(s, feat_command, feat_codes, &s->features) != 211) {
@@ -521,7 +521,8 @@ static int ftp_features(FTPContext *s)
     }
 
     if (ftp_has_feature(s, "UTF8")) {
-        if (ftp_send_command(s, enable_utf8_command, opts_codes, NULL) == 200)
+        int ret = ftp_send_command(s, enable_utf8_command, opts_codes, NULL);
+        if (ret == 200 || ret == 202)
             s->utf8 = 1;
     }
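
For context: the hunk above lets ftp_features() accept reply code 202 ("command not implemented, superfluous", i.e. UTF-8 is already enabled) as success for OPTS UTF8 ON, alongside 200. The code also has to be added to opts_codes, since ftp_send_command() appears to match replies against that zero-terminated list. A minimal sketch of the list convention (code_in_list is a hypothetical helper, not an FFmpeg function):

static int code_in_list(int code, const int *codes)
{
    for (; *codes; codes++)     /* list is terminated by 0 */
        if (*codes == code)
            return 1;           /* e.g. code_in_list(202, opts_codes) */
    return 0;
}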

@@ -97,6 +97,8 @@ static int ivf_check_bitstream(struct AVFormatContext *s, const AVPacket *pkt)
 
     if (st->codecpar->codec_id == AV_CODEC_ID_VP9)
         ret = ff_stream_add_bitstream_filter(st, "vp9_superframe", NULL);
+    else if (st->codecpar->codec_id == AV_CODEC_ID_AV1)
+        ret = ff_stream_add_bitstream_filter(st, "av1_metadata", "td=insert");
 
     return ret;
 }
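
The IVF muxer now auto-inserts the av1_metadata bitstream filter with td=insert for AV1 streams, mirroring the existing vp9_superframe case, presumably so each packet starts with a temporal delimiter OBU. ff_stream_add_bitstream_filter() is internal to libavformat; with the public bitstream-filter API, roughly the same setup would look like this (a sketch; open_av1_metadata is a hypothetical name, while "av1_metadata" and its "td" option are the ones referenced above):

#include <libavcodec/avcodec.h>
#include <libavutil/opt.h>

static int open_av1_metadata(const AVCodecParameters *par, AVBSFContext **out)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("av1_metadata");
    AVBSFContext *bsf;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, &bsf)) < 0)
        return ret;
    if ((ret = avcodec_parameters_copy(bsf->par_in, par)) < 0 ||
        (ret = av_opt_set(bsf->priv_data, "td", "insert", 0)) < 0 ||
        (ret = av_bsf_init(bsf)) < 0) {
        av_bsf_free(&bsf);
        return ret;
    }
    *out = bsf;
    return 0;
}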

@@ -48,7 +48,7 @@
 
 static int header = 0;
 
-static void decode(AVCodecContext *dec_ctx, AVFrame *frame,
+static int decode(AVCodecContext *dec_ctx, AVFrame *frame,
                    AVPacket *pkt)
 {
     static uint64_t frame_cnt = 0;
@@ -57,20 +57,20 @@ static void decode(AVCodecContext *dec_ctx, AVFrame *frame,
     ret = avcodec_send_packet(dec_ctx, pkt);
     if (ret < 0) {
         fprintf(stderr, "Error sending a packet for decoding: %s\n", av_err2str(ret));
-        exit(1);
+        return ret;
     }
 
     while (ret >= 0) {
         const AVPixFmtDescriptor *desc;
-        char *sum;
+        char sum[AV_HASH_MAX_SIZE * 2 + 1];
         struct AVHashContext *hash;
 
         ret = avcodec_receive_frame(dec_ctx, frame);
         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
-            return;
+            return 0;
         } else if (ret < 0) {
             fprintf(stderr, "Error during decoding: %s\n", av_err2str(ret));
-            exit(1);
+            return ret;
         }
 
         if (!header) {
@@ -87,9 +87,10 @@ static void decode(AVCodecContext *dec_ctx, AVFrame *frame,
             header = 1;
         }
         desc = av_pix_fmt_desc_get(dec_ctx->pix_fmt);
-        av_hash_alloc(&hash, "md5");
+        if ((ret = av_hash_alloc(&hash, "md5")) < 0) {
+            return ret;
+        }
         av_hash_init(hash);
-        sum = av_mallocz(av_hash_get_size(hash) * 2 + 1);
 
         for (int i = 0; i < frame->height; i++)
             av_hash_update(hash, &frame->data[0][i * frame->linesize[0]], frame->width);
@@ -104,25 +105,25 @@ static void decode(AVCodecContext *dec_ctx, AVFrame *frame,
                (frame->width * frame->height + 2 * (frame->height >> desc->log2_chroma_h) * (frame->width >> desc->log2_chroma_w)), sum);
         frame_cnt += 1;
         av_hash_freep(&hash);
-        av_free(sum);
     }
+    return 0;
 }
 
 int main(int argc, char **argv)
 {
-    const AVCodec *codec;
+    const AVCodec *codec = NULL;
     AVCodecContext *c = NULL;
-    AVFrame *frame;
+    AVFrame *frame = NULL;
     unsigned int threads;
     AVPacket *pkt;
-    FILE *fd;
+    FILE *file = NULL;
     char nal[MAX_SLICES * UINT16_MAX + AV_INPUT_BUFFER_PADDING_SIZE];
-    int nals = 0;
+    int nals = 0, ret = 0;
     char *p = nal;
 
     if (argc < 4) {
         fprintf(stderr, "Usage: %s <threads> <input file> <output file>\n", argv[0]);
-        exit(1);
+        return -1;
     }
 
     if (!(threads = strtoul(argv[1], NULL, 0)))
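
Two of the decode() changes above touch the libavutil hashing helpers: the return value of av_hash_alloc() is now checked, and the heap-allocated sum becomes a stack array of AV_HASH_MAX_SIZE * 2 + 1 bytes, enough for any supported digest in hex plus the terminator, which removes the unchecked av_mallocz(). A self-contained sketch of that API (hash_hex is an invented name):

#include <libavutil/hash.h>

static int hash_hex(const char *algo, const uint8_t *buf, int len,
                    char out[AV_HASH_MAX_SIZE * 2 + 1])
{
    struct AVHashContext *h;
    int ret = av_hash_alloc(&h, algo); /* "md5", "sha256", "crc32", ... */
    if (ret < 0)
        return ret;                    /* unknown algorithm or no memory */
    av_hash_init(h);
    av_hash_update(h, buf, len);
    av_hash_final_hex(h, (uint8_t *)out, AV_HASH_MAX_SIZE * 2 + 1);
    av_hash_freep(&h);
    return 0;
}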
@@ -134,17 +135,20 @@ int main(int argc, char **argv)
     setmode(fileno(stdout), O_BINARY);
 #endif
 
-    if (!(pkt = av_packet_alloc()))
-        exit(1);
+    if (!(pkt = av_packet_alloc())) {
+        return -1;
+    }
 
     if (!(codec = avcodec_find_decoder(AV_CODEC_ID_H264))) {
         fprintf(stderr, "Codec not found\n");
-        exit(1);
+        ret = -1;
+        goto err;
     }
 
     if (!(c = avcodec_alloc_context3(codec))) {
         fprintf(stderr, "Could not allocate video codec context\n");
-        exit(1);
+        ret = -1;
+        goto err;
     }
 
     c->width = 352;
@@ -154,15 +158,16 @@ int main(int argc, char **argv)
     c->thread_type = FF_THREAD_SLICE;
     c->thread_count = threads;
 
-    if (avcodec_open2(c, codec, NULL) < 0) {
+    if ((ret = avcodec_open2(c, codec, NULL)) < 0) {
         fprintf(stderr, "Could not open codec\n");
-        exit(1);
+        goto err;
     }
 
 #if HAVE_THREADS
     if (c->active_thread_type != FF_THREAD_SLICE) {
         fprintf(stderr, "Couldn't activate slice threading: %d\n", c->active_thread_type);
-        exit(1);
+        ret = -1;
+        goto err;
     }
 #else
     fprintf(stderr, "WARN: not using threads, only checking decoding slice NALUs\n");
@@ -170,34 +175,37 @@ int main(int argc, char **argv)
 
     if (!(frame = av_frame_alloc())) {
         fprintf(stderr, "Could not allocate video frame\n");
-        exit(1);
+        ret = -1;
+        goto err;
     }
 
-    if (!(fd = fopen(argv[2], "rb"))) {
+    if (!(file = fopen(argv[2], "rb"))) {
         fprintf(stderr, "Couldn't open NALU file: %s\n", argv[2]);
-        exit(1);
+        ret = -1;
+        goto err;
     }
 
     while(1) {
         uint16_t size = 0;
-        ssize_t ret = fread(&size, 1, sizeof(uint16_t), fd);
-        if (ret < 0) {
-            perror("Couldn't read size");
-            exit(1);
-        } else if (ret != sizeof(uint16_t))
+        size_t ret = fread(&size, 1, sizeof(uint16_t), file);
+        if (ret != sizeof(uint16_t))
             break;
 
         size = ntohs(size);
-        ret = fread(p, 1, size, fd);
-        if (ret < 0 || ret != size) {
+        ret = fread(p, 1, size, file);
+        if (ret != size) {
             perror("Couldn't read data");
-            exit(1);
+            goto err;
         }
         p += ret;
 
         if (++nals >= threads) {
+            int decret = 0;
             pkt->data = nal;
             pkt->size = p - nal;
-            decode(c, frame, pkt);
+            if ((decret = decode(c, frame, pkt)) < 0) {
+                goto err;
+            }
             memset(nal, 0, MAX_SLICES * UINT16_MAX + AV_INPUT_BUFFER_PADDING_SIZE);
             nals = 0;
             p = nal;
@@ -207,15 +215,19 @@ int main(int argc, char **argv)
     if (nals) {
         pkt->data = nal;
         pkt->size = p - nal;
-        decode(c, frame, pkt);
+        if ((ret = decode(c, frame, pkt)) < 0) {
+            goto err;
+        }
     }
 
-    decode(c, frame, NULL);
+    ret = decode(c, frame, NULL);
 
-    fclose(fd);
-    avcodec_free_context(&c);
+err:
+    if (file)
+        fclose(file);
     av_frame_free(&frame);
+    avcodec_free_context(&c);
     av_packet_free(&pkt);
 
-    return 0;
+    return ret;
 }
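
Taken together, the rewrite replaces every exit(1) in the test with the usual single-exit cleanup pattern: a failure sets ret and jumps to the err label, which is safe because av_frame_free(), avcodec_free_context() and av_packet_free() all tolerate pointers whose contents are still NULL. Reduced to a skeleton (run() is a hypothetical stand-in for main()):

#include <libavcodec/avcodec.h>

static int run(void)
{
    int ret = 0;
    AVPacket *pkt  = NULL;
    AVFrame *frame = NULL;

    if (!(pkt = av_packet_alloc()) || !(frame = av_frame_alloc())) {
        ret = AVERROR(ENOMEM);
        goto err;
    }
    /* ... real work; every failure path does: ret = ...; goto err; ... */
err:
    av_frame_free(&frame);   /* no-ops when frame is still NULL */
    av_packet_free(&pkt);
    return ret;
}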