mirror of
https://git.ffmpeg.org/ffmpeg.git
synced 2025-12-26 17:00:09 +01:00
This commit follows the same logic as 061a0c14bb, but for the encode API: The
new public encoding API will no longer be a wrapper around the old deprecated
one, and the internal API used by the encoders now consists of a single
receive_packet() callback that pulls frames as required.
amf encoders adapted by James Almer
librav1e encoder adapted by James Almer
nvidia encoders adapted by James Almer
MediaFoundation encoders adapted by James Almer
vaapi encoders adapted by Linjie Fu
v4l2_m2m encoders adapted by Andriy Gelman
Signed-off-by: James Almer <jamrial@gmail.com>
609 lines
20 KiB
C
/*
|
|
* generic encoding-related code
|
|
*
|
|
* This file is part of FFmpeg.
|
|
*
|
|
* FFmpeg is free software; you can redistribute it and/or
|
|
* modify it under the terms of the GNU Lesser General Public
|
|
* License as published by the Free Software Foundation; either
|
|
* version 2.1 of the License, or (at your option) any later version.
|
|
*
|
|
* FFmpeg is distributed in the hope that it will be useful,
|
|
* but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
|
|
* Lesser General Public License for more details.
|
|
*
|
|
* You should have received a copy of the GNU Lesser General Public
|
|
* License along with FFmpeg; if not, write to the Free Software
|
|
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
|
|
*/
|
|
|
|
#include "libavutil/attributes.h"
|
|
#include "libavutil/avassert.h"
|
|
#include "libavutil/frame.h"
|
|
#include "libavutil/imgutils.h"
|
|
#include "libavutil/internal.h"
|
|
#include "libavutil/samplefmt.h"
|
|
|
|
#include "avcodec.h"
|
|
#include "encode.h"
|
|
#include "frame_thread_encoder.h"
|
|
#include "internal.h"
|
|
|
|
/**
 * Ensure avpkt has at least `size` bytes of writable payload for an encoder.
 *
 * Three outcomes are possible:
 *   - For packets much larger than min_size, the context-owned scratch
 *     buffer (avctx->internal->byte_buffer) is (re)used, avoiding a fresh
 *     allocation per packet. Such packets are NOT ref-counted; the caller
 *     is expected to copy the data out (see the encode wrappers below).
 *   - If the user supplied their own avpkt->data, it is validated for size
 *     and reused in place (its buf ref is preserved across av_init_packet).
 *   - Otherwise a new ref-counted packet of exactly `size` bytes is made.
 *
 * @param avctx    codec context; may be NULL, in which case the internal
 *                 byte_buffer fast path is skipped
 * @param avpkt    packet to prepare; avpkt->data/size may already be set
 *                 by the caller to provide a user buffer
 * @param size     required payload size in bytes
 * @param min_size lower bound hint used to decide whether the shared
 *                 byte_buffer is worth using (see FIXME below)
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
{
    if (avpkt->size < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid negative user packet size %d\n", avpkt->size);
        return AVERROR(EINVAL);
    }
    /* size must also leave room for the mandatory zeroed padding region */
    if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
        av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
               size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
        return AVERROR(EINVAL);
    }

    if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
        /* The caller must not pass the shared scratch buffer back in as a
         * user buffer; that would alias the reuse logic below. */
        av_assert0(!avpkt->data || avpkt->data != avctx->internal->byte_buffer);
        if (!avpkt->data || avpkt->size < size) {
            /* grow (never shrink) the context-wide scratch buffer and hand
             * it out as a non-refcounted payload */
            av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
            avpkt->data = avctx->internal->byte_buffer;
            avpkt->size = avctx->internal->byte_buffer_size;
        }
    }

    if (avpkt->data) {
        /* keep the existing ref (if any) alive across av_init_packet() */
        AVBufferRef *buf = avpkt->buf;

        if (avpkt->size < size) {
            av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size);
            return AVERROR(EINVAL);
        }

        /* reset metadata fields but retain data/buf; report exactly `size` */
        av_init_packet(avpkt);
        avpkt->buf = buf;
        avpkt->size = size;
        return 0;
    } else {
        int ret = av_new_packet(avpkt, size);
        if (ret < 0)
            av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
        return ret;
    }
}
|
|
|
|
/**
|
|
* Pad last frame with silence.
|
|
*/
|
|
static int pad_last_frame(AVCodecContext *s, AVFrame *frame, const AVFrame *src)
|
|
{
|
|
int ret;
|
|
|
|
frame->format = src->format;
|
|
frame->channel_layout = src->channel_layout;
|
|
frame->channels = src->channels;
|
|
frame->nb_samples = s->frame_size;
|
|
ret = av_frame_get_buffer(frame, 0);
|
|
if (ret < 0)
|
|
goto fail;
|
|
|
|
ret = av_frame_copy_props(frame, src);
|
|
if (ret < 0)
|
|
goto fail;
|
|
|
|
if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
|
|
src->nb_samples, s->channels, s->sample_fmt)) < 0)
|
|
goto fail;
|
|
if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
|
|
frame->nb_samples - src->nb_samples,
|
|
s->channels, s->sample_fmt)) < 0)
|
|
goto fail;
|
|
|
|
return 0;
|
|
|
|
fail:
|
|
av_frame_unref(frame);
|
|
return ret;
|
|
}
|
|
|
|
/**
 * Deprecated audio encode entry point (old encode2-based API).
 *
 * Validates the input frame (extended_data presence, frame-size rules,
 * last-frame padding), invokes the encoder's encode2() callback, then
 * fixes up packet timing and resolves ownership of the output buffer
 * (user-supplied buffer vs. the context's shared byte_buffer vs. a fresh
 * ref-counted buffer).
 *
 * @param avctx          open audio encoder context
 * @param avpkt          output packet; avpkt->data may point at a
 *                       caller-owned buffer, which will be reused if large
 *                       enough
 * @param frame          input frame, or NULL to signal end of stream
 * @param got_packet_ptr set to 1 if a packet was produced, else 0
 * @return 0 on success, negative error code on failure
 */
int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
                                              AVPacket *avpkt,
                                              const AVFrame *frame,
                                              int *got_packet_ptr)
{
    AVFrame *extended_frame = NULL;
    AVFrame *padded_frame = NULL;
    int ret;
    /* remember the caller's buffer so we can copy back into it later */
    AVPacket user_pkt = *avpkt;
    int needs_realloc = !user_pkt.data;

    *got_packet_ptr = 0;

    if (!avctx->codec->encode2) {
        av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
        return AVERROR(ENOSYS);
    }

    /* flushing a no-delay encoder trivially yields nothing */
    if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
        av_packet_unref(avpkt);
        return 0;
    }

    /* ensure that extended_data is properly set */
    if (frame && !frame->extended_data) {
        if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
            avctx->channels > AV_NUM_DATA_POINTERS) {
            /* planar audio beyond AV_NUM_DATA_POINTERS channels cannot be
             * represented by data[] alone; extended_data is mandatory */
            av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
                                        "with more than %d channels, but extended_data is not set.\n",
                   AV_NUM_DATA_POINTERS);
            return AVERROR(EINVAL);
        }
        av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");

        /* shallow-copy the frame and point extended_data at data[];
         * freed with av_free (not av_frame_free) since buffers are not owned */
        extended_frame = av_frame_alloc();
        if (!extended_frame)
            return AVERROR(ENOMEM);

        memcpy(extended_frame, frame, sizeof(AVFrame));
        extended_frame->extended_data = extended_frame->data;
        frame = extended_frame;
    }

    /* extract audio service type metadata */
    if (frame) {
        AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
        if (sd && sd->size >= sizeof(enum AVAudioServiceType))
            avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
    }

    /* check for valid frame size */
    if (frame) {
        if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
            if (frame->nb_samples > avctx->frame_size) {
                av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
                ret = AVERROR(EINVAL);
                goto end;
            }
        } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
            /* if we already got an undersized frame, that must have been the last */
            if (avctx->internal->last_audio_frame) {
                av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame (avcodec_encode_audio2)\n", avctx->frame_size);
                ret = AVERROR(EINVAL);
                goto end;
            }

            /* undersized final frame: pad it with silence to frame_size */
            if (frame->nb_samples < avctx->frame_size) {
                if (!(padded_frame = av_frame_alloc())) {
                    ret = AVERROR(ENOMEM);
                    goto end;
                }
                ret = pad_last_frame(avctx, padded_frame, frame);
                if (ret < 0)
                    goto end;

                frame = padded_frame;
                avctx->internal->last_audio_frame = 1;
            }

            if (frame->nb_samples != avctx->frame_size) {
                av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d) (avcodec_encode_audio2)\n", frame->nb_samples, avctx->frame_size);
                ret = AVERROR(EINVAL);
                goto end;
            }
        }
    }

    av_assert0(avctx->codec->encode2);

    ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
    if (!ret) {
        if (*got_packet_ptr) {
            if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
                /* no reordering delay: the packet timing follows the frame */
                if (avpkt->pts == AV_NOPTS_VALUE)
                    avpkt->pts = frame->pts;
                if (!avpkt->duration)
                    avpkt->duration = ff_samples_to_time_base(avctx,
                                                              frame->nb_samples);
            }
            /* audio packets decode in presentation order */
            avpkt->dts = avpkt->pts;
        } else {
            avpkt->size = 0;
        }
    }
    /* encoder wrote into the shared scratch buffer: hand the data back to
     * the caller's buffer, or convert to a ref-counted packet */
    if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
        needs_realloc = 0;
        if (user_pkt.data) {
            if (user_pkt.size >= avpkt->size) {
                memcpy(user_pkt.data, avpkt->data, avpkt->size);
            } else {
                av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
                avpkt->size = user_pkt.size;
                ret = -1;
            }
            avpkt->buf = user_pkt.buf;
            avpkt->data = user_pkt.data;
        } else if (!avpkt->buf) {
            ret = av_packet_make_refcounted(avpkt);
            if (ret < 0)
                goto end;
        }
    }

    if (!ret) {
        /* trim an allocated-by-us buffer down to the actual payload size
         * (plus padding) */
        if (needs_realloc && avpkt->data) {
            ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
            if (ret >= 0)
                avpkt->data = avpkt->buf->data;
        }
        if (frame)
            avctx->frame_number++;
    }

    if (ret < 0 || !*got_packet_ptr) {
        av_packet_unref(avpkt);
        goto end;
    }

    /* NOTE: if we add any audio encoders which output non-keyframe packets,
     * this needs to be moved to the encoders, but for now we can do it
     * here to simplify things */
    avpkt->flags |= AV_PKT_FLAG_KEY;

end:
    av_frame_free(&padded_frame);
    /* extended_frame is a shallow copy: free the struct only */
    av_free(extended_frame);

    return ret;
}
|
|
|
|
/**
 * Deprecated video encode entry point (old encode2-based API).
 *
 * Performs basic validation (image size, pass-1 stats reset), dispatches to
 * either the frame-threaded encoder wrapper or the encoder's encode2()
 * callback, then resolves ownership of the output buffer exactly as the
 * audio variant above does.
 *
 * @param avctx          open video encoder context
 * @param avpkt          output packet; avpkt->data may point at a
 *                       caller-owned buffer, which will be reused if large
 *                       enough
 * @param frame          input frame, or NULL to flush a delayed encoder
 * @param got_packet_ptr set to 1 if a packet was produced, else 0
 * @return 0 on success, negative error code on failure
 */
int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
                                              AVPacket *avpkt,
                                              const AVFrame *frame,
                                              int *got_packet_ptr)
{
    int ret;
    /* remember the caller's buffer so we can copy back into it later */
    AVPacket user_pkt = *avpkt;
    int needs_realloc = !user_pkt.data;

    *got_packet_ptr = 0;

    if (!avctx->codec->encode2) {
        av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
        return AVERROR(ENOSYS);
    }

    /* start a fresh stats string for two-pass rate control */
    if ((avctx->flags&AV_CODEC_FLAG_PASS1) && avctx->stats_out)
        avctx->stats_out[0] = '\0';

    /* flushing makes sense only for delayed or frame-threaded encoders */
    if (!frame &&
        !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
          (avctx->internal->frame_thread_encoder && avctx->active_thread_type & FF_THREAD_FRAME))) {
        av_packet_unref(avpkt);
        return 0;
    }

    if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
        return AVERROR(EINVAL);

    if (frame && frame->format == AV_PIX_FMT_NONE)
        av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
    if (frame && (frame->width == 0 || frame->height == 0))
        av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");

    av_assert0(avctx->codec->encode2);


    if (CONFIG_FRAME_THREAD_ENCODER &&
        avctx->internal->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
        ret = ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
    else {
        ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
        /* no reordering delay: output timing equals input timing */
        if (*got_packet_ptr && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
            avpkt->pts = avpkt->dts = frame->pts;
    }
    av_assert0(ret <= 0);

    /* clear MMX state in case the encoder used x86 SIMD */
    emms_c();

    /* encoder wrote into the shared scratch buffer: hand the data back to
     * the caller's buffer, or convert to a ref-counted packet */
    if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
        needs_realloc = 0;
        if (user_pkt.data) {
            if (user_pkt.size >= avpkt->size) {
                memcpy(user_pkt.data, avpkt->data, avpkt->size);
            } else {
                av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
                avpkt->size = user_pkt.size;
                ret = -1;
            }
            avpkt->buf = user_pkt.buf;
            avpkt->data = user_pkt.data;
        } else if (!avpkt->buf) {
            ret = av_packet_make_refcounted(avpkt);
            if (ret < 0)
                return ret;
        }
    }

    if (!ret) {
        if (!*got_packet_ptr)
            avpkt->size = 0;

        /* trim an allocated-by-us buffer down to the actual payload size
         * (plus padding) */
        if (needs_realloc && avpkt->data) {
            ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
            if (ret >= 0)
                avpkt->data = avpkt->buf->data;
        }

        if (frame)
            avctx->frame_number++;
    }

    if (ret < 0 || !*got_packet_ptr)
        av_packet_unref(avpkt);

    return ret;
}
|
|
|
|
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
|
|
const AVSubtitle *sub)
|
|
{
|
|
int ret;
|
|
if (sub->start_display_time) {
|
|
av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
|
|
return -1;
|
|
}
|
|
|
|
ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
|
|
avctx->frame_number++;
|
|
return ret;
|
|
}
|
|
|
|
/**
 * Hand the buffered input frame to an encoder that asked for more input.
 *
 * @return 0 and moves the buffered frame into `frame` on success;
 *         AVERROR_EOF once draining has started (no more input will come);
 *         AVERROR(EAGAIN) when no frame has been buffered yet.
 */
int ff_encode_get_frame(AVCodecContext *avctx, AVFrame *frame)
{
    AVFrame *buffered = avctx->internal->buffer_frame;

    /* once the user sent the flush NULL frame, no input remains */
    if (avctx->internal->draining)
        return AVERROR_EOF;

    /* nothing queued: the caller must send a frame first */
    if (!buffered->buf[0])
        return AVERROR(EAGAIN);

    av_frame_move_ref(frame, buffered);

    return 0;
}
|
|
|
|
/**
 * One iteration of the encode2()-based compatibility path: feed at most one
 * frame (or a flush NULL) to the encoder and collect at most one packet.
 *
 * May legitimately produce no packet (got_packet == 0) even on success,
 * e.g. while a delayed encoder is buffering input; the caller loops.
 *
 * @return 0 on success (packet may or may not be produced),
 *         AVERROR_EOF once draining has fully completed,
 *         other negative error codes on failure
 */
static int encode_simple_internal(AVCodecContext *avctx, AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    EncodeSimpleContext *es = &avci->es;
    /* persistent in_frame: may still hold a frame from a previous call */
    AVFrame *frame = es->in_frame;
    int got_packet;
    int ret;

    if (avci->draining_done)
        return AVERROR_EOF;

    /* refill in_frame from the buffered user frame if it is empty */
    if (!frame->buf[0] && !avci->draining) {
        av_frame_unref(frame);
        ret = ff_encode_get_frame(avctx, frame);
        /* EOF here just means draining started; fall through to flush */
        if (ret < 0 && ret != AVERROR_EOF)
            return ret;
    }

    if (!frame->buf[0]) {
        /* nothing buffered: only delayed or frame-threaded encoders have
         * pending output worth flushing */
        if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
              (avci->frame_thread_encoder && avctx->active_thread_type & FF_THREAD_FRAME)))
            return AVERROR_EOF;

        // Flushing is signaled with a NULL frame
        frame = NULL;
    }

    got_packet = 0;

    av_assert0(avctx->codec->encode2);

    if (CONFIG_FRAME_THREAD_ENCODER &&
        avci->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
        ret = ff_thread_video_encode_frame(avctx, avpkt, frame, &got_packet);
    else {
        ret = avctx->codec->encode2(avctx, avpkt, frame, &got_packet);
        /* no-delay video: packet timing equals frame timing */
        if (avctx->codec->type == AVMEDIA_TYPE_VIDEO && !ret && got_packet &&
            !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
            avpkt->pts = avpkt->dts = frame->pts;
    }

    av_assert0(ret <= 0);

    /* clear MMX state in case the encoder used x86 SIMD */
    emms_c();

    if (!ret && got_packet) {
        if (avpkt->data) {
            /* make the packet ref-counted before it leaves internal code */
            ret = av_packet_make_refcounted(avpkt);
            if (ret < 0)
                goto end;
        }

        if (frame && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
            if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
                /* derive missing audio timing from the input frame */
                if (avpkt->pts == AV_NOPTS_VALUE)
                    avpkt->pts = frame->pts;
                if (!avpkt->duration)
                    avpkt->duration = ff_samples_to_time_base(avctx,
                                                              frame->nb_samples);
            }
        }
        if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
            /* NOTE: if we add any audio encoders which output non-keyframe packets,
             * this needs to be moved to the encoders, but for now we can do it
             * here to simplify things */
            avpkt->flags |= AV_PKT_FLAG_KEY;
            avpkt->dts = avpkt->pts;
        }
    }

    /* a flush call that produced nothing means the encoder is fully drained */
    if (avci->draining && !got_packet)
        avci->draining_done = 1;

end:
    if (ret < 0 || !got_packet)
        av_packet_unref(avpkt);

    if (frame) {
        if (!ret)
            avctx->frame_number++;
        /* in_frame was consumed (or errored); clear it for the next call */
        av_frame_unref(frame);
    }

    if (got_packet)
        // Encoders must always return ref-counted buffers.
        // Side-data only packets have no data and can be not ref-counted.
        av_assert0(!avpkt->data || avpkt->buf);

    return ret;
}
|
|
|
|
/**
 * Drive the encode2() compatibility path until a packet (payload or
 * side-data-only) is produced, or an error / EOF occurs.
 */
static int encode_simple_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    /* a single encode step may consume input without emitting output,
     * so keep stepping until the packet is non-empty */
    while (!avpkt->data && !avpkt->side_data) {
        int err = encode_simple_internal(avctx, avpkt);
        if (err < 0)
            return err;
    }

    return 0;
}
|
|
|
|
/**
 * Produce the next output packet, dispatching to either the encoder's
 * native receive_packet() callback or the encode2() compatibility loop.
 *
 * Must be called with an empty avpkt; records completed draining so that
 * subsequent calls return AVERROR_EOF immediately.
 */
static int encode_receive_packet_internal(AVCodecContext *avctx, AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;
    int err;

    if (avci->draining_done)
        return AVERROR_EOF;

    av_assert0(!avpkt->data && !avpkt->side_data);

    if (avctx->codec->type == AVMEDIA_TYPE_VIDEO) {
        /* reset the pass-1 stats string and validate dimensions up front */
        if ((avctx->flags & AV_CODEC_FLAG_PASS1) && avctx->stats_out)
            avctx->stats_out[0] = '\0';
        if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
            return AVERROR(EINVAL);
    }

    if (!avctx->codec->receive_packet) {
        err = encode_simple_receive_packet(avctx, avpkt);
    } else {
        err = avctx->codec->receive_packet(avctx, avpkt);
        if (!err)
            // Encoders must always return ref-counted buffers.
            // Side-data only packets have no data and can be not ref-counted.
            av_assert0(!avpkt->data || avpkt->buf);
    }

    if (err == AVERROR_EOF)
        avci->draining_done = 1;

    return err;
}
|
|
|
|
/**
 * Validate an incoming frame and stash a reference to it in
 * avci->buffer_frame for the encoder to pull via ff_encode_get_frame().
 *
 * For audio this also enforces the frame-size contract: fixed-frame-size
 * encoders must receive exactly frame_size samples per frame, except that
 * the final (shorter) frame is padded with silence here.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int encode_send_frame_internal(AVCodecContext *avctx, const AVFrame *src)
{
    AVCodecInternal *avci = avctx->internal;
    AVFrame *dst = avci->buffer_frame;
    int ret;

    if (avctx->codec->type == AVMEDIA_TYPE_AUDIO) {
        /* extract audio service type metadata */
        AVFrameSideData *sd = av_frame_get_side_data(src, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
        if (sd && sd->size >= sizeof(enum AVAudioServiceType))
            avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;

        /* check for valid frame size */
        if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
            /* encoder tolerates a short last frame but never an oversized one */
            if (src->nb_samples > avctx->frame_size) {
                av_log(avctx, AV_LOG_ERROR, "more samples than frame size\n");
                return AVERROR(EINVAL);
            }
        } else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
            /* if we already got an undersized frame, that must have been the last */
            if (avctx->internal->last_audio_frame) {
                av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame\n", avctx->frame_size);
                return AVERROR(EINVAL);
            }

            if (src->nb_samples < avctx->frame_size) {
                /* pad the (presumed last) short frame with silence directly
                 * into the buffer frame; dst then already holds the data and
                 * the av_frame_ref below is skipped */
                ret = pad_last_frame(avctx, dst, src);
                if (ret < 0)
                    return ret;

                avctx->internal->last_audio_frame = 1;
            } else if (src->nb_samples > avctx->frame_size) {
                av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d)\n", src->nb_samples, avctx->frame_size);
                return AVERROR(EINVAL);
            }
        }
    }

    /* not already filled by pad_last_frame(): take a reference to src */
    if (!dst->data[0]) {
        ret = av_frame_ref(dst, src);
        if (ret < 0)
            return ret;
    }

    return 0;
}
|
|
|
|
/**
 * Public entry point: submit a frame (or NULL to start draining) to an
 * open encoder.
 *
 * The frame is buffered internally; as an optimization, one packet is
 * eagerly pulled from the encoder into buffer_pkt so encoders that cannot
 * hold on to input frames make progress immediately.
 *
 * @return 0 on success; AVERROR(EINVAL) if the context is not an open
 *         encoder; AVERROR_EOF after draining started; AVERROR(EAGAIN)
 *         if the previous frame has not been consumed yet.
 */
int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
    AVCodecInternal *avci = avctx->internal;
    int err;

    if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
        return AVERROR(EINVAL);

    if (avci->draining)
        return AVERROR_EOF;

    /* the single-frame input buffer is still occupied */
    if (avci->buffer_frame->data[0])
        return AVERROR(EAGAIN);

    if (frame) {
        err = encode_send_frame_internal(avctx, frame);
        if (err < 0)
            return err;
    } else {
        /* NULL frame switches the encoder into draining mode */
        avci->draining = 1;
    }

    /* eagerly pull one packet so the buffered frame gets consumed */
    if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data) {
        err = encode_receive_packet_internal(avctx, avci->buffer_pkt);
        if (err < 0 && err != AVERROR(EAGAIN) && err != AVERROR_EOF)
            return err;
    }

    return 0;
}
|
|
|
|
/**
 * Public entry point: retrieve the next encoded packet from an open
 * encoder.
 *
 * Returns the packet eagerly buffered by avcodec_send_frame() if one is
 * pending, otherwise asks the encoder directly. avpkt is always
 * unreferenced first, so on failure it is left blank.
 *
 * @return 0 on success, AVERROR(EINVAL) if the context is not an open
 *         encoder, or whatever encode_receive_packet_internal() reports
 *         (e.g. AVERROR(EAGAIN), AVERROR_EOF).
 */
int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
    AVCodecInternal *avci = avctx->internal;

    av_packet_unref(avpkt);

    if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
        return AVERROR(EINVAL);

    if (!avci->buffer_pkt->data && !avci->buffer_pkt->side_data) {
        int err = encode_receive_packet_internal(avctx, avpkt);
        if (err < 0)
            return err;
    } else {
        /* a packet was already produced during avcodec_send_frame() */
        av_packet_move_ref(avpkt, avci->buffer_pkt);
    }

    return 0;
}
|