pineapple-src/externals/ffmpeg/libavcodec/encode.c

/*
* generic encoding-related code
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/

#include "libavutil/attributes.h"
#include "libavutil/avassert.h"
#include "libavutil/frame.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/samplefmt.h"
#include "avcodec.h"
#include "frame_thread_encoder.h"
#include "internal.h"
int ff_alloc_packet2(AVCodecContext *avctx, AVPacket *avpkt, int64_t size, int64_t min_size)
{
if (avpkt->size < 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid negative user packet size %d\n", avpkt->size);
return AVERROR(EINVAL);
}
if (size < 0 || size > INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE) {
av_log(avctx, AV_LOG_ERROR, "Invalid minimum required packet size %"PRId64" (max allowed is %d)\n",
size, INT_MAX - AV_INPUT_BUFFER_PADDING_SIZE);
return AVERROR(EINVAL);
}
if (avctx && 2*min_size < size) { // FIXME The factor needs to be finetuned
av_assert0(!avpkt->data || avpkt->data != avctx->internal->byte_buffer);
if (!avpkt->data || avpkt->size < size) {
av_fast_padded_malloc(&avctx->internal->byte_buffer, &avctx->internal->byte_buffer_size, size);
avpkt->data = avctx->internal->byte_buffer;
avpkt->size = avctx->internal->byte_buffer_size;
}
}
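    /* At this point avpkt->data is either the caller's buffer or the internal
     * byte_buffer chosen above: validate its size and reinitialize the packet
     * around it. With no buffer at all, allocate a new refcounted packet. */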
if (avpkt->data) {
AVBufferRef *buf = avpkt->buf;
if (avpkt->size < size) {
av_log(avctx, AV_LOG_ERROR, "User packet is too small (%d < %"PRId64")\n", avpkt->size, size);
return AVERROR(EINVAL);
}
av_init_packet(avpkt);
avpkt->buf = buf;
avpkt->size = size;
return 0;
} else {
int ret = av_new_packet(avpkt, size);
if (ret < 0)
av_log(avctx, AV_LOG_ERROR, "Failed to allocate packet of size %"PRId64"\n", size);
return ret;
}
}

/**
* Pad last frame with silence.
*/
static int pad_last_frame(AVCodecContext *s, AVFrame **dst, const AVFrame *src)
{
AVFrame *frame = NULL;
int ret;
if (!(frame = av_frame_alloc()))
return AVERROR(ENOMEM);
frame->format = src->format;
frame->channel_layout = src->channel_layout;
frame->channels = src->channels;
frame->nb_samples = s->frame_size;
ret = av_frame_get_buffer(frame, 0);
if (ret < 0)
goto fail;
ret = av_frame_copy_props(frame, src);
if (ret < 0)
goto fail;
if ((ret = av_samples_copy(frame->extended_data, src->extended_data, 0, 0,
src->nb_samples, s->channels, s->sample_fmt)) < 0)
goto fail;
if ((ret = av_samples_set_silence(frame->extended_data, src->nb_samples,
frame->nb_samples - src->nb_samples,
s->channels, s->sample_fmt)) < 0)
goto fail;
*dst = frame;
return 0;
fail:
av_frame_free(&frame);
return ret;
}
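
/*
 * Legacy audio encode API: validate the input frame, pad a short final frame
 * with silence for fixed-frame-size encoders, run the encoder's encode2()
 * callback, fill in missing timestamps, and copy the result back into the
 * caller's packet when the internal byte_buffer was used.
 */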
int attribute_align_arg avcodec_encode_audio2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
AVFrame *extended_frame = NULL;
AVFrame *padded_frame = NULL;
int ret;
AVPacket user_pkt = *avpkt;
int needs_realloc = !user_pkt.data;
*got_packet_ptr = 0;
if (!avctx->codec->encode2) {
av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
return AVERROR(ENOSYS);
}
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY) && !frame) {
av_packet_unref(avpkt);
return 0;
}
/* ensure that extended_data is properly set */
if (frame && !frame->extended_data) {
if (av_sample_fmt_is_planar(avctx->sample_fmt) &&
avctx->channels > AV_NUM_DATA_POINTERS) {
av_log(avctx, AV_LOG_ERROR, "Encoding to a planar sample format, "
"with more than %d channels, but extended_data is not set.\n",
AV_NUM_DATA_POINTERS);
return AVERROR(EINVAL);
}
av_log(avctx, AV_LOG_WARNING, "extended_data is not set.\n");
extended_frame = av_frame_alloc();
if (!extended_frame)
return AVERROR(ENOMEM);
memcpy(extended_frame, frame, sizeof(AVFrame));
extended_frame->extended_data = extended_frame->data;
frame = extended_frame;
}
/* extract audio service type metadata */
if (frame) {
AVFrameSideData *sd = av_frame_get_side_data(frame, AV_FRAME_DATA_AUDIO_SERVICE_TYPE);
if (sd && sd->size >= sizeof(enum AVAudioServiceType))
avctx->audio_service_type = *(enum AVAudioServiceType*)sd->data;
}
/* check for valid frame size */
if (frame) {
if (avctx->codec->capabilities & AV_CODEC_CAP_SMALL_LAST_FRAME) {
if (frame->nb_samples > avctx->frame_size) {
av_log(avctx, AV_LOG_ERROR, "more samples than frame size (avcodec_encode_audio2)\n");
ret = AVERROR(EINVAL);
goto end;
}
} else if (!(avctx->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE)) {
/* if we already got an undersized frame, that must have been the last */
if (avctx->internal->last_audio_frame) {
av_log(avctx, AV_LOG_ERROR, "frame_size (%d) was not respected for a non-last frame (avcodec_encode_audio2)\n", avctx->frame_size);
ret = AVERROR(EINVAL);
goto end;
}
if (frame->nb_samples < avctx->frame_size) {
ret = pad_last_frame(avctx, &padded_frame, frame);
if (ret < 0)
goto end;
frame = padded_frame;
avctx->internal->last_audio_frame = 1;
}
if (frame->nb_samples != avctx->frame_size) {
av_log(avctx, AV_LOG_ERROR, "nb_samples (%d) != frame_size (%d) (avcodec_encode_audio2)\n", frame->nb_samples, avctx->frame_size);
ret = AVERROR(EINVAL);
goto end;
}
}
}
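    /* Run the actual encoder; for codecs without AV_CODEC_CAP_DELAY, derive
     * any missing pts/duration from the input frame. */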
av_assert0(avctx->codec->encode2);
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (!ret) {
if (*got_packet_ptr) {
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY)) {
if (avpkt->pts == AV_NOPTS_VALUE)
avpkt->pts = frame->pts;
if (!avpkt->duration)
avpkt->duration = ff_samples_to_time_base(avctx,
frame->nb_samples);
}
avpkt->dts = avpkt->pts;
} else {
avpkt->size = 0;
}
}
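    /* If the encoder wrote into the context's internal byte_buffer, copy the
     * data back into the caller-supplied packet, or make the packet
     * refcounted when no user buffer was given. */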
if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
needs_realloc = 0;
if (user_pkt.data) {
if (user_pkt.size >= avpkt->size) {
memcpy(user_pkt.data, avpkt->data, avpkt->size);
} else {
av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
avpkt->size = user_pkt.size;
ret = -1;
}
avpkt->buf = user_pkt.buf;
avpkt->data = user_pkt.data;
} else if (!avpkt->buf) {
ret = av_packet_make_refcounted(avpkt);
if (ret < 0)
goto end;
}
}
if (!ret) {
if (needs_realloc && avpkt->data) {
ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
if (ret >= 0)
avpkt->data = avpkt->buf->data;
}
if (frame)
avctx->frame_number++;
}
if (ret < 0 || !*got_packet_ptr) {
av_packet_unref(avpkt);
goto end;
}
/* NOTE: if we add any audio encoders which output non-keyframe packets,
* this needs to be moved to the encoders, but for now we can do it
* here to simplify things */
avpkt->flags |= AV_PKT_FLAG_KEY;
end:
av_frame_free(&padded_frame);
av_free(extended_frame);
return ret;
}
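
/*
 * Legacy video encode API: dispatch to the frame-threaded encoder when it is
 * active, otherwise call the encoder's encode2() callback directly, then
 * perform the same byte_buffer copy-back and padding fixups as the audio
 * path.
 */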
int attribute_align_arg avcodec_encode_video2(AVCodecContext *avctx,
AVPacket *avpkt,
const AVFrame *frame,
int *got_packet_ptr)
{
int ret;
AVPacket user_pkt = *avpkt;
int needs_realloc = !user_pkt.data;
*got_packet_ptr = 0;
if (!avctx->codec->encode2) {
av_log(avctx, AV_LOG_ERROR, "This encoder requires using the avcodec_send_frame() API.\n");
return AVERROR(ENOSYS);
}
if ((avctx->flags&AV_CODEC_FLAG_PASS1) && avctx->stats_out)
avctx->stats_out[0] = '\0';
if (!frame &&
!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY ||
(avctx->internal->frame_thread_encoder && avctx->active_thread_type & FF_THREAD_FRAME))) {
av_packet_unref(avpkt);
return 0;
}
if (av_image_check_size2(avctx->width, avctx->height, avctx->max_pixels, AV_PIX_FMT_NONE, 0, avctx))
return AVERROR(EINVAL);
if (frame && frame->format == AV_PIX_FMT_NONE)
av_log(avctx, AV_LOG_WARNING, "AVFrame.format is not set\n");
if (frame && (frame->width == 0 || frame->height == 0))
av_log(avctx, AV_LOG_WARNING, "AVFrame.width or height is not set\n");
av_assert0(avctx->codec->encode2);
if (CONFIG_FRAME_THREAD_ENCODER &&
avctx->internal->frame_thread_encoder && (avctx->active_thread_type & FF_THREAD_FRAME))
ret = ff_thread_video_encode_frame(avctx, avpkt, frame, got_packet_ptr);
else {
ret = avctx->codec->encode2(avctx, avpkt, frame, got_packet_ptr);
if (*got_packet_ptr && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
avpkt->pts = avpkt->dts = frame->pts;
}
av_assert0(ret <= 0);
emms_c();
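    /* Same copy-back as in the audio path: move data out of the internal
     * byte_buffer into the caller's packet, or make the packet refcounted. */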
if (avpkt->data && avpkt->data == avctx->internal->byte_buffer) {
needs_realloc = 0;
if (user_pkt.data) {
if (user_pkt.size >= avpkt->size) {
memcpy(user_pkt.data, avpkt->data, avpkt->size);
} else {
av_log(avctx, AV_LOG_ERROR, "Provided packet is too small, needs to be %d\n", avpkt->size);
avpkt->size = user_pkt.size;
ret = -1;
}
avpkt->buf = user_pkt.buf;
avpkt->data = user_pkt.data;
} else if (!avpkt->buf) {
ret = av_packet_make_refcounted(avpkt);
if (ret < 0)
return ret;
}
}
if (!ret) {
if (!*got_packet_ptr)
avpkt->size = 0;
if (needs_realloc && avpkt->data) {
ret = av_buffer_realloc(&avpkt->buf, avpkt->size + AV_INPUT_BUFFER_PADDING_SIZE);
if (ret >= 0)
avpkt->data = avpkt->buf->data;
}
if (frame)
avctx->frame_number++;
}
if (ret < 0 || !*got_packet_ptr)
av_packet_unref(avpkt);
return ret;
}
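
/*
 * Subtitle encoding is a thin wrapper: a nonzero start_display_time is
 * rejected and the actual work is delegated to the encoder's encode_sub()
 * callback.
 */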
int avcodec_encode_subtitle(AVCodecContext *avctx, uint8_t *buf, int buf_size,
const AVSubtitle *sub)
{
int ret;
if (sub->start_display_time) {
av_log(avctx, AV_LOG_ERROR, "start_display_time must be 0.\n");
return -1;
}
ret = avctx->codec->encode_sub(avctx, buf, buf_size, sub);
avctx->frame_number++;
return ret;
}
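
/*
 * Helper for the send_frame()/receive_packet() emulation below: run the
 * legacy per-type encode function and stash any produced packet in the
 * context's buffer_pkt, flagging it valid only when output was generated.
 */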
static int do_encode(AVCodecContext *avctx, const AVFrame *frame, int *got_packet)
{
int ret;
*got_packet = 0;
av_packet_unref(avctx->internal->buffer_pkt);
avctx->internal->buffer_pkt_valid = 0;
if (avctx->codec_type == AVMEDIA_TYPE_VIDEO) {
ret = avcodec_encode_video2(avctx, avctx->internal->buffer_pkt,
frame, got_packet);
} else if (avctx->codec_type == AVMEDIA_TYPE_AUDIO) {
ret = avcodec_encode_audio2(avctx, avctx->internal->buffer_pkt,
frame, got_packet);
} else {
ret = AVERROR(EINVAL);
}
if (ret >= 0 && *got_packet) {
// Encoders must always return ref-counted buffers.
// Side-data only packets have no data and can be not ref-counted.
av_assert0(!avctx->internal->buffer_pkt->data || avctx->internal->buffer_pkt->buf);
avctx->internal->buffer_pkt_valid = 1;
ret = 0;
} else {
av_packet_unref(avctx->internal->buffer_pkt);
}
return ret;
}
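
/*
 * Send half of the send/receive encode API. A NULL frame enters draining
 * mode; codecs that implement send_frame() are called directly and all
 * others are emulated on top of the legacy API via do_encode() above.
 *
 * Illustrative caller-side sketch (enc_ctx, frame and pkt are assumed to be
 * set up by the caller; error handling is abbreviated):
 *
 *     ret = avcodec_send_frame(enc_ctx, frame);      // NULL frame == flush
 *     while (ret >= 0) {
 *         ret = avcodec_receive_packet(enc_ctx, pkt);
 *         if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
 *             break;
 *         // ... consume pkt ...
 *         av_packet_unref(pkt);
 *     }
 */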
int attribute_align_arg avcodec_send_frame(AVCodecContext *avctx, const AVFrame *frame)
{
if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
return AVERROR(EINVAL);
if (avctx->internal->draining)
return AVERROR_EOF;
if (!frame) {
avctx->internal->draining = 1;
if (!(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
return 0;
}
if (avctx->codec->send_frame)
return avctx->codec->send_frame(avctx, frame);
// Emulation via old API. Do it here instead of avcodec_receive_packet, because:
// 1. if the AVFrame is not refcounted, the copying will be much more
// expensive than copying the packet data
// 2. assume few users use non-refcounted AVPackets, so usually no copy is
// needed
if (avctx->internal->buffer_pkt_valid)
return AVERROR(EAGAIN);
return do_encode(avctx, frame, &(int){0});
}
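
/*
 * Receive half of the send/receive encode API: return either a packet
 * produced by the codec's own receive_packet() callback or the one buffered
 * by the legacy-API emulation, AVERROR(EAGAIN) while more input is needed,
 * and AVERROR_EOF once draining has completed.
 */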
int attribute_align_arg avcodec_receive_packet(AVCodecContext *avctx, AVPacket *avpkt)
{
av_packet_unref(avpkt);
if (!avcodec_is_open(avctx) || !av_codec_is_encoder(avctx->codec))
return AVERROR(EINVAL);
if (avctx->codec->receive_packet) {
int ret;
if (avctx->internal->draining && !(avctx->codec->capabilities & AV_CODEC_CAP_DELAY))
return AVERROR_EOF;
ret = avctx->codec->receive_packet(avctx, avpkt);
if (!ret)
// Encoders must always return ref-counted buffers.
// Side-data only packets have no data and can be not ref-counted.
av_assert0(!avpkt->data || avpkt->buf);
return ret;
}
// Emulation via old API.
if (!avctx->internal->buffer_pkt_valid) {
int got_packet;
int ret;
if (!avctx->internal->draining)
return AVERROR(EAGAIN);
ret = do_encode(avctx, NULL, &got_packet);
if (ret < 0)
return ret;
if (ret >= 0 && !got_packet)
return AVERROR_EOF;
}
av_packet_move_ref(avpkt, avctx->internal->buffer_pkt);
avctx->internal->buffer_pkt_valid = 0;
return 0;
}