early-access version 1680

This commit is contained in:
pineappleEA
2021-05-13 11:45:27 +02:00
parent 1434d96e7d
commit 66ed389c6f
311 changed files with 6452 additions and 2597 deletions

View File

@@ -182,6 +182,7 @@ OBJS-$(CONFIG_CAS_FILTER) += vf_cas.o
OBJS-$(CONFIG_CHROMABER_VULKAN_FILTER) += vf_chromaber_vulkan.o vulkan.o
OBJS-$(CONFIG_CHROMAHOLD_FILTER) += vf_chromakey.o
OBJS-$(CONFIG_CHROMAKEY_FILTER) += vf_chromakey.o
OBJS-$(CONFIG_CHROMANR_FILTER) += vf_chromanr.o
OBJS-$(CONFIG_CHROMASHIFT_FILTER) += vf_chromashift.o
OBJS-$(CONFIG_CIESCOPE_FILTER) += vf_ciescope.o
OBJS-$(CONFIG_CODECVIEW_FILTER) += vf_codecview.o

View File

@@ -416,8 +416,6 @@ static int aeval_config_output(AVFilterLink *outlink)
return 0;
}
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
EvalContext *eval = inlink->dst->priv;

View File

@@ -293,10 +293,19 @@ static int request_frame(AVFilterLink *outlink)
return ret;
}
#if FF_API_CHILD_CLASS_NEXT
static const AVClass *resample_child_class_next(const AVClass *prev)
{
return prev ? NULL : swr_get_class();
}
#endif
static const AVClass *resample_child_class_iterate(void **iter)
{
const AVClass *c = *iter ? NULL : swr_get_class();
*iter = (void*)(uintptr_t)c;
return c;
}
static void *resample_child_next(void *obj, void *prev)
{
@@ -317,7 +326,10 @@ static const AVClass aresample_class = {
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
#if FF_API_CHILD_CLASS_NEXT
.child_class_next = resample_child_class_next,
#endif
.child_class_iterate = resample_child_class_iterate,
.child_next = resample_child_next,
};
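
The two aresample hunks above replace the deprecated child_class_next() callback with child_class_iterate(), which walks child classes through an opaque cursor instead of chaining off the previously returned class. A minimal self-contained sketch of the iterate convention follows; the demo types and names are hypothetical stand-ins, not FFmpeg's API:

#include <stdint.h>
#include <stdio.h>

/* Hypothetical stand-ins for AVClass and swr_get_class(). */
typedef struct DemoClass { const char *name; } DemoClass;

static const DemoClass demo_child = { "swresample" };

/* Mirrors the child_class_iterate convention: the opaque cursor starts at
 * NULL; each call returns the next child class and advances *iter. With a
 * single child, storing the returned pointer in *iter makes the second
 * call return NULL and end the iteration. */
static const DemoClass *demo_child_class_iterate(void **iter)
{
    const DemoClass *c = *iter ? NULL : &demo_child;
    *iter = (void *)(uintptr_t)c;
    return c;
}

int main(void)
{
    void *iter = NULL;
    const DemoClass *c;
    while ((c = demo_child_class_iterate(&iter)))
        printf("child class: %s\n", c->name);
    return 0;
}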

View File

@@ -64,6 +64,9 @@ typedef struct LADSPAContext {
int nb_samples;
int64_t pts;
int64_t duration;
int in_trim;
int out_pad;
int latency;
} LADSPAContext;
#define OFFSET(x) offsetof(LADSPAContext, x)
@@ -81,11 +84,28 @@ static const AVOption ladspa_options[] = {
{ "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
{ "duration", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=-1}, -1, INT64_MAX, FLAGS },
{ "d", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=-1}, -1, INT64_MAX, FLAGS },
{ "latency", "enable latency compensation", OFFSET(latency), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
{ "l", "enable latency compensation", OFFSET(latency), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
{ NULL }
};
AVFILTER_DEFINE_CLASS(ladspa);
static int find_latency(AVFilterContext *ctx, LADSPAContext *s)
{
int latency = 0;
for (int ctl = 0; ctl < s->nb_outputcontrols; ctl++) {
if (av_strcasecmp("latency", s->desc->PortNames[s->ocmap[ctl]]))
continue;
latency = lrintf(s->octlv[ctl]);
break;
}
return latency;
}
static void print_ctl_info(AVFilterContext *ctx, int level,
LADSPAContext *s, int ctl, unsigned long *map,
LADSPA_Data *values, int print)
@@ -143,12 +163,13 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
AVFilterContext *ctx = inlink->dst;
LADSPAContext *s = ctx->priv;
AVFrame *out;
int i, h, p;
int i, h, p, new_out_samples;
av_assert0(in->channels == (s->nb_inputs * s->nb_handles));
if (!s->nb_outputs ||
(av_frame_is_writable(in) && s->nb_inputs == s->nb_outputs &&
s->in_trim == 0 && s->out_pad == 0 &&
!(s->desc->Properties & LADSPA_PROPERTY_INPLACE_BROKEN))) {
out = in;
} else {
@@ -176,6 +197,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
}
s->desc->run(s->handles[h], in->nb_samples);
if (s->latency)
s->in_trim = s->out_pad = find_latency(ctx, s);
s->latency = 0;
}
for (i = 0; i < s->nb_outputcontrols; i++)
@@ -184,6 +208,25 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
if (out != in)
av_frame_free(&in);
new_out_samples = out->nb_samples;
if (s->in_trim > 0) {
int trim = FFMIN(new_out_samples, s->in_trim);
new_out_samples -= trim;
s->in_trim -= trim;
}
if (new_out_samples <= 0) {
av_frame_free(&out);
return 0;
} else if (new_out_samples < out->nb_samples) {
int offset = out->nb_samples - new_out_samples;
for (int ch = 0; ch < out->channels; ch++)
memmove(out->extended_data[ch], out->extended_data[ch] + sizeof(float) * offset,
sizeof(float) * new_out_samples);
out->nb_samples = new_out_samples;
}
return ff_filter_frame(ctx->outputs[0], out);
}
@@ -195,8 +238,19 @@ static int request_frame(AVFilterLink *outlink)
int64_t t;
int i;
if (ctx->nb_inputs)
return ff_request_frame(ctx->inputs[0]);
if (ctx->nb_inputs) {
int ret = ff_request_frame(ctx->inputs[0]);
if (ret == AVERROR_EOF && s->out_pad > 0) {
AVFrame *frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->out_pad));
if (!frame)
return AVERROR(ENOMEM);
s->out_pad -= frame->nb_samples;
return filter_frame(ctx->inputs[0], frame);
}
return ret;
}
t = av_rescale(s->pts, AV_TIME_BASE, s->sample_rate);
if (s->duration >= 0 && t >= s->duration)
@@ -415,6 +469,7 @@ static av_cold int init(AVFilterContext *ctx)
} else {
// argument is a shared object name
char *paths = av_strdup(getenv("LADSPA_PATH"));
const char *home_path = getenv("HOME");
const char *separator = ":";
if (paths) {
@@ -426,7 +481,12 @@ static av_cold int init(AVFilterContext *ctx)
}
av_free(paths);
if (!s->dl_handle && (paths = av_asprintf("%s/.ladspa/lib", getenv("HOME")))) {
if (!s->dl_handle && home_path && (paths = av_asprintf("%s/.ladspa", home_path))) {
s->dl_handle = try_load(paths, s->dl_name);
av_free(paths);
}
if (!s->dl_handle && home_path && (paths = av_asprintf("%s/.ladspa/lib", home_path))) {
s->dl_handle = try_load(paths, s->dl_name);
av_free(paths);
}
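
The latency option added above works in two halves: filter_frame() drops the first in_trim output samples (the delay the plugin reports through its "latency" control port), and at EOF request_frame() keeps feeding silent frames until out_pad samples have been flushed, so the stream keeps its original length while losing the delay. A rough single-channel sketch of that bookkeeping, outside any FFmpeg context:

#include <stdio.h>
#include <string.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

/* Hypothetical model of the in_trim bookkeeping: drop up to *in_trim
 * samples from the head of the block and shift the survivors forward,
 * as the memmove in filter_frame() does. */
static int trim_block(float *buf, int nb_samples, int *in_trim)
{
    int drop = MIN(nb_samples, *in_trim);
    *in_trim -= drop;
    memmove(buf, buf + drop, sizeof(float) * (nb_samples - drop));
    return nb_samples - drop;   /* samples that survive the trim */
}

int main(void)
{
    float block[8] = {1, 2, 3, 4, 5, 6, 7, 8};
    int in_trim = 3, out_pad = 3;   /* plugin reported 3 samples of latency */

    int kept = trim_block(block, 8, &in_trim);
    printf("kept %d samples, first = %g\n", kept, block[0]);   /* 5 samples, 4 */

    /* At EOF, request_frame() would flush out_pad zeroed samples. */
    printf("flush %d silent samples at EOF\n", out_pad);
    return 0;
}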

View File

@@ -306,10 +306,19 @@ fail:
return ret;
}
#if FF_API_CHILD_CLASS_NEXT
static const AVClass *resample_child_class_next(const AVClass *prev)
{
return prev ? NULL : avresample_get_class();
}
#endif
static const AVClass *resample_child_class_iterate(void **iter)
{
const AVClass *c = *iter ? NULL : avresample_get_class();
*iter = (void*)(uintptr_t)c;
return c;
}
static void *resample_child_next(void *obj, void *prev)
{
@@ -321,7 +330,10 @@ static const AVClass resample_class = {
.class_name = "resample",
.item_name = av_default_item_name,
.version = LIBAVUTIL_VERSION_INT,
#if FF_API_CHILD_CLASS_NEXT
.child_class_next = resample_child_class_next,
#endif
.child_class_iterate = resample_child_class_iterate,
.child_next = resample_child_next,
};

View File

@@ -212,6 +212,7 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar
rubberband_set_time_ratio(s->rbs, 1. / s->tempo);
rubberband_set_pitch_scale(s->rbs, s->pitch);
s->nb_samples = rubberband_get_samples_required(s->rbs);
return 0;
}

View File

@@ -335,10 +335,6 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar
return ret;
}
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
AVFilterContext *ctx = inlink->dst;

View File

@@ -172,6 +172,7 @@ extern AVFilter ff_vf_bwdif;
extern AVFilter ff_vf_cas;
extern AVFilter ff_vf_chromahold;
extern AVFilter ff_vf_chromakey;
extern AVFilter ff_vf_chromanr;
extern AVFilter ff_vf_chromashift;
extern AVFilter ff_vf_ciescope;
extern AVFilter ff_vf_codecview;

View File

@@ -583,6 +583,7 @@ static void *filter_child_next(void *obj, void *prev)
return NULL;
}
#if FF_API_CHILD_CLASS_NEXT
static const AVClass *filter_child_class_next(const AVClass *prev)
{
void *opaque = NULL;
@@ -604,6 +605,18 @@ static const AVClass *filter_child_class_next(const AVClass *prev)
return NULL;
}
#endif
static const AVClass *filter_child_class_iterate(void **iter)
{
const AVFilter *f;
while ((f = av_filter_iterate(iter)))
if (f->priv_class)
return f->priv_class;
return NULL;
}
#define OFFSET(x) offsetof(AVFilterContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM
@@ -625,7 +638,10 @@ static const AVClass avfilter_class = {
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_FILTER,
.child_next = filter_child_next,
#if FF_API_CHILD_CLASS_NEXT
.child_class_next = filter_child_class_next,
#endif
.child_class_iterate = filter_child_class_iterate,
.option = avfilter_options,
};

View File

@@ -9,5 +9,6 @@ OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_mat
OBJS-$(CONFIG_DNN) += dnn/dnn_backend_native_layer_mathunary.o
DNN-OBJS-$(CONFIG_LIBTENSORFLOW) += dnn/dnn_backend_tf.o
DNN-OBJS-$(CONFIG_LIBOPENVINO) += dnn/dnn_backend_openvino.o
OBJS-$(CONFIG_DNN) += $(DNN-OBJS-yes)

View File

@@ -23,6 +23,8 @@
* DNN native backend implementation.
*/
#include <math.h>
#include "dnn_backend_native.h"
#include "libavutil/avassert.h"
#include "dnn_backend_native_layer_mathunary.h"
@@ -80,6 +82,54 @@ int dnn_execute_layer_math_unary(DnnOperand *operands, const int32_t *input_oper
for (int i = 0; i < dims_count; ++i)
dst[i] = FFABS(src[i]);
return 0;
case DMUO_SIN:
for (int i = 0; i < dims_count; ++i)
dst[i] = sin(src[i]);
return 0;
case DMUO_COS:
for (int i = 0; i < dims_count; ++i)
dst[i] = cos(src[i]);
return 0;
case DMUO_TAN:
for (int i = 0; i < dims_count; ++i)
dst[i] = tan(src[i]);
return 0;
case DMUO_ASIN:
for (int i = 0; i < dims_count; ++i)
dst[i] = asin(src[i]);
return 0;
case DMUO_ACOS:
for (int i = 0; i < dims_count; ++i)
dst[i] = acos(src[i]);
return 0;
case DMUO_ATAN:
for (int i = 0; i < dims_count; ++i)
dst[i] = atan(src[i]);
return 0;
case DMUO_SINH:
for (int i = 0; i < dims_count; ++i)
dst[i] = sinh(src[i]);
return 0;
case DMUO_COSH:
for (int i = 0; i < dims_count; ++i)
dst[i] = cosh(src[i]);
return 0;
case DMUO_TANH:
for (int i = 0; i < dims_count; ++i)
dst[i] = tanh(src[i]);
return 0;
case DMUO_ASINH:
for (int i = 0; i < dims_count; ++i)
dst[i] = asinh(src[i]);
return 0;
case DMUO_ACOSH:
for (int i = 0; i < dims_count; ++i)
dst[i] = acosh(src[i]);
return 0;
case DMUO_ATANH:
for (int i = 0; i < dims_count; ++i)
dst[i] = atanh(src[i]);
return 0;
default:
return -1;
}

View File

@@ -31,6 +31,18 @@
typedef enum {
DMUO_ABS = 0,
DMUO_SIN = 1,
DMUO_COS = 2,
DMUO_TAN = 3,
DMUO_ASIN = 4,
DMUO_ACOS = 5,
DMUO_ATAN = 6,
DMUO_SINH = 7,
DMUO_COSH = 8,
DMUO_TANH = 9,
DMUO_ASINH = 10,
DMUO_ACOSH = 11,
DMUO_ATANH = 12,
DMUO_COUNT
} DNNMathUnaryOperation;
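
Each value above maps one-to-one onto a case in dnn_execute_layer_math_unary(), which applies the operator element-wise over the flat operand buffer. A self-contained sketch of that dispatch shape, reduced to a three-operator subset (single-precision variants used here for brevity):

#include <math.h>
#include <stdio.h>

/* Subset of the enum above; the values match the header. */
typedef enum { DMUO_ABS = 0, DMUO_SIN = 1, DMUO_COS = 2 } DemoUnaryOp;

/* One switch, one loop per operator, applied element-wise. */
static int run_unary(DemoUnaryOp op, const float *src, float *dst, int n)
{
    switch (op) {
    case DMUO_ABS: for (int i = 0; i < n; i++) dst[i] = fabsf(src[i]); return 0;
    case DMUO_SIN: for (int i = 0; i < n; i++) dst[i] = sinf(src[i]);  return 0;
    case DMUO_COS: for (int i = 0; i < n; i++) dst[i] = cosf(src[i]);  return 0;
    default:       return -1;   /* unknown operator, as in the real backend */
    }
}

int main(void)
{
    const float src[3] = {-1.0f, 0.0f, 3.14159265f};
    float dst[3];
    run_unary(DMUO_COS, src, dst, 3);
    printf("%f %f %f\n", dst[0], dst[1], dst[2]);
    return 0;
}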

View File

@@ -0,0 +1,261 @@
/*
* Copyright (c) 2020
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* DNN OpenVINO backend implementation.
*/
#include "dnn_backend_openvino.h"
#include "libavformat/avio.h"
#include "libavutil/avassert.h"
#include <c_api/ie_c_api.h>
typedef struct OVModel{
ie_core_t *core;
ie_network_t *network;
ie_executable_network_t *exe_network;
ie_infer_request_t *infer_request;
ie_blob_t *input_blob;
ie_blob_t **output_blobs;
uint32_t nb_output;
} OVModel;
static DNNDataType precision_to_datatype(precision_e precision)
{
switch (precision)
{
case FP32:
return DNN_FLOAT;
default:
av_assert0(!"not supported yet.");
return DNN_FLOAT;
}
}
static DNNReturnType get_input_ov(void *model, DNNData *input, const char *input_name)
{
OVModel *ov_model = (OVModel *)model;
char *model_input_name = NULL;
IEStatusCode status;
size_t model_input_count = 0;
dimensions_t dims;
precision_e precision;
status = ie_network_get_inputs_number(ov_model->network, &model_input_count);
if (status != OK)
return DNN_ERROR;
for (size_t i = 0; i < model_input_count; i++) {
status = ie_network_get_input_name(ov_model->network, i, &model_input_name);
if (status != OK)
return DNN_ERROR;
if (strcmp(model_input_name, input_name) == 0) {
ie_network_name_free(&model_input_name);
status |= ie_network_get_input_dims(ov_model->network, input_name, &dims);
status |= ie_network_get_input_precision(ov_model->network, input_name, &precision);
if (status != OK)
return DNN_ERROR;
// The order of dims in OpenVINO is fixed and is always NCHW for 4-D data,
// while we pass NHWC data from FFmpeg to OpenVINO.
status = ie_network_set_input_layout(ov_model->network, input_name, NHWC);
if (status != OK)
return DNN_ERROR;
input->channels = dims.dims[1];
input->height = dims.dims[2];
input->width = dims.dims[3];
input->dt = precision_to_datatype(precision);
return DNN_SUCCESS;
}
ie_network_name_free(&model_input_name);
}
return DNN_ERROR;
}
static DNNReturnType set_input_output_ov(void *model, DNNData *input, const char *input_name, const char **output_names, uint32_t nb_output)
{
OVModel *ov_model = (OVModel *)model;
IEStatusCode status;
dimensions_t dims;
precision_e precision;
ie_blob_buffer_t blob_buffer;
status = ie_exec_network_create_infer_request(ov_model->exe_network, &ov_model->infer_request);
if (status != OK)
goto err;
status = ie_infer_request_get_blob(ov_model->infer_request, input_name, &ov_model->input_blob);
if (status != OK)
goto err;
status |= ie_blob_get_dims(ov_model->input_blob, &dims);
status |= ie_blob_get_precision(ov_model->input_blob, &precision);
if (status != OK)
goto err;
av_assert0(input->channels == dims.dims[1]);
av_assert0(input->height == dims.dims[2]);
av_assert0(input->width == dims.dims[3]);
av_assert0(input->dt == precision_to_datatype(precision));
status = ie_blob_get_buffer(ov_model->input_blob, &blob_buffer);
if (status != OK)
goto err;
input->data = blob_buffer.buffer;
// outputs
ov_model->nb_output = 0;
av_freep(&ov_model->output_blobs);
ov_model->output_blobs = av_mallocz_array(nb_output, sizeof(*ov_model->output_blobs));
if (!ov_model->output_blobs)
goto err;
for (int i = 0; i < nb_output; i++) {
const char *output_name = output_names[i];
status = ie_infer_request_get_blob(ov_model->infer_request, output_name, &(ov_model->output_blobs[i]));
if (status != OK)
goto err;
ov_model->nb_output++;
}
return DNN_SUCCESS;
err:
if (ov_model->output_blobs) {
for (uint32_t i = 0; i < ov_model->nb_output; i++) {
ie_blob_free(&(ov_model->output_blobs[i]));
}
av_freep(&ov_model->output_blobs);
}
if (ov_model->input_blob)
ie_blob_free(&ov_model->input_blob);
if (ov_model->infer_request)
ie_infer_request_free(&ov_model->infer_request);
return DNN_ERROR;
}
DNNModel *ff_dnn_load_model_ov(const char *model_filename)
{
DNNModel *model = NULL;
OVModel *ov_model = NULL;
IEStatusCode status;
ie_config_t config = {NULL, NULL, NULL};
model = av_malloc(sizeof(DNNModel));
if (!model){
return NULL;
}
ov_model = av_mallocz(sizeof(OVModel));
if (!ov_model)
goto err;
status = ie_core_create("", &ov_model->core);
if (status != OK)
goto err;
status = ie_core_read_network(ov_model->core, model_filename, NULL, &ov_model->network);
if (status != OK)
goto err;
status = ie_core_load_network(ov_model->core, ov_model->network, "CPU", &config, &ov_model->exe_network);
if (status != OK)
goto err;
model->model = (void *)ov_model;
model->set_input_output = &set_input_output_ov;
model->get_input = &get_input_ov;
return model;
err:
if (model)
av_freep(&model);
if (ov_model) {
if (ov_model->exe_network)
ie_exec_network_free(&ov_model->exe_network);
if (ov_model->network)
ie_network_free(&ov_model->network);
if (ov_model->core)
ie_core_free(&ov_model->core);
av_freep(&ov_model);
}
return NULL;
}
DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNData *outputs, uint32_t nb_output)
{
dimensions_t dims;
precision_e precision;
ie_blob_buffer_t blob_buffer;
OVModel *ov_model = (OVModel *)model->model;
uint32_t nb = FFMIN(nb_output, ov_model->nb_output);
IEStatusCode status = ie_infer_request_infer(ov_model->infer_request);
if (status != OK)
return DNN_ERROR;
for (uint32_t i = 0; i < nb; ++i) {
status = ie_blob_get_buffer(ov_model->output_blobs[i], &blob_buffer);
if (status != OK)
return DNN_ERROR;
status |= ie_blob_get_dims(ov_model->output_blobs[i], &dims);
status |= ie_blob_get_precision(ov_model->output_blobs[i], &precision);
if (status != OK)
return DNN_ERROR;
outputs[i].channels = dims.dims[1];
outputs[i].height = dims.dims[2];
outputs[i].width = dims.dims[3];
outputs[i].dt = precision_to_datatype(precision);
outputs[i].data = blob_buffer.buffer;
}
return DNN_SUCCESS;
}
void ff_dnn_free_model_ov(DNNModel **model)
{
if (*model){
OVModel *ov_model = (OVModel *)(*model)->model;
if (ov_model->output_blobs) {
for (uint32_t i = 0; i < ov_model->nb_output; i++) {
ie_blob_free(&(ov_model->output_blobs[i]));
}
av_freep(&ov_model->output_blobs);
}
if (ov_model->input_blob)
ie_blob_free(&ov_model->input_blob);
if (ov_model->infer_request)
ie_infer_request_free(&ov_model->infer_request);
if (ov_model->exe_network)
ie_exec_network_free(&ov_model->exe_network);
if (ov_model->network)
ie_network_free(&ov_model->network);
if (ov_model->core)
ie_core_free(&ov_model->core);
av_freep(&ov_model);
av_freep(model);
}
}
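
The backend is consumed through the DNNModule function-pointer table that ff_get_dnn_module() fills in (see the dnn_interface.c hunk below), so callers stay agnostic of OpenVINO types. A tiny stand-alone sketch of that vtable pattern; every name here is a hypothetical miniature, not the real interface:

#include <stdio.h>
#include <stdlib.h>

typedef struct Model { const char *path; } Model;

/* Miniature of DNNModule: each backend exposes load/execute/free
 * through function pointers. */
typedef struct Module {
    Model *(*load_model)(const char *filename);
    int    (*execute_model)(const Model *model);
    void   (*free_model)(Model **model);
} Module;

static Model *ov_load(const char *filename)
{
    Model *m = malloc(sizeof(*m));
    if (m) m->path = filename;   /* the real backend reads the network here */
    return m;
}

static int ov_execute(const Model *m)
{
    printf("inferring with %s\n", m->path);   /* really ie_infer_request_infer() */
    return 0;
}

static void ov_free(Model **m) { free(*m); *m = NULL; }

int main(void)
{
    Module mod = { ov_load, ov_execute, ov_free };   /* as the DNN_OV case wires it */
    Model *m = mod.load_model("model.xml");
    if (!m) return 1;
    mod.execute_model(m);
    mod.free_model(&m);
    return 0;
}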

View File

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2020
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* DNN inference functions interface for OpenVINO backend.
*/
#ifndef AVFILTER_DNN_DNN_BACKEND_OPENVINO_H
#define AVFILTER_DNN_DNN_BACKEND_OPENVINO_H
#include "../dnn_interface.h"
DNNModel *ff_dnn_load_model_ov(const char *model_filename);
DNNReturnType ff_dnn_execute_model_ov(const DNNModel *model, DNNData *outputs, uint32_t nb_output);
void ff_dnn_free_model_ov(DNNModel **model);
#endif

View File

@@ -26,6 +26,7 @@
#include "../dnn_interface.h"
#include "dnn_backend_native.h"
#include "dnn_backend_tf.h"
#include "dnn_backend_openvino.h"
#include "libavutil/mem.h"
DNNModule *ff_get_dnn_module(DNNBackendType backend_type)
@@ -53,6 +54,16 @@ DNNModule *ff_get_dnn_module(DNNBackendType backend_type)
return NULL;
#endif
break;
case DNN_OV:
#if (CONFIG_LIBOPENVINO == 1)
dnn_module->load_model = &ff_dnn_load_model_ov;
dnn_module->execute_model = &ff_dnn_execute_model_ov;
dnn_module->free_model = &ff_dnn_free_model_ov;
#else
av_freep(&dnn_module);
return NULL;
#endif
break;
default:
av_log(NULL, AV_LOG_ERROR, "Module backend_type is not native, tensorflow or openvino\n");
av_freep(&dnn_module);

View File

@@ -30,7 +30,7 @@
typedef enum {DNN_SUCCESS, DNN_ERROR} DNNReturnType;
typedef enum {DNN_NATIVE, DNN_TF} DNNBackendType;
typedef enum {DNN_NATIVE, DNN_TF, DNN_OV} DNNBackendType;
typedef enum {DNN_FLOAT = 1, DNN_UINT8 = 4} DNNDataType;

View File

@@ -325,9 +325,6 @@ static double get_concatdec_select(AVFrame *frame, int64_t pts)
return NAN;
}
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
static void select_frame(AVFilterContext *ctx, AVFrame *frame)
{
SelectContext *select = ctx->priv;

View File

@@ -475,9 +475,6 @@ static av_cold void uninit(AVFilterContext *ctx)
av_freep(&s->intervals);
}
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
static int filter_frame(AVFilterLink *inlink, AVFrame *ref)
{
AVFilterContext *ctx = inlink->dst;

View File

@@ -53,6 +53,13 @@ static const AVClass framesync_class = {
.parent_log_context_offset = OFFSET(parent),
};
const AVClass *ff_framesync_child_class_iterate(void **iter)
{
const AVClass *c = *iter ? NULL : &framesync_class;
*iter = (void *)(uintptr_t)c;
return c;
}
enum {
STATE_BOF,
STATE_RUN,

View File

@@ -297,6 +297,8 @@ int ff_framesync_dualinput_get(FFFrameSync *fs, AVFrame **f0, AVFrame **f1);
*/
int ff_framesync_dualinput_get_writable(FFFrameSync *fs, AVFrame **f0, AVFrame **f1);
const AVClass *ff_framesync_child_class_iterate(void **iter);
#define FRAMESYNC_DEFINE_CLASS(name, context, field) \
static int name##_framesync_preinit(AVFilterContext *ctx) { \
context *s = ctx->priv; \
@@ -318,6 +320,7 @@ static const AVClass name##_class = { \
.version = LIBAVUTIL_VERSION_INT, \
.category = AV_CLASS_CATEGORY_FILTER, \
.child_class_next = name##_child_class_next, \
.child_class_iterate = ff_framesync_child_class_iterate, \
.child_next = name##_child_next, \
}

View File

@@ -234,6 +234,10 @@ void ff_avfilter_link_set_out_status(AVFilterLink *link, int status, int64_t pts
void ff_command_queue_pop(AVFilterContext *filter);
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
/* misc trace functions */
#define FF_TPRINTF_START(ctx, func) ff_tlog(NULL, "%-16s: ", #func)
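
With the per-file copies deleted (see the surrounding hunks), internal.h is now the single home of these timestamp helpers. A self-contained demonstration of what they compute, with AV_NOPTS_VALUE and AVRational stubbed in to match their libavutil definitions:

#include <inttypes.h>
#include <math.h>
#include <stdint.h>
#include <stdio.h>

#define AV_NOPTS_VALUE ((int64_t)UINT64_C(0x8000000000000000))
typedef struct AVRational { int num, den; } AVRational;
static double av_q2d(AVRational a) { return a.num / (double)a.den; }

#define D2TS(d)      (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts)     ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))

int main(void)
{
    AVRational tb = {1, 90000};   /* 90 kHz MPEG time base */
    int64_t pts = 450000;
    printf("t = %.3f s\n", TS2T(pts, tb));             /* 5.000 s */
    printf("nopts -> %f\n", TS2D(AV_NOPTS_VALUE));     /* nan */
    printf("round trip: %" PRId64 "\n", D2TS(TS2D(pts)));
    return 0;
}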

View File

@@ -142,10 +142,6 @@ static int config_input(AVFilterLink *inlink)
return 0;
}
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
#define BUF_SIZE 64
static inline char *double2int64str(char *buf, double v)

View File

@@ -51,6 +51,8 @@ enum Projections {
BARREL_SPLIT,
TSPYRAMID,
HEQUIRECTANGULAR,
EQUISOLID,
ORTHOGRAPHIC,
NB_PROJECTIONS,
};

View File

@@ -30,7 +30,7 @@
#include "libavutil/version.h"
#define LIBAVFILTER_VERSION_MAJOR 7
#define LIBAVFILTER_VERSION_MINOR 85
#define LIBAVFILTER_VERSION_MINOR 87
#define LIBAVFILTER_VERSION_MICRO 100

externals/ffmpeg/libavfilter/vf_chromanr.c (new vendored executable file, 257 lines)
View File

@@ -0,0 +1,257 @@
/*
* Copyright (c) 2020 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "video.h"
typedef struct ChromaNRContext {
const AVClass *class;
float threshold;
int thres;
int sizew;
int sizeh;
int stepw;
int steph;
int depth;
int chroma_w;
int chroma_h;
int nb_planes;
int linesize[4];
int planeheight[4];
int planewidth[4];
AVFrame *out;
int (*filter_slice)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} ChromaNRContext;
static int query_formats(AVFilterContext *ctx)
{
static const enum AVPixelFormat pix_fmts[] = {
AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUV444P,
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVJ440P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVJ420P, AV_PIX_FMT_YUVJ411P,
AV_PIX_FMT_YUV420P9, AV_PIX_FMT_YUV422P9, AV_PIX_FMT_YUV444P9,
AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUV444P10,
AV_PIX_FMT_YUV444P12, AV_PIX_FMT_YUV422P12, AV_PIX_FMT_YUV440P12, AV_PIX_FMT_YUV420P12,
AV_PIX_FMT_YUV444P14, AV_PIX_FMT_YUV422P14, AV_PIX_FMT_YUV420P14,
AV_PIX_FMT_YUV420P16, AV_PIX_FMT_YUV422P16, AV_PIX_FMT_YUV444P16,
AV_PIX_FMT_YUVA420P9, AV_PIX_FMT_YUVA422P9, AV_PIX_FMT_YUVA444P9,
AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_YUVA444P10,
AV_PIX_FMT_YUVA422P12, AV_PIX_FMT_YUVA444P12,
AV_PIX_FMT_YUVA420P16, AV_PIX_FMT_YUVA422P16, AV_PIX_FMT_YUVA444P16,
AV_PIX_FMT_NONE
};
AVFilterFormats *fmts_list = ff_make_format_list(pix_fmts);
if (!fmts_list)
return AVERROR(ENOMEM);
return ff_set_common_formats(ctx, fmts_list);
}
#define FILTER_FUNC(name, type) \
static int filter_slice##name(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs) \
{ \
ChromaNRContext *s = ctx->priv; \
AVFrame *in = arg; \
AVFrame *out = s->out; \
const int in_ylinesize = in->linesize[0]; \
const int in_ulinesize = in->linesize[1]; \
const int in_vlinesize = in->linesize[2]; \
const int out_ulinesize = out->linesize[1]; \
const int out_vlinesize = out->linesize[2]; \
const int chroma_w = s->chroma_w; \
const int chroma_h = s->chroma_h; \
const int stepw = s->stepw; \
const int steph = s->steph; \
const int sizew = s->sizew; \
const int sizeh = s->sizeh; \
const int thres = s->thres; \
const int h = s->planeheight[1]; \
const int w = s->planewidth[1]; \
const int slice_start = (h * jobnr) / nb_jobs; \
const int slice_end = (h * (jobnr+1)) / nb_jobs; \
type *out_uptr = (type *)(out->data[1] + slice_start * out_ulinesize); \
type *out_vptr = (type *)(out->data[2] + slice_start * out_vlinesize); \
\
{ \
const int h = s->planeheight[0]; \
const int slice_start = (h * jobnr) / nb_jobs; \
const int slice_end = (h * (jobnr+1)) / nb_jobs; \
\
av_image_copy_plane(out->data[0] + slice_start * out->linesize[0], \
out->linesize[0], \
in->data[0] + slice_start * in->linesize[0], \
in->linesize[0], \
s->linesize[0], slice_end - slice_start); \
\
if (s->nb_planes == 4) { \
av_image_copy_plane(out->data[3] + slice_start * out->linesize[3], \
out->linesize[3], \
in->data[3] + slice_start * in->linesize[3], \
in->linesize[3], \
s->linesize[3], slice_end - slice_start); \
} \
} \
\
for (int y = slice_start; y < slice_end; y++) { \
const type *in_yptr = (const type *)(in->data[0] + y * chroma_h * in_ylinesize); \
const type *in_uptr = (const type *)(in->data[1] + y * in_ulinesize); \
const type *in_vptr = (const type *)(in->data[2] + y * in_vlinesize); \
\
for (int x = 0; x < w; x++) { \
const int cy = in_yptr[x * chroma_w]; \
const int cu = in_uptr[x]; \
const int cv = in_vptr[x]; \
int su = cu; \
int sv = cv; \
int cn = 1; \
\
for (int yy = FFMAX(0, y - sizeh); yy < FFMIN(y + sizeh, h); yy += steph) { \
const type *in_yptr = (const type *)(in->data[0] + yy * chroma_h * in_ylinesize); \
const type *in_uptr = (const type *)(in->data[1] + yy * in_ulinesize); \
const type *in_vptr = (const type *)(in->data[2] + yy * in_vlinesize); \
\
for (int xx = FFMAX(0, x - sizew); xx < FFMIN(x + sizew, w); xx += stepw) { \
const int Y = in_yptr[xx * chroma_w]; \
const int U = in_uptr[xx]; \
const int V = in_vptr[xx]; \
\
if (FFABS(cu - U) + FFABS(cv - V) + FFABS(cy - Y) < thres && \
xx != x && yy != y) { \
su += U; \
sv += V; \
cn++; \
} \
} \
} \
\
out_uptr[x] = su / cn; \
out_vptr[x] = sv / cn; \
} \
\
out_uptr += out_ulinesize / sizeof(type); \
out_vptr += out_vlinesize / sizeof(type); \
} \
\
return 0; \
}
FILTER_FUNC(8, uint8_t)
FILTER_FUNC(16, uint16_t)
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;
AVFilterLink *outlink = ctx->outputs[0];
ChromaNRContext *s = ctx->priv;
AVFrame *out;
out = ff_get_video_buffer(outlink, outlink->w, outlink->h);
if (!out) {
av_frame_free(&in);
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
s->out = out;
ctx->internal->execute(ctx, s->filter_slice, in, NULL,
FFMIN3(s->planeheight[1],
s->planeheight[2],
ff_filter_get_nb_threads(ctx)));
av_frame_free(&in);
return ff_filter_frame(outlink, out);
}
static int config_input(AVFilterLink *inlink)
{
AVFilterContext *ctx = inlink->dst;
ChromaNRContext *s = ctx->priv;
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(inlink->format);
int ret;
s->nb_planes = desc->nb_components;
s->depth = desc->comp[0].depth;
s->thres = s->threshold * (1 << (s->depth - 8));
s->filter_slice = s->depth <= 8 ? filter_slice8 : filter_slice16;
s->chroma_w = 1 << desc->log2_chroma_w;
s->chroma_h = 1 << desc->log2_chroma_h;
s->planeheight[1] = s->planeheight[2] = AV_CEIL_RSHIFT(inlink->h, desc->log2_chroma_h);
s->planeheight[0] = s->planeheight[3] = inlink->h;
s->planewidth[1] = s->planewidth[2] = AV_CEIL_RSHIFT(inlink->w, desc->log2_chroma_w);
s->planewidth[0] = s->planewidth[3] = inlink->w;
if ((ret = av_image_fill_linesizes(s->linesize, inlink->format, inlink->w)) < 0)
return ret;
return 0;
}
#define OFFSET(x) offsetof(ChromaNRContext, x)
#define VF AV_OPT_FLAG_VIDEO_PARAM | AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption chromanr_options[] = {
{ "thres", "set u/v threshold", OFFSET(threshold), AV_OPT_TYPE_FLOAT, {.dbl=30}, 1, 200, VF },
{ "sizew", "set horizontal size", OFFSET(sizew), AV_OPT_TYPE_INT, {.i64=5}, 1, 100, VF },
{ "sizeh", "set vertical size", OFFSET(sizeh), AV_OPT_TYPE_INT, {.i64=5}, 1, 100, VF },
{ "stepw", "set horizontal step", OFFSET(stepw), AV_OPT_TYPE_INT, {.i64=1}, 1, 50, VF },
{ "steph", "set vertical step", OFFSET(steph), AV_OPT_TYPE_INT, {.i64=1}, 1, 50, VF },
{ NULL }
};
static const AVFilterPad inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
static const AVFilterPad outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_VIDEO,
},
{ NULL }
};
AVFILTER_DEFINE_CLASS(chromanr);
AVFilter ff_vf_chromanr = {
.name = "chromanr",
.description = NULL_IF_CONFIG_SMALL("Reduce chrominance noise."),
.priv_size = sizeof(ChromaNRContext),
.priv_class = &chromanr_class,
.query_formats = query_formats,
.outputs = outputs,
.inputs = inputs,
.flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC | AVFILTER_FLAG_SLICE_THREADS,
.process_command = ff_filter_process_command,
};
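
The core of the filter is the thresholded neighbourhood average inside FILTER_FUNC: a neighbour contributes its U and V only when its summed Y, U and V distance from the centre pixel stays below thres, which keeps genuine edges out of the smoothing. A one-dimensional toy illustration of that test, with hypothetical sample values:

#include <stdio.h>
#include <stdlib.h>

int main(void)
{
    const int y[5] = {100, 101,  99, 180, 100};   /* luma: pixel 3 is an edge */
    const int u[5] = { 60,  62,  58, 120,  61};
    const int v[5] = { 70,  71,  69,  10,  70};
    const int cx = 2, thres = 30;
    int su = u[cx], sv = v[cx], cn = 1;

    for (int i = 0; i < 5; i++) {
        if (i == cx)
            continue;
        if (abs(u[cx] - u[i]) + abs(v[cx] - v[i]) + abs(y[cx] - y[i]) < thres) {
            su += u[i];
            sv += v[i];
            cn++;
        }
    }
    /* Pixel 3 fails the test and is excluded from the average. */
    printf("u=%d v=%d from %d pixels\n", su / cn, sv / cn, cn);
    return 0;
}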

View File

@@ -111,7 +111,7 @@ static float get_component(float v, float l,
v += m;
v += h;
return av_clipf(v + 0.5f, 0, 1);
return av_clipf(v, 0, 1);
}
static float hfun(float n, float h, float s, float l)
@@ -188,9 +188,9 @@ static int color_balance8_p(AVFilterContext *ctx, void *arg, int jobnr, int nb_j
if (s->preserve_lightness)
preservel(&r, &g, &b, l);
dstr[j] = av_clip_uint8(r * max);
dstg[j] = av_clip_uint8(g * max);
dstb[j] = av_clip_uint8(b * max);
dstr[j] = av_clip_uint8(lrintf(r * max));
dstg[j] = av_clip_uint8(lrintf(g * max));
dstb[j] = av_clip_uint8(lrintf(b * max));
if (in != out && out->linesize[3])
dsta[j] = srca[j];
}
@@ -242,9 +242,9 @@ static int color_balance16_p(AVFilterContext *ctx, void *arg, int jobnr, int nb_
if (s->preserve_lightness)
preservel(&r, &g, &b, l);
dstr[j] = av_clip_uintp2_c(r * max, depth);
dstg[j] = av_clip_uintp2_c(g * max, depth);
dstb[j] = av_clip_uintp2_c(b * max, depth);
dstr[j] = av_clip_uintp2_c(lrintf(r * max), depth);
dstg[j] = av_clip_uintp2_c(lrintf(g * max), depth);
dstb[j] = av_clip_uintp2_c(lrintf(b * max), depth);
if (in != out && out->linesize[3])
dsta[j] = srca[j];
}
@@ -299,9 +299,9 @@ static int color_balance8(AVFilterContext *ctx, void *arg, int jobnr, int nb_job
if (s->preserve_lightness)
preservel(&r, &g, &b, l);
dst[j + roffset] = av_clip_uint8(r * max);
dst[j + goffset] = av_clip_uint8(g * max);
dst[j + boffset] = av_clip_uint8(b * max);
dst[j + roffset] = av_clip_uint8(lrintf(r * max));
dst[j + goffset] = av_clip_uint8(lrintf(g * max));
dst[j + boffset] = av_clip_uint8(lrintf(b * max));
if (in != out && step == 4)
dst[j + aoffset] = src[j + aoffset];
}
@@ -351,9 +351,9 @@ static int color_balance16(AVFilterContext *ctx, void *arg, int jobnr, int nb_jo
if (s->preserve_lightness)
preservel(&r, &g, &b, l);
dst[j + roffset] = av_clip_uintp2_c(r * max, depth);
dst[j + goffset] = av_clip_uintp2_c(g * max, depth);
dst[j + boffset] = av_clip_uintp2_c(b * max, depth);
dst[j + roffset] = av_clip_uintp2_c(lrintf(r * max), depth);
dst[j + goffset] = av_clip_uintp2_c(lrintf(g * max), depth);
dst[j + boffset] = av_clip_uintp2_c(lrintf(b * max), depth);
if (in != out && step == 4)
dst[j + aoffset] = src[j + aoffset];
}
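
All four colorbalance hunks make the same fix: a plain float-to-int cast truncates toward zero, so a component that lands at 254.9999 after the balance math came out one code value too dark, while lrintf() rounds to nearest. A two-line demonstration:

#include <math.h>
#include <stdio.h>

int main(void)
{
    float r  = 0.9999999f;   /* a value just under 1.0 after float arithmetic */
    int  max = 255;
    printf("cast:   %d\n",  (int)(r * max));   /* 254 */
    printf("lrintf: %ld\n", lrintf(r * max));  /* 255 */
    return 0;
}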

View File

@@ -55,7 +55,6 @@ enum var_name {
VAR_T,
VAR_VARS_NB
};
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
static int set_expr(AVExpr **pexpr, const char *expr, const char *option, void *log_ctx)
{

View File

@@ -58,10 +58,13 @@ typedef struct DnnProcessingContext {
#define OFFSET(x) offsetof(DnnProcessingContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM | AV_OPT_FLAG_VIDEO_PARAM
static const AVOption dnn_processing_options[] = {
{ "dnn_backend", "DNN backend", OFFSET(backend_type), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, FLAGS, "backend" },
{ "dnn_backend", "DNN backend", OFFSET(backend_type), AV_OPT_TYPE_INT, { .i64 = 0 }, INT_MIN, INT_MAX, FLAGS, "backend" },
{ "native", "native backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 0 }, 0, 0, FLAGS, "backend" },
#if (CONFIG_LIBTENSORFLOW == 1)
{ "tensorflow", "tensorflow backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 1 }, 0, 0, FLAGS, "backend" },
#endif
#if (CONFIG_LIBOPENVINO == 1)
{ "openvino", "openvino backend flag", 0, AV_OPT_TYPE_CONST, { .i64 = 2 }, 0, 0, FLAGS, "backend" },
#endif
{ "model", "path to model file", OFFSET(model_filename), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
{ "input", "input name of the model", OFFSET(model_inputname), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },

View File

@@ -294,7 +294,7 @@ static void double_threshold(int low, int high, int w, int h,
continue;
}
if ((!i || i == w - 1 || !j || j == h - 1) &&
if (!(!i || i == w - 1 || !j || j == h - 1) &&
src[i] > low &&
(src[-src_linesize + i-1] > high ||
src[-src_linesize + i ] > high ||

View File

@@ -249,8 +249,6 @@ static int query_formats(AVFilterContext *ctx)
return ff_set_common_formats(ctx, fmts_list);
}
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
AVFilterContext *ctx = inlink->dst;

View File

@@ -363,9 +363,6 @@ static void apply_lut10(HueContext *s,
}
}
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
static int filter_frame(AVFilterLink *inlink, AVFrame *inpic)
{
HueContext *hue = inlink->dst->priv;

View File

@@ -154,6 +154,7 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar
static const enum AVPixelFormat alpha_pix_fmts[] = {
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_YUVA444P,
AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_YUVA422P10,
AV_PIX_FMT_ARGB, AV_PIX_FMT_ABGR, AV_PIX_FMT_RGBA,
AV_PIX_FMT_BGRA, AV_PIX_FMT_GBRAP, AV_PIX_FMT_NONE
};
@@ -172,6 +173,14 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_YUVA420P, AV_PIX_FMT_NONE
};
static const enum AVPixelFormat main_pix_fmts_yuv420p10[] = {
AV_PIX_FMT_YUV420P10, AV_PIX_FMT_YUVA420P10,
AV_PIX_FMT_NONE
};
static const enum AVPixelFormat overlay_pix_fmts_yuv420p10[] = {
AV_PIX_FMT_YUVA420P10, AV_PIX_FMT_NONE
};
static const enum AVPixelFormat main_pix_fmts_yuv422[] = {
AV_PIX_FMT_YUV422P, AV_PIX_FMT_YUVJ422P, AV_PIX_FMT_YUVA422P, AV_PIX_FMT_NONE
};
@@ -179,6 +188,13 @@ static int query_formats(AVFilterContext *ctx)
AV_PIX_FMT_YUVA422P, AV_PIX_FMT_NONE
};
static const enum AVPixelFormat main_pix_fmts_yuv422p10[] = {
AV_PIX_FMT_YUV422P10, AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_NONE
};
static const enum AVPixelFormat overlay_pix_fmts_yuv422p10[] = {
AV_PIX_FMT_YUVA422P10, AV_PIX_FMT_NONE
};
static const enum AVPixelFormat main_pix_fmts_yuv444[] = {
AV_PIX_FMT_YUV444P, AV_PIX_FMT_YUVJ444P, AV_PIX_FMT_YUVA444P, AV_PIX_FMT_NONE
};
@@ -217,6 +233,13 @@ static int query_formats(AVFilterContext *ctx)
goto fail;
}
break;
case OVERLAY_FORMAT_YUV420P10:
if (!(main_formats = ff_make_format_list(main_pix_fmts_yuv420p10)) ||
!(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv420p10))) {
ret = AVERROR(ENOMEM);
goto fail;
}
break;
case OVERLAY_FORMAT_YUV422:
if (!(main_formats = ff_make_format_list(main_pix_fmts_yuv422)) ||
!(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv422))) {
@@ -224,6 +247,13 @@ static int query_formats(AVFilterContext *ctx)
goto fail;
}
break;
case OVERLAY_FORMAT_YUV422P10:
if (!(main_formats = ff_make_format_list(main_pix_fmts_yuv422p10)) ||
!(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv422p10))) {
ret = AVERROR(ENOMEM);
goto fail;
}
break;
case OVERLAY_FORMAT_YUV444:
if (!(main_formats = ff_make_format_list(main_pix_fmts_yuv444)) ||
!(overlay_formats = ff_make_format_list(overlay_pix_fmts_yuv444))) {
@@ -441,190 +471,216 @@ static av_always_inline void blend_slice_packed_rgb(AVFilterContext *ctx,
}
}
static av_always_inline void blend_plane(AVFilterContext *ctx,
AVFrame *dst, const AVFrame *src,
int src_w, int src_h,
int dst_w, int dst_h,
int i, int hsub, int vsub,
int x, int y,
int main_has_alpha,
int dst_plane,
int dst_offset,
int dst_step,
int straight,
int yuv,
int jobnr,
int nb_jobs)
{
OverlayContext *octx = ctx->priv;
int src_wp = AV_CEIL_RSHIFT(src_w, hsub);
int src_hp = AV_CEIL_RSHIFT(src_h, vsub);
int dst_wp = AV_CEIL_RSHIFT(dst_w, hsub);
int dst_hp = AV_CEIL_RSHIFT(dst_h, vsub);
int yp = y>>vsub;
int xp = x>>hsub;
uint8_t *s, *sp, *d, *dp, *dap, *a, *da, *ap;
int jmax, j, k, kmax;
int slice_start, slice_end;
j = FFMAX(-yp, 0);
jmax = FFMIN3(-yp + dst_hp, FFMIN(src_hp, dst_hp), yp + src_hp);
slice_start = j + (jmax * jobnr) / nb_jobs;
slice_end = j + (jmax * (jobnr+1)) / nb_jobs;
sp = src->data[i] + (slice_start) * src->linesize[i];
dp = dst->data[dst_plane]
+ (yp + slice_start) * dst->linesize[dst_plane]
+ dst_offset;
ap = src->data[3] + (slice_start << vsub) * src->linesize[3];
dap = dst->data[3] + ((yp + slice_start) << vsub) * dst->linesize[3];
for (j = slice_start; j < slice_end; j++) {
k = FFMAX(-xp, 0);
d = dp + (xp+k) * dst_step;
s = sp + k;
a = ap + (k<<hsub);
da = dap + ((xp+k) << hsub);
kmax = FFMIN(-xp + dst_wp, src_wp);
if (((vsub && j+1 < src_hp) || !vsub) && octx->blend_row[i]) {
int c = octx->blend_row[i](d, da, s, a, kmax - k, src->linesize[3]);
s += c;
d += dst_step * c;
da += (1 << hsub) * c;
a += (1 << hsub) * c;
k += c;
}
for (; k < kmax; k++) {
int alpha_v, alpha_h, alpha;
// average alpha for color components, improve quality
if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) {
alpha = (a[0] + a[src->linesize[3]] +
a[1] + a[src->linesize[3]+1]) >> 2;
} else if (hsub || vsub) {
alpha_h = hsub && k+1 < src_wp ?
(a[0] + a[1]) >> 1 : a[0];
alpha_v = vsub && j+1 < src_hp ?
(a[0] + a[src->linesize[3]]) >> 1 : a[0];
alpha = (alpha_v + alpha_h) >> 1;
} else
alpha = a[0];
// if the main channel has an alpha channel, alpha has to be calculated
// to create an un-premultiplied (straight) alpha value
if (main_has_alpha && alpha != 0 && alpha != 255) {
// average alpha for color components, improve quality
uint8_t alpha_d;
if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) {
alpha_d = (da[0] + da[dst->linesize[3]] +
da[1] + da[dst->linesize[3]+1]) >> 2;
} else if (hsub || vsub) {
alpha_h = hsub && k+1 < src_wp ?
(da[0] + da[1]) >> 1 : da[0];
alpha_v = vsub && j+1 < src_hp ?
(da[0] + da[dst->linesize[3]]) >> 1 : da[0];
alpha_d = (alpha_v + alpha_h) >> 1;
} else
alpha_d = da[0];
alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
}
if (straight) {
*d = FAST_DIV255(*d * (255 - alpha) + *s * alpha);
} else {
if (i && yuv)
*d = av_clip(FAST_DIV255((*d - 128) * (255 - alpha)) + *s - 128, -128, 128) + 128;
else
*d = FFMIN(FAST_DIV255(*d * (255 - alpha)) + *s, 255);
}
s++;
d += dst_step;
da += 1 << hsub;
a += 1 << hsub;
}
dp += dst->linesize[dst_plane];
sp += src->linesize[i];
ap += (1 << vsub) * src->linesize[3];
dap += (1 << vsub) * dst->linesize[3];
}
#define DEFINE_BLEND_PLANE(depth, nbits) \
static av_always_inline void blend_plane_##depth##_##nbits##bits(AVFilterContext *ctx, \
AVFrame *dst, const AVFrame *src, \
int src_w, int src_h, \
int dst_w, int dst_h, \
int i, int hsub, int vsub, \
int x, int y, \
int main_has_alpha, \
int dst_plane, \
int dst_offset, \
int dst_step, \
int straight, \
int yuv, \
int jobnr, \
int nb_jobs) \
{ \
OverlayContext *octx = ctx->priv; \
int src_wp = AV_CEIL_RSHIFT(src_w, hsub); \
int src_hp = AV_CEIL_RSHIFT(src_h, vsub); \
int dst_wp = AV_CEIL_RSHIFT(dst_w, hsub); \
int dst_hp = AV_CEIL_RSHIFT(dst_h, vsub); \
int yp = y>>vsub; \
int xp = x>>hsub; \
uint##depth##_t *s, *sp, *d, *dp, *dap, *a, *da, *ap; \
int jmax, j, k, kmax; \
int slice_start, slice_end; \
const uint##depth##_t max = (1 << nbits) - 1; \
const uint##depth##_t mid = (1 << (nbits -1)) ; \
int bytes = depth / 8; \
\
dst_step /= bytes; \
j = FFMAX(-yp, 0); \
jmax = FFMIN3(-yp + dst_hp, FFMIN(src_hp, dst_hp), yp + src_hp); \
\
slice_start = j + (jmax * jobnr) / nb_jobs; \
slice_end = j + (jmax * (jobnr+1)) / nb_jobs; \
\
sp = (uint##depth##_t *)(src->data[i] + (slice_start) * src->linesize[i]); \
dp = (uint##depth##_t *)(dst->data[dst_plane] \
+ (yp + slice_start) * dst->linesize[dst_plane] \
+ dst_offset); \
ap = (uint##depth##_t *)(src->data[3] + (slice_start << vsub) * src->linesize[3]); \
dap = (uint##depth##_t *)(dst->data[3] + ((yp + slice_start) << vsub) * dst->linesize[3]); \
\
for (j = slice_start; j < slice_end; j++) { \
k = FFMAX(-xp, 0); \
d = dp + (xp+k) * dst_step; \
s = sp + k; \
a = ap + (k<<hsub); \
da = dap + ((xp+k) << hsub); \
kmax = FFMIN(-xp + dst_wp, src_wp); \
\
if (nbits == 8 && ((vsub && j+1 < src_hp) || !vsub) && octx->blend_row[i]) { \
int c = octx->blend_row[i]((uint8_t*)d, (uint8_t*)da, (uint8_t*)s, \
(uint8_t*)a, kmax - k, src->linesize[3]); \
\
s += c; \
d += dst_step * c; \
da += (1 << hsub) * c; \
a += (1 << hsub) * c; \
k += c; \
} \
for (; k < kmax; k++) { \
int alpha_v, alpha_h, alpha; \
\
/* average alpha for color components, improve quality */ \
if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) { \
alpha = (a[0] + a[src->linesize[3]] + \
a[1] + a[src->linesize[3]+1]) >> 2; \
} else if (hsub || vsub) { \
alpha_h = hsub && k+1 < src_wp ? \
(a[0] + a[1]) >> 1 : a[0]; \
alpha_v = vsub && j+1 < src_hp ? \
(a[0] + a[src->linesize[3]]) >> 1 : a[0]; \
alpha = (alpha_v + alpha_h) >> 1; \
} else \
alpha = a[0]; \
/* if the main channel has an alpha channel, alpha has to be calculated */ \
/* to create an un-premultiplied (straight) alpha value */ \
if (main_has_alpha && alpha != 0 && alpha != max) { \
/* average alpha for color components, improve quality */ \
uint8_t alpha_d; \
if (hsub && vsub && j+1 < src_hp && k+1 < src_wp) { \
alpha_d = (da[0] + da[dst->linesize[3]] + \
da[1] + da[dst->linesize[3]+1]) >> 2; \
} else if (hsub || vsub) { \
alpha_h = hsub && k+1 < src_wp ? \
(da[0] + da[1]) >> 1 : da[0]; \
alpha_v = vsub && j+1 < src_hp ? \
(da[0] + da[dst->linesize[3]]) >> 1 : da[0]; \
alpha_d = (alpha_v + alpha_h) >> 1; \
} else \
alpha_d = da[0]; \
alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); \
} \
if (straight) { \
if (nbits > 8) \
*d = (*d * (max - alpha) + *s * alpha) / max; \
else \
*d = FAST_DIV255(*d * (255 - alpha) + *s * alpha); \
} else { \
if (nbits > 8) { \
if (i && yuv) \
*d = av_clip((*d * (max - alpha) + *s * alpha) / max + *s - mid, -mid, mid) + mid; \
else \
*d = FFMIN((*d * (max - alpha) + *s * alpha) / max + *s, max); \
} else { \
if (i && yuv) \
*d = av_clip(FAST_DIV255((*d - mid) * (max - alpha)) + *s - mid, -mid, mid) + mid; \
else \
*d = FFMIN(FAST_DIV255(*d * (max - alpha)) + *s, max); \
} \
} \
s++; \
d += dst_step; \
da += 1 << hsub; \
a += 1 << hsub; \
} \
dp += dst->linesize[dst_plane] / bytes; \
sp += src->linesize[i] / bytes; \
ap += (1 << vsub) * src->linesize[3] / bytes; \
dap += (1 << vsub) * dst->linesize[3] / bytes; \
} \
}
DEFINE_BLEND_PLANE(8, 8);
DEFINE_BLEND_PLANE(16, 10);
static inline void alpha_composite(const AVFrame *src, const AVFrame *dst,
int src_w, int src_h,
int dst_w, int dst_h,
int x, int y,
int jobnr, int nb_jobs)
{
uint8_t alpha; ///< the amount of overlay to blend onto main
uint8_t *s, *sa, *d, *da;
int i, imax, j, jmax;
int slice_start, slice_end;
imax = FFMIN(-y + dst_h, src_h);
slice_start = (imax * jobnr) / nb_jobs;
slice_end = ((imax * (jobnr+1)) / nb_jobs);
i = FFMAX(-y, 0);
sa = src->data[3] + (i + slice_start) * src->linesize[3];
da = dst->data[3] + (y + i + slice_start) * dst->linesize[3];
for (i = i + slice_start; i < slice_end; i++) {
j = FFMAX(-x, 0);
s = sa + j;
d = da + x+j;
for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) {
alpha = *s;
if (alpha != 0 && alpha != 255) {
uint8_t alpha_d = *d;
alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d);
}
switch (alpha) {
case 0:
break;
case 255:
*d = *s;
break;
default:
// apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha
*d += FAST_DIV255((255 - *d) * *s);
}
d += 1;
s += 1;
}
da += dst->linesize[3];
sa += src->linesize[3];
}
#define DEFINE_ALPHA_COMPOSITE(depth, nbits) \
static inline void alpha_composite_##depth##_##nbits##bits(const AVFrame *src, const AVFrame *dst, \
int src_w, int src_h, \
int dst_w, int dst_h, \
int x, int y, \
int jobnr, int nb_jobs) \
{ \
uint##depth##_t alpha; /* the amount of overlay to blend onto main */ \
uint##depth##_t *s, *sa, *d, *da; \
int i, imax, j, jmax; \
int slice_start, slice_end; \
const uint##depth##_t max = (1 << nbits) - 1; \
int bytes = depth / 8; \
\
imax = FFMIN(-y + dst_h, src_h); \
slice_start = (imax * jobnr) / nb_jobs; \
slice_end = ((imax * (jobnr+1)) / nb_jobs); \
\
i = FFMAX(-y, 0); \
sa = (uint##depth##_t *)(src->data[3] + (i + slice_start) * src->linesize[3]); \
da = (uint##depth##_t *)(dst->data[3] + (y + i + slice_start) * dst->linesize[3]); \
\
for (i = i + slice_start; i < slice_end; i++) { \
j = FFMAX(-x, 0); \
s = sa + j; \
d = da + x+j; \
\
for (jmax = FFMIN(-x + dst_w, src_w); j < jmax; j++) { \
alpha = *s; \
if (alpha != 0 && alpha != max) { \
uint8_t alpha_d = *d; \
alpha = UNPREMULTIPLY_ALPHA(alpha, alpha_d); \
} \
if (alpha == max) \
*d = *s; \
else if (alpha > 0) { \
/* apply alpha compositing: main_alpha += (1-main_alpha) * overlay_alpha */ \
if (nbits > 8) \
*d += (max - *d) * *s / max; \
else \
*d += FAST_DIV255((max - *d) * *s); \
} \
d += 1; \
s += 1; \
} \
da += dst->linesize[3] / bytes; \
sa += src->linesize[3] / bytes; \
} \
}
DEFINE_ALPHA_COMPOSITE(8, 8);
DEFINE_ALPHA_COMPOSITE(16, 10);
static av_always_inline void blend_slice_yuv(AVFilterContext *ctx,
AVFrame *dst, const AVFrame *src,
int hsub, int vsub,
int main_has_alpha,
int x, int y,
int is_straight,
int jobnr, int nb_jobs)
{
OverlayContext *s = ctx->priv;
const int src_w = src->width;
const int src_h = src->height;
const int dst_w = dst->width;
const int dst_h = dst->height;
blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, y, main_has_alpha,
s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, s->main_desc->comp[0].step, is_straight, 1,
jobnr, nb_jobs);
blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,
s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, s->main_desc->comp[1].step, is_straight, 1,
jobnr, nb_jobs);
blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha,
s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, s->main_desc->comp[2].step, is_straight, 1,
jobnr, nb_jobs);
if (main_has_alpha)
alpha_composite(src, dst, src_w, src_h, dst_w, dst_h, x, y, jobnr, nb_jobs);
#define DEFINE_BLEND_SLICE_YUV(depth, nbits) \
static av_always_inline void blend_slice_yuv_##depth##_##nbits##bits(AVFilterContext *ctx, \
AVFrame *dst, const AVFrame *src, \
int hsub, int vsub, \
int main_has_alpha, \
int x, int y, \
int is_straight, \
int jobnr, int nb_jobs) \
{ \
OverlayContext *s = ctx->priv; \
const int src_w = src->width; \
const int src_h = src->height; \
const int dst_w = dst->width; \
const int dst_h = dst->height; \
\
blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, \
x, y, main_has_alpha, s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, \
s->main_desc->comp[0].step, is_straight, 1, jobnr, nb_jobs); \
blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, \
x, y, main_has_alpha, s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, \
s->main_desc->comp[1].step, is_straight, 1, jobnr, nb_jobs); \
blend_plane_##depth##_##nbits##bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, \
x, y, main_has_alpha, s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, \
s->main_desc->comp[2].step, is_straight, 1, jobnr, nb_jobs); \
\
if (main_has_alpha) \
alpha_composite_##depth##_##nbits##bits(src, dst, src_w, src_h, dst_w, dst_h, x, y, \
jobnr, nb_jobs); \
}
DEFINE_BLEND_SLICE_YUV(8, 8);
DEFINE_BLEND_SLICE_YUV(16, 10);
static av_always_inline void blend_slice_planar_rgb(AVFilterContext *ctx,
AVFrame *dst, const AVFrame *src,
@@ -641,25 +697,25 @@ static av_always_inline void blend_slice_planar_rgb(AVFilterContext *ctx,
const int dst_w = dst->width;
const int dst_h = dst->height;
blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, y, main_has_alpha,
blend_plane_8_8bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 0, 0, 0, x, y, main_has_alpha,
s->main_desc->comp[1].plane, s->main_desc->comp[1].offset, s->main_desc->comp[1].step, is_straight, 0,
jobnr, nb_jobs);
blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,
blend_plane_8_8bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 1, hsub, vsub, x, y, main_has_alpha,
s->main_desc->comp[2].plane, s->main_desc->comp[2].offset, s->main_desc->comp[2].step, is_straight, 0,
jobnr, nb_jobs);
blend_plane(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha,
blend_plane_8_8bits(ctx, dst, src, src_w, src_h, dst_w, dst_h, 2, hsub, vsub, x, y, main_has_alpha,
s->main_desc->comp[0].plane, s->main_desc->comp[0].offset, s->main_desc->comp[0].step, is_straight, 0,
jobnr, nb_jobs);
if (main_has_alpha)
alpha_composite(src, dst, src_w, src_h, dst_w, dst_h, x, y, jobnr, nb_jobs);
alpha_composite_8_8bits(src, dst, src_w, src_h, dst_w, dst_h, x, y, jobnr, nb_jobs);
}
static int blend_slice_yuv420(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 1, jobnr, nb_jobs);
blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 1, jobnr, nb_jobs);
return 0;
}
@@ -667,7 +723,39 @@ static int blend_slice_yuva420(AVFilterContext *ctx, void *arg, int jobnr, int n
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 1, jobnr, nb_jobs);
blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 1, jobnr, nb_jobs);
return 0;
}
static int blend_slice_yuv420p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 1, jobnr, nb_jobs);
return 0;
}
static int blend_slice_yuva420p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 1, jobnr, nb_jobs);
return 0;
}
static int blend_slice_yuv422p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
return 0;
}
static int blend_slice_yuva422p10(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv_16_10bits(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
return 0;
}
@@ -675,7 +763,7 @@ static int blend_slice_yuv422(AVFilterContext *ctx, void *arg, int jobnr, int nb
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
return 0;
}
@@ -683,7 +771,7 @@ static int blend_slice_yuva422(AVFilterContext *ctx, void *arg, int jobnr, int n
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
return 0;
}
@@ -691,7 +779,7 @@ static int blend_slice_yuv444(AVFilterContext *ctx, void *arg, int jobnr, int nb
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 1, jobnr, nb_jobs);
return 0;
}
@@ -699,7 +787,7 @@ static int blend_slice_yuva444(AVFilterContext *ctx, void *arg, int jobnr, int n
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 1, jobnr, nb_jobs);
return 0;
}
@@ -723,7 +811,7 @@ static int blend_slice_yuv420_pm(AVFilterContext *ctx, void *arg, int jobnr, int
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 0, jobnr, nb_jobs);
blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 1, 0, s->x, s->y, 0, jobnr, nb_jobs);
return 0;
}
@@ -731,7 +819,7 @@ static int blend_slice_yuva420_pm(AVFilterContext *ctx, void *arg, int jobnr, in
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 0, jobnr, nb_jobs);
blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 1, 1, s->x, s->y, 0, jobnr, nb_jobs);
return 0;
}
@@ -739,7 +827,7 @@ static int blend_slice_yuv422_pm(AVFilterContext *ctx, void *arg, int jobnr, int
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);
blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);
return 0;
}
@@ -747,7 +835,7 @@ static int blend_slice_yuva422_pm(AVFilterContext *ctx, void *arg, int jobnr, in
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);
blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 1, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);
return 0;
}
@@ -755,7 +843,7 @@ static int blend_slice_yuv444_pm(AVFilterContext *ctx, void *arg, int jobnr, int
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);
blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 0, 0, 0, s->x, s->y, 0, jobnr, nb_jobs);
return 0;
}
@@ -763,7 +851,7 @@ static int blend_slice_yuva444_pm(AVFilterContext *ctx, void *arg, int jobnr, in
{
OverlayContext *s = ctx->priv;
ThreadData *td = arg;
blend_slice_yuv(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);
blend_slice_yuv_8_8bits(ctx, td->dst, td->src, 0, 0, 1, s->x, s->y, 0, jobnr, nb_jobs);
return 0;
}
@@ -834,9 +922,15 @@ static int config_input_main(AVFilterLink *inlink)
case OVERLAY_FORMAT_YUV420:
s->blend_slice = s->main_has_alpha ? blend_slice_yuva420 : blend_slice_yuv420;
break;
case OVERLAY_FORMAT_YUV420P10:
s->blend_slice = s->main_has_alpha ? blend_slice_yuva420p10 : blend_slice_yuv420p10;
break;
case OVERLAY_FORMAT_YUV422:
s->blend_slice = s->main_has_alpha ? blend_slice_yuva422 : blend_slice_yuv422;
break;
case OVERLAY_FORMAT_YUV422P10:
s->blend_slice = s->main_has_alpha ? blend_slice_yuva422p10 : blend_slice_yuv422p10;
break;
case OVERLAY_FORMAT_YUV444:
s->blend_slice = s->main_has_alpha ? blend_slice_yuva444 : blend_slice_yuv444;
break;
@@ -851,9 +945,15 @@ static int config_input_main(AVFilterLink *inlink)
case AV_PIX_FMT_YUVA420P:
s->blend_slice = blend_slice_yuva420;
break;
case AV_PIX_FMT_YUVA420P10:
s->blend_slice = blend_slice_yuva420p10;
break;
case AV_PIX_FMT_YUVA422P:
s->blend_slice = blend_slice_yuva422;
break;
case AV_PIX_FMT_YUVA422P10:
s->blend_slice = blend_slice_yuva422p10;
break;
case AV_PIX_FMT_YUVA444P:
s->blend_slice = blend_slice_yuva444;
break;
@@ -1005,7 +1105,9 @@ static const AVOption overlay_options[] = {
{ "shortest", "force termination when the shortest input terminates", OFFSET(fs.opt_shortest), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
{ "format", "set output format", OFFSET(format), AV_OPT_TYPE_INT, {.i64=OVERLAY_FORMAT_YUV420}, 0, OVERLAY_FORMAT_NB-1, FLAGS, "format" },
{ "yuv420", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420}, .flags = FLAGS, .unit = "format" },
{ "yuv420p10", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV420P10}, .flags = FLAGS, .unit = "format" },
{ "yuv422", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV422}, .flags = FLAGS, .unit = "format" },
{ "yuv422p10", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV422P10}, .flags = FLAGS, .unit = "format" },
{ "yuv444", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_YUV444}, .flags = FLAGS, .unit = "format" },
{ "rgb", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_RGB}, .flags = FLAGS, .unit = "format" },
{ "gbrp", "", 0, AV_OPT_TYPE_CONST, {.i64=OVERLAY_FORMAT_GBRP}, .flags = FLAGS, .unit = "format" },

View File

@@ -41,7 +41,9 @@ enum var_name {
enum OverlayFormat {
OVERLAY_FORMAT_YUV420,
OVERLAY_FORMAT_YUV420P10,
OVERLAY_FORMAT_YUV422,
OVERLAY_FORMAT_YUV422P10,
OVERLAY_FORMAT_YUV444,
OVERLAY_FORMAT_RGB,
OVERLAY_FORMAT_GBRP,

View File

@@ -415,8 +415,6 @@ static av_always_inline void simple_rotate(uint8_t *dst, const uint8_t *src, int
}
}
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
static int filter_slice(AVFilterContext *ctx, void *arg, int job, int nb_jobs)
{
ThreadData *td = arg;

View File

@@ -647,8 +647,6 @@ static int scale_slice(AVFilterLink *link, AVFrame *out_buf, AVFrame *cur_pic, s
out,out_stride);
}
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
static int scale_frame(AVFilterLink *link, AVFrame *in, AVFrame **frame_out)
{
AVFilterContext *ctx = link->dst;
@@ -882,10 +880,19 @@ static int process_command(AVFilterContext *ctx, const char *cmd, const char *ar
return ret;
}
#if FF_API_CHILD_CLASS_NEXT
static const AVClass *child_class_next(const AVClass *prev)
{
return prev ? NULL : sws_get_class();
}
#endif
static const AVClass *child_class_iterate(void **iter)
{
const AVClass *c = *iter ? NULL : sws_get_class();
*iter = (void*)(uintptr_t)c;
return c;
}
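/*
 * Usage sketch (not part of this change): callers walk the child classes
 * through the new callback via the public av_opt_child_class_iterate()
 * wrapper, roughly:
 *
 *     void *iter = NULL;
 *     const AVClass *child;
 *
 *     while ((child = av_opt_child_class_iterate(&scale_class, &iter)))
 *         av_log(NULL, AV_LOG_INFO, "child class: %s\n", child->class_name);
 */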
#define OFFSET(x) offsetof(ScaleContext, x)
#define FLAGS AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
@@ -944,7 +951,10 @@ static const AVClass scale_class = {
.option = scale_options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_FILTER,
#if FF_API_CHILD_CLASS_NEXT
.child_class_next = child_class_next,
#endif
.child_class_iterate = child_class_iterate,
};
static const AVFilterPad avfilter_vf_scale_inputs[] = {
@@ -984,7 +994,10 @@ static const AVClass scale2ref_class = {
.option = scale_options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_FILTER,
#if FF_API_CHILD_CLASS_NEXT
.child_class_next = child_class_next,
#endif
.child_class_iterate = child_class_iterate,
};
static const AVFilterPad avfilter_vf_scale2ref_inputs[] = {

View File

@@ -64,7 +64,7 @@ static void dump_spherical(AVFilterContext *ctx, AVFrame *frame, AVFrameSideData
av_log(ctx, AV_LOG_INFO, "spherical information: ");
if (sd->size < sizeof(*spherical)) {
av_log(ctx, AV_LOG_ERROR, "invalid data");
av_log(ctx, AV_LOG_ERROR, "invalid data\n");
return;
}
@@ -75,7 +75,7 @@ static void dump_spherical(AVFilterContext *ctx, AVFrame *frame, AVFrameSideData
else if (spherical->projection == AV_SPHERICAL_EQUIRECTANGULAR_TILE)
av_log(ctx, AV_LOG_INFO, "tiled equirectangular ");
else {
av_log(ctx, AV_LOG_WARNING, "unknown");
av_log(ctx, AV_LOG_WARNING, "unknown\n");
return;
}
@@ -102,7 +102,7 @@ static void dump_stereo3d(AVFilterContext *ctx, AVFrameSideData *sd)
av_log(ctx, AV_LOG_INFO, "stereoscopic information: ");
if (sd->size < sizeof(*stereo)) {
av_log(ctx, AV_LOG_ERROR, "invalid data");
av_log(ctx, AV_LOG_ERROR, "invalid data\n");
return;
}
@@ -114,6 +114,22 @@ static void dump_stereo3d(AVFilterContext *ctx, AVFrameSideData *sd)
av_log(ctx, AV_LOG_INFO, " (inverted)");
}
static void dump_s12m_timecode(AVFilterContext *ctx, AVFrameSideData *sd)
{
const uint32_t *tc = (const uint32_t *)sd->data;
if ((sd->size != sizeof(uint32_t) * 4) || (tc[0] > 3)) {
av_log(ctx, AV_LOG_ERROR, "invalid data\n");
return;
}
for (int j = 1; j <= tc[0]; j++) {
char tcbuf[AV_TIMECODE_STR_SIZE];
av_timecode_make_smpte_tc_string(tcbuf, tc[j], 0);
av_log(ctx, AV_LOG_INFO, "timecode - %s%s", tcbuf, j != tc[0] ? ", " : "");
}
}
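/*
 * For reference, a producer of this side data would look roughly like the
 * sketch below ("n" is a hypothetical frame counter; 25 fps is assumed):
 *
 *     AVTimecode tcr;
 *     AVFrameSideData *sd = av_frame_new_side_data(frame,
 *                                                  AV_FRAME_DATA_S12M_TIMECODE,
 *                                                  sizeof(uint32_t) * 4);
 *
 *     if (sd && av_timecode_init(&tcr, (AVRational){25, 1}, 0, 0, NULL) >= 0) {
 *         uint32_t *tc = (uint32_t *)sd->data;
 *         tc[0] = 1;                                            // at most 3 timecodes
 *         tc[1] = av_timecode_get_smpte_from_framenum(&tcr, n); // SMPTE ST 12-1 word
 *     }
 */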
static void dump_roi(AVFilterContext *ctx, AVFrameSideData *sd)
{
int nb_rois;
@@ -123,7 +139,7 @@ static void dump_roi(AVFilterContext *ctx, AVFrameSideData *sd)
roi = (const AVRegionOfInterest *)sd->data;
roi_size = roi->self_size;
if (!roi_size || sd->size % roi_size != 0) {
av_log(ctx, AV_LOG_ERROR, "Invalid AVRegionOfInterest.self_size.");
av_log(ctx, AV_LOG_ERROR, "Invalid AVRegionOfInterest.self_size.\n");
return;
}
nb_rois = sd->size / roi_size;
@@ -142,7 +158,7 @@ static void dump_mastering_display(AVFilterContext *ctx, AVFrameSideData *sd)
av_log(ctx, AV_LOG_INFO, "mastering display: ");
if (sd->size < sizeof(*mastering_display)) {
av_log(ctx, AV_LOG_ERROR, "invalid data");
av_log(ctx, AV_LOG_ERROR, "invalid data\n");
return;
}
@@ -190,6 +206,33 @@ static void dump_video_enc_params(AVFilterContext *ctx, AVFrameSideData *sd)
av_log(ctx, AV_LOG_INFO, "%u blocks; ", par->nb_blocks);
}
static void dump_sei_unregistered_metadata(AVFilterContext *ctx, AVFrameSideData *sd)
{
const int uuid_size = 16;
uint8_t *user_data = sd->data;
int i;
if (sd->size < uuid_size) {
av_log(ctx, AV_LOG_ERROR, "invalid data(%d < UUID(%d-bytes))\n", sd->size, uuid_size);
return;
}
av_log(ctx, AV_LOG_INFO, "User Data Unregistered:\n");
av_log(ctx, AV_LOG_INFO, "UUID=");
for (i = 0; i < uuid_size; i++) {
av_log(ctx, AV_LOG_INFO, "%02x", user_data[i]);
if (i == 3 || i == 5 || i == 7 || i == 9)
av_log(ctx, AV_LOG_INFO, "-");
}
av_log(ctx, AV_LOG_INFO, "\n");
av_log(ctx, AV_LOG_INFO, "User Data=");
for (; i < sd->size; i++) {
av_log(ctx, AV_LOG_INFO, "%02x", user_data[i]);
}
av_log(ctx, AV_LOG_INFO, "\n");
}
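/*
 * Note: the dashes emitted after bytes 3, 5, 7 and 9 above render the
 * 16-byte UUID in its canonical 8-4-4-4-12 hex grouping.
 */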
static void dump_color_property(AVFilterContext *ctx, AVFrame *frame)
{
const char *color_range_str = av_color_range_name(frame->color_range);
@@ -337,17 +380,7 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
dump_stereo3d(ctx, sd);
break;
case AV_FRAME_DATA_S12M_TIMECODE: {
uint32_t *tc = (uint32_t*)sd->data;
int m = FFMIN(tc[0],3);
if (sd->size != 16) {
av_log(ctx, AV_LOG_ERROR, "invalid data");
break;
}
for (int j = 1; j <= m; j++) {
char tcbuf[AV_TIMECODE_STR_SIZE];
av_timecode_make_smpte_tc_string(tcbuf, tc[j], 0);
av_log(ctx, AV_LOG_INFO, "timecode - %s%s", tcbuf, j != m ? ", " : "");
}
dump_s12m_timecode(ctx, sd);
break;
}
case AV_FRAME_DATA_DISPLAYMATRIX:
@@ -375,8 +408,11 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
case AV_FRAME_DATA_VIDEO_ENC_PARAMS:
dump_video_enc_params(ctx, sd);
break;
case AV_FRAME_DATA_SEI_UNREGISTERED:
dump_sei_unregistered_metadata(ctx, sd);
break;
default:
av_log(ctx, AV_LOG_WARNING, "unknown side data type %d (%d bytes)",
av_log(ctx, AV_LOG_WARNING, "unknown side data type %d (%d bytes)\n",
sd->type, sd->size);
break;
}

View File

@@ -76,7 +76,7 @@ static int config_output(AVFilterLink *outlink)
return 0;
}
static int disp_palette(AVFrame *out, const AVFrame *in, int size)
static void disp_palette(AVFrame *out, const AVFrame *in, int size)
{
int x, y, i, j;
uint32_t *dst = (uint32_t *)out->data[0];
@@ -88,12 +88,10 @@ static int disp_palette(AVFrame *out, const AVFrame *in, int size)
for (j = 0; j < size; j++)
for (i = 0; i < size; i++)
dst[(y*dst_linesize + x) * size + j*dst_linesize + i] = pal[y*16 + x];
return 0;
}
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
int ret;
AVFrame *out;
AVFilterContext *ctx = inlink->dst;
const ShowPaletteContext *s = ctx->priv;
@@ -105,9 +103,9 @@ static int filter_frame(AVFilterLink *inlink, AVFrame *in)
return AVERROR(ENOMEM);
}
av_frame_copy_props(out, in);
ret = disp_palette(out, in, s->size);
disp_palette(out, in, s->size);
av_frame_free(&in);
return ret < 0 ? ret : ff_filter_frame(outlink, out);
return ff_filter_frame(outlink, out);
}
static const AVFilterPad showpalette_inputs[] = {

View File

@@ -44,10 +44,19 @@ enum mode {
NB_MODES
};
#if FF_API_CHILD_CLASS_NEXT
static const AVClass *child_class_next(const AVClass *prev)
{
return prev ? NULL : avcodec_dct_get_class();
}
#endif
static const AVClass *child_class_iterate(void **iter)
{
const AVClass *c = *iter ? NULL : avcodec_dct_get_class();
*iter = (void*)(uintptr_t)c;
return c;
}
static void *child_next(void *obj, void *prev)
{
@@ -74,7 +83,10 @@ static const AVClass spp_class = {
.option = spp_options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_FILTER,
#if FF_API_CHILD_CLASS_NEXT
.child_class_next = child_class_next,
#endif
.child_class_iterate = child_class_iterate,
.child_next = child_next,
};

View File

@@ -478,7 +478,6 @@ static av_cold int init_subtitles(AVFilterContext *ctx)
end:
av_dict_free(&codec_opts);
avcodec_close(dec_ctx);
avcodec_free_context(&dec_ctx);
avformat_close_input(&fmt);
return ret;

View File

@@ -468,10 +468,8 @@ static av_cold void uninit(AVFilterContext *ctx)
av_freep(&uspp->src[i]);
}
for (i = 0; i < (1 << uspp->log2_count); i++) {
avcodec_close(uspp->avctx_enc[i]);
av_freep(&uspp->avctx_enc[i]);
}
for (i = 0; i < (1 << uspp->log2_count); i++)
avcodec_free_context(&uspp->avctx_enc[i]);
av_freep(&uspp->non_b_qp_table);
av_freep(&uspp->outbuf);

View File

@@ -81,6 +81,8 @@ static const AVOption v360_options[] = {
{ "tsp", "truncated square pyramid", 0, AV_OPT_TYPE_CONST, {.i64=TSPYRAMID}, 0, 0, FLAGS, "in" },
{ "hequirect", "half equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=HEQUIRECTANGULAR},0, 0, FLAGS, "in" },
{ "he", "half equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=HEQUIRECTANGULAR},0, 0, FLAGS, "in" },
{ "equisolid", "equisolid", 0, AV_OPT_TYPE_CONST, {.i64=EQUISOLID}, 0, 0, FLAGS, "in" },
{ "og", "orthographic", 0, AV_OPT_TYPE_CONST, {.i64=ORTHOGRAPHIC}, 0, 0, FLAGS, "in" },
{ "output", "set output projection", OFFSET(out), AV_OPT_TYPE_INT, {.i64=CUBEMAP_3_2}, 0, NB_PROJECTIONS-1, FLAGS, "out" },
{ "e", "equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=EQUIRECTANGULAR}, 0, 0, FLAGS, "out" },
{ "equirect", "equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=EQUIRECTANGULAR}, 0, 0, FLAGS, "out" },
@@ -108,6 +110,8 @@ static const AVOption v360_options[] = {
{ "tsp", "truncated square pyramid", 0, AV_OPT_TYPE_CONST, {.i64=TSPYRAMID}, 0, 0, FLAGS, "out" },
{ "hequirect", "half equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=HEQUIRECTANGULAR},0, 0, FLAGS, "out" },
{ "he", "half equirectangular", 0, AV_OPT_TYPE_CONST, {.i64=HEQUIRECTANGULAR},0, 0, FLAGS, "out" },
{ "equisolid", "equisolid", 0, AV_OPT_TYPE_CONST, {.i64=EQUISOLID}, 0, 0, FLAGS, "out" },
{ "og", "orthographic", 0, AV_OPT_TYPE_CONST, {.i64=ORTHOGRAPHIC}, 0, 0, FLAGS, "out" },
{ "interp", "set interpolation method", OFFSET(interp), AV_OPT_TYPE_INT, {.i64=BILINEAR}, 0, NB_INTERP_METHODS-1, FLAGS, "interp" },
{ "near", "nearest neighbour", 0, AV_OPT_TYPE_CONST, {.i64=NEAREST}, 0, 0, FLAGS, "interp" },
{ "nearest", "nearest neighbour", 0, AV_OPT_TYPE_CONST, {.i64=NEAREST}, 0, 0, FLAGS, "interp" },
@@ -1819,6 +1823,217 @@ static int xyz_to_stereographic(const V360Context *s,
return visible;
}
/**
* Prepare data for processing equisolid output format.
*
* @param ctx filter context
*
* @return error code
*/
static int prepare_equisolid_out(AVFilterContext *ctx)
{
V360Context *s = ctx->priv;
s->flat_range[0] = sinf(s->h_fov * M_PI / 720.f);
s->flat_range[1] = sinf(s->v_fov * M_PI / 720.f);
return 0;
}
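/*
 * Background sketch (not part of this change): an equisolid ("equal-area")
 * fisheye maps the angle theta off the optical axis to the image radius
 * r = 2 * f * sin(theta / 2). Normalizing the focal length so the frame edge
 * corresponds to half the field of view, theta_max = h_fov / 2 degrees, puts
 * the edge at sin(theta_max / 2) = sin(h_fov * M_PI / 720), which is exactly
 * the constant computed above.
 */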
/**
* Calculate 3D coordinates on sphere for corresponding frame position in equisolid format.
*
* @param s filter private context
* @param i horizontal position on frame [0, width)
* @param j vertical position on frame [0, height)
* @param width frame width
* @param height frame height
* @param vec coordinates on sphere
*/
static int equisolid_to_xyz(const V360Context *s,
int i, int j, int width, int height,
float *vec)
{
const float x = ((2.f * i + 1.f) / width - 1.f) * s->flat_range[0];
const float y = ((2.f * j + 1.f) / height - 1.f) * s->flat_range[1];
const float r = hypotf(x, y);
const float theta = asinf(r) * 2.f;
const float sin_theta = sinf(theta);
vec[0] = x / r * sin_theta;
vec[1] = y / r * sin_theta;
vec[2] = cosf(theta);
normalize_vector(vec);
return 1;
}
/**
* Prepare data for processing equisolid input format.
*
* @param ctx filter context
*
* @return error code
*/
static int prepare_equisolid_in(AVFilterContext *ctx)
{
V360Context *s = ctx->priv;
s->iflat_range[0] = sinf(FFMIN(s->ih_fov, 359.f) * M_PI / 720.f);
s->iflat_range[1] = sinf(FFMIN(s->iv_fov, 359.f) * M_PI / 720.f);
return 0;
}
/**
* Calculate frame position in equisolid format for corresponding 3D coordinates on sphere.
*
* @param s filter private context
* @param vec coordinates on sphere
* @param width frame width
* @param height frame height
* @param us horizontal coordinates for interpolation window
* @param vs vertical coordinates for interpolation window
* @param du horizontal relative coordinate
* @param dv vertical relative coordinate
*/
static int xyz_to_equisolid(const V360Context *s,
const float *vec, int width, int height,
int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
{
const float theta = acosf(vec[2]);
const float r = sinf(theta * 0.5f);
const float c = r / hypotf(vec[0], vec[1]);
const float x = vec[0] * c / s->iflat_range[0] * s->input_mirror_modifier[0];
const float y = vec[1] * c / s->iflat_range[1] * s->input_mirror_modifier[1];
const float uf = (x + 1.f) * width / 2.f;
const float vf = (y + 1.f) * height / 2.f;
const int ui = floorf(uf);
const int vi = floorf(vf);
const int visible = isfinite(x) && isfinite(y) && vi >= 0 && vi < height && ui >= 0 && ui < width;
*du = visible ? uf - ui : 0.f;
*dv = visible ? vf - vi : 0.f;
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
us[i][j] = visible ? av_clip(ui + j - 1, 0, width - 1) : 0;
vs[i][j] = visible ? av_clip(vi + i - 1, 0, height - 1) : 0;
}
}
return visible;
}
/**
* Prepare data for processing orthographic output format.
*
* @param ctx filter context
*
* @return error code
*/
static int prepare_orthographic_out(AVFilterContext *ctx)
{
V360Context *s = ctx->priv;
s->flat_range[0] = sinf(FFMIN(s->h_fov, 180.f) * M_PI / 360.f);
s->flat_range[1] = sinf(FFMIN(s->v_fov, 180.f) * M_PI / 360.f);
return 0;
}
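/*
 * Background sketch (not part of this change): an orthographic fisheye maps
 * theta to r = f * sin(theta), so the edge of a fov-degree view lies at
 * sin(fov * M_PI / 360). The clamp to 180 degrees reflects that sin(theta)
 * is only invertible on the front hemisphere, which is also why
 * xyz_to_orthographic() rejects points with vec[2] < 0.
 */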
/**
* Calculate 3D coordinates on sphere for corresponding frame position in orthographic format.
*
* @param s filter private context
* @param i horizontal position on frame [0, width)
* @param j vertical position on frame [0, height)
* @param width frame width
* @param height frame height
* @param vec coordinates on sphere
*/
static int orthographic_to_xyz(const V360Context *s,
int i, int j, int width, int height,
float *vec)
{
const float x = ((2.f * i + 1.f) / width - 1.f) * s->flat_range[0];
const float y = ((2.f * j + 1.f) / height - 1.f) * s->flat_range[1];
const float r = hypotf(x, y);
const float theta = asinf(r);
vec[0] = x;
vec[1] = y;
vec[2] = cosf(theta);
normalize_vector(vec);
return 1;
}
/**
* Prepare data for processing orthographic input format.
*
* @param ctx filter context
*
* @return error code
*/
static int prepare_orthographic_in(AVFilterContext *ctx)
{
V360Context *s = ctx->priv;
s->iflat_range[0] = sinf(FFMIN(s->ih_fov, 180.f) * M_PI / 360.f);
s->iflat_range[1] = sinf(FFMIN(s->iv_fov, 180.f) * M_PI / 360.f);
return 0;
}
/**
* Calculate frame position in orthographic format for corresponding 3D coordinates on sphere.
*
* @param s filter private context
* @param vec coordinates on sphere
* @param width frame width
* @param height frame height
* @param us horizontal coordinates for interpolation window
* @param vs vertical coordinates for interpolation window
* @param du horizontal relative coordinate
* @param dv vertical relative coordinate
*/
static int xyz_to_orthographic(const V360Context *s,
const float *vec, int width, int height,
int16_t us[4][4], int16_t vs[4][4], float *du, float *dv)
{
const float theta = acosf(vec[2]);
const float r = sinf(theta);
const float c = r / hypotf(vec[0], vec[1]);
const float x = vec[0] * c / s->iflat_range[0] * s->input_mirror_modifier[0];
const float y = vec[1] * c / s->iflat_range[1] * s->input_mirror_modifier[1];
const float uf = (x + 1.f) * width / 2.f;
const float vf = (y + 1.f) * height / 2.f;
const int ui = floorf(uf);
const int vi = floorf(vf);
const int visible = vec[2] >= 0.f && isfinite(x) && isfinite(y) && vi >= 0 && vi < height && ui >= 0 && ui < width;
*du = visible ? uf - ui : 0.f;
*dv = visible ? vf - vi : 0.f;
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 4; j++) {
us[i][j] = visible ? av_clip(ui + j - 1, 0, width - 1) : 0;
vs[i][j] = visible ? av_clip(vi + i - 1, 0, height - 1) : 0;
}
}
return visible;
}
/**
* Calculate frame position in equirectangular format for corresponding 3D coordinates on sphere.
*
@@ -3644,6 +3859,29 @@ static int allocate_plane(V360Context *s, int sizeof_uv, int sizeof_ker, int siz
static void fov_from_dfov(int format, float d_fov, float w, float h, float *h_fov, float *v_fov)
{
switch (format) {
case ORTHOGRAPHIC:
{
const float d = 0.5f * hypotf(w, h);
const float l = sinf(d_fov * M_PI / 360.f) / d;
*h_fov = asinf(w * 0.5f * l) * 360.f / M_PI;
*v_fov = asinf(h * 0.5f * l) * 360.f / M_PI;
if (d_fov > 180.f) {
*h_fov = 180.f - *h_fov;
*v_fov = 180.f - *v_fov;
}
}
break;
case EQUISOLID:
{
const float d = 0.5f * hypotf(w, h);
const float l = d / (sinf(d_fov * M_PI / 720.f));
*h_fov = 2.f * asinf(w * 0.5f / l) * 360.f / M_PI;
*v_fov = 2.f * asinf(h * 0.5f / l) * 360.f / M_PI;
}
break;
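/*
 * Worked example (illustrative, not part of this change): for a 1920x1080
 * equisolid frame with d_fov = 180, d = 0.5f * hypotf(1920, 1080) ~ 1101.4
 * and l = d / sinf(180 * M_PI / 720) ~ 1557.7, so
 * h_fov = 2 * asinf(960 / 1557.7) * 360 / M_PI ~ 152.2 degrees and
 * v_fov = 2 * asinf(540 / 1557.7) * 360 / M_PI ~  81.1 degrees.
 */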
case STEREOGRAPHIC:
{
const float d = 0.5f * hypotf(w, h);
@@ -4014,6 +4252,18 @@ static int config_output(AVFilterLink *outlink)
wf = w * 2.f;
hf = h;
break;
case EQUISOLID:
s->in_transform = xyz_to_equisolid;
err = prepare_equisolid_in(ctx);
wf = w;
hf = h / 2.f;
break;
case ORTHOGRAPHIC:
s->in_transform = xyz_to_orthographic;
err = prepare_orthographic_in(ctx);
wf = w;
hf = h / 2.f;
break;
default:
av_log(ctx, AV_LOG_ERROR, "Specified input format is not handled.\n");
return AVERROR_BUG;
@@ -4150,6 +4400,18 @@ static int config_output(AVFilterLink *outlink)
w = lrintf(wf / 2.f);
h = lrintf(hf);
break;
case EQUISOLID:
s->out_transform = equisolid_to_xyz;
prepare_out = prepare_equisolid_out;
w = lrintf(wf);
h = lrintf(hf * 2.f);
break;
case ORTHOGRAPHIC:
s->out_transform = orthographic_to_xyz;
prepare_out = prepare_orthographic_out;
w = lrintf(wf);
h = lrintf(hf * 2.f);
break;
default:
av_log(ctx, AV_LOG_ERROR, "Specified output format is not handled.\n");
return AVERROR_BUG;
@@ -4231,8 +4493,11 @@ static int config_output(AVFilterLink *outlink)
s->map[1] = s->map[2] = 1;
}
for (int i = 0; i < s->nb_allocated; i++)
allocate_plane(s, sizeof_uv, sizeof_ker, sizeof_mask * have_alpha * s->alpha, i);
for (int i = 0; i < s->nb_allocated; i++) {
err = allocate_plane(s, sizeof_uv, sizeof_ker, sizeof_mask * have_alpha * s->alpha, i);
if (err < 0)
return err;
}
calculate_rotation_matrix(s->yaw, s->pitch, s->roll, s->rot_mat, s->rotation_order);
set_mirror_modifier(s->h_flip, s->v_flip, s->d_flip, s->output_mirror_modifier);

View File

@@ -71,7 +71,7 @@ static const AVOption vaguedenoiser_options[] = {
{ "method", "set filtering method", OFFSET(method), AV_OPT_TYPE_INT, {.i64=2 }, 0, 2, FLAGS, "method" },
{ "hard", "hard thresholding", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "method" },
{ "soft", "soft thresholding", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "method" },
{ "garrote", "garotte thresholding", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "method" },
{ "garrote", "garrote thresholding", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "method" },
{ "nsteps", "set number of steps", OFFSET(nsteps), AV_OPT_TYPE_INT, {.i64=6 }, 1, 32, FLAGS },
{ "percent", "set percent of full denoising", OFFSET(percent),AV_OPT_TYPE_FLOAT, {.dbl=85}, 0,100, FLAGS },
{ "planes", "set planes to filter", OFFSET(planes), AV_OPT_TYPE_INT, {.i64=15 }, 0, 15, FLAGS },

View File

@@ -155,9 +155,6 @@ static double get_natural_factor(const VignetteContext *s, int x, int y)
}
}
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts) * av_q2d(tb))
static void update_context(VignetteContext *s, AVFilterLink *inlink, AVFrame *frame)
{
int x, y;

View File

@@ -38,7 +38,8 @@ static const char *const var_names[] = {
"on",
"duration",
"pduration",
"time",
"in_time", "it",
"out_time", "time", "ot",
"frame",
"zoom",
"pzoom",
@@ -61,7 +62,8 @@ enum var_name {
VAR_ON,
VAR_DURATION,
VAR_PDURATION,
VAR_TIME,
VAR_IN_TIME, VAR_IT,
VAR_TIME, VAR_OUT_TIME, VAR_OT,
VAR_FRAME,
VAR_ZOOM,
VAR_PZOOM,
@@ -155,6 +157,7 @@ static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_va
{
ZPContext *s = ctx->priv;
AVFilterLink *outlink = ctx->outputs[0];
AVFilterLink *inlink = ctx->inputs[0];
int64_t pts = s->frame_count;
int k, x, y, w, h, ret = 0;
uint8_t *input[4];
@@ -165,7 +168,10 @@ static int output_single_frame(AVFilterContext *ctx, AVFrame *in, double *var_va
var_values[VAR_PY] = s->y;
var_values[VAR_PZOOM] = s->prev_zoom;
var_values[VAR_PDURATION] = s->prev_nb_frames;
var_values[VAR_TIME] = pts * av_q2d(outlink->time_base);
var_values[VAR_IN_TIME] = var_values[VAR_IT] = in->pts == AV_NOPTS_VALUE ?
NAN : in->pts * av_q2d(inlink->time_base);
var_values[VAR_OUT_TIME] = pts * av_q2d(outlink->time_base);
var_values[VAR_TIME] = var_values[VAR_OT] = var_values[VAR_OUT_TIME];
var_values[VAR_FRAME] = i;
var_values[VAR_ON] = outlink->frame_count_in;
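/*
 * Usage sketch for the new variables (values illustrative): hold the zoom at
 * 1.5x while the input timestamp is between 2 and 5 seconds, 1x otherwise:
 *
 *     ffmpeg -i in.mp4 -vf "zoompan=z='if(between(in_time,2,5),1.5,1)':d=1:fps=25" out.mp4
 */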