early-access version 1432

This commit is contained in:
pineappleEA
2021-02-09 04:25:58 +01:00
parent de64eab4b4
commit 3d5a9d908a
7336 changed files with 1773492 additions and 111 deletions

1
externals/ffmpeg/libavfilter/.gitignore vendored Executable file
View File

@@ -0,0 +1 @@
/filter_list.c

543
externals/ffmpeg/libavfilter/Makefile vendored Executable file
View File

@@ -0,0 +1,543 @@
NAME = avfilter
DESC = FFmpeg audio/video filtering library

# Public headers installed with the library.
# NOTE: the last entry of a continued list must NOT end with a backslash,
# otherwise the following assignment line is swallowed as a continuation.
HEADERS = avfilter.h                                                    \
          buffersink.h                                                  \
          buffersrc.h                                                   \
          version.h

# Objects unconditionally compiled into the library.
OBJS = allfilters.o                                                     \
       audio.o                                                          \
       avfilter.o                                                       \
       avfiltergraph.o                                                  \
       buffersink.o                                                     \
       buffersrc.o                                                      \
       drawutils.o                                                      \
       fifo.o                                                           \
       formats.o                                                        \
       framepool.o                                                      \
       framequeue.o                                                     \
       graphdump.o                                                      \
       graphparser.o                                                    \
       transform.o                                                      \
       video.o

OBJS-$(HAVE_THREADS) += pthread.o

# subsystems
OBJS-$(CONFIG_QSVVPP) += qsvvpp.o
OBJS-$(CONFIG_SCENE_SAD) += scene_sad.o

include $(SRC_PATH)/libavfilter/dnn/Makefile
# audio filters
OBJS-$(CONFIG_ABENCH_FILTER) += f_bench.o
OBJS-$(CONFIG_ACOMPRESSOR_FILTER) += af_sidechaincompress.o
OBJS-$(CONFIG_ACONTRAST_FILTER) += af_acontrast.o
OBJS-$(CONFIG_ACOPY_FILTER) += af_acopy.o
OBJS-$(CONFIG_ACROSSFADE_FILTER) += af_afade.o
OBJS-$(CONFIG_ACROSSOVER_FILTER) += af_acrossover.o
OBJS-$(CONFIG_ACRUSHER_FILTER) += af_acrusher.o
OBJS-$(CONFIG_ACUE_FILTER) += f_cue.o
OBJS-$(CONFIG_ADECLICK_FILTER) += af_adeclick.o
OBJS-$(CONFIG_ADECLIP_FILTER) += af_adeclick.o
OBJS-$(CONFIG_ADELAY_FILTER) += af_adelay.o
OBJS-$(CONFIG_ADERIVATIVE_FILTER) += af_aderivative.o
OBJS-$(CONFIG_AECHO_FILTER) += af_aecho.o
OBJS-$(CONFIG_AEMPHASIS_FILTER) += af_aemphasis.o
OBJS-$(CONFIG_AEVAL_FILTER) += aeval.o
OBJS-$(CONFIG_AFADE_FILTER) += af_afade.o
OBJS-$(CONFIG_AFFTDN_FILTER) += af_afftdn.o
OBJS-$(CONFIG_AFFTFILT_FILTER) += af_afftfilt.o
OBJS-$(CONFIG_AFIR_FILTER) += af_afir.o
OBJS-$(CONFIG_AFORMAT_FILTER) += af_aformat.o
OBJS-$(CONFIG_AGATE_FILTER) += af_agate.o
OBJS-$(CONFIG_AIIR_FILTER) += af_aiir.o
OBJS-$(CONFIG_AINTEGRAL_FILTER) += af_aderivative.o
OBJS-$(CONFIG_AINTERLEAVE_FILTER) += f_interleave.o
OBJS-$(CONFIG_ALIMITER_FILTER) += af_alimiter.o
OBJS-$(CONFIG_ALLPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_ALOOP_FILTER) += f_loop.o
OBJS-$(CONFIG_AMERGE_FILTER) += af_amerge.o
OBJS-$(CONFIG_AMETADATA_FILTER) += f_metadata.o
OBJS-$(CONFIG_AMIX_FILTER) += af_amix.o
OBJS-$(CONFIG_AMULTIPLY_FILTER) += af_amultiply.o
OBJS-$(CONFIG_ANEQUALIZER_FILTER) += af_anequalizer.o
OBJS-$(CONFIG_ANLMDN_FILTER) += af_anlmdn.o
OBJS-$(CONFIG_ANLMS_FILTER) += af_anlms.o
OBJS-$(CONFIG_ANULL_FILTER) += af_anull.o
OBJS-$(CONFIG_APAD_FILTER) += af_apad.o
OBJS-$(CONFIG_APERMS_FILTER) += f_perms.o
OBJS-$(CONFIG_APHASER_FILTER) += af_aphaser.o generate_wave_table.o
OBJS-$(CONFIG_APULSATOR_FILTER) += af_apulsator.o
OBJS-$(CONFIG_AREALTIME_FILTER) += f_realtime.o
OBJS-$(CONFIG_ARESAMPLE_FILTER) += af_aresample.o
OBJS-$(CONFIG_AREVERSE_FILTER) += f_reverse.o
OBJS-$(CONFIG_ARNNDN_FILTER) += af_arnndn.o
OBJS-$(CONFIG_ASELECT_FILTER) += f_select.o
OBJS-$(CONFIG_ASENDCMD_FILTER) += f_sendcmd.o
OBJS-$(CONFIG_ASETNSAMPLES_FILTER) += af_asetnsamples.o
OBJS-$(CONFIG_ASETPTS_FILTER) += setpts.o
OBJS-$(CONFIG_ASETRATE_FILTER) += af_asetrate.o
OBJS-$(CONFIG_ASETTB_FILTER) += settb.o
OBJS-$(CONFIG_ASHOWINFO_FILTER) += af_ashowinfo.o
OBJS-$(CONFIG_ASIDEDATA_FILTER) += f_sidedata.o
OBJS-$(CONFIG_ASOFTCLIP_FILTER) += af_asoftclip.o
OBJS-$(CONFIG_ASPLIT_FILTER) += split.o
OBJS-$(CONFIG_ASR_FILTER) += af_asr.o
OBJS-$(CONFIG_ASTATS_FILTER) += af_astats.o
OBJS-$(CONFIG_ASTREAMSELECT_FILTER) += f_streamselect.o framesync.o
OBJS-$(CONFIG_ASUBBOOST_FILTER) += af_asubboost.o
OBJS-$(CONFIG_ATEMPO_FILTER) += af_atempo.o
OBJS-$(CONFIG_ATRIM_FILTER) += trim.o
OBJS-$(CONFIG_AXCORRELATE_FILTER) += af_axcorrelate.o
OBJS-$(CONFIG_AZMQ_FILTER) += f_zmq.o
OBJS-$(CONFIG_BANDPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_BANDREJECT_FILTER) += af_biquads.o
OBJS-$(CONFIG_BASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_BIQUAD_FILTER) += af_biquads.o
OBJS-$(CONFIG_BS2B_FILTER) += af_bs2b.o
OBJS-$(CONFIG_CHANNELMAP_FILTER) += af_channelmap.o
OBJS-$(CONFIG_CHANNELSPLIT_FILTER) += af_channelsplit.o
OBJS-$(CONFIG_CHORUS_FILTER) += af_chorus.o generate_wave_table.o
OBJS-$(CONFIG_COMPAND_FILTER) += af_compand.o
OBJS-$(CONFIG_COMPENSATIONDELAY_FILTER) += af_compensationdelay.o
OBJS-$(CONFIG_CROSSFEED_FILTER) += af_crossfeed.o
OBJS-$(CONFIG_CRYSTALIZER_FILTER) += af_crystalizer.o
OBJS-$(CONFIG_DCSHIFT_FILTER) += af_dcshift.o
OBJS-$(CONFIG_DEESSER_FILTER) += af_deesser.o
OBJS-$(CONFIG_DRMETER_FILTER) += af_drmeter.o
OBJS-$(CONFIG_DYNAUDNORM_FILTER) += af_dynaudnorm.o
OBJS-$(CONFIG_EARWAX_FILTER) += af_earwax.o
OBJS-$(CONFIG_EBUR128_FILTER) += f_ebur128.o
OBJS-$(CONFIG_EQUALIZER_FILTER) += af_biquads.o
OBJS-$(CONFIG_EXTRASTEREO_FILTER) += af_extrastereo.o
OBJS-$(CONFIG_FIREQUALIZER_FILTER) += af_firequalizer.o
OBJS-$(CONFIG_FLANGER_FILTER) += af_flanger.o generate_wave_table.o
OBJS-$(CONFIG_HAAS_FILTER) += af_haas.o
OBJS-$(CONFIG_HDCD_FILTER) += af_hdcd.o
OBJS-$(CONFIG_HEADPHONE_FILTER) += af_headphone.o
OBJS-$(CONFIG_HIGHPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_HIGHSHELF_FILTER) += af_biquads.o
OBJS-$(CONFIG_JOIN_FILTER) += af_join.o
OBJS-$(CONFIG_LADSPA_FILTER) += af_ladspa.o
OBJS-$(CONFIG_LOUDNORM_FILTER) += af_loudnorm.o ebur128.o
OBJS-$(CONFIG_LOWPASS_FILTER) += af_biquads.o
OBJS-$(CONFIG_LOWSHELF_FILTER) += af_biquads.o
OBJS-$(CONFIG_LV2_FILTER) += af_lv2.o
OBJS-$(CONFIG_MCOMPAND_FILTER) += af_mcompand.o
OBJS-$(CONFIG_PAN_FILTER) += af_pan.o
OBJS-$(CONFIG_REPLAYGAIN_FILTER) += af_replaygain.o
OBJS-$(CONFIG_RESAMPLE_FILTER) += af_resample.o
OBJS-$(CONFIG_RUBBERBAND_FILTER) += af_rubberband.o
OBJS-$(CONFIG_SIDECHAINCOMPRESS_FILTER) += af_sidechaincompress.o
OBJS-$(CONFIG_SIDECHAINGATE_FILTER) += af_agate.o
OBJS-$(CONFIG_SILENCEDETECT_FILTER) += af_silencedetect.o
OBJS-$(CONFIG_SILENCEREMOVE_FILTER) += af_silenceremove.o
OBJS-$(CONFIG_SOFALIZER_FILTER) += af_sofalizer.o
OBJS-$(CONFIG_STEREOTOOLS_FILTER) += af_stereotools.o
OBJS-$(CONFIG_STEREOWIDEN_FILTER) += af_stereowiden.o
OBJS-$(CONFIG_SUPEREQUALIZER_FILTER) += af_superequalizer.o
OBJS-$(CONFIG_SURROUND_FILTER) += af_surround.o
OBJS-$(CONFIG_TREBLE_FILTER) += af_biquads.o
OBJS-$(CONFIG_TREMOLO_FILTER) += af_tremolo.o
OBJS-$(CONFIG_VIBRATO_FILTER) += af_vibrato.o generate_wave_table.o
OBJS-$(CONFIG_VOLUME_FILTER) += af_volume.o
OBJS-$(CONFIG_VOLUMEDETECT_FILTER) += af_volumedetect.o
OBJS-$(CONFIG_AEVALSRC_FILTER) += aeval.o
OBJS-$(CONFIG_AFIRSRC_FILTER) += asrc_afirsrc.o
OBJS-$(CONFIG_ANOISESRC_FILTER) += asrc_anoisesrc.o
OBJS-$(CONFIG_ANULLSRC_FILTER) += asrc_anullsrc.o
OBJS-$(CONFIG_FLITE_FILTER) += asrc_flite.o
OBJS-$(CONFIG_HILBERT_FILTER) += asrc_hilbert.o
OBJS-$(CONFIG_SINC_FILTER) += asrc_sinc.o
OBJS-$(CONFIG_SINE_FILTER) += asrc_sine.o
OBJS-$(CONFIG_ANULLSINK_FILTER) += asink_anullsink.o
# video filters
OBJS-$(CONFIG_ADDROI_FILTER) += vf_addroi.o
OBJS-$(CONFIG_ALPHAEXTRACT_FILTER) += vf_extractplanes.o
OBJS-$(CONFIG_ALPHAMERGE_FILTER) += vf_alphamerge.o
OBJS-$(CONFIG_AMPLIFY_FILTER) += vf_amplify.o
OBJS-$(CONFIG_ASS_FILTER) += vf_subtitles.o
OBJS-$(CONFIG_ATADENOISE_FILTER) += vf_atadenoise.o
OBJS-$(CONFIG_AVGBLUR_FILTER) += vf_avgblur.o
OBJS-$(CONFIG_AVGBLUR_OPENCL_FILTER) += vf_avgblur_opencl.o opencl.o \
opencl/avgblur.o boxblur.o
OBJS-$(CONFIG_AVGBLUR_VULKAN_FILTER) += vf_avgblur_vulkan.o vulkan.o
OBJS-$(CONFIG_BBOX_FILTER) += bbox.o vf_bbox.o
OBJS-$(CONFIG_BENCH_FILTER) += f_bench.o
OBJS-$(CONFIG_BILATERAL_FILTER) += vf_bilateral.o
OBJS-$(CONFIG_BITPLANENOISE_FILTER) += vf_bitplanenoise.o
OBJS-$(CONFIG_BLACKDETECT_FILTER) += vf_blackdetect.o
OBJS-$(CONFIG_BLACKFRAME_FILTER) += vf_blackframe.o
OBJS-$(CONFIG_BLEND_FILTER) += vf_blend.o framesync.o
OBJS-$(CONFIG_BM3D_FILTER) += vf_bm3d.o framesync.o
OBJS-$(CONFIG_BOXBLUR_FILTER) += vf_boxblur.o boxblur.o
OBJS-$(CONFIG_BOXBLUR_OPENCL_FILTER) += vf_avgblur_opencl.o opencl.o \
opencl/avgblur.o boxblur.o
OBJS-$(CONFIG_BWDIF_FILTER) += vf_bwdif.o yadif_common.o
OBJS-$(CONFIG_CAS_FILTER) += vf_cas.o
OBJS-$(CONFIG_CHROMABER_VULKAN_FILTER) += vf_chromaber_vulkan.o vulkan.o
OBJS-$(CONFIG_CHROMAHOLD_FILTER) += vf_chromakey.o
OBJS-$(CONFIG_CHROMAKEY_FILTER) += vf_chromakey.o
OBJS-$(CONFIG_CHROMASHIFT_FILTER) += vf_chromashift.o
OBJS-$(CONFIG_CIESCOPE_FILTER) += vf_ciescope.o
OBJS-$(CONFIG_CODECVIEW_FILTER) += vf_codecview.o
OBJS-$(CONFIG_COLORBALANCE_FILTER) += vf_colorbalance.o
OBJS-$(CONFIG_COLORCHANNELMIXER_FILTER) += vf_colorchannelmixer.o
OBJS-$(CONFIG_COLORKEY_FILTER) += vf_colorkey.o
OBJS-$(CONFIG_COLORKEY_OPENCL_FILTER) += vf_colorkey_opencl.o opencl.o \
opencl/colorkey.o
OBJS-$(CONFIG_COLORHOLD_FILTER) += vf_colorkey.o
OBJS-$(CONFIG_COLORLEVELS_FILTER) += vf_colorlevels.o
OBJS-$(CONFIG_COLORMATRIX_FILTER) += vf_colormatrix.o
OBJS-$(CONFIG_COLORSPACE_FILTER) += vf_colorspace.o colorspace.o colorspacedsp.o
OBJS-$(CONFIG_CONVOLUTION_FILTER) += vf_convolution.o
OBJS-$(CONFIG_CONVOLUTION_OPENCL_FILTER) += vf_convolution_opencl.o opencl.o \
opencl/convolution.o
OBJS-$(CONFIG_CONVOLVE_FILTER) += vf_convolve.o framesync.o
OBJS-$(CONFIG_COPY_FILTER) += vf_copy.o
OBJS-$(CONFIG_COREIMAGE_FILTER) += vf_coreimage.o
OBJS-$(CONFIG_COVER_RECT_FILTER) += vf_cover_rect.o lavfutils.o
OBJS-$(CONFIG_CROP_FILTER) += vf_crop.o
OBJS-$(CONFIG_CROPDETECT_FILTER) += vf_cropdetect.o
OBJS-$(CONFIG_CUE_FILTER) += f_cue.o
OBJS-$(CONFIG_CURVES_FILTER) += vf_curves.o
OBJS-$(CONFIG_DATASCOPE_FILTER) += vf_datascope.o
OBJS-$(CONFIG_DBLUR_FILTER) += vf_dblur.o
OBJS-$(CONFIG_DCTDNOIZ_FILTER) += vf_dctdnoiz.o
OBJS-$(CONFIG_DEBAND_FILTER) += vf_deband.o
OBJS-$(CONFIG_DEBLOCK_FILTER) += vf_deblock.o
OBJS-$(CONFIG_DECIMATE_FILTER) += vf_decimate.o
OBJS-$(CONFIG_DERAIN_FILTER) += vf_derain.o
OBJS-$(CONFIG_DECONVOLVE_FILTER) += vf_convolve.o framesync.o
OBJS-$(CONFIG_DEDOT_FILTER) += vf_dedot.o
OBJS-$(CONFIG_DEFLATE_FILTER) += vf_neighbor.o
OBJS-$(CONFIG_DEFLICKER_FILTER) += vf_deflicker.o
OBJS-$(CONFIG_DEINTERLACE_QSV_FILTER) += vf_deinterlace_qsv.o
OBJS-$(CONFIG_DEINTERLACE_VAAPI_FILTER) += vf_deinterlace_vaapi.o vaapi_vpp.o
OBJS-$(CONFIG_DEJUDDER_FILTER) += vf_dejudder.o
OBJS-$(CONFIG_DELOGO_FILTER) += vf_delogo.o
OBJS-$(CONFIG_DENOISE_VAAPI_FILTER) += vf_misc_vaapi.o vaapi_vpp.o
OBJS-$(CONFIG_DESHAKE_OPENCL_FILTER) += vf_deshake_opencl.o opencl.o \
opencl/deshake.o
OBJS-$(CONFIG_DESHAKE_FILTER) += vf_deshake.o
OBJS-$(CONFIG_DESPILL_FILTER) += vf_despill.o
OBJS-$(CONFIG_DETELECINE_FILTER) += vf_detelecine.o
OBJS-$(CONFIG_DILATION_FILTER) += vf_neighbor.o
OBJS-$(CONFIG_DILATION_OPENCL_FILTER) += vf_neighbor_opencl.o opencl.o \
opencl/neighbor.o
OBJS-$(CONFIG_DISPLACE_FILTER) += vf_displace.o framesync.o
OBJS-$(CONFIG_DNN_PROCESSING_FILTER) += vf_dnn_processing.o
OBJS-$(CONFIG_DOUBLEWEAVE_FILTER) += vf_weave.o
OBJS-$(CONFIG_DRAWBOX_FILTER) += vf_drawbox.o
OBJS-$(CONFIG_DRAWGRAPH_FILTER) += f_drawgraph.o
OBJS-$(CONFIG_DRAWGRID_FILTER) += vf_drawbox.o
OBJS-$(CONFIG_DRAWTEXT_FILTER) += vf_drawtext.o
OBJS-$(CONFIG_EDGEDETECT_FILTER) += vf_edgedetect.o
OBJS-$(CONFIG_ELBG_FILTER) += vf_elbg.o
OBJS-$(CONFIG_ENTROPY_FILTER) += vf_entropy.o
OBJS-$(CONFIG_EQ_FILTER) += vf_eq.o
OBJS-$(CONFIG_EROSION_FILTER) += vf_neighbor.o
OBJS-$(CONFIG_EROSION_OPENCL_FILTER) += vf_neighbor_opencl.o opencl.o \
opencl/neighbor.o
OBJS-$(CONFIG_EXTRACTPLANES_FILTER) += vf_extractplanes.o
OBJS-$(CONFIG_FADE_FILTER) += vf_fade.o
OBJS-$(CONFIG_FFTDNOIZ_FILTER) += vf_fftdnoiz.o
OBJS-$(CONFIG_FFTFILT_FILTER) += vf_fftfilt.o
OBJS-$(CONFIG_FIELD_FILTER) += vf_field.o
OBJS-$(CONFIG_FIELDHINT_FILTER) += vf_fieldhint.o
OBJS-$(CONFIG_FIELDMATCH_FILTER) += vf_fieldmatch.o
OBJS-$(CONFIG_FIELDORDER_FILTER) += vf_fieldorder.o
OBJS-$(CONFIG_FILLBORDERS_FILTER) += vf_fillborders.o
OBJS-$(CONFIG_FIND_RECT_FILTER) += vf_find_rect.o lavfutils.o
OBJS-$(CONFIG_FLOODFILL_FILTER) += vf_floodfill.o
OBJS-$(CONFIG_FORMAT_FILTER) += vf_format.o
OBJS-$(CONFIG_FPS_FILTER) += vf_fps.o
OBJS-$(CONFIG_FRAMEPACK_FILTER) += vf_framepack.o
OBJS-$(CONFIG_FRAMERATE_FILTER) += vf_framerate.o
OBJS-$(CONFIG_FRAMESTEP_FILTER) += vf_framestep.o
OBJS-$(CONFIG_FREEZEDETECT_FILTER) += vf_freezedetect.o
OBJS-$(CONFIG_FREEZEFRAMES_FILTER) += vf_freezeframes.o
OBJS-$(CONFIG_FREI0R_FILTER) += vf_frei0r.o
OBJS-$(CONFIG_FSPP_FILTER) += vf_fspp.o
OBJS-$(CONFIG_GBLUR_FILTER) += vf_gblur.o
OBJS-$(CONFIG_GEQ_FILTER) += vf_geq.o
OBJS-$(CONFIG_GRADFUN_FILTER) += vf_gradfun.o
OBJS-$(CONFIG_GRAPHMONITOR_FILTER) += f_graphmonitor.o
OBJS-$(CONFIG_GREYEDGE_FILTER) += vf_colorconstancy.o
OBJS-$(CONFIG_HALDCLUT_FILTER) += vf_lut3d.o framesync.o
OBJS-$(CONFIG_HFLIP_FILTER) += vf_hflip.o
OBJS-$(CONFIG_HISTEQ_FILTER) += vf_histeq.o
OBJS-$(CONFIG_HISTOGRAM_FILTER) += vf_histogram.o
OBJS-$(CONFIG_HQDN3D_FILTER) += vf_hqdn3d.o
OBJS-$(CONFIG_HQX_FILTER) += vf_hqx.o
OBJS-$(CONFIG_HSTACK_FILTER) += vf_stack.o framesync.o
OBJS-$(CONFIG_HUE_FILTER) += vf_hue.o
OBJS-$(CONFIG_HWDOWNLOAD_FILTER) += vf_hwdownload.o
OBJS-$(CONFIG_HWMAP_FILTER) += vf_hwmap.o
OBJS-$(CONFIG_HWUPLOAD_CUDA_FILTER) += vf_hwupload_cuda.o
OBJS-$(CONFIG_HWUPLOAD_FILTER) += vf_hwupload.o
OBJS-$(CONFIG_HYSTERESIS_FILTER) += vf_hysteresis.o framesync.o
OBJS-$(CONFIG_IDET_FILTER) += vf_idet.o
OBJS-$(CONFIG_IL_FILTER) += vf_il.o
OBJS-$(CONFIG_INFLATE_FILTER) += vf_neighbor.o
OBJS-$(CONFIG_INTERLACE_FILTER) += vf_tinterlace.o
OBJS-$(CONFIG_INTERLEAVE_FILTER) += f_interleave.o
OBJS-$(CONFIG_KERNDEINT_FILTER) += vf_kerndeint.o
OBJS-$(CONFIG_LAGFUN_FILTER) += vf_lagfun.o
OBJS-$(CONFIG_LENSCORRECTION_FILTER) += vf_lenscorrection.o
OBJS-$(CONFIG_LENSFUN_FILTER) += vf_lensfun.o
OBJS-$(CONFIG_LIBVMAF_FILTER) += vf_libvmaf.o framesync.o
OBJS-$(CONFIG_LIMITER_FILTER) += vf_limiter.o
OBJS-$(CONFIG_LOOP_FILTER) += f_loop.o
OBJS-$(CONFIG_LUMAKEY_FILTER) += vf_lumakey.o
OBJS-$(CONFIG_LUT1D_FILTER) += vf_lut3d.o
OBJS-$(CONFIG_LUT_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUT2_FILTER) += vf_lut2.o framesync.o
OBJS-$(CONFIG_LUT3D_FILTER) += vf_lut3d.o framesync.o
OBJS-$(CONFIG_LUTRGB_FILTER) += vf_lut.o
OBJS-$(CONFIG_LUTYUV_FILTER) += vf_lut.o
OBJS-$(CONFIG_MASKEDCLAMP_FILTER) += vf_maskedclamp.o framesync.o
OBJS-$(CONFIG_MASKEDMAX_FILTER) += vf_maskedminmax.o framesync.o
OBJS-$(CONFIG_MASKEDMERGE_FILTER) += vf_maskedmerge.o framesync.o
OBJS-$(CONFIG_MASKEDMIN_FILTER) += vf_maskedminmax.o framesync.o
OBJS-$(CONFIG_MASKEDTHRESHOLD_FILTER) += vf_maskedthreshold.o framesync.o
OBJS-$(CONFIG_MASKFUN_FILTER) += vf_maskfun.o
OBJS-$(CONFIG_MCDEINT_FILTER) += vf_mcdeint.o
OBJS-$(CONFIG_MEDIAN_FILTER) += vf_median.o
OBJS-$(CONFIG_MERGEPLANES_FILTER) += vf_mergeplanes.o framesync.o
OBJS-$(CONFIG_MESTIMATE_FILTER) += vf_mestimate.o motion_estimation.o
OBJS-$(CONFIG_METADATA_FILTER) += f_metadata.o
OBJS-$(CONFIG_MIDEQUALIZER_FILTER) += vf_midequalizer.o framesync.o
OBJS-$(CONFIG_MINTERPOLATE_FILTER) += vf_minterpolate.o motion_estimation.o
OBJS-$(CONFIG_MIX_FILTER) += vf_mix.o framesync.o
OBJS-$(CONFIG_MPDECIMATE_FILTER) += vf_mpdecimate.o
OBJS-$(CONFIG_NEGATE_FILTER) += vf_lut.o
OBJS-$(CONFIG_NLMEANS_FILTER) += vf_nlmeans.o
OBJS-$(CONFIG_NLMEANS_OPENCL_FILTER) += vf_nlmeans_opencl.o opencl.o opencl/nlmeans.o
OBJS-$(CONFIG_NNEDI_FILTER) += vf_nnedi.o
OBJS-$(CONFIG_NOFORMAT_FILTER) += vf_format.o
OBJS-$(CONFIG_NOISE_FILTER) += vf_noise.o
OBJS-$(CONFIG_NORMALIZE_FILTER) += vf_normalize.o
OBJS-$(CONFIG_NULL_FILTER) += vf_null.o
OBJS-$(CONFIG_OCR_FILTER) += vf_ocr.o
OBJS-$(CONFIG_OCV_FILTER) += vf_libopencv.o
OBJS-$(CONFIG_OSCILLOSCOPE_FILTER) += vf_datascope.o
OBJS-$(CONFIG_OVERLAY_FILTER) += vf_overlay.o framesync.o
OBJS-$(CONFIG_OVERLAY_CUDA_FILTER) += vf_overlay_cuda.o framesync.o vf_overlay_cuda.ptx.o
OBJS-$(CONFIG_OVERLAY_OPENCL_FILTER) += vf_overlay_opencl.o opencl.o \
opencl/overlay.o framesync.o
OBJS-$(CONFIG_OVERLAY_QSV_FILTER) += vf_overlay_qsv.o framesync.o
OBJS-$(CONFIG_OVERLAY_VULKAN_FILTER) += vf_overlay_vulkan.o vulkan.o
OBJS-$(CONFIG_OWDENOISE_FILTER) += vf_owdenoise.o
OBJS-$(CONFIG_PAD_FILTER) += vf_pad.o
OBJS-$(CONFIG_PAD_OPENCL_FILTER) += vf_pad_opencl.o opencl.o opencl/pad.o
OBJS-$(CONFIG_PALETTEGEN_FILTER) += vf_palettegen.o
OBJS-$(CONFIG_PALETTEUSE_FILTER) += vf_paletteuse.o framesync.o
OBJS-$(CONFIG_PERMS_FILTER) += f_perms.o
OBJS-$(CONFIG_PERSPECTIVE_FILTER) += vf_perspective.o
OBJS-$(CONFIG_PHASE_FILTER) += vf_phase.o
OBJS-$(CONFIG_PHOTOSENSITIVITY_FILTER) += vf_photosensitivity.o
OBJS-$(CONFIG_PIXDESCTEST_FILTER) += vf_pixdesctest.o
OBJS-$(CONFIG_PIXSCOPE_FILTER) += vf_datascope.o
OBJS-$(CONFIG_PP_FILTER) += vf_pp.o
OBJS-$(CONFIG_PP7_FILTER) += vf_pp7.o
OBJS-$(CONFIG_PREMULTIPLY_FILTER) += vf_premultiply.o framesync.o
OBJS-$(CONFIG_PREWITT_FILTER) += vf_convolution.o
OBJS-$(CONFIG_PREWITT_OPENCL_FILTER) += vf_convolution_opencl.o opencl.o \
opencl/convolution.o
OBJS-$(CONFIG_PROCAMP_VAAPI_FILTER) += vf_procamp_vaapi.o vaapi_vpp.o
OBJS-$(CONFIG_PROGRAM_OPENCL_FILTER) += vf_program_opencl.o opencl.o framesync.o
OBJS-$(CONFIG_PSEUDOCOLOR_FILTER) += vf_pseudocolor.o
OBJS-$(CONFIG_PSNR_FILTER) += vf_psnr.o framesync.o
OBJS-$(CONFIG_PULLUP_FILTER) += vf_pullup.o
OBJS-$(CONFIG_QP_FILTER) += vf_qp.o
OBJS-$(CONFIG_RANDOM_FILTER) += vf_random.o
OBJS-$(CONFIG_READEIA608_FILTER) += vf_readeia608.o
OBJS-$(CONFIG_READVITC_FILTER) += vf_readvitc.o
OBJS-$(CONFIG_REALTIME_FILTER) += f_realtime.o
OBJS-$(CONFIG_REMAP_FILTER) += vf_remap.o framesync.o
OBJS-$(CONFIG_REMOVEGRAIN_FILTER) += vf_removegrain.o
OBJS-$(CONFIG_REMOVELOGO_FILTER) += bbox.o lswsutils.o lavfutils.o vf_removelogo.o
OBJS-$(CONFIG_REPEATFIELDS_FILTER) += vf_repeatfields.o
OBJS-$(CONFIG_REVERSE_FILTER) += f_reverse.o
OBJS-$(CONFIG_RGBASHIFT_FILTER) += vf_chromashift.o
OBJS-$(CONFIG_ROBERTS_FILTER) += vf_convolution.o
OBJS-$(CONFIG_ROBERTS_OPENCL_FILTER) += vf_convolution_opencl.o opencl.o \
opencl/convolution.o
OBJS-$(CONFIG_ROTATE_FILTER) += vf_rotate.o
OBJS-$(CONFIG_SAB_FILTER) += vf_sab.o
OBJS-$(CONFIG_SCALE_FILTER) += vf_scale.o scale_eval.o
OBJS-$(CONFIG_SCALE_CUDA_FILTER) += vf_scale_cuda.o vf_scale_cuda.ptx.o scale_eval.o
OBJS-$(CONFIG_SCALE_NPP_FILTER) += vf_scale_npp.o scale_eval.o
OBJS-$(CONFIG_SCALE_QSV_FILTER) += vf_scale_qsv.o
OBJS-$(CONFIG_SCALE_VAAPI_FILTER) += vf_scale_vaapi.o scale_eval.o vaapi_vpp.o
OBJS-$(CONFIG_SCALE_VULKAN_FILTER) += vf_scale_vulkan.o vulkan.o
OBJS-$(CONFIG_SCALE2REF_FILTER) += vf_scale.o scale_eval.o
OBJS-$(CONFIG_SCDET_FILTER) += vf_scdet.o
OBJS-$(CONFIG_SCROLL_FILTER) += vf_scroll.o
OBJS-$(CONFIG_SELECT_FILTER) += f_select.o
OBJS-$(CONFIG_SELECTIVECOLOR_FILTER) += vf_selectivecolor.o
OBJS-$(CONFIG_SENDCMD_FILTER) += f_sendcmd.o
OBJS-$(CONFIG_SEPARATEFIELDS_FILTER) += vf_separatefields.o
OBJS-$(CONFIG_SETDAR_FILTER) += vf_aspect.o
OBJS-$(CONFIG_SETFIELD_FILTER) += vf_setparams.o
OBJS-$(CONFIG_SETPARAMS_FILTER) += vf_setparams.o
OBJS-$(CONFIG_SETPTS_FILTER) += setpts.o
OBJS-$(CONFIG_SETRANGE_FILTER) += vf_setparams.o
OBJS-$(CONFIG_SETSAR_FILTER) += vf_aspect.o
OBJS-$(CONFIG_SETTB_FILTER) += settb.o
OBJS-$(CONFIG_SHARPNESS_VAAPI_FILTER) += vf_misc_vaapi.o vaapi_vpp.o
OBJS-$(CONFIG_SHOWINFO_FILTER) += vf_showinfo.o
OBJS-$(CONFIG_SHOWPALETTE_FILTER) += vf_showpalette.o
OBJS-$(CONFIG_SHUFFLEFRAMES_FILTER) += vf_shuffleframes.o
OBJS-$(CONFIG_SHUFFLEPLANES_FILTER) += vf_shuffleplanes.o
OBJS-$(CONFIG_SIDEDATA_FILTER) += f_sidedata.o
OBJS-$(CONFIG_SIGNALSTATS_FILTER) += vf_signalstats.o
OBJS-$(CONFIG_SIGNATURE_FILTER) += vf_signature.o
OBJS-$(CONFIG_SMARTBLUR_FILTER) += vf_smartblur.o
OBJS-$(CONFIG_SOBEL_FILTER) += vf_convolution.o
OBJS-$(CONFIG_SOBEL_OPENCL_FILTER) += vf_convolution_opencl.o opencl.o \
opencl/convolution.o
OBJS-$(CONFIG_SPLIT_FILTER) += split.o
OBJS-$(CONFIG_SPP_FILTER) += vf_spp.o
OBJS-$(CONFIG_SR_FILTER) += vf_sr.o
OBJS-$(CONFIG_SSIM_FILTER) += vf_ssim.o framesync.o
OBJS-$(CONFIG_STEREO3D_FILTER) += vf_stereo3d.o
OBJS-$(CONFIG_STREAMSELECT_FILTER) += f_streamselect.o framesync.o
OBJS-$(CONFIG_SUBTITLES_FILTER) += vf_subtitles.o
OBJS-$(CONFIG_SUPER2XSAI_FILTER) += vf_super2xsai.o
OBJS-$(CONFIG_SWAPRECT_FILTER) += vf_swaprect.o
OBJS-$(CONFIG_SWAPUV_FILTER) += vf_swapuv.o
OBJS-$(CONFIG_TBLEND_FILTER) += vf_blend.o framesync.o
OBJS-$(CONFIG_TELECINE_FILTER) += vf_telecine.o
OBJS-$(CONFIG_THISTOGRAM_FILTER) += vf_histogram.o
OBJS-$(CONFIG_THRESHOLD_FILTER) += vf_threshold.o framesync.o
OBJS-$(CONFIG_THUMBNAIL_FILTER) += vf_thumbnail.o
OBJS-$(CONFIG_THUMBNAIL_CUDA_FILTER) += vf_thumbnail_cuda.o vf_thumbnail_cuda.ptx.o
OBJS-$(CONFIG_TILE_FILTER) += vf_tile.o
OBJS-$(CONFIG_TINTERLACE_FILTER) += vf_tinterlace.o
OBJS-$(CONFIG_TLUT2_FILTER) += vf_lut2.o framesync.o
OBJS-$(CONFIG_TMEDIAN_FILTER) += vf_xmedian.o framesync.o
OBJS-$(CONFIG_TMIX_FILTER) += vf_mix.o framesync.o
OBJS-$(CONFIG_TONEMAP_FILTER) += vf_tonemap.o colorspace.o
OBJS-$(CONFIG_TONEMAP_OPENCL_FILTER) += vf_tonemap_opencl.o colorspace.o opencl.o \
opencl/tonemap.o opencl/colorspace_common.o
OBJS-$(CONFIG_TONEMAP_VAAPI_FILTER) += vf_tonemap_vaapi.o vaapi_vpp.o
OBJS-$(CONFIG_TPAD_FILTER) += vf_tpad.o
OBJS-$(CONFIG_TRANSPOSE_FILTER) += vf_transpose.o
OBJS-$(CONFIG_TRANSPOSE_NPP_FILTER) += vf_transpose_npp.o
OBJS-$(CONFIG_TRANSPOSE_OPENCL_FILTER) += vf_transpose_opencl.o opencl.o opencl/transpose.o
OBJS-$(CONFIG_TRANSPOSE_VAAPI_FILTER) += vf_transpose_vaapi.o vaapi_vpp.o
OBJS-$(CONFIG_TRIM_FILTER) += trim.o
OBJS-$(CONFIG_UNPREMULTIPLY_FILTER) += vf_premultiply.o framesync.o
OBJS-$(CONFIG_UNSHARP_FILTER) += vf_unsharp.o
OBJS-$(CONFIG_UNSHARP_OPENCL_FILTER) += vf_unsharp_opencl.o opencl.o \
opencl/unsharp.o
OBJS-$(CONFIG_UNTILE_FILTER) += vf_untile.o
OBJS-$(CONFIG_USPP_FILTER) += vf_uspp.o
OBJS-$(CONFIG_V360_FILTER) += vf_v360.o
OBJS-$(CONFIG_VAGUEDENOISER_FILTER) += vf_vaguedenoiser.o
OBJS-$(CONFIG_VECTORSCOPE_FILTER) += vf_vectorscope.o
OBJS-$(CONFIG_VFLIP_FILTER) += vf_vflip.o
OBJS-$(CONFIG_VFRDET_FILTER) += vf_vfrdet.o
OBJS-$(CONFIG_VIBRANCE_FILTER) += vf_vibrance.o
OBJS-$(CONFIG_VIDSTABDETECT_FILTER) += vidstabutils.o vf_vidstabdetect.o
OBJS-$(CONFIG_VIDSTABTRANSFORM_FILTER) += vidstabutils.o vf_vidstabtransform.o
OBJS-$(CONFIG_VIGNETTE_FILTER) += vf_vignette.o
OBJS-$(CONFIG_VMAFMOTION_FILTER) += vf_vmafmotion.o framesync.o
OBJS-$(CONFIG_VPP_QSV_FILTER) += vf_vpp_qsv.o
OBJS-$(CONFIG_VSTACK_FILTER) += vf_stack.o framesync.o
OBJS-$(CONFIG_W3FDIF_FILTER) += vf_w3fdif.o
OBJS-$(CONFIG_WAVEFORM_FILTER) += vf_waveform.o
OBJS-$(CONFIG_WEAVE_FILTER) += vf_weave.o
OBJS-$(CONFIG_XBR_FILTER) += vf_xbr.o
OBJS-$(CONFIG_XFADE_FILTER) += vf_xfade.o
OBJS-$(CONFIG_XFADE_OPENCL_FILTER) += vf_xfade_opencl.o opencl.o opencl/xfade.o
OBJS-$(CONFIG_XMEDIAN_FILTER) += vf_xmedian.o framesync.o
OBJS-$(CONFIG_XSTACK_FILTER) += vf_stack.o framesync.o
OBJS-$(CONFIG_YADIF_FILTER) += vf_yadif.o yadif_common.o
OBJS-$(CONFIG_YADIF_CUDA_FILTER) += vf_yadif_cuda.o vf_yadif_cuda.ptx.o \
yadif_common.o
OBJS-$(CONFIG_YAEPBLUR_FILTER) += vf_yaepblur.o
OBJS-$(CONFIG_ZMQ_FILTER) += f_zmq.o
OBJS-$(CONFIG_ZOOMPAN_FILTER) += vf_zoompan.o
OBJS-$(CONFIG_ZSCALE_FILTER) += vf_zscale.o
OBJS-$(CONFIG_ALLRGB_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_ALLYUV_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_CELLAUTO_FILTER) += vsrc_cellauto.o
OBJS-$(CONFIG_COLOR_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_COREIMAGESRC_FILTER) += vf_coreimage.o
OBJS-$(CONFIG_FREI0R_SRC_FILTER) += vf_frei0r.o
OBJS-$(CONFIG_GRADIENTS_FILTER) += vsrc_gradients.o
OBJS-$(CONFIG_HALDCLUTSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_LIFE_FILTER) += vsrc_life.o
OBJS-$(CONFIG_MANDELBROT_FILTER) += vsrc_mandelbrot.o
OBJS-$(CONFIG_MPTESTSRC_FILTER) += vsrc_mptestsrc.o
OBJS-$(CONFIG_NULLSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_OPENCLSRC_FILTER) += vf_program_opencl.o opencl.o
OBJS-$(CONFIG_PAL75BARS_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_PAL100BARS_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_RGBTESTSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_SIERPINSKI_FILTER) += vsrc_sierpinski.o
OBJS-$(CONFIG_SMPTEBARS_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_SMPTEHDBARS_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_TESTSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_TESTSRC2_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_YUVTESTSRC_FILTER) += vsrc_testsrc.o
OBJS-$(CONFIG_NULLSINK_FILTER) += vsink_nullsink.o
# multimedia filters
OBJS-$(CONFIG_ABITSCOPE_FILTER) += avf_abitscope.o
OBJS-$(CONFIG_ADRAWGRAPH_FILTER) += f_drawgraph.o
OBJS-$(CONFIG_AGRAPHMONITOR_FILTER) += f_graphmonitor.o
OBJS-$(CONFIG_AHISTOGRAM_FILTER) += avf_ahistogram.o
OBJS-$(CONFIG_APHASEMETER_FILTER) += avf_aphasemeter.o
OBJS-$(CONFIG_AVECTORSCOPE_FILTER) += avf_avectorscope.o
OBJS-$(CONFIG_CONCAT_FILTER) += avf_concat.o
OBJS-$(CONFIG_SHOWCQT_FILTER) += avf_showcqt.o lswsutils.o lavfutils.o
OBJS-$(CONFIG_SHOWFREQS_FILTER) += avf_showfreqs.o
OBJS-$(CONFIG_SHOWSPATIAL_FILTER) += avf_showspatial.o
OBJS-$(CONFIG_SHOWSPECTRUM_FILTER) += avf_showspectrum.o
OBJS-$(CONFIG_SHOWSPECTRUMPIC_FILTER) += avf_showspectrum.o
OBJS-$(CONFIG_SHOWVOLUME_FILTER) += avf_showvolume.o
OBJS-$(CONFIG_SHOWWAVES_FILTER) += avf_showwaves.o
OBJS-$(CONFIG_SHOWWAVESPIC_FILTER) += avf_showwaves.o
OBJS-$(CONFIG_SPECTRUMSYNTH_FILTER) += vaf_spectrumsynth.o
# multimedia sources
OBJS-$(CONFIG_AMOVIE_FILTER) += src_movie.o
OBJS-$(CONFIG_MOVIE_FILTER) += src_movie.o
# Windows resource file
SLIBOBJS-$(HAVE_GNU_WINDRES) += avfilterres.o
# headers depending on optional external libraries are excluded from checkheaders
SKIPHEADERS-$(CONFIG_LIBVIDSTAB) += vidstabutils.h
# shared builds carry their own copy of the log2 lookup table
OBJS-$(CONFIG_SHARED) += log2_tab.o
SKIPHEADERS-$(CONFIG_QSVVPP) += qsvvpp.h
SKIPHEADERS-$(CONFIG_OPENCL) += opencl.h
SKIPHEADERS-$(CONFIG_VAAPI) += vaapi_vpp.h
SKIPHEADERS-$(CONFIG_VULKAN) += vulkan.h
OBJS-$(CONFIG_LIBGLSLANG) += glslang.o
# auxiliary tools and test programs built from this directory
TOOLS = graph2dot
TESTPROGS = drawutils filtfmts formats integral
TOOLS-$(CONFIG_LIBZMQ) += zmqsend
# Remove the objects generated under libavfilter/dnn as well.
# Recipe lines MUST start with a TAB character or Make aborts with
# "missing separator" — the pasted version had lost them.
clean::
	$(RM) $(CLEANSUFFIXES:%=libavfilter/dnn/%)

# Every .cl OpenCL kernel is converted to a C string table by tools/cl2c.
OPENCL = $(subst $(SRC_PATH)/,,$(wildcard $(SRC_PATH)/libavfilter/opencl/*.cl))
# Keep the intermediate generated .c files instead of deleting them.
.SECONDARY: $(OPENCL:.cl=.c)

libavfilter/opencl/%.c: TAG = OPENCL
libavfilter/opencl/%.c: $(SRC_PATH)/libavfilter/opencl/%.cl
	$(M)$(SRC_PATH)/tools/cl2c $< $@

View File

@@ -0,0 +1,3 @@
# AArch64 optimisations for the nlmeans filter: C dispatch glue plus the
# NEON implementation (assembled only when NEON support is available).
OBJS-$(CONFIG_NLMEANS_FILTER) += aarch64/vf_nlmeans_init.o
NEON-OBJS-$(CONFIG_NLMEANS_FILTER) += aarch64/vf_nlmeans_neon.o

View File

@@ -0,0 +1,33 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/aarch64/cpu.h"
#include "libavfilter/vf_nlmeans.h"
/* NEON implementation of the safe SSD integral-image computation,
 * defined in aarch64/vf_nlmeans_neon.S. */
void ff_compute_safe_ssd_integral_image_neon(uint32_t *dst, ptrdiff_t dst_linesize_32,
                                             const uint8_t *s1, ptrdiff_t linesize1,
                                             const uint8_t *s2, ptrdiff_t linesize2,
                                             int w, int h);

/**
 * Install the AArch64-optimized function pointers into the nlmeans DSP
 * context when the CPU reports NEON support; otherwise leave the C
 * fallbacks untouched.
 */
av_cold void ff_nlmeans_init_aarch64(NLMeansDSPContext *dsp)
{
    const int cpu_flags = av_get_cpu_flags();

    if (!have_neon(cpu_flags))
        return;

    dsp->compute_safe_ssd_integral_image = ff_compute_safe_ssd_integral_image_neon;
}

View File

@@ -0,0 +1,80 @@
/*
* Copyright (c) 2018 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/aarch64/asm.S"
// acc_sum_store(ABCD) = {X+A, X+A+B, X+A+B+C, X+A+B+C+D}
//
// Running prefix sum over one vector of 4 partial sums: X is the last
// accumulated value (lane 3 of v24 on entry), ABCD the new increments.
// The shifted copies (0ABC, 00AB, 000A) are built with EXT against the
// all-zero register v26 and summed in, then the result is stored and
// left in v24 as the carry for the next invocation.
// Clobbers v24/v25; advances x0 by 16 bytes.
.macro acc_sum_store x, xb
dup v24.4S, v24.S[3] // ...X -> XXXX
ext v25.16B, v26.16B, \xb, #12 // ext(0000,ABCD,12)=0ABC
add v24.4S, v24.4S, \x // XXXX+ABCD={X+A,X+B,X+C,X+D}
add v24.4S, v24.4S, v25.4S // {X+A,X+B+A,X+C+B,X+D+C} (+0ABC)
ext v25.16B, v26.16B, v25.16B, #12 // ext(0000,0ABC,12)=00AB
add v24.4S, v24.4S, v25.4S // {X+A,X+B+A,X+C+B+A,X+D+C+B} (+00AB)
ext v25.16B, v26.16B, v25.16B, #12 // ext(0000,00AB,12)=000A
add v24.4S, v24.4S, v25.4S // {X+A,X+B+A,X+C+B+A,X+D+C+B+A} (+000A)
st1 {v24.4S}, [x0], #16 // write 4x32-bit final values
.endm
// void ff_compute_safe_ssd_integral_image_neon(uint32_t *dst, ptrdiff_t dst_linesize_32,
//                                              const uint8_t *s1, ptrdiff_t linesize1,
//                                              const uint8_t *s2, ptrdiff_t linesize2,
//                                              int w, int h);
//
// Register map (AAPCS64 argument order):
//   x0 = dst     x1 = dst_linesize_32 (in 32-bit units)
//   x2 = s1      x3 = linesize1
//   x4 = s2      x5 = linesize2
//   w6 = w       w7 = h
// Scratch: x8 = dst_top-1, x9 = dst_top, w10 = per-line width counter.
// Processes 16 pixels per inner iteration; assumes w is a multiple of 16
// and that dst has a valid top/left border row and column ("safe" variant)
// — NOTE(review): inferred from the pointer setup below, confirm against
// the C caller in vf_nlmeans.c.
function ff_compute_safe_ssd_integral_image_neon, export=1
movi v26.4S, #0 // used as zero for the "rotations" in acc_sum_store
sub x3, x3, w6, UXTW // s1 padding (s1_linesize - w)
sub x5, x5, w6, UXTW // s2 padding (s2_linesize - w)
sub x9, x0, w1, UXTW #2 // dst_top
sub x1, x1, w6, UXTW // dst padding (dst_linesize_32 - w)
lsl x1, x1, #2 // dst padding expressed in bytes
1: mov w10, w6 // width copy for each line
sub x0, x0, #16 // beginning of the dst line minus 4 sums
sub x8, x9, #4 // dst_top-1
ld1 {v24.4S}, [x0], #16 // load ...X (contextual last sums)
2: ld1 {v0.16B}, [x2], #16 // s1[x + 0..15]
ld1 {v1.16B}, [x4], #16 // s2[x + 0..15]
ld1 {v16.4S,v17.4S}, [x8], #32 // dst_top[x + 0..7 - 1]
usubl v2.8H, v0.8B, v1.8B // d[x + 0..7] = s1[x + 0..7] - s2[x + 0..7]
usubl2 v3.8H, v0.16B, v1.16B // d[x + 8..15] = s1[x + 8..15] - s2[x + 8..15]
ld1 {v18.4S,v19.4S}, [x8], #32 // dst_top[x + 8..15 - 1]
smull v4.4S, v2.4H, v2.4H // d[x + 0..3]^2
smull2 v5.4S, v2.8H, v2.8H // d[x + 4..7]^2
ld1 {v20.4S,v21.4S}, [x9], #32 // dst_top[x + 0..7]
smull v6.4S, v3.4H, v3.4H // d[x + 8..11]^2
smull2 v7.4S, v3.8H, v3.8H // d[x + 12..15]^2
ld1 {v22.4S,v23.4S}, [x9], #32 // dst_top[x + 8..15]
// integral recurrence: dst[x] = dst_top[x] - dst_top[x-1] + d[x]^2,
// then acc_sum_store adds the running row prefix (left neighbour chain)
sub v0.4S, v20.4S, v16.4S // dst_top[x + 0..3] - dst_top[x + 0..3 - 1]
sub v1.4S, v21.4S, v17.4S // dst_top[x + 4..7] - dst_top[x + 4..7 - 1]
add v0.4S, v0.4S, v4.4S // + d[x + 0..3]^2
add v1.4S, v1.4S, v5.4S // + d[x + 4..7]^2
sub v2.4S, v22.4S, v18.4S // dst_top[x + 8..11] - dst_top[x + 8..11 - 1]
sub v3.4S, v23.4S, v19.4S // dst_top[x + 12..15] - dst_top[x + 12..15 - 1]
add v2.4S, v2.4S, v6.4S // + d[x + 8..11]^2
add v3.4S, v3.4S, v7.4S // + d[x + 12..15]^2
acc_sum_store v0.4S, v0.16B // accumulate and store dst[ 0..3]
acc_sum_store v1.4S, v1.16B // accumulate and store dst[ 4..7]
acc_sum_store v2.4S, v2.16B // accumulate and store dst[ 8..11]
acc_sum_store v3.4S, v3.16B // accumulate and store dst[12..15]
subs w10, w10, #16 // width dec
b.ne 2b // loop til next line
add x2, x2, x3 // skip to next line (s1)
add x4, x4, x5 // skip to next line (s2)
add x0, x0, x1 // skip to next line (dst)
add x9, x9, x1 // skip to next line (dst_top)
subs w7, w7, #1 // height dec
b.ne 1b
ret
endfunc

490
externals/ffmpeg/libavfilter/aeval.c vendored Executable file
View File

@@ -0,0 +1,490 @@
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* eval audio source
*/
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
/* Names of the variables available to the channel expressions; must stay
 * in the same order as the var_name enum below. */
static const char * const var_names[] = {
    "ch", ///< the value of the current channel
    "n", ///< number of frame
    "nb_in_channels",
    "nb_out_channels",
    "t", ///< timestamp expressed in seconds
    "s", ///< sample rate
    NULL
};
/* Indices into EvalContext.var_values, parallel to var_names. */
enum var_name {
    VAR_CH,
    VAR_N,
    VAR_NB_IN_CHANNELS,
    VAR_NB_OUT_CHANNELS,
    VAR_T,
    VAR_S,
    VAR_VARS_NB
};
/* Shared private context of the aevalsrc source and the aeval filter. */
typedef struct EvalContext {
    const AVClass *class;
    char *sample_rate_str; ///< sample rate option as given by the user
    int sample_rate; ///< parsed sample rate
    int64_t chlayout; ///< resolved output channel layout
    char *chlayout_str; ///< channel layout option ("same" defers to input)
    int nb_channels; ///< number of output channels
    int nb_in_channels; ///< number of input channels
    int same_chlayout; ///< set output as input channel layout
    int64_t pts; ///< pts of the next output frame (aevalsrc)
    AVExpr **expr; ///< one parsed expression per output channel
    char *exprs; ///< '|'-separated expression list as given by the user
    int nb_samples; ///< number of samples per requested frame
    int64_t duration; ///< requested duration in AV_TIME_BASE units, -1 = unlimited
    uint64_t n; ///< index of the next sample to generate
    double var_values[VAR_VARS_NB];
    double *channel_values; ///< current input sample, one value per input channel
    int64_t out_channel_layout;
} EvalContext;
/* Expression callback: val(ch) returns the current input sample of channel
 * ch, clamped to the last input channel. */
static double val(void *priv, double ch)
{
    EvalContext *eval = priv;
    return eval->channel_values[FFMIN((int)ch, eval->nb_in_channels-1)];
}
/* Custom function table exposed to av_expr_parse() for the aeval filter. */
static double (* const aeval_func1[])(void *, double) = { val, NULL };
static const char * const aeval_func1_names[] = { "val", NULL };
#define OFFSET(x) offsetof(EvalContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption aevalsrc_options[]= {
{ "exprs", "set the '|'-separated list of channels expressions", OFFSET(exprs), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
{ "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
{ "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
{ "sample_rate", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS },
{ "s", "set the sample rate", OFFSET(sample_rate_str), AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS },
{ "duration", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
{ "d", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = -1}, -1, INT64_MAX, FLAGS },
{ "channel_layout", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "c", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ NULL }
};
AVFILTER_DEFINE_CLASS(aevalsrc);
/**
 * Parse the '|'-separated expression list in eval->exprs into eval->expr[],
 * one parsed AVExpr per output channel.
 *
 * @param ctx                  filter context whose priv is an EvalContext
 * @param expected_nb_channels channel count implied by the requested layout,
 *                             or -1 to derive the count from the number of
 *                             expressions; when fewer expressions than
 *                             expected channels are given, the last one is
 *                             repeated for the remaining channels
 * @return 0 on success, a negative AVERROR code on failure
 */
static int parse_channel_expressions(AVFilterContext *ctx,
                                     int expected_nb_channels)
{
    EvalContext *eval = ctx->priv;
    char *args1;
    char *expr, *last_expr = NULL, *buf;
    double (* const *func1)(void *, double) = NULL;
    const char * const *func1_names = NULL;
    int i, ret = 0;

    /* Validate before duplicating: av_strdup(NULL) returns NULL, so the
     * previous check order reported a missing expression list as ENOMEM
     * instead of emitting the intended EINVAL diagnostic. */
    if (!eval->exprs) {
        av_log(ctx, AV_LOG_ERROR, "Channels expressions list is empty\n");
        return AVERROR(EINVAL);
    }

    args1 = av_strdup(eval->exprs);
    if (!args1)
        return AVERROR(ENOMEM);

    /* only the "aeval" filter exposes the val() expression function */
    if (!strcmp(ctx->filter->name, "aeval")) {
        func1       = aeval_func1;
        func1_names = aeval_func1_names;
    }

#define ADD_EXPRESSION(expr_) do {                                      \
        if (!av_dynarray2_add((void **)&eval->expr, &eval->nb_channels, \
                              sizeof(*eval->expr), NULL)) {             \
            ret = AVERROR(ENOMEM);                                      \
            goto end;                                                   \
        }                                                               \
        eval->expr[eval->nb_channels-1] = NULL;                         \
        ret = av_expr_parse(&eval->expr[eval->nb_channels - 1], expr_,  \
                            var_names, func1_names, func1,              \
                            NULL, NULL, 0, ctx);                        \
        if (ret < 0)                                                    \
            goto end;                                                   \
    } while (0)

    /* reset any previously parsed expressions */
    for (i = 0; i < eval->nb_channels; i++) {
        av_expr_free(eval->expr[i]);
        eval->expr[i] = NULL;
    }
    av_freep(&eval->expr);
    eval->nb_channels = 0;

    buf = args1;
    while ((expr = av_strtok(buf, "|", &buf))) {
        ADD_EXPRESSION(expr);
        last_expr = expr;
    }

    /* Pad with copies of the last expression. last_expr is NULL when the
     * list contained no token at all; skip the padding then (the old code
     * passed NULL to av_expr_parse) and let the mismatch check below
     * report the error. */
    if (expected_nb_channels > eval->nb_channels && last_expr)
        for (i = eval->nb_channels; i < expected_nb_channels; i++)
            ADD_EXPRESSION(last_expr);

    if (expected_nb_channels > 0 && eval->nb_channels != expected_nb_channels) {
        av_log(ctx, AV_LOG_ERROR,
               "Mismatch between the specified number of channel expressions '%d' "
               "and the number of expected output channels '%d' for the specified channel layout\n",
               eval->nb_channels, expected_nb_channels);
        ret = AVERROR(EINVAL);
        goto end;
    }

end:
    av_free(args1);
    return ret;
}
/* Shared init for aevalsrc and aeval: resolve the requested channel layout
 * (or flag "same" so aeval copies the input layout at config time), parse
 * the per-channel expressions, and parse the sample rate option. */
static av_cold int init(AVFilterContext *ctx)
{
    EvalContext *eval = ctx->priv;
    int ret = 0;
    if (eval->chlayout_str) {
        /* "same" is only meaningful for the aeval filter, where the output
         * layout is taken from the input in aeval_config_output() */
        if (!strcmp(eval->chlayout_str, "same") && !strcmp(ctx->filter->name, "aeval")) {
            eval->same_chlayout = 1;
        } else {
            ret = ff_parse_channel_layout(&eval->chlayout, NULL, eval->chlayout_str, ctx);
            if (ret < 0)
                return ret;
            ret = parse_channel_expressions(ctx, av_get_channel_layout_nb_channels(eval->chlayout));
            if (ret < 0)
                return ret;
        }
    } else {
        /* no layout given: derive the channel count from the number of
         * expressions, then pick a matching default layout */
        if ((ret = parse_channel_expressions(ctx, -1)) < 0)
            return ret;
        eval->chlayout = av_get_default_channel_layout(eval->nb_channels);
        if (!eval->chlayout && eval->nb_channels <= 0) {
            av_log(ctx, AV_LOG_ERROR, "Invalid number of channels '%d' provided\n",
                   eval->nb_channels);
            return AVERROR(EINVAL);
        }
    }
    if (eval->sample_rate_str)
        if ((ret = ff_parse_sample_rate(&eval->sample_rate, eval->sample_rate_str, ctx)))
            return ret;
    eval->n = 0;
    return ret;
}
/* Release every parsed per-channel expression, then the arrays that hold
 * them and the cached per-channel input values. */
static av_cold void uninit(AVFilterContext *ctx)
{
    EvalContext *eval = ctx->priv;
    int ch;

    for (ch = 0; ch < eval->nb_channels; ch++) {
        av_expr_free(eval->expr[ch]);
        eval->expr[ch] = NULL;
    }
    av_freep(&eval->expr);
    av_freep(&eval->channel_values);
}
/* aevalsrc output configuration: set the link's time base and sample rate
 * and expose values to the expressions. A source has no input, hence
 * nb_in_channels is NAN. */
static int config_props(AVFilterLink *outlink)
{
    EvalContext *eval = outlink->src->priv;
    char buf[128];
    outlink->time_base = (AVRational){1, eval->sample_rate};
    outlink->sample_rate = eval->sample_rate;
    /* variables readable from the expressions */
    eval->var_values[VAR_S] = eval->sample_rate;
    eval->var_values[VAR_NB_IN_CHANNELS] = NAN;
    eval->var_values[VAR_NB_OUT_CHANNELS] = outlink->channels;
    av_get_channel_layout_string(buf, sizeof(buf), 0, eval->chlayout);
    av_log(outlink->src, AV_LOG_VERBOSE,
           "sample_rate:%d chlayout:%s duration:%"PRId64"\n",
           eval->sample_rate, buf, eval->duration);
    return 0;
}
/* aevalsrc format negotiation: the source only produces planar doubles at
 * the configured sample rate, with either the requested layout or an
 * unknown layout carrying the derived channel count. */
static int query_formats(AVFilterContext *ctx)
{
    EvalContext *eval = ctx->priv;
    static const enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE };
    int64_t chlayouts[] = { eval->chlayout ? eval->chlayout : FF_COUNT2LAYOUT(eval->nb_channels) , -1 };
    int sample_rates[] = { eval->sample_rate, -1 };
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int ret;
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats (ctx, formats);
    if (ret < 0)
        return ret;
    layouts = avfilter_make_format64_list(chlayouts);
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;
    formats = ff_make_format_list(sample_rates);
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* aevalsrc: produce the next frame of eval->nb_samples samples (fewer at
 * the end of a bounded duration), evaluating every channel expression once
 * per output sample. Returns AVERROR_EOF when the duration is reached. */
static int request_frame(AVFilterLink *outlink)
{
    EvalContext *eval = outlink->src->priv;
    AVFrame *samplesref;
    int i, j;
    int64_t t = av_rescale(eval->n, AV_TIME_BASE, eval->sample_rate);
    int nb_samples;

    if (eval->duration >= 0 && t >= eval->duration)
        return AVERROR_EOF;

    if (eval->duration >= 0) {
        /* Clip the last frame to the requested duration. The difference can
         * come out non-positive due to av_rescale() rounding; the previous
         * "!nb_samples" test let a negative count through to
         * ff_get_audio_buffer(), so treat anything <= 0 as EOF. */
        nb_samples = FFMIN(eval->nb_samples, av_rescale(eval->duration, eval->sample_rate, AV_TIME_BASE) - eval->pts);
        if (nb_samples <= 0)
            return AVERROR_EOF;
    } else {
        nb_samples = eval->nb_samples;
    }

    samplesref = ff_get_audio_buffer(outlink, nb_samples);
    if (!samplesref)
        return AVERROR(ENOMEM);

    /* evaluate expression for each single sample and for each channel */
    for (i = 0; i < nb_samples; i++, eval->n++) {
        eval->var_values[VAR_N] = eval->n;
        eval->var_values[VAR_T] = eval->var_values[VAR_N] * (double)1/eval->sample_rate;
        for (j = 0; j < eval->nb_channels; j++) {
            *((double *) samplesref->extended_data[j] + i) =
                av_expr_eval(eval->expr[j], eval->var_values, NULL);
        }
    }

    samplesref->pts = eval->pts;
    samplesref->sample_rate = eval->sample_rate;
    eval->pts += nb_samples;
    return ff_filter_frame(outlink, samplesref);
}
#if CONFIG_AEVALSRC_FILTER
static const AVFilterPad aevalsrc_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = config_props,
.request_frame = request_frame,
},
{ NULL }
};
AVFilter ff_asrc_aevalsrc = {
.name = "aevalsrc",
.description = NULL_IF_CONFIG_SMALL("Generate an audio signal generated by an expression."),
.query_formats = query_formats,
.init = init,
.uninit = uninit,
.priv_size = sizeof(EvalContext),
.inputs = NULL,
.outputs = aevalsrc_outputs,
.priv_class = &aevalsrc_class,
};
#endif /* CONFIG_AEVALSRC_FILTER */
#define OFFSET(x) offsetof(EvalContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption aeval_options[]= {
{ "exprs", "set the '|'-separated list of channels expressions", OFFSET(exprs), AV_OPT_TYPE_STRING, {.str = NULL}, .flags = FLAGS },
{ "channel_layout", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ "c", "set channel layout", OFFSET(chlayout_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, FLAGS },
{ NULL }
};
AVFILTER_DEFINE_CLASS(aeval);
/* aeval format negotiation: any input layout is accepted; the output layout
 * is either unrestricted (chlayout "same") or fixed to the requested one.
 * Only planar doubles are supported.
 *
 * Fix: the allocation results of ff_all_channel_counts(),
 * ff_make_format_list() and ff_all_samplerates() were passed on unchecked;
 * check them for ENOMEM like the other query_formats in this file. */
static int aeval_query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    EvalContext *eval = ctx->priv;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE
    };
    int ret;

    // inlink supports any channel layout
    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    if (eval->same_chlayout) {
        layouts = ff_all_channel_counts();
        if (!layouts)
            return AVERROR(ENOMEM);
        if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
            return ret;
    } else {
        // outlink supports only requested output channel layout
        layouts = NULL;
        if ((ret = ff_add_channel_layout(&layouts,
                              eval->out_channel_layout ? eval->out_channel_layout :
                              FF_COUNT2LAYOUT(eval->nb_channels))) < 0)
            return ret;
        if ((ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* aeval output configuration: adopt the input layout when "same" was
 * requested (re-parsing expressions for the now-known channel count),
 * reset the sample counter, publish link properties to the expression
 * variables, and size the per-channel input value cache. */
static int aeval_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    EvalContext *eval = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret;
    if (eval->same_chlayout) {
        eval->chlayout = inlink->channel_layout;
        if ((ret = parse_channel_expressions(ctx, inlink->channels)) < 0)
            return ret;
    }
    eval->n = 0;
    eval->nb_in_channels = eval->var_values[VAR_NB_IN_CHANNELS] = inlink->channels;
    eval->var_values[VAR_NB_OUT_CHANNELS] = outlink->channels;
    eval->var_values[VAR_S] = inlink->sample_rate;
    /* 't' is only meaningful per-frame; set in filter_frame() */
    eval->var_values[VAR_T] = NAN;
    /* av_realloc_f frees the old buffer on failure, so no leak here */
    eval->channel_values = av_realloc_f(eval->channel_values,
                                        inlink->channels, sizeof(*eval->channel_values));
    if (!eval->channel_values)
        return AVERROR(ENOMEM);
    return 0;
}
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
/* aeval: for every sample, load all input channel values into
 * eval->channel_values (readable from expressions via val()), then evaluate
 * each output channel's expression with ch/n/t set accordingly. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    EvalContext *eval = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int nb_samples = in->nb_samples;
    AVFrame *out;
    double t0;
    int i, j;
    out = ff_get_audio_buffer(outlink, nb_samples);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);
    /* timestamp of the first sample of this frame, in seconds */
    t0 = TS2T(in->pts, inlink->time_base);
    /* evaluate expression for each single sample and for each channel */
    for (i = 0; i < nb_samples; i++, eval->n++) {
        eval->var_values[VAR_N] = eval->n;
        eval->var_values[VAR_T] = t0 + i * (double)1/inlink->sample_rate;
        /* snapshot the current input sample for every input channel */
        for (j = 0; j < inlink->channels; j++)
            eval->channel_values[j] = *((double *) in->extended_data[j] + i);
        for (j = 0; j < outlink->channels; j++) {
            eval->var_values[VAR_CH] = j;
            /* eval is passed as opaque so val() can reach channel_values */
            *((double *) out->extended_data[j] + i) =
                av_expr_eval(eval->expr[j], eval->var_values, eval);
        }
    }
    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
#if CONFIG_AEVAL_FILTER
static const AVFilterPad aeval_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
static const AVFilterPad aeval_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.config_props = aeval_config_output,
},
{ NULL }
};
AVFilter ff_af_aeval = {
.name = "aeval",
.description = NULL_IF_CONFIG_SMALL("Filter audio signal according to a specified expression."),
.query_formats = aeval_query_formats,
.init = init,
.uninit = uninit,
.priv_size = sizeof(EvalContext),
.inputs = aeval_inputs,
.outputs = aeval_outputs,
.priv_class = &aeval_class,
};
#endif /* CONFIG_AEVAL_FILTER */

219
externals/ffmpeg/libavfilter/af_acontrast.c vendored Executable file
View File

@@ -0,0 +1,219 @@
/*
* Copyright (c) 2008 Rob Sykes
* Copyright (c) 2017 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
typedef struct AudioContrastContext {
const AVClass *class;
float contrast;
void (*filter)(void **dst, const void **src,
int nb_samples, int channels, float contrast);
} AudioContrastContext;
#define OFFSET(x) offsetof(AudioContrastContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption acontrast_options[] = {
{ "contrast", "set contrast", OFFSET(contrast), AV_OPT_TYPE_FLOAT, {.dbl=33}, 0, 100, A },
{ NULL }
};
AVFILTER_DEFINE_CLASS(acontrast);
/* acontrast format negotiation: any channel count and sample rate; packed
 * or planar float/double samples (one kernel per format, see config_input). */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;
    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;
    formats = ff_all_samplerates();
    return ff_set_common_samplerates(ctx, formats);
}
static void filter_flt(void **d, const void **s,
int nb_samples, int channels,
float contrast)
{
const float *src = s[0];
float *dst = d[0];
int n, c;
for (n = 0; n < nb_samples; n++) {
for (c = 0; c < channels; c++) {
float d = src[c] * M_PI_2;
dst[c] = sinf(d + contrast * sinf(d * 4));
}
dst += c;
src += c;
}
}
static void filter_dbl(void **d, const void **s,
int nb_samples, int channels,
float contrast)
{
const double *src = s[0];
double *dst = d[0];
int n, c;
for (n = 0; n < nb_samples; n++) {
for (c = 0; c < channels; c++) {
double d = src[c] * M_PI_2;
dst[c] = sin(d + contrast * sin(d * 4));
}
dst += c;
src += c;
}
}
static void filter_fltp(void **d, const void **s,
int nb_samples, int channels,
float contrast)
{
int n, c;
for (c = 0; c < channels; c++) {
const float *src = s[c];
float *dst = d[c];
for (n = 0; n < nb_samples; n++) {
float d = src[n] * M_PI_2;
dst[n] = sinf(d + contrast * sinf(d * 4));
}
}
}
static void filter_dblp(void **d, const void **s,
int nb_samples, int channels,
float contrast)
{
int n, c;
for (c = 0; c < channels; c++) {
const double *src = s[c];
double *dst = d[c];
for (n = 0; n < nb_samples; n++) {
double d = src[n] * M_PI_2;
dst[n] = sin(d + contrast * sin(d * 4));
}
}
}
/* Select the per-format kernel; query_formats() guarantees the negotiated
 * format is one of these four, so no default case is needed. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioContrastContext *s = ctx->priv;
    switch (inlink->format) {
    case AV_SAMPLE_FMT_FLT: s->filter = filter_flt; break;
    case AV_SAMPLE_FMT_DBL: s->filter = filter_dbl; break;
    case AV_SAMPLE_FMT_FLTP: s->filter = filter_fltp; break;
    case AV_SAMPLE_FMT_DBLP: s->filter = filter_dblp; break;
    }
    return 0;
}
/* Apply the contrast kernel to one frame, in place when the input buffer is
 * writable, otherwise into a freshly allocated output frame. The 0..100
 * user option is scaled by 1/750 into the kernel's coefficient range. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioContrastContext *s = ctx->priv;
    AVFrame *out;
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    s->filter((void **)out->extended_data, (const void **)in->extended_data,
              in->nb_samples, in->channels, s->contrast / 750);
    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
static const AVFilterPad inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
static const AVFilterPad outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
AVFilter ff_af_acontrast = {
.name = "acontrast",
.description = NULL_IF_CONFIG_SMALL("Simple audio dynamic range compression/expansion filter."),
.query_formats = query_formats,
.priv_size = sizeof(AudioContrastContext),
.priv_class = &acontrast_class,
.inputs = inputs,
.outputs = outputs,
};

70
externals/ffmpeg/libavfilter/af_acopy.c vendored Executable file
View File

@@ -0,0 +1,70 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
/* acopy: deep-copy the input frame (properties and sample data) into a
 * freshly allocated buffer and pass it on; the input is always freed. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *out;
    int err;

    out = ff_get_audio_buffer(outlink, in->nb_samples);
    if (!out) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    err = av_frame_copy_props(out, in);
    if (err < 0)
        goto fail;

    err = av_frame_copy(out, in);
    if (err < 0)
        goto fail;

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);

fail:
    av_frame_free(&in);
    av_frame_free(&out);
    return err;
}
static const AVFilterPad acopy_inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
},
{ NULL }
};
static const AVFilterPad acopy_outputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
},
{ NULL }
};
AVFilter ff_af_acopy = {
.name = "acopy",
.description = NULL_IF_CONFIG_SMALL("Copy the input audio unchanged to the output."),
.inputs = acopy_inputs,
.outputs = acopy_outputs,
};

376
externals/ffmpeg/libavfilter/af_acrossover.c vendored Executable file
View File

@@ -0,0 +1,376 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Crossover filter
*
* Split an audio stream into several bands.
*/
#include "libavutil/attributes.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/eval.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#define MAX_SPLITS 16
#define MAX_BANDS MAX_SPLITS + 1
typedef struct BiquadContext {
double a0, a1, a2;
double b1, b2;
double i1, i2;
double o1, o2;
} BiquadContext;
typedef struct CrossoverChannel {
BiquadContext lp[MAX_BANDS][4];
BiquadContext hp[MAX_BANDS][4];
} CrossoverChannel;
typedef struct AudioCrossoverContext {
const AVClass *class;
char *splits_str;
int order;
int filter_count;
int nb_splits;
float *splits;
CrossoverChannel *xover;
AVFrame *input_frame;
AVFrame *frames[MAX_BANDS];
} AudioCrossoverContext;
#define OFFSET(x) offsetof(AudioCrossoverContext, x)
#define AF AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption acrossover_options[] = {
{ "split", "set split frequencies", OFFSET(splits_str), AV_OPT_TYPE_STRING, {.str="500"}, 0, 0, AF },
{ "order", "set order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=1}, 0, 2, AF, "m" },
{ "2nd", "2nd order", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AF, "m" },
{ "4th", "4th order", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AF, "m" },
{ "8th", "8th order", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, AF, "m" },
{ NULL }
};
AVFILTER_DEFINE_CLASS(acrossover);
/* acrossover init: parse up to MAX_SPLITS '|' or space separated split
 * frequencies (must be positive and strictly increasing), then create one
 * output pad per band (nb_splits + 1). Pad names are allocated here and
 * freed in uninit(). */
static av_cold int init(AVFilterContext *ctx)
{
    AudioCrossoverContext *s = ctx->priv;
    char *p, *arg, *saveptr = NULL;
    int i, ret = 0;
    s->splits = av_calloc(MAX_SPLITS, sizeof(*s->splits));
    if (!s->splits)
        return AVERROR(ENOMEM);
    p = s->splits_str;
    for (i = 0; i < MAX_SPLITS; i++) {
        float freq;
        if (!(arg = av_strtok(p, " |", &saveptr)))
            break;
        p = NULL; /* av_strtok continues from saveptr on later iterations */
        if (av_sscanf(arg, "%f", &freq) != 1) {
            av_log(ctx, AV_LOG_ERROR, "Invalid syntax for frequency[%d].\n", i);
            return AVERROR(EINVAL);
        }
        if (freq <= 0) {
            av_log(ctx, AV_LOG_ERROR, "Frequency %f must be positive number.\n", freq);
            return AVERROR(EINVAL);
        }
        if (i > 0 && freq <= s->splits[i-1]) {
            av_log(ctx, AV_LOG_ERROR, "Frequency %f must be in increasing order.\n", freq);
            return AVERROR(EINVAL);
        }
        s->splits[i] = freq;
    }
    s->nb_splits = i;
    /* one output pad per band: nb_splits crossover points -> nb_splits+1 bands */
    for (i = 0; i <= s->nb_splits; i++) {
        AVFilterPad pad = { 0 };
        char *name;
        pad.type = AVMEDIA_TYPE_AUDIO;
        name = av_asprintf("out%d", ctx->nb_outputs);
        if (!name)
            return AVERROR(ENOMEM);
        pad.name = name;
        if ((ret = ff_insert_outpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }
    return ret;
}
/* Fill b with RBJ-cookbook low-pass biquad coefficients for cutoff fc,
 * quality q, at sample rate sr. Filter state (i1/i2/o1/o2) is untouched. */
static void set_lp(BiquadContext *b, double fc, double q, double sr)
{
    const double w0    = 2.0 * M_PI * fc / sr;
    const double cos_w = cos(w0);
    const double alpha = sin(w0) / (2. * q);
    const double norm  = 1.0 / (1.0 + alpha);

    b->a0 = (1. - cos_w) * 0.5 * norm;
    b->a1 = (1. - cos_w) * norm;
    b->a2 = b->a0;
    b->b1 = -2. * cos_w * norm;
    b->b2 = (1. - alpha) * norm;
}
/* Fill b with RBJ-cookbook high-pass biquad coefficients for cutoff fc,
 * quality q, at sample rate sr. Filter state (i1/i2/o1/o2) is untouched. */
static void set_hp(BiquadContext *b, double fc, double q, double sr)
{
    const double w0    = 2 * M_PI * fc / sr;
    const double cos_w = cos(w0);
    const double alpha = sin(w0) / (2 * q);
    const double norm  = 1.0 / (1.0 + alpha);

    b->a0 = norm * (1. + cos_w) / 2.;
    b->a1 = -2. * b->a0;
    b->a2 = b->a0;
    b->b1 = -2. * cos_w * norm;
    b->b2 = (1. - alpha) * norm;
}
/* acrossover input configuration: allocate per-channel filter state and
 * compute the Butterworth/Linkwitz-Riley-style biquad cascades for every
 * crossover point.
 *
 * Fixes: (1) the band loop ran to band <= nb_splits, reading
 * splits[nb_splits] one element past the end of the splits array when all
 * MAX_SPLITS frequencies are used; the last band's filters are never run
 * (filter_channels skips them when band + 1 == nb_outputs), so iterating
 * band < nb_splits is behavior-identical and in bounds. (2) the switch had
 * no default, leaving q and filter_count uninitialized for an unexpected
 * order value. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioCrossoverContext *s = ctx->priv;
    int ch, band, sample_rate = inlink->sample_rate;
    double q;

    s->xover = av_calloc(inlink->channels, sizeof(*s->xover));
    if (!s->xover)
        return AVERROR(ENOMEM);

    switch (s->order) {
    case 0:
        q = 0.5;
        s->filter_count = 1;
        break;
    case 1:
        q = M_SQRT1_2;
        s->filter_count = 2;
        break;
    case 2:
        q = 0.54;
        s->filter_count = 4;
        break;
    default:
        /* the option is constrained to 0..2; fail loudly rather than use
         * uninitialized q/filter_count if that ever changes */
        return AVERROR(EINVAL);
    }

    for (ch = 0; ch < inlink->channels; ch++) {
        for (band = 0; band < s->nb_splits; band++) {
            set_lp(&s->xover[ch].lp[band][0], s->splits[band], q, sample_rate);
            set_hp(&s->xover[ch].hp[band][0], s->splits[band], q, sample_rate);

            if (s->order > 1) {
                /* 8th order: cascade of four sections with alternating Q */
                set_lp(&s->xover[ch].lp[band][1], s->splits[band], 1.34, sample_rate);
                set_hp(&s->xover[ch].hp[band][1], s->splits[band], 1.34, sample_rate);
                set_lp(&s->xover[ch].lp[band][2], s->splits[band], q, sample_rate);
                set_hp(&s->xover[ch].hp[band][2], s->splits[band], q, sample_rate);
                set_lp(&s->xover[ch].lp[band][3], s->splits[band], 1.34, sample_rate);
                set_hp(&s->xover[ch].hp[band][3], s->splits[band], 1.34, sample_rate);
            } else {
                set_lp(&s->xover[ch].lp[band][1], s->splits[band], q, sample_rate);
                set_hp(&s->xover[ch].hp[band][1], s->splits[band], q, sample_rate);
            }
        }
    }

    return 0;
}
/* acrossover format negotiation: any channel count and sample rate, planar
 * doubles only (the biquads operate on doubles). */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;
    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;
    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Run one sample through a direct-form-I biquad:
 * y = a0*x + a1*x[-1] + a2*x[-2] - b1*y[-1] - b2*y[-2], updating the
 * delay lines stored in b. */
static double biquad_process(BiquadContext *b, double in)
{
    const double out = in * b->a0 + b->i1 * b->a1 + b->i2 * b->a2
                     - b->o1 * b->b1 - b->o2 * b->b2;

    /* shift delay lines; within each pair the order matters */
    b->i2 = b->i1;
    b->i1 = in;
    b->o2 = b->o1;
    b->o1 = out;

    return out;
}
/* Slice-threaded worker: each job processes a contiguous range of channels.
 * Per sample, each band low-passes the running signal into its own output
 * frame and high-passes the remainder into the next band; the last band
 * receives the remainder untouched (both inner loops are skipped when
 * band + 1 == nb_outputs). */
static int filter_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    AudioCrossoverContext *s = ctx->priv;
    AVFrame *in = s->input_frame;
    AVFrame **frames = s->frames;
    const int start = (in->channels * jobnr) / nb_jobs;
    const int end = (in->channels * (jobnr+1)) / nb_jobs;
    int f, band;
    for (int ch = start; ch < end; ch++) {
        const double *src = (const double *)in->extended_data[ch];
        CrossoverChannel *xover = &s->xover[ch];
        for (int i = 0; i < in->nb_samples; i++) {
            double sample = src[i], lo, hi;
            for (band = 0; band < ctx->nb_outputs; band++) {
                double *dst = (double *)frames[band]->extended_data[ch];
                lo = sample;
                hi = sample;
                /* low-pass cascade: this band's contribution */
                for (f = 0; band + 1 < ctx->nb_outputs && f < s->filter_count; f++) {
                    BiquadContext *lp = &xover->lp[band][f];
                    lo = biquad_process(lp, lo);
                }
                /* high-pass cascade: what is left for the higher bands */
                for (f = 0; band + 1 < ctx->nb_outputs && f < s->filter_count; f++) {
                    BiquadContext *hp = &xover->hp[band][f];
                    hi = biquad_process(hp, hi);
                }
                dst[i] = lo;
                sample = hi;
            }
        }
    }
    return 0;
}
/* acrossover: allocate one output frame per band, split the input across
 * them in parallel via filter_channels(), then emit every band frame.
 * frames[] entries are NULLed as they are handed off so the cleanup loop
 * only frees what was not emitted. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AudioCrossoverContext *s = ctx->priv;
    AVFrame **frames = s->frames;
    int i, ret = 0;
    for (i = 0; i < ctx->nb_outputs; i++) {
        frames[i] = ff_get_audio_buffer(ctx->outputs[i], in->nb_samples);
        if (!frames[i]) {
            ret = AVERROR(ENOMEM);
            break;
        }
        frames[i]->pts = in->pts;
    }
    if (ret < 0)
        goto fail;
    /* stash the input where the slice-threaded worker can see it */
    s->input_frame = in;
    ctx->internal->execute(ctx, filter_channels, NULL, NULL, FFMIN(inlink->channels,
                                                                   ff_filter_get_nb_threads(ctx)));
    for (i = 0; i < ctx->nb_outputs; i++) {
        ret = ff_filter_frame(ctx->outputs[i], frames[i]);
        frames[i] = NULL;
        if (ret < 0)
            break;
    }
fail:
    for (i = 0; i < ctx->nb_outputs; i++)
        av_frame_free(&frames[i]);
    av_frame_free(&in);
    s->input_frame = NULL;
    return ret;
}
/* Free the split table, the per-channel filter state, and the output pad
 * names that init() allocated with av_asprintf(). */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioCrossoverContext *s = ctx->priv;
    int pad;

    av_freep(&s->splits);
    av_freep(&s->xover);

    for (pad = 0; pad < ctx->nb_outputs; pad++)
        av_freep(&ctx->output_pads[pad].name);
}
static const AVFilterPad inputs[] = {
{
.name = "default",
.type = AVMEDIA_TYPE_AUDIO,
.filter_frame = filter_frame,
.config_props = config_input,
},
{ NULL }
};
AVFilter ff_af_acrossover = {
.name = "acrossover",
.description = NULL_IF_CONFIG_SMALL("Split audio into per-bands streams."),
.priv_size = sizeof(AudioCrossoverContext),
.priv_class = &acrossover_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = inputs,
.outputs = NULL,
.flags = AVFILTER_FLAG_DYNAMIC_OUTPUTS |
AVFILTER_FLAG_SLICE_THREADS,
};

362
externals/ffmpeg/libavfilter/af_acrusher.c vendored Executable file
View File

@@ -0,0 +1,362 @@
/*
* Copyright (c) Markus Schmidt and Christian Holschuh
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
typedef struct LFOContext {
double freq;
double offset;
int srate;
double amount;
double pwidth;
double phase;
} LFOContext;
typedef struct SRContext {
double target;
double real;
double samples;
double last;
} SRContext;
typedef struct ACrusherContext {
const AVClass *class;
double level_in;
double level_out;
double bits;
double mix;
int mode;
double dc;
double idc;
double aa;
double samples;
int is_lfo;
double lforange;
double lforate;
double sqr;
double aa1;
double coeff;
int round;
double sov;
double smin;
double sdiff;
LFOContext lfo;
SRContext *sr;
} ACrusherContext;
#define OFFSET(x) offsetof(ACrusherContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption acrusher_options[] = {
{ "level_in", "set level in", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
{ "level_out","set level out", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
{ "bits", "set bit reduction", OFFSET(bits), AV_OPT_TYPE_DOUBLE, {.dbl=8}, 1, 64, A },
{ "mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=.5}, 0, 1, A },
{ "mode", "set mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, A, "mode" },
{ "lin", "linear", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A, "mode" },
{ "log", "logarithmic", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A, "mode" },
{ "dc", "set DC", OFFSET(dc), AV_OPT_TYPE_DOUBLE, {.dbl=1}, .25, 4, A },
{ "aa", "set anti-aliasing", OFFSET(aa), AV_OPT_TYPE_DOUBLE, {.dbl=.5}, 0, 1, A },
{ "samples", "set sample reduction", OFFSET(samples), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 1, 250, A },
{ "lfo", "enable LFO", OFFSET(is_lfo), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
{ "lforange", "set LFO depth", OFFSET(lforange), AV_OPT_TYPE_DOUBLE, {.dbl=20}, 1, 250, A },
{ "lforate", "set LFO rate", OFFSET(lforate), AV_OPT_TYPE_DOUBLE, {.dbl=.3}, .01, 200, A },
{ NULL }
};
AVFILTER_DEFINE_CLASS(acrusher);
/* Sample-rate reduction: hold the last captured input sample and only
 * capture a new one when the fractional accumulator (target vs. real,
 * advanced every s->round input samples) rolls over. s->samples is the
 * (possibly fractional, LFO-modulated) reduction factor. */
static double samplereduction(ACrusherContext *s, SRContext *sr, double in)
{
    sr->samples++;
    if (sr->samples >= s->round) {
        sr->target += s->samples;
        sr->real += s->round;
        if (sr->target + s->samples >= sr->real + 1) {
            /* time to capture a fresh sample; reset the accumulators */
            sr->last = in;
            sr->target = 0;
            sr->real = 0;
        }
        sr->samples = 0;
    }
    return sr->last;
}
/* Apply DC bias: scale the positive half-wave by dc and the negative one
 * by idc (the inverse factor). Undone by remove_dc(). */
static double add_dc(double s, double dc, double idc)
{
    if (s > 0)
        return s * dc;
    return s * idc;
}
/* Undo add_dc(): scale the positive half-wave by idc and the negative one
 * by dc. */
static double remove_dc(double s, double dc, double idc)
{
    if (s > 0)
        return s * idc;
    return s * dc;
}
static inline double factor(double y, double k, double aa1, double aa)
{
return 0.5 * (sin(M_PI * (fabs(y - k) - aa1) / aa - M_PI_2) + 1);
}
/* Bit-depth reduction with optional anti-aliasing: quantize the (DC-biased)
 * input on a linear or logarithmic scale, smooth the quantization steps
 * with factor(), mix with the dry signal, and remove the DC bias again.
 * NOTE(review): roundf() is applied to a double here, so rounding happens
 * at float precision — presumably deliberate to match the original sound;
 * confirm before changing. */
static double bitreduction(ACrusherContext *s, double in)
{
    const double sqr = s->sqr;
    const double coeff = s->coeff;
    const double aa = s->aa;
    const double aa1 = s->aa1;
    double y, k;
    // add dc
    in = add_dc(in, s->dc, s->idc);
    // main rounding calculation depending on mode
    // the idea for anti-aliasing:
    // you need a function f which brings you to the scale, where
    // you want to round and the function f_b (with f(f_b)=id) which
    // brings you back to your original scale.
    //
    // then you can use the logic below in the following way:
    // y = f(in) and k = roundf(y)
    // if (y > k + aa1)
    //      k = f_b(k) + ( f_b(k+1) - f_b(k) ) * 0.5 * (sin(x - PI/2) + 1)
    // if (y < k + aa1)
    //      k = f_b(k) - ( f_b(k+1) - f_b(k) ) * 0.5 * (sin(x - PI/2) + 1)
    //
    // whereas x = (fabs(f(in) - k) - aa1) * PI / aa
    // for both cases.
    switch (s->mode) {
    case 0:
    default:
        // linear
        y = in * coeff;
        k = roundf(y);
        if (k - aa1 <= y && y <= k + aa1) {
            // inside the flat zone: plain quantization step
            k /= coeff;
        } else if (y > k + aa1) {
            k = k / coeff + ((k + 1) / coeff - k / coeff) *
                factor(y, k, aa1, aa);
        } else {
            k = k / coeff - (k / coeff - (k - 1) / coeff) *
                factor(y, k, aa1, aa);
        }
        break;
    case 1:
        // logarithmic
        y = sqr * log(fabs(in)) + sqr * sqr;
        k = roundf(y);
        if(!in) {
            // log(0) is undefined; silence stays silence
            k = 0;
        } else if (k - aa1 <= y && y <= k + aa1) {
            // in / fabs(in) restores the sign lost by fabs() above
            k = in / fabs(in) * exp(k / sqr - sqr);
        } else if (y > k + aa1) {
            double x = exp(k / sqr - sqr);
            k = FFSIGN(in) * (x + (exp((k + 1) / sqr - sqr) - x) *
                factor(y, k, aa1, aa));
        } else {
            double x = exp(k / sqr - sqr);
            k = in / fabs(in) * (x - (x - exp((k - 1) / sqr - sqr)) *
                factor(y, k, aa1, aa));
        }
        break;
    }
    // mix between dry and wet signal
    k += (in - k) * s->mix;
    // remove dc
    k = remove_dc(k, s->dc, s->idc);
    return k;
}
/* Evaluate the LFO at its current phase: shape the phase by pulse width and
 * offset, wrap into [0, 1), and return one sine cycle scaled by amount. */
static double lfo_get(LFOContext *lfo)
{
    const double pwidth = FFMIN(1.99, FFMAX(0.01, lfo->pwidth));
    double phs = FFMIN(100., lfo->phase / pwidth + lfo->offset);

    if (phs > 1)
        phs = fmod(phs, 1.);
    /* phase expressed in degrees, converted to radians */
    return lfo->amount * sin((phs * 360.) * M_PI / 180);
}
/* Step the LFO forward by `count` samples at the configured frequency,
 * keeping the phase wrapped into [0, 1). */
static void lfo_advance(LFOContext *lfo, unsigned count)
{
    const double step = count * lfo->freq * (1. / lfo->srate);

    lfo->phase = fabs(lfo->phase + step);
    if (lfo->phase >= 1.)
        lfo->phase = fmod(lfo->phase, 1.);
}
/* Process one audio frame of packed (interleaved) doubles: per-sample
 * LFO-driven sample-rate reduction followed by bit reduction.
 * Takes ownership of `in`; returns 0 or a negative AVERROR code. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ACrusherContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    const double *src = (const double *)in->data[0];
    double *dst;
    const double level_in = s->level_in;
    const double level_out = s->level_out;
    const double mix = s->mix;
    int n, c;
    // process in place when the input frame is writable
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];
    for (n = 0; n < in->nb_samples; n++) {
        // modulate the sample-reduction amount with the LFO, if enabled
        if (s->is_lfo) {
            s->samples = s->smin + s->sdiff * (lfo_get(&s->lfo) + 0.5);
            s->round = round(s->samples);
        }
        for (c = 0; c < inlink->channels; c++) {
            double sample = src[c] * level_in;
            // blend the decimated signal with the dry input before crushing
            sample = mix * samplereduction(s, &s->sr[c], sample) + src[c] * (1. - mix) * level_in;
            dst[c] = bitreduction(s, sample) * level_out;
        }
        // advance past one interleaved sample group (c == channel count here)
        src += c;
        dst += c;
        if (s->is_lfo)
            lfo_advance(&s->lfo, 1);
    }
    if (in != out)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Advertise the formats this filter supports: any channel count/layout,
 * packed double samples, any sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts = ff_all_channel_counts();
    AVFilterFormats *formats;
    int ret;

    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Free the per-channel sample-reduction state allocated in config_input(). */
static av_cold void uninit(AVFilterContext *ctx)
{
    av_freep(&((ACrusherContext *)ctx->priv)->sr);
}
/* Derive the processing constants from the user options and allocate the
 * per-channel decimator state. Returns 0 or a negative AVERROR code. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ACrusherContext *s = ctx->priv;
    double rad, sunder, smax, sover;
    s->idc = 1. / s->dc;
    // number of quantization steps for the requested bit depth
    s->coeff = exp2(s->bits) - 1;
    s->sqr = sqrt(s->coeff / 2);
    // half-width of the flat (non-crossfaded) quantization zone
    s->aa1 = (1. - s->aa) / 2.;
    s->round = round(s->samples);
    // clamp the LFO sweep range [samples - rad, samples + rad] into [1, 250],
    // shifting the opposite bound by the amount clipped so the width is kept
    rad = s->lforange / 2.;
    s->smin = FFMAX(s->samples - rad, 1.);
    sunder = s->samples - rad - s->smin;
    smax = FFMIN(s->samples + rad, 250.);
    sover = s->samples + rad - smax;
    smax -= sunder;
    s->smin -= sover;
    s->sdiff = smax - s->smin;
    s->lfo.freq = s->lforate;
    s->lfo.pwidth = 1.;
    s->lfo.srate = inlink->sample_rate;
    s->lfo.amount = .5;
    // one SRContext per channel
    s->sr = av_calloc(inlink->channels, sizeof(*s->sr));
    if (!s->sr)
        return AVERROR(ENOMEM);
    return 0;
}
/* Single audio input; config_input derives state, filter_frame processes. */
static const AVFilterPad avfilter_af_acrusher_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output. */
static const AVFilterPad avfilter_af_acrusher_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration entry for the "acrusher" bit/sample-rate reducer. */
AVFilter ff_af_acrusher = {
    .name          = "acrusher",
    .description   = NULL_IF_CONFIG_SMALL("Reduce audio bit resolution."),
    .priv_size     = sizeof(ACrusherContext),
    .priv_class    = &acrusher_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_af_acrusher_inputs,
    .outputs       = avfilter_af_acrusher_outputs,
};

797
externals/ffmpeg/libavfilter/af_adeclick.c vendored Executable file
View File

@@ -0,0 +1,797 @@
/*
* Copyright (c) 2018 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/audio_fifo.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
/* Per-channel scratch state for the declick/declip analysis.
 * The *_size fields track capacities for av_fast_malloc() growth. */
typedef struct DeclickChannel {
    double *auxiliary;      // autocorrelation of the AR coefficients (ar_order + 1)
    double *detection;      // prediction-error signal over the window
    double *acoefficients;  // AR model coefficients (ar_order + 1)
    double *acorrelation;   // autocorrelation of the input (ar_order + 1)
    double *tmp;            // Levinson-Durbin work buffer (ar_order)
    double *interpolated;   // replacement values for flagged samples
    double *matrix;         // interpolation system matrix (grows on demand)
    int matrix_size;
    double *vector;         // interpolation right-hand side (grows on demand)
    int vector_size;
    double *y;              // forward-substitution work buffer (grows on demand)
    int y_size;
    uint8_t *click;         // per-sample flag: 1 = detected click/clip
    int *index;             // sorted sample indices of detections
    unsigned *histogram;    // amplitude histogram (declip detector only)
    int histogram_size;
} DeclickChannel;
/* Shared private context for the adeclick/adeclip filters. */
typedef struct AudioDeclickContext {
    const AVClass *class;
    double w;                 // option: window size in ms
    double overlap;           // option: window overlap in percent
    double threshold;         // option: detection threshold
    double ar;                // option: autoregression order in percent of window
    double burst;             // option: burst fusion (adeclick only)
    int method;               // option: 0 = overlap-add, 1 = overlap-save
    int nb_hbins;             // option: histogram size (adeclip only)
    int is_declip;            // 1 when running as "adeclip"
    int ar_order;             // AR model order in samples
    int nb_burst_samples;     // burst fusion distance in samples
    int window_size;          // analysis window in samples
    int hop_size;             // samples advanced per iteration
    int overlap_skip;         // leading samples skipped in overlap-save mode
    AVFrame *enabled;         // per-sample timeline enable flags
    AVFrame *in;              // current analysis window
    AVFrame *out;             // repaired window
    AVFrame *buffer;          // overlap accumulation buffer (2 windows)
    AVFrame *is;              // per-sample "was repaired" flags
    DeclickChannel *chan;     // per-channel scratch state
    int64_t pts;              // next output timestamp
    int nb_channels;
    uint64_t nb_samples;      // total samples emitted (stats)
    uint64_t detected_errors; // total samples repaired (stats)
    int samples_left;         // samples still to flush after EOF
    int eof;
    AVAudioFifo *efifo;       // FIFO of enable flags, parallel to fifo
    AVAudioFifo *fifo;        // input sample FIFO
    double *window_func_lut;  // overlap-add window function
    // detector callback: returns number of flagged samples or negative AVERROR
    int (*detector)(struct AudioDeclickContext *s, DeclickChannel *c,
                    double sigmae, double *detection,
                    double *acoefficients, uint8_t *click, int *index,
                    const double *src, double *dst);
} AudioDeclickContext;
#define OFFSET(x) offsetof(AudioDeclickContext, x)
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* Options for the adeclick filter.
 * NOTE(review): "a" appears both as the autoregression-order option and as
 * the overlap-add constant; the constant lives in unit "m", which presumably
 * avoids a lookup clash — confirm against AVOption unit semantics. */
static const AVOption adeclick_options[] = {
    { "w", "set window size",            OFFSET(w),         AV_OPT_TYPE_DOUBLE, {.dbl=55},     10,  100, AF },
    { "o", "set window overlap",         OFFSET(overlap),   AV_OPT_TYPE_DOUBLE, {.dbl=75},     50,   95, AF },
    { "a", "set autoregression order",   OFFSET(ar),        AV_OPT_TYPE_DOUBLE, {.dbl=2},       0,   25, AF },
    { "t", "set threshold",              OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=2},       1,  100, AF },
    { "b", "set burst fusion",           OFFSET(burst),     AV_OPT_TYPE_DOUBLE, {.dbl=2},       0,   10, AF },
    { "m", "set overlap method",         OFFSET(method),    AV_OPT_TYPE_INT,    {.i64=0},       0,    1, AF, "m" },
    { "a", "overlap-add",                0,                 AV_OPT_TYPE_CONST,  {.i64=0},       0,    0, AF, "m" },
    { "s", "overlap-save",               0,                 AV_OPT_TYPE_CONST,  {.i64=1},       0,    0, AF, "m" },
    { NULL }
};
AVFILTER_DEFINE_CLASS(adeclick);
/* Advertise supported formats: planar doubles, any channel count/layout,
 * any sample rate. Returns 0 or a negative AVERROR code. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;
    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;
    formats = ff_all_samplerates();
    // fix: ff_all_samplerates() can fail; the sibling query_formats
    // implementations all check this, but it was missing here
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Configure per-link state: derive window geometry from the options and
 * (re)allocate analysis buffers, FIFOs and per-channel contexts.
 * Returns 0 on success or a negative AVERROR code. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioDeclickContext *s = ctx->priv;
    int i;
    s->pts = AV_NOPTS_VALUE;
    // window size option is in milliseconds; reject degenerate windows
    s->window_size = inlink->sample_rate * s->w / 1000.;
    if (s->window_size < 100)
        return AVERROR(EINVAL);
    s->ar_order = FFMAX(s->window_size * s->ar / 100., 1);
    s->nb_burst_samples = s->window_size * s->burst / 1000.;
    s->hop_size = s->window_size * (1. - (s->overlap / 100.));
    if (s->hop_size < 1)
        return AVERROR(EINVAL);
    // fix: free any previous LUT so reconfiguration does not leak
    av_freep(&s->window_func_lut);
    s->window_func_lut = av_calloc(s->window_size, sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->window_size; i++)
        s->window_func_lut[i] = sin(M_PI * i / s->window_size) *
                                (1. - (s->overlap / 100.)) * M_PI_2;
    // fix: s->enabled was not freed alongside its siblings and leaked on
    // reconfiguration
    av_frame_free(&s->enabled);
    av_frame_free(&s->in);
    av_frame_free(&s->out);
    av_frame_free(&s->buffer);
    av_frame_free(&s->is);
    s->enabled = ff_get_audio_buffer(inlink, s->window_size);
    s->in = ff_get_audio_buffer(inlink, s->window_size);
    s->out = ff_get_audio_buffer(inlink, s->window_size);
    s->buffer = ff_get_audio_buffer(inlink, s->window_size * 2);
    s->is = ff_get_audio_buffer(inlink, s->window_size);
    if (!s->in || !s->out || !s->buffer || !s->is || !s->enabled)
        return AVERROR(ENOMEM);
    // fix: release previous FIFOs before reallocating (reconfiguration leak)
    av_audio_fifo_free(s->efifo);
    s->efifo = av_audio_fifo_alloc(inlink->format, 1, s->window_size);
    if (!s->efifo)
        return AVERROR(ENOMEM);
    av_audio_fifo_free(s->fifo);
    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->window_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);
    // overlap-save skips the leading half of the non-hop region; prime the
    // FIFO with that much silence (s->in is zeroed by ff_get_audio_buffer)
    s->overlap_skip = s->method ? (s->window_size - s->hop_size) / 2 : 0;
    if (s->overlap_skip > 0) {
        av_audio_fifo_write(s->fifo, (void **)s->in->extended_data,
                            s->overlap_skip);
    }
    s->nb_channels = inlink->channels;
    s->chan = av_calloc(inlink->channels, sizeof(*s->chan));
    if (!s->chan)
        return AVERROR(ENOMEM);
    for (i = 0; i < inlink->channels; i++) {
        DeclickChannel *c = &s->chan[i];
        c->detection = av_calloc(s->window_size, sizeof(*c->detection));
        c->auxiliary = av_calloc(s->ar_order + 1, sizeof(*c->auxiliary));
        c->acoefficients = av_calloc(s->ar_order + 1, sizeof(*c->acoefficients));
        c->acorrelation = av_calloc(s->ar_order + 1, sizeof(*c->acorrelation));
        c->tmp = av_calloc(s->ar_order, sizeof(*c->tmp));
        c->click = av_calloc(s->window_size, sizeof(*c->click));
        c->index = av_calloc(s->window_size, sizeof(*c->index));
        c->interpolated = av_calloc(s->window_size, sizeof(*c->interpolated));
        if (!c->auxiliary || !c->acoefficients || !c->detection || !c->click ||
            !c->index || !c->interpolated || !c->acorrelation || !c->tmp)
            return AVERROR(ENOMEM);
    }
    return 0;
}
/* Biased autocorrelation: output[lag] = scale * sum_{j>=lag} input[j] * input[j-lag]
 * for lag = 0..order inclusive. */
static void autocorrelation(const double *input, int order, int size,
                            double *output, double scale)
{
    for (int lag = 0; lag <= order; lag++) {
        double acc = 0.;

        for (int j = lag; j < size; j++)
            acc += input[j] * input[j - lag];
        output[lag] = acc * scale;
    }
}
/* Fit an AR model of order ar_order to `samples` via Levinson-Durbin.
 * On return, k[0] = 1 and k[1..ar_order] hold the prediction coefficients;
 * r receives the autocorrelation and a is scratch. Returns the residual
 * standard deviation sqrt(alpha). */
static double autoregression(const double *samples, int ar_order,
                             int nb_samples, double *k, double *r, double *a)
{
    double alpha;
    int i, j;
    memset(a, 0, ar_order * sizeof(*a));
    autocorrelation(samples, ar_order, nb_samples, r, 1. / nb_samples);
    /* Levinson-Durbin algorithm */
    k[0] = a[0] = -r[1] / r[0];
    alpha = r[0] * (1. - k[0] * k[0]);
    for (i = 1; i < ar_order; i++) {
        double epsilon = 0.;
        for (j = 0; j < i; j++)
            epsilon += a[j] * r[i - j];
        epsilon += r[i + 1];
        // reflection coefficient for this order
        k[i] = -epsilon / alpha;
        alpha *= (1. - k[i] * k[i]);
        // update the coefficient vector in place (k used as scratch here)
        for (j = i - 1; j >= 0; j--)
            k[j] = a[j] + k[i] * a[i - j - 1];
        for (j = 0; j <= i; j++)
            a[j] = k[j];
    }
    // repack: k becomes [1, a0, a1, ...] for use as an FIR prediction filter
    k[0] = 1.;
    for (i = 1; i <= ar_order; i++)
        k[i] = a[i - 1];
    return sqrt(alpha);
}
/* Return 1 when every value in the array is finite (no NaN/Inf), else 0. */
static int isfinite_array(double *samples, int nb_samples)
{
    int i = 0;

    while (i < nb_samples && isfinite(samples[i]))
        i++;
    return i == nb_samples;
}
/* Binary search in the sorted array `index`; returns 0 when `value` is
 * present and 1 when it is absent. */
static int find_index(int *index, int value, int size)
{
    int lo, hi;

    if (value < index[0] || value > index[size - 1])
        return 1;
    lo = 0;
    hi = size - 1;
    while (lo <= hi) {
        const int mid = (lo + hi) / 2;

        if (index[mid] == value)
            return 0;
        if (value < index[mid])
            hi = mid - 1;
        else
            lo = mid + 1;
    }
    return 1;
}
/* In-place LDL^T-style factorization of the symmetric n x n matrix stored
 * row-major: diagonal entries become D, the strict lower triangle becomes L.
 * Returns 0 on success, -1 on a zero pivot (singular system). */
static int factorization(double *matrix, int n)
{
    for (int i = 0; i < n; i++) {
        const int in = i * n;
        double pivot = matrix[in + i];

        for (int j = 0; j < i; j++)
            pivot -= matrix[j * n + j] * matrix[in + j] * matrix[in + j];
        if (pivot == 0.)
            return -1;
        matrix[in + i] = pivot;

        for (int j = i + 1; j < n; j++) {
            const int jn = j * n;
            double x = matrix[jn + i];

            for (int k = 0; k < i; k++)
                x -= matrix[k * n + k] * matrix[in + k] * matrix[jn + k];
            matrix[jn + i] = x / matrix[in + i];
        }
    }
    return 0;
}
/* Solve the n x n symmetric system matrix * out = vector by factorizing the
 * matrix in place and doing forward then backward substitution.
 * Returns 0 on success or a negative value on failure. */
static int do_interpolation(DeclickChannel *c, double *matrix,
                            double *vector, int n, double *out)
{
    int i, j, ret;
    double *y;
    ret = factorization(matrix, n);
    if (ret < 0)
        return ret;
    // grow the substitution work buffer as needed
    av_fast_malloc(&c->y, &c->y_size, n * sizeof(*c->y));
    y = c->y;
    if (!y)
        return AVERROR(ENOMEM);
    // forward substitution with the unit lower triangle
    for (i = 0; i < n; i++) {
        const int in = i * n;
        double value;
        value = vector[i];
        for (j = 0; j < i; j++)
            value -= matrix[in + j] * y[j];
        y[i] = value;
    }
    // diagonal scaling and backward substitution
    for (i = n - 1; i >= 0; i--) {
        out[i] = y[i] / matrix[i * n + i];
        for (j = i + 1; j < n; j++)
            out[i] -= matrix[j * n + i] * out[j];
    }
    return 0;
}
/* Build and solve the least-squares system that reconstructs the samples at
 * positions `index[0..nb_errors)` from the AR model, writing the replacement
 * values to `interpolated`. Returns 0 or a negative error code. */
static int interpolation(DeclickChannel *c, const double *src, int ar_order,
                         double *acoefficients, int *index, int nb_errors,
                         double *auxiliary, double *interpolated)
{
    double *vector, *matrix;
    int i, j;
    av_fast_malloc(&c->matrix, &c->matrix_size, nb_errors * nb_errors * sizeof(*c->matrix));
    matrix = c->matrix;
    if (!matrix)
        return AVERROR(ENOMEM);
    av_fast_malloc(&c->vector, &c->vector_size, nb_errors * sizeof(*c->vector));
    vector = c->vector;
    if (!vector)
        return AVERROR(ENOMEM);
    // auxiliary[d] = autocorrelation of the AR coefficients at distance d
    autocorrelation(acoefficients, ar_order, ar_order + 1, auxiliary, 1.);
    // symmetric system matrix: entries depend only on the distance between
    // error positions, and vanish beyond the AR order
    for (i = 0; i < nb_errors; i++) {
        const int im = i * nb_errors;
        for (j = i; j < nb_errors; j++) {
            if (abs(index[j] - index[i]) <= ar_order) {
                matrix[j * nb_errors + i] = matrix[im + j] = auxiliary[abs(index[j] - index[i])];
            } else {
                matrix[j * nb_errors + i] = matrix[im + j] = 0;
            }
        }
    }
    // right-hand side from the known (non-flagged) neighboring samples
    for (i = 0; i < nb_errors; i++) {
        double value = 0.;
        for (j = -ar_order; j <= ar_order; j++)
            if (find_index(index, index[i] - j, nb_errors))
                value -= src[index[i] - j] * auxiliary[abs(j)];
        vector[i] = value;
    }
    return do_interpolation(c, matrix, vector, nb_errors, interpolated);
}
/* Clip detector (adeclip): builds an amplitude histogram, finds the clipping
 * level as the topmost populated bin that dominates its neighbor by the
 * threshold ratio, and flags samples at or above it. Returns the number of
 * flagged samples (their indices in `index`) or a negative error code.
 * The unused* parameters exist to match the common detector signature. */
static int detect_clips(AudioDeclickContext *s, DeclickChannel *c,
                        double unused0,
                        double *unused1, double *unused2,
                        uint8_t *clip, int *index,
                        const double *src, double *dst)
{
    const double threshold = s->threshold;
    double max_amplitude = 0;
    unsigned *histogram;
    int i, nb_clips = 0;
    av_fast_malloc(&c->histogram, &c->histogram_size, s->nb_hbins * sizeof(*c->histogram));
    if (!c->histogram)
        return AVERROR(ENOMEM);
    histogram = c->histogram;
    memset(histogram, 0, sizeof(*histogram) * s->nb_hbins);
    // histogram of |sample| clamped to 1.0; dst starts as a copy of src
    for (i = 0; i < s->window_size; i++) {
        const unsigned index = fmin(fabs(src[i]), 1) * (s->nb_hbins - 1);
        histogram[index]++;
        dst[i] = src[i];
        clip[i] = 0;
    }
    // scan from the loudest bin down to the first populated one; it marks a
    // clipping level only if it dominates the bin below by the threshold
    for (i = s->nb_hbins - 1; i > 1; i--) {
        if (histogram[i]) {
            if (histogram[i] / (double)FFMAX(histogram[i - 1], 1) > threshold) {
                max_amplitude = i / (double)s->nb_hbins;
            }
            break;
        }
    }
    if (max_amplitude > 0.) {
        for (i = 0; i < s->window_size; i++) {
            clip[i] = fabs(src[i]) >= max_amplitude;
        }
    }
    // never flag the window edges: interpolation needs ar_order clean
    // neighbors on each side
    memset(clip, 0, s->ar_order * sizeof(*clip));
    memset(clip + (s->window_size - s->ar_order), 0, s->ar_order * sizeof(*clip));
    for (i = s->ar_order; i < s->window_size - s->ar_order; i++)
        if (clip[i])
            index[nb_clips++] = i;
    return nb_clips;
}
/* Click detector (adeclick): runs the AR prediction filter over the window
 * and flags samples whose prediction error exceeds sigmae * threshold;
 * nearby detections are fused into bursts. Returns the number of flagged
 * samples (their indices in `index`). */
static int detect_clicks(AudioDeclickContext *s, DeclickChannel *c,
                         double sigmae,
                         double *detection, double *acoefficients,
                         uint8_t *click, int *index,
                         const double *src, double *dst)
{
    const double threshold = s->threshold;
    int i, j, nb_clicks = 0, prev = -1;
    memset(detection, 0, s->window_size * sizeof(*detection));
    // prediction error: FIR filter of src with the AR coefficients
    for (i = s->ar_order; i < s->window_size; i++) {
        for (j = 0; j <= s->ar_order; j++) {
            detection[i] += acoefficients[j] * src[i - j];
        }
    }
    for (i = 0; i < s->window_size; i++) {
        click[i] = fabs(detection[i]) > sigmae * threshold;
        dst[i] = src[i];
    }
    // burst fusion: bridge gaps of up to nb_burst_samples between detections
    for (i = 0; i < s->window_size; i++) {
        if (!click[i])
            continue;
        if (prev >= 0 && (i > prev + 1) && (i <= s->nb_burst_samples + prev))
            for (j = prev + 1; j < i; j++)
                click[j] = 1;
        prev = i;
    }
    // never flag the window edges: interpolation needs ar_order clean
    // neighbors on each side
    memset(click, 0, s->ar_order * sizeof(*click));
    memset(click + (s->window_size - s->ar_order), 0, s->ar_order * sizeof(*click));
    for (i = s->ar_order; i < s->window_size - s->ar_order; i++)
        if (click[i])
            index[nb_clicks++] = i;
    return nb_clicks;
}
/* Per-job argument for the threaded filter_channel() execute callback. */
typedef struct ThreadData {
    AVFrame *out;  // destination frame shared by all channel jobs
} ThreadData;
/* Process one channel of the current analysis window: fit the AR model,
 * detect and interpolate clicks/clips (only where the timeline enables the
 * filter), then overlap-add or overlap-save into the output.
 * Runs as a slice-threaded job; returns 0 or a negative error code. */
static int filter_channel(AVFilterContext *ctx, void *arg, int ch, int nb_jobs)
{
    AudioDeclickContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *out = td->out;
    const double *src = (const double *)s->in->extended_data[ch];
    double *is = (double *)s->is->extended_data[ch];
    double *dst = (double *)s->out->extended_data[ch];
    double *ptr = (double *)out->extended_data[ch];
    double *buf = (double *)s->buffer->extended_data[ch];
    const double *w = s->window_func_lut;
    DeclickChannel *c = &s->chan[ch];
    double sigmae;
    int j, ret;
    sigmae = autoregression(src, s->ar_order, s->window_size, c->acoefficients, c->acorrelation, c->tmp);
    // a degenerate model (NaN/Inf coefficients) means the window cannot be
    // analyzed; pass it through unchanged
    if (isfinite_array(c->acoefficients, s->ar_order + 1)) {
        double *interpolated = c->interpolated;
        int *index = c->index;
        int nb_errors;
        nb_errors = s->detector(s, c, sigmae, c->detection, c->acoefficients,
                                c->click, index, src, dst);
        if (nb_errors > 0) {
            double *enabled = (double *)s->enabled->extended_data[0];
            ret = interpolation(c, src, s->ar_order, c->acoefficients, index,
                                nb_errors, c->auxiliary, interpolated);
            if (ret < 0)
                return ret;
            av_audio_fifo_peek(s->efifo, (void**)s->enabled->extended_data, s->window_size);
            // only repair samples where the timeline marks the filter enabled
            for (j = 0; j < nb_errors; j++) {
                if (enabled[index[j]]) {
                    dst[index[j]] = interpolated[j];
                    is[index[j]] = 1;
                }
            }
        }
    } else {
        memcpy(dst, src, s->window_size * sizeof(*dst));
    }
    if (s->method == 0) {
        // overlap-add: accumulate the windowed result
        for (j = 0; j < s->window_size; j++)
            buf[j] += dst[j] * w[j];
    } else {
        // overlap-save: keep only the central hop_size samples
        const int skip = s->overlap_skip;
        for (j = 0; j < s->hop_size; j++)
            buf[j] = dst[skip + j];
    }
    for (j = 0; j < s->hop_size; j++)
        ptr[j] = buf[j];
    // slide the accumulation and repair-flag buffers by one hop
    memmove(buf, buf + s->hop_size, (s->window_size * 2 - s->hop_size) * sizeof(*buf));
    memmove(is, is + s->hop_size, (s->window_size - s->hop_size) * sizeof(*is));
    memset(buf + s->window_size * 2 - s->hop_size, 0, s->hop_size * sizeof(*buf));
    memset(is + s->window_size - s->hop_size, 0, s->hop_size * sizeof(*is));
    return 0;
}
/* Emit one hop of repaired audio: peek a full window from the FIFO, run all
 * channels through filter_channel(), update statistics, and advance the
 * FIFOs by hop_size. Returns 0 or a negative error code. */
static int filter_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioDeclickContext *s = ctx->priv;
    AVFrame *out = NULL;
    int ret = 0, j, ch, detected_errors = 0;
    ThreadData td;
    out = ff_get_audio_buffer(outlink, s->hop_size);
    if (!out)
        return AVERROR(ENOMEM);
    ret = av_audio_fifo_peek(s->fifo, (void **)s->in->extended_data,
                             s->window_size);
    if (ret < 0)
        goto fail;
    td.out = out;
    // one slice-threaded job per channel
    ret = ctx->internal->execute(ctx, filter_channel, &td, NULL, inlink->channels);
    if (ret < 0)
        goto fail;
    // count repaired samples in the hop just emitted
    for (ch = 0; ch < s->in->channels; ch++) {
        double *is = (double *)s->is->extended_data[ch];
        for (j = 0; j < s->hop_size; j++) {
            if (is[j])
                detected_errors++;
        }
    }
    av_audio_fifo_drain(s->fifo, s->hop_size);
    av_audio_fifo_drain(s->efifo, s->hop_size);
    // while flushing after EOF, the final frame may be shorter than a hop
    if (s->samples_left > 0)
        out->nb_samples = FFMIN(s->hop_size, s->samples_left);
    out->pts = s->pts;
    s->pts += av_rescale_q(s->hop_size, (AVRational){1, outlink->sample_rate}, outlink->time_base);
    s->detected_errors += detected_errors;
    s->nb_samples += out->nb_samples * inlink->channels;
    // ff_filter_frame takes ownership of out
    ret = ff_filter_frame(outlink, out);
    if (ret < 0)
        return ret;
    if (s->samples_left > 0) {
        s->samples_left -= s->hop_size;
        if (s->samples_left <= 0)
            av_audio_fifo_drain(s->fifo, av_audio_fifo_size(s->fifo));
    }
fail:
    if (ret < 0)
        av_frame_free(&out);
    return ret;
}
/* Activate callback: pull a full window of input into the FIFOs, run
 * filter_frame() whenever a window (or EOF leftovers) is available, and
 * handle EOF draining. Returns 0, FFERROR_NOT_READY, or a negative error. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioDeclickContext *s = ctx->priv;
    AVFrame *in;
    int ret, status;
    int64_t pts;
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
    ret = ff_inlink_consume_samples(inlink, s->window_size, s->window_size, &in);
    if (ret < 0)
        return ret;
    if (ret > 0) {
        double *e = (double *)s->enabled->extended_data[0];
        if (s->pts == AV_NOPTS_VALUE)
            s->pts = in->pts;
        ret = av_audio_fifo_write(s->fifo, (void **)in->extended_data,
                                  in->nb_samples);
        // record, per sample, whether the filter is timeline-enabled
        for (int i = 0; i < in->nb_samples; i++)
            e[i] = !ctx->is_disabled;
        av_audio_fifo_write(s->efifo, (void**)s->enabled->extended_data, in->nb_samples);
        av_frame_free(&in);
        if (ret < 0)
            return ret;
    }
    if (av_audio_fifo_size(s->fifo) >= s->window_size ||
        s->samples_left > 0)
        return filter_frame(inlink);
    // fix: a second "fifo size >= window_size" branch followed here; it was
    // unreachable because the condition above already returns on it
    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            s->eof = 1;
            s->samples_left = av_audio_fifo_size(s->fifo) - s->overlap_skip;
            ff_filter_set_ready(ctx, 100);
            return 0;
        }
    }
    if (s->eof && s->samples_left <= 0) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
        return 0;
    }
    if (!s->eof)
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
    return FFERROR_NOT_READY;
}
/* One implementation serves both filters; select the detector callback from
 * the filter name we were instantiated under. */
static av_cold int init(AVFilterContext *ctx)
{
    AudioDeclickContext *s = ctx->priv;

    s->is_declip = !strcmp(ctx->filter->name, "adeclip");
    s->detector  = s->is_declip ? detect_clips : detect_clicks;
    return 0;
}
/* Log repair statistics and free everything allocated by config_input(). */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioDeclickContext *s = ctx->priv;
    int i;
    // fix: detected_errors/nb_samples are uint64_t, so print with PRIu64
    // (PRId64 mismatches the type), and avoid a 0/0 NaN percentage when no
    // samples were processed
    av_log(ctx, AV_LOG_INFO, "Detected %s in %"PRIu64" of %"PRIu64" samples (%g%%).\n",
           s->is_declip ? "clips" : "clicks", s->detected_errors,
           s->nb_samples,
           s->nb_samples ? 100. * s->detected_errors / s->nb_samples : 0.);
    av_audio_fifo_free(s->fifo);
    av_audio_fifo_free(s->efifo);
    av_freep(&s->window_func_lut);
    av_frame_free(&s->enabled);
    av_frame_free(&s->in);
    av_frame_free(&s->out);
    av_frame_free(&s->buffer);
    av_frame_free(&s->is);
    if (s->chan) {
        for (i = 0; i < s->nb_channels; i++) {
            DeclickChannel *c = &s->chan[i];
            av_freep(&c->detection);
            av_freep(&c->auxiliary);
            av_freep(&c->acoefficients);
            av_freep(&c->acorrelation);
            av_freep(&c->tmp);
            av_freep(&c->click);
            av_freep(&c->index);
            av_freep(&c->interpolated);
            av_freep(&c->matrix);
            c->matrix_size = 0;
            av_freep(&c->histogram);
            c->histogram_size = 0;
            av_freep(&c->vector);
            c->vector_size = 0;
            av_freep(&c->y);
            c->y_size = 0;
        }
    }
    av_freep(&s->chan);
    s->nb_channels = 0;
}
/* Single audio input (shared by adeclick and adeclip). */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};
/* Single audio output. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration entry for "adeclick" (impulsive-noise removal). */
AVFilter ff_af_adeclick = {
    .name          = "adeclick",
    .description   = NULL_IF_CONFIG_SMALL("Remove impulsive noise from input audio."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioDeclickContext),
    .priv_class    = &adeclick_class,
    .init          = init,
    .activate      = activate,
    .uninit        = uninit,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};
/* Options for the adeclip variant: same layout as adeclick_options but with
 * defaults tuned for clipping and a histogram-size option instead of burst
 * fusion. */
static const AVOption adeclip_options[] = {
    { "w", "set window size",          OFFSET(w),           AV_OPT_TYPE_DOUBLE, {.dbl=55},     10, 100, AF },
    { "o", "set window overlap",       OFFSET(overlap),     AV_OPT_TYPE_DOUBLE, {.dbl=75},     50,  95, AF },
    { "a", "set autoregression order", OFFSET(ar),          AV_OPT_TYPE_DOUBLE, {.dbl=8},       0,  25, AF },
    { "t", "set threshold",            OFFSET(threshold),   AV_OPT_TYPE_DOUBLE, {.dbl=10},      1, 100, AF },
    { "n", "set histogram size",       OFFSET(nb_hbins),    AV_OPT_TYPE_INT,    {.i64=1000},  100, 9999, AF },
    { "m", "set overlap method",       OFFSET(method),      AV_OPT_TYPE_INT,    {.i64=0},       0,   1, AF, "m" },
    { "a", "overlap-add",              0,                   AV_OPT_TYPE_CONST,  {.i64=0},       0,   0, AF, "m" },
    { "s", "overlap-save",             0,                   AV_OPT_TYPE_CONST,  {.i64=1},       0,   0, AF, "m" },
    { NULL }
};
AVFILTER_DEFINE_CLASS(adeclip);
/* Filter registration entry for "adeclip" (clipping repair); shares all
 * callbacks with adeclick, differing only in options and detector. */
AVFilter ff_af_adeclip = {
    .name          = "adeclip",
    .description   = NULL_IF_CONFIG_SMALL("Remove clipping from input audio."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioDeclickContext),
    .priv_class    = &adeclip_class,
    .init          = init,
    .activate      = activate,
    .uninit        = uninit,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};

359
externals/ffmpeg/libavfilter/af_adelay.c vendored Executable file
View File

@@ -0,0 +1,359 @@
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avstring.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "filters.h"
#include "internal.h"
/* Per-channel delay line state. */
typedef struct ChanDelay {
    int delay;            // delay length in samples (validated non-negative)
    unsigned delay_index; // samples buffered so far during the fill-in phase
    unsigned index;       // circular read/write position once the line is full
    uint8_t *samples;     // delay buffer (delay * bytes-per-sample)
} ChanDelay;
/* Private context for the adelay filter. */
typedef struct AudioDelayContext {
    const AVClass *class;
    int all;              // option: reuse last delay for remaining channels
    char *delays;         // option: "|"-separated per-channel delay list
    ChanDelay *chandelay; // one delay line per channel
    int nb_delays;
    int block_align;      // bytes per sample for the negotiated format
    int64_t padding;      // common leading delay emitted as silence up front
    int64_t max_delay;    // longest remaining delay; flushed as silence at EOF
    int64_t next_pts;     // timestamp of the next output frame
    int eof;
    // per-format delay-line worker selected in config_input()
    void (*delay_channel)(ChanDelay *d, int nb_samples,
                          const uint8_t *src, uint8_t *dst);
} AudioDelayContext;
#define OFFSET(x) offsetof(AudioDelayContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* Options for the adelay filter. */
static const AVOption adelay_options[] = {
    { "delays", "set list of delays for each channel",            OFFSET(delays), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
    { "all",    "use last available delay for remained channels", OFFSET(all),    AV_OPT_TYPE_BOOL,   {.i64=0},    0, 1, A },
    { NULL }
};
AVFILTER_DEFINE_CLASS(adelay);
/* Advertise supported formats: any channel count/layout, the planar integer
 * and float formats the DELAY() workers cover, any sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_U8P, AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts = ff_all_channel_counts();
    AVFilterFormats *formats;
    int ret;

    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Generate one delay-line worker per sample type. While the line is filling
 * (delay_index < delay) input is buffered and silence (`fill`) is emitted;
 * afterwards the buffer acts as a circular queue: the oldest sample is
 * emitted and replaced by the current input.
 * NOTE(review): `d->delay_index < d->delay` compares unsigned vs int; delay
 * is validated >= 0 in config_input, so the promotion looks benign —
 * confirm. */
#define DELAY(name, type, fill)                                           \
static void delay_channel_## name ##p(ChanDelay *d, int nb_samples,       \
                                      const uint8_t *ssrc, uint8_t *ddst) \
{                                                                         \
    const type *src = (type *)ssrc;                                       \
    type *dst = (type *)ddst;                                             \
    type *samples = (type *)d->samples;                                   \
                                                                          \
    while (nb_samples) {                                                  \
        if (d->delay_index < d->delay) {                                  \
            const int len = FFMIN(nb_samples, d->delay - d->delay_index); \
                                                                          \
            memcpy(&samples[d->delay_index], src, len * sizeof(type));    \
            memset(dst, fill, len * sizeof(type));                        \
            d->delay_index += len;                                        \
            src += len;                                                   \
            dst += len;                                                   \
            nb_samples -= len;                                            \
        } else {                                                          \
            *dst = samples[d->index];                                     \
            samples[d->index] = *src;                                     \
            nb_samples--;                                                 \
            d->index++;                                                   \
            src++, dst++;                                                 \
            d->index = d->index >= d->delay ? 0 : d->index;               \
        }                                                                 \
    }                                                                     \
}
/* 0x80 is silence for unsigned 8-bit; all other formats use zero. */
DELAY(u8,  uint8_t, 0x80)
DELAY(s16, int16_t, 0)
DELAY(s32, int32_t, 0)
DELAY(flt, float,   0)
DELAY(dbl, double,  0)
/* Parse the per-channel delay list, normalize delays (the common minimum is
 * hoisted into `padding` and emitted once as leading silence), allocate the
 * delay buffers, and pick the per-format worker.
 * Delay syntax per channel: "<int>S" = samples, "<float>s" = seconds,
 * plain "<float>" = milliseconds. Returns 0 or a negative AVERROR code. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioDelayContext *s = ctx->priv;
    char *p, *arg, *saveptr = NULL;
    int i;
    s->chandelay = av_calloc(inlink->channels, sizeof(*s->chandelay));
    if (!s->chandelay)
        return AVERROR(ENOMEM);
    s->nb_delays = inlink->channels;
    s->block_align = av_get_bytes_per_sample(inlink->format);
    p = s->delays;
    for (i = 0; i < s->nb_delays; i++) {
        ChanDelay *d = &s->chandelay[i];
        float delay, div;
        char type = 0;
        int ret;
        if (!(arg = av_strtok(p, "|", &saveptr)))
            break;
        p = NULL;
        // "<int>S" means the value is already in samples
        ret = av_sscanf(arg, "%d%c", &d->delay, &type);
        if (ret != 2 || type != 'S') {
            // otherwise seconds ('s' suffix) or milliseconds (no suffix)
            div = type == 's' ? 1.0 : 1000.0;
            if (av_sscanf(arg, "%f", &delay) != 1) {
                av_log(ctx, AV_LOG_ERROR, "Invalid syntax for delay.\n");
                return AVERROR(EINVAL);
            }
            d->delay = delay * inlink->sample_rate / div;
        }
        if (d->delay < 0) {
            av_log(ctx, AV_LOG_ERROR, "Delay must be non negative number.\n");
            return AVERROR(EINVAL);
        }
    }
    // "all" option: channels beyond the list inherit the last parsed delay
    if (s->all && i) {
        for (int j = i; j < s->nb_delays; j++)
            s->chandelay[j].delay = s->chandelay[i-1].delay;
    }
    // hoist the minimum delay common to all channels into leading padding
    s->padding = s->chandelay[0].delay;
    for (i = 1; i < s->nb_delays; i++) {
        ChanDelay *d = &s->chandelay[i];
        s->padding = FFMIN(s->padding, d->delay);
    }
    if (s->padding) {
        for (i = 0; i < s->nb_delays; i++) {
            ChanDelay *d = &s->chandelay[i];
            d->delay -= s->padding;
        }
    }
    for (i = 0; i < s->nb_delays; i++) {
        ChanDelay *d = &s->chandelay[i];
        if (!d->delay)
            continue;
        d->samples = av_malloc_array(d->delay, s->block_align);
        if (!d->samples)
            return AVERROR(ENOMEM);
        s->max_delay = FFMAX(s->max_delay, d->delay);
    }
    switch (inlink->format) {
    case AV_SAMPLE_FMT_U8P : s->delay_channel = delay_channel_u8p ; break;
    case AV_SAMPLE_FMT_S16P: s->delay_channel = delay_channel_s16p; break;
    case AV_SAMPLE_FMT_S32P: s->delay_channel = delay_channel_s32p; break;
    case AV_SAMPLE_FMT_FLTP: s->delay_channel = delay_channel_fltp; break;
    case AV_SAMPLE_FMT_DBLP: s->delay_channel = delay_channel_dblp; break;
    }
    return 0;
}
/* Delay each channel of one frame through its delay line; a zero delay is a
 * plain copy. Takes ownership of `frame`. Bypasses entirely when disabled
 * or when no delays were specified. */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AudioDelayContext *s = ctx->priv;
    AVFrame *out_frame;
    int i;
    if (ctx->is_disabled || !s->delays)
        return ff_filter_frame(ctx->outputs[0], frame);
    out_frame = ff_get_audio_buffer(ctx->outputs[0], frame->nb_samples);
    if (!out_frame) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out_frame, frame);
    for (i = 0; i < s->nb_delays; i++) {
        ChanDelay *d = &s->chandelay[i];
        const uint8_t *src = frame->extended_data[i];
        uint8_t *dst = out_frame->extended_data[i];
        if (!d->delay)
            memcpy(dst, src, frame->nb_samples * s->block_align);
        else
            s->delay_channel(d, frame->nb_samples, src, dst);
    }
    // output timestamps run on our own clock (padding shifts everything)
    out_frame->pts = s->next_pts;
    s->next_pts += av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
    av_frame_free(&frame);
    return ff_filter_frame(ctx->outputs[0], out_frame);
}
/* Activate callback: first emit the common leading padding as silence, then
 * pass input frames through filter_frame(), and after EOF push silence
 * through the delay lines until max_delay samples have been flushed. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioDelayContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int64_t pts;
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
    // leading silence for the hoisted common delay, in chunks of <= 2048
    if (s->padding) {
        int nb_samples = FFMIN(s->padding, 2048);
        frame = ff_get_audio_buffer(outlink, nb_samples);
        if (!frame)
            return AVERROR(ENOMEM);
        s->padding -= nb_samples;
        av_samples_set_silence(frame->extended_data, 0,
                               frame->nb_samples,
                               outlink->channels,
                               frame->format);
        frame->pts = s->next_pts;
        if (s->next_pts != AV_NOPTS_VALUE)
            s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);
        return ff_filter_frame(outlink, frame);
    }
    ret = ff_inlink_consume_frame(inlink, &frame);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, frame);
    if (ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF)
            s->eof = 1;
    }
    // after EOF, feed silence through the delay lines to flush their tails
    if (s->eof && s->max_delay) {
        int nb_samples = FFMIN(s->max_delay, 2048);
        frame = ff_get_audio_buffer(outlink, nb_samples);
        if (!frame)
            return AVERROR(ENOMEM);
        s->max_delay -= nb_samples;
        av_samples_set_silence(frame->extended_data, 0,
                               frame->nb_samples,
                               outlink->channels,
                               frame->format);
        frame->pts = s->next_pts;
        return filter_frame(inlink, frame);
    }
    if (s->eof && s->max_delay == 0) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
        return 0;
    }
    if (!s->eof)
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
    return FFERROR_NOT_READY;
}
/* Release all per-channel delay-line buffers, then the array itself. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioDelayContext *s = ctx->priv;

    if (s->chandelay)
        for (int ch = 0; ch < s->nb_delays; ch++)
            av_freep(&s->chandelay[ch].samples);
    av_freep(&s->chandelay);
}
/* Single audio input; config_input (defined earlier in this file) sets up
 * the per-channel delay state once link parameters are known. */
static const AVFilterPad adelay_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};

/* Single audio output; frames are pushed from activate()/filter_frame(). */
static const AVFilterPad adelay_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Filter registration: activate-based (no .filter_frame on the input pad),
 * with internal timeline support. */
AVFilter ff_af_adelay = {
    .name          = "adelay",
    .description   = NULL_IF_CONFIG_SMALL("Delay one or more audio channels."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioDelayContext),
    .priv_class    = &adelay_class,
    .activate      = activate,
    .uninit        = uninit,
    .inputs        = adelay_inputs,
    .outputs       = adelay_outputs,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};

207
externals/ffmpeg/libavfilter/af_aderivative.c vendored Executable file
View File

@@ -0,0 +1,207 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
/* Shared private context for the aderivative and aintegral filters. */
typedef struct ADerivativeContext {
    const AVClass *class;
    AVFrame *prev;      // 1-sample-per-channel state buffer (previous sample
                        // for aderivative, running sum for aintegral)
    // per-format processing routine, selected in config_input()
    void (*filter)(void **dst, void **prv, const void **src,
                   int nb_samples, int channels);
} ADerivativeContext;
/**
 * Negotiate supported formats.  aderivative handles planar s16/s32/flt/dbl;
 * aintegral is restricted to the float formats (integer accumulation would
 * overflow).  Any channel count and sample rate is accepted.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat derivative_sample_fmts[] = {
        AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_S32P, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    static const enum AVSampleFormat integral_sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    formats = ff_make_format_list(strcmp(ctx->filter->name, "aintegral") ?
                                  derivative_sample_fmts : integral_sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats) /* FIX: this ENOMEM check was missing (every sibling
                   * query_formats in this library performs it) */
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/*
 * Expand one first-difference routine per planar sample type:
 * out[i] = in[i] - in[i-1], with state[0] carrying the last sample of the
 * previous frame across calls.
 */
#define DERIVATIVE(name, type)                                                \
static void aderivative_## name ##p(void **dst, void **prv, const void **src, \
                                    int nb_samples, int channels)             \
{                                                                             \
    for (int ch = 0; ch < channels; ch++) {                                   \
        const type *in = src[ch];                                             \
        type *out      = dst[ch];                                             \
        type *state    = prv[ch];                                             \
                                                                              \
        for (int i = 0; i < nb_samples; i++) {                                \
            const type cur = in[i];                                           \
                                                                              \
            out[i]   = cur - state[0];                                        \
            state[0] = cur;                                                   \
        }                                                                     \
    }                                                                         \
}

DERIVATIVE(flt, float)
DERIVATIVE(dbl, double)
DERIVATIVE(s16, int16_t)
DERIVATIVE(s32, int32_t)
/*
 * Expand one running-sum routine per planar float type:
 * out[i] = in[i] + out[i-1], with state[0] carrying the accumulated sum
 * across frames.  Only float formats are instantiated (see query_formats).
 */
#define INTEGRAL(name, type)                                                  \
static void aintegral_## name ##p(void **dst, void **prv, const void **src,   \
                                  int nb_samples, int channels)               \
{                                                                             \
    for (int ch = 0; ch < channels; ch++) {                                   \
        const type *in = src[ch];                                             \
        type *out      = dst[ch];                                             \
        type *acc      = prv[ch];                                             \
                                                                              \
        for (int i = 0; i < nb_samples; i++) {                                \
            out[i] = acc[0] + in[i];                                          \
            acc[0] = out[i];                                                  \
        }                                                                     \
    }                                                                         \
}

INTEGRAL(flt, float)
INTEGRAL(dbl, double)
/* Select the per-format routine; aintegral overrides the float entries
 * (its query_formats never negotiates the integer formats). */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ADerivativeContext *s = ctx->priv;
    const int integral = !strcmp(ctx->filter->name, "aintegral");

    switch (inlink->format) {
    case AV_SAMPLE_FMT_FLTP:
        s->filter = integral ? aintegral_fltp : aderivative_fltp;
        break;
    case AV_SAMPLE_FMT_DBLP:
        s->filter = integral ? aintegral_dblp : aderivative_dblp;
        break;
    case AV_SAMPLE_FMT_S32P:
        s->filter = aderivative_s32p;
        break;
    case AV_SAMPLE_FMT_S16P:
        s->filter = aderivative_s16p;
        break;
    }

    return 0;
}
/**
 * Process one frame: run the selected derivative/integral routine into a
 * freshly allocated output frame, updating the 1-sample state in s->prev.
 * Takes ownership of `in` (freed on every path).
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ADerivativeContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out = ff_get_audio_buffer(outlink, in->nb_samples);

    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    /* lazily allocate the one-sample-per-channel state buffer (zeroed) */
    if (!s->prev) {
        s->prev = ff_get_audio_buffer(inlink, 1);
        if (!s->prev) {
            av_frame_free(&in);
            av_frame_free(&out); /* FIX: `out` was leaked on this path */
            return AVERROR(ENOMEM);
        }
    }

    s->filter((void **)out->extended_data, (void **)s->prev->extended_data, (const void **)in->extended_data,
              in->nb_samples, in->channels);

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Free the inter-frame state buffer (safe if it was never allocated). */
static av_cold void uninit(AVFilterContext *ctx)
{
    ADerivativeContext *priv = ctx->priv;

    av_frame_free(&priv->prev);
}
/* Shared input pad: both filters process frames directly on the input. */
static const AVFilterPad aderivative_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad aderivative_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* aderivative and aintegral share context, pads and callbacks; they are
 * distinguished at runtime by ctx->filter->name (see query_formats /
 * config_input above). */
AVFilter ff_af_aderivative = {
    .name          = "aderivative",
    .description   = NULL_IF_CONFIG_SMALL("Compute derivative of input audio."),
    .query_formats = query_formats,
    .priv_size     = sizeof(ADerivativeContext),
    .uninit        = uninit,
    .inputs        = aderivative_inputs,
    .outputs       = aderivative_outputs,
};

AVFilter ff_af_aintegral = {
    .name          = "aintegral",
    .description   = NULL_IF_CONFIG_SMALL("Compute integral of input audio."),
    .query_formats = query_formats,
    .priv_size     = sizeof(ADerivativeContext),
    .uninit        = uninit,
    .inputs        = aderivative_inputs,
    .outputs       = aderivative_outputs,
};

390
externals/ffmpeg/libavfilter/af_aecho.c vendored Executable file
View File

@@ -0,0 +1,390 @@
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "filters.h"
#include "internal.h"
/* Private context of the aecho filter. */
typedef struct AudioEchoContext {
    const AVClass *class;
    float in_gain, out_gain;    // input/output gain options
    char *delays, *decays;      // raw '|'-separated option strings
    float *delay, *decay;       // parsed per-echo delay (ms) / decay values
    int nb_echoes;              // number of parsed echo taps
    int delay_index;            // shared write position in the circular buffers
    uint8_t **delayptrs;        // per-channel circular delay buffers
    int max_samples, fade_out;  // longest delay in samples; remaining tail to flush
    int *samples;               // per-echo delay converted to samples
    int eof;                    // input reached EOF
    int64_t next_pts;           // pts for the next (flush) output frame
    // per-format processing routine, selected in config_output()
    void (*echo_samples)(struct AudioEchoContext *ctx, uint8_t **delayptrs,
                         uint8_t * const *src, uint8_t **dst,
                         int nb_samples, int channels);
} AudioEchoContext;

#define OFFSET(x) offsetof(AudioEchoContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption aecho_options[] = {
    { "in_gain",  "set signal input gain",  OFFSET(in_gain),  AV_OPT_TYPE_FLOAT,  {.dbl=0.6}, 0, 1, A },
    { "out_gain", "set signal output gain", OFFSET(out_gain), AV_OPT_TYPE_FLOAT,  {.dbl=0.3}, 0, 1, A },
    { "delays",   "set list of signal delays", OFFSET(delays), AV_OPT_TYPE_STRING, {.str="1000"}, 0, 0, A },
    { "decays",   "set list of signal decays", OFFSET(decays), AV_OPT_TYPE_STRING, {.str="0.5"}, 0, 0, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(aecho);
/* Number of '|'-separated items = number of separators + 1. */
static void count_items(char *item_str, int *nb_items)
{
    int count = 1;

    for (char *cur = item_str; *cur; cur++)
        count += (*cur == '|');
    *nb_items = count;
}
/* Tokenize item_str on '|' (destructively) and parse up to *nb_items
 * floats into items[]; on return *nb_items holds the count that parsed. */
static void fill_items(char *item_str, int *nb_items, float *items)
{
    char *saveptr = NULL;
    char *cursor = item_str;
    int parsed = 0;

    for (int i = 0; i < *nb_items; i++) {
        char *token = av_strtok(cursor, "|", &saveptr);

        cursor = NULL; /* subsequent calls continue in the same string */
        if (token)
            parsed += av_sscanf(token, "%f", &items[parsed]) == 1;
    }
    *nb_items = parsed;
}
/* Free all parsed option arrays and the delay buffers.  delayptrs[0] is
 * the single backing allocation made by av_samples_alloc_array_and_samples,
 * so it is released before the pointer array itself. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioEchoContext *s = ctx->priv;

    if (s->delayptrs)
        av_freep(&s->delayptrs[0]);
    av_freep(&s->delayptrs);

    av_freep(&s->delay);
    av_freep(&s->decay);
    av_freep(&s->samples);
}
/**
 * Parse the '|'-separated "delays" (milliseconds) and "decays" option
 * strings and validate them: both lists must be non-empty, equally long,
 * with delays in (0, 90000] ms and decays in (0, 1].
 */
static av_cold int init(AVFilterContext *ctx)
{
    AudioEchoContext *s = ctx->priv;
    int nb_delays, nb_decays, i;

    if (!s->delays || !s->decays) {
        av_log(ctx, AV_LOG_ERROR, "Missing delays and/or decays.\n");
        return AVERROR(EINVAL);
    }

    count_items(s->delays, &nb_delays);
    count_items(s->decays, &nb_decays);

    s->delay = av_realloc_f(s->delay, nb_delays, sizeof(*s->delay));
    s->decay = av_realloc_f(s->decay, nb_decays, sizeof(*s->decay));
    if (!s->delay || !s->decay)
        return AVERROR(ENOMEM);

    /* fill_items() shrinks the counts to the number of values that parsed */
    fill_items(s->delays, &nb_delays, s->delay);
    fill_items(s->decays, &nb_decays, s->decay);

    if (nb_delays != nb_decays) {
        av_log(ctx, AV_LOG_ERROR, "Number of delays %d differs from number of decays %d.\n", nb_delays, nb_decays);
        return AVERROR(EINVAL);
    }

    s->nb_echoes = nb_delays;
    if (!s->nb_echoes) {
        av_log(ctx, AV_LOG_ERROR, "At least one decay & delay must be set.\n");
        return AVERROR(EINVAL);
    }

    /* per-echo sample counts are filled in later, in config_output() */
    s->samples = av_realloc_f(s->samples, nb_delays, sizeof(*s->samples));
    if (!s->samples)
        return AVERROR(ENOMEM);

    for (i = 0; i < nb_delays; i++) {
        if (s->delay[i] <= 0 || s->delay[i] > 90000) {
            av_log(ctx, AV_LOG_ERROR, "delay[%d]: %f is out of allowed range: (0, 90000]\n", i, s->delay[i]);
            return AVERROR(EINVAL);
        }
        if (s->decay[i] <= 0 || s->decay[i] > 1) {
            av_log(ctx, AV_LOG_ERROR, "decay[%d]: %f is out of allowed range: (0, 1]\n", i, s->decay[i]);
            return AVERROR(EINVAL);
        }
    }

    s->next_pts = AV_NOPTS_VALUE;

    av_log(ctx, AV_LOG_DEBUG, "nb_echoes:%d\n", s->nb_echoes);
    return 0;
}
/* Negotiate planar s16/s32/flt/dbl, any layout, any sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_S16P, AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *chlayouts;
    AVFilterFormats *fmts;
    int err;

    chlayouts = ff_all_channel_counts();
    if (!chlayouts)
        return AVERROR(ENOMEM);
    err = ff_set_common_channel_layouts(ctx, chlayouts);
    if (err < 0)
        return err;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    err = ff_set_common_formats(ctx, fmts);
    if (err < 0)
        return err;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/* Cheap modulo for values known to lie in [0, 2b): avoids '%' in the
 * per-sample loop. */
#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))

/*
 * Expand one echo routine per sample format.
 *
 * delayptrs holds one circular buffer of max_samples entries per channel;
 * delay_index is the shared write position — it advances identically for
 * every channel, which is why it is copied back to the context only after
 * the channel loop.  For each sample:
 *     out = in * in_gain + sum_j(dbuf[index - samples[j]] * decay[j])
 * then scaled by out_gain and clipped to [min, max], and the dry input is
 * written into the delay buffer at the current index.
 */
#define ECHO(name, type, min, max)                                          \
static void echo_samples_## name ##p(AudioEchoContext *ctx,                 \
                                     uint8_t **delayptrs,                   \
                                     uint8_t * const *src, uint8_t **dst,   \
                                     int nb_samples, int channels)          \
{                                                                           \
    const double out_gain = ctx->out_gain;                                  \
    const double in_gain = ctx->in_gain;                                    \
    const int nb_echoes = ctx->nb_echoes;                                   \
    const int max_samples = ctx->max_samples;                               \
    int i, j, chan, av_uninit(index);                                       \
                                                                            \
    av_assert1(channels > 0); /* would corrupt delay_index */               \
                                                                            \
    for (chan = 0; chan < channels; chan++) {                               \
        const type *s = (type *)src[chan];                                  \
        type *d = (type *)dst[chan];                                        \
        type *dbuf = (type *)delayptrs[chan];                               \
                                                                            \
        index = ctx->delay_index;                                           \
        for (i = 0; i < nb_samples; i++, s++, d++) {                        \
            double out, in;                                                 \
                                                                            \
            in = *s;                                                        \
            out = in * in_gain;                                             \
            for (j = 0; j < nb_echoes; j++) {                               \
                int ix = index + max_samples - ctx->samples[j];             \
                ix = MOD(ix, max_samples);                                  \
                out += dbuf[ix] * ctx->decay[j];                            \
            }                                                               \
            out *= out_gain;                                                \
                                                                            \
            *d = av_clipd(out, min, max);                                   \
            dbuf[index] = in;                                               \
                                                                            \
            index = MOD(index + 1, max_samples);                            \
        }                                                                   \
    }                                                                       \
    ctx->delay_index = index;                                               \
}

ECHO(dbl, double,  -1.0,      1.0      )
ECHO(flt, float,   -1.0,      1.0      )
ECHO(s16, int16_t, INT16_MIN, INT16_MAX)
ECHO(s32, int32_t, INT32_MIN, INT32_MAX)
/**
 * (Re)configure for the negotiated output format: convert each delay from
 * milliseconds to samples, size the circular delay buffers, select the
 * per-format echo routine and warn when the combined gains can clip.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioEchoContext *s = ctx->priv;
    float volume = 1.0;
    int i;

    for (i = 0; i < s->nb_echoes; i++) {
        s->samples[i] = s->delay[i] * outlink->sample_rate / 1000.0;
        s->max_samples = FFMAX(s->max_samples, s->samples[i]);
        volume += s->decay[i];
    }

    if (s->max_samples <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Nothing to echo - missing delay samples.\n");
        return AVERROR(EINVAL);
    }
    /* after EOF we keep flushing until the longest tail has been emitted */
    s->fade_out = s->max_samples;

    /* NOTE(review): the warning prints only out_gain although the check
     * uses volume * in_gain * out_gain — looks intentional but confirm */
    if (volume * s->in_gain * s->out_gain > 1.0)
        av_log(ctx, AV_LOG_WARNING,
               "out_gain %f can cause saturation of output\n", s->out_gain);

    switch (outlink->format) {
    case AV_SAMPLE_FMT_DBLP: s->echo_samples = echo_samples_dblp; break;
    case AV_SAMPLE_FMT_FLTP: s->echo_samples = echo_samples_fltp; break;
    case AV_SAMPLE_FMT_S16P: s->echo_samples = echo_samples_s16p; break;
    case AV_SAMPLE_FMT_S32P: s->echo_samples = echo_samples_s32p; break;
    }

    /* drop buffers from a previous configuration before reallocating */
    if (s->delayptrs)
        av_freep(&s->delayptrs[0]);
    av_freep(&s->delayptrs);

    return av_samples_alloc_array_and_samples(&s->delayptrs, NULL,
                                              outlink->channels,
                                              s->max_samples,
                                              outlink->format, 0);
}
/* Run the echo over one frame, processing in place when the input frame
 * is writable.  Takes ownership of `frame`. */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AudioEchoContext *s = ctx->priv;
    const int in_place = av_frame_is_writable(frame);
    AVFrame *out = frame;

    if (!in_place) {
        out = ff_get_audio_buffer(ctx->outputs[0], frame->nb_samples);
        if (!out) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, frame);
    }

    s->echo_samples(s, s->delayptrs, frame->extended_data, out->extended_data,
                    frame->nb_samples, inlink->channels);

    /* remember where the flush frames should continue, in stream time */
    s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);

    if (frame != out)
        av_frame_free(&frame);
    return ff_filter_frame(ctx->outputs[0], out);
}
/* Emit up to 2048 samples of silence run through the echo, draining the
 * decaying tail after input EOF. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioEchoContext *s = ctx->priv;
    const int nb_samples = FFMIN(s->fade_out, 2048);
    AVFrame *frame = ff_get_audio_buffer(outlink, nb_samples);

    if (!frame)
        return AVERROR(ENOMEM);
    s->fade_out -= nb_samples;

    av_samples_set_silence(frame->extended_data, 0,
                           frame->nb_samples,
                           outlink->channels,
                           frame->format);

    s->echo_samples(s, s->delayptrs, frame->extended_data, frame->extended_data,
                    frame->nb_samples, outlink->channels);

    frame->pts = s->next_pts;
    if (s->next_pts != AV_NOPTS_VALUE)
        s->next_pts += av_rescale_q(nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);

    return ff_filter_frame(outlink, frame);
}
/**
 * Event-driven entry point: consume input frames while they arrive; after
 * input EOF keep producing flush frames via request_frame() until the echo
 * tail (s->fade_out samples) has drained, then propagate EOF.
 */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioEchoContext *s = ctx->priv;
    AVFrame *in;
    int ret, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    ret = ff_inlink_consume_frame(inlink, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);

    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF)
            s->eof = 1;
    }

    if (s->eof && s->fade_out <= 0) {
        /* tail fully flushed: end the stream at the last computed pts */
        ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
        return 0;
    }

    if (!s->eof)
        FF_FILTER_FORWARD_WANTED(outlink, inlink);

    /* reached after EOF while tail samples remain: synthesize a flush frame */
    return request_frame(outlink);
}
/* Input pad: no callbacks — frames are pulled from activate(). */
static const AVFilterPad aecho_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Output pad: delay buffers are (re)allocated in config_output(). */
static const AVFilterPad aecho_outputs[] = {
    {
        .name         = "default",
        .config_props = config_output,
        .type         = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_aecho = {
    .name          = "aecho",
    .description   = NULL_IF_CONFIG_SMALL("Add echoing to the audio."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioEchoContext),
    .priv_class    = &aecho_class,
    .init          = init,
    .activate      = activate,
    .uninit        = uninit,
    .inputs        = aecho_inputs,
    .outputs       = aecho_outputs,
};

369
externals/ffmpeg/libavfilter/af_aemphasis.c vendored Executable file
View File

@@ -0,0 +1,369 @@
/*
* Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen, Damien Zammit and others
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
/* Un-normalized biquad transfer-function coefficients (numerator a*,
 * denominator b*, with b0 implicitly 1 after normalization). */
typedef struct BiquadCoeffs {
    double a0, a1, a2, b1, b2;
} BiquadCoeffs;

/* Biquad coefficients plus the two direct-form-II state variables. */
typedef struct BiquadD2 {
    double a0, a1, a2, b1, b2, w1, w2;
} BiquadD2;

/* Per-channel filter chain: the emphasis biquad r1, optionally preceded
 * by a brick-wall low-pass (see config_input). */
typedef struct RIAACurve {
    BiquadD2 r1;
    BiquadD2 brickw;
    int use_brickw;
} RIAACurve;

typedef struct AudioEmphasisContext {
    const AVClass *class;
    int mode, type;             // mode: reproduction/production; type: curve preset
    double level_in, level_out; // input/output gain
    RIAACurve *rc;              // one entry per channel (all identical)
} AudioEmphasisContext;

#define OFFSET(x) offsetof(AudioEmphasisContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption aemphasis_options[] = {
    { "level_in",      "set input gain", OFFSET(level_in),  AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 64, FLAGS },
    { "level_out",    "set output gain", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 64, FLAGS },
    { "mode",         "set filter mode", OFFSET(mode), AV_OPT_TYPE_INT,   {.i64=0}, 0, 1, FLAGS, "mode" },
    { "reproduction",              NULL,            0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "mode" },
    { "production",                NULL,            0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "mode" },
    { "type",         "set filter type", OFFSET(type), AV_OPT_TYPE_INT,   {.i64=4}, 0, 8, FLAGS, "type" },
    { "col",                 "Columbia",            0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, FLAGS, "type" },
    { "emi",                      "EMI",            0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, FLAGS, "type" },
    { "bsi",              "BSI (78RPM)",            0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, FLAGS, "type" },
    { "riaa",                    "RIAA",            0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, FLAGS, "type" },
    { "cd",         "Compact Disc (CD)",            0, AV_OPT_TYPE_CONST, {.i64=4}, 0, 0, FLAGS, "type" },
    { "50fm",               "50µs (FM)",            0, AV_OPT_TYPE_CONST, {.i64=5}, 0, 0, FLAGS, "type" },
    { "75fm",               "75µs (FM)",            0, AV_OPT_TYPE_CONST, {.i64=6}, 0, 0, FLAGS, "type" },
    { "50kf",            "50µs (FM-KF)",            0, AV_OPT_TYPE_CONST, {.i64=7}, 0, 0, FLAGS, "type" },
    { "75kf",            "75µs (FM-KF)",            0, AV_OPT_TYPE_CONST, {.i64=8}, 0, 0, FLAGS, "type" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(aemphasis);
/* One biquad step in direct form II: w1/w2 hold the two delayed states. */
static inline double biquad(BiquadD2 *bq, double in)
{
    const double w0  = in - bq->b1 * bq->w1 - bq->b2 * bq->w2;
    const double out = bq->a0 * w0 + bq->a1 * bq->w1 + bq->a2 * bq->w2;

    bq->w2 = bq->w1;
    bq->w1 = w0;
    return out;
}
/* Apply the per-channel emphasis chain to one frame of interleaved
 * doubles, in place when the input frame is writable. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioEmphasisContext *s = ctx->priv;
    const double gain_in  = s->level_in;
    const double gain_out = s->level_out;
    const double *src = (const double *)in->data[0];
    AVFrame *out;
    double *dst;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];

    /* interleaved layout: both pointers advance one whole frame
     * (all channels) per outer iteration */
    for (int n = 0; n < in->nb_samples; n++) {
        for (int c = 0; c < inlink->channels; c++) {
            double v = src[c] * gain_in;

            if (s->rc[c].use_brickw)
                v = biquad(&s->rc[c].brickw, v);
            dst[c] = gain_out * biquad(&s->rc[c].r1, v);
        }
        dst += inlink->channels;
        src += inlink->channels;
    }

    if (in != out)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Only interleaved doubles are supported; any layout and sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *chlayouts;
    AVFilterFormats *fmts;
    int err;

    chlayouts = ff_all_channel_counts();
    if (!chlayouts)
        return AVERROR(ENOMEM);
    err = ff_set_common_channel_layouts(ctx, chlayouts);
    if (err < 0)
        return err;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    err = ff_set_common_formats(ctx, fmts);
    if (err < 0)
        return err;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/* RBJ audio-EQ-cookbook high shelf; `peak` is the linear shelf gain.
 * Coefficients are normalized so the denominator's b0 becomes 1. */
static inline void set_highshelf_rbj(BiquadD2 *bq, double freq, double q, double peak, double sr)
{
    const double A     = sqrt(peak);
    const double w0    = freq * 2 * M_PI / sr;
    const double alpha = sin(w0) / (2 * q);
    const double cw0   = cos(w0);
    const double beta  = 2 * sqrt(A) * alpha;
    const double b0    = (A + 1) - (A - 1) * cw0 + beta;
    const double norm  = 1 / b0;

    bq->a0 = A *  ((A + 1) + (A - 1) * cw0 + beta) * norm;
    bq->a1 = -2 * A * ((A - 1) + (A + 1) * cw0)    * norm;
    bq->a2 = A *  ((A + 1) + (A - 1) * cw0 - beta) * norm;
    bq->b1 = 2 *  ((A - 1) - (A + 1) * cw0)        * norm;
    bq->b2 =      ((A + 1) - (A - 1) * cw0 - beta) * norm;
}
/* RBJ audio-EQ-cookbook low pass at cutoff fc, scaled by `gain`. */
static inline void set_lp_rbj(BiquadD2 *bq, double fc, double q, double sr, double gain)
{
    const double w0    = 2.0 * M_PI * fc / sr;
    const double alpha = sin(w0) / (2 * q);
    const double cs    = cos(w0);
    const double inv   = 1.0 / (1.0 + alpha);

    bq->a0 = gain * inv * (1.0 - cs) * 0.5;
    bq->a2 = bq->a0;
    bq->a1 = bq->a0 + bq->a0;
    bq->b1 = -2.0 * cs * inv;
    bq->b2 = (1.0 - alpha) * inv;
}
/* Magnitude of the biquad's response at `freq` Hz, evaluated on the unit
 * circle at z = e^{-jw}: |(a0 + a1*z + a2*z^2) / (1 + b1*z + b2*z^2)|. */
static double freq_gain(BiquadCoeffs *c, double freq, double sr)
{
    const double w   = freq * (2.0 * M_PI / sr);
    const double zr  = cos(w);
    const double zi  = -sin(w);
    const double z2r = zr * zr - zi * zi; /* Re(z^2); Im(z^2) = 2*zr*zi */
    const double num = hypot(c->a0 + c->a1 * zr + c->a2 * z2r,
                             c->a1 * zi + 2 * c->a2 * zr * zi);
    const double den = hypot(1 + c->b1 * zr + c->b2 * z2r,
                             c->b1 * zi + 2 * c->b2 * zr * zi);

    return num / den;
}
/**
 * Build the per-channel emphasis filter once link parameters are known.
 *
 * For types 0-6 the preset defines three corner frequencies i/j/k (Hz,
 * either given directly or derived from time constants tau1..tau3), from
 * which an analog prototype is discretized via the bilinear-style formulas
 * below; the result is normalized to unity gain at 1 kHz and a brick-wall
 * low-pass is prepended.  Types 7/8 (FM-KF) instead use a single RBJ
 * high shelf with no brick-wall stage.  Channel 0's coefficients are
 * computed once and copied to all other channels.
 *
 * NOTE(review): s->rc is allocated unconditionally here; if config_input
 * can run more than once on the same context, the previous array leaks —
 * confirm whether a reconfigure path exists.
 */
static int config_input(AVFilterLink *inlink)
{
    double i, j, k, g, t, a0, a1, a2, b1, b2, tau1, tau2, tau3;
    double cutfreq, gain1kHz, gc, sr = inlink->sample_rate;
    AVFilterContext *ctx = inlink->dst;
    AudioEmphasisContext *s = ctx->priv;
    BiquadCoeffs coeffs;
    int ch;

    s->rc = av_calloc(inlink->channels, sizeof(*s->rc));
    if (!s->rc)
        return AVERROR(ENOMEM);

    /* corner frequencies (Hz) per preset; tau values are time constants */
    switch (s->type) {
    case 0: //"Columbia"
        i = 100.;
        j = 500.;
        k = 1590.;
        break;
    case 1: //"EMI"
        i = 70.;
        j = 500.;
        k = 2500.;
        break;
    case 2: //"BSI(78rpm)"
        i = 50.;
        j = 353.;
        k = 3180.;
        break;
    case 3: //"RIAA"
    default:  /* types 7/8 also pass through here; i/j/k are unused then */
        tau1 = 0.003180;
        tau2 = 0.000318;
        tau3 = 0.000075;
        i = 1. / (2. * M_PI * tau1);
        j = 1. / (2. * M_PI * tau2);
        k = 1. / (2. * M_PI * tau3);
        break;
    case 4: //"CD Mastering"
        tau1 = 0.000050;
        tau2 = 0.000015;
        tau3 = 0.0000001;// 1.6MHz out of audible range for null impact
        i = 1. / (2. * M_PI * tau1);
        j = 1. / (2. * M_PI * tau2);
        k = 1. / (2. * M_PI * tau3);
        break;
    case 5: //"50µs FM (Europe)"
        tau1 = 0.000050;
        tau2 = tau1 / 20;// not used
        tau3 = tau1 / 50;//
        i = 1. / (2. * M_PI * tau1);
        j = 1. / (2. * M_PI * tau2);
        k = 1. / (2. * M_PI * tau3);
        break;
    case 6: //"75µs FM (US)"
        tau1 = 0.000075;
        tau2 = tau1 / 20;// not used
        tau3 = tau1 / 50;//
        i = 1. / (2. * M_PI * tau1);
        j = 1. / (2. * M_PI * tau2);
        k = 1. / (2. * M_PI * tau3);
        break;
    }

    /* convert corner frequencies to rad/s; t is the sampling period */
    i *= 2 * M_PI;
    j *= 2 * M_PI;
    k *= 2 * M_PI;
    t = 1. / sr;

    //swap a1 b1, a2 b2
    if (s->type == 7 || s->type == 8) {
        /* FM-KF presets: single high shelf, gain matched at Nyquist */
        double tau = (s->type == 7 ? 0.000050 : 0.000075);
        double f = 1.0 / (2 * M_PI * tau);
        double nyq = sr * 0.5;
        double gain = sqrt(1.0 + nyq * nyq / (f * f)); // gain at Nyquist
        double cfreq = sqrt((gain - 1.0) * f * f);     // frequency
        double q = 1.0;

        if (s->type == 8)
            q = pow((sr / 3269.0) + 19.5, -0.25);      // somewhat poor curve-fit
        if (s->type == 7)
            q = pow((sr / 4750.0) + 19.5, -0.25);
        if (s->mode == 0)
            set_highshelf_rbj(&s->rc[0].r1, cfreq, q, 1. / gain, sr);
        else
            set_highshelf_rbj(&s->rc[0].r1, cfreq, q, gain, sr);
        s->rc[0].use_brickw = 0;
    } else {
        s->rc[0].use_brickw = 1;
        if (s->mode == 0) { // Reproduction
            g  = 1. / (4.+2.*i*t+2.*k*t+i*k*t*t);
            a0 = (2.*t+j*t*t)*g;
            a1 = (2.*j*t*t)*g;
            a2 = (-2.*t+j*t*t)*g;
            b1 = (-8.+2.*i*k*t*t)*g;
            b2 = (4.-2.*i*t-2.*k*t+i*k*t*t)*g;
        } else { // Production
            g  = 1. / (2.*t+j*t*t);
            a0 = (4.+2.*i*t+2.*k*t+i*k*t*t)*g;
            a1 = (-8.+2.*i*k*t*t)*g;
            a2 = (4.-2.*i*t-2.*k*t+i*k*t*t)*g;
            b1 = (2.*j*t*t)*g;
            b2 = (-2.*t+j*t*t)*g;
        }

        coeffs.a0 = a0;
        coeffs.a1 = a1;
        coeffs.a2 = a2;
        coeffs.b1 = b1;
        coeffs.b2 = b2;

        // the coeffs above give non-normalized value, so it should be normalized to produce 0dB at 1 kHz
        // find actual gain
        // Note: for FM emphasis, use 100 Hz for normalization instead
        gain1kHz = freq_gain(&coeffs, 1000.0, sr);
        // divide one filter's x[n-m] coefficients by that value
        gc = 1.0 / gain1kHz;
        s->rc[0].r1.a0 = coeffs.a0 * gc;
        s->rc[0].r1.a1 = coeffs.a1 * gc;
        s->rc[0].r1.a2 = coeffs.a2 * gc;
        s->rc[0].r1.b1 = coeffs.b1;
        s->rc[0].r1.b2 = coeffs.b2;
    }

    /* brick-wall low-pass just below Nyquist (or 21 kHz, whichever is lower) */
    cutfreq = FFMIN(0.45 * sr, 21000.);
    set_lp_rbj(&s->rc[0].brickw, cutfreq, 0.707, sr, 1.);

    /* replicate channel 0's configuration to every other channel */
    for (ch = 1; ch < inlink->channels; ch++) {
        memcpy(&s->rc[ch], &s->rc[0], sizeof(RIAACurve));
    }

    return 0;
}
/* Release the per-channel filter array allocated in config_input(). */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioEmphasisContext *priv = ctx->priv;

    av_freep(&priv->rc);
}
/* Input pad: coefficients are built in config_input, frames processed
 * directly in filter_frame. */
static const AVFilterPad avfilter_af_aemphasis_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_aemphasis_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_aemphasis = {
    .name          = "aemphasis",
    .description   = NULL_IF_CONFIG_SMALL("Audio emphasis."),
    .priv_size     = sizeof(AudioEmphasisContext),
    .priv_class    = &aemphasis_class,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_af_aemphasis_inputs,
    .outputs       = avfilter_af_aemphasis_outputs,
};

641
externals/ffmpeg/libavfilter/af_afade.c vendored Executable file
View File

@@ -0,0 +1,641 @@
/*
* Copyright (c) 2013-2015 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* fade audio filter
*/
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
/* Private context shared by afade and (presumably) acrossfade — the
 * crossfade-related members are not exercised in the code visible here. */
typedef struct AudioFadeContext {
    const AVClass *class;
    int type;                   // fade direction: 0 = in, 1 = out
    int curve, curve2;          // fade curve(s); curve2 presumably for the
                                // second crossfade input — confirm in acrossfade
    int64_t nb_samples;         // fade duration in samples
    int64_t start_sample;       // first sample of the fade
    int64_t duration;           // fade duration in time units (converted
                                // to nb_samples in config_output)
    int64_t start_time;         // fade start in time units
    int overlap;
    int cf0_eof;
    int crossfade_is_over;
    int64_t pts;
    // per-format fade routines, selected in config_output()
    void (*fade_samples)(uint8_t **dst, uint8_t * const *src,
                         int nb_samples, int channels, int direction,
                         int64_t start, int64_t range, int curve);
    void (*crossfade_samples)(uint8_t **dst, uint8_t * const *cf0,
                              uint8_t * const *cf1,
                              int nb_samples, int channels,
                              int curve0, int curve1);
} AudioFadeContext;

/* Available fade curve shapes; see fade_gain() for each formula. */
enum CurveType { TRI, QSIN, ESIN, HSIN, LOG, IPAR, QUA, CUB, SQU, CBR, PAR, EXP, IQSIN, IHSIN, DESE, DESI, LOSI, NONE, NB_CURVES };

#define OFFSET(x) offsetof(AudioFadeContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* Negotiate all four sample depths in both packed and planar layouts. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
        AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *chlayouts;
    AVFilterFormats *fmts;
    int err;

    chlayouts = ff_all_channel_counts();
    if (!chlayouts)
        return AVERROR(ENOMEM);
    err = ff_set_common_channel_layouts(ctx, chlayouts);
    if (err < 0)
        return err;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    err = ff_set_common_formats(ctx, fmts);
    if (err < 0)
        return err;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/**
 * Compute the fade gain at position `index` within a fade of `range`
 * samples.  The linear ramp index/range is clipped to [0, 1], then shaped
 * by the selected curve; TRI (linear) falls through the switch unchanged.
 */
static double fade_gain(int curve, int64_t index, int64_t range)
{
#define CUBE(a) ((a)*(a)*(a))
    double gain;

    gain = av_clipd(1.0 * index / range, 0, 1.0);

    switch (curve) {
    case QSIN:
        /* quarter of a sine period */
        gain = sin(gain * M_PI / 2.0);
        break;
    case IQSIN:
        /* 0.6... = 2 / M_PI */
        gain = 0.6366197723675814 * asin(gain);
        break;
    case ESIN:
        gain = 1.0 - cos(M_PI / 4.0 * (CUBE(2.0*gain - 1) + 1));
        break;
    case HSIN:
        /* half of a sine period (raised cosine) */
        gain = (1.0 - cos(gain * M_PI)) / 2.0;
        break;
    case IHSIN:
        /* 0.3... = 1 / M_PI */
        gain = 0.3183098861837907 * acos(1 - 2 * gain);
        break;
    case EXP:
        /* -11.5... = 5*ln(0.1) */
        gain = exp(-11.512925464970227 * (1 - gain));
        break;
    case LOG:
        gain = av_clipd(1 + 0.2 * log10(gain), 0, 1.0);
        break;
    case PAR:
        gain = 1 - sqrt(1 - gain);
        break;
    case IPAR:
        gain = (1 - (1 - gain) * (1 - gain));
        break;
    case QUA:
        gain *= gain;
        break;
    case CUB:
        gain = CUBE(gain);
        break;
    case SQU:
        gain = sqrt(gain);
        break;
    case CBR:
        gain = cbrt(gain);
        break;
    case DESE:
        gain = gain <= 0.5 ? cbrt(2 * gain) / 2: 1 - cbrt(2 * (1 - gain)) / 2;
        break;
    case DESI:
        gain = gain <= 0.5 ? CUBE(2 * gain) / 2: 1 - CUBE(2 * (1 - gain)) / 2;
        break;
    case LOSI: {
                   /* logistic sigmoid, rescaled so the endpoints hit 0 and 1 */
                   const double a = 1. / (1. - 0.787) - 1;
                   double A = 1. / (1.0 + exp(0 -((gain-0.5) * a * 2.0)));
                   double B = 1. / (1.0 + exp(a));
                   double C = 1. / (1.0 + exp(0-a));
                   gain = (A - B) / (C - B);
               }
        break;
    case NONE:
        gain = 1.0;
        break;
    }

    return gain;
}
/*
 * Expand per-format fade routines.  The gain is recomputed once per sample
 * frame from fade_gain(); `dir` is +1 for fade-in and -1 for fade-out so
 * that start + i*dir walks the curve in the right direction.
 */

/* Planar layout: one data plane per channel. */
#define FADE_PLANAR(name, type)                                           \
static void fade_samples_## name ##p(uint8_t **dst, uint8_t * const *src, \
                                     int nb_samples, int channels, int dir,\
                                     int64_t start, int64_t range, int curve)\
{                                                                         \
    for (int i = 0; i < nb_samples; i++) {                                \
        const double gain = fade_gain(curve, start + i * dir, range);     \
                                                                          \
        for (int ch = 0; ch < channels; ch++) {                           \
            const type *in = (const type *)src[ch];                       \
            type *out      = (type *)dst[ch];                             \
                                                                          \
            out[i] = in[i] * gain;                                        \
        }                                                                 \
    }                                                                     \
}

/* Packed layout: channels interleaved in plane 0. */
#define FADE(name, type)                                                  \
static void fade_samples_## name (uint8_t **dst, uint8_t * const *src,    \
                                  int nb_samples, int channels, int dir,  \
                                  int64_t start, int64_t range, int curve)\
{                                                                         \
    type *out      = (type *)dst[0];                                      \
    const type *in = (const type *)src[0];                                \
    int pos = 0;                                                          \
                                                                          \
    for (int i = 0; i < nb_samples; i++) {                                \
        const double gain = fade_gain(curve, start + i * dir, range);     \
                                                                          \
        for (int ch = 0; ch < channels; ch++, pos++)                      \
            out[pos] = in[pos] * gain;                                    \
    }                                                                     \
}

FADE_PLANAR(dbl, double)
FADE_PLANAR(flt, float)
FADE_PLANAR(s16, int16_t)
FADE_PLANAR(s32, int32_t)

FADE(dbl, double)
FADE(flt, float)
FADE(s16, int16_t)
FADE(s32, int32_t)
/* Finalize configuration once the output sample rate and format are
 * negotiated: convert time-based options to sample counts and select the
 * per-format fade routine. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioFadeContext *s = ctx->priv;

    /* time-unit options override/define their sample-count counterparts */
    if (s->duration)
        s->nb_samples = av_rescale(s->duration, outlink->sample_rate, AV_TIME_BASE);
    if (s->start_time)
        s->start_sample = av_rescale(s->start_time, outlink->sample_rate, AV_TIME_BASE);

    switch (outlink->format) {
    case AV_SAMPLE_FMT_DBL:  s->fade_samples = fade_samples_dbl;  break;
    case AV_SAMPLE_FMT_DBLP: s->fade_samples = fade_samples_dblp; break;
    case AV_SAMPLE_FMT_FLT:  s->fade_samples = fade_samples_flt;  break;
    case AV_SAMPLE_FMT_FLTP: s->fade_samples = fade_samples_fltp; break;
    case AV_SAMPLE_FMT_S16:  s->fade_samples = fade_samples_s16;  break;
    case AV_SAMPLE_FMT_S16P: s->fade_samples = fade_samples_s16p; break;
    case AV_SAMPLE_FMT_S32:  s->fade_samples = fade_samples_s32;  break;
    case AV_SAMPLE_FMT_S32P: s->fade_samples = fade_samples_s32p; break;
    }

    return 0;
}
#if CONFIG_AFADE_FILTER
/* afade options: direction, start position (in samples or as a duration),
 * fade length, and the gain-curve shape consumed by fade_gain(). */
static const AVOption afade_options[] = {
    { "type",         "set the fade direction",                      OFFSET(type),         AV_OPT_TYPE_INT,    {.i64 = 0    }, 0, 1, FLAGS, "type" },
    { "t",            "set the fade direction",                      OFFSET(type),         AV_OPT_TYPE_INT,    {.i64 = 0    }, 0, 1, FLAGS, "type" },
    { "in",           "fade-in",                                     0,                    AV_OPT_TYPE_CONST,  {.i64 = 0    }, 0, 0, FLAGS, "type" },
    { "out",          "fade-out",                                    0,                    AV_OPT_TYPE_CONST,  {.i64 = 1    }, 0, 0, FLAGS, "type" },
    { "start_sample", "set number of first sample to start fading",  OFFSET(start_sample), AV_OPT_TYPE_INT64,  {.i64 = 0    }, 0, INT64_MAX, FLAGS },
    { "ss",           "set number of first sample to start fading",  OFFSET(start_sample), AV_OPT_TYPE_INT64,  {.i64 = 0    }, 0, INT64_MAX, FLAGS },
    { "nb_samples",   "set number of samples for fade duration",     OFFSET(nb_samples),   AV_OPT_TYPE_INT64,  {.i64 = 44100}, 1, INT64_MAX, FLAGS },
    { "ns",           "set number of samples for fade duration",     OFFSET(nb_samples),   AV_OPT_TYPE_INT64,  {.i64 = 44100}, 1, INT64_MAX, FLAGS },
    { "start_time",   "set time to start fading",                    OFFSET(start_time),   AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT64_MAX, FLAGS },
    { "st",           "set time to start fading",                    OFFSET(start_time),   AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT64_MAX, FLAGS },
    { "duration",     "set fade duration",                           OFFSET(duration),     AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT64_MAX, FLAGS },
    { "d",            "set fade duration",                           OFFSET(duration),     AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, INT64_MAX, FLAGS },
    { "curve",        "set fade curve type",                         OFFSET(curve),        AV_OPT_TYPE_INT,    {.i64 = TRI  }, 0, NB_CURVES - 1, FLAGS, "curve" },
    { "c",            "set fade curve type",                         OFFSET(curve),        AV_OPT_TYPE_INT,    {.i64 = TRI  }, 0, NB_CURVES - 1, FLAGS, "curve" },
    { "tri",          "linear slope",                                0,                    AV_OPT_TYPE_CONST,  {.i64 = TRI  }, 0, 0, FLAGS, "curve" },
    { "qsin",         "quarter of sine wave",                        0,                    AV_OPT_TYPE_CONST,  {.i64 = QSIN }, 0, 0, FLAGS, "curve" },
    { "esin",         "exponential sine wave",                       0,                    AV_OPT_TYPE_CONST,  {.i64 = ESIN }, 0, 0, FLAGS, "curve" },
    { "hsin",         "half of sine wave",                           0,                    AV_OPT_TYPE_CONST,  {.i64 = HSIN }, 0, 0, FLAGS, "curve" },
    { "log",          "logarithmic",                                 0,                    AV_OPT_TYPE_CONST,  {.i64 = LOG  }, 0, 0, FLAGS, "curve" },
    { "ipar",         "inverted parabola",                           0,                    AV_OPT_TYPE_CONST,  {.i64 = IPAR }, 0, 0, FLAGS, "curve" },
    { "qua",          "quadratic",                                   0,                    AV_OPT_TYPE_CONST,  {.i64 = QUA  }, 0, 0, FLAGS, "curve" },
    { "cub",          "cubic",                                       0,                    AV_OPT_TYPE_CONST,  {.i64 = CUB  }, 0, 0, FLAGS, "curve" },
    { "squ",          "square root",                                 0,                    AV_OPT_TYPE_CONST,  {.i64 = SQU  }, 0, 0, FLAGS, "curve" },
    { "cbr",          "cubic root",                                  0,                    AV_OPT_TYPE_CONST,  {.i64 = CBR  }, 0, 0, FLAGS, "curve" },
    { "par",          "parabola",                                    0,                    AV_OPT_TYPE_CONST,  {.i64 = PAR  }, 0, 0, FLAGS, "curve" },
    { "exp",          "exponential",                                 0,                    AV_OPT_TYPE_CONST,  {.i64 = EXP  }, 0, 0, FLAGS, "curve" },
    { "iqsin",        "inverted quarter of sine wave",               0,                    AV_OPT_TYPE_CONST,  {.i64 = IQSIN}, 0, 0, FLAGS, "curve" },
    { "ihsin",        "inverted half of sine wave",                  0,                    AV_OPT_TYPE_CONST,  {.i64 = IHSIN}, 0, 0, FLAGS, "curve" },
    { "dese",         "double-exponential seat",                     0,                    AV_OPT_TYPE_CONST,  {.i64 = DESE }, 0, 0, FLAGS, "curve" },
    { "desi",         "double-exponential sigmoid",                  0,                    AV_OPT_TYPE_CONST,  {.i64 = DESI }, 0, 0, FLAGS, "curve" },
    { "losi",         "logistic sigmoid",                            0,                    AV_OPT_TYPE_CONST,  {.i64 = LOSI }, 0, 0, FLAGS, "curve" },
    { "nofade",       "no fade; keep audio as-is",                   0,                    AV_OPT_TYPE_CONST,  {.i64 = NONE }, 0, 0, FLAGS, "curve" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(afade);
/**
 * afade init: reject option combinations whose fade end position
 * (start_sample + nb_samples) would overflow int64_t.
 */
static av_cold int init(AVFilterContext *ctx)
{
    const AudioFadeContext *s = ctx->priv;

    return s->start_sample > INT64_MAX - s->nb_samples ? AVERROR(EINVAL) : 0;
}
/*
 * afade per-frame processing.  Depending on where the frame sits relative to
 * the fade window [start_sample, start_sample + nb_samples), it is passed
 * through untouched, fully silenced, or run through the fade worker.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AudioFadeContext *s  = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int nb_samples       = buf->nb_samples;
    AVFrame *out_buf;
    /* Frame start position converted from pts to the sample clock. */
    int64_t cur_sample = av_rescale_q(buf->pts, inlink->time_base, (AVRational){1, inlink->sample_rate});

    /* Untouched region: after a completed fade-in (type == 0), or before a
     * fade-out (type == 1) that has not started yet. */
    if ((!s->type && (s->start_sample + s->nb_samples < cur_sample)) ||
        ( s->type && (cur_sample + nb_samples < s->start_sample)))
        return ff_filter_frame(outlink, buf);

    if (av_frame_is_writable(buf)) {
        out_buf = buf; /* fade in place */
    } else {
        out_buf = ff_get_audio_buffer(outlink, nb_samples);
        if (!out_buf)
            return AVERROR(ENOMEM);
        av_frame_copy_props(out_buf, buf);
    }

    /* Fully silent region: before the fade-in starts, or after the fade-out
     * has finished. */
    if ((!s->type && (cur_sample + nb_samples < s->start_sample)) ||
        ( s->type && (s->start_sample + s->nb_samples < cur_sample))) {
        av_samples_set_silence(out_buf->extended_data, 0, nb_samples,
                               out_buf->channels, out_buf->format);
    } else {
        int64_t start;

        /* Offset into the fade ramp; fade-out walks the ramp backwards via
         * dir = -1 in the worker call below. */
        if (!s->type)
            start = cur_sample - s->start_sample;
        else
            start = s->start_sample + s->nb_samples - cur_sample;

        s->fade_samples(out_buf->extended_data, buf->extended_data,
                        nb_samples, buf->channels,
                        s->type ? -1 : 1, start,
                        s->nb_samples, s->curve);
    }

    if (buf != out_buf)
        av_frame_free(&buf);

    return ff_filter_frame(outlink, out_buf);
}
/* Single audio input, processed frame by frame. */
static const AVFilterPad avfilter_af_afade_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output; config_output resolves format/rate-dependent state. */
static const AVFilterPad avfilter_af_afade_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};
/* afade filter definition. */
AVFilter ff_af_afade = {
    .name          = "afade",
    .description   = NULL_IF_CONFIG_SMALL("Fade in/out input audio."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioFadeContext),
    .init          = init,
    .inputs        = avfilter_af_afade_inputs,
    .outputs       = avfilter_af_afade_outputs,
    .priv_class    = &afade_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};
#endif /* CONFIG_AFADE_FILTER */
#if CONFIG_ACROSSFADE_FILTER
/* acrossfade options: crossfade length (samples or duration), whether the two
 * streams overlap during the fade, and independent curves for each stream. */
static const AVOption acrossfade_options[] = {
    { "nb_samples",   "set number of samples for cross fade duration", OFFSET(nb_samples),   AV_OPT_TYPE_INT,    {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
    { "ns",           "set number of samples for cross fade duration", OFFSET(nb_samples),   AV_OPT_TYPE_INT,    {.i64 = 44100}, 1, INT32_MAX/10, FLAGS },
    { "duration",     "set cross fade duration",                       OFFSET(duration),     AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, 60000000, FLAGS },
    { "d",            "set cross fade duration",                       OFFSET(duration),     AV_OPT_TYPE_DURATION, {.i64 = 0. }, 0, 60000000, FLAGS },
    { "overlap",      "overlap 1st stream end with 2nd stream start",  OFFSET(overlap),      AV_OPT_TYPE_BOOL,   {.i64 = 1    }, 0, 1, FLAGS },
    { "o",            "overlap 1st stream end with 2nd stream start",  OFFSET(overlap),      AV_OPT_TYPE_BOOL,   {.i64 = 1    }, 0, 1, FLAGS },
    { "curve1",       "set fade curve type for 1st stream",            OFFSET(curve),        AV_OPT_TYPE_INT,    {.i64 = TRI  }, 0, NB_CURVES - 1, FLAGS, "curve" },
    { "c1",           "set fade curve type for 1st stream",            OFFSET(curve),        AV_OPT_TYPE_INT,    {.i64 = TRI  }, 0, NB_CURVES - 1, FLAGS, "curve" },
    { "tri",          "linear slope",                                  0,                    AV_OPT_TYPE_CONST,  {.i64 = TRI  }, 0, 0, FLAGS, "curve" },
    { "qsin",         "quarter of sine wave",                          0,                    AV_OPT_TYPE_CONST,  {.i64 = QSIN }, 0, 0, FLAGS, "curve" },
    { "esin",         "exponential sine wave",                         0,                    AV_OPT_TYPE_CONST,  {.i64 = ESIN }, 0, 0, FLAGS, "curve" },
    { "hsin",         "half of sine wave",                             0,                    AV_OPT_TYPE_CONST,  {.i64 = HSIN }, 0, 0, FLAGS, "curve" },
    { "log",          "logarithmic",                                   0,                    AV_OPT_TYPE_CONST,  {.i64 = LOG  }, 0, 0, FLAGS, "curve" },
    { "ipar",         "inverted parabola",                             0,                    AV_OPT_TYPE_CONST,  {.i64 = IPAR }, 0, 0, FLAGS, "curve" },
    { "qua",          "quadratic",                                     0,                    AV_OPT_TYPE_CONST,  {.i64 = QUA  }, 0, 0, FLAGS, "curve" },
    { "cub",          "cubic",                                         0,                    AV_OPT_TYPE_CONST,  {.i64 = CUB  }, 0, 0, FLAGS, "curve" },
    { "squ",          "square root",                                   0,                    AV_OPT_TYPE_CONST,  {.i64 = SQU  }, 0, 0, FLAGS, "curve" },
    { "cbr",          "cubic root",                                    0,                    AV_OPT_TYPE_CONST,  {.i64 = CBR  }, 0, 0, FLAGS, "curve" },
    { "par",          "parabola",                                      0,                    AV_OPT_TYPE_CONST,  {.i64 = PAR  }, 0, 0, FLAGS, "curve" },
    { "exp",          "exponential",                                   0,                    AV_OPT_TYPE_CONST,  {.i64 = EXP  }, 0, 0, FLAGS, "curve" },
    { "iqsin",        "inverted quarter of sine wave",                 0,                    AV_OPT_TYPE_CONST,  {.i64 = IQSIN}, 0, 0, FLAGS, "curve" },
    { "ihsin",        "inverted half of sine wave",                    0,                    AV_OPT_TYPE_CONST,  {.i64 = IHSIN}, 0, 0, FLAGS, "curve" },
    { "dese",         "double-exponential seat",                       0,                    AV_OPT_TYPE_CONST,  {.i64 = DESE }, 0, 0, FLAGS, "curve" },
    { "desi",         "double-exponential sigmoid",                    0,                    AV_OPT_TYPE_CONST,  {.i64 = DESI }, 0, 0, FLAGS, "curve" },
    { "losi",         "logistic sigmoid",                              0,                    AV_OPT_TYPE_CONST,  {.i64 = LOSI }, 0, 0, FLAGS, "curve" },
    { "nofade",       "no fade; keep audio as-is",                     0,                    AV_OPT_TYPE_CONST,  {.i64 = NONE }, 0, 0, FLAGS, "curve" },
    { "curve2",       "set fade curve type for 2nd stream",            OFFSET(curve2),       AV_OPT_TYPE_INT,    {.i64 = TRI  }, 0, NB_CURVES - 1, FLAGS, "curve" },
    { "c2",           "set fade curve type for 2nd stream",            OFFSET(curve2),       AV_OPT_TYPE_INT,    {.i64 = TRI  }, 0, NB_CURVES - 1, FLAGS, "curve" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(acrossfade);
/*
 * Generate a crossfade routine for one planar format: stream cf0 is faded
 * out (gain walks the curve backwards) while cf1 is faded in, and the two
 * are summed per sample into dst.
 */
#define CROSSFADE_PLANAR(name, type)                                        \
static void crossfade_samples_## name ##p(uint8_t **dst, uint8_t * const *cf0, \
                                          uint8_t * const *cf1,             \
                                          int nb_samples, int channels,     \
                                          int curve0, int curve1)           \
{                                                                           \
    int i, c;                                                               \
                                                                            \
    for (i = 0; i < nb_samples; i++) {                                      \
        double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples);   \
        double gain1 = fade_gain(curve1, i, nb_samples);                    \
        for (c = 0; c < channels; c++) {                                    \
            type *d = (type *)dst[c];                                       \
            const type *s0 = (type *)cf0[c];                                \
            const type *s1 = (type *)cf1[c];                                \
                                                                            \
            d[i] = s0[i] * gain0 + s1[i] * gain1;                           \
        }                                                                   \
    }                                                                       \
}
/*
 * Generate a crossfade routine for one packed (interleaved) format; same
 * gains as the planar variant, with a running index 'k' over interleaved
 * samples.
 */
#define CROSSFADE(name, type)                                               \
static void crossfade_samples_## name (uint8_t **dst, uint8_t * const *cf0, \
                                       uint8_t * const *cf1,                \
                                       int nb_samples, int channels,        \
                                       int curve0, int curve1)              \
{                                                                           \
    type *d = (type *)dst[0];                                               \
    const type *s0 = (type *)cf0[0];                                        \
    const type *s1 = (type *)cf1[0];                                        \
    int i, c, k = 0;                                                        \
                                                                            \
    for (i = 0; i < nb_samples; i++) {                                      \
        double gain0 = fade_gain(curve0, nb_samples - 1 - i, nb_samples);   \
        double gain1 = fade_gain(curve1, i, nb_samples);                    \
        for (c = 0; c < channels; c++, k++)                                 \
            d[k] = s0[k] * gain0 + s1[k] * gain1;                           \
    }                                                                       \
}
/* Instantiate the crossfade workers for every supported sample format. */
CROSSFADE_PLANAR(dbl, double)
CROSSFADE_PLANAR(flt, float)
CROSSFADE_PLANAR(s16, int16_t)
CROSSFADE_PLANAR(s32, int32_t)

CROSSFADE(dbl, double)
CROSSFADE(flt, float)
CROSSFADE(s16, int16_t)
CROSSFADE(s32, int32_t)
/*
 * acrossfade scheduling.  Phase 1: stream input 0 through until only
 * nb_samples of it remain queued.  Phase 2: once input 0 hits EOF and both
 * inputs have nb_samples queued, mix them (overlap=1) or fade them
 * back-to-back (overlap=0).  Phase 3: forward input 1, rewriting pts so the
 * output timeline keeps running from s->pts.
 */
static int activate(AVFilterContext *ctx)
{
    AudioFadeContext *s   = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *in = NULL, *out, *cf[2] = { NULL };
    int ret = 0, nb_samples, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);

    /* Phase 3: crossfade done, pass the second input straight through. */
    if (s->crossfade_is_over) {
        ret = ff_inlink_consume_frame(ctx->inputs[1], &in);
        if (ret > 0) {
            in->pts = s->pts;
            s->pts += av_rescale_q(in->nb_samples,
                (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
            return ff_filter_frame(outlink, in);
        } else if (ret < 0) {
            return ret;
        } else if (ff_inlink_acknowledge_status(ctx->inputs[1], &status, &pts)) {
            /* Second input finished: propagate its status downstream. */
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            return 0;
        } else if (!ret) {
            if (ff_outlink_frame_wanted(ctx->outputs[0])) {
                ff_inlink_request_frame(ctx->inputs[1]);
                return 0;
            }
        }
    }

    /* Phase 1: drain input 0 but keep the last nb_samples queued for the
     * upcoming crossfade. */
    if (ff_inlink_queued_samples(ctx->inputs[0]) > s->nb_samples) {
        nb_samples = ff_inlink_queued_samples(ctx->inputs[0]) - s->nb_samples;
        if (nb_samples > 0) {
            ret = ff_inlink_consume_samples(ctx->inputs[0], nb_samples, nb_samples, &in);
            if (ret < 0) {
                return ret;
            }
        }
        in->pts = s->pts;
        s->pts += av_rescale_q(in->nb_samples,
            (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
        return ff_filter_frame(outlink, in);
    } else if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->nb_samples &&
               ff_inlink_queued_samples(ctx->inputs[1]) >= s->nb_samples && s->cf0_eof) {
        /* Phase 2: both tails are available and input 0 has ended. */
        if (s->overlap) {
            /* Mix the two nb_samples-long chunks into one output frame. */
            out = ff_get_audio_buffer(outlink, s->nb_samples);
            if (!out)
                return AVERROR(ENOMEM);

            ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
            if (ret < 0) {
                av_frame_free(&out);
                return ret;
            }

            ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
            if (ret < 0) {
                av_frame_free(&out);
                return ret;
            }

            s->crossfade_samples(out->extended_data, cf[0]->extended_data,
                                 cf[1]->extended_data,
                                 s->nb_samples, out->channels,
                                 s->curve, s->curve2);
            out->pts = s->pts;
            s->pts += av_rescale_q(s->nb_samples,
                (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
            s->crossfade_is_over = 1;
            av_frame_free(&cf[0]);
            av_frame_free(&cf[1]);
            return ff_filter_frame(outlink, out);
        } else {
            /* No overlap: fade out input 0's tail, then fade in input 1's
             * head as two consecutive output frames. */
            out = ff_get_audio_buffer(outlink, s->nb_samples);
            if (!out)
                return AVERROR(ENOMEM);

            ret = ff_inlink_consume_samples(ctx->inputs[0], s->nb_samples, s->nb_samples, &cf[0]);
            if (ret < 0) {
                av_frame_free(&out);
                return ret;
            }

            s->fade_samples(out->extended_data, cf[0]->extended_data, s->nb_samples,
                            outlink->channels, -1, s->nb_samples - 1, s->nb_samples, s->curve);
            out->pts = s->pts;
            s->pts += av_rescale_q(s->nb_samples,
                (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
            av_frame_free(&cf[0]);
            ret = ff_filter_frame(outlink, out);
            if (ret < 0)
                return ret;

            out = ff_get_audio_buffer(outlink, s->nb_samples);
            if (!out)
                return AVERROR(ENOMEM);

            ret = ff_inlink_consume_samples(ctx->inputs[1], s->nb_samples, s->nb_samples, &cf[1]);
            if (ret < 0) {
                av_frame_free(&out);
                return ret;
            }

            s->fade_samples(out->extended_data, cf[1]->extended_data, s->nb_samples,
                            outlink->channels, 1, 0, s->nb_samples, s->curve2);
            out->pts = s->pts;
            s->pts += av_rescale_q(s->nb_samples,
                (AVRational){ 1, outlink->sample_rate }, outlink->time_base);
            s->crossfade_is_over = 1;
            av_frame_free(&cf[1]);
            return ff_filter_frame(outlink, out);
        }
    } else if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        /* Track EOF on input 0 and keep requesting whichever input we are
         * currently consuming from. */
        if (!s->cf0_eof && ff_outlink_get_status(ctx->inputs[0])) {
            s->cf0_eof = 1;
        }
        if (ff_outlink_get_status(ctx->inputs[1])) {
            ff_outlink_set_status(ctx->outputs[0], AVERROR_EOF, AV_NOPTS_VALUE);
            return 0;
        }
        if (!s->cf0_eof)
            ff_inlink_request_frame(ctx->inputs[0]);
        else
            ff_inlink_request_frame(ctx->inputs[1]);
        return 0;
    }

    return ret;
}
/**
 * acrossfade output configuration: the two inputs must share a sample rate;
 * output properties are copied from the first input, the format-specific
 * crossfade worker is selected, and the shared config_output() converts the
 * time-based options to sample counts.
 */
static int acrossfade_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioFadeContext *s  = ctx->priv;

    if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
        av_log(ctx, AV_LOG_ERROR,
               "Inputs must have the same sample rate "
               "%d for in0 vs %d for in1\n",
               ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
        return AVERROR(EINVAL);
    }

    outlink->sample_rate    = ctx->inputs[0]->sample_rate;
    outlink->time_base      = ctx->inputs[0]->time_base;
    outlink->channel_layout = ctx->inputs[0]->channel_layout;
    outlink->channels       = ctx->inputs[0]->channels;

    switch (outlink->format) {
    case AV_SAMPLE_FMT_DBL:  s->crossfade_samples = crossfade_samples_dbl;  break;
    case AV_SAMPLE_FMT_DBLP: s->crossfade_samples = crossfade_samples_dblp; break;
    case AV_SAMPLE_FMT_FLT:  s->crossfade_samples = crossfade_samples_flt;  break;
    case AV_SAMPLE_FMT_FLTP: s->crossfade_samples = crossfade_samples_fltp; break;
    case AV_SAMPLE_FMT_S16:  s->crossfade_samples = crossfade_samples_s16;  break;
    case AV_SAMPLE_FMT_S16P: s->crossfade_samples = crossfade_samples_s16p; break;
    case AV_SAMPLE_FMT_S32:  s->crossfade_samples = crossfade_samples_s32;  break;
    case AV_SAMPLE_FMT_S32P: s->crossfade_samples = crossfade_samples_s32p; break;
    }

    /* Fix: propagate config_output()'s return value instead of silently
     * discarding it. */
    return config_output(outlink);
}
/* Two audio inputs: the stream being faded out, then the one faded in. */
static const AVFilterPad avfilter_af_acrossfade_inputs[] = {
    {
        .name = "crossfade0",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    {
        .name = "crossfade1",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Single audio output, configured once both input links are negotiated. */
static const AVFilterPad avfilter_af_acrossfade_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = acrossfade_config_output,
    },
    { NULL }
};
/* acrossfade filter definition. */
AVFilter ff_af_acrossfade = {
    .name          = "acrossfade",
    .description   = NULL_IF_CONFIG_SMALL("Cross fade two input audio streams."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioFadeContext),
    .activate      = activate,
    .priv_class    = &acrossfade_class,
    .inputs        = avfilter_af_acrossfade_inputs,
    .outputs       = avfilter_af_acrossfade_outputs,
};
#endif /* CONFIG_ACROSSFADE_FILTER */

1434
externals/ffmpeg/libavfilter/af_afftdn.c vendored Executable file

File diff suppressed because it is too large Load Diff

489
externals/ffmpeg/libavfilter/af_afftfilt.c vendored Executable file
View File

@@ -0,0 +1,489 @@
/*
* Copyright (c) 2016 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or modify it
* under the terms of the GNU Lesser General Public License as published
* by the Free Software Foundation; either version 2.1 of the License,
* or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/audio_fifo.h"
#include "libavutil/avstring.h"
#include "libavfilter/internal.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"
#include "libavutil/eval.h"
#include "audio.h"
#include "filters.h"
#include "window_func.h"
/* Per-instance state for the afftfilt filter (STFT, per-channel expression
 * evaluation on the bins, inverse STFT with overlap-add). */
typedef struct AFFTFiltContext {
    const AVClass *class;
    char *real_str;            ///< '|'-separated per-channel expressions for the real part
    char *img_str;             ///< '|'-separated per-channel expressions for the imaginary part
    int fft_size;              ///< requested window size (rounded down to a power of two)
    int fft_bits;              ///< log2 of the actual transform size

    FFTContext *fft, *ifft;    ///< forward and inverse transforms
    FFTComplex **fft_data;     ///< per-channel windowed input / forward-FFT bins
    FFTComplex **fft_temp;     ///< per-channel modified bins / inverse-FFT output
    int nb_exprs;              ///< number of parsed real expressions (== channels)
    int channels;              ///< channel count cached at config_input time
    int window_size;           ///< actual analysis window size (1 << fft_bits)
    AVExpr **real;             ///< compiled real-part expression per channel
    AVExpr **imag;             ///< compiled imaginary-part expression per channel
    AVAudioFifo *fifo;         ///< input sample queue feeding whole windows
    int64_t pts;               ///< next output pts
    int hop_size;              ///< analysis hop, window_size * (1 - overlap)
    float overlap;             ///< window overlap fraction [0,1]
    AVFrame *buffer;           ///< overlap-add accumulator (window_size * 2 samples)
    int eof;                   ///< set once input EOF has been acknowledged
    int win_func;              ///< analysis window function id
    float *window_func_lut;    ///< precomputed window samples
} AFFTFiltContext;
/* Variables exposed to the user expressions; order must match the enum. */
static const char *const var_names[] = {            "sr",     "b",       "nb",        "ch",        "chs",   "pts",     "re",     "im", NULL };
enum                                   { VAR_SAMPLE_RATE, VAR_BIN, VAR_NBBINS, VAR_CHANNEL, VAR_CHANNELS, VAR_PTS, VAR_REAL, VAR_IMAG, VAR_VARS_NB };
#define OFFSET(x) offsetof(AFFTFiltContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* afftfilt options: bin expressions, analysis window size/function, overlap. */
static const AVOption afftfilt_options[] = {
    { "real", "set channels real expressions",       OFFSET(real_str), AV_OPT_TYPE_STRING, {.str = "re" }, 0, 0, A },
    { "imag", "set channels imaginary expressions",  OFFSET(img_str),  AV_OPT_TYPE_STRING, {.str = "im" }, 0, 0, A },
    { "win_size", "set window size", OFFSET(fft_size), AV_OPT_TYPE_INT, {.i64=4096}, 16, 131072, A },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64 = WFUNC_HANNING}, 0, NB_WFUNC-1, A, "win_func" },
        { "rect",     "Rectangular",      0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT},     0, 0, A, "win_func" },
        { "bartlett", "Bartlett",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, A, "win_func" },
        { "hann",     "Hann",             0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING},  0, 0, A, "win_func" },
        { "hanning",  "Hanning",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING},  0, 0, A, "win_func" },
        { "hamming",  "Hamming",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING},  0, 0, A, "win_func" },
        { "blackman", "Blackman",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, A, "win_func" },
        { "welch",    "Welch",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH},    0, 0, A, "win_func" },
        { "flattop",  "Flat-top",         0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP},  0, 0, A, "win_func" },
        { "bharris",  "Blackman-Harris",  0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS},  0, 0, A, "win_func" },
        { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, A, "win_func" },
        { "bhann",    "Bartlett-Hann",    0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN},    0, 0, A, "win_func" },
        { "sine",     "Sine",             0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE},     0, 0, A, "win_func" },
        { "nuttall",  "Nuttall",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL},  0, 0, A, "win_func" },
        { "lanczos",  "Lanczos",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS},  0, 0, A, "win_func" },
        { "gauss",    "Gauss",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS},    0, 0, A, "win_func" },
        { "tukey",    "Tukey",            0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY},    0, 0, A, "win_func" },
        { "dolph",    "Dolph-Chebyshev",  0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH},    0, 0, A, "win_func" },
        { "cauchy",   "Cauchy",           0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY},   0, 0, A, "win_func" },
        { "parzen",   "Parzen",           0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN},   0, 0, A, "win_func" },
        { "poisson",  "Poisson",          0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON},  0, 0, A, "win_func" },
        { "bohman",   "Bohman",           0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BOHMAN},   0, 0, A, "win_func" },
    { "overlap", "set window overlap", OFFSET(overlap), AV_OPT_TYPE_FLOAT, {.dbl=0.75}, 0, 1, A },
    { NULL },
};

AVFILTER_DEFINE_CLASS(afftfilt);
/* Expression callback: real part of bin 'x' of channel 'ch', with both
 * indices clamped to valid ranges. */
static inline double getreal(void *priv, double x, double ch)
{
    AFFTFiltContext *s = priv;
    const int ichan = av_clip(ch, 0, s->nb_exprs - 1);
    const int ibin  = av_clip(x,  0, s->window_size / 2);

    return s->fft_data[ichan][ibin].re;
}
/* Expression callback: imaginary part of bin 'x' of channel 'ch', with both
 * indices clamped to valid ranges. */
static inline double getimag(void *priv, double x, double ch)
{
    AFFTFiltContext *s = priv;
    const int ichan = av_clip(ch, 0, s->nb_exprs - 1);
    const int ibin  = av_clip(x,  0, s->window_size / 2);

    return s->fft_data[ichan][ibin].im;
}
/* Thin wrappers matching the av_expr_parse() function pointer signature. */
static double realf(void *priv, double x, double ch) { return getreal(priv, x, ch); }
static double imagf(void *priv, double x, double ch) { return getimag(priv, x, ch); }

static const char *const func2_names[] = { "real", "imag", NULL };
/* Fix: give the table internal linkage ('static' was missing), matching
 * func2_names and keeping the global namespace clean. */
static double (*func2[])(void *, double, double) = { realf, imagf, NULL };
/*
 * afftfilt input configuration: create the forward/inverse transforms,
 * per-channel bin buffers, compile the per-channel real/imag expressions
 * ('|'-separated, last one repeated for remaining channels), and set up the
 * input FIFO, window LUT and overlap-add buffer.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AFFTFiltContext *s = ctx->priv;
    char *saveptr = NULL;
    int ret = 0, ch;
    float overlap;
    char *args;
    const char *last_expr = "1";

    s->channels = inlink->channels;
    s->pts  = AV_NOPTS_VALUE;
    /* av_log2 floors, so a non-power-of-two win_size is rounded down. */
    s->fft_bits = av_log2(s->fft_size);
    s->fft  = av_fft_init(s->fft_bits, 0);
    s->ifft = av_fft_init(s->fft_bits, 1);
    if (!s->fft || !s->ifft)
        return AVERROR(ENOMEM);

    s->window_size = 1 << s->fft_bits;

    s->fft_data = av_calloc(inlink->channels, sizeof(*s->fft_data));
    if (!s->fft_data)
        return AVERROR(ENOMEM);

    s->fft_temp = av_calloc(inlink->channels, sizeof(*s->fft_temp));
    if (!s->fft_temp)
        return AVERROR(ENOMEM);

    for (ch = 0; ch < inlink->channels; ch++) {
        s->fft_data[ch] = av_calloc(s->window_size, sizeof(**s->fft_data));
        if (!s->fft_data[ch])
            return AVERROR(ENOMEM);
    }

    for (ch = 0; ch < inlink->channels; ch++) {
        s->fft_temp[ch] = av_calloc(s->window_size, sizeof(**s->fft_temp));
        if (!s->fft_temp[ch])
            return AVERROR(ENOMEM);
    }

    s->real = av_calloc(inlink->channels, sizeof(*s->real));
    if (!s->real)
        return AVERROR(ENOMEM);

    s->imag = av_calloc(inlink->channels, sizeof(*s->imag));
    if (!s->imag)
        return AVERROR(ENOMEM);

    args = av_strdup(s->real_str);
    if (!args)
        return AVERROR(ENOMEM);

    /* Parse one real expression per channel; if fewer are given, the last
     * one is reused for the remaining channels. */
    for (ch = 0; ch < inlink->channels; ch++) {
        char *arg = av_strtok(ch == 0 ? args : NULL, "|", &saveptr);

        ret = av_expr_parse(&s->real[ch], arg ? arg : last_expr, var_names,
                            NULL, NULL, func2_names, func2, 0, ctx);
        if (ret < 0)
            goto fail;
        if (arg)
            last_expr = arg;
        s->nb_exprs++;
    }

    av_freep(&args);

    /* Imaginary expressions default to the real ones when not given. */
    args = av_strdup(s->img_str ? s->img_str : s->real_str);
    if (!args)
        return AVERROR(ENOMEM);

    saveptr = NULL;
    last_expr = "1";
    for (ch = 0; ch < inlink->channels; ch++) {
        char *arg = av_strtok(ch == 0 ? args : NULL, "|", &saveptr);

        ret = av_expr_parse(&s->imag[ch], arg ? arg : last_expr, var_names,
                            NULL, NULL, func2_names, func2, 0, ctx);
        if (ret < 0)
            goto fail;
        if (arg)
            last_expr = arg;
    }

    av_freep(&args);

    s->fifo = av_audio_fifo_alloc(inlink->format, inlink->channels, s->window_size);
    if (!s->fifo)
        return AVERROR(ENOMEM);

    s->window_func_lut = av_realloc_f(s->window_func_lut, s->window_size,
                                      sizeof(*s->window_func_lut));
    if (!s->window_func_lut)
        return AVERROR(ENOMEM);
    generate_window_func(s->window_func_lut, s->window_size, s->win_func, &overlap);
    /* overlap == 1 means "use the window function's preferred overlap". */
    if (s->overlap == 1)
        s->overlap = overlap;

    s->hop_size = s->window_size * (1 - s->overlap);
    if (s->hop_size <= 0)
        return AVERROR(EINVAL);

    s->buffer = ff_get_audio_buffer(inlink, s->window_size * 2);
    if (!s->buffer)
        return AVERROR(ENOMEM);

/* NOTE(review): the success path falls through to 'fail:'. That is harmless
 * here because 'args' was already freed (av_freep NULLed it) and 'ret' is 0
 * from the last av_expr_parse(), but an explicit 'return 0' would be
 * clearer — confirm before relying on 'ret' here. */
fail:
    av_freep(&args);

    return ret;
}
/**
 * Process one STFT hop: peek a full analysis window from the FIFO, window it,
 * run the forward FFT, evaluate the user expressions on bins 0..N/2, rebuild
 * a conjugate-symmetric spectrum, inverse-transform, overlap-add, and emit
 * hop_size samples.
 */
static int filter_frame(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AFFTFiltContext *s = ctx->priv;
    const int window_size = s->window_size;
    const float f = 1. / (s->window_size / 2);
    double values[VAR_VARS_NB];
    AVFrame *out, *in;
    int ch, n, ret, i;

    /* Scratch frame used only to peek one window out of the FIFO.
     * (The previous dead "if (!in)" guard around this allocation has been
     * dropped: 'in' was always NULL at this point.) */
    in = ff_get_audio_buffer(outlink, window_size);
    if (!in)
        return AVERROR(ENOMEM);

    ret = av_audio_fifo_peek(s->fifo, (void **)in->extended_data, window_size);
    if (ret < 0)
        goto fail;

    /* Window the input into the FFT buffers; zero-pad a short final window. */
    for (ch = 0; ch < inlink->channels; ch++) {
        const float *src = (float *)in->extended_data[ch];
        FFTComplex *fft_data = s->fft_data[ch];

        for (n = 0; n < in->nb_samples; n++) {
            fft_data[n].re = src[n] * s->window_func_lut[n];
            fft_data[n].im = 0;
        }
        for (; n < window_size; n++) {
            fft_data[n].re = 0;
            fft_data[n].im = 0;
        }
    }

    values[VAR_PTS]         = s->pts;
    values[VAR_SAMPLE_RATE] = inlink->sample_rate;
    values[VAR_NBBINS]      = window_size / 2;
    values[VAR_CHANNELS]    = inlink->channels;

    for (ch = 0; ch < inlink->channels; ch++) {
        FFTComplex *fft_data = s->fft_data[ch];

        av_fft_permute(s->fft, fft_data);
        av_fft_calc(s->fft, fft_data);
    }

    for (ch = 0; ch < inlink->channels; ch++) {
        FFTComplex *fft_data = s->fft_data[ch];
        FFTComplex *fft_temp = s->fft_temp[ch];
        float *buf = (float *)s->buffer->extended_data[ch];
        int x;

        values[VAR_CHANNEL] = ch;

        /* Evaluate the user expressions on the non-redundant bins. */
        for (n = 0; n <= window_size / 2; n++) {
            float fr, fi;

            values[VAR_BIN]  = n;
            values[VAR_REAL] = fft_data[n].re;
            values[VAR_IMAG] = fft_data[n].im;

            fr = av_expr_eval(s->real[ch], values, s);
            fi = av_expr_eval(s->imag[ch], values, s);

            fft_temp[n].re = fr;
            fft_temp[n].im = fi;
        }

        /* Mirror with conjugation so the inverse transform yields a real
         * signal. */
        for (n = window_size / 2 + 1, x = window_size / 2 - 1; n < window_size; n++, x--) {
            fft_temp[n].re =  fft_temp[x].re;
            fft_temp[n].im = -fft_temp[x].im;
        }

        av_fft_permute(s->ifft, fft_temp);
        av_fft_calc(s->ifft, fft_temp);

        /* Overlap-add the normalized inverse transform into the accumulator. */
        for (i = 0; i < window_size; i++) {
            buf[i] += s->fft_temp[ch][i].re * f;
        }
    }

    out = ff_get_audio_buffer(outlink, s->hop_size);
    if (!out) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    out->pts = s->pts;
    s->pts += av_rescale_q(s->hop_size, (AVRational){1, outlink->sample_rate}, outlink->time_base);

    for (ch = 0; ch < inlink->channels; ch++) {
        float *dst = (float *)out->extended_data[ch];
        float *buf = (float *)s->buffer->extended_data[ch];

        for (n = 0; n < s->hop_size; n++)
            dst[n] = buf[n] * (1.f - s->overlap);
        /* Slide the overlap-add accumulator by one hop.
         * Fix: byte count now uses sizeof(float) instead of the magic 4. */
        memmove(buf, buf + s->hop_size, window_size * sizeof(float));
    }

    ret = ff_filter_frame(outlink, out);
    if (ret < 0)
        goto fail;
    av_audio_fifo_drain(s->fifo, s->hop_size);
fail:
    av_frame_free(&in);
    return ret < 0 ? ret : 0;
}
/*
 * afftfilt scheduling: buffer incoming frames into the FIFO until a full
 * analysis window is available, then run filter_frame() per hop; on EOF,
 * flush the remaining (zero-padded) samples before propagating the status.
 */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AFFTFiltContext *s = ctx->priv;
    AVFrame *in = NULL;
    int ret = 0, status;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* Accumulate input until a full window is queued. */
    if (!s->eof && av_audio_fifo_size(s->fifo) < s->window_size) {
        ret = ff_inlink_consume_frame(inlink, &in);
        if (ret < 0)
            return ret;

        if (ret > 0) {
            ret = av_audio_fifo_write(s->fifo, (void **)in->extended_data,
                                      in->nb_samples);
            /* First frame's pts seeds the output timeline. */
            if (ret >= 0 && s->pts == AV_NOPTS_VALUE)
                s->pts = in->pts;

            av_frame_free(&in);
            if (ret < 0)
                return ret;
        }
    }

    /* Process one hop when a window is ready, or drain leftovers at EOF. */
    if ((av_audio_fifo_size(s->fifo) >= s->window_size) ||
        (av_audio_fifo_size(s->fifo) > 0 && s->eof)) {
        ret = filter_frame(inlink);
        if (av_audio_fifo_size(s->fifo) >= s->window_size)
            ff_filter_set_ready(ctx, 100);
        return ret;
    }

    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF) {
            s->eof = 1;
            /* NOTE(review): fifo size is never negative, so this condition is
             * always true; presumably '> 0' was intended — confirm. */
            if (av_audio_fifo_size(s->fifo) >= 0) {
                ff_filter_set_ready(ctx, 100);
                return 0;
            }
        }
    }

    if (s->eof && av_audio_fifo_size(s->fifo) <= 0) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
        return 0;
    }

    if (!s->eof)
        FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
/* afftfilt format negotiation: planar float only, any channel count, any
 * sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    int ret;

    if (!(layouts = ff_all_channel_counts()))
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    if (!(formats = ff_make_format_list(sample_fmts)))
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    if (!(formats = ff_all_samplerates()))
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Release every resource allocated in config_input(); all helpers below are
 * NULL-safe, so a partially configured instance tears down cleanly. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AFFTFiltContext *s = ctx->priv;
    int i;

    av_fft_end(s->fft);
    av_fft_end(s->ifft);

    /* Per-channel bin buffers (guards hoisted out of the loops). */
    if (s->fft_data) {
        for (i = 0; i < s->channels; i++)
            av_freep(&s->fft_data[i]);
    }
    if (s->fft_temp) {
        for (i = 0; i < s->channels; i++)
            av_freep(&s->fft_temp[i]);
    }
    av_freep(&s->fft_data);
    av_freep(&s->fft_temp);

    for (i = 0; i < s->nb_exprs; i++) {
        av_expr_free(s->real[i]);
        av_expr_free(s->imag[i]);
    }
    av_freep(&s->real);
    av_freep(&s->imag);

    av_frame_free(&s->buffer);
    av_freep(&s->window_func_lut);
    av_audio_fifo_free(s->fifo);
}
/* Single audio input; per-link state is built in config_input. */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};
/* Single audio output. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* afftfilt filter definition. */
AVFilter ff_af_afftfilt = {
    .name          = "afftfilt",
    .description   = NULL_IF_CONFIG_SMALL("Apply arbitrary expressions to samples in frequency domain."),
    .priv_size     = sizeof(AFFTFiltContext),
    .priv_class    = &afftfilt_class,
    .inputs        = inputs,
    .outputs       = outputs,
    .activate      = activate,
    .query_formats = query_formats,
    .uninit        = uninit,
};

978
externals/ffmpeg/libavfilter/af_afir.c vendored Executable file
View File

@@ -0,0 +1,978 @@
/*
* Copyright (c) 2017 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* An arbitrary audio FIR filter
*/
#include <float.h>
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/xga_font_data.h"
#include "libavcodec/avfft.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#include "af_afir.h"
/**
 * Scalar reference implementation of the frequency-domain
 * multiply-accumulate used by the partitioned FIR: sum[] += t[] * c[],
 * where both operands are 'len' interleaved re/im pairs, followed by one
 * lone real coefficient at index 2*len.
 */
static void fcmul_add_c(float *sum, const float *t, const float *c, ptrdiff_t len)
{
    ptrdiff_t i;

    for (i = 0; i < len; i++) {
        const float are = t[2 * i];
        const float aim = t[2 * i + 1];
        const float bre = c[2 * i];
        const float bim = c[2 * i + 1];

        sum[2 * i]     += are * bre - aim * bim;
        sum[2 * i + 1] += are * bim + aim * bre;
    }
    /* Trailing real-only term stored past the complex pairs. */
    sum[2 * i] += t[2 * i] * c[2 * i];
}
/* Naive time-domain convolution used for very small partitions: accumulates
 * sum(ir[m].re * in[n - m], m = 0..n) into out[n], preserving the original
 * summation order (float addition is not associative). */
static void direct(const float *in, const FFTComplex *ir, int len, float *out)
{
    int n, m;

    for (n = 0; n < len; n++) {
        for (m = 0; m <= n; m++)
            out[n] += ir[m].re * in[n - m];
    }
}
/*
 * Process one min_part_size quantum of channel 'ch' through every partition
 * segment of the non-uniform partitioned FIR.  Each segment applies the dry
 * gain, and either convolves directly (tiny partitions) or via
 * RDFT -> complex multiply-accumulate -> inverse RDFT with overlap-save;
 * all segment outputs are summed into 'ptr' and scaled by the wet gain.
 */
static int fir_quantum(AVFilterContext *ctx, AVFrame *out, int ch, int offset)
{
    AudioFIRContext *s = ctx->priv;
    const float *in = (const float *)s->in->extended_data[ch] + offset;
    float *block, *buf, *ptr = (float *)out->extended_data[ch] + offset;
    const int nb_samples = FFMIN(s->min_part_size, out->nb_samples - offset);
    int n, i, j;

    for (int segment = 0; segment < s->nb_segments; segment++) {
        AudioFIRSegment *seg = &s->seg[segment];
        float *src = (float *)seg->input->extended_data[ch];
        float *dst = (float *)seg->output->extended_data[ch];
        float *sum = (float *)seg->sum->extended_data[ch];

        /* Apply dry gain while copying into the segment's input buffer;
         * the vectorized path needs nb_samples aligned up to 4. */
        if (s->min_part_size >= 8) {
            s->fdsp->vector_fmul_scalar(src + seg->input_offset, in, s->dry_gain, FFALIGN(nb_samples, 4));
            emms_c();
        } else {
            for (n = 0; n < nb_samples; n++)
                src[seg->input_offset + n] = in[n] * s->dry_gain;
        }

        /* A segment only computes a new partition once part_size samples
         * have accumulated; until then, replay its previous output. */
        seg->output_offset[ch] += s->min_part_size;
        if (seg->output_offset[ch] == seg->part_size) {
            seg->output_offset[ch] = 0;
        } else {
            memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));

            dst += seg->output_offset[ch];
            for (n = 0; n < nb_samples; n++) {
                ptr[n] += dst[n];
            }
            continue;
        }

        /* Tiny partitions: direct time-domain convolution per partition. */
        if (seg->part_size < 8) {
            memset(dst, 0, sizeof(*dst) * seg->part_size * seg->nb_partitions);

            j = seg->part_index[ch];

            for (i = 0; i < seg->nb_partitions; i++) {
                const int coffset = j * seg->coeff_size;
                const FFTComplex *coeff = (const FFTComplex *)seg->coeff->extended_data[ch * !s->one2many] + coffset;

                direct(src, coeff, nb_samples, dst);

                /* Walk partition history backwards, wrapping at 0. */
                if (j == 0)
                    j = seg->nb_partitions;
                j--;
            }

            seg->part_index[ch] = (seg->part_index[ch] + 1) % seg->nb_partitions;

            memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));

            for (n = 0; n < nb_samples; n++) {
                ptr[n] += dst[n];
            }
            continue;
        }

        /* Frequency-domain path: transform the new block... */
        memset(sum, 0, sizeof(*sum) * seg->fft_length);
        block = (float *)seg->block->extended_data[ch] + seg->part_index[ch] * seg->block_size;
        memset(block + seg->part_size, 0, sizeof(*block) * (seg->fft_length - seg->part_size));

        memcpy(block, src, sizeof(*src) * seg->part_size);

        av_rdft_calc(seg->rdft[ch], block);
        /* av_rdft packs the Nyquist value into bin 1; unpack it so the
         * complex MAC can treat the spectrum uniformly. */
        block[2 * seg->part_size] = block[1];
        block[1] = 0;

        /* ...then multiply-accumulate each stored block with the matching
         * coefficient partition (ch * !s->one2many selects coeff set 0 when
         * one IR serves all channels). */
        j = seg->part_index[ch];

        for (i = 0; i < seg->nb_partitions; i++) {
            const int coffset = j * seg->coeff_size;
            const float *block = (const float *)seg->block->extended_data[ch] + i * seg->block_size;
            const FFTComplex *coeff = (const FFTComplex *)seg->coeff->extended_data[ch * !s->one2many] + coffset;

            s->afirdsp.fcmul_add(sum, block, (const float *)coeff, seg->part_size);

            if (j == 0)
                j = seg->nb_partitions;
            j--;
        }

        /* Re-pack Nyquist and inverse-transform the accumulated spectrum. */
        sum[1] = sum[2 * seg->part_size];
        av_rdft_calc(seg->irdft[ch], sum);

        /* Overlap-save: first half completes the previous tail, second half
         * becomes the next tail. */
        buf = (float *)seg->buffer->extended_data[ch];
        for (n = 0; n < seg->part_size; n++) {
            buf[n] += sum[n];
        }

        memcpy(dst, buf, seg->part_size * sizeof(*dst));

        buf = (float *)seg->buffer->extended_data[ch];
        memcpy(buf, sum + seg->part_size, seg->part_size * sizeof(*buf));

        seg->part_index[ch] = (seg->part_index[ch] + 1) % seg->nb_partitions;

        memmove(src, src + s->min_part_size, (seg->input_size - s->min_part_size) * sizeof(*src));

        for (n = 0; n < nb_samples; n++) {
            ptr[n] += dst[n];
        }
    }

    /* Apply wet gain to the summed segment outputs. */
    if (s->min_part_size >= 8) {
        s->fdsp->vector_fmul_scalar(ptr, ptr, s->wet_gain, FFALIGN(nb_samples, 4));
        emms_c();
    } else {
        for (n = 0; n < nb_samples; n++)
            ptr[n] *= s->wet_gain;
    }

    return 0;
}
/* Filter a whole output frame for one channel, one minimum-size partition
 * quantum at a time. */
static int fir_channel(AVFilterContext *ctx, AVFrame *out, int ch)
{
    AudioFIRContext *s = ctx->priv;
    int offset = 0;

    while (offset < out->nb_samples) {
        fir_quantum(ctx, out, ch, offset);
        offset += s->min_part_size;
    }

    return 0;
}
/* Threaded job entry point: each job filters a contiguous slice of the
 * output frame's channels. */
static int fir_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    AVFrame *out = arg;
    const int nb_channels = out->channels;
    const int first = (nb_channels * jobnr) / nb_jobs;
    const int last = (nb_channels * (jobnr + 1)) / nb_jobs;
    int ch;

    for (ch = first; ch < last; ch++)
        fir_channel(ctx, out, ch);

    return 0;
}
/**
 * Filter one input frame: allocate a same-sized output, run the convolution
 * over all channels (possibly threaded), stamp and advance the output pts.
 * Takes ownership of, and frees, the input frame.
 */
static int fir_frame(AudioFIRContext *s, AVFrame *in, AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFrame *out = NULL;

    out = ff_get_audio_buffer(outlink, in->nb_samples);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    /* The first frame establishes the output timestamp base. */
    if (s->pts == AV_NOPTS_VALUE)
        s->pts = in->pts;
    s->in = in;
    /* One job per channel, capped by the available thread count. */
    ctx->internal->execute(ctx, fir_channels, out, NULL, FFMIN(outlink->channels,
                           ff_filter_get_nb_threads(ctx)));

    out->pts = s->pts;
    if (s->pts != AV_NOPTS_VALUE)
        s->pts += av_rescale_q(out->nb_samples, (AVRational){1, outlink->sample_rate}, outlink->time_base);

    av_frame_free(&in);
    s->in = NULL;

    return ff_filter_frame(outlink, out);
}
/* Render ASCII text into a packed 32-bit (RGB0) frame using the built-in
 * 8x8 CGA bitmap font. (x, y) is the top-left pixel of the first glyph. */
static void drawtext(AVFrame *pic, int x, int y, const char *txt, uint32_t color)
{
    const uint8_t *font = avpriv_cga_font;
    const int glyph_h = 8;
    int i;

    for (i = 0; txt[i]; i++) {
        const uint8_t *glyph = font + txt[i] * glyph_h;
        uint8_t *row = pic->data[0] + y * pic->linesize[0] + (x + i * 8) * 4;
        int line, bit;

        for (line = 0; line < glyph_h; line++) {
            uint8_t *p = row;

            for (bit = 0; bit < 8; bit++) {
                if (glyph[line] & (0x80 >> bit))
                    AV_WL32(p, color);
                p += 4;
            }
            row += pic->linesize[0];
        }
    }
}
/* Bresenham-style line draw into a packed 32-bit frame.
 * NOTE(review): there is no x step direction (no sx) — x0 is only ever
 * decremented, so this assumes x1 <= x0. That holds for all calls in
 * draw_response(), where x1 = FFMAX(i - 1, 0) and x0 = i; confirm before
 * reusing this helper elsewhere. */
static void draw_line(AVFrame *out, int x0, int y0, int x1, int y1, uint32_t color)
{
    int dx = FFABS(x1-x0);
    int dy = FFABS(y1-y0), sy = y0 < y1 ? 1 : -1;
    int err = (dx>dy ? dx : -dy) / 2, e2;

    for (;;) {
        AV_WL32(out->data[0] + y0 * out->linesize[0] + x0 * 4, color);
        if (x0 == x1 && y0 == y1)
            break;
        e2 = err;
        if (e2 >-dx) {
            err -= dy;
            x0--;
        }
        if (e2 < dy) {
            err += dx;
            y0 += sy;
        }
    }
}
/**
 * Paint the impulse response's frequency analysis (magnitude, phase and
 * delay curves plus min/max labels) of the displayed channel into out.
 */
static void draw_response(AVFilterContext *ctx, AVFrame *out)
{
    AudioFIRContext *s = ctx->priv;
    float *mag, *phase, *delay, min = FLT_MAX, max = FLT_MIN;
    float min_delay = FLT_MAX, max_delay = FLT_MIN;
    int prev_ymag = -1, prev_yphase = -1, prev_ydelay = -1;
    char text[32];
    int channel, i, x;

    memset(out->data[0], 0, s->h * out->linesize[0]);

    phase = av_malloc_array(s->w, sizeof(*phase));
    mag = av_malloc_array(s->w, sizeof(*mag));
    delay = av_malloc_array(s->w, sizeof(*delay));
    if (!mag || !phase || !delay)
        goto end;

    channel = av_clip(s->ir_channel, 0, s->ir[s->selir]->channels - 1);
    /* Evaluate the DTFT of the IR at s->w frequencies from 0 to pi;
     * real_num/imag_num accumulate the x-weighted sums used for the
     * delay estimate below. */
    for (i = 0; i < s->w; i++) {
        const float *src = (const float *)s->ir[s->selir]->extended_data[channel];
        double w = i * M_PI / (s->w - 1);
        double div, real_num = 0., imag_num = 0., real = 0., imag = 0.;

        for (x = 0; x < s->nb_taps; x++) {
            real += cos(-x * w) * src[x];
            imag += sin(-x * w) * src[x];
            real_num += cos(-x * w) * src[x] * x;
            imag_num += sin(-x * w) * src[x] * x;
        }

        mag[i] = hypot(real, imag);
        phase[i] = atan2(imag, real);
        div = real * real + imag * imag;
        /* NOTE(review): div can be 0 at a spectral null, making delay[i]
         * non-finite; the clip below keeps drawing in range — verify. */
        delay[i] = (real_num * real + imag_num * imag) / div;
        min = fminf(min, mag[i]);
        max = fmaxf(max, mag[i]);
        min_delay = fminf(min_delay, delay[i]);
        max_delay = fmaxf(max_delay, delay[i]);
    }

    /* Plot the three curves, connecting each column to the previous one. */
    for (i = 0; i < s->w; i++) {
        int ymag = mag[i] / max * (s->h - 1);
        int ydelay = (delay[i] - min_delay) / (max_delay - min_delay) * (s->h - 1);
        int yphase = (0.5 * (1. + phase[i] / M_PI)) * (s->h - 1);

        ymag = s->h - 1 - av_clip(ymag, 0, s->h - 1);
        yphase = s->h - 1 - av_clip(yphase, 0, s->h - 1);
        ydelay = s->h - 1 - av_clip(ydelay, 0, s->h - 1);

        if (prev_ymag < 0)
            prev_ymag = ymag;
        if (prev_yphase < 0)
            prev_yphase = yphase;
        if (prev_ydelay < 0)
            prev_ydelay = ydelay;

        draw_line(out, i, ymag, FFMAX(i - 1, 0), prev_ymag, 0xFFFF00FF);
        draw_line(out, i, yphase, FFMAX(i - 1, 0), prev_yphase, 0xFF00FF00);
        draw_line(out, i, ydelay, FFMAX(i - 1, 0), prev_ydelay, 0xFF00FFFF);

        prev_ymag = ymag;
        prev_yphase = yphase;
        prev_ydelay = ydelay;
    }

    /* Only label when the canvas is big enough for the text. */
    if (s->w > 400 && s->h > 100) {
        drawtext(out, 2, 2, "Max Magnitude:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", max);
        drawtext(out, 15 * 8 + 2, 2, text, 0xDDDDDDDD);

        drawtext(out, 2, 12, "Min Magnitude:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", min);
        drawtext(out, 15 * 8 + 2, 12, text, 0xDDDDDDDD);

        drawtext(out, 2, 22, "Max Delay:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", max_delay);
        drawtext(out, 11 * 8 + 2, 22, text, 0xDDDDDDDD);

        drawtext(out, 2, 32, "Min Delay:", 0xDDDDDDDD);
        snprintf(text, sizeof(text), "%.2f", min_delay);
        drawtext(out, 11 * 8 + 2, 32, text, 0xDDDDDDDD);
    }

end:
    av_free(delay);
    av_free(phase);
    av_free(mag);
}
/**
 * Allocate and size one segment of the partitioned convolution.
 *
 * @param offset        input FIFO offset where this segment's data starts
 * @param nb_partitions number of part_size partitions in the segment
 * @param part_size     partition size in samples
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure
 *         (partially allocated state is released later by uninit_segment())
 */
static int init_segment(AVFilterContext *ctx, AudioFIRSegment *seg,
                        int offset, int nb_partitions, int part_size)
{
    AudioFIRContext *s = ctx->priv;

    seg->rdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->rdft));
    seg->irdft = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->irdft));
    if (!seg->rdft || !seg->irdft)
        return AVERROR(ENOMEM);

    /* Real FFT of 2*part_size samples plus one extra slot for the unpacked
     * Nyquist bin; strides are 32-aligned for the SIMD paths. */
    seg->fft_length = part_size * 2 + 1;
    seg->part_size = part_size;
    seg->block_size = FFALIGN(seg->fft_length, 32);
    seg->coeff_size = FFALIGN(seg->part_size + 1, 32);
    seg->nb_partitions = nb_partitions;
    seg->input_size = offset + s->min_part_size;
    seg->input_offset = offset;

    seg->part_index = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->part_index));
    seg->output_offset = av_calloc(ctx->inputs[0]->channels, sizeof(*seg->output_offset));
    if (!seg->part_index || !seg->output_offset)
        return AVERROR(ENOMEM));

    /* FFT contexts are only needed for the frequency-domain path, which
     * fir_quantum() uses for part_size >= 8. */
    for (int ch = 0; ch < ctx->inputs[0]->channels && part_size >= 8; ch++) {
        seg->rdft[ch] = av_rdft_init(av_log2(2 * part_size), DFT_R2C);
        seg->irdft[ch] = av_rdft_init(av_log2(2 * part_size), IDFT_C2R);
        if (!seg->rdft[ch] || !seg->irdft[ch])
            return AVERROR(ENOMEM);
    }

    /* coeff is laid out per channel of the selected IR input; everything
     * else follows the main input's channel layout. */
    seg->sum = ff_get_audio_buffer(ctx->inputs[0], seg->fft_length);
    seg->block = ff_get_audio_buffer(ctx->inputs[0], seg->nb_partitions * seg->block_size);
    seg->buffer = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
    seg->coeff = ff_get_audio_buffer(ctx->inputs[1 + s->selir], seg->nb_partitions * seg->coeff_size * 2);
    seg->input = ff_get_audio_buffer(ctx->inputs[0], seg->input_size);
    seg->output = ff_get_audio_buffer(ctx->inputs[0], seg->part_size);
    if (!seg->buffer || !seg->sum || !seg->block || !seg->coeff || !seg->input || !seg->output)
        return AVERROR(ENOMEM);

    return 0;
}
/* Free everything a segment owns: per-channel FFT contexts, the index
 * arrays and the audio buffers. Safe on partially initialized segments. */
static void uninit_segment(AVFilterContext *ctx, AudioFIRSegment *seg)
{
    AudioFIRContext *s = ctx->priv;
    int ch;

    for (ch = 0; ch < s->nb_channels; ch++) {
        if (seg->rdft)
            av_rdft_end(seg->rdft[ch]);
        if (seg->irdft)
            av_rdft_end(seg->irdft[ch]);
    }
    av_freep(&seg->rdft);
    av_freep(&seg->irdft);

    av_freep(&seg->output_offset);
    av_freep(&seg->part_index);

    av_frame_free(&seg->block);
    av_frame_free(&seg->sum);
    av_frame_free(&seg->buffer);
    av_frame_free(&seg->coeff);
    av_frame_free(&seg->input);
    av_frame_free(&seg->output);

    seg->input_size = 0;
}
/**
 * One-time conversion of the selected impulse response into the internal
 * representation: lay out the non-uniform segments (sizes doubling from
 * minp up to maxp), apply the chosen auto-gain normalization, truncate per
 * the "length" option, and transform each coefficient partition to the
 * frequency domain.
 */
static int convert_coeffs(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
    int ret, i, ch, n, cur_nb_taps;
    float power = 0;

    if (!s->nb_taps) {
        int part_size, max_part_size;
        int left, offset = 0;

        s->nb_taps = ff_inlink_queued_samples(ctx->inputs[1 + s->selir]);
        if (s->nb_taps <= 0)
            return AVERROR(EINVAL);

        if (s->minp > s->maxp) {
            s->maxp = s->minp;
        }

        /* Segment plan: partition sizes are powers of two from minp to
         * maxp; early segments get few partitions so output latency stays
         * at min_part_size, the final size takes everything left. */
        left = s->nb_taps;
        part_size = 1 << av_log2(s->minp);
        max_part_size = 1 << av_log2(s->maxp);

        s->min_part_size = part_size;

        for (i = 0; left > 0; i++) {
            int step = part_size == max_part_size ? INT_MAX : 1 + (i == 0);
            int nb_partitions = FFMIN(step, (left + part_size - 1) / part_size);

            s->nb_segments = i + 1;
            ret = init_segment(ctx, &s->seg[i], offset, nb_partitions, part_size);
            if (ret < 0)
                return ret;
            offset += nb_partitions * part_size;
            left -= nb_partitions * part_size;
            part_size *= 2;
            part_size = FFMIN(part_size, max_part_size);
        }
    }

    if (!s->ir[s->selir]) {
        ret = ff_inlink_consume_samples(ctx->inputs[1 + s->selir], s->nb_taps, s->nb_taps, &s->ir[s->selir]);
        if (ret < 0)
            return ret;
        if (ret == 0)
            return AVERROR_BUG;
    }

    if (s->response)
        draw_response(ctx, s->video);

    /* Auto-gain normalization per the "gtype" option. */
    s->gain = 1;
    cur_nb_taps = s->ir[s->selir]->nb_samples;

    switch (s->gtype) {
    case -1:
        /* nothing to do */
        break;
    case 0:
        /* "peak": normalize by the sum of absolute values */
        for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
            float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];

            for (i = 0; i < cur_nb_taps; i++)
                power += FFABS(time[i]);
        }
        s->gain = ctx->inputs[1 + s->selir]->channels / power;
        break;
    case 1:
        /* "dc": normalize by the plain sum (DC response) */
        for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
            float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];

            for (i = 0; i < cur_nb_taps; i++)
                power += time[i];
        }
        s->gain = ctx->inputs[1 + s->selir]->channels / power;
        break;
    case 2:
        /* "gn": normalize by total energy; ch equals the channel count
         * here because the loop above just finished. */
        for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
            float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];

            for (i = 0; i < cur_nb_taps; i++)
                power += time[i] * time[i];
        }
        s->gain = sqrtf(ch / power);
        break;
    default:
        return AVERROR_BUG;
    }

    s->gain = FFMIN(s->gain * s->ir_gain, 1.f);
    av_log(ctx, AV_LOG_DEBUG, "power %f, gain %f\n", power, s->gain);
    for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
        float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];

        s->fdsp->vector_fmul_scalar(time, time, s->gain, FFALIGN(cur_nb_taps, 4));
    }

    av_log(ctx, AV_LOG_DEBUG, "nb_taps: %d\n", cur_nb_taps);
    av_log(ctx, AV_LOG_DEBUG, "nb_segments: %d\n", s->nb_segments);

    for (ch = 0; ch < ctx->inputs[1 + s->selir]->channels; ch++) {
        float *time = (float *)s->ir[s->selir]->extended_data[!s->one2many * ch];
        int toffset = 0;

        /* Truncate the IR to the fraction selected by the "length" option. */
        for (i = FFMAX(1, s->length * s->nb_taps); i < s->nb_taps; i++)
            time[i] = 0;

        av_log(ctx, AV_LOG_DEBUG, "channel: %d\n", ch);

        for (int segment = 0; segment < s->nb_segments; segment++) {
            AudioFIRSegment *seg = &s->seg[segment];
            float *block = (float *)seg->block->extended_data[ch];
            FFTComplex *coeff = (FFTComplex *)seg->coeff->extended_data[ch];

            av_log(ctx, AV_LOG_DEBUG, "segment: %d\n", segment);

            for (i = 0; i < seg->nb_partitions; i++) {
                const float scale = 1.f / seg->part_size;
                const int coffset = i * seg->coeff_size;
                const int remaining = s->nb_taps - toffset;
                const int size = remaining >= seg->part_size ? seg->part_size : remaining;

                /* Tiny partitions stay in the time domain (see direct()). */
                if (size < 8) {
                    for (n = 0; n < size; n++)
                        coeff[coffset + n].re = time[toffset + n];

                    toffset += size;
                    continue;
                }

                /* Transform the partition; unpack the rdft DC/Nyquist pair
                 * (block[0]/block[1]) into separate real-only bins. */
                memset(block, 0, sizeof(*block) * seg->fft_length);
                memcpy(block, time + toffset, size * sizeof(*block));

                av_rdft_calc(seg->rdft[0], block);

                coeff[coffset].re = block[0] * scale;
                coeff[coffset].im = 0;
                for (n = 1; n < seg->part_size; n++) {
                    coeff[coffset + n].re = block[2 * n] * scale;
                    coeff[coffset + n].im = block[2 * n + 1] * scale;
                }
                coeff[coffset + seg->part_size].re = block[1] * scale;
                coeff[coffset + seg->part_size].im = 0;

                toffset += size;
            }

            av_log(ctx, AV_LOG_DEBUG, "nb_partitions: %d\n", seg->nb_partitions);
            av_log(ctx, AV_LOG_DEBUG, "partition size: %d\n", seg->part_size);
            av_log(ctx, AV_LOG_DEBUG, "block size: %d\n", seg->block_size);
            av_log(ctx, AV_LOG_DEBUG, "fft_length: %d\n", seg->fft_length);
            av_log(ctx, AV_LOG_DEBUG, "coeff_size: %d\n", seg->coeff_size);
            av_log(ctx, AV_LOG_DEBUG, "input_size: %d\n", seg->input_size);
            av_log(ctx, AV_LOG_DEBUG, "input_offset: %d\n", seg->input_offset);
        }
    }

    s->have_coeffs = 1;

    return 0;
}
/* Validate the amount of queued IR data against the "maxir" limit.
 * NOTE(review): the frame argument is unused; activate() passes NULL. */
static int check_ir(AVFilterLink *link, AVFrame *frame)
{
    AVFilterContext *ctx = link->dst;
    AudioFIRContext *s = ctx->priv;
    const int queued = ff_inlink_queued_samples(link);
    const int limit = s->max_ir_len * ctx->outputs[0]->sample_rate;

    if (queued > limit) {
        av_log(ctx, AV_LOG_ERROR, "Too big number of coefficients: %d > %d.\n", queued, limit);
        return AVERROR(EINVAL);
    }

    return 0;
}
/**
 * Filter scheduling callback. Order of work: propagate output status
 * backwards, wait for the selected IR input to reach EOF and convert it,
 * then consume audio in multiples of the minimum partition size, and
 * optionally emit a frequency-response video frame.
 */
static int activate(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret, status, available, wanted;
    AVFrame *in = NULL;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
    if (s->response)
        FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[1], ctx);
    if (!s->eof_coeffs[s->selir]) {
        AVFrame *ir = NULL;

        /* NOTE(review): ir stays NULL; check_ir() ignores its frame
         * argument and only validates the queued sample count. */
        ret = check_ir(ctx->inputs[1 + s->selir], ir);
        if (ret < 0)
            return ret;

        if (ff_outlink_get_status(ctx->inputs[1 + s->selir]) == AVERROR_EOF)
            s->eof_coeffs[s->selir] = 1;

        /* Keep requesting IR data until its input signals EOF. */
        if (!s->eof_coeffs[s->selir]) {
            if (ff_outlink_frame_wanted(ctx->outputs[0]))
                ff_inlink_request_frame(ctx->inputs[1 + s->selir]);
            else if (s->response && ff_outlink_frame_wanted(ctx->outputs[1]))
                ff_inlink_request_frame(ctx->inputs[1 + s->selir]);
            return 0;
        }
    }

    if (!s->have_coeffs && s->eof_coeffs[s->selir]) {
        ret = convert_coeffs(ctx);
        if (ret < 0)
            return ret;
    }

    /* Consume the largest multiple of min_part_size available (at least
     * one quantum, so tail flushing still makes progress). */
    available = ff_inlink_queued_samples(ctx->inputs[0]);
    wanted = FFMAX(s->min_part_size, (available / s->min_part_size) * s->min_part_size);
    ret = ff_inlink_consume_samples(ctx->inputs[0], wanted, wanted, &in);
    if (ret > 0)
        ret = fir_frame(s, in, outlink);

    if (ret < 0)
        return ret;

    /* Emit at most one response video frame per video pts step. */
    if (s->response && s->have_coeffs) {
        int64_t old_pts = s->video->pts;
        int64_t new_pts = av_rescale_q(s->pts, ctx->inputs[0]->time_base, ctx->outputs[1]->time_base);

        if (ff_outlink_frame_wanted(ctx->outputs[1]) && old_pts < new_pts) {
            AVFrame *clone;
            s->video->pts = new_pts;
            clone = av_frame_clone(s->video);
            if (!clone)
                return AVERROR(ENOMEM);
            return ff_filter_frame(ctx->outputs[1], clone);
        }
    }

    if (ff_inlink_queued_samples(ctx->inputs[0]) >= s->min_part_size) {
        ff_filter_set_ready(ctx, 10);
        return 0;
    }

    /* Forward EOF from the main input to all outputs. */
    if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
        if (status == AVERROR_EOF) {
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            if (s->response)
                ff_outlink_set_status(ctx->outputs[1], status, pts);
            return 0;
        }
    }

    if (ff_outlink_frame_wanted(ctx->outputs[0]) &&
        !ff_outlink_get_status(ctx->inputs[0])) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    if (s->response &&
        ff_outlink_frame_wanted(ctx->outputs[1]) &&
        !ff_outlink_get_status(ctx->inputs[0])) {
        ff_inlink_request_frame(ctx->inputs[0]);
        return 0;
    }

    return FFERROR_NOT_READY;
}
/**
 * Negotiate formats: float planar audio everywhere, RGB0 for the optional
 * response video output. With irfmt=input all links share channel layouts;
 * with irfmt=mono the IR inputs are pinned to mono while main input and
 * output still share layouts.
 */
static int query_formats(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };
    static const enum AVPixelFormat pix_fmts[] = {
        AV_PIX_FMT_RGB0,
        AV_PIX_FMT_NONE
    };
    int ret;

    if (s->response) {
        AVFilterLink *videolink = ctx->outputs[1];
        formats = ff_make_format_list(pix_fmts);
        if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
            return ret;
    }

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);

    if (s->ir_format) {
        ret = ff_set_common_channel_layouts(ctx, layouts);
        if (ret < 0)
            return ret;
    } else {
        /* NOTE(review): if a ff_channel_layouts_ref() below fails, the
         * mono list may be left unreferenced — verify ownership rules. */
        AVFilterChannelLayouts *mono = NULL;

        ret = ff_add_channel_layout(&mono, AV_CH_LAYOUT_MONO);
        if (ret)
            return ret;

        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[0]->out_channel_layouts)) < 0)
            return ret;
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
            return ret;
        for (int i = 1; i < ctx->nb_inputs; i++) {
            if ((ret = ff_channel_layouts_ref(mono, &ctx->inputs[i]->out_channel_layouts)) < 0)
                return ret;
        }
    }

    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    return ff_set_common_samplerates(ctx, formats);
}
/* Configure the audio output: it mirrors the main (first) input stream. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioFIRContext *s = ctx->priv;
    AVFilterLink *mainlink = ctx->inputs[0];

    s->one2many = ctx->inputs[1 + s->selir]->channels == 1;
    outlink->sample_rate    = mainlink->sample_rate;
    outlink->time_base      = mainlink->time_base;
    outlink->channel_layout = mainlink->channel_layout;
    outlink->channels       = mainlink->channels;

    s->nb_channels      = outlink->channels;
    s->nb_coef_channels = ctx->inputs[1 + s->selir]->channels;
    s->pts              = AV_NOPTS_VALUE;

    return 0;
}
/* Release all filter state: segments, DSP context, cached IRs, the pad
 * names allocated in init(), and the response video frame. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
    int i;

    for (i = 0; i < s->nb_segments; i++)
        uninit_segment(ctx, &s->seg[i]);

    av_freep(&s->fdsp);

    for (i = 0; i < s->nb_irs; i++)
        av_frame_free(&s->ir[i]);

    for (i = 0; i < ctx->nb_inputs; i++)
        av_freep(&ctx->input_pads[i].name);
    for (i = 0; i < ctx->nb_outputs; i++)
        av_freep(&ctx->output_pads[i].name);

    av_frame_free(&s->video);
}
/* Configure the optional response video output and (re)allocate the
 * canvas that draw_response() paints into. */
static int config_video(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioFIRContext *s = ctx->priv;

    outlink->sample_aspect_ratio = (AVRational){1,1};
    outlink->h          = s->h;
    outlink->w          = s->w;
    outlink->frame_rate = s->frame_rate;
    outlink->time_base  = av_inv_q(outlink->frame_rate);

    av_frame_free(&s->video);
    s->video = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!s->video)
        return AVERROR(ENOMEM);

    return 0;
}
/* Install the C DSP entry points, then let the x86 code override them
 * with SIMD versions where available. */
void ff_afir_init(AudioFIRDSPContext *dsp)
{
    dsp->fcmul_add = fcmul_add_c;

    if (ARCH_X86)
        ff_afir_init_x86(dsp);
}
/**
 * Create the filter's dynamic pads: one "main" audio input, "ir%d" inputs
 * for each requested IR stream, the "default" audio output and, when the
 * response option is set, a "filter_response" video output. Pad names are
 * heap-allocated here and freed in uninit().
 */
static av_cold int init(AVFilterContext *ctx)
{
    AudioFIRContext *s = ctx->priv;
    AVFilterPad pad, vpad;
    int ret;

    pad = (AVFilterPad) {
        .name = av_strdup("main"),
        .type = AVMEDIA_TYPE_AUDIO,
    };
    if (!pad.name)
        return AVERROR(ENOMEM);

    ret = ff_insert_inpad(ctx, 0, &pad);
    if (ret < 0) {
        av_freep(&pad.name);
        return ret;
    }

    /* One input pad per impulse response stream (nbirs option). */
    for (int n = 0; n < s->nb_irs; n++) {
        pad = (AVFilterPad) {
            .name = av_asprintf("ir%d", n),
            .type = AVMEDIA_TYPE_AUDIO,
        };
        if (!pad.name)
            return AVERROR(ENOMEM);

        ret = ff_insert_inpad(ctx, n + 1, &pad);
        if (ret < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    pad = (AVFilterPad) {
        .name = av_strdup("default"),
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    };
    if (!pad.name)
        return AVERROR(ENOMEM);

    ret = ff_insert_outpad(ctx, 0, &pad);
    if (ret < 0) {
        av_freep(&pad.name);
        return ret;
    }

    if (s->response) {
        vpad = (AVFilterPad){
            .name = av_strdup("filter_response"),
            .type = AVMEDIA_TYPE_VIDEO,
            .config_props = config_video,
        };
        if (!vpad.name)
            return AVERROR(ENOMEM);

        ret = ff_insert_outpad(ctx, 1, &vpad);
        if (ret < 0) {
            av_freep(&vpad.name);
            return ret;
        }
    }

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        return AVERROR(ENOMEM);

    ff_afir_init(&s->afirdsp);

    return 0;
}
/* Runtime command handler: apply the generic AVOption command, clamp the
 * selected IR index, and force the coefficients to be re-converted when
 * the selection changed. */
static int process_command(AVFilterContext *ctx,
                           const char *cmd,
                           const char *arg,
                           char *res,
                           int res_len,
                           int flags)
{
    AudioFIRContext *s = ctx->priv;
    const int old_selir = s->selir;
    int ret;

    ret = ff_filter_process_command(ctx, cmd, arg, res, res_len, flags);
    if (ret < 0)
        return ret;

    s->selir = FFMIN(s->nb_irs - 1, s->selir);
    if (old_selir != s->selir)
        s->have_coeffs = 0;

    return 0;
}
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AFR AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define VF AV_OPT_FLAG_VIDEO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(AudioFIRContext, x)

/* User options; only "ir" (AFR) may be changed at runtime. */
static const AVOption afir_options[] = {
    { "dry",    "set dry gain",      OFFSET(dry_gain),   AV_OPT_TYPE_FLOAT, {.dbl=1},    0, 10, AF },
    { "wet",    "set wet gain",      OFFSET(wet_gain),   AV_OPT_TYPE_FLOAT, {.dbl=1},    0, 10, AF },
    { "length", "set IR length",     OFFSET(length),     AV_OPT_TYPE_FLOAT, {.dbl=1},    0,  1, AF },
    { "gtype",  "set IR auto gain type",OFFSET(gtype),   AV_OPT_TYPE_INT,   {.i64=0},   -1,  2, AF, "gtype" },
    {  "none",  "without auto gain", 0,                  AV_OPT_TYPE_CONST, {.i64=-1},   0,  0, AF, "gtype" },
    {  "peak",  "peak gain",         0,                  AV_OPT_TYPE_CONST, {.i64=0},    0,  0, AF, "gtype" },
    {  "dc",    "DC gain",           0,                  AV_OPT_TYPE_CONST, {.i64=1},    0,  0, AF, "gtype" },
    {  "gn",    "gain to noise",     0,                  AV_OPT_TYPE_CONST, {.i64=2},    0,  0, AF, "gtype" },
    { "irgain", "set IR gain",       OFFSET(ir_gain),    AV_OPT_TYPE_FLOAT, {.dbl=1},    0,  1, AF },
    { "irfmt",  "set IR format",     OFFSET(ir_format),  AV_OPT_TYPE_INT,   {.i64=1},    0,  1, AF, "irfmt" },
    {  "mono",  "single channel",    0,                  AV_OPT_TYPE_CONST, {.i64=0},    0,  0, AF, "irfmt" },
    {  "input", "same as input",     0,                  AV_OPT_TYPE_CONST, {.i64=1},    0,  0, AF, "irfmt" },
    { "maxir",  "set max IR length", OFFSET(max_ir_len), AV_OPT_TYPE_FLOAT, {.dbl=30}, 0.1, 60, AF },
    { "response", "show IR frequency response", OFFSET(response), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, VF },
    { "channel", "set IR channel to display frequency response", OFFSET(ir_channel), AV_OPT_TYPE_INT, {.i64=0}, 0, 1024, VF },
    { "size",   "set video size",    OFFSET(w),          AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, VF },
    { "rate",   "set video rate",    OFFSET(frame_rate), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT32_MAX, VF },
    { "minp",   "set min partition size", OFFSET(minp),  AV_OPT_TYPE_INT,   {.i64=8192}, 1, 32768, AF },
    { "maxp",   "set max partition size", OFFSET(maxp),  AV_OPT_TYPE_INT,   {.i64=8192}, 8, 32768, AF },
    { "nbirs",  "set number of input IRs",OFFSET(nb_irs),AV_OPT_TYPE_INT,   {.i64=1},    1,    32, AF },
    { "ir",     "select IR",              OFFSET(selir), AV_OPT_TYPE_INT,   {.i64=0},    0,    31, AFR },
    { NULL }
};
AVFILTER_DEFINE_CLASS(afir);

/* Pads are created dynamically in init(), hence the DYNAMIC flags and the
 * absence of static .inputs/.outputs tables. */
AVFilter ff_af_afir = {
    .name          = "afir",
    .description   = NULL_IF_CONFIG_SMALL("Apply Finite Impulse Response filter with supplied coefficients in additional stream(s)."),
    .priv_size     = sizeof(AudioFIRContext),
    .priv_class    = &afir_class,
    .query_formats = query_formats,
    .init          = init,
    .activate      = activate,
    .uninit        = uninit,
    .process_command = process_command,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS  |
                     AVFILTER_FLAG_DYNAMIC_OUTPUTS |
                     AVFILTER_FLAG_SLICE_THREADS,
};

106
externals/ffmpeg/libavfilter/af_afir.h vendored Executable file
View File

@@ -0,0 +1,106 @@
/*
* Copyright (c) 2017 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_AFIR_H
#define AVFILTER_AFIR_H
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
/**
 * Per-partition-size state of the non-uniform partitioned convolution.
 */
typedef struct AudioFIRSegment {
    int nb_partitions;     ///< number of partitions of part_size samples
    int part_size;         ///< samples per partition
    int block_size;        ///< allocated stride of one FFT block (fft_length, 32-aligned)
    int fft_length;        ///< 2 * part_size + 1 (extra slot for the unpacked Nyquist bin)
    int coeff_size;        ///< allocated stride of one coefficient partition (part_size + 1, 32-aligned)
    int input_size;        ///< size of the per-channel input FIFO
    int input_offset;      ///< write offset for fresh input inside the FIFO

    int *output_offset;    ///< per-channel read offset into the staged output
    int *part_index;       ///< per-channel index of the current partition in the ring

    AVFrame *sum;          ///< per-channel spectrum accumulator
    AVFrame *block;        ///< per-channel ring of transformed input blocks
    AVFrame *buffer;       ///< per-channel overlap buffer
    AVFrame *coeff;        ///< frequency-domain IR coefficients
    AVFrame *input;        ///< per-channel input FIFO
    AVFrame *output;       ///< per-channel staged output

    RDFTContext **rdft, **irdft; ///< per-channel forward/inverse real FFT contexts
} AudioFIRSegment;
/**
 * Platform-overridable DSP entry points (see ff_afir_init()).
 */
typedef struct AudioFIRDSPContext {
    /* sum += t * c over len interleaved complex bins, plus one trailing
     * real-only bin at index 2 * len */
    void (*fcmul_add)(float *sum, const float *t, const float *c,
                      ptrdiff_t len);
} AudioFIRDSPContext;
/**
 * Private context of the afir filter.
 */
typedef struct AudioFIRContext {
    const AVClass *class;

    /* user options (see afir_options[]) */
    float wet_gain;        ///< gain applied to filtered output
    float dry_gain;        ///< gain applied to input before filtering
    float length;          ///< fraction of the IR to keep (rest zeroed)
    int gtype;             ///< auto-gain type: -1 none, 0 peak, 1 dc, 2 gn
    float ir_gain;         ///< extra gain applied to the IR
    int ir_format;         ///< 0: mono IR inputs, 1: same layout as input
    float max_ir_len;      ///< maximum IR duration in seconds
    int response;          ///< when set, expose a frequency-response video output
    int w, h;              ///< response video size
    AVRational frame_rate; ///< response video frame rate
    int ir_channel;        ///< IR channel shown in the response video
    int minp;              ///< minimum partition size
    int maxp;              ///< maximum partition size
    int nb_irs;            ///< number of IR input streams
    int selir;             ///< currently selected IR stream (runtime option)

    /* internal state */
    float gain;            ///< combined normalization gain applied to the IR
    int eof_coeffs[32];    ///< per-IR flag: IR input reached EOF
    int have_coeffs;       ///< coefficients converted and ready
    int nb_taps;           ///< number of IR taps
    int nb_channels;       ///< channels of the main input/output
    int nb_coef_channels;  ///< channels of the selected IR input
    int one2many;          ///< selected IR is mono while input is not

    AudioFIRSegment seg[1024]; ///< convolution segments, smallest first
    int nb_segments;

    AVFrame *in;           ///< current input frame while filtering
    AVFrame *ir[32];       ///< cached impulse responses, one per IR input
    AVFrame *video;        ///< response video canvas
    int min_part_size;     ///< partition size of the first segment (latency)
    int64_t pts;           ///< next output pts

    AudioFIRDSPContext afirdsp;
    AVFloatDSPContext *fdsp;
} AudioFIRContext;

void ff_afir_init(AudioFIRDSPContext *s);
void ff_afir_init_x86(AudioFIRDSPContext *s);

#endif /* AVFILTER_AFIR_H */

157
externals/ffmpeg/libavfilter/af_aformat.c vendored Executable file
View File

@@ -0,0 +1,157 @@
/*
* Copyright (c) 2011 Mina Nagy Zaki
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* format audio filter
*/
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
/**
 * Private context of the aformat filter: parsed constraint lists and the
 * raw option strings they are built from in init().
 */
typedef struct AFormatContext {
    const AVClass *class;

    AVFilterFormats *formats;                ///< parsed sample formats (NULL = allow all)
    AVFilterFormats *sample_rates;           ///< parsed sample rates (NULL = allow all)
    AVFilterChannelLayouts *channel_layouts; ///< parsed channel layouts (NULL = allow all)

    char *formats_str;                       ///< "sample_fmts" option string
    char *sample_rates_str;                  ///< "sample_rates" option string
    char *channel_layouts_str;               ///< "channel_layouts" option string
} AFormatContext;
#define OFFSET(x) offsetof(AFormatContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

/* Each option has a long name and a short alias pointing at the same field. */
static const AVOption aformat_options[] = {
    { "sample_fmts",     "A '|'-separated list of sample formats.",  OFFSET(formats_str),         AV_OPT_TYPE_STRING, .flags = A|F },
    { "f",               "A '|'-separated list of sample formats.",  OFFSET(formats_str),         AV_OPT_TYPE_STRING, .flags = A|F },
    { "sample_rates",    "A '|'-separated list of sample rates.",    OFFSET(sample_rates_str),    AV_OPT_TYPE_STRING, .flags = A|F },
    { "r",               "A '|'-separated list of sample rates.",    OFFSET(sample_rates_str),    AV_OPT_TYPE_STRING, .flags = A|F },
    { "channel_layouts", "A '|'-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A|F },
    { "cl",              "A '|'-separated list of channel layouts.", OFFSET(channel_layouts_str), AV_OPT_TYPE_STRING, .flags = A|F },
    { NULL }
};

AVFILTER_DEFINE_CLASS(aformat);
/* Split str on '|' (or the deprecated ','), convert each token with
 * get_fmt, and append it to list with add_to_list. A token converting to
 * the sentinel value `none` is an error; on any failure the partially
 * built list is released with unref_fn and the enclosing function returns.
 * NOTE: expands to a statement that can `return` — only usable inside a
 * function returning int. */
#define PARSE_FORMATS(str, type, list, add_to_list, unref_fn, get_fmt, none, desc)    \
do {                                                                        \
    char *next, *cur = str, sep;                                            \
    int ret;                                                                \
                                                                            \
    if (str && strchr(str, ',')) {                                          \
        av_log(ctx, AV_LOG_WARNING, "This syntax is deprecated, use '|' to "\
               "separate %s.\n", desc);                                     \
        sep = ',';                                                          \
    } else                                                                  \
        sep = '|';                                                          \
                                                                            \
    while (cur) {                                                           \
        type fmt;                                                           \
        next = strchr(cur, sep);                                            \
        if (next)                                                           \
            *next++ = 0;                                                    \
                                                                            \
        if ((fmt = get_fmt(cur)) == none) {                                 \
            av_log(ctx, AV_LOG_ERROR, "Error parsing " desc ": %s.\n", cur);\
            return AVERROR(EINVAL);                                         \
        }                                                                   \
        if ((ret = add_to_list(&list, fmt)) < 0) {                          \
            unref_fn(&list);                                                \
            return ret;                                                     \
        }                                                                   \
                                                                            \
        cur = next;                                                         \
    }                                                                       \
} while (0)
/**
 * Parse one sample-rate token from the "sample_rates" option.
 *
 * Accepts the bases strtol() supports with base 0 (decimal, 0x-hex, 0-octal),
 * matching the historical strtol(samplerate, NULL, 0) behaviour, but rejects
 * tokens with trailing garbage ("44100abc"), out-of-range values and
 * negative numbers instead of silently truncating them.
 *
 * @param samplerate the token to parse
 * @return the parsed rate, or 0 — the error sentinel PARSE_FORMATS checks —
 *         when the token is not a clean non-negative number
 */
static int get_sample_rate(const char *samplerate)
{
    char *end;
    long rate;

    errno = 0;
    rate = strtol(samplerate, &end, 0);
    if (end == samplerate || *end != '\0' || errno == ERANGE ||
        rate < 0 || rate > INT_MAX)
        return 0;

    return (int)rate;
}
/**
 * Parse the three option strings into constraint lists. Each PARSE_FORMATS
 * expansion may return early with an error; lists left NULL mean "allow
 * everything" in query_formats().
 */
static av_cold int init(AVFilterContext *ctx)
{
    AFormatContext *s = ctx->priv;

    PARSE_FORMATS(s->formats_str, enum AVSampleFormat, s->formats,
                  ff_add_format, ff_formats_unref, av_get_sample_fmt, AV_SAMPLE_FMT_NONE, "sample format");
    /* get_sample_rate() returns 0 for invalid tokens, which is the `none`
     * sentinel here. */
    PARSE_FORMATS(s->sample_rates_str, int, s->sample_rates, ff_add_format, ff_formats_unref,
                  get_sample_rate, 0, "sample rate");
    PARSE_FORMATS(s->channel_layouts_str, uint64_t, s->channel_layouts,
                  ff_add_channel_layout, ff_channel_layouts_unref, av_get_channel_layout, 0,
                  "channel layout");

    return 0;
}
/**
 * Constrain negotiation to the parsed lists; any list left NULL falls back
 * to "all formats/rates/layouts".
 */
static int query_formats(AVFilterContext *ctx)
{
    AFormatContext *s = ctx->priv;
    int ret;

    ret = ff_set_common_formats(ctx, s->formats ? s->formats :
                                ff_all_formats(AVMEDIA_TYPE_AUDIO));
    if (ret < 0)
        return ret;
    ret = ff_set_common_samplerates(ctx, s->sample_rates ? s->sample_rates :
                                    ff_all_samplerates());
    if (ret < 0)
        return ret;
    return ff_set_common_channel_layouts(ctx, s->channel_layouts ? s->channel_layouts :
                                         ff_all_channel_counts());
}
/* Passthrough pads: the filter only influences format negotiation. */
static const AVFilterPad avfilter_af_aformat_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_aformat_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO
    },
    { NULL }
};

AVFilter ff_af_aformat = {
    .name          = "aformat",
    .description   = NULL_IF_CONFIG_SMALL("Convert the input audio to one of the specified formats."),
    .init          = init,
    .query_formats = query_formats,
    .priv_size     = sizeof(AFormatContext),
    .priv_class    = &aformat_class,
    .inputs        = avfilter_af_aformat_inputs,
    .outputs       = avfilter_af_aformat_outputs,
};

449
externals/ffmpeg/libavfilter/af_agate.c vendored Executable file
View File

@@ -0,0 +1,449 @@
/*
* Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen, Damien Zammit
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Audio (Sidechain) Gate filter
*/
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "filters.h"
#include "formats.h"
#include "hermite.h"
/**
 * Private context of the (sidechain) gate filter: user options plus the
 * derived curve parameters computed at config time.
 */
typedef struct AudioGateContext {
    const AVClass *class;

    /* user options (see options[]) */
    double level_in;       ///< input gain
    double level_sc;       ///< sidechain gain
    double attack;         ///< attack time in ms
    double release;        ///< release time in ms
    double threshold;      ///< gate threshold (linear)
    double ratio;          ///< expansion ratio
    double knee;           ///< knee width
    double makeup;         ///< makeup gain
    double range;          ///< maximum gain reduction
    int link;              ///< channel linking: 0 average, 1 maximum
    int detection;         ///< level detection: 0 peak, 1 rms
    int mode;              ///< 0 downward, 1 upward

    /* derived state */
    double thres;          ///< working threshold (squared for rms detection)
    double knee_start;     ///< knee region start (log domain)
    double knee_stop;      ///< knee region stop (log domain)
    double lin_knee_start; ///< knee region start (linear)
    double lin_knee_stop;  ///< knee region stop (linear)
    double lin_slope;      ///< running detector level
    double attack_coeff;   ///< smoothing coefficient for attack
    double release_coeff;  ///< smoothing coefficient for release

    AVAudioFifo *fifo[2];  ///< buffers for aligning main and sidechain input
    int64_t pts;           ///< next output pts
} AudioGateContext;
#define OFFSET(x) offsetof(AudioGateContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Shared option table (used by both agate and sidechaingate variants). */
static const AVOption options[] = {
    { "level_in",  "set input level",        OFFSET(level_in),  AV_OPT_TYPE_DOUBLE, {.dbl=1},           0.015625,   64, A },
    { "mode",      "set mode",               OFFSET(mode),      AV_OPT_TYPE_INT,    {.i64=0},           0,           1, A, "mode" },
    {   "downward",0,                        0,                 AV_OPT_TYPE_CONST,  {.i64=0},           0,           0, A, "mode" },
    {   "upward",  0,                        0,                 AV_OPT_TYPE_CONST,  {.i64=1},           0,           0, A, "mode" },
    { "range",     "set max gain reduction", OFFSET(range),     AV_OPT_TYPE_DOUBLE, {.dbl=0.06125},     0,           1, A },
    { "threshold", "set threshold",          OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0.125},       0,           1, A },
    { "ratio",     "set ratio",              OFFSET(ratio),     AV_OPT_TYPE_DOUBLE, {.dbl=2},           1,        9000, A },
    { "attack",    "set attack",             OFFSET(attack),    AV_OPT_TYPE_DOUBLE, {.dbl=20},          0.01,     9000, A },
    { "release",   "set release",            OFFSET(release),   AV_OPT_TYPE_DOUBLE, {.dbl=250},         0.01,     9000, A },
    { "makeup",    "set makeup gain",        OFFSET(makeup),    AV_OPT_TYPE_DOUBLE, {.dbl=1},           1,          64, A },
    { "knee",      "set knee",               OFFSET(knee),      AV_OPT_TYPE_DOUBLE, {.dbl=2.828427125}, 1,           8, A },
    { "detection", "set detection",          OFFSET(detection), AV_OPT_TYPE_INT,    {.i64=1},           0,           1, A, "detection" },
    {   "peak",    0,                        0,                 AV_OPT_TYPE_CONST,  {.i64=0},           0,           0, A, "detection" },
    {   "rms",     0,                        0,                 AV_OPT_TYPE_CONST,  {.i64=1},           0,           0, A, "detection" },
    { "link",      "set link",               OFFSET(link),      AV_OPT_TYPE_INT,    {.i64=0},           0,           1, A, "link" },
    {   "average", 0,                        0,                 AV_OPT_TYPE_CONST,  {.i64=0},           0,           0, A, "link" },
    {   "maximum", 0,                        0,                 AV_OPT_TYPE_CONST,  {.i64=1},           0,           0, A, "link" },
    { "level_sc",  "set sidechain gain",     OFFSET(level_sc),  AV_OPT_TYPE_DOUBLE, {.dbl=1},           0.015625,   64, A },
    { NULL }
};
/* Derive the per-sample smoothing coefficients and the log-domain
 * threshold/knee bounds from the user options and the sample rate. */
static int agate_config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioGateContext *s = ctx->priv;
    const double knee_sqrt = sqrt(s->knee);
    double thres_lin = s->threshold;

    /* RMS detection works on squared samples, so square the threshold too. */
    if (s->detection)
        thres_lin *= thres_lin;

    /* One-pole envelope coefficients, clamped so they never exceed 1. */
    s->attack_coeff  = FFMIN(1., 1. / (s->attack  * inlink->sample_rate / 4000.));
    s->release_coeff = FFMIN(1., 1. / (s->release * inlink->sample_rate / 4000.));

    /* Soft knee spans [threshold / sqrt(knee), threshold * sqrt(knee)]. */
    s->lin_knee_stop  = thres_lin * knee_sqrt;
    s->lin_knee_start = thres_lin / knee_sqrt;

    /* Precompute log-domain equivalents used by output_gain(). */
    s->thres      = log(thres_lin);
    s->knee_start = log(s->lin_knee_start);
    s->knee_stop  = log(s->lin_knee_stop);

    return 0;
}
// A fake infinity value (because real infinity may break some hosts)
#define FAKE_INFINITY (65536.0 * 65536.0)

// Check for infinity (with appropriate-ish tolerance)
#define IS_FAKE_INFINITY(value) (fabs(value-FAKE_INFINITY) < 1.0)

/* Compute the linear gain to apply for a given detector envelope value.
 * All knee interpolation happens in the log domain; the result is
 * floored at 'range' (the maximum allowed gain reduction). */
static double output_gain(double lin_slope, double ratio, double thres,
                          double knee, double knee_start, double knee_stop,
                          double range, int mode)
{
    const double slope  = log(lin_slope);
    /* A "fake infinite" ratio is mapped to a very steep finite slope. */
    const double tratio = IS_FAKE_INFINITY(ratio) ? 1000. : ratio;
    double gain = (slope - thres) * tratio + thres;

    /* Inside the soft knee, blend smoothly between unity and the gated
     * transfer curve with a hermite spline; delta == tratio here. */
    if (knee > 1.) {
        if (mode) {
            if (slope < knee_stop)
                gain = hermite_interpolation(slope, knee_stop, knee_start,
                                             ((knee_stop - thres) * tratio + thres),
                                             knee_start, tratio, 1.);
        } else {
            if (slope > knee_start)
                gain = hermite_interpolation(slope, knee_start, knee_stop,
                                             ((knee_start - thres) * tratio + thres),
                                             knee_stop, tratio, 1.);
        }
    }

    return FFMAX(range, exp(gain - slope));
}
/* Core gating loop, shared by agate and sidechaingate.
 * Reads interleaved doubles from src, detects the level on scsrc
 * (which is src itself for plain agate), and writes gated samples to dst.
 * Updates the persistent envelope s->lin_slope across calls. */
static void gate(AudioGateContext *s,
                 const double *src, double *dst, const double *scsrc,
                 int nb_samples, double level_in, double level_sc,
                 AVFilterLink *inlink, AVFilterLink *sclink)
{
    const double makeup = s->makeup;
    const double attack_coeff = s->attack_coeff;
    const double release_coeff = s->release_coeff;
    int n, c;

    for (n = 0; n < nb_samples; n++, src += inlink->channels, dst += inlink->channels, scsrc += sclink->channels) {
        double abs_sample = fabs(scsrc[0] * level_sc), gain = 1.0;
        int detected;

        /* Link channels: take either the loudest channel or the average. */
        if (s->link == 1) {
            for (c = 1; c < sclink->channels; c++)
                abs_sample = FFMAX(fabs(scsrc[c] * level_sc), abs_sample);
        } else {
            for (c = 1; c < sclink->channels; c++)
                abs_sample += fabs(scsrc[c] * level_sc);

            abs_sample /= sclink->channels;
        }

        /* RMS detection: envelope follows the squared signal. */
        if (s->detection)
            abs_sample *= abs_sample;

        /* One-pole envelope follower with separate attack/release speeds. */
        s->lin_slope += (abs_sample - s->lin_slope) * (abs_sample > s->lin_slope ? attack_coeff : release_coeff);

        /* Gain computation only kicks in once the envelope crosses the knee
         * region: above it for upward mode, below it for downward mode. */
        if (s->mode)
            detected = s->lin_slope > s->lin_knee_start;
        else
            detected = s->lin_slope < s->lin_knee_stop;

        if (s->lin_slope > 0.0 && detected)
            gain = output_gain(s->lin_slope, s->ratio, s->thres,
                               s->knee, s->knee_start, s->knee_stop,
                               s->range, s->mode);

        for (c = 0; c < inlink->channels; c++)
            dst[c] = src[c] * level_in * gain * makeup;
    }
}
#if CONFIG_AGATE_FILTER
#define agate_options options
AVFILTER_DEFINE_CLASS(agate);
/* Negotiate packed double samples, any channel count, any sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *fmts = NULL;
    int ret;

    ret = ff_add_format(&fmts, AV_SAMPLE_FMT_DBL);
    if (ret < 0)
        return ret;
    if ((ret = ff_set_common_formats(ctx, fmts)) < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/* Simple gate path: the input frame is its own sidechain. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AudioGateContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    const double *samples = (const double *)in->data[0];
    AVFrame *out;

    /* Process in place when possible, otherwise allocate an output frame. */
    if (!av_frame_is_writable(in)) {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    } else {
        out = in;
    }

    /* No separate sidechain: detect on the input itself, so the sidechain
     * gain is level_in as well. */
    gate(s, samples, (double *)out->data[0], samples, in->nb_samples,
         s->level_in, s->level_in, inlink, inlink);

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Single audio input; coefficients are (re)computed on link configuration. */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = agate_config_input,
    },
    { NULL }
};
/* Single audio output, same format as the input. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration for the plain (non-sidechain) gate. */
AVFilter ff_af_agate = {
    .name           = "agate",
    .description    = NULL_IF_CONFIG_SMALL("Audio gate."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(AudioGateContext),
    .priv_class     = &agate_class,
    .inputs         = inputs,
    .outputs        = outputs,
};
#endif /* CONFIG_AGATE_FILTER */
#if CONFIG_SIDECHAINGATE_FILTER
#define sidechaingate_options options
AVFILTER_DEFINE_CLASS(sidechaingate);
/* Activate callback for sidechaingate: buffer both inputs in FIFOs,
 * process whenever both have samples available, and keep main and
 * sidechain sample-aligned. */
static int activate(AVFilterContext *ctx)
{
    AudioGateContext *s = ctx->priv;
    AVFrame *out = NULL, *in[2] = { NULL };
    int ret, i, nb_samples;
    double *dst;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    /* Drain whatever each input currently offers into its FIFO. */
    if ((ret = ff_inlink_consume_frame(ctx->inputs[0], &in[0])) > 0) {
        av_audio_fifo_write(s->fifo[0], (void **)in[0]->extended_data,
                            in[0]->nb_samples);
        av_frame_free(&in[0]);
    }
    if (ret < 0)
        return ret;
    if ((ret = ff_inlink_consume_frame(ctx->inputs[1], &in[1])) > 0) {
        av_audio_fifo_write(s->fifo[1], (void **)in[1]->extended_data,
                            in[1]->nb_samples);
        av_frame_free(&in[1]);
    }
    if (ret < 0)
        return ret;

    /* Process only as many samples as both inputs can provide. */
    nb_samples = FFMIN(av_audio_fifo_size(s->fifo[0]), av_audio_fifo_size(s->fifo[1]));
    if (nb_samples) {
        out = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
        if (!out)
            return AVERROR(ENOMEM);
        for (i = 0; i < 2; i++) {
            in[i] = ff_get_audio_buffer(ctx->inputs[i], nb_samples);
            if (!in[i]) {
                av_frame_free(&in[0]);
                av_frame_free(&in[1]);
                av_frame_free(&out);
                return AVERROR(ENOMEM);
            }
            av_audio_fifo_read(s->fifo[i], (void **)in[i]->data, nb_samples);
        }

        dst = (double *)out->data[0];
        /* Output pts is generated locally; input timestamps were discarded
         * when the frames went into the FIFOs. */
        out->pts = s->pts;
        s->pts += av_rescale_q(nb_samples, (AVRational){1, ctx->outputs[0]->sample_rate}, ctx->outputs[0]->time_base);

        /* in[0] = main signal, in[1] = sidechain detector signal. */
        gate(s, (double *)in[0]->data[0], dst,
             (double *)in[1]->data[0], nb_samples,
             s->level_in, s->level_sc,
             ctx->inputs[0], ctx->inputs[1]);

        av_frame_free(&in[0]);
        av_frame_free(&in[1]);

        ret = ff_filter_frame(ctx->outputs[0], out);
        if (ret < 0)
            return ret;
    }
    FF_FILTER_FORWARD_STATUS(ctx->inputs[0], ctx->outputs[0]);
    FF_FILTER_FORWARD_STATUS(ctx->inputs[1], ctx->outputs[0]);
    /* Request more data from whichever input ran dry. */
    if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        if (!av_audio_fifo_size(s->fifo[0]))
            ff_inlink_request_frame(ctx->inputs[0]);
        if (!av_audio_fifo_size(s->fifo[1]))
            ff_inlink_request_frame(ctx->inputs[1]);
    }
    return 0;
}
/* Format negotiation for sidechaingate: the output takes the channel
 * layout of the main input; both inputs accept any channel count and
 * samples are packed doubles.
 *
 * Fix: check the returns of ff_all_channel_counts(), ff_make_format_list()
 * and ff_all_samplerates() for NULL before use, consistent with
 * query_formats() above (the originals were passed on unchecked). */
static int scquery_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int ret, i;

    /* The main input's layout must be known before we can pin the output. */
    if (!ctx->inputs[0]->in_channel_layouts ||
        !ctx->inputs[0]->in_channel_layouts->nb_channel_layouts) {
        av_log(ctx, AV_LOG_WARNING,
               "No channel layout for input 1\n");
        return AVERROR(EAGAIN);
    }

    if ((ret = ff_add_channel_layout(&layouts, ctx->inputs[0]->in_channel_layouts->channel_layouts[0])) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;

    for (i = 0; i < 2; i++) {
        layouts = ff_all_channel_counts();
        if (!layouts)
            return AVERROR(ENOMEM);
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Output link configuration for sidechaingate: validate matching sample
 * rates, mirror the main input's properties on the output, allocate the
 * two buffering FIFOs and derive the gating coefficients. */
static int scconfig_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioGateContext *s = ctx->priv;

    /* Main and sidechain must run at the same rate; there is no resampler. */
    if (ctx->inputs[0]->sample_rate != ctx->inputs[1]->sample_rate) {
        av_log(ctx, AV_LOG_ERROR,
               "Inputs must have the same sample rate "
               "%d for in0 vs %d for in1\n",
               ctx->inputs[0]->sample_rate, ctx->inputs[1]->sample_rate);
        return AVERROR(EINVAL);
    }

    outlink->sample_rate    = ctx->inputs[0]->sample_rate;
    outlink->time_base      = ctx->inputs[0]->time_base;
    outlink->channel_layout = ctx->inputs[0]->channel_layout;
    outlink->channels       = ctx->inputs[0]->channels;

    /* 1024 is only the initial FIFO size; av_audio_fifo_write() grows it. */
    s->fifo[0] = av_audio_fifo_alloc(ctx->inputs[0]->format, ctx->inputs[0]->channels, 1024);
    s->fifo[1] = av_audio_fifo_alloc(ctx->inputs[1]->format, ctx->inputs[1]->channels, 1024);
    if (!s->fifo[0] || !s->fifo[1])
        return AVERROR(ENOMEM);

    /* Shared coefficient setup with agate; always returns 0. */
    agate_config_input(ctx->inputs[0]);

    return 0;
}
/* Release the main/sidechain FIFOs (av_audio_fifo_free handles NULL). */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioGateContext *s = ctx->priv;
    int i;

    for (i = 0; i < 2; i++)
        av_audio_fifo_free(s->fifo[i]);
}
/* Two inputs: the audio to be gated and the sidechain detector signal. */
static const AVFilterPad sidechaingate_inputs[] = {
    {
        .name = "main",
        .type = AVMEDIA_TYPE_AUDIO,
    },{
        .name = "sidechain",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Single output carrying the gated main signal. */
static const AVFilterPad sidechaingate_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = scconfig_output,
    },
    { NULL }
};
/* Filter registration for the sidechain-driven gate. */
AVFilter ff_af_sidechaingate = {
    .name           = "sidechaingate",
    .description    = NULL_IF_CONFIG_SMALL("Audio sidechain gate."),
    .priv_size      = sizeof(AudioGateContext),
    .priv_class     = &sidechaingate_class,
    .query_formats  = scquery_formats,
    .activate       = activate,
    .uninit         = uninit,
    .inputs         = sidechaingate_inputs,
    .outputs        = sidechaingate_outputs,
};
#endif /* CONFIG_SIDECHAINGATE_FILTER */

1276
externals/ffmpeg/libavfilter/af_aiir.c vendored Executable file

File diff suppressed because it is too large Load Diff

375
externals/ffmpeg/libavfilter/af_alimiter.c vendored Executable file
View File

@@ -0,0 +1,375 @@
/*
* Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
* Copyright (c) 2015 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Lookahead limiter filter
*/
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
typedef struct AudioLimiterContext {
    const AVClass *class;

    /* user options */
    double limit;        // output ceiling, linear
    double attack;       // attack time (ms in options, seconds after init())
    double release;      // release time (ms in options, seconds after init())
    double att;          // current attenuation factor (1 = no reduction)
    double level_in;     // input gain, linear
    double level_out;    // output gain, linear
    int auto_release;    // "asc" option: smarter release handling
    int auto_level;      // normalize output by 1/limit
    double asc;          // running sum of peaks above limit (asc mode)
    int asc_c;           // count of peaks accumulated in 'asc'
    int asc_pos;         // buffer position associated with the asc state
    double asc_coeff;    // asc aggressiveness, remapped in init()

    /* lookahead state */
    double *buffer;      // circular lookahead buffer of interleaved samples
    int buffer_size;     // size of the lookahead window, in doubles
    int pos;             // current write position in 'buffer'
    int *nextpos;        // pending-peak positions (circular, -1 terminated)
    double *nextdelta;   // per-pending-peak attenuation slopes
    double delta;        // current per-sample attenuation slope
    int nextiter;        // index of the first pending peak
    int nextlen;         // number of pending peaks
    int asc_changed;     // flag guarding asc_pos invalidation
} AudioLimiterContext;
#define OFFSET(x) offsetof(AudioLimiterContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

/* User options; attack/release are in milliseconds here and converted
 * to seconds in init(). */
static const AVOption alimiter_options[] = {
    { "level_in",  "set input level",  OFFSET(level_in),     AV_OPT_TYPE_DOUBLE, {.dbl=1},.015625,   64, A|F },
    { "level_out", "set output level", OFFSET(level_out),    AV_OPT_TYPE_DOUBLE, {.dbl=1},.015625,   64, A|F },
    { "limit",     "set limit",        OFFSET(limit),        AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.0625,    1, A|F },
    { "attack",    "set attack",       OFFSET(attack),       AV_OPT_TYPE_DOUBLE, {.dbl=5}, 0.1,      80, A|F },
    { "release",   "set release",      OFFSET(release),      AV_OPT_TYPE_DOUBLE, {.dbl=50}, 1,     8000, A|F },
    { "asc",       "enable asc",       OFFSET(auto_release), AV_OPT_TYPE_BOOL,   {.i64=0}, 0,         1, A|F },
    { "asc_level", "set asc level",    OFFSET(asc_coeff),    AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0,       1, A|F },
    { "level",     "auto level",       OFFSET(auto_level),   AV_OPT_TYPE_BOOL,   {.i64=1}, 0,         1, A|F },
    { NULL }
};
AVFILTER_DEFINE_CLASS(alimiter);
/* One-time option post-processing: convert times to seconds, seed the
 * attenuation state and remap the asc aggressiveness coefficient. */
static av_cold int init(AVFilterContext *ctx)
{
    AudioLimiterContext *s = ctx->priv;

    /* Start at unity attenuation with no pending asc position. */
    s->att = 1.;
    s->asc_pos = -1;

    /* Options come in as milliseconds; the DSP loop wants seconds. */
    s->attack  /= 1000.;
    s->release /= 1000.;

    /* Remap asc_level [0,1] to a negative exponential coefficient. */
    s->asc_coeff = pow(0.5, s->asc_coeff - 0.5) * 2 * -1;

    return 0;
}
/* Compute the per-sample release slope towards unity gain. When asc is
 * active the slope may be reduced so the attenuation heads for the average
 * of recent over-limit peaks instead of snapping back to 1. */
static double get_rdelta(AudioLimiterContext *s, double release, int sample_rate,
                         double peak, double limit, double patt, int asc)
{
    double rdelta = (1.0 - patt) / (sample_rate * release);

    if (asc && s->auto_release && s->asc_c > 0) {
        const double avg_att = limit / (s->asc_coeff * s->asc) * (double)s->asc_c;

        if (avg_att > patt) {
            /* Never slower than a tenth of the normal release slope. */
            const double cand = FFMAX((avg_att - patt) / (sample_rate * release),
                                      rdelta / 10);

            if (cand < rdelta)
                rdelta = cand;
        }
    }

    return rdelta;
}
/* Lookahead limiter core. Incoming samples are written into a circular
 * buffer while the (delayed) samples leaving the buffer are attenuated by
 * s->att, which ramps by s->delta per sample. Future peaks are tracked in
 * the nextpos/nextdelta queues so attenuation reaches the required value
 * exactly when the peak exits the lookahead window.
 * NOTE(review): the state machine below is heavily order-dependent;
 * comments describe intent, code is unchanged. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AudioLimiterContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    const double *src = (const double *)in->data[0];
    const int channels = inlink->channels;
    const int buffer_size = s->buffer_size;
    double *dst, *buffer = s->buffer;
    const double release = s->release;
    const double limit = s->limit;
    double *nextdelta = s->nextdelta;
    /* auto_level normalizes the output so the ceiling maps to full scale */
    double level = s->auto_level ? 1 / limit : 1;
    const double level_out = s->level_out;
    const double level_in = s->level_in;
    int *nextpos = s->nextpos;
    AVFrame *out;
    double *buf;
    int n, c, i;

    /* Process in place when possible. */
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];

    for (n = 0; n < in->nb_samples; n++) {
        double peak = 0;

        /* Push the new (gain-scaled) frame of channels into the lookahead
         * buffer and find its cross-channel peak. */
        for (c = 0; c < channels; c++) {
            double sample = src[c] * level_in;

            buffer[s->pos + c] = sample;
            peak = FFMAX(peak, fabs(sample));
        }

        /* asc: accumulate over-limit peaks for the adaptive release. */
        if (s->auto_release && peak > limit) {
            s->asc += peak;
            s->asc_c++;
        }

        if (peak > limit) {
            /* Attenuation needed for this peak and the release slope
             * to use once it has passed. */
            double patt = FFMIN(limit / peak, 1.);
            double rdelta = get_rdelta(s, release, inlink->sample_rate,
                                       peak, limit, patt, 0);
            /* Slope that reaches 'patt' over the full lookahead window. */
            double delta = (limit / peak - s->att) / buffer_size * channels;
            int found = 0;

            if (delta < s->delta) {
                /* Steeper than anything pending: restart the queue with
                 * this peak only. */
                s->delta = delta;
                nextpos[0] = s->pos;
                nextpos[1] = -1;
                nextdelta[0] = rdelta;
                s->nextlen = 1;
                s->nextiter= 0;
            } else {
                /* Otherwise see whether this peak forces a steeper slope
                 * at some already-queued peak, and append it. */
                for (i = s->nextiter; i < s->nextiter + s->nextlen; i++) {
                    int j = i % buffer_size;
                    double ppeak, pdelta;

                    ppeak = fabs(buffer[nextpos[j]]) > fabs(buffer[nextpos[j] + 1]) ?
                            fabs(buffer[nextpos[j]]) : fabs(buffer[nextpos[j] + 1]);
                    pdelta = (limit / peak - limit / ppeak) / (((buffer_size - nextpos[j] + s->pos) % buffer_size) / channels);
                    if (pdelta < nextdelta[j]) {
                        nextdelta[j] = pdelta;
                        found = 1;
                        break;
                    }
                }
                if (found) {
                    s->nextlen = i - s->nextiter + 1;
                    nextpos[(s->nextiter + s->nextlen) % buffer_size] = s->pos;
                    nextdelta[(s->nextiter + s->nextlen) % buffer_size] = rdelta;
                    nextpos[(s->nextiter + s->nextlen + 1) % buffer_size] = -1;
                    s->nextlen++;
                }
            }
        }

        /* The oldest frame in the buffer is about to be emitted. */
        buf = &s->buffer[(s->pos + channels) % buffer_size];
        peak = 0;
        for (c = 0; c < channels; c++) {
            double sample = buf[c];

            peak = FFMAX(peak, fabs(sample));
        }

        if (s->pos == s->asc_pos && !s->asc_changed)
            s->asc_pos = -1;

        /* Remove this peak's contribution from the asc accumulator as it
         * leaves the window. */
        if (s->auto_release && s->asc_pos == -1 && peak > limit) {
            s->asc -= peak;
            s->asc_c--;
        }

        /* Advance attenuation and apply it to the delayed samples. */
        s->att += s->delta;

        for (c = 0; c < channels; c++)
            dst[c] = buf[c] * s->att;

        /* Reached the next queued peak: switch to its release slope. */
        if ((s->pos + channels) % buffer_size == nextpos[s->nextiter]) {
            if (s->auto_release) {
                s->delta = get_rdelta(s, release, inlink->sample_rate,
                                      peak, limit, s->att, 1);
                if (s->nextlen > 1) {
                    /* But no faster than needed to land on the following
                     * queued peak in time. */
                    int pnextpos = nextpos[(s->nextiter + 1) % buffer_size];
                    double ppeak = fabs(buffer[pnextpos]) > fabs(buffer[pnextpos + 1]) ?
                                   fabs(buffer[pnextpos]) :
                                   fabs(buffer[pnextpos + 1]);
                    double pdelta = (limit / ppeak - s->att) /
                                    (((buffer_size + pnextpos -
                                    ((s->pos + channels) % buffer_size)) %
                                    buffer_size) / channels);
                    if (pdelta < s->delta)
                        s->delta = pdelta;
                }
            } else {
                s->delta = nextdelta[s->nextiter];
                s->att = limit / peak;
            }

            s->nextlen -= 1;
            nextpos[s->nextiter] = -1;
            s->nextiter = (s->nextiter + 1) % buffer_size;
        }

        /* Clamp and denormal/overshoot cleanup of att and delta. */
        if (s->att > 1.) {
            s->att = 1.;
            s->delta = 0.;
            s->nextiter = 0;
            s->nextlen = 0;
            nextpos[0] = -1;
        }

        if (s->att <= 0.) {
            s->att = 0.0000000000001;
            s->delta = (1.0 - s->att) / (inlink->sample_rate * release);
        }

        if (s->att != 1. && (1. - s->att) < 0.0000000000001)
            s->att = 1.;

        if (s->delta != 0. && fabs(s->delta) < 0.00000000000001)
            s->delta = 0.;

        /* Hard safety clip at the ceiling, then output gain staging. */
        for (c = 0; c < channels; c++)
            dst[c] = av_clipd(dst[c], -limit, limit) * level * level_out;

        s->pos = (s->pos + channels) % buffer_size;
        src += channels;
        dst += channels;
    }

    if (in != out)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
/* Negotiate packed doubles, any channel count, any sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts = ff_all_channel_counts();
    AVFilterFormats *fmts;
    int ret;

    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, fmts)) < 0)
        return ret;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/* Allocate the lookahead buffers. They are sized for 100 ms (the maximum
 * attack is 80 ms), while the active window length is derived from the
 * attack option and rounded down to whole frames of channels. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioLimiterContext *s = ctx->priv;
    const int obuffer_size = inlink->sample_rate * inlink->channels * 100 / 1000. + inlink->channels;

    if (obuffer_size < inlink->channels)
        return AVERROR(EINVAL);

    s->buffer    = av_calloc(obuffer_size, sizeof(*s->buffer));
    s->nextdelta = av_calloc(obuffer_size, sizeof(*s->nextdelta));
    s->nextpos   = av_malloc_array(obuffer_size, sizeof(*s->nextpos));
    if (!s->buffer || !s->nextdelta || !s->nextpos)
        return AVERROR(ENOMEM);

    /* -1 marks "no pending peak" slots. */
    memset(s->nextpos, -1, obuffer_size * sizeof(*s->nextpos));

    s->buffer_size  = inlink->sample_rate * s->attack * inlink->channels;
    s->buffer_size -= s->buffer_size % inlink->channels;

    if (s->buffer_size <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Attack is too small.\n");
        return AVERROR(EINVAL);
    }

    return 0;
}
/* Free all lookahead state (av_freep handles NULL and clears pointers). */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioLimiterContext *s = ctx->priv;

    av_freep(&s->nextpos);
    av_freep(&s->nextdelta);
    av_freep(&s->buffer);
}
/* Single audio input; buffers are sized per-link in config_input(). */
static const AVFilterPad alimiter_inputs[] = {
    {
        .name         = "main",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
/* Single audio output, same format as the input. */
static const AVFilterPad alimiter_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration for the lookahead limiter. */
AVFilter ff_af_alimiter = {
    .name           = "alimiter",
    .description    = NULL_IF_CONFIG_SMALL("Audio lookahead limiter."),
    .priv_size      = sizeof(AudioLimiterContext),
    .priv_class     = &alimiter_class,
    .init           = init,
    .uninit         = uninit,
    .query_formats  = query_formats,
    .inputs         = alimiter_inputs,
    .outputs        = alimiter_outputs,
};

358
externals/ffmpeg/libavfilter/af_amerge.c vendored Executable file
View File

@@ -0,0 +1,358 @@
/*
* Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Audio merging filter
*/
#include "libavutil/avstring.h"
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "filters.h"
#include "audio.h"
#include "internal.h"
/* Upper bound on total output channels (and on the number of inputs). */
#define SWR_CH_MAX 64

typedef struct AMergeContext {
    const AVClass *class;
    int nb_inputs;              // number of input streams ("inputs" option)
    int route[SWR_CH_MAX];      /**< channels routing, see copy_samples */
    int bps;                    // bytes per sample of the negotiated format
    struct amerge_input {
        int nb_ch;              /**< number of channels for the input */
    } *in;                      // per-input info, allocated in init()
} AMergeContext;
#define OFFSET(x) offsetof(AMergeContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Only one option: how many input streams to merge. */
static const AVOption amerge_options[] = {
    { "inputs", "specify the number of inputs", OFFSET(nb_inputs),
      AV_OPT_TYPE_INT, { .i64 = 2 }, 1, SWR_CH_MAX, FLAGS },
    { NULL }
};
AVFILTER_DEFINE_CLASS(amerge);
/* Free the dynamically created input pad names and the per-input array. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;
    int i;

    /* input_pads may be NULL when init() failed before inserting pads;
     * the check is loop-invariant so it is hoisted. */
    if (ctx->input_pads)
        for (i = 0; i < s->nb_inputs; i++)
            av_freep(&ctx->input_pads[i].name);
    av_freep(&s->in);
}
/* Negotiate formats and build the channel routing table.
 * If the input layouts are disjoint and known, channels are routed so the
 * output layout is the union, ordered by channel designation; otherwise a
 * default layout for the total channel count is used with identity routing. */
static int query_formats(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;
    int64_t inlayout[SWR_CH_MAX], outlayout = 0;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int i, ret, overlap = 0, nb_ch = 0;

    for (i = 0; i < s->nb_inputs; i++) {
        /* Layouts must already be known; retry later otherwise. */
        if (!ctx->inputs[i]->in_channel_layouts ||
            !ctx->inputs[i]->in_channel_layouts->nb_channel_layouts) {
            av_log(ctx, AV_LOG_WARNING,
                   "No channel layout for input %d\n", i + 1);
            return AVERROR(EAGAIN);
        }
        inlayout[i] = ctx->inputs[i]->in_channel_layouts->channel_layouts[0];
        if (ctx->inputs[i]->in_channel_layouts->nb_channel_layouts > 1) {
            char buf[256];
            av_get_channel_layout_string(buf, sizeof(buf), 0, inlayout[i]);
            av_log(ctx, AV_LOG_INFO, "Using \"%s\" for input %d\n", buf, i + 1);
        }
        /* A pure channel count (no designations) also forces overlap mode. */
        s->in[i].nb_ch = FF_LAYOUT2COUNT(inlayout[i]);
        if (s->in[i].nb_ch) {
            overlap++;
        } else {
            s->in[i].nb_ch = av_get_channel_layout_nb_channels(inlayout[i]);
            if (outlayout & inlayout[i])
                overlap++;
            outlayout |= inlayout[i];
        }
        nb_ch += s->in[i].nb_ch;
    }
    if (nb_ch > SWR_CH_MAX) {
        av_log(ctx, AV_LOG_ERROR, "Too many channels (max %d)\n", SWR_CH_MAX);
        return AVERROR(EINVAL);
    }
    if (overlap) {
        av_log(ctx, AV_LOG_WARNING,
               "Input channel layouts overlap: "
               "output layout will be determined by the number of distinct input channels\n");
        /* Identity routing: channels are simply concatenated in order. */
        for (i = 0; i < nb_ch; i++)
            s->route[i] = i;
        outlayout = av_get_default_channel_layout(nb_ch);
        /* No default layout for this count: fabricate one with nb_ch bits. */
        if (!outlayout && nb_ch)
            outlayout = 0xFFFFFFFFFFFFFFFFULL >> (64 - nb_ch);
    } else {
        /* Disjoint layouts: for each designation bit in ascending order,
         * assign the next output channel index to whichever input owns it. */
        int *route[SWR_CH_MAX];
        int c, out_ch_number = 0;

        route[0] = s->route;
        for (i = 1; i < s->nb_inputs; i++)
            route[i] = route[i - 1] + s->in[i - 1].nb_ch;
        for (c = 0; c < 64; c++)
            for (i = 0; i < s->nb_inputs; i++)
                if ((inlayout[i] >> c) & 1)
                    *(route[i]++) = out_ch_number++;
    }
    formats = ff_make_format_list(ff_packed_sample_fmts_array);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;
    for (i = 0; i < s->nb_inputs; i++) {
        layouts = NULL;
        if ((ret = ff_add_channel_layout(&layouts, inlayout[i])) < 0)
            return ret;
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
            return ret;
    }
    layouts = NULL;
    if ((ret = ff_add_channel_layout(&layouts, outlayout)) < 0)
        return ret;
    if ((ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;
    return ff_set_common_samplerates(ctx, ff_all_samplerates());
}
/* Validate that all inputs share a sample rate, copy link properties to
 * the output, and log a human-readable description of the channel merge. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AMergeContext *s = ctx->priv;
    AVBPrint bp;
    int i;

    /* No resampling is performed, so rates must match input 0 exactly. */
    for (i = 1; i < s->nb_inputs; i++) {
        if (ctx->inputs[i]->sample_rate != ctx->inputs[0]->sample_rate) {
            av_log(ctx, AV_LOG_ERROR,
                   "Inputs must have the same sample rate "
                   "%d for in%d vs %d\n",
                   ctx->inputs[i]->sample_rate, i, ctx->inputs[0]->sample_rate);
            return AVERROR(EINVAL);
        }
    }
    /* Cached for the per-sample copy loops in copy_samples(). */
    s->bps = av_get_bytes_per_sample(ctx->outputs[0]->format);
    outlink->sample_rate = ctx->inputs[0]->sample_rate;
    outlink->time_base   = ctx->inputs[0]->time_base;

    /* Verbose log: "in0:<layout> + in1:<layout> -> out:<layout>" */
    av_bprint_init(&bp, 0, AV_BPRINT_SIZE_AUTOMATIC);
    for (i = 0; i < s->nb_inputs; i++) {
        av_bprintf(&bp, "%sin%d:", i ? " + " : "", i);
        av_bprint_channel_layout(&bp, -1, ctx->inputs[i]->channel_layout);
    }
    av_bprintf(&bp, " -> out:");
    av_bprint_channel_layout(&bp, -1, ctx->outputs[0]->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE, "%s\n", bp.str);

    return 0;
}
/**
 * Interleave samples from several packed input streams into one packed
 * output stream, placing input channel i at output channel route[i].
 *
 * @param nb_inputs number of inputs
 * @param in        inputs; only the nb_ch field is used
 * @param route     routing table: input channel i -> output channel route[i];
 *                  channels of input k follow those of inputs 0..k-1
 * @param ins       per-input read cursors (packed samples); advanced past
 *                  the copied samples on return
 * @param outs      output write cursor (packed samples); must have room for
 *                  ns frames; advanced past the copied samples on return
 * @param ns        number of sample frames to copy
 * @param bps       bytes per sample
 */
static inline void copy_samples(int nb_inputs, struct amerge_input in[],
                                int *route, uint8_t *ins[],
                                uint8_t **outs, int ns, int bps)
{
    int total_ch = 0;
    int i, c;

    for (i = 0; i < nb_inputs; i++)
        total_ch += in[i].nb_ch;

    while (ns--) {
        int *r = route;

        for (i = 0; i < nb_inputs; i++) {
            for (c = 0; c < in[i].nb_ch; c++) {
                memcpy(*outs + *r++ * bps, ins[i], bps);
                ins[i] += bps;
            }
        }
        *outs += total_ch * bps;
    }
}
/* Free the first nb_inputs frames of the array (NULL entries are fine). */
static void free_frames(int nb_inputs, AVFrame **input_frames)
{
    while (nb_inputs > 0)
        av_frame_free(&input_frames[--nb_inputs]);
}
/* Consume exactly nb_samples from every input (the caller has verified
 * availability), merge them via copy_samples() and push one output frame.
 * Timestamps are taken from the first input. */
static int try_push_frame(AVFilterContext *ctx, int nb_samples)
{
    AMergeContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int i, ret;
    AVFrame *outbuf, *inbuf[SWR_CH_MAX] = { NULL };
    uint8_t *outs, *ins[SWR_CH_MAX];

    for (i = 0; i < ctx->nb_inputs; i++) {
        /* min == max == nb_samples: activate() guaranteed this many are
         * queued, so ret > 0 and inbuf[i] is non-NULL here. */
        ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &inbuf[i]);
        if (ret < 0) {
            free_frames(i, inbuf);
            return ret;
        }
        ins[i] = inbuf[i]->data[0];
    }

    outbuf = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
    if (!outbuf) {
        free_frames(s->nb_inputs, inbuf);
        return AVERROR(ENOMEM);
    }

    outs = outbuf->data[0];
    outbuf->pts = inbuf[0]->pts;

    outbuf->nb_samples     = nb_samples;
    outbuf->channel_layout = outlink->channel_layout;
    outbuf->channels       = outlink->channels;

    /* Effectively a single pass; the while+reset keeps the switch and the
     * unrolled calls in one place. */
    while (nb_samples) {
        /* Unroll the most common sample formats: speed +~350% for the loop,
           +~13% overall (including two common decoders) */
        switch (s->bps) {
        case 1:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, 1);
            break;
        case 2:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, 2);
            break;
        case 4:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, 4);
            break;
        default:
            copy_samples(s->nb_inputs, s->in, s->route, ins, &outs, nb_samples, s->bps);
            break;
        }

        nb_samples = 0;
    }

    free_frames(s->nb_inputs, inbuf);
    return ff_filter_frame(ctx->outputs[0], outbuf);
}
/* Activate callback: push the largest block of samples all inputs can
 * supply, then propagate EOF status or request more data as needed. */
static int activate(AVFilterContext *ctx)
{
    int i, status;
    int ret, nb_samples;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    /* nb_samples = min queued samples over all inputs (0 if any is empty). */
    nb_samples = ff_inlink_queued_samples(ctx->inputs[0]);
    for (i = 1; i < ctx->nb_inputs && nb_samples > 0; i++) {
        nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[i]), nb_samples);
    }

    if (nb_samples) {
        ret = try_push_frame(ctx, nb_samples);
        if (ret < 0)
            return ret;
    }

    for (i = 0; i < ctx->nb_inputs; i++) {
        if (ff_inlink_queued_samples(ctx->inputs[i]))
            continue;
        /* An input with no queued data that hit EOF ends the whole merge. */
        if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            return 0;
        } else if (ff_outlink_frame_wanted(ctx->outputs[0])) {
            ff_inlink_request_frame(ctx->inputs[i]);
            return 0;
        }
    }
    return 0;
}
/* Create nb_inputs audio input pads named "in0", "in1", ...
 * Pad names are heap-allocated and released in uninit(). */
static av_cold int init(AVFilterContext *ctx)
{
    AMergeContext *s = ctx->priv;
    int i, ret;

    s->in = av_calloc(s->nb_inputs, sizeof(*s->in));
    if (!s->in)
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterPad pad = {
            .name = av_asprintf("in%d", i),
            .type = AVMEDIA_TYPE_AUDIO,
        };

        if (!pad.name)
            return AVERROR(ENOMEM);
        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }
    return 0;
}
/* Single output carrying the merged multi-channel stream. */
static const AVFilterPad amerge_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};
/* Filter registration; inputs are created dynamically in init(), hence
 * .inputs = NULL and the DYNAMIC_INPUTS flag. */
AVFilter ff_af_amerge = {
    .name          = "amerge",
    .description   = NULL_IF_CONFIG_SMALL("Merge two or more audio streams into "
                                          "a single multi-channel stream."),
    .priv_size     = sizeof(AMergeContext),
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = NULL,
    .outputs       = amerge_outputs,
    .priv_class    = &amerge_class,
    .flags         = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

657
externals/ffmpeg/libavfilter/af_amix.c vendored Executable file
View File

@@ -0,0 +1,657 @@
/*
* Audio Mix Filter
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Audio Mix Filter
*
* Mixes audio from multiple sources into a single output. The channel layout,
* sample rate, and sample format will be the same for all inputs and the
* output.
*/
#include "libavutil/attributes.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/float_dsp.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
#define INPUT_ON 1 /**< input is active */
#define INPUT_EOF 2 /**< input has reached EOF (may still be active) */
#define DURATION_LONGEST 0
#define DURATION_SHORTEST 1
#define DURATION_FIRST 2
/* One queued frame's size and timestamp (singly-linked list node). */
typedef struct FrameInfo {
    int nb_samples;          // samples in this frame
    int64_t pts;             // presentation timestamp of this frame
    struct FrameInfo *next;  // next frame in queue order, or NULL
} FrameInfo;
/**
 * Linked list used to store timestamps and frame sizes of all frames in the
 * FIFO for the first input.
 *
 * This is needed to keep timestamps synchronized for the case where multiple
 * input frames are pushed to the filter for processing before a frame is
 * requested by the output link.
 */
typedef struct FrameList {
    int nb_frames;    // number of queued FrameInfo nodes
    int nb_samples;   // total samples across all queued frames
    FrameInfo *list;  // head of the queue (oldest frame), or NULL
    FrameInfo *end;   // tail of the queue (newest frame), or NULL
} FrameList;
/**
 * Free every queued FrameInfo node and reset the list to its empty state.
 * A NULL frame_list is tolerated and ignored.
 */
static void frame_list_clear(FrameList *frame_list)
{
    FrameInfo *cur;

    if (!frame_list)
        return;

    cur = frame_list->list;
    while (cur) {
        FrameInfo *next = cur->next;
        av_free(cur);
        cur = next;
    }
    frame_list->list       = NULL;
    frame_list->end        = NULL;
    frame_list->nb_frames  = 0;
    frame_list->nb_samples = 0;
}
/** Return the sample count of the oldest queued frame, or 0 if none. */
static int frame_list_next_frame_size(FrameList *frame_list)
{
    const FrameInfo *head = frame_list->list;
    return head ? head->nb_samples : 0;
}
/** Return the pts of the oldest queued frame, or AV_NOPTS_VALUE if none. */
static int64_t frame_list_next_pts(FrameList *frame_list)
{
    const FrameInfo *head = frame_list->list;
    return head ? head->pts : AV_NOPTS_VALUE;
}
/**
 * Drop bookkeeping for nb_samples samples from the front of the list.
 * A partially consumed head frame is shrunk in place and its pts advanced
 * by the consumed amount (pts counts samples: output tb is 1/sample_rate).
 */
static void frame_list_remove_samples(FrameList *frame_list, int nb_samples)
{
    int remaining = nb_samples;

    if (remaining >= frame_list->nb_samples) {
        frame_list_clear(frame_list);
        return;
    }

    while (remaining > 0) {
        FrameInfo *head = frame_list->list;
        av_assert0(head);
        if (head->nb_samples <= remaining) {
            /* whole frame consumed: unlink and free it */
            remaining              -= head->nb_samples;
            frame_list->nb_samples -= head->nb_samples;
            frame_list->nb_frames--;
            frame_list->list = head->next;
            if (!frame_list->list)
                frame_list->end = NULL;
            av_free(head);
        } else {
            /* partial consume: shrink the head frame in place */
            head->nb_samples       -= remaining;
            head->pts              += remaining;
            frame_list->nb_samples -= remaining;
            remaining = 0;
        }
    }
}
/**
 * Append bookkeeping for a frame of nb_samples samples at the given pts.
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure.
 */
static int frame_list_add_frame(FrameList *frame_list, int nb_samples, int64_t pts)
{
    FrameInfo *info = av_malloc(sizeof(*info));

    if (!info)
        return AVERROR(ENOMEM);
    info->nb_samples = nb_samples;
    info->pts        = pts;
    info->next       = NULL;

    if (frame_list->list) {
        av_assert0(frame_list->end);
        frame_list->end->next = info;
    } else {
        frame_list->list = info;
    }
    frame_list->end = info;

    frame_list->nb_frames++;
    frame_list->nb_samples += nb_samples;

    return 0;
}
/* FIXME: use directly links fifo */
/** Per-instance state for the amix filter. */
typedef struct MixContext {
    const AVClass *class;       /**< class for AVOptions */
    AVFloatDSPContext *fdsp;
    int nb_inputs;              /**< number of inputs */
    int active_inputs;          /**< number of input currently active */
    int duration_mode;          /**< mode for determining duration */
    float dropout_transition;   /**< transition time when an input drops out */
    char *weights_str;          /**< string for custom weights for every input */
    int nb_channels;            /**< number of channels */
    int sample_rate;            /**< sample rate */
    int planar;                 /**< non-zero if the output sample format is planar */
    AVAudioFifo **fifos;        /**< audio fifo for each input */
    uint8_t *input_state;       /**< current state of each input */
    float *input_scale;         /**< mixing scale factor for each input */
    float *weights;             /**< custom weights for every input */
    float weight_sum;           /**< sum of custom weights for every input */
    float *scale_norm;          /**< normalization factor for every input */
    int64_t next_pts;           /**< calculated pts for next output frame */
    FrameList *frame_list;      /**< list of frame info for the first input */
} MixContext;
#define OFFSET(x) offsetof(MixContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM      /**< audio option */
#define F AV_OPT_FLAG_FILTERING_PARAM  /**< filtering option */
#define T AV_OPT_FLAG_RUNTIME_PARAM    /**< changeable at runtime via commands */
/** AVOptions exposed by the amix filter. */
static const AVOption amix_options[] = {
    { "inputs", "Number of inputs.",
            OFFSET(nb_inputs), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT16_MAX, A|F },
    { "duration", "How to determine the end-of-stream.",
            OFFSET(duration_mode), AV_OPT_TYPE_INT, { .i64 = DURATION_LONGEST }, 0,  2, A|F, "duration" },
        { "longest",  "Duration of longest input.",  0, AV_OPT_TYPE_CONST, { .i64 = DURATION_LONGEST  }, 0, 0, A|F, "duration" },
        { "shortest", "Duration of shortest input.", 0, AV_OPT_TYPE_CONST, { .i64 = DURATION_SHORTEST }, 0, 0, A|F, "duration" },
        { "first",    "Duration of first input.",    0, AV_OPT_TYPE_CONST, { .i64 = DURATION_FIRST    }, 0, 0, A|F, "duration" },
    { "dropout_transition", "Transition time, in seconds, for volume "
                            "renormalization when an input stream ends.",
            OFFSET(dropout_transition), AV_OPT_TYPE_FLOAT, { .dbl = 2.0 }, 0, INT_MAX, A|F },
    { "weights", "Set weight for each input.",
            OFFSET(weights_str), AV_OPT_TYPE_STRING, {.str="1 1"}, 0, 0, A|F|T },
    { NULL }
};
AVFILTER_DEFINE_CLASS(amix);
/**
 * Update the scaling factors to apply to each input during mixing.
 *
 * This balances the full volume range between active inputs and handles
 * volume transitions when EOF is encountered on an input but mixing continues
 * with the remaining inputs.
 */
static void calculate_scales(MixContext *s, int nb_samples)
{
    float weight_sum = 0.f;
    int i;
    /* sum of absolute weights over the inputs still mixing */
    for (i = 0; i < s->nb_inputs; i++)
        if (s->input_state[i] & INPUT_ON)
            weight_sum += FFABS(s->weights[i]);
    /* ramp each input's normalization factor down towards the active-only
     * target (weight_sum / |weight|) over dropout_transition seconds */
    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] & INPUT_ON) {
            if (s->scale_norm[i] > weight_sum / FFABS(s->weights[i])) {
                /* NOTE(review): the step size uses s->weight_sum (sum over
                 * ALL inputs) while the target uses the active-only
                 * weight_sum above -- presumably intentional to fix the fade
                 * rate; confirm against upstream. */
                s->scale_norm[i] -= ((s->weight_sum / FFABS(s->weights[i])) / s->nb_inputs) *
                                    nb_samples / (s->dropout_transition * s->sample_rate);
                s->scale_norm[i] = FFMAX(s->scale_norm[i], weight_sum / FFABS(s->weights[i]));
            }
        }
    }
    /* final per-input gain: 1/scale_norm, carrying the weight's sign */
    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] & INPUT_ON)
            s->input_scale[i] = 1.0f / s->scale_norm[i] * FFSIGN(s->weights[i]);
        else
            s->input_scale[i] = 0.0f;
    }
}
/**
 * Output link configuration: allocate the per-input FIFOs and the
 * bookkeeping/scale arrays once the output format is known.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext *s      = ctx->priv;
    int i;
    char buf[64];
    s->planar          = av_sample_fmt_is_planar(outlink->format);
    s->sample_rate     = outlink->sample_rate;
    /* one pts tick == one sample */
    outlink->time_base = (AVRational){ 1, outlink->sample_rate };
    s->next_pts        = AV_NOPTS_VALUE;
    s->frame_list = av_mallocz(sizeof(*s->frame_list));
    if (!s->frame_list)
        return AVERROR(ENOMEM);
    s->fifos = av_mallocz_array(s->nb_inputs, sizeof(*s->fifos));
    if (!s->fifos)
        return AVERROR(ENOMEM);
    s->nb_channels = outlink->channels;
    for (i = 0; i < s->nb_inputs; i++) {
        /* initial FIFO capacity of 1024 samples per input */
        s->fifos[i] = av_audio_fifo_alloc(outlink->format, s->nb_channels, 1024);
        if (!s->fifos[i])
            return AVERROR(ENOMEM);
    }
    /* all inputs start in the INPUT_ON state */
    s->input_state = av_malloc(s->nb_inputs);
    if (!s->input_state)
        return AVERROR(ENOMEM);
    memset(s->input_state, INPUT_ON, s->nb_inputs);
    s->active_inputs = s->nb_inputs;
    s->input_scale = av_mallocz_array(s->nb_inputs, sizeof(*s->input_scale));
    s->scale_norm  = av_mallocz_array(s->nb_inputs, sizeof(*s->scale_norm));
    if (!s->input_scale || !s->scale_norm)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->nb_inputs; i++)
        s->scale_norm[i] = s->weight_sum / FFABS(s->weights[i]);
    calculate_scales(s, 0);
    av_get_channel_layout_string(buf, sizeof(buf), -1, outlink->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE,
           "inputs:%d fmt:%s srate:%d cl:%s\n", s->nb_inputs,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf);
    return 0;
}
/**
 * Read samples from the input FIFOs, mix, and write to the output link.
 */
static int output_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MixContext      *s = ctx->priv;
    AVFrame *out_buf, *in_buf;
    int nb_samples, ns, i;
    if (s->input_state[0] & INPUT_ON) {
        /* first input live: use the corresponding frame size */
        nb_samples = frame_list_next_frame_size(s->frame_list);
        for (i = 1; i < s->nb_inputs; i++) {
            if (s->input_state[i] & INPUT_ON) {
                ns = av_audio_fifo_size(s->fifos[i]);
                if (ns < nb_samples) {
                    if (!(s->input_state[i] & INPUT_EOF))
                        /* unclosed input with not enough samples */
                        return 0;
                    /* closed input to drain */
                    nb_samples = ns;
                }
            }
        }
    } else {
        /* first input closed: use the available samples */
        nb_samples = INT_MAX;
        for (i = 1; i < s->nb_inputs; i++) {
            if (s->input_state[i] & INPUT_ON) {
                ns = av_audio_fifo_size(s->fifos[i]);
                nb_samples = FFMIN(nb_samples, ns);
            }
        }
        /* no other input active either: signal EOF downstream */
        if (nb_samples == INT_MAX) {
            ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
            return 0;
        }
    }
    /* advance the first-input bookkeeping and the gain ramps */
    s->next_pts = frame_list_next_pts(s->frame_list);
    frame_list_remove_samples(s->frame_list, nb_samples);
    calculate_scales(s, nb_samples);
    if (nb_samples == 0)
        return 0;
    out_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!out_buf)
        return AVERROR(ENOMEM);
    /* scratch buffer each input is read into before being mixed in */
    in_buf = ff_get_audio_buffer(outlink, nb_samples);
    if (!in_buf) {
        av_frame_free(&out_buf);
        return AVERROR(ENOMEM);
    }
    for (i = 0; i < s->nb_inputs; i++) {
        if (s->input_state[i] & INPUT_ON) {
            int planes, plane_size, p;
            av_audio_fifo_read(s->fifos[i], (void **)in_buf->extended_data,
                               nb_samples);
            planes     = s->planar ? s->nb_channels : 1;
            plane_size = nb_samples * (s->planar ? 1 : s->nb_channels);
            plane_size = FFALIGN(plane_size, 16);
            /* out += input_scale[i] * in, float or double per format */
            if (out_buf->format == AV_SAMPLE_FMT_FLT ||
                out_buf->format == AV_SAMPLE_FMT_FLTP) {
                for (p = 0; p < planes; p++) {
                    s->fdsp->vector_fmac_scalar((float *)out_buf->extended_data[p],
                                                (float *) in_buf->extended_data[p],
                                                s->input_scale[i], plane_size);
                }
            } else {
                for (p = 0; p < planes; p++) {
                    s->fdsp->vector_dmac_scalar((double *)out_buf->extended_data[p],
                                                (double *) in_buf->extended_data[p],
                                                s->input_scale[i], plane_size);
                }
            }
        }
    }
    av_frame_free(&in_buf);
    out_buf->pts = s->next_pts;
    if (s->next_pts != AV_NOPTS_VALUE)
        s->next_pts += nb_samples; /* pts counts samples (tb 1/sample_rate) */
    return ff_filter_frame(outlink, out_buf);
}
/**
 * Ask each input after the first for more data when its FIFO holds fewer
 * than min_samples samples, then try to produce an output frame.
 */
static int request_samples(AVFilterContext *ctx, int min_samples)
{
    MixContext *s = ctx->priv;
    int i;

    av_assert0(s->nb_inputs > 1);

    for (i = 1; i < s->nb_inputs; i++) {
        int state = s->input_state[i];
        /* skip inputs that are finished or already at EOF */
        if (!(state & INPUT_ON) || (state & INPUT_EOF))
            continue;
        if (av_audio_fifo_size(s->fifos[i]) < min_samples)
            ff_inlink_request_frame(ctx->inputs[i]);
    }

    return output_frame(ctx->outputs[0]);
}
/**
 * Count the inputs still mixing and decide, per the duration option,
 * whether the filter should terminate.
 *
 * @return 0 if mixing should continue, or AVERROR_EOF if mixing should stop.
 */
static int calc_active_inputs(MixContext *s)
{
    int i, nb_active = 0;

    for (i = 0; i < s->nb_inputs; i++)
        if (s->input_state[i] & INPUT_ON)
            nb_active++;
    s->active_inputs = nb_active;

    if (!nb_active)
        return AVERROR_EOF;
    if (s->duration_mode == DURATION_FIRST && !(s->input_state[0] & INPUT_ON))
        return AVERROR_EOF;
    if (s->duration_mode == DURATION_SHORTEST && nb_active != s->nb_inputs)
        return AVERROR_EOF;
    return 0;
}
/**
 * Filter activation callback: ingest available frames into the per-input
 * FIFOs, track per-input EOF state, and produce output when possible.
 */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    MixContext *s = ctx->priv;
    AVFrame *buf = NULL;
    int i, ret;
    FF_FILTER_FORWARD_STATUS_BACK_ALL(outlink, ctx);
    /* consume one frame from each input that has one queued */
    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterLink *inlink = ctx->inputs[i];
        if ((ret = ff_inlink_consume_frame(ctx->inputs[i], &buf)) > 0) {
            if (i == 0) {
                /* only the first input drives output frame sizes and pts */
                int64_t pts = av_rescale_q(buf->pts, inlink->time_base,
                                           outlink->time_base);
                ret = frame_list_add_frame(s->frame_list, buf->nb_samples, pts);
                if (ret < 0) {
                    av_frame_free(&buf);
                    return ret;
                }
            }
            ret = av_audio_fifo_write(s->fifos[i], (void **)buf->extended_data,
                                      buf->nb_samples);
            if (ret < 0) {
                av_frame_free(&buf);
                return ret;
            }
            av_frame_free(&buf);
            ret = output_frame(outlink);
            if (ret < 0)
                return ret;
        }
    }
    /* pick up EOF status changes on the inputs */
    for (i = 0; i < s->nb_inputs; i++) {
        int64_t pts;
        int status;
        if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
            if (status == AVERROR_EOF) {
                if (i == 0) {
                    s->input_state[i] = 0;
                    if (s->nb_inputs == 1) {
                        ff_outlink_set_status(outlink, status, pts);
                        return 0;
                    }
                } else {
                    /* keep draining the FIFO; fully off once it is empty */
                    s->input_state[i] |= INPUT_EOF;
                    if (av_audio_fifo_size(s->fifos[i]) == 0) {
                        s->input_state[i] = 0;
                    }
                }
            }
        }
    }
    if (calc_active_inputs(s)) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->next_pts);
        return 0;
    }
    if (ff_outlink_frame_wanted(outlink)) {
        int wanted_samples;
        if (!(s->input_state[0] & INPUT_ON))
            return request_samples(ctx, 1);
        if (s->frame_list->nb_frames == 0) {
            ff_inlink_request_frame(ctx->inputs[0]);
            return 0;
        }
        av_assert0(s->frame_list->nb_frames > 0);
        /* request enough from the other inputs to match the next frame */
        wanted_samples = frame_list_next_frame_size(s->frame_list);
        return request_samples(ctx, wanted_samples);
    }
    return 0;
}
/**
 * Parse the "weights" option string into per-input weights. If fewer
 * weights than inputs are given, the last parsed weight is repeated for
 * the remaining inputs. Also accumulates weight_sum as the sum of the
 * absolute weights.
 */
static void parse_weights(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    char *cur = s->weights_str;
    float weight = 1.f;
    int n = 0;

    s->weight_sum = 0.f;
    while (n < s->nb_inputs) {
        weight = av_strtod(cur, &cur);
        s->weights[n] = weight;
        s->weight_sum += FFABS(weight);
        n++;
        if (!(cur && *cur))
            break;
        cur++; /* skip the separator character */
    }
    /* repeat the last weight for any unspecified inputs */
    for (; n < s->nb_inputs; n++) {
        s->weights[n] = weight;
        s->weight_sum += FFABS(weight);
    }
}
/**
 * Create the dynamic input pads ("input0".."inputN-1"), allocate the DSP
 * context and weights array, then parse the weights option.
 * Pad names are allocated here and released in uninit().
 */
static av_cold int init(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    int i, ret;
    for (i = 0; i < s->nb_inputs; i++) {
        AVFilterPad pad = { 0 };
        pad.type = AVMEDIA_TYPE_AUDIO;
        pad.name = av_asprintf("input%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);
        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }
    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        return AVERROR(ENOMEM);
    s->weights = av_mallocz_array(s->nb_inputs, sizeof(*s->weights));
    if (!s->weights)
        return AVERROR(ENOMEM);
    parse_weights(ctx);
    return 0;
}
/** Release all per-instance allocations, including dynamic pad names. */
static av_cold void uninit(AVFilterContext *ctx)
{
    MixContext *s = ctx->priv;
    int i;

    /* the frame list must be cleared before the list head is freed */
    frame_list_clear(s->frame_list);
    av_freep(&s->frame_list);

    if (s->fifos) {
        for (i = 0; i < s->nb_inputs; i++)
            av_audio_fifo_free(s->fifos[i]);
        av_freep(&s->fifos);
    }

    av_freep(&s->fdsp);
    av_freep(&s->weights);
    av_freep(&s->scale_norm);
    av_freep(&s->input_scale);
    av_freep(&s->input_state);

    /* input pads were created in init() with allocated names */
    for (i = 0; i < ctx->nb_inputs; i++)
        av_freep(&ctx->input_pads[i].name);
}
/**
 * Advertise supported formats: float/double, packed or planar, any
 * channel count/layout, any sample rate.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts;
    int ret;
    layouts = ff_all_channel_counts();
    if (!layouts) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    if ((ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLT )) < 0 ||
        (ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLTP)) < 0 ||
        (ret = ff_add_format(&formats, AV_SAMPLE_FMT_DBL )) < 0 ||
        (ret = ff_add_format(&formats, AV_SAMPLE_FMT_DBLP)) < 0 ||
        (ret = ff_set_common_formats (ctx, formats)) < 0 ||
        (ret = ff_set_common_channel_layouts(ctx, layouts)) < 0 ||
        (ret = ff_set_common_samplerates(ctx, ff_all_samplerates())) < 0)
        goto fail;
    return 0;
fail:
    /* NOTE(review): a partially built "formats" list is not freed on this
     * failure path -- looks like a leak; confirm against the
     * ff_set_common_formats() ownership rules. */
    if (layouts)
        av_freep(&layouts->channel_layouts);
    av_freep(&layouts);
    return ret;
}
/**
 * Handle runtime option changes (the "weights" option): re-parse the
 * weights and reset the per-input normalization state.
 */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    MixContext *s = ctx->priv;
    int i, ret;

    ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);
    if (ret < 0)
        return ret;

    parse_weights(ctx);
    for (i = 0; i < s->nb_inputs; i++)
        s->scale_norm[i] = s->weight_sum / FFABS(s->weights[i]);
    calculate_scales(s, 0);

    return 0;
}
/** Single audio output pad; input pads are created dynamically in init(). */
static const AVFilterPad avfilter_af_amix_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
    },
    { NULL }
};
/** amix filter definition: N dynamic audio inputs mixed into one output. */
AVFilter ff_af_amix = {
    .name            = "amix",
    .description     = NULL_IF_CONFIG_SMALL("Audio mixing."),
    .priv_size       = sizeof(MixContext),
    .priv_class      = &amix_class,
    .init            = init,
    .uninit          = uninit,
    .activate        = activate,
    .query_formats   = query_formats,
    .inputs          = NULL,
    .outputs         = avfilter_af_amix_outputs,
    .process_command = process_command,
    .flags           = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

218
externals/ffmpeg/libavfilter/af_amultiply.c vendored Executable file
View File

@@ -0,0 +1,218 @@
/*
* Copyright (c) 2018 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "filters.h"
#include "internal.h"
/** Per-instance state for the amultiply filter. */
typedef struct AudioMultiplyContext {
    const AVClass *class;
    AVFrame *frames[2];      /**< pending frame from each of the two inputs */
    int planes;              /**< planes to process (channels if planar, else 1) */
    int channels;            /**< number of channels */
    int samples_align;       /**< alignment used when padding plane sizes */
    AVFloatDSPContext *fdsp;
} AudioMultiplyContext;
/**
 * Advertise supported formats: float/double (packed or planar), any
 * channel count/layout, any sample rate.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;
    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;
    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;
    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/**
 * Activation callback: take matching-length frames from the two inputs,
 * multiply them sample by sample, and emit the product.
 */
static int activate(AVFilterContext *ctx)
{
    AudioMultiplyContext *s = ctx->priv;
    int i, ret, status;
    int nb_samples;
    int64_t pts;
    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
    /* process only as many samples as both inputs have queued */
    nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[0]),
                       ff_inlink_queued_samples(ctx->inputs[1]));
    for (i = 0; i < ctx->nb_inputs && nb_samples > 0; i++) {
        if (s->frames[i])
            continue;
        if (ff_inlink_check_available_samples(ctx->inputs[i], nb_samples) > 0) {
            ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &s->frames[i]);
            if (ret < 0)
                return ret;
        }
    }
    /* both operands available: multiply and send downstream */
    if (s->frames[0] && s->frames[1]) {
        AVFrame *out;
        int plane_samples;
        if (av_sample_fmt_is_planar(ctx->inputs[0]->format))
            plane_samples = FFALIGN(s->frames[0]->nb_samples, s->samples_align);
        else
            plane_samples = FFALIGN(s->frames[0]->nb_samples * s->channels, s->samples_align);
        out = ff_get_audio_buffer(ctx->outputs[0], s->frames[0]->nb_samples);
        if (!out)
            return AVERROR(ENOMEM);
        /* output keeps the first input's timestamp */
        out->pts = s->frames[0]->pts;
        if (av_get_packed_sample_fmt(ctx->inputs[0]->format) == AV_SAMPLE_FMT_FLT) {
            for (i = 0; i < s->planes; i++) {
                s->fdsp->vector_fmul((float *)out->extended_data[i],
                                     (const float *)s->frames[0]->extended_data[i],
                                     (const float *)s->frames[1]->extended_data[i],
                                     plane_samples);
            }
        } else {
            for (i = 0; i < s->planes; i++) {
                s->fdsp->vector_dmul((double *)out->extended_data[i],
                                     (const double *)s->frames[0]->extended_data[i],
                                     (const double *)s->frames[1]->extended_data[i],
                                     plane_samples);
            }
        }
        /* restore FPU state after possible MMX-based DSP routines */
        emms_c();
        av_frame_free(&s->frames[0]);
        av_frame_free(&s->frames[1]);
        ret = ff_filter_frame(ctx->outputs[0], out);
        if (ret < 0)
            return ret;
    }
    /* forward EOF once either input runs dry */
    if (!nb_samples) {
        for (i = 0; i < 2; i++) {
            if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
                ff_outlink_set_status(ctx->outputs[0], status, pts);
                return 0;
            }
        }
    }
    /* request more data from whichever input is empty */
    if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        for (i = 0; i < 2; i++) {
            if (ff_inlink_queued_samples(ctx->inputs[i]) > 0)
                continue;
            ff_inlink_request_frame(ctx->inputs[i]);
            return 0;
        }
    }
    return 0;
}
/** Cache channel/plane layout once the output format is negotiated. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioMultiplyContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int planar = av_sample_fmt_is_planar(inlink->format);

    s->channels = inlink->channels;
    /* packed formats are processed as a single interleaved plane */
    s->planes = planar ? inlink->channels : 1;
    s->samples_align = 16;

    return 0;
}
/** Allocate the float DSP context used for the vector multiplies. */
static av_cold int init(AVFilterContext *ctx)
{
    AudioMultiplyContext *s = ctx->priv;

    s->fdsp = avpriv_float_dsp_alloc(0);
    return s->fdsp ? 0 : AVERROR(ENOMEM);
}
/** Free the float DSP context. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioMultiplyContext *s = ctx->priv;
    av_freep(&s->fdsp);
}
/** Two audio inputs: the streams to be multiplied together. */
static const AVFilterPad inputs[] = {
    {
        .name = "multiply0",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    {
        .name = "multiply1",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/** Single audio output carrying the product stream. */
static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
    },
    { NULL }
};
/** amultiply filter definition: sample-wise product of two audio streams. */
AVFilter ff_af_amultiply = {
    .name          = "amultiply",
    .description   = NULL_IF_CONFIG_SMALL("Multiply two audio streams."),
    .priv_size     = sizeof(AudioMultiplyContext),
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
};

778
externals/ffmpeg/libavfilter/af_anequalizer.c vendored Executable file
View File

@@ -0,0 +1,778 @@
/*
* Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
* Copyright (c) 2015 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/intreadwrite.h"
#include "libavutil/avstring.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
#define FILTER_ORDER 4 /**< design order of each EQ band (two 4th-order sections) */
/** Supported filter-design families for an EQ band. */
enum FilterType {
    BUTTERWORTH,
    CHEBYSHEV1,
    CHEBYSHEV2,
    NB_TYPES
};
/**
 * One fourth-order filter section: a0..a4 are the denominator and
 * b0..b4 the numerator coefficients. num[]/denum[] presumably hold
 * the delay-line state used by the processing code (not in this
 * excerpt) -- confirm.
 */
typedef struct FoSection {
    double a0, a1, a2, a3, a4;
    double b0, b1, b2, b3, b4;
    double num[4];
    double denum[4];
} FoSection;
/** One parametric EQ band: design parameters plus its filter sections. */
typedef struct EqualizatorFilter {
    int ignore;           /**< skip this band when drawing/filtering */
    int channel;          /**< channel index this band applies to */
    int type;             /**< enum FilterType design family */
    double freq;          /**< center frequency in Hz */
    double gain;          /**< peak gain in dB */
    double width;         /**< bandwidth in Hz */
    FoSection section[2]; /**< FILTER_ORDER/2 fourth-order sections */
} EqualizatorFilter;
/** Per-instance state for the anequalizer filter. */
typedef struct AudioNEqualizerContext {
    const AVClass *class;
    char *args;                /**< "params" option: band description string */
    char *colors;              /**< per-channel curve colors */
    int draw_curves;           /**< non-zero: expose a second, video output */
    int w, h;                  /**< video size */
    double mag;                /**< max gain shown on the curve plot (dB) */
    int fscale;                /**< frequency scale: 0 linear, 1 logarithmic */
    int nb_filters;            /**< number of configured bands */
    int nb_allocated;          /**< capacity of the filters array */
    EqualizatorFilter *filters;
    AVFrame *video;            /**< cached frame holding the drawn curves */
} AudioNEqualizerContext;
#define OFFSET(x) offsetof(AudioNEqualizerContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM     /**< audio option */
#define V AV_OPT_FLAG_VIDEO_PARAM     /**< video option */
#define F AV_OPT_FLAG_FILTERING_PARAM /**< filtering option */
/** AVOptions exposed by the anequalizer filter. */
static const AVOption anequalizer_options[] = {
    { "params", NULL,                             OFFSET(args),        AV_OPT_TYPE_STRING,     {.str=""}, 0, 0, A|F },
    { "curves", "draw frequency response curves", OFFSET(draw_curves), AV_OPT_TYPE_BOOL,       {.i64=0}, 0, 1, V|F },
    { "size",   "set video size",                 OFFSET(w),           AV_OPT_TYPE_IMAGE_SIZE, {.str = "hd720"}, 0, 0, V|F },
    { "mgain",  "set max gain",                   OFFSET(mag),         AV_OPT_TYPE_DOUBLE,     {.dbl=60}, -900, 900, V|F },
    { "fscale", "set frequency scale",            OFFSET(fscale),      AV_OPT_TYPE_INT,        {.i64=1}, 0, 1, V|F, "fscale" },
        { "lin",  "linear",                       0,                   AV_OPT_TYPE_CONST,      {.i64=0}, 0, 0, V|F, "fscale" },
        { "log",  "logarithmic",                  0,                   AV_OPT_TYPE_CONST,      {.i64=1}, 0, 0, V|F, "fscale" },
    { "colors", "set channels curves colors",     OFFSET(colors),      AV_OPT_TYPE_STRING,     {.str = "red|green|blue|yellow|orange|lime|pink|magenta|brown" }, 0, 0, V|F },
    { NULL }
};
AVFILTER_DEFINE_CLASS(anequalizer);
/**
 * Render the combined frequency response of all configured bands, one
 * colored curve per channel, into the RGBA video frame @p out.
 */
static void draw_curves(AVFilterContext *ctx, AVFilterLink *inlink, AVFrame *out)
{
    AudioNEqualizerContext *s = ctx->priv;
    char *colors, *color, *saveptr = NULL;
    int ch, i, n;
    colors = av_strdup(s->colors);
    if (!colors)
        return;
    memset(out->data[0], 0, s->h * out->linesize[0]);
    for (ch = 0; ch < inlink->channels; ch++) {
        /* default curve color: opaque white, overridden per channel below */
        uint8_t fg[4] = { 0xff, 0xff, 0xff, 0xff };
        int prev_v = -1;
        double f;
        color = av_strtok(ch == 0 ? colors : NULL, " |", &saveptr);
        if (color)
            av_parse_color(fg, color, -1, ctx);
        for (f = 0; f < s->w; f++) {
            double zr, zi, zr2, zi2;
            double Hr, Hi;
            double Hmag = 1;
            double w;
            int v, y, x;
            /* map the x coordinate to an angular frequency, linearly or
             * logarithmically per the fscale option */
            w = M_PI * (s->fscale ? pow(s->w - 1, f / s->w) : f) / (s->w - 1);
            zr = cos(w);
            zr2 = zr * zr;
            zi = -sin(w);
            zi2 = zi * zi;
            /* accumulate |H(e^jw)| over this channel's filter sections */
            for (n = 0; n < s->nb_filters; n++) {
                if (s->filters[n].channel != ch ||
                    s->filters[n].ignore)
                    continue;
                for (i = 0; i < FILTER_ORDER / 2; i++) {
                    FoSection *S = &s->filters[n].section[i];
                    /* H *= (((((S->b4 * z + S->b3) * z + S->b2) * z + S->b1) * z + S->b0) /
                       ((((S->a4 * z + S->a3) * z + S->a2) * z + S->a1) * z + S->a0)); */
                    Hr = S->b4*(1-8*zr2*zi2) + S->b2*(zr2-zi2) + zr*(S->b1+S->b3*(zr2-3*zi2))+ S->b0;
                    Hi = zi*(S->b3*(3*zr2-zi2) + S->b1 + 2*zr*(2*S->b4*(zr2-zi2) + S->b2));
                    Hmag *= hypot(Hr, Hi);
                    Hr = S->a4*(1-8*zr2*zi2) + S->a2*(zr2-zi2) + zr*(S->a1+S->a3*(zr2-3*zi2))+ S->a0;
                    Hi = zi*(S->a3*(3*zr2-zi2) + S->a1 + 2*zr*(2*S->a4*(zr2-zi2) + S->a2));
                    Hmag /= hypot(Hr, Hi);
                }
            }
            /* convert magnitude (in dB, scaled by mgain) to a clipped y pixel */
            v = av_clip((1. + -20 * log10(Hmag) / s->mag) * s->h / 2, 0, s->h - 1);
            x = lrint(f);
            if (prev_v == -1)
                prev_v = v;
            /* connect to the previous column with a vertical segment */
            if (v <= prev_v) {
                for (y = v; y <= prev_v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            } else {
                for (y = prev_v; y <= v; y++)
                    AV_WL32(out->data[0] + y * out->linesize[0] + x * 4, AV_RL32(fg));
            }
            prev_v = v;
        }
    }
    av_free(colors);
}
/**
 * Configure the optional video output and render the response curves into
 * a persistent frame kept in the context.
 */
static int config_video(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFrame *frame;

    outlink->w = s->w;
    outlink->h = s->h;
    outlink->sample_aspect_ratio = (AVRational){1,1};

    /* replace any previously cached curves frame */
    av_frame_free(&s->video);
    frame = ff_get_video_buffer(outlink, outlink->w, outlink->h);
    if (!frame)
        return AVERROR(ENOMEM);
    s->video = frame;

    draw_curves(ctx, inlink, frame);

    return 0;
}
/**
 * Create the audio output pad and, when curve drawing is enabled, a
 * second video output pad. Pad names are allocated here and released in
 * uninit().
 */
static av_cold int init(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterPad pad, vpad;
    int ret;
    pad = (AVFilterPad){
        .name         = av_strdup("out0"),
        .type         = AVMEDIA_TYPE_AUDIO,
    };
    if (!pad.name)
        return AVERROR(ENOMEM);
    if (s->draw_curves) {
        vpad = (AVFilterPad){
            .name         = av_strdup("out1"),
            .type         = AVMEDIA_TYPE_VIDEO,
            .config_props = config_video,
        };
        if (!vpad.name) {
            av_freep(&pad.name);
            return AVERROR(ENOMEM);
        }
    }
    ret = ff_insert_outpad(ctx, 0, &pad);
    if (ret < 0) {
        av_freep(&pad.name);
        /* fix: also release the video pad name, which previously leaked
         * when inserting the audio pad failed with draw_curves enabled */
        if (s->draw_curves)
            av_freep(&vpad.name);
        return ret;
    }
    if (s->draw_curves) {
        ret = ff_insert_outpad(ctx, 1, &vpad);
        if (ret < 0) {
            av_freep(&vpad.name);
            return ret;
        }
    }
    return 0;
}
/**
 * Advertise formats: double planar audio on the audio links and RGBA on
 * the optional video output; any channel layout and sample rate.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_RGBA, AV_PIX_FMT_NONE };
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;
    if (s->draw_curves) {
        AVFilterLink *videolink = ctx->outputs[1];
        formats = ff_make_format_list(pix_fmts);
        /* NOTE(review): ff_make_format_list()/ff_all_channel_counts()
         * results are not NULL-checked here; presumably ff_formats_ref()
         * handles NULL by failing -- confirm. */
        if ((ret = ff_formats_ref(formats, &videolink->in_formats)) < 0)
            return ret;
    }
    formats = ff_make_format_list(sample_fmts);
    if ((ret = ff_formats_ref(formats, &inlink->out_formats)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->in_formats)) < 0)
        return ret;
    layouts = ff_all_channel_counts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts)) < 0)
        return ret;
    formats = ff_all_samplerates();
    if ((ret = ff_formats_ref(formats, &inlink->out_samplerates)) < 0 ||
        (ret = ff_formats_ref(formats, &outlink->in_samplerates)) < 0)
        return ret;
    return 0;
}
/** Free output pad names, the cached curves frame and the band array. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioNEqualizerContext *s = ctx->priv;
    int i;

    for (i = 0; i < ctx->nb_outputs; i++)
        av_freep(&ctx->output_pads[i].name);

    av_frame_free(&s->video);
    av_freep(&s->filters);
    s->nb_filters   = 0;
    s->nb_allocated = 0;
}
/**
 * Compute the coefficients of one fourth-order section of a Butterworth
 * band filter. When the band center lies at DC or Nyquist (c0 == +/-1)
 * the section degenerates to second order (a3/a4/b3/b4 become 0).
 */
static void butterworth_fo_section(FoSection *S, double beta,
                                   double si, double g, double g0,
                                   double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = 2*c0*(g*g*beta*beta - g0*g0)/D;
        S->b2 = (g*g*beta*beta - 2*g0*g*beta*si + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;
        S->a0 = 1;
        S->a1 = 2*c0*(beta*beta - 1)/D;
        S->a2 = (beta*beta - 2*beta*si + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*beta*beta + 2*g*g0*si*beta + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g*g0*si*beta)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - g*g*beta*beta)/D;
        S->b3 = -4*c0*(g0*g0 - g*g0*si*beta)/D;
        S->b4 = (g*g*beta*beta - 2*g*g0*si*beta + g0*g0)/D;
        S->a0 = 1;
        S->a1 = -4*c0*(1 + si*beta)/D;
        S->a2 = 2*(1 + 2*c0*c0 - beta*beta)/D;
        S->a3 = -4*c0*(1 - si*beta)/D;
        S->a4 = (beta*beta - 2*si*beta + 1)/D;
    }
}
/**
 * Design an order-N Butterworth band filter with peak gain G, bandwidth
 * gain Gb and reference gain G0 (all in dB), centered at w0 with
 * bandwidth wb (rad/sample), as L = N/2 fourth-order sections.
 */
static void butterworth_bp_filter(EqualizatorFilter *f,
                                  int N, double w0, double wb,
                                  double G, double Gb, double G0)
{
    double g, c0, g0, beta;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;
    /* 0 dB everywhere: program unity (pass-through) sections */
    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }
    /* convert the dB gains to linear scale */
    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);
    epsilon = sqrt((G * G - Gb * Gb) / (Gb * Gb - G0 * G0));
    g  = pow(G,  1.0 / N);
    g0 = pow(G0, 1.0 / N);
    beta = pow(epsilon, -1.0 / N) * tan(wb/2);
    c0 = cos(w0);
    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1) / N;
        double si = sin(M_PI * ui / 2.0);
        double Di = beta * beta + 2 * si * beta + 1;
        butterworth_fo_section(&f->section[i - 1], beta, si, g, g0, Di, c0);
    }
}
/**
 * Compute the coefficients of one fourth-order section of a Chebyshev
 * type-1 band filter; second order when the band is centered at DC or
 * Nyquist (c0 == +/-1).
 */
static void chebyshev1_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g0, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) + 2*g0*b*si*tetta_b*tetta_b + g0*g0)/D;
        S->b1 = 2*c0*(tetta_b*tetta_b*(b*b+g0*g0*c*c) - g0*g0)/D;
        S->b2 = (tetta_b*tetta_b*(b*b+g0*g0*c*c) - 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b3 = 0;
        S->b4 = 0;
        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b*(a*a+c*c) - 1)/D;
        S->a2 = (tetta_b*tetta_b*(a*a+c*c) - 2*a*si*tetta_b + 1)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b + 2*g0*b*si*tetta_b + g0*g0)/D;
        S->b1 = -4*c0*(g0*g0 + g0*b*si*tetta_b)/D;
        S->b2 = 2*(g0*g0*(1 + 2*c0*c0) - (b*b + g0*g0*c*c)*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(g0*g0 - g0*b*si*tetta_b)/D;
        S->b4 = ((b*b + g0*g0*c*c)*tetta_b*tetta_b - 2*g0*b*si*tetta_b + g0*g0)/D;
        S->a0 = 1;
        S->a1 = -4*c0*(1 + a*si*tetta_b)/D;
        S->a2 = 2*(1 + 2*c0*c0 - (a*a + c*c)*tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(1 - a*si*tetta_b)/D;
        S->a4 = ((a*a + c*c)*tetta_b*tetta_b - 2*a*si*tetta_b + 1)/D;
    }
}
/**
 * Design an order-N Chebyshev type-1 band filter with peak gain G,
 * bandwidth gain Gb and reference gain G0 (dB), centered at w0 with
 * bandwidth wb (rad/sample), as L = N/2 fourth-order sections.
 */
static void chebyshev1_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, g0, alfa, beta, tetta_b;
    double epsilon;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;
    /* 0 dB everywhere: program unity (pass-through) sections */
    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }
    /* convert the dB gains to linear scale */
    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);
    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g0 = pow(G0,1.0/N);
    alfa = pow(1.0/epsilon + sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    beta = pow(G/epsilon + Gb * sqrt(1 + 1/(epsilon*epsilon)), 1.0/N);
    a = 0.5 * (alfa - 1.0/alfa);
    b = 0.5 * (beta - g0*g0*(1/beta));
    tetta_b = tan(wb/2);
    c0 = cos(w0);
    for (i = 1; i <= L; i++) {
        double ui = (2.0*i-1.0)/N;
        double ci = cos(M_PI*ui/2.0);
        double si = sin(M_PI*ui/2.0);
        double Di = (a*a + ci*ci)*tetta_b*tetta_b + 2.0*a*si*tetta_b + 1;
        chebyshev1_fo_section(&f->section[i - 1], a, ci, tetta_b, g0, si, b, Di, c0);
    }
}
/**
 * Compute the coefficients of one fourth-order section of a Chebyshev
 * type-2 band filter; second order when the band is centered at DC or
 * Nyquist (c0 == +/-1).
 */
static void chebyshev2_fo_section(FoSection *S, double a,
                                  double c, double tetta_b,
                                  double g, double si, double b,
                                  double D, double c0)
{
    if (c0 == 1 || c0 == -1) {
        S->b0 = (g*g*tetta_b*tetta_b + 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b1 = 2*c0*(g*g*tetta_b*tetta_b - b*b - g*g*c*c)/D;
        S->b2 = (g*g*tetta_b*tetta_b - 2*tetta_b*g*b*si + b*b + g*g*c*c)/D;
        S->b3 = 0;
        S->b4 = 0;
        S->a0 = 1;
        S->a1 = 2*c0*(tetta_b*tetta_b - a*a - c*c)/D;
        S->a2 = (tetta_b*tetta_b - 2*tetta_b*a*si + a*a + c*c)/D;
        S->a3 = 0;
        S->a4 = 0;
    } else {
        S->b0 = (g*g*tetta_b*tetta_b + 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
        S->b1 = -4*c0*(b*b + g*g*c*c + g*b*si*tetta_b)/D;
        S->b2 = 2*((b*b + g*g*c*c)*(1 + 2*c0*c0) - g*g*tetta_b*tetta_b)/D;
        S->b3 = -4*c0*(b*b + g*g*c*c - g*b*si*tetta_b)/D;
        S->b4 = (g*g*tetta_b*tetta_b - 2*g*b*si*tetta_b + b*b + g*g*c*c)/D;
        S->a0 = 1;
        S->a1 = -4*c0*(a*a + c*c + a*si*tetta_b)/D;
        S->a2 = 2*((a*a + c*c)*(1 + 2*c0*c0) - tetta_b*tetta_b)/D;
        S->a3 = -4*c0*(a*a + c*c - a*si*tetta_b)/D;
        S->a4 = (tetta_b*tetta_b - 2*a*si*tetta_b + a*a + c*c)/D;
    }
}
/**
 * Design an order-N Chebyshev type-2 band filter with peak gain G,
 * bandwidth gain Gb and reference gain G0 (dB), centered at w0 with
 * bandwidth wb (rad/sample), as L = N/2 fourth-order sections.
 */
static void chebyshev2_bp_filter(EqualizatorFilter *f,
                                 int N, double w0, double wb,
                                 double G, double Gb, double G0)
{
    double a, b, c0, tetta_b;
    double epsilon, g, eu, ew;
    int r = N % 2;
    int L = (N - r) / 2;
    int i;
    /* 0 dB everywhere: program unity (pass-through) sections */
    if (G == 0 && G0 == 0) {
        f->section[0].a0 = 1;
        f->section[0].b0 = 1;
        f->section[1].a0 = 1;
        f->section[1].b0 = 1;
        return;
    }
    /* convert the dB gains to linear scale */
    G  = ff_exp10(G/20);
    Gb = ff_exp10(Gb/20);
    G0 = ff_exp10(G0/20);
    epsilon = sqrt((G*G - Gb*Gb) / (Gb*Gb - G0*G0));
    g  = pow(G, 1.0 / N);
    eu = pow(epsilon + sqrt(1 + epsilon*epsilon), 1.0/N);
    ew = pow(G0*epsilon + Gb*sqrt(1 + epsilon*epsilon), 1.0/N);
    a = (eu - 1.0/eu)/2.0;
    b = (ew - g*g/ew)/2.0;
    tetta_b = tan(wb/2);
    c0 = cos(w0);
    for (i = 1; i <= L; i++) {
        double ui = (2.0 * i - 1.0)/N;
        double ci = cos(M_PI * ui / 2.0);
        double si = sin(M_PI * ui / 2.0);
        double Di = tetta_b*tetta_b + 2*a*si*tetta_b + a*a + ci*ci;
        chebyshev2_fo_section(&f->section[i - 1], a, ci, tetta_b, g, si, b, Di, c0);
    }
}
/* Map a band's peak gain (dB) to the gain at the band edges for the
 * Butterworth design: 3 dB inside the peak for large boosts/cuts,
 * half the gain for small ones. */
static double butterworth_compute_bw_gain_db(double gain)
{
    if (gain <= -6)
        return gain + 3;
    if (gain < 6)
        return gain * 0.5;
    return gain - 3;
}
/* Map a band's peak gain (dB) to the band-edge gain for the Chebyshev
 * type-I design: 1 dB inside the peak for large boosts/cuts, 90% of the
 * gain for small ones. */
static double chebyshev1_compute_bw_gain_db(double gain)
{
    if (gain <= -6)
        return gain + 1;
    if (gain < 6)
        return gain * 0.9;
    return gain - 1;
}
/* Map a band's peak gain (dB) to the band-edge gain for the Chebyshev
 * type-II design: clamped to +/-3 dB for large boosts/cuts, 30% of the
 * gain for small ones. */
static double chebyshev2_compute_bw_gain_db(double gain)
{
    if (gain <= -6)
        return -3;
    if (gain < 6)
        return gain * 0.3;
    return 3;
}
/* Convert a frequency x in Hz to normalized angular frequency
 * (radians per sample) at sample rate fs. */
static inline double hz_2_rad(double x, double fs)
{
    return x * (2 * M_PI) / fs;
}
/* (Re)compute the IIR coefficients of band f for the given sample rate,
 * dispatching on the band's filter design type. */
static void equalizer(EqualizatorFilter *f, double sample_rate)
{
    double w0 = hz_2_rad(f->freq, sample_rate);   /* center, rad/sample */
    double wb = hz_2_rad(f->width, sample_rate);  /* bandwidth, rad/sample */
    double bw_gain;

    switch (f->type) {
    case BUTTERWORTH:
        bw_gain = butterworth_compute_bw_gain_db(f->gain);
        butterworth_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV1:
        bw_gain = chebyshev1_compute_bw_gain_db(f->gain);
        chebyshev1_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    case CHEBYSHEV2:
        bw_gain = chebyshev2_compute_bw_gain_db(f->gain);
        chebyshev2_bp_filter(f, FILTER_ORDER, w0, wb, f->gain, bw_gain, 0);
        break;
    }
}
/* Commit the band already parsed into s->filters[s->nb_filters]: compute
 * its coefficients and grow the array (doubling capacity) so that one
 * free slot always remains for the next band.
 * Returns 0 or AVERROR(ENOMEM). */
static int add_filter(AudioNEqualizerContext *s, AVFilterLink *inlink)
{
    equalizer(&s->filters[s->nb_filters], inlink->sample_rate);

    if (s->nb_filters >= s->nb_allocated - 1) {
        EqualizatorFilter *filters;

        filters = av_calloc(s->nb_allocated, 2 * sizeof(*s->filters));
        if (!filters)
            return AVERROR(ENOMEM);
        memcpy(filters, s->filters, sizeof(*s->filters) * s->nb_allocated);
        av_free(s->filters);
        s->filters = filters;
        s->nb_allocated *= 2;
    }
    s->nb_filters++;

    return 0;
}
/* Parse the option string and build one equalizer band per "|"-separated
 * entry of the form "cCHN f=FREQ w=WIDTH g=GAIN[ t=TYPE]".  Bands whose
 * frequency or channel is out of range are kept but flagged as ignored.
 * Returns 0 on success or a negative AVERROR code. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    char *args = av_strdup(s->args);
    char *saveptr = NULL;
    int ret = 0;

    if (!args)
        return AVERROR(ENOMEM);

    /* Initial capacity: 32 bands per channel; add_filter() grows it. */
    s->nb_allocated = 32 * inlink->channels;
    s->filters = av_calloc(inlink->channels, 32 * sizeof(*s->filters));
    if (!s->filters) {
        s->nb_allocated = 0;
        av_free(args);
        return AVERROR(ENOMEM);
    }

    while (1) {
        char *arg = av_strtok(s->nb_filters == 0 ? args : NULL, "|", &saveptr);

        if (!arg)
            break;

        s->filters[s->nb_filters].type = 0;
        /* Try the 5-field form (with explicit type) first, then 4 fields. */
        if (sscanf(arg, "c%d f=%lf w=%lf g=%lf t=%d", &s->filters[s->nb_filters].channel,
                                                      &s->filters[s->nb_filters].freq,
                                                      &s->filters[s->nb_filters].width,
                                                      &s->filters[s->nb_filters].gain,
                                                      &s->filters[s->nb_filters].type) != 5 &&
            sscanf(arg, "c%d f=%lf w=%lf g=%lf", &s->filters[s->nb_filters].channel,
                                                 &s->filters[s->nb_filters].freq,
                                                 &s->filters[s->nb_filters].width,
                                                 &s->filters[s->nb_filters].gain) != 4 ) {
            av_free(args);
            return AVERROR(EINVAL);
        }

        /* Out-of-range bands are silently disabled rather than rejected. */
        if (s->filters[s->nb_filters].freq < 0 ||
            s->filters[s->nb_filters].freq > inlink->sample_rate / 2.0)
            s->filters[s->nb_filters].ignore = 1;

        if (s->filters[s->nb_filters].channel < 0 ||
            s->filters[s->nb_filters].channel >= inlink->channels)
            s->filters[s->nb_filters].ignore = 1;

        s->filters[s->nb_filters].type = av_clip(s->filters[s->nb_filters].type, 0, NB_TYPES - 1);
        ret = add_filter(s, inlink);
        if (ret < 0)
            break;
    }

    av_free(args);

    return ret;
}
/* Runtime command handler.  Supports "change" with arguments
 * "FILTER|f=FREQ|w=WIDTH|g=GAIN" to retune an existing band; the band's
 * coefficients are recomputed and, if enabled, the curves redrawn.
 * Returns 0 on success, AVERROR(EINVAL) on bad arguments, or
 * AVERROR(ENOSYS) for unknown commands. */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    int ret = AVERROR(ENOSYS);

    if (!strcmp(cmd, "change")) {
        double freq, width, gain;
        int filter;

        if (sscanf(args, "%d|f=%lf|w=%lf|g=%lf", &filter, &freq, &width, &gain) != 4)
            return AVERROR(EINVAL);

        if (filter < 0 || filter >= s->nb_filters)
            return AVERROR(EINVAL);

        if (freq < 0 || freq > inlink->sample_rate / 2.0)
            return AVERROR(EINVAL);

        s->filters[filter].freq  = freq;
        s->filters[filter].width = width;
        s->filters[filter].gain  = gain;
        equalizer(&s->filters[filter], inlink->sample_rate);
        if (s->draw_curves)
            draw_curves(ctx, inlink, s->video);

        ret = 0;
    }

    return ret;
}
/* Run one sample through a single fourth-order IIR section (direct
 * form I).  num[] holds the last four inputs, denum[] the last four
 * outputs; both delay lines are shifted after computing the output. */
static inline double section_process(FoSection *S, double in)
{
    double out;

    out = S->b0 * in;
    out+= S->b1 * S->num[0] - S->denum[0] * S->a1;
    out+= S->b2 * S->num[1] - S->denum[1] * S->a2;
    out+= S->b3 * S->num[2] - S->denum[2] * S->a3;
    out+= S->b4 * S->num[3] - S->denum[3] * S->a4;

    /* shift the input history */
    S->num[3] = S->num[2];
    S->num[2] = S->num[1];
    S->num[1] = S->num[0];
    S->num[0] = in;

    /* shift the output history */
    S->denum[3] = S->denum[2];
    S->denum[2] = S->denum[1];
    S->denum[1] = S->denum[0];
    S->denum[0] = out;

    return out;
}
/* Run one audio sample through the cascade of FILTER_ORDER/2 fourth-order
 * IIR sections making up one equalizer band, and return the result.
 * Accumulating in a single variable (instead of the former separate p1)
 * avoids returning an uninitialized value should FILTER_ORDER ever be
 * defined below 2. */
static double process_sample(FoSection *s1, double in)
{
    double p0 = in;
    int i;

    /* feed each section's output into the next */
    for (i = 0; i < FILTER_ORDER / 2; i++)
        p0 = section_process(&s1[i], p0);

    return p0;
}
/* Filter one audio frame in place through every active band; if curve
 * drawing is enabled, also push a clone of the spectrum video frame on
 * the second output. */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AudioNEqualizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    double *bptr;
    int i, n;

    for (i = 0; i < s->nb_filters; i++) {
        EqualizatorFilter *f = &s->filters[i];

        /* skip disabled bands and bands with nothing to do */
        if (f->gain == 0. || f->ignore)
            continue;

        bptr = (double *)buf->extended_data[f->channel];
        for (n = 0; n < buf->nb_samples; n++) {
            double sample = bptr[n];

            sample = process_sample(f->section, sample);
            bptr[n] = sample;
        }
    }

    if (s->draw_curves) {
        AVFrame *clone;
        /* timestamp of the end of this audio frame, in output time base */
        const int64_t pts = buf->pts +
            av_rescale_q(buf->nb_samples, (AVRational){ 1, inlink->sample_rate },
                         outlink->time_base);
        int ret;

        s->video->pts = pts;
        clone = av_frame_clone(s->video);
        if (!clone)
            return AVERROR(ENOMEM);
        ret = ff_filter_frame(ctx->outputs[1], clone);
        if (ret < 0)
            return ret;
    }

    return ff_filter_frame(outlink, buf);
}
/* Single audio input; frames are modified in place (needs_writable). */
static const AVFilterPad inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .config_props   = config_input,
        .filter_frame   = filter_frame,
        .needs_writable = 1,
    },
    { NULL }
};

/* Outputs are created dynamically in init() (audio plus an optional video
 * curves output), hence .outputs = NULL and AVFILTER_FLAG_DYNAMIC_OUTPUTS. */
AVFilter ff_af_anequalizer = {
    .name          = "anequalizer",
    .description   = NULL_IF_CONFIG_SMALL("Apply high-order audio parametric multi band equalizer."),
    .priv_size     = sizeof(AudioNEqualizerContext),
    .priv_class    = &anequalizer_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = NULL,
    .flags         = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
    .process_command = process_command,
};

374
externals/ffmpeg/libavfilter/af_anlmdn.c vendored Executable file
View File

@@ -0,0 +1,374 @@
/*
* Copyright (c) 2019 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <float.h>
#include "libavutil/avassert.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
#include "af_anlmdndsp.h"
#define WEIGHT_LUT_NBITS 20
#define WEIGHT_LUT_SIZE  (1<<WEIGHT_LUT_NBITS)  /* entries in the expf() lookup table */

#define SQR(x) ((x) * (x))

typedef struct AudioNLMeansContext {
    const AVClass *class;

    float a;                            // denoising strength (option "s")
    int64_t pd;                         // patch duration (option "p", AV_OPT_TYPE_DURATION)
    int64_t rd;                         // research duration (option "r")
    float m;                            // smooth factor (option "m")
    int om;                             // output mode, one of enum OutModes

    float pdiff_lut_scale;              // maps patch differences to weight_lut indices
    float weight_lut[WEIGHT_LUT_SIZE];  // precomputed expf(-x) table

    int K;                              // patch radius in samples
    int S;                              // research radius in samples
    int N;                              // total analysis window size in samples
    int H;                              // hop size: samples produced per iteration

    int offset;                         // write offset into the current output frame
    AVFrame *in;                        // sliding analysis window
    AVFrame *cache;                     // per-channel cached squared distances

    int64_t pts;                        // next output timestamp

    AVAudioFifo *fifo;                  // buffered input samples
    int eof_left;                       // samples still to emit after EOF, -1 if not yet known

    AudioNLMDNDSPContext dsp;           // pluggable DSP routines (C or SIMD)
} AudioNLMeansContext;
enum OutModes {
IN_MODE,
OUT_MODE,
NOISE_MODE,
NB_MODES
};
#define OFFSET(x) offsetof(AudioNLMeansContext, x)
#define AF  AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AFT AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

/* User options; durations are in microseconds (AV_OPT_TYPE_DURATION). */
static const AVOption anlmdn_options[] = {
  { "s", "set denoising strength", OFFSET(a),  AV_OPT_TYPE_FLOAT,    {.dbl=0.00001},0.00001, 10, AFT },
  { "p", "set patch duration",     OFFSET(pd), AV_OPT_TYPE_DURATION, {.i64=2000}, 1000, 100000, AF },
  { "r", "set research duration",  OFFSET(rd), AV_OPT_TYPE_DURATION, {.i64=6000}, 2000, 300000, AF },
  { "o", "set output mode",        OFFSET(om), AV_OPT_TYPE_INT,      {.i64=OUT_MODE},  0, NB_MODES-1, AFT, "mode" },
  {  "i", "input",                 0,          AV_OPT_TYPE_CONST,    {.i64=IN_MODE},   0,  0, AFT, "mode" },
  {  "o", "output",                0,          AV_OPT_TYPE_CONST,    {.i64=OUT_MODE},  0,  0, AFT, "mode" },
  {  "n", "noise",                 0,          AV_OPT_TYPE_CONST,    {.i64=NOISE_MODE},0,  0, AFT, "mode" },
  { "m", "set smooth factor",      OFFSET(m),  AV_OPT_TYPE_FLOAT,    {.dbl=11.},       1, 15, AF },
  { NULL }
};

AVFILTER_DEFINE_CLASS(anlmdn);
/* Advertise the formats the filter supports: planar float samples, any
 * channel count, any sample rate.
 * Returns 0 or a negative AVERROR code. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    /* Fix: the allocation result was previously passed on unchecked,
     * unlike every other allocation in this function. */
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Sum of squared differences between the two patches of 2*K+1 samples
 * centered on f1 and f2. */
static float compute_distance_ssd_c(const float *f1, const float *f2, ptrdiff_t K)
{
    float sum = 0.f;

    for (int k = -K; k <= K; k++) {
        const float d = f1[k] - f2[k];

        sum += d * d;
    }

    return sum;
}
/* Incrementally update the running SSD cache: for each research offset j
 * in [jj, jj + S) drop the contribution of the sample pair leaving the
 * patch and add the pair entering it. */
static void compute_cache_c(float *cache, const float *f,
                            ptrdiff_t S, ptrdiff_t K,
                            ptrdiff_t i, ptrdiff_t jj)
{
    const ptrdiff_t end = jj + S;
    float *dst = cache;

    for (ptrdiff_t j = jj; j < end; j++) {
        const float leave = f[i - K - 1] - f[j - K - 1];
        const float enter = f[i + K] - f[j + K];

        *dst++ += enter * enter - leave * leave;
    }
}
/* Install the C implementations of the DSP routines, then let the x86
 * code override them where available. */
void ff_anlmdn_init(AudioNLMDNDSPContext *dsp)
{
    dsp->compute_distance_ssd = compute_distance_ssd_c;
    dsp->compute_cache        = compute_cache_c;

    if (ARCH_X86)
        ff_anlmdn_init_x86(dsp);
}
/* Configure the filter for the negotiated output: derive the patch radius
 * K and research radius S in samples, allocate the sliding window, the
 * distance cache and the input FIFO, prime the FIFO, and build the exp()
 * weight lookup table.  May run more than once; previously allocated
 * buffers are released first.  Returns 0 or a negative AVERROR code. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioNLMeansContext *s = ctx->priv;
    int ret;

    s->K = av_rescale(s->pd, outlink->sample_rate, AV_TIME_BASE);
    s->S = av_rescale(s->rd, outlink->sample_rate, AV_TIME_BASE);

    s->eof_left = -1;
    s->pts = AV_NOPTS_VALUE;
    s->H = s->K * 2 + 1;
    s->N = s->H + (s->K + s->S) * 2;

    av_log(ctx, AV_LOG_DEBUG, "K:%d S:%d H:%d N:%d\n", s->K, s->S, s->H, s->N);

    av_frame_free(&s->in);
    av_frame_free(&s->cache);
    s->in = ff_get_audio_buffer(outlink, s->N);
    if (!s->in)
        return AVERROR(ENOMEM);

    s->cache = ff_get_audio_buffer(outlink, s->S * 2);
    if (!s->cache)
        return AVERROR(ENOMEM);

    /* Fix: release any previous FIFO so a reconfiguration does not leak
     * it, mirroring the av_frame_free() calls above. */
    av_audio_fifo_free(s->fifo);
    s->fifo = av_audio_fifo_alloc(outlink->format, outlink->channels, s->N);
    if (!s->fifo)
        return AVERROR(ENOMEM);

    /* Prime the FIFO with K + S samples from the freshly allocated window.
     * NOTE(review): assumes ff_get_audio_buffer() returns zeroed samples
     * (i.e. this is priming silence) — confirm. */
    ret = av_audio_fifo_write(s->fifo, (void **)s->in->extended_data, s->K + s->S);
    if (ret < 0)
        return ret;

    s->pdiff_lut_scale = 1.f / s->m * WEIGHT_LUT_SIZE;
    for (int i = 0; i < WEIGHT_LUT_SIZE; i++) {
        float w = -i / s->pdiff_lut_scale;

        s->weight_lut[i] = expf(w);
    }

    ff_anlmdn_init(&s->dsp);

    return 0;
}
/* Per-channel worker: denoise one hop window (s->H samples) of channel ch
 * using non-local means over 2*S candidate patches per sample, writing
 * into the output frame passed through `arg` at offset s->offset. */
static int filter_channel(AVFilterContext *ctx, void *arg, int ch, int nb_jobs)
{
    AudioNLMeansContext *s = ctx->priv;
    AVFrame *out = arg;
    const int S = s->S;
    const int K = s->K;
    const int om = s->om;
    const float *f = (const float *)(s->in->extended_data[ch]) + K;
    float *cache = (float *)s->cache->extended_data[ch];
    /* scale turning a patch SSD into a lookup-table argument */
    const float sw = (65536.f / (4 * K + 2)) / sqrtf(s->a);
    float *dst = (float *)out->extended_data[ch] + s->offset;
    const float smooth = s->m;

    for (int i = S; i < s->H + S; i++) {
        float P = 0.f, Q = 0.f;
        int v = 0;

        if (i == S) {
            /* first sample of the hop: compute all 2*S distances in full */
            for (int j = i - S; j <= i + S; j++) {
                if (i == j)
                    continue;
                cache[v++] = s->dsp.compute_distance_ssd(f + i, f + j, K);
            }
        } else {
            /* subsequent samples: update the cached distances incrementally */
            s->dsp.compute_cache(cache, f, S, K, i, i - S);
            s->dsp.compute_cache(cache + S, f, S, K, i, i + 1);
        }

        /* accumulate the weighted average of candidate samples */
        for (int j = 0; j < 2 * S && !ctx->is_disabled; j++) {
            const float distance = cache[j];
            unsigned weight_lut_idx;
            float w;

            if (distance < 0.f) {
                /* numerical drift from the incremental update: reset */
                cache[j] = 0.f;
                continue;
            }
            w = distance * sw;
            if (w >= smooth)
                continue;
            weight_lut_idx = w * s->pdiff_lut_scale;
            av_assert2(weight_lut_idx < WEIGHT_LUT_SIZE);
            w = s->weight_lut[weight_lut_idx];
            /* (j >= S) skips over the center sample itself */
            P += w * f[i - S + j + (j >= S)];
            Q += w;
        }

        /* the center sample always participates with weight 1 */
        P += f[i];
        Q += 1;

        switch (om) {
        case IN_MODE:    dst[i - S] = f[i];           break;
        case OUT_MODE:   dst[i - S] = P / Q;          break;
        case NOISE_MODE: dst[i - S] = f[i] - (P / Q); break;
        }
    }

    return 0;
}
/* Buffer incoming samples in the FIFO and run the denoiser over as many
 * complete hop windows as currently possible, emitting one output frame.
 * Takes ownership of `in`.  Returns 0/positive on success or a negative
 * AVERROR code. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioNLMeansContext *s = ctx->priv;
    AVFrame *out = NULL;
    int available, wanted, ret;

    if (s->pts == AV_NOPTS_VALUE)
        s->pts = in->pts;

    ret = av_audio_fifo_write(s->fifo, (void **)in->extended_data,
                              in->nb_samples);
    av_frame_free(&in);
    /* Fix: a failed FIFO write was previously ignored and processing
     * continued with missing data. */
    if (ret < 0)
        return ret;

    s->offset = 0;
    available = av_audio_fifo_size(s->fifo);
    wanted = (available / s->H) * s->H;

    if (wanted >= s->H && available >= s->N) {
        out = ff_get_audio_buffer(outlink, wanted);
        if (!out)
            return AVERROR(ENOMEM);
    }

    /* one hop (s->H samples) per iteration while a full analysis
     * window (s->N samples) is buffered */
    while (available >= s->N) {
        ret = av_audio_fifo_peek(s->fifo, (void **)s->in->extended_data, s->N);
        if (ret < 0)
            break;

        ctx->internal->execute(ctx, filter_channel, out, NULL, inlink->channels);

        av_audio_fifo_drain(s->fifo, s->H);

        s->offset += s->H;
        available -= s->H;
    }

    if (out) {
        out->pts = s->pts;
        out->nb_samples = s->offset;
        if (s->eof_left >= 0) {
            /* at EOF, trim the trailing priming silence */
            out->nb_samples = FFMIN(s->eof_left, s->offset);
            s->eof_left -= out->nb_samples;
        }
        s->pts += av_rescale_q(s->offset, (AVRational){1, outlink->sample_rate}, outlink->time_base);

        return ff_filter_frame(outlink, out);
    }

    return ret;
}
/* On input EOF, keep feeding silent frames into filter_frame() so the
 * samples still buffered in the FIFO (minus the S + K priming samples)
 * get flushed through the denoiser. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioNLMeansContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);
    if (ret == AVERROR_EOF && s->eof_left != 0) {
        AVFrame *in;

        if (s->eof_left < 0)
            s->eof_left = av_audio_fifo_size(s->fifo) - (s->S + s->K);
        if (s->eof_left <= 0)
            return AVERROR_EOF;
        in = ff_get_audio_buffer(outlink, s->H);
        if (!in)
            return AVERROR(ENOMEM);

        return filter_frame(ctx->inputs[0], in);
    }

    return ret;
}
/* Release the FIFO and the analysis/cache frames. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioNLMeansContext *s = ctx->priv;

    av_audio_fifo_free(s->fifo);
    av_frame_free(&s->in);
    av_frame_free(&s->cache);
}
/* Single audio input. */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Single audio output; request_frame drives the EOF flush. */
static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};

AVFilter ff_af_anlmdn = {
    .name          = "anlmdn",
    .description   = NULL_IF_CONFIG_SMALL("Reduce broadband noise from stream using Non-Local Means."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioNLMeansContext),
    .priv_class    = &anlmdn_class,
    .uninit        = uninit,
    .inputs        = inputs,
    .outputs       = outputs,
    .process_command = ff_filter_process_command,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
                     AVFILTER_FLAG_SLICE_THREADS,
};

40
externals/ffmpeg/libavfilter/af_anlmdndsp.h vendored Executable file
View File

@@ -0,0 +1,40 @@
/*
* Copyright (c) 2019 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_ANLMDNDSP_H
#define AVFILTER_ANLMDNDSP_H
#include "libavutil/common.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
/* DSP entry points shared by the anlmdn filter; the function pointers are
 * installed by ff_anlmdn_init() and may be overridden by arch-specific
 * (x86) implementations. */
typedef struct AudioNLMDNDSPContext {
    /* sum of squared differences over a patch of 2*K+1 samples */
    float (*compute_distance_ssd)(const float *f1, const float *f2, ptrdiff_t K);
    /* incremental update of S cached distances for window position i */
    void (*compute_cache)(float *cache, const float *f, ptrdiff_t S, ptrdiff_t K,
                          ptrdiff_t i, ptrdiff_t jj);
} AudioNLMDNDSPContext;

void ff_anlmdn_init(AudioNLMDNDSPContext *s);
void ff_anlmdn_init_x86(AudioNLMDNDSPContext *s);

#endif /* AVFILTER_ANLMDNDSP_H */

330
externals/ffmpeg/libavfilter/af_anlms.c vendored Executable file
View File

@@ -0,0 +1,330 @@
/*
* Copyright (c) 2019 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/float_dsp.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "filters.h"
#include "internal.h"
enum OutModes {
IN_MODE,
DESIRED_MODE,
OUT_MODE,
NOISE_MODE,
NB_OMODES
};
typedef struct AudioNLMSContext {
    const AVClass *class;

    int order;          // number of filter taps (option "order")
    float mu;           // NLMS step size (option "mu")
    float eps;          // regularization added to the input power (option "eps")
    float leakage;      // leakage factor applied to the coefficients
    int output_mode;    // one of enum OutModes

    int kernel_size;    // order rounded up to a multiple of 16
    AVFrame *offset;    // per-channel write position into the delay line
    AVFrame *delay;     // per-channel input history, stored twice (2 * kernel_size)
    AVFrame *coeffs;    // per-channel adaptive coefficients, stored twice
    AVFrame *tmp;       // per-channel scratch buffer

    AVFrame *frame[2];  // pending frames: [0] input, [1] desired

    AVFloatDSPContext *fdsp;  // optimized dot-product / scale helpers
} AudioNLMSContext;
#define OFFSET(x) offsetof(AudioNLMSContext, x)
#define A  AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AT AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

/* User options; mu/eps/leakage/out_mode may be changed at runtime. */
static const AVOption anlms_options[] = {
    { "order",    "set the filter order",   OFFSET(order),       AV_OPT_TYPE_INT,   {.i64=256},      1, INT16_MAX, A },
    { "mu",       "set the filter mu",      OFFSET(mu),          AV_OPT_TYPE_FLOAT, {.dbl=0.75},     0, 2, AT },
    { "eps",      "set the filter eps",     OFFSET(eps),         AV_OPT_TYPE_FLOAT, {.dbl=1},        0, 1, AT },
    { "leakage",  "set the filter leakage", OFFSET(leakage),     AV_OPT_TYPE_FLOAT, {.dbl=0},        0, 1, AT },
    { "out_mode", "set output mode",        OFFSET(output_mode), AV_OPT_TYPE_INT,   {.i64=OUT_MODE}, 0, NB_OMODES-1, AT, "mode" },
    {  "i", "input",   0, AV_OPT_TYPE_CONST, {.i64=IN_MODE},      0, 0, AT, "mode" },
    {  "d", "desired", 0, AV_OPT_TYPE_CONST, {.i64=DESIRED_MODE}, 0, 0, AT, "mode" },
    {  "o", "output",  0, AV_OPT_TYPE_CONST, {.i64=OUT_MODE},     0, 0, AT, "mode" },
    {  "n", "noise",   0, AV_OPT_TYPE_CONST, {.i64=NOISE_MODE},   0, 0, AT, "mode" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(anlms);
/* Advertise supported formats: planar float samples, any channel count,
 * any sample rate.  Returns 0 or a negative AVERROR code. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Produce one FIR output sample.  The delay line is circular and the
 * coefficients are stored twice back to back (see process_sample()), so
 * copying `order` taps starting at coeffs + order - *offset lines them up
 * with the delay buffer for a single contiguous dot product.
 * Decrements *offset, wrapping at the buffer start. */
static float fir_sample(AudioNLMSContext *s, float sample, float *delay,
                        float *coeffs, float *tmp, int *offset)
{
    const int order = s->order;
    float output;

    delay[*offset] = sample;

    memcpy(tmp, coeffs + order - *offset, order * sizeof(float));

    output = s->fdsp->scalarproduct_float(delay, tmp, s->kernel_size);

    if (--(*offset) < 0)
        *offset = order - 1;

    return output;
}
/* One NLMS iteration: filter `input`, compare with `desired`, and update
 * the coefficients by the normalized LMS rule
 *     w += mu * e / (eps + ||x||^2) * x,
 * with an optional leakage term shrinking w each step.
 * Returns the sample selected by the configured output mode. */
static float process_sample(AudioNLMSContext *s, float input, float desired,
                            float *delay, float *coeffs, float *tmp, int *offsetp)
{
    const int order = s->order;
    const float leakage = s->leakage;
    const float mu = s->mu;
    const float a = 1.f - leakage * mu;  /* leakage shrink factor */
    float sum, output, e, norm, b;
    int offset = *offsetp;

    delay[offset + order] = input;  /* second copy for contiguous reads */

    output = fir_sample(s, input, delay, coeffs, tmp, offsetp);
    e = desired - output;           /* estimation error */
    sum = s->fdsp->scalarproduct_float(delay, delay, s->kernel_size);
    norm = s->eps + sum;            /* regularized input power */
    b = mu * e / norm;

    memcpy(tmp, delay + offset, order * sizeof(float));

    s->fdsp->vector_fmul_scalar(coeffs, coeffs, a, s->kernel_size);

    s->fdsp->vector_fmac_scalar(coeffs, tmp, b, s->kernel_size);

    /* mirror the updated taps into the duplicate half (see fir_sample()) */
    memcpy(coeffs + order, coeffs, order * sizeof(float));

    switch (s->output_mode) {
    case IN_MODE:      output = input;            break;
    case DESIRED_MODE: output = desired;          break;
    case OUT_MODE:     /*output = output;*/       break;
    case NOISE_MODE:   output = desired - output; break;
    }

    return output;
}
/* Threaded worker: run the adaptive filter over a contiguous slice of
 * channels, writing into the output frame passed through `arg`. */
static int process_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    AudioNLMSContext *s = ctx->priv;
    AVFrame *out = arg;
    const int start = (out->channels * jobnr) / nb_jobs;
    const int end = (out->channels * (jobnr+1)) / nb_jobs;

    for (int c = start; c < end; c++) {
        const float *input = (const float *)s->frame[0]->extended_data[c];
        const float *desired = (const float *)s->frame[1]->extended_data[c];
        float *delay = (float *)s->delay->extended_data[c];
        float *coeffs = (float *)s->coeffs->extended_data[c];
        float *tmp = (float *)s->tmp->extended_data[c];
        int *offset = (int *)s->offset->extended_data[c];
        float *output = (float *)out->extended_data[c];

        for (int n = 0; n < out->nb_samples; n++)
            output[n] = process_sample(s, input[n], desired[n], delay, coeffs, tmp, offset);
    }

    return 0;
}
/* Graph scheduling callback: consume the same number of samples from the
 * "input" and "desired" streams, process them across channels, then take
 * care of status (EOF) forwarding and frame requests. */
static int activate(AVFilterContext *ctx)
{
    AudioNLMSContext *s = ctx->priv;
    int i, ret, status;
    int nb_samples;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    /* only as many samples as both inputs can provide right now */
    nb_samples = FFMIN(ff_inlink_queued_samples(ctx->inputs[0]),
                       ff_inlink_queued_samples(ctx->inputs[1]));
    for (i = 0; i < ctx->nb_inputs && nb_samples > 0; i++) {
        if (s->frame[i])
            continue;

        if (ff_inlink_check_available_samples(ctx->inputs[i], nb_samples) > 0) {
            ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &s->frame[i]);
            if (ret < 0)
                return ret;
        }
    }

    if (s->frame[0] && s->frame[1]) {
        AVFrame *out;

        out = ff_get_audio_buffer(ctx->outputs[0], s->frame[0]->nb_samples);
        if (!out) {
            av_frame_free(&s->frame[0]);
            av_frame_free(&s->frame[1]);
            return AVERROR(ENOMEM);
        }

        /* one job per channel, capped by the configured thread count */
        ctx->internal->execute(ctx, process_channels, out, NULL, FFMIN(ctx->outputs[0]->channels,
                                                                       ff_filter_get_nb_threads(ctx)));

        out->pts = s->frame[0]->pts;

        av_frame_free(&s->frame[0]);
        av_frame_free(&s->frame[1]);

        ret = ff_filter_frame(ctx->outputs[0], out);
        if (ret < 0)
            return ret;
    }

    /* forward EOF/status once neither input has queued samples */
    if (!nb_samples) {
        for (i = 0; i < 2; i++) {
            if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
                ff_outlink_set_status(ctx->outputs[0], status, pts);
                return 0;
            }
        }
    }

    /* request data from whichever input has none queued */
    if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        for (i = 0; i < 2; i++) {
            if (ff_inlink_queued_samples(ctx->inputs[i]) > 0)
                continue;
            ff_inlink_request_frame(ctx->inputs[i]);
            return 0;
        }
    }

    return 0;
}
/* Allocate the per-channel state buffers once the output format is known.
 * delay and coeffs hold two copies (2 * kernel_size) so reads can stay
 * contiguous; buffers already allocated are kept as-is. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioNLMSContext *s = ctx->priv;

    s->kernel_size = FFALIGN(s->order, 16);

    if (!s->offset)
        s->offset = ff_get_audio_buffer(outlink, 1);
    if (!s->delay)
        s->delay = ff_get_audio_buffer(outlink, 2 * s->kernel_size);
    if (!s->coeffs)
        s->coeffs = ff_get_audio_buffer(outlink, 2 * s->kernel_size);
    if (!s->tmp)
        s->tmp = ff_get_audio_buffer(outlink, s->kernel_size);
    if (!s->delay || !s->coeffs || !s->offset || !s->tmp)
        return AVERROR(ENOMEM);

    return 0;
}
/* Allocate the float DSP context used for dot products and tap updates. */
static av_cold int init(AVFilterContext *ctx)
{
    AudioNLMSContext *s = ctx->priv;

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        return AVERROR(ENOMEM);

    return 0;
}
/* Release the DSP context and all per-channel state buffers. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioNLMSContext *s = ctx->priv;

    av_freep(&s->fdsp);
    av_frame_free(&s->delay);
    av_frame_free(&s->coeffs);
    av_frame_free(&s->offset);
    av_frame_free(&s->tmp);
}
/* Two audio inputs: the signal to filter and the desired reference. */
static const AVFilterPad inputs[] = {
    {
        .name = "input",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    {
        .name = "desired",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
    },
    { NULL }
};

AVFilter ff_af_anlms = {
    .name          = "anlms",
    .description   = NULL_IF_CONFIG_SMALL("Apply Normalized Least-Mean-Squares algorithm to first audio stream."),
    .priv_size     = sizeof(AudioNLMSContext),
    .priv_class    = &anlms_class,
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
    .process_command = ff_filter_process_command,
};

51
externals/ffmpeg/libavfilter/af_anull.c vendored Executable file
View File

@@ -0,0 +1,51 @@
/*
* Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu>
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* null audio filter
*/
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "libavutil/internal.h"
/* No callbacks are set: the framework's defaults forward every frame
 * unchanged, which is exactly what a null filter needs. */
static const AVFilterPad avfilter_af_anull_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_anull_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_anull = {
    .name        = "anull",
    .description = NULL_IF_CONFIG_SMALL("Pass the source unchanged to the output."),
    .inputs      = avfilter_af_anull_inputs,
    .outputs     = avfilter_af_anull_outputs,
};

180
externals/ffmpeg/libavfilter/af_apad.c vendored Executable file
View File

@@ -0,0 +1,180 @@
/*
* Copyright (c) 2012 Michael Niedermayer
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* audio pad filter.
*
* Based on af_aresample.c
*/
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
typedef struct APadContext {
    const AVClass *class;
    int64_t next_pts;  // pts for the next generated silence frame

    int packet_size;   // samples per generated silence frame (option "packet_size")
    int64_t pad_len, pad_len_left;      // silence samples to append / still remaining
    int64_t whole_len, whole_len_left;  // minimum total samples / still remaining
    int64_t pad_dur;   // duration forms of the above, converted in config_output()
    int64_t whole_dur;
} APadContext;
#define OFFSET(x) offsetof(APadContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* pad_len/whole_len count samples; pad_dur/whole_dur are durations and
 * take precedence once converted in config_output(). */
static const AVOption apad_options[] = {
    { "packet_size", "set silence packet size",                                  OFFSET(packet_size), AV_OPT_TYPE_INT,      { .i64 = 4096 }, 0, INT_MAX,   A },
    { "pad_len",     "set number of samples of silence to add",                  OFFSET(pad_len),     AV_OPT_TYPE_INT64,    { .i64 = -1 }, -1, INT64_MAX, A },
    { "whole_len",   "set minimum target number of samples in the audio stream", OFFSET(whole_len),   AV_OPT_TYPE_INT64,    { .i64 = -1 }, -1, INT64_MAX, A },
    { "pad_dur",     "set duration of silence to add",                           OFFSET(pad_dur),     AV_OPT_TYPE_DURATION, { .i64 = 0 },   0, INT64_MAX, A },
    { "whole_dur",   "set minimum target duration in the audio stream",          OFFSET(whole_dur),   AV_OPT_TYPE_DURATION, { .i64 = 0 },   0, INT64_MAX, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(apad);
/* Reject the mutually exclusive combination of pad_len and whole_len. */
static av_cold int init(AVFilterContext *ctx)
{
    APadContext *s = ctx->priv;

    s->next_pts = AV_NOPTS_VALUE;
    if (s->whole_len >= 0 && s->pad_len >= 0) {
        av_log(ctx, AV_LOG_ERROR, "Both whole and pad length are set, this is not possible\n");
        return AVERROR(EINVAL);
    }

    return 0;
}
/* Forward the frame unchanged, tracking how many samples are still owed
 * in whole_len mode and the pts where appended silence would start. */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    APadContext *s = ctx->priv;

    if (s->whole_len >= 0) {
        s->whole_len_left = FFMAX(s->whole_len_left - frame->nb_samples, 0);
        av_log(ctx, AV_LOG_DEBUG,
               "n_out:%d whole_len_left:%"PRId64"\n", frame->nb_samples, s->whole_len_left);
    }

    /* pts just past the end of this frame, in input time base */
    s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);
    return ff_filter_frame(ctx->outputs[0], frame);
}
/* While input is available, just forward the request; once the input hits
 * EOF, synthesize silence frames until the configured pad/whole length is
 * satisfied, then return AVERROR_EOF. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    APadContext *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && !ctx->is_disabled) {
        int n_out = s->packet_size;
        AVFrame *outsamplesref;

        if (s->whole_len >= 0 && s->pad_len < 0) {
            /* whole_len mode: pad exactly what is missing from the target */
            s->pad_len = s->pad_len_left = s->whole_len_left;
        }
        if (s->pad_len >=0 || s->whole_len >= 0) {
            n_out = FFMIN(n_out, s->pad_len_left);
            s->pad_len_left -= n_out;
            av_log(ctx, AV_LOG_DEBUG,
                   "padding n_out:%d pad_len_left:%"PRId64"\n", n_out, s->pad_len_left);
        }

        if (!n_out)
            return AVERROR_EOF;

        outsamplesref = ff_get_audio_buffer(outlink, n_out);
        if (!outsamplesref)
            return AVERROR(ENOMEM);

        av_assert0(outsamplesref->sample_rate == outlink->sample_rate);
        av_assert0(outsamplesref->nb_samples  == n_out);

        av_samples_set_silence(outsamplesref->extended_data, 0,
                               n_out,
                               outsamplesref->channels,
                               outsamplesref->format);

        /* continue the timeline where the last real frame ended */
        outsamplesref->pts = s->next_pts;
        if (s->next_pts != AV_NOPTS_VALUE)
            s->next_pts += av_rescale_q(n_out, (AVRational){1, outlink->sample_rate}, outlink->time_base);

        return ff_filter_frame(outlink, outsamplesref);
    }
    return ret;
}
/* Convert the duration options into sample counts at the negotiated
 * sample rate and initialize the remaining-sample counters. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    APadContext *s  = ctx->priv;

    if (s->pad_dur)
        s->pad_len = av_rescale(s->pad_dur, outlink->sample_rate, AV_TIME_BASE);
    if (s->whole_dur)
        s->whole_len = av_rescale(s->whole_dur, outlink->sample_rate, AV_TIME_BASE);

    s->pad_len_left   = s->pad_len;
    s->whole_len_left = s->whole_len;

    return 0;
}
static const AVFilterPad apad_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* request_frame on the output generates the trailing silence after EOF. */
static const AVFilterPad apad_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .config_props  = config_output,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_apad = {
    .name          = "apad",
    .description   = NULL_IF_CONFIG_SMALL("Pad audio with silence."),
    .init          = init,
    .priv_size     = sizeof(APadContext),
    .inputs        = apad_inputs,
    .outputs       = apad_outputs,
    .priv_class    = &apad_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};

303
externals/ffmpeg/libavfilter/af_aphaser.c vendored Executable file
View File

@@ -0,0 +1,303 @@
/*
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* phaser audio filter
*/
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "generate_wave_table.h"
/* Private state for the "aphaser" filter. */
typedef struct AudioPhaserContext {
    const AVClass *class;
    double in_gain, out_gain;     // gains applied before/after the delay mix
    double delay;                 // maximum delay in milliseconds (option)
    double decay;                 // feedback decay of the delayed signal
    double speed;                 // modulation speed in Hz (option)
    int type;                     // modulation waveform, a WAVE_* value

    int delay_buffer_length;      // delay line length in samples (per channel)
    double *delay_buffer;         // delay line storage (channels * length doubles)

    int modulation_buffer_length; // length of the precomputed modulation table
    int32_t *modulation_buffer;   // per-step delay offsets from the wave table

    int delay_pos, modulation_pos; // current positions, persisted across frames

    /* Per-sample-format processing callback, chosen in config_output(). */
    void (*phaser)(struct AudioPhaserContext *s,
                   uint8_t * const *src, uint8_t **dst,
                   int nb_samples, int channels);
} AudioPhaserContext;
#define OFFSET(x) offsetof(AudioPhaserContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* User-visible options; defaults follow the classic sox-style phaser. */
static const AVOption aphaser_options[] = {
    { "in_gain",  "set input gain",            OFFSET(in_gain),  AV_OPT_TYPE_DOUBLE, {.dbl=.4},  0,  1,   FLAGS },
    { "out_gain", "set output gain",           OFFSET(out_gain), AV_OPT_TYPE_DOUBLE, {.dbl=.74}, 0,  1e9, FLAGS },
    { "delay",    "set delay in milliseconds", OFFSET(delay),    AV_OPT_TYPE_DOUBLE, {.dbl=3.},  0,  5,   FLAGS },
    { "decay",    "set decay",                 OFFSET(decay),    AV_OPT_TYPE_DOUBLE, {.dbl=.4},  0,  .99, FLAGS },
    { "speed",    "set modulation speed",      OFFSET(speed),    AV_OPT_TYPE_DOUBLE, {.dbl=.5},  .1, 2,   FLAGS },
    { "type",     "set modulation type",       OFFSET(type),     AV_OPT_TYPE_INT,    {.i64=WAVE_TRI}, 0, WAVE_NB-1, FLAGS, "type" },
    { "triangular",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, FLAGS, "type" },
    { "t",           NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, FLAGS, "type" },
    { "sinusoidal",  NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, FLAGS, "type" },
    { "s",           NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, FLAGS, "type" },
    { NULL }
};
AVFILTER_DEFINE_CLASS(aphaser);
/**
 * Initialization for aphaser: sanity-check the configured gains and warn
 * about combinations that can drive the output past full scale.  Neither
 * condition is fatal -- the filter still runs.
 */
static av_cold int init(AVFilterContext *ctx)
{
    const AudioPhaserContext *s = ctx->priv;
    const double feedback_limit = 1 - s->decay * s->decay;

    if (s->in_gain > feedback_limit)
        av_log(ctx, AV_LOG_WARNING, "in_gain may cause clipping\n");
    if (s->in_gain / (1 - s->decay) > 1 / s->out_gain)
        av_log(ctx, AV_LOG_WARNING, "out_gain may cause clipping\n");

    return 0;
}
/**
 * Advertise the formats aphaser supports: any channel count, any sample
 * rate, and the double/float/s32/s16 sample formats in both interleaved
 * and planar layouts (one phaser_* implementation exists per format).
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Cheap ring-buffer wraparound: yields a - b once a has reached b,
 * otherwise a.  NOTE(review): this is NOT a general modulo; it is only
 * correct while a < 2*b, which appears to hold at every call site below
 * (indices advance by at most one buffer length per step) -- confirm if
 * the modulation table amplitude ever exceeds the delay length. */
#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))
/* Phaser kernel for planar formats: each channel owns a contiguous
 * segment of the delay line.  Per sample:
 *   v = in * in_gain + delayed * decay;  out = v * out_gain
 * where "delayed" is read at an offset taken from the modulation table. */
#define PHASER_PLANAR(name, type)                                      \
static void phaser_## name ##p(AudioPhaserContext *s,                  \
                               uint8_t * const *ssrc, uint8_t **ddst,  \
                               int nb_samples, int channels)           \
{                                                                      \
    int i, c, delay_pos, modulation_pos;                               \
                                                                       \
    av_assert0(channels > 0);                                          \
    for (c = 0; c < channels; c++) {                                   \
        type *src = (type *)ssrc[c];                                   \
        type *dst = (type *)ddst[c];                                   \
        double *buffer = s->delay_buffer +                             \
                         c * s->delay_buffer_length;                   \
                                                                       \
        delay_pos      = s->delay_pos;                                 \
        modulation_pos = s->modulation_pos;                            \
                                                                       \
        for (i = 0; i < nb_samples; i++, src++, dst++) {               \
            double v = *src * s->in_gain + buffer[                     \
                       MOD(delay_pos + s->modulation_buffer[           \
                       modulation_pos],                                \
                           s->delay_buffer_length)] * s->decay;        \
                                                                       \
            modulation_pos = MOD(modulation_pos + 1,                   \
                             s->modulation_buffer_length);             \
            delay_pos = MOD(delay_pos + 1, s->delay_buffer_length);    \
            buffer[delay_pos] = v;                                     \
                                                                       \
            *dst = v * s->out_gain;                                    \
        }                                                              \
    }                                                                  \
                                                                       \
    s->delay_pos      = delay_pos;                                     \
    s->modulation_pos = modulation_pos;                                \
}
/* Phaser kernel for interleaved formats: the delay line is interleaved
 * too (index * channels + c), all channels share the same modulation. */
#define PHASER(name, type)                                              \
static void phaser_## name (AudioPhaserContext *s,                      \
                            uint8_t * const *ssrc, uint8_t **ddst,      \
                            int nb_samples, int channels)               \
{                                                                       \
    int i, c, delay_pos, modulation_pos;                                \
    type *src = (type *)ssrc[0];                                        \
    type *dst = (type *)ddst[0];                                        \
    double *buffer = s->delay_buffer;                                   \
                                                                        \
    delay_pos      = s->delay_pos;                                      \
    modulation_pos = s->modulation_pos;                                 \
                                                                        \
    for (i = 0; i < nb_samples; i++) {                                  \
        int pos = MOD(delay_pos + s->modulation_buffer[modulation_pos], \
                   s->delay_buffer_length) * channels;                  \
        int npos;                                                       \
                                                                        \
        delay_pos = MOD(delay_pos + 1, s->delay_buffer_length);         \
        npos = delay_pos * channels;                                    \
        for (c = 0; c < channels; c++, src++, dst++) {                  \
            double v = *src * s->in_gain + buffer[pos + c] * s->decay;  \
                                                                        \
            buffer[npos + c] = v;                                       \
                                                                        \
            *dst = v * s->out_gain;                                     \
        }                                                               \
                                                                        \
        modulation_pos = MOD(modulation_pos + 1,                        \
                         s->modulation_buffer_length);                  \
    }                                                                   \
                                                                        \
    s->delay_pos      = delay_pos;                                      \
    s->modulation_pos = modulation_pos;                                 \
}
/* Instantiate one kernel per supported sample format. */
PHASER_PLANAR(dbl, double)
PHASER_PLANAR(flt, float)
PHASER_PLANAR(s16, int16_t)
PHASER_PLANAR(s32, int32_t)
PHASER(dbl, double)
PHASER(flt, float)
PHASER(s16, int16_t)
PHASER(s32, int32_t)
/**
 * Output configuration for aphaser: size the delay line from the delay
 * option (ms -> samples), precompute one modulation period as a table of
 * int32 delay offsets, and select the per-format processing kernel.
 */
static int config_output(AVFilterLink *outlink)
{
    AudioPhaserContext *s = outlink->src->priv;
    AVFilterLink *inlink = outlink->src->inputs[0];

    /* delay is in milliseconds; round to the nearest sample count */
    s->delay_buffer_length = s->delay * 0.001 * inlink->sample_rate + 0.5;
    if (s->delay_buffer_length <= 0) {
        av_log(outlink->src, AV_LOG_ERROR, "delay is too small\n");
        return AVERROR(EINVAL);
    }
    /* one delay-line segment per channel; NOTE(review): the per-element
     * size is multiplied by the channel count here rather than the count
     * argument -- verify against av_calloc overflow semantics */
    s->delay_buffer = av_calloc(s->delay_buffer_length, sizeof(*s->delay_buffer) * inlink->channels);
    /* one full modulation period at the configured speed */
    s->modulation_buffer_length = inlink->sample_rate / s->speed + 0.5;
    s->modulation_buffer = av_malloc_array(s->modulation_buffer_length, sizeof(*s->modulation_buffer));
    if (!s->modulation_buffer || !s->delay_buffer)
        return AVERROR(ENOMEM);

    /* amplitude = delay length, phase offset = pi/2 */
    ff_generate_wave_table(s->type, AV_SAMPLE_FMT_S32,
                           s->modulation_buffer, s->modulation_buffer_length,
                           1., s->delay_buffer_length, M_PI / 2.0);

    s->delay_pos = s->modulation_pos = 0;

    /* pick the kernel matching the negotiated sample format */
    switch (inlink->format) {
    case AV_SAMPLE_FMT_DBL:  s->phaser = phaser_dbl;  break;
    case AV_SAMPLE_FMT_DBLP: s->phaser = phaser_dblp; break;
    case AV_SAMPLE_FMT_FLT:  s->phaser = phaser_flt;  break;
    case AV_SAMPLE_FMT_FLTP: s->phaser = phaser_fltp; break;
    case AV_SAMPLE_FMT_S16:  s->phaser = phaser_s16;  break;
    case AV_SAMPLE_FMT_S16P: s->phaser = phaser_s16p; break;
    case AV_SAMPLE_FMT_S32:  s->phaser = phaser_s32;  break;
    case AV_SAMPLE_FMT_S32P: s->phaser = phaser_s32p; break;
    default: av_assert0(0);  /* query_formats() guarantees one of the above */
    }

    return 0;
}
/**
 * Run the selected phaser kernel over one input frame.  Processes in
 * place when the input frame is writable; otherwise a new output frame
 * is allocated and the input is freed after processing.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *inbuf)
{
    AudioPhaserContext *s = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    AVFrame *outbuf = inbuf;

    if (!av_frame_is_writable(inbuf)) {
        outbuf = ff_get_audio_buffer(outlink, inbuf->nb_samples);
        if (!outbuf) {
            av_frame_free(&inbuf);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(outbuf, inbuf);
    }

    s->phaser(s, inbuf->extended_data, outbuf->extended_data,
              outbuf->nb_samples, outbuf->channels);

    if (outbuf != inbuf)
        av_frame_free(&inbuf);

    return ff_filter_frame(outlink, outbuf);
}
/* Release the delay line and the precomputed modulation table. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioPhaserContext *s = ctx->priv;

    av_freep(&s->modulation_buffer);
    av_freep(&s->delay_buffer);
}
/* Input pad: frames are processed by filter_frame() above. */
static const AVFilterPad aphaser_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Output pad: config_output() sizes the buffers and picks the kernel. */
static const AVFilterPad aphaser_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};
/* Filter definition for "aphaser". */
AVFilter ff_af_aphaser = {
    .name          = "aphaser",
    .description   = NULL_IF_CONFIG_SMALL("Add a phasing effect to the audio."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioPhaserContext),
    .init          = init,
    .uninit        = uninit,
    .inputs        = aphaser_inputs,
    .outputs       = aphaser_outputs,
    .priv_class    = &aphaser_class,
};

257
externals/ffmpeg/libavfilter/af_apulsator.c vendored Executable file
View File

@@ -0,0 +1,257 @@
/*
* Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
/* LFO waveform shapes and the units the modulation rate can be given in. */
enum PulsatorModes { SINE, TRIANGLE, SQUARE, SAWUP, SAWDOWN, NB_MODES };
enum PulsatorTimings { UNIT_BPM, UNIT_MS, UNIT_HZ, NB_TIMINGS };
/* A minimal low-frequency oscillator; one instance per stereo channel. */
typedef struct SimpleLFO {
    double phase;   // current phase in [0, 1)
    double freq;    // oscillation frequency in Hz
    double offset;  // per-channel phase offset
    double amount;  // output scale factor
    double pwidth;  // pulse width (stretches the phase)
    int mode;       // waveform, a PulsatorModes value
    int srate;      // sample rate used to advance the phase
} SimpleLFO;
/* Private state for the "apulsator" filter. */
typedef struct AudioPulsatorContext {
    const AVClass *class;
    int mode;          // LFO waveform (option)
    double level_in;   // input gain
    double level_out;  // output gain
    double amount;     // modulation depth, 0..1
    double offset_l;   // left LFO phase offset
    double offset_r;   // right LFO phase offset
    double pwidth;     // pulse width
    double bpm;        // rate when timing == bpm
    double hertz;      // rate when timing == hz
    int ms;            // rate when timing == ms
    int timing;        // which of bpm/ms/hz is in effect
    SimpleLFO lfoL, lfoR;  // per-channel oscillators
} AudioPulsatorContext;
#define OFFSET(x) offsetof(AudioPulsatorContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* Options; "bpm"/"ms"/"hz" appear both as timing-unit constants and as
 * the corresponding rate options. */
static const AVOption apulsator_options[] = {
    { "level_in",   "set input gain", OFFSET(level_in),  AV_OPT_TYPE_DOUBLE, {.dbl=1},   0.015625,  64, FLAGS, },
    { "level_out", "set output gain", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1},   0.015625,  64, FLAGS, },
    { "mode",             "set mode", OFFSET(mode),      AV_OPT_TYPE_INT,    {.i64=SINE},SINE, NB_MODES-1, FLAGS, "mode" },
    {   "sine",                 NULL, 0,                 AV_OPT_TYPE_CONST,  {.i64=SINE},    0,        0, FLAGS, "mode" },
    {   "triangle",             NULL, 0,                 AV_OPT_TYPE_CONST,  {.i64=TRIANGLE},0,        0, FLAGS, "mode" },
    {   "square",               NULL, 0,                 AV_OPT_TYPE_CONST,  {.i64=SQUARE},  0,        0, FLAGS, "mode" },
    {   "sawup",                NULL, 0,                 AV_OPT_TYPE_CONST,  {.i64=SAWUP},   0,        0, FLAGS, "mode" },
    {   "sawdown",              NULL, 0,                 AV_OPT_TYPE_CONST,  {.i64=SAWDOWN}, 0,        0, FLAGS, "mode" },
    { "amount",     "set modulation", OFFSET(amount),    AV_OPT_TYPE_DOUBLE, {.dbl=1},       0,        1, FLAGS },
    { "offset_l",     "set offset L", OFFSET(offset_l),  AV_OPT_TYPE_DOUBLE, {.dbl=0},       0,        1, FLAGS },
    { "offset_r",     "set offset R", OFFSET(offset_r),  AV_OPT_TYPE_DOUBLE, {.dbl=.5},      0,        1, FLAGS },
    { "width",     "set pulse width", OFFSET(pwidth),    AV_OPT_TYPE_DOUBLE, {.dbl=1},       0,        2, FLAGS },
    { "timing",         "set timing", OFFSET(timing),    AV_OPT_TYPE_INT,    {.i64=2},       0, NB_TIMINGS-1, FLAGS, "timing" },
    {   "bpm",                  NULL, 0,                 AV_OPT_TYPE_CONST,  {.i64=UNIT_BPM},0,        0, FLAGS, "timing" },
    {   "ms",                   NULL, 0,                 AV_OPT_TYPE_CONST,  {.i64=UNIT_MS}, 0,        0, FLAGS, "timing" },
    {   "hz",                   NULL, 0,                 AV_OPT_TYPE_CONST,  {.i64=UNIT_HZ}, 0,        0, FLAGS, "timing" },
    { "bpm",               "set BPM", OFFSET(bpm),       AV_OPT_TYPE_DOUBLE, {.dbl=120},    30,      300, FLAGS },
    { "ms",                 "set ms", OFFSET(ms),        AV_OPT_TYPE_INT,    {.i64=500},    10,     2000, FLAGS },
    { "hz",          "set frequency", OFFSET(hertz),     AV_OPT_TYPE_DOUBLE, {.dbl=2},    0.01,      100, FLAGS },
    { NULL }
};
AVFILTER_DEFINE_CLASS(apulsator);
/* Step the oscillator forward by "count" samples, keeping phase in [0, 1). */
static void lfo_advance(SimpleLFO *lfo, unsigned count)
{
    double phase = fabs(lfo->phase + count * lfo->freq / lfo->srate);

    lfo->phase = phase >= 1 ? fmod(phase, 1) : phase;
}
/**
 * Sample the oscillator at its current phase.
 *
 * The phase is stretched by the pulse width (clamped to [0.01, 1.99]),
 * shifted by the per-channel offset, wrapped back into [0, 1), then
 * mapped through the selected waveform.  The result is scaled by the
 * modulation amount, so the return value lies in [-amount, amount].
 */
static double lfo_get_value(SimpleLFO *lfo)
{
    double phs = FFMIN(100, lfo->phase / FFMIN(1.99, FFMAX(0.01, lfo->pwidth)) + lfo->offset);
    double val;

    if (phs > 1)
        phs = fmod(phs, 1.);

    switch (lfo->mode) {
    case SINE:
        val = sin(phs * 2 * M_PI);
        break;
    case TRIANGLE:
        /* piecewise-linear triangle: rises, falls, rises again */
        if (phs > 0.75)
            val = (phs - 0.75) * 4 - 1;
        else if (phs > 0.25)
            val = -4 * phs + 2;
        else
            val = phs * 4;
        break;
    case SQUARE:
        val = phs < 0.5 ? -1 : +1;
        break;
    case SAWUP:
        val = phs * 2 - 1;
        break;
    case SAWDOWN:
        val = 1 - phs * 2;
        break;
    default: av_assert0(0);  /* mode option range forbids other values */
    }

    return val * lfo->amount;
}
/**
 * Process one frame of interleaved stereo doubles (the only layout
 * negotiated in query_formats).  For each sample pair the signal is
 * crossfaded between the dry input and an LFO-modulated copy:
 *   out = (in * lfo_term) + in * (1 - amount), all scaled by the gains.
 * Both LFOs advance by one step per sample pair.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    AudioPulsatorContext *s = ctx->priv;
    const double *src = (const double *)in->data[0];
    const int nb_samples = in->nb_samples;
    const double level_out = s->level_out;
    const double level_in = s->level_in;
    const double amount = s->amount;
    AVFrame *out;
    double *dst;
    int n;

    /* process in place when the input frame is writable */
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(inlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];

    for (n = 0; n < nb_samples; n++) {
        double outL;
        double outR;
        double inL = src[0] * level_in;
        double inR = src[1] * level_in;
        double procL = inL;
        double procR = inR;

        /* modulated ("wet") copies, each channel driven by its own LFO */
        procL *= lfo_get_value(&s->lfoL) * 0.5 + amount / 2;
        procR *= lfo_get_value(&s->lfoR) * 0.5 + amount / 2;
        /* mix wet and dry according to the modulation depth */
        outL = procL + inL * (1 - amount);
        outR = procR + inR * (1 - amount);

        outL *= level_out;
        outR *= level_out;

        dst[0] = outL;
        dst[1] = outR;

        lfo_advance(&s->lfoL, 1);
        lfo_advance(&s->lfoR, 1);

        dst += 2;
        src += 2;
    }

    if (in != out)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
/**
 * apulsator only supports interleaved double samples in stereo (the
 * per-sample loop in filter_frame() reads src[0]/src[1] pairs); any
 * sample rate is accepted.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layout = NULL;
    AVFilterFormats *formats = NULL;
    int ret;

    if ((ret = ff_add_format                 (&formats, AV_SAMPLE_FMT_DBL  )) < 0 ||
        (ret = ff_set_common_formats         (ctx     , formats            )) < 0 ||
        (ret = ff_add_channel_layout         (&layout , AV_CH_LAYOUT_STEREO)) < 0 ||
        (ret = ff_set_common_channel_layouts (ctx     , layout             )) < 0)
        return ret;

    /* NOTE(review): ff_all_samplerates() result is passed on unchecked;
     * verify ff_set_common_samplerates() tolerates NULL on OOM */
    formats = ff_all_samplerates();
    return ff_set_common_samplerates(ctx, formats);
}
/**
 * Input configuration for apulsator: derive the LFO frequency from the
 * selected timing unit and copy the shared parameters into both
 * per-channel oscillators (only the phase offset differs between them).
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    AudioPulsatorContext *s = ctx->priv;
    SimpleLFO *lfos[2] = { &s->lfoL, &s->lfoR };
    double freq;
    int i;

    switch (s->timing) {
    case UNIT_BPM: freq = s->bpm / 60;         break;
    case UNIT_MS:  freq = 1 / (s->ms / 1000.); break;
    case UNIT_HZ:  freq = s->hertz;            break;
    default: av_assert0(0);  /* option range forbids other values */
    }

    for (i = 0; i < 2; i++) {
        lfos[i]->freq   = freq;
        lfos[i]->mode   = s->mode;
        lfos[i]->srate  = inlink->sample_rate;
        lfos[i]->amount = s->amount;
        lfos[i]->pwidth = s->pwidth;
    }
    s->lfoL.offset = s->offset_l;
    s->lfoR.offset = s->offset_r;

    return 0;
}
/* Input pad: config_input() sets up the LFOs, filter_frame() processes. */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Output pad: plain pass-through, no callbacks needed. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter definition for "apulsator". */
AVFilter ff_af_apulsator = {
    .name          = "apulsator",
    .description   = NULL_IF_CONFIG_SMALL("Audio pulsator."),
    .priv_size     = sizeof(AudioPulsatorContext),
    .priv_class    = &apulsator_class,
    .query_formats = query_formats,
    .inputs        = inputs,
    .outputs       = outputs,
};

353
externals/ffmpeg/libavfilter/af_aresample.c vendored Executable file
View File

@@ -0,0 +1,353 @@
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2011 Mina Nagy Zaki
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* resampling audio filter
*/
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "libavutil/avassert.h"
#include "libswresample/swresample.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
/* Private state for the "aresample" filter (a thin libswresample wrapper). */
typedef struct AResampleContext {
    const AVClass *class;
    int sample_rate_arg;      // "sample_rate" option; 0 means keep input rate
    double ratio;             // output rate / input rate, set in config_output()
    struct SwrContext *swr;   // the underlying resampler
    int64_t next_pts;         // next expected output pts (output time base)
    int more_data;            // set when the resampler likely buffers more output
} AResampleContext;
/**
 * Allocate the SwrContext and forward any leftover filter options to it
 * (aresample exposes the whole swresample option space).  The explicit
 * "sample_rate" option, when set, overrides the swr "osr" option.
 */
static av_cold int init_dict(AVFilterContext *ctx, AVDictionary **opts)
{
    AResampleContext *aresample = ctx->priv;
    int ret = 0;

    aresample->next_pts = AV_NOPTS_VALUE;
    aresample->swr = swr_alloc();
    if (!aresample->swr) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (opts) {
        AVDictionaryEntry *e = NULL;

        /* push every remaining option straight into the swr context */
        while ((e = av_dict_get(*opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
            if ((ret = av_opt_set(aresample->swr, e->key, e->value, 0)) < 0)
                goto end;
        }
        av_dict_free(opts);
    }
    if (aresample->sample_rate_arg > 0)
        av_opt_set_int(aresample->swr, "osr", aresample->sample_rate_arg, 0);
end:
    return ret;
}
/* Free the swresample context (swr_free also NULLs the pointer). */
static av_cold void uninit(AVFilterContext *ctx)
{
    AResampleContext *aresample = ctx->priv;
    swr_free(&aresample->swr);
}
/**
 * Format negotiation for aresample: the input side accepts anything,
 * while the output side is pinned to whatever the user forced through
 * the swr options ("osr", "osf", "ocl"); unset options leave the output
 * unconstrained too.
 */
static int query_formats(AVFilterContext *ctx)
{
    AResampleContext *aresample = ctx->priv;
    enum AVSampleFormat out_format;
    int64_t out_rate, out_layout;

    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];

    AVFilterFormats        *in_formats, *out_formats;
    AVFilterFormats        *in_samplerates, *out_samplerates;
    AVFilterChannelLayouts *in_layouts, *out_layouts;
    int ret;

    /* read back the output constraints configured on the swr context */
    av_opt_get_sample_fmt(aresample->swr, "osf", 0, &out_format);
    av_opt_get_int(aresample->swr, "osr", 0, &out_rate);
    av_opt_get_int(aresample->swr, "ocl", 0, &out_layout);

    in_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);
    if ((ret = ff_formats_ref(in_formats, &inlink->out_formats)) < 0)
        return ret;

    in_samplerates = ff_all_samplerates();
    if ((ret = ff_formats_ref(in_samplerates, &inlink->out_samplerates)) < 0)
        return ret;

    in_layouts = ff_all_channel_counts();
    if ((ret = ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    if(out_rate > 0) {
        /* NOTE(review): out_rate is int64_t narrowed into an int list --
         * fine for real sample rates, but worth confirming */
        int ratelist[] = { out_rate, -1 };
        out_samplerates = ff_make_format_list(ratelist);
    } else {
        out_samplerates = ff_all_samplerates();
    }

    if ((ret = ff_formats_ref(out_samplerates, &outlink->in_samplerates)) < 0)
        return ret;

    if(out_format != AV_SAMPLE_FMT_NONE) {
        int formatlist[] = { out_format, -1 };
        out_formats = ff_make_format_list(formatlist);
    } else
        out_formats = ff_all_formats(AVMEDIA_TYPE_AUDIO);

    if ((ret = ff_formats_ref(out_formats, &outlink->in_formats)) < 0)
        return ret;

    if(out_layout) {
        int64_t layout_list[] = { out_layout, -1 };
        out_layouts = avfilter_make_format64_list(layout_list);
    } else
        out_layouts = ff_all_channel_counts();

    return ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts);
}
/**
 * Configure and initialize the resampler once formats are negotiated:
 * wire the negotiated in/out layouts, formats and rates into the swr
 * context, init it, and set the output time base to 1/out_rate.
 */
static int config_output(AVFilterLink *outlink)
{
    int ret;
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    AResampleContext *aresample = ctx->priv;
    int64_t out_rate, out_layout;
    enum AVSampleFormat out_format;
    char inchl_buf[128], outchl_buf[128];

    aresample->swr = swr_alloc_set_opts(aresample->swr,
                                        outlink->channel_layout, outlink->format, outlink->sample_rate,
                                        inlink->channel_layout, inlink->format, inlink->sample_rate,
                                        0, ctx);
    if (!aresample->swr)
        return AVERROR(ENOMEM);

    /* channel-count-only links carry no layout; pass counts explicitly */
    if (!inlink->channel_layout)
        av_opt_set_int(aresample->swr, "ich", inlink->channels, 0);
    if (!outlink->channel_layout)
        av_opt_set_int(aresample->swr, "och", outlink->channels, 0);

    ret = swr_init(aresample->swr);
    if (ret < 0)
        return ret;

    /* sanity-check that the swr context agrees with the negotiated link */
    av_opt_get_int(aresample->swr, "osr", 0, &out_rate);
    av_opt_get_int(aresample->swr, "ocl", 0, &out_layout);
    av_opt_get_sample_fmt(aresample->swr, "osf", 0, &out_format);
    outlink->time_base = (AVRational) {1, out_rate};

    av_assert0(outlink->sample_rate == out_rate);
    av_assert0(outlink->channel_layout == out_layout || !outlink->channel_layout);
    av_assert0(outlink->format == out_format);

    aresample->ratio = (double)outlink->sample_rate / inlink->sample_rate;

    av_get_channel_layout_string(inchl_buf,  sizeof(inchl_buf),  inlink ->channels, inlink ->channel_layout);
    av_get_channel_layout_string(outchl_buf, sizeof(outchl_buf), outlink->channels, outlink->channel_layout);

    av_log(ctx, AV_LOG_VERBOSE, "ch:%d chl:%s fmt:%s r:%dHz -> ch:%d chl:%s fmt:%s r:%dHz\n",
           inlink ->channels, inchl_buf,  av_get_sample_fmt_name(inlink->format),  inlink->sample_rate,
           outlink->channels, outchl_buf, av_get_sample_fmt_name(outlink->format), outlink->sample_rate);
    return 0;
}
/**
 * Resample one input frame.  Output capacity is estimated as
 * n_in * ratio plus slack for the resampler's internal delay; the pts
 * is mapped through swr_next_pts() so compensation stays consistent.
 * more_data is set when the output buffer was filled exactly, hinting
 * that request_frame() should drain the internal FIFO first.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *insamplesref)
{
    AResampleContext *aresample = inlink->dst->priv;
    const int n_in  = insamplesref->nb_samples;
    int64_t delay;
    int n_out       = n_in * aresample->ratio + 32;  // estimate + slack
    AVFilterLink *const outlink = inlink->dst->outputs[0];
    AVFrame *outsamplesref;
    int ret;

    delay = swr_get_delay(aresample->swr, outlink->sample_rate);
    if (delay > 0)
        n_out += FFMIN(delay, FFMAX(4096, n_out));

    outsamplesref = ff_get_audio_buffer(outlink, n_out);

    if(!outsamplesref) {
        av_frame_free(&insamplesref);
        return AVERROR(ENOMEM);
    }

    av_frame_copy_props(outsamplesref, insamplesref);
    outsamplesref->format         = outlink->format;
    outsamplesref->channels       = outlink->channels;
    outsamplesref->channel_layout = outlink->channel_layout;
    outsamplesref->sample_rate    = outlink->sample_rate;

    if(insamplesref->pts != AV_NOPTS_VALUE) {
        /* rescale into an in_rate*out_rate grid so swr_next_pts() can
         * apply compensation, then divide back down */
        int64_t inpts = av_rescale(insamplesref->pts, inlink->time_base.num * (int64_t)outlink->sample_rate * inlink->sample_rate, inlink->time_base.den);
        int64_t outpts= swr_next_pts(aresample->swr, inpts);
        aresample->next_pts =
        outsamplesref->pts  = ROUNDED_DIV(outpts, inlink->sample_rate);
    } else {
        outsamplesref->pts  = AV_NOPTS_VALUE;
    }
    n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out,
                                 (void *)insamplesref->extended_data, n_in);
    if (n_out <= 0) {
        av_frame_free(&outsamplesref);
        av_frame_free(&insamplesref);
        return 0;  /* nothing produced yet; not an error */
    }

    aresample->more_data = outsamplesref->nb_samples == n_out; // Indicate that there is probably more data in our buffers
    outsamplesref->nb_samples  = n_out;

    ret = ff_filter_frame(outlink, outsamplesref);
    av_frame_free(&insamplesref);
    return ret;
}
/**
 * Drain up to 4096 samples from the resampler's internal buffers into a
 * new frame.  With final=1 a NULL input is passed, which tells
 * swr_convert to flush everything; otherwise 0 input samples are given
 * (the input pointer is unused in that case since the count is 0).
 * Returns 0 with *outsamplesref_ret set, AVERROR_EOF when nothing was
 * left to drain, or a negative error.
 */
static int flush_frame(AVFilterLink *outlink, int final, AVFrame **outsamplesref_ret)
{
    AVFilterContext *ctx = outlink->src;
    AResampleContext *aresample = ctx->priv;
    AVFilterLink *const inlink = outlink->src->inputs[0];
    AVFrame *outsamplesref;
    int n_out = 4096;
    int64_t pts;

    outsamplesref = ff_get_audio_buffer(outlink, n_out);
    *outsamplesref_ret = outsamplesref;
    if (!outsamplesref)
        return AVERROR(ENOMEM);

    /* INT64_MIN asks swr_next_pts for the pts of the buffered data */
    pts = swr_next_pts(aresample->swr, INT64_MIN);
    pts = ROUNDED_DIV(pts, inlink->sample_rate);

    n_out = swr_convert(aresample->swr, outsamplesref->extended_data, n_out, final ? NULL : (void*)outsamplesref->extended_data, 0);
    if (n_out <= 0) {
        av_frame_free(&outsamplesref);
        return (n_out == 0) ? AVERROR_EOF : n_out;
    }

    outsamplesref->sample_rate = outlink->sample_rate;
    outsamplesref->nb_samples  = n_out;

    outsamplesref->pts = pts;

    return 0;
}
/**
 * Output-driven frame production: first drain the resampler's internal
 * buffers if filter_frame() hinted at pending data, then ask upstream
 * for more input, and on upstream EOF do a final flush of the resampler.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AResampleContext *aresample = ctx->priv;
    int ret;

    // First try to get data from the internal buffers
    if (aresample->more_data) {
        AVFrame *outsamplesref;

        if (flush_frame(outlink, 0, &outsamplesref) >= 0) {
            return ff_filter_frame(outlink, outsamplesref);
        }
    }
    aresample->more_data = 0;

    // Second request more data from the input
    ret = ff_request_frame(ctx->inputs[0]);

    // Third if we hit the end flush
    if (ret == AVERROR_EOF) {
        AVFrame *outsamplesref;

        if ((ret = flush_frame(outlink, 1, &outsamplesref)) < 0)
            return ret;

        return ff_filter_frame(outlink, outsamplesref);
    }
    return ret;
}
/* AVOption child iteration: expose the swresample class (and the filter's
 * embedded SwrContext) so swr options are discoverable/settable through
 * the filter's own option API. */
static const AVClass *resample_child_class_next(const AVClass *prev)
{
    return prev ? NULL : swr_get_class();
}
static void *resample_child_next(void *obj, void *prev)
{
    AResampleContext *s = obj;
    return prev ? NULL : s->swr;
}
#define OFFSET(x) offsetof(AResampleContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* Only one native option; everything else is delegated to swresample
 * via the child-class mechanism below. */
static const AVOption options[] = {
    {"sample_rate", NULL, OFFSET(sample_rate_arg), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
    {NULL}
};
static const AVClass aresample_class = {
    .class_name       = "aresample",
    .item_name        = av_default_item_name,
    .option           = options,
    .version          = LIBAVUTIL_VERSION_INT,
    .child_class_next = resample_child_class_next,
    .child_next       = resample_child_next,
};
/* Input pad: frames are resampled in filter_frame(). */
static const AVFilterPad aresample_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Output pad: config_output() initializes swr; request_frame() drains it. */
static const AVFilterPad aresample_outputs[] = {
    {
        .name          = "default",
        .config_props  = config_output,
        .request_frame = request_frame,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter definition for "aresample". */
AVFilter ff_af_aresample = {
    .name          = "aresample",
    .description   = NULL_IF_CONFIG_SMALL("Resample audio data."),
    .init_dict     = init_dict,
    .uninit        = uninit,
    .query_formats = query_formats,
    .priv_size     = sizeof(AResampleContext),
    .priv_class    = &aresample_class,
    .inputs        = aresample_inputs,
    .outputs       = aresample_outputs,
};

1549
externals/ffmpeg/libavfilter/af_arnndn.c vendored Executable file

File diff suppressed because it is too large Load Diff

129
externals/ffmpeg/libavfilter/af_asetnsamples.c vendored Executable file
View File

@@ -0,0 +1,129 @@
/*
* Copyright (c) 2012 Andrey Utkin
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Filter that changes number of samples on single output operation
*/
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "filters.h"
#include "internal.h"
#include "formats.h"
/* Private state for the "asetnsamples" filter. */
typedef struct ASNSContext {
    const AVClass *class;
    int nb_out_samples; ///< how many samples to output
    int pad;            // when set, zero-pad a short final frame to full size
} ASNSContext;
#define OFFSET(x) offsetof(ASNSContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* Options; "n"/"p" are short aliases for the long names. */
static const AVOption asetnsamples_options[] = {
    { "nb_out_samples", "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
    { "n",              "set the number of per-frame output samples", OFFSET(nb_out_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
    { "pad", "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
    { "p",   "pad last frame with zeros", OFFSET(pad), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, FLAGS },
    { NULL }
};
AVFILTER_DEFINE_CLASS(asetnsamples);
/**
 * activate() for asetnsamples: repackage the input into frames of
 * exactly nb_out_samples samples.  A shorter final frame (only possible
 * at EOF) is either passed through as-is (pad=0) or copied into a
 * full-size frame whose tail is silenced (pad=1).
 */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    ASNSContext *s = ctx->priv;
    AVFrame *frame = NULL, *pad_frame;
    int ret;

    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* min == max == nb_out_samples: only full chunks, except at EOF */
    ret = ff_inlink_consume_samples(inlink, s->nb_out_samples, s->nb_out_samples, &frame);
    if (ret < 0)
        return ret;

    if (ret > 0) {
        if (!s->pad || frame->nb_samples == s->nb_out_samples) {
            ret = ff_filter_frame(outlink, frame);
            /* more full chunks queued: reschedule ourselves immediately */
            if (ff_inlink_queued_samples(inlink) >= s->nb_out_samples)
                ff_filter_set_ready(ctx, 100);
            return ret;
        }

        /* short final frame: copy it into a full-size, zero-padded frame */
        pad_frame = ff_get_audio_buffer(outlink, s->nb_out_samples);
        if (!pad_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }

        ret = av_frame_copy_props(pad_frame, frame);
        if (ret < 0) {
            av_frame_free(&pad_frame);
            av_frame_free(&frame);
            return ret;
        }

        av_samples_copy(pad_frame->extended_data, frame->extended_data,
                        0, 0, frame->nb_samples, frame->channels, frame->format);
        av_samples_set_silence(pad_frame->extended_data, frame->nb_samples,
                               s->nb_out_samples - frame->nb_samples, frame->channels,
                               frame->format);
        av_frame_free(&frame);
        return ff_filter_frame(outlink, pad_frame);
    }

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
/* Pads carry no callbacks: all work happens in activate(). */
static const AVFilterPad asetnsamples_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
static const AVFilterPad asetnsamples_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter definition for "asetnsamples". */
AVFilter ff_af_asetnsamples = {
    .name        = "asetnsamples",
    .description = NULL_IF_CONFIG_SMALL("Set the number of samples for each output audio frames."),
    .priv_size   = sizeof(ASNSContext),
    .priv_class  = &asetnsamples_class,
    .inputs      = asetnsamples_inputs,
    .outputs     = asetnsamples_outputs,
    .activate    = activate,
};

118
externals/ffmpeg/libavfilter/af_asetrate.c vendored Executable file
View File

@@ -0,0 +1,118 @@
/*
* Copyright (c) 2013 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
/* Private state for the "asetrate" filter. */
typedef struct ASetRateContext {
    const AVClass *class;
    int sample_rate;  // target rate to stamp on frames (option)
    int rescale_pts;  // set in config_props() when pts must be rescaled
} ASetRateContext;
#define CONTEXT ASetRateContext
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* Helpers to declare an option bound to a CONTEXT field. */
#define OPT_GENERIC(name, field, def, min, max, descr, type, deffield, ...) \
    { name, descr, offsetof(CONTEXT, field), AV_OPT_TYPE_ ## type,          \
      { .deffield = def }, min, max, FLAGS, __VA_ARGS__ }
#define OPT_INT(name, field, def, min, max, descr, ...) \
    OPT_GENERIC(name, field, def, min, max, descr, INT, i64, __VA_ARGS__)
static const AVOption asetrate_options[] = {
    OPT_INT("sample_rate",           sample_rate, 44100, 1, INT_MAX, "set the sample rate",),
    OPT_INT("r",                     sample_rate, 44100, 1, INT_MAX, "set the sample rate",),
    {NULL},
};
AVFILTER_DEFINE_CLASS(asetrate);
/* Constrain only the OUTPUT sample rate to the configured value; the
 * input keeps whatever rate it has (data is reinterpreted, not resampled). */
static av_cold int query_formats(AVFilterContext *ctx)
{
    ASetRateContext *sr = ctx->priv;
    int sample_rates[] = { sr->sample_rate, -1 };

    return ff_formats_ref(ff_make_format_list(sample_rates),
                          &ctx->outputs[0]->in_samplerates);
}
/**
 * Choose the output time base.  If the input uses the canonical
 * 1/sample_rate time base, switch to 1/new_rate and leave pts values
 * alone; otherwise keep the input time base and mark that pts must be
 * rescaled per frame, warning if that time base is too coarse to
 * represent either rate accurately.
 */
static av_cold int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ASetRateContext *sr = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVRational intb = ctx->inputs[0]->time_base;
    int inrate = inlink->sample_rate;

    if (intb.num == 1 && intb.den == inrate) {
        outlink->time_base.num = 1;
        outlink->time_base.den = outlink->sample_rate;
    } else {
        outlink->time_base = intb;
        sr->rescale_pts = 1;
        if (av_q2d(intb) > 1.0 / FFMAX(inrate, outlink->sample_rate))
            av_log(ctx, AV_LOG_WARNING, "Time base is inaccurate\n");
    }
    return 0;
}
/* Retag each frame with the forced sample rate; sample data is untouched. */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx     = inlink->dst;
    ASetRateContext *priv    = ctx->priv;
    AVFilterLink    *outlink = ctx->outputs[0];

    frame->sample_rate = outlink->sample_rate;
    if (priv->rescale_pts)
        frame->pts = av_rescale(frame->pts, inlink->sample_rate,
                                outlink->sample_rate);

    return ff_filter_frame(outlink, frame);
}
/* Single audio input; frames are processed in filter_frame. */
static const AVFilterPad asetrate_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Single audio output; time base is fixed up in config_props. */
static const AVFilterPad asetrate_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_props,
    },
    { NULL }
};

/* Filter registration for "asetrate". */
AVFilter ff_af_asetrate = {
    .name          = "asetrate",
    .description   = NULL_IF_CONFIG_SMALL("Change the sample rate without "
                                          "altering the data."),
    .query_formats = query_formats,
    .priv_size     = sizeof(ASetRateContext),
    .inputs        = asetrate_inputs,
    .outputs       = asetrate_outputs,
    .priv_class    = &asetrate_class,
};

263
externals/ffmpeg/libavfilter/af_ashowinfo.c vendored Executable file
View File

@@ -0,0 +1,263 @@
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* filter for showing textual audio frame information
*/
#include <inttypes.h>
#include <stddef.h>
#include "libavutil/adler32.h"
#include "libavutil/attributes.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/downmix_info.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/mem.h"
#include "libavutil/replaygain.h"
#include "libavutil/timestamp.h"
#include "libavutil/samplefmt.h"
#include "libavcodec/avcodec.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
/* Per-instance state for the ashowinfo filter. */
typedef struct AShowInfoContext {
    /**
     * Scratch space for individual plane checksums for planar audio
     */
    uint32_t *plane_checksums;
} AShowInfoContext;
/* Release the per-plane checksum scratch buffer. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AShowInfoContext *priv = ctx->priv;

    av_freep(&priv->plane_checksums);
}
/* Print the matrix-encoding side data as a human-readable name. */
static void dump_matrixenc(AVFilterContext *ctx, AVFrameSideData *sd)
{
    const char *name = NULL;

    av_log(ctx, AV_LOG_INFO, "matrix encoding: ");
    if (sd->size < sizeof(enum AVMatrixEncoding)) {
        av_log(ctx, AV_LOG_INFO, "invalid data");
        return;
    }

    switch (*(enum AVMatrixEncoding *)sd->data) {
    case AV_MATRIX_ENCODING_NONE:           name = "none";                break;
    case AV_MATRIX_ENCODING_DOLBY:          name = "Dolby Surround";      break;
    case AV_MATRIX_ENCODING_DPLII:          name = "Dolby Pro Logic II";  break;
    case AV_MATRIX_ENCODING_DPLIIX:         name = "Dolby Pro Logic IIx"; break;
    case AV_MATRIX_ENCODING_DPLIIZ:         name = "Dolby Pro Logic IIz"; break;
    case AV_MATRIX_ENCODING_DOLBYEX:        name = "Dolby EX";            break;
    case AV_MATRIX_ENCODING_DOLBYHEADPHONE: name = "Dolby Headphone";     break;
    }

    if (name)
        av_log(ctx, AV_LOG_INFO, "%s", name);
    else
        av_log(ctx, AV_LOG_WARNING, "unknown"); /* unrecognized values warn, as before */
}
/* Print the downmix-info side data: preferred type plus mix levels. */
static void dump_downmix(AVFilterContext *ctx, AVFrameSideData *sd)
{
    const AVDownmixInfo *info;
    const char *type = NULL;

    av_log(ctx, AV_LOG_INFO, "downmix: ");
    if (sd->size < sizeof(*info)) {
        av_log(ctx, AV_LOG_INFO, "invalid data");
        return;
    }
    info = (AVDownmixInfo *)sd->data;

    av_log(ctx, AV_LOG_INFO, "preferred downmix type - ");
    switch (info->preferred_downmix_type) {
    case AV_DOWNMIX_TYPE_LORO:  type = "Lo/Ro";              break;
    case AV_DOWNMIX_TYPE_LTRT:  type = "Lt/Rt";              break;
    case AV_DOWNMIX_TYPE_DPLII: type = "Dolby Pro Logic II"; break;
    }
    if (type)
        av_log(ctx, AV_LOG_INFO, "%s", type);
    else
        av_log(ctx, AV_LOG_WARNING, "unknown");

    av_log(ctx, AV_LOG_INFO, " Mix levels: center %f (%f ltrt) - "
           "surround %f (%f ltrt) - lfe %f",
           info->center_mix_level,   info->center_mix_level_ltrt,
           info->surround_mix_level, info->surround_mix_level_ltrt,
           info->lfe_mix_level);
}
/* Print a gain value scaled down by 100000; INT32_MIN marks "unknown". */
static void print_gain(AVFilterContext *ctx, const char *str, int32_t gain)
{
    av_log(ctx, AV_LOG_INFO, "%s - ", str);
    if (gain != INT32_MIN)
        av_log(ctx, AV_LOG_INFO, "%f", gain / 100000.0f);
    else
        av_log(ctx, AV_LOG_INFO, "unknown");
    av_log(ctx, AV_LOG_INFO, ", ");
}
/* Print a peak value normalized against UINT32_MAX; 0 marks "unknown". */
static void print_peak(AVFilterContext *ctx, const char *str, uint32_t peak)
{
    av_log(ctx, AV_LOG_INFO, "%s - ", str);
    if (peak)
        av_log(ctx, AV_LOG_INFO, "%f", (float)peak / UINT32_MAX);
    else
        av_log(ctx, AV_LOG_INFO, "unknown");
    av_log(ctx, AV_LOG_INFO, ", ");
}
/* Print ReplayGain side data: track/album gain and peak values. */
static void dump_replaygain(AVFilterContext *ctx, AVFrameSideData *sd)
{
    const AVReplayGain *gain_info;

    av_log(ctx, AV_LOG_INFO, "replaygain: ");
    if (sd->size < sizeof(*gain_info)) {
        av_log(ctx, AV_LOG_INFO, "invalid data");
        return;
    }
    gain_info = (const AVReplayGain *)sd->data;

    print_gain(ctx, "track gain", gain_info->track_gain);
    print_peak(ctx, "track peak", gain_info->track_peak);
    print_gain(ctx, "album gain", gain_info->album_gain);
    print_peak(ctx, "album peak", gain_info->album_peak);
}
/* Print the audio service type side data as a human-readable name. */
static void dump_audio_service_type(AVFilterContext *ctx, AVFrameSideData *sd)
{
    const enum AVAudioServiceType *ast;
    const char *name;

    av_log(ctx, AV_LOG_INFO, "audio service type: ");
    if (sd->size < sizeof(*ast)) {
        av_log(ctx, AV_LOG_INFO, "invalid data");
        return;
    }
    ast = (const enum AVAudioServiceType *)sd->data;

    switch (*ast) {
    case AV_AUDIO_SERVICE_TYPE_MAIN:              name = "Main Audio Service"; break;
    case AV_AUDIO_SERVICE_TYPE_EFFECTS:           name = "Effects";            break;
    case AV_AUDIO_SERVICE_TYPE_VISUALLY_IMPAIRED: name = "Visually Impaired";  break;
    case AV_AUDIO_SERVICE_TYPE_HEARING_IMPAIRED:  name = "Hearing Impaired";   break;
    case AV_AUDIO_SERVICE_TYPE_DIALOGUE:          name = "Dialogue";           break;
    case AV_AUDIO_SERVICE_TYPE_COMMENTARY:        name = "Commentary";         break;
    case AV_AUDIO_SERVICE_TYPE_EMERGENCY:         name = "Emergency";          break;
    case AV_AUDIO_SERVICE_TYPE_VOICE_OVER:        name = "Voice Over";         break;
    case AV_AUDIO_SERVICE_TYPE_KARAOKE:           name = "Karaoke";            break;
    default:                                      name = "unknown";            break;
    }
    av_log(ctx, AV_LOG_INFO, "%s", name);
}
/* Fallback dump for side data types this filter does not recognize. */
static void dump_unknown(AVFilterContext *ctx, AVFrameSideData *sd)
{
    av_log(ctx, AV_LOG_INFO, "unknown side data type: %d, size %d bytes", sd->type, sd->size);
}
/*
 * Log a textual summary of each audio frame (timestamps, format, channel
 * layout, Adler-32 checksums and any attached side data), then pass the
 * frame downstream unmodified.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AShowInfoContext *s = ctx->priv;
    char chlayout_str[128];
    uint32_t checksum = 0;
    int channels    = inlink->channels;
    int planar      = av_sample_fmt_is_planar(buf->format);
    /* For packed audio all channels share one plane, so the per-plane
     * block covers every channel; for planar audio it covers one. */
    int block_align = av_get_bytes_per_sample(buf->format) * (planar ? 1 : channels);
    int data_size   = buf->nb_samples * block_align;
    int planes      = planar ? channels : 1;
    int i;
    /* Grow (or allocate) the scratch array; sized for channels, which is
     * >= planes in both packed and planar layouts. */
    void *tmp_ptr = av_realloc_array(s->plane_checksums, channels, sizeof(*s->plane_checksums));

    if (!tmp_ptr)
        return AVERROR(ENOMEM);
    s->plane_checksums = tmp_ptr;

    /* Per-plane Adler-32 checksums; the overall checksum chains all planes,
     * starting from plane 0's checksum. */
    for (i = 0; i < planes; i++) {
        uint8_t *data = buf->extended_data[i];

        s->plane_checksums[i] = av_adler32_update(0, data, data_size);
        checksum = i ? av_adler32_update(checksum, data, data_size) :
                       s->plane_checksums[0];
    }

    av_get_channel_layout_string(chlayout_str, sizeof(chlayout_str), buf->channels,
                                 buf->channel_layout);

    av_log(ctx, AV_LOG_INFO,
           "n:%"PRId64" pts:%s pts_time:%s pos:%"PRId64" "
           "fmt:%s channels:%d chlayout:%s rate:%d nb_samples:%d "
           "checksum:%08"PRIX32" ",
           inlink->frame_count_out,
           av_ts2str(buf->pts), av_ts2timestr(buf->pts, &inlink->time_base),
           buf->pkt_pos,
           av_get_sample_fmt_name(buf->format), buf->channels, chlayout_str,
           buf->sample_rate, buf->nb_samples,
           checksum);

    av_log(ctx, AV_LOG_INFO, "plane_checksums: [ ");
    for (i = 0; i < planes; i++)
        av_log(ctx, AV_LOG_INFO, "%08"PRIX32" ", s->plane_checksums[i]);
    av_log(ctx, AV_LOG_INFO, "]\n");

    /* Dump every piece of side data attached to the frame. */
    for (i = 0; i < buf->nb_side_data; i++) {
        AVFrameSideData *sd = buf->side_data[i];

        av_log(ctx, AV_LOG_INFO, "  side data - ");
        switch (sd->type) {
        case AV_FRAME_DATA_MATRIXENCODING: dump_matrixenc (ctx, sd); break;
        case AV_FRAME_DATA_DOWNMIX_INFO:   dump_downmix   (ctx, sd); break;
        case AV_FRAME_DATA_REPLAYGAIN:     dump_replaygain(ctx, sd); break;
        case AV_FRAME_DATA_AUDIO_SERVICE_TYPE: dump_audio_service_type(ctx, sd); break;
        default:                           dump_unknown   (ctx, sd); break;
        }
        av_log(ctx, AV_LOG_INFO, "\n");
    }

    return ff_filter_frame(inlink->dst->outputs[0], buf);
}
/* Single audio input; all work happens in filter_frame. */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Pass-through audio output. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Filter registration for "ashowinfo". */
AVFilter ff_af_ashowinfo = {
    .name        = "ashowinfo",
    .description = NULL_IF_CONFIG_SMALL("Show textual information for each audio frame."),
    .priv_size   = sizeof(AShowInfoContext),
    .uninit      = uninit,
    .inputs      = inputs,
    .outputs     = outputs,
};

319
externals/ffmpeg/libavfilter/af_asoftclip.c vendored Executable file
View File

@@ -0,0 +1,319 @@
/*
* Copyright (c) 2019 The FFmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
/* Available soft-clipping transfer functions. */
enum ASoftClipTypes {
    ASC_TANH,
    ASC_ATAN,
    ASC_CUBIC,
    ASC_EXP,
    ASC_ALG,
    ASC_QUINTIC,
    ASC_SIN,
    NB_TYPES,
};

/* Per-instance state for the asoftclip filter. */
typedef struct ASoftClipContext {
    const AVClass *class;

    int type;       /* one of enum ASoftClipTypes */
    double param;   /* clip-function parameter (used by tanh/atan/alg types) */

    /* Sample-format-specific kernel, chosen in config_input. */
    void (*filter)(struct ASoftClipContext *s, void **dst, const void **src,
                   int nb_samples, int channels, int start, int end);
} ASoftClipContext;

#define OFFSET(x) offsetof(ASoftClipContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption asoftclip_options[] = {
    { "type", "set softclip type", OFFSET(type), AV_OPT_TYPE_INT,    {.i64=0},          0, NB_TYPES-1, A, "types" },
    { "tanh",    NULL, 0, AV_OPT_TYPE_CONST, {.i64=ASC_TANH},   0, 0, A, "types" },
    { "atan",    NULL, 0, AV_OPT_TYPE_CONST, {.i64=ASC_ATAN},   0, 0, A, "types" },
    { "cubic",   NULL, 0, AV_OPT_TYPE_CONST, {.i64=ASC_CUBIC},  0, 0, A, "types" },
    { "exp",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=ASC_EXP},    0, 0, A, "types" },
    { "alg",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=ASC_ALG},    0, 0, A, "types" },
    { "quintic", NULL, 0, AV_OPT_TYPE_CONST, {.i64=ASC_QUINTIC},0, 0, A, "types" },
    { "sin",     NULL, 0, AV_OPT_TYPE_CONST, {.i64=ASC_SIN},    0, 0, A, "types" },
    { "param", "set softclip parameter", OFFSET(param), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.01, 3, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(asoftclip);
/* Accept float/double (packed or planar), any layout, any sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterFormats *fmts;
    AVFilterChannelLayouts *chlayouts;
    int err;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    if ((err = ff_set_common_formats(ctx, fmts)) < 0)
        return err;

    chlayouts = ff_all_channel_counts();
    if (!chlayouts)
        return AVERROR(ENOMEM);
    if ((err = ff_set_common_channel_layouts(ctx, chlayouts)) < 0)
        return err;

    return ff_set_common_samplerates(ctx, ff_all_samplerates());
}
#define SQR(x) ((x) * (x))

/*
 * Single-precision soft-clip kernel; processes channels [start, end) of the
 * (possibly shared packed) planes in sptr into dptr.
 *
 * Fix: the ASC_EXP and ASC_QUINTIC cases used bare double literals
 * ("- 1." and "1.25") inside the float path, silently promoting the whole
 * expression to double; use float literals so the path stays in single
 * precision, consistent with the rest of this function.
 */
static void filter_flt(ASoftClipContext *s,
                       void **dptr, const void **sptr,
                       int nb_samples, int channels,
                       int start, int end)
{
    float param = s->param;

    for (int c = start; c < end; c++) {
        const float *src = sptr[c];
        float *dst = dptr[c];

        switch (s->type) {
        case ASC_TANH:
            for (int n = 0; n < nb_samples; n++)
                dst[n] = tanhf(src[n] * param);
            break;
        case ASC_ATAN:
            for (int n = 0; n < nb_samples; n++)
                dst[n] = 2.f / M_PI * atanf(src[n] * param);
            break;
        case ASC_CUBIC:
            for (int n = 0; n < nb_samples; n++) {
                if (FFABS(src[n]) >= 1.5f)
                    dst[n] = FFSIGN(src[n]);
                else
                    dst[n] = src[n] - 0.1481f * powf(src[n], 3.f);
            }
            break;
        case ASC_EXP:
            for (int n = 0; n < nb_samples; n++)
                dst[n] = 2.f / (1.f + expf(-2.f * src[n])) - 1.f;
            break;
        case ASC_ALG:
            for (int n = 0; n < nb_samples; n++)
                dst[n] = src[n] / (sqrtf(param + src[n] * src[n]));
            break;
        case ASC_QUINTIC:
            for (int n = 0; n < nb_samples; n++) {
                if (FFABS(src[n]) >= 1.25f)
                    dst[n] = FFSIGN(src[n]);
                else
                    dst[n] = src[n] - 0.08192f * powf(src[n], 5.f);
            }
            break;
        case ASC_SIN:
            for (int n = 0; n < nb_samples; n++) {
                if (FFABS(src[n]) >= M_PI_2)
                    dst[n] = FFSIGN(src[n]);
                else
                    dst[n] = sinf(src[n]);
            }
            break;
        }
    }
}
/*
 * Double-precision soft-clip kernel; processes channels [start, end) of the
 * planes in sptr into dptr. Mirrors filter_flt using double math.
 */
static void filter_dbl(ASoftClipContext *s,
                       void **dptr, const void **sptr,
                       int nb_samples, int channels,
                       int start, int end)
{
    const double p = s->param;

    for (int ch = start; ch < end; ch++) {
        const double *in  = sptr[ch];
        double       *out = dptr[ch];

        switch (s->type) {
        case ASC_TANH:
            for (int n = 0; n < nb_samples; n++)
                out[n] = tanh(in[n] * p);
            break;
        case ASC_ATAN:
            for (int n = 0; n < nb_samples; n++)
                out[n] = 2. / M_PI * atan(in[n] * p);
            break;
        case ASC_CUBIC:
            for (int n = 0; n < nb_samples; n++)
                out[n] = FFABS(in[n]) >= 1.5
                       ? FFSIGN(in[n])
                       : in[n] - 0.1481 * pow(in[n], 3.);
            break;
        case ASC_EXP:
            for (int n = 0; n < nb_samples; n++)
                out[n] = 2. / (1. + exp(-2. * in[n])) - 1.;
            break;
        case ASC_ALG:
            for (int n = 0; n < nb_samples; n++)
                out[n] = in[n] / (sqrt(p + in[n] * in[n]));
            break;
        case ASC_QUINTIC:
            for (int n = 0; n < nb_samples; n++)
                out[n] = FFABS(in[n]) >= 1.25
                       ? FFSIGN(in[n])
                       : in[n] - 0.08192 * pow(in[n], 5.);
            break;
        case ASC_SIN:
            for (int n = 0; n < nb_samples; n++)
                out[n] = FFABS(in[n]) >= M_PI_2
                       ? FFSIGN(in[n])
                       : sin(in[n]);
            break;
        }
    }
}
/* Bind the sample-format-specific kernel once the input format is known. */
static int config_input(AVFilterLink *inlink)
{
    ASoftClipContext *s = inlink->dst->priv;

    switch (inlink->format) {
    case AV_SAMPLE_FMT_FLT:
    case AV_SAMPLE_FMT_FLTP:
        s->filter = filter_flt;
        break;
    case AV_SAMPLE_FMT_DBL:
    case AV_SAMPLE_FMT_DBLP:
        s->filter = filter_dbl;
        break;
    }

    return 0;
}
/* Work unit handed to each slice-threading job. */
typedef struct ThreadData {
    AVFrame *in, *out;  /* source and destination frames (may be the same) */
    int nb_samples;     /* samples per "channel" as seen by the kernel */
    int channels;       /* planes to split across jobs (1 for packed audio) */
} ThreadData;
/* Slice worker: soft-clip the channel range assigned to this job. */
static int filter_channels(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ASoftClipContext *s  = ctx->priv;
    ThreadData *td       = arg;
    const int channels   = td->channels;
    const int nb_samples = td->nb_samples;
    const int first      = (channels *  jobnr     ) / nb_jobs;
    const int last       = (channels * (jobnr + 1)) / nb_jobs;

    s->filter(s, (void **)td->out->extended_data,
              (const void **)td->in->extended_data,
              nb_samples, channels, first, last);

    return 0;
}
/*
 * Process one frame: clip in place when the input frame is writable,
 * otherwise into a freshly allocated output frame; work is split across
 * threads by channel (planar) or done as one job (packed).
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    int nb_samples, channels;
    ThreadData td;
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        /* In-place processing; "out" aliases "in". */
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    /* Packed audio is a single interleaved plane, so fold all channels
     * into one job's sample count. */
    if (av_sample_fmt_is_planar(in->format)) {
        nb_samples = in->nb_samples;
        channels   = in->channels;
    } else {
        nb_samples = in->channels * in->nb_samples;
        channels   = 1;
    }

    td.in = in;
    td.out = out;
    td.nb_samples = nb_samples;
    td.channels = channels;
    ctx->internal->execute(ctx, filter_channels, &td, NULL, FFMIN(channels,
                                                                 ff_filter_get_nb_threads(ctx)));

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
/* Single audio input; kernel selection happens in config_input. */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

/* Pass-through audio output. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Filter registration for "asoftclip"; supports slice threading and
 * runtime option changes via process_command. */
AVFilter ff_af_asoftclip = {
    .name           = "asoftclip",
    .description    = NULL_IF_CONFIG_SMALL("Audio Soft Clipper."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(ASoftClipContext),
    .priv_class     = &asoftclip_class,
    .inputs         = inputs,
    .outputs        = outputs,
    .process_command = ff_filter_process_command,
    .flags          = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC |
                      AVFILTER_FLAG_SLICE_THREADS,
};

181
externals/ffmpeg/libavfilter/af_asr.c vendored Executable file
View File

@@ -0,0 +1,181 @@
/*
* Copyright (c) 2019 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <pocketsphinx/pocketsphinx.h>
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
/* Per-instance state for the asr (PocketSphinx speech recognition) filter. */
typedef struct ASRContext {
    const AVClass *class;

    int      rate;      /* expected input sample rate */
    char    *hmm;       /* acoustic model directory */
    char    *dict;      /* pronunciation dictionary */
    char    *lm;        /* language model file */
    char    *lmctl;     /* language model set */
    char    *lmname;    /* which language model to use */
    char    *logfn;     /* destination for PocketSphinx log messages */

    ps_decoder_t *ps;      /* PocketSphinx decoder handle */
    cmd_ln_t     *config;  /* PocketSphinx configuration object */

    int utt_started;    /* nonzero while inside a detected utterance */
} ASRContext;

#define OFFSET(x) offsetof(ASRContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
static const AVOption asr_options[] = {
    { "rate",  "set sampling rate",                           OFFSET(rate),   AV_OPT_TYPE_INT,    {.i64=16000}, 0, INT_MAX, .flags = FLAGS },
    { "hmm",   "set directory containing acoustic model files", OFFSET(hmm),  AV_OPT_TYPE_STRING, {.str=NULL},  .flags = FLAGS },
    { "dict",  "set pronunciation dictionary",                OFFSET(dict),   AV_OPT_TYPE_STRING, {.str=NULL},  .flags = FLAGS },
    { "lm",    "set language model file",                     OFFSET(lm),     AV_OPT_TYPE_STRING, {.str=NULL},  .flags = FLAGS },
    { "lmctl", "set language model set",                      OFFSET(lmctl),  AV_OPT_TYPE_STRING, {.str=NULL},  .flags = FLAGS },
    { "lmname","set which language model to use",             OFFSET(lmname), AV_OPT_TYPE_STRING, {.str=NULL},  .flags = FLAGS },
    { "logfn", "set output for log messages",                 OFFSET(logfn),  AV_OPT_TYPE_STRING, {.str="/dev/null"}, .flags = FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(asr);
/*
 * Feed each frame's samples to the recognizer; when an utterance ends,
 * attach the hypothesis text as "lavfi.asr.text" frame metadata and start
 * a new utterance. Frames pass through unmodified.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVDictionary **metadata = &in->metadata;
    ASRContext *s = ctx->priv;
    int have_speech;
    const char *speech;

    ps_process_raw(s->ps, (const int16_t *)in->data[0], in->nb_samples, 0, 0);
    have_speech = ps_get_in_speech(s->ps);
    if (have_speech && !s->utt_started)
        s->utt_started = 1;
    /* Silence after speech: the utterance is complete — fetch hypothesis. */
    if (!have_speech && s->utt_started) {
        ps_end_utt(s->ps);
        speech = ps_get_hyp(s->ps, NULL);
        if (speech != NULL)
            av_dict_set(metadata, "lavfi.asr.text", speech, 0);
        ps_start_utt(s->ps);
        s->utt_started = 0;
    }

    return ff_filter_frame(ctx->outputs[0], in);
}
/* Begin the first utterance once the input link is configured. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ASRContext *s = ctx->priv;

    ps_start_utt(s->ps);

    return 0;
}
/*
 * Build the PocketSphinx configuration from the filter options and create
 * the decoder.
 *
 * Fix: the result of av_asprintf() was not checked; on allocation failure a
 * NULL "-samprate" argument would have been handed to cmd_ln_parse_r().
 * Cleanup of s->config / s->ps on later failure is left to asr_uninit().
 */
static av_cold int asr_init(AVFilterContext *ctx)
{
    ASRContext *s = ctx->priv;
    const float frate = s->rate;
    char *rate = av_asprintf("%f", frate);
    const char *argv[] = { "-logfn",    s->logfn,
                           "-hmm",      s->hmm,
                           "-lm",       s->lm,
                           "-lmctl",    s->lmctl,
                           "-lmname",   s->lmname,
                           "-dict",     s->dict,
                           "-samprate", rate,
                           NULL };

    if (!rate)
        return AVERROR(ENOMEM);

    /* 14 = number of argv entries excluding the NULL terminator. */
    s->config = cmd_ln_parse_r(NULL, ps_args(), 14, (char **)argv, 0);
    av_free(rate);
    if (!s->config)
        return AVERROR(ENOMEM);

    ps_default_search_args(s->config);
    s->ps = ps_init(s->config);
    if (!s->ps)
        return AVERROR(ENOMEM);

    return 0;
}
/* Accept only mono 16-bit signed audio at the configured sample rate
 * (the format PocketSphinx consumes). */
static int query_formats(AVFilterContext *ctx)
{
    ASRContext *s = ctx->priv;
    int sample_rates[] = { s->rate, -1 };
    int ret;

    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;

    if ((ret = ff_add_format                 (&formats, AV_SAMPLE_FMT_S16                 )) < 0 ||
        (ret = ff_set_common_formats         (ctx     , formats                           )) < 0 ||
        (ret = ff_add_channel_layout         (&layout , AV_CH_LAYOUT_MONO                 )) < 0 ||
        (ret = ff_set_common_channel_layouts (ctx     , layout                            )) < 0 ||
        (ret = ff_set_common_samplerates     (ctx     , ff_make_format_list(sample_rates) )) < 0)
        return ret;

    return 0;
}
/* Tear down the decoder first, then its configuration; NULL the pointers
 * so a repeated uninit is harmless. */
static av_cold void asr_uninit(AVFilterContext *ctx)
{
    ASRContext *s = ctx->priv;

    ps_free(s->ps);
    s->ps = NULL;
    cmd_ln_free_r(s->config);
    s->config = NULL;
}
/* Single audio input; recognition runs per frame in filter_frame. */
static const AVFilterPad asr_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

/* Pass-through audio output. */
static const AVFilterPad asr_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Filter registration for "asr". */
AVFilter ff_af_asr = {
    .name          = "asr",
    .description   = NULL_IF_CONFIG_SMALL("Automatic Speech Recognition."),
    .priv_size     = sizeof(ASRContext),
    .priv_class    = &asr_class,
    .init          = asr_init,
    .uninit        = asr_uninit,
    .query_formats = query_formats,
    .inputs        = asr_inputs,
    .outputs       = asr_outputs,
};

850
externals/ffmpeg/libavfilter/af_astats.c vendored Executable file
View File

@@ -0,0 +1,850 @@
/*
* Copyright (c) 2009 Rob Sykes <robs@users.sourceforge.net>
* Copyright (c) 2013 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <float.h>
#include <math.h>
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
/* Amplitude histogram used for the noise-floor estimate. */
#define HISTOGRAM_SIZE  8192
#define HISTOGRAM_MAX   (HISTOGRAM_SIZE-1)

/* Bit flags selecting which statistics to measure and report. */
#define MEASURE_ALL                     UINT_MAX
#define MEASURE_NONE                           0

#define MEASURE_DC_OFFSET               (1 <<  0)
#define MEASURE_MIN_LEVEL               (1 <<  1)
#define MEASURE_MAX_LEVEL               (1 <<  2)
#define MEASURE_MIN_DIFFERENCE          (1 <<  3)
#define MEASURE_MAX_DIFFERENCE          (1 <<  4)
#define MEASURE_MEAN_DIFFERENCE         (1 <<  5)
#define MEASURE_RMS_DIFFERENCE          (1 <<  6)
#define MEASURE_PEAK_LEVEL              (1 <<  7)
#define MEASURE_RMS_LEVEL               (1 <<  8)
#define MEASURE_RMS_PEAK                (1 <<  9)
#define MEASURE_RMS_TROUGH              (1 << 10)
#define MEASURE_CREST_FACTOR            (1 << 11)
#define MEASURE_FLAT_FACTOR             (1 << 12)
#define MEASURE_PEAK_COUNT              (1 << 13)
#define MEASURE_BIT_DEPTH               (1 << 14)
#define MEASURE_DYNAMIC_RANGE           (1 << 15)
#define MEASURE_ZERO_CROSSINGS          (1 << 16)
#define MEASURE_ZERO_CROSSINGS_RATE     (1 << 17)
#define MEASURE_NUMBER_OF_SAMPLES       (1 << 18)
#define MEASURE_NUMBER_OF_NANS          (1 << 19)
#define MEASURE_NUMBER_OF_INFS          (1 << 20)
#define MEASURE_NUMBER_OF_DENORMALS     (1 << 21)
#define MEASURE_NOISE_FLOOR             (1 << 22)
#define MEASURE_NOISE_FLOOR_COUNT       (1 << 23)

/* Subset that enables the cheap min/max/peak-only code path. */
#define MEASURE_MINMAXPEAK              (MEASURE_MIN_LEVEL | MEASURE_MAX_LEVEL | MEASURE_PEAK_LEVEL)
/* Running statistics accumulated per channel. */
typedef struct ChannelStats {
    double last;                        /* previous sample (NAN before first) */
    double last_non_zero;               /* previous nonzero sample (zero-crossing detection) */
    double min_non_zero;                /* smallest nonzero magnitude seen */
    double sigma_x, sigma_x2;           /* running sum and sum of squares */
    double avg_sigma_x2, min_sigma_x2, max_sigma_x2; /* smoothed/extreme windowed power */
    double min, max;                    /* sample extremes */
    double nmin, nmax;                  /* normalized-sample extremes */
    double min_run, max_run;            /* length of the current run at min/max */
    double min_runs, max_runs;          /* sum of squared run lengths (flat factor) */
    double min_diff, max_diff;          /* extremes of |delta| between consecutive samples */
    double diff1_sum;                   /* sum of |delta| */
    double diff1_sum_x2;                /* sum of delta^2 */
    uint64_t mask, imask;               /* OR / AND of all integer sample bits (bit depth) */
    uint64_t min_count, max_count;      /* occurrences of the min/max value */
    uint64_t noise_floor_count;         /* windows hitting the current noise floor */
    uint64_t zero_runs;                 /* number of zero crossings */
    uint64_t nb_samples;                /* samples processed */
    uint64_t nb_nans;                   /* NaNs seen (float/double input only) */
    uint64_t nb_infs;                   /* infinities seen (float/double input only) */
    uint64_t nb_denormals;              /* denormals seen (float/double input only) */
    double *win_samples;                /* sliding window used for the noise floor */
    unsigned histogram[HISTOGRAM_SIZE]; /* amplitude histogram of the window */
    int win_pos;                        /* write position in win_samples */
    int max_index;                      /* highest non-empty histogram bin */
    double noise_floor;                 /* current noise-floor estimate (NAN until primed) */
} ChannelStats;
/* Per-instance state for the astats filter. */
typedef struct AudioStatsContext {
    const AVClass *class;
    ChannelStats *chstats;      /* one ChannelStats per channel */
    int nb_channels;
    uint64_t tc_samples;        /* sliding-window length in samples */
    double time_constant;       /* window length option, in seconds */
    double mult;                /* exponential smoothing factor for windowed power */
    int metadata;               /* nonzero: inject stats as frame metadata */
    int reset_count;            /* recompute stats every this many frames (0 = never) */
    int nb_frames;              /* frames since the last reset */
    int maxbitdepth;            /* container bit depth of the sample format */
    int measure_perchannel;     /* MEASURE_* flags for per-channel stats */
    int measure_overall;        /* MEASURE_* flags for overall stats */
    int is_float;               /* input is single-precision float */
    int is_double;              /* input is double-precision float */
} AudioStatsContext;
#define OFFSET(x) offsetof(AudioStatsContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Option table; the "measure" unit lists one named constant per MEASURE_* flag. */
static const AVOption astats_options[] = {
    { "length", "set the window length", OFFSET(time_constant), AV_OPT_TYPE_DOUBLE, {.dbl=.05}, .01, 10, FLAGS },
    { "metadata", "inject metadata in the filtergraph", OFFSET(metadata), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "reset", "recalculate stats after this many frames", OFFSET(reset_count), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, FLAGS },
    { "measure_perchannel", "only measure_perchannel these per-channel statistics", OFFSET(measure_perchannel), AV_OPT_TYPE_FLAGS, {.i64=MEASURE_ALL}, 0, UINT_MAX, FLAGS, "measure" },
    { "none"                      , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_NONE                }, 0, 0, FLAGS, "measure" },
    { "all"                       , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_ALL                 }, 0, 0, FLAGS, "measure" },
    { "DC_offset"                 , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_DC_OFFSET           }, 0, 0, FLAGS, "measure" },
    { "Min_level"                 , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_MIN_LEVEL           }, 0, 0, FLAGS, "measure" },
    { "Max_level"                 , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_MAX_LEVEL           }, 0, 0, FLAGS, "measure" },
    { "Min_difference"            , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_MIN_DIFFERENCE      }, 0, 0, FLAGS, "measure" },
    { "Max_difference"            , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_MAX_DIFFERENCE      }, 0, 0, FLAGS, "measure" },
    { "Mean_difference"           , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_MEAN_DIFFERENCE     }, 0, 0, FLAGS, "measure" },
    { "RMS_difference"            , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_RMS_DIFFERENCE      }, 0, 0, FLAGS, "measure" },
    { "Peak_level"                , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_PEAK_LEVEL          }, 0, 0, FLAGS, "measure" },
    { "RMS_level"                 , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_RMS_LEVEL           }, 0, 0, FLAGS, "measure" },
    { "RMS_peak"                  , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_RMS_PEAK            }, 0, 0, FLAGS, "measure" },
    { "RMS_trough"                , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_RMS_TROUGH          }, 0, 0, FLAGS, "measure" },
    { "Crest_factor"              , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_CREST_FACTOR        }, 0, 0, FLAGS, "measure" },
    { "Flat_factor"               , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_FLAT_FACTOR         }, 0, 0, FLAGS, "measure" },
    { "Peak_count"                , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_PEAK_COUNT          }, 0, 0, FLAGS, "measure" },
    { "Bit_depth"                 , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_BIT_DEPTH           }, 0, 0, FLAGS, "measure" },
    { "Dynamic_range"             , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_DYNAMIC_RANGE       }, 0, 0, FLAGS, "measure" },
    { "Zero_crossings"            , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_ZERO_CROSSINGS      }, 0, 0, FLAGS, "measure" },
    { "Zero_crossings_rate"       , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_ZERO_CROSSINGS_RATE }, 0, 0, FLAGS, "measure" },
    { "Noise_floor"               , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_NOISE_FLOOR         }, 0, 0, FLAGS, "measure" },
    { "Noise_floor_count"         , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_NOISE_FLOOR_COUNT   }, 0, 0, FLAGS, "measure" },
    { "Number_of_samples"         , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_NUMBER_OF_SAMPLES   }, 0, 0, FLAGS, "measure" },
    { "Number_of_NaNs"            , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_NUMBER_OF_NANS      }, 0, 0, FLAGS, "measure" },
    { "Number_of_Infs"            , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_NUMBER_OF_INFS      }, 0, 0, FLAGS, "measure" },
    { "Number_of_denormals"       , "", 0, AV_OPT_TYPE_CONST, {.i64=MEASURE_NUMBER_OF_DENORMALS }, 0, 0, FLAGS, "measure" },
    { "measure_overall", "only measure_perchannel these overall statistics", OFFSET(measure_overall), AV_OPT_TYPE_FLAGS, {.i64=MEASURE_ALL}, 0, UINT_MAX, FLAGS, "measure" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(astats);
/* Accept all integer and float sample formats, any layout, any rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_S16, AV_SAMPLE_FMT_S16P,
        AV_SAMPLE_FMT_S32, AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_S64, AV_SAMPLE_FMT_S64P,
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *chlayouts = ff_all_channel_counts();
    AVFilterFormats *fmts;
    int err;

    if (!chlayouts)
        return AVERROR(ENOMEM);
    if ((err = ff_set_common_channel_layouts(ctx, chlayouts)) < 0)
        return err;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    if ((err = ff_set_common_formats(ctx, fmts)) < 0)
        return err;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/* Reset every per-channel accumulator to its neutral starting value. */
static void reset_stats(AudioStatsContext *s)
{
    int c;

    for (c = 0; c < s->nb_channels; c++) {
        ChannelStats *p = &s->chstats[c];

        /* Extremes start at the opposite bound so any sample replaces them. */
        p->min = p->nmin = p->min_sigma_x2 = DBL_MAX;
        p->max = p->nmax = p->max_sigma_x2 =-DBL_MAX;
        p->min_non_zero = DBL_MAX;
        p->min_diff = DBL_MAX;
        p->max_diff = 0;
        p->sigma_x = 0;
        p->sigma_x2 = 0;
        p->avg_sigma_x2 = 0;
        p->min_run = 0;
        p->max_run = 0;
        p->min_runs = 0;
        p->max_runs = 0;
        p->diff1_sum = 0;
        p->diff1_sum_x2 = 0;
        /* mask ORs bits in, imask ANDs them out — start empty / full. */
        p->mask = 0;
        p->imask = 0xFFFFFFFFFFFFFFFF;
        p->min_count = 0;
        p->max_count = 0;
        p->zero_runs = 0;
        p->nb_samples = 0;
        p->nb_nans = 0;
        p->nb_infs = 0;
        p->nb_denormals = 0;
        /* NAN marks "no previous sample" / "noise floor not yet primed". */
        p->last = NAN;
        p->noise_floor = NAN;
        p->noise_floor_count = 0;
        p->win_pos = 0;
        memset(p->win_samples, 0, s->tc_samples * sizeof(*p->win_samples));
        memset(p->histogram, 0, sizeof(p->histogram));
    }
}
/*
 * Allocate per-channel state and derive the window parameters from the
 * negotiated output format.
 *
 * Fix: av_calloc() takes (nmemb, size); the arguments were passed in the
 * reverse order. The product is the same, but the documented order keeps
 * the overflow check semantics intended by the API.
 * Allocation failures rely on the filter's uninit callback for cleanup.
 */
static int config_output(AVFilterLink *outlink)
{
    AudioStatsContext *s = outlink->src->priv;

    s->chstats = av_calloc(outlink->channels, sizeof(*s->chstats));
    if (!s->chstats)
        return AVERROR(ENOMEM);

    /* Window covers ~5 time constants of audio, rounded to a sample. */
    s->tc_samples = 5 * s->time_constant * outlink->sample_rate + .5;
    s->nb_channels = outlink->channels;

    for (int i = 0; i < s->nb_channels; i++) {
        ChannelStats *p = &s->chstats[i];

        p->win_samples = av_calloc(s->tc_samples, sizeof(*p->win_samples));
        if (!p->win_samples)
            return AVERROR(ENOMEM);
    }

    /* Exponential smoothing factor for the windowed power estimate. */
    s->mult = exp((-1 / s->time_constant / outlink->sample_rate));
    s->nb_frames = 0;
    s->maxbitdepth = av_get_bytes_per_sample(outlink->format) * 8;
    s->is_double = outlink->format == AV_SAMPLE_FMT_DBL ||
                   outlink->format == AV_SAMPLE_FMT_DBLP;
    s->is_float  = outlink->format == AV_SAMPLE_FMT_FLT ||
                   outlink->format == AV_SAMPLE_FMT_FLTP;

    reset_stats(s);

    return 0;
}
/*
 * Derive the effective bit depth from the OR (mask) and AND (imask) of all
 * integer samples: den = index of the highest varying bit, num = number of
 * varying bits at or below it.
 */
static void bit_depth(AudioStatsContext *s, uint64_t mask, uint64_t imask, AVRational *depth)
{
    unsigned bits = s->maxbitdepth;

    /* Keep only bits that toggled (set somewhere, but not everywhere). */
    mask = mask & (~imask);

    /* Skip trailing constant bits to find the depth (den). */
    for (; bits && !(mask & 1); --bits, mask >>= 1)
        ;
    depth->den = bits;

    /* Count the varying bits that remain (num). */
    depth->num = 0;
    for (; bits; --bits, mask >>= 1)
        depth->num += mask & 1;
}
/* Cheap path: track running extremes only. A NaN sample compares false on
 * both tests and therefore never disturbs the extremes. */
static inline void update_minmax(AudioStatsContext *s, ChannelStats *p, double d)
{
    if (p->min > d)
        p->min = d;
    if (p->max < d)
        p->max = d;
}
/*
 * Fold one sample into a channel's running statistics.
 *
 * d  - raw sample value
 * nd - normalized sample value (used for power/histogram measures)
 * i  - integer bit pattern of the sample (bit-depth tracking)
 *
 * Also maintains a sliding window of |nd| values whose histogram yields the
 * noise-floor estimate. Statement order matters throughout (p->last must be
 * read before it is overwritten, the histogram drop must use the evicted
 * window sample, etc.) — do not reorder.
 */
static inline void update_stat(AudioStatsContext *s, ChannelStats *p, double d, double nd, int64_t i)
{
    double drop;
    int index;

    /* Min tracking, plus run-length bookkeeping for the flat factor. */
    if (d < p->min) {
        p->min = d;
        p->nmin = nd;
        p->min_run = 1;
        p->min_runs = 0;
        p->min_count = 1;
    } else if (d == p->min) {
        p->min_count++;
        p->min_run = d == p->last ? p->min_run + 1 : 1;
    } else if (p->last == p->min) {
        p->min_runs += p->min_run * p->min_run;
    }

    if (d != 0 && FFABS(d) < p->min_non_zero)
        p->min_non_zero = FFABS(d);

    /* Max tracking, mirror of the min logic above. */
    if (d > p->max) {
        p->max = d;
        p->nmax = nd;
        p->max_run = 1;
        p->max_runs = 0;
        p->max_count = 1;
    } else if (d == p->max) {
        p->max_count++;
        p->max_run = d == p->last ? p->max_run + 1 : 1;
    } else if (p->last == p->max) {
        p->max_runs += p->max_run * p->max_run;
    }

    /* Zero crossings: count sign changes between consecutive nonzero samples. */
    if (d != 0) {
        p->zero_runs += FFSIGN(d) != FFSIGN(p->last_non_zero);
        p->last_non_zero = d;
    }

    p->sigma_x += nd;
    p->sigma_x2 += nd * nd;
    /* Exponentially smoothed power (windowed RMS). */
    p->avg_sigma_x2 = p->avg_sigma_x2 * s->mult + (1.0 - s->mult) * nd * nd;

    /* First-difference stats; p->last is NAN before the first sample. */
    if (!isnan(p->last)) {
        p->min_diff = FFMIN(p->min_diff, fabs(d - p->last));
        p->max_diff = FFMAX(p->max_diff, fabs(d - p->last));
        p->diff1_sum += fabs(d - p->last);
        p->diff1_sum_x2 += (d - p->last) * (d - p->last);
    }
    p->last = d;
    p->mask |= i;
    p->imask &= i;

    /* Slide the window: remember the evicted sample, insert the new one,
     * and update the amplitude histogram accordingly. */
    drop = p->win_samples[p->win_pos];
    p->win_samples[p->win_pos] = nd;
    index = av_clip(FFABS(nd) * HISTOGRAM_MAX, 0, HISTOGRAM_MAX);
    p->max_index = FFMAX(p->max_index, index);
    p->histogram[index]++;
    /* Only remove the evicted sample once the window has filled
     * (noise_floor is NAN until then). */
    if (!isnan(p->noise_floor))
        p->histogram[av_clip(FFABS(drop) * HISTOGRAM_MAX, 0, HISTOGRAM_MAX)]--;
    p->win_pos++;

    while (p->histogram[p->max_index] == 0)
        p->max_index--;
    if (p->win_pos >= s->tc_samples || !isnan(p->noise_floor)) {
        double noise_floor = 1.;

        /* Highest occupied bin scanning downward = smallest amplitude
         * present in the window (note: this loop's "i" shadows the
         * int64_t parameter "i"). */
        for (int i = p->max_index; i >= 0; i--) {
            if (p->histogram[i]) {
                noise_floor = i / (double)HISTOGRAM_MAX;
                break;
            }
        }

        if (isnan(p->noise_floor)) {
            p->noise_floor = noise_floor;
            p->noise_floor_count = 1;
        } else {
            if (noise_floor < p->noise_floor) {
                p->noise_floor = noise_floor;
                p->noise_floor_count = 1;
            } else if (noise_floor == p->noise_floor) {
                p->noise_floor_count++;
            }
        }
    }

    if (p->win_pos >= s->tc_samples) {
        p->win_pos = 0;
    }

    /* Windowed-power extremes only once the window is primed. */
    if (p->nb_samples >= s->tc_samples) {
        p->max_sigma_x2 = FFMAX(p->max_sigma_x2, p->avg_sigma_x2);
        p->min_sigma_x2 = FFMIN(p->min_sigma_x2, p->avg_sigma_x2);
    }
    p->nb_samples++;
}
/* Count exceptional floating-point values (NaN/Inf/denormal) in a float stream. */
static inline void update_float_stat(AudioStatsContext *s, ChannelStats *p, float d)
{
    switch (fpclassify(d)) {
    case FP_NAN:
        p->nb_nans++;
        break;
    case FP_INFINITE:
        p->nb_infs++;
        break;
    case FP_SUBNORMAL:
        p->nb_denormals++;
        break;
    }
}
/* Count exceptional floating-point values (NaN/Inf/denormal) in a double stream. */
static inline void update_double_stat(AudioStatsContext *s, ChannelStats *p, double d)
{
    switch (fpclassify(d)) {
    case FP_NAN:
        p->nb_nans++;
        break;
    case FP_INFINITE:
        p->nb_infs++;
        break;
    case FP_SUBNORMAL:
        p->nb_denormals++;
        break;
    }
}
/* Store one measurement as frame metadata.
 * chan == 0 emits an overall key ("lavfi.astats.<key>"), otherwise a
 * per-channel key ("lavfi.astats.<chan>.<key>").  val is formatted with
 * the caller-supplied printf format. */
static void set_meta(AVDictionary **metadata, int chan, const char *key,
                     const char *fmt, double val)
{
    /* Use plain char buffers: snprintf() and av_dict_set() take char *,
     * so the former uint8_t arrays relied on an incompatible
     * pointer-signedness conversion. */
    char value[128];
    char key2[128];

    snprintf(value, sizeof(value), fmt, val);
    if (chan)
        snprintf(key2, sizeof(key2), "lavfi.astats.%d.%s", chan, key);
    else
        snprintf(key2, sizeof(key2), "lavfi.astats.%s", key);
    av_dict_set(metadata, key2, value, 0);
}
/* Convert a linear amplitude ratio to decibels (20 * log10). */
#define LINEAR_TO_DB(x) (log10(x) * 20)
/* Export every requested measurement — per channel, then overall — as frame
 * metadata entries under the "lavfi.astats.*" key namespace. */
static void set_metadata(AudioStatsContext *s, AVDictionary **metadata)
{
    uint64_t mask = 0, imask = 0xFFFFFFFFFFFFFFFF, min_count = 0, max_count = 0, nb_samples = 0, noise_floor_count = 0;
    uint64_t nb_nans = 0, nb_infs = 0, nb_denormals = 0;
    double min_runs = 0, max_runs = 0,
           min = DBL_MAX, max =-DBL_MAX, min_diff = DBL_MAX, max_diff = 0,
           nmin = DBL_MAX, nmax =-DBL_MAX,
           max_sigma_x = 0,
           diff1_sum = 0,
           diff1_sum_x2 = 0,
           sigma_x = 0,
           sigma_x2 = 0,
           noise_floor = 0,
           min_sigma_x2 = DBL_MAX,
           max_sigma_x2 =-DBL_MAX;
    AVRational depth;
    int c;

    for (c = 0; c < s->nb_channels; c++) {
        ChannelStats *p = &s->chstats[c];

        /* With fewer samples than one window, derive the windowed RMS
         * bounds from the plain running power sum instead. */
        if (p->nb_samples < s->tc_samples)
            p->min_sigma_x2 = p->max_sigma_x2 = p->sigma_x2 / p->nb_samples;

        /* Fold this channel into the overall aggregates. */
        min = FFMIN(min, p->min);
        max = FFMAX(max, p->max);
        nmin = FFMIN(nmin, p->nmin);
        nmax = FFMAX(nmax, p->nmax);
        min_diff = FFMIN(min_diff, p->min_diff);
        max_diff = FFMAX(max_diff, p->max_diff);
        diff1_sum += p->diff1_sum;
        diff1_sum_x2 += p->diff1_sum_x2;
        min_sigma_x2 = FFMIN(min_sigma_x2, p->min_sigma_x2);
        max_sigma_x2 = FFMAX(max_sigma_x2, p->max_sigma_x2);
        sigma_x += p->sigma_x;
        sigma_x2 += p->sigma_x2;
        noise_floor = FFMAX(noise_floor, p->noise_floor);
        noise_floor_count += p->noise_floor_count;
        min_count += p->min_count;
        max_count += p->max_count;
        min_runs += p->min_runs;
        max_runs += p->max_runs;
        mask |= p->mask;
        imask &= p->imask;
        nb_samples += p->nb_samples;
        nb_nans += p->nb_nans;
        nb_infs += p->nb_infs;
        nb_denormals += p->nb_denormals;
        /* Keep the channel sum with the largest magnitude for DC offset. */
        if (fabs(p->sigma_x) > fabs(max_sigma_x))
            max_sigma_x = p->sigma_x;

        /* Per-channel metadata entries, gated by measure_perchannel. */
        if (s->measure_perchannel & MEASURE_DC_OFFSET)
            set_meta(metadata, c + 1, "DC_offset", "%f", p->sigma_x / p->nb_samples);
        if (s->measure_perchannel & MEASURE_MIN_LEVEL)
            set_meta(metadata, c + 1, "Min_level", "%f", p->min);
        if (s->measure_perchannel & MEASURE_MAX_LEVEL)
            set_meta(metadata, c + 1, "Max_level", "%f", p->max);
        if (s->measure_perchannel & MEASURE_MIN_DIFFERENCE)
            set_meta(metadata, c + 1, "Min_difference", "%f", p->min_diff);
        if (s->measure_perchannel & MEASURE_MAX_DIFFERENCE)
            set_meta(metadata, c + 1, "Max_difference", "%f", p->max_diff);
        if (s->measure_perchannel & MEASURE_MEAN_DIFFERENCE)
            set_meta(metadata, c + 1, "Mean_difference", "%f", p->diff1_sum / (p->nb_samples - 1));
        if (s->measure_perchannel & MEASURE_RMS_DIFFERENCE)
            set_meta(metadata, c + 1, "RMS_difference", "%f", sqrt(p->diff1_sum_x2 / (p->nb_samples - 1)));
        if (s->measure_perchannel & MEASURE_PEAK_LEVEL)
            set_meta(metadata, c + 1, "Peak_level", "%f", LINEAR_TO_DB(FFMAX(-p->nmin, p->nmax)));
        if (s->measure_perchannel & MEASURE_RMS_LEVEL)
            set_meta(metadata, c + 1, "RMS_level", "%f", LINEAR_TO_DB(sqrt(p->sigma_x2 / p->nb_samples)));
        if (s->measure_perchannel & MEASURE_RMS_PEAK)
            set_meta(metadata, c + 1, "RMS_peak", "%f", LINEAR_TO_DB(sqrt(p->max_sigma_x2)));
        if (s->measure_perchannel & MEASURE_RMS_TROUGH)
            set_meta(metadata, c + 1, "RMS_trough", "%f", LINEAR_TO_DB(sqrt(p->min_sigma_x2)));
        if (s->measure_perchannel & MEASURE_CREST_FACTOR)
            set_meta(metadata, c + 1, "Crest_factor", "%f", p->sigma_x2 ? FFMAX(-p->min, p->max) / sqrt(p->sigma_x2 / p->nb_samples) : 1);
        if (s->measure_perchannel & MEASURE_FLAT_FACTOR)
            set_meta(metadata, c + 1, "Flat_factor", "%f", LINEAR_TO_DB((p->min_runs + p->max_runs) / (p->min_count + p->max_count)));
        if (s->measure_perchannel & MEASURE_PEAK_COUNT)
            set_meta(metadata, c + 1, "Peak_count", "%f", (float)(p->min_count + p->max_count));
        if (s->measure_perchannel & MEASURE_NOISE_FLOOR)
            set_meta(metadata, c + 1, "Noise_floor", "%f", LINEAR_TO_DB(p->noise_floor));
        if (s->measure_perchannel & MEASURE_NOISE_FLOOR_COUNT)
            set_meta(metadata, c + 1, "Noise_floor_count", "%f", p->noise_floor_count);
        if (s->measure_perchannel & MEASURE_BIT_DEPTH) {
            bit_depth(s, p->mask, p->imask, &depth);
            set_meta(metadata, c + 1, "Bit_depth", "%f", depth.num);
            set_meta(metadata, c + 1, "Bit_depth2", "%f", depth.den);
        }
        if (s->measure_perchannel & MEASURE_DYNAMIC_RANGE)
            set_meta(metadata, c + 1, "Dynamic_range", "%f", LINEAR_TO_DB(2 * FFMAX(FFABS(p->min), FFABS(p->max))/ p->min_non_zero));
        if (s->measure_perchannel & MEASURE_ZERO_CROSSINGS)
            set_meta(metadata, c + 1, "Zero_crossings", "%f", p->zero_runs);
        if (s->measure_perchannel & MEASURE_ZERO_CROSSINGS_RATE)
            set_meta(metadata, c + 1, "Zero_crossings_rate", "%f", p->zero_runs/(double)p->nb_samples);
        if ((s->is_float || s->is_double) && s->measure_perchannel & MEASURE_NUMBER_OF_NANS)
            set_meta(metadata, c + 1, "Number of NaNs", "%f", p->nb_nans);
        if ((s->is_float || s->is_double) && s->measure_perchannel & MEASURE_NUMBER_OF_INFS)
            set_meta(metadata, c + 1, "Number of Infs", "%f", p->nb_infs);
        if ((s->is_float || s->is_double) && s->measure_perchannel & MEASURE_NUMBER_OF_DENORMALS)
            set_meta(metadata, c + 1, "Number of denormals", "%f", p->nb_denormals);
    }

    /* Overall metadata entries, gated by measure_overall. */
    if (s->measure_overall & MEASURE_DC_OFFSET)
        set_meta(metadata, 0, "Overall.DC_offset", "%f", max_sigma_x / (nb_samples / s->nb_channels));
    if (s->measure_overall & MEASURE_MIN_LEVEL)
        set_meta(metadata, 0, "Overall.Min_level", "%f", min);
    if (s->measure_overall & MEASURE_MAX_LEVEL)
        set_meta(metadata, 0, "Overall.Max_level", "%f", max);
    if (s->measure_overall & MEASURE_MIN_DIFFERENCE)
        set_meta(metadata, 0, "Overall.Min_difference", "%f", min_diff);
    if (s->measure_overall & MEASURE_MAX_DIFFERENCE)
        set_meta(metadata, 0, "Overall.Max_difference", "%f", max_diff);
    if (s->measure_overall & MEASURE_MEAN_DIFFERENCE)
        set_meta(metadata, 0, "Overall.Mean_difference", "%f", diff1_sum / (nb_samples - s->nb_channels));
    if (s->measure_overall & MEASURE_RMS_DIFFERENCE)
        set_meta(metadata, 0, "Overall.RMS_difference", "%f", sqrt(diff1_sum_x2 / (nb_samples - s->nb_channels)));
    if (s->measure_overall & MEASURE_PEAK_LEVEL)
        set_meta(metadata, 0, "Overall.Peak_level", "%f", LINEAR_TO_DB(FFMAX(-nmin, nmax)));
    if (s->measure_overall & MEASURE_RMS_LEVEL)
        set_meta(metadata, 0, "Overall.RMS_level", "%f", LINEAR_TO_DB(sqrt(sigma_x2 / nb_samples)));
    if (s->measure_overall & MEASURE_RMS_PEAK)
        set_meta(metadata, 0, "Overall.RMS_peak", "%f", LINEAR_TO_DB(sqrt(max_sigma_x2)));
    if (s->measure_overall & MEASURE_RMS_TROUGH)
        set_meta(metadata, 0, "Overall.RMS_trough", "%f", LINEAR_TO_DB(sqrt(min_sigma_x2)));
    if (s->measure_overall & MEASURE_FLAT_FACTOR)
        set_meta(metadata, 0, "Overall.Flat_factor", "%f", LINEAR_TO_DB((min_runs + max_runs) / (min_count + max_count)));
    if (s->measure_overall & MEASURE_PEAK_COUNT)
        set_meta(metadata, 0, "Overall.Peak_count", "%f", (float)(min_count + max_count) / (double)s->nb_channels);
    if (s->measure_overall & MEASURE_NOISE_FLOOR)
        set_meta(metadata, 0, "Overall.Noise_floor", "%f", LINEAR_TO_DB(noise_floor));
    if (s->measure_overall & MEASURE_NOISE_FLOOR_COUNT)
        set_meta(metadata, 0, "Overall.Noise_floor_count", "%f", noise_floor_count / (double)s->nb_channels);
    if (s->measure_overall & MEASURE_BIT_DEPTH) {
        bit_depth(s, mask, imask, &depth);
        set_meta(metadata, 0, "Overall.Bit_depth", "%f", depth.num);
        set_meta(metadata, 0, "Overall.Bit_depth2", "%f", depth.den);
    }
    if (s->measure_overall & MEASURE_NUMBER_OF_SAMPLES)
        set_meta(metadata, 0, "Overall.Number_of_samples", "%f", nb_samples / s->nb_channels);
    if ((s->is_float || s->is_double) && s->measure_overall & MEASURE_NUMBER_OF_NANS)
        set_meta(metadata, 0, "Number of NaNs", "%f", nb_nans / (float)s->nb_channels);
    if ((s->is_float || s->is_double) && s->measure_overall & MEASURE_NUMBER_OF_INFS)
        set_meta(metadata, 0, "Number of Infs", "%f", nb_infs / (float)s->nb_channels);
    if ((s->is_float || s->is_double) && s->measure_overall & MEASURE_NUMBER_OF_DENORMALS)
        set_meta(metadata, 0, "Number of denormals", "%f", nb_denormals / (float)s->nb_channels);
}
/* Traverse planar sample data: data[c] holds one contiguous plane per
 * channel.  Runs update_func and update_float on every sample of the job's
 * channel range, then channel_func once per channel. */
#define UPDATE_STATS_P(type, update_func, update_float, channel_func)   \
    for (int c = start; c < end; c++) {                                 \
        ChannelStats *p = &s->chstats[c];                               \
        const type *src = (const type *)data[c];                        \
        const type * const srcend = src + samples;                      \
        for (; src < srcend; src++) {                                   \
            update_func;                                                \
            update_float;                                               \
        }                                                               \
        channel_func;                                                   \
    }

/* Traverse interleaved sample data: data[0] holds all channels with a
 * stride of 'channels'; the walk starts at offset c for this channel. */
#define UPDATE_STATS_I(type, update_func, update_float, channel_func)   \
    for (int c = start; c < end; c++) {                                 \
        ChannelStats *p = &s->chstats[c];                               \
        const type *src = (const type *)data[0];                        \
        const type * const srcend = src + samples * channels;           \
        for (src += c; src < srcend; src += channels) {                 \
            update_func;                                                \
            update_float;                                               \
        }                                                               \
        channel_func;                                                   \
    }

/* Dispatch to the full statistics path, or to the cheap min/max-only path
 * when nothing beyond min/max/peak measurements was requested. */
#define UPDATE_STATS(planar, type, sample, normalizer_suffix, int_sample) \
    if ((s->measure_overall | s->measure_perchannel) & ~MEASURE_MINMAXPEAK) { \
        UPDATE_STATS_##planar(type, update_stat(s, p, sample, sample normalizer_suffix, int_sample), s->is_float ? update_float_stat(s, p, sample) : s->is_double ? update_double_stat(s, p, sample) : (void)NULL, ); \
    } else { \
        UPDATE_STATS_##planar(type, update_minmax(s, p, sample), , p->nmin = p->min normalizer_suffix; p->nmax = p->max normalizer_suffix;); \
    }
/* Slice worker: update statistics for the channel range [start, end)
 * assigned to this job, dispatching on the sample format to the planar (P)
 * or interleaved (I) traversal macro.  Always returns 0. */
static int filter_channel(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    AudioStatsContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFrame *buf = arg;
    const uint8_t * const * const data = (const uint8_t * const *)buf->extended_data;
    const int channels = s->nb_channels;
    const int samples = buf->nb_samples;
    /* Split the channels evenly across jobs. */
    const int start = (buf->channels * jobnr) / nb_jobs;
    const int end = (buf->channels * (jobnr+1)) / nb_jobs;

    switch (inlink->format) {
    case AV_SAMPLE_FMT_DBLP:
        /* NOTE(review): llrint(x * 2^63) overflows (UB) for |x| >= 1 —
         * inherited from upstream; confirm inputs are normalized. */
        UPDATE_STATS(P, double, *src, , llrint(*src * (UINT64_C(1) << 63)));
        break;
    case AV_SAMPLE_FMT_DBL:
        UPDATE_STATS(I, double, *src, , llrint(*src * (UINT64_C(1) << 63)));
        break;
    case AV_SAMPLE_FMT_FLTP:
        UPDATE_STATS(P, float, *src, , llrint(*src * (UINT64_C(1) << 31)));
        break;
    case AV_SAMPLE_FMT_FLT:
        UPDATE_STATS(I, float, *src, , llrint(*src * (UINT64_C(1) << 31)));
        break;
    case AV_SAMPLE_FMT_S64P:
        /* Integer formats are normalized to [-1, 1] via the type's maximum. */
        UPDATE_STATS(P, int64_t, *src, / (double)INT64_MAX, *src);
        break;
    case AV_SAMPLE_FMT_S64:
        UPDATE_STATS(I, int64_t, *src, / (double)INT64_MAX, *src);
        break;
    case AV_SAMPLE_FMT_S32P:
        UPDATE_STATS(P, int32_t, *src, / (double)INT32_MAX, *src);
        break;
    case AV_SAMPLE_FMT_S32:
        UPDATE_STATS(I, int32_t, *src, / (double)INT32_MAX, *src);
        break;
    case AV_SAMPLE_FMT_S16P:
        UPDATE_STATS(P, int16_t, *src, / (double)INT16_MAX, *src);
        break;
    case AV_SAMPLE_FMT_S16:
        UPDATE_STATS(I, int16_t, *src, / (double)INT16_MAX, *src);
        break;
    }

    return 0;
}
/* Per-frame entry point of astats: update the statistics over all worker
 * threads, optionally attach the measurements as frame metadata, and pass
 * the frame through unchanged. */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AudioStatsContext *s = ctx->priv;
    const int nb_jobs = FFMIN(inlink->channels, ff_filter_get_nb_threads(ctx));

    /* Periodically restart the measurement window when reset_count is set. */
    if (s->reset_count > 0) {
        if (s->nb_frames >= s->reset_count) {
            reset_stats(s);
            s->nb_frames = 0;
        }
        s->nb_frames++;
    }

    /* Update per-channel statistics, sliced across worker threads. */
    ctx->internal->execute(ctx, filter_channel, buf, NULL, nb_jobs);

    if (s->metadata)
        set_metadata(s, &buf->metadata);

    return ff_filter_frame(ctx->outputs[0], buf);
}
/* Log the accumulated statistics — per channel, then overall — on the
 * INFO level.  Called once from uninit(). */
static void print_stats(AVFilterContext *ctx)
{
    AudioStatsContext *s = ctx->priv;
    uint64_t mask = 0, imask = 0xFFFFFFFFFFFFFFFF, min_count = 0, max_count = 0, nb_samples = 0, noise_floor_count = 0;
    uint64_t nb_nans = 0, nb_infs = 0, nb_denormals = 0;
    double min_runs = 0, max_runs = 0,
           min = DBL_MAX, max =-DBL_MAX, min_diff = DBL_MAX, max_diff = 0,
           nmin = DBL_MAX, nmax =-DBL_MAX,
           max_sigma_x = 0,
           diff1_sum_x2 = 0,
           diff1_sum = 0,
           sigma_x = 0,
           sigma_x2 = 0,
           noise_floor = 0,
           min_sigma_x2 = DBL_MAX,
           max_sigma_x2 =-DBL_MAX;
    AVRational depth;
    int c;

    for (c = 0; c < s->nb_channels; c++) {
        ChannelStats *p = &s->chstats[c];

        /* With fewer samples than one window, derive the windowed RMS
         * bounds from the plain running power sum instead. */
        if (p->nb_samples < s->tc_samples)
            p->min_sigma_x2 = p->max_sigma_x2 = p->sigma_x2 / p->nb_samples;

        /* Fold this channel into the overall aggregates. */
        min = FFMIN(min, p->min);
        max = FFMAX(max, p->max);
        nmin = FFMIN(nmin, p->nmin);
        nmax = FFMAX(nmax, p->nmax);
        min_diff = FFMIN(min_diff, p->min_diff);
        max_diff = FFMAX(max_diff, p->max_diff);
        diff1_sum_x2 += p->diff1_sum_x2;
        diff1_sum += p->diff1_sum;
        min_sigma_x2 = FFMIN(min_sigma_x2, p->min_sigma_x2);
        max_sigma_x2 = FFMAX(max_sigma_x2, p->max_sigma_x2);
        sigma_x += p->sigma_x;
        sigma_x2 += p->sigma_x2;
        noise_floor = FFMAX(noise_floor, p->noise_floor);
        min_count += p->min_count;
        max_count += p->max_count;
        noise_floor_count += p->noise_floor_count;
        min_runs += p->min_runs;
        max_runs += p->max_runs;
        mask |= p->mask;
        imask &= p->imask;
        nb_samples += p->nb_samples;
        nb_nans += p->nb_nans;
        nb_infs += p->nb_infs;
        nb_denormals += p->nb_denormals;
        /* Keep the channel sum with the largest magnitude for DC offset. */
        if (fabs(p->sigma_x) > fabs(max_sigma_x))
            max_sigma_x = p->sigma_x;

        /* Per-channel report, gated by measure_perchannel. */
        av_log(ctx, AV_LOG_INFO, "Channel: %d\n", c + 1);
        if (s->measure_perchannel & MEASURE_DC_OFFSET)
            av_log(ctx, AV_LOG_INFO, "DC offset: %f\n", p->sigma_x / p->nb_samples);
        if (s->measure_perchannel & MEASURE_MIN_LEVEL)
            av_log(ctx, AV_LOG_INFO, "Min level: %f\n", p->min);
        if (s->measure_perchannel & MEASURE_MAX_LEVEL)
            av_log(ctx, AV_LOG_INFO, "Max level: %f\n", p->max);
        if (s->measure_perchannel & MEASURE_MIN_DIFFERENCE)
            av_log(ctx, AV_LOG_INFO, "Min difference: %f\n", p->min_diff);
        if (s->measure_perchannel & MEASURE_MAX_DIFFERENCE)
            av_log(ctx, AV_LOG_INFO, "Max difference: %f\n", p->max_diff);
        if (s->measure_perchannel & MEASURE_MEAN_DIFFERENCE)
            av_log(ctx, AV_LOG_INFO, "Mean difference: %f\n", p->diff1_sum / (p->nb_samples - 1));
        if (s->measure_perchannel & MEASURE_RMS_DIFFERENCE)
            av_log(ctx, AV_LOG_INFO, "RMS difference: %f\n", sqrt(p->diff1_sum_x2 / (p->nb_samples - 1)));
        if (s->measure_perchannel & MEASURE_PEAK_LEVEL)
            av_log(ctx, AV_LOG_INFO, "Peak level dB: %f\n", LINEAR_TO_DB(FFMAX(-p->nmin, p->nmax)));
        if (s->measure_perchannel & MEASURE_RMS_LEVEL)
            av_log(ctx, AV_LOG_INFO, "RMS level dB: %f\n", LINEAR_TO_DB(sqrt(p->sigma_x2 / p->nb_samples)));
        if (s->measure_perchannel & MEASURE_RMS_PEAK)
            av_log(ctx, AV_LOG_INFO, "RMS peak dB: %f\n", LINEAR_TO_DB(sqrt(p->max_sigma_x2)));
        if (s->measure_perchannel & MEASURE_RMS_TROUGH)
            /* min_sigma_x2 == 1 means the trough was never updated. */
            if (p->min_sigma_x2 != 1)
                av_log(ctx, AV_LOG_INFO, "RMS trough dB: %f\n",LINEAR_TO_DB(sqrt(p->min_sigma_x2)));
        if (s->measure_perchannel & MEASURE_CREST_FACTOR)
            av_log(ctx, AV_LOG_INFO, "Crest factor: %f\n", p->sigma_x2 ? FFMAX(-p->nmin, p->nmax) / sqrt(p->sigma_x2 / p->nb_samples) : 1);
        if (s->measure_perchannel & MEASURE_FLAT_FACTOR)
            av_log(ctx, AV_LOG_INFO, "Flat factor: %f\n", LINEAR_TO_DB((p->min_runs + p->max_runs) / (p->min_count + p->max_count)));
        if (s->measure_perchannel & MEASURE_PEAK_COUNT)
            av_log(ctx, AV_LOG_INFO, "Peak count: %"PRId64"\n", p->min_count + p->max_count);
        if (s->measure_perchannel & MEASURE_NOISE_FLOOR)
            av_log(ctx, AV_LOG_INFO, "Noise floor dB: %f\n", LINEAR_TO_DB(p->noise_floor));
        if (s->measure_perchannel & MEASURE_NOISE_FLOOR_COUNT)
            av_log(ctx, AV_LOG_INFO, "Noise floor count: %"PRId64"\n", p->noise_floor_count);
        if (s->measure_perchannel & MEASURE_BIT_DEPTH) {
            bit_depth(s, p->mask, p->imask, &depth);
            av_log(ctx, AV_LOG_INFO, "Bit depth: %u/%u\n", depth.num, depth.den);
        }
        if (s->measure_perchannel & MEASURE_DYNAMIC_RANGE)
            av_log(ctx, AV_LOG_INFO, "Dynamic range: %f\n", LINEAR_TO_DB(2 * FFMAX(FFABS(p->min), FFABS(p->max))/ p->min_non_zero));
        if (s->measure_perchannel & MEASURE_ZERO_CROSSINGS)
            av_log(ctx, AV_LOG_INFO, "Zero crossings: %"PRId64"\n", p->zero_runs);
        if (s->measure_perchannel & MEASURE_ZERO_CROSSINGS_RATE)
            av_log(ctx, AV_LOG_INFO, "Zero crossings rate: %f\n", p->zero_runs/(double)p->nb_samples);
        if ((s->is_float || s->is_double) && s->measure_perchannel & MEASURE_NUMBER_OF_NANS)
            av_log(ctx, AV_LOG_INFO, "Number of NaNs: %"PRId64"\n", p->nb_nans);
        if ((s->is_float || s->is_double) && s->measure_perchannel & MEASURE_NUMBER_OF_INFS)
            av_log(ctx, AV_LOG_INFO, "Number of Infs: %"PRId64"\n", p->nb_infs);
        if ((s->is_float || s->is_double) && s->measure_perchannel & MEASURE_NUMBER_OF_DENORMALS)
            av_log(ctx, AV_LOG_INFO, "Number of denormals: %"PRId64"\n", p->nb_denormals);
    }

    /* Overall report, gated by measure_overall. */
    av_log(ctx, AV_LOG_INFO, "Overall\n");
    if (s->measure_overall & MEASURE_DC_OFFSET)
        av_log(ctx, AV_LOG_INFO, "DC offset: %f\n", max_sigma_x / (nb_samples / s->nb_channels));
    if (s->measure_overall & MEASURE_MIN_LEVEL)
        av_log(ctx, AV_LOG_INFO, "Min level: %f\n", min);
    if (s->measure_overall & MEASURE_MAX_LEVEL)
        av_log(ctx, AV_LOG_INFO, "Max level: %f\n", max);
    if (s->measure_overall & MEASURE_MIN_DIFFERENCE)
        av_log(ctx, AV_LOG_INFO, "Min difference: %f\n", min_diff);
    if (s->measure_overall & MEASURE_MAX_DIFFERENCE)
        av_log(ctx, AV_LOG_INFO, "Max difference: %f\n", max_diff);
    if (s->measure_overall & MEASURE_MEAN_DIFFERENCE)
        av_log(ctx, AV_LOG_INFO, "Mean difference: %f\n", diff1_sum / (nb_samples - s->nb_channels));
    if (s->measure_overall & MEASURE_RMS_DIFFERENCE)
        av_log(ctx, AV_LOG_INFO, "RMS difference: %f\n", sqrt(diff1_sum_x2 / (nb_samples - s->nb_channels)));
    if (s->measure_overall & MEASURE_PEAK_LEVEL)
        av_log(ctx, AV_LOG_INFO, "Peak level dB: %f\n", LINEAR_TO_DB(FFMAX(-nmin, nmax)));
    if (s->measure_overall & MEASURE_RMS_LEVEL)
        av_log(ctx, AV_LOG_INFO, "RMS level dB: %f\n", LINEAR_TO_DB(sqrt(sigma_x2 / nb_samples)));
    if (s->measure_overall & MEASURE_RMS_PEAK)
        av_log(ctx, AV_LOG_INFO, "RMS peak dB: %f\n", LINEAR_TO_DB(sqrt(max_sigma_x2)));
    if (s->measure_overall & MEASURE_RMS_TROUGH)
        if (min_sigma_x2 != 1)
            av_log(ctx, AV_LOG_INFO, "RMS trough dB: %f\n", LINEAR_TO_DB(sqrt(min_sigma_x2)));
    if (s->measure_overall & MEASURE_FLAT_FACTOR)
        av_log(ctx, AV_LOG_INFO, "Flat factor: %f\n", LINEAR_TO_DB((min_runs + max_runs) / (min_count + max_count)));
    if (s->measure_overall & MEASURE_PEAK_COUNT)
        av_log(ctx, AV_LOG_INFO, "Peak count: %f\n", (min_count + max_count) / (double)s->nb_channels);
    if (s->measure_overall & MEASURE_NOISE_FLOOR)
        av_log(ctx, AV_LOG_INFO, "Noise floor dB: %f\n", LINEAR_TO_DB(noise_floor));
    if (s->measure_overall & MEASURE_NOISE_FLOOR_COUNT)
        av_log(ctx, AV_LOG_INFO, "Noise floor count: %f\n", noise_floor_count / (double)s->nb_channels);
    if (s->measure_overall & MEASURE_BIT_DEPTH) {
        bit_depth(s, mask, imask, &depth);
        av_log(ctx, AV_LOG_INFO, "Bit depth: %u/%u\n", depth.num, depth.den);
    }
    if (s->measure_overall & MEASURE_NUMBER_OF_SAMPLES)
        av_log(ctx, AV_LOG_INFO, "Number of samples: %"PRId64"\n", nb_samples / s->nb_channels);
    if ((s->is_float || s->is_double) && s->measure_overall & MEASURE_NUMBER_OF_NANS)
        av_log(ctx, AV_LOG_INFO, "Number of NaNs: %f\n", nb_nans / (float)s->nb_channels);
    if ((s->is_float || s->is_double) && s->measure_overall & MEASURE_NUMBER_OF_INFS)
        av_log(ctx, AV_LOG_INFO, "Number of Infs: %f\n", nb_infs / (float)s->nb_channels);
    if ((s->is_float || s->is_double) && s->measure_overall & MEASURE_NUMBER_OF_DENORMALS)
        av_log(ctx, AV_LOG_INFO, "Number of denormals: %f\n", nb_denormals / (float)s->nb_channels);
}
/* Emit the final report (if any frame was processed) and release the
 * per-channel window buffers and the channel array itself. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioStatsContext *s = ctx->priv;

    if (s->nb_channels)
        print_stats(ctx);

    if (s->chstats) {
        for (int ch = 0; ch < s->nb_channels; ch++)
            av_freep(&s->chstats[ch].win_samples);
    }
    av_freep(&s->chstats);
}
/* Single audio input; statistics are updated in filter_frame(). */
static const AVFilterPad astats_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output; frames pass through unmodified. */
static const AVFilterPad astats_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};
/* Filter registration for astats. */
AVFilter ff_af_astats = {
    .name          = "astats",
    .description   = NULL_IF_CONFIG_SMALL("Show time domain statistics about audio frames."),
    .query_formats = query_formats,
    .priv_size     = sizeof(AudioStatsContext),
    .priv_class    = &astats_class,
    .uninit        = uninit,
    .inputs        = astats_inputs,
    .outputs       = astats_outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS,
};

232
externals/ffmpeg/libavfilter/af_asubboost.c vendored Executable file
View File

@@ -0,0 +1,232 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
/* Runtime state for the asubboost filter: a low-pass biquad feeding a
 * decaying feedback delay line, mixed back with the dry signal. */
typedef struct ASubBoostContext {
    const AVClass *class;

    double dry_gain;   // gain applied to the unprocessed input
    double wet_gain;   // gain applied to the boosted (delayed) signal
    double feedback;   // amount of filtered signal fed into the delay line
    double decay;      // decay applied to existing delay-line content
    double delay;      // delay-line length in milliseconds
    double cutoff;     // low-pass cutoff frequency in Hz
    double slope;      // low-pass slope parameter
    double a0, a1, a2; // biquad denominator coefficients (a1/a2 normalized by a0)
    double b0, b1, b2; // biquad numerator coefficients (normalized by a0)

    int write_pos;       // current write index into the delay buffer
    int buffer_samples;  // active delay-line length in samples

    AVFrame *i, *o;   // per-channel biquad input/output history (2 taps each)
    AVFrame *buffer;  // per-channel feedback delay line
} ASubBoostContext;
/* Negotiate formats: planar double samples, any channel count/layout, any
 * sample rate.  Returns 0 or a negative AVERROR code. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    /* Bug fix: the allocation result was previously passed on unchecked,
     * risking a NULL dereference on OOM (the axcorrelate twin checks it). */
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Recompute the second-order low-pass biquad coefficients (RBJ-cookbook
 * style shelving-slope formulation) from the current cutoff/slope options,
 * and refresh the delay-line length.  Always returns 0. */
static int get_coeffs(AVFilterContext *ctx)
{
    ASubBoostContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    double w0 = 2 * M_PI * s->cutoff / inlink->sample_rate; // normalized angular frequency
    double alpha = sin(w0) / 2 * sqrt(2. * (1. / s->slope - 1.) + 2.);

    s->a0 =  1 + alpha;
    s->a1 = -2 * cos(w0);
    s->a2 =  1 - alpha;
    s->b0 = (1 - cos(w0)) / 2;
    s->b1 =  1 - cos(w0);
    s->b2 = (1 - cos(w0)) / 2;

    /* Normalize by a0 so it never has to appear in the filtering loop. */
    s->a1 /= s->a0;
    s->a2 /= s->a0;
    s->b0 /= s->a0;
    s->b1 /= s->a0;
    s->b2 /= s->a0;

    /* The delay option is expressed in milliseconds. */
    s->buffer_samples = inlink->sample_rate * s->delay / 1000;

    return 0;
}
/* Allocate the delay line (sized for the maximum 100 ms delay, i.e.
 * sample_rate / 10 samples) and the two-tap biquad history frames, then
 * compute the initial coefficients. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ASubBoostContext *s = ctx->priv;

    s->buffer = ff_get_audio_buffer(inlink, inlink->sample_rate / 10);
    s->i = ff_get_audio_buffer(inlink, 2);
    s->o = ff_get_audio_buffer(inlink, 2);
    if (!s->buffer || !s->i || !s->o)
        return AVERROR(ENOMEM);

    return get_coeffs(ctx);
}
/* Filter one frame: low-pass the input through the biquad, feed the result
 * into a decaying feedback delay line, and mix the delayed "boost" signal
 * with the dry input.  Works in place when the frame is writable.
 * Returns 0 or a negative AVERROR code. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ASubBoostContext *s = ctx->priv;
    /* Keep the gains in double: samples are DBLP and the options are stored
     * as double, so the former float copies quantized them for no benefit. */
    const double wet = s->wet_gain, dry = s->dry_gain, feedback = s->feedback, decay = s->decay;
    int write_pos = s->write_pos; /* initialized so a zero-channel frame
                                     cannot store an indeterminate value back */
    AVFrame *out;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    for (int ch = 0; ch < in->channels; ch++) {
        const double *src = (const double *)in->extended_data[ch];
        double *dst = (double *)out->extended_data[ch];
        double *buffer = (double *)s->buffer->extended_data[ch];
        double *ix = (double *)s->i->extended_data[ch];
        double *ox = (double *)s->o->extended_data[ch];

        write_pos = s->write_pos;

        for (int n = 0; n < in->nb_samples; n++) {
            double out_sample;

            /* Direct form I biquad low-pass. */
            out_sample = src[n] * s->b0 + ix[0] * s->b1 + ix[1] * s->b2 - ox[0] * s->a1 - ox[1] * s->a2;
            ix[1] = ix[0];
            ix[0] = src[n];
            ox[1] = ox[0];
            ox[0] = out_sample;
            /* Recirculating delay line: old content decays while the
             * freshly filtered sample is fed back in. */
            buffer[write_pos] = buffer[write_pos] * decay + out_sample * feedback;
            dst[n] = src[n] * dry + buffer[write_pos] * wet;

            if (++write_pos >= s->buffer_samples)
                write_pos = 0;
        }
    }
    s->write_pos = write_pos;

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Release the biquad history frames and the feedback delay line. */
static av_cold void uninit(AVFilterContext *ctx)
{
    ASubBoostContext *s = ctx->priv;

    av_frame_free(&s->i);
    av_frame_free(&s->o);
    av_frame_free(&s->buffer);
}
/* Runtime option handling: apply the generic option update, then refresh
 * the filter coefficients from the new option values. */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    const int ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);

    return ret < 0 ? ret : get_coeffs(ctx);
}
#define OFFSET(x) offsetof(ASubBoostContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
/* Filter options; all are runtime-tunable (AV_OPT_FLAG_RUNTIME_PARAM) via
 * process_command(). */
static const AVOption asubboost_options[] = {
    { "dry",      "set dry gain", OFFSET(dry_gain), AV_OPT_TYPE_DOUBLE, {.dbl=0.5},      0,   1, FLAGS },
    { "wet",      "set wet gain", OFFSET(wet_gain), AV_OPT_TYPE_DOUBLE, {.dbl=0.8},      0,   1, FLAGS },
    { "decay",    "set decay",    OFFSET(decay),    AV_OPT_TYPE_DOUBLE, {.dbl=0.7},      0,   1, FLAGS },
    { "feedback", "set feedback", OFFSET(feedback), AV_OPT_TYPE_DOUBLE, {.dbl=0.5},      0,   1, FLAGS },
    { "cutoff",   "set cutoff",   OFFSET(cutoff),   AV_OPT_TYPE_DOUBLE, {.dbl=100},     50, 900, FLAGS },
    { "slope",    "set slope",    OFFSET(slope),    AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0.0001,   1, FLAGS },
    { "delay",    "set delay",    OFFSET(delay),    AV_OPT_TYPE_DOUBLE, {.dbl=20},       1, 100, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(asubboost);
/* Single audio input; buffers are allocated in config_input(). */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
/* Single audio output. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration for asubboost. */
AVFilter ff_af_asubboost = {
    .name            = "asubboost",
    .description     = NULL_IF_CONFIG_SMALL("Boost subwoofer frequencies."),
    .query_formats   = query_formats,
    .priv_size       = sizeof(ASubBoostContext),
    .priv_class      = &asubboost_class,
    .uninit          = uninit,
    .inputs          = inputs,
    .outputs         = outputs,
    .process_command = process_command,
};

1216
externals/ffmpeg/libavfilter/af_atempo.c vendored Executable file

File diff suppressed because it is too large Load Diff

378
externals/ffmpeg/libavfilter/af_axcorrelate.c vendored Executable file
View File

@@ -0,0 +1,378 @@
/*
* Copyright (c) 2019 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avassert.h"
#include "libavutil/audio_fifo.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "filters.h"
#include "internal.h"
/* State for the axcorrelate filter: two input FIFOs plus cached windows and
 * running sums used by the incremental correlation algorithms. */
typedef struct AudioXCorrelateContext {
    const AVClass *class;

    int size;             // correlation window size in samples
    int algo;             // algorithm selection option (handled outside this chunk)
    int64_t pts;          // next output timestamp
    AVAudioFifo *fifo[2]; // buffered input samples, one FIFO per input
    AVFrame *cache[2];    // peeked window + surplus samples per input
    AVFrame *mean_sum[2]; // running per-channel window sums (slow algorithm)
    AVFrame *num_sum;     // running sum of x*y (fast algorithm)
    AVFrame *den_sum[2];  // running sums of x*x and y*y (fast algorithm)
    int used;             // non-zero once the running sums have been seeded

    int (*xcorrelate)(AVFilterContext *ctx, AVFrame *out); // selected implementation
} AudioXCorrelateContext;
/* Negotiate formats: any channel count/layout and sample rate, but the
 * processing is implemented for planar float only. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *fmts;
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, fmts)) < 0)
        return ret;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/* Sum of the first 'size' samples of 'in' (the caller divides by size to
 * obtain the window mean). */
static float mean_sum(const float *in, int size)
{
    float acc = 0.f;

    while (size-- > 0)
        acc += *in++;
    return acc;
}
/* Dot product of x and y over 'size' samples (x == y yields a sum of squares). */
static float square_sum(const float *x, const float *y, int size)
{
    float acc = 0.f;

    for (int n = 0; n < size; n++)
        acc += x[n] * y[n];
    return acc;
}
static float xcorrelate(const float *x, const float *y, float sumx, float sumy, int size)
{
const float xm = sumx / size, ym = sumy / size;
float num = 0.f, den, den0 = 0.f, den1 = 0.f;
for (int i = 0; i < size; i++) {
float xd = x[i] - xm;
float yd = y[i] - ym;
num += xd * yd;
den0 += xd * xd;
den1 += yd * yd;
}
num /= size;
den = sqrtf((den0 * den1) / (size * size));
return den <= 1e-6f ? 0.f : num / den;
}
/* Sliding-window correlation, "slow" algorithm: keeps running window sums
 * of x and y and recomputes the full correlation for every output sample.
 * Returns the updated 'used' flag (sums seeded). */
static int xcorrelate_slow(AVFilterContext *ctx, AVFrame *out)
{
    AudioXCorrelateContext *s = ctx->priv;
    const int size = s->size;
    int used;

    for (int ch = 0; ch < out->channels; ch++) {
        const float *x = (const float *)s->cache[0]->extended_data[ch];
        const float *y = (const float *)s->cache[1]->extended_data[ch];
        float *sumx = (float *)s->mean_sum[0]->extended_data[ch];
        float *sumy = (float *)s->mean_sum[1]->extended_data[ch];
        float *dst = (float *)out->extended_data[ch];

        used = s->used;
        if (!used) {
            /* Seed the running sums on the very first window. */
            sumx[0] = mean_sum(x, size);
            sumy[0] = mean_sum(y, size);
            used = 1;
        }

        for (int n = 0; n < out->nb_samples; n++) {
            dst[n] = xcorrelate(x + n, y + n, sumx[0], sumy[0], size);

            /* Slide the window one sample forward. */
            sumx[0] -= x[n];
            sumx[0] += x[n + size];
            sumy[0] -= y[n];
            sumy[0] += y[n + size];
        }
    }

    /* NOTE(review): 'used' is indeterminate if out->channels == 0 —
     * presumably never the case on a negotiated audio link; confirm. */
    return used;
}
/* Sliding-window correlation, "fast" algorithm: maintains running sums of
 * x*y, x*x and y*y (no mean subtraction) and updates them incrementally as
 * the window slides.  Returns the updated 'used' flag (sums seeded). */
static int xcorrelate_fast(AVFilterContext *ctx, AVFrame *out)
{
    AudioXCorrelateContext *s = ctx->priv;
    const int size = s->size;
    int used;

    for (int ch = 0; ch < out->channels; ch++) {
        const float *x = (const float *)s->cache[0]->extended_data[ch];
        const float *y = (const float *)s->cache[1]->extended_data[ch];
        float *num_sum = (float *)s->num_sum->extended_data[ch];
        float *den_sumx = (float *)s->den_sum[0]->extended_data[ch];
        float *den_sumy = (float *)s->den_sum[1]->extended_data[ch];
        float *dst = (float *)out->extended_data[ch];

        used = s->used;
        if (!used) {
            /* Seed the running sums on the very first window. */
            num_sum[0]  = square_sum(x, y, size);
            den_sumx[0] = square_sum(x, x, size);
            den_sumy[0] = square_sum(y, y, size);
            used = 1;
        }

        for (int n = 0; n < out->nb_samples; n++) {
            float num, den;

            num = num_sum[0] / size;
            den = sqrtf((den_sumx[0] * den_sumy[0]) / (size * size));

            dst[n] = den <= 1e-6f ? 0.f : num / den;

            /* Slide the window; the FFMAX clamps guard the squared sums
             * against drifting negative from float cancellation. */
            num_sum[0] -= x[n] * y[n];
            num_sum[0] += x[n + size] * y[n + size];
            den_sumx[0] -= x[n] * x[n];
            den_sumx[0]  = FFMAX(den_sumx[0], 0.f);
            den_sumx[0] += x[n + size] * x[n + size];
            den_sumy[0] -= y[n] * y[n];
            den_sumy[0]  = FFMAX(den_sumy[0], 0.f);
            den_sumy[0] += y[n + size] * y[n + size];
        }
    }

    /* NOTE(review): 'used' is indeterminate if out->channels == 0 —
     * presumably never the case on a negotiated audio link; confirm. */
    return used;
}
/* Scheduling callback (activate model): buffer both inputs into FIFOs and,
 * whenever more than 'size' samples are available on both, emit the sliding
 * window cross-correlation for the surplus samples. */
static int activate(AVFilterContext *ctx)
{
    AudioXCorrelateContext *s = ctx->priv;
    AVFrame *frame = NULL;
    int ret, status;
    int available;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    /* Drain whatever each input currently offers into its FIFO. */
    for (int i = 0; i < 2; i++) {
        ret = ff_inlink_consume_frame(ctx->inputs[i], &frame);
        if (ret > 0) {
            if (s->pts == AV_NOPTS_VALUE)
                s->pts = frame->pts;
            ret = av_audio_fifo_write(s->fifo[i], (void **)frame->extended_data,
                                      frame->nb_samples);
            av_frame_free(&frame);
            if (ret < 0)
                return ret;
        }
    }

    available = FFMIN(av_audio_fifo_size(s->fifo[0]), av_audio_fifo_size(s->fifo[1]));
    if (available > s->size) {
        const int out_samples = available - s->size;
        AVFrame *out;

        /* (Re)allocate the peek caches so they can hold 'available' samples. */
        if (!s->cache[0] || s->cache[0]->nb_samples < available) {
            av_frame_free(&s->cache[0]);
            s->cache[0] = ff_get_audio_buffer(ctx->outputs[0], available);
            if (!s->cache[0])
                return AVERROR(ENOMEM);
        }

        if (!s->cache[1] || s->cache[1]->nb_samples < available) {
            av_frame_free(&s->cache[1]);
            s->cache[1] = ff_get_audio_buffer(ctx->outputs[0], available);
            if (!s->cache[1])
                return AVERROR(ENOMEM);
        }

        /* Peek (not read) so the trailing window remains in the FIFOs. */
        ret = av_audio_fifo_peek(s->fifo[0], (void **)s->cache[0]->extended_data, available);
        if (ret < 0)
            return ret;

        ret = av_audio_fifo_peek(s->fifo[1], (void **)s->cache[1]->extended_data, available);
        if (ret < 0)
            return ret;

        out = ff_get_audio_buffer(ctx->outputs[0], out_samples);
        if (!out)
            return AVERROR(ENOMEM);

        s->used = s->xcorrelate(ctx, out);

        out->pts = s->pts;
        s->pts += out_samples;

        /* Only the emitted samples are consumed; the window overlap stays. */
        av_audio_fifo_drain(s->fifo[0], out_samples);
        av_audio_fifo_drain(s->fifo[1], out_samples);

        return ff_filter_frame(ctx->outputs[0], out);
    }

    /* More output can be produced right away: reschedule ourselves. */
    if (av_audio_fifo_size(s->fifo[0]) > s->size &&
        av_audio_fifo_size(s->fifo[1]) > s->size) {
        ff_filter_set_ready(ctx, 10);
        return 0;
    }

    /* Forward EOF/status from either input. */
    for (int i = 0; i < 2; i++) {
        if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            return 0;
        }
    }

    /* Otherwise request more data from whichever input is short. */
    if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        for (int i = 0; i < 2; i++) {
            if (av_audio_fifo_size(s->fifo[i]) > s->size)
                continue;
            ff_inlink_request_frame(ctx->inputs[i]);
            return 0;
        }
    }

    return FFERROR_NOT_READY;
}
/* Configure the output link: allocate the input FIFOs and the one-sample
 * running-sum buffers, and pick the correlation implementation selected
 * by the "algo" option. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    AudioXCorrelateContext *s = ctx->priv;

    s->pts = AV_NOPTS_VALUE;

    /* The output inherits format and channel count from the first input. */
    outlink->format   = inlink->format;
    outlink->channels = inlink->channels;

    s->fifo[0] = av_audio_fifo_alloc(outlink->format, outlink->channels, s->size);
    s->fifo[1] = av_audio_fifo_alloc(outlink->format, outlink->channels, s->size);
    if (!s->fifo[0] || !s->fifo[1])
        return AVERROR(ENOMEM);

    /* One sample per channel is enough to carry each running sum. */
    s->mean_sum[0] = ff_get_audio_buffer(outlink, 1);
    s->mean_sum[1] = ff_get_audio_buffer(outlink, 1);
    s->num_sum     = ff_get_audio_buffer(outlink, 1);
    s->den_sum[0]  = ff_get_audio_buffer(outlink, 1);
    s->den_sum[1]  = ff_get_audio_buffer(outlink, 1);
    if (!s->mean_sum[0] || !s->mean_sum[1] || !s->num_sum ||
        !s->den_sum[0] || !s->den_sum[1])
        return AVERROR(ENOMEM);

    if (s->algo == 0)
        s->xcorrelate = xcorrelate_slow;
    else if (s->algo == 1)
        s->xcorrelate = xcorrelate_fast;

    return 0;
}
/* Free the FIFOs, peek caches and running-sum buffers. */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioXCorrelateContext *s = ctx->priv;

    for (int i = 0; i < 2; i++) {
        av_audio_fifo_free(s->fifo[i]);
        av_frame_free(&s->cache[i]);
        av_frame_free(&s->mean_sum[i]);
        av_frame_free(&s->den_sum[i]);
    }
    av_frame_free(&s->num_sum);
}
/* Two audio inputs: the pair of streams to be cross-correlated. */
static const AVFilterPad inputs[] = {
    {
        .name = "axcorrelate0",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    {
        .name = "axcorrelate1",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Single audio output carrying the correlation values. */
static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
    },
    { NULL }
};
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(AudioXCorrelateContext, x)

/* User-visible options: correlation window size and which of the two
 * implementations (exact per-sample recomputation vs. incremental
 * running sums) to use.
 * Fix: the "algo" help text misspelled "algorithm" as "alghorithm". */
static const AVOption axcorrelate_options[] = {
    { "size", "set segment size", OFFSET(size), AV_OPT_TYPE_INT,   {.i64=256}, 2, 131072, AF },
    { "algo", "set algorithm",    OFFSET(algo), AV_OPT_TYPE_INT,   {.i64=0},   0,      1, AF, "algo" },
    { "slow", "slow algorithm",   0,            AV_OPT_TYPE_CONST, {.i64=0},   0,      0, AF, "algo" },
    { "fast", "fast algorithm",   0,            AV_OPT_TYPE_CONST, {.i64=1},   0,      0, AF, "algo" },
    { NULL }
};
AVFILTER_DEFINE_CLASS(axcorrelate);
/* Filter registration: two-input, one-output audio cross-correlator. */
AVFilter ff_af_axcorrelate = {
    .name          = "axcorrelate",
    .description   = NULL_IF_CONFIG_SMALL("Cross-correlate two audio streams."),
    .priv_size     = sizeof(AudioXCorrelateContext),
    .priv_class    = &axcorrelate_class,
    .query_formats = query_formats,
    .activate      = activate,
    .uninit        = uninit,
    .inputs        = inputs,
    .outputs       = outputs,
};

867
externals/ffmpeg/libavfilter/af_biquads.c vendored Executable file
View File

@@ -0,0 +1,867 @@
/*
* Copyright (c) 2013 Paul B Mahol
* Copyright (c) 2006-2008 Rob Sykes <robs@users.sourceforge.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/*
* 2-pole filters designed by Robert Bristow-Johnson <rbj@audioimagination.com>
* see http://www.musicdsp.org/files/Audio-EQ-Cookbook.txt
*
* 1-pole filters based on code (c) 2000 Chris Bagwell <cbagwell@sprynet.com>
* Algorithms: Recursive single pole low/high pass filter
* Reference: The Scientist and Engineer's Guide to Digital Signal Processing
*
* low-pass: output[N] = input[N] * A + output[N-1] * B
* X = exp(-2.0 * pi * Fc)
* A = 1 - X
* B = X
* Fc = cutoff freq / sample rate
*
* Mimics an RC low-pass filter:
*
* ---/\/\/\/\----------->
* |
* --- C
* ---
* |
* |
* V
*
* high-pass: output[N] = A0 * input[N] + A1 * input[N-1] + B1 * output[N-1]
* X = exp(-2.0 * pi * Fc)
* A0 = (1 + X) / 2
* A1 = -(1 + X) / 2
* B1 = X
* Fc = cutoff freq / sample rate
*
* Mimics an RC high-pass filter:
*
* || C
* ----||--------->
* || |
* <
* > R
* <
* |
* V
*/
#include "libavutil/avassert.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
/* Which of the filters the shared biquad machinery implements; set by
 * each DEFINE_BIQUAD_FILTER() entry point. */
enum FilterType {
    biquad,      /* raw user-supplied coefficients */
    equalizer,
    bass,
    treble,
    bandpass,
    bandreject,
    allpass,
    highpass,
    lowpass,
    lowshelf,
    highshelf,
};
enum WidthType {
NONE,
HERTZ,
OCTAVE,
QFACTOR,
SLOPE,
KHERTZ,
NB_WTYPE,
};
/* Per-channel filter state: direct-form-I delay line plus a counter of
 * samples clipped since the last report. */
typedef struct ChanCache {
    double i1, i2;  /* previous two input samples */
    double o1, o2;  /* previous two output samples */
    int clippings;  /* clipped samples since last warning */
} ChanCache;
/* Shared private context for all biquad-based filters in this file. */
typedef struct BiquadsContext {
    const AVClass *class;
    enum FilterType filter_type;   /* which filter this instance implements */
    int width_type;                /* enum WidthType: unit of 'width' */
    int poles;                     /* 1 or 2 (lowpass/highpass only) */
    int csg;                       /* constant skirt gain (bandpass only) */
    double gain;                   /* dB */
    double frequency;              /* Hz */
    double width;                  /* in units given by width_type */
    double mix;                    /* wet/dry mix, 0..1 */
    uint64_t channels;             /* channel mask selecting what to filter */
    int normalize;                 /* scale b coefficients for unity DC gain */
    int order;                     /* 1 or 2 (allpass only) */
    double a0, a1, a2;             /* denominator coefficients */
    double b0, b1, b2;             /* numerator coefficients */
    ChanCache *cache;              /* per-channel state, inlink->channels long */
    int block_align;               /* bytes per sample of the input format */
    /* Per-format processing routine selected in config_filter(). */
    void (*filter)(struct BiquadsContext *s, const void *ibuf, void *obuf, int len,
                   double *i1, double *i2, double *o1, double *o2,
                   double b0, double b1, double b2, double a1, double a2, int *clippings,
                   int disabled);
} BiquadsContext;
/* Validate user parameters; raw "biquad" mode takes coefficients directly
 * and therefore skips the frequency/width sanity check. */
static av_cold int init(AVFilterContext *ctx)
{
    BiquadsContext *s = ctx->priv;

    if (s->filter_type == biquad)
        return 0;

    if (s->frequency <= 0 || s->width <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid frequency %f and/or width %f <= 0\n",
               s->frequency, s->width);
        return AVERROR(EINVAL);
    }

    return 0;
}
/* Advertise supported formats: any channel count/sample rate, planar
 * s16/s32/flt/dbl samples. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_S16P,
        AV_SAMPLE_FMT_S32P,
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *ch_layouts;
    AVFilterFormats *fmts;
    int err;

    ch_layouts = ff_all_channel_counts();
    if (!ch_layouts)
        return AVERROR(ENOMEM);
    err = ff_set_common_channel_layouts(ctx, ch_layouts);
    if (err < 0)
        return err;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    err = ff_set_common_formats(ctx, fmts);
    if (err < 0)
        return err;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/* Direct-form-I biquad kernel, instantiated once per sample format.
 * The main loop processes two samples per iteration so the delay-line
 * variables (i1/i2, o1/o2) swap roles instead of being shuffled; the
 * trailing if handles an odd final sample.  Integer variants clamp the
 * output to [min, max] and count clipped samples; float variants do not
 * clip (need_clipping == 0).  When the filter is timeline-disabled the
 * input is passed through unchanged while the state keeps updating, and
 * the "mix" option blends wet and dry signals. */
#define BIQUAD_FILTER(name, type, min, max, need_clipping)                    \
static void biquad_## name (BiquadsContext *s,                                \
                            const void *input, void *output, int len,         \
                            double *in1, double *in2,                         \
                            double *out1, double *out2,                       \
                            double b0, double b1, double b2,                  \
                            double a1, double a2, int *clippings,             \
                            int disabled)                                     \
{                                                                             \
    const type *ibuf = input;                                                 \
    type *obuf = output;                                                      \
    double i1 = *in1;                                                         \
    double i2 = *in2;                                                         \
    double o1 = *out1;                                                        \
    double o2 = *out2;                                                        \
    double wet = s->mix;                                                      \
    double dry = 1. - wet;                                                    \
    double out;                                                               \
    int i;                                                                    \
    a1 = -a1;                                                                 \
    a2 = -a2;                                                                 \
                                                                              \
    for (i = 0; i+1 < len; i++) {                                             \
        o2 = i2 * b2 + i1 * b1 + ibuf[i] * b0 + o2 * a2 + o1 * a1;            \
        i2 = ibuf[i];                                                         \
        out = o2 * wet + i2 * dry;                                            \
        if (disabled) {                                                       \
            obuf[i] = i2;                                                     \
        } else if (need_clipping && out < min) {                              \
            (*clippings)++;                                                   \
            obuf[i] = min;                                                    \
        } else if (need_clipping && out > max) {                              \
            (*clippings)++;                                                   \
            obuf[i] = max;                                                    \
        } else {                                                              \
            obuf[i] = out;                                                    \
        }                                                                     \
        i++;                                                                  \
        o1 = i1 * b2 + i2 * b1 + ibuf[i] * b0 + o1 * a2 + o2 * a1;            \
        i1 = ibuf[i];                                                         \
        out = o1 * wet + i1 * dry;                                            \
        if (disabled) {                                                       \
            obuf[i] = i1;                                                     \
        } else if (need_clipping && out < min) {                              \
            (*clippings)++;                                                   \
            obuf[i] = min;                                                    \
        } else if (need_clipping && out > max) {                              \
            (*clippings)++;                                                   \
            obuf[i] = max;                                                    \
        } else {                                                              \
            obuf[i] = out;                                                    \
        }                                                                     \
    }                                                                         \
    if (i < len) {                                                            \
        double o0 = ibuf[i] * b0 + i1 * b1 + i2 * b2 + o1 * a1 + o2 * a2;     \
        i2 = i1;                                                              \
        i1 = ibuf[i];                                                         \
        o2 = o1;                                                              \
        o1 = o0;                                                              \
        out = o0 * wet + i1 * dry;                                            \
        if (disabled) {                                                       \
            obuf[i] = i1;                                                     \
        } else if (need_clipping && out < min) {                              \
            (*clippings)++;                                                   \
            obuf[i] = min;                                                    \
        } else if (need_clipping && out > max) {                              \
            (*clippings)++;                                                   \
            obuf[i] = max;                                                    \
        } else {                                                              \
            obuf[i] = out;                                                    \
        }                                                                     \
    }                                                                         \
    *in1  = i1;                                                               \
    *in2  = i2;                                                               \
    *out1 = o1;                                                               \
    *out2 = o2;                                                               \
}
BIQUAD_FILTER(s16, int16_t, INT16_MIN, INT16_MAX, 1)
BIQUAD_FILTER(s32, int32_t, INT32_MIN, INT32_MAX, 1)
BIQUAD_FILTER(flt, float,   -1., 1., 0)
BIQUAD_FILTER(dbl, double,  -1., 1., 0)
/* (Re)compute the biquad coefficients from the user parameters and the
 * input sample rate (RBJ Audio-EQ-Cookbook formulas), normalize them by
 * a0, optionally rescale for unity DC gain, and select the per-format
 * filtering routine.  If "reset" is non-zero the per-channel delay-line
 * state is cleared as well. */
static int config_filter(AVFilterLink *outlink, int reset)
{
    AVFilterContext *ctx    = outlink->src;
    BiquadsContext *s       = ctx->priv;
    AVFilterLink *inlink    = ctx->inputs[0];
    double A = ff_exp10(s->gain / 40);
    double w0 = 2 * M_PI * s->frequency / inlink->sample_rate;
    double K = tan(w0 / 2.);
    double alpha, beta;
    if (w0 > M_PI) {
        av_log(ctx, AV_LOG_ERROR,
               "Invalid frequency %f. Frequency must be less than half the sample-rate %d.\n",
               s->frequency, inlink->sample_rate);
        return AVERROR(EINVAL);
    }
    /* Translate the user's width into the cookbook "alpha" parameter. */
    switch (s->width_type) {
    case NONE:
        alpha = 0.0;
        break;
    case HERTZ:
        alpha = sin(w0) / (2 * s->frequency / s->width);
        break;
    case KHERTZ:
        alpha = sin(w0) / (2 * s->frequency / (s->width * 1000));
        break;
    case OCTAVE:
        alpha = sin(w0) * sinh(log(2.) / 2 * s->width * w0 / sin(w0));
        break;
    case QFACTOR:
        alpha = sin(w0) / (2 * s->width);
        break;
    case SLOPE:
        alpha = sin(w0) / 2 * sqrt((A + 1 / A) * (1 / s->width - 1) + 2);
        break;
    default:
        av_assert0(0);
    }
    beta = 2 * sqrt(A);
    switch (s->filter_type) {
    case biquad:
        /* Coefficients are supplied directly by the user. */
        break;
    case equalizer:
        s->a0 =   1 + alpha / A;
        s->a1 =  -2 * cos(w0);
        s->a2 =   1 - alpha / A;
        s->b0 =   1 + alpha * A;
        s->b1 =  -2 * cos(w0);
        s->b2 =   1 - alpha * A;
        break;
    case bass:
        beta = sqrt((A * A + 1) - (A - 1) * (A - 1));
        /* fall through: bass is a low shelf with a different beta */
    case lowshelf:
        s->a0 =          (A + 1) + (A - 1) * cos(w0) + beta * alpha;
        s->a1 =    -2 * ((A - 1) + (A + 1) * cos(w0));
        s->a2 =          (A + 1) + (A - 1) * cos(w0) - beta * alpha;
        s->b0 =     A * ((A + 1) - (A - 1) * cos(w0) + beta * alpha);
        s->b1 = 2 * A * ((A - 1) - (A + 1) * cos(w0));
        s->b2 =     A * ((A + 1) - (A - 1) * cos(w0) - beta * alpha);
        break;
    case treble:
        beta = sqrt((A * A + 1) - (A - 1) * (A - 1));
        /* fall through: treble is a high shelf with a different beta */
    case highshelf:
        s->a0 =          (A + 1) - (A - 1) * cos(w0) + beta * alpha;
        s->a1 =     2 * ((A - 1) - (A + 1) * cos(w0));
        s->a2 =          (A + 1) - (A - 1) * cos(w0) - beta * alpha;
        s->b0 =     A * ((A + 1) + (A - 1) * cos(w0) + beta * alpha);
        s->b1 =-2 * A * ((A - 1) + (A + 1) * cos(w0));
        s->b2 =     A * ((A + 1) + (A - 1) * cos(w0) - beta * alpha);
        break;
    case bandpass:
        if (s->csg) {
            /* Constant skirt gain: peak gain = Q. */
            s->a0 =  1 + alpha;
            s->a1 = -2 * cos(w0);
            s->a2 =  1 - alpha;
            s->b0 =  sin(w0) / 2;
            s->b1 =  0;
            s->b2 = -sin(w0) / 2;
        } else {
            /* Constant 0 dB peak gain. */
            s->a0 =  1 + alpha;
            s->a1 = -2 * cos(w0);
            s->a2 =  1 - alpha;
            s->b0 =  alpha;
            s->b1 =  0;
            s->b2 = -alpha;
        }
        break;
    case bandreject:
        s->a0 =  1 + alpha;
        s->a1 = -2 * cos(w0);
        s->a2 =  1 - alpha;
        s->b0 =  1;
        s->b1 = -2 * cos(w0);
        s->b2 =  1;
        break;
    case lowpass:
        if (s->poles == 1) {
            /* Single-pole RC-style low pass. */
            s->a0 = 1;
            s->a1 = -exp(-w0);
            s->a2 = 0;
            s->b0 = 1 + s->a1;
            s->b1 = 0;
            s->b2 = 0;
        } else {
            s->a0 =  1 + alpha;
            s->a1 = -2 * cos(w0);
            s->a2 =  1 - alpha;
            s->b0 = (1 - cos(w0)) / 2;
            s->b1 =  1 - cos(w0);
            s->b2 = (1 - cos(w0)) / 2;
        }
        break;
    case highpass:
        if (s->poles == 1) {
            /* Single-pole RC-style high pass. */
            s->a0 = 1;
            s->a1 = -exp(-w0);
            s->a2 = 0;
            s->b0 = (1 - s->a1) / 2;
            s->b1 = -s->b0;
            s->b2 = 0;
        } else {
            s->a0 =   1 + alpha;
            s->a1 =  -2 * cos(w0);
            s->a2 =   1 - alpha;
            s->b0 =  (1 + cos(w0)) / 2;
            s->b1 = -(1 + cos(w0));
            s->b2 =  (1 + cos(w0)) / 2;
        }
        break;
    case allpass:
        switch (s->order) {
        case 1:
            /* First-order all pass from the bilinear-transform constant K. */
            s->a0 = 1.;
            s->a1 = -(1. - K) / (1. + K);
            s->a2 = 0.;
            s->b0 = s->a1;
            s->b1 = s->a0;
            s->b2 = 0.;
            break;
        case 2:
            s->a0 =  1 + alpha;
            s->a1 = -2 * cos(w0);
            s->a2 =  1 - alpha;
            s->b0 =  1 - alpha;
            s->b1 = -2 * cos(w0);
            s->b2 =  1 + alpha;
            break;
        }
        break;
    default:
        av_assert0(0);
    }
    av_log(ctx, AV_LOG_VERBOSE, "a=%f %f %f:b=%f %f %f\n", s->a0, s->a1, s->a2, s->b0, s->b1, s->b2);
    /* Normalize all coefficients by a0 (a0 becomes 1). */
    s->a1 /= s->a0;
    s->a2 /= s->a0;
    s->b0 /= s->a0;
    s->b1 /= s->a0;
    s->b2 /= s->a0;
    s->a0 /= s->a0;
    /* Optionally rescale the numerator for unity gain at DC. */
    if (s->normalize && fabs(s->b0 + s->b1 + s->b2) > 1e-6) {
        double factor = (s->a0 + s->a1 + s->a2) / (s->b0 + s->b1 + s->b2);
        s->b0 *= factor;
        s->b1 *= factor;
        s->b2 *= factor;
    }
    /* One state cache entry per channel; cleared only on reset so that
     * runtime parameter changes keep the filter state continuous. */
    s->cache = av_realloc_f(s->cache, sizeof(ChanCache), inlink->channels);
    if (!s->cache)
        return AVERROR(ENOMEM);
    if (reset)
        memset(s->cache, 0, sizeof(ChanCache) * inlink->channels);
    switch (inlink->format) {
    case AV_SAMPLE_FMT_S16P: s->filter = biquad_s16; break;
    case AV_SAMPLE_FMT_S32P: s->filter = biquad_s32; break;
    case AV_SAMPLE_FMT_FLTP: s->filter = biquad_flt; break;
    case AV_SAMPLE_FMT_DBLP: s->filter = biquad_dbl; break;
    default: av_assert0(0);
    }
    s->block_align = av_get_bytes_per_sample(inlink->format);
    return 0;
}
/* Output-link configuration: compute coefficients and reset the
 * per-channel filter state. */
static int config_output(AVFilterLink *outlink)
{
    return config_filter(outlink, 1);
}
/* Argument bundle passed to the slice-threaded channel worker. */
typedef struct ThreadData {
    AVFrame *in, *out;
} ThreadData;
/* Slice-threaded worker: filter the subset of channels assigned to this
 * job; channels excluded by the "channels" mask are copied through. */
static int filter_channel(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    BiquadsContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    ThreadData *td = arg;
    AVFrame *in  = td->in;
    AVFrame *out = td->out;
    const int ch_start = (in->channels * jobnr) / nb_jobs;
    const int ch_end   = (in->channels * (jobnr + 1)) / nb_jobs;

    for (int ch = ch_start; ch < ch_end; ch++) {
        ChanCache *cache = &s->cache[ch];

        /* Pass through channels not selected for filtering. */
        if (!(av_channel_layout_extract_channel(inlink->channel_layout, ch) & s->channels)) {
            if (in != out)
                memcpy(out->extended_data[ch], in->extended_data[ch],
                       in->nb_samples * s->block_align);
            continue;
        }

        s->filter(s, in->extended_data[ch], out->extended_data[ch], in->nb_samples,
                  &cache->i1, &cache->i2, &cache->o1, &cache->o2,
                  s->b0, s->b1, s->b2, s->a1, s->a2, &cache->clippings,
                  ctx->is_disabled);
    }

    return 0;
}
/* Filter one input frame: process in place when the frame is writable,
 * run the channel workers in parallel, then report clipping. */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    BiquadsContext *s = ctx->priv;
    ThreadData td;
    AVFrame *out;

    if (av_frame_is_writable(buf)) {
        out = buf;
    } else {
        out = ff_get_audio_buffer(outlink, buf->nb_samples);
        if (!out) {
            av_frame_free(&buf);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, buf);
    }

    td.in  = buf;
    td.out = out;
    ctx->internal->execute(ctx, filter_channel, &td, NULL,
                           FFMIN(outlink->channels, ff_filter_get_nb_threads(ctx)));

    /* Warn about and reset the per-channel clipping counters. */
    for (int ch = 0; ch < outlink->channels; ch++) {
        if (s->cache[ch].clippings > 0)
            av_log(ctx, AV_LOG_WARNING, "Channel %d clipping %d times. Please reduce gain.\n",
                   ch, s->cache[ch].clippings);
        s->cache[ch].clippings = 0;
    }

    if (out != buf)
        av_frame_free(&buf);

    return ff_filter_frame(outlink, out);
}
/* Runtime option change: apply the command, then re-derive coefficients
 * without resetting the per-channel filter state. */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    int err = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);

    if (err < 0)
        return err;

    return config_filter(ctx->outputs[0], 0);
}
/* Release the per-channel state cache. */
static av_cold void uninit(AVFilterContext *ctx)
{
    BiquadsContext *s = ctx->priv;
    av_freep(&s->cache);
}
/* Single audio input shared by all biquad-based filters. */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output; coefficients are (re)computed on link config. */
static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
    },
    { NULL }
};
#define OFFSET(x) offsetof(BiquadsContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* Declare the AVClass, an init wrapper that records the filter type, and
 * the AVFilter registration for one named filter built on the shared
 * biquad implementation. */
#define DEFINE_BIQUAD_FILTER(name_, description_)                       \
AVFILTER_DEFINE_CLASS(name_);                                           \
static av_cold int name_##_init(AVFilterContext *ctx)                   \
{                                                                       \
    BiquadsContext *s = ctx->priv;                                      \
    s->class = &name_##_class;                                          \
    s->filter_type = name_;                                             \
    return init(ctx);                                                   \
}                                                                       \
                                                         \
AVFilter ff_af_##name_ = {                               \
    .name          = #name_,                             \
    .description   = NULL_IF_CONFIG_SMALL(description_), \
    .priv_size     = sizeof(BiquadsContext),             \
    .init          = name_##_init,                       \
    .uninit        = uninit,                             \
    .query_formats = query_formats,                      \
    .inputs        = inputs,                             \
    .outputs       = outputs,                            \
    .priv_class    = &name_##_class,                     \
    .process_command = process_command,                  \
    .flags         = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL, \
}
#if CONFIG_EQUALIZER_FILTER
static const AVOption equalizer_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"t", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"k", "kHz", 0, AV_OPT_TYPE_CONST, {.i64=KHERTZ}, 0, 0, FLAGS, "width_type"},
{"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 99999, FLAGS},
{"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 99999, FLAGS},
{"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"m", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"channels", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"c", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"normalize", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{"n", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{NULL}
};
DEFINE_BIQUAD_FILTER(equalizer, "Apply two-pole peaking equalization (EQ) filter.");
#endif /* CONFIG_EQUALIZER_FILTER */
#if CONFIG_BASS_FILTER
static const AVOption bass_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"t", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"k", "kHz", 0, AV_OPT_TYPE_CONST, {.i64=KHERTZ}, 0, 0, FLAGS, "width_type"},
{"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"m", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"channels", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"c", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"normalize", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{"n", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{NULL}
};
DEFINE_BIQUAD_FILTER(bass, "Boost or cut lower frequencies.");
#endif /* CONFIG_BASS_FILTER */
#if CONFIG_TREBLE_FILTER
static const AVOption treble_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"t", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"k", "kHz", 0, AV_OPT_TYPE_CONST, {.i64=KHERTZ}, 0, 0, FLAGS, "width_type"},
{"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"w", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"g", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
{"mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"m", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"channels", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"c", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"normalize", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{"n", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{NULL}
};
DEFINE_BIQUAD_FILTER(treble, "Boost or cut upper frequencies.");
#endif /* CONFIG_TREBLE_FILTER */
#if CONFIG_BANDPASS_FILTER
static const AVOption bandpass_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"t", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"k", "kHz", 0, AV_OPT_TYPE_CONST, {.i64=KHERTZ}, 0, 0, FLAGS, "width_type"},
{"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"csg", "use constant skirt gain", OFFSET(csg), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{"mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"m", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"channels", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"c", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"normalize", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{"n", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{NULL}
};
DEFINE_BIQUAD_FILTER(bandpass, "Apply a two-pole Butterworth band-pass filter.");
#endif /* CONFIG_BANDPASS_FILTER */
#if CONFIG_BANDREJECT_FILTER
static const AVOption bandreject_options[] = {
{"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"t", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"k", "kHz", 0, AV_OPT_TYPE_CONST, {.i64=KHERTZ}, 0, 0, FLAGS, "width_type"},
{"width", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"w", "set band-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
{"mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"m", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"channels", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"c", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"normalize", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{"n", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{NULL}
};
DEFINE_BIQUAD_FILTER(bandreject, "Apply a two-pole Butterworth band-reject filter.");
#endif /* CONFIG_BANDREJECT_FILTER */
#if CONFIG_LOWPASS_FILTER
static const AVOption lowpass_options[] = {
{"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS},
{"f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=500}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"t", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"k", "kHz", 0, AV_OPT_TYPE_CONST, {.i64=KHERTZ}, 0, 0, FLAGS, "width_type"},
{"width", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
{"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
{"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, AF},
{"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, AF},
{"mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"m", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"channels", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"c", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"normalize", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{"n", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{NULL}
};
DEFINE_BIQUAD_FILTER(lowpass, "Apply a low-pass filter with 3dB point frequency.");
#endif /* CONFIG_LOWPASS_FILTER */
#if CONFIG_HIGHPASS_FILTER
static const AVOption highpass_options[] = {
{"frequency", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
{"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"t", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
{"h", "Hz", 0, AV_OPT_TYPE_CONST, {.i64=HERTZ}, 0, 0, FLAGS, "width_type"},
{"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
{"o", "octave", 0, AV_OPT_TYPE_CONST, {.i64=OCTAVE}, 0, 0, FLAGS, "width_type"},
{"s", "slope", 0, AV_OPT_TYPE_CONST, {.i64=SLOPE}, 0, 0, FLAGS, "width_type"},
{"k", "kHz", 0, AV_OPT_TYPE_CONST, {.i64=KHERTZ}, 0, 0, FLAGS, "width_type"},
{"width", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
{"w", "set width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.707}, 0, 99999, FLAGS},
{"poles", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, AF},
{"p", "set number of poles", OFFSET(poles), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, AF},
{"mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"m", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
{"channels", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"c", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
{"normalize", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{"n", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
{NULL}
};
DEFINE_BIQUAD_FILTER(highpass, "Apply a high-pass filter with 3dB point frequency.");
#endif /* CONFIG_HIGHPASS_FILTER */
#if CONFIG_ALLPASS_FILTER
/* Options for the "allpass" filter: a two-pole all-pass biquad.  Magnitude
 * response is flat; only the phase rotates around the centre frequency.
 * OFFSET() and FLAGS refer to the shared biquads context defined earlier in
 * this file.  Constants attached to the "width_type" unit are only matched
 * as values of that option, so the unit const "o" (octave) does not clash
 * with the top-level "o" alias of "order". */
static const AVOption allpass_options[] = {
    {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"f",         "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=HERTZ}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
    {"t",          "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=HERTZ}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
    {"h", "Hz",       0, AV_OPT_TYPE_CONST, {.i64=HERTZ},   0, 0, FLAGS, "width_type"},
    {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
    {"o", "octave",   0, AV_OPT_TYPE_CONST, {.i64=OCTAVE},  0, 0, FLAGS, "width_type"},
    {"s", "slope",    0, AV_OPT_TYPE_CONST, {.i64=SLOPE},   0, 0, FLAGS, "width_type"},
    {"k", "kHz",      0, AV_OPT_TYPE_CONST, {.i64=KHERTZ},  0, 0, FLAGS, "width_type"},
    {"width", "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS},
    {"w",     "set filter-width", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=707.1}, 0, 99999, FLAGS},
    {"mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
    {"m",   "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
    {"channels", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
    {"c",        "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
    {"normalize", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"n",         "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"order", "set filter order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
    {"o",     "set filter order", OFFSET(order), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, FLAGS},
    {NULL}
};

/* DEFINE_BIQUAD_FILTER is defined earlier in this file; it instantiates the
 * AVFilter plus its class from the *_options table above. */
DEFINE_BIQUAD_FILTER(allpass, "Apply a two-pole all-pass filter.");
#endif  /* CONFIG_ALLPASS_FILTER */
#if CONFIG_LOWSHELF_FILTER
/* Options for the "lowshelf" filter: boost/cut (by `gain` dB) everything
 * below the centre frequency.  Defaults differ from allpass: 100 Hz centre,
 * Q-factor width type, 0.5 transition steepness. */
static const AVOption lowshelf_options[] = {
    {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
    {"f",         "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=100}, 0, 999999, FLAGS},
    {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
    {"t",          "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
    {"h", "Hz",       0, AV_OPT_TYPE_CONST, {.i64=HERTZ},   0, 0, FLAGS, "width_type"},
    {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
    {"o", "octave",   0, AV_OPT_TYPE_CONST, {.i64=OCTAVE},  0, 0, FLAGS, "width_type"},
    {"s", "slope",    0, AV_OPT_TYPE_CONST, {.i64=SLOPE},   0, 0, FLAGS, "width_type"},
    {"k", "kHz",      0, AV_OPT_TYPE_CONST, {.i64=KHERTZ},  0, 0, FLAGS, "width_type"},
    {"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
    {"w",     "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
    {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
    {"g",    "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
    {"mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
    {"m",   "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
    {"channels", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
    {"c",        "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
    {"normalize", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"n",         "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {NULL}
};

DEFINE_BIQUAD_FILTER(lowshelf, "Apply a low shelf filter.");
#endif  /* CONFIG_LOWSHELF_FILTER */
#if CONFIG_HIGHSHELF_FILTER
/* Options for the "highshelf" filter: boost/cut (by `gain` dB) everything
 * above the centre frequency.  Identical table to lowshelf except for the
 * 3000 Hz default centre frequency. */
static const AVOption highshelf_options[] = {
    {"frequency", "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"f",         "set central frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=3000}, 0, 999999, FLAGS},
    {"width_type", "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
    {"t",          "set filter-width type", OFFSET(width_type), AV_OPT_TYPE_INT, {.i64=QFACTOR}, HERTZ, NB_WTYPE-1, FLAGS, "width_type"},
    {"h", "Hz",       0, AV_OPT_TYPE_CONST, {.i64=HERTZ},   0, 0, FLAGS, "width_type"},
    {"q", "Q-Factor", 0, AV_OPT_TYPE_CONST, {.i64=QFACTOR}, 0, 0, FLAGS, "width_type"},
    {"o", "octave",   0, AV_OPT_TYPE_CONST, {.i64=OCTAVE},  0, 0, FLAGS, "width_type"},
    {"s", "slope",    0, AV_OPT_TYPE_CONST, {.i64=SLOPE},   0, 0, FLAGS, "width_type"},
    {"k", "kHz",      0, AV_OPT_TYPE_CONST, {.i64=KHERTZ},  0, 0, FLAGS, "width_type"},
    {"width", "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
    {"w",     "set shelf transition steep", OFFSET(width), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0, 99999, FLAGS},
    {"gain", "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
    {"g",    "set gain", OFFSET(gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -900, 900, FLAGS},
    {"mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
    {"m",   "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
    {"channels", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
    {"c",        "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
    {"normalize", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"n",         "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {NULL}
};

DEFINE_BIQUAD_FILTER(highshelf, "Apply a high shelf filter.");
#endif  /* CONFIG_HIGHSHELF_FILTER */
#if CONFIG_BIQUAD_FILTER
/* Options for the raw "biquad" filter: the user supplies the transfer
 * function coefficients directly (a0..a2 denominator, b0..b2 numerator);
 * no frequency/width parametrisation is involved. */
static const AVOption biquad_options[] = {
    {"a0", NULL, OFFSET(a0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, INT32_MIN, INT32_MAX, FLAGS},
    {"a1", NULL, OFFSET(a1), AV_OPT_TYPE_DOUBLE, {.dbl=0}, INT32_MIN, INT32_MAX, FLAGS},
    {"a2", NULL, OFFSET(a2), AV_OPT_TYPE_DOUBLE, {.dbl=0}, INT32_MIN, INT32_MAX, FLAGS},
    {"b0", NULL, OFFSET(b0), AV_OPT_TYPE_DOUBLE, {.dbl=0}, INT32_MIN, INT32_MAX, FLAGS},
    {"b1", NULL, OFFSET(b1), AV_OPT_TYPE_DOUBLE, {.dbl=0}, INT32_MIN, INT32_MAX, FLAGS},
    {"b2", NULL, OFFSET(b2), AV_OPT_TYPE_DOUBLE, {.dbl=0}, INT32_MIN, INT32_MAX, FLAGS},
    {"mix", "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
    {"m",   "set mix", OFFSET(mix), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, FLAGS},
    {"channels", "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
    {"c",        "set channels to filter", OFFSET(channels), AV_OPT_TYPE_CHANNEL_LAYOUT, {.i64=-1}, INT64_MIN, INT64_MAX, FLAGS},
    {"normalize", "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {"n",         "normalize coefficients", OFFSET(normalize), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS},
    {NULL}
};

DEFINE_BIQUAD_FILTER(biquad, "Apply a biquad IIR filter with the given coefficients.");
#endif  /* CONFIG_BIQUAD_FILTER */

224
externals/ffmpeg/libavfilter/af_bs2b.c vendored Executable file
View File

@@ -0,0 +1,224 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Bauer stereo-to-binaural filter
*/
#include <bs2b.h>
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
/* Signature shared by all bs2b_cross_feed_* entry points: process n stereo
 * sample pairs in place. */
typedef void (*filter_func)(t_bs2bdp bs2bdp, uint8_t *sample, int n);

typedef struct Bs2bContext {
    const AVClass *class;
    int profile;          // crossfeed level preset (BS2B_*_CLEVEL)
    int fcut;             // optional cut-frequency override (Hz), 0 = keep profile
    int feed;             // optional feed-level override, 0 = keep profile
    t_bs2bdp bs2bp;       // opaque libbs2b state, owned by this filter
    filter_func filter;   // per-sample-format crossfeed routine, set in config_output()
} Bs2bContext;
#define OFFSET(x) offsetof(Bs2bContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM

/* User options: a crossfeed profile, optionally overridden by explicit
 * fcut/feed values (0 means "use the profile's value"). */
static const AVOption bs2b_options[] = {
    { "profile", "Apply a pre-defined crossfeed level",
      OFFSET(profile), AV_OPT_TYPE_INT, { .i64 = BS2B_DEFAULT_CLEVEL }, 0, INT_MAX, A, "profile" },
    { "default", "default profile",    0, AV_OPT_TYPE_CONST, { .i64 = BS2B_DEFAULT_CLEVEL }, 0, 0, A, "profile" },
    { "cmoy",    "Chu Moy circuit",    0, AV_OPT_TYPE_CONST, { .i64 = BS2B_CMOY_CLEVEL    }, 0, 0, A, "profile" },
    { "jmeier",  "Jan Meier circuit",  0, AV_OPT_TYPE_CONST, { .i64 = BS2B_JMEIER_CLEVEL  }, 0, 0, A, "profile" },
    { "fcut", "Set cut frequency (in Hz)",
      OFFSET(fcut), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, BS2B_MAXFCUT, A },
    { "feed", "Set feed level (in Hz)",
      OFFSET(feed), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, BS2B_MAXFEED, A },
    { NULL },
};

AVFILTER_DEFINE_CLASS(bs2b);
/**
 * Allocate the libbs2b instance and apply the configured crossfeed settings.
 * Returns 0 on success, AVERROR(ENOMEM) if the library state cannot be
 * allocated.
 */
static av_cold int init(AVFilterContext *ctx)
{
    Bs2bContext *s = ctx->priv;

    s->bs2bp = bs2b_open();
    if (!s->bs2bp)
        return AVERROR(ENOMEM);

    /* The profile always applies; a non-zero fcut/feed overrides it. */
    bs2b_set_level(s->bs2bp, s->profile);
    if (s->fcut)
        bs2b_set_level_fcut(s->bs2bp, s->fcut);
    if (s->feed)
        bs2b_set_level_feed(s->bs2bp, s->feed);

    return 0;
}
/** Release the libbs2b instance allocated in init(), if any. */
static av_cold void uninit(AVFilterContext *ctx)
{
    Bs2bContext *s = ctx->priv;

    /* init() may have failed before bs2b_open() succeeded */
    if (s->bs2bp)
        bs2b_close(s->bs2bp);
}
/**
 * Restrict negotiation to what libbs2b can process: stereo only, and the
 * five sample formats that have bs2b_cross_feed_* entry points.  Any sample
 * rate is accepted here; the BS2B_MINSRATE..BS2B_MAXSRATE range is enforced
 * later in config_output().  The ff_set_common_* helpers take ownership of
 * the lists passed to them.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_U8,
        AV_SAMPLE_FMT_S16,
        AV_SAMPLE_FMT_S32,
        AV_SAMPLE_FMT_FLT,
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE,
    };
    int ret;

    /* crossfeed is inherently a stereo-in, stereo-out operation */
    if (ff_add_channel_layout(&layouts, AV_CH_LAYOUT_STEREO) != 0)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/**
 * Run the crossfeed over one audio frame.
 *
 * libbs2b processes samples in place, so a writable frame is filtered
 * directly; otherwise the data is copied into a fresh output buffer first.
 * Consumes @p frame on every path; returns 0 or a negative AVERROR code.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    int ret;
    AVFrame *out_frame;

    Bs2bContext     *bs2b    = inlink->dst->priv;
    AVFilterLink    *outlink = inlink->dst->outputs[0];

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(outlink, frame->nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        /* BUGFIX: the av_frame_copy() result was previously ignored; it can
         * fail (e.g. on mismatched buffer geometry), which would have let a
         * partially-copied frame continue downstream. */
        ret = av_frame_copy(out_frame, frame);
        if (ret >= 0)
            ret = av_frame_copy_props(out_frame, frame);
        if (ret < 0) {
            av_frame_free(&out_frame);
            av_frame_free(&frame);
            return ret;
        }
    }

    /* interleaved stereo: one plane holds all sample pairs */
    bs2b->filter(bs2b->bs2bp, out_frame->extended_data[0], out_frame->nb_samples);

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(outlink, out_frame);
}
/**
 * Pick the libbs2b crossfeed routine matching the negotiated sample format
 * and hand the sample rate to the library.  Rates outside libbs2b's
 * supported range are rejected with ENOSYS; an unexpected format (which
 * query_formats should have prevented) is reported as AVERROR_BUG.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx    = outlink->src;
    Bs2bContext     *s      = ctx->priv;
    AVFilterLink    *inlink = ctx->inputs[0];
    int sample_rate         = inlink->sample_rate;

    switch (inlink->format) {
    case AV_SAMPLE_FMT_U8:  s->filter = (filter_func) bs2b_cross_feed_u8;  break;
    case AV_SAMPLE_FMT_S16: s->filter = (filter_func) bs2b_cross_feed_s16; break;
    case AV_SAMPLE_FMT_S32: s->filter = (filter_func) bs2b_cross_feed_s32; break;
    case AV_SAMPLE_FMT_FLT: s->filter = (filter_func) bs2b_cross_feed_f;   break;
    case AV_SAMPLE_FMT_DBL: s->filter = (filter_func) bs2b_cross_feed_d;   break;
    default:
        return AVERROR_BUG;
    }

    if (sample_rate < BS2B_MINSRATE || sample_rate > BS2B_MAXSRATE)
        return AVERROR(ENOSYS);

    bs2b_set_srate(s->bs2bp, sample_rate);

    return 0;
}
/* One stereo audio input; frames are processed as they arrive. */
static const AVFilterPad bs2b_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* One stereo audio output; format-dependent setup happens in config_output. */
static const AVFilterPad bs2b_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

/* Filter registration entry for "bs2b". */
AVFilter ff_af_bs2b = {
    .name           = "bs2b",
    .description    = NULL_IF_CONFIG_SMALL("Bauer stereo-to-binaural filter."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(Bs2bContext),
    .priv_class     = &bs2b_class,
    .init           = init,
    .uninit         = uninit,
    .inputs         = bs2b_inputs,
    .outputs        = bs2b_outputs,
};

420
externals/ffmpeg/libavfilter/af_channelmap.c vendored Executable file
View File

@@ -0,0 +1,420 @@
/*
* Copyright (c) 2012 Google, Inc.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* audio channel mapping filter
*/
#include <ctype.h>
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
/* One entry of the channel map; depending on the parse mode either the
 * *_channel (layout bit) or the *_channel_idx (plane index) member is
 * filled at init time, and the missing index is resolved later. */
struct ChannelMap {
    uint64_t in_channel;      // input channel as an AV_CH_* layout bit (0 if unused)
    uint64_t out_channel;     // output channel as an AV_CH_* layout bit (0 if unused)
    int in_channel_idx;       // input plane index, resolved in config_input for STR modes
    int out_channel_idx;      // output plane index, resolved in init for *_STR modes
};

/* How the "map" option string was written; decided by sniffing the first
 * entry in channelmap_init(). */
enum MappingMode {
    MAP_NONE,                 // no map given: identity mapping from channel_layout
    MAP_ONE_INT,              // "0|2|1": input indices in output order
    MAP_ONE_STR,              // "FL|FR": input channel names in output order
    MAP_PAIR_INT_INT,         // "0-2|1-0": input index -> output index
    MAP_PAIR_INT_STR,         // "0-FL": input index -> output channel name
    MAP_PAIR_STR_INT,         // "FL-0": input channel name -> output index
    MAP_PAIR_STR_STR          // "FL-FR": input name -> output name
};

#define MAX_CH 64

typedef struct ChannelMapContext {
    const AVClass *class;
    char *mapping_str;          // raw "map" option string
    char *channel_layout_str;   // raw "channel_layout" option string
    uint64_t output_layout;     // resolved output channel layout
    struct ChannelMap map[MAX_CH];
    int nch;                    // number of valid entries in map[]
    enum MappingMode mode;
} ChannelMapContext;
#define OFFSET(x) offsetof(ChannelMapContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

/* Both options are free-form strings; all parsing and validation happens in
 * channelmap_init(). */
static const AVOption channelmap_options[] = {
    { "map", "A comma-separated list of input channel numbers in output order.",
          OFFSET(mapping_str),        AV_OPT_TYPE_STRING, .flags = A|F },
    { "channel_layout", "Output channel layout.",
          OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, .flags = A|F },
    { NULL }
};
AVFILTER_DEFINE_CLASS(channelmap);
/**
 * Split @p message in place at the first occurrence of @p delim.
 * The delimiter is overwritten with a terminating NUL, so @p message becomes
 * the first token.  Returns a pointer to the remainder (just past the
 * delimiter), or NULL when the delimiter is not present.
 */
static char* split(char *message, char delim) {
    char *pos = strchr(message, delim);

    if (!pos)
        return NULL;

    *pos = '\0';
    return pos + 1;
}
/**
 * Parse one decimal channel index from the head of *map, up to @p delim.
 * On success stores the value in *ch and advances *map past the delimiter
 * (to NULL if the delimiter was the last separator).  Returns 0 on success,
 * AVERROR(EINVAL) on malformed input.
 */
static int get_channel_idx(char **map, int *ch, char delim, int max_ch)
{
    char *next;
    int len;
    int n = 0;

    if (!*map)
        return AVERROR(EINVAL);
    /* split() NUL-terminates the current token; the remainder goes to next */
    next = split(*map, delim);
    if (!next && delim == '-')
        return AVERROR(EINVAL);  /* '-' means a pair was expected: missing half */
    len = strlen(*map);
    /* the token must consist of digits only: %n must consume the whole token */
    sscanf(*map, "%d%n", ch, &n);
    if (n != len)
        return AVERROR(EINVAL);
    /* NOTE(review): the upper bound is inclusive ('> max_ch'), so max_ch
     * itself (MAX_CH == 64 at the call sites) is accepted even though plane
     * indices are 0-based — confirm callers intend 0..max_ch and not
     * 0..max_ch-1. */
    if (*ch < 0 || *ch > max_ch)
        return AVERROR(EINVAL);
    *map = next;
    return 0;
}
/**
 * Parse one channel *name* (e.g. "FL") from the head of *map, up to
 * @p delim.  Stores the corresponding AV_CH_* layout bit in *ch and
 * advances *map past the delimiter.  The parsed layout must describe
 * exactly one channel.  Returns 0 on success, AVERROR(EINVAL) otherwise.
 */
static int get_channel(char **map, uint64_t *ch, char delim)
{
    char *next = split(*map, delim);

    if (!next && delim == '-')
        return AVERROR(EINVAL);  /* pair syntax requires both halves */

    *ch = av_get_channel_layout(*map);
    if (av_get_channel_layout_nb_channels(*ch) != 1)
        return AVERROR(EINVAL);  /* name must denote a single channel */

    *map = next;
    return 0;
}
/**
 * Parse the "map" and "channel_layout" options into s->map / s->nch /
 * s->output_layout.
 *
 * Steps: (1) sniff the mapping mode from the first entry, (2) count the
 * '|'-separated entries, (3) parse each entry per the detected mode,
 * (4) derive or validate the output layout, (5) resolve output plane
 * indices for the modes that specified output channels by name.
 * Input plane indices for name-based modes are resolved later in
 * channelmap_config_input(), once the input layout is known.
 */
static av_cold int channelmap_init(AVFilterContext *ctx)
{
    ChannelMapContext *s = ctx->priv;
    char *mapping, separator = '|';
    int map_entries = 0;
    char buf[256];
    enum MappingMode mode;
    uint64_t out_ch_mask = 0;
    int i;

    mapping = s->mapping_str;

    /* --- (1) detect the mapping mode from the shape of the first entry --- */
    if (!mapping) {
        mode = MAP_NONE;
    } else {
        char *dash = strchr(mapping, '-');
        if (!dash) {  // short mapping: no explicit output side
            if (av_isdigit(*mapping))
                mode = MAP_ONE_INT;
            else
                mode = MAP_ONE_STR;
        } else if (av_isdigit(*mapping)) {
            if (av_isdigit(*(dash+1)))
                mode = MAP_PAIR_INT_INT;
            else
                mode = MAP_PAIR_INT_STR;
        } else {
            if (av_isdigit(*(dash+1)))
                mode = MAP_PAIR_STR_INT;
            else
                mode = MAP_PAIR_STR_STR;
        }
    }

    /* --- (2) count entries: one more than the number of separators --- */
    if (mode != MAP_NONE) {
        char *sep = mapping;
        map_entries = 1;
        while ((sep = strchr(sep, separator))) {
            if (*++sep)  // Allow trailing comma
                map_entries++;
        }
    }

    if (map_entries > MAX_CH) {
        av_log(ctx, AV_LOG_ERROR, "Too many channels mapped: '%d'.\n", map_entries);
        return AVERROR(EINVAL);
    }

    /* --- (3) parse each entry; get_channel*/get_channel_idx advance `mapping` --- */
    for (i = 0; i < map_entries; i++) {
        int in_ch_idx = -1, out_ch_idx = -1;
        uint64_t in_ch = 0, out_ch = 0;
        static const char err[] = "Failed to parse channel map\n";
        switch (mode) {
        case MAP_ONE_INT:
            if (get_channel_idx(&mapping, &in_ch_idx, separator, MAX_CH) < 0) {
                av_log(ctx, AV_LOG_ERROR, err);
                return AVERROR(EINVAL);
            }
            s->map[i].in_channel_idx  = in_ch_idx;
            s->map[i].out_channel_idx = i;  // outputs in listed order
            break;
        case MAP_ONE_STR:
            if (get_channel(&mapping, &in_ch, separator) < 0) {
                av_log(ctx, AV_LOG_ERROR, err);
                return AVERROR(EINVAL);
            }
            s->map[i].in_channel      = in_ch;
            s->map[i].out_channel_idx = i;
            break;
        case MAP_PAIR_INT_INT:
            if (get_channel_idx(&mapping, &in_ch_idx, '-', MAX_CH) < 0 ||
                get_channel_idx(&mapping, &out_ch_idx, separator, MAX_CH) < 0) {
                av_log(ctx, AV_LOG_ERROR, err);
                return AVERROR(EINVAL);
            }
            s->map[i].in_channel_idx  = in_ch_idx;
            s->map[i].out_channel_idx = out_ch_idx;
            break;
        case MAP_PAIR_INT_STR:
            /* out_ch & out_ch_mask rejects mapping two inputs to one output */
            if (get_channel_idx(&mapping, &in_ch_idx, '-', MAX_CH) < 0 ||
                get_channel(&mapping, &out_ch, separator) < 0 ||
                out_ch & out_ch_mask) {
                av_log(ctx, AV_LOG_ERROR, err);
                return AVERROR(EINVAL);
            }
            s->map[i].in_channel_idx  = in_ch_idx;
            s->map[i].out_channel     = out_ch;
            out_ch_mask |= out_ch;
            break;
        case MAP_PAIR_STR_INT:
            if (get_channel(&mapping, &in_ch, '-') < 0 ||
                get_channel_idx(&mapping, &out_ch_idx, separator, MAX_CH) < 0) {
                av_log(ctx, AV_LOG_ERROR, err);
                return AVERROR(EINVAL);
            }
            s->map[i].in_channel      = in_ch;
            s->map[i].out_channel_idx = out_ch_idx;
            break;
        case MAP_PAIR_STR_STR:
            if (get_channel(&mapping, &in_ch, '-') < 0 ||
                get_channel(&mapping, &out_ch, separator) < 0 ||
                out_ch & out_ch_mask) {
                av_log(ctx, AV_LOG_ERROR, err);
                return AVERROR(EINVAL);
            }
            s->map[i].in_channel  = in_ch;
            s->map[i].out_channel = out_ch;
            out_ch_mask |= out_ch;
            break;
        }
    }
    s->mode          = mode;
    s->nch           = map_entries;
    /* fall back to the default layout for the mapped channel count when no
     * output channels were named */
    s->output_layout = out_ch_mask ? out_ch_mask :
                       av_get_default_channel_layout(map_entries);

    /* --- (4) validate/override with the explicit channel_layout option --- */
    if (s->channel_layout_str) {
        uint64_t fmt;
        if ((fmt = av_get_channel_layout(s->channel_layout_str)) == 0) {
            av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout: '%s'.\n",
                   s->channel_layout_str);
            return AVERROR(EINVAL);
        }
        if (mode == MAP_NONE) {
            /* no map given: identity-map every channel of the layout */
            int i;
            s->nch = av_get_channel_layout_nb_channels(fmt);
            for (i = 0; i < s->nch; i++) {
                s->map[i].in_channel_idx  = i;
                s->map[i].out_channel_idx = i;
            }
        } else if (out_ch_mask && out_ch_mask != fmt) {
            av_get_channel_layout_string(buf, sizeof(buf), 0, out_ch_mask);
            av_log(ctx, AV_LOG_ERROR,
                   "Output channel layout '%s' does not match the list of channel mapped: '%s'.\n",
                   s->channel_layout_str, buf);
            return AVERROR(EINVAL);
        } else if (s->nch != av_get_channel_layout_nb_channels(fmt)) {
            av_log(ctx, AV_LOG_ERROR,
                   "Output channel layout %s does not match the number of channels mapped %d.\n",
                   s->channel_layout_str, s->nch);
            return AVERROR(EINVAL);
        }
        s->output_layout = fmt;
    }

    if (!s->output_layout) {
        av_log(ctx, AV_LOG_ERROR, "Output channel layout is not set and "
               "cannot be guessed from the maps.\n");
        return AVERROR(EINVAL);
    }

    /* --- (5) turn named output channels into plane indices --- */
    if (mode == MAP_PAIR_INT_STR || mode == MAP_PAIR_STR_STR) {
        for (i = 0; i < s->nch; i++) {
            s->map[i].out_channel_idx = av_get_channel_layout_channel_index(
                s->output_layout, s->map[i].out_channel);
        }
    }

    return 0;
}
/**
 * Negotiate formats: planar sample formats only (the remap is pointer
 * shuffling on per-channel planes), any sample rate, any input channel
 * layout, but exactly s->output_layout on the output.  The ff_* helpers
 * take ownership of the lists on success; the fail path frees the one list
 * that may still be unowned.
 */
static int channelmap_query_formats(AVFilterContext *ctx)
{
    ChannelMapContext *s = ctx->priv;
    AVFilterChannelLayouts *layouts;
    AVFilterChannelLayouts *channel_layouts = NULL;
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if ((ret = ff_add_channel_layout         (&channel_layouts, s->output_layout                 )) < 0 ||
        (ret = ff_set_common_formats         (ctx             , ff_planar_sample_fmts()          )) < 0 ||
        (ret = ff_set_common_samplerates     (ctx             , ff_all_samplerates()             )) < 0 ||
        (ret = ff_channel_layouts_ref        (layouts         , &ctx->inputs[0]->out_channel_layouts)) < 0 ||
        (ret = ff_channel_layouts_ref        (channel_layouts , &ctx->outputs[0]->in_channel_layouts)) < 0)
        goto fail;

    return 0;
fail:
    if (layouts)
        av_freep(&layouts->channel_layouts);
    av_freep(&layouts);
    return ret;
}
/**
 * Remap the audio planes of @p buf in place (the input pad sets
 * needs_writable, so the frame is exclusively ours).  With planar audio
 * each channel has its own plane pointer, so remapping copies no sample
 * data — only pointers are shuffled.
 */
static int channelmap_filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    const ChannelMapContext *s = ctx->priv;
    const int nch_in = inlink->channels;
    const int nch_out = s->nch;
    int ch;
    uint8_t *source_planes[MAX_CH];

    /* snapshot the input plane pointers before overwriting them below */
    memcpy(source_planes, buf->extended_data,
           nch_in * sizeof(source_planes[0]));

    if (nch_out > nch_in) {
        /* growing the channel count may require extended_data to live
         * outside the fixed-size data[] array embedded in AVFrame */
        if (nch_out > FF_ARRAY_ELEMS(buf->data)) {
            uint8_t **new_extended_data =
                av_mallocz_array(nch_out, sizeof(*buf->extended_data));
            if (!new_extended_data) {
                av_frame_free(&buf);
                return AVERROR(ENOMEM);
            }
            if (buf->extended_data == buf->data) {
                buf->extended_data = new_extended_data;
            } else {
                av_free(buf->extended_data);
                buf->extended_data = new_extended_data;
            }
        } else if (buf->extended_data != buf->data) {
            /* output fits in data[] again: drop the separate allocation */
            av_free(buf->extended_data);
            buf->extended_data = buf->data;
        }
    }

    /* apply the map: each output plane points at its source input plane */
    for (ch = 0; ch < nch_out; ch++) {
        buf->extended_data[s->map[ch].out_channel_idx] =
            source_planes[s->map[ch].in_channel_idx];
    }

    /* keep the embedded data[] array in sync with extended_data */
    if (buf->data != buf->extended_data)
        memcpy(buf->data, buf->extended_data,
               FFMIN(FF_ARRAY_ELEMS(buf->data), nch_out) * sizeof(buf->data[0]));

    buf->channel_layout = outlink->channel_layout;
    buf->channels = outlink->channels;

    return ff_filter_frame(outlink, buf);
}
/**
 * Now that the input channel layout is known: resolve name-based input
 * channels to plane indices, and verify every mapped input index actually
 * exists in the negotiated layout.  All entries are checked before
 * returning so every problem is logged, not just the first.
 */
static int channelmap_config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ChannelMapContext *s = ctx->priv;
    int nb_channels = inlink->channels;
    int i, err = 0;
    const char *channel_name;
    char layout_name[256];

    for (i = 0; i < s->nch; i++) {
        struct ChannelMap *m = &s->map[i];

        if (s->mode == MAP_PAIR_STR_INT || s->mode == MAP_PAIR_STR_STR) {
            /* name-based input: turn the AV_CH_* bit into a plane index
             * (negative if the channel is absent from the input layout) */
            m->in_channel_idx = av_get_channel_layout_channel_index(
                inlink->channel_layout, m->in_channel);
        }

        if (m->in_channel_idx < 0 || m->in_channel_idx >= nb_channels) {
            av_get_channel_layout_string(layout_name, sizeof(layout_name),
                                         nb_channels, inlink->channel_layout);
            if (m->in_channel) {
                channel_name = av_get_channel_name(m->in_channel);
                av_log(ctx, AV_LOG_ERROR,
                       "input channel '%s' not available from input layout '%s'\n",
                       channel_name, layout_name);
            } else {
                av_log(ctx, AV_LOG_ERROR,
                       "input channel #%d not available from input layout '%s'\n",
                       m->in_channel_idx, layout_name);
            }
            err = AVERROR(EINVAL);
        }
    }

    return err;
}
/* Input pad: needs_writable lets filter_frame remap planes in place. */
static const AVFilterPad avfilter_af_channelmap_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = channelmap_filter_frame,
        .config_props   = channelmap_config_input,
        .needs_writable = 1,
    },
    { NULL }
};

static const AVFilterPad avfilter_af_channelmap_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO
    },
    { NULL }
};

/* Filter registration entry for "channelmap". */
AVFilter ff_af_channelmap = {
    .name          = "channelmap",
    .description   = NULL_IF_CONFIG_SMALL("Remap audio channels."),
    .init          = channelmap_init,
    .query_formats = channelmap_query_formats,
    .priv_size     = sizeof(ChannelMapContext),
    .priv_class    = &channelmap_class,
    .inputs        = avfilter_af_channelmap_inputs,
    .outputs       = avfilter_af_channelmap_outputs,
};

181
externals/ffmpeg/libavfilter/af_channelsplit.c vendored Executable file
View File

@@ -0,0 +1,181 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Channel split filter
*
* Split an audio stream into per-channel streams.
*/
#include "libavutil/attributes.h"
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
typedef struct ChannelSplitContext {
    const AVClass *class;

    uint64_t channel_layout;    // parsed input layout (from channel_layout_str)
    char    *channel_layout_str; // "channel_layout" option, default "stereo"
    char    *channels_str;       // "channels" option: "all" or a layout of channels to extract

    int      map[64];            // output pad i -> input channel index map[i]
} ChannelSplitContext;
#define OFFSET(x) offsetof(ChannelSplitContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

/* "channel_layout" declares the expected input; "channels" optionally
 * restricts which of those channels get an output pad ("all" = every one). */
static const AVOption channelsplit_options[] = {
    { "channel_layout", "Input channel layout.", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, { .str = "stereo" }, .flags = A|F },
    { "channels",        "Channels to extract.", OFFSET(channels_str),       AV_OPT_TYPE_STRING, { .str = "all" },    .flags = A|F },
    { NULL }
};

AVFILTER_DEFINE_CLASS(channelsplit);
/**
 * Parse the two options, build map[] (output pad -> input channel index),
 * and create one dynamic output pad per extracted channel, named after the
 * channel.  Note the mixed error style: the first failure uses `goto fail`,
 * later ones `return ret` directly — equivalent here since the fail label
 * only returns ret.
 */
static av_cold int init(AVFilterContext *ctx)
{
    ChannelSplitContext *s = ctx->priv;
    uint64_t channel_layout;
    int nb_channels;
    int all = 0, ret = 0, i;

    if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
               s->channel_layout_str);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!strcmp(s->channels_str, "all")) {
        nb_channels = av_get_channel_layout_nb_channels(s->channel_layout);
        channel_layout = s->channel_layout;
        all = 1;
    } else {
        /* extract only the channels named in channels_str */
        if ((ret = av_get_extended_channel_layout(s->channels_str, &channel_layout, &nb_channels)) < 0)
            return ret;
    }

    for (i = 0; i < nb_channels; i++) {
        uint64_t channel = av_channel_layout_extract_channel(channel_layout, i);
        AVFilterPad pad  = { 0 };

        pad.type = AVMEDIA_TYPE_AUDIO;
        pad.name = av_get_channel_name(channel);

        if (all) {
            s->map[i] = i;
        } else {
            /* locate this channel within the full input layout */
            if ((ret = av_get_channel_layout_channel_index(s->channel_layout, channel)) < 0) {
                av_log(ctx, AV_LOG_ERROR, "Channel name '%s' not present in channel layout '%s'.\n",
                       av_get_channel_name(channel), s->channel_layout_str);
                return ret;
            }

            s->map[i] = ret;
        }

        if ((ret = ff_insert_outpad(ctx, i, &pad)) < 0) {
            return ret;
        }
    }

fail:
    return ret;
}
/**
 * Negotiate planar sample formats and any sample rate on all pads; pin the
 * input to the declared layout and each output pad to its single extracted
 * channel.
 */
static int query_formats(AVFilterContext *ctx)
{
    ChannelSplitContext *s = ctx->priv;
    AVFilterChannelLayouts *in_layouts = NULL;
    int i, ret;

    if ((ret = ff_set_common_formats(ctx, ff_planar_sample_fmts())) < 0 ||
        (ret = ff_set_common_samplerates(ctx, ff_all_samplerates())) < 0)
        return ret;

    if ((ret = ff_add_channel_layout(&in_layouts, s->channel_layout)) < 0 ||
        (ret = ff_channel_layouts_ref(in_layouts, &ctx->inputs[0]->out_channel_layouts)) < 0)
        return ret;

    for (i = 0; i < ctx->nb_outputs; i++) {
        AVFilterChannelLayouts *out_layouts = NULL;
        /* each output carries exactly the one channel it extracts */
        uint64_t channel = av_channel_layout_extract_channel(s->channel_layout, s->map[i]);

        if ((ret = ff_add_channel_layout(&out_layouts, channel)) < 0 ||
            (ret = ff_channel_layouts_ref(out_layouts, &ctx->outputs[i]->in_channel_layouts)) < 0)
            return ret;
    }

    return 0;
}
/**
 * For each output pad, clone the input frame (reference-counted, no sample
 * copy), alias plane 0 of the clone onto the plane of the selected input
 * channel, and mark it mono.  Stops at the first downstream error; the
 * input frame is released in every case.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    ChannelSplitContext *s = ctx->priv;
    int i, ret = 0;

    for (i = 0; i < ctx->nb_outputs; i++) {
        AVFrame *buf_out = av_frame_clone(buf);

        if (!buf_out) {
            ret = AVERROR(ENOMEM);
            break;
        }
        /* redirect plane 0 to the mapped channel; underlying buffers stay shared */
        buf_out->data[0] = buf_out->extended_data[0] = buf_out->extended_data[s->map[i]];
        buf_out->channel_layout =
            av_channel_layout_extract_channel(buf->channel_layout, s->map[i]);
        buf_out->channels = 1;

        ret = ff_filter_frame(ctx->outputs[i], buf_out);
        if (ret < 0)
            break;
    }
    av_frame_free(&buf);
    return ret;
}
static const AVFilterPad avfilter_af_channelsplit_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Outputs are created dynamically in init(), one per extracted channel,
 * hence .outputs = NULL plus AVFILTER_FLAG_DYNAMIC_OUTPUTS. */
AVFilter ff_af_channelsplit = {
    .name           = "channelsplit",
    .description    = NULL_IF_CONFIG_SMALL("Split audio into per-channel streams."),
    .priv_size      = sizeof(ChannelSplitContext),
    .priv_class     = &channelsplit_class,
    .init           = init,
    .query_formats  = query_formats,
    .inputs         = avfilter_af_channelsplit_inputs,
    .outputs        = NULL,
    .flags          = AVFILTER_FLAG_DYNAMIC_OUTPUTS,
};

382
externals/ffmpeg/libavfilter/af_chorus.c vendored Executable file
View File

@@ -0,0 +1,382 @@
/*
* Copyright (c) 1998 Juergen Mueller And Sundry Contributors
* This source code is freely redistributable and may be used for
* any purpose. This copyright notice must be maintained.
* Juergen Mueller And Sundry Contributors are not responsible for
* the consequences of using this software.
*
* Copyright (c) 2015 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* chorus audio filter
*/
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "generate_wave_table.h"
typedef struct ChorusContext {
    const AVClass *class;
    float in_gain, out_gain;  // dry input gain / wet output gain
    char *delays_str;         // raw '|'-separated option strings, one value per voice
    char *decays_str;
    char *speeds_str;
    char *depths_str;
    float *delays;            // parsed per-voice values (num_chorus entries each)
    float *decays;
    float *speeds;
    float *depths;
    uint8_t **chorusbuf;      // per-channel ring buffer of max_samples history
    int **phase;              // phase[channel][voice]: position in the voice's LFO table
    int *length;              // length[voice]: LFO period in samples
    int32_t **lookup_table;   // lookup_table[voice]: sine delay-modulation table
    int *counter;             // counter[channel]: write position in chorusbuf
    int num_chorus;           // number of chorus voices
    int max_samples;          // largest delay+depth over all voices, in samples
    int channels;
    int modulation;
    int fade_out;             // samples of tail left to drain at EOF
    int64_t next_pts;
} ChorusContext;
#define OFFSET(x) offsetof(ChorusContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* All four list options are mandatory (checked in init) and must contain
 * the same number of '|'-separated values — one per chorus voice. */
static const AVOption chorus_options[] = {
    { "in_gain",  "set input gain",  OFFSET(in_gain),    AV_OPT_TYPE_FLOAT,  {.dbl=.4}, 0, 1, A },
    { "out_gain", "set output gain", OFFSET(out_gain),   AV_OPT_TYPE_FLOAT,  {.dbl=.4}, 0, 1, A },
    { "delays",   "set delays",      OFFSET(delays_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
    { "decays",   "set decays",      OFFSET(decays_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
    { "speeds",   "set speeds",      OFFSET(speeds_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
    { "depths",   "set depths",      OFFSET(depths_str), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(chorus);
/**
 * Count the '|'-separated items in @p item_str and store the count in
 * *nb_items.  An empty string still counts as one item.
 */
static void count_items(char *item_str, int *nb_items)
{
    int count = 1;
    char *cur;

    /* one item, plus one more per separator */
    for (cur = item_str; *cur; cur++) {
        if (*cur == '|')
            count++;
    }

    *nb_items = count;
}
/**
 * Parse up to *nb_items '|'-separated floats from @p item_str (which is
 * modified in place by av_strtok) into @p items.  Tokens that do not parse
 * as a float are skipped.  On return *nb_items holds the number of values
 * actually stored.
 */
static void fill_items(char *item_str, int *nb_items, float *items)
{
    char *saveptr = NULL;
    int i, parsed = 0;
    int wanted = *nb_items;

    for (i = 0; i < wanted; i++) {
        /* av_strtok consumes the string on the first call, NULL afterwards */
        char *token = av_strtok(i ? NULL : item_str, "|", &saveptr);
        if (token && sscanf(token, "%f", &items[parsed]) == 1)
            parsed++;
    }

    *nb_items = parsed;
}
/**
 * Parse the four per-voice option lists, validate that they all have the
 * same length, and allocate the per-voice arrays.  Per-channel state is
 * allocated later in config_output() once the channel count is known.
 */
static av_cold int init(AVFilterContext *ctx)
{
    ChorusContext *s = ctx->priv;
    int nb_delays, nb_decays, nb_speeds, nb_depths;

    if (!s->delays_str || !s->decays_str || !s->speeds_str || !s->depths_str) {
        av_log(ctx, AV_LOG_ERROR, "Both delays & decays & speeds & depths must be set.\n");
        return AVERROR(EINVAL);
    }

    count_items(s->delays_str, &nb_delays);
    count_items(s->decays_str, &nb_decays);
    count_items(s->speeds_str, &nb_speeds);
    count_items(s->depths_str, &nb_depths);

    s->delays = av_realloc_f(s->delays, nb_delays, sizeof(*s->delays));
    s->decays = av_realloc_f(s->decays, nb_decays, sizeof(*s->decays));
    s->speeds = av_realloc_f(s->speeds, nb_speeds, sizeof(*s->speeds));
    s->depths = av_realloc_f(s->depths, nb_depths, sizeof(*s->depths));

    if (!s->delays || !s->decays || !s->speeds || !s->depths)
        return AVERROR(ENOMEM);

    /* fill_items may shrink the counts if some tokens fail to parse */
    fill_items(s->delays_str, &nb_delays, s->delays);
    fill_items(s->decays_str, &nb_decays, s->decays);
    fill_items(s->speeds_str, &nb_speeds, s->speeds);
    fill_items(s->depths_str, &nb_depths, s->depths);

    /* BUGFIX: this condition used '&&', which only rejected the input when
     * *every* count differed from nb_delays.  A single mismatched list
     * (e.g. fewer decays than delays) slipped through and caused
     * out-of-bounds reads of the shorter arrays in config_output(). */
    if (nb_delays != nb_decays || nb_delays != nb_speeds || nb_delays != nb_depths) {
        av_log(ctx, AV_LOG_ERROR, "Number of delays & decays & speeds & depths given must be same.\n");
        return AVERROR(EINVAL);
    }

    s->num_chorus = nb_delays;

    if (s->num_chorus < 1) {
        av_log(ctx, AV_LOG_ERROR, "At least one delay & decay & speed & depth must be set.\n");
        return AVERROR(EINVAL);
    }

    s->length       = av_calloc(s->num_chorus, sizeof(*s->length));
    s->lookup_table = av_calloc(s->num_chorus, sizeof(*s->lookup_table));

    if (!s->length || !s->lookup_table)
        return AVERROR(ENOMEM);

    s->next_pts = AV_NOPTS_VALUE;

    return 0;
}
/**
 * Negotiate formats: planar float only (the processing loop works on float
 * planes), any channel layout/count, any sample rate.  The ff_set_common_*
 * helpers take ownership of the lists passed to them.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/**
 * Per-stream setup once the sample rate and channel count are known:
 * build one sine LFO delay-modulation table per voice, size the shared
 * history ring buffer to the worst-case delay, and allocate the per-channel
 * counters and phase arrays.  Allocations freed in uninit (outside this
 * view — TODO confirm).
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ChorusContext *s = ctx->priv;
    float sum_in_volume = 1.0;
    int n;

    s->channels = outlink->channels;

    for (n = 0; n < s->num_chorus; n++) {
        /* worst-case delay for this voice (ms -> samples), incl. modulation depth */
        int samples = (int) ((s->delays[n] + s->depths[n]) * outlink->sample_rate / 1000.0);
        int depth_samples = (int) (s->depths[n] * outlink->sample_rate / 1000.0);

        /* one LFO period, in samples, at this voice's modulation speed (Hz) */
        s->length[n] = outlink->sample_rate / s->speeds[n];

        s->lookup_table[n] = av_malloc(sizeof(int32_t) * s->length[n]);
        if (!s->lookup_table[n])
            return AVERROR(ENOMEM);

        ff_generate_wave_table(WAVE_SIN, AV_SAMPLE_FMT_S32, s->lookup_table[n],
                               s->length[n], 0., depth_samples, 0);
        s->max_samples = FFMAX(s->max_samples, samples);
    }

    /* warn when dry + sum of wet contributions can exceed full scale */
    for (n = 0; n < s->num_chorus; n++)
        sum_in_volume += s->decays[n];

    if (s->in_gain * (sum_in_volume) > 1.0 / s->out_gain)
        av_log(ctx, AV_LOG_WARNING, "output gain can cause saturation or clipping of output\n");

    s->counter = av_calloc(outlink->channels, sizeof(*s->counter));
    if (!s->counter)
        return AVERROR(ENOMEM);

    s->phase = av_calloc(outlink->channels, sizeof(*s->phase));
    if (!s->phase)
        return AVERROR(ENOMEM);

    for (n = 0; n < outlink->channels; n++) {
        s->phase[n] = av_calloc(s->num_chorus, sizeof(int));
        if (!s->phase[n])
            return AVERROR(ENOMEM);
    }

    s->fade_out = s->max_samples;

    /* history ring buffer: max_samples per channel, zero-initialised */
    return av_samples_alloc_array_and_samples(&s->chorusbuf, NULL,
                                              outlink->channels,
                                              s->max_samples,
                                              outlink->format, 0);
}
/* Cheap modulo for operands already known to be in [0, 2b). */
#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))

/**
 * Mix each input sample with one LFO-modulated tap per chorus voice, read
 * from the per-channel delay ring buffer, then write the input into the
 * ring buffer for future taps.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    ChorusContext *s = ctx->priv;
    AVFrame *out_frame;
    int c, i, n;

    /* Process in place when the input frame is writable. */
    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(ctx->outputs[0], frame->nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out_frame, frame);
    }

    for (c = 0; c < inlink->channels; c++) {
        const float *src = (const float *)frame->extended_data[c];
        float *dst = (float *)out_frame->extended_data[c];
        float *chorusbuf = (float *)s->chorusbuf[c];
        int *phase = s->phase[c];

        for (i = 0; i < frame->nb_samples; i++) {
            float out, in = src[i];

            out = in * s->in_gain;

            for (n = 0; n < s->num_chorus; n++) {
                /* Tap position: write index minus the LFO-modulated delay,
                 * wrapped to the ring buffer. */
                out += chorusbuf[MOD(s->max_samples + s->counter[c] -
                                     s->lookup_table[n][phase[n]],
                                     s->max_samples)] * s->decays[n];
                /* Advance this voice's LFO phase. */
                phase[n] = MOD(phase[n] + 1, s->length[n]);
            }

            out *= s->out_gain;

            dst[i] = out;

            /* Store the dry input and advance the ring-buffer write index. */
            chorusbuf[s->counter[c]] = in;
            s->counter[c] = MOD(s->counter[c] + 1, s->max_samples);
        }
    }

    /* Remember where the next frame should start, for the EOF flush. */
    s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, (AVRational){1, inlink->sample_rate}, inlink->time_base);

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}
/* Pull a frame from upstream; after EOF keep feeding silence through
 * filter_frame until the tail of the delay line has been flushed. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ChorusContext *s = ctx->priv;
    AVFrame *silence;
    int len;
    int ret = ff_request_frame(ctx->inputs[0]);

    if (ret != AVERROR_EOF || ctx->is_disabled || !s->fade_out)
        return ret;

    /* Flush in chunks of at most 2048 samples. */
    len = FFMIN(s->fade_out, 2048);
    silence = ff_get_audio_buffer(outlink, len);
    if (!silence)
        return AVERROR(ENOMEM);
    s->fade_out -= len;

    av_samples_set_silence(silence->extended_data, 0,
                           silence->nb_samples,
                           outlink->channels,
                           silence->format);

    silence->pts = s->next_pts;
    if (s->next_pts != AV_NOPTS_VALUE)
        s->next_pts += av_rescale_q(len, (AVRational){1, outlink->sample_rate}, outlink->time_base);

    return filter_frame(ctx->inputs[0], silence);
}
/* Free all state allocated by init() and config_output(). */
static av_cold void uninit(AVFilterContext *ctx)
{
    ChorusContext *s = ctx->priv;
    int i;

    av_freep(&s->delays);
    av_freep(&s->decays);
    av_freep(&s->speeds);
    av_freep(&s->depths);

    /* chorusbuf is one allocation referenced from the pointer array. */
    if (s->chorusbuf)
        av_freep(&s->chorusbuf[0]);
    av_freep(&s->chorusbuf);

    if (s->phase) {
        for (i = 0; i < s->channels; i++)
            av_freep(&s->phase[i]);
    }
    av_freep(&s->phase);

    av_freep(&s->counter);
    av_freep(&s->length);

    if (s->lookup_table) {
        for (i = 0; i < s->num_chorus; i++)
            av_freep(&s->lookup_table[i]);
    }
    av_freep(&s->lookup_table);
}
/* Single audio input; frames are processed by filter_frame. */
static const AVFilterPad chorus_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output; request_frame also drives the post-EOF flush. */
static const AVFilterPad chorus_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
        .config_props = config_output,
    },
    { NULL }
};
/* Filter registration entry for the "chorus" audio filter. */
AVFilter ff_af_chorus = {
    .name = "chorus",
    .description = NULL_IF_CONFIG_SMALL("Add a chorus effect to the audio."),
    .query_formats = query_formats,
    .priv_size = sizeof(ChorusContext),
    .priv_class = &chorus_class,
    .init = init,
    .uninit = uninit,
    .inputs = chorus_inputs,
    .outputs = chorus_outputs,
};

599
externals/ffmpeg/libavfilter/af_compand.c vendored Executable file
View File

@@ -0,0 +1,599 @@
/*
* Copyright (c) 1999 Chris Bagwell
* Copyright (c) 1999 Nick Bailey
* Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
* Copyright (c) 2013 Paul B Mahol
* Copyright (c) 2014 Andrew Kelley
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* audio compand filter
*/
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
/* Per-channel envelope-follower state. */
typedef struct ChanParam {
    double attack;  /* seconds as parsed; converted to a smoothing factor in config_output */
    double decay;   /* same dual meaning as attack */
    double volume;  /* current smoothed volume estimate */
} ChanParam;

/* One quadratic segment of the transfer function, evaluated in the log
 * domain as out = y + t*(a*t + b) with t = in_log - x (see get_volume). */
typedef struct CompandSegment {
    double x, y;    /* segment start point */
    double a, b;    /* quadratic and linear coefficients */
} CompandSegment;

typedef struct CompandContext {
    const AVClass *class;
    int nb_segments;               /* allocated size of segments[] */
    char *attacks, *decays, *points;  /* raw option strings, parsed in config_output */
    CompandSegment *segments;      /* transfer-function curve */
    ChanParam *channels;           /* one envelope per channel */
    double in_min_lin;             /* inputs below this map to out_min_lin */
    double out_min_lin;
    double curve_dB;               /* soft-knee radius in dB */
    double gain_dB;                /* extra output gain */
    double initial_volume;         /* initial envelope level, in dB */
    double delay;                  /* look-ahead delay in seconds */
    AVFrame *delay_frame;          /* ring buffer for delayed samples */
    int delay_samples;             /* ring buffer capacity */
    int delay_count;               /* samples currently buffered */
    int delay_index;               /* ring buffer position */
    int64_t pts;                   /* next output timestamp */
    int (*compand)(AVFilterContext *ctx, AVFrame *frame);  /* nodelay or delay impl */
} CompandContext;
#define OFFSET(x) offsetof(CompandContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* User-visible options; attack/decay/points strings are parsed later in
 * config_output. */
static const AVOption compand_options[] = {
    { "attacks", "set time over which increase of volume is determined", OFFSET(attacks), AV_OPT_TYPE_STRING, { .str = "0" }, 0, 0, A },
    { "decays", "set time over which decrease of volume is determined", OFFSET(decays), AV_OPT_TYPE_STRING, { .str = "0.8" }, 0, 0, A },
    { "points", "set points of transfer function", OFFSET(points), AV_OPT_TYPE_STRING, { .str = "-70/-70|-60/-20|1/0" }, 0, 0, A },
    { "soft-knee", "set soft-knee", OFFSET(curve_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.01, 900, A },
    { "gain", "set output gain", OFFSET(gain_dB), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 900, A },
    { "volume", "set initial volume", OFFSET(initial_volume), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, -900, 0, A },
    { "delay", "set delay for samples before sending them to volume adjuster", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0 }, 0, 20, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(compand);
/* One-time setup: mark the output timestamp as not yet seeded. */
static av_cold int init(AVFilterContext *ctx)
{
    CompandContext *s = ctx->priv;

    s->pts = AV_NOPTS_VALUE;
    return 0;
}
/* Release all state; also called from config_output before re-allocating. */
static av_cold void uninit(AVFilterContext *ctx)
{
    CompandContext *s = ctx->priv;

    av_frame_free(&s->delay_frame);
    av_freep(&s->segments);
    av_freep(&s->channels);
}
/* Supported I/O: planar double samples, any channel count, any rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *chlayouts = ff_all_channel_counts();
    AVFilterFormats *fmts;
    int err;

    if (!chlayouts)
        return AVERROR(ENOMEM);
    err = ff_set_common_channel_layouts(ctx, chlayouts);
    if (err < 0)
        return err;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    err = ff_set_common_formats(ctx, fmts);
    if (err < 0)
        return err;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/* Count '|'- or ' '-separated items in a string; the empty string still
 * counts as one item. */
static void count_items(char *item_str, int *nb_items)
{
    int count = 1;
    char *cur = item_str;

    while (*cur) {
        if (*cur == ' ' || *cur == '|')
            count++;
        cur++;
    }
    *nb_items = count;
}
/* One-pole envelope follower: rising levels are smoothed with the attack
 * coefficient, falling levels with the decay coefficient. */
static void update_volume(ChanParam *cp, double in)
{
    const double delta = in - cp->volume;

    cp->volume += delta * (delta > 0.0 ? cp->attack : cp->decay);
}
/* Map a linear input level through the piecewise-quadratic transfer
 * function (stored in the log domain) and return the linear output. */
static double get_volume(CompandContext *s, double in_lin)
{
    const CompandSegment *seg;
    double x, t;
    int i;

    /* Below the lowest knee: constant output floor. */
    if (in_lin < s->in_min_lin)
        return s->out_min_lin;

    x = log(in_lin);

    /* Locate the segment whose range contains the input level. */
    for (i = 1; i < s->nb_segments; i++)
        if (x <= s->segments[i].x)
            break;

    seg = &s->segments[i - 1];
    t = x - seg->x;
    return exp(seg->y + t * (seg->a * t + seg->b));
}
/**
 * Zero-delay variant: follow the envelope and apply the transfer-function
 * gain sample by sample, in place when the input frame is writable.
 */
static int compand_nodelay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels = inlink->channels;
    const int nb_samples = frame->nb_samples;
    AVFrame *out_frame;
    int chan, i;
    int err;

    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        err = av_frame_copy_props(out_frame, frame);
        if (err < 0) {
            av_frame_free(&out_frame);
            av_frame_free(&frame);
            return err;
        }
    }

    for (chan = 0; chan < channels; chan++) {
        const double *src = (double *)frame->extended_data[chan];
        double *dst = (double *)out_frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];

        for (i = 0; i < nb_samples; i++) {
            /* Track the envelope, then scale by the curve's gain for it. */
            update_volume(cp, fabs(src[i]));

            dst[i] = src[i] * get_volume(s, cp->volume);
        }
    }

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}
/* Cheap modulo for operands already known to be in [0, 2b). */
#define MOD(a, b) (((a) >= (b)) ? (a) - (b) : (a))

/**
 * Look-ahead variant: the envelope is updated from the current input, but
 * the gain is applied to samples read back from a per-channel ring buffer
 * delay_samples behind, so volume changes anticipate the audio. Output
 * starts only once the ring buffer has been primed.
 */
static int compand_delay(AVFilterContext *ctx, AVFrame *frame)
{
    CompandContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int channels = inlink->channels;
    const int nb_samples = frame->nb_samples;
    int chan, i, av_uninit(dindex), oindex, av_uninit(count);
    AVFrame *out_frame = NULL;
    int err;

    /* Seed the output timestamp from the first frame (or 0). */
    if (s->pts == AV_NOPTS_VALUE) {
        s->pts = (frame->pts == AV_NOPTS_VALUE) ? 0 : frame->pts;
    }

    av_assert1(channels > 0); /* would corrupt delay_count and delay_index */

    for (chan = 0; chan < channels; chan++) {
        AVFrame *delay_frame = s->delay_frame;
        const double *src = (double *)frame->extended_data[chan];
        double *dbuf = (double *)delay_frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];
        double *dst;

        /* count/dindex are shared across channels; reload per channel and
         * store back after the loop (every channel advances identically). */
        count = s->delay_count;
        dindex = s->delay_index;
        for (i = 0, oindex = 0; i < nb_samples; i++) {
            const double in = src[i];
            update_volume(cp, fabs(in));

            if (count >= s->delay_samples) {
                /* Ring buffer full: emit the delayed sample. The output
                 * frame is allocated lazily, sized for the remainder. */
                if (!out_frame) {
                    out_frame = ff_get_audio_buffer(ctx->outputs[0], nb_samples - i);
                    if (!out_frame) {
                        av_frame_free(&frame);
                        return AVERROR(ENOMEM);
                    }
                    err = av_frame_copy_props(out_frame, frame);
                    if (err < 0) {
                        av_frame_free(&out_frame);
                        av_frame_free(&frame);
                        return err;
                    }
                    out_frame->pts = s->pts;
                    s->pts += av_rescale_q(nb_samples - i,
                        (AVRational){ 1, inlink->sample_rate },
                        inlink->time_base);
                }

                dst = (double *)out_frame->extended_data[chan];
                dst[oindex++] = dbuf[dindex] * get_volume(s, cp->volume);
            } else {
                /* Still priming the delay line. */
                count++;
            }

            /* Overwrite the slot just consumed with the new input. */
            dbuf[dindex] = in;
            dindex = MOD(dindex + 1, s->delay_samples);
        }
    }

    s->delay_count = count;
    s->delay_index = dindex;

    av_frame_free(&frame);

    if (out_frame) {
        err = ff_filter_frame(ctx->outputs[0], out_frame);
        return err;
    }

    return 0;
}
/**
 * After EOF, flush the samples still held in the delay ring buffer,
 * applying the last envelope state (envelopes are no longer updated).
 */
static int compand_drain(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    const int channels = outlink->channels;
    AVFrame *frame = NULL;
    int chan, i, dindex;

    /* 2048 is to limit output frame size during drain */
    frame = ff_get_audio_buffer(outlink, FFMIN(2048, s->delay_count));
    if (!frame)
        return AVERROR(ENOMEM);
    frame->pts = s->pts;
    s->pts += av_rescale_q(frame->nb_samples,
            (AVRational){ 1, outlink->sample_rate }, outlink->time_base);

    av_assert0(channels > 0);
    for (chan = 0; chan < channels; chan++) {
        AVFrame *delay_frame = s->delay_frame;
        double *dbuf = (double *)delay_frame->extended_data[chan];
        double *dst = (double *)frame->extended_data[chan];
        ChanParam *cp = &s->channels[chan];

        /* dindex is shared state; reload per channel, store after the loop. */
        dindex = s->delay_index;
        for (i = 0; i < frame->nb_samples; i++) {
            dst[i] = dbuf[dindex] * get_volume(s, cp->volume);
            dindex = MOD(dindex + 1, s->delay_samples);
        }
    }
    s->delay_count -= frame->nb_samples;
    s->delay_index = dindex;

    return ff_filter_frame(outlink, frame);
}
/**
 * Parse the attack/decay/points option strings and build the transfer
 * function as piecewise-quadratic segments in the log domain, with a
 * soft-knee rounding of each breakpoint. Finally pick the delayed or
 * zero-delay processing path.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    const int sample_rate = outlink->sample_rate;
    /* Soft-knee radius converted from dB to natural-log units. */
    double radius = s->curve_dB * M_LN10 / 20.0;
    char *p, *saveptr = NULL;
    const int channels = outlink->channels;
    int nb_attacks, nb_decays, nb_points;
    int new_nb_items, num;
    int i;
    int err;

    count_items(s->attacks, &nb_attacks);
    count_items(s->decays, &nb_decays);
    count_items(s->points, &nb_points);

    if (channels <= 0) {
        av_log(ctx, AV_LOG_ERROR, "Invalid number of channels: %d\n", channels);
        return AVERROR(EINVAL);
    }

    if (nb_attacks > channels || nb_decays > channels) {
        av_log(ctx, AV_LOG_WARNING,
                "Number of attacks/decays bigger than number of channels. Ignoring rest of entries.\n");
        nb_attacks = FFMIN(nb_attacks, channels);
        nb_decays = FFMIN(nb_decays, channels);
    }

    /* config_output may be called again; drop any previous allocations. */
    uninit(ctx);

    s->channels = av_mallocz_array(channels, sizeof(*s->channels));
    /* Room for every user point plus the synthetic head/tail segments;
     * every other slot holds the knee-rounding segment. */
    s->nb_segments = (nb_points + 4) * 2;
    s->segments = av_mallocz_array(s->nb_segments, sizeof(*s->segments));

    if (!s->channels || !s->segments) {
        uninit(ctx);
        return AVERROR(ENOMEM);
    }

    /* Parse attack times, one per channel. */
    p = s->attacks;
    for (i = 0, new_nb_items = 0; i < nb_attacks; i++) {
        char *tstr = av_strtok(p, " |", &saveptr);
        if (!tstr) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        p = NULL;
        new_nb_items += sscanf(tstr, "%lf", &s->channels[i].attack) == 1;
        if (s->channels[i].attack < 0) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
    }
    nb_attacks = new_nb_items;

    /* Parse decay times, one per channel. */
    p = s->decays;
    for (i = 0, new_nb_items = 0; i < nb_decays; i++) {
        char *tstr = av_strtok(p, " |", &saveptr);
        if (!tstr) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        p = NULL;
        new_nb_items += sscanf(tstr, "%lf", &s->channels[i].decay) == 1;
        if (s->channels[i].decay < 0) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
    }
    nb_decays = new_nb_items;

    if (nb_attacks != nb_decays) {
        av_log(ctx, AV_LOG_ERROR,
                "Number of attacks %d differs from number of decays %d.\n",
                nb_attacks, nb_decays);
        uninit(ctx);
        return AVERROR(EINVAL);
    }

    /* Broadcast the last given attack/decay to the remaining channels. */
    for (i = nb_decays; i < channels; i++) {
        s->channels[i].attack = s->channels[nb_decays - 1].attack;
        s->channels[i].decay = s->channels[nb_decays - 1].decay;
    }

/* User points land on the even slots, offset by one synthetic head point. */
#define S(x) s->segments[2 * ((x) + 1)]
    p = s->points;
    for (i = 0, new_nb_items = 0; i < nb_points; i++) {
        char *tstr = av_strtok(p, " |", &saveptr);
        p = NULL;
        if (!tstr || sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
            av_log(ctx, AV_LOG_ERROR,
                    "Invalid and/or missing input/output value.\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        if (i && S(i - 1).x > S(i).x) {
            av_log(ctx, AV_LOG_ERROR,
                    "Transfer function input values must be increasing.\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        /* Store y as gain relative to x; made absolute again by callers. */
        S(i).y -= S(i).x;
        av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
        new_nb_items++;
    }
    num = new_nb_items;

    /* Add 0,0 if necessary */
    if (num == 0 || S(num - 1).x)
        num++;

#undef S
/* From here on, index the full segment array directly. */
#define S(x) s->segments[2 * (x)]
    /* Add a tail off segment at the start */
    S(0).x = S(1).x - 2 * s->curve_dB;
    S(0).y = S(1).y;
    num++;

    /* Join adjacent colinear segments */
    for (i = 2; i < num; i++) {
        /* Cross-multiplied slopes of the two segments around point i-1. */
        double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
        double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
        int j;

        /* Keep the knee unless the segments are exactly colinear. */
        if (fabs(g1 - g2))
            continue;
        num--;
        for (j = --i; j < num; j++)
            S(j) = S(j + 1);
    }

    /* Apply the output gain and convert dB values to natural log. */
    for (i = 0; i < s->nb_segments; i += 2) {
        s->segments[i].y += s->gain_dB;
        s->segments[i].x *= M_LN10 / 20;
        s->segments[i].y *= M_LN10 / 20;
    }

/* Window of segments around the knee currently being rounded. */
#define L(x) s->segments[i - (x)]
    for (i = 4; i < s->nb_segments; i += 2) {
        double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;

        /* Straight-line coefficients for the two segments meeting at L(2). */
        L(4).a = 0;
        L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);

        L(2).a = 0;
        L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);

        /* Back off along the incoming segment by the knee radius... */
        theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
        len = hypot(L(2).x - L(4).x, L(2).y - L(4).y);
        r = FFMIN(radius, len);
        L(3).x = L(2).x - r * cos(theta);
        L(3).y = L(2).y - r * sin(theta);

        /* ...and forward along the outgoing segment. */
        theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
        len = hypot(L(0).x - L(2).x, L(0).y - L(2).y);
        r = FFMIN(radius, len / 2);
        x = L(2).x + r * cos(theta);
        y = L(2).y + r * sin(theta);

        /* Fit a quadratic through the rounded corner region. */
        cx = (L(3).x + L(2).x + x) / 3;
        cy = (L(3).y + L(2).y + y) / 3;

        L(2).x = x;
        L(2).y = y;

        in1 = cx - L(3).x;
        out1 = cy - L(3).y;
        in2 = L(2).x - L(3).x;
        out2 = L(2).y - L(3).y;
        L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
        L(3).b = out1 / in1 - L(3).a * in1;
    }
    /* Terminate the curve; i retains its final loop value here. */
    L(3).x = 0;
    L(3).y = L(2).y;

    s->in_min_lin = exp(s->segments[1].x);
    s->out_min_lin = exp(s->segments[1].y);

    /* Convert attack/decay times (seconds) into per-sample smoothing
     * factors; times shorter than one sample mean instant response. */
    for (i = 0; i < channels; i++) {
        ChanParam *cp = &s->channels[i];

        if (cp->attack > 1.0 / sample_rate)
            cp->attack = 1.0 - exp(-1.0 / (sample_rate * cp->attack));
        else
            cp->attack = 1.0;
        if (cp->decay > 1.0 / sample_rate)
            cp->decay = 1.0 - exp(-1.0 / (sample_rate * cp->decay));
        else
            cp->decay = 1.0;
        cp->volume = ff_exp10(s->initial_volume / 20);
    }

    s->delay_samples = s->delay * sample_rate;
    if (s->delay_samples <= 0) {
        s->compand = compand_nodelay;
        return 0;
    }

    /* Allocate the look-ahead ring buffer as an audio frame. */
    s->delay_frame = av_frame_alloc();
    if (!s->delay_frame) {
        uninit(ctx);
        return AVERROR(ENOMEM);
    }

    s->delay_frame->format = outlink->format;
    s->delay_frame->nb_samples = s->delay_samples;
    s->delay_frame->channel_layout = outlink->channel_layout;

    err = av_frame_get_buffer(s->delay_frame, 0);
    if (err)
        return err;

    s->compand = compand_delay;
    return 0;
}
/* Dispatch to the delayed or zero-delay implementation chosen at
 * configuration time. */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    CompandContext *s = inlink->dst->priv;

    return s->compand(inlink->dst, frame);
}
/* Pull from upstream; on EOF, flush whatever the delay line still holds. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    CompandContext *s = ctx->priv;
    int ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && !ctx->is_disabled && s->delay_count)
        return compand_drain(outlink);

    return ret;
}
/* Single audio input. */
static const AVFilterPad compand_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Single audio output; request_frame drives the post-EOF drain. */
static const AVFilterPad compand_outputs[] = {
    {
        .name = "default",
        .request_frame = request_frame,
        .config_props = config_output,
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration entry for the "compand" audio filter. */
AVFilter ff_af_compand = {
    .name = "compand",
    .description = NULL_IF_CONFIG_SMALL(
            "Compress or expand audio dynamic range."),
    .query_formats = query_formats,
    .priv_size = sizeof(CompandContext),
    .priv_class = &compand_class,
    .init = init,
    .uninit = uninit,
    .inputs = compand_inputs,
    .outputs = compand_outputs,
};

View File

@@ -0,0 +1,198 @@
/*
* Copyright (c) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen, Vladimir Sadovnikov and others
* Copyright (c) 2015 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
typedef struct CompensationDelayContext {
    const AVClass *class;
    int distance_mm;        /* distance components, combined in config_input */
    int distance_cm;
    int distance_m;
    double dry, wet;        /* mix levels for direct and delayed signal */
    int temp;               /* air temperature in degrees Celsius */
    unsigned delay;         /* resulting delay in samples */
    unsigned w_ptr;         /* ring-buffer write index */
    unsigned buf_size;      /* ring-buffer capacity; always a power of two */
    AVFrame *delay_frame;   /* per-channel ring buffers */
} CompensationDelayContext;
#define OFFSET(x) offsetof(CompensationDelayContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Distance is given as m + cm + mm; temperature affects the speed of sound. */
static const AVOption compensationdelay_options[] = {
    { "mm", "set mm distance", OFFSET(distance_mm), AV_OPT_TYPE_INT, {.i64=0}, 0, 10, A },
    { "cm", "set cm distance", OFFSET(distance_cm), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, A },
    { "m", "set meter distance", OFFSET(distance_m), AV_OPT_TYPE_INT, {.i64=0}, 0, 100, A },
    { "dry", "set dry amount", OFFSET(dry), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 1, A },
    { "wet", "set wet amount", OFFSET(wet), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0, 1, A },
    { "temp", "set temperature °C", OFFSET(temp), AV_OPT_TYPE_INT, {.i64=20}, -50, 50, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(compensationdelay);
// The maximum distance expressible through the options, in cm:
// 100 m + 100 cm + 10 mm.
#define COMP_DELAY_MAX_DISTANCE (100.0 * 100.0 + 100.0 * 1.0 + 1.0)
// Speed of sound in km/h as a function of temperature in degrees Celsius.
// Fully parenthesized (expansion and argument) so the macro stays correct
// inside larger expressions and with expression arguments.
#define COMP_DELAY_SOUND_SPEED_KM_H(temp) (1.85325 * (643.95 * sqrt((((temp) + 273.15) / 273.15))))
// Speed of sound converted to cm/s.
#define COMP_DELAY_SOUND_SPEED_CM_S(temp) (COMP_DELAY_SOUND_SPEED_KM_H(temp) * (1000.0 * 100.0) /* cm/km */ / (60.0 * 60.0) /* s/h */)
// Propagation delay in seconds per centimetre of distance.
#define COMP_DELAY_SOUND_FRONT_DELAY(temp) (1.0 / COMP_DELAY_SOUND_SPEED_CM_S(temp))
// The maximum delay may be reached by this filter.
// NOTE(review): evaluated at +50 °C where sound is fastest, i.e. the
// per-cm delay is smallest; colder temperatures give a larger delay than
// this bound — confirm the buffer sizing in config_input covers temp < 50.
#define COMP_DELAY_MAX_DELAY (COMP_DELAY_MAX_DISTANCE * COMP_DELAY_SOUND_FRONT_DELAY(50))
/* Supported I/O: planar double samples, any channel count, any rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *chlayouts = ff_all_channel_counts();
    AVFilterFormats *fmts;
    int err;

    if (!chlayouts)
        return AVERROR(ENOMEM);
    err = ff_set_common_channel_layouts(ctx, chlayouts);
    if (err < 0)
        return err;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    err = ff_set_common_formats(ctx, fmts);
    if (err < 0)
        return err;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/**
 * Convert the configured distance into a delay in samples and allocate a
 * power-of-two ring buffer covering the worst-case delay.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    CompensationDelayContext *s = ctx->priv;
    unsigned min_size, new_size = 1;

    /* Total distance in cm times the per-cm propagation delay at the
     * configured temperature, scaled to samples. */
    s->delay = (s->distance_m * 100. + s->distance_cm * 1. + s->distance_mm * .1) *
               COMP_DELAY_SOUND_FRONT_DELAY(s->temp) * inlink->sample_rate;
    min_size = inlink->sample_rate * COMP_DELAY_MAX_DELAY;

    /* Round up to a power of two so indexing can use a bitmask. */
    while (new_size < min_size)
        new_size <<= 1;

    s->delay_frame = av_frame_alloc();
    if (!s->delay_frame)
        return AVERROR(ENOMEM);

    s->buf_size = new_size;
    s->delay_frame->format = inlink->format;
    s->delay_frame->nb_samples = new_size;
    s->delay_frame->channel_layout = inlink->channel_layout;

    return av_frame_get_buffer(s->delay_frame, 0);
}
/**
 * Write input into the per-channel ring buffer and mix the dry signal
 * with the sample written `delay` positions earlier.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    CompensationDelayContext *s = ctx->priv;
    const unsigned b_mask = s->buf_size - 1;  /* power-of-two wrap mask */
    const unsigned buf_size = s->buf_size;
    const unsigned delay = s->delay;
    const double dry = s->dry;
    const double wet = s->wet;
    unsigned r_ptr, w_ptr;
    AVFrame *out;
    int n, ch;

    out = ff_get_audio_buffer(ctx->outputs[0], in->nb_samples);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(out, in);

    for (ch = 0; ch < inlink->channels; ch++) {
        const double *src = (const double *)in->extended_data[ch];
        double *dst = (double *)out->extended_data[ch];
        double *buffer = (double *)s->delay_frame->extended_data[ch];

        /* All channels share the same write index; reload per channel. */
        w_ptr = s->w_ptr;
        /* Read pointer trails the write pointer by the delay. */
        r_ptr = (w_ptr + buf_size - delay) & b_mask;

        for (n = 0; n < in->nb_samples; n++) {
            const double sample = src[n];

            buffer[w_ptr] = sample;
            dst[n] = dry * sample + wet * buffer[r_ptr];
            w_ptr = (w_ptr + 1) & b_mask;
            r_ptr = (r_ptr + 1) & b_mask;
        }
    }
    s->w_ptr = w_ptr;

    av_frame_free(&in);
    return ff_filter_frame(ctx->outputs[0], out);
}
/* Release the delay-line ring buffer. */
static av_cold void uninit(AVFilterContext *ctx)
{
    CompensationDelayContext *s = ctx->priv;

    av_frame_free(&s->delay_frame);
}
/* Single audio input; the delay buffer is sized at link configuration. */
static const AVFilterPad compensationdelay_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Single pass-through audio output. */
static const AVFilterPad compensationdelay_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration entry for the "compensationdelay" audio filter. */
AVFilter ff_af_compensationdelay = {
    .name = "compensationdelay",
    .description = NULL_IF_CONFIG_SMALL("Audio Compensation Delay Line."),
    .query_formats = query_formats,
    .priv_size = sizeof(CompensationDelayContext),
    .priv_class = &compensationdelay_class,
    .uninit = uninit,
    .inputs = compensationdelay_inputs,
    .outputs = compensationdelay_outputs,
};

191
externals/ffmpeg/libavfilter/af_crossfeed.c vendored Executable file
View File

@@ -0,0 +1,191 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
typedef struct CrossfeedContext {
    const AVClass *class;
    double range;       /* soundstage wideness, 0..1 (moves shelf corner) */
    double strength;    /* crossfeed strength, 0..1 (sets shelf gain) */
    double slope;       /* shelf slope */
    double level_in;    /* input gain */
    double level_out;   /* output gain */
    /* Biquad coefficients, a0-normalized in config_input. */
    double a0, a1, a2;
    double b0, b1, b2;
    /* Direct-form-I filter state for the side (L-R) signal. */
    double i1, i2;
    double o1, o2;
} CrossfeedContext;
/* Supported I/O: interleaved double samples, stereo only, any rate. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;
    int ret;

    ret = ff_add_format(&formats, AV_SAMPLE_FMT_DBL);
    if (ret < 0)
        return ret;
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    ret = ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO);
    if (ret < 0)
        return ret;
    ret = ff_set_common_channel_layouts(ctx, layout);
    if (ret < 0)
        return ret;

    ret = ff_set_common_samplerates(ctx, ff_all_samplerates());
    if (ret < 0)
        return ret;
    return 0;
}
/**
 * Compute the biquad shelving-filter coefficients applied to the side
 * (L-R) signal: strength sets the shelf amplitude, range its corner
 * frequency. Coefficients are normalized by a0.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    CrossfeedContext *s = ctx->priv;
    /* Shelf amplitude: strength 0..1 mapped onto 0..-30 dB. */
    double A = ff_exp10(s->strength * -30 / 40);
    /* Corner frequency: range 0..1 moves it from 2100 Hz downwards. */
    double w0 = 2 * M_PI * (1. - s->range) * 2100 / inlink->sample_rate;
    double alpha;

    alpha = sin(w0) / 2 * sqrt((A + 1 / A) * (1 / s->slope - 1) + 2);

    /* NOTE(review): these match the audio-EQ-cookbook shelving form —
     * verify against that reference before changing any term. */
    s->a0 = (A + 1) + (A - 1) * cos(w0) + 2 * sqrt(A) * alpha;
    s->a1 = -2 * ((A - 1) + (A + 1) * cos(w0));
    s->a2 = (A + 1) + (A - 1) * cos(w0) - 2 * sqrt(A) * alpha;
    s->b0 = A * ((A + 1) - (A - 1) * cos(w0) + 2 * sqrt(A) * alpha);
    s->b1 = 2 * A * ((A - 1) - (A + 1) * cos(w0));
    s->b2 = A * ((A + 1) - (A - 1) * cos(w0) - 2 * sqrt(A) * alpha);

    /* Normalize so the difference equation needs no division. */
    s->a1 /= s->a0;
    s->a2 /= s->a0;
    s->b0 /= s->a0;
    s->b1 /= s->a0;
    s->b2 /= s->a0;

    return 0;
}
/**
 * Mid/side processing: filter the side signal through the shelving biquad
 * and recombine. The filter state keeps running even while the filter is
 * timeline-disabled, so re-enabling is click-free.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    CrossfeedContext *s = ctx->priv;
    const double *src = (const double *)in->data[0];
    const double level_in = s->level_in;
    const double level_out = s->level_out;
    const double b0 = s->b0;
    const double b1 = s->b1;
    const double b2 = s->b2;
    const double a1 = s->a1;
    const double a2 = s->a2;
    AVFrame *out;
    double *dst;
    int n;

    /* Process in place when the input frame is writable. */
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];

    /* Interleaved stereo: src[0]/dst[0] left, src[1]/dst[1] right. */
    for (n = 0; n < out->nb_samples; n++, src += 2, dst += 2) {
        double mid = (src[0] + src[1]) * level_in * .5;
        double side = (src[0] - src[1]) * level_in * .5;
        /* Direct-form-I biquad on the side signal. */
        double oside = side * b0 + s->i1 * b1 + s->i2 * b2 - s->o1 * a1 - s->o2 * a2;

        s->i2 = s->i1;
        s->i1 = side;
        s->o2 = s->o1;
        s->o1 = oside;

        if (ctx->is_disabled) {
            dst[0] = src[0];
            dst[1] = src[1];
        } else {
            dst[0] = (mid + oside) * level_out;
            dst[1] = (mid - oside) * level_out;
        }
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Apply a runtime option change, then recompute the biquad coefficients
 * from the updated option values. */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    int ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);

    return ret < 0 ? ret : config_input(ctx->inputs[0]);
}
#define OFFSET(x) offsetof(CrossfeedContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

/* All options are runtime-adjustable; process_command reconfigures. */
static const AVOption crossfeed_options[] = {
    { "strength", "set crossfeed strength", OFFSET(strength), AV_OPT_TYPE_DOUBLE, {.dbl=.2}, 0, 1, FLAGS },
    { "range", "set soundstage wideness", OFFSET(range), AV_OPT_TYPE_DOUBLE, {.dbl=.5}, 0, 1, FLAGS },
    { "slope", "set curve slope", OFFSET(slope), AV_OPT_TYPE_DOUBLE, {.dbl=.5}, .01, 1, FLAGS },
    { "level_in", "set level in", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=.9}, 0, 1, FLAGS },
    { "level_out", "set level out", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1.}, 0, 1, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(crossfeed);
/* Single stereo audio input; coefficients recomputed on link config. */
static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

/* Single pass-through audio output. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration entry for the "crossfeed" audio filter. */
AVFilter ff_af_crossfeed = {
    .name = "crossfeed",
    .description = NULL_IF_CONFIG_SMALL("Apply headphone crossfeed filter."),
    .query_formats = query_formats,
    .priv_size = sizeof(CrossfeedContext),
    .priv_class = &crossfeed_class,
    .inputs = inputs,
    .outputs = outputs,
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .process_command = process_command,
};

318
externals/ffmpeg/libavfilter/af_crystalizer.c vendored Executable file
View File

@@ -0,0 +1,318 @@
/*
* Copyright (c) 2016 The FFmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
typedef struct CrystalizerContext {
    const AVClass *class;
    float mult;      /* intensity multiplier applied to sample differences */
    int clip;        /* clamp output to [-1, 1] when set */
    AVFrame *prev;   /* last sample of the previous frame, per channel */
    /* Per-sample-format slice worker (filter_flt, filter_dbl, ...). */
    int (*filter)(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs);
} CrystalizerContext;
#define OFFSET(x) offsetof(CrystalizerContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

/* Both options are runtime-adjustable. */
static const AVOption crystalizer_options[] = {
    { "i", "set intensity", OFFSET(mult), AV_OPT_TYPE_FLOAT, {.dbl=2.0}, 0, 10, A },
    { "c", "enable clipping", OFFSET(clip), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(crystalizer);
/**
 * Supported I/O: float/double samples (interleaved or planar), any
 * channel count, any sample rate.
 *
 * Fix: the result of ff_all_samplerates() was previously passed to
 * ff_set_common_samplerates() without a NULL check, unlike every other
 * allocation in this function; an OOM there could dereference NULL.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLT, AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Payload passed to the slice-threaded per-format filter callbacks. */
typedef struct ThreadData {
    void **d;        /* destination data pointers */
    void **p;        /* previous-sample state pointers */
    const void **s;  /* source data pointers */
    int nb_samples;
    int channels;
    float mult;      /* intensity multiplier */
    int clip;        /* clamp output to [-1, 1] when set */
} ThreadData;
/* Interleaved float path: each job processes a slice of the channels. */
static int filter_flt(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    const int nb_samples = td->nb_samples;
    const int channels = td->channels;
    const float mult = td->mult;
    const int clip = td->clip;
    /* split the channel range evenly across the jobs */
    const int start = (channels * jobnr) / nb_jobs;
    const int end = (channels * (jobnr + 1)) / nb_jobs;
    float *prev = td->p[0];

    for (int ch = start; ch < end; ch++) {
        const float *in = td->s[0];
        float *out = td->d[0];

        for (int n = 0; n < nb_samples; n++) {
            const float cur = in[ch];
            float v = cur + (cur - prev[ch]) * mult;

            prev[ch] = cur;
            if (clip)
                v = av_clipf(v, -1, 1);
            out[ch] = v;

            /* advance one interleaved frame */
            out += channels;
            in += channels;
        }
    }

    return 0;
}
/* Interleaved double path: each job processes a slice of the channels. */
static int filter_dbl(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    const int nb_samples = td->nb_samples;
    const int channels = td->channels;
    const float mult = td->mult;
    const int clip = td->clip;
    const int start = (channels * jobnr) / nb_jobs;
    const int end = (channels * (jobnr + 1)) / nb_jobs;
    double *prev = td->p[0];

    for (int ch = start; ch < end; ch++) {
        const double *in = td->s[0];
        double *out = td->d[0];

        for (int n = 0; n < nb_samples; n++) {
            const double cur = in[ch];
            double v = cur + (cur - prev[ch]) * mult;

            prev[ch] = cur;
            if (clip)
                v = av_clipd(v, -1, 1);
            out[ch] = v;

            out += channels;
            in += channels;
        }
    }

    return 0;
}
/* Planar float path: one contiguous buffer per channel. */
static int filter_fltp(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    const int nb_samples = td->nb_samples;
    const int channels = td->channels;
    const float mult = td->mult;
    const int clip = td->clip;
    const int start = (channels * jobnr) / nb_jobs;
    const int end = (channels * (jobnr + 1)) / nb_jobs;

    for (int ch = start; ch < end; ch++) {
        const float *in = td->s[ch];
        float *out = td->d[ch];
        float *prev = td->p[ch];

        for (int n = 0; n < nb_samples; n++) {
            const float cur = in[n];
            float v = cur + (cur - prev[0]) * mult;

            prev[0] = cur;
            if (clip)
                v = av_clipf(v, -1, 1);
            out[n] = v;
        }
    }

    return 0;
}
/* Planar double path: one contiguous buffer per channel. */
static int filter_dblp(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    ThreadData *td = arg;
    const int nb_samples = td->nb_samples;
    const int channels = td->channels;
    const float mult = td->mult;
    const int clip = td->clip;
    const int start = (channels * jobnr) / nb_jobs;
    const int end = (channels * (jobnr + 1)) / nb_jobs;

    for (int ch = start; ch < end; ch++) {
        const double *in = td->s[ch];
        double *out = td->d[ch];
        double *prev = td->p[ch];

        for (int n = 0; n < nb_samples; n++) {
            const double cur = in[n];
            double v = cur + (cur - prev[0]) * mult;

            prev[0] = cur;
            if (clip)
                v = av_clipd(v, -1, 1);
            out[n] = v;
        }
    }

    return 0;
}
/**
 * Select the per-format worker once the input format is known.
 *
 * Improvement: the switch had no default case; although query_formats()
 * restricts us to the four formats below, return AVERROR_BUG instead of
 * silently leaving s->filter NULL if that invariant is ever broken.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    CrystalizerContext *s = ctx->priv;

    switch (inlink->format) {
    case AV_SAMPLE_FMT_FLT:  s->filter = filter_flt;  break;
    case AV_SAMPLE_FMT_DBL:  s->filter = filter_dbl;  break;
    case AV_SAMPLE_FMT_FLTP: s->filter = filter_fltp; break;
    case AV_SAMPLE_FMT_DBLP: s->filter = filter_dblp; break;
    default:
        return AVERROR_BUG;
    }

    return 0;
}
/* Run the selected worker over one input frame, in place when possible. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    CrystalizerContext *s = ctx->priv;
    ThreadData td;
    AVFrame *out;

    /* one-sample-per-channel history buffer, allocated on first use */
    if (!s->prev) {
        s->prev = ff_get_audio_buffer(inlink, 1);
        if (!s->prev) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
    }

    if (!av_frame_is_writable(in)) {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    } else {
        out = in;
    }

    td.d = (void **)out->extended_data;
    td.s = (const void **)in->extended_data;
    td.p = (void **)s->prev->extended_data;
    td.nb_samples = in->nb_samples;
    td.channels = in->channels;
    /* when disabled on the timeline, a zero multiplier passes audio through */
    td.mult = ctx->is_disabled ? 0.f : s->mult;
    td.clip = s->clip;
    ctx->internal->execute(ctx, s->filter, &td, NULL,
                           FFMIN(inlink->channels, ff_filter_get_nb_threads(ctx)));

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
/* Release the per-channel history frame. */
static av_cold void uninit(AVFilterContext *ctx)
{
    CrystalizerContext *priv = ctx->priv;

    av_frame_free(&priv->prev);
}
/* Single audio input: processes frames and hooks format configuration. */
static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

/* Single audio output. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Filter definition; supports slice threading and runtime option changes. */
AVFilter ff_af_crystalizer = {
    .name = "crystalizer",
    .description = NULL_IF_CONFIG_SMALL("Simple expand audio dynamic range filter."),
    .query_formats = query_formats,
    .priv_size = sizeof(CrystalizerContext),
    .priv_class = &crystalizer_class,
    .uninit = uninit,
    .inputs = inputs,
    .outputs = outputs,
    .process_command = ff_filter_process_command,
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL |
             AVFILTER_FLAG_SLICE_THREADS,
};

173
externals/ffmpeg/libavfilter/af_dcshift.c vendored Executable file
View File

@@ -0,0 +1,173 @@
/*
* Copyright (c) 2000 Chris Ausbrooks <weed@bucket.pp.ualr.edu>
* Copyright (c) 2000 Fabien COELHO <fabien@coelho.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
/* Per-instance state for the dcshift filter. */
typedef struct DCShiftContext {
    const AVClass *class;
    double dcshift;          // DC offset to add, in [-1, 1] full-scale units
    double limiterthreshold; // absolute level where the limiter engages (computed in init())
    double limitergain;      // limiter gain; 0 disables the limiter path
} DCShiftContext;

#define OFFSET(x) offsetof(DCShiftContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption dcshift_options[] = {
    { "shift", "set DC shift", OFFSET(dcshift), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -1, 1, A },
    { "limitergain", "set limiter gain", OFFSET(limitergain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 1, A },
    { NULL }
};
AVFILTER_DEFINE_CLASS(dcshift);
/* Pre-compute the absolute sample level above which the limiter engages. */
static av_cold int init(AVFilterContext *ctx)
{
    DCShiftContext *s = ctx->priv;
    const double headroom = 1.0 - (fabs(s->dcshift) - s->limitergain);

    s->limiterthreshold = INT32_MAX * headroom;

    return 0;
}
/* Negotiate planar S32 audio with any channel layout and sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_S32P, AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *fmts;
    int ret;

    /* any channel count / layout */
    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    /* planar signed 32-bit only */
    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, fmts)) < 0)
        return ret;

    /* any sample rate */
    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/* Apply the DC shift, with an optional limiter to avoid hard clipping. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    DCShiftContext *s = ctx->priv;
    const double dcshift = s->dcshift;
    AVFrame *out;

    if (!av_frame_is_writable(in)) {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    } else {
        out = in;
    }

    if (s->limitergain > 0) {
        /* shift with limiter: compress samples beyond the threshold
           instead of letting them clip */
        for (int ch = 0; ch < inlink->channels; ch++) {
            const int32_t *src = (int32_t *)in->extended_data[ch];
            int32_t *dst = (int32_t *)out->extended_data[ch];

            for (int n = 0; n < in->nb_samples; n++) {
                double d = src[n];

                if (d > s->limiterthreshold && dcshift > 0) {
                    d = (d - s->limiterthreshold) * s->limitergain /
                        (INT32_MAX - s->limiterthreshold) +
                        s->limiterthreshold + dcshift;
                } else if (d < -s->limiterthreshold && dcshift < 0) {
                    d = (d + s->limiterthreshold) * s->limitergain /
                        (INT32_MAX - s->limiterthreshold) -
                        s->limiterthreshold + dcshift;
                } else {
                    d = dcshift * INT32_MAX + d;
                }
                dst[n] = av_clipl_int32(d);
            }
        }
    } else {
        /* plain shift, clipped to the int32 range */
        for (int ch = 0; ch < inlink->channels; ch++) {
            const int32_t *src = (int32_t *)in->extended_data[ch];
            int32_t *dst = (int32_t *)out->extended_data[ch];

            for (int n = 0; n < in->nb_samples; n++)
                dst[n] = av_clipl_int32(dcshift * (INT32_MAX + 1.) + src[n]);
        }
    }

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
/* Single audio input. */
static const AVFilterPad dcshift_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Single audio output. */
static const AVFilterPad dcshift_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Filter definition; supports generic timeline enable/disable. */
AVFilter ff_af_dcshift = {
    .name = "dcshift",
    .description = NULL_IF_CONFIG_SMALL("Apply a DC shift to the audio."),
    .query_formats = query_formats,
    .priv_size = sizeof(DCShiftContext),
    .priv_class = &dcshift_class,
    .init = init,
    .inputs = dcshift_inputs,
    .outputs = dcshift_outputs,
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
};

244
externals/ffmpeg/libavfilter/af_deesser.c vendored Executable file
View File

@@ -0,0 +1,244 @@
/*
* Copyright (c) 2018 Chris Johnson
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in all
* copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
* OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*/
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
/* Per-channel filter state. */
typedef struct DeesserChannel {
    double s1, s2, s3;             // last three input samples
    double m1, m2;                 // slope-derived energy terms
    double ratioA, ratioB;         // attenuation ratios of the two interleaved paths
    double iirSampleA, iirSampleB; // one-pole IIR state of each path
    int flip;                      // alternates between the A and B paths per sample
} DeesserChannel;

typedef struct DeesserContext {
    const AVClass *class;
    double intensity;     // "i" option
    double max;           // "m" option, maximum de-essing amount
    double frequency;     // "f" option
    int mode;             // "s" option, one of enum OutModes
    DeesserChannel *chan; // per-channel state, allocated in config_input()
} DeesserContext;

/* What filter_frame() writes to the output. */
enum OutModes {
    IN_MODE,   // pass input through unchanged
    OUT_MODE,  // de-essed signal
    ESS_MODE,  // only the removed "ess" component
    NB_MODES
};

#define OFFSET(x) offsetof(DeesserContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption deesser_options[] = {
    { "i", "set intensity", OFFSET(intensity), AV_OPT_TYPE_DOUBLE, {.dbl=0.0}, 0.0, 1.0, A },
    { "m", "set max deessing", OFFSET(max), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0.0, 1.0, A },
    { "f", "set frequency", OFFSET(frequency), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0.0, 1.0, A },
    { "s", "set output mode", OFFSET(mode), AV_OPT_TYPE_INT, {.i64=OUT_MODE}, 0, NB_MODES-1, A, "mode" },
    { "i", "input", 0, AV_OPT_TYPE_CONST, {.i64=IN_MODE}, 0, 0, A, "mode" },
    { "o", "output", 0, AV_OPT_TYPE_CONST, {.i64=OUT_MODE}, 0, 0, A, "mode" },
    { "e", "ess", 0, AV_OPT_TYPE_CONST, {.i64=ESS_MODE}, 0, 0, A, "mode" },
    { NULL }
};
AVFILTER_DEFINE_CLASS(deesser);
/**
 * Negotiate planar double audio with any channel layout and sample rate.
 *
 * Fix: the ff_all_samplerates() result was passed on without a NULL
 * check, dereferencing NULL on allocation failure; the sibling filters
 * (e.g. dcshift, drmeter) check it.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Allocate and initialize one state record per input channel. */
static int config_input(AVFilterLink *inlink)
{
    DeesserContext *s = inlink->dst->priv;

    /* zero-initialized per-channel state */
    s->chan = av_calloc(inlink->channels, sizeof(*s->chan));
    if (!s->chan)
        return AVERROR(ENOMEM);

    /* gain ratios start at unity (no attenuation applied yet) */
    for (int ch = 0; ch < inlink->channels; ch++)
        s->chan[ch].ratioA = s->chan[ch].ratioB = 1.0;

    return 0;
}
/*
 * De-ess one frame. The detector alternates two one-pole IIR filters
 * (A/B) sample by sample; the state updates are order-dependent, so the
 * statement sequence below must not be rearranged.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    DeesserContext *s = ctx->priv;
    AVFrame *out;

    /* process in place when the input frame is writable */
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    for (int ch = 0; ch < inlink->channels; ch++) {
        DeesserChannel *dec = &s->chan[ch];
        double *src = (double *)in->extended_data[ch];
        double *dst = (double *)out->extended_data[ch];
        /* scale rate-dependent constants against a 44.1 kHz reference */
        double overallscale = inlink->sample_rate < 44100 ? 44100.0 / inlink->sample_rate : inlink->sample_rate / 44100.0;
        double intensity = pow(s->intensity, 5) * (8192 / overallscale);
        /* map the 0..1 "max" option onto an attenuation floor (up to -48 dB) */
        double maxdess = 1.0 / pow(10.0, ((s->max - 1.0) * 48.0) / 20);
        double iirAmount = pow(s->frequency, 2) / overallscale;
        double offset;
        double sense;
        double recovery;
        double attackspeed;

        for (int i = 0; i < in->nb_samples; i++) {
            double sample = src[i];

            /* three-sample history; m1/m2 estimate high-frequency slope energy */
            dec->s3 = dec->s2;
            dec->s2 = dec->s1;
            dec->s1 = sample;
            dec->m1 = (dec->s1 - dec->s2) * ((dec->s1 - dec->s2) / 1.3);
            dec->m2 = (dec->s2 - dec->s3) * ((dec->s1 - dec->s2) / 1.3);
            sense = (dec->m1 - dec->m2) * ((dec->m1 - dec->m2) / 1.3);
            attackspeed = 7.0 + sense * 1024;
            sense = 1.0 + intensity * intensity * sense;
            sense = FFMIN(sense, intensity);
            recovery = 1.0 + (0.01 / sense);

            offset = 1.0 - fabs(sample);

            /* alternate between the A and B filter paths every sample */
            if (dec->flip) {
                dec->iirSampleA = (dec->iirSampleA * (1.0 - (offset * iirAmount))) +
                                  (sample * (offset * iirAmount));
                /* raise the ratio while an "ess" is detected, decay otherwise */
                if (dec->ratioA < sense) {
                    dec->ratioA = ((dec->ratioA * attackspeed) + sense) / (attackspeed + 1.0);
                } else {
                    dec->ratioA = 1.0 + ((dec->ratioA - 1.0) / recovery);
                }
                dec->ratioA = FFMIN(dec->ratioA, maxdess);
                sample = dec->iirSampleA + ((sample - dec->iirSampleA) / dec->ratioA);
            } else {
                dec->iirSampleB = (dec->iirSampleB * (1.0 - (offset * iirAmount))) +
                                  (sample * (offset * iirAmount));
                if (dec->ratioB < sense) {
                    dec->ratioB = ((dec->ratioB * attackspeed) + sense) / (attackspeed + 1.0);
                } else {
                    dec->ratioB = 1.0 + ((dec->ratioB - 1.0) / recovery);
                }
                dec->ratioB = FFMIN(dec->ratioB, maxdess);
                sample = dec->iirSampleB + ((sample - dec->iirSampleB) / dec->ratioB);
            }

            dec->flip = !dec->flip;

            /* timeline disable: fall back to the dry sample */
            if (ctx->is_disabled)
                sample = src[i];

            switch (s->mode) {
            case IN_MODE: dst[i] = src[i]; break;
            case OUT_MODE: dst[i] = sample; break;
            case ESS_MODE: dst[i] = src[i] - sample; break;
            }
        }
    }

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
/* Release the per-channel state array. */
static av_cold void uninit(AVFilterContext *ctx)
{
    DeesserContext *priv = ctx->priv;

    av_freep(&priv->chan);
}
/* Single audio input: processes frames and hooks channel-state setup. */
static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

/* Single audio output. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Filter definition; timeline bypass is handled inside filter_frame(). */
AVFilter ff_af_deesser = {
    .name = "deesser",
    .description = NULL_IF_CONFIG_SMALL("Apply de-essing to the audio."),
    .query_formats = query_formats,
    .priv_size = sizeof(DeesserContext),
    .priv_class = &deesser_class,
    .uninit = uninit,
    .inputs = inputs,
    .outputs = outputs,
    .flags = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
};

233
externals/ffmpeg/libavfilter/af_drmeter.c vendored Executable file
View File

@@ -0,0 +1,233 @@
/*
* Copyright (c) 2018 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <float.h>
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
/* Per-channel measurement accumulators and per-block histograms. */
typedef struct ChannelStats {
    uint64_t nb_samples;   // samples accumulated in the current block
    uint64_t blknum;       // number of finished blocks
    float peak;            // peak of the current block
    float sum;             // sum of squared samples of the current block
    uint32_t peaks[10001]; // histogram of per-block peaks (bin = value * 10000)
    uint32_t rms[10001];   // histogram of per-block RMS values
} ChannelStats;

typedef struct DRMeterContext {
    const AVClass *class;
    ChannelStats *chstats; // one entry per channel, allocated in config_output()
    int nb_channels;
    uint64_t tc_samples;   // samples per analysis block
    double time_constant;  // "length" option: block length in seconds
} DRMeterContext;

#define OFFSET(x) offsetof(DRMeterContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

static const AVOption drmeter_options[] = {
    { "length", "set the window length", OFFSET(time_constant), AV_OPT_TYPE_DOUBLE, {.dbl=3}, .01, 10, FLAGS },
    { NULL }
};
AVFILTER_DEFINE_CLASS(drmeter);
/* Negotiate packed/planar float audio, any layout and sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_FLT,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *fmts;
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, fmts)) < 0)
        return ret;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/**
 * Allocate per-channel statistics and derive the block size in samples.
 *
 * Fix: av_calloc() takes (nmemb, size); the arguments were swapped
 * (harmless numerically, but inconsistent with the API contract and the
 * sibling filters).
 */
static int config_output(AVFilterLink *outlink)
{
    DRMeterContext *s = outlink->src->priv;

    s->chstats = av_calloc(outlink->channels, sizeof(*s->chstats));
    if (!s->chstats)
        return AVERROR(ENOMEM);
    s->nb_channels = outlink->channels;
    /* samples per analysis block, rounded to nearest */
    s->tc_samples = s->time_constant * outlink->sample_rate + .5;

    return 0;
}
/*
 * Close the current analysis block: convert its accumulated peak and RMS
 * into histogram bins and reset the per-block accumulators.
 *
 * NOTE(review): if called with nb_samples == 0 the 0/0 division yields
 * NaN before binning — confirm callers always feed a non-empty block.
 */
static void finish_block(ChannelStats *p)
{
    int peak_bin, rms_bin;
    float peak, rms;

    /* NOTE(review): the factor 2 presumably matches the DR measurement
       convention (RMS * sqrt(2)) — verify against the spec */
    rms = sqrt(2 * p->sum / p->nb_samples);
    peak = p->peak;
    /* quantize into 0..10000 histogram bins */
    rms_bin = av_clip(rms * 10000, 0, 10000);
    peak_bin = av_clip(peak * 10000, 0, 10000);
    p->rms[rms_bin]++;
    p->peaks[peak_bin]++;

    p->peak = 0;
    p->sum = 0;
    p->nb_samples = 0;
    p->blknum++;
}
/* Fold one sample into the running block statistics. */
static void update_stat(DRMeterContext *s, ChannelStats *p, float sample)
{
    /* close the block once it holds tc_samples samples */
    if (p->nb_samples >= s->tc_samples)
        finish_block(p);

    p->peak = FFMAX(FFABS(sample), p->peak);
    p->sum += sample * sample;
    p->nb_samples++;
}
/* Accumulate statistics for every sample, then pass the frame through. */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    DRMeterContext *s = inlink->dst->priv;
    const int channels = s->nb_channels;

    switch (inlink->format) {
    case AV_SAMPLE_FMT_FLTP:
        /* planar: one contiguous run per channel */
        for (int ch = 0; ch < channels; ch++) {
            const float *src = (const float *)buf->extended_data[ch];
            ChannelStats *p = &s->chstats[ch];

            for (int n = 0; n < buf->nb_samples; n++)
                update_stat(s, p, src[n]);
        }
        break;
    case AV_SAMPLE_FMT_FLT: {
        /* packed: samples interleave across channels */
        const float *src = (const float *)buf->extended_data[0];

        for (int n = 0; n < buf->nb_samples; n++)
            for (int ch = 0; ch < channels; ch++)
                update_stat(s, &s->chstats[ch], *src++);
        break;
    }
    }

    return ff_filter_frame(inlink->dst->outputs[0], buf);
}
#define SQR(a) ((a)*(a))
/*
 * Compute and log the dynamic range per channel (second-highest peak bin
 * over the RMS of the loudest 20% of blocks) plus the overall average.
 *
 * NOTE(review): if fewer than two peak bins are populated, the first loop
 * exits with i == 10001 and secondpeak goes negative, making log10()
 * produce NaN — confirm inputs always yield at least two distinct peaks.
 */
static void print_stats(AVFilterContext *ctx)
{
    DRMeterContext *s = ctx->priv;
    float dr = 0;
    int ch;

    for (ch = 0; ch < s->nb_channels; ch++) {
        ChannelStats *p = &s->chstats[ch];
        float chdr, secondpeak, rmssum = 0;
        int i, j, first = 0;

        /* flush the partially filled last block */
        finish_block(p);

        /* scan from the top of the histogram for the second nonzero peak bin */
        for (i = 0; i <= 10000; i++) {
            if (p->peaks[10000 - i]) {
                if (first)
                    break;
                first = 1;
            }
        }

        secondpeak = (10000 - i) / 10000.;

        /* sum squared RMS over the loudest 20% of blocks */
        for (i = 10000, j = 0; i >= 0 && j < 0.2 * p->blknum; i--) {
            if (p->rms[i]) {
                rmssum += SQR(i / 10000.) * p->rms[i];
                j += p->rms[i];
            }
        }

        chdr = 20 * log10(secondpeak / sqrt(rmssum / (0.2 * p->blknum)));
        dr += chdr;
        av_log(ctx, AV_LOG_INFO, "Channel %d: DR: %.1f\n", ch + 1, chdr);
    }

    av_log(ctx, AV_LOG_INFO, "Overall DR: %.1f\n", dr / s->nb_channels);
}
/* Report the measurement (if any frames were seen) and free state. */
static av_cold void uninit(AVFilterContext *ctx)
{
    DRMeterContext *priv = ctx->priv;

    /* nb_channels is only nonzero once config_output() has run */
    if (priv->nb_channels)
        print_stats(ctx);
    av_freep(&priv->chstats);
}
/* Single audio input. */
static const AVFilterPad drmeter_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Single audio output; allocation happens on output configuration. */
static const AVFilterPad drmeter_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

/* Measurement-only filter: audio passes through unchanged. */
AVFilter ff_af_drmeter = {
    .name = "drmeter",
    .description = NULL_IF_CONFIG_SMALL("Measure audio dynamic range."),
    .query_formats = query_formats,
    .priv_size = sizeof(DRMeterContext),
    .priv_class = &drmeter_class,
    .uninit = uninit,
    .inputs = drmeter_inputs,
    .outputs = drmeter_outputs,
};

883
externals/ffmpeg/libavfilter/af_dynaudnorm.c vendored Executable file
View File

@@ -0,0 +1,883 @@
/*
* Dynamic Audio Normalizer
* Copyright (c) 2015 LoRd_MuldeR <mulder2@gmx.de>. Some rights reserved.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Dynamic Audio Normalizer
*/
#include <float.h>
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#define MIN_FILTER_SIZE 3
#define MAX_FILTER_SIZE 301
#define FF_BUFQUEUE_SIZE (MAX_FILTER_SIZE + 1)
#include "libavfilter/bufferqueue.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
/* Gain limit and threshold flag computed for one frame/channel. */
typedef struct local_gain {
    double max_gain;  // soft-bounded maximum amplification factor
    double threshold; // nonzero if the frame peak exceeded the threshold option
} local_gain;

/* Fixed-capacity FIFO of doubles used for the gain smoothing histories. */
typedef struct cqueue {
    double *elements; // storage, max_size entries
    int size;         // logical size (filter window length)
    int max_size;     // allocated capacity
    int nb_elements;  // number of valid entries
} cqueue;

typedef struct DynamicAudioNormalizerContext {
    const AVClass *class;

    struct FFBufQueue queue; // delayed input frames awaiting a full gain window

    int frame_len;           // frame length in samples (derived from frame_len_msec)
    int frame_len_msec;      // "framelen" option
    int filter_size;         // "gausssize" option, forced odd in init()
    int dc_correction;       // "correctdc" option
    int channels_coupled;    // "coupling" option
    int alt_boundary_mode;   // "altboundary" option

    double peak_value;        // "peak" option
    double max_amplification; // "maxgain" option
    double target_rms;        // "targetrms" option
    double compress_factor;   // "compress" option
    double threshold;         // "threshold" option

    double *prev_amplification_factor; // per channel
    double *dc_correction_value;       // per channel
    double *compress_threshold;        // per channel
    double *weights;                   // Gaussian kernel, filter_size taps

    int channels;
    int eof;     // input exhausted
    int64_t pts; // next output timestamp

    cqueue **gain_history_original; // per channel
    cqueue **gain_history_minimum;  // per channel
    cqueue **gain_history_smoothed; // per channel
    cqueue **threshold_history;     // per channel

    cqueue *is_enabled; // presumably tracks timeline enable per queued frame — confirm
} DynamicAudioNormalizerContext;
#define OFFSET(x) offsetof(DynamicAudioNormalizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
/* Options; each has a long and a single-letter alias, all runtime-settable. */
static const AVOption dynaudnorm_options[] = {
    { "framelen", "set the frame length in msec", OFFSET(frame_len_msec), AV_OPT_TYPE_INT, {.i64 = 500}, 10, 8000, FLAGS },
    { "f", "set the frame length in msec", OFFSET(frame_len_msec), AV_OPT_TYPE_INT, {.i64 = 500}, 10, 8000, FLAGS },
    { "gausssize", "set the filter size", OFFSET(filter_size), AV_OPT_TYPE_INT, {.i64 = 31}, 3, 301, FLAGS },
    { "g", "set the filter size", OFFSET(filter_size), AV_OPT_TYPE_INT, {.i64 = 31}, 3, 301, FLAGS },
    { "peak", "set the peak value", OFFSET(peak_value), AV_OPT_TYPE_DOUBLE, {.dbl = 0.95}, 0.0, 1.0, FLAGS },
    { "p", "set the peak value", OFFSET(peak_value), AV_OPT_TYPE_DOUBLE, {.dbl = 0.95}, 0.0, 1.0, FLAGS },
    { "maxgain", "set the max amplification", OFFSET(max_amplification), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0}, 1.0, 100.0, FLAGS },
    { "m", "set the max amplification", OFFSET(max_amplification), AV_OPT_TYPE_DOUBLE, {.dbl = 10.0}, 1.0, 100.0, FLAGS },
    { "targetrms", "set the target RMS", OFFSET(target_rms), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS },
    { "r", "set the target RMS", OFFSET(target_rms), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS },
    { "coupling", "set channel coupling", OFFSET(channels_coupled), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
    { "n", "set channel coupling", OFFSET(channels_coupled), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
    { "correctdc", "set DC correction", OFFSET(dc_correction), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "c", "set DC correction", OFFSET(dc_correction), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "altboundary", "set alternative boundary mode", OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "b", "set alternative boundary mode", OFFSET(alt_boundary_mode), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "compress", "set the compress factor", OFFSET(compress_factor), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 30.0, FLAGS },
    { "s", "set the compress factor", OFFSET(compress_factor), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 30.0, FLAGS },
    { "threshold", "set the threshold value", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS },
    { "t", "set the threshold value", OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl = 0.0}, 0.0, 1.0, FLAGS },
    { NULL }
};
AVFILTER_DEFINE_CLASS(dynaudnorm);
/* Force the Gaussian smoothing window to an odd size (defined center tap). */
static av_cold int init(AVFilterContext *ctx)
{
    DynamicAudioNormalizerContext *s = ctx->priv;

    if ((s->filter_size & 1) == 0) {
        av_log(ctx, AV_LOG_WARNING, "filter size %d is invalid. Changing to an odd value.\n", s->filter_size);
        s->filter_size |= 1;
    }

    return 0;
}
/* Negotiate planar double audio, any channel layout and sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *fmts;
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, fmts)) < 0)
        return ret;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
static inline int frame_size(int sample_rate, int frame_len_msec)
{
const int frame_size = lrint((double)sample_rate * (frame_len_msec / 1000.0));
return frame_size + (frame_size % 2);
}
/* Allocate a queue with logical size `size` and capacity `max_size`;
 * returns NULL on invalid arguments or allocation failure. */
static cqueue *cqueue_create(int size, int max_size)
{
    cqueue *q;

    if (max_size < size)
        return NULL;

    q = av_malloc(sizeof(*q));
    if (!q)
        return NULL;

    q->max_size = max_size;
    q->size = size;
    q->nb_elements = 0;

    q->elements = av_malloc_array(max_size, sizeof(*q->elements));
    if (!q->elements) {
        av_free(q);
        return NULL;
    }

    return q;
}
/* Free a queue and its storage; a NULL queue is a no-op. */
static void cqueue_free(cqueue *q)
{
    if (!q)
        return;
    av_free(q->elements);
    av_free(q);
}
/* Number of elements currently stored. */
static int cqueue_size(cqueue *q)
{
    return q->nb_elements;
}

/* Nonzero when the queue holds no elements. */
static int cqueue_empty(cqueue *q)
{
    return q->nb_elements <= 0;
}
/* Append one value at the tail; the caller guarantees free capacity. */
static int cqueue_enqueue(cqueue *q, double element)
{
    av_assert2(q->nb_elements < q->max_size);

    q->elements[q->nb_elements++] = element;

    return 0;
}

/* Read the value at `index` without removing it. */
static double cqueue_peek(cqueue *q, int index)
{
    av_assert2(index < q->nb_elements);
    return q->elements[index];
}

/* Remove the head value, storing it in *element. */
static int cqueue_dequeue(cqueue *q, double *element)
{
    av_assert2(!cqueue_empty(q));

    *element = q->elements[0];
    memmove(q->elements, q->elements + 1, (q->nb_elements - 1) * sizeof(*q->elements));
    q->nb_elements--;

    return 0;
}

/* Remove the head value without returning it. */
static int cqueue_pop(cqueue *q)
{
    av_assert2(!cqueue_empty(q));

    memmove(q->elements, q->elements + 1, (q->nb_elements - 1) * sizeof(*q->elements));
    q->nb_elements--;

    return 0;
}
/*
 * Change the queue's logical size while keeping its contents roughly
 * centered: growing shifts the data right and pre-fills the front by
 * replicating the first retained element; shrinking drops elements from
 * the head.
 */
static void cqueue_resize(cqueue *q, int new_size)
{
    av_assert2(q->max_size >= new_size);
    av_assert2(MIN_FILTER_SIZE <= new_size);

    if (new_size > q->nb_elements) {
        const int side = (new_size - q->nb_elements) / 2;

        memmove(q->elements + side, q->elements, sizeof(double) * q->nb_elements);
        for (int i = 0; i < side; i++)
            q->elements[i] = q->elements[side];
        /* NOTE(review): set to new_size - 1 - side rather than new_size —
           presumably leaves room for the in-flight value; confirm against
           the callers before changing. */
        q->nb_elements = new_size - 1 - side;
    } else {
        int count = (q->size - new_size + 1) / 2;

        while (count-- > 0)
            cqueue_pop(q);
    }

    q->size = new_size;
}
/**
 * Fill s->weights with a normalized Gaussian window of s->filter_size taps.
 */
static void init_gaussian_filter(DynamicAudioNormalizerContext *s)
{
    const double sigma = (((s->filter_size / 2.0) - 1.0) / 3.0) + (1.0 / 3.0);
    /* pre-computed constants, kernel centered on the middle tap */
    const int offset = s->filter_size / 2;
    const double c1 = 1.0 / (sigma * sqrt(2.0 * M_PI));
    const double c2 = 2.0 * sigma * sigma;
    double total_weight = 0.0;
    double adjust;

    /* sample the Gaussian and accumulate the raw sum */
    for (int i = 0; i < s->filter_size; i++) {
        const int x = i - offset;

        s->weights[i] = c1 * exp(-x * x / c2);
        total_weight += s->weights[i];
    }

    /* normalize so the taps sum to 1.0 */
    adjust = 1.0 / total_weight;
    for (int i = 0; i < s->filter_size; i++)
        s->weights[i] *= adjust;
}
/* Release every allocation owned by the context; safe to call before
 * config_input() has run (the queue arrays may still be NULL). */
static av_cold void uninit(AVFilterContext *ctx)
{
    DynamicAudioNormalizerContext *s = ctx->priv;

    av_freep(&s->prev_amplification_factor);
    av_freep(&s->dc_correction_value);
    av_freep(&s->compress_threshold);

    for (int c = 0; c < s->channels; c++) {
        if (s->gain_history_original)
            cqueue_free(s->gain_history_original[c]);
        if (s->gain_history_minimum)
            cqueue_free(s->gain_history_minimum[c]);
        if (s->gain_history_smoothed)
            cqueue_free(s->gain_history_smoothed[c]);
        if (s->threshold_history)
            cqueue_free(s->threshold_history[c]);
    }

    av_freep(&s->gain_history_original);
    av_freep(&s->gain_history_minimum);
    av_freep(&s->gain_history_smoothed);
    av_freep(&s->threshold_history);

    cqueue_free(s->is_enabled);
    s->is_enabled = NULL;

    av_freep(&s->weights);

    /* drop any input frames still waiting in the delay queue */
    ff_bufqueue_discard_all(&s->queue);
}
/*
 * Allocate all per-channel state once the channel count and sample rate
 * are known. On partial allocation failure, uninit() (called by the
 * framework) releases whatever was allocated.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    DynamicAudioNormalizerContext *s = ctx->priv;
    int c;

    /* drop any state from a previous configuration */
    uninit(ctx);

    s->channels = inlink->channels;
    s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec);
    av_log(ctx, AV_LOG_DEBUG, "frame len %d\n", s->frame_len);

    s->prev_amplification_factor = av_malloc_array(inlink->channels, sizeof(*s->prev_amplification_factor));
    s->dc_correction_value = av_calloc(inlink->channels, sizeof(*s->dc_correction_value));
    s->compress_threshold = av_calloc(inlink->channels, sizeof(*s->compress_threshold));
    s->gain_history_original = av_calloc(inlink->channels, sizeof(*s->gain_history_original));
    s->gain_history_minimum = av_calloc(inlink->channels, sizeof(*s->gain_history_minimum));
    s->gain_history_smoothed = av_calloc(inlink->channels, sizeof(*s->gain_history_smoothed));
    s->threshold_history = av_calloc(inlink->channels, sizeof(*s->threshold_history));
    s->weights = av_malloc_array(MAX_FILTER_SIZE, sizeof(*s->weights));
    s->is_enabled = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
    if (!s->prev_amplification_factor || !s->dc_correction_value ||
        !s->compress_threshold ||
        !s->gain_history_original || !s->gain_history_minimum ||
        !s->gain_history_smoothed || !s->threshold_history ||
        !s->is_enabled || !s->weights)
        return AVERROR(ENOMEM);

    for (c = 0; c < inlink->channels; c++) {
        /* unity gain until the first window is processed */
        s->prev_amplification_factor[c] = 1.0;

        s->gain_history_original[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
        s->gain_history_minimum[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
        s->gain_history_smoothed[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);
        s->threshold_history[c] = cqueue_create(s->filter_size, MAX_FILTER_SIZE);

        if (!s->gain_history_original[c] || !s->gain_history_minimum[c] ||
            !s->gain_history_smoothed[c] || !s->threshold_history[c])
            return AVERROR(ENOMEM);
    }

    /* precompute the Gaussian smoothing kernel */
    init_gaussian_filter(s);

    return 0;
}
/**
 * Linearly crossfade between two values over `length` samples.
 * At pos == 0 the weight of `next` is 1/length; at pos == length-1 it is 1.
 */
static inline double fade(double prev, double next, int pos, int length)
{
    const double inv_len = 1.0 / length;
    const double w_prev  = 1.0 - inv_len * (pos + 1.0);
    const double w_next  = 1.0 - w_prev;

    return w_prev * prev + w_next * next;
}
/** Square a value (cheaper and clearer than calling pow(value, 2)). */
static inline double pow_2(const double value)
{
    const double v = value;
    return v * v;
}
static inline double bound(const double threshold, const double val)
{
const double CONST = 0.8862269254527580136490837416705725913987747280611935; //sqrt(PI) / 2.0
return erf(CONST * (val / threshold)) * threshold;
}
/**
 * Return the largest absolute sample value in one channel of planar-double
 * audio, or across all channels when channel == -1.  Never returns less than
 * DBL_EPSILON so callers can safely divide by the result.
 */
static double find_peak_magnitude(AVFrame *frame, int channel)
{
    double peak = DBL_EPSILON;
    int c, i;

    if (channel == -1) {
        for (c = 0; c < frame->channels; c++) {
            const double *samples = (double *)frame->extended_data[c];

            for (i = 0; i < frame->nb_samples; i++)
                peak = FFMAX(peak, fabs(samples[i]));
        }
    } else {
        const double *samples = (double *)frame->extended_data[channel];

        for (i = 0; i < frame->nb_samples; i++)
            peak = FFMAX(peak, fabs(samples[i]));
    }

    return peak;
}
/**
 * Root-mean-square of one channel (or of all channels when channel == -1)
 * of planar-double audio.  Clamped to at least DBL_EPSILON so the result is
 * safe to use as a divisor.
 */
static double compute_frame_rms(AVFrame *frame, int channel)
{
    double sum_sq = 0.0;
    int c, i;

    if (channel == -1) {
        for (c = 0; c < frame->channels; c++) {
            const double *samples = (double *)frame->extended_data[c];

            for (i = 0; i < frame->nb_samples; i++)
                sum_sq += pow_2(samples[i]);
        }

        sum_sq /= frame->nb_samples * frame->channels;
    } else {
        const double *samples = (double *)frame->extended_data[channel];

        for (i = 0; i < frame->nb_samples; i++)
            sum_sq += pow_2(samples[i]);

        sum_sq /= frame->nb_samples;
    }

    return FFMAX(sqrt(sum_sq), DBL_EPSILON);
}
/**
 * Compute the maximum gain that may be applied to this frame: the smaller of
 * the peak-limited gain and (if target_rms is set) the RMS-derived gain,
 * softly clamped to max_amplification via bound().  The threshold flag marks
 * frames whose peak exceeds s->threshold.
 */
static local_gain get_max_local_gain(DynamicAudioNormalizerContext *s, AVFrame *frame,
                                     int channel)
{
    const double peak      = find_peak_magnitude(frame, channel);
    const double peak_gain = s->peak_value / peak;
    const double rms_gain  = s->target_rms > DBL_EPSILON
                           ? (s->target_rms / compute_frame_rms(frame, channel))
                           : DBL_MAX;
    local_gain gain;

    gain.max_gain  = bound(s->max_amplification, FFMIN(peak_gain, rms_gain));
    gain.threshold = peak > s->threshold;

    return gain;
}
/** Return the smallest value currently stored in the queue. */
static double minimum_filter(cqueue *q)
{
    double smallest = DBL_MAX;
    int i;

    for (i = 0; i < cqueue_size(q); i++)
        smallest = FFMIN(smallest, cqueue_peek(q, i));

    return smallest;
}
/**
 * Gaussian-weighted sum of the gain queue `q`, additionally weighted by the
 * parallel threshold queue `tq`.  If every threshold weight is zero the
 * neutral gain 1.0 is returned.
 */
static double gaussian_filter(DynamicAudioNormalizerContext *s, cqueue *q, cqueue *tq)
{
    double weighted_sum = 0.0, weight_total = 0.0;
    int i;

    for (i = 0; i < cqueue_size(q); i++) {
        weight_total += cqueue_peek(tq, i) * s->weights[i];
        weighted_sum += cqueue_peek(q, i) * s->weights[i] * cqueue_peek(tq, i);
    }

    return weight_total == 0.0 ? 1.0 : weighted_sum;
}
/*
 * Push one frame's gain measurement for `channel` through the three-stage
 * smoothing pipeline: raw history -> sliding-minimum history -> Gaussian-
 * smoothed history (the queue later consumed by amplify_frame()).
 */
static void update_gain_history(DynamicAudioNormalizerContext *s, int channel,
                                local_gain gain)
{
    if (cqueue_empty(s->gain_history_original[channel])) {
        /* First frame on this channel: pre-fill half a filter window so the
         * centered smoothing filter has history from the very start. */
        const int pre_fill_size = s->filter_size / 2;
        const double initial_value = s->alt_boundary_mode ? gain.max_gain : s->peak_value;
        s->prev_amplification_factor[channel] = initial_value;
        while (cqueue_size(s->gain_history_original[channel]) < pre_fill_size) {
            cqueue_enqueue(s->gain_history_original[channel], initial_value);
            cqueue_enqueue(s->threshold_history[channel], gain.threshold);
        }
    }
    cqueue_enqueue(s->gain_history_original[channel], gain.max_gain);
    while (cqueue_size(s->gain_history_original[channel]) >= s->filter_size) {
        double minimum;
        if (cqueue_empty(s->gain_history_minimum[channel])) {
            /* Pre-fill the minimum history with running minima over the raw
             * history (starting from 1.0 unless alt_boundary_mode is set). */
            const int pre_fill_size = s->filter_size / 2;
            double initial_value = s->alt_boundary_mode ? cqueue_peek(s->gain_history_original[channel], 0) : 1.0;
            int input = pre_fill_size;
            while (cqueue_size(s->gain_history_minimum[channel]) < pre_fill_size) {
                input++;
                initial_value = FFMIN(initial_value, cqueue_peek(s->gain_history_original[channel], input));
                cqueue_enqueue(s->gain_history_minimum[channel], initial_value);
            }
        }
        /* Sliding minimum over the full raw window, then shift the window. */
        minimum = minimum_filter(s->gain_history_original[channel]);
        cqueue_enqueue(s->gain_history_minimum[channel], minimum);
        cqueue_enqueue(s->threshold_history[channel], gain.threshold);
        cqueue_pop(s->gain_history_original[channel]);
    }
    while (cqueue_size(s->gain_history_minimum[channel]) >= s->filter_size) {
        double smoothed, limit;
        /* Gaussian smoothing of the minimum history, clamped to the oldest
         * raw gain in the window so it never exceeds the measured limit. */
        smoothed = gaussian_filter(s, s->gain_history_minimum[channel], s->threshold_history[channel]);
        limit = cqueue_peek(s->gain_history_original[channel], 0);
        smoothed = FFMIN(smoothed, limit);
        cqueue_enqueue(s->gain_history_smoothed[channel], smoothed);
        cqueue_pop(s->gain_history_minimum[channel]);
        cqueue_pop(s->threshold_history[channel]);
    }
}
/**
 * Exponential-style blend of a new measurement into a running value.
 * aggressiveness == 1.0 adopts the new value outright; 0.0 keeps the old one.
 */
static inline double update_value(double new, double old, double aggressiveness)
{
    av_assert0((aggressiveness >= 0.0) && (aggressiveness <= 1.0));

    {
        const double keep = 1.0 - aggressiveness;
        return aggressiveness * new + keep * old;
    }
}
/**
 * Remove DC offset from each channel in place.  The per-channel mean is
 * tracked across frames (10% blend per frame) and the correction is
 * crossfaded over the frame to avoid steps at frame boundaries.
 */
static void perform_dc_correction(DynamicAudioNormalizerContext *s, AVFrame *frame)
{
    const double scale = 1.0 / frame->nb_samples;
    int is_first_frame = cqueue_empty(s->gain_history_original[0]);
    int c, i;

    for (c = 0; c < s->channels; c++) {
        double *samples = (double *)frame->extended_data[c];
        double mean = 0.0;
        double prev_mean;

        for (i = 0; i < frame->nb_samples; i++)
            mean += samples[i] * scale;

        prev_mean = is_first_frame ? mean : s->dc_correction_value[c];
        s->dc_correction_value[c] = is_first_frame
                                  ? mean
                                  : update_value(mean, s->dc_correction_value[c], 0.1);

        for (i = 0; i < frame->nb_samples; i++)
            samples[i] -= fade(prev_mean, s->dc_correction_value[c], i, frame->nb_samples);
    }
}
/*
 * Refine a compression threshold in (0, 1): step upward by progressively
 * halved step sizes to the largest value whose bound(value, 1.0) still stays
 * at or below the requested threshold.  The llrint(x * 2^63) comparison
 * accepts a step only if it actually changes the value at (close to) full
 * double precision, so the inner loop always terminates.  Inputs outside
 * (DBL_EPSILON, 1 - DBL_EPSILON) are returned unchanged.
 */
static double setup_compress_thresh(double threshold)
{
    if ((threshold > DBL_EPSILON) && (threshold < (1.0 - DBL_EPSILON))) {
        double current_threshold = threshold;
        double step_size = 1.0;
        while (step_size > DBL_EPSILON) {
            while ((llrint((current_threshold + step_size) * (UINT64_C(1) << 63)) >
                    llrint(current_threshold * (UINT64_C(1) << 63))) &&
                   (bound(current_threshold + step_size, 1.0) <= threshold)) {
                current_threshold += step_size;
            }
            step_size /= 2.0;
        }
        return current_threshold;
    } else {
        return threshold;
    }
}
/**
 * Sample standard deviation of one channel (all channels when channel == -1),
 * assuming a zero mean.  Uses the (n - 1) Bessel correction and is clamped to
 * at least DBL_EPSILON.
 */
static double compute_frame_std_dev(DynamicAudioNormalizerContext *s,
                                    AVFrame *frame, int channel)
{
    double sum_sq = 0.0;
    int c, i;

    if (channel == -1) {
        for (c = 0; c < s->channels; c++) {
            const double *samples = (double *)frame->extended_data[c];

            for (i = 0; i < frame->nb_samples; i++)
                sum_sq += pow_2(samples[i]); // Assume that MEAN is *zero*
        }

        sum_sq /= (s->channels * frame->nb_samples) - 1;
    } else {
        const double *samples = (double *)frame->extended_data[channel];

        for (i = 0; i < frame->nb_samples; i++)
            sum_sq += pow_2(samples[i]); // Assume that MEAN is *zero*

        sum_sq /= frame->nb_samples - 1;
    }

    return FFMAX(sqrt(sum_sq), DBL_EPSILON);
}
/**
 * Soft-knee compression pass.  Each sample is limited via bound() toward a
 * threshold derived from the frame's standard deviation; the threshold is
 * smoothed across frames (1/3 blend) and crossfaded within the frame.
 * Coupled mode shares one threshold across channels, otherwise each channel
 * gets its own.
 */
static void perform_compression(DynamicAudioNormalizerContext *s, AVFrame *frame)
{
    const int is_first_frame = cqueue_empty(s->gain_history_original[0]);
    int c, i;

    if (s->channels_coupled) {
        const double std_dev = compute_frame_std_dev(s, frame, -1);
        const double current_threshold = FFMIN(1.0, s->compress_factor * std_dev);
        const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[0];
        double prev_actual_thresh, curr_actual_thresh;

        s->compress_threshold[0] = is_first_frame
                                 ? current_threshold
                                 : update_value(current_threshold, s->compress_threshold[0], (1.0/3.0));

        prev_actual_thresh = setup_compress_thresh(prev_value);
        curr_actual_thresh = setup_compress_thresh(s->compress_threshold[0]);

        for (c = 0; c < s->channels; c++) {
            double *const samples = (double *)frame->extended_data[c];

            for (i = 0; i < frame->nb_samples; i++) {
                const double local_thresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
                samples[i] = copysign(bound(local_thresh, fabs(samples[i])), samples[i]);
            }
        }
    } else {
        for (c = 0; c < s->channels; c++) {
            const double std_dev = compute_frame_std_dev(s, frame, c);
            const double current_threshold = setup_compress_thresh(FFMIN(1.0, s->compress_factor * std_dev));
            const double prev_value = is_first_frame ? current_threshold : s->compress_threshold[c];
            double prev_actual_thresh, curr_actual_thresh;
            double *samples;

            s->compress_threshold[c] = is_first_frame
                                     ? current_threshold
                                     : update_value(current_threshold, s->compress_threshold[c], 1.0/3.0);

            prev_actual_thresh = setup_compress_thresh(prev_value);
            curr_actual_thresh = setup_compress_thresh(s->compress_threshold[c]);

            samples = (double *)frame->extended_data[c];
            for (i = 0; i < frame->nb_samples; i++) {
                const double local_thresh = fade(prev_actual_thresh, curr_actual_thresh, i, frame->nb_samples);
                samples[i] = copysign(bound(local_thresh, fabs(samples[i])), samples[i]);
            }
        }
    }
}
/**
 * Per-frame analysis: optional DC correction and compression (both mutate
 * the frame in place), then record the measured gain into every channel's
 * history.  Coupled mode measures once across channels and shares the gain.
 */
static void analyze_frame(DynamicAudioNormalizerContext *s, AVFrame *frame)
{
    int c;

    if (s->dc_correction)
        perform_dc_correction(s, frame);

    if (s->compress_factor > DBL_EPSILON)
        perform_compression(s, frame);

    if (s->channels_coupled) {
        const local_gain gain = get_max_local_gain(s, frame, -1);

        for (c = 0; c < s->channels; c++)
            update_gain_history(s, c, gain);
    } else {
        for (c = 0; c < s->channels; c++)
            update_gain_history(s, c, get_max_local_gain(s, frame, c));
    }
}
/**
 * Apply the next smoothed gain to the frame, crossfading from the previous
 * frame's gain across the samples.  When `enabled` is false the queued gain
 * is still consumed (keeping the pipeline in sync) but samples are untouched.
 */
static void amplify_frame(DynamicAudioNormalizerContext *s, AVFrame *frame, int enabled)
{
    int c, i;

    for (c = 0; c < s->channels; c++) {
        double *samples = (double *)frame->extended_data[c];
        double target_factor;

        cqueue_dequeue(s->gain_history_smoothed[c], &target_factor);

        for (i = 0; i < frame->nb_samples && enabled; i++) {
            const double factor = fade(s->prev_amplification_factor[c],
                                       target_factor, i,
                                       frame->nb_samples);
            samples[i] *= factor;
        }

        s->prev_amplification_factor[c] = target_factor;
    }
}
/**
 * Accept one analysis frame: first emit every queued frame whose smoothed
 * gain is available, then analyze the new frame and (unless at EOF) queue it
 * for delayed output.  Returns the last ff_filter_frame() result, 1 if
 * nothing was emitted, or a negative AVERROR.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    DynamicAudioNormalizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret = 1;
    int wret;

    /* Drain frames whose lookahead window is complete (or everything at EOF). */
    while (((s->queue.available >= s->filter_size) ||
            (s->eof && s->queue.available)) &&
           !cqueue_empty(s->gain_history_smoothed[0])) {
        AVFrame *out = ff_bufqueue_get(&s->queue);
        double is_enabled;

        cqueue_dequeue(s->is_enabled, &is_enabled);
        amplify_frame(s, out, is_enabled > 0.);
        ret = ff_filter_frame(outlink, out);
    }

    /* Bug fix: the return value of av_frame_make_writable() was ignored.
     * analyze_frame() mutates samples in place, so on failure we must bail
     * out instead of writing into a potentially shared frame. */
    wret = av_frame_make_writable(in);
    if (wret < 0) {
        av_frame_free(&in);
        return wret;
    }

    analyze_frame(s, in);
    if (!s->eof) {
        /* Hold the frame until its smoothing window fills up. */
        ff_bufqueue_add(ctx, &s->queue, in);
        cqueue_enqueue(s->is_enabled, !ctx->is_disabled);
    } else {
        av_frame_free(&in);
    }
    return ret;
}
/**
 * At EOF, synthesize one frame of boundary samples and run it through
 * filter_frame() so the remaining queued frames can be amplified and
 * flushed.  With dc_correction enabled the fill alternates sign and carries
 * the tracked DC offset.
 */
static int flush_buffer(DynamicAudioNormalizerContext *s, AVFilterLink *inlink,
                        AVFilterLink *outlink)
{
    AVFrame *out = ff_get_audio_buffer(outlink, s->frame_len);
    int c, i;

    if (!out)
        return AVERROR(ENOMEM);

    for (c = 0; c < s->channels; c++) {
        double *samples = (double *)out->extended_data[c];

        for (i = 0; i < out->nb_samples; i++) {
            samples[i] = s->alt_boundary_mode
                       ? DBL_EPSILON
                       : ((s->target_rms > DBL_EPSILON) ? FFMIN(s->peak_value, s->target_rms) : s->peak_value);
            if (s->dc_correction) {
                samples[i] *= ((i % 2) == 1) ? -1 : 1;
                samples[i] += s->dc_correction_value[c];
            }
        }
    }

    return filter_frame(inlink, out);
}
/**
 * EOF drain step: push one more synthetic frame while smoothed gains remain,
 * otherwise emit the next queued frame unmodified (recording its pts for the
 * final EOF status).
 */
static int flush(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    DynamicAudioNormalizerContext *s = ctx->priv;

    if (!cqueue_empty(s->gain_history_smoothed[0]))
        return flush_buffer(s, ctx->inputs[0], outlink);

    if (s->queue.available) {
        AVFrame *out = ff_bufqueue_get(&s->queue);

        s->pts = out->pts;
        return ff_filter_frame(outlink, out);
    }

    return 0;
}
/*
 * Scheduling entry point (activate API): consume fixed frame_len-sample
 * chunks from the input, detect EOF, and drain the lookahead queue before
 * propagating EOF downstream.
 */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    DynamicAudioNormalizerContext *s = ctx->priv;
    AVFrame *in = NULL;
    int ret = 0, status;
    int64_t pts;
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
    if (!s->eof) {
        /* Request exactly frame_len samples (min == max) per activation. */
        ret = ff_inlink_consume_samples(inlink, s->frame_len, s->frame_len, &in);
        if (ret < 0)
            return ret;
        if (ret > 0) {
            ret = filter_frame(inlink, in);
            if (ret <= 0)
                return ret;
        }
        if (ff_inlink_queued_samples(inlink) >= s->frame_len) {
            /* Another full frame is already buffered: reschedule ourselves. */
            ff_filter_set_ready(ctx, 10);
            return 0;
        }
    }
    if (!s->eof && ff_inlink_acknowledge_status(inlink, &status, &pts)) {
        if (status == AVERROR_EOF)
            s->eof = 1;
    }
    /* After EOF, flush the delayed frames one activation at a time. */
    if (s->eof && s->queue.available)
        return flush(outlink);
    if (s->eof && !s->queue.available) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
        return 0;
    }
    if (!s->eof)
        FF_FILTER_FORWARD_WANTED(outlink, inlink);
    return FFERROR_NOT_READY;
}
/**
 * Runtime command handler.  After the generic option update, force the
 * filter length odd and, if it changed, rebuild the Gaussian weights and
 * resize every per-channel history queue; the frame length is recomputed in
 * case frame_len_msec changed.
 */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    DynamicAudioNormalizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const int old_filter_size = s->filter_size;
    int ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);

    if (ret < 0)
        return ret;

    s->filter_size |= 1; /* the smoothing window must have odd length */
    if (old_filter_size != s->filter_size) {
        init_gaussian_filter(s);

        for (int c = 0; c < s->channels; c++) {
            cqueue_resize(s->gain_history_original[c], s->filter_size);
            cqueue_resize(s->gain_history_minimum[c],  s->filter_size);
            cqueue_resize(s->threshold_history[c],     s->filter_size);
        }
    }

    s->frame_len = frame_size(inlink->sample_rate, s->frame_len_msec);
    return 0;
}
/* Single audio input; config_input (re)allocates all per-channel state. */
static const AVFilterPad avfilter_af_dynaudnorm_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};
/* Single audio output; frames are pushed from activate()/filter_frame(). */
static const AVFilterPad avfilter_af_dynaudnorm_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration: uses the activate() scheduling API and supports
 * timeline internally (the enable state is queued alongside each frame). */
AVFilter ff_af_dynaudnorm = {
    .name          = "dynaudnorm",
    .description   = NULL_IF_CONFIG_SMALL("Dynamic Audio Normalizer."),
    .query_formats = query_formats,
    .priv_size     = sizeof(DynamicAudioNormalizerContext),
    .init          = init,
    .uninit        = uninit,
    .activate      = activate,
    .inputs        = avfilter_af_dynaudnorm_inputs,
    .outputs       = avfilter_af_dynaudnorm_outputs,
    .priv_class    = &dynaudnorm_class,
    .flags         = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .process_command = process_command,
};

174
externals/ffmpeg/libavfilter/af_earwax.c vendored Executable file
View File

@@ -0,0 +1,174 @@
/*
* Copyright (c) 2011 Mina Nagy Zaki
* Copyright (c) 2000 Edward Beingessner And Sundry Contributors.
* This source code is freely redistributable and may be used for any purpose.
* This copyright notice must be maintained. Edward Beingessner And Sundry
* Contributors are not responsible for the consequences of using this
* software.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Stereo Widening Effect. Adds audio cues to move stereo image in
* front of the listener. Adapted from the libsox earwax effect.
*/
#include "libavutil/channel_layout.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
/* Number of interleaved FIR coefficients below (32 taps per ear). */
#define NUMTAPS 64
/* Interleaved left/right HRTF-style FIR coefficients, alternating between the
 * 30-degree and 330-degree responses (see diagram). */
static const int8_t filt[NUMTAPS] = {
/* 30°  330°  */
    4,   -6,     /* 32 tap stereo FIR filter. */
    4,  -11,     /* One side filters as if the */
   -1,   -5,     /* signal was from 30 degrees */
    3,    3,     /* from the ear, the other as */
   -2,    5,     /* if 330 degrees. */
   -5,    0,
    9,    1,
    6,    3,     /*                         Input                         */
   -4,   -1,     /*                   Left         Right                  */
   -5,   -3,     /*                __________   __________                */
   -2,   -5,     /*               |          | |          |               */
   -7,    1,     /*           .---|  Hh,0(f) | |  Hh,0(f) |---.           */
    6,   -7,     /*          /    |__________| |__________|    \          */
   30,  -29,     /*         /                \ /                \         */
   12,   -3,     /*        /                  X                  \        */
  -11,    4,     /*       /                  / \                  \       */
   -3,    7,     /*  ____V_____   __________V   V__________   _____V____  */
  -20,   23,     /* |          | |          |   |          | |          | */
    2,    0,     /* | Hh,30(f) | | Hh,330(f)|   | Hh,330(f)| | Hh,30(f) | */
    1,   -6,     /* |__________| |__________|   |__________| |__________| */
  -14,   -5,     /*      \     ___      /           \      ___      /     */
   15,  -18,     /*       \   /   \    /    _____    \    /   \    /      */
    6,    7,     /*        `->| + |<--'    /     \    `-->| + |<-'        */
   15,  -10,     /*           \___/      _/       \_      \___/           */
  -14,   22,     /*               \     / \       / \     /               */
   -7,   -2,     /*                `--->| |       | |<---'                */
   -4,    9,     /*                     \_/       \_/                     */
    6,  -12,     /*                                                       */
    6,   -6,     /*                       Headphones                      */
    0,  -11,
    0,   -5,
    4,    0};
/* Filter state: the last NUMTAPS samples carried over between frames,
 * plus room for splicing in the head of the next frame. */
typedef struct EarwaxContext {
    int16_t taps[NUMTAPS * 2];
} EarwaxContext;
/**
 * Negotiate the only supported format: signed 16-bit stereo at 44100 Hz
 * (the FIR coefficients were designed for that rate).
 */
static int query_formats(AVFilterContext *ctx)
{
    static const int sample_rates[] = { 44100, -1 };
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;
    int ret;

    if ((ret = ff_add_format(&formats, AV_SAMPLE_FMT_S16)) < 0)
        return ret;
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;
    if ((ret = ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO)) < 0)
        return ret;
    if ((ret = ff_set_common_channel_layouts(ctx, layout)) < 0)
        return ret;
    if ((ret = ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates))) < 0)
        return ret;

    return 0;
}
//FIXME: replace with DSPContext.scalarproduct_int16
/**
 * Sliding dot product of the interleaved input against the NUMTAPS filter
 * coefficients; one clipped 16-bit output sample per input position.
 * Returns the advanced output pointer.
 */
static inline int16_t *scalarproduct(const int16_t *in, const int16_t *endin, int16_t *out)
{
    while (in < endin) {
        int32_t acc = 0;
        int16_t j;

        for (j = 0; j < NUMTAPS; j++)
            acc += in[j] * filt[j];

        /* >> 6 rescales the coefficient sum back into sample range. */
        *out++ = av_clip_int16(acc >> 6);
        in++;
    }

    return out;
}
/*
 * Run the FIR over one frame.  The tail of the previous frame lives in
 * s->taps; the head of the new frame is spliced after it so the convolution
 * is seamless across frame boundaries.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int16_t *taps, *endin, *in, *out;
    AVFrame *outsamples = ff_get_audio_buffer(outlink, insamples->nb_samples);
    int len;
    if (!outsamples) {
        av_frame_free(&insamples);
        return AVERROR(ENOMEM);
    }
    av_frame_copy_props(outsamples, insamples);
    taps  = ((EarwaxContext *)inlink->dst->priv)->taps;
    out   = (int16_t *)outsamples->data[0];
    in    = (int16_t *)insamples ->data[0];
    /* len = number of interleaved input samples available to splice
     * (2 per stereo frame), capped at NUMTAPS. */
    len = FFMIN(NUMTAPS, 2*insamples->nb_samples);
    // copy part of new input and process with saved input
    memcpy(taps+NUMTAPS, in, len * sizeof(*taps));
    out = scalarproduct(taps, taps + len, out);
    // process current input
    if (2*insamples->nb_samples >= NUMTAPS ){
        endin = in + insamples->nb_samples * 2 - NUMTAPS;
        scalarproduct(in, endin, out);
        // save part of input for next round
        memcpy(taps, endin, NUMTAPS * sizeof(*taps));
    } else
        /* Frame shorter than the filter: shift the state buffer instead. */
        memmove(taps, taps + 2*insamples->nb_samples, NUMTAPS * sizeof(*taps));
    av_frame_free(&insamples);
    return ff_filter_frame(outlink, outsamples);
}
/* Single S16 stereo input processed frame-by-frame in filter_frame(). */
static const AVFilterPad earwax_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output. */
static const AVFilterPad earwax_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration; no options, so no priv_class. */
AVFilter ff_af_earwax = {
    .name           = "earwax",
    .description    = NULL_IF_CONFIG_SMALL("Widen the stereo image."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(EarwaxContext),
    .inputs         = earwax_inputs,
    .outputs        = earwax_outputs,
};

133
externals/ffmpeg/libavfilter/af_extrastereo.c vendored Executable file
View File

@@ -0,0 +1,133 @@
/*
* Copyright (c) 2015 The FFmpeg Project
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
/* Filter state: user options only. */
typedef struct ExtraStereoContext {
    const AVClass *class;
    float mult;   /* difference coefficient: 1 = passthrough, >1 widens */
    int clip;     /* clamp output samples to [-1, 1] when set */
} ExtraStereoContext;
#define OFFSET(x) offsetof(ExtraStereoContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
/* Both options are runtime-settable (A includes RUNTIME_PARAM). */
static const AVOption extrastereo_options[] = {
    { "m", "set the difference coefficient", OFFSET(mult), AV_OPT_TYPE_FLOAT, {.dbl=2.5}, -10, 10, A },
    { "c", "enable clipping",                OFFSET(clip), AV_OPT_TYPE_BOOL,  {.i64=1},     0,  1, A },
    { NULL }
};
AVFILTER_DEFINE_CLASS(extrastereo);
/**
 * Negotiate packed float stereo at any sample rate — the processing loop
 * assumes interleaved L/R float samples.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;
    int ret;

    if ((ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLT)) < 0)
        return ret;
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;
    if ((ret = ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO)) < 0)
        return ret;
    if ((ret = ff_set_common_channel_layouts(ctx, layout)) < 0)
        return ret;

    return ff_set_common_samplerates(ctx, ff_all_samplerates());
}
/**
 * Widen the stereo image: for each sample pair, scale the deviation of each
 * channel from the mid (mono) signal by `mult`, optionally clipping the
 * result to [-1, 1].  Works in place when the input frame is writable.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ExtraStereoContext *s = ctx->priv;
    const float *src = (const float *)in->data[0];
    const float mult = s->mult;
    AVFrame *out;
    float *dst;
    int i;

    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (float *)out->data[0];

    for (i = 0; i < in->nb_samples; i++) {
        float left    = src[i * 2    ];
        float right   = src[i * 2 + 1];
        const float average = (left + right) / 2.;

        left  = average + mult * (left  - average);
        right = average + mult * (right - average);

        if (s->clip) {
            left  = av_clipf(left,  -1, 1);
            right = av_clipf(right, -1, 1);
        }

        dst[i * 2    ] = left;
        dst[i * 2 + 1] = right;
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Single packed-float stereo input. */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Registration: generic timeline support and the stock command handler,
 * since all options are RUNTIME_PARAM. */
AVFilter ff_af_extrastereo = {
    .name           = "extrastereo",
    .description    = NULL_IF_CONFIG_SMALL("Increase difference between stereo audio channels."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(ExtraStereoContext),
    .priv_class     = &extrastereo_class,
    .inputs         = inputs,
    .outputs        = outputs,
    .flags          = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
    .process_command = ff_filter_process_command,
};

981
externals/ffmpeg/libavfilter/af_firequalizer.c vendored Executable file
View File

@@ -0,0 +1,981 @@
/*
* Copyright (c) 2016 Muhammad Faiz <mfcc64@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "libavutil/eval.h"
#include "libavutil/avassert.h"
#include "libavcodec/avfft.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
/* Allowed range of log2(RDFT length). */
#define RDFT_BITS_MIN 4
#define RDFT_BITS_MAX 16
/* Window functions selectable via the `wfunc` option. */
enum WindowFunc {
    WFUNC_RECTANGULAR,
    WFUNC_HANN,
    WFUNC_HAMMING,
    WFUNC_BLACKMAN,
    WFUNC_NUTTALL3,
    WFUNC_MNUTTALL3,
    WFUNC_NUTTALL,
    WFUNC_BNUTTALL,
    WFUNC_BHARRIS,
    WFUNC_TUKEY,
    NB_WFUNC
};
/* Frequency/gain axis scaling modes for the gain curve and dumps. */
enum Scale {
    SCALE_LINLIN,
    SCALE_LINLOG,
    SCALE_LOGLIN,
    SCALE_LOGLOG,
    NB_SCALE
};
/* Capacity of the user-supplied gain-entry table. */
#define NB_GAIN_ENTRY_MAX 4096
/* One point of the user gain curve. */
typedef struct GainEntry {
    double freq;
    double gain;
} GainEntry;
/* Per-channel overlap-add bookkeeping for the ping-pong conv buffers. */
typedef struct OverlapIndex {
    int buf_idx;
    int overlap_idx;
} OverlapIndex;
/* Filter state: FFT contexts, work buffers, and user options. */
typedef struct FIREqualizerContext {
    const AVClass *class;
    /* transform contexts */
    RDFTContext   *analysis_rdft;
    RDFTContext   *analysis_irdft;
    RDFTContext   *rdft;
    RDFTContext   *irdft;
    FFTContext    *fft_ctx;          /* complex FFT used in 2-channel (fft2) mode */
    RDFTContext   *cepstrum_rdft;    /* used for min_phase kernel derivation */
    RDFTContext   *cepstrum_irdft;
    int            analysis_rdft_len;
    int            rdft_len;
    int            cepstrum_len;
    /* work buffers */
    float         *analysis_buf;
    float         *dump_buf;
    float         *kernel_tmp_buf;
    float         *kernel_buf;       /* frequency-domain FIR kernel */
    float         *cepstrum_buf;
    float         *conv_buf;         /* per-channel overlap-add scratch */
    OverlapIndex  *conv_idx;         /* per-channel overlap-add state */
    int            fir_len;
    int            nsamples_max;     /* largest chunk one convolution pass accepts */
    int64_t        next_pts;
    int            frame_nsamples_max;
    int            remaining;
    /* options (gain_cmd/gain_entry_cmd hold runtime-command overrides) */
    char          *gain_cmd;
    char          *gain_entry_cmd;
    const char    *gain;
    const char    *gain_entry;
    double         delay;
    double         accuracy;
    int            wfunc;
    int            fixed;
    int            multi;
    int            zero_phase;
    int            scale;
    char          *dumpfile;
    int            dumpscale;
    int            fft2;
    int            min_phase;
    int            nb_gain_entry;
    int            gain_entry_err;
    GainEntry      gain_entry_tbl[NB_GAIN_ENTRY_MAX];
} FIREqualizerContext;
#define OFFSET(x) offsetof(FIREqualizerContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* TFLAGS-marked options can additionally be changed at runtime. */
#define TFLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption firequalizer_options[] = {
    { "gain", "set gain curve", OFFSET(gain), AV_OPT_TYPE_STRING, { .str = "gain_interpolate(f)" }, 0, 0, TFLAGS },
    { "gain_entry", "set gain entry", OFFSET(gain_entry), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, TFLAGS },
    { "delay", "set delay", OFFSET(delay), AV_OPT_TYPE_DOUBLE, { .dbl = 0.01 }, 0.0, 1e10, FLAGS },
    { "accuracy", "set accuracy", OFFSET(accuracy), AV_OPT_TYPE_DOUBLE, { .dbl = 5.0 }, 0.0, 1e10, FLAGS },
    { "wfunc", "set window function", OFFSET(wfunc), AV_OPT_TYPE_INT, { .i64 = WFUNC_HANN }, 0, NB_WFUNC-1, FLAGS, "wfunc" },
    { "rectangular", "rectangular window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_RECTANGULAR }, 0, 0, FLAGS, "wfunc" },
    { "hann", "hann window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HANN }, 0, 0, FLAGS, "wfunc" },
    { "hamming", "hamming window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_HAMMING }, 0, 0, FLAGS, "wfunc" },
    { "blackman", "blackman window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BLACKMAN }, 0, 0, FLAGS, "wfunc" },
    { "nuttall3", "3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL3 }, 0, 0, FLAGS, "wfunc" },
    { "mnuttall3", "minimum 3-term nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_MNUTTALL3 }, 0, 0, FLAGS, "wfunc" },
    { "nuttall", "nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_NUTTALL }, 0, 0, FLAGS, "wfunc" },
    { "bnuttall", "blackman-nuttall window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BNUTTALL }, 0, 0, FLAGS, "wfunc" },
    { "bharris", "blackman-harris window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_BHARRIS }, 0, 0, FLAGS, "wfunc" },
    { "tukey", "tukey window", 0, AV_OPT_TYPE_CONST, { .i64 = WFUNC_TUKEY }, 0, 0, FLAGS, "wfunc" },
    { "fixed", "set fixed frame samples", OFFSET(fixed), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "multi", "set multi channels mode", OFFSET(multi), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "zero_phase", "set zero phase mode", OFFSET(zero_phase), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "scale", "set gain scale", OFFSET(scale), AV_OPT_TYPE_INT, { .i64 = SCALE_LINLOG }, 0, NB_SCALE-1, FLAGS, "scale" },
    { "linlin", "linear-freq linear-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LINLIN }, 0, 0, FLAGS, "scale" },
    { "linlog", "linear-freq logarithmic-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LINLOG }, 0, 0, FLAGS, "scale" },
    { "loglin", "logarithmic-freq linear-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LOGLIN }, 0, 0, FLAGS, "scale" },
    { "loglog", "logarithmic-freq logarithmic-gain", 0, AV_OPT_TYPE_CONST, { .i64 = SCALE_LOGLOG }, 0, 0, FLAGS, "scale" },
    { "dumpfile", "set dump file", OFFSET(dumpfile), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, FLAGS },
    { "dumpscale", "set dump scale", OFFSET(dumpscale), AV_OPT_TYPE_INT, { .i64 = SCALE_LINLOG }, 0, NB_SCALE-1, FLAGS, "scale" },
    { "fft2", "set 2-channels fft", OFFSET(fft2), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { "min_phase", "set minimum phase mode", OFFSET(min_phase), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, FLAGS },
    { NULL }
};
AVFILTER_DEFINE_CLASS(firequalizer);
/**
 * Release every transform context and work buffer, resetting the pointers so
 * the context can be reinitialized safely.
 */
static void common_uninit(FIREqualizerContext *s)
{
    av_rdft_end(s->analysis_rdft);
    av_rdft_end(s->analysis_irdft);
    av_rdft_end(s->rdft);
    av_rdft_end(s->irdft);
    av_fft_end(s->fft_ctx);
    av_rdft_end(s->cepstrum_rdft);
    av_rdft_end(s->cepstrum_irdft);
    s->analysis_rdft  = NULL;
    s->analysis_irdft = NULL;
    s->rdft           = NULL;
    s->irdft          = NULL;
    s->fft_ctx        = NULL;
    s->cepstrum_rdft  = NULL;
    s->cepstrum_irdft = NULL;

    av_freep(&s->analysis_buf);
    av_freep(&s->dump_buf);
    av_freep(&s->kernel_tmp_buf);
    av_freep(&s->kernel_buf);
    av_freep(&s->cepstrum_buf);
    av_freep(&s->conv_buf);
    av_freep(&s->conv_idx);
}
/** Filter teardown: shared cleanup plus the runtime-command string copies. */
static av_cold void uninit(AVFilterContext *ctx)
{
    FIREqualizerContext *s = ctx->priv;

    common_uninit(s);

    av_freep(&s->gain_cmd);
    av_freep(&s->gain_entry_cmd);
}
/**
 * Negotiate planar float samples, any channel count, any sample rate.
 */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts = ff_all_channel_counts();
    AVFilterFormats *formats;
    int ret;

    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/*
 * Overlap-add FFT convolution of `data` (nsamples mono floats) with the
 * real-valued frequency response in kernel_buf (one gain per RDFT bin —
 * note buf[2*k] and buf[2*k+1] get the same factor, i.e. a linear-phase
 * kernel).  conv_buf holds two rdft_len halves that are ping-ponged via
 * idx->buf_idx; the tail of the previous block, starting at
 * idx->overlap_idx, is added back in.  Longer inputs are split recursively
 * so each pass fits in nsamples_max.
 */
static void fast_convolute(FIREqualizerContext *av_restrict s, const float *av_restrict kernel_buf, float *av_restrict conv_buf,
                           OverlapIndex *av_restrict idx, float *av_restrict data, int nsamples)
{
    if (nsamples <= s->nsamples_max) {
        float *buf = conv_buf + idx->buf_idx * s->rdft_len;
        float *obuf = conv_buf + !idx->buf_idx * s->rdft_len + idx->overlap_idx;
        int center = s->fir_len/2;
        int k;
        /* Zero-pad: half the FIR length in front, the rest behind. */
        memset(buf, 0, center * sizeof(*data));
        memcpy(buf + center, data, nsamples * sizeof(*data));
        memset(buf + center + nsamples, 0, (s->rdft_len - nsamples - center) * sizeof(*data));
        av_rdft_calc(s->rdft, buf);
        /* RDFT packing: buf[0] = DC, buf[1] = Nyquist. */
        buf[0] *= kernel_buf[0];
        buf[1] *= kernel_buf[s->rdft_len/2];
        for (k = 1; k < s->rdft_len/2; k++) {
            buf[2*k] *= kernel_buf[k];
            buf[2*k+1] *= kernel_buf[k];
        }
        av_rdft_calc(s->irdft, buf);
        /* Add the overlap tail from the previous block. */
        for (k = 0; k < s->rdft_len - idx->overlap_idx; k++)
            buf[k] += obuf[k];
        memcpy(data, buf, nsamples * sizeof(*data));
        idx->buf_idx = !idx->buf_idx;
        idx->overlap_idx = nsamples;
    } else {
        /* Split recursively; the final halving keeps both pieces balanced. */
        while (nsamples > s->nsamples_max * 2) {
            fast_convolute(s, kernel_buf, conv_buf, idx, data, s->nsamples_max);
            data += s->nsamples_max;
            nsamples -= s->nsamples_max;
        }
        fast_convolute(s, kernel_buf, conv_buf, idx, data, nsamples/2);
        fast_convolute(s, kernel_buf, conv_buf, idx, data + nsamples/2, nsamples - nsamples/2);
    }
}
/*
 * Overlap-add FFT convolution with a complex-valued kernel (used for
 * non-linear-phase, e.g. minimum-phase, responses): each bin is multiplied
 * by the complex factor (kernel_buf[k], kernel_buf[k+1]).  Unlike
 * fast_convolute() the input is not centered, since the kernel itself
 * carries the phase/delay.  Same ping-pong overlap-add bookkeeping.
 */
static void fast_convolute_nonlinear(FIREqualizerContext *av_restrict s, const float *av_restrict kernel_buf,
                                     float *av_restrict conv_buf, OverlapIndex *av_restrict idx,
                                     float *av_restrict data, int nsamples)
{
    if (nsamples <= s->nsamples_max) {
        float *buf = conv_buf + idx->buf_idx * s->rdft_len;
        float *obuf = conv_buf + !idx->buf_idx * s->rdft_len + idx->overlap_idx;
        int k;
        memcpy(buf, data, nsamples * sizeof(*data));
        memset(buf + nsamples, 0, (s->rdft_len - nsamples) * sizeof(*data));
        av_rdft_calc(s->rdft, buf);
        /* DC and Nyquist bins are purely real in the RDFT packing. */
        buf[0] *= kernel_buf[0];
        buf[1] *= kernel_buf[1];
        for (k = 2; k < s->rdft_len; k += 2) {
            float re, im;
            /* complex multiply (buf) * (kernel) */
            re = buf[k] * kernel_buf[k] - buf[k+1] * kernel_buf[k+1];
            im = buf[k] * kernel_buf[k+1] + buf[k+1] * kernel_buf[k];
            buf[k] = re;
            buf[k+1] = im;
        }
        av_rdft_calc(s->irdft, buf);
        for (k = 0; k < s->rdft_len - idx->overlap_idx; k++)
            buf[k] += obuf[k];
        memcpy(data, buf, nsamples * sizeof(*data));
        idx->buf_idx = !idx->buf_idx;
        idx->overlap_idx = nsamples;
    } else {
        while (nsamples > s->nsamples_max * 2) {
            fast_convolute_nonlinear(s, kernel_buf, conv_buf, idx, data, s->nsamples_max);
            data += s->nsamples_max;
            nsamples -= s->nsamples_max;
        }
        fast_convolute_nonlinear(s, kernel_buf, conv_buf, idx, data, nsamples/2);
        fast_convolute_nonlinear(s, kernel_buf, conv_buf, idx, data + nsamples/2, nsamples - nsamples/2);
    }
}
/*
 * Two-channel variant (fft2 mode): data0/data1 are packed into the real and
 * imaginary parts of a single complex FFT.  The inverse transform is done
 * with the same forward fft_ctx by swapping re <-> im around the kernel
 * multiply; the 0.5f factor is the normalization for that trick.  Overlap-add
 * bookkeeping mirrors fast_convolute().
 */
static void fast_convolute2(FIREqualizerContext *av_restrict s, const float *av_restrict kernel_buf, FFTComplex *av_restrict conv_buf,
                            OverlapIndex *av_restrict idx, float *av_restrict data0, float *av_restrict data1, int nsamples)
{
    if (nsamples <= s->nsamples_max) {
        FFTComplex *buf = conv_buf + idx->buf_idx * s->rdft_len;
        FFTComplex *obuf = conv_buf + !idx->buf_idx * s->rdft_len + idx->overlap_idx;
        int center = s->fir_len/2;
        int k;
        float tmp;
        /* Pack both channels, centered like fast_convolute(). */
        memset(buf, 0, center * sizeof(*buf));
        for (k = 0; k < nsamples; k++) {
            buf[center+k].re = data0[k];
            buf[center+k].im = data1[k];
        }
        memset(buf + center + nsamples, 0, (s->rdft_len - nsamples - center) * sizeof(*buf));
        av_fft_permute(s->fft_ctx, buf);
        av_fft_calc(s->fft_ctx, buf);
        /* swap re <-> im, do backward fft using forward fft_ctx */
        /* normalize with 0.5f */
        tmp = buf[0].re;
        buf[0].re = 0.5f * kernel_buf[0] * buf[0].im;
        buf[0].im = 0.5f * kernel_buf[0] * tmp;
        /* Bins k and rdft_len - k share the same real kernel gain. */
        for (k = 1; k < s->rdft_len/2; k++) {
            int m = s->rdft_len - k;
            tmp = buf[k].re;
            buf[k].re = 0.5f * kernel_buf[k] * buf[k].im;
            buf[k].im = 0.5f * kernel_buf[k] * tmp;
            tmp = buf[m].re;
            buf[m].re = 0.5f * kernel_buf[k] * buf[m].im;
            buf[m].im = 0.5f * kernel_buf[k] * tmp;
        }
        /* Nyquist bin (k == rdft_len/2 after the loop). */
        tmp = buf[k].re;
        buf[k].re = 0.5f * kernel_buf[k] * buf[k].im;
        buf[k].im = 0.5f * kernel_buf[k] * tmp;
        av_fft_permute(s->fft_ctx, buf);
        av_fft_calc(s->fft_ctx, buf);
        for (k = 0; k < s->rdft_len - idx->overlap_idx; k++) {
            buf[k].re += obuf[k].re;
            buf[k].im += obuf[k].im;
        }
        /* swapped re <-> im */
        for (k = 0; k < nsamples; k++) {
            data0[k] = buf[k].im;
            data1[k] = buf[k].re;
        }
        idx->buf_idx = !idx->buf_idx;
        idx->overlap_idx = nsamples;
    } else {
        while (nsamples > s->nsamples_max * 2) {
            fast_convolute2(s, kernel_buf, conv_buf, idx, data0, data1, s->nsamples_max);
            data0 += s->nsamples_max;
            data1 += s->nsamples_max;
            nsamples -= s->nsamples_max;
        }
        fast_convolute2(s, kernel_buf, conv_buf, idx, data0, data1, nsamples/2);
        fast_convolute2(s, kernel_buf, conv_buf, idx, data0 + nsamples/2, data1 + nsamples/2, nsamples - nsamples/2);
    }
}
/**
 * Write the generated FIR kernel of channel ch to fp for plotting:
 * first the impulse response (time domain), then the frequency response
 * compared against the desired gain curve stored in s->dump_buf.
 * Note: destroys s->analysis_buf (it is re-transformed in place).
 */
static void dump_fir(AVFilterContext *ctx, FILE *fp, int ch)
{
    FIREqualizerContext *s = ctx->priv;
    int rate = ctx->inputs[0]->sample_rate;
    int xlog = s->dumpscale == SCALE_LOGLIN || s->dumpscale == SCALE_LOGLOG;
    int ylog = s->dumpscale == SCALE_LINLOG || s->dumpscale == SCALE_LOGLOG;
    int x;
    int center = s->fir_len / 2;
    double delay = s->zero_phase ? 0.0 : (double) center / rate;
    double vx, ya, yb;

    /* undo the 2/rdft_len normalization applied during kernel generation */
    if (!s->min_phase) {
        s->analysis_buf[0] *= s->rdft_len/2;
        for (x = 1; x <= center; x++) {
            s->analysis_buf[x] *= s->rdft_len/2;
            s->analysis_buf[s->analysis_rdft_len - x] *= s->rdft_len/2;
        }
    } else {
        for (x = 0; x < s->fir_len; x++)
            s->analysis_buf[x] *= s->rdft_len/2;
    }

    if (ch)
        fprintf(fp, "\n\n");
    fprintf(fp, "# time[%d] (time amplitude)\n", ch);

    /* linear-phase kernels are stored wrapped around the buffer edges */
    if (!s->min_phase) {
        for (x = center; x > 0; x--)
            fprintf(fp, "%15.10f %15.10f\n", delay - (double) x / rate, (double) s->analysis_buf[s->analysis_rdft_len - x]);
        for (x = 0; x <= center; x++)
            fprintf(fp, "%15.10f %15.10f\n", delay + (double)x / rate , (double) s->analysis_buf[x]);
    } else {
        for (x = 0; x < s->fir_len; x++)
            fprintf(fp, "%15.10f %15.10f\n", (double)x / rate, (double) s->analysis_buf[x]);
    }

    av_rdft_calc(s->analysis_rdft, s->analysis_buf);

    fprintf(fp, "\n\n# freq[%d] (frequency desired_gain actual_gain)\n", ch);
    for (x = 0; x <= s->analysis_rdft_len/2; x++) {
        /* slot 1 of the packed rdft output holds the Nyquist bin */
        int i = (x == s->analysis_rdft_len/2) ? 1 : 2 * x;
        vx = (double)x * rate / s->analysis_rdft_len;
        if (xlog)
            vx = log2(0.05*vx);
        ya = s->dump_buf[i];
        /* min-phase response is complex: take the magnitude */
        yb = s->min_phase && (i > 1) ? hypotf(s->analysis_buf[i], s->analysis_buf[i+1]) : s->analysis_buf[i];
        if (s->min_phase)
            yb = fabs(yb);
        if (ylog) {
            ya = 20.0 * log10(fabs(ya));
            yb = 20.0 * log10(fabs(yb));
        }
        fprintf(fp, "%17.10f %17.10f %17.10f\n", vx, ya, yb);
    }
}
/**
 * Expression callback for entry(freq, gain): append one (freq, gain) pair
 * to the gain entry table. Entries must arrive in strictly ascending
 * frequency order. On any invalid input, gain_entry_err is set and the
 * entry is dropped. Always returns 0 (the expression value is unused).
 */
static double entry_func(void *p, double freq, double gain)
{
    AVFilterContext *avctx = p;
    FIREqualizerContext *priv = avctx->priv;
    int pos = priv->nb_gain_entry;

    if (pos >= NB_GAIN_ENTRY_MAX) {
        av_log(avctx, AV_LOG_ERROR, "entry table overflow.\n");
    } else if (isnan(freq)) {
        av_log(avctx, AV_LOG_ERROR, "nan frequency (%g, %g).\n", freq, gain);
    } else if (pos > 0 && freq <= priv->gain_entry_tbl[pos - 1].freq) {
        av_log(avctx, AV_LOG_ERROR, "unsorted frequency (%g, %g).\n", freq, gain);
    } else {
        priv->gain_entry_tbl[pos].freq = freq;
        priv->gain_entry_tbl[pos].gain = gain;
        priv->nb_gain_entry = pos + 1;
        return 0;
    }

    priv->gain_entry_err = AVERROR(EINVAL);
    return 0;
}
/**
 * bsearch() comparator over table *intervals*: memb points at the start of
 * a segment, and the key frequency matches when it lies within
 * [memb[0].freq, memb[1].freq]. Callers search nb_gain_entry - 1 elements,
 * so reading memb[1] never runs past the table.
 */
static int gain_entry_compare(const void *key, const void *memb)
{
    const double *target = key;
    const GainEntry *seg = memb;

    return (*target < seg[0].freq) ? -1 :
           (*target > seg[1].freq) ?  1 : 0;
}
/**
 * Expression callback gain_interpolate(freq): linear interpolation of the
 * gain entry table at freq. Frequencies outside the table are clamped to
 * the first/last entry's gain; an empty table yields 0; NaN passes through.
 */
static double gain_interpolate_func(void *p, double freq)
{
    AVFilterContext *avctx = p;
    FIREqualizerContext *priv = avctx->priv;
    GainEntry *seg;
    double span, wlo, whi;

    if (isnan(freq))
        return freq;
    if (!priv->nb_gain_entry)
        return 0;
    /* clamp to the table end points */
    if (freq <= priv->gain_entry_tbl[0].freq)
        return priv->gain_entry_tbl[0].gain;
    if (freq >= priv->gain_entry_tbl[priv->nb_gain_entry-1].freq)
        return priv->gain_entry_tbl[priv->nb_gain_entry-1].gain;

    /* locate the enclosing segment (bsearch over nb_gain_entry-1 intervals) */
    seg = bsearch(&freq, &priv->gain_entry_tbl, priv->nb_gain_entry - 1, sizeof(*seg), gain_entry_compare);
    av_assert0(seg);

    span = seg[1].freq - seg[0].freq;
    wlo  = freq - seg[0].freq;
    whi  = seg[1].freq - freq;
    if (wlo && whi)
        return (wlo * seg[1].gain + whi * seg[0].gain) / span;
    /* exactly on an end point */
    return wlo ? seg[1].gain : seg[0].gain;
}
/**
 * Expression callback cubic_interpolate(freq): cubic (Hermite-style)
 * interpolation of the gain entry table, with end-point slopes blended
 * from neighboring segments. Out-of-range frequencies are clamped to the
 * boundary gains; an empty table yields 0.
 */
static double cubic_interpolate_func(void *p, double freq)
{
    AVFilterContext *ctx = p;
    FIREqualizerContext *s = ctx->priv;
    GainEntry *res;
    double x, x2, x3;
    double a, b, c, d;
    double m0, m1, m2, msum, unit;

    if (!s->nb_gain_entry)
        return 0;
    if (freq <= s->gain_entry_tbl[0].freq)
        return s->gain_entry_tbl[0].gain;
    if (freq >= s->gain_entry_tbl[s->nb_gain_entry-1].freq)
        return s->gain_entry_tbl[s->nb_gain_entry-1].gain;

    /* locate the enclosing segment; res points at its first entry */
    res = bsearch(&freq, &s->gain_entry_tbl, s->nb_gain_entry - 1, sizeof(*res), gain_entry_compare);
    av_assert0(res);

    unit = res[1].freq - res[0].freq;
    /* one-sided slopes scaled to the current segment width; 0 at table ends */
    m0 = res != s->gain_entry_tbl ?
         unit * (res[0].gain - res[-1].gain) / (res[0].freq - res[-1].freq) : 0;
    m1 = res[1].gain - res[0].gain;
    m2 = res != s->gain_entry_tbl + s->nb_gain_entry - 2 ?
         unit * (res[2].gain - res[1].gain) / (res[2].freq - res[1].freq) : 0;

    /* blend neighboring slopes, each weighted by the other's magnitude */
    msum = fabs(m0) + fabs(m1);
    m0 = msum > 0 ? (fabs(m0) * m1 + fabs(m1) * m0) / msum : 0;
    msum = fabs(m1) + fabs(m2);
    m1 = msum > 0 ? (fabs(m1) * m2 + fabs(m2) * m1) / msum : 0;

    /* cubic a*x^3 + b*x^2 + c*x + d with f(0)=res[0].gain, f(1)=res[1].gain,
     * f'(0)=m0, f'(1)=m1, where x is normalized to the segment */
    d = res[0].gain;
    c = m0;
    b = 3 * res[1].gain - m1 - 2 * c - 3 * d;
    a = res[1].gain - b - c - d;
    x = (freq - res[0].freq) / unit;
    x2 = x * x;
    x3 = x2 * x;

    return a * x3 + b * x2 + c * x + d;
}
/* variables available inside the gain expression; order must match the
 * VarOffset enum below */
static const char *const var_names[] = {
    "f",        /* frequency being evaluated */
    "sr",       /* sample rate */
    "ch",       /* channel index */
    "chid",     /* channel id extracted from the layout */
    "chs",      /* number of channels */
    "chlayout", /* channel layout */
    NULL
};

/* indices into the vars[] array handed to av_expr_eval() */
enum VarOffset {
    VAR_F,
    VAR_SR,
    VAR_CH,
    VAR_CHID,
    VAR_CHS,
    VAR_CHLAYOUT,
    VAR_NB
};
/**
 * Convert the linear-phase kernel in rdft_buf into a minimum-phase kernel
 * with the same magnitude response, using the real-cepstrum (homomorphic)
 * method on the larger cepstrum_len transform.
 */
static void generate_min_phase_kernel(FIREqualizerContext *s, float *rdft_buf)
{
    int k, cepstrum_len = s->cepstrum_len, rdft_len = s->rdft_len;
    double norm = 2.0 / cepstrum_len;
    double minval = 1e-7 / rdft_len;

    /* re-center the wrapped (zero-phase) kernel inside the larger buffer */
    memset(s->cepstrum_buf, 0, cepstrum_len * sizeof(*s->cepstrum_buf));
    memcpy(s->cepstrum_buf, rdft_buf, rdft_len/2 * sizeof(*rdft_buf));
    memcpy(s->cepstrum_buf + cepstrum_len - rdft_len/2, rdft_buf + rdft_len/2, rdft_len/2 * sizeof(*rdft_buf));

    av_rdft_calc(s->cepstrum_rdft, s->cepstrum_buf);

    /* log of the (real) spectrum, clamped away from zero so log() stays finite;
     * imaginary parts are zeroed (zero-phase input has a real spectrum) */
    s->cepstrum_buf[0] = log(FFMAX(s->cepstrum_buf[0], minval));
    s->cepstrum_buf[1] = log(FFMAX(s->cepstrum_buf[1], minval));
    for (k = 2; k < cepstrum_len; k += 2) {
        s->cepstrum_buf[k] = log(FFMAX(s->cepstrum_buf[k], minval));
        s->cepstrum_buf[k+1] = 0;
    }

    av_rdft_calc(s->cepstrum_irdft, s->cepstrum_buf);

    /* fold the cepstrum: drop the negative-time half, double the positive-time half */
    memset(s->cepstrum_buf + cepstrum_len/2 + 1, 0, (cepstrum_len/2 - 1) * sizeof(*s->cepstrum_buf));
    for (k = 1; k < cepstrum_len/2; k++)
        s->cepstrum_buf[k] *= 2;

    av_rdft_calc(s->cepstrum_rdft, s->cepstrum_buf);

    /* exponentiate back into a (complex) minimum-phase spectrum; norm also
     * compensates the unnormalized transform pair */
    s->cepstrum_buf[0] = exp(s->cepstrum_buf[0] * norm) * norm;
    s->cepstrum_buf[1] = exp(s->cepstrum_buf[1] * norm) * norm;
    for (k = 2; k < cepstrum_len; k += 2) {
        double mag = exp(s->cepstrum_buf[k] * norm) * norm;
        double ph = s->cepstrum_buf[k+1] * norm;
        s->cepstrum_buf[k] = mag * cos(ph);
        s->cepstrum_buf[k+1] = mag * sin(ph);
    }

    av_rdft_calc(s->cepstrum_irdft, s->cepstrum_buf);

    /* keep only the first fir_len taps of the resulting impulse response */
    memset(rdft_buf, 0, s->rdft_len * sizeof(*rdft_buf));
    memcpy(rdft_buf, s->cepstrum_buf, s->fir_len * sizeof(*rdft_buf));

    if (s->dumpfile) {
        memset(s->analysis_buf, 0, s->analysis_rdft_len * sizeof(*s->analysis_buf));
        memcpy(s->analysis_buf, s->cepstrum_buf, s->fir_len * sizeof(*s->analysis_buf));
    }
}
/**
 * (Re)build the FIR kernel(s) from the gain expression (and optional
 * gain_entry table): sample the desired response on the analysis grid,
 * transform to the time domain, window and truncate to fir_len taps,
 * optionally convert to minimum phase, and store the frequency-domain
 * kernel(s) into s->kernel_buf.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int generate_kernel(AVFilterContext *ctx, const char *gain, const char *gain_entry)
{
    FIREqualizerContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    const char *gain_entry_func_names[] = { "entry", NULL };
    const char *gain_func_names[] = { "gain_interpolate", "cubic_interpolate", NULL };
    double (*gain_entry_funcs[])(void *, double, double) = { entry_func, NULL };
    double (*gain_funcs[])(void *, double) = { gain_interpolate_func, cubic_interpolate_func, NULL };
    double vars[VAR_NB];
    AVExpr *gain_expr;
    int ret, k, center, ch;
    int xlog = s->scale == SCALE_LOGLIN || s->scale == SCALE_LOGLOG;
    int ylog = s->scale == SCALE_LINLOG || s->scale == SCALE_LOGLOG;
    FILE *dump_fp = NULL;

    /* evaluating gain_entry populates s->gain_entry_tbl via entry_func() */
    s->nb_gain_entry = 0;
    s->gain_entry_err = 0;
    if (gain_entry) {
        double result = 0.0;
        ret = av_expr_parse_and_eval(&result, gain_entry, NULL, NULL, NULL, NULL,
                                     gain_entry_func_names, gain_entry_funcs, ctx, 0, ctx);
        if (ret < 0)
            return ret;
        if (s->gain_entry_err < 0)
            return s->gain_entry_err;
    }
    av_log(ctx, AV_LOG_DEBUG, "nb_gain_entry = %d.\n", s->nb_gain_entry);

    ret = av_expr_parse(&gain_expr, gain, var_names,
                        gain_func_names, gain_funcs, NULL, NULL, 0, ctx);
    if (ret < 0)
        return ret;

    /* dumping is best-effort; a failure only emits a warning */
    if (s->dumpfile && (!s->dump_buf || !s->analysis_rdft || !(dump_fp = fopen(s->dumpfile, "w"))))
        av_log(ctx, AV_LOG_WARNING, "dumping failed.\n");

    vars[VAR_CHS] = inlink->channels;
    vars[VAR_CHLAYOUT] = inlink->channel_layout;
    vars[VAR_SR] = inlink->sample_rate;
    for (ch = 0; ch < inlink->channels; ch++) {
        float *rdft_buf = s->kernel_tmp_buf + ch * s->rdft_len;
        double result;
        vars[VAR_CH] = ch;
        vars[VAR_CHID] = av_channel_layout_extract_channel(inlink->channel_layout, ch);

        /* analysis_buf[0]/[1] are the packed DC/Nyquist bins
         * (note: with xlog, DC evaluates the expression at log2(0) = -inf) */
        vars[VAR_F] = 0.0;
        if (xlog)
            vars[VAR_F] = log2(0.05 * vars[VAR_F]);
        result = av_expr_eval(gain_expr, vars, ctx);
        s->analysis_buf[0] = ylog ? pow(10.0, 0.05 * result) : result;

        vars[VAR_F] = 0.5 * inlink->sample_rate;
        if (xlog)
            vars[VAR_F] = log2(0.05 * vars[VAR_F]);
        result = av_expr_eval(gain_expr, vars, ctx);
        s->analysis_buf[1] = ylog ? pow(10.0, 0.05 * result) : result;

        /* sample the desired gain curve at every analysis bin */
        for (k = 1; k < s->analysis_rdft_len/2; k++) {
            vars[VAR_F] = k * ((double)inlink->sample_rate /(double)s->analysis_rdft_len);
            if (xlog)
                vars[VAR_F] = log2(0.05 * vars[VAR_F]);
            result = av_expr_eval(gain_expr, vars, ctx);
            s->analysis_buf[2*k] = ylog ? pow(10.0, 0.05 * result) : s->min_phase ? fabs(result) : result;
            s->analysis_buf[2*k+1] = 0.0;
        }

        /* keep a copy of the desired response for dump_fir() */
        if (s->dump_buf)
            memcpy(s->dump_buf, s->analysis_buf, s->analysis_rdft_len * sizeof(*s->analysis_buf));

        /* back to the time domain: ideal zero-phase impulse response */
        av_rdft_calc(s->analysis_irdft, s->analysis_buf);
        center = s->fir_len / 2;

        /* apply the selected window over the fir_len center taps */
        for (k = 0; k <= center; k++) {
            double u = k * (M_PI/center);
            double win;
            switch (s->wfunc) {
            case WFUNC_RECTANGULAR:
                win = 1.0;
                break;
            case WFUNC_HANN:
                win = 0.5 + 0.5 * cos(u);
                break;
            case WFUNC_HAMMING:
                win = 0.53836 + 0.46164 * cos(u);
                break;
            case WFUNC_BLACKMAN:
                win = 0.42 + 0.5 * cos(u) + 0.08 * cos(2*u);
                break;
            case WFUNC_NUTTALL3:
                win = 0.40897 + 0.5 * cos(u) + 0.09103 * cos(2*u);
                break;
            case WFUNC_MNUTTALL3:
                win = 0.4243801 + 0.4973406 * cos(u) + 0.0782793 * cos(2*u);
                break;
            case WFUNC_NUTTALL:
                win = 0.355768 + 0.487396 * cos(u) + 0.144232 * cos(2*u) + 0.012604 * cos(3*u);
                break;
            case WFUNC_BNUTTALL:
                win = 0.3635819 + 0.4891775 * cos(u) + 0.1365995 * cos(2*u) + 0.0106411 * cos(3*u);
                break;
            case WFUNC_BHARRIS:
                win = 0.35875 + 0.48829 * cos(u) + 0.14128 * cos(2*u) + 0.01168 * cos(3*u);
                break;
            case WFUNC_TUKEY:
                win = (u <= 0.5 * M_PI) ? 1.0 : (0.5 + 0.5 * cos(2*u - M_PI));
                break;
            default:
                av_assert0(0);
            }
            /* also fold in both transform normalizations */
            s->analysis_buf[k] *= (2.0/s->analysis_rdft_len) * (2.0/s->rdft_len) * win;
            if (k)
                s->analysis_buf[s->analysis_rdft_len - k] = s->analysis_buf[k];
        }

        /* truncate to fir_len taps (stored wrapped) and move into rdft_buf */
        memset(s->analysis_buf + center + 1, 0, (s->analysis_rdft_len - s->fir_len) * sizeof(*s->analysis_buf));
        memcpy(rdft_buf, s->analysis_buf, s->rdft_len/2 * sizeof(*s->analysis_buf));
        memcpy(rdft_buf + s->rdft_len/2, s->analysis_buf + s->analysis_rdft_len - s->rdft_len/2, s->rdft_len/2 * sizeof(*s->analysis_buf));
        if (s->min_phase)
            generate_min_phase_kernel(s, rdft_buf);
        av_rdft_calc(s->rdft, rdft_buf);

        for (k = 0; k < s->rdft_len; k++) {
            if (isnan(rdft_buf[k]) || isinf(rdft_buf[k])) {
                av_log(ctx, AV_LOG_ERROR, "filter kernel contains nan or infinity.\n");
                av_expr_free(gain_expr);
                if (dump_fp)
                    fclose(dump_fp);
                return AVERROR(EINVAL);
            }
        }

        /* linear-phase response is real: compact to real bins only */
        if (!s->min_phase) {
            rdft_buf[s->rdft_len-1] = rdft_buf[1];
            for (k = 0; k < s->rdft_len/2; k++)
                rdft_buf[k] = rdft_buf[2*k];
            rdft_buf[s->rdft_len/2] = rdft_buf[s->rdft_len-1];
        }

        if (dump_fp)
            dump_fir(ctx, dump_fp, ch);

        /* with a single shared kernel only channel 0 needs computing */
        if (!s->multi)
            break;
    }

    memcpy(s->kernel_buf, s->kernel_tmp_buf, (s->multi ? inlink->channels : 1) * s->rdft_len * sizeof(*s->kernel_buf));
    av_expr_free(gain_expr);
    if (dump_fp)
        fclose(dump_fp);

    return 0;
}
/* prefer the value set at runtime via process_command() over the static option */
#define SELECT_GAIN(s) (s->gain_cmd ? s->gain_cmd : s->gain)
#define SELECT_GAIN_ENTRY(s) (s->gain_entry_cmd ? s->gain_entry_cmd : s->gain_entry)
/**
 * Configure for the negotiated input: size the transforms from the
 * requested delay and accuracy, allocate all buffers, then build the
 * initial kernel.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    FIREqualizerContext *s = ctx->priv;
    int rdft_bits;

    common_uninit(s);

    s->next_pts = 0;
    s->frame_nsamples_max = 0;

    /* fir_len is odd so the kernel has a well-defined center tap */
    s->fir_len = FFMAX(2 * (int)(inlink->sample_rate * s->delay) + 1, 3);
    s->remaining = s->fir_len - 1;

    /* choose the smallest transform giving a reasonable block size */
    for (rdft_bits = RDFT_BITS_MIN; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
        s->rdft_len = 1 << rdft_bits;
        s->nsamples_max = s->rdft_len - s->fir_len + 1;
        if (s->nsamples_max * 2 >= s->fir_len)
            break;
    }

    if (rdft_bits > RDFT_BITS_MAX) {
        av_log(ctx, AV_LOG_ERROR, "too large delay, please decrease it.\n");
        return AVERROR(EINVAL);
    }

    if (!(s->rdft = av_rdft_init(rdft_bits, DFT_R2C)) || !(s->irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
        return AVERROR(ENOMEM);

    /* optional complex FFT processes two channels per transform */
    if (s->fft2 && !s->multi && inlink->channels > 1 && !(s->fft_ctx = av_fft_init(rdft_bits, 0)))
        return AVERROR(ENOMEM);

    if (s->min_phase) {
        int cepstrum_bits = rdft_bits + 2;
        if (cepstrum_bits > RDFT_BITS_MAX) {
            av_log(ctx, AV_LOG_ERROR, "too large delay, please decrease it.\n");
            return AVERROR(EINVAL);
        }

        cepstrum_bits = FFMIN(RDFT_BITS_MAX, cepstrum_bits + 1);
        s->cepstrum_rdft = av_rdft_init(cepstrum_bits, DFT_R2C);
        s->cepstrum_irdft = av_rdft_init(cepstrum_bits, IDFT_C2R);
        if (!s->cepstrum_rdft || !s->cepstrum_irdft)
            return AVERROR(ENOMEM);

        s->cepstrum_len = 1 << cepstrum_bits;
        s->cepstrum_buf = av_malloc_array(s->cepstrum_len, sizeof(*s->cepstrum_buf));
        if (!s->cepstrum_buf)
            return AVERROR(ENOMEM);
    }

    /* the analysis transform must resolve frequencies down to `accuracy` Hz */
    for ( ; rdft_bits <= RDFT_BITS_MAX; rdft_bits++) {
        s->analysis_rdft_len = 1 << rdft_bits;
        if (inlink->sample_rate <= s->accuracy * s->analysis_rdft_len)
            break;
    }

    if (rdft_bits > RDFT_BITS_MAX) {
        av_log(ctx, AV_LOG_ERROR, "too small accuracy, please increase it.\n");
        return AVERROR(EINVAL);
    }

    if (!(s->analysis_irdft = av_rdft_init(rdft_bits, IDFT_C2R)))
        return AVERROR(ENOMEM);

    if (s->dumpfile) {
        /* dump helpers are optional; their absence is handled in generate_kernel() */
        s->analysis_rdft = av_rdft_init(rdft_bits, DFT_R2C);
        s->dump_buf = av_malloc_array(s->analysis_rdft_len, sizeof(*s->dump_buf));
    }

    s->analysis_buf = av_malloc_array(s->analysis_rdft_len, sizeof(*s->analysis_buf));
    s->kernel_tmp_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_tmp_buf));
    s->kernel_buf = av_malloc_array(s->rdft_len * (s->multi ? inlink->channels : 1), sizeof(*s->kernel_buf));
    /* two rdft_len blocks per channel: double buffer for the overlap-add state */
    s->conv_buf = av_calloc(2 * s->rdft_len * inlink->channels, sizeof(*s->conv_buf));
    s->conv_idx = av_calloc(inlink->channels, sizeof(*s->conv_idx));
    if (!s->analysis_buf || !s->kernel_tmp_buf || !s->kernel_buf || !s->conv_buf || !s->conv_idx)
        return AVERROR(ENOMEM);

    av_log(ctx, AV_LOG_DEBUG, "sample_rate = %d, channels = %d, analysis_rdft_len = %d, rdft_len = %d, fir_len = %d, nsamples_max = %d.\n",
           inlink->sample_rate, inlink->channels, s->analysis_rdft_len, s->rdft_len, s->fir_len, s->nsamples_max);

    /* fixed frame size: always feed exactly nsamples_max samples */
    if (s->fixed)
        inlink->min_samples = inlink->max_samples = inlink->partial_buf_size = s->nsamples_max;

    return generate_kernel(ctx, SELECT_GAIN(s), SELECT_GAIN_ENTRY(s));
}
/**
 * Convolve every channel of the (writable) input frame in-place with its
 * kernel and forward the frame, adjusting pts for the filter's group delay.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    FIREqualizerContext *s = ctx->priv;
    int ch;

    if (!s->min_phase) {
        /* when fft_ctx is available, process channel pairs in one complex FFT
         * (only possible with a shared kernel, see config_input) */
        for (ch = 0; ch + 1 < inlink->channels && s->fft_ctx; ch += 2) {
            fast_convolute2(s, s->kernel_buf, (FFTComplex *)(s->conv_buf + 2 * ch * s->rdft_len),
                            s->conv_idx + ch, (float *) frame->extended_data[ch],
                            (float *) frame->extended_data[ch+1], frame->nb_samples);
        }
        /* remaining (or all) channels, one real transform each */
        for ( ; ch < inlink->channels; ch++) {
            fast_convolute(s, s->kernel_buf + (s->multi ? ch * s->rdft_len : 0),
                           s->conv_buf + 2 * ch * s->rdft_len, s->conv_idx + ch,
                           (float *) frame->extended_data[ch], frame->nb_samples);
        }
    } else {
        /* min-phase kernels are complex: use the nonlinear path */
        for (ch = 0; ch < inlink->channels; ch++) {
            fast_convolute_nonlinear(s, s->kernel_buf + (s->multi ? ch * s->rdft_len : 0),
                                     s->conv_buf + 2 * ch * s->rdft_len, s->conv_idx + ch,
                                     (float *) frame->extended_data[ch], frame->nb_samples);
        }
    }

    s->next_pts = AV_NOPTS_VALUE;
    if (frame->pts != AV_NOPTS_VALUE) {
        s->next_pts = frame->pts + av_rescale_q(frame->nb_samples, av_make_q(1, inlink->sample_rate), inlink->time_base);
        /* compensate the fir_len/2 group delay of the linear-phase kernel */
        if (s->zero_phase && !s->min_phase)
            frame->pts -= av_rescale_q(s->fir_len/2, av_make_q(1, inlink->sample_rate), inlink->time_base);
    }
    /* track the largest frame seen; used to size the flush frame at EOF */
    s->frame_nsamples_max = FFMAX(s->frame_nsamples_max, frame->nb_samples);

    return ff_filter_frame(ctx->outputs[0], frame);
}
/**
 * Forward the frame request upstream; once the input hits EOF, push
 * silence through filter_frame() to flush the remaining FIR tail.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FIREqualizerContext *s = ctx->priv;
    AVFrame *silence;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);
    if (ret != AVERROR_EOF || s->remaining <= 0 || s->frame_nsamples_max <= 0)
        return ret;

    /* input drained: synthesize a silent frame to flush the tail */
    silence = ff_get_audio_buffer(outlink, FFMIN(s->remaining, s->frame_nsamples_max));
    if (!silence)
        return AVERROR(ENOMEM);
    av_samples_set_silence(silence->extended_data, 0, silence->nb_samples, outlink->channels, silence->format);
    silence->pts = s->next_pts;
    s->remaining -= silence->nb_samples;
    return filter_frame(ctx->inputs[0], silence);
}
/**
 * Handle the runtime "gain" and "gain_entry" commands: rebuild the kernel
 * with the new expression and, on success, remember it as the active
 * command string. A value identical to the current one is a no-op.
 */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    FIREqualizerContext *s = ctx->priv;
    int ret = AVERROR(ENOSYS);

    if (!strcmp(cmd, "gain")) {
        char *new_gain;

        if (SELECT_GAIN(s) && !strcmp(SELECT_GAIN(s), args)) {
            av_log(ctx, AV_LOG_DEBUG, "equal gain, do not rebuild.\n");
            return 0;
        }

        new_gain = av_strdup(args);
        if (!new_gain)
            return AVERROR(ENOMEM);

        ret = generate_kernel(ctx, new_gain, SELECT_GAIN_ENTRY(s));
        if (ret < 0) {
            /* keep the previous kernel/command on failure */
            av_freep(&new_gain);
        } else {
            av_freep(&s->gain_cmd);
            s->gain_cmd = new_gain;
        }
    } else if (!strcmp(cmd, "gain_entry")) {
        char *new_entry;

        if (SELECT_GAIN_ENTRY(s) && !strcmp(SELECT_GAIN_ENTRY(s), args)) {
            av_log(ctx, AV_LOG_DEBUG, "equal gain_entry, do not rebuild.\n");
            return 0;
        }

        new_entry = av_strdup(args);
        if (!new_entry)
            return AVERROR(ENOMEM);

        ret = generate_kernel(ctx, SELECT_GAIN(s), new_entry);
        if (ret < 0) {
            av_freep(&new_entry);
        } else {
            av_freep(&s->gain_entry_cmd);
            s->gain_entry_cmd = new_entry;
        }
    }

    return ret;
}
/* input pad: frames are convolved in-place, hence needs_writable */
static const AVFilterPad firequalizer_inputs[] = {
    {
        .name = "default",
        .config_props = config_input,
        .filter_frame = filter_frame,
        .type = AVMEDIA_TYPE_AUDIO,
        .needs_writable = 1,
    },
    { NULL }
};

/* output pad: request_frame flushes the FIR tail at EOF */
static const AVFilterPad firequalizer_outputs[] = {
    {
        .name = "default",
        .request_frame = request_frame,
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_firequalizer = {
    .name = "firequalizer",
    .description = NULL_IF_CONFIG_SMALL("Finite Impulse Response Equalizer."),
    .uninit = uninit,
    .query_formats = query_formats,
    .process_command = process_command,
    .priv_size = sizeof(FIREqualizerContext),
    .inputs = firequalizer_inputs,
    .outputs = firequalizer_outputs,
    .priv_class = &firequalizer_class,
};

246
externals/ffmpeg/libavfilter/af_flanger.c vendored Executable file
View File

@@ -0,0 +1,246 @@
/*
* Copyright (c) 2006 Rob Sykes <robs@users.sourceforge.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avstring.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "avfilter.h"
#include "audio.h"
#include "internal.h"
#include "generate_wave_table.h"
#define INTERPOLATION_LINEAR 0
#define INTERPOLATION_QUADRATIC 1
typedef struct FlangerContext {
    const AVClass *class;
    double delay_min;       /* base delay; ms from the user, seconds after init() */
    double delay_depth;     /* swept delay range; ms -> seconds in init() */
    double feedback_gain;   /* regeneration; percent -> fraction in init() */
    double delay_gain;      /* wet mix; percent -> fraction, rescaled in init() */
    double speed;           /* LFO sweeps per second */
    int wave_shape;         /* WAVE_SIN or WAVE_TRI */
    double channel_phase;   /* per-channel LFO phase offset; percent -> fraction */
    int interpolation;      /* INTERPOLATION_LINEAR or INTERPOLATION_QUADRATIC */
    double in_gain;         /* dry mix, derived in init() */
    int max_samples;        /* delay line length in samples */
    uint8_t **delay_buffer; /* per-channel circular delay line */
    int delay_buf_pos;      /* current write index into the delay line */
    double *delay_last;     /* last delayed sample per channel (feedback source) */
    float *lfo;             /* precomputed table of delays (in samples) */
    int lfo_length;         /* samples per LFO period */
    int lfo_pos;            /* current read index into the LFO table */
} FlangerContext;
#define OFFSET(x) offsetof(FlangerContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* user options; percentages and milliseconds are normalized in init() */
static const AVOption flanger_options[] = {
    { "delay", "base delay in milliseconds", OFFSET(delay_min), AV_OPT_TYPE_DOUBLE, {.dbl=0}, 0, 30, A },
    { "depth", "added swept delay in milliseconds", OFFSET(delay_depth), AV_OPT_TYPE_DOUBLE, {.dbl=2}, 0, 10, A },
    { "regen", "percentage regeneration (delayed signal feedback)", OFFSET(feedback_gain), AV_OPT_TYPE_DOUBLE, {.dbl=0}, -95, 95, A },
    { "width", "percentage of delayed signal mixed with original", OFFSET(delay_gain), AV_OPT_TYPE_DOUBLE, {.dbl=71}, 0, 100, A },
    { "speed", "sweeps per second (Hz)", OFFSET(speed), AV_OPT_TYPE_DOUBLE, {.dbl=0.5}, 0.1, 10, A },
    { "shape", "swept wave shape", OFFSET(wave_shape), AV_OPT_TYPE_INT, {.i64=WAVE_SIN}, WAVE_SIN, WAVE_NB-1, A, "type" },
    { "triangular", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, A, "type" },
    { "t", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_TRI}, 0, 0, A, "type" },
    { "sinusoidal", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, A, "type" },
    { "s", NULL, 0, AV_OPT_TYPE_CONST, {.i64=WAVE_SIN}, 0, 0, A, "type" },
    { "phase", "swept wave percentage phase-shift for multi-channel", OFFSET(channel_phase), AV_OPT_TYPE_DOUBLE, {.dbl=25}, 0, 100, A },
    { "interp", "delay-line interpolation", OFFSET(interpolation), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, A, "itype" },
    { "linear", NULL, 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATION_LINEAR}, 0, 0, A, "itype" },
    { "quadratic", NULL, 0, AV_OPT_TYPE_CONST, {.i64=INTERPOLATION_QUADRATIC}, 0, 0, A, "itype" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(flanger);
/**
 * Normalize user options (percentages to fractions, milliseconds to
 * seconds) and derive the dry/wet mix so overall level stays balanced.
 */
static av_cold int init(AVFilterContext *ctx)
{
    FlangerContext *flanger = ctx->priv;

    /* options arrive as percentages / milliseconds */
    flanger->feedback_gain /= 100;
    flanger->delay_gain    /= 100;
    flanger->channel_phase /= 100;
    flanger->delay_min     /= 1000;
    flanger->delay_depth   /= 1000;

    /* in_gain uses the pre-rescale delay_gain, so compute it first */
    flanger->in_gain     = 1 / (1 + flanger->delay_gain);
    flanger->delay_gain /= 1 + flanger->delay_gain;
    flanger->delay_gain *= 1 - fabs(flanger->feedback_gain);
    return 0;
}
/**
 * Advertise planar double samples at any channel count and sample rate.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP, AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/**
 * Size the delay line and LFO table from the negotiated sample rate and
 * allocate per-channel state.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    FlangerContext *s = ctx->priv;

    /* +2.5 presumably covers rounding plus interpolation lookahead taps
     * (filter_frame reads up to int_delay+2) — see the tap reads there */
    s->max_samples = (s->delay_min + s->delay_depth) * inlink->sample_rate + 2.5;
    s->lfo_length = inlink->sample_rate / s->speed;
    s->delay_last = av_calloc(inlink->channels, sizeof(*s->delay_last));
    s->lfo = av_calloc(s->lfo_length, sizeof(*s->lfo));
    if (!s->lfo || !s->delay_last)
        return AVERROR(ENOMEM);

    /* LFO values are delays in samples, sweeping [delay_min, max_samples-2] */
    ff_generate_wave_table(s->wave_shape, AV_SAMPLE_FMT_FLT, s->lfo, s->lfo_length,
                           rint(s->delay_min * inlink->sample_rate),
                           s->max_samples - 2., 3 * M_PI_2);

    return av_samples_alloc_array_and_samples(&s->delay_buffer, NULL,
                                              inlink->channels, s->max_samples,
                                              inlink->format, 0);
}
/**
 * Apply the flanging effect: each output sample mixes the dry input with a
 * feedback delay line tapped at an LFO-modulated, interpolated position.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    FlangerContext *s = ctx->priv;
    AVFrame *out_frame;
    int chan, i;

    /* work in-place when the input frame is writable */
    if (av_frame_is_writable(frame)) {
        out_frame = frame;
    } else {
        out_frame = ff_get_audio_buffer(ctx->outputs[0], frame->nb_samples);
        if (!out_frame) {
            av_frame_free(&frame);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out_frame, frame);
    }

    for (i = 0; i < frame->nb_samples; i++) {

        /* writer moves backwards, so old samples sit at increasing offsets */
        s->delay_buf_pos = (s->delay_buf_pos + s->max_samples - 1) % s->max_samples;

        for (chan = 0; chan < inlink->channels; chan++) {
            double *src = (double *)frame->extended_data[chan];
            double *dst = (double *)out_frame->extended_data[chan];
            double delayed_0, delayed_1;
            double delayed;
            double in, out;
            /* each channel reads the LFO at its own phase offset */
            int channel_phase = chan * s->lfo_length * s->channel_phase + .5;
            double delay = s->lfo[(s->lfo_pos + channel_phase) % s->lfo_length];
            int int_delay = (int)delay;
            double frac_delay = modf(delay, &delay);
            double *delay_buffer = (double *)s->delay_buffer[chan];

            in = src[i];
            /* write the input plus feedback into the delay line */
            delay_buffer[s->delay_buf_pos] = in + s->delay_last[chan] *
                                                  s->feedback_gain;

            /* read consecutive taps around the fractional delay position */
            delayed_0 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
            delayed_1 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];

            if (s->interpolation == INTERPOLATION_LINEAR) {
                delayed = delayed_0 + (delayed_1 - delayed_0) * frac_delay;
            } else {
                /* quadratic interpolation through three consecutive taps */
                double a, b;
                double delayed_2 = delay_buffer[(s->delay_buf_pos + int_delay++) % s->max_samples];
                delayed_2 -= delayed_0;
                delayed_1 -= delayed_0;
                a = delayed_2 * .5 - delayed_1;
                b = delayed_1 * 2 - delayed_2 *.5;
                delayed = delayed_0 + (a * frac_delay + b) * frac_delay;
            }

            s->delay_last[chan] = delayed;
            out = in * s->in_gain + delayed * s->delay_gain;
            dst[i] = out;
        }
        s->lfo_pos = (s->lfo_pos + 1) % s->lfo_length;
    }

    if (frame != out_frame)
        av_frame_free(&frame);

    return ff_filter_frame(ctx->outputs[0], out_frame);
}
/**
 * Release all per-instance allocations. delay_buffer[0] holds the actual
 * sample storage (allocated by av_samples_alloc_array_and_samples), so it
 * is freed before the pointer array itself.
 */
static av_cold void uninit(AVFilterContext *ctx)
{
    FlangerContext *flanger = ctx->priv;

    if (flanger->delay_buffer)
        av_freep(&flanger->delay_buffer[0]);
    av_freep(&flanger->delay_buffer);
    av_freep(&flanger->delay_last);
    av_freep(&flanger->lfo);
}
/* input pad; filter_frame handles non-writable frames itself */
static const AVFilterPad flanger_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad flanger_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

AVFilter ff_af_flanger = {
    .name = "flanger",
    .description = NULL_IF_CONFIG_SMALL("Apply a flanging effect to the audio."),
    .query_formats = query_formats,
    .priv_size = sizeof(FlangerContext),
    .priv_class = &flanger_class,
    .init = init,
    .uninit = uninit,
    .inputs = flanger_inputs,
    .outputs = flanger_outputs,
};

228
externals/ffmpeg/libavfilter/af_haas.c vendored Executable file
View File

@@ -0,0 +1,228 @@
/*
* Copyright (c) 2001-2010 Vladimir Sadovnikov
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
/* maximum configurable delay, in milliseconds */
#define MAX_HAAS_DELAY 40

typedef struct HaasContext {
    const AVClass *class;

    int par_m_source;       /* middle source: 0=left 1=right 2=mid(L+R) 3=side(L-R) */
    double par_delay0;      /* left delay in milliseconds */
    double par_delay1;      /* right delay in milliseconds */
    int par_phase0;         /* left phase flag (0/1) */
    int par_phase1;         /* right phase flag (0/1) */
    int par_middle_phase;   /* invert the middle signal */
    double par_side_gain;   /* gain applied to the delayed side taps */
    double par_gain0;       /* left gain */
    double par_gain1;       /* right gain */
    double par_balance0;    /* left balance, -1..1 */
    double par_balance1;    /* right balance, -1..1 */
    double level_in;        /* input level */
    double level_out;       /* output level */

    double *buffer;         /* power-of-two sized circular delay buffer */
    size_t buffer_size;
    uint32_t write_ptr;     /* current write index into buffer */
    uint32_t delay[2];      /* per-side delay in samples (config_input) */
    double balance_l[2];    /* combined balance/gain/phase factors, left output */
    double balance_r[2];    /* combined balance/gain/phase factors, right output */
    double phase0;          /* +/-1.0 derived from par_phase0 */
    double phase1;          /* +/-1.0 derived from par_phase1 */
} HaasContext;
#define OFFSET(x) offsetof(HaasContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* user options; delays are in milliseconds, converted in config_input() */
static const AVOption haas_options[] = {
    { "level_in", "set level in", OFFSET(level_in), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
    { "level_out", "set level out", OFFSET(level_out), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
    { "side_gain", "set side gain", OFFSET(par_side_gain), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
    { "middle_source", "set middle source", OFFSET(par_m_source), AV_OPT_TYPE_INT, {.i64=2}, 0, 3, A, "source" },
    { "left", 0, 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, A, "source" },
    { "right", 0, 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, A, "source" },
    { "mid", "L+R", 0, AV_OPT_TYPE_CONST, {.i64=2}, 0, 0, A, "source" },
    { "side", "L-R", 0, AV_OPT_TYPE_CONST, {.i64=3}, 0, 0, A, "source" },
    { "middle_phase", "set middle phase", OFFSET(par_middle_phase), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
    { "left_delay", "set left delay", OFFSET(par_delay0), AV_OPT_TYPE_DOUBLE, {.dbl=2.05}, 0, MAX_HAAS_DELAY, A },
    { "left_balance", "set left balance", OFFSET(par_balance0), AV_OPT_TYPE_DOUBLE, {.dbl=-1.0}, -1, 1, A },
    { "left_gain", "set left gain", OFFSET(par_gain0), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
    { "left_phase", "set left phase", OFFSET(par_phase0), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, A },
    { "right_delay", "set right delay", OFFSET(par_delay1), AV_OPT_TYPE_DOUBLE, {.dbl=2.12}, 0, MAX_HAAS_DELAY, A },
    { "right_balance", "set right balance", OFFSET(par_balance1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, -1, 1, A },
    { "right_gain", "set right gain", OFFSET(par_gain1), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.015625, 64, A },
    { "right_phase", "set right phase", OFFSET(par_phase1), AV_OPT_TYPE_BOOL, {.i64=1}, 0, 1, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(haas);
/**
 * Advertise interleaved double samples, stereo only, at any sample rate.
 *
 * Fix: ff_all_samplerates() can return NULL on allocation failure; check
 * it before handing it to ff_set_common_samplerates(), as the other
 * query_formats implementations in this library do.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;
    int ret;

    if ((ret = ff_add_format                 (&formats, AV_SAMPLE_FMT_DBL  )) < 0 ||
        (ret = ff_set_common_formats         (ctx     , formats            )) < 0 ||
        (ret = ff_add_channel_layout         (&layout , AV_CH_LAYOUT_STEREO)) < 0 ||
        (ret = ff_set_common_channel_layouts (ctx     , layout             )) < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/**
 * Allocate the (power-of-two) delay buffer and precompute per-side delays
 * and combined balance/gain/phase coefficients.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    HaasContext *s = ctx->priv;
    size_t min_buf_size = (size_t)(inlink->sample_rate * MAX_HAAS_DELAY * 0.001);
    size_t new_buf_size = 1;

    /* round up to a power of two so indexing can use a bit mask */
    while (new_buf_size < min_buf_size)
        new_buf_size <<= 1;

    av_freep(&s->buffer);
    s->buffer = av_calloc(new_buf_size, sizeof(*s->buffer));
    if (!s->buffer)
        return AVERROR(ENOMEM);

    s->buffer_size = new_buf_size;
    s->write_ptr = 0;

    /* milliseconds -> samples */
    s->delay[0] = (uint32_t)(s->par_delay0 * 0.001 * inlink->sample_rate);
    s->delay[1] = (uint32_t)(s->par_delay1 * 0.001 * inlink->sample_rate);

    s->phase0 = s->par_phase0 ? 1.0 : -1.0;
    s->phase1 = s->par_phase1 ? 1.0 : -1.0;

    /* map balance from [-1,1] to complementary left/right weights, then
     * fold in gain and phase sign */
    s->balance_l[0] = (s->par_balance0 + 1) / 2 * s->par_gain0 * s->phase0;
    s->balance_r[0] = (1.0 - (s->par_balance0 + 1) / 2) * (s->par_gain0) * s->phase0;
    s->balance_l[1] = (s->par_balance1 + 1) / 2 * s->par_gain1 * s->phase1;
    s->balance_r[1] = (1.0 - (s->par_balance1 + 1) / 2) * (s->par_gain1) * s->phase1;

    return 0;
}
/**
 * Apply the Haas effect: derive a mono "middle" signal, delay it by two
 * different amounts, and mix the delayed taps back into left/right with
 * the precomputed balance/gain/phase coefficients.
 *
 * Fix: the switch on par_m_source had no default case, so `mid` could be
 * read uninitialized (UB) if the value ever fell outside 0..3; a default
 * is added (unreachable under the declared option range 0..3, but it
 * removes the hazard and the compiler warning).
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    HaasContext *s = ctx->priv;
    const double *src = (const double *)in->data[0];
    const double level_in = s->level_in;
    const double level_out = s->level_out;
    const uint32_t mask = s->buffer_size - 1;  /* buffer_size is a power of two */
    double *buffer = s->buffer;
    AVFrame *out;
    double *dst;
    int n;

    /* work in-place when possible */
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    dst = (double *)out->data[0];

    /* interleaved stereo: src/dst advance two doubles per sample */
    for (n = 0; n < in->nb_samples; n++, src += 2, dst += 2) {
        double mid, side[2], side_l, side_r;
        uint32_t s0_ptr, s1_ptr;

        /* derive the mono "middle" signal per user selection */
        switch (s->par_m_source) {
        case 0: mid = src[0]; break;
        case 1: mid = src[1]; break;
        case 2: mid = (src[0] + src[1]) * 0.5; break;
        case 3: mid = (src[0] - src[1]) * 0.5; break;
        default: mid = 0; break; /* unreachable: option range is 0..3 */
        }

        mid *= level_in;

        buffer[s->write_ptr] = mid;

        /* taps delayed by delay[0] / delay[1] samples */
        s0_ptr = (s->write_ptr + s->buffer_size - s->delay[0]) & mask;
        s1_ptr = (s->write_ptr + s->buffer_size - s->delay[1]) & mask;

        if (s->par_middle_phase)
            mid = -mid;

        side[0] = buffer[s0_ptr] * s->par_side_gain;
        side[1] = buffer[s1_ptr] * s->par_side_gain;

        side_l = side[0] * s->balance_l[0] - side[1] * s->balance_l[1];
        side_r = side[1] * s->balance_r[1] - side[0] * s->balance_r[0];

        dst[0] = (mid + side_l) * level_out;
        dst[1] = (mid + side_r) * level_out;

        s->write_ptr = (s->write_ptr + 1) & mask;
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Release the delay ring buffer and mark it empty. */
static av_cold void uninit(AVFilterContext *ctx)
{
    HaasContext *s = ctx->priv;

    s->buffer_size = 0;
    av_freep(&s->buffer);
}
/* Single stereo audio input; frames are filtered as they arrive. */
static const AVFilterPad inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};
/* Single stereo audio output. */
static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration entry for the "haas" audio filter. */
AVFilter ff_af_haas = {
    .name           = "haas",
    .description    = NULL_IF_CONFIG_SMALL("Apply Haas Stereo Enhancer."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(HaasContext),
    .priv_class     = &haas_class,
    .uninit         = uninit,
    .inputs         = inputs,
    .outputs        = outputs,
};

1785
externals/ffmpeg/libavfilter/af_hdcd.c vendored Executable file

File diff suppressed because it is too large Load Diff

884
externals/ffmpeg/libavfilter/af_headphone.c vendored Executable file
View File

@@ -0,0 +1,884 @@
/*
* Copyright (C) 2017 Paul B Mahol
* Copyright (C) 2013-2015 Andreas Fuchs, Wolfgang Hrauda
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <math.h>
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/float_dsp.h"
#include "libavutil/intmath.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "audio.h"
#define TIME_DOMAIN 0
#define FREQUENCY_DOMAIN 1
#define HRIR_STEREO 0
#define HRIR_MULTI 1
typedef struct HeadphoneContext {
    const AVClass *class;

    char *map;                  /* "map" option: channel order of HRIR streams */
    int type;                   /* TIME_DOMAIN or FREQUENCY_DOMAIN */

    int lfe_channel;            /* index of the LFE channel in the map, -1 if none */

    int have_hrirs;             /* set once convert_coeffs() has run */
    int eof_hrirs;              /* all HRIR inputs have reached EOF */

    int ir_len;                 /* longest impulse response, in samples */
    int air_len;                /* ir_len rounded up to a power of two */

    int mapping[64];            /* per-HRIR output channel id, -1 = unset */

    int nb_inputs;              /* main audio input plus HRIR inputs */
    int nb_irs;                 /* number of mapped HRIR pairs */

    float gain;                 /* "gain" option, in dB */
    float lfe_gain, gain_lfe;   /* "lfe" option (dB) and its linear form */

    float *ringbuffer[2];       /* per-ear sample history */
    int write[2];               /* per-ear ring-buffer write index */

    int buffer_length;          /* ring-buffer length (power of two) */
    int n_fft;                  /* FFT size for frequency-domain mode */
    int size;                   /* "size" option: processing block size */
    int hrir_fmt;               /* HRIR_STEREO or HRIR_MULTI */

    int *delay[2];              /* per-ear, per-IR delay in samples */
    float *data_ir[2];          /* per-ear time-domain IRs (stored reversed) */
    float *temp_src[2];         /* per-ear scratch for time-domain dot products */
    FFTComplex *temp_fft[2];    /* per-ear FFT input scratch */
    FFTComplex *temp_afft[2];   /* per-ear FFT accumulator scratch */

    FFTContext *fft[2], *ifft[2];
    FFTComplex *data_hrtf[2];   /* per-ear HRTF spectra (frequency mode) */

    AVFloatDSPContext *fdsp;
    struct headphone_inputs {
        AVFrame *frame;         /* buffered HRIR samples for this stream */
        int ir_len;             /* this stream's IR length */
        int delay_l;            /* left-ear lead-in delay, in samples */
        int delay_r;            /* right-ear lead-in delay, in samples */
        int eof;                /* this stream is fully read */
    } *in;
} HeadphoneContext;
/* Parse one channel name (e.g. "FL") at *arg into its channel id (bit index
 * in the layout mask), advancing *arg past the token on success. Records the
 * map position of the LFE channel in s->lfe_channel as a side effect.
 * Returns 0 on success, AVERROR(EINVAL) if the token does not name exactly
 * one channel. buf must hold at least 8 bytes for the %7[A-Z] scan. */
static int parse_channel_name(HeadphoneContext *s, int x, char **arg, int *rchannel, char *buf)
{
    int len, i, channel_id = 0;
    int64_t layout, layout0;

    if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
        layout0 = layout = av_get_channel_layout(buf);
        if (layout == AV_CH_LOW_FREQUENCY)
            s->lfe_channel = x;
        /* Binary-search log2 of the (expected) single-bit layout mask. */
        for (i = 32; i > 0; i >>= 1) {
            if (layout >= 1LL << i) {
                channel_id += i;
                layout >>= i;
            }
        }
        /* Reject names that map to zero or multiple channels. */
        if (channel_id >= 64 || layout0 != 1LL << channel_id)
            return AVERROR(EINVAL);
        *rchannel = channel_id;
        *arg += len;
        return 0;
    }
    return AVERROR(EINVAL);
}
/* Split the "map" option on '|' and resolve each token to an output channel
 * id, filling s->mapping[] and deriving the number of filter inputs. */
static void parse_map(AVFilterContext *ctx)
{
    HeadphoneContext *s = ctx->priv;
    char *arg, *tokenizer, *p, *args = av_strdup(s->map);
    int i;

    /* On OOM nothing is parsed; nb_inputs etc. keep their defaults. */
    if (!args)
        return;
    p = args;

    s->lfe_channel = -1;
    s->nb_inputs = 1;

    for (i = 0; i < 64; i++) {
        s->mapping[i] = -1;
    }

    while ((arg = av_strtok(p, "|", &tokenizer))) {
        int out_ch_id;
        char buf[8];

        p = NULL;
        /* Unparsable tokens are warned about and skipped, not fatal. */
        if (parse_channel_name(s, s->nb_irs, &arg, &out_ch_id, buf)) {
            av_log(ctx, AV_LOG_WARNING, "Failed to parse \'%s\' as channel name.\n", buf);
            continue;
        }

        s->mapping[s->nb_irs] = out_ch_id;
        s->nb_irs++;
    }

    /* One multichannel HRIR stream, or one stereo stream per mapped channel,
     * in addition to the main audio input. */
    if (s->hrir_fmt == HRIR_MULTI)
        s->nb_inputs = 2;
    else
        s->nb_inputs = s->nb_irs + 1;

    av_free(args);
}
/* Per-frame arguments shared by the two convolution jobs (one per ear). */
typedef struct ThreadData {
    AVFrame *in, *out;
    int *write;               /* per-ear ring-buffer write indices */
    int **delay;              /* per-ear delay tables */
    float **ir;               /* per-ear impulse responses */
    int *n_clippings;         /* per-ear clipped-sample counters */
    float **ringbuffer;
    float **temp_src;
    FFTComplex **temp_fft;
    FFTComplex **temp_afft;
} ThreadData;
/* Time-domain convolution worker; each of the two jobs renders one of the
 * interleaved stereo output channels by dot-multiplying every input
 * channel's sample history with that channel's impulse response. */
static int headphone_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    HeadphoneContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in, *out = td->out;
    int offset = jobnr;                      /* interleaved output slot for this job */
    int *write = &td->write[jobnr];
    const int *const delay = td->delay[jobnr];
    const float *const ir = td->ir[jobnr];
    int *n_clippings = &td->n_clippings[jobnr];
    float *ringbuffer = td->ringbuffer[jobnr];
    float *temp_src = td->temp_src[jobnr];
    const int ir_len = s->ir_len;
    const int air_len = s->air_len;          /* ir_len rounded up to a power of two */
    const float *src = (const float *)in->data[0];
    float *dst = (float *)out->data[0];
    const int in_channels = in->channels;
    const int buffer_length = s->buffer_length;
    const uint32_t modulo = (uint32_t)buffer_length - 1; /* power-of-two wrap mask */
    float *buffer[16];
    int wr = *write;
    int read;
    int i, l;

    dst += offset;
    /* Each input channel gets its own slice of the ring buffer. */
    for (l = 0; l < in_channels; l++) {
        buffer[l] = ringbuffer + l * buffer_length;
    }

    for (i = 0; i < in->nb_samples; i++) {
        const float *temp_ir = ir;

        *dst = 0;
        /* Push the current sample of every channel into its history. */
        for (l = 0; l < in_channels; l++) {
            *(buffer[l] + wr) = src[l];
        }

        for (l = 0; l < in_channels; l++) {
            const float *const bptr = buffer[l];

            if (l == s->lfe_channel) {
                /* LFE is not spatialized; it is scaled and mixed directly. */
                *dst += *(buffer[s->lfe_channel] + wr) * s->gain_lfe;
                temp_ir += air_len;
                continue;
            }

            /* Gather the delayed history into a contiguous scratch buffer. */
            read = (wr - *(delay + l) - (ir_len - 1) + buffer_length) & modulo;

            if (read + ir_len < buffer_length) {
                memcpy(temp_src, bptr + read, ir_len * sizeof(*temp_src));
            } else {
                /* NOTE(review): this wrap path mixes air_len and ir_len when
                 * sizing the two copies — looks inconsistent with the
                 * straight-copy branch above; confirm against upstream. */
                int len = FFMIN(air_len - (read % ir_len), buffer_length - read);

                memcpy(temp_src, bptr + read, len * sizeof(*temp_src));
                memcpy(temp_src + len, bptr, (air_len - len) * sizeof(*temp_src));
            }

            dst[0] += s->fdsp->scalarproduct_float(temp_ir, temp_src, FFALIGN(ir_len, 32));
            temp_ir += air_len;
        }

        /* Count clipped samples so a warning can be emitted once per frame. */
        if (fabsf(dst[0]) > 1)
            n_clippings[0]++;

        dst += 2;
        src += in_channels;
        wr = (wr + 1) & modulo;
    }

    *write = wr;

    return 0;
}
/* Frequency-domain convolution worker (one job per output channel),
 * overlap-add style: the FFT of the input block is multiplied with each
 * channel's HRTF spectrum, the accumulated spectrum is inverse-transformed,
 * and the convolution tail beyond nb_samples is carried in the ring buffer. */
static int headphone_fast_convolute(AVFilterContext *ctx, void *arg, int jobnr, int nb_jobs)
{
    HeadphoneContext *s = ctx->priv;
    ThreadData *td = arg;
    AVFrame *in = td->in, *out = td->out;
    int offset = jobnr;                      /* interleaved output slot for this job */
    int *write = &td->write[jobnr];
    FFTComplex *hrtf = s->data_hrtf[jobnr];
    int *n_clippings = &td->n_clippings[jobnr];
    float *ringbuffer = td->ringbuffer[jobnr];
    const int ir_len = s->ir_len;
    const float *src = (const float *)in->data[0];
    float *dst = (float *)out->data[0];
    const int in_channels = in->channels;
    const int buffer_length = s->buffer_length;
    const uint32_t modulo = (uint32_t)buffer_length - 1;
    FFTComplex *fft_in = s->temp_fft[jobnr];
    FFTComplex *fft_acc = s->temp_afft[jobnr];
    FFTContext *ifft = s->ifft[jobnr];
    FFTContext *fft = s->fft[jobnr];
    const int n_fft = s->n_fft;
    const float fft_scale = 1.0f / s->n_fft; /* av_fft transforms are unnormalized */
    FFTComplex *hrtf_offset;
    int wr = *write;
    int n_read;
    int i, j;

    dst += offset;

    /* Drain the overlap tail left behind by the previous block. */
    n_read = FFMIN(ir_len, in->nb_samples);
    for (j = 0; j < n_read; j++) {
        dst[2 * j] = ringbuffer[wr];
        ringbuffer[wr] = 0.0;
        wr = (wr + 1) & modulo;
    }

    for (j = n_read; j < in->nb_samples; j++) {
        dst[2 * j] = 0;
    }

    memset(fft_acc, 0, sizeof(FFTComplex) * n_fft);

    for (i = 0; i < in_channels; i++) {
        if (i == s->lfe_channel) {
            /* LFE bypasses the HRTF and is mixed in directly. */
            for (j = 0; j < in->nb_samples; j++) {
                dst[2 * j] += src[i + j * in_channels] * s->gain_lfe;
            }
            continue;
        }

        offset = i * n_fft;              /* this channel's HRTF spectrum */
        hrtf_offset = hrtf + offset;

        /* Forward-transform the channel's samples, zero-padded to n_fft. */
        memset(fft_in, 0, sizeof(FFTComplex) * n_fft);

        for (j = 0; j < in->nb_samples; j++) {
            fft_in[j].re = src[j * in_channels + i];
        }

        av_fft_permute(fft, fft_in);
        av_fft_calc(fft, fft_in);

        /* Accumulate the complex product input-spectrum * HRTF. */
        for (j = 0; j < n_fft; j++) {
            const FFTComplex *hcomplex = hrtf_offset + j;
            const float re = fft_in[j].re;
            const float im = fft_in[j].im;

            fft_acc[j].re += re * hcomplex->re - im * hcomplex->im;
            fft_acc[j].im += re * hcomplex->im + im * hcomplex->re;
        }
    }

    av_fft_permute(ifft, fft_acc);
    av_fft_calc(ifft, fft_acc);

    for (j = 0; j < in->nb_samples; j++) {
        dst[2 * j] += fft_acc[j].re * fft_scale;
    }

    /* Save the convolution tail for the next block (overlap-add). */
    for (j = 0; j < ir_len - 1; j++) {
        int write_pos = (wr + j) & modulo;

        *(ringbuffer + write_pos) += fft_acc[in->nb_samples + j].re * fft_scale;
    }

    /* Count clipped samples so a warning can be emitted once per frame. */
    for (i = 0; i < out->nb_samples; i++) {
        if (fabsf(dst[0]) > 1) {
            n_clippings[0]++;
        }
        dst += 2;
    }

    *write = wr;

    return 0;
}
/* Record the number of IR samples queued on one HRIR input, rejecting
 * streams longer than the supported maximum, and track the longest IR. */
static int check_ir(AVFilterLink *inlink, int input_number)
{
    AVFilterContext *ctx = inlink->dst;
    HeadphoneContext *s = ctx->priv;
    const int max_ir_len = 65536;   /* hard cap on impulse-response length */
    int queued = ff_inlink_queued_samples(inlink);

    if (queued > max_ir_len) {
        av_log(ctx, AV_LOG_ERROR, "Too big length of IRs: %d > %d.\n", queued, max_ir_len);
        return AVERROR(EINVAL);
    }

    s->in[input_number].ir_len = queued;
    s->ir_len = FFMAX(queued, s->ir_len);

    return 0;
}
/* Run one block of audio through the selected convolution engine, using one
 * thread job per output (ear) channel, and emit the stereo result. */
static int headphone_frame(HeadphoneContext *s, AVFrame *in, AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    int n_clippings[2] = { 0 };
    ThreadData td;
    AVFrame *out;

    out = ff_get_audio_buffer(outlink, in->nb_samples);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    out->pts = in->pts;

    td.in = in; td.out = out; td.write = s->write;
    td.delay = s->delay; td.ir = s->data_ir; td.n_clippings = n_clippings;
    td.ringbuffer = s->ringbuffer; td.temp_src = s->temp_src;
    td.temp_fft = s->temp_fft;
    td.temp_afft = s->temp_afft;

    /* Exactly two jobs: one per ear. */
    if (s->type == TIME_DOMAIN) {
        ctx->internal->execute(ctx, headphone_convolute, &td, NULL, 2);
    } else {
        ctx->internal->execute(ctx, headphone_fast_convolute, &td, NULL, 2);
    }
    emms_c();

    if (n_clippings[0] + n_clippings[1] > 0) {
        av_log(ctx, AV_LOG_WARNING, "%d of %d samples clipped. Please reduce gain.\n",
               n_clippings[0] + n_clippings[1], out->nb_samples * 2);
    }

    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Consume the fully queued HRIR streams and convert them into the internal
 * per-ear representation: time-reversed raw IRs for time-domain mode, or
 * precomputed HRTF spectra for frequency-domain mode. Called once, after
 * every HRIR input has reached EOF. */
static int convert_coeffs(AVFilterContext *ctx, AVFilterLink *inlink)
{
    struct HeadphoneContext *s = ctx->priv;
    const int ir_len = s->ir_len;
    int nb_irs = s->nb_irs;
    int nb_input_channels = ctx->inputs[0]->channels;
    /* "gain" option in dB, minus 3 dB per mixed channel, as a linear factor. */
    float gain_lin = expf((s->gain - 3 * nb_input_channels) / 20 * M_LN10);
    FFTComplex *data_hrtf_l = NULL;
    FFTComplex *data_hrtf_r = NULL;
    FFTComplex *fft_in_l = NULL;
    FFTComplex *fft_in_r = NULL;
    float *data_ir_l = NULL;
    float *data_ir_r = NULL;
    int offset = 0, ret = 0;
    int n_fft;
    int i, j, k;

    /* Round sizes up to powers of two for the ring buffers and the FFT. */
    s->air_len = 1 << (32 - ff_clz(ir_len));
    s->buffer_length = 1 << (32 - ff_clz(s->air_len));
    s->n_fft = n_fft = 1 << (32 - ff_clz(ir_len + s->size));

    if (s->type == FREQUENCY_DOMAIN) {
        fft_in_l = av_calloc(n_fft, sizeof(*fft_in_l));
        fft_in_r = av_calloc(n_fft, sizeof(*fft_in_r));
        if (!fft_in_l || !fft_in_r) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        av_fft_end(s->fft[0]);
        av_fft_end(s->fft[1]);
        s->fft[0] = av_fft_init(av_log2(s->n_fft), 0);
        s->fft[1] = av_fft_init(av_log2(s->n_fft), 0);
        av_fft_end(s->ifft[0]);
        av_fft_end(s->ifft[1]);
        s->ifft[0] = av_fft_init(av_log2(s->n_fft), 1);
        s->ifft[1] = av_fft_init(av_log2(s->n_fft), 1);

        if (!s->fft[0] || !s->fft[1] || !s->ifft[0] || !s->ifft[1]) {
            av_log(ctx, AV_LOG_ERROR, "Unable to create FFT contexts of size %d.\n", s->n_fft);
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    s->data_ir[0] = av_calloc(s->air_len, sizeof(float) * s->nb_irs);
    s->data_ir[1] = av_calloc(s->air_len, sizeof(float) * s->nb_irs);
    /* Fix: delay[] holds ints — size the allocation from the element type
     * instead of hard-coding sizeof(float), which only matched by accident. */
    s->delay[0] = av_calloc(s->nb_irs, sizeof(*s->delay[0]));
    s->delay[1] = av_calloc(s->nb_irs, sizeof(*s->delay[1]));

    if (s->type == TIME_DOMAIN) {
        s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
        s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float) * nb_input_channels);
    } else {
        s->ringbuffer[0] = av_calloc(s->buffer_length, sizeof(float));
        s->ringbuffer[1] = av_calloc(s->buffer_length, sizeof(float));
        s->temp_fft[0] = av_calloc(s->n_fft, sizeof(FFTComplex));
        s->temp_fft[1] = av_calloc(s->n_fft, sizeof(FFTComplex));
        s->temp_afft[0] = av_calloc(s->n_fft, sizeof(FFTComplex));
        s->temp_afft[1] = av_calloc(s->n_fft, sizeof(FFTComplex));
        if (!s->temp_fft[0] || !s->temp_fft[1] ||
            !s->temp_afft[0] || !s->temp_afft[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* Fix: also check the delay allocations, which were previously unchecked. */
    if (!s->data_ir[0] || !s->data_ir[1] ||
        !s->delay[0] || !s->delay[1] ||
        !s->ringbuffer[0] || !s->ringbuffer[1]) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    if (s->type == TIME_DOMAIN) {
        s->temp_src[0] = av_calloc(s->air_len, sizeof(float));
        s->temp_src[1] = av_calloc(s->air_len, sizeof(float));

        data_ir_l = av_calloc(nb_irs * s->air_len, sizeof(*data_ir_l));
        data_ir_r = av_calloc(nb_irs * s->air_len, sizeof(*data_ir_r));
        if (!data_ir_r || !data_ir_l || !s->temp_src[0] || !s->temp_src[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    } else {
        data_hrtf_l = av_calloc(n_fft, sizeof(*data_hrtf_l) * nb_irs);
        data_hrtf_r = av_calloc(n_fft, sizeof(*data_hrtf_r) * nb_irs);
        if (!data_hrtf_r || !data_hrtf_l) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* Pull each HRIR stream's queued samples and convert them. */
    for (i = 0; i < s->nb_inputs - 1; i++) {
        int len = s->in[i + 1].ir_len;
        int delay_l = s->in[i + 1].delay_l;
        int delay_r = s->in[i + 1].delay_r;
        float *ptr;

        ret = ff_inlink_consume_samples(ctx->inputs[i + 1], len, len, &s->in[i + 1].frame);
        if (ret < 0)
            goto fail;
        ptr = (float *)s->in[i + 1].frame->extended_data[0];

        if (s->hrir_fmt == HRIR_STEREO) {
            /* Find the input channel this stereo HRIR stream maps to. */
            int idx = -1;

            for (j = 0; j < inlink->channels; j++) {
                if (s->mapping[i] < 0) {
                    continue;
                }

                if ((av_channel_layout_extract_channel(inlink->channel_layout, j)) == (1LL << s->mapping[i])) {
                    idx = i;
                    break;
                }
            }

            if (idx == -1)
                continue;
            if (s->type == TIME_DOMAIN) {
                /* Store the IR reversed so the dot product convolves. */
                offset = idx * s->air_len;
                for (j = 0; j < len; j++) {
                    data_ir_l[offset + j] = ptr[len * 2 - j * 2 - 2] * gain_lin;
                    data_ir_r[offset + j] = ptr[len * 2 - j * 2 - 1] * gain_lin;
                }
            } else {
                memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
                memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));
                offset = idx * n_fft;
                for (j = 0; j < len; j++) {
                    fft_in_l[delay_l + j].re = ptr[j * 2    ] * gain_lin;
                    fft_in_r[delay_r + j].re = ptr[j * 2 + 1] * gain_lin;
                }

                av_fft_permute(s->fft[0], fft_in_l);
                av_fft_calc(s->fft[0], fft_in_l);
                memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l));
                av_fft_permute(s->fft[0], fft_in_r);
                av_fft_calc(s->fft[0], fft_in_r);
                memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r));
            }
        } else {
            /* HRIR_MULTI: one stream whose channels are L/R pairs. */
            int I, N = ctx->inputs[1]->channels;

            for (k = 0; k < N / 2; k++) {
                int idx = -1;

                for (j = 0; j < inlink->channels; j++) {
                    if (s->mapping[k] < 0) {
                        continue;
                    }

                    if ((av_channel_layout_extract_channel(inlink->channel_layout, j)) == (1LL << s->mapping[k])) {
                        idx = k;
                        break;
                    }
                }
                if (idx == -1)
                    continue;

                I = idx * 2;
                if (s->type == TIME_DOMAIN) {
                    offset = idx * s->air_len;
                    for (j = 0; j < len; j++) {
                        data_ir_l[offset + j] = ptr[len * N - j * N - N + I    ] * gain_lin;
                        data_ir_r[offset + j] = ptr[len * N - j * N - N + I + 1] * gain_lin;
                    }
                } else {
                    memset(fft_in_l, 0, n_fft * sizeof(*fft_in_l));
                    memset(fft_in_r, 0, n_fft * sizeof(*fft_in_r));
                    offset = idx * n_fft;
                    for (j = 0; j < len; j++) {
                        fft_in_l[delay_l + j].re = ptr[j * N + I    ] * gain_lin;
                        fft_in_r[delay_r + j].re = ptr[j * N + I + 1] * gain_lin;
                    }

                    av_fft_permute(s->fft[0], fft_in_l);
                    av_fft_calc(s->fft[0], fft_in_l);
                    memcpy(data_hrtf_l + offset, fft_in_l, n_fft * sizeof(*fft_in_l));
                    av_fft_permute(s->fft[0], fft_in_r);
                    av_fft_calc(s->fft[0], fft_in_r);
                    memcpy(data_hrtf_r + offset, fft_in_r, n_fft * sizeof(*fft_in_r));
                }
            }
        }

        av_frame_free(&s->in[i + 1].frame);
    }

    if (s->type == TIME_DOMAIN) {
        memcpy(s->data_ir[0], data_ir_l, sizeof(float) * nb_irs * s->air_len);
        memcpy(s->data_ir[1], data_ir_r, sizeof(float) * nb_irs * s->air_len);
    } else {
        s->data_hrtf[0] = av_calloc(n_fft * s->nb_irs, sizeof(FFTComplex));
        s->data_hrtf[1] = av_calloc(n_fft * s->nb_irs, sizeof(FFTComplex));
        if (!s->data_hrtf[0] || !s->data_hrtf[1]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        memcpy(s->data_hrtf[0], data_hrtf_l,
               sizeof(FFTComplex) * nb_irs * n_fft);
        memcpy(s->data_hrtf[1], data_hrtf_r,
               sizeof(FFTComplex) * nb_irs * n_fft);
    }

    s->have_hrirs = 1;

fail:
    /* Temporaries and any still-buffered HRIR frames are always released. */
    for (i = 0; i < s->nb_inputs - 1; i++)
        av_frame_free(&s->in[i + 1].frame);

    av_freep(&data_ir_l);
    av_freep(&data_ir_r);

    av_freep(&data_hrtf_l);
    av_freep(&data_hrtf_r);

    av_freep(&fft_in_l);
    av_freep(&fft_in_r);

    return ret;
}
/* Filter scheduler: first wait until every HRIR input reaches EOF (their
 * samples stay queued on the links), then build the coefficients once, then
 * run fixed-size blocks from input 0 through the convolution. */
static int activate(AVFilterContext *ctx)
{
    HeadphoneContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *in = NULL;
    int i, ret;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);
    if (!s->eof_hrirs) {
        for (i = 1; i < s->nb_inputs; i++) {
            if (s->in[i].eof)
                continue;

            /* Re-measure the queued IR length while samples accumulate. */
            if ((ret = check_ir(ctx->inputs[i], i)) < 0)
                return ret;

            if (!s->in[i].eof) {
                if (ff_outlink_get_status(ctx->inputs[i]) == AVERROR_EOF)
                    s->in[i].eof = 1;
            }
        }

        /* Any HRIR stream still open? */
        for (i = 1; i < s->nb_inputs; i++) {
            if (!s->in[i].eof)
                break;
        }

        if (i != s->nb_inputs) {
            /* Keep requesting frames on the unfinished HRIR inputs. */
            if (ff_outlink_frame_wanted(ctx->outputs[0])) {
                for (i = 1; i < s->nb_inputs; i++) {
                    if (!s->in[i].eof)
                        ff_inlink_request_frame(ctx->inputs[i]);
                }
            }
            return 0;
        } else {
            s->eof_hrirs = 1;
        }
    }

    /* One-time coefficient conversion once all IRs are available. */
    if (!s->have_hrirs && s->eof_hrirs) {
        ret = convert_coeffs(ctx, inlink);
        if (ret < 0)
            return ret;
    }

    /* Consume exactly s->size samples per block from the main input. */
    if ((ret = ff_inlink_consume_samples(ctx->inputs[0], s->size, s->size, &in)) > 0) {
        ret = headphone_frame(s, in, outlink);
        if (ret < 0)
            return ret;
    }

    if (ret < 0)
        return ret;

    FF_FILTER_FORWARD_STATUS(ctx->inputs[0], ctx->outputs[0]);
    if (ff_outlink_frame_wanted(ctx->outputs[0]))
        ff_inlink_request_frame(ctx->inputs[0]);

    return 0;
}
/* Negotiate formats: float samples throughout; any layout on the main
 * input, stereo (or any count, for HRIR_MULTI) on the HRIR inputs, and a
 * stereo binaural output. */
static int query_formats(AVFilterContext *ctx)
{
    struct HeadphoneContext *s = ctx->priv;
    AVFilterFormats *sample_fmts = NULL;
    AVFilterChannelLayouts *in_layouts = NULL;
    AVFilterChannelLayouts *stereo = NULL;
    AVFilterChannelLayouts *hrir_channels = NULL;
    int err, i;

    /* All processing is done on interleaved floats. */
    err = ff_add_format(&sample_fmts, AV_SAMPLE_FMT_FLT);
    if (err)
        return err;
    err = ff_set_common_formats(ctx, sample_fmts);
    if (err)
        return err;

    /* The main input may carry any channel layout. */
    in_layouts = ff_all_channel_layouts();
    if (!in_layouts)
        return AVERROR(ENOMEM);

    err = ff_channel_layouts_ref(in_layouts, &ctx->inputs[0]->out_channel_layouts);
    if (err)
        return err;

    err = ff_add_channel_layout(&stereo, AV_CH_LAYOUT_STEREO);
    if (err)
        return err;

    if (s->hrir_fmt == HRIR_MULTI) {
        /* One multichannel HRIR stream: accept any channel count. */
        hrir_channels = ff_all_channel_counts();
        if (!hrir_channels)
            return AVERROR(ENOMEM);

        err = ff_channel_layouts_ref(hrir_channels, &ctx->inputs[1]->out_channel_layouts);
        if (err)
            return err;
    } else {
        /* One stereo HRIR stream per mapped channel. */
        for (i = 1; i < s->nb_inputs; i++) {
            err = ff_channel_layouts_ref(stereo, &ctx->inputs[i]->out_channel_layouts);
            if (err)
                return err;
        }
    }

    /* The output is always binaural stereo. */
    err = ff_channel_layouts_ref(stereo, &ctx->outputs[0]->in_channel_layouts);
    if (err)
        return err;

    sample_fmts = ff_all_samplerates();
    if (!sample_fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, sample_fmts);
}
/* Validate that the channel map provides an HRIR for every channel of the
 * main input. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    HeadphoneContext *s = ctx->priv;

    if (s->nb_irs < inlink->channels) {
        av_log(ctx, AV_LOG_ERROR, "Number of HRIRs must be >= %d.\n", inlink->channels);
        return AVERROR(EINVAL);
    }

    return 0;
}
/* Create the main input pad plus one pad per HRIR stream (count derived
 * from the channel map) and allocate per-input state. */
static av_cold int init(AVFilterContext *ctx)
{
    HeadphoneContext *s = ctx->priv;
    int i, ret;

    AVFilterPad pad = {
        .name         = "in0",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    };
    if ((ret = ff_insert_inpad(ctx, 0, &pad)) < 0)
        return ret;

    if (!s->map) {
        av_log(ctx, AV_LOG_ERROR, "Valid mapping must be set.\n");
        return AVERROR(EINVAL);
    }

    parse_map(ctx);

    s->in = av_calloc(s->nb_inputs, sizeof(*s->in));
    if (!s->in)
        return AVERROR(ENOMEM);

    /* Dynamically named pads "hrir0", "hrir1", ... — their names are
     * allocated here and released in uninit(). */
    for (i = 1; i < s->nb_inputs; i++) {
        char *name = av_asprintf("hrir%d", i - 1);
        AVFilterPad pad = {
            .name = name,
            .type = AVMEDIA_TYPE_AUDIO,
        };
        if (!name)
            return AVERROR(ENOMEM);
        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    s->fdsp = avpriv_float_dsp_alloc(0);
    if (!s->fdsp)
        return AVERROR(ENOMEM);

    return 0;
}
/* Validate the HRIR stream's channel count (multichannel mode) and compute
 * the linear LFE gain for the negotiated input layout. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    HeadphoneContext *s = ctx->priv;
    AVFilterLink *main_in = ctx->inputs[0];

    if (s->hrir_fmt == HRIR_MULTI) {
        AVFilterLink *hrir_in = ctx->inputs[1];

        /* A multichannel HRIR stream must carry an L/R pair per channel. */
        if (hrir_in->channels < main_in->channels * 2) {
            av_log(ctx, AV_LOG_ERROR, "Number of channels in HRIR stream must be >= %d.\n", main_in->channels * 2);
            return AVERROR(EINVAL);
        }
    }

    /* dB -> linear, with -3 dB per mixed channel folded in. */
    s->gain_lfe = expf((s->gain - 3 * main_in->channels + s->lfe_gain) / 20 * M_LN10);

    return 0;
}
/* Release all per-ear state, the pad names created in init(), and the
 * per-input bookkeeping array. */
static av_cold void uninit(AVFilterContext *ctx)
{
    HeadphoneContext *s = ctx->priv;
    int i;

    /* Everything per-ear comes in symmetric pairs; tear both down. */
    for (i = 0; i < 2; i++) {
        av_fft_end(s->ifft[i]);
        av_fft_end(s->fft[i]);
        av_freep(&s->delay[i]);
        av_freep(&s->data_ir[i]);
        av_freep(&s->ringbuffer[i]);
        av_freep(&s->temp_src[i]);
        av_freep(&s->temp_fft[i]);
        av_freep(&s->temp_afft[i]);
        av_freep(&s->data_hrtf[i]);
    }
    av_freep(&s->fdsp);

    /* Pad 0's name is a string literal; only the dynamically created
     * "hrirN" pads own their names. */
    for (i = 0; i < s->nb_inputs; i++) {
        if (ctx->input_pads && i)
            av_freep(&ctx->input_pads[i].name);
    }
    av_freep(&s->in);
}
#define OFFSET(x) offsetof(HeadphoneContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* User-visible options of the "headphone" filter. */
static const AVOption headphone_options[] = {
    { "map",       "set channels convolution mappings",  OFFSET(map),      AV_OPT_TYPE_STRING, {.str=NULL},            .flags = FLAGS },
    { "gain",      "set gain in dB",                     OFFSET(gain),     AV_OPT_TYPE_FLOAT,  {.dbl=0},     -20,  40, .flags = FLAGS },
    { "lfe",       "set lfe gain in dB",                 OFFSET(lfe_gain), AV_OPT_TYPE_FLOAT,  {.dbl=0},     -20,  40, .flags = FLAGS },
    { "type",      "set processing",                     OFFSET(type),     AV_OPT_TYPE_INT,    {.i64=1},       0,   1, .flags = FLAGS, "type" },
    { "time",      "time domain",                        0,                AV_OPT_TYPE_CONST,  {.i64=0},       0,   0, .flags = FLAGS, "type" },
    { "freq",      "frequency domain",                   0,                AV_OPT_TYPE_CONST,  {.i64=1},       0,   0, .flags = FLAGS, "type" },
    { "size",      "set frame size",                     OFFSET(size),     AV_OPT_TYPE_INT,    {.i64=1024}, 1024,96000, .flags = FLAGS },
    { "hrir",      "set hrir format",                    OFFSET(hrir_fmt), AV_OPT_TYPE_INT,    {.i64=HRIR_STEREO}, 0, 1, .flags = FLAGS, "hrir" },
    { "stereo",    "hrir files have exactly 2 channels", 0,                AV_OPT_TYPE_CONST,  {.i64=HRIR_STEREO}, 0, 0, .flags = FLAGS, "hrir" },
    { "multich",   "single multichannel hrir file",      0,                AV_OPT_TYPE_CONST,  {.i64=HRIR_MULTI},  0, 0, .flags = FLAGS, "hrir" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(headphone);
/* Single stereo output; config_output validates HRIR channel counts. */
static const AVFilterPad outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
    },
    { NULL }
};
/* Filter registration entry; inputs are created dynamically in init(). */
AVFilter ff_af_headphone = {
    .name          = "headphone",
    .description   = NULL_IF_CONFIG_SMALL("Apply headphone binaural spatialization with HRTFs in additional streams."),
    .priv_size     = sizeof(HeadphoneContext),
    .priv_class    = &headphone_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .activate      = activate,
    .inputs        = NULL,
    .outputs       = outputs,
    .flags         = AVFILTER_FLAG_SLICE_THREADS | AVFILTER_FLAG_DYNAMIC_INPUTS,
};

544
externals/ffmpeg/libavfilter/af_join.c vendored Executable file
View File

@@ -0,0 +1,544 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Audio join filter
*
* Join multiple audio inputs as different channels in
* a single output
*/
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "filters.h"
#include "internal.h"
/* Describes where one output channel is taken from. */
typedef struct ChannelMap {
    int input;                ///< input stream index
    int in_channel_idx;       ///< index of in_channel in the input stream data
    uint64_t in_channel;      ///< layout describing the input channel
    uint64_t out_channel;     ///< layout describing the output channel
} ChannelMap;
typedef struct JoinContext {
    const AVClass *class;

    int inputs;                 ///< "inputs" option: number of input streams
    char *map;                  ///< "map" option: explicit channel mappings
    char *channel_layout_str;   ///< "channel_layout" option, as a string
    uint64_t channel_layout;    ///< parsed output channel layout

    int nb_channels;            ///< number of channels in the output layout
    ChannelMap *channels;       ///< one entry per output channel

    /**
     * Temporary storage for input frames, until we get one on each input.
     */
    AVFrame **input_frames;

    /**
     * Temporary storage for buffer references, for assembling the output frame.
     */
    AVBufferRef **buffers;
} JoinContext;
#define OFFSET(x) offsetof(JoinContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM

/* User-visible options of the "join" filter. */
static const AVOption join_options[] = {
    { "inputs",         "Number of input streams.", OFFSET(inputs),             AV_OPT_TYPE_INT,    { .i64 = 2 }, 1, INT_MAX, A|F },
    { "channel_layout", "Channel layout of the "
                        "output stream.",           OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, A|F },
    { "map",            "A comma-separated list of channels maps in the format "
                        "'input_stream.input_channel-output_channel.",
                                                    OFFSET(map),                AV_OPT_TYPE_STRING, .flags = A|F },
    { NULL }
};

AVFILTER_DEFINE_CLASS(join);
/* Parse the "map" option: '|'-separated entries of the form
 * "input_idx.input_channel-output_channel", where the input channel may be
 * given by index or by name. Fills s->channels[].
 * Note: s->map is parsed destructively in place. */
static int parse_maps(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    char separator = '|';
    char *cur = s->map;

    while (cur && *cur) {
        char *sep, *next, *p;
        uint64_t in_channel = 0, out_channel = 0;
        int input_idx, out_ch_idx, in_ch_idx;

        next = strchr(cur, separator);
        if (next)
            *next++ = 0;

        /* split the map into input and output parts */
        if (!(sep = strchr(cur, '-'))) {
            av_log(ctx, AV_LOG_ERROR, "Missing separator '-' in channel "
                   "map '%s'\n", cur);
            return AVERROR(EINVAL);
        }
        *sep++ = 0;

/* Parse a single named channel into var, failing on unknown names or
 * multi-channel layouts. */
#define PARSE_CHANNEL(str, var, inout)                                         \
    if (!(var = av_get_channel_layout(str))) {                                 \
        av_log(ctx, AV_LOG_ERROR, "Invalid " inout " channel: %s.\n", str);\
        return AVERROR(EINVAL);                                                \
    }                                                                          \
    if (av_get_channel_layout_nb_channels(var) != 1) {                         \
        av_log(ctx, AV_LOG_ERROR, "Channel map describes more than one "       \
               inout " channel.\n");                                           \
        return AVERROR(EINVAL);                                                \
    }

        /* parse output channel */
        PARSE_CHANNEL(sep, out_channel, "output");
        if (!(out_channel & s->channel_layout)) {
            av_log(ctx, AV_LOG_ERROR, "Output channel '%s' is not present in "
                   "requested channel layout.\n", sep);
            return AVERROR(EINVAL);
        }

        out_ch_idx = av_get_channel_layout_channel_index(s->channel_layout,
                                                         out_channel);
        if (s->channels[out_ch_idx].input >= 0) {
            av_log(ctx, AV_LOG_ERROR, "Multiple maps for output channel "
                   "'%s'.\n", sep);
            return AVERROR(EINVAL);
        }

        /* parse input channel */
        input_idx = strtol(cur, &cur, 0);
        if (input_idx < 0 || input_idx >= s->inputs) {
            av_log(ctx, AV_LOG_ERROR, "Invalid input stream index: %d.\n",
                   input_idx);
            return AVERROR(EINVAL);
        }

        /* Skip the separator between stream index and channel spec. */
        if (*cur)
            cur++;

        in_ch_idx = strtol(cur, &p, 0);
        if (p == cur) {
            /* channel specifier is not a number,
             * try to parse as channel name */
            PARSE_CHANNEL(cur, in_channel, "input");
        }

        /* Either a concrete channel (by name) or an index resolved later
         * in join_config_output(), once the input layout is known. */
        s->channels[out_ch_idx].input = input_idx;
        if (in_channel)
            s->channels[out_ch_idx].in_channel = in_channel;
        else
            s->channels[out_ch_idx].in_channel_idx = in_ch_idx;

        cur = next;
    }
    return 0;
}
/* Parse the output layout, allocate per-channel and per-input state, apply
 * the user map, and create one input pad per stream. */
static av_cold int join_init(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    int ret, i;

    if (!(s->channel_layout = av_get_channel_layout(s->channel_layout_str))) {
        av_log(ctx, AV_LOG_ERROR, "Error parsing channel layout '%s'.\n",
               s->channel_layout_str);
        return AVERROR(EINVAL);
    }

    s->nb_channels  = av_get_channel_layout_nb_channels(s->channel_layout);
    s->channels     = av_mallocz_array(s->nb_channels, sizeof(*s->channels));
    s->buffers      = av_mallocz_array(s->nb_channels, sizeof(*s->buffers));
    s->input_frames = av_mallocz_array(s->inputs, sizeof(*s->input_frames));
    if (!s->channels || !s->buffers|| !s->input_frames)
        return AVERROR(ENOMEM);

    /* input == -1 marks a channel as not yet mapped. */
    for (i = 0; i < s->nb_channels; i++) {
        s->channels[i].out_channel = av_channel_layout_extract_channel(s->channel_layout, i);
        s->channels[i].input       = -1;
    }

    if ((ret = parse_maps(ctx)) < 0)
        return ret;

    /* Pad names are allocated here and freed in join_uninit(). */
    for (i = 0; i < s->inputs; i++) {
        AVFilterPad pad = { 0 };

        pad.type = AVMEDIA_TYPE_AUDIO;
        pad.name = av_asprintf("input%d", i);
        if (!pad.name)
            return AVERROR(ENOMEM);

        if ((ret = ff_insert_inpad(ctx, i, &pad)) < 0) {
            av_freep(&pad.name);
            return ret;
        }
    }

    return 0;
}
/* Free buffered frames, the pad names created in join_init(), and the
 * per-channel/per-input arrays. */
static av_cold void join_uninit(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    int i;

    /* Drop any frames still queued while waiting for all inputs. */
    if (s->input_frames) {
        for (i = 0; i < s->inputs; i++)
            av_frame_free(&s->input_frames[i]);
    }

    /* Pad names were allocated with av_asprintf() in join_init(). */
    for (i = 0; i < ctx->nb_inputs; i++)
        av_freep(&ctx->input_pads[i].name);

    av_freep(&s->channels);
    av_freep(&s->buffers);
    av_freep(&s->input_frames);
}
/* Negotiate formats: the output carries the requested layout, each input
 * may carry any layout; planar sample formats at any rate. */
static int join_query_formats(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    AVFilterChannelLayouts *layouts = NULL;
    int i, ret;

    if ((ret = ff_add_channel_layout(&layouts, s->channel_layout)) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;

    for (i = 0; i < ctx->nb_inputs; i++) {
        layouts = ff_all_channel_layouts();
        /* Fix: ff_all_channel_layouts() can return NULL on OOM; check it
         * before handing it on, as the rest of the library does. */
        if (!layouts)
            return AVERROR(ENOMEM);
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
            return ret;
    }

    if ((ret = ff_set_common_formats(ctx, ff_planar_sample_fmts())) < 0 ||
        (ret = ff_set_common_samplerates(ctx, ff_all_samplerates())) < 0)
        return ret;

    return 0;
}
/* Map an output channel to the first input that provides the very same
 * channel and has not had it claimed yet. inputs[n] tracks the channels
 * already taken from input n. No-op when nothing matches. */
static void guess_map_matching(AVFilterContext *ctx, ChannelMap *ch,
                               uint64_t *inputs)
{
    int in;

    for (in = 0; in < ctx->nb_inputs; in++) {
        AVFilterLink *link = ctx->inputs[in];

        if ((ch->out_channel & link->channel_layout) &&
            !(ch->out_channel & inputs[in])) {
            ch->input      = in;
            ch->in_channel = ch->out_channel;
            inputs[in]    |= ch->out_channel;
            return;
        }
    }
}
/* Fallback mapping: take the lowest-order still-unclaimed channel from the
 * first input that has one. inputs[n] tracks the channels already taken
 * from input n. No-op when every input channel is claimed. */
static void guess_map_any(AVFilterContext *ctx, ChannelMap *ch,
                          uint64_t *inputs)
{
    int in;

    for (in = 0; in < ctx->nb_inputs; in++) {
        AVFilterLink *link = ctx->inputs[in];
        uint64_t free_mask = link->channel_layout & ~inputs[in];

        if (free_mask) {
            ch->input      = in;
            ch->in_channel = av_channel_layout_extract_channel(free_mask, 0);
            inputs[in]    |= ch->in_channel;
            return;
        }
    }
}
/* Resolve every output channel to a concrete (input stream, input channel)
 * pair: apply the user mappings, then fill the gaps heuristically.
 * Returns 0 on success, a negative AVERROR code on failure. */
static int join_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    JoinContext *s = ctx->priv;
    uint64_t *inputs;   // nth element tracks which channels are used from nth input
    int i, ret = 0;

    /* initialize inputs to user-specified mappings */
    if (!(inputs = av_mallocz_array(ctx->nb_inputs, sizeof(*inputs))))
        return AVERROR(ENOMEM);

    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];
        AVFilterLink *inlink;

        if (ch->input < 0)
            continue;

        inlink = ctx->inputs[ch->input];

        /* A map given by index is resolved to a channel now that the
         * input's layout is known. */
        if (!ch->in_channel)
            ch->in_channel = av_channel_layout_extract_channel(inlink->channel_layout,
                                                               ch->in_channel_idx);

        if (!(ch->in_channel & inlink->channel_layout)) {
            av_log(ctx, AV_LOG_ERROR, "Requested channel %s is not present in "
                   "input stream #%d.\n", av_get_channel_name(ch->in_channel),
                   ch->input);
            ret = AVERROR(EINVAL);
            goto fail;
        }

        inputs[ch->input] |= ch->in_channel;
    }

    /* guess channel maps when not explicitly defined */
    /* first try unused matching channels */
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];

        if (ch->input < 0)
            guess_map_matching(ctx, ch, inputs);
    }

    /* if the above failed, try to find _any_ unused input channel */
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];

        if (ch->input < 0)
            guess_map_any(ctx, ch, inputs);

        if (ch->input < 0) {
            av_log(ctx, AV_LOG_ERROR, "Could not find input channel for "
                   "output channel '%s'.\n",
                   av_get_channel_name(ch->out_channel));
            /* Fix: ret was still 0 here, so this error path previously fell
             * through to "fail" and reported success. */
            ret = AVERROR(EINVAL);
            goto fail;
        }

        ch->in_channel_idx = av_get_channel_layout_channel_index(ctx->inputs[ch->input]->channel_layout,
                                                                 ch->in_channel);
    }

    /* print mappings */
    av_log(ctx, AV_LOG_VERBOSE, "mappings: ");
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];
        av_log(ctx, AV_LOG_VERBOSE, "%d.%s => %s ", ch->input,
               av_get_channel_name(ch->in_channel),
               av_get_channel_name(ch->out_channel));
    }
    av_log(ctx, AV_LOG_VERBOSE, "\n");

    for (i = 0; i < ctx->nb_inputs; i++) {
        if (!inputs[i])
            av_log(ctx, AV_LOG_WARNING, "No channels are used from input "
                   "stream %d.\n", i);
    }

fail:
    av_freep(&inputs);
    return ret;
}
/* Assemble and emit one joined output frame once every input has a frame
 * queued. The output frame does not copy samples: it aliases the input
 * planes and takes references on their underlying buffers. */
static int try_push_frame(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    JoinContext *s = ctx->priv;
    AVFrame *frame;
    int linesize   = INT_MAX;
    int nb_samples = INT_MAX;
    int nb_buffers = 0;
    int i, j, ret;

    /* Need one frame per input; output length is the shortest of them. */
    for (i = 0; i < ctx->nb_inputs; i++) {
        if (!s->input_frames[i])
            return 0;

        nb_samples = FFMIN(nb_samples, s->input_frames[i]->nb_samples);
    }
    if (!nb_samples)
        return 0;

    /* setup the output frame */
    frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);
    if (s->nb_channels > FF_ARRAY_ELEMS(frame->data)) {
        frame->extended_data = av_mallocz_array(s->nb_channels,
                                                sizeof(*frame->extended_data));
        if (!frame->extended_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    /* copy the data pointers */
    for (i = 0; i < s->nb_channels; i++) {
        ChannelMap *ch = &s->channels[i];
        AVFrame *cur   = s->input_frames[ch->input];
        AVBufferRef *buf;

        frame->extended_data[i] = cur->extended_data[ch->in_channel_idx];
        linesize = FFMIN(linesize, cur->linesize[0]);

        /* add the buffer where this plane is stored to the list if it's
         * not already there */
        buf = av_frame_get_plane_buffer(cur, ch->in_channel_idx);
        if (!buf) {
            ret = AVERROR(EINVAL);
            goto fail;
        }
        for (j = 0; j < nb_buffers; j++)
            if (s->buffers[j]->buffer == buf->buffer)
                break;
        /* BUGFIX: "not found" means j reached nb_buffers; the previous
         * check (j == i) silently dropped buffers as soon as two channels
         * shared a plane, leaving planes unreferenced by the output. */
        if (j == nb_buffers)
            s->buffers[nb_buffers++] = buf;
    }

    /* create references to the buffers we copied to output */
    if (nb_buffers > FF_ARRAY_ELEMS(frame->buf)) {
        frame->nb_extended_buf = nb_buffers - FF_ARRAY_ELEMS(frame->buf);
        frame->extended_buf = av_mallocz_array(frame->nb_extended_buf,
                                               sizeof(*frame->extended_buf));
        if (!frame->extended_buf) {
            frame->nb_extended_buf = 0;
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }
    for (i = 0; i < FFMIN(FF_ARRAY_ELEMS(frame->buf), nb_buffers); i++) {
        frame->buf[i] = av_buffer_ref(s->buffers[i]);
        if (!frame->buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }
    for (i = 0; i < frame->nb_extended_buf; i++) {
        frame->extended_buf[i] = av_buffer_ref(s->buffers[i +
                                               FF_ARRAY_ELEMS(frame->buf)]);
        if (!frame->extended_buf[i]) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
    }

    frame->nb_samples     = nb_samples;
    frame->channel_layout = outlink->channel_layout;
    frame->channels       = outlink->channels;
    frame->sample_rate    = outlink->sample_rate;
    frame->format         = outlink->format;
    frame->pts            = s->input_frames[0]->pts;
    frame->linesize[0]    = linesize;
    if (frame->data != frame->extended_data) {
        memcpy(frame->data, frame->extended_data, sizeof(*frame->data) *
               FFMIN(FF_ARRAY_ELEMS(frame->data), s->nb_channels));
    }

    ret = ff_filter_frame(outlink, frame);

    for (i = 0; i < ctx->nb_inputs; i++)
        av_frame_free(&s->input_frames[i]);

    return ret;

fail:
    av_frame_free(&frame);
    return ret;
}
/* Activation callback: consume one frame from input 0 (which fixes the
 * output frame length), then gather frames of exactly that length from
 * the other inputs before attempting to push a joined frame. */
static int activate(AVFilterContext *ctx)
{
    JoinContext *s = ctx->priv;
    int i, ret, status;
    int nb_samples = 0;
    int64_t pts;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    /* Input 0 drives the process. */
    if (!s->input_frames[0]) {
        ret = ff_inlink_consume_frame(ctx->inputs[0], &s->input_frames[0]);
        if (ret < 0) {
            return ret;
        } else if (ff_inlink_acknowledge_status(ctx->inputs[0], &status, &pts)) {
            /* EOF/error on input 0 propagates straight to the output. */
            ff_outlink_set_status(ctx->outputs[0], status, pts);
            return 0;
        } else {
            if (ff_outlink_frame_wanted(ctx->outputs[0]) && !s->input_frames[0]) {
                ff_inlink_request_frame(ctx->inputs[0]);
                return 0;
            }
        }
        if (!s->input_frames[0]) {
            return 0;
        }
    }

    nb_samples = s->input_frames[0]->nb_samples;

    /* Collect exactly nb_samples from every remaining input. */
    for (i = 1; i < ctx->nb_inputs && nb_samples > 0; i++) {
        if (s->input_frames[i])
            continue;

        if (ff_inlink_check_available_samples(ctx->inputs[i], nb_samples) > 0) {
            ret = ff_inlink_consume_samples(ctx->inputs[i], nb_samples, nb_samples, &s->input_frames[i]);
            if (ret < 0) {
                return ret;
            } else if (ff_inlink_acknowledge_status(ctx->inputs[i], &status, &pts)) {
                ff_outlink_set_status(ctx->outputs[0], status, pts);
                return 0;
            }
        } else {
            if (ff_outlink_frame_wanted(ctx->outputs[0])) {
                ff_inlink_request_frame(ctx->inputs[i]);
                return 0;
            }
        }
    }

    return try_push_frame(ctx);
}
/* Single multi-channel audio output; the mapping is resolved in
 * join_config_output once all input links are configured. */
static const AVFilterPad avfilter_af_join_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = join_config_output,
    },
    { NULL }
};
/* The "join" filter: merges channels from several input streams into one
 * multi-channel output stream. Input pads are created dynamically at init
 * time, hence .inputs = NULL and AVFILTER_FLAG_DYNAMIC_INPUTS. */
AVFilter ff_af_join = {
    .name = "join",
    .description = NULL_IF_CONFIG_SMALL("Join multiple audio streams into "
                                        "multi-channel output."),
    .priv_size = sizeof(JoinContext),
    .priv_class = &join_class,
    .init = join_init,
    .uninit = join_uninit,
    .activate = activate,
    .query_formats = join_query_formats,
    .inputs = NULL,
    .outputs = avfilter_af_join_outputs,
    .flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

746
externals/ffmpeg/libavfilter/af_ladspa.c vendored Executable file
View File

@@ -0,0 +1,746 @@
/*
* Copyright (c) 2013 Paul B Mahol
* Copyright (c) 2011 Mina Nagy Zaki
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* LADSPA wrapper
*/
#include <dlfcn.h>
#include <ladspa.h>
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
/* Instance data for the LADSPA wrapper filter. */
typedef struct LADSPAContext {
    const AVClass *class;
    char *dl_name;                 // shared object name or path (option "file")
    char *plugin;                  // plugin label within the library (option "plugin")
    char *options;                 // control-value string (option "controls")
    void *dl_handle;               // dlopen() handle of the plugin library

    unsigned long nb_inputs;
    unsigned long *ipmap;          /* map input number to port number */

    unsigned long nb_inputcontrols;
    unsigned long *icmap;          /* map input control number to port number */
    LADSPA_Data *ictlv;            /* input controls values */

    unsigned long nb_outputs;
    unsigned long *opmap;          /* map output number to port number */

    unsigned long nb_outputcontrols;
    unsigned long *ocmap;          /* map output control number to port number */
    LADSPA_Data *octlv;            /* output controls values */

    const LADSPA_Descriptor *desc; // descriptor of the selected plugin
    int *ctl_needs_value;          // per input control: 1 until the user supplies a value
    int nb_handles;                // number of plugin instances (one per channel for 1:1 plugins)
    LADSPA_Handle *handles;        // the instantiated plugin handles

    int sample_rate;               // source mode: output sample rate
    int nb_samples;                // source mode: samples per generated frame
    int64_t pts;                   // source mode: next output timestamp (in samples)
    int64_t duration;              // source mode: total duration, -1 = unlimited
} LADSPAContext;
#define OFFSET(x) offsetof(LADSPAContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM
/* User options; every long name has a single-letter alias. sample_rate,
 * nb_samples and duration are only meaningful in source (no-input) mode. */
static const AVOption ladspa_options[] = {
    { "file", "set library name or full path", OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "f", "set library name or full path", OFFSET(dl_name), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "plugin", "set plugin name", OFFSET(plugin), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "p", "set plugin name", OFFSET(plugin), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "controls", "set plugin options", OFFSET(options), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "c", "set plugin options", OFFSET(options), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT32_MAX, FLAGS },
    { "s", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT32_MAX, FLAGS },
    { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
    { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
    { "duration", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=-1}, -1, INT64_MAX, FLAGS },
    { "d", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=-1}, -1, INT64_MAX, FLAGS },
    { NULL }
};
AVFILTER_DEFINE_CLASS(ladspa);
/* Log one control port: name, value domain (toggle / int / float with
 * optional bounds) and either its current value (print != 0) or its
 * default (print == 0). */
static void print_ctl_info(AVFilterContext *ctx, int level,
                           LADSPAContext *s, int ctl, unsigned long *map,
                           LADSPA_Data *values, int print)
{
    const LADSPA_PortRangeHint *h = s->desc->PortRangeHints + map[ctl];

    av_log(ctx, level, "c%i: %s [", ctl, s->desc->PortNames[map[ctl]]);

    if (LADSPA_IS_HINT_TOGGLED(h->HintDescriptor)) {
        av_log(ctx, level, "toggled (1 or 0)");

        if (LADSPA_IS_HINT_HAS_DEFAULT(h->HintDescriptor))
            av_log(ctx, level, " (default %i)", (int)values[ctl]);
    } else {
        if (LADSPA_IS_HINT_INTEGER(h->HintDescriptor)) {
            av_log(ctx, level, "<int>");

            if (LADSPA_IS_HINT_BOUNDED_BELOW(h->HintDescriptor))
                av_log(ctx, level, ", min: %i", (int)h->LowerBound);

            if (LADSPA_IS_HINT_BOUNDED_ABOVE(h->HintDescriptor))
                av_log(ctx, level, ", max: %i", (int)h->UpperBound);

            if (print)
                av_log(ctx, level, " (value %d)", (int)values[ctl]);
            else if (LADSPA_IS_HINT_HAS_DEFAULT(h->HintDescriptor))
                av_log(ctx, level, " (default %d)", (int)values[ctl]);
        } else {
            av_log(ctx, level, "<float>");

            if (LADSPA_IS_HINT_BOUNDED_BELOW(h->HintDescriptor))
                av_log(ctx, level, ", min: %f", h->LowerBound);

            if (LADSPA_IS_HINT_BOUNDED_ABOVE(h->HintDescriptor))
                av_log(ctx, level, ", max: %f", h->UpperBound);

            if (print)
                av_log(ctx, level, " (value %f)", values[ctl]);
            else if (LADSPA_IS_HINT_HAS_DEFAULT(h->HintDescriptor))
                av_log(ctx, level, " (default %f)", values[ctl]);
        }

        if (LADSPA_IS_HINT_SAMPLE_RATE(h->HintDescriptor))
            av_log(ctx, level, ", multiple of sample rate");

        if (LADSPA_IS_HINT_LOGARITHMIC(h->HintDescriptor))
            av_log(ctx, level, ", logarithmic scale");
    }

    av_log(ctx, level, "]\n");
}
/* Run the plugin over one input frame. Processing is done in place when
 * the plugin permits it (writable frame, equal port counts, plugin not
 * flagged INPLACE_BROKEN); otherwise a fresh output buffer is allocated. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    LADSPAContext *s = ctx->priv;
    AVFrame *out;
    int i, h, p;

    av_assert0(in->channels == (s->nb_inputs * s->nb_handles));

    if (!s->nb_outputs ||
        (av_frame_is_writable(in) && s->nb_inputs == s->nb_outputs &&
        !(s->desc->Properties & LADSPA_PROPERTY_INPLACE_BROKEN))) {
        out = in;
    } else {
        out = ff_get_audio_buffer(ctx->outputs[0], in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    av_assert0(!s->nb_outputs || out->channels == (s->nb_outputs * s->nb_handles));

    /* With per-channel handles (mono plugin replicated) the plane index
     * follows the handle number; with a single handle it follows the port
     * number. */
    for (h = 0; h < s->nb_handles; h++) {
        for (i = 0; i < s->nb_inputs; i++) {
            p = s->nb_handles > 1 ? h : i;
            s->desc->connect_port(s->handles[h], s->ipmap[i],
                                  (LADSPA_Data*)in->extended_data[p]);
        }

        for (i = 0; i < s->nb_outputs; i++) {
            p = s->nb_handles > 1 ? h : i;
            s->desc->connect_port(s->handles[h], s->opmap[i],
                                  (LADSPA_Data*)out->extended_data[p]);
        }

        s->desc->run(s->handles[h], in->nb_samples);
    }

    for (i = 0; i < s->nb_outputcontrols; i++)
        print_ctl_info(ctx, AV_LOG_VERBOSE, s, i, s->ocmap, s->octlv, 1);

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(ctx->outputs[0], out);
}
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LADSPAContext *s = ctx->priv;
    AVFrame *out;
    int64_t elapsed;
    int port;

    /* When the plugin has audio inputs we act as a filter: just forward
     * the request upstream. */
    if (ctx->nb_inputs)
        return ff_request_frame(ctx->inputs[0]);

    /* Source mode: stop once the requested duration has been produced. */
    elapsed = av_rescale(s->pts, AV_TIME_BASE, s->sample_rate);
    if (s->duration >= 0 && elapsed >= s->duration)
        return AVERROR_EOF;

    out = ff_get_audio_buffer(outlink, s->nb_samples);
    if (!out)
        return AVERROR(ENOMEM);

    /* Wire the output audio ports to the frame planes, then generate. */
    for (port = 0; port < s->nb_outputs; port++)
        s->desc->connect_port(s->handles[0], s->opmap[port],
                              (LADSPA_Data*)out->extended_data[port]);
    s->desc->run(s->handles[0], s->nb_samples);

    for (port = 0; port < s->nb_outputcontrols; port++)
        print_ctl_info(ctx, AV_LOG_INFO, s, port, s->ocmap, s->octlv, 1);

    out->sample_rate = s->sample_rate;
    out->pts         = s->pts;
    s->pts          += s->nb_samples;

    return ff_filter_frame(outlink, out);
}
static void set_default_ctl_value(LADSPAContext *s, int ctl,
                                  unsigned long *map, LADSPA_Data *values)
{
    /* Fill values[ctl] with the default declared in the port's range hint.
     * The LOW/MIDDLE/HIGH defaults interpolate between the bounds, in log
     * space when the port is marked logarithmic. */
    const LADSPA_PortRangeHint *hint = s->desc->PortRangeHints + map[ctl];
    const LADSPA_Data lo = hint->LowerBound;
    const LADSPA_Data hi = hint->UpperBound;
    const LADSPA_PortRangeHintDescriptor hd = hint->HintDescriptor;

    if (LADSPA_IS_HINT_DEFAULT_MINIMUM(hd)) {
        values[ctl] = lo;
    } else if (LADSPA_IS_HINT_DEFAULT_MAXIMUM(hd)) {
        values[ctl] = hi;
    } else if (LADSPA_IS_HINT_DEFAULT_0(hd)) {
        values[ctl] = 0.0;
    } else if (LADSPA_IS_HINT_DEFAULT_1(hd)) {
        values[ctl] = 1.0;
    } else if (LADSPA_IS_HINT_DEFAULT_100(hd)) {
        values[ctl] = 100.0;
    } else if (LADSPA_IS_HINT_DEFAULT_440(hd)) {
        values[ctl] = 440.0;
    } else if (LADSPA_IS_HINT_DEFAULT_LOW(hd)) {
        values[ctl] = LADSPA_IS_HINT_LOGARITHMIC(hd)
                      ? exp(log(lo) * 0.75 + log(hi) * 0.25)
                      : lo * 0.75 + hi * 0.25;
    } else if (LADSPA_IS_HINT_DEFAULT_MIDDLE(hd)) {
        values[ctl] = LADSPA_IS_HINT_LOGARITHMIC(hd)
                      ? exp(log(lo) * 0.5 + log(hi) * 0.5)
                      : lo * 0.5 + hi * 0.5;
    } else if (LADSPA_IS_HINT_DEFAULT_HIGH(hd)) {
        values[ctl] = LADSPA_IS_HINT_LOGARITHMIC(hd)
                      ? exp(log(lo) * 0.25 + log(hi) * 0.75)
                      : lo * 0.25 + hi * 0.75;
    }
}
static int connect_ports(AVFilterContext *ctx, AVFilterLink *link)
{
    LADSPAContext *s = ctx->priv;
    int h, c;

    /* A 1-in/1-out plugin is replicated once per channel; anything else
     * gets a single instance handling all ports. */
    s->nb_handles = s->nb_inputs == 1 && s->nb_outputs == 1 ? link->channels : 1;
    s->handles    = av_calloc(s->nb_handles, sizeof(*s->handles));
    if (!s->handles)
        return AVERROR(ENOMEM);

    for (h = 0; h < s->nb_handles; h++) {
        s->handles[h] = s->desc->instantiate(s->desc, link->sample_rate);
        if (!s->handles[h]) {
            av_log(ctx, AV_LOG_ERROR, "Could not instantiate plugin.\n");
            return AVERROR_EXTERNAL;
        }

        /* Control ports are wired once here; audio ports are reconnected
         * for every frame. */
        for (c = 0; c < s->nb_inputcontrols; c++)
            s->desc->connect_port(s->handles[h], s->icmap[c], s->ictlv + c);
        for (c = 0; c < s->nb_outputcontrols; c++)
            s->desc->connect_port(s->handles[h], s->ocmap[c], &s->octlv[c]);

        if (s->desc->activate)
            s->desc->activate(s->handles[h]);
    }

    av_log(ctx, AV_LOG_DEBUG, "handles: %d\n", s->nb_handles);

    return 0;
}
static int config_input(AVFilterLink *inlink)
{
    /* Instantiate the plugin handles once the input parameters are known. */
    return connect_ports(inlink->dst, inlink);
}
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LADSPAContext *s = ctx->priv;

    if (!ctx->nb_inputs) {
        /* Source mode: the rate comes from the user option and the plugin
         * handles are created against the output link. */
        outlink->sample_rate = s->sample_rate;
        outlink->time_base   = (AVRational){1, s->sample_rate};
        return connect_ports(ctx, outlink);
    }

    /* Filter mode: mirror the input link parameters. */
    {
        AVFilterLink *inlink = ctx->inputs[0];

        outlink->format      = inlink->format;
        outlink->sample_rate = inlink->sample_rate;
        if (s->nb_inputs == s->nb_outputs) {
            outlink->channel_layout = inlink->channel_layout;
            outlink->channels       = inlink->channels;
        }
    }

    return 0;
}
/* Count the plugin's audio input and output ports.
 * The counters are incremented, not reset, so callers must zero them.
 * Uses an unsigned long index to match LADSPA's PortCount type (the
 * previous int index was a signed/unsigned comparison and could overflow
 * on pathological descriptors). */
static void count_ports(const LADSPA_Descriptor *desc,
                        unsigned long *nb_inputs, unsigned long *nb_outputs)
{
    unsigned long i;

    for (i = 0; i < desc->PortCount; i++) {
        const LADSPA_PortDescriptor pd = desc->PortDescriptors[i];

        if (!LADSPA_IS_PORT_AUDIO(pd))
            continue;
        if (LADSPA_IS_PORT_INPUT(pd))
            (*nb_inputs)++;
        else if (LADSPA_IS_PORT_OUTPUT(pd))
            (*nb_outputs)++;
    }
}
static void *try_load(const char *dir, const char *soname)
{
    /* Attempt to dlopen "<dir>/<soname>.so"; NULL on any failure. */
    void *handle = NULL;
    char *path = av_asprintf("%s/%s.so", dir, soname);

    if (path) {
        handle = dlopen(path, RTLD_LOCAL|RTLD_NOW);
        av_free(path);
    }
    return handle;
}
/* Validate and store one input-control value.
 * Returns 0 on success, AVERROR(EINVAL) for an out-of-range control index
 * or a value outside the port's declared bounds. */
static int set_control(AVFilterContext *ctx, unsigned long port, LADSPA_Data value)
{
    LADSPAContext *s = ctx->priv;
    const char *label = s->desc->Label;
    const LADSPA_PortRangeHint *h;

    /* BUGFIX: bounds-check the index BEFORE using it — the previous code
     * read s->icmap[port] first, an out-of-bounds read for an invalid
     * user-supplied port (reachable via process_command). Also use %lu
     * for the unsigned long port (was a %ld format mismatch). */
    if (port >= s->nb_inputcontrols) {
        av_log(ctx, AV_LOG_ERROR, "Control c%lu is out of range [0 - %lu].\n",
               port, s->nb_inputcontrols);
        return AVERROR(EINVAL);
    }

    h = s->desc->PortRangeHints + s->icmap[port];

    if (LADSPA_IS_HINT_BOUNDED_BELOW(h->HintDescriptor) &&
            value < h->LowerBound) {
        av_log(ctx, AV_LOG_ERROR,
                "%s: input control c%lu is below lower boundary of %0.4f.\n",
                label, port, h->LowerBound);
        return AVERROR(EINVAL);
    }

    if (LADSPA_IS_HINT_BOUNDED_ABOVE(h->HintDescriptor) &&
            value > h->UpperBound) {
        av_log(ctx, AV_LOG_ERROR,
                "%s: input control c%lu is above upper boundary of %0.4f.\n",
                label, port, h->UpperBound);
        return AVERROR(EINVAL);
    }

    s->ictlv[port] = value;

    return 0;
}
/* Load the LADSPA library, resolve the requested plugin (or list the
 * available ones), scan its ports, apply the user-supplied control values
 * and create the input pad when the plugin consumes audio. */
static av_cold int init(AVFilterContext *ctx)
{
    LADSPAContext *s = ctx->priv;
    LADSPA_Descriptor_Function descriptor_fn;
    const LADSPA_Descriptor *desc;
    LADSPA_PortDescriptor pd;
    AVFilterPad pad = { NULL };
    char *p, *arg, *saveptr = NULL;
    unsigned long nb_ports;
    int i, j = 0;

    if (!s->dl_name) {
        av_log(ctx, AV_LOG_ERROR, "No plugin name provided\n");
        return AVERROR(EINVAL);
    }

    if (s->dl_name[0] == '/' || s->dl_name[0] == '.') {
        // argument is a path
        s->dl_handle = dlopen(s->dl_name, RTLD_LOCAL|RTLD_NOW);
    } else {
        // argument is a shared object name
        // probe LADSPA_PATH entries first, then the conventional per-user
        // and system-wide plugin directories
        char *paths = av_strdup(getenv("LADSPA_PATH"));
        const char *separator = ":";

        if (paths) {
            p = paths;
            while ((arg = av_strtok(p, separator, &saveptr)) && !s->dl_handle) {
                s->dl_handle = try_load(arg, s->dl_name);
                p = NULL;
            }
        }

        av_free(paths);
        if (!s->dl_handle && (paths = av_asprintf("%s/.ladspa/lib", getenv("HOME")))) {
            s->dl_handle = try_load(paths, s->dl_name);
            av_free(paths);
        }

        if (!s->dl_handle)
            s->dl_handle = try_load("/usr/local/lib/ladspa", s->dl_name);

        if (!s->dl_handle)
            s->dl_handle = try_load("/usr/lib/ladspa", s->dl_name);
    }
    if (!s->dl_handle) {
        av_log(ctx, AV_LOG_ERROR, "Failed to load '%s'\n", s->dl_name);
        return AVERROR(EINVAL);
    }

    descriptor_fn = dlsym(s->dl_handle, "ladspa_descriptor");
    if (!descriptor_fn) {
        av_log(ctx, AV_LOG_ERROR, "Could not find ladspa_descriptor: %s\n", dlerror());
        return AVERROR(EINVAL);
    }

    // Find the requested plugin, or list plugins
    if (!s->plugin) {
        av_log(ctx, AV_LOG_INFO, "The '%s' library contains the following plugins:\n", s->dl_name);
        av_log(ctx, AV_LOG_INFO, "I = Input Channels\n");
        av_log(ctx, AV_LOG_INFO, "O = Output Channels\n");
        av_log(ctx, AV_LOG_INFO, "I:O %-25s %s\n", "Plugin", "Description");
        av_log(ctx, AV_LOG_INFO, "\n");
        for (i = 0; desc = descriptor_fn(i); i++) {
            unsigned long inputs = 0, outputs = 0;

            count_ports(desc, &inputs, &outputs);
            av_log(ctx, AV_LOG_INFO, "%lu:%lu %-25s %s\n", inputs, outputs, desc->Label,
                   (char *)av_x_if_null(desc->Name, "?"));
            av_log(ctx, AV_LOG_VERBOSE, "Maker: %s\n",
                   (char *)av_x_if_null(desc->Maker, "?"));
            av_log(ctx, AV_LOG_VERBOSE, "Copyright: %s\n",
                   (char *)av_x_if_null(desc->Copyright, "?"));
        }
        // listing mode only: bail out without configuring the filter
        return AVERROR_EXIT;
    } else {
        for (i = 0;; i++) {
            desc = descriptor_fn(i);
            if (!desc) {
                av_log(ctx, AV_LOG_ERROR, "Could not find plugin: %s\n", s->plugin);
                return AVERROR(EINVAL);
            }

            if (desc->Label && !strcmp(desc->Label, s->plugin))
                break;
        }
    }

    s->desc  = desc;
    nb_ports = desc->PortCount;

    // port maps are sized for the worst case (every port of one kind)
    s->ipmap = av_calloc(nb_ports, sizeof(*s->ipmap));
    s->opmap = av_calloc(nb_ports, sizeof(*s->opmap));
    s->icmap = av_calloc(nb_ports, sizeof(*s->icmap));
    s->ocmap = av_calloc(nb_ports, sizeof(*s->ocmap));
    s->ictlv = av_calloc(nb_ports, sizeof(*s->ictlv));
    s->octlv = av_calloc(nb_ports, sizeof(*s->octlv));
    s->ctl_needs_value = av_calloc(nb_ports, sizeof(*s->ctl_needs_value));
    if (!s->ipmap || !s->opmap || !s->icmap ||
        !s->ocmap || !s->ictlv || !s->octlv || !s->ctl_needs_value)
        return AVERROR(ENOMEM);

    // classify every port: audio in/out vs. control in/out
    for (i = 0; i < nb_ports; i++) {
        pd = desc->PortDescriptors[i];

        if (LADSPA_IS_PORT_AUDIO(pd)) {
            if (LADSPA_IS_PORT_INPUT(pd)) {
                s->ipmap[s->nb_inputs] = i;
                s->nb_inputs++;
            } else if (LADSPA_IS_PORT_OUTPUT(pd)) {
                s->opmap[s->nb_outputs] = i;
                s->nb_outputs++;
            }
        } else if (LADSPA_IS_PORT_CONTROL(pd)) {
            if (LADSPA_IS_PORT_INPUT(pd)) {
                s->icmap[s->nb_inputcontrols] = i;

                if (LADSPA_IS_HINT_HAS_DEFAULT(desc->PortRangeHints[i].HintDescriptor))
                    set_default_ctl_value(s, s->nb_inputcontrols, s->icmap, s->ictlv);
                else
                    s->ctl_needs_value[s->nb_inputcontrols] = 1;

                s->nb_inputcontrols++;
            } else if (LADSPA_IS_PORT_OUTPUT(pd)) {
                s->ocmap[s->nb_outputcontrols] = i;
                s->nb_outputcontrols++;
            }
        }
    }

    // List Control Ports if "help" is specified
    if (s->options && !strcmp(s->options, "help")) {
        if (!s->nb_inputcontrols) {
            av_log(ctx, AV_LOG_INFO,
                   "The '%s' plugin does not have any input controls.\n",
                   desc->Label);
        } else {
            av_log(ctx, AV_LOG_INFO,
                   "The '%s' plugin has the following input controls:\n",
                   desc->Label);
            for (i = 0; i < s->nb_inputcontrols; i++)
                print_ctl_info(ctx, AV_LOG_INFO, s, i, s->icmap, s->ictlv, 0);
        }
        return AVERROR_EXIT;
    }

    // Parse control parameters
    p = s->options;
    while (s->options) {
        LADSPA_Data val;
        int ret;

        if (!(arg = av_strtok(p, " |", &saveptr)))
            break;
        p = NULL;

        // accept "cN=value" or a bare value (assigned to the next control
        // in declaration order via j)
        if (av_sscanf(arg, "c%d=%f", &i, &val) != 2) {
            if (av_sscanf(arg, "%f", &val) != 1) {
                av_log(ctx, AV_LOG_ERROR, "Invalid syntax.\n");
                return AVERROR(EINVAL);
            }
            i = j++;
        }

        if ((ret = set_control(ctx, i, val)) < 0)
            return ret;
        s->ctl_needs_value[i] = 0;
    }

    // Check if any controls are not set
    for (i = 0; i < s->nb_inputcontrols; i++) {
        if (s->ctl_needs_value[i]) {
            av_log(ctx, AV_LOG_ERROR, "Control c%d must be set.\n", i);
            print_ctl_info(ctx, AV_LOG_ERROR, s, i, s->icmap, s->ictlv, 0);
            return AVERROR(EINVAL);
        }
    }

    pad.type = AVMEDIA_TYPE_AUDIO;

    // plugins without audio inputs act as sources and get no input pad
    if (s->nb_inputs) {
        pad.name = av_asprintf("in0:%s%lu", desc->Label, s->nb_inputs);
        if (!pad.name)
            return AVERROR(ENOMEM);

        pad.filter_frame = filter_frame;
        pad.config_props = config_input;
        if (ff_insert_inpad(ctx, ctx->nb_inputs, &pad) < 0) {
            av_freep(&pad.name);
            return AVERROR(ENOMEM);
        }
    }

    av_log(ctx, AV_LOG_DEBUG, "ports: %lu\n", nb_ports);
    av_log(ctx, AV_LOG_DEBUG, "inputs: %lu outputs: %lu\n",
           s->nb_inputs, s->nb_outputs);
    av_log(ctx, AV_LOG_DEBUG, "input controls: %lu output controls: %lu\n",
           s->nb_inputcontrols, s->nb_outputcontrols);

    return 0;
}
/* Negotiate formats: planar float everywhere. Channel layouts depend on
 * the plugin topology: 1:1 plugins are replicated per channel (any count),
 * 2:2 plugins force stereo, otherwise the audio-port counts pin the
 * channel counts on each side. */
static int query_formats(AVFilterContext *ctx)
{
    LADSPAContext *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    int ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    if (s->nb_inputs) {
        formats = ff_all_samplerates();
        if (!formats)
            return AVERROR(ENOMEM);

        ret = ff_set_common_samplerates(ctx, formats);
        if (ret < 0)
            return ret;
    } else {
        // source mode: lock the rate to the user option
        int sample_rates[] = { s->sample_rate, -1 };

        ret = ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
        if (ret < 0)
            return ret;
    }

    if (s->nb_inputs == 1 && s->nb_outputs == 1) {
        // We will instantiate multiple LADSPA_Handle, one over each channel
        layouts = ff_all_channel_counts();
        if (!layouts)
            return AVERROR(ENOMEM);
        ret = ff_set_common_channel_layouts(ctx, layouts);
        if (ret < 0)
            return ret;
    } else if (s->nb_inputs == 2 && s->nb_outputs == 2) {
        layouts = NULL;
        ret = ff_add_channel_layout(&layouts, AV_CH_LAYOUT_STEREO);
        if (ret < 0)
            return ret;
        ret = ff_set_common_channel_layouts(ctx, layouts);
        if (ret < 0)
            return ret;
    } else {
        AVFilterLink *outlink = ctx->outputs[0];

        if (s->nb_inputs >= 1) {
            AVFilterLink *inlink = ctx->inputs[0];
            uint64_t inlayout = FF_COUNT2LAYOUT(s->nb_inputs);

            layouts = NULL;
            ret = ff_add_channel_layout(&layouts, inlayout);
            if (ret < 0)
                return ret;
            ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
            if (ret < 0)
                return ret;

            // a pure analysis plugin (no audio outputs) passes the input
            // layout straight through
            if (!s->nb_outputs) {
                ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
                if (ret < 0)
                    return ret;
            }
        }

        if (s->nb_outputs >= 1) {
            uint64_t outlayout = FF_COUNT2LAYOUT(s->nb_outputs);

            layouts = NULL;
            ret = ff_add_channel_layout(&layouts, outlayout);
            if (ret < 0)
                return ret;
            ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
            if (ret < 0)
                return ret;
        }
    }

    return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
    LADSPAContext *s = ctx->priv;
    int h;

    /* Tear down every plugin instance; deactivate and cleanup are
     * optional entry points in the LADSPA descriptor. */
    for (h = 0; h < s->nb_handles; h++) {
        if (s->desc->deactivate)
            s->desc->deactivate(s->handles[h]);
        if (s->desc->cleanup)
            s->desc->cleanup(s->handles[h]);
    }

    if (s->dl_handle)
        dlclose(s->dl_handle);

    av_freep(&s->ipmap);
    av_freep(&s->opmap);
    av_freep(&s->icmap);
    av_freep(&s->ocmap);
    av_freep(&s->ictlv);
    av_freep(&s->octlv);
    av_freep(&s->handles);
    av_freep(&s->ctl_needs_value);

    /* The input pad name was allocated in init(). */
    if (ctx->nb_inputs)
        av_freep(&ctx->input_pads[0].name);
}
/* Runtime command handler: "cN" selects input control N, args carries the
 * new value. Returns AVERROR(EINVAL) on malformed input; range checking
 * is delegated to set_control(). */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    LADSPA_Data value;
    unsigned long port;

    /* BUGFIX: port is unsigned long, so scan it with %lu — the previous
     * "%ld" was a signed/unsigned format mismatch (undefined behavior). */
    if (av_sscanf(cmd, "c%lu", &port) != 1 || av_sscanf(args, "%f", &value) != 1)
        return AVERROR(EINVAL);

    return set_control(ctx, port, value);
}
/* Single audio output; in source mode request_frame generates the data. */
static const AVFilterPad ladspa_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
AVFilter ff_af_ladspa = {
.name = "ladspa",
.description = NULL_IF_CONFIG_SMALL("Apply LADSPA effect."),
.priv_size = sizeof(LADSPAContext),
.priv_class = &ladspa_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.process_command = process_command,
.inputs = 0,
.outputs = ladspa_outputs,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

932
externals/ffmpeg/libavfilter/af_loudnorm.c vendored Executable file
View File

@@ -0,0 +1,932 @@
/*
* Copyright (c) 2016 Kyle Swanson <k@ylo.ph>.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* http://k.ylo.ph/2016/04/04/loudnorm.html */
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
#include "ebur128.h"
/* Position of the current frame within the stream; LINEAR_MODE marks the
 * one-shot linear normalization path that bypasses dynamic processing. */
enum FrameType {
    FIRST_FRAME,
    INNER_FRAME,
    FINAL_FRAME,
    LINEAR_MODE,
    FRAME_NB
};

/* Envelope states of the true-peak limiter. */
enum LimiterState {
    OUT,
    ATTACK,
    SUSTAIN,
    RELEASE,
    STATE_NB
};

/* Output format for the measured loudness statistics. */
enum PrintFormat {
    NONE,
    JSON,
    SUMMARY,
    PF_NB
};
/* Instance data for the loudnorm (EBU R128 loudness normalization)
 * filter. */
typedef struct LoudNormContext {
    const AVClass *class;
    double target_i;       // target integrated loudness (option "I")
    double target_lra;     // target loudness range (option "LRA")
    double target_tp;      // target maximum true peak (option "TP")
    double measured_i;     // first-pass measurements supplied by the user
    double measured_lra;
    double measured_tp;
    double measured_thresh;
    double offset;         // extra gain offset (option "offset")
    int linear;            // prefer linear (one-shot) normalization when possible
    int dual_mono;         // treat mono input as dual-mono
    enum PrintFormat print_format;

    double *buf;           // input sample buffer (indexed via buf_index — presumably a ring; TODO confirm)
    int buf_size;
    int buf_index;
    int prev_buf_index;

    double delta[30];      // ring of recent gain deltas, smoothed by gaussian_filter()
    double weights[21];    // 21-tap Gaussian window built by init_gaussian_filter()
    double prev_delta;
    int index;

    double gain_reduction[2];
    double *limiter_buf;   // look-ahead ring buffer used by the true-peak limiter
    double *prev_smp;      // previous sample magnitude per channel (peak detection)
    int limiter_buf_index;
    int limiter_buf_size;
    enum LimiterState limiter_state;
    int peak_index;
    int env_index;
    int env_cnt;
    int attack_length;
    int release_length;

    int64_t pts;
    enum FrameType frame_type;
    int above_threshold;
    int prev_nb_samples;
    int channels;

    FFEBUR128State *r128_in;   // loudness measurement state for the input
    FFEBUR128State *r128_out;  // loudness measurement state for the output
} LoudNormContext;
#define OFFSET(x) offsetof(LoudNormContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* User options; the measured_* values come from a first measurement pass
 * and enable linear normalization. */
static const AVOption loudnorm_options[] = {
    { "I", "set integrated loudness target", OFFSET(target_i), AV_OPT_TYPE_DOUBLE, {.dbl = -24.}, -70., -5., FLAGS },
    { "i", "set integrated loudness target", OFFSET(target_i), AV_OPT_TYPE_DOUBLE, {.dbl = -24.}, -70., -5., FLAGS },
    { "LRA", "set loudness range target", OFFSET(target_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 7.}, 1., 20., FLAGS },
    { "lra", "set loudness range target", OFFSET(target_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 7.}, 1., 20., FLAGS },
    { "TP", "set maximum true peak", OFFSET(target_tp), AV_OPT_TYPE_DOUBLE, {.dbl = -2.}, -9., 0., FLAGS },
    { "tp", "set maximum true peak", OFFSET(target_tp), AV_OPT_TYPE_DOUBLE, {.dbl = -2.}, -9., 0., FLAGS },
    { "measured_I", "measured IL of input file", OFFSET(measured_i), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 0., FLAGS },
    { "measured_i", "measured IL of input file", OFFSET(measured_i), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 0., FLAGS },
    { "measured_LRA", "measured LRA of input file", OFFSET(measured_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, 0., 99., FLAGS },
    { "measured_lra", "measured LRA of input file", OFFSET(measured_lra), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, 0., 99., FLAGS },
    { "measured_TP", "measured true peak of input file", OFFSET(measured_tp), AV_OPT_TYPE_DOUBLE, {.dbl = 99.}, -99., 99., FLAGS },
    { "measured_tp", "measured true peak of input file", OFFSET(measured_tp), AV_OPT_TYPE_DOUBLE, {.dbl = 99.}, -99., 99., FLAGS },
    { "measured_thresh", "measured threshold of input file", OFFSET(measured_thresh), AV_OPT_TYPE_DOUBLE, {.dbl = -70.}, -99., 0., FLAGS },
    { "offset", "set offset gain", OFFSET(offset), AV_OPT_TYPE_DOUBLE, {.dbl = 0.}, -99., 99., FLAGS },
    { "linear", "normalize linearly if possible", OFFSET(linear), AV_OPT_TYPE_BOOL, {.i64 = 1}, 0, 1, FLAGS },
    { "dual_mono", "treat mono input as dual-mono", OFFSET(dual_mono), AV_OPT_TYPE_BOOL, {.i64 = 0}, 0, 1, FLAGS },
    { "print_format", "set print format for stats", OFFSET(print_format), AV_OPT_TYPE_INT, {.i64 = NONE}, NONE, PF_NB -1, FLAGS, "print_format" },
    {     "none", 0, 0, AV_OPT_TYPE_CONST, {.i64 = NONE}, 0, 0, FLAGS, "print_format" },
    {     "json", 0, 0, AV_OPT_TYPE_CONST, {.i64 = JSON}, 0, 0, FLAGS, "print_format" },
    {     "summary", 0, 0, AV_OPT_TYPE_CONST, {.i64 = SUMMARY}, 0, 0, FLAGS, "print_format" },
    { NULL }
};
AVFILTER_DEFINE_CLASS(loudnorm);
static inline int frame_size(int sample_rate, int frame_len_msec)
{
const int frame_size = round((double)sample_rate * (frame_len_msec / 1000.0));
return frame_size + (frame_size % 2);
}
/* Build the 21-tap Gaussian smoothing window (sigma = 3.5) centered on
 * tap 10, then normalize so the weights sum to 1. */
static void init_gaussian_filter(LoudNormContext *s)
{
    const double sigma = 3.5;
    const double scale = 1.0 / (sigma * sqrt(2.0 * M_PI));
    const double denom = 2.0 * pow(sigma, 2.0);
    const int center = 21 / 2;
    double sum = 0.0;
    int i;

    for (i = 0; i < 21; i++) {
        const int x = i - center;
        s->weights[i] = scale * exp(-(pow(x, 2.0) / denom));
        sum += s->weights[i];
    }

    /* Multiply by the reciprocal (rather than dividing) to keep the exact
     * floating-point results of the original implementation. */
    {
        const double adjust = 1.0 / sum;
        for (i = 0; i < 21; i++)
            s->weights[i] *= adjust;
    }
}
/* Weighted sum of the 21 delta[] entries centered on `index`, treating
 * delta[] as a 30-entry ring buffer. */
static double gaussian_filter(LoudNormContext *s, int index)
{
    double acc = 0.0;
    const int start = index > 10 ? index - 10 : index + 20;
    int i;

    for (i = 0; i < 21; i++) {
        int pos = start + i;
        if (pos >= 30)
            pos -= 30;
        acc += s->delta[pos] * s->weights[i];
    }

    return acc;
}
/* Scan the limiter's look-ahead ring buffer for the next local peak above
 * the true-peak ceiling. On detection, reports the sample offset in
 * *peak_delta (-1 if none found), the peak magnitude in *peak_value and
 * records the buffer position in s->peak_index. */
static void detect_peak(LoudNormContext *s, int offset, int nb_samples, int channels, int *peak_delta, double *peak_value)
{
    int n, c, i, index;
    double ceiling;
    double *buf;

    *peak_delta = -1;
    buf = s->limiter_buf;
    ceiling = s->target_tp;

    /* Start 1920 samples (the look-ahead) past the current read position,
     * wrapping around the ring buffer. */
    index = s->limiter_buf_index + (offset * channels) + (1920 * channels);
    if (index >= s->limiter_buf_size)
        index -= s->limiter_buf_size;

    if (s->frame_type == FIRST_FRAME) {
        for (c = 0; c < channels; c++)
            s->prev_smp[c] = fabs(buf[index + c - channels]);
    }

    for (n = 0; n < nb_samples; n++) {
        for (c = 0; c < channels; c++) {
            double this, next, max_peak;

            /* A sample is a peak candidate when it is >= both neighbors
             * and exceeds the ceiling. */
            this = fabs(buf[(index + c) < s->limiter_buf_size ? (index + c) : (index + c - s->limiter_buf_size)]);
            next = fabs(buf[(index + c + channels) < s->limiter_buf_size ? (index + c + channels) : (index + c + channels - s->limiter_buf_size)]);

            if ((s->prev_smp[c] <= this) && (next <= this) && (this > ceiling) && (n > 0)) {
                int detected;

                /* Reject the candidate if any of the next 10 samples on
                 * this channel is louder (a bigger peak is coming). */
                detected = 1;
                for (i = 2; i < 12; i++) {
                    next = fabs(buf[(index + c + (i * channels)) < s->limiter_buf_size ? (index + c + (i * channels)) : (index + c + (i * channels) - s->limiter_buf_size)]);
                    if (next > this) {
                        detected = 0;
                        break;
                    }
                }

                if (!detected)
                    continue;

                /* NOTE(review): this inner loop reuses `c`, clobbering the
                 * outer channel index, and `max_peak` is only initialized
                 * via the c == 0 branch — intentional but fragile. */
                for (c = 0; c < channels; c++) {
                    if (c == 0 || fabs(buf[index + c]) > max_peak)
                        max_peak = fabs(buf[index + c]);

                    s->prev_smp[c] = fabs(buf[(index + c) < s->limiter_buf_size ? (index + c) : (index + c - s->limiter_buf_size)]);
                }

                *peak_delta = n;
                s->peak_index = index;
                *peak_value = max_peak;
                return;
            }

            s->prev_smp[c] = this;
        }

        index += channels;
        if (index >= s->limiter_buf_size)
            index -= s->limiter_buf_size;
    }
}
/*
 * Look-ahead true-peak limiter operating on the limiter ring buffer.
 *
 * Runs a four-state machine (OUT -> ATTACK -> SUSTAIN -> RELEASE) over
 * nb_samples frames, applying smooth gain-reduction envelopes so the
 * output never exceeds s->target_tp, then copies the processed frames
 * from the ring buffer into 'out' with a final hard clip as safety net.
 */
static void true_peak_limiter(LoudNormContext *s, double *out, int nb_samples, int channels)
{
    int n, c, index, peak_delta, smp_cnt;
    double ceiling, peak_value;
    double *buf;

    buf = s->limiter_buf;
    ceiling = s->target_tp;
    index = s->limiter_buf_index;
    smp_cnt = 0;

    if (s->frame_type == FIRST_FRAME) {
        double max;

        /* Pre-scan the 1920-frame look-ahead; if it already exceeds the
         * ceiling, apply a flat reduction and start in SUSTAIN. */
        max = 0.;
        for (n = 0; n < 1920; n++) {
            for (c = 0; c < channels; c++) {
                max = fabs(buf[c]) > max ? fabs(buf[c]) : max;
            }
            buf += channels;
        }

        if (max > ceiling) {
            s->gain_reduction[1] = ceiling / max;
            s->limiter_state = SUSTAIN;
            buf = s->limiter_buf;

            /* Scale the whole look-ahead window by the flat reduction. */
            for (n = 0; n < 1920; n++) {
                for (c = 0; c < channels; c++) {
                    double env;
                    env = s->gain_reduction[1];
                    buf[c] *= env;
                }
                buf += channels;
            }
        }

        buf = s->limiter_buf;
    }

    do {
        switch(s->limiter_state) {
        case OUT:
            /* No reduction active: look for the next peak in the window. */
            detect_peak(s, smp_cnt, nb_samples - smp_cnt, channels, &peak_delta, &peak_value);
            if (peak_delta != -1) {
                /* Peak found: set up an attack ramp ending at the peak. */
                s->env_cnt = 0;
                smp_cnt += (peak_delta - s->attack_length);
                s->gain_reduction[0] = 1.;
                s->gain_reduction[1] = ceiling / peak_value;
                s->limiter_state = ATTACK;

                s->env_index = s->peak_index - (s->attack_length * channels);
                if (s->env_index < 0)
                    s->env_index += s->limiter_buf_size;

                s->env_index += (s->env_cnt * channels);
                if (s->env_index > s->limiter_buf_size)
                    s->env_index -= s->limiter_buf_size;
            } else {
                smp_cnt = nb_samples;
            }
            break;

        case ATTACK:
            /* Ramp gain linearly from gain_reduction[0] down to [1]. */
            for (; s->env_cnt < s->attack_length; s->env_cnt++) {
                for (c = 0; c < channels; c++) {
                    double env;
                    env = s->gain_reduction[0] - ((double) s->env_cnt / (s->attack_length - 1) * (s->gain_reduction[0] - s->gain_reduction[1]));
                    buf[s->env_index + c] *= env;
                }
                s->env_index += channels;
                if (s->env_index >= s->limiter_buf_size)
                    s->env_index -= s->limiter_buf_size;

                smp_cnt++;
                if (smp_cnt >= nb_samples) {
                    s->env_cnt++;
                    break;
                }
            }

            /* Attack completed within this call: hold the reduction. */
            if (smp_cnt < nb_samples) {
                s->env_cnt = 0;
                s->attack_length = 1920;
                s->limiter_state = SUSTAIN;
            }
            break;

        case SUSTAIN:
            detect_peak(s, smp_cnt, nb_samples, channels, &peak_delta, &peak_value);
            if (peak_delta == -1) {
                /* No further peak: start releasing back toward unity gain. */
                s->limiter_state = RELEASE;
                s->gain_reduction[0] = s->gain_reduction[1];
                s->gain_reduction[1] = 1.;
                s->env_cnt = 0;
                break;
            } else {
                double gain_reduction;

                gain_reduction = ceiling / peak_value;
                if (gain_reduction < s->gain_reduction[1]) {
                    /* Louder peak ahead: re-enter ATTACK toward the deeper reduction. */
                    s->limiter_state = ATTACK;

                    s->attack_length = peak_delta;
                    if (s->attack_length <= 1)
                        s->attack_length = 2;

                    s->gain_reduction[0] = s->gain_reduction[1];
                    s->gain_reduction[1] = gain_reduction;
                    s->env_cnt = 0;
                    break;
                }

                /* Hold the current reduction up to the detected peak. */
                for (s->env_cnt = 0; s->env_cnt < peak_delta; s->env_cnt++) {
                    for (c = 0; c < channels; c++) {
                        double env;
                        env = s->gain_reduction[1];
                        buf[s->env_index + c] *= env;
                    }
                    s->env_index += channels;
                    if (s->env_index >= s->limiter_buf_size)
                        s->env_index -= s->limiter_buf_size;

                    smp_cnt++;
                    if (smp_cnt >= nb_samples) {
                        s->env_cnt++;
                        break;
                    }
                }
            }
            break;

        case RELEASE:
            /* Ramp gain linearly from gain_reduction[0] back up to [1] (unity). */
            for (; s->env_cnt < s->release_length; s->env_cnt++) {
                for (c = 0; c < channels; c++) {
                    double env;
                    env = s->gain_reduction[0] + (((double) s->env_cnt / (s->release_length - 1)) * (s->gain_reduction[1] - s->gain_reduction[0]));
                    buf[s->env_index + c] *= env;
                }
                s->env_index += channels;
                if (s->env_index >= s->limiter_buf_size)
                    s->env_index -= s->limiter_buf_size;

                smp_cnt++;
                if (smp_cnt >= nb_samples) {
                    s->env_cnt++;
                    break;
                }
            }

            if (smp_cnt < nb_samples) {
                s->env_cnt = 0;
                s->limiter_state = OUT;
            }
            break;
        }
    } while (smp_cnt < nb_samples);

    /* Copy the processed window out of the ring buffer, hard-clipping any
     * residual overshoot at the ceiling. */
    for (n = 0; n < nb_samples; n++) {
        for (c = 0; c < channels; c++) {
            out[c] = buf[index + c];
            if (fabs(out[c]) > ceiling) {
                out[c] = ceiling * (out[c] < 0 ? -1 : 1);
            }
        }
        out += channels;
        index += channels;
        if (index >= s->limiter_buf_size)
            index -= s->limiter_buf_size;
    }
}
/*
 * Per-frame entry point of the loudnorm filter.
 *
 * Feeds the input to the R128 analyzer, then processes according to the
 * current phase: FIRST_FRAME buffers/seeds the gain state, INNER_FRAME
 * applies Gaussian-smoothed dynamic gain plus true-peak limiting,
 * FINAL_FRAME drains the buffered tail, and LINEAR_MODE applies a single
 * constant gain.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    LoudNormContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    const double *src;
    double *dst;
    double *buf;
    double *limiter_buf;
    int i, n, c, subframe_length, src_index;
    double gain, gain_next, env_global, env_shortterm,
           global, shortterm, lra, relative_threshold;

    /* Process in place when the input frame is writable. */
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    if (s->pts == AV_NOPTS_VALUE)
        s->pts = in->pts;

    out->pts = s->pts;
    src = (const double *)in->data[0];
    dst = (double *)out->data[0];
    buf = s->buf;
    limiter_buf = s->limiter_buf;

    ff_ebur128_add_frames_double(s->r128_in, src, in->nb_samples);

    /* Input shorter than the 3 s analysis window: fall back to linear mode
     * with a gain computed from the whole (short) input. */
    if (s->frame_type == FIRST_FRAME && in->nb_samples < frame_size(inlink->sample_rate, 3000)) {
        double offset, offset_tp, true_peak;

        ff_ebur128_loudness_global(s->r128_in, &global);
        for (c = 0; c < inlink->channels; c++) {
            double tmp;

            ff_ebur128_sample_peak(s->r128_in, c, &tmp);
            if (c == 0 || tmp > true_peak)
                true_peak = tmp;
        }

        offset    = pow(10., (s->target_i - global) / 20.);
        offset_tp = true_peak * offset;
        /* NOTE(review): the else-branch subtracts a linear peak from a linear
         * ceiling to form a gain factor — looks dimensionally odd but is the
         * established behavior; confirm before changing. */
        s->offset = offset_tp < s->target_tp ? offset : s->target_tp - true_peak;
        s->frame_type = LINEAR_MODE;
    }

    switch (s->frame_type) {
    case FIRST_FRAME:
        /* Buffer the 3 s head of the stream. */
        for (n = 0; n < in->nb_samples; n++) {
            for (c = 0; c < inlink->channels; c++) {
                buf[s->buf_index + c] = src[c];
            }
            src += inlink->channels;
            s->buf_index += inlink->channels;
        }

        ff_ebur128_loudness_shortterm(s->r128_in, &shortterm);

        /* Seed the gain deltas from measured vs. target loudness. */
        if (shortterm < s->measured_thresh) {
            s->above_threshold = 0;
            env_shortterm = shortterm <= -70. ? 0. : s->target_i - s->measured_i;
        } else {
            s->above_threshold = 1;
            env_shortterm = shortterm <= -70. ? 0. : s->target_i - shortterm;
        }

        for (n = 0; n < 30; n++)
            s->delta[n] = pow(10., env_shortterm / 20.);
        s->prev_delta = s->delta[s->index];

        s->buf_index =
        s->limiter_buf_index = 0;

        /* Pre-fill the limiter ring buffer with gain-scaled samples. */
        for (n = 0; n < (s->limiter_buf_size / inlink->channels); n++) {
            for (c = 0; c < inlink->channels; c++) {
                limiter_buf[s->limiter_buf_index + c] = buf[s->buf_index + c] * s->delta[s->index] * s->offset;
            }
            s->limiter_buf_index += inlink->channels;
            if (s->limiter_buf_index >= s->limiter_buf_size)
                s->limiter_buf_index -= s->limiter_buf_size;

            s->buf_index += inlink->channels;
        }

        subframe_length = frame_size(inlink->sample_rate, 100);
        true_peak_limiter(s, dst, subframe_length, inlink->channels);
        ff_ebur128_add_frames_double(s->r128_out, dst, subframe_length);

        /* From now on consume 100 ms subframes. */
        s->pts +=
        out->nb_samples =
        inlink->min_samples =
        inlink->max_samples =
        inlink->partial_buf_size = subframe_length;

        s->frame_type = INNER_FRAME;
        break;

    case INNER_FRAME:
        /* Smoothed gain for this subframe and the next, for interpolation. */
        gain      = gaussian_filter(s, s->index + 10 < 30 ? s->index + 10 : s->index + 10 - 30);
        gain_next = gaussian_filter(s, s->index + 11 < 30 ? s->index + 11 : s->index + 11 - 30);

        for (n = 0; n < in->nb_samples; n++) {
            for (c = 0; c < inlink->channels; c++) {
                buf[s->prev_buf_index + c] = src[c];
                limiter_buf[s->limiter_buf_index + c] = buf[s->buf_index + c] * (gain + (((double) n / in->nb_samples) * (gain_next - gain))) * s->offset;
            }
            src += inlink->channels;

            s->limiter_buf_index += inlink->channels;
            if (s->limiter_buf_index >= s->limiter_buf_size)
                s->limiter_buf_index -= s->limiter_buf_size;

            s->prev_buf_index += inlink->channels;
            if (s->prev_buf_index >= s->buf_size)
                s->prev_buf_index -= s->buf_size;

            s->buf_index += inlink->channels;
            if (s->buf_index >= s->buf_size)
                s->buf_index -= s->buf_size;
        }

        subframe_length = (frame_size(inlink->sample_rate, 100) - in->nb_samples) * inlink->channels;
        s->limiter_buf_index = s->limiter_buf_index + subframe_length < s->limiter_buf_size ? s->limiter_buf_index + subframe_length : s->limiter_buf_index + subframe_length - s->limiter_buf_size;

        true_peak_limiter(s, dst, in->nb_samples, inlink->channels);
        ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);

        /* Update the gain-delta ring from the latest loudness measurements. */
        ff_ebur128_loudness_range(s->r128_in, &lra);
        ff_ebur128_loudness_global(s->r128_in, &global);
        ff_ebur128_loudness_shortterm(s->r128_in, &shortterm);
        ff_ebur128_relative_threshold(s->r128_in, &relative_threshold);

        if (s->above_threshold == 0) {
            double shortterm_out;

            /* Ramp the gain up slowly until the output reaches the target. */
            if (shortterm > s->measured_thresh)
                s->prev_delta *= 1.0058;

            ff_ebur128_loudness_shortterm(s->r128_out, &shortterm_out);
            if (shortterm_out >= s->target_i)
                s->above_threshold = 1;
        }

        if (shortterm < relative_threshold || shortterm <= -70. || s->above_threshold == 0) {
            s->delta[s->index] = s->prev_delta;
        } else {
            env_global = fabs(shortterm - global) < (s->target_lra / 2.) ? shortterm - global : (s->target_lra / 2.) * ((shortterm - global) < 0 ? -1 : 1);
            env_shortterm = s->target_i - shortterm;
            s->delta[s->index] = pow(10., (env_global + env_shortterm) / 20.);
        }

        s->prev_delta = s->delta[s->index];

        s->index++;
        if (s->index >= 30)
            s->index -= 30;

        s->prev_nb_samples = in->nb_samples;
        s->pts += in->nb_samples;
        break;

    case FINAL_FRAME:
        /* Drain: apply the last smoothed gain to the buffered tail. */
        gain = gaussian_filter(s, s->index + 10 < 30 ? s->index + 10 : s->index + 10 - 30);
        s->limiter_buf_index = 0;
        src_index = 0;

        for (n = 0; n < s->limiter_buf_size / inlink->channels; n++) {
            for (c = 0; c < inlink->channels; c++) {
                s->limiter_buf[s->limiter_buf_index + c] = src[src_index + c] * gain * s->offset;
            }
            src_index += inlink->channels;

            s->limiter_buf_index += inlink->channels;
            if (s->limiter_buf_index >= s->limiter_buf_size)
                s->limiter_buf_index -= s->limiter_buf_size;
        }

        subframe_length = frame_size(inlink->sample_rate, 100);
        for (i = 0; i < in->nb_samples / subframe_length; i++) {
            true_peak_limiter(s, dst, subframe_length, inlink->channels);

            /* Refill the ring with remaining input, zero-padding past the end. */
            for (n = 0; n < subframe_length; n++) {
                for (c = 0; c < inlink->channels; c++) {
                    if (src_index < (in->nb_samples * inlink->channels)) {
                        limiter_buf[s->limiter_buf_index + c] = src[src_index + c] * gain * s->offset;
                    } else {
                        limiter_buf[s->limiter_buf_index + c] = 0.;
                    }
                }

                if (src_index < (in->nb_samples * inlink->channels))
                    src_index += inlink->channels;

                s->limiter_buf_index += inlink->channels;
                if (s->limiter_buf_index >= s->limiter_buf_size)
                    s->limiter_buf_index -= s->limiter_buf_size;
            }

            dst += (subframe_length * inlink->channels);
        }

        dst = (double *)out->data[0];
        ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
        break;

    case LINEAR_MODE:
        /* One-pass mode: constant linear gain only. */
        for (n = 0; n < in->nb_samples; n++) {
            for (c = 0; c < inlink->channels; c++) {
                dst[c] = src[c] * s->offset;
            }
            src += inlink->channels;
            dst += inlink->channels;
        }

        dst = (double *)out->data[0];
        ff_ebur128_add_frames_double(s->r128_out, dst, in->nb_samples);
        s->pts += in->nb_samples;
        break;
    }

    if (in != out)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
/*
 * Output-side request handler.
 *
 * Normally just forwards the request upstream.  On EOF while in the
 * dynamic (INNER_FRAME) phase, it synthesizes one last frame from the
 * samples still held in s->buf and reruns filter_frame() in
 * FINAL_FRAME mode to flush them.
 */
static int request_frame(AVFilterLink *outlink)
{
    int ret;
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    LoudNormContext *s = ctx->priv;

    ret = ff_request_frame(inlink);

    if (ret == AVERROR_EOF && s->frame_type == INNER_FRAME) {
        double *src;
        double *buf;
        int nb_samples, n, c, offset;
        AVFrame *frame;

        /* Samples remaining in the delay buffer, minus the part of the last
         * subframe that was already emitted. */
        nb_samples  = (s->buf_size / inlink->channels) - s->prev_nb_samples;
        nb_samples -= (frame_size(inlink->sample_rate, 100) - s->prev_nb_samples);

        frame = ff_get_audio_buffer(outlink, nb_samples);
        if (!frame)
            return AVERROR(ENOMEM);
        frame->nb_samples = nb_samples;

        buf = s->buf;
        src = (double *)frame->data[0];

        /* Rewind buf_index to the start of the unconsumed region. */
        offset  = ((s->limiter_buf_size / inlink->channels) - s->prev_nb_samples) * inlink->channels;
        offset -= (frame_size(inlink->sample_rate, 100) - s->prev_nb_samples) * inlink->channels;
        s->buf_index = s->buf_index - offset < 0 ? s->buf_index - offset + s->buf_size : s->buf_index - offset;

        for (n = 0; n < nb_samples; n++) {
            for (c = 0; c < inlink->channels; c++) {
                src[c] = buf[s->buf_index + c];
            }
            src += inlink->channels;
            s->buf_index += inlink->channels;
            if (s->buf_index >= s->buf_size)
                s->buf_index -= s->buf_size;
        }

        s->frame_type = FINAL_FRAME;
        ret = filter_frame(inlink, frame);
    }

    return ret;
}
/**
 * Negotiate formats for loudnorm: any channel count, interleaved
 * double-precision samples, and — unless the filter runs in linear
 * (one-pass) mode — a fixed 192 kHz rate, which the dynamic limiter's
 * constants assume.
 */
static int query_formats(AVFilterContext *ctx)
{
    static const int input_srate[] = {192000, -1};
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    LoudNormContext *s = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    int err;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    err = ff_set_common_channel_layouts(ctx, layouts);
    if (err < 0)
        return err;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    err = ff_set_common_formats(ctx, formats);
    if (err < 0)
        return err;

    /* Linear mode leaves the sample rate unconstrained. */
    if (s->frame_type == LINEAR_MODE)
        return 0;

    /* Dynamic mode: pin both sides of the filter to 192 kHz. */
    formats = ff_make_format_list(input_srate);
    if (!formats)
        return AVERROR(ENOMEM);
    err = ff_formats_ref(formats, &inlink->out_samplerates);
    if (err < 0)
        return err;
    return ff_formats_ref(formats, &outlink->in_samplerates);
}
/*
 * Allocate the R128 analyzers and working buffers once the input link's
 * channel count and sample rate are known, and convert the dB-valued
 * user options (offset, target_tp) to linear factors.
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    LoudNormContext *s = ctx->priv;

    /* Separate analyzers for input and output loudness statistics. */
    s->r128_in = ff_ebur128_init(inlink->channels, inlink->sample_rate, 0, FF_EBUR128_MODE_I | FF_EBUR128_MODE_S | FF_EBUR128_MODE_LRA | FF_EBUR128_MODE_SAMPLE_PEAK);
    if (!s->r128_in)
        return AVERROR(ENOMEM);

    s->r128_out = ff_ebur128_init(inlink->channels, inlink->sample_rate, 0, FF_EBUR128_MODE_I | FF_EBUR128_MODE_S | FF_EBUR128_MODE_LRA | FF_EBUR128_MODE_SAMPLE_PEAK);
    if (!s->r128_out)
        return AVERROR(ENOMEM);

    if (inlink->channels == 1 && s->dual_mono) {
        ff_ebur128_set_channel(s->r128_in,  0, FF_EBUR128_DUAL_MONO);
        ff_ebur128_set_channel(s->r128_out, 0, FF_EBUR128_DUAL_MONO);
    }

    /* 3 s delay buffer for the dynamic gain computation. */
    s->buf_size = frame_size(inlink->sample_rate, 3000) * inlink->channels;
    s->buf = av_malloc_array(s->buf_size, sizeof(*s->buf));
    if (!s->buf)
        return AVERROR(ENOMEM);

    /* 210 ms ring buffer for the true-peak limiter.  NOTE(review): the
     * allocation uses buf_size (3 s) rather than limiter_buf_size (210 ms) —
     * presumably deliberate over-allocation covering the limiter's slightly
     * out-of-range reads (see detect_peak); confirm before "fixing". */
    s->limiter_buf_size = frame_size(inlink->sample_rate, 210) * inlink->channels;
    s->limiter_buf = av_malloc_array(s->buf_size, sizeof(*s->limiter_buf));
    if (!s->limiter_buf)
        return AVERROR(ENOMEM);

    /* Per-channel previous-sample state for peak detection. */
    s->prev_smp = av_malloc_array(inlink->channels, sizeof(*s->prev_smp));
    if (!s->prev_smp)
        return AVERROR(ENOMEM);

    init_gaussian_filter(s);

    if (s->frame_type != LINEAR_MODE) {
        /* Dynamic mode consumes fixed 3 s frames initially. */
        inlink->min_samples =
        inlink->max_samples =
        inlink->partial_buf_size = frame_size(inlink->sample_rate, 3000);
    }

    s->pts = AV_NOPTS_VALUE;
    s->buf_index =
    s->prev_buf_index =
    s->limiter_buf_index = 0;
    s->channels = inlink->channels;
    s->index = 1;
    s->limiter_state = OUT;
    /* Convert dB options to linear factors. */
    s->offset = pow(10., s->offset / 20.);
    s->target_tp = pow(10., s->target_tp / 20.);
    s->attack_length  = frame_size(inlink->sample_rate, 10);
    s->release_length = frame_size(inlink->sample_rate, 100);

    return 0;
}
/**
 * Filter init: default to the two-pass dynamic mode, but switch to
 * linear (single constant gain) mode when the user asked for it and the
 * supplied first-pass measurements show the target can be met linearly.
 */
static av_cold int init(AVFilterContext *ctx)
{
    LoudNormContext *s = ctx->priv;

    s->frame_type = FIRST_FRAME;

    if (s->linear) {
        const double gain         = s->target_i - s->measured_i;
        const double projected_tp = s->measured_tp + gain;
        /* All four measured_* options must differ from their defaults,
         * i.e. a complete first-pass measurement was supplied. */
        const int    have_stats   = s->measured_tp != 99 && s->measured_thresh != -70 &&
                                    s->measured_lra != 0 && s->measured_i != 0;

        if (have_stats && projected_tp <= s->target_tp && s->measured_lra <= s->target_lra) {
            s->frame_type = LINEAR_MODE;
            s->offset = gain;
        }
    }

    return 0;
}
/*
 * Filter teardown: gather the final input/output loudness statistics,
 * print them in the requested format (none/json/summary), then free the
 * analyzers and buffers.
 */
static av_cold void uninit(AVFilterContext *ctx)
{
    LoudNormContext *s = ctx->priv;
    double i_in, i_out, lra_in, lra_out, thresh_in, thresh_out, tp_in, tp_out;
    int c;

    /* Analyzers are NULL when config_input() never ran (e.g. init failure). */
    if (!s->r128_in || !s->r128_out)
        goto end;

    /* Input-side statistics; tp_in is initialized on the c == 0 iteration. */
    ff_ebur128_loudness_range(s->r128_in, &lra_in);
    ff_ebur128_loudness_global(s->r128_in, &i_in);
    ff_ebur128_relative_threshold(s->r128_in, &thresh_in);
    for (c = 0; c < s->channels; c++) {
        double tmp;
        ff_ebur128_sample_peak(s->r128_in, c, &tmp);
        if ((c == 0) || (tmp > tp_in))
            tp_in = tmp;
    }

    /* Output-side statistics, same shape as above. */
    ff_ebur128_loudness_range(s->r128_out, &lra_out);
    ff_ebur128_loudness_global(s->r128_out, &i_out);
    ff_ebur128_relative_threshold(s->r128_out, &thresh_out);
    for (c = 0; c < s->channels; c++) {
        double tmp;
        ff_ebur128_sample_peak(s->r128_out, c, &tmp);
        if ((c == 0) || (tmp > tp_out))
            tp_out = tmp;
    }

    switch(s->print_format) {
    case NONE:
        break;

    case JSON:
        /* Peaks are converted from linear to dBTP (20*log10). */
        av_log(ctx, AV_LOG_INFO,
               "\n{\n"
               "\t\"input_i\" : \"%.2f\",\n"
               "\t\"input_tp\" : \"%.2f\",\n"
               "\t\"input_lra\" : \"%.2f\",\n"
               "\t\"input_thresh\" : \"%.2f\",\n"
               "\t\"output_i\" : \"%.2f\",\n"
               "\t\"output_tp\" : \"%+.2f\",\n"
               "\t\"output_lra\" : \"%.2f\",\n"
               "\t\"output_thresh\" : \"%.2f\",\n"
               "\t\"normalization_type\" : \"%s\",\n"
               "\t\"target_offset\" : \"%.2f\"\n"
               "}\n",
               i_in,
               20. * log10(tp_in),
               lra_in,
               thresh_in,
               i_out,
               20. * log10(tp_out),
               lra_out,
               thresh_out,
               s->frame_type == LINEAR_MODE ? "linear" : "dynamic",
               s->target_i - i_out
        );
        break;

    case SUMMARY:
        av_log(ctx, AV_LOG_INFO,
               "\n"
               "Input Integrated:   %+6.1f LUFS\n"
               "Input True Peak:    %+6.1f dBTP\n"
               "Input LRA:          %6.1f LU\n"
               "Input Threshold:    %+6.1f LUFS\n"
               "\n"
               "Output Integrated:  %+6.1f LUFS\n"
               "Output True Peak:   %+6.1f dBTP\n"
               "Output LRA:         %6.1f LU\n"
               "Output Threshold:   %+6.1f LUFS\n"
               "\n"
               "Normalization Type:   %s\n"
               "Target Offset:        %+6.1f LU\n",
               i_in,
               20. * log10(tp_in),
               lra_in,
               thresh_in,
               i_out,
               20. * log10(tp_out),
               lra_out,
               thresh_out,
               s->frame_type == LINEAR_MODE ? "Linear" : "Dynamic",
               s->target_i - i_out
        );
        break;
    }

end:
    if (s->r128_in)
        ff_ebur128_destroy(&s->r128_in);
    if (s->r128_out)
        ff_ebur128_destroy(&s->r128_out);
    av_freep(&s->limiter_buf);
    av_freep(&s->prev_smp);
    av_freep(&s->buf);
}
/* Single audio input pad; buffer sizes are configured in config_input(). */
static const AVFilterPad avfilter_af_loudnorm_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output pad; request_frame() flushes the buffered tail at EOF. */
static const AVFilterPad avfilter_af_loudnorm_outputs[] = {
    {
        .name          = "default",
        .request_frame = request_frame,
        .type          = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Public registration of the loudnorm filter. */
AVFilter ff_af_loudnorm = {
    .name          = "loudnorm",
    .description   = NULL_IF_CONFIG_SMALL("EBU R128 loudness normalization"),
    .priv_size     = sizeof(LoudNormContext),
    .priv_class    = &loudnorm_class,
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .inputs        = avfilter_af_loudnorm_inputs,
    .outputs       = avfilter_af_loudnorm_outputs,
};

/* ---- end of af_loudnorm.c; the following content is externals/ffmpeg/libavfilter/af_lv2.c (new file, 602 lines) ---- */
/*
* Copyright (c) 2017 Paul B Mahol
* Copyright (c) 2007-2016 David Robillard <http://drobilla.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* LV2 wrapper
*/
#include <lilv/lilv.h>
#include <lv2/lv2plug.in/ns/ext/atom/atom.h>
#include <lv2/lv2plug.in/ns/ext/buf-size/buf-size.h>
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
/* Growable table interning LV2 URI strings; a URI's 1-based index is its URID. */
typedef struct URITable {
    char    **uris;   /* owned, NUL-terminated URI strings */
    size_t    n_uris; /* number of entries in uris */
} URITable;
/* Private context of the lv2 filter: user options, URID mapping state,
 * plugin/port handles, and per-port control values. */
typedef struct LV2Context {
    const AVClass *class;
    char *plugin_uri;            /* "plugin" option: URI of the LV2 plugin to load */
    char *options;               /* "controls" option: "sym=val|sym=val" or "help" */
    unsigned nb_inputs;          /* number of audio/CV input ports */
    unsigned nb_inputcontrols;   /* number of control input ports */
    unsigned nb_outputs;         /* number of audio/CV output ports */

    int sample_rate;             /* source-mode output sample rate */
    int nb_samples;              /* source-mode frame size */
    int64_t pts;                 /* next pts in source mode */
    int64_t duration;            /* source-mode duration limit, -1 = unlimited */

    LilvWorld *world;            /* lilv discovery state */
    const LilvPlugin *plugin;    /* resolved plugin */
    uint32_t nb_ports;           /* total port count of the plugin */
    float *values;               /* NOTE(review): appears unused in this file — confirm */
    URITable uri_table;          /* URI <-> URID interning table */
    LV2_URID_Map map;            /* urid:map feature implementation */
    LV2_Feature map_feature;
    LV2_URID_Unmap unmap;        /* urid:unmap feature implementation */
    LV2_Feature unmap_feature;
    LV2_Atom_Sequence seq_in[2]; /* empty input event sequence handed to atom ports */
    LV2_Atom_Sequence *seq_out;  /* scratch output sequence (header + 9624 bytes) */
    const LV2_Feature *features[5];

    float *mins;                 /* per-port minimum control values */
    float *maxes;                /* per-port maximum control values */
    float *controls;             /* per-port current control values */

    LilvInstance *instance;      /* instantiated plugin */

    /* Cached URI nodes used for port classification. */
    LilvNode  *atom_AtomPort;
    LilvNode  *atom_Sequence;
    LilvNode  *lv2_AudioPort;
    LilvNode  *lv2_CVPort;
    LilvNode  *lv2_ControlPort;
    LilvNode  *lv2_Optional;
    LilvNode  *lv2_InputPort;
    LilvNode  *lv2_OutputPort;
    LilvNode  *urid_map;
    LilvNode  *powerOf2BlockLength;
    LilvNode  *fixedBlockLength;
    LilvNode  *boundedBlockLength;
} LV2Context;
#define OFFSET(x) offsetof(LV2Context, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM

/* User options; each long name has a one-letter alias. */
static const AVOption lv2_options[] = {
    { "plugin", "set plugin uri", OFFSET(plugin_uri), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "p",      "set plugin uri", OFFSET(plugin_uri), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "controls", "set plugin options", OFFSET(options), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "c",        "set plugin options", OFFSET(options), AV_OPT_TYPE_STRING, .flags = FLAGS },
    { "sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT32_MAX, FLAGS },
    { "s",           "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT32_MAX, FLAGS },
    { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
    { "n",          "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, FLAGS },
    { "duration", "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=-1}, -1, INT64_MAX, FLAGS },
    { "d",        "set audio duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64=-1}, -1, INT64_MAX, FLAGS },
    { NULL }
};

/* Expands to the lv2 AVClass boilerplate. */
AVFILTER_DEFINE_CLASS(lv2);
/**
 * Start with an empty URI table; entries are added lazily by uri_table_map().
 */
static void uri_table_init(URITable *table)
{
    table->n_uris = 0;
    table->uris   = NULL;
}
/**
 * Free all interned URI strings and the pointer array itself.
 *
 * Fixes: loop index is now size_t, matching n_uris (avoids a
 * signed/unsigned comparison and truncation on very large tables), and
 * n_uris is reset so a second destroy — or a stray map() after destroy —
 * sees a consistent empty table.
 */
static void uri_table_destroy(URITable *table)
{
    size_t i;

    for (i = 0; i < table->n_uris; i++)
        av_freep(&table->uris[i]);

    av_freep(&table->uris);
    table->n_uris = 0;
}
/**
 * LV2_URID_Map callback: intern 'uri' and return its 1-based URID.
 *
 * Known URIs are found by linear search; unknown ones are appended, so a
 * URID stays stable for the table's lifetime.  The element size now uses
 * the idiomatic sizeof(*tmp) instead of sizeof(char*)/sizeof(char**)
 * (identical size, clearer intent).
 *
 * NOTE(review): on allocation failure this returns table->n_uris — the
 * URID of the last interned URI (or 0 for an empty table), a potential
 * collision inherited from the original code; kept for compatibility.
 */
static LV2_URID uri_table_map(LV2_URID_Map_Handle handle, const char *uri)
{
    URITable *table = (URITable*)handle;
    const size_t len = strlen(uri);
    size_t i;
    char **tmp;

    /* Already interned? */
    for (i = 0; i < table->n_uris; i++) {
        if (!strcmp(table->uris[i], uri)) {
            return i + 1;
        }
    }

    /* Grow the pointer array by one slot. */
    tmp = av_calloc(table->n_uris + 1, sizeof(*tmp));
    if (!tmp)
        return table->n_uris;
    memcpy(tmp, table->uris, table->n_uris * sizeof(*tmp));

    av_free(table->uris);
    table->uris = tmp;

    /* Store a private copy of the URI, including the terminating NUL. */
    table->uris[table->n_uris] = av_malloc(len + 1);
    if (!table->uris[table->n_uris])
        return table->n_uris;
    memcpy(table->uris[table->n_uris], uri, len + 1);
    table->n_uris++;

    return table->n_uris;
}
/**
 * LV2_URID_Unmap callback: return the URI interned under 'urid', or NULL
 * for 0 (the reserved invalid URID) and out-of-range values.
 */
static const char *uri_table_unmap(LV2_URID_Map_Handle handle, LV2_URID urid)
{
    const URITable *table = handle;

    /* URIDs are 1-based indices into the table. */
    if (urid == 0 || urid > table->n_uris)
        return NULL;

    return table->uris[urid - 1];
}
/*
 * Wire every plugin port for the next run: audio/CV inputs to 'in',
 * audio/CV outputs to 'out' (channel by channel, in port order), atom
 * ports to the scratch sequences, and control ports to s->controls[].
 * Finally (re)initialize the atom sequence headers.
 */
static void connect_ports(LV2Context *s, AVFrame *in, AVFrame *out)
{
    int ich = 0, och = 0, i;

    for (i = 0; i < s->nb_ports; i++) {
        const LilvPort *port = lilv_plugin_get_port_by_index(s->plugin, i);

        if (lilv_port_is_a(s->plugin, port, s->lv2_AudioPort) ||
            lilv_port_is_a(s->plugin, port, s->lv2_CVPort)) {
            if (lilv_port_is_a(s->plugin, port, s->lv2_InputPort)) {
                lilv_instance_connect_port(s->instance, i, in->extended_data[ich++]);
            } else if (lilv_port_is_a(s->plugin, port, s->lv2_OutputPort)) {
                lilv_instance_connect_port(s->instance, i, out->extended_data[och++]);
            } else {
                av_log(s, AV_LOG_WARNING, "port %d neither input nor output, skipping\n", i);
            }
        } else if (lilv_port_is_a(s->plugin, port, s->atom_AtomPort)) {
            if (lilv_port_is_a(s->plugin, port, s->lv2_InputPort)) {
                /* NOTE(review): &s->seq_in is a pointer to the 2-element
                 * array; same address as s->seq_in / &s->seq_in[0]. */
                lilv_instance_connect_port(s->instance, i, &s->seq_in);
            } else {
                lilv_instance_connect_port(s->instance, i, s->seq_out);
            }
        } else if (lilv_port_is_a(s->plugin, port, s->lv2_ControlPort)) {
            lilv_instance_connect_port(s->instance, i, &s->controls[i]);
        }
    }

    /* Empty input sequence; output capacity matches the 9624-byte buffer
     * allocated in config_output(). */
    s->seq_in[0].atom.size = sizeof(LV2_Atom_Sequence_Body);
    s->seq_in[0].atom.type = uri_table_map(&s->uri_table, LV2_ATOM__Sequence);
    s->seq_out->atom.size  = 9624;
    s->seq_out->atom.type  = uri_table_map(&s->uri_table, LV2_ATOM__Chunk);
}
/**
 * Run the LV2 plugin over one input frame.
 *
 * The frame is processed in place when the plugin produces no audio
 * output, or when the input is writable and the channel counts match;
 * otherwise a fresh output buffer is allocated.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    LV2Context *s = ctx->priv;
    AVFrame *out = in;

    if (s->nb_outputs &&
        !(av_frame_is_writable(in) && s->nb_inputs == s->nb_outputs)) {
        out = ff_get_audio_buffer(ctx->outputs[0], in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    connect_ports(s, in, out);
    lilv_instance_run(s->instance, in->nb_samples);

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(ctx->outputs[0], out);
}
/**
 * Output request handler.
 *
 * With audio inputs, forward the request upstream.  In source mode
 * (generator plugin, no inputs), synthesize the next frame by running
 * the plugin, until the configured duration elapses.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LV2Context *s = ctx->priv;
    AVFrame *out;
    int64_t elapsed;

    if (ctx->nb_inputs)
        return ff_request_frame(ctx->inputs[0]);

    /* Stop once the requested duration has been produced. */
    elapsed = av_rescale(s->pts, AV_TIME_BASE, s->sample_rate);
    if (s->duration >= 0 && elapsed >= s->duration)
        return AVERROR_EOF;

    out = ff_get_audio_buffer(outlink, s->nb_samples);
    if (!out)
        return AVERROR(ENOMEM);

    connect_ports(s, out, out);
    lilv_instance_run(s->instance, out->nb_samples);

    out->sample_rate = s->sample_rate;
    out->pts         = s->pts;
    s->pts          += s->nb_samples;

    return ff_filter_frame(outlink, out);
}
/* Buffer-size guarantees advertised to the plugin; honored by the
 * fixed 4096-sample frame size set in config_output(). */
static const LV2_Feature buf_size_features[3] = {
    { LV2_BUF_SIZE__powerOf2BlockLength, NULL },
    { LV2_BUF_SIZE__fixedBlockLength,    NULL },
    { LV2_BUF_SIZE__boundedBlockLength,  NULL },
};
/*
 * Output configuration: set up the URID map/unmap features, instantiate
 * the plugin at the negotiated sample rate, read the port ranges, handle
 * "controls=help", parse "sym=val|sym=val" control settings, and enforce
 * a fixed block size when the plugin requires one.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    LV2Context *s = ctx->priv;
    char *p, *arg, *saveptr = NULL;
    int i, sample_rate;

    /* Host features: urid:map, urid:unmap, and the three buffer-size guarantees. */
    uri_table_init(&s->uri_table);
    s->map.handle = &s->uri_table;
    s->map.map = uri_table_map;
    s->map_feature.URI = LV2_URID_MAP_URI;
    s->map_feature.data = &s->map;
    s->unmap.handle = &s->uri_table;
    s->unmap.unmap  = uri_table_unmap;
    s->unmap_feature.URI = LV2_URID_UNMAP_URI;
    s->unmap_feature.data = &s->unmap;
    s->features[0] = &s->map_feature;
    s->features[1] = &s->unmap_feature;
    s->features[2] = &buf_size_features[0];
    s->features[3] = &buf_size_features[1];
    s->features[4] = &buf_size_features[2];

    if (ctx->nb_inputs) {
        /* Filter mode: inherit format/rate (and layout when counts match). */
        AVFilterLink *inlink = ctx->inputs[0];

        outlink->format      = inlink->format;
        outlink->sample_rate = sample_rate = inlink->sample_rate;
        if (s->nb_inputs == s->nb_outputs) {
            outlink->channel_layout = inlink->channel_layout;
            outlink->channels       = inlink->channels;
        }

    } else {
        /* Source mode: rate comes from the user option. */
        outlink->sample_rate = sample_rate = s->sample_rate;
        outlink->time_base   = (AVRational){1, s->sample_rate};
    }

    s->instance = lilv_plugin_instantiate(s->plugin, sample_rate, s->features);
    if (!s->instance) {
        av_log(s, AV_LOG_ERROR, "Failed to instantiate <%s>\n", lilv_node_as_uri(lilv_plugin_get_uri(s->plugin)));
        return AVERROR(EINVAL);
    }

    s->mins     = av_calloc(s->nb_ports, sizeof(float));
    s->maxes    = av_calloc(s->nb_ports, sizeof(float));
    s->controls = av_calloc(s->nb_ports, sizeof(float));

    if (!s->mins || !s->maxes || !s->controls)
        return AVERROR(ENOMEM);

    /* Defaults and ranges for every port; controls[] starts at the defaults. */
    lilv_plugin_get_port_ranges_float(s->plugin, s->mins, s->maxes, s->controls);
    s->seq_out = av_malloc(sizeof(LV2_Atom_Sequence) + 9624);
    if (!s->seq_out)
        return AVERROR(ENOMEM);

    if (s->options && !strcmp(s->options, "help")) {
        /* "controls=help": list input controls and abort graph setup. */
        if (!s->nb_inputcontrols) {
            av_log(ctx, AV_LOG_INFO,
                   "The '%s' plugin does not have any input controls.\n",
                   s->plugin_uri);
        } else {
            av_log(ctx, AV_LOG_INFO,
                   "The '%s' plugin has the following input controls:\n",
                   s->plugin_uri);
            for (i = 0; i < s->nb_ports; i++) {
                const LilvPort *port = lilv_plugin_get_port_by_index(s->plugin, i);
                const LilvNode *symbol = lilv_port_get_symbol(s->plugin, port);
                LilvNode *name = lilv_port_get_name(s->plugin, port);

                if (lilv_port_is_a(s->plugin, port, s->lv2_InputPort) &&
                    lilv_port_is_a(s->plugin, port, s->lv2_ControlPort)) {
                    av_log(ctx, AV_LOG_INFO, "%s\t\t<float> (from %f to %f) (default %f)\t\t%s\n",
                           lilv_node_as_string(symbol), s->mins[i], s->maxes[i], s->controls[i],
                           lilv_node_as_string(name));
                }
                lilv_node_free(name);
            }
        }
        return AVERROR_EXIT;
    }

    /* Parse "sym=val" pairs separated by ' ' or '|'; the loop exits via the
     * av_strtok() break once the string is consumed. */
    p = s->options;
    while (s->options) {
        const LilvPort *port;
        LilvNode *sym;
        float val;
        char *str, *vstr;
        int index;

        if (!(arg = av_strtok(p, " |", &saveptr)))
            break;
        p = NULL;

        vstr = strstr(arg, "=");
        if (vstr == NULL) {
            av_log(ctx, AV_LOG_ERROR, "Invalid syntax.\n");
            return AVERROR(EINVAL);
        }

        vstr[0] = 0;
        str = arg;
        /* NOTE(review): atof() silently yields 0 for malformed numbers —
         * consider strtod() with error checking if stricter parsing is wanted. */
        val = atof(vstr+1);
        sym = lilv_new_string(s->world, str);
        port = lilv_plugin_get_port_by_symbol(s->plugin, sym);
        lilv_node_free(sym);
        if (!port) {
            av_log(s, AV_LOG_WARNING, "Unknown option: <%s>\n", str);
        } else {
            index = lilv_port_get_index(s->plugin, port);
            s->controls[index] = val;
        }
    }

    /* Plugins that demand fixed/power-of-two blocks get 4096-sample frames. */
    if (s->nb_inputs &&
        (lilv_plugin_has_feature(s->plugin, s->powerOf2BlockLength) ||
         lilv_plugin_has_feature(s->plugin, s->fixedBlockLength) ||
         lilv_plugin_has_feature(s->plugin, s->boundedBlockLength))) {
        AVFilterLink *inlink = ctx->inputs[0];

        inlink->partial_buf_size = inlink->min_samples = inlink->max_samples = 4096;
    }

    return 0;
}
/*
 * Filter init: load the lilv world, resolve the plugin by URI, cache the
 * port-class URI nodes, count audio/control ports, and — when the plugin
 * has audio inputs — create the filter's input pad dynamically.
 */
static av_cold int init(AVFilterContext *ctx)
{
    LV2Context *s = ctx->priv;
    const LilvPlugins *plugins;
    const LilvPlugin *plugin;
    AVFilterPad pad = { NULL };
    LilvNode *uri;
    int i;

    s->world = lilv_world_new();
    if (!s->world)
        return AVERROR(ENOMEM);

    uri = lilv_new_uri(s->world, s->plugin_uri);
    if (!uri) {
        av_log(s, AV_LOG_ERROR, "Invalid plugin URI <%s>\n", s->plugin_uri);
        return AVERROR(EINVAL);
    }

    lilv_world_load_all(s->world);
    plugins = lilv_world_get_all_plugins(s->world);
    plugin  = lilv_plugins_get_by_uri(plugins, uri);
    lilv_node_free(uri);

    if (!plugin) {
        av_log(s, AV_LOG_ERROR, "Plugin <%s> not found\n", s->plugin_uri);
        return AVERROR(EINVAL);
    }

    s->plugin = plugin;
    s->nb_ports = lilv_plugin_get_num_ports(s->plugin);

    /* URI nodes used to classify ports throughout the filter; freed in uninit(). */
    s->lv2_InputPort   = lilv_new_uri(s->world, LV2_CORE__InputPort);
    s->lv2_OutputPort  = lilv_new_uri(s->world, LV2_CORE__OutputPort);
    s->lv2_AudioPort   = lilv_new_uri(s->world, LV2_CORE__AudioPort);
    s->lv2_ControlPort = lilv_new_uri(s->world, LV2_CORE__ControlPort);
    s->lv2_Optional    = lilv_new_uri(s->world, LV2_CORE__connectionOptional);
    s->atom_AtomPort   = lilv_new_uri(s->world, LV2_ATOM__AtomPort);
    s->atom_Sequence   = lilv_new_uri(s->world, LV2_ATOM__Sequence);
    s->urid_map        = lilv_new_uri(s->world, LV2_URID__map);
    s->powerOf2BlockLength = lilv_new_uri(s->world, LV2_BUF_SIZE__powerOf2BlockLength);
    s->fixedBlockLength    = lilv_new_uri(s->world, LV2_BUF_SIZE__fixedBlockLength);
    s->boundedBlockLength  = lilv_new_uri(s->world, LV2_BUF_SIZE__boundedBlockLength);

    /* Count audio and control ports; reject plugins with a mandatory port
     * that is neither input nor output. */
    for (i = 0; i < s->nb_ports; i++) {
        const LilvPort *lport = lilv_plugin_get_port_by_index(s->plugin, i);
        int is_input = 0;
        int is_optional = 0;

        is_optional = lilv_port_has_property(s->plugin, lport, s->lv2_Optional);

        if (lilv_port_is_a(s->plugin, lport, s->lv2_InputPort)) {
            is_input = 1;
        } else if (!lilv_port_is_a(s->plugin, lport, s->lv2_OutputPort) && !is_optional) {
            return AVERROR(EINVAL);
        }

        if (lilv_port_is_a(s->plugin, lport, s->lv2_ControlPort)) {
            if (is_input) {
                s->nb_inputcontrols++;
            }
        } else if (lilv_port_is_a(s->plugin, lport, s->lv2_AudioPort)) {
            if (is_input) {
                s->nb_inputs++;
            } else {
                s->nb_outputs++;
            }
        }
    }

    pad.type = AVMEDIA_TYPE_AUDIO;

    if (s->nb_inputs) {
        /* Plugin consumes audio: add a dynamically named input pad. */
        pad.name = av_asprintf("in0:%s:%u", s->plugin_uri, s->nb_inputs);
        if (!pad.name)
            return AVERROR(ENOMEM);

        pad.filter_frame = filter_frame;
        if (ff_insert_inpad(ctx, ctx->nb_inputs, &pad) < 0) {
            av_freep(&pad.name);
            return AVERROR(ENOMEM);
        }
    }

    return 0;
}
/**
 * Negotiate formats for the lv2 filter.
 *
 * Sample format is always planar float.  In filter mode (audio inputs
 * present) any sample rate is accepted; in source mode the rate is pinned
 * to the user option.  Channel layouts follow the plugin's audio port
 * counts: 2-in/2-out plugins get a proper stereo layout, otherwise
 * unnamed layouts with exactly nb_inputs/nb_outputs channels are used.
 *
 * Fix: the result of ff_make_format_list(sample_rates) is now checked
 * before use instead of being passed — possibly NULL — straight to
 * ff_set_common_samplerates().
 */
static int query_formats(AVFilterContext *ctx)
{
    LV2Context *s = ctx->priv;
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    AVFilterLink *outlink = ctx->outputs[0];
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP, AV_SAMPLE_FMT_NONE };
    int ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;

    if (s->nb_inputs) {
        formats = ff_all_samplerates();
        if (!formats)
            return AVERROR(ENOMEM);

        ret = ff_set_common_samplerates(ctx, formats);
        if (ret < 0)
            return ret;
    } else {
        int sample_rates[] = { s->sample_rate, -1 };

        formats = ff_make_format_list(sample_rates);
        if (!formats)
            return AVERROR(ENOMEM);
        ret = ff_set_common_samplerates(ctx, formats);
        if (ret < 0)
            return ret;
    }

    if (s->nb_inputs == 2 && s->nb_outputs == 2) {
        /* Stereo in, stereo out: advertise a named stereo layout. */
        layouts = NULL;
        ret = ff_add_channel_layout(&layouts, AV_CH_LAYOUT_STEREO);
        if (ret < 0)
            return ret;
        ret = ff_set_common_channel_layouts(ctx, layouts);
        if (ret < 0)
            return ret;
    } else {
        if (s->nb_inputs >= 1) {
            AVFilterLink *inlink = ctx->inputs[0];
            uint64_t inlayout = FF_COUNT2LAYOUT(s->nb_inputs);

            layouts = NULL;
            ret = ff_add_channel_layout(&layouts, inlayout);
            if (ret < 0)
                return ret;
            ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts);
            if (ret < 0)
                return ret;

            if (!s->nb_outputs) {
                /* No audio outputs: mirror the input layout on the output. */
                ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
                if (ret < 0)
                    return ret;
            }
        }

        if (s->nb_outputs >= 1) {
            uint64_t outlayout = FF_COUNT2LAYOUT(s->nb_outputs);

            layouts = NULL;
            ret = ff_add_channel_layout(&layouts, outlayout);
            if (ret < 0)
                return ret;
            ret = ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
            if (ret < 0)
                return ret;
        }
    }

    return 0;
}
/**
 * Release everything allocated for the lv2 filter: the URI nodes created
 * at init, the URI table, the plugin instance, the lilv world, the control
 * buffers, and the dynamically allocated input pad name.
 */
static av_cold void uninit(AVFilterContext *ctx)
{
    LV2Context *s = ctx->priv;

    /* URI nodes created during init */
    lilv_node_free(s->powerOf2BlockLength);
    lilv_node_free(s->fixedBlockLength);
    lilv_node_free(s->boundedBlockLength);
    lilv_node_free(s->urid_map);
    lilv_node_free(s->atom_Sequence);
    lilv_node_free(s->atom_AtomPort);
    lilv_node_free(s->lv2_Optional);
    lilv_node_free(s->lv2_ControlPort);
    lilv_node_free(s->lv2_AudioPort);
    lilv_node_free(s->lv2_OutputPort);
    lilv_node_free(s->lv2_InputPort);
    uri_table_destroy(&s->uri_table);
    lilv_instance_free(s->instance);
    lilv_world_free(s->world);
    av_freep(&s->mins);
    av_freep(&s->maxes);
    av_freep(&s->controls);
    av_freep(&s->seq_out);

    /* pad.name was av_asprintf()'d when the dynamic input pad was created */
    if (ctx->nb_inputs)
        av_freep(&ctx->input_pads[0].name);
}
/* Single static audio output; inputs are created dynamically in init(). */
static const AVFilterPad lv2_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
AVFilter ff_af_lv2 = {
.name = "lv2",
.description = NULL_IF_CONFIG_SMALL("Apply LV2 effect."),
.priv_size = sizeof(LV2Context),
.priv_class = &lv2_class,
.init = init,
.uninit = uninit,
.query_formats = query_formats,
.inputs = 0,
.outputs = lv2_outputs,
.flags = AVFILTER_FLAG_DYNAMIC_INPUTS,
};

689
externals/ffmpeg/libavfilter/af_mcompand.c vendored Executable file
View File

@@ -0,0 +1,689 @@
/*
* Copyright (c) 2002 Daniel Pouzzner
* Copyright (c) 1999 Chris Bagwell
* Copyright (c) 1999 Nick Bailey
* Copyright (c) 2007 Rob Sykes <robs@users.sourceforge.net>
* Copyright (c) 2013 Paul B Mahol
* Copyright (c) 2014 Andrew Kelley
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* audio multiband compand filter
*/
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/ffmath.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
/* One piece of the piecewise transfer function: an input/output point
 * (x, y) plus quadratic coefficients so get_volume() can evaluate
 * y + t*(a*t + b) for an offset t from x (all in log domain). */
typedef struct CompandSegment {
    double x, y;
    double a, b;
} CompandSegment;

/* Full transfer function of one band, built by parse_points(). */
typedef struct CompandT {
    CompandSegment *segments;   /* segment table (pairs of entries) */
    int nb_segments;            /* allocated segment count */
    double in_min_lin;          /* linear input below which output is constant */
    double out_min_lin;         /* corresponding linear output floor */
    double curve_dB;            /* soft-knee radius in dB */
    double gain_dB;             /* post gain added to each output point */
} CompandT;

/* Order of each 2nd-order section; the squared filters below use 2*N+? taps. */
#define N 4

/* History of one crossover biquad pair: input plus low/high outputs,
 * stored twice (N*2 slots) so crossover() can index without wrapping. */
typedef struct PrevCrossover {
    double in;
    double out_low;
    double out_high;
} PrevCrossover[N * 2];

/* Linkwitz-Riley style crossover built in crossover_setup() by squaring
 * Butterworth LPF/HPF biquads (see square_quadratic()). */
typedef struct Crossover {
    PrevCrossover *previous;    /* per-channel history, av_calloc'd */
    size_t pos;                 /* current ring position in the history */
    double coefs[3 *(N+1)];     /* squared LPF, HPF and denominator coefs */
} Crossover;

/* Per-band state: compander parameters, crossover filter and delay line. */
typedef struct CompBand {
    CompandT transfer_fn;
    double *attack_rate;        /* per-channel smoothed attack coefficient */
    double *decay_rate;         /* per-channel smoothed decay coefficient */
    double *volume;             /* per-channel envelope state */
    double delay;               /* lookahead delay in seconds (from args) */
    double topfreq;             /* upper crossover frequency, 0 for last band */
    Crossover filter;
    AVFrame *delay_buf;         /* lookahead ring buffer */
    size_t delay_size;
    ptrdiff_t delay_buf_ptr;    /* write position in delay_buf */
    size_t delay_buf_cnt;       /* fill level of delay_buf */
} CompBand;

typedef struct MCompandContext {
    const AVClass *class;

    char *args;                 /* raw option string, parsed in config_output() */
    int nb_bands;
    CompBand *bands;
    AVFrame *band_buf1, *band_buf2, *band_buf3; /* scratch band buffers */
    int band_samples;           /* capacity of the band buffers */
    size_t delay_buf_size;      /* longest delay among all bands, in samples */
} MCompandContext;
#define OFFSET(x) offsetof(MCompandContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Single string option; per-band specs separated by '|'. Each band is
 * parsed by config_output() as: "attack,decay[,...] knee_dB points
 * crossover_freq [delay [initial_volume [gain]]]". */
static const AVOption mcompand_options[] = {
    { "args", "set parameters for each band", OFFSET(args), AV_OPT_TYPE_STRING, { .str = "0.005,0.1 6 -47/-40,-34/-34,-17/-33 100 | 0.003,0.05 6 -47/-40,-34/-34,-17/-33 400 | 0.000625,0.0125 6 -47/-40,-34/-34,-15/-33 1600 | 0.0001,0.025 6 -47/-40,-34/-34,-31/-31,-0/-30 6400 | 0,0.025 6 -38/-31,-28/-28,-0/-25 22000" }, 0, 0, A },
    { NULL }
};

AVFILTER_DEFINE_CLASS(mcompand);
/**
 * Free all per-band resources, the scratch band buffers and the band
 * array itself. Also used as cleanup helper on config_output() failure.
 */
static av_cold void uninit(AVFilterContext *ctx)
{
    MCompandContext *s = ctx->priv;
    int n;

    av_frame_free(&s->band_buf1);
    av_frame_free(&s->band_buf2);
    av_frame_free(&s->band_buf3);

    if (s->bands) {
        for (n = 0; n < s->nb_bands; n++) {
            CompBand *band = &s->bands[n];

            av_freep(&band->attack_rate);
            av_freep(&band->decay_rate);
            av_freep(&band->volume);
            av_freep(&band->transfer_fn.segments);
            av_freep(&band->filter.previous);
            av_frame_free(&band->delay_buf);
        }
    }
    av_freep(&s->bands);
}
/**
 * Format negotiation: planar double samples, any channel count, any rate.
 */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *fmts;
    int err;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((err = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return err;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    if ((err = ff_set_common_formats(ctx, fmts)) < 0)
        return err;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/**
 * Store in *nb_items the number of delimiter-separated fields in item_str
 * (i.e. the number of occurrences of delimiter, plus one; an empty string
 * counts as one field).
 */
static void count_items(char *item_str, int *nb_items, char delimiter)
{
    const char *cursor;
    int count = 1;

    for (cursor = item_str; *cursor; cursor++)
        count += *cursor == delimiter;

    *nb_items = count;
}
/**
 * Advance the envelope follower of one channel toward |in|: rising
 * differences use the attack coefficient, falling ones the decay
 * coefficient (a leaky integrator).
 */
static void update_volume(CompBand *cb, double in, int ch)
{
    const double delta = in - cb->volume[ch];
    const double rate  = delta > 0.0 ? cb->attack_rate[ch] : cb->decay_rate[ch];

    cb->volume[ch] += delta * rate;
}
/**
 * Map a linear input level through the band's transfer function.
 * Below in_min_lin the output floor is returned; otherwise the segment
 * containing log(in_lin) is located and its quadratic y + t*(a*t + b)
 * is evaluated, converting back from the log domain with exp().
 */
static double get_volume(CompandT *s, double in_lin)
{
    const CompandSegment *seg;
    double t;
    int idx;

    if (in_lin <= s->in_min_lin)
        return s->out_min_lin;

    t = log(in_lin);

    /* find the first segment whose x exceeds the input */
    for (idx = 1; idx < s->nb_segments; idx++)
        if (t <= s->segments[idx].x)
            break;
    seg = &s->segments[idx - 1];

    t -= seg->x;
    return exp(seg->y + t * (seg->a * t + seg->b));
}
/**
 * Parse one band's "in_dB/out_dB,..." list into s->segments and derive
 * the quadratic soft-knee coefficients.
 *
 * The segment table is laid out in pairs; the S()/L() macros hide the
 * stride-2 indexing. Values are parsed in dB and converted to natural-log
 * units (M_LN10 / 20). The final loop rounds each knee with radius-limited
 * quadratic joins.
 *
 * @param points    comma-separated point list (consumed by av_strtok)
 * @param nb_points number of points announced by the caller
 * @param radius    knee radius (curve_dB converted to log units)
 * @return 0 on success, AVERROR(EINVAL) on malformed input
 */
static int parse_points(char *points, int nb_points, double radius,
                        CompandT *s, AVFilterContext *ctx)
{
    int new_nb_items, num;
    char *saveptr = NULL;
    char *p = points;
    int i;

/* user points start at pair index 1; pair 0 is reserved for the tail-off */
#define S(x) s->segments[2 * ((x) + 1)]
    for (i = 0, new_nb_items = 0; i < nb_points; i++) {
        char *tstr = av_strtok(p, ",", &saveptr);
        p = NULL;
        if (!tstr || sscanf(tstr, "%lf/%lf", &S(i).x, &S(i).y) != 2) {
            av_log(ctx, AV_LOG_ERROR,
                   "Invalid and/or missing input/output value.\n");
            return AVERROR(EINVAL);
        }
        if (i && S(i - 1).x > S(i).x) {
            av_log(ctx, AV_LOG_ERROR,
                   "Transfer function input values must be increasing.\n");
            return AVERROR(EINVAL);
        }
        /* store y relative to x (gain rather than absolute output) */
        S(i).y -= S(i).x;
        av_log(ctx, AV_LOG_DEBUG, "%d: x=%f y=%f\n", i, S(i).x, S(i).y);
        new_nb_items++;
    }
    num = new_nb_items;

    /* Add 0,0 if necessary */
    if (num == 0 || S(num - 1).x)
        num++;

#undef S
/* from here on, address pairs from the real start of the table */
#define S(x) s->segments[2 * (x)]
    /* Add a tail off segment at the start */
    S(0).x = S(1).x - 2 * s->curve_dB;
    S(0).y = S(1).y;
    num++;

    /* Join adjacent colinear segments.
     * NOTE(review): fabs(g1 - g2) as a truth value merges only exactly
     * colinear segments; any floating-point difference keeps them apart. */
    for (i = 2; i < num; i++) {
        double g1 = (S(i - 1).y - S(i - 2).y) * (S(i - 0).x - S(i - 1).x);
        double g2 = (S(i - 0).y - S(i - 1).y) * (S(i - 1).x - S(i - 2).x);
        int j;

        if (fabs(g1 - g2))
            continue;
        num--;
        for (j = --i; j < num; j++)
            S(j) = S(j + 1);
    }

    /* apply post gain and convert dB -> natural log units */
    for (i = 0; i < s->nb_segments; i += 2) {
        s->segments[i].y += s->gain_dB;
        s->segments[i].x *= M_LN10 / 20;
        s->segments[i].y *= M_LN10 / 20;
    }

/* L(x): pair x entries before the current loop position */
#define L(x) s->segments[i - (x)]
    for (i = 4; i < s->nb_segments; i += 2) {
        double x, y, cx, cy, in1, in2, out1, out2, theta, len, r;

        /* linearize the two straight pieces on either side of the knee */
        L(4).a = 0;
        L(4).b = (L(2).y - L(4).y) / (L(2).x - L(4).x);
        L(2).a = 0;
        L(2).b = (L(0).y - L(2).y) / (L(0).x - L(2).x);

        /* back off from the knee point along the incoming piece */
        theta = atan2(L(2).y - L(4).y, L(2).x - L(4).x);
        len = hypot(L(2).x - L(4).x, L(2).y - L(4).y);
        r = FFMIN(radius, len);
        L(3).x = L(2).x - r * cos(theta);
        L(3).y = L(2).y - r * sin(theta);

        /* advance from the knee point along the outgoing piece */
        theta = atan2(L(0).y - L(2).y, L(0).x - L(2).x);
        len = hypot(L(0).x - L(2).x, L(0).y - L(2).y);
        r = FFMIN(radius, len / 2);
        x = L(2).x + r * cos(theta);
        y = L(2).y + r * sin(theta);

        /* control point: centroid of knee start, knee and knee end */
        cx = (L(3).x + L(2).x + x) / 3;
        cy = (L(3).y + L(2).y + y) / 3;

        L(2).x = x;
        L(2).y = y;

        /* fit the quadratic y = a*t^2 + b*t through the two offsets */
        in1 = cx - L(3).x;
        out1 = cy - L(3).y;
        in2 = L(2).x - L(3).x;
        out2 = L(2).y - L(3).y;
        L(3).a = (out2 / in2 - out1 / in1) / (in2 - in1);
        L(3).b = out1 / in1 - L(3).a * in1;
    }
    /* terminate: below the first point the gain is constant */
    L(3).x = 0;
    L(3).y = L(2).y;

    /* cache the linear-domain floor used by get_volume() */
    s->in_min_lin = exp(s->segments[1].x);
    s->out_min_lin = exp(s->segments[1].y);
    return 0;
}
/**
 * Square a quadratic polynomial: given x[] = {x0, x1, x2} describing
 * x0 + x1*t + x2*t^2, write the five coefficients of its square into y[].
 */
static void square_quadratic(double const *x, double *y)
{
    const double c0 = x[0], c1 = x[1], c2 = x[2];

    y[0] = c0 * c0;
    y[1] = 2 * c0 * c1;
    y[2] = 2 * c0 * c2 + c1 * c1;
    y[3] = 2 * c1 * c2;
    y[4] = c2 * c2;
}
/**
 * Build the squared-Butterworth (Linkwitz-Riley style) crossover at the
 * given frequency. Computes matched 2nd-order LPF and HPF sections, then
 * squares each (see square_quadratic) into p->coefs, and allocates the
 * per-channel history.
 *
 * @return 0 on success, AVERROR(EINVAL) if the frequency is above Nyquist,
 *         AVERROR(ENOMEM) if the history allocation fails.
 */
static int crossover_setup(AVFilterLink *outlink, Crossover *p, double frequency)
{
    double w0 = 2 * M_PI * frequency / outlink->sample_rate;
    double Q = sqrt(.5), alpha = sin(w0) / (2*Q);
    double x[9], norm;
    int i;

    if (w0 > M_PI)
        return AVERROR(EINVAL);

    /* biquad numerators: LPF in x[0..2], HPF in x[3..5] (cf. biquads.c) */
    x[0] = (1 - cos(w0))/2;
    x[1] = 1 - cos(w0);
    x[2] = (1 - cos(w0))/2;
    x[3] = (1 + cos(w0))/2;
    x[4] = -(1 + cos(w0));
    x[5] = (1 + cos(w0))/2;
    /* shared denominator in x[6..8] */
    x[6] = 1 + alpha;
    x[7] = -2*cos(w0);
    x[8] = 1 - alpha;

    /* normalize so the leading denominator coefficient is 1 */
    norm = x[6];
    for (i = 0; i < 9; i++)
        x[i] /= norm;

    square_quadratic(x    , p->coefs);
    square_quadratic(x + 3, p->coefs + 5);
    square_quadratic(x + 6, p->coefs + 10);

    p->previous = av_calloc(outlink->channels, sizeof(*p->previous));
    if (!p->previous)
        return AVERROR(ENOMEM);

    return 0;
}
/**
 * Parse s->args into per-band state once the output link is configured.
 *
 * Each '|'-separated band is: "attack,decay[,...] knee_dB in/out,points
 * crossover_freq [delay [initial_volume [gain]]]". Attack/decay times are
 * converted to per-sample smoothing coefficients, the transfer function is
 * built by parse_points() and the crossover by crossover_setup().
 *
 * Fix: the three av_calloc() results for attack_rate/decay_rate/volume
 * were used unchecked, dereferencing NULL on allocation failure; they are
 * now verified before use.
 *
 * @return 0 on success, a negative AVERROR on parse or allocation failure
 *         (all partial state is released via uninit()).
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    MCompandContext *s = ctx->priv;
    int ret, ch, i, k, new_nb_items, nb_bands;
    char *p = s->args, *saveptr = NULL;
    int max_delay_size = 0;

    count_items(s->args, &nb_bands, '|');
    s->nb_bands = FFMAX(1, nb_bands);

    s->bands = av_calloc(nb_bands, sizeof(*s->bands));
    if (!s->bands)
        return AVERROR(ENOMEM);

    for (i = 0, new_nb_items = 0; i < nb_bands; i++) {
        int nb_points, nb_attacks, nb_items = 0;
        char *tstr2, *tstr = av_strtok(p, "|", &saveptr);
        char *p2, *p3, *saveptr2 = NULL, *saveptr3 = NULL;
        double radius;

        if (!tstr) {
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        p = NULL;

        p2 = tstr;
        count_items(tstr, &nb_items, ' ');
        tstr2 = av_strtok(p2, " ", &saveptr2);
        if (!tstr2) {
            av_log(ctx, AV_LOG_ERROR, "at least one attacks/decays rate is mandatory\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        p2 = NULL;

        p3 = tstr2;
        count_items(tstr2, &nb_attacks, ',');
        if (!nb_attacks || nb_attacks & 1) {
            av_log(ctx, AV_LOG_ERROR, "number of attacks rate plus decays rate must be even\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }

        s->bands[i].attack_rate = av_calloc(outlink->channels, sizeof(double));
        s->bands[i].decay_rate  = av_calloc(outlink->channels, sizeof(double));
        s->bands[i].volume      = av_calloc(outlink->channels, sizeof(double));
        /* previously used unchecked: NULL deref on OOM */
        if (!s->bands[i].attack_rate || !s->bands[i].decay_rate ||
            !s->bands[i].volume) {
            uninit(ctx);
            return AVERROR(ENOMEM);
        }

        /* attack/decay pairs; remaining channels reuse the last pair */
        for (k = 0; k < FFMIN(nb_attacks / 2, outlink->channels); k++) {
            char *tstr3 = av_strtok(p3, ",", &saveptr3);

            p3 = NULL;
            sscanf(tstr3, "%lf", &s->bands[i].attack_rate[k]);
            tstr3 = av_strtok(p3, ",", &saveptr3);
            sscanf(tstr3, "%lf", &s->bands[i].decay_rate[k]);

            /* convert seconds to a one-pole smoothing coefficient */
            if (s->bands[i].attack_rate[k] > 1.0 / outlink->sample_rate) {
                s->bands[i].attack_rate[k] = 1.0 - exp(-1.0 / (outlink->sample_rate * s->bands[i].attack_rate[k]));
            } else {
                s->bands[i].attack_rate[k] = 1.0;
            }

            if (s->bands[i].decay_rate[k] > 1.0 / outlink->sample_rate) {
                s->bands[i].decay_rate[k] = 1.0 - exp(-1.0 / (outlink->sample_rate * s->bands[i].decay_rate[k]));
            } else {
                s->bands[i].decay_rate[k] = 1.0;
            }
        }

        for (ch = k; ch < outlink->channels; ch++) {
            s->bands[i].attack_rate[ch] = s->bands[i].attack_rate[k - 1];
            s->bands[i].decay_rate[ch]  = s->bands[i].decay_rate[k - 1];
        }

        /* soft-knee width in dB */
        tstr2 = av_strtok(p2, " ", &saveptr2);
        if (!tstr2) {
            av_log(ctx, AV_LOG_ERROR, "transfer function curve in dB must be set\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        sscanf(tstr2, "%lf", &s->bands[i].transfer_fn.curve_dB);

        radius = s->bands[i].transfer_fn.curve_dB * M_LN10 / 20.0;

        /* transfer function points */
        tstr2 = av_strtok(p2, " ", &saveptr2);
        if (!tstr2) {
            av_log(ctx, AV_LOG_ERROR, "transfer points missing\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        count_items(tstr2, &nb_points, ',');

        s->bands[i].transfer_fn.nb_segments = (nb_points + 4) * 2;
        s->bands[i].transfer_fn.segments = av_calloc(s->bands[i].transfer_fn.nb_segments,
                                                     sizeof(CompandSegment));
        if (!s->bands[i].transfer_fn.segments) {
            uninit(ctx);
            return AVERROR(ENOMEM);
        }

        ret = parse_points(tstr2, nb_points, radius, &s->bands[i].transfer_fn, ctx);
        if (ret < 0) {
            av_log(ctx, AV_LOG_ERROR, "transfer points parsing failed\n");
            uninit(ctx);
            return ret;
        }

        /* mandatory crossover frequency */
        tstr2 = av_strtok(p2, " ", &saveptr2);
        if (!tstr2) {
            av_log(ctx, AV_LOG_ERROR, "crossover_frequency is missing\n");
            uninit(ctx);
            return AVERROR(EINVAL);
        }
        new_nb_items += sscanf(tstr2, "%lf", &s->bands[i].topfreq) == 1;
        if (s->bands[i].topfreq < 0 || s->bands[i].topfreq >= outlink->sample_rate / 2) {
            av_log(ctx, AV_LOG_ERROR, "crossover_frequency: %f, should be >=0 and lower than half of sample rate: %d.\n", s->bands[i].topfreq, outlink->sample_rate / 2);
            uninit(ctx);
            return AVERROR(EINVAL);
        }

        if (s->bands[i].topfreq != 0) {
            ret = crossover_setup(outlink, &s->bands[i].filter, s->bands[i].topfreq);
            if (ret < 0) {
                uninit(ctx);
                return ret;
            }
        }

        /* optional trailing fields: delay, initial volume (dB), gain (dB) */
        tstr2 = av_strtok(p2, " ", &saveptr2);
        if (tstr2) {
            sscanf(tstr2, "%lf", &s->bands[i].delay);
            max_delay_size = FFMAX(max_delay_size, s->bands[i].delay * outlink->sample_rate);

            tstr2 = av_strtok(p2, " ", &saveptr2);
            if (tstr2) {
                double initial_volume;

                sscanf(tstr2, "%lf", &initial_volume);
                initial_volume = pow(10.0, initial_volume / 20);

                for (k = 0; k < outlink->channels; k++) {
                    s->bands[i].volume[k] = initial_volume;
                }

                tstr2 = av_strtok(p2, " ", &saveptr2);
                if (tstr2) {
                    sscanf(tstr2, "%lf", &s->bands[i].transfer_fn.gain_dB);
                }
            }
        }
    }
    s->nb_bands = new_nb_items;

    /* every band shares the size of the longest requested lookahead */
    for (i = 0; max_delay_size > 0 && i < s->nb_bands; i++) {
        s->bands[i].delay_buf = ff_get_audio_buffer(outlink, max_delay_size);
        if (!s->bands[i].delay_buf)
            return AVERROR(ENOMEM);
    }
    s->delay_buf_size = max_delay_size;

    return 0;
}
/* Expands to N (= 4) copies of the `_` macro defined just before each use:
 * a manually unrolled convolution step. */
#define CONVOLVE _ _ _ _

/**
 * Split one channel of input into low and high bands with the squared
 * biquad pair built in crossover_setup(). The history ring (p->previous)
 * stores each sample twice, N slots apart, so the taps can be read with
 * p->pos + j without wrap-around checks.
 */
static void crossover(int ch, Crossover *p,
                      double *ibuf, double *obuf_low,
                      double *obuf_high, size_t len)
{
    double out_low, out_high;

    while (len--) {
        /* step the ring position backwards */
        p->pos = p->pos ? p->pos - 1 : N - 1;

/* one low-band tap: coefs[0..N] numerator, coefs[2N+2..] denominator */
#define _ out_low += p->coefs[j] * p->previous[ch][p->pos + j].in \
    - p->coefs[2*N+2 + j] * p->previous[ch][p->pos + j].out_low, j++;
        {
            int j = 1;
            out_low = p->coefs[0] * *ibuf;
            CONVOLVE
            *obuf_low++ = out_low;
        }
#undef _
/* one high-band tap: coefs[N+1..2N+1] numerator, same denominator */
#define _ out_high += p->coefs[j+N+1] * p->previous[ch][p->pos + j].in \
    - p->coefs[2*N+2 + j] * p->previous[ch][p->pos + j].out_high, j++;
        {
            int j = 1;
            out_high = p->coefs[N+1] * *ibuf;
            CONVOLVE
            *obuf_high++ = out_high;
        }
        /* write each new value to both mirrored history slots */
        p->previous[ch][p->pos + N].in = p->previous[ch][p->pos].in = *ibuf++;
        p->previous[ch][p->pos + N].out_low = p->previous[ch][p->pos].out_low = out_low;
        p->previous[ch][p->pos + N].out_high = p->previous[ch][p->pos].out_high = out_high;
    }
}
/**
 * Compand one channel of one band: track the envelope, look up the gain
 * through the band's transfer function, and apply it either directly (no
 * lookahead) or through the band's delay ring buffer.
 *
 * @param c    filter context (for the global delay size)
 * @param l    band being processed
 * @param ibuf band-filtered input samples
 * @param obuf output accumulator for this band
 * @param len  number of samples
 * @return always 0
 */
static int mcompand_channel(MCompandContext *c, CompBand *l, double *ibuf, double *obuf, int len, int ch)
{
    int i;

    for (i = 0; i < len; i++) {
        double level_in_lin, level_out_lin, checkbuf;
        /* Maintain the volume fields by simulating a leaky pump circuit */
        update_volume(l, fabs(ibuf[i]), ch);
        /* Volume memory is updated: perform compand */
        level_in_lin = l->volume[ch];
        level_out_lin = get_volume(&l->transfer_fn, level_in_lin);

        if (c->delay_buf_size <= 0) {
            /* no lookahead configured: apply the gain immediately */
            checkbuf = ibuf[i] * level_out_lin;
            obuf[i] = checkbuf;
        } else {
            double *delay_buf = (double *)l->delay_buf->extended_data[ch];

            /* FIXME: note that this lookahead algorithm is really lame:
               the response to a peak is released before the peak
               arrives. */

            /* because volume application delays differ band to band, but
               total delay doesn't, the volume is applied in an iteration
               preceding that in which the sample goes to obuf, except in
               the band(s) with the longest vol app delay.

               the offset between delay_buf_ptr and the sample to apply
               vol to, is a constant equal to the difference between this
               band's delay and the longest delay of all the bands. */

            if (l->delay_buf_cnt >= l->delay_size) {
                /* apply the gain to the sample at this band's offset */
                checkbuf =
                    delay_buf[(l->delay_buf_ptr +
                               c->delay_buf_size -
                               l->delay_size) % c->delay_buf_size] * level_out_lin;
                delay_buf[(l->delay_buf_ptr + c->delay_buf_size -
                           l->delay_size) % c->delay_buf_size] = checkbuf;
            }
            if (l->delay_buf_cnt >= c->delay_buf_size) {
                /* ring is full: emit the oldest processed sample */
                obuf[i] = delay_buf[l->delay_buf_ptr];
            } else {
                /* still priming the delay line */
                l->delay_buf_cnt++;
            }
            delay_buf[l->delay_buf_ptr++] = ibuf[i];
            l->delay_buf_ptr %= c->delay_buf_size;
        }
    }

    return 0;
}
/**
 * Process one input frame: for each channel, split the signal through the
 * crossover chain (ping-ponging between the three scratch band buffers),
 * compand each band, and sum the bands into the output frame.
 *
 * Fix: the three ff_get_audio_buffer() calls that (re)allocate the scratch
 * band buffers were unchecked; a failed allocation previously led to a
 * NULL dereference in crossover()/mcompand_channel().
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    MCompandContext *s = ctx->priv;
    AVFrame *out, *abuf, *bbuf, *cbuf;
    int ch, band, i;

    out = ff_get_audio_buffer(outlink, in->nb_samples);
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    /* grow the scratch buffers when a larger frame arrives */
    if (s->band_samples < in->nb_samples) {
        av_frame_free(&s->band_buf1);
        av_frame_free(&s->band_buf2);
        av_frame_free(&s->band_buf3);

        s->band_buf1 = ff_get_audio_buffer(outlink, in->nb_samples);
        s->band_buf2 = ff_get_audio_buffer(outlink, in->nb_samples);
        s->band_buf3 = ff_get_audio_buffer(outlink, in->nb_samples);
        /* previously unchecked: NULL deref on OOM */
        if (!s->band_buf1 || !s->band_buf2 || !s->band_buf3) {
            av_frame_free(&out);
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        s->band_samples = in->nb_samples;
    }

    for (ch = 0; ch < outlink->channels; ch++) {
        double *a, *dst = (double *)out->extended_data[ch];

        /* abuf carries the remaining (high) signal into the next band,
         * bbuf receives this band's (low) output, cbuf is the spare */
        for (band = 0, abuf = in, bbuf = s->band_buf2, cbuf = s->band_buf1; band < s->nb_bands; band++) {
            CompBand *b = &s->bands[band];

            if (b->topfreq) {
                crossover(ch, &b->filter, (double *)abuf->extended_data[ch],
                          (double *)bbuf->extended_data[ch], (double *)cbuf->extended_data[ch], in->nb_samples);
            } else {
                /* last band: everything left goes through unfiltered */
                bbuf = abuf;
                abuf = cbuf;
            }

            /* never compand into the (const) input frame */
            if (abuf == in)
                abuf = s->band_buf3;
            mcompand_channel(s, b, (double *)bbuf->extended_data[ch], (double *)abuf->extended_data[ch], out->nb_samples, ch);
            a = (double *)abuf->extended_data[ch];
            /* accumulate this band into the output */
            for (i = 0; i < out->nb_samples; i++) {
                dst[i] += a[i];
            }

            FFSWAP(AVFrame *, abuf, cbuf);
        }
    }

    out->pts = in->pts;
    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Forward the output request straight to the single input link. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;

    return ff_request_frame(ctx->inputs[0]);
}
/* Single audio input, processed frame by frame in filter_frame(). */
static const AVFilterPad mcompand_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output; band state is parsed in config_output(). */
static const AVFilterPad mcompand_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
        .config_props  = config_output,
    },
    { NULL }
};
/* Multiband compander filter registration. */
AVFilter ff_af_mcompand = {
    .name           = "mcompand",
    .description    = NULL_IF_CONFIG_SMALL(
            "Multiband Compress or expand audio dynamic range."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(MCompandContext),
    .priv_class     = &mcompand_class,
    .uninit         = uninit,
    .inputs         = mcompand_inputs,
    .outputs        = mcompand_outputs,
};

461
externals/ffmpeg/libavfilter/af_pan.c vendored Executable file
View File

@@ -0,0 +1,461 @@
/*
* Copyright (c) 2002 Anders Johansson <ajh@atri.curtin.edu.au>
* Copyright (c) 2011 Clément Bœsch <u pkh me>
* Copyright (c) 2011 Nicolas George <nicolas.george@normalesup.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Audio panning filter (channels mixing)
* Original code written by Anders Johansson for MPlayer,
* reimplemented for FFmpeg.
*/
#include <stdio.h>
#include "libavutil/avstring.h"
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "libswresample/swresample.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#define MAX_CHANNELS 64

typedef struct PanContext {
    const AVClass *class;
    char *args;                 /* raw option string: "layout|out=gain*in+..." */
    int64_t out_channel_layout; /* requested output layout (0 if count-only) */
    double gain[MAX_CHANNELS][MAX_CHANNELS]; /* gain[out][in] mixing matrix */
    int64_t need_renorm;        /* bitmask of outputs declared with '<' */
    int need_renumber;          /* set when inputs were given by name */
    int nb_output_channels;

    int pure_gains;             /* all gains 0/1, one input per output */
    /* channel mapping specific */
    int channel_map[MAX_CHANNELS];
    struct SwrContext *swr;     /* does the actual remixing */
} PanContext;
/* Advance *arg past any leading whitespace (as matched by sscanf's " "). */
static void skip_spaces(char **arg)
{
    int consumed = 0;

    sscanf(*arg, " %n", &consumed);
    *arg += consumed;
}
/**
 * Parse one channel reference from *arg, either a layout name ("FL") or a
 * number ("c2"). On success advances *arg past the token and returns 0,
 * storing the channel id in *rchannel and whether it was named in *rnamed.
 * Returns AVERROR(EINVAL) on anything else.
 */
static int parse_channel_name(char **arg, int *rchannel, int *rnamed)
{
    char buf[8];
    int len, i, channel_id = 0;
    int64_t layout, layout0;

    skip_spaces(arg);
    /* try to parse a channel name, e.g. "FL" */
    if (sscanf(*arg, "%7[A-Z]%n", buf, &len)) {
        layout0 = layout = av_get_channel_layout(buf);
        /* channel_id <- first set bit in layout */
        for (i = 32; i > 0; i >>= 1) {
            if (layout >= (int64_t)1 << i) {
                channel_id += i;
                layout >>= i;
            }
        }
        /* reject layouts that are not a single channel */
        if (channel_id >= MAX_CHANNELS || layout0 != (int64_t)1 << channel_id)
            return AVERROR(EINVAL);
        *rchannel = channel_id;
        *rnamed = 1;
        *arg += len;
        return 0;
    }
    /* try to parse a channel number, e.g. "c2" */
    if (sscanf(*arg, "c%d%n", &channel_id, &len) &&
        channel_id >= 0 && channel_id < MAX_CHANNELS) {
        *rchannel = channel_id;
        *rnamed = 0;
        *arg += len;
        return 0;
    }
    return AVERROR(EINVAL);
}
/**
 * Parse the pan argument string into the gain matrix.
 *
 * Syntax: "<out_layout>|<out_ch>=[gain*]in_ch[+[gain*]in_ch...]|..."
 * where '=' assigns gains verbatim and '<' requests renormalization of
 * that output (recorded in need_renorm). Named and numbered channels may
 * not be mixed. All cleanup funnels through the single fail: label.
 *
 * @return 0 on success, AVERROR(EINVAL) on syntax errors,
 *         AVERROR(ENOMEM) if duplicating the argument string fails
 */
static av_cold int init(AVFilterContext *ctx)
{
    PanContext *const pan = ctx->priv;
    char *arg, *arg0, *tokenizer, *args = av_strdup(pan->args);
    int out_ch_id, in_ch_id, len, named, ret, sign = 1;
    int nb_in_channels[2] = { 0, 0 }; // number of unnamed and named input channels
    int used_out_ch[MAX_CHANNELS] = {0};
    double gain;

    if (!pan->args) {
        av_log(ctx, AV_LOG_ERROR,
               "pan filter needs a channel layout and a set "
               "of channel definitions as parameter\n");
        return AVERROR(EINVAL);
    }
    if (!args)
        return AVERROR(ENOMEM);
    /* first '|'-field: the output channel layout */
    arg = av_strtok(args, "|", &tokenizer);
    if (!arg) {
        av_log(ctx, AV_LOG_ERROR, "Channel layout not specified\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }
    ret = ff_parse_channel_layout(&pan->out_channel_layout,
                                  &pan->nb_output_channels, arg, ctx);
    if (ret < 0)
        goto fail;

    /* parse channel specifications */
    while ((arg = arg0 = av_strtok(NULL, "|", &tokenizer))) {
        int used_in_ch[MAX_CHANNELS] = {0};
        /* channel name */
        if (parse_channel_name(&arg, &out_ch_id, &named)) {
            av_log(ctx, AV_LOG_ERROR,
                   "Expected out channel name, got \"%.8s\"\n", arg);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if (named) {
            if (!((pan->out_channel_layout >> out_ch_id) & 1)) {
                av_log(ctx, AV_LOG_ERROR,
                       "Channel \"%.8s\" does not exist in the chosen layout\n", arg0);
                ret = AVERROR(EINVAL);
                goto fail;
            }
            /* get the channel number in the output channel layout:
             * out_channel_layout & ((1 << out_ch_id) - 1) are all the
             * channels that come before out_ch_id,
             * so their count is the index of out_ch_id */
            out_ch_id = av_get_channel_layout_nb_channels(pan->out_channel_layout & (((int64_t)1 << out_ch_id) - 1));
        }
        if (out_ch_id < 0 || out_ch_id >= pan->nb_output_channels) {
            av_log(ctx, AV_LOG_ERROR,
                   "Invalid out channel name \"%.8s\"\n", arg0);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        if (used_out_ch[out_ch_id]) {
            av_log(ctx, AV_LOG_ERROR,
                   "Can not reference out channel %d twice\n", out_ch_id);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        used_out_ch[out_ch_id] = 1;
        skip_spaces(&arg);
        /* '=' keeps gains as given, '<' marks this output for renorm */
        if (*arg == '=') {
            arg++;
        } else if (*arg == '<') {
            pan->need_renorm |= (int64_t)1 << out_ch_id;
            arg++;
        } else {
            av_log(ctx, AV_LOG_ERROR,
                   "Syntax error after channel name in \"%.8s\"\n", arg0);
            ret = AVERROR(EINVAL);
            goto fail;
        }
        /* gains */
        sign = 1;
        while (1) {
            gain = 1;
            /* optional "gain *" prefix before the input channel */
            if (sscanf(arg, "%lf%n *%n", &gain, &len, &len))
                arg += len;
            if (parse_channel_name(&arg, &in_ch_id, &named)){
                av_log(ctx, AV_LOG_ERROR,
                       "Expected in channel name, got \"%.8s\"\n", arg);
                ret = AVERROR(EINVAL);
                goto fail;
            }
            nb_in_channels[named]++;
            if (nb_in_channels[!named]) {
                av_log(ctx, AV_LOG_ERROR,
                       "Can not mix named and numbered channels\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if (used_in_ch[in_ch_id]) {
                av_log(ctx, AV_LOG_ERROR,
                       "Can not reference in channel %d twice\n", in_ch_id);
                ret = AVERROR(EINVAL);
                goto fail;
            }
            used_in_ch[in_ch_id] = 1;
            pan->gain[out_ch_id][in_ch_id] = sign * gain;
            skip_spaces(&arg);
            if (!*arg)
                break;
            /* '+' or '-' chains another term for the same output */
            if (*arg == '-') {
                sign = -1;
            } else if (*arg != '+') {
                av_log(ctx, AV_LOG_ERROR, "Syntax error near \"%.8s\"\n", arg);
                ret = AVERROR(EINVAL);
                goto fail;
            } else {
                sign = 1;
            }
            arg++;
        }
    }
    /* named inputs need remapping once the real layout is known */
    pan->need_renumber = !!nb_in_channels[1];

    ret = 0;
fail:
    av_free(args);
    return ret;
}
/**
 * Return 1 when the gain matrix describes a pure channel mapping:
 * every coefficient is exactly 0 or 1, and each output draws from at
 * most one input. Such a matrix can be handled by swr channel mapping
 * instead of full matrix mixing.
 */
static int are_gains_pure(const PanContext *pan)
{
    int out, in;

    for (out = 0; out < MAX_CHANNELS; out++) {
        int nb_nonzero = 0;

        for (in = 0; in < MAX_CHANNELS; in++) {
            double g = pan->gain[out][in];

            /* channel mapping is effective only if 0% or 100% of a channel is
             * selected... */
            if (g != 0. && g != 1.)
                return 0;
            /* ...and if the output channel is only composed of one input */
            if (g != 0. && nb_nonzero++)
                return 0;
        }
    }
    return 1;
}
/**
 * Format negotiation for pan: any sample format and rate (libswresample
 * handles them all), any input channel count, and exactly the requested
 * output layout (or bare channel count when no named layout was given).
 */
static int query_formats(AVFilterContext *ctx)
{
    PanContext *pan = ctx->priv;
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts;
    int ret;

    /* decide once whether swr can run in pure mapping mode */
    pan->pure_gains = are_gains_pure(pan);
    /* libswr supports any sample and packing formats */
    if ((ret = ff_set_common_formats(ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO))) < 0)
        return ret;

    formats = ff_all_samplerates();
    if ((ret = ff_set_common_samplerates(ctx, formats)) < 0)
        return ret;

    // inlink supports any channel layout
    layouts = ff_all_channel_counts();
    if ((ret = ff_channel_layouts_ref(layouts, &inlink->out_channel_layouts)) < 0)
        return ret;

    // outlink supports only requested output channel layout
    layouts = NULL;
    if ((ret = ff_add_channel_layout(&layouts,
                          pan->out_channel_layout ? pan->out_channel_layout :
                          FF_COUNT2LAYOUT(pan->nb_output_channels))) < 0)
        return ret;
    return ff_channel_layouts_ref(layouts, &outlink->in_channel_layouts);
}
/**
 * Finish setup once the input link is configured: renumber named input
 * channels against the real layout, configure and init the SwrContext
 * (either as a pure channel mapping or as a full mixing matrix, with
 * optional renormalization), and log a per-output gain summary.
 *
 * @return 0 on success, a negative AVERROR on failure
 */
static int config_props(AVFilterLink *link)
{
    AVFilterContext *ctx = link->dst;
    PanContext *pan = ctx->priv;
    char buf[1024], *cur;
    int i, j, k, r;
    double t;

    if (pan->need_renumber) {
        // input channels were given by their name: renumber them
        for (i = j = 0; i < MAX_CHANNELS; i++) {
            if ((link->channel_layout >> i) & 1) {
                for (k = 0; k < pan->nb_output_channels; k++)
                    pan->gain[k][j] = pan->gain[k][i];
                j++;
            }
        }
    }

    // sanity check; can't be done in query_formats since the inlink
    // channel layout is unknown at that time
    if (link->channels > MAX_CHANNELS ||
        pan->nb_output_channels > MAX_CHANNELS) {
        av_log(ctx, AV_LOG_ERROR,
               "af_pan supports a maximum of %d channels. "
               "Feel free to ask for a higher limit.\n", MAX_CHANNELS);
        return AVERROR_PATCHWELCOME;
    }

    // init libswresample context
    pan->swr = swr_alloc_set_opts(pan->swr,
                                  pan->out_channel_layout, link->format, link->sample_rate,
                                  link->channel_layout, link->format, link->sample_rate,
                                  0, ctx);
    if (!pan->swr)
        return AVERROR(ENOMEM);

    // layouts with unknown names: tell swr the raw channel counts instead
    if (!link->channel_layout) {
        if (av_opt_set_int(pan->swr, "ich", link->channels, 0) < 0)
            return AVERROR(EINVAL);
    }
    if (!pan->out_channel_layout) {
        if (av_opt_set_int(pan->swr, "och", pan->nb_output_channels, 0) < 0)
            return AVERROR(EINVAL);
    }

    // gains are pure, init the channel mapping
    if (pan->pure_gains) {
        // get channel map from the pure gains
        for (i = 0; i < pan->nb_output_channels; i++) {
            int ch_id = -1;
            for (j = 0; j < link->channels; j++) {
                if (pan->gain[i][j]) {
                    ch_id = j;
                    break;
                }
            }
            pan->channel_map[i] = ch_id;  /* -1 = muted output channel */
        }

        av_opt_set_int(pan->swr, "icl", pan->out_channel_layout, 0);
        av_opt_set_int(pan->swr, "uch", pan->nb_output_channels, 0);
        swr_set_channel_mapping(pan->swr, pan->channel_map);
    } else {
        // renormalize
        for (i = 0; i < pan->nb_output_channels; i++) {
            if (!((pan->need_renorm >> i) & 1))
                continue;
            t = 0;
            for (j = 0; j < link->channels; j++)
                t += fabs(pan->gain[i][j]);
            if (t > -1E-5 && t < 1E-5) {
                // t is almost 0 but not exactly, this is probably a mistake
                if (t)
                    av_log(ctx, AV_LOG_WARNING,
                           "Degenerate coefficients while renormalizing\n");
                continue;
            }
            for (j = 0; j < link->channels; j++)
                pan->gain[i][j] /= t;
        }
        av_opt_set_int(pan->swr, "icl", link->channel_layout, 0);
        av_opt_set_int(pan->swr, "ocl", pan->out_channel_layout, 0);
        /* rows are contiguous in pan->gain; stride = one row */
        swr_set_matrix(pan->swr, pan->gain[0], pan->gain[1] - pan->gain[0]);
    }

    r = swr_init(pan->swr);
    if (r < 0)
        return r;

    // summary
    for (i = 0; i < pan->nb_output_channels; i++) {
        cur = buf;
        for (j = 0; j < link->channels; j++) {
            r = snprintf(cur, buf + sizeof(buf) - cur, "%s%.3g i%d",
                         j ? " + " : "", pan->gain[i][j], j);
            cur += FFMIN(buf + sizeof(buf) - cur, r);
        }
        av_log(ctx, AV_LOG_VERBOSE, "o%d = %s\n", i, buf);
    }
    // add channel mapping summary if possible
    if (pan->pure_gains) {
        av_log(ctx, AV_LOG_INFO, "Pure channel mapping detected:");
        for (i = 0; i < pan->nb_output_channels; i++)
            if (pan->channel_map[i] < 0)
                av_log(ctx, AV_LOG_INFO, " M");
            else
                av_log(ctx, AV_LOG_INFO, " %d", pan->channel_map[i]);
        av_log(ctx, AV_LOG_INFO, "\n");
        return 0;
    }
    return 0;
}
/**
 * Remix one frame through the configured SwrContext and forward it,
 * tagging the output frame with the negotiated channel layout/count.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    AVFilterLink *const outlink = inlink->dst->outputs[0];
    PanContext *pan = inlink->dst->priv;
    const int nb_samples = insamples->nb_samples;
    AVFrame *outsamples = ff_get_audio_buffer(outlink, nb_samples);
    int ret;

    if (!outsamples) {
        av_frame_free(&insamples);
        return AVERROR(ENOMEM);
    }

    swr_convert(pan->swr, outsamples->extended_data, nb_samples,
                (void *)insamples->extended_data, nb_samples);
    av_frame_copy_props(outsamples, insamples);
    outsamples->channel_layout = outlink->channel_layout;
    outsamples->channels       = outlink->channels;

    ret = ff_filter_frame(outlink, outsamples);
    av_frame_free(&insamples);
    return ret;
}
/* Release the SwrContext allocated in config_props(). */
static av_cold void uninit(AVFilterContext *ctx)
{
    PanContext *pan = ctx->priv;
    swr_free(&pan->swr);
}
#define OFFSET(x) offsetof(PanContext, x)

/* Single positional string option; the full syntax is parsed in init(). */
static const AVOption pan_options[] = {
    { "args", NULL, OFFSET(args), AV_OPT_TYPE_STRING, { .str = NULL }, 0, 0, AV_OPT_FLAG_AUDIO_PARAM | AV_OPT_FLAG_FILTERING_PARAM },
    { NULL }
};

AVFILTER_DEFINE_CLASS(pan);
/* Single audio input; swr is configured per-link in config_props(). */
static const AVFilterPad pan_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_props,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single audio output with the layout negotiated in query_formats(). */
static const AVFilterPad pan_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Channel remixing/panning filter registration. */
AVFilter ff_af_pan = {
    .name          = "pan",
    .description   = NULL_IF_CONFIG_SMALL("Remix channels with coefficients (panning)."),
    .priv_size     = sizeof(PanContext),
    .priv_class    = &pan_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = pan_inputs,
    .outputs       = pan_outputs,
};

615
externals/ffmpeg/libavfilter/af_replaygain.c vendored Executable file
View File

@@ -0,0 +1,615 @@
/*
* Copyright (c) 1998 - 2009 Conifer Software
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* ReplayGain scanner
*/
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#define HISTOGRAM_SLOTS 12000   /* loudness histogram: one slot per 0.01 dB (see calc_replaygain) */
#define BUTTER_ORDER        2   /* order of the high-pass Butterworth filter */
#define YULE_ORDER         10   /* order of the equal-loudness ("Yule") filter */

/* One set of IIR coefficients for a given sample rate.
 * B* are feed-forward (numerator) and A* feed-back (denominator) terms. */
typedef struct ReplayGainFreqInfo {
    int    sample_rate;
    double BYule[YULE_ORDER + 1];
    double AYule[YULE_ORDER + 1];
    double BButter[BUTTER_ORDER + 1];
    double AButter[BUTTER_ORDER + 1];
} ReplayGainFreqInfo;
/*
 * Per-sample-rate coefficient table for the ReplayGain loudness filters.
 * Entries are ordered from highest to lowest sample rate; config_input()
 * selects the entry whose sample_rate matches the input link and
 * query_formats() restricts negotiation to exactly these rates.
 */
static const ReplayGainFreqInfo freqinfos[] =
{
    {
        192000,
        { 0.01184742123123, -0.04631092400086, 0.06584226961238,
         -0.02165588522478, -0.05656260778952, 0.08607493592760,
         -0.03375544339786, -0.04216579932754, 0.06416711490648,
         -0.03444708260844, 0.00697275872241 },
        { 1.00000000000000, -5.24727318348167, 10.60821585192244,
         -8.74127665810413, -1.33906071371683, 8.07972882096606,
         -5.46179918950847, 0.54318070652536, 0.87450969224280,
         -0.34656083539754, 0.03034796843589 },
        { 0.99653501465135, -1.99307002930271, 0.99653501465135 },
        { 1.00000000000000, -1.99305802314321, 0.99308203546221 },
    },
    {
        176400,
        { 0.00268568524529, -0.00852379426080, 0.00852704191347,
         0.00146116310295, -0.00950855828762, 0.00625449515499,
         0.00116183868722, -0.00362461417136, 0.00203961000134,
         -0.00050664587933, 0.00004327455427 },
        { 1.00000000000000, -5.57512782763045, 12.44291056065794,
         -12.87462799681221, 3.08554846961576, 6.62493459880692,
         -7.07662766313248, 2.51175542736441, 0.06731510802735,
         -0.24567753819213, 0.03961404162376 },
        { 0.99622916581118, -1.99245833162236, 0.99622916581118 },
        { 1.00000000000000, -1.99244411238133, 0.99247255086339 },
    },
    {
        144000,
        { 0.00639682359450, -0.02556437970955, 0.04230854400938,
         -0.03722462201267, 0.01718514827295, 0.00610592243009,
         -0.03065965747365, 0.04345745003539, -0.03298592681309,
         0.01320937236809, -0.00220304127757 },
        { 1.00000000000000, -6.14814623523425, 15.80002457141566,
         -20.78487587686937, 11.98848552310315, 3.36462015062606,
         -10.22419868359470, 6.65599702146473, -1.67141861110485,
         -0.05417956536718, 0.07374767867406 },
        { 0.99538268958706, -1.99076537917413, 0.99538268958706 },
        { 1.00000000000000, -1.99074405950505, 0.99078669884321 },
    },
    {
        128000,
        { 0.00553120584305, -0.02112620545016, 0.03549076243117,
         -0.03362498312306, 0.01425867248183, 0.01344686928787,
         -0.03392770787836, 0.03464136459530, -0.02039116051549,
         0.00667420794705, -0.00093763762995 },
        { 1.00000000000000, -6.14581710839925, 16.04785903675838,
         -22.19089131407749, 15.24756471580286, -0.52001440400238,
         -8.00488641699940, 6.60916094768855, -2.37856022810923,
         0.33106947986101, 0.00459820832036 },
        { 0.99480702681278, -1.98961405362557, 0.99480702681278 },
        { 1.00000000000000, -1.98958708647324, 0.98964102077790 },
    },
    {
        112000,
        { 0.00528778718259, -0.01893240907245, 0.03185982561867,
         -0.02926260297838, 0.00715743034072, 0.01985743355827,
         -0.03222614850941, 0.02565681978192, -0.01210662313473,
         0.00325436284541, -0.00044173593001 },
        { 1.00000000000000, -6.24932108456288, 17.42344320538476,
         -27.86819709054896, 26.79087344681326,-13.43711081485123,
         -0.66023612948173, 6.03658091814935, -4.24926577030310,
         1.40829268709186, -0.19480852628112 },
        { 0.99406737810867, -1.98813475621734, 0.99406737810867 },
        { 1.00000000000000, -1.98809955990514, 0.98816995252954 },
    },
    {
        96000,
        { 0.00588138296683, -0.01613559730421, 0.02184798954216,
         -0.01742490405317, 0.00464635643780, 0.01117772513205,
         -0.02123865824368, 0.01959354413350, -0.01079720643523,
         0.00352183686289, -0.00063124341421 },
        { 1.00000000000000, -5.97808823642008, 16.21362507964068,
         -25.72923730652599, 25.40470663139513,-14.66166287771134,
         2.81597484359752, 2.51447125969733, -2.23575306985286,
         0.75788151036791, -0.10078025199029 },
        { 0.99308203517541, -1.98616407035082, 0.99308203517541 },
        { 1.00000000000000, -1.98611621154089, 0.98621192916075 },
    },
    {
        88200,
        { 0.02667482047416, -0.11377479336097, 0.23063167910965,
         -0.30726477945593, 0.33188520686529, -0.33862680249063,
         0.31807161531340, -0.23730796929880, 0.12273894790371,
         -0.03840017967282, 0.00549673387936 },
        { 1.00000000000000, -6.31836451657302, 18.31351310801799,
         -31.88210014815921, 36.53792146976740,-28.23393036467559,
         14.24725258227189, -4.04670980012854, 0.18865757280515,
         0.25420333563908, -0.06012333531065 },
        { 0.99247255046129, -1.98494510092259, 0.99247255046129 },
        { 1.00000000000000, -1.98488843762335, 0.98500176422183 },
    },
    {
        64000,
        { 0.02613056568174, -0.08128786488109, 0.14937282347325,
         -0.21695711675126, 0.25010286673402, -0.23162283619278,
         0.17424041833052, -0.10299599216680, 0.04258696481981,
         -0.00977952936493, 0.00105325558889 },
        { 1.00000000000000, -5.73625477092119, 16.15249794355035,
         -29.68654912464508, 39.55706155674083,-39.82524556246253,
         30.50605345013009,-17.43051772821245, 7.05154573908017,
         -1.80783839720514, 0.22127840210813 },
        { 0.98964101933472, -1.97928203866944, 0.98964101933472 },
        { 1.00000000000000, -1.97917472731009, 0.97938935002880 },
    },
    {
        56000,
        { 0.03144914734085, -0.06151729206963, 0.08066788708145,
         -0.09737939921516, 0.08943210803999, -0.06989984672010,
         0.04926972841044, -0.03161257848451, 0.01456837493506,
         -0.00316015108496, 0.00132807215875 },
        { 1.00000000000000, -4.87377313090032, 12.03922160140209,
         -20.10151118381395, 25.10388534415171,-24.29065560815903,
         18.27158469090663,-10.45249552560593, 4.30319491872003,
         -1.13716992070185, 0.14510733527035 },
        { 0.98816995007392, -1.97633990014784, 0.98816995007392 },
        { 1.00000000000000, -1.97619994516973, 0.97647985512594 },
    },
    {
        48000,
        { 0.03857599435200, -0.02160367184185, -0.00123395316851,
         -0.00009291677959, -0.01655260341619, 0.02161526843274,
         -0.02074045215285, 0.00594298065125, 0.00306428023191,
         0.00012025322027, 0.00288463683916 },
        { 1.00000000000000, -3.84664617118067, 7.81501653005538,
         -11.34170355132042, 13.05504219327545,-12.28759895145294,
         9.48293806319790, -5.87257861775999, 2.75465861874613,
         -0.86984376593551, 0.13919314567432 },
        { 0.98621192462708, -1.97242384925416, 0.98621192462708 },
        { 1.00000000000000, -1.97223372919527, 0.97261396931306 },
    },
    {
        44100,
        { 0.05418656406430, -0.02911007808948, -0.00848709379851,
         -0.00851165645469, -0.00834990904936, 0.02245293253339,
         -0.02596338512915, 0.01624864962975, -0.00240879051584,
         0.00674613682247, -0.00187763777362 },
        { 1.00000000000000, -3.47845948550071, 6.36317777566148,
         -8.54751527471874, 9.47693607801280, -8.81498681370155,
         6.85401540936998, -4.39470996079559, 2.19611684890774,
         -0.75104302451432, 0.13149317958808 },
        { 0.98500175787242, -1.97000351574484, 0.98500175787242 },
        { 1.00000000000000, -1.96977855582618, 0.97022847566350 },
    },
    {
        37800,
        { 0.08717879977844, -0.01000374016172, -0.06265852122368,
         -0.01119328800950, -0.00114279372960, 0.02081333954769,
         -0.01603261863207, 0.01936763028546, 0.00760044736442,
         -0.00303979112271, -0.00075088605788 },
        { 1.00000000000000, -2.62816311472146, 3.53734535817992,
         -3.81003448678921, 3.91291636730132, -3.53518605896288,
         2.71356866157873, -1.86723311846592, 1.12075382367659,
         -0.48574086886890, 0.11330544663849 },
        { 0.98252400815195, -1.96504801630391, 0.98252400815195 },
        { 1.00000000000000, -1.96474258269041, 0.96535344991740 },
    },
    {
        32000,
        { 0.15457299681924, -0.09331049056315, -0.06247880153653,
         0.02163541888798, -0.05588393329856, 0.04781476674921,
         0.00222312597743, 0.03174092540049, -0.01390589421898,
         0.00651420667831, -0.00881362733839 },
        { 1.00000000000000, -2.37898834973084, 2.84868151156327,
         -2.64577170229825, 2.23697657451713, -1.67148153367602,
         1.00595954808547, -0.45953458054983, 0.16378164858596,
         -0.05032077717131, 0.02347897407020 },
        { 0.97938932735214, -1.95877865470428, 0.97938932735214 },
        { 1.00000000000000, -1.95835380975398, 0.95920349965459 },
    },
    {
        24000,
        { 0.30296907319327, -0.22613988682123, -0.08587323730772,
         0.03282930172664, -0.00915702933434, -0.02364141202522,
         -0.00584456039913, 0.06276101321749, -0.00000828086748,
         0.00205861885564, -0.02950134983287 },
        { 1.00000000000000, -1.61273165137247, 1.07977492259970,
         -0.25656257754070, -0.16276719120440, -0.22638893773906,
         0.39120800788284, -0.22138138954925, 0.04500235387352,
         0.02005851806501, 0.00302439095741 },
        { 0.97531843204928, -1.95063686409857, 0.97531843204928 },
        { 1.00000000000000, -1.95002759149878, 0.95124613669835 },
    },
    {
        22050,
        { 0.33642304856132, -0.25572241425570, -0.11828570177555,
         0.11921148675203, -0.07834489609479, -0.00469977914380,
         -0.00589500224440, 0.05724228140351, 0.00832043980773,
         -0.01635381384540, -0.01760176568150 },
        { 1.00000000000000, -1.49858979367799, 0.87350271418188,
         0.12205022308084, -0.80774944671438, 0.47854794562326,
         -0.12453458140019, -0.04067510197014, 0.08333755284107,
         -0.04237348025746, 0.02977207319925 },
        { 0.97316523498161, -1.94633046996323, 0.97316523498161 },
        { 1.00000000000000, -1.94561023566527, 0.94705070426118 },
    },
    {
        18900,
        { 0.38524531015142, -0.27682212062067, -0.09980181488805,
         0.09951486755646, -0.08934020156622, -0.00322369330199,
         -0.00110329090689, 0.03784509844682, 0.01683906213303,
         -0.01147039862572, -0.01941767987192 },
        { 1.00000000000000, -1.29708918404534, 0.90399339674203,
         -0.29613799017877, -0.42326645916207, 0.37934887402200,
         -0.37919795944938, 0.23410283284785, -0.03892971758879,
         0.00403009552351, 0.03640166626278 },
        { 0.96535326815829, -1.93070653631658, 0.96535326815829 },
        { 1.00000000000000, -1.92950577983524, 0.93190729279793 },
    },
    {
        16000,
        { 0.44915256608450, -0.14351757464547, -0.22784394429749,
         -0.01419140100551, 0.04078262797139, -0.12398163381748,
         0.04097565135648, 0.10478503600251, -0.01863887810927,
         -0.03193428438915, 0.00541907748707 },
        { 1.00000000000000, -0.62820619233671, 0.29661783706366,
         -0.37256372942400, 0.00213767857124, -0.42029820170918,
         0.22199650564824, 0.00613424350682, 0.06747620744683,
         0.05784820375801, 0.03222754072173 },
        { 0.96454515552826, -1.92909031105652, 0.96454515552826 },
        { 1.00000000000000, -1.92783286977036, 0.93034775234268 },
    },
    {
        12000,
        { 0.56619470757641, -0.75464456939302, 0.16242137742230,
         0.16744243493672, -0.18901604199609, 0.30931782841830,
         -0.27562961986224, 0.00647310677246, 0.08647503780351,
         -0.03788984554840, -0.00588215443421 },
        { 1.00000000000000, -1.04800335126349, 0.29156311971249,
         -0.26806001042947, 0.00819999645858, 0.45054734505008,
         -0.33032403314006, 0.06739368333110, -0.04784254229033,
         0.01639907836189, 0.01807364323573 },
        { 0.96009142950541, -1.92018285901082, 0.96009142950541 },
        { 1.00000000000000, -1.91858953033784, 0.92177618768381 },
    },
    {
        11025,
        { 0.58100494960553, -0.53174909058578, -0.14289799034253,
         0.17520704835522, 0.02377945217615, 0.15558449135573,
         -0.25344790059353, 0.01628462406333, 0.06920467763959,
         -0.03721611395801, -0.00749618797172 },
        { 1.00000000000000, -0.51035327095184, -0.31863563325245,
         -0.20256413484477, 0.14728154134330, 0.38952639978999,
         -0.23313271880868, -0.05246019024463, -0.02505961724053,
         0.02442357316099, 0.01818801111503 },
        { 0.95856916599601, -1.91713833199203, 0.95856916599601 },
        { 1.00000000000000, -1.91542108074780, 0.91885558323625 },
    },
    {
        8000,
        { 0.53648789255105, -0.42163034350696, -0.00275953611929,
         0.04267842219415, -0.10214864179676, 0.14590772289388,
         -0.02459864859345, -0.11202315195388, -0.04060034127000,
         0.04788665548180, -0.02217936801134 },
        { 1.00000000000000, -0.25049871956020, -0.43193942311114,
         -0.03424681017675, -0.04678328784242, 0.26408300200955,
         0.15113130533216, -0.17556493366449, -0.18823009262115,
         0.05477720428674, 0.04704409688120 },
        { 0.94597685600279, -1.89195371200558, 0.94597685600279 },
        { 1.00000000000000, -1.88903307939452, 0.89487434461664 },
    },
};
/* Persistent scanner state across frames. */
typedef struct ReplayGainContext {
    uint32_t histogram[HISTOGRAM_SLOTS]; /* loudness windows, 0.01 dB per slot (see calc_replaygain) */
    float peak;                          /* largest absolute sample value seen so far */
    int yule_hist_i, butter_hist_i;      /* current write indices into the ring buffers below */
    const double *yule_coeff_a;          /* coefficient pointers into freqinfos[], set in config_input() */
    const double *yule_coeff_b;
    const double *butter_coeff_a;
    const double *butter_coeff_b;
    float yule_hist_a[256];              /* interleaved L/R filter output history */
    float yule_hist_b[256];              /* interleaved L/R filter input history */
    float butter_hist_a[256];
    float butter_hist_b[256];
} ReplayGainContext;
/*
 * Restrict negotiation to packed float stereo at exactly the sample
 * rates for which coefficient sets exist in freqinfos[].
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;
    int i, ret;

    ret = ff_add_format(&formats, AV_SAMPLE_FMT_FLT);
    if (ret < 0)
        return ret;
    ret = ff_set_common_formats(ctx, formats);
    if (ret < 0)
        return ret;
    ret = ff_add_channel_layout(&layout, AV_CH_LAYOUT_STEREO);
    if (ret < 0)
        return ret;
    ret = ff_set_common_channel_layouts(ctx, layout);
    if (ret < 0)
        return ret;

    /* start a fresh list for the supported sample rates */
    formats = NULL;
    for (i = 0; i < FF_ARRAY_ELEMS(freqinfos); i++) {
        ret = ff_add_format(&formats, freqinfos[i].sample_rate);
        if (ret < 0)
            return ret;
    }
    return ff_set_common_samplerates(ctx, formats);
}
/*
 * Pick the coefficient set matching the negotiated sample rate and
 * initialize the filter history indices.  A match must exist because
 * query_formats() only offered rates present in freqinfos[].
 */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    ReplayGainContext *s = ctx->priv;
    int idx = 0;

    while (idx < FF_ARRAY_ELEMS(freqinfos) &&
           freqinfos[idx].sample_rate != inlink->sample_rate)
        idx++;
    av_assert0(idx < FF_ARRAY_ELEMS(freqinfos));

    s->yule_coeff_a   = freqinfos[idx].AYule;
    s->yule_coeff_b   = freqinfos[idx].BYule;
    s->butter_coeff_a = freqinfos[idx].AButter;
    s->butter_coeff_b = freqinfos[idx].BButter;

    /* leave room in the ring buffers for the filter order (stereo pairs) */
    s->yule_hist_i   = 20;
    s->butter_hist_i = 4;

    /* process fixed 50 ms chunks */
    inlink->partial_buf_size =
    inlink->min_samples =
    inlink->max_samples = inlink->sample_rate / 20;
    return 0;
}
/*
 * Track the largest absolute sample value over an interleaved stereo
 * buffer; *peak_p is only ever increased, never reset.
 */
static void calc_stereo_peak(const float *samples, int nb_samples,
                             float *peak_p)
{
    float peak = 0.0;
    int n;

    for (n = 0; n < nb_samples; n++) {
        const float l = fabsf(samples[2 * n]);
        const float r = fabsf(samples[2 * n + 1]);

        if (l > peak)
            peak = l;
        if (r > peak)
            peak = r;
    }
    if (peak > *peak_p)
        *peak_p = peak;
}
/*
* Calculate stereo RMS level. Minimum value is about -100 dB for
* digital silence. The 90 dB offset is to compensate for the
* normalized float range and 3 dB is for stereo samples.
*/
static double calc_stereo_rms(const float *samples, int nb_samples)
{
int count = nb_samples;
double sum = 1e-16;
while (count--) {
sum += samples[0] * samples[0] + samples[1] * samples[1];
samples += 2;
}
return 10 * log10 (sum / nb_samples) + 90.0 - 3.0;
}
/*
 * Optimized implementation of 2nd-order IIR stereo filter.
 *
 * In-place high-pass (Butterworth) filtering of interleaved stereo
 * floats.  hist_a/hist_b are 256-entry ring buffers of interleaved L/R
 * output/input history; index i always points at the next L slot, so
 * [i-2]/[i-1] are the previous L/R pair, [i-4]/[i-3] the one before.
 */
static void butter_filter_stereo_samples(ReplayGainContext *s,
                                         float *samples, int nb_samples)
{
    const double *coeff_a = s->butter_coeff_a;
    const double *coeff_b = s->butter_coeff_b;
    float *hist_a = s->butter_hist_a;
    float *hist_b = s->butter_hist_b;
    double left, right;
    int i, j;
    i = s->butter_hist_i;
    // If filter history is very small magnitude, clear it completely
    // to prevent denormals from rattling around in there forever
    // (slowing us down).
    for (j = -4; j < 0; ++j)
        if (fabs(hist_a[i + j]) > 1e-10 || fabs(hist_b[i + j]) > 1e-10)
            break;
    if (!j) {
        /* loop ran to completion: all 4 history entries are negligible */
        memset(s->butter_hist_a, 0, sizeof(s->butter_hist_a));
        memset(s->butter_hist_b, 0, sizeof(s->butter_hist_b));
    }
    while (nb_samples--) {
        /* store input, accumulate b[k]*x[n-k] - a[k]*y[n-k] per channel */
        left = (hist_b[i ] = samples[0]) * coeff_b[0];
        right = (hist_b[i + 1] = samples[1]) * coeff_b[0];
        left += hist_b[i - 2] * coeff_b[1] - hist_a[i - 2] * coeff_a[1];
        right += hist_b[i - 1] * coeff_b[1] - hist_a[i - 1] * coeff_a[1];
        left += hist_b[i - 4] * coeff_b[2] - hist_a[i - 4] * coeff_a[2];
        right += hist_b[i - 3] * coeff_b[2] - hist_a[i - 3] * coeff_a[2];
        samples[0] = hist_a[i ] = (float) left;
        samples[1] = hist_a[i + 1] = (float) right;
        samples += 2;
        if ((i += 2) == 256) {
            /* wrap: copy the last 4 history entries to the front so
             * negative offsets from the new index stay valid */
            memcpy(hist_a, hist_a + 252, sizeof(*hist_a) * 4);
            memcpy(hist_b, hist_b + 252, sizeof(*hist_b) * 4);
            i = 4;
        }
    }
    s->butter_hist_i = i;
}
/*
 * Optimized implementation of 10th-order IIR stereo filter.
 *
 * Equal-loudness ("Yule walk") filter from interleaved stereo src to
 * dst.  Same ring-buffer layout as the Butterworth filter, but with 20
 * history entries (10 taps x 2 channels); the update is fully unrolled.
 */
static void yule_filter_stereo_samples(ReplayGainContext *s, const float *src,
                                       float *dst, int nb_samples)
{
    const double *coeff_a = s->yule_coeff_a;
    const double *coeff_b = s->yule_coeff_b;
    float *hist_a = s->yule_hist_a;
    float *hist_b = s->yule_hist_b;
    double left, right;
    int i, j;
    i = s->yule_hist_i;
    // If filter history is very small magnitude, clear it completely to
    // prevent denormals from rattling around in there forever
    // (slowing us down).
    for (j = -20; j < 0; ++j)
        if (fabs(hist_a[i + j]) > 1e-10 || fabs(hist_b[i + j]) > 1e-10)
            break;
    if (!j) {
        /* loop ran to completion: entire history is negligible */
        memset(s->yule_hist_a, 0, sizeof(s->yule_hist_a));
        memset(s->yule_hist_b, 0, sizeof(s->yule_hist_b));
    }
    while (nb_samples--) {
        /* unrolled b[k]*x[n-k] - a[k]*y[n-k] accumulation, k = 0..10;
         * even offsets are the left channel, odd offsets the right */
        left = (hist_b[i] = src[0]) * coeff_b[0];
        right = (hist_b[i + 1] = src[1]) * coeff_b[0];
        left += hist_b[i - 2] * coeff_b[ 1] - hist_a[i - 2] * coeff_a[1 ];
        right += hist_b[i - 1] * coeff_b[ 1] - hist_a[i - 1] * coeff_a[1 ];
        left += hist_b[i - 4] * coeff_b[ 2] - hist_a[i - 4] * coeff_a[2 ];
        right += hist_b[i - 3] * coeff_b[ 2] - hist_a[i - 3] * coeff_a[2 ];
        left += hist_b[i - 6] * coeff_b[ 3] - hist_a[i - 6] * coeff_a[3 ];
        right += hist_b[i - 5] * coeff_b[ 3] - hist_a[i - 5] * coeff_a[3 ];
        left += hist_b[i - 8] * coeff_b[ 4] - hist_a[i - 8] * coeff_a[4 ];
        right += hist_b[i - 7] * coeff_b[ 4] - hist_a[i - 7] * coeff_a[4 ];
        left += hist_b[i - 10] * coeff_b[ 5] - hist_a[i - 10] * coeff_a[5 ];
        right += hist_b[i - 9] * coeff_b[ 5] - hist_a[i - 9] * coeff_a[5 ];
        left += hist_b[i - 12] * coeff_b[ 6] - hist_a[i - 12] * coeff_a[6 ];
        right += hist_b[i - 11] * coeff_b[ 6] - hist_a[i - 11] * coeff_a[6 ];
        left += hist_b[i - 14] * coeff_b[ 7] - hist_a[i - 14] * coeff_a[7 ];
        right += hist_b[i - 13] * coeff_b[ 7] - hist_a[i - 13] * coeff_a[7 ];
        left += hist_b[i - 16] * coeff_b[ 8] - hist_a[i - 16] * coeff_a[8 ];
        right += hist_b[i - 15] * coeff_b[ 8] - hist_a[i - 15] * coeff_a[8 ];
        left += hist_b[i - 18] * coeff_b[ 9] - hist_a[i - 18] * coeff_a[9 ];
        right += hist_b[i - 17] * coeff_b[ 9] - hist_a[i - 17] * coeff_a[9 ];
        left += hist_b[i - 20] * coeff_b[10] - hist_a[i - 20] * coeff_a[10];
        right += hist_b[i - 19] * coeff_b[10] - hist_a[i - 19] * coeff_a[10];
        dst[0] = hist_a[i ] = (float)left;
        dst[1] = hist_a[i + 1] = (float)right;
        src += 2;
        dst += 2;
        if ((i += 2) == 256) {
            /* wrap: keep the last 20 history entries at the front so
             * negative offsets from the new index stay valid */
            memcpy(hist_a, hist_a + 236, sizeof(*hist_a) * 20);
            memcpy(hist_b, hist_b + 236, sizeof(*hist_b) * 20);
            i = 20;
        }
    }
    s->yule_hist_i = i;
}
/*
 * Calculate the ReplayGain value from the specified loudness histogram:
 * walk down from the loudest slot until 5% of all analysis windows are
 * covered, derive the gain from that slot (0.01 dB resolution), then
 * clip the result to the -24 / +64 dB range.
 */
static float calc_replaygain(uint32_t *histogram)
{
    uint32_t loud_count = 0, total_windows = 0;
    float gain;
    int i;

    for (i = 0; i < HISTOGRAM_SLOTS; i++)
        total_windows += histogram[i];

    for (i = HISTOGRAM_SLOTS - 1; i >= 0; i--) {
        loud_count += histogram[i];
        if (loud_count * 20 >= total_windows)
            break;
    }

    gain = (float)(64.54 - i / 100.0);
    if (gain < -24.0f)
        return -24.0f;
    if (gain > 64.0f)
        return 64.0f;
    return gain;
}
/**
 * Analyze one stereo float frame: update the sample peak, run the
 * equal-loudness and high-pass filters into a scratch frame, bucket the
 * resulting RMS loudness into the histogram, then pass the ORIGINAL
 * frame through unchanged (the scanner does not modify the audio).
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    ReplayGainContext *s = ctx->priv;
    AVFrame *scratch;
    int64_t slot;

    scratch = ff_get_audio_buffer(outlink, in->nb_samples);
    if (!scratch) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }

    calc_stereo_peak((float *)in->data[0],
                     in->nb_samples, &s->peak);
    yule_filter_stereo_samples(s, (const float *)in->data[0],
                               (float *)scratch->data[0],
                               scratch->nb_samples);
    butter_filter_stereo_samples(s, (float *)scratch->data[0],
                                 scratch->nb_samples);

    /* loudness in dB scaled to the histogram's 0.01 dB resolution */
    slot = lrint(floor(100 * calc_stereo_rms((float *)scratch->data[0],
                                             scratch->nb_samples)));
    slot = av_clip64(slot, 0, HISTOGRAM_SLOTS - 1);
    s->histogram[slot]++;

    av_frame_free(&scratch);
    return ff_filter_frame(outlink, in);
}
/* Report the accumulated ReplayGain results when the filter is torn down. */
static av_cold void uninit(AVFilterContext *ctx)
{
    ReplayGainContext *s = ctx->priv;
    const float track_gain = calc_replaygain(s->histogram);

    av_log(ctx, AV_LOG_INFO, "track_gain = %+.2f dB\n", track_gain);
    av_log(ctx, AV_LOG_INFO, "track_peak = %.6f\n", s->peak);
}
/* Single stereo-float input; analysis happens per 50 ms chunk. */
static const AVFilterPad replaygain_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad replaygain_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Pass-through analysis filter: audio is unmodified, gain/peak are
 * logged at uninit time. */
AVFilter ff_af_replaygain = {
    .name          = "replaygain",
    .description   = NULL_IF_CONFIG_SMALL("ReplayGain scanner."),
    .query_formats = query_formats,
    .uninit        = uninit,
    .priv_size     = sizeof(ReplayGainContext),
    .inputs        = replaygain_inputs,
    .outputs       = replaygain_outputs,
};

357
externals/ffmpeg/libavfilter/af_resample.c vendored Executable file
View File

@@ -0,0 +1,357 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* sample format and channel layout conversion audio filter
*/
#include "libavutil/avassert.h"
#include "libavutil/avstring.h"
#include "libavutil/common.h"
#include "libavutil/dict.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavresample/avresample.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
/* Private state of the lavr-based resample filter. */
typedef struct ResampleContext {
    const AVClass *class;
    AVAudioResampleContext *avr;  /* NULL when conversion is a pass-through */
    AVDictionary *options;        /* user options forwarded to lavr in config_output() */
    int resampling;               /* nonzero if sample rates differ (or resampling forced) */
    int64_t next_pts;             /* expected pts of the next output frame (output tb) */
    int64_t next_in_pts;          /* expected pts of the next input frame (input tb) */
    /* set by filter_frame() to signal an output frame to request_frame() */
    int got_output;
} ResampleContext;
/**
 * Collect every user option that libavresample recognizes into
 * s->options (consuming it from @p opts), then strip the basic format
 * options so the user cannot override what link negotiation decides.
 */
static av_cold int init(AVFilterContext *ctx, AVDictionary **opts)
{
    ResampleContext *s = ctx->priv;
    const AVClass *avr_class = avresample_get_class();
    AVDictionaryEntry *entry = NULL;

    /* keep the options lavr knows about */
    while ((entry = av_dict_get(*opts, "", entry, AV_DICT_IGNORE_SUFFIX))) {
        if (av_opt_find(&avr_class, entry->key, NULL, 0,
                        AV_OPT_SEARCH_FAKE_OBJ | AV_OPT_SEARCH_CHILDREN))
            av_dict_set(&s->options, entry->key, entry->value, 0);
    }

    /* mark the copied options as consumed */
    entry = NULL;
    while ((entry = av_dict_get(s->options, "", entry, AV_DICT_IGNORE_SUFFIX)))
        av_dict_set(opts, entry->key, NULL, 0);

    /* do not allow the user to override basic format options */
    av_dict_set(&s->options, "in_channel_layout",  NULL, 0);
    av_dict_set(&s->options, "out_channel_layout", NULL, 0);
    av_dict_set(&s->options, "in_sample_fmt",      NULL, 0);
    av_dict_set(&s->options, "out_sample_fmt",     NULL, 0);
    av_dict_set(&s->options, "in_sample_rate",     NULL, 0);
    av_dict_set(&s->options, "out_sample_rate",    NULL, 0);
    return 0;
}
/* Tear down the lavr context (if open) and the saved option dictionary. */
static av_cold void uninit(AVFilterContext *ctx)
{
    ResampleContext *const s = ctx->priv;

    if (s->avr) {
        avresample_close(s->avr);
        avresample_free(&s->avr);
    }
    av_dict_free(&s->options);
}
/*
 * Accept every audio format, sample rate and channel layout on both
 * links independently; config_output() decides whether conversion is
 * actually needed.
 */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterLink *inlink  = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    AVFilterFormats *in_formats, *out_formats, *in_samplerates, *out_samplerates;
    AVFilterChannelLayouts *in_layouts, *out_layouts;
    int ret;

    in_formats      = ff_all_formats(AVMEDIA_TYPE_AUDIO);
    out_formats     = ff_all_formats(AVMEDIA_TYPE_AUDIO);
    in_samplerates  = ff_all_samplerates();
    out_samplerates = ff_all_samplerates();
    in_layouts      = ff_all_channel_layouts();
    out_layouts     = ff_all_channel_layouts();
    if (!in_formats || !out_formats || !in_samplerates || !out_samplerates ||
        !in_layouts || !out_layouts)
        return AVERROR(ENOMEM);

    if ((ret = ff_formats_ref(in_formats, &inlink->out_formats)) < 0)
        return ret;
    if ((ret = ff_formats_ref(out_formats, &outlink->in_formats)) < 0)
        return ret;
    if ((ret = ff_formats_ref(in_samplerates, &inlink->out_samplerates)) < 0)
        return ret;
    if ((ret = ff_formats_ref(out_samplerates, &outlink->in_samplerates)) < 0)
        return ret;
    if ((ret = ff_channel_layouts_ref(in_layouts, &inlink->out_channel_layouts)) < 0)
        return ret;
    if ((ret = ff_channel_layouts_ref(out_layouts, &outlink->in_channel_layouts)) < 0)
        return ret;
    return 0;
}
/**
 * (Re)configure the lavr context for the negotiated input/output
 * formats.  If input and output are effectively identical (same layout
 * and rate, and the same format up to planarity for mono), no context
 * is created and frames pass through untouched.
 *
 * Returns 0 on success or a negative AVERROR code.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *inlink = ctx->inputs[0];
    ResampleContext *s = ctx->priv;
    char buf1[64], buf2[64];
    int ret;  /* fix: was shadowed by an inner `int ret` below, which hid
                 this declaration and tripped -Wshadow */
    int64_t resampling_forced;

    if (s->avr) {
        avresample_close(s->avr);
        avresample_free(&s->avr);
    }

    /* pass-through case: nothing to convert */
    if (inlink->channel_layout == outlink->channel_layout &&
        inlink->sample_rate == outlink->sample_rate &&
        (inlink->format == outlink->format ||
        (av_get_channel_layout_nb_channels(inlink->channel_layout) == 1 &&
         av_get_channel_layout_nb_channels(outlink->channel_layout) == 1 &&
         av_get_planar_sample_fmt(inlink->format) ==
         av_get_planar_sample_fmt(outlink->format))))
        return 0;

    if (!(s->avr = avresample_alloc_context()))
        return AVERROR(ENOMEM);

    /* forward the user options collected in init() */
    if (s->options) {
        AVDictionaryEntry *e = NULL;
        while ((e = av_dict_get(s->options, "", e, AV_DICT_IGNORE_SUFFIX)))
            av_log(ctx, AV_LOG_VERBOSE, "lavr option: %s=%s\n", e->key, e->value);
        ret = av_opt_set_dict(s->avr, &s->options);
        if (ret < 0)
            return ret;
    }

    av_opt_set_int(s->avr, "in_channel_layout",  inlink ->channel_layout, 0);
    av_opt_set_int(s->avr, "out_channel_layout", outlink->channel_layout, 0);
    av_opt_set_int(s->avr, "in_sample_fmt",      inlink ->format,         0);
    av_opt_set_int(s->avr, "out_sample_fmt",     outlink->format,         0);
    av_opt_set_int(s->avr, "in_sample_rate",     inlink ->sample_rate,    0);
    av_opt_set_int(s->avr, "out_sample_rate",    outlink->sample_rate,    0);

    if ((ret = avresample_open(s->avr)) < 0)
        return ret;

    av_opt_get_int(s->avr, "force_resampling", 0, &resampling_forced);
    s->resampling = resampling_forced || (inlink->sample_rate != outlink->sample_rate);

    if (s->resampling) {
        /* when resampling, output timestamps count output samples */
        outlink->time_base = (AVRational){ 1, outlink->sample_rate };
        s->next_pts        = AV_NOPTS_VALUE;
        s->next_in_pts     = AV_NOPTS_VALUE;
    } else
        outlink->time_base = inlink->time_base;

    av_get_channel_layout_string(buf1, sizeof(buf1),
                                 -1, inlink ->channel_layout);
    av_get_channel_layout_string(buf2, sizeof(buf2),
                                 -1, outlink->channel_layout);
    av_log(ctx, AV_LOG_VERBOSE,
           "fmt:%s srate:%d cl:%s -> fmt:%s srate:%d cl:%s\n",
           av_get_sample_fmt_name(inlink ->format), inlink ->sample_rate, buf1,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate, buf2);
    return 0;
}
/**
 * Pull frames from upstream until filter_frame() has produced output.
 * On EOF, drain whatever samples libavresample still buffers and emit
 * them as one final frame stamped with the running next_pts.
 */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ResampleContext *s = ctx->priv;
    int ret = 0;
    s->got_output = 0;
    /* keep requesting: a single input frame may yield no output yet */
    while (ret >= 0 && !s->got_output)
        ret = ff_request_frame(ctx->inputs[0]);
    /* flush the lavr delay buffer */
    if (ret == AVERROR_EOF && s->avr) {
        AVFrame *frame;
        int nb_samples = avresample_get_out_samples(s->avr, 0);
        if (!nb_samples)
            return ret;  /* nothing buffered: propagate EOF */
        frame = ff_get_audio_buffer(outlink, nb_samples);
        if (!frame)
            return AVERROR(ENOMEM);
        /* NULL input pointer drains lavr's internal FIFO */
        ret = avresample_convert(s->avr, frame->extended_data,
                                 frame->linesize[0], nb_samples,
                                 NULL, 0, 0);
        if (ret <= 0) {
            av_frame_free(&frame);
            return (ret == 0) ? AVERROR_EOF : ret;
        }
        frame->nb_samples = ret;
        frame->pts = s->next_pts;
        return ff_filter_frame(outlink, frame);
    }
    return ret;
}
/**
 * Convert one input frame through lavr (or pass it through when no
 * context is needed).  Takes ownership of @p in.
 *
 * When resampling, output pts normally advances by the number of
 * samples emitted (s->next_pts); the input pts is only re-derived on a
 * discontinuity to avoid per-frame rescaling rounding errors.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    ResampleContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    int ret;
    if (s->avr) {
        AVFrame *out;
        int delay, nb_samples;
        /* maximum possible samples lavr can output */
        delay = avresample_get_delay(s->avr);
        nb_samples = avresample_get_out_samples(s->avr, in->nb_samples);
        out = ff_get_audio_buffer(outlink, nb_samples);
        if (!out) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ret = avresample_convert(s->avr, out->extended_data, out->linesize[0],
                                 nb_samples, in->extended_data, in->linesize[0],
                                 in->nb_samples);
        if (ret <= 0) {
            /* ret == 0: no output yet, drop the buffer but keep going so
             * the pts bookkeeping below still runs */
            av_frame_free(&out);
            if (ret < 0)
                goto fail;
        }
        av_assert0(!avresample_available(s->avr));
        if (s->resampling && s->next_pts == AV_NOPTS_VALUE) {
            /* first frame: seed the running output timestamp */
            if (in->pts == AV_NOPTS_VALUE) {
                av_log(ctx, AV_LOG_WARNING, "First timestamp is missing, "
                       "assuming 0.\n");
                s->next_pts = 0;
            } else
                s->next_pts = av_rescale_q(in->pts, inlink->time_base,
                                           outlink->time_base);
        }
        if (ret > 0) {
            out->nb_samples = ret;
            ret = av_frame_copy_props(out, in);
            if (ret < 0) {
                av_frame_free(&out);
                goto fail;
            }
            if (s->resampling) {
                out->sample_rate = outlink->sample_rate;
                /* Only convert in->pts if there is a discontinuous jump.
                   This ensures that out->pts tracks the number of samples actually
                   output by the resampler in the absence of such a jump.
                   Otherwise, the rounding in av_rescale_q() and av_rescale()
                   causes off-by-1 errors. */
                if (in->pts != AV_NOPTS_VALUE && in->pts != s->next_in_pts) {
                    out->pts = av_rescale_q(in->pts, inlink->time_base,
                                            outlink->time_base) -
                               av_rescale(delay, outlink->sample_rate,
                                          inlink->sample_rate);
                } else
                    out->pts = s->next_pts;
                s->next_pts = out->pts + out->nb_samples;
                s->next_in_pts = in->pts + in->nb_samples;
            } else
                out->pts = in->pts;
            ret = ff_filter_frame(outlink, out);
            s->got_output = 1;
        }
fail:
        av_frame_free(&in);
    } else {
        /* pass-through: only planarity may differ for mono (see
         * config_output()), so just restamp the format */
        in->format = outlink->format;
        ret = ff_filter_frame(outlink, in);
        s->got_output = 1;
    }
    return ret;
}
/* AVOption child-class iterator: expose the lavr class exactly once. */
static const AVClass *resample_child_class_next(const AVClass *prev)
{
    if (prev)
        return NULL;
    return avresample_get_class();
}
/* AVOption child iterator: expose the lavr context exactly once. */
static void *resample_child_next(void *obj, void *prev)
{
    ResampleContext *s = obj;

    if (prev)
        return NULL;
    return s->avr;
}
/* Option class: no options of its own; forwards to lavr via the child
 * iterators so `-af resample=<lavr option>` works. */
static const AVClass resample_class = {
    .class_name       = "resample",
    .item_name        = av_default_item_name,
    .version          = LIBAVUTIL_VERSION_INT,
    .child_class_next = resample_child_class_next,
    .child_next       = resample_child_next,
};

static const AVFilterPad avfilter_af_resample_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Output pad owns format negotiation (config_output) and the pull
 * side (request_frame) so the lavr FIFO can be flushed on EOF. */
static const AVFilterPad avfilter_af_resample_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame
    },
    { NULL }
};

AVFilter ff_af_resample = {
    .name          = "resample",
    .description   = NULL_IF_CONFIG_SMALL("Audio resampling and conversion."),
    .priv_size     = sizeof(ResampleContext),
    .priv_class    = &resample_class,
    .init_dict     = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = avfilter_af_resample_inputs,
    .outputs       = avfilter_af_resample_outputs,
};

247
externals/ffmpeg/libavfilter/af_rubberband.c vendored Executable file
View File

@@ -0,0 +1,247 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <rubberband/rubberband-c.h>
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "internal.h"
/* Private state of the librubberband time-stretch/pitch-shift filter. */
typedef struct RubberBandContext {
    const AVClass *class;
    RubberBandState rbs;       /* librubberband stretcher handle */
    double tempo, pitch;       /* scale factors (options, runtime-tunable) */
    int transients, detector, phase, window,
        smoothing, formant, opitch, channels;  /* librubberband option flags (options) */
    /* NOTE(review): the fields below are used by code outside this chunk;
     * they appear to be running sample totals and timestamp bookkeeping —
     * confirm against the filter body. */
    int64_t nb_samples_out;
    int64_t nb_samples_in;
    int64_t first_pts;
    int nb_samples;
} RubberBandContext;
#define OFFSET(x) offsetof(RubberBandContext, x)
/* A: option settable only at init; AT: also adjustable at runtime */
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define AT AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM
static const AVOption rubberband_options[] = {
{ "tempo", "set tempo scale factor", OFFSET(tempo), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.01, 100, AT },
{ "pitch", "set pitch scale factor", OFFSET(pitch), AV_OPT_TYPE_DOUBLE, {.dbl=1}, 0.01, 100, AT },
{ "transients", "set transients", OFFSET(transients), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "transients" },
{ "crisp", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionTransientsCrisp}, 0, 0, A, "transients" },
{ "mixed", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionTransientsMixed}, 0, 0, A, "transients" },
{ "smooth", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionTransientsSmooth}, 0, 0, A, "transients" },
{ "detector", "set detector", OFFSET(detector), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "detector" },
{ "compound", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionDetectorCompound}, 0, 0, A, "detector" },
{ "percussive", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionDetectorPercussive}, 0, 0, A, "detector" },
{ "soft", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionDetectorSoft}, 0, 0, A, "detector" },
{ "phase", "set phase", OFFSET(phase), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "phase" },
{ "laminar", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionPhaseLaminar}, 0, 0, A, "phase" },
{ "independent", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionPhaseIndependent}, 0, 0, A, "phase" },
{ "window", "set window", OFFSET(window), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "window" },
{ "standard", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionWindowStandard}, 0, 0, A, "window" },
{ "short", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionWindowShort}, 0, 0, A, "window" },
{ "long", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionWindowLong}, 0, 0, A, "window" },
{ "smoothing", "set smoothing", OFFSET(smoothing), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "smoothing" },
{ "off", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionSmoothingOff}, 0, 0, A, "smoothing" },
{ "on", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionSmoothingOn}, 0, 0, A, "smoothing" },
{ "formant", "set formant", OFFSET(formant), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "formant" },
{ "shifted", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionFormantShifted}, 0, 0, A, "formant" },
{ "preserved", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionFormantPreserved}, 0, 0, A, "formant" },
{ "pitchq", "set pitch quality", OFFSET(opitch), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "pitch" },
{ "quality", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionPitchHighQuality}, 0, 0, A, "pitch" },
{ "speed", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionPitchHighSpeed}, 0, 0, A, "pitch" },
{ "consistency", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionPitchHighConsistency}, 0, 0, A, "pitch" },
{ "channels", "set channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64=0}, 0, INT_MAX, A, "channels" },
{ "apart", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionChannelsApart}, 0, 0, A, "channels" },
{ "together", 0, 0, AV_OPT_TYPE_CONST, {.i64=RubberBandOptionChannelsTogether}, 0, 0, A, "channels" },
{ NULL },
};
AVFILTER_DEFINE_CLASS(rubberband);
/* Tear down the filter: release the Rubber Band stretcher if one exists. */
static av_cold void uninit(AVFilterContext *ctx)
{
    RubberBandContext *s = ctx->priv;

    if (!s->rbs)
        return;

    rubberband_delete(s->rbs);
}
/* Negotiate formats: any channel count and sample rate, planar float only. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE,
    };
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *fmts;
    int ret;

    /* Any channel count is acceptable. */
    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    /* Planar float is the only sample format handed to librubberband. */
    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, fmts)) < 0)
        return ret;

    /* Any sample rate works. */
    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/* Feed one input frame into the stretcher and emit whatever output is
 * already available.  Returns a negative error code, or the number of
 * samples emitted (0 if librubberband buffered everything). */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    RubberBandContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int ret = 0, nb_samples;

    /* Remember the stream's first timestamp; output pts are derived from
     * it plus the number of samples produced so far. */
    if (s->first_pts == AV_NOPTS_VALUE)
        s->first_pts = in->pts;

    /* Last argument flags the final block; non-zero when the link has a
     * status (EOF) set — NOTE(review): ff_outlink_get_status() is applied
     * to the *input* link here, confirm that is intentional. */
    rubberband_process(s->rbs, (const float *const *)in->data, in->nb_samples, ff_outlink_get_status(inlink));
    s->nb_samples_in += in->nb_samples;

    nb_samples = rubberband_available(s->rbs);
    if (nb_samples > 0) {
        out = ff_get_audio_buffer(outlink, nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        /* pts = first_pts + samples-emitted-so-far, rescaled to the link
         * time base. */
        out->pts = s->first_pts + av_rescale_q(s->nb_samples_out,
                       (AVRational){ 1, outlink->sample_rate },
                       outlink->time_base);
        nb_samples = rubberband_retrieve(s->rbs, (float *const *)out->data, nb_samples);
        out->nb_samples = nb_samples;
        ret = ff_filter_frame(outlink, out);
        s->nb_samples_out += nb_samples;
    }

    av_frame_free(&in);

    /* If enough input is already queued for another full process() call,
     * ask the scheduler to run us again promptly. */
    if (ff_inlink_queued_samples(inlink) >= s->nb_samples)
        ff_filter_set_ready(ctx, 100);

    return ret < 0 ? ret : nb_samples;
}
/* Create (or re-create) the stretcher for the negotiated link parameters. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    RubberBandContext *s = ctx->priv;
    const int opts = RubberBandOptionProcessRealTime |
                     s->transients | s->detector | s->phase | s->window |
                     s->smoothing | s->formant | s->opitch | s->channels;

    /* Drop any stretcher left over from a previous configuration. */
    if (s->rbs)
        rubberband_delete(s->rbs);

    s->rbs = rubberband_new(inlink->sample_rate, inlink->channels, opts,
                            1. / s->tempo, s->pitch);
    if (!s->rbs)
        return AVERROR(ENOMEM);

    /* How many samples the library wants per processing call. */
    s->nb_samples = rubberband_get_samples_required(s->rbs);
    s->first_pts  = AV_NOPTS_VALUE;

    return 0;
}
/* Scheduler entry point: pull exactly the number of samples librubberband
 * requested, run them through filter_frame(), and forward status/wanted
 * flags between the links. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    RubberBandContext *s = ctx->priv;
    AVFrame *in = NULL;
    int ret;

    /* Propagate EOF/error from the output side back to the input. */
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);

    /* min == max == s->nb_samples: consume full blocks only. */
    ret = ff_inlink_consume_samples(inlink, s->nb_samples, s->nb_samples, &in);
    if (ret < 0)
        return ret;
    if (ret > 0) {
        /* filter_frame() returns samples emitted; any non-zero result
         * (error or progress) ends this activation. */
        ret = filter_frame(inlink, in);
        if (ret != 0)
            return ret;
    }

    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);

    return FFERROR_NOT_READY;
}
/* Handle runtime option changes (tempo/pitch) and push them into the
 * stretcher immediately. */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    RubberBandContext *s = ctx->priv;
    int ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);

    if (ret < 0)
        return ret;

    /* Apply the (possibly updated) option values to the live stretcher. */
    rubberband_set_time_ratio(s->rbs, 1. / s->tempo);
    rubberband_set_pitch_scale(s->rbs, s->pitch);

    return 0;
}
static const AVFilterPad rubberband_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,   /* (re)creates the stretcher */
    },
    { NULL }
};

static const AVFilterPad rubberband_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Time-stretch / pitch-shift filter built on librubberband. */
AVFilter ff_af_rubberband = {
    .name            = "rubberband",
    .description     = NULL_IF_CONFIG_SMALL("Apply time-stretching and pitch-shifting."),
    .query_formats   = query_formats,
    .priv_size       = sizeof(RubberBandContext),
    .priv_class      = &rubberband_class,
    .uninit          = uninit,
    .activate        = activate,
    .inputs          = rubberband_inputs,
    .outputs         = rubberband_outputs,
    .process_command = process_command,
};

View File

@@ -0,0 +1,496 @@
/*
* Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen and others
* Copyright (c) 2015 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Audio (Sidechain) Compressor filter
*/
#include "libavutil/audio_fifo.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "formats.h"
#include "hermite.h"
#include "internal.h"
/* Shared state for the sidechaincompress and acompressor filters. */
typedef struct SidechainCompressContext {
    const AVClass *class;

    double level_in;                // input gain
    double level_sc;                // sidechain gain (sidechaincompress only)
    double attack, attack_coeff;    // attack time (ms) and derived smoothing coeff
    double release, release_coeff;  // release time (ms) and derived smoothing coeff
    double lin_slope;               // smoothed detector level (linear domain)
    double ratio;
    double threshold;
    double makeup;                  // make-up gain
    double mix;                     // wet/dry balance (1 = fully compressed)
    double thres;                   // log(threshold), cached
    double knee;
    double knee_start;              // log-domain knee boundaries, cached
    double knee_stop;
    double lin_knee_start;          // linear-domain knee boundaries, cached
    double lin_knee_stop;
    double adj_knee_start;          // squared knee boundaries (RMS detection)
    double adj_knee_stop;
    double compressed_knee_start;   // knee boundaries after applying the ratio
    double compressed_knee_stop;
    int link;                       // channel linking: 0 = average, 1 = maximum
    int detection;                  // 0 = peak, 1 = RMS
    int mode;                       // 0 = downward, 1 = upward

    AVAudioFifo *fifo[2];           // buffered main/sidechain samples (sidechaincompress)
    int64_t pts;                    // next output timestamp (sidechaincompress)
} SidechainCompressContext;
#define OFFSET(x) offsetof(SidechainCompressContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
#define R AV_OPT_FLAG_RUNTIME_PARAM

/* Shared option table; all options can be changed at runtime (R flag),
 * process_command() re-derives the cached coefficients afterwards. */
static const AVOption options[] = {
    { "level_in",  "set input gain",     OFFSET(level_in),  AV_OPT_TYPE_DOUBLE, {.dbl=1},        0.015625,   64, A|F|R },
    { "mode",      "set mode",           OFFSET(mode),      AV_OPT_TYPE_INT,    {.i64=0},               0,    1, A|F|R, "mode" },
    {   "downward",0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=0},               0,    0, A|F|R, "mode" },
    {   "upward",  0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=1},               0,    0, A|F|R, "mode" },
    { "threshold", "set threshold",      OFFSET(threshold), AV_OPT_TYPE_DOUBLE, {.dbl=0.125},  0.000976563,    1, A|F|R },
    { "ratio",     "set ratio",          OFFSET(ratio),     AV_OPT_TYPE_DOUBLE, {.dbl=2},               1,   20, A|F|R },
    { "attack",    "set attack",         OFFSET(attack),    AV_OPT_TYPE_DOUBLE, {.dbl=20},           0.01, 2000, A|F|R },
    { "release",   "set release",        OFFSET(release),   AV_OPT_TYPE_DOUBLE, {.dbl=250},          0.01, 9000, A|F|R },
    { "makeup",    "set make up gain",   OFFSET(makeup),    AV_OPT_TYPE_DOUBLE, {.dbl=1},               1,   64, A|F|R },
    { "knee",      "set knee",           OFFSET(knee),      AV_OPT_TYPE_DOUBLE, {.dbl=2.82843},         1,    8, A|F|R },
    { "link",      "set link type",      OFFSET(link),      AV_OPT_TYPE_INT,    {.i64=0},               0,    1, A|F|R, "link" },
    {   "average", 0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=0},               0,    0, A|F|R, "link" },
    {   "maximum", 0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=1},               0,    0, A|F|R, "link" },
    { "detection", "set detection",      OFFSET(detection), AV_OPT_TYPE_INT,    {.i64=1},               0,    1, A|F|R, "detection" },
    {   "peak",    0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=0},               0,    0, A|F|R, "detection" },
    {   "rms",     0,                    0,                 AV_OPT_TYPE_CONST,  {.i64=1},               0,    0, A|F|R, "detection" },
    { "level_sc",  "set sidechain gain", OFFSET(level_sc),  AV_OPT_TYPE_DOUBLE, {.dbl=1},        0.015625,   64, A|F|R },
    { "mix",       "set mix",            OFFSET(mix),       AV_OPT_TYPE_DOUBLE, {.dbl=1},               0,    1, A|F|R },
    { NULL }
};

#define sidechaincompress_options options
AVFILTER_DEFINE_CLASS(sidechaincompress);

// A fake infinity value (because real infinity may break some hosts)
#define FAKE_INFINITY (65536.0 * 65536.0)

// Check for infinity (with appropriate-ish tolerance)
#define IS_FAKE_INFINITY(value) (fabs(value-FAKE_INFINITY) < 1.0)
/* Compute the linear gain factor for the current detector level.
 * All *_start/*_stop parameters are in the log domain; the hermite
 * interpolation rounds off the transfer curve inside the knee. */
static double output_gain(double lin_slope, double ratio, double thres,
                          double knee, double knee_start, double knee_stop,
                          double compressed_knee_start,
                          double compressed_knee_stop,
                          int detection, int mode)
{
    double slope = log(lin_slope);
    double gain, delta;

    /* RMS detection tracks squared samples; halve the log slope. */
    if (detection)
        slope *= 0.5;

    if (IS_FAKE_INFINITY(ratio)) {
        /* "Infinite" ratio: hard limiting at the threshold. */
        gain  = thres;
        delta = 0.0;
    } else {
        gain  = (slope - thres) / ratio + thres;
        delta = 1.0 / ratio;
    }

    if (mode) {
        /* Upward mode: soften the curve above the knee start. */
        if (knee > 1.0 && slope > knee_start)
            gain = hermite_interpolation(slope, knee_stop, knee_start,
                                         knee_stop, compressed_knee_start,
                                         1.0, delta);
    } else {
        /* Downward mode: soften the curve below the knee stop. */
        if (knee > 1.0 && slope < knee_stop)
            gain = hermite_interpolation(slope, knee_start, knee_stop,
                                         knee_start, compressed_knee_stop,
                                         1.0, delta);
    }

    return exp(gain - slope);
}
/* Derive all cached coefficients from the user-facing option values.
 * Called on (re)configuration and after every runtime option change. */
static int compressor_config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SidechainCompressContext *s = ctx->priv;
    const double sqrt_knee = sqrt(s->knee);

    /* Threshold and knee boundaries in linear, squared and log domains. */
    s->thres          = log(s->threshold);
    s->lin_knee_start = s->threshold / sqrt_knee;
    s->lin_knee_stop  = s->threshold * sqrt_knee;
    s->adj_knee_start = s->lin_knee_start * s->lin_knee_start;
    s->adj_knee_stop  = s->lin_knee_stop  * s->lin_knee_stop;
    s->knee_start     = log(s->lin_knee_start);
    s->knee_stop      = log(s->lin_knee_stop);
    s->compressed_knee_start = (s->knee_start - s->thres) / s->ratio + s->thres;
    s->compressed_knee_stop  = (s->knee_stop  - s->thres) / s->ratio + s->thres;

    /* Smoothing coefficients derived from attack/release times in ms. */
    s->attack_coeff  = FFMIN(1., 1. / (s->attack  * outlink->sample_rate / 4000.));
    s->release_coeff = FFMIN(1., 1. / (s->release * outlink->sample_rate / 4000.));

    return 0;
}
/* Core per-sample compression loop over interleaved doubles: the gain is
 * derived from the sidechain signal (scsrc) and applied to the main signal
 * (src).  For acompressor, src and scsrc point at the same data. */
static void compressor(SidechainCompressContext *s,
                       const double *src, double *dst, const double *scsrc, int nb_samples,
                       double level_in, double level_sc,
                       AVFilterLink *inlink, AVFilterLink *sclink)
{
    const double makeup = s->makeup;
    const double mix = s->mix;
    int i, c;

    for (i = 0; i < nb_samples; i++) {
        double abs_sample, gain = 1.0;
        double detector;
        int detected;

        /* Fold the sidechain channels into one detector value: either the
         * per-frame maximum (link == 1) or the channel average. */
        abs_sample = fabs(scsrc[0] * level_sc);

        if (s->link == 1) {
            for (c = 1; c < sclink->channels; c++)
                abs_sample = FFMAX(fabs(scsrc[c] * level_sc), abs_sample);
        } else {
            for (c = 1; c < sclink->channels; c++)
                abs_sample += fabs(scsrc[c] * level_sc);

            abs_sample /= sclink->channels;
        }

        /* RMS detection tracks squared amplitudes. */
        if (s->detection)
            abs_sample *= abs_sample;

        /* One-pole smoothing with separate attack/release coefficients. */
        s->lin_slope += (abs_sample - s->lin_slope) * (abs_sample > s->lin_slope ? s->attack_coeff : s->release_coeff);

        if (s->mode) {
            /* Upward mode: engage below the knee stop. */
            detector = (s->detection ? s->adj_knee_stop : s->lin_knee_stop);
            detected = s->lin_slope < detector;
        } else {
            /* Downward mode: engage above the knee start. */
            detector = (s->detection ? s->adj_knee_start : s->lin_knee_start);
            detected = s->lin_slope > detector;
        }

        if (s->lin_slope > 0.0 && detected)
            gain = output_gain(s->lin_slope, s->ratio, s->thres, s->knee,
                               s->knee_start, s->knee_stop,
                               s->compressed_knee_start,
                               s->compressed_knee_stop,
                               s->detection, s->mode);

        /* Apply input gain and blend compressed/dry according to mix. */
        for (c = 0; c < inlink->channels; c++)
            dst[c] = src[c] * level_in * (gain * makeup * mix + (1. - mix));

        src += inlink->channels;
        dst += inlink->channels;
        scsrc += sclink->channels;
    }
}
/* Handle runtime option changes for both filter variants. */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    int ret = ff_filter_process_command(ctx, cmd, args, res, res_len, flags);

    if (ret < 0)
        return ret;

    /* Re-derive every cached coefficient from the updated option values. */
    compressor_config_output(ctx->outputs[0]);

    return 0;
}
#if CONFIG_SIDECHAINCOMPRESS_FILTER
/* Activate callback for the two-input sidechaincompress filter: buffer
 * both inputs in FIFOs and emit output whenever both sides have samples. */
static int activate(AVFilterContext *ctx)
{
    SidechainCompressContext *s = ctx->priv;
    AVFrame *out = NULL, *in[2] = { NULL };
    int ret, i, nb_samples;
    double *dst;

    FF_FILTER_FORWARD_STATUS_BACK_ALL(ctx->outputs[0], ctx);

    /* Drain whatever each input currently offers into its FIFO. */
    if ((ret = ff_inlink_consume_frame(ctx->inputs[0], &in[0])) > 0) {
        av_audio_fifo_write(s->fifo[0], (void **)in[0]->extended_data,
                            in[0]->nb_samples);
        av_frame_free(&in[0]);
    }
    if (ret < 0)
        return ret;
    if ((ret = ff_inlink_consume_frame(ctx->inputs[1], &in[1])) > 0) {
        av_audio_fifo_write(s->fifo[1], (void **)in[1]->extended_data,
                            in[1]->nb_samples);
        av_frame_free(&in[1]);
    }
    if (ret < 0)
        return ret;

    /* Process only the overlap available on both FIFOs. */
    nb_samples = FFMIN(av_audio_fifo_size(s->fifo[0]), av_audio_fifo_size(s->fifo[1]));
    if (nb_samples) {
        out = ff_get_audio_buffer(ctx->outputs[0], nb_samples);
        if (!out)
            return AVERROR(ENOMEM);
        /* Pull matching sample runs from both FIFOs into temp frames. */
        for (i = 0; i < 2; i++) {
            in[i] = ff_get_audio_buffer(ctx->inputs[i], nb_samples);
            if (!in[i]) {
                av_frame_free(&in[0]);
                av_frame_free(&in[1]);
                av_frame_free(&out);
                return AVERROR(ENOMEM);
            }
            av_audio_fifo_read(s->fifo[i], (void **)in[i]->data, nb_samples);
        }

        dst = (double *)out->data[0];
        /* Timestamps are synthesized from a running sample counter. */
        out->pts = s->pts;
        s->pts += av_rescale_q(nb_samples, (AVRational){1, ctx->outputs[0]->sample_rate}, ctx->outputs[0]->time_base);

        compressor(s, (double *)in[0]->data[0], dst,
                   (double *)in[1]->data[0], nb_samples,
                   s->level_in, s->level_sc,
                   ctx->inputs[0], ctx->inputs[1]);

        av_frame_free(&in[0]);
        av_frame_free(&in[1]);

        ret = ff_filter_frame(ctx->outputs[0], out);
        if (ret < 0)
            return ret;
    }
    FF_FILTER_FORWARD_STATUS(ctx->inputs[0], ctx->outputs[0]);
    FF_FILTER_FORWARD_STATUS(ctx->inputs[1], ctx->outputs[0]);
    /* Ask upstream for more data on whichever side ran dry. */
    if (ff_outlink_frame_wanted(ctx->outputs[0])) {
        if (!av_audio_fifo_size(s->fifo[0]))
            ff_inlink_request_frame(ctx->inputs[0]);
        if (!av_audio_fifo_size(s->fifo[1]))
            ff_inlink_request_frame(ctx->inputs[1]);
    }
    return 0;
}
/* Negotiate formats for the two-input filter: the output inherits the main
 * input's first channel layout, both inputs accept any channel count, and
 * samples are interleaved doubles at a common rate.
 *
 * Fix vs. original: ff_all_channel_counts(), ff_make_format_list() and
 * ff_all_samplerates() can return NULL on allocation failure; check them
 * before use, matching acompressor_query_formats() in this file. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts = NULL;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int ret, i;

    /* The main input's layout must already be known. */
    if (!ctx->inputs[0]->in_channel_layouts ||
        !ctx->inputs[0]->in_channel_layouts->nb_channel_layouts) {
        av_log(ctx, AV_LOG_WARNING,
               "No channel layout for input 1\n");
        return AVERROR(EAGAIN);
    }

    if ((ret = ff_add_channel_layout(&layouts, ctx->inputs[0]->in_channel_layouts->channel_layouts[0])) < 0 ||
        (ret = ff_channel_layouts_ref(layouts, &ctx->outputs[0]->in_channel_layouts)) < 0)
        return ret;

    for (i = 0; i < 2; i++) {
        layouts = ff_all_channel_counts();
        if (!layouts)
            return AVERROR(ENOMEM);
        if ((ret = ff_channel_layouts_ref(layouts, &ctx->inputs[i]->out_channel_layouts)) < 0)
            return ret;
    }

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Configure the output link from the main input and allocate the FIFOs
 * used to keep both inputs in lockstep. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AVFilterLink *main_link = ctx->inputs[0];
    AVFilterLink *sc_link   = ctx->inputs[1];
    SidechainCompressContext *s = ctx->priv;

    /* Samples from both inputs are consumed pairwise, so the rates must match. */
    if (main_link->sample_rate != sc_link->sample_rate) {
        av_log(ctx, AV_LOG_ERROR,
               "Inputs must have the same sample rate "
               "%d for in0 vs %d for in1\n",
               main_link->sample_rate, sc_link->sample_rate);
        return AVERROR(EINVAL);
    }

    /* The output mirrors the main input's audio parameters. */
    outlink->sample_rate    = main_link->sample_rate;
    outlink->time_base      = main_link->time_base;
    outlink->channel_layout = main_link->channel_layout;
    outlink->channels       = main_link->channels;

    s->fifo[0] = av_audio_fifo_alloc(main_link->format, main_link->channels, 1024);
    s->fifo[1] = av_audio_fifo_alloc(sc_link->format, sc_link->channels, 1024);
    if (!s->fifo[0] || !s->fifo[1])
        return AVERROR(ENOMEM);

    compressor_config_output(outlink);

    return 0;
}
/* Release both FIFOs (entries may be NULL if config_output() never ran;
 * the original code also freed them unconditionally). */
static av_cold void uninit(AVFilterContext *ctx)
{
    SidechainCompressContext *s = ctx->priv;
    int i;

    for (i = 0; i < 2; i++)
        av_audio_fifo_free(s->fifo[i]);
}
/* Two inputs: the audio to compress and the sidechain key signal. */
static const AVFilterPad sidechaincompress_inputs[] = {
    {
        .name = "main",
        .type = AVMEDIA_TYPE_AUDIO,
    },{
        .name = "sidechain",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

static const AVFilterPad sidechaincompress_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};

AVFilter ff_af_sidechaincompress = {
    .name            = "sidechaincompress",
    .description     = NULL_IF_CONFIG_SMALL("Sidechain compressor."),
    .priv_size       = sizeof(SidechainCompressContext),
    .priv_class      = &sidechaincompress_class,
    .query_formats   = query_formats,
    .activate        = activate,
    .uninit          = uninit,
    .inputs          = sidechaincompress_inputs,
    .outputs         = sidechaincompress_outputs,
    .process_command = process_command,
};
#endif  /* CONFIG_SIDECHAINCOMPRESS_FILTER */
#if CONFIG_ACOMPRESSOR_FILTER
/* Single-input variant: the signal keys its own compression. */
static int acompressor_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    SidechainCompressContext *s = ctx->priv;
    const double *src = (const double *)in->data[0];
    AVFrame *out;

    /* Process in place when the frame is writable, otherwise allocate. */
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }

    /* src doubles as the sidechain; both gains use level_in. */
    compressor(s, src, (double *)out->data[0], src, in->nb_samples,
               s->level_in, s->level_in, inlink, inlink);

    if (out != in)
        av_frame_free(&in);

    return ff_filter_frame(outlink, out);
}
/* Negotiate formats: any channel count and rate, interleaved doubles. */
static int acompressor_query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *fmts;
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, fmts)) < 0)
        return ret;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
/* acompressor reuses the shared option table and context. */
#define acompressor_options options
AVFILTER_DEFINE_CLASS(acompressor);

static const AVFilterPad acompressor_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .filter_frame = acompressor_filter_frame,
    },
    { NULL }
};

static const AVFilterPad acompressor_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = compressor_config_output,  /* derive coefficients only */
    },
    { NULL }
};

AVFilter ff_af_acompressor = {
    .name            = "acompressor",
    .description     = NULL_IF_CONFIG_SMALL("Audio compressor."),
    .priv_size       = sizeof(SidechainCompressContext),
    .priv_class      = &acompressor_class,
    .query_formats   = acompressor_query_formats,
    .inputs          = acompressor_inputs,
    .outputs         = acompressor_outputs,
    .process_command = process_command,
};
#endif  /* CONFIG_ACOMPRESSOR_FILTER */

View File

@@ -0,0 +1,270 @@
/*
* Copyright (c) 2012 Clément Bœsch <u pkh me>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Audio silence detector
*/
#include <float.h> /* DBL_MAX */
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "audio.h"
#include "formats.h"
#include "avfilter.h"
#include "internal.h"
typedef struct SilenceDetectContext {
    const AVClass *class;
    double noise;               ///< noise amplitude ratio (rescaled to integer range for s16/s32 in config_input())
    int64_t duration;           ///< minimum duration of silence until notification; converted to samples in config_input()
    int mono;                   ///< mono mode : check each channel separately (default = check when ALL channels are silent)
    int channels;               ///< number of channels
    int independent_channels;   ///< number of entries in following arrays (channels in mono mode, otherwise 1)
    int64_t *nb_null_samples;   ///< (array) current number of continuous zero samples
    int64_t *start;             ///< (array) if silence is detected, this value contains the time of the first zero sample (default/unset = INT64_MIN)
    int64_t frame_end;          ///< pts of the end of the current frame (used to compute duration of silence at EOS)
    int last_sample_rate;       ///< last sample rate to check for sample rate changes
    AVRational time_base;       ///< time_base
    void (*silencedetect)(struct SilenceDetectContext *s, AVFrame *insamples,
                          int nb_samples, int64_t nb_samples_notify,
                          AVRational time_base); ///< per-sample-format detection worker (see SILENCE_DETECT)
} SilenceDetectContext;
/* Cap the duration option at 24 hours (in AV_TIME_BASE microseconds). */
#define MAX_DURATION (24*3600*1000000LL)
#define OFFSET(x) offsetof(SilenceDetectContext, x)
#define FLAGS AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM

/* Each option has a long and a short alias writing the same field. */
static const AVOption silencedetect_options[] = {
    { "n",         "set noise tolerance",              OFFSET(noise),     AV_OPT_TYPE_DOUBLE,   {.dbl=0.001},          0, DBL_MAX,      FLAGS },
    { "noise",     "set noise tolerance",              OFFSET(noise),     AV_OPT_TYPE_DOUBLE,   {.dbl=0.001},          0, DBL_MAX,      FLAGS },
    { "d",         "set minimum duration in seconds",  OFFSET(duration),  AV_OPT_TYPE_DURATION, {.i64=2000000},        0, MAX_DURATION, FLAGS },
    { "duration",  "set minimum duration in seconds",  OFFSET(duration),  AV_OPT_TYPE_DURATION, {.i64=2000000},        0, MAX_DURATION, FLAGS },
    { "mono",      "check each channel separately",    OFFSET(mono),      AV_OPT_TYPE_BOOL,     {.i64=0},              0, 1,            FLAGS },
    { "m",         "check each channel separately",    OFFSET(mono),      AV_OPT_TYPE_BOOL,     {.i64=0},              0, 1,            FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(silencedetect);
/* Attach a "lavfi.<key>" metadata entry to the frame; per-channel entries
 * (channel != 0) get a ".<channel>" suffix. */
static void set_meta(AVFrame *insamples, int channel, const char *key, char *value)
{
    char name[128];

    if (channel)
        snprintf(name, sizeof(name), "lavfi.%s.%d", key, channel);
    else
        snprintf(name, sizeof(name), "lavfi.%s", key);

    av_dict_set(&insamples->metadata, name, value, 0);
}
/* Track the silence state of one detector channel and attach metadata /
 * log messages when a silence interval starts or ends.  With
 * insamples == NULL (called from uninit() at EOS) the interval is closed
 * at s->frame_end instead of a position inside a frame. */
static av_always_inline void update(SilenceDetectContext *s, AVFrame *insamples,
                                    int is_silence, int current_sample, int64_t nb_samples_notify,
                                    AVRational time_base)
{
    /* Samples are interleaved, so the channel cycles with the sample index;
     * with mono off, independent_channels is 1 and channel is always 0. */
    int channel = current_sample % s->independent_channels;
    if (is_silence) {
        if (s->start[channel] == INT64_MIN) {
            s->nb_null_samples[channel]++;
            /* Report only once silence has lasted the configured duration;
             * the start pts is computed back from the current position. */
            if (s->nb_null_samples[channel] >= nb_samples_notify) {
                s->start[channel] = insamples->pts + av_rescale_q(current_sample / s->channels + 1 - nb_samples_notify * s->independent_channels / s->channels,
                        (AVRational){ 1, s->last_sample_rate }, time_base);
                set_meta(insamples, s->mono ? channel + 1 : 0, "silence_start",
                        av_ts2timestr(s->start[channel], &time_base));
                if (s->mono)
                    av_log(s, AV_LOG_INFO, "channel: %d | ", channel);
                av_log(s, AV_LOG_INFO, "silence_start: %s\n",
                        av_ts2timestr(s->start[channel], &time_base));
            }
        }
    } else {
        /* Non-silent sample: close a reported interval, if one is open. */
        if (s->start[channel] > INT64_MIN) {
            int64_t end_pts = insamples ? insamples->pts + av_rescale_q(current_sample / s->channels,
                    (AVRational){ 1, s->last_sample_rate }, time_base)
                    : s->frame_end;
            int64_t duration_ts = end_pts - s->start[channel];
            if (insamples) {
                set_meta(insamples, s->mono ? channel + 1 : 0, "silence_end",
                        av_ts2timestr(end_pts, &time_base));
                set_meta(insamples, s->mono ? channel + 1 : 0, "silence_duration",
                        av_ts2timestr(duration_ts, &time_base));
            }
            if (s->mono)
                av_log(s, AV_LOG_INFO, "channel: %d | ", channel);
            av_log(s, AV_LOG_INFO, "silence_end: %s | silence_duration: %s\n",
                   av_ts2timestr(end_pts, &time_base),
                   av_ts2timestr(duration_ts, &time_base));
        }
        /* Any non-silent sample resets the counter for this channel. */
        s->nb_null_samples[channel] = 0;
        s->start[channel] = INT64_MIN;
    }
}
/* Generate one silencedetect_<fmt>() worker per sample format: it walks
 * every interleaved sample and classifies it against the noise threshold
 * (|sample| strictly below s->noise counts as silence). */
#define SILENCE_DETECT(name, type)                                      \
static void silencedetect_##name(SilenceDetectContext *s, AVFrame *insamples, \
                                 int nb_samples, int64_t nb_samples_notify, \
                                 AVRational time_base)                  \
{                                                                       \
    const type *p = (const type *)insamples->data[0];                   \
    const type noise = s->noise;                                        \
    int i;                                                              \
                                                                        \
    for (i = 0; i < nb_samples; i++, p++)                               \
        update(s, insamples, *p < noise && *p > -noise, i,              \
               nb_samples_notify, time_base);                           \
}

SILENCE_DETECT(dbl, double)
SILENCE_DETECT(flt, float)
SILENCE_DETECT(s32, int32_t)
SILENCE_DETECT(s16, int16_t)
/* Per-link setup: convert the duration option to samples, allocate the
 * per-channel tracking arrays and pick the format-specific worker. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SilenceDetectContext *s = ctx->priv;
    int i;

    s->channels = inlink->channels;
    /* Convert duration from AV_TIME_BASE units to a sample count. */
    s->duration = av_rescale(s->duration, inlink->sample_rate, AV_TIME_BASE);
    s->independent_channels = s->mono ? s->channels : 1;

    s->nb_null_samples = av_mallocz_array(sizeof(*s->nb_null_samples),
                                          s->independent_channels);
    if (!s->nb_null_samples)
        return AVERROR(ENOMEM);

    s->start = av_malloc_array(sizeof(*s->start), s->independent_channels);
    if (!s->start)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->independent_channels; i++)
        s->start[i] = INT64_MIN;

    switch (inlink->format) {
    case AV_SAMPLE_FMT_DBL:
        s->silencedetect = silencedetect_dbl;
        break;
    case AV_SAMPLE_FMT_FLT:
        s->silencedetect = silencedetect_flt;
        break;
    case AV_SAMPLE_FMT_S32:
        /* Scale the [0,1] noise threshold to the integer sample range. */
        s->noise *= INT32_MAX;
        s->silencedetect = silencedetect_s32;
        break;
    case AV_SAMPLE_FMT_S16:
        s->noise *= INT16_MAX;
        s->silencedetect = silencedetect_s16;
        break;
    }

    return 0;
}
/* Analyse one frame for silence and pass it through unchanged (metadata
 * may be added by the detection worker). */
static int filter_frame(AVFilterLink *inlink, AVFrame *insamples)
{
    SilenceDetectContext *s = inlink->dst->priv;
    const int nb_channels = inlink->channels;
    const int srate = inlink->sample_rate;
    /* Total interleaved sample count, all channels included. */
    const int nb_samples = insamples->nb_samples * nb_channels;
    /* Consecutive silent samples needed before an interval is reported. */
    const int64_t nb_samples_notify = s->duration * (s->mono ? 1 : nb_channels);
    int c;

    // scale number of null samples to the new sample rate
    if (s->last_sample_rate && s->last_sample_rate != srate)
        for (c = 0; c < s->independent_channels; c++) {
            s->nb_null_samples[c] = srate * s->nb_null_samples[c] / s->last_sample_rate;
        }
    s->last_sample_rate = srate;
    s->time_base = inlink->time_base;
    /* Remember where this frame ends so uninit() can close intervals
     * still open at EOS. */
    s->frame_end = insamples->pts + av_rescale_q(insamples->nb_samples,
            (AVRational){ 1, s->last_sample_rate }, inlink->time_base);

    s->silencedetect(s, insamples, nb_samples, nb_samples_notify,
                     inlink->time_base);

    return ff_filter_frame(inlink->dst->outputs[0], insamples);
}
/* Negotiate formats: all named channel layouts, all rates, and the four
 * interleaved sample formats the detection workers are generated for. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_FLT,
        AV_SAMPLE_FMT_S32,
        AV_SAMPLE_FMT_S16,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *fmts;
    int ret;

    layouts = ff_all_channel_layouts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    fmts = ff_make_format_list(sample_fmts);
    if (!fmts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, fmts)) < 0)
        return ret;

    fmts = ff_all_samplerates();
    if (!fmts)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, fmts);
}
static av_cold void uninit(AVFilterContext *ctx)
{
    SilenceDetectContext *s = ctx->priv;
    int c;

    /* Close any silence interval still open at EOS: update() with a NULL
     * frame uses s->frame_end (end of the last frame) as the end pts.
     * The channel index is passed via current_sample, which maps back to
     * the channel because independent_channels divides it. */
    for (c = 0; c < s->independent_channels; c++)
        if (s->start[c] > INT64_MIN)
            update(s, NULL, 0, c, 0, s->time_base);
    av_freep(&s->nb_null_samples);
    av_freep(&s->start);
}
static const AVFilterPad silencedetect_inputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad silencedetect_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Pass-through analysis filter: audio is unchanged, silence intervals are
 * reported via frame metadata and the log. */
AVFilter ff_af_silencedetect = {
    .name          = "silencedetect",
    .description   = NULL_IF_CONFIG_SMALL("Detect silence."),
    .priv_size     = sizeof(SilenceDetectContext),
    .query_formats = query_formats,
    .uninit        = uninit,
    .inputs        = silencedetect_inputs,
    .outputs       = silencedetect_outputs,
    .priv_class    = &silencedetect_class,
};

View File

@@ -0,0 +1,681 @@
/*
* Copyright (c) 2001 Heikki Leinonen
* Copyright (c) 2001 Chris Bagwell
* Copyright (c) 2003 Donnie Smith
* Copyright (c) 2014 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <float.h> /* DBL_MAX */
#include "libavutil/opt.h"
#include "libavutil/timestamp.h"
#include "audio.h"
#include "formats.h"
#include "avfilter.h"
#include "internal.h"
enum SilenceDetect {
D_PEAK,
D_RMS,
};
enum ThresholdMode {
T_ANY,
T_ALL,
};
enum SilenceMode {
SILENCE_TRIM,
SILENCE_TRIM_FLUSH,
SILENCE_COPY,
SILENCE_COPY_FLUSH,
SILENCE_STOP
};
/* State for the silenceremove filter.  NOTE(review): most of the logic
 * using these fields is outside this chunk; the *_opt fields presumably
 * hold raw AV_TIME_BASE option values converted into the plain fields —
 * confirm against the rest of the file. */
typedef struct SilenceRemoveContext {
    const AVClass *class;

    enum SilenceMode mode;          // current state of the trim state machine

    /* "start" side: trimming silence at the beginning of the stream. */
    int start_periods;
    int64_t start_duration;
    int64_t start_duration_opt;     // option value (AV_OPT_TYPE_DURATION)
    double start_threshold;
    int64_t start_silence;
    int64_t start_silence_opt;      // option value (AV_OPT_TYPE_DURATION)
    int start_mode;                 // T_ANY / T_ALL channel trigger

    /* "stop" side: trimming silence at the end of the stream. */
    int stop_periods;
    int64_t stop_duration;
    int64_t stop_duration_opt;      // option value (AV_OPT_TYPE_DURATION)
    double stop_threshold;
    int64_t stop_silence;
    int64_t stop_silence_opt;       // option value (AV_OPT_TYPE_DURATION)
    int stop_mode;                  // T_ANY / T_ALL channel trigger

    /* Holdoff/silence buffers with ring-style offset/end cursors. */
    double *start_holdoff;
    double *start_silence_hold;
    size_t start_holdoff_offset;
    size_t start_holdoff_end;
    size_t start_silence_offset;
    size_t start_silence_end;
    int start_found_periods;

    double *stop_holdoff;
    double *stop_silence_hold;
    size_t stop_holdoff_offset;
    size_t stop_holdoff_end;
    size_t stop_silence_offset;
    size_t stop_silence_end;
    int stop_found_periods;

    /* Sliding analysis window (duration set by the "window" option). */
    double window_ratio;
    double *window;
    double *window_current;
    double *window_end;
    int window_size;
    double sum;                     // running window sum

    int restart;
    int64_t next_pts;

    int detection;                  // D_PEAK / D_RMS
    void (*update)(struct SilenceRemoveContext *s, double sample);
    double(*compute)(struct SilenceRemoveContext *s, double sample);
} SilenceRemoveContext;
#define OFFSET(x) offsetof(SilenceRemoveContext, x)
#define AF AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_AUDIO_PARAM

/* User options.  Duration options are parsed in AV_TIME_BASE units and
 * rescaled to sample counts in config_input(). */
static const AVOption silenceremove_options[] = {
    { "start_periods",   NULL,                                                 OFFSET(start_periods),       AV_OPT_TYPE_INT,      {.i64=0},     0,      9000, AF },
    { "start_duration",  "set start duration of non-silence part",             OFFSET(start_duration_opt),  AV_OPT_TYPE_DURATION, {.i64=0},     0, INT32_MAX, AF },
    { "start_threshold", "set threshold for start silence detection",          OFFSET(start_threshold),     AV_OPT_TYPE_DOUBLE,   {.dbl=0},     0,   DBL_MAX, AF },
    { "start_silence",   "set start duration of silence part to keep",         OFFSET(start_silence_opt),   AV_OPT_TYPE_DURATION, {.i64=0},     0, INT32_MAX, AF },
    { "start_mode",      "set which channel will trigger trimming from start", OFFSET(start_mode),          AV_OPT_TYPE_INT,      {.i64=T_ANY}, T_ANY, T_ALL, AF, "mode" },
    {   "any",           0,                                                    0,                           AV_OPT_TYPE_CONST,    {.i64=T_ANY}, 0,         0, AF, "mode" },
    {   "all",           0,                                                    0,                           AV_OPT_TYPE_CONST,    {.i64=T_ALL}, 0,         0, AF, "mode" },
    { "stop_periods",    NULL,                                                 OFFSET(stop_periods),        AV_OPT_TYPE_INT,      {.i64=0}, -9000,      9000, AF },
    { "stop_duration",   "set stop duration of non-silence part",              OFFSET(stop_duration_opt),   AV_OPT_TYPE_DURATION, {.i64=0},     0, INT32_MAX, AF },
    { "stop_threshold",  "set threshold for stop silence detection",           OFFSET(stop_threshold),      AV_OPT_TYPE_DOUBLE,   {.dbl=0},     0,   DBL_MAX, AF },
    { "stop_silence",    "set stop duration of silence part to keep",          OFFSET(stop_silence_opt),    AV_OPT_TYPE_DURATION, {.i64=0},     0, INT32_MAX, AF },
    { "stop_mode",       "set which channel will trigger trimming from end",   OFFSET(stop_mode),           AV_OPT_TYPE_INT,      {.i64=T_ANY}, T_ANY, T_ALL, AF, "mode" },
    { "detection",       "set how silence is detected",                        OFFSET(detection),           AV_OPT_TYPE_INT,      {.i64=D_RMS}, D_PEAK,D_RMS, AF, "detection" },
    {   "peak",          "use absolute values of samples",                     0,                           AV_OPT_TYPE_CONST,    {.i64=D_PEAK},0,         0, AF, "detection" },
    {   "rms",           "use squared values of samples",                      0,                           AV_OPT_TYPE_CONST,    {.i64=D_RMS}, 0,         0, AF, "detection" },
    { "window",          "set duration of window in seconds",                  OFFSET(window_ratio),        AV_OPT_TYPE_DOUBLE,   {.dbl=0.02},  0,        10, AF },
    { NULL }
};

AVFILTER_DEFINE_CLASS(silenceremove);
/* Mean of absolute values over the analysis window, as it would be if
 * the oldest entry were replaced by |sample|.  Does not modify state. */
static double compute_peak(SilenceRemoveContext *s, double sample)
{
    const double sum = s->sum - *s->window_current + fabs(sample);

    return sum / s->window_size;
}
/* Overwrite the oldest window entry with |sample|, keep the running sum
 * in sync, and advance the circular cursor. */
static void update_peak(SilenceRemoveContext *s, double sample)
{
    s->sum -= *s->window_current;
    *s->window_current = fabs(sample);
    s->sum += *s->window_current;

    if (++s->window_current >= s->window_end)
        s->window_current = s->window;
}
/* RMS over the analysis window, as it would be if the oldest entry were
 * replaced by sample^2.  Does not modify state. */
static double compute_rms(SilenceRemoveContext *s, double sample)
{
    const double sum = s->sum - *s->window_current + sample * sample;

    return sqrt(sum / s->window_size);
}
/* Overwrite the oldest window entry with sample^2, keep the running sum
 * in sync, and advance the circular cursor. */
static void update_rms(SilenceRemoveContext *s, double sample)
{
    s->sum -= *s->window_current;
    *s->window_current = sample * sample;
    s->sum += *s->window_current;

    if (++s->window_current >= s->window_end)
        s->window_current = s->window;
}
/* Resolve option values into callbacks and normalize stop_periods. */
static av_cold int init(AVFilterContext *ctx)
{
    SilenceRemoveContext *s = ctx->priv;

    /* A negative stop_periods means: restart detection after each removed
     * silence, i.e. strip every silent stretch, not just the last one. */
    if (s->stop_periods < 0) {
        s->stop_periods = -s->stop_periods;
        s->restart = 1;
    }

    if (s->detection == D_PEAK) {
        s->update  = update_peak;
        s->compute = compute_peak;
    } else {
        s->update  = update_rms;
        s->compute = compute_rms;
    }

    return 0;
}
/* Reset the circular analysis window and its running sum to zero. */
static void clear_window(SilenceRemoveContext *s)
{
    memset(s->window, 0, sizeof(*s->window) * s->window_size);

    s->window_current = s->window;
    s->window_end     = s->window + s->window_size;
    s->sum            = 0;
}
/* Allocate the analysis window plus the hold-off/silence ring buffers and
 * convert duration options to per-channel sample counts.  Buffers that
 * were already allocated when an ENOMEM is returned are released by
 * uninit(). */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    SilenceRemoveContext *s = ctx->priv;

    s->next_pts = AV_NOPTS_VALUE;

    /* Window covers window_ratio seconds of interleaved audio, at least
     * one entry. */
    s->window_size = FFMAX((inlink->sample_rate * s->window_ratio), 1) * inlink->channels;
    s->window = av_malloc_array(s->window_size, sizeof(*s->window));
    if (!s->window)
        return AVERROR(ENOMEM);

    clear_window(s);

    /* Options were parsed in AV_TIME_BASE (microsecond) units; rescale
     * them to samples per channel at the input rate. */
    s->start_duration = av_rescale(s->start_duration_opt, inlink->sample_rate,
                                   AV_TIME_BASE);
    s->start_silence  = av_rescale(s->start_silence_opt, inlink->sample_rate,
                                   AV_TIME_BASE);
    s->stop_duration  = av_rescale(s->stop_duration_opt, inlink->sample_rate,
                                   AV_TIME_BASE);
    s->stop_silence   = av_rescale(s->stop_silence_opt, inlink->sample_rate,
                                   AV_TIME_BASE);

    /* Each buffer holds duration-many frames of all channels; FFMAX keeps
     * the allocation non-empty when the duration option is 0. */
    s->start_holdoff = av_malloc_array(FFMAX(s->start_duration, 1),
                                       sizeof(*s->start_holdoff) *
                                       inlink->channels);
    if (!s->start_holdoff)
        return AVERROR(ENOMEM);

    s->start_silence_hold = av_malloc_array(FFMAX(s->start_silence, 1),
                                            sizeof(*s->start_silence_hold) *
                                            inlink->channels);
    if (!s->start_silence_hold)
        return AVERROR(ENOMEM);

    s->start_holdoff_offset = 0;
    s->start_holdoff_end    = 0;
    s->start_found_periods  = 0;

    s->stop_holdoff = av_malloc_array(FFMAX(s->stop_duration, 1),
                                      sizeof(*s->stop_holdoff) *
                                      inlink->channels);
    if (!s->stop_holdoff)
        return AVERROR(ENOMEM);

    s->stop_silence_hold = av_malloc_array(FFMAX(s->stop_silence, 1),
                                           sizeof(*s->stop_silence_hold) *
                                           inlink->channels);
    if (!s->stop_silence_hold)
        return AVERROR(ENOMEM);

    s->stop_holdoff_offset = 0;
    s->stop_holdoff_end    = 0;
    s->stop_found_periods  = 0;

    /* Only trim leading audio when the user asked for start periods. */
    if (s->start_periods)
        s->mode = SILENCE_TRIM;
    else
        s->mode = SILENCE_COPY;

    return 0;
}
/* Send up to *nb_samples_written samples of 'out' downstream, then, when
 * flush_silence is set, also emit the kept trailing silence from the
 * stop_silence_hold ring buffer.  'out' is consumed or freed in all
 * paths; errors are reported through *ret. */
static void flush(SilenceRemoveContext *s,
                  AVFrame *out, AVFilterLink *outlink,
                  int *nb_samples_written, int *ret, int flush_silence)
{
    AVFrame *silence;

    if (*nb_samples_written) {
        out->nb_samples = *nb_samples_written / outlink->channels;

        out->pts = s->next_pts;
        s->next_pts += av_rescale_q(out->nb_samples,
                                    (AVRational){1, outlink->sample_rate},
                                    outlink->time_base);

        *ret = ff_filter_frame(outlink, out);
        if (*ret < 0)
            return;
        *nb_samples_written = 0;
    } else {
        av_frame_free(&out);
    }

    if (s->stop_silence_end <= 0 || !flush_silence)
        return;

    silence = ff_get_audio_buffer(outlink, s->stop_silence_end / outlink->channels);
    if (!silence) {
        *ret = AVERROR(ENOMEM);
        return;
    }

    /* The ring buffer may have wrapped: copy the oldest run (from offset
     * to end) first, then the wrapped head. */
    if (s->stop_silence_offset < s->stop_silence_end) {
        memcpy(silence->data[0],
               &s->stop_silence_hold[s->stop_silence_offset],
               (s->stop_silence_end - s->stop_silence_offset) * sizeof(double));
    }

    if (s->stop_silence_offset > 0) {
        memcpy(silence->data[0] + (s->stop_silence_end - s->stop_silence_offset) * sizeof(double),
               &s->stop_silence_hold[0],
               s->stop_silence_offset * sizeof(double));
    }

    s->stop_silence_offset = 0;
    s->stop_silence_end = 0;

    silence->pts = s->next_pts;
    s->next_pts += av_rescale_q(silence->nb_samples,
                                (AVRational){1, outlink->sample_rate},
                                outlink->time_base);

    *ret = ff_filter_frame(outlink, silence);
}
/* Core state machine.  Consumes 'in' (interleaved doubles) and produces
 * zero or more output frames, moving between SILENCE_* states via gotos
 * so one input frame can be processed by several states in sequence. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    SilenceRemoveContext *s = ctx->priv;
    int i, j, threshold, ret = 0;
    int nbs, nb_samples_read, nb_samples_written;
    double *obuf, *ibuf = (double *)in->data[0];
    AVFrame *out;

    nb_samples_read = nb_samples_written = 0;

    if (s->next_pts == AV_NOPTS_VALUE)
        s->next_pts = in->pts;

    switch (s->mode) {
    case SILENCE_TRIM:
silence_trim:
        nbs = in->nb_samples - nb_samples_read / outlink->channels;
        if (!nbs)
            break;

        for (i = 0; i < nbs; i++) {
            /* Combine per-channel detections per start_mode. */
            if (s->start_mode == T_ANY) {
                threshold = 0;
                for (j = 0; j < outlink->channels; j++) {
                    threshold |= s->compute(s, ibuf[j]) > s->start_threshold;
                }
            } else {
                threshold = 1;
                for (j = 0; j < outlink->channels; j++) {
                    threshold &= s->compute(s, ibuf[j]) > s->start_threshold;
                }
            }

            if (threshold) {
                /* Non-silent frame: stash it until a full start_duration
                 * run has been seen, then count the period. */
                for (j = 0; j < outlink->channels; j++) {
                    s->update(s, *ibuf);
                    s->start_holdoff[s->start_holdoff_end++] = *ibuf++;
                }
                nb_samples_read += outlink->channels;

                if (s->start_holdoff_end >= s->start_duration * outlink->channels) {
                    if (++s->start_found_periods >= s->start_periods) {
                        s->mode = SILENCE_TRIM_FLUSH;
                        goto silence_trim_flush;
                    }

                    s->start_holdoff_offset = 0;
                    s->start_holdoff_end = 0;
                    s->start_silence_offset = 0;
                    s->start_silence_end = 0;
                }
            } else {
                /* Silent frame: drop any partial hold-off and keep up to
                 * start_silence samples in the ring buffer. */
                s->start_holdoff_end = 0;

                for (j = 0; j < outlink->channels; j++) {
                    s->update(s, ibuf[j]);
                    if (s->start_silence) {
                        s->start_silence_hold[s->start_silence_offset++] = ibuf[j];
                        s->start_silence_end = FFMIN(s->start_silence_end + 1, outlink->channels * s->start_silence);
                        if (s->start_silence_offset >= outlink->channels * s->start_silence) {
                            s->start_silence_offset = 0;
                        }
                    }
                }

                ibuf += outlink->channels;
                nb_samples_read += outlink->channels;
            }
        }
        break;

    case SILENCE_TRIM_FLUSH:
silence_trim_flush:
        nbs  = s->start_holdoff_end - s->start_holdoff_offset;
        nbs -= nbs % outlink->channels;
        if (!nbs)
            break;

        out = ff_get_audio_buffer(outlink, nbs / outlink->channels + s->start_silence_end / outlink->channels);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        /* Prepend the kept leading silence (ring buffer, oldest first),
         * then the buffered non-silence. */
        if (s->start_silence_end > 0) {
            if (s->start_silence_offset < s->start_silence_end) {
                memcpy(out->data[0],
                       &s->start_silence_hold[s->start_silence_offset],
                       (s->start_silence_end - s->start_silence_offset) * sizeof(double));
            }

            if (s->start_silence_offset > 0) {
                memcpy(out->data[0] + (s->start_silence_end - s->start_silence_offset) * sizeof(double),
                       &s->start_silence_hold[0],
                       s->start_silence_offset * sizeof(double));
            }
        }

        memcpy(out->data[0] + s->start_silence_end * sizeof(double),
               &s->start_holdoff[s->start_holdoff_offset],
               nbs * sizeof(double));

        out->pts = s->next_pts;
        s->next_pts += av_rescale_q(out->nb_samples,
                                    (AVRational){1, outlink->sample_rate},
                                    outlink->time_base);

        s->start_holdoff_offset += nbs;

        ret = ff_filter_frame(outlink, out);

        if (s->start_holdoff_offset == s->start_holdoff_end) {
            s->start_holdoff_offset = 0;
            s->start_holdoff_end = 0;
            s->start_silence_offset = 0;
            s->start_silence_end = 0;
            s->mode = SILENCE_COPY;
            goto silence_copy;
        }
        break;

    case SILENCE_COPY:
silence_copy:
        nbs = in->nb_samples - nb_samples_read / outlink->channels;
        if (!nbs)
            break;

        out = ff_get_audio_buffer(outlink, nbs);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        obuf = (double *)out->data[0];

        if (s->stop_periods) {
            for (i = 0; i < nbs; i++) {
                /* Combine per-channel detections per stop_mode. */
                if (s->stop_mode == T_ANY) {
                    threshold = 0;
                    for (j = 0; j < outlink->channels; j++) {
                        threshold |= s->compute(s, ibuf[j]) > s->stop_threshold;
                    }
                } else {
                    threshold = 1;
                    for (j = 0; j < outlink->channels; j++) {
                        threshold &= s->compute(s, ibuf[j]) > s->stop_threshold;
                    }
                }

                if (threshold && s->stop_holdoff_end && !s->stop_silence) {
                    /* Silence run ended before reaching stop_duration:
                     * flush the hold-off back to the output. */
                    s->mode = SILENCE_COPY_FLUSH;
                    flush(s, out, outlink, &nb_samples_written, &ret, 0);
                    goto silence_copy_flush;
                } else if (threshold) {
                    for (j = 0; j < outlink->channels; j++) {
                        s->update(s, *ibuf);
                        *obuf++ = *ibuf++;
                    }
                    nb_samples_read    += outlink->channels;
                    nb_samples_written += outlink->channels;
                } else if (!threshold) {
                    /* Silent frame: queue it in the hold-off buffer and
                     * remember up to stop_silence samples of it. */
                    for (j = 0; j < outlink->channels; j++) {
                        s->update(s, *ibuf);
                        if (s->stop_silence) {
                            s->stop_silence_hold[s->stop_silence_offset++] = *ibuf;
                            s->stop_silence_end = FFMIN(s->stop_silence_end + 1, outlink->channels * s->stop_silence);
                            if (s->stop_silence_offset >= outlink->channels * s->stop_silence) {
                                s->stop_silence_offset = 0;
                            }
                        }

                        s->stop_holdoff[s->stop_holdoff_end++] = *ibuf++;
                    }
                    nb_samples_read += outlink->channels;

                    if (s->stop_holdoff_end >= s->stop_duration * outlink->channels) {
                        if (++s->stop_found_periods >= s->stop_periods) {
                            s->stop_holdoff_offset = 0;
                            s->stop_holdoff_end = 0;

                            if (!s->restart) {
                                /* Final silence found: stop output. */
                                s->mode = SILENCE_STOP;
                                flush(s, out, outlink, &nb_samples_written, &ret, 1);
                                goto silence_stop;
                            } else {
                                /* Remove this silence and start over
                                 * looking for the next non-silence. */
                                s->stop_found_periods = 0;
                                s->start_found_periods = 0;
                                s->start_holdoff_offset = 0;
                                s->start_holdoff_end = 0;
                                s->start_silence_offset = 0;
                                s->start_silence_end = 0;
                                clear_window(s);
                                s->mode = SILENCE_TRIM;
                                flush(s, out, outlink, &nb_samples_written, &ret, 1);
                                goto silence_trim;
                            }
                        }
                        s->mode = SILENCE_COPY_FLUSH;
                        flush(s, out, outlink, &nb_samples_written, &ret, 0);
                        goto silence_copy_flush;
                    }
                }
            }
            flush(s, out, outlink, &nb_samples_written, &ret, 0);
        } else {
            /* No trailing-silence handling requested: plain copy. */
            memcpy(obuf, ibuf, sizeof(double) * nbs * outlink->channels);

            out->pts = s->next_pts;
            s->next_pts += av_rescale_q(out->nb_samples,
                                        (AVRational){1, outlink->sample_rate},
                                        outlink->time_base);

            ret = ff_filter_frame(outlink, out);
        }
        break;

    case SILENCE_COPY_FLUSH:
silence_copy_flush:
        nbs  = s->stop_holdoff_end - s->stop_holdoff_offset;
        nbs -= nbs % outlink->channels;
        if (!nbs)
            break;

        out = ff_get_audio_buffer(outlink, nbs / outlink->channels);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }

        memcpy(out->data[0], &s->stop_holdoff[s->stop_holdoff_offset],
               nbs * sizeof(double));

        s->stop_holdoff_offset += nbs;

        out->pts = s->next_pts;
        s->next_pts += av_rescale_q(out->nb_samples,
                                    (AVRational){1, outlink->sample_rate},
                                    outlink->time_base);

        ret = ff_filter_frame(outlink, out);

        if (s->stop_holdoff_offset == s->stop_holdoff_end) {
            s->stop_holdoff_offset = 0;
            s->stop_holdoff_end = 0;
            s->stop_silence_offset = 0;
            s->stop_silence_end = 0;
            s->mode = SILENCE_COPY;
            goto silence_copy;
        }
        break;
    case SILENCE_STOP:
silence_stop:
        break;
    }

    av_frame_free(&in);

    return ret;
}
/* Pull from upstream; on EOF, emit whatever non-silence is still parked
 * in the stop hold-off buffer before entering the terminal state. */
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SilenceRemoveContext *s = ctx->priv;
    int ret = ff_request_frame(ctx->inputs[0]);

    if (ret == AVERROR_EOF && (s->mode == SILENCE_COPY_FLUSH ||
                               s->mode == SILENCE_COPY)) {
        int leftover = s->stop_holdoff_end - s->stop_holdoff_offset;

        if (leftover) {
            AVFrame *frame = ff_get_audio_buffer(outlink, leftover / outlink->channels);

            if (!frame)
                return AVERROR(ENOMEM);

            memcpy(frame->data[0], &s->stop_holdoff[s->stop_holdoff_offset],
                   leftover * sizeof(double));
            frame->pts = s->next_pts;
            s->next_pts += av_rescale_q(frame->nb_samples,
                                        (AVRational){1, outlink->sample_rate},
                                        outlink->time_base);
            ret = ff_filter_frame(outlink, frame);
        }
        s->mode = SILENCE_STOP;
    }

    return ret;
}
/* Advertise: any channel count, double-precision samples, any rate. */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL, AV_SAMPLE_FMT_NONE
    };
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *formats;
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);

    return ff_set_common_samplerates(ctx, formats);
}
/* Release buffers allocated in config_input(); av_freep() is a no-op on
 * pointers that were never allocated. */
static av_cold void uninit(AVFilterContext *ctx)
{
    SilenceRemoveContext *s = ctx->priv;

    av_freep(&s->window);
    av_freep(&s->start_holdoff);
    av_freep(&s->start_silence_hold);
    av_freep(&s->stop_holdoff);
    av_freep(&s->stop_silence_hold);
}
static const AVFilterPad silenceremove_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};

static const AVFilterPad silenceremove_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
    },
    { NULL }
};

/* Filter definition registered in allfilters.c. */
AVFilter ff_af_silenceremove = {
    .name          = "silenceremove",
    .description   = NULL_IF_CONFIG_SMALL("Remove silence."),
    .priv_size     = sizeof(SilenceRemoveContext),
    .priv_class    = &silenceremove_class,
    .init          = init,
    .uninit        = uninit,
    .query_formats = query_formats,
    .inputs        = silenceremove_inputs,
    .outputs       = silenceremove_outputs,
};

1098
externals/ffmpeg/libavfilter/af_sofalizer.c vendored Executable file

File diff suppressed because it is too large Load Diff

361
externals/ffmpeg/libavfilter/af_stereotools.c vendored Executable file
View File

@@ -0,0 +1,361 @@
/*
* Copyright (C) 2001-2010 Krzysztof Foltman, Markus Schmidt, Thor Harald Johansen
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
/* Private context of the stereotools filter; processing works on
 * interleaved stereo doubles. */
typedef struct StereoToolsContext {
    const AVClass *class;

    int softclip;            /* apply atan soft clipping on input */
    int mute_l;              /* mute left channel */
    int mute_r;              /* mute right channel */
    int phase_l;             /* invert left channel phase */
    int phase_r;             /* invert right channel phase */
    int mode;                /* stereo matrix mode (lr>lr ... ms>rr) */
    int bmode_in;            /* input balance mode */
    int bmode_out;           /* output balance mode */
    double slev;             /* side level */
    double sbal;             /* side balance */
    double mlev;             /* middle level */
    double mpan;             /* middle pan */
    double phase;            /* stereo phase in degrees */
    double base;             /* stereo base */
    double delay;            /* inter-channel delay in ms; sign = channel */
    double balance_in;
    double balance_out;
    double phase_sin_coef;   /* sin(phase), precomputed in config_input() */
    double phase_cos_coef;   /* cos(phase), precomputed in config_input() */
    double sc_level;         /* soft-clip level */
    double inv_atan_shape;   /* 1/atan(sc_level), precomputed */
    double level_in;
    double level_out;

    double *buffer;          /* interleaved circular delay line */
    int length;              /* delay line length in doubles (even) */
    int pos;                 /* current write position in buffer */
} StereoToolsContext;
#define OFFSET(x) offsetof(StereoToolsContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* User options; "mode" and "bmode" entries are named constants for the
 * corresponding AV_OPT_TYPE_INT options. */
static const AVOption stereotools_options[] = {
    { "level_in",    "set level in",             OFFSET(level_in),    AV_OPT_TYPE_DOUBLE, {.dbl=1},   0.015625,  64, A },
    { "level_out",   "set level out",            OFFSET(level_out),   AV_OPT_TYPE_DOUBLE, {.dbl=1},   0.015625,  64, A },
    { "balance_in",  "set balance in",           OFFSET(balance_in),  AV_OPT_TYPE_DOUBLE, {.dbl=0},         -1,   1, A },
    { "balance_out", "set balance out",          OFFSET(balance_out), AV_OPT_TYPE_DOUBLE, {.dbl=0},         -1,   1, A },
    { "softclip",    "enable softclip",          OFFSET(softclip),    AV_OPT_TYPE_BOOL,   {.i64=0},          0,   1, A },
    { "mutel",       "mute L",                   OFFSET(mute_l),      AV_OPT_TYPE_BOOL,   {.i64=0},          0,   1, A },
    { "muter",       "mute R",                   OFFSET(mute_r),      AV_OPT_TYPE_BOOL,   {.i64=0},          0,   1, A },
    { "phasel",      "phase L",                  OFFSET(phase_l),     AV_OPT_TYPE_BOOL,   {.i64=0},          0,   1, A },
    { "phaser",      "phase R",                  OFFSET(phase_r),     AV_OPT_TYPE_BOOL,   {.i64=0},          0,   1, A },
    { "mode",        "set stereo mode",          OFFSET(mode),        AV_OPT_TYPE_INT,    {.i64=0},          0,   8, A, "mode" },
    {     "lr>lr",   0,                          0,                   AV_OPT_TYPE_CONST,  {.i64=0},          0,   0, A, "mode" },
    {     "lr>ms",   0,                          0,                   AV_OPT_TYPE_CONST,  {.i64=1},          0,   0, A, "mode" },
    {     "ms>lr",   0,                          0,                   AV_OPT_TYPE_CONST,  {.i64=2},          0,   0, A, "mode" },
    {     "lr>ll",   0,                          0,                   AV_OPT_TYPE_CONST,  {.i64=3},          0,   0, A, "mode" },
    {     "lr>rr",   0,                          0,                   AV_OPT_TYPE_CONST,  {.i64=4},          0,   0, A, "mode" },
    {     "lr>l+r",  0,                          0,                   AV_OPT_TYPE_CONST,  {.i64=5},          0,   0, A, "mode" },
    {     "lr>rl",   0,                          0,                   AV_OPT_TYPE_CONST,  {.i64=6},          0,   0, A, "mode" },
    {     "ms>ll",   0,                          0,                   AV_OPT_TYPE_CONST,  {.i64=7},          0,   0, A, "mode" },
    {     "ms>rr",   0,                          0,                   AV_OPT_TYPE_CONST,  {.i64=8},          0,   0, A, "mode" },
    { "slev",        "set side level",           OFFSET(slev),        AV_OPT_TYPE_DOUBLE, {.dbl=1},   0.015625,  64, A },
    { "sbal",        "set side balance",         OFFSET(sbal),        AV_OPT_TYPE_DOUBLE, {.dbl=0},         -1,   1, A },
    { "mlev",        "set middle level",         OFFSET(mlev),        AV_OPT_TYPE_DOUBLE, {.dbl=1},   0.015625,  64, A },
    { "mpan",        "set middle pan",           OFFSET(mpan),        AV_OPT_TYPE_DOUBLE, {.dbl=0},         -1,   1, A },
    { "base",        "set stereo base",          OFFSET(base),        AV_OPT_TYPE_DOUBLE, {.dbl=0},         -1,   1, A },
    { "delay",       "set delay",                OFFSET(delay),       AV_OPT_TYPE_DOUBLE, {.dbl=0},        -20,  20, A },
    { "sclevel",     "set S/C level",            OFFSET(sc_level),    AV_OPT_TYPE_DOUBLE, {.dbl=1},          1, 100, A },
    { "phase",       "set stereo phase",         OFFSET(phase),       AV_OPT_TYPE_DOUBLE, {.dbl=0},          0, 360, A },
    { "bmode_in",    "set balance in mode",      OFFSET(bmode_in),    AV_OPT_TYPE_INT,    {.i64=0},          0,   2, A, "bmode" },
    {     "balance",   0,                        0,                   AV_OPT_TYPE_CONST,  {.i64=0},          0,   0, A, "bmode" },
    {     "amplitude", 0,                        0,                   AV_OPT_TYPE_CONST,  {.i64=1},          0,   0, A, "bmode" },
    {     "power",     0,                        0,                   AV_OPT_TYPE_CONST,  {.i64=2},          0,   0, A, "bmode" },
    { "bmode_out",   "set balance out mode",     OFFSET(bmode_out),   AV_OPT_TYPE_INT,    {.i64=0},          0,   2, A, "bmode" },
    { NULL }
};

AVFILTER_DEFINE_CLASS(stereotools);
/* Advertise: stereo layout, double-precision samples, any sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;
    int ret;

    if ((ret = ff_add_format                 (&formats, AV_SAMPLE_FMT_DBL  )) < 0 ||
        (ret = ff_set_common_formats         (ctx     , formats            )) < 0 ||
        (ret = ff_add_channel_layout         (&layout , AV_CH_LAYOUT_STEREO)) < 0 ||
        (ret = ff_set_common_channel_layouts (ctx     , layout             )) < 0)
        return ret;

    formats = ff_all_samplerates();
    /* ff_all_samplerates() returns NULL on allocation failure; the
     * original code passed NULL on unchecked. */
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Allocate a 50 ms stereo-interleaved delay line and precompute the
 * per-instance trig/clip constants used in the per-sample loop. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    StereoToolsContext *s = ctx->priv;

    /* Two entries (L,R) per frame; must be even and hold at least one
     * sample pair. */
    s->length = 2 * inlink->sample_rate * 0.05;
    if (s->length <= 1 || s->length & 1) {
        av_log(ctx, AV_LOG_ERROR, "sample rate is too small\n");
        return AVERROR(EINVAL);
    }
    s->buffer = av_calloc(s->length, sizeof(*s->buffer));
    if (!s->buffer)
        return AVERROR(ENOMEM);

    s->inv_atan_shape = 1.0 / atan(s->sc_level);
    s->phase_cos_coef = cos(s->phase / 180 * M_PI);
    s->phase_sin_coef = sin(s->phase / 180 * M_PI);

    return 0;
}
/* Per-sample stereo processing chain: input gain/balance, optional soft
 * clip, stereo matrix (mode), mute/phase, delay line, stereo base,
 * phase rotation, output balance and gain. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    StereoToolsContext *s = ctx->priv;
    const double *src = (const double *)in->data[0];
    const double sb = s->base < 0 ? s->base * 0.5 : s->base;
    const double sbal = 1 + s->sbal;
    const double mpan = 1 + s->mpan;
    const double slev = s->slev;
    const double mlev = s->mlev;
    const double balance_in = s->balance_in;
    const double balance_out = s->balance_out;
    const double level_in = s->level_in;
    const double level_out = s->level_out;
    const double sc_level = s->sc_level;
    const double delay = s->delay;
    const int length = s->length;
    const int mute_l = s->mute_l;
    const int mute_r = s->mute_r;
    const int phase_l = s->phase_l;
    const int phase_r = s->phase_r;
    double *buffer = s->buffer;
    AVFrame *out;
    double *dst;
    int nbuf = inlink->sample_rate * (fabs(delay) / 1000.);
    int n;

    /* Delay in whole sample pairs (interleaved => even offset). */
    nbuf -= nbuf % 2;

    /* Process in place when the input frame is writable. */
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];

    for (n = 0; n < in->nb_samples; n++, src += 2, dst += 2) {
        double L = src[0], R = src[1], l, r, m, S, gl, gr, gd;

        L *= level_in;
        R *= level_in;

        /* Input balance. */
        gl = 1. - FFMAX(0., balance_in);
        gr = 1. + FFMIN(0., balance_in);
        switch (s->bmode_in) {
        case 1:
            gd = gl - gr;
            gl = 1. + gd;
            gr = 1. - gd;
            break;
        case 2:
            if (balance_in < 0.) {
                gr = FFMAX(0.5, gr);
                gl = 1. / gr;
            } else if (balance_in > 0.) {
                gl = FFMAX(0.5, gl);
                gr = 1. / gl;
            }
            break;
        }
        L *= gl;
        R *= gr;

        if (s->softclip) {
            R = s->inv_atan_shape * atan(R * sc_level);
            L = s->inv_atan_shape * atan(L * sc_level);
        }

        /* Stereo matrix per the "mode" option (m = mid, S = side). */
        switch (s->mode) {
        case 0:
            m = (L + R) * 0.5;
            S = (L - R) * 0.5;
            l = m * mlev * FFMIN(1., 2. - mpan) + S * slev * FFMIN(1., 2. - sbal);
            r = m * mlev * FFMIN(1., mpan) - S * slev * FFMIN(1., sbal);
            L = l;
            R = r;
            break;
        case 1:
            l = L * FFMIN(1., 2. - sbal);
            r = R * FFMIN(1., sbal);
            L = 0.5 * (l + r) * mlev;
            R = 0.5 * (l - r) * slev;
            break;
        case 2:
            l = L * mlev * FFMIN(1., 2. - mpan) + R * slev * FFMIN(1., 2. - sbal);
            r = L * mlev * FFMIN(1., mpan) - R * slev * FFMIN(1., sbal);
            L = l;
            R = r;
            break;
        case 3:
            R = L;
            break;
        case 4:
            L = R;
            break;
        case 5:
            L = (L + R) / 2;
            R = L;
            break;
        case 6:
            l = L;
            L = R;
            R = l;
            m = (L + R) * 0.5;
            S = (L - R) * 0.5;
            l = m * mlev * FFMIN(1., 2. - mpan) + S * slev * FFMIN(1., 2. - sbal);
            r = m * mlev * FFMIN(1., mpan) - S * slev * FFMIN(1., sbal);
            L = l;
            R = r;
            break;
        case 7:
            l = L * mlev * FFMIN(1., 2. - mpan) + R * slev * FFMIN(1., 2. - sbal);
            L = l;
            R = l;
            break;
        case 8:
            r = L * mlev * FFMIN(1., mpan) - R * slev * FFMIN(1., sbal);
            L = r;
            R = r;
            break;
        }

        /* Mute / phase invert (options are 0 or 1). */
        L *= 1. - mute_l;
        R *= 1. - mute_r;

        L *= (2. * (1. - phase_l)) - 1.;
        R *= (2. * (1. - phase_r)) - 1.;

        /* Circular delay line: write current pair, read the delayed
         * sample for whichever channel the delay sign selects. */
        buffer[s->pos  ] = L;
        buffer[s->pos+1] = R;

        if (delay > 0.) {
            R = buffer[(s->pos - (int)nbuf + 1 + length) % length];
        } else if (delay < 0.) {
            L = buffer[(s->pos - (int)nbuf + length) % length];
        }

        /* Stereo base (cross-blend controlled by sb). */
        l = L + sb * L - sb * R;
        r = R + sb * R - sb * L;

        L = l;
        R = r;

        /* Stereo phase rotation. */
        l = L * s->phase_cos_coef - R * s->phase_sin_coef;
        r = L * s->phase_sin_coef + R * s->phase_cos_coef;

        L = l;
        R = r;

        s->pos = (s->pos + 2) % s->length;

        /* Output balance. */
        gl = 1. - FFMAX(0., balance_out);
        gr = 1. + FFMIN(0., balance_out);
        switch (s->bmode_out) {
        case 1:
            gd = gl - gr;
            gl = 1. + gd;
            gr = 1. - gd;
            break;
        case 2:
            if (balance_out < 0.) {
                gr = FFMAX(0.5, gr);
                gl = 1. / gr;
            } else if (balance_out > 0.) {
                gl = FFMAX(0.5, gl);
                gr = 1. / gl;
            }
            break;
        }
        L *= gl;
        R *= gr;

        L *= level_out;
        R *= level_out;

        dst[0] = L;
        dst[1] = R;
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Release the delay line allocated in config_input(). */
static av_cold void uninit(AVFilterContext *ctx)
{
    StereoToolsContext *s = ctx->priv;

    av_freep(&s->buffer);
}
static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Filter definition registered in allfilters.c. */
AVFilter ff_af_stereotools = {
    .name          = "stereotools",
    .description   = NULL_IF_CONFIG_SMALL("Apply various stereo tools."),
    .query_formats = query_formats,
    .priv_size     = sizeof(StereoToolsContext),
    .priv_class    = &stereotools_class,
    .uninit        = uninit,
    .inputs        = inputs,
    .outputs       = outputs,
};

170
externals/ffmpeg/libavfilter/af_stereowiden.c vendored Executable file
View File

@@ -0,0 +1,170 @@
/*
* Copyright (C) 2012 VLC authors and VideoLAN
* Author : Sukrit Sangwan < sukritsangwan at gmail dot com >
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
/* Private context of the stereowiden filter; processing works on
 * interleaved stereo floats. */
typedef struct StereoWidenContext {
    const AVClass *class;

    float delay;      /* delay time in ms (option) */
    float feedback;   /* gain of the delayed, channel-swapped signal */
    float crossfeed;  /* gain of the opposite channel's dry signal */
    float drymix;     /* gain of the dry signal */
    float *buffer;    /* interleaved circular delay line */
    float *cur;       /* current position inside buffer */
    int length;       /* delay line length in floats (2 per frame) */
} StereoWidenContext;
#define OFFSET(x) offsetof(StereoWidenContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
/* AT options may additionally be changed at runtime (process_command). */
#define AT AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_RUNTIME_PARAM

static const AVOption stereowiden_options[] = {
    { "delay",     "set delay time",    OFFSET(delay),     AV_OPT_TYPE_FLOAT, {.dbl=20}, 1, 100, A },
    { "feedback",  "set feedback gain", OFFSET(feedback),  AV_OPT_TYPE_FLOAT, {.dbl=.3}, 0, 0.9, AT },
    { "crossfeed", "set cross feed",    OFFSET(crossfeed), AV_OPT_TYPE_FLOAT, {.dbl=.3}, 0, 0.8, AT },
    { "drymix",    "set dry-mix",       OFFSET(drymix),    AV_OPT_TYPE_FLOAT, {.dbl=.8}, 0, 1.0, AT },
    { NULL }
};

AVFILTER_DEFINE_CLASS(stereowiden);
/* Advertise: stereo layout, float samples, any sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layout = NULL;
    int ret;

    if ((ret = ff_add_format                 (&formats, AV_SAMPLE_FMT_FLT  )) < 0 ||
        (ret = ff_set_common_formats         (ctx     , formats            )) < 0 ||
        (ret = ff_add_channel_layout         (&layout , AV_CH_LAYOUT_STEREO)) < 0 ||
        (ret = ff_set_common_channel_layouts (ctx     , layout             )) < 0)
        return ret;

    formats = ff_all_samplerates();
    /* ff_all_samplerates() returns NULL on allocation failure; the
     * original code passed NULL on unchecked. */
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Allocate the interleaved delay line sized from the "delay" option.
 *
 * Rejects configurations where delay * sample_rate / 1000 truncates to
 * zero: the original code accepted them, leaving a zero-length buffer
 * that filter_frame() then read and wrote past (heap overflow), since
 * its wrap check `cur == buffer + length` is taken before any bounds
 * are enforced. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    StereoWidenContext *s = ctx->priv;

    /* ms -> frames, then two floats (L,R) per frame. */
    s->length = s->delay * inlink->sample_rate / 1000;
    s->length *= 2;
    if (s->length == 0) {
        av_log(ctx, AV_LOG_ERROR, "Delay line length is too short.\n");
        return AVERROR(EINVAL);
    }
    s->buffer = av_calloc(s->length, sizeof(*s->buffer));
    if (!s->buffer)
        return AVERROR(ENOMEM);
    s->cur = s->buffer;

    return 0;
}
/* Widen the stereo image: mix the dry signal with negated crossfeed from
 * the opposite channel and negated, delayed feedback from the swapped
 * channels stored in the circular delay line. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    StereoWidenContext *s = ctx->priv;
    const float *src = (const float *)in->data[0];
    const float drymix = s->drymix;
    const float crossfeed = s->crossfeed;
    const float feedback = s->feedback;
    AVFrame *out;
    float *dst;
    int i;

    /* Work in place when possible, otherwise grab a fresh buffer. */
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (float *)out->data[0];

    for (i = 0; i < in->nb_samples; i++, src += 2, dst += 2, s->cur += 2) {
        const float left = src[0], right = src[1];

        /* Wrap the delay-line cursor. */
        if (s->cur == s->buffer + s->length)
            s->cur = s->buffer;

        if (!ctx->is_disabled) {
            dst[0] = drymix * left  - crossfeed * right - feedback * s->cur[1];
            dst[1] = drymix * right - crossfeed * left  - feedback * s->cur[0];
        } else {
            /* Timeline-disabled: pass through. */
            dst[0] = left;
            dst[1] = right;
        }

        /* Remember the dry pair for later feedback. */
        s->cur[0] = left;
        s->cur[1] = right;
    }

    if (out != in)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Release the delay line allocated in config_input(). */
static av_cold void uninit(AVFilterContext *ctx)
{
    StereoWidenContext *s = ctx->priv;

    av_freep(&s->buffer);
}
static const AVFilterPad inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
        .config_props = config_input,
    },
    { NULL }
};

static const AVFilterPad outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};

/* Filter definition; supports timeline enable/disable and runtime
 * option changes via process_command. */
AVFilter ff_af_stereowiden = {
    .name            = "stereowiden",
    .description     = NULL_IF_CONFIG_SMALL("Apply stereo widening effect."),
    .query_formats   = query_formats,
    .priv_size       = sizeof(StereoWidenContext),
    .priv_class      = &stereowiden_class,
    .uninit          = uninit,
    .inputs          = inputs,
    .outputs         = outputs,
    .flags           = AVFILTER_FLAG_SUPPORT_TIMELINE_INTERNAL,
    .process_command = ff_filter_process_command,
};

View File

@@ -0,0 +1,388 @@
/*
* Copyright (c) 2002 Naoki Shibata
* Copyright (c) 2017 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
/* Number of equalizer bands and order of the Bessel series in izero(). */
#define NBANDS 17
#define M 15

/* One equalizer band: frequency range [lower, upper) and its gain. */
typedef struct EqParameter {
    float lower, upper, gain;
} EqParameter;
/* Private context of the superequalizer filter. */
typedef struct SuperEqualizerContext {
    const AVClass *class;

    EqParameter params[NBANDS + 1];  /* derived band edges + gains */

    float gains[NBANDS + 1];         /* user-set per-band gains */

    float fact[M + 1];               /* factorials 0!..M! for izero() */
    float aa;                        /* Kaiser attenuation parameter (dB) */
    float iza;                       /* izero(alpha(aa)), window normalizer */
    float *ires, *irest;             /* transformed FIR table and scratch */
    float *fsamples;                 /* RDFT work buffer for samples */
    int winlen, tabsize;             /* FIR window length, transform size */

    AVFrame *in, *out;               /* frame buffers used while filtering */
    RDFTContext *rdft, *irdft;       /* forward and inverse real DFTs */
} SuperEqualizerContext;
/* Upper band-edge frequencies in Hz, spaced half an octave apart
 * (each entry is sqrt(2) times the previous), from ~65 Hz to ~16.7 kHz. */
static const float bands[] = {
    65.406392, 92.498606, 130.81278, 184.99721, 261.62557, 369.99442, 523.25113, 739.9884, 1046.5023,
    1479.9768, 2093.0045, 2959.9536, 4186.0091, 5919.9072, 8372.0181, 11839.814, 16744.036
};
/* Truncated series approximation of the zeroth-order modified Bessel
 * function I0(x), using the factorials precomputed in s->fact[]. */
static float izero(SuperEqualizerContext *s, float x)
{
    float sum = 1;
    int m;

    for (m = 1; m <= M; m++) {
        const float t = pow(x / 2, m) / s->fact[m];

        sum += t * t;
    }

    return sum;
}
/* Sample n of an ideal low-pass impulse response with cutoff f at
 * sample rate fs: 2*f/fs * sinc(n * omega / fs). */
static float hn_lpf(int n, float f, float fs)
{
    const float t     = 1 / fs;
    const float omega = 2 * M_PI * f;
    const float x     = n * omega * t;

    if (x == 0)
        return 2 * f * t;
    return 2 * f * t * sinf(x) / x;
}
/* Unit impulse (Kronecker delta): 1 at n == 0, else 0. */
static float hn_imp(int n)
{
    if (n == 0)
        return 1.f;
    return 0.f;
}
/* FIR coefficient n of the multi-band equalizer: accumulate each band's
 * gain weighted by the difference of successive ideal low-pass
 * responses, then finish with the remaining gain against an impulse. */
static float hn(int n, EqParameter *param, float fs)
{
    float ret, lhn;
    int i;

    lhn = hn_lpf(n, param[0].upper, fs);
    ret = param[0].gain*lhn;

    for (i = 1; i < NBANDS + 1 && param[i].upper < fs / 2; i++) {
        float lhn2 = hn_lpf(n, param[i].upper, fs);
        ret += param[i].gain * (lhn2 - lhn);
        lhn = lhn2;
    }

    /* i is now the first band whose upper edge is at/above Nyquist. */
    ret += param[i].gain * (hn_imp(n) - lhn);

    return ret;
}
static float alpha(float a)
{
if (a <= 21)
return 0;
if (a <= 50)
return .5842f * pow(a - 21, 0.4f) + 0.07886f * (a - 21);
return .1102f * (a - 8.7f);
}
/* Kaiser window value at offset n for a window of length N, normalized
 * by the precomputed peak value s->iza. */
static float win(SuperEqualizerContext *s, float n, int N)
{
    const float shape = 1 - 4 * n * n / ((N - 1) * (N - 1));

    return izero(s, alpha(s->aa) * sqrtf(shape)) / s->iza;
}
/* Fill `param` from the per-band gain array `bc`: band i spans
 * [bands[i-1], bands[i]), with 0 below the first crossover and fs above
 * the last. */
static void process_param(float *bc, EqParameter *param, float fs)
{
    int band;

    for (band = 0; band <= NBANDS; band++) {
        EqParameter *p = &param[band];

        p->lower = band == 0 ? 0 : bands[band - 1];
        p->upper = band == NBANDS ? fs : bands[band];
        p->gain  = bc[band];
    }
}
/* One-time DSP setup: FFT contexts of size 1 << wb, work buffers, the
 * factorial table for izero() and the Kaiser normalization constant.
 * Returns 0 or AVERROR(ENOMEM); uninit() releases partial allocations. */
static int equ_init(SuperEqualizerContext *s, int wb)
{
    int i, j;

    s->rdft  = av_rdft_init(wb, DFT_R2C);
    s->irdft = av_rdft_init(wb, IDFT_C2R);
    if (!s->rdft || !s->irdft)
        return AVERROR(ENOMEM);

    s->aa      = 96;                   /* target stopband attenuation in dB */
    s->winlen  = (1 << (wb - 1)) - 1;  /* analysis window: half the FFT size - 1 */
    s->tabsize = 1 << wb;

    s->ires     = av_calloc(s->tabsize, sizeof(float));
    s->irest    = av_calloc(s->tabsize, sizeof(float));
    s->fsamples = av_calloc(s->tabsize, sizeof(float));
    /* Fix: these allocations were previously used unchecked; fail cleanly
     * on OOM instead of dereferencing NULL later. */
    if (!s->ires || !s->irest || !s->fsamples)
        return AVERROR(ENOMEM);

    /* fact[i] = i!, used by the Bessel series in izero(). */
    for (i = 0; i <= M; i++) {
        s->fact[i] = 1;
        for (j = 1; j <= i; j++)
            s->fact[i] *= j;
    }

    s->iza = izero(s, alpha(s->aa));

    return 0;
}
/* Build the FIR filter for sample rate fs from the gains in `lbc` and store
 * its spectrum in s->ires.  A windowed impulse response is zero-padded to
 * the FFT size and transformed once; filtering then happens per-frame in
 * the frequency domain.  `rbc` is accepted for symmetry but unused. */
static void make_fir(SuperEqualizerContext *s, float *lbc, float *rbc, EqParameter *param, float fs)
{
    const int winlen  = s->winlen;
    const int tabsize = s->tabsize;
    float *dst;
    int n;

    if (fs <= 0)
        return;

    process_param(lbc, param, fs);

    /* Centered, Kaiser-windowed impulse response, zero-padded to tabsize. */
    for (n = 0; n < winlen; n++)
        s->irest[n] = hn(n - winlen / 2, param, fs) * win(s, n - winlen / 2, winlen);
    for (; n < tabsize; n++)
        s->irest[n] = 0;

    av_rdft_calc(s->rdft, s->irest);

    dst = s->ires;
    for (n = 0; n < tabsize; n++)
        dst[n] = s->irest[n];
}
/* Filter one analysis window: forward RDFT of the input, complex multiply
 * with the FIR spectrum, inverse RDFT, then 50%-style overlap-add with the
 * tail of the previous window kept in s->out.  Consumes `in`. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    SuperEqualizerContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    const float *ires = s->ires;
    float *fsamples = s->fsamples;
    int ch, i;
    AVFrame *out = ff_get_audio_buffer(outlink, s->winlen);
    float *src, *dst, *ptr;
    if (!out) {
        av_frame_free(&in);
        return AVERROR(ENOMEM);
    }
    for (ch = 0; ch < in->channels; ch++) {
        ptr = (float *)out->extended_data[ch];
        dst = (float *)s->out->extended_data[ch];
        src = (float *)in->extended_data[ch];
        /* Copy input samples and zero-pad to the FFT size. */
        for (i = 0; i < in->nb_samples; i++)
            fsamples[i] = src[i];
        for (; i < s->tabsize; i++)
            fsamples[i] = 0;
        av_rdft_calc(s->rdft, fsamples);
        /* FFmpeg RDFT packing: bin 0 is DC, bin 1 is Nyquist, both real. */
        fsamples[0] = ires[0] * fsamples[0];
        fsamples[1] = ires[1] * fsamples[1];
        /* Remaining bins are interleaved (re, im) pairs: complex multiply
         * by the filter spectrum. */
        for (i = 1; i < s->tabsize / 2; i++) {
            float re, im;
            re = ires[i*2 ] * fsamples[i*2] - ires[i*2+1] * fsamples[i*2+1];
            im = ires[i*2+1] * fsamples[i*2] + ires[i*2 ] * fsamples[i*2+1];
            fsamples[i*2 ] = re;
            fsamples[i*2+1] = im;
        }
        av_rdft_calc(s->irdft, fsamples);
        /* Overlap-add: the first winlen samples accumulate onto the saved
         * tail; the rest start the next tail.  The 2/tabsize factor undoes
         * the unnormalized forward+inverse transform pair. */
        for (i = 0; i < s->winlen; i++)
            dst[i] += fsamples[i] / s->tabsize * 2;
        for (i = s->winlen; i < s->tabsize; i++)
            dst[i] = fsamples[i] / s->tabsize * 2;
        /* Emit the completed first half, then shift the tail down for the
         * next call. */
        for (i = 0; i < s->winlen; i++)
            ptr[i] = dst[i];
        for (i = 0; i < s->winlen; i++)
            dst[i] = dst[i+s->winlen];
    }
    out->pts = in->pts;
    av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Standard activate callback: pull exactly winlen samples per iteration so
 * filter_frame() always processes whole analysis windows, and forward
 * status/EOF and demand between the links. */
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *inlink = ctx->inputs[0];
    AVFilterLink *outlink = ctx->outputs[0];
    SuperEqualizerContext *s = ctx->priv;
    AVFrame *in = NULL;
    int ret;
    FF_FILTER_FORWARD_STATUS_BACK(outlink, inlink);
    /* Request fixed-size chunks: min == max == winlen. */
    ret = ff_inlink_consume_samples(inlink, s->winlen, s->winlen, &in);
    if (ret < 0)
        return ret;
    if (ret > 0)
        return filter_frame(inlink, in);
    FF_FILTER_FORWARD_STATUS(inlink, outlink);
    FF_FILTER_FORWARD_WANTED(outlink, inlink);
    return FFERROR_NOT_READY;
}
/* Filter init: set up DSP state with a fixed 2^14-point FFT. */
static av_cold int init(AVFilterContext *ctx)
{
    SuperEqualizerContext *s = ctx->priv;

    return equ_init(s, 14);
}
/* Advertise supported formats: planar float samples, any channel
 * count/layout, any sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLTP,
        AV_SAMPLE_FMT_NONE
    };
    int ret;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_fmts);
    /* Fix: the list/rate constructors can return NULL on OOM; they were
     * previously passed along unchecked (the other filters in this
     * library check them). */
    if (!formats)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, formats)) < 0)
        return ret;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Allocate the per-channel overlap-add accumulator (one FFT frame long);
 * it persists across filter_frame() calls. */
static int config_input(AVFilterLink *inlink)
{
    SuperEqualizerContext *s = inlink->dst->priv;

    s->out = ff_get_audio_buffer(inlink, s->tabsize);

    return s->out ? 0 : AVERROR(ENOMEM);
}
/* Output is configured: build the FIR spectrum for the negotiated rate. */
static int config_output(AVFilterLink *outlink)
{
    SuperEqualizerContext *s = outlink->src->priv;

    make_fir(s, s->gains, s->gains, s->params, outlink->sample_rate);

    return 0;
}
/* Release everything equ_init() and config_input() allocated; all of
 * these helpers are NULL-safe. */
static av_cold void uninit(AVFilterContext *ctx)
{
    SuperEqualizerContext *s = ctx->priv;

    av_rdft_end(s->rdft);
    av_rdft_end(s->irdft);
    av_frame_free(&s->out);
    av_freep(&s->ires);
    av_freep(&s->irest);
    av_freep(&s->fsamples);
}
/* One audio input; buffers are sized once the FFT is configured. */
static const AVFilterPad superequalizer_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
    },
    { NULL }
};
/* One audio output; the FIR is rebuilt when the output rate is known. */
static const AVFilterPad superequalizer_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_output,
    },
    { NULL }
};
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(SuperEqualizerContext, x)
/* 18 per-band linear gains (1 = unchanged, 0 = mute, up to 20x boost);
 * bands follow the crossover table above. */
static const AVOption superequalizer_options[] = {
    { "1b", "set 65Hz band gain", OFFSET(gains [0]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "2b", "set 92Hz band gain", OFFSET(gains [1]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "3b", "set 131Hz band gain", OFFSET(gains [2]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "4b", "set 185Hz band gain", OFFSET(gains [3]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "5b", "set 262Hz band gain", OFFSET(gains [4]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "6b", "set 370Hz band gain", OFFSET(gains [5]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "7b", "set 523Hz band gain", OFFSET(gains [6]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "8b", "set 740Hz band gain", OFFSET(gains [7]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "9b", "set 1047Hz band gain", OFFSET(gains [8]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "10b", "set 1480Hz band gain", OFFSET(gains [9]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "11b", "set 2093Hz band gain", OFFSET(gains[10]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "12b", "set 2960Hz band gain", OFFSET(gains[11]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "13b", "set 4186Hz band gain", OFFSET(gains[12]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "14b", "set 5920Hz band gain", OFFSET(gains[13]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "15b", "set 8372Hz band gain", OFFSET(gains[14]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "16b", "set 11840Hz band gain", OFFSET(gains[15]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "17b", "set 16744Hz band gain", OFFSET(gains[16]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { "18b", "set 20000Hz band gain", OFFSET(gains[17]), AV_OPT_TYPE_FLOAT, {.dbl=1}, 0, 20, AF },
    { NULL }
};
AVFILTER_DEFINE_CLASS(superequalizer);
/* Filter registration: FFT-based 18-band graphic equalizer. */
AVFilter ff_af_superequalizer = {
    .name = "superequalizer",
    .description = NULL_IF_CONFIG_SMALL("Apply 18 band equalization filter."),
    .priv_size = sizeof(SuperEqualizerContext),
    .priv_class = &superequalizer_class,
    .query_formats = query_formats,
    .init = init,
    .activate = activate,
    .uninit = uninit,
    .inputs = superequalizer_inputs,
    .outputs = superequalizer_outputs,
};

1800
externals/ffmpeg/libavfilter/af_surround.c vendored Executable file

File diff suppressed because it is too large Load Diff

172
externals/ffmpeg/libavfilter/af_tremolo.c vendored Executable file
View File

@@ -0,0 +1,172 @@
/*
* Copyright (c) 2015 Kyle Swanson <k@ylo.ph>.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
typedef struct TremoloContext {
    const AVClass *class;
    double freq;        /* LFO frequency in Hz */
    double depth;       /* modulation depth, 0..1 */
    double *table;      /* one precomputed LFO period of gain values */
    int table_size;     /* samples per LFO period */
    int index;          /* current read position in table */
} TremoloContext;
#define OFFSET(x) offsetof(TremoloContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption tremolo_options[] = {
    { "f", "set frequency in hertz", OFFSET(freq), AV_OPT_TYPE_DOUBLE, {.dbl = 5.0}, 0.1, 20000.0, FLAGS },
    { "d", "set depth as percentage", OFFSET(depth), AV_OPT_TYPE_DOUBLE, {.dbl = 0.5}, 0.0, 1.0, FLAGS },
    { NULL }
};
AVFILTER_DEFINE_CLASS(tremolo);
/* Amplitude-modulate each sample by the precomputed LFO table.  Samples are
 * packed (interleaved) doubles; the table index advances once per sample
 * frame so all channels share the same gain.  Works in place when the
 * input frame is writable. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    TremoloContext *s = ctx->priv;
    const double *src = (const double *)in->data[0];
    const int channels = inlink->channels;
    const int nb_samples = in->nb_samples;
    AVFrame *out;
    double *dst;
    int n, c;
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    dst = (double *)out->data[0];
    for (n = 0; n < nb_samples; n++) {
        for (c = 0; c < channels; c++)
            dst[c] = src[c] * s->table[s->index];
        dst += channels;
        src += channels;
        /* Wrap around at the end of one LFO period. */
        s->index++;
        if (s->index >= s->table_size)
            s->index = 0;
    }
    if (in != out)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Advertise supported formats: packed doubles, any channel count/layout,
 * any sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int err;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((err = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return err;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((err = ff_set_common_formats(ctx, formats)) < 0)
        return err;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Free the LFO lookup table; everything else is caller-owned. */
static av_cold void uninit(AVFilterContext *ctx)
{
    TremoloContext *s = ctx->priv;

    av_freep(&s->table);
}
/* Precompute one LFO period of per-sample gains.  The waveform is a sine
 * offset so the gain swings between 1 and 1 - depth. */
static int config_input(AVFilterLink *inlink)
{
    AVFilterContext *ctx = inlink->dst;
    TremoloContext *s = ctx->priv;
    const double offset = 1. - s->depth / 2.;
    int i;

    s->table_size = inlink->sample_rate / s->freq;
    /* Fix: freq may exceed the sample rate (option max is 20 kHz), which
     * truncated table_size to 0 and made filter_frame read table[0] out of
     * bounds of a zero-sized allocation. */
    if (s->table_size < 1)
        s->table_size = 1;
    s->table = av_malloc_array(s->table_size, sizeof(*s->table));
    if (!s->table)
        return AVERROR(ENOMEM);
    for (i = 0; i < s->table_size; i++) {
        double env = s->freq * i / inlink->sample_rate;
        env = sin(2 * M_PI * fmod(env + 0.25, 1.0));
        s->table[i] = env * (1 - fabs(offset)) + offset;
    }
    s->index = 0;
    return 0;
}
/* Single audio input; the LFO table is built once the rate is known. */
static const AVFilterPad avfilter_af_tremolo_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single pass-through audio output. */
static const AVFilterPad avfilter_af_tremolo_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration: sinusoidal amplitude modulation (tremolo). */
AVFilter ff_af_tremolo = {
    .name = "tremolo",
    .description = NULL_IF_CONFIG_SMALL("Apply tremolo effect."),
    .priv_size = sizeof(TremoloContext),
    .priv_class = &tremolo_class,
    .uninit = uninit,
    .query_formats = query_formats,
    .inputs = avfilter_af_tremolo_inputs,
    .outputs = avfilter_af_tremolo_outputs,
};

210
externals/ffmpeg/libavfilter/af_vibrato.c vendored Executable file
View File

@@ -0,0 +1,210 @@
/*
* Copyright (c) 2015 Kyle Swanson <k@ylo.ph>.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "avfilter.h"
#include "internal.h"
#include "audio.h"
#include "generate_wave_table.h"
typedef struct VibratoContext {
    const AVClass *class;
    double freq;            /* LFO frequency in Hz */
    double depth;           /* modulation depth, 0..1, scales the delay sweep */
    int channels;           /* channel count cached at config time */

    double **buf;           /* per-channel circular delay lines */
    int buf_index;          /* shared write position in the delay lines */
    int buf_size;           /* delay line length (~5 ms of audio) */

    double *wave_table;     /* one LFO period of fractional delay offsets */
    int wave_table_index;   /* current read position in wave_table */
    int wave_table_size;    /* samples per LFO period */
} VibratoContext;
#define OFFSET(x) offsetof(VibratoContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
static const AVOption vibrato_options[] = {
    { "f", "set frequency in hertz", OFFSET(freq), AV_OPT_TYPE_DOUBLE, {.dbl = 5.0}, 0.1, 20000.0, FLAGS },
    { "d", "set depth as percentage", OFFSET(depth), AV_OPT_TYPE_DOUBLE, {.dbl = 0.5}, 0.00, 1.0, FLAGS },
    { NULL }
};
AVFILTER_DEFINE_CLASS(vibrato);
/* Pitch-modulate by reading each channel's delay line at an LFO-swept,
 * linearly interpolated offset, while writing the incoming sample at the
 * current position.  Planar doubles; works in place when writable. */
static int filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    AVFilterContext *ctx = inlink->dst;
    VibratoContext *s = ctx->priv;
    AVFilterLink *outlink = ctx->outputs[0];
    AVFrame *out;
    int n, c;
    const double *src;
    double *dst;
    if (av_frame_is_writable(in)) {
        out = in;
    } else {
        out = ff_get_audio_buffer(outlink, in->nb_samples);
        if (!out) {
            av_frame_free(&in);
            return AVERROR(ENOMEM);
        }
        av_frame_copy_props(out, in);
    }
    for (n = 0; n < in->nb_samples; n++) {
        double integer, decimal;
        /* Split the swept delay into whole-sample and fractional parts. */
        decimal = modf(s->depth * s->wave_table[s->wave_table_index], &integer);
        s->wave_table_index++;
        if (s->wave_table_index >= s->wave_table_size)
            s->wave_table_index -= s->wave_table_size;
        for (c = 0; c < inlink->channels; c++) {
            int samp1_index, samp2_index;
            double *buf;
            double this_samp;
            src = (const double *)in->extended_data[c];
            dst = (double *)out->extended_data[c];
            buf = s->buf[c];
            /* Two adjacent taps in the circular buffer, with wrap-around. */
            samp1_index = s->buf_index + integer;
            if (samp1_index >= s->buf_size)
                samp1_index -= s->buf_size;
            samp2_index = samp1_index + 1;
            if (samp2_index >= s->buf_size)
                samp2_index -= s->buf_size;
            /* Read the input before overwriting the buffer slot, so that
             * in-place operation is safe. */
            this_samp = src[n];
            dst[n] = buf[samp1_index] + (decimal * (buf[samp2_index] - buf[samp1_index]));
            buf[s->buf_index] = this_samp;
        }
        s->buf_index++;
        if (s->buf_index >= s->buf_size)
            s->buf_index -= s->buf_size;
    }
    if (in != out)
        av_frame_free(&in);
    return ff_filter_frame(outlink, out);
}
/* Advertise supported formats: planar doubles, any channel count/layout,
 * any sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_DBLP,
        AV_SAMPLE_FMT_NONE
    };
    int err;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((err = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return err;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((err = ff_set_common_formats(ctx, formats)) < 0)
        return err;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Free the LFO table and all per-channel delay lines. */
static av_cold void uninit(AVFilterContext *ctx)
{
    VibratoContext *s = ctx->priv;
    int c;

    av_freep(&s->wave_table);
    /* Fix: config_input() sets s->channels before allocating s->buf, so if
     * that allocation failed this loop would dereference a NULL s->buf. */
    if (s->buf) {
        for (c = 0; c < s->channels; c++)
            av_freep(&s->buf[c]);
    }
    av_freep(&s->buf);
}
/* Allocate ~5 ms circular delay lines per channel and precompute one LFO
 * period of delay offsets spanning the buffer. */
static int config_input(AVFilterLink *inlink)
{
    int c;
    AVFilterContext *ctx = inlink->dst;
    VibratoContext *s = ctx->priv;

    s->channels = inlink->channels;
    s->buf = av_calloc(inlink->channels, sizeof(*s->buf));
    if (!s->buf)
        return AVERROR(ENOMEM);
    s->buf_size = inlink->sample_rate * 0.005;
    if (s->buf_size < 1)
        s->buf_size = 1;
    for (c = 0; c < s->channels; c++) {
        /* Fix: use av_calloc — filter_frame() reads the delay line before
         * it has been written through once, so av_malloc_array left the
         * first buf_size output samples depending on uninitialized memory. */
        s->buf[c] = av_calloc(s->buf_size, sizeof(*s->buf[c]));
        if (!s->buf[c])
            return AVERROR(ENOMEM);
    }
    s->buf_index = 0;

    s->wave_table_size = inlink->sample_rate / s->freq;
    /* Fix: freq may exceed the sample rate, truncating the size to 0 and
     * producing a zero-sized table that filter_frame() then reads. */
    if (s->wave_table_size < 1)
        s->wave_table_size = 1;
    s->wave_table = av_malloc_array(s->wave_table_size, sizeof(*s->wave_table));
    if (!s->wave_table)
        return AVERROR(ENOMEM);
    ff_generate_wave_table(WAVE_SIN, AV_SAMPLE_FMT_DBL, s->wave_table, s->wave_table_size, 0.0, s->buf_size - 1, 3.0 * M_PI_2);
    s->wave_table_index = 0;
    return 0;
}
/* Single audio input; delay lines and LFO table are built at config time. */
static const AVFilterPad avfilter_af_vibrato_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .config_props = config_input,
        .filter_frame = filter_frame,
    },
    { NULL }
};
/* Single pass-through audio output. */
static const AVFilterPad avfilter_af_vibrato_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/* Filter registration: LFO-swept delay line (vibrato). */
AVFilter ff_af_vibrato = {
    .name = "vibrato",
    .description = NULL_IF_CONFIG_SMALL("Apply vibrato effect."),
    .priv_size = sizeof(VibratoContext),
    .priv_class = &vibrato_class,
    .uninit = uninit,
    .query_formats = query_formats,
    .inputs = avfilter_af_vibrato_inputs,
    .outputs = avfilter_af_vibrato_outputs,
};

496
externals/ffmpeg/libavfilter/af_volume.c vendored Executable file
View File

@@ -0,0 +1,496 @@
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2012 Justin Ruggles <justin.ruggles@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* audio volume filter
*/
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/ffmath.h"
#include "libavutil/float_dsp.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/opt.h"
#include "libavutil/replaygain.h"
#include "audio.h"
#include "avfilter.h"
#include "formats.h"
#include "internal.h"
#include "af_volume.h"
/* Human-readable names for the precision option, indexed by PrecisionType. */
static const char * const precision_str[] = {
    "fixed", "float", "double"
};
/* Variables available in the volume expression; order matches the
 * VolumeVarName enum in af_volume.h. */
static const char *const var_names[] = {
    "n", ///< frame number (starting at zero)
    "nb_channels", ///< number of channels
    "nb_consumed_samples", ///< number of samples consumed by the filter
    "nb_samples", ///< number of samples in the current frame
    "pos", ///< position in the file of the frame
    "pts", ///< frame presentation timestamp
    "sample_rate", ///< sample rate
    "startpts", ///< PTS at start of stream
    "startt", ///< time at start of stream
    "t", ///< time in the file of the frame
    "tb", ///< timebase
    "volume", ///< last set value
    NULL
};
#define OFFSET(x) offsetof(VolumeContext, x)
#define A AV_OPT_FLAG_AUDIO_PARAM
#define F AV_OPT_FLAG_FILTERING_PARAM
#define T AV_OPT_FLAG_RUNTIME_PARAM
/* Options: volume expression (runtime-settable), arithmetic precision,
 * expression re-evaluation mode, and replaygain handling. */
static const AVOption volume_options[] = {
    { "volume", "set volume adjustment expression",
            OFFSET(volume_expr), AV_OPT_TYPE_STRING, { .str = "1.0" }, .flags = A|F|T },
    { "precision", "select mathematical precision",
            OFFSET(precision), AV_OPT_TYPE_INT, { .i64 = PRECISION_FLOAT }, PRECISION_FIXED, PRECISION_DOUBLE, A|F, "precision" },
        { "fixed", "select 8-bit fixed-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FIXED }, INT_MIN, INT_MAX, A|F, "precision" },
        { "float", "select 32-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_FLOAT }, INT_MIN, INT_MAX, A|F, "precision" },
        { "double", "select 64-bit floating-point", 0, AV_OPT_TYPE_CONST, { .i64 = PRECISION_DOUBLE }, INT_MIN, INT_MAX, A|F, "precision" },
    { "eval", "specify when to evaluate expressions", OFFSET(eval_mode), AV_OPT_TYPE_INT, {.i64 = EVAL_MODE_ONCE}, 0, EVAL_MODE_NB-1, .flags = A|F, "eval" },
         { "once", "eval volume expression once", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_ONCE}, .flags = A|F, .unit = "eval" },
         { "frame", "eval volume expression per-frame", 0, AV_OPT_TYPE_CONST, {.i64=EVAL_MODE_FRAME}, .flags = A|F, .unit = "eval" },
    { "replaygain", "Apply replaygain side data when present",
            OFFSET(replaygain), AV_OPT_TYPE_INT, { .i64 = REPLAYGAIN_DROP }, REPLAYGAIN_DROP, REPLAYGAIN_ALBUM, A|F, "replaygain" },
        { "drop", "replaygain side data is dropped", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_DROP }, 0, 0, A|F, "replaygain" },
        { "ignore", "replaygain side data is ignored", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_IGNORE }, 0, 0, A|F, "replaygain" },
        { "track", "track gain is preferred", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_TRACK }, 0, 0, A|F, "replaygain" },
        { "album", "album gain is preferred", 0, AV_OPT_TYPE_CONST, { .i64 = REPLAYGAIN_ALBUM }, 0, 0, A|F, "replaygain" },
    { "replaygain_preamp", "Apply replaygain pre-amplification",
            OFFSET(replaygain_preamp), AV_OPT_TYPE_DOUBLE, { .dbl = 0.0 }, -15.0, 15.0, A|F },
    { "replaygain_noclip", "Apply replaygain clipping prevention",
            OFFSET(replaygain_noclip), AV_OPT_TYPE_BOOL, { .i64 = 1 }, 0, 1, A|F },
    { NULL }
};
AVFILTER_DEFINE_CLASS(volume);
/* (Re)parse a volume expression into *pexpr.  On parse failure the
 * previous expression is kept so a bad runtime command cannot leave the
 * filter without a valid expression. */
static int set_expr(AVExpr **pexpr, const char *expr, void *log_ctx)
{
    AVExpr *prev = *pexpr;
    int ret = av_expr_parse(pexpr, expr, var_names,
                            NULL, NULL, NULL, NULL, 0, log_ctx);

    if (ret < 0) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Error when evaluating the volume expression '%s'\n", expr);
        *pexpr = prev;  /* restore the last good expression */
        return ret;
    }

    av_expr_free(prev);
    return 0;
}
/* Filter init: allocate the float DSP helpers and parse the initial
 * volume expression. */
static av_cold int init(AVFilterContext *ctx)
{
    VolumeContext *vol = ctx->priv;

    vol->fdsp = avpriv_float_dsp_alloc(0);
    if (!vol->fdsp)
        return AVERROR(ENOMEM);

    return set_expr(&vol->volume_pexpr, vol->volume_expr, ctx);
}
/* Release the parsed expression, option strings and DSP context. */
static av_cold void uninit(AVFilterContext *ctx)
{
    VolumeContext *vol = ctx->priv;

    av_freep(&vol->fdsp);
    av_expr_free(vol->volume_pexpr);
    av_opt_free(vol);
}
/* Advertise sample formats according to the selected precision (integer
 * formats for fixed-point, float/double otherwise); any channel layout and
 * sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    VolumeContext *vol = ctx->priv;
    AVFilterFormats *formats = NULL;
    AVFilterChannelLayouts *layouts;
    static const enum AVSampleFormat sample_fmts[][7] = {
        [PRECISION_FIXED] = {
            AV_SAMPLE_FMT_U8,
            AV_SAMPLE_FMT_U8P,
            AV_SAMPLE_FMT_S16,
            AV_SAMPLE_FMT_S16P,
            AV_SAMPLE_FMT_S32,
            AV_SAMPLE_FMT_S32P,
            AV_SAMPLE_FMT_NONE
        },
        [PRECISION_FLOAT] = {
            AV_SAMPLE_FMT_FLT,
            AV_SAMPLE_FMT_FLTP,
            AV_SAMPLE_FMT_NONE
        },
        [PRECISION_DOUBLE] = {
            AV_SAMPLE_FMT_DBL,
            AV_SAMPLE_FMT_DBLP,
            AV_SAMPLE_FMT_NONE
        }
    };
    int err;

    layouts = ff_all_channel_counts();
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((err = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return err;

    formats = ff_make_format_list(sample_fmts[vol->precision]);
    if (!formats)
        return AVERROR(ENOMEM);
    if ((err = ff_set_common_formats(ctx, formats)) < 0)
        return err;

    formats = ff_all_samplerates();
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/* Scale unsigned 8-bit samples by a Q8 fixed-point volume: bias to signed,
 * multiply in 64 bits (volume may exceed 24 bits), round, re-bias and
 * saturate. */
static inline void scale_samples_u8(uint8_t *dst, const uint8_t *src,
                                    int nb_samples, int volume)
{
    int i;

    for (i = 0; i < nb_samples; i++) {
        const int64_t scaled = ((int64_t)src[i] - 128) * volume + 128;

        dst[i] = av_clip_uint8((scaled >> 8) + 128);
    }
}
/* As scale_samples_u8() but with 32-bit arithmetic, valid only while the
 * product cannot overflow an int (volume < 0x1000000). */
static inline void scale_samples_u8_small(uint8_t *dst, const uint8_t *src,
                                          int nb_samples, int volume)
{
    int i;

    for (i = 0; i < nb_samples; i++) {
        const int scaled = (src[i] - 128) * volume + 128;

        dst[i] = av_clip_uint8((scaled >> 8) + 128);
    }
}
/* Scale signed 16-bit samples by a Q8 volume with rounding and saturation;
 * 64-bit intermediate because volume can be large. */
static inline void scale_samples_s16(uint8_t *dst, const uint8_t *src,
                                     int nb_samples, int volume)
{
    int16_t *out      = (int16_t *)dst;
    const int16_t *in = (const int16_t *)src;
    int i;

    for (i = 0; i < nb_samples; i++) {
        const int64_t scaled = (int64_t)in[i] * volume + 128;

        out[i] = av_clip_int16(scaled >> 8);
    }
}
/* As scale_samples_s16() but with 32-bit arithmetic, valid only while the
 * product fits in an int (volume < 0x10000). */
static inline void scale_samples_s16_small(uint8_t *dst, const uint8_t *src,
                                           int nb_samples, int volume)
{
    int16_t *out      = (int16_t *)dst;
    const int16_t *in = (const int16_t *)src;
    int i;

    for (i = 0; i < nb_samples; i++) {
        const int scaled = in[i] * volume + 128;

        out[i] = av_clip_int16(scaled >> 8);
    }
}
/* Scale signed 32-bit samples by a Q8 volume with rounding and saturation;
 * the intermediate must be 64-bit. */
static inline void scale_samples_s32(uint8_t *dst, const uint8_t *src,
                                     int nb_samples, int volume)
{
    int32_t *out      = (int32_t *)dst;
    const int32_t *in = (const int32_t *)src;
    int i;

    for (i = 0; i < nb_samples; i++) {
        const int64_t scaled = (int64_t)in[i] * volume + 128;

        out[i] = av_clipl_int32(scaled >> 8);
    }
}
/* Pick the per-format scaling routine (and its "small volume" fast path)
 * and the sample alignment required by the float DSP vector functions. */
static av_cold void volume_init(VolumeContext *vol)
{
    vol->samples_align = 1;
    switch (av_get_packed_sample_fmt(vol->sample_fmt)) {
    case AV_SAMPLE_FMT_U8:
        /* 32-bit path is safe only while the product fits in an int. */
        if (vol->volume_i < 0x1000000)
            vol->scale_samples = scale_samples_u8_small;
        else
            vol->scale_samples = scale_samples_u8;
        break;
    case AV_SAMPLE_FMT_S16:
        if (vol->volume_i < 0x10000)
            vol->scale_samples = scale_samples_s16_small;
        else
            vol->scale_samples = scale_samples_s16;
        break;
    case AV_SAMPLE_FMT_S32:
        vol->scale_samples = scale_samples_s32;
        break;
    case AV_SAMPLE_FMT_FLT:
        /* float/double paths use fdsp vector ops with alignment needs. */
        vol->samples_align = 4;
        break;
    case AV_SAMPLE_FMT_DBL:
        vol->samples_align = 8;
        break;
    }
    /* Allow SIMD overrides of scale_samples on x86. */
    if (ARCH_X86)
        ff_volume_init_x86(vol);
}
/* Evaluate the volume expression against the current variables, quantize
 * to Q8 in fixed-point mode, and (re)select the scaling routine.
 * A NaN result is fatal in once mode, muted (0) in per-frame mode. */
static int set_volume(AVFilterContext *ctx)
{
    VolumeContext *vol = ctx->priv;
    vol->volume = av_expr_eval(vol->volume_pexpr, vol->var_values, NULL);
    if (isnan(vol->volume)) {
        if (vol->eval_mode == EVAL_MODE_ONCE) {
            av_log(ctx, AV_LOG_ERROR, "Invalid value NaN for volume\n");
            return AVERROR(EINVAL);
        } else {
            av_log(ctx, AV_LOG_WARNING, "Invalid value NaN for volume, setting to 0\n");
            vol->volume = 0;
        }
    }
    /* Expose the last evaluated value as the `volume` variable. */
    vol->var_values[VAR_VOLUME] = vol->volume;
    av_log(ctx, AV_LOG_VERBOSE, "n:%f t:%f pts:%f precision:%s ",
           vol->var_values[VAR_N], vol->var_values[VAR_T], vol->var_values[VAR_PTS],
           precision_str[vol->precision]);
    if (vol->precision == PRECISION_FIXED) {
        /* Round to Q8 and store the quantized value back so logging and
         * the unity-gain check (volume_i == 256) agree. */
        vol->volume_i = (int)(vol->volume * 256 + 0.5);
        vol->volume = vol->volume_i / 256.0;
        av_log(ctx, AV_LOG_VERBOSE, "volume_i:%d/255 ", vol->volume_i);
    }
    av_log(ctx, AV_LOG_VERBOSE, "volume:%f volume_dB:%f\n",
           vol->volume, 20.0*log10(vol->volume));
    volume_init(vol);
    return 0;
}
/* Link configured: cache format info, reset all expression variables and
 * perform the initial volume evaluation. */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    VolumeContext *vol = ctx->priv;
    AVFilterLink *inlink = ctx->inputs[0];
    vol->sample_fmt = inlink->format;
    vol->channels = inlink->channels;
    /* One plane per channel for planar formats, a single plane otherwise. */
    vol->planes = av_sample_fmt_is_planar(inlink->format) ? vol->channels : 1;
    /* Per-stream variables start as NaN until the first frame arrives. */
    vol->var_values[VAR_N] =
    vol->var_values[VAR_NB_CONSUMED_SAMPLES] =
    vol->var_values[VAR_NB_SAMPLES] =
    vol->var_values[VAR_POS] =
    vol->var_values[VAR_PTS] =
    vol->var_values[VAR_STARTPTS] =
    vol->var_values[VAR_STARTT] =
    vol->var_values[VAR_T] =
    vol->var_values[VAR_VOLUME] = NAN;
    vol->var_values[VAR_NB_CHANNELS] = inlink->channels;
    vol->var_values[VAR_TB] = av_q2d(inlink->time_base);
    vol->var_values[VAR_SAMPLE_RATE] = inlink->sample_rate;
    av_log(inlink->src, AV_LOG_VERBOSE, "tb:%f sample_rate:%f nb_channels:%f\n",
           vol->var_values[VAR_TB],
           vol->var_values[VAR_SAMPLE_RATE],
           vol->var_values[VAR_NB_CHANNELS]);
    return set_volume(ctx);
}
/* Runtime command handler: only "volume" is supported.  The new expression
 * is parsed immediately; in once mode it is also evaluated right away
 * (per-frame mode re-evaluates on the next frame anyway). */
static int process_command(AVFilterContext *ctx, const char *cmd, const char *args,
                           char *res, int res_len, int flags)
{
    VolumeContext *vol = ctx->priv;
    int ret;

    if (strcmp(cmd, "volume"))
        return AVERROR(ENOSYS);

    ret = set_expr(&vol->volume_pexpr, args, ctx);
    if (ret < 0)
        return ret;
    if (vol->eval_mode == EVAL_MODE_ONCE)
        set_volume(ctx);
    return ret;
}
/* Timestamp conversion helpers: AV_NOPTS_VALUE maps to/from NAN so that
 * unknown timestamps propagate naturally through the expressions. */
#define D2TS(d) (isnan(d) ? AV_NOPTS_VALUE : (int64_t)(d))
#define TS2D(ts) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts))
#define TS2T(ts, tb) ((ts) == AV_NOPTS_VALUE ? NAN : (double)(ts)*av_q2d(tb))
/* Per-frame processing: optionally derive the gain from replaygain side
 * data, update expression variables, re-evaluate in per-frame mode, then
 * scale the samples (in place when possible, unity gain is a pass-through). */
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext *ctx = inlink->dst;
    VolumeContext *vol = inlink->dst->priv;
    AVFilterLink *outlink = inlink->dst->outputs[0];
    int nb_samples = buf->nb_samples;
    AVFrame *out_buf;
    int64_t pos;
    AVFrameSideData *sd = av_frame_get_side_data(buf, AV_FRAME_DATA_REPLAYGAIN);
    int ret;
    if (sd && vol->replaygain != REPLAYGAIN_IGNORE) {
        if (vol->replaygain != REPLAYGAIN_DROP) {
            AVReplayGain *replaygain = (AVReplayGain*)sd->data;
            /* Gain/peak are stored in units of 1/100000 dB / 1/100000. */
            int32_t gain = 100000;
            uint32_t peak = 100000;
            float g, p;
            /* Prefer track gain in track mode, else fall back to album. */
            if (vol->replaygain == REPLAYGAIN_TRACK &&
                replaygain->track_gain != INT32_MIN) {
                gain = replaygain->track_gain;
                if (replaygain->track_peak != 0)
                    peak = replaygain->track_peak;
            } else if (replaygain->album_gain != INT32_MIN) {
                gain = replaygain->album_gain;
                if (replaygain->album_peak != 0)
                    peak = replaygain->album_peak;
            } else {
                av_log(inlink->dst, AV_LOG_WARNING, "Both ReplayGain gain "
                       "values are unknown.\n");
            }
            g = gain / 100000.0f;
            p = peak / 100000.0f;
            av_log(inlink->dst, AV_LOG_VERBOSE,
                   "Using gain %f dB from replaygain side data.\n", g);
            /* dB -> linear, with optional pre-amp and peak-based clipping
             * prevention. */
            vol->volume   = ff_exp10((g + vol->replaygain_preamp) / 20);
            if (vol->replaygain_noclip)
                vol->volume = FFMIN(vol->volume, 1.0 / p);
            vol->volume_i = (int)(vol->volume * 256 + 0.5);
            volume_init(vol);
        }
        /* Side data is consumed either way (drop and apply modes). */
        av_frame_remove_side_data(buf, AV_FRAME_DATA_REPLAYGAIN);
    }
    /* Latch stream-start timestamps on the first frame. */
    if (isnan(vol->var_values[VAR_STARTPTS])) {
        vol->var_values[VAR_STARTPTS] = TS2D(buf->pts);
        vol->var_values[VAR_STARTT ] = TS2T(buf->pts, inlink->time_base);
    }
    vol->var_values[VAR_PTS] = TS2D(buf->pts);
    vol->var_values[VAR_T ] = TS2T(buf->pts, inlink->time_base);
    vol->var_values[VAR_N ] = inlink->frame_count_out;
    pos = buf->pkt_pos;
    vol->var_values[VAR_POS] = pos == -1 ? NAN : pos;
    if (vol->eval_mode == EVAL_MODE_FRAME)
        set_volume(ctx);
    /* Unity gain: pass the frame through untouched. */
    if (vol->volume == 1.0 || vol->volume_i == 256) {
        out_buf = buf;
        goto end;
    }
    /* do volume scaling in-place if input buffer is writable */
    if (av_frame_is_writable(buf)
            && (vol->precision != PRECISION_FIXED || vol->volume_i > 0)) {
        out_buf = buf;
    } else {
        out_buf = ff_get_audio_buffer(outlink, nb_samples);
        if (!out_buf) {
            av_frame_free(&buf);
            return AVERROR(ENOMEM);
        }
        ret = av_frame_copy_props(out_buf, buf);
        if (ret < 0) {
            av_frame_free(&out_buf);
            av_frame_free(&buf);
            return ret;
        }
    }
    /* volume_i == 0 in fixed mode means mute: the freshly-allocated buffer
     * is already silent, so no scaling pass is needed. */
    if (vol->precision != PRECISION_FIXED || vol->volume_i > 0) {
        int p, plane_samples;
        if (av_sample_fmt_is_planar(buf->format))
            plane_samples = FFALIGN(nb_samples, vol->samples_align);
        else
            plane_samples = FFALIGN(nb_samples * vol->channels, vol->samples_align);
        if (vol->precision == PRECISION_FIXED) {
            for (p = 0; p < vol->planes; p++) {
                vol->scale_samples(out_buf->extended_data[p],
                                   buf->extended_data[p], plane_samples,
                                   vol->volume_i);
            }
        } else if (av_get_packed_sample_fmt(vol->sample_fmt) == AV_SAMPLE_FMT_FLT) {
            for (p = 0; p < vol->planes; p++) {
                vol->fdsp->vector_fmul_scalar((float *)out_buf->extended_data[p],
                                              (const float *)buf->extended_data[p],
                                              vol->volume, plane_samples);
            }
        } else {
            for (p = 0; p < vol->planes; p++) {
                vol->fdsp->vector_dmul_scalar((double *)out_buf->extended_data[p],
                                              (const double *)buf->extended_data[p],
                                              vol->volume, plane_samples);
            }
        }
    }
    emms_c();
    if (buf != out_buf)
        av_frame_free(&buf);
end:
    vol->var_values[VAR_NB_CONSUMED_SAMPLES] += out_buf->nb_samples;
    return ff_filter_frame(outlink, out_buf);
}
/* Single audio input. */
static const AVFilterPad avfilter_af_volume_inputs[] = {
    {
        .name           = "default",
        .type           = AVMEDIA_TYPE_AUDIO,
        .filter_frame   = filter_frame,
    },
    { NULL }
};
/* Single audio output; format-dependent state is set up at config time. */
static const AVFilterPad avfilter_af_volume_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
    },
    { NULL }
};
/* Filter registration: gain adjustment with expression and replaygain
 * support, timeline-enabled, runtime "volume" command. */
AVFilter ff_af_volume = {
    .name           = "volume",
    .description    = NULL_IF_CONFIG_SMALL("Change input volume."),
    .query_formats  = query_formats,
    .priv_size      = sizeof(VolumeContext),
    .priv_class     = &volume_class,
    .init           = init,
    .uninit         = uninit,
    .inputs         = avfilter_af_volume_inputs,
    .outputs        = avfilter_af_volume_outputs,
    .flags          = AVFILTER_FLAG_SUPPORT_TIMELINE_GENERIC,
    .process_command = process_command,
};

93
externals/ffmpeg/libavfilter/af_volume.h vendored Executable file
View File

@@ -0,0 +1,93 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* audio volume filter
*/
#ifndef AVFILTER_VOLUME_H
#define AVFILTER_VOLUME_H
#include "libavutil/common.h"
#include "libavutil/eval.h"
#include "libavutil/float_dsp.h"
#include "libavutil/opt.h"
#include "libavutil/samplefmt.h"
/* Arithmetic used to scale the samples (selected via the "precision" option). */
enum PrecisionType {
    PRECISION_FIXED = 0,
    PRECISION_FLOAT,
    PRECISION_DOUBLE,
};

/* When the volume expression is (re)evaluated. */
enum EvalMode {
    EVAL_MODE_ONCE,   /* evaluate once at init/config time */
    EVAL_MODE_FRAME,  /* re-evaluate for every incoming frame */
    EVAL_MODE_NB
};

/* Variables exposed to the volume expression; each value is an index
 * into VolumeContext.var_values. */
enum VolumeVarName {
    VAR_N,                    /* frame count */
    VAR_NB_CHANNELS,
    VAR_NB_CONSUMED_SAMPLES,
    VAR_NB_SAMPLES,
    VAR_POS,
    VAR_PTS,
    VAR_SAMPLE_RATE,
    VAR_STARTPTS,
    VAR_STARTT,
    VAR_T,
    VAR_TB,
    VAR_VOLUME,
    VAR_VARS_NB
};

/* How ReplayGain side data found on input frames is handled. */
enum ReplayGainType {
    REPLAYGAIN_DROP,    /* remove the side data, do not apply it */
    REPLAYGAIN_IGNORE,  /* leave the side data alone */
    REPLAYGAIN_TRACK,   /* apply the track gain */
    REPLAYGAIN_ALBUM,   /* apply the album gain */
};

typedef struct VolumeContext {
    const AVClass *class;
    AVFloatDSPContext *fdsp;
    int precision;                 /* enum PrecisionType */
    int eval_mode;                 /* enum EvalMode */
    const char *volume_expr;       /* user-supplied volume expression string */
    AVExpr *volume_pexpr;          /* parsed form of volume_expr */
    double var_values[VAR_VARS_NB];

    int replaygain;                /* enum ReplayGainType */
    double replaygain_preamp;
    int replaygain_noclip;
    double volume;                 /* current linear volume factor */
    int volume_i;                  /* fixed-point volume — presumably a scaled
                                      integer form of 'volume'; see the .c file */
    int channels;
    int planes;
    enum AVSampleFormat sample_fmt;

    /* Scales nb_samples samples from src into dst by 'volume'
     * (integer factor; exact scaling convention defined in the .c/x86 code). */
    void (*scale_samples)(uint8_t *dst, const uint8_t *src, int nb_samples,
                          int volume);
    int samples_align;             /* alignment requirement for scale_samples */
} VolumeContext;

/* Install x86 SIMD implementations of scale_samples, if available. */
void ff_volume_init_x86(VolumeContext *vol);
#endif /* AVFILTER_VOLUME_H */

166
externals/ffmpeg/libavfilter/af_volumedetect.c vendored Executable file
View File

@@ -0,0 +1,166 @@
/*
* Copyright (c) 2012 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/channel_layout.h"
#include "libavutil/avassert.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
typedef struct VolDetectContext {
    /**
     * Number of samples at each PCM value.
     * histogram[0x8000 + i] is the number of samples at value i.
     * The extra element is there for symmetry: the array covers the full
     * range -0x8000..+0x8000 inclusive (see the max_volume scan in
     * print_stats(), which may index histogram[0x8000 + 0x8000]).
     */
    uint64_t histogram[0x10001];
} VolDetectContext;
/*
 * Advertise the formats this filter can consume: signed 16-bit PCM
 * (interleaved or planar), any channel count/layout.
 */
static int query_formats(AVFilterContext *ctx)
{
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_S16,
        AV_SAMPLE_FMT_S16P,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterFormats *formats = ff_make_format_list(sample_fmts);
    AVFilterChannelLayouts *layouts;
    int ret;

    if (!formats)
        return AVERROR(ENOMEM);
    if (!(layouts = ff_all_channel_counts()))
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;
    return ff_set_common_formats(ctx, formats);
}
/*
 * Accumulate every 16-bit sample of the frame into the histogram,
 * then pass the frame through unchanged.
 */
static int filter_frame(AVFilterLink *inlink, AVFrame *samples)
{
    AVFilterContext *ctx = inlink->dst;
    VolDetectContext *vd = ctx->priv;
    int samples_per_plane = samples->nb_samples;
    int planes = samples->channels;

    /* Interleaved layouts store all channels in plane 0. */
    if (!av_sample_fmt_is_planar(samples->format)) {
        samples_per_plane *= samples->channels;
        planes = 1;
    }

    for (int p = 0; p < planes; p++) {
        const int16_t *pcm = (const int16_t *)samples->extended_data[p];

        for (int n = 0; n < samples_per_plane; n++)
            vd->histogram[pcm[n] + 0x8000]++;
    }

    return ff_filter_frame(inlink->dst->outputs[0], samples);
}
#define MAX_DB 91
static inline double logdb(uint64_t v)
{
double d = v / (double)(0x8000 * 0x8000);
if (!v)
return MAX_DB;
return -log10(d) * 10;
}
/*
 * Log the accumulated volume statistics: total sample count, mean and max
 * volume in dB, and a histogram of the loudest ~0.1% of samples.
 *
 * Fix: nb_samples and histdb[] are uint64_t, so print them with PRIu64
 * instead of PRId64 (a signed/unsigned format mismatch is undefined
 * behavior per the C standard, even if it usually "works").
 */
static void print_stats(AVFilterContext *ctx)
{
    VolDetectContext *vd = ctx->priv;
    int i, max_volume, shift;
    uint64_t nb_samples = 0, power = 0, nb_samples_shift = 0, sum = 0;
    uint64_t histdb[MAX_DB + 1] = { 0 };

    for (i = 0; i < 0x10000; i++)
        nb_samples += vd->histogram[i];
    av_log(ctx, AV_LOG_INFO, "n_samples: %"PRIu64"\n", nb_samples);
    if (!nb_samples)
        return;

    /* If nb_samples > 1<<34, there is a risk of overflow in the
       multiplication or the sum: shift all histogram values to avoid that.
       The total number of samples must be recomputed to avoid rounding
       errors. */
    shift = av_log2(nb_samples >> 33);
    for (i = 0; i < 0x10000; i++) {
        nb_samples_shift += vd->histogram[i] >> shift;
        power += (i - 0x8000) * (i - 0x8000) * (vd->histogram[i] >> shift);
    }
    if (!nb_samples_shift)
        return;

    /* Mean power with rounding; cannot exceed full-scale squared. */
    power = (power + nb_samples_shift / 2) / nb_samples_shift;
    av_assert0(power <= 0x8000 * 0x8000);
    av_log(ctx, AV_LOG_INFO, "mean_volume: %.1f dB\n", -logdb(power));

    /* Scan outward-in for the largest sample magnitude that occurred. */
    max_volume = 0x8000;
    while (max_volume > 0 && !vd->histogram[0x8000 + max_volume] &&
                             !vd->histogram[0x8000 - max_volume])
        max_volume--;
    av_log(ctx, AV_LOG_INFO, "max_volume: %.1f dB\n", -logdb(max_volume * max_volume));

    /* Collapse the sample histogram into whole-dB buckets, then print
       buckets from the loudest down until ~0.1% of samples are covered. */
    for (i = 0; i < 0x10000; i++)
        histdb[(int)logdb((i - 0x8000) * (i - 0x8000))] += vd->histogram[i];
    for (i = 0; i <= MAX_DB && !histdb[i]; i++);
    for (; i <= MAX_DB && sum < nb_samples / 1000; i++) {
        av_log(ctx, AV_LOG_INFO, "histogram_%ddb: %"PRIu64"\n", i, histdb[i]);
        sum += histdb[i];
    }
}
/* Emit the collected statistics when the filter is torn down. */
static av_cold void uninit(AVFilterContext *ctx)
{
    print_stats(ctx);
}
/* One audio input feeding filter_frame(); frames pass through unmodified. */
static const AVFilterPad volumedetect_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = filter_frame,
    },
    { NULL }
};

/* Pass-through audio output. */
static const AVFilterPad volumedetect_outputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
    },
    { NULL }
};
/*
 * Definition of the "volumedetect" audio filter: a pass-through analyzer
 * that logs volume statistics at uninit time.
 */
AVFilter ff_af_volumedetect = {
    .name = "volumedetect",
    .description = NULL_IF_CONFIG_SMALL("Detect audio volume."),
    .priv_size = sizeof(VolDetectContext),
    .query_formats = query_formats,
    .uninit = uninit,
    .inputs = volumedetect_inputs,
    .outputs = volumedetect_outputs,
};

571
externals/ffmpeg/libavfilter/allfilters.c vendored Executable file
View File

@@ -0,0 +1,571 @@
/*
* filter registration
* Copyright (c) 2008 Vitor Sessak
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/thread.h"
#include "avfilter.h"
#include "config.h"
extern AVFilter ff_af_abench;
extern AVFilter ff_af_acompressor;
extern AVFilter ff_af_acontrast;
extern AVFilter ff_af_acopy;
extern AVFilter ff_af_acue;
extern AVFilter ff_af_acrossfade;
extern AVFilter ff_af_acrossover;
extern AVFilter ff_af_acrusher;
extern AVFilter ff_af_adeclick;
extern AVFilter ff_af_adeclip;
extern AVFilter ff_af_adelay;
extern AVFilter ff_af_aderivative;
extern AVFilter ff_af_aecho;
extern AVFilter ff_af_aemphasis;
extern AVFilter ff_af_aeval;
extern AVFilter ff_af_afade;
extern AVFilter ff_af_afftdn;
extern AVFilter ff_af_afftfilt;
extern AVFilter ff_af_afir;
extern AVFilter ff_af_aformat;
extern AVFilter ff_af_agate;
extern AVFilter ff_af_aiir;
extern AVFilter ff_af_aintegral;
extern AVFilter ff_af_ainterleave;
extern AVFilter ff_af_alimiter;
extern AVFilter ff_af_allpass;
extern AVFilter ff_af_aloop;
extern AVFilter ff_af_amerge;
extern AVFilter ff_af_ametadata;
extern AVFilter ff_af_amix;
extern AVFilter ff_af_amultiply;
extern AVFilter ff_af_anequalizer;
extern AVFilter ff_af_anlmdn;
extern AVFilter ff_af_anlms;
extern AVFilter ff_af_anull;
extern AVFilter ff_af_apad;
extern AVFilter ff_af_aperms;
extern AVFilter ff_af_aphaser;
extern AVFilter ff_af_apulsator;
extern AVFilter ff_af_arealtime;
extern AVFilter ff_af_aresample;
extern AVFilter ff_af_areverse;
extern AVFilter ff_af_arnndn;
extern AVFilter ff_af_aselect;
extern AVFilter ff_af_asendcmd;
extern AVFilter ff_af_asetnsamples;
extern AVFilter ff_af_asetpts;
extern AVFilter ff_af_asetrate;
extern AVFilter ff_af_asettb;
extern AVFilter ff_af_ashowinfo;
extern AVFilter ff_af_asidedata;
extern AVFilter ff_af_asoftclip;
extern AVFilter ff_af_asplit;
extern AVFilter ff_af_asr;
extern AVFilter ff_af_astats;
extern AVFilter ff_af_astreamselect;
extern AVFilter ff_af_asubboost;
extern AVFilter ff_af_atempo;
extern AVFilter ff_af_atrim;
extern AVFilter ff_af_axcorrelate;
extern AVFilter ff_af_azmq;
extern AVFilter ff_af_bandpass;
extern AVFilter ff_af_bandreject;
extern AVFilter ff_af_bass;
extern AVFilter ff_af_biquad;
extern AVFilter ff_af_bs2b;
extern AVFilter ff_vf_chromaber_vulkan;
extern AVFilter ff_af_channelmap;
extern AVFilter ff_af_channelsplit;
extern AVFilter ff_af_chorus;
extern AVFilter ff_af_compand;
extern AVFilter ff_af_compensationdelay;
extern AVFilter ff_af_crossfeed;
extern AVFilter ff_af_crystalizer;
extern AVFilter ff_af_dcshift;
extern AVFilter ff_af_deesser;
extern AVFilter ff_af_drmeter;
extern AVFilter ff_af_dynaudnorm;
extern AVFilter ff_af_earwax;
extern AVFilter ff_af_ebur128;
extern AVFilter ff_af_equalizer;
extern AVFilter ff_af_extrastereo;
extern AVFilter ff_af_firequalizer;
extern AVFilter ff_af_flanger;
extern AVFilter ff_af_haas;
extern AVFilter ff_af_hdcd;
extern AVFilter ff_af_headphone;
extern AVFilter ff_af_highpass;
extern AVFilter ff_af_highshelf;
extern AVFilter ff_af_join;
extern AVFilter ff_af_ladspa;
extern AVFilter ff_af_loudnorm;
extern AVFilter ff_af_lowpass;
extern AVFilter ff_af_lowshelf;
extern AVFilter ff_af_lv2;
extern AVFilter ff_af_mcompand;
extern AVFilter ff_af_pan;
extern AVFilter ff_af_replaygain;
extern AVFilter ff_af_resample;
extern AVFilter ff_af_rubberband;
extern AVFilter ff_af_sidechaincompress;
extern AVFilter ff_af_sidechaingate;
extern AVFilter ff_af_silencedetect;
extern AVFilter ff_af_silenceremove;
extern AVFilter ff_af_sofalizer;
extern AVFilter ff_af_stereotools;
extern AVFilter ff_af_stereowiden;
extern AVFilter ff_af_superequalizer;
extern AVFilter ff_af_surround;
extern AVFilter ff_af_treble;
extern AVFilter ff_af_tremolo;
extern AVFilter ff_af_vibrato;
extern AVFilter ff_af_volume;
extern AVFilter ff_af_volumedetect;
extern AVFilter ff_asrc_aevalsrc;
extern AVFilter ff_asrc_afirsrc;
extern AVFilter ff_asrc_anoisesrc;
extern AVFilter ff_asrc_anullsrc;
extern AVFilter ff_asrc_flite;
extern AVFilter ff_asrc_hilbert;
extern AVFilter ff_asrc_sinc;
extern AVFilter ff_asrc_sine;
extern AVFilter ff_asink_anullsink;
extern AVFilter ff_vf_addroi;
extern AVFilter ff_vf_alphaextract;
extern AVFilter ff_vf_alphamerge;
extern AVFilter ff_vf_amplify;
extern AVFilter ff_vf_ass;
extern AVFilter ff_vf_atadenoise;
extern AVFilter ff_vf_avgblur;
extern AVFilter ff_vf_avgblur_opencl;
extern AVFilter ff_vf_avgblur_vulkan;
extern AVFilter ff_vf_bbox;
extern AVFilter ff_vf_bench;
extern AVFilter ff_vf_bilateral;
extern AVFilter ff_vf_bitplanenoise;
extern AVFilter ff_vf_blackdetect;
extern AVFilter ff_vf_blackframe;
extern AVFilter ff_vf_blend;
extern AVFilter ff_vf_bm3d;
extern AVFilter ff_vf_boxblur;
extern AVFilter ff_vf_boxblur_opencl;
extern AVFilter ff_vf_bwdif;
extern AVFilter ff_vf_cas;
extern AVFilter ff_vf_chromahold;
extern AVFilter ff_vf_chromakey;
extern AVFilter ff_vf_chromashift;
extern AVFilter ff_vf_ciescope;
extern AVFilter ff_vf_codecview;
extern AVFilter ff_vf_colorbalance;
extern AVFilter ff_vf_colorchannelmixer;
extern AVFilter ff_vf_colorkey;
extern AVFilter ff_vf_colorkey_opencl;
extern AVFilter ff_vf_colorhold;
extern AVFilter ff_vf_colorlevels;
extern AVFilter ff_vf_colormatrix;
extern AVFilter ff_vf_colorspace;
extern AVFilter ff_vf_convolution;
extern AVFilter ff_vf_convolution_opencl;
extern AVFilter ff_vf_convolve;
extern AVFilter ff_vf_copy;
extern AVFilter ff_vf_coreimage;
extern AVFilter ff_vf_cover_rect;
extern AVFilter ff_vf_crop;
extern AVFilter ff_vf_cropdetect;
extern AVFilter ff_vf_cue;
extern AVFilter ff_vf_curves;
extern AVFilter ff_vf_datascope;
extern AVFilter ff_vf_dblur;
extern AVFilter ff_vf_dctdnoiz;
extern AVFilter ff_vf_deband;
extern AVFilter ff_vf_deblock;
extern AVFilter ff_vf_decimate;
extern AVFilter ff_vf_deconvolve;
extern AVFilter ff_vf_dedot;
extern AVFilter ff_vf_deflate;
extern AVFilter ff_vf_deflicker;
extern AVFilter ff_vf_deinterlace_qsv;
extern AVFilter ff_vf_deinterlace_vaapi;
extern AVFilter ff_vf_dejudder;
extern AVFilter ff_vf_delogo;
extern AVFilter ff_vf_denoise_vaapi;
extern AVFilter ff_vf_derain;
extern AVFilter ff_vf_deshake;
extern AVFilter ff_vf_deshake_opencl;
extern AVFilter ff_vf_despill;
extern AVFilter ff_vf_detelecine;
extern AVFilter ff_vf_dilation;
extern AVFilter ff_vf_dilation_opencl;
extern AVFilter ff_vf_displace;
extern AVFilter ff_vf_dnn_processing;
extern AVFilter ff_vf_doubleweave;
extern AVFilter ff_vf_drawbox;
extern AVFilter ff_vf_drawgraph;
extern AVFilter ff_vf_drawgrid;
extern AVFilter ff_vf_drawtext;
extern AVFilter ff_vf_edgedetect;
extern AVFilter ff_vf_elbg;
extern AVFilter ff_vf_entropy;
extern AVFilter ff_vf_eq;
extern AVFilter ff_vf_erosion;
extern AVFilter ff_vf_erosion_opencl;
extern AVFilter ff_vf_extractplanes;
extern AVFilter ff_vf_fade;
extern AVFilter ff_vf_fftdnoiz;
extern AVFilter ff_vf_fftfilt;
extern AVFilter ff_vf_field;
extern AVFilter ff_vf_fieldhint;
extern AVFilter ff_vf_fieldmatch;
extern AVFilter ff_vf_fieldorder;
extern AVFilter ff_vf_fillborders;
extern AVFilter ff_vf_find_rect;
extern AVFilter ff_vf_floodfill;
extern AVFilter ff_vf_format;
extern AVFilter ff_vf_fps;
extern AVFilter ff_vf_framepack;
extern AVFilter ff_vf_framerate;
extern AVFilter ff_vf_framestep;
extern AVFilter ff_vf_freezedetect;
extern AVFilter ff_vf_freezeframes;
extern AVFilter ff_vf_frei0r;
extern AVFilter ff_vf_fspp;
extern AVFilter ff_vf_gblur;
extern AVFilter ff_vf_geq;
extern AVFilter ff_vf_gradfun;
extern AVFilter ff_vf_graphmonitor;
extern AVFilter ff_vf_greyedge;
extern AVFilter ff_vf_haldclut;
extern AVFilter ff_vf_hflip;
extern AVFilter ff_vf_histeq;
extern AVFilter ff_vf_histogram;
extern AVFilter ff_vf_hqdn3d;
extern AVFilter ff_vf_hqx;
extern AVFilter ff_vf_hstack;
extern AVFilter ff_vf_hue;
extern AVFilter ff_vf_hwdownload;
extern AVFilter ff_vf_hwmap;
extern AVFilter ff_vf_hwupload;
extern AVFilter ff_vf_hwupload_cuda;
extern AVFilter ff_vf_hysteresis;
extern AVFilter ff_vf_idet;
extern AVFilter ff_vf_il;
extern AVFilter ff_vf_inflate;
extern AVFilter ff_vf_interlace;
extern AVFilter ff_vf_interleave;
extern AVFilter ff_vf_kerndeint;
extern AVFilter ff_vf_lagfun;
extern AVFilter ff_vf_lenscorrection;
extern AVFilter ff_vf_lensfun;
extern AVFilter ff_vf_libvmaf;
extern AVFilter ff_vf_limiter;
extern AVFilter ff_vf_loop;
extern AVFilter ff_vf_lumakey;
extern AVFilter ff_vf_lut;
extern AVFilter ff_vf_lut1d;
extern AVFilter ff_vf_lut2;
extern AVFilter ff_vf_lut3d;
extern AVFilter ff_vf_lutrgb;
extern AVFilter ff_vf_lutyuv;
extern AVFilter ff_vf_maskedclamp;
extern AVFilter ff_vf_maskedmax;
extern AVFilter ff_vf_maskedmerge;
extern AVFilter ff_vf_maskedmin;
extern AVFilter ff_vf_maskedthreshold;
extern AVFilter ff_vf_maskfun;
extern AVFilter ff_vf_mcdeint;
extern AVFilter ff_vf_median;
extern AVFilter ff_vf_mergeplanes;
extern AVFilter ff_vf_mestimate;
extern AVFilter ff_vf_metadata;
extern AVFilter ff_vf_midequalizer;
extern AVFilter ff_vf_minterpolate;
extern AVFilter ff_vf_mix;
extern AVFilter ff_vf_mpdecimate;
extern AVFilter ff_vf_negate;
extern AVFilter ff_vf_nlmeans;
extern AVFilter ff_vf_nlmeans_opencl;
extern AVFilter ff_vf_nnedi;
extern AVFilter ff_vf_noformat;
extern AVFilter ff_vf_noise;
extern AVFilter ff_vf_normalize;
extern AVFilter ff_vf_null;
extern AVFilter ff_vf_ocr;
extern AVFilter ff_vf_ocv;
extern AVFilter ff_vf_oscilloscope;
extern AVFilter ff_vf_overlay;
extern AVFilter ff_vf_overlay_opencl;
extern AVFilter ff_vf_overlay_qsv;
extern AVFilter ff_vf_overlay_vulkan;
extern AVFilter ff_vf_overlay_cuda;
extern AVFilter ff_vf_owdenoise;
extern AVFilter ff_vf_pad;
extern AVFilter ff_vf_pad_opencl;
extern AVFilter ff_vf_palettegen;
extern AVFilter ff_vf_paletteuse;
extern AVFilter ff_vf_perms;
extern AVFilter ff_vf_perspective;
extern AVFilter ff_vf_phase;
extern AVFilter ff_vf_photosensitivity;
extern AVFilter ff_vf_pixdesctest;
extern AVFilter ff_vf_pixscope;
extern AVFilter ff_vf_pp;
extern AVFilter ff_vf_pp7;
extern AVFilter ff_vf_premultiply;
extern AVFilter ff_vf_prewitt;
extern AVFilter ff_vf_prewitt_opencl;
extern AVFilter ff_vf_procamp_vaapi;
extern AVFilter ff_vf_program_opencl;
extern AVFilter ff_vf_pseudocolor;
extern AVFilter ff_vf_psnr;
extern AVFilter ff_vf_pullup;
extern AVFilter ff_vf_qp;
extern AVFilter ff_vf_random;
extern AVFilter ff_vf_readeia608;
extern AVFilter ff_vf_readvitc;
extern AVFilter ff_vf_realtime;
extern AVFilter ff_vf_remap;
extern AVFilter ff_vf_removegrain;
extern AVFilter ff_vf_removelogo;
extern AVFilter ff_vf_repeatfields;
extern AVFilter ff_vf_reverse;
extern AVFilter ff_vf_rgbashift;
extern AVFilter ff_vf_roberts;
extern AVFilter ff_vf_roberts_opencl;
extern AVFilter ff_vf_rotate;
extern AVFilter ff_vf_sab;
extern AVFilter ff_vf_scale;
extern AVFilter ff_vf_scale_cuda;
extern AVFilter ff_vf_scale_npp;
extern AVFilter ff_vf_scale_qsv;
extern AVFilter ff_vf_scale_vaapi;
extern AVFilter ff_vf_scale_vulkan;
extern AVFilter ff_vf_scale2ref;
extern AVFilter ff_vf_scdet;
extern AVFilter ff_vf_scroll;
extern AVFilter ff_vf_select;
extern AVFilter ff_vf_selectivecolor;
extern AVFilter ff_vf_sendcmd;
extern AVFilter ff_vf_separatefields;
extern AVFilter ff_vf_setdar;
extern AVFilter ff_vf_setfield;
extern AVFilter ff_vf_setparams;
extern AVFilter ff_vf_setpts;
extern AVFilter ff_vf_setrange;
extern AVFilter ff_vf_setsar;
extern AVFilter ff_vf_settb;
extern AVFilter ff_vf_sharpness_vaapi;
extern AVFilter ff_vf_showinfo;
extern AVFilter ff_vf_showpalette;
extern AVFilter ff_vf_shuffleframes;
extern AVFilter ff_vf_shuffleplanes;
extern AVFilter ff_vf_sidedata;
extern AVFilter ff_vf_signalstats;
extern AVFilter ff_vf_signature;
extern AVFilter ff_vf_smartblur;
extern AVFilter ff_vf_sobel;
extern AVFilter ff_vf_sobel_opencl;
extern AVFilter ff_vf_split;
extern AVFilter ff_vf_spp;
extern AVFilter ff_vf_sr;
extern AVFilter ff_vf_ssim;
extern AVFilter ff_vf_stereo3d;
extern AVFilter ff_vf_streamselect;
extern AVFilter ff_vf_subtitles;
extern AVFilter ff_vf_super2xsai;
extern AVFilter ff_vf_swaprect;
extern AVFilter ff_vf_swapuv;
extern AVFilter ff_vf_tblend;
extern AVFilter ff_vf_telecine;
extern AVFilter ff_vf_thistogram;
extern AVFilter ff_vf_threshold;
extern AVFilter ff_vf_thumbnail;
extern AVFilter ff_vf_thumbnail_cuda;
extern AVFilter ff_vf_tile;
extern AVFilter ff_vf_tinterlace;
extern AVFilter ff_vf_tlut2;
extern AVFilter ff_vf_tmedian;
extern AVFilter ff_vf_tmix;
extern AVFilter ff_vf_tonemap;
extern AVFilter ff_vf_tonemap_opencl;
extern AVFilter ff_vf_tonemap_vaapi;
extern AVFilter ff_vf_tpad;
extern AVFilter ff_vf_transpose;
extern AVFilter ff_vf_transpose_npp;
extern AVFilter ff_vf_transpose_opencl;
extern AVFilter ff_vf_transpose_vaapi;
extern AVFilter ff_vf_trim;
extern AVFilter ff_vf_unpremultiply;
extern AVFilter ff_vf_unsharp;
extern AVFilter ff_vf_unsharp_opencl;
extern AVFilter ff_vf_untile;
extern AVFilter ff_vf_uspp;
extern AVFilter ff_vf_v360;
extern AVFilter ff_vf_vaguedenoiser;
extern AVFilter ff_vf_vectorscope;
extern AVFilter ff_vf_vflip;
extern AVFilter ff_vf_vfrdet;
extern AVFilter ff_vf_vibrance;
extern AVFilter ff_vf_vidstabdetect;
extern AVFilter ff_vf_vidstabtransform;
extern AVFilter ff_vf_vignette;
extern AVFilter ff_vf_vmafmotion;
extern AVFilter ff_vf_vpp_qsv;
extern AVFilter ff_vf_vstack;
extern AVFilter ff_vf_w3fdif;
extern AVFilter ff_vf_waveform;
extern AVFilter ff_vf_weave;
extern AVFilter ff_vf_xbr;
extern AVFilter ff_vf_xfade;
extern AVFilter ff_vf_xfade_opencl;
extern AVFilter ff_vf_xmedian;
extern AVFilter ff_vf_xstack;
extern AVFilter ff_vf_yadif;
extern AVFilter ff_vf_yadif_cuda;
extern AVFilter ff_vf_yaepblur;
extern AVFilter ff_vf_zmq;
extern AVFilter ff_vf_zoompan;
extern AVFilter ff_vf_zscale;
extern AVFilter ff_vsrc_allrgb;
extern AVFilter ff_vsrc_allyuv;
extern AVFilter ff_vsrc_cellauto;
extern AVFilter ff_vsrc_color;
extern AVFilter ff_vsrc_coreimagesrc;
extern AVFilter ff_vsrc_frei0r_src;
extern AVFilter ff_vsrc_gradients;
extern AVFilter ff_vsrc_haldclutsrc;
extern AVFilter ff_vsrc_life;
extern AVFilter ff_vsrc_mandelbrot;
extern AVFilter ff_vsrc_mptestsrc;
extern AVFilter ff_vsrc_nullsrc;
extern AVFilter ff_vsrc_openclsrc;
extern AVFilter ff_vsrc_pal75bars;
extern AVFilter ff_vsrc_pal100bars;
extern AVFilter ff_vsrc_rgbtestsrc;
extern AVFilter ff_vsrc_sierpinski;
extern AVFilter ff_vsrc_smptebars;
extern AVFilter ff_vsrc_smptehdbars;
extern AVFilter ff_vsrc_testsrc;
extern AVFilter ff_vsrc_testsrc2;
extern AVFilter ff_vsrc_yuvtestsrc;
extern AVFilter ff_vsink_nullsink;
/* multimedia filters */
extern AVFilter ff_avf_abitscope;
extern AVFilter ff_avf_adrawgraph;
extern AVFilter ff_avf_agraphmonitor;
extern AVFilter ff_avf_ahistogram;
extern AVFilter ff_avf_aphasemeter;
extern AVFilter ff_avf_avectorscope;
extern AVFilter ff_avf_concat;
extern AVFilter ff_avf_showcqt;
extern AVFilter ff_avf_showfreqs;
extern AVFilter ff_avf_showspatial;
extern AVFilter ff_avf_showspectrum;
extern AVFilter ff_avf_showspectrumpic;
extern AVFilter ff_avf_showvolume;
extern AVFilter ff_avf_showwaves;
extern AVFilter ff_avf_showwavespic;
extern AVFilter ff_vaf_spectrumsynth;
/* multimedia sources */
extern AVFilter ff_avsrc_amovie;
extern AVFilter ff_avsrc_movie;
/* those filters are part of public or internal API,
* they are formatted to not be found by the grep
* as they are manually added again (due to their 'names'
* being the same while having different 'types'). */
extern AVFilter ff_asrc_abuffer;
extern AVFilter ff_vsrc_buffer;
extern AVFilter ff_asink_abuffer;
extern AVFilter ff_vsink_buffer;
extern AVFilter ff_af_afifo;
extern AVFilter ff_vf_fifo;
#include "libavfilter/filter_list.c"
/*
 * Iterate over all registered filters.
 * *opaque carries the current index encoded as a pointer; it is advanced
 * only while entries remain, and NULL (the list terminator) ends iteration.
 */
const AVFilter *av_filter_iterate(void **opaque)
{
    uintptr_t idx = (uintptr_t)*opaque;
    const AVFilter *filter = filter_list[idx];

    if (filter)
        *opaque = (void *)(idx + 1);

    return filter;
}
/*
 * Look up a filter by its exact name; returns NULL if 'name' is NULL or
 * no filter matches.
 */
const AVFilter *avfilter_get_by_name(const char *name)
{
    void *opaque = NULL;
    const AVFilter *f;

    if (!name)
        return NULL;

    while ((f = av_filter_iterate(&opaque)) != NULL) {
        if (strcmp(f->name, name) == 0)
            return (AVFilter *)f;
    }

    return NULL;
}
#if FF_API_NEXT
FF_DISABLE_DEPRECATION_WARNINGS
static AVOnce av_filter_next_init = AV_ONCE_INIT;

/* Lazily chain the constant filter_list[] through the deprecated ->next
 * pointers so the legacy linked-list iteration API keeps working. */
static void av_filter_init_next(void)
{
    AVFilter *prev = NULL, *p;
    void *i = 0;
    while ((p = (AVFilter*)av_filter_iterate(&i))) {
        if (prev)
            prev->next = p;
        prev = p;
    }
}

/* Deprecated: registration is now static; this only builds the ->next chain. */
void avfilter_register_all(void)
{
    ff_thread_once(&av_filter_next_init, av_filter_init_next);
}

/* Deprecated no-op: the 'filter' argument is intentionally ignored. */
int avfilter_register(AVFilter *filter)
{
    ff_thread_once(&av_filter_next_init, av_filter_init_next);
    return 0;
}

/* Deprecated linked-list iteration; NULL 'prev' starts from the first filter. */
const AVFilter *avfilter_next(const AVFilter *prev)
{
    ff_thread_once(&av_filter_next_init, av_filter_init_next);
    return prev ? prev->next : filter_list[0];
}
FF_ENABLE_DEPRECATION_WARNINGS
#endif

View File

@@ -0,0 +1,46 @@
/*
* Copyright (c) 2010 S.N. Hemanth Meenakshisundaram <smeenaks@ucsd.edu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/internal.h"
#include "avfilter.h"
#include "internal.h"
/* Sink callback: discard every incoming frame by freeing it immediately. */
static int null_filter_frame(AVFilterLink *inlink, AVFrame *in)
{
    av_frame_free(&in);
    return 0;
}
/* One audio input whose frames are dropped by null_filter_frame(). */
static const AVFilterPad avfilter_asink_anullsink_inputs[] = {
    {
        .name = "default",
        .type = AVMEDIA_TYPE_AUDIO,
        .filter_frame = null_filter_frame,
    },
    { NULL },
};

/* Definition of the "anullsink" filter: consumes audio, produces nothing. */
AVFilter ff_asink_anullsink = {
    .name = "anullsink",
    .description = NULL_IF_CONFIG_SMALL("Do absolutely nothing with the input audio."),
    .priv_size = 0,
    .inputs = avfilter_asink_anullsink_inputs,
    .outputs = NULL,
};

330
externals/ffmpeg/libavfilter/asrc_afirsrc.c vendored Executable file
View File

@@ -0,0 +1,330 @@
/*
* Copyright (c) 2020 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "libavutil/tx.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "window_func.h"
typedef struct AudioFIRSourceContext {
    const AVClass *class;

    /* User options (see afirsrc_options[]). */
    char *freq_points_str;   /* "frequency" option: list of normalized freqs */
    char *magnitude_str;     /* "magnitude" option: list of magnitudes */
    char *phase_str;         /* "phase" option: list of phases */
    int nb_taps;             /* FIR length; forced odd in init() */
    int sample_rate;
    int nb_samples;          /* samples per output frame */
    int win_func;            /* window function applied to the taps */

    /* Parsed/derived state. */
    AVComplexFloat *complexf; /* frequency-domain response + FFT output */
    float *freq;              /* parsed frequency points */
    float *magnitude;         /* parsed magnitude values */
    float *phase;             /* parsed phase values */
    int freq_size;            /* allocated sizes for av_fast_realloc() */
    int magnitude_size;
    int phase_size;
    int nb_freq;              /* element counts of the parsed arrays */
    int nb_magnitude;
    int nb_phase;

    float *taps;              /* the generated FIR coefficients */
    float *win;               /* window function samples */
    int64_t pts;              /* next output timestamp */

    AVTXContext *tx_ctx;      /* FFT used to turn the response into taps */
    av_tx_fn tx_fn;
} AudioFIRSourceContext;
#define OFFSET(x) offsetof(AudioFIRSourceContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Option table: each long option has a single-letter alias, and "win_func"
 * exposes the named window-function constants from window_func.h. */
static const AVOption afirsrc_options[] = {
    { "taps", "set number of taps", OFFSET(nb_taps), AV_OPT_TYPE_INT, {.i64=1025}, 9, UINT16_MAX, FLAGS },
    { "t", "set number of taps", OFFSET(nb_taps), AV_OPT_TYPE_INT, {.i64=1025}, 9, UINT16_MAX, FLAGS },
    { "frequency", "set frequency points", OFFSET(freq_points_str), AV_OPT_TYPE_STRING, {.str="0 1"}, 0, 0, FLAGS },
    { "f", "set frequency points", OFFSET(freq_points_str), AV_OPT_TYPE_STRING, {.str="0 1"}, 0, 0, FLAGS },
    { "magnitude", "set magnitude values", OFFSET(magnitude_str), AV_OPT_TYPE_STRING, {.str="1 1"}, 0, 0, FLAGS },
    { "m", "set magnitude values", OFFSET(magnitude_str), AV_OPT_TYPE_STRING, {.str="1 1"}, 0, 0, FLAGS },
    { "phase", "set phase values", OFFSET(phase_str), AV_OPT_TYPE_STRING, {.str="0 0"}, 0, 0, FLAGS },
    { "p", "set phase values", OFFSET(phase_str), AV_OPT_TYPE_STRING, {.str="0 0"}, 0, 0, FLAGS },
    { "sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT_MAX, FLAGS },
    { "r", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT_MAX, FLAGS },
    { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, FLAGS },
    { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, FLAGS },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64=WFUNC_BLACKMAN}, 0, NB_WFUNC-1, FLAGS, "win_func" },
    { "w", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64=WFUNC_BLACKMAN}, 0, NB_WFUNC-1, FLAGS, "win_func" },
    { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
    { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
    { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
    { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
    { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
    { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
    { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
    { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
    { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
    { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
    { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
    { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
    { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
    { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
    { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
    { "dolph", "Dolph-Chebyshev", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH}, 0, 0, FLAGS, "win_func" },
    { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" },
    { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" },
    { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" },
    { "bohman" , "Bohman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BOHMAN}, 0, 0, FLAGS, "win_func" },
    {NULL}
};

AVFILTER_DEFINE_CLASS(afirsrc);
/*
 * Validate options at init time: the FIR design requires an odd number
 * of taps, so even values are rounded up (with a warning).
 */
static av_cold int init(AVFilterContext *ctx)
{
    AudioFIRSourceContext *s = ctx->priv;

    if ((s->nb_taps & 1) == 0) {
        av_log(s, AV_LOG_WARNING, "Number of taps %d must be odd length.\n", s->nb_taps);
        s->nb_taps |= 1;
    }

    return 0;
}
/* Release all buffers and the FFT context (av_freep/av_tx_uninit are
 * NULL-safe, so this is valid even after a partial config_output). */
static av_cold void uninit(AVFilterContext *ctx)
{
    AudioFIRSourceContext *s = ctx->priv;

    av_tx_uninit(&s->tx_ctx);
    av_freep(&s->complexf);
    av_freep(&s->freq);
    av_freep(&s->magnitude);
    av_freep(&s->phase);
    av_freep(&s->taps);
    av_freep(&s->win);
}
/*
 * Advertise the source's output: mono, float samples, at the single
 * user-configured sample rate.
 */
static av_cold int query_formats(AVFilterContext *ctx)
{
    AudioFIRSourceContext *s = ctx->priv;
    static const int64_t chlayouts[] = { AV_CH_LAYOUT_MONO, -1 };
    /* not static: depends on the per-instance sample_rate option */
    int sample_rates[] = { s->sample_rate, -1 };
    static const enum AVSampleFormat sample_fmts[] = {
        AV_SAMPLE_FMT_FLT,
        AV_SAMPLE_FMT_NONE
    };
    AVFilterFormats *formats;
    AVFilterChannelLayouts *layouts;
    int ret;

    formats = ff_make_format_list(sample_fmts);
    if (!formats)
        return AVERROR(ENOMEM);
    ret = ff_set_common_formats (ctx, formats);
    if (ret < 0)
        return ret;

    layouts = avfilter_make_format64_list(chlayouts);
    if (!layouts)
        return AVERROR(ENOMEM);
    ret = ff_set_common_channel_layouts(ctx, layouts);
    if (ret < 0)
        return ret;

    formats = ff_make_format_list(sample_rates);
    if (!formats)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, formats);
}
/*
 * Parse a separator-delimited list of numbers from 'str' into a growing
 * float array. On success *items holds the values, *nb_items their count,
 * and *items_size the allocated capacity tracked by av_fast_realloc().
 * Returns 0, AVERROR(ENOMEM) on allocation failure, or AVERROR(EINVAL)
 * for a NULL input string. The caller owns and frees *items (done in
 * uninit() in this file), including after a failure return.
 *
 * NOTE(review): items_size is int* but av_fast_realloc() takes
 * unsigned int* for its size argument — works in practice for the small
 * sizes here, but the types should match; confirm against libavutil/mem.h.
 */
static int parse_string(char *str, float **items, int *nb_items, int *items_size)
{
    float *new_items;
    char *tail;

    /* Start with room for one element. */
    new_items = av_fast_realloc(NULL, items_size, 1 * sizeof(float));
    if (!new_items)
        return AVERROR(ENOMEM);
    *items = new_items;

    tail = str;
    if (!tail)
        return AVERROR(EINVAL);

    do {
        /* av_strtod advances 'tail' past the parsed number; the single
         * character after it (the separator) is then skipped. */
        (*items)[(*nb_items)++] = av_strtod(tail, &tail);
        new_items = av_fast_realloc(*items, items_size, (*nb_items + 1) * sizeof(float));
        if (!new_items)
            return AVERROR(ENOMEM);
        *items = new_items;
        if (tail && *tail)
            tail++;
    } while (tail && *tail);

    return 0;
}
/*
 * Build a frequency response of 'minterp' complex bins by piecewise-linear
 * interpolation of (freq, magnitude, phase) over the 'm' given points.
 * freq[] is normalized to [0,1] and must be non-decreasing; each output
 * bin x = i/minterp is interpolated within the first segment with
 * x <= freq[j].
 */
static void lininterp(AVComplexFloat *complexf,
                      const float *freq,
                      const float *magnitude,
                      const float *phase,
                      int m, int minterp)
{
    for (int i = 0; i < minterp; i++) {
        /* Position of this bin on the normalized frequency axis;
           invariant over the segment search below. */
        const float x = i / (float)minterp;

        for (int j = 1; j < m; j++) {
            if (x <= freq[j]) {
                const float mg = (x - freq[j-1]) / (freq[j] - freq[j-1]) * (magnitude[j] - magnitude[j-1]) + magnitude[j-1];
                const float ph = (x - freq[j-1]) / (freq[j] - freq[j-1]) * (phase[j] - phase[j-1]) + phase[j-1];

                complexf[i].re = mg * cosf(ph);
                complexf[i].im = mg * sinf(ph);
                break;
            }
        }
    }
}
static av_cold int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioFIRSourceContext *s = ctx->priv;
    float overlap, scale = 1.f, compensation;
    int fft_size, middle, ret;

    /* Parse the three user-supplied point lists. */
    s->nb_freq = s->nb_magnitude = s->nb_phase = 0;
    ret = parse_string(s->freq_points_str, &s->freq, &s->nb_freq, &s->freq_size);
    if (ret < 0)
        return ret;

    ret = parse_string(s->magnitude_str, &s->magnitude, &s->nb_magnitude, &s->magnitude_size);
    if (ret < 0)
        return ret;

    ret = parse_string(s->phase_str, &s->phase, &s->nb_phase, &s->phase_size);
    if (ret < 0)
        return ret;

    /* All three lists must have the same length and at least two points
     * are required for interpolation.  BUGFIX: the original combined the
     * comparisons with '&&', so a single mismatched list length (or fewer
     * than two points) slipped through unrejected. */
    if (s->nb_freq != s->nb_magnitude || s->nb_freq != s->nb_phase || s->nb_freq < 2) {
        av_log(ctx, AV_LOG_ERROR, "Number of frequencies, magnitudes and phases must be same and >= 2.\n");
        return AVERROR(EINVAL);
    }

    /* Frequencies are normalized to [0, 1], non-decreasing, and must
     * start at 0 and end at 1. */
    for (int i = 0; i < s->nb_freq; i++) {
        if (i == 0 && s->freq[i] != 0.f) {
            av_log(ctx, AV_LOG_ERROR, "First frequency must be 0.\n");
            return AVERROR(EINVAL);
        }

        if (i == s->nb_freq - 1 && s->freq[i] != 1.f) {
            av_log(ctx, AV_LOG_ERROR, "Last frequency must be 1.\n");
            return AVERROR(EINVAL);
        }

        if (i && s->freq[i] < s->freq[i-1]) {
            av_log(ctx, AV_LOG_ERROR, "Frequencies must be in increasing order.\n");
            return AVERROR(EINVAL);
        }
    }

    /* Smallest power of two strictly greater than nb_taps. */
    fft_size = 1 << (av_log2(s->nb_taps) + 1);
    s->complexf = av_calloc(fft_size * 2, sizeof(*s->complexf));
    if (!s->complexf)
        return AVERROR(ENOMEM);

    ret = av_tx_init(&s->tx_ctx, &s->tx_fn, AV_TX_FLOAT_FFT, 1, fft_size, &scale, 0);
    if (ret < 0)
        return ret;

    s->taps = av_calloc(s->nb_taps, sizeof(*s->taps));
    if (!s->taps)
        return AVERROR(ENOMEM);

    s->win = av_calloc(s->nb_taps, sizeof(*s->win));
    if (!s->win)
        return AVERROR(ENOMEM);

    generate_window_func(s->win, s->nb_taps, s->win_func, &overlap);

    /* Frequency-sampling design: build the desired complex response,
     * inverse-transform it and apply the window to get the taps. */
    lininterp(s->complexf, s->freq, s->magnitude, s->phase, s->nb_freq, fft_size / 2);

    s->tx_fn(s->tx_ctx, s->complexf + fft_size, s->complexf, sizeof(float));

    compensation = 2.f / fft_size;
    middle = s->nb_taps / 2;

    /* Mirror the second half of the (symmetric) response around the
     * center tap while scaling and windowing. */
    for (int i = 0; i <= middle; i++) {
        s->taps[         i] = s->complexf[fft_size + middle - i].re * compensation * s->win[i];
        s->taps[middle + i] = s->complexf[fft_size +          i].re * compensation * s->win[middle + i];
    }

    s->pts = 0;

    return 0;
}
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    AudioFIRSourceContext *s = ctx->priv;
    AVFrame *out;
    int count;

    /* Stream out the precomputed taps, at most nb_samples per frame. */
    count = FFMIN(s->nb_samples, s->nb_taps - s->pts);
    if (!count)
        return AVERROR_EOF;

    out = ff_get_audio_buffer(outlink, count);
    if (!out)
        return AVERROR(ENOMEM);

    memcpy(out->data[0], s->taps + s->pts, count * sizeof(float));
    out->pts = s->pts;
    s->pts  += count;

    return ff_filter_frame(outlink, out);
}
/* Single mono audio output; frames are produced on demand. */
static const AVFilterPad afirsrc_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
        .config_props  = config_output,
    },
    { NULL }
};
/* Filter definition: a pure source (no inputs) emitting FIR coefficients. */
AVFilter ff_asrc_afirsrc = {
    .name          = "afirsrc",
    .description   = NULL_IF_CONFIG_SMALL("Generate a FIR coefficients audio stream."),
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(AudioFIRSourceContext),
    .inputs        = NULL,
    .outputs       = afirsrc_outputs,
    .priv_class    = &afirsrc_class,
};

258
externals/ffmpeg/libavfilter/asrc_anoisesrc.c vendored Executable file
View File

@@ -0,0 +1,258 @@
/*
* Copyright (c) 2015 Kyle Swanson <k@ylo.ph>.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "filters.h"
#include "internal.h"
#include "libavutil/lfg.h"
#include "libavutil/random_seed.h"
typedef struct ANoiseSrcContext {
    const AVClass *class;
    int sample_rate;        ///< output sample rate (Hz)
    double amplitude;       ///< peak amplitude in [0, 1]
    int64_t duration;       ///< requested duration; after config_props(), remaining samples
    int color;              ///< selected NoiseMode
    int64_t seed;           ///< LFG seed; -1 means pick a random one
    int nb_samples;         ///< samples per output frame
    int64_t pts;            ///< timestamp of the next frame, in samples
    int infinite;           ///< set when duration == 0 (never stop)
    double (*filter)(double white, double *buf, double half_amplitude); ///< per-sample coloring filter
    double buf[7];          ///< filter state (pink/blue use all 7 taps)
    AVLFG c;                ///< white-noise generator
} ANoiseSrcContext;
enum NoiseMode {
NM_WHITE,
NM_PINK,
NM_BROWN,
NM_BLUE,
NM_VIOLET,
NM_VELVET,
NM_NB
};
#define OFFSET(x) offsetof(ANoiseSrcContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Options; each long name has a short alias (and "colour" for "color"). */
static const AVOption anoisesrc_options[] = {
    { "sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 15, INT_MAX, FLAGS },
    { "r", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 15, INT_MAX, FLAGS },
    { "amplitude", "set amplitude", OFFSET(amplitude), AV_OPT_TYPE_DOUBLE, {.dbl = 1.}, 0., 1., FLAGS },
    { "a", "set amplitude", OFFSET(amplitude), AV_OPT_TYPE_DOUBLE, {.dbl = 1.}, 0., 1., FLAGS },
    { "duration", "set duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0}, 0, INT64_MAX, FLAGS },
    { "d", "set duration", OFFSET(duration), AV_OPT_TYPE_DURATION, {.i64 = 0}, 0, INT64_MAX, FLAGS },
    { "color", "set noise color", OFFSET(color), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NM_NB - 1, FLAGS, "color" },
    { "colour", "set noise color", OFFSET(color), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NM_NB - 1, FLAGS, "color" },
    { "c", "set noise color", OFFSET(color), AV_OPT_TYPE_INT, {.i64 = 0}, 0, NM_NB - 1, FLAGS, "color" },
    { "white", 0, 0, AV_OPT_TYPE_CONST, {.i64 = NM_WHITE}, 0, 0, FLAGS, "color" },
    { "pink", 0, 0, AV_OPT_TYPE_CONST, {.i64 = NM_PINK}, 0, 0, FLAGS, "color" },
    { "brown", 0, 0, AV_OPT_TYPE_CONST, {.i64 = NM_BROWN}, 0, 0, FLAGS, "color" },
    { "blue", 0, 0, AV_OPT_TYPE_CONST, {.i64 = NM_BLUE}, 0, 0, FLAGS, "color" },
    { "violet", 0, 0, AV_OPT_TYPE_CONST, {.i64 = NM_VIOLET}, 0, 0, FLAGS, "color" },
    { "velvet", 0, 0, AV_OPT_TYPE_CONST, {.i64 = NM_VELVET}, 0, 0, FLAGS, "color" },
    { "seed", "set random seed", OFFSET(seed), AV_OPT_TYPE_INT64, {.i64 = -1}, -1, UINT_MAX, FLAGS },
    { "s", "set random seed", OFFSET(seed), AV_OPT_TYPE_INT64, {.i64 = -1}, -1, UINT_MAX, FLAGS },
    { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, FLAGS },
    { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, FLAGS },
    {NULL}
};

AVFILTER_DEFINE_CLASS(anoisesrc);
static av_cold int query_formats(AVFilterContext *ctx)
{
    ANoiseSrcContext *s = ctx->priv;
    static const int64_t mono_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
    static const enum AVSampleFormat fmts[] = {
        AV_SAMPLE_FMT_DBL,
        AV_SAMPLE_FMT_NONE
    };
    int rates[] = { s->sample_rate, -1 };
    AVFilterChannelLayouts *chlayouts;
    AVFilterFormats *list;
    int err;

    /* Output is packed double samples, mono, at the configured rate. */
    list = ff_make_format_list(fmts);
    if (!list)
        return AVERROR(ENOMEM);
    err = ff_set_common_formats (ctx, list);
    if (err < 0)
        return err;

    chlayouts = avfilter_make_format64_list(mono_layouts);
    if (!chlayouts)
        return AVERROR(ENOMEM);
    err = ff_set_common_channel_layouts(ctx, chlayouts);
    if (err < 0)
        return err;

    list = ff_make_format_list(rates);
    if (!list)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, list);
}
static double white_filter(double white, double *buf, double ha)
{
    /* White noise: pass the input sample through unchanged; the state
     * buffer and half-amplitude are unused. */
    return white;
}
static double pink_filter(double white, double *buf, double ha)
{
    double out;

    /* Paul Kellet's economy pink-noise filter: a bank of six one-pole
     * low-pass sections plus a direct term.
     * http://www.musicdsp.org/files/pink.txt */
    buf[0] =  0.99886 * buf[0] + white * 0.0555179;
    buf[1] =  0.99332 * buf[1] + white * 0.0750759;
    buf[2] =  0.96900 * buf[2] + white * 0.1538520;
    buf[3] =  0.86650 * buf[3] + white * 0.3104856;
    buf[4] =  0.55000 * buf[4] + white * 0.5329522;
    buf[5] = -0.7616  * buf[5] - white * 0.0168980;

    out    = buf[0] + buf[1] + buf[2] + buf[3] + buf[4] + buf[5] + buf[6] + white * 0.5362;
    buf[6] = white * 0.115926;   /* delayed direct term for the next call */

    return out * 0.11;
}
static double blue_filter(double white, double *buf, double ha)
{
    double out;

    /* Same coefficient set as pink_filter, but with the feedback terms
     * subtracted rather than added, tilting the spectrum upwards. */
    buf[0] = 0.0555179 * white - 0.99886 * buf[0];
    buf[1] = 0.0750759 * white - 0.99332 * buf[1];
    buf[2] = 0.1538520 * white - 0.96900 * buf[2];
    buf[3] = 0.3104856 * white - 0.86650 * buf[3];
    buf[4] = 0.5329522 * white - 0.55000 * buf[4];
    buf[5] = -0.016898 * white + 0.76160 * buf[5];

    out    = buf[0] + buf[1] + buf[2] + buf[3] + buf[4] + buf[5] + buf[6] + white * 0.5362;
    buf[6] = white * 0.115926;   /* delayed direct term for the next call */

    return out * 0.11;
}
static double brown_filter(double white, double *buf, double ha)
{
    /* Leaky integrator of the white input (Brownian noise); buf[0]
     * holds the running value. */
    const double out = ((0.02 * white) + buf[0]) / 1.02;

    buf[0] = out;
    return out * 3.5;
}
static double violet_filter(double white, double *buf, double ha)
{
    /* Differentiator-like counterpart of brown_filter: subtracts the
     * previous value instead of adding it. */
    const double out = ((0.02 * white) - buf[0]) / 1.02;

    buf[0] = out;
    return out * 3.5;
}
static double velvet_filter(double white, double *buf, double ha)
{
    /* Ternary impulse train: +2*ha when the input exceeds +ha, -2*ha
     * when it is below -ha, 0 otherwise.  State buffer is unused. */
    if (white > ha)
        return 2. * ha;
    if (white < -ha)
        return 2. * ha * -1;
    return 0.;
}
static av_cold int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    ANoiseSrcContext *s = ctx->priv;
    /* Per-color sample filters, indexed by NoiseMode. */
    static double (*const filters[NM_NB])(double, double *, double) = {
        [NM_WHITE]  = white_filter,
        [NM_PINK]   = pink_filter,
        [NM_BROWN]  = brown_filter,
        [NM_BLUE]   = blue_filter,
        [NM_VIOLET] = violet_filter,
        [NM_VELVET] = velvet_filter,
    };

    /* -1 requests a fresh random seed. */
    if (s->seed == -1)
        s->seed = av_get_random_seed();
    av_lfg_init(&s->c, s->seed);

    /* Zero duration means "generate forever"; otherwise convert the
     * duration from AV_TIME_BASE units to a sample count. */
    if (s->duration == 0)
        s->infinite = 1;
    s->duration = av_rescale(s->duration, s->sample_rate, AV_TIME_BASE);

    /* color is range-limited by the option table, so the lookup is safe. */
    s->filter = filters[s->color];

    return 0;
}
static int activate(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    ANoiseSrcContext *s = ctx->priv;
    AVFrame *out;
    double *samples;
    int count, i;

    if (!ff_outlink_frame_wanted(outlink))
        return FFERROR_NOT_READY;

    /* Finite stream: stop once the duration is exhausted and clip the
     * final frame to whatever is left. */
    if (!s->infinite && s->duration <= 0) {
        ff_outlink_set_status(outlink, AVERROR_EOF, s->pts);
        return 0;
    }
    count = s->nb_samples;
    if (!s->infinite && s->duration < count)
        count = s->duration;

    out = ff_get_audio_buffer(outlink, count);
    if (!out)
        return AVERROR(ENOMEM);

    samples = (double *)out->data[0];
    for (i = 0; i < count; i++) {
        /* Uniform white noise in [-amplitude, amplitude], then colored. */
        double white = s->amplitude * ((2 * ((double) av_lfg_get(&s->c) / 0xffffffff)) - 1);
        samples[i] = s->filter(white, s->buf, s->amplitude * 0.5);
    }

    if (!s->infinite)
        s->duration -= count;

    out->pts = s->pts;
    s->pts  += count;

    return ff_filter_frame(outlink, out);
}
/* Single mono audio output; frames are produced by activate(). */
static const AVFilterPad anoisesrc_outputs[] = {
    {
        .name         = "default",
        .type         = AVMEDIA_TYPE_AUDIO,
        .config_props = config_props,
    },
    { NULL }
};
/* Filter definition: a pure source (no inputs) generating colored noise. */
AVFilter ff_asrc_anoisesrc = {
    .name          = "anoisesrc",
    .description   = NULL_IF_CONFIG_SMALL("Generate a noise audio signal."),
    .query_formats = query_formats,
    .priv_size     = sizeof(ANoiseSrcContext),
    .inputs        = NULL,
    .activate      = activate,
    .outputs       = anoisesrc_outputs,
    .priv_class    = &anoisesrc_class,
};

145
externals/ffmpeg/libavfilter/asrc_anullsrc.c vendored Executable file
View File

@@ -0,0 +1,145 @@
/*
* Copyright 2010 S.N. Hemanth Meenakshisundaram <smeenaks ucsd edu>
* Copyright 2010 Stefano Sabatini <stefano.sabatini-lala poste it>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* null audio source
*/
#include <inttypes.h>
#include <stdio.h>
#include "libavutil/channel_layout.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
typedef struct ANullContext {
    const AVClass *class;
    char *channel_layout_str;   ///< "channel_layout" option as given by the user
    uint64_t channel_layout;    ///< parsed layout (filled in init())
    char *sample_rate_str;      ///< "sample_rate" option as given by the user
    int sample_rate;            ///< parsed rate (filled in init())
    int nb_samples;             ///< number of samples per requested frame
    int64_t pts;                ///< timestamp of the next frame, in samples
} ANullContext;
#define OFFSET(x) offsetof(ANullContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Options are taken as strings so symbolic values ("stereo") work. */
static const AVOption anullsrc_options[]= {
    { "channel_layout", "set channel_layout", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, FLAGS },
    { "cl", "set channel_layout", OFFSET(channel_layout_str), AV_OPT_TYPE_STRING, {.str = "stereo"}, 0, 0, FLAGS },
    { "sample_rate", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS },
    { "r", "set sample rate", OFFSET(sample_rate_str) , AV_OPT_TYPE_STRING, {.str = "44100"}, 0, 0, FLAGS },
    { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
    { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 0, INT_MAX, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(anullsrc);
static av_cold int init(AVFilterContext *ctx)
{
    ANullContext *null = ctx->priv;
    int ret;

    /* Parse the string-typed options into their numeric forms. */
    ret = ff_parse_sample_rate(&null->sample_rate, null->sample_rate_str, ctx);
    if (ret < 0)
        return ret;

    ret = ff_parse_channel_layout(&null->channel_layout, NULL,
                                  null->channel_layout_str, ctx);
    if (ret < 0)
        return ret;

    return 0;
}
static int query_formats(AVFilterContext *ctx)
{
    ANullContext *null = ctx->priv;
    int64_t chlayouts[] = { null->channel_layout, -1 };
    int sample_rates[] = { null->sample_rate, -1 };
    int ret;

    /* Any sample format is acceptable; layout and rate are pinned to the
     * user-configured values. */
    ret = ff_set_common_formats(ctx, ff_all_formats(AVMEDIA_TYPE_AUDIO));
    if (ret < 0)
        return ret;

    ret = ff_set_common_channel_layouts(ctx, avfilter_make_format64_list(chlayouts));
    if (ret < 0)
        return ret;

    ret = ff_set_common_samplerates(ctx, ff_make_format_list(sample_rates));
    if (ret < 0)
        return ret;

    return 0;
}
static int config_props(AVFilterLink *outlink)
{
    ANullContext *null = outlink->src->priv;
    char layout_desc[128];

    /* Log the negotiated configuration. */
    av_get_channel_layout_string(layout_desc, sizeof(layout_desc), 0,
                                 null->channel_layout);
    av_log(outlink->src, AV_LOG_VERBOSE,
           "sample_rate:%d channel_layout:'%s' nb_samples:%d\n",
           null->sample_rate, layout_desc, null->nb_samples);

    return 0;
}
static int request_frame(AVFilterLink *outlink)
{
    ANullContext *null = outlink->src->priv;
    AVFrame *frame;
    int ret;

    /* Emit one frame of nb_samples (empty) samples. */
    frame = ff_get_audio_buffer(outlink, null->nb_samples);
    if (!frame)
        return AVERROR(ENOMEM);

    frame->pts = null->pts;

    ret = ff_filter_frame(outlink, frame);
    if (ret < 0)
        return ret;

    /* Only advance the clock once the frame was accepted. */
    null->pts += null->nb_samples;
    return ret;
}
/* Single audio output; frames are produced on demand. */
static const AVFilterPad avfilter_asrc_anullsrc_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_props,
        .request_frame = request_frame,
    },
    { NULL }
};
/* Filter definition: a pure source (no inputs) emitting empty frames. */
AVFilter ff_asrc_anullsrc = {
    .name          = "anullsrc",
    .description   = NULL_IF_CONFIG_SMALL("Null audio source, return empty audio frames."),
    .init          = init,
    .query_formats = query_formats,
    .priv_size     = sizeof(ANullContext),
    .inputs        = NULL,
    .outputs       = avfilter_asrc_anullsrc_outputs,
    .priv_class    = &anullsrc_class,
};

287
externals/ffmpeg/libavfilter/asrc_flite.c vendored Executable file
View File

@@ -0,0 +1,287 @@
/*
* Copyright (c) 2012 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* flite voice synth source
*/
#include <flite/flite.h>
#include "libavutil/channel_layout.h"
#include "libavutil/file.h"
#include "libavutil/opt.h"
#include "avfilter.h"
#include "audio.h"
#include "formats.h"
#include "internal.h"
typedef struct FliteContext {
    const AVClass *class;
    char *voice_str;            ///< requested voice name ("voice" option)
    char *textfile;             ///< optional file to read the text from
    char *text;                 ///< text to synthesize
    cst_wave *wave;             ///< fully synthesized waveform (created in init())
    int16_t *wave_samples;      ///< read cursor into wave->samples
    int wave_nb_samples;        ///< samples still to be output
    int list_voices;            ///< when set, list voices and exit
    cst_voice *voice;           ///< selected libflite voice
    struct voice_entry *voice_entry;  ///< bookkeeping entry for the voice
    int64_t pts;                ///< timestamp of the next frame, in samples
    int frame_nb_samples;       ///< number of samples per frame
} FliteContext;
#define OFFSET(x) offsetof(FliteContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Options; "text" and "textfile" are mutually exclusive (checked in init). */
static const AVOption flite_options[] = {
    { "list_voices", "list voices and exit", OFFSET(list_voices), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, FLAGS },
    { "nb_samples", "set number of samples per frame", OFFSET(frame_nb_samples), AV_OPT_TYPE_INT, {.i64=512}, 0, INT_MAX, FLAGS },
    { "n", "set number of samples per frame", OFFSET(frame_nb_samples), AV_OPT_TYPE_INT, {.i64=512}, 0, INT_MAX, FLAGS },
    { "text", "set text to speak", OFFSET(text), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "textfile", "set filename of the text to speak", OFFSET(textfile), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 0, FLAGS },
    { "v", "set voice", OFFSET(voice_str), AV_OPT_TYPE_STRING, {.str="kal"}, 0, 0, FLAGS },
    { "voice", "set voice", OFFSET(voice_str), AV_OPT_TYPE_STRING, {.str="kal"}, 0, 0, FLAGS },
    { NULL }
};

AVFILTER_DEFINE_CLASS(flite);
/* Non-zero once flite_init() has succeeded.
 * NOTE(review): volatile does not provide thread safety — confirm that
 * filter initialization is serialized by the caller. */
static volatile int flite_inited = 0;

/* declare functions for all the supported voices */
#define DECLARE_REGISTER_VOICE_FN(name) \
    cst_voice *register_cmu_us_## name(const char *); \
    void unregister_cmu_us_## name(cst_voice *);
DECLARE_REGISTER_VOICE_FN(awb);
DECLARE_REGISTER_VOICE_FN(kal);
DECLARE_REGISTER_VOICE_FN(kal16);
DECLARE_REGISTER_VOICE_FN(rms);
DECLARE_REGISTER_VOICE_FN(slt);
/* Per-voice registration bookkeeping.
 * NOTE(review): the trailing 'voice_entry' also defines a (seemingly
 * unused) global variable of this type, not just the struct tag. */
struct voice_entry {
    const char *name;                         ///< name accepted by the "voice" option
    cst_voice * (*register_fn)(const char *); ///< lazy constructor
    void (*unregister_fn)(cst_voice *);       ///< destructor, run when usage drops to zero
    cst_voice *voice;                         ///< NULL until first registered
    unsigned usage_count;                     ///< active filter instances using this voice
} voice_entry;

#define MAKE_VOICE_STRUCTURE(voice_name) { \
    .name          = #voice_name, \
    .register_fn   = register_cmu_us_ ## voice_name, \
    .unregister_fn = unregister_cmu_us_ ## voice_name, \
}

/* Table of voices compiled into libflite. */
static struct voice_entry voice_entries[] = {
    MAKE_VOICE_STRUCTURE(awb),
    MAKE_VOICE_STRUCTURE(kal),
    MAKE_VOICE_STRUCTURE(kal16),
    MAKE_VOICE_STRUCTURE(rms),
    MAKE_VOICE_STRUCTURE(slt),
};
static void list_voices(void *log_ctx, const char *sep)
{
    const int count = FF_ARRAY_ELEMS(voice_entries);

    /* Print every compiled-in voice name, separated by 'sep', with a
     * newline after the last entry. */
    for (int i = 0; i < count; i++) {
        const char *delim = (i == count - 1) ? "\n" : sep;
        av_log(log_ctx, AV_LOG_INFO, "%s%s", voice_entries[i].name, delim);
    }
}
static int select_voice(struct voice_entry **entry_ret, const char *voice_name, void *log_ctx)
{
    /* Look the voice up by name in the compiled-in table. */
    for (int i = 0; i < FF_ARRAY_ELEMS(voice_entries); i++) {
        struct voice_entry *entry = &voice_entries[i];

        if (strcmp(entry->name, voice_name))
            continue;

        /* Lazily register the voice on first use. */
        if (!entry->voice)
            entry->voice = entry->register_fn(NULL);
        if (!entry->voice) {
            av_log(log_ctx, AV_LOG_ERROR,
                   "Could not register voice '%s'\n", voice_name);
            return AVERROR_UNKNOWN;
        }

        entry->usage_count++;
        *entry_ret = entry;
        return 0;
    }

    av_log(log_ctx, AV_LOG_ERROR, "Could not find voice '%s'\n", voice_name);
    av_log(log_ctx, AV_LOG_INFO, "Choose between the voices: ");
    list_voices(log_ctx, ", ");
    return AVERROR(EINVAL);
}
/**
 * Select the voice, load the text (inline or from file) and synthesize
 * the whole utterance into flite->wave up front; request_frame() then
 * streams it out in chunks.
 */
static av_cold int init(AVFilterContext *ctx)
{
    FliteContext *flite = ctx->priv;
    int ret = 0;

    /* list_voices is a query option: print the table and bail out. */
    if (flite->list_voices) {
        list_voices(ctx, "\n");
        return AVERROR_EXIT;
    }

    /* One-time libflite global initialization.
     * NOTE(review): the guard flag is volatile, not atomic — looks racy
     * if two instances initialize concurrently; confirm serialization. */
    if (!flite_inited) {
        if (flite_init() < 0) {
            av_log(ctx, AV_LOG_ERROR, "flite initialization failed\n");
            return AVERROR_UNKNOWN;
        }
        flite_inited++;
    }

    if ((ret = select_voice(&flite->voice_entry, flite->voice_str, ctx)) < 0)
        return ret;
    flite->voice = flite->voice_entry->voice;

    /* "text" and "textfile" are mutually exclusive. */
    if (flite->textfile && flite->text) {
        av_log(ctx, AV_LOG_ERROR,
               "Both text and textfile options set: only one must be specified\n");
        return AVERROR(EINVAL);
    }

    /* Load the text file into a NUL-terminated private buffer. */
    if (flite->textfile) {
        uint8_t *textbuf;
        size_t textbuf_size;

        if ((ret = av_file_map(flite->textfile, &textbuf, &textbuf_size, 0, ctx)) < 0) {
            av_log(ctx, AV_LOG_ERROR,
                   "The text file '%s' could not be read: %s\n",
                   flite->textfile, av_err2str(ret));
            return ret;
        }

        if (!(flite->text = av_malloc(textbuf_size+1))) {
            av_file_unmap(textbuf, textbuf_size);
            return AVERROR(ENOMEM);
        }
        memcpy(flite->text, textbuf, textbuf_size);
        flite->text[textbuf_size] = 0;
        av_file_unmap(textbuf, textbuf_size);
    }

    if (!flite->text) {
        av_log(ctx, AV_LOG_ERROR,
               "No speech text specified, specify the 'text' or 'textfile' option\n");
        return AVERROR(EINVAL);
    }

    /* synth all the file data in block */
    flite->wave = flite_text_to_wave(flite->text, flite->voice);
    flite->wave_samples    = flite->wave->samples;
    flite->wave_nb_samples = flite->wave->num_samples;
    return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
    FliteContext *flite = ctx->priv;

    /* BUGFIX: init() can fail before a voice is selected (list_voices
     * exit path, unknown voice, ...), in which case voice_entry is NULL
     * and the unconditional decrement dereferenced a NULL pointer. */
    if (flite->voice_entry) {
        if (!--flite->voice_entry->usage_count)
            flite->voice_entry->unregister_fn(flite->voice);
    }
    flite->voice = NULL;
    flite->voice_entry = NULL;

    /* The wave is only created at the very end of init(); guard it too. */
    if (flite->wave)
        delete_wave(flite->wave);
    flite->wave = NULL;
}
static int query_formats(AVFilterContext *ctx)
{
    FliteContext *flite = ctx->priv;
    AVFilterChannelLayouts *chlayouts = NULL;
    AVFilterFormats *sample_formats = NULL;
    AVFilterFormats *sample_rates = NULL;
    int64_t chlayout = av_get_default_channel_layout(flite->wave->num_channels);
    int ret;

    /* Output is pinned to what the synthesized wave provides: its channel
     * count, signed 16-bit samples and its sample rate. */
    if ((ret = ff_add_channel_layout(&chlayouts, chlayout)) < 0)
        return ret;
    if ((ret = ff_set_common_channel_layouts(ctx, chlayouts)) < 0)
        return ret;
    if ((ret = ff_add_format(&sample_formats, AV_SAMPLE_FMT_S16)) < 0)
        return ret;
    if ((ret = ff_set_common_formats(ctx, sample_formats)) < 0)
        return ret;
    if ((ret = ff_add_format(&sample_rates, flite->wave->sample_rate)) < 0)
        return ret;
    if ((ret = ff_set_common_samplerates(ctx, sample_rates)) < 0)
        return ret;

    return 0;
}
static int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FliteContext *flite = ctx->priv;

    /* The link inherits the wave's rate; timestamps are in samples. */
    outlink->sample_rate = flite->wave->sample_rate;
    outlink->time_base   = (AVRational){1, flite->wave->sample_rate};

    av_log(ctx, AV_LOG_VERBOSE, "voice:%s fmt:%s sample_rate:%d\n",
           flite->voice_str,
           av_get_sample_fmt_name(outlink->format), outlink->sample_rate);
    return 0;
}
static int request_frame(AVFilterLink *outlink)
{
    FliteContext *flite = outlink->src->priv;
    AVFrame *out;
    const int count = FFMIN(flite->wave_nb_samples, flite->frame_nb_samples);

    if (!count)
        return AVERROR_EOF;

    out = ff_get_audio_buffer(outlink, count);
    if (!out)
        return AVERROR(ENOMEM);

    /* Interleaved s16 samples: num_channels * 2 bytes per sample frame. */
    memcpy(out->data[0], flite->wave_samples,
           count * flite->wave->num_channels * 2);
    out->pts = flite->pts;
    out->pkt_pos = -1;
    out->sample_rate = flite->wave->sample_rate;

    /* Advance the read cursor. */
    flite->pts             += count;
    flite->wave_samples    += count * flite->wave->num_channels;
    flite->wave_nb_samples -= count;

    return ff_filter_frame(outlink, out);
}
/* Single audio output streaming the synthesized speech. */
static const AVFilterPad flite_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_props,
        .request_frame = request_frame,
    },
    { NULL }
};
/* Filter definition: a pure source (no inputs) backed by libflite. */
AVFilter ff_asrc_flite = {
    .name          = "flite",
    .description   = NULL_IF_CONFIG_SMALL("Synthesize voice from text using libflite."),
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(FliteContext),
    .inputs        = NULL,
    .outputs       = flite_outputs,
    .priv_class    = &flite_class,
};

200
externals/ffmpeg/libavfilter/asrc_hilbert.c vendored Executable file
View File

@@ -0,0 +1,200 @@
/*
* Copyright (c) 2018 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#include "window_func.h"
typedef struct HilbertContext {
    const AVClass *class;
    int sample_rate;   ///< output sample rate (Hz)
    int nb_taps;       ///< number of filter taps (must be odd, checked in init)
    int nb_samples;    ///< samples per output frame
    int win_func;      ///< window function index (WFUNC_*)
    float *taps;       ///< generated coefficients (allocated in config_props)
    int64_t pts;       ///< timestamp of the next frame, in samples
} HilbertContext;
#define OFFSET(x) offsetof(HilbertContext, x)
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Options; the window-function constants mirror window_func.h. */
static const AVOption hilbert_options[] = {
    { "sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT_MAX, FLAGS },
    { "r", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT_MAX, FLAGS },
    { "taps", "set number of taps", OFFSET(nb_taps), AV_OPT_TYPE_INT, {.i64=22051}, 11, UINT16_MAX, FLAGS },
    { "t", "set number of taps", OFFSET(nb_taps), AV_OPT_TYPE_INT, {.i64=22051}, 11, UINT16_MAX, FLAGS },
    { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, FLAGS },
    { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64 = 1024}, 1, INT_MAX, FLAGS },
    { "win_func", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64=WFUNC_BLACKMAN}, 0, NB_WFUNC-1, FLAGS, "win_func" },
    { "w", "set window function", OFFSET(win_func), AV_OPT_TYPE_INT, {.i64=WFUNC_BLACKMAN}, 0, NB_WFUNC-1, FLAGS, "win_func" },
    { "rect", "Rectangular", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_RECT}, 0, 0, FLAGS, "win_func" },
    { "bartlett", "Bartlett", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BARTLETT}, 0, 0, FLAGS, "win_func" },
    { "hanning", "Hanning", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HANNING}, 0, 0, FLAGS, "win_func" },
    { "hamming", "Hamming", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_HAMMING}, 0, 0, FLAGS, "win_func" },
    { "blackman", "Blackman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BLACKMAN}, 0, 0, FLAGS, "win_func" },
    { "welch", "Welch", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_WELCH}, 0, 0, FLAGS, "win_func" },
    { "flattop", "Flat-top", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_FLATTOP}, 0, 0, FLAGS, "win_func" },
    { "bharris", "Blackman-Harris", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHARRIS}, 0, 0, FLAGS, "win_func" },
    { "bnuttall", "Blackman-Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BNUTTALL}, 0, 0, FLAGS, "win_func" },
    { "bhann", "Bartlett-Hann", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BHANN}, 0, 0, FLAGS, "win_func" },
    { "sine", "Sine", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_SINE}, 0, 0, FLAGS, "win_func" },
    { "nuttall", "Nuttall", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_NUTTALL}, 0, 0, FLAGS, "win_func" },
    { "lanczos", "Lanczos", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_LANCZOS}, 0, 0, FLAGS, "win_func" },
    { "gauss", "Gauss", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_GAUSS}, 0, 0, FLAGS, "win_func" },
    { "tukey", "Tukey", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_TUKEY}, 0, 0, FLAGS, "win_func" },
    { "dolph", "Dolph-Chebyshev", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_DOLPH}, 0, 0, FLAGS, "win_func" },
    { "cauchy", "Cauchy", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_CAUCHY}, 0, 0, FLAGS, "win_func" },
    { "parzen", "Parzen", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_PARZEN}, 0, 0, FLAGS, "win_func" },
    { "poisson", "Poisson", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_POISSON}, 0, 0, FLAGS, "win_func" },
    { "bohman" , "Bohman", 0, AV_OPT_TYPE_CONST, {.i64=WFUNC_BOHMAN}, 0, 0, FLAGS, "win_func" },
    {NULL}
};

AVFILTER_DEFINE_CLASS(hilbert);
static av_cold int init(AVFilterContext *ctx)
{
    HilbertContext *s = ctx->priv;

    /* A FIR Hilbert transformer needs an odd tap count so the impulse
     * response is antisymmetric around a center tap. */
    if (s->nb_taps % 2 == 0) {
        av_log(s, AV_LOG_ERROR, "Number of taps %d must be odd length.\n", s->nb_taps);
        return AVERROR(EINVAL);
    }

    return 0;
}
static av_cold void uninit(AVFilterContext *ctx)
{
    HilbertContext *s = ctx->priv;

    /* Release the generated coefficient buffer. */
    av_freep(&s->taps);
}
static av_cold int query_formats(AVFilterContext *ctx)
{
    HilbertContext *s = ctx->priv;
    static const int64_t mono_layouts[] = { AV_CH_LAYOUT_MONO, -1 };
    static const enum AVSampleFormat fmts[] = {
        AV_SAMPLE_FMT_FLT,
        AV_SAMPLE_FMT_NONE
    };
    int rates[] = { s->sample_rate, -1 };
    AVFilterChannelLayouts *chlayouts;
    AVFilterFormats *list;
    int err;

    /* Packed float, mono, at the configured sample rate only. */
    list = ff_make_format_list(fmts);
    if (!list)
        return AVERROR(ENOMEM);
    err = ff_set_common_formats (ctx, list);
    if (err < 0)
        return err;

    chlayouts = avfilter_make_format64_list(mono_layouts);
    if (!chlayouts)
        return AVERROR(ENOMEM);
    err = ff_set_common_channel_layouts(ctx, chlayouts);
    if (err < 0)
        return err;

    list = ff_make_format_list(rates);
    if (!list)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, list);
}
static av_cold int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    HilbertContext *s = ctx->priv;
    float overlap;

    s->taps = av_malloc_array(s->nb_taps, sizeof(*s->taps));
    if (!s->taps)
        return AVERROR(ENOMEM);

    /* Start from the selected window, then multiply in the ideal
     * Hilbert impulse response for odd k (even taps are zero). */
    generate_window_func(s->taps, s->nb_taps, s->win_func, &overlap);

    for (int i = 0; i < s->nb_taps; i++) {
        const int k = -(s->nb_taps / 2) + i;

        if (k & 1) {
            const float pk = M_PI * k;
            s->taps[i] *= (1.f - cosf(pk)) / pk;
        } else {
            s->taps[i] = 0.f;
        }
    }

    s->pts = 0;

    return 0;
}
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    HilbertContext *s = ctx->priv;
    AVFrame *out;
    int count;

    /* Stream out the generated taps, at most nb_samples per frame. */
    count = FFMIN(s->nb_samples, s->nb_taps - s->pts);
    if (!count)
        return AVERROR_EOF;

    out = ff_get_audio_buffer(outlink, count);
    if (!out)
        return AVERROR(ENOMEM);

    memcpy(out->data[0], s->taps + s->pts, count * sizeof(float));
    out->pts = s->pts;
    s->pts  += count;

    return ff_filter_frame(outlink, out);
}
/* Single mono audio output; frames are produced on demand. */
static const AVFilterPad hilbert_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
        .config_props  = config_props,
    },
    { NULL }
};
/* Filter definition: a pure source emitting Hilbert-transformer taps. */
AVFilter ff_asrc_hilbert = {
    .name          = "hilbert",
    .description   = NULL_IF_CONFIG_SMALL("Generate a Hilbert transform FIR coefficients."),
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(HilbertContext),
    .inputs        = NULL,
    .outputs       = hilbert_outputs,
    .priv_class    = &hilbert_class,
};

456
externals/ffmpeg/libavfilter/asrc_sinc.c vendored Executable file
View File

@@ -0,0 +1,456 @@
/*
* Copyright (c) 2008-2009 Rob Sykes <robs@users.sourceforge.net>
* Copyright (c) 2017 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avassert.h"
#include "libavutil/opt.h"
#include "libavcodec/avfft.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
typedef struct SincContext {
    const AVClass *class;

    int sample_rate, nb_samples;    ///< output rate (Hz) and samples per frame
    float att, beta, phase, Fc0, Fc1, tbw0, tbw1;  ///< filter design parameters (options)
    int num_taps[2];
    int round;
    int n, rdft_len;                ///< n: total coefficient count streamed by request_frame()
    float *coeffs;                  ///< generated coefficients
    int64_t pts;                    ///< timestamp of the next frame, in samples
    RDFTContext *rdft, *irdft;      ///< forward/inverse RDFT contexts
} SincContext;
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SincContext *s = ctx->priv;
    const float *coeffs = s->coeffs;
    AVFrame *out;
    int count;

    /* Stream out the precomputed coefficients, nb_samples at a time. */
    count = FFMIN(s->nb_samples, s->n - s->pts);
    if (count <= 0)
        return AVERROR_EOF;

    out = ff_get_audio_buffer(outlink, count);
    if (!out)
        return AVERROR(ENOMEM);

    memcpy(out->data[0], coeffs + s->pts, count * sizeof(float));
    out->pts = s->pts;
    s->pts  += count;

    return ff_filter_frame(outlink, out);
}
/* Advertise mono float output at the user-selected sample rate. */
static int query_formats(AVFilterContext *ctx)
{
    SincContext *s = ctx->priv;
    static const enum AVSampleFormat fmts[] = { AV_SAMPLE_FMT_FLT,
                                                AV_SAMPLE_FMT_NONE };
    static const int64_t mono_layout[] = { AV_CH_LAYOUT_MONO, -1 };
    int rates[] = { s->sample_rate, -1 };
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *list;
    int ret;

    list = ff_make_format_list(fmts);
    if (!list)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, list)) < 0)
        return ret;

    layouts = avfilter_make_format64_list(mono_layout);
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    list = ff_make_format_list(rates);
    if (!list)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, list);
}
/* Modified Bessel function of the first kind, order zero: I0(x).
 * Power-series evaluation; terminates once adding the next term no
 * longer changes the float sum. */
static float bessel_I_0(float x)
{
    const float half_x = x / 2;
    float sum = 1, term = 1, prev;
    int k = 1;

    do {
        const float ratio = half_x / k++;

        prev  = sum;
        term *= ratio * ratio;
        sum  += term;
    } while (sum != prev);

    return sum;
}
static float *make_lpf(int num_taps, float Fc, float beta, float rho,
float scale, int dc_norm)
{
int i, m = num_taps - 1;
float *h = av_calloc(num_taps, sizeof(*h)), sum = 0;
float mult = scale / bessel_I_0(beta), mult1 = 1.f / (.5f * m + rho);
av_assert0(Fc >= 0 && Fc <= 1);
for (i = 0; i <= m / 2; i++) {
float z = i - .5f * m, x = z * M_PI, y = z * mult1;
h[i] = x ? sinf(Fc * x) / x : Fc;
sum += h[i] *= bessel_I_0(beta * sqrtf(1.f - y * y)) * mult;
if (m - i != i) {
h[m - i] = h[i];
sum += h[i];
}
}
for (i = 0; dc_norm && i < num_taps; i++)
h[i] *= scale / sum;
return h;
}
/* Estimate the Kaiser window beta that yields `att` dB of stop-band
 * attenuation at transition bandwidth `tr_bw`.  Below 60 dB the classic
 * closed-form Kaiser approximations are used; at or above 60 dB a table
 * of fitted cubic polynomials in att, indexed by log2 of the bandwidth,
 * is linearly interpolated. */
static float kaiser_beta(float att, float tr_bw)
{
    static const float coefs[][4] = {
        {-6.784957e-10, 1.02856e-05, 0.1087556, -0.8988365 + .001},
        {-6.897885e-10, 1.027433e-05, 0.10876, -0.8994658 + .002},
        {-1.000683e-09, 1.030092e-05, 0.1087677, -0.9007898 + .003},
        {-3.654474e-10, 1.040631e-05, 0.1087085, -0.8977766 + .006},
        {8.106988e-09, 6.983091e-06, 0.1091387, -0.9172048 + .015},
        {9.519571e-09, 7.272678e-06, 0.1090068, -0.9140768 + .025},
        {-5.626821e-09, 1.342186e-05, 0.1083999, -0.9065452 + .05},
        {-9.965946e-08, 5.073548e-05, 0.1040967, -0.7672778 + .085},
        {1.604808e-07, -5.856462e-05, 0.1185998, -1.34824 + .1},
        {-1.511964e-07, 6.363034e-05, 0.1064627, -0.9876665 + .18},
    };
    float octave, beta_lo, beta_hi;
    const float *lo, *hi;

    if (att < 60.f) {
        if (att > 50.f)
            return .1102f * (att - 8.7f);
        if (att > 20.96f)
            return .58417f * powf(att - 20.96f, .4f) + .07886f * (att - 20.96f);
        return 0;
    }

    /* Table row selection by log2 of the relative bandwidth; interpolate
     * between the two neighbouring fitted polynomials. */
    octave  = logf(tr_bw / .0005f) / logf(2.f);
    lo      = coefs[av_clip((int)octave, 0, FF_ARRAY_ELEMS(coefs) - 1)];
    hi      = coefs[av_clip(1 + (int)octave, 0, FF_ARRAY_ELEMS(coefs) - 1)];
    beta_lo = ((lo[0] * att + lo[1]) * att + lo[2]) * att + lo[3];
    beta_hi = ((hi[0] * att + hi[1]) * att + hi[2]) * att + hi[3];

    return beta_lo + (beta_hi - beta_lo) * (octave - (int)octave);
}
/* Fill in Kaiser design parameters from the requested attenuation.
 * Values the caller already fixed (beta >= 0, num_taps != 0) are kept. */
static void kaiser_params(float att, float Fc, float tr_bw, float *beta, int *num_taps)
{
    float width;

    if (*beta < 0.f)
        *beta = kaiser_beta(att, tr_bw * .5f / Fc);

    if (att < 60.f)
        width = (att - 7.95f) / (2.285f * M_PI * 2.f);
    else
        width = ((.0007528358f-1.577737e-05 * *beta) * *beta + 0.6248022f) * *beta + .06186902f;

    if (!*num_taps)
        *num_taps = ceilf(width / tr_bw + 1);
}
/**
 * Design one low-pass prototype for the sinc source.
 *
 * @param Fn       Nyquist frequency (sample_rate / 2)
 * @param Fc       cutoff in Hz; normalized in place below
 * @param tbw      transition bandwidth in Hz (0 selects a default)
 * @param num_taps in: requested tap count (0 = auto); out: actual (odd) count,
 *                 or 0 when the cutoff disables this filter
 * @param att      stop-band attenuation in dB (0 selects 120 dB)
 * @param beta     in/out Kaiser beta (negative = derive from att)
 * @param round    nonzero to round the auto tap count to the cutoff period
 * @return newly allocated taps, or NULL when the filter is disabled
 */
static float *lpf(float Fn, float Fc, float tbw, int *num_taps, float att, float *beta, int round)
{
    int n = *num_taps;

    /* Normalize the cutoff to Nyquist; outside (0, 1) means "no filter". */
    if ((Fc /= Fn) <= 0.f || Fc >= 1.f) {
        *num_taps = 0;
        return NULL;
    }

    att = att ? att : 120.f;   /* default attenuation */

    kaiser_params(att, Fc, (tbw ? tbw / Fn : .05f) * .5f, beta, num_taps);

    /* Only clamp/round automatically derived tap counts (n == 0 on entry). */
    if (!n) {
        n = *num_taps;
        *num_taps = av_clip(n, 11, 32767);
        if (round)
            *num_taps = 1 + 2 * (int)((int)((*num_taps / 2) * Fc + .5f) / Fc + .5f);
    }

    /* |= 1 forces an odd tap count so the filter has a true centre tap. */
    return make_lpf(*num_taps |= 1, Fc, *beta, 0.f, 1.f, 0);
}
/* Spectrally invert a low-pass response into its complement (high-pass):
 * negate every tap, then add unity at the centre tap. */
static void invert(float *h, int n)
{
    int i;

    for (i = 0; i < n; i++)
        h[i] = -h[i];

    h[(n - 1) / 2] += 1;
}
/* avfft's DFT_R2C packs the real Nyquist-bin value into element [1];
 * UNPACK moves it out to [n] (zeroing [1] and [n+1]) so the spectrum can
 * be walked as n/2+1 plain complex bins, and PACK restores the layout
 * before the inverse transform.  The work buffer is allocated 2 floats
 * larger to make room. */
#define PACK(h, n) h[1] = h[n]
#define UNPACK(h, n) h[n] = h[1], h[n + 1] = h[1] = 0;
#define SQR(a) ((a) * (a))
/* logf() that tolerates zero: returns the floor value -26 instead of -inf.
 * Negative input is a programming error (asserted). */
static float safe_log(float x)
{
    av_assert0(x >= 0);

    return x ? logf(x) : -26;
}
/**
 * Convert the FIR in *h to the requested phase response (SoX-style).
 *
 * phase is a percentage: 50 = linear phase (untouched elsewhere), 0 =
 * minimum phase, 100 = maximum phase; intermediate values interpolate.
 * Works via the cepstral method: log-magnitude spectrum -> cepstrum ->
 * windowed to reject the acausal part -> back to a spectrum with the
 * interpolated phase -> impulse response.
 *
 * @param s        filter context (its rdft/irdft are re-created at work_len)
 * @param h        in/out tap array; may be reallocated for partial phase
 * @param len      in/out tap count
 * @param post_len out: number of taps after the impulse peak
 * @param phase    requested phase percentage, 0..100
 * @return 0 on success, negative AVERROR on failure
 */
static int fir_to_phase(SincContext *s, float **h, int *len, int *post_len, float phase)
{
    /* phase1 folds the request into [0,1]; > 50% is handled by designing
     * the mirrored (time-reversed) filter. */
    float *pi_wraps, *work, phase1 = (phase > 50.f ? 100.f - phase : phase) / 50.f;
    int i, work_len, begin, end, imp_peak = 0, peak = 0;
    float imp_sum = 0, peak_imp_sum = 0;
    float prev_angle2 = 0, cum_2pi = 0, prev_angle1 = 0, cum_1pi = 0;

    /* Pick a power-of-two transform size: 32 doubled once per doubling of
     * *len, i.e. generous zero-padding for spectral resolution. */
    for (i = *len, work_len = 2 * 2 * 8; i > 1; work_len <<= 1, i >>= 1);

    /* The first part is for work (+2 for (UN)PACK), the latter for pi_wraps. */
    work = av_calloc((work_len + 2) + (work_len / 2 + 1), sizeof(float));
    if (!work)
        return AVERROR(ENOMEM);
    pi_wraps = &work[work_len + 2];

    memcpy(work, *h, *len * sizeof(*work));

    /* Re-create the transforms at this work_len. */
    av_rdft_end(s->rdft);
    av_rdft_end(s->irdft);
    s->rdft = s->irdft = NULL;
    s->rdft = av_rdft_init(av_log2(work_len), DFT_R2C);
    s->irdft = av_rdft_init(av_log2(work_len), IDFT_C2R);
    if (!s->rdft || !s->irdft) {
        av_free(work);
        return AVERROR(ENOMEM);
    }

    av_rdft_calc(s->rdft, work); /* Cepstral: */
    UNPACK(work, work_len);

    /* Per complex bin: unwrap the phase (tracking both 2pi and pi jumps,
     * the latter accumulated in pi_wraps[]) and replace the bin by the
     * log magnitude with zero imaginary part. */
    for (i = 0; i <= work_len; i += 2) {
        float angle = atan2f(work[i + 1], work[i]);
        float detect = 2 * M_PI;
        float delta = angle - prev_angle2;
        float adjust = detect * ((delta < -detect * .7f) - (delta > detect * .7f));

        prev_angle2 = angle;
        cum_2pi += adjust;
        angle += cum_2pi;
        detect = M_PI;
        delta = angle - prev_angle1;
        adjust = detect * ((delta < -detect * .7f) - (delta > detect * .7f));
        prev_angle1 = angle;
        cum_1pi += fabsf(adjust); /* fabs for when 2pi and 1pi have combined */
        pi_wraps[i >> 1] = cum_1pi;

        work[i] = safe_log(sqrtf(SQR(work[i]) + SQR(work[i + 1])));
        work[i + 1] = 0;
    }
    PACK(work, work_len);
    av_rdft_calc(s->irdft, work);
    /* avfft transforms are unnormalized; scale after the inverse. */
    for (i = 0; i < work_len; i++)
        work[i] *= 2.f / work_len;

    for (i = 1; i < work_len / 2; i++) { /* Window to reject acausal components */
        work[i] *= 2;
        work[i + work_len / 2] = 0;
    }
    av_rdft_calc(s->rdft, work);

    for (i = 2; i < work_len; i += 2) /* Interpolate between linear & min phase */
        work[i + 1] = phase1 * i / work_len * pi_wraps[work_len >> 1] + (1 - phase1) * (work[i + 1] + pi_wraps[i >> 1]) - pi_wraps[i >> 1];

    /* Back to a complex spectrum: magnitude = exp(log-magnitude). */
    work[0] = exp(work[0]);
    work[1] = exp(work[1]);
    for (i = 2; i < work_len; i += 2) {
        float x = expf(work[i]);

        work[i    ] = x * cosf(work[i + 1]);
        work[i + 1] = x * sinf(work[i + 1]);
    }

    av_rdft_calc(s->irdft, work);
    for (i = 0; i < work_len; i++)
        work[i] *= 2.f / work_len;

    /* Find peak pos. */
    for (i = 0; i <= (int) (pi_wraps[work_len >> 1] / M_PI + .5f); i++) {
        imp_sum += work[i];
        if (fabs(imp_sum) > fabs(peak_imp_sum)) {
            peak_imp_sum = imp_sum;
            peak = i;
        }
        if (work[i] > work[imp_peak]) /* For debug check only */
            imp_peak = i;
    }

    /* Back the peak up over any leading same-sign, larger-magnitude taps. */
    while (peak && fabsf(work[peak - 1]) > fabsf(work[peak]) && (work[peak - 1] * work[peak] > 0)) {
        peak--;
    }

    /* Choose the extraction window: whole response for minimum phase,
     * centred on the peak for linear phase, a fitted window (and a
     * reallocated, possibly longer *h) for intermediate phases. */
    if (!phase1) {
        begin = 0;
    } else if (phase1 == 1) {
        begin = peak - *len / 2;
    } else {
        begin = (.997f - (2 - phase1) * .22f) * *len + .5f;
        end   = (.997f + (0 - phase1) * .22f) * *len + .5f;
        begin = peak - (begin & ~3);
        end   = peak + 1 + ((end + 3) & ~3);
        *len  = end - begin;
        *h = av_realloc_f(*h, *len, sizeof(**h));
        if (!*h) {
            av_free(work);
            return AVERROR(ENOMEM);
        }
    }

    /* Copy out, time-reversed for maximum-phase requests (> 50%).
     * The & (work_len - 1) wraps negative indices around the buffer. */
    for (i = 0; i < *len; i++) {
        (*h)[i] = work[(begin + (phase > 50.f ? *len - 1 - i : i) + work_len) & (work_len - 1)];
    }
    *post_len = phase > 50 ? peak - begin : begin + *len - (peak + 1);

    av_log(s, AV_LOG_DEBUG, "%d nPI=%g peak-sum@%i=%g (val@%i=%g); len=%i post=%i (%g%%)\n",
           work_len, pi_wraps[work_len >> 1] / M_PI, peak, peak_imp_sum, imp_peak,
           work[imp_peak], *len, *post_len, 100.f - 100.f * *post_len / (*len - 1));
    av_free(work);

    return 0;
}
/**
 * Design the final FIR from the two low-pass prototypes and store it in
 * s->coeffs for request_frame() to emit.
 *
 * Fix: the designed tap array h[longer] was leaked when fir_to_phase()
 * or the coefficient allocation failed; both error paths now free it.
 */
static int config_output(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SincContext *s = ctx->priv;
    float Fn = s->sample_rate * .5f;
    float *h[2];
    int i, n, post_peak, longer;

    outlink->sample_rate = s->sample_rate;
    s->pts = 0;

    /* Both cutoffs must lie below Nyquist. */
    if (s->Fc0 >= Fn || s->Fc1 >= Fn) {
        av_log(ctx, AV_LOG_ERROR,
               "filter frequency must be less than %d/2.\n", s->sample_rate);
        return AVERROR(EINVAL);
    }

    /* Design both prototypes; either may be NULL (cutoff disabled). */
    h[0] = lpf(Fn, s->Fc0, s->tbw0, &s->num_taps[0], s->att, &s->beta, s->round);
    h[1] = lpf(Fn, s->Fc1, s->tbw1, &s->num_taps[1], s->att, &s->beta, s->round);

    if (h[0]) /* spectrally invert prototype 0 -> high-pass */
        invert(h[0], s->num_taps[0]);

    longer = s->num_taps[1] > s->num_taps[0];
    n = s->num_taps[longer];

    /* Combine both responses into the longer one (band-pass/band-reject). */
    if (h[0] && h[1]) {
        for (i = 0; i < s->num_taps[!longer]; i++)
            h[longer][i + (n - s->num_taps[!longer]) / 2] += h[!longer][i];

        if (s->Fc0 < s->Fc1)
            invert(h[longer], n);

        av_free(h[!longer]);
    }

    if (s->phase != 50.f) {
        int ret = fir_to_phase(s, &h[longer], &n, &post_peak, s->phase);
        if (ret < 0) {
            /* fir_to_phase() does not free the taps on every error path;
             * av_free(NULL) is a no-op if it did. */
            av_free(h[longer]);
            return ret;
        }
    } else {
        post_peak = n >> 1;
    }

    s->n = 1 << (av_log2(n) + 1);   /* zero-padded output length */
    s->rdft_len = 1 << av_log2(n);
    s->coeffs = av_calloc(s->n, sizeof(*s->coeffs));
    if (!s->coeffs) {
        av_free(h[longer]);         /* don't leak the designed taps */
        return AVERROR(ENOMEM);
    }

    for (i = 0; i < n; i++)
        s->coeffs[i] = h[longer][i];
    av_free(h[longer]);

    /* The transforms were only scratch space for fir_to_phase(). */
    av_rdft_end(s->rdft);
    av_rdft_end(s->irdft);
    s->rdft = s->irdft = NULL;

    return 0;
}
/* Free the coefficient table and any RDFT contexts that survived an
 * error in config_output(). */
static av_cold void uninit(AVFilterContext *ctx)
{
    SincContext *s = ctx->priv;

    av_rdft_end(s->rdft);
    av_rdft_end(s->irdft);
    s->rdft  = NULL;
    s->irdft = NULL;
    av_freep(&s->coeffs);
}
/* Single audio output pad; the coefficient stream is produced on demand
 * via request_frame() after config_output() designs the filter. */
static const AVFilterPad sinc_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .config_props  = config_output,
        .request_frame = request_frame,
    },
    { NULL }
};
/* All options are audio filtering parameters. */
#define AF AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM
#define OFFSET(x) offsetof(SincContext, x)

/* User options; each long name has a short alias sharing its storage. */
static const AVOption sinc_options[] = {
    { "sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT_MAX, AF },
    { "r", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, INT_MAX, AF },
    { "nb_samples", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, AF },
    { "n", "set the number of samples per requested frame", OFFSET(nb_samples), AV_OPT_TYPE_INT, {.i64=1024}, 1, INT_MAX, AF },
    { "hp", "set high-pass filter frequency", OFFSET(Fc0), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, INT_MAX, AF },
    { "lp", "set low-pass filter frequency", OFFSET(Fc1), AV_OPT_TYPE_FLOAT, {.dbl=0}, 0, INT_MAX, AF },
    { "phase", "set filter phase response", OFFSET(phase), AV_OPT_TYPE_FLOAT, {.dbl=50}, 0, 100, AF },
    { "beta", "set kaiser window beta", OFFSET(beta), AV_OPT_TYPE_FLOAT, {.dbl=-1}, -1, 256, AF },
    { "att", "set stop-band attenuation", OFFSET(att), AV_OPT_TYPE_FLOAT, {.dbl=120}, 40, 180, AF },
    { "round", "enable rounding", OFFSET(round), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, AF },
    { "hptaps", "set number of taps for high-pass filter", OFFSET(num_taps[0]), AV_OPT_TYPE_INT, {.i64=0}, 0, 32768, AF },
    { "lptaps", "set number of taps for low-pass filter", OFFSET(num_taps[1]), AV_OPT_TYPE_INT, {.i64=0}, 0, 32768, AF },
    { NULL }
};
AVFILTER_DEFINE_CLASS(sinc);

/* Audio source filter: no inputs; emits the designed FIR coefficients
 * as a mono float audio stream. */
AVFilter ff_asrc_sinc = {
    .name          = "sinc",
    .description   = NULL_IF_CONFIG_SMALL("Generate a sinc kaiser-windowed low-pass, high-pass, band-pass, or band-reject FIR coefficients."),
    .priv_size     = sizeof(SincContext),
    .priv_class    = &sinc_class,
    .query_formats = query_formats,
    .uninit        = uninit,
    .inputs        = NULL,
    .outputs       = sinc_outputs,
};

282
externals/ffmpeg/libavfilter/asrc_sine.c vendored Executable file
View File

@@ -0,0 +1,282 @@
/*
* Copyright (c) 2013 Nicolas George
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public License
* along with FFmpeg; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <float.h>
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/eval.h"
#include "libavutil/opt.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
/* Private context for the sine-wave audio source. */
typedef struct SineContext {
    const AVClass *class;
    double frequency;               ///< sine frequency in Hz
    double beep_factor;             ///< beep frequency as a multiple of frequency (0 = no beep)
    char *samples_per_frame;        ///< expression string for the per-frame sample count
    AVExpr *samples_per_frame_expr; ///< parsed form of samples_per_frame
    int sample_rate;                ///< output sample rate
    int64_t duration;               ///< duration; AV_TIME_BASE units until config_props() converts to samples
    int16_t *sin;                   ///< sine lookup table with 1 << LOG_PERIOD entries
    int64_t pts;                    ///< next output timestamp, in samples
    uint32_t phi;                   ///< current phase of the sine (2pi = 1<<32)
    uint32_t dphi;                  ///< phase increment between two samples
    unsigned beep_period;           ///< beep repetition period, in samples
    unsigned beep_index;            ///< position within the current beep period
    unsigned beep_length;           ///< beep length, in samples
    uint32_t phi_beep;              ///< current phase of the beep
    uint32_t dphi_beep;             ///< phase increment of the beep
} SineContext;
#define CONTEXT SineContext
#define FLAGS AV_OPT_FLAG_AUDIO_PARAM|AV_OPT_FLAG_FILTERING_PARAM

/* Helpers to declare AVOptions tersely; `deffield` picks which member of
 * the default-value union to initialize for the given option type. */
#define OPT_GENERIC(name, field, def, min, max, descr, type, deffield, ...) \
    { name, descr, offsetof(CONTEXT, field), AV_OPT_TYPE_ ## type,          \
      { .deffield = def }, min, max, FLAGS, __VA_ARGS__ }

#define OPT_INT(name, field, def, min, max, descr, ...) \
    OPT_GENERIC(name, field, def, min, max, descr, INT, i64, __VA_ARGS__)

#define OPT_DBL(name, field, def, min, max, descr, ...) \
    OPT_GENERIC(name, field, def, min, max, descr, DOUBLE, dbl, __VA_ARGS__)

#define OPT_DUR(name, field, def, min, max, descr, ...) \
    OPT_GENERIC(name, field, def, min, max, descr, DURATION, str, __VA_ARGS__)

#define OPT_STR(name, field, def, min, max, descr, ...) \
    OPT_GENERIC(name, field, def, min, max, descr, STRING, str, __VA_ARGS__)
/* User options; each long name has a single-letter alias sharing storage. */
static const AVOption sine_options[] = {
    OPT_DBL("frequency",         frequency,         440, 0, DBL_MAX,   "set the sine frequency",),
    OPT_DBL("f",                 frequency,         440, 0, DBL_MAX,   "set the sine frequency",),
    OPT_DBL("beep_factor",       beep_factor,         0, 0, DBL_MAX,   "set the beep frequency factor",),
    OPT_DBL("b",                 beep_factor,         0, 0, DBL_MAX,   "set the beep frequency factor",),
    OPT_INT("sample_rate",       sample_rate,     44100, 1, INT_MAX,   "set the sample rate",),
    OPT_INT("r",                 sample_rate,     44100, 1, INT_MAX,   "set the sample rate",),
    OPT_DUR("duration",          duration,            0, 0, INT64_MAX, "set the audio duration",),
    OPT_DUR("d",                 duration,            0, 0, INT64_MAX, "set the audio duration",),
    OPT_STR("samples_per_frame", samples_per_frame, "1024", 0, 0,      "set the number of samples per frame",),
    {NULL}
};
AVFILTER_DEFINE_CLASS(sine);

#define LOG_PERIOD 15       /* log2 of the sine table length: 1<<15 entries per period */
#define AMPLITUDE 4095      /* peak amplitude of the generated sine */
#define AMPLITUDE_SHIFT 3   /* extra precision bits used while building the table */
/* Fill `sin` (1 << LOG_PERIOD entries, one full period) with a sine wave
 * of amplitude AMPLITUDE, using only integer arithmetic.  The first
 * quarter is computed by repeated angle bisection, then mirrored. */
static void make_sin_table(int16_t *sin)
{
    unsigned half_pi = 1 << (LOG_PERIOD - 2);      /* table index of pi/2 */
    unsigned ampls = AMPLITUDE << AMPLITUDE_SHIFT; /* work at shifted-up precision */
    uint64_t unit2 = (uint64_t)(ampls * ampls) << 32;
    unsigned step, i, c, s, k, new_k, n2;

    /* Principle: if u = exp(i*a1) and v = exp(i*a2), then
       exp(i*(a1+a2)/2) = (u+v) / length(u+v) */
    sin[0] = 0;
    sin[half_pi] = ampls;
    for (step = half_pi; step > 1; step /= 2) {
        /* k = (1 << 16) * amplitude / length(u+v)
           In exact values, k is constant at a given step */
        k = 0x10000;
        for (i = 0; i < half_pi / 2; i += step) {
            /* s/c: sums of the two neighbouring sine/cosine samples */
            s = sin[i] + sin[i + step];
            c = sin[half_pi - i] + sin[half_pi - i - step];
            n2 = s * s + c * c;
            /* Newton's method to solve n² * k² = unit² */
            while (1) {
                new_k = (k + unit2 / ((uint64_t)k * n2) + 1) >> 1;
                if (k == new_k)
                    break;
                k = new_k;
            }
            sin[i + step / 2] = (k * s + 0x7FFF) >> 16;
            sin[half_pi - i - step / 2] = (k * c + 0x8000) >> 16;
        }
    }

    /* Unshift amplitude */
    for (i = 0; i <= half_pi; i++)
        sin[i] = (sin[i] + (1 << (AMPLITUDE_SHIFT - 1))) >> AMPLITUDE_SHIFT;

    /* Use symmetries to fill the other three quarters */
    for (i = 0; i < half_pi; i++)
        sin[half_pi * 2 - i] = sin[i];
    for (i = 0; i < 2 * half_pi; i++)
        sin[i + 2 * half_pi] = -sin[i];
}
/* Variables usable in the samples_per_frame expression; values are filled
 * per frame in request_frame(). */
static const char *const var_names[] = {
    "n",    ///< output frame count
    "pts",  ///< current timestamp
    "t",    ///< current time in seconds
    "TB",   ///< output time base
    NULL
};

/* Indices into the values[] array matching var_names[]. */
enum {
    VAR_N,
    VAR_PTS,
    VAR_T,
    VAR_TB,
    VAR_VARS_NB
};
/* Build the sine lookup table, precompute the fixed-point phase
 * increments and parse the samples_per_frame expression. */
static av_cold int init(AVFilterContext *ctx)
{
    SineContext *sine = ctx->priv;
    int ret;

    sine->sin = av_malloc(sizeof(*sine->sin) << LOG_PERIOD);
    if (!sine->sin)
        return AVERROR(ENOMEM);
    make_sin_table(sine->sin);

    /* Phase step per sample, in units of 2pi / 2^32. */
    sine->dphi = ldexp(sine->frequency, 32) / sine->sample_rate + 0.5;

    if (sine->beep_factor) {
        sine->beep_period = sine->sample_rate;
        sine->beep_length = sine->beep_period / 25;
        sine->dphi_beep   = ldexp(sine->beep_factor * sine->frequency, 32) /
                            sine->sample_rate + 0.5;
    }

    ret = av_expr_parse(&sine->samples_per_frame_expr,
                        sine->samples_per_frame, var_names,
                        NULL, NULL, NULL, NULL, 0, sine);
    if (ret < 0)
        return ret;

    return 0;
}
/* Release the sine table and the parsed frame-size expression. */
static av_cold void uninit(AVFilterContext *ctx)
{
    SineContext *sine = ctx->priv;

    av_freep(&sine->sin);
    av_expr_free(sine->samples_per_frame_expr);
    sine->samples_per_frame_expr = NULL;
}
/* Advertise mono signed 16-bit output at the configured sample rate. */
static av_cold int query_formats(AVFilterContext *ctx)
{
    SineContext *sine = ctx->priv;
    static const enum AVSampleFormat fmts[] = { AV_SAMPLE_FMT_S16,
                                                AV_SAMPLE_FMT_NONE };
    static const int64_t mono_layout[] = { AV_CH_LAYOUT_MONO, -1 };
    int rates[] = { sine->sample_rate, -1 };
    AVFilterChannelLayouts *layouts;
    AVFilterFormats *list;
    int ret;

    list = ff_make_format_list(fmts);
    if (!list)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_formats(ctx, list)) < 0)
        return ret;

    layouts = avfilter_make_format64_list(mono_layout);
    if (!layouts)
        return AVERROR(ENOMEM);
    if ((ret = ff_set_common_channel_layouts(ctx, layouts)) < 0)
        return ret;

    list = ff_make_format_list(rates);
    if (!list)
        return AVERROR(ENOMEM);
    return ff_set_common_samplerates(ctx, list);
}
/* Convert the user-supplied duration from AV_TIME_BASE units into a
 * sample count, now that the sample rate is final. */
static av_cold int config_props(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    SineContext *sine = ctx->priv;

    sine->duration = av_rescale(sine->duration, sine->sample_rate, AV_TIME_BASE);
    return 0;
}
/* Produce one frame of sine samples (plus the optional beep).  The frame
 * size is taken from evaluating the samples_per_frame expression. */
static int request_frame(AVFilterLink *outlink)
{
    SineContext *sine = outlink->src->priv;
    AVFrame *frame;
    double values[VAR_VARS_NB] = {
        [VAR_N]   = outlink->frame_count_in,
        [VAR_PTS] = sine->pts,
        [VAR_T]   = sine->pts * av_q2d(outlink->time_base),
        [VAR_TB]  = av_q2d(outlink->time_base),
    };
    int i, nb_samples = lrint(av_expr_eval(sine->samples_per_frame_expr, values, sine));
    int16_t *samples;

    /* Guard against expressions evaluating to zero or negative. */
    if (nb_samples <= 0) {
        av_log(sine, AV_LOG_WARNING, "nb samples expression evaluated to %d, "
               "defaulting to 1024\n", nb_samples);
        nb_samples = 1024;
    }

    if (sine->duration) {
        /* Clamp to the remaining duration; nothing left means EOF. */
        nb_samples = FFMIN(nb_samples, sine->duration - sine->pts);
        av_assert1(nb_samples >= 0);
        if (!nb_samples)
            return AVERROR_EOF;
    }

    if (!(frame = ff_get_audio_buffer(outlink, nb_samples)))
        return AVERROR(ENOMEM);
    samples = (int16_t *)frame->data[0];

    for (i = 0; i < nb_samples; i++) {
        /* The top LOG_PERIOD bits of the 32-bit phase index the table. */
        samples[i] = sine->sin[sine->phi >> (32 - LOG_PERIOD)];
        sine->phi += sine->dphi;
        if (sine->beep_index < sine->beep_length) {
            /* Mix in the beep at double amplitude (<< 1). */
            samples[i] += sine->sin[sine->phi_beep >> (32 - LOG_PERIOD)] << 1;
            sine->phi_beep += sine->dphi_beep;
        }
        if (++sine->beep_index == sine->beep_period)
            sine->beep_index = 0;
    }

    frame->pts = sine->pts;
    sine->pts += nb_samples;
    return ff_filter_frame(outlink, frame);
}
/* Single audio output pad; samples are generated on demand. */
static const AVFilterPad sine_outputs[] = {
    {
        .name          = "default",
        .type          = AVMEDIA_TYPE_AUDIO,
        .request_frame = request_frame,
        .config_props  = config_props,
    },
    { NULL }
};
/* Audio source filter: no inputs; emits a mono S16 sine wave. */
AVFilter ff_asrc_sine = {
    .name          = "sine",
    .description   = NULL_IF_CONFIG_SMALL("Generate sine wave audio signal."),
    .query_formats = query_formats,
    .init          = init,
    .uninit        = uninit,
    .priv_size     = sizeof(SineContext),
    .inputs        = NULL,
    .outputs       = sine_outputs,
    .priv_class    = &sine_class,
};

42
externals/ffmpeg/libavfilter/atadenoise.h vendored Executable file
View File

@@ -0,0 +1,42 @@
/*
* Copyright (c) 2019 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_ATADENOISE_H
#define AVFILTER_ATADENOISE_H
#include <stddef.h>
#include <stdint.h>
/* Variants of the adaptive temporal averaging algorithm. */
enum ATAAlgorithm {
    PARALLEL,
    SERIAL,
    NB_ATAA   /* number of algorithms, not a valid choice */
};

/* DSP entry points; a C version is installed by the filter and may be
 * replaced with an architecture-specific one (see ff_atadenoise_init_x86). */
typedef struct ATADenoiseDSPContext {
    /* NOTE(review): parameter semantics inferred from names only — src/dst
     * are one row of the current/output frame, srcf appears to be a set of
     * reference-frame rows and thra/thrb the two thresholds; confirm
     * against vf_atadenoise.c before relying on this. */
    void (*filter_row)(const uint8_t *src, uint8_t *dst,
                       const uint8_t **srcf,
                       int w, int mid, int size,
                       int thra, int thrb);
} ATADenoiseDSPContext;

void ff_atadenoise_init_x86(ATADenoiseDSPContext *dsp, int depth, int algorithm);

#endif /* AVFILTER_ATADENOISE_H */

97
externals/ffmpeg/libavfilter/audio.c vendored Executable file
View File

@@ -0,0 +1,97 @@
/*
* Copyright (c) Stefano Sabatini | stefasab at gmail.com
* Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "libavutil/common.h"
#include "audio.h"
#include "avfilter.h"
#include "internal.h"
#define BUFFER_ALIGN 0
/* Pass-through allocator: request the buffer from the next filter's
 * first output link instead of allocating here. */
AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
    AVFilterLink *outlink = link->dst->outputs[0];

    return ff_get_audio_buffer(outlink, nb_samples);
}
/* Allocate a silent audio frame from the link's frame pool, (re)creating
 * the pool whenever the cached channel count, sample capacity, format or
 * alignment no longer fits the request. */
AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
    AVFrame *frame = NULL;
    int channels = link->channels;

    /* A set channel_layout must agree with the channel count. */
    av_assert0(channels == av_get_channel_layout_nb_channels(link->channel_layout) || !av_get_channel_layout_nb_channels(link->channel_layout));

    if (!link->frame_pool) {
        link->frame_pool = ff_frame_pool_audio_init(av_buffer_allocz, channels,
                                                    nb_samples, link->format, BUFFER_ALIGN);
        if (!link->frame_pool)
            return NULL;
    } else {
        /* Reuse the existing pool only if its geometry still fits;
         * note nb_samples only needs to fit (<=), not match exactly. */
        int pool_channels = 0;
        int pool_nb_samples = 0;
        int pool_align = 0;
        enum AVSampleFormat pool_format = AV_SAMPLE_FMT_NONE;

        if (ff_frame_pool_get_audio_config(link->frame_pool,
                                           &pool_channels, &pool_nb_samples,
                                           &pool_format, &pool_align) < 0) {
            return NULL;
        }

        if (pool_channels != channels || pool_nb_samples < nb_samples ||
            pool_format != link->format || pool_align != BUFFER_ALIGN) {
            ff_frame_pool_uninit((FFFramePool **)&link->frame_pool);
            link->frame_pool = ff_frame_pool_audio_init(av_buffer_allocz, channels,
                                                        nb_samples, link->format, BUFFER_ALIGN);
            if (!link->frame_pool)
                return NULL;
        }
    }

    frame = ff_frame_pool_get(link->frame_pool);
    if (!frame)
        return NULL;

    frame->nb_samples = nb_samples;
    frame->channel_layout = link->channel_layout;
    frame->sample_rate = link->sample_rate;

    /* Pool buffers may be recycled: clear them to silence before reuse. */
    av_samples_set_silence(frame->extended_data, 0, nb_samples, channels, link->format);

    return frame;
}
/* Public entry point: let the destination pad's custom allocator try
 * first, then fall back to the default pool-backed allocator. */
AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples)
{
    AVFrame *frame = NULL;

    if (link->dstpad->get_audio_buffer)
        frame = link->dstpad->get_audio_buffer(link, nb_samples);

    if (!frame)
        frame = ff_default_get_audio_buffer(link, nb_samples);

    return frame;
}

63
externals/ffmpeg/libavfilter/audio.h vendored Executable file
View File

@@ -0,0 +1,63 @@
/*
* Copyright (c) Stefano Sabatini | stefasab at gmail.com
* Copyright (c) S.N. Hemanth Meenakshisundaram | smeenaks at ucsd.edu
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVFILTER_AUDIO_H
#define AVFILTER_AUDIO_H
#include "avfilter.h"
#include "internal.h"
/* Convenience lists of all packed / all planar sample formats, for
 * filters that accept any format of one class.  Note: being static const
 * in a header, every including translation unit gets its own copy. */
static const enum AVSampleFormat ff_packed_sample_fmts_array[] = {
    AV_SAMPLE_FMT_U8,
    AV_SAMPLE_FMT_S16,
    AV_SAMPLE_FMT_S32,
    AV_SAMPLE_FMT_FLT,
    AV_SAMPLE_FMT_DBL,
    AV_SAMPLE_FMT_NONE
};

static const enum AVSampleFormat ff_planar_sample_fmts_array[] = {
    AV_SAMPLE_FMT_U8P,
    AV_SAMPLE_FMT_S16P,
    AV_SAMPLE_FMT_S32P,
    AV_SAMPLE_FMT_FLTP,
    AV_SAMPLE_FMT_DBLP,
    AV_SAMPLE_FMT_NONE
};
/** default handler for get_audio_buffer() for audio inputs */
AVFrame *ff_default_get_audio_buffer(AVFilterLink *link, int nb_samples);

/** get_audio_buffer() handler for filters which simply pass audio along */
AVFrame *ff_null_get_audio_buffer(AVFilterLink *link, int nb_samples);

/**
 * Request an audio samples buffer with a specific set of permissions.
 *
 * @param link           the output link to the filter from which the buffer will
 *                       be requested
 * @param nb_samples     the number of samples per channel
 * @return               A reference to the samples. This must be unreferenced with
 *                       av_frame_free() when you are finished with it.
 */
AVFrame *ff_get_audio_buffer(AVFilterLink *link, int nb_samples);

#endif /* AVFILTER_AUDIO_H */

Some files were not shown because too many files have changed in this diff Show More