early-access version 1432

This commit is contained in:
pineappleEA
2021-02-09 04:25:58 +01:00
parent de64eab4b4
commit 3d5a9d908a
7336 changed files with 1773492 additions and 111 deletions

2
externals/ffmpeg/libavdevice/.gitignore vendored Executable file
View File

@@ -0,0 +1,2 @@
/indev_list.c
/outdev_list.c

71
externals/ffmpeg/libavdevice/Makefile vendored Executable file
View File

@@ -0,0 +1,71 @@
NAME    = avdevice
DESC    = FFmpeg device handling library

# NOTE(review): the dumped source carried trailing backslashes after the
# last HEADERS/OBJS entries, which would splice the following assignment
# into the list; the continuations below end the lists properly.
HEADERS = avdevice.h                                                    \
          version.h

OBJS    = alldevices.o                                                  \
          avdevice.o                                                    \
          utils.o

OBJS-$(HAVE_LIBC_MSVCRT)                 += file_open.o
OBJS-$(CONFIG_SHARED)                    += reverse.o

# input/output devices
OBJS-$(CONFIG_ALSA_INDEV)                += alsa_dec.o alsa.o timefilter.o
OBJS-$(CONFIG_ALSA_OUTDEV)               += alsa_enc.o alsa.o
OBJS-$(CONFIG_ANDROID_CAMERA_INDEV)      += android_camera.o
OBJS-$(CONFIG_AVFOUNDATION_INDEV)        += avfoundation.o
OBJS-$(CONFIG_BKTR_INDEV)                += bktr.o
OBJS-$(CONFIG_CACA_OUTDEV)               += caca.o
OBJS-$(CONFIG_DECKLINK_OUTDEV)           += decklink_enc.o decklink_enc_c.o decklink_common.o
OBJS-$(CONFIG_DECKLINK_INDEV)            += decklink_dec.o decklink_dec_c.o decklink_common.o
OBJS-$(CONFIG_DSHOW_INDEV)               += dshow_crossbar.o dshow.o dshow_enummediatypes.o \
                                            dshow_enumpins.o dshow_filter.o \
                                            dshow_pin.o dshow_common.o
OBJS-$(CONFIG_FBDEV_INDEV)               += fbdev_dec.o \
                                            fbdev_common.o
OBJS-$(CONFIG_FBDEV_OUTDEV)              += fbdev_enc.o \
                                            fbdev_common.o
OBJS-$(CONFIG_GDIGRAB_INDEV)             += gdigrab.o
OBJS-$(CONFIG_IEC61883_INDEV)            += iec61883.o
OBJS-$(CONFIG_JACK_INDEV)                += jack.o timefilter.o
OBJS-$(CONFIG_KMSGRAB_INDEV)             += kmsgrab.o
OBJS-$(CONFIG_LAVFI_INDEV)               += lavfi.o
OBJS-$(CONFIG_OPENAL_INDEV)              += openal-dec.o
OBJS-$(CONFIG_OPENGL_OUTDEV)             += opengl_enc.o
OBJS-$(CONFIG_OSS_INDEV)                 += oss_dec.o oss.o
OBJS-$(CONFIG_OSS_OUTDEV)                += oss_enc.o oss.o
OBJS-$(CONFIG_PULSE_INDEV)               += pulse_audio_dec.o \
                                            pulse_audio_common.o timefilter.o
OBJS-$(CONFIG_PULSE_OUTDEV)              += pulse_audio_enc.o \
                                            pulse_audio_common.o
OBJS-$(CONFIG_SDL2_OUTDEV)               += sdl2.o
OBJS-$(CONFIG_SNDIO_INDEV)               += sndio_dec.o sndio.o
OBJS-$(CONFIG_SNDIO_OUTDEV)              += sndio_enc.o sndio.o
OBJS-$(CONFIG_V4L2_INDEV)                += v4l2.o v4l2-common.o timefilter.o
OBJS-$(CONFIG_V4L2_OUTDEV)               += v4l2enc.o v4l2-common.o
OBJS-$(CONFIG_VFWCAP_INDEV)              += vfwcap.o
OBJS-$(CONFIG_XCBGRAB_INDEV)             += xcbgrab.o
OBJS-$(CONFIG_XV_OUTDEV)                 += xv.o

# external libraries
OBJS-$(CONFIG_LIBCDIO_INDEV)             += libcdio.o
OBJS-$(CONFIG_LIBDC1394_INDEV)           += libdc1394.o

# Windows resource file
SLIBOBJS-$(HAVE_GNU_WINDRES)             += avdeviceres.o

SKIPHEADERS                              += decklink_common.h
SKIPHEADERS-$(CONFIG_DECKLINK)           += decklink_enc.h decklink_dec.h \
                                            decklink_common_c.h
SKIPHEADERS-$(CONFIG_DSHOW_INDEV)        += dshow_capture.h
SKIPHEADERS-$(CONFIG_FBDEV_INDEV)        += fbdev_common.h
SKIPHEADERS-$(CONFIG_FBDEV_OUTDEV)       += fbdev_common.h
SKIPHEADERS-$(CONFIG_LIBPULSE)           += pulse_audio_common.h
SKIPHEADERS-$(CONFIG_V4L2_INDEV)         += v4l2-common.h
SKIPHEADERS-$(CONFIG_V4L2_OUTDEV)        += v4l2-common.h
SKIPHEADERS-$(CONFIG_ALSA)               += alsa.h
SKIPHEADERS-$(CONFIG_SNDIO)              += sndio.h

TESTPROGS-$(CONFIG_JACK_INDEV)           += timefilter

68
externals/ffmpeg/libavdevice/alldevices.c vendored Executable file
View File

@@ -0,0 +1,68 @@
/*
* Register all the grabbing devices.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include "libavutil/thread.h"
#include "libavformat/internal.h"
#include "avdevice.h"
/* devices */
extern AVInputFormat  ff_alsa_demuxer;
extern AVOutputFormat ff_alsa_muxer;
extern AVInputFormat  ff_android_camera_demuxer;
extern AVInputFormat  ff_avfoundation_demuxer;
extern AVInputFormat  ff_bktr_demuxer;
extern AVOutputFormat ff_caca_muxer;
extern AVInputFormat  ff_decklink_demuxer;
extern AVOutputFormat ff_decklink_muxer;
extern AVInputFormat  ff_dshow_demuxer;
extern AVInputFormat  ff_fbdev_demuxer;
extern AVOutputFormat ff_fbdev_muxer;
extern AVInputFormat  ff_gdigrab_demuxer;
extern AVInputFormat  ff_iec61883_demuxer;
extern AVInputFormat  ff_jack_demuxer;
extern AVInputFormat  ff_kmsgrab_demuxer;
extern AVInputFormat  ff_lavfi_demuxer;
extern AVInputFormat  ff_openal_demuxer;
extern AVOutputFormat ff_opengl_muxer;
extern AVInputFormat  ff_oss_demuxer;
extern AVOutputFormat ff_oss_muxer;
extern AVInputFormat  ff_pulse_demuxer;
extern AVOutputFormat ff_pulse_muxer;
extern AVOutputFormat ff_sdl2_muxer;
extern AVInputFormat  ff_sndio_demuxer;
extern AVOutputFormat ff_sndio_muxer;
extern AVInputFormat  ff_v4l2_demuxer;
extern AVOutputFormat ff_v4l2_muxer;
extern AVInputFormat  ff_vfwcap_demuxer;
extern AVInputFormat  ff_xcbgrab_demuxer;
extern AVOutputFormat ff_xv_muxer;

/* external libraries */
extern AVInputFormat  ff_libcdio_demuxer;
extern AVInputFormat  ff_libdc1394_demuxer;

/* Generated lists of the enabled devices; they define outdev_list[] and
 * indev_list[] from the extern declarations above (the generated files
 * are listed in this directory's .gitignore). */
#include "libavdevice/outdev_list.c"
#include "libavdevice/indev_list.c"

/**
 * Register all compiled-in input/output devices with libavformat by
 * handing the two generated tables to avpriv_register_devices().
 */
void avdevice_register_all(void)
{
    avpriv_register_devices(outdev_list, indev_list);
}

403
externals/ffmpeg/libavdevice/alsa.c vendored Executable file
View File

@@ -0,0 +1,403 @@
/*
* ALSA input and output
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* ALSA input and output: common code
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
* @author Nicolas George ( nicolas george normalesup org )
*/
#include <alsa/asoundlib.h>
#include "avdevice.h"
#include "libavutil/avassert.h"
#include "libavutil/channel_layout.h"
#include "alsa.h"
/**
 * Map an FFmpeg raw-PCM codec ID onto the matching ALSA sample format.
 *
 * @param codec_id AVCodecID of a raw PCM codec
 * @return the corresponding ALSA format, or SND_PCM_FORMAT_UNKNOWN when
 *         the codec has no ALSA equivalent
 */
static av_cold snd_pcm_format_t codec_id_to_pcm_format(int codec_id)
{
    static const struct {
        int              codec;
        snd_pcm_format_t fmt;
    } pcm_format_map[] = {
        { AV_CODEC_ID_PCM_F64LE, SND_PCM_FORMAT_FLOAT64_LE },
        { AV_CODEC_ID_PCM_F64BE, SND_PCM_FORMAT_FLOAT64_BE },
        { AV_CODEC_ID_PCM_F32LE, SND_PCM_FORMAT_FLOAT_LE   },
        { AV_CODEC_ID_PCM_F32BE, SND_PCM_FORMAT_FLOAT_BE   },
        { AV_CODEC_ID_PCM_S32LE, SND_PCM_FORMAT_S32_LE     },
        { AV_CODEC_ID_PCM_S32BE, SND_PCM_FORMAT_S32_BE     },
        { AV_CODEC_ID_PCM_U32LE, SND_PCM_FORMAT_U32_LE     },
        { AV_CODEC_ID_PCM_U32BE, SND_PCM_FORMAT_U32_BE     },
        { AV_CODEC_ID_PCM_S24LE, SND_PCM_FORMAT_S24_3LE    },
        { AV_CODEC_ID_PCM_S24BE, SND_PCM_FORMAT_S24_3BE    },
        { AV_CODEC_ID_PCM_U24LE, SND_PCM_FORMAT_U24_3LE    },
        { AV_CODEC_ID_PCM_U24BE, SND_PCM_FORMAT_U24_3BE    },
        { AV_CODEC_ID_PCM_S16LE, SND_PCM_FORMAT_S16_LE     },
        { AV_CODEC_ID_PCM_S16BE, SND_PCM_FORMAT_S16_BE     },
        { AV_CODEC_ID_PCM_U16LE, SND_PCM_FORMAT_U16_LE     },
        { AV_CODEC_ID_PCM_U16BE, SND_PCM_FORMAT_U16_BE     },
        { AV_CODEC_ID_PCM_S8,    SND_PCM_FORMAT_S8         },
        { AV_CODEC_ID_PCM_U8,    SND_PCM_FORMAT_U8         },
        { AV_CODEC_ID_PCM_MULAW, SND_PCM_FORMAT_MU_LAW     },
        { AV_CODEC_ID_PCM_ALAW,  SND_PCM_FORMAT_A_LAW      },
    };
    unsigned i;

    for (i = 0; i < sizeof(pcm_format_map) / sizeof(pcm_format_map[0]); i++)
        if (pcm_format_map[i].codec == codec_id)
            return pcm_format_map[i].fmt;
    return SND_PCM_FORMAT_UNKNOWN;
}
/* Generate one channel-reorder function: copies n frames of CHANNELS
 * samples of TYPE from in_v to out_v, permuting channels per MAP.
 * (No comments inside the macros: they are backslash-continued.) */
#define MAKE_REORDER_FUNC(NAME, TYPE, CHANNELS, LAYOUT, MAP)    \
static void alsa_reorder_ ## NAME ## _ ## LAYOUT(const void *in_v, \
                                                 void *out_v,      \
                                                 int n)            \
{                                                                  \
    const TYPE *in = in_v;                                         \
    TYPE      *out = out_v;                                        \
                                                                   \
    while (n-- > 0) {                                              \
        MAP                                                        \
        in  += CHANNELS;                                           \
        out += CHANNELS;                                           \
    }                                                              \
}

/* Instantiate the reorder function above for the four sample widths
 * used by the PCM codecs handled in find_reorder_func(). */
#define MAKE_REORDER_FUNCS(CHANNELS, LAYOUT, MAP) \
    MAKE_REORDER_FUNC(int8,  int8_t,  CHANNELS, LAYOUT, MAP) \
    MAKE_REORDER_FUNC(int16, int16_t, CHANNELS, LAYOUT, MAP) \
    MAKE_REORDER_FUNC(int32, int32_t, CHANNELS, LAYOUT, MAP) \
    MAKE_REORDER_FUNC(f32,   float,   CHANNELS, LAYOUT, MAP)

/* 5.0: FFmpeg order FL FR FC BL BR -> ALSA order FL FR BL BR FC */
MAKE_REORDER_FUNCS(5, out_50, \
        out[0] = in[0]; \
        out[1] = in[1]; \
        out[2] = in[3]; \
        out[3] = in[4]; \
        out[4] = in[2]; \
        )

/* 5.1: move FC/LFE after the rear pair */
MAKE_REORDER_FUNCS(6, out_51, \
        out[0] = in[0]; \
        out[1] = in[1]; \
        out[2] = in[4]; \
        out[3] = in[5]; \
        out[4] = in[2]; \
        out[5] = in[3]; \
        )

/* 7.1: same swap as 5.1, side channels kept in place */
MAKE_REORDER_FUNCS(8, out_71, \
        out[0] = in[0]; \
        out[1] = in[1]; \
        out[2] = in[4]; \
        out[3] = in[5]; \
        out[4] = in[2]; \
        out[5] = in[3]; \
        out[6] = in[6]; \
        out[7] = in[7]; \
        )
/* Sample-width classes used to index the generated reorder functions. */
#define FORMAT_I8  0
#define FORMAT_I16 1
#define FORMAT_I32 2
#define FORMAT_F32 3

/* Select the alsa_reorder_<width>_out_<layout> function matching the
 * previously computed width class. */
#define PICK_REORDER(layout)\
switch(format) {\
case FORMAT_I8:  s->reorder_func = alsa_reorder_int8_out_  ##layout; break;\
case FORMAT_I16: s->reorder_func = alsa_reorder_int16_out_ ##layout; break;\
case FORMAT_I32: s->reorder_func = alsa_reorder_int32_out_ ##layout; break;\
case FORMAT_F32: s->reorder_func = alsa_reorder_f32_out_   ##layout; break;\
}

/**
 * Pick a channel-reorder function for the given codec/layout and store it
 * in s->reorder_func.
 *
 * @param s        ALSA private data to fill in
 * @param codec_id raw PCM codec ID (determines sample width)
 * @param layout   FFmpeg channel layout mask
 * @param out      non-zero for playback; capture reordering is unsupported
 * @return 0 on success (s->reorder_func may stay NULL when no reordering
 *         is needed), AVERROR(ENOSYS) when unsupported
 */
static av_cold int find_reorder_func(AlsaData *s, int codec_id, uint64_t layout, int out)
{
    int format;

    /* reordering input is not currently supported */
    if (!out)
        return AVERROR(ENOSYS);

    /* reordering is not needed for QUAD or 2_2 layout */
    if (layout == AV_CH_LAYOUT_QUAD || layout == AV_CH_LAYOUT_2_2)
        return 0;

    /* Collapse the codec ID to a sample-width class; endianness does not
     * matter for a pure channel permutation. */
    switch (codec_id) {
    case AV_CODEC_ID_PCM_S8:
    case AV_CODEC_ID_PCM_U8:
    case AV_CODEC_ID_PCM_ALAW:
    case AV_CODEC_ID_PCM_MULAW: format = FORMAT_I8;  break;
    case AV_CODEC_ID_PCM_S16LE:
    case AV_CODEC_ID_PCM_S16BE:
    case AV_CODEC_ID_PCM_U16LE:
    case AV_CODEC_ID_PCM_U16BE: format = FORMAT_I16; break;
    case AV_CODEC_ID_PCM_S32LE:
    case AV_CODEC_ID_PCM_S32BE:
    case AV_CODEC_ID_PCM_U32LE:
    case AV_CODEC_ID_PCM_U32BE: format = FORMAT_I32; break;
    case AV_CODEC_ID_PCM_F32LE:
    case AV_CODEC_ID_PCM_F32BE: format = FORMAT_F32; break;
    default:                    return AVERROR(ENOSYS);
    }

    if      (layout == AV_CH_LAYOUT_5POINT0_BACK || layout == AV_CH_LAYOUT_5POINT0)
        PICK_REORDER(50)
    else if (layout == AV_CH_LAYOUT_5POINT1_BACK || layout == AV_CH_LAYOUT_5POINT1)
        PICK_REORDER(51)
    else if (layout == AV_CH_LAYOUT_7POINT1)
        PICK_REORDER(71)

    return s->reorder_func ? 0 : AVERROR(ENOSYS);
}
/* Open and configure an ALSA PCM; see the contract in alsa.h.
 * Cleanup is goto-based: "fail" frees hw_params then closes the handle,
 * "fail1" only closes the handle (used before hw_params exists, or after
 * it has already been freed). */
av_cold int ff_alsa_open(AVFormatContext *ctx, snd_pcm_stream_t mode,
                         unsigned int *sample_rate,
                         int channels, enum AVCodecID *codec_id)
{
    AlsaData *s = ctx->priv_data;
    const char *audio_device;
    int res, flags = 0;
    snd_pcm_format_t format;
    snd_pcm_t *h;
    snd_pcm_hw_params_t *hw_params;
    snd_pcm_uframes_t buffer_size, period_size;
    uint64_t layout = ctx->streams[0]->codecpar->channel_layout;

    /* empty device name selects the ALSA default PCM */
    if (ctx->url[0] == 0) audio_device = "default";
    else                  audio_device = ctx->url;

    if (*codec_id == AV_CODEC_ID_NONE)
        *codec_id = DEFAULT_CODEC_ID;
    format = codec_id_to_pcm_format(*codec_id);
    if (format == SND_PCM_FORMAT_UNKNOWN) {
        av_log(ctx, AV_LOG_ERROR, "sample format 0x%04x is not supported\n", *codec_id);
        return AVERROR(ENOSYS);
    }
    s->frame_size = av_get_bits_per_sample(*codec_id) / 8 * channels;

    if (ctx->flags & AVFMT_FLAG_NONBLOCK) {
        flags = SND_PCM_NONBLOCK;
    }
    res = snd_pcm_open(&h, audio_device, mode, flags);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot open audio device %s (%s)\n",
               audio_device, snd_strerror(res));
        return AVERROR(EIO);
    }

    res = snd_pcm_hw_params_malloc(&hw_params);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot allocate hardware parameter structure (%s)\n",
               snd_strerror(res));
        goto fail1;
    }

    res = snd_pcm_hw_params_any(h, hw_params);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot initialize hardware parameter structure (%s)\n",
               snd_strerror(res));
        goto fail;
    }

    res = snd_pcm_hw_params_set_access(h, hw_params, SND_PCM_ACCESS_RW_INTERLEAVED);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set access type (%s)\n",
               snd_strerror(res));
        goto fail;
    }

    res = snd_pcm_hw_params_set_format(h, hw_params, format);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set sample format 0x%04x %d (%s)\n",
               *codec_id, format, snd_strerror(res));
        goto fail;
    }

    /* "near" variant: the device may pick a close rate; the caller sees
     * the actually selected value through *sample_rate */
    res = snd_pcm_hw_params_set_rate_near(h, hw_params, sample_rate, 0);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set sample rate (%s)\n",
               snd_strerror(res));
        goto fail;
    }

    res = snd_pcm_hw_params_set_channels(h, hw_params, channels);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set channel count to %d (%s)\n",
               channels, snd_strerror(res));
        goto fail;
    }

    snd_pcm_hw_params_get_buffer_size_max(hw_params, &buffer_size);
    buffer_size = FFMIN(buffer_size, ALSA_BUFFER_SIZE_MAX);
    /* TODO: maybe use ctx->max_picture_buffer somehow */
    res = snd_pcm_hw_params_set_buffer_size_near(h, hw_params, &buffer_size);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set ALSA buffer size (%s)\n",
               snd_strerror(res));
        goto fail;
    }

    /* smallest period -> lowest latency; fall back to a quarter of the
     * buffer when the driver reports no minimum */
    snd_pcm_hw_params_get_period_size_min(hw_params, &period_size, NULL);
    if (!period_size)
        period_size = buffer_size / 4;
    res = snd_pcm_hw_params_set_period_size_near(h, hw_params, &period_size, NULL);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set ALSA period size (%s)\n",
               snd_strerror(res));
        goto fail;
    }
    s->period_size = period_size;

    res = snd_pcm_hw_params(h, hw_params);
    if (res < 0) {
        av_log(ctx, AV_LOG_ERROR, "cannot set parameters (%s)\n",
               snd_strerror(res));
        goto fail;
    }

    snd_pcm_hw_params_free(hw_params);

    /* multichannel with a known layout may need channel permutation;
     * an unknown layout is only a warning, playback continues unordered */
    if (channels > 2 && layout) {
        if (find_reorder_func(s, *codec_id, layout, mode == SND_PCM_STREAM_PLAYBACK) < 0) {
            char name[128];
            av_get_channel_layout_string(name, sizeof(name), channels, layout);
            av_log(ctx, AV_LOG_WARNING, "ALSA channel layout unknown or unimplemented for %s %s.\n",
                   name, mode == SND_PCM_STREAM_PLAYBACK ? "playback" : "capture");
        }
        if (s->reorder_func) {
            s->reorder_buf_size = buffer_size;
            s->reorder_buf = av_malloc_array(s->reorder_buf_size, s->frame_size);
            if (!s->reorder_buf)
                goto fail1; /* hw_params already freed above */
        }
    }

    s->h = h;
    return 0;

fail:
    snd_pcm_hw_params_free(hw_params);
fail1:
    snd_pcm_close(h);
    return AVERROR(EIO);
}
/**
 * Close the ALSA PCM and release per-stream resources.
 *
 * @param s1 media file handle
 * @return 0 always
 */
av_cold int ff_alsa_close(AVFormatContext *s1)
{
    AlsaData *priv = s1->priv_data;

    /* For playback, switch back to blocking mode so snd_pcm_drain()
     * can let the queued samples finish playing. */
    if (snd_pcm_stream(priv->h) == SND_PCM_STREAM_PLAYBACK) {
        snd_pcm_nonblock(priv->h, 0);
        snd_pcm_drain(priv->h);
    }

    /* timefilter exists only on the capture side */
    if (CONFIG_ALSA_INDEV)
        ff_timefilter_destroy(priv->timefilter);
    av_freep(&priv->reorder_buf);

    snd_pcm_close(priv->h);
    return 0;
}
/**
 * Try to recover from an ALSA over/underrun.
 *
 * @param s1  media file handle
 * @param err negative errno reported by the failing ALSA call
 * @return >= 0 after successful recovery, AVERROR(EIO) or -1 otherwise
 */
int ff_alsa_xrun_recover(AVFormatContext *s1, int err)
{
    AlsaData *priv = s1->priv_data;

    av_log(s1, AV_LOG_WARNING, "ALSA buffer xrun.\n");

    /* suspend/resume is not handled */
    if (err == -ESTRPIPE) {
        av_log(s1, AV_LOG_ERROR, "-ESTRPIPE... Unsupported!\n");
        return -1;
    }

    /* plain xrun: re-prepare the device and retry */
    if (err == -EPIPE) {
        int ret = snd_pcm_prepare(priv->h);
        if (ret < 0) {
            av_log(s1, AV_LOG_ERROR, "cannot recover from underrun (snd_pcm_prepare failed: %s)\n", snd_strerror(ret));
            return AVERROR(EIO);
        }
        err = ret;
    }
    return err;
}
/**
 * Grow the channel-reorder buffer to hold at least min_size frames.
 *
 * @param s        ALSA private data (reorder_buf / reorder_buf_size updated)
 * @param min_size required capacity in frames
 * @return 0 on success, AVERROR(ENOMEM) on allocation failure (the old
 *         buffer is left intact in that case)
 */
int ff_alsa_extend_reorder_buf(AlsaData *s, int min_size)
{
    int new_size = s->reorder_buf_size;
    void *grown;

    av_assert0(new_size != 0);
    /* grow geometrically so repeated extensions stay cheap */
    while (new_size < min_size)
        new_size *= 2;

    grown = av_realloc_array(s->reorder_buf, new_size, s->frame_size);
    if (!grown)
        return AVERROR(ENOMEM);

    s->reorder_buf      = grown;
    s->reorder_buf_size = new_size;
    return 0;
}
/* ported from alsa-utils/aplay.c */
/**
 * Enumerate ALSA PCM devices matching the given stream direction.
 *
 * @param device_list list to append AVDeviceInfo entries to
 * @param stream_type SND_PCM_STREAM_PLAYBACK or SND_PCM_STREAM_CAPTURE
 * @return 0 on success, a negative AVERROR code on failure
 */
int ff_alsa_get_device_list(AVDeviceInfoList *device_list, snd_pcm_stream_t stream_type)
{
    int ret = 0;
    void **hints, **n;
    char *name = NULL, *descr = NULL, *io = NULL, *tmp;
    AVDeviceInfo *new_device = NULL;
    const char *filter = stream_type == SND_PCM_STREAM_PLAYBACK ? "Output" : "Input";

    if (snd_device_name_hint(-1, "pcm", &hints) < 0)
        return AVERROR_EXTERNAL;
    n = hints;
    while (*n && !ret) {
        name  = snd_device_name_get_hint(*n, "NAME");
        descr = snd_device_name_get_hint(*n, "DESC");
        io    = snd_device_name_get_hint(*n, "IOID");
        /* Any hint key may be missing, so name/descr/io can be NULL;
         * a device without a NAME cannot be used and is skipped.
         * A NULL IOID means the device supports both directions. */
        if (name && (!io || !strcmp(io, filter))) {
            new_device = av_mallocz(sizeof(AVDeviceInfo));
            if (!new_device) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            new_device->device_name = av_strdup(name);
            /* use the last line of the (possibly multi-line) description */
            if (descr && (tmp = strrchr(descr, '\n')) && tmp[1])
                new_device->device_description = av_strdup(&tmp[1]);
            else
                new_device->device_description = av_strdup(descr ? descr : name);
            if (!new_device->device_description || !new_device->device_name) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            if ((ret = av_dynarray_add_nofree(&device_list->devices,
                                              &device_list->nb_devices, new_device)) < 0) {
                goto fail;
            }
            if (!strcmp(new_device->device_name, "default"))
                device_list->default_device = device_list->nb_devices - 1;
            new_device = NULL;
        }
      fail:
        /* hint strings were allocated by alsa-lib: plain free() */
        free(io);
        free(name);
        free(descr);
        n++;
    }
    /* new_device is non-NULL only when an error interrupted the loop
     * before ownership passed to device_list */
    if (new_device) {
        av_free(new_device->device_description);
        av_free(new_device->device_name);
        av_free(new_device);
    }
    snd_device_name_free_hint(hints);
    return ret;
}

108
externals/ffmpeg/libavdevice/alsa.h vendored Executable file
View File

@@ -0,0 +1,108 @@
/*
* ALSA input and output
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* ALSA input and output: definitions and structures
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
*/
#ifndef AVDEVICE_ALSA_H
#define AVDEVICE_ALSA_H

#include <alsa/asoundlib.h>
#include "config.h"
#include "libavutil/log.h"
#include "timefilter.h"
#include "avdevice.h"

/* XXX: we make the assumption that the soundcard accepts this format */
/* XXX: find better solution with "preinit" method, needed also in
        other formats */
#define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)

/* signature of the generated channel-reorder functions */
typedef void (*ff_reorder_func)(const void *, void *, int);

/* upper bound (in frames) requested for the ALSA ring buffer */
#define ALSA_BUFFER_SIZE_MAX 131072

typedef struct AlsaData {
    AVClass *class;              /* AVOptions support; must be first */
    snd_pcm_t *h;                /* open PCM handle, set by ff_alsa_open() */
    int frame_size;              ///< bytes per sample * channels
    int period_size;             ///< preferred size for reads and writes, in frames
    int sample_rate;             ///< sample rate set by user
    int channels;                ///< number of channels set by user
    int last_period;             /* frames delivered by the previous read; fed to the timefilter */
    TimeFilter *timefilter;      /* wall-clock smoother (capture side only) */
    void (*reorder_func)(const void *, void *, int); /* NULL when no channel permutation is needed */
    void *reorder_buf;           /* scratch buffer for reordered samples */
    int reorder_buf_size;        ///< in frames
    int64_t timestamp;           ///< current timestamp, without latency applied.
} AlsaData;

/**
 * Open an ALSA PCM.
 *
 * @param s media file handle
 * @param mode either SND_PCM_STREAM_CAPTURE or SND_PCM_STREAM_PLAYBACK
 * @param sample_rate in: requested sample rate;
 *                    out: actually selected sample rate
 * @param channels number of channels
 * @param codec_id in: requested AVCodecID or AV_CODEC_ID_NONE;
 *                 out: actually selected AVCodecID, changed only if
 *                 AV_CODEC_ID_NONE was requested
 *
 * @return 0 if OK, AVERROR_xxx on error
 */
av_warn_unused_result
int ff_alsa_open(AVFormatContext *s, snd_pcm_stream_t mode,
                 unsigned int *sample_rate,
                 int channels, enum AVCodecID *codec_id);

/**
 * Close the ALSA PCM.
 *
 * @param s1 media file handle
 *
 * @return 0
 */
int ff_alsa_close(AVFormatContext *s1);

/**
 * Try to recover from ALSA buffer underrun.
 *
 * @param s1 media file handle
 * @param err error code reported by the previous ALSA call
 *
 * @return 0 if OK, AVERROR_xxx on error
 */
av_warn_unused_result
int ff_alsa_xrun_recover(AVFormatContext *s1, int err);

/* Grow the reorder buffer to hold at least size frames. */
av_warn_unused_result
int ff_alsa_extend_reorder_buf(AlsaData *s, int size);

/* Append the ALSA PCMs for the given direction to device_list. */
av_warn_unused_result
int ff_alsa_get_device_list(AVDeviceInfoList *device_list, snd_pcm_stream_t stream_type);

#endif /* AVDEVICE_ALSA_H */

168
externals/ffmpeg/libavdevice/alsa_dec.c vendored Executable file
View File

@@ -0,0 +1,168 @@
/*
* ALSA input and output
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* ALSA input and output: input
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
* @author Nicolas George ( nicolas george normalesup org )
*
* This avdevice decoder can capture audio from an ALSA (Advanced
* Linux Sound Architecture) device.
*
* The filename parameter is the name of an ALSA PCM device capable of
* capture, for example "default" or "plughw:1"; see the ALSA documentation
* for naming conventions. The empty string is equivalent to "default".
*
* The capture period is set to the lower value available for the device,
* which gives a low latency suitable for real-time capture.
*
* The PTS are an Unix time in microsecond.
*
* Due to a bug in the ALSA library
* (https://bugtrack.alsa-project.org/alsa-bug/view.php?id=4308), this
* decoder does not work with certain ALSA plugins, especially the dsnoop
* plugin.
*/
#include <alsa/asoundlib.h>
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavformat/internal.h"
#include "avdevice.h"
#include "alsa.h"
/**
 * Open the capture PCM and create the single audio stream.
 *
 * @param s1 demuxer context (priv_data is AlsaData)
 * @return 0 on success, a negative AVERROR code on failure
 */
static av_cold int audio_read_header(AVFormatContext *s1)
{
    AlsaData *s = s1->priv_data;
    AVStream *st;
    int ret;
    enum AVCodecID codec_id;

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        av_log(s1, AV_LOG_ERROR, "Cannot add stream\n");
        return AVERROR(ENOMEM);
    }
    codec_id = s1->audio_codec_id;

    ret = ff_alsa_open(s1, SND_PCM_STREAM_CAPTURE, &s->sample_rate, s->channels,
                       &codec_id);
    if (ret < 0) {
        /* propagate the specific error (e.g. ENOSYS for an unsupported
         * format) instead of flattening everything to EIO */
        return ret;
    }

    /* take real parameters */
    st->codecpar->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codecpar->codec_id    = codec_id;
    st->codecpar->sample_rate = s->sample_rate;
    st->codecpar->channels    = s->channels;
    st->codecpar->frame_size  = s->frame_size;
    avpriv_set_pts_info(st, 64, 1, 1000000);  /* 64 bits pts in us */

    /* microseconds instead of seconds, MHz instead of Hz */
    s->timefilter = ff_timefilter_new(1000000.0 / s->sample_rate,
                                      s->period_size, 1.5E-6);
    if (!s->timefilter)
        goto fail;

    return 0;

fail:
    snd_pcm_close(s->h);
    return AVERROR(EIO);
}
/**
 * Read one period of interleaved samples into a new packet.
 *
 * Retries through xrun recovery until a read succeeds; forwards EAGAIN
 * in non-blocking mode.  The pts is the wall clock minus the driver
 * delay, smoothed by the timefilter.
 */
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    AlsaData *s = s1->priv_data;
    int res;
    int64_t dts;
    snd_pcm_sframes_t delay = 0;

    if (av_new_packet(pkt, s->period_size * s->frame_size) < 0) {
        return AVERROR(EIO);
    }

    while ((res = snd_pcm_readi(s->h, pkt->data, s->period_size)) < 0) {
        if (res == -EAGAIN) {
            av_packet_unref(pkt);
            return AVERROR(EAGAIN);
        }
        if (ff_alsa_xrun_recover(s1, res) < 0) {
            av_log(s1, AV_LOG_ERROR, "ALSA read error: %s\n",
                   snd_strerror(res));
            av_packet_unref(pkt);
            return AVERROR(EIO);
        }
        /* an xrun breaks the clock model; restart the filter */
        ff_timefilter_reset(s->timefilter);
    }

    dts = av_gettime();
    snd_pcm_delay(s->h, &delay);
    /* timestamp of the first sample of this read: now, minus what is
     * still queued in the driver plus what was just consumed */
    dts -= av_rescale(delay + res, 1000000, s->sample_rate);
    pkt->pts = ff_timefilter_update(s->timefilter, dts, s->last_period);
    s->last_period = res;

    /* res is the number of frames actually read; may be < period_size */
    pkt->size = res * s->frame_size;

    return 0;
}
/* List the capture-capable ALSA PCM devices. */
static int audio_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
{
    const snd_pcm_stream_t direction = SND_PCM_STREAM_CAPTURE;

    return ff_alsa_get_device_list(device_list, direction);
}
/* user-settable capture parameters (defaults: 48 kHz stereo) */
static const AVOption options[] = {
    { "sample_rate", "", offsetof(AlsaData, sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "channels",    "", offsetof(AlsaData, channels),    AV_OPT_TYPE_INT, {.i64 = 2},     1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

static const AVClass alsa_demuxer_class = {
    .class_name = "ALSA indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};

/* ALSA capture device, registered via alldevices.c */
AVInputFormat ff_alsa_demuxer = {
    .name           = "alsa",
    .long_name      = NULL_IF_CONFIG_SMALL("ALSA audio input"),
    .priv_data_size = sizeof(AlsaData),
    .read_header    = audio_read_header,
    .read_packet    = audio_read_packet,
    .read_close     = ff_alsa_close,
    .get_device_list = audio_get_device_list,
    .flags          = AVFMT_NOFILE, /* the "URL" is a device name, not a file */
    .priv_class     = &alsa_demuxer_class,
};

174
externals/ffmpeg/libavdevice/alsa_enc.c vendored Executable file
View File

@@ -0,0 +1,174 @@
/*
* ALSA input and output
* Copyright (c) 2007 Luca Abeni ( lucabe72 email it )
* Copyright (c) 2007 Benoit Fouet ( benoit fouet free fr )
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* ALSA input and output: output
* @author Luca Abeni ( lucabe72 email it )
* @author Benoit Fouet ( benoit fouet free fr )
*
* This avdevice encoder can play audio to an ALSA (Advanced Linux
* Sound Architecture) device.
*
* The filename parameter is the name of an ALSA PCM device capable of
* capture, for example "default" or "plughw:1"; see the ALSA documentation
* for naming conventions. The empty string is equivalent to "default".
*
* The playback period is set to the lower value available for the device,
* which gives a low latency suitable for real-time playback.
*/
#include <alsa/asoundlib.h>
#include "libavutil/internal.h"
#include "libavutil/time.h"
#include "libavformat/internal.h"
#include "avdevice.h"
#include "alsa.h"
/**
 * Validate the stream and open the playback PCM.
 *
 * @param s1 muxer context with exactly one audio stream
 * @return 0 on success, a negative AVERROR code on failure
 */
static av_cold int audio_write_header(AVFormatContext *s1)
{
    AlsaData *s = s1->priv_data;
    AVStream *st = NULL;
    unsigned int sample_rate;
    enum AVCodecID codec_id;
    int res;

    if (s1->nb_streams != 1 || s1->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
        av_log(s1, AV_LOG_ERROR, "Only a single audio stream is supported.\n");
        return AVERROR(EINVAL);
    }
    st = s1->streams[0];

    sample_rate = st->codecpar->sample_rate;
    codec_id    = st->codecpar->codec_id;
    res = ff_alsa_open(s1, SND_PCM_STREAM_PLAYBACK, &sample_rate,
                       st->codecpar->channels, &codec_id);
    /* Bail out before touching s->h: on failure ff_alsa_open has already
     * closed the PCM and never stored a handle, so the fail path below
     * must not run. */
    if (res < 0)
        return res;
    if (sample_rate != st->codecpar->sample_rate) {
        av_log(s1, AV_LOG_ERROR,
               "sample rate %d not available, nearest is %d\n",
               st->codecpar->sample_rate, sample_rate);
        goto fail;
    }
    avpriv_set_pts_info(st, 64, 1, sample_rate);

    return res;

fail:
    snd_pcm_close(s->h);
    return AVERROR(EIO);
}
/**
 * Write one packet of interleaved samples to the playback PCM,
 * permuting channels through s->reorder_buf when required.
 * Forwards EAGAIN in non-blocking mode; retries across xruns.
 */
static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
{
    AlsaData *s = s1->priv_data;
    int res;
    int size = pkt->size;
    uint8_t *buf = pkt->data;

    /* convert byte count to frame count */
    size /= s->frame_size;
    /* track the stream position for get_output_timestamp() */
    if (pkt->dts != AV_NOPTS_VALUE)
        s->timestamp = pkt->dts;
    s->timestamp += pkt->duration ? pkt->duration : size;

    if (s->reorder_func) {
        if (size > s->reorder_buf_size)
            if (ff_alsa_extend_reorder_buf(s, size))
                return AVERROR(ENOMEM);
        s->reorder_func(buf, s->reorder_buf, size);
        buf = s->reorder_buf;
    }
    while ((res = snd_pcm_writei(s->h, buf, size)) < 0) {
        if (res == -EAGAIN) {
            return AVERROR(EAGAIN);
        }

        if (ff_alsa_xrun_recover(s1, res) < 0) {
            av_log(s1, AV_LOG_ERROR, "ALSA write error: %s\n",
                   snd_strerror(res));
            return AVERROR(EIO);
        }
    }

    return 0;
}
/**
 * Write an uncoded (raw) frame by wrapping it in a temporary packet.
 * In query mode, reports whether the frame's sample format is playable
 * (planar formats are rejected; ALSA expects interleaved data).
 */
static int audio_write_frame(AVFormatContext *s1, int stream_index,
                             AVFrame **frame, unsigned flags)
{
    AlsaData *s = s1->priv_data;
    AVPacket pkt;

    /* ff_alsa_open() should have accepted only supported formats */
    if ((flags & AV_WRITE_UNCODED_FRAME_QUERY))
        return av_sample_fmt_is_planar(s1->streams[stream_index]->codecpar->format) ?
               AVERROR(EINVAL) : 0;

    /* set only used fields (pkt is deliberately not fully initialized:
     * audio_write_packet() reads just data/size/dts/duration) */
    pkt.data     = (*frame)->data[0];
    pkt.size     = (*frame)->nb_samples * s->frame_size;
    pkt.dts      = (*frame)->pkt_dts;
    pkt.duration = (*frame)->pkt_duration;
    return audio_write_packet(s1, &pkt);
}
/**
 * Report the timestamp of the sample currently being played, together
 * with the wall-clock time of the measurement.
 */
static void
audio_get_output_timestamp(AVFormatContext *s1, int stream,
                           int64_t *dts, int64_t *wall)
{
    AlsaData *priv = s1->priv_data;
    snd_pcm_sframes_t queued = 0;

    *wall = av_gettime();
    /* subtract what is still queued in the ALSA buffer from the
     * stream position accumulated in audio_write_packet() */
    snd_pcm_delay(priv->h, &queued);
    *dts = priv->timestamp - queued;
}
/* List the playback-capable ALSA PCM devices. */
static int audio_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
{
    const snd_pcm_stream_t direction = SND_PCM_STREAM_PLAYBACK;

    return ff_alsa_get_device_list(device_list, direction);
}
static const AVClass alsa_muxer_class = {
    .class_name = "ALSA outdev",
    .item_name  = av_default_item_name,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
};

/* ALSA playback device, registered via alldevices.c */
AVOutputFormat ff_alsa_muxer = {
    .name           = "alsa",
    .long_name      = NULL_IF_CONFIG_SMALL("ALSA audio output"),
    .priv_data_size = sizeof(AlsaData),
    .audio_codec    = DEFAULT_CODEC_ID, /* native-endian S16 by default */
    .video_codec    = AV_CODEC_ID_NONE,
    .write_header   = audio_write_header,
    .write_packet   = audio_write_packet,
    .write_trailer  = ff_alsa_close,
    .write_uncoded_frame = audio_write_frame,
    .get_device_list = audio_get_device_list,
    .get_output_timestamp = audio_get_output_timestamp,
    .flags          = AVFMT_NOFILE, /* the "URL" is a device name, not a file */
    .priv_class     = &alsa_muxer_class,
};

871
externals/ffmpeg/libavdevice/android_camera.c vendored Executable file
View File

@@ -0,0 +1,871 @@
/*
* Android camera input device
*
* Copyright (C) 2017 Felix Matouschek
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <errno.h>
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>
#include <camera/NdkCameraDevice.h>
#include <camera/NdkCameraManager.h>
#include <media/NdkImage.h>
#include <media/NdkImageReader.h>
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/avstring.h"
#include "libavutil/display.h"
#include "libavutil/imgutils.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixfmt.h"
#include "libavutil/threadmessage.h"
#include "libavutil/time.h"
#include "version.h"
/* This image format is available on all Android devices
 * supporting the Camera2 API */
#define IMAGE_FORMAT_ANDROID AIMAGE_FORMAT_YUV_420_888

#define MAX_BUF_COUNT 2                 /* images held by the AImageReader */
#define VIDEO_STREAM_INDEX 0            /* single video stream */
#define VIDEO_TIMEBASE_ANDROID 1000000000 /* NDK timestamps are in ns */

/* expand an enum value into its name string (for status reporting) */
#define RETURN_CASE(x) case x: return AV_STRINGIFY(x);
#define RETURN_DEFAULT(x) default: return AV_STRINGIFY(x);

/* Per-instance state of the android_camera input device. */
typedef struct AndroidCameraCtx {
    const AVClass *class;               /* AVOptions support; must be first */

    /* user-requested parameters (from AVOptions, presumably — the
     * options table is outside this excerpt; confirm against it) */
    int requested_width;
    int requested_height;
    AVRational framerate;
    int camera_index;
    int input_queue_size;

    /* values negotiated with the camera */
    uint8_t lens_facing;
    int32_t sensor_orientation;
    int width;
    int height;
    int32_t framerate_range[2];
    int image_format;

    /* NDK camera objects and their callback tables */
    ACameraManager *camera_mgr;
    char *camera_id;
    ACameraMetadata *camera_metadata;
    ACameraDevice *camera_dev;
    ACameraDevice_StateCallbacks camera_state_callbacks;
    AImageReader *image_reader;
    AImageReader_ImageListener image_listener;
    ANativeWindow *image_reader_window;
    ACaptureSessionOutputContainer *capture_session_output_container;
    ACaptureSessionOutput *capture_session_output;
    ACameraOutputTarget *camera_output_target;
    ACaptureRequest *capture_request;
    ACameraCaptureSession_stateCallbacks capture_session_state_callbacks;
    ACameraCaptureSession *capture_session;

    /* frames are handed from NDK callbacks to the demuxer through this
     * queue; the atomics coordinate shutdown and format discovery */
    AVThreadMessageQueue *input_queue;
    atomic_int exit;
    atomic_int got_image_format;
} AndroidCameraCtx;
/* Map a camera_status_t error code to its symbolic name, for log messages. */
static const char *camera_status_string(camera_status_t val)
{
    switch(val) {
        RETURN_CASE(ACAMERA_OK)
        RETURN_CASE(ACAMERA_ERROR_UNKNOWN)
        RETURN_CASE(ACAMERA_ERROR_INVALID_PARAMETER)
        RETURN_CASE(ACAMERA_ERROR_CAMERA_DISCONNECTED)
        RETURN_CASE(ACAMERA_ERROR_NOT_ENOUGH_MEMORY)
        RETURN_CASE(ACAMERA_ERROR_METADATA_NOT_FOUND)
        RETURN_CASE(ACAMERA_ERROR_CAMERA_DEVICE)
        RETURN_CASE(ACAMERA_ERROR_CAMERA_SERVICE)
        RETURN_CASE(ACAMERA_ERROR_SESSION_CLOSED)
        RETURN_CASE(ACAMERA_ERROR_INVALID_OPERATION)
        RETURN_CASE(ACAMERA_ERROR_STREAM_CONFIGURE_FAIL)
        RETURN_CASE(ACAMERA_ERROR_CAMERA_IN_USE)
        RETURN_CASE(ACAMERA_ERROR_MAX_CAMERA_IN_USE)
        RETURN_CASE(ACAMERA_ERROR_CAMERA_DISABLED)
        RETURN_CASE(ACAMERA_ERROR_PERMISSION_DENIED)
        // Unknown codes are reported as ACAMERA_ERROR_UNKNOWN
        RETURN_DEFAULT(ACAMERA_ERROR_UNKNOWN)
    }
}
/* Map a media_status_t error code to its symbolic name, for log messages. */
static const char *media_status_string(media_status_t val)
{
    switch(val) {
        RETURN_CASE(AMEDIA_OK)
        RETURN_CASE(AMEDIA_ERROR_UNKNOWN)
        RETURN_CASE(AMEDIA_ERROR_MALFORMED)
        RETURN_CASE(AMEDIA_ERROR_UNSUPPORTED)
        RETURN_CASE(AMEDIA_ERROR_INVALID_OBJECT)
        RETURN_CASE(AMEDIA_ERROR_INVALID_PARAMETER)
        RETURN_CASE(AMEDIA_ERROR_INVALID_OPERATION)
        RETURN_CASE(AMEDIA_DRM_NOT_PROVISIONED)
        RETURN_CASE(AMEDIA_DRM_RESOURCE_BUSY)
        RETURN_CASE(AMEDIA_DRM_DEVICE_REVOKED)
        RETURN_CASE(AMEDIA_DRM_SHORT_BUFFER)
        RETURN_CASE(AMEDIA_DRM_SESSION_NOT_OPENED)
        RETURN_CASE(AMEDIA_DRM_TAMPER_DETECTED)
        RETURN_CASE(AMEDIA_DRM_VERIFY_FAILED)
        RETURN_CASE(AMEDIA_DRM_NEED_KEY)
        RETURN_CASE(AMEDIA_DRM_LICENSE_EXPIRED)
        RETURN_CASE(AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE)
        RETURN_CASE(AMEDIA_IMGREADER_MAX_IMAGES_ACQUIRED)
        RETURN_CASE(AMEDIA_IMGREADER_CANNOT_LOCK_IMAGE)
        RETURN_CASE(AMEDIA_IMGREADER_CANNOT_UNLOCK_IMAGE)
        RETURN_CASE(AMEDIA_IMGREADER_IMAGE_NOT_LOCKED)
        // Unknown codes are reported as AMEDIA_ERROR_UNKNOWN
        RETURN_DEFAULT(AMEDIA_ERROR_UNKNOWN)
    }
}
static const char *error_state_callback_string(int val)
{
switch(val) {
RETURN_CASE(ERROR_CAMERA_IN_USE)
RETURN_CASE(ERROR_MAX_CAMERAS_IN_USE)
RETURN_CASE(ERROR_CAMERA_DISABLED)
RETURN_CASE(ERROR_CAMERA_DEVICE)
RETURN_CASE(ERROR_CAMERA_SERVICE)
default:
return "ERROR_CAMERA_UNKNOWN";
}
}
/* ACameraDevice state callback: the camera went away — flag shutdown. */
static void camera_dev_disconnected(void *context, ACameraDevice *device)
{
    AVFormatContext *fmt_ctx = context;
    AndroidCameraCtx *priv = fmt_ctx->priv_data;

    atomic_store(&priv->exit, 1);
    av_log(fmt_ctx, AV_LOG_ERROR, "Camera with id %s disconnected.\n",
           ACameraDevice_getId(device));
}
/* ACameraDevice state callback: a device-level error occurred — flag shutdown. */
static void camera_dev_error(void *context, ACameraDevice *device, int error)
{
    AVFormatContext *fmt_ctx = context;
    AndroidCameraCtx *priv = fmt_ctx->priv_data;

    atomic_store(&priv->exit, 1);
    av_log(fmt_ctx, AV_LOG_ERROR, "Error %s on camera with id %s.\n",
           error_state_callback_string(error), ACameraDevice_getId(device));
}
/**
 * Resolve ctx->camera_index to a camera id, fetch its metadata and open the
 * device, installing the disconnect/error callbacks.
 *
 * Fix: the original leaked the ACameraIdList on the ENOMEM and ENXIO error
 * paths; the list is now released on every path before returning.
 *
 * @return 0 on success, a negative AVERROR code on failure.
 */
static int open_camera(AVFormatContext *avctx)
{
    AndroidCameraCtx *ctx = avctx->priv_data;
    camera_status_t ret;
    ACameraIdList *camera_ids;

    ret = ACameraManager_getCameraIdList(ctx->camera_mgr, &camera_ids);
    if (ret != ACAMERA_OK) {
        av_log(avctx, AV_LOG_ERROR, "Failed to get camera id list, error: %s.\n",
               camera_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    if (ctx->camera_index < camera_ids->numCameras) {
        ctx->camera_id = av_strdup(camera_ids->cameraIds[ctx->camera_index]);
        if (!ctx->camera_id) {
            av_log(avctx, AV_LOG_ERROR, "Failed to allocate memory for camera_id.\n");
            ACameraManager_deleteCameraIdList(camera_ids);
            return AVERROR(ENOMEM);
        }
    } else {
        av_log(avctx, AV_LOG_ERROR, "No camera with index %d available.\n",
               ctx->camera_index);
        ACameraManager_deleteCameraIdList(camera_ids);
        return AVERROR(ENXIO);
    }

    ACameraManager_deleteCameraIdList(camera_ids);

    ret = ACameraManager_getCameraCharacteristics(ctx->camera_mgr,
            ctx->camera_id, &ctx->camera_metadata);
    if (ret != ACAMERA_OK) {
        av_log(avctx, AV_LOG_ERROR, "Failed to get metadata for camera with id %s, error: %s.\n",
               ctx->camera_id, camera_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    ctx->camera_state_callbacks.context = avctx;
    ctx->camera_state_callbacks.onDisconnected = camera_dev_disconnected;
    ctx->camera_state_callbacks.onError = camera_dev_error;

    ret = ACameraManager_openCamera(ctx->camera_mgr, ctx->camera_id,
                                    &ctx->camera_state_callbacks, &ctx->camera_dev);
    if (ret != ACAMERA_OK) {
        av_log(avctx, AV_LOG_ERROR, "Failed to open camera with id %s, error: %s.\n",
               ctx->camera_id, camera_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    return 0;
}
/* Cache lens facing and sensor orientation from the camera metadata. */
static void get_sensor_orientation(AVFormatContext *avctx)
{
    AndroidCameraCtx *priv = avctx->priv_data;
    ACameraMetadata_const_entry facing_entry;
    ACameraMetadata_const_entry orientation_entry;

    ACameraMetadata_getConstEntry(priv->camera_metadata,
                                  ACAMERA_LENS_FACING, &facing_entry);
    ACameraMetadata_getConstEntry(priv->camera_metadata,
                                  ACAMERA_SENSOR_ORIENTATION, &orientation_entry);

    priv->lens_facing = facing_entry.data.u8[0];
    priv->sensor_orientation = orientation_entry.data.i32[0];
}
/* Pick a capture resolution from the camera's advertised stream
 * configurations, accepting the requested size in either orientation;
 * falls back (with a warning) when no match is found. */
static void match_video_size(AVFormatContext *avctx)
{
    AndroidCameraCtx *ctx = avctx->priv_data;
    ACameraMetadata_const_entry available_configs;
    int found = 0;

    ACameraMetadata_getConstEntry(ctx->camera_metadata,
                                  ACAMERA_SCALER_AVAILABLE_STREAM_CONFIGURATIONS,
                                  &available_configs);

    // Entries are int32 quadruples: (format, width, height, is_input)
    for (int i = 0; i < available_configs.count; i++) {
        int32_t input = available_configs.data.i32[i * 4 + 3];
        int32_t format = available_configs.data.i32[i * 4 + 0];

        if (input) {
            // Skip input (reprocessing) stream configurations
            continue;
        }

        if (format == IMAGE_FORMAT_ANDROID) {
            int32_t width = available_configs.data.i32[i * 4 + 1];
            int32_t height = available_configs.data.i32[i * 4 + 2];

            //Same ratio
            if ((ctx->requested_width == width && ctx->requested_height == height) ||
                (ctx->requested_width == height && ctx->requested_height == width)) {
                ctx->width = width;
                ctx->height = height;
                found = 1;
                break;
            }
        }
    }

    if (!found || ctx->width == 0 || ctx->height == 0) {
        // NOTE(review): the fallback takes width/height from the very first
        // configuration entry regardless of its format or input flag —
        // confirm this is intended.
        ctx->width = available_configs.data.i32[1];
        ctx->height = available_configs.data.i32[2];

        av_log(avctx, AV_LOG_WARNING,
               "Requested video_size %dx%d not available, falling back to %dx%d\n",
               ctx->requested_width, ctx->requested_height, ctx->width, ctx->height);
    }

    return;
}
/* Pick an AE target fps range whose maximum equals the requested rate,
 * preferring a fixed range (min == max), then the matching range with the
 * highest minimum; falls back (with a warning) when nothing matches. */
static void match_framerate(AVFormatContext *avctx)
{
    AndroidCameraCtx *ctx = avctx->priv_data;
    ACameraMetadata_const_entry available_framerates;
    int found = 0;
    int current_best_match = -1;
    // NOTE(review): av_q2d() returns double; storing it in an int truncates,
    // so fractional rates (e.g. 30000/1001) match their floor — confirm intended.
    int requested_framerate = av_q2d(ctx->framerate);

    ACameraMetadata_getConstEntry(ctx->camera_metadata,
                                  ACAMERA_CONTROL_AE_AVAILABLE_TARGET_FPS_RANGES,
                                  &available_framerates);

    // Entries are int32 pairs: (min fps, max fps)
    for (int i = 0; i < available_framerates.count; i++) {
        int32_t min = available_framerates.data.i32[i * 2 + 0];
        int32_t max = available_framerates.data.i32[i * 2 + 1];

        if (requested_framerate == max) {
            if (min == max) {
                // Exact fixed-rate range: best possible match, stop looking
                ctx->framerate_range[0] = min;
                ctx->framerate_range[1] = max;
                found = 1;
                break;
            } else if (current_best_match >= 0) {
                // Among variable ranges, prefer the one with the highest minimum
                int32_t current_best_match_min = available_framerates.data.i32[current_best_match * 2 + 0];
                if (min > current_best_match_min) {
                    current_best_match = i;
                }
            } else {
                current_best_match = i;
            }
        }
    }

    if (!found) {
        if (current_best_match >= 0) {
            ctx->framerate_range[0] = available_framerates.data.i32[current_best_match * 2 + 0];
            ctx->framerate_range[1] = available_framerates.data.i32[current_best_match * 2 + 1];
        } else {
            // Nothing matched at all: fall back to the first advertised range
            ctx->framerate_range[0] = available_framerates.data.i32[0];
            ctx->framerate_range[1] = available_framerates.data.i32[1];
        }

        av_log(avctx, AV_LOG_WARNING,
               "Requested framerate %d not available, falling back to min: %d and max: %d fps\n",
               requested_framerate, ctx->framerate_range[0], ctx->framerate_range[1]);
    }

    return;
}
/* Inspect the chroma planes of the first captured frame to map Android's
 * YUV_420_888 layout onto an AVPixelFormat (planar YUV420P vs. interleaved
 * NV12/NV21). Returns 0 on success, a negative AVERROR code on failure. */
static int get_image_format(AVFormatContext *avctx, AImage *image)
{
    AndroidCameraCtx *ctx = avctx->priv_data;
    int32_t image_pixelstrides[2];
    uint8_t *image_plane_data[2];
    int plane_data_length[2];

    // Planes 1 and 2 are the chroma (U and V) planes
    for (int i = 0; i < 2; i++) {
        AImage_getPlanePixelStride(image, i + 1, &image_pixelstrides[i]);
        AImage_getPlaneData(image, i + 1, &image_plane_data[i], &plane_data_length[i]);
    }

    if (image_pixelstrides[0] != image_pixelstrides[1]) {
        av_log(avctx, AV_LOG_ERROR,
               "Pixel strides of U and V plane should have been the same.\n");
        return AVERROR_EXTERNAL;
    }

    switch (image_pixelstrides[0]) {
        case 1:
            // Chroma samples are tightly packed: fully planar 4:2:0
            ctx->image_format = AV_PIX_FMT_YUV420P;
            break;
        case 2:
            // Interleaved chroma; which plane starts first in the buffer
            // distinguishes NV12 (U first) from NV21 (V first)
            if (image_plane_data[0] < image_plane_data[1]) {
                ctx->image_format = AV_PIX_FMT_NV12;
            } else {
                ctx->image_format = AV_PIX_FMT_NV21;
            }
            break;
        default:
            av_log(avctx, AV_LOG_ERROR,
                   "Unknown pixel stride %d of U and V plane, cannot determine camera image format.\n",
                   image_pixelstrides[0]);
            return AVERROR(ENOSYS);
    }

    return 0;
}
/**
 * AImageReader callback, invoked on a camera thread for every captured frame.
 * Copies the AImage planes into a rawvideo AVPacket and pushes it onto the
 * input queue; frames are dropped (with a warning) when the queue is full.
 *
 * Fixes over the original:
 * - `image` is now initialized to NULL and only deleted when it was actually
 *   acquired (previously AImage_delete() could run on an uninitialized
 *   pointer when AImageReader_acquireLatestImage() failed).
 * - av_packet_unref() is now gated on the packet having been successfully
 *   created (previously an av_new_packet() failure could unref an
 *   uninitialized AVPacket because pkt_buffer_size was already nonzero).
 */
static void image_available(void *context, AImageReader *reader)
{
    AVFormatContext *avctx = context;
    AndroidCameraCtx *ctx = avctx->priv_data;
    media_status_t media_status;
    int ret = 0;

    AImage *image = NULL;
    int64_t image_timestamp;
    int32_t image_linestrides[4];
    uint8_t *image_plane_data[4];
    int plane_data_length[4];

    AVPacket pkt;
    int pkt_buffer_size = 0;
    int pkt_created = 0; // nonzero once pkt owns a buffer and may be unref'd

    media_status = AImageReader_acquireLatestImage(reader, &image);
    if (media_status != AMEDIA_OK) {
        if (media_status == AMEDIA_IMGREADER_NO_BUFFER_AVAILABLE) {
            av_log(avctx, AV_LOG_WARNING,
                   "An image reader frame was discarded");
        } else {
            av_log(avctx, AV_LOG_ERROR,
                   "Failed to acquire latest image from image reader, error: %s.\n",
                   media_status_string(media_status));
            ret = AVERROR_EXTERNAL;
        }
        goto error;
    }

    // Silently drop frames when exit is set
    if (atomic_load(&ctx->exit)) {
        goto error;
    }

    // Determine actual image format from the first frame
    if (!atomic_load(&ctx->got_image_format)) {
        ret = get_image_format(avctx, image);

        if (ret < 0) {
            av_log(avctx, AV_LOG_ERROR,
                   "Could not get image format of camera.\n");
            goto error;
        } else {
            atomic_store(&ctx->got_image_format, 1);
        }
    }

    pkt_buffer_size = av_image_get_buffer_size(ctx->image_format, ctx->width, ctx->height, 32);
    AImage_getTimestamp(image, &image_timestamp);

    AImage_getPlaneRowStride(image, 0, &image_linestrides[0]);
    AImage_getPlaneData(image, 0, &image_plane_data[0], &plane_data_length[0]);

    switch (ctx->image_format) {
        case AV_PIX_FMT_YUV420P:
            AImage_getPlaneRowStride(image, 1, &image_linestrides[1]);
            AImage_getPlaneData(image, 1, &image_plane_data[1], &plane_data_length[1]);
            AImage_getPlaneRowStride(image, 2, &image_linestrides[2]);
            AImage_getPlaneData(image, 2, &image_plane_data[2], &plane_data_length[2]);
            break;
        case AV_PIX_FMT_NV12:
            AImage_getPlaneRowStride(image, 1, &image_linestrides[1]);
            AImage_getPlaneData(image, 1, &image_plane_data[1], &plane_data_length[1]);
            break;
        case AV_PIX_FMT_NV21:
            // NV21 stores VU interleaved; plane 2 starts at the chroma base
            AImage_getPlaneRowStride(image, 2, &image_linestrides[1]);
            AImage_getPlaneData(image, 2, &image_plane_data[1], &plane_data_length[1]);
            break;
        default:
            av_log(avctx, AV_LOG_ERROR, "Unsupported camera image format.\n");
            ret = AVERROR(ENOSYS);
            goto error;
    }

    ret = av_new_packet(&pkt, pkt_buffer_size);

    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to create new av packet, error: %s.\n", av_err2str(ret));
        goto error;
    }
    pkt_created = 1;

    pkt.stream_index = VIDEO_STREAM_INDEX;
    pkt.pts = image_timestamp;
    av_image_copy_to_buffer(pkt.data, pkt_buffer_size,
                            (const uint8_t * const *) image_plane_data,
                            image_linestrides, ctx->image_format,
                            ctx->width, ctx->height, 32);

    // On success the queue takes ownership of pkt's buffer
    ret = av_thread_message_queue_send(ctx->input_queue, &pkt, AV_THREAD_MESSAGE_NONBLOCK);

error:
    if (ret < 0) {
        if (ret != AVERROR(EAGAIN)) {
            av_log(avctx, AV_LOG_ERROR,
                   "Error while processing new image, error: %s.\n", av_err2str(ret));
            av_thread_message_queue_set_err_recv(ctx->input_queue, ret);
            atomic_store(&ctx->exit, 1);
        } else {
            av_log(avctx, AV_LOG_WARNING,
                   "Input queue was full, dropping frame, consider raising the input_queue_size option (current value: %d)\n",
                   ctx->input_queue_size);
        }
        if (pkt_created) {
            av_packet_unref(&pkt);
        }
    }

    if (image) {
        AImage_delete(image);
    }

    return;
}
/* Create the AImageReader at the negotiated size, register image_available()
 * as its frame listener and fetch its native window (the capture target).
 * Returns 0 on success, AVERROR_EXTERNAL on any NDK failure. */
static int create_image_reader(AVFormatContext *avctx)
{
    AndroidCameraCtx *ctx = avctx->priv_data;
    media_status_t ret;

    ret = AImageReader_new(ctx->width, ctx->height, IMAGE_FORMAT_ANDROID,
                           MAX_BUF_COUNT, &ctx->image_reader);
    if (ret != AMEDIA_OK) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to create image reader, error: %s.\n", media_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    ctx->image_listener.context = avctx;
    ctx->image_listener.onImageAvailable = image_available;

    ret = AImageReader_setImageListener(ctx->image_reader, &ctx->image_listener);
    if (ret != AMEDIA_OK) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to set image listener on image reader, error: %s.\n",
               media_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    ret = AImageReader_getWindow(ctx->image_reader, &ctx->image_reader_window);
    if (ret != AMEDIA_OK) {
        av_log(avctx, AV_LOG_ERROR,
               "Could not get image reader window, error: %s.\n",
               media_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    return 0;
}
/* ACameraCaptureSession state callbacks: purely informational logging
 * (context is the AVFormatContext installed in create_capture_session()). */
static void capture_session_closed(void *context, ACameraCaptureSession *session)
{
    av_log(context, AV_LOG_INFO, "Android camera capture session was closed.\n");
}

static void capture_session_ready(void *context, ACameraCaptureSession *session)
{
    av_log(context, AV_LOG_INFO, "Android camera capture session is ready.\n");
}

static void capture_session_active(void *context, ACameraCaptureSession *session)
{
    av_log(context, AV_LOG_INFO, "Android camera capture session is active.\n");
}
/**
 * Wire the image reader window into a repeating Camera2 capture session:
 * output container -> session output -> output target -> capture request
 * (TEMPLATE_RECORD, with the negotiated fps range) -> capture session.
 * Returns 0 on success, AVERROR_EXTERNAL on any NDK failure.
 *
 * Fix: the error message for ACaptureRequest_addTarget() was garbled
 * ("Failed to add capture request capture request").
 */
static int create_capture_session(AVFormatContext *avctx)
{
    AndroidCameraCtx *ctx = avctx->priv_data;
    camera_status_t ret;

    ret = ACaptureSessionOutputContainer_create(&ctx->capture_session_output_container);
    if (ret != ACAMERA_OK) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to create capture session output container, error: %s.\n",
               camera_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    // Hold our own reference on the window; released in read_close
    ANativeWindow_acquire(ctx->image_reader_window);

    ret = ACaptureSessionOutput_create(ctx->image_reader_window, &ctx->capture_session_output);
    if (ret != ACAMERA_OK) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to create capture session container, error: %s.\n",
               camera_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    ret = ACaptureSessionOutputContainer_add(ctx->capture_session_output_container,
                                             ctx->capture_session_output);
    if (ret != ACAMERA_OK) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to add output to output container, error: %s.\n",
               camera_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    ret = ACameraOutputTarget_create(ctx->image_reader_window, &ctx->camera_output_target);
    if (ret != ACAMERA_OK) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to create camera output target, error: %s.\n",
               camera_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    ret = ACameraDevice_createCaptureRequest(ctx->camera_dev, TEMPLATE_RECORD, &ctx->capture_request);
    if (ret != ACAMERA_OK) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to create capture request, error: %s.\n",
               camera_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    ret = ACaptureRequest_setEntry_i32(ctx->capture_request, ACAMERA_CONTROL_AE_TARGET_FPS_RANGE,
                                       2, ctx->framerate_range);
    if (ret != ACAMERA_OK) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to set target fps range in capture request, error: %s.\n",
               camera_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    ret = ACaptureRequest_addTarget(ctx->capture_request, ctx->camera_output_target);
    if (ret != ACAMERA_OK) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to add target to capture request, error: %s.\n",
               camera_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    ctx->capture_session_state_callbacks.context = avctx;
    ctx->capture_session_state_callbacks.onClosed = capture_session_closed;
    ctx->capture_session_state_callbacks.onReady = capture_session_ready;
    ctx->capture_session_state_callbacks.onActive = capture_session_active;

    ret = ACameraDevice_createCaptureSession(ctx->camera_dev, ctx->capture_session_output_container,
                                             &ctx->capture_session_state_callbacks, &ctx->capture_session);
    if (ret != ACAMERA_OK) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to create capture session, error: %s.\n",
               camera_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    ret = ACameraCaptureSession_setRepeatingRequest(ctx->capture_session, NULL, 1, &ctx->capture_request, NULL);
    if (ret != ACAMERA_OK) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to set repeating request on capture session, error: %s.\n",
               camera_status_string(ret));
        return AVERROR_EXTERNAL;
    }

    return 0;
}
/* Sleep in 1 ms steps until the first frame has fixed the image format or
 * shutdown is requested; returns nonzero iff the format became known. */
static int wait_for_image_format(AVFormatContext *avctx)
{
    AndroidCameraCtx *priv = avctx->priv_data;

    while (!(atomic_load(&priv->got_image_format) || atomic_load(&priv->exit))) {
        //Wait until first frame arrived and actual image format was determined
        usleep(1000);
    }

    return atomic_load(&priv->got_image_format);
}
/* Attach a display-matrix side-data entry to the stream encoding the sensor
 * orientation, horizontally flipped for front-facing cameras.
 * Returns 0 on success, AVERROR(ENOMEM) on allocation failure. */
static int add_display_matrix(AVFormatContext *avctx, AVStream *st)
{
    AndroidCameraCtx *ctx = avctx->priv_data;
    uint8_t *side_data;
    int32_t display_matrix[9];

    av_display_rotation_set(display_matrix, ctx->sensor_orientation);

    if (ctx->lens_facing == ACAMERA_LENS_FACING_FRONT) {
        // Front camera preview is mirrored; flip so playback matches
        av_display_matrix_flip(display_matrix, 1, 0);
    }

    side_data = av_stream_new_side_data(st,
            AV_PKT_DATA_DISPLAYMATRIX, sizeof(display_matrix));

    if (!side_data) {
        return AVERROR(ENOMEM);
    }

    memcpy(side_data, display_matrix, sizeof(display_matrix));

    return 0;
}
/* Create the single rawvideo stream. Blocks (via wait_for_image_format)
 * until the first frame has determined the actual pixel format.
 * Returns 0 on success or a negative AVERROR code. */
static int add_video_stream(AVFormatContext *avctx)
{
    AndroidCameraCtx *ctx = avctx->priv_data;
    AVStream *st;
    AVCodecParameters *codecpar;

    st = avformat_new_stream(avctx, NULL);
    if (!st) {
        return AVERROR(ENOMEM);
    }

    st->id = VIDEO_STREAM_INDEX;
    st->avg_frame_rate = (AVRational) { ctx->framerate_range[1], 1 };
    st->r_frame_rate = (AVRational) { ctx->framerate_range[1], 1 };

    // Pixel format is only known after the first frame was captured
    if (!wait_for_image_format(avctx)) {
        return AVERROR_EXTERNAL;
    }

    codecpar = st->codecpar;
    codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
    codecpar->format = ctx->image_format;
    codecpar->width = ctx->width;
    codecpar->height = ctx->height;

    // Timestamps come from AImage_getTimestamp() in nanoseconds
    avpriv_set_pts_info(st, 64, 1, VIDEO_TIMEBASE_ANDROID);

    return add_display_matrix(avctx, st);
}
/* Tear down the whole capture pipeline. Safe to call on a partially
 * initialized context (every member is NULL-checked); the teardown order
 * is the reverse of the setup order and must be preserved. Always returns 0. */
static int android_camera_read_close(AVFormatContext *avctx)
{
    AndroidCameraCtx *ctx = avctx->priv_data;

    // Stop the image callback from queueing further frames
    atomic_store(&ctx->exit, 1);

    if (ctx->capture_session) {
        ACameraCaptureSession_stopRepeating(ctx->capture_session);
        // Following warning is emitted, after capture session closed callback is received:
        // ACameraCaptureSession: Device is closed but session 0 is not notified
        // Seems to be a bug in Android, we can ignore this
        ACameraCaptureSession_close(ctx->capture_session);
        ctx->capture_session = NULL;
    }

    if (ctx->capture_request) {
        ACaptureRequest_removeTarget(ctx->capture_request, ctx->camera_output_target);
        ACaptureRequest_free(ctx->capture_request);
        ctx->capture_request = NULL;
    }

    if (ctx->camera_output_target) {
        ACameraOutputTarget_free(ctx->camera_output_target);
        ctx->camera_output_target = NULL;
    }

    if (ctx->capture_session_output) {
        ACaptureSessionOutputContainer_remove(ctx->capture_session_output_container,
                ctx->capture_session_output);
        ACaptureSessionOutput_free(ctx->capture_session_output);
        ctx->capture_session_output = NULL;
    }

    if (ctx->image_reader_window) {
        // Drop the reference taken in create_capture_session()
        ANativeWindow_release(ctx->image_reader_window);
        ctx->image_reader_window = NULL;
    }

    if (ctx->capture_session_output_container) {
        ACaptureSessionOutputContainer_free(ctx->capture_session_output_container);
        ctx->capture_session_output_container = NULL;
    }

    if (ctx->camera_dev) {
        ACameraDevice_close(ctx->camera_dev);
        ctx->camera_dev = NULL;
    }

    if (ctx->image_reader) {
        AImageReader_delete(ctx->image_reader);
        ctx->image_reader = NULL;
    }

    if (ctx->camera_metadata) {
        ACameraMetadata_free(ctx->camera_metadata);
        ctx->camera_metadata = NULL;
    }

    av_freep(&ctx->camera_id);

    if (ctx->camera_mgr) {
        ACameraManager_delete(ctx->camera_mgr);
        ctx->camera_mgr = NULL;
    }

    if (ctx->input_queue) {
        AVPacket pkt;

        // Stop producers, then drain any packets still queued before freeing
        av_thread_message_queue_set_err_send(ctx->input_queue, AVERROR_EOF);

        while (av_thread_message_queue_recv(ctx->input_queue, &pkt, AV_THREAD_MESSAGE_NONBLOCK) >= 0) {
            av_packet_unref(&pkt);
        }

        av_thread_message_queue_free(&ctx->input_queue);
    }

    return 0;
}
/* Demuxer open: allocate the packet queue, open the selected camera,
 * negotiate size/framerate, start capture and create the video stream.
 * On any failure the partially built pipeline is torn down via
 * android_camera_read_close(). Returns 0 or a negative AVERROR code. */
static int android_camera_read_header(AVFormatContext *avctx)
{
    AndroidCameraCtx *ctx = avctx->priv_data;
    int ret;

    atomic_init(&ctx->got_image_format, 0);
    atomic_init(&ctx->exit, 0);

    ret = av_thread_message_queue_alloc(&ctx->input_queue, ctx->input_queue_size, sizeof(AVPacket));
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR,
               "Failed to allocate input queue, error: %s.\n", av_err2str(ret));
        goto error;
    }

    ctx->camera_mgr = ACameraManager_create();
    if (!ctx->camera_mgr) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create Android camera manager.\n");
        ret = AVERROR_EXTERNAL;
        goto error;
    }

    ret = open_camera(avctx);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to open camera.\n");
        goto error;
    }

    get_sensor_orientation(avctx);
    match_video_size(avctx);
    match_framerate(avctx);

    ret = create_image_reader(avctx);
    if (ret < 0) {
        goto error;
    }

    // Capture must be running before add_video_stream() can wait for the format
    ret = create_capture_session(avctx);
    if (ret < 0) {
        goto error;
    }

    ret = add_video_stream(avctx);

error:
    if (ret < 0) {
        android_camera_read_close(avctx);
        av_log(avctx, AV_LOG_ERROR, "Failed to open android_camera.\n");
    }

    return ret;
}
/* Demuxer read: pop the next frame packet from the input queue, honouring
 * AVFMT_FLAG_NONBLOCK; returns the packet size, or a negative error / EOF. */
static int android_camera_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    AndroidCameraCtx *priv = avctx->priv_data;
    int ret;

    if (atomic_load(&priv->exit))
        return AVERROR_EOF;

    ret = av_thread_message_queue_recv(priv->input_queue, pkt,
            avctx->flags & AVFMT_FLAG_NONBLOCK ? AV_THREAD_MESSAGE_NONBLOCK : 0);

    return ret < 0 ? ret : pkt->size;
}
#define OFFSET(x) offsetof(AndroidCameraCtx, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

/* User-visible options of the android_camera input device */
static const AVOption options[] = {
    { "video_size", "set video size given as a string such as 640x480 or hd720", OFFSET(requested_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
    { "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "30"}, 0, INT_MAX, DEC },
    { "camera_index", "set index of camera to use", OFFSET(camera_index), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, DEC },
    { "input_queue_size", "set maximum number of frames to buffer", OFFSET(input_queue_size), AV_OPT_TYPE_INT, {.i64 = 5}, 0, INT_MAX, DEC },
    { NULL },
};

static const AVClass android_camera_class = {
    .class_name = "android_camera indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};

/* Demuxer definition registered with libavformat (AVFMT_NOFILE: the "url"
 * is a device specifier, not a file) */
AVInputFormat ff_android_camera_demuxer = {
    .name           = "android_camera",
    .long_name      = NULL_IF_CONFIG_SMALL("Android camera input device"),
    .priv_data_size = sizeof(AndroidCameraCtx),
    .read_header    = android_camera_read_header,
    .read_packet    = android_camera_read_packet,
    .read_close     = android_camera_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &android_camera_class,
};

271
externals/ffmpeg/libavdevice/avdevice.c vendored Executable file
View File

@@ -0,0 +1,271 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/avassert.h"
#include "libavutil/samplefmt.h"
#include "libavutil/pixfmt.h"
#include "libavcodec/avcodec.h"
#include "avdevice.h"
#include "internal.h"
#include "config.h"
#include "libavutil/ffversion.h"
const char av_device_ffversion[] = "FFmpeg version " FFMPEG_VERSION;
#define E AV_OPT_FLAG_ENCODING_PARAM
#define D AV_OPT_FLAG_DECODING_PARAM
#define A AV_OPT_FLAG_AUDIO_PARAM
#define V AV_OPT_FLAG_VIDEO_PARAM
#define OFFSET(x) offsetof(AVDeviceCapabilitiesQuery, x)

/* Option table backing the AVDeviceCapabilitiesQuery API
 * (see avdevice_capabilities_create()) */
const AVOption av_device_capabilities[] = {
    { "codec", "codec", OFFSET(codec), AV_OPT_TYPE_INT,
        {.i64 = AV_CODEC_ID_NONE}, AV_CODEC_ID_NONE, INT_MAX, E|D|A|V },
    { "sample_format", "sample format", OFFSET(sample_format), AV_OPT_TYPE_SAMPLE_FMT,
        {.i64 = AV_SAMPLE_FMT_NONE}, AV_SAMPLE_FMT_NONE, INT_MAX, E|D|A },
    { "sample_rate", "sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT,
        {.i64 = -1}, -1, INT_MAX, E|D|A },
    { "channels", "channels", OFFSET(channels), AV_OPT_TYPE_INT,
        {.i64 = -1}, -1, INT_MAX, E|D|A },
    { "channel_layout", "channel layout", OFFSET(channel_layout), AV_OPT_TYPE_CHANNEL_LAYOUT,
        {.i64 = -1}, -1, INT_MAX, E|D|A },
    { "pixel_format", "pixel format", OFFSET(pixel_format), AV_OPT_TYPE_PIXEL_FMT,
        {.i64 = AV_PIX_FMT_NONE}, AV_PIX_FMT_NONE, INT_MAX, E|D|V },
    { "window_size", "window size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE,
        {.str = NULL}, -1, INT_MAX, E|D|V },
    { "frame_size", "frame size", OFFSET(frame_width), AV_OPT_TYPE_IMAGE_SIZE,
        {.str = NULL}, -1, INT_MAX, E|D|V },
    { "fps", "fps", OFFSET(fps), AV_OPT_TYPE_RATIONAL,
        {.dbl = -1}, -1, INT_MAX, E|D|V },
    { NULL }
};

#undef E
#undef D
#undef A
#undef V
#undef OFFSET
/* Return LIBAVDEVICE_VERSION_INT; the assert guards FFmpeg's convention
 * that the micro version of a release is always >= 100. */
unsigned avdevice_version(void)
{
    av_assert0(LIBAVDEVICE_VERSION_MICRO >= 100);
    return LIBAVDEVICE_VERSION_INT;
}
/* Return the build-time ./configure command line. */
const char * avdevice_configuration(void)
{
    return FFMPEG_CONFIGURATION;
}
/* Return the license string. The prefix is concatenated into the literal so
 * it is embedded (greppable) in the binary, then skipped via the offset so
 * callers receive only the license itself. */
const char * avdevice_license(void)
{
#define LICENSE_PREFIX "libavdevice license: "
    return &LICENSE_PREFIX FFMPEG_LICENSE[sizeof(LICENSE_PREFIX) - 1];
}
/* Walk the registered (de)muxer list starting after `prev` and return the
 * next entry whose priv_class category matches c1 or c2; NULL when the list
 * is exhausted. `output` selects muxers (1) vs. demuxers (0). */
static void *device_next(void *prev, int output,
                         AVClassCategory c1, AVClassCategory c2)
{
    for (;;) {
        const AVClass *priv_class;

        if (output) {
            prev = av_oformat_next(prev);
            if (!prev)
                return NULL;
            priv_class = ((AVOutputFormat *)prev)->priv_class;
        } else {
            prev = av_iformat_next(prev);
            if (!prev)
                return NULL;
            priv_class = ((AVInputFormat *)prev)->priv_class;
        }

        if (priv_class &&
            (priv_class->category == c1 || priv_class->category == c2))
            return prev;
    }
}
/* Public device iterators — thin wrappers over device_next() selecting the
 * relevant class categories; full API docs live in avdevice.h. */
AVInputFormat *av_input_audio_device_next(AVInputFormat *d)
{
    return device_next(d, 0, AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
                       AV_CLASS_CATEGORY_DEVICE_INPUT);
}

AVInputFormat *av_input_video_device_next(AVInputFormat *d)
{
    return device_next(d, 0, AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
                       AV_CLASS_CATEGORY_DEVICE_INPUT);
}

AVOutputFormat *av_output_audio_device_next(AVOutputFormat *d)
{
    return device_next(d, 1, AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
                       AV_CLASS_CATEGORY_DEVICE_OUTPUT);
}

AVOutputFormat *av_output_video_device_next(AVOutputFormat *d)
{
    return device_next(d, 1, AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
                       AV_CLASS_CATEGORY_DEVICE_OUTPUT);
}
/* Forward an application->device control message to the output device's
 * control_message handler; ENOSYS when the device provides none. */
int avdevice_app_to_dev_control_message(struct AVFormatContext *s, enum AVAppToDevMessageType type,
                                        void *data, size_t data_size)
{
    if (s->oformat && s->oformat->control_message)
        return s->oformat->control_message(s, type, data, data_size);

    return AVERROR(ENOSYS);
}
/* Forward a device->application control message to the callback the
 * application registered on the context; ENOSYS when there is none. */
int avdevice_dev_to_app_control_message(struct AVFormatContext *s, enum AVDevToAppMessageType type,
                                        void *data, size_t data_size)
{
    if (s->control_message_cb)
        return s->control_message_cb(s, type, data, data_size);

    return AVERROR(ENOSYS);
}
/* Allocate and initialize a capabilities query for the device behind `s`,
 * applying `device_options` to the device first. On failure *caps is freed
 * and a negative AVERROR code returned; ENOSYS when the device does not
 * implement create_device_capabilities. */
int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
                                 AVDictionary **device_options)
{
    int ret;
    av_assert0(s && caps);
    av_assert0(s->iformat || s->oformat);
    if ((s->oformat && !s->oformat->create_device_capabilities) ||
        (s->iformat && !s->iformat->create_device_capabilities))
        return AVERROR(ENOSYS);
    *caps = av_mallocz(sizeof(**caps));
    if (!(*caps))
        return AVERROR(ENOMEM);
    (*caps)->device_context = s;
    if (((ret = av_opt_set_dict(s->priv_data, device_options)) < 0))
        goto fail;
    if (s->iformat) {
        if ((ret = s->iformat->create_device_capabilities(s, *caps)) < 0)
            goto fail;
    } else {
        if ((ret = s->oformat->create_device_capabilities(s, *caps)) < 0)
            goto fail;
    }
    // Populate the query with the defaults from av_device_capabilities
    av_opt_set_defaults(*caps);
    return 0;
  fail:
    av_freep(caps);
    return ret;
}
/* Release a capabilities query created by avdevice_capabilities_create(),
 * giving the device a chance to free its own state first; *caps is NULLed.
 * No-op when s, caps or *caps is NULL. */
void avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s)
{
    if (!s || !caps || !*caps)
        return;

    av_assert0(s->iformat || s->oformat);

    if (s->iformat) {
        if (s->iformat->free_device_capabilities)
            s->iformat->free_device_capabilities(s, *caps);
    } else if (s->oformat->free_device_capabilities) {
        s->oformat->free_device_capabilities(s, *caps);
    }

    av_freep(caps);
}
/* Ask the device behind `s` to enumerate its instances into a freshly
 * allocated AVDeviceInfoList. Returns the device's result (>= 0 on success);
 * ENOSYS when the device implements no get_device_list. On failure the list
 * is freed and *device_list set to NULL. */
int avdevice_list_devices(AVFormatContext *s, AVDeviceInfoList **device_list)
{
    int ret;
    av_assert0(s);
    av_assert0(device_list);
    av_assert0(s->oformat || s->iformat);
    if ((s->oformat && !s->oformat->get_device_list) ||
        (s->iformat && !s->iformat->get_device_list)) {
        *device_list = NULL;
        return AVERROR(ENOSYS);
    }
    *device_list = av_mallocz(sizeof(AVDeviceInfoList));
    if (!(*device_list))
        return AVERROR(ENOMEM);
    /* no default device by default */
    (*device_list)->default_device = -1;
    if (s->oformat)
        ret = s->oformat->get_device_list(s, *device_list);
    else
        ret = s->iformat->get_device_list(s, *device_list);
    if (ret < 0)
        avdevice_free_list_devices(device_list);
    return ret;
}
/* Apply `options` to a freshly allocated context, enumerate its devices and
 * always free the context before returning the enumeration result.
 * Takes ownership of `s`; `options` is left untouched (copied internally). */
static int list_devices_for_context(AVFormatContext *s, AVDictionary *options,
                                    AVDeviceInfoList **device_list)
{
    AVDictionary *opts_copy = NULL;
    int ret;

    av_dict_copy(&opts_copy, options, 0);
    ret = av_opt_set_dict2(s, &opts_copy, AV_OPT_SEARCH_CHILDREN);
    if (ret >= 0)
        ret = avdevice_list_devices(s, device_list);

    av_dict_free(&opts_copy);
    avformat_free_context(s);
    return ret;
}
/* Enumerate the sources of an input device without opening it: build a
 * temporary input context and delegate to list_devices_for_context(). */
int avdevice_list_input_sources(AVInputFormat *device, const char *device_name,
                                AVDictionary *device_options, AVDeviceInfoList **device_list)
{
    AVFormatContext *s = NULL;
    int ret = ff_alloc_input_device_context(&s, device, device_name);

    if (ret < 0)
        return ret;
    return list_devices_for_context(s, device_options, device_list);
}
/* Enumerate the sinks of an output device without opening it: build a
 * temporary output context and delegate to list_devices_for_context(). */
int avdevice_list_output_sinks(AVOutputFormat *device, const char *device_name,
                               AVDictionary *device_options, AVDeviceInfoList **device_list)
{
    AVFormatContext *s = NULL;
    int ret = avformat_alloc_output_context2(&s, device, device_name, NULL);

    if (ret < 0)
        return ret;
    return list_devices_for_context(s, device_options, device_list);
}
/* Free an AVDeviceInfoList, its per-device entries and their strings;
 * *device_list is set to NULL. No-op when the list itself is NULL. */
void avdevice_free_list_devices(AVDeviceInfoList **device_list)
{
    AVDeviceInfoList *list;

    av_assert0(device_list);
    list = *device_list;
    if (!list)
        return;

    for (int i = 0; i < list->nb_devices; i++) {
        AVDeviceInfo *dev = list->devices[i];

        if (dev) {
            av_freep(&dev->device_name);
            av_freep(&dev->device_description);
            av_free(dev);
        }
    }

    av_freep(&list->devices);
    av_freep(device_list);
}

514
externals/ffmpeg/libavdevice/avdevice.h vendored Executable file
View File

@@ -0,0 +1,514 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_AVDEVICE_H
#define AVDEVICE_AVDEVICE_H
#include "version.h"
/**
* @file
* @ingroup lavd
* Main libavdevice API header
*/
/**
* @defgroup lavd libavdevice
* Special devices muxing/demuxing library.
*
* Libavdevice is a complementary library to @ref libavf "libavformat". It
* provides various "special" platform-specific muxers and demuxers, e.g. for
* grabbing devices, audio capture and playback etc. As a consequence, the
* (de)muxers in libavdevice are of the AVFMT_NOFILE type (they use their own
* I/O functions). The filename passed to avformat_open_input() often does not
* refer to an actually existing file, but has some special device-specific
* meaning - e.g. for xcbgrab it is the display name.
*
* To use libavdevice, simply call avdevice_register_all() to register all
* compiled muxers and demuxers. They all use standard libavformat API.
*
* @{
*/
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/dict.h"
#include "libavformat/avformat.h"
/**
* Return the LIBAVDEVICE_VERSION_INT constant.
*/
unsigned avdevice_version(void);
/**
* Return the libavdevice build-time configuration.
*/
const char *avdevice_configuration(void);
/**
* Return the libavdevice license.
*/
const char *avdevice_license(void);
/**
* Initialize libavdevice and register all the input and output devices.
*/
void avdevice_register_all(void);
/**
* Audio input devices iterator.
*
* If d is NULL, returns the first registered input audio/video device,
* if d is non-NULL, returns the next registered input audio/video device after d
* or NULL if d is the last one.
*/
AVInputFormat *av_input_audio_device_next(AVInputFormat *d);
/**
* Video input devices iterator.
*
* If d is NULL, returns the first registered input audio/video device,
* if d is non-NULL, returns the next registered input audio/video device after d
* or NULL if d is the last one.
*/
AVInputFormat *av_input_video_device_next(AVInputFormat *d);
/**
* Audio output devices iterator.
*
* If d is NULL, returns the first registered output audio/video device,
* if d is non-NULL, returns the next registered output audio/video device after d
* or NULL if d is the last one.
*/
AVOutputFormat *av_output_audio_device_next(AVOutputFormat *d);
/**
* Video output devices iterator.
*
* If d is NULL, returns the first registered output audio/video device,
* if d is non-NULL, returns the next registered output audio/video device after d
* or NULL if d is the last one.
*/
AVOutputFormat *av_output_video_device_next(AVOutputFormat *d);
typedef struct AVDeviceRect {
int x; /**< x coordinate of top left corner */
int y; /**< y coordinate of top left corner */
int width; /**< width */
int height; /**< height */
} AVDeviceRect;
/**
* Message types used by avdevice_app_to_dev_control_message().
*/
enum AVAppToDevMessageType {
/**
* Dummy message.
*/
AV_APP_TO_DEV_NONE = MKBETAG('N','O','N','E'),
/**
* Window size change message.
*
* Message is sent to the device every time the application changes the size
* of the window device renders to.
* Message should also be sent right after window is created.
*
* data: AVDeviceRect: new window size.
*/
AV_APP_TO_DEV_WINDOW_SIZE = MKBETAG('G','E','O','M'),
/**
* Repaint request message.
*
* Message is sent to the device when window has to be repainted.
*
* data: AVDeviceRect: area required to be repainted.
* NULL: whole area is required to be repainted.
*/
AV_APP_TO_DEV_WINDOW_REPAINT = MKBETAG('R','E','P','A'),
/**
* Request pause/play.
*
* Application requests pause/unpause playback.
* Mostly usable with devices that have internal buffer.
* By default devices are not paused.
*
* data: NULL
*/
AV_APP_TO_DEV_PAUSE = MKBETAG('P', 'A', 'U', ' '),
AV_APP_TO_DEV_PLAY = MKBETAG('P', 'L', 'A', 'Y'),
AV_APP_TO_DEV_TOGGLE_PAUSE = MKBETAG('P', 'A', 'U', 'T'),
/**
* Volume control message.
*
* Set volume level. It may be device-dependent if volume
* is changed per stream or system wide. Per stream volume
* change is expected when possible.
*
* data: double: new volume with range of 0.0 - 1.0.
*/
AV_APP_TO_DEV_SET_VOLUME = MKBETAG('S', 'V', 'O', 'L'),
/**
* Mute control messages.
*
* Change mute state. It may be device-dependent if mute status
* is changed per stream or system wide. Per stream mute status
* change is expected when possible.
*
* data: NULL.
*/
AV_APP_TO_DEV_MUTE = MKBETAG(' ', 'M', 'U', 'T'),
AV_APP_TO_DEV_UNMUTE = MKBETAG('U', 'M', 'U', 'T'),
AV_APP_TO_DEV_TOGGLE_MUTE = MKBETAG('T', 'M', 'U', 'T'),
/**
* Get volume/mute messages.
*
* Force the device to send AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED or
* AV_DEV_TO_APP_MUTE_STATE_CHANGED command respectively.
*
* data: NULL.
*/
AV_APP_TO_DEV_GET_VOLUME = MKBETAG('G', 'V', 'O', 'L'),
AV_APP_TO_DEV_GET_MUTE = MKBETAG('G', 'M', 'U', 'T'),
};
/**
* Message types used by avdevice_dev_to_app_control_message().
*/
enum AVDevToAppMessageType {
/**
* Dummy message.
*/
AV_DEV_TO_APP_NONE = MKBETAG('N','O','N','E'),
/**
* Create window buffer message.
*
* Device requests to create a window buffer. Exact meaning is device-
* and application-dependent. Message is sent before rendering first
* frame and all one-shot initializations should be done here.
* Application is allowed to ignore preferred window buffer size.
*
* @note: Application is obligated to inform about window buffer size
* with AV_APP_TO_DEV_WINDOW_SIZE message.
*
* data: AVDeviceRect: preferred size of the window buffer.
* NULL: no preferred size of the window buffer.
*/
AV_DEV_TO_APP_CREATE_WINDOW_BUFFER = MKBETAG('B','C','R','E'),
/**
* Prepare window buffer message.
*
* Device requests to prepare a window buffer for rendering.
* Exact meaning is device- and application-dependent.
* Message is sent before rendering of each frame.
*
* data: NULL.
*/
AV_DEV_TO_APP_PREPARE_WINDOW_BUFFER = MKBETAG('B','P','R','E'),
/**
* Display window buffer message.
*
* Device requests to display a window buffer.
* Message is sent when new frame is ready to be displayed.
* Usually buffers need to be swapped in handler of this message.
*
* data: NULL.
*/
AV_DEV_TO_APP_DISPLAY_WINDOW_BUFFER = MKBETAG('B','D','I','S'),
/**
* Destroy window buffer message.
*
* Device requests to destroy a window buffer.
* Message is sent when device is about to be destroyed and window
* buffer is not required anymore.
*
* data: NULL.
*/
AV_DEV_TO_APP_DESTROY_WINDOW_BUFFER = MKBETAG('B','D','E','S'),
/**
* Buffer fullness status messages.
*
* Device signals buffer overflow/underflow.
*
* data: NULL.
*/
AV_DEV_TO_APP_BUFFER_OVERFLOW = MKBETAG('B','O','F','L'),
AV_DEV_TO_APP_BUFFER_UNDERFLOW = MKBETAG('B','U','F','L'),
/**
* Buffer readable/writable.
*
* Device informs that buffer is readable/writable.
 * When possible, device informs how many bytes can be read or written.
*
 * @warning Device may not inform when the number of bytes that can be read or written changes.
*
* data: int64_t: amount of bytes available to read/write.
* NULL: amount of bytes available to read/write is not known.
*/
AV_DEV_TO_APP_BUFFER_READABLE = MKBETAG('B','R','D',' '),
AV_DEV_TO_APP_BUFFER_WRITABLE = MKBETAG('B','W','R',' '),
/**
* Mute state change message.
*
* Device informs that mute state has changed.
*
* data: int: 0 for not muted state, non-zero for muted state.
*/
AV_DEV_TO_APP_MUTE_STATE_CHANGED = MKBETAG('C','M','U','T'),
/**
* Volume level change message.
*
* Device informs that volume level has changed.
*
* data: double: new volume with range of 0.0 - 1.0.
*/
AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED = MKBETAG('C','V','O','L'),
};
/**
* Send control message from application to device.
*
* @param s device context.
* @param type message type.
* @param data message data. Exact type depends on message type.
* @param data_size size of message data.
* @return >= 0 on success, negative on error.
* AVERROR(ENOSYS) when device doesn't implement handler of the message.
*/
int avdevice_app_to_dev_control_message(struct AVFormatContext *s,
enum AVAppToDevMessageType type,
void *data, size_t data_size);
/**
* Send control message from device to application.
*
* @param s device context.
* @param type message type.
* @param data message data. Can be NULL.
* @param data_size size of message data.
* @return >= 0 on success, negative on error.
* AVERROR(ENOSYS) when application doesn't implement handler of the message.
*/
int avdevice_dev_to_app_control_message(struct AVFormatContext *s,
enum AVDevToAppMessageType type,
void *data, size_t data_size);
/**
* Following API allows user to probe device capabilities (supported codecs,
* pixel formats, sample formats, resolutions, channel counts, etc).
 * It is built on top of the AVOption API.
* Queried capabilities make it possible to set up converters of video or audio
* parameters that fit to the device.
*
* List of capabilities that can be queried:
* - Capabilities valid for both audio and video devices:
* - codec: supported audio/video codecs.
* type: AV_OPT_TYPE_INT (AVCodecID value)
* - Capabilities valid for audio devices:
* - sample_format: supported sample formats.
* type: AV_OPT_TYPE_INT (AVSampleFormat value)
* - sample_rate: supported sample rates.
* type: AV_OPT_TYPE_INT
* - channels: supported number of channels.
* type: AV_OPT_TYPE_INT
* - channel_layout: supported channel layouts.
* type: AV_OPT_TYPE_INT64
* - Capabilities valid for video devices:
* - pixel_format: supported pixel formats.
* type: AV_OPT_TYPE_INT (AVPixelFormat value)
* - window_size: supported window sizes (describes size of the window size presented to the user).
* type: AV_OPT_TYPE_IMAGE_SIZE
* - frame_size: supported frame sizes (describes size of provided video frames).
* type: AV_OPT_TYPE_IMAGE_SIZE
* - fps: supported fps values
* type: AV_OPT_TYPE_RATIONAL
*
* Value of the capability may be set by user using av_opt_set() function
* and AVDeviceCapabilitiesQuery object. Following queries will
* limit results to the values matching already set capabilities.
* For example, setting a codec may impact number of formats or fps values
* returned during next query. Setting invalid value may limit results to zero.
*
 * Example usage based on the opengl output device:
*
* @code
* AVFormatContext *oc = NULL;
* AVDeviceCapabilitiesQuery *caps = NULL;
* AVOptionRanges *ranges;
* int ret;
*
* if ((ret = avformat_alloc_output_context2(&oc, NULL, "opengl", NULL)) < 0)
* goto fail;
* if (avdevice_capabilities_create(&caps, oc, NULL) < 0)
* goto fail;
*
* //query codecs
 * if (av_opt_query_ranges(&ranges, caps, "codec", AV_OPT_MULTI_COMPONENT_RANGE) < 0)
* goto fail;
* //pick codec here and set it
* av_opt_set(caps, "codec", AV_CODEC_ID_RAWVIDEO, 0);
*
* //query format
 * if (av_opt_query_ranges(&ranges, caps, "pixel_format", AV_OPT_MULTI_COMPONENT_RANGE) < 0)
* goto fail;
* //pick format here and set it
* av_opt_set(caps, "pixel_format", AV_PIX_FMT_YUV420P, 0);
*
* //query and set more capabilities
*
* fail:
* //clean up code
 * avdevice_capabilities_free(&caps, oc);
* avformat_free_context(oc);
* @endcode
*/
/**
* Structure describes device capabilities.
*
* It is used by devices in conjunction with av_device_capabilities AVOption table
* to implement capabilities probing API based on AVOption API. Should not be used directly.
*/
typedef struct AVDeviceCapabilitiesQuery {
    const AVClass *av_class;           /**< class for AVOption handling (must be first) */
    AVFormatContext *device_context;   /**< device context whose capabilities are queried */
    enum AVCodecID codec;              /**< selected codec constraint */
    enum AVSampleFormat sample_format; /**< audio: selected sample format */
    enum AVPixelFormat pixel_format;   /**< video: selected pixel format */
    int sample_rate;                   /**< audio: selected sample rate */
    int channels;                      /**< audio: selected number of channels */
    int64_t channel_layout;            /**< audio: selected channel layout */
    int window_width;                  /**< video: window size presented to the user */
    int window_height;                 /**< video: window size presented to the user */
    int frame_width;                   /**< video: size of provided video frames */
    int frame_height;                  /**< video: size of provided video frames */
    AVRational fps;                    /**< video: selected frame rate */
} AVDeviceCapabilitiesQuery;
/**
* AVOption table used by devices to implement device capabilities API. Should not be used by a user.
*/
extern const AVOption av_device_capabilities[];
/**
* Initialize capabilities probing API based on AVOption API.
*
* avdevice_capabilities_free() must be called when query capabilities API is
* not used anymore.
*
* @param[out] caps Device capabilities data. Pointer to a NULL pointer must be passed.
* @param s Context of the device.
* @param device_options An AVDictionary filled with device-private options.
* On return this parameter will be destroyed and replaced with a dict
* containing options that were not found. May be NULL.
* The same options must be passed later to avformat_write_header() for output
* devices or avformat_open_input() for input devices, or at any other place
* that affects device-private options.
*
* @return >= 0 on success, negative otherwise.
*/
int avdevice_capabilities_create(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s,
AVDictionary **device_options);
/**
* Free resources created by avdevice_capabilities_create()
*
* @param caps Device capabilities data to be freed.
* @param s Context of the device.
*/
void avdevice_capabilities_free(AVDeviceCapabilitiesQuery **caps, AVFormatContext *s);
/**
* Structure describes basic parameters of the device.
*/
typedef struct AVDeviceInfo {
char *device_name; /**< device name, format depends on device */
char *device_description; /**< human friendly name */
} AVDeviceInfo;
/**
* List of devices.
*/
typedef struct AVDeviceInfoList {
AVDeviceInfo **devices; /**< list of autodetected devices */
int nb_devices; /**< number of autodetected devices */
int default_device; /**< index of default device or -1 if no default */
} AVDeviceInfoList;
/**
* List devices.
*
* Returns available device names and their parameters.
*
* @note: Some devices may accept system-dependent device names that cannot be
 * autodetected. The list returned by this function cannot be assumed to
 * always be complete.
*
* @param s device context.
* @param[out] device_list list of autodetected devices.
* @return count of autodetected devices, negative on error.
*/
int avdevice_list_devices(struct AVFormatContext *s, AVDeviceInfoList **device_list);
/**
* Convenient function to free result of avdevice_list_devices().
*
* @param devices device list to be freed.
*/
void avdevice_free_list_devices(AVDeviceInfoList **device_list);
/**
* List devices.
*
* Returns available device names and their parameters.
 * These are convenient wrappers for avdevice_list_devices().
* Device context is allocated and deallocated internally.
*
* @param device device format. May be NULL if device name is set.
* @param device_name device name. May be NULL if device format is set.
* @param device_options An AVDictionary filled with device-private options. May be NULL.
* The same options must be passed later to avformat_write_header() for output
* devices or avformat_open_input() for input devices, or at any other place
* that affects device-private options.
* @param[out] device_list list of autodetected devices
* @return count of autodetected devices, negative on error.
* @note device argument takes precedence over device_name when both are set.
*/
int avdevice_list_input_sources(struct AVInputFormat *device, const char *device_name,
AVDictionary *device_options, AVDeviceInfoList **device_list);
int avdevice_list_output_sinks(struct AVOutputFormat *device, const char *device_name,
AVDictionary *device_options, AVDeviceInfoList **device_list);
/**
* @}
*/
#endif /* AVDEVICE_AVDEVICE_H */

55
externals/ffmpeg/libavdevice/avdeviceres.rc vendored Executable file
View File

@@ -0,0 +1,55 @@
/*
* Windows resource file for libavdevice
*
* Copyright (C) 2012 James Almer
* Copyright (C) 2013 Tiancheng "Timothy" Gu
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <windows.h>
#include "libavdevice/version.h"
#include "libavutil/ffversion.h"
#include "config.h"
/* Win32 version resource embedded into avdevice DLL builds; numeric fields
 * come from libavdevice/version.h, strings from the build configuration. */
1 VERSIONINFO
FILEVERSION     LIBAVDEVICE_VERSION_MAJOR, LIBAVDEVICE_VERSION_MINOR, LIBAVDEVICE_VERSION_MICRO, 0
PRODUCTVERSION  LIBAVDEVICE_VERSION_MAJOR, LIBAVDEVICE_VERSION_MINOR, LIBAVDEVICE_VERSION_MICRO, 0
FILEFLAGSMASK   VS_FFI_FILEFLAGSMASK
FILEOS          VOS_NT_WINDOWS32
FILETYPE        VFT_DLL
{
    BLOCK "StringFileInfo"
    {
        BLOCK "040904B0"
        {
            VALUE "CompanyName",      "FFmpeg Project"
            VALUE "FileDescription",  "FFmpeg device handling library"
            VALUE "FileVersion",      AV_STRINGIFY(LIBAVDEVICE_VERSION)
            VALUE "InternalName",     "libavdevice"
            VALUE "LegalCopyright",   "Copyright (C) 2000-" AV_STRINGIFY(CONFIG_THIS_YEAR) " FFmpeg Project"
            VALUE "OriginalFilename", "avdevice" BUILDSUF "-" AV_STRINGIFY(LIBAVDEVICE_VERSION_MAJOR) SLIBSUF
            VALUE "ProductName",      "FFmpeg"
            VALUE "ProductVersion",   FFMPEG_VERSION
        }
    }
    BLOCK "VarFileInfo"
    {
        VALUE "Translation", 0x0409, 0x04B0
    }
}

1226
externals/ffmpeg/libavdevice/avfoundation.m vendored Executable file

File diff suppressed because it is too large Load Diff

360
externals/ffmpeg/libavdevice/bktr.c vendored Executable file
View File

@@ -0,0 +1,360 @@
/*
* *BSD video grab interface
* Copyright (c) 2002 Steve O'Hara-Smith
* based on
* Linux video grab interface
* Copyright (c) 2000, 2001 Fabrice Bellard
* and
* simple_grab.c Copyright (c) 1999 Roger Hardiman
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavformat/internal.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#if HAVE_DEV_BKTR_IOCTL_METEOR_H && HAVE_DEV_BKTR_IOCTL_BT848_H
# include <dev/bktr/ioctl_meteor.h>
# include <dev/bktr/ioctl_bt848.h>
#elif HAVE_MACHINE_IOCTL_METEOR_H && HAVE_MACHINE_IOCTL_BT848_H
# include <machine/ioctl_meteor.h>
# include <machine/ioctl_bt848.h>
#elif HAVE_DEV_VIDEO_METEOR_IOCTL_METEOR_H && HAVE_DEV_VIDEO_BKTR_IOCTL_BT848_H
# include <dev/video/meteor/ioctl_meteor.h>
# include <dev/video/bktr/ioctl_bt848.h>
#elif HAVE_DEV_IC_BT8XX_H
# include <dev/ic/bt8xx.h>
#endif
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>
#include <signal.h>
#include <stdint.h>
#include "avdevice.h"
/* Private state of the bktr capture demuxer. */
typedef struct VideoData {
    AVClass *class;    /* class for private options; must be the first field */
    int video_fd;      /* fd of the opened capture device (/dev/bktr*) */
    int tuner_fd;      /* fd of /dev/tuner0; may be negative if open failed */
    int width, height; /* capture geometry, set by the video_size option */
    uint64_t per_frame; /* frame duration in microseconds, derived from framerate */
    int standard;      /* TV standard (PAL/NTSC/SECAM/...), private option */
    char *framerate;   /**< Set by a private option. */
} VideoData;
#define PAL 1
#define PALBDGHI 1
#define NTSC 2
#define NTSCM 2
#define SECAM 3
#define PALN 4
#define PALM 5
#define NTSCJ 6
/* PAL is 768 x 576. NTSC is 640 x 480 */
#define PAL_HEIGHT 576
#define SECAM_HEIGHT 576
#define NTSC_HEIGHT 480
#ifndef VIDEO_FORMAT
#define VIDEO_FORMAT NTSC
#endif
/* Map of the "idev" input index to bktr driver input selectors. */
static const int bktr_dev[] = { METEOR_DEV0, METEOR_DEV1, METEOR_DEV2,
                                METEOR_DEV3, METEOR_DEV_SVIDEO };

/* Capture state shared with the SIGUSR1 handler. All uses are within this
 * translation unit, so give these internal linkage (they previously had
 * external linkage, polluting the global namespace and risking link-time
 * collisions). Only nsignals is touched from signal context. */
static uint8_t *video_buf;
static size_t video_buf_size;
static uint64_t last_frame_time;
static volatile sig_atomic_t nsignals;
static void catchsignal(int signal)
{
nsignals++;
return;
}
/**
 * Open and configure the bktr capture device and (best-effort) the tuner.
 *
 * @param video_device path of the capture device node
 * @param width,height requested capture geometry
 * @param format       TV standard index (PAL..NTSCJ); out-of-range values
 *                     fall back to $BKTR_FORMAT, then VIDEO_FORMAT
 * @param[out] video_fd returns the capture device fd
 * @param[out] tuner_fd returns the tuner fd (may be negative; tuner failure
 *                      is non-fatal)
 * @param idev         input selector 0..4; out-of-range falls back to
 *                     $BKTR_DEV, then 1
 * @param frequency    tuner frequency in MHz; <= 0 falls back to
 *                     $BKTR_FREQUENCY, then disables tuning
 * @return 0 on success, a negative AVERROR code on fatal failure
 */
static av_cold int bktr_init(const char *video_device, int width, int height,
    int format, int *video_fd, int *tuner_fd, int idev, double frequency)
{
    struct meteor_geomet geo;
    int h_max;
    long ioctl_frequency;
    char *arg;
    int c;
    struct sigaction act, old;
    int ret;
    char errbuf[128];

    /* Resolve defaults for input index, TV standard and tuner frequency
     * from environment variables when the caller passed sentinels. */
    if (idev < 0 || idev > 4)
    {
        arg = getenv ("BKTR_DEV");
        if (arg)
            idev = atoi (arg);
        if (idev < 0 || idev > 4)
            idev = 1;
    }

    if (format < 1 || format > 6)
    {
        arg = getenv ("BKTR_FORMAT");
        if (arg)
            format = atoi (arg);
        if (format < 1 || format > 6)
            format = VIDEO_FORMAT;
    }

    if (frequency <= 0)
    {
        arg = getenv ("BKTR_FREQUENCY");
        if (arg)
            frequency = atof (arg);
        if (frequency <= 0)
            frequency = 0.0;
    }

    /* Install the SIGUSR1 handler before enabling per-frame signalling. */
    memset(&act, 0, sizeof(act));
    sigemptyset(&act.sa_mask);
    act.sa_handler = catchsignal;
    sigaction(SIGUSR1, &act, &old);

    /* Tuner failure is only a warning; capture can work without it. */
    *tuner_fd = avpriv_open("/dev/tuner0", O_RDONLY);
    if (*tuner_fd < 0)
        av_log(NULL, AV_LOG_ERROR, "Warning. Tuner not opened, continuing: %s\n", strerror(errno));

    *video_fd = avpriv_open(video_device, O_RDONLY);
    if (*video_fd < 0) {
        ret = AVERROR(errno);
        av_strerror(ret, errbuf, sizeof(errbuf));
        av_log(NULL, AV_LOG_ERROR, "%s: %s\n", video_device, errbuf);
        return ret;
    }

    geo.rows = height;
    geo.columns = width;
    geo.frames = 1;
    geo.oformat = METEOR_GEO_YUV_422 | METEOR_GEO_YUV_12;

    /* Pick the maximum height and the bt848 input format per TV standard. */
    switch (format) {
    case PAL:   h_max = PAL_HEIGHT;   c = BT848_IFORM_F_PALBDGHI; break;
    case PALN:  h_max = PAL_HEIGHT;   c = BT848_IFORM_F_PALN;     break;
    case PALM:  h_max = PAL_HEIGHT;   c = BT848_IFORM_F_PALM;     break;
    case SECAM: h_max = SECAM_HEIGHT; c = BT848_IFORM_F_SECAM;    break;
    case NTSC:  h_max = NTSC_HEIGHT;  c = BT848_IFORM_F_NTSCM;    break;
    case NTSCJ: h_max = NTSC_HEIGHT;  c = BT848_IFORM_F_NTSCJ;    break;
    default:    h_max = PAL_HEIGHT;   c = BT848_IFORM_F_PALBDGHI; break;
    }

    /* Half height or less: capture only one field to avoid interlacing. */
    if (height <= h_max / 2)
        geo.oformat |= METEOR_GEO_EVEN_ONLY;

    if (ioctl(*video_fd, METEORSETGEO, &geo) < 0) {
        ret = AVERROR(errno);
        av_strerror(ret, errbuf, sizeof(errbuf));
        av_log(NULL, AV_LOG_ERROR, "METEORSETGEO: %s\n", errbuf);
        return ret;
    }

    if (ioctl(*video_fd, BT848SFMT, &c) < 0) {
        ret = AVERROR(errno);
        av_strerror(ret, errbuf, sizeof(errbuf));
        av_log(NULL, AV_LOG_ERROR, "BT848SFMT: %s\n", errbuf);
        return ret;
    }

    c = bktr_dev[idev];
    if (ioctl(*video_fd, METEORSINPUT, &c) < 0) {
        ret = AVERROR(errno);
        av_strerror(ret, errbuf, sizeof(errbuf));
        av_log(NULL, AV_LOG_ERROR, "METEORSINPUT: %s\n", errbuf);
        return ret;
    }

    /* YUV 4:2:0 planar frame: 12 bits per pixel. */
    video_buf_size = width * height * 12 / 8;

    video_buf = (uint8_t *)mmap((caddr_t)0, video_buf_size,
                                PROT_READ, MAP_SHARED, *video_fd, (off_t)0);
    if (video_buf == MAP_FAILED) {
        ret = AVERROR(errno);
        av_strerror(ret, errbuf, sizeof(errbuf));
        av_log(NULL, AV_LOG_ERROR, "mmap: %s\n", errbuf);
        return ret;
    }

    if (frequency != 0.0) {
        /* Driver expects frequency in 1/16 MHz units; failure is non-fatal. */
        ioctl_frequency  = (unsigned long)(frequency*16);
        if (ioctl(*tuner_fd, TVTUNER_SETFREQ, &ioctl_frequency) < 0)
            av_log(NULL, AV_LOG_ERROR, "TVTUNER_SETFREQ: %s\n", strerror(errno));
    }

    c = AUDIO_UNMUTE;
    if (ioctl(*tuner_fd, BT848_SAUDIO, &c) < 0)
        av_log(NULL, AV_LOG_ERROR, "TVTUNER_SAUDIO: %s\n", strerror(errno));

    /* Start continuous capture and ask for SIGUSR1 on each frame. */
    c = METEOR_CAP_CONTINOUS;
    ioctl(*video_fd, METEORCAPTUR, &c);

    c = SIGUSR1;
    ioctl(*video_fd, METEORSSIGNAL, &c);

    return 0;
}
/* Pace capture to one frame per per_frame microseconds: sleep until the
 * next frame slot (plus 1/8 frame of slack), then record the current time
 * as the new frame timestamp and reset the signal counter. */
static void bktr_getframe(uint64_t per_frame)
{
    uint64_t curtime;

    curtime = av_gettime();
    if (!last_frame_time
        || ((last_frame_time + per_frame) > curtime)) {
        /* NOTE(review): on the very first frame last_frame_time is 0, so the
         * usleep() argument underflows to a huge unsigned value — presumably
         * usleep then fails immediately (EINVAL) and capture proceeds; TODO
         * confirm this is the intended first-frame behaviour. */
        if (!usleep(last_frame_time + per_frame + per_frame / 8 - curtime)) {
            if (!nsignals)
                av_log(NULL, AV_LOG_INFO,
                       "SLEPT NO signals - %d microseconds late\n",
                       (int)(av_gettime() - last_frame_time - per_frame));
        }
    }
    nsignals = 0;

    last_frame_time = curtime;
}
/* note: we support only one picture read at a time */
/**
 * Read one captured frame into pkt.
 *
 * Waits for the next frame slot, timestamps the packet with the wall clock
 * and copies the mmap'ed capture buffer into it.
 *
 * @return size of the packet payload on success, negative AVERROR on failure
 */
static int grab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    VideoData *s = s1->priv_data;
    int ret;

    /* Propagate the real allocation error (typically AVERROR(ENOMEM))
     * instead of collapsing every failure to EIO. */
    if ((ret = av_new_packet(pkt, video_buf_size)) < 0)
        return ret;

    bktr_getframe(s->per_frame);

    pkt->pts = av_gettime();
    memcpy(pkt->data, video_buf, video_buf_size);

    return video_buf_size;
}
/**
 * Demuxer read_header: create the single rawvideo stream, derive the frame
 * rate from the selected TV standard when none was given, and initialize
 * the capture hardware.
 *
 * @return 0 on success, negative AVERROR code on failure
 */
static int grab_read_header(AVFormatContext *s1)
{
    VideoData *s = s1->priv_data;
    AVStream *st;
    AVRational framerate;
    int ret = 0;

    /* Default the frame rate from the TV standard when unset. */
    if (!s->framerate)
        switch (s->standard) {
        case PAL:   s->framerate = av_strdup("pal");  break;
        case NTSC:  s->framerate = av_strdup("ntsc"); break;
        case SECAM: s->framerate = av_strdup("25");   break;
        default:
            av_log(s1, AV_LOG_ERROR, "Unknown standard.\n");
            ret = AVERROR(EINVAL);
            goto out;
        }
    if (!s->framerate) {
        /* av_strdup() above failed; previously this fell through into
         * av_parse_video_rate(NULL). */
        ret = AVERROR(ENOMEM);
        goto out;
    }
    if ((ret = av_parse_video_rate(&framerate, s->framerate)) < 0) {
        av_log(s1, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", s->framerate);
        goto out;
    }

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        ret = AVERROR(ENOMEM);
        goto out;
    }
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in use */

    s->per_frame = ((uint64_t)1000000 * framerate.den) / framerate.num;

    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->format     = AV_PIX_FMT_YUV420P;
    st->codecpar->codec_id   = AV_CODEC_ID_RAWVIDEO;
    st->codecpar->width      = s->width;
    st->codecpar->height     = s->height;
    st->avg_frame_rate       = framerate;

    /* Propagate the real error from bktr_init() instead of reporting every
     * failure as AVERROR(EIO). */
    ret = bktr_init(s1->url, s->width, s->height, s->standard,
                    &s->video_fd, &s->tuner_fd, -1, 0.0);
    if (ret < 0)
        goto out;

    nsignals = 0;
    last_frame_time = 0;

out:
    return ret;
}
/* Demuxer read_close: stop capture, mute the tuner, release fds and the
 * mmap'ed frame buffer. Always succeeds. */
static int grab_read_close(AVFormatContext *s1)
{
    VideoData *s = s1->priv_data;
    int cmd;

    /* Stop continuous capture before closing the device. */
    cmd = METEOR_CAP_STOP_CONT;
    ioctl(s->video_fd, METEORCAPTUR, &cmd);
    close(s->video_fd);

    cmd = AUDIO_MUTE;
    ioctl(s->tuner_fd, BT848_SAUDIO, &cmd);
    close(s->tuner_fd);

    munmap((caddr_t)video_buf, video_buf_size);

    return 0;
}
#define OFFSET(x) offsetof(VideoData, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Private options of the bktr demuxer. The first entry now uses the
 * OFFSET()/DEC shorthands like the rest of the table (it previously
 * spelled out offsetof()/AV_OPT_FLAG_DECODING_PARAM — same expansion,
 * inconsistent style). */
static const AVOption options[] = {
    { "standard", "", OFFSET(standard), AV_OPT_TYPE_INT, {.i64 = VIDEO_FORMAT}, PAL, NTSCJ, DEC, "standard" },
    { "PAL",   "", 0, AV_OPT_TYPE_CONST, {.i64 = PAL},   0, 0, DEC, "standard" },
    { "NTSC",  "", 0, AV_OPT_TYPE_CONST, {.i64 = NTSC},  0, 0, DEC, "standard" },
    { "SECAM", "", 0, AV_OPT_TYPE_CONST, {.i64 = SECAM}, 0, 0, DEC, "standard" },
    { "PALN",  "", 0, AV_OPT_TYPE_CONST, {.i64 = PALN},  0, 0, DEC, "standard" },
    { "PALM",  "", 0, AV_OPT_TYPE_CONST, {.i64 = PALM},  0, 0, DEC, "standard" },
    { "NTSCJ", "", 0, AV_OPT_TYPE_CONST, {.i64 = NTSCJ}, 0, 0, DEC, "standard" },
    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = "vga"}, 0, 0, DEC },
    { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
    { NULL },
};
/* AVClass wiring the private options table into the option system. */
static const AVClass bktr_class = {
    .class_name = "BKTR grab indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
/* Input device registration; AVFMT_NOFILE because the device opens its
 * own file descriptors instead of using an AVIOContext. */
AVInputFormat ff_bktr_demuxer = {
    .name           = "bktr",
    .long_name      = NULL_IF_CONFIG_SMALL("video grab"),
    .priv_data_size = sizeof(VideoData),
    .read_header    = grab_read_header,
    .read_packet    = grab_read_packet,
    .read_close     = grab_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &bktr_class,
};

241
externals/ffmpeg/libavdevice/caca.c vendored Executable file
View File

@@ -0,0 +1,241 @@
/*
* Copyright (c) 2012 Paul B Mahol
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <caca.h>
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "avdevice.h"
/* Private state of the caca (color ASCII art) output device. */
typedef struct CACAContext {
    AVClass         *class;        /* class for private options; must be first */
    AVFormatContext *ctx;          /* back-pointer for logging */
    char            *window_title; /* title shown by the display driver */
    int             window_width,  window_height; /* forced window size, 0 = canvas size */

    caca_canvas_t   *canvas;       /* character canvas frames are dithered onto */
    caca_display_t  *display;      /* display attached to the canvas */
    caca_dither_t   *dither;       /* RGB24 -> canvas dither context */

    char            *algorithm, *antialias; /* dither options (string-valued) */
    char            *charset, *color;       /* dither options (string-valued) */
    char            *driver;       /* display driver name, NULL = default */

    char            *list_dither;  /* if set, list dither values and exit */
    int             list_drivers;  /* if set, list drivers and exit */
} CACAContext;
/* Muxer write_trailer: release every libcaca object and the window title.
 * Each pointer is reset to NULL so this is safe to call again, e.g. from
 * the write_header error path. Always returns 0. */
static int caca_write_trailer(AVFormatContext *s)
{
    CACAContext *c = s->priv_data;

    av_freep(&c->window_title);

    if (c->display != NULL) {
        caca_free_display(c->display);
        c->display = NULL;
    }
    if (c->dither != NULL) {
        caca_free_dither(c->dither);
        c->dither = NULL;
    }
    if (c->canvas != NULL) {
        caca_free_canvas(c->canvas);
        c->canvas = NULL;
    }
    return 0;
}
/* Log every available libcaca display driver. The list is a flat,
 * NULL-terminated array of (name, description) string pairs. */
static void list_drivers(CACAContext *c)
{
    const char *const *entry = caca_get_display_driver_list();

    av_log(c->ctx, AV_LOG_INFO, "Available drivers:\n");
    for (; entry[0]; entry += 2)
        av_log(c->ctx, AV_LOG_INFO, "%s: %s\n", entry[0], entry[1]);
}
/* Generate a list_dither_<thing>() helper that logs the NULL-terminated
 * (value, description) pair list libcaca reports for one dither property. */
#define DEFINE_LIST_DITHER(thing, thing_str) \
static void list_dither_## thing(CACAContext *c) \
{ \
    const char *const *thing = caca_get_dither_## thing ##_list(c->dither); \
    int i; \
 \
    av_log(c->ctx, AV_LOG_INFO, "Available %s:\n", thing_str); \
    for (i = 0; thing[i]; i += 2) \
        av_log(c->ctx, AV_LOG_INFO, "%s: %s\n", thing[i], thing[i + 1]); \
}

DEFINE_LIST_DITHER(color, "colors");
DEFINE_LIST_DITHER(charset, "charsets");
DEFINE_LIST_DITHER(algorithm, "algorithms");
DEFINE_LIST_DITHER(antialias, "antialias");
/**
 * Muxer write_header: handle the list_drivers/list_dither query options
 * (logging the values and returning AVERROR_EXIT), validate that the single
 * stream is rawvideo in RGB24, then create the libcaca canvas, dither and
 * display. On failure the trailer is invoked to release partial state.
 */
static int caca_write_header(AVFormatContext *s)
{
    CACAContext *c = s->priv_data;
    AVStream *st = s->streams[0];
    AVCodecParameters *encctx = st->codecpar;
    int ret, bpp;

    c->ctx = s;
    /* Pure query modes: print the requested list and abort muxing. */
    if (c->list_drivers) {
        list_drivers(c);
        return AVERROR_EXIT;
    }
    if (c->list_dither) {
        if (!strcmp(c->list_dither, "colors")) {
            list_dither_color(c);
        } else if (!strcmp(c->list_dither, "charsets")) {
            list_dither_charset(c);
        } else if (!strcmp(c->list_dither, "algorithms")) {
            list_dither_algorithm(c);
        } else if (!strcmp(c->list_dither, "antialiases")) {
            list_dither_antialias(c);
        } else {
            av_log(s, AV_LOG_ERROR,
                   "Invalid argument '%s', for 'list_dither' option\n"
                   "Argument must be one of 'algorithms, 'antialiases', 'charsets', 'colors'\n",
                   c->list_dither);
            return AVERROR(EINVAL);
        }
        return AVERROR_EXIT;
    }

    /* Exactly one rawvideo stream in RGB24 is supported. */
    if (   s->nb_streams > 1
        || encctx->codec_type != AVMEDIA_TYPE_VIDEO
        || encctx->codec_id   != AV_CODEC_ID_RAWVIDEO) {
        av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
        return AVERROR(EINVAL);
    }

    if (encctx->format != AV_PIX_FMT_RGB24) {
        av_log(s, AV_LOG_ERROR,
               "Unsupported pixel format '%s', choose rgb24\n",
               av_get_pix_fmt_name(encctx->format));
        return AVERROR(EINVAL);
    }

    /* NOTE(review): the failure paths below use AVERROR(errno), but libcaca
     * is not guaranteed to set errno — if it is 0, ret becomes 0 and the
     * failure would be reported as success; verify against libcaca docs. */
    c->canvas = caca_create_canvas(c->window_width, c->window_height);
    if (!c->canvas) {
        ret = AVERROR(errno);
        av_log(s, AV_LOG_ERROR, "Failed to create canvas\n");
        goto fail;
    }

    bpp = av_get_bits_per_pixel(av_pix_fmt_desc_get(encctx->format));
    c->dither = caca_create_dither(bpp, encctx->width, encctx->height,
                                   bpp / 8 * encctx->width,
                                   0x0000ff, 0x00ff00, 0xff0000, 0);
    if (!c->dither) {
        ret = AVERROR(errno);
        av_log(s, AV_LOG_ERROR, "Failed to create dither\n");
        goto fail;
    }

/* Apply one string-valued dither option; on rejection log it and bail out. */
#define CHECK_DITHER_OPT(opt) do {                                              \
    if (caca_set_dither_##opt(c->dither, c->opt) < 0)  {                        \
        ret = AVERROR(errno);                                                   \
        av_log(s, AV_LOG_ERROR, "Failed to set value '%s' for option '%s'\n",   \
               c->opt, #opt);                                                   \
        goto fail;                                                              \
    }                                                                           \
} while (0)

    CHECK_DITHER_OPT(algorithm);
    CHECK_DITHER_OPT(antialias);
    CHECK_DITHER_OPT(charset);
    CHECK_DITHER_OPT(color);

    c->display = caca_create_display_with_driver(c->canvas, c->driver);
    if (!c->display) {
        ret = AVERROR(errno);
        av_log(s, AV_LOG_ERROR, "Failed to create display\n");
        list_drivers(c);
        goto fail;
    }

    /* Window size defaults to the canvas size chosen by libcaca. */
    if (!c->window_width || !c->window_height) {
        c->window_width  = caca_get_canvas_width(c->canvas);
        c->window_height = caca_get_canvas_height(c->canvas);
    }

    if (!c->window_title)
        c->window_title = av_strdup(s->url);
    caca_set_display_title(c->display, c->window_title);
    /* NOTE(review): this reads the deprecated AVStream.codec time_base while
     * the rest of the function uses codecpar — confirm whether st->time_base
     * (or the stream frame rate) is the intended refresh interval source. */
    caca_set_display_time(c->display, av_rescale_q(1, st->codec->time_base, AV_TIME_BASE_Q));

    return 0;

fail:
    caca_write_trailer(s);
    return ret;
}
/* Muxer write_packet: dither the raw RGB24 frame onto the canvas and
 * refresh the display. Always returns 0. */
static int caca_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    CACAContext *c = s->priv_data;

    caca_dither_bitmap(c->canvas, 0, 0, c->window_width, c->window_height,
                       c->dither, pkt->data);
    caca_refresh_display(c->display);
    return 0;
}
#define OFFSET(x) offsetof(CACAContext,x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM

/* Private options of the caca output device; the string-valued dither
 * options are handed to libcaca verbatim in caca_write_header(). */
static const AVOption options[] = {
    { "window_size",  "set window forced size",  OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL }, 0, 0, ENC},
    { "window_title", "set window title",        OFFSET(window_title), AV_OPT_TYPE_STRING,     {.str = NULL }, 0, 0, ENC },
    { "driver", "set display driver",            OFFSET(driver),    AV_OPT_TYPE_STRING, {.str = NULL }, 0, 0, ENC },
    { "algorithm", "set dithering algorithm",    OFFSET(algorithm), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
    { "antialias", "set antialias method",       OFFSET(antialias), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
    { "charset", "set charset used to render output", OFFSET(charset), AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
    { "color", "set color used to render output",     OFFSET(color),   AV_OPT_TYPE_STRING, {.str = "default" }, 0, 0, ENC },
    { "list_drivers", "list available drivers",  OFFSET(list_drivers), AV_OPT_TYPE_BOOL, {.i64=0}, 0, 1, ENC },
    { "list_dither", "list available dither options", OFFSET(list_dither), AV_OPT_TYPE_STRING, {.str=NULL}, 0, 1, ENC, "list_dither" },
    { "algorithms",   NULL, 0, AV_OPT_TYPE_CONST, {.str = "algorithms"}, 0, 0, ENC, "list_dither" },
    { "antialiases",  NULL, 0, AV_OPT_TYPE_CONST, {.str = "antialiases"},0, 0, ENC, "list_dither" },
    { "charsets",     NULL, 0, AV_OPT_TYPE_CONST, {.str = "charsets"},   0, 0, ENC, "list_dither" },
    { "colors",       NULL, 0, AV_OPT_TYPE_CONST, {.str = "colors"},     0, 0, ENC, "list_dither" },
    { NULL },
};
/* AVClass descriptor binding the option table above to the muxer context. */
static const AVClass caca_class = {
    .class_name = "caca outdev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};
/* Output-device registration: accepts a single rawvideo stream (RGB24 is
 * enforced in the header callback) and renders it as color ASCII art.
 * AVFMT_NOFILE: the device opens its own display, no AVIOContext needed. */
AVOutputFormat ff_caca_muxer = {
    .name           = "caca",
    .long_name      = NULL_IF_CONFIG_SMALL("caca (color ASCII art) output device"),
    .priv_data_size = sizeof(CACAContext),
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_RAWVIDEO,
    .write_header   = caca_write_header,
    .write_packet   = caca_write_packet,
    .write_trailer  = caca_write_trailer,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &caca_class,
};

View File

@@ -0,0 +1,538 @@
/*
* Blackmagic DeckLink output
* Copyright (c) 2013-2014 Ramiro Polla, Luca Barbato, Deti Fliegl
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/* Include internal.h first to avoid conflict between winsock.h (used by
* DeckLink headers) and winsock2.h (used by libavformat) in MSVC++ builds */
extern "C" {
#include "libavformat/internal.h"
}
#include <DeckLinkAPI.h>
#ifdef _WIN32
#include <DeckLinkAPI_i.c>
#else
/* The file provided by the SDK is known to be missing prototypes, which doesn't
cause issues with GCC since the warning doesn't apply to C++ files. However
Clang does complain (and warnings are treated as errors), so suppress the
warning just for this one file */
#ifdef __clang__
#pragma clang diagnostic push
#pragma clang diagnostic ignored "-Wmissing-prototypes"
#endif
#include <DeckLinkAPIDispatch.cpp>
#ifdef __clang__
#pragma clang diagnostic pop
#endif
#endif
extern "C" {
#include "libavformat/avformat.h"
#include "libavutil/imgutils.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/bswap.h"
#include "avdevice.h"
}
#include "decklink_common.h"
/* Create a DeckLink device iterator. On Windows this goes through COM
 * (CoInitialize + CoCreateInstance); elsewhere the SDK factory function is
 * used. Returns NULL and logs an error when no iterator can be created. */
static IDeckLinkIterator *decklink_create_iterator(AVFormatContext *avctx)
{
    IDeckLinkIterator *iterator = NULL;
#ifdef _WIN32
    if (CoInitialize(NULL) < 0) {
        av_log(avctx, AV_LOG_ERROR, "COM initialization failed.\n");
        return NULL;
    }
    if (CoCreateInstance(CLSID_CDeckLinkIterator, NULL, CLSCTX_ALL,
                         IID_IDeckLinkIterator, (void**) &iterator) != S_OK)
        iterator = NULL;
#else
    iterator = CreateDeckLinkIteratorInstance();
#endif
    if (!iterator)
        av_log(avctx, AV_LOG_ERROR, "Could not create DeckLink iterator. "
                                    "Make sure you have DeckLink drivers " BLACKMAGIC_DECKLINK_API_VERSION_STRING " or newer installed.\n");
    return iterator;
}
/* Fetch a string attribute of a DeckLink device into *s as a heap copy the
 * caller must free. *s stays NULL (with a 0 return) when the attribute is
 * merely unavailable; hard SDK failures return AVERROR_EXTERNAL. */
static int decklink_get_attr_string(IDeckLink *dl, BMDDeckLinkAttributeID cfg_id, const char **s)
{
    IDeckLinkProfileAttributes *attr;
    DECKLINK_STR str;
    HRESULT hr;

    *s = NULL;

    if (dl->QueryInterface(IID_IDeckLinkProfileAttributes, (void **)&attr) != S_OK)
        return AVERROR_EXTERNAL;

    hr = attr->GetString(cfg_id, &str);
    attr->Release();

    if (hr == E_FAIL)
        return AVERROR_EXTERNAL;
    if (hr != S_OK)
        return 0;

    *s = DECKLINK_STRDUP(str);
    DECKLINK_FREE(str);
    return *s ? 0 : AVERROR(ENOMEM);
}
/* Apply the user-selected physical input connector (audio or video,
 * depending on cfg_id) after verifying the device supports it.
 * A value of 0 means "unset" and the function is a no-op. */
static int decklink_select_input(AVFormatContext *avctx, BMDDeckLinkConfigurationID cfg_id)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
    const int is_audio = cfg_id == bmdDeckLinkConfigAudioInputConnection;
    BMDDeckLinkAttributeID attr_id = is_audio ? BMDDeckLinkAudioInputConnections : BMDDeckLinkVideoInputConnections;
    int64_t bmd_input = is_audio ? (int64_t)ctx->audio_input : (int64_t)ctx->video_input;
    const char *type_name = is_audio ? "audio" : "video";
    int64_t supported_connections = 0;

    if (!bmd_input)
        return 0;

    if (ctx->attr->GetInt(attr_id, &supported_connections) != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Failed to query supported %s inputs.\n", type_name);
        return AVERROR_EXTERNAL;
    }
    /* The attribute is a bitmask of connectors; all requested bits must be set. */
    if ((supported_connections & bmd_input) != bmd_input) {
        av_log(avctx, AV_LOG_ERROR, "Device does not support selected %s input.\n", type_name);
        return AVERROR(ENOSYS);
    }
    if (ctx->cfg->SetInt(cfg_id, bmd_input) != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Failed to select %s input.\n", type_name);
        return AVERROR_EXTERNAL;
    }
    return 0;
}
/* Return true when an FFmpeg field order is compatible with a DeckLink
 * field dominance value. AV_FIELD_UNKNOWN matches any dominance. */
static DECKLINK_BOOL field_order_eq(enum AVFieldOrder field_order, BMDFieldDominance bmd_field_order)
{
    switch (field_order) {
    case AV_FIELD_UNKNOWN:
        return true;
    case AV_FIELD_TT:
    case AV_FIELD_TB:
        return bmd_field_order == bmdUpperFieldFirst;
    case AV_FIELD_BB:
    case AV_FIELD_BT:
        return bmd_field_order == bmdLowerFieldFirst;
    case AV_FIELD_PROGRESSIVE:
        return bmd_field_order == bmdProgressiveFrame ||
               bmd_field_order == bmdProgressiveSegmentedFrame;
    default:
        return false;
    }
}
/* Apply user-requested device configuration: duplex/profile mode, input
 * connector selection (capture only) and reference timing offset (output
 * only). Non-fatal settings log warnings instead of failing. */
int ff_decklink_set_configs(AVFormatContext *avctx,
                            decklink_direction_t direction) {
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
    HRESULT res;
    if (ctx->duplex_mode) {
        DECKLINK_BOOL duplex_supported = false;

        /* SDK 11+ replaced the duplex config flag with profiles; probe the
         * matching capability for the SDK version we were built against. */
#if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
        IDeckLinkProfileManager *manager = NULL;
        if (ctx->dl->QueryInterface(IID_IDeckLinkProfileManager, (void **)&manager) == S_OK)
            duplex_supported = true;
#else
        if (ctx->attr->GetFlag(BMDDeckLinkSupportsDuplexModeConfiguration, &duplex_supported) != S_OK)
            duplex_supported = false;
#endif

        if (duplex_supported) {
#if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
            /* duplex_mode == 2 means full duplex, anything else half duplex. */
            IDeckLinkProfile *profile = NULL;
            BMDProfileID bmd_profile_id = ctx->duplex_mode == 2 ? bmdProfileOneSubDeviceFullDuplex : bmdProfileTwoSubDevicesHalfDuplex;
            res = manager->GetProfile(bmd_profile_id, &profile);
            if (res == S_OK) {
                res = profile->SetActive();
                profile->Release();
            }
            manager->Release();
#else
            res = ctx->cfg->SetInt(bmdDeckLinkConfigDuplexMode, ctx->duplex_mode == 2 ? bmdDuplexModeFull : bmdDuplexModeHalf);
#endif
            if (res != S_OK)
                av_log(avctx, AV_LOG_WARNING, "Setting duplex mode failed.\n");
            else
                av_log(avctx, AV_LOG_VERBOSE, "Successfully set duplex mode to %s duplex.\n", ctx->duplex_mode == 2 ? "full" : "half");
        } else {
            av_log(avctx, AV_LOG_WARNING, "Unable to set duplex mode, because it is not supported.\n");
        }
    }

    /* Capture: route the requested audio/video connectors (fatal on error). */
    if (direction == DIRECTION_IN) {
        int ret;
        ret = decklink_select_input(avctx, bmdDeckLinkConfigAudioInputConnection);
        if (ret < 0)
            return ret;
        ret = decklink_select_input(avctx, bmdDeckLinkConfigVideoInputConnection);
        if (ret < 0)
            return ret;
    }

    /* Output: optional genlock timing offset; INT_MIN marks "unset". */
    if (direction == DIRECTION_OUT && cctx->timing_offset != INT_MIN) {
        res = ctx->cfg->SetInt(bmdDeckLinkConfigReferenceInputTimingOffset, cctx->timing_offset);
        if (res != S_OK)
            av_log(avctx, AV_LOG_WARNING, "Setting timing offset failed.\n");
    }

    return 0;
}
int ff_decklink_set_format(AVFormatContext *avctx,
int width, int height,
int tb_num, int tb_den,
enum AVFieldOrder field_order,
decklink_direction_t direction)
{
struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
#if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
DECKLINK_BOOL support;
#else
BMDDisplayModeSupport support;
#endif
IDeckLinkDisplayModeIterator *itermode;
IDeckLinkDisplayMode *mode;
int i = 1;
HRESULT res;
av_log(avctx, AV_LOG_DEBUG, "Trying to find mode for frame size %dx%d, frame timing %d/%d, field order %d, direction %d, format code %s\n",
width, height, tb_num, tb_den, field_order, direction, cctx->format_code ? cctx->format_code : "(unset)");
if (direction == DIRECTION_IN) {
res = ctx->dli->GetDisplayModeIterator (&itermode);
} else {
res = ctx->dlo->GetDisplayModeIterator (&itermode);
}
if (res!= S_OK) {
av_log(avctx, AV_LOG_ERROR, "Could not get Display Mode Iterator\n");
return AVERROR(EIO);
}
char format_buf[] = " ";
if (cctx->format_code)
memcpy(format_buf, cctx->format_code, FFMIN(strlen(cctx->format_code), sizeof(format_buf)));
BMDDisplayMode target_mode = (BMDDisplayMode)AV_RB32(format_buf);
AVRational target_tb = av_make_q(tb_num, tb_den);
ctx->bmd_mode = bmdModeUnknown;
while ((ctx->bmd_mode == bmdModeUnknown) && itermode->Next(&mode) == S_OK) {
BMDTimeValue bmd_tb_num, bmd_tb_den;
int bmd_width = mode->GetWidth();
int bmd_height = mode->GetHeight();
BMDDisplayMode bmd_mode = mode->GetDisplayMode();
BMDFieldDominance bmd_field_dominance = mode->GetFieldDominance();
mode->GetFrameRate(&bmd_tb_num, &bmd_tb_den);
AVRational mode_tb = av_make_q(bmd_tb_num, bmd_tb_den);
if ((bmd_width == width &&
bmd_height == height &&
!av_cmp_q(mode_tb, target_tb) &&
field_order_eq(field_order, bmd_field_dominance))
|| target_mode == bmd_mode) {
ctx->bmd_mode = bmd_mode;
ctx->bmd_width = bmd_width;
ctx->bmd_height = bmd_height;
ctx->bmd_tb_den = bmd_tb_den;
ctx->bmd_tb_num = bmd_tb_num;
ctx->bmd_field_dominance = bmd_field_dominance;
av_log(avctx, AV_LOG_INFO, "Found Decklink mode %d x %d with rate %.2f%s\n",
bmd_width, bmd_height, 1/av_q2d(mode_tb),
(ctx->bmd_field_dominance==bmdLowerFieldFirst || ctx->bmd_field_dominance==bmdUpperFieldFirst)?"(i)":"");
}
mode->Release();
i++;
}
itermode->Release();
if (ctx->bmd_mode == bmdModeUnknown)
return -1;
#if BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b050000
if (direction == DIRECTION_IN) {
BMDDisplayMode actualMode = ctx->bmd_mode;
if (ctx->dli->DoesSupportVideoMode(ctx->video_input, ctx->bmd_mode, (BMDPixelFormat) cctx->raw_format,
bmdNoVideoInputConversion, bmdSupportedVideoModeDefault,
&actualMode, &support) != S_OK || !support || ctx->bmd_mode != actualMode)
return -1;
} else {
BMDDisplayMode actualMode = ctx->bmd_mode;
if (ctx->dlo->DoesSupportVideoMode(bmdVideoConnectionUnspecified, ctx->bmd_mode, ctx->raw_format,
bmdNoVideoOutputConversion, bmdSupportedVideoModeDefault,
&actualMode, &support) != S_OK || !support || ctx->bmd_mode != actualMode)
return -1;
}
return 0;
#elif BLACKMAGIC_DECKLINK_API_VERSION >= 0x0b000000
if (direction == DIRECTION_IN) {
if (ctx->dli->DoesSupportVideoMode(ctx->video_input, ctx->bmd_mode, (BMDPixelFormat) cctx->raw_format,
bmdSupportedVideoModeDefault,
&support) != S_OK)
return -1;
} else {
BMDDisplayMode actualMode = ctx->bmd_mode;
if (ctx->dlo->DoesSupportVideoMode(bmdVideoConnectionUnspecified, ctx->bmd_mode, ctx->raw_format,
bmdSupportedVideoModeDefault,
&actualMode, &support) != S_OK || !support || ctx->bmd_mode != actualMode) {
return -1;
}
}
if (support)
return 0;
#else
if (direction == DIRECTION_IN) {
if (ctx->dli->DoesSupportVideoMode(ctx->bmd_mode, (BMDPixelFormat) cctx->raw_format,
bmdVideoOutputFlagDefault,
&support, NULL) != S_OK)
return -1;
} else {
if (!ctx->supports_vanc || ctx->dlo->DoesSupportVideoMode(ctx->bmd_mode, ctx->raw_format,
bmdVideoOutputVANC,
&support, NULL) != S_OK || support != bmdDisplayModeSupported) {
/* Try without VANC enabled */
if (ctx->dlo->DoesSupportVideoMode(ctx->bmd_mode, ctx->raw_format,
bmdVideoOutputFlagDefault,
&support, NULL) != S_OK) {
return -1;
}
ctx->supports_vanc = 0;
}
}
if (support == bmdDisplayModeSupported)
return 0;
#endif
return -1;
}
/* Convenience overload: select the mode purely from cctx->format_code —
 * all numeric constraints zeroed and the field order left unknown. */
int ff_decklink_set_format(AVFormatContext *avctx, decklink_direction_t direction) {
    return ff_decklink_set_format(avctx, 0, 0, 0, 0, AV_FIELD_UNKNOWN, direction);
}
/* Enumerate DeckLink devices into device_list, filtered by capability:
 * a device is listed when it exposes an output interface (show_outputs)
 * and/or an input interface (show_inputs). device_name is the stable
 * device handle when available, device_description the human-readable
 * display name. Entries appended to device_list are owned by the caller
 * (freed via avdevice_free_list_devices()). Returns 0 or an AVERROR. */
int ff_decklink_list_devices(AVFormatContext *avctx,
                             struct AVDeviceInfoList *device_list,
                             int show_inputs, int show_outputs)
{
    IDeckLink *dl = NULL;
    IDeckLinkIterator *iter = decklink_create_iterator(avctx);
    int ret = 0;

    if (!iter)
        return AVERROR(EIO);

    /* Stop enumerating on the first error; the current device is still
     * cleaned up via the "next" label before the loop exits. */
    while (ret == 0 && iter->Next(&dl) == S_OK) {
        IDeckLinkOutput *output_config;
        IDeckLinkInput *input_config;
        const char *display_name = NULL;
        const char *unique_name = NULL;
        AVDeviceInfo *new_device = NULL;
        int add = 0;

        ret = decklink_get_attr_string(dl, BMDDeckLinkDisplayName, &display_name);
        if (ret < 0)
            goto next;
        ret = decklink_get_attr_string(dl, BMDDeckLinkDeviceHandle, &unique_name);
        if (ret < 0)
            goto next;

        /* Capability probes: QueryInterface success proves the capability;
         * the interface itself is not needed, so release it immediately. */
        if (show_outputs) {
            if (dl->QueryInterface(IID_IDeckLinkOutput, (void **)&output_config) == S_OK) {
                output_config->Release();
                add = 1;
            }
        }

        if (show_inputs) {
            if (dl->QueryInterface(IID_IDeckLinkInput, (void **)&input_config) == S_OK) {
                input_config->Release();
                add = 1;
            }
        }

        if (add == 1) {
            new_device = (AVDeviceInfo *) av_mallocz(sizeof(AVDeviceInfo));
            if (!new_device) {
                ret = AVERROR(ENOMEM);
                goto next;
            }

            /* Prefer the unique handle as the addressable name; fall back
             * to the display name when the handle attribute is absent. */
            new_device->device_name = av_strdup(unique_name ? unique_name : display_name);
            new_device->device_description = av_strdup(display_name);

            if (!new_device->device_name ||
                !new_device->device_description ||
                av_dynarray_add_nofree(&device_list->devices, &device_list->nb_devices, new_device) < 0) {
                ret = AVERROR(ENOMEM);
                av_freep(&new_device->device_name);
                av_freep(&new_device->device_description);
                av_freep(&new_device);
                goto next;
            }
        }

    next:
        av_freep(&display_name);
        av_freep(&unique_name);
        dl->Release();
    }
    iter->Release();
    return ret;
}
/* Legacy helper for the "-list_devices" argument: runs
 * ff_decklink_list_devices() and dumps the result through av_log().
 * Failures (including OOM) are silently ignored, matching the old
 * best-effort behavior. */
void ff_decklink_list_devices_legacy(AVFormatContext *avctx,
                                     int show_inputs, int show_outputs)
{
    struct AVDeviceInfoList *device_list =
        (struct AVDeviceInfoList *) av_mallocz(sizeof(AVDeviceInfoList));

    if (!device_list)
        return;

    if (ff_decklink_list_devices(avctx, device_list, show_inputs, show_outputs) == 0) {
        av_log(avctx, AV_LOG_INFO, "Blackmagic DeckLink %s devices:\n",
               show_inputs ? "input" : "output");
        for (int i = 0; i < device_list->nb_devices; i++)
            av_log(avctx, AV_LOG_INFO, "\t'%s'\n", device_list->devices[i]->device_description);
    }

    avdevice_free_list_devices(&device_list);
}
/* Print every display mode supported by the opened device for the given
 * direction (fourcc, resolution, frame rate, interlacing) via av_log().
 * For capture, the requested input connectors are routed first so the
 * listed modes reflect the active input. Returns 0 or an AVERROR. */
int ff_decklink_list_formats(AVFormatContext *avctx, decklink_direction_t direction)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
    IDeckLinkDisplayModeIterator *itermode;
    IDeckLinkDisplayMode *mode;
    uint32_t format_code;
    HRESULT res;

    if (direction == DIRECTION_IN) {
        int ret;
        ret = decklink_select_input(avctx, bmdDeckLinkConfigAudioInputConnection);
        if (ret < 0)
            return ret;
        ret = decklink_select_input(avctx, bmdDeckLinkConfigVideoInputConnection);
        if (ret < 0)
            return ret;
        res = ctx->dli->GetDisplayModeIterator (&itermode);
    } else {
        res = ctx->dlo->GetDisplayModeIterator (&itermode);
    }

    if (res!= S_OK) {
            av_log(avctx, AV_LOG_ERROR, "Could not get Display Mode Iterator\n");
            return AVERROR(EIO);
    }

    av_log(avctx, AV_LOG_INFO, "Supported formats for '%s':\n\tformat_code\tdescription",
               avctx->url);
    while (itermode->Next(&mode) == S_OK) {
        BMDTimeValue tb_num, tb_den;
        mode->GetFrameRate(&tb_num, &tb_den);
        /* The fourcc is byte-swapped so it prints in natural reading order. */
        format_code = av_bswap32(mode->GetDisplayMode());
        av_log(avctx, AV_LOG_INFO, "\n\t%.4s\t\t%ldx%ld at %d/%d fps",
                (char*) &format_code, mode->GetWidth(), mode->GetHeight(),
                (int) tb_den, (int) tb_num);
        switch (mode->GetFieldDominance()) {
        case bmdLowerFieldFirst:
        av_log(avctx, AV_LOG_INFO, " (interlaced, lower field first)"); break;
        case bmdUpperFieldFirst:
        av_log(avctx, AV_LOG_INFO, " (interlaced, upper field first)"); break;
        }
        mode->Release();
    }
    av_log(avctx, AV_LOG_INFO, "\n");

    itermode->Release();

    return 0;
}
/* Release every DeckLink SDK interface held in the context. Derived
 * interfaces (input/output/attributes/configuration) are released before
 * the device itself; each pointer is released only if it was acquired. */
void ff_decklink_cleanup(AVFormatContext *avctx)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;

    if (ctx->dli)
        ctx->dli->Release();
    if (ctx->dlo)
        ctx->dlo->Release();
    if (ctx->attr)
        ctx->attr->Release();
    if (ctx->cfg)
        ctx->cfg->Release();
    if (ctx->dl)
        ctx->dl->Release();
}
/* Open the DeckLink device whose display name or unique device handle
 * matches 'name' and acquire its configuration and attributes interfaces
 * into ctx->cfg / ctx->attr (released by ff_decklink_cleanup()).
 * Returns 0 on success, AVERROR(ENXIO) when no device matches, or
 * AVERROR_EXTERNAL on SDK/COM failures. */
int ff_decklink_init_device(AVFormatContext *avctx, const char* name)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
    IDeckLink *dl = NULL;
    IDeckLinkIterator *iter = decklink_create_iterator(avctx);
    if (!iter)
        return AVERROR_EXTERNAL;
    while (iter->Next(&dl) == S_OK) {
        const char *display_name = NULL;
        const char *unique_name = NULL;
        /* Best effort: a failed attribute query leaves the name NULL,
         * which simply cannot match below. */
        decklink_get_attr_string(dl, BMDDeckLinkDisplayName, &display_name);
        decklink_get_attr_string(dl, BMDDeckLinkDeviceHandle, &unique_name);
        /* Match on either name. Explicit parentheses make the && / ||
         * grouping unambiguous (and silence -Wparentheses). */
        if ((display_name && !strcmp(name, display_name)) ||
            (unique_name && !strcmp(name, unique_name))) {
            av_free((void *)unique_name);
            av_free((void *)display_name);
            ctx->dl = dl; /* keep the iterator's reference; released in cleanup */
            break;
        }
        av_free((void *)display_name);
        av_free((void *)unique_name);
        dl->Release();
    }
    iter->Release();
    if (!ctx->dl)
        return AVERROR(ENXIO);

    if (ctx->dl->QueryInterface(IID_IDeckLinkConfiguration, (void **)&ctx->cfg) != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not get configuration interface for '%s'\n", name);
        ff_decklink_cleanup(avctx);
        return AVERROR_EXTERNAL;
    }

    if (ctx->dl->QueryInterface(IID_IDeckLinkProfileAttributes, (void **)&ctx->attr) != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not get attributes interface for '%s'\n", name);
        ff_decklink_cleanup(avctx);
        return AVERROR_EXTERNAL;
    }

    return 0;
}

207
externals/ffmpeg/libavdevice/decklink_common.h vendored Executable file
View File

@@ -0,0 +1,207 @@
/*
* Blackmagic DeckLink common code
* Copyright (c) 2013-2014 Ramiro Polla, Luca Barbato, Deti Fliegl
* Copyright (c) 2017 Akamai Technologies, Inc.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_DECKLINK_COMMON_H
#define AVDEVICE_DECKLINK_COMMON_H
#include <DeckLinkAPIVersion.h>
#if BLACKMAGIC_DECKLINK_API_VERSION < 0x0b000000
#define IID_IDeckLinkProfileAttributes IID_IDeckLinkAttributes
#define IDeckLinkProfileAttributes IDeckLinkAttributes
#endif
#include "libavutil/thread.h"
#include "decklink_common_c.h"
#if CONFIG_LIBKLVANC
#include "libklvanc/vanc.h"
#endif
#ifdef _WIN32
#define DECKLINK_BOOL BOOL
#else
#define DECKLINK_BOOL bool
#endif
#ifdef _WIN32
/* Convert a wide-character (UTF-16) string to a freshly av_malloc()ed
 * UTF-8 copy. Returns NULL on conversion or allocation failure.
 * Fix: WideCharToMultiByte() returns 0 on error; previously that led to
 * av_malloc(0) and an unterminated buffer being returned to the caller. */
static char *dup_wchar_to_utf8(wchar_t *w)
{
    char *s = NULL;
    int l = WideCharToMultiByte(CP_UTF8, 0, w, -1, 0, 0, 0, 0);
    if (l <= 0)
        return NULL;
    s = (char *) av_malloc(l);
    if (s)
        WideCharToMultiByte(CP_UTF8, 0, w, -1, s, l, 0, 0);
    return s;
}
#define DECKLINK_STR OLECHAR *
#define DECKLINK_STRDUP dup_wchar_to_utf8
#define DECKLINK_FREE(s) SysFreeString(s)
#elif defined(__APPLE__)
/* Convert a CoreFoundation string to a freshly allocated UTF-8 copy.
 * Returns NULL when the conversion fails (e.g. the string does not fit
 * in 255 bytes). Fix: the CFStringGetCString() result was ignored, so a
 * failed conversion duplicated an uninitialized stack buffer (UB). */
static char *dup_cfstring_to_utf8(CFStringRef w)
{
    char s[256];
    if (!CFStringGetCString(w, s, 255, kCFStringEncodingUTF8))
        return NULL;
    return av_strdup(s);
}
#define DECKLINK_STR const __CFString *
#define DECKLINK_STRDUP dup_cfstring_to_utf8
#define DECKLINK_FREE(s) CFRelease(s)
#else
#define DECKLINK_STR const char *
#define DECKLINK_STRDUP av_strdup
/* free() is needed for a string returned by the DeckLink SDL. */
#define DECKLINK_FREE(s) free((void *) s)
#endif
class decklink_output_callback;
class decklink_input_callback;
/* Thread-safe FIFO of captured packets, filled by the SDK capture callback
 * and drained by the demuxer's read_packet. */
typedef struct AVPacketQueue {
    AVPacketList *first_pkt, *last_pkt; // singly-linked list head/tail
    int nb_packets;                     // number of queued packets
    unsigned long long size;            // total queued payload in bytes
    int abort_request;                  // set to wake/abort blocked consumers
    pthread_mutex_t mutex;              // guards all fields above
    pthread_cond_t cond;                // signalled when a packet is queued
    AVFormatContext *avctx;             // for logging from queue helpers
    int64_t max_q_size;                 // drop threshold for 'size'
} AVPacketQueue;
/* Runtime state shared by the DeckLink capture and playback code; hangs off
 * decklink_cctx.ctx and lives from device init until cleanup. */
struct decklink_ctx {
    /* DeckLink SDK interfaces */
    IDeckLink *dl;                      // the opened device
    IDeckLinkOutput *dlo;               // playback interface (output only)
    IDeckLinkInput *dli;                // capture interface (input only)
    IDeckLinkConfiguration *cfg;        // device configuration interface
    IDeckLinkProfileAttributes *attr;   // device attribute queries
    decklink_output_callback *output_callback;

    /* DeckLink mode information */
    BMDTimeValue bmd_tb_den;            // frame duration denominator (timescale)
    BMDTimeValue bmd_tb_num;            // frame duration numerator
    BMDDisplayMode bmd_mode;            // selected display mode fourcc
    BMDVideoConnection video_input;
    BMDAudioConnection audio_input;
    BMDTimecodeFormat tc_format;
    int bmd_width;
    int bmd_height;
    int bmd_field_dominance;
    int supports_vanc;                  // cleared when VANC output is unavailable

    /* Capture buffer queue */
    AVPacketQueue queue;

    /* Streams present */
    int audio;
    int video;

    /* Status */
    int playback_started;
    int64_t last_pts;
    unsigned long frameCount;
    unsigned int dropped;               // count of dropped/late frames
    AVStream *audio_st;
    AVStream *video_st;
    AVStream *teletext_st;
    uint16_t cdp_sequence_num;          // CEA-708 CDP sequence counter

    /* Options */
    int list_devices;
    int list_formats;
    int64_t teletext_lines;
    double preroll;
    int duplex_mode;
    DecklinkPtsSource audio_pts_source;
    DecklinkPtsSource video_pts_source;
    int draw_bars;
    BMDPixelFormat raw_format;

    int frames_preroll;
    int frames_buffer;

    pthread_mutex_t mutex;              // with 'cond': output buffer accounting
    pthread_cond_t cond;
    int frames_buffer_available_spots;

    int autodetect;

#if CONFIG_LIBKLVANC
    struct klvanc_context_s *vanc_ctx;
#endif

    int channels;
    int audio_depth;
    unsigned long tc_seen;                  // used with option wait_for_tc
};
/* Whether a helper operates on the capture or playback side of the device. */
typedef enum { DIRECTION_IN, DIRECTION_OUT} decklink_direction_t;

/* The SDK's buffered-frame/sample count type differs by platform and SDK
 * version; buffercount_type papers over that in shared code. */
#ifdef _WIN32
#if BLACKMAGIC_DECKLINK_API_VERSION < 0x0a040000
typedef unsigned long buffercount_type;
#else
typedef unsigned int buffercount_type;
#endif
IDeckLinkIterator *CreateDeckLinkIteratorInstance(void);
#else
typedef uint32_t buffercount_type;
#endif
/* Lookup tables translating the integer AVOption values (audio_input,
 * video_input, timecode_format) into SDK constants; index 0 means "unset". */
static const BMDAudioConnection decklink_audio_connection_map[] = {
    (BMDAudioConnection)0,
    bmdAudioConnectionEmbedded,
    bmdAudioConnectionAESEBU,
    bmdAudioConnectionAnalog,
    bmdAudioConnectionAnalogXLR,
    bmdAudioConnectionAnalogRCA,
    bmdAudioConnectionMicrophone,
};

static const BMDVideoConnection decklink_video_connection_map[] = {
    (BMDVideoConnection)0,
    bmdVideoConnectionSDI,
    bmdVideoConnectionHDMI,
    bmdVideoConnectionOpticalSDI,
    bmdVideoConnectionComponent,
    bmdVideoConnectionComposite,
    bmdVideoConnectionSVideo,
};

static const BMDTimecodeFormat decklink_timecode_format_map[] = {
    (BMDTimecodeFormat)0,
    bmdTimecodeRP188VITC1,
    bmdTimecodeRP188VITC2,
    bmdTimecodeRP188LTC,
    bmdTimecodeRP188Any,
    bmdTimecodeVITC,
    bmdTimecodeVITCField2,
    bmdTimecodeSerial,
};
/* Shared entry points implemented in decklink_common.cpp. C++ default
 * arguments: the direction defaults to DIRECTION_OUT where noted. */
int ff_decklink_set_configs(AVFormatContext *avctx, decklink_direction_t direction);
int ff_decklink_set_format(AVFormatContext *avctx, int width, int height, int tb_num, int tb_den, enum AVFieldOrder field_order, decklink_direction_t direction = DIRECTION_OUT);
int ff_decklink_set_format(AVFormatContext *avctx, decklink_direction_t direction);
int ff_decklink_list_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list, int show_inputs, int show_outputs);
void ff_decklink_list_devices_legacy(AVFormatContext *avctx, int show_inputs, int show_outputs);
int ff_decklink_list_formats(AVFormatContext *avctx, decklink_direction_t direction = DIRECTION_OUT);
void ff_decklink_cleanup(AVFormatContext *avctx);
int ff_decklink_init_device(AVFormatContext *avctx, const char* name);
#endif /* AVDEVICE_DECKLINK_COMMON_H */

View File

@@ -0,0 +1,63 @@
/*
* Blackmagic DeckLink common code
* Copyright (c) 2013-2014 Ramiro Polla
* Copyright (c) 2017 Akamai Technologies, Inc.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_DECKLINK_COMMON_C_H
#define AVDEVICE_DECKLINK_COMMON_C_H
/* Clock source used to derive packet timestamps during capture
 * (values match the "audio_pts"/"video_pts" AVOption constants). */
typedef enum DecklinkPtsSource {
    PTS_SRC_AUDIO     = 1,
    PTS_SRC_VIDEO     = 2,
    PTS_SRC_REFERENCE = 3,
    PTS_SRC_WALLCLOCK = 4,
    PTS_SRC_ABS_WALLCLOCK = 5,
    PTS_SRC_NB              // sentinel: number of sources + 1
} DecklinkPtsSource;
/* Public (AVOption-backed) context for the DeckLink devices. Kept plain C
 * so the option tables compile as C; the C++ runtime state hangs off 'ctx'. */
struct decklink_cctx {
    const AVClass *cclass;      // must be first for the AVOption system

    void *ctx;                  // points at the C++ struct decklink_ctx

    /* Options */
    int list_devices;
    int list_formats;
    int64_t teletext_lines;     // bitmask of teletext lines to capture
    double preroll;
    int audio_channels;
    int audio_depth;            // 16 or 32 bit audio
    int duplex_mode;            // 0 unset, 1 half, 2 full
    DecklinkPtsSource audio_pts_source;
    DecklinkPtsSource video_pts_source;
    int audio_input;            // index into decklink_audio_connection_map
    int video_input;            // index into decklink_video_connection_map
    int tc_format;              // index into decklink_timecode_format_map
    int draw_bars;
    char *format_code;          // fourcc forcing a specific display mode
    int raw_format;             // BMDPixelFormat requested for capture
    int64_t queue_size;         // capture queue limit in bytes
    int copyts;
    int64_t timestamp_align;
    int timing_offset;          // INT_MIN means unset
    int wait_for_tc;
};
#endif /* AVDEVICE_DECKLINK_COMMON_C_H */

1278
externals/ffmpeg/libavdevice/decklink_dec.cpp vendored Executable file

File diff suppressed because it is too large Load Diff

38
externals/ffmpeg/libavdevice/decklink_dec.h vendored Executable file
View File

@@ -0,0 +1,38 @@
/*
* Blackmagic DeckLink input
* Copyright (c) 2013-2014 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_DECKLINK_DEC_H
#define AVDEVICE_DECKLINK_DEC_H
#ifdef __cplusplus
extern "C" {
#endif
int ff_decklink_read_header(AVFormatContext *avctx);
int ff_decklink_read_packet(AVFormatContext *avctx, AVPacket *pkt);
int ff_decklink_read_close(AVFormatContext *avctx);
int ff_decklink_list_input_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* AVDEVICE_DECKLINK_DEC_H */

109
externals/ffmpeg/libavdevice/decklink_dec_c.c vendored Executable file
View File

@@ -0,0 +1,109 @@
/*
* Blackmagic DeckLink input
* Copyright (c) 2014 Deti Fliegl
* Copyright (c) 2017 Akamai Technologies, Inc.
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavformat/avformat.h"
#include "libavutil/opt.h"
#include "decklink_common_c.h"
#include "decklink_dec.h"
#define OFFSET(x) offsetof(struct decklink_cctx, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Demuxer options for DeckLink capture. Integer options with a trailing
 * unit string (e.g. "raw_format", "tc_format") accept the named constants
 * declared below them; connector/timecode values index the maps in
 * decklink_common.h. */
static const AVOption options[] = {
    { "list_devices", "list available devices"  , OFFSET(list_devices), AV_OPT_TYPE_INT   , { .i64 = 0   }, 0, 1, DEC },
    { "list_formats", "list supported formats"  , OFFSET(list_formats), AV_OPT_TYPE_INT   , { .i64 = 0   }, 0, 1, DEC },
    { "format_code",  "set format by fourcc"    , OFFSET(format_code),  AV_OPT_TYPE_STRING, { .str = NULL}, 0, 0, DEC },
    /* Capture pixel format, expressed as the SDK's BMDPixelFormat fourcc. */
    { "raw_format",   "pixel format to be returned by the card when capturing" , OFFSET(raw_format),  AV_OPT_TYPE_INT, { .i64 = MKBETAG('2','v','u','y')}, 0, UINT_MAX, DEC, "raw_format" },
    { "uyvy422",      NULL,                                              0,  AV_OPT_TYPE_CONST, { .i64 = MKBETAG('2','v','u','y') }, 0, 0, DEC, "raw_format"},
    { "yuv422p10",    NULL,                                              0,  AV_OPT_TYPE_CONST, { .i64 = MKBETAG('v','2','1','0') }, 0, 0, DEC, "raw_format"},
    { "argb",         NULL,                                              0,  AV_OPT_TYPE_CONST, { .i64 = 32                       }, 0, 0, DEC, "raw_format"},
    { "bgra",         NULL,                                              0,  AV_OPT_TYPE_CONST, { .i64 = MKBETAG('B','G','R','A') }, 0, 0, DEC, "raw_format"},
    { "rgb10",        NULL,                                              0,  AV_OPT_TYPE_CONST, { .i64 = MKBETAG('r','2','1','0') }, 0, 0, DEC, "raw_format"},
    { "teletext_lines", "teletext lines bitmask", OFFSET(teletext_lines), AV_OPT_TYPE_INT64, { .i64 = 0 }, 0, 0x7ffffffffLL, DEC, "teletext_lines"},
    { "standard",     NULL,                                           0,  AV_OPT_TYPE_CONST, { .i64 = 0x7fff9fffeLL}, 0, 0, DEC, "teletext_lines"},
    { "all",          NULL,                                           0,  AV_OPT_TYPE_CONST, { .i64 = 0x7ffffffffLL}, 0, 0, DEC, "teletext_lines"},
    { "channels",     "number of audio channels", OFFSET(audio_channels), AV_OPT_TYPE_INT , { .i64 = 2   }, 2, 16, DEC },
    { "duplex_mode",  "duplex mode",              OFFSET(duplex_mode),    AV_OPT_TYPE_INT, { .i64 = 0}, 0, 2, DEC, "duplex_mode"},
    { "unset",         NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 0}, 0, 0, DEC, "duplex_mode"},
    { "half",          NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 1}, 0, 0, DEC, "duplex_mode"},
    { "full",          NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 2}, 0, 0, DEC, "duplex_mode"},
    { "timecode_format", "timecode format",           OFFSET(tc_format),  AV_OPT_TYPE_INT,   { .i64 = 0}, 0, 7, DEC, "tc_format"},
    { "none",          NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 0}, 0, 0, DEC, "tc_format"},
    { "rp188vitc",     NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 1}, 0, 0, DEC, "tc_format"},
    { "rp188vitc2",    NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 2}, 0, 0, DEC, "tc_format"},
    { "rp188ltc",      NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 3}, 0, 0, DEC, "tc_format"},
    { "rp188any",      NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 4}, 0, 0, DEC, "tc_format"},
    { "vitc",          NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 5}, 0, 0, DEC, "tc_format"},
    { "vitc2",         NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 6}, 0, 0, DEC, "tc_format"},
    { "serial",        NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 7}, 0, 0, DEC, "tc_format"},
    { "video_input",  "video input",              OFFSET(video_input),    AV_OPT_TYPE_INT,   { .i64 = 0}, 0, 6, DEC, "video_input"},
    { "unset",         NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 0}, 0, 0, DEC, "video_input"},
    { "sdi",           NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 1}, 0, 0, DEC, "video_input"},
    { "hdmi",          NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 2}, 0, 0, DEC, "video_input"},
    { "optical_sdi",   NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 3}, 0, 0, DEC, "video_input"},
    { "component",     NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 4}, 0, 0, DEC, "video_input"},
    { "composite",     NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 5}, 0, 0, DEC, "video_input"},
    { "s_video",       NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 6}, 0, 0, DEC, "video_input"},
    { "audio_input",  "audio input",              OFFSET(audio_input),    AV_OPT_TYPE_INT,   { .i64 = 0}, 0, 6, DEC, "audio_input"},
    { "unset",         NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 0}, 0, 0, DEC, "audio_input"},
    { "embedded",      NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 1}, 0, 0, DEC, "audio_input"},
    { "aes_ebu",       NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 2}, 0, 0, DEC, "audio_input"},
    { "analog",        NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 3}, 0, 0, DEC, "audio_input"},
    { "analog_xlr",    NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 4}, 0, 0, DEC, "audio_input"},
    { "analog_rca",    NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 5}, 0, 0, DEC, "audio_input"},
    { "microphone",    NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = 6}, 0, 0, DEC, "audio_input"},
    { "audio_pts",     "audio pts source",   OFFSET(audio_pts_source),    AV_OPT_TYPE_INT,   { .i64 = PTS_SRC_AUDIO    }, 1, PTS_SRC_NB-1, DEC, "pts_source"},
    { "video_pts",     "video pts source",   OFFSET(video_pts_source),    AV_OPT_TYPE_INT,   { .i64 = PTS_SRC_VIDEO    }, 1, PTS_SRC_NB-1, DEC, "pts_source"},
    { "audio",         NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = PTS_SRC_AUDIO    }, 0, 0, DEC, "pts_source"},
    { "video",         NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = PTS_SRC_VIDEO    }, 0, 0, DEC, "pts_source"},
    { "reference",     NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = PTS_SRC_REFERENCE}, 0, 0, DEC, "pts_source"},
    { "wallclock",     NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = PTS_SRC_WALLCLOCK}, 0, 0, DEC, "pts_source"},
    { "abs_wallclock", NULL,                                          0,  AV_OPT_TYPE_CONST, { .i64 = PTS_SRC_ABS_WALLCLOCK}, 0, 0, DEC, "pts_source"},
    { "draw_bars",     "draw bars on signal loss" , OFFSET(draw_bars),    AV_OPT_TYPE_BOOL,  { .i64 = 1}, 0, 1, DEC },
    { "queue_size",    "input queue buffer size",   OFFSET(queue_size),   AV_OPT_TYPE_INT64, { .i64 = (1024 * 1024 * 1024)}, 0, INT64_MAX, DEC },
    { "audio_depth",   "audio bitdepth (16 or 32)", OFFSET(audio_depth),  AV_OPT_TYPE_INT,   { .i64 = 16}, 16, 32, DEC },
    { "decklink_copyts", "copy timestamps, do not remove the initial offset", OFFSET(copyts), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, DEC },
    { "timestamp_align", "capture start time alignment (in seconds)", OFFSET(timestamp_align), AV_OPT_TYPE_DURATION, { .i64 = 0 }, 0, INT_MAX, DEC },
    { "wait_for_tc",     "drop frames till a frame with timecode is received. TC format must be set", OFFSET(wait_for_tc), AV_OPT_TYPE_BOOL, { .i64 = 0 }, 0, 1, DEC },
    { NULL },
};
/* AVClass exposing the DeckLink capture AVOptions (table above) and tagging
 * the demuxer as a video-input device for ffmpeg's device categorization. */
static const AVClass decklink_demuxer_class = {
    .class_name = "Blackmagic DeckLink indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
/* "decklink" capture device. AVFMT_NOFILE: the "filename" is a DeckLink
 * device name, not a file. priv_data is a struct decklink_cctx configured
 * through the AVOptions above. */
AVInputFormat ff_decklink_demuxer = {
    .name            = "decklink",
    .long_name       = NULL_IF_CONFIG_SMALL("Blackmagic DeckLink input"),
    .flags           = AVFMT_NOFILE,
    .priv_class      = &decklink_demuxer_class,
    .priv_data_size  = sizeof(struct decklink_cctx),
    .get_device_list = ff_decklink_list_input_devices,
    .read_header     = ff_decklink_read_header,
    .read_packet     = ff_decklink_read_packet,
    .read_close      = ff_decklink_read_close,
};

640
externals/ffmpeg/libavdevice/decklink_enc.cpp vendored Executable file
View File

@@ -0,0 +1,640 @@
/*
* Blackmagic DeckLink output
* Copyright (c) 2013-2014 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <atomic>
using std::atomic;
/* Include internal.h first to avoid conflict between winsock.h (used by
* DeckLink headers) and winsock2.h (used by libavformat) in MSVC++ builds */
extern "C" {
#include "libavformat/internal.h"
}
#include <DeckLinkAPI.h>
extern "C" {
#include "libavformat/avformat.h"
#include "libavutil/imgutils.h"
#include "avdevice.h"
}
#include "decklink_common.h"
#include "decklink_enc.h"
#if CONFIG_LIBKLVANC
#include "libklvanc/vanc.h"
#include "libklvanc/vanc-lines.h"
#include "libklvanc/pixels.h"
#endif
/* DeckLink callback class declaration */
/* IDeckLinkVideoFrame implementation handed to the DeckLink scheduler.
 * Wraps either a raw AVFrame (wrapped-avframe path, 8-bit UYVY) or an
 * encoded V210 AVPacket (10-bit YUV). Intrusively reference counted,
 * starting at 1; the wrapped frame/packet and any attached ancillary data
 * are freed on the final Release(). */
class decklink_frame : public IDeckLinkVideoFrame
{
public:
    /* Construct around a raw AVFrame (AV_CODEC_ID_WRAPPED_AVFRAME). */
    decklink_frame(struct decklink_ctx *ctx, AVFrame *avframe, AVCodecID codec_id, int height, int width) :
        _ctx(ctx), _avframe(avframe), _avpacket(NULL), _codec_id(codec_id), _ancillary(NULL), _height(height), _width(width), _refs(1) { }
    /* Construct around an encoded V210 AVPacket. */
    decklink_frame(struct decklink_ctx *ctx, AVPacket *avpacket, AVCodecID codec_id, int height, int width) :
        _ctx(ctx), _avframe(NULL), _avpacket(avpacket), _codec_id(codec_id), _ancillary(NULL), _height(height), _width(width), _refs(1) { }

    virtual long STDMETHODCALLTYPE GetWidth (void) { return _width; }
    virtual long STDMETHODCALLTYPE GetHeight (void) { return _height; }
    /* Bytes per row: the AVFrame's |linesize[0]| for raw frames, otherwise
     * the V210 row size (48 pixels pack into 128 bytes). */
    virtual long STDMETHODCALLTYPE GetRowBytes (void)
    {
        if (_codec_id == AV_CODEC_ID_WRAPPED_AVFRAME)
            return _avframe->linesize[0] < 0 ? -_avframe->linesize[0] : _avframe->linesize[0];
        else
            return ((GetWidth() + 47) / 48) * 128;
    }
    virtual BMDPixelFormat STDMETHODCALLTYPE GetPixelFormat(void)
    {
        if (_codec_id == AV_CODEC_ID_WRAPPED_AVFRAME)
            return bmdFormat8BitYUV;
        else
            return bmdFormat10BitYUV;
    }
    /* A negative linesize means the AVFrame is stored bottom-up; ask the
     * hardware to flip it vertically. */
    virtual BMDFrameFlags STDMETHODCALLTYPE GetFlags (void)
    {
        if (_codec_id == AV_CODEC_ID_WRAPPED_AVFRAME)
            return _avframe->linesize[0] < 0 ? bmdFrameFlagFlipVertical : bmdFrameFlagDefault;
        else
            return bmdFrameFlagDefault;
    }
    virtual HRESULT STDMETHODCALLTYPE GetBytes (void **buffer)
    {
        if (_codec_id == AV_CODEC_ID_WRAPPED_AVFRAME) {
            /* Bottom-up frame: data[0] points at the last display row, so
             * step back to the start of the underlying buffer. */
            if (_avframe->linesize[0] < 0)
                *buffer = (void *)(_avframe->data[0] + _avframe->linesize[0] * (_avframe->height - 1));
            else
                *buffer = (void *)(_avframe->data[0]);
        } else {
            *buffer = (void *)(_avpacket->data);
        }
        return S_OK;
    }

    /* No timecode support. */
    virtual HRESULT STDMETHODCALLTYPE GetTimecode (BMDTimecodeFormat format, IDeckLinkTimecode **timecode) { return S_FALSE; }
    /* Hand out the attached VANC data, if any, with a reference for the caller. */
    virtual HRESULT STDMETHODCALLTYPE GetAncillaryData(IDeckLinkVideoFrameAncillary **ancillary)
    {
        *ancillary = _ancillary;
        if (_ancillary) {
            _ancillary->AddRef();
            return S_OK;
        } else {
            return S_FALSE;
        }
    }
    /* Attach VANC data, releasing any previously attached object. */
    virtual HRESULT STDMETHODCALLTYPE SetAncillaryData(IDeckLinkVideoFrameAncillary *ancillary)
    {
        if (_ancillary)
            _ancillary->Release();
        _ancillary = ancillary;
        _ancillary->AddRef();
        return S_OK;
    }

    /* NOTE(review): COM convention is to set *ppv = NULL on failure; it is
     * left untouched here — confirm no caller relies on it being cleared. */
    virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
    virtual ULONG STDMETHODCALLTYPE AddRef(void) { return ++_refs; }
    /* On the last reference, free the wrapped AVFrame/AVPacket, drop the
     * ancillary reference and delete ourselves. */
    virtual ULONG STDMETHODCALLTYPE Release(void)
    {
        int ret = --_refs;
        if (!ret) {
            av_frame_free(&_avframe);
            av_packet_free(&_avpacket);
            if (_ancillary)
                _ancillary->Release();
            delete this;
        }
        return ret;
    }

    struct decklink_ctx *_ctx;
    AVFrame *_avframe;    /* owned; set on the wrapped-avframe path */
    AVPacket *_avpacket;  /* owned; set on the V210 path */
    AVCodecID _codec_id;
    IDeckLinkVideoFrameAncillary *_ancillary; /* owned reference, may be NULL */
    int _height;
    int _width;

private:
    std::atomic<int> _refs; /* intrusive refcount, starts at 1 */
};
/* Completion callback invoked on a DeckLink driver thread each time a
 * scheduled frame has been consumed. It drops the frame's payload and
 * returns a slot to the bounded frame buffer, waking any writer blocked in
 * decklink_write_video_packet(). AddRef/Release are stubs returning 1: the
 * single instance is owned and deleted by ff_decklink_write_trailer(). */
class decklink_output_callback : public IDeckLinkVideoOutputCallback
{
public:
    virtual HRESULT STDMETHODCALLTYPE ScheduledFrameCompleted(IDeckLinkVideoFrame *_frame, BMDOutputFrameCompletionResult result)
    {
        decklink_frame *frame = static_cast<decklink_frame *>(_frame);
        struct decklink_ctx *ctx = frame->_ctx;

        /* Unref the payload now; the decklink_frame wrapper itself is freed
         * when the driver releases its last reference. */
        if (frame->_avframe)
            av_frame_unref(frame->_avframe);
        if (frame->_avpacket)
            av_packet_unref(frame->_avpacket);

        pthread_mutex_lock(&ctx->mutex);
        ctx->frames_buffer_available_spots++;
        pthread_cond_broadcast(&ctx->cond);
        pthread_mutex_unlock(&ctx->mutex);

        return S_OK;
    }
    virtual HRESULT STDMETHODCALLTYPE ScheduledPlaybackHasStopped(void) { return S_OK; }
    virtual HRESULT STDMETHODCALLTYPE QueryInterface(REFIID iid, LPVOID *ppv) { return E_NOINTERFACE; }
    virtual ULONG STDMETHODCALLTYPE AddRef(void) { return 1; }
    virtual ULONG STDMETHODCALLTYPE Release(void) { return 1; }
};
/* Validate the video stream and configure the DeckLink output: pick the raw
 * pixel format (8-bit UYVY for wrapped AVFrames, 10-bit for V210), select
 * the display mode matching size/rate/field order, enable the output (with
 * VANC when supported), install the completion callback and size the frame
 * ring buffer from the preroll time.
 * Returns 0 on success, -1 on any unsupported parameter or device error. */
static int decklink_setup_video(AVFormatContext *avctx, AVStream *st)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
    AVCodecParameters *c = st->codecpar;

    if (ctx->video) {
        av_log(avctx, AV_LOG_ERROR, "Only one video stream is supported!\n");
        return -1;
    }

    if (c->codec_id == AV_CODEC_ID_WRAPPED_AVFRAME) {
        if (c->format != AV_PIX_FMT_UYVY422) {
            av_log(avctx, AV_LOG_ERROR, "Unsupported pixel format!"
                   " Only AV_PIX_FMT_UYVY422 is supported.\n");
            return -1;
        }
        ctx->raw_format = bmdFormat8BitYUV;
    } else if (c->codec_id != AV_CODEC_ID_V210) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported codec type!"
               " Only V210 and wrapped frame with AV_PIX_FMT_UYVY422 are supported.\n");
        return -1;
    } else {
        ctx->raw_format = bmdFormat10BitYUV;
    }

    if (ff_decklink_set_configs(avctx, DIRECTION_OUT) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Could not set output configuration\n");
        return -1;
    }
    /* The stream's time base must correspond to one of the card's modes. */
    if (ff_decklink_set_format(avctx, c->width, c->height,
                               st->time_base.num, st->time_base.den, c->field_order)) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported video size, framerate or field order!"
               " Check available formats with -list_formats 1.\n");
        return -1;
    }
    /* Prefer a VANC-capable output (needed for captions); fall back if the
     * card refuses. */
    if (ctx->supports_vanc && ctx->dlo->EnableVideoOutput(ctx->bmd_mode, bmdVideoOutputVANC) != S_OK) {
        av_log(avctx, AV_LOG_WARNING, "Could not enable video output with VANC! Trying without...\n");
        ctx->supports_vanc = 0;
    }
    if (!ctx->supports_vanc && ctx->dlo->EnableVideoOutput(ctx->bmd_mode, bmdVideoOutputFlagDefault) != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not enable video output!\n");
        return -1;
    }

    /* Set callback. */
    ctx->output_callback = new decklink_output_callback();
    ctx->dlo->SetScheduledFrameCompletionCallback(ctx->output_callback);

    /* Convert preroll seconds into a frame count; the extra /1000 appears to
     * assume NTSC-style time bases such as 1001/30000 — TODO confirm. */
    ctx->frames_preroll = st->time_base.den * ctx->preroll;
    if (st->time_base.den > 1000)
        ctx->frames_preroll /= 1000;

    /* Buffer twice as many frames as the preroll. */
    ctx->frames_buffer = ctx->frames_preroll * 2;
    ctx->frames_buffer = FFMIN(ctx->frames_buffer, 60);
    pthread_mutex_init(&ctx->mutex, NULL);
    pthread_cond_init(&ctx->cond, NULL);
    ctx->frames_buffer_available_spots = ctx->frames_buffer;

    av_log(avctx, AV_LOG_DEBUG, "output: %s, preroll: %d, frames buffer size: %d\n",
           avctx->url, ctx->frames_preroll, ctx->frames_buffer);

    /* The device expects the framerate to be fixed. */
    avpriv_set_pts_info(st, 64, st->time_base.num, st->time_base.den);

    ctx->video = 1;
    return 0;
}
/* Validate the audio stream parameters and enable DeckLink audio output.
 * The hardware path only accepts 48 kHz, 16-bit interleaved PCM with 2, 8
 * or 16 channels. On success the audio preroll is started and the stream's
 * pts are fixed to sample units.
 * Returns 0 on success, -1 on any unsupported parameter or device error. */
static int decklink_setup_audio(AVFormatContext *avctx, AVStream *st)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
    AVCodecParameters *par = st->codecpar;

    /* A single audio stream is supported per device. */
    if (ctx->audio) {
        av_log(avctx, AV_LOG_ERROR, "Only one audio stream is supported!\n");
        return -1;
    }
    if (par->sample_rate != 48000) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported sample rate!"
               " Only 48kHz is supported.\n");
        return -1;
    }
    switch (par->channels) {
    case 2:
    case 8:
    case 16:
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "Unsupported number of channels!"
               " Only 2, 8 or 16 channels are supported.\n");
        return -1;
    }

    if (ctx->dlo->EnableAudioOutput(bmdAudioSampleRate48kHz,
                                    bmdAudioSampleType16bitInteger,
                                    par->channels,
                                    bmdAudioOutputStreamTimestamped) != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not enable audio output!\n");
        return -1;
    }
    if (ctx->dlo->BeginAudioPreroll() != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not begin audio preroll!\n");
        return -1;
    }

    /* The device expects the sample rate to be fixed. */
    avpriv_set_pts_info(st, 64, 1, par->sample_rate);

    ctx->channels = par->channels;
    ctx->audio = 1;
    return 0;
}
/* Muxer trailer: stop scheduled playback at the highest pts written,
 * disable the enabled outputs, release the device, then destroy the
 * completion callback, the buffer-accounting mutex/cond, the VANC context
 * and the device context itself. Always returns 0. */
av_cold int ff_decklink_write_trailer(AVFormatContext *avctx)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;

    if (ctx->playback_started) {
        BMDTimeValue actual;
        /* last_pts is in stream time-base units; scale to the BMD clock. */
        ctx->dlo->StopScheduledPlayback(ctx->last_pts * ctx->bmd_tb_num,
                                        &actual, ctx->bmd_tb_den);
        ctx->dlo->DisableVideoOutput();
        if (ctx->audio)
            ctx->dlo->DisableAudioOutput();
    }

    ff_decklink_cleanup(avctx);

    if (ctx->output_callback)
        delete ctx->output_callback;

    pthread_mutex_destroy(&ctx->mutex);
    pthread_cond_destroy(&ctx->cond);

#if CONFIG_LIBKLVANC
    klvanc_context_destroy(ctx->vanc_ctx);
#endif

    av_freep(&cctx->ctx);

    return 0;
}
#if CONFIG_LIBKLVANC
/* Extract A53 closed-caption side data from |pkt|, wrap it in an EIA-708B
 * CDP and queue it on VANC line 11 of |vanc_lines| for this frame.
 * All failures are logged and the captions silently dropped; video output
 * is never failed because of caption problems. */
static void construct_cc(AVFormatContext *avctx, struct decklink_ctx *ctx,
                         AVPacket *pkt, struct klvanc_line_set_s *vanc_lines)
{
    struct klvanc_packet_eia_708b_s *cdp;
    uint16_t *cdp_words;
    uint16_t len;
    uint8_t cc_count;
    int size, ret, i;

    const uint8_t *data = av_packet_get_side_data(pkt, AV_PKT_DATA_A53_CC, &size);
    if (!data)
        return; /* no captions on this packet */

    /* Side data is packed as 3-byte cc_data triplets. */
    cc_count = size / 3;

    ret = klvanc_create_eia708_cdp(&cdp);
    if (ret)
        return;

    ret = klvanc_set_framerate_EIA_708B(cdp, ctx->bmd_tb_num, ctx->bmd_tb_den);
    if (ret) {
        /* Cast to long long: %lld requires exactly long long, while the
         * 64-bit BMD time values may be plain long on LP64 platforms —
         * a mismatched specifier/argument pair is undefined behavior. */
        av_log(avctx, AV_LOG_ERROR, "Invalid framerate specified: %lld/%lld\n",
               (long long)ctx->bmd_tb_num, (long long)ctx->bmd_tb_den);
        klvanc_destroy_eia708_cdp(cdp);
        return;
    }

    if (cc_count > KLVANC_MAX_CC_COUNT) {
        av_log(avctx, AV_LOG_ERROR, "Illegal cc_count received: %d\n", cc_count);
        cc_count = KLVANC_MAX_CC_COUNT;
    }

    /* CC data */
    cdp->header.ccdata_present = 1;
    cdp->header.caption_service_active = 1;
    cdp->ccdata.cc_count = cc_count;
    for (i = 0; i < cc_count; i++) {
        if (data[3*i] & 0x04)
            cdp->ccdata.cc[i].cc_valid = 1;
        cdp->ccdata.cc[i].cc_type    = data[3*i] & 0x03;
        cdp->ccdata.cc[i].cc_data[0] = data[3*i+1];
        cdp->ccdata.cc[i].cc_data[1] = data[3*i+2];
    }

    klvanc_finalize_EIA_708B(cdp, ctx->cdp_sequence_num++);
    ret = klvanc_convert_EIA_708B_to_words(cdp, &cdp_words, &len);
    klvanc_destroy_eia708_cdp(cdp);
    if (ret != 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed converting 708 packet to words\n");
        return;
    }

    /* klvanc_line_insert() copies the payload; the words buffer comes from
     * libklvanc's malloc, so plain free() is correct here. */
    ret = klvanc_line_insert(ctx->vanc_ctx, vanc_lines, cdp_words, len, 11, 0);
    free(cdp_words);
    if (ret != 0) {
        av_log(avctx, AV_LOG_ERROR, "VANC line insertion failed\n");
        return;
    }
}
/* Build the IDeckLinkVideoFrameAncillary for one output frame: collect the
 * VANC packets for this packet (currently closed captions only), render
 * each VANC line as V210 into the frame's vertical-blanking buffers and
 * attach the result to |frame|. No-op when the output was opened without
 * VANC support. Returns 0 or AVERROR(EIO); individual bad lines are only
 * logged and skipped. */
static int decklink_construct_vanc(AVFormatContext *avctx, struct decklink_ctx *ctx,
                                   AVPacket *pkt, decklink_frame *frame)
{
    struct klvanc_line_set_s vanc_lines = { 0 };
    int ret = 0, i;

    if (!ctx->supports_vanc)
        return 0;

    construct_cc(avctx, ctx, pkt, &vanc_lines);

    IDeckLinkVideoFrameAncillary *vanc;
    int result = ctx->dlo->CreateAncillaryData(bmdFormat10BitYUV, &vanc);
    if (result != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Failed to create vanc\n");
        ret = AVERROR(EIO);
        goto done;
    }

    /* Now that we've got all the VANC lines in a nice orderly manner, generate the
       final VANC sections for the Decklink output */
    for (i = 0; i < vanc_lines.num_lines; i++) {
        struct klvanc_line_s *line = vanc_lines.lines[i];
        int real_line;
        void *buf;

        if (!line)
            break;

        /* FIXME: include hack for certain Decklink cards which mis-represent
           line numbers for pSF frames */
        real_line = line->line_number;

        result = vanc->GetBufferForVerticalBlankingLine(real_line, &buf);
        if (result != S_OK) {
            av_log(avctx, AV_LOG_ERROR, "Failed to get VANC line %d: %d", real_line, result);
            continue;
        }

        /* Generate the full line taking into account all VANC packets on that line */
        result = klvanc_generate_vanc_line_v210(ctx->vanc_ctx, line, (uint8_t *) buf,
                                                ctx->bmd_width);
        if (result) {
            av_log(avctx, AV_LOG_ERROR, "Failed to generate VANC line\n");
            continue;
        }
    }

    /* SetAncillaryData() takes its own reference; drop ours either way. */
    result = frame->SetAncillaryData(vanc);
    vanc->Release();
    if (result != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Failed to set vanc: %d", result);
        ret = AVERROR(EIO);
    }

done:
    /* Free every collected VANC line regardless of outcome. */
    for (i = 0; i < vanc_lines.num_lines; i++)
        klvanc_line_free(vanc_lines.lines[i]);

    return ret;
}
#endif
/* Queue one video frame — a wrapped raw AVFrame or an encoded V210 packet —
 * for scheduled playback. Blocks while the ring of in-flight frames is
 * full, attaches VANC data on the V210 path, and once more than
 * frames_preroll frames have been queued ends the audio preroll and starts
 * scheduled playback. Returns 0, AVERROR(EINVAL) or AVERROR(EIO). */
static int decklink_write_video_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
    AVStream *st = avctx->streams[pkt->stream_index];
    AVFrame *avframe = NULL, *tmp = (AVFrame *)pkt->data;
    AVPacket *avpacket = NULL;
    decklink_frame *frame;
    buffercount_type buffered;
    HRESULT hr;

    if (st->codecpar->codec_id == AV_CODEC_ID_WRAPPED_AVFRAME) {
        /* The frame must match the format negotiated in setup_video. */
        if (tmp->format != AV_PIX_FMT_UYVY422 ||
            tmp->width != ctx->bmd_width ||
            tmp->height != ctx->bmd_height) {
            av_log(avctx, AV_LOG_ERROR, "Got a frame with invalid pixel format or dimension.\n");
            return AVERROR(EINVAL);
        }

        /* Clone so the wrapper owns a reference independent of the caller. */
        avframe = av_frame_clone(tmp);
        if (!avframe) {
            av_log(avctx, AV_LOG_ERROR, "Could not clone video frame.\n");
            return AVERROR(EIO);
        }

        frame = new decklink_frame(ctx, avframe, st->codecpar->codec_id, avframe->height, avframe->width);
    } else {
        avpacket = av_packet_clone(pkt);
        if (!avpacket) {
            av_log(avctx, AV_LOG_ERROR, "Could not clone video frame.\n");
            return AVERROR(EIO);
        }

        frame = new decklink_frame(ctx, avpacket, st->codecpar->codec_id, ctx->bmd_height, ctx->bmd_width);

#if CONFIG_LIBKLVANC
        /* Best effort: a caption failure does not fail the frame. */
        if (decklink_construct_vanc(avctx, ctx, pkt, frame))
            av_log(avctx, AV_LOG_ERROR, "Failed to construct VANC\n");
#endif
    }

    /* NOTE(review): operator new throws rather than returning NULL, so this
     * check looks purely defensive — confirm before relying on it. */
    if (!frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not create new frame.\n");
        av_frame_free(&avframe);
        av_packet_free(&avpacket);
        return AVERROR(EIO);
    }

    /* Always keep at most one second of frames buffered. */
    pthread_mutex_lock(&ctx->mutex);
    while (ctx->frames_buffer_available_spots == 0) {
        pthread_cond_wait(&ctx->cond, &ctx->mutex);
    }
    ctx->frames_buffer_available_spots--;
    pthread_mutex_unlock(&ctx->mutex);

    /* Schedule frame for playback. */
    hr = ctx->dlo->ScheduleVideoFrame((class IDeckLinkVideoFrame *) frame,
                                      pkt->pts * ctx->bmd_tb_num,
                                      ctx->bmd_tb_num, ctx->bmd_tb_den);
    /* Pass ownership to DeckLink, or release on failure */
    frame->Release();
    if (hr != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not schedule video frame."
               " error %08x.\n", (uint32_t) hr);
        return AVERROR(EIO);
    }

    ctx->dlo->GetBufferedVideoFrameCount(&buffered);
    av_log(avctx, AV_LOG_DEBUG, "Buffered video frames: %d.\n", (int) buffered);
    if (pkt->pts > 2 && buffered <= 2)
        av_log(avctx, AV_LOG_WARNING, "There are not enough buffered video frames."
               " Video may misbehave!\n");

    /* Preroll video frames. */
    if (!ctx->playback_started && pkt->pts > ctx->frames_preroll) {
        av_log(avctx, AV_LOG_DEBUG, "Ending audio preroll.\n");
        if (ctx->audio && ctx->dlo->EndAudioPreroll() != S_OK) {
            av_log(avctx, AV_LOG_ERROR, "Could not end audio preroll!\n");
            return AVERROR(EIO);
        }
        av_log(avctx, AV_LOG_DEBUG, "Starting scheduled playback.\n");
        if (ctx->dlo->StartScheduledPlayback(0, ctx->bmd_tb_den, 1.0) != S_OK) {
            av_log(avctx, AV_LOG_ERROR, "Could not start scheduled playback!\n");
            return AVERROR(EIO);
        }
        ctx->playback_started = 1;
    }

    return 0;
}
/* Queue one packet of interleaved 16-bit PCM on the DeckLink audio output.
 * pkt->pts is in sample units (fixed by decklink_setup_audio()).
 * Returns 0 on success or AVERROR(EIO) if the device rejects the samples. */
static int decklink_write_audio_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
    buffercount_type queued;
    /* 2 bytes per sample per channel (16-bit PCM). */
    int sample_count = pkt->size / (2 * ctx->channels);

    ctx->dlo->GetBufferedAudioSampleFrameCount(&queued);
    if (pkt->pts > 1 && !queued)
        av_log(avctx, AV_LOG_WARNING, "There's no buffered audio."
               " Audio will misbehave!\n");

    if (ctx->dlo->ScheduleAudioSamples(pkt->data, sample_count, pkt->pts,
                                       bmdAudioSampleRate48kHz, NULL) != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not schedule audio samples.\n");
        return AVERROR(EIO);
    }

    return 0;
}
extern "C" {
/* Muxer header: allocate the device context, honour the deprecated
 * -list_devices / -list_formats options, open the device's output
 * interface and configure at most one video and one audio stream.
 * Returns 0, AVERROR_EXIT for the listing modes, or a negative error.
 * NOTE(review): the early returns before "error:" (klvanc failure,
 * list_devices) leave cctx->ctx allocated — confirm whether write_trailer
 * still runs on header failure, otherwise those paths leak. */
av_cold int ff_decklink_write_header(AVFormatContext *avctx)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx;
    unsigned int n;
    int ret;

    ctx = (struct decklink_ctx *) av_mallocz(sizeof(struct decklink_ctx));
    if (!ctx)
        return AVERROR(ENOMEM);
    /* Copy user options into the runtime context. */
    ctx->list_devices = cctx->list_devices;
    ctx->list_formats = cctx->list_formats;
    ctx->preroll = cctx->preroll;
    ctx->duplex_mode = cctx->duplex_mode;
    cctx->ctx = ctx;

#if CONFIG_LIBKLVANC
    if (klvanc_context_create(&ctx->vanc_ctx) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Cannot create VANC library context\n");
        return AVERROR(ENOMEM);
    }
    ctx->supports_vanc = 1;
#endif

    /* List available devices and exit. */
    if (ctx->list_devices) {
        av_log(avctx, AV_LOG_WARNING, "The -list_devices option is deprecated and will be removed. Please use ffmpeg -sinks decklink instead.\n");
        ff_decklink_list_devices_legacy(avctx, 0, 1);
        return AVERROR_EXIT;
    }

    ret = ff_decklink_init_device(avctx, avctx->url);
    if (ret < 0)
        return ret;

    /* Get output device. */
    if (ctx->dl->QueryInterface(IID_IDeckLinkOutput, (void **) &ctx->dlo) != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Could not open output device from '%s'\n",
               avctx->url);
        ret = AVERROR(EIO);
        goto error;
    }

    /* List supported formats. */
    if (ctx->list_formats) {
        ff_decklink_list_formats(avctx);
        ret = AVERROR_EXIT;
        goto error;
    }

    /* Setup streams. */
    ret = AVERROR(EIO);
    for (n = 0; n < avctx->nb_streams; n++) {
        AVStream *st = avctx->streams[n];
        AVCodecParameters *c = st->codecpar;
        if (c->codec_type == AVMEDIA_TYPE_AUDIO) {
            if (decklink_setup_audio(avctx, st))
                goto error;
        } else if (c->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (decklink_setup_video(avctx, st))
                goto error;
        } else {
            av_log(avctx, AV_LOG_ERROR, "Unsupported stream type.\n");
            goto error;
        }
    }

    return 0;

error:
    ff_decklink_cleanup(avctx);
    return ret;
}
/* Dispatch one packet to the video or audio scheduling path.
 * Also records the highest pts seen, which ff_decklink_write_trailer()
 * uses to stop scheduled playback. Unknown stream types yield EIO. */
int ff_decklink_write_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    struct decklink_cctx *cctx = (struct decklink_cctx *)avctx->priv_data;
    struct decklink_ctx *ctx = (struct decklink_ctx *)cctx->ctx;
    AVStream *st = avctx->streams[pkt->stream_index];

    ctx->last_pts = FFMAX(ctx->last_pts, pkt->pts);

    switch (st->codecpar->codec_type) {
    case AVMEDIA_TYPE_VIDEO:
        return decklink_write_video_packet(avctx, pkt);
    case AVMEDIA_TYPE_AUDIO:
        return decklink_write_audio_packet(avctx, pkt);
    default:
        return AVERROR(EIO);
    }
}
/* get_device_list callback: enumerate DeckLink devices into device_list.
 * The (0, 1) arguments presumably select output-capable devices, mirroring
 * the input variant — confirm against ff_decklink_list_devices() in
 * decklink_common. */
int ff_decklink_list_output_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list)
{
    return ff_decklink_list_devices(avctx, device_list, 0, 1);
}
} /* extern "C" */

38
externals/ffmpeg/libavdevice/decklink_enc.h vendored Executable file
View File

@@ -0,0 +1,38 @@
/*
* Blackmagic DeckLink output
* Copyright (c) 2013-2014 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_DECKLINK_ENC_H
#define AVDEVICE_DECKLINK_ENC_H
#ifdef __cplusplus
extern "C" {
#endif
int ff_decklink_write_header(AVFormatContext *avctx);
int ff_decklink_write_packet(AVFormatContext *avctx, AVPacket *pkt);
int ff_decklink_write_trailer(AVFormatContext *avctx);
int ff_decklink_list_output_devices(AVFormatContext *avctx, struct AVDeviceInfoList *device_list);
#ifdef __cplusplus
} /* extern "C" */
#endif
#endif /* AVDEVICE_DECKLINK_ENC_H */

64
externals/ffmpeg/libavdevice/decklink_enc_c.c vendored Executable file
View File

@@ -0,0 +1,64 @@
/*
* Blackmagic DeckLink output
* Copyright (c) 2013-2014 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavformat/avformat.h"
#include "libavutil/opt.h"
#include "decklink_common_c.h"
#include "decklink_enc.h"
#define OFFSET(x) offsetof(struct decklink_cctx, x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM
/* AVOptions for the DeckLink output device (all flagged ENC). */
static const AVOption options[] = {
    /* Both listing options are deprecated in favour of "ffmpeg -sinks decklink". */
    { "list_devices", "list available devices" , OFFSET(list_devices), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, ENC },
    { "list_formats", "list supported formats" , OFFSET(list_formats), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 1, ENC },
    /* Seconds of video scheduled before playback starts (0..5, default 0.5). */
    { "preroll" , "video preroll in seconds", OFFSET(preroll ), AV_OPT_TYPE_DOUBLE, { .dbl = 0.5 }, 0, 5, ENC },
    /* Card duplex configuration; named constants below share this unit. */
    { "duplex_mode" , "duplex mode" , OFFSET(duplex_mode ), AV_OPT_TYPE_INT , { .i64 = 0 }, 0, 2, ENC, "duplex_mode"},
    { "unset" , NULL , 0 , AV_OPT_TYPE_CONST , { .i64 = 0 }, 0, 0, ENC, "duplex_mode"},
    { "half" , NULL , 0 , AV_OPT_TYPE_CONST , { .i64 = 1 }, 0, 0, ENC, "duplex_mode"},
    { "full" , NULL , 0 , AV_OPT_TYPE_CONST , { .i64 = 2 }, 0, 0, ENC, "duplex_mode"},
    /* INT_MIN acts as the "unset" sentinel for the genlock offset. */
    { "timing_offset", "genlock timing pixel offset", OFFSET(timing_offset), AV_OPT_TYPE_INT, { .i64 = INT_MIN }, INT_MIN, INT_MAX, ENC, "timing_offset"},
    { "unset" , NULL , 0 , AV_OPT_TYPE_CONST, { .i64 = INT_MIN }, 0, 0, ENC, "timing_offset"},
    { NULL },
};
/* AVClass exposing the muxer AVOptions above and tagging the muxer as a
 * video-output device for ffmpeg's device categorization. */
static const AVClass decklink_muxer_class = {
    .class_name = "Blackmagic DeckLink outdev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};
/* "decklink" output device. Default codecs are s16 PCM audio and wrapped
 * AVFrames for video; AVFMT_NOFILE because the URL names a device. */
AVOutputFormat ff_decklink_muxer = {
    .name            = "decklink",
    .long_name       = NULL_IF_CONFIG_SMALL("Blackmagic DeckLink output"),
    .audio_codec     = AV_CODEC_ID_PCM_S16LE,
    .video_codec     = AV_CODEC_ID_WRAPPED_AVFRAME,
    .subtitle_codec  = AV_CODEC_ID_NONE,
    .flags           = AVFMT_NOFILE,
    .get_device_list = ff_decklink_list_output_devices,
    .priv_class      = &decklink_muxer_class,
    .priv_data_size  = sizeof(struct decklink_cctx),
    .write_header    = ff_decklink_write_header,
    .write_packet    = ff_decklink_write_packet,
    .write_trailer   = ff_decklink_write_trailer,
};

1340
externals/ffmpeg/libavdevice/dshow.c vendored Executable file

File diff suppressed because it is too large Load Diff

354
externals/ffmpeg/libavdevice/dshow_capture.h vendored Executable file
View File

@@ -0,0 +1,354 @@
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_DSHOW_CAPTURE_H
#define AVDEVICE_DSHOW_CAPTURE_H
#define DSHOWDEBUG 0
#include "avdevice.h"
#define COBJMACROS
#define WIN32_LEAN_AND_MEAN
#include <windows.h>
#define NO_DSHOW_STRSAFE
#include <dshow.h>
#include <dvdmedia.h>
#include "libavcodec/internal.h"
/* EC_DEVICE_LOST is not defined in MinGW dshow headers. */
#ifndef EC_DEVICE_LOST
#define EC_DEVICE_LOST 0x1f
#endif
long ff_copy_dshow_media_type(AM_MEDIA_TYPE *dst, const AM_MEDIA_TYPE *src);
void ff_print_VIDEO_STREAM_CONFIG_CAPS(const VIDEO_STREAM_CONFIG_CAPS *caps);
void ff_print_AUDIO_STREAM_CONFIG_CAPS(const AUDIO_STREAM_CONFIG_CAPS *caps);
void ff_print_AM_MEDIA_TYPE(const AM_MEDIA_TYPE *type);
void ff_printGUID(const GUID *g);
extern const AVClass *ff_dshow_context_class_ptr;
#define dshowdebug(...) ff_dlog(&ff_dshow_context_class_ptr, __VA_ARGS__)
/* No-op cleanup helper for DECLARE_DESTROY() users that need no per-class
 * teardown; the argument is deliberately ignored. */
static inline void nothing(void *foo)
{
    (void)foo; /* unused */
}
/* Maps an interface GUID to the byte offset of the matching embedded
 * interface inside an object; consumed by DECLARE_QUERYINTERFACE() below. */
struct GUIDoffset {
    const GUID *iid; /* interface id to match against */
    int offset;      /* offset of that interface within the object */
};
/* Kind of capture device a filter/pin instance represents. */
enum dshowDeviceType {
    VideoDevice = 0,
    AudioDevice = 1,
};
/* Which DirectShow source filter category a device is opened from. */
enum dshowSourceFilterType {
    VideoSourceDevice = 0,
    AudioSourceDevice = 1,
};
/* Generates <class>_QueryInterface(): scans the GUID->offset table passed
 * as __VA_ARGS__ and, on a match, AddRef()s the object and returns the
 * embedded interface at that offset; otherwise clears *ppvObject and
 * reports E_NOINTERFACE. */
#define DECLARE_QUERYINTERFACE(class, ...)                                   \
long WINAPI                                                                  \
class##_QueryInterface(class *this, const GUID *riid, void **ppvObject)      \
{                                                                            \
    struct GUIDoffset ifaces[] = __VA_ARGS__;                                \
    int i;                                                                   \
    dshowdebug(AV_STRINGIFY(class)"_QueryInterface(%p, %p, %p)\n", this, riid, ppvObject); \
    ff_printGUID(riid);                                                      \
    if (!ppvObject)                                                          \
        return E_POINTER;                                                    \
    for (i = 0; i < sizeof(ifaces)/sizeof(ifaces[0]); i++) {                 \
        if (IsEqualGUID(riid, ifaces[i].iid)) {                              \
            void *obj = (void *) ((uint8_t *) this + ifaces[i].offset);      \
            class##_AddRef(this);                                            \
            dshowdebug("\tfound %d with offset %d\n", i, ifaces[i].offset);  \
            *ppvObject = (void *) obj;                                       \
            return S_OK;                                                     \
        }                                                                    \
    }                                                                        \
    dshowdebug("\tE_NOINTERFACE\n");                                         \
    *ppvObject = NULL;                                                       \
    return E_NOINTERFACE;                                                    \
}
/* Generates <class>_AddRef(): thread-safe increment of this->ref via
 * InterlockedIncrement(), returning the new count.
 * NOTE: the value in the debug log is computed with a separate, non-atomic
 * read (this->ref+1) and may race the real count. */
#define DECLARE_ADDREF(class)                                                \
unsigned long WINAPI                                                         \
class##_AddRef(class *this)                                                  \
{                                                                            \
    dshowdebug(AV_STRINGIFY(class)"_AddRef(%p)\t%ld\n", this, this->ref+1);  \
    return InterlockedIncrement(&this->ref);                                 \
}
/* Generates <class>_Release(): thread-safe decrement via
 * InterlockedDecrement(); destroys the object through <class>_Destroy()
 * when the count reaches zero. Returns the new count. */
#define DECLARE_RELEASE(class)                                               \
unsigned long WINAPI                                                         \
class##_Release(class *this)                                                 \
{                                                                            \
    long ref = InterlockedDecrement(&this->ref);                             \
    dshowdebug(AV_STRINGIFY(class)"_Release(%p)\t%ld\n", this, ref);         \
    if (!ref)                                                                \
        class##_Destroy(this);                                               \
    return ref;                                                              \
}
/* Generates <class>_Destroy(): runs the per-class cleanup callback "func",
 * then frees the vtable and the object itself with CoTaskMemFree().
 * NOTE(review): func(this) is invoked before the NULL check, so cleanup
 * callbacks must tolerate this == NULL — confirm every func passed in does. */
#define DECLARE_DESTROY(class, func)                                         \
void class##_Destroy(class *this)                                            \
{                                                                            \
    dshowdebug(AV_STRINGIFY(class)"_Destroy(%p)\n", this);                   \
    func(this);                                                              \
    if (this) {                                                              \
        if (this->vtbl)                                                      \
            CoTaskMemFree(this->vtbl);                                       \
        CoTaskMemFree(this);                                                 \
    }                                                                        \
}
/* Generates <class>_Create(): allocates the object and its vtable, zeroes
 * both, sets the initial refcount to 1 and evaluates the caller-supplied
 * "setup" expression; a setup failure tears the fully-initialized object
 * down through <class>_Destroy(). Returns NULL on any failure.
 * Fix: the original fail path also ran class##_Destroy() when one of the
 * raw allocations failed, reading the still-uninitialized this->vtbl (UB)
 * and leaking vtbl when this was NULL; free the blocks directly instead. */
#define DECLARE_CREATE(class, setup, ...)                                    \
class *class##_Create(__VA_ARGS__)                                           \
{                                                                            \
    class *this = CoTaskMemAlloc(sizeof(class));                             \
    void *vtbl = CoTaskMemAlloc(sizeof(*this->vtbl));                        \
    dshowdebug(AV_STRINGIFY(class)"_Create(%p)\n", this);                    \
    if (!this || !vtbl) {                                                    \
        /* "this" is not zeroed yet, so class##_Destroy() must not run:     \
         * it would read the uninitialized this->vtbl. Free both blocks     \
         * directly (CoTaskMemFree(NULL) is a no-op). */                     \
        CoTaskMemFree(vtbl);                                                 \
        CoTaskMemFree(this);                                                 \
        dshowdebug("could not create "AV_STRINGIFY(class)"\n");              \
        return NULL;                                                         \
    }                                                                        \
    ZeroMemory(this, sizeof(class));                                         \
    ZeroMemory(vtbl, sizeof(*this->vtbl));                                   \
    this->ref = 1;                                                           \
    this->vtbl = vtbl;                                                       \
    if (!setup)                                                              \
        goto fail;                                                           \
    dshowdebug("created "AV_STRINGIFY(class)" %p\n", this);                  \
    return this;                                                             \
fail:                                                                        \
    class##_Destroy(this);                                                   \
    dshowdebug("could not create "AV_STRINGIFY(class)"\n");                  \
    return NULL;                                                             \
}
/* Install method <class>_<fn> into the vtable slot of the same name; the
 * void* cast erases the exact function-pointer type. */
#define SETVTBL(vtbl, class, fn) \
    do { (vtbl)->fn = (void *) class##_##fn; } while(0)
/*****************************************************************************
* Forward Declarations
****************************************************************************/
typedef struct libAVPin libAVPin;
typedef struct libAVMemInputPin libAVMemInputPin;
typedef struct libAVEnumPins libAVEnumPins;
typedef struct libAVEnumMediaTypes libAVEnumMediaTypes;
typedef struct libAVFilter libAVFilter;
/*****************************************************************************
* libAVPin
****************************************************************************/
/* Output pin of the capture source filter. Exposes IPin through vtbl (the
 * interface at offset 0) and a secondary IMemInputPin through imemvtbl so
 * upstream filters can push media samples to us. */
struct libAVPin {
    IPinVtbl *vtbl;             /* IPin method table (primary interface) */
    long ref;                   /* COM reference count */
    libAVFilter *filter;        /* owning filter */
    IPin *connectedto;          /* peer pin once connected, else NULL */
    AM_MEDIA_TYPE type;         /* negotiated media type */
    IMemInputPinVtbl *imemvtbl; /* embedded IMemInputPin interface */
};
long WINAPI libAVPin_QueryInterface (libAVPin *, const GUID *, void **);
unsigned long WINAPI libAVPin_AddRef (libAVPin *);
unsigned long WINAPI libAVPin_Release (libAVPin *);
long WINAPI libAVPin_Connect (libAVPin *, IPin *, const AM_MEDIA_TYPE *);
long WINAPI libAVPin_ReceiveConnection (libAVPin *, IPin *, const AM_MEDIA_TYPE *);
long WINAPI libAVPin_Disconnect (libAVPin *);
long WINAPI libAVPin_ConnectedTo (libAVPin *, IPin **);
long WINAPI libAVPin_ConnectionMediaType (libAVPin *, AM_MEDIA_TYPE *);
long WINAPI libAVPin_QueryPinInfo (libAVPin *, PIN_INFO *);
long WINAPI libAVPin_QueryDirection (libAVPin *, PIN_DIRECTION *);
long WINAPI libAVPin_QueryId (libAVPin *, wchar_t **);
long WINAPI libAVPin_QueryAccept (libAVPin *, const AM_MEDIA_TYPE *);
long WINAPI libAVPin_EnumMediaTypes (libAVPin *, IEnumMediaTypes **);
long WINAPI libAVPin_QueryInternalConnections(libAVPin *, IPin **, unsigned long *);
long WINAPI libAVPin_EndOfStream (libAVPin *);
long WINAPI libAVPin_BeginFlush (libAVPin *);
long WINAPI libAVPin_EndFlush (libAVPin *);
long WINAPI libAVPin_NewSegment (libAVPin *, REFERENCE_TIME, REFERENCE_TIME, double);
long WINAPI libAVMemInputPin_QueryInterface (libAVMemInputPin *, const GUID *, void **);
unsigned long WINAPI libAVMemInputPin_AddRef (libAVMemInputPin *);
unsigned long WINAPI libAVMemInputPin_Release (libAVMemInputPin *);
long WINAPI libAVMemInputPin_GetAllocator (libAVMemInputPin *, IMemAllocator **);
long WINAPI libAVMemInputPin_NotifyAllocator (libAVMemInputPin *, IMemAllocator *, BOOL);
long WINAPI libAVMemInputPin_GetAllocatorRequirements(libAVMemInputPin *, ALLOCATOR_PROPERTIES *);
long WINAPI libAVMemInputPin_Receive (libAVMemInputPin *, IMediaSample *);
long WINAPI libAVMemInputPin_ReceiveMultiple (libAVMemInputPin *, IMediaSample **, long, long *);
long WINAPI libAVMemInputPin_ReceiveCanBlock (libAVMemInputPin *);
void libAVPin_Destroy(libAVPin *);
libAVPin *libAVPin_Create (libAVFilter *filter);
void libAVMemInputPin_Destroy(libAVMemInputPin *);
/*****************************************************************************
* libAVEnumPins
****************************************************************************/
/* IEnumPins enumerator over a filter's pin(s). */
struct libAVEnumPins {
    IEnumPinsVtbl *vtbl;
    long ref;            /* COM reference count */
    int pos;             /* enumeration cursor — presumably 0/1 since one
                          * pin is exposed; confirm in dshow_enumpins.c */
    libAVPin *pin;       /* the pin being enumerated */
    libAVFilter *filter; /* filter the enumerator belongs to */
};
long WINAPI libAVEnumPins_QueryInterface(libAVEnumPins *, const GUID *, void **);
unsigned long WINAPI libAVEnumPins_AddRef (libAVEnumPins *);
unsigned long WINAPI libAVEnumPins_Release (libAVEnumPins *);
long WINAPI libAVEnumPins_Next (libAVEnumPins *, unsigned long, IPin **, unsigned long *);
long WINAPI libAVEnumPins_Skip (libAVEnumPins *, unsigned long);
long WINAPI libAVEnumPins_Reset (libAVEnumPins *);
long WINAPI libAVEnumPins_Clone (libAVEnumPins *, libAVEnumPins **);
void libAVEnumPins_Destroy(libAVEnumPins *);
libAVEnumPins *libAVEnumPins_Create (libAVPin *pin, libAVFilter *filter);
/*****************************************************************************
* libAVEnumMediaTypes
****************************************************************************/
/* IEnumMediaTypes enumerator over a single media type. */
struct libAVEnumMediaTypes {
    IEnumMediaTypesVtbl *vtbl;
    long ref;          /* COM reference count */
    int pos;           /* enumeration cursor */
    AM_MEDIA_TYPE type; /* the media type handed out by Next() */
};
long WINAPI libAVEnumMediaTypes_QueryInterface(libAVEnumMediaTypes *, const GUID *, void **);
unsigned long WINAPI libAVEnumMediaTypes_AddRef (libAVEnumMediaTypes *);
unsigned long WINAPI libAVEnumMediaTypes_Release (libAVEnumMediaTypes *);
long WINAPI libAVEnumMediaTypes_Next (libAVEnumMediaTypes *, unsigned long, AM_MEDIA_TYPE **, unsigned long *);
long WINAPI libAVEnumMediaTypes_Skip (libAVEnumMediaTypes *, unsigned long);
long WINAPI libAVEnumMediaTypes_Reset (libAVEnumMediaTypes *);
long WINAPI libAVEnumMediaTypes_Clone (libAVEnumMediaTypes *, libAVEnumMediaTypes **);
void libAVEnumMediaTypes_Destroy(libAVEnumMediaTypes *);
libAVEnumMediaTypes *libAVEnumMediaTypes_Create(const AM_MEDIA_TYPE *type);
/*****************************************************************************
 * libAVFilter: IBaseFilter implementation that receives captured samples
 * through its single input pin (implementations in dshow_filter.c).
 ****************************************************************************/
struct libAVFilter {
    IBaseFilterVtbl *vtbl;  /* COM vtable pointer; must stay first */
    long ref;               /* COM reference count */
    const wchar_t *name;
    libAVPin *pin;          /* the single input pin, created in Setup */
    FILTER_INFO info;       /* filter name + owning graph (JoinFilterGraph) */
    FILTER_STATE state;     /* State_Stopped / State_Paused / State_Running */
    IReferenceClock *clock; /* reference clock; a ref is held (SetSyncSource) */
    enum dshowDeviceType type;  /* VideoDevice or AudioDevice */
    void *priv_data;        /* opaque pointer handed back to callback */
    int stream_index;       /* presumably the av stream index forwarded to
                             * callback — usage not visible here, confirm */
    int64_t start_time;     /* REFERENCE_TIME passed to Run() */
    /* invoked for each received media sample (caller is dshow_pin.c) */
    void (*callback)(void *priv_data, int index, uint8_t *buf, int buf_size, int64_t time, enum dshowDeviceType type);
};
long          WINAPI libAVFilter_QueryInterface (libAVFilter *, const GUID *, void **);
unsigned long WINAPI libAVFilter_AddRef         (libAVFilter *);
unsigned long WINAPI libAVFilter_Release        (libAVFilter *);
long          WINAPI libAVFilter_GetClassID     (libAVFilter *, CLSID *);
long          WINAPI libAVFilter_Stop           (libAVFilter *);
long          WINAPI libAVFilter_Pause          (libAVFilter *);
long          WINAPI libAVFilter_Run            (libAVFilter *, REFERENCE_TIME);
long          WINAPI libAVFilter_GetState       (libAVFilter *, DWORD, FILTER_STATE *);
long          WINAPI libAVFilter_SetSyncSource  (libAVFilter *, IReferenceClock *);
long          WINAPI libAVFilter_GetSyncSource  (libAVFilter *, IReferenceClock **);
long          WINAPI libAVFilter_EnumPins       (libAVFilter *, IEnumPins **);
long          WINAPI libAVFilter_FindPin        (libAVFilter *, const wchar_t *, IPin **);
long          WINAPI libAVFilter_QueryFilterInfo(libAVFilter *, FILTER_INFO *);
long          WINAPI libAVFilter_JoinFilterGraph(libAVFilter *, IFilterGraph *, const wchar_t *);
long          WINAPI libAVFilter_QueryVendorInfo(libAVFilter *, wchar_t **);
void libAVFilter_Destroy(libAVFilter *);
libAVFilter *libAVFilter_Create (void *, void *, enum dshowDeviceType);
/*****************************************************************************
 * dshow_ctx: private context of the dshow input device. All [2] arrays are
 * indexed by enum dshowDeviceType (VideoDevice / AudioDevice).
 ****************************************************************************/
struct dshow_ctx {
    const AVClass *class;           /* must be first for AVOption handling */

    IGraphBuilder *graph;           /* the DirectShow filter graph */

    char *device_name[2];           /* user-requested device names */
    char *device_unique_name[2];    /* presumably device moniker display
                                     * names — TODO confirm against dshow.c */

    int video_device_number;        /* pick the Nth device with a given name */
    int audio_device_number;

    int list_options;               /* user options (AVOptions, presumably) */
    int list_devices;
    int audio_buffer_size;
    int crossbar_video_input_pin_number;  /* -1 = leave routing untouched */
    int crossbar_audio_input_pin_number;
    char *video_pin_name;
    char *audio_pin_name;
    int show_video_device_dialog;
    int show_audio_device_dialog;
    int show_video_crossbar_connection_dialog;
    int show_audio_crossbar_connection_dialog;
    int show_analog_tv_tuner_dialog;
    int show_analog_tv_tuner_audio_dialog;
    char *audio_filter_load_file;
    char *audio_filter_save_file;
    char *video_filter_load_file;
    char *video_filter_save_file;

    IBaseFilter *device_filter[2];  /* source filters for the devices */
    IPin *device_pin[2];            /* their output pins */
    libAVFilter *capture_filter[2]; /* our sink filters (see dshow_filter.c) */
    libAVPin *capture_pin[2];       /* their input pins */

    HANDLE mutex;                   /* guards the packet list, presumably */
    HANDLE event[2];                /* event[0] is set by DirectShow
                                     * event[1] is set by callback() */
    AVPacketList *pktl;             /* queue of captured packets */
    int eof;
    int64_t curbufsize[2];          /* bytes currently buffered per stream */
    unsigned int video_frame_num;

    IMediaControl *control;         /* graph run/stop control */
    IMediaEvent *media_event;

    enum AVPixelFormat pixel_format;  /* user-forced capture parameters */
    enum AVCodecID video_codec_id;
    char *framerate;

    int requested_width;
    int requested_height;
    AVRational requested_framerate;

    int sample_rate;
    int sample_size;
    int channels;
};
/*****************************************************************************
 * CrossBar
 ****************************************************************************/
/* Locate a crossbar upstream of device_filter (if any) and route/list its
 * pins; implemented in dshow_crossbar.c. Returns an HRESULT. */
HRESULT dshow_try_setup_crossbar_options(ICaptureGraphBuilder2 *graph_builder2,
                                         IBaseFilter *device_filter, enum dshowDeviceType devtype, AVFormatContext *avctx);
/* Show a filter's property page; implemented elsewhere (dshow.c, presumably). */
void dshow_show_filter_properties(IBaseFilter *pFilter, AVFormatContext *avctx);

#endif /* AVDEVICE_DSHOW_CAPTURE_H */

190
externals/ffmpeg/libavdevice/dshow_common.c vendored Executable file
View File

@@ -0,0 +1,190 @@
/*
* Directshow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dshow_capture.h"
/**
 * Deep-copy an AM_MEDIA_TYPE, duplicating the trailing format block with
 * CoTaskMemAlloc (so it can be freed like any DirectShow-allocated type).
 *
 * The source's pUnk is deliberately not carried over (we never hold a
 * reference on it), so the copy owns only its format block.
 *
 * @return S_OK, E_POINTER on inconsistent input, or E_OUTOFMEMORY.
 */
long ff_copy_dshow_media_type(AM_MEDIA_TYPE *dst, const AM_MEDIA_TYPE *src)
{
    uint8_t *pbFormat = NULL;

    if (src->cbFormat) {
        /* A nonzero cbFormat with a NULL pbFormat would make the memcpy
         * below read from NULL; reject such inconsistent input instead. */
        if (!src->pbFormat)
            return E_POINTER;
        pbFormat = CoTaskMemAlloc(src->cbFormat);
        if (!pbFormat)
            return E_OUTOFMEMORY;
        memcpy(pbFormat, src->pbFormat, src->cbFormat);
    }

    *dst = *src;
    dst->pUnk     = NULL;
    dst->pbFormat = pbFormat;

    return S_OK;
}
/* Dump a GUID in raw "data1 data2 data3 data4" form to the debug log.
 * Compiles to a no-op unless DSHOWDEBUG is enabled. */
void ff_printGUID(const GUID *g)
{
#if DSHOWDEBUG
    const uint32_t *d = (const uint32_t *) &g->Data1;
    const uint16_t *w = (const uint16_t *) &g->Data2;
    const uint8_t  *c = (const uint8_t  *) &g->Data4;

    dshowdebug("0x%08x 0x%04x 0x%04x %02x%02x%02x%02x%02x%02x%02x%02x",
               d[0], w[0], w[1],
               c[0], c[1], c[2], c[3], c[4], c[5], c[6], c[7]);
#endif
}
/* AVClass item_name callback: every dshow context logs under the same name. */
static const char *dshow_context_to_name(void *ptr)
{
    static const char ctx_name[] = "dshow";
    return ctx_name;
}
static const AVClass ff_dshow_context_class = { "DirectShow", dshow_context_to_name };
const AVClass *ff_dshow_context_class_ptr = &ff_dshow_context_class;
/* Print one struct member as "<name>:\t<value>", where `type` is the printf
 * length/conversion suffix to use ("lu", "ld", "d", ...). */
#define dstruct(pctx, sname, var, type) \
dshowdebug(" "#var":\t%"type"\n", sname->var)
#if DSHOWDEBUG
/* Dump a BITMAPINFOHEADER to the debug log. */
static void dump_bih(void *s, BITMAPINFOHEADER *bih)
{
    dshowdebug(" BITMAPINFOHEADER\n");
    dstruct(s, bih, biSize, "lu");
    dstruct(s, bih, biWidth, "ld");
    dstruct(s, bih, biHeight, "ld");
    dstruct(s, bih, biPlanes, "d");
    dstruct(s, bih, biBitCount, "d");
    dstruct(s, bih, biCompression, "lu");
    dshowdebug(" biCompression:\t\"%.4s\"\n",
               (char*) &bih->biCompression);
    dstruct(s, bih, biSizeImage, "lu");
    /* biXPelsPerMeter/biYPelsPerMeter are signed LONGs; "ld" replaces the
     * previous "lu", which was a mismatched conversion specifier. */
    dstruct(s, bih, biXPelsPerMeter, "ld");
    dstruct(s, bih, biYPelsPerMeter, "ld");
    dstruct(s, bih, biClrUsed, "lu");
    dstruct(s, bih, biClrImportant, "lu");
}
#endif
/* Dump a VIDEO_STREAM_CONFIG_CAPS to the debug log; no-op unless DSHOWDEBUG.
 * NOTE(review): several `long` members (CropGranularityX/Y, CropAlignX/Y,
 * OutputGranularityX/Y, StretchTapsX/Y, ShrinkTapsX/Y) are printed with "%d";
 * benign on Windows where long is 32-bit, but worth confirming. */
void ff_print_VIDEO_STREAM_CONFIG_CAPS(const VIDEO_STREAM_CONFIG_CAPS *caps)
{
#if DSHOWDEBUG
    dshowdebug(" VIDEO_STREAM_CONFIG_CAPS\n");
    dshowdebug(" guid\t");
    ff_printGUID(&caps->guid);
    dshowdebug("\n");
    dshowdebug(" VideoStandard\t%lu\n", caps->VideoStandard);
    dshowdebug(" InputSize %ld\t%ld\n", caps->InputSize.cx, caps->InputSize.cy);
    dshowdebug(" MinCroppingSize %ld\t%ld\n", caps->MinCroppingSize.cx, caps->MinCroppingSize.cy);
    dshowdebug(" MaxCroppingSize %ld\t%ld\n", caps->MaxCroppingSize.cx, caps->MaxCroppingSize.cy);
    dshowdebug(" CropGranularityX\t%d\n", caps->CropGranularityX);
    dshowdebug(" CropGranularityY\t%d\n", caps->CropGranularityY);
    dshowdebug(" CropAlignX\t%d\n", caps->CropAlignX);
    dshowdebug(" CropAlignY\t%d\n", caps->CropAlignY);
    dshowdebug(" MinOutputSize %ld\t%ld\n", caps->MinOutputSize.cx, caps->MinOutputSize.cy);
    dshowdebug(" MaxOutputSize %ld\t%ld\n", caps->MaxOutputSize.cx, caps->MaxOutputSize.cy);
    dshowdebug(" OutputGranularityX\t%d\n", caps->OutputGranularityX);
    dshowdebug(" OutputGranularityY\t%d\n", caps->OutputGranularityY);
    dshowdebug(" StretchTapsX\t%d\n", caps->StretchTapsX);
    dshowdebug(" StretchTapsY\t%d\n", caps->StretchTapsY);
    dshowdebug(" ShrinkTapsX\t%d\n", caps->ShrinkTapsX);
    dshowdebug(" ShrinkTapsY\t%d\n", caps->ShrinkTapsY);
    dshowdebug(" MinFrameInterval\t%"PRId64"\n", caps->MinFrameInterval);
    dshowdebug(" MaxFrameInterval\t%"PRId64"\n", caps->MaxFrameInterval);
    dshowdebug(" MinBitsPerSecond\t%ld\n", caps->MinBitsPerSecond);
    dshowdebug(" MaxBitsPerSecond\t%ld\n", caps->MaxBitsPerSecond);
#endif
}
/* Dump an AUDIO_STREAM_CONFIG_CAPS to the debug log; no-op unless DSHOWDEBUG. */
void ff_print_AUDIO_STREAM_CONFIG_CAPS(const AUDIO_STREAM_CONFIG_CAPS *caps)
{
#if DSHOWDEBUG
    dshowdebug(" AUDIO_STREAM_CONFIG_CAPS\n");
    dshowdebug(" guid\t");
    ff_printGUID(&caps->guid);
    dshowdebug("\n");
    dshowdebug(" MinimumChannels\t%lu\n", caps->MinimumChannels);
    dshowdebug(" MaximumChannels\t%lu\n", caps->MaximumChannels);
    dshowdebug(" ChannelsGranularity\t%lu\n", caps->ChannelsGranularity);
    dshowdebug(" MinimumBitsPerSample\t%lu\n", caps->MinimumBitsPerSample);
    dshowdebug(" MaximumBitsPerSample\t%lu\n", caps->MaximumBitsPerSample);
    dshowdebug(" BitsPerSampleGranularity\t%lu\n", caps->BitsPerSampleGranularity);
    dshowdebug(" MinimumSampleFrequency\t%lu\n", caps->MinimumSampleFrequency);
    dshowdebug(" MaximumSampleFrequency\t%lu\n", caps->MaximumSampleFrequency);
    dshowdebug(" SampleFrequencyGranularity\t%lu\n", caps->SampleFrequencyGranularity);
#endif
}
/* Dump an AM_MEDIA_TYPE, including its format block when the formattype is
 * one we understand (VideoInfo, VideoInfo2, WaveFormatEx).
 * No-op unless DSHOWDEBUG. */
void ff_print_AM_MEDIA_TYPE(const AM_MEDIA_TYPE *type)
{
#if DSHOWDEBUG
    dshowdebug(" majortype\t");
    ff_printGUID(&type->majortype);
    dshowdebug("\n");
    dshowdebug(" subtype\t");
    ff_printGUID(&type->subtype);
    dshowdebug("\n");
    dshowdebug(" bFixedSizeSamples\t%d\n", type->bFixedSizeSamples);
    dshowdebug(" bTemporalCompression\t%d\n", type->bTemporalCompression);
    dshowdebug(" lSampleSize\t%lu\n", type->lSampleSize);
    dshowdebug(" formattype\t");
    ff_printGUID(&type->formattype);
    dshowdebug("\n");
    dshowdebug(" pUnk\t%p\n", type->pUnk);
    dshowdebug(" cbFormat\t%lu\n", type->cbFormat);
    dshowdebug(" pbFormat\t%p\n", type->pbFormat);
    /* Decode the format block according to formattype. */
    if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo)) {
        VIDEOINFOHEADER *v = (void *) type->pbFormat;
        dshowdebug(" rcSource: left %ld top %ld right %ld bottom %ld\n",
                   v->rcSource.left, v->rcSource.top, v->rcSource.right, v->rcSource.bottom);
        dshowdebug(" rcTarget: left %ld top %ld right %ld bottom %ld\n",
                   v->rcTarget.left, v->rcTarget.top, v->rcTarget.right, v->rcTarget.bottom);
        dshowdebug(" dwBitRate: %lu\n", v->dwBitRate);
        dshowdebug(" dwBitErrorRate: %lu\n", v->dwBitErrorRate);
        dshowdebug(" AvgTimePerFrame: %"PRId64"\n", v->AvgTimePerFrame);
        dump_bih(NULL, &v->bmiHeader);
    } else if (IsEqualGUID(&type->formattype, &FORMAT_VideoInfo2)) {
        VIDEOINFOHEADER2 *v = (void *) type->pbFormat;
        dshowdebug(" rcSource: left %ld top %ld right %ld bottom %ld\n",
                   v->rcSource.left, v->rcSource.top, v->rcSource.right, v->rcSource.bottom);
        dshowdebug(" rcTarget: left %ld top %ld right %ld bottom %ld\n",
                   v->rcTarget.left, v->rcTarget.top, v->rcTarget.right, v->rcTarget.bottom);
        dshowdebug(" dwBitRate: %lu\n", v->dwBitRate);
        dshowdebug(" dwBitErrorRate: %lu\n", v->dwBitErrorRate);
        dshowdebug(" AvgTimePerFrame: %"PRId64"\n", v->AvgTimePerFrame);
        dshowdebug(" dwInterlaceFlags: %lu\n", v->dwInterlaceFlags);
        dshowdebug(" dwCopyProtectFlags: %lu\n", v->dwCopyProtectFlags);
        dshowdebug(" dwPictAspectRatioX: %lu\n", v->dwPictAspectRatioX);
        dshowdebug(" dwPictAspectRatioY: %lu\n", v->dwPictAspectRatioY);
        // dshowdebug(" dwReserved1: %lu\n", v->u.dwReserved1); /* mingw-w64 is buggy and doesn't name unnamed unions */
        dshowdebug(" dwReserved2: %lu\n", v->dwReserved2);
        dump_bih(NULL, &v->bmiHeader);
    } else if (IsEqualGUID(&type->formattype, &FORMAT_WaveFormatEx)) {
        WAVEFORMATEX *fx = (void *) type->pbFormat;
        dshowdebug(" wFormatTag: %u\n", fx->wFormatTag);
        dshowdebug(" nChannels: %u\n", fx->nChannels);
        dshowdebug(" nSamplesPerSec: %lu\n", fx->nSamplesPerSec);
        dshowdebug(" nAvgBytesPerSec: %lu\n", fx->nAvgBytesPerSec);
        dshowdebug(" nBlockAlign: %u\n", fx->nBlockAlign);
        dshowdebug(" wBitsPerSample: %u\n", fx->wBitsPerSample);
        dshowdebug(" cbSize: %u\n", fx->cbSize);
    }
#endif
}

208
externals/ffmpeg/libavdevice/dshow_crossbar.c vendored Executable file
View File

@@ -0,0 +1,208 @@
/*
* DirectShow capture interface
* Copyright (c) 2015 Roger Pack
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dshow_capture.h"
/* Map a PhysConn_* crossbar pin type to a human-readable label. */
static const char *
GetPhysicalPinName(long pin_type)
{
    static const struct {
        long type;
        const char *name;
    } pin_names[] = {
        { PhysConn_Video_Tuner,           "Video Tuner" },
        { PhysConn_Video_Composite,       "Video Composite" },
        { PhysConn_Video_SVideo,          "S-Video" },
        { PhysConn_Video_RGB,             "Video RGB" },
        { PhysConn_Video_YRYBY,           "Video YRYBY" },
        { PhysConn_Video_SerialDigital,   "Video Serial Digital" },
        { PhysConn_Video_ParallelDigital, "Video Parallel Digital" },
        { PhysConn_Video_SCSI,            "Video SCSI" },
        { PhysConn_Video_AUX,             "Video AUX" },
        { PhysConn_Video_1394,            "Video 1394" },
        { PhysConn_Video_USB,             "Video USB" },
        { PhysConn_Video_VideoDecoder,    "Video Decoder" },
        { PhysConn_Video_VideoEncoder,    "Video Encoder" },
        { PhysConn_Audio_Tuner,           "Audio Tuner" },
        { PhysConn_Audio_Line,            "Audio Line" },
        { PhysConn_Audio_Mic,             "Audio Microphone" },
        { PhysConn_Audio_AESDigital,      "Audio AES/EBU Digital" },
        { PhysConn_Audio_SPDIFDigital,    "Audio S/PDIF" },
        { PhysConn_Audio_SCSI,            "Audio SCSI" },
        { PhysConn_Audio_AUX,             "Audio AUX" },
        { PhysConn_Audio_1394,            "Audio 1394" },
        { PhysConn_Audio_USB,             "Audio USB" },
        { PhysConn_Audio_AudioDecoder,    "Audio Decoder" },
    };
    size_t i;

    for (i = 0; i < sizeof(pin_names) / sizeof(pin_names[0]); i++)
        if (pin_names[i].type == pin_type)
            return pin_names[i].name;
    return "Unknown Crossbar Pin Type—Please report!";
}
/**
 * List (and optionally route) a crossbar's input/output pins.
 *
 * When crossbar_video_input_pin_number / crossbar_audio_input_pin_number are
 * set (!= -1), routes them to the "Video Decoder" / "Audio Decoder" output
 * pin (assumed to be unique). Logs the full switching matrix at info level
 * when -list_options was requested, debug level otherwise.
 *
 * @return S_OK on success, a failing HRESULT (or AVERROR(EIO) on a failed
 *         video route — kept for compatibility) otherwise.
 */
static HRESULT
setup_crossbar_options(IAMCrossbar *cross_bar, enum dshowDeviceType devtype, AVFormatContext *avctx)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    long count_output_pins, count_input_pins;
    int i;
    int log_level = ctx->list_options ? AV_LOG_INFO : AV_LOG_DEBUG;
    int video_input_pin = ctx->crossbar_video_input_pin_number;
    int audio_input_pin = ctx->crossbar_audio_input_pin_number;
    const char *device_name = ctx->device_name[devtype];
    HRESULT hr;

    av_log(avctx, log_level, "Crossbar Switching Information for %s:\n", device_name);
    hr = IAMCrossbar_get_PinCounts(cross_bar, &count_output_pins, &count_input_pins);
    if (hr != S_OK) {
        av_log(avctx, AV_LOG_ERROR, "Unable to get crossbar pin counts\n");
        return hr;
    }

    for (i = 0; i < count_output_pins; i++)
    {
        int j;
        long related_pin, pin_type, route_to_pin;

        hr = IAMCrossbar_get_CrossbarPinInfo(cross_bar, FALSE, i, &related_pin, &pin_type);
        /* pin_type is indeterminate if the call failed; bail out rather than
         * branch on garbage (this result was previously unchecked). */
        if (hr != S_OK) {
            av_log(avctx, AV_LOG_ERROR, "unable to get crossbar info output from pin %d\n", i);
            return hr;
        }
        if (pin_type == PhysConn_Video_VideoDecoder) {
            /* assume there is only one "Video (and one Audio) Decoder" output pin, and it's all we care about routing to...for now */
            if (video_input_pin != -1) {
                av_log(avctx, log_level, "Routing video input from pin %d\n", video_input_pin);
                hr = IAMCrossbar_Route(cross_bar, i, video_input_pin);
                if (hr != S_OK) {
                    av_log(avctx, AV_LOG_ERROR, "Unable to route video input from pin %d\n", video_input_pin);
                    return AVERROR(EIO);
                }
            }
        } else if (pin_type == PhysConn_Audio_AudioDecoder) {
            if (audio_input_pin != -1) {
                av_log(avctx, log_level, "Routing audio input from pin %d\n", audio_input_pin);
                hr = IAMCrossbar_Route(cross_bar, i, audio_input_pin);
                if (hr != S_OK) {
                    av_log(avctx, AV_LOG_ERROR, "Unable to route audio input from pin %d\n", audio_input_pin);
                    return hr;
                }
            }
        } else {
            av_log(avctx, AV_LOG_WARNING, "Unexpected output pin type, please report the type if you want to use this (%s)", GetPhysicalPinName(pin_type));
        }

        hr = IAMCrossbar_get_IsRoutedTo(cross_bar, i, &route_to_pin);
        if (hr != S_OK) {
            av_log(avctx, AV_LOG_ERROR, "Unable to get crossbar is routed to from pin %d\n", i);
            return hr;
        }
        av_log(avctx, log_level, " Crossbar Output pin %d: \"%s\" related output pin: %ld ", i, GetPhysicalPinName(pin_type), related_pin);
        av_log(avctx, log_level, "current input pin: %ld ", route_to_pin);
        av_log(avctx, log_level, "compatible input pins: ");
        for (j = 0; j < count_input_pins; j++)
        {
            hr = IAMCrossbar_CanRoute(cross_bar, i, j);
            if (hr == S_OK)
                av_log(avctx, log_level ,"%d ", j);
        }
        av_log(avctx, log_level, "\n");
    }

    for (i = 0; i < count_input_pins; i++)
    {
        long related_pin, pin_type;
        hr = IAMCrossbar_get_CrossbarPinInfo(cross_bar, TRUE, i, &related_pin, &pin_type);
        if (hr != S_OK) {
            av_log(avctx, AV_LOG_ERROR, "unable to get crossbar info audio input from pin %d\n", i);
            return hr;
        }
        av_log(avctx, log_level, " Crossbar Input pin %d - \"%s\" ", i, GetPhysicalPinName(pin_type));
        av_log(avctx, log_level, "related input pin: %ld\n", related_pin);
    }
    return S_OK;
}
/**
 * Given a fully constructed graph, check if there is a cross bar filter, and
 * configure its pins if so. Optionally pops up the crossbar / TV tuner /
 * TV audio property dialogs when the corresponding options were set.
 *
 * @return S_OK when no crossbar exists or configuration succeeded, a failing
 *         HRESULT otherwise.
 */
HRESULT
dshow_try_setup_crossbar_options(ICaptureGraphBuilder2 *graph_builder2,
                                 IBaseFilter *device_filter, enum dshowDeviceType devtype, AVFormatContext *avctx)
{
    struct dshow_ctx *ctx = avctx->priv_data;
    IAMCrossbar *cross_bar = NULL;
    IBaseFilter *cross_bar_base_filter = NULL;
    IAMTVTuner *tv_tuner_filter = NULL;
    IBaseFilter *tv_tuner_base_filter = NULL;
    IAMAudioInputMixer *tv_audio_filter = NULL;
    IBaseFilter *tv_audio_base_filter = NULL;
    HRESULT hr;

    hr = ICaptureGraphBuilder2_FindInterface(graph_builder2, &LOOK_UPSTREAM_ONLY, (const GUID *) NULL,
                                             device_filter, &IID_IAMCrossbar, (void**) &cross_bar);
    if (hr != S_OK) {
        /* no crossbar found */
        hr = S_OK;
        goto end;
    }

    /* TODO some TV tuners apparently have multiple crossbars? */
    /* Parenthesized explicitly: && binds tighter than ||. */
    if ((devtype == VideoDevice && ctx->show_video_crossbar_connection_dialog) ||
        (devtype == AudioDevice && ctx->show_audio_crossbar_connection_dialog)) {
        hr = IAMCrossbar_QueryInterface(cross_bar, &IID_IBaseFilter, (void **) &cross_bar_base_filter);
        if (hr != S_OK)
            goto end;
        dshow_show_filter_properties(cross_bar_base_filter, avctx);
    }

    if (devtype == VideoDevice && ctx->show_analog_tv_tuner_dialog) {
        hr = ICaptureGraphBuilder2_FindInterface(graph_builder2, &LOOK_UPSTREAM_ONLY, NULL,
                                                 device_filter, &IID_IAMTVTuner, (void**) &tv_tuner_filter);
        if (hr == S_OK) {
            /* NOTE(review): IAMCrossbar_QueryInterface is applied to an
             * IAMTVTuner*; works because all COM QI macros expand to the same
             * vtable slot, but presumably a tuner QI macro was intended. */
            hr = IAMCrossbar_QueryInterface(tv_tuner_filter, &IID_IBaseFilter, (void **) &tv_tuner_base_filter);
            if (hr != S_OK)
                goto end;
            dshow_show_filter_properties(tv_tuner_base_filter, avctx);
        } else {
            av_log(avctx, AV_LOG_WARNING, "unable to find a tv tuner to display dialog for!");
        }
    }
    if (devtype == AudioDevice && ctx->show_analog_tv_tuner_audio_dialog) {
        hr = ICaptureGraphBuilder2_FindInterface(graph_builder2, &LOOK_UPSTREAM_ONLY, NULL,
                                                 device_filter, &IID_IAMTVAudio, (void**) &tv_audio_filter);
        if (hr == S_OK) {
            hr = IAMCrossbar_QueryInterface(tv_audio_filter, &IID_IBaseFilter, (void **) &tv_audio_base_filter);
            if (hr != S_OK)
                goto end;
            dshow_show_filter_properties(tv_audio_base_filter, avctx);
        } else {
            av_log(avctx, AV_LOG_WARNING, "unable to find a tv audio tuner to display dialog for!");
        }
    }

    hr = setup_crossbar_options(cross_bar, devtype, avctx);
    if (hr != S_OK)
        goto end;

end:
    if (cross_bar)
        IAMCrossbar_Release(cross_bar);
    if (cross_bar_base_filter)
        IBaseFilter_Release(cross_bar_base_filter);
    if (tv_tuner_filter)
        IAMTVTuner_Release(tv_tuner_filter);
    if (tv_tuner_base_filter)
        IBaseFilter_Release(tv_tuner_base_filter);
    return hr;
}

View File

@@ -0,0 +1,105 @@
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dshow_capture.h"
/* Boilerplate IUnknown methods generated from dshow_capture.h macros. */
DECLARE_QUERYINTERFACE(libAVEnumMediaTypes,
    { {&IID_IUnknown,0}, {&IID_IEnumMediaTypes,0} })
DECLARE_ADDREF(libAVEnumMediaTypes)
DECLARE_RELEASE(libAVEnumMediaTypes)

/**
 * IEnumMediaTypes::Next — return the single stored media type at most once.
 * A GUID_NULL majortype marks an empty enumeration. Only n == 1 can ever
 * deliver anything; *fetched (if given) receives 0 or 1.
 */
long WINAPI
libAVEnumMediaTypes_Next(libAVEnumMediaTypes *this, unsigned long n,
                         AM_MEDIA_TYPE **types, unsigned long *fetched)
{
    int count = 0;
    dshowdebug("libAVEnumMediaTypes_Next(%p)\n", this);
    if (!types)
        return E_POINTER;
    if (!this->pos && n == 1) {
        if (!IsEqualGUID(&this->type.majortype, &GUID_NULL)) {
            AM_MEDIA_TYPE *type = av_malloc(sizeof(AM_MEDIA_TYPE));
            if (!type)
                return E_OUTOFMEMORY;
            /* Propagate a failed deep copy instead of handing the caller a
             * partially initialized type (the result was ignored before). */
            if (ff_copy_dshow_media_type(type, &this->type) != S_OK) {
                av_free(type);
                return E_OUTOFMEMORY;
            }
            *types = type;
            count = 1;
        }
        this->pos = 1;
    }
    if (fetched)
        *fetched = count;
    if (!count)
        return S_FALSE;
    return S_OK;
}
/* IEnumMediaTypes::Skip — with only one valid type, any nonzero skip
 * necessarily runs off the end. */
long WINAPI
libAVEnumMediaTypes_Skip(libAVEnumMediaTypes *this, unsigned long n)
{
    dshowdebug("libAVEnumMediaTypes_Skip(%p)\n", this);
    return n ? S_FALSE : S_OK;
}
/* IEnumMediaTypes::Reset — rewind the cursor so Next() can deliver again. */
long WINAPI
libAVEnumMediaTypes_Reset(libAVEnumMediaTypes *this)
{
    dshowdebug("libAVEnumMediaTypes_Reset(%p)\n", this);
    this->pos = 0;
    return S_OK;
}
/* IEnumMediaTypes::Clone — duplicate the enumerator, cursor included. */
long WINAPI
libAVEnumMediaTypes_Clone(libAVEnumMediaTypes *this, libAVEnumMediaTypes **enums)
{
    libAVEnumMediaTypes *copy;

    dshowdebug("libAVEnumMediaTypes_Clone(%p)\n", this);
    if (!enums)
        return E_POINTER;
    copy = libAVEnumMediaTypes_Create(&this->type);
    if (!copy)
        return E_OUTOFMEMORY;
    copy->pos = this->pos;
    *enums = copy;
    return S_OK;
}
/* One-time initialization run by the generated _Create: fill the vtable and
 * deep-copy the (optional) media type. Returns 1 on success, 0 on failure
 * (the generated constructor then tears the object down). */
static int
libAVEnumMediaTypes_Setup(libAVEnumMediaTypes *this, const AM_MEDIA_TYPE *type)
{
    IEnumMediaTypesVtbl *vtbl = this->vtbl;
    SETVTBL(vtbl, libAVEnumMediaTypes, QueryInterface);
    SETVTBL(vtbl, libAVEnumMediaTypes, AddRef);
    SETVTBL(vtbl, libAVEnumMediaTypes, Release);
    SETVTBL(vtbl, libAVEnumMediaTypes, Next);
    SETVTBL(vtbl, libAVEnumMediaTypes, Skip);
    SETVTBL(vtbl, libAVEnumMediaTypes, Reset);
    SETVTBL(vtbl, libAVEnumMediaTypes, Clone);

    if (!type) {
        /* GUID_NULL marks an empty enumeration (see Next). */
        this->type.majortype = GUID_NULL;
    } else {
        /* Fail construction if the deep copy fails; the result was
         * previously ignored, leaving a half-initialized type behind. */
        if (ff_copy_dshow_media_type(&this->type, type) != S_OK)
            return 0;
    }

    return 1;
}
DECLARE_CREATE(libAVEnumMediaTypes, libAVEnumMediaTypes_Setup(this, type), const AM_MEDIA_TYPE *type)
DECLARE_DESTROY(libAVEnumMediaTypes, nothing)

105
externals/ffmpeg/libavdevice/dshow_enumpins.c vendored Executable file
View File

@@ -0,0 +1,105 @@
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dshow_capture.h"
/* Boilerplate IUnknown methods generated from dshow_capture.h macros. */
DECLARE_QUERYINTERFACE(libAVEnumPins,
    { {&IID_IUnknown,0}, {&IID_IEnumPins,0} })
DECLARE_ADDREF(libAVEnumPins)
DECLARE_RELEASE(libAVEnumPins)
/* IEnumPins::Next — hand out the filter's single pin at most once per
 * Reset(); only n == 1 can deliver anything. */
long WINAPI
libAVEnumPins_Next(libAVEnumPins *this, unsigned long n, IPin **pins,
                   unsigned long *fetched)
{
    int delivered = 0;

    dshowdebug("libAVEnumPins_Next(%p)\n", this);
    if (!pins)
        return E_POINTER;
    if (n == 1 && !this->pos) {
        libAVPin_AddRef(this->pin);
        pins[0] = (IPin *) this->pin;
        this->pos = 1;
        delivered = 1;
    }
    if (fetched)
        *fetched = delivered;
    return delivered ? S_OK : S_FALSE;
}
/* IEnumPins::Skip — with only one valid pin, any nonzero skip necessarily
 * runs off the end. */
long WINAPI
libAVEnumPins_Skip(libAVEnumPins *this, unsigned long n)
{
    dshowdebug("libAVEnumPins_Skip(%p)\n", this);
    return n ? S_FALSE : S_OK;
}
/* IEnumPins::Reset — rewind the cursor so Next() can deliver again. */
long WINAPI
libAVEnumPins_Reset(libAVEnumPins *this)
{
    dshowdebug("libAVEnumPins_Reset(%p)\n", this);
    this->pos = 0;
    return S_OK;
}
/* IEnumPins::Clone — duplicate the enumerator, cursor included. */
long WINAPI
libAVEnumPins_Clone(libAVEnumPins *this, libAVEnumPins **pins)
{
    libAVEnumPins *copy;

    dshowdebug("libAVEnumPins_Clone(%p)\n", this);
    if (!pins)
        return E_POINTER;
    copy = libAVEnumPins_Create(this->pin, this->filter);
    if (!copy)
        return E_OUTOFMEMORY;
    copy->pos = this->pos;
    *pins = copy;
    return S_OK;
}
/* One-time initialization run by the generated _Create: fill the vtable and
 * take a reference on the owning filter for the enumerator's lifetime.
 * Returns 1 (cannot fail). */
static int
libAVEnumPins_Setup(libAVEnumPins *this, libAVPin *pin, libAVFilter *filter)
{
    IEnumPinsVtbl *vtbl = this->vtbl;
    SETVTBL(vtbl, libAVEnumPins, QueryInterface);
    SETVTBL(vtbl, libAVEnumPins, AddRef);
    SETVTBL(vtbl, libAVEnumPins, Release);
    SETVTBL(vtbl, libAVEnumPins, Next);
    SETVTBL(vtbl, libAVEnumPins, Skip);
    SETVTBL(vtbl, libAVEnumPins, Reset);
    SETVTBL(vtbl, libAVEnumPins, Clone);

    this->pin = pin;
    this->filter = filter;
    libAVFilter_AddRef(this->filter);

    return 1;
}
/* Counterpart of Setup: drop the filter reference taken above. */
static int
libAVEnumPins_Cleanup(libAVEnumPins *this)
{
    libAVFilter_Release(this->filter);
    return 1;
}
DECLARE_CREATE(libAVEnumPins, libAVEnumPins_Setup(this, pin, filter),
               libAVPin *pin, libAVFilter *filter)
DECLARE_DESTROY(libAVEnumPins, libAVEnumPins_Cleanup)

200
externals/ffmpeg/libavdevice/dshow_filter.c vendored Executable file
View File

@@ -0,0 +1,200 @@
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dshow_capture.h"
/* Boilerplate IUnknown methods generated from dshow_capture.h macros. */
DECLARE_QUERYINTERFACE(libAVFilter,
    { {&IID_IUnknown,0}, {&IID_IBaseFilter,0} })
DECLARE_ADDREF(libAVFilter)
DECLARE_RELEASE(libAVFilter)
/* IPersist::GetClassID — this internal filter is never registered, so no
 * CLSID exists for it. */
long WINAPI
libAVFilter_GetClassID(libAVFilter *this, CLSID *id)
{
    dshowdebug("libAVFilter_GetClassID(%p)\n", this);
    /* I'm not creating a ClassID just for this. */
    return E_FAIL;
}
/* IMediaFilter::Stop — state transition only; nothing to flush. */
long WINAPI
libAVFilter_Stop(libAVFilter *this)
{
    dshowdebug("libAVFilter_Stop(%p)\n", this);
    this->state = State_Stopped;
    return S_OK;
}
/* IMediaFilter::Pause — state transition only. */
long WINAPI
libAVFilter_Pause(libAVFilter *this)
{
    dshowdebug("libAVFilter_Pause(%p)\n", this);
    this->state = State_Paused;
    return S_OK;
}
/* IMediaFilter::Run — remember the graph's start time for timestamping. */
long WINAPI
libAVFilter_Run(libAVFilter *this, REFERENCE_TIME start)
{
    dshowdebug("libAVFilter_Run(%p) %"PRId64"\n", this, start);
    this->state = State_Running;
    this->start_time = start;
    return S_OK;
}
/* IMediaFilter::GetState — state changes are instantaneous for this filter,
 * so the `ms` timeout is never consulted. */
long WINAPI
libAVFilter_GetState(libAVFilter *this, DWORD ms, FILTER_STATE *state)
{
    dshowdebug("libAVFilter_GetState(%p)\n", this);
    if (state == NULL)
        return E_POINTER;
    *state = this->state;
    return S_OK;
}
/* IMediaFilter::SetSyncSource — swap the held reference clock, keeping the
 * COM reference counts balanced. A NULL clock is allowed. */
long WINAPI
libAVFilter_SetSyncSource(libAVFilter *this, IReferenceClock *clock)
{
    dshowdebug("libAVFilter_SetSyncSource(%p)\n", this);
    if (this->clock == clock)
        return S_OK;
    /* AddRef the incoming clock before releasing the old one. */
    if (clock)
        IReferenceClock_AddRef(clock);
    if (this->clock)
        IReferenceClock_Release(this->clock);
    this->clock = clock;
    return S_OK;
}
/* IMediaFilter::GetSyncSource — hand out the clock with an extra reference
 * (caller releases); may legitimately return NULL. */
long WINAPI
libAVFilter_GetSyncSource(libAVFilter *this, IReferenceClock **clock)
{
    dshowdebug("libAVFilter_GetSyncSource(%p)\n", this);
    if (clock == NULL)
        return E_POINTER;
    *clock = this->clock;
    if (*clock)
        IReferenceClock_AddRef(*clock);
    return S_OK;
}
/* IBaseFilter::EnumPins — create an enumerator over the single input pin. */
long WINAPI
libAVFilter_EnumPins(libAVFilter *this, IEnumPins **enumpin)
{
    libAVEnumPins *pin_enum;

    dshowdebug("libAVFilter_EnumPins(%p)\n", this);
    if (!enumpin)
        return E_POINTER;
    pin_enum = libAVEnumPins_Create(this->pin, this);
    if (!pin_enum)
        return E_OUTOFMEMORY;
    *enumpin = (IEnumPins *) pin_enum;
    return S_OK;
}
/* IBaseFilter::FindPin — the only pin this filter owns is named "In";
 * on a hit the pin is returned with an extra reference. */
long WINAPI
libAVFilter_FindPin(libAVFilter *this, const wchar_t *id, IPin **pin)
{
    dshowdebug("libAVFilter_FindPin(%p)\n", this);
    if (!id || !pin)
        return E_POINTER;
    if (wcscmp(id, L"In")) {
        *pin = NULL;
        return VFW_E_NOT_FOUND;
    }
    libAVPin_AddRef(this->pin);
    *pin = (IPin *) this->pin;
    return S_OK;
}
/* IBaseFilter::QueryFilterInfo — copy out name + graph; the graph pointer in
 * the copy gets its own reference, per the COM contract. */
long WINAPI
libAVFilter_QueryFilterInfo(libAVFilter *this, FILTER_INFO *info)
{
    dshowdebug("libAVFilter_QueryFilterInfo(%p)\n", this);
    if (!info)
        return E_POINTER;
    if (this->info.pGraph)
        IFilterGraph_AddRef(this->info.pGraph);
    *info = this->info;
    return S_OK;
}
/* IBaseFilter::JoinFilterGraph — record the owning graph (not ref-counted,
 * per the IBaseFilter contract) and our name within it. */
long WINAPI
libAVFilter_JoinFilterGraph(libAVFilter *this, IFilterGraph *graph,
                            const wchar_t *name)
{
    dshowdebug("libAVFilter_JoinFilterGraph(%p)\n", this);
    this->info.pGraph = graph;
    if (name) {
        /* achName is a fixed MAX_FILTER_NAME-sized buffer while the
         * graph-supplied name is arbitrary; the previous plain wcscpy
         * could overflow it. Copy bounded and always NUL-terminate. */
        size_t i;
        const size_t max = sizeof(this->info.achName) / sizeof(this->info.achName[0]) - 1;
        for (i = 0; i < max && name[i]; i++)
            this->info.achName[i] = name[i];
        this->info.achName[i] = 0;
    }
    return S_OK;
}
/* IBaseFilter::QueryVendorInfo — optional per spec; we provide none. */
long WINAPI
libAVFilter_QueryVendorInfo(libAVFilter *this, wchar_t **info)
{
    dshowdebug("libAVFilter_QueryVendorInfo(%p)\n", this);
    if (!info)
        return E_POINTER;
    return E_NOTIMPL; /* don't have to do anything here */
}
/* One-time initialization run by the generated _Create: fill the vtable,
 * create the single input pin and stash the capture callback context.
 * Returns 1; see note below about the unchecked pin allocation. */
static int
libAVFilter_Setup(libAVFilter *this, void *priv_data, void *callback,
                  enum dshowDeviceType type)
{
    IBaseFilterVtbl *vtbl = this->vtbl;
    SETVTBL(vtbl, libAVFilter, QueryInterface);
    SETVTBL(vtbl, libAVFilter, AddRef);
    SETVTBL(vtbl, libAVFilter, Release);
    SETVTBL(vtbl, libAVFilter, GetClassID);
    SETVTBL(vtbl, libAVFilter, Stop);
    SETVTBL(vtbl, libAVFilter, Pause);
    SETVTBL(vtbl, libAVFilter, Run);
    SETVTBL(vtbl, libAVFilter, GetState);
    SETVTBL(vtbl, libAVFilter, SetSyncSource);
    SETVTBL(vtbl, libAVFilter, GetSyncSource);
    SETVTBL(vtbl, libAVFilter, EnumPins);
    SETVTBL(vtbl, libAVFilter, FindPin);
    SETVTBL(vtbl, libAVFilter, QueryFilterInfo);
    SETVTBL(vtbl, libAVFilter, JoinFilterGraph);
    SETVTBL(vtbl, libAVFilter, QueryVendorInfo);

    /* NOTE(review): libAVPin_Create can return NULL on allocation failure,
     * which is not checked here — the NULL pin would be dereferenced later
     * (e.g. in EnumPins). Returning 0 here would need a NULL-safe Cleanup;
     * fix both together. */
    this->pin = libAVPin_Create(this);

    this->priv_data = priv_data;
    this->callback  = callback;
    this->type      = type;

    return 1;
}
/* Counterpart of Setup: release the input pin. Guard against a NULL pin
 * (possible when libAVPin_Create failed during setup) since the generated
 * Release dereferences its argument unconditionally. */
static int
libAVFilter_Cleanup(libAVFilter *this)
{
    if (this->pin)
        libAVPin_Release(this->pin);
    return 1;
}
DECLARE_CREATE(libAVFilter, libAVFilter_Setup(this, priv_data, callback, type),
               void *priv_data, void *callback, enum dshowDeviceType type)
DECLARE_DESTROY(libAVFilter, libAVFilter_Cleanup)

396
externals/ffmpeg/libavdevice/dshow_pin.c vendored Executable file
View File

@@ -0,0 +1,396 @@
/*
* DirectShow capture interface
* Copyright (c) 2010 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "dshow_capture.h"
#include <stddef.h>
/* Byte offset of the embedded IMemInputPin vtable pointer inside libAVPin;
 * QueryInterface uses it to hand out the secondary interface. */
#define imemoffset offsetof(libAVPin, imemvtbl)

/* Boilerplate IUnknown methods generated from dshow_capture.h macros. */
DECLARE_QUERYINTERFACE(libAVPin,
    { {&IID_IUnknown,0}, {&IID_IPin,0}, {&IID_IMemInputPin,imemoffset} })
DECLARE_ADDREF(libAVPin)
DECLARE_RELEASE(libAVPin)
/* IPin::Connect — never initiated from our side; this pin only accepts
 * connections via ReceiveConnection. */
long WINAPI
libAVPin_Connect(libAVPin *this, IPin *pin, const AM_MEDIA_TYPE *type)
{
    dshowdebug("libAVPin_Connect(%p, %p, %p)\n", this, pin, type);
    /* Input pins receive connections. */
    return S_FALSE;
}
/* IPin::ReceiveConnection — accept a connection from an upstream output pin,
 * verifying the majortype matches our device type (video vs audio) and
 * keeping a deep copy of the negotiated media type. */
long WINAPI
libAVPin_ReceiveConnection(libAVPin *this, IPin *pin,
                           const AM_MEDIA_TYPE *type)
{
    enum dshowDeviceType devtype = this->filter->type;
    dshowdebug("libAVPin_ReceiveConnection(%p)\n", this);

    if (!pin)
        return E_POINTER;
    if (this->connectedto)
        return VFW_E_ALREADY_CONNECTED;

    /* NOTE(review): `type` is dereferenced without a NULL check; DirectShow
     * presumably never passes NULL here — confirm. */
    ff_print_AM_MEDIA_TYPE(type);
    if (devtype == VideoDevice) {
        if (!IsEqualGUID(&type->majortype, &MEDIATYPE_Video))
            return VFW_E_TYPE_NOT_ACCEPTED;
    } else {
        if (!IsEqualGUID(&type->majortype, &MEDIATYPE_Audio))
            return VFW_E_TYPE_NOT_ACCEPTED;
    }

    /* Copy the media type before committing the connection, so a failed
     * deep copy leaves the pin unconnected (result was ignored before). */
    if (ff_copy_dshow_media_type(&this->type, type) != S_OK)
        return E_OUTOFMEMORY;
    IPin_AddRef(pin);
    this->connectedto = pin;

    return S_OK;
}
/* IPin::Disconnect — break the current connection, if any.
 * Only legal while the filter graph is stopped; returns S_FALSE when the
 * pin was not connected in the first place. */
long WINAPI
libAVPin_Disconnect(libAVPin *this)
{
    long hr = S_FALSE;

    dshowdebug("libAVPin_Disconnect(%p)\n", this);

    if (this->filter->state != State_Stopped)
        return VFW_E_NOT_STOPPED;

    if (this->connectedto) {
        IPin_Release(this->connectedto);
        this->connectedto = NULL;
        hr = S_OK;
    }
    return hr;
}
/* IPin::ConnectedTo — hand back an AddRef'ed pointer to the peer pin,
 * or VFW_E_NOT_CONNECTED when there is no connection. */
long WINAPI
libAVPin_ConnectedTo(libAVPin *this, IPin **pin)
{
    IPin *peer;

    dshowdebug("libAVPin_ConnectedTo(%p)\n", this);

    if (!pin)
        return E_POINTER;

    peer = this->connectedto;
    if (!peer)
        return VFW_E_NOT_CONNECTED;

    /* The caller owns the reference stored in *pin. */
    IPin_AddRef(peer);
    *pin = peer;
    return S_OK;
}
/* IPin::ConnectionMediaType — copy the negotiated media type into *type.
 * Fails with VFW_E_NOT_CONNECTED when no connection has been made. */
long WINAPI
libAVPin_ConnectionMediaType(libAVPin *this, AM_MEDIA_TYPE *type)
{
    dshowdebug("libAVPin_ConnectionMediaType(%p)\n", this);

    if (!type)
        return E_POINTER;

    return this->connectedto ? ff_copy_dshow_media_type(type, &this->type)
                             : VFW_E_NOT_CONNECTED;
}
/* IPin::QueryPinInfo — describe this pin: owning filter (AddRef'ed so the
 * caller owns the reference in info->pFilter), direction, display name. */
long WINAPI
libAVPin_QueryPinInfo(libAVPin *this, PIN_INFO *info)
{
    libAVFilter *owner = this->filter;

    dshowdebug("libAVPin_QueryPinInfo(%p)\n", this);

    if (!info)
        return E_POINTER;

    if (owner)
        libAVFilter_AddRef(owner);

    info->pFilter = (IBaseFilter *) owner;
    info->dir     = PINDIR_INPUT;
    wcscpy(info->achName, L"Capture");

    return S_OK;
}
/* IPin::QueryDirection — this pin is always an input pin. */
long WINAPI
libAVPin_QueryDirection(libAVPin *this, PIN_DIRECTION *dir)
{
dshowdebug("libAVPin_QueryDirection(%p)\n", this);
if (!dir)
return E_POINTER;
*dir = PINDIR_INPUT;
return S_OK;
}
/* IPin::QueryId — return the pin's identifier as a newly allocated wide
 * string owned by the caller.
 *
 * Fix: the wcsdup() result was previously unchecked, so an allocation
 * failure returned S_OK with *id == NULL. Now reports E_OUTOFMEMORY.
 *
 * NOTE(review): the COM contract says the string should be allocated with
 * CoTaskMemAlloc (callers free it with CoTaskMemFree); wcsdup uses malloc.
 * Kept as-is to preserve behavior — confirm against actual callers. */
long WINAPI
libAVPin_QueryId(libAVPin *this, wchar_t **id)
{
    dshowdebug("libAVPin_QueryId(%p)\n", this);

    if (!id)
        return E_POINTER;

    *id = wcsdup(L"libAV Pin");
    if (!*id)
        return E_OUTOFMEMORY;

    return S_OK;
}
/* IPin::QueryAccept — never pre-approve a proposed media type here; the
 * real validation happens in libAVPin_ReceiveConnection. */
long WINAPI
libAVPin_QueryAccept(libAVPin *this, const AM_MEDIA_TYPE *type)
{
dshowdebug("libAVPin_QueryAccept(%p)\n", this);
return S_FALSE;
}
/* IPin::EnumMediaTypes — hand out a media-type enumerator. We advertise no
 * preferred types, so the enumerator is created over a NULL type (empty). */
long WINAPI
libAVPin_EnumMediaTypes(libAVPin *this, IEnumMediaTypes **enumtypes)
{
    libAVEnumMediaTypes *enum_obj;

    dshowdebug("libAVPin_EnumMediaTypes(%p)\n", this);

    if (!enumtypes)
        return E_POINTER;

    /* NULL media type => enumerator with no entries. */
    enum_obj = libAVEnumMediaTypes_Create(NULL);
    if (!enum_obj)
        return E_OUTOFMEMORY;

    *enumtypes = (IEnumMediaTypes *) enum_obj;
    return S_OK;
}
/* IPin::QueryInternalConnections — not supported; E_NOTIMPL tells the graph
 * the default behavior (all input pins feed all output pins) applies. */
long WINAPI
libAVPin_QueryInternalConnections(libAVPin *this, IPin **pin,
unsigned long *npin)
{
dshowdebug("libAVPin_QueryInternalConnections(%p)\n", this);
return E_NOTIMPL;
}
/* IPin::EndOfStream — no buffered data to flush downstream, so acknowledge
 * and ignore. */
long WINAPI
libAVPin_EndOfStream(libAVPin *this)
{
dshowdebug("libAVPin_EndOfStream(%p)\n", this);
/* I don't care. */
return S_OK;
}
/* IPin::BeginFlush — nothing is queued in this pin, so flushing is a no-op. */
long WINAPI
libAVPin_BeginFlush(libAVPin *this)
{
dshowdebug("libAVPin_BeginFlush(%p)\n", this);
/* I don't care. */
return S_OK;
}
/* IPin::EndFlush — counterpart of BeginFlush; also a no-op here. */
long WINAPI
libAVPin_EndFlush(libAVPin *this)
{
dshowdebug("libAVPin_EndFlush(%p)\n", this);
/* I don't care. */
return S_OK;
}
/* IPin::NewSegment — segment boundaries are irrelevant for live capture;
 * acknowledge and ignore. */
long WINAPI
libAVPin_NewSegment(libAVPin *this, REFERENCE_TIME start, REFERENCE_TIME stop,
double rate)
{
dshowdebug("libAVPin_NewSegment(%p)\n", this);
/* I don't care. */
return S_OK;
}
/* Initialize a freshly created libAVPin: allocate and populate the
 * IMemInputPin vtable, fill in the IPin vtable (pre-allocated by
 * DECLARE_CREATE), and attach the pin to its owning filter.
 * Returns 1 on success, 0 on failure (NULL filter or allocation failure);
 * DECLARE_CREATE destroys the half-built object when 0 is returned. */
static int
libAVPin_Setup(libAVPin *this, libAVFilter *filter)
{
IPinVtbl *vtbl = this->vtbl;
IMemInputPinVtbl *imemvtbl;
if (!filter)
return 0;
/* The IMemInputPin vtable lives in a separate heap block, freed by
 * libAVPin_Free(). */
imemvtbl = av_malloc(sizeof(IMemInputPinVtbl));
if (!imemvtbl)
return 0;
SETVTBL(imemvtbl, libAVMemInputPin, QueryInterface);
SETVTBL(imemvtbl, libAVMemInputPin, AddRef);
SETVTBL(imemvtbl, libAVMemInputPin, Release);
SETVTBL(imemvtbl, libAVMemInputPin, GetAllocator);
SETVTBL(imemvtbl, libAVMemInputPin, NotifyAllocator);
SETVTBL(imemvtbl, libAVMemInputPin, GetAllocatorRequirements);
SETVTBL(imemvtbl, libAVMemInputPin, Receive);
SETVTBL(imemvtbl, libAVMemInputPin, ReceiveMultiple);
SETVTBL(imemvtbl, libAVMemInputPin, ReceiveCanBlock);
this->imemvtbl = imemvtbl;
SETVTBL(vtbl, libAVPin, QueryInterface);
SETVTBL(vtbl, libAVPin, AddRef);
SETVTBL(vtbl, libAVPin, Release);
SETVTBL(vtbl, libAVPin, Connect);
SETVTBL(vtbl, libAVPin, ReceiveConnection);
SETVTBL(vtbl, libAVPin, Disconnect);
SETVTBL(vtbl, libAVPin, ConnectedTo);
SETVTBL(vtbl, libAVPin, ConnectionMediaType);
SETVTBL(vtbl, libAVPin, QueryPinInfo);
SETVTBL(vtbl, libAVPin, QueryDirection);
SETVTBL(vtbl, libAVPin, QueryId);
SETVTBL(vtbl, libAVPin, QueryAccept);
SETVTBL(vtbl, libAVPin, EnumMediaTypes);
SETVTBL(vtbl, libAVPin, QueryInternalConnections);
SETVTBL(vtbl, libAVPin, EndOfStream);
SETVTBL(vtbl, libAVPin, BeginFlush);
SETVTBL(vtbl, libAVPin, EndFlush);
SETVTBL(vtbl, libAVPin, NewSegment);
/* Back-reference to the owning filter (not ref-counted here). */
this->filter = filter;
return 1;
}
/* Release resources owned by a libAVPin: the heap-allocated IMemInputPin
 * vtable and the pbFormat block of the stored media type (allocated by
 * ff_copy_dshow_media_type via CoTaskMemAlloc). */
static void
libAVPin_Free(libAVPin *this)
{
if (!this)
return;
av_freep(&this->imemvtbl);
if (this->type.pbFormat) {
CoTaskMemFree(this->type.pbFormat);
this->type.pbFormat = NULL;
}
}
/* Generate libAVPin_Create()/libAVPin_Destroy() wrappers. */
DECLARE_CREATE(libAVPin, libAVPin_Setup(this, filter), libAVFilter *filter)
DECLARE_DESTROY(libAVPin, libAVPin_Free)
/*****************************************************************************
* libAVMemInputPin
****************************************************************************/
/* IMemInputPin is aggregated inside libAVPin; recover the owning pin by
 * subtracting the vtable-member offset, then delegate to the pin's
 * IUnknown implementation so both interfaces share one identity/refcount. */
long WINAPI
libAVMemInputPin_QueryInterface(libAVMemInputPin *this, const GUID *riid,
void **ppvObject)
{
libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
dshowdebug("libAVMemInputPin_QueryInterface(%p)\n", this);
return libAVPin_QueryInterface(pin, riid, ppvObject);
}
/* Delegate AddRef to the owning libAVPin (shared refcount). */
unsigned long WINAPI
libAVMemInputPin_AddRef(libAVMemInputPin *this)
{
libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
dshowdebug("libAVMemInputPin_AddRef(%p)\n", this);
return libAVPin_AddRef(pin);
}
/* Delegate Release to the owning libAVPin (shared refcount). */
unsigned long WINAPI
libAVMemInputPin_Release(libAVMemInputPin *this)
{
libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
dshowdebug("libAVMemInputPin_Release(%p)\n", this);
return libAVPin_Release(pin);
}
/* IMemInputPin::GetAllocator — we provide no allocator of our own; the
 * upstream filter must supply one. */
long WINAPI
libAVMemInputPin_GetAllocator(libAVMemInputPin *this, IMemAllocator **alloc)
{
dshowdebug("libAVMemInputPin_GetAllocator(%p)\n", this);
return VFW_E_NO_ALLOCATOR;
}
/* IMemInputPin::NotifyAllocator — accept whatever allocator the upstream
 * filter chose; we only read the samples it delivers. */
long WINAPI
libAVMemInputPin_NotifyAllocator(libAVMemInputPin *this, IMemAllocator *alloc,
BOOL rdwr)
{
dshowdebug("libAVMemInputPin_NotifyAllocator(%p)\n", this);
return S_OK;
}
/* IMemInputPin::GetAllocatorRequirements — no special buffer requirements. */
long WINAPI
libAVMemInputPin_GetAllocatorRequirements(libAVMemInputPin *this,
ALLOCATOR_PROPERTIES *props)
{
dshowdebug("libAVMemInputPin_GetAllocatorRequirements(%p)\n", this);
return E_NOTIMPL;
}
/* IMemInputPin::Receive — called by the upstream filter for every captured
 * media sample. Computes a timestamp for the sample and forwards the raw
 * buffer to the demuxer through the filter's callback.
 * Timestamp policy: video devices report unreliable PTS, so the graph
 * reference clock is used instead; audio devices use the sample's own PTS
 * (offset by the capture start time), except for bogus out-of-range values
 * which are dropped. */
long WINAPI
libAVMemInputPin_Receive(libAVMemInputPin *this, IMediaSample *sample)
{
libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
enum dshowDeviceType devtype = pin->filter->type;
void *priv_data;
AVFormatContext *s;
uint8_t *buf;
int buf_size; /* todo should be a long? */
int index;
int64_t curtime;
int64_t orig_curtime;
int64_t graphtime;
const char *devtypename = (devtype == VideoDevice) ? "video" : "audio";
IReferenceClock *clock = pin->filter->clock;
int64_t dummy;
struct dshow_ctx *ctx;
dshowdebug("libAVMemInputPin_Receive(%p)\n", this);
if (!sample)
return E_POINTER;
/* Original device PTS, kept only for the diagnostic log below. */
IMediaSample_GetTime(sample, &orig_curtime, &dummy);
orig_curtime += pin->filter->start_time;
IReferenceClock_GetTime(clock, &graphtime);
if (devtype == VideoDevice) {
/* PTS from video devices is unreliable. */
IReferenceClock_GetTime(clock, &curtime);
} else {
IMediaSample_GetTime(sample, &curtime, &dummy);
if(curtime > 400000000000000000LL) {
/* initial frames sometimes start < 0 (shown as a very large number here,
like 437650244077016960 which FFmpeg doesn't like.
TODO figure out math. For now just drop them. */
av_log(NULL, AV_LOG_DEBUG,
"dshow dropping initial (or ending) audio frame with odd PTS too high %"PRId64"\n", curtime);
return S_OK;
}
curtime += pin->filter->start_time;
}
buf_size = IMediaSample_GetActualDataLength(sample);
IMediaSample_GetPointer(sample, &buf);
/* The filter's priv_data is the demuxer's AVFormatContext. */
priv_data = pin->filter->priv_data;
s = priv_data;
ctx = s->priv_data;
index = pin->filter->stream_index;
av_log(NULL, AV_LOG_VERBOSE, "dshow passing through packet of type %s size %8d "
"timestamp %"PRId64" orig timestamp %"PRId64" graph timestamp %"PRId64" diff %"PRId64" %s\n",
devtypename, buf_size, curtime, orig_curtime, graphtime, graphtime - orig_curtime, ctx->device_name[devtype]);
/* Hand the sample's bytes to the demuxer (callback copies/queues them). */
pin->filter->callback(priv_data, index, buf, buf_size, curtime, devtype);
return S_OK;
}
/* IMemInputPin::ReceiveMultiple — deliver a batch of samples by forwarding
 * each one to libAVMemInputPin_Receive().
 *
 * Fixes over the previous version:
 *  - `samples` and `nproc` were dereferenced without NULL checks (now
 *    E_POINTER);
 *  - the loop index was an `int` while the count `n` is a `long`, which
 *    could truncate on LLP64/LP64 mismatches; it is now a `long`.
 * Per-sample failures are still ignored and all samples reported as
 * processed, matching the original best-effort behavior. */
long WINAPI
libAVMemInputPin_ReceiveMultiple(libAVMemInputPin *this,
                                 IMediaSample **samples, long n, long *nproc)
{
    long i;

    dshowdebug("libAVMemInputPin_ReceiveMultiple(%p)\n", this);

    if (!samples || !nproc)
        return E_POINTER;

    for (i = 0; i < n; i++)
        libAVMemInputPin_Receive(this, samples[i]);

    *nproc = n;
    return S_OK;
}
/* IMemInputPin::ReceiveCanBlock — Receive() never blocks, so upstream
 * filters may deliver samples on their streaming thread freely. */
long WINAPI
libAVMemInputPin_ReceiveCanBlock(libAVMemInputPin *this)
{
dshowdebug("libAVMemInputPin_ReceiveCanBlock(%p)\n", this);
/* I swear I will not block. */
return S_FALSE;
}
/* Destroy the aggregated IMemInputPin by destroying its owning libAVPin
 * (recovered via the vtable-member offset). */
void
libAVMemInputPin_Destroy(libAVMemInputPin *this)
{
libAVPin *pin = (libAVPin *) ((uint8_t *) this - imemoffset);
dshowdebug("libAVMemInputPin_Destroy(%p)\n", this);
libAVPin_Destroy(pin);
}

134
externals/ffmpeg/libavdevice/fbdev_common.c vendored Executable file
View File

@@ -0,0 +1,134 @@
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
* Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <stdlib.h>
#include "fbdev_common.h"
#include "libavutil/common.h"
#include "avdevice.h"
/* Mapping from framebuffer channel layout to an FFmpeg pixel format.
 * The offsets are fb_bitfield.offset values, i.e. BIT positions of each
 * channel within the pixel word (multiples of 8 for byte-aligned formats). */
struct rgb_pixfmt_map_entry {
    int bits_per_pixel;
    int red_offset, green_offset, blue_offset, alpha_offset;
    enum AVPixelFormat pixfmt;
};

static const struct rgb_pixfmt_map_entry rgb_pixfmt_map[] = {
    // bpp, red_offset, green_offset, blue_offset, alpha_offset, pixfmt
    {  32,       0,            8,           16,           24,    AV_PIX_FMT_RGBA   },
    {  32,      16,            8,            0,           24,    AV_PIX_FMT_BGRA   },
    {  32,       8,           16,           24,            0,    AV_PIX_FMT_ARGB   },
    /* Fixed: was {32, 3, 2, 8, 0}, which are impossible (non-byte-aligned)
     * bit offsets and could never match a real framebuffer. ABGR stores
     * bytes A,B,G,R, i.e. red at bit 24, green at 16, blue at 8. */
    {  32,      24,           16,            8,            0,    AV_PIX_FMT_ABGR   },
    {  24,       0,            8,           16,            0,    AV_PIX_FMT_RGB24  },
    {  24,      16,            8,            0,            0,    AV_PIX_FMT_BGR24  },
    {  16,      11,            5,            0,            0,    AV_PIX_FMT_RGB565 },
};
enum AVPixelFormat ff_get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *varinfo)
{
int i;
for (i = 0; i < FF_ARRAY_ELEMS(rgb_pixfmt_map); i++) {
const struct rgb_pixfmt_map_entry *entry = &rgb_pixfmt_map[i];
if (entry->bits_per_pixel == varinfo->bits_per_pixel &&
entry->red_offset == varinfo->red.offset &&
entry->green_offset == varinfo->green.offset &&
entry->blue_offset == varinfo->blue.offset)
return entry->pixfmt;
}
return AV_PIX_FMT_NONE;
}
/* Return the default framebuffer device path: the FRAMEBUFFER environment
 * variable if set (same convention as fbcon/fbgrab), otherwise /dev/fb0.
 * The returned string is either owned by the environment or static; the
 * caller must not free it.
 * Fix: declared with (void) to match the prototype in fbdev_common.h
 * (an empty () parameter list means "unspecified arguments" in C). */
const char* ff_fbdev_default_device(void)
{
    const char *dev = getenv("FRAMEBUFFER");
    if (!dev)
        dev = "/dev/fb0";
    return dev;
}
/* Enumerate framebuffer devices /dev/fb0 .. /dev/fb31 and append an
 * AVDeviceInfo entry for each one that can be opened and queried.
 * The entry matching ff_fbdev_default_device() is marked as the default.
 * Returns 0 on success or a negative AVERROR on fatal error (allocation
 * failure); unopenable/unqueryable devices are skipped, not fatal.
 *
 * Fix: `device` is now reset to NULL at the top of every iteration. The
 * previous code left it pointing at the device added in the last successful
 * iteration, so a later ioctl failure jumped to fail_device and freed an
 * entry that was already stored in device_list (dangling pointer /
 * potential double free). */
int ff_fbdev_get_device_list(AVDeviceInfoList *device_list)
{
    struct fb_var_screeninfo varinfo;
    struct fb_fix_screeninfo fixinfo;
    char device_file[12];
    AVDeviceInfo *device;
    int i, fd, ret = 0;
    const char *default_device = ff_fbdev_default_device();

    if (!device_list)
        return AVERROR(EINVAL);

    for (i = 0; i <= 31; i++) {
        /* Reset per iteration so the failure path never frees a device a
         * previous iteration already handed over to device_list. */
        device = NULL;

        snprintf(device_file, sizeof(device_file), "/dev/fb%d", i);
        if ((fd = avpriv_open(device_file, O_RDWR)) < 0) {
            int err = AVERROR(errno);
            /* Missing nodes are expected; only report other errors. */
            if (err != AVERROR(ENOENT))
                av_log(NULL, AV_LOG_ERROR, "Could not open framebuffer device '%s': %s\n",
                       device_file, av_err2str(err));
            continue;
        }
        if (ioctl(fd, FBIOGET_VSCREENINFO, &varinfo) == -1)
            goto fail_device;
        if (ioctl(fd, FBIOGET_FSCREENINFO, &fixinfo) == -1)
            goto fail_device;

        device = av_mallocz(sizeof(AVDeviceInfo));
        if (!device) {
            ret = AVERROR(ENOMEM);
            goto fail_device;
        }
        device->device_name        = av_strdup(device_file);
        device->device_description = av_strdup(fixinfo.id);
        if (!device->device_name || !device->device_description) {
            ret = AVERROR(ENOMEM);
            goto fail_device;
        }

        /* On failure the array is left untouched, so freeing `device` in
         * fail_device is still correct. */
        if ((ret = av_dynarray_add_nofree(&device_list->devices,
                                          &device_list->nb_devices, device)) < 0)
            goto fail_device;

        if (default_device && !strcmp(device->device_name, default_device)) {
            device_list->default_device = device_list->nb_devices - 1;
            default_device = NULL;
        }
        close(fd);
        continue;

      fail_device:
        if (device) {
            av_freep(&device->device_name);
            av_freep(&device->device_description);
            av_freep(&device);
        }
        if (fd >= 0)
            close(fd);
        if (ret < 0)
            return ret;
    }
    return 0;
}

38
externals/ffmpeg/libavdevice/fbdev_common.h vendored Executable file
View File

@@ -0,0 +1,38 @@
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
* Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_FBDEV_COMMON_H
#define AVDEVICE_FBDEV_COMMON_H
#include <features.h>
#include <linux/fb.h>
#include "libavutil/pixfmt.h"
struct AVDeviceInfoList;
enum AVPixelFormat ff_get_pixfmt_from_fb_varinfo(struct fb_var_screeninfo *varinfo);
const char* ff_fbdev_default_device(void);
int ff_fbdev_get_device_list(struct AVDeviceInfoList *device_list);
#endif /* AVDEVICE_FBDEV_COMMON_H */

245
externals/ffmpeg/libavdevice/fbdev_dec.c vendored Executable file
View File

@@ -0,0 +1,245 @@
/*
* Copyright (c) 2011 Stefano Sabatini
* Copyright (c) 2009 Giliard B. de Freitas <giliarde@gmail.com>
* Copyright (C) 2002 Gunnar Monell <gmo@linux.nu>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* Linux framebuffer input device,
* inspired by code from fbgrab.c by Gunnar Monell.
* @see http://linux-fbdev.sourceforge.net/
*/
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <time.h>
#include <linux/fb.h>
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavformat/internal.h"
#include "avdevice.h"
#include "fbdev_common.h"
/* Private context of the fbdev input device (one per open demuxer). */
typedef struct FBDevContext {
AVClass *class; ///< class for private options
int frame_size; ///< size in bytes of a grabbed frame
AVRational framerate_q; ///< framerate
int64_t time_frame; ///< time for the next frame to output (in 1/1000000 units)
int fd; ///< framebuffer device file descriptor
int width, height; ///< assumed frame resolution
int frame_linesize; ///< linesize of the output frame, it is assumed to be constant
int bytes_per_pixel;
struct fb_var_screeninfo varinfo; ///< variable info;
struct fb_fix_screeninfo fixinfo; ///< fixed info;
uint8_t *data; ///< framebuffer data
} FBDevContext;
/* Demuxer read_header: open the framebuffer device (URL or default), query
 * its screen info, derive the pixel format/geometry, mmap the framebuffer
 * read-only and set up a single rawvideo stream.
 * Returns 0 on success, a negative AVERROR on failure (fd closed on the
 * failure path after open). */
static av_cold int fbdev_read_header(AVFormatContext *avctx)
{
FBDevContext *fbdev = avctx->priv_data;
AVStream *st = NULL;
enum AVPixelFormat pix_fmt;
int ret, flags = O_RDONLY;
const char* device;
if (!(st = avformat_new_stream(avctx, NULL)))
return AVERROR(ENOMEM);
avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in microseconds */
/* NONBLOCK is ignored by the fbdev driver, only set for consistency */
if (avctx->flags & AVFMT_FLAG_NONBLOCK)
flags |= O_NONBLOCK;
if (avctx->url[0])
device = avctx->url;
else
device = ff_fbdev_default_device();
if ((fbdev->fd = avpriv_open(device, flags)) == -1) {
ret = AVERROR(errno);
av_log(avctx, AV_LOG_ERROR,
"Could not open framebuffer device '%s': %s\n",
device, av_err2str(ret));
return ret;
}
if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
ret = AVERROR(errno);
av_log(avctx, AV_LOG_ERROR,
"FBIOGET_VSCREENINFO: %s\n", av_err2str(ret));
goto fail;
}
if (ioctl(fbdev->fd, FBIOGET_FSCREENINFO, &fbdev->fixinfo) < 0) {
ret = AVERROR(errno);
av_log(avctx, AV_LOG_ERROR,
"FBIOGET_FSCREENINFO: %s\n", av_err2str(ret));
goto fail;
}
pix_fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
if (pix_fmt == AV_PIX_FMT_NONE) {
ret = AVERROR(EINVAL);
av_log(avctx, AV_LOG_ERROR,
"Framebuffer pixel format not supported.\n");
goto fail;
}
/* Cache geometry; linesize of the OUTPUT frame is width*bpp, which may
 * differ from the device's fixinfo.line_length (handled in read_packet). */
fbdev->width = fbdev->varinfo.xres;
fbdev->height = fbdev->varinfo.yres;
fbdev->bytes_per_pixel = (fbdev->varinfo.bits_per_pixel + 7) >> 3;
fbdev->frame_linesize = fbdev->width * fbdev->bytes_per_pixel;
fbdev->frame_size = fbdev->frame_linesize * fbdev->height;
fbdev->time_frame = AV_NOPTS_VALUE;
fbdev->data = mmap(NULL, fbdev->fixinfo.smem_len, PROT_READ, MAP_SHARED, fbdev->fd, 0);
if (fbdev->data == MAP_FAILED) {
ret = AVERROR(errno);
av_log(avctx, AV_LOG_ERROR, "Error in mmap(): %s\n", av_err2str(ret));
goto fail;
}
st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codecpar->width = fbdev->width;
st->codecpar->height = fbdev->height;
st->codecpar->format = pix_fmt;
st->avg_frame_rate = fbdev->framerate_q;
st->codecpar->bit_rate =
fbdev->width * fbdev->height * fbdev->bytes_per_pixel * av_q2d(fbdev->framerate_q) * 8;
av_log(avctx, AV_LOG_INFO,
"w:%d h:%d bpp:%d pixfmt:%s fps:%d/%d bit_rate:%"PRId64"\n",
fbdev->width, fbdev->height, fbdev->varinfo.bits_per_pixel,
av_get_pix_fmt_name(pix_fmt),
fbdev->framerate_q.num, fbdev->framerate_q.den,
st->codecpar->bit_rate);
return 0;
fail:
close(fbdev->fd);
return ret;
}
/* Demuxer read_packet: pace output according to the configured framerate
 * (sleeping unless AVFMT_FLAG_NONBLOCK, in which case EAGAIN is returned),
 * then copy the currently visible framebuffer area row by row into a new
 * packet. Returns the frame size on success or a negative AVERROR. */
static int fbdev_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
FBDevContext *fbdev = avctx->priv_data;
int64_t curtime, delay;
struct timespec ts;
int i, ret;
uint8_t *pin, *pout;
/* First call: anchor the pacing clock to "now". */
if (fbdev->time_frame == AV_NOPTS_VALUE)
fbdev->time_frame = av_gettime();
/* wait based on the frame rate */
while (1) {
curtime = av_gettime();
delay = fbdev->time_frame - curtime;
av_log(avctx, AV_LOG_TRACE,
"time_frame:%"PRId64" curtime:%"PRId64" delay:%"PRId64"\n",
fbdev->time_frame, curtime, delay);
if (delay <= 0) {
fbdev->time_frame += INT64_C(1000000) / av_q2d(fbdev->framerate_q);
break;
}
if (avctx->flags & AVFMT_FLAG_NONBLOCK)
return AVERROR(EAGAIN);
ts.tv_sec = delay / 1000000;
ts.tv_nsec = (delay % 1000000) * 1000;
/* nanosleep updates ts with the remaining time when interrupted. */
while (nanosleep(&ts, &ts) < 0 && errno == EINTR);
}
if ((ret = av_new_packet(pkt, fbdev->frame_size)) < 0)
return ret;
/* refresh fbdev->varinfo, visible data position may change at each call */
if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
av_log(avctx, AV_LOG_WARNING,
"Error refreshing variable info: %s\n", av_err2str(AVERROR(errno)));
}
pkt->pts = curtime;
/* compute visible data offset */
pin = fbdev->data + fbdev->bytes_per_pixel * fbdev->varinfo.xoffset +
fbdev->varinfo.yoffset * fbdev->fixinfo.line_length;
pout = pkt->data;
/* Copy row by row: the device line_length may exceed the packed output
 * frame_linesize (padding at the end of each framebuffer line). */
for (i = 0; i < fbdev->height; i++) {
memcpy(pout, pin, fbdev->frame_linesize);
pin += fbdev->fixinfo.line_length;
pout += fbdev->frame_linesize;
}
return fbdev->frame_size;
}
/* Demuxer read_close: unmap the framebuffer and close its file descriptor. */
static av_cold int fbdev_read_close(AVFormatContext *avctx)
{
FBDevContext *fbdev = avctx->priv_data;
munmap(fbdev->data, fbdev->fixinfo.smem_len);
close(fbdev->fd);
return 0;
}
/* get_device_list callback: delegate to the shared fbdev enumerator. */
static int fbdev_get_device_list(AVFormatContext *s, AVDeviceInfoList *device_list)
{
return ff_fbdev_get_device_list(device_list);
}
#define OFFSET(x) offsetof(FBDevContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* Private options: only the capture framerate (default 25 fps). */
static const AVOption options[] = {
{ "framerate","", OFFSET(framerate_q), AV_OPT_TYPE_VIDEO_RATE, {.str = "25"}, 0, INT_MAX, DEC },
{ NULL },
};
static const AVClass fbdev_class = {
.class_name = "fbdev indev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
/* Input device registration; AVFMT_NOFILE because we open the device
 * ourselves rather than through the AVIO layer. */
AVInputFormat ff_fbdev_demuxer = {
.name = "fbdev",
.long_name = NULL_IF_CONFIG_SMALL("Linux framebuffer"),
.priv_data_size = sizeof(FBDevContext),
.read_header = fbdev_read_header,
.read_packet = fbdev_read_packet,
.read_close = fbdev_read_close,
.get_device_list = fbdev_get_device_list,
.flags = AVFMT_NOFILE,
.priv_class = &fbdev_class,
};

220
externals/ffmpeg/libavdevice/fbdev_enc.c vendored Executable file
View File

@@ -0,0 +1,220 @@
/*
* Copyright (c) 2013 Lukasz Marek
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/fb.h>
#include "libavutil/pixdesc.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
#include "fbdev_common.h"
#include "avdevice.h"
/* Private context of the fbdev output device (one per open muxer). */
typedef struct {
AVClass *class; ///< class for private options
int xoffset; ///< x coordinate of top left corner
int yoffset; ///< y coordinate of top left corner
struct fb_var_screeninfo varinfo; ///< framebuffer variable info
struct fb_fix_screeninfo fixinfo; ///< framebuffer fixed info
int fd; ///< framebuffer device file descriptor
uint8_t *data; ///< framebuffer data
} FBDevContext;
/* Muxer write_header: validate that exactly one video stream is present,
 * open the framebuffer device (URL or default) read-write, query its screen
 * info, check the pixel format is representable, and mmap the framebuffer
 * for writing. Returns 0 or a negative AVERROR (fd closed on failure). */
static av_cold int fbdev_write_header(AVFormatContext *h)
{
FBDevContext *fbdev = h->priv_data;
enum AVPixelFormat pix_fmt;
int ret, flags = O_RDWR;
const char* device;
if (h->nb_streams != 1 || h->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO) {
av_log(fbdev, AV_LOG_ERROR, "Only a single video stream is supported.\n");
return AVERROR(EINVAL);
}
if (h->url[0])
device = h->url;
else
device = ff_fbdev_default_device();
if ((fbdev->fd = avpriv_open(device, flags)) == -1) {
ret = AVERROR(errno);
av_log(h, AV_LOG_ERROR,
"Could not open framebuffer device '%s': %s\n",
device, av_err2str(ret));
return ret;
}
if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0) {
ret = AVERROR(errno);
av_log(h, AV_LOG_ERROR, "FBIOGET_VSCREENINFO: %s\n", av_err2str(ret));
goto fail;
}
if (ioctl(fbdev->fd, FBIOGET_FSCREENINFO, &fbdev->fixinfo) < 0) {
ret = AVERROR(errno);
av_log(h, AV_LOG_ERROR, "FBIOGET_FSCREENINFO: %s\n", av_err2str(ret));
goto fail;
}
pix_fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
if (pix_fmt == AV_PIX_FMT_NONE) {
ret = AVERROR(EINVAL);
av_log(h, AV_LOG_ERROR, "Framebuffer pixel format not supported.\n");
goto fail;
}
fbdev->data = mmap(NULL, fbdev->fixinfo.smem_len, PROT_WRITE, MAP_SHARED, fbdev->fd, 0);
if (fbdev->data == MAP_FAILED) {
ret = AVERROR(errno);
av_log(h, AV_LOG_ERROR, "Error in mmap(): %s\n", av_err2str(ret));
goto fail;
}
return 0;
fail:
close(fbdev->fd);
return ret;
}
/* Muxer write_packet: blit one rawvideo frame into the visible framebuffer
 * area, clipped against the screen and shifted by the xoffset/yoffset
 * options (negative offsets crop the source, positive offsets shift the
 * destination and clip on the far edge). The incoming pixel format must
 * match the framebuffer's exactly; no conversion is performed. */
static int fbdev_write_packet(AVFormatContext *h, AVPacket *pkt)
{
FBDevContext *fbdev = h->priv_data;
uint8_t *pin, *pout;
enum AVPixelFormat fb_pix_fmt;
int disp_height;
int bytes_to_copy;
AVCodecParameters *par = h->streams[0]->codecpar;
enum AVPixelFormat video_pix_fmt = par->format;
int video_width = par->width;
int video_height = par->height;
int bytes_per_pixel = ((par->bits_per_coded_sample + 7) >> 3);
int src_line_size = video_width * bytes_per_pixel;
int i;
/* Re-read varinfo: panning (x/yoffset of the visible area) may change. */
if (ioctl(fbdev->fd, FBIOGET_VSCREENINFO, &fbdev->varinfo) < 0)
av_log(h, AV_LOG_WARNING,
"Error refreshing variable info: %s\n", av_err2str(AVERROR(errno)));
fb_pix_fmt = ff_get_pixfmt_from_fb_varinfo(&fbdev->varinfo);
if (fb_pix_fmt != video_pix_fmt) {
av_log(h, AV_LOG_ERROR, "Pixel format %s is not supported, use %s\n",
av_get_pix_fmt_name(video_pix_fmt), av_get_pix_fmt_name(fb_pix_fmt));
return AVERROR(EINVAL);
}
/* Start from the unclipped overlap of frame and screen. */
disp_height = FFMIN(fbdev->varinfo.yres, video_height);
bytes_to_copy = FFMIN(fbdev->varinfo.xres, video_width) * bytes_per_pixel;
pin = pkt->data;
pout = fbdev->data +
bytes_per_pixel * fbdev->varinfo.xoffset +
fbdev->varinfo.yoffset * fbdev->fixinfo.line_length;
/* Horizontal placement: negative xoffset crops the left of the source;
 * positive xoffset shifts right and clips against the screen edge. */
if (fbdev->xoffset) {
if (fbdev->xoffset < 0) {
if (-fbdev->xoffset >= video_width) //nothing to display
return 0;
bytes_to_copy += fbdev->xoffset * bytes_per_pixel;
pin -= fbdev->xoffset * bytes_per_pixel;
} else {
int diff = (video_width + fbdev->xoffset) - fbdev->varinfo.xres;
if (diff > 0) {
if (diff >= video_width) //nothing to display
return 0;
bytes_to_copy -= diff * bytes_per_pixel;
}
pout += bytes_per_pixel * fbdev->xoffset;
}
}
/* Vertical placement: same clipping logic as above, per row. */
if (fbdev->yoffset) {
if (fbdev->yoffset < 0) {
if (-fbdev->yoffset >= video_height) //nothing to display
return 0;
disp_height += fbdev->yoffset;
pin -= fbdev->yoffset * src_line_size;
} else {
int diff = (video_height + fbdev->yoffset) - fbdev->varinfo.yres;
if (diff > 0) {
if (diff >= video_height) //nothing to display
return 0;
disp_height -= diff;
}
pout += fbdev->yoffset * fbdev->fixinfo.line_length;
}
}
for (i = 0; i < disp_height; i++) {
memcpy(pout, pin, bytes_to_copy);
pout += fbdev->fixinfo.line_length;
pin += src_line_size;
}
return 0;
}
/* Muxer write_trailer: unmap the framebuffer and close the device. */
static av_cold int fbdev_write_trailer(AVFormatContext *h)
{
FBDevContext *fbdev = h->priv_data;
munmap(fbdev->data, fbdev->fixinfo.smem_len);
close(fbdev->fd);
return 0;
}
/* get_device_list callback: delegate to the shared fbdev enumerator. */
static int fbdev_get_device_list(AVFormatContext *s, AVDeviceInfoList *device_list)
{
return ff_fbdev_get_device_list(device_list);
}
#define OFFSET(x) offsetof(FBDevContext, x)
#define ENC AV_OPT_FLAG_ENCODING_PARAM
/* Private options: screen placement of the rendered frame. */
static const AVOption options[] = {
{ "xoffset", "set x coordinate of top left corner", OFFSET(xoffset), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, ENC },
{ "yoffset", "set y coordinate of top left corner", OFFSET(yoffset), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, ENC },
{ NULL }
};
static const AVClass fbdev_class = {
.class_name = "fbdev outdev",
.item_name = av_default_item_name,
.option = options,
.version = LIBAVUTIL_VERSION_INT,
.category = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};
/* Output device registration: accepts a single rawvideo stream, no audio. */
AVOutputFormat ff_fbdev_muxer = {
.name = "fbdev",
.long_name = NULL_IF_CONFIG_SMALL("Linux framebuffer"),
.priv_data_size = sizeof(FBDevContext),
.audio_codec = AV_CODEC_ID_NONE,
.video_codec = AV_CODEC_ID_RAWVIDEO,
.write_header = fbdev_write_header,
.write_packet = fbdev_write_packet,
.write_trailer = fbdev_write_trailer,
.get_device_list = fbdev_get_device_list,
.flags = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
.priv_class = &fbdev_class,
};

1
externals/ffmpeg/libavdevice/file_open.c vendored Executable file
View File

@@ -0,0 +1 @@
#include "libavutil/file_open.c"

663
externals/ffmpeg/libavdevice/gdigrab.c vendored Executable file
View File

@@ -0,0 +1,663 @@
/*
* GDI video grab interface
*
* This file is part of FFmpeg.
*
* Copyright (C) 2013 Calvin Walton <calvin.walton@kepstin.ca>
* Copyright (C) 2007-2010 Christophe Gisquet <word1.word2@gmail.com>
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public License
* as published by the Free Software Foundation; either version 2.1
* of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* GDI frame device demuxer
* @author Calvin Walton <calvin.walton@kepstin.ca>
* @author Christophe Gisquet <word1.word2@gmail.com>
*/
#include "config.h"
#include "libavformat/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include <windows.h>
/**
* GDI Device Demuxer context
*/
/**
* GDI Device Demuxer context
*/
struct gdigrab {
const AVClass *class; /**< Class for private options */
int frame_size; /**< Size in bytes of the frame pixel data */
int header_size; /**< Size in bytes of the DIB header */
AVRational time_base; /**< Time base */
int64_t time_frame; /**< Current time */
int draw_mouse; /**< Draw mouse cursor (private option) */
int show_region; /**< Draw border (private option) */
AVRational framerate; /**< Capture framerate (private option) */
int width; /**< Width of the grab frame (private option) */
int height; /**< Height of the grab frame (private option) */
int offset_x; /**< Capture x offset (private option) */
int offset_y; /**< Capture y offset (private option) */
HWND hwnd; /**< Handle of the window for the grab */
HDC source_hdc; /**< Source device context */
HDC dest_hdc; /**< Destination, source-compatible DC */
BITMAPINFO bmi; /**< Information describing DIB format */
HBITMAP hbmp; /**< Information on the bitmap captured */
void *buffer; /**< The buffer containing the bitmap image data */
RECT clip_rect; /**< The subarea of the screen or window to clip */
HWND region_hwnd; /**< Handle of the region border window */
int cursor_error_printed;
};
/* Log a Win32 API failure, including GetLastError(); requires `s1` (the
 * AVFormatContext) to be in scope at the call site. */
#define WIN32_API_ERROR(str) \
av_log(s1, AV_LOG_ERROR, str " (error %li)\n", GetLastError())
/* Thickness, in pixels, of the region outline window's border. */
#define REGION_WND_BORDER 3
/**
* Callback to handle Windows messages for the region outline window.
*
* In particular, this handles painting the frame rectangle.
*
* @param hwnd The region outline window handle.
* @param msg The Windows message.
* @param wparam First Windows message parameter.
* @param lparam Second Windows message parameter.
* @return 0 success, !0 failure
*/
static LRESULT CALLBACK
gdigrab_region_wnd_proc(HWND hwnd, UINT msg, WPARAM wparam, LPARAM lparam)
{
PAINTSTRUCT ps;
HDC hdc;
RECT rect;
switch (msg) {
case WM_PAINT:
hdc = BeginPaint(hwnd, &ps);
/* Paint a black/white/black triple border so the outline stays visible
 * on both light and dark backgrounds. */
GetClientRect(hwnd, &rect);
FrameRect(hdc, &rect, GetStockObject(BLACK_BRUSH));
rect.left++; rect.top++; rect.right--; rect.bottom--;
FrameRect(hdc, &rect, GetStockObject(WHITE_BRUSH));
rect.left++; rect.top++; rect.right--; rect.bottom--;
FrameRect(hdc, &rect, GetStockObject(BLACK_BRUSH));
EndPaint(hwnd, &ps);
break;
default:
/* Everything else gets the stock window behavior. */
return DefWindowProc(hwnd, msg, wparam, lparam);
}
return 0;
}
/**
* Initialize the region outline window.
*
* @param s1 The format context.
* @param gdigrab gdigrab context.
* @return 0 success, !0 failure
*/
static int
gdigrab_region_wnd_init(AVFormatContext *s1, struct gdigrab *gdigrab)
{
HWND hwnd;
RECT rect = gdigrab->clip_rect;
HRGN region = NULL;
HRGN region_interior = NULL;
DWORD style = WS_POPUP | WS_VISIBLE;
DWORD ex = WS_EX_TOOLWINDOW | WS_EX_TOPMOST | WS_EX_TRANSPARENT;
/* Grow the window rect so the border sits OUTSIDE the captured area. */
rect.left -= REGION_WND_BORDER; rect.top -= REGION_WND_BORDER;
rect.right += REGION_WND_BORDER; rect.bottom += REGION_WND_BORDER;
AdjustWindowRectEx(&rect, style, FALSE, ex);
// Create a window with no owner; use WC_DIALOG instead of writing a custom
// window class
hwnd = CreateWindowEx(ex, WC_DIALOG, NULL, style, rect.left, rect.top,
rect.right - rect.left, rect.bottom - rect.top,
NULL, NULL, NULL, NULL);
if (!hwnd) {
WIN32_API_ERROR("Could not create region display window");
goto error;
}
// Set the window shape to only include the border area
GetClientRect(hwnd, &rect);
region = CreateRectRgn(0, 0,
rect.right - rect.left, rect.bottom - rect.top);
region_interior = CreateRectRgn(REGION_WND_BORDER, REGION_WND_BORDER,
rect.right - rect.left - REGION_WND_BORDER,
rect.bottom - rect.top - REGION_WND_BORDER);
/* region := full rect minus interior => a hollow frame, so the window
 * never obscures (or captures into) the grabbed area. */
CombineRgn(region, region, region_interior, RGN_DIFF);
if (!SetWindowRgn(hwnd, region, FALSE)) {
WIN32_API_ERROR("Could not set window region");
goto error;
}
// The "region" memory is now owned by the window
region = NULL;
DeleteObject(region_interior);
/* Subclass the dialog so our WM_PAINT handler draws the border. */
SetWindowLongPtr(hwnd, GWLP_WNDPROC, (LONG_PTR) gdigrab_region_wnd_proc);
ShowWindow(hwnd, SW_SHOW);
gdigrab->region_hwnd = hwnd;
return 0;
error:
if (region)
DeleteObject(region);
if (region_interior)
DeleteObject(region_interior);
if (hwnd)
DestroyWindow(hwnd);
return 1;
}
/**
 * Cleanup/free the region outline window.
 *
 * @param s1 The format context.
 * @param gdigrab gdigrab context.
 */
static void
gdigrab_region_wnd_destroy(AVFormatContext *s1, struct gdigrab *gdigrab)
{
    HWND hwnd = gdigrab->region_hwnd;

    gdigrab->region_hwnd = NULL;
    if (hwnd)
        DestroyWindow(hwnd);
}
/**
 * Process the Windows message queue.
 *
 * This is important to prevent Windows from thinking the window has become
 * unresponsive. As well, things like WM_PAINT (to actually draw the window
 * contents) are handled from the message queue context.
 *
 * @param s1 The format context.
 * @param gdigrab gdigrab context.
 */
static void
gdigrab_region_wnd_update(AVFormatContext *s1, struct gdigrab *gdigrab)
{
    MSG message;

    /* Drain all pending messages for the outline window. */
    while (PeekMessage(&message, gdigrab->region_hwnd, 0, 0, PM_REMOVE))
        DispatchMessage(&message);
}
/**
 * Initializes the gdi grab device demuxer (public device demuxer API).
 *
 * Parses the target from s1->url ("desktop" or "title=<windowname>"),
 * grabs the source device context, creates a compatible destination DC
 * backed by a DIB section the frames are blitted into, and sets up the
 * video stream.
 *
 * @param s1 Context from avformat core
 * @return AVERROR_IO error, 0 success
 */
static int
gdigrab_read_header(AVFormatContext *s1)
{
    struct gdigrab *gdigrab = s1->priv_data;
    HWND hwnd = NULL;              /* NULL = grab the whole desktop */
    HDC source_hdc = NULL;
    HDC dest_hdc = NULL;
    BITMAPINFO bmi;
    HBITMAP hbmp = NULL;
    void *buffer = NULL;
    const char *filename = s1->url;
    const char *name = NULL;
    AVStream *st = NULL;
    int bpp;
    int horzres;
    int vertres;
    int desktophorzres;
    int desktopvertres;
    RECT virtual_rect;
    RECT clip_rect;
    BITMAP bmp;
    int ret;

    /* Work out the grab target from the URL. */
    if (!strncmp(filename, "title=", 6)) {
        name = filename + 6;
        hwnd = FindWindow(NULL, name);
        if (!hwnd) {
            av_log(s1, AV_LOG_ERROR,
                   "Can't find window '%s', aborting.\n", name);
            ret = AVERROR(EIO);
            goto error;
        }
        if (gdigrab->show_region) {
            av_log(s1, AV_LOG_WARNING,
                   "Can't show region when grabbing a window.\n");
            gdigrab->show_region = 0;
        }
    } else if (!strcmp(filename, "desktop")) {
        hwnd = NULL;
    } else {
        av_log(s1, AV_LOG_ERROR,
               "Please use \"desktop\" or \"title=<windowname>\" to specify your target.\n");
        ret = AVERROR(EIO);
        goto error;
    }

    /* This will get the device context for the selected window, or if
     * none, the primary screen */
    source_hdc = GetDC(hwnd);
    if (!source_hdc) {
        WIN32_API_ERROR("Couldn't get window device context");
        ret = AVERROR(EIO);
        goto error;
    }
    bpp = GetDeviceCaps(source_hdc, BITSPIXEL);

    /* Logical vs. physical resolution, needed for HiDPI scaling. */
    horzres = GetDeviceCaps(source_hdc, HORZRES);
    vertres = GetDeviceCaps(source_hdc, VERTRES);
    desktophorzres = GetDeviceCaps(source_hdc, DESKTOPHORZRES);
    desktopvertres = GetDeviceCaps(source_hdc, DESKTOPVERTRES);

    if (hwnd) {
        GetClientRect(hwnd, &virtual_rect);
        /* window -- get the right height and width for scaling DPI */
        virtual_rect.left   = virtual_rect.left   * desktophorzres / horzres;
        virtual_rect.right  = virtual_rect.right  * desktophorzres / horzres;
        virtual_rect.top    = virtual_rect.top    * desktopvertres / vertres;
        virtual_rect.bottom = virtual_rect.bottom * desktopvertres / vertres;
    } else {
        /* desktop -- get the right height and width for scaling DPI */
        virtual_rect.left   = GetSystemMetrics(SM_XVIRTUALSCREEN);
        virtual_rect.top    = GetSystemMetrics(SM_YVIRTUALSCREEN);
        virtual_rect.right  = (virtual_rect.left + GetSystemMetrics(SM_CXVIRTUALSCREEN)) * desktophorzres / horzres;
        virtual_rect.bottom = (virtual_rect.top  + GetSystemMetrics(SM_CYVIRTUALSCREEN)) * desktopvertres / vertres;
    }

    /* If no width or height set, use full screen/window area */
    if (!gdigrab->width || !gdigrab->height) {
        clip_rect.left   = virtual_rect.left;
        clip_rect.top    = virtual_rect.top;
        clip_rect.right  = virtual_rect.right;
        clip_rect.bottom = virtual_rect.bottom;
    } else {
        clip_rect.left   = gdigrab->offset_x;
        clip_rect.top    = gdigrab->offset_y;
        clip_rect.right  = gdigrab->width  + gdigrab->offset_x;
        clip_rect.bottom = gdigrab->height + gdigrab->offset_y;
    }

    if (clip_rect.left < virtual_rect.left ||
            clip_rect.top < virtual_rect.top ||
            clip_rect.right > virtual_rect.right ||
            clip_rect.bottom > virtual_rect.bottom) {
        av_log(s1, AV_LOG_ERROR,
               "Capture area (%li,%li),(%li,%li) extends outside window area (%li,%li),(%li,%li)",
               clip_rect.left, clip_rect.top,
               clip_rect.right, clip_rect.bottom,
               virtual_rect.left, virtual_rect.top,
               virtual_rect.right, virtual_rect.bottom);
        ret = AVERROR(EIO);
        goto error;
    }

    if (name) {
        av_log(s1, AV_LOG_INFO,
               "Found window %s, capturing %lix%lix%i at (%li,%li)\n",
               name,
               clip_rect.right - clip_rect.left,
               clip_rect.bottom - clip_rect.top,
               bpp, clip_rect.left, clip_rect.top);
    } else {
        av_log(s1, AV_LOG_INFO,
               "Capturing whole desktop as %lix%lix%i at (%li,%li)\n",
               clip_rect.right - clip_rect.left,
               clip_rect.bottom - clip_rect.top,
               bpp, clip_rect.left, clip_rect.top);
    }

    /* Only whole-byte pixel formats and non-empty areas are supported. */
    if (clip_rect.right - clip_rect.left <= 0 ||
            clip_rect.bottom - clip_rect.top <= 0 || bpp%8) {
        av_log(s1, AV_LOG_ERROR, "Invalid properties, aborting\n");
        ret = AVERROR(EIO);
        goto error;
    }

    dest_hdc = CreateCompatibleDC(source_hdc);
    if (!dest_hdc) {
        WIN32_API_ERROR("Screen DC CreateCompatibleDC");
        ret = AVERROR(EIO);
        goto error;
    }

    /* Create a DIB and select it into the dest_hdc */
    bmi.bmiHeader.biSize          = sizeof(BITMAPINFOHEADER);
    bmi.bmiHeader.biWidth         = clip_rect.right - clip_rect.left;
    /* Negative height requests a top-down DIB (first row = top of image). */
    bmi.bmiHeader.biHeight        = -(clip_rect.bottom - clip_rect.top);
    bmi.bmiHeader.biPlanes        = 1;
    bmi.bmiHeader.biBitCount      = bpp;
    bmi.bmiHeader.biCompression   = BI_RGB;
    bmi.bmiHeader.biSizeImage     = 0;
    bmi.bmiHeader.biXPelsPerMeter = 0;
    bmi.bmiHeader.biYPelsPerMeter = 0;
    bmi.bmiHeader.biClrUsed       = 0;
    bmi.bmiHeader.biClrImportant  = 0;
    hbmp = CreateDIBSection(dest_hdc, &bmi, DIB_RGB_COLORS,
                            &buffer, NULL, 0);
    if (!hbmp) {
        WIN32_API_ERROR("Creating DIB Section");
        ret = AVERROR(EIO);
        goto error;
    }

    if (!SelectObject(dest_hdc, hbmp)) {
        WIN32_API_ERROR("SelectObject");
        ret = AVERROR(EIO);
        goto error;
    }

    /* Get info from the bitmap */
    GetObject(hbmp, sizeof(BITMAP), &bmp);

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        ret = AVERROR(ENOMEM);
        goto error;
    }
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    gdigrab->frame_size  = bmp.bmWidthBytes * bmp.bmHeight * bmp.bmPlanes;
    gdigrab->header_size = sizeof(BITMAPFILEHEADER) + sizeof(BITMAPINFOHEADER) +
                           (bpp <= 8 ? (1 << bpp) : 0) * sizeof(RGBQUAD) /* palette size */;
    gdigrab->time_base   = av_inv_q(gdigrab->framerate);
    gdigrab->time_frame  = av_gettime() / av_q2d(gdigrab->time_base);

    gdigrab->hwnd       = hwnd;
    gdigrab->source_hdc = source_hdc;
    gdigrab->dest_hdc   = dest_hdc;
    gdigrab->hbmp       = hbmp;
    gdigrab->bmi        = bmi;
    gdigrab->buffer     = buffer;
    gdigrab->clip_rect  = clip_rect;

    gdigrab->cursor_error_printed = 0;

    if (gdigrab->show_region) {
        if (gdigrab_region_wnd_init(s1, gdigrab)) {
            ret = AVERROR(EIO);
            goto error;
        }
    }

    st->avg_frame_rate = av_inv_q(gdigrab->time_base);

    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id   = AV_CODEC_ID_BMP;
    st->codecpar->bit_rate   = (gdigrab->header_size + gdigrab->frame_size) * 1/av_q2d(gdigrab->time_base) * 8;

    return 0;

error:
    /* A DC obtained with GetDC() must be released with ReleaseDC() only;
     * the previous additional DeleteDC(source_hdc) operated on an already
     * released handle, which the Win32 documentation forbids. */
    if (source_hdc)
        ReleaseDC(hwnd, source_hdc);
    if (dest_hdc)
        DeleteDC(dest_hdc);
    if (hbmp)
        DeleteObject(hbmp);
    return ret;
}
/**
 * Paints a mouse pointer in a Win32 image.
 *
 * Draws the current cursor into the destination DC at its DPI-corrected
 * position relative to the capture clip rectangle. Cursor-related API
 * failures are logged once only, via cursor_error_printed.
 *
 * @param s1      Context of the log information
 * @param gdigrab Current grab structure
 */
static void paint_mouse_pointer(AVFormatContext *s1, struct gdigrab *gdigrab)
{
    CURSORINFO ci = {0};

#define CURSOR_ERROR(str)                 \
    if (!gdigrab->cursor_error_printed) {       \
        WIN32_API_ERROR(str);                   \
        gdigrab->cursor_error_printed = 1;      \
    }

    ci.cbSize = sizeof(ci);

    if (GetCursorInfo(&ci)) {
        HCURSOR icon;
        ICONINFO info;
        POINT pos;
        RECT clip_rect = gdigrab->clip_rect;
        HWND hwnd = gdigrab->hwnd;
        int horzres = GetDeviceCaps(gdigrab->source_hdc, HORZRES);
        int vertres = GetDeviceCaps(gdigrab->source_hdc, VERTRES);
        int desktophorzres = GetDeviceCaps(gdigrab->source_hdc, DESKTOPHORZRES);
        int desktopvertres = GetDeviceCaps(gdigrab->source_hdc, DESKTOPVERTRES);

        /* Check visibility before copying the cursor: the previous order
         * called CopyCursor() first and leaked the copy on every frame
         * while the cursor was hidden. */
        if (ci.flags != CURSOR_SHOWING)
            return;

        icon = CopyCursor(ci.hCursor);
        info.hbmMask = NULL;
        info.hbmColor = NULL;

        if (!icon) {
            /* Use the standard arrow cursor as a fallback.
             * You'll probably only hit this in Wine, which can't fetch
             * the current system cursor. */
            icon = CopyCursor(LoadCursor(NULL, IDC_ARROW));
        }

        if (!GetIconInfo(icon, &info)) {
            CURSOR_ERROR("Could not get icon info");
            goto icon_error;
        }

        if (hwnd) {
            RECT rect;
            if (GetWindowRect(hwnd, &rect)) {
                pos.x = ci.ptScreenPos.x - clip_rect.left - info.xHotspot - rect.left;
                pos.y = ci.ptScreenPos.y - clip_rect.top - info.yHotspot - rect.top;
                /* Scale for HiDPI so the pointer lands at the right spot. */
                pos.x = pos.x * desktophorzres / horzres;
                pos.y = pos.y * desktopvertres / vertres;
            } else {
                CURSOR_ERROR("Couldn't get window rectangle");
                goto icon_error;
            }
        } else {
            /* Scale for HiDPI so the pointer lands at the right spot. */
            pos.x = ci.ptScreenPos.x * desktophorzres / horzres - clip_rect.left - info.xHotspot;
            pos.y = ci.ptScreenPos.y * desktopvertres / vertres - clip_rect.top - info.yHotspot;
        }

        av_log(s1, AV_LOG_DEBUG, "Cursor pos (%li,%li) -> (%li,%li)\n",
               ci.ptScreenPos.x, ci.ptScreenPos.y, pos.x, pos.y);

        if (pos.x >= 0 && pos.x <= clip_rect.right - clip_rect.left &&
                pos.y >= 0 && pos.y <= clip_rect.bottom - clip_rect.top) {
            if (!DrawIcon(gdigrab->dest_hdc, pos.x, pos.y, icon))
                CURSOR_ERROR("Couldn't draw icon");
        }

icon_error:
        if (info.hbmMask)
            DeleteObject(info.hbmMask);
        if (info.hbmColor)
            DeleteObject(info.hbmColor);
        if (icon)
            DestroyCursor(icon);
    } else {
        CURSOR_ERROR("Couldn't get cursor info");
    }
}
/**
 * Grabs a frame from gdi (public device demuxer API).
 *
 * Waits until the next frame is due (per the configured framerate),
 * blits the capture area into the DIB section, optionally paints the
 * mouse pointer, and wraps the result as a complete BMP file in pkt.
 *
 * @param s1 Context from avformat core
 * @param pkt Packet holding the grabbed frame
 * @return frame size in bytes
 */
static int gdigrab_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    struct gdigrab *gdigrab = s1->priv_data;
    HDC dest_hdc = gdigrab->dest_hdc;
    HDC source_hdc = gdigrab->source_hdc;
    RECT clip_rect = gdigrab->clip_rect;
    AVRational time_base = gdigrab->time_base;
    int64_t time_frame = gdigrab->time_frame;
    BITMAPFILEHEADER bfh;
    int file_size = gdigrab->header_size + gdigrab->frame_size;
    int64_t curtime, delay;
    /* Calculate the time of the next frame */
    time_frame += INT64_C(1000000);
    /* Run Window message processing queue */
    if (gdigrab->show_region)
        gdigrab_region_wnd_update(s1, gdigrab);
    /* wait based on the frame rate */
    for (;;) {
        curtime = av_gettime();
        delay = time_frame * av_q2d(time_base) - curtime;
        if (delay <= 0) {
            /* If we are more than one frame behind, skip ahead one frame
             * interval instead of trying to catch up. */
            if (delay < INT64_C(-1000000) * av_q2d(time_base)) {
                time_frame += INT64_C(1000000);
            }
            break;
        }
        if (s1->flags & AVFMT_FLAG_NONBLOCK) {
            return AVERROR(EAGAIN);
        } else {
            av_usleep(delay);
        }
    }
    if (av_new_packet(pkt, file_size) < 0)
        return AVERROR(ENOMEM);
    pkt->pts = curtime;
    /* Blit screen grab; CAPTUREBLT also includes layered windows. */
    if (!BitBlt(dest_hdc, 0, 0,
                clip_rect.right - clip_rect.left,
                clip_rect.bottom - clip_rect.top,
                source_hdc,
                clip_rect.left, clip_rect.top, SRCCOPY | CAPTUREBLT)) {
        WIN32_API_ERROR("Failed to capture image");
        return AVERROR(EIO);
    }
    if (gdigrab->draw_mouse)
        paint_mouse_pointer(s1, gdigrab);
    /* Copy bits to packet data: build a full BMP file header so the
     * payload decodes with the BMP codec. */
    bfh.bfType = 0x4d42; /* "BM" in little-endian */
    bfh.bfSize = file_size;
    bfh.bfReserved1 = 0;
    bfh.bfReserved2 = 0;
    bfh.bfOffBits = gdigrab->header_size;
    memcpy(pkt->data, &bfh, sizeof(bfh));
    memcpy(pkt->data + sizeof(bfh), &gdigrab->bmi.bmiHeader, sizeof(gdigrab->bmi.bmiHeader));
    /* Paletted formats also need the color table after the info header. */
    if (gdigrab->bmi.bmiHeader.biBitCount <= 8)
        GetDIBColorTable(dest_hdc, 0, 1 << gdigrab->bmi.bmiHeader.biBitCount,
                (RGBQUAD *) (pkt->data + sizeof(bfh) + sizeof(gdigrab->bmi.bmiHeader)));
    memcpy(pkt->data + gdigrab->header_size, gdigrab->buffer, gdigrab->frame_size);
    gdigrab->time_frame = time_frame;
    return gdigrab->header_size + gdigrab->frame_size;
}
/**
 * Closes gdi frame grabber (public device demuxer API).
 *
 * Releases the source DC, destroys the destination DC and DIB, and
 * tears down the region outline window if one was shown.
 *
 * @param s1 Context from avformat core
 * @return 0 success, !0 failure
 */
static int gdigrab_read_close(AVFormatContext *s1)
{
    struct gdigrab *s = s1->priv_data;

    if (s->show_region)
        gdigrab_region_wnd_destroy(s1, s);

    /* source_hdc was obtained with GetDC(), so it must be released with
     * ReleaseDC() only; the previous additional DeleteDC(s->source_hdc)
     * operated on an already released handle, which the Win32
     * documentation forbids. */
    if (s->source_hdc)
        ReleaseDC(s->hwnd, s->source_hdc);
    if (s->dest_hdc)
        DeleteDC(s->dest_hdc);
    if (s->hbmp)
        DeleteObject(s->hbmp);

    return 0;
}
#define OFFSET(x) offsetof(struct gdigrab, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM
/* User-facing AVOptions of the gdigrab input device. */
static const AVOption options[] = {
    { "draw_mouse", "draw the mouse pointer", OFFSET(draw_mouse), AV_OPT_TYPE_INT, {.i64 = 1}, 0, 1, DEC },
    { "show_region", "draw border around capture area", OFFSET(show_region), AV_OPT_TYPE_INT, {.i64 = 0}, 0, 1, DEC },
    { "framerate", "set video frame rate", OFFSET(framerate), AV_OPT_TYPE_VIDEO_RATE, {.str = "ntsc"}, 0, INT_MAX, DEC },
    { "video_size", "set video frame size", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, DEC },
    { "offset_x", "capture area x offset", OFFSET(offset_x), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC },
    { "offset_y", "capture area y offset", OFFSET(offset_y), AV_OPT_TYPE_INT, {.i64 = 0}, INT_MIN, INT_MAX, DEC },
    { NULL },
};
/* AVClass so the private options above are visible to the option system. */
static const AVClass gdigrab_class = {
    .class_name = "GDIgrab indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
/** gdi grabber device demuxer declaration */
AVInputFormat ff_gdigrab_demuxer = {
    .name           = "gdigrab",
    .long_name      = NULL_IF_CONFIG_SMALL("GDI API Windows frame grabber"),
    .priv_data_size = sizeof(struct gdigrab),
    .read_header    = gdigrab_read_header,
    .read_packet    = gdigrab_read_packet,
    .read_close     = gdigrab_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &gdigrab_class,
};

511
externals/ffmpeg/libavdevice/iec61883.c vendored Executable file
View File

@@ -0,0 +1,511 @@
/*
* Copyright (c) 2012 Georg Lippitsch <georg.lippitsch@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* libiec61883 interface
*/
#include <poll.h>
#include <libraw1394/raw1394.h>
#include <libavc1394/avc1394.h>
#include <libavc1394/rom1394.h>
#include <libiec61883/iec61883.h>
#include "libavformat/dv.h"
#include "libavformat/mpegts.h"
#include "libavutil/opt.h"
#include "avdevice.h"
#define THREADS HAVE_PTHREADS
#if THREADS
#include <pthread.h>
#endif
#define MOTDCT_SPEC_ID 0x00005068
#define IEC61883_AUTO 0
#define IEC61883_DV 1
#define IEC61883_HDV 2
/**
 * For DV, one packet corresponds exactly to one frame.
 * For HDV, these are MPEG2 transport stream packets.
 * The queue is implemented as linked list.
 */
typedef struct DVPacket {
    uint8_t *buf;                    ///< actual buffer data
    int len;                         ///< size of buffer allocated
    struct DVPacket *next;           ///< next DVPacket
} DVPacket;
struct iec61883_data {
    AVClass *class;                  ///< AVClass for logging and AVOptions
    raw1394handle_t raw1394;         ///< handle for libraw1394
    iec61883_dv_fb_t iec61883_dv;    ///< handle for libiec61883 when used with DV
    iec61883_mpeg2_t iec61883_mpeg2; ///< handle for libiec61883 when used with HDV
    DVDemuxContext *dv_demux;        ///< generic DV muxing/demuxing context
    MpegTSContext *mpeg_demux;       ///< generic HDV muxing/demuxing context
    DVPacket *queue_first;           ///< first element of packet queue
    DVPacket *queue_last;            ///< last element of packet queue
    char *device_guid;               ///< to select one of multiple DV devices
    int packets;                     ///< Number of packets queued
    int max_packets;                 ///< Max. number of packets in queue
    int bandwidth;                   ///< returned by libiec61883
    int channel;                     ///< returned by libiec61883
    int input_port;                  ///< returned by libiec61883
    int type;                        ///< Stream type, to distinguish DV/HDV
    int node;                        ///< returned by libiec61883
    int output_port;                 ///< returned by libiec61883
    int thread_loop;                 ///< Condition for thread while-loop
    int receiving;                   ///< True as soon data from device available
    int receive_error;               ///< Set in receive task in case of error
    int eof;                         ///< True as soon as no more data available
    struct pollfd raw1394_poll;      ///< to poll for new data from libraw1394
    /** Parse function for DV/HDV differs, so this is set before packets arrive */
    int (*parse_queue)(struct iec61883_data *dv, AVPacket *pkt);
#if THREADS
    pthread_t receive_task_thread;   ///< background thread running iec61883_receive_task()
    pthread_mutex_t mutex;           ///< protects queue, packets, eof
    pthread_cond_t cond;             ///< signalled when a packet is queued or on EOF
#endif
};
/**
 * libiec61883 receive callback: copy one chunk of incoming data into a
 * newly allocated DVPacket and append it to the queue.
 *
 * Runs from raw1394_loop_iterate() in the receive task. Under THREADS the
 * queue is mutex-protected and the condition variable is signalled so
 * iec61883_read_packet() can wake up.
 *
 * @return 0 on success or queue overrun (chunk dropped), -1 on allocation
 *         failure
 */
static int iec61883_callback(unsigned char *data, int length,
                             int complete, void *callback_data)
{
    struct iec61883_data *dv = callback_data;
    DVPacket *packet;
    int ret;
#if THREADS
    pthread_mutex_lock(&dv->mutex);
#endif
    /* Bounded queue: drop the chunk rather than grow without limit. */
    if (dv->packets >= dv->max_packets) {
        av_log(NULL, AV_LOG_ERROR, "DV packet queue overrun, dropping.\n");
        ret = 0;
        goto exit;
    }
    packet = av_mallocz(sizeof(*packet));
    if (!packet) {
        ret = -1;
        goto exit;
    }
    packet->buf = av_malloc(length + AV_INPUT_BUFFER_PADDING_SIZE);
    if (!packet->buf) {
        av_free(packet);
        ret = -1;
        goto exit;
    }
    packet->len = length;
    /* Copy the payload and zero the padding the demuxers expect. */
    memcpy(packet->buf, data, length);
    memset(packet->buf + length, 0, AV_INPUT_BUFFER_PADDING_SIZE);
    /* Append to the singly linked queue. */
    if (dv->queue_first) {
        dv->queue_last->next = packet;
        dv->queue_last = packet;
    } else {
        dv->queue_first = packet;
        dv->queue_last = packet;
    }
    dv->packets++;
    ret = 0;
exit:
#if THREADS
    pthread_cond_broadcast(&dv->cond);
    pthread_mutex_unlock(&dv->mutex);
#endif
    return ret;
}
/**
 * Receive task: poll the raw1394 file descriptor and drive the libraw1394
 * event loop, which in turn invokes iec61883_callback() with new data.
 *
 * Under THREADS this runs as a dedicated thread until thread_loop is
 * cleared; otherwise it is called inline from iec61883_read_packet() and
 * performs a single poll iteration.
 */
static void *iec61883_receive_task(void *opaque)
{
    struct iec61883_data *dv = (struct iec61883_data *)opaque;
    int result;
#if THREADS
    while (dv->thread_loop)
#endif
    {
        /* Retry poll on transient errors; 200 ms timeout. */
        while ((result = poll(&dv->raw1394_poll, 1, 200)) < 0) {
            if (!(errno == EAGAIN || errno == EINTR)) {
                av_log(NULL, AV_LOG_ERROR, "Raw1394 poll error occurred.\n");
                dv->receive_error = AVERROR(EIO);
                return NULL;
            }
        }
        if (result > 0 && ((dv->raw1394_poll.revents & POLLIN)
                           || (dv->raw1394_poll.revents & POLLPRI))) {
            dv->receiving = 1;
            raw1394_loop_iterate(dv->raw1394);
        } else if (dv->receiving) {
            /* We were receiving before but the stream dried up: treat a
             * poll timeout as end of stream. */
            av_log(NULL, AV_LOG_ERROR, "No more input data available\n");
#if THREADS
            pthread_mutex_lock(&dv->mutex);
            dv->eof = 1;
            pthread_cond_broadcast(&dv->cond);
            pthread_mutex_unlock(&dv->mutex);
#else
            dv->eof = 1;
#endif
            return NULL;
        }
    }
    return NULL;
}
/**
 * Try to produce one DV frame packet from the queue.
 *
 * First drains any packet pending inside the DV demuxer, then feeds the
 * next queued buffer to it. On success the DVPacket's buffer ownership
 * passes to pkt (via av_packet_from_data); on failure it is freed here.
 *
 * @return packet size on success, -1 if no complete frame is available
 */
static int iec61883_parse_queue_dv(struct iec61883_data *dv, AVPacket *pkt)
{
    DVPacket *packet;
    int size;
    size = avpriv_dv_get_packet(dv->dv_demux, pkt);
    if (size > 0)
        return size;
    packet = dv->queue_first;
    if (!packet)
        return -1;
    size = avpriv_dv_produce_packet(dv->dv_demux, pkt,
                                    packet->buf, packet->len, -1);
    dv->queue_first = packet->next;
    /* On failure the buffer was not handed over; free it ourselves. */
    if (size < 0)
        av_free(packet->buf);
    av_free(packet);
    dv->packets--;
    if (size < 0)
        return -1;
    /* Give pkt ownership of the buffer so it is freed with the packet. */
    if (av_packet_from_data(pkt, pkt->data, pkt->size) < 0) {
        av_freep(&pkt->data);
        av_packet_unref(pkt);
        return -1;
    }
    return size;
}
/**
 * Try to produce one packet from the queued MPEG-TS chunks.
 *
 * Feeds queued buffers to the MPEG-TS parser one by one until it emits
 * a packet; consumed buffers are freed here.
 *
 * @return packet size on success, -1 if the queue ran dry first
 */
static int iec61883_parse_queue_hdv(struct iec61883_data *dv, AVPacket *pkt)
{
    DVPacket *head;
    int ret;

    while ((head = dv->queue_first)) {
        ret = avpriv_mpegts_parse_packet(dv->mpeg_demux, pkt, head->buf,
                                         head->len);
        dv->queue_first = head->next;
        av_freep(&head->buf);
        av_freep(&head);
        dv->packets--;
        if (ret > 0)
            return ret;
    }

    return -1;
}
/**
 * Open the IEEE1394 interface, locate an AV/C device (optionally by port
 * number or GUID), detect DV vs. HDV, connect to the device and start
 * reception (spawning the receive thread when THREADS is enabled).
 *
 * @return 0 on success, AVERROR(EIO) on failure
 */
static int iec61883_read_header(AVFormatContext *context)
{
    struct iec61883_data *dv = context->priv_data;
    struct raw1394_portinfo pinf[16];
    rom1394_directory rom_dir;
    char *endptr;
    int inport;
    int nb_ports;
    int port = -1;
    int response;
    int i, j = 0;
    uint64_t guid = 0;
    dv->input_port = -1;
    dv->output_port = -1;
    dv->channel = -1;
    dv->raw1394 = raw1394_new_handle();
    if (!dv->raw1394) {
        av_log(context, AV_LOG_ERROR, "Failed to open IEEE1394 interface.\n");
        return AVERROR(EIO);
    }
    if ((nb_ports = raw1394_get_port_info(dv->raw1394, pinf, 16)) < 0) {
        av_log(context, AV_LOG_ERROR, "Failed to get number of IEEE1394 ports.\n");
        goto fail;
    }
    /* URL is either a port number or "auto" for auto-detection. */
    inport = strtol(context->url, &endptr, 10);
    if (endptr != context->url && *endptr == '\0') {
        av_log(context, AV_LOG_INFO, "Selecting IEEE1394 port: %d\n", inport);
        j = inport;
        nb_ports = inport + 1;
    } else if (strcmp(context->url, "auto")) {
        av_log(context, AV_LOG_ERROR, "Invalid input \"%s\", you should specify "
               "\"auto\" for auto-detection, or the port number.\n", context->url);
        goto fail;
    }
    if (dv->device_guid) {
        if (sscanf(dv->device_guid, "%"SCNu64, &guid) != 1) {
            av_log(context, AV_LOG_INFO, "Invalid dvguid parameter: %s\n",
                   dv->device_guid);
            goto fail;
        }
    }
    /* Scan ports/nodes until a matching device is found. */
    for (; j < nb_ports && port==-1; ++j) {
        /* A fresh handle is needed per port. */
        raw1394_destroy_handle(dv->raw1394);
        if (!(dv->raw1394 = raw1394_new_handle_on_port(j))) {
            av_log(context, AV_LOG_ERROR, "Failed setting IEEE1394 port.\n");
            goto fail;
        }
        for (i=0; i<raw1394_get_nodecount(dv->raw1394); ++i) {
            /* Select device explicitly by GUID */
            if (guid > 1) {
                if (guid == rom1394_get_guid(dv->raw1394, i)) {
                    dv->node = i;
                    port = j;
                    break;
                }
            } else {
                /* Select first AV/C tape recorder player node */
                if (rom1394_get_directory(dv->raw1394, i, &rom_dir) < 0)
                    continue;
                if (((rom1394_get_node_type(&rom_dir) == ROM1394_NODE_TYPE_AVC) &&
                     avc1394_check_subunit_type(dv->raw1394, i, AVC1394_SUBUNIT_TYPE_VCR)) ||
                    (rom_dir.unit_spec_id == MOTDCT_SPEC_ID)) {
                    rom1394_free_directory(&rom_dir);
                    dv->node = i;
                    port = j;
                    break;
                }
                rom1394_free_directory(&rom_dir);
            }
        }
    }
    if (port == -1) {
        av_log(context, AV_LOG_ERROR, "No AV/C devices found.\n");
        goto fail;
    }
    /* Provide bus sanity for multiple connections */
    iec61883_cmp_normalize_output(dv->raw1394, 0xffc0 | dv->node);
    /* Find out if device is DV or HDV */
    if (dv->type == IEC61883_AUTO) {
        response = avc1394_transaction(dv->raw1394, dv->node,
                                       AVC1394_CTYPE_STATUS |
                                       AVC1394_SUBUNIT_TYPE_TAPE_RECORDER |
                                       AVC1394_SUBUNIT_ID_0 |
                                       AVC1394_VCR_COMMAND_OUTPUT_SIGNAL_MODE |
                                       0xFF, 2);
        response = AVC1394_GET_OPERAND0(response);
        /* These operand values indicate MPEG2-TS (HDV) output modes. */
        dv->type = (response == 0x10 || response == 0x90 || response == 0x1A || response == 0x9A) ?
            IEC61883_HDV : IEC61883_DV;
    }
    /* Connect to device, and do initialization */
    dv->channel = iec61883_cmp_connect(dv->raw1394, dv->node, &dv->output_port,
                                       raw1394_get_local_id(dv->raw1394),
                                       &dv->input_port, &dv->bandwidth);
    if (dv->channel < 0)
        dv->channel = 63;
    if (!dv->max_packets)
        dv->max_packets = 100;
    if (CONFIG_MPEGTS_DEMUXER && dv->type == IEC61883_HDV) {
        /* Init HDV receive */
        avformat_new_stream(context, NULL);
        dv->mpeg_demux = avpriv_mpegts_parse_open(context);
        if (!dv->mpeg_demux)
            goto fail;
        dv->parse_queue = iec61883_parse_queue_hdv;
        dv->iec61883_mpeg2 = iec61883_mpeg2_recv_init(dv->raw1394,
                                                      (iec61883_mpeg2_recv_t)iec61883_callback,
                                                      dv);
        /* HDV chunks are much smaller than DV frames; scale the limit. */
        dv->max_packets *= 766;
    } else {
        /* Init DV receive */
        dv->dv_demux = avpriv_dv_init_demux(context);
        if (!dv->dv_demux)
            goto fail;
        dv->parse_queue = iec61883_parse_queue_dv;
        dv->iec61883_dv = iec61883_dv_fb_init(dv->raw1394, iec61883_callback, dv);
    }
    dv->raw1394_poll.fd = raw1394_get_fd(dv->raw1394);
    dv->raw1394_poll.events = POLLIN | POLLERR | POLLHUP | POLLPRI;
    /* Actually start receiving */
    if (dv->type == IEC61883_HDV)
        iec61883_mpeg2_recv_start(dv->iec61883_mpeg2, dv->channel);
    else
        iec61883_dv_fb_start(dv->iec61883_dv, dv->channel);
#if THREADS
    dv->thread_loop = 1;
    if (pthread_mutex_init(&dv->mutex, NULL))
        goto fail;
    if (pthread_cond_init(&dv->cond, NULL))
        goto fail;
    if (pthread_create(&dv->receive_task_thread, NULL, iec61883_receive_task, dv))
        goto fail;
#endif
    return 0;
fail:
    raw1394_destroy_handle(dv->raw1394);
    return AVERROR(EIO);
}
/**
 * Read one packet (public device demuxer API).
 *
 * Parses frames from the receive queue. Under THREADS this blocks on the
 * condition variable until the receive thread queues enough data or EOF
 * is reached; otherwise the receive task is driven inline.
 *
 * @return packet size on success, a negative AVERROR on failure/EOF
 */
static int iec61883_read_packet(AVFormatContext *context, AVPacket *pkt)
{
    struct iec61883_data *dv = context->priv_data;
    int size;

#if THREADS
    pthread_mutex_lock(&dv->mutex);
    while ((size = dv->parse_queue(dv, pkt)) == -1)
        if (!dv->eof)
            pthread_cond_wait(&dv->cond, &dv->mutex);
        else
            break;
    pthread_mutex_unlock(&dv->mutex);
#else
    /* No thread: pump the receive loop ourselves until a frame parses.
     * (The previous version also declared an unused variable here.) */
    while ((size = dv->parse_queue(dv, pkt)) == -1) {
        iec61883_receive_task((void *)dv);
        if (dv->receive_error)
            return dv->receive_error;
    }
#endif

    return size;
}
/**
 * Close the device (public device demuxer API): stop the receive thread,
 * shut down reception, drain the packet queue, disconnect and free the
 * raw1394 handle.
 *
 * @return 0
 */
static int iec61883_close(AVFormatContext *context)
{
    struct iec61883_data *dv = context->priv_data;
#if THREADS
    /* Ask the receive thread to exit and wait for it. */
    dv->thread_loop = 0;
    pthread_join(dv->receive_task_thread, NULL);
    pthread_cond_destroy(&dv->cond);
    pthread_mutex_destroy(&dv->mutex);
#endif
    if (CONFIG_MPEGTS_DEMUXER && dv->type == IEC61883_HDV) {
        iec61883_mpeg2_recv_stop(dv->iec61883_mpeg2);
        iec61883_mpeg2_close(dv->iec61883_mpeg2);
        avpriv_mpegts_parse_close(dv->mpeg_demux);
    } else {
        iec61883_dv_fb_stop(dv->iec61883_dv);
        iec61883_dv_fb_close(dv->iec61883_dv);
        av_freep(&dv->dv_demux);
    }
    /* Free any data still queued but never parsed. */
    while (dv->queue_first) {
        DVPacket *packet = dv->queue_first;
        dv->queue_first = packet->next;
        av_freep(&packet->buf);
        av_freep(&packet);
    }
    iec61883_cmp_disconnect(dv->raw1394, dv->node, dv->output_port,
                            raw1394_get_local_id(dv->raw1394),
                            dv->input_port, dv->channel, dv->bandwidth);
    raw1394_destroy_handle(dv->raw1394);
    return 0;
}
/* User-facing AVOptions of the iec61883 input device. */
static const AVOption options[] = {
    { "dvtype", "override autodetection of DV/HDV", offsetof(struct iec61883_data, type), AV_OPT_TYPE_INT, {.i64 = IEC61883_AUTO}, IEC61883_AUTO, IEC61883_HDV, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
    { "auto", "auto detect DV/HDV", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_AUTO}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
    { "dv", "force device being treated as DV device", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_DV}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
    { "hdv" , "force device being treated as HDV device", 0, AV_OPT_TYPE_CONST, {.i64 = IEC61883_HDV}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "dvtype" },
    { "dvbuffer", "set queue buffer size (in packets)", offsetof(struct iec61883_data, max_packets), AV_OPT_TYPE_INT, {.i64 = 0}, 0, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "dvguid", "select one of multiple DV devices by its GUID", offsetof(struct iec61883_data, device_guid), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};
/* AVClass so the private options above are visible to the option system. */
static const AVClass iec61883_class = {
    .class_name = "iec61883 indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
/** libiec61883 device demuxer declaration */
AVInputFormat ff_iec61883_demuxer = {
    .name           = "iec61883",
    .long_name      = NULL_IF_CONFIG_SMALL("libiec61883 (new DV1394) A/V input device"),
    .priv_data_size = sizeof(struct iec61883_data),
    .read_header    = iec61883_read_header,
    .read_packet    = iec61883_read_packet,
    .read_close     = iec61883_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &iec61883_class,
};

28
externals/ffmpeg/libavdevice/internal.h vendored Executable file
View File

@@ -0,0 +1,28 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_INTERNAL_H
#define AVDEVICE_INTERNAL_H
#include "libavformat/avformat.h"
av_warn_unused_result
int ff_alloc_input_device_context(struct AVFormatContext **avctx, struct AVInputFormat *iformat,
const char *format);
#endif

354
externals/ffmpeg/libavdevice/jack.c vendored Executable file
View File

@@ -0,0 +1,354 @@
/*
* JACK Audio Connection Kit input device
* Copyright (c) 2009 Samalyse
* Author: Olivier Guilyardi <olivier samalyse com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <semaphore.h>
#include <jack/jack.h>
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/fifo.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "timefilter.h"
#include "avdevice.h"
/**
 * Size of the internal FIFO buffers as a number of audio packets
 */
#define FIFO_PACKETS_NUM 16
typedef struct JackData {
    AVClass *class;                ///< AVClass for logging and AVOptions
    jack_client_t * client;        ///< JACK client handle; NULL after server shutdown
    int activated;                 ///< whether jack_activate() has been called
    sem_t packet_count;            ///< counts filled packets available to read
    jack_nframes_t sample_rate;    ///< sample rate reported by the JACK server
    jack_nframes_t buffer_size;    ///< frames per JACK process cycle
    jack_port_t ** ports;          ///< registered input ports, one per channel
    int nports;                    ///< number of channels/ports
    TimeFilter * timefilter;       ///< filter smoothing cycle timestamps
    AVFifoBuffer * new_pkts;       ///< empty pre-allocated packets for the RT callback
    AVFifoBuffer * filled_pkts;    ///< packets filled by the RT callback
    int pkt_xrun;                  ///< set when the RT callback ran out of packets
    int jack_xrun;                 ///< set when JACK reported an xrun
} JackData;
/**
 * JACK realtime process callback: interleave one cycle of audio from all
 * ports into a pre-allocated packet and hand it to the reader.
 *
 * @return 0 (JACK requires the callback to return 0 on success)
 */
static int process_callback(jack_nframes_t nframes, void *arg)
{
    /* Warning: this function runs in realtime. One mustn't allocate memory here
     * or do any other thing that could block. */
    int i, j;
    JackData *self = arg;
    float * buffer;
    jack_nframes_t latency, cycle_delay;
    AVPacket pkt;
    float *pkt_data;
    double cycle_time;
    /* client is cleared by shutdown_callback() when the server goes away */
    if (!self->client)
        return 0;
    /* The approximate delay since the hardware interrupt as a number of frames */
    cycle_delay = jack_frames_since_cycle_start(self->client);
    /* Retrieve filtered cycle time */
    cycle_time = ff_timefilter_update(self->timefilter,
                                      av_gettime() / 1000000.0 - (double) cycle_delay / self->sample_rate,
                                      self->buffer_size);
    /* Check if an empty packet is available, and if there's enough space to send it back once filled */
    if ((av_fifo_size(self->new_pkts) < sizeof(pkt)) || (av_fifo_space(self->filled_pkts) < sizeof(pkt))) {
        self->pkt_xrun = 1;
        return 0;
    }
    /* Retrieve empty (but allocated) packet */
    av_fifo_generic_read(self->new_pkts, &pkt, sizeof(pkt), NULL);
    pkt_data = (float *) pkt.data;
    latency = 0;
    /* Copy and interleave audio data from the JACK buffer into the packet */
    for (i = 0; i < self->nports; i++) {
        jack_latency_range_t range;
        jack_port_get_latency_range(self->ports[i], JackCaptureLatency, &range);
        latency += range.max;
        buffer = jack_port_get_buffer(self->ports[i], self->buffer_size);
        for (j = 0; j < self->buffer_size; j++)
            pkt_data[j * self->nports + i] = buffer[j];
    }
    /* Timestamp the packet with the cycle start time minus the average latency */
    pkt.pts = (cycle_time - (double) latency / (self->nports * self->sample_rate)) * 1000000.0;
    /* Send the now filled packet back, and increase packet counter */
    av_fifo_generic_write(self->filled_pkts, &pkt, sizeof(pkt), NULL);
    sem_post(&self->packet_count);
    return 0;
}
/* Called by libjack when the server terminates the connection; clearing
 * the client pointer makes the other callbacks and the reader bail out. */
static void shutdown_callback(void *arg)
{
    JackData *data = arg;

    data->client = NULL;
}
/* JACK xrun notification: remember the overrun so the reader thread can
 * report it, and reset the clock filter since its timing model is stale. */
static int xrun_callback(void *arg)
{
    JackData *data = arg;

    data->jack_xrun = 1;
    ff_timefilter_reset(data->timefilter);

    return 0;
}
/**
 * Refill the "new packets" FIFO with freshly allocated, empty packets so
 * that the realtime process_callback() never has to allocate memory itself.
 *
 * @return 0 on success, a negative AVERROR code if allocation fails
 */
static int supply_new_packets(JackData *self, AVFormatContext *context)
{
    const int pkt_size = self->buffer_size * self->nports * sizeof(float);
    AVPacket pkt;
    int ret;

    while (av_fifo_space(self->new_pkts) >= sizeof(pkt)) {
        ret = av_new_packet(&pkt, pkt_size);
        if (ret < 0) {
            av_log(context, AV_LOG_ERROR, "Could not create packet of size %d\n", pkt_size);
            return ret;
        }
        av_fifo_generic_write(self->new_pkts, &pkt, sizeof(pkt), NULL);
    }

    return 0;
}
/**
 * Connect to the JACK server and allocate everything the demuxer needs:
 * input ports, time filter, the packet FIFOs and the wake-up semaphore.
 *
 * Fixes over the previous version:
 *  - the ports array allocation failure no longer leaks the JACK client;
 *  - the filled_pkts FIFO allocation is now NULL-checked (previously only
 *    new_pkts was, so an OOM led to a NULL dereference later).
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static int start_jack(AVFormatContext *context)
{
    JackData *self = context->priv_data;
    jack_status_t status;
    int i, test;

    /* Register as a JACK client, using the context url as client name. */
    self->client = jack_client_open(context->url, JackNullOption, &status);
    if (!self->client) {
        av_log(context, AV_LOG_ERROR, "Unable to register as a JACK client\n");
        return AVERROR(EIO);
    }

    sem_init(&self->packet_count, 0, 0);

    self->sample_rate = jack_get_sample_rate(self->client);
    self->ports       = av_malloc_array(self->nports, sizeof(*self->ports));
    if (!self->ports) {
        /* Release the client instead of leaking it (other failure paths
         * below already do this). */
        jack_client_close(self->client);
        self->client = NULL;
        return AVERROR(ENOMEM);
    }
    self->buffer_size = jack_get_buffer_size(self->client);

    /* Register JACK ports */
    for (i = 0; i < self->nports; i++) {
        char str[16];
        snprintf(str, sizeof(str), "input_%d", i + 1);
        self->ports[i] = jack_port_register(self->client, str,
                                            JACK_DEFAULT_AUDIO_TYPE,
                                            JackPortIsInput, 0);
        if (!self->ports[i]) {
            av_log(context, AV_LOG_ERROR, "Unable to register port %s:%s\n",
                   context->url, str);
            jack_client_close(self->client);
            self->client = NULL;
            return AVERROR(EIO);
        }
    }

    /* Register JACK callbacks */
    jack_set_process_callback(self->client, process_callback, self);
    jack_on_shutdown(self->client, shutdown_callback, self);
    jack_set_xrun_callback(self->client, xrun_callback, self);

    /* Create time filter */
    self->timefilter = ff_timefilter_new(1.0 / self->sample_rate, self->buffer_size, 1.5);
    if (!self->timefilter) {
        jack_client_close(self->client);
        self->client = NULL;
        return AVERROR(ENOMEM);
    }

    /* Create FIFO buffers */
    self->filled_pkts = av_fifo_alloc_array(FIFO_PACKETS_NUM, sizeof(AVPacket));
    /* New packets FIFO with one extra packet for safety against underruns */
    self->new_pkts    = av_fifo_alloc_array((FIFO_PACKETS_NUM + 1), sizeof(AVPacket));
    if (!self->filled_pkts || !self->new_pkts) {
        jack_client_close(self->client);
        self->client = NULL;
        return AVERROR(ENOMEM);
    }

    if ((test = supply_new_packets(self, context))) {
        jack_client_close(self->client);
        self->client = NULL;
        return test;
    }

    return 0;
}
/* Drain a packet FIFO, unreferencing every queued packet, then free the
 * FIFO itself (and NULL the caller's pointer via av_fifo_freep). */
static void free_pkt_fifo(AVFifoBuffer **fifo)
{
    AVPacket pkt;

    while (av_fifo_size(*fifo) > 0) {
        av_fifo_generic_read(*fifo, &pkt, sizeof(pkt), NULL);
        av_packet_unref(&pkt);
    }

    av_fifo_freep(fifo);
}
/**
 * Tear down the JACK connection and release everything allocated by
 * start_jack(). The client is deactivated/closed first so the realtime
 * callback can no longer touch the FIFOs and time filter freed below.
 */
static void stop_jack(JackData *self)
{
    if (self->client) {
        if (self->activated)
            jack_deactivate(self->client);
        jack_client_close(self->client);
    }
    sem_destroy(&self->packet_count);
    free_pkt_fifo(&self->new_pkts);
    free_pkt_fifo(&self->filled_pkts);
    av_freep(&self->ports);
    ff_timefilter_destroy(self->timefilter);
}
/**
 * Demuxer read_header: connect to JACK and expose a single native-endian
 * 32-bit float PCM stream whose channel count and sample rate mirror the
 * JACK configuration.
 */
static int audio_read_header(AVFormatContext *context)
{
    JackData *self = context->priv_data;
    AVStream *stream;
    int test;

    if ((test = start_jack(context)))
        return test;

    stream = avformat_new_stream(context, NULL);
    if (!stream) {
        stop_jack(self);
        return AVERROR(ENOMEM);
    }

    stream->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
    /* JACK delivers native-endian float samples */
#if HAVE_BIGENDIAN
    stream->codecpar->codec_id = AV_CODEC_ID_PCM_F32BE;
#else
    stream->codecpar->codec_id = AV_CODEC_ID_PCM_F32LE;
#endif
    stream->codecpar->sample_rate = self->sample_rate;
    stream->codecpar->channels    = self->nports;

    avpriv_set_pts_info(stream, 64, 1, 1000000); /* 64 bits pts in us */
    return 0;
}
/**
 * Demuxer read_packet: on the first call, activate the JACK client (which
 * starts process_callback() running), then wait up to 2 seconds for a
 * filled packet, hand it to the caller, and replenish the empty-packet FIFO.
 */
static int audio_read_packet(AVFormatContext *context, AVPacket *pkt)
{
    JackData *self = context->priv_data;
    struct timespec timeout = {0, 0};
    int test;

    /* Activate the JACK client on first packet read. Activating the JACK client
     * means that process_callback() starts to get called at regular interval.
     * If we activate it in audio_read_header(), we're actually reading audio data
     * from the device before instructed to, and that may result in an overrun. */
    if (!self->activated) {
        if (!jack_activate(self->client)) {
            self->activated = 1;
            av_log(context, AV_LOG_INFO,
                   "JACK client registered and activated (rate=%dHz, buffer_size=%d frames)\n",
                   self->sample_rate, self->buffer_size);
        } else {
            av_log(context, AV_LOG_ERROR, "Unable to activate JACK client\n");
            return AVERROR(EIO);
        }
    }

    /* Wait for a packet coming back from process_callback(), if one isn't available yet */
    timeout.tv_sec = av_gettime() / 1000000 + 2;
    if (sem_timedwait(&self->packet_count, &timeout)) {
        if (errno == ETIMEDOUT) {
            av_log(context, AV_LOG_ERROR,
                   "Input error: timed out when waiting for JACK process callback output\n");
        } else {
            char errbuf[128];
            int ret = AVERROR(errno);
            av_strerror(ret, errbuf, sizeof(errbuf));
            av_log(context, AV_LOG_ERROR, "Error while waiting for audio packet: %s\n",
                   errbuf);
        }
        /* shutdown_callback() clears self->client when the server dies */
        if (!self->client)
            av_log(context, AV_LOG_ERROR, "Input error: JACK server is gone\n");

        return AVERROR(EIO);
    }

    /* Report (and clear) any overruns recorded by the realtime callback */
    if (self->pkt_xrun) {
        av_log(context, AV_LOG_WARNING, "Audio packet xrun\n");
        self->pkt_xrun = 0;
    }
    if (self->jack_xrun) {
        av_log(context, AV_LOG_WARNING, "JACK xrun\n");
        self->jack_xrun = 0;
    }

    /* Retrieve the packet filled with audio data by process_callback() */
    av_fifo_generic_read(self->filled_pkts, pkt, sizeof(*pkt), NULL);

    if ((test = supply_new_packets(self, context)))
        return test;

    return 0;
}
static int audio_read_close(AVFormatContext *context)
{
JackData *self = context->priv_data;
stop_jack(self);
return 0;
}
#define OFFSET(x) offsetof(JackData, x)
/* Private options: number of JACK input ports to register (= channels). */
static const AVOption options[] = {
    { "channels", "Number of audio channels.", OFFSET(nports), AV_OPT_TYPE_INT, { .i64 = 2 }, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};
/* AVClass tying the options table to JackData for the option system. */
static const AVClass jack_indev_class = {
    .class_name = "JACK indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
/* Input device definition registered with libavdevice. */
AVInputFormat ff_jack_demuxer = {
    .name           = "jack",
    .long_name      = NULL_IF_CONFIG_SMALL("JACK Audio Connection Kit"),
    .priv_data_size = sizeof(JackData),
    .read_header    = audio_read_header,
    .read_packet    = audio_read_packet,
    .read_close     = audio_read_close,
    .flags          = AVFMT_NOFILE, /* no backing file/URL to open */
    .priv_class     = &jack_indev_class,
};

466
externals/ffmpeg/libavdevice/kmsgrab.c vendored Executable file
View File

@@ -0,0 +1,466 @@
/*
* KMS/DRM input device
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <fcntl.h>
#include <unistd.h>
#include <drm.h>
#include <drm_fourcc.h>
#include <drm_mode.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include "libavutil/hwcontext.h"
#include "libavutil/hwcontext_drm.h"
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/pixfmt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
typedef struct KMSGrabContext {
    const AVClass *class;            /* AVOption support (must be first) */

    AVBufferRef *device_ref;         /* DRM hwdevice context reference */
    AVHWDeviceContext *device;       /* convenience pointer into device_ref */
    AVDRMDeviceContext *hwctx;       /* DRM-specific part (holds the device fd) */

    AVBufferRef *frames_ref;         /* DRM hwframes context reference */
    AVHWFramesContext *frames;       /* convenience pointer into frames_ref */

    uint32_t plane_id;               /* plane actually captured from */
    uint32_t drm_format;             /* DRM fourcc matching 'format' */
    unsigned int width;              /* framebuffer dimensions at startup; */
    unsigned int height;             /* captures fail if they later change */

    int64_t frame_delay;             /* microseconds between captures */
    int64_t frame_last;              /* time of the previous capture */

    /* user options */
    const char *device_path;         /* DRM device node path */
    enum AVPixelFormat format;       /* expected framebuffer pixel format */
    int64_t drm_format_modifier;     /* format modifier applied to output frames */
    int64_t source_plane;            /* plane ID to capture (0 = autodetect) */
    int64_t source_crtc;             /* restrict autodetection to this CRTC */
    AVRational framerate;            /* capture rate */
} KMSGrabContext;
/* av_buffer_create() free callback for the DRM frame descriptor: close the
 * exported PRIME fd, then release the descriptor itself. */
static void kmsgrab_free_desc(void *opaque, uint8_t *data)
{
    AVDRMFrameDescriptor *frame_desc = (AVDRMFrameDescriptor *)data;

    close(frame_desc->objects[0].fd);
    av_free(frame_desc);
}
/* av_buffer_create() free callback for packets wrapping an AVFrame. */
static void kmsgrab_free_frame(void *opaque, uint8_t *data)
{
    AVFrame *wrapped = (AVFrame *)data;

    av_frame_free(&wrapped);
}
/**
 * Demuxer read_packet: pace to the requested framerate, export the current
 * framebuffer of the captured plane as a PRIME fd, and wrap it in an
 * AV_PIX_FMT_DRM_PRIME AVFrame carried inside the packet.
 *
 * Fixes over the previous version: all error paths now release the
 * resources acquired so far (previously 'plane'/'fb' were leaked on early
 * returns, the PRIME fd was leaked if the descriptor allocation failed,
 * and the descriptor/frame were leaked on later allocation failures).
 * Ownership chain on success: fd -> desc -> frame->buf[0] -> pkt->buf.
 */
static int kmsgrab_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    KMSGrabContext *ctx = avctx->priv_data;
    drmModePlane *plane = NULL;
    drmModeFB *fb = NULL;
    AVDRMFrameDescriptor *desc = NULL;
    AVFrame *frame = NULL;
    int64_t now;
    int err, fd = -1;

    /* Simple framerate pacing: sleep until frame_delay has elapsed since
     * the previous capture. */
    now = av_gettime();
    if (ctx->frame_last) {
        int64_t delay;
        while (1) {
            delay = ctx->frame_last + ctx->frame_delay - now;
            if (delay <= 0)
                break;
            av_usleep(delay);
            now = av_gettime();
        }
    }
    ctx->frame_last = now;

    plane = drmModeGetPlane(ctx->hwctx->fd, ctx->plane_id);
    if (!plane) {
        av_log(avctx, AV_LOG_ERROR, "Failed to get plane "
               "%"PRIu32".\n", ctx->plane_id);
        err = AVERROR(EIO);
        goto fail;
    }
    if (!plane->fb_id) {
        av_log(avctx, AV_LOG_ERROR, "Plane %"PRIu32" no longer has "
               "an associated framebuffer.\n", ctx->plane_id);
        err = AVERROR(EIO);
        goto fail;
    }

    fb = drmModeGetFB(ctx->hwctx->fd, plane->fb_id);
    if (!fb) {
        av_log(avctx, AV_LOG_ERROR, "Failed to get framebuffer "
               "%"PRIu32".\n", plane->fb_id);
        err = AVERROR(EIO);
        goto fail;
    }
    if (fb->width != ctx->width || fb->height != ctx->height) {
        /* The stream parameters were fixed in read_header; bail out rather
         * than emit frames with mismatched dimensions. */
        av_log(avctx, AV_LOG_ERROR, "Plane %"PRIu32" framebuffer "
               "dimensions changed: now %"PRIu32"x%"PRIu32".\n",
               ctx->plane_id, fb->width, fb->height);
        err = AVERROR(EIO);
        goto fail;
    }
    if (!fb->handle) {
        av_log(avctx, AV_LOG_ERROR, "No handle set on framebuffer.\n");
        err = AVERROR(EIO);
        goto fail;
    }

    err = drmPrimeHandleToFD(ctx->hwctx->fd, fb->handle, O_RDONLY, &fd);
    if (err < 0) {
        err = errno;
        av_log(avctx, AV_LOG_ERROR, "Failed to get PRIME fd from "
               "framebuffer handle: %s.\n", strerror(errno));
        err = AVERROR(err);
        goto fail;
    }

    desc = av_mallocz(sizeof(*desc));
    if (!desc) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    *desc = (AVDRMFrameDescriptor) {
        .nb_objects = 1,
        .objects[0] = {
            .fd              = fd,
            .size            = fb->height * fb->pitch,
            .format_modifier = ctx->drm_format_modifier,
        },
        .nb_layers = 1,
        .layers[0] = {
            .format    = ctx->drm_format,
            .nb_planes = 1,
            .planes[0] = {
                .object_index = 0,
                .offset       = 0,
                .pitch        = fb->pitch,
            },
        },
    };

    frame = av_frame_alloc();
    if (!frame) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    frame->hw_frames_ctx = av_buffer_ref(ctx->frames_ref);
    if (!frame->hw_frames_ctx) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    frame->buf[0] = av_buffer_create((uint8_t*)desc, sizeof(*desc),
                                     &kmsgrab_free_desc, avctx, 0);
    if (!frame->buf[0]) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    frame->data[0] = (uint8_t*)desc;
    /* desc and fd are now owned by frame->buf[0] (freed by kmsgrab_free_desc) */
    desc = NULL;
    fd   = -1;

    frame->format = AV_PIX_FMT_DRM_PRIME;
    frame->width  = fb->width;
    frame->height = fb->height;

    pkt->buf = av_buffer_create((uint8_t*)frame, sizeof(*frame),
                                &kmsgrab_free_frame, avctx, 0);
    if (!pkt->buf) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    pkt->data   = (uint8_t*)frame;
    pkt->size   = sizeof(*frame);
    pkt->pts    = now;
    pkt->flags |= AV_PKT_FLAG_TRUSTED;
    /* frame is now owned by pkt->buf (freed by kmsgrab_free_frame) */
    frame = NULL;

    err = 0;
fail:
    /* Free whatever has not yet been handed over; on success everything
     * below (except fb/plane) is NULL / -1. */
    av_frame_free(&frame);        /* also drops buf[0] -> desc + fd if attached */
    if (desc)
        av_free(desc);
    if (fd >= 0)
        close(fd);
    if (fb)
        drmModeFreeFB(fb);
    if (plane)
        drmModeFreePlane(plane);
    return err;
}
/* Mapping between FFmpeg pixel formats and the corresponding DRM
 * framebuffer fourcc codes; big-endian variants use the DRM BE flag.
 * Entries guarded by #ifdef depend on the installed libdrm headers. */
static const struct {
    enum AVPixelFormat pixfmt;
    uint32_t drm_format;
} kmsgrab_formats[] = {
#ifdef DRM_FORMAT_R8
    { AV_PIX_FMT_GRAY8,    DRM_FORMAT_R8       },
#endif
#ifdef DRM_FORMAT_R16
    { AV_PIX_FMT_GRAY16LE, DRM_FORMAT_R16      },
    { AV_PIX_FMT_GRAY16BE, DRM_FORMAT_R16      | DRM_FORMAT_BIG_ENDIAN },
#endif
    { AV_PIX_FMT_BGR8,     DRM_FORMAT_BGR233   },
    { AV_PIX_FMT_RGB555LE, DRM_FORMAT_XRGB1555 },
    { AV_PIX_FMT_RGB555BE, DRM_FORMAT_XRGB1555 | DRM_FORMAT_BIG_ENDIAN },
    { AV_PIX_FMT_BGR555LE, DRM_FORMAT_XBGR1555 },
    { AV_PIX_FMT_BGR555BE, DRM_FORMAT_XBGR1555 | DRM_FORMAT_BIG_ENDIAN },
    { AV_PIX_FMT_RGB565LE, DRM_FORMAT_RGB565   },
    { AV_PIX_FMT_RGB565BE, DRM_FORMAT_RGB565   | DRM_FORMAT_BIG_ENDIAN },
    { AV_PIX_FMT_BGR565LE, DRM_FORMAT_BGR565   },
    { AV_PIX_FMT_BGR565BE, DRM_FORMAT_BGR565   | DRM_FORMAT_BIG_ENDIAN },
    { AV_PIX_FMT_RGB24,    DRM_FORMAT_RGB888   },
    { AV_PIX_FMT_BGR24,    DRM_FORMAT_BGR888   },
    { AV_PIX_FMT_0RGB,     DRM_FORMAT_BGRX8888 },
    { AV_PIX_FMT_0BGR,     DRM_FORMAT_RGBX8888 },
    { AV_PIX_FMT_RGB0,     DRM_FORMAT_XBGR8888 },
    { AV_PIX_FMT_BGR0,     DRM_FORMAT_XRGB8888 },
    { AV_PIX_FMT_ARGB,     DRM_FORMAT_BGRA8888 },
    { AV_PIX_FMT_ABGR,     DRM_FORMAT_RGBA8888 },
    { AV_PIX_FMT_RGBA,     DRM_FORMAT_ABGR8888 },
    { AV_PIX_FMT_BGRA,     DRM_FORMAT_ARGB8888 },
    { AV_PIX_FMT_YUYV422,  DRM_FORMAT_YUYV     },
    { AV_PIX_FMT_YVYU422,  DRM_FORMAT_YVYU     },
    { AV_PIX_FMT_UYVY422,  DRM_FORMAT_UYVY     },
};
/**
 * Demuxer read_header: open the DRM device, pick the source plane (either
 * the one given by the plane_id option, or scan for an active plane,
 * optionally restricted to crtc_id), validate its framebuffer, and create
 * a single AV_PIX_FMT_DRM_PRIME video stream plus the DRM hwframes context
 * used to wrap the captured framebuffers.
 */
static av_cold int kmsgrab_read_header(AVFormatContext *avctx)
{
    KMSGrabContext *ctx = avctx->priv_data;
    drmModePlaneRes *plane_res = NULL;
    drmModePlane *plane = NULL;
    drmModeFB *fb = NULL;
    AVStream *stream;
    int err, i;

    /* Translate the requested pixel format into the matching DRM fourcc. */
    for (i = 0; i < FF_ARRAY_ELEMS(kmsgrab_formats); i++) {
        if (kmsgrab_formats[i].pixfmt == ctx->format) {
            ctx->drm_format = kmsgrab_formats[i].drm_format;
            break;
        }
    }
    if (i >= FF_ARRAY_ELEMS(kmsgrab_formats)) {
        av_log(avctx, AV_LOG_ERROR, "Unsupported format %s.\n",
               av_get_pix_fmt_name(ctx->format));
        return AVERROR(EINVAL);
    }

    err = av_hwdevice_ctx_create(&ctx->device_ref, AV_HWDEVICE_TYPE_DRM,
                                 ctx->device_path, NULL, 0);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to open DRM device.\n");
        return err;
    }
    ctx->device = (AVHWDeviceContext*) ctx->device_ref->data;
    ctx->hwctx  = (AVDRMDeviceContext*)ctx->device->hwctx;

    /* Without this capability, primary planes are hidden from
     * drmModeGetPlaneResources(). */
    err = drmSetClientCap(ctx->hwctx->fd,
                          DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1);
    if (err < 0) {
        av_log(avctx, AV_LOG_WARNING, "Failed to set universal planes "
               "capability: primary planes will not be usable.\n");
    }

    if (ctx->source_plane > 0) {
        /* Explicit plane requested via the plane_id option. */
        plane = drmModeGetPlane(ctx->hwctx->fd, ctx->source_plane);
        if (!plane) {
            err = errno;
            av_log(avctx, AV_LOG_ERROR, "Failed to get plane %"PRId64": "
                   "%s.\n", ctx->source_plane, strerror(err));
            err = AVERROR(err);
            goto fail;
        }
        if (plane->fb_id == 0) {
            av_log(avctx, AV_LOG_ERROR, "Plane %"PRId64" does not have "
                   "an attached framebuffer.\n", ctx->source_plane);
            err = AVERROR(EINVAL);
            goto fail;
        }
    } else {
        /* Autodetect: scan all planes for an active one, optionally
         * restricted to the requested CRTC. */
        plane_res = drmModeGetPlaneResources(ctx->hwctx->fd);
        if (!plane_res) {
            av_log(avctx, AV_LOG_ERROR, "Failed to get plane "
                   "resources: %s.\n", strerror(errno));
            err = AVERROR(EINVAL);
            goto fail;
        }

        for (i = 0; i < plane_res->count_planes; i++) {
            plane = drmModeGetPlane(ctx->hwctx->fd,
                                    plane_res->planes[i]);
            if (!plane) {
                err = errno;
                av_log(avctx, AV_LOG_VERBOSE, "Failed to get "
                       "plane %"PRIu32": %s.\n",
                       plane_res->planes[i], strerror(err));
                continue;
            }

            av_log(avctx, AV_LOG_DEBUG, "Plane %"PRIu32": "
                   "CRTC %"PRIu32" FB %"PRIu32".\n",
                   plane->plane_id, plane->crtc_id, plane->fb_id);

            if ((ctx->source_crtc > 0 &&
                 plane->crtc_id != ctx->source_crtc) ||
                plane->fb_id == 0) {
                // Either not connected to the target source CRTC
                // or not active.
                drmModeFreePlane(plane);
                plane = NULL;
                continue;
            }

            break;
        }

        if (i == plane_res->count_planes) {
            if (ctx->source_crtc > 0) {
                av_log(avctx, AV_LOG_ERROR, "No usable planes found on "
                       "CRTC %"PRId64".\n", ctx->source_crtc);
            } else {
                av_log(avctx, AV_LOG_ERROR, "No usable planes found.\n");
            }
            err = AVERROR(EINVAL);
            goto fail;
        }

        av_log(avctx, AV_LOG_INFO, "Using plane %"PRIu32" to "
               "locate framebuffers.\n", plane->plane_id);
    }

    ctx->plane_id = plane->plane_id;

    /* Fetch the plane's current framebuffer once, to validate it and
     * record the capture dimensions. */
    fb = drmModeGetFB(ctx->hwctx->fd, plane->fb_id);
    if (!fb) {
        err = errno;
        av_log(avctx, AV_LOG_ERROR, "Failed to get "
               "framebuffer %"PRIu32": %s.\n",
               plane->fb_id, strerror(err));
        err = AVERROR(err);
        goto fail;
    }

    av_log(avctx, AV_LOG_INFO, "Template framebuffer is %"PRIu32": "
           "%"PRIu32"x%"PRIu32" %"PRIu32"bpp %"PRIu32"b depth.\n",
           fb->fb_id, fb->width, fb->height, fb->bpp, fb->depth);

    ctx->width  = fb->width;
    ctx->height = fb->height;

    if (!fb->handle) {
        av_log(avctx, AV_LOG_ERROR, "No handle set on framebuffer: "
               "maybe you need some additional capabilities?\n");
        err = AVERROR(EINVAL);
        goto fail;
    }

    stream = avformat_new_stream(avctx, NULL);
    if (!stream) {
        err = AVERROR(ENOMEM);
        goto fail;
    }

    stream->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    stream->codecpar->codec_id   = AV_CODEC_ID_WRAPPED_AVFRAME;
    stream->codecpar->width      = fb->width;
    stream->codecpar->height     = fb->height;
    stream->codecpar->format     = AV_PIX_FMT_DRM_PRIME;

    avpriv_set_pts_info(stream, 64, 1, 1000000); /* microsecond timestamps */

    /* Create the hwframes context used to wrap each captured framebuffer. */
    ctx->frames_ref = av_hwframe_ctx_alloc(ctx->device_ref);
    if (!ctx->frames_ref) {
        err = AVERROR(ENOMEM);
        goto fail;
    }
    ctx->frames = (AVHWFramesContext*)ctx->frames_ref->data;

    ctx->frames->format    = AV_PIX_FMT_DRM_PRIME;
    /* NOTE(review): trailing comma below is the comma operator, not ';' —
     * harmless here, but likely a typo. */
    ctx->frames->sw_format = ctx->format,
    ctx->frames->width     = fb->width;
    ctx->frames->height    = fb->height;

    err = av_hwframe_ctx_init(ctx->frames_ref);
    if (err < 0) {
        av_log(avctx, AV_LOG_ERROR, "Failed to initialise "
               "hardware frames context: %d.\n", err);
        goto fail;
    }

    /* Inter-frame delay in microseconds derived from the framerate option. */
    ctx->frame_delay = av_rescale_q(1, (AVRational) { ctx->framerate.den,
                                                      ctx->framerate.num }, AV_TIME_BASE_Q);

    err = 0;
fail:
    if (plane_res)
        drmModeFreePlaneResources(plane_res);
    if (plane)
        drmModeFreePlane(plane);
    if (fb)
        drmModeFreeFB(fb);

    return err;
}
/* Demuxer read_close: drop the hardware frames and device context refs
 * (frames first, since they hold a reference to the device). */
static av_cold int kmsgrab_read_close(AVFormatContext *avctx)
{
    KMSGrabContext *grab = avctx->priv_data;

    av_buffer_unref(&grab->frames_ref);
    av_buffer_unref(&grab->device_ref);

    return 0;
}
#define OFFSET(x) offsetof(KMSGrabContext, x)
#define FLAGS AV_OPT_FLAG_DECODING_PARAM
/* User options; see KMSGrabContext field comments for semantics. */
static const AVOption options[] = {
    { "device", "DRM device path",
      OFFSET(device_path), AV_OPT_TYPE_STRING,
      { .str = "/dev/dri/card0" }, 0, 0, FLAGS },
    { "format", "Pixel format for framebuffer",
      OFFSET(format), AV_OPT_TYPE_PIXEL_FMT,
      { .i64 = AV_PIX_FMT_BGR0 }, 0, UINT32_MAX, FLAGS },
    { "format_modifier", "DRM format modifier for framebuffer",
      OFFSET(drm_format_modifier), AV_OPT_TYPE_INT64,
      { .i64 = DRM_FORMAT_MOD_NONE }, 0, INT64_MAX, FLAGS },
    { "crtc_id", "CRTC ID to define capture source",
      OFFSET(source_crtc), AV_OPT_TYPE_INT64,
      { .i64 = 0 }, 0, UINT32_MAX, FLAGS },
    { "plane_id", "Plane ID to define capture source",
      OFFSET(source_plane), AV_OPT_TYPE_INT64,
      { .i64 = 0 }, 0, UINT32_MAX, FLAGS },
    { "framerate", "Framerate to capture at",
      OFFSET(framerate), AV_OPT_TYPE_RATIONAL,
      { .dbl = 30.0 }, 0, 1000, FLAGS },
    { NULL },
};
/* AVClass tying the options table to KMSGrabContext. */
static const AVClass kmsgrab_class = {
    .class_name = "kmsgrab indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
/* Input device definition registered with libavdevice. */
AVInputFormat ff_kmsgrab_demuxer = {
    .name           = "kmsgrab",
    .long_name      = NULL_IF_CONFIG_SMALL("KMS screen capture"),
    .priv_data_size = sizeof(KMSGrabContext),
    .read_header    = &kmsgrab_read_header,
    .read_packet    = &kmsgrab_read_packet,
    .read_close     = &kmsgrab_read_close,
    .flags          = AVFMT_NOFILE, /* no backing file/URL to open */
    .priv_class     = &kmsgrab_class,
};

517
externals/ffmpeg/libavdevice/lavfi.c vendored Executable file
View File

@@ -0,0 +1,517 @@
/*
* Copyright (c) 2011 Stefano Sabatini
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* libavfilter virtual input device
*/
/* #define DEBUG */
#include <float.h> /* DBL_MIN, DBL_MAX */
#include "libavutil/bprint.h"
#include "libavutil/channel_layout.h"
#include "libavutil/file.h"
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavfilter/avfilter.h"
#include "libavfilter/buffersink.h"
#include "libavformat/avio_internal.h"
#include "libavformat/internal.h"
#include "avdevice.h"
typedef struct {
    AVClass *class;               ///< class for private options
    char *graph_str;              ///< filtergraph description (option, file contents, or url)
    char *graph_filename;         ///< file to load the graph description from
    char *dump_graph;             ///< if set, dump the configured graph to stderr
    AVFilterGraph *graph;         ///< the configured filter graph
    AVFilterContext **sinks;      ///< one (a)buffersink per graph output
    int *sink_stream_map;         ///< sink index -> stream index
    int *sink_eof;                ///< per-sink EOF flags
    int *stream_sink_map;         ///< stream index -> sink index (-1 = unset)
    int *sink_stream_subcc_map;   ///< sink index -> subcc stream index (-1 = none)
    AVFrame *decoded_frame;       ///< scratch frame reused by read_packet
    int nb_sinks;                 ///< number of graph outputs
    AVPacket subcc_packet;        ///< pending closed-caption packet, if any
} LavfiContext;
static int *create_all_formats(int n)
{
int i, j, *fmts, count = 0;
for (i = 0; i < n; i++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
count++;
}
if (!(fmts = av_malloc((count+1) * sizeof(int))))
return NULL;
for (j = 0, i = 0; i < n; i++) {
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(i);
if (!(desc->flags & AV_PIX_FMT_FLAG_HWACCEL))
fmts[j++] = i;
}
fmts[j] = -1;
return fmts;
}
/**
 * Demuxer read_close: free all per-context maps, the sink array, the
 * filter graph and the scratch frame. Also used to clean up after a
 * partially failed lavfi_read_header() (unset pointers are NULL).
 */
av_cold static int lavfi_read_close(AVFormatContext *avctx)
{
    LavfiContext *lavfi = avctx->priv_data;

    av_freep(&lavfi->sink_stream_map);
    av_freep(&lavfi->sink_eof);
    av_freep(&lavfi->stream_sink_map);
    av_freep(&lavfi->sink_stream_subcc_map);
    av_freep(&lavfi->sinks);
    avfilter_graph_free(&lavfi->graph);
    av_frame_free(&lavfi->decoded_frame);

    return 0;
}
/**
 * For every sink flagged with "+subcc", create a closed-caption (EIA-608)
 * subtitle stream and record its stream index in sink_stream_subcc_map;
 * unflagged sinks get -1.
 *
 * @return 0 on success, AVERROR(ENOMEM) if stream creation fails
 */
static int create_subcc_streams(AVFormatContext *avctx)
{
    LavfiContext *lavfi = avctx->priv_data;
    AVStream *st;
    int i, sink;

    for (i = 0; i < lavfi->nb_sinks; i++) {
        sink = lavfi->stream_sink_map[i];
        if (!lavfi->sink_stream_subcc_map[sink]) {
            lavfi->sink_stream_subcc_map[sink] = -1;
            continue;
        }
        /* the flag becomes the index of the stream created just below */
        lavfi->sink_stream_subcc_map[sink] = avctx->nb_streams;
        st = avformat_new_stream(avctx, NULL);
        if (!st)
            return AVERROR(ENOMEM);
        st->codecpar->codec_id   = AV_CODEC_ID_EIA_608;
        st->codecpar->codec_type = AVMEDIA_TYPE_SUBTITLE;
    }

    return 0;
}
/**
 * Demuxer read_header: obtain the filtergraph description (from the graph
 * option, a file, or the url), parse and configure it, attach a buffersink
 * or abuffersink to every open output ("out0", "out1", ..., optionally
 * with a "+subcc" suffix), and create one AVStream per output whose
 * parameters mirror the corresponding sink.
 */
av_cold static int lavfi_read_header(AVFormatContext *avctx)
{
    LavfiContext *lavfi = avctx->priv_data;
    AVFilterInOut *input_links = NULL, *output_links = NULL, *inout;
    const AVFilter *buffersink, *abuffersink;
    int *pix_fmts = create_all_formats(AV_PIX_FMT_NB);
    enum AVMediaType type;
    int ret = 0, i, n;

#define FAIL(ERR) { ret = ERR; goto end; }

    if (!pix_fmts)
        FAIL(AVERROR(ENOMEM));

    buffersink = avfilter_get_by_name("buffersink");
    abuffersink = avfilter_get_by_name("abuffersink");

    if (lavfi->graph_filename && lavfi->graph_str) {
        av_log(avctx, AV_LOG_ERROR,
               "Only one of the graph or graph_file options must be specified\n");
        FAIL(AVERROR(EINVAL));
    }

    /* Load the graph description from a file if requested. */
    if (lavfi->graph_filename) {
        AVBPrint graph_file_pb;
        AVIOContext *avio = NULL;
        AVDictionary *options = NULL;
        if (avctx->protocol_whitelist && (ret = av_dict_set(&options, "protocol_whitelist", avctx->protocol_whitelist, 0)) < 0)
            goto end;
        ret = avio_open2(&avio, lavfi->graph_filename, AVIO_FLAG_READ, &avctx->interrupt_callback, &options);
        av_dict_set(&options, "protocol_whitelist", NULL, 0);
        if (ret < 0)
            goto end;
        av_bprint_init(&graph_file_pb, 0, AV_BPRINT_SIZE_UNLIMITED);
        ret = avio_read_to_bprint(avio, &graph_file_pb, INT_MAX);
        avio_closep(&avio);
        av_bprint_chars(&graph_file_pb, '\0', 1);
        if (!ret && !av_bprint_is_complete(&graph_file_pb))
            ret = AVERROR(ENOMEM);
        if (ret) {
            av_bprint_finalize(&graph_file_pb, NULL);
            goto end;
        }
        if ((ret = av_bprint_finalize(&graph_file_pb, &lavfi->graph_str)))
            goto end;
    }

    /* Default: the "filename" (url) itself is the graph description. */
    if (!lavfi->graph_str)
        lavfi->graph_str = av_strdup(avctx->url);

    /* parse the graph, create a stream for each open output */
    if (!(lavfi->graph = avfilter_graph_alloc()))
        FAIL(AVERROR(ENOMEM));

    if ((ret = avfilter_graph_parse_ptr(lavfi->graph, lavfi->graph_str,
                                        &input_links, &output_links, avctx)) < 0)
        goto end;

    if (input_links) {
        av_log(avctx, AV_LOG_ERROR,
               "Open inputs in the filtergraph are not acceptable\n");
        FAIL(AVERROR(EINVAL));
    }

    /* count the outputs */
    for (n = 0, inout = output_links; inout; n++, inout = inout->next);
    lavfi->nb_sinks = n;

    if (!(lavfi->sink_stream_map = av_malloc(sizeof(int) * n)))
        FAIL(AVERROR(ENOMEM));
    if (!(lavfi->sink_eof = av_mallocz(sizeof(int) * n)))
        FAIL(AVERROR(ENOMEM));
    if (!(lavfi->stream_sink_map = av_malloc(sizeof(int) * n)))
        FAIL(AVERROR(ENOMEM));
    if (!(lavfi->sink_stream_subcc_map = av_malloc(sizeof(int) * n)))
        FAIL(AVERROR(ENOMEM));

    for (i = 0; i < n; i++)
        lavfi->stream_sink_map[i] = -1;

    /* parse the output link names - they need to be of the form out0, out1, ...
     * create a mapping between them and the streams */
    for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
        int stream_idx = 0, suffix = 0, use_subcc = 0;
        /* 'suffix' ends up as the length of the matched "outN" prefix */
        sscanf(inout->name, "out%n%d%n", &suffix, &stream_idx, &suffix);
        if (!suffix) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid outpad name '%s'\n", inout->name);
            FAIL(AVERROR(EINVAL));
        }
        /* an optional "+subcc" suffix requests a closed-caption side stream */
        if (inout->name[suffix]) {
            if (!strcmp(inout->name + suffix, "+subcc")) {
                use_subcc = 1;
            } else {
                av_log(avctx, AV_LOG_ERROR,
                       "Invalid outpad suffix '%s'\n", inout->name);
                FAIL(AVERROR(EINVAL));
            }
        }

        if ((unsigned)stream_idx >= n) {
            av_log(avctx, AV_LOG_ERROR,
                   "Invalid index was specified in output '%s', "
                   "must be a non-negative value < %d\n",
                   inout->name, n);
            FAIL(AVERROR(EINVAL));
        }

        if (lavfi->stream_sink_map[stream_idx] != -1) {
            av_log(avctx, AV_LOG_ERROR,
                   "An output with stream index %d was already specified\n",
                   stream_idx);
            FAIL(AVERROR(EINVAL));
        }
        lavfi->sink_stream_map[i] = stream_idx;
        lavfi->stream_sink_map[stream_idx] = i;
        lavfi->sink_stream_subcc_map[i] = !!use_subcc;
    }

    /* for each open output create a corresponding stream */
    for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
        AVStream *st;
        if (!(st = avformat_new_stream(avctx, NULL)))
            FAIL(AVERROR(ENOMEM));
        st->id = i;
    }

    /* create a sink for each output and connect them to the graph */
    lavfi->sinks = av_malloc_array(lavfi->nb_sinks, sizeof(AVFilterContext *));
    if (!lavfi->sinks)
        FAIL(AVERROR(ENOMEM));

    for (i = 0, inout = output_links; inout; i++, inout = inout->next) {
        AVFilterContext *sink;

        type = avfilter_pad_get_type(inout->filter_ctx->output_pads, inout->pad_idx);

        if (type == AVMEDIA_TYPE_VIDEO && ! buffersink ||
            type == AVMEDIA_TYPE_AUDIO && ! abuffersink) {
            av_log(avctx, AV_LOG_ERROR, "Missing required buffersink filter, aborting.\n");
            FAIL(AVERROR_FILTER_NOT_FOUND);
        }

        if (type == AVMEDIA_TYPE_VIDEO) {
            ret = avfilter_graph_create_filter(&sink, buffersink,
                                               inout->name, NULL,
                                               NULL, lavfi->graph);
            if (ret >= 0)
                ret = av_opt_set_int_list(sink, "pix_fmts", pix_fmts, AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
            if (ret < 0)
                goto end;
        } else if (type == AVMEDIA_TYPE_AUDIO) {
            /* restrict audio output to sample formats that have PCM codecs */
            enum AVSampleFormat sample_fmts[] = { AV_SAMPLE_FMT_U8,
                                                  AV_SAMPLE_FMT_S16,
                                                  AV_SAMPLE_FMT_S32,
                                                  AV_SAMPLE_FMT_FLT,
                                                  AV_SAMPLE_FMT_DBL, -1 };

            ret = avfilter_graph_create_filter(&sink, abuffersink,
                                               inout->name, NULL,
                                               NULL, lavfi->graph);
            if (ret >= 0)
                ret = av_opt_set_int_list(sink, "sample_fmts", sample_fmts, AV_SAMPLE_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
            if (ret < 0)
                goto end;
            ret = av_opt_set_int(sink, "all_channel_counts", 1,
                                 AV_OPT_SEARCH_CHILDREN);
            if (ret < 0)
                goto end;
        } else {
            av_log(avctx, AV_LOG_ERROR,
                   "Output '%s' is not a video or audio output, not yet supported\n", inout->name);
            FAIL(AVERROR(EINVAL));
        }

        lavfi->sinks[i] = sink;
        if ((ret = avfilter_link(inout->filter_ctx, inout->pad_idx, sink, 0)) < 0)
            goto end;
    }

    /* configure the graph */
    if ((ret = avfilter_graph_config(lavfi->graph, avctx)) < 0)
        goto end;

    if (lavfi->dump_graph) {
        char *dump = avfilter_graph_dump(lavfi->graph, lavfi->dump_graph);
        if (dump != NULL) {
            fputs(dump, stderr);
            fflush(stderr);
            av_free(dump);
        } else {
            FAIL(AVERROR(ENOMEM));
        }
    }

    /* fill each stream with the information in the corresponding sink */
    for (i = 0; i < lavfi->nb_sinks; i++) {
        AVFilterContext *sink = lavfi->sinks[lavfi->stream_sink_map[i]];
        AVRational time_base = av_buffersink_get_time_base(sink);
        AVStream *st = avctx->streams[i];
        st->codecpar->codec_type = av_buffersink_get_type(sink);
        avpriv_set_pts_info(st, 64, time_base.num, time_base.den);
        if (av_buffersink_get_type(sink) == AVMEDIA_TYPE_VIDEO) {
            st->codecpar->codec_id   = AV_CODEC_ID_RAWVIDEO;
            st->codecpar->format     = av_buffersink_get_format(sink);
            st->codecpar->width      = av_buffersink_get_w(sink);
            st->codecpar->height     = av_buffersink_get_h(sink);
            st ->sample_aspect_ratio =
            st->codecpar->sample_aspect_ratio = av_buffersink_get_sample_aspect_ratio(sink);
            /* make probing buffer large enough for ~30 raw video frames */
            avctx->probesize = FFMAX(avctx->probesize,
                                     av_buffersink_get_w(sink) * av_buffersink_get_h(sink) *
                                     av_get_padded_bits_per_pixel(av_pix_fmt_desc_get(av_buffersink_get_format(sink))) *
                                     30);
        } else if (av_buffersink_get_type(sink) == AVMEDIA_TYPE_AUDIO) {
            st->codecpar->codec_id       = av_get_pcm_codec(av_buffersink_get_format(sink), -1);
            st->codecpar->channels       = av_buffersink_get_channels(sink);
            st->codecpar->format         = av_buffersink_get_format(sink);
            st->codecpar->sample_rate    = av_buffersink_get_sample_rate(sink);
            st->codecpar->channel_layout = av_buffersink_get_channel_layout(sink);
            if (st->codecpar->codec_id == AV_CODEC_ID_NONE)
                av_log(avctx, AV_LOG_ERROR,
                       "Could not find PCM codec for sample format %s.\n",
                       av_get_sample_fmt_name(av_buffersink_get_format(sink)));
        }
    }

    if ((ret = create_subcc_streams(avctx)) < 0)
        goto end;

    if (!(lavfi->decoded_frame = av_frame_alloc()))
        FAIL(AVERROR(ENOMEM));

end:
    av_free(pix_fmts);
    avfilter_inout_free(&input_links);
    avfilter_inout_free(&output_links);
    if (ret < 0)
        lavfi_read_close(avctx);
    return ret;
}
/**
 * If the given sink has an associated closed-caption stream and the frame
 * carries A53 CC side data, stash that data in lavfi->subcc_packet so the
 * next read_packet call can deliver it.
 *
 * @return 0 on success (including "nothing to do"), negative AVERROR on failure
 */
static int create_subcc_packet(AVFormatContext *avctx, AVFrame *frame,
                               int sink_idx)
{
    LavfiContext *lavfi = avctx->priv_data;
    AVFrameSideData *sd = NULL;
    int stream_idx, i, ret;

    stream_idx = lavfi->sink_stream_subcc_map[sink_idx];
    if (stream_idx < 0)
        return 0;

    /* look for A53 closed-caption side data on the frame */
    for (i = 0; i < frame->nb_side_data; i++) {
        if (frame->side_data[i]->type == AV_FRAME_DATA_A53_CC) {
            sd = frame->side_data[i];
            break;
        }
    }
    if (!sd)
        return 0;

    ret = av_new_packet(&lavfi->subcc_packet, sd->size);
    if (ret < 0)
        return ret;
    memcpy(lavfi->subcc_packet.data, sd->data, sd->size);
    lavfi->subcc_packet.stream_index = stream_idx;
    lavfi->subcc_packet.pts = frame->pts;
    lavfi->subcc_packet.pos = frame->pkt_pos;
    return 0;
}
/**
 * Demuxer read_packet: deliver any pending closed-caption packet first;
 * otherwise PEEK every sink, pick the one with the smallest timestamp,
 * pull its frame and copy it into a new data packet (raw video or
 * interleaved PCM), attaching frame metadata as packet side data.
 *
 * @return packet size on success, AVERROR_EOF once every sink has ended,
 *         or another negative AVERROR code on failure
 */
static int lavfi_read_packet(AVFormatContext *avctx, AVPacket *pkt)
{
    LavfiContext *lavfi = avctx->priv_data;
    double min_pts = DBL_MAX;
    int stream_idx, min_pts_sink_idx = 0;
    AVFrame *frame = lavfi->decoded_frame;
    AVDictionary *frame_metadata;
    int ret, i;
    int size = 0;

    /* A subcc packet created by the previous call is delivered first. */
    if (lavfi->subcc_packet.size) {
        *pkt = lavfi->subcc_packet;
        av_init_packet(&lavfi->subcc_packet);
        lavfi->subcc_packet.size = 0;
        lavfi->subcc_packet.data = NULL;
        return pkt->size;
    }

    /* iterate through all the graph sinks. Select the sink with the
     * minimum PTS */
    for (i = 0; i < lavfi->nb_sinks; i++) {
        AVRational tb = av_buffersink_get_time_base(lavfi->sinks[i]);
        double d;
        int ret;

        if (lavfi->sink_eof[i])
            continue;

        ret = av_buffersink_get_frame_flags(lavfi->sinks[i], frame,
                                            AV_BUFFERSINK_FLAG_PEEK);
        if (ret == AVERROR_EOF) {
            ff_dlog(avctx, "EOF sink_idx:%d\n", i);
            lavfi->sink_eof[i] = 1;
            continue;
        } else if (ret < 0)
            return ret;
        d = av_rescale_q_rnd(frame->pts, tb, AV_TIME_BASE_Q, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        ff_dlog(avctx, "sink_idx:%d time:%f\n", i, d);
        av_frame_unref(frame);

        if (d < min_pts) {
            min_pts = d;
            min_pts_sink_idx = i;
        }
    }
    if (min_pts == DBL_MAX)
        return AVERROR_EOF;

    ff_dlog(avctx, "min_pts_sink_idx:%i\n", min_pts_sink_idx);

    /* NOTE(review): return value unchecked — the frame was just PEEKed
     * successfully above, so this pull is expected to succeed. */
    av_buffersink_get_frame_flags(lavfi->sinks[min_pts_sink_idx], frame, 0);
    stream_idx = lavfi->sink_stream_map[min_pts_sink_idx];

    if (frame->width /* FIXME best way of testing a video */) {
        size = av_image_get_buffer_size(frame->format, frame->width, frame->height, 1);
        /* NOTE(review): the early returns below leave 'frame' referenced;
         * it is the reusable lavfi->decoded_frame — confirm it is safely
         * overwritten/unreffed on the next call. */
        if ((ret = av_new_packet(pkt, size)) < 0)
            return ret;

        av_image_copy_to_buffer(pkt->data, size, (const uint8_t **)frame->data, frame->linesize,
                                frame->format, frame->width, frame->height, 1);
    } else if (frame->channels /* FIXME test audio */) {
        size = frame->nb_samples * av_get_bytes_per_sample(frame->format) *
                                   frame->channels;
        if ((ret = av_new_packet(pkt, size)) < 0)
            return ret;
        memcpy(pkt->data, frame->data[0], size);
    }

    /* Export frame metadata as packet side data: NUL-separated
     * key/value string pairs. */
    frame_metadata = frame->metadata;
    if (frame_metadata) {
        uint8_t *metadata;
        AVDictionaryEntry *e = NULL;
        AVBPrint meta_buf;

        av_bprint_init(&meta_buf, 0, AV_BPRINT_SIZE_UNLIMITED);
        while ((e = av_dict_get(frame_metadata, "", e, AV_DICT_IGNORE_SUFFIX))) {
            av_bprintf(&meta_buf, "%s", e->key);
            av_bprint_chars(&meta_buf, '\0', 1);
            av_bprintf(&meta_buf, "%s", e->value);
            av_bprint_chars(&meta_buf, '\0', 1);
        }
        if (!av_bprint_is_complete(&meta_buf) ||
            !(metadata = av_packet_new_side_data(pkt, AV_PKT_DATA_STRINGS_METADATA,
                                                 meta_buf.len))) {
            av_bprint_finalize(&meta_buf, NULL);
            return AVERROR(ENOMEM);
        }
        memcpy(metadata, meta_buf.str, meta_buf.len);
        av_bprint_finalize(&meta_buf, NULL);
    }

    if ((ret = create_subcc_packet(avctx, frame, min_pts_sink_idx)) < 0) {
        av_frame_unref(frame);
        av_packet_unref(pkt);
        return ret;
    }

    pkt->stream_index = stream_idx;
    pkt->pts = frame->pts;
    pkt->pos = frame->pkt_pos;
    pkt->size = size;
    av_frame_unref(frame);
    return size;
}
#define OFFSET(x) offsetof(LavfiContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

/* Private options of the lavfi virtual input device. */
static const AVOption options[] = {
    { "graph", "set libavfilter graph", OFFSET(graph_str), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
    { "graph_file","set libavfilter graph filename", OFFSET(graph_filename), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC},
    { "dumpgraph", "dump graph to stderr", OFFSET(dump_graph), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
    { NULL },
};

static const AVClass lavfi_class = {
    .class_name = "lavfi indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_INPUT,
};

/* Virtual input device that pulls frames out of a libavfilter graph.
 * AVFMT_NOFILE: no underlying AVIOContext is opened. */
AVInputFormat ff_lavfi_demuxer = {
    .name           = "lavfi",
    .long_name      = NULL_IF_CONFIG_SMALL("Libavfilter virtual input device"),
    .priv_data_size = sizeof(LavfiContext),
    .read_header    = lavfi_read_header,
    .read_packet    = lavfi_read_packet,
    .read_close     = lavfi_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &lavfi_class,
};

7
externals/ffmpeg/libavdevice/libavdevice.v vendored Executable file
View File

@@ -0,0 +1,7 @@
/* GNU ld version script: export only symbols with the public avdevice_
 * and av_ prefixes; every other symbol gets local (hidden) linkage. */
LIBAVDEVICE_MAJOR {
    global:
        avdevice_*;
        av_*;
    local:
        *;
};

194
externals/ffmpeg/libavdevice/libcdio.c vendored Executable file
View File

@@ -0,0 +1,194 @@
/*
* Copyright (c) 2011 Anton Khirnov <anton@khirnov.net>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* libcdio CD grabbing
*/
#include "config.h"
#if HAVE_CDIO_PARANOIA_H
#include <cdio/cdda.h>
#include <cdio/paranoia.h>
#elif HAVE_CDIO_PARANOIA_PARANOIA_H
#include <cdio/paranoia/cdda.h>
#include <cdio/paranoia/paranoia.h>
#endif
#include "libavutil/log.h"
#include "libavutil/mem.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
/* Per-instance state of the libcdio audio-CD grabber. */
typedef struct CDIOContext {
    const AVClass *class;       /* must be first: AVOptions/logging context */
    cdrom_drive_t *drive;       /* libcdio drive handle */
    cdrom_paranoia_t *paranoia; /* paranoia error-correction layer on top of the drive */
    int32_t last_sector;        /* last audio sector of the disc, used for EOF detection */

    /* private options */
    int speed;                  /* requested drive read speed (0 = drive default) */
    int paranoia_mode;          /* bitmask of PARANOIA_MODE_* fixups */
} CDIOContext;
/**
 * Open the CD drive named by ctx->url, initialize the paranoia reader and
 * expose the disc as a single 16-bit stereo 44.1 kHz PCM stream, with one
 * chapter per TOC track.
 * Returns 0 on success or a negative AVERROR code.
 */
static av_cold int read_header(AVFormatContext *ctx)
{
    CDIOContext *s = ctx->priv_data;
    AVStream *st;
    int ret, i;
    char *err = NULL;

    if (!(st = avformat_new_stream(ctx, NULL)))
        return AVERROR(ENOMEM);
    s->drive = cdio_cddap_identify(ctx->url, CDDA_MESSAGE_LOGIT, &err);
    if (!s->drive) {
        av_log(ctx, AV_LOG_ERROR, "Could not open drive %s.\n", ctx->url);
        return AVERROR(EINVAL);
    }
    if (err) {
        /* identify() may hand back a malloc()ed log string even on success */
        av_log(ctx, AV_LOG_VERBOSE, "%s\n", err);
        free(err);
    }
    if ((ret = cdio_cddap_open(s->drive)) < 0 || !s->drive->opened) {
        av_log(ctx, AV_LOG_ERROR, "Could not open disk in drive %s.\n", ctx->url);
        return AVERROR(EINVAL);
    }

    cdio_cddap_verbose_set(s->drive, CDDA_MESSAGE_LOGIT, CDDA_MESSAGE_LOGIT);
    if (s->speed)
        cdio_cddap_speed_set(s->drive, s->speed);

    s->paranoia = cdio_paranoia_init(s->drive);
    if (!s->paranoia) {
        av_log(ctx, AV_LOG_ERROR, "Could not init paranoia.\n");
        return AVERROR(EINVAL);
    }
    cdio_paranoia_modeset(s->paranoia, s->paranoia_mode);

    st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
    /* sample endianness depends on the drive */
    if (s->drive->bigendianp)
        st->codecpar->codec_id = AV_CODEC_ID_PCM_S16BE;
    else
        st->codecpar->codec_id = AV_CODEC_ID_PCM_S16LE;
    st->codecpar->sample_rate = 44100;
    st->codecpar->channels = 2;
    if (s->drive->audio_last_sector != CDIO_INVALID_LSN &&
        s->drive->audio_first_sector != CDIO_INVALID_LSN)
        st->duration = s->drive->audio_last_sector - s->drive->audio_first_sector;
    else if (s->drive->tracks)
        st->duration = s->drive->disc_toc[s->drive->tracks].dwStartSector;
    /* time base: one tick per raw sector (CDIO_CD_FRAMESIZE_RAW bytes) */
    avpriv_set_pts_info(st, 64, CDIO_CD_FRAMESIZE_RAW, 2 * st->codecpar->channels * st->codecpar->sample_rate);

    /* one chapter per TOC track; entry [tracks] is the lead-out sector */
    for (i = 0; i < s->drive->tracks; i++) {
        char title[16];
        snprintf(title, sizeof(title), "track %02d", s->drive->disc_toc[i].bTrack);
        avpriv_new_chapter(ctx, i, st->time_base, s->drive->disc_toc[i].dwStartSector,
                           s->drive->disc_toc[i+1].dwStartSector, title);
    }

    s->last_sector = cdio_cddap_disc_lastsector(s->drive);
    return 0;
}
/**
 * Read one raw CD-DA sector into a packet.
 * Fix vs. original: the libcdio error/message checks used a bare
 * assignment as a condition (`if (err = ...)`), which every modern
 * compiler flags with -Wparentheses; wrap them in double parentheses.
 * Returns 0 on success, AVERROR_EOF past the last sector, or a negative
 * error from av_new_packet().
 */
static int read_packet(AVFormatContext *ctx, AVPacket *pkt)
{
    CDIOContext *s = ctx->priv_data;
    int ret;
    uint16_t *buf;
    char *err = NULL;

    /* cur_dts counts sectors (the time base is one sector per tick) */
    if (ctx->streams[0]->cur_dts > s->last_sector)
        return AVERROR_EOF;

    buf = cdio_paranoia_read(s->paranoia, NULL);
    if (!buf)
        return AVERROR_EOF;

    /* drain queued error/status text; libcdio malloc()s the strings and
     * the caller must free() them */
    if ((err = cdio_cddap_errors(s->drive))) {
        av_log(ctx, AV_LOG_ERROR, "%s\n", err);
        free(err);
        err = NULL;
    }
    if ((err = cdio_cddap_messages(s->drive))) {
        av_log(ctx, AV_LOG_VERBOSE, "%s\n", err);
        free(err);
        err = NULL;
    }

    if ((ret = av_new_packet(pkt, CDIO_CD_FRAMESIZE_RAW)) < 0)
        return ret;
    memcpy(pkt->data, buf, CDIO_CD_FRAMESIZE_RAW);
    return 0;
}
/* Tear down the paranoia layer first, then the drive it wraps. */
static av_cold int read_close(AVFormatContext *ctx)
{
    CDIOContext *priv = ctx->priv_data;

    cdio_paranoia_free(priv->paranoia);
    cdio_cddap_close(priv->drive);

    return 0;
}
/* Seek to an absolute sector and keep the stream clock in step with it. */
static int read_seek(AVFormatContext *ctx, int stream_index, int64_t timestamp,
                     int flags)
{
    CDIOContext *priv     = ctx->priv_data;
    AVStream    *audio_st = ctx->streams[0];

    cdio_paranoia_seek(priv->paranoia, timestamp, SEEK_SET);
    audio_st->cur_dts = timestamp;

    return 0;
}
#define OFFSET(x) offsetof(CDIOContext, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

/* Private options: drive speed and the paranoia error-recovery bitmask. */
static const AVOption options[] = {
    { "speed", "set drive reading speed", OFFSET(speed), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, DEC },
    { "paranoia_mode", "set error recovery mode", OFFSET(paranoia_mode), AV_OPT_TYPE_FLAGS, { .i64 = PARANOIA_MODE_DISABLE }, INT_MIN, INT_MAX, DEC, "paranoia_mode" },
    { "disable", "apply no fixups", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_DISABLE }, 0, 0, DEC, "paranoia_mode" },
    { "verify", "verify data integrity in overlap area", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_VERIFY }, 0, 0, DEC, "paranoia_mode" },
    { "overlap", "perform overlapped reads", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_OVERLAP }, 0, 0, DEC, "paranoia_mode" },
    { "neverskip", "do not skip failed reads", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_NEVERSKIP }, 0, 0, DEC, "paranoia_mode" },
    { "full", "apply all recovery modes", 0, AV_OPT_TYPE_CONST, { .i64 = PARANOIA_MODE_FULL }, 0, 0, DEC, "paranoia_mode" },
    { NULL },
};

static const AVClass libcdio_class = {
    .class_name = "libcdio indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};

/* AVFMT_NOFILE: the CD drive is opened directly, not via AVIO. */
AVInputFormat ff_libcdio_demuxer = {
    .name           = "libcdio",
    .read_header    = read_header,
    .read_packet    = read_packet,
    .read_close     = read_close,
    .read_seek      = read_seek,
    .priv_data_size = sizeof(CDIOContext),
    .flags          = AVFMT_NOFILE,
    .priv_class     = &libcdio_class,
};

300
externals/ffmpeg/libavdevice/libdc1394.c vendored Executable file
View File

@@ -0,0 +1,300 @@
/*
* IIDC1394 grab interface (uses libdc1394 and libraw1394)
* Copyright (c) 2004 Roman Shaposhnik
* Copyright (c) 2008 Alessandro Sappia
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <dc1394/dc1394.h>
#include "libavutil/imgutils.h"
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
/* Per-instance state of the libdc1394 IIDC camera grabber. */
typedef struct dc1394_data {
    AVClass *class;             /* must be first: AVOptions context */
    dc1394_t *d;                /* library handle */
    dc1394camera_t *camera;     /* the (first enumerated) camera */
    dc1394video_frame_t *frame; /* last dequeued DMA frame, re-enqueued on next read */
    int current_frame;          /* number of frames handed out so far */
    int frame_rate;        /**< frames per 1000 seconds (fps * 1000) */
    char *video_size;      /**< String describing video size, set by a private option. */
    char *pixel_format;    /**< Set by a private option. */
    char *framerate;       /**< Set by a private option. */

    int size;              /* byte size of one video frame */
    int stream_index;      /* index of the video stream created in read_common */
} dc1394_data;

/* Fixed IIDC video modes we can map to FFmpeg pixel formats;
 * terminated by an all-zero sentinel entry. */
static const struct dc1394_frame_format {
    int width;
    int height;
    enum AVPixelFormat pix_fmt;
    int frame_size_id;          /* DC1394_VIDEO_MODE_* id */
} dc1394_frame_formats[] = {
    { 320, 240, AV_PIX_FMT_UYVY422, DC1394_VIDEO_MODE_320x240_YUV422 },
    { 640, 480, AV_PIX_FMT_GRAY8, DC1394_VIDEO_MODE_640x480_MONO8 },
    { 640, 480, AV_PIX_FMT_UYYVYY411, DC1394_VIDEO_MODE_640x480_YUV411 },
    { 640, 480, AV_PIX_FMT_UYVY422, DC1394_VIDEO_MODE_640x480_YUV422 },
    { 0, 0, 0, 0 } /* gotta be the last one */
};

/* Fixed IIDC frame rates in frames per 1000 seconds;
 * terminated by an all-zero sentinel entry. */
static const struct dc1394_frame_rate {
    int frame_rate;
    int frame_rate_id;          /* DC1394_FRAMERATE_* id */
} dc1394_frame_rates[] = {
    { 1875, DC1394_FRAMERATE_1_875 },
    { 3750, DC1394_FRAMERATE_3_75 },
    { 7500, DC1394_FRAMERATE_7_5 },
    { 15000, DC1394_FRAMERATE_15 },
    { 30000, DC1394_FRAMERATE_30 },
    { 60000, DC1394_FRAMERATE_60 },
    {120000, DC1394_FRAMERATE_120 },
    {240000, DC1394_FRAMERATE_240 },
    { 0, 0 } /* gotta be the last one */
};

#define OFFSET(x) offsetof(dc1394_data, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

/* Private options; defaults select QVGA uyvy422 at NTSC rate. */
static const AVOption options[] = {
    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = "qvga"}, 0, 0, DEC },
    { "pixel_format", "", OFFSET(pixel_format), AV_OPT_TYPE_STRING, {.str = "uyvy422"}, 0, 0, DEC },
    { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
    { NULL },
};

static const AVClass libdc1394_class = {
    .class_name = "libdc1394 indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
/**
 * Parse the private options (pixel format, video size, frame rate), match
 * them against the fixed IIDC mode tables and create the single raw-video
 * stream. On success *select_fmt / *select_fps point into the tables.
 * Returns 0 on success or a negative AVERROR code.
 */
static inline int dc1394_read_common(AVFormatContext *c,
                                     const struct dc1394_frame_format **select_fmt, const struct dc1394_frame_rate **select_fps)
{
    dc1394_data* dc1394 = c->priv_data;
    AVStream* vst;
    const struct dc1394_frame_format *fmt;
    const struct dc1394_frame_rate *fps;
    enum AVPixelFormat pix_fmt;
    int width, height;
    AVRational framerate;
    int ret = 0;

    if ((pix_fmt = av_get_pix_fmt(dc1394->pixel_format)) == AV_PIX_FMT_NONE) {
        av_log(c, AV_LOG_ERROR, "No such pixel format: %s.\n", dc1394->pixel_format);
        ret = AVERROR(EINVAL);
        goto out;
    }
    if ((ret = av_parse_video_size(&width, &height, dc1394->video_size)) < 0) {
        av_log(c, AV_LOG_ERROR, "Could not parse video size '%s'.\n", dc1394->video_size);
        goto out;
    }
    if ((ret = av_parse_video_rate(&framerate, dc1394->framerate)) < 0) {
        av_log(c, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", dc1394->framerate);
        goto out;
    }
    /* stored as frames per 1000 seconds to match the IIDC rate table */
    dc1394->frame_rate = av_rescale(1000, framerate.num, framerate.den);

    /* both tables end in a zero-filled sentinel entry */
    for (fmt = dc1394_frame_formats; fmt->width; fmt++)
        if (fmt->pix_fmt == pix_fmt && fmt->width == width && fmt->height == height)
            break;
    for (fps = dc1394_frame_rates; fps->frame_rate; fps++)
        if (fps->frame_rate == dc1394->frame_rate)
            break;
    if (!fps->frame_rate || !fmt->width) {
        av_log(c, AV_LOG_ERROR, "Can't find matching camera format for %s, %dx%d@%d:1000fps\n", av_get_pix_fmt_name(pix_fmt),
               width, height, dc1394->frame_rate);
        ret = AVERROR(EINVAL);
        goto out;
    }

    /* create a video stream */
    vst = avformat_new_stream(c, NULL);
    if (!vst) {
        ret = AVERROR(ENOMEM);
        goto out;
    }
    avpriv_set_pts_info(vst, 64, 1, 1000); /* millisecond time base */
    vst->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    vst->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
    vst->codecpar->width = fmt->width;
    vst->codecpar->height = fmt->height;
    vst->codecpar->format = fmt->pix_fmt;
    vst->avg_frame_rate = framerate;

    /* reset per-capture state */
    dc1394->current_frame = 0;
    dc1394->stream_index = vst->index;

    dc1394->size = av_image_get_buffer_size(fmt->pix_fmt,
                                            fmt->width, fmt->height, 1);
    vst->codecpar->bit_rate = av_rescale(dc1394->size * 8,
                                         fps->frame_rate, 1000);

    *select_fps = fps;
    *select_fmt = fmt;
out:
    return ret;
}
/**
 * Enumerate IIDC cameras, open the first one, configure bus speed, video
 * mode, frame rate and DMA capture, and start transmission.
 * Returns 0 on success, -1 on any failure (acquired handles are released
 * through the goto-cleanup labels).
 */
static int dc1394_read_header(AVFormatContext *c)
{
    dc1394_data* dc1394 = c->priv_data;
    dc1394camera_list_t *list;
    int res, i;
    const struct dc1394_frame_format *fmt = NULL;
    const struct dc1394_frame_rate *fps = NULL;

    if (dc1394_read_common(c, &fmt, &fps) != 0)
        return -1;

    /* Now let us prep the hardware. */
    dc1394->d = dc1394_new();
    if (dc1394_camera_enumerate(dc1394->d, &list) != DC1394_SUCCESS || !list) {
        av_log(c, AV_LOG_ERROR, "Unable to look for an IIDC camera.\n");
        goto out;
    }

    if (list->num == 0) {
        av_log(c, AV_LOG_ERROR, "No cameras found.\n");
        dc1394_camera_free_list(list);
        goto out;
    }

    /* FIXME: To select a specific camera I need to search in list its guid */
    dc1394->camera = dc1394_camera_new (dc1394->d, list->ids[0].guid);
    if (!dc1394->camera) {
        av_log(c, AV_LOG_ERROR, "Unable to open camera with guid 0x%"PRIx64"\n",
               list->ids[0].guid);
        dc1394_camera_free_list(list);
        goto out;
    }

    if (list->num > 1) {
        av_log(c, AV_LOG_INFO, "Working with the first camera found\n");
    }

    /* Freeing list of cameras */
    dc1394_camera_free_list (list);

    /* Select MAX Speed possible from the cam */
    if (dc1394->camera->bmode_capable>0) {
        dc1394_video_set_operation_mode(dc1394->camera, DC1394_OPERATION_MODE_1394B);
        i = DC1394_ISO_SPEED_800;
    } else {
        i = DC1394_ISO_SPEED_400;
    }

    /* step down from the fastest ISO speed until one is accepted */
    for (res = DC1394_FAILURE; i >= DC1394_ISO_SPEED_MIN && res != DC1394_SUCCESS; i--) {
        res=dc1394_video_set_iso_speed(dc1394->camera, i);
    }
    if (res != DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Couldn't set ISO Speed\n");
        goto out_camera;
    }

    if (dc1394_video_set_mode(dc1394->camera, fmt->frame_size_id) != DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Couldn't set video format\n");
        goto out_camera;
    }

    if (dc1394_video_set_framerate(dc1394->camera,fps->frame_rate_id) != DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Couldn't set framerate %d \n",fps->frame_rate);
        goto out_camera;
    }

    /* ring of 10 DMA buffers */
    if (dc1394_capture_setup(dc1394->camera, 10, DC1394_CAPTURE_FLAGS_DEFAULT)!=DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Cannot setup camera \n");
        goto out_camera;
    }

    if (dc1394_video_set_transmission(dc1394->camera, DC1394_ON) !=DC1394_SUCCESS) {
        av_log(c, AV_LOG_ERROR, "Cannot start capture\n");
        goto out_camera;
    }
    return 0;

out_camera:
    dc1394_capture_stop(dc1394->camera);
    dc1394_video_set_transmission(dc1394->camera, DC1394_OFF);
    dc1394_camera_free (dc1394->camera);
out:
    dc1394_free(dc1394->d);
    return -1;
}
/**
 * Dequeue one DMA frame and expose it as a packet (the buffer is owned by
 * libdc1394; it is handed back with dc1394_capture_enqueue() on the next
 * call).
 * Fix vs. original: the pts was computed as
 * `current_frame * 1000000 / frame_rate` in 32-bit int arithmetic, which
 * overflows once current_frame exceeds ~2147 (about 72 s at 30 fps).
 * Perform the multiplication in 64 bits; the result is identical in the
 * non-overflowing range.
 * Returns the packet size on success or AVERROR_INVALIDDATA on a DMA
 * capture failure.
 */
static int dc1394_read_packet(AVFormatContext *c, AVPacket *pkt)
{
    struct dc1394_data *dc1394 = c->priv_data;
    int res;

    /* discard stale frame: return the previous DMA buffer to the ring */
    if (dc1394->current_frame++) {
        if (dc1394_capture_enqueue(dc1394->camera, dc1394->frame) != DC1394_SUCCESS)
            av_log(c, AV_LOG_ERROR, "failed to release %d frame\n", dc1394->current_frame);
    }

    res = dc1394_capture_dequeue(dc1394->camera, DC1394_CAPTURE_POLICY_WAIT, &dc1394->frame);
    if (res == DC1394_SUCCESS) {
        pkt->data = (uint8_t *)dc1394->frame->image;
        pkt->size = dc1394->frame->image_bytes;
        /* frame_rate is fps*1000 and the stream time base is 1/1000 s,
         * so pts = n * 1000000 / frame_rate; widen to avoid int overflow */
        pkt->pts = dc1394->current_frame * INT64_C(1000000) / dc1394->frame_rate;
        pkt->flags |= AV_PKT_FLAG_KEY;
        pkt->stream_index = dc1394->stream_index;
    } else {
        av_log(c, AV_LOG_ERROR, "DMA capture failed\n");
        return AVERROR_INVALIDDATA;
    }

    return pkt->size;
}
/* Stop streaming, then release the capture ring, camera and library. */
static int dc1394_close(AVFormatContext * context)
{
    struct dc1394_data *priv = context->priv_data;

    dc1394_video_set_transmission(priv->camera, DC1394_OFF);
    dc1394_capture_stop(priv->camera);
    dc1394_camera_free(priv->camera);
    dc1394_free(priv->d);

    return 0;
}
/* AVFMT_NOFILE: the camera is opened through libdc1394, not AVIO. */
AVInputFormat ff_libdc1394_demuxer = {
    .name           = "libdc1394",
    .long_name      = NULL_IF_CONFIG_SMALL("dc1394 v.2 A/V grab"),
    .priv_data_size = sizeof(struct dc1394_data),
    .read_header    = dc1394_read_header,
    .read_packet    = dc1394_read_packet,
    .read_close     = dc1394_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &libdc1394_class,
};

261
externals/ffmpeg/libavdevice/openal-dec.c vendored Executable file
View File

@@ -0,0 +1,261 @@
/*
* Copyright (c) 2011 Jonathan Baldwin
*
* This file is part of FFmpeg.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES WITH
* REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
* AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY SPECIAL, DIRECT,
* INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
* LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
* OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
* PERFORMANCE OF THIS SOFTWARE.
*/
/**
* @file
* OpenAL 1.1 capture device for libavdevice
**/
#include <AL/al.h>
#include <AL/alc.h>
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavformat/internal.h"
#include "avdevice.h"
/* Per-instance state of the OpenAL capture device. */
typedef struct {
    AVClass *class;             /* must be first: AVOptions context */
    /** OpenAL capture device context. **/
    ALCdevice *device;
    /** The number of channels in the captured audio. **/
    int channels;
    /** The sample rate (in Hz) of the captured audio. **/
    int sample_rate;
    /** The sample size (in bits) of the captured audio. **/
    int sample_size;
    /** The OpenAL sample format of the captured audio. **/
    ALCenum sample_format;
    /** The number of bytes between two consecutive samples of the same channel/component. **/
    ALCint sample_step;
    /** If true, print a list of capture devices on this system and exit. **/
    int list_devices;
} al_data;

/* Maps one AL_FORMAT_* value to the matching codec id and channel count. */
typedef struct {
    ALCenum al_fmt;
    enum AVCodecID codec_id;
    int channels;
} al_format_info;

/* Smallest AL_FORMAT_* constant; used to index the lookup table in
 * get_al_format_info(). */
#define LOWEST_AL_FORMAT FFMIN(FFMIN(AL_FORMAT_MONO8,AL_FORMAT_MONO16),FFMIN(AL_FORMAT_STEREO8,AL_FORMAT_STEREO16))
/**
 * Look up codec id and channel count for an OpenAL AL_FORMAT_* value.
 * @param al_fmt one of the four supported AL_FORMAT_* constants.
 * @return pointer to the static table entry describing that format.
 */
static const inline al_format_info* get_al_format_info(ALCenum al_fmt)
{
    /* Sparse table indexed by offset from the smallest AL_FORMAT_* value. */
    static const al_format_info info_table[] = {
        [AL_FORMAT_MONO8-LOWEST_AL_FORMAT]    = {AL_FORMAT_MONO8,    AV_CODEC_ID_PCM_U8, 1},
        [AL_FORMAT_MONO16-LOWEST_AL_FORMAT]   = {AL_FORMAT_MONO16,   AV_NE (AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE), 1},
        [AL_FORMAT_STEREO8-LOWEST_AL_FORMAT]  = {AL_FORMAT_STEREO8,  AV_CODEC_ID_PCM_U8, 2},
        [AL_FORMAT_STEREO16-LOWEST_AL_FORMAT] = {AL_FORMAT_STEREO16, AV_NE (AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE), 2},
    };
    const al_format_info *entry = info_table + (al_fmt - LOWEST_AL_FORMAT);

    return entry;
}
/**
 * Translate the pending OpenAL error on a device into an AVERROR code.
 * @param device The ALC device to check for errors.
 * @param error_msg_ret A pointer to a char* in which to return the error message, or NULL if desired.
 * @return The error code, or 0 if there is no error.
 */
static inline int al_get_error(ALCdevice *device, const char** error_msg_ret)
{
    ALCenum error = alcGetError(device);

    if (error_msg_ret)
        *error_msg_ret = (const char*) alcGetString(device, error);

    /* every case returns, so no break statements are needed */
    switch (error) {
    case ALC_NO_ERROR:
        return 0;
    case ALC_INVALID_DEVICE:
        return AVERROR(ENODEV);
    case ALC_INVALID_CONTEXT:
    case ALC_INVALID_ENUM:
    case ALC_INVALID_VALUE:
        return AVERROR(EINVAL);
    case ALC_OUT_OF_MEMORY:
        return AVERROR(ENOMEM);
    default:
        return AVERROR(EIO);
    }
}
/**
 * Print out a list of OpenAL capture devices on this system.
 */
static inline void print_al_capture_devices(void *log_ctx)
{
    const char *dev_name = alcGetString(NULL, ALC_CAPTURE_DEVICE_SPECIFIER);

    if (!dev_name)
        return;

    av_log(log_ctx, AV_LOG_INFO, "List of OpenAL capture devices on this system:\n");
    /* the specifier list is a run of NUL-terminated strings, ended by an
     * empty string */
    while (*dev_name != '\0') {
        av_log(log_ctx, AV_LOG_INFO, " %s\n", dev_name);
        dev_name += strlen(dev_name) + 1;
    }
}
/**
 * Open the OpenAL capture device named by ctx->url (or the default device)
 * and create the single PCM audio stream.
 * Fixes vs. original:
 *  - error_msg was uninitialized; when avformat_new_stream() failed the
 *    fail: path read it (`if (error_msg)`), which is undefined behavior.
 *    Initialize it to NULL.
 *  - on failure, reset ad->device to NULL after closing it so a later
 *    read_close() cannot close the device a second time.
 * Returns 0 on success or a negative AVERROR code (AVERROR_EXIT when only
 * listing devices).
 */
static int read_header(AVFormatContext *ctx)
{
    al_data *ad = ctx->priv_data;
    static const ALCenum sample_formats[2][2] = {
        { AL_FORMAT_MONO8,  AL_FORMAT_STEREO8  },
        { AL_FORMAT_MONO16, AL_FORMAT_STEREO16 }
    };
    int error = 0;
    const char *error_msg = NULL;
    AVStream *st = NULL;
    AVCodecParameters *par = NULL;

    if (ad->list_devices) {
        print_al_capture_devices(ctx);
        return AVERROR_EXIT;
    }

    /* sample_size is 8 or 16 and channels is 1 or 2 (AVOption ranges) */
    ad->sample_format = sample_formats[ad->sample_size/8-1][ad->channels-1];

    /* Open device for capture */
    ad->device =
        alcCaptureOpenDevice(ctx->url[0] ? ctx->url : NULL,
                             ad->sample_rate,
                             ad->sample_format,
                             ad->sample_rate); /* Maximum 1 second of sample data to be read at once */

    if ((error = al_get_error(ad->device, &error_msg))) goto fail;

    /* Create stream */
    if (!(st = avformat_new_stream(ctx, NULL))) {
        error = AVERROR(ENOMEM);
        goto fail;
    }

    /* We work in microseconds */
    avpriv_set_pts_info(st, 64, 1, 1000000);

    /* Set codec parameters */
    par = st->codecpar;
    par->codec_type = AVMEDIA_TYPE_AUDIO;
    par->sample_rate = ad->sample_rate;
    par->channels = get_al_format_info(ad->sample_format)->channels;
    par->codec_id = get_al_format_info(ad->sample_format)->codec_id;

    /* This is needed to read the audio data */
    ad->sample_step = (av_get_bits_per_sample(get_al_format_info(ad->sample_format)->codec_id) *
                       get_al_format_info(ad->sample_format)->channels) / 8;

    /* Finally, start the capture process */
    alcCaptureStart(ad->device);

    return 0;

fail:
    /* Handle failure */
    if (ad->device) {
        alcCaptureCloseDevice(ad->device);
        ad->device = NULL; /* prevent a second close in read_close() */
    }
    if (error_msg)
        av_log(ctx, AV_LOG_ERROR, "Cannot open device: %s\n", error_msg);
    return error;
}
/**
 * Wait for captured samples (or return EAGAIN in non-blocking mode) and
 * copy them into a new packet, stamped with the wall-clock time.
 * Fix vs. original: error_msg was uninitialized; if av_new_packet()
 * failed, the fail: path read it (`if (error_msg)`), which is undefined
 * behavior. Initialize it to NULL. Assignments used as conditions are
 * wrapped in double parentheses to silence -Wparentheses.
 * Returns the packet size on success or a negative AVERROR code.
 */
static int read_packet(AVFormatContext* ctx, AVPacket *pkt)
{
    al_data *ad = ctx->priv_data;
    int error=0;
    const char *error_msg = NULL;
    ALCint nb_samples;

    for (;;) {
        /* Get number of samples available */
        alcGetIntegerv(ad->device, ALC_CAPTURE_SAMPLES, (ALCsizei) sizeof(ALCint), &nb_samples);
        if ((error = al_get_error(ad->device, &error_msg))) goto fail;
        if (nb_samples > 0)
            break;
        if (ctx->flags & AVFMT_FLAG_NONBLOCK)
            return AVERROR(EAGAIN);
        av_usleep(1000); /* back off 1 ms before polling again */
    }

    /* Create a packet of appropriate size */
    if ((error = av_new_packet(pkt, nb_samples*ad->sample_step)) < 0)
        goto fail;
    pkt->pts = av_gettime();

    /* Fill the packet with the available samples */
    alcCaptureSamples(ad->device, pkt->data, nb_samples);
    if ((error = al_get_error(ad->device, &error_msg))) goto fail;

    return pkt->size;
fail:
    /* Handle failure */
    if (pkt->data)
        av_packet_unref(pkt);
    if (error_msg)
        av_log(ctx, AV_LOG_ERROR, "Error: %s\n", error_msg);
    return error;
}
/* Stop capturing and release the OpenAL device, if one is open. */
static int read_close(AVFormatContext* ctx)
{
    al_data *ad = ctx->priv_data;

    if (!ad->device)
        return 0;

    alcCaptureStop(ad->device);
    alcCaptureCloseDevice(ad->device);
    return 0;
}
#define OFFSET(x) offsetof(al_data, x)

/* Private options of the OpenAL capture device. */
static const AVOption options[] = {
    {"channels", "set number of channels", OFFSET(channels), AV_OPT_TYPE_INT, {.i64=2}, 1, 2, AV_OPT_FLAG_DECODING_PARAM },
    {"sample_rate", "set sample rate", OFFSET(sample_rate), AV_OPT_TYPE_INT, {.i64=44100}, 1, 192000, AV_OPT_FLAG_DECODING_PARAM },
    {"sample_size", "set sample size", OFFSET(sample_size), AV_OPT_TYPE_INT, {.i64=16}, 8, 16, AV_OPT_FLAG_DECODING_PARAM },
    {"list_devices", "list available devices", OFFSET(list_devices), AV_OPT_TYPE_INT, {.i64=0}, 0, 1, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
    {"true", "", 0, AV_OPT_TYPE_CONST, {.i64=1}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
    {"false", "", 0, AV_OPT_TYPE_CONST, {.i64=0}, 0, 0, AV_OPT_FLAG_DECODING_PARAM, "list_devices" },
    {NULL},
};

/* NOTE: "class" is a valid identifier in C (it would clash in C++). */
static const AVClass class = {
    .class_name = "openal indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};

/* AVFMT_NOFILE: the capture device is opened via OpenAL, not AVIO. */
AVInputFormat ff_openal_demuxer = {
    .name           = "openal",
    .long_name      = NULL_IF_CONFIG_SMALL("OpenAL audio capture device"),
    .priv_data_size = sizeof(al_data),
    .read_probe     = NULL, /* nothing to probe on a capture device */
    .read_header    = read_header,
    .read_packet    = read_packet,
    .read_close     = read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &class
};

1308
externals/ffmpeg/libavdevice/opengl_enc.c vendored Executable file

File diff suppressed because it is too large Load Diff

View File

@@ -0,0 +1,188 @@
/*
* Copyright (c) 2014 Lukasz Marek
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_OPENGL_ENC_SHADERS_H
#define AVDEVICE_OPENGL_ENC_SHADERS_H
#include "libavutil/pixfmt.h"
/**
 * Shared vertex shader: transforms the quad vertices and forwards the
 * texture coordinates to the fragment shaders below.
 */
static const char * const FF_OPENGL_VERTEX_SHADER =
    "uniform mat4 u_projectionMatrix;"
    "uniform mat4 u_modelViewMatrix;"
    "attribute vec4 a_position;"
    "attribute vec2 a_textureCoords;"
    "varying vec2 texture_coordinate;"
    "void main()"
    "{"
        "gl_Position = u_projectionMatrix * (a_position * u_modelViewMatrix);"
        "texture_coordinate = a_textureCoords;"
    "}";

/**
 * Fragment shader for packet RGBA formats: samples one texture and applies
 * the component-reordering color map.
 */
static const char * const FF_OPENGL_FRAGMENT_SHADER_RGBA_PACKET =
#if defined(GL_ES_VERSION_2_0)
    "precision mediump float;"
#endif
    "uniform sampler2D u_texture0;"
    "uniform mat4 u_colorMap;"
    "varying vec2 texture_coordinate;"
    "void main()"
    "{"
        "gl_FragColor = texture2D(u_texture0, texture_coordinate) * u_colorMap;"
    "}";

/**
 * Fragment shader for packet RGB formats: like the RGBA variant but forces
 * alpha to 1.0.
 */
static const char * const FF_OPENGL_FRAGMENT_SHADER_RGB_PACKET =
#if defined(GL_ES_VERSION_2_0)
    "precision mediump float;"
#endif
    "uniform sampler2D u_texture0;"
    "uniform mat4 u_colorMap;"
    "varying vec2 texture_coordinate;"
    "void main()"
    "{"
        "gl_FragColor = vec4((texture2D(u_texture0, texture_coordinate) * u_colorMap).rgb, 1.0);"
    "}";

/**
 * Fragment shader for planar RGBA formats: one texture per plane, merged
 * into a single output pixel.
 */
static const char * const FF_OPENGL_FRAGMENT_SHADER_RGBA_PLANAR =
#if defined(GL_ES_VERSION_2_0)
    "precision mediump float;"
#endif
    "uniform sampler2D u_texture0;"
    "uniform sampler2D u_texture1;"
    "uniform sampler2D u_texture2;"
    "uniform sampler2D u_texture3;"
    "varying vec2 texture_coordinate;"
    "void main()"
    "{"
        "gl_FragColor = vec4(texture2D(u_texture0, texture_coordinate).r,"
                            "texture2D(u_texture1, texture_coordinate).r,"
                            "texture2D(u_texture2, texture_coordinate).r,"
                            "texture2D(u_texture3, texture_coordinate).r);"
    "}";

/**
 * Fragment shader for planar RGB formats: three plane textures, alpha
 * forced to 1.0.
 */
static const char * const FF_OPENGL_FRAGMENT_SHADER_RGB_PLANAR =
#if defined(GL_ES_VERSION_2_0)
    "precision mediump float;"
#endif
    "uniform sampler2D u_texture0;"
    "uniform sampler2D u_texture1;"
    "uniform sampler2D u_texture2;"
    "varying vec2 texture_coordinate;"
    "void main()"
    "{"
        "gl_FragColor = vec4(texture2D(u_texture0, texture_coordinate).r,"
                            "texture2D(u_texture1, texture_coordinate).r,"
                            "texture2D(u_texture2, texture_coordinate).r,"
                            "1.0);"
    "}";

/**
 * Fragment shader for planar YUV formats: subsampled chroma planes are
 * scaled by u_chroma_div_w/h, then YUV is converted to RGB.
 */
static const char * const FF_OPENGL_FRAGMENT_SHADER_YUV_PLANAR =
#if defined(GL_ES_VERSION_2_0)
    "precision mediump float;"
#endif
    "uniform sampler2D u_texture0;"
    "uniform sampler2D u_texture1;"
    "uniform sampler2D u_texture2;"
    "uniform float u_chroma_div_w;"
    "uniform float u_chroma_div_h;"
    "varying vec2 texture_coordinate;"
    "void main()"
    "{"
        "vec3 yuv;"
        "yuv.r = texture2D(u_texture0, texture_coordinate).r - 0.0625;"
        "yuv.g = texture2D(u_texture1, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;"
        "yuv.b = texture2D(u_texture2, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;"
        "gl_FragColor = clamp(vec4(mat3(1.1643, 1.16430, 1.1643,"
                                       "0.0, -0.39173, 2.0170,"
                                       "1.5958, -0.81290, 0.0) * yuv, 1.0), 0.0, 1.0);"
    "}";

/**
 * Fragment shader for planar YUVA formats: as the YUV variant, with alpha
 * taken from the fourth plane.
 */
static const char * const FF_OPENGL_FRAGMENT_SHADER_YUVA_PLANAR =
#if defined(GL_ES_VERSION_2_0)
    "precision mediump float;"
#endif
    "uniform sampler2D u_texture0;"
    "uniform sampler2D u_texture1;"
    "uniform sampler2D u_texture2;"
    "uniform sampler2D u_texture3;"
    "uniform float u_chroma_div_w;"
    "uniform float u_chroma_div_h;"
    "varying vec2 texture_coordinate;"
    "void main()"
    "{"
        "vec3 yuv;"
        "yuv.r = texture2D(u_texture0, texture_coordinate).r - 0.0625;"
        "yuv.g = texture2D(u_texture1, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;"
        "yuv.b = texture2D(u_texture2, vec2(texture_coordinate.x / u_chroma_div_w, texture_coordinate.y / u_chroma_div_h)).r - 0.5;"
        "gl_FragColor = clamp(vec4(mat3(1.1643, 1.16430, 1.1643,"
                                       "0.0, -0.39173, 2.0170,"
                                       "1.5958, -0.81290, 0.0) * yuv, texture2D(u_texture3, texture_coordinate).r), 0.0, 1.0);"
    "}";

/**
 * Fragment shader for grayscale formats: replicate the single component
 * into R, G and B.
 */
static const char * const FF_OPENGL_FRAGMENT_SHADER_GRAY =
#if defined(GL_ES_VERSION_2_0)
    "precision mediump float;"
#endif
    "uniform sampler2D u_texture0;"
    "varying vec2 texture_coordinate;"
    "void main()"
    "{"
        "float c = texture2D(u_texture0, texture_coordinate).r;"
        "gl_FragColor = vec4(c, c, c, 1.0);"
    "}";
#endif /* AVDEVICE_OPENGL_ENC_SHADERS_H */

139
externals/ffmpeg/libavdevice/oss.c vendored Executable file
View File

@@ -0,0 +1,139 @@
/*
* Linux audio play and grab interface
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <string.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>
#include "libavutil/log.h"
#include "libavcodec/avcodec.h"
#include "avdevice.h"
#include "oss.h"
/**
 * Open an OSS audio device for capture (is_output == 0) or playback and
 * negotiate a native-endian 16-bit sample format, channel count and sample
 * rate; the rate actually granted by the driver is written back into
 * s->sample_rate.
 * Fixes vs. original:
 *  - tmp was read by the AFMT_* bit tests without ever being written when
 *    the (deliberately tolerated) SNDCTL_DSP_GETFMTS ioctl failed;
 *    initialize it so that path deterministically reports "no 16 bit
 *    format" instead of reading an indeterminate value.
 *  - the error message for the SNDCTL_DSP_SETFMT ioctl said
 *    "SNDCTL_DSP_SETFMTS".
 * Returns 0 on success, AVERROR(EIO) on failure (fd closed on all error
 * paths).
 */
int ff_oss_audio_open(AVFormatContext *s1, int is_output,
                      const char *audio_device)
{
    OSSAudioData *s = s1->priv_data;
    int audio_fd;
    int tmp = 0, err;
    char *flip = getenv("AUDIO_FLIP_LEFT");

    if (is_output)
        audio_fd = avpriv_open(audio_device, O_WRONLY);
    else
        audio_fd = avpriv_open(audio_device, O_RDONLY);
    if (audio_fd < 0) {
        av_log(s1, AV_LOG_ERROR, "%s: %s\n", audio_device, av_err2str(AVERROR(errno)));
        return AVERROR(EIO);
    }

    if (flip && *flip == '1') {
        s->flip_left = 1;
    }

    /* non blocking mode */
    if (!is_output) {
        if (fcntl(audio_fd, F_SETFL, O_NONBLOCK) < 0) {
            av_log(s1, AV_LOG_WARNING, "%s: Could not enable non block mode (%s)\n", audio_device, av_err2str(AVERROR(errno)));
        }
    }

    s->frame_size = OSS_AUDIO_BLOCK_SIZE;

#define CHECK_IOCTL_ERROR(event)                                              \
    if (err < 0) {                                                            \
        av_log(s1, AV_LOG_ERROR, #event ": %s\n", av_err2str(AVERROR(errno)));\
        goto fail;                                                            \
    }

    /* select format : favour native format
     * We don't CHECK_IOCTL_ERROR here because even if failed OSS still may be
     * usable. If OSS is not usable the SNDCTL_DSP_SETFMT later is going to
     * fail anyway. tmp stays 0 on failure, so no AFMT_* bit matches. */
    err = ioctl(audio_fd, SNDCTL_DSP_GETFMTS, &tmp);
    if (err < 0) {
        av_log(s1, AV_LOG_WARNING, "SNDCTL_DSP_GETFMTS: %s\n", av_err2str(AVERROR(errno)));
    }

#if HAVE_BIGENDIAN
    if (tmp & AFMT_S16_BE) {
        tmp = AFMT_S16_BE;
    } else if (tmp & AFMT_S16_LE) {
        tmp = AFMT_S16_LE;
    } else {
        tmp = 0;
    }
#else
    if (tmp & AFMT_S16_LE) {
        tmp = AFMT_S16_LE;
    } else if (tmp & AFMT_S16_BE) {
        tmp = AFMT_S16_BE;
    } else {
        tmp = 0;
    }
#endif

    switch(tmp) {
    case AFMT_S16_LE:
        s->codec_id = AV_CODEC_ID_PCM_S16LE;
        break;
    case AFMT_S16_BE:
        s->codec_id = AV_CODEC_ID_PCM_S16BE;
        break;
    default:
        av_log(s1, AV_LOG_ERROR, "Soundcard does not support 16 bit sample format\n");
        close(audio_fd);
        return AVERROR(EIO);
    }
    err=ioctl(audio_fd, SNDCTL_DSP_SETFMT, &tmp);
    CHECK_IOCTL_ERROR(SNDCTL_DSP_SETFMT)

    tmp = (s->channels == 2);
    err = ioctl(audio_fd, SNDCTL_DSP_STEREO, &tmp);
    CHECK_IOCTL_ERROR(SNDCTL_DSP_STEREO)

    tmp = s->sample_rate;
    err = ioctl(audio_fd, SNDCTL_DSP_SPEED, &tmp);
    CHECK_IOCTL_ERROR(SNDCTL_DSP_SPEED)
    s->sample_rate = tmp; /* store real sample rate */
    s->fd = audio_fd;

    return 0;
 fail:
    close(audio_fd);
    return AVERROR(EIO);
#undef CHECK_IOCTL_ERROR
}
/* Close the device file descriptor opened by ff_oss_audio_open().
 * Always reports success. */
int ff_oss_audio_close(OSSAudioData *s)
{
    const int fd = s->fd;

    close(fd);
    return 0;
}

45
externals/ffmpeg/libavdevice/oss.h vendored Executable file
View File

@@ -0,0 +1,45 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_OSS_H
#define AVDEVICE_OSS_H
#include "libavcodec/avcodec.h"
#include "libavformat/avformat.h"
#define OSS_AUDIO_BLOCK_SIZE 4096
/* Shared state of the OSS capture (oss_dec.c) and playback (oss_enc.c) devices. */
typedef struct OSSAudioData {
    AVClass *class;                       /* class context for private AVOptions */
    int fd;                               /* OSS device file descriptor */
    int sample_rate;                      /* sample rate in Hz (updated with the rate the driver accepts) */
    int channels;                         /* number of channels */
    int frame_size; /* in bytes ! */
    enum AVCodecID codec_id;              /* PCM codec chosen from the card's native 16-bit format */
    unsigned int flip_left : 1;           /* set via AUDIO_FLIP_LEFT env var; inverts left channel on capture */
    uint8_t buffer[OSS_AUDIO_BLOCK_SIZE]; /* staging buffer used by the playback path */
    int buffer_ptr;                       /* bytes currently pending in buffer */
} OSSAudioData;
int ff_oss_audio_open(AVFormatContext *s1, int is_output,
const char *audio_device);
int ff_oss_audio_close(OSSAudioData *s);
#endif /* AVDEVICE_OSS_H */

144
externals/ffmpeg/libavdevice/oss_dec.c vendored Executable file
View File

@@ -0,0 +1,144 @@
/*
* Linux audio play interface
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <stdint.h>
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavcodec/avcodec.h"
#include "avdevice.h"
#include "libavformat/internal.h"
#include "oss.h"
/* Demuxer read_header: open the OSS capture device named by s1->url and
 * create a single audio stream carrying the parameters actually accepted
 * by the driver. */
static int audio_read_header(AVFormatContext *s1)
{
    OSSAudioData *s = s1->priv_data;
    AVStream *st;
    int ret;

    st = avformat_new_stream(s1, NULL);
    if (!st) {
        return AVERROR(ENOMEM);
    }

    ret = ff_oss_audio_open(s1, 0, s1->url);
    if (ret < 0) {
        return AVERROR(EIO);
    }

    /* take real parameters */
    st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codecpar->codec_id = s->codec_id;
    st->codecpar->sample_rate = s->sample_rate;
    st->codecpar->channels = s->channels;

    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */
    return 0;
}
/* Demuxer read_packet: read up to one block from the OSS fd, timestamp the
 * packet with the wall clock corrected by the driver's input-queue delay,
 * and optionally invert the left channel (AUDIO_FLIP_LEFT). */
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    OSSAudioData *s = s1->priv_data;
    int ret, bdelay;
    int64_t cur_time;
    struct audio_buf_info abufi;

    if ((ret = av_new_packet(pkt, s->frame_size)) < 0)
        return ret;

    ret = read(s->fd, pkt->data, pkt->size);
    if (ret <= 0) {
        /* Save errno immediately: av_packet_unref() frees memory and may
         * clobber it before it is turned into an AVERROR code. */
        int err = errno;

        av_packet_unref(pkt);
        pkt->size = 0;
        if (ret < 0)
            return AVERROR(err);
        else
            return AVERROR_EOF;
    }
    pkt->size = ret;

    /* compute pts of the start of the packet */
    cur_time = av_gettime();
    bdelay = ret;
    if (ioctl(s->fd, SNDCTL_DSP_GETISPACE, &abufi) == 0) {
        bdelay += abufi.bytes;
    }
    /* subtract time represented by the number of bytes in the audio fifo */
    cur_time -= (bdelay * 1000000LL) / (s->sample_rate * s->channels);

    /* convert to wanted units */
    pkt->pts = cur_time;

    if (s->flip_left && s->channels == 2) {
        int i;
        short *p = (short *) pkt->data;
        /* invert every left-channel (even) 16-bit sample */
        for (i = 0; i < ret; i += 4) {
            *p = ~*p;
            p += 2;
        }
    }
    return 0;
}
/* Demuxer read_close: release the OSS capture device. */
static int audio_read_close(AVFormatContext *s1)
{
    ff_oss_audio_close(s1->priv_data);
    return 0;
}
/* User-settable options: requested sample rate and channel count used to
 * configure the OSS device before capture starts. */
static const AVOption options[] = {
    { "sample_rate", "", offsetof(OSSAudioData, sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "channels",    "", offsetof(OSSAudioData, channels),    AV_OPT_TYPE_INT, {.i64 = 2},     1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};

static const AVClass oss_demuxer_class = {
    .class_name     = "OSS indev",
    .item_name      = av_default_item_name,
    .option         = options,
    .version        = LIBAVUTIL_VERSION_INT,
    .category       = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};

/* OSS capture input-device registration. */
AVInputFormat ff_oss_demuxer = {
    .name           = "oss",
    .long_name      = NULL_IF_CONFIG_SMALL("OSS (Open Sound System) capture"),
    .priv_data_size = sizeof(OSSAudioData),
    .read_header    = audio_read_header,
    .read_packet    = audio_read_packet,
    .read_close     = audio_read_close,
    .flags          = AVFMT_NOFILE, /* device path comes from the url, no AVIOContext */
    .priv_class     = &oss_demuxer_class,
};

113
externals/ffmpeg/libavdevice/oss_enc.c vendored Executable file
View File

@@ -0,0 +1,113 @@
/*
* Linux audio grab interface
* Copyright (c) 2000, 2001 Fabrice Bellard
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#if HAVE_UNISTD_H
#include <unistd.h>
#endif
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/soundcard.h>
#include "libavutil/internal.h"
#include "libavcodec/avcodec.h"
#include "avdevice.h"
#include "libavformat/internal.h"
#include "oss.h"
/* Muxer write_header: copy the stream's audio parameters into the private
 * context and open the OSS device named by s1->url for playback. */
static int audio_write_header(AVFormatContext *s1)
{
    OSSAudioData *s = s1->priv_data;
    AVStream *st   = s1->streams[0];
    int ret;

    s->sample_rate = st->codecpar->sample_rate;
    s->channels    = st->codecpar->channels;
    ret = ff_oss_audio_open(s1, 1, s1->url);
    return ret < 0 ? AVERROR(EIO) : 0;
}
/* Muxer write_packet: accumulate packet data in the staging buffer and
 * flush it to the device in fixed OSS_AUDIO_BLOCK_SIZE chunks, retrying
 * writes that fail with EAGAIN/EINTR. */
static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
{
    OSSAudioData *s = s1->priv_data;
    int len, ret;
    int size = pkt->size;
    uint8_t *buf = pkt->data;

    while (size > 0) {
        /* top the staging buffer up to (at most) one full block */
        len = FFMIN(OSS_AUDIO_BLOCK_SIZE - s->buffer_ptr, size);
        memcpy(s->buffer + s->buffer_ptr, buf, len);
        s->buffer_ptr += len;
        if (s->buffer_ptr >= OSS_AUDIO_BLOCK_SIZE) {
            /* spin until the driver accepts the block; only hard errors abort */
            for (;;) {
                ret = write(s->fd, s->buffer, OSS_AUDIO_BLOCK_SIZE);
                if (ret > 0)
                    break;
                if (ret < 0 && (errno != EAGAIN && errno != EINTR))
                    return AVERROR(EIO);
            }
            s->buffer_ptr = 0;
        }
        buf += len;
        size -= len;
    }
    return 0;
}
/* Muxer write_trailer: release the OSS playback device. */
static int audio_write_trailer(AVFormatContext *s1)
{
    ff_oss_audio_close(s1->priv_data);
    return 0;
}
static const AVClass oss_muxer_class = {
    .class_name     = "OSS outdev",
    .item_name      = av_default_item_name,
    .version        = LIBAVUTIL_VERSION_INT,
    .category       = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
};

/* OSS playback output-device registration. */
AVOutputFormat ff_oss_muxer = {
    .name           = "oss",
    .long_name      = NULL_IF_CONFIG_SMALL("OSS (Open Sound System) playback"),
    .priv_data_size = sizeof(OSSAudioData),
    /* XXX: we make the assumption that the soundcard accepts this format */
    /* XXX: find better solution with "preinit" method, needed also in
       other formats */
    .audio_codec    = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE), /* native-endian 16-bit PCM */
    .video_codec    = AV_CODEC_ID_NONE,
    .write_header   = audio_write_header,
    .write_packet   = audio_write_packet,
    .write_trailer  = audio_write_trailer,
    .flags          = AVFMT_NOFILE, /* device path comes from the url, no AVIOContext */
    .priv_class     = &oss_muxer_class,
};

View File

@@ -0,0 +1,249 @@
/*
* Pulseaudio common
* Copyright (c) 2014 Lukasz Marek
* Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "pulse_audio_common.h"
#include "libavutil/attributes.h"
#include "libavutil/avstring.h"
#include "libavutil/mem.h"
#include "libavutil/avassert.h"
/**
 * Map an FFmpeg PCM codec ID to the equivalent PulseAudio sample format.
 * Returns PA_SAMPLE_INVALID for codecs without a Pulse counterpart.
 */
pa_sample_format_t av_cold ff_codec_id_to_pulse_format(enum AVCodecID codec_id)
{
    static const struct {
        enum AVCodecID     id;
        pa_sample_format_t fmt;
    } map[] = {
        { AV_CODEC_ID_PCM_U8,    PA_SAMPLE_U8        },
        { AV_CODEC_ID_PCM_ALAW,  PA_SAMPLE_ALAW      },
        { AV_CODEC_ID_PCM_MULAW, PA_SAMPLE_ULAW      },
        { AV_CODEC_ID_PCM_S16LE, PA_SAMPLE_S16LE     },
        { AV_CODEC_ID_PCM_S16BE, PA_SAMPLE_S16BE     },
        { AV_CODEC_ID_PCM_F32LE, PA_SAMPLE_FLOAT32LE },
        { AV_CODEC_ID_PCM_F32BE, PA_SAMPLE_FLOAT32BE },
        { AV_CODEC_ID_PCM_S32LE, PA_SAMPLE_S32LE     },
        { AV_CODEC_ID_PCM_S32BE, PA_SAMPLE_S32BE     },
        { AV_CODEC_ID_PCM_S24LE, PA_SAMPLE_S24LE     },
        { AV_CODEC_ID_PCM_S24BE, PA_SAMPLE_S24BE     },
    };
    size_t i;

    for (i = 0; i < sizeof(map) / sizeof(map[0]); i++)
        if (map[i].id == codec_id)
            return map[i].fmt;
    return PA_SAMPLE_INVALID;
}
/* Lifecycle of a pa_context as observed by pa_state_cb(). */
enum PulseAudioContextState {
    PULSE_CONTEXT_INITIALIZING, /* connection in progress */
    PULSE_CONTEXT_READY,        /* context usable */
    PULSE_CONTEXT_FINISHED      /* failed or terminated */
};

/* Accumulator passed as userdata to the device-enumeration callbacks. */
typedef struct PulseAudioDeviceList {
    AVDeviceInfoList *devices;  /* list being filled for the caller */
    int error_code;             /* first error encountered, 0 if none */
    int output;                 /* non-zero: enumerate sinks; zero: sources */
    char *default_device;       /* server's default sink/source name */
} PulseAudioDeviceList;
/* Context state callback: record the context's fate in the caller's
 * PulseAudioContextState so the mainloop iteration can stop. */
static void pa_state_cb(pa_context *c, void *userdata)
{
    enum PulseAudioContextState *context_state = userdata;
    pa_context_state_t state = pa_context_get_state(c);

    if (state == PA_CONTEXT_READY)
        *context_state = PULSE_CONTEXT_READY;
    else if (state == PA_CONTEXT_FAILED || state == PA_CONTEXT_TERMINATED)
        *context_state = PULSE_CONTEXT_FINISHED;
}
/* Tear down a context/mainloop pair created by ff_pulse_audio_connect_context()
 * and NULL the caller's pointers. Safe on a partially-initialized pair. */
void ff_pulse_audio_disconnect_context(pa_mainloop **pa_ml, pa_context **pa_ctx)
{
    pa_context  *ctx;
    pa_mainloop *ml;

    av_assert0(pa_ml);
    av_assert0(pa_ctx);

    ctx     = *pa_ctx;
    ml      = *pa_ml;
    *pa_ctx = NULL;
    *pa_ml  = NULL;

    if (ctx) {
        pa_context_set_state_callback(ctx, NULL, NULL);
        pa_context_disconnect(ctx);
        pa_context_unref(ctx);
    }
    if (ml)
        pa_mainloop_free(ml);
}
/* Create a plain (non-threaded) mainloop plus a connected pa_context and
 * iterate the loop until the context is ready. On failure everything is
 * cleaned up and *pa_ml / *pa_ctx are left NULL. */
int ff_pulse_audio_connect_context(pa_mainloop **pa_ml, pa_context **pa_ctx,
                                   const char *server, const char *description)
{
    int ret;
    pa_mainloop_api *pa_mlapi = NULL;
    enum PulseAudioContextState context_state = PULSE_CONTEXT_INITIALIZING;

    av_assert0(pa_ml);
    av_assert0(pa_ctx);
    *pa_ml = NULL;
    *pa_ctx = NULL;
    if (!(*pa_ml = pa_mainloop_new()))
        return AVERROR(ENOMEM);
    if (!(pa_mlapi = pa_mainloop_get_api(*pa_ml))) {
        ret = AVERROR_EXTERNAL;
        goto fail;
    }
    if (!(*pa_ctx = pa_context_new(pa_mlapi, description))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    /* pa_state_cb flips context_state once the connection settles */
    pa_context_set_state_callback(*pa_ctx, pa_state_cb, &context_state);
    if (pa_context_connect(*pa_ctx, server, 0, NULL) < 0) {
        ret = AVERROR_EXTERNAL;
        goto fail;
    }
    /* drive the mainloop until the context is ready or dead */
    while (context_state == PULSE_CONTEXT_INITIALIZING)
        pa_mainloop_iterate(*pa_ml, 1, NULL);
    if (context_state == PULSE_CONTEXT_FINISHED) {
        ret = AVERROR_EXTERNAL;
        goto fail;
    }
    return 0;

fail:
    ff_pulse_audio_disconnect_context(pa_ml, pa_ctx);
    return ret;
}
/* Append one detected device (name + description, both duplicated) to
 * info->devices. On any failure records the error in info->error_code;
 * once an error is recorded, further calls become no-ops. */
static void pulse_add_detected_device(PulseAudioDeviceList *info,
                                      const char *name, const char *description)
{
    int ret;
    AVDeviceInfo *new_device = NULL;

    /* a previous callback already failed: skip the rest of the enumeration */
    if (info->error_code)
        return;

    new_device = av_mallocz(sizeof(AVDeviceInfo));
    if (!new_device) {
        info->error_code = AVERROR(ENOMEM);
        return;
    }

    new_device->device_description = av_strdup(description);
    new_device->device_name = av_strdup(name);

    if (!new_device->device_description || !new_device->device_name) {
        info->error_code = AVERROR(ENOMEM);
        goto fail;
    }

    if ((ret = av_dynarray_add_nofree(&info->devices->devices,
                                      &info->devices->nb_devices, new_device)) < 0) {
        info->error_code = ret;
        goto fail;
    }
    return;

  fail:
    /* free the partially-built entry; the list itself is untouched */
    av_freep(&new_device->device_description);
    av_freep(&new_device->device_name);
    av_free(new_device);
}
/* Per-source enumeration callback: record each source until end-of-list. */
static void pulse_audio_source_device_cb(pa_context *c, const pa_source_info *dev,
                                         int eol, void *userdata)
{
    if (eol)
        return;
    pulse_add_detected_device(userdata, dev->name, dev->description);
}
/* Per-sink enumeration callback: record each sink until end-of-list. */
static void pulse_audio_sink_device_cb(pa_context *c, const pa_sink_info *dev,
                                       int eol, void *userdata)
{
    if (eol)
        return;
    pulse_add_detected_device(userdata, dev->name, dev->description);
}
/* Server-info callback: remember the server's default sink or source name
 * (depending on the direction being enumerated). */
static void pulse_server_info_cb(pa_context *c, const pa_server_info *i, void *userdata)
{
    PulseAudioDeviceList *info = userdata;
    const char *name = info->output ? i->default_sink_name
                                    : i->default_source_name;

    info->default_device = av_strdup(name);
    if (!info->default_device)
        info->error_code = AVERROR(ENOMEM);
}
/**
 * Enumerate PulseAudio devices (sinks when output != 0, sources otherwise)
 * into *devices and mark the server's default device by index.
 *
 * @return 0 on success, a negative AVERROR code otherwise
 */
int ff_pulse_audio_get_devices(AVDeviceInfoList *devices, const char *server, int output)
{
    pa_mainloop *pa_ml = NULL;
    pa_operation *pa_op = NULL;
    pa_context *pa_ctx = NULL;
    enum pa_operation_state op_state;
    PulseAudioDeviceList dev_list = { 0 };
    int i;

    dev_list.output  = output;
    dev_list.devices = devices;
    if (!devices)
        return AVERROR(EINVAL);
    devices->nb_devices = 0;
    devices->devices = NULL;

    if ((dev_list.error_code = ff_pulse_audio_connect_context(&pa_ml, &pa_ctx, server, "Query devices")) < 0)
        goto fail;

    if (output)
        pa_op = pa_context_get_sink_info_list(pa_ctx, pulse_audio_sink_device_cb, &dev_list);
    else
        pa_op = pa_context_get_source_info_list(pa_ctx, pulse_audio_source_device_cb, &dev_list);
    /* guard against a NULL operation: pa_operation_get_state(NULL) would crash */
    if (!pa_op) {
        dev_list.error_code = AVERROR_EXTERNAL;
        goto fail;
    }

    while ((op_state = pa_operation_get_state(pa_op)) == PA_OPERATION_RUNNING)
        pa_mainloop_iterate(pa_ml, 1, NULL);
    if (op_state != PA_OPERATION_DONE)
        dev_list.error_code = AVERROR_EXTERNAL;
    pa_operation_unref(pa_op);
    if (dev_list.error_code < 0)
        goto fail;

    /* second pass: ask the server for its default sink/source name */
    pa_op = pa_context_get_server_info(pa_ctx, pulse_server_info_cb, &dev_list);
    if (!pa_op) {
        dev_list.error_code = AVERROR_EXTERNAL;
        goto fail;
    }
    while ((op_state = pa_operation_get_state(pa_op)) == PA_OPERATION_RUNNING)
        pa_mainloop_iterate(pa_ml, 1, NULL);
    if (op_state != PA_OPERATION_DONE)
        dev_list.error_code = AVERROR_EXTERNAL;
    pa_operation_unref(pa_op);
    if (dev_list.error_code < 0)
        goto fail;

    /* translate the default device's name into an index in the list */
    devices->default_device = -1;
    for (i = 0; i < devices->nb_devices; i++) {
        if (!strcmp(devices->devices[i]->device_name, dev_list.default_device)) {
            devices->default_device = i;
            break;
        }
    }

  fail:
    av_free(dev_list.default_device);
    ff_pulse_audio_disconnect_context(&pa_ml, &pa_ctx);
    return dev_list.error_code;
}

View File

@@ -0,0 +1,40 @@
/*
* Pulseaudio input
* Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_PULSE_AUDIO_COMMON_H
#define AVDEVICE_PULSE_AUDIO_COMMON_H
#include <pulse/pulseaudio.h>
#include "libavcodec/avcodec.h"
#include "avdevice.h"
pa_sample_format_t ff_codec_id_to_pulse_format(enum AVCodecID codec_id);
av_warn_unused_result
int ff_pulse_audio_get_devices(AVDeviceInfoList *devices, const char *server, int output);
av_warn_unused_result
int ff_pulse_audio_connect_context(pa_mainloop **pa_ml, pa_context **pa_ctx,
const char *server, const char *description);
void ff_pulse_audio_disconnect_context(pa_mainloop **pa_ml, pa_context **pa_ctx);
#endif /* AVDEVICE_PULSE_AUDIO_COMMON_H */

379
externals/ffmpeg/libavdevice/pulse_audio_dec.c vendored Executable file
View File

@@ -0,0 +1,379 @@
/*
* Pulseaudio input
* Copyright (c) 2011 Luca Barbato <lu_zero@gentoo.org>
* Copyright 2004-2006 Lennart Poettering
* Copyright (c) 2014 Michael Niedermayer <michaelni@gmx.at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <pulse/rtclock.h>
#include <pulse/error.h>
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "pulse_audio_common.h"
#include "timefilter.h"
#define DEFAULT_CODEC_ID AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE)
/* Private context of the Pulse capture device. */
typedef struct PulseData {
    AVClass *class;               /* class context for private AVOptions */
    char *server;                 /* PulseAudio server to connect to (NULL = default) */
    char *name;                   /* application name reported to the server */
    char *stream_name;            /* stream description reported to the server */
    int  sample_rate;             /* requested sample rate in Hz */
    int  channels;                /* requested channel count */
    int  frame_size;              /* "frame_size" option (bytes per frame) */
    int  fragment_size;           /* capture fragment size passed as attr.fragsize */
    pa_threaded_mainloop *mainloop;
    pa_context *context;
    pa_stream *stream;            /* the record stream */
    TimeFilter *timefilter;       /* smooths wall-clock derived pts */
    int last_period;              /* frames in the previous packet, fed to the timefilter */
    int wallclock;                /* option: derive pts from the current time */
} PulseData;
/* Set rerror to AVERROR_EXTERNAL and jump to label unless expression holds. */
#define CHECK_SUCCESS_GOTO(rerror, expression, label)   \
do {                                                    \
    if (!(expression)) {                                \
        rerror = AVERROR_EXTERNAL;                      \
        goto label;                                     \
    }                                                   \
} while (0)

/* Set rerror and jump to label if the context or stream of p is no longer
 * in a usable (GOOD) state. */
#define CHECK_DEAD_GOTO(p, rerror, label)                                             \
do {                                                                                  \
    if (!(p)->context || !PA_CONTEXT_IS_GOOD(pa_context_get_state((p)->context)) ||   \
        !(p)->stream || !PA_STREAM_IS_GOOD(pa_stream_get_state((p)->stream))) {       \
        rerror = AVERROR_EXTERNAL;                                                    \
        goto label;                                                                   \
    }                                                                                 \
} while (0)
/* Context state callback: wake the thread blocked in
 * pa_threaded_mainloop_wait() once the context reaches a decisive state. */
static void context_state_cb(pa_context *c, void *userdata)
{
    PulseData *pd = userdata;
    pa_context_state_t st = pa_context_get_state(c);

    if (st == PA_CONTEXT_READY || st == PA_CONTEXT_TERMINATED || st == PA_CONTEXT_FAILED)
        pa_threaded_mainloop_signal(pd->mainloop, 0);
}
/* Stream state callback: wake the thread blocked in
 * pa_threaded_mainloop_wait() once the stream reaches a decisive state. */
static void stream_state_cb(pa_stream *s, void *userdata)
{
    PulseData *pd = userdata;
    pa_stream_state_t st = pa_stream_get_state(s);

    if (st == PA_STREAM_READY || st == PA_STREAM_FAILED || st == PA_STREAM_TERMINATED)
        pa_threaded_mainloop_signal(pd->mainloop, 0);
}
/* Read/write request callback: new data is available, wake any waiter. */
static void stream_request_cb(pa_stream *s, size_t length, void *userdata)
{
    PulseData *pd = userdata;

    pa_threaded_mainloop_signal(pd->mainloop, 0);
}
/* Latency-update callback: timing info changed, wake any waiter. */
static void stream_latency_update_cb(pa_stream *s, void *userdata)
{
    PulseData *pd = userdata;

    pa_threaded_mainloop_signal(pd->mainloop, 0);
}
/* Tear down the capture device. Safe to call on a partially-initialized
 * context (every pointer is checked and NULLed); the mainloop is stopped
 * before the stream and context are released. */
static av_cold int pulse_close(AVFormatContext *s)
{
    PulseData *pd = s->priv_data;

    if (pd->mainloop)
        pa_threaded_mainloop_stop(pd->mainloop);

    if (pd->stream)
        pa_stream_unref(pd->stream);
    pd->stream = NULL;

    if (pd->context) {
        pa_context_disconnect(pd->context);
        pa_context_unref(pd->context);
    }
    pd->context = NULL;

    if (pd->mainloop)
        pa_threaded_mainloop_free(pd->mainloop);
    pd->mainloop = NULL;

    ff_timefilter_destroy(pd->timefilter);
    pd->timefilter = NULL;

    return 0;
}
/* Demuxer read_header: connect to the PulseAudio server, create a record
 * stream on the device named by s->url (or the server default), and set up
 * one audio stream plus the pts timefilter. The threaded mainloop is locked
 * around all stream/context calls. */
static av_cold int pulse_read_header(AVFormatContext *s)
{
    PulseData *pd = s->priv_data;
    AVStream *st;
    char *device = NULL;
    int ret;
    enum AVCodecID codec_id =
        s->audio_codec_id == AV_CODEC_ID_NONE ? DEFAULT_CODEC_ID : s->audio_codec_id;
    const pa_sample_spec ss = { ff_codec_id_to_pulse_format(codec_id),
                                pd->sample_rate,
                                pd->channels };
    pa_buffer_attr attr = { -1 }; /* -1 = let the server pick, except fragsize below */
    pa_channel_map cmap;

    pa_channel_map_init_extend(&cmap, pd->channels, PA_CHANNEL_MAP_WAVEEX);

    st = avformat_new_stream(s, NULL);
    if (!st) {
        av_log(s, AV_LOG_ERROR, "Cannot add stream\n");
        return AVERROR(ENOMEM);
    }
    attr.fragsize = pd->fragment_size;

    /* an empty or "default" url means the server-default source */
    if (s->url[0] != '\0' && strcmp(s->url, "default"))
        device = s->url;

    if (!(pd->mainloop = pa_threaded_mainloop_new())) {
        pulse_close(s);
        return AVERROR_EXTERNAL;
    }

    if (!(pd->context = pa_context_new(pa_threaded_mainloop_get_api(pd->mainloop), pd->name))) {
        pulse_close(s);
        return AVERROR_EXTERNAL;
    }

    pa_context_set_state_callback(pd->context, context_state_cb, pd);

    if (pa_context_connect(pd->context, pd->server, 0, NULL) < 0) {
        pulse_close(s);
        return AVERROR(pa_context_errno(pd->context));
    }

    pa_threaded_mainloop_lock(pd->mainloop);

    if (pa_threaded_mainloop_start(pd->mainloop) < 0) {
        ret = -1;
        goto unlock_and_fail;
    }

    for (;;) {
        pa_context_state_t state;

        state = pa_context_get_state(pd->context);

        if (state == PA_CONTEXT_READY)
            break;

        if (!PA_CONTEXT_IS_GOOD(state)) {
            ret = AVERROR(pa_context_errno(pd->context));
            goto unlock_and_fail;
        }

        /* Wait until the context is ready */
        pa_threaded_mainloop_wait(pd->mainloop);
    }

    if (!(pd->stream = pa_stream_new(pd->context, pd->stream_name, &ss, &cmap))) {
        ret = AVERROR(pa_context_errno(pd->context));
        goto unlock_and_fail;
    }

    pa_stream_set_state_callback(pd->stream, stream_state_cb, pd);
    pa_stream_set_read_callback(pd->stream, stream_request_cb, pd);
    pa_stream_set_write_callback(pd->stream, stream_request_cb, pd);
    pa_stream_set_latency_update_callback(pd->stream, stream_latency_update_cb, pd);

    ret = pa_stream_connect_record(pd->stream, device, &attr,
                                   PA_STREAM_INTERPOLATE_TIMING
                                   |PA_STREAM_ADJUST_LATENCY
                                   |PA_STREAM_AUTO_TIMING_UPDATE);

    if (ret < 0) {
        ret = AVERROR(pa_context_errno(pd->context));
        goto unlock_and_fail;
    }

    for (;;) {
        pa_stream_state_t state;

        state = pa_stream_get_state(pd->stream);

        if (state == PA_STREAM_READY)
            break;

        if (!PA_STREAM_IS_GOOD(state)) {
            ret = AVERROR(pa_context_errno(pd->context));
            goto unlock_and_fail;
        }

        /* Wait until the stream is ready */
        pa_threaded_mainloop_wait(pd->mainloop);
    }

    pa_threaded_mainloop_unlock(pd->mainloop);

    /* take real parameters */
    st->codecpar->codec_type = AVMEDIA_TYPE_AUDIO;
    st->codecpar->codec_id = codec_id;
    st->codecpar->sample_rate = pd->sample_rate;
    st->codecpar->channels = pd->channels;
    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    pd->timefilter = ff_timefilter_new(1000000.0 / pd->sample_rate,
                                       1000, 1.5E-6);

    if (!pd->timefilter) {
        pulse_close(s);
        return AVERROR(ENOMEM);
    }

    return 0;

unlock_and_fail:
    pa_threaded_mainloop_unlock(pd->mainloop);

    pulse_close(s);
    return ret;
}
/* Demuxer read_packet: peek one fragment from the record stream (waiting on
 * the mainloop until data arrives), copy it into a new packet, stamp a
 * latency-corrected pts, then drop the fragment. The mainloop stays locked
 * for the whole peek/drop sequence. */
static int pulse_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    PulseData *pd  = s->priv_data;
    int ret;
    size_t read_length;
    const void *read_data = NULL;
    int64_t dts;
    pa_usec_t latency;
    int negative;

    pa_threaded_mainloop_lock(pd->mainloop);

    CHECK_DEAD_GOTO(pd, ret, unlock_and_fail);

    while (!read_data) {
        int r;

        r = pa_stream_peek(pd->stream, &read_data, &read_length);
        CHECK_SUCCESS_GOTO(ret, r == 0, unlock_and_fail);

        if (read_length <= 0) {
            /* no data yet: sleep until a callback signals the mainloop */
            pa_threaded_mainloop_wait(pd->mainloop);
            CHECK_DEAD_GOTO(pd, ret, unlock_and_fail);
        } else if (!read_data) {
            /* There's a hole in the stream, skip it. We could generate
             * silence, but that wouldn't work for compressed streams. */
            r = pa_stream_drop(pd->stream);
            CHECK_SUCCESS_GOTO(ret, r == 0, unlock_and_fail);
        }
    }

    if (av_new_packet(pkt, read_length) < 0) {
        ret = AVERROR(ENOMEM);
        goto unlock_and_fail;
    }

    dts = av_gettime();
    pa_operation_unref(pa_stream_update_timing_info(pd->stream, NULL, NULL));

    if (pa_stream_get_latency(pd->stream, &latency, &negative) >= 0) {
        enum AVCodecID codec_id =
            s->audio_codec_id == AV_CODEC_ID_NONE ? DEFAULT_CODEC_ID : s->audio_codec_id;
        int frame_size = ((av_get_bits_per_sample(codec_id) >> 3) * pd->channels);
        int frame_duration = read_length / frame_size;

        /* shift the wall clock back by the reported capture latency */
        if (negative) {
            dts += latency;
        } else
            dts -= latency;
        if (pd->wallclock)
            pkt->pts = ff_timefilter_update(pd->timefilter, dts, pd->last_period);

        pd->last_period = frame_duration;
    } else {
        av_log(s, AV_LOG_WARNING, "pa_stream_get_latency() failed\n");
    }

    memcpy(pkt->data, read_data, read_length);
    pa_stream_drop(pd->stream);

    pa_threaded_mainloop_unlock(pd->mainloop);
    return 0;

unlock_and_fail:
    pa_threaded_mainloop_unlock(pd->mainloop);
    return ret;
}
/* Device-list callback: enumerate capture sources on the configured server. */
static int pulse_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
{
    PulseData *pd = h->priv_data;

    return ff_pulse_audio_get_devices(device_list, pd->server, 0);
}
#define OFFSET(a) offsetof(PulseData, a)
#define D AV_OPT_FLAG_DECODING_PARAM

/* User-settable options of the Pulse capture device. */
static const AVOption options[] = {
    { "server",        "set PulseAudio server",                             OFFSET(server),        AV_OPT_TYPE_STRING, {.str = NULL},              0, 0,       D },
    { "name",          "set application name",                              OFFSET(name),          AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT}, 0, 0,       D },
    { "stream_name",   "set stream description",                            OFFSET(stream_name),   AV_OPT_TYPE_STRING, {.str = "record"},          0, 0,       D },
    { "sample_rate",   "set sample rate in Hz",                             OFFSET(sample_rate),   AV_OPT_TYPE_INT,    {.i64 = 48000},             1, INT_MAX, D },
    { "channels",      "set number of audio channels",                      OFFSET(channels),      AV_OPT_TYPE_INT,    {.i64 = 2},                 1, INT_MAX, D },
    { "frame_size",    "set number of bytes per frame",                     OFFSET(frame_size),    AV_OPT_TYPE_INT,    {.i64 = 1024},              1, INT_MAX, D },
    { "fragment_size", "set buffering size, affects latency and cpu usage", OFFSET(fragment_size), AV_OPT_TYPE_INT,    {.i64 = -1},               -1, INT_MAX, D },
    { "wallclock",     "set the initial pts using the current time",        OFFSET(wallclock),     AV_OPT_TYPE_INT,    {.i64 = 1},                -1, 1,       D },
    { NULL },
};

static const AVClass pulse_demuxer_class = {
    .class_name     = "Pulse indev",
    .item_name      = av_default_item_name,
    .option         = options,
    .version        = LIBAVUTIL_VERSION_INT,
    .category       = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};

/* PulseAudio capture input-device registration. */
AVInputFormat ff_pulse_demuxer = {
    .name           = "pulse",
    .long_name      = NULL_IF_CONFIG_SMALL("Pulse audio input"),
    .priv_data_size = sizeof(PulseData),
    .read_header    = pulse_read_header,
    .read_packet    = pulse_read_packet,
    .read_close     = pulse_close,
    .get_device_list = pulse_get_device_list,
    .flags          = AVFMT_NOFILE, /* device name comes from the url, no AVIOContext */
    .priv_class     = &pulse_demuxer_class,
};

796
externals/ffmpeg/libavdevice/pulse_audio_enc.c vendored Executable file
View File

@@ -0,0 +1,796 @@
/*
* Copyright (c) 2013 Lukasz Marek <lukasz.m.luki@gmail.com>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <math.h>
#include <pulse/pulseaudio.h>
#include <pulse/error.h>
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavutil/log.h"
#include "libavutil/attributes.h"
#include "pulse_audio_common.h"
/* Private context of the Pulse playback device. */
typedef struct PulseData {
    AVClass *class;               /* class context for private AVOptions */
    const char *server;           /* PulseAudio server to connect to (NULL = default) */
    const char *name;             /* application name reported to the server */
    const char *stream_name;      /* stream description reported to the server */
    const char *device;           /* sink name to play on */
    int64_t timestamp;
    int buffer_size;              /**< Buffer size in bytes */
    int buffer_duration;          /**< Buffer size in ms, recalculated to buffer_size */
    int prebuf;
    int minreq;
    int last_result;              /* result of the last stream operation (see pulse_stream_result) */
    pa_threaded_mainloop *mainloop;
    pa_context *ctx;
    pa_stream *stream;            /* the playback stream */
    int nonblocking;
    int mute;                     /* cached mute state; negative = unknown yet */
    pa_volume_t base_volume;      /* sink base volume used to normalize reported volume */
    pa_volume_t last_volume;      /* last volume forwarded to the application */
} PulseData;
/* Sink-info callback: cache the sink's base volume (or PA_VOLUME_NORM when
 * the sink does not use flat volumes) and signal the waiting thread at
 * end-of-list. */
static void pulse_audio_sink_device_cb(pa_context *ctx, const pa_sink_info *dev,
                                       int eol, void *userdata)
{
    PulseData *s = userdata;

    /* ignore callbacks from a stale context */
    if (s->ctx != ctx)
        return;

    if (eol) {
        pa_threaded_mainloop_signal(s->mainloop, 0);
    } else {
        if (dev->flags & PA_SINK_FLAT_VOLUME)
            s->base_volume = dev->base_volume;
        else
            s->base_volume = PA_VOLUME_NORM;
        av_log(s, AV_LOG_DEBUG, "base volume: %u\n", s->base_volume);
    }
}
/* Mainloop must be locked before calling this function as it uses pa_threaded_mainloop_wait. */
/* Query the configured sink and refresh the cached base volume via
 * pulse_audio_sink_device_cb(). Returns 0 or AVERROR_EXTERNAL. */
static int pulse_update_sink_info(AVFormatContext *h)
{
    PulseData *s = h->priv_data;
    pa_operation *op;

    if (!(op = pa_context_get_sink_info_by_name(s->ctx, s->device,
                                                pulse_audio_sink_device_cb, s))) {
        av_log(s, AV_LOG_ERROR, "pa_context_get_sink_info_by_name failed.\n");
        return AVERROR_EXTERNAL;
    }
    /* sleep until the callback's end-of-list signal; requires the lock held */
    while (pa_operation_get_state(op) == PA_OPERATION_RUNNING)
        pa_threaded_mainloop_wait(s->mainloop);
    pa_operation_unref(op);
    return 0;
}
/* Sink-input-info callback: forward mute and volume changes of our stream
 * to the application via avdevice control messages. Only fires messages
 * when the state actually changed since the last report. */
static void pulse_audio_sink_input_cb(pa_context *ctx, const pa_sink_input_info *i,
                                      int eol, void *userdata)
{
    AVFormatContext *h = userdata;
    PulseData *s = h->priv_data;

    /* ignore callbacks from a stale context */
    if (s->ctx != ctx)
        return;

    if (!eol) {
        double val;
        pa_volume_t vol = pa_cvolume_avg(&i->volume);
        /* s->mute < 0 means "not known yet": always report the first state */
        if (s->mute < 0 || (s->mute && !i->mute) || (!s->mute && i->mute)) {
            s->mute = i->mute;
            avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_MUTE_STATE_CHANGED, &s->mute, sizeof(s->mute));
        }

        /* normalize against the sink's base volume before comparing */
        vol = pa_sw_volume_divide(vol, s->base_volume);
        if (s->last_volume != vol) {
            val = (double)vol / PA_VOLUME_NORM;
            avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_VOLUME_LEVEL_CHANGED, &val, sizeof(val));
            s->last_volume = vol;
        }
    }
}
/* This function creates new loop so may be called from PA callbacks.
   Mainloop must be locked before calling this function as it operates on streams. */
/* Query our stream's sink-input info (mute/volume) on a dedicated plain
 * mainloop and deliver it through pulse_audio_sink_input_cb(). */
static int pulse_update_sink_input_info(AVFormatContext *h)
{
    PulseData *s = h->priv_data;
    pa_operation *op;
    enum pa_operation_state op_state;
    pa_mainloop *ml = NULL;
    pa_context *ctx = NULL;
    int ret = 0;

    /* separate loop/context: the threaded mainloop may be dispatching us */
    if ((ret = ff_pulse_audio_connect_context(&ml, &ctx, s->server, "Update sink input information")) < 0)
        return ret;

    if (!(op = pa_context_get_sink_input_info(ctx, pa_stream_get_index(s->stream),
                                              pulse_audio_sink_input_cb, h))) {
        ret = AVERROR_EXTERNAL;
        goto fail;
    }

    while ((op_state = pa_operation_get_state(op)) == PA_OPERATION_RUNNING)
        pa_mainloop_iterate(ml, 1, NULL);
    pa_operation_unref(op);
    if (op_state != PA_OPERATION_DONE) {
        ret = AVERROR_EXTERNAL;
        goto fail;
    }

  fail:
    ff_pulse_audio_disconnect_context(&ml, &ctx);
    if (ret)
        av_log(s, AV_LOG_ERROR, "pa_context_get_sink_input_info failed.\n");
    return ret;
}
/* Subscription event callback: refresh mute/volume info whenever our
 * sink input changes. */
static void pulse_event(pa_context *ctx, pa_subscription_event_type_t t,
                        uint32_t idx, void *userdata)
{
    AVFormatContext *h = userdata;
    PulseData *s = h->priv_data;

    if (s->ctx != ctx)
        return;
    if ((t & PA_SUBSCRIPTION_EVENT_FACILITY_MASK) == PA_SUBSCRIPTION_EVENT_SINK_INPUT &&
        (t & PA_SUBSCRIPTION_EVENT_TYPE_MASK) == PA_SUBSCRIPTION_EVENT_CHANGE)
        // Calling from mainloop callback. No need to lock mainloop.
        pulse_update_sink_input_info(h);
}
/* Write-request callback: report the writable byte count to the application
 * and wake any thread waiting on the mainloop. */
static void pulse_stream_writable(pa_stream *stream, size_t nbytes, void *userdata)
{
    AVFormatContext *h = userdata;
    PulseData *pd = h->priv_data;
    int64_t writable;

    if (stream != pd->stream)
        return;
    writable = nbytes;
    avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_WRITABLE, &writable, sizeof(writable));
    pa_threaded_mainloop_signal(pd->mainloop, 0);
}
/* Overflow callback: forward a buffer-overflow event to the application. */
static void pulse_overflow(pa_stream *stream, void *userdata)
{
    avdevice_dev_to_app_control_message((AVFormatContext *)userdata,
                                        AV_DEV_TO_APP_BUFFER_OVERFLOW, NULL, 0);
}
/* Stream underflow callback: forward the condition to the application. */
static void pulse_underflow(pa_stream *stream, void *userdata)
{
    AVFormatContext *fmt_ctx = userdata;
    avdevice_dev_to_app_control_message(fmt_ctx, AV_DEV_TO_APP_BUFFER_UNDERFLOW,
                                        NULL, 0);
}
/* Stream state callback: wake any thread blocked in pulse_stream_wait()
 * once a ready or terminal state is reached. */
static void pulse_stream_state(pa_stream *stream, void *userdata)
{
    PulseData *s = userdata;
    pa_stream_state_t state;

    if (s->stream != stream)
        return;

    state = pa_stream_get_state(s->stream);
    if (state == PA_STREAM_READY ||
        state == PA_STREAM_FAILED ||
        state == PA_STREAM_TERMINATED)
        pa_threaded_mainloop_signal(s->mainloop, 0);
}
/* Block (mainloop locked) until the stream becomes ready.
 * Returns 0 when ready, AVERROR_EXTERNAL if it failed or terminated. */
static int pulse_stream_wait(PulseData *s)
{
    for (;;) {
        pa_stream_state_t state = pa_stream_get_state(s->stream);

        if (state == PA_STREAM_READY)
            return 0;
        if (state == PA_STREAM_FAILED || state == PA_STREAM_TERMINATED)
            return AVERROR_EXTERNAL;
        pa_threaded_mainloop_wait(s->mainloop);
    }
}
/* Context state callback: wake any thread blocked in pulse_context_wait()
 * once a ready or terminal state is reached. */
static void pulse_context_state(pa_context *ctx, void *userdata)
{
    PulseData *s = userdata;
    pa_context_state_t state;

    if (s->ctx != ctx)
        return;

    state = pa_context_get_state(ctx);
    if (state == PA_CONTEXT_READY ||
        state == PA_CONTEXT_FAILED ||
        state == PA_CONTEXT_TERMINATED)
        pa_threaded_mainloop_signal(s->mainloop, 0);
}
/* Block (mainloop locked) until the context becomes ready.
 * Returns 0 when ready, AVERROR_EXTERNAL if it failed or terminated. */
static int pulse_context_wait(PulseData *s)
{
    for (;;) {
        pa_context_state_t state = pa_context_get_state(s->ctx);

        if (state == PA_CONTEXT_READY)
            return 0;
        if (state == PA_CONTEXT_FAILED || state == PA_CONTEXT_TERMINATED)
            return AVERROR_EXTERNAL;
        pa_threaded_mainloop_wait(s->mainloop);
    }
}
/* Completion callback for stream operations: record the outcome in
 * s->last_result and wake pulse_finish_stream_operation(). */
static void pulse_stream_result(pa_stream *stream, int success, void *userdata)
{
    PulseData *s = userdata;

    if (stream != s->stream)
        return;

    if (success)
        s->last_result = 0;
    else
        s->last_result = AVERROR_EXTERNAL;
    pa_threaded_mainloop_signal(s->mainloop, 0);
}
/* Wait for a stream operation started under the mainloop lock to finish.
 *
 * Must be entered with the threaded mainloop LOCKED; always returns with it
 * UNLOCKED. 'op' may be NULL when submission itself failed.
 * s->last_result acts as a mailbox: 2 is a sentinel meaning "still pending";
 * the completion callback (pulse_stream_result) overwrites it with 0 or an
 * AVERROR code and signals the mainloop.
 *
 * Returns 0 on success, a negative AVERROR code otherwise.
 */
static int pulse_finish_stream_operation(PulseData *s, pa_operation *op, const char *name)
{
    if (!op) {
        pa_threaded_mainloop_unlock(s->mainloop);
        av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
        return AVERROR_EXTERNAL;
    }
    s->last_result = 2;
    while (s->last_result == 2)
        pa_threaded_mainloop_wait(s->mainloop);
    pa_operation_unref(op);
    pa_threaded_mainloop_unlock(s->mainloop);
    if (s->last_result != 0)
        av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
    return s->last_result;
}
/* Pause (pause != 0, cork) or resume (pause == 0, uncork) playback.
 * Takes the mainloop lock; pulse_finish_stream_operation() releases it. */
static int pulse_set_pause(PulseData *s, int pause)
{
    pa_operation *op;

    pa_threaded_mainloop_lock(s->mainloop);
    op = pa_stream_cork(s->stream, pause, pulse_stream_result, s);

    return pulse_finish_stream_operation(s, op, "pa_stream_cork");
}
/* Flush all audio currently queued on the stream.
 * (Name kept for the existing caller; "flash" is a historical typo.)
 * Takes the mainloop lock; pulse_finish_stream_operation() releases it. */
static int pulse_flash_stream(PulseData *s)
{
    pa_operation *op;

    pa_threaded_mainloop_lock(s->mainloop);
    op = pa_stream_flush(s->stream, pulse_stream_result, s);

    return pulse_finish_stream_operation(s, op, "pa_stream_flush");
}
/* Completion callback for context operations: record the outcome in
 * s->last_result and wake pulse_finish_context_operation(). */
static void pulse_context_result(pa_context *ctx, int success, void *userdata)
{
    PulseData *s = userdata;

    if (ctx != s->ctx)
        return;

    if (success)
        s->last_result = 0;
    else
        s->last_result = AVERROR_EXTERNAL;
    pa_threaded_mainloop_signal(s->mainloop, 0);
}
/* Wait for a context operation started under the mainloop lock to finish.
 *
 * Must be entered with the threaded mainloop LOCKED; always returns with it
 * UNLOCKED. 'op' may be NULL when submission itself failed.
 * Same mailbox protocol as pulse_finish_stream_operation(): s->last_result
 * is set to the sentinel 2, then overwritten by pulse_context_result().
 *
 * Returns 0 on success, a negative AVERROR code otherwise.
 */
static int pulse_finish_context_operation(PulseData *s, pa_operation *op, const char *name)
{
    if (!op) {
        pa_threaded_mainloop_unlock(s->mainloop);
        av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
        return AVERROR_EXTERNAL;
    }
    s->last_result = 2;
    while (s->last_result == 2)
        pa_threaded_mainloop_wait(s->mainloop);
    pa_operation_unref(op);
    pa_threaded_mainloop_unlock(s->mainloop);
    if (s->last_result != 0)
        av_log(s, AV_LOG_ERROR, "%s failed.\n", name);
    return s->last_result;
}
/* Apply the cached mute flag (s->mute) to our sink input.
 * Takes the mainloop lock; pulse_finish_context_operation() releases it. */
static int pulse_set_mute(PulseData *s)
{
    pa_operation *op;
    uint32_t sink_input_idx;

    pa_threaded_mainloop_lock(s->mainloop);
    sink_input_idx = pa_stream_get_index(s->stream);
    op = pa_context_set_sink_input_mute(s->ctx, sink_input_idx,
                                        s->mute, pulse_context_result, s);

    return pulse_finish_context_operation(s, op, "pa_context_set_sink_input_mute");
}
/* Set the sink-input volume for our stream.
 *
 * 'volume' is a linear software gain where 1.0 maps to PA_VOLUME_NORM; it is
 * multiplied with the sink's base volume so the request is relative to the
 * device, and the same value is applied to every channel.
 * Takes the mainloop lock; pulse_finish_context_operation() releases it.
 * Returns 0 on success, a negative AVERROR code otherwise.
 */
static int pulse_set_volume(PulseData *s, double volume)
{
    pa_operation *op;
    pa_cvolume cvol;
    pa_volume_t vol;
    const pa_sample_spec *ss = pa_stream_get_sample_spec(s->stream);

    vol = pa_sw_volume_multiply(lrint(volume * PA_VOLUME_NORM), s->base_volume);
    /* Start from NORM on all channels, then scale by the computed gain. */
    pa_cvolume_set(&cvol, ss->channels, PA_VOLUME_NORM);
    pa_sw_cvolume_multiply_scalar(&cvol, &cvol, vol);
    pa_threaded_mainloop_lock(s->mainloop);
    op = pa_context_set_sink_input_volume(s->ctx, pa_stream_get_index(s->stream),
                                          &cvol, pulse_context_result, s);
    return pulse_finish_context_operation(s, op, "pa_context_set_sink_input_volume");
}
/* Subscribe to sink-input change notifications (delivered to pulse_event()).
 * Takes the mainloop lock; pulse_finish_context_operation() releases it. */
static int pulse_subscribe_events(PulseData *s)
{
    pa_operation *op;

    pa_threaded_mainloop_lock(s->mainloop);
    op = pa_context_subscribe(s->ctx, PA_SUBSCRIPTION_MASK_SINK_INPUT,
                              pulse_context_result, s);

    return pulse_finish_context_operation(s, op, "pa_context_subscribe");
}
/* Translate an FFmpeg channel layout mask into a PulseAudio channel map.
 * Channels are emitted in FFmpeg native layout order; bits not covered by
 * the table are silently skipped (the caller detects the count mismatch). */
static void pulse_map_channels_to_pulse(int64_t channel_layout, pa_channel_map *channel_map)
{
    /* FFmpeg channel bit -> PulseAudio position, in native layout order. */
    static const struct {
        uint64_t av_ch;
        pa_channel_position_t position;
    } layout_map[] = {
        { AV_CH_FRONT_LEFT,            PA_CHANNEL_POSITION_FRONT_LEFT            },
        { AV_CH_FRONT_RIGHT,           PA_CHANNEL_POSITION_FRONT_RIGHT           },
        { AV_CH_FRONT_CENTER,          PA_CHANNEL_POSITION_FRONT_CENTER          },
        { AV_CH_LOW_FREQUENCY,         PA_CHANNEL_POSITION_LFE                   },
        { AV_CH_BACK_LEFT,             PA_CHANNEL_POSITION_REAR_LEFT             },
        { AV_CH_BACK_RIGHT,            PA_CHANNEL_POSITION_REAR_RIGHT            },
        { AV_CH_FRONT_LEFT_OF_CENTER,  PA_CHANNEL_POSITION_FRONT_LEFT_OF_CENTER  },
        { AV_CH_FRONT_RIGHT_OF_CENTER, PA_CHANNEL_POSITION_FRONT_RIGHT_OF_CENTER },
        { AV_CH_BACK_CENTER,           PA_CHANNEL_POSITION_REAR_CENTER           },
        { AV_CH_SIDE_LEFT,             PA_CHANNEL_POSITION_SIDE_LEFT             },
        { AV_CH_SIDE_RIGHT,            PA_CHANNEL_POSITION_SIDE_RIGHT            },
        { AV_CH_TOP_CENTER,            PA_CHANNEL_POSITION_TOP_CENTER            },
        { AV_CH_TOP_FRONT_LEFT,        PA_CHANNEL_POSITION_TOP_FRONT_LEFT        },
        { AV_CH_TOP_FRONT_CENTER,      PA_CHANNEL_POSITION_TOP_FRONT_CENTER      },
        { AV_CH_TOP_FRONT_RIGHT,       PA_CHANNEL_POSITION_TOP_FRONT_RIGHT       },
        { AV_CH_TOP_BACK_LEFT,         PA_CHANNEL_POSITION_TOP_REAR_LEFT         },
        { AV_CH_TOP_BACK_CENTER,       PA_CHANNEL_POSITION_TOP_REAR_CENTER       },
        { AV_CH_TOP_BACK_RIGHT,        PA_CHANNEL_POSITION_TOP_REAR_RIGHT        },
        { AV_CH_STEREO_LEFT,           PA_CHANNEL_POSITION_FRONT_LEFT            },
        { AV_CH_STEREO_RIGHT,          PA_CHANNEL_POSITION_FRONT_RIGHT           },
        { AV_CH_WIDE_LEFT,             PA_CHANNEL_POSITION_AUX0                  },
        { AV_CH_WIDE_RIGHT,            PA_CHANNEL_POSITION_AUX1                  },
        { AV_CH_SURROUND_DIRECT_LEFT,  PA_CHANNEL_POSITION_AUX2                  },
        { AV_CH_SURROUND_DIRECT_RIGHT, PA_CHANNEL_POSITION_AUX3                  },
        { AV_CH_LOW_FREQUENCY_2,       PA_CHANNEL_POSITION_LFE                   },
    };
    size_t i;

    channel_map->channels = 0;
    for (i = 0; i < sizeof(layout_map) / sizeof(layout_map[0]); i++) {
        if (channel_layout & layout_map[i].av_ch)
            channel_map->map[channel_map->channels++] = layout_map[i].position;
    }
}
/* Muxer teardown: disconnect and release the stream, the context and the
 * threaded mainloop. Safe on a partially initialized muxer (also used as
 * the failure path of pulse_write_header()). Always returns 0.
 */
static av_cold int pulse_write_trailer(AVFormatContext *h)
{
    PulseData *s = h->priv_data;

    if (s->mainloop) {
        pa_threaded_mainloop_lock(s->mainloop);
        if (s->stream) {
            /* Disconnect, then clear callbacks so none fire during unref. */
            pa_stream_disconnect(s->stream);
            pa_stream_set_state_callback(s->stream, NULL, NULL);
            pa_stream_set_write_callback(s->stream, NULL, NULL);
            pa_stream_set_overflow_callback(s->stream, NULL, NULL);
            pa_stream_set_underflow_callback(s->stream, NULL, NULL);
            pa_stream_unref(s->stream);
            s->stream = NULL;
        }
        if (s->ctx) {
            pa_context_disconnect(s->ctx);
            pa_context_set_state_callback(s->ctx, NULL, NULL);
            pa_context_set_subscribe_callback(s->ctx, NULL, NULL);
            pa_context_unref(s->ctx);
            s->ctx = NULL;
        }
        /* Must unlock before stopping the mainloop thread. */
        pa_threaded_mainloop_unlock(s->mainloop);
        pa_threaded_mainloop_stop(s->mainloop);
        pa_threaded_mainloop_free(s->mainloop);
        s->mainloop = NULL;
    }
    return 0;
}
/* Muxer init: validate the single audio stream, build the sample spec and
 * channel map, start the threaded mainloop, connect the context and the
 * playback stream, and subscribe to sink-input events so mute/volume
 * changes are reported to the application.
 *
 * Fix vs. previous revision: corrected the user-facing warning string
 * ("defaul" -> "default").
 *
 * Returns 0 on success or a negative AVERROR code; on failure all partially
 * acquired resources are released via pulse_write_trailer().
 */
static av_cold int pulse_write_header(AVFormatContext *h)
{
    PulseData *s = h->priv_data;
    AVStream *st = NULL;
    int ret;
    pa_sample_spec sample_spec;
    pa_buffer_attr buffer_attributes = { -1, -1, -1, -1, -1 };
    pa_channel_map channel_map;
    pa_mainloop_api *mainloop_api;
    const char *stream_name = s->stream_name;
    static const pa_stream_flags_t stream_flags = PA_STREAM_INTERPOLATE_TIMING |
                                                  PA_STREAM_AUTO_TIMING_UPDATE |
                                                  PA_STREAM_NOT_MONOTONIC;

    if (h->nb_streams != 1 || h->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_AUDIO) {
        av_log(s, AV_LOG_ERROR, "Only a single audio stream is supported.\n");
        return AVERROR(EINVAL);
    }
    st = h->streams[0];

    /* Fall back to the output URL, then to a generic name. */
    if (!stream_name) {
        if (h->url[0])
            stream_name = h->url;
        else
            stream_name = "Playback";
    }
    s->nonblocking = (h->flags & AVFMT_FLAG_NONBLOCK);

    if (s->buffer_duration) {
        /* Convert the requested duration (ms) into bytes of interleaved PCM. */
        int64_t bytes = s->buffer_duration;
        bytes *= st->codecpar->channels * st->codecpar->sample_rate *
                 av_get_bytes_per_sample(st->codecpar->format);
        bytes /= 1000;
        buffer_attributes.tlength = FFMAX(s->buffer_size, av_clip64(bytes, 0, UINT32_MAX - 1));
        av_log(s, AV_LOG_DEBUG,
               "Buffer duration: %ums recalculated into %"PRId64" bytes buffer.\n",
               s->buffer_duration, bytes);
        av_log(s, AV_LOG_DEBUG, "Real buffer length is %u bytes\n", buffer_attributes.tlength);
    } else if (s->buffer_size)
        buffer_attributes.tlength = s->buffer_size;
    if (s->prebuf)
        buffer_attributes.prebuf = s->prebuf;
    if (s->minreq)
        buffer_attributes.minreq = s->minreq;

    sample_spec.format = ff_codec_id_to_pulse_format(st->codecpar->codec_id);
    sample_spec.rate = st->codecpar->sample_rate;
    sample_spec.channels = st->codecpar->channels;
    if (!pa_sample_spec_valid(&sample_spec)) {
        av_log(s, AV_LOG_ERROR, "Invalid sample spec.\n");
        return AVERROR(EINVAL);
    }

    if (sample_spec.channels == 1) {
        channel_map.channels = 1;
        channel_map.map[0] = PA_CHANNEL_POSITION_MONO;
    } else if (st->codecpar->channel_layout) {
        if (av_get_channel_layout_nb_channels(st->codecpar->channel_layout) != st->codecpar->channels)
            return AVERROR(EINVAL);
        pulse_map_channels_to_pulse(st->codecpar->channel_layout, &channel_map);
        /* Unknown channel is present in channel_layout, let PulseAudio use its default. */
        if (channel_map.channels != sample_spec.channels) {
            av_log(s, AV_LOG_WARNING, "Unknown channel. Using default channel map.\n");
            channel_map.channels = 0;
        }
    } else
        channel_map.channels = 0;

    if (!channel_map.channels)
        av_log(s, AV_LOG_WARNING, "Using PulseAudio's default channel map.\n");
    else if (!pa_channel_map_valid(&channel_map)) {
        av_log(s, AV_LOG_ERROR, "Invalid channel map.\n");
        return AVERROR(EINVAL);
    }

    /* start main loop */
    s->mainloop = pa_threaded_mainloop_new();
    if (!s->mainloop) {
        av_log(s, AV_LOG_ERROR, "Cannot create threaded mainloop.\n");
        return AVERROR(ENOMEM);
    }
    if ((ret = pa_threaded_mainloop_start(s->mainloop)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot start threaded mainloop: %s.\n", pa_strerror(ret));
        pa_threaded_mainloop_free(s->mainloop);
        s->mainloop = NULL;
        return AVERROR_EXTERNAL;
    }

    pa_threaded_mainloop_lock(s->mainloop);

    mainloop_api = pa_threaded_mainloop_get_api(s->mainloop);
    if (!mainloop_api) {
        av_log(s, AV_LOG_ERROR, "Cannot get mainloop API.\n");
        ret = AVERROR_EXTERNAL;
        goto fail;
    }

    s->ctx = pa_context_new(mainloop_api, s->name);
    if (!s->ctx) {
        av_log(s, AV_LOG_ERROR, "Cannot create context.\n");
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    pa_context_set_state_callback(s->ctx, pulse_context_state, s);
    pa_context_set_subscribe_callback(s->ctx, pulse_event, h);

    if ((ret = pa_context_connect(s->ctx, s->server, 0, NULL)) < 0) {
        av_log(s, AV_LOG_ERROR, "Cannot connect context: %s.\n", pa_strerror(ret));
        ret = AVERROR_EXTERNAL;
        goto fail;
    }
    if ((ret = pulse_context_wait(s)) < 0) {
        av_log(s, AV_LOG_ERROR, "Context failed.\n");
        goto fail;
    }

    s->stream = pa_stream_new(s->ctx, stream_name, &sample_spec,
                              channel_map.channels ? &channel_map : NULL);

    if ((ret = pulse_update_sink_info(h)) < 0) {
        av_log(s, AV_LOG_ERROR, "Updating sink info failed.\n");
        goto fail;
    }

    if (!s->stream) {
        av_log(s, AV_LOG_ERROR, "Cannot create stream.\n");
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    pa_stream_set_state_callback(s->stream, pulse_stream_state, s);
    pa_stream_set_write_callback(s->stream, pulse_stream_writable, h);
    pa_stream_set_overflow_callback(s->stream, pulse_overflow, h);
    pa_stream_set_underflow_callback(s->stream, pulse_underflow, h);

    if ((ret = pa_stream_connect_playback(s->stream, s->device, &buffer_attributes,
                                          stream_flags, NULL, NULL)) < 0) {
        av_log(s, AV_LOG_ERROR, "pa_stream_connect_playback failed: %s.\n", pa_strerror(ret));
        ret = AVERROR_EXTERNAL;
        goto fail;
    }
    if ((ret = pulse_stream_wait(s)) < 0) {
        av_log(s, AV_LOG_ERROR, "Stream failed.\n");
        goto fail;
    }

    /* read back buffer attributes for future use */
    buffer_attributes = *pa_stream_get_buffer_attr(s->stream);
    s->buffer_size = buffer_attributes.tlength;
    s->prebuf = buffer_attributes.prebuf;
    s->minreq = buffer_attributes.minreq;
    av_log(s, AV_LOG_DEBUG, "Real buffer attributes: size: %d, prebuf: %d, minreq: %d\n",
           s->buffer_size, s->prebuf, s->minreq);

    pa_threaded_mainloop_unlock(s->mainloop);

    /* pulse_subscribe_events() locks/unlocks the mainloop itself. */
    if ((ret = pulse_subscribe_events(s)) < 0) {
        av_log(s, AV_LOG_ERROR, "Event subscription failed.\n");
        /* a bit ugly but the simplest to lock here*/
        pa_threaded_mainloop_lock(s->mainloop);
        goto fail;
    }

    /* force control messages */
    s->mute = -1;
    s->last_volume = PA_VOLUME_INVALID;
    pa_threaded_mainloop_lock(s->mainloop);
    if ((ret = pulse_update_sink_input_info(h)) < 0) {
        av_log(s, AV_LOG_ERROR, "Updating sink input info failed.\n");
        goto fail;
    }
    pa_threaded_mainloop_unlock(s->mainloop);

    avpriv_set_pts_info(st, 64, 1, 1000000); /* 64 bits pts in us */

    return 0;

fail:
    pa_threaded_mainloop_unlock(s->mainloop);
    pulse_write_trailer(h);
    return ret;
}
/* Write one packet of interleaved PCM to the playback stream.
 *
 * A NULL packet flushes the stream (AVFMT_ALLOW_FLUSH). s->timestamp is kept
 * pointing at the end of the queued audio: it is advanced by the packet
 * duration, or by a duration derived from the sample count when the packet
 * carries none.
 * Returns 0 on success, AVERROR(EAGAIN) in non-blocking mode when the server
 * cannot accept minreq bytes yet, or AVERROR_EXTERNAL on stream errors.
 */
static int pulse_write_packet(AVFormatContext *h, AVPacket *pkt)
{
    PulseData *s = h->priv_data;
    int ret;
    int64_t writable_size;

    if (!pkt)
        return pulse_flash_stream(s);

    if (pkt->dts != AV_NOPTS_VALUE)
        s->timestamp = pkt->dts;

    if (pkt->duration) {
        s->timestamp += pkt->duration;
    } else {
        /* No duration given: derive it from the number of samples. */
        AVStream *st = h->streams[0];
        AVRational r = { 1, st->codecpar->sample_rate };
        int64_t samples = pkt->size / (av_get_bytes_per_sample(st->codecpar->format) * st->codecpar->channels);
        s->timestamp += av_rescale_q(samples, r, st->time_base);
    }

    pa_threaded_mainloop_lock(s->mainloop);
    if (!PA_STREAM_IS_GOOD(pa_stream_get_state(s->stream))) {
        av_log(s, AV_LOG_ERROR, "PulseAudio stream is in invalid state.\n");
        goto fail;
    }
    /* Wait (or bail out in non-blocking mode) until the server can take at
       least minreq bytes; pulse_stream_writable() signals the mainloop. */
    while (pa_stream_writable_size(s->stream) < s->minreq) {
        if (s->nonblocking) {
            pa_threaded_mainloop_unlock(s->mainloop);
            return AVERROR(EAGAIN);
        } else
            pa_threaded_mainloop_wait(s->mainloop);
    }

    if ((ret = pa_stream_write(s->stream, pkt->data, pkt->size, NULL, 0, PA_SEEK_RELATIVE)) < 0) {
        av_log(s, AV_LOG_ERROR, "pa_stream_write failed: %s\n", pa_strerror(ret));
        goto fail;
    }

    /* Tell the application immediately if more room is already available. */
    if ((writable_size = pa_stream_writable_size(s->stream)) >= s->minreq)
        avdevice_dev_to_app_control_message(h, AV_DEV_TO_APP_BUFFER_WRITABLE, &writable_size, sizeof(writable_size));

    pa_threaded_mainloop_unlock(s->mainloop);

    return 0;

fail:
    pa_threaded_mainloop_unlock(s->mainloop);
    return AVERROR_EXTERNAL;
}
/* Uncoded-frame entry point: wrap the frame's interleaved data in a
 * temporary packet and hand it to pulse_write_packet().
 * Planar sample formats are not supported yet; a QUERY call reports that. */
static int pulse_write_frame(AVFormatContext *h, int stream_index,
                             AVFrame **frame, unsigned flags)
{
    AVPacket pkt;
    AVFrame *f;

    if (flags & AV_WRITE_UNCODED_FRAME_QUERY) {
        int fmt = h->streams[stream_index]->codecpar->format;
        return av_sample_fmt_is_planar(fmt) ? AVERROR(EINVAL) : 0;
    }

    f = *frame;
    pkt.data     = f->data[0];
    pkt.size     = f->nb_samples * av_get_bytes_per_sample(f->format) * f->channels;
    pkt.dts      = f->pkt_dts;
    pkt.duration = f->pkt_duration;
    return pulse_write_packet(h, &pkt);
}
/* Report the wall-clock time and the DTS of the sample currently being
 * played (queued-end timestamp minus the server-reported latency). */
static void pulse_get_output_timestamp(AVFormatContext *h, int stream, int64_t *dts, int64_t *wall)
{
    PulseData *s = h->priv_data;
    pa_usec_t latency;
    int negative;

    pa_threaded_mainloop_lock(s->mainloop);
    pa_stream_get_latency(s->stream, &latency, &negative);
    pa_threaded_mainloop_unlock(s->mainloop);

    if (wall)
        *wall = av_gettime();
    if (dts) {
        /* A negative latency means playback is ahead: add it instead. */
        if (negative)
            *dts = s->timestamp + latency;
        else
            *dts = s->timestamp - latency;
    }
}
/* Enumerate PulseAudio sinks (output devices, hence is_output = 1). */
static int pulse_get_device_list(AVFormatContext *h, AVDeviceInfoList *device_list)
{
    PulseData *s = h->priv_data;

    return ff_pulse_audio_get_devices(device_list, s->server, 1);
}
/* Handle application -> device control messages (pause/play, mute, volume).
 *
 * s->mute uses -1 as "unknown" until the first sink-input query completes.
 * NOTE(review): with s->mute == -1, AV_APP_TO_DEV_MUTE takes the
 * "already muted" branch and does nothing — confirm this is intended.
 * Returns 0 or a negative AVERROR; AVERROR(ENOSYS) for unsupported types.
 */
static int pulse_control_message(AVFormatContext *h, int type,
                                 void *data, size_t data_size)
{
    PulseData *s = h->priv_data;
    int ret;

    switch(type) {
    case AV_APP_TO_DEV_PAUSE:
        return pulse_set_pause(s, 1);
    case AV_APP_TO_DEV_PLAY:
        return pulse_set_pause(s, 0);
    case AV_APP_TO_DEV_TOGGLE_PAUSE:
        return pulse_set_pause(s, !pa_stream_is_corked(s->stream));
    case AV_APP_TO_DEV_MUTE:
        if (!s->mute) {
            s->mute = 1;
            return pulse_set_mute(s);
        }
        return 0;
    case AV_APP_TO_DEV_UNMUTE:
        if (s->mute) {
            s->mute = 0;
            return pulse_set_mute(s);
        }
        return 0;
    case AV_APP_TO_DEV_TOGGLE_MUTE:
        s->mute = !s->mute;
        return pulse_set_mute(s);
    case AV_APP_TO_DEV_SET_VOLUME:
        return pulse_set_volume(s, *(double *)data);
    case AV_APP_TO_DEV_GET_VOLUME:
        /* Invalidate the cache so the query triggers a fresh
           VOLUME_LEVEL_CHANGED message back to the application. */
        s->last_volume = PA_VOLUME_INVALID;
        pa_threaded_mainloop_lock(s->mainloop);
        ret = pulse_update_sink_input_info(h);
        pa_threaded_mainloop_unlock(s->mainloop);
        return ret;
    case AV_APP_TO_DEV_GET_MUTE:
        s->mute = -1;
        pa_threaded_mainloop_lock(s->mainloop);
        ret = pulse_update_sink_input_info(h);
        pa_threaded_mainloop_unlock(s->mainloop);
        return ret;
    default:
        break;
    }
    return AVERROR(ENOSYS);
}
#define OFFSET(a) offsetof(PulseData, a)
#define E AV_OPT_FLAG_ENCODING_PARAM

/* Muxer private options (all encoding-side). */
static const AVOption options[] = {
    { "server",          "set PulseAudio server",            OFFSET(server),          AV_OPT_TYPE_STRING, {.str = NULL},               0, 0,       E },
    { "name",            "set application name",             OFFSET(name),            AV_OPT_TYPE_STRING, {.str = LIBAVFORMAT_IDENT},  0, 0,       E },
    { "stream_name",     "set stream description",           OFFSET(stream_name),     AV_OPT_TYPE_STRING, {.str = NULL},               0, 0,       E },
    { "device",          "set device name",                  OFFSET(device),          AV_OPT_TYPE_STRING, {.str = NULL},               0, 0,       E },
    { "buffer_size",     "set buffer size in bytes",         OFFSET(buffer_size),     AV_OPT_TYPE_INT,    {.i64 = 0},                  0, INT_MAX, E },
    { "buffer_duration", "set buffer duration in millisecs", OFFSET(buffer_duration), AV_OPT_TYPE_INT,    {.i64 = 0},                  0, INT_MAX, E },
    { "prebuf",          "set pre-buffering size",           OFFSET(prebuf),          AV_OPT_TYPE_INT,    {.i64 = 0},                  0, INT_MAX, E },
    { "minreq",          "set minimum request size",         OFFSET(minreq),          AV_OPT_TYPE_INT,    {.i64 = 0},                  0, INT_MAX, E },
    { NULL }
};

static const AVClass pulse_muxer_class = {
    .class_name     = "PulseAudio outdev",
    .item_name      = av_default_item_name,
    .option         = options,
    .version        = LIBAVUTIL_VERSION_INT,
    .category       = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
};

/* PulseAudio output device registration: single PCM audio stream, native
 * endianness, no file backing, supports NULL-packet flushing. */
AVOutputFormat ff_pulse_muxer = {
    .name                 = "pulse",
    .long_name            = NULL_IF_CONFIG_SMALL("Pulse audio output"),
    .priv_data_size       = sizeof(PulseData),
    .audio_codec          = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
    .video_codec          = AV_CODEC_ID_NONE,
    .write_header         = pulse_write_header,
    .write_packet         = pulse_write_packet,
    .write_uncoded_frame  = pulse_write_frame,
    .write_trailer        = pulse_write_trailer,
    .get_output_timestamp = pulse_get_output_timestamp,
    .get_device_list      = pulse_get_device_list,
    .control_message      = pulse_control_message,
    .flags                = AVFMT_NOFILE | AVFMT_ALLOW_FLUSH,
    .priv_class           = &pulse_muxer_class,
};

1
externals/ffmpeg/libavdevice/reverse.c vendored Executable file
View File

@@ -0,0 +1 @@
#include "libavutil/reverse.c"

369
externals/ffmpeg/libavdevice/sdl2.c vendored Executable file
View File

@@ -0,0 +1,369 @@
/*
* Copyright (c) 2016 Josh de Kock
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* libSDL2 output device
*/
#include <SDL.h>
#include <SDL_thread.h>
#include "libavutil/avstring.h"
#include "libavutil/imgutils.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "avdevice.h"
/* Private state for the SDL2 video output device. */
typedef struct {
    AVClass *class;
    SDL_Window *window;
    SDL_Renderer *renderer;
    char *window_title;              /**< window title; defaults to the output URL */
    int window_width, window_height; /**< size of the window */
    int window_x, window_y;          /**< position of the window */
    int window_fullscreen;
    int window_borderless;
    int enable_quit_action;          /**< allow q/ESC/window-close to abort muxing */
    SDL_Texture *texture;            /**< streaming texture the frames are uploaded to */
    int texture_fmt;                 /**< SDL_PIXELFORMAT_* matching the stream's pixel format */
    SDL_Rect texture_rect;           /**< centered, aspect-preserving destination rectangle */
    int inited;                      /**< SDL video subsystem ready (or owned by someone else) */
} SDLContext;
/* Mapping from FFmpeg pixel formats to SDL texture pixel formats.
 * Terminated by the AV_PIX_FMT_NONE entry; sdl2_write_header() takes the
 * first match. */
static const struct sdl_texture_format_entry {
    enum AVPixelFormat format; int texture_fmt;
} sdl_texture_format_map[] = {
    /*
     * Not implemented in FFmpeg, but leaving here for completeness.
     * { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_ARGB4444 },
     * { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_RGBA4444 },
     * { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_ABGR4444 },
     * { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_BGRA4444 },
     * { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_ARGB1555 },
     * { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_RGBA5551 },
     * { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_ABGR1555 },
     * { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_BGRA5551 },
     * { AV_PIX_FMT_NONE, SDL_PIXELFORMAT_ARGB2101010 },
     */
    { AV_PIX_FMT_RGB8,    SDL_PIXELFORMAT_RGB332 },
    { AV_PIX_FMT_RGB444,  SDL_PIXELFORMAT_RGB444 },
    { AV_PIX_FMT_RGB555,  SDL_PIXELFORMAT_RGB555 },
    { AV_PIX_FMT_BGR555,  SDL_PIXELFORMAT_BGR555 },
    { AV_PIX_FMT_RGB565,  SDL_PIXELFORMAT_RGB565 },
    { AV_PIX_FMT_BGR565,  SDL_PIXELFORMAT_BGR565 },
    { AV_PIX_FMT_RGB24,   SDL_PIXELFORMAT_RGB24 },
    { AV_PIX_FMT_BGR24,   SDL_PIXELFORMAT_BGR24 },
    { AV_PIX_FMT_0RGB32,  SDL_PIXELFORMAT_RGB888 },
    { AV_PIX_FMT_0BGR32,  SDL_PIXELFORMAT_BGR888 },
    /* The X8888 SDL formats are byte-order dependent. */
#if HAVE_BIGENDIAN
    { AV_PIX_FMT_RGB0,    SDL_PIXELFORMAT_RGBX8888 },
    { AV_PIX_FMT_BGR0,    SDL_PIXELFORMAT_BGRX8888 },
#else
    { AV_PIX_FMT_0BGR,    SDL_PIXELFORMAT_RGBX8888 },
    { AV_PIX_FMT_0RGB,    SDL_PIXELFORMAT_BGRX8888 },
#endif
    { AV_PIX_FMT_RGB32,   SDL_PIXELFORMAT_ARGB8888 },
    { AV_PIX_FMT_RGB32_1, SDL_PIXELFORMAT_RGBA8888 },
    { AV_PIX_FMT_BGR32,   SDL_PIXELFORMAT_ABGR8888 },
    { AV_PIX_FMT_BGR32_1, SDL_PIXELFORMAT_BGRA8888 },
    { AV_PIX_FMT_YUV420P, SDL_PIXELFORMAT_IYUV },
    { AV_PIX_FMT_YUYV422, SDL_PIXELFORMAT_YUY2 },
    { AV_PIX_FMT_UYVY422, SDL_PIXELFORMAT_UYVY },
    { AV_PIX_FMT_NONE,    0 },
};
/* Compute the destination rectangle for the video texture.
 *
 * Preserves the stream's display aspect ratio: when a window size is forced,
 * the rectangle is letterboxed/pillarboxed to fit inside it; otherwise the
 * window is sized from the codec dimensions (scaling the shorter axis by the
 * sample aspect ratio) and the rectangle fills it. The rectangle is then
 * centered in the window. Note the else-branch updates sdl->window_width/
 * height, which the centering below depends on.
 */
static void compute_texture_rect(AVFormatContext *s)
{
    AVRational sar, dar; /* sample and display aspect ratios */
    SDLContext *sdl = s->priv_data;
    AVStream *st = s->streams[0];
    AVCodecParameters *codecpar = st->codecpar;
    SDL_Rect *texture_rect = &sdl->texture_rect;

    /* compute texture width and height from the codec context information */
    sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio : (AVRational){ 1, 1 };
    dar = av_mul_q(sar, (AVRational){ codecpar->width, codecpar->height });

    /* we suppose the screen has a 1/1 sample aspect ratio */
    if (sdl->window_width && sdl->window_height) {
        /* fit in the window */
        if (av_cmp_q(dar, (AVRational){ sdl->window_width, sdl->window_height }) > 0) {
            /* fit in width */
            texture_rect->w = sdl->window_width;
            texture_rect->h = av_rescale(texture_rect->w, dar.den, dar.num);
        } else {
            /* fit in height */
            texture_rect->h = sdl->window_height;
            texture_rect->w = av_rescale(texture_rect->h, dar.num, dar.den);
        }
    } else {
        if (sar.num > sar.den) {
            texture_rect->w = codecpar->width;
            texture_rect->h = av_rescale(texture_rect->w, dar.den, dar.num);
        } else {
            texture_rect->h = codecpar->height;
            texture_rect->w = av_rescale(texture_rect->h, dar.num, dar.den);
        }
        sdl->window_width  = texture_rect->w;
        sdl->window_height = texture_rect->h;
    }

    /* center the rectangle in the window */
    texture_rect->x = (sdl->window_width  - texture_rect->w) / 2;
    texture_rect->y = (sdl->window_height - texture_rect->h) / 2;
}
/* Release SDL resources in reverse order of creation (texture, renderer,
 * window). Also serves as the failure path of sdl2_write_header().
 * SDL_Quit() is only called when 'inited' is still unset, i.e. neither this
 * muxer finished initialization nor another component owns the SDL video
 * subsystem. Always returns 0. */
static int sdl2_write_trailer(AVFormatContext *s)
{
    SDLContext *sdl = s->priv_data;

    if (sdl->texture)
        SDL_DestroyTexture(sdl->texture);
    if (sdl->renderer)
        SDL_DestroyRenderer(sdl->renderer);
    if (sdl->window)
        SDL_DestroyWindow(sdl->window);
    sdl->texture  = NULL;
    sdl->renderer = NULL;
    sdl->window   = NULL;

    if (!sdl->inited)
        SDL_Quit();
    return 0;
}
/* Muxer init: validate the single rawvideo stream, pick the SDL texture
 * format, initialize the SDL video subsystem (unless something else already
 * did), create the window/renderer/streaming texture and show the window.
 *
 * Fix vs. previous revision: every error path previously jumped to 'fail'
 * with ret still 0, so the muxer reported success on failure. Each failure
 * now sets a proper negative AVERROR code before the jump.
 *
 * Returns 0 on success or a negative AVERROR code; partially created SDL
 * objects are released via sdl2_write_trailer() on failure.
 */
static int sdl2_write_header(AVFormatContext *s)
{
    SDLContext *sdl = s->priv_data;
    AVStream *st = s->streams[0];
    AVCodecParameters *codecpar = st->codecpar;
    int i, ret = 0;
    int flags = 0;

    if (!sdl->window_title)
        sdl->window_title = av_strdup(s->url);

    if (SDL_WasInit(SDL_INIT_VIDEO)) {
        av_log(s, AV_LOG_WARNING,
               "SDL video subsystem was already inited, you could have multiple SDL outputs. This may cause unknown behaviour.\n");
        /* Someone else owns SDL: do not SDL_Quit() on teardown. */
        sdl->inited = 1;
    }

    if ( s->nb_streams > 1
      || codecpar->codec_type != AVMEDIA_TYPE_VIDEO
      || codecpar->codec_id != AV_CODEC_ID_RAWVIDEO) {
        av_log(s, AV_LOG_ERROR, "Only supports one rawvideo stream\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    /* Find the SDL texture format matching the stream's pixel format. */
    for (i = 0; sdl_texture_format_map[i].format != AV_PIX_FMT_NONE; i++) {
        if (sdl_texture_format_map[i].format == codecpar->format) {
            sdl->texture_fmt = sdl_texture_format_map[i].texture_fmt;
            break;
        }
    }

    if (!sdl->texture_fmt) {
        av_log(s, AV_LOG_ERROR,
               "Unsupported pixel format '%s'.\n",
               av_get_pix_fmt_name(codecpar->format));
        ret = AVERROR(EINVAL);
        goto fail;
    }

    /* resize texture to width and height from the codec context information */
    flags = SDL_WINDOW_HIDDEN |
            (sdl->window_fullscreen ? SDL_WINDOW_FULLSCREEN : 0) |
            (sdl->window_borderless ? SDL_WINDOW_BORDERLESS : SDL_WINDOW_RESIZABLE);

    /* initialization */
    if (!sdl->inited){
        if (SDL_Init(SDL_INIT_VIDEO) != 0) {
            av_log(s, AV_LOG_ERROR, "Unable to initialize SDL: %s\n", SDL_GetError());
            ret = AVERROR_EXTERNAL;
            goto fail;
        }
    }

    compute_texture_rect(s);

    if (SDL_CreateWindowAndRenderer(sdl->window_width, sdl->window_height,
                                    flags, &sdl->window, &sdl->renderer) != 0){
        av_log(sdl, AV_LOG_ERROR, "Couldn't create window and renderer: %s\n", SDL_GetError());
        ret = AVERROR_EXTERNAL;
        goto fail;
    }

    SDL_SetWindowTitle(sdl->window, sdl->window_title);
    SDL_SetWindowPosition(sdl->window, sdl->window_x, sdl->window_y);
    SDL_ShowWindow(sdl->window);

    sdl->texture = SDL_CreateTexture(sdl->renderer, sdl->texture_fmt, SDL_TEXTUREACCESS_STREAMING,
                                     codecpar->width, codecpar->height);
    if (!sdl->texture) {
        av_log(sdl, AV_LOG_ERROR, "Unable to set create mode: %s\n", SDL_GetError());
        ret = AVERROR_EXTERNAL;
        goto fail;
    }

    av_log(s, AV_LOG_VERBOSE, "w:%d h:%d fmt:%s -> w:%d h:%d\n",
           codecpar->width, codecpar->height, av_get_pix_fmt_name(codecpar->format),
           sdl->window_width, sdl->window_height);
    sdl->inited = 1;

    return 0;
fail:
    sdl2_write_trailer(s);
    return ret;
}
/* Display one rawvideo packet.
 *
 * Polls at most one SDL event per packet: a resize updates the letterbox
 * rectangle, and q/ESC/window-close aborts muxing when window_enable_quit
 * is set. The frame is then uploaded into the streaming texture and
 * presented.
 * NOTE(review): SDL generally requires event handling on the thread that
 * initialized the video subsystem — confirm for the intended platforms.
 * Returns 0 on success, a negative value on upload failure, or
 * AVERROR(EIO) when the user requested quit.
 */
static int sdl2_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, quit = 0;
    SDLContext *sdl = s->priv_data;
    AVCodecParameters *codecpar = s->streams[0]->codecpar;
    uint8_t *data[4];
    int linesize[4];

    SDL_Event event;
    if (SDL_PollEvent(&event)){
        switch (event.type) {
        case SDL_KEYDOWN:
            switch (event.key.keysym.sym) {
            case SDLK_ESCAPE:
            case SDLK_q:
                quit = 1;
                break;
            default:
                break;
            }
            break;
        case SDL_QUIT:
            quit = 1;
            break;
        case SDL_WINDOWEVENT:
            switch(event.window.event){
            case SDL_WINDOWEVENT_RESIZED:
            case SDL_WINDOWEVENT_SIZE_CHANGED:
                /* Recompute the destination rectangle for the new size. */
                sdl->window_width  = event.window.data1;
                sdl->window_height = event.window.data2;
                compute_texture_rect(s);
                break;
            default:
                break;
            }
            break;
        default:
            break;
        }
    }

    if (quit && sdl->enable_quit_action) {
        sdl2_write_trailer(s);
        return AVERROR(EIO);
    }

    /* Derive per-plane pointers/strides from the packed packet data. */
    av_image_fill_arrays(data, linesize, pkt->data, codecpar->format, codecpar->width, codecpar->height, 1);
    switch (sdl->texture_fmt) {
    /* case SDL_PIXELFORMAT_ARGB4444:
     * case SDL_PIXELFORMAT_RGBA4444:
     * case SDL_PIXELFORMAT_ABGR4444:
     * case SDL_PIXELFORMAT_BGRA4444:
     * case SDL_PIXELFORMAT_ARGB1555:
     * case SDL_PIXELFORMAT_RGBA5551:
     * case SDL_PIXELFORMAT_ABGR1555:
     * case SDL_PIXELFORMAT_BGRA5551:
     * case SDL_PIXELFORMAT_ARGB2101010:
     */
    case SDL_PIXELFORMAT_IYUV:
    case SDL_PIXELFORMAT_YUY2:
    case SDL_PIXELFORMAT_UYVY:
        /* NOTE(review): YUY2/UYVY are packed formats, so data[1]/data[2]
           are not separate planes here — verify SDL_UpdateYUVTexture
           accepts this, or route packed formats via SDL_UpdateTexture. */
        ret = SDL_UpdateYUVTexture(sdl->texture, NULL,
                                   data[0], linesize[0],
                                   data[1], linesize[1],
                                   data[2], linesize[2]);
        break;
    case SDL_PIXELFORMAT_RGB332:
    case SDL_PIXELFORMAT_RGB444:
    case SDL_PIXELFORMAT_RGB555:
    case SDL_PIXELFORMAT_BGR555:
    case SDL_PIXELFORMAT_RGB565:
    case SDL_PIXELFORMAT_BGR565:
    case SDL_PIXELFORMAT_RGB24:
    case SDL_PIXELFORMAT_BGR24:
    case SDL_PIXELFORMAT_RGB888:
    case SDL_PIXELFORMAT_RGBX8888:
    case SDL_PIXELFORMAT_BGR888:
    case SDL_PIXELFORMAT_BGRX8888:
    case SDL_PIXELFORMAT_ARGB8888:
    case SDL_PIXELFORMAT_RGBA8888:
    case SDL_PIXELFORMAT_ABGR8888:
    case SDL_PIXELFORMAT_BGRA8888:
        ret = SDL_UpdateTexture(sdl->texture, NULL, data[0], linesize[0]);
        break;
    default:
        av_log(NULL, AV_LOG_FATAL, "Unsupported pixel format\n");
        ret = -1;
        break;
    }

    SDL_RenderClear(sdl->renderer);
    SDL_RenderCopy(sdl->renderer, sdl->texture, NULL, &sdl->texture_rect);
    SDL_RenderPresent(sdl->renderer);
    return ret;
}
#define OFFSET(x) offsetof(SDLContext,x)

/* Muxer private options (all encoding-side). */
static const AVOption options[] = {
    { "window_title",        "set SDL window title",           OFFSET(window_title),       AV_OPT_TYPE_STRING,     { .str = NULL },                    0, 0,       AV_OPT_FLAG_ENCODING_PARAM },
    { "window_size",         "set SDL window forced size",     OFFSET(window_width),       AV_OPT_TYPE_IMAGE_SIZE, { .str = NULL },                    0, 0,       AV_OPT_FLAG_ENCODING_PARAM },
    { "window_x",            "set SDL window x position",      OFFSET(window_x),           AV_OPT_TYPE_INT,        { .i64 = SDL_WINDOWPOS_CENTERED },  INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_y",            "set SDL window y position",      OFFSET(window_y),           AV_OPT_TYPE_INT,        { .i64 = SDL_WINDOWPOS_CENTERED },  INT_MIN, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_fullscreen",   "set SDL window fullscreen",      OFFSET(window_fullscreen),  AV_OPT_TYPE_BOOL,       { .i64 = 0 },                       0, 1,       AV_OPT_FLAG_ENCODING_PARAM },
    { "window_borderless",   "set SDL window border off",      OFFSET(window_borderless),  AV_OPT_TYPE_BOOL,       { .i64 = 0 },                       0, 1,       AV_OPT_FLAG_ENCODING_PARAM },
    { "window_enable_quit",  "set if quit action is available", OFFSET(enable_quit_action), AV_OPT_TYPE_INT,       {.i64=1},                           0, 1,       AV_OPT_FLAG_ENCODING_PARAM },
    { NULL },
};

static const AVClass sdl2_class = {
    .class_name = "sdl2 outdev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};

/* SDL2 output device registration: single rawvideo stream, no file backing,
 * frame rate and timestamps are ignored (display-as-they-arrive). */
AVOutputFormat ff_sdl2_muxer = {
    .name           = "sdl,sdl2",
    .long_name      = NULL_IF_CONFIG_SMALL("SDL2 output device"),
    .priv_data_size = sizeof(SDLContext),
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_RAWVIDEO,
    .write_header   = sdl2_write_header,
    .write_packet   = sdl2_write_packet,
    .write_trailer  = sdl2_write_trailer,
    .flags          = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
    .priv_class     = &sdl2_class,
};

120
externals/ffmpeg/libavdevice/sndio.c vendored Executable file
View File

@@ -0,0 +1,120 @@
/*
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <sndio.h>
#include "avdevice.h"
#include "libavdevice/sndio.h"
/* sndio onmove callback: advance the hardware position counter.
 * 'delta' is presumably in frames (bytes = frames * channels * bytes/sample
 * — TODO confirm against sio_onmove() semantics). */
static inline void movecb(void *addr, int delta)
{
    SndioData *sndio = addr;

    sndio->hwpos += delta * sndio->channels * sndio->bps;
}
/* Open a sndio device for playback (is_output != 0) or recording.
 *
 * Negotiates 16-bit signed native-endian samples at s->channels /
 * s->sample_rate, rejects any deviation the hardware reports back, and on
 * success fills the SndioData fields (codec_id, channels, sample_rate, bps,
 * hdl; a staging buffer for output) and starts the device.
 * Returns 0 on success, AVERROR(EIO) on any failure.
 */
av_cold int ff_sndio_open(AVFormatContext *s1, int is_output,
                          const char *audio_device)
{
    SndioData *s = s1->priv_data;
    struct sio_hdl *hdl;
    struct sio_par par;

    hdl = sio_open(audio_device, is_output ? SIO_PLAY : SIO_REC, 0);
    if (!hdl) {
        av_log(s1, AV_LOG_ERROR, "Could not open sndio device\n");
        return AVERROR(EIO);
    }

    sio_initpar(&par);

    par.bits = 16;
    par.sig  = 1;
    par.le   = SIO_LE_NATIVE;

    if (is_output)
        par.pchan = s->channels;
    else
        par.rchan = s->channels;
    par.rate = s->sample_rate;

    /* Submit the request, then read back what the hardware actually set. */
    if (!sio_setpar(hdl, &par) || !sio_getpar(hdl, &par)) {
        av_log(s1, AV_LOG_ERROR, "Impossible to set sndio parameters, "
               "channels: %d sample rate: %d\n", s->channels, s->sample_rate);
        goto fail;
    }

    /* The device may have negotiated something else: refuse any deviation. */
    if (par.bits != 16 || par.sig != 1 ||
        (is_output  && (par.pchan != s->channels)) ||
        (!is_output && (par.rchan != s->channels)) ||
        (par.rate != s->sample_rate)) {
        av_log(s1, AV_LOG_ERROR, "Could not set appropriate sndio parameters, "
               "channels: %d sample rate: %d\n", s->channels, s->sample_rate);
        goto fail;
    }

    /* One hardware block ("round") worth of bytes. */
    s->buffer_size = par.round * par.bps *
                     (is_output ? par.pchan : par.rchan);

    if (is_output) {
        s->buffer = av_malloc(s->buffer_size);
        if (!s->buffer) {
            av_log(s1, AV_LOG_ERROR, "Could not allocate buffer\n");
            goto fail;
        }
    }

    s->codec_id    = par.le ? AV_CODEC_ID_PCM_S16LE : AV_CODEC_ID_PCM_S16BE;
    s->channels    = is_output ? par.pchan : par.rchan;
    s->sample_rate = par.rate;
    s->bps         = par.bps;

    sio_onmove(hdl, movecb, s);

    if (!sio_start(hdl)) {
        av_log(s1, AV_LOG_ERROR, "Could not start sndio\n");
        goto fail;
    }

    s->hdl = hdl;

    return 0;

fail:
    av_freep(&s->buffer);

    if (hdl)
        sio_close(hdl);

    return AVERROR(EIO);
}
/* Release the resources owned by a SndioData: the playback staging
 * buffer (if any) and the device handle. Always succeeds. */
int ff_sndio_close(SndioData *s)
{
    struct sio_hdl *handle = s->hdl;

    av_freep(&s->buffer);
    if (handle)
        sio_close(handle);

    return 0;
}

48
externals/ffmpeg/libavdevice/sndio.h vendored Executable file
View File

@@ -0,0 +1,48 @@
/*
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_SNDIO_H
#define AVDEVICE_SNDIO_H
#include <stdint.h>
#include <sndio.h>
#include "libavutil/log.h"
#include "avdevice.h"
/* Private context shared by the sndio capture and playback devices. */
typedef struct SndioData {
    AVClass *class;           // AVOptions class pointer (must be first member)
    struct sio_hdl *hdl;      // sndio device handle
    enum AVCodecID codec_id;  // negotiated PCM codec (S16LE or S16BE)
    int64_t hwpos;            // hardware position in bytes (advanced by onmove callback)
    int64_t softpos;          // bytes transferred by us to/from the device
    uint8_t *buffer;          // staging buffer, playback only
    int bps;                  // bytes per sample, as reported by sndio
    int buffer_size;          // size of one hardware period, in bytes
    int buffer_offset;        // current fill level of `buffer`
    int channels;             // channel count (requested, then granted)
    int sample_rate;          // sample rate in Hz (requested, then granted)
} SndioData;
int ff_sndio_open(AVFormatContext *s1, int is_output, const char *audio_device);
int ff_sndio_close(SndioData *s);
#endif /* AVDEVICE_SNDIO_H */

121
externals/ffmpeg/libavdevice/sndio_dec.c vendored Executable file
View File

@@ -0,0 +1,121 @@
/*
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <sndio.h>
#include "libavutil/internal.h"
#include "libavutil/opt.h"
#include "libavutil/time.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
#include "libavdevice/sndio.h"
/* Demuxer read_header: open the sndio device named by s1->url for capture
 * and create a single audio stream described by the parameters the device
 * actually granted. Returns 0 or a negative AVERROR. */
static av_cold int audio_read_header(AVFormatContext *s1)
{
    SndioData *s = s1->priv_data;
    AVStream *st;
    int ret;

    st = avformat_new_stream(s1, NULL);
    if (!st)
        return AVERROR(ENOMEM);

    ret = ff_sndio_open(s1, 0, s1->url);
    if (ret < 0)
        return ret;

    /* take real parameters */
    st->codecpar->codec_type  = AVMEDIA_TYPE_AUDIO;
    st->codecpar->codec_id    = s->codec_id;
    st->codecpar->sample_rate = s->sample_rate;
    st->codecpar->channels    = s->channels;

    avpriv_set_pts_info(st, 64, 1, 1000000);  /* 64 bits pts in us */

    return 0;
}
/* Demuxer read_packet: read up to one hardware period of PCM from the
 * device and timestamp it against the wall clock, compensated for the
 * amount of audio still buffered inside the device. */
static int audio_read_packet(AVFormatContext *s1, AVPacket *pkt)
{
    SndioData *s = s1->priv_data;
    int64_t bdelay, cur_time;
    int ret;

    /* Size the packet to one hardware period. */
    if ((ret = av_new_packet(pkt, s->buffer_size)) < 0)
        return ret;

    ret = sio_read(s->hdl, pkt->data, pkt->size);
    if (ret == 0 || sio_eof(s->hdl)) {
        av_packet_unref(pkt);
        return AVERROR_EOF;
    }

    pkt->size   = ret;  /* sio_read may return fewer bytes than requested */
    s->softpos += ret;

    /* compute pts of the start of the packet */
    cur_time = av_gettime();

    /* Bytes of audio between capture and delivery: this packet plus the
     * lag between the hardware position and what we have consumed. */
    bdelay = ret + s->hwpos - s->softpos;

    /* convert to pts */
    pkt->pts = cur_time - ((bdelay * 1000000) /
        (s->bps * s->channels * s->sample_rate));

    return 0;
}
/* Demuxer read_close: tear down the sndio capture device. */
static av_cold int audio_read_close(AVFormatContext *s1)
{
    ff_sndio_close((SndioData *)s1->priv_data);
    return 0;
}
/* User-settable capture options; defaults: 48 kHz stereo. */
static const AVOption options[] = {
    { "sample_rate", "", offsetof(SndioData, sample_rate), AV_OPT_TYPE_INT, {.i64 = 48000}, 1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { "channels",    "", offsetof(SndioData, channels),    AV_OPT_TYPE_INT, {.i64 = 2},     1, INT_MAX, AV_OPT_FLAG_DECODING_PARAM },
    { NULL },
};
/* AVClass for the sndio capture device, exposing the options above. */
static const AVClass sndio_demuxer_class = {
    .class_name = "sndio indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_AUDIO_INPUT,
};
/* sndio audio capture device registration; AVFMT_NOFILE because the
 * "URL" is a device name opened via sio_open(), not an AVIOContext. */
AVInputFormat ff_sndio_demuxer = {
    .name           = "sndio",
    .long_name      = NULL_IF_CONFIG_SMALL("sndio audio capture"),
    .priv_data_size = sizeof(SndioData),
    .read_header    = audio_read_header,
    .read_packet    = audio_read_packet,
    .read_close     = audio_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &sndio_demuxer_class,
};

103
externals/ffmpeg/libavdevice/sndio_enc.c vendored Executable file
View File

@@ -0,0 +1,103 @@
/*
* sndio play and grab interface
* Copyright (c) 2010 Jacob Meuser
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdint.h>
#include <sndio.h>
#include "libavutil/internal.h"
#include "libavdevice/avdevice.h"
#include "libavdevice/sndio.h"
/* Muxer write_header: copy the stream's audio parameters into the device
 * context, then open the sndio device named by s1->url for playback. */
static av_cold int audio_write_header(AVFormatContext *s1)
{
    SndioData *s          = s1->priv_data;
    const AVStream *first = s1->streams[0];

    s->sample_rate = first->codecpar->sample_rate;
    s->channels    = first->codecpar->channels;

    return ff_sndio_open(s1, 1, s1->url);
}
/* Muxer write_packet: accumulate packet bytes into the staging buffer and
 * push one full hardware period to sndio each time the buffer fills.
 * Returns 0, or AVERROR(EIO) if the device stops accepting data. */
static int audio_write_packet(AVFormatContext *s1, AVPacket *pkt)
{
    SndioData *s       = s1->priv_data;
    const uint8_t *src = pkt->data;
    int remaining      = pkt->size;

    while (remaining > 0) {
        int chunk = FFMIN(s->buffer_size - s->buffer_offset, remaining);

        memcpy(s->buffer + s->buffer_offset, src, chunk);
        src              += chunk;
        remaining        -= chunk;
        s->buffer_offset += chunk;

        if (s->buffer_offset >= s->buffer_size) {
            int written = sio_write(s->hdl, s->buffer, s->buffer_size);
            if (written == 0 || sio_eof(s->hdl))
                return AVERROR(EIO);
            s->softpos      += written;
            s->buffer_offset = 0;
        }
    }

    return 0;
}
/* Muxer write_trailer: flush whatever partial period remains in the
 * staging buffer, then close the device. */
static int audio_write_trailer(AVFormatContext *s1)
{
    SndioData *s = s1->priv_data;

    sio_write(s->hdl, s->buffer, s->buffer_offset);

    ff_sndio_close(s);

    return 0;
}
/* AVClass for the sndio playback device (no user options). */
static const AVClass sndio_muxer_class = {
    .class_name = "sndio outdev",
    .item_name  = av_default_item_name,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_AUDIO_OUTPUT,
};
/* sndio audio playback device registration. Accepts a single native-endian
 * S16 PCM stream; AVFMT_NOFILE because the URL is a device name. */
AVOutputFormat ff_sndio_muxer = {
    .name           = "sndio",
    .long_name      = NULL_IF_CONFIG_SMALL("sndio audio playback"),
    .priv_data_size = sizeof(SndioData),
    /* XXX: we make the assumption that the soundcard accepts this format */
    /* XXX: find better solution with "preinit" method, needed also in
       other formats */
    .audio_codec    = AV_NE(AV_CODEC_ID_PCM_S16BE, AV_CODEC_ID_PCM_S16LE),
    .video_codec    = AV_CODEC_ID_NONE,
    .write_header   = audio_write_header,
    .write_packet   = audio_write_packet,
    .write_trailer  = audio_write_trailer,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &sndio_muxer_class,
};

View File

@@ -0,0 +1 @@
/timefilter

View File

@@ -0,0 +1,85 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include <stdio.h>
#include "libavutil/common.h"
#include "libavutil/lfg.h"
#include "libavdevice/timefilter.h"
#define LFG_MAX ((1LL << 32) - 1)
/* Self-test for the timefilter: for a grid of noise amplitudes (n0) and
 * clock-drift rates (n1), synthesize noisy timestamps, then hill-climb over
 * the filter's two gain parameters to find the pair minimizing the squared
 * error against the ideal timeline, and print the best parameters/error. */
int main(void)
{
    AVLFG prng;
    double n0, n1;
#define SAMPLES 1000
    double ideal[SAMPLES];
    double samples[SAMPLES];
    double samplet[SAMPLES];
    /* Sweep noise amplitude n0 and drift rate n1 over exponential grids. */
    for (n0 = 0; n0 < 40; n0 = 2 * n0 + 1) {
        for (n1 = 0; n1 < 10; n1 = 2 * n1 + 1) {
            double best_error = 1000000000;
            double bestpar0   = n0 ? 1 : 100000;
            double bestpar1   = 1;
            int better, i;

            av_lfg_init(&prng, 123);  /* fixed seed: deterministic test */
            /* Build a jittery event-time series (samplet), add drift to get
             * the ideal timeline, then add noise to get the observations. */
            for (i = 0; i < SAMPLES; i++) {
                samplet[i] = 10 + i + (av_lfg_get(&prng) < LFG_MAX/2 ? 0 : 0.999);
                ideal[i]   = samplet[i] + n1 * i / (1000);
                samples[i] = ideal[i] + n0 * (av_lfg_get(&prng) - LFG_MAX / 2) / (LFG_MAX * 10LL);
                /* Keep observations monotonically increasing. */
                if(i && samples[i]<samples[i-1])
                    samples[i]=samples[i-1]+0.001;
            }

            /* Coordinate-descent: try +/-20% steps around the current best
             * parameter pair until no neighbour improves the error. */
            do {
                double par0, par1;
                better = 0;
                for (par0 = bestpar0 * 0.8; par0 <= bestpar0 * 1.21; par0 += bestpar0 * 0.05) {
                    for (par1 = bestpar1 * 0.8; par1 <= bestpar1 * 1.21; par1 += bestpar1 * 0.05) {
                        double error   = 0;
                        TimeFilter *tf = ff_timefilter_new(1, par0, par1);

                        if (!tf) {
                            printf("Could not allocate memory for timefilter.\n");
                            exit(1);
                        }
                        for (i = 0; i < SAMPLES; i++) {
                            double filtered;
                            filtered = ff_timefilter_update(tf, samples[i], i ? (samplet[i] - samplet[i-1]) : 1);
                            if(filtered < 0 || filtered > 1000000000)
                                printf("filter is unstable\n");
                            error += (filtered - ideal[i]) * (filtered - ideal[i]);
                        }
                        ff_timefilter_destroy(tf);
                        if (error < best_error) {
                            best_error = error;
                            bestpar0   = par0;
                            bestpar1   = par1;
                            better     = 1;
                        }
                    }
                }
            } while (better);

            printf(" [%12f %11f %9f]", bestpar0, bestpar1, best_error);
        }
        printf("\n");
    }

    return 0;
}

91
externals/ffmpeg/libavdevice/timefilter.c vendored Executable file
View File

@@ -0,0 +1,91 @@
/*
* Delay Locked Loop based time filter
* Copyright (c) 2009 Samalyse
* Copyright (c) 2009 Michael Niedermayer
* Author: Olivier Guilyardi <olivier samalyse com>
* Michael Niedermayer <michaelni gmx at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/common.h"
#include "libavutil/mem.h"
#include "timefilter.h"
struct TimeFilter {
    // Delay Locked Loop data. These variables refer to mathematical
    // concepts described in: http://www.kokkinizita.net/papers/usingdll.pdf
    double cycle_time;        // filtered timestamp of the current cycle
    double feedback2_factor;  // gain of the phase-correction feedback path
    double feedback3_factor;  // gain of the period-correction feedback path
    double clock_period;      // current estimate of the clock period
    int count;                // updates since creation/reset (0 = unlocked)
};
/* Approximate 1 - exp(-x) with a 3rd-order power series: the denominator
 * is the degree-3 Taylor expansion of exp(x) in Horner form, so the result
 * is 1 - 1/~exp(x) ~= 1 - exp(-x). Exact at x = 0. */
static double qexpneg(double x)
{
    double inner  = 1 + x / 3;
    double middle = 1 + x / 2 * inner;
    double series = 1 + x * middle;

    return 1 - 1 / series;
}
/* Allocate a DLL time filter. The loop gains are derived from the desired
 * bandwidth and expected update period via qexpneg(); see the paper
 * referenced in struct TimeFilter. Returns NULL on allocation failure. */
TimeFilter *ff_timefilter_new(double time_base,
                              double period,
                              double bandwidth)
{
    TimeFilter *tf = av_mallocz(sizeof(TimeFilter));
    double omega   = 2 * M_PI * bandwidth * period * time_base;

    if (!tf)
        return NULL;

    tf->clock_period     = time_base;
    tf->feedback2_factor = qexpneg(M_SQRT2 * omega);
    tf->feedback3_factor = qexpneg(omega * omega) / period;
    return tf;
}
/* Free the filter; av_freep also NULLs the caller's local copy. */
void ff_timefilter_destroy(TimeFilter *self)
{
    av_freep(&self);
}
/* Reset the lock: the next ff_timefilter_update() call will re-seed
 * cycle_time from the supplied system time. */
void ff_timefilter_reset(TimeFilter *self)
{
    self->count = 0;
}
/* Feed one (system_time, period) observation into the DLL and return the
 * filtered timestamp. Statement order matters: predict, measure error,
 * correct phase, correct frequency. */
double ff_timefilter_update(TimeFilter *self, double system_time, double period)
{
    self->count++;
    if (self->count == 1) {
        /* First observation after creation/reset: lock directly onto it. */
        self->cycle_time = system_time;
    } else {
        double loop_error;
        /* Predict this cycle's time from the current period estimate. */
        self->cycle_time += self->clock_period * period;
        loop_error = system_time - self->cycle_time;

        /* Phase correction; 1.0/count keeps the gain high while the filter
         * is still converging on the first few samples. */
        self->cycle_time   += FFMAX(self->feedback2_factor, 1.0 / self->count) * loop_error;
        /* Frequency (period) correction. */
        self->clock_period += self->feedback3_factor * loop_error;
    }
    return self->cycle_time;
}
/* Extrapolate the filtered clock `delta` periods away from the last
 * update, without modifying the filter state. */
double ff_timefilter_eval(TimeFilter *self, double delta)
{
    return self->cycle_time + self->clock_period * delta;
}

110
externals/ffmpeg/libavdevice/timefilter.h vendored Executable file
View File

@@ -0,0 +1,110 @@
/*
* Delay Locked Loop based time filter prototypes and declarations
* Copyright (c) 2009 Samalyse
* Copyright (c) 2009 Michael Niedermayer
* Author: Olivier Guilyardi <olivier samalyse com>
* Michael Niedermayer <michaelni gmx at>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_TIMEFILTER_H
#define AVDEVICE_TIMEFILTER_H
/**
* Opaque type representing a time filter state
*
* The purpose of this filter is to provide a way to compute accurate time
* stamps that can be compared to wall clock time, especially when dealing
* with two clocks: the system clock and a hardware device clock, such as
* a soundcard.
*/
typedef struct TimeFilter TimeFilter;
/**
* Create a new Delay Locked Loop time filter
*
* feedback2_factor and feedback3_factor are the factors used for the
* multiplications that are respectively performed in the second and third
* feedback paths of the loop.
*
* Unless you know what you are doing, you should set these as follow:
*
* o = 2 * M_PI * bandwidth * period_in_seconds
* feedback2_factor = sqrt(2) * o
* feedback3_factor = o * o
*
* Where bandwidth is up to you to choose. Smaller values will filter out more
* of the jitter, but also take a longer time for the loop to settle. A good
* starting point is something between 0.3 and 3 Hz.
*
* @param time_base period of the hardware clock in seconds
* (for example 1.0/44100)
* @param period expected update interval, in input units
 * @param bandwidth filtering bandwidth, in Hz
*
* @return a pointer to a TimeFilter struct, or NULL on error
*
* For more details about these parameters and background concepts please see:
* http://www.kokkinizita.net/papers/usingdll.pdf
*/
TimeFilter * ff_timefilter_new(double clock_period, double feedback2_factor, double feedback3_factor);
/**
* Update the filter
*
* This function must be called in real time, at each process cycle.
*
* @param period the device cycle duration in clock_periods. For example, at
* 44.1kHz and a buffer size of 512 frames, period = 512 when clock_period
* was 1.0/44100, or 512/44100 if clock_period was 1.
*
* system_time, in seconds, should be the value of the system clock time,
* at (or as close as possible to) the moment the device hardware interrupt
* occurred (or any other event the device clock raises at the beginning of a
* cycle).
*
* @return the filtered time, in seconds
*/
double ff_timefilter_update(TimeFilter *self, double system_time, double period);
/**
* Evaluate the filter at a specified time
*
* @param delta difference between the requested time and the current time
* (last call to ff_timefilter_update).
* @return the filtered time
*/
double ff_timefilter_eval(TimeFilter *self, double delta);
/**
* Reset the filter
*
* This function should mainly be called in case of XRUN.
*
* Warning: after calling this, the filter is in an undetermined state until
* the next call to ff_timefilter_update()
*/
void ff_timefilter_reset(TimeFilter *);
/**
* Free all resources associated with the filter
*/
void ff_timefilter_destroy(TimeFilter *);
#endif /* AVDEVICE_TIMEFILTER_H */

59
externals/ffmpeg/libavdevice/utils.c vendored Executable file
View File

@@ -0,0 +1,59 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "internal.h"
#include "libavutil/opt.h"
#include "libavformat/avformat.h"
/**
 * Allocate an AVFormatContext preconfigured for an input *device*.
 *
 * The device may be given directly (iformat) or looked up by name (format).
 * The chosen format must have a priv_class categorized as an input device;
 * its private options are allocated and set to their defaults.
 *
 * @param avctx   receives the new context on success, NULL on failure
 * @param iformat input device to use, or NULL to look `format` up
 * @param format  device short name, used when iformat is NULL
 * @return 0 on success, AVERROR(EINVAL)/AVERROR(ENOMEM) on failure
 */
int ff_alloc_input_device_context(AVFormatContext **avctx, AVInputFormat *iformat, const char *format)
{
    AVFormatContext *s;
    int ret = 0;

    *avctx = NULL;
    if (!iformat && !format)
        return AVERROR(EINVAL);
    if (!(s = avformat_alloc_context()))
        return AVERROR(ENOMEM);

    if (!iformat)
        iformat = av_find_input_format(format);
    /* Reject formats that are not registered as input devices. */
    if (!iformat || !iformat->priv_class || !AV_IS_INPUT_DEVICE(iformat->priv_class->category)) {
        ret = AVERROR(EINVAL);
        goto error;
    }
    s->iformat = iformat;
    if (s->iformat->priv_data_size > 0) {
        s->priv_data = av_mallocz(s->iformat->priv_data_size);
        if (!s->priv_data) {
            ret = AVERROR(ENOMEM);
            goto error;
        }
        if (s->iformat->priv_class) {
            /* First member of priv_data must be the AVClass pointer. */
            *(const AVClass**)s->priv_data= s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
        }
    } else
        s->priv_data = NULL;

    *avctx = s;
    return 0;
error:
    avformat_free_context(s);
    return ret;
}

114
externals/ffmpeg/libavdevice/v4l2-common.c vendored Executable file
View File

@@ -0,0 +1,114 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "v4l2-common.h"
/* Mapping between FFmpeg pixel formats / codec IDs and V4L2 fourccs.
 * For raw video both ff_fmt and codec_id are meaningful; for compressed
 * formats ff_fmt is AV_PIX_FMT_NONE and only codec_id matters. The table
 * is terminated by the AV_CODEC_ID_NONE sentinel entry. Optional fourccs
 * are guarded by #ifdef for older kernel headers. */
const struct fmt_map ff_fmt_conversion_table[] = {
    //ff_fmt              codec_id              v4l2_fmt
    { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV420  },
    { AV_PIX_FMT_YUV420P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU420  },
    { AV_PIX_FMT_YUV422P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV422P },
    { AV_PIX_FMT_YUYV422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUYV    },
    { AV_PIX_FMT_UYVY422, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_UYVY    },
    { AV_PIX_FMT_YUV411P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV411P },
    { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YUV410  },
    { AV_PIX_FMT_YUV410P, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_YVU410  },
    { AV_PIX_FMT_RGB555LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555  },
    { AV_PIX_FMT_RGB555BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB555X },
    { AV_PIX_FMT_RGB565LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565  },
    { AV_PIX_FMT_RGB565BE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB565X },
    { AV_PIX_FMT_BGR24,   AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR24   },
    { AV_PIX_FMT_RGB24,   AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB24   },
#ifdef V4L2_PIX_FMT_XBGR32
    { AV_PIX_FMT_BGR0,    AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_XBGR32  },
    { AV_PIX_FMT_0RGB,    AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_XRGB32  },
    { AV_PIX_FMT_BGRA,    AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_ABGR32  },
    { AV_PIX_FMT_ARGB,    AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_ARGB32  },
#endif
    { AV_PIX_FMT_BGR0,    AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_BGR32   },
    { AV_PIX_FMT_0RGB,    AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_RGB32   },
    { AV_PIX_FMT_GRAY8,   AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_GREY    },
#ifdef V4L2_PIX_FMT_Y16
    { AV_PIX_FMT_GRAY16LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_Y16     },
#endif
#ifdef V4L2_PIX_FMT_Z16
    { AV_PIX_FMT_GRAY16LE,AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_Z16     },
#endif
    { AV_PIX_FMT_NV12,    AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_NV12    },
    { AV_PIX_FMT_NONE,    AV_CODEC_ID_MJPEG,    V4L2_PIX_FMT_MJPEG   },
    { AV_PIX_FMT_NONE,    AV_CODEC_ID_MJPEG,    V4L2_PIX_FMT_JPEG    },
#ifdef V4L2_PIX_FMT_H264
    { AV_PIX_FMT_NONE,    AV_CODEC_ID_H264,     V4L2_PIX_FMT_H264    },
#endif
#ifdef V4L2_PIX_FMT_MPEG4
    { AV_PIX_FMT_NONE,    AV_CODEC_ID_MPEG4,    V4L2_PIX_FMT_MPEG4   },
#endif
#ifdef V4L2_PIX_FMT_CPIA1
    { AV_PIX_FMT_NONE,    AV_CODEC_ID_CPIA,     V4L2_PIX_FMT_CPIA1   },
#endif
#ifdef V4L2_PIX_FMT_SRGGB8
    { AV_PIX_FMT_BAYER_BGGR8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SBGGR8 },
    { AV_PIX_FMT_BAYER_GBRG8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SGBRG8 },
    { AV_PIX_FMT_BAYER_GRBG8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SGRBG8 },
    { AV_PIX_FMT_BAYER_RGGB8, AV_CODEC_ID_RAWVIDEO, V4L2_PIX_FMT_SRGGB8 },
#endif
    { AV_PIX_FMT_NONE,    AV_CODEC_ID_NONE,     0                    },
};
uint32_t ff_fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id)
{
int i;
for (i = 0; ff_fmt_conversion_table[i].codec_id != AV_CODEC_ID_NONE; i++) {
if ((codec_id == AV_CODEC_ID_NONE ||
ff_fmt_conversion_table[i].codec_id == codec_id) &&
(pix_fmt == AV_PIX_FMT_NONE ||
ff_fmt_conversion_table[i].ff_fmt == pix_fmt)) {
return ff_fmt_conversion_table[i].v4l2_fmt;
}
}
return 0;
}
/* Map a V4L2 fourcc back to an FFmpeg pixel format, constrained to the
 * given codec. Returns AV_PIX_FMT_NONE when no exact match exists. */
enum AVPixelFormat ff_fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id)
{
    const struct fmt_map *entry;

    for (entry = ff_fmt_conversion_table; entry->codec_id != AV_CODEC_ID_NONE; entry++)
        if (entry->v4l2_fmt == v4l2_fmt && entry->codec_id == codec_id)
            return entry->ff_fmt;

    return AV_PIX_FMT_NONE;
}
/* Map a V4L2 fourcc to the FFmpeg codec ID that decodes it.
 * Returns AV_CODEC_ID_NONE when the fourcc is unknown. */
enum AVCodecID ff_fmt_v4l2codec(uint32_t v4l2_fmt)
{
    const struct fmt_map *entry;

    for (entry = ff_fmt_conversion_table; entry->codec_id != AV_CODEC_ID_NONE; entry++)
        if (entry->v4l2_fmt == v4l2_fmt)
            return entry->codec_id;

    return AV_CODEC_ID_NONE;
}

61
externals/ffmpeg/libavdevice/v4l2-common.h vendored Executable file
View File

@@ -0,0 +1,61 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_V4L2_COMMON_H
#define AVDEVICE_V4L2_COMMON_H
#undef __STRICT_ANSI__ //workaround due to broken kernel headers
#include "config.h"
#include "libavformat/internal.h"
#include <unistd.h>
#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/time.h>
#if HAVE_SYS_VIDEOIO_H
#include <sys/videoio.h>
#else
#if HAVE_ASM_TYPES_H
#include <asm/types.h>
#endif
#include <linux/videodev2.h>
#endif
#include "libavutil/avassert.h"
#include "libavutil/imgutils.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "avdevice.h"
#include "timefilter.h"
#include "libavutil/parseutils.h"
#include "libavutil/pixdesc.h"
#include "libavutil/time.h"
#include "libavutil/avstring.h"
struct fmt_map {
enum AVPixelFormat ff_fmt;
enum AVCodecID codec_id;
uint32_t v4l2_fmt;
};
extern const struct fmt_map ff_fmt_conversion_table[];
uint32_t ff_fmt_ff2v4l(enum AVPixelFormat pix_fmt, enum AVCodecID codec_id);
enum AVPixelFormat ff_fmt_v4l2ff(uint32_t v4l2_fmt, enum AVCodecID codec_id);
enum AVCodecID ff_fmt_v4l2codec(uint32_t v4l2_fmt);
#endif /* AVDEVICE_V4L2_COMMON_H */

1146
externals/ffmpeg/libavdevice/v4l2.c vendored Executable file

File diff suppressed because it is too large Load Diff

123
externals/ffmpeg/libavdevice/v4l2enc.c vendored Executable file
View File

@@ -0,0 +1,123 @@
/*
* Copyright (c) 2013 Clément Bœsch
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "v4l2-common.h"
#include "avdevice.h"
/* Private context of the V4L2 output device. */
typedef struct {
    AVClass *class;  // AVOptions class pointer (must be first member)
    int fd;          // file descriptor of the opened V4L2 device node
} V4L2Context;
/**
 * Muxer write_header: open the V4L2 device node named by s1->url, validate
 * that exactly one video stream is present, translate its codec/pixel
 * format to a V4L2 fourcc and program the device via VIDIOC_S_FMT.
 *
 * Fix vs. the original: every error path after open() succeeded leaked the
 * file descriptor, because write_trailer is not called when write_header
 * fails. All such paths now funnel through `fail`, which closes the fd.
 *
 * @return 0 on success, a negative AVERROR code on failure
 */
static av_cold int write_header(AVFormatContext *s1)
{
    int res = 0, flags = O_RDWR;
    struct v4l2_format fmt = {
        .type = V4L2_BUF_TYPE_VIDEO_OUTPUT
    };
    V4L2Context *s = s1->priv_data;
    AVCodecParameters *par;
    uint32_t v4l2_pixfmt;

    if (s1->flags & AVFMT_FLAG_NONBLOCK)
        flags |= O_NONBLOCK;

    s->fd = open(s1->url, flags);
    if (s->fd < 0) {
        res = AVERROR(errno);
        av_log(s1, AV_LOG_ERROR, "Unable to open V4L2 device '%s'\n", s1->url);
        return res;
    }

    if (s1->nb_streams != 1 ||
        s1->streams[0]->codecpar->codec_type != AVMEDIA_TYPE_VIDEO) {
        av_log(s1, AV_LOG_ERROR,
               "V4L2 output device supports only a single raw video stream\n");
        res = AVERROR(EINVAL);
        goto fail;
    }

    par = s1->streams[0]->codecpar;

    /* Raw video maps through the pixel format; compressed streams map
     * through the codec ID alone. */
    if (par->codec_id == AV_CODEC_ID_RAWVIDEO) {
        v4l2_pixfmt = ff_fmt_ff2v4l(par->format, AV_CODEC_ID_RAWVIDEO);
    } else {
        v4l2_pixfmt = ff_fmt_ff2v4l(AV_PIX_FMT_NONE, par->codec_id);
    }

    if (!v4l2_pixfmt) { // XXX: try to force them one by one?
        av_log(s1, AV_LOG_ERROR, "Unknown V4L2 pixel format equivalent for %s\n",
               av_get_pix_fmt_name(par->format));
        res = AVERROR(EINVAL);
        goto fail;
    }

    /* Read the current format, overwrite the fields we control, write back. */
    if (ioctl(s->fd, VIDIOC_G_FMT, &fmt) < 0) {
        res = AVERROR(errno);
        av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_G_FMT): %s\n", av_err2str(res));
        goto fail;
    }

    fmt.fmt.pix.width       = par->width;
    fmt.fmt.pix.height      = par->height;
    fmt.fmt.pix.pixelformat = v4l2_pixfmt;
    fmt.fmt.pix.sizeimage   = av_image_get_buffer_size(par->format, par->width, par->height, 1);

    if (ioctl(s->fd, VIDIOC_S_FMT, &fmt) < 0) {
        res = AVERROR(errno);
        av_log(s1, AV_LOG_ERROR, "ioctl(VIDIOC_S_FMT): %s\n", av_err2str(res));
        goto fail;
    }

    return 0;

fail:
    /* write_trailer is not called when write_header fails, so release the
     * descriptor here to avoid leaking it. */
    close(s->fd);
    s->fd = -1;
    return res;
}
/**
 * Muxer write_packet: write one raw frame to the V4L2 device node.
 *
 * Fix vs. the original: write(2) may perform a short write or be
 * interrupted by a signal; the original returned success even when only
 * part of the frame was written. We now loop until the whole packet is
 * consumed, retrying on EINTR, and report any other error.
 *
 * @return 0 on success, a negative AVERROR code on write failure
 */
static int write_packet(AVFormatContext *s1, AVPacket *pkt)
{
    const V4L2Context *s = s1->priv_data;
    const uint8_t *buf   = pkt->data;
    int size             = pkt->size;

    while (size > 0) {
        ssize_t written = write(s->fd, buf, size);

        if (written < 0) {
            if (errno == EINTR)
                continue;  /* interrupted by a signal: retry */
            return AVERROR(errno);
        }
        buf  += written;
        size -= written;
    }

    return 0;
}
/* Muxer write_trailer: release the V4L2 device descriptor. */
static int write_trailer(AVFormatContext *s1)
{
    const V4L2Context *s = s1->priv_data;

    close(s->fd);
    return 0;
}
/* AVClass for the V4L2 output device (no user options). */
static const AVClass v4l2_class = {
    .class_name = "V4L2 outdev",
    .item_name  = av_default_item_name,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};
/* V4L2 output device registration. Accepts a single raw video stream;
 * AVFMT_NOFILE because the URL is a device node opened via open(2). */
AVOutputFormat ff_v4l2_muxer = {
    .name           = "video4linux2,v4l2",
    .long_name      = NULL_IF_CONFIG_SMALL("Video4Linux2 output device"),
    .priv_data_size = sizeof(V4L2Context),
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_RAWVIDEO,
    .write_header   = write_header,
    .write_packet   = write_packet,
    .write_trailer  = write_trailer,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &v4l2_class,
};

50
externals/ffmpeg/libavdevice/version.h vendored Executable file
View File

@@ -0,0 +1,50 @@
/*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#ifndef AVDEVICE_VERSION_H
#define AVDEVICE_VERSION_H
/**
* @file
* @ingroup lavd
* Libavdevice version macros
*/
#include "libavutil/version.h"
#define LIBAVDEVICE_VERSION_MAJOR 58
#define LIBAVDEVICE_VERSION_MINOR 10
#define LIBAVDEVICE_VERSION_MICRO 100
#define LIBAVDEVICE_VERSION_INT AV_VERSION_INT(LIBAVDEVICE_VERSION_MAJOR, \
LIBAVDEVICE_VERSION_MINOR, \
LIBAVDEVICE_VERSION_MICRO)
#define LIBAVDEVICE_VERSION AV_VERSION(LIBAVDEVICE_VERSION_MAJOR, \
LIBAVDEVICE_VERSION_MINOR, \
LIBAVDEVICE_VERSION_MICRO)
#define LIBAVDEVICE_BUILD LIBAVDEVICE_VERSION_INT
#define LIBAVDEVICE_IDENT "Lavd" AV_STRINGIFY(LIBAVDEVICE_VERSION)
/**
* FF_API_* defines may be placed below to indicate public API that will be
* dropped at a future version bump. The defines themselves are not part of
* the public API and may change, break or disappear at any time.
*/
#endif /* AVDEVICE_VERSION_H */

493
externals/ffmpeg/libavdevice/vfwcap.c vendored Executable file
View File

@@ -0,0 +1,493 @@
/*
* VFW capture interface
* Copyright (c) 2006-2008 Ramiro Polla
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "libavutil/internal.h"
#include "libavutil/log.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavformat/internal.h"
// windows.h must no be included before winsock2.h, and libavformat internal
// headers may include winsock2.h
#include <windows.h>
// windows.h needs to be included before vfw.h
#include <vfw.h>
#include "avdevice.h"
/* Some obsolete versions of MinGW32 before 4.0.0 lack this. */
#ifndef HWND_MESSAGE
#define HWND_MESSAGE ((HWND) -3) /* parent handle selecting a message-only window */
#endif
/** Private context of the VfW capture input device. */
struct vfw_ctx {
    const AVClass *class;
    HWND hwnd;                /**< message-only capture window receiving driver callbacks */
    HANDLE mutex;             /**< guards pktl/curbufsize against the capture callback */
    HANDLE event;             /**< signaled by the callback when a packet is queued */
    AVPacketList *pktl;       /**< FIFO of captured packets, filled by videostream_cb() */
    unsigned int curbufsize;  /**< total payload bytes currently queued in pktl */
    unsigned int frame_num;   /**< running frame counter, drives the drop heuristic */
    char *video_size; /**< A string describing video size, set by a private option. */
    char *framerate;  /**< Set by a private option. */
};
/**
 * Map a VfW compression FourCC plus bit depth onto an AVPixelFormat.
 * Uncompressed BI_RGB data is disambiguated by its bit count.
 * Returns AV_PIX_FMT_NONE for unknown combinations.
 */
static enum AVPixelFormat vfw_pixfmt(DWORD biCompression, WORD biBitCount)
{
    if (biCompression == MKTAG('U', 'Y', 'V', 'Y'))
        return AV_PIX_FMT_UYVY422;
    if (biCompression == MKTAG('Y', 'U', 'Y', '2'))
        return AV_PIX_FMT_YUYV422;
    if (biCompression == MKTAG('I', '4', '2', '0'))
        return AV_PIX_FMT_YUV420P;
    if (biCompression == BI_RGB) {
        /* 1-8 bpp mappings are untested. */
        switch (biBitCount) {
        case 1:  return AV_PIX_FMT_MONOWHITE;
        case 4:  return AV_PIX_FMT_RGB4;
        case 8:  return AV_PIX_FMT_RGB8;
        case 16: return AV_PIX_FMT_RGB555;
        case 24: return AV_PIX_FMT_BGR24;
        case 32: return AV_PIX_FMT_RGB32;
        }
    }
    return AV_PIX_FMT_NONE;
}
/**
 * Map a VfW compression FourCC onto a compressed-video codec id.
 * Returns AV_CODEC_ID_NONE if the tag is not recognized.
 */
static enum AVCodecID vfw_codecid(DWORD biCompression)
{
    if (biCompression == MKTAG('d', 'v', 's', 'd'))
        return AV_CODEC_ID_DVVIDEO;
    if (biCompression == MKTAG('M', 'J', 'P', 'G') ||
        biCompression == MKTAG('m', 'j', 'p', 'g'))
        return AV_CODEC_ID_MJPEG;
    return AV_CODEC_ID_NONE;
}
/* Log one struct member as "<name>:\t<value>" at DEBUG level; "type" is
 * the printf length/conversion suffix matching the member's type. */
#define dstruct(pctx, sname, var, type) \
av_log(pctx, AV_LOG_DEBUG, #var":\t%"type"\n", sname->var)
/* Dump every CAPTUREPARMS member (driver capture settings) at DEBUG level. */
static void dump_captureparms(AVFormatContext *s, CAPTUREPARMS *cparms)
{
    av_log(s, AV_LOG_DEBUG, "CAPTUREPARMS\n");
    dstruct(s, cparms, dwRequestMicroSecPerFrame, "lu");
    dstruct(s, cparms, fMakeUserHitOKToCapture, "d");
    dstruct(s, cparms, wPercentDropForError, "u");
    dstruct(s, cparms, fYield, "d");
    dstruct(s, cparms, dwIndexSize, "lu");
    dstruct(s, cparms, wChunkGranularity, "u");
    dstruct(s, cparms, fUsingDOSMemory, "d");
    dstruct(s, cparms, wNumVideoRequested, "u");
    dstruct(s, cparms, fCaptureAudio, "d");
    dstruct(s, cparms, wNumAudioRequested, "u");
    dstruct(s, cparms, vKeyAbort, "u");
    dstruct(s, cparms, fAbortLeftMouse, "d");
    dstruct(s, cparms, fAbortRightMouse, "d");
    dstruct(s, cparms, fLimitEnabled, "d");
    dstruct(s, cparms, wTimeLimit, "u");
    dstruct(s, cparms, fMCIControl, "d");
    dstruct(s, cparms, fStepMCIDevice, "d");
    dstruct(s, cparms, dwMCIStartTime, "lu");
    dstruct(s, cparms, dwMCIStopTime, "lu");
    dstruct(s, cparms, fStepCaptureAt2x, "d");
    dstruct(s, cparms, wStepCaptureAverageFrames, "u");
    dstruct(s, cparms, dwAudioBufferSize, "lu");
    dstruct(s, cparms, fDisableWriteCache, "d");
    dstruct(s, cparms, AVStreamMaster, "u");
}
/* Dump every VIDEOHDR member at DEBUG level.  Compiled to a no-op unless
 * DEBUG is defined, since it is called for every captured frame. */
static void dump_videohdr(AVFormatContext *s, VIDEOHDR *vhdr)
{
#ifdef DEBUG
    av_log(s, AV_LOG_DEBUG, "VIDEOHDR\n");
    dstruct(s, vhdr, lpData, "p");
    dstruct(s, vhdr, dwBufferLength, "lu");
    dstruct(s, vhdr, dwBytesUsed, "lu");
    dstruct(s, vhdr, dwTimeCaptured, "lu");
    dstruct(s, vhdr, dwUser, "lu");
    dstruct(s, vhdr, dwFlags, "lu");
    dstruct(s, vhdr, dwReserved[0], "lu");
    dstruct(s, vhdr, dwReserved[1], "lu");
    dstruct(s, vhdr, dwReserved[2], "lu");
    dstruct(s, vhdr, dwReserved[3], "lu");
#endif
}
/* Dump every BITMAPINFOHEADER member at DEBUG level, rendering the
 * compression tag also as its FourCC string. */
static void dump_bih(AVFormatContext *s, BITMAPINFOHEADER *bih)
{
    av_log(s, AV_LOG_DEBUG, "BITMAPINFOHEADER\n");
    dstruct(s, bih, biSize, "lu");
    dstruct(s, bih, biWidth, "ld");
    dstruct(s, bih, biHeight, "ld");
    dstruct(s, bih, biPlanes, "d");
    dstruct(s, bih, biBitCount, "d");
    dstruct(s, bih, biCompression, "lu");
    /* Print the same tag as readable characters (little-endian FourCC). */
    av_log(s, AV_LOG_DEBUG, "    biCompression:\t\"%.4s\"\n",
           (char*) &bih->biCompression);
    dstruct(s, bih, biSizeImage, "lu");
    dstruct(s, bih, biXPelsPerMeter, "lu");
    dstruct(s, bih, biYPelsPerMeter, "lu");
    dstruct(s, bih, biClrUsed, "lu");
    dstruct(s, bih, biClrImportant, "lu");
}
/**
 * Decide whether the frame just delivered by the driver must be dropped.
 * The drop thresholds (62/75/87/100 % buffer fullness) are cycled per
 * frame index, so the fraction of dropped frames rises as the real-time
 * buffer fills: 1 in 4 at >=62% full, everything at 100%.
 * Returns 1 to drop the frame, 0 to keep it.
 */
static int shall_we_drop(AVFormatContext *s)
{
    static const uint8_t dropscore[4] = { 62, 75, 87, 100 };
    struct vfw_ctx *ctx = s->priv_data;
    unsigned int fullness = (ctx->curbufsize * 100) / s->max_picture_buffer;
    unsigned int threshold =
        dropscore[++ctx->frame_num % FF_ARRAY_ELEMS(dropscore)];

    if (threshold > fullness)
        return 0;

    av_log(s, AV_LOG_ERROR,
           "real-time buffer %d%% full! frame dropped!\n", fullness);
    return 1;
}
/**
 * VfW capture callback, invoked by the driver for every captured frame.
 * Copies the frame into a newly allocated packet, appends it to the
 * context's packet list under the mutex and signals the reader.
 * Returns TRUE on success, FALSE when the frame is dropped or on OOM.
 */
static LRESULT CALLBACK videostream_cb(HWND hwnd, LPVIDEOHDR vdhdr)
{
    AVFormatContext *s;
    struct vfw_ctx *ctx;
    AVPacketList **ppktl, *pktl_next;

    /* The AVFormatContext pointer was stored in the window's user data
     * slot by vfw_read_header(). */
    s = (AVFormatContext *) GetWindowLongPtr(hwnd, GWLP_USERDATA);
    ctx = s->priv_data;

    dump_videohdr(s, vdhdr);

    /* Drop the frame when the real-time buffer is too full. */
    if(shall_we_drop(s))
        return FALSE;

    WaitForSingleObject(ctx->mutex, INFINITE);

    pktl_next = av_mallocz(sizeof(AVPacketList));
    if(!pktl_next)
        goto fail;

    if(av_new_packet(&pktl_next->pkt, vdhdr->dwBytesUsed) < 0) {
        av_free(pktl_next);
        goto fail;
    }

    /* Stream time base is 1/1000, so dwTimeCaptured is used directly. */
    pktl_next->pkt.pts = vdhdr->dwTimeCaptured;
    memcpy(pktl_next->pkt.data, vdhdr->lpData, vdhdr->dwBytesUsed);

    /* Append at the tail of the singly linked packet list. */
    for(ppktl = &ctx->pktl ; *ppktl ; ppktl = &(*ppktl)->next);
    *ppktl = pktl_next;

    ctx->curbufsize += vdhdr->dwBytesUsed;

    /* Wake a reader blocked in vfw_read_packet(). */
    SetEvent(ctx->event);
    ReleaseMutex(ctx->mutex);

    return TRUE;
fail:
    ReleaseMutex(ctx->mutex);
    return FALSE;
}
/**
 * Tear down the capture session: detach the callback, disconnect the
 * driver, destroy the window and synchronization objects, and release
 * any packets still queued.  Always returns 0.
 */
static int vfw_read_close(AVFormatContext *s)
{
    struct vfw_ctx *ctx = s->priv_data;
    AVPacketList *cur, *next;

    if (ctx->hwnd) {
        /* Stop callbacks before disconnecting and destroying the window. */
        SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0, 0);
        SendMessage(ctx->hwnd, WM_CAP_DRIVER_DISCONNECT, 0, 0);
        DestroyWindow(ctx->hwnd);
    }

    if (ctx->mutex)
        CloseHandle(ctx->mutex);
    if (ctx->event)
        CloseHandle(ctx->event);

    /* Drain whatever the capture callback queued but was never read. */
    for (cur = ctx->pktl; cur; cur = next) {
        next = cur->next;
        av_packet_unref(&cur->pkt);
        av_free(cur);
    }

    return 0;
}
/**
 * Open the VfW capture device selected by s->url and create one video
 * stream.  The special URL "list" enumerates drivers 0-9 to the log and
 * deliberately fails; any other URL is parsed as a driver number.
 * Returns 0 on success or a negative AVERROR code.
 */
static int vfw_read_header(AVFormatContext *s)
{
    struct vfw_ctx *ctx = s->priv_data;
    AVCodecParameters *par;
    AVStream *st;
    int devnum;
    int bisize;
    BITMAPINFO *bi = NULL;
    CAPTUREPARMS cparms;
    DWORD biCompression;
    WORD biBitCount;
    int ret;
    AVRational framerate_q;

    /* "list": print available capture drivers and bail out. */
    if (!strcmp(s->url, "list")) {
        for (devnum = 0; devnum <= 9; devnum++) {
            char driver_name[256];
            char driver_ver[256];
            ret = capGetDriverDescription(devnum,
                                          driver_name, sizeof(driver_name),
                                          driver_ver, sizeof(driver_ver));
            if (ret) {
                av_log(s, AV_LOG_INFO, "Driver %d\n", devnum);
                av_log(s, AV_LOG_INFO, " %s\n", driver_name);
                av_log(s, AV_LOG_INFO, " %s\n", driver_ver);
            }
        }
        return AVERROR(EIO);
    }

    /* Message-only window: receives driver callbacks without being shown. */
    ctx->hwnd = capCreateCaptureWindow(NULL, 0, 0, 0, 0, 0, HWND_MESSAGE, 0);
    if(!ctx->hwnd) {
        av_log(s, AV_LOG_ERROR, "Could not create capture window.\n");
        return AVERROR(EIO);
    }

    /* If atoi fails, devnum==0 and the default device is used */
    devnum = atoi(s->url);

    ret = SendMessage(ctx->hwnd, WM_CAP_DRIVER_CONNECT, devnum, 0);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not connect to device.\n");
        DestroyWindow(ctx->hwnd);
        return AVERROR(ENODEV);
    }

    /* Frames are consumed via the callback only; disable on-screen paths. */
    SendMessage(ctx->hwnd, WM_CAP_SET_OVERLAY, 0, 0);
    SendMessage(ctx->hwnd, WM_CAP_SET_PREVIEW, 0, 0);

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_CALLBACK_VIDEOSTREAM, 0,
                      (LPARAM) videostream_cb);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not set video stream callback.\n");
        goto fail;
    }

    /* Let videostream_cb() recover this AVFormatContext from the window. */
    SetWindowLongPtr(ctx->hwnd, GWLP_USERDATA, (LONG_PTR) s);

    st = avformat_new_stream(s, NULL);
    if(!st) {
        vfw_read_close(s);
        return AVERROR(ENOMEM);
    }

    /* Set video format */
    bisize = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, 0, 0);
    if(!bisize)
        goto fail;
    bi = av_malloc(bisize);
    if(!bi) {
        vfw_read_close(s);
        return AVERROR(ENOMEM);
    }
    ret = SendMessage(ctx->hwnd, WM_CAP_GET_VIDEOFORMAT, bisize, (LPARAM) bi);
    if(!ret)
        goto fail;
    dump_bih(s, &bi->bmiHeader);

    ret = av_parse_video_rate(&framerate_q, ctx->framerate);
    if (ret < 0) {
        av_log(s, AV_LOG_ERROR, "Could not parse framerate '%s'.\n", ctx->framerate);
        goto fail;
    }

    /* Override the driver's frame size if the user requested one. */
    if (ctx->video_size) {
        int w, h;
        ret = av_parse_video_size(&w, &h, ctx->video_size);
        if (ret < 0) {
            av_log(s, AV_LOG_ERROR, "Couldn't parse video size.\n");
            goto fail;
        }
        bi->bmiHeader.biWidth = w;
        bi->bmiHeader.biHeight = h;
    }

    if (0) {
        /* For testing yet unsupported compressions
         * Copy these values from user-supplied verbose information */
        bi->bmiHeader.biWidth = 320;
        bi->bmiHeader.biHeight = 240;
        bi->bmiHeader.biPlanes = 1;
        bi->bmiHeader.biBitCount = 12;
        bi->bmiHeader.biCompression = MKTAG('I','4','2','0');
        bi->bmiHeader.biSizeImage = 115200;
        dump_bih(s, &bi->bmiHeader);
    }

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_VIDEOFORMAT, bisize, (LPARAM) bi);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not set Video Format.\n");
        goto fail;
    }

    biCompression = bi->bmiHeader.biCompression;
    biBitCount = bi->bmiHeader.biBitCount;

    /* Set sequence setup */
    ret = SendMessage(ctx->hwnd, WM_CAP_GET_SEQUENCE_SETUP, sizeof(cparms),
                      (LPARAM) &cparms);
    if(!ret)
        goto fail;

    dump_captureparms(s, &cparms);

    cparms.fYield = 1; // Spawn a background thread
    cparms.dwRequestMicroSecPerFrame =
                               (framerate_q.den*1000000) / framerate_q.num;
    /* Capture must not be interruptible by mouse/keyboard and carries
     * no audio. */
    cparms.fAbortLeftMouse = 0;
    cparms.fAbortRightMouse = 0;
    cparms.fCaptureAudio = 0;
    cparms.vKeyAbort = 0;

    ret = SendMessage(ctx->hwnd, WM_CAP_SET_SEQUENCE_SETUP, sizeof(cparms),
                      (LPARAM) &cparms);
    if(!ret)
        goto fail;

    st->avg_frame_rate = framerate_q;

    par = st->codecpar;
    par->codec_type = AVMEDIA_TYPE_VIDEO;
    par->width = bi->bmiHeader.biWidth;
    par->height = bi->bmiHeader.biHeight;
    par->format = vfw_pixfmt(biCompression, biBitCount);
    if (par->format == AV_PIX_FMT_NONE) {
        /* Not a raw format we know: try a compressed codec instead. */
        par->codec_id = vfw_codecid(biCompression);
        if (par->codec_id == AV_CODEC_ID_NONE) {
            avpriv_report_missing_feature(s, "This compression type");
            vfw_read_close(s);
            return AVERROR_PATCHWELCOME;
        }
        par->bits_per_coded_sample = biBitCount;
    } else {
        par->codec_id = AV_CODEC_ID_RAWVIDEO;
        if(biCompression == BI_RGB) {
            par->bits_per_coded_sample = biBitCount;
            /* Tell the rawvideo decoder the rows are stored bottom-up. */
            par->extradata = av_malloc(9 + AV_INPUT_BUFFER_PADDING_SIZE);
            if (par->extradata) {
                par->extradata_size = 9;
                memcpy(par->extradata, "BottomUp", 9);
            }
        }
    }

    av_freep(&bi);

    /* Timestamps are milliseconds (1/1000 time base) with 32-bit wrap. */
    avpriv_set_pts_info(st, 32, 1, 1000);

    ctx->mutex = CreateMutex(NULL, 0, NULL);
    if(!ctx->mutex) {
        av_log(s, AV_LOG_ERROR, "Could not create Mutex.\n" );
        goto fail;
    }
    ctx->event = CreateEvent(NULL, 1, 0, NULL);
    if(!ctx->event) {
        av_log(s, AV_LOG_ERROR, "Could not create Event.\n" );
        goto fail;
    }

    /* Start streaming; NOFILE means frames go to the callback only. */
    ret = SendMessage(ctx->hwnd, WM_CAP_SEQUENCE_NOFILE, 0, 0);
    if(!ret) {
        av_log(s, AV_LOG_ERROR, "Could not start capture sequence.\n" );
        goto fail;
    }

    return 0;

fail:
    av_freep(&bi);
    vfw_read_close(s);
    return AVERROR(EIO);
}
/**
 * Pop the next captured packet off the list filled by videostream_cb().
 * Blocks on ctx->event until a packet arrives, unless AVFMT_FLAG_NONBLOCK
 * is set, in which case AVERROR(EAGAIN) is returned immediately.
 * Returns the packet size on success.
 */
static int vfw_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    struct vfw_ctx *ctx = s->priv_data;
    AVPacketList *pktl = NULL;

    while(!pktl) {
        WaitForSingleObject(ctx->mutex, INFINITE);
        pktl = ctx->pktl;
        if(ctx->pktl) {
            /* Hand the head packet's ownership to the caller and unlink. */
            *pkt = ctx->pktl->pkt;
            ctx->pktl = ctx->pktl->next;
            av_free(pktl);
        }
        ResetEvent(ctx->event);
        ReleaseMutex(ctx->mutex);
        if(!pktl) {
            if(s->flags & AVFMT_FLAG_NONBLOCK) {
                return AVERROR(EAGAIN);
            } else {
                /* Sleep until the capture callback signals a new packet. */
                WaitForSingleObject(ctx->event, INFINITE);
            }
        }
    }

    /* NOTE(review): curbufsize is decremented outside the mutex here while
     * the capture callback increments it under the mutex — confirm this
     * unsynchronized update is acceptable. */
    ctx->curbufsize -= pkt->size;
    return pkt->size;
}
#define OFFSET(x) offsetof(struct vfw_ctx, x)
#define DEC AV_OPT_FLAG_DECODING_PARAM

/* User options: requested capture size and frame rate, both as strings
 * parsed later with av_parse_video_size()/av_parse_video_rate(). */
static const AVOption options[] = {
    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(video_size), AV_OPT_TYPE_STRING, {.str = NULL}, 0, 0, DEC },
    { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc"}, 0, 0, DEC },
    { NULL },
};

/* AVClass exposing the options above through the private context. */
static const AVClass vfw_class = {
    .class_name = "VFW indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT
};

/* Input device definition registered under the name "vfwcap". */
AVInputFormat ff_vfwcap_demuxer = {
    .name           = "vfwcap",
    .long_name      = NULL_IF_CONFIG_SMALL("VfW video capture"),
    .priv_data_size = sizeof(struct vfw_ctx),
    .read_header    = vfw_read_header,
    .read_packet    = vfw_read_packet,
    .read_close     = vfw_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &vfw_class,
};

745
externals/ffmpeg/libavdevice/xcbgrab.c vendored Executable file
View File

@@ -0,0 +1,745 @@
/*
* XCB input grabber
* Copyright (C) 2014 Luca Barbato <lu_zero@gentoo.org>
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
#include "config.h"
#include <stdlib.h>
#include <xcb/xcb.h>
#if CONFIG_LIBXCB_XFIXES
#include <xcb/xfixes.h>
#endif
#if CONFIG_LIBXCB_SHM
#include <sys/shm.h>
#include <xcb/shm.h>
#endif
#if CONFIG_LIBXCB_SHAPE
#include <xcb/shape.h>
#endif
#include "libavutil/internal.h"
#include "libavutil/mathematics.h"
#include "libavutil/opt.h"
#include "libavutil/parseutils.h"
#include "libavutil/time.h"
#include "libavformat/avformat.h"
#include "libavformat/internal.h"
/** Private context of the XCB screen grabber. */
typedef struct XCBGrabContext {
    const AVClass *class;

    xcb_connection_t *conn;   /**< connection to the X server */
    xcb_screen_t *screen;     /**< screen being captured */
    xcb_window_t window;      /**< outline window created when show_region is set */
#if CONFIG_LIBXCB_SHM
    AVBufferPool *shm_pool;   /**< pool of SysV SHM segments holding frame data */
#endif
    int64_t time_frame;       /**< target wall-clock time of the next frame (us) */
    AVRational time_base;     /**< inverse of the configured frame rate */
    int64_t frame_duration;   /**< one frame in AV_TIME_BASE units */

    int x, y;                 /**< top-left corner of the grabbed region */
    int width, height;        /**< size of the grabbed region */
    int frame_size;           /**< bytes per frame (width * height * bpp / 8) */
    int bpp;                  /**< bits per pixel of the captured image */

    int draw_mouse;           /**< option: composite the pointer into frames */
    int follow_mouse;         /**< option: shift the region to track the pointer */
    int show_region;          /**< option: show the outline window */
    int region_border;        /**< option: outline thickness in pixels */
    int centered;             /* NOTE(review): appears unused — the "centered"
                               * option is an AV_OPT_TYPE_CONST mapped to
                               * follow_mouse, not to this field. Confirm. */
    const char *framerate;    /**< option: frame rate as a string */

    int has_shm;              /**< nonzero when the MIT-SHM extension is usable */
} XCBGrabContext;
/* Sentinel for follow_mouse: keep the pointer centered in the region. */
#define FOLLOW_CENTER -1

#define OFFSET(x) offsetof(XCBGrabContext, x)
#define D AV_OPT_FLAG_DECODING_PARAM
/* User-visible options of the grabber ("grab_x"/"grab_y" are aliases of
 * "x"/"y"; "centered" is a named constant for follow_mouse). */
static const AVOption options[] = {
    { "x", "Initial x coordinate.", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, D },
    { "y", "Initial y coordinate.", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, D },
    { "grab_x", "Initial x coordinate.", OFFSET(x), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, D },
    { "grab_y", "Initial y coordinate.", OFFSET(y), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, INT_MAX, D },
    { "video_size", "A string describing frame size, such as 640x480 or hd720.", OFFSET(width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL }, 0, 0, D },
    { "framerate", "", OFFSET(framerate), AV_OPT_TYPE_STRING, {.str = "ntsc" }, 0, 0, D },
    { "draw_mouse", "Draw the mouse pointer.", OFFSET(draw_mouse), AV_OPT_TYPE_INT, { .i64 = 1 }, 0, 1, D },
    { "follow_mouse", "Move the grabbing region when the mouse pointer reaches within specified amount of pixels to the edge of region.",
      OFFSET(follow_mouse), AV_OPT_TYPE_INT, { .i64 = 0 }, FOLLOW_CENTER, INT_MAX, D, "follow_mouse" },
    { "centered", "Keep the mouse pointer at the center of grabbing region when following.", 0, AV_OPT_TYPE_CONST, { .i64 = -1 }, INT_MIN, INT_MAX, D, "follow_mouse" },
    { "show_region", "Show the grabbing region.", OFFSET(show_region), AV_OPT_TYPE_INT, { .i64 = 0 }, 0, 1, D },
    { "region_border", "Set the region border thickness.", OFFSET(region_border), AV_OPT_TYPE_INT, { .i64 = 3 }, 1, 128, D },
    { NULL },
};

/* AVClass exposing the options above through the private context. */
static const AVClass xcbgrab_class = {
    .class_name = "xcbgrab indev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_INPUT,
};
/**
 * Shift the grabbing region so it tracks the mouse pointer, then clamp
 * it to the screen geometry.  With follow_mouse == FOLLOW_CENTER the
 * pointer is kept at the region's center; otherwise the region moves
 * only when the pointer comes within follow_mouse pixels of its edge.
 * Returns 0 on success, AVERROR(EIO) when either reply is missing.
 */
static int xcbgrab_reposition(AVFormatContext *s,
                              xcb_query_pointer_reply_t *p,
                              xcb_get_geometry_reply_t *geo)
{
    XCBGrabContext *c = s->priv_data;
    int w = c->width, h = c->height;
    int f = c->follow_mouse;
    int x = c->x, y = c->y;

    if (!p || !geo)
        return AVERROR(EIO);

    if (f == FOLLOW_CENTER) {
        x = p->win_x - w / 2;
        y = p->win_y - h / 2;
    } else {
        /* Push each axis independently when the pointer crosses the
         * inner margin of f pixels. */
        if (p->win_x > x + w - f)
            x += p->win_x - (x + w - f);
        else if (p->win_x < x + f)
            x -= (x + f) - p->win_x;

        if (p->win_y > y + h - f)
            y += p->win_y - (y + h - f);
        else if (p->win_y < y + f)
            y -= (y + f) - p->win_y;
    }

    /* Keep the whole region on screen. */
    c->x = FFMIN(FFMAX(0, x), geo->width - w);
    c->y = FFMIN(FFMAX(0, y), geo->height - h);

    return 0;
}
/* AVBuffer free callback: release the xcb_get_image reply that owns the
 * packet's pixel data (xcb replies are released with plain free()). */
static void xcbgrab_image_reply_free(void *opaque, uint8_t *data)
{
    free(opaque);
}
/**
 * Grab one frame through a plain (non-SHM) xcb_get_image round trip.
 * The reply's pixel data is wrapped zero-copy into the packet's buffer;
 * the reply itself is freed by xcbgrab_image_reply_free() once the last
 * packet reference is dropped.  Returns 0 or a negative AVERROR code.
 */
static int xcbgrab_frame(AVFormatContext *s, AVPacket *pkt)
{
    XCBGrabContext *c = s->priv_data;
    xcb_get_image_cookie_t iq;
    xcb_get_image_reply_t *img;
    xcb_drawable_t drawable = c->screen->root;
    xcb_generic_error_t *e = NULL;
    uint8_t *data;
    int length;

    /* plane_mask ~0: request every plane. */
    iq = xcb_get_image(c->conn, XCB_IMAGE_FORMAT_Z_PIXMAP, drawable,
                       c->x, c->y, c->width, c->height, ~0);
    img = xcb_get_image_reply(c->conn, iq, &e);

    if (e) {
        av_log(s, AV_LOG_ERROR,
               "Cannot get the image data "
               "event_error: response_type:%u error_code:%u "
               "sequence:%u resource_id:%u minor_code:%u major_code:%u.\n",
               e->response_type, e->error_code,
               e->sequence, e->resource_id, e->minor_code, e->major_code);
        free(e);
        return AVERROR(EACCES);
    }

    if (!img)
        return AVERROR(EAGAIN);

    data = xcb_get_image_data(img);
    length = xcb_get_image_data_length(img);

    av_init_packet(pkt);

    /* Zero-copy: the buffer borrows the reply's data and frees the reply. */
    pkt->buf = av_buffer_create(data, length, xcbgrab_image_reply_free, img, 0);
    if (!pkt->buf) {
        free(img);
        return AVERROR(ENOMEM);
    }

    pkt->data = data;
    pkt->size = length;

    return 0;
}
/**
 * Busy-sleep until the next frame is due, advancing the schedule by one
 * nominal frame duration.  Returns the wall-clock time (av_gettime())
 * observed when the wait ended, used as the packet timestamp.
 */
static int64_t wait_frame(AVFormatContext *s, AVPacket *pkt)
{
    XCBGrabContext *c = s->priv_data;
    int64_t now;

    /* Next deadline is one frame after the previous one. */
    c->time_frame += c->frame_duration;

    /* Sleep until the clock catches up with the deadline. */
    while ((now = av_gettime()) < c->time_frame)
        av_usleep(c->time_frame - now);

    return now;
}
#if CONFIG_LIBXCB_SHM
/**
 * Probe the MIT-SHM extension with a version query.
 * Returns 1 when the server answers, 0 otherwise.
 */
static int check_shm(xcb_connection_t *conn)
{
    xcb_shm_query_version_reply_t *reply =
        xcb_shm_query_version_reply(conn, xcb_shm_query_version(conn), NULL);
    int available = reply != NULL;

    free(reply); /* free(NULL) is a no-op */
    return available;
}
/* AVBufferPool free callback: detach this process from the SysV shared
 * memory segment backing the buffer (the id was already marked IPC_RMID
 * in allocate_shm_buffer(), so the last detach destroys it). */
static void free_shm_buffer(void *opaque, uint8_t *data)
{
    shmdt(data);
}
/**
 * AVBufferPool allocator: create a SysV SHM segment of the given size,
 * attach it to the X server (xcb_shm_attach) and to this process (shmat),
 * and wrap the local mapping in an AVBufferRef.  The XCB segment id is
 * stored as the buffer's opaque so xcbgrab_frame_shm() can hand it to the
 * server.  Returns NULL on failure.
 */
static AVBufferRef *allocate_shm_buffer(void *opaque, int size)
{
    xcb_connection_t *conn = opaque;
    xcb_shm_seg_t segment;
    AVBufferRef *ref;
    uint8_t *data;
    int id;

    id = shmget(IPC_PRIVATE, size, IPC_CREAT | 0777);
    if (id == -1)
        return NULL;

    segment = xcb_generate_id(conn);
    xcb_shm_attach(conn, segment, id, 0);
    data = shmat(id, NULL, 0);
    /* Mark for removal immediately: the segment lives on until the last
     * attachment (local and server-side) goes away. */
    shmctl(id, IPC_RMID, 0);
    if ((intptr_t)data == -1 || !data)
        return NULL;

    /* The segment id travels with the buffer as its opaque pointer. */
    ref = av_buffer_create(data, size, free_shm_buffer, (void *)(ptrdiff_t)segment, 0);
    if (!ref)
        shmdt(data);

    return ref;
}
/**
 * Grab one frame through the MIT-SHM extension: the server writes the
 * pixels directly into a pooled shared-memory segment, avoiding the
 * copy through the X protocol stream.  Returns 0 or a negative AVERROR
 * code (EACCES on a server-side error, ENOMEM if no buffer is free).
 */
static int xcbgrab_frame_shm(AVFormatContext *s, AVPacket *pkt)
{
    XCBGrabContext *c = s->priv_data;
    xcb_shm_get_image_cookie_t iq;
    xcb_shm_get_image_reply_t *img;
    xcb_drawable_t drawable = c->screen->root;
    xcb_generic_error_t *e = NULL;
    AVBufferRef *buf;
    xcb_shm_seg_t segment;

    buf = av_buffer_pool_get(c->shm_pool);
    if (!buf) {
        av_log(s, AV_LOG_ERROR, "Could not get shared memory buffer.\n");
        return AVERROR(ENOMEM);
    }

    /* The buffer's opaque is the segment id set by allocate_shm_buffer(). */
    segment = (xcb_shm_seg_t)av_buffer_pool_buffer_get_opaque(buf);
    iq = xcb_shm_get_image(c->conn, drawable,
                           c->x, c->y, c->width, c->height, ~0,
                           XCB_IMAGE_FORMAT_Z_PIXMAP, segment, 0);
    img = xcb_shm_get_image_reply(c->conn, iq, &e);

    xcb_flush(c->conn);

    if (e) {
        av_log(s, AV_LOG_ERROR,
               "Cannot get the image data "
               "event_error: response_type:%u error_code:%u "
               "sequence:%u resource_id:%u minor_code:%u major_code:%u.\n",
               e->response_type, e->error_code,
               e->sequence, e->resource_id, e->minor_code, e->major_code);

        free(e);
        av_buffer_unref(&buf);
        return AVERROR(EACCES);
    }

    /* The reply carries no pixel data (it is in the SHM segment). */
    free(img);

    av_init_packet(pkt);
    pkt->buf = buf;
    pkt->data = buf->data;
    pkt->size = c->frame_size;

    return 0;
}
#endif /* CONFIG_LIBXCB_SHM */
#if CONFIG_LIBXCB_XFIXES
/**
 * Probe the XFixes extension with a version query.
 * Returns 1 when the server answers, 0 otherwise.
 */
static int check_xfixes(xcb_connection_t *conn)
{
    xcb_xfixes_query_version_cookie_t cookie =
        xcb_xfixes_query_version(conn, XCB_XFIXES_MAJOR_VERSION,
                                 XCB_XFIXES_MINOR_VERSION);
    xcb_xfixes_query_version_reply_t *reply =
        xcb_xfixes_query_version_reply(conn, cookie, NULL);
    int available = reply != NULL;

    free(reply); /* free(NULL) is a no-op */
    return available;
}
/* Alpha-blend one channel: "target" is the cursor value (presumably
 * premultiplied by its alpha, as it is added at full weight — typical of
 * XFixes cursor images; confirm), "source" the underlying image value
 * weighted by the remaining coverage; "+ 255 / 2" rounds the division. */
#define BLEND(target, source, alpha) \
(target) + ((source) * (255 - (alpha)) + 255 / 2) / 255
/**
 * Composite the mouse cursor (fetched via XFixes) into the captured
 * frame: the cursor rectangle is clipped against the grabbed region and
 * alpha-blended pixel by pixel into the packet data.  Best effort — any
 * failure just leaves the frame without a cursor.
 *
 * Fix: the original leaked the cursor-image reply "ci" when
 * xcb_xfixes_get_cursor_image_cursor_image() returned NULL; all exits
 * now funnel through a single free(ci).  An explicit empty-intersection
 * guard also avoids computing out-of-range pointer offsets when the
 * cursor lies entirely outside the region.
 */
static void xcbgrab_draw_mouse(AVFormatContext *s, AVPacket *pkt,
                               xcb_query_pointer_reply_t *p,
                               xcb_get_geometry_reply_t *geo)
{
    XCBGrabContext *gr = s->priv_data;
    uint32_t *cursor;
    uint8_t *image = pkt->data;
    int stride = gr->bpp / 8;
    xcb_xfixes_get_cursor_image_cookie_t cc;
    xcb_xfixes_get_cursor_image_reply_t *ci;
    int cx, cy, x, y, w, h, c_off, i_off;

    cc = xcb_xfixes_get_cursor_image(gr->conn);
    ci = xcb_xfixes_get_cursor_image_reply(gr->conn, cc, NULL);
    if (!ci)
        return;

    cursor = xcb_xfixes_get_cursor_image_cursor_image(ci);
    if (!cursor)
        goto end; /* was a bare return, leaking ci */

    /* Cursor top-left corner in screen coordinates. */
    cx = ci->x - ci->xhot;
    cy = ci->y - ci->yhot;

    /* Intersect the cursor rectangle with the grabbed region. */
    x = FFMAX(cx, gr->x);
    y = FFMAX(cy, gr->y);
    w = FFMIN(cx + ci->width, gr->x + gr->width) - x;
    h = FFMIN(cy + ci->height, gr->y + gr->height) - y;
    if (w <= 0 || h <= 0)
        goto end; /* cursor entirely outside the region */

    c_off = x - cx;    /* columns skipped at the left of the cursor  */
    i_off = x - gr->x; /* columns skipped at the left of the image   */

    cursor += (y - cy) * ci->width;
    image += (y - gr->y) * gr->width * stride;
    for (y = 0; y < h; y++) {
        cursor += c_off;
        image += i_off * stride;
        for (x = 0; x < w; x++, cursor++, image += stride) {
            int r, g, b, a;

            r = *cursor & 0xff;
            g = (*cursor >> 8) & 0xff;
            b = (*cursor >> 16) & 0xff;
            a = (*cursor >> 24) & 0xff;

            if (!a)
                continue;

            if (a == 255) {
                /* Fully opaque: overwrite the pixel. */
                image[0] = r;
                image[1] = g;
                image[2] = b;
            } else {
                image[0] = BLEND(r, image[0], a);
                image[1] = BLEND(g, image[1], a);
                image[2] = BLEND(b, image[2], a);
            }
        }
        /* Advance past the unvisited right-hand parts of both rows. */
        cursor += ci->width - w - c_off;
        image += (gr->width - w - i_off) * stride;
    }

end:
    free(ci);
}
#endif /* CONFIG_LIBXCB_XFIXES */
/* Move the region-outline window so it stays aligned with the (possibly
 * repositioned) grabbing region; the border sits outside the region. */
static void xcbgrab_update_region(AVFormatContext *s)
{
    XCBGrabContext *c = s->priv_data;
    const uint32_t args[] = { c->x - c->region_border,
                              c->y - c->region_border };

    xcb_configure_window(c->conn,
                         c->window,
                         XCB_CONFIG_WINDOW_X | XCB_CONFIG_WINDOW_Y,
                         args);
}
/**
 * Read one frame: wait until the frame is due, optionally reposition the
 * region to follow the mouse, grab via MIT-SHM when available (falling
 * back permanently to plain GetImage on SHM failure), then optionally
 * composite the cursor.  Returns 0 or a negative AVERROR code.
 *
 * Fix: xcb_query_pointer_reply() can return NULL (e.g. on a server
 * error); "p" was dereferenced unchecked for both the follow_mouse and
 * the draw_mouse paths.  Both dereferences are now NULL-guarded.
 */
static int xcbgrab_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    XCBGrabContext *c = s->priv_data;
    xcb_query_pointer_cookie_t pc;
    xcb_get_geometry_cookie_t gc;
    xcb_query_pointer_reply_t *p = NULL;
    xcb_get_geometry_reply_t *geo = NULL;
    int ret = 0;
    int64_t pts;

    /* Pace the capture to the configured frame rate. */
    pts = wait_frame(s, pkt);

    if (c->follow_mouse || c->draw_mouse) {
        /* Issue both requests before reading either reply. */
        pc = xcb_query_pointer(c->conn, c->screen->root);
        gc = xcb_get_geometry(c->conn, c->screen->root);
        p = xcb_query_pointer_reply(c->conn, pc, NULL);
        geo = xcb_get_geometry_reply(c->conn, gc, NULL);
    }

    if (c->follow_mouse && p && p->same_screen)
        xcbgrab_reposition(s, p, geo);

    if (c->show_region)
        xcbgrab_update_region(s);

#if CONFIG_LIBXCB_SHM
    /* On SHM failure, disable it for the rest of the session. */
    if (c->has_shm && xcbgrab_frame_shm(s, pkt) < 0) {
        av_log(s, AV_LOG_WARNING, "Continuing without shared memory.\n");
        c->has_shm = 0;
    }
#endif
    if (!c->has_shm)
        ret = xcbgrab_frame(s, pkt);
    pkt->dts = pkt->pts = pts;
    pkt->duration = c->frame_duration;

#if CONFIG_LIBXCB_XFIXES
    if (ret >= 0 && c->draw_mouse && p && p->same_screen)
        xcbgrab_draw_mouse(s, pkt, p, geo);
#endif

    free(p);
    free(geo);

    return ret;
}
/* Release the SHM buffer pool (if built) and close the X connection.
 * Always returns 0. */
static av_cold int xcbgrab_read_close(AVFormatContext *s)
{
    XCBGrabContext *ctx = s->priv_data;

#if CONFIG_LIBXCB_SHM
    av_buffer_pool_uninit(&ctx->shm_pool);
#endif

    xcb_disconnect(ctx->conn);

    return 0;
}
/**
 * Return the screen_num-th screen of the connection's setup, or NULL
 * when fewer screens exist.
 */
static xcb_screen_t *get_screen(const xcb_setup_t *setup, int screen_num)
{
    xcb_screen_iterator_t it = xcb_setup_roots_iterator(setup);

    /* Skip ahead until the requested index (or the end of the list). */
    while (it.rem > 0 && screen_num > 0) {
        xcb_screen_next(&it);
        screen_num--;
    }

    return (it.rem > 0 && screen_num == 0) ? it.data : NULL;
}
/**
 * Translate the X server's pixmap format for the given depth into an
 * AVPixelFormat plus its bits-per-pixel value, by scanning the setup's
 * pixmap-format table.  Returns 0 on success, AVERROR_PATCHWELCOME when
 * no known mapping exists for the depth/bpp combination.
 */
static int pixfmt_from_pixmap_format(AVFormatContext *s, int depth,
                                     int *pix_fmt, int *bpp)
{
    XCBGrabContext *c = s->priv_data;
    const xcb_setup_t *setup = xcb_get_setup(c->conn);
    const xcb_format_t *fmt = xcb_setup_pixmap_formats(setup);
    int length = xcb_setup_pixmap_formats_length(setup);

    *pix_fmt = 0;

    while (length--) {
        if (fmt->depth == depth) {
            /* Both depth and bits_per_pixel must match for a mapping. */
            switch (depth) {
            case 32:
                if (fmt->bits_per_pixel == 32)
                    *pix_fmt = AV_PIX_FMT_0RGB;
                break;
            case 24:
                if (fmt->bits_per_pixel == 32)
                    *pix_fmt = AV_PIX_FMT_0RGB32;
                else if (fmt->bits_per_pixel == 24)
                    *pix_fmt = AV_PIX_FMT_RGB24;
                break;
            case 16:
                if (fmt->bits_per_pixel == 16)
                    *pix_fmt = AV_PIX_FMT_RGB565;
                break;
            case 15:
                if (fmt->bits_per_pixel == 16)
                    *pix_fmt = AV_PIX_FMT_RGB555;
                break;
            case 8:
                if (fmt->bits_per_pixel == 8)
                    *pix_fmt = AV_PIX_FMT_RGB8;
                break;
            }
        }

        if (*pix_fmt) {
            *bpp = fmt->bits_per_pixel;
            return 0;
        }

        fmt++;
    }
    avpriv_report_missing_feature(s, "Mapping this pixmap format");

    return AVERROR_PATCHWELCOME;
}
/**
 * Create and configure the single video stream: parse the frame rate,
 * default the region size to the full screen, validate the region
 * against the screen geometry, derive the pixel format and frame size,
 * and (when built with SHM) set up the shared-memory buffer pool.
 * Returns 0 on success or a negative AVERROR code.
 */
static int create_stream(AVFormatContext *s)
{
    XCBGrabContext *c = s->priv_data;
    AVStream *st = avformat_new_stream(s, NULL);
    xcb_get_geometry_cookie_t gc;
    xcb_get_geometry_reply_t *geo;
    int64_t frame_size_bits;
    int ret;

    if (!st)
        return AVERROR(ENOMEM);

    ret = av_parse_video_rate(&st->avg_frame_rate, c->framerate);
    if (ret < 0)
        return ret;

    /* Timestamps are in microseconds. */
    avpriv_set_pts_info(st, 64, 1, 1000000);

    gc = xcb_get_geometry(c->conn, c->screen->root);
    geo = xcb_get_geometry_reply(c->conn, gc, NULL);
    if (!geo)
        return AVERROR_EXTERNAL;

    /* No size requested: grab the whole screen. */
    if (!c->width || !c->height) {
        c->width = geo->width;
        c->height = geo->height;
    }

    if (c->x + c->width > geo->width ||
        c->y + c->height > geo->height) {
        av_log(s, AV_LOG_ERROR,
               "Capture area %dx%d at position %d.%d "
               "outside the screen size %dx%d\n",
               c->width, c->height,
               c->x, c->y,
               geo->width, geo->height);
        free(geo);
        return AVERROR(EINVAL);
    }

    c->time_base = (AVRational){ st->avg_frame_rate.den,
                                 st->avg_frame_rate.num };
    c->frame_duration = av_rescale_q(1, c->time_base, AV_TIME_BASE_Q);
    c->time_frame = av_gettime();

    ret = pixfmt_from_pixmap_format(s, geo->depth, &st->codecpar->format, &c->bpp);
    free(geo);
    if (ret < 0)
        return ret;

    /* Guard against int overflow of the frame size computation. */
    frame_size_bits = (int64_t)c->width * c->height * c->bpp;
    if (frame_size_bits / 8 + AV_INPUT_BUFFER_PADDING_SIZE > INT_MAX) {
        av_log(s, AV_LOG_ERROR, "Captured area is too large\n");
        return AVERROR_PATCHWELCOME;
    }
    c->frame_size = frame_size_bits / 8;

#if CONFIG_LIBXCB_SHM
    c->shm_pool = av_buffer_pool_init2(c->frame_size + AV_INPUT_BUFFER_PADDING_SIZE,
                                       c->conn, allocate_shm_buffer, NULL);
    if (!c->shm_pool)
        return AVERROR(ENOMEM);
#endif

    st->codecpar->codec_type = AVMEDIA_TYPE_VIDEO;
    st->codecpar->codec_id = AV_CODEC_ID_RAWVIDEO;
    st->codecpar->width = c->width;
    st->codecpar->height = c->height;
    st->codecpar->bit_rate = av_rescale(frame_size_bits, st->avg_frame_rate.num, st->avg_frame_rate.den);

    return ret;
}
/* Draw the dashed black/white outline rectangle inside the region window
 * created by setup_window(). */
static void draw_rectangle(AVFormatContext *s)
{
    XCBGrabContext *c = s->priv_data;
    xcb_gcontext_t gc = xcb_generate_id(c->conn);
    uint32_t mask = XCB_GC_FOREGROUND |
                    XCB_GC_BACKGROUND |
                    XCB_GC_LINE_WIDTH |
                    XCB_GC_LINE_STYLE |
                    XCB_GC_FILL_STYLE;
    /* Order must match the mask bits above. */
    uint32_t values[] = { c->screen->black_pixel,
                          c->screen->white_pixel,
                          c->region_border,
                          XCB_LINE_STYLE_DOUBLE_DASH,
                          XCB_FILL_STYLE_SOLID };
    /* Inset by 1px so the outline stays within the window bounds. */
    xcb_rectangle_t r = { 1, 1,
                          c->width + c->region_border * 2 - 3,
                          c->height + c->region_border * 2 - 3 };

    xcb_create_gc(c->conn, gc, c->window, mask, values);

    xcb_poly_rectangle(c->conn, c->window, gc, 1, &r);
}
/* Create and map the override-redirect window that outlines the grabbed
 * region (show_region option).  With the Shape extension, the window's
 * interior is punched out so only the border remains visible. */
static void setup_window(AVFormatContext *s)
{
    XCBGrabContext *c = s->priv_data;
    uint32_t mask = XCB_CW_OVERRIDE_REDIRECT | XCB_CW_EVENT_MASK;
    /* Order must match the mask bits above. */
    uint32_t values[] = { 1,
                          XCB_EVENT_MASK_EXPOSURE |
                          XCB_EVENT_MASK_STRUCTURE_NOTIFY };
    av_unused xcb_rectangle_t rect = { 0, 0, c->width, c->height };

    c->window = xcb_generate_id(c->conn);

    /* Window is larger than the region by the border width on each side. */
    xcb_create_window(c->conn, XCB_COPY_FROM_PARENT,
                      c->window,
                      c->screen->root,
                      c->x - c->region_border,
                      c->y - c->region_border,
                      c->width + c->region_border * 2,
                      c->height + c->region_border * 2,
                      0,
                      XCB_WINDOW_CLASS_INPUT_OUTPUT,
                      XCB_COPY_FROM_PARENT,
                      mask, values);

#if CONFIG_LIBXCB_SHAPE
    /* Subtract the interior so the captured area itself stays unobscured. */
    xcb_shape_rectangles(c->conn, XCB_SHAPE_SO_SUBTRACT,
                         XCB_SHAPE_SK_BOUNDING, XCB_CLIP_ORDERING_UNSORTED,
                         c->window,
                         c->region_border, c->region_border,
                         1, &rect);
#endif

    xcb_map_window(c->conn, c->window);

    draw_rectangle(s);
}
/**
 * Open the grabber: parse the URL ("[display]+x,y"), connect to the X
 * server, pick the screen, create the stream, probe the SHM and XFixes
 * extensions, and show the region-outline window if requested.
 * Returns 0 on success or a negative AVERROR code.
 */
static av_cold int xcbgrab_read_header(AVFormatContext *s)
{
    XCBGrabContext *c = s->priv_data;
    int screen_num, ret;
    const xcb_setup_t *setup;
    char *display_name = av_strdup(s->url);

    if (!display_name)
        return AVERROR(ENOMEM);

    /* URL forms: "<display>+<x>,<y>" or just "+<x>,<y>" (default display). */
    if (!sscanf(s->url, "%[^+]+%d,%d", display_name, &c->x, &c->y)) {
        *display_name = 0;
        sscanf(s->url, "+%d,%d", &c->x, &c->y);
    }

    c->conn = xcb_connect(display_name[0] ? display_name : NULL, &screen_num);
    av_freep(&display_name);
    if ((ret = xcb_connection_has_error(c->conn))) {
        av_log(s, AV_LOG_ERROR, "Cannot open display %s, error %d.\n",
               s->url[0] ? s->url : "default", ret);
        return AVERROR(EIO);
    }

    setup = xcb_get_setup(c->conn);

    c->screen = get_screen(setup, screen_num);
    if (!c->screen) {
        av_log(s, AV_LOG_ERROR, "The screen %d does not exist.\n",
               screen_num);
        xcbgrab_read_close(s);
        return AVERROR(EIO);
    }

    ret = create_stream(s);

    if (ret < 0) {
        xcbgrab_read_close(s);
        return ret;
    }

#if CONFIG_LIBXCB_SHM
    c->has_shm = check_shm(c->conn);
#endif

#if CONFIG_LIBXCB_XFIXES
    if (c->draw_mouse) {
        /* Cursor drawing needs XFixes and at least 24 bpp frames. */
        if (!(c->draw_mouse = check_xfixes(c->conn))) {
            av_log(s, AV_LOG_WARNING,
                   "XFixes not available, cannot draw the mouse.\n");
        }
        if (c->bpp < 24) {
            avpriv_report_missing_feature(s, "%d bits per pixel screen",
                                          c->bpp);
            c->draw_mouse = 0;
        }
    }
#endif

    if (c->show_region)
        setup_window(s);

    return 0;
}
/* Input device definition; registered under the historical name
 * "x11grab" although it is implemented with XCB. */
AVInputFormat ff_xcbgrab_demuxer = {
    .name           = "x11grab",
    .long_name      = NULL_IF_CONFIG_SMALL("X11 screen capture, using XCB"),
    .priv_data_size = sizeof(XCBGrabContext),
    .read_header    = xcbgrab_read_header,
    .read_packet    = xcbgrab_read_packet,
    .read_close     = xcbgrab_read_close,
    .flags          = AVFMT_NOFILE,
    .priv_class     = &xcbgrab_class,
};

392
externals/ffmpeg/libavdevice/xv.c vendored Executable file
View File

@@ -0,0 +1,392 @@
/*
* Copyright (c) 2013 Jeff Moguillansky
*
* This file is part of FFmpeg.
*
* FFmpeg is free software; you can redistribute it and/or
* modify it under the terms of the GNU Lesser General Public
* License as published by the Free Software Foundation; either
* version 2.1 of the License, or (at your option) any later version.
*
* FFmpeg is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
* Lesser General Public License for more details.
*
* You should have received a copy of the GNU Lesser General Public
* License along with FFmpeg; if not, write to the Free Software
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
/**
* @file
* XVideo output device
*
* TODO:
* - add support to more formats
*/
#include <X11/Xlib.h>
#include <X11/extensions/Xv.h>
#include <X11/extensions/XShm.h>
#include <X11/extensions/Xvlib.h>
#include <sys/shm.h>
#include "libavutil/opt.h"
#include "libavutil/pixdesc.h"
#include "libavutil/imgutils.h"
#include "libavformat/internal.h"
#include "avdevice.h"
/** Private context of the XVideo output device. */
typedef struct {
    AVClass *class;
    GC gc;                        /**< graphics context used to render frames */
    Window window;                /**< window frames are displayed in */
    int64_t window_id;            /**< existing window to draw into; 0 = create our own */
    char *window_title;           /**< title for a created window; defaults to the URL */
    int window_width, window_height;
    int window_x, window_y;
    int dest_x, dest_y;          /**< display area position */
    unsigned int dest_w, dest_h; /**< display area dimensions */
    Display* display;
    char *display_name;
    XvImage* yuv_image;           /**< XVideo image backed by shared memory */
    enum AVPixelFormat image_format;
    int image_width, image_height;
    XShmSegmentInfo yuv_shminfo;  /**< SHM segment info for yuv_image */
    int xv_port;
    Atom wm_delete_message;       /**< "WM_DELETE_WINDOW" atom for close events */
} XVContext;

/* Pair of an XVideo FourCC tag and the matching AVPixelFormat. */
typedef struct XVTagFormatMap
{
    int tag;
    enum AVPixelFormat format;
} XVTagFormatMap;

/* Supported XVideo image formats; terminated by a zero-tag sentinel. */
static const XVTagFormatMap tag_codec_map[] = {
    { MKTAG('I','4','2','0'), AV_PIX_FMT_YUV420P },
    { MKTAG('U','Y','V','Y'), AV_PIX_FMT_UYVY422 },
    { MKTAG('Y','U','Y','2'), AV_PIX_FMT_YUYV422 },
    { 0,                      AV_PIX_FMT_NONE }
};
/**
 * Look up the XVideo FourCC tag for a pixel format in tag_codec_map.
 * Returns 0 when the format is not supported.
 */
static int xv_get_tag_from_format(enum AVPixelFormat format)
{
    const XVTagFormatMap *m;

    /* The table ends with a zero-tag sentinel entry. */
    for (m = tag_codec_map; m->tag; m++)
        if (m->format == format)
            return m->tag;

    return 0;
}
/* Release all X11/XVideo resources acquired during xv_write_header().
 * Always returns 0. */
static int xv_write_trailer(AVFormatContext *s)
{
    XVContext *xv = s->priv_data;
    if (xv->display) {
        XShmDetach(xv->display, &xv->yuv_shminfo);
        if (xv->yuv_image)
            shmdt(xv->yuv_image->data);
        /* NOTE(review): XFree is called even when yuv_image is NULL;
         * unlike free(), XFree(NULL) is not documented as safe — confirm. */
        XFree(xv->yuv_image);
        if (xv->gc)
            XFreeGC(xv->display, xv->gc);
        XCloseDisplay(xv->display);
    }
    return 0;
}
/**
 * Validate the stream, open the X display, create (or adopt) the window,
 * pick an Xv port and set up the shared-memory image frames are blitted
 * through.
 *
 * @param s muxer context; exactly one raw or wrapped-avframe video stream
 * @return 0 on success, a negative AVERROR code on failure
 */
static int xv_write_header(AVFormatContext *s)
{
    XVContext *xv = s->priv_data;
    unsigned int num_adaptors;
    XvAdaptorInfo *ai;
    XvImageFormatValues *fv;
    XColor fgcolor;
    XWindowAttributes window_attrs;
    int num_formats = 0, j, tag, ret;
    AVCodecParameters *par = s->streams[0]->codecpar;

    if (   s->nb_streams > 1
        || par->codec_type != AVMEDIA_TYPE_VIDEO
        || (par->codec_id != AV_CODEC_ID_WRAPPED_AVFRAME && par->codec_id != AV_CODEC_ID_RAWVIDEO)) {
        av_log(s, AV_LOG_ERROR, "Only a single raw or wrapped avframe video stream is supported.\n");
        return AVERROR(EINVAL);
    }

    if (!(tag = xv_get_tag_from_format(par->format))) {
        av_log(s, AV_LOG_ERROR,
               "Unsupported pixel format '%s', only yuv420p, uyvy422, yuyv422 are currently supported\n",
               av_get_pix_fmt_name(par->format));
        return AVERROR_PATCHWELCOME;
    }
    xv->image_format = par->format;

    xv->display = XOpenDisplay(xv->display_name);
    if (!xv->display) {
        av_log(s, AV_LOG_ERROR, "Could not open the X11 display '%s'\n", xv->display_name);
        return AVERROR(EINVAL);
    }

    xv->image_width  = par->width;
    xv->image_height = par->height;
    if (!xv->window_width && !xv->window_height) {
        /* No forced window size: derive it from the picture size, scaled by
         * the sample aspect ratio so the window shows square pixels. */
        AVRational sar = par->sample_aspect_ratio;
        xv->window_width  = par->width;
        xv->window_height = par->height;
        if (sar.num) {
            if (sar.num > sar.den)
                xv->window_width = av_rescale(xv->window_width, sar.num, sar.den);
            if (sar.num < sar.den)
                xv->window_height = av_rescale(xv->window_height, sar.den, sar.num);
        }
    }
    if (!xv->window_id) {
        xv->window = XCreateSimpleWindow(xv->display, DefaultRootWindow(xv->display),
                                         xv->window_x, xv->window_y,
                                         xv->window_width, xv->window_height,
                                         0, 0, 0);
        if (!xv->window_title) {
            if (!(xv->window_title = av_strdup(s->url))) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
        }
        XStoreName(xv->display, xv->window, xv->window_title);
        /* Subscribe to WM_DELETE_WINDOW so write_picture() can detect that
         * the user closed the window. */
        xv->wm_delete_message = XInternAtom(xv->display, "WM_DELETE_WINDOW", False);
        XSetWMProtocols(xv->display, xv->window, &xv->wm_delete_message, 1);
        XMapWindow(xv->display, xv->window);
    } else
        xv->window = xv->window_id;

    if (XvQueryAdaptors(xv->display, DefaultRootWindow(xv->display), &num_adaptors, &ai) != Success) {
        ret = AVERROR_EXTERNAL;
        goto fail;
    }
    if (!num_adaptors) {
        av_log(s, AV_LOG_ERROR, "No X-Video adaptors present\n");
        /* Fix: go through fail instead of returning directly, so the
         * display connection (and any created window) is released. */
        ret = AVERROR(ENODEV);
        goto fail;
    }
    xv->xv_port = ai[0].base_id;
    XvFreeAdaptorInfo(ai);

    fv = XvListImageFormats(xv->display, xv->xv_port, &num_formats);
    if (!fv) {
        ret = AVERROR_EXTERNAL;
        goto fail;
    }
    for (j = 0; j < num_formats; j++) {
        if (fv[j].id == tag) {
            break;
        }
    }
    XFree(fv);

    if (j >= num_formats) {
        av_log(s, AV_LOG_ERROR,
               "Device does not support pixel format %s, aborting\n",
               av_get_pix_fmt_name(par->format));
        ret = AVERROR(EINVAL);
        goto fail;
    }

    xv->gc = XCreateGC(xv->display, xv->window, 0, 0);
    xv->image_width  = par->width;
    xv->image_height = par->height;
    xv->yuv_image = XvShmCreateImage(xv->display, xv->xv_port, tag, 0,
                                     xv->image_width, xv->image_height, &xv->yuv_shminfo);
    if (!xv->yuv_image) {
        /* Fix: previously a NULL image was dereferenced below. */
        ret = AVERROR_EXTERNAL;
        goto fail;
    }
    xv->yuv_shminfo.shmid = shmget(IPC_PRIVATE, xv->yuv_image->data_size,
                                   IPC_CREAT | 0777);
    if (xv->yuv_shminfo.shmid == -1) {
        ret = AVERROR_EXTERNAL;
        goto fail;
    }
    xv->yuv_shminfo.shmaddr = (char *)shmat(xv->yuv_shminfo.shmid, 0, 0);
    if (xv->yuv_shminfo.shmaddr == (char *)-1) {
        /* Nothing is attached yet: drop the segment before bailing out. */
        shmctl(xv->yuv_shminfo.shmid, IPC_RMID, 0);
        ret = AVERROR_EXTERNAL;
        goto fail;
    }
    xv->yuv_image->data = xv->yuv_shminfo.shmaddr;
    xv->yuv_shminfo.readOnly = False;

    XShmAttach(xv->display, &xv->yuv_shminfo);
    XSync(xv->display, False);
    /* Mark the segment for removal now; it stays usable until detached,
     * and cannot be leaked if the process dies. */
    shmctl(xv->yuv_shminfo.shmid, IPC_RMID, 0);

    XGetWindowAttributes(xv->display, xv->window, &window_attrs);

    fgcolor.red = fgcolor.green = fgcolor.blue = 0;
    fgcolor.flags = DoRed | DoGreen | DoBlue;
    XAllocColor(xv->display, window_attrs.colormap, &fgcolor);
    XSetForeground(xv->display, xv->gc, fgcolor.pixel);
    //force display area recalculation at first frame
    xv->window_width = xv->window_height = 0;

    return 0;
  fail:
    xv_write_trailer(s);
    return ret;
}
/**
 * Compute the destination rectangle (dest_x/y/w/h) that letterboxes or
 * pillarboxes the picture inside the current dest_w x dest_h box while
 * preserving its display aspect ratio.
 *
 * @param s muxer context; reads stream 0 and updates the XVContext
 */
static void compute_display_area(AVFormatContext *s)
{
    XVContext *xv = s->priv_data;
    AVStream *st = s->streams[0];
    AVCodecParameters *par = st->codecpar;
    AVRational sar, dar; /* sample and display aspect ratios */
    int64_t fitted;

    /* compute overlay width and height from the codec context information */
    sar = st->sample_aspect_ratio.num ? st->sample_aspect_ratio : (AVRational){ 1, 1 };
    dar = av_mul_q(sar, (AVRational){ par->width, par->height });

    /* we suppose the screen has a 1/1 sample aspect ratio */
    /* fit in the window */
    if (av_cmp_q(dar, (AVRational){ xv->dest_w, xv->dest_h }) > 0) {
        /* picture is wider than the box: fit width, center vertically */
        fitted = av_rescale(xv->dest_w, dar.den, dar.num);
        xv->dest_x = 0;
        xv->dest_y = (int)((xv->dest_h - fitted) / 2);
        xv->dest_h = fitted;
    } else {
        /* picture is taller than the box: fit height, center horizontally */
        fitted = av_rescale(xv->dest_h, dar.num, dar.den);
        xv->dest_y = 0;
        xv->dest_x = (int)((xv->dest_w - fitted) / 2);
        xv->dest_w = fitted;
    }
}
/**
 * Push the current shared-memory image to the window, recomputing the
 * display area (and blanking the border bands) when the window size
 * changed since the last repaint.
 *
 * @param s muxer context
 * @return 0 on success, AVERROR_EXTERNAL if XvShmPutImage fails
 */
static int xv_repaint(AVFormatContext *s)
{
    XVContext *xv = s->priv_data;
    XWindowAttributes window_attrs;

    XGetWindowAttributes(xv->display, xv->window, &window_attrs);
    /* window_width/height track the size seen at the last repaint;
     * xv_write_header() zeroes them to force this branch on frame 1 */
    if (window_attrs.width != xv->window_width || window_attrs.height != xv->window_height) {
        XRectangle rect[2];
        xv->dest_w = window_attrs.width;
        xv->dest_h = window_attrs.height;
        compute_display_area(s);
        if (xv->dest_x) {
            /* pillarbox: fill the vertical bands left and right of the picture */
            rect[0].width  = rect[1].width  = xv->dest_x;
            rect[0].height = rect[1].height = window_attrs.height;
            rect[0].y      = rect[1].y      = 0;
            rect[0].x = 0;
            rect[1].x = xv->dest_w + xv->dest_x;
            XFillRectangles(xv->display, xv->window, xv->gc, rect, 2);
        }
        if (xv->dest_y) {
            /* letterbox: fill the horizontal bands above and below the picture */
            rect[0].width  = rect[1].width  = window_attrs.width;
            rect[0].height = rect[1].height = xv->dest_y;
            rect[0].x      = rect[1].x      = 0;
            rect[0].y = 0;
            rect[1].y = xv->dest_h + xv->dest_y;
            XFillRectangles(xv->display, xv->window, xv->gc, rect, 2);
        }
    }

    if (XvShmPutImage(xv->display, xv->xv_port, xv->window, xv->gc,
                      xv->yuv_image, 0, 0, xv->image_width, xv->image_height,
                      xv->dest_x, xv->dest_y, xv->dest_w, xv->dest_h, True) != Success) {
        av_log(s, AV_LOG_ERROR, "Could not copy image to XV shared memory buffer\n");
        return AVERROR_EXTERNAL;
    }
    return 0;
}
/**
 * Copy one picture's planes into the shared-memory Xv image and repaint.
 *
 * @param s          muxer context
 * @param input_data source plane pointers (up to 4, layout per image_format)
 * @param linesize   source plane strides
 * @return 0 on success, AVERROR(EPIPE) if the window was closed,
 *         or an error from xv_repaint()
 */
static int write_picture(AVFormatContext *s, uint8_t *input_data[4],
                         int linesize[4])
{
    XVContext *xv = s->priv_data;
    XvImage *img = xv->yuv_image;
    /* destination plane pointers inside the single shm buffer; plane
     * offsets were chosen by XvShmCreateImage */
    uint8_t *data[3] = {
        img->data + img->offsets[0],
        img->data + img->offsets[1],
        img->data + img->offsets[2]
    };

    /* Check messages. Window might get closed. */
    /* Only done for windows we created; a user-supplied window_id is
     * managed (and its events consumed) by the caller. */
    if (!xv->window_id) {
        XEvent event;
        while (XPending(xv->display)) {
            XNextEvent(xv->display, &event);
            if (event.type == ClientMessage && event.xclient.data.l[0] == xv->wm_delete_message) {
                av_log(xv, AV_LOG_DEBUG, "Window close event.\n");
                return AVERROR(EPIPE);
            }
        }
    }

    av_image_copy(data, img->pitches, (const uint8_t **)input_data, linesize,
                  xv->image_format, img->width, img->height);
    return xv_repaint(s);
}
/**
 * Display one packet: either a wrapped AVFrame (plane pointers carried
 * in the packet) or a raw video buffer whose plane layout is derived
 * from the stream's pixel format.
 *
 * @param s   muxer context
 * @param pkt packet holding the picture
 * @return result of write_picture()
 */
static int xv_write_packet(AVFormatContext *s, AVPacket *pkt)
{
    AVCodecParameters *par = s->streams[0]->codecpar;

    if (par->codec_id != AV_CODEC_ID_WRAPPED_AVFRAME) {
        /* raw video: rebuild plane pointers/strides from the flat buffer */
        uint8_t *planes[4];
        int strides[4];

        av_image_fill_arrays(planes, strides, pkt->data, par->format,
                             par->width, par->height, 1);
        return write_picture(s, planes, strides);
    } else {
        AVFrame *frame = (AVFrame *)pkt->data;
        return write_picture(s, frame->data, frame->linesize);
    }
}
/**
 * Display an uncoded AVFrame directly.
 *
 * @param s            muxer context
 * @param stream_index unused; only stream 0 is accepted at header time
 * @param frame        frame to display
 * @param flags        AV_WRITE_UNCODED_FRAME_QUERY to probe support only
 * @return 0 on a query, otherwise the result of write_picture()
 */
static int xv_write_frame(AVFormatContext *s, int stream_index, AVFrame **frame,
                          unsigned flags)
{
    AVFrame *pic = *frame;

    /* xv_write_header() should have accepted only supported formats */
    if (flags & AV_WRITE_UNCODED_FRAME_QUERY)
        return 0;

    return write_picture(s, pic->data, pic->linesize);
}
/**
 * Handle application-to-device control messages.
 * Only window repaint requests are supported.
 *
 * @return xv_repaint() result for repaint requests, AVERROR(ENOSYS) otherwise
 */
static int xv_control_message(AVFormatContext *s, int type, void *data, size_t data_size)
{
    if (type == AV_APP_TO_DEV_WINDOW_REPAINT)
        return xv_repaint(s);

    return AVERROR(ENOSYS);
}
#define OFFSET(x) offsetof(XVContext, x)
/* User-settable options, all encoding-side (this is an output device). */
static const AVOption options[] = {
    { "display_name", "set display name",       OFFSET(display_name), AV_OPT_TYPE_STRING,     {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_id",    "set existing window id", OFFSET(window_id),    AV_OPT_TYPE_INT64,      {.i64 = 0 }, 0, INT64_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_size",  "set window forced size", OFFSET(window_width), AV_OPT_TYPE_IMAGE_SIZE, {.str = NULL}, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_title", "set window title",       OFFSET(window_title), AV_OPT_TYPE_STRING,     {.str = NULL }, 0, 0, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_x",     "set window x offset",    OFFSET(window_x),     AV_OPT_TYPE_INT,        {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { "window_y",     "set window y offset",    OFFSET(window_y),     AV_OPT_TYPE_INT,        {.i64 = 0 }, -INT_MAX, INT_MAX, AV_OPT_FLAG_ENCODING_PARAM },
    { NULL }
};
/* AVClass exposing the options above for the "xv" output device. */
static const AVClass xv_class = {
    .class_name = "xvideo outdev",
    .item_name  = av_default_item_name,
    .option     = options,
    .version    = LIBAVUTIL_VERSION_INT,
    .category   = AV_CLASS_CATEGORY_DEVICE_VIDEO_OUTPUT,
};
/* Muxer definition for the XVideo output device; registered in alldevices.c. */
AVOutputFormat ff_xv_muxer = {
    .name           = "xv",
    .long_name      = NULL_IF_CONFIG_SMALL("XV (XVideo) output device"),
    .priv_data_size = sizeof(XVContext),
    .audio_codec    = AV_CODEC_ID_NONE,
    .video_codec    = AV_CODEC_ID_WRAPPED_AVFRAME,
    .write_header   = xv_write_header,
    .write_packet   = xv_write_packet,
    .write_uncoded_frame = xv_write_frame,
    .write_trailer  = xv_write_trailer,
    .control_message = xv_control_message,
    .flags          = AVFMT_NOFILE | AVFMT_VARIABLE_FPS | AVFMT_NOTIMESTAMPS,
    .priv_class     = &xv_class,
};