Commit 88d2ccbe authored by Philip Langdale, committed by Lynne

lavfi/vf_hwupload: Add support for HW -> HW transfers

As we find ourselves wanting a way to transfer frames between
HW devices (or more realistically, between APIs on the same device),
it's desirable to have a way to describe the relationship. While
we could imagine introducing a `hwtransfer` filter, it would differ
very little from `hwupload`. The main new feature we need
is a way to specify the target device. Having a single device
for the filter chain is obviously insufficient if we're dealing
with two devices.

So let's add a way to specify the upload target device, and if none
is specified, continue with the existing behaviour.

We must also correctly preserve the sw_format on such a transfer.
parent a88449ff
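
Editor's note: as a rough illustration of what the new derive_device behaviour builds on (not part of this patch; the helper name and the choice of "vulkan" are illustrative assumptions), the libavutil-level derivation looks roughly like this:

    #include <libavutil/error.h>
    #include <libavutil/hwcontext.h>

    /* Illustrative sketch: derive a device of another type (here Vulkan) from
     * an existing device, as hwupload does internally when derive_device is
     * set.  Error handling is reduced to the bare minimum. */
    static int derive_device(AVBufferRef *src_device_ref, const char *type_name,
                             AVBufferRef **derived_device_ref)
    {
        enum AVHWDeviceType type = av_hwdevice_find_type_by_name(type_name);
        if (type == AV_HWDEVICE_TYPE_NONE)
            return AVERROR(EINVAL);

        /* The derived device refers to the same underlying hardware, e.g. the
         * same graphics card exposed through a different API. */
        return av_hwdevice_ctx_create_derived(derived_device_ref, type,
                                              src_device_ref, 0);
    }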
doc/filters.texi
@@ -11981,7 +11981,18 @@ Upload system memory frames to hardware surfaces.

 The device to upload to must be supplied when the filter is initialised. If
 using ffmpeg, select the appropriate device with the @option{-filter_hw_device}
-option.
+option or with the @option{derive_device} option. The input and output devices
+must be of different types and compatible - the exact meaning of this is
+system-dependent, but typically it means that they must refer to the same
+underlying hardware context (for example, refer to the same graphics card).
+
+The following additional parameters are accepted:
+
+@table @option
+@item derive_device @var{type}
+Rather than using the device supplied at initialisation, instead derive a new
+device of type @var{type} from the device the input frames exist on.
+@end table

 @anchor{hwupload_cuda}
 @section hwupload_cuda
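Editor's note: on the ffmpeg command line the documented option simply appears in the filtergraph string, e.g. -vf hwupload=derive_device=vulkan. For a graph built programmatically, a hypothetical helper (not part of this patch) might supply it like this; filter linking is omitted:

    #include <libavfilter/avfilter.h>
    #include <libavutil/error.h>

    /* Illustrative sketch: create an hwupload instance with the new
     * derive_device option in an application-built filter graph. */
    static int create_hwupload(AVFilterGraph *graph, AVFilterContext **filt)
    {
        const AVFilter *hwupload = avfilter_get_by_name("hwupload");
        if (!hwupload)
            return AVERROR_FILTER_NOT_FOUND;

        /* "derive_device=vulkan" asks the filter to derive a Vulkan device
         * from the device the input frames already exist on. */
        return avfilter_graph_create_filter(filt, hwupload, "upload",
                                            "derive_device=vulkan", NULL, graph);
    }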
libavfilter/vf_hwupload.c
@@ -32,10 +32,11 @@ typedef struct HWUploadContext {
     const AVClass *class;

     AVBufferRef *hwdevice_ref;
-    AVHWDeviceContext *hwdevice;

     AVBufferRef *hwframes_ref;
     AVHWFramesContext *hwframes;
+
+    char *device_type;
 } HWUploadContext;

 static int hwupload_query_formats(AVFilterContext *avctx)
@@ -46,17 +47,27 @@ static int hwupload_query_formats(AVFilterContext *avctx)
     AVFilterFormats *input_formats = NULL;
     int err, i;

-    if (!avctx->hw_device_ctx) {
+    if (ctx->hwdevice_ref) {
+        /* We already have a specified device. */
+    } else if (avctx->hw_device_ctx) {
+        if (ctx->device_type) {
+            err = av_hwdevice_ctx_create_derived(
+                &ctx->hwdevice_ref,
+                av_hwdevice_find_type_by_name(ctx->device_type),
+                avctx->hw_device_ctx, 0);
+            if (err < 0)
+                return err;
+        } else {
+            ctx->hwdevice_ref = av_buffer_ref(avctx->hw_device_ctx);
+            if (!ctx->hwdevice_ref)
+                return AVERROR(ENOMEM);
+        }
+    } else {
         av_log(ctx, AV_LOG_ERROR, "A hardware device reference is required "
                "to upload frames to.\n");
         return AVERROR(EINVAL);
     }

-    ctx->hwdevice_ref = av_buffer_ref(avctx->hw_device_ctx);
-    if (!ctx->hwdevice_ref)
-        return AVERROR(ENOMEM);
-    ctx->hwdevice = (AVHWDeviceContext*)ctx->hwdevice_ref->data;
-
     constraints = av_hwdevice_get_hwframe_constraints(ctx->hwdevice_ref, NULL);
     if (!constraints) {
         err = AVERROR(EINVAL);
@@ -127,7 +138,13 @@ static int hwupload_config_output(AVFilterLink *outlink)
            av_get_pix_fmt_name(inlink->format));

     ctx->hwframes->format = outlink->format;
-    ctx->hwframes->sw_format = inlink->format;
+    if (inlink->hw_frames_ctx) {
+        AVHWFramesContext *in_hwframe_ctx =
+            (AVHWFramesContext*)inlink->hw_frames_ctx->data;
+        ctx->hwframes->sw_format = in_hwframe_ctx->sw_format;
+    } else {
+        ctx->hwframes->sw_format = inlink->format;
+    }
     ctx->hwframes->width = inlink->w;
     ctx->hwframes->height = inlink->h;
@@ -200,13 +217,21 @@ static av_cold void hwupload_uninit(AVFilterContext *avctx)
     av_buffer_unref(&ctx->hwdevice_ref);
 }

-static const AVClass hwupload_class = {
-    .class_name = "hwupload",
-    .item_name  = av_default_item_name,
-    .option     = NULL,
-    .version    = LIBAVUTIL_VERSION_INT,
+#define OFFSET(x) offsetof(HWUploadContext, x)
+#define FLAGS (AV_OPT_FLAG_FILTERING_PARAM|AV_OPT_FLAG_VIDEO_PARAM)
+static const AVOption hwupload_options[] = {
+    {
+        "derive_device", "Derive a new device of this type",
+        OFFSET(device_type), AV_OPT_TYPE_STRING,
+        { .str = NULL }, 0, 0, FLAGS
+    },
+    {
+        NULL
+    }
 };

+AVFILTER_DEFINE_CLASS(hwupload);
+
 static const AVFilterPad hwupload_inputs[] = {
     {
         .name = "default",
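Editor's note: the hunks above only touch initialisation; the per-frame path is not changed by this diff, and it is where the preserved sw_format matters, since the destination frames context must describe the same underlying layout as the source for the hwcontext layer to move data HW -> HW. A reduced sketch of that transfer step (helper name is an assumption, not code from this patch):

    #include <libavutil/frame.h>
    #include <libavutil/hwcontext.h>

    /* Illustrative sketch of the upload/transfer step: allocate a frame from
     * the filter's output frames context and transfer the input into it.
     * src may itself be a hardware frame, in which case this is a HW -> HW
     * transfer handled by the hwcontext implementations. */
    static int transfer_one_frame(AVBufferRef *out_hwframes_ref,
                                  const AVFrame *src, AVFrame *dst)
    {
        int err = av_hwframe_get_buffer(out_hwframes_ref, dst, 0);
        if (err < 0)
            return err;

        err = av_hwframe_transfer_data(dst, src, 0);
        if (err < 0)
            return err;

        /* Carry over timestamps and other frame metadata. */
        return av_frame_copy_props(dst, src);
    }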
libavfilter/vf_hwupload_cuda.c
@@ -60,6 +60,9 @@ static int cudaupload_query_formats(AVFilterContext *ctx)
         AV_PIX_FMT_NV12, AV_PIX_FMT_YUV420P, AV_PIX_FMT_YUV444P,
         AV_PIX_FMT_P010, AV_PIX_FMT_P016, AV_PIX_FMT_YUV444P16,
         AV_PIX_FMT_0RGB32, AV_PIX_FMT_0BGR32,
+#if CONFIG_VULKAN
+        AV_PIX_FMT_VULKAN,
+#endif
         AV_PIX_FMT_NONE,
     };
     static const enum AVPixelFormat output_pix_fmts[] = {
@@ -97,7 +100,12 @@ static int cudaupload_config_output(AVFilterLink *outlink)
     hwframe_ctx = (AVHWFramesContext*)s->hwframe->data;
     hwframe_ctx->format = AV_PIX_FMT_CUDA;
-    hwframe_ctx->sw_format = inlink->format;
+    if (inlink->hw_frames_ctx) {
+        AVHWFramesContext *in_hwframe_ctx = (AVHWFramesContext*)inlink->hw_frames_ctx->data;
+        hwframe_ctx->sw_format = in_hwframe_ctx->sw_format;
+    } else {
+        hwframe_ctx->sw_format = inlink->format;
+    }
     hwframe_ctx->width = inlink->w;
     hwframe_ctx->height = inlink->h;
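Editor's note: the cudaupload change is analogous: AV_PIX_FMT_VULKAN is accepted as an input format when Vulkan support is compiled in, and the software format of a hardware input is carried through. At the libavutil level, the formats a frames context is willing to transfer to or from can be inspected with av_hwframe_transfer_get_formats(); a small illustrative helper (not part of this patch) is sketched below:

    #include <libavutil/hwcontext.h>
    #include <libavutil/log.h>
    #include <libavutil/mem.h>
    #include <libavutil/pixdesc.h>

    /* Illustrative sketch: print the pixel formats a hardware frames context
     * reports as usable sources for transfers into it (i.e. uploads). */
    static void print_upload_formats(AVBufferRef *hwframes_ref)
    {
        enum AVPixelFormat *formats = NULL;

        if (av_hwframe_transfer_get_formats(hwframes_ref,
                                            AV_HWFRAME_TRANSFER_DIRECTION_TO,
                                            &formats, 0) < 0)
            return;

        for (int i = 0; formats[i] != AV_PIX_FMT_NONE; i++)
            av_log(NULL, AV_LOG_INFO, "%s\n", av_get_pix_fmt_name(formats[i]));

        av_free(formats);
    }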