firefox/ffvpx.patch

diff -up firefox-84.0/dom/media/platforms/ffmpeg/ffmpeg58/moz.build.ffvpx firefox-84.0/dom/media/platforms/ffmpeg/ffmpeg58/moz.build
--- firefox-84.0/dom/media/platforms/ffmpeg/ffmpeg58/moz.build.ffvpx 2020-12-08 00:35:04.000000000 +0100
+++ firefox-84.0/dom/media/platforms/ffmpeg/ffmpeg58/moz.build 2020-12-10 20:36:08.398441034 +0100
@@ -27,8 +27,6 @@ if CONFIG['CC_TYPE'] == 'gcc':
]
if CONFIG['MOZ_WAYLAND']:
CXXFLAGS += CONFIG['TK_CFLAGS']
- CXXFLAGS += CONFIG['MOZ_WAYLAND_CFLAGS']
DEFINES['MOZ_WAYLAND_USE_VAAPI'] = 1
- include('/ipc/chromium/chromium-config.mozbuild')
FINAL_LIBRARY = 'xul'
diff -up firefox-84.0/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp.ffvpx firefox-84.0/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp
--- firefox-84.0/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp.ffvpx 2020-12-08 00:35:04.000000000 +0100
+++ firefox-84.0/dom/media/platforms/ffmpeg/FFmpegLibWrapper.cpp 2020-12-10 20:40:53.388541336 +0100
@@ -11,6 +11,10 @@
#include "mozilla/Types.h"
#include "PlatformDecoderModule.h"
#include "prlink.h"
+#ifdef MOZ_WAYLAND
+# include "mozilla/widget/DMABufLibWrapper.h"
+# include "mozilla/StaticPrefs_media.h"
+#endif
#define AV_LOG_DEBUG 48
#define AV_LOG_INFO 32
@@ -254,6 +258,46 @@ void FFmpegLibWrapper::Unlink() {
}
#ifdef MOZ_WAYLAND
+void FFmpegLibWrapper::LinkVAAPILibs() {
+ if (widget::GetDMABufDevice()->IsDMABufVAAPIEnabled()) {
+ PRLibSpec lspec;
+ lspec.type = PR_LibSpec_Pathname;
+ const char* libDrm = "libva-drm.so.2";
+ lspec.value.pathname = libDrm;
+ mVALibDrm = PR_LoadLibraryWithFlags(lspec, PR_LD_NOW | PR_LD_LOCAL);
+ if (!mVALibDrm) {
+ FFMPEG_LOG("VA-API support: Missing or old %s library.\n", libDrm);
+ }
+
+ if (!StaticPrefs::media_ffmpeg_vaapi_drm_display_enabled()) {
+ const char* libWayland = "libva-wayland.so.2";
+ lspec.value.pathname = libWayland;
+ mVALibWayland = PR_LoadLibraryWithFlags(lspec, PR_LD_NOW | PR_LD_LOCAL);
+ if (!mVALibWayland) {
+ FFMPEG_LOG("VA-API support: Missing or old %s library.\n", libWayland);
+ }
+ }
+
+ if (mVALibWayland || mVALibDrm) {
+ const char* lib = "libva.so.2";
+ lspec.value.pathname = lib;
+ mVALib = PR_LoadLibraryWithFlags(lspec, PR_LD_NOW | PR_LD_LOCAL);
+ // Don't use libva when it's missing vaExportSurfaceHandle.
+ if (mVALib && !PR_FindSymbol(mVALib, "vaExportSurfaceHandle")) {
+ PR_UnloadLibrary(mVALib);
+ mVALib = nullptr;
+ }
+ if (!mVALib) {
+ FFMPEG_LOG("VA-API support: Missing or old %s library.\n", lib);
+ }
+ }
+ } else {
+ FFMPEG_LOG("VA-API FFmpeg is disabled by platform");
+ }
+}
+#endif
+
+#ifdef MOZ_WAYLAND
bool FFmpegLibWrapper::IsVAAPIAvailable() {
# define VA_FUNC_LOADED(func) (func != nullptr)
return VA_FUNC_LOADED(avcodec_get_hw_config) &&
diff -up firefox-84.0/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h.ffvpx firefox-84.0/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h
--- firefox-84.0/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h.ffvpx 2020-12-08 00:35:04.000000000 +0100
+++ firefox-84.0/dom/media/platforms/ffmpeg/FFmpegLibWrapper.h 2020-12-10 20:40:53.388541336 +0100
@@ -56,6 +56,7 @@ struct MOZ_ONLY_USED_TO_AVOID_STATIC_CON
#ifdef MOZ_WAYLAND
// Check if mVALib are available and we can use HW decode.
bool IsVAAPIAvailable();
+ void LinkVAAPILibs();
#endif
// indicate the version of libavcodec linked to.
diff -up firefox-84.0/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp.ffvpx firefox-84.0/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp
--- firefox-84.0/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp.ffvpx 2020-12-08 00:35:04.000000000 +0100
+++ firefox-84.0/dom/media/platforms/ffmpeg/FFmpegRuntimeLinker.cpp 2020-12-10 20:40:53.388541336 +0100
@@ -9,10 +9,6 @@
#include "mozilla/ArrayUtils.h"
#include "FFmpegLog.h"
#include "prlink.h"
-#ifdef MOZ_WAYLAND
-# include "mozilla/widget/DMABufLibWrapper.h"
-# include "mozilla/StaticPrefs_media.h"
-#endif
namespace mozilla {
@@ -58,43 +54,7 @@ bool FFmpegRuntimeLinker::Init() {
}
#ifdef MOZ_WAYLAND
- if (widget::GetDMABufDevice()->IsDMABufVAAPIEnabled()) {
- PRLibSpec lspec;
- lspec.type = PR_LibSpec_Pathname;
- const char* libDrm = "libva-drm.so.2";
- lspec.value.pathname = libDrm;
- sLibAV.mVALibDrm = PR_LoadLibraryWithFlags(lspec, PR_LD_NOW | PR_LD_LOCAL);
- if (!sLibAV.mVALibDrm) {
- FFMPEG_LOG("VA-API support: Missing or old %s library.\n", libDrm);
- }
-
- if (!StaticPrefs::media_ffmpeg_vaapi_drm_display_enabled()) {
- const char* libWayland = "libva-wayland.so.2";
- lspec.value.pathname = libWayland;
- sLibAV.mVALibWayland =
- PR_LoadLibraryWithFlags(lspec, PR_LD_NOW | PR_LD_LOCAL);
- if (!sLibAV.mVALibWayland) {
- FFMPEG_LOG("VA-API support: Missing or old %s library.\n", libWayland);
- }
- }
-
- if (sLibAV.mVALibWayland || sLibAV.mVALibDrm) {
- const char* lib = "libva.so.2";
- lspec.value.pathname = lib;
- sLibAV.mVALib = PR_LoadLibraryWithFlags(lspec, PR_LD_NOW | PR_LD_LOCAL);
- // Don't use libva when it's missing vaExportSurfaceHandle.
- if (sLibAV.mVALib &&
- !PR_FindSymbol(sLibAV.mVALib, "vaExportSurfaceHandle")) {
- PR_UnloadLibrary(sLibAV.mVALib);
- sLibAV.mVALib = nullptr;
- }
- if (!sLibAV.mVALib) {
- FFMPEG_LOG("VA-API support: Missing or old %s library.\n", lib);
- }
- }
- } else {
- FFMPEG_LOG("VA-API FFmpeg is disabled by platform");
- }
+ sLibAV.LinkVAAPILibs();
#endif
// While going through all possible libs, this status will be updated with a
diff -up firefox-84.0/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp.ffvpx firefox-84.0/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp
--- firefox-84.0/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp.ffvpx 2020-12-08 00:35:04.000000000 +0100
+++ firefox-84.0/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.cpp 2020-12-10 20:40:53.392541452 +0100
@@ -32,6 +32,8 @@
# define AV_PIX_FMT_NONE PIX_FMT_NONE
#endif
#include "mozilla/PodOperations.h"
+#include "mozilla/ScopeExit.h"
+#include "mozilla/StaticPrefs_media.h"
#include "mozilla/TaskQueue.h"
#include "nsThreadUtils.h"
#include "prsystem.h"
@@ -45,7 +47,7 @@ typedef int VAStatus;
#endif
// Use some extra HW frames for potential rendering lags.
-#define EXTRA_HW_FRAMES 6
+#define EXTRA_HW_FRAMES 20
typedef mozilla::layers::Image Image;
typedef mozilla::layers::PlanarYCbCrImage PlanarYCbCrImage;
@@ -124,8 +126,8 @@ static AVPixelFormat ChooseVAAPIPixelFor
return AV_PIX_FMT_NONE;
}
-DMABufSurfaceWrapper::DMABufSurfaceWrapper(DMABufSurface* aSurface,
- FFmpegLibWrapper* aLib)
+DMABufSurfaceWrapper<LIBAV_VER>::DMABufSurfaceWrapper(DMABufSurface* aSurface,
+ FFmpegLibWrapper* aLib)
: mSurface(aSurface),
mLib(aLib),
mAVHWFramesContext(nullptr),
@@ -138,8 +140,8 @@ DMABufSurfaceWrapper::DMABufSurfaceWrapp
mSurface->GetUID());
}
-void DMABufSurfaceWrapper::LockVAAPIData(AVCodecContext* aAVCodecContext,
- AVFrame* aAVFrame) {
+void DMABufSurfaceWrapper<LIBAV_VER>::LockVAAPIData(
+ AVCodecContext* aAVCodecContext, AVFrame* aAVFrame) {
FFMPEG_LOG("DMABufSurfaceWrapper: VAAPI locking dmabuf surface UID = %d",
mSurface->GetUID());
if (aAVCodecContext && aAVFrame) {
@@ -148,7 +150,7 @@ void DMABufSurfaceWrapper::LockVAAPIData
}
}
-void DMABufSurfaceWrapper::ReleaseVAAPIData() {
+void DMABufSurfaceWrapper<LIBAV_VER>::ReleaseVAAPIData() {
FFMPEG_LOG("DMABufSurfaceWrapper: VAAPI releasing dmabuf surface UID = %d",
mSurface->GetUID());
if (mHWAVBuffer && mAVHWFramesContext) {
@@ -158,7 +160,7 @@ void DMABufSurfaceWrapper::ReleaseVAAPID
mSurface->ReleaseSurface();
}
-DMABufSurfaceWrapper::~DMABufSurfaceWrapper() {
+DMABufSurfaceWrapper<LIBAV_VER>::~DMABufSurfaceWrapper() {
FFMPEG_LOG("DMABufSurfaceWrapper: deleting dmabuf surface UID = %d",
mSurface->GetUID());
ReleaseVAAPIData();
@@ -181,7 +183,14 @@ AVCodec* FFmpegVideoDecoder<LIBAV_VER>::
return nullptr;
}
-class VAAPIDisplayHolder {
+template <int V>
+class VAAPIDisplayHolder {};
+
+template <>
+class VAAPIDisplayHolder<LIBAV_VER>;
+
+template <>
+class VAAPIDisplayHolder<LIBAV_VER> {
public:
VAAPIDisplayHolder(FFmpegLibWrapper* aLib, VADisplay aDisplay)
: mLib(aLib), mDisplay(aDisplay){};
@@ -193,7 +202,8 @@ class VAAPIDisplayHolder {
};
static void VAAPIDisplayReleaseCallback(struct AVHWDeviceContext* hwctx) {
- auto displayHolder = static_cast<VAAPIDisplayHolder*>(hwctx->user_opaque);
+ auto displayHolder =
+ static_cast<VAAPIDisplayHolder<LIBAV_VER>*>(hwctx->user_opaque);
delete displayHolder;
}
@@ -229,7 +239,7 @@ bool FFmpegVideoDecoder<LIBAV_VER>::Crea
}
}
- hwctx->user_opaque = new VAAPIDisplayHolder(mLib, mDisplay);
+ hwctx->user_opaque = new VAAPIDisplayHolder<LIBAV_VER>(mLib, mDisplay);
hwctx->free = VAAPIDisplayReleaseCallback;
int major, minor;
@@ -703,7 +713,7 @@ void FFmpegVideoDecoder<LIBAV_VER>::Rele
}
}
-DMABufSurfaceWrapper*
+DMABufSurfaceWrapper<LIBAV_VER>*
FFmpegVideoDecoder<LIBAV_VER>::GetUnusedDMABufSurfaceWrapper() {
int len = mDMABufSurfaces.Length();
for (int i = 0; i < len; i++) {
@@ -769,7 +779,8 @@ MediaResult FFmpegVideoDecoder<LIBAV_VER
RefPtr<DMABufSurfaceYUV> surface;
- DMABufSurfaceWrapper* surfaceWrapper = GetUnusedDMABufSurfaceWrapper();
+ DMABufSurfaceWrapper<LIBAV_VER>* surfaceWrapper =
+ GetUnusedDMABufSurfaceWrapper();
if (!surfaceWrapper) {
if (mVAAPIDeviceContext) {
surface = DMABufSurfaceYUV::CreateYUVSurface(vaDesc);
diff -up firefox-84.0/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h.ffvpx firefox-84.0/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h
--- firefox-84.0/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h.ffvpx 2020-12-08 00:35:04.000000000 +0100
+++ firefox-84.0/dom/media/platforms/ffmpeg/FFmpegVideoDecoder.h 2020-12-10 20:40:53.388541336 +0100
@@ -55,7 +55,14 @@ namespace mozilla {
// We own the DMABufSurface underlying GPU data and we use it for
// repeated rendering of video frames.
//
-class DMABufSurfaceWrapper final {
+template <int V>
+class DMABufSurfaceWrapper {};
+
+template <>
+class DMABufSurfaceWrapper<LIBAV_VER>;
+
+template <>
+class DMABufSurfaceWrapper<LIBAV_VER> final {
public:
DMABufSurfaceWrapper(DMABufSurface* aSurface, FFmpegLibWrapper* aLib);
~DMABufSurfaceWrapper();
@@ -162,7 +169,7 @@ class FFmpegVideoDecoder<LIBAV_VER>
MediaDataDecoder::DecodedData& aResults);
void ReleaseUnusedVAAPIFrames();
- DMABufSurfaceWrapper* GetUnusedDMABufSurfaceWrapper();
+ DMABufSurfaceWrapper<LIBAV_VER>* GetUnusedDMABufSurfaceWrapper();
void ReleaseDMABufSurfaces();
#endif
@@ -180,7 +187,7 @@ class FFmpegVideoDecoder<LIBAV_VER>
const bool mDisableHardwareDecoding;
VADisplay mDisplay;
bool mUseDMABufSurfaces;
- nsTArray<DMABufSurfaceWrapper> mDMABufSurfaces;
+ nsTArray<DMABufSurfaceWrapper<LIBAV_VER>> mDMABufSurfaces;
#endif
RefPtr<KnowsCompositor> mImageAllocator;
RefPtr<ImageContainer> mImageContainer;
diff -up firefox-84.0/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp.ffvpx firefox-84.0/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp
--- firefox-84.0/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp.ffvpx 2020-12-08 00:35:04.000000000 +0100
+++ firefox-84.0/dom/media/platforms/ffmpeg/ffvpx/FFVPXRuntimeLinker.cpp 2020-12-10 20:40:53.388541336 +0100
@@ -64,6 +64,10 @@ bool FFVPXRuntimeLinker::Init() {
MOZ_ASSERT(NS_IsMainThread());
sLinkStatus = LinkStatus_FAILED;
+#ifdef MOZ_WAYLAND
+ sFFVPXLib.LinkVAAPILibs();
+#endif
+
// We retrieve the path of the lgpllibs library as this is where mozavcodec
// and mozavutil libs are located.
PathString lgpllibsname = GetLibraryName(nullptr, "lgpllibs");
diff -up firefox-84.0/dom/media/platforms/ffmpeg/ffvpx/moz.build.ffvpx firefox-84.0/dom/media/platforms/ffmpeg/ffvpx/moz.build
--- firefox-84.0/dom/media/platforms/ffmpeg/ffvpx/moz.build.ffvpx 2020-12-08 00:35:04.000000000 +0100
+++ firefox-84.0/dom/media/platforms/ffmpeg/ffvpx/moz.build 2020-12-10 20:36:08.431441959 +0100
@@ -36,4 +36,8 @@ if CONFIG["CC_TYPE"] == "gcc":
DEFINES["FFVPX_VERSION"] = 46465650
DEFINES["USING_MOZFFVPX"] = True
+if CONFIG["MOZ_WAYLAND"]:
+ CXXFLAGS += CONFIG["TK_CFLAGS"]
+ DEFINES["MOZ_WAYLAND_USE_VAAPI"] = 1
+
FINAL_LIBRARY = "xul"
diff -up firefox-84.0/gfx/layers/DMABUFSurfaceImage.cpp.ffvpx firefox-84.0/gfx/layers/DMABUFSurfaceImage.cpp
--- firefox-84.0/gfx/layers/DMABUFSurfaceImage.cpp.ffvpx 2020-12-08 00:35:04.000000000 +0100
+++ firefox-84.0/gfx/layers/DMABUFSurfaceImage.cpp 2020-12-10 20:36:08.308438509 +0100
@@ -5,16 +5,23 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "DMABUFSurfaceImage.h"
-#include "gfxPlatform.h"
+#include "mozilla/widget/DMABufSurface.h"
#include "mozilla/layers/CompositableClient.h"
#include "mozilla/layers/CompositableForwarder.h"
#include "mozilla/layers/DMABUFTextureClientOGL.h"
-#include "mozilla/UniquePtr.h"
+#include "mozilla/layers/TextureForwarder.h"
using namespace mozilla;
using namespace mozilla::layers;
using namespace mozilla::gfx;
+DMABUFSurfaceImage::DMABUFSurfaceImage(DMABufSurface* aSurface)
+ : Image(nullptr, ImageFormat::DMABUF), mSurface(aSurface) {
+ mSurface->GlobalRefAdd();
+}
+
+DMABUFSurfaceImage::~DMABUFSurfaceImage() { mSurface->GlobalRefRelease(); }
+
TextureClient* DMABUFSurfaceImage::GetTextureClient(
KnowsCompositor* aKnowsCompositor) {
if (!mTextureClient) {
@@ -25,3 +32,7 @@ TextureClient* DMABUFSurfaceImage::GetTe
}
return mTextureClient;
}
+
+gfx::IntSize DMABUFSurfaceImage::GetSize() const {
+ return gfx::IntSize::Truncate(mSurface->GetWidth(), mSurface->GetHeight());
+}
diff -up firefox-84.0/gfx/layers/DMABUFSurfaceImage.h.ffvpx firefox-84.0/gfx/layers/DMABUFSurfaceImage.h
--- firefox-84.0/gfx/layers/DMABUFSurfaceImage.h.ffvpx 2020-12-10 20:53:45.300792876 +0100
+++ firefox-84.0/gfx/layers/DMABUFSurfaceImage.h 2020-12-10 20:36:08.279437696 +0100
@@ -8,32 +8,24 @@
#define SURFACE_DMABUF_H
#include "ImageContainer.h"
-#include "mozilla/widget/DMABufSurface.h"
-#include "mozilla/gfx/Point.h"
-#include "mozilla/layers/TextureClient.h"
+
+class DMABufSurface;
namespace mozilla {
namespace layers {
+class TextureClient;
+
class DMABUFSurfaceImage : public Image {
public:
- explicit DMABUFSurfaceImage(DMABufSurface* aSurface)
- : Image(nullptr, ImageFormat::DMABUF), mSurface(aSurface) {
- mSurface->GlobalRefAdd();
- }
-
- ~DMABUFSurfaceImage() { mSurface->GlobalRefRelease(); }
+ explicit DMABUFSurfaceImage(DMABufSurface* aSurface);
+ ~DMABUFSurfaceImage();
DMABufSurface* GetSurface() { return mSurface; }
-
- gfx::IntSize GetSize() const override {
- return gfx::IntSize::Truncate(mSurface->GetWidth(), mSurface->GetHeight());
- }
-
+ gfx::IntSize GetSize() const override;
already_AddRefed<gfx::SourceSurface> GetAsSourceSurface() override {
return nullptr;
}
-
TextureClient* GetTextureClient(KnowsCompositor* aKnowsCompositor) override;
private:
diff -up firefox-84.0/media/ffvpx/changes.patch.ffvpx firefox-84.0/media/ffvpx/changes.patch
--- firefox-84.0/media/ffvpx/changes.patch.ffvpx 2020-12-08 00:35:04.000000000 +0100
+++ firefox-84.0/media/ffvpx/changes.patch 2020-12-10 20:40:53.388541336 +0100
@@ -50,3 +50,27 @@ index 9fb8d0a..97ad3b9 100644
rgba_color[0] = rgba >> 24;
rgba_color[1] = rgba >> 16;
rgba_color[2] = rgba >> 8;
+diff --git a/media/ffvpx/libavutil/hwcontext_vaapi.c b/media/ffvpx/libavutil/hwcontext_vaapi.c
+--- a/media/ffvpx/libavutil/hwcontext_vaapi.c
++++ b/media/ffvpx/libavutil/hwcontext_vaapi.c
+@@ -39,17 +39,19 @@
+ # include <unistd.h>
+ #endif
+
+
+ #include "avassert.h"
+ #include "buffer.h"
+ #include "common.h"
+ #include "hwcontext.h"
++#if CONFIG_LIBDRM
+ #include "hwcontext_drm.h"
++#endif
+ #include "hwcontext_internal.h"
+ #include "hwcontext_vaapi.h"
+ #include "mem.h"
+ #include "pixdesc.h"
+ #include "pixfmt.h"
+
+
+ typedef struct VAAPIDevicePriv {
+
diff -up firefox-84.0/media/ffvpx/config_common.h.ffvpx firefox-84.0/media/ffvpx/config_common.h
--- firefox-84.0/media/ffvpx/config_common.h.ffvpx 2020-12-08 00:35:05.000000000 +0100
+++ firefox-84.0/media/ffvpx/config_common.h 2020-12-10 20:40:53.388541336 +0100
@@ -18,4 +18,13 @@
#define CONFIG_RDFT 1
#endif
+#ifdef MOZ_WAYLAND
+#undef CONFIG_VAAPI
+#undef CONFIG_VP8_VAAPI_HWACCEL
+#undef CONFIG_VP9_VAAPI_HWACCEL
+#define CONFIG_VAAPI 1
+#define CONFIG_VP8_VAAPI_HWACCEL 1
+#define CONFIG_VP9_VAAPI_HWACCEL 1
+#endif
+
#endif
diff -up firefox-84.0/media/ffvpx/FILES.ffvpx firefox-84.0/media/ffvpx/FILES
--- firefox-84.0/media/ffvpx/FILES.ffvpx 2020-12-08 00:35:05.000000000 +0100
+++ firefox-84.0/media/ffvpx/FILES 2020-12-10 20:40:53.388541336 +0100
@@ -131,6 +131,11 @@
./libavcodec/thread.h
./libavcodec/unary.h
./libavcodec/utils.c
+./libavcodec/vaapi.h
+./libavcodec/vaapi_decode.h
+./libavcodec/vaapi_decode.c
+./libavcodec/vaapi_vp8.c
+./libavcodec/vaapi_vp9.c
./libavcodec/version.h
./libavcodec/videodsp.c
./libavcodec/videodsp.h
@@ -264,6 +269,8 @@
./libavutil/hwcontext.c
./libavutil/hwcontext.h
./libavutil/hwcontext_internal.h
+./libavutil/hwcontext_vaapi.h
+./libavutil/hwcontext_vaapi.c
./libavutil/imgutils.c
./libavutil/imgutils.h
./libavutil/imgutils_internal.h
diff -up firefox-84.0/media/ffvpx/libavcodec/avcodec.symbols.ffvpx firefox-84.0/media/ffvpx/libavcodec/avcodec.symbols
--- firefox-84.0/media/ffvpx/libavcodec/avcodec.symbols.ffvpx 2020-12-08 00:35:05.000000000 +0100
+++ firefox-84.0/media/ffvpx/libavcodec/avcodec.symbols 2020-12-10 20:40:53.389541365 +0100
@@ -28,6 +28,11 @@ av_get_pcm_codec
av_get_profile_name
av_grow_packet
av_hwaccel_next
+av_hwdevice_ctx_init
+av_hwdevice_ctx_alloc
+av_hwdevice_ctx_create_derived
+av_hwframe_transfer_get_formats
+av_hwframe_ctx_alloc
av_init_packet
av_lockmgr_register
av_new_packet
@@ -93,6 +98,7 @@ avcodec_free_context
avcodec_get_class
avcodec_get_context_defaults3
avcodec_get_frame_class
+avcodec_get_hw_config
avcodec_get_name
avcodec_get_subtitle_rect_class
avcodec_get_type
diff -up firefox-84.0/media/ffvpx/libavcodec/moz.build.ffvpx firefox-84.0/media/ffvpx/libavcodec/moz.build
--- firefox-84.0/media/ffvpx/libavcodec/moz.build.ffvpx 2020-12-10 20:40:53.383541192 +0100
+++ firefox-84.0/media/ffvpx/libavcodec/moz.build 2020-12-10 20:40:53.389541365 +0100
@@ -96,6 +96,13 @@ if not CONFIG['MOZ_FFVPX_AUDIOONLY']:
'vp9prob.c',
'vp9recon.c'
]
+ if CONFIG['MOZ_WAYLAND']:
+ SOURCES += [
+ 'vaapi_decode.c',
+ 'vaapi_vp8.c',
+ 'vaapi_vp9.c',
+ ]
+ USE_LIBS += ['mozva']
if CONFIG['MOZ_FDK_AAC']:
SOURCES += [
diff -up firefox-84.0/media/ffvpx/libavcodec/vaapi_decode.c.ffvpx firefox-84.0/media/ffvpx/libavcodec/vaapi_decode.c
--- firefox-84.0/media/ffvpx/libavcodec/vaapi_decode.c.ffvpx 2020-12-10 20:40:53.389541365 +0100
+++ firefox-84.0/media/ffvpx/libavcodec/vaapi_decode.c 2020-12-10 20:40:53.389541365 +0100
@@ -0,0 +1,732 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/avassert.h"
+#include "libavutil/common.h"
+#include "libavutil/pixdesc.h"
+
+#include "avcodec.h"
+#include "decode.h"
+#include "internal.h"
+#include "vaapi_decode.h"
+
+
+int ff_vaapi_decode_make_param_buffer(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic,
+ int type,
+ const void *data,
+ size_t size)
+{
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ VAStatus vas;
+ VABufferID buffer;
+
+ av_assert0(pic->nb_param_buffers + 1 <= MAX_PARAM_BUFFERS);
+
+ vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
+ type, size, 1, (void*)data, &buffer);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create parameter "
+ "buffer (type %d): %d (%s).\n",
+ type, vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ pic->param_buffers[pic->nb_param_buffers++] = buffer;
+
+ av_log(avctx, AV_LOG_DEBUG, "Param buffer (type %d, %zu bytes) "
+ "is %#x.\n", type, size, buffer);
+ return 0;
+}
+
+
+int ff_vaapi_decode_make_slice_buffer(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic,
+ const void *params_data,
+ size_t params_size,
+ const void *slice_data,
+ size_t slice_size)
+{
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ VAStatus vas;
+ int index;
+
+ av_assert0(pic->nb_slices <= pic->slices_allocated);
+ if (pic->nb_slices == pic->slices_allocated) {
+ if (pic->slices_allocated > 0)
+ pic->slices_allocated *= 2;
+ else
+ pic->slices_allocated = 64;
+
+ pic->slice_buffers =
+ av_realloc_array(pic->slice_buffers,
+ pic->slices_allocated,
+ 2 * sizeof(*pic->slice_buffers));
+ if (!pic->slice_buffers)
+ return AVERROR(ENOMEM);
+ }
+ av_assert0(pic->nb_slices + 1 <= pic->slices_allocated);
+
+ index = 2 * pic->nb_slices;
+
+ vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
+ VASliceParameterBufferType,
+ params_size, 1, (void*)params_data,
+ &pic->slice_buffers[index]);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create slice "
+ "parameter buffer: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ av_log(avctx, AV_LOG_DEBUG, "Slice %d param buffer (%zu bytes) "
+ "is %#x.\n", pic->nb_slices, params_size,
+ pic->slice_buffers[index]);
+
+ vas = vaCreateBuffer(ctx->hwctx->display, ctx->va_context,
+ VASliceDataBufferType,
+ slice_size, 1, (void*)slice_data,
+ &pic->slice_buffers[index + 1]);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create slice "
+ "data buffer (size %zu): %d (%s).\n",
+ slice_size, vas, vaErrorStr(vas));
+ vaDestroyBuffer(ctx->hwctx->display,
+ pic->slice_buffers[index]);
+ return AVERROR(EIO);
+ }
+
+ av_log(avctx, AV_LOG_DEBUG, "Slice %d data buffer (%zu bytes) "
+ "is %#x.\n", pic->nb_slices, slice_size,
+ pic->slice_buffers[index + 1]);
+
+ ++pic->nb_slices;
+ return 0;
+}
+
+static void ff_vaapi_decode_destroy_buffers(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic)
+{
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ VAStatus vas;
+ int i;
+
+ for (i = 0; i < pic->nb_param_buffers; i++) {
+ vas = vaDestroyBuffer(ctx->hwctx->display,
+ pic->param_buffers[i]);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to destroy "
+ "parameter buffer %#x: %d (%s).\n",
+ pic->param_buffers[i], vas, vaErrorStr(vas));
+ }
+ }
+
+ for (i = 0; i < 2 * pic->nb_slices; i++) {
+ vas = vaDestroyBuffer(ctx->hwctx->display,
+ pic->slice_buffers[i]);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to destroy slice "
+ "slice buffer %#x: %d (%s).\n",
+ pic->slice_buffers[i], vas, vaErrorStr(vas));
+ }
+ }
+}
+
+int ff_vaapi_decode_issue(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic)
+{
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ VAStatus vas;
+ int err;
+
+ av_log(avctx, AV_LOG_DEBUG, "Decode to surface %#x.\n",
+ pic->output_surface);
+
+ vas = vaBeginPicture(ctx->hwctx->display, ctx->va_context,
+ pic->output_surface);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to begin picture decode "
+ "issue: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_with_picture;
+ }
+
+ vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
+ pic->param_buffers, pic->nb_param_buffers);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to upload decode "
+ "parameters: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_with_picture;
+ }
+
+ vas = vaRenderPicture(ctx->hwctx->display, ctx->va_context,
+ pic->slice_buffers, 2 * pic->nb_slices);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to upload slices: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_with_picture;
+ }
+
+ vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to end picture decode "
+ "issue: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ if (CONFIG_VAAPI_1 || ctx->hwctx->driver_quirks &
+ AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS)
+ goto fail;
+ else
+ goto fail_at_end;
+ }
+
+ if (CONFIG_VAAPI_1 || ctx->hwctx->driver_quirks &
+ AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS)
+ ff_vaapi_decode_destroy_buffers(avctx, pic);
+
+ err = 0;
+ goto exit;
+
+fail_with_picture:
+ vas = vaEndPicture(ctx->hwctx->display, ctx->va_context);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to end picture decode "
+ "after error: %d (%s).\n", vas, vaErrorStr(vas));
+ }
+fail:
+ ff_vaapi_decode_destroy_buffers(avctx, pic);
+fail_at_end:
+exit:
+ pic->nb_param_buffers = 0;
+ pic->nb_slices = 0;
+ pic->slices_allocated = 0;
+ av_freep(&pic->slice_buffers);
+
+ return err;
+}
+
+int ff_vaapi_decode_cancel(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic)
+{
+ ff_vaapi_decode_destroy_buffers(avctx, pic);
+
+ pic->nb_param_buffers = 0;
+ pic->nb_slices = 0;
+ pic->slices_allocated = 0;
+ av_freep(&pic->slice_buffers);
+
+ return 0;
+}
+
+static const struct {
+ uint32_t fourcc;
+ enum AVPixelFormat pix_fmt;
+} vaapi_format_map[] = {
+#define MAP(va, av) { VA_FOURCC_ ## va, AV_PIX_FMT_ ## av }
+ // 4:0:0
+ MAP(Y800, GRAY8),
+ // 4:2:0
+ MAP(NV12, NV12),
+ MAP(YV12, YUV420P),
+ MAP(IYUV, YUV420P),
+#ifdef VA_FOURCC_I420
+ MAP(I420, YUV420P),
+#endif
+ MAP(IMC3, YUV420P),
+ // 4:1:1
+ MAP(411P, YUV411P),
+ // 4:2:2
+ MAP(422H, YUV422P),
+#ifdef VA_FOURCC_YV16
+ MAP(YV16, YUV422P),
+#endif
+ // 4:4:0
+ MAP(422V, YUV440P),
+ // 4:4:4
+ MAP(444P, YUV444P),
+ // 4:2:0 10-bit
+#ifdef VA_FOURCC_P010
+ MAP(P010, P010),
+#endif
+#ifdef VA_FOURCC_I010
+ MAP(I010, YUV420P10),
+#endif
+#undef MAP
+};
+
+static int vaapi_decode_find_best_format(AVCodecContext *avctx,
+ AVHWDeviceContext *device,
+ VAConfigID config_id,
+ AVHWFramesContext *frames)
+{
+ AVVAAPIDeviceContext *hwctx = device->hwctx;
+ VAStatus vas;
+ VASurfaceAttrib *attr;
+ enum AVPixelFormat source_format, best_format, format;
+ uint32_t best_fourcc, fourcc;
+ int i, j, nb_attr;
+
+ source_format = avctx->sw_pix_fmt;
+ av_assert0(source_format != AV_PIX_FMT_NONE);
+
+ vas = vaQuerySurfaceAttributes(hwctx->display, config_id,
+ NULL, &nb_attr);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to query surface attributes: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(ENOSYS);
+ }
+
+ attr = av_malloc_array(nb_attr, sizeof(*attr));
+ if (!attr)
+ return AVERROR(ENOMEM);
+
+ vas = vaQuerySurfaceAttributes(hwctx->display, config_id,
+ attr, &nb_attr);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to query surface attributes: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ av_freep(&attr);
+ return AVERROR(ENOSYS);
+ }
+
+ best_format = AV_PIX_FMT_NONE;
+
+ for (i = 0; i < nb_attr; i++) {
+ if (attr[i].type != VASurfaceAttribPixelFormat)
+ continue;
+
+ fourcc = attr[i].value.value.i;
+ for (j = 0; j < FF_ARRAY_ELEMS(vaapi_format_map); j++) {
+ if (fourcc == vaapi_format_map[j].fourcc)
+ break;
+ }
+ if (j >= FF_ARRAY_ELEMS(vaapi_format_map)) {
+ av_log(avctx, AV_LOG_DEBUG, "Ignoring unknown format %#x.\n",
+ fourcc);
+ continue;
+ }
+ format = vaapi_format_map[j].pix_fmt;
+ av_log(avctx, AV_LOG_DEBUG, "Considering format %#x -> %s.\n",
+ fourcc, av_get_pix_fmt_name(format));
+
+ best_format = av_find_best_pix_fmt_of_2(format, best_format,
+ source_format, 0, NULL);
+ if (format == best_format)
+ best_fourcc = fourcc;
+ }
+
+ av_freep(&attr);
+
+ if (best_format == AV_PIX_FMT_NONE) {
+ av_log(avctx, AV_LOG_ERROR, "No usable formats for decoding!\n");
+ return AVERROR(EINVAL);
+ }
+
+ av_log(avctx, AV_LOG_DEBUG, "Picked %s (%#x) as best match for %s.\n",
+ av_get_pix_fmt_name(best_format), best_fourcc,
+ av_get_pix_fmt_name(source_format));
+
+ frames->sw_format = best_format;
+ if (avctx->internal->hwaccel_priv_data) {
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ AVVAAPIFramesContext *avfc = frames->hwctx;
+
+ ctx->pixel_format_attribute = (VASurfaceAttrib) {
+ .type = VASurfaceAttribPixelFormat,
+ .value.value.i = best_fourcc,
+ };
+
+ avfc->attributes = &ctx->pixel_format_attribute;
+ avfc->nb_attributes = 1;
+ }
+
+ return 0;
+}
+
+static const struct {
+ enum AVCodecID codec_id;
+ int codec_profile;
+ VAProfile va_profile;
+} vaapi_profile_map[] = {
+#define MAP(c, p, v) { AV_CODEC_ID_ ## c, FF_PROFILE_ ## p, VAProfile ## v }
+ MAP(MPEG2VIDEO, MPEG2_SIMPLE, MPEG2Simple ),
+ MAP(MPEG2VIDEO, MPEG2_MAIN, MPEG2Main ),
+ MAP(H263, UNKNOWN, H263Baseline),
+ MAP(MPEG4, MPEG4_SIMPLE, MPEG4Simple ),
+ MAP(MPEG4, MPEG4_ADVANCED_SIMPLE,
+ MPEG4AdvancedSimple),
+ MAP(MPEG4, MPEG4_MAIN, MPEG4Main ),
+ MAP(H264, H264_CONSTRAINED_BASELINE,
+ H264ConstrainedBaseline),
+ MAP(H264, H264_MAIN, H264Main ),
+ MAP(H264, H264_HIGH, H264High ),
+#if VA_CHECK_VERSION(0, 37, 0)
+ MAP(HEVC, HEVC_MAIN, HEVCMain ),
+ MAP(HEVC, HEVC_MAIN_10, HEVCMain10 ),
+#endif
+ MAP(MJPEG, MJPEG_HUFFMAN_BASELINE_DCT,
+ JPEGBaseline),
+ MAP(WMV3, VC1_SIMPLE, VC1Simple ),
+ MAP(WMV3, VC1_MAIN, VC1Main ),
+ MAP(WMV3, VC1_COMPLEX, VC1Advanced ),
+ MAP(WMV3, VC1_ADVANCED, VC1Advanced ),
+ MAP(VC1, VC1_SIMPLE, VC1Simple ),
+ MAP(VC1, VC1_MAIN, VC1Main ),
+ MAP(VC1, VC1_COMPLEX, VC1Advanced ),
+ MAP(VC1, VC1_ADVANCED, VC1Advanced ),
+ MAP(VP8, UNKNOWN, VP8Version0_3 ),
+#if VA_CHECK_VERSION(0, 38, 0)
+ MAP(VP9, VP9_0, VP9Profile0 ),
+#endif
+#if VA_CHECK_VERSION(0, 39, 0)
+ MAP(VP9, VP9_2, VP9Profile2 ),
+#endif
+#undef MAP
+};
+
+/*
+ * Set *va_config and the frames_ref fields from the current codec parameters
+ * in avctx.
+ */
+static int vaapi_decode_make_config(AVCodecContext *avctx,
+ AVBufferRef *device_ref,
+ VAConfigID *va_config,
+ AVBufferRef *frames_ref)
+{
+ AVVAAPIHWConfig *hwconfig = NULL;
+ AVHWFramesConstraints *constraints = NULL;
+ VAStatus vas;
+ int err, i, j;
+ const AVCodecDescriptor *codec_desc;
+ VAProfile *profile_list = NULL, matched_va_profile;
+ int profile_count, exact_match, matched_ff_profile;
+
+ AVHWDeviceContext *device = (AVHWDeviceContext*)device_ref->data;
+ AVVAAPIDeviceContext *hwctx = device->hwctx;
+
+ codec_desc = avcodec_descriptor_get(avctx->codec_id);
+ if (!codec_desc) {
+ err = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ profile_count = vaMaxNumProfiles(hwctx->display);
+ profile_list = av_malloc_array(profile_count,
+ sizeof(VAProfile));
+ if (!profile_list) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ vas = vaQueryConfigProfiles(hwctx->display,
+ profile_list, &profile_count);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to query profiles: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(ENOSYS);
+ goto fail;
+ }
+
+ matched_va_profile = VAProfileNone;
+ exact_match = 0;
+
+ for (i = 0; i < FF_ARRAY_ELEMS(vaapi_profile_map); i++) {
+ int profile_match = 0;
+ if (avctx->codec_id != vaapi_profile_map[i].codec_id)
+ continue;
+ if (avctx->profile == vaapi_profile_map[i].codec_profile ||
+ vaapi_profile_map[i].codec_profile == FF_PROFILE_UNKNOWN)
+ profile_match = 1;
+ for (j = 0; j < profile_count; j++) {
+ if (vaapi_profile_map[i].va_profile == profile_list[j]) {
+ exact_match = profile_match;
+ break;
+ }
+ }
+ if (j < profile_count) {
+ matched_va_profile = vaapi_profile_map[i].va_profile;
+ matched_ff_profile = vaapi_profile_map[i].codec_profile;
+ if (exact_match)
+ break;
+ }
+ }
+ av_freep(&profile_list);
+
+ if (matched_va_profile == VAProfileNone) {
+ av_log(avctx, AV_LOG_ERROR, "No support for codec %s "
+ "profile %d.\n", codec_desc->name, avctx->profile);
+ err = AVERROR(ENOSYS);
+ goto fail;
+ }
+ if (!exact_match) {
+ if (avctx->hwaccel_flags &
+ AV_HWACCEL_FLAG_ALLOW_PROFILE_MISMATCH) {
+ av_log(avctx, AV_LOG_VERBOSE, "Codec %s profile %d not "
+ "supported for hardware decode.\n",
+ codec_desc->name, avctx->profile);
+ av_log(avctx, AV_LOG_WARNING, "Using possibly-"
+ "incompatible profile %d instead.\n",
+ matched_ff_profile);
+ } else {
+ av_log(avctx, AV_LOG_VERBOSE, "Codec %s profile %d not "
+ "supported for hardware decode.\n",
+ codec_desc->name, avctx->profile);
+ err = AVERROR(EINVAL);
+ goto fail;
+ }
+ }
+
+ vas = vaCreateConfig(hwctx->display, matched_va_profile,
+ VAEntrypointVLD, NULL, 0,
+ va_config);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create decode "
+ "configuration: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ hwconfig = av_hwdevice_hwconfig_alloc(device_ref);
+ if (!hwconfig) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ hwconfig->config_id = *va_config;
+
+ constraints =
+ av_hwdevice_get_hwframe_constraints(device_ref, hwconfig);
+ if (!constraints) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ if (avctx->coded_width < constraints->min_width ||
+ avctx->coded_height < constraints->min_height ||
+ avctx->coded_width > constraints->max_width ||
+ avctx->coded_height > constraints->max_height) {
+ av_log(avctx, AV_LOG_ERROR, "Hardware does not support image "
+ "size %dx%d (constraints: width %d-%d height %d-%d).\n",
+ avctx->coded_width, avctx->coded_height,
+ constraints->min_width, constraints->max_width,
+ constraints->min_height, constraints->max_height);
+ err = AVERROR(EINVAL);
+ goto fail;
+ }
+ if (!constraints->valid_sw_formats ||
+ constraints->valid_sw_formats[0] == AV_PIX_FMT_NONE) {
+ av_log(avctx, AV_LOG_ERROR, "Hardware does not offer any "
+ "usable surface formats.\n");
+ err = AVERROR(EINVAL);
+ goto fail;
+ }
+
+ if (frames_ref) {
+ AVHWFramesContext *frames = (AVHWFramesContext *)frames_ref->data;
+
+ frames->format = AV_PIX_FMT_VAAPI;
+ frames->width = avctx->coded_width;
+ frames->height = avctx->coded_height;
+
+ err = vaapi_decode_find_best_format(avctx, device,
+ *va_config, frames);
+ if (err < 0)
+ goto fail;
+
+ frames->initial_pool_size = 1;
+ // Add per-codec number of surfaces used for storing reference frames.
+ switch (avctx->codec_id) {
+ case AV_CODEC_ID_H264:
+ case AV_CODEC_ID_HEVC:
+ frames->initial_pool_size += 16;
+ break;
+ case AV_CODEC_ID_VP9:
+ frames->initial_pool_size += 8;
+ break;
+ case AV_CODEC_ID_VP8:
+ frames->initial_pool_size += 3;
+ break;
+ default:
+ frames->initial_pool_size += 2;
+ }
+ }
+
+ av_hwframe_constraints_free(&constraints);
+ av_freep(&hwconfig);
+
+ return 0;
+
+fail:
+ av_hwframe_constraints_free(&constraints);
+ av_freep(&hwconfig);
+ if (*va_config != VA_INVALID_ID) {
+ vaDestroyConfig(hwctx->display, *va_config);
+ *va_config = VA_INVALID_ID;
+ }
+ av_freep(&profile_list);
+ return err;
+}
+
+int ff_vaapi_common_frame_params(AVCodecContext *avctx,
+ AVBufferRef *hw_frames_ctx)
+{
+ AVHWFramesContext *hw_frames = (AVHWFramesContext *)hw_frames_ctx->data;
+ AVHWDeviceContext *device_ctx = hw_frames->device_ctx;
+ AVVAAPIDeviceContext *hwctx;
+ VAConfigID va_config = VA_INVALID_ID;
+ int err;
+
+ if (device_ctx->type != AV_HWDEVICE_TYPE_VAAPI)
+ return AVERROR(EINVAL);
+ hwctx = device_ctx->hwctx;
+
+ err = vaapi_decode_make_config(avctx, hw_frames->device_ref, &va_config,
+ hw_frames_ctx);
+ if (err)
+ return err;
+
+ if (va_config != VA_INVALID_ID)
+ vaDestroyConfig(hwctx->display, va_config);
+
+ return 0;
+}
+
+int ff_vaapi_decode_init(AVCodecContext *avctx)
+{
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ VAStatus vas;
+ int err;
+
+ ctx->va_config = VA_INVALID_ID;
+ ctx->va_context = VA_INVALID_ID;
+
+#if FF_API_STRUCT_VAAPI_CONTEXT
+ if (avctx->hwaccel_context) {
+ av_log(avctx, AV_LOG_WARNING, "Using deprecated struct "
+ "vaapi_context in decode.\n");
+
+ ctx->have_old_context = 1;
+ ctx->old_context = avctx->hwaccel_context;
+
+ // Really we only want the VAAPI device context, but this
+ // allocates a whole generic device context because we don't
+ // have any other way to determine how big it should be.
+ ctx->device_ref =
+ av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_VAAPI);
+ if (!ctx->device_ref) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ ctx->device = (AVHWDeviceContext*)ctx->device_ref->data;
+ ctx->hwctx = ctx->device->hwctx;
+
+ ctx->hwctx->display = ctx->old_context->display;
+
+ // The old VAAPI decode setup assumed this quirk was always
+ // present, so set it here to avoid the behaviour changing.
+ ctx->hwctx->driver_quirks =
+ AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS;
+
+ }
+#endif
+
+#if FF_API_STRUCT_VAAPI_CONTEXT
+ if (ctx->have_old_context) {
+ ctx->va_config = ctx->old_context->config_id;
+ ctx->va_context = ctx->old_context->context_id;
+
+ av_log(avctx, AV_LOG_DEBUG, "Using user-supplied decoder "
+ "context: %#x/%#x.\n", ctx->va_config, ctx->va_context);
+ } else {
+#endif
+
+ err = ff_decode_get_hw_frames_ctx(avctx, AV_HWDEVICE_TYPE_VAAPI);
+ if (err < 0)
+ goto fail;
+
+ ctx->frames = (AVHWFramesContext*)avctx->hw_frames_ctx->data;
+ ctx->hwfc = ctx->frames->hwctx;
+ ctx->device = ctx->frames->device_ctx;
+ ctx->hwctx = ctx->device->hwctx;
+
+ err = vaapi_decode_make_config(avctx, ctx->frames->device_ref,
+ &ctx->va_config, avctx->hw_frames_ctx);
+ if (err)
+ goto fail;
+
+ vas = vaCreateContext(ctx->hwctx->display, ctx->va_config,
+ avctx->coded_width, avctx->coded_height,
+ VA_PROGRESSIVE,
+ ctx->hwfc->surface_ids,
+ ctx->hwfc->nb_surfaces,
+ &ctx->va_context);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to create decode "
+ "context: %d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ av_log(avctx, AV_LOG_DEBUG, "Decode context initialised: "
+ "%#x/%#x.\n", ctx->va_config, ctx->va_context);
+#if FF_API_STRUCT_VAAPI_CONTEXT
+ }
+#endif
+
+ return 0;
+
+fail:
+ ff_vaapi_decode_uninit(avctx);
+ return err;
+}
+
+int ff_vaapi_decode_uninit(AVCodecContext *avctx)
+{
+ VAAPIDecodeContext *ctx = avctx->internal->hwaccel_priv_data;
+ VAStatus vas;
+
+#if FF_API_STRUCT_VAAPI_CONTEXT
+ if (ctx->have_old_context) {
+ av_buffer_unref(&ctx->device_ref);
+ } else {
+#endif
+
+ if (ctx->va_context != VA_INVALID_ID) {
+ vas = vaDestroyContext(ctx->hwctx->display, ctx->va_context);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to destroy decode "
+ "context %#x: %d (%s).\n",
+ ctx->va_context, vas, vaErrorStr(vas));
+ }
+ }
+ if (ctx->va_config != VA_INVALID_ID) {
+ vas = vaDestroyConfig(ctx->hwctx->display, ctx->va_config);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(avctx, AV_LOG_ERROR, "Failed to destroy decode "
+ "configuration %#x: %d (%s).\n",
+ ctx->va_config, vas, vaErrorStr(vas));
+ }
+ }
+
+#if FF_API_STRUCT_VAAPI_CONTEXT
+ }
+#endif
+
+ return 0;
+}
diff -up firefox-84.0/media/ffvpx/libavcodec/vaapi_decode.h.ffvpx firefox-84.0/media/ffvpx/libavcodec/vaapi_decode.h
--- firefox-84.0/media/ffvpx/libavcodec/vaapi_decode.h.ffvpx 2020-12-10 20:40:53.389541365 +0100
+++ firefox-84.0/media/ffvpx/libavcodec/vaapi_decode.h 2020-12-10 20:40:53.389541365 +0100
@@ -0,0 +1,105 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VAAPI_DECODE_H
+#define AVCODEC_VAAPI_DECODE_H
+
+#include <va/va.h>
+#include <va/va_dec_vp9.h>
+
+#include "libavutil/frame.h"
+#include "libavutil/hwcontext.h"
+#include "libavutil/hwcontext_vaapi.h"
+
+#include "avcodec.h"
+
+#include "version.h"
+#if FF_API_STRUCT_VAAPI_CONTEXT
+#include "vaapi.h"
+#endif
+
+static inline VASurfaceID ff_vaapi_get_surface_id(AVFrame *pic)
+{
+ return (uintptr_t)pic->data[3];
+}
+
+enum {
+ MAX_PARAM_BUFFERS = 16,
+};
+
+typedef struct VAAPIDecodePicture {
+ VASurfaceID output_surface;
+
+ int nb_param_buffers;
+ VABufferID param_buffers[MAX_PARAM_BUFFERS];
+
+ int nb_slices;
+ VABufferID *slice_buffers;
+ int slices_allocated;
+} VAAPIDecodePicture;
+
+typedef struct VAAPIDecodeContext {
+ VAConfigID va_config;
+ VAContextID va_context;
+
+#if FF_API_STRUCT_VAAPI_CONTEXT
+FF_DISABLE_DEPRECATION_WARNINGS
+ int have_old_context;
+ struct vaapi_context *old_context;
+ AVBufferRef *device_ref;
+FF_ENABLE_DEPRECATION_WARNINGS
+#endif
+
+ AVHWDeviceContext *device;
+ AVVAAPIDeviceContext *hwctx;
+
+ AVHWFramesContext *frames;
+ AVVAAPIFramesContext *hwfc;
+
+ enum AVPixelFormat surface_format;
+ int surface_count;
+
+ VASurfaceAttrib pixel_format_attribute;
+} VAAPIDecodeContext;
+
+
+int ff_vaapi_decode_make_param_buffer(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic,
+ int type,
+ const void *data,
+ size_t size);
+
+int ff_vaapi_decode_make_slice_buffer(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic,
+ const void *params_data,
+ size_t params_size,
+ const void *slice_data,
+ size_t slice_size);
+
+int ff_vaapi_decode_issue(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic);
+int ff_vaapi_decode_cancel(AVCodecContext *avctx,
+ VAAPIDecodePicture *pic);
+
+int ff_vaapi_decode_init(AVCodecContext *avctx);
+int ff_vaapi_decode_uninit(AVCodecContext *avctx);
+
+int ff_vaapi_common_frame_params(AVCodecContext *avctx,
+ AVBufferRef *hw_frames_ctx);
+
+#endif /* AVCODEC_VAAPI_DECODE_H */
diff -up firefox-84.0/media/ffvpx/libavcodec/vaapi.h.ffvpx firefox-84.0/media/ffvpx/libavcodec/vaapi.h
--- firefox-84.0/media/ffvpx/libavcodec/vaapi.h.ffvpx 2020-12-10 20:40:53.389541365 +0100
+++ firefox-84.0/media/ffvpx/libavcodec/vaapi.h 2020-12-10 20:40:53.389541365 +0100
@@ -0,0 +1,86 @@
+/*
+ * Video Acceleration API (shared data between FFmpeg and the video player)
+ * HW decode acceleration for MPEG-2, MPEG-4, H.264 and VC-1
+ *
+ * Copyright (C) 2008-2009 Splitted-Desktop Systems
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVCODEC_VAAPI_H
+#define AVCODEC_VAAPI_H
+
+/**
+ * @file
+ * @ingroup lavc_codec_hwaccel_vaapi
+ * Public libavcodec VA API header.
+ */
+
+#include <stdint.h>
+#include "libavutil/attributes.h"
+#include "version.h"
+
+#if FF_API_STRUCT_VAAPI_CONTEXT
+
+/**
+ * @defgroup lavc_codec_hwaccel_vaapi VA API Decoding
+ * @ingroup lavc_codec_hwaccel
+ * @{
+ */
+
+/**
+ * This structure is used to share data between the FFmpeg library and
+ * the client video application.
+ * This shall be zero-allocated and available as
+ * AVCodecContext.hwaccel_context. All user members can be set once
+ * during initialization or through each AVCodecContext.get_buffer()
+ * function call. In any case, they must be valid prior to calling
+ * decoding functions.
+ *
+ * Deprecated: use AVCodecContext.hw_frames_ctx instead.
+ */
+struct attribute_deprecated vaapi_context {
+ /**
+ * Window system dependent data
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ void *display;
+
+ /**
+ * Configuration ID
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t config_id;
+
+ /**
+ * Context ID (video decode pipeline)
+ *
+ * - encoding: unused
+ * - decoding: Set by user
+ */
+ uint32_t context_id;
+};
+
+/* @} */
+
+#endif /* FF_API_STRUCT_VAAPI_CONTEXT */
+
+#endif /* AVCODEC_VAAPI_H */
diff -up firefox-84.0/media/ffvpx/libavcodec/vaapi_vp8.c.ffvpx firefox-84.0/media/ffvpx/libavcodec/vaapi_vp8.c
--- firefox-84.0/media/ffvpx/libavcodec/vaapi_vp8.c.ffvpx 2020-12-10 20:40:53.389541365 +0100
+++ firefox-84.0/media/ffvpx/libavcodec/vaapi_vp8.c 2020-12-10 20:40:53.389541365 +0100
@@ -0,0 +1,237 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include <va/va.h>
+#include <va/va_dec_vp8.h>
+
+#include "hwaccel.h"
+#include "vaapi_decode.h"
+#include "vp8.h"
+
+static VASurfaceID vaapi_vp8_surface_id(VP8Frame *vf)
+{
+ if (vf)
+ return ff_vaapi_get_surface_id(vf->tf.f);
+ else
+ return VA_INVALID_SURFACE;
+}
+
+static int vaapi_vp8_start_frame(AVCodecContext *avctx,
+ av_unused const uint8_t *buffer,
+ av_unused uint32_t size)
+{
+ const VP8Context *s = avctx->priv_data;
+ VAAPIDecodePicture *pic = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private;
+ VAPictureParameterBufferVP8 pp;
+ VAProbabilityDataBufferVP8 prob;
+ VAIQMatrixBufferVP8 quant;
+ int err, i, j, k;
+
+ pic->output_surface = vaapi_vp8_surface_id(s->framep[VP56_FRAME_CURRENT]);
+
+ pp = (VAPictureParameterBufferVP8) {
+ .frame_width = avctx->width,
+ .frame_height = avctx->height,
+
+ .last_ref_frame = vaapi_vp8_surface_id(s->framep[VP56_FRAME_PREVIOUS]),
+ .golden_ref_frame = vaapi_vp8_surface_id(s->framep[VP56_FRAME_GOLDEN]),
+ .alt_ref_frame = vaapi_vp8_surface_id(s->framep[VP56_FRAME_GOLDEN2]),
+ .out_of_loop_frame = VA_INVALID_SURFACE,
+
+ .pic_fields.bits = {
+ .key_frame = !s->keyframe,
+ .version = s->profile,
+
+ .segmentation_enabled = s->segmentation.enabled,
+ .update_mb_segmentation_map = s->segmentation.update_map,
+ .update_segment_feature_data = s->segmentation.update_feature_data,
+
+ .filter_type = s->filter.simple,
+ .sharpness_level = s->filter.sharpness,
+
+ .loop_filter_adj_enable = s->lf_delta.enabled,
+ .mode_ref_lf_delta_update = s->lf_delta.update,
+
+ .sign_bias_golden = s->sign_bias[VP56_FRAME_GOLDEN],
+ .sign_bias_alternate = s->sign_bias[VP56_FRAME_GOLDEN2],
+
+ .mb_no_coeff_skip = s->mbskip_enabled,
+ .loop_filter_disable = s->filter.level == 0,
+ },
+
+ .prob_skip_false = s->prob->mbskip,
+ .prob_intra = s->prob->intra,
+ .prob_last = s->prob->last,
+ .prob_gf = s->prob->golden,
+ };
+
+ for (i = 0; i < 3; i++)
+ pp.mb_segment_tree_probs[i] = s->prob->segmentid[i];
+
+ for (i = 0; i < 4; i++) {
+ if (s->segmentation.enabled) {
+ pp.loop_filter_level[i] = s->segmentation.filter_level[i];
+ if (!s->segmentation.absolute_vals)
+ pp.loop_filter_level[i] += s->filter.level;
+ } else {
+ pp.loop_filter_level[i] = s->filter.level;
+ }
+ pp.loop_filter_level[i] = av_clip_uintp2(pp.loop_filter_level[i], 6);
+ }
+
+ for (i = 0; i < 4; i++) {
+ pp.loop_filter_deltas_ref_frame[i] = s->lf_delta.ref[i];
+ pp.loop_filter_deltas_mode[i] = s->lf_delta.mode[i + 4];
+ }
+
+ if (s->keyframe) {
+ static const uint8_t keyframe_y_mode_probs[4] = {
+ 145, 156, 163, 128
+ };
+ static const uint8_t keyframe_uv_mode_probs[3] = {
+ 142, 114, 183
+ };
+ memcpy(pp.y_mode_probs, keyframe_y_mode_probs, 4);
+ memcpy(pp.uv_mode_probs, keyframe_uv_mode_probs, 3);
+ } else {
+ for (i = 0; i < 4; i++)
+ pp.y_mode_probs[i] = s->prob->pred16x16[i];
+ for (i = 0; i < 3; i++)
+ pp.uv_mode_probs[i] = s->prob->pred8x8c[i];
+ }
+ for (i = 0; i < 2; i++)
+ for (j = 0; j < 19; j++)
+ pp.mv_probs[i][j] = s->prob->mvc[i][j];
+
+ pp.bool_coder_ctx.range = s->coder_state_at_header_end.range;
+ pp.bool_coder_ctx.value = s->coder_state_at_header_end.value;
+ pp.bool_coder_ctx.count = s->coder_state_at_header_end.bit_count;
+
+ err = ff_vaapi_decode_make_param_buffer(avctx, pic,
+ VAPictureParameterBufferType,
+ &pp, sizeof(pp));
+ if (err < 0)
+ goto fail;
+
+ for (i = 0; i < 4; i++) {
+ for (j = 0; j < 8; j++) {
+ static const int coeff_bands_inverse[8] = {
+ 0, 1, 2, 3, 5, 6, 4, 15
+ };
+ int coeff_pos = coeff_bands_inverse[j];
+
+ for (k = 0; k < 3; k++) {
+ memcpy(prob.dct_coeff_probs[i][j][k],
+ s->prob->token[i][coeff_pos][k], 11);
+ }
+ }
+ }
+
+ err = ff_vaapi_decode_make_param_buffer(avctx, pic,
+ VAProbabilityBufferType,
+ &prob, sizeof(prob));
+ if (err < 0)
+ goto fail;
+
+ for (i = 0; i < 4; i++) {
+ int base_qi = s->segmentation.base_quant[i];
+ if (!s->segmentation.absolute_vals)
+ base_qi += s->quant.yac_qi;
+
+ quant.quantization_index[i][0] = av_clip_uintp2(base_qi, 7);
+ quant.quantization_index[i][1] = av_clip_uintp2(base_qi + s->quant.ydc_delta, 7);
+ quant.quantization_index[i][2] = av_clip_uintp2(base_qi + s->quant.y2dc_delta, 7);
+ quant.quantization_index[i][3] = av_clip_uintp2(base_qi + s->quant.y2ac_delta, 7);
+ quant.quantization_index[i][4] = av_clip_uintp2(base_qi + s->quant.uvdc_delta, 7);
+ quant.quantization_index[i][5] = av_clip_uintp2(base_qi + s->quant.uvac_delta, 7);
+ }
+
+ err = ff_vaapi_decode_make_param_buffer(avctx, pic,
+ VAIQMatrixBufferType,
+ &quant, sizeof(quant));
+ if (err < 0)
+ goto fail;
+
+ return 0;
+
+fail:
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
+}
+
+static int vaapi_vp8_end_frame(AVCodecContext *avctx)
+{
+ const VP8Context *s = avctx->priv_data;
+ VAAPIDecodePicture *pic = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private;
+
+ return ff_vaapi_decode_issue(avctx, pic);
+}
+
+static int vaapi_vp8_decode_slice(AVCodecContext *avctx,
+ const uint8_t *buffer,
+ uint32_t size)
+{
+ const VP8Context *s = avctx->priv_data;
+ VAAPIDecodePicture *pic = s->framep[VP56_FRAME_CURRENT]->hwaccel_picture_private;
+ VASliceParameterBufferVP8 sp;
+ int err, i;
+
+ unsigned int header_size = 3 + 7 * s->keyframe;
+ const uint8_t *data = buffer + header_size;
+ unsigned int data_size = size - header_size;
+
+ sp = (VASliceParameterBufferVP8) {
+ .slice_data_size = data_size,
+ .slice_data_offset = 0,
+ .slice_data_flag = VA_SLICE_DATA_FLAG_ALL,
+
+ .macroblock_offset = (8 * (s->coder_state_at_header_end.input - data) -
+ s->coder_state_at_header_end.bit_count - 8),
+ .num_of_partitions = s->num_coeff_partitions + 1,
+ };
+
+ sp.partition_size[0] = s->header_partition_size - ((sp.macroblock_offset + 7) / 8);
+ for (i = 0; i < 8; i++)
+ sp.partition_size[i+1] = s->coeff_partition_size[i];
+
+ err = ff_vaapi_decode_make_slice_buffer(avctx, pic, &sp, sizeof(sp), data, data_size);
+ if (err)
+ goto fail;
+
+ return 0;
+
+fail:
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
+}
+
+const AVHWAccel ff_vp8_vaapi_hwaccel = {
+ .name = "vp8_vaapi",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VP8,
+ .pix_fmt = AV_PIX_FMT_VAAPI,
+ .start_frame = &vaapi_vp8_start_frame,
+ .end_frame = &vaapi_vp8_end_frame,
+ .decode_slice = &vaapi_vp8_decode_slice,
+ .frame_priv_data_size = sizeof(VAAPIDecodePicture),
+ .init = &ff_vaapi_decode_init,
+ .uninit = &ff_vaapi_decode_uninit,
+ .frame_params = &ff_vaapi_common_frame_params,
+ .priv_data_size = sizeof(VAAPIDecodeContext),
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
+};
diff -up firefox-84.0/media/ffvpx/libavcodec/vaapi_vp9.c.ffvpx firefox-84.0/media/ffvpx/libavcodec/vaapi_vp9.c
--- firefox-84.0/media/ffvpx/libavcodec/vaapi_vp9.c.ffvpx 2020-12-10 20:40:53.389541365 +0100
+++ firefox-84.0/media/ffvpx/libavcodec/vaapi_vp9.c 2020-12-10 20:40:53.389541365 +0100
@@ -0,0 +1,185 @@
+/*
+ * VP9 HW decode acceleration through VA API
+ *
+ * Copyright (C) 2015 Timo Rothenpieler <timo@rothenpieler.org>
+ *
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "libavutil/pixdesc.h"
+
+#include "hwaccel.h"
+#include "vaapi_decode.h"
+#include "vp9shared.h"
+
+static VASurfaceID vaapi_vp9_surface_id(const VP9Frame *vf)
+{
+ if (vf)
+ return ff_vaapi_get_surface_id(vf->tf.f);
+ else
+ return VA_INVALID_SURFACE;
+}
+
+static int vaapi_vp9_start_frame(AVCodecContext *avctx,
+ av_unused const uint8_t *buffer,
+ av_unused uint32_t size)
+{
+ const VP9SharedContext *h = avctx->priv_data;
+ VAAPIDecodePicture *pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+ VADecPictureParameterBufferVP9 pic_param;
+ const AVPixFmtDescriptor *pixdesc = av_pix_fmt_desc_get(avctx->sw_pix_fmt);
+ int err, i;
+
+ pic->output_surface = vaapi_vp9_surface_id(&h->frames[CUR_FRAME]);
+
+ pic_param = (VADecPictureParameterBufferVP9) {
+ .frame_width = avctx->width,
+ .frame_height = avctx->height,
+
+ .pic_fields.bits = {
+ .subsampling_x = pixdesc->log2_chroma_w,
+ .subsampling_y = pixdesc->log2_chroma_h,
+ .frame_type = !h->h.keyframe,
+ .show_frame = !h->h.invisible,
+ .error_resilient_mode = h->h.errorres,
+ .intra_only = h->h.intraonly,
+ .allow_high_precision_mv = h->h.keyframe ? 0 : h->h.highprecisionmvs,
+ .mcomp_filter_type = h->h.filtermode ^ (h->h.filtermode <= 1),
+ .frame_parallel_decoding_mode = h->h.parallelmode,
+ .reset_frame_context = h->h.resetctx,
+ .refresh_frame_context = h->h.refreshctx,
+ .frame_context_idx = h->h.framectxid,
+
+ .segmentation_enabled = h->h.segmentation.enabled,
+ .segmentation_temporal_update = h->h.segmentation.temporal,
+ .segmentation_update_map = h->h.segmentation.update_map,
+
+ .last_ref_frame = h->h.refidx[0],
+ .last_ref_frame_sign_bias = h->h.signbias[0],
+ .golden_ref_frame = h->h.refidx[1],
+ .golden_ref_frame_sign_bias = h->h.signbias[1],
+ .alt_ref_frame = h->h.refidx[2],
+ .alt_ref_frame_sign_bias = h->h.signbias[2],
+ .lossless_flag = h->h.lossless,
+ },
+
+ .filter_level = h->h.filter.level,
+ .sharpness_level = h->h.filter.sharpness,
+ .log2_tile_rows = h->h.tiling.log2_tile_rows,
+ .log2_tile_columns = h->h.tiling.log2_tile_cols,
+
+ .frame_header_length_in_bytes = h->h.uncompressed_header_size,
+ .first_partition_size = h->h.compressed_header_size,
+
+ .profile = h->h.profile,
+ .bit_depth = h->h.bpp,
+ };
+
+ for (i = 0; i < 7; i++)
+ pic_param.mb_segment_tree_probs[i] = h->h.segmentation.prob[i];
+
+ if (h->h.segmentation.temporal) {
+ for (i = 0; i < 3; i++)
+ pic_param.segment_pred_probs[i] = h->h.segmentation.pred_prob[i];
+ } else {
+ memset(pic_param.segment_pred_probs, 255, sizeof(pic_param.segment_pred_probs));
+ }
+
+ for (i = 0; i < 8; i++) {
+ if (h->refs[i].f->buf[0])
+ pic_param.reference_frames[i] = ff_vaapi_get_surface_id(h->refs[i].f);
+ else
+ pic_param.reference_frames[i] = VA_INVALID_ID;
+ }
+
+ err = ff_vaapi_decode_make_param_buffer(avctx, pic,
+ VAPictureParameterBufferType,
+ &pic_param, sizeof(pic_param));
+ if (err < 0) {
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
+ }
+
+ return 0;
+}
+
+static int vaapi_vp9_end_frame(AVCodecContext *avctx)
+{
+ const VP9SharedContext *h = avctx->priv_data;
+ VAAPIDecodePicture *pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+
+ return ff_vaapi_decode_issue(avctx, pic);
+}
+
+static int vaapi_vp9_decode_slice(AVCodecContext *avctx,
+ const uint8_t *buffer,
+ uint32_t size)
+{
+ const VP9SharedContext *h = avctx->priv_data;
+ VAAPIDecodePicture *pic = h->frames[CUR_FRAME].hwaccel_picture_private;
+ VASliceParameterBufferVP9 slice_param;
+ int err, i;
+
+ slice_param = (VASliceParameterBufferVP9) {
+ .slice_data_size = size,
+ .slice_data_offset = 0,
+ .slice_data_flag = VA_SLICE_DATA_FLAG_ALL,
+ };
+
+ for (i = 0; i < 8; i++) {
+ slice_param.seg_param[i] = (VASegmentParameterVP9) {
+ .segment_flags.fields = {
+ .segment_reference_enabled = h->h.segmentation.feat[i].ref_enabled,
+ .segment_reference = h->h.segmentation.feat[i].ref_val,
+ .segment_reference_skipped = h->h.segmentation.feat[i].skip_enabled,
+ },
+
+ .luma_dc_quant_scale = h->h.segmentation.feat[i].qmul[0][0],
+ .luma_ac_quant_scale = h->h.segmentation.feat[i].qmul[0][1],
+ .chroma_dc_quant_scale = h->h.segmentation.feat[i].qmul[1][0],
+ .chroma_ac_quant_scale = h->h.segmentation.feat[i].qmul[1][1],
+ };
+
+ memcpy(slice_param.seg_param[i].filter_level, h->h.segmentation.feat[i].lflvl, sizeof(slice_param.seg_param[i].filter_level));
+ }
+
+ err = ff_vaapi_decode_make_slice_buffer(avctx, pic,
+ &slice_param, sizeof(slice_param),
+ buffer, size);
+ if (err) {
+ ff_vaapi_decode_cancel(avctx, pic);
+ return err;
+ }
+
+ return 0;
+}
+
+const AVHWAccel ff_vp9_vaapi_hwaccel = {
+ .name = "vp9_vaapi",
+ .type = AVMEDIA_TYPE_VIDEO,
+ .id = AV_CODEC_ID_VP9,
+ .pix_fmt = AV_PIX_FMT_VAAPI,
+ .start_frame = vaapi_vp9_start_frame,
+ .end_frame = vaapi_vp9_end_frame,
+ .decode_slice = vaapi_vp9_decode_slice,
+ .frame_priv_data_size = sizeof(VAAPIDecodePicture),
+ .init = ff_vaapi_decode_init,
+ .uninit = ff_vaapi_decode_uninit,
+ .frame_params = ff_vaapi_common_frame_params,
+ .priv_data_size = sizeof(VAAPIDecodeContext),
+ .caps_internal = HWACCEL_CAP_ASYNC_SAFE,
+};
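
The vp9_vaapi hwaccel registered above only kicks in when the caller attaches a VAAPI device to the codec context and returns AV_PIX_FMT_VAAPI from its get_format callback. A minimal sketch of that wiring using only public libavcodec/libavutil calls (illustrative, not necessarily how Firefox's decoder module wires it up; error handling is trimmed):

    #include <libavcodec/avcodec.h>
    #include <libavutil/hwcontext.h>

    /* Prefer the VAAPI hw format when offered; otherwise fall back to the
     * first (software) format in the list. */
    static enum AVPixelFormat pick_vaapi(AVCodecContext *avctx,
                                         const enum AVPixelFormat *fmts)
    {
        const enum AVPixelFormat *p;
        (void)avctx;
        for (p = fmts; *p != AV_PIX_FMT_NONE; p++) {
            if (*p == AV_PIX_FMT_VAAPI)
                return *p;                 /* engages ff_vp9_vaapi_hwaccel */
        }
        return fmts[0];
    }

    static int attach_vaapi(AVCodecContext *avctx)
    {
        AVBufferRef *dev = NULL;
        int err = av_hwdevice_ctx_create(&dev, AV_HWDEVICE_TYPE_VAAPI,
                                         NULL /* default device */, NULL, 0);
        if (err < 0)
            return err;
        avctx->hw_device_ctx = dev;    /* avctx owns this ref from now on */
        avctx->get_format    = pick_vaapi;
        return 0;
    }
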
diff -up firefox-84.0/media/ffvpx/libavutil/avutil.symbols.ffvpx firefox-84.0/media/ffvpx/libavutil/avutil.symbols
--- firefox-84.0/media/ffvpx/libavutil/avutil.symbols.ffvpx 2020-12-08 00:35:05.000000000 +0100
+++ firefox-84.0/media/ffvpx/libavutil/avutil.symbols 2020-12-10 20:40:53.389541365 +0100
@@ -158,6 +158,9 @@ av_get_token
av_gettime
av_gettime_relative
av_gettime_relative_is_monotonic
+av_hwdevice_get_hwframe_constraints
+av_hwdevice_hwconfig_alloc
+av_hwframe_constraints_free
av_hwframe_get_buffer
av_image_alloc
av_image_check_sar
@@ -320,5 +323,9 @@ avpriv_slicethread_free
av_hwdevice_get_type_name
av_hwframe_ctx_alloc
av_hwframe_ctx_init
+av_hwdevice_ctx_alloc
+av_hwdevice_ctx_init
+av_hwframe_transfer_get_formats
+av_hwdevice_ctx_create_derived
av_malloc_array
av_mallocz_array
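
The symbols exported above are the libavutil hwdevice/constraints entry points the new VA-API code relies on. A minimal sketch, assuming an already-initialised VAAPI device reference and a VAConfigID are at hand, of how they fit together (only standard libavutil calls are used; the helper name is illustrative):

    #include <libavutil/hwcontext.h>
    #include <libavutil/hwcontext_vaapi.h>
    #include <libavutil/mem.h>

    /* Count the software formats a given VAAPI pipeline config can use. */
    static int count_valid_sw_formats(AVBufferRef *device_ref, VAConfigID config_id)
    {
        AVVAAPIHWConfig *hwconfig = av_hwdevice_hwconfig_alloc(device_ref);
        AVHWFramesConstraints *cst;
        enum AVPixelFormat *p;
        int n = 0;

        if (!hwconfig)
            return AVERROR(ENOMEM);
        hwconfig->config_id = config_id;

        cst = av_hwdevice_get_hwframe_constraints(device_ref, hwconfig);
        av_freep(&hwconfig);
        if (!cst)
            return AVERROR(EINVAL);

        for (p = cst->valid_sw_formats; p && *p != AV_PIX_FMT_NONE; p++)
            n++;              /* each entry is usable as AVHWFramesContext.sw_format */

        av_hwframe_constraints_free(&cst);
        return n;
    }
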
diff -up firefox-84.0/media/ffvpx/libavutil/hwcontext_vaapi.c.ffvpx firefox-84.0/media/ffvpx/libavutil/hwcontext_vaapi.c
--- firefox-84.0/media/ffvpx/libavutil/hwcontext_vaapi.c.ffvpx 2020-12-10 20:40:53.390541394 +0100
+++ firefox-84.0/media/ffvpx/libavutil/hwcontext_vaapi.c 2020-12-10 20:40:53.389541365 +0100
@@ -0,0 +1,1691 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#include "config.h"
+
+#if HAVE_VAAPI_X11
+# include <va/va_x11.h>
+#endif
+#if HAVE_VAAPI_DRM
+# include <va/va_drm.h>
+#endif
+
+#if CONFIG_LIBDRM
+# include <va/va_drmcommon.h>
+# include <xf86drm.h>
+# include <drm_fourcc.h>
+# ifndef DRM_FORMAT_MOD_INVALID
+# define DRM_FORMAT_MOD_INVALID ((1ULL << 56) - 1)
+# endif
+#endif
+
+#include <fcntl.h>
+#if HAVE_UNISTD_H
+# include <unistd.h>
+#endif
+
+
+#include "avassert.h"
+#include "buffer.h"
+#include "common.h"
+#include "hwcontext.h"
+#if CONFIG_LIBDRM
+#include "hwcontext_drm.h"
+#endif
+#include "hwcontext_internal.h"
+#include "hwcontext_vaapi.h"
+#include "mem.h"
+#include "pixdesc.h"
+#include "pixfmt.h"
+
+
+typedef struct VAAPIDevicePriv {
+#if HAVE_VAAPI_X11
+ Display *x11_display;
+#endif
+
+ int drm_fd;
+} VAAPIDevicePriv;
+
+typedef struct VAAPISurfaceFormat {
+ enum AVPixelFormat pix_fmt;
+ VAImageFormat image_format;
+} VAAPISurfaceFormat;
+
+typedef struct VAAPIDeviceContext {
+ // Surface formats which can be used with this device.
+ VAAPISurfaceFormat *formats;
+ int nb_formats;
+} VAAPIDeviceContext;
+
+typedef struct VAAPIFramesContext {
+ // Surface attributes set at create time.
+ VASurfaceAttrib *attributes;
+ int nb_attributes;
+ // RT format of the underlying surface (Intel driver ignores this anyway).
+ unsigned int rt_format;
+ // Whether vaDeriveImage works.
+ int derive_works;
+} VAAPIFramesContext;
+
+typedef struct VAAPIMapping {
+ // Handle to the derived or copied image which is mapped.
+ VAImage image;
+ // The mapping flags actually used.
+ int flags;
+} VAAPIMapping;
+
+typedef struct VAAPIFormat {
+ unsigned int fourcc;
+ unsigned int rt_format;
+ enum AVPixelFormat pix_fmt;
+ int chroma_planes_swapped;
+} VAAPIFormatDescriptor;
+
+#define MAP(va, rt, av, swap_uv) { \
+ VA_FOURCC_ ## va, \
+ VA_RT_FORMAT_ ## rt, \
+ AV_PIX_FMT_ ## av, \
+ swap_uv, \
+ }
+// The map fourcc <-> pix_fmt isn't bijective because of the annoying U/V
+// plane swap cases. The frame handling below tries to hide these.
+static const VAAPIFormatDescriptor vaapi_format_map[] = {
+ MAP(NV12, YUV420, NV12, 0),
+#ifdef VA_FOURCC_I420
+ MAP(I420, YUV420, YUV420P, 0),
+#endif
+ MAP(YV12, YUV420, YUV420P, 1),
+ MAP(IYUV, YUV420, YUV420P, 0),
+ MAP(422H, YUV422, YUV422P, 0),
+#ifdef VA_FOURCC_YV16
+ MAP(YV16, YUV422, YUV422P, 1),
+#endif
+ MAP(UYVY, YUV422, UYVY422, 0),
+ MAP(YUY2, YUV422, YUYV422, 0),
+ MAP(411P, YUV411, YUV411P, 0),
+ MAP(422V, YUV422, YUV440P, 0),
+ MAP(444P, YUV444, YUV444P, 0),
+ MAP(Y800, YUV400, GRAY8, 0),
+#ifdef VA_FOURCC_P010
+ MAP(P010, YUV420_10BPP, P010, 0),
+#endif
+ MAP(BGRA, RGB32, BGRA, 0),
+ MAP(BGRX, RGB32, BGR0, 0),
+ MAP(RGBA, RGB32, RGBA, 0),
+ MAP(RGBX, RGB32, RGB0, 0),
+#ifdef VA_FOURCC_ABGR
+ MAP(ABGR, RGB32, ABGR, 0),
+ MAP(XBGR, RGB32, 0BGR, 0),
+#endif
+ MAP(ARGB, RGB32, ARGB, 0),
+ MAP(XRGB, RGB32, 0RGB, 0),
+};
+#undef MAP
+
+static const VAAPIFormatDescriptor *
+ vaapi_format_from_fourcc(unsigned int fourcc)
+{
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(vaapi_format_map); i++)
+ if (vaapi_format_map[i].fourcc == fourcc)
+ return &vaapi_format_map[i];
+ return NULL;
+}
+
+static const VAAPIFormatDescriptor *
+ vaapi_format_from_pix_fmt(enum AVPixelFormat pix_fmt)
+{
+ int i;
+ for (i = 0; i < FF_ARRAY_ELEMS(vaapi_format_map); i++)
+ if (vaapi_format_map[i].pix_fmt == pix_fmt)
+ return &vaapi_format_map[i];
+ return NULL;
+}
+
+static enum AVPixelFormat vaapi_pix_fmt_from_fourcc(unsigned int fourcc)
+{
+ const VAAPIFormatDescriptor *desc;
+ desc = vaapi_format_from_fourcc(fourcc);
+ if (desc)
+ return desc->pix_fmt;
+ else
+ return AV_PIX_FMT_NONE;
+}
+
+static int vaapi_get_image_format(AVHWDeviceContext *hwdev,
+ enum AVPixelFormat pix_fmt,
+ VAImageFormat **image_format)
+{
+ VAAPIDeviceContext *ctx = hwdev->internal->priv;
+ int i;
+
+ for (i = 0; i < ctx->nb_formats; i++) {
+ if (ctx->formats[i].pix_fmt == pix_fmt) {
+ if (image_format)
+ *image_format = &ctx->formats[i].image_format;
+ return 0;
+ }
+ }
+ return AVERROR(EINVAL);
+}
+
+static int vaapi_frames_get_constraints(AVHWDeviceContext *hwdev,
+ const void *hwconfig,
+ AVHWFramesConstraints *constraints)
+{
+ AVVAAPIDeviceContext *hwctx = hwdev->hwctx;
+ const AVVAAPIHWConfig *config = hwconfig;
+ VAAPIDeviceContext *ctx = hwdev->internal->priv;
+ VASurfaceAttrib *attr_list = NULL;
+ VAStatus vas;
+ enum AVPixelFormat pix_fmt;
+ unsigned int fourcc;
+ int err, i, j, attr_count, pix_fmt_count;
+
+ if (config &&
+ !(hwctx->driver_quirks & AV_VAAPI_DRIVER_QUIRK_SURFACE_ATTRIBUTES)) {
+ attr_count = 0;
+ vas = vaQuerySurfaceAttributes(hwctx->display, config->config_id,
+ 0, &attr_count);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwdev, AV_LOG_ERROR, "Failed to query surface attributes: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(ENOSYS);
+ goto fail;
+ }
+
+ attr_list = av_malloc(attr_count * sizeof(*attr_list));
+ if (!attr_list) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ vas = vaQuerySurfaceAttributes(hwctx->display, config->config_id,
+ attr_list, &attr_count);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwdev, AV_LOG_ERROR, "Failed to query surface attributes: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ err = AVERROR(ENOSYS);
+ goto fail;
+ }
+
+ pix_fmt_count = 0;
+ for (i = 0; i < attr_count; i++) {
+ switch (attr_list[i].type) {
+ case VASurfaceAttribPixelFormat:
+ fourcc = attr_list[i].value.value.i;
+ pix_fmt = vaapi_pix_fmt_from_fourcc(fourcc);
+ if (pix_fmt != AV_PIX_FMT_NONE) {
+ ++pix_fmt_count;
+ } else {
+ // Something unsupported - ignore.
+ }
+ break;
+ case VASurfaceAttribMinWidth:
+ constraints->min_width = attr_list[i].value.value.i;
+ break;
+ case VASurfaceAttribMinHeight:
+ constraints->min_height = attr_list[i].value.value.i;
+ break;
+ case VASurfaceAttribMaxWidth:
+ constraints->max_width = attr_list[i].value.value.i;
+ break;
+ case VASurfaceAttribMaxHeight:
+ constraints->max_height = attr_list[i].value.value.i;
+ break;
+ }
+ }
+ if (pix_fmt_count == 0) {
+ // Nothing usable found. Presumably there exists something which
+ // works, so leave the set null to indicate unknown.
+ constraints->valid_sw_formats = NULL;
+ } else {
+ constraints->valid_sw_formats = av_malloc_array(pix_fmt_count + 1,
+ sizeof(pix_fmt));
+ if (!constraints->valid_sw_formats) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ for (i = j = 0; i < attr_count; i++) {
+ if (attr_list[i].type != VASurfaceAttribPixelFormat)
+ continue;
+ fourcc = attr_list[i].value.value.i;
+ pix_fmt = vaapi_pix_fmt_from_fourcc(fourcc);
+ if (pix_fmt != AV_PIX_FMT_NONE)
+ constraints->valid_sw_formats[j++] = pix_fmt;
+ }
+ av_assert0(j == pix_fmt_count);
+ constraints->valid_sw_formats[j] = AV_PIX_FMT_NONE;
+ }
+ } else {
+ // No configuration supplied.
+ // Return the full set of image formats known by the implementation.
+ constraints->valid_sw_formats = av_malloc_array(ctx->nb_formats + 1,
+ sizeof(pix_fmt));
+ if (!constraints->valid_sw_formats) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ for (i = 0; i < ctx->nb_formats; i++)
+ constraints->valid_sw_formats[i] = ctx->formats[i].pix_fmt;
+ constraints->valid_sw_formats[i] = AV_PIX_FMT_NONE;
+ }
+
+ constraints->valid_hw_formats = av_malloc_array(2, sizeof(pix_fmt));
+ if (!constraints->valid_hw_formats) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ constraints->valid_hw_formats[0] = AV_PIX_FMT_VAAPI;
+ constraints->valid_hw_formats[1] = AV_PIX_FMT_NONE;
+
+ err = 0;
+fail:
+ av_freep(&attr_list);
+ return err;
+}
+
+static const struct {
+ const char *friendly_name;
+ const char *match_string;
+ unsigned int quirks;
+} vaapi_driver_quirks_table[] = {
+#if !VA_CHECK_VERSION(1, 0, 0)
+ // The i965 driver did not conform before version 2.0.
+ {
+ "Intel i965 (Quick Sync)",
+ "i965",
+ AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS,
+ },
+#endif
+ {
+ "Intel iHD",
+ "ubit",
+ AV_VAAPI_DRIVER_QUIRK_ATTRIB_MEMTYPE,
+ },
+ {
+ "VDPAU wrapper",
+ "Splitted-Desktop Systems VDPAU backend for VA-API",
+ AV_VAAPI_DRIVER_QUIRK_SURFACE_ATTRIBUTES,
+ },
+};
+
+static int vaapi_device_init(AVHWDeviceContext *hwdev)
+{
+ VAAPIDeviceContext *ctx = hwdev->internal->priv;
+ AVVAAPIDeviceContext *hwctx = hwdev->hwctx;
+ VAImageFormat *image_list = NULL;
+ VAStatus vas;
+ const char *vendor_string;
+ int err, i, image_count;
+ enum AVPixelFormat pix_fmt;
+ unsigned int fourcc;
+
+ image_count = vaMaxNumImageFormats(hwctx->display);
+ if (image_count <= 0) {
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ image_list = av_malloc(image_count * sizeof(*image_list));
+ if (!image_list) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ vas = vaQueryImageFormats(hwctx->display, image_list, &image_count);
+ if (vas != VA_STATUS_SUCCESS) {
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ ctx->formats = av_malloc(image_count * sizeof(*ctx->formats));
+ if (!ctx->formats) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ ctx->nb_formats = 0;
+ for (i = 0; i < image_count; i++) {
+ fourcc = image_list[i].fourcc;
+ pix_fmt = vaapi_pix_fmt_from_fourcc(fourcc);
+ if (pix_fmt == AV_PIX_FMT_NONE) {
+ av_log(hwdev, AV_LOG_DEBUG, "Format %#x -> unknown.\n",
+ fourcc);
+ } else {
+ av_log(hwdev, AV_LOG_DEBUG, "Format %#x -> %s.\n",
+ fourcc, av_get_pix_fmt_name(pix_fmt));
+ ctx->formats[ctx->nb_formats].pix_fmt = pix_fmt;
+ ctx->formats[ctx->nb_formats].image_format = image_list[i];
+ ++ctx->nb_formats;
+ }
+ }
+
+ vendor_string = vaQueryVendorString(hwctx->display);
+ if (vendor_string)
+ av_log(hwdev, AV_LOG_VERBOSE, "VAAPI driver: %s.\n", vendor_string);
+
+ if (hwctx->driver_quirks & AV_VAAPI_DRIVER_QUIRK_USER_SET) {
+ av_log(hwdev, AV_LOG_VERBOSE, "Using quirks set by user (%#x).\n",
+ hwctx->driver_quirks);
+ } else {
+ // Detect the driver in use and set quirk flags if necessary.
+ hwctx->driver_quirks = 0;
+ if (vendor_string) {
+ for (i = 0; i < FF_ARRAY_ELEMS(vaapi_driver_quirks_table); i++) {
+ if (strstr(vendor_string,
+ vaapi_driver_quirks_table[i].match_string)) {
+ av_log(hwdev, AV_LOG_VERBOSE, "Matched driver string "
+ "as known nonstandard driver \"%s\", setting "
+ "quirks (%#x).\n",
+ vaapi_driver_quirks_table[i].friendly_name,
+ vaapi_driver_quirks_table[i].quirks);
+ hwctx->driver_quirks |=
+ vaapi_driver_quirks_table[i].quirks;
+ break;
+ }
+ }
+ if (!(i < FF_ARRAY_ELEMS(vaapi_driver_quirks_table))) {
+ av_log(hwdev, AV_LOG_VERBOSE, "Driver not found in known "
+ "nonstandard list, using standard behaviour.\n");
+ }
+ } else {
+ av_log(hwdev, AV_LOG_VERBOSE, "Driver has no vendor string, "
+ "assuming standard behaviour.\n");
+ }
+ }
+
+ av_free(image_list);
+ return 0;
+fail:
+ av_freep(&ctx->formats);
+ av_free(image_list);
+ return err;
+}
+
+static void vaapi_device_uninit(AVHWDeviceContext *hwdev)
+{
+ VAAPIDeviceContext *ctx = hwdev->internal->priv;
+
+ av_freep(&ctx->formats);
+}
+
+static void vaapi_buffer_free(void *opaque, uint8_t *data)
+{
+ AVHWFramesContext *hwfc = opaque;
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ VASurfaceID surface_id;
+ VAStatus vas;
+
+ surface_id = (VASurfaceID)(uintptr_t)data;
+
+ vas = vaDestroySurfaces(hwctx->display, &surface_id, 1);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to destroy surface %#x: "
+ "%d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ }
+}
+
+static AVBufferRef *vaapi_pool_alloc(void *opaque, int size)
+{
+ AVHWFramesContext *hwfc = opaque;
+ VAAPIFramesContext *ctx = hwfc->internal->priv;
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ AVVAAPIFramesContext *avfc = hwfc->hwctx;
+ VASurfaceID surface_id;
+ VAStatus vas;
+ AVBufferRef *ref;
+
+ if (hwfc->initial_pool_size > 0 &&
+ avfc->nb_surfaces >= hwfc->initial_pool_size)
+ return NULL;
+
+ vas = vaCreateSurfaces(hwctx->display, ctx->rt_format,
+ hwfc->width, hwfc->height,
+ &surface_id, 1,
+ ctx->attributes, ctx->nb_attributes);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to create surface: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ return NULL;
+ }
+ av_log(hwfc, AV_LOG_DEBUG, "Created surface %#x.\n", surface_id);
+
+ ref = av_buffer_create((uint8_t*)(uintptr_t)surface_id,
+ sizeof(surface_id), &vaapi_buffer_free,
+ hwfc, AV_BUFFER_FLAG_READONLY);
+ if (!ref) {
+ vaDestroySurfaces(hwctx->display, &surface_id, 1);
+ return NULL;
+ }
+
+ if (hwfc->initial_pool_size > 0) {
+ // This is a fixed-size pool, so we must still be in the initial
+ // allocation sequence.
+ av_assert0(avfc->nb_surfaces < hwfc->initial_pool_size);
+ avfc->surface_ids[avfc->nb_surfaces] = surface_id;
+ ++avfc->nb_surfaces;
+ }
+
+ return ref;
+}
+
+static int vaapi_frames_init(AVHWFramesContext *hwfc)
+{
+ AVVAAPIFramesContext *avfc = hwfc->hwctx;
+ VAAPIFramesContext *ctx = hwfc->internal->priv;
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ const VAAPIFormatDescriptor *desc;
+ VAImageFormat *expected_format;
+ AVBufferRef *test_surface = NULL;
+ VASurfaceID test_surface_id;
+ VAImage test_image;
+ VAStatus vas;
+ int err, i;
+
+ desc = vaapi_format_from_pix_fmt(hwfc->sw_format);
+ if (!desc) {
+ av_log(hwfc, AV_LOG_ERROR, "Unsupported format: %s.\n",
+ av_get_pix_fmt_name(hwfc->sw_format));
+ return AVERROR(EINVAL);
+ }
+
+ if (!hwfc->pool) {
+ if (!(hwctx->driver_quirks & AV_VAAPI_DRIVER_QUIRK_SURFACE_ATTRIBUTES)) {
+ int need_memory_type = !(hwctx->driver_quirks & AV_VAAPI_DRIVER_QUIRK_ATTRIB_MEMTYPE);
+ int need_pixel_format = 1;
+ for (i = 0; i < avfc->nb_attributes; i++) {
+ if (avfc->attributes[i].type == VASurfaceAttribMemoryType)
+ need_memory_type = 0;
+ if (avfc->attributes[i].type == VASurfaceAttribPixelFormat)
+ need_pixel_format = 0;
+ }
+ ctx->nb_attributes =
+ avfc->nb_attributes + need_memory_type + need_pixel_format;
+
+ ctx->attributes = av_malloc(ctx->nb_attributes *
+ sizeof(*ctx->attributes));
+ if (!ctx->attributes) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ for (i = 0; i < avfc->nb_attributes; i++)
+ ctx->attributes[i] = avfc->attributes[i];
+ if (need_memory_type) {
+ ctx->attributes[i++] = (VASurfaceAttrib) {
+ .type = VASurfaceAttribMemoryType,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypeInteger,
+ .value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_VA,
+ };
+ }
+ if (need_pixel_format) {
+ ctx->attributes[i++] = (VASurfaceAttrib) {
+ .type = VASurfaceAttribPixelFormat,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypeInteger,
+ .value.value.i = desc->fourcc,
+ };
+ }
+ av_assert0(i == ctx->nb_attributes);
+ } else {
+ ctx->attributes = NULL;
+ ctx->nb_attributes = 0;
+ }
+
+ ctx->rt_format = desc->rt_format;
+
+ if (hwfc->initial_pool_size > 0) {
+ // This pool will be usable as a render target, so we need to store
+ // all of the surface IDs somewhere that vaCreateContext() calls
+ // will be able to access them.
+ avfc->nb_surfaces = 0;
+ avfc->surface_ids = av_malloc(hwfc->initial_pool_size *
+ sizeof(*avfc->surface_ids));
+ if (!avfc->surface_ids) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ } else {
+ // This pool allows dynamic sizing, and will not be usable as a
+ // render target.
+ avfc->nb_surfaces = 0;
+ avfc->surface_ids = NULL;
+ }
+
+ hwfc->internal->pool_internal =
+ av_buffer_pool_init2(sizeof(VASurfaceID), hwfc,
+ &vaapi_pool_alloc, NULL);
+ if (!hwfc->internal->pool_internal) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to create VAAPI surface pool.\n");
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+
+ // Allocate a single surface to test whether vaDeriveImage() is going
+ // to work for the specific configuration.
+ if (hwfc->pool) {
+ test_surface = av_buffer_pool_get(hwfc->pool);
+ if (!test_surface) {
+ av_log(hwfc, AV_LOG_ERROR, "Unable to allocate a surface from "
+ "user-configured buffer pool.\n");
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ } else {
+ test_surface = av_buffer_pool_get(hwfc->internal->pool_internal);
+ if (!test_surface) {
+ av_log(hwfc, AV_LOG_ERROR, "Unable to allocate a surface from "
+ "internal buffer pool.\n");
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+ }
+ test_surface_id = (VASurfaceID)(uintptr_t)test_surface->data;
+
+ ctx->derive_works = 0;
+
+ err = vaapi_get_image_format(hwfc->device_ctx,
+ hwfc->sw_format, &expected_format);
+ if (err == 0) {
+ vas = vaDeriveImage(hwctx->display, test_surface_id, &test_image);
+ if (vas == VA_STATUS_SUCCESS) {
+ if (expected_format->fourcc == test_image.format.fourcc) {
+ av_log(hwfc, AV_LOG_DEBUG, "Direct mapping possible.\n");
+ ctx->derive_works = 1;
+ } else {
+ av_log(hwfc, AV_LOG_DEBUG, "Direct mapping disabled: "
+ "derived image format %08x does not match "
+ "expected format %08x.\n",
+ expected_format->fourcc, test_image.format.fourcc);
+ }
+ vaDestroyImage(hwctx->display, test_image.image_id);
+ } else {
+ av_log(hwfc, AV_LOG_DEBUG, "Direct mapping disabled: "
+ "deriving image does not work: "
+ "%d (%s).\n", vas, vaErrorStr(vas));
+ }
+ } else {
+ av_log(hwfc, AV_LOG_DEBUG, "Direct mapping disabled: "
+ "image format is not supported.\n");
+ }
+
+ av_buffer_unref(&test_surface);
+ return 0;
+
+fail:
+ av_buffer_unref(&test_surface);
+ av_freep(&avfc->surface_ids);
+ av_freep(&ctx->attributes);
+ return err;
+}
+
+static void vaapi_frames_uninit(AVHWFramesContext *hwfc)
+{
+ AVVAAPIFramesContext *avfc = hwfc->hwctx;
+ VAAPIFramesContext *ctx = hwfc->internal->priv;
+
+ av_freep(&avfc->surface_ids);
+ av_freep(&ctx->attributes);
+}
+
+static int vaapi_get_buffer(AVHWFramesContext *hwfc, AVFrame *frame)
+{
+ frame->buf[0] = av_buffer_pool_get(hwfc->pool);
+ if (!frame->buf[0])
+ return AVERROR(ENOMEM);
+
+ frame->data[3] = frame->buf[0]->data;
+ frame->format = AV_PIX_FMT_VAAPI;
+ frame->width = hwfc->width;
+ frame->height = hwfc->height;
+
+ return 0;
+}
+
+static int vaapi_transfer_get_formats(AVHWFramesContext *hwfc,
+ enum AVHWFrameTransferDirection dir,
+ enum AVPixelFormat **formats)
+{
+ VAAPIDeviceContext *ctx = hwfc->device_ctx->internal->priv;
+ enum AVPixelFormat *pix_fmts;
+ int i, k, sw_format_available;
+
+ sw_format_available = 0;
+ for (i = 0; i < ctx->nb_formats; i++) {
+ if (ctx->formats[i].pix_fmt == hwfc->sw_format)
+ sw_format_available = 1;
+ }
+
+ pix_fmts = av_malloc((ctx->nb_formats + 1) * sizeof(*pix_fmts));
+ if (!pix_fmts)
+ return AVERROR(ENOMEM);
+
+ if (sw_format_available) {
+ pix_fmts[0] = hwfc->sw_format;
+ k = 1;
+ } else {
+ k = 0;
+ }
+ for (i = 0; i < ctx->nb_formats; i++) {
+ if (ctx->formats[i].pix_fmt == hwfc->sw_format)
+ continue;
+ av_assert0(k < ctx->nb_formats);
+ pix_fmts[k++] = ctx->formats[i].pix_fmt;
+ }
+ pix_fmts[k] = AV_PIX_FMT_NONE;
+
+ *formats = pix_fmts;
+ return 0;
+}
+
+static void vaapi_unmap_frame(AVHWFramesContext *hwfc,
+ HWMapDescriptor *hwmap)
+{
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ VAAPIMapping *map = hwmap->priv;
+ VASurfaceID surface_id;
+ VAStatus vas;
+
+ surface_id = (VASurfaceID)(uintptr_t)hwmap->source->data[3];
+ av_log(hwfc, AV_LOG_DEBUG, "Unmap surface %#x.\n", surface_id);
+
+ vas = vaUnmapBuffer(hwctx->display, map->image.buf);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to unmap image from surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ }
+
+ if ((map->flags & AV_HWFRAME_MAP_WRITE) &&
+ !(map->flags & AV_HWFRAME_MAP_DIRECT)) {
+ vas = vaPutImage(hwctx->display, surface_id, map->image.image_id,
+ 0, 0, hwfc->width, hwfc->height,
+ 0, 0, hwfc->width, hwfc->height);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to write image to surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ }
+ }
+
+ vas = vaDestroyImage(hwctx->display, map->image.image_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to destroy image from surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ }
+
+ av_free(map);
+}
+
+static int vaapi_map_frame(AVHWFramesContext *hwfc,
+ AVFrame *dst, const AVFrame *src, int flags)
+{
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ VAAPIFramesContext *ctx = hwfc->internal->priv;
+ VASurfaceID surface_id;
+ const VAAPIFormatDescriptor *desc;
+ VAImageFormat *image_format;
+ VAAPIMapping *map;
+ VAStatus vas;
+ void *address = NULL;
+ int err, i;
+
+ surface_id = (VASurfaceID)(uintptr_t)src->data[3];
+ av_log(hwfc, AV_LOG_DEBUG, "Map surface %#x.\n", surface_id);
+
+ if (!ctx->derive_works && (flags & AV_HWFRAME_MAP_DIRECT)) {
+ // Requested direct mapping but it is not possible.
+ return AVERROR(EINVAL);
+ }
+ if (dst->format == AV_PIX_FMT_NONE)
+ dst->format = hwfc->sw_format;
+ if (dst->format != hwfc->sw_format && (flags & AV_HWFRAME_MAP_DIRECT)) {
+ // Requested direct mapping but the formats do not match.
+ return AVERROR(EINVAL);
+ }
+
+ err = vaapi_get_image_format(hwfc->device_ctx, dst->format, &image_format);
+ if (err < 0) {
+ // Requested format is not a valid output format.
+ return AVERROR(EINVAL);
+ }
+
+ map = av_malloc(sizeof(*map));
+ if (!map)
+ return AVERROR(ENOMEM);
+ map->flags = flags;
+ map->image.image_id = VA_INVALID_ID;
+
+ vas = vaSyncSurface(hwctx->display, surface_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to sync surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ // The memory which we map using derive need not be connected to the CPU
+ // in a way conducive to fast access. On Gen7-Gen9 Intel graphics, the
+ // memory is mappable but not cached, so normal memcpy()-like access is
+ // very slow to read it (but writing is ok). It is possible to read much
+ // faster with a copy routine which is aware of the limitation, but we
+ // assume for now that the user is not aware of that and would therefore
+ // prefer not to be given direct-mapped memory if they request read access.
+ if (ctx->derive_works && dst->format == hwfc->sw_format &&
+ ((flags & AV_HWFRAME_MAP_DIRECT) || !(flags & AV_HWFRAME_MAP_READ))) {
+ vas = vaDeriveImage(hwctx->display, surface_id, &map->image);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to derive image from "
+ "surface %#x: %d (%s).\n",
+ surface_id, vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ if (map->image.format.fourcc != image_format->fourcc) {
+ av_log(hwfc, AV_LOG_ERROR, "Derive image of surface %#x "
+ "is in wrong format: expected %#08x, got %#08x.\n",
+ surface_id, image_format->fourcc, map->image.format.fourcc);
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ map->flags |= AV_HWFRAME_MAP_DIRECT;
+ } else {
+ vas = vaCreateImage(hwctx->display, image_format,
+ hwfc->width, hwfc->height, &map->image);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to create image for "
+ "surface %#x: %d (%s).\n",
+ surface_id, vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ if (!(flags & AV_HWFRAME_MAP_OVERWRITE)) {
+ vas = vaGetImage(hwctx->display, surface_id, 0, 0,
+ hwfc->width, hwfc->height, map->image.image_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to read image from "
+ "surface %#x: %d (%s).\n",
+ surface_id, vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+ }
+ }
+
+ vas = vaMapBuffer(hwctx->display, map->image.buf, &address);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to map image from surface "
+ "%#x: %d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ err = ff_hwframe_map_create(src->hw_frames_ctx,
+ dst, src, &vaapi_unmap_frame, map);
+ if (err < 0)
+ goto fail;
+
+ dst->width = src->width;
+ dst->height = src->height;
+
+ for (i = 0; i < map->image.num_planes; i++) {
+ dst->data[i] = (uint8_t*)address + map->image.offsets[i];
+ dst->linesize[i] = map->image.pitches[i];
+ }
+
+ desc = vaapi_format_from_fourcc(map->image.format.fourcc);
+ if (desc && desc->chroma_planes_swapped) {
+ // Chroma planes are YVU rather than YUV, so swap them.
+ FFSWAP(uint8_t*, dst->data[1], dst->data[2]);
+ }
+
+ return 0;
+
+fail:
+ if (map) {
+ if (address)
+ vaUnmapBuffer(hwctx->display, map->image.buf);
+ if (map->image.image_id != VA_INVALID_ID)
+ vaDestroyImage(hwctx->display, map->image.image_id);
+ av_free(map);
+ }
+ return err;
+}
+
+static int vaapi_transfer_data_from(AVHWFramesContext *hwfc,
+ AVFrame *dst, const AVFrame *src)
+{
+ AVFrame *map;
+ int err;
+
+ if (dst->width > hwfc->width || dst->height > hwfc->height)
+ return AVERROR(EINVAL);
+
+ map = av_frame_alloc();
+ if (!map)
+ return AVERROR(ENOMEM);
+ map->format = dst->format;
+
+ err = vaapi_map_frame(hwfc, map, src, AV_HWFRAME_MAP_READ);
+ if (err)
+ goto fail;
+
+ map->width = dst->width;
+ map->height = dst->height;
+
+ err = av_frame_copy(dst, map);
+ if (err)
+ goto fail;
+
+ err = 0;
+fail:
+ av_frame_free(&map);
+ return err;
+}
+
+static int vaapi_transfer_data_to(AVHWFramesContext *hwfc,
+ AVFrame *dst, const AVFrame *src)
+{
+ AVFrame *map;
+ int err;
+
+ if (src->width > hwfc->width || src->height > hwfc->height)
+ return AVERROR(EINVAL);
+
+ map = av_frame_alloc();
+ if (!map)
+ return AVERROR(ENOMEM);
+ map->format = src->format;
+
+ err = vaapi_map_frame(hwfc, map, dst, AV_HWFRAME_MAP_WRITE | AV_HWFRAME_MAP_OVERWRITE);
+ if (err)
+ goto fail;
+
+ map->width = src->width;
+ map->height = src->height;
+
+ err = av_frame_copy(map, src);
+ if (err)
+ goto fail;
+
+ err = 0;
+fail:
+ av_frame_free(&map);
+ return err;
+}
+
+static int vaapi_map_to_memory(AVHWFramesContext *hwfc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ int err;
+
+ if (dst->format != AV_PIX_FMT_NONE) {
+ err = vaapi_get_image_format(hwfc->device_ctx, dst->format, NULL);
+ if (err < 0)
+ return AVERROR(ENOSYS);
+ }
+
+ err = vaapi_map_frame(hwfc, dst, src, flags);
+ if (err)
+ return err;
+
+ err = av_frame_copy_props(dst, src);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+#if CONFIG_LIBDRM
+
+#define DRM_MAP(va, layers, ...) { \
+ VA_FOURCC_ ## va, \
+ layers, \
+ { __VA_ARGS__ } \
+ }
+static const struct {
+ uint32_t va_fourcc;
+ int nb_layer_formats;
+ uint32_t layer_formats[AV_DRM_MAX_PLANES];
+} vaapi_drm_format_map[] = {
+#ifdef DRM_FORMAT_R8
+ DRM_MAP(NV12, 2, DRM_FORMAT_R8, DRM_FORMAT_RG88),
+#endif
+ DRM_MAP(NV12, 1, DRM_FORMAT_NV12),
+#if defined(VA_FOURCC_P010) && defined(DRM_FORMAT_R16)
+ DRM_MAP(P010, 2, DRM_FORMAT_R16, DRM_FORMAT_RG1616),
+#endif
+ DRM_MAP(BGRA, 1, DRM_FORMAT_ARGB8888),
+ DRM_MAP(BGRX, 1, DRM_FORMAT_XRGB8888),
+ DRM_MAP(RGBA, 1, DRM_FORMAT_ABGR8888),
+ DRM_MAP(RGBX, 1, DRM_FORMAT_XBGR8888),
+#ifdef VA_FOURCC_ABGR
+ DRM_MAP(ABGR, 1, DRM_FORMAT_RGBA8888),
+ DRM_MAP(XBGR, 1, DRM_FORMAT_RGBX8888),
+#endif
+ DRM_MAP(ARGB, 1, DRM_FORMAT_BGRA8888),
+ DRM_MAP(XRGB, 1, DRM_FORMAT_BGRX8888),
+};
+#undef DRM_MAP
+
+static void vaapi_unmap_from_drm(AVHWFramesContext *dst_fc,
+ HWMapDescriptor *hwmap)
+{
+ AVVAAPIDeviceContext *dst_dev = dst_fc->device_ctx->hwctx;
+
+ VASurfaceID surface_id = (VASurfaceID)(uintptr_t)hwmap->priv;
+
+ av_log(dst_fc, AV_LOG_DEBUG, "Destroy surface %#x.\n", surface_id);
+
+ vaDestroySurfaces(dst_dev->display, &surface_id, 1);
+}
+
+static int vaapi_map_from_drm(AVHWFramesContext *src_fc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ AVHWFramesContext *dst_fc =
+ (AVHWFramesContext*)dst->hw_frames_ctx->data;
+ AVVAAPIDeviceContext *dst_dev = dst_fc->device_ctx->hwctx;
+ const AVDRMFrameDescriptor *desc;
+ const VAAPIFormatDescriptor *format_desc;
+ VASurfaceID surface_id;
+ VAStatus vas;
+ uint32_t va_fourcc;
+ int err, i, j, k;
+
+ unsigned long buffer_handle;
+ VASurfaceAttribExternalBuffers buffer_desc;
+ VASurfaceAttrib attrs[2] = {
+ {
+ .type = VASurfaceAttribMemoryType,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypeInteger,
+ .value.value.i = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME,
+ },
+ {
+ .type = VASurfaceAttribExternalBufferDescriptor,
+ .flags = VA_SURFACE_ATTRIB_SETTABLE,
+ .value.type = VAGenericValueTypePointer,
+ .value.value.p = &buffer_desc,
+ }
+ };
+
+ desc = (AVDRMFrameDescriptor*)src->data[0];
+
+ if (desc->nb_objects != 1) {
+ av_log(dst_fc, AV_LOG_ERROR, "VAAPI can only map frames "
+ "made from a single DRM object.\n");
+ return AVERROR(EINVAL);
+ }
+
+ va_fourcc = 0;
+ for (i = 0; i < FF_ARRAY_ELEMS(vaapi_drm_format_map); i++) {
+ if (desc->nb_layers != vaapi_drm_format_map[i].nb_layer_formats)
+ continue;
+ for (j = 0; j < desc->nb_layers; j++) {
+ if (desc->layers[j].format !=
+ vaapi_drm_format_map[i].layer_formats[j])
+ break;
+ }
+ if (j != desc->nb_layers)
+ continue;
+ va_fourcc = vaapi_drm_format_map[i].va_fourcc;
+ break;
+ }
+ if (!va_fourcc) {
+ av_log(dst_fc, AV_LOG_ERROR, "DRM format not supported "
+ "by VAAPI.\n");
+ return AVERROR(EINVAL);
+ }
+
+ av_log(dst_fc, AV_LOG_DEBUG, "Map DRM object %d to VAAPI as "
+ "%08x.\n", desc->objects[0].fd, va_fourcc);
+
+ format_desc = vaapi_format_from_fourcc(va_fourcc);
+ av_assert0(format_desc);
+
+ buffer_handle = desc->objects[0].fd;
+ buffer_desc.pixel_format = va_fourcc;
+ buffer_desc.width = src_fc->width;
+ buffer_desc.height = src_fc->height;
+ buffer_desc.data_size = desc->objects[0].size;
+ buffer_desc.buffers = &buffer_handle;
+ buffer_desc.num_buffers = 1;
+ buffer_desc.flags = 0;
+
+ k = 0;
+ for (i = 0; i < desc->nb_layers; i++) {
+ for (j = 0; j < desc->layers[i].nb_planes; j++) {
+ buffer_desc.pitches[k] = desc->layers[i].planes[j].pitch;
+ buffer_desc.offsets[k] = desc->layers[i].planes[j].offset;
+ ++k;
+ }
+ }
+ buffer_desc.num_planes = k;
+
+ if (format_desc->chroma_planes_swapped &&
+ buffer_desc.num_planes == 3) {
+ FFSWAP(uint32_t, buffer_desc.pitches[1], buffer_desc.pitches[2]);
+ FFSWAP(uint32_t, buffer_desc.offsets[1], buffer_desc.offsets[2]);
+ }
+
+ vas = vaCreateSurfaces(dst_dev->display, format_desc->rt_format,
+ src->width, src->height,
+ &surface_id, 1,
+ attrs, FF_ARRAY_ELEMS(attrs));
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(dst_fc, AV_LOG_ERROR, "Failed to create surface from DRM "
+ "object: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+ av_log(dst_fc, AV_LOG_DEBUG, "Create surface %#x.\n", surface_id);
+
+ err = ff_hwframe_map_create(dst->hw_frames_ctx, dst, src,
+ &vaapi_unmap_from_drm,
+ (void*)(uintptr_t)surface_id);
+ if (err < 0)
+ return err;
+
+ dst->width = src->width;
+ dst->height = src->height;
+ dst->data[3] = (uint8_t*)(uintptr_t)surface_id;
+
+ av_log(dst_fc, AV_LOG_DEBUG, "Mapped DRM object %d to "
+ "surface %#x.\n", desc->objects[0].fd, surface_id);
+
+ return 0;
+}
+
+#if VA_CHECK_VERSION(1, 1, 0)
+static void vaapi_unmap_to_drm_esh(AVHWFramesContext *hwfc,
+ HWMapDescriptor *hwmap)
+{
+ AVDRMFrameDescriptor *drm_desc = hwmap->priv;
+ int i;
+
+ for (i = 0; i < drm_desc->nb_objects; i++)
+ close(drm_desc->objects[i].fd);
+
+ av_freep(&drm_desc);
+}
+
+static int vaapi_map_to_drm_esh(AVHWFramesContext *hwfc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ VASurfaceID surface_id;
+ VAStatus vas;
+ VADRMPRIMESurfaceDescriptor va_desc;
+ AVDRMFrameDescriptor *drm_desc = NULL;
+ uint32_t export_flags;
+ int err, i, j;
+
+ surface_id = (VASurfaceID)(uintptr_t)src->data[3];
+
+ export_flags = VA_EXPORT_SURFACE_SEPARATE_LAYERS;
+ if (flags & AV_HWFRAME_MAP_READ)
+ export_flags |= VA_EXPORT_SURFACE_READ_ONLY;
+ if (flags & AV_HWFRAME_MAP_WRITE)
+ export_flags |= VA_EXPORT_SURFACE_WRITE_ONLY;
+
+ vas = vaExportSurfaceHandle(hwctx->display, surface_id,
+ VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
+ export_flags, &va_desc);
+ if (vas != VA_STATUS_SUCCESS) {
+ if (vas == VA_STATUS_ERROR_UNIMPLEMENTED)
+ return AVERROR(ENOSYS);
+ av_log(hwfc, AV_LOG_ERROR, "Failed to export surface %#x: "
+ "%d (%s).\n", surface_id, vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+
+ drm_desc = av_mallocz(sizeof(*drm_desc));
+ if (!drm_desc) {
+ err = AVERROR(ENOMEM);
+ goto fail;
+ }
+
+ // By some bizarre coincidence, these structures are very similar...
+ drm_desc->nb_objects = va_desc.num_objects;
+ for (i = 0; i < va_desc.num_objects; i++) {
+ drm_desc->objects[i].fd = va_desc.objects[i].fd;
+ drm_desc->objects[i].size = va_desc.objects[i].size;
+ drm_desc->objects[i].format_modifier =
+ va_desc.objects[i].drm_format_modifier;
+ }
+ drm_desc->nb_layers = va_desc.num_layers;
+ for (i = 0; i < va_desc.num_layers; i++) {
+ drm_desc->layers[i].format = va_desc.layers[i].drm_format;
+ drm_desc->layers[i].nb_planes = va_desc.layers[i].num_planes;
+ for (j = 0; j < va_desc.layers[i].num_planes; j++) {
+ drm_desc->layers[i].planes[j].object_index =
+ va_desc.layers[i].object_index[j];
+ drm_desc->layers[i].planes[j].offset =
+ va_desc.layers[i].offset[j];
+ drm_desc->layers[i].planes[j].pitch =
+ va_desc.layers[i].pitch[j];
+ }
+ }
+
+ err = ff_hwframe_map_create(src->hw_frames_ctx, dst, src,
+ &vaapi_unmap_to_drm_esh, drm_desc);
+ if (err < 0)
+ goto fail;
+
+ dst->width = src->width;
+ dst->height = src->height;
+ dst->data[0] = (uint8_t*)drm_desc;
+
+ return 0;
+
+fail:
+ for (i = 0; i < va_desc.num_objects; i++)
+ close(va_desc.objects[i].fd);
+ av_freep(&drm_desc);
+ return err;
+}
+#endif
+
+#if VA_CHECK_VERSION(0, 36, 0)
+typedef struct VAAPIDRMImageBufferMapping {
+ VAImage image;
+ VABufferInfo buffer_info;
+
+ AVDRMFrameDescriptor drm_desc;
+} VAAPIDRMImageBufferMapping;
+
+static void vaapi_unmap_to_drm_abh(AVHWFramesContext *hwfc,
+ HWMapDescriptor *hwmap)
+{
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ VAAPIDRMImageBufferMapping *mapping = hwmap->priv;
+ VASurfaceID surface_id;
+ VAStatus vas;
+
+ surface_id = (VASurfaceID)(uintptr_t)hwmap->source->data[3];
+ av_log(hwfc, AV_LOG_DEBUG, "Unmap VAAPI surface %#x from DRM.\n",
+ surface_id);
+
+ // DRM PRIME file descriptors are closed by vaReleaseBufferHandle(),
+ // so we shouldn't close them separately.
+
+ vas = vaReleaseBufferHandle(hwctx->display, mapping->image.buf);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to release buffer "
+ "handle of image %#x (derived from surface %#x): "
+ "%d (%s).\n", mapping->image.buf, surface_id,
+ vas, vaErrorStr(vas));
+ }
+
+ vas = vaDestroyImage(hwctx->display, mapping->image.image_id);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to destroy image "
+ "derived from surface %#x: %d (%s).\n",
+ surface_id, vas, vaErrorStr(vas));
+ }
+
+ av_free(mapping);
+}
+
+static int vaapi_map_to_drm_abh(AVHWFramesContext *hwfc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ AVVAAPIDeviceContext *hwctx = hwfc->device_ctx->hwctx;
+ VAAPIDRMImageBufferMapping *mapping = NULL;
+ VASurfaceID surface_id;
+ VAStatus vas;
+ int err, i, p;
+
+ surface_id = (VASurfaceID)(uintptr_t)src->data[3];
+ av_log(hwfc, AV_LOG_DEBUG, "Map VAAPI surface %#x to DRM.\n",
+ surface_id);
+
+ mapping = av_mallocz(sizeof(*mapping));
+ if (!mapping)
+ return AVERROR(ENOMEM);
+
+ vas = vaDeriveImage(hwctx->display, surface_id,
+ &mapping->image);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to derive image from "
+ "surface %#x: %d (%s).\n",
+ surface_id, vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail;
+ }
+
+ for (i = 0; i < FF_ARRAY_ELEMS(vaapi_drm_format_map); i++) {
+ if (vaapi_drm_format_map[i].va_fourcc ==
+ mapping->image.format.fourcc)
+ break;
+ }
+ if (i >= FF_ARRAY_ELEMS(vaapi_drm_format_map)) {
+ av_log(hwfc, AV_LOG_ERROR, "No matching DRM format for "
+ "VAAPI format %#x.\n", mapping->image.format.fourcc);
+ err = AVERROR(EINVAL);
+ goto fail_derived;
+ }
+
+ mapping->buffer_info.mem_type =
+ VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME;
+
+ mapping->drm_desc.nb_layers =
+ vaapi_drm_format_map[i].nb_layer_formats;
+ if (mapping->drm_desc.nb_layers > 1) {
+ if (mapping->drm_desc.nb_layers != mapping->image.num_planes) {
+ av_log(hwfc, AV_LOG_ERROR, "Image properties do not match "
+ "expected format: got %d planes, but expected %d.\n",
+ mapping->image.num_planes, mapping->drm_desc.nb_layers);
+ err = AVERROR(EINVAL);
+ goto fail_derived;
+ }
+
+ for(p = 0; p < mapping->drm_desc.nb_layers; p++) {
+ mapping->drm_desc.layers[p] = (AVDRMLayerDescriptor) {
+ .format = vaapi_drm_format_map[i].layer_formats[p],
+ .nb_planes = 1,
+ .planes[0] = {
+ .object_index = 0,
+ .offset = mapping->image.offsets[p],
+ .pitch = mapping->image.pitches[p],
+ },
+ };
+ }
+ } else {
+ mapping->drm_desc.layers[0].format =
+ vaapi_drm_format_map[i].layer_formats[0];
+ mapping->drm_desc.layers[0].nb_planes = mapping->image.num_planes;
+ for (p = 0; p < mapping->image.num_planes; p++) {
+ mapping->drm_desc.layers[0].planes[p] = (AVDRMPlaneDescriptor) {
+ .object_index = 0,
+ .offset = mapping->image.offsets[p],
+ .pitch = mapping->image.pitches[p],
+ };
+ }
+ }
+
+ vas = vaAcquireBufferHandle(hwctx->display, mapping->image.buf,
+ &mapping->buffer_info);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(hwfc, AV_LOG_ERROR, "Failed to get buffer "
+ "handle from image %#x (derived from surface %#x): "
+ "%d (%s).\n", mapping->image.buf, surface_id,
+ vas, vaErrorStr(vas));
+ err = AVERROR(EIO);
+ goto fail_derived;
+ }
+
+ av_log(hwfc, AV_LOG_DEBUG, "DRM PRIME fd is %ld.\n",
+ mapping->buffer_info.handle);
+
+ mapping->drm_desc.nb_objects = 1;
+ mapping->drm_desc.objects[0] = (AVDRMObjectDescriptor) {
+ .fd = mapping->buffer_info.handle,
+ .size = mapping->image.data_size,
+ // There is no way to get the format modifier with this API.
+ .format_modifier = DRM_FORMAT_MOD_INVALID,
+ };
+
+ err = ff_hwframe_map_create(src->hw_frames_ctx,
+ dst, src, &vaapi_unmap_to_drm_abh,
+ mapping);
+ if (err < 0)
+ goto fail_mapped;
+
+ dst->data[0] = (uint8_t*)&mapping->drm_desc;
+ dst->width = src->width;
+ dst->height = src->height;
+
+ return 0;
+
+fail_mapped:
+ vaReleaseBufferHandle(hwctx->display, mapping->image.buf);
+fail_derived:
+ vaDestroyImage(hwctx->display, mapping->image.image_id);
+fail:
+ av_freep(&mapping);
+ return err;
+}
+#endif
+
+static int vaapi_map_to_drm(AVHWFramesContext *hwfc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+#if VA_CHECK_VERSION(1, 1, 0)
+ int err;
+ err = vaapi_map_to_drm_esh(hwfc, dst, src, flags);
+ if (err != AVERROR(ENOSYS))
+ return err;
+#endif
+#if VA_CHECK_VERSION(0, 36, 0)
+ return vaapi_map_to_drm_abh(hwfc, dst, src, flags);
+#endif
+ return AVERROR(ENOSYS);
+}
+
+#endif /* CONFIG_LIBDRM */
+
+static int vaapi_map_to(AVHWFramesContext *hwfc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ switch (src->format) {
+#if CONFIG_LIBDRM
+ case AV_PIX_FMT_DRM_PRIME:
+ return vaapi_map_from_drm(hwfc, dst, src, flags);
+#endif
+ default:
+ return AVERROR(ENOSYS);
+ }
+}
+
+static int vaapi_map_from(AVHWFramesContext *hwfc, AVFrame *dst,
+ const AVFrame *src, int flags)
+{
+ switch (dst->format) {
+#if CONFIG_LIBDRM
+ case AV_PIX_FMT_DRM_PRIME:
+ return vaapi_map_to_drm(hwfc, dst, src, flags);
+#endif
+ default:
+ return vaapi_map_to_memory(hwfc, dst, src, flags);
+ }
+}
+
+static void vaapi_device_free(AVHWDeviceContext *ctx)
+{
+ AVVAAPIDeviceContext *hwctx = ctx->hwctx;
+ VAAPIDevicePriv *priv = ctx->user_opaque;
+
+ if (hwctx->display)
+ vaTerminate(hwctx->display);
+
+#if HAVE_VAAPI_X11
+ if (priv->x11_display)
+ XCloseDisplay(priv->x11_display);
+#endif
+
+ if (priv->drm_fd >= 0)
+ close(priv->drm_fd);
+
+ av_freep(&priv);
+}
+
+#if CONFIG_VAAPI_1
+static void vaapi_device_log_error(void *context, const char *message)
+{
+ AVHWDeviceContext *ctx = context;
+
+ av_log(ctx, AV_LOG_ERROR, "libva: %s", message);
+}
+
+static void vaapi_device_log_info(void *context, const char *message)
+{
+ AVHWDeviceContext *ctx = context;
+
+ av_log(ctx, AV_LOG_VERBOSE, "libva: %s", message);
+}
+#endif
+
+static int vaapi_device_connect(AVHWDeviceContext *ctx,
+ VADisplay display)
+{
+ AVVAAPIDeviceContext *hwctx = ctx->hwctx;
+ int major, minor;
+ VAStatus vas;
+
+#if CONFIG_VAAPI_1
+ vaSetErrorCallback(display, &vaapi_device_log_error, ctx);
+ vaSetInfoCallback (display, &vaapi_device_log_info, ctx);
+#endif
+
+ hwctx->display = display;
+
+ vas = vaInitialize(display, &major, &minor);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to initialise VAAPI "
+ "connection: %d (%s).\n", vas, vaErrorStr(vas));
+ return AVERROR(EIO);
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "Initialised VAAPI connection: "
+ "version %d.%d\n", major, minor);
+
+ return 0;
+}
+
+static int vaapi_device_create(AVHWDeviceContext *ctx, const char *device,
+ AVDictionary *opts, int flags)
+{
+ VAAPIDevicePriv *priv;
+ VADisplay display = NULL;
+ const AVDictionaryEntry *ent;
+ int try_drm, try_x11, try_all;
+
+ priv = av_mallocz(sizeof(*priv));
+ if (!priv)
+ return AVERROR(ENOMEM);
+
+ priv->drm_fd = -1;
+
+ ctx->user_opaque = priv;
+ ctx->free = vaapi_device_free;
+
+ ent = av_dict_get(opts, "connection_type", NULL, 0);
+ if (ent) {
+ try_all = try_drm = try_x11 = 0;
+ if (!strcmp(ent->value, "drm")) {
+ try_drm = 1;
+ } else if (!strcmp(ent->value, "x11")) {
+ try_x11 = 1;
+ } else {
+ av_log(ctx, AV_LOG_ERROR, "Invalid connection type %s.\n",
+ ent->value);
+ return AVERROR(EINVAL);
+ }
+ } else {
+ try_all = 1;
+ try_drm = HAVE_VAAPI_DRM;
+ try_x11 = HAVE_VAAPI_X11;
+ }
+
+#if HAVE_VAAPI_DRM
+ while (!display && try_drm) {
+ // If the device is specified, try to open it as a DRM device node.
+ // If not, look for a usable render node, possibly restricted to those
+ // using a specified kernel driver.
+ int loglevel = try_all ? AV_LOG_VERBOSE : AV_LOG_ERROR;
+ if (device) {
+ priv->drm_fd = open(device, O_RDWR);
+ if (priv->drm_fd < 0) {
+ av_log(ctx, loglevel, "Failed to open %s as "
+ "DRM device node.\n", device);
+ break;
+ }
+ } else {
+ char path[64];
+ int n, max_devices = 8;
+#if CONFIG_LIBDRM
+ const AVDictionaryEntry *kernel_driver;
+ kernel_driver = av_dict_get(opts, "kernel_driver", NULL, 0);
+#endif
+ for (n = 0; n < max_devices; n++) {
+ snprintf(path, sizeof(path),
+ "/dev/dri/renderD%d", 128 + n);
+ priv->drm_fd = open(path, O_RDWR);
+ if (priv->drm_fd < 0) {
+ av_log(ctx, AV_LOG_VERBOSE, "Cannot open "
+ "DRM render node for device %d.\n", n);
+ break;
+ }
+#if CONFIG_LIBDRM
+ if (kernel_driver) {
+ drmVersion *info;
+ info = drmGetVersion(priv->drm_fd);
+ if (strcmp(kernel_driver->value, info->name)) {
+ av_log(ctx, AV_LOG_VERBOSE, "Ignoring device %d "
+ "with non-matching kernel driver (%s).\n",
+ n, info->name);
+ drmFreeVersion(info);
+ close(priv->drm_fd);
+ priv->drm_fd = -1;
+ continue;
+ }
+ av_log(ctx, AV_LOG_VERBOSE, "Trying to use "
+ "DRM render node for device %d, "
+ "with matching kernel driver (%s).\n",
+ n, info->name);
+ drmFreeVersion(info);
+ } else
+#endif
+ {
+ av_log(ctx, AV_LOG_VERBOSE, "Trying to use "
+ "DRM render node for device %d.\n", n);
+ }
+ break;
+ }
+ if (n >= max_devices)
+ break;
+ }
+
+ display = vaGetDisplayDRM(priv->drm_fd);
+ if (!display) {
+ av_log(ctx, AV_LOG_VERBOSE, "Cannot open a VA display "
+ "from DRM device %s.\n", device);
+ return AVERROR_EXTERNAL;
+ }
+ break;
+ }
+#endif
+
+#if HAVE_VAAPI_X11
+ if (!display && try_x11) {
+ // Try to open the device as an X11 display.
+ priv->x11_display = XOpenDisplay(device);
+ if (!priv->x11_display) {
+ av_log(ctx, AV_LOG_VERBOSE, "Cannot open X11 display "
+ "%s.\n", XDisplayName(device));
+ } else {
+ display = vaGetDisplay(priv->x11_display);
+ if (!display) {
+ av_log(ctx, AV_LOG_ERROR, "Cannot open a VA display "
+ "from X11 display %s.\n", XDisplayName(device));
+ return AVERROR_UNKNOWN;
+ }
+
+ av_log(ctx, AV_LOG_VERBOSE, "Opened VA display via "
+ "X11 display %s.\n", XDisplayName(device));
+ }
+ }
+#endif
+
+ if (!display) {
+ if (device)
+ av_log(ctx, AV_LOG_ERROR, "No VA display found for "
+ "device %s.\n", device);
+ else
+ av_log(ctx, AV_LOG_ERROR, "No VA display found for "
+ "any default device.\n");
+ return AVERROR(EINVAL);
+ }
+
+ ent = av_dict_get(opts, "driver", NULL, 0);
+ if (ent) {
+#if VA_CHECK_VERSION(0, 38, 0)
+ VAStatus vas;
+ vas = vaSetDriverName(display, ent->value);
+ if (vas != VA_STATUS_SUCCESS) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to set driver name to "
+ "%s: %d (%s).\n", ent->value, vas, vaErrorStr(vas));
+ vaTerminate(display);
+ return AVERROR_EXTERNAL;
+ }
+#else
+ av_log(ctx, AV_LOG_WARNING, "Driver name setting is not "
+ "supported with this VAAPI version.\n");
+#endif
+ }
+
+ return vaapi_device_connect(ctx, display);
+}
+
+static int vaapi_device_derive(AVHWDeviceContext *ctx,
+ AVHWDeviceContext *src_ctx, int flags)
+{
+#if HAVE_VAAPI_DRM
+ if (src_ctx->type == AV_HWDEVICE_TYPE_DRM) {
+ AVDRMDeviceContext *src_hwctx = src_ctx->hwctx;
+ VADisplay *display;
+ VAAPIDevicePriv *priv;
+
+ if (src_hwctx->fd < 0) {
+ av_log(ctx, AV_LOG_ERROR, "DRM instance requires an associated "
+ "device to derive a VA display from.\n");
+ return AVERROR(EINVAL);
+ }
+
+ priv = av_mallocz(sizeof(*priv));
+ if (!priv)
+ return AVERROR(ENOMEM);
+
+ // Inherits the fd from the source context, which will close it.
+ priv->drm_fd = -1;
+
+ ctx->user_opaque = priv;
+ ctx->free = &vaapi_device_free;
+
+ display = vaGetDisplayDRM(src_hwctx->fd);
+ if (!display) {
+ av_log(ctx, AV_LOG_ERROR, "Failed to open a VA display from "
+ "DRM device.\n");
+ return AVERROR(EIO);
+ }
+
+ return vaapi_device_connect(ctx, display);
+ }
+#endif
+ return AVERROR(ENOSYS);
+}
+
+const HWContextType ff_hwcontext_type_vaapi = {
+ .type = AV_HWDEVICE_TYPE_VAAPI,
+ .name = "VAAPI",
+
+ .device_hwctx_size = sizeof(AVVAAPIDeviceContext),
+ .device_priv_size = sizeof(VAAPIDeviceContext),
+ .device_hwconfig_size = sizeof(AVVAAPIHWConfig),
+ .frames_hwctx_size = sizeof(AVVAAPIFramesContext),
+ .frames_priv_size = sizeof(VAAPIFramesContext),
+
+ .device_create = &vaapi_device_create,
+ .device_derive = &vaapi_device_derive,
+ .device_init = &vaapi_device_init,
+ .device_uninit = &vaapi_device_uninit,
+ .frames_get_constraints = &vaapi_frames_get_constraints,
+ .frames_init = &vaapi_frames_init,
+ .frames_uninit = &vaapi_frames_uninit,
+ .frames_get_buffer = &vaapi_get_buffer,
+ .transfer_get_formats = &vaapi_transfer_get_formats,
+ .transfer_data_to = &vaapi_transfer_data_to,
+ .transfer_data_from = &vaapi_transfer_data_from,
+ .map_to = &vaapi_map_to,
+ .map_from = &vaapi_map_from,
+
+ .pix_fmts = (const enum AVPixelFormat[]) {
+ AV_PIX_FMT_VAAPI,
+ AV_PIX_FMT_NONE
+ },
+};
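
The map_from/vaapi_map_to_drm() path above is what turns a decoded VAAPI surface into dma-buf objects (via vaExportSurfaceHandle when available, vaAcquireBufferHandle otherwise). A minimal sketch of driving it through the public av_hwframe_map() API, not taken from the patch; the helper name is illustrative and error handling is trimmed:

    #include <libavutil/frame.h>
    #include <libavutil/hwcontext.h>
    #include <libavutil/hwcontext_drm.h>

    /* src is an AV_PIX_FMT_VAAPI frame from the decoder. On success, the
     * returned frame carries an AVDRMFrameDescriptor in data[0]; freeing it
     * with av_frame_free() unmaps the surface and closes the exported fds. */
    static AVFrame *export_as_drm_prime(const AVFrame *src)
    {
        AVFrame *drm = av_frame_alloc();
        if (!drm)
            return NULL;

        drm->format = AV_PIX_FMT_DRM_PRIME;   /* selects the DRM map_from path */
        if (av_hwframe_map(drm, src, AV_HWFRAME_MAP_READ) < 0) {
            av_frame_free(&drm);
            return NULL;
        }
        return drm;
    }
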
diff -up firefox-84.0/media/ffvpx/libavutil/hwcontext_vaapi.h.ffvpx firefox-84.0/media/ffvpx/libavutil/hwcontext_vaapi.h
--- firefox-84.0/media/ffvpx/libavutil/hwcontext_vaapi.h.ffvpx 2020-12-10 20:40:53.390541394 +0100
+++ firefox-84.0/media/ffvpx/libavutil/hwcontext_vaapi.h 2020-12-10 20:40:53.390541394 +0100
@@ -0,0 +1,117 @@
+/*
+ * This file is part of FFmpeg.
+ *
+ * FFmpeg is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2.1 of the License, or (at your option) any later version.
+ *
+ * FFmpeg is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with FFmpeg; if not, write to the Free Software
+ * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ */
+
+#ifndef AVUTIL_HWCONTEXT_VAAPI_H
+#define AVUTIL_HWCONTEXT_VAAPI_H
+
+#include <va/va.h>
+
+/**
+ * @file
+ * API-specific header for AV_HWDEVICE_TYPE_VAAPI.
+ *
+ * Dynamic frame pools are supported, but note that any pool used as a render
+ * target is required to be of fixed size in order to be usable as an
+ * argument to vaCreateContext().
+ *
+ * For user-allocated pools, AVHWFramesContext.pool must return AVBufferRefs
+ * with the data pointer set to a VASurfaceID.
+ */
+
+enum {
+ /**
+ * The quirks field has been set by the user and should not be detected
+ * automatically by av_hwdevice_ctx_init().
+ */
+ AV_VAAPI_DRIVER_QUIRK_USER_SET = (1 << 0),
+ /**
+ * The driver does not destroy parameter buffers when they are used by
+ * vaRenderPicture(). Additional code will be required to destroy them
+ * separately afterwards.
+ */
+ AV_VAAPI_DRIVER_QUIRK_RENDER_PARAM_BUFFERS = (1 << 1),
+
+ /**
+ * The driver does not support the VASurfaceAttribMemoryType attribute,
+ * so the surface allocation code will not try to use it.
+ */
+ AV_VAAPI_DRIVER_QUIRK_ATTRIB_MEMTYPE = (1 << 2),
+
+ /**
+ * The driver does not support surface attributes at all.
+ * The surface allocation code will never pass them to surface allocation,
+ * and the results of the vaQuerySurfaceAttributes() call will be faked.
+ */
+ AV_VAAPI_DRIVER_QUIRK_SURFACE_ATTRIBUTES = (1 << 3),
+};
+
+/**
+ * VAAPI connection details.
+ *
+ * Allocated as AVHWDeviceContext.hwctx
+ */
+typedef struct AVVAAPIDeviceContext {
+ /**
+ * The VADisplay handle, to be filled by the user.
+ */
+ VADisplay display;
+ /**
+ * Driver quirks to apply - this is filled by av_hwdevice_ctx_init(),
+ * with reference to a table of known drivers, unless the
+ * AV_VAAPI_DRIVER_QUIRK_USER_SET bit is already present. The user
+ * may need to refer to this field when performing any later
+ * operations using VAAPI with the same VADisplay.
+ */
+ unsigned int driver_quirks;
+} AVVAAPIDeviceContext;
+
+/**
+ * VAAPI-specific data associated with a frame pool.
+ *
+ * Allocated as AVHWFramesContext.hwctx.
+ */
+typedef struct AVVAAPIFramesContext {
+ /**
+ * Set by the user to apply surface attributes to all surfaces in
+ * the frame pool. If null, default settings are used.
+ */
+ VASurfaceAttrib *attributes;
+ int nb_attributes;
+ /**
+ * The surface IDs of all surfaces in the pool after creation.
+ * Only valid if AVHWFramesContext.initial_pool_size was positive.
+ * These are intended to be used as the render_targets arguments to
+ * vaCreateContext().
+ */
+ VASurfaceID *surface_ids;
+ int nb_surfaces;
+} AVVAAPIFramesContext;
+
+/**
+ * VAAPI hardware pipeline configuration details.
+ *
+ * Allocated with av_hwdevice_hwconfig_alloc().
+ */
+typedef struct AVVAAPIHWConfig {
+ /**
+ * ID of a VAAPI pipeline configuration.
+ */
+ VAConfigID config_id;
+} AVVAAPIHWConfig;
+
+#endif /* AVUTIL_HWCONTEXT_VAAPI_H */
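
The header above spells out the contract for wrapping an existing VADisplay: allocate the device context, fill AVVAAPIDeviceContext.display, then let av_hwdevice_ctx_init() probe the driver and set driver_quirks. A minimal sketch under that contract, assuming the caller has already opened and vaInitialize()d the display itself (for example via vaGetDisplayDRM() on an open render node):

    #include <va/va.h>
    #include <libavutil/hwcontext.h>
    #include <libavutil/hwcontext_vaapi.h>

    /* Wrap a VADisplay the caller owns; libavutil will not vaTerminate() it. */
    static AVBufferRef *wrap_va_display(VADisplay display)
    {
        AVBufferRef *ref = av_hwdevice_ctx_alloc(AV_HWDEVICE_TYPE_VAAPI);
        AVHWDeviceContext *ctx;
        AVVAAPIDeviceContext *vactx;

        if (!ref)
            return NULL;
        ctx   = (AVHWDeviceContext *)ref->data;
        vactx = ctx->hwctx;                /* AVVAAPIDeviceContext, per the header */
        vactx->display = display;          /* the user-filled field */

        if (av_hwdevice_ctx_init(ref) < 0) {   /* detects driver quirks */
            av_buffer_unref(&ref);
            return NULL;
        }
        return ref;
    }
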
diff -up firefox-84.0/media/ffvpx/libavutil/moz.build.ffvpx firefox-84.0/media/ffvpx/libavutil/moz.build
--- firefox-84.0/media/ffvpx/libavutil/moz.build.ffvpx 2020-12-08 00:35:05.000000000 +0100
+++ firefox-84.0/media/ffvpx/libavutil/moz.build 2020-12-10 20:40:53.390541394 +0100
@@ -45,6 +45,11 @@ SOURCES += [
'time.c',
'utils.c'
]
+if CONFIG['MOZ_WAYLAND']:
+ SOURCES += [
+ 'hwcontext_vaapi.c',
+ ]
+ USE_LIBS += ['mozva']
if not CONFIG['MOZ_FFVPX_AUDIOONLY']:
SOURCES += [
diff -up firefox-84.0/media/ffvpx/moz.build.ffvpx firefox-84.0/media/ffvpx/moz.build
--- firefox-84.0/media/ffvpx/moz.build.ffvpx 2020-12-08 00:35:05.000000000 +0100
+++ firefox-84.0/media/ffvpx/moz.build 2020-12-10 20:40:53.390541394 +0100
@@ -11,3 +11,8 @@ DIRS += [
'libavutil',
'libavcodec'
]
+
+if CONFIG['MOZ_WAYLAND']:
+ DIRS += [
+ 'mozva',
+ ]
diff -up firefox-84.0/media/ffvpx/mozva/moz.build.ffvpx firefox-84.0/media/ffvpx/mozva/moz.build
--- firefox-84.0/media/ffvpx/mozva/moz.build.ffvpx 2020-12-10 20:40:53.390541394 +0100
+++ firefox-84.0/media/ffvpx/mozva/moz.build 2020-12-10 20:40:53.390541394 +0100
@@ -0,0 +1,13 @@
+# -*- Mode: python; indent-tabs-mode: nil; tab-width: 40 -*-
+# vim: set filetype=python:
+# This Source Code Form is subject to the terms of the Mozilla Public
+# License, v. 2.0. If a copy of the MPL was not distributed with this
+# file, You can obtain one at http://mozilla.org/MPL/2.0/.
+
+SOURCES += [
+ 'mozva.c',
+]
+
+LOCAL_INCLUDES += ['/media/ffvpx']
+
+Library('mozva')
diff -up firefox-84.0/media/ffvpx/mozva/mozva.c.ffvpx firefox-84.0/media/ffvpx/mozva/mozva.c
--- firefox-84.0/media/ffvpx/mozva/mozva.c.ffvpx 2020-12-10 20:40:53.390541394 +0100
+++ firefox-84.0/media/ffvpx/mozva/mozva.c 2020-12-10 20:40:53.390541394 +0100
@@ -0,0 +1,406 @@
+/* -*- Mode: C; tab-width: 4; indent-tabs-mode: nil; c-basic-offset: 2 -*- */
+/* vim:expandtab:shiftwidth=4:tabstop=4:
+ */
+/* This Source Code Form is subject to the terms of the Mozilla Public
+ * License, v. 2.0. If a copy of the MPL was not distributed with this
+ * file, You can obtain one at http://mozilla.org/MPL/2.0/. */
+
+#pragma GCC visibility push(default)
+#include <va/va.h>
+#pragma GCC visibility pop
+
+#include "mozilla/Types.h"
+#include <dlfcn.h>
+#include <pthread.h>
+#include <stdlib.h>
+
+#define GET_FUNC(func, lib) (func##Fn = dlsym(lib, #func))
+
+#define IS_FUNC_LOADED(func) (func##Fn != NULL)
+
+static VAStatus (*vaDestroyBufferFn)(VADisplay dpy, VABufferID buffer_id);
+static VAStatus (*vaBeginPictureFn)(VADisplay dpy, VAContextID context,
+ VASurfaceID render_target);
+static VAStatus (*vaEndPictureFn)(VADisplay dpy, VAContextID context);
+static VAStatus (*vaRenderPictureFn)(VADisplay dpy, VAContextID context,
+ VABufferID* buffers, int num_buffers);
+static int (*vaMaxNumProfilesFn)(VADisplay dpy);
+static VAStatus (*vaCreateContextFn)(VADisplay dpy, VAConfigID config_id,
+ int picture_width, int picture_height,
+ int flag, VASurfaceID* render_targets,
+ int num_render_targets,
+ VAContextID* context /* out */);
+static VAStatus (*vaDestroyContextFn)(VADisplay dpy, VAContextID context);
+static VAStatus (*vaCreateBufferFn)(VADisplay dpy, VAContextID context,
+ VABufferType type, /* in */
+ unsigned int size, /* in */
+ unsigned int num_elements, /* in */
+ void* data, /* in */
+ VABufferID* buf_id /* out */);
+static VAStatus (*vaQuerySurfaceAttributesFn)(VADisplay dpy, VAConfigID config,
+ VASurfaceAttrib* attrib_list,
+ unsigned int* num_attribs);
+static VAStatus (*vaQueryConfigProfilesFn)(VADisplay dpy,
+ VAProfile* profile_list, /* out */
+ int* num_profiles /* out */);
+static const char* (*vaErrorStrFn)(VAStatus error_status);
+static VAStatus (*vaCreateConfigFn)(VADisplay dpy, VAProfile profile,
+ VAEntrypoint entrypoint,
+ VAConfigAttrib* attrib_list,
+ int num_attribs,
+ VAConfigID* config_id /* out */);
+static VAStatus (*vaDestroyConfigFn)(VADisplay dpy, VAConfigID config_id);
+static int (*vaMaxNumImageFormatsFn)(VADisplay dpy);
+static VAStatus (*vaQueryImageFormatsFn)(VADisplay dpy,
+ VAImageFormat* format_list, /* out */
+ int* num_formats /* out */);
+static const char* (*vaQueryVendorStringFn)(VADisplay dpy);
+static VAStatus (*vaDestroySurfacesFn)(VADisplay dpy, VASurfaceID* surfaces,
+ int num_surfaces);
+static VAStatus (*vaCreateSurfacesFn)(VADisplay dpy, unsigned int format,
+ unsigned int width, unsigned int height,
+ VASurfaceID* surfaces,
+ unsigned int num_surfaces,
+ VASurfaceAttrib* attrib_list,
+ unsigned int num_attribs);
+static VAStatus (*vaDeriveImageFn)(VADisplay dpy, VASurfaceID surface,
+ VAImage* image /* out */);
+static VAStatus (*vaDestroyImageFn)(VADisplay dpy, VAImageID image);
+static VAStatus (*vaPutImageFn)(VADisplay dpy, VASurfaceID surface,
+ VAImageID image, int src_x, int src_y,
+ unsigned int src_width, unsigned int src_height,
+ int dest_x, int dest_y, unsigned int dest_width,
+ unsigned int dest_height);
+static VAStatus (*vaSyncSurfaceFn)(VADisplay dpy, VASurfaceID render_target);
+static VAStatus (*vaCreateImageFn)(VADisplay dpy, VAImageFormat* format,
+ int width, int height,
+ VAImage* image /* out */);
+static VAStatus (*vaGetImageFn)(
+ VADisplay dpy, VASurfaceID surface,
+ int x, /* coordinates of the upper left source pixel */
+ int y, unsigned int width, /* width and height of the region */
+ unsigned int height, VAImageID image);
+static VAStatus (*vaMapBufferFn)(VADisplay dpy, VABufferID buf_id, /* in */
+ void** pbuf /* out */);
+static VAStatus (*vaUnmapBufferFn)(VADisplay dpy, VABufferID buf_id /* in */);
+static VAStatus (*vaTerminateFn)(VADisplay dpy);
+static VAStatus (*vaInitializeFn)(VADisplay dpy, int* major_version, /* out */
+ int* minor_version /* out */);
+static VAStatus (*vaSetDriverNameFn)(VADisplay dpy, char* driver_name);
+
+int LoadVALibrary() {
+ static pthread_mutex_t sVALock = PTHREAD_MUTEX_INITIALIZER;
+ static void* sVALib = NULL;
+ static int sVAInitialized = 0;
+ static int sVALoaded = 0;
+
+ pthread_mutex_lock(&sVALock);
+
+ if (!sVAInitialized) {
+ sVAInitialized = 1;
+ sVALib = dlopen("libva.so.2", RTLD_LAZY);
+ if (!sVALib) {
+ pthread_mutex_unlock(&sVALock);
+ return 0;
+ }
+ GET_FUNC(vaDestroyBuffer, sVALib);
+ GET_FUNC(vaBeginPicture, sVALib);
+ GET_FUNC(vaEndPicture, sVALib);
+ GET_FUNC(vaRenderPicture, sVALib);
+ GET_FUNC(vaMaxNumProfiles, sVALib);
+ GET_FUNC(vaCreateContext, sVALib);
+ GET_FUNC(vaDestroyContext, sVALib);
+ GET_FUNC(vaCreateBuffer, sVALib);
+ GET_FUNC(vaQuerySurfaceAttributes, sVALib);
+ GET_FUNC(vaQueryConfigProfiles, sVALib);
+ GET_FUNC(vaErrorStr, sVALib);
+ GET_FUNC(vaCreateConfig, sVALib);
+ GET_FUNC(vaDestroyConfig, sVALib);
+ GET_FUNC(vaMaxNumImageFormats, sVALib);
+ GET_FUNC(vaQueryImageFormats, sVALib);
+ GET_FUNC(vaQueryVendorString, sVALib);
+ GET_FUNC(vaDestroySurfaces, sVALib);
+ GET_FUNC(vaCreateSurfaces, sVALib);
+ GET_FUNC(vaDeriveImage, sVALib);
+ GET_FUNC(vaDestroyImage, sVALib);
+ GET_FUNC(vaPutImage, sVALib);
+ GET_FUNC(vaSyncSurface, sVALib);
+ GET_FUNC(vaCreateImage, sVALib);
+ GET_FUNC(vaGetImage, sVALib);
+ GET_FUNC(vaMapBuffer, sVALib);
+ GET_FUNC(vaUnmapBuffer, sVALib);
+ GET_FUNC(vaTerminate, sVALib);
+ GET_FUNC(vaInitialize, sVALib);
+ GET_FUNC(vaSetDriverName, sVALib);
+
+ sVALoaded =
+ (IS_FUNC_LOADED(vaDestroyBuffer) && IS_FUNC_LOADED(vaBeginPicture) &&
+ IS_FUNC_LOADED(vaEndPicture) && IS_FUNC_LOADED(vaRenderPicture) &&
+ IS_FUNC_LOADED(vaMaxNumProfiles) && IS_FUNC_LOADED(vaCreateContext) &&
+ IS_FUNC_LOADED(vaDestroyContext) && IS_FUNC_LOADED(vaCreateBuffer) &&
+ IS_FUNC_LOADED(vaQuerySurfaceAttributes) &&
+ IS_FUNC_LOADED(vaQueryConfigProfiles) && IS_FUNC_LOADED(vaErrorStr) &&
+ IS_FUNC_LOADED(vaCreateConfig) && IS_FUNC_LOADED(vaDestroyConfig) &&
+ IS_FUNC_LOADED(vaMaxNumImageFormats) &&
+ IS_FUNC_LOADED(vaQueryImageFormats) &&
+ IS_FUNC_LOADED(vaQueryVendorString) &&
+ IS_FUNC_LOADED(vaDestroySurfaces) &&
+ IS_FUNC_LOADED(vaCreateSurfaces) && IS_FUNC_LOADED(vaDeriveImage) &&
+ IS_FUNC_LOADED(vaDestroyImage) && IS_FUNC_LOADED(vaPutImage) &&
+ IS_FUNC_LOADED(vaSyncSurface) && IS_FUNC_LOADED(vaCreateImage) &&
+ IS_FUNC_LOADED(vaGetImage) && IS_FUNC_LOADED(vaMapBuffer) &&
+ IS_FUNC_LOADED(vaUnmapBuffer) && IS_FUNC_LOADED(vaTerminate) &&
+ IS_FUNC_LOADED(vaInitialize) && IS_FUNC_LOADED(vaSetDriverName));
+ }
+ pthread_mutex_unlock(&sVALock);
+ return sVALoaded;
+}
+
+#pragma GCC visibility push(default)
+
+VAStatus vaDestroyBuffer(VADisplay dpy, VABufferID buffer_id) {
+ if (LoadVALibrary()) {
+ return vaDestroyBufferFn(dpy, buffer_id);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaBeginPicture(VADisplay dpy, VAContextID context,
+ VASurfaceID render_target) {
+ if (LoadVALibrary()) {
+ return vaBeginPictureFn(dpy, context, render_target);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaEndPicture(VADisplay dpy, VAContextID context) {
+ if (LoadVALibrary()) {
+ return vaEndPictureFn(dpy, context);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaRenderPicture(VADisplay dpy, VAContextID context,
+ VABufferID* buffers, int num_buffers) {
+ if (LoadVALibrary()) {
+ return vaRenderPictureFn(dpy, context, buffers, num_buffers);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+int vaMaxNumProfiles(VADisplay dpy) {
+ if (LoadVALibrary()) {
+ return vaMaxNumProfilesFn(dpy);
+ }
+ return 0;
+}
+
+VAStatus vaCreateContext(VADisplay dpy, VAConfigID config_id, int picture_width,
+ int picture_height, int flag,
+ VASurfaceID* render_targets, int num_render_targets,
+ VAContextID* context /* out */) {
+ if (LoadVALibrary()) {
+ return vaCreateContextFn(dpy, config_id, picture_width, picture_height,
+ flag, render_targets, num_render_targets, context);
+ }
+ *context = 0;
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaDestroyContext(VADisplay dpy, VAContextID context) {
+ if (LoadVALibrary()) {
+ return vaDestroyContextFn(dpy, context);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaCreateBuffer(VADisplay dpy, VAContextID context,
+ VABufferType type, /* in */
+ unsigned int size, /* in */
+ unsigned int num_elements, /* in */
+ void* data, /* in */
+ VABufferID* buf_id /* out */) {
+ if (LoadVALibrary()) {
+ return vaCreateBufferFn(dpy, context, type, size, num_elements, data,
+ buf_id);
+ }
+ *buf_id = 0;
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaQuerySurfaceAttributes(VADisplay dpy, VAConfigID config,
+ VASurfaceAttrib* attrib_list,
+ unsigned int* num_attribs) {
+ if (LoadVALibrary()) {
+ return vaQuerySurfaceAttributesFn(dpy, config, attrib_list, num_attribs);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaQueryConfigProfiles(VADisplay dpy, VAProfile* profile_list, /* out */
+ int* num_profiles /* out */) {
+ if (LoadVALibrary()) {
+ return vaQueryConfigProfilesFn(dpy, profile_list, num_profiles);
+ }
+ *num_profiles = 0;
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+const char* vaErrorStr(VAStatus error_status) {
+ if (LoadVALibrary()) {
+ return vaErrorStrFn(error_status);
+ }
+ static char tmp[] = "Unimplemented";
+ return tmp;
+}
+
+VAStatus vaCreateConfig(VADisplay dpy, VAProfile profile,
+ VAEntrypoint entrypoint, VAConfigAttrib* attrib_list,
+ int num_attribs, VAConfigID* config_id /* out */) {
+ if (LoadVALibrary()) {
+ return vaCreateConfigFn(dpy, profile, entrypoint, attrib_list, num_attribs,
+ config_id);
+ }
+ *config_id = 0;
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaDestroyConfig(VADisplay dpy, VAConfigID config_id) {
+ if (LoadVALibrary()) {
+ return vaDestroyConfigFn(dpy, config_id);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+int vaMaxNumImageFormats(VADisplay dpy) {
+ if (LoadVALibrary()) {
+ return vaMaxNumImageFormatsFn(dpy);
+ }
+ return 0;
+}
+
+VAStatus vaQueryImageFormats(VADisplay dpy, VAImageFormat* format_list,
+ int* num_formats) {
+ if (LoadVALibrary()) {
+ return vaQueryImageFormatsFn(dpy, format_list, num_formats);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+const char* vaQueryVendorString(VADisplay dpy) {
+ if (LoadVALibrary()) {
+ return vaQueryVendorStringFn(dpy);
+ }
+ return NULL;
+}
+
+VAStatus vaDestroySurfaces(VADisplay dpy, VASurfaceID* surfaces,
+ int num_surfaces) {
+ if (LoadVALibrary()) {
+ return vaDestroySurfacesFn(dpy, surfaces, num_surfaces);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaCreateSurfaces(VADisplay dpy, unsigned int format,
+ unsigned int width, unsigned int height,
+ VASurfaceID* surfaces, unsigned int num_surfaces,
+ VASurfaceAttrib* attrib_list,
+ unsigned int num_attribs) {
+ if (LoadVALibrary()) {
+ return vaCreateSurfacesFn(dpy, format, width, height, surfaces,
+ num_surfaces, attrib_list, num_attribs);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaDeriveImage(VADisplay dpy, VASurfaceID surface,
+ VAImage* image /* out */) {
+ if (LoadVALibrary()) {
+ return vaDeriveImageFn(dpy, surface, image);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaDestroyImage(VADisplay dpy, VAImageID image) {
+ if (LoadVALibrary()) {
+ return vaDestroyImageFn(dpy, image);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaPutImage(VADisplay dpy, VASurfaceID surface, VAImageID image,
+ int src_x, int src_y, unsigned int src_width,
+ unsigned int src_height, int dest_x, int dest_y,
+ unsigned int dest_width, unsigned int dest_height) {
+ if (LoadVALibrary()) {
+ return vaPutImageFn(dpy, surface, image, src_x, src_y, src_width,
+ src_height, dest_x, dest_y, dest_width, dest_height);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaSyncSurface(VADisplay dpy, VASurfaceID render_target) {
+ if (LoadVALibrary()) {
+ return vaSyncSurfaceFn(dpy, render_target);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaCreateImage(VADisplay dpy, VAImageFormat* format, int width,
+ int height, VAImage* image /* out */) {
+ if (LoadVALibrary()) {
+ return vaCreateImageFn(dpy, format, width, height, image);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaGetImage(VADisplay dpy, VASurfaceID surface,
+ int x, /* coordinates of the upper left source pixel */
+ int y,
+ unsigned int width, /* width and height of the region */
+ unsigned int height, VAImageID image) {
+ if (LoadVALibrary()) {
+ return vaGetImageFn(dpy, surface, x, y, width, height, image);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaMapBuffer(VADisplay dpy, VABufferID buf_id, /* in */
+ void** pbuf /* out */) {
+ if (LoadVALibrary()) {
+ return vaMapBufferFn(dpy, buf_id, pbuf);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaUnmapBuffer(VADisplay dpy, VABufferID buf_id /* in */) {
+ if (LoadVALibrary()) {
+ return vaUnmapBufferFn(dpy, buf_id);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaTerminate(VADisplay dpy) {
+ if (LoadVALibrary()) {
+ return vaTerminateFn(dpy);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaInitialize(VADisplay dpy, int* major_version, /* out */
+ int* minor_version /* out */) {
+ if (LoadVALibrary()) {
+ return vaInitializeFn(dpy, major_version, minor_version);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+VAStatus vaSetDriverName(VADisplay dpy, char* driver_name) {
+ if (LoadVALibrary()) {
+ return vaSetDriverNameFn(dpy, driver_name);
+ }
+ return VA_STATUS_ERROR_UNIMPLEMENTED;
+}
+
+#pragma GCC visibility pop
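mozva.c above gives ffvpx real libva symbols to link against while deferring to runtime the question of whether a system libva exists: every wrapper lazily dlopen()s libva.so.2 and, when that fails, falls back to VA_STATUS_ERROR_UNIMPLEMENTED (or 0/NULL for the query helpers). As a hedged illustration, not part of the patch, a caller can probe that fallback roughly like this; HaveSystemLibva is a hypothetical helper:

#include <stdio.h>
#include <va/va.h>

/* Hypothetical probe: with no libva.so.2 installed, the mozva stubs above
 * report VA_STATUS_ERROR_UNIMPLEMENTED, so callers can degrade gracefully. */
static int HaveSystemLibva(VADisplay dpy) {
  int major = 0, minor = 0;
  VAStatus status = vaInitialize(dpy, &major, &minor);
  if (status == VA_STATUS_ERROR_UNIMPLEMENTED) {
    fprintf(stderr, "VA-API unavailable: %s\n", vaErrorStr(status));
    return 0;
  }
  return status == VA_STATUS_SUCCESS;
}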
diff -up firefox-84.0/media/ffvpx/README_MOZILLA.ffvpx firefox-84.0/media/ffvpx/README_MOZILLA
--- firefox-84.0/media/ffvpx/README_MOZILLA.ffvpx 2020-12-08 00:35:05.000000000 +0100
+++ firefox-84.0/media/ffvpx/README_MOZILLA 2020-12-10 20:40:53.390541394 +0100
@@ -56,3 +56,5 @@ $ for i in `cat $PATH_CENTRAL/media/ffvp
Then apply patch.diff on the ffvpx tree.
Compilation will reveal if any files are missing.
+
+Apply linux-vaapi-build.patch to enable building of VA-API support on Linux.
diff -up firefox-84.0/media/ffvpx/va/README.ffvpx firefox-84.0/media/ffvpx/va/README
--- firefox-84.0/media/ffvpx/va/README.ffvpx 2020-12-10 20:40:53.390541394 +0100
+++ firefox-84.0/media/ffvpx/va/README 2020-12-10 20:40:53.390541394 +0100
@@ -0,0 +1,2 @@
+These are the libva headers from libva 1.7.0, used to build VA-API support for the in-tree ffvpx.
+Apply va.patch when updating the headers to a newer libva version.
\ No newline at end of file
diff -up firefox-84.0/media/ffvpx/va/va_dec_vp8.h.ffvpx firefox-84.0/media/ffvpx/va/va_dec_vp8.h
--- firefox-84.0/media/ffvpx/va/va_dec_vp8.h.ffvpx 2020-12-10 20:40:53.390541394 +0100
+++ firefox-84.0/media/ffvpx/va/va_dec_vp8.h 2020-12-10 20:40:53.390541394 +0100
@@ -0,0 +1,254 @@
+/*
+ * Copyright (c) 2007-2012 Intel Corporation. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file va_dec_vp8.h
+ * \brief VP8 decoding API
+ *
+ * This file contains the \ref api_dec_vp8 "VP8 decoding API".
+ */
+
+#ifndef VA_DEC_VP8_H
+#define VA_DEC_VP8_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \defgroup api_dec_vp8 VP8 decoding API
+ *
+ * @{
+ */
+
+/**
+ * \brief VPX Bool Coder Context structure
+ *
+ * This common structure is defined for potential sharing by other VP formats
+ *
+ */
+typedef struct _VABoolCoderContextVPX
+{
+ /* partition 0 "range" */
+ uint8_t range;
+ /* partition 0 "value" */
+ uint8_t value;
+ /*
+ * 'partition 0 number of shifts before an output byte is available'
+ * it is the number of remaining bits in 'value' for decoding, range [0, 7].
+ */
+
+ uint8_t count;
+} VABoolCoderContextVPX;
+
+/**
+ * \brief VP8 Decoding Picture Parameter Buffer Structure
+ *
+ * This structure conveys frame level parameters and should be sent once
+ * per frame.
+ *
+ */
+typedef struct _VAPictureParameterBufferVP8
+{
+ /* frame width in pixels */
+ uint32_t frame_width;
+ /* frame height in pixels */
+ uint32_t frame_height;
+
+ /* specifies the "last" reference frame */
+ VASurfaceID last_ref_frame;
+ /* specifies the "golden" reference frame */
+ VASurfaceID golden_ref_frame;
+    /* specifies the "alternate" reference frame */
+ VASurfaceID alt_ref_frame;
+ /* specifies the out-of-loop deblocked frame, not used currently */
+ VASurfaceID out_of_loop_frame;
+
+ union {
+ struct {
+ /* same as key_frame in bitstream syntax, 0 means a key frame */
+ uint32_t key_frame : 1;
+ /* same as version in bitstream syntax */
+ uint32_t version : 3;
+ /* same as segmentation_enabled in bitstream syntax */
+ uint32_t segmentation_enabled : 1;
+ /* same as update_mb_segmentation_map in bitstream syntax */
+ uint32_t update_mb_segmentation_map : 1;
+ /* same as update_segment_feature_data in bitstream syntax */
+ uint32_t update_segment_feature_data : 1;
+ /* same as filter_type in bitstream syntax */
+ uint32_t filter_type : 1;
+ /* same as sharpness_level in bitstream syntax */
+ uint32_t sharpness_level : 3;
+ /* same as loop_filter_adj_enable in bitstream syntax */
+ uint32_t loop_filter_adj_enable : 1;
+ /* same as mode_ref_lf_delta_update in bitstream syntax */
+ uint32_t mode_ref_lf_delta_update : 1;
+ /* same as sign_bias_golden in bitstream syntax */
+ uint32_t sign_bias_golden : 1;
+ /* same as sign_bias_alternate in bitstream syntax */
+ uint32_t sign_bias_alternate : 1;
+ /* same as mb_no_coeff_skip in bitstream syntax */
+ uint32_t mb_no_coeff_skip : 1;
+ /* flag to indicate that loop filter should be disabled */
+ uint32_t loop_filter_disable : 1;
+ } bits;
+ uint32_t value;
+ } pic_fields;
+
+ /*
+ * probabilities of the segment_id decoding tree and same as
+ * mb_segment_tree_probs in the spec.
+ */
+ uint8_t mb_segment_tree_probs[3];
+
+ /* Post-adjustment loop filter levels for the 4 segments */
+ uint8_t loop_filter_level[4];
+ /* loop filter deltas for reference frame based MB level adjustment */
+ int8_t loop_filter_deltas_ref_frame[4];
+ /* loop filter deltas for coding mode based MB level adjustment */
+ int8_t loop_filter_deltas_mode[4];
+
+ /* same as prob_skip_false in bitstream syntax */
+ uint8_t prob_skip_false;
+ /* same as prob_intra in bitstream syntax */
+ uint8_t prob_intra;
+ /* same as prob_last in bitstream syntax */
+ uint8_t prob_last;
+ /* same as prob_gf in bitstream syntax */
+ uint8_t prob_gf;
+
+ /*
+ * list of 4 probabilities of the luma intra prediction mode decoding
+ * tree and same as y_mode_probs in frame header
+ */
+ uint8_t y_mode_probs[4];
+ /*
+ * list of 3 probabilities of the chroma intra prediction mode decoding
+ * tree and same as uv_mode_probs in frame header
+ */
+ uint8_t uv_mode_probs[3];
+ /*
+ * updated mv decoding probabilities and same as mv_probs in
+ * frame header
+ */
+ uint8_t mv_probs[2][19];
+
+ VABoolCoderContextVPX bool_coder_ctx;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAPictureParameterBufferVP8;
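The comment above notes that this structure conveys frame-level parameters and is sent once per frame. Below is a minimal sketch, illustrative only and not part of the header, of how a decoder would typically submit it using the vaCreateBuffer/vaBeginPicture/vaRenderPicture/vaEndPicture wrappers provided by mozva.c; VAPictureParameterBufferType comes from the full va.h buffer-type enum, which is outside this excerpt, and SubmitVP8PictureParams is a hypothetical helper:

#include <va/va.h>

/* Hedged sketch: submit one frame's VP8 picture parameters to the decoder.
 * VAPictureParameterBufferType is part of the full va.h (not in this excerpt);
 * slice parameter/data buffers are omitted for brevity. */
static VAStatus SubmitVP8PictureParams(VADisplay dpy, VAContextID context,
                                       VASurfaceID render_target,
                                       const VAPictureParameterBufferVP8* pp) {
  VABufferID buf = 0;
  VAStatus status = vaCreateBuffer(dpy, context, VAPictureParameterBufferType,
                                   sizeof(*pp), 1, (void*)pp, &buf);
  if (status != VA_STATUS_SUCCESS) {
    return status;
  }
  status = vaBeginPicture(dpy, context, render_target);
  if (status == VA_STATUS_SUCCESS) {
    status = vaRenderPicture(dpy, context, &buf, 1);
  }
  if (status == VA_STATUS_SUCCESS) {
    /* A real decoder would also render slice buffers before ending the picture. */
    status = vaEndPicture(dpy, context);
  }
  vaDestroyBuffer(dpy, buf);
  return status;
}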
+
+/**
+ * \brief VP8 Slice Parameter Buffer Structure
+ *
+ * This structure conveys parameters related to data partitions and should be
+ * sent once per frame. Slice data buffer of VASliceDataBufferType is used
+ * to send the partition data.
+ *
+ */
+typedef struct _VASliceParameterBufferVP8
+{
+ /*
+ * number of bytes in the slice data buffer for the partitions
+ */
+ uint32_t slice_data_size;
+ /*
+ * offset to the first byte of partition data (control partition)
+ */
+ uint32_t slice_data_offset;
+ /*
+ * see VA_SLICE_DATA_FLAG_XXX definitions
+ */
+ uint32_t slice_data_flag;
+ /*
+ * offset to the first bit of MB from the first byte of partition data(slice_data_offset)
+ */
+ uint32_t macroblock_offset;
+
+ /*
+ * Partitions
+     * (1<<log2_nbr_of_dct_partitions)+1, counting both the control partition (frame header) and the token partitions
+ */
+ uint8_t num_of_partitions;
+ /*
+ * partition_size[0] is remaining bytes of control partition after parsed by application.
+ * exclude current byte for the remaining bits in bool_coder_ctx.
+ * exclude the uncompress data chunk since first_part_size 'excluding the uncompressed data chunk'
+ */
+ uint32_t partition_size[9];
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VASliceParameterBufferVP8;
+
+/**
+ * \brief VP8 Coefficient Probability Data Buffer Structure
+ *
+ * Contains the contents of the token probability table, which may be
+ * incrementally modified in the frame header. There are four dimensions to
+ * the token probability array. The outermost dimension is indexed by the
+ * type of plane being decoded; the next dimension is selected by the
+ * position of the coefficient being decoded; the third dimension, roughly
+ * speaking, measures the "local complexity" or extent to which nearby
+ * coefficients are non-zero; the fourth, and final, dimension of the token
+ * probability array is indexed by the position in the token tree structure,
+ * as are all tree probability arrays. This structure is sent once per frame.
+ *
+ */
+typedef struct _VAProbabilityDataBufferVP8
+{
+ uint8_t dct_coeff_probs[4][8][3][11];
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAProbabilityDataBufferVP8;
+
+/**
+ * \brief VP8 Inverse Quantization Matrix Buffer Structure
+ *
+ * Contains quantization indices for yac(0),ydc(1),y2dc(2),y2ac(3),uvdc(4),
+ * uvac(5) for each segment (0-3). When segmentation is disabled, only
+ * quantization_index[0][] will be used. This structure is sent once per frame.
+ */
+typedef struct _VAIQMatrixBufferVP8
+{
+ /*
+     * The first array dimension is the segment and the second is the Q index;
+     * all Q indexes should be clipped to the range [0, 127].
+ */
+ uint16_t quantization_index[4][6];
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAIQMatrixBufferVP8;
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VA_DEC_VP8_H */
diff -up firefox-84.0/media/ffvpx/va/va_dec_vp9.h.ffvpx firefox-84.0/media/ffvpx/va/va_dec_vp9.h
--- firefox-84.0/media/ffvpx/va/va_dec_vp9.h.ffvpx 2020-12-10 20:40:53.390541394 +0100
+++ firefox-84.0/media/ffvpx/va/va_dec_vp9.h 2020-12-10 20:40:53.390541394 +0100
@@ -0,0 +1,319 @@
+/*
+ * Copyright (c) 2014 Intel Corporation. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+/**
+ * \file va_dec_vp9.h
+ * \brief The VP9 decoding API
+ *
+ * This file contains the \ref api_dec_vp9 "VP9 decoding API".
+ */
+
+#ifndef VA_DEC_VP9_H
+#define VA_DEC_VP9_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * \defgroup api_dec_vp9 VP9 decoding API
+ *
+ * This VP9 decoding API supports 8-bit 420 format only.
+ *
+ * @{
+ */
+
+
+
+
+/**
+ * \brief VP9 Decoding Picture Parameter Buffer Structure
+ *
+ * This structure conveys picture level parameters.
+ * App should send a surface with this data structure down to VAAPI once
+ * per frame.
+ *
+ */
+typedef struct _VADecPictureParameterBufferVP9
+{
+ /** \brief picture width
+     * Picture original resolution. The value may not be a multiple of 8.
+ */
+ uint16_t frame_width;
+ /** \brief picture height
+     * Picture original resolution. The value may not be a multiple of 8.
+ */
+ uint16_t frame_height;
+
+ /** \brief Surface indices of reference frames in DPB.
+ *
+ * Each entry of the list specifies the surface index of the picture
+ * that is referred by current picture or will be referred by any future
+ * picture.
+     * The application that calls this API should update this list based on the
+     * refresh information from the VP9 bitstream.
+ */
+ VASurfaceID reference_frames[8];
+
+ union
+ {
+ struct
+ {
+ /** \brief flags for current picture
+ * same syntax and semantic as those in VP9 code
+ */
+ uint32_t subsampling_x : 1;
+ uint32_t subsampling_y : 1;
+ uint32_t frame_type : 1;
+ uint32_t show_frame : 1;
+ uint32_t error_resilient_mode : 1;
+ uint32_t intra_only : 1;
+ uint32_t allow_high_precision_mv : 1;
+ uint32_t mcomp_filter_type : 3;
+ uint32_t frame_parallel_decoding_mode : 1;
+ uint32_t reset_frame_context : 2;
+ uint32_t refresh_frame_context : 1;
+ uint32_t frame_context_idx : 2;
+ uint32_t segmentation_enabled : 1;
+
+ /** \brief corresponds to variable temporal_update in VP9 code.
+ */
+ uint32_t segmentation_temporal_update : 1;
+ /** \brief corresponds to variable update_mb_segmentation_map
+ * in VP9 code.
+ */
+ uint32_t segmentation_update_map : 1;
+
+ /** \brief Index of reference_frames[] and points to the
+ * LAST reference frame.
+ * It corresponds to active_ref_idx[0] in VP9 code.
+ */
+ uint32_t last_ref_frame : 3;
+ /** \brief Sign Bias of the LAST reference frame.
+ * It corresponds to ref_frame_sign_bias[LAST_FRAME] in VP9 code.
+ */
+ uint32_t last_ref_frame_sign_bias : 1;
+ /** \brief Index of reference_frames[] and points to the
+             * GOLDEN reference frame.
+ * It corresponds to active_ref_idx[1] in VP9 code.
+ */
+ uint32_t golden_ref_frame : 3;
+            /** \brief Sign Bias of the GOLDEN reference frame.
+             * Corresponds to ref_frame_sign_bias[GOLDEN_FRAME] in VP9 code.
+ */
+ uint32_t golden_ref_frame_sign_bias : 1;
+ /** \brief Index of reference_frames[] and points to the
+ * ALTERNATE reference frame.
+ * Corresponds to active_ref_idx[2] in VP9 code.
+ */
+ uint32_t alt_ref_frame : 3;
+ /** \brief Sign Bias of the ALTERNATE reference frame.
+ * Corresponds to ref_frame_sign_bias[ALTREF_FRAME] in VP9 code.
+ */
+ uint32_t alt_ref_frame_sign_bias : 1;
+ /** \brief Lossless Mode
+ * LosslessFlag = base_qindex == 0 &&
+ * y_dc_delta_q == 0 &&
+ * uv_dc_delta_q == 0 &&
+ * uv_ac_delta_q == 0;
+ * Where base_qindex, y_dc_delta_q, uv_dc_delta_q and uv_ac_delta_q
+ * are all variables in VP9 code.
+ */
+ uint32_t lossless_flag : 1;
+ } bits;
+ uint32_t value;
+ } pic_fields;
+
+ /* following parameters have same syntax with those in VP9 code */
+ uint8_t filter_level;
+ uint8_t sharpness_level;
+
+ /** \brief number of tile rows specified by (1 << log2_tile_rows).
+     * It corresponds to the variable with the same name in VP9 code.
+ */
+ uint8_t log2_tile_rows;
+ /** \brief number of tile columns specified by (1 << log2_tile_columns).
+     * It corresponds to the variable with the same name in VP9 code.
+ */
+ uint8_t log2_tile_columns;
+ /** \brief Number of bytes taken up by the uncompressed frame header,
+ * which corresponds to byte length of function
+ * read_uncompressed_header() in VP9 code.
+ * Specifically, it is the byte count from bit stream buffer start to
+ * the last byte of uncompressed frame header.
+ * If there are other meta data in the buffer before uncompressed header,
+ * its size should be also included here.
+ */
+ uint8_t frame_header_length_in_bytes;
+
+    /** \brief The byte count of the compressed header in the bitstream buffer,
+ * which corresponds to syntax first_partition_size in code.
+ */
+ uint16_t first_partition_size;
+
+ /** These values are segment probabilities with same names in VP9
+ * function setup_segmentation(). They should be parsed directly from
+ * bitstream by application.
+ */
+ uint8_t mb_segment_tree_probs[7];
+ uint8_t segment_pred_probs[3];
+
+ /** \brief VP9 Profile definition
+ * value range [0..3].
+ */
+ uint8_t profile;
+
+ /** \brief VP9 bit depth per sample
+ * same for both luma and chroma samples.
+ */
+ uint8_t bit_depth;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_MEDIUM];
+
+} VADecPictureParameterBufferVP9;
+
+
+
+/**
+ * \brief VP9 Segmentation Parameter Data Structure
+ *
+ * This structure conveys per segment parameters.
+ * 8 of this data structure will be included in VASegmentationParameterBufferVP9
+ * and sent to API in a single buffer.
+ *
+ */
+typedef struct _VASegmentParameterVP9
+{
+ union
+ {
+ struct
+ {
+ /** \brief Indicates if per segment reference frame indicator
+ * is enabled.
+ * Corresponding to variable feature_enabled when
+ * j == SEG_LVL_REF_FRAME in function setup_segmentation() VP9 code.
+ */
+ uint16_t segment_reference_enabled : 1;
+ /** \brief Specifies per segment reference indication.
+ * 0: reserved
+ * 1: Last ref
+ * 2: golden
+ * 3: altref
+ * Value can be derived from variable data when
+ * j == SEG_LVL_REF_FRAME in function setup_segmentation() VP9 code.
+ */
+ uint16_t segment_reference : 2;
+ /** \brief Indicates if per segment skip feature is enabled.
+ * Corresponding to variable feature_enabled when
+ * j == SEG_LVL_SKIP in function setup_segmentation() VP9 code.
+ */
+ uint16_t segment_reference_skipped : 1;
+ } fields;
+ uint16_t value;
+ } segment_flags;
+
+ /** \brief Specifies the filter level information per segment.
+ * The value corresponds to variable lfi->lvl[seg][ref][mode] in VP9 code,
+ * where m is [ref], and n is [mode] in FilterLevel[m][n].
+ */
+ uint8_t filter_level[4][2];
+ /** \brief Specifies per segment Luma AC quantization scale.
+ * Corresponding to y_dequant[qindex][1] in vp9_mb_init_quantizer()
+ * function of VP9 code.
+ */
+ int16_t luma_ac_quant_scale;
+ /** \brief Specifies per segment Luma DC quantization scale.
+ * Corresponding to y_dequant[qindex][0] in vp9_mb_init_quantizer()
+ * function of VP9 code.
+ */
+ int16_t luma_dc_quant_scale;
+ /** \brief Specifies per segment Chroma AC quantization scale.
+ * Corresponding to uv_dequant[qindex][1] in vp9_mb_init_quantizer()
+ * function of VP9 code.
+ */
+ int16_t chroma_ac_quant_scale;
+ /** \brief Specifies per segment Chroma DC quantization scale.
+ * Corresponding to uv_dequant[qindex][0] in vp9_mb_init_quantizer()
+ * function of VP9 code.
+ */
+ int16_t chroma_dc_quant_scale;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+
+} VASegmentParameterVP9;
+
+
+
+/**
+ * \brief VP9 Slice Parameter Buffer Structure
+ *
+ * This structure conveys parameters related to segmentation data and should be
+ * sent once per frame.
+ *
+ * When segmentation is disabled, only SegParam[0] has valid values,
+ * all other entries should be populated with 0.
+ * Otherwise, all eight entries should be valid.
+ *
+ * Slice data buffer of VASliceDataBufferType is used
+ * to send the bitstream which should include whole or part of partition 0
+ * (at least compressed header) to the end of frame.
+ *
+ */
+typedef struct _VASliceParameterBufferVP9
+{
+ /** \brief The byte count of current frame in the bitstream buffer,
+ * starting from first byte of the buffer.
+     * It uses the name slice_data_size to be consistent with other codecs,
+ * but actually means frame_data_size.
+ */
+ uint32_t slice_data_size;
+ /**
+ * offset to the first byte of partition data (control partition)
+ */
+ uint32_t slice_data_offset;
+ /**
+ * see VA_SLICE_DATA_FLAG_XXX definitions
+ */
+ uint32_t slice_data_flag;
+
+ /**
+ * \brief per segment information
+ */
+ VASegmentParameterVP9 seg_param[8];
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+
+} VASliceParameterBufferVP9;
+
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* VA_DEC_VP9_H */
diff -up firefox-84.0/media/ffvpx/va/va.h.ffvpx firefox-84.0/media/ffvpx/va/va.h
--- firefox-84.0/media/ffvpx/va/va.h.ffvpx 2020-12-10 20:40:53.391541423 +0100
+++ firefox-84.0/media/ffvpx/va/va.h 2020-12-10 20:40:53.391541423 +0100
@@ -0,0 +1,4636 @@
+/*
+ * Copyright (c) 2007-2009 Intel Corporation. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL INTEL AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+/*
+ * Video Acceleration (VA) API Specification
+ *
+ * Rev. 0.30
+ * <jonathan.bian@intel.com>
+ *
+ * Revision History:
+ * rev 0.10 (12/10/2006 Jonathan Bian) - Initial draft
+ * rev 0.11 (12/15/2006 Jonathan Bian) - Fixed some errors
+ * rev 0.12 (02/05/2007 Jonathan Bian) - Added VC-1 data structures for slice level decode
+ * rev 0.13 (02/28/2007 Jonathan Bian) - Added GetDisplay()
+ * rev 0.14 (04/13/2007 Jonathan Bian) - Fixed MPEG-2 PictureParameter structure, cleaned up a few funcs.
+ * rev 0.15 (04/20/2007 Jonathan Bian) - Overhauled buffer management
+ * rev 0.16 (05/02/2007 Jonathan Bian) - Added error codes and fixed some issues with configuration
+ * rev 0.17 (05/07/2007 Jonathan Bian) - Added H.264/AVC data structures for slice level decode.
+ * rev 0.18 (05/14/2007 Jonathan Bian) - Added data structures for MPEG-4 slice level decode
+ * and MPEG-2 motion compensation.
+ * rev 0.19 (08/06/2007 Jonathan Bian) - Removed extra type for bitplane data.
+ * rev 0.20 (08/08/2007 Jonathan Bian) - Added missing fields to VC-1 PictureParameter structure.
+ * rev 0.21 (08/20/2007 Jonathan Bian) - Added image and subpicture support.
+ * rev 0.22 (08/27/2007 Jonathan Bian) - Added support for chroma-keying and global alpha.
+ * rev 0.23 (09/11/2007 Jonathan Bian) - Fixed some issues with images and subpictures.
+ * rev 0.24 (09/18/2007 Jonathan Bian) - Added display attributes.
+ * rev 0.25 (10/18/2007 Jonathan Bian) - Changed to use IDs only for some types.
+ * rev 0.26 (11/07/2007 Waldo Bastian) - Change vaCreateBuffer semantics
+ * rev 0.27 (11/19/2007 Matt Sottek) - Added DeriveImage
+ * rev 0.28 (12/06/2007 Jonathan Bian) - Added new versions of PutImage and AssociateSubpicture
+ * to enable scaling
+ * rev 0.29 (02/07/2008 Jonathan Bian) - VC1 parameter fixes,
+ * added VA_STATUS_ERROR_RESOLUTION_NOT_SUPPORTED
+ * rev 0.30 (03/01/2009 Jonathan Bian) - Added encoding support for H.264 BP and MPEG-4 SP and fixes
+ * for ISO C conformance.
+ * rev 0.31 (09/02/2009 Gwenole Beauchesne) - VC-1/H264 fields change for VDPAU and XvBA backend
+ * Application needs to relink with the new library.
+ *
+ * rev 0.31.1 (03/29/2009) - Data structure for JPEG encode
+ * rev 0.31.2 (01/13/2011 Anthony Pabon)- Added a flag to indicate Subpicture coordinates are screen
+ * screen relative rather than source video relative.
+ * rev 0.32.0 (01/13/2011 Xiang Haihao) - Add profile into VAPictureParameterBufferVC1
+ * update VAAPI to 0.32.0
+ *
+ * Acknowledgements:
+ * Some concepts borrowed from XvMC and XvImage.
+ * Waldo Bastian (Intel), Matt Sottek (Intel), Austin Yuan (Intel), and Gwenole Beauchesne (SDS)
+ * contributed to various aspects of the API.
+ */
+
+/**
+ * \file va.h
+ * \brief The Core API
+ *
+ * This file contains the \ref api_core "Core API".
+ */
+
+#ifndef _VA_H_
+#define _VA_H_
+
+#include <stddef.h>
+#include <stdint.h>
+#include "va_version.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if defined(__GNUC__) && !defined(__COVERITY__)
+#define va_deprecated __attribute__((deprecated))
+#if __GNUC__ >= 6
+#define va_deprecated_enum va_deprecated
+#else
+#define va_deprecated_enum
+#endif
+#else
+#define va_deprecated
+#define va_deprecated_enum
+#endif
+
+/**
+ * \mainpage Video Acceleration (VA) API
+ *
+ * \section intro Introduction
+ *
+ * The main motivation for VA-API (Video Acceleration API) is to
+ * enable hardware accelerated video decode and encode at various
+ * entry-points (VLD, IDCT, Motion Compensation etc.) for the
+ * prevailing coding standards today (MPEG-2, MPEG-4 ASP/H.263, MPEG-4
+ * AVC/H.264, VC-1/WMV3, and JPEG, HEVC/H.265, VP8, VP9) and video pre/post
+ * processing
+ *
+ * VA-API is split into several modules:
+ * - \ref api_core
+ * - Encoder (H264, HEVC, JPEG, MPEG2, VP8, VP9)
+ * - \ref api_enc_h264
+ * - \ref api_enc_hevc
+ * - \ref api_enc_jpeg
+ * - \ref api_enc_mpeg2
+ * - \ref api_enc_vp8
+ * - \ref api_enc_vp9
+ * - Decoder (HEVC, JPEG, VP8, VP9)
+ * - \ref api_dec_hevc
+ * - \ref api_dec_jpeg
+ * - \ref api_dec_vp8
+ * - \ref api_dec_vp9
+ * - \ref api_vpp
+ * - FEI (H264, HEVC)
+ * - \ref api_fei
+ * - \ref api_fei_h264
+ * - \ref api_fei_hevc
+ */
+
+/**
+ * \defgroup api_core Core API
+ *
+ * @{
+ */
+
+/**
+Overview
+
+The VA API is intended to provide an interface between a video decode/encode/processing
+application (client) and a hardware accelerator (server), to off-load
+video decode/encode/processing operations from the host to the hardware accelerator at various
+entry-points.
+
+The basic operation steps are:
+
+- Negotiate a mutually acceptable configuration with the server to lock
+ down profile, entrypoints, and other attributes that will not change on
+ a frame-by-frame basis.
+- Create a video decode, encode or processing context which represents a
+ "virtualized" hardware device
+- Get and fill the render buffers with the corresponding data (depending on
+ profiles and entrypoints)
+- Pass the render buffers to the server to handle the current frame
+
+Initialization & Configuration Management
+
+- Find out supported profiles
+- Find out entrypoints for a given profile
+- Find out configuration attributes for a given profile/entrypoint pair
+- Create a configuration for use by the application
+
+*/
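To make the basic operation steps above concrete, here is a minimal sketch, illustrative only and not part of the header or the patch, of the negotiation and context-creation sequence, restricted to entry points that mozva.c forwards. Acquiring the VADisplay is window-system specific and omitted, error handling is reduced to early returns, and SetupVP9DecodeContext is a hypothetical helper:

#include <stdlib.h>
#include <va/va.h>

static VAStatus SetupVP9DecodeContext(VADisplay dpy, int width, int height,
                                      VASurfaceID* render_targets,
                                      int num_render_targets,
                                      VAConfigID* config,
                                      VAContextID* context) {
  int major = 0, minor = 0;
  VAStatus status = vaInitialize(dpy, &major, &minor);
  if (status != VA_STATUS_SUCCESS) return status;

  /* Negotiate: check that the driver exposes a VP9 Profile 0 decoder. */
  int num_profiles = vaMaxNumProfiles(dpy);
  VAProfile* profiles = malloc(num_profiles * sizeof(VAProfile));
  if (!profiles) return VA_STATUS_ERROR_ALLOCATION_FAILED;
  status = vaQueryConfigProfiles(dpy, profiles, &num_profiles);
  int found = 0;
  for (int i = 0; status == VA_STATUS_SUCCESS && i < num_profiles; i++) {
    found |= (profiles[i] == VAProfileVP9Profile0);
  }
  free(profiles);
  if (!found) return VA_STATUS_ERROR_UNSUPPORTED_PROFILE;

  /* Lock down profile and entrypoint in a configuration... */
  status = vaCreateConfig(dpy, VAProfileVP9Profile0, VAEntrypointVLD,
                          NULL, 0, config);
  if (status != VA_STATUS_SUCCESS) return status;

  /* ...then create the "virtualized" decode context on top of it. */
  return vaCreateContext(dpy, *config, width, height, /* flag */ 0,
                         render_targets, num_render_targets, context);
}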
+
+typedef void* VADisplay; /* window system dependent */
+
+typedef int VAStatus; /** Return status type from functions */
+/** Values for the return status */
+#define VA_STATUS_SUCCESS 0x00000000
+#define VA_STATUS_ERROR_OPERATION_FAILED 0x00000001
+#define VA_STATUS_ERROR_ALLOCATION_FAILED 0x00000002
+#define VA_STATUS_ERROR_INVALID_DISPLAY 0x00000003
+#define VA_STATUS_ERROR_INVALID_CONFIG 0x00000004
+#define VA_STATUS_ERROR_INVALID_CONTEXT 0x00000005
+#define VA_STATUS_ERROR_INVALID_SURFACE 0x00000006
+#define VA_STATUS_ERROR_INVALID_BUFFER 0x00000007
+#define VA_STATUS_ERROR_INVALID_IMAGE 0x00000008
+#define VA_STATUS_ERROR_INVALID_SUBPICTURE 0x00000009
+#define VA_STATUS_ERROR_ATTR_NOT_SUPPORTED 0x0000000a
+#define VA_STATUS_ERROR_MAX_NUM_EXCEEDED 0x0000000b
+#define VA_STATUS_ERROR_UNSUPPORTED_PROFILE 0x0000000c
+#define VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT 0x0000000d
+#define VA_STATUS_ERROR_UNSUPPORTED_RT_FORMAT 0x0000000e
+#define VA_STATUS_ERROR_UNSUPPORTED_BUFFERTYPE 0x0000000f
+#define VA_STATUS_ERROR_SURFACE_BUSY 0x00000010
+#define VA_STATUS_ERROR_FLAG_NOT_SUPPORTED 0x00000011
+#define VA_STATUS_ERROR_INVALID_PARAMETER 0x00000012
+#define VA_STATUS_ERROR_RESOLUTION_NOT_SUPPORTED 0x00000013
+#define VA_STATUS_ERROR_UNIMPLEMENTED 0x00000014
+#define VA_STATUS_ERROR_SURFACE_IN_DISPLAYING 0x00000015
+#define VA_STATUS_ERROR_INVALID_IMAGE_FORMAT 0x00000016
+#define VA_STATUS_ERROR_DECODING_ERROR 0x00000017
+#define VA_STATUS_ERROR_ENCODING_ERROR 0x00000018
+/**
+ * \brief An invalid/unsupported value was supplied.
+ *
+ * This is a catch-all error code for invalid or unsupported values.
+ * e.g. value exceeding the valid range, invalid type in the context
+ * of generic attribute values.
+ */
+#define VA_STATUS_ERROR_INVALID_VALUE 0x00000019
+/** \brief An unsupported filter was supplied. */
+#define VA_STATUS_ERROR_UNSUPPORTED_FILTER 0x00000020
+/** \brief An invalid filter chain was supplied. */
+#define VA_STATUS_ERROR_INVALID_FILTER_CHAIN 0x00000021
+/** \brief Indicate HW busy (e.g. run multiple encoding simultaneously). */
+#define VA_STATUS_ERROR_HW_BUSY 0x00000022
+/** \brief An unsupported memory type was supplied. */
+#define VA_STATUS_ERROR_UNSUPPORTED_MEMORY_TYPE 0x00000024
+/** \brief Indicate allocated buffer size is not enough for input or output. */
+#define VA_STATUS_ERROR_NOT_ENOUGH_BUFFER 0x00000025
+#define VA_STATUS_ERROR_UNKNOWN 0xFFFFFFFF
+
+/**
+ * 1. De-interlacing flags for vaPutSurface()
+ * 2. Surface sample type for input/output surface flag
+ * - Progressive: VA_FRAME_PICTURE
+ * - Interleaved: VA_TOP_FIELD_FIRST, VA_BOTTOM_FIELD_FIRST
+ * - Field: VA_TOP_FIELD, VA_BOTTOM_FIELD
+*/
+#define VA_FRAME_PICTURE 0x00000000
+#define VA_TOP_FIELD 0x00000001
+#define VA_BOTTOM_FIELD 0x00000002
+#define VA_TOP_FIELD_FIRST 0x00000004
+#define VA_BOTTOM_FIELD_FIRST 0x00000008
+
+/**
+ * Enables the positioning/cropping/blending feature:
+ * 1, specify the video playback position in the surface
+ * 2, specify the cropping info for video playback
+ * 3, encoded video will blend with background color
+ */
+#define VA_ENABLE_BLEND 0x00000004 /* video area blend with the constant color */
+
+/**
+ * Clears the drawable with background color.
+ * for hardware overlay based implementation this flag
+ * can be used to turn off the overlay
+ */
+#define VA_CLEAR_DRAWABLE 0x00000008
+
+/** Color space conversion flags for vaPutSurface() */
+#define VA_SRC_COLOR_MASK 0x000000f0
+#define VA_SRC_BT601 0x00000010
+#define VA_SRC_BT709 0x00000020
+#define VA_SRC_SMPTE_240 0x00000040
+
+/** Scaling flags for vaPutSurface() */
+#define VA_FILTER_SCALING_DEFAULT 0x00000000
+#define VA_FILTER_SCALING_FAST 0x00000100
+#define VA_FILTER_SCALING_HQ 0x00000200
+#define VA_FILTER_SCALING_NL_ANAMORPHIC 0x00000300
+#define VA_FILTER_SCALING_MASK 0x00000f00
+
+/** Padding size in 4-bytes */
+#define VA_PADDING_LOW 4
+#define VA_PADDING_MEDIUM 8
+#define VA_PADDING_HIGH 16
+#define VA_PADDING_LARGE 32
+
+/**
+ * Returns a short english description of error_status
+ */
+const char *vaErrorStr(VAStatus error_status);
+
+typedef struct _VARectangle
+{
+ int16_t x;
+ int16_t y;
+ uint16_t width;
+ uint16_t height;
+} VARectangle;
+
+/** \brief Generic motion vector data structure. */
+typedef struct _VAMotionVector {
+ /** \mv0[0]: horizontal motion vector for past reference */
+ /** \mv0[1]: vertical motion vector for past reference */
+ /** \mv1[0]: horizontal motion vector for future reference */
+ /** \mv1[1]: vertical motion vector for future reference */
+ int16_t mv0[2]; /* past reference */
+ int16_t mv1[2]; /* future reference */
+} VAMotionVector;
+
+/** Type of a message callback, used for both error and info log. */
+typedef void (*VAMessageCallback)(void *user_context, const char *message);
+
+/**
+ * Set the callback for error messages, or NULL for no logging.
+ * Returns the previous one, or NULL if it was disabled.
+ */
+VAMessageCallback vaSetErrorCallback(VADisplay dpy, VAMessageCallback callback, void *user_context);
+
+/**
+ * Set the callback for info messages, or NULL for no logging.
+ * Returns the previous one, or NULL if it was disabled.
+ */
+VAMessageCallback vaSetInfoCallback(VADisplay dpy, VAMessageCallback callback, void *user_context);
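As a brief usage sketch for the two setters above, not part of the header: both accept a VAMessageCallback plus an opaque user_context and return the previously installed callback. VaLogToStderr and InstallVaLogging are hypothetical names:

#include <stdio.h>
#include <va/va.h>

/* Hypothetical logger matching the VAMessageCallback signature. */
static void VaLogToStderr(void* user_context, const char* message) {
  fprintf(stderr, "[libva %s] %s", (const char*)user_context, message);
}

static void InstallVaLogging(VADisplay dpy) {
  /* The previous callbacks are returned and could be stashed and restored later. */
  VAMessageCallback prev_error = vaSetErrorCallback(dpy, VaLogToStderr, (void*)"error");
  VAMessageCallback prev_info = vaSetInfoCallback(dpy, VaLogToStderr, (void*)"info");
  (void)prev_error;
  (void)prev_info;
}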
+
+/**
+ * Initialization:
+ * A display must be obtained by calling vaGetDisplay() before calling
+ * vaInitialize() and other functions. This connects the API to the
+ * native window system.
+ * For X Windows, native_dpy would be from XOpenDisplay()
+ */
+typedef void* VANativeDisplay; /* window system dependent */
+
+int vaDisplayIsValid(VADisplay dpy);
+
+/**
+ * Set the override driver name instead of the queried driver name.
+ */
+VAStatus vaSetDriverName(VADisplay dpy,
+ char *driver_name
+);
+
+/**
+ * Initialize the library
+ */
+VAStatus vaInitialize (
+ VADisplay dpy,
+ int *major_version, /* out */
+ int *minor_version /* out */
+);
+
+/**
+ * After this call, all library internal resources will be cleaned up
+ */
+VAStatus vaTerminate (
+ VADisplay dpy
+);
+
+/**
+ * vaQueryVendorString returns a pointer to a zero-terminated string
+ * describing some aspects of the VA implementation on a specific
+ * hardware accelerator. The format of the returned string is vendor
+ * specific and at the discretion of the implementer.
+ * e.g. for the Intel GMA500 implementation, an example would be:
+ * "Intel GMA500 - 2.0.0.32L.0005"
+ */
+const char *vaQueryVendorString (
+ VADisplay dpy
+);
+
+typedef int (*VAPrivFunc)(void);
+
+/**
+ * Return a function pointer given a function name in the library.
+ * This allows private interfaces into the library
+ */
+VAPrivFunc vaGetLibFunc (
+ VADisplay dpy,
+ const char *func
+);
+
+/** Currently defined profiles */
+typedef enum
+{
+ /** \brief Profile ID used for video processing. */
+ VAProfileNone = -1,
+ VAProfileMPEG2Simple = 0,
+ VAProfileMPEG2Main = 1,
+ VAProfileMPEG4Simple = 2,
+ VAProfileMPEG4AdvancedSimple = 3,
+ VAProfileMPEG4Main = 4,
+ VAProfileH264Baseline va_deprecated_enum = 5,
+ VAProfileH264Main = 6,
+ VAProfileH264High = 7,
+ VAProfileVC1Simple = 8,
+ VAProfileVC1Main = 9,
+ VAProfileVC1Advanced = 10,
+ VAProfileH263Baseline = 11,
+ VAProfileJPEGBaseline = 12,
+ VAProfileH264ConstrainedBaseline = 13,
+ VAProfileVP8Version0_3 = 14,
+ VAProfileH264MultiviewHigh = 15,
+ VAProfileH264StereoHigh = 16,
+ VAProfileHEVCMain = 17,
+ VAProfileHEVCMain10 = 18,
+ VAProfileVP9Profile0 = 19,
+ VAProfileVP9Profile1 = 20,
+ VAProfileVP9Profile2 = 21,
+ VAProfileVP9Profile3 = 22,
+ VAProfileHEVCMain12 = 23,
+ VAProfileHEVCMain422_10 = 24,
+ VAProfileHEVCMain422_12 = 25,
+ VAProfileHEVCMain444 = 26,
+ VAProfileHEVCMain444_10 = 27,
+ VAProfileHEVCMain444_12 = 28,
+ VAProfileHEVCSccMain = 29,
+ VAProfileHEVCSccMain10 = 30,
+ VAProfileHEVCSccMain444 = 31
+} VAProfile;
+
+/**
+ * Currently defined entrypoints
+ */
+typedef enum
+{
+ VAEntrypointVLD = 1,
+ VAEntrypointIZZ = 2,
+ VAEntrypointIDCT = 3,
+ VAEntrypointMoComp = 4,
+ VAEntrypointDeblocking = 5,
+ VAEntrypointEncSlice = 6, /* slice level encode */
+    VAEntrypointEncPicture = 7, /* picture encode, JPEG, etc */
+ /*
+ * For an implementation that supports a low power/high performance variant
+ * for slice level encode, it can choose to expose the
+ * VAEntrypointEncSliceLP entrypoint. Certain encoding tools may not be
+ * available with this entrypoint (e.g. interlace, MBAFF) and the
+ * application can query the encoding configuration attributes to find
+ * out more details if this entrypoint is supported.
+ */
+ VAEntrypointEncSliceLP = 8,
+ VAEntrypointVideoProc = 10, /**< Video pre/post-processing. */
+ /**
+ * \brief VAEntrypointFEI
+ *
+ * The purpose of FEI (Flexible Encoding Infrastructure) is to allow applications to
+ * have more controls and trade off quality for speed with their own IPs.
+ * The application can optionally provide input to ENC for extra encode control
+     * and get the output from ENC. The application can choose to modify the ENC
+ * output/PAK input during encoding, but the performance impact is significant.
+ *
+ * On top of the existing buffers for normal encode, there will be
+ * one extra input buffer (VAEncMiscParameterFEIFrameControl) and
+ * three extra output buffers (VAEncFEIMVBufferType, VAEncFEIMBModeBufferType
+ * and VAEncFEIDistortionBufferType) for VAEntrypointFEI entry function.
+ * If separate PAK is set, two extra input buffers
+ * (VAEncFEIMVBufferType, VAEncFEIMBModeBufferType) are needed for PAK input.
+ **/
+ VAEntrypointFEI = 11,
+ /**
+ * \brief VAEntrypointStats
+ *
+ * A pre-processing function for getting some statistics and motion vectors is added,
+ * and some extra controls for Encode pipeline are provided. The application can
+ * optionally call the statistics function to get motion vectors and statistics like
+ * variances, distortions before calling Encode function via this entry point.
+ *
+ * Checking whether Statistics is supported can be performed with vaQueryConfigEntrypoints().
+ * If Statistics entry point is supported, then the list of returned entry-points will
+ * include #VAEntrypointStats. Supported pixel format, maximum resolution and statistics
+ * specific attributes can be obtained via normal attribute query. One input buffer
+ * (VAStatsStatisticsParameterBufferType) and one or two output buffers
+ * (VAStatsStatisticsBufferType, VAStatsStatisticsBottomFieldBufferType (for interlace only)
+ * and VAStatsMVBufferType) are needed for this entry point.
+ **/
+ VAEntrypointStats = 12,
+} VAEntrypoint;
+
+/** Currently defined configuration attribute types */
+typedef enum
+{
+ VAConfigAttribRTFormat = 0,
+ VAConfigAttribSpatialResidual = 1,
+ VAConfigAttribSpatialClipping = 2,
+ VAConfigAttribIntraResidual = 3,
+ VAConfigAttribEncryption = 4,
+ VAConfigAttribRateControl = 5,
+
+ /** @name Attributes for decoding */
+ /**@{*/
+ /**
+ * \brief Slice Decoding mode. Read/write.
+ *
+ * This attribute determines what mode the driver supports for slice
+ * decoding, through vaGetConfigAttributes(); and what mode the user
+ * will be providing to the driver, through vaCreateConfig(), if the
+ * driver supports those. If this attribute is not set by the user then
+ * it is assumed that VA_DEC_SLICE_MODE_NORMAL mode is used.
+ *
+ * See \c VA_DEC_SLICE_MODE_xxx for the list of slice decoding modes.
+ */
+ VAConfigAttribDecSliceMode = 6,
+ /**
+ * \brief JPEG decoding attribute. Read-only.
+ *
+ * This attribute exposes a number of capabilities of the underlying
+ * JPEG implementation. The attribute value is partitioned into fields as defined in the
+ * VAConfigAttribValDecJPEG union.
+ */
+ VAConfigAttribDecJPEG = 7,
+ /**
+ * \brief Decode processing support. Read/write.
+ *
+ * This attribute determines if the driver supports video processing
+ * with decoding using the decoding context in a single call, through
+ * vaGetConfigAttributes(); and if the user may use this feature,
+ * through vaCreateConfig(), if the driver supports the user scenario.
+ * The user will essentially create a regular decode VAContext. Therefore,
+ * the parameters of vaCreateContext() such as picture_width, picture_height
+ * and render_targets are in relation to the decode output parameters
+ * (not processing output parameters) as normal.
+ * If this attribute is not set by the user then it is assumed that no
+ * extra processing is done after decoding for this decode context.
+ *
+ * Since essentially the application is creating a decoder config and context,
+ * all function calls that take in the config (e.g. vaQuerySurfaceAttributes())
+ * or context are in relation to the decoder, except those video processing
+ * function specified in the next paragraph.
+ *
+ * Once the decode config and context are created, the user must further
+ * query the supported processing filters using vaQueryVideoProcFilters(),
+ * vaQueryVideoProcFilterCaps(), vaQueryVideoProcPipelineCaps() by specifying
+ * the created decode context. The user must provide processing information
+ * and extra processing output surfaces as "additional_outputs" to the driver
+ * through VAProcPipelineParameterBufferType. The render_target specified
+ * at vaBeginPicture() time refers to the decode output surface. The
+ * target surface for the output of processing needs to be a different
+ * surface since the decode process requires the original reconstructed buffer.
+ * The “surface” member of VAProcPipelineParameterBuffer should be set to the
+ * same as “render_target” set in vaBeginPicture(), but the driver may choose
+ * to ignore this parameter.
+ */
+ VAConfigAttribDecProcessing = 8,
+ /** @name Attributes for encoding */
+ /**@{*/
+ /**
+ * \brief Packed headers mode. Read/write.
+ *
+ * This attribute determines what packed headers the driver supports,
+ * through vaGetConfigAttributes(); and what packed headers the user
+ * will be providing to the driver, through vaCreateConfig(), if the
+ * driver supports those.
+ *
+ * See \c VA_ENC_PACKED_HEADER_xxx for the list of packed headers.
+ */
+ VAConfigAttribEncPackedHeaders = 10,
+ /**
+ * \brief Interlaced mode. Read/write.
+ *
+ * This attribute determines what kind of interlaced encoding mode
+ * the driver supports.
+ *
+ * See \c VA_ENC_INTERLACED_xxx for the list of interlaced modes.
+ */
+ VAConfigAttribEncInterlaced = 11,
+ /**
+ * \brief Maximum number of reference frames. Read-only.
+ *
+ * This attribute determines the maximum number of reference
+ * frames supported for encoding.
+ *
+ * Note: for H.264 encoding, the value represents the maximum number
+ * of reference frames for both the reference picture list 0 (bottom
+ * 16 bits) and the reference picture list 1 (top 16 bits).
+ */
+ VAConfigAttribEncMaxRefFrames = 13,
+ /**
+ * \brief Maximum number of slices per frame. Read-only.
+ *
+ * This attribute determines the maximum number of slices the
+ * driver can support to encode a single frame.
+ */
+ VAConfigAttribEncMaxSlices = 14,
+ /**
+ * \brief Slice structure. Read-only.
+ *
+ * This attribute determines slice structures supported by the
+ * driver for encoding. This attribute is a hint to the user so
+ * that he can choose a suitable surface size and how to arrange
+ * the encoding process of multiple slices per frame.
+ *
+ * More specifically, for H.264 encoding, this attribute
+ * determines the range of accepted values to
+ * VAEncSliceParameterBufferH264::macroblock_address and
+ * VAEncSliceParameterBufferH264::num_macroblocks.
+ *
+ * See \c VA_ENC_SLICE_STRUCTURE_xxx for the supported slice
+ * structure types.
+ */
+ VAConfigAttribEncSliceStructure = 15,
+ /**
+ * \brief Macroblock information. Read-only.
+ *
+ * This attribute determines whether the driver supports extra
+ * encoding information per-macroblock. e.g. QP.
+ *
+ * More specifically, for H.264 encoding, if the driver returns a non-zero
+ * value for this attribute, this means the application can create
+ * additional #VAEncMacroblockParameterBufferH264 buffers referenced
+ * through VAEncSliceParameterBufferH264::macroblock_info.
+ */
+ VAConfigAttribEncMacroblockInfo = 16,
+ /**
+ * \brief Maximum picture width. Read-only.
+ *
+ * This attribute determines the maximum picture width the driver supports
+ * for a given configuration.
+ */
+ VAConfigAttribMaxPictureWidth = 18,
+ /**
+ * \brief Maximum picture height. Read-only.
+ *
+ * This attribute determines the maximum picture height the driver supports
+ * for a given configuration.
+ */
+ VAConfigAttribMaxPictureHeight = 19,
+ /**
+ * \brief JPEG encoding attribute. Read-only.
+ *
+ * This attribute exposes a number of capabilities of the underlying
+ * JPEG implementation. The attribute value is partitioned into fields as defined in the
+ * VAConfigAttribValEncJPEG union.
+ */
+ VAConfigAttribEncJPEG = 20,
+ /**
+ * \brief Encoding quality range attribute. Read-only.
+ *
+ * This attribute conveys whether the driver supports different quality level settings
+ * for encoding. A value less than or equal to 1 means that the encoder only has a single
+ * quality setting, and a value greater than 1 represents the number of quality levels
+ * that can be configured. e.g. a value of 2 means there are two distinct quality levels.
+ */
+ VAConfigAttribEncQualityRange = 21,
+ /**
+ * \brief Encoding quantization attribute. Read-only.
+ *
+ * This attribute conveys whether the driver supports certain types of quantization methods
+ * for encoding (e.g. trellis). See \c VA_ENC_QUANTIZATION_xxx for the list of quantization methods
+ */
+ VAConfigAttribEncQuantization = 22,
+ /**
+ * \brief Encoding intra refresh attribute. Read-only.
+ *
+ * This attribute conveys whether the driver supports certain types of intra refresh methods
+ * for encoding (e.g. adaptive intra refresh or rolling intra refresh).
+ * See \c VA_ENC_INTRA_REFRESH_xxx for intra refresh methods
+ */
+ VAConfigAttribEncIntraRefresh = 23,
+ /**
+ * \brief Encoding skip frame attribute. Read-only.
+ *
+ * This attribute conveys whether the driver supports sending skip frame parameters
+ * (VAEncMiscParameterTypeSkipFrame) to the encoder's rate control, when the user has
+ * externally skipped frames.
+ */
+ VAConfigAttribEncSkipFrame = 24,
+ /**
+ * \brief Encoding region-of-interest (ROI) attribute. Read-only.
+ *
+ * This attribute conveys whether the driver supports region-of-interest (ROI) encoding,
+ * based on user provided ROI rectangles. The attribute value is partitioned into fields
+ * as defined in the VAConfigAttribValEncROI union.
+ *
+ * If ROI encoding is supported, the ROI information is passed to the driver using
+ * VAEncMiscParameterTypeROI.
+ */
+ VAConfigAttribEncROI = 25,
+ /**
+ * \brief Encoding extended rate control attribute. Read-only.
+ *
+ * This attribute conveys whether the driver supports any extended rate control features
+ * The attribute value is partitioned into fields as defined in the
+ * VAConfigAttribValEncRateControlExt union.
+ */
+ VAConfigAttribEncRateControlExt = 26,
+ /**
+ * \brief Processing rate reporting attribute. Read-only.
+ *
+ * This attribute conveys whether the driver supports reporting of
+ * encode/decode processing rate based on a certain set of parameters
+ * (i.e. levels, I frame intervals) for a given configuration.
+ * If this is supported, vaQueryProcessingRate() can be used to get
+ * encode or decode processing rate.
+ * See \c VA_PROCESSING_RATE_xxx for encode/decode processing rate
+ */
+ VAConfigAttribProcessingRate = 27,
+ /**
+ * \brief Encoding dirty rectangle. Read-only.
+ *
+ * This attribute conveys whether the driver supports dirty rectangle
+ * encoding, based on user-provided ROI rectangles which indicate the rectangular areas
+ * where the content has changed as compared to the previous picture. The regions of the
+ * picture that are not covered by dirty rectangles are assumed to have not changed
+ * compared to the previous picture. The encoder may do some optimizations based on
+ * this information. The attribute value returned indicates the number of regions that
+ * are supported. e.g. A value of 0 means dirty rect encoding is not supported. If dirty
+ * rect encoding is supported, the ROI information is passed to the driver using
+ * VAEncMiscParameterTypeDirtyRect.
+ */
+ VAConfigAttribEncDirtyRect = 28,
+ /**
+ * \brief Parallel Rate Control (hierarchical B) attribute. Read-only.
+ *
+ * This attribute conveys whether the encoder supports parallel rate control.
+ * It is an integer value: 0 - unsupported, > 0 - maximum number of layers supported.
+ * When hierarchical B frames are encoded, multiple independent B frames
+ * on the same layer may be processed at the same time. If supported, the app may enable it by
+ * setting enable_parallel_brc in VAEncMiscParameterRateControl, and the number of B frames
+ * per layer per GOP will be passed to the driver through the VAEncMiscParameterParallelRateControl
+ * structure. Currently three layers are defined.
+ */
+ VAConfigAttribEncParallelRateControl = 29,
+ /**
+ * \brief Dynamic Scaling Attribute. Read-only.
+ *
+ * This attribute conveys whether the encoder is capable of determining dynamic frame
+ * resolutions adaptively, based on bandwidth utilization, processing power, etc.
+ * It is a boolean value: 0 - unsupported, 1 - supported.
+ * If it is supported, for VP9 the suggested frame resolution can be retrieved from VACodedBufferVP9Status.
+ */
+ VAConfigAttribEncDynamicScaling = 30,
+ /**
+ * \brief Frame size tolerance support.
+ * It indicates the tolerance of the frame size.
+ */
+ VAConfigAttribFrameSizeToleranceSupport = 31,
+ /**
+ * \brief Encode function type for FEI.
+ *
+ * This attribute conveys whether the driver supports different function types for encode.
+ * It can be VA_FEI_FUNCTION_ENC, VA_FEI_FUNCTION_PAK, or VA_FEI_FUNCTION_ENC_PAK. Currently
+ * it is for FEI entry point only.
+ * Default is VA_FEI_FUNCTION_ENC_PAK.
+ */
+ VAConfigAttribFEIFunctionType = 32,
+ /**
+ * \brief Maximum number of FEI MV predictors. Read-only.
+ *
+ * This attribute determines the maximum number of MV predictors the driver
+ * can support to encode a single frame. 0 means no MV predictor is supported.
+ * Currently it is for FEI entry point only.
+ */
+ VAConfigAttribFEIMVPredictors = 33,
+ /**
+ * \brief Statistics attribute. Read-only.
+ *
+ * This attribute exposes a number of capabilities of the VAEntrypointStats entry
+ * point. The attribute value is partitioned into fields as defined in the
+ * VAConfigAttribValStats union. Currently it is for VAEntrypointStats only.
+ */
+ VAConfigAttribStats = 34,
+ /**
+ * \brief Tile Support Attribute. Read-only.
+ *
+ * This attribute conveys whether the encoder is capable of supporting tiles.
+ * If not supported, the tile-related parameters sent to the encoder, such as the
+ * tiling structure, should be ignored. 0 - unsupported, 1 - supported.
+ */
+ VAConfigAttribEncTileSupport = 35,
+ /**
+ * \brief Whether the driver accepts a rounding setting from the application. Read-only.
+ * This attribute is for encode quality; if it is reported, the
+ * application can change the rounding setting via VAEncMiscParameterTypeCustomRoundingControl.
+ */
+ VAConfigAttribCustomRoundingControl = 36,
+ /**
+ * \brief Encoding QP info block size attribute. Read-only.
+ * This attribute conveys the block sizes that the underlying driver
+ * supports for QP info in the #VAEncQpBuffer buffer.
+ */
+ VAConfigAttribQPBlockSize = 37,
+ /**
+ * \brief Encode max frame size attribute. Read-only.
+ * The attribute value \c VAConfigAttribValMaxFrameSize represents the max frame size support.
+ */
+ VAConfigAttribMaxFrameSize = 38,
+ /** \brief Inter frame prediction direction attribute. Read-only.
+ * This attribute conveys the prediction direction (backward or forward) for a specific config.
+ * The value could be VA_PREDICTION_DIRECTION_XXXX. It can be combined with VAConfigAttribEncMaxRefFrames
+ * to describe the reference list and the prediction direction. If this attribute is not present, both directions
+ * should be supported, with no restriction.
+ * For example, in normal HEVC encoding the maximum reference frame number in reflist 0 and reflist 1 is deduced
+ * from VAConfigAttribEncMaxRefFrames, so there are typical P frames and B frames.
+ * If VAConfigAttribPredictionDirection is also present, it stipulates the prediction direction in both
+ * reference lists. If only one prediction direction is present (such as PREVIOUS), all reference frames should be
+ * previous frames (PoC < current).
+ */
+ VAConfigAttribPredictionDirection = 39,
+ /** \brief Combined submission of multiple frames from different streams; it is an optimization for different HW
+ * implementations, where encoding/decoding multiple frames at once can improve HW concurrency.
+ */
+ VAConfigAttribMultipleFrame = 40,
+ /**@}*/
+ VAConfigAttribTypeMax
+} VAConfigAttribType;
+
+/**
+ * Configuration attributes
+ * If there is more than one value for an attribute, a default
+ * value will be assigned to the attribute if the client does not
+ * specify the attribute when creating a configuration
+ */
+typedef struct _VAConfigAttrib {
+ VAConfigAttribType type;
+ uint32_t value; /* OR'd flags (bits) for this attribute */
+} VAConfigAttrib;
+
+/* Attribute values for VAConfigAttribRTFormat. */
+
+#define VA_RT_FORMAT_YUV420 0x00000001 ///< YUV 4:2:0 8-bit.
+#define VA_RT_FORMAT_YUV422 0x00000002 ///< YUV 4:2:2 8-bit.
+#define VA_RT_FORMAT_YUV444 0x00000004 ///< YUV 4:4:4 8-bit.
+#define VA_RT_FORMAT_YUV411 0x00000008 ///< YUV 4:1:1 8-bit.
+#define VA_RT_FORMAT_YUV400 0x00000010 ///< Greyscale 8-bit.
+#define VA_RT_FORMAT_YUV420_10 0x00000100 ///< YUV 4:2:0 10-bit.
+#define VA_RT_FORMAT_YUV422_10 0x00000200 ///< YUV 4:2:2 10-bit.
+#define VA_RT_FORMAT_YUV444_10 0x00000400 ///< YUV 4:4:4 10-bit.
+#define VA_RT_FORMAT_YUV420_12 0x00001000 ///< YUV 4:2:0 12-bit.
+#define VA_RT_FORMAT_YUV422_12 0x00002000 ///< YUV 4:2:2 12-bit.
+#define VA_RT_FORMAT_YUV444_12 0x00004000 ///< YUV 4:4:4 12-bit.
+
+#define VA_RT_FORMAT_RGB16 0x00010000 ///< Packed RGB, 16 bits per pixel.
+#define VA_RT_FORMAT_RGB32 0x00020000 ///< Packed RGB, 32 bits per pixel, 8 bits per colour sample.
+#define VA_RT_FORMAT_RGBP 0x00100000 ///< Planar RGB, 8 bits per sample.
+#define VA_RT_FORMAT_RGB32_10 0x00200000 ///< Packed RGB, 32 bits per pixel, 10 bits per colour sample.
+
+#define VA_RT_FORMAT_PROTECTED 0x80000000
+
+#define VA_RT_FORMAT_RGB32_10BPP VA_RT_FORMAT_RGB32_10 ///< @deprecated use VA_RT_FORMAT_RGB32_10 instead.
+#define VA_RT_FORMAT_YUV420_10BPP VA_RT_FORMAT_YUV420_10 ///< @deprecated use VA_RT_FORMAT_YUV420_10 instead.
+
+/** @name Attribute values for VAConfigAttribRateControl */
+/**@{*/
+/** \brief Driver does not support any form of rate control. */
+#define VA_RC_NONE 0x00000001
+/** \brief Constant bitrate. */
+#define VA_RC_CBR 0x00000002
+/** \brief Variable bitrate. */
+#define VA_RC_VBR 0x00000004
+/** \brief Video conference mode. */
+#define VA_RC_VCM 0x00000008
+/** \brief Constant QP. */
+#define VA_RC_CQP 0x00000010
+/** \brief Variable bitrate with peak rate higher than average bitrate. */
+#define VA_RC_VBR_CONSTRAINED 0x00000020
+/** \brief Intelligent Constant Quality. Provided an initial ICQ_quality_factor,
+ * adjusts QP at a frame and MB level based on motion to improve subjective quality. */
+#define VA_RC_ICQ 0x00000040
+/** \brief Macroblock based rate control. Per MB control is decided
+ * internally in the encoder. It may be combined with other RC modes, except CQP. */
+#define VA_RC_MB 0x00000080
+/** \brief Constant Frame Size; it is used for scenarios with a small tolerance on frame size. */
+#define VA_RC_CFS 0x00000100
+/** \brief Parallel BRC, for hierarchical B.
+ *
+ * For hierarchical B, B frames can be referenced by other B frames.
+ * Currently three layers of hierarchy are defined:
+ * B0 - regular B, no reference to other B frames.
+ * B1 - reference to only I, P and regular B0 frames.
+ * B2 - reference to any other frames, including B1.
+ * In a hierarchical B structure, B frames on the same layer can be processed
+ * simultaneously, and BRC adjusts accordingly. This is the so-called
+ * Parallel BRC. */
+#define VA_RC_PARALLEL 0x00000200
+/** \brief Quality-defined VBR.
+ * Uses a quality factor to determine a good-enough QP for each MB, such that
+ * good-enough quality can be obtained without wasting bits.
+ * For this BRC mode, you must set all legacy VBR parameters
+ * and reuse quality_factor in \c VAEncMiscParameterRateControl.
+ * */
+#define VA_RC_QVBR 0x00000400
+/** \brief Average VBR
+ * Average variable bitrate control algorithm focuses on overall encoding
+ * quality while meeting the specified target bitrate, within the accuracy
+ * range, after a convergence period.
+ * bits_per_second in VAEncMiscParameterRateControl is the target bitrate for AVBR.
+ * Convergence is specified in units of frames.
+ * window_size in VAEncMiscParameterRateControl is equal to the convergence for AVBR.
+ * Accuracy is in the range [1,100]; 1 means one percent, and so on.
+ * target_percentage in VAEncMiscParameterRateControl is equal to accuracy for AVBR. */
+#define VA_RC_AVBR 0x00000800
+
+/**@}*/
+
+/** @name Attribute values for VAConfigAttribDecSliceMode */
+/**@{*/
+/** \brief Driver supports normal mode for slice decoding */
+#define VA_DEC_SLICE_MODE_NORMAL 0x00000001
+/** \brief Driver supports base mode for slice decoding */
+#define VA_DEC_SLICE_MODE_BASE 0x00000002
+
+/** @name Attribute values for VAConfigAttribDecJPEG */
+/**@{*/
+typedef union _VAConfigAttribValDecJPEG {
+ struct {
+ /** \brief Set to (1 << VA_ROTATION_xxx) for supported rotation angles. */
+ uint32_t rotation : 4;
+ /** \brief Reserved for future use. */
+ uint32_t reserved : 28;
+ } bits;
+ uint32_t value;
+} VAConfigAttribValDecJPEG;
+/** @name Attribute values for VAConfigAttribDecProcessing */
+/**@{*/
+/** \brief No decoding + processing in a single decoding call. */
+#define VA_DEC_PROCESSING_NONE 0x00000000
+/** \brief Decode + processing in a single decoding call. */
+#define VA_DEC_PROCESSING 0x00000001
+/**@}*/
+
+/** @name Attribute values for VAConfigAttribEncPackedHeaders */
+/**@{*/
+/** \brief Driver does not support any packed headers mode. */
+#define VA_ENC_PACKED_HEADER_NONE 0x00000000
+/**
+ * \brief Driver supports packed sequence headers. e.g. SPS for H.264.
+ *
+ * Application must provide it to driver once this flag is returned through
+ * vaGetConfigAttributes()
+ */
+#define VA_ENC_PACKED_HEADER_SEQUENCE 0x00000001
+/**
+ * \brief Driver supports packed picture headers. e.g. PPS for H.264.
+ *
+ * Application must provide it to driver once this flag is returned through
+ * vaGetConfigAttributes()
+ */
+#define VA_ENC_PACKED_HEADER_PICTURE 0x00000002
+/**
+ * \brief Driver supports packed slice headers. e.g. slice_header() for H.264.
+ *
+ * Application must provide it to driver once this flag is returned through
+ * vaGetConfigAttributes()
+ */
+#define VA_ENC_PACKED_HEADER_SLICE 0x00000004
+/**
+ * \brief Driver supports misc packed headers. e.g. SEI for H.264.
+ *
+ * @deprecated
+ * This is a deprecated packed header flag. All applications can use
+ * \c VA_ENC_PACKED_HEADER_RAW_DATA to pass the corresponding packed
+ * header data buffer to the driver
+ */
+#define VA_ENC_PACKED_HEADER_MISC 0x00000008
+/** \brief Driver supports raw packed header, see VAEncPackedHeaderRawData */
+#define VA_ENC_PACKED_HEADER_RAW_DATA 0x00000010
+/**@}*/
+
+/** @name Attribute values for VAConfigAttribEncInterlaced */
+/**@{*/
+/** \brief Driver does not support interlaced coding. */
+#define VA_ENC_INTERLACED_NONE 0x00000000
+/** \brief Driver supports interlaced frame coding. */
+#define VA_ENC_INTERLACED_FRAME 0x00000001
+/** \brief Driver supports interlaced field coding. */
+#define VA_ENC_INTERLACED_FIELD 0x00000002
+/** \brief Driver supports macroblock adaptive frame field coding. */
+#define VA_ENC_INTERLACED_MBAFF 0x00000004
+/** \brief Driver supports picture adaptive frame field coding. */
+#define VA_ENC_INTERLACED_PAFF 0x00000008
+/**@}*/
+
+/** @name Attribute values for VAConfigAttribEncSliceStructure */
+/**@{*/
+/** \brief Driver supports a power-of-two number of rows per slice. */
+#define VA_ENC_SLICE_STRUCTURE_POWER_OF_TWO_ROWS 0x00000001
+/** \brief Driver supports an arbitrary number of macroblocks per slice. */
+#define VA_ENC_SLICE_STRUCTURE_ARBITRARY_MACROBLOCKS 0x00000002
+/** \brief Driver supports 1 row per slice */
+#define VA_ENC_SLICE_STRUCTURE_EQUAL_ROWS 0x00000004
+/** \brief Driver supports a maximum encoded slice size per slice */
+#define VA_ENC_SLICE_STRUCTURE_MAX_SLICE_SIZE 0x00000008
+/** \brief Driver supports an arbitrary number of rows per slice. */
+#define VA_ENC_SLICE_STRUCTURE_ARBITRARY_ROWS 0x00000010
+/**@}*/
+
+/** \brief Attribute value for VAConfigAttribMaxFrameSize */
+typedef union _VAConfigAttribValMaxFrameSize {
+ struct {
+ /** \brief Max frame size support.
+ * If max_frame_size == 1, VAEncMiscParameterTypeMaxFrameSize/VAEncMiscParameterBufferMaxFrameSize
+ * can be used to set the frame size; if multiple_pass also equals 1, VAEncMiscParameterTypeMultiPassFrameSize/
+ * VAEncMiscParameterBufferMultiPassFrameSize can be used to set the frame size and pass information.
+ */
+ uint32_t max_frame_size : 1;
+ /** \brief multiple_pass support */
+ uint32_t multiple_pass : 1;
+ /** \brief reserved bits for future, must be zero*/
+ uint32_t reserved :30;
+ } bits;
+ uint32_t value;
+} VAConfigAttribValMaxFrameSize;
+
+/** \brief Attribute value for VAConfigAttribEncJPEG */
+typedef union _VAConfigAttribValEncJPEG {
+ struct {
+ /** \brief set to 1 for arithmetic coding. */
+ uint32_t arithmatic_coding_mode : 1;
+ /** \brief set to 1 for progressive dct. */
+ uint32_t progressive_dct_mode : 1;
+ /** \brief set to 1 for non-interleaved. */
+ uint32_t non_interleaved_mode : 1;
+ /** \brief set to 1 for differential. */
+ uint32_t differential_mode : 1;
+ uint32_t max_num_components : 3;
+ uint32_t max_num_scans : 4;
+ uint32_t max_num_huffman_tables : 3;
+ uint32_t max_num_quantization_tables : 3;
+ } bits;
+ uint32_t value;
+} VAConfigAttribValEncJPEG;
+
+/** @name Attribute values for VAConfigAttribEncQuantization */
+/**@{*/
+/** \brief Driver does not support special types of quantization */
+#define VA_ENC_QUANTIZATION_NONE 0x00000000
+/** \brief Driver supports trellis quantization */
+#define VA_ENC_QUANTIZATION_TRELLIS_SUPPORTED 0x00000001
+/**@}*/
+
+/** @name Attribute values for VAConfigAttribPredictionDirection */
+/**@{*/
+/** \brief Driver supports forward reference frames (inter frame for vpx, P frame for H26x MPEG).
+ * It can work with VAConfigAttribEncMaxRefFrames, for example: low delay B frames of HEVC.
+ * These values can be OR'd together. The typical value should be VA_PREDICTION_DIRECTION_PREVIOUS
+ * or VA_PREDICTION_DIRECTION_PREVIOUS | VA_PREDICTION_DIRECTION_FUTURE; theoretically, there
+ * is no stream that only includes future reference frames.
+ */
+#define VA_PREDICTION_DIRECTION_PREVIOUS 0x00000001
+/** \brief Driver supports backward prediction frames/slices */
+#define VA_PREDICTION_DIRECTION_FUTURE 0x00000002
+/**@}*/
+
+/** @name Attribute values for VAConfigAttribEncIntraRefresh */
+/**@{*/
+/** \brief Driver does not support intra refresh */
+#define VA_ENC_INTRA_REFRESH_NONE 0x00000000
+/** \brief Driver supports column based rolling intra refresh */
+#define VA_ENC_INTRA_REFRESH_ROLLING_COLUMN 0x00000001
+/** \brief Driver supports row based rolling intra refresh */
+#define VA_ENC_INTRA_REFRESH_ROLLING_ROW 0x00000002
+/** \brief Driver supports adaptive intra refresh */
+#define VA_ENC_INTRA_REFRESH_ADAPTIVE 0x00000010
+/** \brief Driver supports cyclic intra refresh */
+#define VA_ENC_INTRA_REFRESH_CYCLIC 0x00000020
+/** \brief Driver supports intra refresh of P frame*/
+#define VA_ENC_INTRA_REFRESH_P_FRAME 0x00010000
+/** \brief Driver supports intra refresh of B frame */
+#define VA_ENC_INTRA_REFRESH_B_FRAME 0x00020000
+/** \brief Driver supports intra refresh of multiple reference encoder */
+#define VA_ENC_INTRA_REFRESH_MULTI_REF 0x00040000
+
+/**@}*/
+
+/** \brief Attribute value for VAConfigAttribEncROI */
+typedef union _VAConfigAttribValEncROI {
+ struct {
+ /** \brief The number of ROI regions supported, 0 if ROI is not supported. */
+ uint32_t num_roi_regions : 8;
+ /**
+ * \brief A flag indicates whether ROI priority is supported
+ *
+ * \ref roi_rc_priority_support equal to 1 specifies the underlying driver supports
+ * ROI priority when VAConfigAttribRateControl != VA_RC_CQP, user can use \c roi_value
+ * in #VAEncROI to set ROI priority. \ref roi_rc_priority_support equal to 0 specifies
+ * the underlying driver doesn't support ROI priority.
+ *
+ * User should ignore \ref roi_rc_priority_support when VAConfigAttribRateControl == VA_RC_CQP
+ * because ROI delta QP is always required when VAConfigAttribRateControl == VA_RC_CQP.
+ */
+ uint32_t roi_rc_priority_support : 1;
+ /**
+ * \brief A flag indicates whether ROI delta QP is supported
+ *
+ * \ref roi_rc_qp_delta_support equal to 1 specifies the underlying driver supports
+ * ROI delta QP when VAConfigAttribRateControl != VA_RC_CQP, user can use \c roi_value
+ * in #VAEncROI to set ROI delta QP. \ref roi_rc_qp_delta_support equal to 0 specifies
+ * the underlying driver doesn't support ROI delta QP.
+ *
+ * User should ignore \ref roi_rc_qp_delta_support when VAConfigAttribRateControl == VA_RC_CQP
+ * because ROI delta QP is always required when VAConfigAttribRateControl == VA_RC_CQP.
+ */
+ uint32_t roi_rc_qp_delta_support : 1;
+ uint32_t reserved : 22;
+ } bits;
+ uint32_t value;
+} VAConfigAttribValEncROI;
+
+/** \brief Attribute value for VAConfigAttribEncRateControlExt */
+typedef union _VAConfigAttribValEncRateControlExt {
+ struct {
+ /**
+ * \brief The maximum number of temporal layers minus 1
+ *
+ * \ref max_num_temporal_layers_minus1 plus 1 specifies the maximum number of temporal
+ * layers that supported by the underlying driver. \ref max_num_temporal_layers_minus1
+ * equal to 0 implies the underlying driver doesn't support encoding with temporal layer.
+ */
+ uint32_t max_num_temporal_layers_minus1 : 8;
+
+ /**
+ * \brief Support temporal layer bit-rate control flag.
+ *
+ * \ref temporal_layer_bitrate_control_flag equal to 1 specifies the underlying driver
+ * can support bit-rate control per temporal layer when (#VAConfigAttribRateControl == #VA_RC_CBR ||
+ * #VAConfigAttribRateControl == #VA_RC_VBR).
+ *
+ * The underlying driver must set \ref temporal_layer_bitrate_control_flag to 0 when
+ * \c max_num_temporal_layers_minus1 is equal to 0
+ *
+ * To use bit-rate control per temporal layer, an application must send the right layer
+ * structure via #VAEncMiscParameterTemporalLayerStructure at the beginning of a coded sequence
+ * and then followed by #VAEncMiscParameterRateControl and #VAEncMiscParameterFrameRate structures
+ * for each layer, using the \c temporal_id field as the layer identifier. Otherwise
+ * the driver doesn't use bitrate control per temporal layer if an application doesn't send the
+ * layer structure via #VAEncMiscParameterTemporalLayerStructure to the driver. The driver returns
+ * VA_STATUS_ERROR_INVALID_PARAMETER if an application sends a wrong layer structure or doesn't send
+ * #VAEncMiscParameterRateControl and #VAEncMiscParameterFrameRate for each layer.
+ *
+ * The driver will ignore #VAEncMiscParameterTemporalLayerStructure and the \c temporal_id field
+ * in #VAEncMiscParameterRateControl and #VAEncMiscParameterFrameRate if
+ * \ref temporal_layer_bitrate_control_flag is equal to 0 or #VAConfigAttribRateControl == #VA_RC_CQP
+ */
+ uint32_t temporal_layer_bitrate_control_flag : 1;
+ uint32_t reserved : 23;
+ } bits;
+ uint32_t value;
+} VAConfigAttribValEncRateControlExt;
+
+/** \brief Attribute value for VAConfigAttribMultipleFrame*/
+typedef union _VAConfigAttribValMultipleFrame {
+ struct {
+ /** \brief max num of concurrent frames from different stream */
+ uint32_t max_num_concurrent_frames : 8;
+ /** \brief Indicates whether all streams must use the same quality level.
+ * If mixed_quality_level == 0, the same quality level setting is required for multiple streams;
+ * if mixed_quality_level == 1, different streams can have different quality levels. */
+ uint32_t mixed_quality_level : 1;
+ /** \brief reserved bit for future, must be zero */
+ uint32_t reserved : 23;
+ } bits;
+ uint32_t value;
+} VAConfigAttribValMultipleFrame;
+
+/** @name Attribute values for VAConfigAttribProcessingRate. */
+/**@{*/
+/** \brief Driver does not support processing rate report */
+#define VA_PROCESSING_RATE_NONE 0x00000000
+/** \brief Driver supports encode processing rate report */
+#define VA_PROCESSING_RATE_ENCODE 0x00000001
+/** \brief Driver supports decode processing rate report */
+#define VA_PROCESSING_RATE_DECODE 0x00000002
+/**@}*/
+/**
+ * if an attribute is not applicable for a given
+ * profile/entrypoint pair, then set the value to the following
+ */
+#define VA_ATTRIB_NOT_SUPPORTED 0x80000000
+
+/** Get maximum number of profiles supported by the implementation */
+int vaMaxNumProfiles (
+ VADisplay dpy
+);
+
+/** Get maximum number of entrypoints supported by the implementation */
+int vaMaxNumEntrypoints (
+ VADisplay dpy
+);
+
+/** Get maximum number of attributes supported by the implementation */
+int vaMaxNumConfigAttributes (
+ VADisplay dpy
+);
+
+/**
+ * Query supported profiles
+ * The caller must provide a "profile_list" array that can hold at
+ * least vaMaxNumProfiles() entries. The actual number of profiles
+ * returned in "profile_list" is returned in "num_profiles".
+ */
+VAStatus vaQueryConfigProfiles (
+ VADisplay dpy,
+ VAProfile *profile_list, /* out */
+ int *num_profiles /* out */
+);
+
+/**
+ * Query supported entrypoints for a given profile
+ * The caller must provide an "entrypoint_list" array that can hold at
+ * least vaMaxNumEntrypoints() entries. The actual number of entrypoints
+ * returned in "entrypoint_list" is returned in "num_entrypoints".
+ */
+VAStatus vaQueryConfigEntrypoints (
+ VADisplay dpy,
+ VAProfile profile,
+ VAEntrypoint *entrypoint_list, /* out */
+ int *num_entrypoints /* out */
+);
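+
+/*
+ * Illustrative sketch (not part of the API above): enumerating the supported
+ * profiles and entrypoints with the query functions declared here. The
+ * variables dpy, profiles and entrypoints are placeholders chosen for this
+ * example only.
+ *
+ *     int max_profiles = vaMaxNumProfiles(dpy);
+ *     VAProfile *profiles = malloc(max_profiles * sizeof(VAProfile));
+ *     int num_profiles = 0;
+ *     vaQueryConfigProfiles(dpy, profiles, &num_profiles);
+ *
+ *     int max_entrypoints = vaMaxNumEntrypoints(dpy);
+ *     VAEntrypoint *entrypoints = malloc(max_entrypoints * sizeof(VAEntrypoint));
+ *     int num_entrypoints = 0;
+ *     vaQueryConfigEntrypoints(dpy, profiles[0], entrypoints, &num_entrypoints);
+ */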
+
+/**
+ * Get attributes for a given profile/entrypoint pair
+ * The caller must provide an "attrib_list" with all attributes to be
+ * retrieved. Upon return, the attributes in "attrib_list" have been
+ * updated with their value. Unknown attributes or attributes that are
+ * not supported for the given profile/entrypoint pair will have their
+ * value set to VA_ATTRIB_NOT_SUPPORTED
+ */
+VAStatus vaGetConfigAttributes (
+ VADisplay dpy,
+ VAProfile profile,
+ VAEntrypoint entrypoint,
+ VAConfigAttrib *attrib_list, /* in/out */
+ int num_attribs
+);
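+
+/*
+ * Illustrative sketch (not part of the API above): querying the render-target
+ * format and rate-control attributes for an encode profile/entrypoint pair.
+ * VAProfileH264Main and VAEntrypointEncSlice are example values only, and any
+ * attribute returned as VA_ATTRIB_NOT_SUPPORTED must be ignored.
+ *
+ *     VAConfigAttrib attribs[2];
+ *     attribs[0].type = VAConfigAttribRTFormat;
+ *     attribs[1].type = VAConfigAttribRateControl;
+ *     vaGetConfigAttributes(dpy, VAProfileH264Main, VAEntrypointEncSlice,
+ *                           attribs, 2);
+ *     if (attribs[1].value != VA_ATTRIB_NOT_SUPPORTED &&
+ *         (attribs[1].value & VA_RC_CBR))
+ *         ;  // constant-bitrate rate control is available for this pair
+ */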
+
+/** Generic ID type, can be re-typed for specific implementation */
+typedef unsigned int VAGenericID;
+
+typedef VAGenericID VAConfigID;
+
+/**
+ * Create a configuration for the video decode/encode/processing pipeline.
+ * The caller passes in an attribute list that specifies the attributes it cares
+ * about, with the rest taking default values.
+ */
+VAStatus vaCreateConfig (
+ VADisplay dpy,
+ VAProfile profile,
+ VAEntrypoint entrypoint,
+ VAConfigAttrib *attrib_list,
+ int num_attribs,
+ VAConfigID *config_id /* out */
+);
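+
+/*
+ * Illustrative sketch (not part of the API above): creating a decode
+ * configuration that requests 8-bit YUV 4:2:0 render targets. Attributes not
+ * listed in attrib_list keep their default values; dpy is a placeholder.
+ *
+ *     VAConfigAttrib attrib;
+ *     attrib.type = VAConfigAttribRTFormat;
+ *     attrib.value = VA_RT_FORMAT_YUV420;
+ *     VAConfigID config_id;
+ *     vaCreateConfig(dpy, VAProfileH264Main, VAEntrypointVLD,
+ *                    &attrib, 1, &config_id);
+ */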
+
+/**
+ * Free resources associated with a given config
+ */
+VAStatus vaDestroyConfig (
+ VADisplay dpy,
+ VAConfigID config_id
+);
+
+/**
+ * Query all attributes for a given configuration
+ * The profile of the configuration is returned in "profile"
+ * The entrypoint of the configuration is returned in "entrypoint"
+ * The caller must provide an "attrib_list" array that can hold at least
+ * vaMaxNumConfigAttributes() entries. The actual number of attributes
+ * returned in "attrib_list" is returned in "num_attribs"
+ */
+VAStatus vaQueryConfigAttributes (
+ VADisplay dpy,
+ VAConfigID config_id,
+ VAProfile *profile, /* out */
+ VAEntrypoint *entrypoint, /* out */
+ VAConfigAttrib *attrib_list,/* out */
+ int *num_attribs /* out */
+);
+
+
+/**
+ * Contexts and Surfaces
+ *
+ * Context represents a "virtual" video decode, encode or video processing
+ * pipeline. Surfaces are render targets for a given context. The data in the
+ * surfaces are not accessible to the client except if derived image is supported
+ * and the internal data format of the surface is implementation specific.
+ *
+ * Surfaces are provided as a hint of what surfaces will be used when the context
+ * is created through vaCreateContext(). A surface may be used by different contexts
+ * at the same time as long as the application makes sure the operations are synchronized
+ * between different contexts, e.g. a surface is used as the output of a decode context
+ * and the input of a video process context. Surfaces can only be destroyed after all
+ * contexts using these surfaces have been destroyed.
+ *
+ * Both contexts and surfaces are identified by unique IDs and their
+ * implementation-specific internals are kept opaque to the clients.
+ */
+
+typedef VAGenericID VAContextID;
+
+typedef VAGenericID VASurfaceID;
+
+#define VA_INVALID_ID 0xffffffff
+#define VA_INVALID_SURFACE VA_INVALID_ID
+
+/** \brief Generic value types. */
+typedef enum {
+ VAGenericValueTypeInteger = 1, /**< 32-bit signed integer. */
+ VAGenericValueTypeFloat, /**< 32-bit floating-point value. */
+ VAGenericValueTypePointer, /**< Generic pointer type */
+ VAGenericValueTypeFunc /**< Pointer to function */
+} VAGenericValueType;
+
+/** \brief Generic function type. */
+typedef void (*VAGenericFunc)(void);
+
+/** \brief Generic value. */
+typedef struct _VAGenericValue {
+ /** \brief Value type. See #VAGenericValueType. */
+ VAGenericValueType type;
+ /** \brief Value holder. */
+ union {
+ /** \brief 32-bit signed integer. */
+ int32_t i;
+ /** \brief 32-bit float. */
+ float f;
+ /** \brief Generic pointer. */
+ void *p;
+ /** \brief Pointer to function. */
+ VAGenericFunc fn;
+ } value;
+} VAGenericValue;
+
+/** @name Surface attribute flags */
+/**@{*/
+/** \brief Surface attribute is not supported. */
+#define VA_SURFACE_ATTRIB_NOT_SUPPORTED 0x00000000
+/** \brief Surface attribute can be got through vaQuerySurfaceAttributes(). */
+#define VA_SURFACE_ATTRIB_GETTABLE 0x00000001
+/** \brief Surface attribute can be set through vaCreateSurfaces(). */
+#define VA_SURFACE_ATTRIB_SETTABLE 0x00000002
+/**@}*/
+
+/** \brief Surface attribute types. */
+typedef enum {
+ VASurfaceAttribNone = 0,
+ /**
+ * \brief Pixel format (fourcc).
+ *
+ * The value is meaningful as input to vaQuerySurfaceAttributes().
+ * If zero, the driver returns the optimal pixel format for the
+ * specified config. Otherwise, if non-zero, the value represents
+ * a pixel format (FOURCC) that is kept as is on output, if the
+ * driver supports it. Otherwise, the driver sets the value to
+ * zero and drops the \c VA_SURFACE_ATTRIB_SETTABLE flag.
+ */
+ VASurfaceAttribPixelFormat,
+ /** \brief Minimal width in pixels (int, read-only). */
+ VASurfaceAttribMinWidth,
+ /** \brief Maximal width in pixels (int, read-only). */
+ VASurfaceAttribMaxWidth,
+ /** \brief Minimal height in pixels (int, read-only). */
+ VASurfaceAttribMinHeight,
+ /** \brief Maximal height in pixels (int, read-only). */
+ VASurfaceAttribMaxHeight,
+ /** \brief Surface memory type expressed in bit fields (int, read/write). */
+ VASurfaceAttribMemoryType,
+ /** \brief External buffer descriptor (pointer, write).
+ *
+ * Refer to the documentation for the memory type being created to
+ * determine what descriptor structure to pass here. If not otherwise
+ * stated, the common VASurfaceAttribExternalBuffers should be used.
+ */
+ VASurfaceAttribExternalBufferDescriptor,
+ /** \brief Surface usage hint, gives the driver a hint of intended usage
+ * to optimize allocation (e.g. tiling) (int, read/write). */
+ VASurfaceAttribUsageHint,
+ /** \brief Number of surface attributes. */
+ VASurfaceAttribCount
+} VASurfaceAttribType;
+
+/** \brief Surface attribute. */
+typedef struct _VASurfaceAttrib {
+ /** \brief Type. */
+ VASurfaceAttribType type;
+ /** \brief Flags. See "Surface attribute flags". */
+ uint32_t flags;
+ /** \brief Value. See "Surface attribute types" for the expected types. */
+ VAGenericValue value;
+} VASurfaceAttrib;
+
+/**
+ * @name VASurfaceAttribMemoryType values in bit fields.
+ * Bits 0:7 are reserved for generic types, bits 31:28 are reserved for
+ * Linux DRM, and bits 23:20 are reserved for Android. DRM and Android specific
+ * types are defined in DRM and Android header files.
+ */
+/**@{*/
+/** \brief VA memory type (default) is supported. */
+#define VA_SURFACE_ATTRIB_MEM_TYPE_VA 0x00000001
+/** \brief V4L2 buffer memory type is supported. */
+#define VA_SURFACE_ATTRIB_MEM_TYPE_V4L2 0x00000002
+/** \brief User pointer memory type is supported. */
+#define VA_SURFACE_ATTRIB_MEM_TYPE_USER_PTR 0x00000004
+/**@}*/
+
+/**
+ * \brief VASurfaceAttribExternalBuffers structure for
+ * the VASurfaceAttribExternalBufferDescriptor attribute.
+ */
+typedef struct _VASurfaceAttribExternalBuffers {
+ /** \brief pixel format in fourcc. */
+ uint32_t pixel_format;
+ /** \brief width in pixels. */
+ uint32_t width;
+ /** \brief height in pixels. */
+ uint32_t height;
+ /** \brief total size of the buffer in bytes. */
+ uint32_t data_size;
+ /** \brief number of planes for planar layout */
+ uint32_t num_planes;
+ /** \brief pitch for each plane in bytes */
+ uint32_t pitches[4];
+ /** \brief offset for each plane in bytes */
+ uint32_t offsets[4];
+ /** \brief buffer handles or user pointers */
+ uintptr_t *buffers;
+ /** \brief number of elements in the "buffers" array */
+ uint32_t num_buffers;
+ /** \brief flags. See "Surface external buffer descriptor flags". */
+ uint32_t flags;
+ /** \brief reserved for passing private data */
+ void *private_data;
+} VASurfaceAttribExternalBuffers;
+
+/** @name VASurfaceAttribExternalBuffers flags */
+/**@{*/
+/** \brief Enable memory tiling */
+#define VA_SURFACE_EXTBUF_DESC_ENABLE_TILING 0x00000001
+/** \brief Memory is cacheable */
+#define VA_SURFACE_EXTBUF_DESC_CACHED 0x00000002
+/** \brief Memory is non-cacheable */
+#define VA_SURFACE_EXTBUF_DESC_UNCACHED 0x00000004
+/** \brief Memory is write-combined */
+#define VA_SURFACE_EXTBUF_DESC_WC 0x00000008
+/** \brief Memory is protected */
+#define VA_SURFACE_EXTBUF_DESC_PROTECTED 0x80000000
+
+/** @name VASurfaceAttribUsageHint attribute usage hint flags */
+/**@{*/
+/** \brief Surface usage not indicated. */
+#define VA_SURFACE_ATTRIB_USAGE_HINT_GENERIC 0x00000000
+/** \brief Surface used by video decoder. */
+#define VA_SURFACE_ATTRIB_USAGE_HINT_DECODER 0x00000001
+/** \brief Surface used by video encoder. */
+#define VA_SURFACE_ATTRIB_USAGE_HINT_ENCODER 0x00000002
+/** \brief Surface read by video post-processing. */
+#define VA_SURFACE_ATTRIB_USAGE_HINT_VPP_READ 0x00000004
+/** \brief Surface written by video post-processing. */
+#define VA_SURFACE_ATTRIB_USAGE_HINT_VPP_WRITE 0x00000008
+/** \brief Surface used for display. */
+#define VA_SURFACE_ATTRIB_USAGE_HINT_DISPLAY 0x00000010
+/** \brief Surface used for export to third-party APIs, e.g. via
+ * vaExportSurfaceHandle(). */
+#define VA_SURFACE_ATTRIB_USAGE_HINT_EXPORT 0x00000020
+
+/**@}*/
+
+/**
+ * \brief Queries surface attributes for the supplied config.
+ *
+ * This function queries for all supported attributes for the
+ * supplied VA @config. In particular, if the underlying hardware
+ * supports the creation of VA surfaces in various formats, then
+ * this function will enumerate all pixel formats that are supported.
+ *
+ * The \c attrib_list array is allocated by the user and \c
+ * num_attribs shall be initialized to the number of allocated
+ * elements in that array. Upon successful return, the actual number
+ * of attributes will be overwritten into \c num_attribs. Otherwise,
+ * \c VA_STATUS_ERROR_MAX_NUM_EXCEEDED is returned and \c num_attribs
+ * is adjusted to the number of elements that would be returned if
+ * enough space was available.
+ *
+ * Note: it is perfectly valid to pass NULL to the \c attrib_list
+ * argument when vaQuerySurfaceAttributes() is used to determine the
+ * actual number of elements that need to be allocated.
+ *
+ * @param[in] dpy the VA display
+ * @param[in] config the config identifying a codec or a video
+ * processing pipeline
+ * @param[out] attrib_list the output array of #VASurfaceAttrib elements
+ * @param[in,out] num_attribs the number of elements allocated on
+ * input, the number of elements actually filled in output
+ */
+VAStatus
+vaQuerySurfaceAttributes(
+ VADisplay dpy,
+ VAConfigID config,
+ VASurfaceAttrib *attrib_list,
+ unsigned int *num_attribs
+);
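+
+/*
+ * Illustrative sketch (not part of the API above): the two-call pattern
+ * described above, passing NULL first to learn how many attribute slots to
+ * allocate; dpy and config are placeholders.
+ *
+ *     unsigned int num_attribs = 0;
+ *     vaQuerySurfaceAttributes(dpy, config, NULL, &num_attribs);
+ *     VASurfaceAttrib *attribs = malloc(num_attribs * sizeof(VASurfaceAttrib));
+ *     vaQuerySurfaceAttributes(dpy, config, attribs, &num_attribs);
+ */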
+
+/**
+ * \brief Creates an array of surfaces
+ *
+ * Creates an array of surfaces. The optional list of attributes shall
+ * be constructed based on what the underlying hardware could expose
+ * through vaQuerySurfaceAttributes().
+ *
+ * @param[in] dpy the VA display
+ * @param[in] format the desired surface format. See \c VA_RT_FORMAT_*
+ * @param[in] width the surface width
+ * @param[in] height the surface height
+ * @param[out] surfaces the array of newly created surfaces
+ * @param[in] num_surfaces the number of surfaces to create
+ * @param[in] attrib_list the list of (optional) attributes, or \c NULL
+ * @param[in] num_attribs the number of attributes supplied in
+ * \c attrib_list, or zero
+ */
+VAStatus
+vaCreateSurfaces(
+ VADisplay dpy,
+ unsigned int format,
+ unsigned int width,
+ unsigned int height,
+ VASurfaceID *surfaces,
+ unsigned int num_surfaces,
+ VASurfaceAttrib *attrib_list,
+ unsigned int num_attribs
+);
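+
+/*
+ * Illustrative sketch (not part of the API above): creating four 1920x1080
+ * NV12 surfaces by combining the YUV 4:2:0 render-target format with a fourcc
+ * surface attribute (VA_FOURCC_NV12 is the NV12 fourcc defined by libva);
+ * dpy is a placeholder.
+ *
+ *     VASurfaceAttrib attrib;
+ *     attrib.type = VASurfaceAttribPixelFormat;
+ *     attrib.flags = VA_SURFACE_ATTRIB_SETTABLE;
+ *     attrib.value.type = VAGenericValueTypeInteger;
+ *     attrib.value.value.i = VA_FOURCC_NV12;
+ *     VASurfaceID surfaces[4];
+ *     vaCreateSurfaces(dpy, VA_RT_FORMAT_YUV420, 1920, 1080,
+ *                      surfaces, 4, &attrib, 1);
+ */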
+
+/**
+ * vaDestroySurfaces - Destroy resources associated with surfaces.
+ * Surfaces can only be destroyed after all contexts using these surfaces have been
+ * destroyed.
+ * dpy: display
+ * surfaces: array of surfaces to destroy
+ * num_surfaces: number of surfaces in the array to be destroyed.
+ */
+VAStatus vaDestroySurfaces (
+ VADisplay dpy,
+ VASurfaceID *surfaces,
+ int num_surfaces
+);
+
+#define VA_PROGRESSIVE 0x1
+/**
+ * vaCreateContext - Create a context
+ * dpy: display
+ * config_id: configuration for the context
+ * picture_width: coded picture width
+ * picture_height: coded picture height
+ * flag: any combination of the following:
+ * VA_PROGRESSIVE (only progressive frame pictures in the sequence when set)
+ * render_targets: a hint for render targets (surfaces) tied to the context
+ * num_render_targets: number of render targets in the above array
+ * context: created context id upon return
+ */
+VAStatus vaCreateContext (
+ VADisplay dpy,
+ VAConfigID config_id,
+ int picture_width,
+ int picture_height,
+ int flag,
+ VASurfaceID *render_targets,
+ int num_render_targets,
+ VAContextID *context /* out */
+);
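+
+/*
+ * Illustrative sketch (not part of the API above): creating a progressive
+ * decode context over previously created surfaces; config_id and surfaces are
+ * placeholders carried over from the earlier examples.
+ *
+ *     VAContextID context;
+ *     vaCreateContext(dpy, config_id, 1920, 1080, VA_PROGRESSIVE,
+ *                     surfaces, 4, &context);
+ */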
+
+/**
+ * vaDestroyContext - Destroy a context
+ * dpy: display
+ * context: context to be destroyed
+ */
+VAStatus vaDestroyContext (
+ VADisplay dpy,
+ VAContextID context
+);
+
+//Multi-frame context
+typedef VAGenericID VAMFContextID;
+/**
+ * vaCreateMFContext - Create a multi-frame context
+ * an interface encapsulating the memory objects and structures common to all streams that are
+ * required for single GPU task submission from several VAContextID's.
+ * Allocation: This call only creates an instance; it doesn't allocate any additional memory.
+ * Support identification: The application can identify multi-frame feature support by its ability
+ * to create a multi-frame context. If the driver supports multi-frame, the call is successful,
+ * mf_context != NULL and VAStatus = VA_STATUS_SUCCESS; otherwise, if multi-frame processing is
+ * not supported, the driver returns VA_STATUS_ERROR_UNIMPLEMENTED and mf_context = NULL.
+ * return values:
+ * VA_STATUS_SUCCESS - operation successful.
+ * VA_STATUS_ERROR_UNIMPLEMENTED - no support for multi-frame.
+ * dpy: display adapter.
+ * mf_context: Multi-Frame context encapsulating all associated contexts
+ * for multi-frame submission.
+ */
+VAStatus vaCreateMFContext (
+ VADisplay dpy,
+ VAMFContextID *mf_context /* out */
+);
+
+/**
+ * vaMFAddContext - Associates a context used for Multi-Frame submission
+ * with a common Multi-Frame context.
+ * Try to add a context to find out whether it is supported.
+ * Allocation: this call allocates and/or reallocates all memory objects
+ * common to all contexts associated with a particular Multi-Frame context.
+ * All memory required for each individual context (pixel buffers, internal driver
+ * buffers required for processing) is allocated during the standard vaCreateContext call for that context.
+ * Runtime dependency - if the current implementation doesn't allow running different entry points/profiles,
+ * the first context added will set the entry point/profile for the whole Multi-Frame context,
+ * and all other entry points and profiles can be rejected from being added.
+ * Return values:
+ * VA_STATUS_SUCCESS - operation successful, context was added.
+ * VA_STATUS_ERROR_OPERATION_FAILED - something unexpected happened - the application has to close
+ * the current mf_context and associated contexts and start working with new ones.
+ * VA_STATUS_ERROR_INVALID_CONTEXT - ContextID is invalid, meaning:
+ * 1 - mf_context is not a valid context, or
+ * 2 - the driver can't support different VAEntrypoint or VAProfile values simultaneously
+ * and the current context contradicts a previously added one; the application can continue with the current mf_context
+ * and the other contexts passed to this call, and the rejected context can continue working in stand-alone
+ * mode or in another mf_context.
+ * VA_STATUS_ERROR_UNSUPPORTED_ENTRYPOINT - the particular context being added was created with an
+ * unsupported VAEntrypoint. The application can continue with the current mf_context
+ * and the other contexts passed to this call; the rejected context can continue working in stand-alone
+ * mode.
+ * VA_STATUS_ERROR_UNSUPPORTED_PROFILE - the current context's particular VAEntrypoint is supported
+ * but the VAProfile is not supported. The application can continue with the current mf_context
+ * and the other contexts passed to this call; the rejected context can continue working in stand-alone
+ * mode.
+ * dpy: display adapter.
+ * context: context being associated with Multi-Frame context.
+ * mf_context: multi-frame context used to associate contexts for multi-frame submission.
+ */
+VAStatus vaMFAddContext (
+ VADisplay dpy,
+ VAMFContextID mf_context,
+ VAContextID context
+);
+
+/**
+ * vaMFReleaseContext - Removes a context from multi-frame submission and its
+ * association with the multi-frame context.
+ * After the association is removed, vaEndPicture will submit tasks, not vaMFSubmit.
+ * Return values:
+ * VA_STATUS_SUCCESS - operation successful, context was removed.
+ * VA_STATUS_ERROR_OPERATION_FAILED - something unexpected happened;
+ * the application needs to destroy this VAMFContextID and all associated VAContextID's.
+ * dpy: display
+ * mf_context: VAMFContextID from which the context is removed
+ * context: VAContextID to be removed
+ */
+VAStatus vaMFReleaseContext (
+ VADisplay dpy,
+ VAMFContextID mf_context,
+ VAContextID context
+);
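+
+/*
+ * Illustrative sketch (not part of the API above): probing for multi-frame
+ * support and associating two already-created encode contexts, following the
+ * return codes documented above; ctx_a and ctx_b are placeholders.
+ *
+ *     VAMFContextID mf_context;
+ *     if (vaCreateMFContext(dpy, &mf_context) == VA_STATUS_SUCCESS) {
+ *         if (vaMFAddContext(dpy, mf_context, ctx_a) != VA_STATUS_SUCCESS ||
+ *             vaMFAddContext(dpy, mf_context, ctx_b) != VA_STATUS_SUCCESS) {
+ *             // fall back to stand-alone submission via vaEndPicture()
+ *         }
+ *     }
+ */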
+
+/**
+ * Buffers
+ * Buffers are used to pass various types of data from the
+ * client to the server. The server maintains a data store
+ * for each buffer created, and the client identifies a buffer
+ * through a unique buffer id assigned by the server.
+ */
+
+typedef VAGenericID VABufferID;
+
+typedef enum
+{
+ VAPictureParameterBufferType = 0,
+ VAIQMatrixBufferType = 1,
+ VABitPlaneBufferType = 2,
+ VASliceGroupMapBufferType = 3,
+ VASliceParameterBufferType = 4,
+ VASliceDataBufferType = 5,
+ VAMacroblockParameterBufferType = 6,
+ VAResidualDataBufferType = 7,
+ VADeblockingParameterBufferType = 8,
+ VAImageBufferType = 9,
+ VAProtectedSliceDataBufferType = 10,
+ VAQMatrixBufferType = 11,
+ VAHuffmanTableBufferType = 12,
+ VAProbabilityBufferType = 13,
+
+/* Following are encode buffer types */
+ VAEncCodedBufferType = 21,
+ VAEncSequenceParameterBufferType = 22,
+ VAEncPictureParameterBufferType = 23,
+ VAEncSliceParameterBufferType = 24,
+ VAEncPackedHeaderParameterBufferType = 25,
+ VAEncPackedHeaderDataBufferType = 26,
+ VAEncMiscParameterBufferType = 27,
+ VAEncMacroblockParameterBufferType = 28,
+ VAEncMacroblockMapBufferType = 29,
+
+ /**
+ * \brief Encoding QP buffer
+ *
+ * This buffer contains QP per MB for encoding. Currently
+ * VAEncQPBufferH264 is defined for H.264 encoding, see
+ * #VAEncQPBufferH264 for details
+ */
+ VAEncQPBufferType = 30,
+/* Following are video processing buffer types */
+ /**
+ * \brief Video processing pipeline parameter buffer.
+ *
+ * This buffer describes the video processing pipeline. See
+ * #VAProcPipelineParameterBuffer for details.
+ */
+ VAProcPipelineParameterBufferType = 41,
+ /**
+ * \brief Video filter parameter buffer.
+ *
+ * This buffer describes the video filter parameters. All buffers
+ * inherit from #VAProcFilterParameterBufferBase, thus including
+ * a unique filter buffer type.
+ *
+ * The default buffer used by most filters is #VAProcFilterParameterBuffer.
+ * Filters requiring advanced parameters include, but are not limited to,
+ * deinterlacing (#VAProcFilterParameterBufferDeinterlacing),
+ * color balance (#VAProcFilterParameterBufferColorBalance), etc.
+ */
+ VAProcFilterParameterBufferType = 42,
+ /**
+ * \brief FEI specific buffer types
+ */
+ VAEncFEIMVBufferType = 43,
+ VAEncFEIMBCodeBufferType = 44,
+ VAEncFEIDistortionBufferType = 45,
+ VAEncFEIMBControlBufferType = 46,
+ VAEncFEIMVPredictorBufferType = 47,
+ VAStatsStatisticsParameterBufferType = 48,
+ /** \brief Statistics output for VAEntrypointStats progressive and top field of interlaced case*/
+ VAStatsStatisticsBufferType = 49,
+ /** \brief Statistics output for VAEntrypointStats bottom field of interlaced case*/
+ VAStatsStatisticsBottomFieldBufferType = 50,
+ VAStatsMVBufferType = 51,
+ VAStatsMVPredictorBufferType = 52,
+ /** Forces MBs to be non-skip for encode. It is a per-MB control buffer. The width of the MB map
+ * surface is (width of the picture in MB units) * 1 byte, a multiple of 64 bytes.
+ * The height is (height of the picture in MB units). The picture is either a
+ * frame or a non-interleaved top or bottom field. If the application provides this
+ * surface, it will override the "skipCheckDisable" setting in VAEncMiscParameterEncQuality.
+ */
+ VAEncMacroblockDisableSkipMapBufferType = 53,
+ /**
+ * \brief HEVC FEI CTB level cmd buffer
+ * It is CTB-level information for future usage.
+ */
+ VAEncFEICTBCmdBufferType = 54,
+ /**
+ * \brief HEVC FEI CU level data buffer
+ * It is CU-level information for future usage.
+ */
+ VAEncFEICURecordBufferType = 55,
+ /** Decode stream-out buffer, intermediate data of decoding; it may include MV, MB mode, etc.
+ * It can be used to detect motion and analyze the frame content. */
+ VADecodeStreamoutBufferType = 56,
+
+ /** \brief HEVC Decoding Subset Parameter buffer type
+ *
+ * The subsets parameter buffer is a concatenation of one or multiple
+ * subset entry point offsets. All the offset values are laid out one
+ * by one according to slice order, with the first slice segment first, the second
+ * slice segment second, etc. The entry number is indicated by the parameter
+ * \ref num_entry_point_offsets. And the first entry position of the entry
+ * point offsets for any slice segment is indicated by parameter
+ * entry_offset_to_subset_array in VAPictureParameterBufferHEVC data structure.
+ */
+ VASubsetsParameterBufferType = 57,
+
+ VABufferTypeMax
+} VABufferType;
+
+/**
+ * Processing rate parameter for encode.
+ */
+typedef struct _VAProcessingRateParameterEnc {
+ /** \brief Profile level */
+ uint8_t level_idc;
+ uint8_t reserved[3];
+ /** \brief quality level. When set to 0, default quality
+ * level is used.
+ */
+ uint32_t quality_level;
+ /** \brief Period between I frames. */
+ uint32_t intra_period;
+ /** \brief Period between I/P frames. */
+ uint32_t ip_period;
+} VAProcessingRateParameterEnc;
+
+/**
+ * Processing rate parameter for decode.
+ */
+typedef struct _VAProcessingRateParameterDec {
+ /** \brief Profile level */
+ uint8_t level_idc;
+ uint8_t reserved0[3];
+ uint32_t reserved;
+} VAProcessingRateParameterDec;
+
+typedef struct _VAProcessingRateParameter {
+ union {
+ VAProcessingRateParameterEnc proc_buf_enc;
+ VAProcessingRateParameterDec proc_buf_dec;
+ };
+} VAProcessingRateParameter;
+
+/**
+ * \brief Queries processing rate for the supplied config.
+ *
+ * This function queries the processing rate based on parameters in
+ * \c proc_buf for the given \c config. Upon successful return, the processing
+ * rate value will be stored in \c processing_rate. Processing rate is
+ * specified as the number of macroblocks/CTU per second.
+ *
+ * If NULL is passed to the \c proc_buf, the default processing rate for the
+ * given configuration will be returned.
+ *
+ * @param[in] dpy the VA display
+ * @param[in] config the config identifying a codec or a video
+ * processing pipeline
+ * @param[in] proc_buf the buffer that contains the parameters for
+ either the encode or decode processing rate
+ * @param[out] processing_rate processing rate in number of macroblocks per
+ second constrained by parameters specified in proc_buf
+ *
+ */
+VAStatus
+vaQueryProcessingRate(
+ VADisplay dpy,
+ VAConfigID config,
+ VAProcessingRateParameter *proc_buf,
+ unsigned int *processing_rate
+);
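+
+/*
+ * Illustrative sketch (not part of the API above): querying the decode
+ * processing rate for a level 4.1 stream (level_idc 41); dpy and config are
+ * placeholders.
+ *
+ *     VAProcessingRateParameter param = {0};
+ *     param.proc_buf_dec.level_idc = 41;
+ *     unsigned int mb_per_sec = 0;
+ *     vaQueryProcessingRate(dpy, config, &param, &mb_per_sec);
+ */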
+
+typedef enum
+{
+ VAEncMiscParameterTypeFrameRate = 0,
+ VAEncMiscParameterTypeRateControl = 1,
+ VAEncMiscParameterTypeMaxSliceSize = 2,
+ VAEncMiscParameterTypeAIR = 3,
+ /** \brief Buffer type used to express a maximum frame size (in bits). */
+ VAEncMiscParameterTypeMaxFrameSize = 4,
+ /** \brief Buffer type used for HRD parameters. */
+ VAEncMiscParameterTypeHRD = 5,
+ VAEncMiscParameterTypeQualityLevel = 6,
+ /** \brief Buffer type used for Rolling intra refresh */
+ VAEncMiscParameterTypeRIR = 7,
+ /** \brief Buffer type used for quantization parameters, it's per-sequence parameter*/
+ VAEncMiscParameterTypeQuantization = 8,
+ /** \brief Buffer type used for sending skip frame parameters to the encoder's
+ * rate control, when the user has externally skipped frames. */
+ VAEncMiscParameterTypeSkipFrame = 9,
+ /** \brief Buffer type used for region-of-interest (ROI) parameters. */
+ VAEncMiscParameterTypeROI = 10,
+ /** \brief Buffer type used to express a maximum frame size (in bytes) settings for multiple pass. */
+ VAEncMiscParameterTypeMultiPassFrameSize = 11,
+ /** \brief Buffer type used for temporal layer structure */
+ VAEncMiscParameterTypeTemporalLayerStructure = 12,
+ /** \brief Buffer type used for dirty region-of-interest (ROI) parameters. */
+ VAEncMiscParameterTypeDirtyRect = 13,
+ /** \brief Buffer type used for parallel BRC parameters. */
+ VAEncMiscParameterTypeParallelBRC = 14,
+ /** \brief Set MB partition mode mask and half-pel/quarter-pel motion search */
+ VAEncMiscParameterTypeSubMbPartPel = 15,
+ /** \brief set encode quality tuning */
+ VAEncMiscParameterTypeEncQuality = 16,
+ /** \brief Buffer type used for encoder rounding offset parameters. */
+ VAEncMiscParameterTypeCustomRoundingControl = 17,
+ /** \brief Buffer type used for FEI input frame level parameters */
+ VAEncMiscParameterTypeFEIFrameControl = 18,
+ /** \brief Encode extension buffer, e.g. MPEG-2 sequence extension data */
+ VAEncMiscParameterTypeExtensionData = 19
+} VAEncMiscParameterType;
+
+/** \brief Packed header type. */
+typedef enum {
+ /** \brief Packed sequence header. */
+ VAEncPackedHeaderSequence = 1,
+ /** \brief Packed picture header. */
+ VAEncPackedHeaderPicture = 2,
+ /** \brief Packed slice header. */
+ VAEncPackedHeaderSlice = 3,
+ /**
+ * \brief Packed raw header.
+ *
+ * A packed raw data header can be used by the client to insert a header
+ * into the bitstream data buffer at the point it is passed; the driver
+ * will handle the raw packed header based on the "has_emulation_bytes" field
+ * in the packed header parameter structure.
+ */
+ VAEncPackedHeaderRawData = 4,
+ /**
+ * \brief Misc packed header. See codec-specific definitions.
+ *
+ * @deprecated
+ * This is a deprecated packed header type. All applications can use
+ * \c VAEncPackedHeaderRawData to insert a codec-specific packed header
+ */
+ VAEncPackedHeaderMiscMask va_deprecated_enum = 0x80000000,
+} VAEncPackedHeaderType;
+
+/** \brief Packed header parameter. */
+typedef struct _VAEncPackedHeaderParameterBuffer {
+ /** Type of the packed header buffer. See #VAEncPackedHeaderType. */
+ uint32_t type;
+ /** \brief Size of the #VAEncPackedHeaderDataBuffer in bits. */
+ uint32_t bit_length;
+ /** \brief Flag: buffer contains start code emulation prevention bytes? */
+ uint8_t has_emulation_bytes;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncPackedHeaderParameterBuffer;
+
+/**
+ * For the application, e.g. to set a new bitrate:
+ * VABufferID buf_id;
+ * VAEncMiscParameterBuffer *misc_param;
+ * VAEncMiscParameterRateControl *misc_rate_ctrl;
+ *
+ * vaCreateBuffer(dpy, context, VAEncMiscParameterBufferType,
+ * sizeof(VAEncMiscParameterBuffer) + sizeof(VAEncMiscParameterRateControl),
+ * 1, NULL, &buf_id);
+ *
+ * vaMapBuffer(dpy,buf_id,(void **)&misc_param);
+ * misc_param->type = VAEncMiscParameterTypeRateControl;
+ * misc_rate_ctrl= (VAEncMiscParameterRateControl *)misc_param->data;
+ * misc_rate_ctrl->bits_per_second = 6400000;
+ * vaUnmapBuffer(dpy, buf_id);
+ * vaRenderPicture(dpy, context, &buf_id, 1);
+ */
+typedef struct _VAEncMiscParameterBuffer
+{
+ VAEncMiscParameterType type;
+ uint32_t data[];
+} VAEncMiscParameterBuffer;
+
+/** \brief Temporal layer Structure*/
+typedef struct _VAEncMiscParameterTemporalLayerStructure
+{
+ /** \brief The number of temporal layers */
+ uint32_t number_of_layers;
+ /** \brief The length of the array defining frame layer membership. Should be 1-32 */
+ uint32_t periodicity;
+ /**
+ * \brief The array indicating the layer id for each frame
+ *
+ * The layer id for the first frame in a coded sequence is always 0, so layer_id[] specifies the layer
+ * ids for frames starting from the 2nd frame.
+ */
+ uint32_t layer_id[32];
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncMiscParameterTemporalLayerStructure;
+
+
+/** \brief Rate control parameters */
+typedef struct _VAEncMiscParameterRateControl
+{
+ /** The maximum bit-rate which the rate controller should generate. */
+ uint32_t bits_per_second;
+ /** The target bit-rate which the rate controller should generate, as a percentage of the
+ * maximum bit-rate.
+ *
+ * In CBR mode this value is ignored (treated as 100%).
+ */
+ uint32_t target_percentage;
+ /** Rate control window size in milliseconds.
+ *
+ * The rate controller will attempt to guarantee that the target and maximum bit-rates are
+ * correct over this window.
+ */
+ uint32_t window_size;
+ /** Initial quantiser value used at the start of the stream.
+ *
+ * Ignored if set to zero.
+ */
+ uint32_t initial_qp;
+ /** Minimum quantiser value to use.
+ *
+ * The quantiser will not go below the value - if this limit is hit, the output bitrate may
+ * be lower than the target. Ignored if set to zero.
+ */
+ uint32_t min_qp;
+ /** Basic unit size.
+ *
+ * Only used by some drivers - see driver documentation for details. Set to zero if unused.
+ */
+ uint32_t basic_unit_size;
+ union
+ {
+ struct
+ {
+ /** Force rate controller reset.
+ *
+ * The next frame will be treated as the start of a new stream, with all rate
+ * controller state reset to its initial values.
+ */
+ uint32_t reset : 1;
+ /** Disable frame skip in rate control mode. */
+ uint32_t disable_frame_skip : 1;
+ /** Disable bit stuffing in rate control mode. */
+ uint32_t disable_bit_stuffing : 1;
+ /** Macroblock-level rate control.
+ *
+ * 0: use default, 1: always enable, 2: always disable, other: reserved.
+ *
+ * This feature is only available if VAConfigAttribRateControl has the
+ * \ref VA_RC_MB bit set.
+ */
+ uint32_t mb_rate_control : 4;
+ /** The temporal layer that these rate control parameters apply to. */
+ uint32_t temporal_id : 8;
+ /** Ensure that intra frames also conform to the constant frame size. */
+ uint32_t cfs_I_frames : 1;
+ /** Enable parallel rate control for hierarchical B frames.
+ *
+ * See \ref VA_RC_PARALLEL.
+ */
+ uint32_t enable_parallel_brc : 1;
+ uint32_t enable_dynamic_scaling : 1;
+ /** Frame tolerance mode.
+ *
+ * Indicates the tolerance the application has to variations in the frame size.
+ * For example, wireless display scenarios may require very steady bit rate to
+ * reduce buffering time. It affects the rate control algorithm used,
+ * but may or may not have an effect based on the combination of other BRC
+ * parameters. Only valid when the driver reports support for
+ * #VAConfigAttribFrameSizeToleranceSupport.
+ *
+ * equals 0 -- normal mode;
+ * equals 1 -- maps to sliding window;
+ * equals 2 -- maps to low delay mode;
+ * other -- invalid.
+ */
+ uint32_t frame_tolerance_mode : 2;
+ /** Reserved for future use, must be zero. */
+ uint32_t reserved : 12;
+ } bits;
+ uint32_t value;
+ } rc_flags;
+ /** Initial quality factor used in ICQ mode.
+ *
+ * This value must be between 1 and 51.
+ * This value will be deprecated in the future; use quality_factor instead.
+ */
+ uint32_t ICQ_quality_factor;
+ /** Maximum quantiser value to use.
+ *
+ * The quantiser will not go above this value - if this limit is hit, the output bitrate
+ * may exceed the target. Ignored if set to zero.
+ */
+ uint32_t max_qp;
+ /** Quality factor
+ *
+ * The range will be different for different codecs.
+ */
+ uint32_t quality_factor;
+ /** Reserved bytes for future use, must be zero. */
+ uint32_t va_reserved[VA_PADDING_MEDIUM - 3];
+} VAEncMiscParameterRateControl;
+
+/** Encode framerate parameters.
+ *
+ * Sets the encode framerate used by the rate controller. This should be
+ * provided in all modes using a bitrate target (variable framerate is not
+ * supported).
+ */
+typedef struct _VAEncMiscParameterFrameRate
+{
+ /** Encode framerate.
+ *
+ * The framerate is specified as a number of frames per second, as a
+ * fraction. The denominator of the fraction is given in the top half
+ * (the high two bytes) of the framerate field, and the numerator is
+ * given in the bottom half (the low two bytes).
+ *
+ * That is:
+ * denominator = framerate >> 16 & 0xffff;
+ * numerator = framerate & 0xffff;
+ * fps = numerator / denominator;
+ *
+ * For example, if framerate is set to (100 << 16 | 750), this is
+ * 750 / 100, hence 7.5fps.
+ *
+ * If the denominator is zero (the high two bytes are both zero) then
+ * it takes the value one instead, so the framerate is just the integer
+ * in the low 2 bytes.
+ */
+ uint32_t framerate;
+ union
+ {
+ struct
+ {
+ /** The temporal layer that these framerate parameters apply to. */
+ uint32_t temporal_id : 8;
+ /** Reserved for future use, must be zero. */
+ uint32_t reserved : 24;
+ } bits;
+ uint32_t value;
+ } framerate_flags;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncMiscParameterFrameRate;
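+
+/*
+ * Illustrative sketch (not part of the API above): packing a 29.97 fps
+ * (30000/1001) rate into the framerate field as described above, with the
+ * denominator in the high 16 bits and the numerator in the low 16 bits.
+ *
+ *     VAEncMiscParameterFrameRate fr = {0};
+ *     fr.framerate = (1001u << 16) | 30000u;    // 30000 / 1001 = 29.97 fps
+ *     fr.framerate_flags.bits.temporal_id = 0;  // base temporal layer
+ */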
+
+/**
+ * Allow a maximum slice size to be specified (in bits).
+ * The encoder will attempt to make sure that individual slices do not exceed this size,
+ * or to signal the application if the slice size exceeds this size; see the "status" of VACodedBufferSegment.
+ */
+typedef struct _VAEncMiscParameterMaxSliceSize
+{
+ uint32_t max_slice_size;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncMiscParameterMaxSliceSize;
+
+typedef struct _VAEncMiscParameterAIR
+{
+ uint32_t air_num_mbs;
+ uint32_t air_threshold;
+ uint32_t air_auto; /* if set to 1 then hardware auto-tunes the AIR threshold */
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncMiscParameterAIR;
+
+/*
+ * \brief Rolling intra refresh data structure for encoding.
+ */
+typedef struct _VAEncMiscParameterRIR
+{
+ union
+ {
+ struct
+ /**
+ * \brief Indicate if intra refresh is enabled in column/row.
+ *
+ * App should query VAConfigAttribEncIntraRefresh to confirm RIR support
+ * by the driver before sending this structure.
+ */
+ {
+ /* \brief enable RIR in column */
+ uint32_t enable_rir_column : 1;
+ /* \brief enable RIR in row */
+ uint32_t enable_rir_row : 1;
+ uint32_t reserved : 30;
+ } bits;
+ uint32_t value;
+ } rir_flags;
+ /**
+ * \brief Indicates the column or row location in MB. It is ignored if
+ * rir_flags is 0.
+ */
+ uint16_t intra_insertion_location;
+ /**
+ * \brief Indicates the number of columns or rows in MB. It is ignored if
+ * rir_flags is 0.
+ */
+ uint16_t intra_insert_size;
+ /**
+ * \brief indicates the Qp difference for inserted intra columns or rows.
+ * App can use this to adjust intra Qp based on bitrate & max frame size.
+ */
+ uint8_t qp_delta_for_inserted_intra;
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncMiscParameterRIR;
+
+/** HRD / VBV buffering parameters for encoding.
+ *
+ * This sets the HRD / VBV parameters which will be used by the rate
+ * controller for encoding. It should be specified in modes using a bitrate
+ * target when the buffering of the output stream needs to be constrained.
+ *
+ * If not provided, the encoder may use arbitrary amounts of buffering.
+ */
+typedef struct _VAEncMiscParameterHRD
+{
+ /** The initial fullness of the HRD coded picture buffer, in bits.
+ *
+ * This sets how full the CPB is when encoding begins - that is, how much
+ * buffering will happen on the decoder side before the first frame.
+ * The CPB fullness will be reset to this value after any rate control
+ * reset (a change in parameters or an explicit reset).
+ *
+ * For H.264, it should match the value of initial_cpb_removal_delay in
+ * buffering_period SEI messages.
+ */
+ uint32_t initial_buffer_fullness;
+ /** The HRD coded picture buffer size, in bits.
+ *
+ * For H.264, it should match the value of cpb_size_value_minus1 in the VUI
+ * parameters.
+ */
+ uint32_t buffer_size;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncMiscParameterHRD;
+
+/**
+ * \brief Defines a maximum frame size (in bits).
+ *
+ * This misc parameter buffer defines the maximum size of a frame (in
+ * bits). The encoder will try to make sure that each frame does not
+ * exceed this size. Otherwise, if the frame size exceeds this size,
+ * the \c status flag of #VACodedBufferSegment will contain
+ * #VA_CODED_BUF_STATUS_FRAME_SIZE_OVERFLOW.
+ */
+typedef struct _VAEncMiscParameterBufferMaxFrameSize {
+ /** \brief Type. Shall be set to #VAEncMiscParameterTypeMaxFrameSize. */
+ /** duplicated with VAEncMiscParameterBuffer, should be deprecated*/
+ va_deprecated VAEncMiscParameterType type;
+ /** \brief Maximum size of a frame (in bits). */
+ uint32_t max_frame_size;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncMiscParameterBufferMaxFrameSize;
+
+/**
+ * \brief Maximum frame size (in bytes) settings for multi-pass encoding.
+ *
+ * This misc parameter buffer defines the maximum size of a frame (in
+ * bytes) for multi-pass encoding. Currently only the AVC encoder supports
+ * this setting in the multi-pass case. If the frame size exceeds
+ * this limit, the encoder will run more PAK passes, adjusting the QP value
+ * to control the frame size.
+ */
+typedef struct _VAEncMiscParameterBufferMultiPassFrameSize {
+ /** \brief Type. Shall be set to #VAEncMiscParameterTypeMultiPassMaxFrameSize. */
+ /** duplicated with VAEncMiscParameterBuffer, should be deprecated*/
+ va_deprecated VAEncMiscParameterType type;
+ /** \brief Maximum size of a frame (in byte) */
+ uint32_t max_frame_size;
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t reserved;
+ /** \brief number of passes, every pass has different QP, currently AVC encoder can support up to 4 passes */
+ uint8_t num_passes;
+ /** \brief delta QP list for every pass */
+ uint8_t *delta_qp;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ unsigned long va_reserved[VA_PADDING_LOW];
+} VAEncMiscParameterBufferMultiPassFrameSize;
+
+/**
+ * \brief Encoding quality level.
+ *
+ * The encoding quality could be set through this structure, if the implementation
+ * supports multiple quality levels. The quality level set through this structure is
+ * persistent over the entire coded sequence, or until a new structure is sent.
+ * The quality level range can be queried through the VAConfigAttribEncQualityRange
+ * attribute. A lower value means higher quality, and a value of 1 represents the highest
+ * quality. The quality level setting is used as a trade-off between quality and speed/power
+ * consumption, with higher quality corresponding to lower speed and higher power consumption.
+ */
+typedef struct _VAEncMiscParameterBufferQualityLevel {
+ /** \brief Encoding quality level setting. When set to 0, default quality
+ * level is used.
+ */
+ uint32_t quality_level;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncMiscParameterBufferQualityLevel;
+
+/**
+ * \brief Quantization settings for encoding.
+ *
+ * Some encoders support special types of quantization such as trellis, and this structure
+ * can be used by the app to control these special types of quantization by the encoder.
+ */
+typedef struct _VAEncMiscParameterQuantization
+{
+ union
+ {
+ /* if no flags are set then quantization is determined by the driver */
+ struct
+ {
+ /* \brief disable trellis for all frames/fields */
+ uint32_t disable_trellis : 1;
+ /* \brief enable trellis for I frames/fields */
+ uint32_t enable_trellis_I : 1;
+ /* \brief enable trellis for P frames/fields */
+ uint32_t enable_trellis_P : 1;
+ /* \brief enable trellis for B frames/fields */
+ uint32_t enable_trellis_B : 1;
+ uint32_t reserved : 28;
+ } bits;
+ uint32_t value;
+ } quantization_flags;
+ uint32_t va_reserved;
+} VAEncMiscParameterQuantization;
+
+/**
+ * \brief Encoding skip frame.
+ *
+ * The application may choose to skip frames externally to the encoder (e.g. drop completely or
+ * code them entirely as skips). For rate control purposes the encoder will need to know the size and number
+ * of skipped frames. Skipped frame(s) indicated through this structure are applicable only to the
+ * current frame. It is allowed for the application to still send in packed headers for the driver to
+ * pack, although no frame will be encoded (e.g. for HW to encrypt the frame).
+ */
+typedef struct _VAEncMiscParameterSkipFrame {
+ /** \brief Indicates skip frames as below.
+ * 0: Encode as normal, no skip.
+ * 1: One or more frames were skipped prior to the current frame, encode the current frame as normal.
+ * 2: The current frame is to be skipped, do not encode it but pack/encrypt the packed header contents
+ * (all except VAEncPackedHeaderSlice) which could contain actual frame contents (e.g. pack the frame
+ * in VAEncPackedHeaderPicture). */
+ uint8_t skip_frame_flag;
+ /** \brief The number of frames skipped prior to the current frame. Valid when skip_frame_flag = 1. */
+ uint8_t num_skip_frames;
+ /** \brief When skip_frame_flag = 1, the size of the skipped frames in bits. When skip_frame_flag = 2,
+ * the size of the current skipped frame that is to be packed/encrypted in bits. */
+ uint32_t size_skip_frames;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncMiscParameterSkipFrame;
+
+/**
+ * \brief Encoding region-of-interest (ROI).
+ *
+ * The encoding ROI can be set through VAEncMiscParameterBufferROI, if the implementation
+ * supports ROI input. The ROI set through this structure is applicable only to the
+ * current frame or field, so must be sent every frame or field to be applied. The number of
+ * supported ROIs can be queried through the VAConfigAttribEncROI. The encoder will use the
+ * ROI information to adjust the QP values of the MB's that fall within the ROIs.
+ */
+typedef struct _VAEncROI
+{
+ /** \brief Defines the ROI boundary in pixels, the driver will map it to appropriate
+ * codec coding units. It is relative to frame coordinates for the frame case and
+ * to field coordinates for the field case. */
+ VARectangle roi_rectangle;
+ /**
+ * \brief ROI value
+ *
+ * \ref roi_value specifies ROI delta QP or ROI priority.
+ * -- ROI delta QP is the value that will be added on top of the frame level QP.
+ * -- ROI priority specifies the priority of a region, it can be positive (more important)
+ * or negative (less important) values and is compared with non-ROI region (taken as value 0),
+ * E.g. ROI region with \ref roi_value -3 is less important than the non-ROI region (\ref roi_value
+ * implied to be 0) which is less important than ROI region with roi_value +2. For overlapping
+ * regions, the roi_value that is first in the ROI array will have priority.
+ *
+ * \ref roi_value always specifies ROI delta QP when VAConfigAttribRateControl == VA_RC_CQP, no matter
+ * the value of \c roi_value_is_qp_delta in #VAEncMiscParameterBufferROI.
+ *
+ * \ref roi_value depends on \c roi_value_is_qp_delta in #VAEncMiscParameterBufferROI when
+ * VAConfigAttribRateControl != VA_RC_CQP. \ref roi_value specifies ROI_delta QP if \c roi_value_is_qp_delta
+ * in VAEncMiscParameterBufferROI is 1, otherwise \ref roi_value specifies ROI priority.
+ */
+ int8_t roi_value;
+} VAEncROI;
+
+typedef struct _VAEncMiscParameterBufferROI {
+ /** \brief Number of ROIs being sent.*/
+ uint32_t num_roi;
+
+ /** \brief Valid when VAConfigAttribRateControl != VA_RC_CQP, then the encoder's
+ * rate control will determine actual delta QPs. Specifies the max/min allowed delta
+ * QPs. */
+ int8_t max_delta_qp;
+ int8_t min_delta_qp;
+
+ /** \brief Pointer to a VAEncROI array with num_roi elements. It is relative to frame
+ * coordinates for the frame case and to field coordinates for the field case.*/
+ VAEncROI *roi;
+ union {
+ struct {
+ /**
+ * \brief An indication for roi value.
+ *
+ * \ref roi_value_is_qp_delta equal to 1 indicates \c roi_value in #VAEncROI should
+ * be used as ROI delta QP. \ref roi_value_is_qp_delta equal to 0 indicates \c roi_value
+ * in #VAEncROI should be used as ROI priority.
+ *
+ * \ref roi_value_is_qp_delta is only available when VAConfigAttribRateControl != VA_RC_CQP,
+ * the setting must comply with \c roi_rc_priority_support and \c roi_rc_qp_delta_support in
+ * #VAConfigAttribValEncROI. The underlying driver should ignore this field
+ * when VAConfigAttribRateControl == VA_RC_CQP.
+ */
+ uint32_t roi_value_is_qp_delta : 1;
+ uint32_t reserved : 31;
+ } bits;
+ uint32_t value;
+ } roi_flags;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncMiscParameterBufferROI;
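+
+/*
+ * Illustrative sketch: filling in a single-ROI parameter set as described
+ * above. The rectangle values are arbitrary and the surrounding misc
+ * parameter buffer plumbing is omitted.
+ *
+ *   VAEncROI roi;
+ *   memset(&roi, 0, sizeof(roi));
+ *   roi.roi_rectangle.x      = 0;
+ *   roi.roi_rectangle.y      = 0;
+ *   roi.roi_rectangle.width  = 320;
+ *   roi.roi_rectangle.height = 240;
+ *   roi.roi_value = -3;                      // delta QP (see roi_value_is_qp_delta)
+ *
+ *   VAEncMiscParameterBufferROI roi_param;
+ *   memset(&roi_param, 0, sizeof(roi_param));
+ *   roi_param.num_roi      = 1;
+ *   roi_param.max_delta_qp = 3;
+ *   roi_param.min_delta_qp = -3;
+ *   roi_param.roi          = &roi;
+ *   roi_param.roi_flags.bits.roi_value_is_qp_delta = 1;
+ */
+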
+/*
+ * \brief Dirty rectangle data structure for encoding.
+ *
+ * The encoding dirty rect can be set through VAEncMiscParameterBufferDirtyRect, if the
+ * implementation supports dirty rect input. The rect set through this structure is applicable
+ * only to the current frame or field, so must be sent every frame or field to be applied.
+ * The number of supported rects can be queried through the VAConfigAttribEncDirtyRect. The
+ * encoder will use the rect information to know which rectangle areas have changed, while the
+ * areas not covered by dirty rect rectangles are assumed to have not changed compared to the
+ * previous picture. The encoder may do some internal optimizations.
+ */
+typedef struct _VAEncMiscParameterBufferDirtyRect
+{
+ /** \brief Number of rectangles being sent.*/
+ uint32_t num_roi_rectangle;
+
+ /** \brief Pointer to a VARectangle array with num_roi_rectangle elements.*/
+ VARectangle *roi_rectangle;
+} VAEncMiscParameterBufferDirtyRect;
+
+/** \brief Attribute value for VAConfigAttribEncParallelRateControl */
+typedef struct _VAEncMiscParameterParallelRateControl {
+ /** \brief Number of layers. */
+ uint32_t num_layers;
+ /** \brief Number of B frames per layer per GOP.
+ *
+ * It should be allocated by the application, and its array size is num_layers.
+ * num_b_in_gop[0] is the number of regular B frames which refer only to I or P frames. */
+ uint32_t *num_b_in_gop;
+} VAEncMiscParameterParallelRateControl;
+
+/** Per-frame encoder quality controls; once set they will persist for all future frames
+ * until they are updated again. */
+typedef struct _VAEncMiscParameterEncQuality
+{
+ union
+ {
+ struct
+ {
+ /** Use raw frames for reference instead of reconstructed frames.
+ * It only impacts the motion estimation (ME) stage and does not affect the MC stage,
+ * so the reconstructed picture will still match the decode side. */
+ uint32_t useRawPicForRef : 1;
+ /** Disables the skip check for the ME stage; it will increase the bitstream size
+ * but will improve the quality. */
+ uint32_t skipCheckDisable : 1;
+ /** Indicates app will override default driver FTQ settings using FTQEnable.
+ * FTQ is forward transform quantization */
+ uint32_t FTQOverride : 1;
+ /** Enables/disables FTQ. */
+ uint32_t FTQEnable : 1;
+ /** Indicates the app will provide the Skip Threshold LUT to use when FTQ is
+ * enabled (FTQSkipThresholdLUT), else default driver thresholds will be used. */
+ uint32_t FTQSkipThresholdLUTInput : 1;
+ /** Indicates the app will provide the Skip Threshold LUT to use when FTQ is
+ * disabled (NonFTQSkipThresholdLUT), else default driver thresholds will be used. */
+ uint32_t NonFTQSkipThresholdLUTInput : 1;
+ uint32_t ReservedBit : 1;
+ /** Control to enable the ME mode decision algorithm to bias to fewer B Direct/Skip types.
+ * Applies only to B frames, all other frames will ignore this setting. */
+ uint32_t directBiasAdjustmentEnable : 1;
+ /** Enables global motion bias. Global motion estimation is also called HME (Hierarchical Motion Estimation).
+ * HME is used to handle large motions and to avoid local minima in the video encoding process:
+ * the input and reference pictures are downscaled and ME is run on them, and the result is used
+ * as a predictor for the next HME level or for ME. The current interface divides HME into 3 levels:
+ * UltraHME, SuperHME and HME. The result of UltraHME is the input of SuperHME, the result of SuperHME
+ * is the input of HME, and the HME result is the input of ME. This switch lets HMEMVCostScalingFactor
+ * change the HME bias inside the RDO stage. */
+ uint32_t globalMotionBiasAdjustmentEnable : 1;
+ /** MV cost scaling ratio for HME predictors. It is used when
+ * globalMotionBiasAdjustmentEnable == 1, else it is ignored. Values are:
+ * 0: set MV cost to be 0 for HME predictor.
+ * 1: scale MV cost to be 1/2 of the default value for HME predictor.
+ * 2: scale MV cost to be 1/4 of the default value for HME predictor.
+ * 3: scale MV cost to be 1/8 of the default value for HME predictor. */
+ uint32_t HMEMVCostScalingFactor : 2;
+ /** Disable HME; if it is disabled, SuperHME and UltraHME should also be disabled. */
+ uint32_t HMEDisable : 1;
+ /** Disable SuperHME; if it is disabled, UltraHME should also be disabled. */
+ uint32_t SuperHMEDisable : 1;
+ /** Disable UltraHME. */
+ uint32_t UltraHMEDisable : 1;
+ /** Disable panic mode. Panic mode happens when there is an extreme BRC (bit rate control)
+ * requirement and the frame size cannot achieve the BRC target. When panic mode is triggered,
+ * coefficients will be set to zero. Disabling panic mode will improve quality but will impact BRC. */
+ uint32_t PanicModeDisable : 1;
+ /** Force RepartitionCheck
+ * 0: DEFAULT - follow driver default settings.
+ * 1: FORCE_ENABLE - enable this feature totally for all cases.
+ * 2: FORCE_DISABLE - disable this feature totally for all cases. */
+ uint32_t ForceRepartitionCheck : 2;
+
+ };
+ uint32_t encControls;
+ };
+
+ /** Maps QP to skip thresholds when FTQ is enabled. Valid range is 0-255. */
+ uint8_t FTQSkipThresholdLUT[52];
+ /** Maps QP to skip thresholds when FTQ is disabled. Valid range is 0-65535. */
+ uint16_t NonFTQSkipThresholdLUT[52];
+
+ uint32_t reserved[VA_PADDING_HIGH]; // Reserved for future use.
+
+} VAEncMiscParameterEncQuality;
+
+/**
+ * \brief Custom Encoder Rounding Offset Control.
+ * Application may use this structure to set customized rounding
+ * offset parameters for quantization.
+ * Valid when \c VAConfigAttribCustomRoundingControl equals 1.
+ */
+typedef struct _VAEncMiscParameterCustomRoundingControl
+{
+ union {
+ struct {
+ /** \brief Enable customized rounding offset for intra blocks.
+ * If 0, default value would be taken by driver for intra
+ * rounding offset.
+ */
+ uint32_t enable_custom_rouding_intra : 1 ;
+
+ /** \brief Intra rounding offset
+ * Ignored if \c enable_custom_rouding_intra equals 0.
+ */
+ uint32_t rounding_offset_intra : 7;
+
+ /** \brief Enable customized rounding offset for inter blocks.
+ * If 0, default value would be taken by driver for inter
+ * rounding offset.
+ */
+ uint32_t enable_custom_rounding_inter : 1 ;
+
+ /** \brief Inter rounding offset
+ * Ignored if \c enable_custom_rounding_inter equals 0.
+ */
+ uint32_t rounding_offset_inter : 7;
+
+ /* Reserved */
+ uint32_t reserved :16;
+ } bits;
+ uint32_t value;
+ } rounding_offset_setting;
+} VAEncMiscParameterCustomRoundingControl;
+/**
+ * There will be cases where the bitstream buffer will not have enough room to hold
+ * the data for the entire slice, and the following flags will be used in the slice
+ * parameter to signal to the server for the possible cases.
+ * If a slice parameter buffer and slice data buffer pair is sent to the server with
+ * the slice data partially in the slice data buffer (BEGIN and MIDDLE cases below),
+ * then a slice parameter and data buffer needs to be sent again to complete this slice.
+ */
+#define VA_SLICE_DATA_FLAG_ALL 0x00 /* whole slice is in the buffer */
+#define VA_SLICE_DATA_FLAG_BEGIN 0x01 /* The beginning of the slice is in the buffer but the end is not */
+#define VA_SLICE_DATA_FLAG_MIDDLE 0x02 /* Neither beginning nor end of the slice is in the buffer */
+#define VA_SLICE_DATA_FLAG_END 0x04 /* end of the slice is in the buffer */
+
+/* Codec-independent Slice Parameter Buffer base */
+typedef struct _VASliceParameterBufferBase
+{
+ uint32_t slice_data_size; /* number of bytes in the slice data buffer for this slice */
+ uint32_t slice_data_offset; /* the offset to the first byte of slice data */
+ uint32_t slice_data_flag; /* see VA_SLICE_DATA_FLAG_XXX definitions */
+} VASliceParameterBufferBase;
+
+/**********************************
+ * JPEG common data structures
+ **********************************/
+/**
+ * \brief Huffman table for JPEG decoding.
+ *
+ * This structure holds the complete Huffman tables. This is an
+ * aggregation of all Huffman table (DHT) segments maintained by the
+ * application. i.e. up to 2 Huffman tables are stored in there for
+ * baseline profile.
+ *
+ * The #load_huffman_table array can be used as a hint to notify the
+ * VA driver implementation about which table(s) actually changed
+ * since the last submission of this buffer.
+ */
+typedef struct _VAHuffmanTableBufferJPEGBaseline {
+ /** \brief Specifies which #huffman_table is valid. */
+ uint8_t load_huffman_table[2];
+ /** \brief Huffman tables indexed by table identifier (Th). */
+ struct {
+ /** @name DC table (up to 12 categories) */
+ /**@{*/
+ /** \brief Number of Huffman codes of length i + 1 (Li). */
+ uint8_t num_dc_codes[16];
+ /** \brief Value associated with each Huffman code (Vij). */
+ uint8_t dc_values[12];
+ /**@}*/
+ /** @name AC table (2 special codes + up to 16 * 10 codes) */
+ /**@{*/
+ /** \brief Number of Huffman codes of length i + 1 (Li). */
+ uint8_t num_ac_codes[16];
+ /** \brief Value associated with each Huffman code (Vij). */
+ uint8_t ac_values[162];
+ /** \brief Padding to 4-byte boundaries. Must be set to zero. */
+ uint8_t pad[2];
+ /**@}*/
+ } huffman_table[2];
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAHuffmanTableBufferJPEGBaseline;
+
+/****************************
+ * MPEG-2 data structures
+ ****************************/
+
+/* MPEG-2 Picture Parameter Buffer */
+/*
+ * For each frame or field, and before any slice data, a single
+ * picture parameter buffer must be send.
+ * picture parameter buffer must be sent.
+typedef struct _VAPictureParameterBufferMPEG2
+{
+ uint16_t horizontal_size;
+ uint16_t vertical_size;
+ VASurfaceID forward_reference_picture;
+ VASurfaceID backward_reference_picture;
+ /* meanings of the following fields are the same as in the standard */
+ int32_t picture_coding_type;
+ int32_t f_code; /* pack all four fcode into this */
+ union {
+ struct {
+ uint32_t intra_dc_precision : 2;
+ uint32_t picture_structure : 2;
+ uint32_t top_field_first : 1;
+ uint32_t frame_pred_frame_dct : 1;
+ uint32_t concealment_motion_vectors : 1;
+ uint32_t q_scale_type : 1;
+ uint32_t intra_vlc_format : 1;
+ uint32_t alternate_scan : 1;
+ uint32_t repeat_first_field : 1;
+ uint32_t progressive_frame : 1;
+ uint32_t is_first_field : 1; /* indicate whether the current field
+ * is the first field for field picture
+ */
+ } bits;
+ uint32_t value;
+ } picture_coding_extension;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAPictureParameterBufferMPEG2;
+
+/** MPEG-2 Inverse Quantization Matrix Buffer */
+typedef struct _VAIQMatrixBufferMPEG2
+{
+ /** \brief Same as the MPEG-2 bitstream syntax element. */
+ int32_t load_intra_quantiser_matrix;
+ /** \brief Same as the MPEG-2 bitstream syntax element. */
+ int32_t load_non_intra_quantiser_matrix;
+ /** \brief Same as the MPEG-2 bitstream syntax element. */
+ int32_t load_chroma_intra_quantiser_matrix;
+ /** \brief Same as the MPEG-2 bitstream syntax element. */
+ int32_t load_chroma_non_intra_quantiser_matrix;
+ /** \brief Luminance intra matrix, in zig-zag scan order. */
+ uint8_t intra_quantiser_matrix[64];
+ /** \brief Luminance non-intra matrix, in zig-zag scan order. */
+ uint8_t non_intra_quantiser_matrix[64];
+ /** \brief Chroma intra matrix, in zig-zag scan order. */
+ uint8_t chroma_intra_quantiser_matrix[64];
+ /** \brief Chroma non-intra matrix, in zig-zag scan order. */
+ uint8_t chroma_non_intra_quantiser_matrix[64];
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAIQMatrixBufferMPEG2;
+
+/** MPEG-2 Slice Parameter Buffer */
+typedef struct _VASliceParameterBufferMPEG2
+{
+ uint32_t slice_data_size;/* number of bytes in the slice data buffer for this slice */
+ uint32_t slice_data_offset;/* the offset to the first byte of slice data */
+ uint32_t slice_data_flag; /* see VA_SLICE_DATA_FLAG_XXX definitions */
+ uint32_t macroblock_offset;/* the offset to the first bit of MB from the first byte of slice data */
+ uint32_t slice_horizontal_position;
+ uint32_t slice_vertical_position;
+ int32_t quantiser_scale_code;
+ int32_t intra_slice_flag;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VASliceParameterBufferMPEG2;
+
+/** MPEG-2 Macroblock Parameter Buffer */
+typedef struct _VAMacroblockParameterBufferMPEG2
+{
+ uint16_t macroblock_address;
+ /*
+ * macroblock_address (in raster scan order)
+ * top-left: 0
+ * bottom-right: picture-height-in-mb*picture-width-in-mb - 1
+ */
+ uint8_t macroblock_type; /* see definition below */
+ union {
+ struct {
+ uint32_t frame_motion_type : 2;
+ uint32_t field_motion_type : 2;
+ uint32_t dct_type : 1;
+ } bits;
+ uint32_t value;
+ } macroblock_modes;
+ uint8_t motion_vertical_field_select;
+ /*
+ * motion_vertical_field_select:
+ * see section 6.3.17.2 in the spec
+ * only the lower 4 bits are used
+ * bit 0: first vector forward
+ * bit 1: first vector backward
+ * bit 2: second vector forward
+ * bit 3: second vector backward
+ */
+ int16_t PMV[2][2][2]; /* see Table 7-7 in the spec */
+ uint16_t coded_block_pattern;
+ /*
+ * The bitplanes for coded_block_pattern are described
+ * in Figure 6.10-12 in the spec
+ */
+
+ /* Number of skipped macroblocks after this macroblock */
+ uint16_t num_skipped_macroblocks;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAMacroblockParameterBufferMPEG2;
+
+/*
+ * OR'd flags for macroblock_type (section 6.3.17.1 in the spec)
+ */
+#define VA_MB_TYPE_MOTION_FORWARD 0x02
+#define VA_MB_TYPE_MOTION_BACKWARD 0x04
+#define VA_MB_TYPE_MOTION_PATTERN 0x08
+#define VA_MB_TYPE_MOTION_INTRA 0x10
+
+/**
+ * MPEG-2 Residual Data Buffer
+ * For each macroblock, there will be 64 shorts (16-bit) in the
+ * residual data buffer
+ */
+
+/****************************
+ * MPEG-4 Part 2 data structures
+ ****************************/
+
+/* MPEG-4 Picture Parameter Buffer */
+/*
+ * For each frame or field, and before any slice data, a single
+ * picture parameter buffer must be sent.
+ */
+typedef struct _VAPictureParameterBufferMPEG4
+{
+ uint16_t vop_width;
+ uint16_t vop_height;
+ VASurfaceID forward_reference_picture;
+ VASurfaceID backward_reference_picture;
+ union {
+ struct {
+ uint32_t short_video_header : 1;
+ uint32_t chroma_format : 2;
+ uint32_t interlaced : 1;
+ uint32_t obmc_disable : 1;
+ uint32_t sprite_enable : 2;
+ uint32_t sprite_warping_accuracy : 2;
+ uint32_t quant_type : 1;
+ uint32_t quarter_sample : 1;
+ uint32_t data_partitioned : 1;
+ uint32_t reversible_vlc : 1;
+ uint32_t resync_marker_disable : 1;
+ } bits;
+ uint32_t value;
+ } vol_fields;
+ uint8_t no_of_sprite_warping_points;
+ int16_t sprite_trajectory_du[3];
+ int16_t sprite_trajectory_dv[3];
+ uint8_t quant_precision;
+ union {
+ struct {
+ uint32_t vop_coding_type : 2;
+ uint32_t backward_reference_vop_coding_type : 2;
+ uint32_t vop_rounding_type : 1;
+ uint32_t intra_dc_vlc_thr : 3;
+ uint32_t top_field_first : 1;
+ uint32_t alternate_vertical_scan_flag : 1;
+ } bits;
+ uint32_t value;
+ } vop_fields;
+ uint8_t vop_fcode_forward;
+ uint8_t vop_fcode_backward;
+ uint16_t vop_time_increment_resolution;
+ /* short header related */
+ uint8_t num_gobs_in_vop;
+ uint8_t num_macroblocks_in_gob;
+ /* for direct mode prediction */
+ int16_t TRB;
+ int16_t TRD;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAPictureParameterBufferMPEG4;
+
+/** MPEG-4 Inverse Quantization Matrix Buffer */
+typedef struct _VAIQMatrixBufferMPEG4
+{
+ /** Same as the MPEG-4:2 bitstream syntax element. */
+ int32_t load_intra_quant_mat;
+ /** Same as the MPEG-4:2 bitstream syntax element. */
+ int32_t load_non_intra_quant_mat;
+ /** The matrix for intra blocks, in zig-zag scan order. */
+ uint8_t intra_quant_mat[64];
+ /** The matrix for non-intra blocks, in zig-zag scan order. */
+ uint8_t non_intra_quant_mat[64];
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAIQMatrixBufferMPEG4;
+
+/** MPEG-4 Slice Parameter Buffer */
+typedef struct _VASliceParameterBufferMPEG4
+{
+ uint32_t slice_data_size;/* number of bytes in the slice data buffer for this slice */
+ uint32_t slice_data_offset;/* the offset to the first byte of slice data */
+ uint32_t slice_data_flag; /* see VA_SLICE_DATA_FLAG_XXX definitions */
+ uint32_t macroblock_offset;/* the offset to the first bit of MB from the first byte of slice data */
+ uint32_t macroblock_number;
+ int32_t quant_scale;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VASliceParameterBufferMPEG4;
+
+/**
+ VC-1 data structures
+*/
+
+typedef enum /* see 7.1.1.32 */
+{
+ VAMvMode1Mv = 0,
+ VAMvMode1MvHalfPel = 1,
+ VAMvMode1MvHalfPelBilinear = 2,
+ VAMvModeMixedMv = 3,
+ VAMvModeIntensityCompensation = 4
+} VAMvModeVC1;
+
+/** VC-1 Picture Parameter Buffer */
+/*
+ * For each picture, and before any slice data, a picture parameter
+ * buffer must be send. Multiple picture parameter buffers may be
+ * buffer must be sent. Multiple picture parameter buffers may be
+ * apply to all slice data that follow it until a new picture
+ * parameter buffer is sent.
+ *
+ * Notes:
+ * pic_quantizer_type should be set to the applicable quantizer
+ * type as defined by QUANTIZER (J.1.19) and either
+ * PQUANTIZER (7.1.1.8) or PQINDEX (7.1.1.6)
+ */
+typedef struct _VAPictureParameterBufferVC1
+{
+ VASurfaceID forward_reference_picture;
+ VASurfaceID backward_reference_picture;
+ /* if out-of-loop post-processing is done on the render
+ target, then we need to keep the in-loop decoded
+ picture as a reference picture */
+ VASurfaceID inloop_decoded_picture;
+
+ /* sequence layer for AP or meta data for SP and MP */
+ union {
+ struct {
+ uint32_t pulldown : 1; /* SEQUENCE_LAYER::PULLDOWN */
+ uint32_t interlace : 1; /* SEQUENCE_LAYER::INTERLACE */
+ uint32_t tfcntrflag : 1; /* SEQUENCE_LAYER::TFCNTRFLAG */
+ uint32_t finterpflag : 1; /* SEQUENCE_LAYER::FINTERPFLAG */
+ uint32_t psf : 1; /* SEQUENCE_LAYER::PSF */
+ uint32_t multires : 1; /* METADATA::MULTIRES */
+ uint32_t overlap : 1; /* METADATA::OVERLAP */
+ uint32_t syncmarker : 1; /* METADATA::SYNCMARKER */
+ uint32_t rangered : 1; /* METADATA::RANGERED */
+ uint32_t max_b_frames : 3; /* METADATA::MAXBFRAMES */
+ uint32_t profile : 2; /* SEQUENCE_LAYER::PROFILE or The MSB of METADATA::PROFILE */
+ } bits;
+ uint32_t value;
+ } sequence_fields;
+
+ uint16_t coded_width; /* ENTRY_POINT_LAYER::CODED_WIDTH */
+ uint16_t coded_height; /* ENTRY_POINT_LAYER::CODED_HEIGHT */
+ union {
+ struct {
+ uint32_t broken_link : 1; /* ENTRY_POINT_LAYER::BROKEN_LINK */
+ uint32_t closed_entry : 1; /* ENTRY_POINT_LAYER::CLOSED_ENTRY */
+ uint32_t panscan_flag : 1; /* ENTRY_POINT_LAYER::PANSCAN_FLAG */
+ uint32_t loopfilter : 1; /* ENTRY_POINT_LAYER::LOOPFILTER */
+ } bits;
+ uint32_t value;
+ } entrypoint_fields;
+ uint8_t conditional_overlap_flag; /* ENTRY_POINT_LAYER::CONDOVER */
+ uint8_t fast_uvmc_flag; /* ENTRY_POINT_LAYER::FASTUVMC */
+ union {
+ struct {
+ uint32_t luma_flag : 1; /* ENTRY_POINT_LAYER::RANGE_MAPY_FLAG */
+ uint32_t luma : 3; /* ENTRY_POINT_LAYER::RANGE_MAPY */
+ uint32_t chroma_flag : 1; /* ENTRY_POINT_LAYER::RANGE_MAPUV_FLAG */
+ uint32_t chroma : 3; /* ENTRY_POINT_LAYER::RANGE_MAPUV */
+ } bits;
+ uint32_t value;
+ } range_mapping_fields;
+
+ uint8_t b_picture_fraction; /* Index for PICTURE_LAYER::BFRACTION value in Table 40 (7.1.1.14) */
+ uint8_t cbp_table; /* PICTURE_LAYER::CBPTAB/ICBPTAB */
+ uint8_t mb_mode_table; /* PICTURE_LAYER::MBMODETAB */
+ uint8_t range_reduction_frame;/* PICTURE_LAYER::RANGEREDFRM */
+ uint8_t rounding_control; /* PICTURE_LAYER::RNDCTRL */
+ uint8_t post_processing; /* PICTURE_LAYER::POSTPROC */
+ uint8_t picture_resolution_index; /* PICTURE_LAYER::RESPIC */
+ uint8_t luma_scale; /* PICTURE_LAYER::LUMSCALE */
+ uint8_t luma_shift; /* PICTURE_LAYER::LUMSHIFT */
+
+ union {
+ struct {
+ uint32_t picture_type : 3; /* PICTURE_LAYER::PTYPE */
+ uint32_t frame_coding_mode : 3; /* PICTURE_LAYER::FCM */
+ uint32_t top_field_first : 1; /* PICTURE_LAYER::TFF */
+ uint32_t is_first_field : 1; /* set to 1 if it is the first field */
+ uint32_t intensity_compensation : 1; /* PICTURE_LAYER::INTCOMP */
+ } bits;
+ uint32_t value;
+ } picture_fields;
+ union {
+ struct {
+ uint32_t mv_type_mb : 1; /* PICTURE::MVTYPEMB */
+ uint32_t direct_mb : 1; /* PICTURE::DIRECTMB */
+ uint32_t skip_mb : 1; /* PICTURE::SKIPMB */
+ uint32_t field_tx : 1; /* PICTURE::FIELDTX */
+ uint32_t forward_mb : 1; /* PICTURE::FORWARDMB */
+ uint32_t ac_pred : 1; /* PICTURE::ACPRED */
+ uint32_t overflags : 1; /* PICTURE::OVERFLAGS */
+ } flags;
+ uint32_t value;
+ } raw_coding;
+ union {
+ struct {
+ uint32_t bp_mv_type_mb : 1; /* PICTURE::MVTYPEMB */
+ uint32_t bp_direct_mb : 1; /* PICTURE::DIRECTMB */
+ uint32_t bp_skip_mb : 1; /* PICTURE::SKIPMB */
+ uint32_t bp_field_tx : 1; /* PICTURE::FIELDTX */
+ uint32_t bp_forward_mb : 1; /* PICTURE::FORWARDMB */
+ uint32_t bp_ac_pred : 1; /* PICTURE::ACPRED */
+ uint32_t bp_overflags : 1; /* PICTURE::OVERFLAGS */
+ } flags;
+ uint32_t value;
+ } bitplane_present; /* signal what bitplane is being passed via the bitplane buffer */
+ union {
+ struct {
+ uint32_t reference_distance_flag : 1;/* PICTURE_LAYER::REFDIST_FLAG */
+ uint32_t reference_distance : 5;/* PICTURE_LAYER::REFDIST */
+ uint32_t num_reference_pictures: 1;/* PICTURE_LAYER::NUMREF */
+ uint32_t reference_field_pic_indicator : 1;/* PICTURE_LAYER::REFFIELD */
+ } bits;
+ uint32_t value;
+ } reference_fields;
+ union {
+ struct {
+ uint32_t mv_mode : 3; /* PICTURE_LAYER::MVMODE */
+ uint32_t mv_mode2 : 3; /* PICTURE_LAYER::MVMODE2 */
+ uint32_t mv_table : 3; /* PICTURE_LAYER::MVTAB/IMVTAB */
+ uint32_t two_mv_block_pattern_table: 2; /* PICTURE_LAYER::2MVBPTAB */
+ uint32_t four_mv_switch : 1; /* PICTURE_LAYER::4MVSWITCH */
+ uint32_t four_mv_block_pattern_table : 2; /* PICTURE_LAYER::4MVBPTAB */
+ uint32_t extended_mv_flag : 1; /* ENTRY_POINT_LAYER::EXTENDED_MV */
+ uint32_t extended_mv_range : 2; /* PICTURE_LAYER::MVRANGE */
+ uint32_t extended_dmv_flag : 1; /* ENTRY_POINT_LAYER::EXTENDED_DMV */
+ uint32_t extended_dmv_range : 2; /* PICTURE_LAYER::DMVRANGE */
+ } bits;
+ uint32_t value;
+ } mv_fields;
+ union {
+ struct {
+ uint32_t dquant : 2; /* ENTRY_POINT_LAYER::DQUANT */
+ uint32_t quantizer : 2; /* ENTRY_POINT_LAYER::QUANTIZER */
+ uint32_t half_qp : 1; /* PICTURE_LAYER::HALFQP */
+ uint32_t pic_quantizer_scale : 5;/* PICTURE_LAYER::PQUANT */
+ uint32_t pic_quantizer_type : 1;/* PICTURE_LAYER::PQUANTIZER */
+ uint32_t dq_frame : 1; /* VOPDQUANT::DQUANTFRM */
+ uint32_t dq_profile : 2; /* VOPDQUANT::DQPROFILE */
+ uint32_t dq_sb_edge : 2; /* VOPDQUANT::DQSBEDGE */
+ uint32_t dq_db_edge : 2; /* VOPDQUANT::DQDBEDGE */
+ uint32_t dq_binary_level : 1; /* VOPDQUANT::DQBILEVEL */
+ uint32_t alt_pic_quantizer : 5;/* VOPDQUANT::ALTPQUANT */
+ } bits;
+ uint32_t value;
+ } pic_quantizer_fields;
+ union {
+ struct {
+ uint32_t variable_sized_transform_flag : 1;/* ENTRY_POINT_LAYER::VSTRANSFORM */
+ uint32_t mb_level_transform_type_flag : 1;/* PICTURE_LAYER::TTMBF */
+ uint32_t frame_level_transform_type : 2;/* PICTURE_LAYER::TTFRM */
+ uint32_t transform_ac_codingset_idx1 : 2;/* PICTURE_LAYER::TRANSACFRM */
+ uint32_t transform_ac_codingset_idx2 : 2;/* PICTURE_LAYER::TRANSACFRM2 */
+ uint32_t intra_transform_dc_table : 1;/* PICTURE_LAYER::TRANSDCTAB */
+ } bits;
+ uint32_t value;
+ } transform_fields;
+
+ uint8_t luma_scale2; /* PICTURE_LAYER::LUMSCALE2 */
+ uint8_t luma_shift2; /* PICTURE_LAYER::LUMSHIFT2 */
+ uint8_t intensity_compensation_field; /* Index for PICTURE_LAYER::INTCOMPFIELD value in Table 109 (9.1.1.48) */
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_MEDIUM - 1];
+} VAPictureParameterBufferVC1;
+
+/** VC-1 Bitplane Buffer
+There will be at most three bitplanes coded in any picture header. To send
+the bitplane data more efficiently, each byte is divided in two nibbles, with
+each nibble carrying three bitplanes for one macroblock. The following table
+shows the bitplane data arrangement within each nibble based on the picture
+type.
+
+Picture Type    Bit3    Bit2        Bit1      Bit0
+I or BI                 OVERFLAGS   ACPRED    FIELDTX
+P                       MVTYPEMB    SKIPMB    DIRECTMB
+B                       FORWARDMB   SKIPMB    DIRECTMB
+
+Within each byte, the lower nibble is for the first MB and the upper nibble is
+for the second MB. E.g. the lower nibble of the first byte in the bitplane
+buffer is for Macroblock #1 and the upper nibble of the first byte is for
+Macroblock #2 in the first row.
+*/
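+
+/*
+ * Illustrative sketch: extracting the raw bitplane bits for macroblock index
+ * mb (raster order) from a bitplane buffer laid out as described above
+ * (bitplane_buf and mb are placeholders). The meaning of each bit follows
+ * the table, e.g. for P pictures bit2/bit1/bit0 are MVTYPEMB/SKIPMB/DIRECTMB.
+ *
+ *   uint8_t byte   = bitplane_buf[mb / 2];
+ *   uint8_t nibble = (mb & 1) ? (byte >> 4) : (byte & 0x0f); // lower nibble: first MB
+ *   uint8_t bit0   =  nibble       & 1;
+ *   uint8_t bit1   = (nibble >> 1) & 1;
+ *   uint8_t bit2   = (nibble >> 2) & 1;
+ */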
+
+/* VC-1 Slice Parameter Buffer */
+typedef struct _VASliceParameterBufferVC1
+{
+ uint32_t slice_data_size;/* number of bytes in the slice data buffer for this slice */
+ uint32_t slice_data_offset;/* the offset to the first byte of slice data */
+ uint32_t slice_data_flag; /* see VA_SLICE_DATA_FLAG_XXX definitions */
+ uint32_t macroblock_offset;/* the offset to the first bit of MB from the first byte of slice data */
+ uint32_t slice_vertical_position;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VASliceParameterBufferVC1;
+
+/* VC-1 Slice Data Buffer */
+/*
+This is simply a buffer containing raw bitstream bytes
+*/
+
+/****************************
+ * H.264/AVC data structures
+ ****************************/
+
+typedef struct _VAPictureH264
+{
+ VASurfaceID picture_id;
+ uint32_t frame_idx;
+ uint32_t flags;
+ int32_t TopFieldOrderCnt;
+ int32_t BottomFieldOrderCnt;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAPictureH264;
+/* flags in VAPictureH264 could be OR of the following */
+#define VA_PICTURE_H264_INVALID 0x00000001
+#define VA_PICTURE_H264_TOP_FIELD 0x00000002
+#define VA_PICTURE_H264_BOTTOM_FIELD 0x00000004
+#define VA_PICTURE_H264_SHORT_TERM_REFERENCE 0x00000008
+#define VA_PICTURE_H264_LONG_TERM_REFERENCE 0x00000010
+
+/** H.264 Picture Parameter Buffer */
+/*
+ * For each picture, and before any slice data, a single
+ * picture parameter buffer must be sent.
+ */
+typedef struct _VAPictureParameterBufferH264
+{
+ VAPictureH264 CurrPic;
+ VAPictureH264 ReferenceFrames[16]; /* in DPB */
+ uint16_t picture_width_in_mbs_minus1;
+ uint16_t picture_height_in_mbs_minus1;
+ uint8_t bit_depth_luma_minus8;
+ uint8_t bit_depth_chroma_minus8;
+ uint8_t num_ref_frames;
+ union {
+ struct {
+ uint32_t chroma_format_idc : 2;
+ uint32_t residual_colour_transform_flag : 1; /* Renamed to separate_colour_plane_flag in newer standard versions. */
+ uint32_t gaps_in_frame_num_value_allowed_flag : 1;
+ uint32_t frame_mbs_only_flag : 1;
+ uint32_t mb_adaptive_frame_field_flag : 1;
+ uint32_t direct_8x8_inference_flag : 1;
+ uint32_t MinLumaBiPredSize8x8 : 1; /* see A.3.3.2 */
+ uint32_t log2_max_frame_num_minus4 : 4;
+ uint32_t pic_order_cnt_type : 2;
+ uint32_t log2_max_pic_order_cnt_lsb_minus4 : 4;
+ uint32_t delta_pic_order_always_zero_flag : 1;
+ } bits;
+ uint32_t value;
+ } seq_fields;
+ // FMO is not supported.
+ va_deprecated uint8_t num_slice_groups_minus1;
+ va_deprecated uint8_t slice_group_map_type;
+ va_deprecated uint16_t slice_group_change_rate_minus1;
+ int8_t pic_init_qp_minus26;
+ int8_t pic_init_qs_minus26;
+ int8_t chroma_qp_index_offset;
+ int8_t second_chroma_qp_index_offset;
+ union {
+ struct {
+ uint32_t entropy_coding_mode_flag : 1;
+ uint32_t weighted_pred_flag : 1;
+ uint32_t weighted_bipred_idc : 2;
+ uint32_t transform_8x8_mode_flag : 1;
+ uint32_t field_pic_flag : 1;
+ uint32_t constrained_intra_pred_flag : 1;
+ uint32_t pic_order_present_flag : 1; /* Renamed to bottom_field_pic_order_in_frame_present_flag in newer standard versions. */
+ uint32_t deblocking_filter_control_present_flag : 1;
+ uint32_t redundant_pic_cnt_present_flag : 1;
+ uint32_t reference_pic_flag : 1; /* nal_ref_idc != 0 */
+ } bits;
+ uint32_t value;
+ } pic_fields;
+ uint16_t frame_num;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_MEDIUM];
+} VAPictureParameterBufferH264;
+
+/** H.264 Inverse Quantization Matrix Buffer */
+typedef struct _VAIQMatrixBufferH264
+{
+ /** \brief 4x4 scaling list, in raster scan order. */
+ uint8_t ScalingList4x4[6][16];
+ /** \brief 8x8 scaling list, in raster scan order. */
+ uint8_t ScalingList8x8[2][64];
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAIQMatrixBufferH264;
+
+/** H.264 Slice Parameter Buffer */
+typedef struct _VASliceParameterBufferH264
+{
+ uint32_t slice_data_size;/* number of bytes in the slice data buffer for this slice */
+ /** \brief Byte offset to the NAL Header Unit for this slice. */
+ uint32_t slice_data_offset;
+ uint32_t slice_data_flag; /* see VA_SLICE_DATA_FLAG_XXX definitions */
+ /**
+ * \brief Bit offset from NAL Header Unit to the beginning of slice_data().
+ *
+ * This bit offset is relative to and includes the NAL unit byte
+ * and represents the number of bits parsed in the slice_header()
+ * after the removal of any emulation prevention bytes in
+ * there. However, the slice data buffer passed to the hardware is
+ * the original bitstream, thus including any emulation prevention
+ * bytes.
+ */
+ uint16_t slice_data_bit_offset;
+ uint16_t first_mb_in_slice;
+ uint8_t slice_type;
+ uint8_t direct_spatial_mv_pred_flag;
+ /**
+ * H264/AVC syntax element
+ *
+ * if num_ref_idx_active_override_flag equals 0, host decoder should
+ * set its value to num_ref_idx_l0_default_active_minus1.
+ */
+ uint8_t num_ref_idx_l0_active_minus1;
+ /**
+ * H264/AVC syntax element
+ *
+ * if num_ref_idx_active_override_flag equals 0, host decoder should
+ * set its value to num_ref_idx_l1_default_active_minus1.
+ */
+ uint8_t num_ref_idx_l1_active_minus1;
+ uint8_t cabac_init_idc;
+ int8_t slice_qp_delta;
+ uint8_t disable_deblocking_filter_idc;
+ int8_t slice_alpha_c0_offset_div2;
+ int8_t slice_beta_offset_div2;
+ VAPictureH264 RefPicList0[32]; /* See 8.2.4.2 */
+ VAPictureH264 RefPicList1[32]; /* See 8.2.4.2 */
+ uint8_t luma_log2_weight_denom;
+ uint8_t chroma_log2_weight_denom;
+ uint8_t luma_weight_l0_flag;
+ int16_t luma_weight_l0[32];
+ int16_t luma_offset_l0[32];
+ uint8_t chroma_weight_l0_flag;
+ int16_t chroma_weight_l0[32][2];
+ int16_t chroma_offset_l0[32][2];
+ uint8_t luma_weight_l1_flag;
+ int16_t luma_weight_l1[32];
+ int16_t luma_offset_l1[32];
+ uint8_t chroma_weight_l1_flag;
+ int16_t chroma_weight_l1[32][2];
+ int16_t chroma_offset_l1[32][2];
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VASliceParameterBufferH264;
+
+/****************************
+ * Common encode data structures
+ ****************************/
+typedef enum
+{
+ VAEncPictureTypeIntra = 0,
+ VAEncPictureTypePredictive = 1,
+ VAEncPictureTypeBidirectional = 2,
+} VAEncPictureType;
+
+/**
+ * \brief Encode Slice Parameter Buffer.
+ *
+ * @deprecated
+ * This is a deprecated encode slice parameter buffer; all applications
+ * can use VAEncSliceParameterBufferXXX (XXX = MPEG2, HEVC, H264, JPEG) instead.
+ */
+typedef struct _VAEncSliceParameterBuffer
+{
+ uint32_t start_row_number; /* starting MB row number for this slice */
+ uint32_t slice_height; /* slice height measured in MB */
+ union {
+ struct {
+ uint32_t is_intra : 1;
+ uint32_t disable_deblocking_filter_idc : 2;
+ uint32_t uses_long_term_ref :1;
+ uint32_t is_long_term_ref :1;
+ } bits;
+ uint32_t value;
+ } slice_flags;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncSliceParameterBuffer;
+
+
+/****************************
+ * H.263 specific encode data structures
+ ****************************/
+
+typedef struct _VAEncSequenceParameterBufferH263
+{
+ uint32_t intra_period;
+ uint32_t bits_per_second;
+ uint32_t frame_rate;
+ uint32_t initial_qp;
+ uint32_t min_qp;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncSequenceParameterBufferH263;
+
+typedef struct _VAEncPictureParameterBufferH263
+{
+ VASurfaceID reference_picture;
+ VASurfaceID reconstructed_picture;
+ VABufferID coded_buf;
+ uint16_t picture_width;
+ uint16_t picture_height;
+ VAEncPictureType picture_type;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncPictureParameterBufferH263;
+
+/****************************
+ * MPEG-4 specific encode data structures
+ ****************************/
+
+typedef struct _VAEncSequenceParameterBufferMPEG4
+{
+ uint8_t profile_and_level_indication;
+ uint32_t intra_period;
+ uint32_t video_object_layer_width;
+ uint32_t video_object_layer_height;
+ uint32_t vop_time_increment_resolution;
+ uint32_t fixed_vop_rate;
+ uint32_t fixed_vop_time_increment;
+ uint32_t bits_per_second;
+ uint32_t frame_rate;
+ uint32_t initial_qp;
+ uint32_t min_qp;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncSequenceParameterBufferMPEG4;
+
+typedef struct _VAEncPictureParameterBufferMPEG4
+{
+ VASurfaceID reference_picture;
+ VASurfaceID reconstructed_picture;
+ VABufferID coded_buf;
+ uint16_t picture_width;
+ uint16_t picture_height;
+ uint32_t modulo_time_base; /* number of 1s */
+ uint32_t vop_time_increment;
+ VAEncPictureType picture_type;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAEncPictureParameterBufferMPEG4;
+
+
+
+/** Buffer functions */
+
+/**
+ * Creates a buffer for "num_elements" elements of "size" bytes and
+ * initializes it with "data".
+ * If "data" is null, then the contents of the buffer data store
+ * are undefined.
+ * Basically there are two ways to get buffer data to the server side. One is
+ * to call vaCreateBuffer() with a non-null "data", which results in the data being
+ * copied to the data store on the server side. A different method that
+ * eliminates this copy is to pass null as "data" when calling vaCreateBuffer(),
+ * and then use vaMapBuffer() to map the data store from the server side to the
+ * client address space for access.
+ * The user must call vaDestroyBuffer() to destroy a buffer.
+ * Note: image buffers are created by the library, not the client. Please see
+ * vaCreateImage on how image buffers are managed.
+ */
+VAStatus vaCreateBuffer (
+ VADisplay dpy,
+ VAContextID context,
+ VABufferType type, /* in */
+ unsigned int size, /* in */
+ unsigned int num_elements, /* in */
+ void *data, /* in */
+ VABufferID *buf_id /* out */
+);
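+
+/*
+ * Illustrative sketch of the two paths described above (dpy, ctx, data and
+ * data_size are placeholders assumed to be set up by the application):
+ *
+ *   // Path 1: copy the data at creation time.
+ *   VABufferID buf;
+ *   vaCreateBuffer(dpy, ctx, VASliceDataBufferType, data_size, 1, data, &buf);
+ *
+ *   // Path 2: create with NULL data, then map and fill the store in place.
+ *   VABufferID buf2;
+ *   void *ptr;
+ *   vaCreateBuffer(dpy, ctx, VASliceDataBufferType, data_size, 1, NULL, &buf2);
+ *   vaMapBuffer(dpy, buf2, &ptr);
+ *   memcpy(ptr, data, data_size);
+ *   vaUnmapBuffer(dpy, buf2);
+ */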
+
+/**
+ * Creates a 2D buffer for the given width and height, returning unit_size, pitch and buf_id.
+ * For a per-MB QP buffer it will return the unit_size for one MB or LCU and the pitch used for
+ * alignment; vaMapBuffer() can then be called with this buffer ID to get a virtual address.
+ * E.g. for AVC 1080p encode (1920x1088) the size in MBs is 120x68, but inside the driver it may be
+ * aligned to 256, with one byte representing one QP. Calling this function may then return
+ * unit_size = 1 and pitch = 256. Call vaMapBuffer() to get the virtual address (pBuf) and read/write
+ * the memory as a 2D array: the store is 256x68, the application may only use 120x68, and
+ * pBuf + 256 is the start of the next line.
+ * Different driver implementations may return different unit_size and pitch values.
+ */
+VAStatus vaCreateBuffer2(
+ VADisplay dpy,
+ VAContextID context,
+ VABufferType type,
+ unsigned int width,
+ unsigned int height,
+ unsigned int *unit_size,
+ unsigned int *pitch,
+ VABufferID *buf_id
+);
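+
+/*
+ * Illustrative sketch of the per-MB QP example above (dpy, ctx, x, y and qp
+ * are placeholders; VAEncQPBufferType is one possible 2D buffer type):
+ *
+ *   unsigned int unit_size, pitch;
+ *   VABufferID qp_buf;
+ *   uint8_t *pbuf;
+ *   vaCreateBuffer2(dpy, ctx, VAEncQPBufferType, 120, 68, &unit_size, &pitch, &qp_buf);
+ *   vaMapBuffer(dpy, qp_buf, (void **)&pbuf);
+ *   pbuf[y * pitch + x * unit_size] = qp;   // per-MB QP at (x, y), assuming unit_size == 1
+ *   vaUnmapBuffer(dpy, qp_buf);
+ */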
+
+/**
+ * Convey to the server how many valid elements are in the buffer.
+ * e.g. if multiple slice parameters are being held in a single buffer,
+ * this will communicate to the server the number of slice parameters
+ * that are valid in the buffer.
+ */
+VAStatus vaBufferSetNumElements (
+ VADisplay dpy,
+ VABufferID buf_id, /* in */
+ unsigned int num_elements /* in */
+);
+
+
+/**
+ * device independent data structure for codedbuffer
+ */
+
+/*
+ * PICTURE_AVE_QP(bit7-0): The average Qp value used during this frame
+ * LARGE_SLICE(bit8):At least one slice in the current frame was large
+ * enough for the encoder to attempt to limit its size.
+ * SLICE_OVERFLOW(bit9): At least one slice in the current frame has
+ * exceeded the maximum slice size specified.
+ * BITRATE_OVERFLOW(bit10): The peak bitrate was exceeded for this frame.
+ * BITRATE_HIGH(bit11): The frame size got within the safety margin of the maximum size (VCM only)
+ * AIR_MB_OVER_THRESHOLD: the number of MBs adapted to Intra MB
+ */
+#define VA_CODED_BUF_STATUS_PICTURE_AVE_QP_MASK 0xff
+#define VA_CODED_BUF_STATUS_LARGE_SLICE_MASK 0x100
+#define VA_CODED_BUF_STATUS_SLICE_OVERFLOW_MASK 0x200
+#define VA_CODED_BUF_STATUS_BITRATE_OVERFLOW 0x400
+#define VA_CODED_BUF_STATUS_BITRATE_HIGH 0x800
+/**
+ * \brief The frame has exceeded the maximum requested size.
+ *
+ * This flag indicates that the encoded frame size exceeds the value
+ * specified through a misc parameter buffer of type
+ * #VAEncMiscParameterTypeMaxFrameSize.
+ */
+#define VA_CODED_BUF_STATUS_FRAME_SIZE_OVERFLOW 0x1000
+/**
+ * \brief the bitstream is bad or corrupt.
+ */
+#define VA_CODED_BUF_STATUS_BAD_BITSTREAM 0x8000
+#define VA_CODED_BUF_STATUS_AIR_MB_OVER_THRESHOLD 0xff0000
+
+/**
+ * \brief The coded buffer segment status contains frame encoding passes number
+ *
+ * This is the mask to get the number of encoding passes from the coded
+ * buffer segment status.
+ * NUMBER_PASS(bit24~bit27): the number of encoding passes executed for the coded frame.
+ *
+ */
+#define VA_CODED_BUF_STATUS_NUMBER_PASSES_MASK 0xf000000
+
+/**
+ * \brief The coded buffer segment contains a single NAL unit.
+ *
+ * This flag indicates that the coded buffer segment contains a
+ * single NAL unit. This flag might be useful to the user for
+ * processing the coded buffer.
+ */
+#define VA_CODED_BUF_STATUS_SINGLE_NALU 0x10000000
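+
+/*
+ * Illustrative sketch: decoding a coded buffer segment status word using the
+ * masks above (status is a placeholder for VACodedBufferSegment::status):
+ *
+ *   uint32_t avg_qp = status & VA_CODED_BUF_STATUS_PICTURE_AVE_QP_MASK;
+ *   uint32_t passes = (status & VA_CODED_BUF_STATUS_NUMBER_PASSES_MASK) >> 24;
+ *   if (status & VA_CODED_BUF_STATUS_FRAME_SIZE_OVERFLOW) {
+ *       // the frame exceeded the requested maximum size
+ *   }
+ */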
+
+/**
+ * \brief Coded buffer segment.
+ *
+ * #VACodedBufferSegment is an element of a linked list describing
+ * some information on the coded buffer. The coded buffer segment
+ * could contain either a single NAL unit, or more than one NAL unit.
+ * It is recommended (but not required) to return a single NAL unit
+ * in a coded buffer segment, and the implementation should set the
+ * VA_CODED_BUF_STATUS_SINGLE_NALU status flag if that is the case.
+ */
+typedef struct _VACodedBufferSegment {
+ /**
+ * \brief Size of the data buffer in this segment (in bytes).
+ */
+ uint32_t size;
+ /** \brief Bit offset into the data buffer where the video data starts. */
+ uint32_t bit_offset;
+ /** \brief Status set by the driver. See \c VA_CODED_BUF_STATUS_*. */
+ uint32_t status;
+ /** \brief Reserved for future use. */
+ uint32_t reserved;
+ /** \brief Pointer to the start of the data buffer. */
+ void *buf;
+ /**
+ * \brief Pointer to the next #VACodedBufferSegment element,
+ * or \c NULL if there is none.
+ */
+ void *next;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VACodedBufferSegment;
+
+/**
+ * Map data store of the buffer into the client's address space
+ * vaCreateBuffer() needs to be called with "data" set to NULL before
+ * calling vaMapBuffer()
+ *
+ * if buffer type is VAEncCodedBufferType, pbuf points to link-list of
+ * VACodedBufferSegment, and the list is terminated if "next" is NULL
+ */
+VAStatus vaMapBuffer (
+ VADisplay dpy,
+ VABufferID buf_id, /* in */
+ void **pbuf /* out */
+);
+
+/**
+ * After the client has made changes to a mapped data store, it needs to
+ * "Unmap" it to let the server know that the data is ready to be
+ * consumed by the server
+ */
+VAStatus vaUnmapBuffer (
+ VADisplay dpy,
+ VABufferID buf_id /* in */
+);
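+
+/*
+ * Illustrative sketch: draining an encoder output buffer (coded_buf and out
+ * are placeholders; the surface should have been synchronized first, e.g.
+ * with vaSyncSurface()):
+ *
+ *   VACodedBufferSegment *seg = NULL;
+ *   vaMapBuffer(dpy, coded_buf, (void **)&seg);
+ *   for (; seg; seg = (VACodedBufferSegment *)seg->next) {
+ *       fwrite(seg->buf, 1, seg->size, out);  // seg->buf holds seg->size bytes
+ *   }
+ *   vaUnmapBuffer(dpy, coded_buf);
+ */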
+
+/**
+ * After this call, the buffer is deleted and this buffer_id is no longer valid
+ *
+ * A buffer can be re-used and sent to the server by another Begin/Render/End
+ * sequence if vaDestroyBuffer() is not called with this buffer.
+ *
+ * Note re-using a shared buffer (e.g. a slice data buffer) between the host and the
+ * hardware accelerator can result in a performance drop.
+ */
+VAStatus vaDestroyBuffer (
+ VADisplay dpy,
+ VABufferID buffer_id
+);
+
+/** \brief VA buffer information */
+typedef struct {
+ /** \brief Buffer handle */
+ uintptr_t handle;
+ /** \brief Buffer type (See \ref VABufferType). */
+ uint32_t type;
+ /**
+ * \brief Buffer memory type (See \ref VASurfaceAttribMemoryType).
+ *
+ * On input to vaAcquireBufferHandle(), this field can serve as a hint
+ * to specify the set of memory types the caller is interested in.
+ * On successful return from vaAcquireBufferHandle(), the field is
+ * updated with the best matching memory type.
+ */
+ uint32_t mem_type;
+ /** \brief Size of the underlying buffer. */
+ size_t mem_size;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VABufferInfo;
+
+/**
+ * \brief Acquires buffer handle for external API usage
+ *
+ * Locks the VA buffer object \ref buf_id for external API usage like
+ * EGL or OpenCL (OCL). This function is a synchronization point. This
+ * means that any pending operation is guaranteed to be completed
+ * prior to returning from the function.
+ *
+ * If the referenced VA buffer object is the backing store of a VA
+ * surface, then this function acts as if vaSyncSurface() on the
+ * parent surface was called first.
+ *
+ * The \ref VABufferInfo argument shall be zero'ed on input. On
+ * successful output, the data structure is filled in with all the
+ * necessary buffer level implementation details like handle, type,
+ * memory type and memory size.
+ *
+ * Note: the external API implementation, or the application, can
+ * express the memory types it is interested in by filling in the \ref
+ * mem_type field accordingly. On successful output, the memory type
+ * that fits best the request and that was used is updated in the \ref
+ * VABufferInfo data structure. If none of the supplied memory types
+ * is supported, then a \ref VA_STATUS_ERROR_UNSUPPORTED_MEMORY_TYPE
+ * error is returned.
+ *
+ * The \ref VABufferInfo data is valid until vaReleaseBufferHandle()
+ * is called. Besides, no additional operation is allowed on any of
+ * the buffer parent object until vaReleaseBufferHandle() is called.
+ * e.g. decoding into a VA surface backed with the supplied VA buffer
+ * object \ref buf_id would fail with a \ref VA_STATUS_ERROR_SURFACE_BUSY
+ * error.
+ *
+ * Possible errors:
+ * - \ref VA_STATUS_ERROR_UNIMPLEMENTED: the VA driver implementation
+ * does not support this interface
+ * - \ref VA_STATUS_ERROR_INVALID_DISPLAY: an invalid display was supplied
+ * - \ref VA_STATUS_ERROR_INVALID_BUFFER: an invalid buffer was supplied
+ * - \ref VA_STATUS_ERROR_UNSUPPORTED_BUFFERTYPE: the implementation
+ * does not support exporting buffers of the specified type
+ * - \ref VA_STATUS_ERROR_UNSUPPORTED_MEMORY_TYPE: none of the requested
+ * memory types in \ref VABufferInfo.mem_type was supported
+ *
+ * @param[in] dpy the VA display
+ * @param[in] buf_id the VA buffer
+ * @param[in,out] buf_info the associated VA buffer information
+ * @return VA_STATUS_SUCCESS if successful
+ */
+VAStatus
+vaAcquireBufferHandle(VADisplay dpy, VABufferID buf_id, VABufferInfo *buf_info);
+
+/**
+ * \brief Releases buffer after usage from external API
+ *
+ * Unlocks the VA buffer object \ref buf_id from external API usage like
+ * EGL or OpenCL (OCL). This function is a synchronization point. This
+ * means that any pending operation is guaranteed to be completed
+ * prior to returning from the function.
+ *
+ * The \ref VABufferInfo argument shall point to the original data
+ * structure that was obtained from vaAcquireBufferHandle(), unaltered.
+ * This is necessary so that the VA driver implementation could
+ * deallocate any resources that were needed.
+ *
+ * In any case, returning from this function invalidates any contents
+ * in \ref VABufferInfo. i.e. the underlying buffer handle is no longer
+ * valid. Therefore, VA driver implementations are free to reset this
+ * data structure to safe defaults.
+ *
+ * Possible errors:
+ * - \ref VA_STATUS_ERROR_UNIMPLEMENTED: the VA driver implementation
+ * does not support this interface
+ * - \ref VA_STATUS_ERROR_INVALID_DISPLAY: an invalid display was supplied
+ * - \ref VA_STATUS_ERROR_INVALID_BUFFER: an invalid buffer was supplied
+ * - \ref VA_STATUS_ERROR_UNSUPPORTED_BUFFERTYPE: the implementation
+ * does not support exporting buffers of the specified type
+ *
+ * @param[in] dpy the VA display
+ * @param[in] buf_id the VA buffer
+ * @return VA_STATUS_SUCCESS if successful
+ */
+VAStatus
+vaReleaseBufferHandle(VADisplay dpy, VABufferID buf_id);
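+
+/*
+ * Illustrative sketch of the acquire/use/release pattern (dpy and buf_id are
+ * placeholders; VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME is declared outside this
+ * header, in va_drmcommon.h, and is assumed to be acceptable to the driver):
+ *
+ *   VABufferInfo info;
+ *   memset(&info, 0, sizeof(info));
+ *   info.mem_type = VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME;  // hint on input
+ *   if (vaAcquireBufferHandle(dpy, buf_id, &info) == VA_STATUS_SUCCESS) {
+ *       // use info.handle / info.mem_size with the external API
+ *       vaReleaseBufferHandle(dpy, buf_id);
+ *   }
+ */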
+
+/** @name vaExportSurfaceHandle() flags
+ *
+ * @{
+ */
+/** Export surface to be read by external API. */
+#define VA_EXPORT_SURFACE_READ_ONLY 0x0001
+/** Export surface to be written by external API. */
+#define VA_EXPORT_SURFACE_WRITE_ONLY 0x0002
+/** Export surface to be both read and written by external API. */
+#define VA_EXPORT_SURFACE_READ_WRITE 0x0003
+/** Export surface with separate layers.
+ *
+ * For example, NV12 surfaces should be exported as two separate
+ * planes for luma and chroma.
+ */
+#define VA_EXPORT_SURFACE_SEPARATE_LAYERS 0x0004
+/** Export surface with composed layers.
+ *
+ * For example, NV12 surfaces should be exported as a single NV12
+ * composed object.
+ */
+#define VA_EXPORT_SURFACE_COMPOSED_LAYERS 0x0008
+
+/** @} */
+
+/**
+ * \brief Export a handle to a surface for use with an external API
+ *
+ * The exported handles are owned by the caller, and the caller is
+ * responsible for freeing them when no longer needed (e.g. by closing
+ * DRM PRIME file descriptors).
+ *
+ * This does not perform any synchronisation. If the contents of the
+ * surface will be read, vaSyncSurface() must be called before doing so.
+ * If the contents of the surface are written, then all operations must
+ * be completed externally before using the surface again via VA-API
+ * functions.
+ *
+ * @param[in] dpy VA display.
+ * @param[in] surface_id Surface to export.
+ * @param[in] mem_type Memory type to export to.
+ * @param[in] flags Combination of flags to apply
+ * (VA_EXPORT_SURFACE_*).
+ * @param[out] descriptor Pointer to the descriptor structure to fill
+ * with the handle details. The type of this structure depends on
+ * the value of mem_type.
+ *
+ * @return Status code:
+ * - VA_STATUS_SUCCESS: Success.
+ * - VA_STATUS_ERROR_INVALID_DISPLAY: The display is not valid.
+ * - VA_STATUS_ERROR_UNIMPLEMENTED: The driver does not implement
+ * this interface.
+ * - VA_STATUS_ERROR_INVALID_SURFACE: The surface is not valid, or
+ * the surface is not exportable in the specified way.
+ * - VA_STATUS_ERROR_UNSUPPORTED_MEMORY_TYPE: The driver does not
+ * support exporting surfaces to the specified memory type.
+ */
+VAStatus vaExportSurfaceHandle(VADisplay dpy,
+ VASurfaceID surface_id,
+ uint32_t mem_type, uint32_t flags,
+ void *descriptor);
+
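+/*
+ * Illustrative usage sketch for vaExportSurfaceHandle(); it assumes the
+ * VADRMPRIMESurfaceDescriptor type and the VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2
+ * memory type from va_drmcommon.h; error handling is omitted.
+ *
+ *   VADRMPRIMESurfaceDescriptor desc;
+ *   if (vaExportSurfaceHandle(dpy, surface_id,
+ *                             VA_SURFACE_ATTRIB_MEM_TYPE_DRM_PRIME_2,
+ *                             VA_EXPORT_SURFACE_READ_ONLY |
+ *                             VA_EXPORT_SURFACE_SEPARATE_LAYERS,
+ *                             &desc) == VA_STATUS_SUCCESS) {
+ *       vaSyncSurface(dpy, surface_id);      // synchronise before reading
+ *       // ... import desc.objects[i].fd as dma-buf planes ...
+ *       for (uint32_t i = 0; i < desc.num_objects; i++)
+ *           close(desc.objects[i].fd);       // the caller owns the fds
+ *   }
+ */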
+/**
+ * Render (Video Decode/Encode/Processing) Pictures
+ *
+ * A picture represents either a frame or a field.
+ *
+ * The Begin/Render/End sequence sends the video decode/encode/processing buffers
+ * to the server
+ */
+
+/**
+ * Get ready for a video pipeline
+ * - decode a picture to a target surface
+ * - encode a picture from a target surface
+ * - process a picture to a target surface
+ */
+VAStatus vaBeginPicture (
+ VADisplay dpy,
+ VAContextID context,
+ VASurfaceID render_target
+);
+
+/**
+ * Send video decode, encode or processing buffers to the server.
+ */
+VAStatus vaRenderPicture (
+ VADisplay dpy,
+ VAContextID context,
+ VABufferID *buffers,
+ int num_buffers
+);
+
+/**
+ * Mark the end of rendering for a picture.
+ * The server should start processing all pending operations for this
+ * surface. This call is non-blocking. The client can start another
+ * Begin/Render/End sequence on a different render target.
+ * If the VAContextID used in this function was previously passed successfully
+ * to vaMFAddContext(), the actual processing will be started during vaMFSubmit().
+ */
+VAStatus vaEndPicture (
+ VADisplay dpy,
+ VAContextID context
+);
+
+/**
+ * Mark the end of rendering for the pictures in the contexts passed with the submission.
+ * The server should start processing all pending operations for these contexts.
+ * All passed contexts should have been associated through vaMFAddContext
+ * and have performed the Begin/Render/End call sequence.
+ * This call is non-blocking. The client can start another
+ * Begin/Render/End/vaMFSubmit sequence on different render targets.
+ * Return values:
+ * VA_STATUS_SUCCESS - operation successful, context was removed.
+ * VA_STATUS_ERROR_INVALID_CONTEXT - mf_context or one of the contexts is invalid
+ * because mf_context was not created or one of the contexts is not associated
+ * with mf_context through vaMFAddContext.
+ * VA_STATUS_ERROR_INVALID_PARAMETER - one of the contexts has not submitted its frame
+ * through the vaBeginPicture/vaRenderPicture/vaEndPicture call sequence.
+ * dpy: display
+ * mf_context: Multi-Frame context
+ * contexts: list of contexts submitting their tasks for the multi-frame operation.
+ * num_contexts: number of passed contexts.
+ */
+VAStatus vaMFSubmit (
+ VADisplay dpy,
+ VAMFContextID mf_context,
+ VAContextID * contexts,
+ int num_contexts
+);
+
+/*
+
+Synchronization
+
+*/
+
+/**
+ * This function blocks until all pending operations on the render target
+ * have been completed. Upon return it is safe to use the render target for a
+ * different picture.
+ */
+VAStatus vaSyncSurface (
+ VADisplay dpy,
+ VASurfaceID render_target
+);
+
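+/*
+ * A minimal sketch of the Begin/Render/End/Sync sequence described above,
+ * submitting one picture's worth of buffers and waiting for the result.
+ * "buffers" is assumed to hold the picture parameter, slice parameter and
+ * slice data buffer IDs; error handling is omitted.
+ *
+ *   vaBeginPicture(dpy, context, render_target);
+ *   vaRenderPicture(dpy, context, buffers, num_buffers);
+ *   vaEndPicture(dpy, context);           // non-blocking
+ *   vaSyncSurface(dpy, render_target);    // block until the surface is ready
+ */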
+typedef enum
+{
+ VASurfaceRendering = 1, /* Rendering in progress */
+ VASurfaceDisplaying = 2, /* Displaying in progress (not safe to render into it) */
+ /* this status is useful if surface is used as the source */
+ /* of an overlay */
+ VASurfaceReady = 4, /* not being rendered or displayed */
+ VASurfaceSkipped = 8 /* Indicate a skipped frame during encode */
+} VASurfaceStatus;
+
+/**
+ * Find out any pending ops on the render target
+ */
+VAStatus vaQuerySurfaceStatus (
+ VADisplay dpy,
+ VASurfaceID render_target,
+ VASurfaceStatus *status /* out */
+);
+
+typedef enum
+{
+ VADecodeSliceMissing = 0,
+ VADecodeMBError = 1,
+} VADecodeErrorType;
+
+/**
+ * The client calls vaQuerySurfaceError with VA_STATUS_ERROR_DECODING_ERROR; the server side returns
+ * an array of VASurfaceDecodeMBErrors structures, and the array is terminated by an entry with status=-1
+ */
+typedef struct _VASurfaceDecodeMBErrors
+{
+ int32_t status; /* 1 if hardware has returned detailed info below, -1 means this record is invalid */
+ uint32_t start_mb; /* start mb address with errors */
+ uint32_t end_mb; /* end mb address with errors */
+ VADecodeErrorType decode_error_type;
+ uint32_t num_mb; /* number of mbs with errors */
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW - 1];
+} VASurfaceDecodeMBErrors;
+
+/**
+ * After the application gets VA_STATUS_ERROR_DECODING_ERROR from vaSyncSurface(),
+ * it can call vaQuerySurfaceError to find out further details on the particular error.
+ * VA_STATUS_ERROR_DECODING_ERROR should be passed in as "error_status";
+ * upon return, error_info will point to an array of VASurfaceDecodeMBErrors structures,
+ * which is allocated and filled by libVA with detailed information on the missing or erroneous macroblocks.
+ * The array is terminated by an entry with status == -1.
+ */
+VAStatus vaQuerySurfaceError(
+ VADisplay dpy,
+ VASurfaceID surface,
+ VAStatus error_status,
+ void **error_info
+);
+
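+/*
+ * Illustrative sketch of the error-query flow described above; error handling
+ * is simplified and the error array is owned by the library.
+ *
+ *   if (vaSyncSurface(dpy, surface) == VA_STATUS_ERROR_DECODING_ERROR) {
+ *       VASurfaceDecodeMBErrors *errs = NULL;
+ *       if (vaQuerySurfaceError(dpy, surface, VA_STATUS_ERROR_DECODING_ERROR,
+ *                               (void **)&errs) == VA_STATUS_SUCCESS) {
+ *           for (; errs && errs->status != -1; errs++) {
+ *               // errs->start_mb .. errs->end_mb, errs->decode_error_type
+ *           }
+ *       }
+ *   }
+ */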
+/**
+ * Images and Subpictures
+ * VAImage is used to either get the surface data to client memory, or
+ * to copy image data in client memory to a surface.
+ * Images, subpictures and surfaces all follow the same 2D coordinate system, where the origin
+ * is at the upper left corner with positive X to the right and positive Y down
+ */
+#define VA_FOURCC(ch0, ch1, ch2, ch3) \
+ ((unsigned long)(unsigned char) (ch0) | ((unsigned long)(unsigned char) (ch1) << 8) | \
+ ((unsigned long)(unsigned char) (ch2) << 16) | ((unsigned long)(unsigned char) (ch3) << 24 ))
+
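+/* Worked example: VA_FOURCC('N','V','1','2') packs the ASCII codes
+ * 'N' (0x4E), 'V' (0x56), '1' (0x31) and '2' (0x32) as
+ * 0x4E | (0x56 << 8) | (0x31 << 16) | (0x32 << 24) == 0x3231564E,
+ * which matches VA_FOURCC_NV12 below.
+ */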
+/* Pre-defined fourcc codes. */
+
+/** NV12: two-plane 8-bit YUV 4:2:0.
+ * The first plane contains Y, the second plane contains U and V in pairs of bytes.
+ */
+#define VA_FOURCC_NV12 0x3231564E
+/** NV21: two-plane 8-bit YUV 4:2:0.
+ * Same as NV12, but with U and V swapped.
+ */
+#define VA_FOURCC_NV21 0x3132564E
+
+/** AI44: packed 4-bit YA.
+ *
+ * The bottom half of each byte contains luma, the top half contains alpha.
+ */
+#define VA_FOURCC_AI44 0x34344149
+
+/** RGBA: packed 8-bit RGBA.
+ *
+ * Four bytes per pixel: red, green, blue, alpha.
+ */
+#define VA_FOURCC_RGBA 0x41424752
+/** RGBX: packed 8-bit RGB.
+ *
+ * Four bytes per pixel: red, green, blue, unspecified.
+ */
+#define VA_FOURCC_RGBX 0x58424752
+/** BGRA: packed 8-bit RGBA.
+ *
+ * Four bytes per pixel: blue, green, red, alpha.
+ */
+#define VA_FOURCC_BGRA 0x41524742
+/** BGRX: packed 8-bit RGB.
+ *
+ * Four bytes per pixel: blue, green, red, unspecified.
+ */
+#define VA_FOURCC_BGRX 0x58524742
+/** ARGB: packed 8-bit RGBA.
+ *
+ * Four bytes per pixel: alpha, red, green, blue.
+ */
+#define VA_FOURCC_ARGB 0x42475241
+/** XRGB: packed 8-bit RGB.
+ *
+ * Four bytes per pixel: unspecified, red, green, blue.
+ */
+#define VA_FOURCC_XRGB 0x42475258
+/** ABGR: packed 8-bit RGBA.
+ *
+ * Four bytes per pixel: alpha, blue, green, red.
+ */
+#define VA_FOURCC_ABGR 0x52474241
+/** XBGR: packed 8-bit RGB.
+ *
+ * Four bytes per pixel: unspecified, blue, green, red.
+ */
+#define VA_FOURCC_XBGR 0x52474258
+
+/** UYVY: packed 8-bit YUV 4:2:2.
+ *
+ * Four bytes per pair of pixels: U, Y, V, Y.
+ */
+#define VA_FOURCC_UYVY 0x59565955
+/** YUY2: packed 8-bit YUV 4:2:2.
+ *
+ * Four bytes per pair of pixels: Y, U, Y, V.
+ */
+#define VA_FOURCC_YUY2 0x32595559
+/** AYUV: packed 8-bit YUVA 4:4:4.
+ *
+ * Four bytes per pixel: A, Y, U, V.
+ */
+#define VA_FOURCC_AYUV 0x56555941
+/** NV11: two-plane 8-bit YUV 4:1:1.
+ *
+ * The first plane contains Y, the second plane contains U and V in pairs of bytes.
+ */
+#define VA_FOURCC_NV11 0x3131564e
+/** YV12: three-plane 8-bit YUV 4:2:0.
+ *
+ * The three planes contain Y, V and U respectively.
+ */
+#define VA_FOURCC_YV12 0x32315659
+/** P208: two-plane 8-bit YUV 4:2:2.
+ *
+ * The first plane contains Y, the second plane contains U and V in pairs of bytes.
+ */
+#define VA_FOURCC_P208 0x38303250
+/** I420: three-plane 8-bit YUV 4:2:0.
+ *
+ * The three planes contain Y, U and V respectively.
+ */
+#define VA_FOURCC_I420 0x30323449
+/** YV24: three-plane 8-bit YUV 4:4:4.
+ *
+ * The three planes contain Y, V and U respectively.
+ */
+#define VA_FOURCC_YV24 0x34325659
+/** YV32: four-plane 8-bit YUVA 4:4:4
+ *
+ * The four planes contain Y, V, U and A respectively.
+ */
+#define VA_FOURCC_YV32 0x32335659
+/** Y800: 8-bit greyscale.
+ */
+#define VA_FOURCC_Y800 0x30303859
+/** IMC3: three-plane 8-bit YUV 4:2:0.
+ *
+ * Equivalent to YV12, but with the additional constraint that the pitch of all three planes
+ * must be the same.
+ */
+#define VA_FOURCC_IMC3 0x33434D49
+/** 411P: three-plane 8-bit YUV 4:1:1.
+ *
+ * The three planes contain Y, U and V respectively.
+ */
+#define VA_FOURCC_411P 0x50313134
+/** 411R: three-plane 8-bit YUV.
+ *
+ * The subsampling is the transpose of 4:1:1 - full chroma appears on every fourth line.
+ * The three planes contain Y, U and V respectively.
+ */
+#define VA_FOURCC_411R 0x52313134
+/** 422H: three-plane 8-bit YUV 4:2:2.
+ *
+ * The three planes contain Y, U and V respectively.
+ */
+#define VA_FOURCC_422H 0x48323234
+/** 422V: three-plane 8-bit YUV 4:4:0.
+ *
+ * The three planes contain Y, U and V respectively.
+ */
+#define VA_FOURCC_422V 0x56323234
+/** 444P: three-plane 8-bit YUV 4:4:4.
+ *
+ * The three planes contain Y, U and V respectively.
+ */
+#define VA_FOURCC_444P 0x50343434
+
+/** RGBP: three-plane 8-bit RGB.
+ *
+ * The three planes contain red, green and blue respectively.
+ */
+#define VA_FOURCC_RGBP 0x50424752
+/** BGRP: three-plane 8-bit RGB.
+ *
+ * The three planes contain blue, green and red respectively.
+ */
+#define VA_FOURCC_BGRP 0x50524742
+/** RG16: packed 5/6-bit RGB.
+ *
+ * Each pixel is a two-byte little-endian value.
+ * Red, green and blue are found in bits 15:11, 10:5, 4:0 respectively.
+ */
+#define VA_FOURCC_RGB565 0x36314752
+/** BG16: packed 5/6-bit RGB.
+ *
+ * Each pixel is a two-byte little-endian value.
+ * Blue, green and red are found in bits 15:11, 10:5, 4:0 respectively.
+ */
+#define VA_FOURCC_BGR565 0x36314742
+
+/** Y210: packed 10-bit YUV 4:2:2.
+ *
+ * Eight bytes represent a pair of pixels. Each sample is a two-byte little-endian value,
+ * with the bottom six bits ignored. The samples are in the order Y, U, Y, V.
+ */
+#define VA_FOURCC_Y210 0x30313259
+/** Y216: packed 16-bit YUV 4:2:2.
+ *
+ * Eight bytes represent a pair of pixels. Each sample is a two-byte little-endian value.
+ * The samples are in the order Y, U, Y, V.
+ */
+#define VA_FOURCC_Y216 0x36313259
+/** Y410: packed 10-bit YUVA 4:4:4.
+ *
+ * Each pixel is a four-byte little-endian value.
+ * A, V, Y, U are found in bits 31:30, 29:20, 19:10, 9:0 respectively.
+ */
+#define VA_FOURCC_Y410 0x30313459
+/** Y416: packed 16-bit YUVA 4:4:4.
+ *
+ * Each pixel is a set of four samples, each of which is a two-byte little-endian value.
+ * The samples are in the order A, V, Y, U.
+ */
+#define VA_FOURCC_Y416 0x36313459
+
+/** YV16: three-plane 8-bit YUV 4:2:2.
+ *
+ * The three planes contain Y, V and U respectively.
+ */
+#define VA_FOURCC_YV16 0x36315659
+/** P010: two-plane 10-bit YUV 4:2:0.
+ *
+ * Each sample is a two-byte little-endian value with the bottom six bits ignored.
+ * The first plane contains Y, the second plane contains U and V in pairs of samples.
+ */
+#define VA_FOURCC_P010 0x30313050
+/** P016: two-plane 16-bit YUV 4:2:0.
+ *
+ * Each sample is a two-byte little-endian value. The first plane contains Y, the second
+ * plane contains U and V in pairs of samples.
+ */
+#define VA_FOURCC_P016 0x36313050
+
+/** I010: three-plane 10-bit YUV 4:2:0.
+ *
+ * Each sample is a two-byte little-endian value with the top six bits ignored.
+ * The three planes contain Y, V and U respectively.
+ */
+#define VA_FOURCC_I010 0x30313049
+
+/** IYUV: three-plane 8-bit YUV 4:2:0.
+ *
+ * @deprecated Use I420 instead.
+ */
+#define VA_FOURCC_IYUV 0x56555949
+/**
+ * 10-bit Pixel RGB formats.
+ */
+#define VA_FOURCC_A2R10G10B10 0x30335241 /* VA_FOURCC('A','R','3','0') */
+/**
+ * 10-bit Pixel BGR formats.
+ */
+#define VA_FOURCC_A2B10G10R10 0x30334241 /* VA_FOURCC('A','B','3','0') */
+
+/** Y8: 8-bit greyscale.
+ *
+ * Only a single sample, 8 bit Y plane for monochrome images
+ */
+#define VA_FOURCC_Y8 0x20203859
+/** Y16: 16-bit greyscale.
+ *
+ * Only a single sample, 16 bit Y plane for monochrome images
+ */
+#define VA_FOURCC_Y16 0x20363159
+/** VYUY: packed 8-bit YUV 4:2:2.
+ *
+ * Four bytes per pair of pixels: V, Y, U, Y.
+ */
+#define VA_FOURCC_VYUY 0x59555956
+/** YVYU: packed 8-bit YUV 4:2:2.
+ *
+ * Four bytes per pair of pixels: Y, V, Y, U.
+ */
+#define VA_FOURCC_YVYU 0x55595659
+/** ARGB64: four-plane 16-bit ARGB 16:16:16:16
+ *
+ * The four planes contain: alpha, red, green, blue respectively.
+ */
+#define VA_FOURCC_ARGB64 0x34475241
+/** ABGR64: four-plane 16-bit ABGR 16:16:16:16
+ *
+ * The four planes contain: alpha, blue, green, red respectively.
+ */
+#define VA_FOURCC_ABGR64 0x34474241
+
+/* byte order */
+#define VA_LSB_FIRST 1
+#define VA_MSB_FIRST 2
+
+typedef struct _VAImageFormat
+{
+ uint32_t fourcc;
+ uint32_t byte_order; /* VA_LSB_FIRST, VA_MSB_FIRST */
+ uint32_t bits_per_pixel;
+ /* for RGB formats */
+ uint32_t depth; /* significant bits per pixel */
+ uint32_t red_mask;
+ uint32_t green_mask;
+ uint32_t blue_mask;
+ uint32_t alpha_mask;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAImageFormat;
+
+typedef VAGenericID VAImageID;
+
+typedef struct _VAImage
+{
+ VAImageID image_id; /* uniquely identify this image */
+ VAImageFormat format;
+ VABufferID buf; /* image data buffer */
+ /*
+ * Image data will be stored in a buffer of type VAImageBufferType to facilitate
+ * data store on the server side for optimal performance. The buffer will be
+ * created by the CreateImage function, and proper storage allocated based on the image
+ * size and format. This buffer is managed by the library implementation, and
+ * accessed by the client through the buffer Map/Unmap functions.
+ */
+ uint16_t width;
+ uint16_t height;
+ uint32_t data_size;
+ uint32_t num_planes; /* can not be greater than 3 */
+ /*
+ * An array indicating the scanline pitch in bytes for each plane.
+ * Each plane may have a different pitch. Maximum 3 planes for planar formats
+ */
+ uint32_t pitches[3];
+ /*
+ * An array indicating the byte offset from the beginning of the image data
+ * to the start of each plane.
+ */
+ uint32_t offsets[3];
+
+ /* The following fields are only needed for paletted formats */
+ int32_t num_palette_entries; /* set to zero for non-palette images */
+ /*
+ * Each component is one byte and entry_bytes indicates the number of components in
+ * each entry (e.g. 3 for YUV palette entries). Set to zero for non-palette images
+ */
+ int32_t entry_bytes;
+ /*
+ * An array of ascii characters describing the order of the components within the bytes.
+ * Only entry_bytes characters of the string are used.
+ */
+ int8_t component_order[4];
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAImage;
+
+/** Get maximum number of image formats supported by the implementation */
+int vaMaxNumImageFormats (
+ VADisplay dpy
+);
+
+/**
+ * Query supported image formats
+ * The caller must provide a "format_list" array that can hold at
+ * least vaMaxNumImageFormats() entries. The actual number of formats
+ * returned in "format_list" is returned in "num_formats".
+ */
+VAStatus vaQueryImageFormats (
+ VADisplay dpy,
+ VAImageFormat *format_list, /* out */
+ int *num_formats /* out */
+);
+
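+/*
+ * Illustrative query sketch for the two calls above (assumes <stdlib.h>;
+ * error handling omitted):
+ *
+ *   int max_formats = vaMaxNumImageFormats(dpy);
+ *   VAImageFormat *formats = malloc(max_formats * sizeof(*formats));
+ *   int num_formats = 0;
+ *   vaQueryImageFormats(dpy, formats, &num_formats);
+ *   for (int i = 0; i < num_formats; i++) {
+ *       // formats[i].fourcc, formats[i].bits_per_pixel, ...
+ *   }
+ *   free(formats);
+ */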
+/**
+ * Create a VAImage structure
+ * The width and height fields returned in the VAImage structure may get
+ * enlarged for some YUV formats. Upon return from this function,
+ * image->buf has been created and proper storage allocated by the library.
+ * The client can access the image through the Map/Unmap calls.
+ */
+VAStatus vaCreateImage (
+ VADisplay dpy,
+ VAImageFormat *format,
+ int width,
+ int height,
+ VAImage *image /* out */
+);
+
+/**
+ * vaDestroyImage() should be called before destroying the surface the image is bound to
+ */
+VAStatus vaDestroyImage (
+ VADisplay dpy,
+ VAImageID image
+);
+
+VAStatus vaSetImagePalette (
+ VADisplay dpy,
+ VAImageID image,
+ /*
+ * pointer to an array holding the palette data. The size of the array is
+ * num_palette_entries * entry_bytes in size. The order of the components
+ * in the palette is described by the component_order in VAImage struct
+ */
+ unsigned char *palette
+);
+
+/**
+ * Retrieve surface data into a VAImage
+ * Image must be in a format supported by the implementation
+ */
+VAStatus vaGetImage (
+ VADisplay dpy,
+ VASurfaceID surface,
+ int x, /* coordinates of the upper left source pixel */
+ int y,
+ unsigned int width, /* width and height of the region */
+ unsigned int height,
+ VAImageID image
+);
+
+/**
+ * Copy data from a VAImage to a surface
+ * Image must be in a format supported by the implementation
+ * Returns VA_STATUS_ERROR_SURFACE_BUSY if the surface
+ * must not be rendered into at the time of the call
+ */
+VAStatus vaPutImage (
+ VADisplay dpy,
+ VASurfaceID surface,
+ VAImageID image,
+ int src_x,
+ int src_y,
+ unsigned int src_width,
+ unsigned int src_height,
+ int dest_x,
+ int dest_y,
+ unsigned int dest_width,
+ unsigned int dest_height
+);
+
+/**
+ * Derive a VAImage from an existing surface.
+ * This interface will derive a VAImage and corresponding image buffer from
+ * an existing VA Surface. The image buffer can then be mapped/unmapped for
+ * direct CPU access. This operation is only possible on implementations with
+ * direct rendering capabilities and internal surface formats that can be
+ * represented with a VAImage. When the operation is not possible this interface
+ * will return VA_STATUS_ERROR_OPERATION_FAILED. Clients should then fall back
+ * to using vaCreateImage + vaPutImage to accomplish the same task in an
+ * indirect manner.
+ *
+ * Implementations should only return success when the resulting image buffer
+ * would be usable with vaMap/Unmap.
+ *
+ * When directly accessing a surface, special care must be taken to ensure
+ * proper synchronization with the graphics hardware. Clients should call
+ * vaQuerySurfaceStatus to ensure that a surface is not the target of concurrent
+ * rendering or currently being displayed by an overlay.
+ *
+ * Additionally nothing about the contents of a surface should be assumed
+ * following a vaPutSurface. Implementations are free to modify the surface for
+ * scaling or subpicture blending within a call to vaPutImage.
+ *
+ * Calls to vaPutImage or vaGetImage using the same surface from which the image
+ * has been derived will return VA_STATUS_ERROR_SURFACE_BUSY. vaPutImage or
+ * vaGetImage with other surfaces is supported.
+ *
+ * An image created with vaDeriveImage should be freed with vaDestroyImage. The
+ * image and image buffer structures will be destroyed; however, the underlying
+ * surface will remain unchanged until freed with vaDestroySurfaces.
+ */
+VAStatus vaDeriveImage (
+ VADisplay dpy,
+ VASurfaceID surface,
+ VAImage *image /* out */
+);
+
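+/*
+ * Illustrative sketch of the derive-and-map pattern with the fallback described
+ * above; vaMapBuffer()/vaUnmapBuffer() are assumed to be the buffer mapping
+ * entry points declared elsewhere in this header; error handling is omitted.
+ *
+ *   VAImage image;
+ *   if (vaDeriveImage(dpy, surface, &image) == VA_STATUS_SUCCESS) {
+ *       void *data = NULL;
+ *       if (vaMapBuffer(dpy, image.buf, &data) == VA_STATUS_SUCCESS) {
+ *           // access planes via data + image.offsets[i], pitch image.pitches[i]
+ *           vaUnmapBuffer(dpy, image.buf);
+ *       }
+ *       vaDestroyImage(dpy, image.image_id);
+ *   } else {
+ *       // fall back to vaCreateImage() + vaGetImage()/vaPutImage()
+ *   }
+ */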
+/**
+ * Subpictures
+ * A subpicture is a special type of image that can be blended
+ * with a surface during vaPutSurface(). Subpictures can be used to render
+ * DVD subtitles, closed captioning text, etc.
+ */
+
+typedef VAGenericID VASubpictureID;
+
+/** Get maximum number of subpicture formats supported by the implementation */
+int vaMaxNumSubpictureFormats (
+ VADisplay dpy
+);
+
+/** flags for subpictures */
+#define VA_SUBPICTURE_CHROMA_KEYING 0x0001
+#define VA_SUBPICTURE_GLOBAL_ALPHA 0x0002
+#define VA_SUBPICTURE_DESTINATION_IS_SCREEN_COORD 0x0004
+/**
+ * Query supported subpicture formats
+ * The caller must provide a "format_list" array that can hold at
+ * least vaMaxNumSubpictureFormats() entries. The flags array holds the flag
+ * for each format to indicate additional capabilities for that format. The actual
+ * number of formats returned in "format_list" is returned in "num_formats".
+ * flags: returned value to indicate additional capabilities
+ * VA_SUBPICTURE_CHROMA_KEYING - supports chroma-keying
+ * VA_SUBPICTURE_GLOBAL_ALPHA - supports global alpha
+ * VA_SUBPICTURE_DESTINATION_IS_SCREEN_COORD - supports unscaled screen relative subpictures for On Screen Display
+ */
+
+VAStatus vaQuerySubpictureFormats (
+ VADisplay dpy,
+ VAImageFormat *format_list, /* out */
+ unsigned int *flags, /* out */
+ unsigned int *num_formats /* out */
+);
+
+/**
+ * Subpictures are created with an image associated.
+ */
+VAStatus vaCreateSubpicture (
+ VADisplay dpy,
+ VAImageID image,
+ VASubpictureID *subpicture /* out */
+);
+
+/**
+ * Destroy the subpicture before destroying the image it is associated with
+ */
+VAStatus vaDestroySubpicture (
+ VADisplay dpy,
+ VASubpictureID subpicture
+);
+
+/**
+ * Bind an image to the subpicture. This image will now be associated with
+ * the subpicture instead of the one specified at creation.
+ */
+VAStatus vaSetSubpictureImage (
+ VADisplay dpy,
+ VASubpictureID subpicture,
+ VAImageID image
+);
+
+/**
+ * If chromakey is enabled, then the area where the source value falls within
+ * the chromakey [min, max] range is transparent
+ * The chromakey component format is the following:
+ * For RGB: [0:7] Red [8:15] Blue [16:23] Green
+ * For YUV: [0:7] V [8:15] U [16:23] Y
+ * The chromakey mask can be used to mask out certain components for chromakey
+ * comparison
+ */
+VAStatus vaSetSubpictureChromakey (
+ VADisplay dpy,
+ VASubpictureID subpicture,
+ unsigned int chromakey_min,
+ unsigned int chromakey_max,
+ unsigned int chromakey_mask
+);
+
+/**
+ * Global alpha value is between 0 and 1. A value of 1 means fully opaque and
+ * a value of 0 means fully transparent. If per-pixel alpha is also specified then
+ * the overall alpha is per-pixel alpha multiplied by the global alpha
+ */
+VAStatus vaSetSubpictureGlobalAlpha (
+ VADisplay dpy,
+ VASubpictureID subpicture,
+ float global_alpha
+);
+
+/**
+ * vaAssociateSubpicture associates the subpicture with target_surfaces.
+ * It defines the region mapping between the subpicture and the target
+ * surfaces through source and destination rectangles (with the same width and height).
+ * Both will be displayed at the next call to vaPutSurface. Additional
+ * associations before the call to vaPutSurface simply override the previous association.
+ */
+VAStatus vaAssociateSubpicture (
+ VADisplay dpy,
+ VASubpictureID subpicture,
+ VASurfaceID *target_surfaces,
+ int num_surfaces,
+ int16_t src_x, /* upper left offset in subpicture */
+ int16_t src_y,
+ uint16_t src_width,
+ uint16_t src_height,
+ int16_t dest_x, /* upper left offset in surface */
+ int16_t dest_y,
+ uint16_t dest_width,
+ uint16_t dest_height,
+ /*
+ * whether to enable chroma-keying, global-alpha, or screen relative mode
+ * see VA_SUBPICTURE_XXX values
+ */
+ uint32_t flags
+);
+
+/**
+ * vaDeassociateSubpicture removes the association of the subpicture with target_surfaces.
+ */
+VAStatus vaDeassociateSubpicture (
+ VADisplay dpy,
+ VASubpictureID subpicture,
+ VASurfaceID *target_surfaces,
+ int num_surfaces
+);
+
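+/*
+ * Illustrative subpicture flow combining the calls above; "image" is assumed
+ * to be an existing VAImageID holding the subtitle bitmap and "surface" a
+ * target VASurfaceID; error handling is omitted.
+ *
+ *   VASubpictureID subpicture;
+ *   vaCreateSubpicture(dpy, image, &subpicture);
+ *   vaSetSubpictureGlobalAlpha(dpy, subpicture, 0.8f);
+ *   vaAssociateSubpicture(dpy, subpicture, &surface, 1,
+ *                         0, 0, sub_width, sub_height,    // source rect
+ *                         16, 16, sub_width, sub_height,  // destination rect
+ *                         VA_SUBPICTURE_GLOBAL_ALPHA);
+ *   // ... a later vaPutSurface(dpy, surface, ...) blends the subpicture ...
+ *   vaDeassociateSubpicture(dpy, subpicture, &surface, 1);
+ *   vaDestroySubpicture(dpy, subpicture);
+ */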
+/**
+ * Display attributes
+ * Display attributes are used to control things such as contrast, hue, saturation,
+ * brightness etc. in the rendering process. The application can query what
+ * attributes are supported by the driver, and then set the appropriate attributes
+ * before calling vaPutSurface()
+ */
+/* PowerVR IEP Lite attributes */
+typedef enum
+{
+ VADISPLAYATTRIB_BLE_OFF = 0x00,
+ VADISPLAYATTRIB_BLE_LOW,
+ VADISPLAYATTRIB_BLE_MEDIUM,
+ VADISPLAYATTRIB_BLE_HIGH,
+ VADISPLAYATTRIB_BLE_NONE,
+} VADisplayAttribBLEMode;
+
+/** attribute value for VADisplayAttribRotation */
+#define VA_ROTATION_NONE 0x00000000
+#define VA_ROTATION_90 0x00000001
+#define VA_ROTATION_180 0x00000002
+#define VA_ROTATION_270 0x00000003
+/**@}*/
+
+/**
+ * @name Mirroring directions
+ *
+ * These values can be used for the VADisplayAttribMirror attribute or
+ * VAProcPipelineParameterBuffer::mirror_state.
+ *
+ */
+/**@{*/
+/** \brief No Mirroring. */
+#define VA_MIRROR_NONE 0x00000000
+/** \brief Horizontal Mirroring. */
+#define VA_MIRROR_HORIZONTAL 0x00000001
+/** \brief Vertical Mirroring. */
+#define VA_MIRROR_VERTICAL 0x00000002
+/**@}*/
+
+/** attribute value for VADisplayAttribOutOfLoopDeblock */
+#define VA_OOL_DEBLOCKING_FALSE 0x00000000
+#define VA_OOL_DEBLOCKING_TRUE 0x00000001
+
+/** Render mode */
+#define VA_RENDER_MODE_UNDEFINED 0
+#define VA_RENDER_MODE_LOCAL_OVERLAY 1
+#define VA_RENDER_MODE_LOCAL_GPU 2
+#define VA_RENDER_MODE_EXTERNAL_OVERLAY 4
+#define VA_RENDER_MODE_EXTERNAL_GPU 8
+
+/** Render device */
+#define VA_RENDER_DEVICE_UNDEFINED 0
+#define VA_RENDER_DEVICE_LOCAL 1
+#define VA_RENDER_DEVICE_EXTERNAL 2
+
+/** Currently defined display attribute types */
+typedef enum
+{
+ VADisplayAttribBrightness = 0,
+ VADisplayAttribContrast = 1,
+ VADisplayAttribHue = 2,
+ VADisplayAttribSaturation = 3,
+ /* client can specify a background color for the target window
+ * (a feature for video conferencing);
+ * the uncovered area of the surface is filled with this color
+ * and will also blend with the decoded video color
+ */
+ VADisplayAttribBackgroundColor = 4,
+ /*
+ * this is a gettable-only attribute. For some implementations that use the
+ * hardware overlay, after PutSurface is called, the surface cannot be
+ * reused until after the subsequent PutSurface call. If this is the case,
+ * the value of this attribute will be set to 1 so that the client
+ * will not attempt to reuse the surface right after returning from a call
+ * to PutSurface.
+ *
+ * Do not use this attribute; use the VASurfaceDisplaying flag of vaQuerySurfaceStatus
+ * instead, since the driver may use either an overlay or the GPU
+ */
+ VADisplayAttribDirectSurface = 5,
+ VADisplayAttribRotation = 6,
+ VADisplayAttribOutofLoopDeblock = 7,
+
+ /* PowerVR IEP Lite specific attributes */
+ VADisplayAttribBLEBlackMode = 8,
+ VADisplayAttribBLEWhiteMode = 9,
+ VADisplayAttribBlueStretch = 10,
+ VADisplayAttribSkinColorCorrection = 11,
+ /*
+ * For type VADisplayAttribCSCMatrix, the "value" field is a pointer to the color
+ * conversion matrix. Each element in the matrix is a floating-point value
+ */
+ VADisplayAttribCSCMatrix = 12,
+ /* specify the constant color used to blend with the video surface:
+ * Cd = Cv * Cc * Ac + Cb * (1 - Ac), where C denotes an RGB color and
+ * d: the final color to overwrite into the frame buffer
+ * v: the decoded video after color conversion
+ * c: the constant color specified by VADisplayAttribBlendColor
+ * b: the background color of the drawable
+ */
+ VADisplayAttribBlendColor = 13,
+ /*
+ * Indicate driver to skip painting color key or not.
+ * Indicates whether the driver should skip painting the color key.
+ * Only applicable if rendering uses an overlay
+ VADisplayAttribOverlayAutoPaintColorKey = 14,
+ /*
+ * customized overlay color key, the format is RGB888
+ * [23:16] = Red, [15:08] = Green, [07:00] = Blue.
+ */
+ VADisplayAttribOverlayColorKey = 15,
+ /*
+ * A hint for the implementation of vaPutSurface.
+ * Normally, the driver could use either an overlay or the GPU to render the surface on the screen;
+ * this flag gives the application the flexibility to switch the render path dynamically
+ */
+ VADisplayAttribRenderMode = 16,
+ /*
+ * specify whether vaPutSurface needs to render into specific monitors;
+ * one example is an external monitor (e.g. HDMI) that is enabled
+ * but the window manager is not aware of it, and there is no associated drawable
+ */
+ VADisplayAttribRenderDevice = 17,
+ /*
+ * specify vaPutSurface render area if there is no drawable on the monitor
+ */
+ VADisplayAttribRenderRect = 18,
+} VADisplayAttribType;
+
+/* flags for VADisplayAttribute */
+#define VA_DISPLAY_ATTRIB_NOT_SUPPORTED 0x0000
+#define VA_DISPLAY_ATTRIB_GETTABLE 0x0001
+#define VA_DISPLAY_ATTRIB_SETTABLE 0x0002
+
+typedef struct _VADisplayAttribute
+{
+ VADisplayAttribType type;
+ int32_t min_value;
+ int32_t max_value;
+ int32_t value; /* used by the set/get attribute functions */
+/* flags can be VA_DISPLAY_ATTRIB_GETTABLE or VA_DISPLAY_ATTRIB_SETTABLE or OR'd together */
+ uint32_t flags;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VADisplayAttribute;
+
+/** Get maximum number of display attributes supported by the implementation */
+int vaMaxNumDisplayAttributes (
+ VADisplay dpy
+);
+
+/**
+ * Query display attributes
+ * The caller must provide an "attr_list" array that can hold at
+ * least vaMaxNumDisplayAttributes() entries. The actual number of attributes
+ * returned in "attr_list" is returned in "num_attributes".
+ */
+VAStatus vaQueryDisplayAttributes (
+ VADisplay dpy,
+ VADisplayAttribute *attr_list, /* out */
+ int *num_attributes /* out */
+);
+
+/**
+ * Get display attributes
+ * This function returns the current attribute values in "attr_list".
+ * Only attributes returned with VA_DISPLAY_ATTRIB_GETTABLE set in the "flags" field
+ * from vaQueryDisplayAttributes() can have their values retrieved.
+ */
+VAStatus vaGetDisplayAttributes (
+ VADisplay dpy,
+ VADisplayAttribute *attr_list, /* in/out */
+ int num_attributes
+);
+
+/**
+ * Set display attributes
+ * Only attributes returned with VA_DISPLAY_ATTRIB_SETTABLE set in the "flags" field
+ * from vaQueryDisplayAttributes() can be set. If the attribute is not settable or
+ * the value is out of range, the function returns VA_STATUS_ERROR_ATTR_NOT_SUPPORTED
+ */
+VAStatus vaSetDisplayAttributes (
+ VADisplay dpy,
+ VADisplayAttribute *attr_list,
+ int num_attributes
+);
+
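+/*
+ * Illustrative attribute query/set sketch combining the calls above (assumes
+ * <stdlib.h>; error handling omitted):
+ *
+ *   int max_attrs = vaMaxNumDisplayAttributes(dpy);
+ *   VADisplayAttribute *attrs = malloc(max_attrs * sizeof(*attrs));
+ *   int num_attrs = 0;
+ *   vaQueryDisplayAttributes(dpy, attrs, &num_attrs);
+ *   for (int i = 0; i < num_attrs; i++) {
+ *       if (attrs[i].type == VADisplayAttribBrightness &&
+ *           (attrs[i].flags & VA_DISPLAY_ATTRIB_SETTABLE)) {
+ *           attrs[i].value = 0;   // must stay within [min_value, max_value]
+ *           vaSetDisplayAttributes(dpy, &attrs[i], 1);
+ *       }
+ *   }
+ *   free(attrs);
+ */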
+/****************************
+ * HEVC data structures
+ ****************************/
+/**
+ * \brief Description of picture properties of those in DPB surfaces.
+ *
+ * If only progressive scan is supported, each surface contains one whole
+ * frame picture.
+ * Otherwise, each surface contains the two fields of a whole picture.
+ * In this case, two entries of ReferenceFrames[] may share the same picture_id
+ * value.
+ */
+typedef struct _VAPictureHEVC
+{
+ /** \brief reconstructed picture buffer surface index
+ * invalid when taking value VA_INVALID_SURFACE.
+ */
+ VASurfaceID picture_id;
+ /** \brief picture order count.
+ * in HEVC, POCs for the top and bottom fields of the same picture should
+ * take different values.
+ */
+ int32_t pic_order_cnt;
+ /* described below */
+ uint32_t flags;
+
+ /** \brief Reserved bytes for future use, must be zero */
+ uint32_t va_reserved[VA_PADDING_LOW];
+} VAPictureHEVC;
+
+/* flags in VAPictureHEVC could be OR of the following */
+#define VA_PICTURE_HEVC_INVALID 0x00000001
+/** \brief indication of an interlaced scan picture.
+ * should take the same value for all pictures in the sequence.
+ */
+#define VA_PICTURE_HEVC_FIELD_PIC 0x00000002
+/** \brief polarity of the field picture.
+ * top field takes even lines of buffer surface.
+ * bottom field takes odd lines of buffer surface.
+ */
+#define VA_PICTURE_HEVC_BOTTOM_FIELD 0x00000004
+/** \brief Long term reference picture */
+#define VA_PICTURE_HEVC_LONG_TERM_REFERENCE 0x00000008
+/**
+ * VA_PICTURE_HEVC_RPS_ST_CURR_BEFORE, VA_PICTURE_HEVC_RPS_ST_CURR_AFTER
+ * and VA_PICTURE_HEVC_RPS_LT_CURR of any picture in ReferenceFrames[] should
+ * be exclusive. No more than one of them can be set for any picture.
+ * Sum of NumPocStCurrBefore, NumPocStCurrAfter and NumPocLtCurr
+ * equals NumPocTotalCurr, which should be equal to or smaller than 8.
+ * Application should provide valid values for both short format and long format.
+ * The pictures in DPB with any of these three flags turned on are referred by
+ * the current picture.
+ */
+/** \brief RefPicSetStCurrBefore of HEVC spec variable
+ * Number of ReferenceFrames[] entries with this bit set equals
+ * NumPocStCurrBefore.
+ */
+#define VA_PICTURE_HEVC_RPS_ST_CURR_BEFORE 0x00000010
+/** \brief RefPicSetStCurrAfter of HEVC spec variable
+ * Number of ReferenceFrames[] entries with this bit set equals
+ * NumPocStCurrAfter.
+ */
+#define VA_PICTURE_HEVC_RPS_ST_CURR_AFTER 0x00000020
+/** \brief RefPicSetLtCurr of HEVC spec variable
+ * Number of ReferenceFrames[] entries with this bit set equals
+ * NumPocLtCurr.
+ */
+#define VA_PICTURE_HEVC_RPS_LT_CURR 0x00000040
+
+//#include <va/va_dec_hevc.h>
+//#include <va/va_dec_jpeg.h>
+#include "va_dec_vp8.h"
+#include "va_dec_vp9.h"
+//#include <va/va_enc_hevc.h>
+//#include <va/va_fei_hevc.h>
+//#include <va/va_enc_h264.h>
+//#include <va/va_enc_jpeg.h>
+//#include <va/va_enc_mpeg2.h>
+//#include <va/va_enc_vp8.h>
+//#include <va/va_enc_vp9.h>
+//#include <va/va_fei.h>
+//#include <va/va_fei_h264.h>
+//#include <va/va_vpp.h>
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _VA_H_ */
diff -up firefox-84.0/media/ffvpx/va/va.patch.ffvpx firefox-84.0/media/ffvpx/va/va.patch
--- firefox-84.0/media/ffvpx/va/va.patch.ffvpx 2020-12-10 20:40:53.391541423 +0100
+++ firefox-84.0/media/ffvpx/va/va.patch 2020-12-10 20:40:53.391541423 +0100
@@ -0,0 +1,33 @@
+--- va.h.old 2020-10-22 10:41:57.805112031 +0200
++++ va.h 2020-10-22 10:37:22.597088670 +0200
+@@ -4613,18 +4613,18 @@
+ #define VA_PICTURE_HEVC_RPS_LT_CURR 0x00000040
+
+-#include <va/va_dec_hevc.h>
+-#include <va/va_dec_jpeg.h>
++//#include <va/va_dec_hevc.h>
++//#include <va/va_dec_jpeg.h>
+ #include "va_dec_vp8.h"
+ #include "va_dec_vp9.h"
+-#include <va/va_enc_hevc.h>
+-#include <va/va_fei_hevc.h>
+-#include <va/va_enc_h264.h>
+-#include <va/va_enc_jpeg.h>
+-#include <va/va_enc_mpeg2.h>
+-#include <va/va_enc_vp8.h>
+-#include <va/va_enc_vp9.h>
+-#include <va/va_fei.h>
+-#include <va/va_fei_h264.h>
+-#include <va/va_vpp.h>
++//#include <va/va_enc_hevc.h>
++//#include <va/va_fei_hevc.h>
++//#include <va/va_enc_h264.h>
++//#include <va/va_enc_jpeg.h>
++//#include <va/va_enc_mpeg2.h>
++//#include <va/va_enc_vp8.h>
++//#include <va/va_enc_vp9.h>
++//#include <va/va_fei.h>
++//#include <va/va_fei_h264.h>
++//#include <va/va_vpp.h>
+
+ /**@}*/
diff -up firefox-84.0/media/ffvpx/va/va_version.h.ffvpx firefox-84.0/media/ffvpx/va/va_version.h
--- firefox-84.0/media/ffvpx/va/va_version.h.ffvpx 2020-12-10 20:40:53.391541423 +0100
+++ firefox-84.0/media/ffvpx/va/va_version.h 2020-12-10 20:40:53.391541423 +0100
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2009 Splitted-Desktop Systems. All Rights Reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sub license, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
+ * OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT.
+ * IN NO EVENT SHALL PRECISION INSIGHT AND/OR ITS SUPPLIERS BE LIABLE FOR
+ * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef VA_VERSION_H
+#define VA_VERSION_H
+
+/**
+ * VA_MAJOR_VERSION:
+ *
+ * The major version of VA-API (1, if %VA_VERSION is 1.2.3)
+ */
+#define VA_MAJOR_VERSION 1
+
+/**
+ * VA_MINOR_VERSION:
+ *
+ * The minor version of VA-API (2, if %VA_VERSION is 1.2.3)
+ */
+#define VA_MINOR_VERSION 7
+
+/**
+ * VA_MICRO_VERSION:
+ *
+ * The micro version of VA-API (3, if %VA_VERSION is 1.2.3)
+ */
+#define VA_MICRO_VERSION 0
+
+/**
+ * VA_VERSION:
+ *
+ * The full version of VA-API, like 1.2.3
+ */
+#define VA_VERSION 1.7.0
+
+/**
+ * VA_VERSION_S:
+ *
+ * The full version of VA-API, in string form (suited for string
+ * concatenation)
+ */
+#define VA_VERSION_S "1.7.0"
+
+/**
+ * VA_VERSION_HEX:
+ *
+ * Numerically encoded version of VA-API, like 0x010203
+ */
+#define VA_VERSION_HEX ((VA_MAJOR_VERSION << 24) | \
+ (VA_MINOR_VERSION << 16) | \
+ (VA_MICRO_VERSION << 8))
+
+/**
+ * VA_CHECK_VERSION:
+ * @major: major version, like 1 in 1.2.3
+ * @minor: minor version, like 2 in 1.2.3
+ * @micro: micro version, like 3 in 1.2.3
+ *
+ * Evaluates to %TRUE if the version of VA-API is greater than or equal to
+ * @major.@minor.@micro
+ */
+#define VA_CHECK_VERSION(major,minor,micro) \
+ (VA_MAJOR_VERSION > (major) || \
+ (VA_MAJOR_VERSION == (major) && VA_MINOR_VERSION > (minor)) || \
+ (VA_MAJOR_VERSION == (major) && VA_MINOR_VERSION == (minor) && VA_MICRO_VERSION >= (micro)))
+
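+/*
+ * Compile-time usage sketch for the macros above:
+ *
+ *   #if VA_CHECK_VERSION(1, 7, 0)
+ *   // code that relies on VA-API >= 1.7.0
+ *   #endif
+ *
+ * VA_VERSION_HEX can likewise be compared numerically, e.g.
+ * VA_VERSION_HEX >= 0x01070000 expresses the same check.
+ */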
+#endif /* VA_VERSION_H */
diff -up firefox-84.0/widget/gtk/DMABufSurface.h.ffvpx firefox-84.0/widget/gtk/DMABufSurface.h
--- firefox-84.0/widget/gtk/DMABufSurface.h.ffvpx 2020-12-08 00:35:05.000000000 +0100
+++ firefox-84.0/widget/gtk/DMABufSurface.h 2020-12-10 20:40:53.391541423 +0100
@@ -8,10 +8,9 @@
#define DMABufSurface_h__
#include <stdint.h>
-#include "GLContext.h"
-#include "GLContextTypes.h"
#include "mozilla/widget/nsWaylandDisplay.h"
#include "mozilla/widget/va_drmcommon.h"
+#include "GLTypes.h"
typedef void* EGLImageKHR;
typedef void* EGLSyncKHR;
@@ -23,6 +22,9 @@ namespace layers {
class SurfaceDescriptor;
class SurfaceDescriptorDMABuf;
} // namespace layers
+namespace gl {
+class GLContext;
+}
} // namespace mozilla
typedef enum {
diff -up firefox-84.0/widget/gtk/nsWaylandDisplay.cpp.ffvpx firefox-84.0/widget/gtk/nsWaylandDisplay.cpp
--- firefox-84.0/widget/gtk/nsWaylandDisplay.cpp.ffvpx 2020-12-08 00:35:05.000000000 +0100
+++ firefox-84.0/widget/gtk/nsWaylandDisplay.cpp 2020-12-10 20:40:53.391541423 +0100
@@ -6,6 +6,10 @@
* file, You can obtain one at http://mozilla.org/MPL/2.0/. */
#include "nsWaylandDisplay.h"
+
+#include "base/message_loop.h" // for MessageLoop
+#include "base/task.h" // for NewRunnableMethod, etc
+#include "mozilla/StaticMutex.h"
#include "mozilla/StaticPrefs_widget.h"
namespace mozilla {
diff -up firefox-84.0/widget/gtk/nsWaylandDisplay.h.ffvpx firefox-84.0/widget/gtk/nsWaylandDisplay.h
--- firefox-84.0/widget/gtk/nsWaylandDisplay.h.ffvpx 2020-12-08 00:35:05.000000000 +0100
+++ firefox-84.0/widget/gtk/nsWaylandDisplay.h 2020-12-10 20:40:53.391541423 +0100
@@ -10,10 +10,6 @@
#include "DMABufLibWrapper.h"
-#include "base/message_loop.h" // for MessageLoop
-#include "base/task.h" // for NewRunnableMethod, etc
-#include "mozilla/StaticMutex.h"
-
#include "mozilla/widget/mozwayland.h"
#include "mozilla/widget/gbm.h"
#include "mozilla/widget/gtk-primary-selection-client-protocol.h"