Skip to content

Commit

Permalink
wip
Browse files Browse the repository at this point in the history
  • Loading branch information
silenium-dev committed Oct 6, 2024
1 parent a048a8d commit 4f0a988
Show file tree
Hide file tree
Showing 11 changed files with 115 additions and 83 deletions.
4 changes: 2 additions & 2 deletions gradle/libs.versions.toml
Original file line number Diff line number Diff line change
Expand Up @@ -7,9 +7,9 @@ kotlinx-datetime = "0.6.1"

flow-graph = "e9463af-dev"
compose = "1.6.11"
compose-gl = "0.3.3"
compose-gl = "0.4.0"
jni-utils = "0.1.5"
skiko = "0.8.10-egl"
skiko = "0.8.12-egl"

slf4j = "2.0.16"
logback = "1.5.8"
Expand Down
2 changes: 0 additions & 2 deletions native/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -81,8 +81,6 @@ if (CMAKE_SYSTEM_NAME STREQUAL "Linux")
src/cpp/platform/linux/VaapiYuvToRgbConversion.cpp
src/cpp/platform/linux/VAEGLInteropImage.cpp
src/cpp/platform/linux/VAEGLInteropImage.hpp
src/cpp/platform/linux/VAGLXInteropImage.cpp
src/cpp/platform/linux/VAGLXInteropImage.hpp
src/cpp/platform/linux/VAEGLRenderInterop.cpp
src/cpp/platform/linux/VaapiDecoder.cpp
)
Expand Down
2 changes: 1 addition & 1 deletion native/src/cpp/platform/linux/VAEGLRenderInterop.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -140,7 +140,7 @@ Java_dev_silenium_multimedia_core_platform_linux_VAEGLRenderInteropKt_mapN(JNIEn
glEGLImageTargetTexture2DOES(GL_TEXTURE_2D, eglImage);
error = glGetError();
if (error != GL_NO_ERROR) {
std::cerr << "Failed to bind egl image to texture: " << error << std::endl;
// std::cerr << "Failed to bind egl image to texture: " << error << std::endl;
eglDestroyImageKHR(eglDisplay, eglImage);
closeDrm(drm);
return glResultFailure(env, "glEGLImageTargetTexture2DOES", error);
Expand Down
26 changes: 0 additions & 26 deletions native/src/cpp/platform/linux/VAGLXInteropImage.cpp

This file was deleted.

38 changes: 0 additions & 38 deletions native/src/cpp/platform/linux/VAGLXInteropImage.hpp

This file was deleted.

23 changes: 21 additions & 2 deletions native/src/cpp/platform/linux/VaapiYuvToRgbConversion.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,8 @@
#include "data/FramePadMetadata.hpp"
#include "helper/errors.hpp"
#include "helper/rationals.hpp"
#include "helper/va.hpp"

#include <cinttypes>
#include <iostream>
#include <unistd.h>
Expand All @@ -24,12 +26,12 @@ struct VaapiYuvToRgbConversionContext {
AVFilterContext *bufferSink{nullptr};
};

JNIEXPORT jobject JNICALL Java_dev_silenium_multimedia_core_platform_linux_VaapiYuvToRgbConversionKt_createN(JNIEnv *env, jclass clazz, jobject _inputMetadata, const jlong _deviceRef, const jlong _inputFramesContext, const jlong _outputFramesContext, jobject _timeBase) {
JNIEXPORT jobject JNICALL Java_dev_silenium_multimedia_core_platform_linux_VaapiYuvToRgbConversionKt_createN(JNIEnv *env, jclass clazz, jobject _inputMetadata, const jlong _deviceRef, const jlong _inputFramesContext, const jlong _outputFramesContext) {
const auto deviceRef = reinterpret_cast<AVBufferRef *>(_deviceRef);
const auto inputFramesRef = reinterpret_cast<AVBufferRef *>(_inputFramesContext);
const auto outputFramesRef = reinterpret_cast<AVBufferRef *>(_outputFramesContext);
const FramePadMetadata inputMetadata{env, _inputMetadata};
const auto timeBase = fromJava(env, _timeBase);
const auto timeBase = inputMetadata.timeBase();

char filterString[2048];
snprintf(filterString, sizeof(filterString),
Expand Down Expand Up @@ -155,6 +157,23 @@ JNIEXPORT jobject JNICALL Java_dev_silenium_multimedia_core_platform_linux_Vaapi
hwFrame->color_range = frame->color_range;
hwFrame->sample_aspect_ratio = frame->sample_aspect_ratio;

av_frame_free(&frame);
frame = hwFrame;
} else {
if (frame->format != AV_PIX_FMT_VAAPI) {
return avResultFailure(env, "input frame format is not VAAPI", AVERROR(EINVAL));
}
const auto hwFrame = av_frame_clone(frame);
if (hwFrame == nullptr) {
return avResultFailure(env, "allocating hw frame", AVERROR(ENOMEM));
}

// auto ret = mapFrameToDifferentContext(hwFrame, inputFrame, ctx->inputFramesRef);
// if (ret.code != 0) {
// av_frame_free(&hwFrame);
// return avResultFailure(env, ret.message, ret.code);
// }

av_frame_free(&frame);
frame = hwFrame;
}
Expand Down
70 changes: 70 additions & 0 deletions native/src/cpp/platform/linux/helper/va.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -4,10 +4,80 @@

#include "va.hpp"

#include <iostream>
#include <unistd.h>

extern "C" {
#include <libavutil/hwcontext.h>
#include <libavutil/hwcontext_vaapi.h>
#include <libavutil/pixdesc.h>
}

/**
 * Closes every DMA-BUF file descriptor carried by a VA-API DRM PRIME
 * surface descriptor.
 *
 * The fds in drm.objects are owned by the caller once exported (e.g. via
 * vaExportSurfaceHandle) and must be closed exactly once to avoid leaking
 * descriptors on every mapped frame.
 *
 * @param drm descriptor whose exported object fds should be closed
 */
void closeDrm(const VADRMPRIMESurfaceDescriptor &drm) {
    // num_objects is unsigned (uint32_t) — use an unsigned index to avoid a
    // signed/unsigned comparison.
    for (uint32_t i = 0; i < drm.num_objects; ++i) {
        close(drm.objects[i].fd);
    }
}

/**
 * Re-homes a VA-API hardware frame into a different AVHWFramesContext by
 * allocating a frame from the target context and mapping the source into it
 * for read access.
 *
 * Both frames contexts must be backed by the same VADisplay; mapping across
 * distinct displays is rejected up front with ENOTSUP.
 *
 * @param dst target frame, freshly allocated or will be unreffed before usage
 * @param src source frame; must carry a valid hw_frames_ctx
 * @param targetContext AVBufferRef to the target AVHWFramesContext
 * @return {0, nullptr} on success; otherwise a negative AVERROR code and a
 *         static description of the failing step
 */
Result mapFrameToDifferentContext(AVFrame *dst, const AVFrame *src, AVBufferRef *targetContext) {
    const auto sourceFrames = reinterpret_cast<AVHWFramesContext *>(src->hw_frames_ctx->data);
    const auto sourceVaDevice = static_cast<AVVAAPIDeviceContext *>(sourceFrames->device_ctx->hwctx);
    const auto targetFrames = reinterpret_cast<AVHWFramesContext *>(targetContext->data);
    const auto targetVaDevice = static_cast<AVVAAPIDeviceContext *>(targetFrames->device_ctx->hwctx);
    // av_hwframe_map can only map within one VA display; bail out early when
    // the two contexts belong to different displays.
    if (sourceVaDevice->display != targetVaDevice->display) {
        return {AVERROR(ENOTSUP), "source and target devices are not the same"};
    }

    av_frame_unref(dst);
    auto ret = av_hwframe_get_buffer(targetContext, dst, 0);
    if (ret < 0) {
        return {ret, "getting hw frame buffer"};
    }
    ret = av_hwframe_map(dst, src, AV_HWFRAME_MAP_READ);
    if (ret < 0) {
        // Fix: release the buffer acquired by av_hwframe_get_buffer above so
        // a failed map does not leave dst holding an unusable hw buffer.
        av_frame_unref(dst);
        return {ret, "mapping frame"};
    }
    // NOTE(review): an explicit vaCopy(srcSurface -> dstSurface) +
    // vaSyncSurface path was experimented with here (surfaces taken from
    // frame->data[3]); it was disabled in favor of av_hwframe_map. Revisit if
    // a real copy (rather than a mapping) between contexts becomes necessary.
    return {0, nullptr};
}
9 changes: 9 additions & 0 deletions native/src/cpp/platform/linux/helper/va.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -5,8 +5,17 @@
// Helpers shared by the Linux VA-API interop code: DRM PRIME fd cleanup and
// cross-context hardware-frame mapping.
#ifndef VA_HPP
#define VA_HPP

extern "C" {
#include <libavutil/frame.h>
}
#include <va/va_drmcommon.h>

// Lightweight status value returned by the VA helpers below.
struct Result {
    int code;            // 0 on success, otherwise a negative AVERROR code
    const char *message; // static description of the failing step; nullptr on success
};

// Closes every DMA-BUF fd listed in the DRM PRIME surface descriptor.
void closeDrm(const VADRMPRIMESurfaceDescriptor &drm);
// Maps src into a frame allocated from targetContext; both frames contexts
// must share the same VADisplay. dst is unreffed before use.
Result mapFrameToDifferentContext(AVFrame *dst, const AVFrame *src, AVBufferRef *targetContext);

#endif //VA_HPP
Original file line number Diff line number Diff line change
Expand Up @@ -64,7 +64,7 @@ class VideoPlayer(path: Path) : Sink<Frame, FramePadMetadata> {
// private var lastFrame: Frame? = null

override suspend fun receive(item: FlowItem<Frame, FramePadMetadata>): Result<Unit> {
println("PTS: ${item.value.pts}, VASurface: 0x${item.value.data[3].toString(16)}")
// println("PTS: ${item.value.pts}, VASurface: 0x${item.value.data[3].toString(16)}")
// println("PTS: ${item.value.pts}")
// lastFrame?.let {
// if (it.pts >= item.value.pts) {
Expand Down Expand Up @@ -222,11 +222,13 @@ class VideoPlayer(path: Path) : Sink<Frame, FramePadMetadata> {
AV_PIX_FMT_P010LE,
AV_PIX_FMT_P010BE,
AV_PIX_FMT_YUV420P10LE,
AV_PIX_FMT_YUV420P10BE -> true
AV_PIX_FMT_YUV420P10BE,
-> true

else -> false
}
renderImage(it, hdr)
redrawAfter(it.frame.duration)
}
}
}
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -54,7 +54,6 @@ class VaapiYuvToRgbConversion :
deviceContext.address,
inputFramesContext!!.address,
outputFramesContext.address,
metadata.timeBase,
).getOrThrow().asNativePointer(::destroyN)

return super.configure(pad, metadata)
Expand Down Expand Up @@ -128,7 +127,6 @@ private external fun createN(
deviceContext: Long,
inputFramesContext: Long,
outputFramesContext: Long,
timeBase: Rational
): Result<Long>

private external fun destroyN(context: Long)
Expand Down
16 changes: 8 additions & 8 deletions src/test/kotlin/dev/silenium/multimedia/core/vaapi/VATest.kt
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,6 @@ import dev.silenium.multimedia.core.platform.linux.VaapiDeviceContext
import dev.silenium.multimedia.core.platform.linux.VaapiYuvToRgbConversion
import dev.silenium.multimedia.core.util.Mode
import dev.silenium.multimedia.core.util.savePNG
import io.kotest.core.annotation.RequiresTag
import io.kotest.core.spec.style.FunSpec
import kotlinx.coroutines.CompletableDeferred
import kotlinx.coroutines.Dispatchers
Expand All @@ -21,11 +20,11 @@ import kotlinx.coroutines.withContext
import java.nio.file.Files
import kotlin.io.path.outputStream

@RequiresTag("vaapi")
//@RequiresTag("vaapi")
class VATest : FunSpec({
val file = run {
val videoFile = Files.createTempFile("video", ".webm")
FileDemuxer::class.java.classLoader.getResourceAsStream("1080p.webm").use {
FileDemuxer::class.java.classLoader.getResourceAsStream("1080p.cut.webm").use {
videoFile.outputStream().use(it::copyTo)
}
videoFile.apply { toFile().deleteOnExit() }
Expand All @@ -45,11 +44,9 @@ class VATest : FunSpec({
connect(demuxerSource to decoderTransformer) { _, _, pad, metadata ->
if (metadata.type == AVMediaType.AVMEDIA_TYPE_VIDEO) pad else null
}
filter = VaapiYuvToRgbConversion()
val filterTransformer = transformer(filter, "filter")
connect(decoderTransformer to filterTransformer)
filter = VaapiYuvToRgbConversion(decoder.outputMetadata.values.first(), decoder.framesContext)
val sink = sink(bufferSink, "sink")
connect(filterTransformer to sink)
connect(decoderTransformer to sink)
}
val started = CompletableDeferred<Unit>()
val frameDeferred = async {
Expand All @@ -61,8 +58,11 @@ class VATest : FunSpec({
val frame = frameDeferred.await()
graph.close()

filter.submit(frame).getOrThrow()
val converted = filter.receive().getOrThrow()

withContext(Dispatchers.IO) {
frame.savePNG(0, Files.createTempFile("frame", ".png"), Mode.RGB0)
converted.savePNG(0, Files.createTempFile("frame", ".png"), Mode.RGB0)
}

context.close()
Expand Down

0 comments on commit 4f0a988

Please sign in to comment.