Skip to content
Open
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
110 changes: 80 additions & 30 deletions host/virtio_gpu_frontend.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -23,19 +23,25 @@
#include <google/protobuf/text_format.h>
#endif // ifdef GFXSTREAM_BUILD_WITH_SNAPSHOT_FRONTEND_SUPPORT

#include <cerrno>
#include <cstdint>
#include <cstring>

#include <vulkan/vulkan.h>

#include "frame_buffer.h"
#include "framework_formats.h"
#include "vulkan/vk_common_operations.h"
#include "gfxstream/host/address_space_operations.h"
// TODO: remove after moving save/load interface to ops.
#include "gfxstream/common/logging.h"
#include "gfxstream/host/address_space_graphics.h"
#include "gfxstream/host/file_stream.h"
#include "gfxstream/host/tracing.h"
#include "gfxstream/memory/SharedMemory.h"
#include "gfxstream/threads/WorkerThread.h"
#include "virtgpu_gfxstream_protocol.h"
#include "virtio_gpu_timelines.h"

namespace gfxstream {
namespace host {
Expand Down Expand Up @@ -199,12 +205,23 @@ int VirtioGpuFrontend::destroyContext(VirtioGpuCtxId contextId) {
return 0;
}

#define DECODE(variable, type, input) \
type variable = {}; \
memcpy(&variable, input, sizeof(type));
// Safely decodes a POD command struct of type T from a guest-supplied buffer.
//
// Verifies that `buffer_size` is large enough to hold a T before copying, so a
// short (malicious or truncated) guest buffer can never cause an out-of-bounds
// read. On success, `out` holds a byte-wise copy of the first sizeof(T) bytes
// of `buffer` and the function returns true; on failure, `out` is left
// untouched and the function returns false after logging.
//
// NOTE: memcpy (rather than a cast) is used deliberately — the guest buffer
// has no alignment guarantees for T.
template <typename T>
bool SafeDecode(T& out, const uint8_t* buffer, uint32_t buffer_size) {
    if (buffer_size < sizeof(T)) {
        // sizeof(T) is size_t: %zu avoids undefined behavior on 64-bit
        // targets where size_t does not match the width %u expects.
        GFXSTREAM_ERROR("failed to decode: need %zu bytes but have %u bytes",
                        sizeof(T), buffer_size);
        return false;
    }
    std::memcpy(&out, buffer, sizeof(T));
    return true;
}

int VirtioGpuFrontend::addressSpaceProcessCmd(VirtioGpuCtxId ctxId, uint32_t* dwords) {
DECODE(header, gfxstream::gfxstreamHeader, dwords)
int VirtioGpuFrontend::processAddressSpaceCommand(VirtioGpuCtxId ctxId, const uint8_t* cmd_buffer, uint32_t cmd_buffer_size) {
gfxstream::gfxstreamHeader header = {};
if (!SafeDecode(header, cmd_buffer, cmd_buffer_size)) {
GFXSTREAM_ERROR("failed to decode header");
return -EINVAL;
}

auto contextIt = mContexts.find(ctxId);
if (contextIt == mContexts.end()) {
Expand All @@ -215,7 +232,11 @@ int VirtioGpuFrontend::addressSpaceProcessCmd(VirtioGpuCtxId ctxId, uint32_t* dw

switch (header.opCode) {
case GFXSTREAM_CONTEXT_CREATE: {
DECODE(contextCreate, gfxstream::gfxstreamContextCreate, dwords)
gfxstream::gfxstreamContextCreate contextCreate = {};
if (!SafeDecode(contextCreate, cmd_buffer, cmd_buffer_size)) {
GFXSTREAM_ERROR("failed to decode contextCreate");
return -EINVAL;
}

auto resourceIt = mResources.find(contextCreate.resourceId);
if (resourceIt == mResources.end()) {
Expand All @@ -228,7 +249,11 @@ int VirtioGpuFrontend::addressSpaceProcessCmd(VirtioGpuCtxId ctxId, uint32_t* dw
resource);
}
case GFXSTREAM_CONTEXT_PING: {
DECODE(contextPing, gfxstream::gfxstreamContextPing, dwords)
gfxstream::gfxstreamContextPing contextPing = {};
if (!SafeDecode(contextPing, cmd_buffer, cmd_buffer_size)) {
GFXSTREAM_ERROR("failed to decode contextPing");
return -EINVAL;
}

return context.PingAddressSpaceGraphicsInstance(get_gfxstream_address_space_ops(),
contextPing.resourceId);
Expand All @@ -240,34 +265,40 @@ int VirtioGpuFrontend::addressSpaceProcessCmd(VirtioGpuCtxId ctxId, uint32_t* dw
return 0;
}

int VirtioGpuFrontend::submitCmd(struct stream_renderer_command* cmd) {
if (!cmd) return -EINVAL;

void* buffer = reinterpret_cast<void*>(cmd->cmd);
int VirtioGpuFrontend::processCommand(const struct stream_renderer_command* cmd) {
if (!cmd) {
GFXSTREAM_ERROR("missing command");
return -EINVAL;
}

VirtioGpuRing ring = VirtioGpuRingGlobal{};
GFXSTREAM_DEBUG("ctx: % u, ring: %s buffer: %p dwords: %d", cmd->ctx_id,
to_string(ring).c_str(), buffer, cmd->cmd_size);
const uint8_t* cmd_buffer = cmd->cmd;
if (!cmd_buffer) {
GFXSTREAM_ERROR("missing command buffer");
return -EINVAL;
}

if (!buffer) {
GFXSTREAM_ERROR("error: buffer null");
const uint32_t cmd_buffer_size = cmd->cmd_size;
if (cmd_buffer_size < 4) {
GFXSTREAM_ERROR("not enough bytes (got %d)", cmd_buffer_size);
return -EINVAL;
}

if (cmd->cmd_size < 4) {
GFXSTREAM_ERROR("error: not enough bytes (got %d)", cmd->cmd_size);
VirtioGpuRing ring = VirtioGpuRingGlobal{};

gfxstream::gfxstreamHeader header = {};
if (!SafeDecode(header, cmd_buffer, cmd_buffer_size)) {
GFXSTREAM_ERROR("failed to decode command header");
return -EINVAL;
}

DECODE(header, gfxstream::gfxstreamHeader, buffer);
switch (header.opCode) {
case GFXSTREAM_CONTEXT_CREATE:
case GFXSTREAM_CONTEXT_PING:
case GFXSTREAM_CONTEXT_PING_WITH_RESPONSE: {
GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY,
"GFXSTREAM_CONTEXT_[CREATE|PING]");

if (addressSpaceProcessCmd(cmd->ctx_id, (uint32_t*)buffer)) {
if (processAddressSpaceCommand(cmd->ctx_id, cmd_buffer, cmd->cmd_size)) {
return -EINVAL;
}
break;
Expand All @@ -282,7 +313,11 @@ int VirtioGpuFrontend::submitCmd(struct stream_renderer_command* cmd) {
.mRingIdx = 0,
};

DECODE(exportSync, gfxstream::gfxstreamCreateExportSync, buffer)
gfxstream::gfxstreamCreateExportSync exportSync = {};
if (!SafeDecode(exportSync, cmd_buffer, cmd_buffer_size)) {
GFXSTREAM_ERROR("failed to decode exportSync");
return -EINVAL;
}

uint64_t sync_handle = convert32to64(exportSync.syncHandleLo, exportSync.syncHandleHi);

Expand All @@ -299,6 +334,12 @@ int VirtioGpuFrontend::submitCmd(struct stream_renderer_command* cmd) {
GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY,
"GFXSTREAM_CREATE_[IMPORT|EXPORT]_SYNC_VK");

gfxstream::gfxstreamCreateExportSyncVK exportSyncVK = {};
if (!SafeDecode(exportSyncVK, cmd_buffer, cmd_buffer_size)) {
GFXSTREAM_ERROR("failed to decode exportSyncVK");
return -EINVAL;
}

// The guest sync export assumes fence context support and always uses
// VIRTGPU_EXECBUF_RING_IDX. With this, the task created here must use
// the same ring as the fence created for the virtio gpu command or the
Expand All @@ -308,8 +349,6 @@ int VirtioGpuFrontend::submitCmd(struct stream_renderer_command* cmd) {
.mRingIdx = 0,
};

DECODE(exportSyncVK, gfxstream::gfxstreamCreateExportSyncVK, buffer)

uint64_t device_handle =
convert32to64(exportSyncVK.deviceHandleLo, exportSyncVK.deviceHandleHi);

Expand All @@ -327,6 +366,12 @@ int VirtioGpuFrontend::submitCmd(struct stream_renderer_command* cmd) {
GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY,
"GFXSTREAM_CREATE_QSRI_EXPORT_VK");

gfxstream::gfxstreamCreateQSRIExportVK exportQSRI = {};
if (!SafeDecode(exportQSRI, cmd_buffer, cmd_buffer_size)) {
GFXSTREAM_ERROR("failed to decode exportQSRI");
return -EINVAL;
}

// The guest QSRI export assumes fence context support and always uses
// VIRTGPU_EXECBUF_RING_IDX. With this, the task created here must use
// the same ring as the fence created for the virtio gpu command or the
Expand All @@ -336,12 +381,10 @@ int VirtioGpuFrontend::submitCmd(struct stream_renderer_command* cmd) {
.mRingIdx = 0,
};

DECODE(exportQSRI, gfxstream::gfxstreamCreateQSRIExportVK, buffer)

uint64_t image_handle =
convert32to64(exportQSRI.imageHandleLo, exportQSRI.imageHandleHi);

GFXSTREAM_DEBUG("wait for gpu vk qsri ring %u image 0x%llx", to_string(ring).c_str(),
GFXSTREAM_DEBUG("wait for gpu vk qsri ring %s image 0x%llx", to_string(ring).c_str(),
(unsigned long long)image_handle);
auto taskId = mVirtioGpuTimelines->enqueueTask(ring);
FrameBuffer::getFB()->asyncWaitForGpuVulkanQsriWithCb(
Expand All @@ -353,9 +396,12 @@ int VirtioGpuFrontend::submitCmd(struct stream_renderer_command* cmd) {
GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY,
"GFXSTREAM_RESOURCE_CREATE_3D");

DECODE(create3d, gfxstream::gfxstreamResourceCreate3d, buffer)
struct stream_renderer_resource_create_args rc3d = {0};

gfxstream::gfxstreamResourceCreate3d create3d = {};
if (!SafeDecode(create3d, cmd_buffer, cmd_buffer_size)) {
GFXSTREAM_ERROR("failed to decode create3d");
return -EINVAL;
}
struct stream_renderer_resource_create_args rc3d = {};
rc3d.target = create3d.target;
rc3d.format = create3d.format;
rc3d.bind = create3d.bind;
Expand All @@ -380,7 +426,11 @@ int VirtioGpuFrontend::submitCmd(struct stream_renderer_command* cmd) {
GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY,
"GFXSTREAM_ACQUIRE_SYNC");

DECODE(acquireSync, gfxstream::gfxstreamAcquireSync, buffer);
gfxstream::gfxstreamAcquireSync acquireSync = {};
if (!SafeDecode(acquireSync, cmd_buffer, cmd_buffer_size)) {
GFXSTREAM_ERROR("failed to decode acquireSync");
return -EINVAL;
}

auto contextIt = mContexts.find(cmd->ctx_id);
if (contextIt == mContexts.end()) {
Expand Down
6 changes: 3 additions & 3 deletions host/virtio_gpu_frontend.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,9 +55,9 @@ class VirtioGpuFrontend {

int destroyContext(VirtioGpuContextId handle);

int addressSpaceProcessCmd(VirtioGpuContextId ctxId, uint32_t* dwords);

int submitCmd(struct stream_renderer_command* cmd);
int processCommand(const struct stream_renderer_command* cmd);
int processAddressSpaceCommand(VirtioGpuContextId ctxId, const uint8_t* cmd_buffer,
uint32_t cmd_buffer_size);

int createFence(uint64_t fence_id, const VirtioGpuRing& ring);

Expand Down
2 changes: 1 addition & 1 deletion host/virtio_gpu_gfxstream_renderer.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -358,7 +358,7 @@ VG_EXPORT void stream_renderer_context_destroy(uint32_t handle) {
VG_EXPORT int stream_renderer_submit_cmd(struct stream_renderer_command* cmd) {
GFXSTREAM_TRACE_EVENT(GFXSTREAM_TRACE_STREAM_RENDERER_CATEGORY, "stream_renderer_submit_cmd()");

return sFrontend()->submitCmd(cmd);
return sFrontend()->processCommand(cmd);
}

VG_EXPORT int stream_renderer_transfer_read_iov(uint32_t handle, uint32_t ctx_id, uint32_t level,
Expand Down
Loading