/**********************************************************
* Copyright 2008-2009 VMware, Inc. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person
* obtaining a copy of this software and associated documentation
* files (the "Software"), to deal in the Software without
* restriction, including without limitation the rights to use, copy,
* modify, merge, publish, distribute, sublicense, and/or sell copies
* of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be
* included in all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
* EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
* NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
* BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
* ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
* CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
* SOFTWARE.
*
**********************************************************/
#include "os/os_thread.h"
#include "pipe/p_state.h"
#include "pipe/p_defines.h"
#include "util/u_inlines.h"
#include "util/u_math.h"
#include "util/u_memory.h"
#include "svga_cmd.h"
#include "svga_context.h"
#include "svga_debug.h"
#include "svga_resource_buffer.h"
#include "svga_resource_buffer_upload.h"
#include "svga_screen.h"
#include "svga_winsys.h"
/**
 * Describes a complete SVGA_3D_CMD_UPDATE_GB_IMAGE command.
 */
struct svga_3d_update_gb_image {
SVGA3dCmdHeader header;
SVGA3dCmdUpdateGBImage body;
};
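/**
 * Describes a complete SVGA_3D_CMD_INVALIDATE_GB_IMAGE command.
 */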
struct svga_3d_invalidate_gb_image {
SVGA3dCmdHeader header;
SVGA3dCmdInvalidateGBImage body;
};
/**
 * Allocate a winsys_buffer (i.e. DMA, aka GMR, memory).
 *
 * This function will flush and retry if the first attempt to create a DMA
 * buffer fails, so it should not be called from any function involved in
 * flushing, to avoid recursion.
 */
struct svga_winsys_buffer *
svga_winsys_buffer_create( struct svga_context *svga,
unsigned alignment,
unsigned usage,
unsigned size )
{
struct svga_screen *svgascreen = svga_screen(svga->pipe.screen);
struct svga_winsys_screen *sws = svgascreen->sws;
struct svga_winsys_buffer *buf;
   /* First attempt, without flushing */
   buf = sws->buffer_create(sws, alignment, usage, size);
   if (!buf) {
      SVGA_DBG(DEBUG_DMA|DEBUG_PERF, "flushing context to find %u bytes GMR\n",
               size);
/* Try flushing all pending DMAs */
svga_context_flush(svga, NULL);
buf = sws->buffer_create(sws, alignment, usage, size);
}
return buf;
}
/**
 * Destroy HW storage if separate from the host surface.
 * In the GB case, the HW storage is associated with the host surface
 * and this function is therefore a no-op.
 */
void
svga_buffer_destroy_hw_storage(struct svga_screen *ss, struct svga_buffer *sbuf)
{
struct svga_winsys_screen *sws = ss->sws;
assert(sbuf->map.count == 0);
assert(sbuf->hwbuf);
if (sbuf->hwbuf) {
sws->buffer_destroy(sws, sbuf->hwbuf);
sbuf->hwbuf = NULL;
}
}
/**
 * Allocate DMA-able or updatable storage for the buffer.
 *
 * Called before mapping a buffer.
 */
enum pipe_error
svga_buffer_create_hw_storage(struct svga_screen *ss,
struct svga_buffer *sbuf,
unsigned bind_flags)
{
assert(!sbuf->user);
if (ss->sws->have_gb_objects) {
assert(sbuf->handle || !sbuf->dma.pending);
return svga_buffer_create_host_surface(ss, sbuf, bind_flags);
}
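   /* Non-GB path: allocate a winsys (GMR) buffer to serve as the DMA
    * staging storage for this buffer.
    */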
if (!sbuf->hwbuf) {
struct svga_winsys_screen *sws = ss->sws;
unsigned alignment = 16;
unsigned usage = 0;
unsigned size = sbuf->b.b.width0;
sbuf->hwbuf = sws->buffer_create(sws, alignment, usage, size);
if (!sbuf->hwbuf)
return PIPE_ERROR_OUT_OF_MEMORY;
assert(!sbuf->dma.pending);
}
return PIPE_OK;
}
/**
 * Allocate graphics memory for vertex/index/constant/etc. buffers (not
 * textures).
 */
enum pipe_error
svga_buffer_create_host_surface(struct svga_screen *ss,
struct svga_buffer *sbuf,
unsigned bind_flags)
{
enum pipe_error ret = PIPE_OK;
assert(!sbuf->user);
if (!sbuf->handle) {
boolean validated;
sbuf->key.flags = 0;
sbuf->key.format = SVGA3D_BUFFER;
if (bind_flags & PIPE_BIND_VERTEX_BUFFER) {
sbuf->key.flags |= SVGA3D_SURFACE_HINT_VERTEXBUFFER;
sbuf->key.flags |= SVGA3D_SURFACE_BIND_VERTEX_BUFFER;
}
if (bind_flags & PIPE_BIND_INDEX_BUFFER) {
sbuf->key.flags |= SVGA3D_SURFACE_HINT_INDEXBUFFER;
sbuf->key.flags |= SVGA3D_SURFACE_BIND_INDEX_BUFFER;
}
if (bind_flags & PIPE_BIND_CONSTANT_BUFFER)
sbuf->key.flags |= SVGA3D_SURFACE_BIND_CONSTANT_BUFFER;
if (bind_flags & PIPE_BIND_STREAM_OUTPUT)
sbuf->key.flags |= SVGA3D_SURFACE_BIND_STREAM_OUTPUT;
if (bind_flags & PIPE_BIND_SAMPLER_VIEW)
sbuf->key.flags |= SVGA3D_SURFACE_BIND_SHADER_RESOURCE;
if (!bind_flags && sbuf->b.b.usage == PIPE_USAGE_STAGING) {
/* This surface is to be used with the
* SVGA3D_CMD_DX_TRANSFER_FROM_BUFFER command, and no other
* bind flags are allowed to be set for this surface.
*/
sbuf->key.flags = SVGA3D_SURFACE_TRANSFER_FROM_BUFFER;
}
sbuf->key.size.width = sbuf->b.b.width0;
sbuf->key.size.height = 1;
sbuf->key.size.depth = 1;
sbuf->key.numFaces = 1;
sbuf->key.numMipLevels = 1;
sbuf->key.cachable = 1;
sbuf->key.arraySize = 1;
      SVGA_DBG(DEBUG_DMA, "surface_create for buffer sz %u\n",
               sbuf->b.b.width0);
sbuf->handle = svga_screen_surface_create(ss, bind_flags,
sbuf->b.b.usage,
&validated, &sbuf->key);
if (!sbuf->handle)
return PIPE_ERROR_OUT_OF_MEMORY;
/* Always set the discard flag on the first time the buffer is written
* as svga_screen_surface_create might have passed a recycled host
* buffer.
*/
sbuf->dma.flags.discard = TRUE;
      SVGA_DBG(DEBUG_DMA, " --> got sid %p sz %u (buffer)\n",
               sbuf->handle, sbuf->b.b.width0);
/* Add the new surface to the buffer surface list */
ret = svga_buffer_add_host_surface(sbuf, sbuf->handle, &sbuf->key,
bind_flags);
}
return ret;
}
/**
* Recreates a host surface with the new bind flags.
*/
enum pipe_error
svga_buffer_recreate_host_surface(struct svga_context *svga,
struct svga_buffer *sbuf,
unsigned bind_flags)
{
enum pipe_error ret = PIPE_OK;
struct svga_winsys_surface *old_handle = sbuf->handle;
assert(sbuf->bind_flags != bind_flags);
assert(old_handle);
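   /* Clear the handle so that svga_buffer_create_host_surface() below
    * allocates a new surface instead of reusing the current one.
    */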
sbuf->handle = NULL;
/* Create a new resource with the requested bind_flags */
ret = svga_buffer_create_host_surface(svga_screen(svga->pipe.screen),
sbuf, bind_flags);
if (ret == PIPE_OK) {
/* Copy the surface data */
assert(sbuf->handle);
ret = SVGA3D_vgpu10_BufferCopy(svga->swc, old_handle, sbuf->handle,
0, 0, sbuf->b.b.width0);
if (ret != PIPE_OK) {
svga_context_flush(svga, NULL);
ret = SVGA3D_vgpu10_BufferCopy(svga->swc, old_handle, sbuf->handle,
0, 0, sbuf->b.b.width0);
assert(ret == PIPE_OK);
}
}
/* Set the new bind flags for this buffer resource */
sbuf->bind_flags = bind_flags;
return ret;
}
/**
 * Returns TRUE if the surface's bind flags are compatible with the new
 * bind flags.
 */
static boolean
compatible_bind_flags(unsigned bind_flags,
unsigned tobind_flags)
{
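   /* All bind flags can coexist on one surface except
    * PIPE_BIND_CONSTANT_BUFFER: a constant buffer surface cannot be
    * combined with, or promoted to include, any other bind flag.
    */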
if ((bind_flags & tobind_flags) == tobind_flags)
return TRUE;
else if ((bind_flags|tobind_flags) & PIPE_BIND_CONSTANT_BUFFER)
return FALSE;
else
return TRUE;
}
/**
 * Returns a buffer surface from the surface list that has the requested
 * bind flags, or whose existing bind flags can be promoted to include the
 * new bind flags.
 */
static struct svga_buffer_surface *
svga_buffer_get_host_surface(struct svga_buffer *sbuf,
unsigned bind_flags)
{
struct svga_buffer_surface *bufsurf;
LIST_FOR_EACH_ENTRY(bufsurf, &sbuf->surfaces, list) {
if (compatible_bind_flags(bufsurf->bind_flags, bind_flags))
return bufsurf;
}
return NULL;
}
/**
* Adds the host surface to the buffer surface list.
*/
enum pipe_error
svga_buffer_add_host_surface(struct svga_buffer *sbuf,
struct svga_winsys_surface *handle,
struct svga_host_surface_cache_key *key,
unsigned bind_flags)
{
struct svga_buffer_surface *bufsurf;
bufsurf = CALLOC_STRUCT(svga_buffer_surface);
if (!bufsurf)
return PIPE_ERROR_OUT_OF_MEMORY;
bufsurf->bind_flags = bind_flags;
bufsurf->handle = handle;
bufsurf->key = *key;
/* add the surface to the surface list */
LIST_ADD(&bufsurf->list, &sbuf->surfaces);
return PIPE_OK;
}
/**
* Start using the specified surface for this buffer resource.
*/
void
svga_buffer_bind_host_surface(struct svga_context *svga,
struct svga_buffer *sbuf,
struct svga_buffer_surface *bufsurf)
{
enum pipe_error ret;
/* Update the to-bind surface */
assert(bufsurf->handle);
assert(sbuf->handle);
   /* If we are switching from stream output to another buffer type,
    * make sure to copy the buffer contents.
    */
if (sbuf->bind_flags & PIPE_BIND_STREAM_OUTPUT) {
ret = SVGA3D_vgpu10_BufferCopy(svga->swc, sbuf->handle, bufsurf->handle,
0, 0, sbuf->b.b.width0);
if (ret != PIPE_OK) {
svga_context_flush(svga, NULL);
ret = SVGA3D_vgpu10_BufferCopy(svga->swc, sbuf->handle, bufsurf->handle,
0, 0, sbuf->b.b.width0);
assert(ret == PIPE_OK);
}
}
/* Set this surface as the current one */
sbuf->handle = bufsurf->handle;
sbuf->key = bufsurf->key;
sbuf->bind_flags = bufsurf->bind_flags;
}
/**
 * Prepare a host surface that can be used as indicated in the
 * tobind_flags. If the existing host surface was not created with the
 * necessary binding flags, and the new bind flags can be combined with
 * the existing ones, then we will recreate the surface with the combined
 * bind flags. Otherwise, we will create a separate surface for the
 * incompatible bind flags.
 * For example, if a stream output buffer is reused as a constant buffer,
 * then, since a constant buffer surface cannot be bound as a stream output
 * surface, two surfaces will be created: one for stream output and another
 * for the constant buffer.
 */
enum pipe_error
svga_buffer_validate_host_surface(struct svga_context *svga,
struct svga_buffer *sbuf,
unsigned tobind_flags)
{
struct svga_buffer_surface *bufsurf;
enum pipe_error ret = PIPE_OK;
/* Flush any pending upload first */
svga_buffer_upload_flush(svga, sbuf);
   /* First check the cached buffer surface list to see if there is
    * already a buffer surface with the requested bind flags, or one
    * whose bind flags can be promoted to include them.
    */
bufsurf = svga_buffer_get_host_surface(sbuf, tobind_flags);
if (bufsurf) {
if ((bufsurf->bind_flags & tobind_flags) == tobind_flags) {
/* there is a surface with the requested bind flags */
svga_buffer_bind_host_surface(svga, sbuf, bufsurf);
} else {
/* Recreate a host surface with the combined bind flags */
ret = svga_buffer_recreate_host_surface(svga, sbuf,
bufsurf->bind_flags |
tobind_flags);
/* Destroy the old surface */
svga_screen_surface_destroy(svga_screen(sbuf->b.b.screen),
&bufsurf->key, &bufsurf->handle);
LIST_DEL(&bufsurf->list);
FREE(bufsurf);
}
} else {
/* Need to create a new surface if the bind flags are incompatible,
* such as constant buffer surface & stream output surface.
*/
ret = svga_buffer_recreate_host_surface(svga, sbuf,
tobind_flags);
}
return ret;
}
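/**
 * Destroy all host surfaces attached to this buffer resource.
 */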
void
svga_buffer_destroy_host_surface(struct svga_screen *ss,
struct svga_buffer *sbuf)
{
struct svga_buffer_surface *bufsurf, *next;
LIST_FOR_EACH_ENTRY_SAFE(bufsurf, next, &sbuf->surfaces, list) {
      SVGA_DBG(DEBUG_DMA, " ungrab sid %p sz %u\n",
               bufsurf->handle, sbuf->b.b.width0);
svga_screen_surface_destroy(ss, &bufsurf->key, &bufsurf->handle);
FREE(bufsurf);
}
}
/**
* Insert a number of preliminary UPDATE_GB_IMAGE commands in the
* command buffer, equal to the current number of mapped ranges.
* The UPDATE_GB_IMAGE commands will be patched with the
* actual ranges just before flush.
*/
static enum pipe_error
svga_buffer_upload_gb_command(struct svga_context *svga,
struct svga_buffer *sbuf)
{
struct svga_winsys_context *swc = svga->swc;
SVGA3dCmdUpdateGBImage *update_cmd;
struct svga_3d_update_gb_image *whole_update_cmd = NULL;
const uint32 numBoxes = sbuf->map.num_ranges;
struct pipe_resource *dummy;
unsigned i;
assert(svga_have_gb_objects(svga));
assert(numBoxes);
assert(sbuf->dma.updates == NULL);
if (sbuf->dma.flags.discard) {
struct svga_3d_invalidate_gb_image *cicmd = NULL;
SVGA3dCmdInvalidateGBImage *invalidate_cmd;
const unsigned total_commands_size =
sizeof(*invalidate_cmd) + numBoxes * sizeof(*whole_update_cmd);
/* Allocate FIFO space for one INVALIDATE_GB_IMAGE command followed by
* 'numBoxes' UPDATE_GB_IMAGE commands. Allocate all at once rather
* than with separate commands because we need to properly deal with
* filling the command buffer.
*/
invalidate_cmd = SVGA3D_FIFOReserve(swc,
SVGA_3D_CMD_INVALIDATE_GB_IMAGE,
total_commands_size, 1 + numBoxes);
if (!invalidate_cmd)
return PIPE_ERROR_OUT_OF_MEMORY;
cicmd = container_of(invalidate_cmd, cicmd, body);
cicmd->header.size = sizeof(*invalidate_cmd);
swc->surface_relocation(swc, &invalidate_cmd->image.sid, NULL,
sbuf->handle,
(SVGA_RELOC_WRITE |
SVGA_RELOC_INTERNAL |
SVGA_RELOC_DMA));
invalidate_cmd->image.face = 0;
invalidate_cmd->image.mipmap = 0;
      /* Each whole_update_cmd is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command body.
       */
whole_update_cmd = (struct svga_3d_update_gb_image *) &invalidate_cmd[1];
/* initialize the first UPDATE_GB_IMAGE command */
whole_update_cmd->header.id = SVGA_3D_CMD_UPDATE_GB_IMAGE;
update_cmd = &whole_update_cmd->body;
} else {
/* Allocate FIFO space for 'numBoxes' UPDATE_GB_IMAGE commands */
const unsigned total_commands_size =
sizeof(*update_cmd) + (numBoxes - 1) * sizeof(*whole_update_cmd);
update_cmd = SVGA3D_FIFOReserve(swc,
SVGA_3D_CMD_UPDATE_GB_IMAGE,
total_commands_size, numBoxes);
if (!update_cmd)
return PIPE_ERROR_OUT_OF_MEMORY;
      /* Each whole_update_cmd is a SVGA3dCmdHeader plus the
       * SVGA3dCmdUpdateGBImage command body.
       */
whole_update_cmd = container_of(update_cmd, whole_update_cmd, body);
}
/* Init the first UPDATE_GB_IMAGE command */
whole_update_cmd->header.size = sizeof(*update_cmd);
swc->surface_relocation(swc, &update_cmd->image.sid, NULL, sbuf->handle,
SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
update_cmd->image.face = 0;
update_cmd->image.mipmap = 0;
/* Save pointer to the first UPDATE_GB_IMAGE command so that we can
* fill in the box info below.
*/
sbuf->dma.updates = whole_update_cmd;
/*
* Copy the face, mipmap, etc. info to all subsequent commands.
* Also do the surface relocation for each subsequent command.
*/
for (i = 1; i < numBoxes; ++i) {
whole_update_cmd++;
memcpy(whole_update_cmd, sbuf->dma.updates, sizeof(*whole_update_cmd));
swc->surface_relocation(swc, &whole_update_cmd->body.image.sid, NULL,
sbuf->handle,
SVGA_RELOC_WRITE | SVGA_RELOC_INTERNAL);
}
/* Increment reference count */
sbuf->dma.svga = svga;
dummy = NULL;
pipe_resource_reference(&dummy, &sbuf->b.b);
SVGA_FIFOCommitAll(swc);
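   /* Hint to the winsys that this command buffer can now be preemptively
    * flushed if needed.
    */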
swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
sbuf->dma.flags.discard = FALSE;
svga->hud.num_resource_updates++;
return PIPE_OK;
}
/**
* Issue DMA commands to transfer guest memory to the host.
* Note that the memory segments (offset, size) will be patched in
* later in the svga_buffer_upload_flush() function.
*/
static enum pipe_error
svga_buffer_upload_hb_command(struct svga_context *svga,
struct svga_buffer *sbuf)
{
struct svga_winsys_context *swc = svga->swc;
struct svga_winsys_buffer *guest = sbuf->hwbuf;
struct svga_winsys_surface *host = sbuf->handle;
const SVGA3dTransferType transfer = SVGA3D_WRITE_HOST_VRAM;
SVGA3dCmdSurfaceDMA *cmd;
const uint32 numBoxes = sbuf->map.num_ranges;
SVGA3dCopyBox *boxes;
SVGA3dCmdSurfaceDMASuffix *pSuffix;
unsigned region_flags;
unsigned surface_flags;
struct pipe_resource *dummy;
assert(!svga_have_gb_objects(svga));
if (transfer == SVGA3D_WRITE_HOST_VRAM) {
region_flags = SVGA_RELOC_READ;
surface_flags = SVGA_RELOC_WRITE;
}
else if (transfer == SVGA3D_READ_HOST_VRAM) {
region_flags = SVGA_RELOC_WRITE;
surface_flags = SVGA_RELOC_READ;
}
else {
assert(0);
return PIPE_ERROR_BAD_INPUT;
}
assert(numBoxes);
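   /* Reserve space for the DMA command, followed by 'numBoxes' copy boxes
    * and a suffix carrying the maximum offset and the DMA flags.
    */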
cmd = SVGA3D_FIFOReserve(swc,
SVGA_3D_CMD_SURFACE_DMA,
sizeof *cmd + numBoxes * sizeof *boxes + sizeof *pSuffix,
2);
if (!cmd)
return PIPE_ERROR_OUT_OF_MEMORY;
swc->region_relocation(swc, &cmd->guest.ptr, guest, 0, region_flags);
cmd->guest.pitch = 0;
swc->surface_relocation(swc, &cmd->host.sid, NULL, host, surface_flags);
cmd->host.face = 0;
cmd->host.mipmap = 0;
cmd->transfer = transfer;
sbuf->dma.boxes = (SVGA3dCopyBox *)&cmd[1];
sbuf->dma.svga = svga;
/* Increment reference count */
dummy = NULL;
pipe_resource_reference(&dummy, &sbuf->b.b);
pSuffix = (SVGA3dCmdSurfaceDMASuffix *)((uint8_t*)cmd + sizeof *cmd + numBoxes * sizeof *boxes);
pSuffix->suffixSize = sizeof *pSuffix;
pSuffix->maximumOffset = sbuf->b.b.width0;
pSuffix->flags = sbuf->dma.flags;
SVGA_FIFOCommitAll(swc);
swc->hints |= SVGA_HINT_FLAG_CAN_PRE_FLUSH;
sbuf->dma.flags.discard = FALSE;
svga->hud.num_buffer_uploads++;
return PIPE_OK;
}
/**
* Issue commands to transfer guest memory to the host.
*/
static enum pipe_error
svga_buffer_upload_command(struct svga_context *svga, struct svga_buffer *sbuf)
{
if (svga_have_gb_objects(svga)) {
return svga_buffer_upload_gb_command(svga, sbuf);
} else {
return svga_buffer_upload_hb_command(svga, sbuf);
}
}
/**
* Patch up the upload DMA command reserved by svga_buffer_upload_command
* with the final ranges.
*/
void
svga_buffer_upload_flush(struct svga_context *svga, struct svga_buffer *sbuf)
{
unsigned i;
struct pipe_resource *dummy;
if (!sbuf->dma.pending) {
//debug_printf("no dma pending on buffer\n");
return;
}
assert(sbuf->handle);
assert(sbuf->map.num_ranges);
assert(sbuf->dma.svga == svga);
/*
* Patch the DMA/update command with the final copy box.
*/
if (svga_have_gb_objects(svga)) {
struct svga_3d_update_gb_image *update = sbuf->dma.updates;
assert(update);
for (i = 0; i < sbuf->map.num_ranges; ++i, ++update) {
SVGA3dBox *box = &update->body.box;
SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);
box->x = sbuf->map.ranges[i].start;
box->y = 0;
box->z = 0;
box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
box->h = 1;
box->d = 1;
assert(box->x <= sbuf->b.b.width0);
assert(box->x + box->w <= sbuf->b.b.width0);
svga->hud.num_bytes_uploaded += box->w;
svga->hud.num_buffer_uploads++;
}
}
else {
assert(sbuf->hwbuf);
assert(sbuf->dma.boxes);
SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);
for (i = 0; i < sbuf->map.num_ranges; ++i) {
SVGA3dCopyBox *box = sbuf->dma.boxes + i;
SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
sbuf->map.ranges[i].start, sbuf->map.ranges[i].end);
box->x = sbuf->map.ranges[i].start;
box->y = 0;
box->z = 0;
box->w = sbuf->map.ranges[i].end - sbuf->map.ranges[i].start;
box->h = 1;
box->d = 1;
box->srcx = sbuf->map.ranges[i].start;
box->srcy = 0;
box->srcz = 0;
assert(box->x <= sbuf->b.b.width0);
assert(box->x + box->w <= sbuf->b.b.width0);
svga->hud.num_bytes_uploaded += box->w;
svga->hud.num_buffer_uploads++;
}
}
/* Reset sbuf for next use/upload */
sbuf->map.num_ranges = 0;
assert(sbuf->head.prev && sbuf->head.next);
LIST_DEL(&sbuf->head); /* remove from svga->dirty_buffers list */
#ifdef DEBUG
sbuf->head.next = sbuf->head.prev = NULL;
#endif
sbuf->dma.pending = FALSE;
sbuf->dma.flags.discard = FALSE;
sbuf->dma.flags.unsynchronized = FALSE;
sbuf->dma.svga = NULL;
sbuf->dma.boxes = NULL;
sbuf->dma.updates = NULL;
/* Decrement reference count (and potentially destroy) */
dummy = &sbuf->b.b;
pipe_resource_reference(&dummy, NULL);
}
/**
 * Note a dirty range.
 *
 * This function only records the range; it doesn't actually emit a DMA
 * upload command. That only happens when a context tries to refer to this
 * buffer, at which point the DMA upload command is added to that context's
 * command buffer.
 *
 * We try to lump as many contiguous DMA transfers together as possible.
 */
void
svga_buffer_add_range(struct svga_buffer *sbuf, unsigned start, unsigned end)
{
unsigned i;
unsigned nearest_range;
unsigned nearest_dist;
assert(end > start);
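   /* Seed the nearest-range search: if there is room for another range,
    * prefer adding a new one (nearest_dist starts at the maximum);
    * otherwise fall back to extending the last range.
    */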
if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
nearest_range = sbuf->map.num_ranges;
nearest_dist = ~0;
} else {
nearest_range = SVGA_BUFFER_MAX_RANGES - 1;
nearest_dist = 0;
}
/*
* Try to grow one of the ranges.
*/
for (i = 0; i < sbuf->map.num_ranges; ++i) {
const int left_dist = start - sbuf->map.ranges[i].end;
const int right_dist = sbuf->map.ranges[i].start - end;
const int dist = MAX2(left_dist, right_dist);
if (dist <= 0) {
/*
* Ranges are contiguous or overlapping -- extend this one and return.
*
* Note that it is not this function's task to prevent overlapping
* ranges, as the GMR was already given so it is too late to do
* anything. If the ranges overlap here it must surely be because
* PIPE_TRANSFER_UNSYNCHRONIZED was set.
*/
sbuf->map.ranges[i].start = MIN2(sbuf->map.ranges[i].start, start);
sbuf->map.ranges[i].end = MAX2(sbuf->map.ranges[i].end, end);
return;
}
else {
/*
* Discontiguous ranges -- keep track of the nearest range.
*/
if (dist < nearest_dist) {
nearest_range = i;
nearest_dist = dist;
}
}
}
   /*
    * We cannot add a new range to an existing DMA command, so patch up the
    * pending DMA upload and start clean.
    */
svga_buffer_upload_flush(sbuf->dma.svga, sbuf);
assert(!sbuf->dma.pending);
assert(!sbuf->dma.svga);
assert(!sbuf->dma.boxes);
if (sbuf->map.num_ranges < SVGA_BUFFER_MAX_RANGES) {
/*
* Add a new range.
*/
sbuf->map.ranges[sbuf->map.num_ranges].start = start;
sbuf->map.ranges[sbuf->map.num_ranges].end = end;
++sbuf->map.num_ranges;
} else {
/*
* Everything else failed, so just extend the nearest range.
*
* It is OK to do this because we always keep a local copy of the
* host buffer data, for SW TNL, and the host never modifies the buffer.
*/
assert(nearest_range < SVGA_BUFFER_MAX_RANGES);
assert(nearest_range < sbuf->map.num_ranges);
sbuf->map.ranges[nearest_range].start =
MIN2(sbuf->map.ranges[nearest_range].start, start);
sbuf->map.ranges[nearest_range].end =
MAX2(sbuf->map.ranges[nearest_range].end, end);
}
}
/**
* Copy the contents of the malloc buffer to a hardware buffer.
*/
static enum pipe_error
svga_buffer_update_hw(struct svga_context *svga, struct svga_buffer *sbuf,
unsigned bind_flags)
{
assert(!sbuf->user);
if (!svga_buffer_has_hw_storage(sbuf)) {
struct svga_screen *ss = svga_screen(sbuf->b.b.screen);
enum pipe_error ret;
boolean retry;
void *map;
unsigned i;
assert(sbuf->swbuf);
if (!sbuf->swbuf)
return PIPE_ERROR;
ret = svga_buffer_create_hw_storage(svga_screen(sbuf->b.b.screen), sbuf,
bind_flags);
if (ret != PIPE_OK)
return ret;
mtx_lock(&ss->swc_mutex);
map = svga_buffer_hw_storage_map(svga, sbuf, PIPE_TRANSFER_WRITE, &retry);
assert(map);
assert(!retry);
if (!map) {
mtx_unlock(&ss->swc_mutex);
svga_buffer_destroy_hw_storage(ss, sbuf);
return PIPE_ERROR;
}
/* Copy data from malloc'd swbuf to the new hardware buffer */
for (i = 0; i < sbuf->map.num_ranges; i++) {
unsigned start = sbuf->map.ranges[i].start;
unsigned len = sbuf->map.ranges[i].end - start;
memcpy((uint8_t *) map + start, (uint8_t *) sbuf->swbuf + start, len);
}
svga_buffer_hw_storage_unmap(svga, sbuf);
      /* This user/malloc buffer is now indistinguishable from a GPU buffer */
assert(sbuf->map.count == 0);
if (sbuf->map.count == 0) {
if (sbuf->user)
sbuf->user = FALSE;
else
align_free(sbuf->swbuf);
sbuf->swbuf = NULL;
}
mtx_unlock(&ss->swc_mutex);
}
return PIPE_OK;
}
/**
* Upload the buffer to the host in a piecewise fashion.
*
* Used when the buffer is too big to fit in the GMR aperture.
* This function should never get called in the guest-backed case
* since we always have a full-sized hardware storage backing the
* host surface.
*/
static enum pipe_error
svga_buffer_upload_piecewise(struct svga_screen *ss,
struct svga_context *svga,
struct svga_buffer *sbuf)
{
struct svga_winsys_screen *sws = ss->sws;
const unsigned alignment = sizeof(void *);
const unsigned usage = 0;
unsigned i;
assert(sbuf->map.num_ranges);
assert(!sbuf->dma.pending);
assert(!svga_have_gb_objects(svga));
SVGA_DBG(DEBUG_DMA, "dma to sid %p\n", sbuf->handle);
for (i = 0; i < sbuf->map.num_ranges; ++i) {
const struct svga_buffer_range *range = &sbuf->map.ranges[i];
unsigned offset = range->start;
unsigned size = range->end - range->start;
while (offset < range->end) {
struct svga_winsys_buffer *hwbuf;
uint8_t *map;
enum pipe_error ret;
if (offset + size > range->end)
size = range->end - offset;
hwbuf = sws->buffer_create(sws, alignment, usage, size);
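         /* If the allocation fails, halve the size until a buffer small
          * enough to fit in the GMR aperture can be created.
          */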
while (!hwbuf) {
size /= 2;
if (!size)
return PIPE_ERROR_OUT_OF_MEMORY;
hwbuf = sws->buffer_create(sws, alignment, usage, size);
}
SVGA_DBG(DEBUG_DMA, " bytes %u - %u\n",
offset, offset + size);
map = sws->buffer_map(sws, hwbuf,
PIPE_TRANSFER_WRITE |
PIPE_TRANSFER_DISCARD_RANGE);
assert(map);
if (map) {
memcpy(map, (const char *) sbuf->swbuf + offset, size);
sws->buffer_unmap(sws, hwbuf);
}
ret = SVGA3D_BufferDMA(svga->swc,
hwbuf, sbuf->handle,
SVGA3D_WRITE_HOST_VRAM,
size, 0, offset, sbuf->dma.flags);
if (ret != PIPE_OK) {
svga_context_flush(svga, NULL);
ret = SVGA3D_BufferDMA(svga->swc,
hwbuf, sbuf->handle,
SVGA3D_WRITE_HOST_VRAM,
size, 0, offset, sbuf->dma.flags);
assert(ret == PIPE_OK);
}
sbuf->dma.flags.discard = FALSE;
sws->buffer_destroy(sws, hwbuf);
offset += size;
}
}
sbuf->map.num_ranges = 0;
return PIPE_OK;
}
/**
* Get (or create/upload) the winsys surface handle so that we can
* refer to this buffer in fifo commands.
* This function will create the host surface, and in the GB case also the
* hardware storage. In the non-GB case, the hardware storage will be created
* if there are mapped ranges and the data is currently in a malloc'ed buffer.
*/
struct svga_winsys_surface *
svga_buffer_handle(struct svga_context *svga, struct pipe_resource *buf,
unsigned tobind_flags)
{
struct pipe_screen *screen = svga->pipe.screen;
struct svga_screen *ss = svga_screen(screen);
struct svga_buffer *sbuf;
enum pipe_error ret;
if (!buf)
return NULL;
sbuf = svga_buffer(buf);
assert(!sbuf->user);
if (sbuf->handle) {
if ((sbuf->bind_flags & tobind_flags) != tobind_flags) {
/* If the allocated resource's bind flags do not include the
* requested bind flags, validate the host surface.
*/
ret = svga_buffer_validate_host_surface(svga, sbuf, tobind_flags);
if (ret != PIPE_OK)
return NULL;
}
} else {
if (!sbuf->bind_flags) {
sbuf->bind_flags = tobind_flags;
}
assert((sbuf->bind_flags & tobind_flags) == tobind_flags);
/* This call will set sbuf->handle */
if (svga_have_gb_objects(svga)) {
ret = svga_buffer_update_hw(svga, sbuf, sbuf->bind_flags);
} else {
ret = svga_buffer_create_host_surface(ss, sbuf, sbuf->bind_flags);
}
if (ret != PIPE_OK)
return NULL;
}
assert(sbuf->handle);
if (sbuf->map.num_ranges) {
if (!sbuf->dma.pending) {
/* No pending DMA/update commands yet. */
/* Migrate the data from swbuf -> hwbuf if necessary */
ret = svga_buffer_update_hw(svga, sbuf, sbuf->bind_flags);
if (ret == PIPE_OK) {
/* Emit DMA or UpdateGBImage commands */
ret = svga_buffer_upload_command(svga, sbuf);
if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
svga_context_flush(svga, NULL);
ret = svga_buffer_upload_command(svga, sbuf);
assert(ret == PIPE_OK);
}
if (ret == PIPE_OK) {
sbuf->dma.pending = TRUE;
assert(!sbuf->head.prev && !sbuf->head.next);
LIST_ADDTAIL(&sbuf->head, &svga->dirty_buffers);
}
}
else if (ret == PIPE_ERROR_OUT_OF_MEMORY) {
/*
* The buffer is too big to fit in the GMR aperture, so break it in
* smaller pieces.
*/
ret = svga_buffer_upload_piecewise(ss, svga, sbuf);
}
if (ret != PIPE_OK) {
/*
* Something unexpected happened above. There is very little that
* we can do other than proceeding while ignoring the dirty ranges.
*/
assert(0);
sbuf->map.num_ranges = 0;
}
}
else {
      /*
       * There is already a pending DMA. Make sure it is from this context.
       */
assert(sbuf->dma.svga == svga);
}
}
assert(sbuf->map.num_ranges == 0 || sbuf->dma.pending);
return sbuf->handle;
}
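/**
 * Flush (patch and commit) all pending buffer DMA uploads for this
 * context.
 */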
void
svga_context_flush_buffers(struct svga_context *svga)
{
struct list_head *curr, *next;
SVGA_STATS_TIME_PUSH(svga_sws(svga), SVGA_STATS_TIME_BUFFERSFLUSH);
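   /* svga_buffer_upload_flush() removes the buffer from the dirty list,
    * so the next pointer must be fetched before each flush.
    */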
curr = svga->dirty_buffers.next;
next = curr->next;
while (curr != &svga->dirty_buffers) {
struct svga_buffer *sbuf = LIST_ENTRY(struct svga_buffer, curr, head);
assert(p_atomic_read(&sbuf->b.b.reference.count) != 0);
assert(sbuf->dma.pending);
svga_buffer_upload_flush(svga, sbuf);
curr = next;
next = curr->next;
}
SVGA_STATS_TIME_POP(svga_sws(svga));
}