Diffstat (limited to 'drivers/gpu/drm/radeon/radeon_cs.c')
-rw-r--r--  drivers/gpu/drm/radeon/radeon_cs.c  176
 1 file changed, 154 insertions(+), 22 deletions(-)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 5407459e56d2..70d38241b083 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -29,9 +29,6 @@
#include "radeon_reg.h"
#include "radeon.h"
-void r100_cs_dump_packet(struct radeon_cs_parser *p,
- struct radeon_cs_packet *pkt);
-
static int radeon_cs_parser_relocs(struct radeon_cs_parser *p)
{
struct drm_device *ddev = p->rdev->ddev;
@@ -128,18 +125,6 @@ static int radeon_cs_get_ring(struct radeon_cs_parser *p, u32 ring, s32 priority
return 0;
}
-static void radeon_cs_sync_to(struct radeon_cs_parser *p,
- struct radeon_fence *fence)
-{
- struct radeon_fence *other;
-
- if (!fence)
- return;
-
- other = p->ib.sync_to[fence->ring];
- p->ib.sync_to[fence->ring] = radeon_fence_later(fence, other);
-}
-
static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
{
int i;
@@ -148,7 +133,7 @@ static void radeon_cs_sync_rings(struct radeon_cs_parser *p)
if (!p->relocs[i].robj)
continue;
- radeon_cs_sync_to(p, p->relocs[i].robj->tbo.sync_obj);
+ radeon_ib_sync_to(&p->ib, p->relocs[i].robj->tbo.sync_obj);
}
}
@@ -203,7 +188,7 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
p->chunks[i].length_dw = user_chunk.length_dw;
p->chunks[i].kdata = NULL;
p->chunks[i].chunk_id = user_chunk.chunk_id;
-
+ p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
p->chunk_relocs_idx = i;
}
@@ -226,9 +211,6 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
return -EINVAL;
}
- p->chunks[i].length_dw = user_chunk.length_dw;
- p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
-
cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
if ((p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) ||
(p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS)) {
@@ -478,8 +460,9 @@ static int radeon_cs_ib_vm_chunk(struct radeon_device *rdev,
goto out;
}
radeon_cs_sync_rings(parser);
- radeon_cs_sync_to(parser, vm->fence);
- radeon_cs_sync_to(parser, radeon_vm_grab_id(rdev, vm, parser->ring));
+ radeon_ib_sync_to(&parser->ib, vm->fence);
+ radeon_ib_sync_to(&parser->ib, radeon_vm_grab_id(
+ rdev, vm, parser->ring));
if ((rdev->family >= CHIP_TAHITI) &&
(parser->chunk_const_ib_idx != -1)) {
@@ -648,3 +631,152 @@ u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
idx_value = ibc->kpage[new_page][pg_offset/4];
return idx_value;
}
+
+/**
+ * radeon_cs_packet_parse() - parse a cp packet from the ib chunk
+ * @p: parser structure holding parsing context
+ * @pkt: where to store packet information
+ * @idx: index of the packet's first dword in the ib chunk
+ *
+ * Assumes that chunk_ib_index is properly set. Returns -EINVAL if the packet
+ * is bigger than the remaining ib size, or if the packet type is unknown.
+ **/
+int radeon_cs_packet_parse(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt,
+ unsigned idx)
+{
+ struct radeon_cs_chunk *ib_chunk = &p->chunks[p->chunk_ib_idx];
+ struct radeon_device *rdev = p->rdev;
+ uint32_t header;
+
+ if (idx >= ib_chunk->length_dw) {
+ DRM_ERROR("Can not parse packet at %d after CS end %d !\n",
+ idx, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ header = radeon_get_ib_value(p, idx);
+ pkt->idx = idx;
+ pkt->type = RADEON_CP_PACKET_GET_TYPE(header);
+ pkt->count = RADEON_CP_PACKET_GET_COUNT(header);
+ pkt->one_reg_wr = 0;
+ switch (pkt->type) {
+ case RADEON_PACKET_TYPE0:
+ if (rdev->family < CHIP_R600) {
+ pkt->reg = R100_CP_PACKET0_GET_REG(header);
+ pkt->one_reg_wr =
+ RADEON_CP_PACKET0_GET_ONE_REG_WR(header);
+ } else
+ pkt->reg = R600_CP_PACKET0_GET_REG(header);
+ break;
+ case RADEON_PACKET_TYPE3:
+ pkt->opcode = RADEON_CP_PACKET3_GET_OPCODE(header);
+ break;
+ case RADEON_PACKET_TYPE2:
+ pkt->count = -1;
+ break;
+ default:
+ DRM_ERROR("Unknown packet type %d at %d !\n", pkt->type, idx);
+ return -EINVAL;
+ }
+ if ((pkt->count + 1 + pkt->idx) >= ib_chunk->length_dw) {
+ DRM_ERROR("Packet (%d:%d:%d) end after CS buffer (%d) !\n",
+ pkt->idx, pkt->type, pkt->count, ib_chunk->length_dw);
+ return -EINVAL;
+ }
+ return 0;
+}
+
+/**
+ * radeon_cs_packet_next_is_pkt3_nop() - test if the next packet is P3 NOP
+ * @p: structure holding the parser context.
+ *
+ * Check if the next packet is a relocation packet3 NOP.
+ **/
+bool radeon_cs_packet_next_is_pkt3_nop(struct radeon_cs_parser *p)
+{
+ struct radeon_cs_packet p3reloc;
+ int r;
+
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r)
+ return false;
+ if (p3reloc.type != RADEON_PACKET_TYPE3)
+ return false;
+ if (p3reloc.opcode != RADEON_PACKET3_NOP)
+ return false;
+ return true;
+}
+
+/**
+ * radeon_cs_dump_packet() - dump raw packet context
+ * @p: structure holding the parser context.
+ * @pkt: structure holding the packet.
+ *
+ * Used mostly for debugging and error reporting.
+ **/
+void radeon_cs_dump_packet(struct radeon_cs_parser *p,
+ struct radeon_cs_packet *pkt)
+{
+ volatile uint32_t *ib;
+ unsigned i;
+ unsigned idx;
+
+ ib = p->ib.ptr;
+ idx = pkt->idx;
+ for (i = 0; i <= (pkt->count + 1); i++, idx++)
+ DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
+}
+
+/**
+ * radeon_cs_packet_next_reloc() - parse next (should be reloc) packet
+ * @p: parser structure holding parsing context
+ * @cs_reloc: where to store the parsed relocation
+ * @nomm: no memory management for debugging
+ *
+ * Check if the next packet is a relocation packet3. If so, return the matching
+ * entry from the relocation chunk, or, on the legacy @nomm path, fill the
+ * single reloc with the GPU offset taken directly from the chunk data.
+ **/
+int radeon_cs_packet_next_reloc(struct radeon_cs_parser *p,
+ struct radeon_cs_reloc **cs_reloc,
+ int nomm)
+{
+ struct radeon_cs_chunk *relocs_chunk;
+ struct radeon_cs_packet p3reloc;
+ unsigned idx;
+ int r;
+
+ if (p->chunk_relocs_idx == -1) {
+ DRM_ERROR("No relocation chunk !\n");
+ return -EINVAL;
+ }
+ *cs_reloc = NULL;
+ relocs_chunk = &p->chunks[p->chunk_relocs_idx];
+ r = radeon_cs_packet_parse(p, &p3reloc, p->idx);
+ if (r)
+ return r;
+ p->idx += p3reloc.count + 2;
+ if (p3reloc.type != RADEON_PACKET_TYPE3 ||
+ p3reloc.opcode != RADEON_PACKET3_NOP) {
+ DRM_ERROR("No packet3 for relocation for packet at %d.\n",
+ p3reloc.idx);
+ radeon_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ idx = radeon_get_ib_value(p, p3reloc.idx + 1);
+ if (idx >= relocs_chunk->length_dw) {
+ DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
+ idx, relocs_chunk->length_dw);
+ radeon_cs_dump_packet(p, &p3reloc);
+ return -EINVAL;
+ }
+ /* FIXME: we assume reloc size is 4 dwords */
+ if (nomm) {
+ *cs_reloc = p->relocs;
+ (*cs_reloc)->lobj.gpu_offset =
+ (u64)relocs_chunk->kdata[idx + 3] << 32;
+ (*cs_reloc)->lobj.gpu_offset |= relocs_chunk->kdata[idx + 0];
+ } else
+ *cs_reloc = p->relocs_ptr[(idx / 4)];
+ return 0;
+}
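
For context on the sync change in the hunks above: the per-parser radeon_cs_sync_to() helper is dropped in favour of a shared radeon_ib_sync_to() that operates directly on the IB. Below is a minimal sketch of what that shared helper presumably looks like, inferred from the removed body; the actual implementation lives outside this file and may differ.

/* Hedged sketch of the shared radeon_ib_sync_to() helper, derived from the
 * radeon_cs_sync_to() body removed above; not taken from this patch. */
void radeon_ib_sync_to(struct radeon_ib *ib, struct radeon_fence *fence)
{
	struct radeon_fence *other;

	if (!fence)
		return;

	/* keep, per ring, only the latest fence the IB must wait on */
	other = ib->sync_to[fence->ring];
	ib->sync_to[fence->ring] = radeon_fence_later(fence, other);
}

The effect is unchanged: the IB remembers, per ring, the most recent fence it has to wait on before being scheduled.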
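
The packet helpers (radeon_cs_packet_parse(), radeon_cs_packet_next_is_pkt3_nop(), radeon_cs_dump_packet()) are meant to be driven by the chip-specific checkers. The loop below is only a sketch of such a driver: my_cs_parse(), my_check_packet0() and my_check_packet3() are hypothetical stand-ins for the per-ASIC validation code, not functions added by this patch.

/* Hypothetical per-ASIC validators; packet3 handling is sketched further below. */
static int my_check_packet0(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt);
static int my_check_packet3(struct radeon_cs_parser *p, struct radeon_cs_packet *pkt);

/* Sketch of a chip-specific IB checker loop built on the new helpers. */
static int my_cs_parse(struct radeon_cs_parser *p)
{
	struct radeon_cs_packet pkt;
	int r;

	do {
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
		if (r)
			return r;
		/* skip past this packet: header plus count + 1 payload dwords */
		p->idx += pkt.count + 2;
		switch (pkt.type) {
		case RADEON_PACKET_TYPE0:
			r = my_check_packet0(p, &pkt);
			break;
		case RADEON_PACKET_TYPE2:
			/* type-2 packets are padding, nothing to check */
			break;
		case RADEON_PACKET_TYPE3:
			r = my_check_packet3(p, &pkt);
			break;
		default:
			DRM_ERROR("Unknown packet type %d !\n", pkt.type);
			r = -EINVAL;
			break;
		}
		if (r) {
			radeon_cs_dump_packet(p, &pkt);
			return r;
		}
	} while (p->idx < p->chunks[p->chunk_ib_idx].length_dw);
	return 0;
}

Note that radeon_cs_packet_parse() only fills in pkt; advancing p->idx past the packet is left to the caller, which is why the loop bumps it by pkt.count + 2.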
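
radeon_cs_packet_next_reloc() is likewise aimed at the per-ASIC packet3 checks. The sketch below shows roughly how such a check could resolve the buffer object a packet refers to and patch its GPU address back into the IB; the handler itself, the dword layout being patched and the 0 passed for @nomm (assuming the normal memory-managed path) are illustrative assumptions, not part of this patch.

/* Sketch of a packet3 handler resolving a buffer object through
 * radeon_cs_packet_next_reloc(); only the helper calls are real. */
static int my_check_packet3(struct radeon_cs_parser *p,
			    struct radeon_cs_packet *pkt)
{
	struct radeon_cs_reloc *reloc;
	unsigned idx = pkt->idx + 1;
	uint64_t offset;
	int r;

	/* 0 for @nomm: assume the normal memory-managed path */
	r = radeon_cs_packet_next_reloc(p, &reloc, 0);
	if (r) {
		DRM_ERROR("bad packet3 0x%02x, no reloc\n", pkt->opcode);
		radeon_cs_dump_packet(p, pkt);
		return r;
	}
	/* patch the buffer's GPU address back into the command stream
	 * (the split into low/high dwords here is purely illustrative) */
	offset = reloc->lobj.gpu_offset + radeon_get_ib_value(p, idx);
	p->ib.ptr[idx] = lower_32_bits(offset);
	p->ib.ptr[idx + 1] = upper_32_bits(offset) & 0xff;
	return 0;
}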