Diffstat (limited to 'drivers/gpu/drm/msm/adreno/a6xx_hfi.c')
-rw-r--r-- | drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 435
1 file changed, 435 insertions(+), 0 deletions(-)
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
new file mode 100644
index 000000000000..f19ef4cb6ea4
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/completion.h>
+#include <linux/circ_buf.h>
+#include <linux/list.h>
+
+#include "a6xx_gmu.h"
+#include "a6xx_gmu.xml.h"
+
+#define HFI_MSG_ID(val) [val] = #val
+
+static const char * const a6xx_hfi_msg_id[] = {
+        HFI_MSG_ID(HFI_H2F_MSG_INIT),
+        HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
+        HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
+        HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
+        HFI_MSG_ID(HFI_H2F_MSG_TEST),
+};
+
+static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
+                u32 dwords)
+{
+        struct a6xx_hfi_queue_header *header = queue->header;
+        u32 i, hdr, index = header->read_index;
+
+        if (header->read_index == header->write_index) {
+                header->rx_request = 1;
+                return 0;
+        }
+
+        hdr = queue->data[index];
+
+        /*
+         * If we are to assume that the GMU firmware is in fact a rational
+         * actor and is programmed to not send us a larger response than we
+         * expect then we can also assume that if the header size is
+         * unexpectedly large that it is due to memory corruption and/or
+         * hardware failure. In this case the only reasonable course of
+         * action is to BUG() to help harden the failure.
+         */
+
+        BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);
+
+        for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
+                data[i] = queue->data[index];
+                index = (index + 1) % header->size;
+        }
+
+        header->read_index = index;
+        return HFI_HEADER_SIZE(hdr);
+}
+
+static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
+        struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
+{
+        struct a6xx_hfi_queue_header *header = queue->header;
+        u32 i, space, index = header->write_index;
+
+        spin_lock(&queue->lock);
+
+        space = CIRC_SPACE(header->write_index, header->read_index,
+                header->size);
+        if (space < dwords) {
+                header->dropped++;
+                spin_unlock(&queue->lock);
+                return -ENOSPC;
+        }
+
+        for (i = 0; i < dwords; i++) {
+                queue->data[index] = data[i];
+                index = (index + 1) % header->size;
+        }
+
+        header->write_index = index;
+        spin_unlock(&queue->lock);
+
+        gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
+        return 0;
+}
+
+struct a6xx_hfi_response {
+        u32 id;
+        u32 seqnum;
+        struct list_head node;
+        struct completion complete;
+
+        u32 error;
+        u32 payload[16];
+};
+
+/*
+ * Incoming HFI ack messages can arrive out of order so we need to store all
+ * the pending messages on a list until they are handled.
+ */
+static spinlock_t hfi_ack_lock = __SPIN_LOCK_UNLOCKED(message_lock);
+static LIST_HEAD(hfi_ack_list);
+
+static void a6xx_hfi_handle_ack(struct a6xx_gmu *gmu,
+                struct a6xx_hfi_msg_response *msg)
+{
+        struct a6xx_hfi_response *resp;
+        u32 id, seqnum;
+
+        /* msg->ret_header contains the header of the message being acked */
+        id = HFI_HEADER_ID(msg->ret_header);
+        seqnum = HFI_HEADER_SEQNUM(msg->ret_header);
+
+        spin_lock(&hfi_ack_lock);
+        list_for_each_entry(resp, &hfi_ack_list, node) {
+                if (resp->id == id && resp->seqnum == seqnum) {
+                        resp->error = msg->error;
+                        memcpy(resp->payload, msg->payload,
+                                sizeof(resp->payload));
+
+                        complete(&resp->complete);
+                        spin_unlock(&hfi_ack_lock);
+                        return;
+                }
+        }
+        spin_unlock(&hfi_ack_lock);
+
+        dev_err(gmu->dev, "Nobody was waiting for HFI message %d\n", seqnum);
+}
+
+static void a6xx_hfi_handle_error(struct a6xx_gmu *gmu,
+                struct a6xx_hfi_msg_response *msg)
+{
+        struct a6xx_hfi_msg_error *error = (struct a6xx_hfi_msg_error *) msg;
+
+        dev_err(gmu->dev, "GMU firmware error %d\n", error->code);
+}
+
+void a6xx_hfi_task(unsigned long data)
+{
+        struct a6xx_gmu *gmu = (struct a6xx_gmu *) data;
+        struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+        struct a6xx_hfi_msg_response resp;
+
+        for (;;) {
+                u32 id;
+                int ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
+                        sizeof(resp) >> 2);
+
+                /* Returns the number of dwords copied or negative on error */
+                if (ret <= 0) {
+                        if (ret < 0)
+                                dev_err(gmu->dev,
+                                        "Unable to read the HFI message queue\n");
+                        break;
+                }
+
+                id = HFI_HEADER_ID(resp.header);
+
+                if (id == HFI_F2H_MSG_ACK)
+                        a6xx_hfi_handle_ack(gmu, &resp);
+                else if (id == HFI_F2H_MSG_ERROR)
+                        a6xx_hfi_handle_error(gmu, &resp);
+        }
+}
+
+static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
+                void *data, u32 size, u32 *payload, u32 payload_size)
+{
+        struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
+        struct a6xx_hfi_response resp = { 0 };
+        int ret, dwords = size >> 2;
+        u32 seqnum;
+
+        seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;
+
+        /* First dword of the message is the message header - fill it in */
+        *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
+                (dwords << 8) | id;
+
+        init_completion(&resp.complete);
+        resp.id = id;
+        resp.seqnum = seqnum;
+
+        spin_lock_bh(&hfi_ack_lock);
+        list_add_tail(&resp.node, &hfi_ack_list);
+        spin_unlock_bh(&hfi_ack_lock);
+
+        ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
+        if (ret) {
+                dev_err(gmu->dev, "Unable to send message %s id %d\n",
+                        a6xx_hfi_msg_id[id], seqnum);
+                goto out;
+        }
+
+        /* Wait up to 5 seconds for the response */
+        ret = wait_for_completion_timeout(&resp.complete,
+                msecs_to_jiffies(5000));
+        if (!ret) {
+                dev_err(gmu->dev,
+                        "Message %s id %d timed out waiting for response\n",
+                        a6xx_hfi_msg_id[id], seqnum);
+                ret = -ETIMEDOUT;
+        } else
+                ret = 0;
+
+out:
+        spin_lock_bh(&hfi_ack_lock);
+        list_del(&resp.node);
+        spin_unlock_bh(&hfi_ack_lock);
+
+        if (ret)
+                return ret;
+
+        if (resp.error) {
+                dev_err(gmu->dev, "Message %s id %d returned error %d\n",
+                        a6xx_hfi_msg_id[id], seqnum, resp.error);
+                return -EINVAL;
+        }
+
+        if (payload && payload_size) {
+                int copy = min_t(u32, payload_size, sizeof(resp.payload));
+
+                memcpy(payload, resp.payload, copy);
+        }
+
+        return 0;
+}
+
+static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
+{
+        struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
+
+        msg.dbg_buffer_addr = (u32) gmu->debug->iova;
+        msg.dbg_buffer_size = (u32) gmu->debug->size;
+        msg.boot_state = boot_state;
+
+        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
+                NULL, 0);
+}
+
+static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
+{
+        struct a6xx_hfi_msg_fw_version msg = { 0 };
+
+        /* Currently supporting version 1.1 */
+        msg.supported_version = (1 << 28) | (1 << 16);
+
+        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
+                version, sizeof(*version));
+}
+
+static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
+{
+        struct a6xx_hfi_msg_perf_table msg = { 0 };
+        int i;
+
+        msg.num_gpu_levels = gmu->nr_gpu_freqs;
+        msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
+        for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+                msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+                msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+        }
+
+        for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+                msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+                msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+        }
+
+        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+                NULL, 0);
+}
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+        struct a6xx_hfi_msg_bw_table msg = { 0 };
+
+        /*
+         * The sdm845 GMU doesn't do bus frequency scaling on its own but it
+         * does need at least one entry in the list because it might be
+         * accessed when the GMU is shutting down. Send a single "off" entry.
+         */
+
+        msg.bw_level_num = 1;
+
+        msg.ddr_cmds_num = 3;
+        msg.ddr_wait_bitmask = 0x07;
+
+        msg.ddr_cmds_addrs[0] = 0x50000;
+        msg.ddr_cmds_addrs[1] = 0x5005c;
+        msg.ddr_cmds_addrs[2] = 0x5000c;
+
+        msg.ddr_cmds_data[0][0] = 0x40000000;
+        msg.ddr_cmds_data[0][1] = 0x40000000;
+        msg.ddr_cmds_data[0][2] = 0x40000000;
+
+        /*
+         * These are the CX (CNOC) votes. The values for the sdm845 GMU are
+         * known and fixed, so we can hard code them.
+         */
+
+        msg.cnoc_cmds_num = 3;
+        msg.cnoc_wait_bitmask = 0x05;
+
+        msg.cnoc_cmds_addrs[0] = 0x50034;
+        msg.cnoc_cmds_addrs[1] = 0x5007c;
+        msg.cnoc_cmds_addrs[2] = 0x5004c;
+
+        msg.cnoc_cmds_data[0][0] = 0x40000000;
+        msg.cnoc_cmds_data[0][1] = 0x00000000;
+        msg.cnoc_cmds_data[0][2] = 0x40000000;
+
+        msg.cnoc_cmds_data[1][0] = 0x60000001;
+        msg.cnoc_cmds_data[1][1] = 0x20000001;
+        msg.cnoc_cmds_data[1][2] = 0x60000001;
+
+        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
+                NULL, 0);
+}
+
+static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
+{
+        struct a6xx_hfi_msg_test msg = { 0 };
+
+        return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
+                NULL, 0);
+}
+
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+{
+        int ret;
+
+        ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
+        if (ret)
+                return ret;
+
+        ret = a6xx_hfi_get_fw_version(gmu, NULL);
+        if (ret)
+                return ret;
+
+        /*
+         * We have to exchange version numbers per the sequence, but at this
+         * point the kernel driver doesn't need to know the exact version of
+         * the GMU firmware.
+         */
+
+        ret = a6xx_hfi_send_perf_table(gmu);
+        if (ret)
+                return ret;
+
+        ret = a6xx_hfi_send_bw_table(gmu);
+        if (ret)
+                return ret;
+
+        /*
+         * Let the GMU know that there won't be any more HFI messages until
+         * next boot.
+         */
+        a6xx_hfi_send_test(gmu);
+
+        return 0;
+}
+
+void a6xx_hfi_stop(struct a6xx_gmu *gmu)
+{
+        int i;
+
+        for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
+                struct a6xx_hfi_queue *queue = &gmu->queues[i];
+
+                if (!queue->header)
+                        continue;
+
+                if (queue->header->read_index != queue->header->write_index)
+                        dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+
+                queue->header->read_index = 0;
+                queue->header->write_index = 0;
+        }
+}
+
+static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
+                struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
+                u32 id)
+{
+        spin_lock_init(&queue->lock);
+        queue->header = header;
+        queue->data = virt;
+        atomic_set(&queue->seqnum, 0);
+
+        /* Set up the shared memory header */
+        header->iova = iova;
+        header->type = 10 << 8 | id;
+        header->status = 1;
+        header->size = SZ_4K >> 2;
+        header->msg_size = 0;
+        header->dropped = 0;
+        header->rx_watermark = 1;
+        header->tx_watermark = 1;
+        header->rx_request = 1;
+        header->tx_request = 0;
+        header->read_index = 0;
+        header->write_index = 0;
+}
+
+void a6xx_hfi_init(struct a6xx_gmu *gmu)
+{
+        struct a6xx_gmu_bo *hfi = gmu->hfi;
+        struct a6xx_hfi_queue_table_header *table = hfi->virt;
+        struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
+        u64 offset;
+        int table_size;
+
+        /*
+         * The table size is the size of the table header plus all of the
+         * queue headers
+         */
+        table_size = sizeof(*table);
+        table_size += (ARRAY_SIZE(gmu->queues) *
+                sizeof(struct a6xx_hfi_queue_header));
+
+        table->version = 0;
+        table->size = table_size;
+        /* First queue header is located immediately after the table header */
+        table->qhdr0_offset = sizeof(*table) >> 2;
+        table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
+        table->num_queues = ARRAY_SIZE(gmu->queues);
+        table->active_queues = ARRAY_SIZE(gmu->queues);
+
+        /* Command queue */
+        offset = SZ_4K;
+        a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
+                hfi->iova + offset, 0);
+
+        /* GMU response queue */
+        offset += SZ_4K;
+        a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
+                hfi->iova + offset, 4);
+}
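Note: the HFI_HEADER_ID/HFI_HEADER_SIZE/HFI_HEADER_SEQNUM accessors used above are defined in a6xx_hfi.h, which is not part of this diff. The standalone sketch below illustrates the header layout implied by the packing expression in a6xx_hfi_send_msg(); the macro definitions here are assumptions made for illustration, not the driver's actual definitions.

/*
 * Minimal sketch of the HFI message header layout inferred from
 * (seqnum << 20) | (HFI_MSG_CMD << 16) | (dwords << 8) | id in
 * a6xx_hfi_send_msg(). Field widths are assumed: bits [7:0] = message id,
 * [15:8] = size in dwords, [19:16] = message type, [31:20] = sequence
 * number (consistent with seqnum being taken modulo 0xfff).
 */
#include <stdint.h>
#include <stdio.h>

#define HFI_MSG_CMD 0 /* assumed type value; the real one lives in a6xx_hfi.h */

#define HFI_HEADER_ID(msg)      ((msg) & 0xff)
#define HFI_HEADER_SIZE(msg)    (((msg) >> 8) & 0xff)
#define HFI_HEADER_SEQNUM(msg)  (((msg) >> 20) & 0xfff)

int main(void)
{
        uint32_t seqnum = 7, dwords = 4, id = 0x10;

        /* Pack a header the way a6xx_hfi_send_msg() fills in the first dword */
        uint32_t hdr = (seqnum << 20) | (HFI_MSG_CMD << 16) | (dwords << 8) | id;

        /* Unpack the fields that a6xx_hfi_handle_ack() matches acks against */
        printf("id=%u size=%u seqnum=%u\n",
               HFI_HEADER_ID(hdr), HFI_HEADER_SIZE(hdr), HFI_HEADER_SEQNUM(hdr));
        return 0;
}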