Diffstat (limited to 'drivers')
89 files changed, 4696 insertions, 1058 deletions
diff --git a/drivers/block/mtip32xx/mtip32xx.c b/drivers/block/mtip32xx/mtip32xx.c index 2aca98e8e427..88c46853dbb5 100644 --- a/drivers/block/mtip32xx/mtip32xx.c +++ b/drivers/block/mtip32xx/mtip32xx.c @@ -3686,7 +3686,7 @@ static int mtip_block_open(struct block_device *dev, fmode_t mode) return -ENODEV; } -void mtip_block_release(struct gendisk *disk, fmode_t mode) +static void mtip_block_release(struct gendisk *disk, fmode_t mode) { } diff --git a/drivers/block/nbd.c b/drivers/block/nbd.c index a9e398019f38..ccfcfc11399a 100644 --- a/drivers/block/nbd.c +++ b/drivers/block/nbd.c @@ -34,33 +34,29 @@ #include <linux/kthread.h> #include <linux/types.h> #include <linux/debugfs.h> +#include <linux/blk-mq.h> #include <asm/uaccess.h> #include <asm/types.h> #include <linux/nbd.h> +#define NBD_TIMEDOUT 0 +#define NBD_DISCONNECT_REQUESTED 1 + struct nbd_device { u32 flags; + unsigned long runtime_flags; struct socket * sock; /* If == NULL, device is not ready, yet */ int magic; - spinlock_t queue_lock; - struct list_head queue_head; /* Requests waiting result */ - struct request *active_req; - wait_queue_head_t active_wq; - struct list_head waiting_queue; /* Requests to be sent */ - wait_queue_head_t waiting_wq; + struct blk_mq_tag_set tag_set; struct mutex tx_lock; struct gendisk *disk; int blksize; loff_t bytesize; - int xmit_timeout; - bool timedout; - bool disconnect; /* a disconnect has been requested by user */ - struct timer_list timeout_timer; /* protects initialization and shutdown of the socket */ spinlock_t sock_lock; struct task_struct *task_recv; @@ -71,6 +67,11 @@ struct nbd_device { #endif }; +struct nbd_cmd { + struct nbd_device *nbd; + struct list_head list; +}; + #if IS_ENABLED(CONFIG_DEBUG_FS) static struct dentry *nbd_dbg_dir; #endif @@ -83,18 +84,6 @@ static unsigned int nbds_max = 16; static struct nbd_device *nbd_dev; static int max_part; -/* - * Use just one lock (or at most 1 per NIC). Two arguments for this: - * 1. Each NIC is essentially a synchronization point for all servers - * accessed through that NIC so there's no need to have more locks - * than NICs anyway. - * 2. More locks lead to more "Dirty cache line bouncing" which will slow - * down each lock to the point where they're actually slower than just - * a single lock. - * Thanks go to Jens Axboe and Al Viro for their LKML emails explaining this! - */ -static DEFINE_SPINLOCK(nbd_lock); - static inline struct device *nbd_to_dev(struct nbd_device *nbd) { return disk_to_dev(nbd->disk); @@ -153,18 +142,16 @@ static int nbd_size_set(struct nbd_device *nbd, struct block_device *bdev, return 0; } -static void nbd_end_request(struct nbd_device *nbd, struct request *req) +static void nbd_end_request(struct nbd_cmd *cmd) { + struct nbd_device *nbd = cmd->nbd; + struct request *req = blk_mq_rq_from_pdu(cmd); int error = req->errors ? -EIO : 0; - struct request_queue *q = req->q; - unsigned long flags; - dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", req, + dev_dbg(nbd_to_dev(nbd), "request %p: %s\n", cmd, error ? 
"failed" : "done"); - spin_lock_irqsave(q->queue_lock, flags); - __blk_end_request_all(req, error); - spin_unlock_irqrestore(q->queue_lock, flags); + blk_mq_complete_request(req, error); } /* @@ -172,40 +159,49 @@ static void nbd_end_request(struct nbd_device *nbd, struct request *req) */ static void sock_shutdown(struct nbd_device *nbd) { - spin_lock_irq(&nbd->sock_lock); + struct socket *sock; + + spin_lock(&nbd->sock_lock); if (!nbd->sock) { spin_unlock_irq(&nbd->sock_lock); return; } + sock = nbd->sock; dev_warn(disk_to_dev(nbd->disk), "shutting down socket\n"); - kernel_sock_shutdown(nbd->sock, SHUT_RDWR); - sockfd_put(nbd->sock); nbd->sock = NULL; - spin_unlock_irq(&nbd->sock_lock); + spin_unlock(&nbd->sock_lock); - del_timer(&nbd->timeout_timer); + kernel_sock_shutdown(sock, SHUT_RDWR); + sockfd_put(sock); } -static void nbd_xmit_timeout(unsigned long arg) +static enum blk_eh_timer_return nbd_xmit_timeout(struct request *req, + bool reserved) { - struct nbd_device *nbd = (struct nbd_device *)arg; - unsigned long flags; - - if (list_empty(&nbd->queue_head)) - return; + struct nbd_cmd *cmd = blk_mq_rq_to_pdu(req); + struct nbd_device *nbd = cmd->nbd; + struct socket *sock = NULL; - spin_lock_irqsave(&nbd->sock_lock, flags); + spin_lock(&nbd->sock_lock); - nbd->timedout = true; + set_bit(NBD_TIMEDOUT, &nbd->runtime_flags); - if (nbd->sock) - kernel_sock_shutdown(nbd->sock, SHUT_RDWR); + if (nbd->sock) { + sock = nbd->sock; + get_file(sock->file); + } - spin_unlock_irqrestore(&nbd->sock_lock, flags); + spin_unlock(&nbd->sock_lock); + if (sock) { + kernel_sock_shutdown(sock, SHUT_RDWR); + sockfd_put(sock); + } + req->errors++; dev_err(nbd_to_dev(nbd), "Connection timed out, shutting down connection\n"); + return BLK_EH_HANDLED; } /* @@ -255,9 +251,6 @@ static int sock_xmit(struct nbd_device *nbd, int send, void *buf, int size, tsk_restore_flags(current, pflags, PF_MEMALLOC); - if (!send && nbd->xmit_timeout) - mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout); - return result; } @@ -273,8 +266,9 @@ static inline int sock_send_bvec(struct nbd_device *nbd, struct bio_vec *bvec, } /* always call with the tx_lock held */ -static int nbd_send_req(struct nbd_device *nbd, struct request *req) +static int nbd_send_cmd(struct nbd_device *nbd, struct nbd_cmd *cmd) { + struct request *req = blk_mq_rq_from_pdu(cmd); int result, flags; struct nbd_request request; unsigned long size = blk_rq_bytes(req); @@ -298,10 +292,10 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) request.from = cpu_to_be64((u64)blk_rq_pos(req) << 9); request.len = htonl(size); } - memcpy(request.handle, &req, sizeof(req)); + memcpy(request.handle, &req->tag, sizeof(req->tag)); dev_dbg(nbd_to_dev(nbd), "request %p: sending control (%s@%llu,%uB)\n", - req, nbdcmd_to_ascii(type), + cmd, nbdcmd_to_ascii(type), (unsigned long long)blk_rq_pos(req) << 9, blk_rq_bytes(req)); result = sock_xmit(nbd, 1, &request, sizeof(request), (type == NBD_CMD_WRITE) ? 
MSG_MORE : 0); @@ -323,7 +317,7 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) if (!rq_iter_last(bvec, iter)) flags = MSG_MORE; dev_dbg(nbd_to_dev(nbd), "request %p: sending %d bytes data\n", - req, bvec.bv_len); + cmd, bvec.bv_len); result = sock_send_bvec(nbd, &bvec, flags); if (result <= 0) { dev_err(disk_to_dev(nbd->disk), @@ -336,29 +330,6 @@ static int nbd_send_req(struct nbd_device *nbd, struct request *req) return 0; } -static struct request *nbd_find_request(struct nbd_device *nbd, - struct request *xreq) -{ - struct request *req, *tmp; - int err; - - err = wait_event_interruptible(nbd->active_wq, nbd->active_req != xreq); - if (unlikely(err)) - return ERR_PTR(err); - - spin_lock(&nbd->queue_lock); - list_for_each_entry_safe(req, tmp, &nbd->queue_head, queuelist) { - if (req != xreq) - continue; - list_del_init(&req->queuelist); - spin_unlock(&nbd->queue_lock); - return req; - } - spin_unlock(&nbd->queue_lock); - - return ERR_PTR(-ENOENT); -} - static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec) { int result; @@ -370,11 +341,14 @@ static inline int sock_recv_bvec(struct nbd_device *nbd, struct bio_vec *bvec) } /* NULL returned = something went wrong, inform userspace */ -static struct request *nbd_read_stat(struct nbd_device *nbd) +static struct nbd_cmd *nbd_read_stat(struct nbd_device *nbd) { int result; struct nbd_reply reply; - struct request *req; + struct nbd_cmd *cmd; + struct request *req = NULL; + u16 hwq; + int tag; reply.magic = 0; result = sock_xmit(nbd, 0, &reply, sizeof(reply), MSG_WAITALL); @@ -390,25 +364,27 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) return ERR_PTR(-EPROTO); } - req = nbd_find_request(nbd, *(struct request **)reply.handle); - if (IS_ERR(req)) { - result = PTR_ERR(req); - if (result != -ENOENT) - return ERR_PTR(result); + memcpy(&tag, reply.handle, sizeof(int)); - dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%p)\n", - reply.handle); - return ERR_PTR(-EBADR); + hwq = blk_mq_unique_tag_to_hwq(tag); + if (hwq < nbd->tag_set.nr_hw_queues) + req = blk_mq_tag_to_rq(nbd->tag_set.tags[hwq], + blk_mq_unique_tag_to_tag(tag)); + if (!req || !blk_mq_request_started(req)) { + dev_err(disk_to_dev(nbd->disk), "Unexpected reply (%d) %p\n", + tag, req); + return ERR_PTR(-ENOENT); } + cmd = blk_mq_rq_to_pdu(req); if (ntohl(reply.error)) { dev_err(disk_to_dev(nbd->disk), "Other side returned error (%d)\n", ntohl(reply.error)); req->errors++; - return req; + return cmd; } - dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", req); + dev_dbg(nbd_to_dev(nbd), "request %p: got reply\n", cmd); if (rq_data_dir(req) != WRITE) { struct req_iterator iter; struct bio_vec bvec; @@ -419,13 +395,13 @@ static struct request *nbd_read_stat(struct nbd_device *nbd) dev_err(disk_to_dev(nbd->disk), "Receive data failed (result %d)\n", result); req->errors++; - return req; + return cmd; } dev_dbg(nbd_to_dev(nbd), "request %p: got %d bytes data\n", - req, bvec.bv_len); + cmd, bvec.bv_len); } } - return req; + return cmd; } static ssize_t pid_show(struct device *dev, @@ -444,7 +420,7 @@ static struct device_attribute pid_attr = { static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev) { - struct request *req; + struct nbd_cmd *cmd; int ret; BUG_ON(nbd->magic != NBD_MAGIC); @@ -460,13 +436,13 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev) nbd_size_update(nbd, bdev); while (1) { - req = nbd_read_stat(nbd); - if (IS_ERR(req)) { - ret = PTR_ERR(req); + cmd = 
nbd_read_stat(nbd); + if (IS_ERR(cmd)) { + ret = PTR_ERR(cmd); break; } - nbd_end_request(nbd, req); + nbd_end_request(cmd); } nbd_size_clear(nbd, bdev); @@ -475,44 +451,37 @@ static int nbd_thread_recv(struct nbd_device *nbd, struct block_device *bdev) return ret; } -static void nbd_clear_que(struct nbd_device *nbd) +static void nbd_clear_req(struct request *req, void *data, bool reserved) { - struct request *req; + struct nbd_cmd *cmd; + + if (!blk_mq_request_started(req)) + return; + cmd = blk_mq_rq_to_pdu(req); + req->errors++; + nbd_end_request(cmd); +} +static void nbd_clear_que(struct nbd_device *nbd) +{ BUG_ON(nbd->magic != NBD_MAGIC); /* * Because we have set nbd->sock to NULL under the tx_lock, all - * modifications to the list must have completed by now. For - * the same reason, the active_req must be NULL. - * - * As a consequence, we don't need to take the spin lock while - * purging the list here. + * modifications to the list must have completed by now. */ BUG_ON(nbd->sock); - BUG_ON(nbd->active_req); - while (!list_empty(&nbd->queue_head)) { - req = list_entry(nbd->queue_head.next, struct request, - queuelist); - list_del_init(&req->queuelist); - req->errors++; - nbd_end_request(nbd, req); - } - - while (!list_empty(&nbd->waiting_queue)) { - req = list_entry(nbd->waiting_queue.next, struct request, - queuelist); - list_del_init(&req->queuelist); - req->errors++; - nbd_end_request(nbd, req); - } + blk_mq_tagset_busy_iter(&nbd->tag_set, nbd_clear_req, NULL); dev_dbg(disk_to_dev(nbd->disk), "queue cleared\n"); } -static void nbd_handle_req(struct nbd_device *nbd, struct request *req) +static void nbd_handle_cmd(struct nbd_cmd *cmd) { + struct request *req = blk_mq_rq_from_pdu(cmd); + struct nbd_device *nbd = cmd->nbd; + if (req->cmd_type != REQ_TYPE_FS) goto error_out; @@ -526,6 +495,7 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req) req->errors = 0; mutex_lock(&nbd->tx_lock); + nbd->task_send = current; if (unlikely(!nbd->sock)) { mutex_unlock(&nbd->tx_lock); dev_err(disk_to_dev(nbd->disk), @@ -533,106 +503,30 @@ static void nbd_handle_req(struct nbd_device *nbd, struct request *req) goto error_out; } - nbd->active_req = req; - - if (nbd->xmit_timeout && list_empty_careful(&nbd->queue_head)) - mod_timer(&nbd->timeout_timer, jiffies + nbd->xmit_timeout); - - if (nbd_send_req(nbd, req) != 0) { + if (nbd_send_cmd(nbd, cmd) != 0) { dev_err(disk_to_dev(nbd->disk), "Request send failed\n"); req->errors++; - nbd_end_request(nbd, req); - } else { - spin_lock(&nbd->queue_lock); - list_add_tail(&req->queuelist, &nbd->queue_head); - spin_unlock(&nbd->queue_lock); + nbd_end_request(cmd); } - nbd->active_req = NULL; + nbd->task_send = NULL; mutex_unlock(&nbd->tx_lock); - wake_up_all(&nbd->active_wq); return; error_out: req->errors++; - nbd_end_request(nbd, req); + nbd_end_request(cmd); } -static int nbd_thread_send(void *data) +static int nbd_queue_rq(struct blk_mq_hw_ctx *hctx, + const struct blk_mq_queue_data *bd) { - struct nbd_device *nbd = data; - struct request *req; - - nbd->task_send = current; - - set_user_nice(current, MIN_NICE); - while (!kthread_should_stop() || !list_empty(&nbd->waiting_queue)) { - /* wait for something to do */ - wait_event_interruptible(nbd->waiting_wq, - kthread_should_stop() || - !list_empty(&nbd->waiting_queue)); - - /* extract request */ - if (list_empty(&nbd->waiting_queue)) - continue; - - spin_lock_irq(&nbd->queue_lock); - req = list_entry(nbd->waiting_queue.next, struct request, - queuelist); - 
list_del_init(&req->queuelist); - spin_unlock_irq(&nbd->queue_lock); - - /* handle request */ - nbd_handle_req(nbd, req); - } + struct nbd_cmd *cmd = blk_mq_rq_to_pdu(bd->rq); - nbd->task_send = NULL; - - return 0; -} - -/* - * We always wait for result of write, for now. It would be nice to make it optional - * in future - * if ((rq_data_dir(req) == WRITE) && (nbd->flags & NBD_WRITE_NOCHK)) - * { printk( "Warning: Ignoring result!\n"); nbd_end_request( req ); } - */ - -static void nbd_request_handler(struct request_queue *q) - __releases(q->queue_lock) __acquires(q->queue_lock) -{ - struct request *req; - - while ((req = blk_fetch_request(q)) != NULL) { - struct nbd_device *nbd; - - spin_unlock_irq(q->queue_lock); - - nbd = req->rq_disk->private_data; - - BUG_ON(nbd->magic != NBD_MAGIC); - - dev_dbg(nbd_to_dev(nbd), "request %p: dequeued (flags=%x)\n", - req, req->cmd_type); - - if (unlikely(!nbd->sock)) { - dev_err_ratelimited(disk_to_dev(nbd->disk), - "Attempted send on closed socket\n"); - req->errors++; - nbd_end_request(nbd, req); - spin_lock_irq(q->queue_lock); - continue; - } - - spin_lock_irq(&nbd->queue_lock); - list_add_tail(&req->queuelist, &nbd->waiting_queue); - spin_unlock_irq(&nbd->queue_lock); - - wake_up(&nbd->waiting_wq); - - spin_lock_irq(q->queue_lock); - } + blk_mq_start_request(bd->rq); + nbd_handle_cmd(cmd); + return BLK_MQ_RQ_QUEUE_OK; } static int nbd_set_socket(struct nbd_device *nbd, struct socket *sock) @@ -657,15 +551,13 @@ out: /* Reset all properties of an NBD device */ static void nbd_reset(struct nbd_device *nbd) { - nbd->disconnect = false; - nbd->timedout = false; + nbd->runtime_flags = 0; nbd->blksize = 1024; nbd->bytesize = 0; set_capacity(nbd->disk, 0); nbd->flags = 0; - nbd->xmit_timeout = 0; + nbd->tag_set.timeout = 0; queue_flag_clear_unlocked(QUEUE_FLAG_DISCARD, nbd->disk->queue); - del_timer_sync(&nbd->timeout_timer); } static void nbd_bdev_reset(struct block_device *bdev) @@ -700,33 +592,37 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, { switch (cmd) { case NBD_DISCONNECT: { - struct request sreq; + struct request *sreq; dev_info(disk_to_dev(nbd->disk), "NBD_DISCONNECT\n"); if (!nbd->sock) return -EINVAL; + sreq = blk_mq_alloc_request(bdev_get_queue(bdev), WRITE, 0); + if (!sreq) + return -ENOMEM; + mutex_unlock(&nbd->tx_lock); fsync_bdev(bdev); mutex_lock(&nbd->tx_lock); - blk_rq_init(NULL, &sreq); - sreq.cmd_type = REQ_TYPE_DRV_PRIV; + sreq->cmd_type = REQ_TYPE_DRV_PRIV; /* Check again after getting mutex back. 
*/ - if (!nbd->sock) + if (!nbd->sock) { + blk_mq_free_request(sreq); return -EINVAL; + } - nbd->disconnect = true; + set_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags); - nbd_send_req(nbd, &sreq); + nbd_send_cmd(nbd, blk_mq_rq_to_pdu(sreq)); + blk_mq_free_request(sreq); return 0; } case NBD_CLEAR_SOCK: sock_shutdown(nbd); nbd_clear_que(nbd); - BUG_ON(!list_empty(&nbd->queue_head)); - BUG_ON(!list_empty(&nbd->waiting_queue)); kill_bdev(bdev); return 0; @@ -758,13 +654,7 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, return nbd_size_set(nbd, bdev, nbd->blksize, arg); case NBD_SET_TIMEOUT: - nbd->xmit_timeout = arg * HZ; - if (arg) - mod_timer(&nbd->timeout_timer, - jiffies + nbd->xmit_timeout); - else - del_timer_sync(&nbd->timeout_timer); - + nbd->tag_set.timeout = arg * HZ; return 0; case NBD_SET_FLAGS: @@ -772,7 +662,6 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, return 0; case NBD_DO_IT: { - struct task_struct *thread; int error; if (nbd->task_recv) @@ -786,18 +675,9 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, nbd_parse_flags(nbd, bdev); - thread = kthread_run(nbd_thread_send, nbd, "%s", - nbd_name(nbd)); - if (IS_ERR(thread)) { - mutex_lock(&nbd->tx_lock); - nbd->task_recv = NULL; - return PTR_ERR(thread); - } - nbd_dev_dbg_init(nbd); error = nbd_thread_recv(nbd, bdev); nbd_dev_dbg_close(nbd); - kthread_stop(thread); mutex_lock(&nbd->tx_lock); nbd->task_recv = NULL; @@ -807,9 +687,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, kill_bdev(bdev); nbd_bdev_reset(bdev); - if (nbd->disconnect) /* user requested, ignore socket errors */ + /* user requested, ignore socket errors */ + if (test_bit(NBD_DISCONNECT_REQUESTED, &nbd->runtime_flags)) error = 0; - if (nbd->timedout) + if (test_bit(NBD_TIMEDOUT, &nbd->runtime_flags)) error = -ETIMEDOUT; nbd_reset(nbd); @@ -825,10 +706,10 @@ static int __nbd_ioctl(struct block_device *bdev, struct nbd_device *nbd, return 0; case NBD_PRINT_DEBUG: - dev_info(disk_to_dev(nbd->disk), - "next = %p, prev = %p, head = %p\n", - nbd->queue_head.next, nbd->queue_head.prev, - &nbd->queue_head); + /* + * For compatibility only, we no longer keep a list of + * outstanding requests. 
+ */ return 0; } return -ENOTTY; @@ -935,7 +816,7 @@ static int nbd_dev_dbg_init(struct nbd_device *nbd) debugfs_create_file("tasks", 0444, dir, nbd, &nbd_dbg_tasks_ops); debugfs_create_u64("size_bytes", 0444, dir, &nbd->bytesize); - debugfs_create_u32("timeout", 0444, dir, &nbd->xmit_timeout); + debugfs_create_u32("timeout", 0444, dir, &nbd->tag_set.timeout); debugfs_create_u32("blocksize", 0444, dir, &nbd->blksize); debugfs_create_file("flags", 0444, dir, nbd, &nbd_dbg_flags_ops); @@ -987,6 +868,24 @@ static void nbd_dbg_close(void) #endif +static int nbd_init_request(void *data, struct request *rq, + unsigned int hctx_idx, unsigned int request_idx, + unsigned int numa_node) +{ + struct nbd_cmd *cmd = blk_mq_rq_to_pdu(rq); + + cmd->nbd = data; + INIT_LIST_HEAD(&cmd->list); + return 0; +} + +static struct blk_mq_ops nbd_mq_ops = { + .queue_rq = nbd_queue_rq, + .map_queue = blk_mq_map_queue, + .init_request = nbd_init_request, + .timeout = nbd_xmit_timeout, +}; + /* * And here should be modules and kernel interface * (Just smiley confuses emacs :-) @@ -1035,16 +934,34 @@ static int __init nbd_init(void) if (!disk) goto out; nbd_dev[i].disk = disk; + + nbd_dev[i].tag_set.ops = &nbd_mq_ops; + nbd_dev[i].tag_set.nr_hw_queues = 1; + nbd_dev[i].tag_set.queue_depth = 128; + nbd_dev[i].tag_set.numa_node = NUMA_NO_NODE; + nbd_dev[i].tag_set.cmd_size = sizeof(struct nbd_cmd); + nbd_dev[i].tag_set.flags = BLK_MQ_F_SHOULD_MERGE | + BLK_MQ_F_SG_MERGE | BLK_MQ_F_BLOCKING; + nbd_dev[i].tag_set.driver_data = &nbd_dev[i]; + + err = blk_mq_alloc_tag_set(&nbd_dev[i].tag_set); + if (err) { + put_disk(disk); + goto out; + } + /* * The new linux 2.5 block layer implementation requires * every gendisk to have its very own request_queue struct. * These structs are big so we dynamically allocate them. 
*/ - disk->queue = blk_init_queue(nbd_request_handler, &nbd_lock); + disk->queue = blk_mq_init_queue(&nbd_dev[i].tag_set); if (!disk->queue) { + blk_mq_free_tag_set(&nbd_dev[i].tag_set); put_disk(disk); goto out; } + /* * Tell the block layer that we are not a rotational device */ @@ -1069,16 +986,8 @@ static int __init nbd_init(void) for (i = 0; i < nbds_max; i++) { struct gendisk *disk = nbd_dev[i].disk; nbd_dev[i].magic = NBD_MAGIC; - INIT_LIST_HEAD(&nbd_dev[i].waiting_queue); - spin_lock_init(&nbd_dev[i].queue_lock); spin_lock_init(&nbd_dev[i].sock_lock); - INIT_LIST_HEAD(&nbd_dev[i].queue_head); mutex_init(&nbd_dev[i].tx_lock); - init_timer(&nbd_dev[i].timeout_timer); - nbd_dev[i].timeout_timer.function = nbd_xmit_timeout; - nbd_dev[i].timeout_timer.data = (unsigned long)&nbd_dev[i]; - init_waitqueue_head(&nbd_dev[i].active_wq); - init_waitqueue_head(&nbd_dev[i].waiting_wq); disk->major = NBD_MAJOR; disk->first_minor = i << part_shift; disk->fops = &nbd_fops; @@ -1091,6 +1000,7 @@ static int __init nbd_init(void) return 0; out: while (i--) { + blk_mq_free_tag_set(&nbd_dev[i].tag_set); blk_cleanup_queue(nbd_dev[i].disk->queue); put_disk(nbd_dev[i].disk); } @@ -1110,6 +1020,7 @@ static void __exit nbd_cleanup(void) if (disk) { del_gendisk(disk); blk_cleanup_queue(disk->queue); + blk_mq_free_tag_set(&nbd_dev[i].tag_set); put_disk(disk); } } diff --git a/drivers/block/null_blk.c b/drivers/block/null_blk.c index 75a7f88d6717..91e1de898daf 100644 --- a/drivers/block/null_blk.c +++ b/drivers/block/null_blk.c @@ -34,6 +34,7 @@ struct nullb { unsigned int index; struct request_queue *q; struct gendisk *disk; + struct nvm_dev *ndev; struct blk_mq_tag_set tag_set; struct hrtimer timer; unsigned int queue_depth; @@ -414,23 +415,6 @@ static void cleanup_queues(struct nullb *nullb) kfree(nullb->queues); } -static void null_del_dev(struct nullb *nullb) -{ - list_del_init(&nullb->list); - - if (use_lightnvm) - nvm_unregister(nullb->disk_name); - else - del_gendisk(nullb->disk); - blk_cleanup_queue(nullb->q); - if (queue_mode == NULL_Q_MQ) - blk_mq_free_tag_set(&nullb->tag_set); - if (!use_lightnvm) - put_disk(nullb->disk); - cleanup_queues(nullb); - kfree(nullb); -} - #ifdef CONFIG_NVM static void null_lnvm_end_io(struct request *rq, int error) @@ -564,10 +548,58 @@ static struct nvm_dev_ops null_lnvm_dev_ops = { /* Simulate nvme protocol restriction */ .max_phys_sect = 64, }; + +static int null_nvm_register(struct nullb *nullb) +{ + struct nvm_dev *dev; + int rv; + + dev = nvm_alloc_dev(0); + if (!dev) + return -ENOMEM; + + dev->q = nullb->q; + memcpy(dev->name, nullb->disk_name, DISK_NAME_LEN); + dev->ops = &null_lnvm_dev_ops; + + rv = nvm_register(dev); + if (rv) { + kfree(dev); + return rv; + } + nullb->ndev = dev; + return 0; +} + +static void null_nvm_unregister(struct nullb *nullb) +{ + nvm_unregister(nullb->ndev); +} #else -static struct nvm_dev_ops null_lnvm_dev_ops; +static int null_nvm_register(struct nullb *nullb) +{ + return -EINVAL; +} +static void null_nvm_unregister(struct nullb *nullb) {} #endif /* CONFIG_NVM */ +static void null_del_dev(struct nullb *nullb) +{ + list_del_init(&nullb->list); + + if (use_lightnvm) + null_nvm_unregister(nullb); + else + del_gendisk(nullb->disk); + blk_cleanup_queue(nullb->q); + if (queue_mode == NULL_Q_MQ) + blk_mq_free_tag_set(&nullb->tag_set); + if (!use_lightnvm) + put_disk(nullb->disk); + cleanup_queues(nullb); + kfree(nullb); +} + static int null_open(struct block_device *bdev, fmode_t mode) { return 0; @@ -640,11 +672,32 @@ static int 
init_driver_queues(struct nullb *nullb) return 0; } -static int null_add_dev(void) +static int null_gendisk_register(struct nullb *nullb) { struct gendisk *disk; - struct nullb *nullb; sector_t size; + + disk = nullb->disk = alloc_disk_node(1, home_node); + if (!disk) + return -ENOMEM; + size = gb * 1024 * 1024 * 1024ULL; + set_capacity(disk, size >> 9); + + disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; + disk->major = null_major; + disk->first_minor = nullb->index; + disk->fops = &null_fops; + disk->private_data = nullb; + disk->queue = nullb->q; + strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); + + add_disk(disk); + return 0; +} + +static int null_add_dev(void) +{ + struct nullb *nullb; int rv; nullb = kzalloc_node(sizeof(*nullb), GFP_KERNEL, home_node); @@ -716,42 +769,19 @@ static int null_add_dev(void) sprintf(nullb->disk_name, "nullb%d", nullb->index); - if (use_lightnvm) { - rv = nvm_register(nullb->q, nullb->disk_name, - &null_lnvm_dev_ops); - if (rv) - goto out_cleanup_blk_queue; - goto done; - } - - disk = nullb->disk = alloc_disk_node(1, home_node); - if (!disk) { - rv = -ENOMEM; - goto out_cleanup_lightnvm; - } - size = gb * 1024 * 1024 * 1024ULL; - set_capacity(disk, size >> 9); - - disk->flags |= GENHD_FL_EXT_DEVT | GENHD_FL_SUPPRESS_PARTITION_INFO; - disk->major = null_major; - disk->first_minor = nullb->index; - disk->fops = &null_fops; - disk->private_data = nullb; - disk->queue = nullb->q; - strncpy(disk->disk_name, nullb->disk_name, DISK_NAME_LEN); + if (use_lightnvm) + rv = null_nvm_register(nullb); + else + rv = null_gendisk_register(nullb); - add_disk(disk); + if (rv) + goto out_cleanup_blk_queue; -done: mutex_lock(&lock); list_add_tail(&nullb->list, &nullb_list); mutex_unlock(&lock); return 0; - -out_cleanup_lightnvm: - if (use_lightnvm) - nvm_unregister(nullb->disk_name); out_cleanup_blk_queue: blk_cleanup_queue(nullb->q); out_cleanup_tags: diff --git a/drivers/bus/Kconfig b/drivers/bus/Kconfig index 3b205e212337..7010dcac9328 100644 --- a/drivers/bus/Kconfig +++ b/drivers/bus/Kconfig @@ -108,6 +108,14 @@ config OMAP_OCP2SCP OCP2SCP and in OMAP5, both USB PHY and SATA PHY is connected via OCP2SCP. +config QCOM_EBI2 + bool "Qualcomm External Bus Interface 2 (EBI2)" + depends on HAS_IOMEM + help + Say y here to enable support for the Qualcomm External Bus + Interface 2, which can be used to connect things like NAND Flash, + SRAM, ethernet adapters, FPGAs and LCD displays. + config SIMPLE_PM_BUS bool "Simple Power-Managed Bus Driver" depends on OF && PM @@ -132,12 +140,8 @@ config SUNXI_RSB with various RSB based devices, such as AXP223, AXP8XX PMICs, and AC100/AC200 ICs. -# TODO: This uses pm_clk_*() symbols that aren't exported in v4.7 and hence -# the driver will fail to build as a module. However there are patches to -# address that queued for v4.8, so this can be turned into a tristate symbol -# after v4.8-rc1. 
config TEGRA_ACONNECT - bool "Tegra ACONNECT Bus Driver" + tristate "Tegra ACONNECT Bus Driver" depends on ARCH_TEGRA_210_SOC depends on OF && PM select PM_CLK diff --git a/drivers/bus/Makefile b/drivers/bus/Makefile index ac84cc4348e3..c6cfa6b2606e 100644 --- a/drivers/bus/Makefile +++ b/drivers/bus/Makefile @@ -15,6 +15,7 @@ obj-$(CONFIG_MVEBU_MBUS) += mvebu-mbus.o obj-$(CONFIG_OMAP_INTERCONNECT) += omap_l3_smx.o omap_l3_noc.o obj-$(CONFIG_OMAP_OCP2SCP) += omap-ocp2scp.o +obj-$(CONFIG_QCOM_EBI2) += qcom-ebi2.o obj-$(CONFIG_SUNXI_RSB) += sunxi-rsb.o obj-$(CONFIG_SIMPLE_PM_BUS) += simple-pm-bus.o obj-$(CONFIG_TEGRA_ACONNECT) += tegra-aconnect.o diff --git a/drivers/bus/qcom-ebi2.c b/drivers/bus/qcom-ebi2.c new file mode 100644 index 000000000000..a6444244c411 --- /dev/null +++ b/drivers/bus/qcom-ebi2.c @@ -0,0 +1,408 @@ +/* + * Qualcomm External Bus Interface 2 (EBI2) driver + * an older version of the Qualcomm Parallel Interface Controller (QPIC) + * + * Copyright (C) 2016 Linaro Ltd. + * + * Author: Linus Walleij <linus.walleij@linaro.org> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2, as + * published by the Free Software Foundation. + * + * See the device tree bindings for this block for more details on the + * hardware. + */ + +#include <linux/module.h> +#include <linux/clk.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/init.h> +#include <linux/io.h> +#include <linux/slab.h> +#include <linux/platform_device.h> +#include <linux/bitops.h> + +/* + * CS0, CS1, CS4 and CS5 are two bits wide, CS2 and CS3 are one bit. + */ +#define EBI2_CS0_ENABLE_MASK BIT(0)|BIT(1) +#define EBI2_CS1_ENABLE_MASK BIT(2)|BIT(3) +#define EBI2_CS2_ENABLE_MASK BIT(4) +#define EBI2_CS3_ENABLE_MASK BIT(5) +#define EBI2_CS4_ENABLE_MASK BIT(6)|BIT(7) +#define EBI2_CS5_ENABLE_MASK BIT(8)|BIT(9) +#define EBI2_CSN_MASK GENMASK(9, 0) + +#define EBI2_XMEM_CFG 0x0000 /* Power management etc */ + +/* + * SLOW CSn CFG + * + * Bits 31-28: RECOVERY recovery cycles (0 = 1, 1 = 2 etc) this is the time the + * memory continues to drive the data bus after OE is de-asserted. + * Inserted when reading one CS and switching to another CS or read + * followed by write on the same CS. Valid values 0 thru 15. + * Bits 27-24: WR_HOLD write hold cycles, these are extra cycles inserted after + * every write minimum 1. The data out is driven from the time WE is + * asserted until CS is asserted. With a hold of 1, the CS stays + * active for 1 extra cycle etc. Valid values 0 thru 15. + * Bits 23-16: WR_DELTA initial latency for write cycles inserted for the first + * write to a page or burst memory + * Bits 15-8: RD_DELTA initial latency for read cycles inserted for the first + * read to a page or burst memory + * Bits 7-4: WR_WAIT number of wait cycles for every write access, 0=1 cycle + * so 1 thru 16 cycles. + * Bits 3-0: RD_WAIT number of wait cycles for every read access, 0=1 cycle + * so 1 thru 16 cycles. 
+ */ +#define EBI2_XMEM_CS0_SLOW_CFG 0x0008 +#define EBI2_XMEM_CS1_SLOW_CFG 0x000C +#define EBI2_XMEM_CS2_SLOW_CFG 0x0010 +#define EBI2_XMEM_CS3_SLOW_CFG 0x0014 +#define EBI2_XMEM_CS4_SLOW_CFG 0x0018 +#define EBI2_XMEM_CS5_SLOW_CFG 0x001C + +#define EBI2_XMEM_RECOVERY_SHIFT 28 +#define EBI2_XMEM_WR_HOLD_SHIFT 24 +#define EBI2_XMEM_WR_DELTA_SHIFT 16 +#define EBI2_XMEM_RD_DELTA_SHIFT 8 +#define EBI2_XMEM_WR_WAIT_SHIFT 4 +#define EBI2_XMEM_RD_WAIT_SHIFT 0 + +/* + * FAST CSn CFG + * Bits 31-28: ? + * Bits 27-24: RD_HOLD: the length in cycles of the first segment of a read + * transfer. For a single read transfer this will be the time + * from CS assertion to OE assertion. + * Bits 23-18: ? + * Bits 17-16: ADV_OE_RECOVERY, the number of cycles elapsed before an OE + * assertion, with respect to the cycle where ADV is asserted. + * 2 means 2 cycles between ADV and OE. Values 0, 1, 2 or 3. + * Bit 5: ADDR_HOLD_ENA, The address is held for an extra cycle to meet + * hold time requirements with ADV assertion. + * + * The manual mentions "write precharge cycles" and "precharge cycles". + * We have not been able to figure out which bit fields these correspond to + * in the hardware, or what valid values exist. The current hypothesis is that + * this is something just used on the FAST chip selects. There is also a "byte + * device enable" flag somewhere for 8bit memories. + */ +#define EBI2_XMEM_CS0_FAST_CFG 0x0028 +#define EBI2_XMEM_CS1_FAST_CFG 0x002C +#define EBI2_XMEM_CS2_FAST_CFG 0x0030 +#define EBI2_XMEM_CS3_FAST_CFG 0x0034 +#define EBI2_XMEM_CS4_FAST_CFG 0x0038 +#define EBI2_XMEM_CS5_FAST_CFG 0x003C + +#define EBI2_XMEM_RD_HOLD_SHIFT 24 +#define EBI2_XMEM_ADV_OE_RECOVERY_SHIFT 16 +#define EBI2_XMEM_ADDR_HOLD_ENA_SHIFT 5 + +/** + * struct cs_data - struct with info on a chipselect setting + * @enable_mask: mask to enable the chipselect in the EBI2 config + * @slow_cfg: offset to XMEMC slow CS config + * @fast_cfg: offset to XMEMC fast CS config + */ +struct cs_data { + u32 enable_mask; + u16 slow_cfg; + u16 fast_cfg; +}; + +static const struct cs_data cs_info[] = { + { + /* CS0 */ + .enable_mask = EBI2_CS0_ENABLE_MASK, + .slow_cfg = EBI2_XMEM_CS0_SLOW_CFG, + .fast_cfg = EBI2_XMEM_CS0_FAST_CFG, + }, + { + /* CS1 */ + .enable_mask = EBI2_CS1_ENABLE_MASK, + .slow_cfg = EBI2_XMEM_CS1_SLOW_CFG, + .fast_cfg = EBI2_XMEM_CS1_FAST_CFG, + }, + { + /* CS2 */ + .enable_mask = EBI2_CS2_ENABLE_MASK, + .slow_cfg = EBI2_XMEM_CS2_SLOW_CFG, + .fast_cfg = EBI2_XMEM_CS2_FAST_CFG, + }, + { + /* CS3 */ + .enable_mask = EBI2_CS3_ENABLE_MASK, + .slow_cfg = EBI2_XMEM_CS3_SLOW_CFG, + .fast_cfg = EBI2_XMEM_CS3_FAST_CFG, + }, + { + /* CS4 */ + .enable_mask = EBI2_CS4_ENABLE_MASK, + .slow_cfg = EBI2_XMEM_CS4_SLOW_CFG, + .fast_cfg = EBI2_XMEM_CS4_FAST_CFG, + }, + { + /* CS5 */ + .enable_mask = EBI2_CS5_ENABLE_MASK, + .slow_cfg = EBI2_XMEM_CS5_SLOW_CFG, + .fast_cfg = EBI2_XMEM_CS5_FAST_CFG, + }, +}; + +/** + * struct ebi2_xmem_prop - describes an XMEM config property + * @prop: the device tree binding name + * @max: maximum value for the property + * @slowreg: true if this property is in the SLOW CS config register + * else it is assumed to be in the FAST config register + * @shift: the bit field start in the SLOW or FAST register for this + * property + */ +struct ebi2_xmem_prop { + const char *prop; + u32 max; + bool slowreg; + u16 shift; +}; + +static const struct ebi2_xmem_prop xmem_props[] = { + { + .prop = "qcom,xmem-recovery-cycles", + .max = 15, + .slowreg = true, + .shift = EBI2_XMEM_RECOVERY_SHIFT, + }, + {
.prop = "qcom,xmem-write-hold-cycles", + .max = 15, + .slowreg = true, + .shift = EBI2_XMEM_WR_HOLD_SHIFT, + }, + { + .prop = "qcom,xmem-write-delta-cycles", + .max = 255, + .slowreg = true, + .shift = EBI2_XMEM_WR_DELTA_SHIFT, + }, + { + .prop = "qcom,xmem-read-delta-cycles", + .max = 255, + .slowreg = true, + .shift = EBI2_XMEM_RD_DELTA_SHIFT, + }, + { + .prop = "qcom,xmem-write-wait-cycles", + .max = 15, + .slowreg = true, + .shift = EBI2_XMEM_WR_WAIT_SHIFT, + }, + { + .prop = "qcom,xmem-read-wait-cycles", + .max = 15, + .slowreg = true, + .shift = EBI2_XMEM_RD_WAIT_SHIFT, + }, + { + .prop = "qcom,xmem-address-hold-enable", + .max = 1, /* boolean prop */ + .slowreg = false, + .shift = EBI2_XMEM_ADDR_HOLD_ENA_SHIFT, + }, + { + .prop = "qcom,xmem-adv-to-oe-recovery-cycles", + .max = 3, + .slowreg = false, + .shift = EBI2_XMEM_ADV_OE_RECOVERY_SHIFT, + }, + { + .prop = "qcom,xmem-read-hold-cycles", + .max = 15, + .slowreg = false, + .shift = EBI2_XMEM_RD_HOLD_SHIFT, + }, +}; + +static void qcom_ebi2_setup_chipselect(struct device_node *np, + struct device *dev, + void __iomem *ebi2_base, + void __iomem *ebi2_xmem, + u32 csindex) +{ + const struct cs_data *csd; + u32 slowcfg, fastcfg; + u32 val; + int ret; + int i; + + csd = &cs_info[csindex]; + val = readl(ebi2_base); + val |= csd->enable_mask; + writel(val, ebi2_base); + dev_dbg(dev, "enabled CS%u\n", csindex); + + /* Next set up the XMEMC */ + slowcfg = 0; + fastcfg = 0; + + for (i = 0; i < ARRAY_SIZE(xmem_props); i++) { + const struct ebi2_xmem_prop *xp = &xmem_props[i]; + + /* All are regular u32 values */ + ret = of_property_read_u32(np, xp->prop, &val); + if (ret) { + dev_dbg(dev, "could not read %s for CS%d\n", + xp->prop, csindex); + continue; + } + + /* First check boolean props */ + if (xp->max == 1 && val) { + if (xp->slowreg) + slowcfg |= BIT(xp->shift); + else + fastcfg |= BIT(xp->shift); + dev_dbg(dev, "set %s flag\n", xp->prop); + continue; + } + + /* We're dealing with an u32 */ + if (val > xp->max) { + dev_err(dev, + "too high value for %s: %u, capped at %u\n", + xp->prop, val, xp->max); + val = xp->max; + } + if (xp->slowreg) + slowcfg |= (val << xp->shift); + else + fastcfg |= (val << xp->shift); + dev_dbg(dev, "set %s to %u\n", xp->prop, val); + } + + dev_info(dev, "CS%u: SLOW CFG 0x%08x, FAST CFG 0x%08x\n", + csindex, slowcfg, fastcfg); + + if (slowcfg) + writel(slowcfg, ebi2_xmem + csd->slow_cfg); + if (fastcfg) + writel(fastcfg, ebi2_xmem + csd->fast_cfg); +} + +static int qcom_ebi2_probe(struct platform_device *pdev) +{ + struct device_node *np = pdev->dev.of_node; + struct device_node *child; + struct device *dev = &pdev->dev; + struct resource *res; + void __iomem *ebi2_base; + void __iomem *ebi2_xmem; + struct clk *ebi2xclk; + struct clk *ebi2clk; + bool have_children = false; + u32 val; + int ret; + + ebi2xclk = devm_clk_get(dev, "ebi2x"); + if (IS_ERR(ebi2xclk)) + return PTR_ERR(ebi2xclk); + + ret = clk_prepare_enable(ebi2xclk); + if (ret) { + dev_err(dev, "could not enable EBI2X clk (%d)\n", ret); + return ret; + } + + ebi2clk = devm_clk_get(dev, "ebi2"); + if (IS_ERR(ebi2clk)) { + ret = PTR_ERR(ebi2clk); + goto err_disable_2x_clk; + } + + ret = clk_prepare_enable(ebi2clk); + if (ret) { + dev_err(dev, "could not enable EBI2 clk\n"); + goto err_disable_2x_clk; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + ebi2_base = devm_ioremap_resource(dev, res); + if (IS_ERR(ebi2_base)) { + ret = PTR_ERR(ebi2_base); + goto err_disable_clk; + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + 
ebi2_xmem = devm_ioremap_resource(dev, res); + if (IS_ERR(ebi2_xmem)) { + ret = PTR_ERR(ebi2_xmem); + goto err_disable_clk; + } + + /* Allegedly this turns the power save mode off */ + writel(0UL, ebi2_xmem + EBI2_XMEM_CFG); + + /* Disable all chipselects */ + val = readl(ebi2_base); + val &= ~EBI2_CSN_MASK; + writel(val, ebi2_base); + + /* Walk over the child nodes and see what chipselects we use */ + for_each_available_child_of_node(np, child) { + u32 csindex; + + /* Figure out the chipselect */ + ret = of_property_read_u32(child, "reg", &csindex); + if (ret) + return ret; + + if (csindex > 5) { + dev_err(dev, + "invalid chipselect %u, we only support 0-5\n", + csindex); + continue; + } + + qcom_ebi2_setup_chipselect(child, + dev, + ebi2_base, + ebi2_xmem, + csindex); + + /* We have at least one child */ + have_children = true; + } + + if (have_children) + return of_platform_default_populate(np, NULL, dev); + return 0; + +err_disable_clk: + clk_disable_unprepare(ebi2clk); +err_disable_2x_clk: + clk_disable_unprepare(ebi2xclk); + + return ret; +} + +static const struct of_device_id qcom_ebi2_of_match[] = { + { .compatible = "qcom,msm8660-ebi2", }, + { .compatible = "qcom,apq8060-ebi2", }, + { } +}; + +static struct platform_driver qcom_ebi2_driver = { + .probe = qcom_ebi2_probe, + .driver = { + .name = "qcom-ebi2", + .of_match_table = qcom_ebi2_of_match, + }, +}; +module_platform_driver(qcom_ebi2_driver); +MODULE_AUTHOR("Linus Walleij <linus.walleij@linaro.org>"); +MODULE_DESCRIPTION("Qualcomm EBI2 driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/bus/tegra-aconnect.c b/drivers/bus/tegra-aconnect.c index 7e4104b74fa8..084ae286fa23 100644 --- a/drivers/bus/tegra-aconnect.c +++ b/drivers/bus/tegra-aconnect.c @@ -15,24 +15,6 @@ #include <linux/pm_clock.h> #include <linux/pm_runtime.h> -static int tegra_aconnect_add_clock(struct device *dev, char *name) -{ - struct clk *clk; - int ret; - - clk = clk_get(dev, name); - if (IS_ERR(clk)) { - dev_err(dev, "%s clock not found\n", name); - return PTR_ERR(clk); - } - - ret = pm_clk_add_clk(dev, clk); - if (ret) - clk_put(clk); - - return ret; -} - static int tegra_aconnect_probe(struct platform_device *pdev) { int ret; @@ -44,11 +26,11 @@ static int tegra_aconnect_probe(struct platform_device *pdev) if (ret) return ret; - ret = tegra_aconnect_add_clock(&pdev->dev, "ape"); + ret = of_pm_clk_add_clk(&pdev->dev, "ape"); if (ret) goto clk_destroy; - ret = tegra_aconnect_add_clock(&pdev->dev, "apb2ape"); + ret = of_pm_clk_add_clk(&pdev->dev, "apb2ape"); if (ret) goto clk_destroy; diff --git a/drivers/char/hw_random/pasemi-rng.c b/drivers/char/hw_random/pasemi-rng.c index 699b7259f5d7..c19e23d22b36 100644 --- a/drivers/char/hw_random/pasemi-rng.c +++ b/drivers/char/hw_random/pasemi-rng.c @@ -26,7 +26,7 @@ #include <linux/delay.h> #include <linux/of_address.h> #include <linux/of_platform.h> -#include <asm/io.h> +#include <linux/io.h> #define SDCRNG_CTL_REG 0x00 #define SDCRNG_CTL_FVLD_M 0x0000f000 diff --git a/drivers/char/virtio_console.c b/drivers/char/virtio_console.c index 5da47e26a012..8114744bf30c 100644 --- a/drivers/char/virtio_console.c +++ b/drivers/char/virtio_console.c @@ -889,7 +889,7 @@ static int pipe_to_sg(struct pipe_inode_info *pipe, struct pipe_buffer *buf, return 0; /* Try lock this page */ - if (buf->ops->steal(pipe, buf) == 0) { + if (pipe_buf_steal(pipe, buf) == 0) { /* Get reference and unlock page for moving */ get_page(buf->page); unlock_page(buf->page); diff --git a/drivers/clk/imx/clk-imx1.c b/drivers/clk/imx/clk-imx1.c 
index 99cf802fa51f..eaa462ad09e8 100644 --- a/drivers/clk/imx/clk-imx1.c +++ b/drivers/clk/imx/clk-imx1.c @@ -45,10 +45,13 @@ static void __iomem *ccm __initdata; #define CCM_PCDR (ccm + 0x0020) #define SCM_GCCR (ccm + 0x0810) -static void __init _mx1_clocks_init(unsigned long fref) +static void __init mx1_clocks_init_dt(struct device_node *np) { + ccm = of_iomap(np, 0); + BUG_ON(!ccm); + clk[IMX1_CLK_DUMMY] = imx_clk_fixed("dummy", 0); - clk[IMX1_CLK_CLK32] = imx_obtain_fixed_clock("clk32", fref); + clk[IMX1_CLK_CLK32] = imx_obtain_fixed_clock("clk32", 32768); clk[IMX1_CLK_CLK16M_EXT] = imx_clk_fixed("clk16m_ext", 16000000); clk[IMX1_CLK_CLK16M] = imx_clk_gate("clk16m", "clk16m_ext", CCM_CSCR, 17); clk[IMX1_CLK_CLK32_PREMULT] = imx_clk_fixed_factor("clk32_premult", "clk32", 512, 1); @@ -74,45 +77,6 @@ static void __init _mx1_clocks_init(unsigned long fref) clk[IMX1_CLK_USBD_GATE] = imx_clk_gate("usbd_gate", "clk48m", SCM_GCCR, 0); imx_check_clocks(clk, ARRAY_SIZE(clk)); -} - -int __init mx1_clocks_init(unsigned long fref) -{ - ccm = ioremap(MX1_CCM_BASE_ADDR, SZ_4K); - BUG_ON(!ccm); - - _mx1_clocks_init(fref); - - clk_register_clkdev(clk[IMX1_CLK_PER1], "per", "imx-gpt.0"); - clk_register_clkdev(clk[IMX1_CLK_HCLK], "ipg", "imx-gpt.0"); - clk_register_clkdev(clk[IMX1_CLK_DMA_GATE], "ahb", "imx1-dma"); - clk_register_clkdev(clk[IMX1_CLK_HCLK], "ipg", "imx1-dma"); - clk_register_clkdev(clk[IMX1_CLK_PER1], "per", "imx1-uart.0"); - clk_register_clkdev(clk[IMX1_CLK_HCLK], "ipg", "imx1-uart.0"); - clk_register_clkdev(clk[IMX1_CLK_PER1], "per", "imx1-uart.1"); - clk_register_clkdev(clk[IMX1_CLK_HCLK], "ipg", "imx1-uart.1"); - clk_register_clkdev(clk[IMX1_CLK_PER1], "per", "imx1-uart.2"); - clk_register_clkdev(clk[IMX1_CLK_UART3_GATE], "ipg", "imx1-uart.2"); - clk_register_clkdev(clk[IMX1_CLK_HCLK], NULL, "imx1-i2c.0"); - clk_register_clkdev(clk[IMX1_CLK_PER2], "per", "imx1-cspi.0"); - clk_register_clkdev(clk[IMX1_CLK_DUMMY], "ipg", "imx1-cspi.0"); - clk_register_clkdev(clk[IMX1_CLK_PER2], "per", "imx1-cspi.1"); - clk_register_clkdev(clk[IMX1_CLK_DUMMY], "ipg", "imx1-cspi.1"); - clk_register_clkdev(clk[IMX1_CLK_PER2], "per", "imx1-fb.0"); - clk_register_clkdev(clk[IMX1_CLK_DUMMY], "ipg", "imx1-fb.0"); - clk_register_clkdev(clk[IMX1_CLK_DUMMY], "ahb", "imx1-fb.0"); - - mxc_timer_init(MX1_TIM1_BASE_ADDR, MX1_TIM1_INT, GPT_TYPE_IMX1); - - return 0; -} - -static void __init mx1_clocks_init_dt(struct device_node *np) -{ - ccm = of_iomap(np, 0); - BUG_ON(!ccm); - - _mx1_clocks_init(32768); clk_data.clks = clk; clk_data.clk_num = ARRAY_SIZE(clk); diff --git a/drivers/clk/meson/gxbb.h b/drivers/clk/meson/gxbb.h index ae461b16af75..0252939ba58f 100644 --- a/drivers/clk/meson/gxbb.h +++ b/drivers/clk/meson/gxbb.h @@ -183,14 +183,14 @@ /* CLKID_CLK81 */ #define CLKID_MPLL0 13 #define CLKID_MPLL1 14 -#define CLKID_MPLL2 15 +/* CLKID_MPLL2 */ #define CLKID_DDR 16 #define CLKID_DOS 17 #define CLKID_ISA 18 #define CLKID_PL301 19 #define CLKID_PERIPHS 20 #define CLKID_SPICC 21 -#define CLKID_I2C 22 +/* CLKID_I2C */ #define CLKID_SAR_ADC 23 #define CLKID_SMART_CARD 24 #define CLKID_RNG0 25 @@ -202,7 +202,7 @@ #define CLKID_ABUF 31 #define CLKID_HIU_IFACE 32 #define CLKID_ASSIST_MISC 33 -#define CLKID_SPI 34 +/* CLKID_SPI */ #define CLKID_I2S_SPDIF 35 #define CLKID_ETH 36 #define CLKID_DEMUX 37 @@ -218,12 +218,12 @@ #define CLKID_AIU 47 #define CLKID_UART1 48 #define CLKID_G2D 49 -#define CLKID_USB0 50 -#define CLKID_USB1 51 +/* CLKID_USB0 */ +/* CLKID_USB1 */ #define CLKID_RESET 52 #define CLKID_NAND 53 #define 
CLKID_DOS_PARSER 54 -#define CLKID_USB 55 +/* CLKID_USB */ #define CLKID_VDIN1 56 #define CLKID_AHB_ARB0 57 #define CLKID_EFUSE 58 @@ -232,8 +232,8 @@ #define CLKID_AHB_CTRL_BUS 61 #define CLKID_HDMI_INTR_SYNC 62 #define CLKID_HDMI_PCLK 63 -#define CLKID_USB1_DDR_BRIDGE 64 -#define CLKID_USB0_DDR_BRIDGE 65 +/* CLKID_USB1_DDR_BRIDGE */ +/* CLKID_USB0_DDR_BRIDGE */ #define CLKID_MMC_PCLK 66 #define CLKID_DVIN 67 #define CLKID_UART2 68 @@ -261,7 +261,7 @@ #define CLKID_AO_AHB_SRAM 90 #define CLKID_AO_AHB_BUS 91 #define CLKID_AO_IFACE 92 -#define CLKID_AO_I2C 93 +/* CLKID_AO_I2C */ /* CLKID_SD_EMMC_A */ /* CLKID_SD_EMMC_B */ /* CLKID_SD_EMMC_C */ diff --git a/drivers/clk/mvebu/orion.c b/drivers/clk/mvebu/orion.c index fd129566c1ce..a6e5bee23385 100644 --- a/drivers/clk/mvebu/orion.c +++ b/drivers/clk/mvebu/orion.c @@ -21,6 +21,76 @@ static const struct coreclk_ratio orion_coreclk_ratios[] __initconst = { }; /* + * Orion 5181 + */ + +#define SAR_MV88F5181_TCLK_FREQ 8 +#define SAR_MV88F5181_TCLK_FREQ_MASK 0x3 + +static u32 __init mv88f5181_get_tclk_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> SAR_MV88F5181_TCLK_FREQ) & + SAR_MV88F5181_TCLK_FREQ_MASK; + if (opt == 0) + return 133333333; + else if (opt == 1) + return 150000000; + else if (opt == 2) + return 166666667; + else + return 0; +} + +#define SAR_MV88F5181_CPU_FREQ 4 +#define SAR_MV88F5181_CPU_FREQ_MASK 0xf + +static u32 __init mv88f5181_get_cpu_freq(void __iomem *sar) +{ + u32 opt = (readl(sar) >> SAR_MV88F5181_CPU_FREQ) & + SAR_MV88F5181_CPU_FREQ_MASK; + if (opt == 0) + return 333333333; + else if (opt == 1 || opt == 2) + return 400000000; + else if (opt == 3) + return 500000000; + else + return 0; +} + +static void __init mv88f5181_get_clk_ratio(void __iomem *sar, int id, + int *mult, int *div) +{ + u32 opt = (readl(sar) >> SAR_MV88F5181_CPU_FREQ) & + SAR_MV88F5181_CPU_FREQ_MASK; + if (opt == 0 || opt == 1) { + *mult = 1; + *div = 2; + } else if (opt == 2 || opt == 3) { + *mult = 1; + *div = 3; + } else { + *mult = 0; + *div = 1; + } +} + +static const struct coreclk_soc_desc mv88f5181_coreclks = { + .get_tclk_freq = mv88f5181_get_tclk_freq, + .get_cpu_freq = mv88f5181_get_cpu_freq, + .get_clk_ratio = mv88f5181_get_clk_ratio, + .ratios = orion_coreclk_ratios, + .num_ratios = ARRAY_SIZE(orion_coreclk_ratios), +}; + +static void __init mv88f5181_clk_init(struct device_node *np) +{ + return mvebu_coreclk_setup(np, &mv88f5181_coreclks); +} + +CLK_OF_DECLARE(mv88f5181_clk, "marvell,mv88f5181-core-clock", mv88f5181_clk_init); + +/* * Orion 5182 */ diff --git a/drivers/clocksource/Kconfig b/drivers/clocksource/Kconfig index 8a753fd5b79d..245190839359 100644 --- a/drivers/clocksource/Kconfig +++ b/drivers/clocksource/Kconfig @@ -361,7 +361,7 @@ config CLKSRC_METAG_GENERIC config CLKSRC_EXYNOS_MCT bool "Exynos multi core timer driver" if COMPILE_TEST - depends on ARM + depends on ARM || ARM64 help Support for Multi Core Timer controller on Exynos SoCs. 
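The mv88f5181 hunk above derives both core clocks from a single sample-at-reset (SAR) register: bits 9:8 select the TCLK rate, and bits 7:4 select the CPU rate, with the same CPU field also fixing the CPU-to-DDR ratio. A standalone sketch of that decoding (plain C, not part of the patch; the strap value fed to main() is invented):

#include <stdint.h>
#include <stdio.h>

/* Field positions copied from the mv88f5181 hunk above. */
#define SAR_MV88F5181_TCLK_FREQ		8
#define SAR_MV88F5181_TCLK_FREQ_MASK	0x3
#define SAR_MV88F5181_CPU_FREQ		4
#define SAR_MV88F5181_CPU_FREQ_MASK	0xf

static uint32_t mv88f5181_tclk(uint32_t sar)
{
	/* Encodings 0..2 are defined; anything else is reserved. */
	static const uint32_t tbl[] = { 133333333, 150000000, 166666667 };
	uint32_t opt = (sar >> SAR_MV88F5181_TCLK_FREQ) &
		       SAR_MV88F5181_TCLK_FREQ_MASK;

	return opt < 3 ? tbl[opt] : 0;
}

static uint32_t mv88f5181_cpu(uint32_t sar)
{
	uint32_t opt = (sar >> SAR_MV88F5181_CPU_FREQ) &
		       SAR_MV88F5181_CPU_FREQ_MASK;

	if (opt == 0)
		return 333333333;
	if (opt == 1 || opt == 2)
		return 400000000;
	if (opt == 3)
		return 500000000;
	return 0;	/* unknown strap */
}

int main(void)
{
	uint32_t sar = 0x132;	/* hypothetical strap: TCLK opt 1, CPU opt 3 */

	printf("tclk %u Hz, cpu %u Hz\n",
	       (unsigned)mv88f5181_tclk(sar), (unsigned)mv88f5181_cpu(sar));
	return 0;
}

With opt 3 in the CPU field, the driver's ratio callback likewise reports a 1:3 DDR divider, so all three readouts stay consistent with one register sample.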
diff --git a/drivers/clocksource/exynos_mct.c b/drivers/clocksource/exynos_mct.c index 41840d02c331..8f3488b80896 100644 --- a/drivers/clocksource/exynos_mct.c +++ b/drivers/clocksource/exynos_mct.c @@ -223,6 +223,7 @@ static u64 notrace exynos4_read_sched_clock(void) return exynos4_read_count_32(); } +#if defined(CONFIG_ARM) static struct delay_timer exynos4_delay_timer; static cycles_t exynos4_read_current_timer(void) @@ -231,14 +232,17 @@ static cycles_t exynos4_read_current_timer(void) "cycles_t needs to move to 32-bit for ARM64 usage"); return exynos4_read_count_32(); } +#endif static int __init exynos4_clocksource_init(void) { exynos4_mct_frc_start(); +#if defined(CONFIG_ARM) exynos4_delay_timer.read_current_timer = &exynos4_read_current_timer; exynos4_delay_timer.freq = clk_rate; register_current_timer_delay(&exynos4_delay_timer); +#endif if (clocksource_register_hz(&mct_frc, clk_rate)) panic("%s: can't register clocksource\n", mct_frc.name); diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index 0e22f241403b..bca172d42c74 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -209,5 +209,6 @@ config HAVE_ARM_SMCCC source "drivers/firmware/broadcom/Kconfig" source "drivers/firmware/google/Kconfig" source "drivers/firmware/efi/Kconfig" +source "drivers/firmware/meson/Kconfig" endmenu diff --git a/drivers/firmware/Makefile b/drivers/firmware/Makefile index 44a59dcfc398..898ac41fa8b3 100644 --- a/drivers/firmware/Makefile +++ b/drivers/firmware/Makefile @@ -22,6 +22,7 @@ obj-$(CONFIG_QCOM_SCM_32) += qcom_scm-32.o CFLAGS_qcom_scm-32.o :=$(call as-instr,.arch armv7-a\n.arch_extension sec,-DREQUIRES_SEC=1) -march=armv7-a obj-y += broadcom/ +obj-y += meson/ obj-$(CONFIG_GOOGLE_FIRMWARE) += google/ obj-$(CONFIG_EFI) += efi/ obj-$(CONFIG_UEFI_CPER) += efi/ diff --git a/drivers/firmware/meson/Kconfig b/drivers/firmware/meson/Kconfig new file mode 100644 index 000000000000..170d7e8bcdfb --- /dev/null +++ b/drivers/firmware/meson/Kconfig @@ -0,0 +1,9 @@ +# +# Amlogic Secure Monitor driver +# +config MESON_SM + bool + default ARCH_MESON + depends on ARM64_4K_PAGES + help + Say y here to enable the Amlogic secure monitor driver diff --git a/drivers/firmware/meson/Makefile b/drivers/firmware/meson/Makefile new file mode 100644 index 000000000000..9ab3884f96bc --- /dev/null +++ b/drivers/firmware/meson/Makefile @@ -0,0 +1 @@ +obj-$(CONFIG_MESON_SM) += meson_sm.o diff --git a/drivers/firmware/meson/meson_sm.c b/drivers/firmware/meson/meson_sm.c new file mode 100644 index 000000000000..b0d254930ed3 --- /dev/null +++ b/drivers/firmware/meson/meson_sm.c @@ -0,0 +1,248 @@ +/* + * Amlogic Secure Monitor driver + * + * Copyright (C) 2016 Endless Mobile, Inc. + * Author: Carlo Caione <carlo@endlessm.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * version 2 as published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#define pr_fmt(fmt) "meson-sm: " fmt + +#include <linux/arm-smccc.h> +#include <linux/bug.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/printk.h> +#include <linux/types.h> +#include <linux/sizes.h> + +#include <linux/firmware/meson/meson_sm.h> + +struct meson_sm_cmd { + unsigned int index; + u32 smc_id; +}; +#define CMD(d, s) { .index = (d), .smc_id = (s), } + +struct meson_sm_chip { + unsigned int shmem_size; + u32 cmd_shmem_in_base; + u32 cmd_shmem_out_base; + struct meson_sm_cmd cmd[]; +}; + +struct meson_sm_chip gxbb_chip = { + .shmem_size = SZ_4K, + .cmd_shmem_in_base = 0x82000020, + .cmd_shmem_out_base = 0x82000021, + .cmd = { + CMD(SM_EFUSE_READ, 0x82000030), + CMD(SM_EFUSE_WRITE, 0x82000031), + CMD(SM_EFUSE_USER_MAX, 0x82000033), + { /* sentinel */ }, + }, +}; + +struct meson_sm_firmware { + const struct meson_sm_chip *chip; + void __iomem *sm_shmem_in_base; + void __iomem *sm_shmem_out_base; +}; + +static struct meson_sm_firmware fw; + +static u32 meson_sm_get_cmd(const struct meson_sm_chip *chip, + unsigned int cmd_index) +{ + const struct meson_sm_cmd *cmd = chip->cmd; + + while (cmd->smc_id && cmd->index != cmd_index) + cmd++; + + return cmd->smc_id; +} + +static u32 __meson_sm_call(u32 cmd, u32 arg0, u32 arg1, u32 arg2, + u32 arg3, u32 arg4) +{ + struct arm_smccc_res res; + + arm_smccc_smc(cmd, arg0, arg1, arg2, arg3, arg4, 0, 0, &res); + return res.a0; +} + +static void __iomem *meson_sm_map_shmem(u32 cmd_shmem, unsigned int size) +{ + u32 sm_phy_base; + + sm_phy_base = __meson_sm_call(cmd_shmem, 0, 0, 0, 0, 0); + if (!sm_phy_base) + return 0; + + return ioremap_cache(sm_phy_base, size); +} + +/** + * meson_sm_call - generic SMC32 call to the secure-monitor + * + * @cmd_index: Index of the SMC32 function ID + * @ret: Returned value + * @arg0: SMC32 Argument 0 + * @arg1: SMC32 Argument 1 + * @arg2: SMC32 Argument 2 + * @arg3: SMC32 Argument 3 + * @arg4: SMC32 Argument 4 + * + * Return: 0 on success, a negative value on error + */ +int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0, + u32 arg1, u32 arg2, u32 arg3, u32 arg4) +{ + u32 cmd, lret; + + if (!fw.chip) + return -ENOENT; + + cmd = meson_sm_get_cmd(fw.chip, cmd_index); + if (!cmd) + return -EINVAL; + + lret = __meson_sm_call(cmd, arg0, arg1, arg2, arg3, arg4); + + if (ret) + *ret = lret; + + return 0; +} +EXPORT_SYMBOL(meson_sm_call); + +/** + * meson_sm_call_read - retrieve data from secure-monitor + * + * @buffer: Buffer to store the retrieved data + * @cmd_index: Index of the SMC32 function ID + * @arg0: SMC32 Argument 0 + * @arg1: SMC32 Argument 1 + * @arg2: SMC32 Argument 2 + * @arg3: SMC32 Argument 3 + * @arg4: SMC32 Argument 4 + * + * Return: size of read data on success, a negative value on error + */ +int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0, + u32 arg1, u32 arg2, u32 arg3, u32 arg4) +{ + u32 size; + + if (!fw.chip) + return -ENOENT; + + if (!fw.chip->cmd_shmem_out_base) + return -EINVAL; + + if (meson_sm_call(cmd_index, &size, arg0, arg1, arg2, arg3, arg4) < 0) + return -EINVAL; + + if (!size || size > fw.chip->shmem_size) + return -EINVAL; + + if (buffer) + memcpy(buffer, fw.sm_shmem_out_base, size); + + return size; +} +EXPORT_SYMBOL(meson_sm_call_read); + +/** + * meson_sm_call_write - send data to secure-monitor + * + * @buffer: Buffer containing data to send + * @size: Size of the data to send + * @cmd_index: Index of the SMC32 function ID + * @arg0: SMC32 Argument 0 + * @arg1: SMC32 Argument 1 + * @arg2: SMC32 
Argument 2 + * @arg3: SMC32 Argument 3 + * @arg4: SMC32 Argument 4 + * + * Return: size of sent data on success, a negative value on error + */ +int meson_sm_call_write(void *buffer, unsigned int size, unsigned int cmd_index, + u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4) +{ + u32 written; + + if (!fw.chip) + return -ENOENT; + + if (size > fw.chip->shmem_size) + return -EINVAL; + + if (!fw.chip->cmd_shmem_in_base) + return -EINVAL; + + memcpy(fw.sm_shmem_in_base, buffer, size); + + if (meson_sm_call(cmd_index, &written, arg0, arg1, arg2, arg3, arg4) < 0) + return -EINVAL; + + if (!written) + return -EINVAL; + + return written; +} +EXPORT_SYMBOL(meson_sm_call_write); + +static const struct of_device_id meson_sm_ids[] = { + { .compatible = "amlogic,meson-gxbb-sm", .data = &gxbb_chip }, + { /* sentinel */ }, +}; + +int __init meson_sm_init(void) +{ + const struct meson_sm_chip *chip; + const struct of_device_id *matched_np; + struct device_node *np; + + np = of_find_matching_node_and_match(NULL, meson_sm_ids, &matched_np); + if (!np) + return -ENODEV; + + chip = matched_np->data; + if (!chip) { + pr_err("unable to setup secure-monitor data\n"); + goto out; + } + + if (chip->cmd_shmem_in_base) { + fw.sm_shmem_in_base = meson_sm_map_shmem(chip->cmd_shmem_in_base, + chip->shmem_size); + if (WARN_ON(!fw.sm_shmem_in_base)) + goto out; + } + + if (chip->cmd_shmem_out_base) { + fw.sm_shmem_out_base = meson_sm_map_shmem(chip->cmd_shmem_out_base, + chip->shmem_size); + if (WARN_ON(!fw.sm_shmem_out_base)) + goto out_in_base; + } + + fw.chip = chip; + pr_info("secure-monitor enabled\n"); + + return 0; + +out_in_base: + iounmap(fw.sm_shmem_in_base); +out: + return -EINVAL; +} +device_initcall(meson_sm_init); diff --git a/drivers/firmware/qcom_scm.c b/drivers/firmware/qcom_scm.c index e64a501adbf4..d95c70227c05 100644 --- a/drivers/firmware/qcom_scm.c +++ b/drivers/firmware/qcom_scm.c @@ -1,4 +1,7 @@ -/* Copyright (c) 2010,2015, The Linux Foundation. All rights reserved. +/* + * Qualcomm SCM driver + * + * Copyright (c) 2010,2015, The Linux Foundation. All rights reserved. * Copyright (C) 2015 Linaro Ltd. 
* * This program is free software; you can redistribute it and/or modify @@ -12,7 +15,7 @@ * */ #include <linux/platform_device.h> -#include <linux/module.h> +#include <linux/init.h> #include <linux/cpumask.h> #include <linux/export.h> #include <linux/dma-mapping.h> @@ -376,8 +379,6 @@ static const struct of_device_id qcom_scm_dt_match[] = { {} }; -MODULE_DEVICE_TABLE(of, qcom_scm_dt_match); - static struct platform_driver qcom_scm_driver = { .driver = { .name = "qcom_scm", @@ -414,14 +415,4 @@ static int __init qcom_scm_init(void) return platform_driver_register(&qcom_scm_driver); } - subsys_initcall(qcom_scm_init); - -static void __exit qcom_scm_exit(void) -{ - platform_driver_unregister(&qcom_scm_driver); -} -module_exit(qcom_scm_exit); - -MODULE_DESCRIPTION("Qualcomm SCM driver"); -MODULE_LICENSE("GPL v2"); diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index b372e792adc2..e4dbfc85abdb 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -25,7 +25,7 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_PM) += irq-gic-pm.o -obj-$(CONFIG_REALVIEW_DT) += irq-gic-realview.o +obj-$(CONFIG_ARCH_REALVIEW) += irq-gic-realview.o obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o irq-gic-v3-its-pci-msi.o irq-gic-v3-its-platform-msi.o diff --git a/drivers/lightnvm/Kconfig b/drivers/lightnvm/Kconfig index 61c68a1f054a..2f5d5f4a4c75 100644 --- a/drivers/lightnvm/Kconfig +++ b/drivers/lightnvm/Kconfig @@ -4,7 +4,7 @@ menuconfig NVM bool "Open-Channel SSD target support" - depends on BLOCK + depends on BLOCK && HAS_DMA help Say Y here to get to enable Open-channel SSDs. diff --git a/drivers/lightnvm/Makefile b/drivers/lightnvm/Makefile index a7a0a22cf1a5..1f6b6521016a 100644 --- a/drivers/lightnvm/Makefile +++ b/drivers/lightnvm/Makefile @@ -2,6 +2,6 @@ # Makefile for Open-Channel SSDs. 
# -obj-$(CONFIG_NVM) := core.o sysblk.o +obj-$(CONFIG_NVM) := core.o sysblk.o sysfs.o obj-$(CONFIG_NVM_GENNVM) += gennvm.o obj-$(CONFIG_NVM_RRPC) += rrpc.o diff --git a/drivers/lightnvm/core.c b/drivers/lightnvm/core.c index c784ddcd4405..1cac0f8bc0dc 100644 --- a/drivers/lightnvm/core.c +++ b/drivers/lightnvm/core.c @@ -27,6 +27,8 @@ #include <linux/lightnvm.h> #include <linux/sched/sysctl.h> +#include "lightnvm.h" + static LIST_HEAD(nvm_tgt_types); static DECLARE_RWSEM(nvm_tgtt_lock); static LIST_HEAD(nvm_mgrs); @@ -581,6 +583,8 @@ static int nvm_core_init(struct nvm_dev *dev) mutex_init(&dev->mlock); spin_lock_init(&dev->lock); + blk_queue_logical_block_size(dev->q, dev->sec_size); + return 0; err_fmtype: kfree(dev->lun_map); @@ -596,15 +600,19 @@ static void nvm_free_mgr(struct nvm_dev *dev) dev->mt = NULL; } -static void nvm_free(struct nvm_dev *dev) +void nvm_free(struct nvm_dev *dev) { if (!dev) return; nvm_free_mgr(dev); + if (dev->dma_pool) + dev->ops->destroy_dma_pool(dev->dma_pool); + kfree(dev->lptbl); kfree(dev->lun_map); + kfree(dev); } static int nvm_init(struct nvm_dev *dev) @@ -651,30 +659,19 @@ err: static void nvm_exit(struct nvm_dev *dev) { - if (dev->dma_pool) - dev->ops->destroy_dma_pool(dev->dma_pool); - nvm_free(dev); + nvm_sysfs_unregister_dev(dev); +} - pr_info("nvm: successfully unloaded\n"); +struct nvm_dev *nvm_alloc_dev(int node) +{ + return kzalloc_node(sizeof(struct nvm_dev), GFP_KERNEL, node); } +EXPORT_SYMBOL(nvm_alloc_dev); -int nvm_register(struct request_queue *q, char *disk_name, - struct nvm_dev_ops *ops) +int nvm_register(struct nvm_dev *dev) { - struct nvm_dev *dev; int ret; - if (!ops->identity) - return -EINVAL; - - dev = kzalloc(sizeof(struct nvm_dev), GFP_KERNEL); - if (!dev) - return -ENOMEM; - - dev->q = q; - dev->ops = ops; - strncpy(dev->name, disk_name, DISK_NAME_LEN); - ret = nvm_init(dev); if (ret) goto err_init; @@ -694,6 +691,10 @@ int nvm_register(struct request_queue *q, char *disk_name, } } + ret = nvm_sysfs_register_dev(dev); + if (ret) + goto err_ppalist; + if (dev->identity.cap & NVM_ID_DCAP_BBLKMGMT) { ret = nvm_get_sysblock(dev, &dev->sb); if (!ret) @@ -710,31 +711,21 @@ int nvm_register(struct request_queue *q, char *disk_name, up_write(&nvm_lock); return 0; +err_ppalist: + dev->ops->destroy_dma_pool(dev->dma_pool); err_init: kfree(dev->lun_map); - kfree(dev); return ret; } EXPORT_SYMBOL(nvm_register); -void nvm_unregister(char *disk_name) +void nvm_unregister(struct nvm_dev *dev) { - struct nvm_dev *dev; - down_write(&nvm_lock); - dev = nvm_find_nvm_dev(disk_name); - if (!dev) { - pr_err("nvm: could not find device %s to unregister\n", - disk_name); - up_write(&nvm_lock); - return; - } - list_del(&dev->devices); up_write(&nvm_lock); nvm_exit(dev); - kfree(dev); } EXPORT_SYMBOL(nvm_unregister); diff --git a/drivers/lightnvm/lightnvm.h b/drivers/lightnvm/lightnvm.h new file mode 100644 index 000000000000..305c181509a6 --- /dev/null +++ b/drivers/lightnvm/lightnvm.h @@ -0,0 +1,35 @@ +/* + * Copyright (C) 2016 CNEX Labs. All rights reserved. + * Initial release: Matias Bjorling <matias@cnexlabs.com> + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License version + * 2 as published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; see the file COPYING. If not, write to + * the Free Software Foundation, 675 Mass Ave, Cambridge, MA 02139, + * USA. + * + */ + +#ifndef LIGHTNVM_H +#define LIGHTNVM_H + +#include <linux/lightnvm.h> + +/* core -> sysfs.c */ +int __must_check nvm_sysfs_register_dev(struct nvm_dev *); +void nvm_sysfs_unregister_dev(struct nvm_dev *); +int nvm_sysfs_register(void); +void nvm_sysfs_unregister(void); + +/* sysfs > core */ +void nvm_free(struct nvm_dev *); + +#endif diff --git a/drivers/lightnvm/sysfs.c b/drivers/lightnvm/sysfs.c new file mode 100644 index 000000000000..0338c27ab95a --- /dev/null +++ b/drivers/lightnvm/sysfs.c @@ -0,0 +1,198 @@ +#include <linux/kernel.h> +#include <linux/lightnvm.h> +#include <linux/miscdevice.h> +#include <linux/kobject.h> +#include <linux/blk-mq.h> + +#include "lightnvm.h" + +static ssize_t nvm_dev_attr_show(struct device *dev, + struct device_attribute *dattr, char *page) +{ + struct nvm_dev *ndev = container_of(dev, struct nvm_dev, dev); + struct nvm_id *id = &ndev->identity; + struct nvm_id_group *grp = &id->groups[0]; + struct attribute *attr = &dattr->attr; + + if (strcmp(attr->name, "version") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->ver_id); + } else if (strcmp(attr->name, "vendor_opcode") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->vmnt); + } else if (strcmp(attr->name, "capabilities") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->cap); + } else if (strcmp(attr->name, "device_mode") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", id->dom); + } else if (strcmp(attr->name, "media_manager") == 0) { + if (!ndev->mt) + return scnprintf(page, PAGE_SIZE, "%s\n", "none"); + return scnprintf(page, PAGE_SIZE, "%s\n", ndev->mt->name); + } else if (strcmp(attr->name, "ppa_format") == 0) { + return scnprintf(page, PAGE_SIZE, + "0x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x%02x\n", + id->ppaf.ch_offset, id->ppaf.ch_len, + id->ppaf.lun_offset, id->ppaf.lun_len, + id->ppaf.pln_offset, id->ppaf.pln_len, + id->ppaf.blk_offset, id->ppaf.blk_len, + id->ppaf.pg_offset, id->ppaf.pg_len, + id->ppaf.sect_offset, id->ppaf.sect_len); + } else if (strcmp(attr->name, "media_type") == 0) { /* u8 */ + return scnprintf(page, PAGE_SIZE, "%u\n", grp->mtype); + } else if (strcmp(attr->name, "flash_media_type") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->fmtype); + } else if (strcmp(attr->name, "num_channels") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_ch); + } else if (strcmp(attr->name, "num_luns") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_lun); + } else if (strcmp(attr->name, "num_planes") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pln); + } else if (strcmp(attr->name, "num_blocks") == 0) { /* u16 */ + return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_blk); + } else if (strcmp(attr->name, "num_pages") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->num_pg); + } else if (strcmp(attr->name, "page_size") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->fpg_sz); + } else if (strcmp(attr->name, "hw_sector_size") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->csecs); + } else if (strcmp(attr->name, "oob_sector_size") == 0) {/* u32 */ + return scnprintf(page, PAGE_SIZE, "%u\n", grp->sos); + } else if (strcmp(attr->name, "read_typ") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdt); + } 
else if (strcmp(attr->name, "read_max") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->trdm); + } else if (strcmp(attr->name, "prog_typ") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprt); + } else if (strcmp(attr->name, "prog_max") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->tprm); + } else if (strcmp(attr->name, "erase_typ") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbet); + } else if (strcmp(attr->name, "erase_max") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", grp->tbem); + } else if (strcmp(attr->name, "multiplane_modes") == 0) { + return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mpos); + } else if (strcmp(attr->name, "media_capabilities") == 0) { + return scnprintf(page, PAGE_SIZE, "0x%08x\n", grp->mccap); + } else if (strcmp(attr->name, "max_phys_secs") == 0) { + return scnprintf(page, PAGE_SIZE, "%u\n", + ndev->ops->max_phys_sect); + } else { + return scnprintf(page, + PAGE_SIZE, + "Unhandled attr(%s) in `nvm_dev_attr_show`\n", + attr->name); + } +} + +#define NVM_DEV_ATTR_RO(_name) \ + DEVICE_ATTR(_name, S_IRUGO, nvm_dev_attr_show, NULL) + +static NVM_DEV_ATTR_RO(version); +static NVM_DEV_ATTR_RO(vendor_opcode); +static NVM_DEV_ATTR_RO(capabilities); +static NVM_DEV_ATTR_RO(device_mode); +static NVM_DEV_ATTR_RO(ppa_format); +static NVM_DEV_ATTR_RO(media_manager); + +static NVM_DEV_ATTR_RO(media_type); +static NVM_DEV_ATTR_RO(flash_media_type); +static NVM_DEV_ATTR_RO(num_channels); +static NVM_DEV_ATTR_RO(num_luns); +static NVM_DEV_ATTR_RO(num_planes); +static NVM_DEV_ATTR_RO(num_blocks); +static NVM_DEV_ATTR_RO(num_pages); +static NVM_DEV_ATTR_RO(page_size); +static NVM_DEV_ATTR_RO(hw_sector_size); +static NVM_DEV_ATTR_RO(oob_sector_size); +static NVM_DEV_ATTR_RO(read_typ); +static NVM_DEV_ATTR_RO(read_max); +static NVM_DEV_ATTR_RO(prog_typ); +static NVM_DEV_ATTR_RO(prog_max); +static NVM_DEV_ATTR_RO(erase_typ); +static NVM_DEV_ATTR_RO(erase_max); +static NVM_DEV_ATTR_RO(multiplane_modes); +static NVM_DEV_ATTR_RO(media_capabilities); +static NVM_DEV_ATTR_RO(max_phys_secs); + +#define NVM_DEV_ATTR(_name) (dev_attr_##_name##) + +static struct attribute *nvm_dev_attrs[] = { + &dev_attr_version.attr, + &dev_attr_vendor_opcode.attr, + &dev_attr_capabilities.attr, + &dev_attr_device_mode.attr, + &dev_attr_media_manager.attr, + + &dev_attr_ppa_format.attr, + &dev_attr_media_type.attr, + &dev_attr_flash_media_type.attr, + &dev_attr_num_channels.attr, + &dev_attr_num_luns.attr, + &dev_attr_num_planes.attr, + &dev_attr_num_blocks.attr, + &dev_attr_num_pages.attr, + &dev_attr_page_size.attr, + &dev_attr_hw_sector_size.attr, + &dev_attr_oob_sector_size.attr, + &dev_attr_read_typ.attr, + &dev_attr_read_max.attr, + &dev_attr_prog_typ.attr, + &dev_attr_prog_max.attr, + &dev_attr_erase_typ.attr, + &dev_attr_erase_max.attr, + &dev_attr_multiplane_modes.attr, + &dev_attr_media_capabilities.attr, + &dev_attr_max_phys_secs.attr, + NULL, +}; + +static struct attribute_group nvm_dev_attr_group = { + .name = "lightnvm", + .attrs = nvm_dev_attrs, +}; + +static const struct attribute_group *nvm_dev_attr_groups[] = { + &nvm_dev_attr_group, + NULL, +}; + +static void nvm_dev_release(struct device *device) +{ + struct nvm_dev *dev = container_of(device, struct nvm_dev, dev); + struct request_queue *q = dev->q; + + pr_debug("nvm/sysfs: `nvm_dev_release`\n"); + + blk_mq_unregister_dev(device, q); + + nvm_free(dev); +} + +static struct device_type nvm_type = { + .name = "lightnvm", + .groups = nvm_dev_attr_groups, + .release = nvm_dev_release, +}; + 
+int nvm_sysfs_register_dev(struct nvm_dev *dev) +{ + int ret; + + if (!dev->parent_dev) + return 0; + + dev->dev.parent = dev->parent_dev; + dev_set_name(&dev->dev, "%s", dev->name); + dev->dev.type = &nvm_type; + device_initialize(&dev->dev); + ret = device_add(&dev->dev); + + if (!ret) + blk_mq_register_dev(&dev->dev, dev->q); + + return ret; +} + +void nvm_sysfs_unregister_dev(struct nvm_dev *dev) +{ + if (dev && dev->parent_dev) + kobject_put(&dev->dev.kobj); +} diff --git a/drivers/macintosh/macio_asic.c b/drivers/macintosh/macio_asic.c index b6819f0fc608..3f041b187033 100644 --- a/drivers/macintosh/macio_asic.c +++ b/drivers/macintosh/macio_asic.c @@ -236,7 +236,7 @@ static void macio_create_fixup_irq(struct macio_dev *dev, int index, unsigned int irq; irq = irq_create_mapping(NULL, line); - if (irq != NO_IRQ) { + if (irq) { dev->interrupt[index].start = irq; dev->interrupt[index].flags = IORESOURCE_IRQ; dev->interrupt[index].name = dev_name(&dev->ofdev.dev); @@ -299,7 +299,7 @@ static void macio_setup_interrupts(struct macio_dev *dev) break; res = &dev->interrupt[j]; irq = irq_of_parse_and_map(np, i++); - if (irq == NO_IRQ) + if (!irq) break; res->start = irq; res->flags = IORESOURCE_IRQ; diff --git a/drivers/macintosh/rack-meter.c b/drivers/macintosh/rack-meter.c index 465c52219639..775527135b93 100644 --- a/drivers/macintosh/rack-meter.c +++ b/drivers/macintosh/rack-meter.c @@ -427,7 +427,7 @@ static int rackmeter_probe(struct macio_dev* mdev, rm->irq = macio_irq(mdev, 1); #else rm->irq = irq_of_parse_and_map(i2s, 1); - if (rm->irq == NO_IRQ || + if (!rm->irq || of_address_to_resource(i2s, 0, &ri2s) || of_address_to_resource(i2s, 1, &rdma)) { printk(KERN_ERR diff --git a/drivers/macintosh/smu.c b/drivers/macintosh/smu.c index d6f72c826c1c..08edb2c25b60 100644 --- a/drivers/macintosh/smu.c +++ b/drivers/macintosh/smu.c @@ -279,7 +279,7 @@ int smu_queue_cmd(struct smu_cmd *cmd) spin_unlock_irqrestore(&smu->lock, flags); /* Workaround for early calls when irq isn't available */ - if (!smu_irq_inited || smu->db_irq == NO_IRQ) + if (!smu_irq_inited || !smu->db_irq) smu_spinwait_cmd(cmd); return 0; @@ -498,8 +498,8 @@ int __init smu_init (void) INIT_LIST_HEAD(&smu->cmd_list); INIT_LIST_HEAD(&smu->cmd_i2c_list); smu->of_node = np; - smu->db_irq = NO_IRQ; - smu->msg_irq = NO_IRQ; + smu->db_irq = 0; + smu->msg_irq = 0; /* smu_cmdbuf_abs is in the low 2G of RAM, can be converted to a * 32 bits value safely @@ -587,13 +587,13 @@ static int smu_late_init(void) if (smu->db_node) { smu->db_irq = irq_of_parse_and_map(smu->db_node, 0); - if (smu->db_irq == NO_IRQ) + if (!smu->db_irq) printk(KERN_ERR "smu: failed to map irq for node %s\n", smu->db_node->full_name); } if (smu->msg_node) { smu->msg_irq = irq_of_parse_and_map(smu->msg_node, 0); - if (smu->msg_irq == NO_IRQ) + if (!smu->msg_irq) printk(KERN_ERR "smu: failed to map irq for node %s\n", smu->msg_node->full_name); } @@ -602,23 +602,23 @@ static int smu_late_init(void) * Try to request the interrupts */ - if (smu->db_irq != NO_IRQ) { + if (smu->db_irq) { if (request_irq(smu->db_irq, smu_db_intr, IRQF_SHARED, "SMU doorbell", smu) < 0) { printk(KERN_WARNING "SMU: can't " "request interrupt %d\n", smu->db_irq); - smu->db_irq = NO_IRQ; + smu->db_irq = 0; } } - if (smu->msg_irq != NO_IRQ) { + if (smu->msg_irq) { if (request_irq(smu->msg_irq, smu_msg_intr, IRQF_SHARED, "SMU message", smu) < 0) { printk(KERN_WARNING "SMU: can't " "request interrupt %d\n", smu->msg_irq); - smu->msg_irq = NO_IRQ; + smu->msg_irq = 0; } } diff --git
a/drivers/macintosh/via-cuda.c b/drivers/macintosh/via-cuda.c index bad18130f125..2088e23a8002 100644 --- a/drivers/macintosh/via-cuda.c +++ b/drivers/macintosh/via-cuda.c @@ -209,7 +209,7 @@ static int __init via_cuda_start(void) cuda_irq = IRQ_MAC_ADB; #else cuda_irq = irq_of_parse_and_map(vias, 0); - if (cuda_irq == NO_IRQ) { + if (!cuda_irq) { printk(KERN_ERR "via-cuda: can't map interrupts for %s\n", vias->full_name); return -ENODEV; diff --git a/drivers/macintosh/via-pmu.c b/drivers/macintosh/via-pmu.c index f8b6d1403c16..91081dcdc272 100644 --- a/drivers/macintosh/via-pmu.c +++ b/drivers/macintosh/via-pmu.c @@ -145,7 +145,7 @@ static int pmu_fully_inited; static int pmu_has_adb; static struct device_node *gpio_node; static unsigned char __iomem *gpio_reg; -static int gpio_irq = NO_IRQ; +static int gpio_irq = 0; static int gpio_irq_enabled = -1; static volatile int pmu_suspended; static spinlock_t pmu_lock; @@ -402,7 +402,7 @@ static int __init via_pmu_start(void) batt_req.complete = 1; irq = irq_of_parse_and_map(vias, 0); - if (irq == NO_IRQ) { + if (!irq) { printk(KERN_ERR "via-pmu: can't map interrupt\n"); return -ENODEV; } @@ -424,7 +424,7 @@ static int __init via_pmu_start(void) if (gpio_node) gpio_irq = irq_of_parse_and_map(gpio_node, 0); - if (gpio_irq != NO_IRQ) { + if (gpio_irq) { if (request_irq(gpio_irq, gpio1_interrupt, IRQF_NO_SUSPEND, "GPIO1 ADB", (void *)0)) diff --git a/drivers/md/bcache/btree.c b/drivers/md/bcache/btree.c index 76f7534d1dd1..81d3db40cd7b 100644 --- a/drivers/md/bcache/btree.c +++ b/drivers/md/bcache/btree.c @@ -361,12 +361,8 @@ static void __btree_node_write_done(struct closure *cl) static void btree_node_write_done(struct closure *cl) { struct btree *b = container_of(cl, struct btree, io); - struct bio_vec *bv; - int n; - - bio_for_each_segment_all(bv, b->bio, n) - __free_page(bv->bv_page); + bio_free_pages(b->bio); __btree_node_write_done(cl); } diff --git a/drivers/md/bcache/debug.c b/drivers/md/bcache/debug.c index c28df164701e..333a1e5f6ae6 100644 --- a/drivers/md/bcache/debug.c +++ b/drivers/md/bcache/debug.c @@ -107,9 +107,8 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) { char name[BDEVNAME_SIZE]; struct bio *check; - struct bio_vec bv, *bv2; + struct bio_vec bv; struct bvec_iter iter; - int i; check = bio_clone(bio, GFP_NOIO); if (!check) @@ -136,8 +135,7 @@ void bch_data_verify(struct cached_dev *dc, struct bio *bio) kunmap_atomic(p1); } - bio_for_each_segment_all(bv2, check, i) - __free_page(bv2->bv_page); + bio_free_pages(check); out_put: bio_put(check); } diff --git a/drivers/md/bcache/movinggc.c b/drivers/md/bcache/movinggc.c index 1881319f2298..5c4bddecfaf0 100644 --- a/drivers/md/bcache/movinggc.c +++ b/drivers/md/bcache/movinggc.c @@ -44,11 +44,8 @@ static void write_moving_finish(struct closure *cl) { struct moving_io *io = container_of(cl, struct moving_io, cl); struct bio *bio = &io->bio.bio; - struct bio_vec *bv; - int i; - bio_for_each_segment_all(bv, bio, i) - __free_page(bv->bv_page); + bio_free_pages(bio); if (io->op.replace_collision) trace_bcache_gc_copy_collision(&io->w->key); diff --git a/drivers/md/bcache/request.c b/drivers/md/bcache/request.c index 4b177fe11ebb..40ffe5e424b3 100644 --- a/drivers/md/bcache/request.c +++ b/drivers/md/bcache/request.c @@ -694,13 +694,8 @@ static void cached_dev_cache_miss_done(struct closure *cl) if (s->iop.replace_collision) bch_mark_cache_miss_collision(s->iop.c, s->d); - if (s->iop.bio) { - int i; - struct bio_vec *bv; - - bio_for_each_segment_all(bv, s->iop.bio, i) - 
__free_page(bv->bv_page); - } + if (s->iop.bio) + bio_free_pages(s->iop.bio); cached_dev_bio_complete(cl); } diff --git a/drivers/md/bcache/writeback.c b/drivers/md/bcache/writeback.c index d9fd2a62e5f6..e51644e503a5 100644 --- a/drivers/md/bcache/writeback.c +++ b/drivers/md/bcache/writeback.c @@ -128,11 +128,8 @@ static void write_dirty_finish(struct closure *cl) struct dirty_io *io = container_of(cl, struct dirty_io, cl); struct keybuf_key *w = io->bio.bi_private; struct cached_dev *dc = io->dc; - struct bio_vec *bv; - int i; - bio_for_each_segment_all(bv, &io->bio, i) - __free_page(bv->bv_page); + bio_free_pages(&io->bio); /* This is kind of a dumb way of signalling errors. */ if (KEY_DIRTY(&w->key)) { diff --git a/drivers/md/dm-crypt.c b/drivers/md/dm-crypt.c index 874295757caa..0448e7e35c8c 100644 --- a/drivers/md/dm-crypt.c +++ b/drivers/md/dm-crypt.c @@ -1136,7 +1136,7 @@ static void clone_init(struct dm_crypt_io *io, struct bio *clone) clone->bi_private = io; clone->bi_end_io = crypt_endio; clone->bi_bdev = cc->dev->bdev; - bio_set_op_attrs(clone, bio_op(io->base_bio), io->base_bio->bi_opf); + bio_set_op_attrs(clone, bio_op(io->base_bio), bio_flags(io->base_bio)); } static int kcryptd_io_read(struct dm_crypt_io *io, gfp_t gfp) diff --git a/drivers/md/dm-log-writes.c b/drivers/md/dm-log-writes.c index 49e4d8d4558f..4dfe38655a49 100644 --- a/drivers/md/dm-log-writes.c +++ b/drivers/md/dm-log-writes.c @@ -149,8 +149,6 @@ static void put_io_block(struct log_writes_c *lc) static void log_end_io(struct bio *bio) { struct log_writes_c *lc = bio->bi_private; - struct bio_vec *bvec; - int i; if (bio->bi_error) { unsigned long flags; @@ -161,9 +159,7 @@ static void log_end_io(struct bio *bio) spin_unlock_irqrestore(&lc->blocks_lock, flags); } - bio_for_each_segment_all(bvec, bio, i) - __free_page(bvec->bv_page); - + bio_free_pages(bio); put_io_block(lc); bio_put(bio); } diff --git a/drivers/md/dm-rq.c b/drivers/md/dm-rq.c index 1ca7463e8bb2..ee48230a2952 100644 --- a/drivers/md/dm-rq.c +++ b/drivers/md/dm-rq.c @@ -955,7 +955,7 @@ int dm_mq_init_request_queue(struct mapped_device *md, struct dm_table *t) dm_init_md_queue(md); /* backfill 'mq' sysfs registration normally done in blk_register_queue */ - blk_mq_register_disk(md->disk); + blk_mq_register_dev(disk_to_dev(md->disk), q); return 0; diff --git a/drivers/md/raid1.c b/drivers/md/raid1.c index 21dc00eb1989..1961d827dbd1 100644 --- a/drivers/md/raid1.c +++ b/drivers/md/raid1.c @@ -145,12 +145,8 @@ static void * r1buf_pool_alloc(gfp_t gfp_flags, void *data) return r1_bio; out_free_pages: - while (--j >= 0) { - struct bio_vec *bv; - - bio_for_each_segment_all(bv, r1_bio->bios[j], i) - __free_page(bv->bv_page); - } + while (--j >= 0) + bio_free_pages(r1_bio->bios[j]); out_free_bio: while (++j < pi->raid_disks) diff --git a/drivers/media/rc/meson-ir.c b/drivers/media/rc/meson-ir.c index fcc3b82d1454..003fff07ade2 100644 --- a/drivers/media/rc/meson-ir.c +++ b/drivers/media/rc/meson-ir.c @@ -24,6 +24,7 @@ #define DRIVER_NAME "meson-ir" +/* valid on all Meson platforms */ #define IR_DEC_LDR_ACTIVE 0x00 #define IR_DEC_LDR_IDLE 0x04 #define IR_DEC_LDR_REPEAT 0x08 @@ -32,12 +33,21 @@ #define IR_DEC_FRAME 0x14 #define IR_DEC_STATUS 0x18 #define IR_DEC_REG1 0x1c +/* only available on Meson 8b and newer */ +#define IR_DEC_REG2 0x20 #define REG0_RATE_MASK (BIT(11) - 1) -#define REG1_MODE_MASK (BIT(7) | BIT(8)) -#define REG1_MODE_NEC (0 << 7) -#define REG1_MODE_GENERAL (2 << 7) +#define DECODE_MODE_NEC 0x0 +#define DECODE_MODE_RAW 0x2 + +/* Meson 6b 
uses REG1 to configure the mode */ +#define REG1_MODE_MASK GENMASK(8, 7) +#define REG1_MODE_SHIFT 7 + +/* Meson 8b / GXBB use REG2 to configure the mode */ +#define REG2_MODE_MASK GENMASK(3, 0) +#define REG2_MODE_SHIFT 0 #define REG1_TIME_IV_SHIFT 16 #define REG1_TIME_IV_MASK ((BIT(13) - 1) << REG1_TIME_IV_SHIFT) @@ -158,8 +168,15 @@ static int meson_ir_probe(struct platform_device *pdev) /* Reset the decoder */ meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, REG1_RESET); meson_ir_set_mask(ir, IR_DEC_REG1, REG1_RESET, 0); - /* Set general operation mode */ - meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK, REG1_MODE_GENERAL); + + /* Set general operation mode (= raw/software decoding) */ + if (of_device_is_compatible(node, "amlogic,meson6-ir")) + meson_ir_set_mask(ir, IR_DEC_REG1, REG1_MODE_MASK, + DECODE_MODE_RAW << REG1_MODE_SHIFT); + else + meson_ir_set_mask(ir, IR_DEC_REG2, REG2_MODE_MASK, + DECODE_MODE_RAW << REG2_MODE_SHIFT); + /* Set rate */ meson_ir_set_mask(ir, IR_DEC_REG0, REG0_RATE_MASK, MESON_TRATE - 1); /* IRQ on rising and falling edges */ @@ -197,6 +214,8 @@ static int meson_ir_remove(struct platform_device *pdev) static const struct of_device_id meson_ir_match[] = { { .compatible = "amlogic,meson6-ir" }, + { .compatible = "amlogic,meson8b-ir" }, + { .compatible = "amlogic,meson-gxbb-ir" }, { }, }; diff --git a/drivers/memory/atmel-ebi.c b/drivers/memory/atmel-ebi.c index f87ad6f5d2dc..b5ed3bd082b5 100644 --- a/drivers/memory/atmel-ebi.c +++ b/drivers/memory/atmel-ebi.c @@ -410,10 +410,7 @@ static int at91sam9_ebi_init(struct at91_ebi *ebi) field.reg = AT91SAM9_SMC_MODE(AT91SAM9_SMC_GENERIC); fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field); - if (IS_ERR(fields->mode)) - return PTR_ERR(fields->mode); - - return 0; + return PTR_ERR_OR_ZERO(fields->mode); } static int sama5d3_ebi_init(struct at91_ebi *ebi) @@ -441,10 +438,7 @@ static int sama5d3_ebi_init(struct at91_ebi *ebi) field.reg = SAMA5_SMC_MODE(SAMA5_SMC_GENERIC); fields->mode = devm_regmap_field_alloc(ebi->dev, ebi->smc, field); - if (IS_ERR(fields->mode)) - return PTR_ERR(fields->mode); - - return 0; + return PTR_ERR_OR_ZERO(fields->mode); } static int at91_ebi_dev_setup(struct at91_ebi *ebi, struct device_node *np, diff --git a/drivers/memory/atmel-sdramc.c b/drivers/memory/atmel-sdramc.c index 53a341f3b305..12080b05e3e6 100644 --- a/drivers/memory/atmel-sdramc.c +++ b/drivers/memory/atmel-sdramc.c @@ -53,12 +53,10 @@ static const struct of_device_id atmel_ramc_of_match[] = { static int atmel_ramc_probe(struct platform_device *pdev) { - const struct of_device_id *match; const struct at91_ramc_caps *caps; struct clk *clk; - match = of_match_device(atmel_ramc_of_match, &pdev->dev); - caps = match->data; + caps = of_device_get_match_data(&pdev->dev); if (caps->has_ddrck) { clk = devm_clk_get(&pdev->dev, "ddrck"); diff --git a/drivers/memory/omap-gpmc.c b/drivers/memory/omap-gpmc.c index f00f3e742265..5457c361ad58 100644 --- a/drivers/memory/omap-gpmc.c +++ b/drivers/memory/omap-gpmc.c @@ -350,8 +350,8 @@ static unsigned int gpmc_ps_to_ticks(unsigned int time_ps) return (time_ps + tick_ps - 1) / tick_ps; } -unsigned int gpmc_clk_ticks_to_ns(unsigned ticks, int cs, - enum gpmc_clk_domain cd) +static unsigned int gpmc_clk_ticks_to_ns(unsigned int ticks, int cs, + enum gpmc_clk_domain cd) { return ticks * gpmc_get_clk_period(cs, cd) / 1000; } @@ -2143,9 +2143,7 @@ err_child_fail: ret = -ENODEV; err_cs: - if (waitpin_desc) - gpiochip_free_own_desc(waitpin_desc); - + gpiochip_free_own_desc(waitpin_desc); 
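Dropping the caller-side NULL check before gpiochip_free_own_desc() in the error path above only works on the assumption that the helper tolerates a NULL descriptor and returns early; pushing the check into the callee is a common kernel cleanup-path idiom (kfree() is the canonical example). A hedged sketch of the pattern with a hypothetical wrapper, not the actual gpiolib internals:

	#include <linux/gpio/driver.h>

	/* hypothetical helper: NULL-tolerant, like kfree() */
	static void release_waitpin(struct gpio_desc *desc)
	{
		if (!desc)
			return;		/* callers may pass NULL unconditionally */
		gpiochip_free_own_desc(desc);
	}

With that contract, an error path can simply call release_waitpin(waitpin_desc) without guarding, which is what the simplified err_cs label now relies on.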
err: gpmc_cs_free(cs); @@ -2265,7 +2263,7 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc) gpmc->gpio_chip.get = gpmc_gpio_get; gpmc->gpio_chip.base = -1; - ret = gpiochip_add(&gpmc->gpio_chip); + ret = devm_gpiochip_add_data(gpmc->dev, &gpmc->gpio_chip, NULL); if (ret < 0) { dev_err(gpmc->dev, "could not register gpio chip: %d\n", ret); return ret; @@ -2274,11 +2272,6 @@ static int gpmc_gpio_init(struct gpmc_device *gpmc) return 0; } -static void gpmc_gpio_exit(struct gpmc_device *gpmc) -{ - gpiochip_remove(&gpmc->gpio_chip); -} - static int gpmc_probe(struct platform_device *pdev) { int rc; @@ -2365,15 +2358,13 @@ static int gpmc_probe(struct platform_device *pdev) rc = gpmc_setup_irq(gpmc); if (rc) { dev_err(gpmc->dev, "gpmc_setup_irq failed\n"); - goto setup_irq_failed; + goto gpio_init_failed; } gpmc_probe_dt_children(pdev); return 0; -setup_irq_failed: - gpmc_gpio_exit(gpmc); gpio_init_failed: gpmc_mem_exit(); pm_runtime_put_sync(&pdev->dev); @@ -2387,7 +2378,6 @@ static int gpmc_remove(struct platform_device *pdev) struct gpmc_device *gpmc = platform_get_drvdata(pdev); gpmc_free_irq(gpmc); - gpmc_gpio_exit(gpmc); gpmc_mem_exit(); pm_runtime_put_sync(&pdev->dev); pm_runtime_disable(&pdev->dev); diff --git a/drivers/misc/cxl/cxl.h b/drivers/misc/cxl/cxl.h index 344a0ff8f8c7..01d372aba131 100644 --- a/drivers/misc/cxl/cxl.h +++ b/drivers/misc/cxl/cxl.h @@ -162,7 +162,10 @@ static const cxl_p2n_reg_t CXL_PSL_WED_An = {0x0A0}; #define CXL_PSL_SPAP_V 0x0000000000000001ULL /****** CXL_PSL_Control ****************************************************/ -#define CXL_PSL_Control_tb 0x0000000000000001ULL +#define CXL_PSL_Control_tb (0x1ull << (63-63)) +#define CXL_PSL_Control_Fr (0x1ull << (63-31)) +#define CXL_PSL_Control_Fs_MASK (0x3ull << (63-29)) +#define CXL_PSL_Control_Fs_Complete (0x3ull << (63-29)) /****** CXL_PSL_DLCNTL *****************************************************/ #define CXL_PSL_DLCNTL_D (0x1ull << (63-28)) @@ -854,6 +857,7 @@ int cxl_register_one_irq(struct cxl *adapter, irq_handler_t handler, int cxl_check_error(struct cxl_afu *afu); int cxl_afu_slbia(struct cxl_afu *afu); int cxl_tlb_slb_invalidate(struct cxl *adapter); +int cxl_data_cache_flush(struct cxl *adapter); int cxl_afu_disable(struct cxl_afu *afu); int cxl_psl_purge(struct cxl_afu *afu); diff --git a/drivers/misc/cxl/native.c b/drivers/misc/cxl/native.c index e606fdc4bc9c..a217a74ccc98 100644 --- a/drivers/misc/cxl/native.c +++ b/drivers/misc/cxl/native.c @@ -290,6 +290,37 @@ int cxl_tlb_slb_invalidate(struct cxl *adapter) return 0; } +int cxl_data_cache_flush(struct cxl *adapter) +{ + u64 reg; + unsigned long timeout = jiffies + (HZ * CXL_TIMEOUT); + + pr_devel("Flushing data cache\n"); + + reg = cxl_p1_read(adapter, CXL_PSL_Control); + reg |= CXL_PSL_Control_Fr; + cxl_p1_write(adapter, CXL_PSL_Control, reg); + + reg = cxl_p1_read(adapter, CXL_PSL_Control); + while ((reg & CXL_PSL_Control_Fs_MASK) != CXL_PSL_Control_Fs_Complete) { + if (time_after_eq(jiffies, timeout)) { + dev_warn(&adapter->dev, "WARNING: cache flush timed out!\n"); + return -EBUSY; + } + + if (!cxl_ops->link_ok(adapter, NULL)) { + dev_warn(&adapter->dev, "WARNING: link down when flushing cache\n"); + return -EIO; + } + cpu_relax(); + reg = cxl_p1_read(adapter, CXL_PSL_Control); + } + + reg &= ~CXL_PSL_Control_Fr; + cxl_p1_write(adapter, CXL_PSL_Control, reg); + return 0; +} + static int cxl_write_sstp(struct cxl_afu *afu, u64 sstp0, u64 sstp1) { int rc; diff --git a/drivers/misc/cxl/of.c b/drivers/misc/cxl/of.c index 
edc458395f68..ec175ea5dfba 100644 --- a/drivers/misc/cxl/of.c +++ b/drivers/misc/cxl/of.c @@ -460,7 +460,7 @@ int cxl_of_probe(struct platform_device *pdev) struct device_node *afu_np = NULL; struct cxl *adapter = NULL; int ret; - int slice, slice_ok; + int slice = 0, slice_ok = 0; pr_devel("in %s\n", __func__); @@ -476,13 +476,13 @@ int cxl_of_probe(struct platform_device *pdev) } /* init afu */ - slice_ok = 0; - for (afu_np = NULL, slice = 0; (afu_np = of_get_next_child(np, afu_np)); slice++) { + for_each_child_of_node(np, afu_np) { if ((ret = cxl_guest_init_afu(adapter, slice, afu_np))) dev_err(&pdev->dev, "AFU %i failed to initialise: %i\n", slice, ret); else slice_ok++; + slice++; } if (slice_ok == 0) { @@ -490,8 +490,6 @@ int cxl_of_probe(struct platform_device *pdev) adapter->slices = 0; } - if (afu_np) - of_node_put(afu_np); return 0; } diff --git a/drivers/misc/cxl/pci.c b/drivers/misc/cxl/pci.c index 6f0c4ac4b649..7afad8477ad5 100644 --- a/drivers/misc/cxl/pci.c +++ b/drivers/misc/cxl/pci.c @@ -1239,6 +1239,9 @@ int cxl_pci_reset(struct cxl *adapter) dev_info(&dev->dev, "CXL reset\n"); + /* the adapter is about to be reset, so ignore errors */ + cxl_data_cache_flush(adapter); + /* pcie_warm_reset requests a fundamental pci reset which includes a * PERST assert/deassert. PERST triggers a loading of the image * if "user" or "factory" is selected in sysfs */ @@ -1530,11 +1533,11 @@ static void set_sl_ops(struct cxl *adapter, struct pci_dev *dev) { if (dev->vendor == PCI_VENDOR_ID_MELLANOX && dev->device == 0x1013) { /* Mellanox CX-4 */ - dev_info(&adapter->dev, "Device uses an XSL\n"); + dev_info(&dev->dev, "Device uses an XSL\n"); adapter->native->sl_ops = &xsl_ops; adapter->min_pe = 1; /* Workaround for CX-4 hardware bug */ } else { - dev_info(&adapter->dev, "Device uses a PSL\n"); + dev_info(&dev->dev, "Device uses a PSL\n"); adapter->native->sl_ops = &psl_ops; } } diff --git a/drivers/mtd/maps/Kconfig b/drivers/mtd/maps/Kconfig index 392f9eff5fb7..5bcc896a48c3 100644 --- a/drivers/mtd/maps/Kconfig +++ b/drivers/mtd/maps/Kconfig @@ -78,7 +78,7 @@ config MTD_PHYSMAP_OF_VERSATILE bool "Support ARM Versatile physmap OF" depends on MTD_PHYSMAP_OF depends on MFD_SYSCON - default y if (ARCH_INTEGRATOR || ARCH_VERSATILE || REALVIEW_DT) + default y if (ARCH_INTEGRATOR || ARCH_VERSATILE || ARCH_REALVIEW) help This provides some extra DT physmap parsing for the ARM Versatile platforms, basically to add a VPP (write protection) callback so diff --git a/drivers/net/ethernet/stmicro/stmmac/Kconfig b/drivers/net/ethernet/stmicro/stmmac/Kconfig index c732b8ce2528..3818c5e06eba 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Kconfig +++ b/drivers/net/ethernet/stmicro/stmmac/Kconfig @@ -61,13 +61,13 @@ config DWMAC_LPC18XX config DWMAC_MESON tristate "Amlogic Meson dwmac support" default ARCH_MESON - depends on OF && (ARCH_MESON || COMPILE_TEST) + depends on OF && COMMON_CLK && (ARCH_MESON || COMPILE_TEST) help Support for Ethernet controller on Amlogic Meson SoCs. This selects the Amlogic Meson SoC glue layer support for - the stmmac device driver. This driver is used for Meson6 and - Meson8 SoCs. + the stmmac device driver. This driver is used for Meson6, + Meson8, Meson8b and GXBB SoCs. 
config DWMAC_ROCKCHIP tristate "Rockchip dwmac support" diff --git a/drivers/net/ethernet/stmicro/stmmac/Makefile b/drivers/net/ethernet/stmicro/stmmac/Makefile index f0c9396fa28e..5d6ece5919b3 100644 --- a/drivers/net/ethernet/stmicro/stmmac/Makefile +++ b/drivers/net/ethernet/stmicro/stmmac/Makefile @@ -9,7 +9,7 @@ stmmac-objs:= stmmac_main.o stmmac_ethtool.o stmmac_mdio.o ring_mode.o \ obj-$(CONFIG_STMMAC_PLATFORM) += stmmac-platform.o obj-$(CONFIG_DWMAC_IPQ806X) += dwmac-ipq806x.o obj-$(CONFIG_DWMAC_LPC18XX) += dwmac-lpc18xx.o -obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o +obj-$(CONFIG_DWMAC_MESON) += dwmac-meson.o dwmac-meson8b.o obj-$(CONFIG_DWMAC_ROCKCHIP) += dwmac-rk.o obj-$(CONFIG_DWMAC_SOCFPGA) += dwmac-altr-socfpga.o obj-$(CONFIG_DWMAC_STI) += dwmac-sti.o diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c index c1bac1912b37..309d99536a2c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson.c @@ -1,5 +1,5 @@ /* - * Amlogic Meson DWMAC glue layer + * Amlogic Meson6 and Meson8 DWMAC glue layer * * Copyright (C) 2014 Beniamino Galvani <b.galvani@gmail.com> * @@ -96,5 +96,5 @@ static struct platform_driver meson6_dwmac_driver = { module_platform_driver(meson6_dwmac_driver); MODULE_AUTHOR("Beniamino Galvani <b.galvani@gmail.com>"); -MODULE_DESCRIPTION("Amlogic Meson DWMAC glue layer"); +MODULE_DESCRIPTION("Amlogic Meson6 and Meson8 DWMAC glue layer"); MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c new file mode 100644 index 000000000000..250e4ceafc8d --- /dev/null +++ b/drivers/net/ethernet/stmicro/stmmac/dwmac-meson8b.c @@ -0,0 +1,324 @@ +/* + * Amlogic Meson8b and GXBB DWMAC glue layer + * + * Copyright (C) 2016 Martin Blumenstingl <martin.blumenstingl@googlemail.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. 
+ */ + +#include <linux/clk.h> +#include <linux/clk-provider.h> +#include <linux/device.h> +#include <linux/ethtool.h> +#include <linux/io.h> +#include <linux/ioport.h> +#include <linux/module.h> +#include <linux/of_net.h> +#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/stmmac.h> + +#include "stmmac_platform.h" + +#define PRG_ETH0 0x0 + +#define PRG_ETH0_RGMII_MODE BIT(0) + +/* mux to choose between fclk_div2 (bit unset) and mpll2 (bit set) */ +#define PRG_ETH0_CLK_M250_SEL_SHIFT 4 +#define PRG_ETH0_CLK_M250_SEL_MASK GENMASK(4, 4) + +#define PRG_ETH0_TXDLY_SHIFT 5 +#define PRG_ETH0_TXDLY_MASK GENMASK(6, 5) +#define PRG_ETH0_TXDLY_OFF (0x0 << PRG_ETH0_TXDLY_SHIFT) +#define PRG_ETH0_TXDLY_QUARTER (0x1 << PRG_ETH0_TXDLY_SHIFT) +#define PRG_ETH0_TXDLY_HALF (0x2 << PRG_ETH0_TXDLY_SHIFT) +#define PRG_ETH0_TXDLY_THREE_QUARTERS (0x3 << PRG_ETH0_TXDLY_SHIFT) + +/* divider for the result of m250_sel */ +#define PRG_ETH0_CLK_M250_DIV_SHIFT 7 +#define PRG_ETH0_CLK_M250_DIV_WIDTH 3 + +/* divides the result of m25_sel by either 5 (bit unset) or 10 (bit set) */ +#define PRG_ETH0_CLK_M25_DIV_SHIFT 10 +#define PRG_ETH0_CLK_M25_DIV_WIDTH 1 + +#define PRG_ETH0_INVERTED_RMII_CLK BIT(11) +#define PRG_ETH0_TX_AND_PHY_REF_CLK BIT(12) + +#define MUX_CLK_NUM_PARENTS 2 + +struct meson8b_dwmac { + struct platform_device *pdev; + + void __iomem *regs; + + phy_interface_t phy_mode; + + struct clk_mux m250_mux; + struct clk *m250_mux_clk; + struct clk *m250_mux_parent[MUX_CLK_NUM_PARENTS]; + + struct clk_divider m250_div; + struct clk *m250_div_clk; + + struct clk_divider m25_div; + struct clk *m25_div_clk; +}; + +static void meson8b_dwmac_mask_bits(struct meson8b_dwmac *dwmac, u32 reg, + u32 mask, u32 value) +{ + u32 data; + + data = readl(dwmac->regs + reg); + data &= ~mask; + data |= (value & mask); + + writel(data, dwmac->regs + reg); +} + +static int meson8b_init_clk(struct meson8b_dwmac *dwmac) +{ + struct clk_init_data init; + int i, ret; + struct device *dev = &dwmac->pdev->dev; + char clk_name[32]; + const char *clk_div_parents[1]; + const char *mux_parent_names[MUX_CLK_NUM_PARENTS]; + static struct clk_div_table clk_25m_div_table[] = { + { .val = 0, .div = 5 }, + { .val = 1, .div = 10 }, + { /* sentinel */ }, + }; + + /* get the mux parents from DT */ + for (i = 0; i < MUX_CLK_NUM_PARENTS; i++) { + char name[16]; + + snprintf(name, sizeof(name), "clkin%d", i); + dwmac->m250_mux_parent[i] = devm_clk_get(dev, name); + if (IS_ERR(dwmac->m250_mux_parent[i])) { + ret = PTR_ERR(dwmac->m250_mux_parent[i]); + if (ret != -EPROBE_DEFER) + dev_err(dev, "Missing clock %s\n", name); + return ret; + } + + mux_parent_names[i] = + __clk_get_name(dwmac->m250_mux_parent[i]); + } + + /* create the m250_mux */ + snprintf(clk_name, sizeof(clk_name), "%s#m250_sel", dev_name(dev)); + init.name = clk_name; + init.ops = &clk_mux_ops; + init.flags = 0; + init.parent_names = mux_parent_names; + init.num_parents = MUX_CLK_NUM_PARENTS; + + dwmac->m250_mux.reg = dwmac->regs + PRG_ETH0; + dwmac->m250_mux.shift = PRG_ETH0_CLK_M250_SEL_SHIFT; + dwmac->m250_mux.mask = PRG_ETH0_CLK_M250_SEL_MASK; + dwmac->m250_mux.flags = 0; + dwmac->m250_mux.table = NULL; + dwmac->m250_mux.hw.init = &init; + + dwmac->m250_mux_clk = devm_clk_register(dev, &dwmac->m250_mux.hw); + if (WARN_ON(IS_ERR(dwmac->m250_mux_clk))) + return PTR_ERR(dwmac->m250_mux_clk); + + /* create the m250_div */ + snprintf(clk_name, sizeof(clk_name), "%s#m250_div", dev_name(dev)); + init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL); + init.ops = 
&clk_divider_ops; + init.flags = CLK_SET_RATE_PARENT; + clk_div_parents[0] = __clk_get_name(dwmac->m250_mux_clk); + init.parent_names = clk_div_parents; + init.num_parents = ARRAY_SIZE(clk_div_parents); + + dwmac->m250_div.reg = dwmac->regs + PRG_ETH0; + dwmac->m250_div.shift = PRG_ETH0_CLK_M250_DIV_SHIFT; + dwmac->m250_div.width = PRG_ETH0_CLK_M250_DIV_WIDTH; + dwmac->m250_div.hw.init = &init; + dwmac->m250_div.flags = CLK_DIVIDER_ONE_BASED | CLK_DIVIDER_ALLOW_ZERO; + + dwmac->m250_div_clk = devm_clk_register(dev, &dwmac->m250_div.hw); + if (WARN_ON(IS_ERR(dwmac->m250_div_clk))) + return PTR_ERR(dwmac->m250_div_clk); + + /* create the m25_div */ + snprintf(clk_name, sizeof(clk_name), "%s#m25_div", dev_name(dev)); + init.name = devm_kstrdup(dev, clk_name, GFP_KERNEL); + init.ops = &clk_divider_ops; + init.flags = CLK_IS_BASIC | CLK_SET_RATE_PARENT; + clk_div_parents[0] = __clk_get_name(dwmac->m250_div_clk); + init.parent_names = clk_div_parents; + init.num_parents = ARRAY_SIZE(clk_div_parents); + + dwmac->m25_div.reg = dwmac->regs + PRG_ETH0; + dwmac->m25_div.shift = PRG_ETH0_CLK_M25_DIV_SHIFT; + dwmac->m25_div.width = PRG_ETH0_CLK_M25_DIV_WIDTH; + dwmac->m25_div.table = clk_25m_div_table; + dwmac->m25_div.hw.init = &init; + dwmac->m25_div.flags = CLK_DIVIDER_ALLOW_ZERO; + + dwmac->m25_div_clk = devm_clk_register(dev, &dwmac->m25_div.hw); + if (WARN_ON(IS_ERR(dwmac->m25_div_clk))) + return PTR_ERR(dwmac->m25_div_clk); + + return 0; +} + +static int meson8b_init_prg_eth(struct meson8b_dwmac *dwmac) +{ + int ret; + unsigned long clk_rate; + + switch (dwmac->phy_mode) { + case PHY_INTERFACE_MODE_RGMII: + case PHY_INTERFACE_MODE_RGMII_ID: + case PHY_INTERFACE_MODE_RGMII_RXID: + case PHY_INTERFACE_MODE_RGMII_TXID: + /* Generate a 25MHz clock for the PHY */ + clk_rate = 25 * 1000 * 1000; + + /* enable RGMII mode */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE, + PRG_ETH0_RGMII_MODE); + + /* only relevant for RMII mode -> disable in RGMII mode */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, + PRG_ETH0_INVERTED_RMII_CLK, 0); + + /* TX clock delay - all known boards use a 1/4 cycle delay */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK, + PRG_ETH0_TXDLY_QUARTER); + break; + + case PHY_INTERFACE_MODE_RMII: + /* Use the rate of the mux clock for the internal RMII PHY */ + clk_rate = clk_get_rate(dwmac->m250_mux_clk); + + /* disable RGMII mode -> enables RMII mode */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_RGMII_MODE, + 0); + + /* invert internal clk_rmii_i to generate 25/2.5 tx_rx_clk */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, + PRG_ETH0_INVERTED_RMII_CLK, + PRG_ETH0_INVERTED_RMII_CLK); + + /* TX clock delay cannot be configured in RMII mode */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TXDLY_MASK, + 0); + + break; + + default: + dev_err(&dwmac->pdev->dev, "unsupported phy-mode %s\n", + phy_modes(dwmac->phy_mode)); + return -EINVAL; + } + + ret = clk_prepare_enable(dwmac->m25_div_clk); + if (ret) { + dev_err(&dwmac->pdev->dev, "failed to enable the PHY clock\n"); + return ret; + } + + ret = clk_set_rate(dwmac->m25_div_clk, clk_rate); + if (ret) { + clk_disable_unprepare(dwmac->m25_div_clk); + + dev_err(&dwmac->pdev->dev, "failed to set PHY clock\n"); + return ret; + } + + /* enable TX_CLK and PHY_REF_CLK generator */ + meson8b_dwmac_mask_bits(dwmac, PRG_ETH0, PRG_ETH0_TX_AND_PHY_REF_CLK, + PRG_ETH0_TX_AND_PHY_REF_CLK); + + return 0; +} + +static int meson8b_dwmac_probe(struct platform_device *pdev) +{ + struct plat_stmmacenet_data *plat_dat; + 
struct stmmac_resources stmmac_res; + struct resource *res; + struct meson8b_dwmac *dwmac; + int ret; + + ret = stmmac_get_platform_resources(pdev, &stmmac_res); + if (ret) + return ret; + + plat_dat = stmmac_probe_config_dt(pdev, &stmmac_res.mac); + if (IS_ERR(plat_dat)) + return PTR_ERR(plat_dat); + + dwmac = devm_kzalloc(&pdev->dev, sizeof(*dwmac), GFP_KERNEL); + if (!dwmac) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + dwmac->regs = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(dwmac->regs)) + return PTR_ERR(dwmac->regs); + + dwmac->pdev = pdev; + dwmac->phy_mode = of_get_phy_mode(pdev->dev.of_node); + if (dwmac->phy_mode < 0) { + dev_err(&pdev->dev, "missing phy-mode property\n"); + return -EINVAL; + } + + ret = meson8b_init_clk(dwmac); + if (ret) + return ret; + + ret = meson8b_init_prg_eth(dwmac); + if (ret) + return ret; + + plat_dat->bsp_priv = dwmac; + + return stmmac_dvr_probe(&pdev->dev, plat_dat, &stmmac_res); +} + +static int meson8b_dwmac_remove(struct platform_device *pdev) +{ + struct meson8b_dwmac *dwmac = get_stmmac_bsp_priv(&pdev->dev); + + clk_disable_unprepare(dwmac->m25_div_clk); + + return stmmac_pltfr_remove(pdev); +} + +static const struct of_device_id meson8b_dwmac_match[] = { + { .compatible = "amlogic,meson8b-dwmac" }, + { .compatible = "amlogic,meson-gxbb-dwmac" }, + { } +}; +MODULE_DEVICE_TABLE(of, meson8b_dwmac_match); + +static struct platform_driver meson8b_dwmac_driver = { + .probe = meson8b_dwmac_probe, + .remove = meson8b_dwmac_remove, + .driver = { + .name = "meson8b-dwmac", + .pm = &stmmac_pltfr_pm_ops, + .of_match_table = meson8b_dwmac_match, + }, +}; +module_platform_driver(meson8b_dwmac_driver); + +MODULE_AUTHOR("Martin Blumenstingl <martin.blumenstingl@googlemail.com>"); +MODULE_DESCRIPTION("Amlogic Meson8b and GXBB DWMAC glue layer"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h index ffeb8d9e2b2e..64e147f53a9c 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_platform.h @@ -30,4 +30,12 @@ int stmmac_get_platform_resources(struct platform_device *pdev, int stmmac_pltfr_remove(struct platform_device *pdev); extern const struct dev_pm_ops stmmac_pltfr_pm_ops; +static inline void *get_stmmac_bsp_priv(struct device *dev) +{ + struct net_device *ndev = dev_get_drvdata(dev); + struct stmmac_priv *priv = netdev_priv(ndev); + + return priv->plat->bsp_priv; +} + #endif /* __STMMAC_PLATFORM_H__ */ diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index 2feacc70bf61..4669c052239e 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -156,12 +156,14 @@ static void nvme_free_ns(struct kref *kref) { struct nvme_ns *ns = container_of(kref, struct nvme_ns, kref); - if (ns->type == NVME_NS_LIGHTNVM) - nvme_nvm_unregister(ns->queue, ns->disk->disk_name); + if (ns->ndev) + nvme_nvm_unregister(ns); - spin_lock(&dev_list_lock); - ns->disk->private_data = NULL; - spin_unlock(&dev_list_lock); + if (ns->disk) { + spin_lock(&dev_list_lock); + ns->disk->private_data = NULL; + spin_unlock(&dev_list_lock); + } put_disk(ns->disk); ida_simple_remove(&ns->ctrl->ns_ida, ns->instance); @@ -597,7 +599,7 @@ int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid, } int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid, - dma_addr_t dma_addr, u32 *result) + void *buffer, size_t buflen, u32 *result) { struct nvme_command c; 
struct nvme_completion cqe; @@ -606,10 +608,9 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid, memset(&c, 0, sizeof(c)); c.features.opcode = nvme_admin_get_features; c.features.nsid = cpu_to_le32(nsid); - c.features.dptr.prp1 = cpu_to_le64(dma_addr); c.features.fid = cpu_to_le32(fid); - ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, + ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, buffer, buflen, 0, NVME_QID_ANY, 0, 0); if (ret >= 0 && result) *result = le32_to_cpu(cqe.result); @@ -617,7 +618,7 @@ int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid, } int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, - dma_addr_t dma_addr, u32 *result) + void *buffer, size_t buflen, u32 *result) { struct nvme_command c; struct nvme_completion cqe; @@ -625,12 +626,11 @@ int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, memset(&c, 0, sizeof(c)); c.features.opcode = nvme_admin_set_features; - c.features.dptr.prp1 = cpu_to_le64(dma_addr); c.features.fid = cpu_to_le32(fid); c.features.dword11 = cpu_to_le32(dword11); - ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, NULL, 0, 0, - NVME_QID_ANY, 0, 0); + ret = __nvme_submit_sync_cmd(dev->admin_q, &c, &cqe, + buffer, buflen, 0, NVME_QID_ANY, 0, 0); if (ret >= 0 && result) *result = le32_to_cpu(cqe.result); return ret; @@ -664,7 +664,7 @@ int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count) u32 result; int status, nr_io_queues; - status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, 0, + status = nvme_set_features(ctrl, NVME_FEAT_NUM_QUEUES, q_count, NULL, 0, &result); if (status < 0) return status; @@ -888,42 +888,32 @@ static void nvme_config_discard(struct nvme_ns *ns) queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, ns->queue); } -static int nvme_revalidate_disk(struct gendisk *disk) +static int nvme_revalidate_ns(struct nvme_ns *ns, struct nvme_id_ns **id) { - struct nvme_ns *ns = disk->private_data; - struct nvme_id_ns *id; - u8 lbaf, pi_type; - u16 old_ms; - unsigned short bs; - - if (test_bit(NVME_NS_DEAD, &ns->flags)) { - set_capacity(disk, 0); - return -ENODEV; - } - if (nvme_identify_ns(ns->ctrl, ns->ns_id, &id)) { - dev_warn(disk_to_dev(ns->disk), "%s: Identify failure\n", - __func__); - return -ENODEV; - } - if (id->ncap == 0) { - kfree(id); + if (nvme_identify_ns(ns->ctrl, ns->ns_id, id)) { + dev_warn(ns->ctrl->dev, "%s: Identify failure\n", __func__); return -ENODEV; } - if (nvme_nvm_ns_supported(ns, id) && ns->type != NVME_NS_LIGHTNVM) { - if (nvme_nvm_register(ns->queue, disk->disk_name)) { - dev_warn(disk_to_dev(ns->disk), - "%s: LightNVM init failure\n", __func__); - kfree(id); - return -ENODEV; - } - ns->type = NVME_NS_LIGHTNVM; + if ((*id)->ncap == 0) { + kfree(*id); + return -ENODEV; } if (ns->ctrl->vs >= NVME_VS(1, 1)) - memcpy(ns->eui, id->eui64, sizeof(ns->eui)); + memcpy(ns->eui, (*id)->eui64, sizeof(ns->eui)); if (ns->ctrl->vs >= NVME_VS(1, 2)) - memcpy(ns->uuid, id->nguid, sizeof(ns->uuid)); + memcpy(ns->uuid, (*id)->nguid, sizeof(ns->uuid)); + + return 0; +} + +static void __nvme_revalidate_disk(struct gendisk *disk, struct nvme_id_ns *id) +{ + struct nvme_ns *ns = disk->private_data; + u8 lbaf, pi_type; + u16 old_ms; + unsigned short bs; old_ms = ns->ms; lbaf = id->flbas & NVME_NS_FLBAS_LBA_MASK; @@ -962,8 +952,26 @@ static int nvme_revalidate_disk(struct gendisk *disk) if (ns->ctrl->oncs & NVME_CTRL_ONCS_DSM) nvme_config_discard(ns); blk_mq_unfreeze_queue(disk->queue); +} + +static int 
nvme_revalidate_disk(struct gendisk *disk) +{ + struct nvme_ns *ns = disk->private_data; + struct nvme_id_ns *id = NULL; + int ret; + + if (test_bit(NVME_NS_DEAD, &ns->flags)) { + set_capacity(disk, 0); + return -ENODEV; + } + + ret = nvme_revalidate_ns(ns, &id); + if (ret) + return ret; + __nvme_revalidate_disk(disk, id); kfree(id); + return 0; } @@ -1425,7 +1433,7 @@ static DEVICE_ATTR(rescan_controller, S_IWUSR, NULL, nvme_sysfs_rescan); static ssize_t wwid_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct nvme_ns *ns = dev_to_disk(dev)->private_data; + struct nvme_ns *ns = nvme_get_ns_from_dev(dev); struct nvme_ctrl *ctrl = ns->ctrl; int serial_len = sizeof(ctrl->serial); int model_len = sizeof(ctrl->model); @@ -1449,7 +1457,7 @@ static DEVICE_ATTR(wwid, S_IRUGO, wwid_show, NULL); static ssize_t uuid_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct nvme_ns *ns = dev_to_disk(dev)->private_data; + struct nvme_ns *ns = nvme_get_ns_from_dev(dev); return sprintf(buf, "%pU\n", ns->uuid); } static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL); @@ -1457,7 +1465,7 @@ static DEVICE_ATTR(uuid, S_IRUGO, uuid_show, NULL); static ssize_t eui_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct nvme_ns *ns = dev_to_disk(dev)->private_data; + struct nvme_ns *ns = nvme_get_ns_from_dev(dev); return sprintf(buf, "%8phd\n", ns->eui); } static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL); @@ -1465,7 +1473,7 @@ static DEVICE_ATTR(eui, S_IRUGO, eui_show, NULL); static ssize_t nsid_show(struct device *dev, struct device_attribute *attr, char *buf) { - struct nvme_ns *ns = dev_to_disk(dev)->private_data; + struct nvme_ns *ns = nvme_get_ns_from_dev(dev); return sprintf(buf, "%d\n", ns->ns_id); } static DEVICE_ATTR(nsid, S_IRUGO, nsid_show, NULL); @@ -1482,7 +1490,7 @@ static umode_t nvme_ns_attrs_are_visible(struct kobject *kobj, struct attribute *a, int n) { struct device *dev = container_of(kobj, struct device, kobj); - struct nvme_ns *ns = dev_to_disk(dev)->private_data; + struct nvme_ns *ns = nvme_get_ns_from_dev(dev); if (a == &dev_attr_uuid.attr) { if (!memchr_inv(ns->uuid, 0, sizeof(ns->uuid))) @@ -1642,6 +1650,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) { struct nvme_ns *ns; struct gendisk *disk; + struct nvme_id_ns *id; + char disk_name[DISK_NAME_LEN]; int node = dev_to_node(ctrl->dev); ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node); @@ -1659,34 +1669,49 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) ns->queue->queuedata = ns; ns->ctrl = ctrl; - disk = alloc_disk_node(0, node); - if (!disk) - goto out_free_queue; - kref_init(&ns->kref); ns->ns_id = nsid; - ns->disk = disk; ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */ - blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift); nvme_set_queue_limits(ctrl, ns->queue); - disk->fops = &nvme_fops; - disk->private_data = ns; - disk->queue = ns->queue; - disk->flags = GENHD_FL_EXT_DEVT; - sprintf(disk->disk_name, "nvme%dn%d", ctrl->instance, ns->instance); + sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance); + + if (nvme_revalidate_ns(ns, &id)) + goto out_free_queue; - if (nvme_revalidate_disk(ns->disk)) - goto out_free_disk; + if (nvme_nvm_ns_supported(ns, id)) { + if (nvme_nvm_register(ns, disk_name, node, + &nvme_ns_attr_group)) { + dev_warn(ctrl->dev, "%s: LightNVM init failure\n", + __func__); + goto out_free_id; + } + } else { + disk = alloc_disk_node(0, node); + if (!disk) + goto 
out_free_id; + + disk->fops = &nvme_fops; + disk->private_data = ns; + disk->queue = ns->queue; + disk->flags = GENHD_FL_EXT_DEVT; + memcpy(disk->disk_name, disk_name, DISK_NAME_LEN); + ns->disk = disk; + + __nvme_revalidate_disk(disk, id); + } mutex_lock(&ctrl->namespaces_mutex); list_add_tail(&ns->list, &ctrl->namespaces); mutex_unlock(&ctrl->namespaces_mutex); kref_get(&ctrl->kref); - if (ns->type == NVME_NS_LIGHTNVM) + + kfree(id); + + if (ns->ndev) return; device_add_disk(ctrl->device, ns->disk); @@ -1695,8 +1720,8 @@ static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid) pr_warn("%s: failed to create sysfs group for identification\n", ns->disk->disk_name); return; - out_free_disk: - kfree(disk); + out_free_id: + kfree(id); out_free_queue: blk_cleanup_queue(ns->queue); out_release_instance: @@ -1710,7 +1735,7 @@ static void nvme_ns_remove(struct nvme_ns *ns) if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags)) return; - if (ns->disk->flags & GENHD_FL_UP) { + if (ns->disk && ns->disk->flags & GENHD_FL_UP) { if (blk_get_integrity(ns->disk)) blk_integrity_unregister(ns->disk); sysfs_remove_group(&disk_to_dev(ns->disk)->kobj, @@ -1733,7 +1758,7 @@ static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid) ns = nvme_find_get_ns(ctrl, nsid); if (ns) { - if (revalidate_disk(ns->disk)) + if (ns->disk && revalidate_disk(ns->disk)) nvme_ns_remove(ns); nvme_put_ns(ns); } else @@ -2038,7 +2063,7 @@ void nvme_kill_queues(struct nvme_ctrl *ctrl) * Revalidating a dead namespace sets capacity to 0. This will * end buffered writers dirtying pages that can't be synced. */ - if (!test_and_set_bit(NVME_NS_DEAD, &ns->flags)) + if (ns->disk && !test_and_set_bit(NVME_NS_DEAD, &ns->flags)) revalidate_disk(ns->disk); blk_set_queue_dying(ns->queue); diff --git a/drivers/nvme/host/fabrics.c b/drivers/nvme/host/fabrics.c index 4eff49174466..5a3f008d3480 100644 --- a/drivers/nvme/host/fabrics.c +++ b/drivers/nvme/host/fabrics.c @@ -111,8 +111,19 @@ static void nvmf_host_put(struct nvmf_host *host) */ int nvmf_get_address(struct nvme_ctrl *ctrl, char *buf, int size) { - return snprintf(buf, size, "traddr=%s,trsvcid=%s\n", - ctrl->opts->traddr, ctrl->opts->trsvcid); + int len = 0; + + if (ctrl->opts->mask & NVMF_OPT_TRADDR) + len += snprintf(buf, size, "traddr=%s", ctrl->opts->traddr); + if (ctrl->opts->mask & NVMF_OPT_TRSVCID) + len += snprintf(buf + len, size - len, "%strsvcid=%s", + (len) ? "," : "", ctrl->opts->trsvcid); + if (ctrl->opts->mask & NVMF_OPT_HOST_TRADDR) + len += snprintf(buf + len, size - len, "%shost_traddr=%s", + (len) ? 
"," : "", ctrl->opts->host_traddr); + len += snprintf(buf + len, size - len, "\n"); + + return len; } EXPORT_SYMBOL_GPL(nvmf_get_address); @@ -519,6 +530,7 @@ static const match_table_t opt_tokens = { { NVMF_OPT_RECONNECT_DELAY, "reconnect_delay=%d" }, { NVMF_OPT_KATO, "keep_alive_tmo=%d" }, { NVMF_OPT_HOSTNQN, "hostnqn=%s" }, + { NVMF_OPT_HOST_TRADDR, "host_traddr=%s" }, { NVMF_OPT_ERR, NULL } }; @@ -675,6 +687,14 @@ static int nvmf_parse_options(struct nvmf_ctrl_options *opts, } opts->reconnect_delay = token; break; + case NVMF_OPT_HOST_TRADDR: + p = match_strdup(args); + if (!p) { + ret = -ENOMEM; + goto out; + } + opts->host_traddr = p; + break; default: pr_warn("unknown parameter or missing value '%s' in ctrl creation request\n", p); @@ -741,6 +761,7 @@ void nvmf_free_options(struct nvmf_ctrl_options *opts) kfree(opts->traddr); kfree(opts->trsvcid); kfree(opts->subsysnqn); + kfree(opts->host_traddr); kfree(opts); } EXPORT_SYMBOL_GPL(nvmf_free_options); diff --git a/drivers/nvme/host/fabrics.h b/drivers/nvme/host/fabrics.h index 46e460aee52d..924145c979f1 100644 --- a/drivers/nvme/host/fabrics.h +++ b/drivers/nvme/host/fabrics.h @@ -52,6 +52,7 @@ enum { NVMF_OPT_KATO = 1 << 7, NVMF_OPT_HOSTNQN = 1 << 8, NVMF_OPT_RECONNECT_DELAY = 1 << 9, + NVMF_OPT_HOST_TRADDR = 1 << 10, }; /** @@ -64,9 +65,12 @@ enum { * being added. * @subsysnqn: Hold the fully qualified NQN subystem name (format defined * in the NVMe specification, "NVMe Qualified Names"). - * @traddr: network address that will be used by the host to communicate - * to the added NVMe controller. - * @trsvcid: network port used for host-controller communication. + * @traddr: The transport-specific TRADDR field for a port on the + * subsystem which is adding a controller. + * @trsvcid: The transport-specific TRSVCID field for a port on the + * subsystem which is adding a controller. + * @host_traddr: A transport-specific field identifying the NVME host port + * to use for the connection to the controller. * @queue_size: Number of IO queue elements. * @nr_io_queues: Number of controller IO queues that will be established. * @reconnect_delay: Time between two consecutive reconnect attempts. 
@@ -80,6 +84,7 @@ struct nvmf_ctrl_options { char *subsysnqn; char *traddr; char *trsvcid; + char *host_traddr; size_t queue_size; unsigned int nr_io_queues; unsigned int reconnect_delay; diff --git a/drivers/nvme/host/lightnvm.c b/drivers/nvme/host/lightnvm.c index 63f483daf930..f5e3011e31fc 100644 --- a/drivers/nvme/host/lightnvm.c +++ b/drivers/nvme/host/lightnvm.c @@ -475,7 +475,7 @@ static inline void nvme_nvm_rqtocmd(struct request *rq, struct nvm_rq *rqd, if (rqd->opcode == NVM_OP_HBWRITE || rqd->opcode == NVM_OP_HBREAD) c->hb_rw.slba = cpu_to_le64(nvme_block_nr(ns, - rqd->bio->bi_iter.bi_sector)); + rqd->bio->bi_iter.bi_sector)); } static void nvme_nvm_end_io(struct request *rq, int error) @@ -592,14 +592,37 @@ static struct nvm_dev_ops nvme_nvm_dev_ops = { .max_phys_sect = 64, }; -int nvme_nvm_register(struct request_queue *q, char *disk_name) +int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node, + const struct attribute_group *attrs) { - return nvm_register(q, disk_name, &nvme_nvm_dev_ops); + struct request_queue *q = ns->queue; + struct nvm_dev *dev; + int ret; + + dev = nvm_alloc_dev(node); + if (!dev) + return -ENOMEM; + + dev->q = q; + memcpy(dev->name, disk_name, DISK_NAME_LEN); + dev->ops = &nvme_nvm_dev_ops; + dev->parent_dev = ns->ctrl->device; + dev->private_data = ns; + ns->ndev = dev; + + ret = nvm_register(dev); + + ns->lba_shift = ilog2(dev->sec_size) - 9; + + if (sysfs_create_group(&dev->dev.kobj, attrs)) + pr_warn("%s: failed to create sysfs group for identification\n", + disk_name); + return ret; } -void nvme_nvm_unregister(struct request_queue *q, char *disk_name) +void nvme_nvm_unregister(struct nvme_ns *ns) { - nvm_unregister(disk_name); + nvm_unregister(ns->ndev); } /* move to shared place when used in multiple places. 
*/ diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index ab18b78102bf..b0a9ec681685 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -18,6 +18,7 @@ #include <linux/pci.h> #include <linux/kref.h> #include <linux/blk-mq.h> +#include <linux/lightnvm.h> enum { /* @@ -154,6 +155,7 @@ struct nvme_ns { struct nvme_ctrl *ctrl; struct request_queue *queue; struct gendisk *disk; + struct nvm_dev *ndev; struct kref kref; int instance; @@ -165,7 +167,6 @@ struct nvme_ns { u16 ms; bool ext; u8 pi_type; - int type; unsigned long flags; #define NVME_NS_REMOVING 0 @@ -292,9 +293,9 @@ int nvme_identify_ns(struct nvme_ctrl *dev, unsigned nsid, struct nvme_id_ns **id); int nvme_get_log_page(struct nvme_ctrl *dev, struct nvme_smart_log **log); int nvme_get_features(struct nvme_ctrl *dev, unsigned fid, unsigned nsid, - dma_addr_t dma_addr, u32 *result); + void *buffer, size_t buflen, u32 *result); int nvme_set_features(struct nvme_ctrl *dev, unsigned fid, unsigned dword11, - dma_addr_t dma_addr, u32 *result); + void *buffer, size_t buflen, u32 *result); int nvme_set_queue_count(struct nvme_ctrl *ctrl, int *count); void nvme_start_keep_alive(struct nvme_ctrl *ctrl); void nvme_stop_keep_alive(struct nvme_ctrl *ctrl); @@ -307,20 +308,35 @@ int nvme_sg_get_version_num(int __user *ip); #ifdef CONFIG_NVM int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id); -int nvme_nvm_register(struct request_queue *q, char *disk_name); -void nvme_nvm_unregister(struct request_queue *q, char *disk_name); +int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, int node, + const struct attribute_group *attrs); +void nvme_nvm_unregister(struct nvme_ns *ns); + +static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) +{ + if (dev->type->devnode) + return dev_to_disk(dev)->private_data; + + return (container_of(dev, struct nvm_dev, dev))->private_data; +} #else -static inline int nvme_nvm_register(struct request_queue *q, char *disk_name) +static inline int nvme_nvm_register(struct nvme_ns *ns, char *disk_name, + int node, + const struct attribute_group *attrs) { return 0; } -static inline void nvme_nvm_unregister(struct request_queue *q, char *disk_name) {}; +static inline void nvme_nvm_unregister(struct nvme_ns *ns) {}; static inline int nvme_nvm_ns_supported(struct nvme_ns *ns, struct nvme_id_ns *id) { return 0; } +static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) +{ + return dev_to_disk(dev)->private_data; +} #endif /* CONFIG_NVM */ int __init nvme_core_init(void); diff --git a/drivers/nvme/host/scsi.c b/drivers/nvme/host/scsi.c index e947e298a737..c2a0a1c7d05d 100644 --- a/drivers/nvme/host/scsi.c +++ b/drivers/nvme/host/scsi.c @@ -72,15 +72,6 @@ static int sg_version_num = 30534; /* 2 digits for each component */ #define ALL_LUNS_RETURNED 0x02 #define ALL_WELL_KNOWN_LUNS_RETURNED 0x01 #define RESTRICTED_LUNS_RETURNED 0x00 -#define NVME_POWER_STATE_START_VALID 0x00 -#define NVME_POWER_STATE_ACTIVE 0x01 -#define NVME_POWER_STATE_IDLE 0x02 -#define NVME_POWER_STATE_STANDBY 0x03 -#define NVME_POWER_STATE_LU_CONTROL 0x07 -#define POWER_STATE_0 0 -#define POWER_STATE_1 1 -#define POWER_STATE_2 2 -#define POWER_STATE_3 3 #define DOWNLOAD_SAVE_ACTIVATE 0x05 #define DOWNLOAD_SAVE_DEFER_ACTIVATE 0x0E #define ACTIVATE_DEFERRED_MICROCODE 0x0F @@ -915,7 +906,7 @@ static int nvme_trans_log_temperature(struct nvme_ns *ns, struct sg_io_hdr *hdr, kfree(smart_log); /* Get Features for Temp Threshold */ - res = nvme_get_features(ns->ctrl, 
NVME_FEAT_TEMP_THRESH, 0, 0, + res = nvme_get_features(ns->ctrl, NVME_FEAT_TEMP_THRESH, 0, NULL, 0, &feature_resp); if (res != NVME_SC_SUCCESS) temp_c_thresh = LOG_TEMP_UNKNOWN; @@ -1048,7 +1039,7 @@ static int nvme_trans_fill_caching_page(struct nvme_ns *ns, if (len < MODE_PAGE_CACHING_LEN) return -EINVAL; - nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, 0, + nvme_sc = nvme_get_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, 0, NULL, 0, &feature_resp); res = nvme_trans_status_code(hdr, nvme_sc); if (res) @@ -1229,64 +1220,6 @@ static void nvme_trans_fill_read_cap(u8 *response, struct nvme_id_ns *id_ns, /* Start Stop Unit Helper Functions */ -static int nvme_trans_power_state(struct nvme_ns *ns, struct sg_io_hdr *hdr, - u8 pc, u8 pcmod, u8 start) -{ - int res; - int nvme_sc; - struct nvme_id_ctrl *id_ctrl; - int lowest_pow_st; /* max npss = lowest power consumption */ - unsigned ps_desired = 0; - - nvme_sc = nvme_identify_ctrl(ns->ctrl, &id_ctrl); - res = nvme_trans_status_code(hdr, nvme_sc); - if (res) - return res; - - lowest_pow_st = max(POWER_STATE_0, (int)(id_ctrl->npss - 1)); - kfree(id_ctrl); - - switch (pc) { - case NVME_POWER_STATE_START_VALID: - /* Action unspecified if POWER CONDITION MODIFIER != 0 */ - if (pcmod == 0 && start == 0x1) - ps_desired = POWER_STATE_0; - if (pcmod == 0 && start == 0x0) - ps_desired = lowest_pow_st; - break; - case NVME_POWER_STATE_ACTIVE: - /* Action unspecified if POWER CONDITION MODIFIER != 0 */ - if (pcmod == 0) - ps_desired = POWER_STATE_0; - break; - case NVME_POWER_STATE_IDLE: - /* Action unspecified if POWER CONDITION MODIFIER != [0,1,2] */ - if (pcmod == 0x0) - ps_desired = POWER_STATE_1; - else if (pcmod == 0x1) - ps_desired = POWER_STATE_2; - else if (pcmod == 0x2) - ps_desired = POWER_STATE_3; - break; - case NVME_POWER_STATE_STANDBY: - /* Action unspecified if POWER CONDITION MODIFIER != [0,1] */ - if (pcmod == 0x0) - ps_desired = max(POWER_STATE_0, (lowest_pow_st - 2)); - else if (pcmod == 0x1) - ps_desired = max(POWER_STATE_0, (lowest_pow_st - 1)); - break; - case NVME_POWER_STATE_LU_CONTROL: - default: - res = nvme_trans_completion(hdr, SAM_STAT_CHECK_CONDITION, - ILLEGAL_REQUEST, SCSI_ASC_INVALID_CDB, - SCSI_ASCQ_CAUSE_NOT_REPORTABLE); - break; - } - nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_POWER_MGMT, ps_desired, 0, - NULL); - return nvme_trans_status_code(hdr, nvme_sc); -} - static int nvme_trans_send_activate_fw_cmd(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 buffer_id) { @@ -1395,7 +1328,7 @@ static int nvme_trans_modesel_get_mp(struct nvme_ns *ns, struct sg_io_hdr *hdr, case MODE_PAGE_CACHING: dword11 = ((mode_page[2] & CACHING_MODE_PAGE_WCE_MASK) ? 
1 : 0); nvme_sc = nvme_set_features(ns->ctrl, NVME_FEAT_VOLATILE_WC, - dword11, 0, NULL); + dword11, NULL, 0, NULL); res = nvme_trans_status_code(hdr, nvme_sc); break; case MODE_PAGE_CONTROL: @@ -2235,11 +2168,10 @@ static int nvme_trans_synchronize_cache(struct nvme_ns *ns, static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr, u8 *cmd) { - u8 immed, pcmod, pc, no_flush, start; + u8 immed, pcmod, no_flush, start; immed = cmd[1] & 0x01; pcmod = cmd[3] & 0x0f; - pc = (cmd[4] & 0xf0) >> 4; no_flush = cmd[4] & 0x04; start = cmd[4] & 0x01; @@ -2254,8 +2186,8 @@ static int nvme_trans_start_stop(struct nvme_ns *ns, struct sg_io_hdr *hdr, if (res) return res; } - /* Setup the expected power state transition */ - return nvme_trans_power_state(ns, hdr, pc, pcmod, start); + + return 0; } } diff --git a/drivers/nvme/target/admin-cmd.c b/drivers/nvme/target/admin-cmd.c index 47c564b5a289..7ab9c9381b98 100644 --- a/drivers/nvme/target/admin-cmd.c +++ b/drivers/nvme/target/admin-cmd.c @@ -14,6 +14,7 @@ #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt #include <linux/module.h> #include <generated/utsrelease.h> +#include <asm/unaligned.h> #include "nvmet.h" u32 nvmet_get_log_page_len(struct nvme_command *cmd) @@ -29,8 +30,84 @@ u32 nvmet_get_log_page_len(struct nvme_command *cmd) return len; } +static u16 nvmet_get_smart_log_nsid(struct nvmet_req *req, + struct nvme_smart_log *slog) +{ + u16 status; + struct nvmet_ns *ns; + u64 host_reads, host_writes, data_units_read, data_units_written; + + status = NVME_SC_SUCCESS; + ns = nvmet_find_namespace(req->sq->ctrl, req->cmd->get_log_page.nsid); + if (!ns) { + status = NVME_SC_INVALID_NS; + pr_err("nvmet : Could not find namespace id : %d\n", + le32_to_cpu(req->cmd->get_log_page.nsid)); + goto out; + } + + host_reads = part_stat_read(ns->bdev->bd_part, ios[READ]); + data_units_read = part_stat_read(ns->bdev->bd_part, sectors[READ]); + host_writes = part_stat_read(ns->bdev->bd_part, ios[WRITE]); + data_units_written = part_stat_read(ns->bdev->bd_part, sectors[WRITE]); + + put_unaligned_le64(host_reads, &slog->host_reads[0]); + put_unaligned_le64(data_units_read, &slog->data_units_read[0]); + put_unaligned_le64(host_writes, &slog->host_writes[0]); + put_unaligned_le64(data_units_written, &slog->data_units_written[0]); + nvmet_put_namespace(ns); +out: + return status; +} + +static u16 nvmet_get_smart_log_all(struct nvmet_req *req, + struct nvme_smart_log *slog) +{ + u16 status; + u64 host_reads = 0, host_writes = 0; + u64 data_units_read = 0, data_units_written = 0; + struct nvmet_ns *ns; + struct nvmet_ctrl *ctrl; + + status = NVME_SC_SUCCESS; + ctrl = req->sq->ctrl; + + rcu_read_lock(); + list_for_each_entry_rcu(ns, &ctrl->subsys->namespaces, dev_link) { + host_reads += part_stat_read(ns->bdev->bd_part, ios[READ]); + data_units_read += + part_stat_read(ns->bdev->bd_part, sectors[READ]); + host_writes += part_stat_read(ns->bdev->bd_part, ios[WRITE]); + data_units_written += + part_stat_read(ns->bdev->bd_part, sectors[WRITE]); + + } + rcu_read_unlock(); + + put_unaligned_le64(host_reads, &slog->host_reads[0]); + put_unaligned_le64(data_units_read, &slog->data_units_read[0]); + put_unaligned_le64(host_writes, &slog->host_writes[0]); + put_unaligned_le64(data_units_written, &slog->data_units_written[0]); + + return status; +} + +static u16 nvmet_get_smart_log(struct nvmet_req *req, + struct nvme_smart_log *slog) +{ + u16 status; + + WARN_ON(req == NULL || slog == NULL); + if (req->cmd->get_log_page.nsid == 0xFFFFFFFF) + status =
nvmet_get_smart_log_all(req, slog); + else + status = nvmet_get_smart_log_nsid(req, slog); + return status; +} + static void nvmet_execute_get_log_page(struct nvmet_req *req) { + struct nvme_smart_log *smart_log; size_t data_len = nvmet_get_log_page_len(req->cmd); void *buf; u16 status = 0; @@ -59,6 +136,16 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req) * available (e.g. units or commands read/written) those aren't * persistent over power loss. */ + if (data_len != sizeof(*smart_log)) { + status = NVME_SC_INTERNAL; + goto err; + } + smart_log = buf; + status = nvmet_get_smart_log(req, smart_log); + if (status) { + memset(buf, '\0', data_len); + goto err; + } break; case 0x03: /* @@ -73,6 +160,7 @@ static void nvmet_execute_get_log_page(struct nvmet_req *req) status = nvmet_copy_to_sgl(req, 0, buf, data_len); +err: kfree(buf); out: nvmet_req_complete(req, status); diff --git a/drivers/nvme/target/io-cmd.c b/drivers/nvme/target/io-cmd.c index 2cd069b691ae..4a96c2049b7b 100644 --- a/drivers/nvme/target/io-cmd.c +++ b/drivers/nvme/target/io-cmd.c @@ -58,6 +58,7 @@ static void nvmet_execute_rw(struct nvmet_req *req) if (req->cmd->rw.opcode == nvme_cmd_write) { op = REQ_OP_WRITE; + op_flags = WRITE_ODIRECT; if (req->cmd->rw.control & cpu_to_le16(NVME_RW_FUA)) op_flags |= REQ_FUA; } else { @@ -205,7 +206,7 @@ int nvmet_parse_io_cmd(struct nvmet_req *req) return 0; case nvme_cmd_dsm: req->execute = nvmet_execute_dsm; - req->data_len = le32_to_cpu(cmd->dsm.nr) * + req->data_len = le32_to_cpu(cmd->dsm.nr + 1) * sizeof(struct nvme_dsm_range); return 0; default: diff --git a/drivers/nvmem/Kconfig b/drivers/nvmem/Kconfig index f550c4596a7a..ba140eaee5c8 100644 --- a/drivers/nvmem/Kconfig +++ b/drivers/nvmem/Kconfig @@ -101,4 +101,14 @@ config NVMEM_VF610_OCOTP This driver can also be built as a module. If so, the module will be called nvmem-vf610-ocotp. +config MESON_EFUSE + tristate "Amlogic eFuse Support" + depends on (ARCH_MESON || COMPILE_TEST) && MESON_SM + help + This is a driver to retrieve specific values from the eFuse found on + the Amlogic Meson SoCs. + + This driver can also be built as a module. If so, the module + will be called nvmem_meson_efuse. + endif diff --git a/drivers/nvmem/Makefile b/drivers/nvmem/Makefile index 45ab1ae08fa9..8f942a0cdaec 100644 --- a/drivers/nvmem/Makefile +++ b/drivers/nvmem/Makefile @@ -22,3 +22,5 @@ obj-$(CONFIG_NVMEM_SUNXI_SID) += nvmem_sunxi_sid.o nvmem_sunxi_sid-y := sunxi_sid.o obj-$(CONFIG_NVMEM_VF610_OCOTP) += nvmem-vf610-ocotp.o nvmem-vf610-ocotp-y := vf610-ocotp.o +obj-$(CONFIG_MESON_EFUSE) += nvmem_meson_efuse.o +nvmem_meson_efuse-y := meson-efuse.o diff --git a/drivers/nvmem/meson-efuse.c b/drivers/nvmem/meson-efuse.c new file mode 100644 index 000000000000..f207c3b10482 --- /dev/null +++ b/drivers/nvmem/meson-efuse.c @@ -0,0 +1,93 @@ +/* + * Amlogic eFuse Driver + * + * Copyright (c) 2016 Endless Computers, Inc. + * Author: Carlo Caione <carlo@endlessm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but WITHOUT + * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or + * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for + * more details.
+ */ + +#include <linux/module.h> +#include <linux/nvmem-provider.h> +#include <linux/of.h> +#include <linux/platform_device.h> + +#include <linux/firmware/meson/meson_sm.h> + +static int meson_efuse_read(void *context, unsigned int offset, + void *val, size_t bytes) +{ + u8 *buf = val; + int ret; + + ret = meson_sm_call_read(buf, SM_EFUSE_READ, offset, + bytes, 0, 0, 0); + if (ret < 0) + return ret; + + return 0; +} + +static struct nvmem_config econfig = { + .name = "meson-efuse", + .owner = THIS_MODULE, + .stride = 1, + .word_size = 1, + .read_only = true, +}; + +static const struct of_device_id meson_efuse_match[] = { + { .compatible = "amlogic,meson-gxbb-efuse", }, + { /* sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, meson_efuse_match); + +static int meson_efuse_probe(struct platform_device *pdev) +{ + struct nvmem_device *nvmem; + unsigned int size; + + if (meson_sm_call(SM_EFUSE_USER_MAX, &size, 0, 0, 0, 0, 0) < 0) + return -EINVAL; + + econfig.dev = &pdev->dev; + econfig.reg_read = meson_efuse_read; + econfig.size = size; + + nvmem = nvmem_register(&econfig); + if (IS_ERR(nvmem)) + return PTR_ERR(nvmem); + + platform_set_drvdata(pdev, nvmem); + + return 0; +} + +static int meson_efuse_remove(struct platform_device *pdev) +{ + struct nvmem_device *nvmem = platform_get_drvdata(pdev); + + return nvmem_unregister(nvmem); +} + +static struct platform_driver meson_efuse_driver = { + .probe = meson_efuse_probe, + .remove = meson_efuse_remove, + .driver = { + .name = "meson-efuse", + .of_match_table = meson_efuse_match, + }, +}; + +module_platform_driver(meson_efuse_driver); + +MODULE_AUTHOR("Carlo Caione <carlo@endlessm.com>"); +MODULE_DESCRIPTION("Amlogic Meson NVMEM driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/pci/hotplug/pnv_php.c b/drivers/pci/hotplug/pnv_php.c index e6245b03f0a1..56efaf72d08e 100644 --- a/drivers/pci/hotplug/pnv_php.c +++ b/drivers/pci/hotplug/pnv_php.c @@ -22,6 +22,12 @@ #define DRIVER_AUTHOR "Gavin Shan, IBM Corporation" #define DRIVER_DESC "PowerPC PowerNV PCI Hotplug Driver" +struct pnv_php_event { + bool added; + struct pnv_php_slot *php_slot; + struct work_struct work; +}; + static LIST_HEAD(pnv_php_slot_list); static DEFINE_SPINLOCK(pnv_php_lock); @@ -29,12 +35,40 @@ static void pnv_php_register(struct device_node *dn); static void pnv_php_unregister_one(struct device_node *dn); static void pnv_php_unregister(struct device_node *dn); +static void pnv_php_disable_irq(struct pnv_php_slot *php_slot) +{ + struct pci_dev *pdev = php_slot->pdev; + u16 ctrl; + + if (php_slot->irq > 0) { + pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl); + ctrl &= ~(PCI_EXP_SLTCTL_HPIE | + PCI_EXP_SLTCTL_PDCE | + PCI_EXP_SLTCTL_DLLSCE); + pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl); + + free_irq(php_slot->irq, php_slot); + php_slot->irq = 0; + } + + if (php_slot->wq) { + destroy_workqueue(php_slot->wq); + php_slot->wq = NULL; + } + + if (pdev->msix_enabled) + pci_disable_msix(pdev); + else if (pdev->msi_enabled) + pci_disable_msi(pdev); +} + static void pnv_php_free_slot(struct kref *kref) { struct pnv_php_slot *php_slot = container_of(kref, struct pnv_php_slot, kref); WARN_ON(!list_empty(&php_slot->children)); + pnv_php_disable_irq(php_slot); kfree(php_slot->name); kfree(php_slot); } @@ -122,7 +156,7 @@ static void pnv_php_detach_device_nodes(struct device_node *parent) of_node_put(dn); refcount = atomic_read(&dn->kobj.kref.refcount); - if (unlikely(refcount != 1)) + if (refcount != 1) pr_warn("Invalid refcount %d on <%s>\n", refcount, 
of_node_full_name(dn)); @@ -184,11 +218,11 @@ static int pnv_php_populate_changeset(struct of_changeset *ocs, for_each_child_of_node(dn, child) { ret = of_changeset_attach_node(ocs, child); - if (unlikely(ret)) + if (ret) break; ret = pnv_php_populate_changeset(ocs, child); - if (unlikely(ret)) + if (ret) break; } @@ -201,7 +235,7 @@ static void *pnv_php_add_one_pdn(struct device_node *dn, void *data) struct pci_dn *pdn; pdn = pci_add_device_node_info(hose, dn); - if (unlikely(!pdn)) + if (!pdn) return ERR_PTR(-ENOMEM); return NULL; @@ -224,21 +258,21 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot) * fits the real size. */ fdt1 = kzalloc(0x10000, GFP_KERNEL); - if (unlikely(!fdt1)) { + if (!fdt1) { ret = -ENOMEM; dev_warn(&php_slot->pdev->dev, "Cannot alloc FDT blob\n"); goto out; } ret = pnv_pci_get_device_tree(php_slot->dn->phandle, fdt1, 0x10000); - if (unlikely(ret)) { + if (ret) { dev_warn(&php_slot->pdev->dev, "Error %d getting FDT blob\n", ret); goto free_fdt1; } fdt = kzalloc(fdt_totalsize(fdt1), GFP_KERNEL); - if (unlikely(!fdt)) { + if (!fdt) { ret = -ENOMEM; dev_warn(&php_slot->pdev->dev, "Cannot alloc %d bytes memory\n", fdt_totalsize(fdt1)); @@ -248,7 +282,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot) /* Unflatten device tree blob */ memcpy(fdt, fdt1, fdt_totalsize(fdt1)); dt = of_fdt_unflatten_tree(fdt, php_slot->dn, NULL); - if (unlikely(!dt)) { + if (!dt) { ret = -EINVAL; dev_warn(&php_slot->pdev->dev, "Cannot unflatten FDT\n"); goto free_fdt; @@ -258,7 +292,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot) of_changeset_init(&php_slot->ocs); pnv_php_reverse_nodes(php_slot->dn); ret = pnv_php_populate_changeset(&php_slot->ocs, php_slot->dn); - if (unlikely(ret)) { + if (ret) { pnv_php_reverse_nodes(php_slot->dn); dev_warn(&php_slot->pdev->dev, "Error %d populating changeset\n", ret); @@ -267,7 +301,7 @@ static int pnv_php_add_devtree(struct pnv_php_slot *php_slot) php_slot->dn->child = NULL; ret = of_changeset_apply(&php_slot->ocs); - if (unlikely(ret)) { + if (ret) { dev_warn(&php_slot->pdev->dev, "Error %d applying changeset\n", ret); goto destroy_changeset; @@ -301,7 +335,7 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot, int ret; ret = pnv_pci_set_power_state(php_slot->id, state, &msg); - if (likely(ret > 0)) { + if (ret > 0) { if (be64_to_cpu(msg.params[1]) != php_slot->dn->phandle || be64_to_cpu(msg.params[2]) != state || be64_to_cpu(msg.params[3]) != OPAL_SUCCESS) { @@ -311,7 +345,7 @@ int pnv_php_set_slot_power_state(struct hotplug_slot *slot, be64_to_cpu(msg.params[3])); return -ENOMSG; } - } else if (unlikely(ret < 0)) { + } else if (ret < 0) { dev_warn(&php_slot->pdev->dev, "Error %d powering %s\n", ret, (state == OPAL_PCI_SLOT_POWER_ON) ? "on" : "off"); return ret; @@ -338,7 +372,7 @@ static int pnv_php_get_power_state(struct hotplug_slot *slot, u8 *state) * be on. */ ret = pnv_pci_get_power_state(php_slot->id, &power_state); - if (unlikely(ret)) { + if (ret) { dev_warn(&php_slot->pdev->dev, "Error %d getting power status\n", ret); } else { @@ -360,7 +394,7 @@ static int pnv_php_get_adapter_state(struct hotplug_slot *slot, u8 *state) * get that, it will fall back to empty.
*/ ret = pnv_pci_get_presence_state(php_slot->id, &presence); - if (likely(ret >= 0)) { + if (ret >= 0) { *state = presence; slot->info->adapter_status = presence; ret = 0; @@ -393,7 +427,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan) /* Retrieve slot presence status */ ret = pnv_php_get_adapter_state(slot, &presence); - if (unlikely(ret)) + if (ret) return ret; /* Proceed if there is nothing behind the slot */ @@ -414,7 +448,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan) php_slot->power_state_check = true; ret = pnv_php_get_power_state(slot, &power_status); - if (unlikely(ret)) + if (ret) return ret; if (power_status != OPAL_PCI_SLOT_POWER_ON) @@ -423,7 +457,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan) /* Check the power status. Scan the slot if it is already on */ ret = pnv_php_get_power_state(slot, &power_status); - if (unlikely(ret)) + if (ret) return ret; if (power_status == OPAL_PCI_SLOT_POWER_ON) @@ -431,7 +465,7 @@ static int pnv_php_enable(struct pnv_php_slot *php_slot, bool rescan) /* Power is off, turn it on and then scan the slot */ ret = pnv_php_set_slot_power_state(slot, OPAL_PCI_SLOT_POWER_ON); - if (unlikely(ret)) + if (ret) return ret; scan: @@ -513,29 +547,30 @@ static struct pnv_php_slot *pnv_php_alloc_slot(struct device_node *dn) struct pci_bus *bus; const char *label; uint64_t id; + int ret; - label = of_get_property(dn, "ibm,slot-label", NULL); - if (unlikely(!label)) + ret = of_property_read_string(dn, "ibm,slot-label", &label); + if (ret) return NULL; if (pnv_pci_get_slot_id(dn, &id)) return NULL; bus = pci_find_bus_by_node(dn); - if (unlikely(!bus)) + if (!bus) return NULL; php_slot = kzalloc(sizeof(*php_slot), GFP_KERNEL); - if (unlikely(!php_slot)) + if (!php_slot) return NULL; php_slot->name = kstrdup(label, GFP_KERNEL); - if (unlikely(!php_slot->name)) { + if (!php_slot->name) { kfree(php_slot); return NULL; } - if (likely(dn->child && PCI_DN(dn->child))) + if (dn->child && PCI_DN(dn->child)) php_slot->slot_no = PCI_SLOT(PCI_DN(dn->child)->devfn); else php_slot->slot_no = -1; /* Placeholder slot */ @@ -567,7 +602,7 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot) /* Check if the slot is registered or not */ parent = pnv_php_find_slot(php_slot->dn); - if (unlikely(parent)) { + if (parent) { pnv_php_put_slot(parent); return -EEXIST; } @@ -575,7 +610,7 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot) /* Register PCI slot */ ret = pci_hp_register(&php_slot->slot, php_slot->bus, php_slot->slot_no, php_slot->name); - if (unlikely(ret)) { + if (ret) { dev_warn(&php_slot->pdev->dev, "Error %d registering slot\n", ret); return ret; @@ -609,33 +644,213 @@ static int pnv_php_register_slot(struct pnv_php_slot *php_slot) return 0; } +static int pnv_php_enable_msix(struct pnv_php_slot *php_slot) +{ + struct pci_dev *pdev = php_slot->pdev; + struct msix_entry entry; + int nr_entries, ret; + u16 pcie_flag; + + /* Get total number of MSIx entries */ + nr_entries = pci_msix_vec_count(pdev); + if (nr_entries < 0) + return nr_entries; + + /* Check hotplug MSIx entry is in range */ + pcie_capability_read_word(pdev, PCI_EXP_FLAGS, &pcie_flag); + entry.entry = (pcie_flag & PCI_EXP_FLAGS_IRQ) >> 9; + if (entry.entry >= nr_entries) + return -ERANGE; + + /* Enable MSIx */ + ret = pci_enable_msix_exact(pdev, &entry, 1); + if (ret) { + dev_warn(&pdev->dev, "Error %d enabling MSIx\n", ret); + return ret; + } + + return entry.vector; +} + +static void
pnv_php_event_handler(struct work_struct *work) +{ + struct pnv_php_event *event = + container_of(work, struct pnv_php_event, work); + struct pnv_php_slot *php_slot = event->php_slot; + + if (event->added) + pnv_php_enable_slot(&php_slot->slot); + else + pnv_php_disable_slot(&php_slot->slot); + + kfree(event); +} + +static irqreturn_t pnv_php_interrupt(int irq, void *data) +{ + struct pnv_php_slot *php_slot = data; + struct pci_dev *pchild, *pdev = php_slot->pdev; + struct eeh_dev *edev; + struct eeh_pe *pe; + struct pnv_php_event *event; + u16 sts, lsts; + u8 presence; + bool added; + unsigned long flags; + int ret; + + pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts); + sts &= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts); + if (sts & PCI_EXP_SLTSTA_DLLSC) { + pcie_capability_read_word(pdev, PCI_EXP_LNKSTA, &lsts); + added = !!(lsts & PCI_EXP_LNKSTA_DLLLA); + } else if (sts & PCI_EXP_SLTSTA_PDC) { + ret = pnv_pci_get_presence_state(php_slot->id, &presence); + if (!ret) + return IRQ_HANDLED; + added = !!(presence == OPAL_PCI_SLOT_PRESENT); + } else { + return IRQ_NONE; + } + + /* Freeze the removed PE to avoid unexpected error reporting */ + if (!added) { + pchild = list_first_entry_or_null(&php_slot->bus->devices, + struct pci_dev, bus_list); + edev = pchild ? pci_dev_to_eeh_dev(pchild) : NULL; + pe = edev ? edev->pe : NULL; + if (pe) { + eeh_serialize_lock(&flags); + eeh_pe_state_mark(pe, EEH_PE_ISOLATED); + eeh_serialize_unlock(flags); + eeh_pe_set_option(pe, EEH_OPT_FREEZE_PE); + } + } + + /* + * The PE is left in frozen state if the event is missed. It's + * fine as the PCI devices (PE) aren't functional any more. + */ + event = kzalloc(sizeof(*event), GFP_ATOMIC); + if (!event) { + dev_warn(&pdev->dev, "PCI slot [%s] missed hotplug event 0x%04x\n", + php_slot->name, sts); + return IRQ_HANDLED; + } + + dev_info(&pdev->dev, "PCI slot [%s] %s (IRQ: %d)\n", + php_slot->name, added ? 
"added" : "removed", irq); + INIT_WORK(&event->work, pnv_php_event_handler); + event->added = added; + event->php_slot = php_slot; + queue_work(php_slot->wq, &event->work); + + return IRQ_HANDLED; +} + +static void pnv_php_init_irq(struct pnv_php_slot *php_slot, int irq) +{ + struct pci_dev *pdev = php_slot->pdev; + u16 sts, ctrl; + int ret; + + /* Allocate workqueue */ + php_slot->wq = alloc_workqueue("pciehp-%s", 0, 0, php_slot->name); + if (!php_slot->wq) { + dev_warn(&pdev->dev, "Cannot alloc workqueue\n"); + pnv_php_disable_irq(php_slot); + return; + } + + /* Clear pending interrupts */ + pcie_capability_read_word(pdev, PCI_EXP_SLTSTA, &sts); + sts |= (PCI_EXP_SLTSTA_PDC | PCI_EXP_SLTSTA_DLLSC); + pcie_capability_write_word(pdev, PCI_EXP_SLTSTA, sts); + + /* Request the interrupt */ + ret = request_irq(irq, pnv_php_interrupt, IRQF_SHARED, + php_slot->name, php_slot); + if (ret) { + pnv_php_disable_irq(php_slot); + dev_warn(&pdev->dev, "Error %d enabling IRQ %d\n", ret, irq); + return; + } + + /* Enable the interrupts */ + pcie_capability_read_word(pdev, PCI_EXP_SLTCTL, &ctrl); + ctrl |= (PCI_EXP_SLTCTL_HPIE | + PCI_EXP_SLTCTL_PDCE | + PCI_EXP_SLTCTL_DLLSCE); + pcie_capability_write_word(pdev, PCI_EXP_SLTCTL, ctrl); + + /* The interrupt is initialized successfully when @irq is valid */ + php_slot->irq = irq; +} + +static void pnv_php_enable_irq(struct pnv_php_slot *php_slot) +{ + struct pci_dev *pdev = php_slot->pdev; + int irq, ret; + + ret = pci_enable_device(pdev); + if (ret) { + dev_warn(&pdev->dev, "Error %d enabling device\n", ret); + return; + } + + pci_set_master(pdev); + + /* Enable MSIx interrupt */ + irq = pnv_php_enable_msix(php_slot); + if (irq > 0) { + pnv_php_init_irq(php_slot, irq); + return; + } + + /* + * Use MSI if MSIx doesn't work. Fail back to legacy INTx + * if MSI doesn't work either + */ + ret = pci_enable_msi(pdev); + if (!ret || pdev->irq) { + irq = pdev->irq; + pnv_php_init_irq(php_slot, irq); + } +} + static int pnv_php_register_one(struct device_node *dn) { struct pnv_php_slot *php_slot; - const __be32 *prop32; + u32 prop32; int ret; /* Check if it's hotpluggable slot */ - prop32 = of_get_property(dn, "ibm,slot-pluggable", NULL); - if (!prop32 || !of_read_number(prop32, 1)) + ret = of_property_read_u32(dn, "ibm,slot-pluggable", &prop32); + if (ret || !prop32) return -ENXIO; - prop32 = of_get_property(dn, "ibm,reset-by-firmware", NULL); - if (!prop32 || !of_read_number(prop32, 1)) + ret = of_property_read_u32(dn, "ibm,reset-by-firmware", &prop32); + if (ret || !prop32) return -ENXIO; php_slot = pnv_php_alloc_slot(dn); - if (unlikely(!php_slot)) + if (!php_slot) return -ENODEV; ret = pnv_php_register_slot(php_slot); - if (unlikely(ret)) + if (ret) goto free_slot; ret = pnv_php_enable(php_slot, false); - if (unlikely(ret)) + if (ret) goto unregister_slot; + /* Enable interrupt if the slot supports surprise hotplug */ + ret = of_property_read_u32(dn, "ibm,slot-surprise-pluggable", &prop32); + if (!ret && prop32) + pnv_php_enable_irq(php_slot); + return 0; unregister_slot: diff --git a/drivers/perf/Kconfig b/drivers/perf/Kconfig index 04e2653bb8c0..4d5c5f9f0dbd 100644 --- a/drivers/perf/Kconfig +++ b/drivers/perf/Kconfig @@ -12,4 +12,11 @@ config ARM_PMU Say y if you want to use CPU performance monitors on ARM-based systems. +config XGENE_PMU + depends on PERF_EVENTS && ARCH_XGENE + bool "APM X-Gene SoC PMU" + default n + help + Say y if you want to use APM X-Gene SoC performance monitors. 
+ endmenu diff --git a/drivers/perf/Makefile b/drivers/perf/Makefile index acd2397ded94..b116e982810b 100644 --- a/drivers/perf/Makefile +++ b/drivers/perf/Makefile @@ -1 +1,2 @@ obj-$(CONFIG_ARM_PMU) += arm_pmu.o +obj-$(CONFIG_XGENE_PMU) += xgene_pmu.o diff --git a/drivers/perf/xgene_pmu.c b/drivers/perf/xgene_pmu.c new file mode 100644 index 000000000000..c2ac7646b99f --- /dev/null +++ b/drivers/perf/xgene_pmu.c @@ -0,0 +1,1398 @@ +/* + * APM X-Gene SoC PMU (Performance Monitor Unit) + * + * Copyright (c) 2016, Applied Micro Circuits Corporation + * Author: Hoan Tran <hotran@apm.com> + * Tai Nguyen <ttnguyen@apm.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License as published by the + * Free Software Foundation; either version 2 of the License, or (at your + * option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/acpi.h> +#include <linux/clk.h> +#include <linux/cpumask.h> +#include <linux/interrupt.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/of_address.h> +#include <linux/of_fdt.h> +#include <linux/of_irq.h> +#include <linux/of_platform.h> +#include <linux/perf_event.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/slab.h> + +#define CSW_CSWCR 0x0000 +#define CSW_CSWCR_DUALMCB_MASK BIT(0) +#define MCBADDRMR 0x0000 +#define MCBADDRMR_DUALMCU_MODE_MASK BIT(2) + +#define PCPPMU_INTSTATUS_REG 0x000 +#define PCPPMU_INTMASK_REG 0x004 +#define PCPPMU_INTMASK 0x0000000F +#define PCPPMU_INTENMASK 0xFFFFFFFF +#define PCPPMU_INTCLRMASK 0xFFFFFFF0 +#define PCPPMU_INT_MCU BIT(0) +#define PCPPMU_INT_MCB BIT(1) +#define PCPPMU_INT_L3C BIT(2) +#define PCPPMU_INT_IOB BIT(3) + +#define PMU_MAX_COUNTERS 4 +#define PMU_CNT_MAX_PERIOD 0x100000000ULL +#define PMU_OVERFLOW_MASK 0xF +#define PMU_PMCR_E BIT(0) +#define PMU_PMCR_P BIT(1) + +#define PMU_PMEVCNTR0 0x000 +#define PMU_PMEVCNTR1 0x004 +#define PMU_PMEVCNTR2 0x008 +#define PMU_PMEVCNTR3 0x00C +#define PMU_PMEVTYPER0 0x400 +#define PMU_PMEVTYPER1 0x404 +#define PMU_PMEVTYPER2 0x408 +#define PMU_PMEVTYPER3 0x40C +#define PMU_PMAMR0 0xA00 +#define PMU_PMAMR1 0xA04 +#define PMU_PMCNTENSET 0xC00 +#define PMU_PMCNTENCLR 0xC20 +#define PMU_PMINTENSET 0xC40 +#define PMU_PMINTENCLR 0xC60 +#define PMU_PMOVSR 0xC80 +#define PMU_PMCR 0xE04 + +#define to_pmu_dev(p) container_of(p, struct xgene_pmu_dev, pmu) +#define GET_CNTR(ev) (ev->hw.idx) +#define GET_EVENTID(ev) (ev->hw.config & 0xFFULL) +#define GET_AGENTID(ev) (ev->hw.config_base & 0xFFFFFFFFUL) +#define GET_AGENT1ID(ev) ((ev->hw.config_base >> 32) & 0xFFFFFFFFUL) + +struct hw_pmu_info { + u32 type; + u32 enable_mask; + void __iomem *csr; +}; + +struct xgene_pmu_dev { + struct hw_pmu_info *inf; + struct xgene_pmu *parent; + struct pmu pmu; + u8 max_counters; + DECLARE_BITMAP(cntr_assign_mask, PMU_MAX_COUNTERS); + u64 max_period; + const struct attribute_group **attr_groups; + struct perf_event *pmu_counter_event[PMU_MAX_COUNTERS]; +}; + +struct xgene_pmu { + struct device *dev; + int version; + void __iomem *pcppmu_csr; + u32 mcb_active_mask; + u32 mc_active_mask; + 
cpumask_t cpu; + raw_spinlock_t lock; + struct list_head l3cpmus; + struct list_head iobpmus; + struct list_head mcbpmus; + struct list_head mcpmus; +}; + +struct xgene_pmu_dev_ctx { + char *name; + struct list_head next; + struct xgene_pmu_dev *pmu_dev; + struct hw_pmu_info inf; +}; + +struct xgene_pmu_data { + int id; + u32 data; +}; + +enum xgene_pmu_version { + PCP_PMU_V1 = 1, + PCP_PMU_V2, +}; + +enum xgene_pmu_dev_type { + PMU_TYPE_L3C = 0, + PMU_TYPE_IOB, + PMU_TYPE_MCB, + PMU_TYPE_MC, +}; + +/* + * sysfs format attributes + */ +static ssize_t xgene_pmu_format_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + return sprintf(buf, "%s\n", (char *) eattr->var); +} + +#define XGENE_PMU_FORMAT_ATTR(_name, _config) \ + (&((struct dev_ext_attribute[]) { \ + { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_format_show, NULL), \ + .var = (void *) _config, } \ + })[0].attr.attr) + +static struct attribute *l3c_pmu_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(l3c_eventid, "config:0-7"), + XGENE_PMU_FORMAT_ATTR(l3c_agentid, "config1:0-9"), + NULL, +}; + +static struct attribute *iob_pmu_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(iob_eventid, "config:0-7"), + XGENE_PMU_FORMAT_ATTR(iob_agentid, "config1:0-63"), + NULL, +}; + +static struct attribute *mcb_pmu_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(mcb_eventid, "config:0-5"), + XGENE_PMU_FORMAT_ATTR(mcb_agentid, "config1:0-9"), + NULL, +}; + +static struct attribute *mc_pmu_format_attrs[] = { + XGENE_PMU_FORMAT_ATTR(mc_eventid, "config:0-28"), + NULL, +}; + +static const struct attribute_group l3c_pmu_format_attr_group = { + .name = "format", + .attrs = l3c_pmu_format_attrs, +}; + +static const struct attribute_group iob_pmu_format_attr_group = { + .name = "format", + .attrs = iob_pmu_format_attrs, +}; + +static const struct attribute_group mcb_pmu_format_attr_group = { + .name = "format", + .attrs = mcb_pmu_format_attrs, +}; + +static const struct attribute_group mc_pmu_format_attr_group = { + .name = "format", + .attrs = mc_pmu_format_attrs, +}; + +/* + * sysfs event attributes + */ +static ssize_t xgene_pmu_event_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct dev_ext_attribute *eattr; + + eattr = container_of(attr, struct dev_ext_attribute, attr); + return sprintf(buf, "config=0x%lx\n", (unsigned long) eattr->var); +} + +#define XGENE_PMU_EVENT_ATTR(_name, _config) \ + (&((struct dev_ext_attribute[]) { \ + { .attr = __ATTR(_name, S_IRUGO, xgene_pmu_event_show, NULL), \ + .var = (void *) _config, } \ + })[0].attr.attr) + +static struct attribute *l3c_pmu_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01), + XGENE_PMU_EVENT_ATTR(read-hit, 0x02), + XGENE_PMU_EVENT_ATTR(read-miss, 0x03), + XGENE_PMU_EVENT_ATTR(write-need-replacement, 0x06), + XGENE_PMU_EVENT_ATTR(write-not-need-replacement, 0x07), + XGENE_PMU_EVENT_ATTR(tq-full, 0x08), + XGENE_PMU_EVENT_ATTR(ackq-full, 0x09), + XGENE_PMU_EVENT_ATTR(wdb-full, 0x0a), + XGENE_PMU_EVENT_ATTR(bank-fifo-full, 0x0b), + XGENE_PMU_EVENT_ATTR(odb-full, 0x0c), + XGENE_PMU_EVENT_ATTR(wbq-full, 0x0d), + XGENE_PMU_EVENT_ATTR(bank-conflict-fifo-issue, 0x0e), + XGENE_PMU_EVENT_ATTR(bank-fifo-issue, 0x0f), + NULL, +}; + +static struct attribute *iob_pmu_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01), + XGENE_PMU_EVENT_ATTR(axi0-read, 0x02), + 
XGENE_PMU_EVENT_ATTR(axi0-read-partial, 0x03), + XGENE_PMU_EVENT_ATTR(axi1-read, 0x04), + XGENE_PMU_EVENT_ATTR(axi1-read-partial, 0x05), + XGENE_PMU_EVENT_ATTR(csw-read-block, 0x06), + XGENE_PMU_EVENT_ATTR(csw-read-partial, 0x07), + XGENE_PMU_EVENT_ATTR(axi0-write, 0x10), + XGENE_PMU_EVENT_ATTR(axi0-write-partial, 0x11), + XGENE_PMU_EVENT_ATTR(axi1-write, 0x13), + XGENE_PMU_EVENT_ATTR(axi1-write-partial, 0x14), + XGENE_PMU_EVENT_ATTR(csw-inbound-dirty, 0x16), + NULL, +}; + +static struct attribute *mcb_pmu_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01), + XGENE_PMU_EVENT_ATTR(csw-read, 0x02), + XGENE_PMU_EVENT_ATTR(csw-write-request, 0x03), + XGENE_PMU_EVENT_ATTR(mcb-csw-stall, 0x04), + XGENE_PMU_EVENT_ATTR(cancel-read-gack, 0x05), + NULL, +}; + +static struct attribute *mc_pmu_events_attrs[] = { + XGENE_PMU_EVENT_ATTR(cycle-count, 0x00), + XGENE_PMU_EVENT_ATTR(cycle-count-div-64, 0x01), + XGENE_PMU_EVENT_ATTR(act-cmd-sent, 0x02), + XGENE_PMU_EVENT_ATTR(pre-cmd-sent, 0x03), + XGENE_PMU_EVENT_ATTR(rd-cmd-sent, 0x04), + XGENE_PMU_EVENT_ATTR(rda-cmd-sent, 0x05), + XGENE_PMU_EVENT_ATTR(wr-cmd-sent, 0x06), + XGENE_PMU_EVENT_ATTR(wra-cmd-sent, 0x07), + XGENE_PMU_EVENT_ATTR(pde-cmd-sent, 0x08), + XGENE_PMU_EVENT_ATTR(sre-cmd-sent, 0x09), + XGENE_PMU_EVENT_ATTR(prea-cmd-sent, 0x0a), + XGENE_PMU_EVENT_ATTR(ref-cmd-sent, 0x0b), + XGENE_PMU_EVENT_ATTR(rd-rda-cmd-sent, 0x0c), + XGENE_PMU_EVENT_ATTR(wr-wra-cmd-sent, 0x0d), + XGENE_PMU_EVENT_ATTR(in-rd-collision, 0x0e), + XGENE_PMU_EVENT_ATTR(in-wr-collision, 0x0f), + XGENE_PMU_EVENT_ATTR(collision-queue-not-empty, 0x10), + XGENE_PMU_EVENT_ATTR(collision-queue-full, 0x11), + XGENE_PMU_EVENT_ATTR(mcu-request, 0x12), + XGENE_PMU_EVENT_ATTR(mcu-rd-request, 0x13), + XGENE_PMU_EVENT_ATTR(mcu-hp-rd-request, 0x14), + XGENE_PMU_EVENT_ATTR(mcu-wr-request, 0x15), + XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-all, 0x16), + XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-cancel, 0x17), + XGENE_PMU_EVENT_ATTR(mcu-rd-response, 0x18), + XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-all, 0x19), + XGENE_PMU_EVENT_ATTR(mcu-rd-proceed-speculative-cancel, 0x1a), + XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-all, 0x1b), + XGENE_PMU_EVENT_ATTR(mcu-wr-proceed-cancel, 0x1c), + NULL, +}; + +static const struct attribute_group l3c_pmu_events_attr_group = { + .name = "events", + .attrs = l3c_pmu_events_attrs, +}; + +static const struct attribute_group iob_pmu_events_attr_group = { + .name = "events", + .attrs = iob_pmu_events_attrs, +}; + +static const struct attribute_group mcb_pmu_events_attr_group = { + .name = "events", + .attrs = mcb_pmu_events_attrs, +}; + +static const struct attribute_group mc_pmu_events_attr_group = { + .name = "events", + .attrs = mc_pmu_events_attrs, +}; + +/* + * sysfs cpumask attributes + */ +static ssize_t xgene_pmu_cpumask_show(struct device *dev, + struct device_attribute *attr, char *buf) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(dev_get_drvdata(dev)); + + return cpumap_print_to_pagebuf(true, buf, &pmu_dev->parent->cpu); +} + +static DEVICE_ATTR(cpumask, S_IRUGO, xgene_pmu_cpumask_show, NULL); + +static struct attribute *xgene_pmu_cpumask_attrs[] = { + &dev_attr_cpumask.attr, + NULL, +}; + +static const struct attribute_group pmu_cpumask_attr_group = { + .attrs = xgene_pmu_cpumask_attrs, +}; + +/* + * Per PMU device attribute groups + */ +static const struct attribute_group *l3c_pmu_attr_groups[] = { + &l3c_pmu_format_attr_group, + &pmu_cpumask_attr_group, + &l3c_pmu_events_attr_group, + NULL +}; + 
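These "format" attributes are what perf tooling uses to pack command-line parameters into perf_event_attr: for the L3C PMU, l3c_eventid occupies config bits 0-7 and l3c_agentid occupies config1 bits 0-9, exactly what the GET_EVENTID()/GET_AGENTID() accessors near the top of the file pull back out. As the event_init() comment later in the driver notes, an agent whose config1 bit is cleared is counted, so an all-zero config1 counts every agent. A small standalone sketch of the packing a tool would perform (the struct and helper are made up for illustration):

#include <stdint.h>
#include <stdio.h>

/* Illustrative container for the two attr fields the format strings name. */
struct encoded_event {
	uint64_t config;	/* l3c_eventid lives in config:0-7 */
	uint64_t config1;	/* l3c_agentid lives in config1:0-9 */
};

static struct encoded_event encode_l3c(uint8_t eventid, uint16_t agent_bits)
{
	struct encoded_event ev = {
		.config = eventid & 0xFFu,
		.config1 = agent_bits & 0x3FFu,
	};
	return ev;
}

int main(void)
{
	/* 0x03 = l3c read-miss from the event list above; config1 of 0
	 * leaves the event counted for all agents. */
	struct encoded_event ev = encode_l3c(0x03, 0);

	printf("config=0x%llx config1=0x%llx\n",
	       (unsigned long long)ev.config,
	       (unsigned long long)ev.config1);
	return 0;
}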
+static const struct attribute_group *iob_pmu_attr_groups[] = { + &iob_pmu_format_attr_group, + &pmu_cpumask_attr_group, + &iob_pmu_events_attr_group, + NULL +}; + +static const struct attribute_group *mcb_pmu_attr_groups[] = { + &mcb_pmu_format_attr_group, + &pmu_cpumask_attr_group, + &mcb_pmu_events_attr_group, + NULL +}; + +static const struct attribute_group *mc_pmu_attr_groups[] = { + &mc_pmu_format_attr_group, + &pmu_cpumask_attr_group, + &mc_pmu_events_attr_group, + NULL +}; + +static int get_next_avail_cntr(struct xgene_pmu_dev *pmu_dev) +{ + int cntr; + + cntr = find_first_zero_bit(pmu_dev->cntr_assign_mask, + pmu_dev->max_counters); + if (cntr == pmu_dev->max_counters) + return -ENOSPC; + set_bit(cntr, pmu_dev->cntr_assign_mask); + + return cntr; +} + +static void clear_avail_cntr(struct xgene_pmu_dev *pmu_dev, int cntr) +{ + clear_bit(cntr, pmu_dev->cntr_assign_mask); +} + +static inline void xgene_pmu_mask_int(struct xgene_pmu *xgene_pmu) +{ + writel(PCPPMU_INTENMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG); +} + +static inline void xgene_pmu_unmask_int(struct xgene_pmu *xgene_pmu) +{ + writel(PCPPMU_INTCLRMASK, xgene_pmu->pcppmu_csr + PCPPMU_INTMASK_REG); +} + +static inline u32 xgene_pmu_read_counter(struct xgene_pmu_dev *pmu_dev, int idx) +{ + return readl(pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx)); +} + +static inline void +xgene_pmu_write_counter(struct xgene_pmu_dev *pmu_dev, int idx, u32 val) +{ + writel(val, pmu_dev->inf->csr + PMU_PMEVCNTR0 + (4 * idx)); +} + +static inline void +xgene_pmu_write_evttype(struct xgene_pmu_dev *pmu_dev, int idx, u32 val) +{ + writel(val, pmu_dev->inf->csr + PMU_PMEVTYPER0 + (4 * idx)); +} + +static inline void +xgene_pmu_write_agentmsk(struct xgene_pmu_dev *pmu_dev, u32 val) +{ + writel(val, pmu_dev->inf->csr + PMU_PMAMR0); +} + +static inline void +xgene_pmu_write_agent1msk(struct xgene_pmu_dev *pmu_dev, u32 val) +{ + writel(val, pmu_dev->inf->csr + PMU_PMAMR1); +} + +static inline void +xgene_pmu_enable_counter(struct xgene_pmu_dev *pmu_dev, int idx) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMCNTENSET); + val |= 1 << idx; + writel(val, pmu_dev->inf->csr + PMU_PMCNTENSET); +} + +static inline void +xgene_pmu_disable_counter(struct xgene_pmu_dev *pmu_dev, int idx) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMCNTENCLR); + val |= 1 << idx; + writel(val, pmu_dev->inf->csr + PMU_PMCNTENCLR); +} + +static inline void +xgene_pmu_enable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMINTENSET); + val |= 1 << idx; + writel(val, pmu_dev->inf->csr + PMU_PMINTENSET); +} + +static inline void +xgene_pmu_disable_counter_int(struct xgene_pmu_dev *pmu_dev, int idx) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMINTENCLR); + val |= 1 << idx; + writel(val, pmu_dev->inf->csr + PMU_PMINTENCLR); +} + +static inline void xgene_pmu_reset_counters(struct xgene_pmu_dev *pmu_dev) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMCR); + val |= PMU_PMCR_P; + writel(val, pmu_dev->inf->csr + PMU_PMCR); +} + +static inline void xgene_pmu_start_counters(struct xgene_pmu_dev *pmu_dev) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMCR); + val |= PMU_PMCR_E; + writel(val, pmu_dev->inf->csr + PMU_PMCR); +} + +static inline void xgene_pmu_stop_counters(struct xgene_pmu_dev *pmu_dev) +{ + u32 val; + + val = readl(pmu_dev->inf->csr + PMU_PMCR); + val &= ~PMU_PMCR_E; + writel(val, pmu_dev->inf->csr + PMU_PMCR); +} + +static void xgene_perf_pmu_enable(struct pmu 
*pmu) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu); + int enabled = bitmap_weight(pmu_dev->cntr_assign_mask, + pmu_dev->max_counters); + + if (!enabled) + return; + + xgene_pmu_start_counters(pmu_dev); +} + +static void xgene_perf_pmu_disable(struct pmu *pmu) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(pmu); + + xgene_pmu_stop_counters(pmu_dev); +} + +static int xgene_perf_event_init(struct perf_event *event) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct hw_perf_event *hw = &event->hw; + struct perf_event *sibling; + + /* Test the event attr type check for PMU enumeration */ + if (event->attr.type != event->pmu->type) + return -ENOENT; + + /* + * SOC PMU counters are shared across all cores. + * Therefore, it does not support per-process mode. + * Also, it does not support event sampling mode. + */ + if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK) + return -EINVAL; + + /* SOC counters do not have usr/os/guest/host bits */ + if (event->attr.exclude_user || event->attr.exclude_kernel || + event->attr.exclude_host || event->attr.exclude_guest) + return -EINVAL; + + if (event->cpu < 0) + return -EINVAL; + /* + * Many perf core operations (eg. events rotation) operate on a + * single CPU context. This is obvious for CPU PMUs, where one + * expects the same sets of events being observed on all CPUs, + * but can lead to issues for off-core PMUs, where each + * event could be theoretically assigned to a different CPU. To + * mitigate this, we enforce CPU assignment to one, selected + * processor (the one described in the "cpumask" attribute). + */ + event->cpu = cpumask_first(&pmu_dev->parent->cpu); + + hw->config = event->attr.config; + /* + * Each bit of the config1 field represents an agent from which the + * request of the event comes. The event is counted only if it's caused + * by a request of an agent that has the bit cleared. + * By default, the event is counted for all agents. + */ + hw->config_base = event->attr.config1; + + /* + * We must NOT create groups containing mixed PMUs, although software + * events are acceptable + */ + if (event->group_leader->pmu != event->pmu && + !is_software_event(event->group_leader)) + return -EINVAL; + + list_for_each_entry(sibling, &event->group_leader->sibling_list, + group_entry) + if (sibling->pmu != event->pmu && + !is_software_event(sibling)) + return -EINVAL; + + return 0; +} + +static void xgene_perf_enable_event(struct perf_event *event) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + + xgene_pmu_write_evttype(pmu_dev, GET_CNTR(event), GET_EVENTID(event)); + xgene_pmu_write_agentmsk(pmu_dev, ~((u32)GET_AGENTID(event))); + if (pmu_dev->inf->type == PMU_TYPE_IOB) + xgene_pmu_write_agent1msk(pmu_dev, ~((u32)GET_AGENT1ID(event))); + + xgene_pmu_enable_counter(pmu_dev, GET_CNTR(event)); + xgene_pmu_enable_counter_int(pmu_dev, GET_CNTR(event)); +} + +static void xgene_perf_disable_event(struct perf_event *event) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + + xgene_pmu_disable_counter(pmu_dev, GET_CNTR(event)); + xgene_pmu_disable_counter_int(pmu_dev, GET_CNTR(event)); +} + +static void xgene_perf_event_set_period(struct perf_event *event) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct hw_perf_event *hw = &event->hw; + /* + * The X-Gene PMU counters have a period of 2^32. To account for the + * possibility of extreme interrupt latency we program for a period of + * half that.
Hopefully we can handle the interrupt before another 2^31 + * events occur and the counter overtakes its previous value. + */ + u64 val = 1ULL << 31; + + local64_set(&hw->prev_count, val); + xgene_pmu_write_counter(pmu_dev, hw->idx, (u32) val); +} + +static void xgene_perf_event_update(struct perf_event *event) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct hw_perf_event *hw = &event->hw; + u64 delta, prev_raw_count, new_raw_count; + +again: + prev_raw_count = local64_read(&hw->prev_count); + new_raw_count = xgene_pmu_read_counter(pmu_dev, GET_CNTR(event)); + + if (local64_cmpxchg(&hw->prev_count, prev_raw_count, + new_raw_count) != prev_raw_count) + goto again; + + delta = (new_raw_count - prev_raw_count) & pmu_dev->max_period; + + local64_add(delta, &event->count); +} + +static void xgene_perf_read(struct perf_event *event) +{ + xgene_perf_event_update(event); +} + +static void xgene_perf_start(struct perf_event *event, int flags) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct hw_perf_event *hw = &event->hw; + + if (WARN_ON_ONCE(!(hw->state & PERF_HES_STOPPED))) + return; + + WARN_ON_ONCE(!(hw->state & PERF_HES_UPTODATE)); + hw->state = 0; + + xgene_perf_event_set_period(event); + + if (flags & PERF_EF_RELOAD) { + u64 prev_raw_count = local64_read(&hw->prev_count); + + xgene_pmu_write_counter(pmu_dev, GET_CNTR(event), + (u32) prev_raw_count); + } + + xgene_perf_enable_event(event); + perf_event_update_userpage(event); +} + +static void xgene_perf_stop(struct perf_event *event, int flags) +{ + struct hw_perf_event *hw = &event->hw; + u64 config; + + if (hw->state & PERF_HES_UPTODATE) + return; + + xgene_perf_disable_event(event); + WARN_ON_ONCE(hw->state & PERF_HES_STOPPED); + hw->state |= PERF_HES_STOPPED; + + if (hw->state & PERF_HES_UPTODATE) + return; + + config = hw->config; + xgene_perf_read(event); + hw->state |= PERF_HES_UPTODATE; +} + +static int xgene_perf_add(struct perf_event *event, int flags) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct hw_perf_event *hw = &event->hw; + + hw->state = PERF_HES_UPTODATE | PERF_HES_STOPPED; + + /* Allocate an event counter */ + hw->idx = get_next_avail_cntr(pmu_dev); + if (hw->idx < 0) + return -EAGAIN; + + /* Update counter event pointer for Interrupt handler */ + pmu_dev->pmu_counter_event[hw->idx] = event; + + if (flags & PERF_EF_START) + xgene_perf_start(event, PERF_EF_RELOAD); + + return 0; +} + +static void xgene_perf_del(struct perf_event *event, int flags) +{ + struct xgene_pmu_dev *pmu_dev = to_pmu_dev(event->pmu); + struct hw_perf_event *hw = &event->hw; + + xgene_perf_stop(event, PERF_EF_UPDATE); + + /* clear the assigned counter */ + clear_avail_cntr(pmu_dev, GET_CNTR(event)); + + perf_event_update_userpage(event); + pmu_dev->pmu_counter_event[hw->idx] = NULL; +} + +static int xgene_init_perf(struct xgene_pmu_dev *pmu_dev, char *name) +{ + struct xgene_pmu *xgene_pmu; + + pmu_dev->max_period = PMU_CNT_MAX_PERIOD - 1; + /* First version PMU supports only single event counter */ + xgene_pmu = pmu_dev->parent; + if (xgene_pmu->version == PCP_PMU_V1) + pmu_dev->max_counters = 1; + else + pmu_dev->max_counters = PMU_MAX_COUNTERS; + + /* Perf driver registration */ + pmu_dev->pmu = (struct pmu) { + .attr_groups = pmu_dev->attr_groups, + .task_ctx_nr = perf_invalid_context, + .pmu_enable = xgene_perf_pmu_enable, + .pmu_disable = xgene_perf_pmu_disable, + .event_init = xgene_perf_event_init, + .add = xgene_perf_add, + .del = xgene_perf_del, + .start = xgene_perf_start, + 
.stop = xgene_perf_stop, + .read = xgene_perf_read, + }; + + /* Hardware counter init */ + xgene_pmu_stop_counters(pmu_dev); + xgene_pmu_reset_counters(pmu_dev); + + return perf_pmu_register(&pmu_dev->pmu, name, -1); +} + +static int +xgene_pmu_dev_add(struct xgene_pmu *xgene_pmu, struct xgene_pmu_dev_ctx *ctx) +{ + struct device *dev = xgene_pmu->dev; + struct xgene_pmu_dev *pmu; + int rc; + + pmu = devm_kzalloc(dev, sizeof(*pmu), GFP_KERNEL); + if (!pmu) + return -ENOMEM; + pmu->parent = xgene_pmu; + pmu->inf = &ctx->inf; + ctx->pmu_dev = pmu; + + switch (pmu->inf->type) { + case PMU_TYPE_L3C: + pmu->attr_groups = l3c_pmu_attr_groups; + break; + case PMU_TYPE_IOB: + pmu->attr_groups = iob_pmu_attr_groups; + break; + case PMU_TYPE_MCB: + if (!(xgene_pmu->mcb_active_mask & pmu->inf->enable_mask)) + goto dev_err; + pmu->attr_groups = mcb_pmu_attr_groups; + break; + case PMU_TYPE_MC: + if (!(xgene_pmu->mc_active_mask & pmu->inf->enable_mask)) + goto dev_err; + pmu->attr_groups = mc_pmu_attr_groups; + break; + default: + return -EINVAL; + } + + rc = xgene_init_perf(pmu, ctx->name); + if (rc) { + dev_err(dev, "%s PMU: Failed to init perf driver\n", ctx->name); + goto dev_err; + } + + dev_info(dev, "%s PMU registered\n", ctx->name); + + return rc; + +dev_err: + devm_kfree(dev, pmu); + return -ENODEV; +} + +static void _xgene_pmu_isr(int irq, struct xgene_pmu_dev *pmu_dev) +{ + struct xgene_pmu *xgene_pmu = pmu_dev->parent; + u32 pmovsr; + int idx; + + pmovsr = readl(pmu_dev->inf->csr + PMU_PMOVSR) & PMU_OVERFLOW_MASK; + if (!pmovsr) + return; + + /* Clear interrupt flag */ + if (xgene_pmu->version == PCP_PMU_V1) + writel(0x0, pmu_dev->inf->csr + PMU_PMOVSR); + else + writel(pmovsr, pmu_dev->inf->csr + PMU_PMOVSR); + + for (idx = 0; idx < PMU_MAX_COUNTERS; idx++) { + struct perf_event *event = pmu_dev->pmu_counter_event[idx]; + int overflowed = pmovsr & BIT(idx); + + /* Ignore if we don't have an event. 
*/ + if (!event || !overflowed) + continue; + xgene_perf_event_update(event); + xgene_perf_event_set_period(event); + } +} + +static irqreturn_t xgene_pmu_isr(int irq, void *dev_id) +{ + struct xgene_pmu_dev_ctx *ctx; + struct xgene_pmu *xgene_pmu = dev_id; + unsigned long flags; + u32 val; + + raw_spin_lock_irqsave(&xgene_pmu->lock, flags); + + /* Get Interrupt PMU source */ + val = readl(xgene_pmu->pcppmu_csr + PCPPMU_INTSTATUS_REG); + if (val & PCPPMU_INT_MCU) { + list_for_each_entry(ctx, &xgene_pmu->mcpmus, next) { + _xgene_pmu_isr(irq, ctx->pmu_dev); + } + } + if (val & PCPPMU_INT_MCB) { + list_for_each_entry(ctx, &xgene_pmu->mcbpmus, next) { + _xgene_pmu_isr(irq, ctx->pmu_dev); + } + } + if (val & PCPPMU_INT_L3C) { + list_for_each_entry(ctx, &xgene_pmu->l3cpmus, next) { + _xgene_pmu_isr(irq, ctx->pmu_dev); + } + } + if (val & PCPPMU_INT_IOB) { + list_for_each_entry(ctx, &xgene_pmu->iobpmus, next) { + _xgene_pmu_isr(irq, ctx->pmu_dev); + } + } + + raw_spin_unlock_irqrestore(&xgene_pmu->lock, flags); + + return IRQ_HANDLED; +} + +static int acpi_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + void __iomem *csw_csr, *mcba_csr, *mcbb_csr; + struct resource *res; + unsigned int reg; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 1); + csw_csr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(csw_csr)) { + dev_err(&pdev->dev, "ioremap failed for CSW CSR resource\n"); + return PTR_ERR(csw_csr); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 2); + mcba_csr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mcba_csr)) { + dev_err(&pdev->dev, "ioremap failed for MCBA CSR resource\n"); + return PTR_ERR(mcba_csr); + } + + res = platform_get_resource(pdev, IORESOURCE_MEM, 3); + mcbb_csr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(mcbb_csr)) { + dev_err(&pdev->dev, "ioremap failed for MCBB CSR resource\n"); + return PTR_ERR(mcbb_csr); + } + + reg = readl(csw_csr + CSW_CSWCR); + if (reg & CSW_CSWCR_DUALMCB_MASK) { + /* Dual MCB active */ + xgene_pmu->mcb_active_mask = 0x3; + /* Probe all active MC(s) */ + reg = readl(mcbb_csr + CSW_CSWCR); + xgene_pmu->mc_active_mask = + (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5; + } else { + /* Single MCB active */ + xgene_pmu->mcb_active_mask = 0x1; + /* Probe all active MC(s) */ + reg = readl(mcba_csr + CSW_CSWCR); + xgene_pmu->mc_active_mask = + (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 
0x3 : 0x1; + } + + return 0; +} + +static int fdt_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + struct regmap *csw_map, *mcba_map, *mcbb_map; + struct device_node *np = pdev->dev.of_node; + unsigned int reg; + + csw_map = syscon_regmap_lookup_by_phandle(np, "regmap-csw"); + if (IS_ERR(csw_map)) { + dev_err(&pdev->dev, "unable to get syscon regmap csw\n"); + return PTR_ERR(csw_map); + } + + mcba_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcba"); + if (IS_ERR(mcba_map)) { + dev_err(&pdev->dev, "unable to get syscon regmap mcba\n"); + return PTR_ERR(mcba_map); + } + + mcbb_map = syscon_regmap_lookup_by_phandle(np, "regmap-mcbb"); + if (IS_ERR(mcbb_map)) { + dev_err(&pdev->dev, "unable to get syscon regmap mcbb\n"); + return PTR_ERR(mcbb_map); + } + + if (regmap_read(csw_map, CSW_CSWCR, &reg)) + return -EINVAL; + + if (reg & CSW_CSWCR_DUALMCB_MASK) { + /* Dual MCB active */ + xgene_pmu->mcb_active_mask = 0x3; + /* Probe all active MC(s) */ + if (regmap_read(mcbb_map, MCBADDRMR, &reg)) + return 0; + xgene_pmu->mc_active_mask = + (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0xF : 0x5; + } else { + /* Single MCB active */ + xgene_pmu->mcb_active_mask = 0x1; + /* Probe all active MC(s) */ + if (regmap_read(mcba_map, MCBADDRMR, &reg)) + return 0; + xgene_pmu->mc_active_mask = + (reg & MCBADDRMR_DUALMCU_MODE_MASK) ? 0x3 : 0x1; + } + + return 0; +} + +static int xgene_pmu_probe_active_mcb_mcu(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + if (has_acpi_companion(&pdev->dev)) + return acpi_pmu_probe_active_mcb_mcu(xgene_pmu, pdev); + return fdt_pmu_probe_active_mcb_mcu(xgene_pmu, pdev); +} + +static char *xgene_pmu_dev_name(struct device *dev, u32 type, int id) +{ + switch (type) { + case PMU_TYPE_L3C: + return devm_kasprintf(dev, GFP_KERNEL, "l3c%d", id); + case PMU_TYPE_IOB: + return devm_kasprintf(dev, GFP_KERNEL, "iob%d", id); + case PMU_TYPE_MCB: + return devm_kasprintf(dev, GFP_KERNEL, "mcb%d", id); + case PMU_TYPE_MC: + return devm_kasprintf(dev, GFP_KERNEL, "mc%d", id); + default: + return devm_kasprintf(dev, GFP_KERNEL, "unknown"); + } +} + +#if defined(CONFIG_ACPI) +static int acpi_pmu_dev_add_resource(struct acpi_resource *ares, void *data) +{ + struct resource *res = data; + + if (ares->type == ACPI_RESOURCE_TYPE_FIXED_MEMORY32) + acpi_dev_resource_memory(ares, res); + + /* Always tell the ACPI core to skip this resource */ + return 1; +} + +static struct +xgene_pmu_dev_ctx *acpi_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, + struct acpi_device *adev, u32 type) +{ + struct device *dev = xgene_pmu->dev; + struct list_head resource_list; + struct xgene_pmu_dev_ctx *ctx; + const union acpi_object *obj; + struct hw_pmu_info *inf; + void __iomem *dev_csr; + struct resource res; + int enable_bit; + int rc; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + + INIT_LIST_HEAD(&resource_list); + rc = acpi_dev_get_resources(adev, &resource_list, + acpi_pmu_dev_add_resource, &res); + acpi_dev_free_resource_list(&resource_list); + if (rc < 0 || IS_ERR(&res)) { + dev_err(dev, "PMU type %d: No resource address found\n", type); + goto err; + } + + dev_csr = devm_ioremap_resource(dev, &res); + if (IS_ERR(dev_csr)) { + dev_err(dev, "PMU type %d: Fail to map resource\n", type); + goto err; + } + + /* A PMU device node without enable-bit-index is always enabled */ + rc = acpi_dev_get_property(adev, "enable-bit-index", + ACPI_TYPE_INTEGER, &obj); + if (rc < 0) + enable_bit = 0; + else + enable_bit = (int) obj->integer.value;
+ + ctx->name = xgene_pmu_dev_name(dev, type, enable_bit); + if (!ctx->name) { + dev_err(dev, "PMU type %d: Fail to get device name\n", type); + goto err; + } + inf = &ctx->inf; + inf->type = type; + inf->csr = dev_csr; + inf->enable_mask = 1 << enable_bit; + + return ctx; +err: + devm_kfree(dev, ctx); + return NULL; +} + +static acpi_status acpi_pmu_dev_add(acpi_handle handle, u32 level, + void *data, void **return_value) +{ + struct xgene_pmu *xgene_pmu = data; + struct xgene_pmu_dev_ctx *ctx; + struct acpi_device *adev; + + if (acpi_bus_get_device(handle, &adev)) + return AE_OK; + if (acpi_bus_get_status(adev) || !adev->status.present) + return AE_OK; + + if (!strcmp(acpi_device_hid(adev), "APMC0D5D")) + ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_L3C); + else if (!strcmp(acpi_device_hid(adev), "APMC0D5E")) + ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_IOB); + else if (!strcmp(acpi_device_hid(adev), "APMC0D5F")) + ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_MCB); + else if (!strcmp(acpi_device_hid(adev), "APMC0D60")) + ctx = acpi_get_pmu_hw_inf(xgene_pmu, adev, PMU_TYPE_MC); + else + ctx = NULL; + + if (!ctx) + return AE_OK; + + if (xgene_pmu_dev_add(xgene_pmu, ctx)) { + /* Can't add the PMU device, skip it */ + devm_kfree(xgene_pmu->dev, ctx); + return AE_OK; + } + + switch (ctx->inf.type) { + case PMU_TYPE_L3C: + list_add(&ctx->next, &xgene_pmu->l3cpmus); + break; + case PMU_TYPE_IOB: + list_add(&ctx->next, &xgene_pmu->iobpmus); + break; + case PMU_TYPE_MCB: + list_add(&ctx->next, &xgene_pmu->mcbpmus); + break; + case PMU_TYPE_MC: + list_add(&ctx->next, &xgene_pmu->mcpmus); + break; + } + return AE_OK; +} + +static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + struct device *dev = xgene_pmu->dev; + acpi_handle handle; + acpi_status status; + + handle = ACPI_HANDLE(dev); + if (!handle) + return -EINVAL; + + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, handle, 1, + acpi_pmu_dev_add, NULL, xgene_pmu, NULL); + if (ACPI_FAILURE(status)) { + dev_err(dev, "failed to probe PMU devices\n"); + return -ENODEV; + } + + return 0; +} +#else +static int acpi_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + return 0; +} +#endif + +static struct +xgene_pmu_dev_ctx *fdt_get_pmu_hw_inf(struct xgene_pmu *xgene_pmu, + struct device_node *np, u32 type) +{ + struct device *dev = xgene_pmu->dev; + struct xgene_pmu_dev_ctx *ctx; + struct hw_pmu_info *inf; + void __iomem *dev_csr; + struct resource res; + int enable_bit; + int rc; + + ctx = devm_kzalloc(dev, sizeof(*ctx), GFP_KERNEL); + if (!ctx) + return NULL; + rc = of_address_to_resource(np, 0, &res); + if (rc < 0) { + dev_err(dev, "PMU type %d: No resource address found\n", type); + goto err; + } + dev_csr = devm_ioremap_resource(dev, &res); + if (IS_ERR(dev_csr)) { + dev_err(dev, "PMU type %d: Fail to map resource\n", type); + goto err; + } + + /* A PMU device node without enable-bit-index is always enabled */ + if (of_property_read_u32(np, "enable-bit-index", &enable_bit)) + enable_bit = 0; + + ctx->name = xgene_pmu_dev_name(dev, type, enable_bit); + if (!ctx->name) { + dev_err(dev, "PMU type %d: Fail to get device name\n", type); + goto err; + } + inf = &ctx->inf; + inf->type = type; + inf->csr = dev_csr; + inf->enable_mask = 1 << enable_bit; + + return ctx; +err: + devm_kfree(dev, ctx); + return NULL; +} + +static int fdt_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + struct xgene_pmu_dev_ctx *ctx; + struct 
device_node *np; + + for_each_child_of_node(pdev->dev.of_node, np) { + if (!of_device_is_available(np)) + continue; + + if (of_device_is_compatible(np, "apm,xgene-pmu-l3c")) + ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_L3C); + else if (of_device_is_compatible(np, "apm,xgene-pmu-iob")) + ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_IOB); + else if (of_device_is_compatible(np, "apm,xgene-pmu-mcb")) + ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MCB); + else if (of_device_is_compatible(np, "apm,xgene-pmu-mc")) + ctx = fdt_get_pmu_hw_inf(xgene_pmu, np, PMU_TYPE_MC); + else + ctx = NULL; + + if (!ctx) + continue; + + if (xgene_pmu_dev_add(xgene_pmu, ctx)) { + /* Can't add the PMU device, skip it */ + devm_kfree(xgene_pmu->dev, ctx); + continue; + } + + switch (ctx->inf.type) { + case PMU_TYPE_L3C: + list_add(&ctx->next, &xgene_pmu->l3cpmus); + break; + case PMU_TYPE_IOB: + list_add(&ctx->next, &xgene_pmu->iobpmus); + break; + case PMU_TYPE_MCB: + list_add(&ctx->next, &xgene_pmu->mcbpmus); + break; + case PMU_TYPE_MC: + list_add(&ctx->next, &xgene_pmu->mcpmus); + break; + } + } + + return 0; +} + +static int xgene_pmu_probe_pmu_dev(struct xgene_pmu *xgene_pmu, + struct platform_device *pdev) +{ + if (has_acpi_companion(&pdev->dev)) + return acpi_pmu_probe_pmu_dev(xgene_pmu, pdev); + return fdt_pmu_probe_pmu_dev(xgene_pmu, pdev); +} + +static const struct xgene_pmu_data xgene_pmu_data = { + .id = PCP_PMU_V1, +}; + +static const struct xgene_pmu_data xgene_pmu_v2_data = { + .id = PCP_PMU_V2, +}; + +static const struct of_device_id xgene_pmu_of_match[] = { + { .compatible = "apm,xgene-pmu", .data = &xgene_pmu_data }, + { .compatible = "apm,xgene-pmu-v2", .data = &xgene_pmu_v2_data }, + {}, +}; +MODULE_DEVICE_TABLE(of, xgene_pmu_of_match); +#ifdef CONFIG_ACPI +static const struct acpi_device_id xgene_pmu_acpi_match[] = { + {"APMC0D5B", PCP_PMU_V1}, + {"APMC0D5C", PCP_PMU_V2}, + {}, +}; +MODULE_DEVICE_TABLE(acpi, xgene_pmu_acpi_match); +#endif + +static int xgene_pmu_probe(struct platform_device *pdev) +{ + const struct xgene_pmu_data *dev_data; + const struct of_device_id *of_id; + struct xgene_pmu *xgene_pmu; + struct resource *res; + int irq, rc; + int version; + + xgene_pmu = devm_kzalloc(&pdev->dev, sizeof(*xgene_pmu), GFP_KERNEL); + if (!xgene_pmu) + return -ENOMEM; + xgene_pmu->dev = &pdev->dev; + platform_set_drvdata(pdev, xgene_pmu); + + version = -EINVAL; + of_id = of_match_device(xgene_pmu_of_match, &pdev->dev); + if (of_id) { + dev_data = (const struct xgene_pmu_data *) of_id->data; + version = dev_data->id; + } + +#ifdef CONFIG_ACPI + if (ACPI_COMPANION(&pdev->dev)) { + const struct acpi_device_id *acpi_id; + + acpi_id = acpi_match_device(xgene_pmu_acpi_match, &pdev->dev); + if (acpi_id) + version = (int) acpi_id->driver_data; + } +#endif + if (version < 0) + return -ENODEV; + + INIT_LIST_HEAD(&xgene_pmu->l3cpmus); + INIT_LIST_HEAD(&xgene_pmu->iobpmus); + INIT_LIST_HEAD(&xgene_pmu->mcbpmus); + INIT_LIST_HEAD(&xgene_pmu->mcpmus); + + xgene_pmu->version = version; + dev_info(&pdev->dev, "X-Gene PMU version %d\n", xgene_pmu->version); + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + xgene_pmu->pcppmu_csr = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(xgene_pmu->pcppmu_csr)) { + dev_err(&pdev->dev, "ioremap failed for PCP PMU resource\n"); + rc = PTR_ERR(xgene_pmu->pcppmu_csr); + goto err; + } + + irq = platform_get_irq(pdev, 0); + if (irq < 0) { + dev_err(&pdev->dev, "No IRQ resource\n"); + rc = -EINVAL; + goto err; + } + rc = devm_request_irq(&pdev->dev, 
irq, xgene_pmu_isr, + IRQF_NOBALANCING | IRQF_NO_THREAD, + dev_name(&pdev->dev), xgene_pmu); + if (rc) { + dev_err(&pdev->dev, "Could not request IRQ %d\n", irq); + goto err; + } + + raw_spin_lock_init(&xgene_pmu->lock); + + /* Check for active MCBs and MCUs */ + rc = xgene_pmu_probe_active_mcb_mcu(xgene_pmu, pdev); + if (rc) { + dev_warn(&pdev->dev, "Unknown MCB/MCU active status\n"); + xgene_pmu->mcb_active_mask = 0x1; + xgene_pmu->mc_active_mask = 0x1; + } + + /* Pick one core to use for cpumask attributes */ + cpumask_set_cpu(smp_processor_id(), &xgene_pmu->cpu); + + /* Make sure that the overflow interrupt is handled by this CPU */ + rc = irq_set_affinity(irq, &xgene_pmu->cpu); + if (rc) { + dev_err(&pdev->dev, "Failed to set interrupt affinity!\n"); + goto err; + } + + /* Walk through the tree for all PMU perf devices */ + rc = xgene_pmu_probe_pmu_dev(xgene_pmu, pdev); + if (rc) { + dev_err(&pdev->dev, "No PMU perf devices found!\n"); + goto err; + } + + /* Enable interrupt */ + xgene_pmu_unmask_int(xgene_pmu); + + return 0; + +err: + if (xgene_pmu->pcppmu_csr) + devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr); + devm_kfree(&pdev->dev, xgene_pmu); + + return rc; +} + +static void +xgene_pmu_dev_cleanup(struct xgene_pmu *xgene_pmu, struct list_head *pmus) +{ + struct xgene_pmu_dev_ctx *ctx; + struct device *dev = xgene_pmu->dev; + struct xgene_pmu_dev *pmu_dev; + + list_for_each_entry(ctx, pmus, next) { + pmu_dev = ctx->pmu_dev; + if (pmu_dev->inf->csr) + devm_iounmap(dev, pmu_dev->inf->csr); + devm_kfree(dev, ctx); + devm_kfree(dev, pmu_dev); + } +} + +static int xgene_pmu_remove(struct platform_device *pdev) +{ + struct xgene_pmu *xgene_pmu = dev_get_drvdata(&pdev->dev); + + xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->l3cpmus); + xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->iobpmus); + xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcbpmus); + xgene_pmu_dev_cleanup(xgene_pmu, &xgene_pmu->mcpmus); + + if (xgene_pmu->pcppmu_csr) + devm_iounmap(&pdev->dev, xgene_pmu->pcppmu_csr); + devm_kfree(&pdev->dev, xgene_pmu); + + return 0; +} + +static struct platform_driver xgene_pmu_driver = { + .probe = xgene_pmu_probe, + .remove = xgene_pmu_remove, + .driver = { + .name = "xgene-pmu", + .of_match_table = xgene_pmu_of_match, + .acpi_match_table = ACPI_PTR(xgene_pmu_acpi_match), + }, +}; + +builtin_platform_driver(xgene_pmu_driver); diff --git a/drivers/pinctrl/mvebu/pinctrl-orion.c b/drivers/pinctrl/mvebu/pinctrl-orion.c index 345c3df669a0..84e144167b44 100644 --- a/drivers/pinctrl/mvebu/pinctrl-orion.c +++ b/drivers/pinctrl/mvebu/pinctrl-orion.c @@ -64,11 +64,11 @@ static int orion_mpp_ctrl_set(unsigned pid, unsigned long config) return 0; } -#define V(f5181l, f5182, f5281) \ - ((f5181l << 0) | (f5182 << 1) | (f5281 << 2)) +#define V(f5181, f5182, f5281) \ + ((f5181 << 0) | (f5182 << 1) | (f5281 << 2)) enum orion_variant { - V_5181L = V(1, 0, 0), + V_5181 = V(1, 0, 0), V_5182 = V(0, 1, 0), V_5281 = V(0, 0, 1), V_ALL = V(1, 1, 1), @@ -103,13 +103,13 @@ static struct mvebu_mpp_mode orion_mpp_modes[] = { MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_ALL), MPP_VAR_FUNCTION(0x2, "pci", "req5", V_ALL), MPP_VAR_FUNCTION(0x4, "nand", "re0", V_5182 | V_5281), - MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181L), + MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181), MPP_VAR_FUNCTION(0x5, "sata0", "act", V_5182)), MPP_MODE(7, MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_ALL), MPP_VAR_FUNCTION(0x2, "pci", "gnt5", V_ALL), MPP_VAR_FUNCTION(0x4, "nand", "we0", V_5182 | V_5281), - MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181L), + 
MPP_VAR_FUNCTION(0x5, "pci-1", "clk", V_5181), MPP_VAR_FUNCTION(0x5, "sata1", "act", V_5182)), MPP_MODE(8, MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_ALL), @@ -165,7 +165,7 @@ static struct mvebu_mpp_ctrl orion_mpp_controls[] = { MPP_FUNC_CTRL(0, 19, NULL, orion_mpp_ctrl), }; -static struct pinctrl_gpio_range mv88f5181l_gpio_ranges[] = { +static struct pinctrl_gpio_range mv88f5181_gpio_ranges[] = { MPP_GPIO_RANGE(0, 0, 0, 16), }; @@ -177,14 +177,14 @@ static struct pinctrl_gpio_range mv88f5281_gpio_ranges[] = { MPP_GPIO_RANGE(0, 0, 0, 16), }; -static struct mvebu_pinctrl_soc_info mv88f5181l_info = { - .variant = V_5181L, +static struct mvebu_pinctrl_soc_info mv88f5181_info = { + .variant = V_5181, .controls = orion_mpp_controls, .ncontrols = ARRAY_SIZE(orion_mpp_controls), .modes = orion_mpp_modes, .nmodes = ARRAY_SIZE(orion_mpp_modes), - .gpioranges = mv88f5181l_gpio_ranges, - .ngpioranges = ARRAY_SIZE(mv88f5181l_gpio_ranges), + .gpioranges = mv88f5181_gpio_ranges, + .ngpioranges = ARRAY_SIZE(mv88f5181_gpio_ranges), }; static struct mvebu_pinctrl_soc_info mv88f5182_info = { @@ -212,7 +212,8 @@ static struct mvebu_pinctrl_soc_info mv88f5281_info = { * muxing, they are identical. */ static const struct of_device_id orion_pinctrl_of_match[] = { - { .compatible = "marvell,88f5181l-pinctrl", .data = &mv88f5181l_info }, + { .compatible = "marvell,88f5181-pinctrl", .data = &mv88f5181_info }, + { .compatible = "marvell,88f5181l-pinctrl", .data = &mv88f5181_info }, { .compatible = "marvell,88f5182-pinctrl", .data = &mv88f5182_info }, { .compatible = "marvell,88f5281-pinctrl", .data = &mv88f5281_info }, { } diff --git a/drivers/ps3/ps3-vuart.c b/drivers/ps3/ps3-vuart.c index 632701a1d993..b7f300b79ffd 100644 --- a/drivers/ps3/ps3-vuart.c +++ b/drivers/ps3/ps3-vuart.c @@ -958,7 +958,7 @@ static int ps3_vuart_bus_interrupt_get(void) fail_request_irq: ps3_vuart_irq_destroy(vuart_bus_priv.virq); - vuart_bus_priv.virq = NO_IRQ; + vuart_bus_priv.virq = 0; fail_alloc_irq: kfree(vuart_bus_priv.bmp); vuart_bus_priv.bmp = NULL; @@ -982,7 +982,7 @@ static int ps3_vuart_bus_interrupt_put(void) free_irq(vuart_bus_priv.virq, &vuart_bus_priv); ps3_vuart_irq_destroy(vuart_bus_priv.virq); - vuart_bus_priv.virq = NO_IRQ; + vuart_bus_priv.virq = 0; kfree(vuart_bus_priv.bmp); vuart_bus_priv.bmp = NULL; diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index 4be1b8c21f6f..06d9fa2f3bc0 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -14,9 +14,58 @@ menuconfig RESET_CONTROLLER if RESET_CONTROLLER +config RESET_ATH79 + bool "AR71xx Reset Driver" if COMPILE_TEST + default ATH79 + help + This enables the ATH79 reset controller driver that supports the + AR71xx SoC reset controller. + +config RESET_BERLIN + bool "Berlin Reset Driver" if COMPILE_TEST + default ARCH_BERLIN + help + This enables the reset controller driver for Marvell Berlin SoCs. + +config RESET_LPC18XX + bool "LPC18xx/43xx Reset Driver" if COMPILE_TEST + default ARCH_LPC18XX + help + This enables the reset controller driver for NXP LPC18xx/43xx SoCs. + +config RESET_MESON + bool "Meson Reset Driver" if COMPILE_TEST + default ARCH_MESON + help + This enables the reset driver for Amlogic Meson SoCs. + config RESET_OXNAS bool +config RESET_PISTACHIO + bool "Pistachio Reset Driver" if COMPILE_TEST + default MACH_PISTACHIO + help + This enables the reset driver for ImgTec Pistachio SoCs. 
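# The new entries above and below share one idiom: each option is
# user-visible only when COMPILE_TEST is set and otherwise silently
# follows its platform default, so the arch defconfigs keep their reset
# driver enabled while other architectures can still build-test it.
# A sketch of the idiom for a hypothetical driver (RESET_FOO and
# ARCH_FOO are placeholder symbols, not entries from this patch):
#
#	config RESET_FOO
#		bool "Foo Reset Driver" if COMPILE_TEST
#		default ARCH_FOO
#		help
#		  This enables the reset controller driver for Foo SoCs.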
+ +config RESET_SOCFPGA + bool "SoCFPGA Reset Driver" if COMPILE_TEST + default ARCH_SOCFPGA + help + This enables the reset controller driver for Altera SoCFPGAs. + +config RESET_STM32 + bool "STM32 Reset Driver" if COMPILE_TEST + default ARCH_STM32 + help + This enables the RCC reset controller driver for STM32 MCUs. + +config RESET_SUNXI + bool "Allwinner SoCs Reset Driver" if COMPILE_TEST && !ARCH_SUNXI + default ARCH_SUNXI + help + This enables the reset driver for Allwinner SoCs. + config TI_SYSCON_RESET tristate "TI SYSCON Reset Driver" depends on HAS_IOMEM @@ -27,6 +76,22 @@ config TI_SYSCON_RESET you wish to use the reset framework for such memory-mapped devices, say Y here. Otherwise, say N. +config RESET_UNIPHIER + tristate "Reset controller driver for UniPhier SoCs" + depends on ARCH_UNIPHIER || COMPILE_TEST + depends on OF && MFD_SYSCON + default ARCH_UNIPHIER + help + Support for reset controllers on UniPhier SoCs. + Say Y if you want to control reset signals provided by System Control + block, Media I/O block, Peripheral Block. + +config RESET_ZYNQ + bool "ZYNQ Reset Driver" if COMPILE_TEST + default ARCH_ZYNQ + help + This enables the reset controller driver for Xilinx Zynq SoCs. + source "drivers/reset/sti/Kconfig" source "drivers/reset/hisilicon/Kconfig" diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index 5d65a93d3c43..bbe7026617fc 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -1,13 +1,15 @@ obj-y += core.o -obj-$(CONFIG_ARCH_LPC18XX) += reset-lpc18xx.o -obj-$(CONFIG_ARCH_SOCFPGA) += reset-socfpga.o -obj-$(CONFIG_ARCH_BERLIN) += reset-berlin.o -obj-$(CONFIG_MACH_PISTACHIO) += reset-pistachio.o -obj-$(CONFIG_ARCH_MESON) += reset-meson.o -obj-$(CONFIG_ARCH_SUNXI) += reset-sunxi.o +obj-y += hisilicon/ obj-$(CONFIG_ARCH_STI) += sti/ -obj-$(CONFIG_ARCH_HISI) += hisilicon/ -obj-$(CONFIG_ARCH_ZYNQ) += reset-zynq.o -obj-$(CONFIG_ATH79) += reset-ath79.o +obj-$(CONFIG_RESET_ATH79) += reset-ath79.o +obj-$(CONFIG_RESET_BERLIN) += reset-berlin.o +obj-$(CONFIG_RESET_LPC18XX) += reset-lpc18xx.o +obj-$(CONFIG_RESET_MESON) += reset-meson.o obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o +obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o +obj-$(CONFIG_RESET_SOCFPGA) += reset-socfpga.o +obj-$(CONFIG_RESET_STM32) += reset-stm32.o +obj-$(CONFIG_RESET_SUNXI) += reset-sunxi.o obj-$(CONFIG_TI_SYSCON_RESET) += reset-ti-syscon.o +obj-$(CONFIG_RESET_UNIPHIER) += reset-uniphier.o +obj-$(CONFIG_RESET_ZYNQ) += reset-zynq.o diff --git a/drivers/reset/core.c b/drivers/reset/core.c index 395dc9ce492e..b8ae1dbd4c17 100644 --- a/drivers/reset/core.c +++ b/drivers/reset/core.c @@ -138,7 +138,8 @@ EXPORT_SYMBOL_GPL(devm_reset_controller_register); */ int reset_control_reset(struct reset_control *rstc) { - if (WARN_ON(rstc->shared)) + if (WARN_ON(IS_ERR_OR_NULL(rstc)) || + WARN_ON(rstc->shared)) return -EINVAL; if (rstc->rcdev->ops->reset) @@ -161,6 +162,9 @@ EXPORT_SYMBOL_GPL(reset_control_reset); */ int reset_control_assert(struct reset_control *rstc) { + if (WARN_ON(IS_ERR_OR_NULL(rstc))) + return -EINVAL; + if (!rstc->rcdev->ops->assert) return -ENOTSUPP; @@ -184,6 +188,9 @@ EXPORT_SYMBOL_GPL(reset_control_assert); */ int reset_control_deassert(struct reset_control *rstc) { + if (WARN_ON(IS_ERR_OR_NULL(rstc))) + return -EINVAL; + if (!rstc->rcdev->ops->deassert) return -ENOTSUPP; @@ -204,6 +211,9 @@ EXPORT_SYMBOL_GPL(reset_control_deassert); */ int reset_control_status(struct reset_control *rstc) { + if (WARN_ON(IS_ERR_OR_NULL(rstc))) + return -EINVAL; + if 
(rstc->rcdev->ops->status) return rstc->rcdev->ops->status(rstc->rcdev, rstc->id); diff --git a/drivers/reset/hisilicon/Kconfig b/drivers/reset/hisilicon/Kconfig index 26bf95a83a8e..1ff8b0c80980 100644 --- a/drivers/reset/hisilicon/Kconfig +++ b/drivers/reset/hisilicon/Kconfig @@ -1,5 +1,6 @@ config COMMON_RESET_HI6220 tristate "Hi6220 Reset Driver" - depends on (ARCH_HISI && RESET_CONTROLLER) + depends on ARCH_HISI || COMPILE_TEST + default ARCH_HISI help Build the Hisilicon Hi6220 reset driver. diff --git a/drivers/reset/reset-ath79.c b/drivers/reset/reset-ath79.c index 16d410cd6146..6b97631f5489 100644 --- a/drivers/reset/reset-ath79.c +++ b/drivers/reset/reset-ath79.c @@ -12,6 +12,7 @@ * GNU General Public License for more details. */ +#include <linux/io.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/reset-controller.h> diff --git a/drivers/reset/reset-socfpga.c b/drivers/reset/reset-socfpga.c index 12add9b0fa49..78ebf8424375 100644 --- a/drivers/reset/reset-socfpga.c +++ b/drivers/reset/reset-socfpga.c @@ -28,7 +28,6 @@ struct socfpga_reset_data { spinlock_t lock; void __iomem *membase; - u32 modrst_offset; struct reset_controller_dev rcdev; }; @@ -45,9 +44,8 @@ static int socfpga_reset_assert(struct reset_controller_dev *rcdev, spin_lock_irqsave(&data->lock, flags); - reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS)); - writel(reg | BIT(offset), data->membase + data->modrst_offset + - (bank * NR_BANKS)); + reg = readl(data->membase + (bank * NR_BANKS)); + writel(reg | BIT(offset), data->membase + (bank * NR_BANKS)); spin_unlock_irqrestore(&data->lock, flags); return 0; @@ -67,9 +65,8 @@ static int socfpga_reset_deassert(struct reset_controller_dev *rcdev, spin_lock_irqsave(&data->lock, flags); - reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS)); - writel(reg & ~BIT(offset), data->membase + data->modrst_offset + - (bank * NR_BANKS)); + reg = readl(data->membase + (bank * NR_BANKS)); + writel(reg & ~BIT(offset), data->membase + (bank * NR_BANKS)); spin_unlock_irqrestore(&data->lock, flags); @@ -85,7 +82,7 @@ static int socfpga_reset_status(struct reset_controller_dev *rcdev, int offset = id % BITS_PER_LONG; u32 reg; - reg = readl(data->membase + data->modrst_offset + (bank * NR_BANKS)); + reg = readl(data->membase + (bank * NR_BANKS)); return !(reg & BIT(offset)); } @@ -102,6 +99,7 @@ static int socfpga_reset_probe(struct platform_device *pdev) struct resource *res; struct device *dev = &pdev->dev; struct device_node *np = dev->of_node; + u32 modrst_offset; /* * The binding was mainlined without the required property. @@ -122,10 +120,11 @@ static int socfpga_reset_probe(struct platform_device *pdev) if (IS_ERR(data->membase)) return PTR_ERR(data->membase); - if (of_property_read_u32(np, "altr,modrst-offset", &data->modrst_offset)) { + if (of_property_read_u32(np, "altr,modrst-offset", &modrst_offset)) { dev_warn(dev, "missing altr,modrst-offset property, assuming 0x10!\n"); - data->modrst_offset = 0x10; + modrst_offset = 0x10; } + data->membase += modrst_offset; spin_lock_init(&data->lock); diff --git a/drivers/reset/reset-stm32.c b/drivers/reset/reset-stm32.c new file mode 100644 index 000000000000..3a7c8527e66a --- /dev/null +++ b/drivers/reset/reset-stm32.c @@ -0,0 +1,108 @@ +/* + * Copyright (C) Maxime Coquelin 2015 + * Author: Maxime Coquelin <mcoquelin.stm32@gmail.com> + * License terms: GNU General Public License (GPL), version 2 + * + * Heavily based on sunxi driver from Maxime Ripard. 
+ */ + +#include <linux/err.h> +#include <linux/io.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/platform_device.h> +#include <linux/reset-controller.h> +#include <linux/slab.h> +#include <linux/spinlock.h> +#include <linux/types.h> + +struct stm32_reset_data { + spinlock_t lock; + void __iomem *membase; + struct reset_controller_dev rcdev; +}; + +static int stm32_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct stm32_reset_data *data = container_of(rcdev, + struct stm32_reset_data, + rcdev); + int bank = id / BITS_PER_LONG; + int offset = id % BITS_PER_LONG; + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&data->lock, flags); + + reg = readl(data->membase + (bank * 4)); + writel(reg | BIT(offset), data->membase + (bank * 4)); + + spin_unlock_irqrestore(&data->lock, flags); + + return 0; +} + +static int stm32_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct stm32_reset_data *data = container_of(rcdev, + struct stm32_reset_data, + rcdev); + int bank = id / BITS_PER_LONG; + int offset = id % BITS_PER_LONG; + unsigned long flags; + u32 reg; + + spin_lock_irqsave(&data->lock, flags); + + reg = readl(data->membase + (bank * 4)); + writel(reg & ~BIT(offset), data->membase + (bank * 4)); + + spin_unlock_irqrestore(&data->lock, flags); + + return 0; +} + +static const struct reset_control_ops stm32_reset_ops = { + .assert = stm32_reset_assert, + .deassert = stm32_reset_deassert, +}; + +static const struct of_device_id stm32_reset_dt_ids[] = { + { .compatible = "st,stm32-rcc", }, + { /* sentinel */ }, +}; + +static int stm32_reset_probe(struct platform_device *pdev) +{ + struct stm32_reset_data *data; + struct resource *res; + + data = devm_kzalloc(&pdev->dev, sizeof(*data), GFP_KERNEL); + if (!data) + return -ENOMEM; + + res = platform_get_resource(pdev, IORESOURCE_MEM, 0); + data->membase = devm_ioremap_resource(&pdev->dev, res); + if (IS_ERR(data->membase)) + return PTR_ERR(data->membase); + + spin_lock_init(&data->lock); + + data->rcdev.owner = THIS_MODULE; + data->rcdev.nr_resets = resource_size(res) * 8; + data->rcdev.ops = &stm32_reset_ops; + data->rcdev.of_node = pdev->dev.of_node; + + return devm_reset_controller_register(&pdev->dev, &data->rcdev); +} + +static struct platform_driver stm32_reset_driver = { + .probe = stm32_reset_probe, + .driver = { + .name = "stm32-rcc-reset", + .of_match_table = stm32_reset_dt_ids, + }, +}; +builtin_platform_driver(stm32_reset_driver); diff --git a/drivers/reset/reset-uniphier.c b/drivers/reset/reset-uniphier.c new file mode 100644 index 000000000000..8b2558e7363e --- /dev/null +++ b/drivers/reset/reset-uniphier.c @@ -0,0 +1,440 @@ +/* + * Copyright (C) 2016 Socionext Inc. + * Author: Masahiro Yamada <yamada.masahiro@socionext.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ */ + +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset-controller.h> + +struct uniphier_reset_data { + unsigned int id; + unsigned int reg; + unsigned int bit; + unsigned int flags; +#define UNIPHIER_RESET_ACTIVE_LOW BIT(0) +}; + +#define UNIPHIER_RESET_ID_END (unsigned int)(-1) + +#define UNIPHIER_RESET_END \ + { .id = UNIPHIER_RESET_ID_END } + +#define UNIPHIER_RESET(_id, _reg, _bit) \ + { \ + .id = (_id), \ + .reg = (_reg), \ + .bit = (_bit), \ + } + +#define UNIPHIER_RESETX(_id, _reg, _bit) \ + { \ + .id = (_id), \ + .reg = (_reg), \ + .bit = (_bit), \ + .flags = UNIPHIER_RESET_ACTIVE_LOW, \ + } + +/* System reset data */ +#define UNIPHIER_SLD3_SYS_RESET_STDMAC(id) \ + UNIPHIER_RESETX((id), 0x2000, 10) + +#define UNIPHIER_LD11_SYS_RESET_STDMAC(id) \ + UNIPHIER_RESETX((id), 0x200c, 8) + +#define UNIPHIER_PRO4_SYS_RESET_GIO(id) \ + UNIPHIER_RESETX((id), 0x2000, 6) + +#define UNIPHIER_LD20_SYS_RESET_GIO(id) \ + UNIPHIER_RESETX((id), 0x200c, 5) + +#define UNIPHIER_PRO4_SYS_RESET_USB3(id, ch) \ + UNIPHIER_RESETX((id), 0x2000 + 0x4 * (ch), 17) + +const struct uniphier_reset_data uniphier_sld3_sys_reset_data[] = { + UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* Ether, HSC, MIO */ + UNIPHIER_RESET_END, +}; + +const struct uniphier_reset_data uniphier_pro4_sys_reset_data[] = { + UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC, MIO, RLE */ + UNIPHIER_PRO4_SYS_RESET_GIO(12), /* Ether, SATA, USB3 */ + UNIPHIER_PRO4_SYS_RESET_USB3(14, 0), + UNIPHIER_PRO4_SYS_RESET_USB3(15, 1), + UNIPHIER_RESET_END, +}; + +const struct uniphier_reset_data uniphier_pro5_sys_reset_data[] = { + UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC */ + UNIPHIER_PRO4_SYS_RESET_GIO(12), /* PCIe, USB3 */ + UNIPHIER_PRO4_SYS_RESET_USB3(14, 0), + UNIPHIER_PRO4_SYS_RESET_USB3(15, 1), + UNIPHIER_RESET_END, +}; + +const struct uniphier_reset_data uniphier_pxs2_sys_reset_data[] = { + UNIPHIER_SLD3_SYS_RESET_STDMAC(8), /* HSC, RLE */ + UNIPHIER_PRO4_SYS_RESET_USB3(14, 0), + UNIPHIER_PRO4_SYS_RESET_USB3(15, 1), + UNIPHIER_RESETX(16, 0x2014, 4), /* USB30-PHY0 */ + UNIPHIER_RESETX(17, 0x2014, 0), /* USB30-PHY1 */ + UNIPHIER_RESETX(18, 0x2014, 2), /* USB30-PHY2 */ + UNIPHIER_RESETX(20, 0x2014, 5), /* USB31-PHY0 */ + UNIPHIER_RESETX(21, 0x2014, 1), /* USB31-PHY1 */ + UNIPHIER_RESETX(28, 0x2014, 12), /* SATA */ + UNIPHIER_RESET(29, 0x2014, 8), /* SATA-PHY (active high) */ + UNIPHIER_RESET_END, +}; + +const struct uniphier_reset_data uniphier_ld11_sys_reset_data[] = { + UNIPHIER_LD11_SYS_RESET_STDMAC(8), /* HSC, MIO */ + UNIPHIER_RESET_END, +}; + +const struct uniphier_reset_data uniphier_ld20_sys_reset_data[] = { + UNIPHIER_LD11_SYS_RESET_STDMAC(8), /* HSC */ + UNIPHIER_LD20_SYS_RESET_GIO(12), /* PCIe, USB3 */ + UNIPHIER_RESETX(16, 0x200c, 12), /* USB30-PHY0 */ + UNIPHIER_RESETX(17, 0x200c, 13), /* USB30-PHY1 */ + UNIPHIER_RESETX(18, 0x200c, 14), /* USB30-PHY2 */ + UNIPHIER_RESETX(19, 0x200c, 15), /* USB30-PHY3 */ + UNIPHIER_RESET_END, +}; + +/* Media I/O reset data */ +#define UNIPHIER_MIO_RESET_SD(id, ch) \ + UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 0) + +#define UNIPHIER_MIO_RESET_SD_BRIDGE(id, ch) \ + UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 26) + +#define UNIPHIER_MIO_RESET_EMMC_HW_RESET(id, ch) \ + UNIPHIER_RESETX((id), 0x80 + 0x200 * (ch), 0) + +#define UNIPHIER_MIO_RESET_USB2(id, ch) \ + UNIPHIER_RESETX((id), 0x114 + 0x200 * (ch), 0) + +#define UNIPHIER_MIO_RESET_USB2_BRIDGE(id, ch) \ + 
UNIPHIER_RESETX((id), 0x110 + 0x200 * (ch), 24) + +#define UNIPHIER_MIO_RESET_DMAC(id) \ + UNIPHIER_RESETX((id), 0x110, 17) + +const struct uniphier_reset_data uniphier_sld3_mio_reset_data[] = { + UNIPHIER_MIO_RESET_SD(0, 0), + UNIPHIER_MIO_RESET_SD(1, 1), + UNIPHIER_MIO_RESET_SD(2, 2), + UNIPHIER_MIO_RESET_SD_BRIDGE(3, 0), + UNIPHIER_MIO_RESET_SD_BRIDGE(4, 1), + UNIPHIER_MIO_RESET_SD_BRIDGE(5, 2), + UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1), + UNIPHIER_MIO_RESET_DMAC(7), + UNIPHIER_MIO_RESET_USB2(8, 0), + UNIPHIER_MIO_RESET_USB2(9, 1), + UNIPHIER_MIO_RESET_USB2(10, 2), + UNIPHIER_MIO_RESET_USB2(11, 3), + UNIPHIER_MIO_RESET_USB2_BRIDGE(12, 0), + UNIPHIER_MIO_RESET_USB2_BRIDGE(13, 1), + UNIPHIER_MIO_RESET_USB2_BRIDGE(14, 2), + UNIPHIER_MIO_RESET_USB2_BRIDGE(15, 3), + UNIPHIER_RESET_END, +}; + +const struct uniphier_reset_data uniphier_pro5_mio_reset_data[] = { + UNIPHIER_MIO_RESET_SD(0, 0), + UNIPHIER_MIO_RESET_SD(1, 1), + UNIPHIER_MIO_RESET_EMMC_HW_RESET(6, 1), + UNIPHIER_RESET_END, +}; + +/* Peripheral reset data */ +#define UNIPHIER_PERI_RESET_UART(id, ch) \ + UNIPHIER_RESETX((id), 0x114, 19 + (ch)) + +#define UNIPHIER_PERI_RESET_I2C(id, ch) \ + UNIPHIER_RESETX((id), 0x114, 5 + (ch)) + +#define UNIPHIER_PERI_RESET_FI2C(id, ch) \ + UNIPHIER_RESETX((id), 0x114, 24 + (ch)) + +const struct uniphier_reset_data uniphier_ld4_peri_reset_data[] = { + UNIPHIER_PERI_RESET_UART(0, 0), + UNIPHIER_PERI_RESET_UART(1, 1), + UNIPHIER_PERI_RESET_UART(2, 2), + UNIPHIER_PERI_RESET_UART(3, 3), + UNIPHIER_PERI_RESET_I2C(4, 0), + UNIPHIER_PERI_RESET_I2C(5, 1), + UNIPHIER_PERI_RESET_I2C(6, 2), + UNIPHIER_PERI_RESET_I2C(7, 3), + UNIPHIER_PERI_RESET_I2C(8, 4), + UNIPHIER_RESET_END, +}; + +const struct uniphier_reset_data uniphier_pro4_peri_reset_data[] = { + UNIPHIER_PERI_RESET_UART(0, 0), + UNIPHIER_PERI_RESET_UART(1, 1), + UNIPHIER_PERI_RESET_UART(2, 2), + UNIPHIER_PERI_RESET_UART(3, 3), + UNIPHIER_PERI_RESET_FI2C(4, 0), + UNIPHIER_PERI_RESET_FI2C(5, 1), + UNIPHIER_PERI_RESET_FI2C(6, 2), + UNIPHIER_PERI_RESET_FI2C(7, 3), + UNIPHIER_PERI_RESET_FI2C(8, 4), + UNIPHIER_PERI_RESET_FI2C(9, 5), + UNIPHIER_PERI_RESET_FI2C(10, 6), + UNIPHIER_RESET_END, +}; + +/* core implementation */ +struct uniphier_reset_priv { + struct reset_controller_dev rcdev; + struct device *dev; + struct regmap *regmap; + const struct uniphier_reset_data *data; +}; + +#define to_uniphier_reset_priv(_rcdev) \ + container_of(_rcdev, struct uniphier_reset_priv, rcdev) + +static int uniphier_reset_update(struct reset_controller_dev *rcdev, + unsigned long id, int assert) +{ + struct uniphier_reset_priv *priv = to_uniphier_reset_priv(rcdev); + const struct uniphier_reset_data *p; + + for (p = priv->data; p->id != UNIPHIER_RESET_ID_END; p++) { + unsigned int mask, val; + + if (p->id != id) + continue; + + mask = BIT(p->bit); + + if (assert) + val = mask; + else + val = ~mask; + + if (p->flags & UNIPHIER_RESET_ACTIVE_LOW) + val = ~val; + + return regmap_write_bits(priv->regmap, p->reg, mask, val); + } + + dev_err(priv->dev, "reset_id=%lu was not handled\n", id); + return -EINVAL; +} + +static int uniphier_reset_assert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return uniphier_reset_update(rcdev, id, 1); +} + +static int uniphier_reset_deassert(struct reset_controller_dev *rcdev, + unsigned long id) +{ + return uniphier_reset_update(rcdev, id, 0); +} + +static int uniphier_reset_status(struct reset_controller_dev *rcdev, + unsigned long id) +{ + struct uniphier_reset_priv *priv = to_uniphier_reset_priv(rcdev); + const struct
uniphier_reset_data *p; + + for (p = priv->data; p->id != UNIPHIER_RESET_ID_END; p++) { + unsigned int val; + int ret, asserted; + + if (p->id != id) + continue; + + ret = regmap_read(priv->regmap, p->reg, &val); + if (ret) + return ret; + + asserted = !!(val & BIT(p->bit)); + + if (p->flags & UNIPHIER_RESET_ACTIVE_LOW) + asserted = !asserted; + + return asserted; + } + + dev_err(priv->dev, "reset_id=%lu was not found\n", id); + return -EINVAL; +} + +static const struct reset_control_ops uniphier_reset_ops = { + .assert = uniphier_reset_assert, + .deassert = uniphier_reset_deassert, + .status = uniphier_reset_status, +}; + +static int uniphier_reset_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct uniphier_reset_priv *priv; + const struct uniphier_reset_data *p, *data; + struct regmap *regmap; + struct device_node *parent; + unsigned int nr_resets = 0; + + data = of_device_get_match_data(dev); + if (WARN_ON(!data)) + return -EINVAL; + + parent = of_get_parent(dev->of_node); /* parent should be syscon node */ + regmap = syscon_node_to_regmap(parent); + of_node_put(parent); + if (IS_ERR(regmap)) { + dev_err(dev, "failed to get regmap (error %ld)\n", + PTR_ERR(regmap)); + return PTR_ERR(regmap); + } + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + for (p = data; p->id != UNIPHIER_RESET_ID_END; p++) + nr_resets = max(nr_resets, p->id + 1); + + priv->rcdev.ops = &uniphier_reset_ops; + priv->rcdev.owner = dev->driver->owner; + priv->rcdev.of_node = dev->of_node; + priv->rcdev.nr_resets = nr_resets; + priv->dev = dev; + priv->regmap = regmap; + priv->data = data; + + return devm_reset_controller_register(&pdev->dev, &priv->rcdev); +} + +static const struct of_device_id uniphier_reset_match[] = { + /* System reset */ + { + .compatible = "socionext,uniphier-sld3-reset", + .data = uniphier_sld3_sys_reset_data, + }, + { + .compatible = "socionext,uniphier-ld4-reset", + .data = uniphier_sld3_sys_reset_data, + }, + { + .compatible = "socionext,uniphier-pro4-reset", + .data = uniphier_pro4_sys_reset_data, + }, + { + .compatible = "socionext,uniphier-sld8-reset", + .data = uniphier_sld3_sys_reset_data, + }, + { + .compatible = "socionext,uniphier-pro5-reset", + .data = uniphier_pro5_sys_reset_data, + }, + { + .compatible = "socionext,uniphier-pxs2-reset", + .data = uniphier_pxs2_sys_reset_data, + }, + { + .compatible = "socionext,uniphier-ld11-reset", + .data = uniphier_ld11_sys_reset_data, + }, + { + .compatible = "socionext,uniphier-ld20-reset", + .data = uniphier_ld20_sys_reset_data, + }, + /* Media I/O reset */ + { + .compatible = "socionext,uniphier-sld3-mio-reset", + .data = uniphier_sld3_mio_reset_data, + }, + { + .compatible = "socionext,uniphier-ld4-mio-reset", + .data = uniphier_sld3_mio_reset_data, + }, + { + .compatible = "socionext,uniphier-pro4-mio-reset", + .data = uniphier_sld3_mio_reset_data, + }, + { + .compatible = "socionext,uniphier-sld8-mio-reset", + .data = uniphier_sld3_mio_reset_data, + }, + { + .compatible = "socionext,uniphier-pro5-mio-reset", + .data = uniphier_pro5_mio_reset_data, + }, + { + .compatible = "socionext,uniphier-pxs2-mio-reset", + .data = uniphier_pro5_mio_reset_data, + }, + { + .compatible = "socionext,uniphier-ld11-mio-reset", + .data = uniphier_sld3_mio_reset_data, + }, + { + .compatible = "socionext,uniphier-ld20-mio-reset", + .data = uniphier_pro5_mio_reset_data, + }, + /* Peripheral reset */ + { + .compatible = "socionext,uniphier-ld4-peri-reset", + .data = 
uniphier_ld4_peri_reset_data, + }, + { + .compatible = "socionext,uniphier-pro4-peri-reset", + .data = uniphier_pro4_peri_reset_data, + }, + { + .compatible = "socionext,uniphier-sld8-peri-reset", + .data = uniphier_ld4_peri_reset_data, + }, + { + .compatible = "socionext,uniphier-pro5-peri-reset", + .data = uniphier_pro4_peri_reset_data, + }, + { + .compatible = "socionext,uniphier-pxs2-peri-reset", + .data = uniphier_pro4_peri_reset_data, + }, + { + .compatible = "socionext,uniphier-ld11-peri-reset", + .data = uniphier_pro4_peri_reset_data, + }, + { + .compatible = "socionext,uniphier-ld20-peri-reset", + .data = uniphier_pro4_peri_reset_data, + }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, uniphier_reset_match); + +static struct platform_driver uniphier_reset_driver = { + .probe = uniphier_reset_probe, + .driver = { + .name = "uniphier-reset", + .of_match_table = uniphier_reset_match, + }, +}; +module_platform_driver(uniphier_reset_driver); + +MODULE_AUTHOR("Masahiro Yamada <yamada.masahiro@socionext.com>"); +MODULE_DESCRIPTION("UniPhier Reset Controller Driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/soc/mediatek/mtk-pmic-wrap.c b/drivers/soc/mediatek/mtk-pmic-wrap.c index a003ba26ca6e..a5f10936fb9c 100644 --- a/drivers/soc/mediatek/mtk-pmic-wrap.c +++ b/drivers/soc/mediatek/mtk-pmic-wrap.c @@ -583,7 +583,7 @@ static int pwrap_wait_for_state(struct pmic_wrapper *wrp, { unsigned long timeout; - timeout = jiffies + usecs_to_jiffies(255); + timeout = jiffies + usecs_to_jiffies(10000); do { if (time_after(jiffies, timeout)) diff --git a/drivers/soc/qcom/smd.c b/drivers/soc/qcom/smd.c index ac1957dfdf24..322034ab9d37 100644 --- a/drivers/soc/qcom/smd.c +++ b/drivers/soc/qcom/smd.c @@ -95,7 +95,7 @@ static const struct { /** * struct qcom_smd_edge - representing a remote processor - * @smd: handle to qcom_smd + * @dev: device for this edge * @of_node: of_node handle for information related to this edge * @edge_id: identifier of this edge * @remote_pid: identifier of remote processor @@ -111,7 +111,8 @@ static const struct { * @state_work: work item for edge state changes */ struct qcom_smd_edge { - struct qcom_smd *smd; + struct device dev; + struct device_node *of_node; unsigned edge_id; unsigned remote_pid; @@ -135,6 +136,8 @@ struct qcom_smd_edge { struct work_struct state_work; }; +#define to_smd_edge(d) container_of(d, struct qcom_smd_edge, dev) + /* * SMD channel states. 
*/ @@ -197,20 +200,6 @@ struct qcom_smd_channel { void *drvdata; struct list_head list; - struct list_head dev_list; -}; - -/** - * struct qcom_smd - smd struct - * @dev: device struct - * @num_edges: number of entries in @edges - * @edges: array of edges to be handled - */ -struct qcom_smd { - struct device *dev; - - unsigned num_edges; - struct qcom_smd_edge edges[0]; }; /* @@ -374,7 +363,7 @@ static void qcom_smd_channel_reset(struct qcom_smd_channel *channel) SET_TX_CHANNEL_FLAG(channel, fSTATE, 1); SET_TX_CHANNEL_FLAG(channel, fBLOCKREADINTR, 1); SET_TX_CHANNEL_INFO(channel, head, 0); - SET_TX_CHANNEL_INFO(channel, tail, 0); + SET_RX_CHANNEL_INFO(channel, tail, 0); qcom_smd_signal_channel(channel); @@ -421,7 +410,7 @@ static void qcom_smd_channel_set_state(struct qcom_smd_channel *channel, if (channel->state == state) return; - dev_dbg(edge->smd->dev, "set_state(%s, %d)\n", channel->name, state); + dev_dbg(&edge->dev, "set_state(%s, %d)\n", channel->name, state); SET_TX_CHANNEL_FLAG(channel, fDSR, is_open); SET_TX_CHANNEL_FLAG(channel, fCTS, is_open); @@ -891,8 +880,6 @@ static int qcom_smd_dev_remove(struct device *dev) struct qcom_smd_device *qsdev = to_smd_device(dev); struct qcom_smd_driver *qsdrv = to_smd_driver(dev); struct qcom_smd_channel *channel = qsdev->channel; - struct qcom_smd_channel *tmp; - struct qcom_smd_channel *ch; qcom_smd_channel_set_state(channel, SMD_CHANNEL_CLOSING); @@ -911,15 +898,9 @@ static int qcom_smd_dev_remove(struct device *dev) if (qsdrv->remove) qsdrv->remove(qsdev); - /* - * The client is now gone, close and release all channels associated - * with this sdev - */ - list_for_each_entry_safe(ch, tmp, &channel->dev_list, dev_list) { - qcom_smd_channel_close(ch); - list_del(&ch->dev_list); - ch->qsdev = NULL; - } + /* The client is now gone, close the primary channel */ + qcom_smd_channel_close(channel); + channel->qsdev = NULL; return 0; } @@ -973,13 +954,12 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel) struct qcom_smd_device *qsdev; struct qcom_smd_edge *edge = channel->edge; struct device_node *node; - struct qcom_smd *smd = edge->smd; int ret; if (channel->qsdev) return -EEXIST; - dev_dbg(smd->dev, "registering '%s'\n", channel->name); + dev_dbg(&edge->dev, "registering '%s'\n", channel->name); qsdev = kzalloc(sizeof(*qsdev), GFP_KERNEL); if (!qsdev) @@ -990,7 +970,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel) edge->of_node->name, node ? node->name : channel->name); - qsdev->dev.parent = smd->dev; + qsdev->dev.parent = &edge->dev; qsdev->dev.bus = &qcom_smd_bus; qsdev->dev.release = qcom_smd_release_device; qsdev->dev.of_node = node; @@ -1001,7 +981,7 @@ static int qcom_smd_create_device(struct qcom_smd_channel *channel) ret = device_register(&qsdev->dev); if (ret) { - dev_err(smd->dev, "device_register failed: %d\n", ret); + dev_err(&edge->dev, "device_register failed: %d\n", ret); put_device(&qsdev->dev); } @@ -1091,6 +1071,8 @@ qcom_smd_find_channel(struct qcom_smd_edge *edge, const char *name) * * Returns a channel handle on success, or -EPROBE_DEFER if the channel isn't * ready. 
+ * + * Any channels returned must be closed with a call to qcom_smd_close_channel() */ struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent, const char *name, @@ -1120,15 +1102,21 @@ struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *parent, return ERR_PTR(ret); } - /* - * Append the list of channel to the channels associated with the sdev - */ - list_add_tail(&channel->dev_list, &sdev->channel->dev_list); - return channel; } EXPORT_SYMBOL(qcom_smd_open_channel); +/** + * qcom_smd_close_channel() - close an additionally opened channel + * @channel: channel handle, returned by qcom_smd_open_channel() + */ +void qcom_smd_close_channel(struct qcom_smd_channel *channel) +{ + qcom_smd_channel_close(channel); + channel->qsdev = NULL; +} +EXPORT_SYMBOL(qcom_smd_close_channel); + /* * Allocate the qcom_smd_channel object for a newly found smd channel, * retrieving and validating the smem items involved. @@ -1139,20 +1127,18 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed char *name) { struct qcom_smd_channel *channel; - struct qcom_smd *smd = edge->smd; size_t fifo_size; size_t info_size; void *fifo_base; void *info; int ret; - channel = devm_kzalloc(smd->dev, sizeof(*channel), GFP_KERNEL); + channel = devm_kzalloc(&edge->dev, sizeof(*channel), GFP_KERNEL); if (!channel) return ERR_PTR(-ENOMEM); - INIT_LIST_HEAD(&channel->dev_list); channel->edge = edge; - channel->name = devm_kstrdup(smd->dev, name, GFP_KERNEL); + channel->name = devm_kstrdup(&edge->dev, name, GFP_KERNEL); if (!channel->name) return ERR_PTR(-ENOMEM); @@ -1175,7 +1161,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed } else if (info_size == 2 * sizeof(struct smd_channel_info)) { channel->info = info; } else { - dev_err(smd->dev, + dev_err(&edge->dev, "channel info of size %zu not supported\n", info_size); ret = -EINVAL; goto free_name_and_channel; @@ -1190,7 +1176,7 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed /* The channel consists of an rx and tx fifo of equal size */ fifo_size /= 2; - dev_dbg(smd->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n", + dev_dbg(&edge->dev, "new channel '%s' info-size: %zu fifo-size: %zu\n", name, info_size, fifo_size); channel->tx_fifo = fifo_base; @@ -1202,8 +1188,8 @@ static struct qcom_smd_channel *qcom_smd_create_channel(struct qcom_smd_edge *ed return channel; free_name_and_channel: - devm_kfree(smd->dev, channel->name); - devm_kfree(smd->dev, channel); + devm_kfree(&edge->dev, channel->name); + devm_kfree(&edge->dev, channel); return ERR_PTR(ret); } @@ -1219,7 +1205,6 @@ static void qcom_channel_scan_worker(struct work_struct *work) struct qcom_smd_alloc_entry *alloc_tbl; struct qcom_smd_alloc_entry *entry; struct qcom_smd_channel *channel; - struct qcom_smd *smd = edge->smd; unsigned long flags; unsigned fifo_id; unsigned info_id; @@ -1263,7 +1248,7 @@ static void qcom_channel_scan_worker(struct work_struct *work) list_add(&channel->list, &edge->channels); spin_unlock_irqrestore(&edge->channels_lock, flags); - dev_dbg(smd->dev, "new channel found: '%s'\n", channel->name); + dev_dbg(&edge->dev, "new channel found: '%s'\n", channel->name); set_bit(i, edge->allocated[tbl]); wake_up_interruptible(&edge->new_channel_event); @@ -1350,22 +1335,6 @@ static int qcom_smd_parse_edge(struct device *dev, edge->of_node = of_node_get(node); - irq = irq_of_parse_and_map(node, 0); - if (irq < 0) { - dev_err(dev, "required smd interrupt
missing\n"); - return -EINVAL; - } - - ret = devm_request_irq(dev, irq, - qcom_smd_edge_intr, IRQF_TRIGGER_RISING, - node->name, edge); - if (ret) { - dev_err(dev, "failed to request smd irq\n"); - return ret; - } - - edge->irq = irq; - key = "qcom,smd-edge"; ret = of_property_read_u32(node, key, &edge->edge_id); if (ret) { @@ -1400,18 +1369,121 @@ static int qcom_smd_parse_edge(struct device *dev, return -EINVAL; } + irq = irq_of_parse_and_map(node, 0); + if (irq < 0) { + dev_err(dev, "required smd interrupt missing\n"); + return -EINVAL; + } + + ret = devm_request_irq(dev, irq, + qcom_smd_edge_intr, IRQF_TRIGGER_RISING, + node->name, edge); + if (ret) { + dev_err(dev, "failed to request smd irq\n"); + return ret; + } + + edge->irq = irq; + return 0; } -static int qcom_smd_probe(struct platform_device *pdev) +/* + * Release function for an edge. + * Reset the state of each associated channel and free the edge context. + */ +static void qcom_smd_edge_release(struct device *dev) +{ + struct qcom_smd_channel *channel; + struct qcom_smd_edge *edge = to_smd_edge(dev); + + list_for_each_entry(channel, &edge->channels, list) { + SET_RX_CHANNEL_INFO(channel, state, SMD_CHANNEL_CLOSED); + SET_RX_CHANNEL_INFO(channel, head, 0); + SET_RX_CHANNEL_INFO(channel, tail, 0); + } + + kfree(edge); +} + +/** + * qcom_smd_register_edge() - register an edge based on an device_node + * @parent: parent device for the edge + * @node: device_node describing the edge + * + * Returns an edge reference, or negative ERR_PTR() on failure. + */ +struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent, + struct device_node *node) { struct qcom_smd_edge *edge; - struct device_node *node; - struct qcom_smd *smd; - size_t array_size; - int num_edges; int ret; - int i = 0; + + edge = kzalloc(sizeof(*edge), GFP_KERNEL); + if (!edge) + return ERR_PTR(-ENOMEM); + + init_waitqueue_head(&edge->new_channel_event); + + edge->dev.parent = parent; + edge->dev.release = qcom_smd_edge_release; + dev_set_name(&edge->dev, "%s:%s", dev_name(parent), node->name); + ret = device_register(&edge->dev); + if (ret) { + pr_err("failed to register smd edge\n"); + return ERR_PTR(ret); + } + + ret = qcom_smd_parse_edge(&edge->dev, node, edge); + if (ret) { + dev_err(&edge->dev, "failed to parse smd edge\n"); + goto unregister_dev; + } + + schedule_work(&edge->scan_work); + + return edge; + +unregister_dev: + put_device(&edge->dev); + return ERR_PTR(ret); +} +EXPORT_SYMBOL(qcom_smd_register_edge); + +static int qcom_smd_remove_device(struct device *dev, void *data) +{ + device_unregister(dev); + of_node_put(dev->of_node); + put_device(dev); + + return 0; +} + +/** + * qcom_smd_unregister_edge() - release an edge and its children + * @edge: edge reference acquired from qcom_smd_register_edge + */ +int qcom_smd_unregister_edge(struct qcom_smd_edge *edge) +{ + int ret; + + disable_irq(edge->irq); + cancel_work_sync(&edge->scan_work); + cancel_work_sync(&edge->state_work); + + ret = device_for_each_child(&edge->dev, NULL, qcom_smd_remove_device); + if (ret) + dev_warn(&edge->dev, "can't remove smd device: %d\n", ret); + + device_unregister(&edge->dev); + + return 0; +} +EXPORT_SYMBOL(qcom_smd_unregister_edge); + +static int qcom_smd_probe(struct platform_device *pdev) +{ + struct device_node *node; void *p; /* Wait for smem */ @@ -1419,29 +1491,17 @@ static int qcom_smd_probe(struct platform_device *pdev) if (PTR_ERR(p) == -EPROBE_DEFER) return PTR_ERR(p); - num_edges = of_get_available_child_count(pdev->dev.of_node); - array_size = 
sizeof(*smd) + num_edges * sizeof(struct qcom_smd_edge); - smd = devm_kzalloc(&pdev->dev, array_size, GFP_KERNEL); - if (!smd) - return -ENOMEM; - smd->dev = &pdev->dev; - - smd->num_edges = num_edges; - for_each_available_child_of_node(pdev->dev.of_node, node) { - edge = &smd->edges[i++]; - edge->smd = smd; - init_waitqueue_head(&edge->new_channel_event); - - ret = qcom_smd_parse_edge(&pdev->dev, node, edge); - if (ret) - continue; + for_each_available_child_of_node(pdev->dev.of_node, node) + qcom_smd_register_edge(&pdev->dev, node); - schedule_work(&edge->scan_work); - } + return 0; +} - platform_set_drvdata(pdev, smd); +static int qcom_smd_remove_edge(struct device *dev, void *data) +{ + struct qcom_smd_edge *edge = to_smd_edge(dev); - return 0; + return qcom_smd_unregister_edge(edge); } /* @@ -1450,28 +1510,13 @@ static int qcom_smd_probe(struct platform_device *pdev) */ static int qcom_smd_remove(struct platform_device *pdev) { - struct qcom_smd_channel *channel; - struct qcom_smd_edge *edge; - struct qcom_smd *smd = platform_get_drvdata(pdev); - int i; - - for (i = 0; i < smd->num_edges; i++) { - edge = &smd->edges[i]; - - disable_irq(edge->irq); - cancel_work_sync(&edge->scan_work); - cancel_work_sync(&edge->state_work); - - /* No need to lock here, because the writer is gone */ - list_for_each_entry(channel, &edge->channels, list) { - if (!channel->qsdev) - continue; + int ret; - qcom_smd_destroy_device(channel); - } - } + ret = device_for_each_child(&pdev->dev, NULL, qcom_smd_remove_edge); + if (ret) + dev_warn(&pdev->dev, "can't remove smd device: %d\n", ret); - return 0; + return ret; } static const struct of_device_id qcom_smd_of_match[] = { diff --git a/drivers/soc/qcom/smem.c b/drivers/soc/qcom/smem.c index 2e1aa9f130f4..18ec52f2078a 100644 --- a/drivers/soc/qcom/smem.c +++ b/drivers/soc/qcom/smem.c @@ -740,7 +740,8 @@ static int qcom_smem_probe(struct platform_device *pdev) hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0); if (hwlock_id < 0) { - dev_err(&pdev->dev, "failed to retrieve hwlock\n"); + if (hwlock_id != -EPROBE_DEFER) + dev_err(&pdev->dev, "failed to retrieve hwlock\n"); return hwlock_id; } diff --git a/drivers/soc/rockchip/pm_domains.c b/drivers/soc/rockchip/pm_domains.c index 44842a205e4b..7acd1517dd37 100644 --- a/drivers/soc/rockchip/pm_domains.c +++ b/drivers/soc/rockchip/pm_domains.c @@ -27,6 +27,7 @@ struct rockchip_domain_info { int req_mask; int idle_mask; int ack_mask; + bool active_wakeup; }; struct rockchip_pmu_info { @@ -75,23 +76,24 @@ struct rockchip_pmu { #define to_rockchip_pd(gpd) container_of(gpd, struct rockchip_pm_domain, genpd) -#define DOMAIN(pwr, status, req, idle, ack) \ +#define DOMAIN(pwr, status, req, idle, ack, wakeup) \ { \ .pwr_mask = (pwr >= 0) ? BIT(pwr) : 0, \ .status_mask = (status >= 0) ? BIT(status) : 0, \ .req_mask = (req >= 0) ? BIT(req) : 0, \ .idle_mask = (idle >= 0) ? BIT(idle) : 0, \ .ack_mask = (ack >= 0) ? 
BIT(ack) : 0, \ + .active_wakeup = wakeup, \ } -#define DOMAIN_RK3288(pwr, status, req) \ - DOMAIN(pwr, status, req, req, (req) + 16) +#define DOMAIN_RK3288(pwr, status, req, wakeup) \ + DOMAIN(pwr, status, req, req, (req) + 16, wakeup) -#define DOMAIN_RK3368(pwr, status, req) \ - DOMAIN(pwr, status, req, (req) + 16, req) +#define DOMAIN_RK3368(pwr, status, req, wakeup) \ + DOMAIN(pwr, status, req, (req) + 16, req, wakeup) -#define DOMAIN_RK3399(pwr, status, req) \ - DOMAIN(pwr, status, req, req, req) +#define DOMAIN_RK3399(pwr, status, req, wakeup) \ + DOMAIN(pwr, status, req, req, req, wakeup) static bool rockchip_pmu_domain_is_idle(struct rockchip_pm_domain *pd) { @@ -295,6 +297,17 @@ static void rockchip_pd_detach_dev(struct generic_pm_domain *genpd, pm_clk_destroy(dev); } +static bool rockchip_active_wakeup(struct device *dev) +{ + struct generic_pm_domain *genpd; + struct rockchip_pm_domain *pd; + + genpd = pd_to_genpd(dev->pm_domain); + pd = container_of(genpd, struct rockchip_pm_domain, genpd); + + return pd->info->active_wakeup; +} + static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, struct device_node *node) { @@ -415,6 +428,7 @@ static int rockchip_pm_add_one_domain(struct rockchip_pmu *pmu, pd->genpd.power_on = rockchip_pd_power_on; pd->genpd.attach_dev = rockchip_pd_attach_dev; pd->genpd.detach_dev = rockchip_pd_detach_dev; + pd->genpd.dev_ops.active_wakeup = rockchip_active_wakeup; pd->genpd.flags = GENPD_FLAG_PM_CLK; pm_genpd_init(&pd->genpd, NULL, false); @@ -623,48 +637,48 @@ err_out: } static const struct rockchip_domain_info rk3288_pm_domains[] = { - [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4), - [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9), - [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3), - [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2), + [RK3288_PD_VIO] = DOMAIN_RK3288(7, 7, 4, false), + [RK3288_PD_HEVC] = DOMAIN_RK3288(14, 10, 9, false), + [RK3288_PD_VIDEO] = DOMAIN_RK3288(8, 8, 3, false), + [RK3288_PD_GPU] = DOMAIN_RK3288(9, 9, 2, false), }; static const struct rockchip_domain_info rk3368_pm_domains[] = { - [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6), - [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8), - [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7), - [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2), - [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2), + [RK3368_PD_PERI] = DOMAIN_RK3368(13, 12, 6, true), + [RK3368_PD_VIO] = DOMAIN_RK3368(15, 14, 8, false), + [RK3368_PD_VIDEO] = DOMAIN_RK3368(14, 13, 7, false), + [RK3368_PD_GPU_0] = DOMAIN_RK3368(16, 15, 2, false), + [RK3368_PD_GPU_1] = DOMAIN_RK3368(17, 16, 2, false), }; static const struct rockchip_domain_info rk3399_pm_domains[] = { - [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1), - [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1), - [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1), - [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15), - [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16), - [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1), - [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2), - [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14), - [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17), - [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0), - [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3), - [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4), - [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5), - [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6), - [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1), - [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7), - [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8), - [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9), - [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10), - 
[RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11), - [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23), - [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24), - [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12), - [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22), - [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27), - [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28), - [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29), + [RK3399_PD_TCPD0] = DOMAIN_RK3399(8, 8, -1, false), + [RK3399_PD_TCPD1] = DOMAIN_RK3399(9, 9, -1, false), + [RK3399_PD_CCI] = DOMAIN_RK3399(10, 10, -1, true), + [RK3399_PD_CCI0] = DOMAIN_RK3399(-1, -1, 15, true), + [RK3399_PD_CCI1] = DOMAIN_RK3399(-1, -1, 16, true), + [RK3399_PD_PERILP] = DOMAIN_RK3399(11, 11, 1, true), + [RK3399_PD_PERIHP] = DOMAIN_RK3399(12, 12, 2, true), + [RK3399_PD_CENTER] = DOMAIN_RK3399(13, 13, 14, true), + [RK3399_PD_VIO] = DOMAIN_RK3399(14, 14, 17, false), + [RK3399_PD_GPU] = DOMAIN_RK3399(15, 15, 0, false), + [RK3399_PD_VCODEC] = DOMAIN_RK3399(16, 16, 3, false), + [RK3399_PD_VDU] = DOMAIN_RK3399(17, 17, 4, false), + [RK3399_PD_RGA] = DOMAIN_RK3399(18, 18, 5, false), + [RK3399_PD_IEP] = DOMAIN_RK3399(19, 19, 6, false), + [RK3399_PD_VO] = DOMAIN_RK3399(20, 20, -1, false), + [RK3399_PD_VOPB] = DOMAIN_RK3399(-1, -1, 7, false), + [RK3399_PD_VOPL] = DOMAIN_RK3399(-1, -1, 8, false), + [RK3399_PD_ISP0] = DOMAIN_RK3399(22, 22, 9, false), + [RK3399_PD_ISP1] = DOMAIN_RK3399(23, 23, 10, false), + [RK3399_PD_HDCP] = DOMAIN_RK3399(24, 24, 11, false), + [RK3399_PD_GMAC] = DOMAIN_RK3399(25, 25, 23, true), + [RK3399_PD_EMMC] = DOMAIN_RK3399(26, 26, 24, true), + [RK3399_PD_USB3] = DOMAIN_RK3399(27, 27, 12, true), + [RK3399_PD_EDP] = DOMAIN_RK3399(28, 28, 22, false), + [RK3399_PD_GIC] = DOMAIN_RK3399(29, 29, 27, true), + [RK3399_PD_SD] = DOMAIN_RK3399(30, 30, 28, true), + [RK3399_PD_SDIOAUDIO] = DOMAIN_RK3399(31, 31, 29, true), }; static const struct rockchip_pmu_info rk3288_pmu = { diff --git a/drivers/soc/tegra/pmc.c b/drivers/soc/tegra/pmc.c index 71c834f3847e..7792ed88d80b 100644 --- a/drivers/soc/tegra/pmc.c +++ b/drivers/soc/tegra/pmc.c @@ -967,8 +967,8 @@ static void tegra_io_rail_unprepare(void) int tegra_io_rail_power_on(unsigned int id) { - unsigned long request, status, value; - unsigned int bit, mask; + unsigned long request, status; + unsigned int bit; int err; mutex_lock(&pmc->powergates_lock); @@ -977,15 +977,9 @@ int tegra_io_rail_power_on(unsigned int id) if (err) goto error; - mask = 1 << bit; + tegra_pmc_writel(IO_DPD_REQ_CODE_OFF | BIT(bit), request); - value = tegra_pmc_readl(request); - value |= mask; - value &= ~IO_DPD_REQ_CODE_MASK; - value |= IO_DPD_REQ_CODE_OFF; - tegra_pmc_writel(value, request); - - err = tegra_io_rail_poll(status, mask, 0, 250); + err = tegra_io_rail_poll(status, BIT(bit), 0, 250); if (err) { pr_info("tegra_io_rail_poll() failed: %d\n", err); goto error; @@ -1002,8 +996,8 @@ EXPORT_SYMBOL(tegra_io_rail_power_on); int tegra_io_rail_power_off(unsigned int id) { - unsigned long request, status, value; - unsigned int bit, mask; + unsigned long request, status; + unsigned int bit; int err; mutex_lock(&pmc->powergates_lock); @@ -1014,15 +1008,9 @@ int tegra_io_rail_power_off(unsigned int id) goto error; } - mask = 1 << bit; - - value = tegra_pmc_readl(request); - value |= mask; - value &= ~IO_DPD_REQ_CODE_MASK; - value |= IO_DPD_REQ_CODE_ON; - tegra_pmc_writel(value, request); + tegra_pmc_writel(IO_DPD_REQ_CODE_ON | BIT(bit), request); - err = tegra_io_rail_poll(status, mask, mask, 250); + err = tegra_io_rail_poll(status, BIT(bit), BIT(bit), 250); if (err) goto 
diff --git a/drivers/staging/lustre/lustre/llite/file.c b/drivers/staging/lustre/lustre/llite/file.c
index 6e3a188baaae..d56863ff5866 100644
--- a/drivers/staging/lustre/lustre/llite/file.c
+++ b/drivers/staging/lustre/lustre/llite/file.c
@@ -1138,45 +1138,31 @@ restart:
 		range_lock_init(&range, *ppos, *ppos + count - 1);
 
 		vio->vui_fd = LUSTRE_FPRIVATE(file);
-		vio->vui_io_subtype = args->via_io_subtype;
+		vio->vui_iter = args->u.normal.via_iter;
+		vio->vui_iocb = args->u.normal.via_iocb;
+		/*
+		 * Direct IO reads must also take range lock,
+		 * or multiple reads will try to work on the same pages
+		 * See LU-6227 for details.
+		 */
+		if (((iot == CIT_WRITE) ||
+		     (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
+		    !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
+			CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n",
+			       range.rl_node.in_extent.start,
+			       range.rl_node.in_extent.end);
+			result = range_lock(&lli->lli_write_tree,
+					    &range);
+			if (result < 0)
+				goto out;
 
-		switch (vio->vui_io_subtype) {
-		case IO_NORMAL:
-			vio->vui_iter = args->u.normal.via_iter;
-			vio->vui_iocb = args->u.normal.via_iocb;
-			/*
-			 * Direct IO reads must also take range lock,
-			 * or multiple reads will try to work on the same pages
-			 * See LU-6227 for details.
-			 */
-			if (((iot == CIT_WRITE) ||
-			     (iot == CIT_READ && (file->f_flags & O_DIRECT))) &&
-			    !(vio->vui_fd->fd_flags & LL_FILE_GROUP_LOCKED)) {
-				CDEBUG(D_VFSTRACE, "Range lock [%llu, %llu]\n",
-				       range.rl_node.in_extent.start,
-				       range.rl_node.in_extent.end);
-				result = range_lock(&lli->lli_write_tree,
-						    &range);
-				if (result < 0)
-					goto out;
-
-				range_locked = true;
-			}
-			down_read(&lli->lli_trunc_sem);
-			break;
-		case IO_SPLICE:
-			vio->u.splice.vui_pipe = args->u.splice.via_pipe;
-			vio->u.splice.vui_flags = args->u.splice.via_flags;
-			break;
-		default:
-			CERROR("Unknown IO type - %u\n", vio->vui_io_subtype);
-			LBUG();
+			range_locked = true;
 		}
+		down_read(&lli->lli_trunc_sem);
 
 		ll_cl_add(file, env, io);
 		result = cl_io_loop(env, io);
 		ll_cl_remove(file, env);
-		if (args->via_io_subtype == IO_NORMAL)
-			up_read(&lli->lli_trunc_sem);
+		up_read(&lli->lli_trunc_sem);
 		if (range_locked) {
 			CDEBUG(D_VFSTRACE, "Range unlock [%llu, %llu]\n",
 			       range.rl_node.in_extent.start,
@@ -1235,7 +1221,7 @@ static ssize_t ll_file_read_iter(struct kiocb *iocb, struct iov_iter *to)
 	if (IS_ERR(env))
 		return PTR_ERR(env);
 
-	args = ll_env_args(env, IO_NORMAL);
+	args = ll_env_args(env);
 	args->u.normal.via_iter = to;
 	args->u.normal.via_iocb = iocb;
 
@@ -1259,7 +1245,7 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	if (IS_ERR(env))
 		return PTR_ERR(env);
 
-	args = ll_env_args(env, IO_NORMAL);
+	args = ll_env_args(env);
 	args->u.normal.via_iter = from;
 	args->u.normal.via_iocb = iocb;
 
@@ -1269,31 +1255,6 @@ static ssize_t ll_file_write_iter(struct kiocb *iocb, struct iov_iter *from)
 	return result;
 }
 
-/*
- * Send file content (through pagecache) somewhere with helper
- */
-static ssize_t ll_file_splice_read(struct file *in_file, loff_t *ppos,
-				   struct pipe_inode_info *pipe, size_t count,
-				   unsigned int flags)
-{
-	struct lu_env *env;
-	struct vvp_io_args *args;
-	ssize_t result;
-	int refcheck;
-
-	env = cl_env_get(&refcheck);
-	if (IS_ERR(env))
-		return PTR_ERR(env);
-
-	args = ll_env_args(env, IO_SPLICE);
-	args->u.splice.via_pipe = pipe;
-	args->u.splice.via_flags = flags;
-
-	result = ll_file_io_generic(env, args, in_file, CIT_READ, ppos, count);
-	cl_env_put(env, &refcheck);
-	return result;
-}
-
 int ll_lov_setstripe_ea_info(struct inode *inode, struct dentry *dentry,
 			     __u64 flags, struct lov_user_md *lum,
 			     int lum_size)
@@ -3267,7 +3228,7 @@ struct file_operations ll_file_operations = {
 	.release	= ll_file_release,
 	.mmap		= ll_file_mmap,
 	.llseek		= ll_file_seek,
-	.splice_read	= ll_file_splice_read,
+	.splice_read	= generic_file_splice_read,
 	.fsync		= ll_fsync,
 	.flush		= ll_flush
 };
@@ -3280,7 +3241,7 @@ struct file_operations ll_file_operations_flock = {
 	.release	= ll_file_release,
 	.mmap		= ll_file_mmap,
 	.llseek		= ll_file_seek,
-	.splice_read	= ll_file_splice_read,
+	.splice_read	= generic_file_splice_read,
 	.fsync		= ll_fsync,
 	.flush		= ll_flush,
 	.flock		= ll_file_flock,
@@ -3296,7 +3257,7 @@ struct file_operations ll_file_operations_noflock = {
 	.release	= ll_file_release,
 	.mmap		= ll_file_mmap,
 	.llseek		= ll_file_seek,
-	.splice_read	= ll_file_splice_read,
+	.splice_read	= generic_file_splice_read,
 	.fsync		= ll_fsync,
 	.flush		= ll_flush,
 	.flock		= ll_file_noflock,
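After this rewrite, ll_file_io_generic() always takes lli_trunc_sem and takes the range lock for writes and for O_DIRECT reads. The predicate is isolated below as a hypothetical helper, needs_range_lock(), with illustrative flag values standing in for the real Lustre/VFS constants:

#include <stdbool.h>
#include <stdio.h>

enum cl_io_type { CIT_READ, CIT_WRITE };	/* subset, for the demo */

/* Illustrative values; the real ones live in Lustre/VFS headers. */
#define O_DIRECT_DEMO		0x4000
#define LL_FILE_GROUP_LOCKED	0x01

/* Mirrors the condition in the hunk above: writes always need the
 * range lock, reads only when O_DIRECT, and group-locked files never. */
static bool needs_range_lock(enum cl_io_type iot, int f_flags, int fd_flags)
{
	return ((iot == CIT_WRITE) ||
		(iot == CIT_READ && (f_flags & O_DIRECT_DEMO))) &&
	       !(fd_flags & LL_FILE_GROUP_LOCKED);
}

int main(void)
{
	printf("buffered read: %d\n", needs_range_lock(CIT_READ, 0, 0));
	printf("O_DIRECT read: %d\n", needs_range_lock(CIT_READ, O_DIRECT_DEMO, 0));
	printf("write:         %d\n", needs_range_lock(CIT_WRITE, 0, 0));
	printf("group-locked:  %d\n", needs_range_lock(CIT_WRITE, 0, LL_FILE_GROUP_LOCKED));
	return 0;
}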
diff --git a/drivers/staging/lustre/lustre/llite/llite_internal.h b/drivers/staging/lustre/lustre/llite/llite_internal.h
index 3e98bd685061..4bc551279aa4 100644
--- a/drivers/staging/lustre/lustre/llite/llite_internal.h
+++ b/drivers/staging/lustre/lustre/llite/llite_internal.h
@@ -908,17 +908,11 @@ void vvp_write_complete(struct vvp_object *club, struct vvp_page *page);
  */
 struct vvp_io_args {
 	/** normal/splice */
-	enum vvp_io_subtype via_io_subtype;
-
 	union {
 		struct {
 			struct kiocb *via_iocb;
 			struct iov_iter *via_iter;
 		} normal;
-		struct {
-			struct pipe_inode_info *via_pipe;
-			unsigned int via_flags;
-		} splice;
 	} u;
 };
 
@@ -946,14 +940,9 @@ static inline struct ll_thread_info *ll_env_info(const struct lu_env *env)
 	return lti;
 }
 
-static inline struct vvp_io_args *ll_env_args(const struct lu_env *env,
-					      enum vvp_io_subtype type)
+static inline struct vvp_io_args *ll_env_args(const struct lu_env *env)
 {
-	struct vvp_io_args *via = &ll_env_info(env)->lti_args;
-
-	via->via_io_subtype = type;
-
-	return via;
+	return &ll_env_info(env)->lti_args;
 }
 
 void ll_queue_done_writing(struct inode *inode, unsigned long flags);
diff --git a/drivers/staging/lustre/lustre/llite/vvp_internal.h b/drivers/staging/lustre/lustre/llite/vvp_internal.h
index 5802da81cd0e..4464ad258387 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_internal.h
+++ b/drivers/staging/lustre/lustre/llite/vvp_internal.h
@@ -49,14 +49,6 @@ struct obd_device;
 struct obd_export;
 struct page;
 
-/* specific architecture can implement only part of this list */
-enum vvp_io_subtype {
-	/** normal IO */
-	IO_NORMAL,
-	/** io started from splice_{read|write} */
-	IO_SPLICE
-};
-
 /**
  * IO state private to VVP layer.
  */
@@ -99,10 +91,6 @@ struct vvp_io {
 			bool		ft_flags_valid;
 		} fault;
 		struct {
-			struct pipe_inode_info	*vui_pipe;
-			unsigned int		 vui_flags;
-		} splice;
-		struct {
 			struct cl_page_list vui_queue;
 			unsigned long vui_written;
 			int vui_from;
 			int vui_to;
 		} write;
 	} u;
 
-	enum vvp_io_subtype vui_io_subtype;
-
 	/**
 	 * Layout version when this IO is initialized
 	 */
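Dropping the splice arm also removes the union discriminator, so ll_env_args() no longer has to tag anything at runtime. A cut-down model of the old and new vvp_io_args layouts (void pointers standing in for the kernel types) shows the effect on the per-thread argument block:

#include <stdio.h>

/* Simplified models; only layout is of interest here. */
enum vvp_io_subtype { IO_NORMAL, IO_SPLICE };

struct vvp_io_args_old {
	enum vvp_io_subtype via_io_subtype;	/* discriminator */
	union {
		struct { void *via_iocb; void *via_iter; } normal;
		struct { void *via_pipe; unsigned int via_flags; } splice;
	} u;
};

struct vvp_io_args_new {
	union {
		struct { void *via_iocb; void *via_iter; } normal;
	} u;
};

int main(void)
{
	/* With the splice arm and the discriminator gone, the struct
	 * shrinks and no longer needs runtime tagging. */
	printf("old: %zu bytes, new: %zu bytes\n",
	       sizeof(struct vvp_io_args_old), sizeof(struct vvp_io_args_new));
	return 0;
}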
diff --git a/drivers/staging/lustre/lustre/llite/vvp_io.c b/drivers/staging/lustre/lustre/llite/vvp_io.c
index 2ab450359b6d..2b7f182a15e2 100644
--- a/drivers/staging/lustre/lustre/llite/vvp_io.c
+++ b/drivers/staging/lustre/lustre/llite/vvp_io.c
@@ -54,18 +54,6 @@ static struct vvp_io *cl2vvp_io(const struct lu_env *env,
 }
 
 /**
- * True, if \a io is a normal io, False for splice_{read,write}
- */
-static int cl_is_normalio(const struct lu_env *env, const struct cl_io *io)
-{
-	struct vvp_io *vio = vvp_env_io(env);
-
-	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
-
-	return vio->vui_io_subtype == IO_NORMAL;
-}
-
-/**
  * For swapping layout. The file's layout may have changed.
  * To avoid populating pages to a wrong stripe, we have to verify the
  * correctness of layout. It works because swapping layout processes
@@ -390,9 +378,6 @@ static int vvp_mmap_locks(const struct lu_env *env,
 
 	LASSERT(io->ci_type == CIT_READ || io->ci_type == CIT_WRITE);
 
-	if (!cl_is_normalio(env, io))
-		return 0;
-
 	if (!vio->vui_iter) /* nfs or loop back device write */
 		return 0;
 
@@ -461,15 +446,10 @@ static void vvp_io_advance(const struct lu_env *env,
 			   const struct cl_io_slice *ios,
 			   size_t nob)
 {
-	struct vvp_io *vio = cl2vvp_io(env, ios);
-	struct cl_io *io = ios->cis_io;
 	struct cl_object *obj = ios->cis_io->ci_obj;
-
+	struct vvp_io *vio = cl2vvp_io(env, ios);
 	CLOBINVRNT(env, obj, vvp_object_invariant(obj));
 
-	if (!cl_is_normalio(env, io))
-		return;
-
 	iov_iter_reexpand(vio->vui_iter, vio->vui_tot_count -= nob);
 }
 
@@ -478,7 +458,7 @@ static void vvp_io_update_iov(const struct lu_env *env,
 {
 	size_t size = io->u.ci_rw.crw_count;
 
-	if (!cl_is_normalio(env, io) || !vio->vui_iter)
+	if (!vio->vui_iter)
 		return;
 
 	iov_iter_truncate(vio->vui_iter, size);
@@ -715,25 +695,8 @@ static int vvp_io_read_start(const struct lu_env *env,
 	/* BUG: 5972 */
 	file_accessed(file);
 
-	switch (vio->vui_io_subtype) {
-	case IO_NORMAL:
-		LASSERT(vio->vui_iocb->ki_pos == pos);
-		result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
-		break;
-	case IO_SPLICE:
-		result = generic_file_splice_read(file, &pos,
-						  vio->u.splice.vui_pipe, cnt,
-						  vio->u.splice.vui_flags);
-		/* LU-1109: do splice read stripe by stripe otherwise if it
-		 * may make nfsd stuck if this read occupied all internal pipe
-		 * buffers.
-		 */
-		io->ci_continue = 0;
-		break;
-	default:
-		CERROR("Wrong IO type %u\n", vio->vui_io_subtype);
-		LBUG();
-	}
+	LASSERT(vio->vui_iocb->ki_pos == pos);
+	result = generic_file_read_iter(vio->vui_iocb, vio->vui_iter);
 
 out:
 	if (result >= 0) {
diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c
index 530959a8a6d1..8e1728b39a49 100644
--- a/drivers/usb/dwc2/platform.c
+++ b/drivers/usb/dwc2/platform.c
@@ -182,6 +182,38 @@ static const struct dwc2_core_params params_ltq = {
 	.hibernation			= -1,
 };
 
+static const struct dwc2_core_params params_amlogic = {
+	.otg_cap			= DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE,
+	.otg_ver			= -1,
+	.dma_enable			= 1,
+	.dma_desc_enable		= 0,
+	.dma_desc_fs_enable		= 0,
+	.speed				= DWC2_SPEED_PARAM_HIGH,
+	.enable_dynamic_fifo		= 1,
+	.en_multiple_tx_fifo		= -1,
+	.host_rx_fifo_size		= 512,
+	.host_nperio_tx_fifo_size	= 500,
+	.host_perio_tx_fifo_size	= 500,
+	.max_transfer_size		= -1,
+	.max_packet_count		= -1,
+	.host_channels			= 16,
+	.phy_type			= DWC2_PHY_TYPE_PARAM_UTMI,
+	.phy_utmi_width			= -1,
+	.phy_ulpi_ddr			= -1,
+	.phy_ulpi_ext_vbus		= -1,
+	.i2c_enable			= -1,
+	.ulpi_fs_ls			= -1,
+	.host_support_fs_ls_low_power	= -1,
+	.host_ls_low_power_phy_clk	= -1,
+	.ts_dline			= -1,
+	.reload_ctl			= 1,
+	.ahbcfg				= GAHBCFG_HBSTLEN_INCR8 <<
+					  GAHBCFG_HBSTLEN_SHIFT,
+	.uframe_sched			= 0,
+	.external_id_pin_ctl		= -1,
+	.hibernation			= -1,
+};
+
 /*
  * Check the dr_mode against the module configuration and hardware
  * capabilities.
@@ -486,6 +518,8 @@ static const struct of_device_id dwc2_of_match_table[] = {
 	{ .compatible = "lantiq,xrx200-usb", .data = &params_ltq },
 	{ .compatible = "snps,dwc2", .data = NULL },
 	{ .compatible = "samsung,s3c6400-hsotg", .data = NULL},
+	{ .compatible = "amlogic,meson8b-usb", .data = &params_amlogic },
+	{ .compatible = "amlogic,meson-gxbb-usb", .data = &params_amlogic },
 	{},
 };
 MODULE_DEVICE_TABLE(of, dwc2_of_match_table);
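In the running kernel the table above is consulted by the OF core during probe (the driver never string-compares compatibles itself). The stand-alone model below, with simplified stand-in structs, only illustrates why both Amlogic compatibles resolve to the same params_amlogic:

#include <stdio.h>
#include <string.h>

/* Minimal model of OF matching: walk the table and return the .data
 * of the first entry whose compatible string matches. */
struct core_params { int dma_enable; int host_channels; };

struct of_id { const char *compatible; const struct core_params *data; };

static const struct core_params params_amlogic = {
	.dma_enable = 1, .host_channels = 16,	/* values from the diff above */
};

static const struct of_id match_table[] = {
	{ "snps,dwc2", NULL },
	{ "amlogic,meson8b-usb", &params_amlogic },
	{ "amlogic,meson-gxbb-usb", &params_amlogic },
	{ NULL, NULL },		/* sentinel, like the {} terminator */
};

static const struct core_params *of_match(const char *compatible)
{
	const struct of_id *id;

	for (id = match_table; id->compatible; id++)
		if (!strcmp(id->compatible, compatible))
			return id->data;
	return NULL;
}

int main(void)
{
	const struct core_params *p = of_match("amlogic,meson-gxbb-usb");

	if (p)
		printf("dma_enable=%d host_channels=%d\n",
		       p->dma_enable, p->host_channels);
	return 0;
}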