author | Felix Manlunas <felix.manlunas@cavium.com> | 2018-08-28 18:51:30 -0700
committer | David S. Miller <davem@davemloft.net> | 2018-08-29 20:07:41 -0700
commit | c9aec05228dc6fa5573c56dba6bed43250228069 (patch)
tree | b0d0eb8c1903db75897bc104ea7b5ef38b929e73
parent | 0927f71dbcfb59131b289d7d518e9472e51d4830 (diff)
liquidio: improve soft command handling
1. Set LIO_SC_MAX_TMO_MS as the maximum timeout value for a soft command
(sc). All sc's now use this value as a hard timeout. Add an expiry_time
field to struct octeon_soft_command to hold the hard timeout; the
wait_time and timeout fields in struct octeon_soft_command will be
removed in the last patch of this series. (A condensed sketch of the new
timeout setup follows this list.)
2. Add handling of synchronous sc's to the sc response thread,
lio_process_ordered_list(). The memory allocated for a synchronous sc is
now freed back to the sc pool by lio_process_ordered_list(). (A
caller-side sketch follows this list.)
3. Add two response lists that lio_process_ordered_list() uses to manage
the storage allocated for sc's:
The OCTEON_DONE_SC_LIST response list holds sc's that will be freed back
to the pool after their requestors have finished processing the
responses.
The OCTEON_ZOMBIE_SC_LIST response list holds sc's that have hit the
LIO_SC_MAX_TMO_MS timeout.
When an sc reaches its hard timeout, lio_process_ordered_list() rechecks
its status 1 ms later. If the firmware still has not updated the status
by then, the sc is moved from the OCTEON_DONE_SC_LIST response list to
the OCTEON_ZOMBIE_SC_LIST response list. The sc's on the
OCTEON_ZOMBIE_SC_LIST response list are freed when the driver is
unloaded. (The done-to-zombie move is sketched after this list.)
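For illustration, the hard-timeout setup, condensed from the octeon_nic.c
and request_manager.c hunks in the diff below; LIO_SC_MAX_TMO_MS is the new
60000 ms cap defined in octeon_iq.h (surrounding function context omitted):

	/* Every sc now gets the same hard expiry, independent of the old
	 * per-command wait_time/timeout pair.
	 */
	sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);

lio_process_ordered_list() later compares jiffies against sc->expiry_time
with time_after() to detect the hard timeout.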
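The caller-side pattern for a synchronous sc is not part of this patch (it
lands later in the series); the sketch below is a hedged illustration of
how the new complete/caller_is_done/sc_status fields are meant to be used,
with error handling trimmed:

	/* Assumed caller-side usage; only complete, caller_is_done and
	 * sc_status are introduced by this patch, the wait loop is a sketch.
	 */
	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	if (octeon_send_soft_command(oct, sc) == IQ_SEND_FAILED)
		return -EIO;

	/* lio_process_ordered_list() calls complete(&sc->complete) once the
	 * firmware responds or the LIO_SC_MAX_TMO_MS hard timeout fires.
	 */
	wait_for_completion(&sc->complete);

	/* ... inspect sc->sc_status and the response data ... */

	/* Tell the response thread the requestor is done; the sc sitting on
	 * OCTEON_DONE_SC_LIST can now be freed back to the pool.
	 */
	WRITE_ONCE(sc->caller_is_done, 1);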
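The done-to-zombie handling, condensed from octeon_free_sc_done_list() in
the diff below (locking elided):

	list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
		sc = list_entry(tmp, struct octeon_soft_command, node);

		if (READ_ONCE(sc->caller_is_done)) {
			list_del(&sc->node);
			atomic_dec(&done_sc_list->pending_req_count);

			if (*sc->status_word == COMPLETION_WORD_INIT) {
				/* firmware never responded: park the sc on
				 * the zombie list instead of reusing it
				 */
				list_add_tail(&sc->node, &zombie_sc_list->head);
				atomic_inc(&zombie_sc_list->pending_req_count);
			} else {
				octeon_free_soft_command(oct, sc);
			}
		}
	}

octeon_free_sc_zombie_list() then releases whatever is still parked on the
zombie list when the driver is unloaded.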
Signed-off-by: Weilin Chang <weilin.chang@cavium.com>
Signed-off-by: Felix Manlunas <felix.manlunas@cavium.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
8 files changed, 232 insertions, 49 deletions
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_main.c b/drivers/net/ethernet/cavium/liquidio/lio_main.c
index 6fb13fa73b27..6663749bb336 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_main.c
@@ -1037,12 +1037,12 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 		/* fallthrough */
 	case OCT_DEV_IO_QUEUES_DONE:
-		if (wait_for_pending_requests(oct))
-			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
-
 		if (lio_wait_for_instr_fetch(oct))
 			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
 
+		if (wait_for_pending_requests(oct))
+			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
 		/* Disable the input and output queues now. No more packets will
 		 * arrive from Octeon, but we should wait for all packet
 		 * processing to finish.
@@ -1052,6 +1052,31 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 		if (lio_wait_for_oq_pkts(oct))
 			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
 
+		/* Force all requests waiting to be fetched by OCTEON to
+		 * complete.
+		 */
+		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+			struct octeon_instr_queue *iq;
+
+			if (!(oct->io_qmask.iq & BIT_ULL(i)))
+				continue;
+			iq = oct->instr_queue[i];
+
+			if (atomic_read(&iq->instr_pending)) {
+				spin_lock_bh(&iq->lock);
+				iq->fill_cnt = 0;
+				iq->octeon_read_index = iq->host_write_index;
+				iq->stats.instr_processed +=
+					atomic_read(&iq->instr_pending);
+				lio_process_iq_request_list(oct, iq, 0);
+				spin_unlock_bh(&iq->lock);
+			}
+		}
+
+		lio_process_ordered_list(oct, 1);
+		octeon_free_sc_done_list(oct);
+		octeon_free_sc_zombie_list(oct);
+
 		/* fallthrough */
 	case OCT_DEV_INTR_SET_DONE:
 		/* Disable interrupts */
diff --git a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
index b77835724dc8..59c2dd92aac5 100644
--- a/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
+++ b/drivers/net/ethernet/cavium/liquidio/lio_vf_main.c
@@ -471,12 +471,12 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 	case OCT_DEV_HOST_OK:
 		/* fallthrough */
 	case OCT_DEV_IO_QUEUES_DONE:
-		if (wait_for_pending_requests(oct))
-			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
-
 		if (lio_wait_for_instr_fetch(oct))
 			dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
 
+		if (wait_for_pending_requests(oct))
+			dev_err(&oct->pci_dev->dev, "There were pending requests\n");
+
 		/* Disable the input and output queues now. No more packets will
 		 * arrive from Octeon, but we should wait for all packet
 		 * processing to finish.
@@ -485,7 +485,33 @@ static void octeon_destroy_resources(struct octeon_device *oct)
 		if (lio_wait_for_oq_pkts(oct))
 			dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
-		/* fall through */
+
+		/* Force all requests waiting to be fetched by OCTEON to
+		 * complete.
+		 */
+		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
+			struct octeon_instr_queue *iq;
+
+			if (!(oct->io_qmask.iq & BIT_ULL(i)))
+				continue;
+			iq = oct->instr_queue[i];
+
+			if (atomic_read(&iq->instr_pending)) {
+				spin_lock_bh(&iq->lock);
+				iq->fill_cnt = 0;
+				iq->octeon_read_index = iq->host_write_index;
+				iq->stats.instr_processed +=
+					atomic_read(&iq->instr_pending);
+				lio_process_iq_request_list(oct, iq, 0);
+				spin_unlock_bh(&iq->lock);
+			}
+		}
+
+		lio_process_ordered_list(oct, 1);
+		octeon_free_sc_done_list(oct);
+		octeon_free_sc_zombie_list(oct);
+
+		/* fall through */
 	case OCT_DEV_INTR_SET_DONE:
 		/* Disable interrupts */
 		oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_config.h b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
index ceac74388e09..056dceb2a558 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_config.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_config.h
@@ -440,7 +440,7 @@ struct octeon_config {
 /* Response lists - 1 ordered, 1 unordered-blocking, 1 unordered-nonblocking
  * NoResponse Lists are now maintained with each IQ. (Dec' 2007).
  */
-#define MAX_RESPONSE_LISTS 4
+#define MAX_RESPONSE_LISTS 6
 
 /* Opcode hash bits. The opcode is hashed on the lower 6-bits to lookup the
  * dispatch table.
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
index aecd0d36d634..3437d7f6acad 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_iq.h
@@ -294,11 +294,20 @@ struct octeon_soft_command {
 	/** Time out and callback */
 	size_t wait_time;
 	size_t timeout;
+	size_t expiry_time;
+
 	u32 iq_no;
 	void (*callback)(struct octeon_device *, u32, void *);
 	void *callback_arg;
+
+	int caller_is_done;
+	u32 sc_status;
+	struct completion complete;
 };
 
+/* max timeout (in milli sec) for soft request */
+#define LIO_SC_MAX_TMO_MS 60000
+
 /** Maximum number of buffers to allocate into soft command buffer pool */
 #define MAX_SOFT_COMMAND_BUFFERS 256
 
@@ -319,6 +328,8 @@ struct octeon_sc_buffer_pool {
 	(((octeon_dev_ptr)->instr_queue[iq_no]->stats.field) += count)
 
 int octeon_setup_sc_buffer_pool(struct octeon_device *oct);
+int octeon_free_sc_done_list(struct octeon_device *oct);
+int octeon_free_sc_zombie_list(struct octeon_device *oct);
 int octeon_free_sc_buffer_pool(struct octeon_device *oct);
 struct octeon_soft_command *
 	octeon_alloc_soft_command(struct octeon_device *oct,
diff --git a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
index 150609bd8849..b7364bb3b186 100644
--- a/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
+++ b/drivers/net/ethernet/cavium/liquidio/octeon_nic.c
@@ -75,8 +75,7 @@ octeon_alloc_soft_command_resp(struct octeon_device *oct,
 	else
 		sc->cmd.cmd2.rptr = sc->dmarptr;
 
-	sc->wait_time = 1000;
-	sc->timeout = jiffies + sc->wait_time;
+	sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
 
 	return sc;
 }
diff --git a/drivers/net/ethernet/cavium/liquidio/request_manager.c b/drivers/net/ethernet/cavium/liquidio/request_manager.c
index 5de5ce9a8f54..bd0153e16deb 100644
--- a/drivers/net/ethernet/cavium/liquidio/request_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/request_manager.c
@@ -409,33 +409,22 @@ lio_process_iq_request_list(struct octeon_device *oct,
 			else
 				irh = (struct octeon_instr_irh *)
 					&sc->cmd.cmd2.irh;
-			if (irh->rflag) {
-				/* We're expecting a response from Octeon.
-				 * It's up to lio_process_ordered_list() to
-				 * process sc. Add sc to the ordered soft
-				 * command response list because we expect
-				 * a response from Octeon.
-				 */
-				spin_lock_irqsave
-					(&oct->response_list
-					 [OCTEON_ORDERED_SC_LIST].lock,
-					 flags);
-				atomic_inc(&oct->response_list
-					   [OCTEON_ORDERED_SC_LIST].
-					   pending_req_count);
-				list_add_tail(&sc->node, &oct->response_list
-					      [OCTEON_ORDERED_SC_LIST].head);
-				spin_unlock_irqrestore
-					(&oct->response_list
-					 [OCTEON_ORDERED_SC_LIST].lock,
-					 flags);
-			} else {
-				if (sc->callback) {
-					/* This callback must not sleep */
-					sc->callback(oct, OCTEON_REQUEST_DONE,
-						     sc->callback_arg);
-				}
-			}
+
+			/* We're expecting a response from Octeon.
+			 * It's up to lio_process_ordered_list() to
+			 * process sc. Add sc to the ordered soft
+			 * command response list because we expect
+			 * a response from Octeon.
+			 */
+			spin_lock_irqsave(&oct->response_list
+					  [OCTEON_ORDERED_SC_LIST].lock, flags);
+			atomic_inc(&oct->response_list
+				   [OCTEON_ORDERED_SC_LIST].pending_req_count);
+			list_add_tail(&sc->node, &oct->response_list
+				      [OCTEON_ORDERED_SC_LIST].head);
+			spin_unlock_irqrestore(&oct->response_list
+					       [OCTEON_ORDERED_SC_LIST].lock,
+					       flags);
 			break;
 		default:
 			dev_err(&oct->pci_dev->dev,
@@ -755,8 +744,7 @@ int octeon_send_soft_command(struct octeon_device *oct,
 		len = (u32)ih2->dlengsz;
 	}
 
-	if (sc->wait_time)
-		sc->timeout = jiffies + sc->wait_time;
+	sc->expiry_time = jiffies + msecs_to_jiffies(LIO_SC_MAX_TMO_MS);
 
 	return (octeon_send_command(oct, sc->iq_no, 1, &sc->cmd, sc,
 				    len, REQTYPE_SOFT_COMMAND));
@@ -791,11 +779,76 @@ int octeon_setup_sc_buffer_pool(struct octeon_device *oct)
 	return 0;
 }
 
+int octeon_free_sc_done_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *done_sc_list, *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	done_sc_list = &oct->response_list[OCTEON_DONE_SC_LIST];
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+
+	if (!atomic_read(&done_sc_list->pending_req_count))
+		return 0;
+
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &done_sc_list->head) {
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+
+		if (READ_ONCE(sc->caller_is_done)) {
+			list_del(&sc->node);
+			atomic_dec(&done_sc_list->pending_req_count);
+
+			if (*sc->status_word == COMPLETION_WORD_INIT) {
+				/* timeout; move sc to zombie list */
+				list_add_tail(&sc->node, &zombie_sc_list->head);
+				atomic_inc(&zombie_sc_list->pending_req_count);
+			} else {
+				octeon_free_soft_command(oct, sc);
+			}
+		}
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+
+int octeon_free_sc_zombie_list(struct octeon_device *oct)
+{
+	struct octeon_response_list *zombie_sc_list;
+	struct octeon_soft_command *sc;
+	struct list_head *tmp, *tmp2;
+	spinlock_t *sc_lists_lock; /* lock for response_list */
+
+	zombie_sc_list = &oct->response_list[OCTEON_ZOMBIE_SC_LIST];
+	sc_lists_lock = &oct->response_list[OCTEON_ORDERED_SC_LIST].lock;
+
+	spin_lock_bh(sc_lists_lock);
+
+	list_for_each_safe(tmp, tmp2, &zombie_sc_list->head) {
+		list_del(tmp);
+		atomic_dec(&zombie_sc_list->pending_req_count);
+		sc = list_entry(tmp, struct octeon_soft_command, node);
+		octeon_free_soft_command(oct, sc);
+	}
+
+	spin_unlock_bh(sc_lists_lock);
+
+	return 0;
+}
+
 int octeon_free_sc_buffer_pool(struct octeon_device *oct)
 {
 	struct list_head *tmp, *tmp2;
 	struct octeon_soft_command *sc;
 
+	octeon_free_sc_zombie_list(oct);
+
 	spin_lock_bh(&oct->sc_buf_pool.lock);
 
 	list_for_each_safe(tmp, tmp2, &oct->sc_buf_pool.head) {
@@ -824,6 +877,9 @@ struct octeon_soft_command *octeon_alloc_soft_command(struct octeon_device *oct,
 	struct octeon_soft_command *sc = NULL;
 	struct list_head *tmp;
 
+	if (!rdatasize)
+		rdatasize = 16;
+
 	WARN_ON((offset + datasize + rdatasize + ctxsize) >
 		SOFT_COMMAND_BUFFER_SIZE);
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.c b/drivers/net/ethernet/cavium/liquidio/response_manager.c
index fe5b53700576..ac7747ccf56a 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.c
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.c
@@ -69,6 +69,8 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
 	u32 status;
 	u64 status64;
 
+	octeon_free_sc_done_list(octeon_dev);
+
 	ordered_sc_list = &octeon_dev->response_list[OCTEON_ORDERED_SC_LIST];
 
 	do {
@@ -111,26 +113,88 @@ int lio_process_ordered_list(struct octeon_device *octeon_dev,
 					}
 				}
 			}
-		} else if (force_quit || (sc->timeout &&
-			time_after(jiffies, (unsigned long)sc->timeout))) {
-			dev_err(&octeon_dev->pci_dev->dev, "%s: cmd failed, timeout (%ld, %ld)\n",
-				__func__, (long)jiffies, (long)sc->timeout);
+		} else if (unlikely(force_quit) || (sc->expiry_time &&
+			time_after(jiffies, (unsigned long)sc->expiry_time))) {
+			struct octeon_instr_irh *irh =
+				(struct octeon_instr_irh *)&sc->cmd.cmd3.irh;
+
+			dev_err(&octeon_dev->pci_dev->dev, "%s: ", __func__);
+			dev_err(&octeon_dev->pci_dev->dev,
+				"cmd %x/%x/%llx/%llx failed, ",
+				irh->opcode, irh->subcode,
+				sc->cmd.cmd3.ossp[0], sc->cmd.cmd3.ossp[1]);
+			dev_err(&octeon_dev->pci_dev->dev,
+				"timeout (%ld, %ld)\n",
+				(long)jiffies, (long)sc->expiry_time);
 			status = OCTEON_REQUEST_TIMEOUT;
 		}
 
 		if (status != OCTEON_REQUEST_PENDING) {
+			sc->sc_status = status;
+
 			/* we have received a response or we have timed out */
 			/* remove node from linked list */
 			list_del(&sc->node);
 			atomic_dec(&octeon_dev->response_list
-					[OCTEON_ORDERED_SC_LIST].
-					pending_req_count);
-			spin_unlock_bh
-				(&ordered_sc_list->lock);
+				   [OCTEON_ORDERED_SC_LIST].
+				   pending_req_count);
+
+			if (!sc->callback) {
+				atomic_inc(&octeon_dev->response_list
+					   [OCTEON_DONE_SC_LIST].
+					   pending_req_count);
+				list_add_tail(&sc->node,
+					      &octeon_dev->response_list
+					      [OCTEON_DONE_SC_LIST].head);
+
+				if (unlikely(READ_ONCE(sc->caller_is_done))) {
+					/* caller does not wait for response
+					 * from firmware
+					 */
+					if (status != OCTEON_REQUEST_DONE) {
+						struct octeon_instr_irh *irh;
+
+						irh =
+						    (struct octeon_instr_irh *)
+						    &sc->cmd.cmd3.irh;
+						dev_dbg
+						    (&octeon_dev->pci_dev->dev,
+						     "%s: sc failed: opcode=%x, ",
+						     __func__, irh->opcode);
+						dev_dbg
+						    (&octeon_dev->pci_dev->dev,
+						     "subcode=%x, ossp[0]=%llx, ",
+						     irh->subcode,
+						     sc->cmd.cmd3.ossp[0]);
+						dev_dbg
+						    (&octeon_dev->pci_dev->dev,
+						     "ossp[1]=%llx, status=%d\n",
+						     sc->cmd.cmd3.ossp[1],
+						     status);
+					}
+				} else {
+					complete(&sc->complete);
+				}
+
+				spin_unlock_bh(&ordered_sc_list->lock);
+			} else {
+				/* sc with callback function */
+				if (status == OCTEON_REQUEST_TIMEOUT) {
+					atomic_inc(&octeon_dev->response_list
+						   [OCTEON_ZOMBIE_SC_LIST].
+						   pending_req_count);
+					list_add_tail(&sc->node,
+						      &octeon_dev->response_list
+						      [OCTEON_ZOMBIE_SC_LIST].
+						      head);
+				}
+
+				spin_unlock_bh(&ordered_sc_list->lock);
 
-			if (sc->callback)
 				sc->callback(octeon_dev, status,
 					     sc->callback_arg);
+				/* sc is freed by caller */
+			}
 
 			request_complete++;
diff --git a/drivers/net/ethernet/cavium/liquidio/response_manager.h b/drivers/net/ethernet/cavium/liquidio/response_manager.h
index 9169c2815dba..ed4020d26fae 100644
--- a/drivers/net/ethernet/cavium/liquidio/response_manager.h
+++ b/drivers/net/ethernet/cavium/liquidio/response_manager.h
@@ -53,7 +53,9 @@ enum {
 	OCTEON_ORDERED_LIST = 0,
 	OCTEON_UNORDERED_NONBLOCKING_LIST = 1,
 	OCTEON_UNORDERED_BLOCKING_LIST = 2,
-	OCTEON_ORDERED_SC_LIST = 3
+	OCTEON_ORDERED_SC_LIST = 3,
+	OCTEON_DONE_SC_LIST = 4,
+	OCTEON_ZOMBIE_SC_LIST = 5
 };
 
 /** Response Order values for a Octeon Request. */