author    Linus Torvalds <torvalds@linux-foundation.org>  2014-12-15 18:06:13 -0800
committer Linus Torvalds <torvalds@linux-foundation.org>  2014-12-15 18:06:13 -0800
commit    dab363f938a53ddaee60bfecc1aebdbb3d3af5f0 (patch)
tree      ccdb11a6e6191ba71fbc7716714c47b79172070d /drivers/staging/octeon
parent    a68db9cb858d10820add66682ad4d412f9914288 (diff)
parent    17d2c6439be65777245914be354c5a97c76ad246 (diff)
download  linux-dab363f938a53ddaee60bfecc1aebdbb3d3af5f0.tar.bz2
Merge tag 'staging-3.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging
Pull staging driver updates from Greg KH:
 "Here's the big staging tree pull request for 3.19-rc1.

  We continued to delete more lines than were added, always a good
  thing, but not at a huge rate this release, only about 70k lines
  removed overall, mostly from removing the horrid bcm driver.

  Lots of normal staging driver cleanups and fixes all over the place,
  well over a thousand of them, the shortlog shows all the horrid
  details.

  The "contentious" thing here is the movement of the Android binder
  code out of staging into the "real" part of the kernel.  This is code
  that has been stable for a few years now and is working as-is in the
  tens of millions of devices with no issues.  Yes, the code is horrid,
  and the userspace api leaves a lot to be desired, but it's not going
  to change due to legacy issues that we have no control over.  Because
  so many devices and companies rely on this, and the code is stable,
  might as well promote it out of staging.

  This was all discussed at the Linux Plumbers conference, and everyone
  participating agreed that this was the best way forward.

  There is work happening right now to replace the binder code with
  something new, but I don't expect to see the results of that work for
  another year at the earliest.  If that ever happens, and Android
  switches over to it, I'll gladly remove this version.

  As for maintainers, I'll be glad to maintain this code, I've been
  doing it for the past few years with no problems.  I'll send a
  MAINTAINERS entry for it before 3.19-final is out, still need to talk
  to the Google developers about whether they are willing to help with
  it or not, last I checked they were, which was good.

  All of these patches have been in linux-next for a while with no
  reported issues"

* tag 'staging-3.19-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/gregkh/staging: (1382 commits)
  Staging: slicoss: Fix long line issues in slicoss.c
  staging: rtl8712: remove unnecessary else after return
  staging: comedi: change some printk calls to pr_err
  staging: rtl8723au: hal: Removed the extra semicolon
  lustre: Deletion of unnecessary checks before three function calls
  staging: lustre: fix sparse warnings: static function declaration
  staging: lustre: fixed sparse warnings related to static declarations
  staging: unisys: remove duplicate header
  staging: unisys: remove unneeded structure
  staging: ft1000 : replace __attribute ((__packed__) with __packed
  drivers: staging: rtl8192e: Include "asm/unaligned.h" instead of "access_ok.h" in "rtl819x_BAProc.c"
  Drivers:staging:rtl8192e: Fixed checkpatch warning
  Drivers:staging:clocking-wizard: Added a newline
  staging: clocking-wizard: check for a valid clk_name pointer
  staging: rtl8723au: Hal_InitPGData() avoid unnecessary typecasts
  staging: rtl8723au: _DisableAnalog(): Avoid zero-init variables unnecessarily
  staging: rtl8723au: Remove unnecessary wrapper _ResetDigitalProcedure1()
  staging: rtl8723au: _ResetDigitalProcedure1_92C() reduce code obfuscation
  staging: rtl8723au: Remove unnecessary wrapper _DisableRFAFEAndResetBB()
  staging: rtl8723au: _DisableRFAFEAndResetBB8192C(): Reduce code obfuscation
  ...
Diffstat (limited to 'drivers/staging/octeon')
-rw-r--r--  drivers/staging/octeon/ethernet-rx.c      | 156
-rw-r--r--  drivers/staging/octeon/ethernet-tx.c      |  11
-rw-r--r--  drivers/staging/octeon/ethernet.c         |   8
-rw-r--r--  drivers/staging/octeon/octeon-ethernet.h  |   1
4 files changed, 55 insertions(+), 121 deletions(-)
diff --git a/drivers/staging/octeon/ethernet-rx.c b/drivers/staging/octeon/ethernet-rx.c
index b2b6c3cd2bed..fcbe836aa997 100644
--- a/drivers/staging/octeon/ethernet-rx.c
+++ b/drivers/staging/octeon/ethernet-rx.c
@@ -61,66 +61,7 @@
#include <asm/octeon/cvmx-gmxx-defs.h>
-struct cvm_napi_wrapper {
- struct napi_struct napi;
-} ____cacheline_aligned_in_smp;
-
-static struct cvm_napi_wrapper cvm_oct_napi[NR_CPUS] __cacheline_aligned_in_smp;
-
-struct cvm_oct_core_state {
- int baseline_cores;
- /*
- * The number of additional cores that could be processing
- * input packets.
- */
- atomic_t available_cores;
- cpumask_t cpu_state;
-} ____cacheline_aligned_in_smp;
-
-static struct cvm_oct_core_state core_state __cacheline_aligned_in_smp;
-
-static int cvm_irq_cpu;
-
-static void cvm_oct_enable_napi(void *_)
-{
- int cpu = smp_processor_id();
- napi_schedule(&cvm_oct_napi[cpu].napi);
-}
-
-static void cvm_oct_enable_one_cpu(void)
-{
- int v;
- int cpu;
-
- /* Check to see if more CPUs are available for receive processing... */
- v = atomic_sub_if_positive(1, &core_state.available_cores);
- if (v < 0)
- return;
-
- /* ... if a CPU is available, Turn on NAPI polling for that CPU. */
- for_each_online_cpu(cpu) {
- if (!cpu_test_and_set(cpu, core_state.cpu_state)) {
- v = smp_call_function_single(cpu, cvm_oct_enable_napi,
- NULL, 0);
- if (v)
- panic("Can't enable NAPI.");
- break;
- }
- }
-}
-
-static void cvm_oct_no_more_work(void)
-{
- int cpu = smp_processor_id();
-
- if (cpu == cvm_irq_cpu) {
- enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
- return;
- }
-
- cpu_clear(cpu, core_state.cpu_state);
- atomic_add(1, &core_state.available_cores);
-}
+static struct napi_struct cvm_oct_napi;
/**
* cvm_oct_do_interrupt - interrupt handler.
@@ -132,8 +73,7 @@ static irqreturn_t cvm_oct_do_interrupt(int cpl, void *dev_id)
{
/* Disable the IRQ and start napi_poll. */
disable_irq_nosync(OCTEON_IRQ_WORKQ0 + pow_receive_group);
- cvm_irq_cpu = smp_processor_id();
- cvm_oct_enable_napi(NULL);
+ napi_schedule(&cvm_oct_napi);
return IRQ_HANDLED;
}
@@ -186,13 +126,15 @@ static inline int cvm_oct_check_rcv_error(cvmx_wqe_t *work)
if (*ptr == 0xd5) {
/*
- printk_ratelimited("Port %d received 0xd5 preamble\n", work->ipprt);
+ printk_ratelimited("Port %d received 0xd5 preamble\n",
+ work->ipprt);
*/
work->packet_ptr.s.addr += i + 1;
work->len -= i + 5;
} else if ((*ptr & 0xf) == 0xd) {
/*
- printk_ratelimited("Port %d received 0x?d preamble\n", work->ipprt);
+ printk_ratelimited("Port %d received 0x?d preamble\n",
+ work->ipprt);
*/
work->packet_ptr.s.addr += i;
work->len -= i + 4;
@@ -278,28 +220,15 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
cvmx_write_csr(CVMX_POW_WQ_INT, wq_int.u64);
break;
}
- pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) - sizeof(void *));
+ pskb = (struct sk_buff **)(cvm_oct_get_buffer_ptr(work->packet_ptr) -
+ sizeof(void *));
prefetch(pskb);
if (USE_ASYNC_IOBDMA && rx_count < (budget - 1)) {
- cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH, CVMX_POW_NO_WAIT);
+ cvmx_pow_work_request_async_nocheck(CVMX_SCR_SCRATCH,
+ CVMX_POW_NO_WAIT);
did_work_request = 1;
}
-
- if (rx_count == 0) {
- /*
- * First time through, see if there is enough
- * work waiting to merit waking another
- * CPU.
- */
- union cvmx_pow_wq_int_cntx counts;
- int backlog;
- int cores_in_use = core_state.baseline_cores - atomic_read(&core_state.available_cores);
- counts.u64 = cvmx_read_csr(CVMX_POW_WQ_INT_CNTX(pow_receive_group));
- backlog = counts.s.iq_cnt + counts.s.ds_cnt;
- if (backlog > budget * cores_in_use && napi != NULL)
- cvm_oct_enable_one_cpu();
- }
rx_count++;
skb_in_hw = USE_SKBUFFS_IN_HW && work->word2.s.bufs == 1;
@@ -322,7 +251,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* buffer.
*/
if (likely(skb_in_hw)) {
- skb->data = skb->head + work->packet_ptr.s.addr - cvmx_ptr_to_phys(skb->head);
+ skb->data = skb->head + work->packet_ptr.s.addr -
+ cvmx_ptr_to_phys(skb->head);
prefetch(skb->data);
skb->len = work->len;
skb_set_tail_pointer(skb, skb->len);
@@ -359,7 +289,8 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
/* No packet buffers to free */
} else {
int segments = work->word2.s.bufs;
- union cvmx_buf_ptr segment_ptr = work->packet_ptr;
+ union cvmx_buf_ptr segment_ptr =
+ work->packet_ptr;
int len = work->len;
while (segments--) {
@@ -375,8 +306,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
* one: int segment_size =
* segment_ptr.s.size;
*/
- int segment_size = CVMX_FPA_PACKET_POOL_SIZE -
- (segment_ptr.s.addr - (((segment_ptr.s.addr >> 7) - segment_ptr.s.back) << 7));
+ int segment_size =
+ CVMX_FPA_PACKET_POOL_SIZE -
+ (segment_ptr.s.addr -
+ (((segment_ptr.s.addr >> 7) -
+ segment_ptr.s.back) << 7));
/*
* Don't copy more than what
* is left in the packet.
@@ -407,8 +341,10 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
skb->protocol = eth_type_trans(skb, dev);
skb->dev = dev;
- if (unlikely(work->word2.s.not_IP || work->word2.s.IP_exc ||
- work->word2.s.L4_error || !work->word2.s.tcp_or_udp))
+ if (unlikely(work->word2.s.not_IP ||
+ work->word2.s.IP_exc ||
+ work->word2.s.L4_error ||
+ !work->word2.s.tcp_or_udp))
skb->ip_summed = CHECKSUM_NONE;
else
skb->ip_summed = CHECKSUM_UNNECESSARY;
@@ -416,11 +352,15 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
/* Increment RX stats for virtual ports */
if (work->ipprt >= CVMX_PIP_NUM_INPUT_PORTS) {
#ifdef CONFIG_64BIT
- atomic64_add(1, (atomic64_t *)&priv->stats.rx_packets);
- atomic64_add(skb->len, (atomic64_t *)&priv->stats.rx_bytes);
+ atomic64_add(1,
+ (atomic64_t *)&priv->stats.rx_packets);
+ atomic64_add(skb->len,
+ (atomic64_t *)&priv->stats.rx_bytes);
#else
- atomic_add(1, (atomic_t *)&priv->stats.rx_packets);
- atomic_add(skb->len, (atomic_t *)&priv->stats.rx_bytes);
+ atomic_add(1,
+ (atomic_t *)&priv->stats.rx_packets);
+ atomic_add(skb->len,
+ (atomic_t *)&priv->stats.rx_bytes);
#endif
}
netif_receive_skb(skb);
@@ -431,9 +371,11 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
dev->name);
*/
#ifdef CONFIG_64BIT
- atomic64_add(1, (atomic64_t *)&priv->stats.rx_dropped);
+ atomic64_add(1,
+ (atomic64_t *)&priv->stats.rx_dropped);
#else
- atomic_add(1, (atomic_t *)&priv->stats.rx_dropped);
+ atomic_add(1,
+ (atomic_t *)&priv->stats.rx_dropped);
#endif
dev_kfree_skb_irq(skb);
}
@@ -476,7 +418,7 @@ static int cvm_oct_napi_poll(struct napi_struct *napi, int budget)
if (rx_count < budget && napi != NULL) {
/* No more work */
napi_complete(napi);
- cvm_oct_no_more_work();
+ enable_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group);
}
return rx_count;
}
@@ -511,18 +453,10 @@ void cvm_oct_rx_initialize(void)
if (NULL == dev_for_napi)
panic("No net_devices were allocated.");
- if (max_rx_cpus >= 1 && max_rx_cpus < num_online_cpus())
- atomic_set(&core_state.available_cores, max_rx_cpus);
- else
- atomic_set(&core_state.available_cores, num_online_cpus());
- core_state.baseline_cores = atomic_read(&core_state.available_cores);
-
- core_state.cpu_state = CPU_MASK_NONE;
- for_each_possible_cpu(i) {
- netif_napi_add(dev_for_napi, &cvm_oct_napi[i].napi,
- cvm_oct_napi_poll, rx_napi_weight);
- napi_enable(&cvm_oct_napi[i].napi);
- }
+ netif_napi_add(dev_for_napi, &cvm_oct_napi, cvm_oct_napi_poll,
+ rx_napi_weight);
+ napi_enable(&cvm_oct_napi);
+
/* Register an IRQ handler to receive POW interrupts */
i = request_irq(OCTEON_IRQ_WORKQ0 + pow_receive_group,
cvm_oct_do_interrupt, 0, "Ethernet", cvm_oct_device);
@@ -543,15 +477,11 @@ void cvm_oct_rx_initialize(void)
int_pc.s.pc_thr = 5;
cvmx_write_csr(CVMX_POW_WQ_INT_PC, int_pc.u64);
-
- /* Scheduld NAPI now. This will indirectly enable interrupts. */
- cvm_oct_enable_one_cpu();
+ /* Schedule NAPI now. This will indirectly enable the interrupt. */
+ napi_schedule(&cvm_oct_napi);
}
void cvm_oct_rx_shutdown(void)
{
- int i;
- /* Shutdown all of the NAPIs */
- for_each_possible_cpu(i)
- netif_napi_del(&cvm_oct_napi[i].napi);
+ netif_napi_del(&cvm_oct_napi);
}
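
The net effect of the ethernet-rx.c changes above is to collapse the per-CPU
cvm_napi_wrapper array and its core-accounting machinery into one global
napi_struct: the IRQ handler masks the interrupt and schedules NAPI, and the
poll function unmasks the interrupt once it finishes under budget.  A minimal
sketch of that single-instance pattern, with hypothetical names (my_napi,
my_poll, MY_IRQ, process_rx_packets) standing in for the driver's own:

	#include <linux/interrupt.h>
	#include <linux/netdevice.h>

	static struct napi_struct my_napi;

	static irqreturn_t my_interrupt(int irq, void *dev_id)
	{
		/* Mask the IRQ; the poll loop unmasks it when idle. */
		disable_irq_nosync(MY_IRQ);
		napi_schedule(&my_napi);
		return IRQ_HANDLED;
	}

	static int my_poll(struct napi_struct *napi, int budget)
	{
		/* process_rx_packets() is a stand-in for the rx loop. */
		int rx_count = process_rx_packets(budget);

		if (rx_count < budget) {
			/* No more work: stop polling, unmask the IRQ. */
			napi_complete(napi);
			enable_irq(MY_IRQ);
		}
		return rx_count;
	}

	/* Setup and teardown mirror cvm_oct_rx_initialize() and
	 * cvm_oct_rx_shutdown():
	 *   netif_napi_add(dev, &my_napi, my_poll, weight);
	 *   napi_enable(&my_napi);
	 *   ...
	 *   netif_napi_del(&my_napi);
	 */
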
diff --git a/drivers/staging/octeon/ethernet-tx.c b/drivers/staging/octeon/ethernet-tx.c
index 4e54d8540219..b7a7854d3f7e 100644
--- a/drivers/staging/octeon/ethernet-tx.c
+++ b/drivers/staging/octeon/ethernet-tx.c
@@ -77,6 +77,7 @@ static DECLARE_TASKLET(cvm_oct_tx_cleanup_tasklet, cvm_oct_tx_do_cleanup, 0);
static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
{
int32_t undo;
+
undo = skb_to_free > 0 ? MAX_SKB_TO_FREE : skb_to_free +
MAX_SKB_TO_FREE;
if (undo > 0)
@@ -89,6 +90,7 @@ static inline int32_t cvm_oct_adjust_skb_to_free(int32_t skb_to_free, int fau)
static void cvm_oct_kick_tx_poll_watchdog(void)
{
union cvmx_ciu_timx ciu_timx;
+
ciu_timx.u64 = 0;
ciu_timx.s.one_shot = 1;
ciu_timx.s.len = cvm_oct_tx_poll_interval;
@@ -118,9 +120,11 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
total_freed += skb_to_free;
if (skb_to_free > 0) {
struct sk_buff *to_free_list = NULL;
+
spin_lock_irqsave(&priv->tx_free_list[qos].lock, flags);
while (skb_to_free > 0) {
struct sk_buff *t;
+
t = __skb_dequeue(&priv->tx_free_list[qos]);
t->next = to_free_list;
to_free_list = t;
@@ -131,6 +135,7 @@ static void cvm_oct_free_tx_skbs(struct net_device *dev)
/* Do the actual freeing outside of the lock. */
while (to_free_list) {
struct sk_buff *t = to_free_list;
+
to_free_list = to_free_list->next;
dev_kfree_skb_any(t);
}
@@ -258,6 +263,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
cvmx_read_csr(CVMX_GMXX_PRTX_CFG(index, interface));
if (gmx_prt_cfg.s.duplex == 0) {
int add_bytes = 64 - skb->len;
+
if ((skb_tail_pointer(skb) + add_bytes) <=
skb_end_pointer(skb))
memset(__skb_put(skb, add_bytes), 0,
@@ -289,6 +295,7 @@ int cvm_oct_xmit(struct sk_buff *skb, struct net_device *dev)
CVM_OCT_SKB_CB(skb)[0] = hw_buffer.u64;
for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
struct skb_frag_struct *fs = skb_shinfo(skb)->frags + i;
+
hw_buffer.s.addr = XKPHYS_TO_PHYS(
(u64)(page_address(fs->page.p) +
fs->page_offset));
@@ -495,6 +502,7 @@ skip_xmit:
while (skb_to_free > 0) {
struct sk_buff *t = __skb_dequeue(&priv->tx_free_list[qos]);
+
t->next = to_free_list;
to_free_list = t;
skb_to_free--;
@@ -505,6 +513,7 @@ skip_xmit:
/* Do the actual freeing outside of the lock. */
while (to_free_list) {
struct sk_buff *t = to_free_list;
+
to_free_list = to_free_list->next;
dev_kfree_skb_any(t);
}
@@ -550,6 +559,7 @@ int cvm_oct_xmit_pow(struct sk_buff *skb, struct net_device *dev)
/* Get a work queue entry */
cvmx_wqe_t *work = cvmx_fpa_alloc(CVMX_FPA_WQE_POOL);
+
if (unlikely(work == NULL)) {
printk_ratelimited("%s: Failed to allocate a work queue entry\n",
dev->name);
@@ -713,6 +723,7 @@ static void cvm_oct_tx_do_cleanup(unsigned long arg)
for (port = 0; port < TOTAL_NUMBER_OF_PORTS; port++) {
if (cvm_oct_device[port]) {
struct net_device *dev = cvm_oct_device[port];
+
cvm_oct_free_tx_skbs(dev);
}
}
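
Every ethernet-tx.c hunk above is the same checkpatch.pl cleanup: inserting a
blank line between a block's local variable declarations and its first
statement ("Missing a blank line after declarations").  A before/after sketch
of the rule, reusing a line from the diff:

	/* before: checkpatch warns */
	struct sk_buff *t;
	t = __skb_dequeue(&priv->tx_free_list[qos]);

	/* after: declaration separated from code */
	struct sk_buff *t;

	t = __skb_dequeue(&priv->tx_free_list[qos]);
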
diff --git a/drivers/staging/octeon/ethernet.c b/drivers/staging/octeon/ethernet.c
index af24294d9466..ee321496dcdd 100644
--- a/drivers/staging/octeon/ethernet.c
+++ b/drivers/staging/octeon/ethernet.c
@@ -98,12 +98,6 @@ MODULE_PARM_DESC(pow_send_list, "\n"
"\t\"eth2,spi3,spi7\" would cause these three devices to transmit\n"
"\tusing the pow_send_group.");
-int max_rx_cpus = -1;
-module_param(max_rx_cpus, int, 0444);
-MODULE_PARM_DESC(max_rx_cpus, "\n"
- "\t\tThe maximum number of CPUs to use for packet reception.\n"
- "\t\tUse -1 to use all available CPUs.");
-
int rx_napi_weight = 32;
module_param(rx_napi_weight, int, 0444);
MODULE_PARM_DESC(rx_napi_weight, "The NAPI WEIGHT parameter.");
@@ -452,7 +446,7 @@ int cvm_oct_common_init(struct net_device *dev)
mac = of_get_mac_address(priv->of_node);
if (mac)
- memcpy(dev->dev_addr, mac, ETH_ALEN);
+ ether_addr_copy(dev->dev_addr, mac);
else
eth_hw_addr_random(dev);
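
The dev_addr hunk above replaces an open-coded memcpy() of ETH_ALEN bytes with
ether_addr_copy() from <linux/etherdevice.h>.  The helper copies exactly six
bytes and, per its kernel-doc, requires both pointers to be 16-bit aligned,
which holds for dev->dev_addr.  A sketch of the idiom as the diff uses it:

	#include <linux/etherdevice.h>

	/* Both dst and src must be u16-aligned; copies ETH_ALEN (6) bytes. */
	if (mac)
		ether_addr_copy(dev->dev_addr, mac);
	else
		eth_hw_addr_random(dev);
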
diff --git a/drivers/staging/octeon/octeon-ethernet.h b/drivers/staging/octeon/octeon-ethernet.h
index d0e321119914..f48dc766fada 100644
--- a/drivers/staging/octeon/octeon-ethernet.h
+++ b/drivers/staging/octeon/octeon-ethernet.h
@@ -99,7 +99,6 @@ extern struct workqueue_struct *cvm_oct_poll_queue;
extern atomic_t cvm_oct_poll_queue_stopping;
extern u64 cvm_oct_tx_poll_interval;
-extern int max_rx_cpus;
extern int rx_napi_weight;
#endif