author     Florian Fainelli <f.fainelli@gmail.com>    2018-11-06 12:58:37 -0800
committer  David S. Miller <davem@davemloft.net>      2018-11-06 15:05:22 -0800
commit     ae7a5aff783c79d5ca87867df84b08c43447159b (patch)
tree       013b5889ed4b2ab0ed2eff07c786ccfe5c91f3d8 /drivers/net/dsa/bcm_sf2_cfp.c
parent     95772ec991f2df6ed20c070803dde5716d1a78fa (diff)
net: dsa: bcm_sf2: Keep copy of inserted rules
We tried hard to use the hardware as a storage area, which made things needlessly complex in that we had to both marshall and unmarshall the ethtool_rx_flow_spec into what the CFP hardware understands, but it did not require any driver-level allocations, so that was nice.

Keep a copy of the ethtool_rx_flow_spec rule we want to insert, and also make sure we don't already have a duplicate rule. This greatly speeds up deletion time, since we only need to clear the slice's valid bit and not perform a full read.

This is a preparatory step for being able to restore rules upon system resumption, where the hardware loses its context partially or entirely.

Signed-off-by: Florian Fainelli <f.fainelli@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
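The pattern the message describes -- keep a software copy of every rule that was programmed, reject duplicates before touching the hardware, and delete by clearing the hardware slot while the cached entry supplies the details -- is sketched below as a minimal, self-contained userspace C program. It is an illustration only: struct sw_rule, rule_find(), rule_insert() and rule_delete() are hypothetical stand-ins, and the driver itself (see the diff that follows) stores full struct ethtool_rx_flow_spec entries on a kernel list_head.

#include <stdio.h>
#include <stdlib.h>

struct sw_rule {
        int port;
        unsigned int location;          /* hardware slot the rule went into */
        unsigned int flow_type;         /* stands in for the full flow spec */
        struct sw_rule *next;
};

static struct sw_rule *rules;           /* head of the cached-rule list */

/* Look a cached rule up by (port, location); NULL if it was never inserted. */
static struct sw_rule *rule_find(int port, unsigned int location)
{
        struct sw_rule *r;

        for (r = rules; r; r = r->next)
                if (r->port == port && r->location == location)
                        return r;
        return NULL;
}

/* Reject duplicates first, then cache the rule once the hardware took it. */
static int rule_insert(int port, unsigned int location, unsigned int flow_type)
{
        struct sw_rule *r;

        for (r = rules; r; r = r->next)
                if (r->port == port && r->flow_type == flow_type)
                        return -1;      /* the driver returns -EEXIST here */

        /* ... program the hardware; only cache the rule on success ... */

        r = calloc(1, sizeof(*r));
        if (!r)
                return -1;              /* -ENOMEM in the driver */
        r->port = port;
        r->location = location;
        r->flow_type = flow_type;
        r->next = rules;
        rules = r;
        return 0;
}

/* Deleting only needs the cached entry plus clearing the hardware slot. */
static int rule_delete(int port, unsigned int location)
{
        struct sw_rule **pp, *r;

        for (pp = &rules; (r = *pp) != NULL; pp = &r->next) {
                if (r->port == port && r->location == location) {
                        /* ... clear the slot's valid bit in hardware ... */
                        *pp = r->next;
                        free(r);
                        return 0;
                }
        }
        return -1;                      /* -EINVAL in the driver */
}

int main(void)
{
        rule_insert(1, 5, 0x01);
        printf("duplicate rejected: %d\n", rule_insert(1, 9, 0x01));
        printf("cached rule found:  %s\n", rule_find(1, 5) ? "yes" : "no");
        printf("delete result:      %d\n", rule_delete(1, 5));
        return 0;
}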
Diffstat (limited to 'drivers/net/dsa/bcm_sf2_cfp.c')
 -rw-r--r--   drivers/net/dsa/bcm_sf2_cfp.c   144
 1 file changed, 133 insertions(+), 11 deletions(-)
diff --git a/drivers/net/dsa/bcm_sf2_cfp.c b/drivers/net/dsa/bcm_sf2_cfp.c
index 47c5f272a084..29b6b4204662 100644
--- a/drivers/net/dsa/bcm_sf2_cfp.c
+++ b/drivers/net/dsa/bcm_sf2_cfp.c
@@ -20,6 +20,12 @@
#include "bcm_sf2.h"
#include "bcm_sf2_regs.h"
+struct cfp_rule {
+ int port;
+ struct ethtool_rx_flow_spec fs;
+ struct list_head next;
+};
+
struct cfp_udf_slice_layout {
u8 slices[UDFS_PER_SLICE];
u32 mask_value;
@@ -515,6 +521,61 @@ static void bcm_sf2_cfp_slice_ipv6(struct bcm_sf2_priv *priv,
core_writel(priv, reg, offset);
}
+static struct cfp_rule *bcm_sf2_cfp_rule_find(struct bcm_sf2_priv *priv,
+ int port, u32 location)
+{
+ struct cfp_rule *rule = NULL;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ if (rule->port == port && rule->fs.location == location)
+ break;
+ };
+
+ return rule;
+}
+
+static int bcm_sf2_cfp_rule_cmp(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rx_flow_spec *fs)
+{
+ struct cfp_rule *rule = NULL;
+ size_t fs_size = 0;
+ int ret = 1;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return ret;
+
+ list_for_each_entry(rule, &priv->cfp.rules_list, next) {
+ ret = 1;
+ if (rule->port != port)
+ continue;
+
+ if (rule->fs.flow_type != fs->flow_type ||
+ rule->fs.ring_cookie != fs->ring_cookie ||
+ rule->fs.m_ext.data[0] != fs->m_ext.data[0])
+ continue;
+
+ switch (fs->flow_type & ~FLOW_EXT) {
+ case TCP_V6_FLOW:
+ case UDP_V6_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip6_spec);
+ break;
+ case TCP_V4_FLOW:
+ case UDP_V4_FLOW:
+ fs_size = sizeof(struct ethtool_tcpip4_spec);
+ break;
+ default:
+ continue;
+ }
+
+ ret = memcmp(&rule->fs.h_u, &fs->h_u, fs_size);
+ ret |= memcmp(&rule->fs.m_u, &fs->m_u, fs_size);
+ if (ret == 0)
+ break;
+ }
+
+ return ret;
+}
+
static int bcm_sf2_cfp_ipv6_rule_set(struct bcm_sf2_priv *priv, int port,
unsigned int port_num,
unsigned int queue_num,
@@ -735,6 +796,7 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
s8 cpu_port = ds->ports[port].cpu_dp->index;
__u64 ring_cookie = fs->ring_cookie;
unsigned int queue_num, port_num;
+ struct cfp_rule *rule = NULL;
int ret = -EINVAL;
/* Check for unsupported extensions */
@@ -750,6 +812,10 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
fs->location > bcm_sf2_cfp_rule_size(priv))
return -EINVAL;
+ ret = bcm_sf2_cfp_rule_cmp(priv, port, fs);
+ if (ret == 0)
+ return -EEXIST;
+
/* This rule is a Wake-on-LAN filter and we must specifically
* target the CPU port in order for it to be working.
*/
@@ -775,6 +841,10 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
if (port_num >= 7)
port_num -= 1;
+ rule = kzalloc(sizeof(*rule), GFP_KERNEL);
+ if (!rule)
+ return -ENOMEM;
+
switch (fs->flow_type & ~FLOW_EXT) {
case TCP_V4_FLOW:
case UDP_V4_FLOW:
@@ -787,9 +857,19 @@ static int bcm_sf2_cfp_rule_set(struct dsa_switch *ds, int port,
queue_num, fs);
break;
default:
+ ret = -EINVAL;
break;
}
+ if (ret) {
+ kfree(rule);
+ return ret;
+ }
+
+ rule->port = port;
+ memcpy(&rule->fs, fs, sizeof(*fs));
+ list_add_tail(&rule->next, &priv->cfp.rules_list);
+
return ret;
}
@@ -833,6 +913,7 @@ static int bcm_sf2_cfp_rule_del_one(struct bcm_sf2_priv *priv, int port,
static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
u32 loc)
{
+ struct cfp_rule *rule;
u32 next_loc = 0;
int ret;
@@ -843,6 +924,10 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
if (!test_bit(loc, priv->cfp.unique) || loc == 0)
return -EINVAL;
+ rule = bcm_sf2_cfp_rule_find(priv, port, loc);
+ if (!rule)
+ return -EINVAL;
+
ret = bcm_sf2_cfp_rule_del_one(priv, port, loc, &next_loc);
if (ret)
return ret;
@@ -851,6 +936,9 @@ static int bcm_sf2_cfp_rule_del(struct bcm_sf2_priv *priv, int port,
if (next_loc)
ret = bcm_sf2_cfp_rule_del_one(priv, port, next_loc, NULL);
+ list_del(&rule->next);
+ kfree(rule);
+
return ret;
}
@@ -867,9 +955,9 @@ static void bcm_sf2_invert_masks(struct ethtool_rx_flow_spec *flow)
flow->m_ext.data[1] ^= cpu_to_be32(~0);
}
-static int bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
- struct ethtool_tcpip4_spec *v4_spec,
- bool mask)
+static int __maybe_unused bcm_sf2_cfp_unslice_ipv4(struct bcm_sf2_priv *priv,
+ struct ethtool_tcpip4_spec *v4_spec,
+ bool mask)
{
u32 reg, offset, ipv4;
u16 src_dst_port;
@@ -969,9 +1057,10 @@ static int bcm_sf2_cfp_ipv4_rule_get(struct bcm_sf2_priv *priv, int port,
return bcm_sf2_cfp_unslice_ipv4(priv, v4_m_spec, true);
}
-static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
- __be32 *ip6_addr, __be16 *port,
- bool mask)
+static int __maybe_unused bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
+ __be32 *ip6_addr,
+ __be16 *port,
+ bool mask)
{
u32 reg, tmp, offset;
@@ -1050,9 +1139,10 @@ static int bcm_sf2_cfp_unslice_ipv6(struct bcm_sf2_priv *priv,
return 0;
}
-static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
- struct ethtool_rx_flow_spec *fs,
- u32 next_loc)
+static int __maybe_unused bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv,
+ int port,
+ struct ethtool_rx_flow_spec *fs,
+ u32 next_loc)
{
struct ethtool_tcpip6_spec *v6_spec = NULL, *v6_m_spec = NULL;
u32 reg;
@@ -1111,8 +1201,9 @@ static int bcm_sf2_cfp_ipv6_rule_get(struct bcm_sf2_priv *priv, int port,
&v6_m_spec->psrc, true);
}
-static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
- struct ethtool_rxnfc *nfc)
+static int __maybe_unused bcm_sf2_cfp_rule_get_hw(struct bcm_sf2_priv *priv,
+ int port,
+ struct ethtool_rxnfc *nfc)
{
u32 reg, ipv4_or_chain_id;
unsigned int queue_num;
@@ -1174,6 +1265,25 @@ static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
return 0;
}
+static int bcm_sf2_cfp_rule_get(struct bcm_sf2_priv *priv, int port,
+ struct ethtool_rxnfc *nfc)
+{
+ struct cfp_rule *rule;
+
+ rule = bcm_sf2_cfp_rule_find(priv, port, nfc->fs.location);
+ if (!rule)
+ return -EINVAL;
+
+ memcpy(&nfc->fs, &rule->fs, sizeof(rule->fs));
+
+ bcm_sf2_invert_masks(&nfc->fs);
+
+ /* Put the TCAM size here */
+ nfc->data = bcm_sf2_cfp_rule_size(priv);
+
+ return 0;
+}
+
/* We implement the search doing a TCAM search operation */
static int bcm_sf2_cfp_rule_get_all(struct bcm_sf2_priv *priv,
int port, struct ethtool_rxnfc *nfc,
@@ -1302,3 +1412,15 @@ int bcm_sf2_cfp_rst(struct bcm_sf2_priv *priv)
return 0;
}
+
+void bcm_sf2_cfp_exit(struct dsa_switch *ds)
+{
+ struct bcm_sf2_priv *priv = bcm_sf2_to_priv(ds);
+ struct cfp_rule *rule, *n;
+
+ if (list_empty(&priv->cfp.rules_list))
+ return;
+
+ list_for_each_entry_safe_reverse(rule, n, &priv->cfp.rules_list, next)
+ bcm_sf2_cfp_rule_del(priv, rule->port, rule->fs.location);
+}
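The message presents this as preparation for restoring rules after a system resume that wipes part or all of the hardware context. That restore path is not part of this patch; purely as an assumption about how the cached list could be used, the userspace sketch above might be extended with a replay loop like the one below, where program_hw() is a made-up stand-in for whatever re-insertion routine the driver ends up calling.

/* Hypothetical continuation of the earlier sketch: on resume, walk the
 * cached list and re-program every rule from the software copy.
 */
static int program_hw(const struct sw_rule *r)
{
        /* ... write the rule's slices/TCAM entries back into the switch ... */
        (void)r;
        return 0;
}

static int rules_resume(void)
{
        struct sw_rule *r;
        int ret;

        for (r = rules; r; r = r->next) {
                ret = program_hw(r);
                if (ret)
                        return ret;     /* stop on the first failure */
        }
        return 0;
}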