-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e.h | 10
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h | 5
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_main.c | 30
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_ptp.c | 2
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c | 3
-rw-r--r--  drivers/net/ethernet/intel/i40e/i40e_xsk.c | 5
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_main.c | 1
-rw-r--r--  drivers/net/ethernet/intel/iavf/iavf_virtchnl.c | 7
-rw-r--r--  drivers/net/ethernet/intel/ice/Makefile | 1
-rw-r--r--  drivers/net/ethernet/intel/ice/ice.h | 16
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_adminq_cmd.h | 73
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.c | 174
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_common.h | 12
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.c | 42
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_dcb_lib.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_ethtool.c | 52
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.c | 1549
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_pipe.h | 29
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_flex_type.h | 374
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_hw_autogen.h | 2
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.c | 111
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_lib.h | 3
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_main.c | 657
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_type.h | 36
-rw-r--r--  drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbe/ixgbe_main.c | 6
-rw-r--r--  drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c | 14
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/catas.c | 2
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/main.c | 44
-rw-r--r--  drivers/net/ethernet/mellanox/mlx4/mlx4.h | 3
-rw-r--r--  drivers/net/ethernet/mellanox/mlxsw/core.c | 30
-rw-r--r--  drivers/net/netdevsim/dev.c | 13
-rw-r--r--  drivers/ptp/ptp_chardev.c | 63
-rw-r--r--  include/linux/netfilter.h | 4
-rw-r--r--  include/linux/netfilter/ipset/ip_set_getport.h | 2
-rw-r--r--  include/linux/netfilter/nf_conntrack_common.h | 20
-rw-r--r--  include/linux/netfilter/x_tables.h | 8
-rw-r--r--  include/linux/netfilter/xt_hashlimit.h | 11
-rw-r--r--  include/linux/netfilter/xt_physdev.h | 8
-rw-r--r--  include/linux/netfilter_arp/arp_tables.h | 2
-rw-r--r--  include/linux/netfilter_bridge.h | 7
-rw-r--r--  include/linux/netfilter_bridge/ebt_802_3.h | 12
-rw-r--r--  include/linux/netfilter_bridge/ebtables.h | 3
-rw-r--r--  include/linux/netfilter_ipv4/ip_tables.h | 9
-rw-r--r--  include/linux/netfilter_ipv6.h | 28
-rw-r--r--  include/linux/netfilter_ipv6/ip6_tables.h | 20
-rw-r--r--  include/linux/skbuff.h | 32
-rw-r--r--  include/net/devlink.h | 8
-rw-r--r--  include/net/inet_sock.h | 1
-rw-r--r--  include/net/ip.h | 1
-rw-r--r--  include/net/netfilter/br_netfilter.h | 4
-rw-r--r--  include/net/netfilter/ipv6/nf_conntrack_icmpv6.h | 21
-rw-r--r--  include/net/netfilter/nf_conntrack.h | 25
-rw-r--r--  include/net/netfilter/nf_conntrack_acct.h | 4
-rw-r--r--  include/net/netfilter/nf_conntrack_bridge.h | 11
-rw-r--r--  include/net/netfilter/nf_conntrack_core.h | 8
-rw-r--r--  include/net/netfilter/nf_conntrack_ecache.h | 84
-rw-r--r--  include/net/netfilter/nf_conntrack_expect.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack_extend.h | 2
-rw-r--r--  include/net/netfilter/nf_conntrack_l4proto.h | 16
-rw-r--r--  include/net/netfilter/nf_conntrack_labels.h | 11
-rw-r--r--  include/net/netfilter/nf_conntrack_synproxy.h | 41
-rw-r--r--  include/net/netfilter/nf_conntrack_timeout.h | 4
-rw-r--r--  include/net/netfilter/nf_conntrack_timestamp.h | 16
-rw-r--r--  include/net/netfilter/nf_conntrack_tuple.h | 4
-rw-r--r--  include/net/netfilter/nf_conntrack_zones.h | 6
-rw-r--r--  include/net/netfilter/nf_dup_netdev.h | 6
-rw-r--r--  include/net/netfilter/nf_flow_table.h | 6
-rw-r--r--  include/net/netfilter/nf_nat.h | 21
-rw-r--r--  include/net/netfilter/nf_nat_masquerade.h | 1
-rw-r--r--  include/net/netfilter/nf_queue.h | 4
-rw-r--r--  include/net/netfilter/nf_synproxy.h | 44
-rw-r--r--  include/net/netfilter/nf_tables.h | 8
-rw-r--r--  include/net/netfilter/nf_tables_offload.h | 10
-rw-r--r--  include/uapi/linux/devlink.h | 2
-rw-r--r--  include/uapi/linux/netfilter/nf_tables.h | 3
-rw-r--r--  include/uapi/linux/ptp_clock.h | 26
-rw-r--r--  net/bridge/netfilter/ebt_802_3.c | 8
-rw-r--r--  net/bridge/netfilter/nf_conntrack_bridge.c | 15
-rw-r--r--  net/core/devlink.c | 35
-rw-r--r--  net/ipv4/ip_output.c | 3
-rw-r--r--  net/ipv4/netfilter/Kconfig | 8
-rw-r--r--  net/ipv4/netfilter/Makefile | 2
-rw-r--r--  net/ipv4/ping.c | 2
-rw-r--r--  net/ipv4/raw.c | 4
-rw-r--r--  net/ipv4/udp.c | 2
-rw-r--r--  net/ipv6/ip6_output.c | 3
-rw-r--r--  net/ipv6/netfilter.c | 4
-rw-r--r--  net/ipv6/netfilter/ip6t_ipv6header.c | 4
-rw-r--r--  net/ipv6/netfilter/nf_log_ipv6.c | 4
-rw-r--r--  net/ipv6/netfilter/nf_socket_ipv6.c | 1
-rw-r--r--  net/ipv6/raw.c | 4
-rw-r--r--  net/ipv6/udp.c | 3
-rw-r--r--  net/netfilter/Kconfig | 8
-rw-r--r--  net/netfilter/Makefile | 2
-rw-r--r--  net/netfilter/nf_conntrack_ecache.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_expect.c | 2
-rw-r--r--  net/netfilter/nf_conntrack_helper.c | 5
-rw-r--r--  net/netfilter/nf_conntrack_proto_icmpv6.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_standalone.c | 1
-rw-r--r--  net/netfilter/nf_conntrack_timeout.c | 1
-rw-r--r--  net/netfilter/nf_dup_netdev.c | 21
-rw-r--r--  net/netfilter/nf_flow_table_core.c | 1
-rw-r--r--  net/netfilter/nf_nat_core.c | 6
-rw-r--r--  net/netfilter/nf_tables_api.c | 25
-rw-r--r--  net/netfilter/nf_tables_offload.c | 186
-rw-r--r--  net/netfilter/nft_dup_netdev.c | 12
-rw-r--r--  net/netfilter/nft_flow_offload.c | 3
-rw-r--r--  net/netfilter/nft_fwd_netdev.c | 12
-rw-r--r--  net/netfilter/nft_synproxy.c | 143
-rw-r--r--  net/netfilter/xt_connlimit.c | 2
-rw-r--r--  net/netfilter/xt_hashlimit.c | 7
-rw-r--r--  net/netfilter/xt_physdev.c | 5
-rw-r--r--  net/sched/act_ct.c | 2
114 files changed, 3783 insertions, 708 deletions
diff --git a/drivers/net/ethernet/intel/i40e/i40e.h b/drivers/net/ethernet/intel/i40e/i40e.h
index f1a1bd324b50..2af9f6308f84 100644
--- a/drivers/net/ethernet/intel/i40e/i40e.h
+++ b/drivers/net/ethernet/intel/i40e/i40e.h
@@ -243,11 +243,11 @@ struct i40e_fdir_filter {
u32 fd_id;
};
-#define I40E_CLOUD_FIELD_OMAC 0x01
-#define I40E_CLOUD_FIELD_IMAC 0x02
-#define I40E_CLOUD_FIELD_IVLAN 0x04
-#define I40E_CLOUD_FIELD_TEN_ID 0x08
-#define I40E_CLOUD_FIELD_IIP 0x10
+#define I40E_CLOUD_FIELD_OMAC BIT(0)
+#define I40E_CLOUD_FIELD_IMAC BIT(1)
+#define I40E_CLOUD_FIELD_IVLAN BIT(2)
+#define I40E_CLOUD_FIELD_TEN_ID BIT(3)
+#define I40E_CLOUD_FIELD_IIP BIT(4)
#define I40E_CLOUD_FILTER_FLAGS_OMAC I40E_CLOUD_FIELD_OMAC
#define I40E_CLOUD_FILTER_FLAGS_IMAC I40E_CLOUD_FIELD_IMAC
diff --git a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
index 21cccec328e3..530613f31527 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/i40e/i40e_adminq_cmd.h
@@ -1382,7 +1382,7 @@ struct i40e_aqc_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_MASK (0x3F << \
I40E_AQC_ADD_CLOUD_FILTER_SHIFT)
/* 0x0000 reserved */
-#define I40E_AQC_ADD_CLOUD_FILTER_OIP 0x0001
+/* 0x0001 reserved */
/* 0x0002 reserved */
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN 0x0003
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC_IVLAN_TEN_ID 0x0004
@@ -1394,6 +1394,9 @@ struct i40e_aqc_cloud_filters_element_data {
#define I40E_AQC_ADD_CLOUD_FILTER_IMAC 0x000A
#define I40E_AQC_ADD_CLOUD_FILTER_OMAC_TEN_ID_IMAC 0x000B
#define I40E_AQC_ADD_CLOUD_FILTER_IIP 0x000C
+/* 0x000D reserved */
+/* 0x000E reserved */
+/* 0x000F reserved */
/* 0x0010 to 0x0017 is for custom filters */
#define I40E_AQC_ADD_CLOUD_FILTER_IP_PORT 0x0010 /* Dest IP + L4 Port */
#define I40E_AQC_ADD_CLOUD_FILTER_MAC_PORT 0x0011 /* Dest MAC + L4 Port */
diff --git a/drivers/net/ethernet/intel/i40e/i40e_main.c b/drivers/net/ethernet/intel/i40e/i40e_main.c
index e9f2f276bf27..6031223eafab 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_main.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_main.c
@@ -2592,6 +2592,10 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
return;
if (!test_and_clear_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state))
return;
+ if (test_and_set_bit(__I40E_VF_DISABLE, pf->state)) {
+ set_bit(__I40E_MACVLAN_SYNC_PENDING, pf->state);
+ return;
+ }
for (v = 0; v < pf->num_alloc_vsi; v++) {
if (pf->vsi[v] &&
@@ -2606,6 +2610,7 @@ static void i40e_sync_filters_subtask(struct i40e_pf *pf)
}
}
}
+ clear_bit(__I40E_VF_DISABLE, pf->state);
}
/**
@@ -6564,19 +6569,19 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
}
if (pf->hw.phy.link_info.link_speed == I40E_LINK_SPEED_25GB) {
- req_fec = ", Requested FEC: None";
- fec = ", FEC: None";
- an = ", Autoneg: False";
+ req_fec = "None";
+ fec = "None";
+ an = "False";
if (pf->hw.phy.link_info.an_info & I40E_AQ_AN_COMPLETED)
- an = ", Autoneg: True";
+ an = "True";
if (pf->hw.phy.link_info.fec_info &
I40E_AQ_CONFIG_FEC_KR_ENA)
- fec = ", FEC: CL74 FC-FEC/BASE-R";
+ fec = "CL74 FC-FEC/BASE-R";
else if (pf->hw.phy.link_info.fec_info &
I40E_AQ_CONFIG_FEC_RS_ENA)
- fec = ", FEC: CL108 RS-FEC";
+ fec = "CL108 RS-FEC";
/* 'CL108 RS-FEC' should be displayed when RS is requested, or
* both RS and FC are requested
@@ -6585,14 +6590,19 @@ void i40e_print_link_message(struct i40e_vsi *vsi, bool isup)
(I40E_AQ_REQUEST_FEC_KR | I40E_AQ_REQUEST_FEC_RS)) {
if (vsi->back->hw.phy.link_info.req_fec_info &
I40E_AQ_REQUEST_FEC_RS)
- req_fec = ", Requested FEC: CL108 RS-FEC";
+ req_fec = "CL108 RS-FEC";
else
- req_fec = ", Requested FEC: CL74 FC-FEC/BASE-R";
+ req_fec = "CL74 FC-FEC/BASE-R";
}
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Requested FEC: %s, Negotiated FEC: %s, Autoneg: %s, Flow Control: %s\n",
+ speed, req_fec, fec, an, fc);
+ } else {
+ netdev_info(vsi->netdev,
+ "NIC Link is Up, %sbps Full Duplex, Flow Control: %s\n",
+ speed, fc);
}
- netdev_info(vsi->netdev, "NIC Link is Up, %sbps Full Duplex%s%s%s, Flow Control: %s\n",
- speed, req_fec, fec, an, fc);
}
/**
diff --git a/drivers/net/ethernet/intel/i40e/i40e_ptp.c b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
index 11394a52e21c..9bf1ad4319f5 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_ptp.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_ptp.c
@@ -725,7 +725,7 @@ static long i40e_ptp_create_clock(struct i40e_pf *pf)
pf->tstamp_config.tx_type = HWTSTAMP_TX_OFF;
/* Set the previous "reset" time to the current Kernel clock time */
- pf->ptp_prev_hw_time = ktime_to_timespec64(ktime_get_real());
+ ktime_get_real_ts64(&pf->ptp_prev_hw_time);
pf->ptp_reset_start = ktime_get();
return 0;
diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
index f8aa4deceb5e..3d2440838822 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c
@@ -4263,7 +4263,8 @@ int i40e_ndo_set_vf_bw(struct net_device *netdev, int vf_id, int min_tx_rate,
if (min_tx_rate) {
dev_err(&pf->pdev->dev, "Invalid min tx rate (%d) (greater than 0) specified for VF %d.\n",
min_tx_rate, vf_id);
- return -EINVAL;
+ ret = -EINVAL;
+ goto error;
}
vf = &pf->vf[vf_id];
diff --git a/drivers/net/ethernet/intel/i40e/i40e_xsk.c b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
index 0373bc6c7e61..feb5bd54d840 100644
--- a/drivers/net/ethernet/intel/i40e/i40e_xsk.c
+++ b/drivers/net/ethernet/intel/i40e/i40e_xsk.c
@@ -157,6 +157,11 @@ static int i40e_xsk_umem_disable(struct i40e_vsi *vsi, u16 qid)
err = i40e_queue_pair_enable(vsi, qid);
if (err)
return err;
+
+ /* Kick start the NAPI context so that receiving will start */
+ err = i40e_xsk_wakeup(vsi->netdev, qid, XDP_WAKEUP_RX);
+ if (err)
+ return err;
}
return 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_main.c b/drivers/net/ethernet/intel/iavf/iavf_main.c
index 07f5541a0f01..8f310e520b06 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_main.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_main.c
@@ -804,7 +804,6 @@ static int iavf_set_mac(struct net_device *netdev, void *p)
if (f) {
ether_addr_copy(hw->mac.addr, addr->sa_data);
- ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
}
return (f == NULL) ? -ENOMEM : 0;
diff --git a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
index d49d58a6de80..c46770eba320 100644
--- a/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
+++ b/drivers/net/ethernet/intel/iavf/iavf_virtchnl.c
@@ -1252,6 +1252,8 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
case VIRTCHNL_OP_ADD_ETH_ADDR:
dev_err(&adapter->pdev->dev, "Failed to add MAC filter, error %s\n",
iavf_stat_str(&adapter->hw, v_retval));
+ /* restore administratively set MAC address */
+ ether_addr_copy(adapter->hw.mac.addr, netdev->dev_addr);
break;
case VIRTCHNL_OP_DEL_VLAN:
dev_err(&adapter->pdev->dev, "Failed to delete VLAN filter, error %s\n",
@@ -1319,6 +1321,11 @@ void iavf_virtchnl_completion(struct iavf_adapter *adapter,
}
}
switch (v_opcode) {
+ case VIRTCHNL_OP_ADD_ETH_ADDR: {
+ if (!ether_addr_equal(netdev->dev_addr, adapter->hw.mac.addr))
+ ether_addr_copy(netdev->dev_addr, adapter->hw.mac.addr);
+ }
+ break;
case VIRTCHNL_OP_GET_STATS: {
struct iavf_eth_stats *stats =
(struct iavf_eth_stats *)msg;
diff --git a/drivers/net/ethernet/intel/ice/Makefile b/drivers/net/ethernet/intel/ice/Makefile
index 2d140ba83781..9edde960b4f2 100644
--- a/drivers/net/ethernet/intel/ice/Makefile
+++ b/drivers/net/ethernet/intel/ice/Makefile
@@ -15,6 +15,7 @@ ice-y := ice_main.o \
ice_sched.o \
ice_lib.o \
ice_txrx.o \
+ ice_flex_pipe.o \
ice_ethtool.o
ice-$(CONFIG_PCI_IOV) += ice_virtchnl_pf.o ice_sriov.o
ice-$(CONFIG_DCB) += ice_dcb.o ice_dcb_lib.o
diff --git a/drivers/net/ethernet/intel/ice/ice.h b/drivers/net/ethernet/intel/ice/ice.h
index b36e1cf0e461..45e100666049 100644
--- a/drivers/net/ethernet/intel/ice/ice.h
+++ b/drivers/net/ethernet/intel/ice/ice.h
@@ -8,6 +8,7 @@
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/module.h>
+#include <linux/firmware.h>
#include <linux/netdevice.h>
#include <linux/compiler.h>
#include <linux/etherdevice.h>
@@ -29,6 +30,7 @@
#include <linux/sctp.h>
#include <linux/ipv6.h>
#include <linux/if_bridge.h>
+#include <linux/ctype.h>
#include <linux/avf/virtchnl.h>
#include <net/ipv6.h>
#include "ice_devids.h"
@@ -52,7 +54,6 @@ extern const char ice_drv_ver[];
#define ICE_DFLT_TRAFFIC_CLASS BIT(0)
#define ICE_INT_NAME_STR_LEN (IFNAMSIZ + 16)
-#define ICE_ETHTOOL_FWVER_LEN 32
#define ICE_AQ_LEN 64
#define ICE_MBXSQ_LEN 64
#define ICE_MBXRQ_LEN 512
@@ -307,6 +308,7 @@ enum ice_pf_flags {
ICE_FLAG_SRIOV_CAPABLE,
ICE_FLAG_DCB_CAPABLE,
ICE_FLAG_DCB_ENA,
+ ICE_FLAG_ADV_FEATURES,
ICE_FLAG_LINK_DOWN_ON_CLOSE_ENA,
ICE_FLAG_NO_MEDIA,
ICE_FLAG_FW_LLDP_AGENT,
@@ -405,6 +407,17 @@ ice_irq_dynamic_ena(struct ice_hw *hw, struct ice_vsi *vsi,
}
/**
+ * ice_netdev_to_pf - Retrieve the PF struct associated with a netdev
+ * @netdev: pointer to the netdev struct
+ */
+static inline struct ice_pf *ice_netdev_to_pf(struct net_device *netdev)
+{
+ struct ice_netdev_priv *np = netdev_priv(netdev);
+
+ return np->vsi->back;
+}
+
+/**
* ice_get_main_vsi - Get the PF VSI
* @pf: PF instance
*
@@ -421,6 +434,7 @@ static inline struct ice_vsi *ice_get_main_vsi(struct ice_pf *pf)
int ice_vsi_setup_tx_rings(struct ice_vsi *vsi);
int ice_vsi_setup_rx_rings(struct ice_vsi *vsi);
void ice_set_ethtool_ops(struct net_device *netdev);
+void ice_set_ethtool_safe_mode_ops(struct net_device *netdev);
u16 ice_get_avail_txq_count(struct ice_pf *pf);
u16 ice_get_avail_rxq_count(struct ice_pf *pf);
void ice_update_vsi_stats(struct ice_vsi *vsi);
diff --git a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
index 4da0cde9695b..023e3d2fee5f 100644
--- a/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
+++ b/drivers/net/ethernet/intel/ice/ice_adminq_cmd.h
@@ -33,6 +33,17 @@ struct ice_aqc_get_ver {
u8 api_patch;
};
+/* Send driver version (indirect 0x0002) */
+struct ice_aqc_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 reserved[4];
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
/* Queue Shutdown (direct 0x0003) */
struct ice_aqc_q_shutdown {
u8 driver_unloading;
@@ -1519,6 +1530,56 @@ struct ice_aqc_get_clear_fw_log {
__le32 addr_low;
};
+/* Download Package (indirect 0x0C40) */
+/* Also used for Update Package (indirect 0x0C42) */
+struct ice_aqc_download_pkg {
+ u8 flags;
+#define ICE_AQC_DOWNLOAD_PKG_LAST_BUF 0x01
+ u8 reserved[3];
+ __le32 reserved1;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+struct ice_aqc_download_pkg_resp {
+ __le32 error_offset;
+ __le32 error_info;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Get Package Info List (indirect 0x0C43) */
+struct ice_aqc_get_pkg_info_list {
+ __le32 reserved1;
+ __le32 reserved2;
+ __le32 addr_high;
+ __le32 addr_low;
+};
+
+/* Version format for packages */
+struct ice_pkg_ver {
+ u8 major;
+ u8 minor;
+ u8 update;
+ u8 draft;
+};
+
+#define ICE_PKG_NAME_SIZE 32
+
+struct ice_aqc_get_pkg_info {
+ struct ice_pkg_ver ver;
+ char name[ICE_PKG_NAME_SIZE];
+ u8 is_in_nvm;
+ u8 is_active;
+ u8 is_active_at_boot;
+ u8 is_modified;
+};
+
+/* Get Package Info List response buffer format (0x0C43) */
+struct ice_aqc_get_pkg_info_resp {
+ __le32 count;
+ struct ice_aqc_get_pkg_info pkg_info[1];
+};
/**
* struct ice_aq_desc - Admin Queue (AQ) descriptor
* @flags: ICE_AQ_FLAG_* flags
@@ -1547,6 +1608,7 @@ struct ice_aq_desc {
u8 raw[16];
struct ice_aqc_generic generic;
struct ice_aqc_get_ver get_ver;
+ struct ice_aqc_driver_ver driver_ver;
struct ice_aqc_q_shutdown q_shutdown;
struct ice_aqc_req_res res_owner;
struct ice_aqc_manage_mac_read mac_read;
@@ -1580,6 +1642,7 @@ struct ice_aq_desc {
struct ice_aqc_add_update_free_vsi_resp add_update_free_vsi_res;
struct ice_aqc_fw_logging fw_logging;
struct ice_aqc_get_clear_fw_log get_clear_fw_log;
+ struct ice_aqc_download_pkg download_pkg;
struct ice_aqc_set_mac_lb set_mac_lb;
struct ice_aqc_alloc_free_res_cmd sw_res_ctrl;
struct ice_aqc_set_event_mask set_event_mask;
@@ -1612,12 +1675,18 @@ enum ice_aq_err {
ICE_AQ_RC_EEXIST = 13, /* Object already exists */
ICE_AQ_RC_ENOSPC = 16, /* No space left or allocation failure */
ICE_AQ_RC_ENOSYS = 17, /* Function not implemented */
+ ICE_AQ_RC_ENOSEC = 24, /* Missing security manifest */
+ ICE_AQ_RC_EBADSIG = 25, /* Bad RSA signature */
+ ICE_AQ_RC_ESVN = 26, /* SVN number prohibits this package */
+ ICE_AQ_RC_EBADMAN = 27, /* Manifest hash mismatch */
+ ICE_AQ_RC_EBADBUF = 28, /* Buffer hash mismatches manifest */
};
/* Admin Queue command opcodes */
enum ice_adminq_opc {
/* AQ commands */
ice_aqc_opc_get_ver = 0x0001,
+ ice_aqc_opc_driver_ver = 0x0002,
ice_aqc_opc_q_shutdown = 0x0003,
/* resource ownership */
@@ -1699,6 +1768,10 @@ enum ice_adminq_opc {
ice_aqc_opc_add_txqs = 0x0C30,
ice_aqc_opc_dis_txqs = 0x0C31,
+ /* package commands */
+ ice_aqc_opc_download_pkg = 0x0C40,
+ ice_aqc_opc_get_pkg_info_list = 0x0C43,
+
/* debug commands */
ice_aqc_opc_fw_logging = 0xFF09,
ice_aqc_opc_fw_logging_info = 0xFF10,
diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c
index 8b2c46615834..3a6b3950eb0e 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.c
+++ b/drivers/net/ethernet/intel/ice/ice_common.c
@@ -729,6 +729,29 @@ static void ice_get_itr_intrl_gran(struct ice_hw *hw)
}
/**
+ * ice_get_nvm_version - get cached NVM version data
+ * @hw: pointer to the hardware structure
+ * @oem_ver: 8 bit NVM version
+ * @oem_build: 16 bit NVM build number
+ * @oem_patch: 8 bit NVM patch number
+ * @ver_hi: high 16 bits of the NVM version
+ * @ver_lo: low 16 bits of the NVM version
+ */
+void
+ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
+ u8 *oem_patch, u8 *ver_hi, u8 *ver_lo)
+{
+ struct ice_nvm_info *nvm = &hw->nvm;
+
+ *oem_ver = (u8)((nvm->oem_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
+ *oem_patch = (u8)(nvm->oem_ver & ICE_OEM_VER_PATCH_MASK);
+ *oem_build = (u16)((nvm->oem_ver & ICE_OEM_VER_BUILD_MASK) >>
+ ICE_OEM_VER_BUILD_SHIFT);
+ *ver_hi = (nvm->ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT;
+ *ver_lo = (nvm->ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT;
+}
+
+/**
* ice_init_hw - main hardware initialization routine
* @hw: pointer to the hardware structure
*/
@@ -859,7 +882,9 @@ enum ice_status ice_init_hw(struct ice_hw *hw)
ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC);
ice_init_flex_flds(hw, ICE_RXDID_FLEX_NIC_2);
-
+ status = ice_init_hw_tbls(hw);
+ if (status)
+ goto err_unroll_fltr_mgmt_struct;
return 0;
err_unroll_fltr_mgmt_struct:
@@ -887,6 +912,8 @@ void ice_deinit_hw(struct ice_hw *hw)
ice_sched_cleanup_all(hw);
ice_sched_clear_agg(hw);
+ ice_free_seg(hw);
+ ice_free_hw_tbls(hw);
if (hw->port_info) {
devm_kfree(ice_hw_to_dev(hw), hw->port_info);
@@ -1207,6 +1234,12 @@ ice_debug_cq(struct ice_hw *hw, u32 __maybe_unused mask, void *desc, void *buf,
/* FW Admin Queue command wrappers */
+/* Software lock/mutex that is meant to be held while the Global Config Lock
+ * in firmware is acquired by the software to prevent most (but not all) types
+ * of AQ commands from being sent to FW
+ */
+DEFINE_MUTEX(ice_global_cfg_lock_sw);
+
/**
* ice_aq_send_cmd - send FW Admin Queue command to FW Admin Queue
* @hw: pointer to the HW struct
@@ -1221,7 +1254,38 @@ enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc, void *buf,
u16 buf_size, struct ice_sq_cd *cd)
{
- return ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
+ struct ice_aqc_req_res *cmd = &desc->params.res_owner;
+ bool lock_acquired = false;
+ enum ice_status status;
+
+ /* When a package download is in process (i.e. when the firmware's
+ * Global Configuration Lock resource is held), only the Download
+ * Package, Get Version, Get Package Info List and Release Resource
+ * (with resource ID set to Global Config Lock) AdminQ commands are
+ * allowed; all others must block until the package download completes
+ * and the Global Config Lock is released. See also
+ * ice_acquire_global_cfg_lock().
+ */
+ switch (le16_to_cpu(desc->opcode)) {
+ case ice_aqc_opc_download_pkg:
+ case ice_aqc_opc_get_pkg_info_list:
+ case ice_aqc_opc_get_ver:
+ break;
+ case ice_aqc_opc_release_res:
+ if (le16_to_cpu(cmd->res_id) == ICE_AQC_RES_ID_GLBL_LOCK)
+ break;
+ /* fall-through */
+ default:
+ mutex_lock(&ice_global_cfg_lock_sw);
+ lock_acquired = true;
+ break;
+ }
+
+ status = ice_sq_send_cmd(hw, &hw->adminq, desc, buf, buf_size, cd);
+ if (lock_acquired)
+ mutex_unlock(&ice_global_cfg_lock_sw);
+
+ return status;
}
/**
@@ -1259,6 +1323,43 @@ enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd)
}
/**
+ * ice_aq_send_driver_ver
+ * @hw: pointer to the HW struct
+ * @dv: driver's major, minor version
+ * @cd: pointer to command details structure or NULL
+ *
+ * Send the driver version (0x0002) to the firmware
+ */
+enum ice_status
+ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
+ struct ice_sq_cd *cd)
+{
+ struct ice_aqc_driver_ver *cmd;
+ struct ice_aq_desc desc;
+ u16 len;
+
+ cmd = &desc.params.driver_ver;
+
+ if (!dv)
+ return ICE_ERR_PARAM;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_driver_ver);
+
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+ cmd->major_ver = dv->major_ver;
+ cmd->minor_ver = dv->minor_ver;
+ cmd->build_ver = dv->build_ver;
+ cmd->subbuild_ver = dv->subbuild_ver;
+
+ len = 0;
+ while (len < sizeof(dv->driver_string) &&
+ isascii(dv->driver_string[len]) && dv->driver_string[len])
+ len++;
+
+ return ice_aq_send_cmd(hw, &desc, dv->driver_string, len, cd);
+}
+
+/**
* ice_aq_q_shutdown
* @hw: pointer to the HW struct
* @unloading: is the driver unloading itself
@@ -1746,6 +1847,75 @@ ice_discover_caps(struct ice_hw *hw, enum ice_adminq_opc opc)
}
/**
+ * ice_set_safe_mode_caps - Override dev/func capabilities when in safe mode
+ * @hw: pointer to the hardware structure
+ */
+void ice_set_safe_mode_caps(struct ice_hw *hw)
+{
+ struct ice_hw_func_caps *func_caps = &hw->func_caps;
+ struct ice_hw_dev_caps *dev_caps = &hw->dev_caps;
+ u32 valid_func, rxq_first_id, txq_first_id;
+ u32 msix_vector_first_id, max_mtu;
+ u32 num_func = 0;
+ u8 i;
+
+ /* cache some func_caps values that should be restored after memset */
+ valid_func = func_caps->common_cap.valid_functions;
+ txq_first_id = func_caps->common_cap.txq_first_id;
+ rxq_first_id = func_caps->common_cap.rxq_first_id;
+ msix_vector_first_id = func_caps->common_cap.msix_vector_first_id;
+ max_mtu = func_caps->common_cap.max_mtu;
+
+ /* unset func capabilities */
+ memset(func_caps, 0, sizeof(*func_caps));
+
+ /* restore cached values */
+ func_caps->common_cap.valid_functions = valid_func;
+ func_caps->common_cap.txq_first_id = txq_first_id;
+ func_caps->common_cap.rxq_first_id = rxq_first_id;
+ func_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
+ func_caps->common_cap.max_mtu = max_mtu;
+
+ /* one Tx and one Rx queue in safe mode */
+ func_caps->common_cap.num_rxq = 1;
+ func_caps->common_cap.num_txq = 1;
+
+ /* two MSIX vectors, one for traffic and one for misc causes */
+ func_caps->common_cap.num_msix_vectors = 2;
+ func_caps->guar_num_vsi = 1;
+
+ /* cache some dev_caps values that should be restored after memset */
+ valid_func = dev_caps->common_cap.valid_functions;
+ txq_first_id = dev_caps->common_cap.txq_first_id;
+ rxq_first_id = dev_caps->common_cap.rxq_first_id;
+ msix_vector_first_id = dev_caps->common_cap.msix_vector_first_id;
+ max_mtu = dev_caps->common_cap.max_mtu;
+
+ /* unset dev capabilities */
+ memset(dev_caps, 0, sizeof(*dev_caps));
+
+ /* restore cached values */
+ dev_caps->common_cap.valid_functions = valid_func;
+ dev_caps->common_cap.txq_first_id = txq_first_id;
+ dev_caps->common_cap.rxq_first_id = rxq_first_id;
+ dev_caps->common_cap.msix_vector_first_id = msix_vector_first_id;
+ dev_caps->common_cap.max_mtu = max_mtu;
+
+ /* valid_func is a bitmap. get number of functions */
+#define ICE_MAX_FUNCS 8
+ for (i = 0; i < ICE_MAX_FUNCS; i++)
+ if (valid_func & BIT(i))
+ num_func++;
+
+ /* one Tx and one Rx queue per function in safe mode */
+ dev_caps->common_cap.num_rxq = num_func;
+ dev_caps->common_cap.num_txq = num_func;
+
+ /* two MSIX vectors per function */
+ dev_caps->common_cap.num_msix_vectors = 2 * num_func;
+}
+
+/**
* ice_get_caps - get info about the HW
* @hw: pointer to the hardware structure
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_common.h b/drivers/net/ethernet/intel/ice/ice_common.h
index e376d1eadba4..c3df92f57777 100644
--- a/drivers/net/ethernet/intel/ice/ice_common.h
+++ b/drivers/net/ethernet/intel/ice/ice_common.h
@@ -6,6 +6,7 @@
#include "ice.h"
#include "ice_type.h"
+#include "ice_flex_pipe.h"
#include "ice_switch.h"
#include <linux/avf/virtchnl.h>
@@ -41,6 +42,8 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq,
void ice_clear_pxe_mode(struct ice_hw *hw);
enum ice_status ice_get_caps(struct ice_hw *hw);
+void ice_set_safe_mode_caps(struct ice_hw *hw);
+
void ice_dev_onetime_setup(struct ice_hw *hw);
enum ice_status
@@ -66,12 +69,18 @@ void ice_fill_dflt_direct_cmd_desc(struct ice_aq_desc *desc, u16 opcode);
extern const struct ice_ctx_ele ice_tlan_ctx_info[];
enum ice_status
ice_set_ctx(u8 *src_ctx, u8 *dest_ctx, const struct ice_ctx_ele *ce_info);
+
+extern struct mutex ice_global_cfg_lock_sw;
+
enum ice_status
ice_aq_send_cmd(struct ice_hw *hw, struct ice_aq_desc *desc,
void *buf, u16 buf_size, struct ice_sq_cd *cd);
enum ice_status ice_aq_get_fw_ver(struct ice_hw *hw, struct ice_sq_cd *cd);
enum ice_status
+ice_aq_send_driver_ver(struct ice_hw *hw, struct ice_driver_ver *dv,
+ struct ice_sq_cd *cd);
+enum ice_status
ice_aq_get_phy_caps(struct ice_port_info *pi, bool qual_mods, u8 report_mode,
struct ice_aqc_get_phy_caps_data *caps,
struct ice_sq_cd *cd);
@@ -130,6 +139,9 @@ ice_stat_update40(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
void
ice_stat_update32(struct ice_hw *hw, u32 reg, bool prev_stat_loaded,
u64 *prev_stat, u64 *cur_stat);
+void
+ice_get_nvm_version(struct ice_hw *hw, u8 *oem_ver, u16 *oem_build,
+ u8 *oem_patch, u8 *ver_hi, u8 *ver_lo);
enum ice_status
ice_sched_query_elem(struct ice_hw *hw, u32 node_teid,
struct ice_aqc_get_elem *buf);
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
index 97c22d4aae1d..dd47869c4ad4 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.c
@@ -4,6 +4,48 @@
#include "ice_dcb_lib.h"
/**
+ * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
+ * @vsi: the VSI being configured
+ * @ena_tc: TC map to be enabled
+ */
+void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
+{
+ struct net_device *netdev = vsi->netdev;
+ struct ice_pf *pf = vsi->back;
+ struct ice_dcbx_cfg *dcbcfg;
+ u8 netdev_tc;
+ int i;
+
+ if (!netdev)
+ return;
+
+ if (!ena_tc) {
+ netdev_reset_tc(netdev);
+ return;
+ }
+
+ if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
+ return;
+
+ dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
+
+ ice_for_each_traffic_class(i)
+ if (vsi->tc_cfg.ena_tc & BIT(i))
+ netdev_set_tc_queue(netdev,
+ vsi->tc_cfg.tc_info[i].netdev_tc,
+ vsi->tc_cfg.tc_info[i].qcount_tx,
+ vsi->tc_cfg.tc_info[i].qoffset);
+
+ for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
+ u8 ets_tc = dcbcfg->etscfg.prio_table[i];
+
+ /* Get the mapped netdev TC# for the UP */
+ netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
+ netdev_set_prio_tc_map(netdev, i, netdev_tc);
+ }
+}
+
+/**
* ice_dcb_get_ena_tc - return bitmap of enabled TCs
* @dcbcfg: DCB config to evaluate for enabled TCs
*/
diff --git a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
index 819081053ff5..661a6f7bca64 100644
--- a/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_dcb_lib.h
@@ -22,6 +22,7 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring *tx_ring,
void
ice_dcb_process_lldp_set_mib_change(struct ice_pf *pf,
struct ice_rq_event_info *event);
+void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc);
static inline void
ice_set_cgd_num(struct ice_tlan_ctx *tlan_ctx, struct ice_ring *ring)
{
@@ -58,5 +59,6 @@ ice_tx_prepare_vlan_flags_dcb(struct ice_ring __always_unused *tx_ring,
#define ice_vsi_cfg_dcb_rings(vsi) do {} while (0)
#define ice_dcb_process_lldp_set_mib_change(pf, event) do {} while (0)
#define ice_set_cgd_num(tlan_ctx, ring) do {} while (0)
+#define ice_vsi_cfg_netdev_tc(vsi, ena_tc) do {} while (0)
#endif /* CONFIG_DCB */
#endif /* _ICE_DCB_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_ethtool.c b/drivers/net/ethernet/intel/ice/ice_ethtool.c
index d5db1426d484..7e23034df955 100644
--- a/drivers/net/ethernet/intel/ice/ice_ethtool.c
+++ b/drivers/net/ethernet/intel/ice/ice_ethtool.c
@@ -160,31 +160,6 @@ static const struct ice_priv_flag ice_gstrings_priv_flags[] = {
#define ICE_PRIV_FLAG_ARRAY_SIZE ARRAY_SIZE(ice_gstrings_priv_flags)
-/**
- * ice_nvm_version_str - format the NVM version strings
- * @hw: ptr to the hardware info
- */
-static char *ice_nvm_version_str(struct ice_hw *hw)
-{
- static char buf[ICE_ETHTOOL_FWVER_LEN];
- u8 ver, patch;
- u32 full_ver;
- u16 build;
-
- full_ver = hw->nvm.oem_ver;
- ver = (u8)((full_ver & ICE_OEM_VER_MASK) >> ICE_OEM_VER_SHIFT);
- build = (u16)((full_ver & ICE_OEM_VER_BUILD_MASK) >>
- ICE_OEM_VER_BUILD_SHIFT);
- patch = (u8)(full_ver & ICE_OEM_VER_PATCH_MASK);
-
- snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d",
- (hw->nvm.ver & ICE_NVM_VER_HI_MASK) >> ICE_NVM_VER_HI_SHIFT,
- (hw->nvm.ver & ICE_NVM_VER_LO_MASK) >> ICE_NVM_VER_LO_SHIFT,
- hw->nvm.eetrack, ver, build, patch);
-
- return buf;
-}
-
static void
ice_get_drvinfo(struct net_device *netdev, struct ethtool_drvinfo *drvinfo)
{
@@ -3460,6 +3435,33 @@ static const struct ethtool_ops ice_ethtool_ops = {
.set_fecparam = ice_set_fecparam,
};
+static const struct ethtool_ops ice_ethtool_safe_mode_ops = {
+ .get_link_ksettings = ice_get_link_ksettings,
+ .set_link_ksettings = ice_set_link_ksettings,
+ .get_drvinfo = ice_get_drvinfo,
+ .get_regs_len = ice_get_regs_len,
+ .get_regs = ice_get_regs,
+ .get_msglevel = ice_get_msglevel,
+ .set_msglevel = ice_set_msglevel,
+ .get_eeprom_len = ice_get_eeprom_len,
+ .get_eeprom = ice_get_eeprom,
+ .get_strings = ice_get_strings,
+ .get_ethtool_stats = ice_get_ethtool_stats,
+ .get_sset_count = ice_get_sset_count,
+ .get_ringparam = ice_get_ringparam,
+ .set_ringparam = ice_set_ringparam,
+ .nway_reset = ice_nway_reset,
+};
+
+/**
+ * ice_set_ethtool_safe_mode_ops - setup safe mode ethtool ops
+ * @netdev: network interface device structure
+ */
+void ice_set_ethtool_safe_mode_ops(struct net_device *netdev)
+{
+ netdev->ethtool_ops = &ice_ethtool_safe_mode_ops;
+}
+
/**
* ice_set_ethtool_ops - setup netdev ethtool ops
* @netdev: network interface device structure
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.c b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
new file mode 100644
index 000000000000..cbd53b586c36
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.c
@@ -0,0 +1,1549 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2019, Intel Corporation. */
+
+#include "ice_common.h"
+#include "ice_flex_pipe.h"
+
+/**
+ * ice_pkg_val_buf
+ * @buf: pointer to the ice buffer
+ *
+ * This helper function validates a buffer's header.
+ */
+static struct ice_buf_hdr *ice_pkg_val_buf(struct ice_buf *buf)
+{
+ struct ice_buf_hdr *hdr;
+ u16 section_count;
+ u16 data_end;
+
+ hdr = (struct ice_buf_hdr *)buf->buf;
+ /* verify data */
+ section_count = le16_to_cpu(hdr->section_count);
+ if (section_count < ICE_MIN_S_COUNT || section_count > ICE_MAX_S_COUNT)
+ return NULL;
+
+ data_end = le16_to_cpu(hdr->data_end);
+ if (data_end < ICE_MIN_S_DATA_END || data_end > ICE_MAX_S_DATA_END)
+ return NULL;
+
+ return hdr;
+}
+
+/**
+ * ice_find_buf_table
+ * @ice_seg: pointer to the ice segment
+ *
+ * Returns the address of the buffer table within the ice segment.
+ */
+static struct ice_buf_table *ice_find_buf_table(struct ice_seg *ice_seg)
+{
+ struct ice_nvm_table *nvms;
+
+ nvms = (struct ice_nvm_table *)
+ (ice_seg->device_table +
+ le32_to_cpu(ice_seg->device_table_count));
+
+ return (__force struct ice_buf_table *)
+ (nvms->vers + le32_to_cpu(nvms->table_count));
+}
+
+/**
+ * ice_pkg_enum_buf
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This function will enumerate all the buffers in the ice segment. The first
+ * call is made with the ice_seg parameter non-NULL; on subsequent calls,
+ * ice_seg is set to NULL which continues the enumeration. When the function
+ * returns a NULL pointer, then the end of the buffers has been reached, or an
+ * unexpected value has been detected (for example an invalid section count or
+ * an invalid buffer end value).
+ */
+static struct ice_buf_hdr *
+ice_pkg_enum_buf(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+ if (ice_seg) {
+ state->buf_table = ice_find_buf_table(ice_seg);
+ if (!state->buf_table)
+ return NULL;
+
+ state->buf_idx = 0;
+ return ice_pkg_val_buf(state->buf_table->buf_array);
+ }
+
+ if (++state->buf_idx < le32_to_cpu(state->buf_table->buf_count))
+ return ice_pkg_val_buf(state->buf_table->buf_array +
+ state->buf_idx);
+ else
+ return NULL;
+}
+
+/**
+ * ice_pkg_advance_sect
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ *
+ * This helper function will advance the section within the ice segment,
+ * also advancing the buffer if needed.
+ */
+static bool
+ice_pkg_advance_sect(struct ice_seg *ice_seg, struct ice_pkg_enum *state)
+{
+ if (!ice_seg && !state->buf)
+ return false;
+
+ if (!ice_seg && state->buf)
+ if (++state->sect_idx < le16_to_cpu(state->buf->section_count))
+ return true;
+
+ state->buf = ice_pkg_enum_buf(ice_seg, state);
+ if (!state->buf)
+ return false;
+
+ /* start of new buffer, reset section index */
+ state->sect_idx = 0;
+ return true;
+}
+
+/**
+ * ice_pkg_enum_section
+ * @ice_seg: pointer to the ice segment (or NULL on subsequent calls)
+ * @state: pointer to the enum state
+ * @sect_type: section type to enumerate
+ *
+ * This function will enumerate all the sections of a particular type in the
+ * ice segment. The first call is made with the ice_seg parameter non-NULL;
+ * on subsequent calls, ice_seg is set to NULL which continues the enumeration.
+ * When the function returns a NULL pointer, then the end of the matching
+ * sections has been reached.
+ */
+static void *
+ice_pkg_enum_section(struct ice_seg *ice_seg, struct ice_pkg_enum *state,
+ u32 sect_type)
+{
+ u16 offset, size;
+
+ if (ice_seg)
+ state->type = sect_type;
+
+ if (!ice_pkg_advance_sect(ice_seg, state))
+ return NULL;
+
+ /* scan for next matching section */
+ while (state->buf->section_entry[state->sect_idx].type !=
+ cpu_to_le32(state->type))
+ if (!ice_pkg_advance_sect(NULL, state))
+ return NULL;
+
+ /* validate section */
+ offset = le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
+ if (offset < ICE_MIN_S_OFF || offset > ICE_MAX_S_OFF)
+ return NULL;
+
+ size = le16_to_cpu(state->buf->section_entry[state->sect_idx].size);
+ if (size < ICE_MIN_S_SZ || size > ICE_MAX_S_SZ)
+ return NULL;
+
+ /* make sure the section fits in the buffer */
+ if (offset + size > ICE_PKG_BUF_SIZE)
+ return NULL;
+
+ state->sect_type =
+ le32_to_cpu(state->buf->section_entry[state->sect_idx].type);
+
+ /* calc pointer to this section */
+ state->sect = ((u8 *)state->buf) +
+ le16_to_cpu(state->buf->section_entry[state->sect_idx].offset);
+
+ return state->sect;
+}
+
+/**
+ * ice_acquire_global_cfg_lock
+ * @hw: pointer to the HW structure
+ * @access: access type (read or write)
+ *
+ * This function will request ownership of the global config lock for reading
+ * or writing of the package. When attempting to obtain write access, the
+ * caller must check for the following two return values:
+ *
+ * ICE_SUCCESS - Means the caller has acquired the global config lock
+ * and can perform writing of the package.
+ * ICE_ERR_AQ_NO_WORK - Indicates another driver has already written the
+ * package or has found that no update was necessary; in
+ * this case, the caller can just skip performing any
+ * update of the package.
+ */
+static enum ice_status
+ice_acquire_global_cfg_lock(struct ice_hw *hw,
+ enum ice_aq_res_access_type access)
+{
+ enum ice_status status;
+
+ status = ice_acquire_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID, access,
+ ICE_GLOBAL_CFG_LOCK_TIMEOUT);
+
+ if (!status)
+ mutex_lock(&ice_global_cfg_lock_sw);
+ else if (status == ICE_ERR_AQ_NO_WORK)
+ ice_debug(hw, ICE_DBG_PKG,
+ "Global config lock: No work to do\n");
+
+ return status;
+}
+
+/**
+ * ice_release_global_cfg_lock
+ * @hw: pointer to the HW structure
+ *
+ * This function will release the global config lock.
+ */
+static void ice_release_global_cfg_lock(struct ice_hw *hw)
+{
+ mutex_unlock(&ice_global_cfg_lock_sw);
+ ice_release_res(hw, ICE_GLOBAL_CFG_LOCK_RES_ID);
+}
+
+/**
+ * ice_aq_download_pkg
+ * @hw: pointer to the hardware structure
+ * @pkg_buf: the package buffer to transfer
+ * @buf_size: the size of the package buffer
+ * @last_buf: last buffer indicator
+ * @error_offset: returns error offset
+ * @error_info: returns error information
+ * @cd: pointer to command details structure or NULL
+ *
+ * Download Package (0x0C40)
+ */
+static enum ice_status
+ice_aq_download_pkg(struct ice_hw *hw, struct ice_buf_hdr *pkg_buf,
+ u16 buf_size, bool last_buf, u32 *error_offset,
+ u32 *error_info, struct ice_sq_cd *cd)
+{
+ struct ice_aqc_download_pkg *cmd;
+ struct ice_aq_desc desc;
+ enum ice_status status;
+
+ if (error_offset)
+ *error_offset = 0;
+ if (error_info)
+ *error_info = 0;
+
+ cmd = &desc.params.download_pkg;
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_download_pkg);
+ desc.flags |= cpu_to_le16(ICE_AQ_FLAG_RD);
+
+ if (last_buf)
+ cmd->flags |= ICE_AQC_DOWNLOAD_PKG_LAST_BUF;
+
+ status = ice_aq_send_cmd(hw, &desc, pkg_buf, buf_size, cd);
+ if (status == ICE_ERR_AQ_ERROR) {
+ /* Read error from buffer only when the FW returned an error */
+ struct ice_aqc_download_pkg_resp *resp;
+
+ resp = (struct ice_aqc_download_pkg_resp *)pkg_buf;
+ if (error_offset)
+ *error_offset = le32_to_cpu(resp->error_offset);
+ if (error_info)
+ *error_info = le32_to_cpu(resp->error_info);
+ }
+
+ return status;
+}
+
+/**
+ * ice_find_seg_in_pkg
+ * @hw: pointer to the hardware structure
+ * @seg_type: the segment type to search for (i.e., SEGMENT_TYPE_CPK)
+ * @pkg_hdr: pointer to the package header to be searched
+ *
+ * This function searches a package file for a particular segment type. On
+ * success it returns a pointer to the segment header, otherwise it will
+ * return NULL.
+ */
+static struct ice_generic_seg_hdr *
+ice_find_seg_in_pkg(struct ice_hw *hw, u32 seg_type,
+ struct ice_pkg_hdr *pkg_hdr)
+{
+ u32 i;
+
+ ice_debug(hw, ICE_DBG_PKG, "Package format version: %d.%d.%d.%d\n",
+ pkg_hdr->format_ver.major, pkg_hdr->format_ver.minor,
+ pkg_hdr->format_ver.update, pkg_hdr->format_ver.draft);
+
+ /* Search all package segments for the requested segment type */
+ for (i = 0; i < le32_to_cpu(pkg_hdr->seg_count); i++) {
+ struct ice_generic_seg_hdr *seg;
+
+ seg = (struct ice_generic_seg_hdr *)
+ ((u8 *)pkg_hdr + le32_to_cpu(pkg_hdr->seg_offset[i]));
+
+ if (le32_to_cpu(seg->seg_type) == seg_type)
+ return seg;
+ }
+
+ return NULL;
+}
+
+/**
+ * ice_dwnld_cfg_bufs
+ * @hw: pointer to the hardware structure
+ * @bufs: pointer to an array of buffers
+ * @count: the number of buffers in the array
+ *
+ * Obtains global config lock and downloads the package configuration buffers
+ * to the firmware. Metadata buffers are skipped, and the first metadata buffer
+ * found indicates that the rest of the buffers are all metadata buffers.
+ */
+static enum ice_status
+ice_dwnld_cfg_bufs(struct ice_hw *hw, struct ice_buf *bufs, u32 count)
+{
+ enum ice_status status;
+ struct ice_buf_hdr *bh;
+ u32 offset, info, i;
+
+ if (!bufs || !count)
+ return ICE_ERR_PARAM;
+
+ /* If the first buffer's first section has its metadata bit set
+ * then there are no buffers to be downloaded, and the operation is
+ * considered a success.
+ */
+ bh = (struct ice_buf_hdr *)bufs;
+ if (le32_to_cpu(bh->section_entry[0].type) & ICE_METADATA_BUF)
+ return 0;
+
+ /* reset pkg_dwnld_status in case this function is called in the
+ * reset/rebuild flow
+ */
+ hw->pkg_dwnld_status = ICE_AQ_RC_OK;
+
+ status = ice_acquire_global_cfg_lock(hw, ICE_RES_WRITE);
+ if (status) {
+ if (status == ICE_ERR_AQ_NO_WORK)
+ hw->pkg_dwnld_status = ICE_AQ_RC_EEXIST;
+ else
+ hw->pkg_dwnld_status = hw->adminq.sq_last_status;
+ return status;
+ }
+
+ for (i = 0; i < count; i++) {
+ bool last = ((i + 1) == count);
+
+ if (!last) {
+ /* check next buffer for metadata flag */
+ bh = (struct ice_buf_hdr *)(bufs + i + 1);
+
+ /* A set metadata flag in the next buffer will signal
+ * that the current buffer will be the last buffer
+ * downloaded
+ */
+ if (le16_to_cpu(bh->section_count))
+ if (le32_to_cpu(bh->section_entry[0].type) &
+ ICE_METADATA_BUF)
+ last = true;
+ }
+
+ bh = (struct ice_buf_hdr *)(bufs + i);
+
+ status = ice_aq_download_pkg(hw, bh, ICE_PKG_BUF_SIZE, last,
+ &offset, &info, NULL);
+
+ /* Save AQ status from download package */
+ hw->pkg_dwnld_status = hw->adminq.sq_last_status;
+ if (status) {
+ ice_debug(hw, ICE_DBG_PKG,
+ "Pkg download failed: err %d off %d inf %d\n",
+ status, offset, info);
+
+ break;
+ }
+
+ if (last)
+ break;
+ }
+
+ ice_release_global_cfg_lock(hw);
+
+ return status;
+}
+
+/**
+ * ice_aq_get_pkg_info_list
+ * @hw: pointer to the hardware structure
+ * @pkg_info: the buffer which will receive the information list
+ * @buf_size: the size of the pkg_info information buffer
+ * @cd: pointer to command details structure or NULL
+ *
+ * Get Package Info List (0x0C43)
+ */
+static enum ice_status
+ice_aq_get_pkg_info_list(struct ice_hw *hw,
+ struct ice_aqc_get_pkg_info_resp *pkg_info,
+ u16 buf_size, struct ice_sq_cd *cd)
+{
+ struct ice_aq_desc desc;
+
+ ice_fill_dflt_direct_cmd_desc(&desc, ice_aqc_opc_get_pkg_info_list);
+
+ return ice_aq_send_cmd(hw, &desc, pkg_info, buf_size, cd);
+}
+
+/**
+ * ice_download_pkg
+ * @hw: pointer to the hardware structure
+ * @ice_seg: pointer to the segment of the package to be downloaded
+ *
+ * Handles the download of a complete package.
+ */
+static enum ice_status
+ice_download_pkg(struct ice_hw *hw, struct ice_seg *ice_seg)
+{
+ struct ice_buf_table *ice_buf_tbl;
+
+ ice_debug(hw, ICE_DBG_PKG, "Segment version: %d.%d.%d.%d\n",
+ ice_seg->hdr.seg_ver.major, ice_seg->hdr.seg_ver.minor,
+ ice_seg->hdr.seg_ver.update, ice_seg->hdr.seg_ver.draft);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg: type 0x%X, size %d, name %s\n",
+ le32_to_cpu(ice_seg->hdr.seg_type),
+ le32_to_cpu(ice_seg->hdr.seg_size), ice_seg->hdr.seg_name);
+
+ ice_buf_tbl = ice_find_buf_table(ice_seg);
+
+ ice_debug(hw, ICE_DBG_PKG, "Seg buf count: %d\n",
+ le32_to_cpu(ice_buf_tbl->buf_count));
+
+ return ice_dwnld_cfg_bufs(hw, ice_buf_tbl->buf_array,
+ le32_to_cpu(ice_buf_tbl->buf_count));
+}
+
+/**
+ * ice_init_pkg_info
+ * @hw: pointer to the hardware structure
+ * @pkg_hdr: pointer to the driver's package hdr
+ *
+ * Saves off the package details into the HW structure.
+ */
+static enum ice_status
+ice_init_pkg_info(struct ice_hw *hw, struct ice_pkg_hdr *pkg_hdr)
+{
+ struct ice_global_metadata_seg *meta_seg;
+ struct ice_generic_seg_hdr *seg_hdr;
+
+ if (!pkg_hdr)
+ return ICE_ERR_PARAM;
+
+ meta_seg = (struct ice_global_metadata_seg *)
+ ice_find_seg_in_pkg(hw, SEGMENT_TYPE_METADATA, pkg_hdr);
+ if (meta_seg) {
+ hw->pkg_ver = meta_seg->pkg_ver;
+ memcpy(hw->pkg_name, meta_seg->pkg_name, sizeof(hw->pkg_name));
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg: %d.%d.%d.%d, %s\n",
+ meta_seg->pkg_ver.major, meta_seg->pkg_ver.minor,
+ meta_seg->pkg_ver.update, meta_seg->pkg_ver.draft,
+ meta_seg->pkg_name);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Did not find metadata segment in driver package\n");
+ return ICE_ERR_CFG;
+ }
+
+ seg_hdr = ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg_hdr);
+ if (seg_hdr) {
+ hw->ice_pkg_ver = seg_hdr->seg_ver;
+ memcpy(hw->ice_pkg_name, seg_hdr->seg_name,
+ sizeof(hw->ice_pkg_name));
+
+ ice_debug(hw, ICE_DBG_PKG, "Ice Pkg: %d.%d.%d.%d, %s\n",
+ seg_hdr->seg_ver.major, seg_hdr->seg_ver.minor,
+ seg_hdr->seg_ver.update, seg_hdr->seg_ver.draft,
+ seg_hdr->seg_name);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT,
+ "Did not find ice segment in driver package\n");
+ return ICE_ERR_CFG;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_get_pkg_info
+ * @hw: pointer to the hardware structure
+ *
+ * Store details of the package currently loaded in HW into the HW structure.
+ */
+static enum ice_status ice_get_pkg_info(struct ice_hw *hw)
+{
+ struct ice_aqc_get_pkg_info_resp *pkg_info;
+ enum ice_status status;
+ u16 size;
+ u32 i;
+
+ size = sizeof(*pkg_info) + (sizeof(pkg_info->pkg_info[0]) *
+ (ICE_PKG_CNT - 1));
+ pkg_info = kzalloc(size, GFP_KERNEL);
+ if (!pkg_info)
+ return ICE_ERR_NO_MEMORY;
+
+ status = ice_aq_get_pkg_info_list(hw, pkg_info, size, NULL);
+ if (status)
+ goto init_pkg_free_alloc;
+
+ for (i = 0; i < le32_to_cpu(pkg_info->count); i++) {
+#define ICE_PKG_FLAG_COUNT 4
+ char flags[ICE_PKG_FLAG_COUNT + 1] = { 0 };
+ u8 place = 0;
+
+ if (pkg_info->pkg_info[i].is_active) {
+ flags[place++] = 'A';
+ hw->active_pkg_ver = pkg_info->pkg_info[i].ver;
+ memcpy(hw->active_pkg_name,
+ pkg_info->pkg_info[i].name,
+ sizeof(hw->active_pkg_name));
+ hw->active_pkg_in_nvm = pkg_info->pkg_info[i].is_in_nvm;
+ }
+ if (pkg_info->pkg_info[i].is_active_at_boot)
+ flags[place++] = 'B';
+ if (pkg_info->pkg_info[i].is_modified)
+ flags[place++] = 'M';
+ if (pkg_info->pkg_info[i].is_in_nvm)
+ flags[place++] = 'N';
+
+ ice_debug(hw, ICE_DBG_PKG, "Pkg[%d]: %d.%d.%d.%d,%s,%s\n",
+ i, pkg_info->pkg_info[i].ver.major,
+ pkg_info->pkg_info[i].ver.minor,
+ pkg_info->pkg_info[i].ver.update,
+ pkg_info->pkg_info[i].ver.draft,
+ pkg_info->pkg_info[i].name, flags);
+ }
+
+init_pkg_free_alloc:
+ kfree(pkg_info);
+
+ return status;
+}
+
+/**
+ * ice_verify_pkg - verify package
+ * @pkg: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * Verifies various attributes of the package file, including length, format
+ * version, and the requirement of at least one segment.
+ */
+static enum ice_status ice_verify_pkg(struct ice_pkg_hdr *pkg, u32 len)
+{
+ u32 seg_count;
+ u32 i;
+
+ if (len < sizeof(*pkg))
+ return ICE_ERR_BUF_TOO_SHORT;
+
+ if (pkg->format_ver.major != ICE_PKG_FMT_VER_MAJ ||
+ pkg->format_ver.minor != ICE_PKG_FMT_VER_MNR ||
+ pkg->format_ver.update != ICE_PKG_FMT_VER_UPD ||
+ pkg->format_ver.draft != ICE_PKG_FMT_VER_DFT)
+ return ICE_ERR_CFG;
+
+ /* pkg must have at least one segment */
+ seg_count = le32_to_cpu(pkg->seg_count);
+ if (seg_count < 1)
+ return ICE_ERR_CFG;
+
+ /* make sure segment array fits in package length */
+ if (len < sizeof(*pkg) + ((seg_count - 1) * sizeof(pkg->seg_offset)))
+ return ICE_ERR_BUF_TOO_SHORT;
+
+ /* all segments must fit within length */
+ for (i = 0; i < seg_count; i++) {
+ u32 off = le32_to_cpu(pkg->seg_offset[i]);
+ struct ice_generic_seg_hdr *seg;
+
+ /* segment header must fit */
+ if (len < off + sizeof(*seg))
+ return ICE_ERR_BUF_TOO_SHORT;
+
+ seg = (struct ice_generic_seg_hdr *)((u8 *)pkg + off);
+
+ /* segment body must fit */
+ if (len < off + le32_to_cpu(seg->seg_size))
+ return ICE_ERR_BUF_TOO_SHORT;
+ }
+
+ return 0;
+}
+
+/**
+ * ice_free_seg - free package segment pointer
+ * @hw: pointer to the hardware structure
+ *
+ * Frees the package segment pointer in the proper manner, depending on if the
+ * segment was allocated or just the passed in pointer was stored.
+ */
+void ice_free_seg(struct ice_hw *hw)
+{
+ if (hw->pkg_copy) {
+ devm_kfree(ice_hw_to_dev(hw), hw->pkg_copy);
+ hw->pkg_copy = NULL;
+ hw->pkg_size = 0;
+ }
+ hw->seg = NULL;
+}
+
+/**
+ * ice_init_pkg_regs - initialize additional package registers
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_pkg_regs(struct ice_hw *hw)
+{
+#define ICE_SW_BLK_INP_MASK_L 0xFFFFFFFF
+#define ICE_SW_BLK_INP_MASK_H 0x0000FFFF
+#define ICE_SW_BLK_IDX 0
+
+ /* setup Switch block input mask, which is 48-bits in two parts */
+ wr32(hw, GL_PREEXT_L2_PMASK0(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_L);
+ wr32(hw, GL_PREEXT_L2_PMASK1(ICE_SW_BLK_IDX), ICE_SW_BLK_INP_MASK_H);
+}
+
+/**
+ * ice_chk_pkg_version - check package version for compatibility with driver
+ * @pkg_ver: pointer to a version structure to check
+ *
+ * Check to make sure that the package about to be downloaded is compatible with
+ * the driver. To be compatible, the major and minor components of the package
+ * version must match our ICE_PKG_SUPP_VER_MAJ and ICE_PKG_SUPP_VER_MNR
+ * definitions.
+ */
+static enum ice_status ice_chk_pkg_version(struct ice_pkg_ver *pkg_ver)
+{
+ if (pkg_ver->major != ICE_PKG_SUPP_VER_MAJ ||
+ pkg_ver->minor != ICE_PKG_SUPP_VER_MNR)
+ return ICE_ERR_NOT_SUPPORTED;
+
+ return 0;
+}
+
+/**
+ * ice_init_pkg - initialize/download package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function initializes a package. The package contains HW tables
+ * required to do packet processing. First, the function extracts package
+ * information such as version. Then it finds the ice configuration segment
+ * within the package; this function then saves a copy of the segment pointer
+ * within the supplied package buffer. Next, the function will cache any hints
+ * from the package, followed by downloading the package itself. Note that if
+ * a previous PF driver has already downloaded the package successfully, then
+ * the current driver will not have to download the package again.
+ *
+ * The local package contents will be used to query default behavior and to
+ * update specific sections of the HW's version of the package (e.g. to update
+ * the parse graph to understand new protocols).
+ *
+ * This function stores a pointer to the package buffer memory, and it is
+ * expected that the supplied buffer will not be freed immediately. If the
+ * package buffer needs to be freed, such as when read from a file, use
+ * ice_copy_and_init_pkg() instead of directly calling ice_init_pkg() in this
+ * case.
+ */
+enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buf, u32 len)
+{
+ struct ice_pkg_hdr *pkg;
+ enum ice_status status;
+ struct ice_seg *seg;
+
+ if (!buf || !len)
+ return ICE_ERR_PARAM;
+
+ pkg = (struct ice_pkg_hdr *)buf;
+ status = ice_verify_pkg(pkg, len);
+ if (status) {
+ ice_debug(hw, ICE_DBG_INIT, "failed to verify pkg (err: %d)\n",
+ status);
+ return status;
+ }
+
+ /* initialize package info */
+ status = ice_init_pkg_info(hw, pkg);
+ if (status)
+ return status;
+
+ /* before downloading the package, check package version for
+ * compatibility with driver
+ */
+ status = ice_chk_pkg_version(&hw->pkg_ver);
+ if (status)
+ return status;
+
+ /* find segment in given package */
+ seg = (struct ice_seg *)ice_find_seg_in_pkg(hw, SEGMENT_TYPE_ICE, pkg);
+ if (!seg) {
+ ice_debug(hw, ICE_DBG_INIT, "no ice segment in package.\n");
+ return ICE_ERR_CFG;
+ }
+
+ /* download package */
+ status = ice_download_pkg(hw, seg);
+ if (status == ICE_ERR_AQ_NO_WORK) {
+ ice_debug(hw, ICE_DBG_INIT,
+ "package previously loaded - no work.\n");
+ status = 0;
+ }
+
+ /* Get information on the package currently loaded in HW, then make sure
+ * the driver is compatible with this version.
+ */
+ if (!status) {
+ status = ice_get_pkg_info(hw);
+ if (!status)
+ status = ice_chk_pkg_version(&hw->active_pkg_ver);
+ }
+
+ if (!status) {
+ hw->seg = seg;
+ /* on successful package download update other required
+ * registers to support the package and fill HW tables
+ * with package content.
+ */
+ ice_init_pkg_regs(hw);
+ ice_fill_blk_tbls(hw);
+ } else {
+ ice_debug(hw, ICE_DBG_INIT, "package load failed, %d\n",
+ status);
+ }
+
+ return status;
+}
+
+/**
+ * ice_copy_and_init_pkg - initialize/download a copy of the package
+ * @hw: pointer to the hardware structure
+ * @buf: pointer to the package buffer
+ * @len: size of the package buffer
+ *
+ * This function copies the package buffer, and then calls ice_init_pkg() to
+ * initialize the copied package contents.
+ *
+ * The copying is necessary if the package buffer supplied is constant, or if
+ * the memory may disappear shortly after calling this function.
+ *
+ * If the package buffer resides in the data segment and can be modified, the
+ * caller is free to use ice_init_pkg() instead of ice_copy_and_init_pkg().
+ *
+ * However, if the package buffer needs to be copied first, such as when being
+ * read from a file, the caller should use ice_copy_and_init_pkg().
+ *
+ * This function will first copy the package buffer, before calling
+ * ice_init_pkg(). The caller is free to immediately destroy the original
+ * package buffer, as the new copy will be managed by this function and
+ * related routines.
+ */
+enum ice_status ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len)
+{
+ enum ice_status status;
+ u8 *buf_copy;
+
+ if (!buf || !len)
+ return ICE_ERR_PARAM;
+
+ buf_copy = devm_kmemdup(ice_hw_to_dev(hw), buf, len, GFP_KERNEL);
+
+ status = ice_init_pkg(hw, buf_copy, len);
+ if (status) {
+ /* Free the copy, since we failed to initialize the package */
+ devm_kfree(ice_hw_to_dev(hw), buf_copy);
+ } else {
+ /* Track the copied pkg so we can free it later */
+ hw->pkg_copy = buf_copy;
+ hw->pkg_size = len;
+ }
+
+ return status;
+}
+
+/* PTG Management */
+
+/**
+ * ice_ptg_find_ptype - Search for packet type group using packet type (ptype)
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptype: the ptype to search for
+ * @ptg: pointer to variable that receives the PTG
+ *
+ * This function will search the PTGs for a particular ptype, returning the
+ * PTG ID that contains it through the PTG parameter, with the value of
+ * ICE_DEFAULT_PTG (0) meaning it is part of the default PTG.
+ */
+static enum ice_status
+ice_ptg_find_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 *ptg)
+{
+ if (ptype >= ICE_XLT1_CNT || !ptg)
+ return ICE_ERR_PARAM;
+
+ *ptg = hw->blk[blk].xlt1.ptypes[ptype].ptg;
+ return 0;
+}
+
+/**
+ * ice_ptg_alloc_val - Allocates a new packet type group ID by value
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptg: the PTG to allocate
+ *
+ * This function allocates a given packet type group ID specified by the PTG
+ * parameter.
+ */
+static void ice_ptg_alloc_val(struct ice_hw *hw, enum ice_block blk, u8 ptg)
+{
+ hw->blk[blk].xlt1.ptg_tbl[ptg].in_use = true;
+}
+
+/**
+ * ice_ptg_remove_ptype - Removes ptype from a particular packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptype: the ptype to remove
+ * @ptg: the PTG to remove the ptype from
+ *
+ * This function will remove the ptype from the specific PTG, and move it to
+ * the default PTG (ICE_DEFAULT_PTG).
+ */
+static enum ice_status
+ice_ptg_remove_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
+{
+ struct ice_ptg_ptype **ch;
+ struct ice_ptg_ptype *p;
+
+ if (ptype > ICE_XLT1_CNT - 1)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* Should not happen if .in_use is set, bad config */
+ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype)
+ return ICE_ERR_CFG;
+
+ /* find the ptype within this PTG, and bypass the link over it */
+ p = hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+ ch = &hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+ while (p) {
+ if (ptype == (p - hw->blk[blk].xlt1.ptypes)) {
+ *ch = p->next_ptype;
+ break;
+ }
+
+ ch = &p->next_ptype;
+ p = p->next_ptype;
+ }
+
+ hw->blk[blk].xlt1.ptypes[ptype].ptg = ICE_DEFAULT_PTG;
+ hw->blk[blk].xlt1.ptypes[ptype].next_ptype = NULL;
+
+ return 0;
+}
+
+/**
+ * ice_ptg_add_mv_ptype - Adds/moves ptype to a particular packet type group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @ptype: the ptype to add or move
+ * @ptg: the PTG to add or move the ptype to
+ *
+ * This function will either add or move a ptype to a particular PTG depending
+ * on whether the ptype is already part of another group. Note that using a
+ * destination PTG ID of ICE_DEFAULT_PTG (0) will move the ptype to the
+ * default PTG.
+ */
+static enum ice_status
+ice_ptg_add_mv_ptype(struct ice_hw *hw, enum ice_block blk, u16 ptype, u8 ptg)
+{
+ enum ice_status status;
+ u8 original_ptg;
+
+ if (ptype > ICE_XLT1_CNT - 1)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt1.ptg_tbl[ptg].in_use && ptg != ICE_DEFAULT_PTG)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ status = ice_ptg_find_ptype(hw, blk, ptype, &original_ptg);
+ if (status)
+ return status;
+
+ /* Is ptype already in the correct PTG? */
+ if (original_ptg == ptg)
+ return 0;
+
+ /* Remove from original PTG and move back to the default PTG */
+ if (original_ptg != ICE_DEFAULT_PTG)
+ ice_ptg_remove_ptype(hw, blk, ptype, original_ptg);
+
+ /* Moving to default PTG? Then we're done with this request */
+ if (ptg == ICE_DEFAULT_PTG)
+ return 0;
+
+ /* Add ptype to PTG at beginning of list */
+ hw->blk[blk].xlt1.ptypes[ptype].next_ptype =
+ hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype;
+ hw->blk[blk].xlt1.ptg_tbl[ptg].first_ptype =
+ &hw->blk[blk].xlt1.ptypes[ptype];
+
+ hw->blk[blk].xlt1.ptypes[ptype].ptg = ptg;
+ hw->blk[blk].xlt1.t[ptype] = ptg;
+
+ return 0;
+}
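+
+/* Example (illustrative): moving a ptype into a non-default group requires
+ * that group to be marked in use first, e.g.
+ *
+ *   ice_ptg_alloc_val(hw, ICE_BLK_RSS, ptg);
+ *   ice_ptg_add_mv_ptype(hw, ICE_BLK_RSS, ptype, ptg);
+ *
+ * which is the pattern ice_init_sw_xlt1_db() follows when rebuilding the
+ * software database from the XLT1 table.
+ */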
+
+/* Block / table size info */
+struct ice_blk_size_details {
+ u16 xlt1; /* # XLT1 entries */
+ u16 xlt2; /* # XLT2 entries */
+ u16 prof_tcam; /* # profile ID TCAM entries */
+ u16 prof_id; /* # profile IDs */
+ u8 prof_cdid_bits; /* # CDID one-hot bits used in key */
+ u16 prof_redir; /* # profile redirection entries */
+ u16 es; /* # extraction sequence entries */
+ u16 fvw; /* # field vector words */
+ u8 overwrite; /* overwrite existing entries allowed */
+ u8 reverse; /* reverse FV order */
+};
+
+static const struct ice_blk_size_details blk_sizes[ICE_BLK_COUNT] = {
+ /**
+ * Table Definitions
+ * XLT1 - Number of entries in XLT1 table
+ * XLT2 - Number of entries in XLT2 table
+ * TCAM - Number of entries in the Profile ID TCAM table
+ * PID - Number of profile IDs
+ * CDID - Control Domain ID of the hardware block
+ * PRED - Number of entries in the Profile Redirection Table
+ * FV - Number of entries in the Field Vector
+ * FVW - Width (in WORDs) of the Field Vector
+ * OVR - Overwrite existing table entries
+ * REV - Reverse FV
+ */
+ /* XLT1 , XLT2 ,TCAM, PID,CDID,PRED, FV, FVW */
+ /* Overwrite , Reverse FV */
+ /* SW */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 256, 0, 256, 256, 48,
+ false, false },
+ /* ACL */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 32,
+ false, false },
+ /* FD */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
+ false, true },
+ /* RSS */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 512, 128, 0, 128, 128, 24,
+ true, true },
+ /* PE */ { ICE_XLT1_CNT, ICE_XLT2_CNT, 64, 32, 0, 32, 32, 24,
+ false, false },
+};
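+
+/* Reading the table above (illustrative): the RSS block has a 512-entry
+ * profile TCAM, 128 profile IDs, 128 redirection entries and 24-word field
+ * vectors, and is the only block that allows overwriting existing entries,
+ * while the PE block uses the smallest tables (64 TCAM entries, 32 profile
+ * IDs).
+ */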
+
+enum ice_sid_all {
+ ICE_SID_XLT1_OFF = 0,
+ ICE_SID_XLT2_OFF,
+ ICE_SID_PR_OFF,
+ ICE_SID_PR_REDIR_OFF,
+ ICE_SID_ES_OFF,
+ ICE_SID_OFF_COUNT,
+};
+
+/* VSIG Management */
+
+/**
+ * ice_vsig_find_vsi - find a VSIG that contains a specified VSI
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: VSI of interest
+ * @vsig: pointer to receive the VSI group
+ *
+ * This function will look up the VSI entry in the XLT2 list and return
+ * the VSI group it is associated with.
+ */
+static enum ice_status
+ice_vsig_find_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 *vsig)
+{
+ if (!vsig || vsi >= ICE_MAX_VSI)
+ return ICE_ERR_PARAM;
+
+ /* As long as there's a default or valid VSIG associated with the input
+ * VSI, the function returns success. Any handling of VSIG will be
+ * done by the following add, update or remove functions.
+ */
+ *vsig = hw->blk[blk].xlt2.vsis[vsi].vsig;
+
+ return 0;
+}
+
+/**
+ * ice_vsig_alloc_val - allocate a new VSIG by value
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsig: the VSIG to allocate
+ *
+ * This function will allocate a given VSIG specified by the VSIG parameter.
+ */
+static u16 ice_vsig_alloc_val(struct ice_hw *hw, enum ice_block blk, u16 vsig)
+{
+ u16 idx = vsig & ICE_VSIG_IDX_M;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use) {
+ INIT_LIST_HEAD(&hw->blk[blk].xlt2.vsig_tbl[idx].prop_lst);
+ hw->blk[blk].xlt2.vsig_tbl[idx].in_use = true;
+ }
+
+ return ICE_VSIG_VALUE(idx, hw->pf_id);
+}
+
+/**
+ * ice_vsig_remove_vsi - remove VSI from VSIG
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: VSI to remove
+ * @vsig: VSI group to remove from
+ *
+ * The function will remove the input VSI from its VSI group and move it
+ * to the DEFAULT_VSIG.
+ */
+static enum ice_status
+ice_vsig_remove_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+ struct ice_vsig_vsi **vsi_head, *vsi_cur, *vsi_tgt;
+ u16 idx;
+
+ idx = vsig & ICE_VSIG_IDX_M;
+
+ if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
+ return ICE_ERR_PARAM;
+
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ /* entry already in default VSIG, don't have to remove */
+ if (idx == ICE_DEFAULT_VSIG)
+ return 0;
+
+ vsi_head = &hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ if (!(*vsi_head))
+ return ICE_ERR_CFG;
+
+ vsi_tgt = &hw->blk[blk].xlt2.vsis[vsi];
+ vsi_cur = (*vsi_head);
+
+ /* iterate the VSI list, skip over the entry to be removed */
+ while (vsi_cur) {
+ if (vsi_tgt == vsi_cur) {
+ (*vsi_head) = vsi_cur->next_vsi;
+ break;
+ }
+ vsi_head = &vsi_cur->next_vsi;
+ vsi_cur = vsi_cur->next_vsi;
+ }
+
+ /* verify if VSI was removed from group list */
+ if (!vsi_cur)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ vsi_cur->vsig = ICE_DEFAULT_VSIG;
+ vsi_cur->changed = 1;
+ vsi_cur->next_vsi = NULL;
+
+ return 0;
+}
+
+/**
+ * ice_vsig_add_mv_vsi - add or move a VSI to a VSI group
+ * @hw: pointer to the hardware structure
+ * @blk: HW block
+ * @vsi: VSI to move
+ * @vsig: destination VSI group
+ *
+ * This function will move or add the input VSI to the target VSIG.
+ * The function will find the original VSIG the VSI belongs to,
+ * move the entry to the DEFAULT_VSIG, update the original VSIG and
+ * then move the entry to the new VSIG.
+ */
+static enum ice_status
+ice_vsig_add_mv_vsi(struct ice_hw *hw, enum ice_block blk, u16 vsi, u16 vsig)
+{
+ struct ice_vsig_vsi *tmp;
+ enum ice_status status;
+ u16 orig_vsig, idx;
+
+ idx = vsig & ICE_VSIG_IDX_M;
+
+ if (vsi >= ICE_MAX_VSI || idx >= ICE_MAX_VSIGS)
+ return ICE_ERR_PARAM;
+
+ /* if VSIG not in use and VSIG is not default type this VSIG
+ * doesn't exist.
+ */
+ if (!hw->blk[blk].xlt2.vsig_tbl[idx].in_use &&
+ vsig != ICE_DEFAULT_VSIG)
+ return ICE_ERR_DOES_NOT_EXIST;
+
+ status = ice_vsig_find_vsi(hw, blk, vsi, &orig_vsig);
+ if (status)
+ return status;
+
+ /* no update required if vsigs match */
+ if (orig_vsig == vsig)
+ return 0;
+
+ if (orig_vsig != ICE_DEFAULT_VSIG) {
+ /* remove entry from orig_vsig and add to default VSIG */
+ status = ice_vsig_remove_vsi(hw, blk, vsi, orig_vsig);
+ if (status)
+ return status;
+ }
+
+ if (idx == ICE_DEFAULT_VSIG)
+ return 0;
+
+ /* Create VSI entry and add VSIG and prop_mask values */
+ hw->blk[blk].xlt2.vsis[vsi].vsig = vsig;
+ hw->blk[blk].xlt2.vsis[vsi].changed = 1;
+
+ /* Add new entry to the head of the VSIG list */
+ tmp = hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi;
+ hw->blk[blk].xlt2.vsig_tbl[idx].first_vsi =
+ &hw->blk[blk].xlt2.vsis[vsi];
+ hw->blk[blk].xlt2.vsis[vsi].next_vsi = tmp;
+ hw->blk[blk].xlt2.t[vsi] = vsig;
+
+ return 0;
+}
+
+/* Block / table section IDs */
+static const u32 ice_blk_sids[ICE_BLK_COUNT][ICE_SID_OFF_COUNT] = {
+ /* SWITCH */
+ { ICE_SID_XLT1_SW,
+ ICE_SID_XLT2_SW,
+ ICE_SID_PROFID_TCAM_SW,
+ ICE_SID_PROFID_REDIR_SW,
+ ICE_SID_FLD_VEC_SW
+ },
+
+ /* ACL */
+ { ICE_SID_XLT1_ACL,
+ ICE_SID_XLT2_ACL,
+ ICE_SID_PROFID_TCAM_ACL,
+ ICE_SID_PROFID_REDIR_ACL,
+ ICE_SID_FLD_VEC_ACL
+ },
+
+ /* FD */
+ { ICE_SID_XLT1_FD,
+ ICE_SID_XLT2_FD,
+ ICE_SID_PROFID_TCAM_FD,
+ ICE_SID_PROFID_REDIR_FD,
+ ICE_SID_FLD_VEC_FD
+ },
+
+ /* RSS */
+ { ICE_SID_XLT1_RSS,
+ ICE_SID_XLT2_RSS,
+ ICE_SID_PROFID_TCAM_RSS,
+ ICE_SID_PROFID_REDIR_RSS,
+ ICE_SID_FLD_VEC_RSS
+ },
+
+ /* PE */
+ { ICE_SID_XLT1_PE,
+ ICE_SID_XLT2_PE,
+ ICE_SID_PROFID_TCAM_PE,
+ ICE_SID_PROFID_REDIR_PE,
+ ICE_SID_FLD_VEC_PE
+ }
+};
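+
+/* Note: rows above follow enum ice_block and columns follow enum ice_sid_all,
+ * so ice_blk_sids[blk][ICE_SID_ES_OFF], for example, is the extraction
+ * sequence (field vector) section ID of that block.
+ */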
+
+/**
+ * ice_init_sw_xlt1_db - init software XLT1 database from HW tables
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block to initialize
+ */
+static void ice_init_sw_xlt1_db(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 pt;
+
+ for (pt = 0; pt < hw->blk[blk].xlt1.count; pt++) {
+ u8 ptg;
+
+ ptg = hw->blk[blk].xlt1.t[pt];
+ if (ptg != ICE_DEFAULT_PTG) {
+ ice_ptg_alloc_val(hw, blk, ptg);
+ ice_ptg_add_mv_ptype(hw, blk, pt, ptg);
+ }
+ }
+}
+
+/**
+ * ice_init_sw_xlt2_db - init software XLT2 database from HW tables
+ * @hw: pointer to the hardware structure
+ * @blk: the HW block to initialize
+ */
+static void ice_init_sw_xlt2_db(struct ice_hw *hw, enum ice_block blk)
+{
+ u16 vsi;
+
+ for (vsi = 0; vsi < hw->blk[blk].xlt2.count; vsi++) {
+ u16 vsig;
+
+ vsig = hw->blk[blk].xlt2.t[vsi];
+ if (vsig) {
+ ice_vsig_alloc_val(hw, blk, vsig);
+ ice_vsig_add_mv_vsi(hw, blk, vsi, vsig);
+ /* no changes at this time, since this has been
+ * initialized from the original package
+ */
+ hw->blk[blk].xlt2.vsis[vsi].changed = 0;
+ }
+ }
+}
+
+/**
+ * ice_init_sw_db - init software database from HW tables
+ * @hw: pointer to the hardware structure
+ */
+static void ice_init_sw_db(struct ice_hw *hw)
+{
+ u16 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ ice_init_sw_xlt1_db(hw, (enum ice_block)i);
+ ice_init_sw_xlt2_db(hw, (enum ice_block)i);
+ }
+}
+
+/**
+ * ice_fill_tbl - Reads content of a single table type into database
+ * @hw: pointer to the hardware structure
+ * @block_id: Block ID of the table to copy
+ * @sid: Section ID of the table to copy
+ *
+ * Will attempt to read the entire content of a given table of a single block
+ * into the driver database. We assume that the destination buffer will always
+ * be as large as or larger than the data contained in the package. If this
+ * condition is not met, there is most likely an error in the package contents.
+ */
+static void ice_fill_tbl(struct ice_hw *hw, enum ice_block block_id, u32 sid)
+{
+ u32 dst_len, sect_len, offset = 0;
+ struct ice_prof_redir_section *pr;
+ struct ice_prof_id_section *pid;
+ struct ice_xlt1_section *xlt1;
+ struct ice_xlt2_section *xlt2;
+ struct ice_sw_fv_section *es;
+ struct ice_pkg_enum state;
+ u8 *src, *dst;
+ void *sect;
+
+ /* if the HW segment pointer is null then the first iteration of
+ * ice_pkg_enum_section() will fail. In this case the HW tables are
+ * left unfilled and the function returns without error.
+ */
+ if (!hw->seg) {
+ ice_debug(hw, ICE_DBG_PKG, "hw->seg is NULL, tables are not filled\n");
+ return;
+ }
+
+ memset(&state, 0, sizeof(state));
+
+ sect = ice_pkg_enum_section(hw->seg, &state, sid);
+
+ while (sect) {
+ switch (sid) {
+ case ICE_SID_XLT1_SW:
+ case ICE_SID_XLT1_FD:
+ case ICE_SID_XLT1_RSS:
+ case ICE_SID_XLT1_ACL:
+ case ICE_SID_XLT1_PE:
+ xlt1 = (struct ice_xlt1_section *)sect;
+ src = xlt1->value;
+ sect_len = le16_to_cpu(xlt1->count) *
+ sizeof(*hw->blk[block_id].xlt1.t);
+ dst = hw->blk[block_id].xlt1.t;
+ dst_len = hw->blk[block_id].xlt1.count *
+ sizeof(*hw->blk[block_id].xlt1.t);
+ break;
+ case ICE_SID_XLT2_SW:
+ case ICE_SID_XLT2_FD:
+ case ICE_SID_XLT2_RSS:
+ case ICE_SID_XLT2_ACL:
+ case ICE_SID_XLT2_PE:
+ xlt2 = (struct ice_xlt2_section *)sect;
+ src = (__force u8 *)xlt2->value;
+ sect_len = le16_to_cpu(xlt2->count) *
+ sizeof(*hw->blk[block_id].xlt2.t);
+ dst = (u8 *)hw->blk[block_id].xlt2.t;
+ dst_len = hw->blk[block_id].xlt2.count *
+ sizeof(*hw->blk[block_id].xlt2.t);
+ break;
+ case ICE_SID_PROFID_TCAM_SW:
+ case ICE_SID_PROFID_TCAM_FD:
+ case ICE_SID_PROFID_TCAM_RSS:
+ case ICE_SID_PROFID_TCAM_ACL:
+ case ICE_SID_PROFID_TCAM_PE:
+ pid = (struct ice_prof_id_section *)sect;
+ src = (u8 *)pid->entry;
+ sect_len = le16_to_cpu(pid->count) *
+ sizeof(*hw->blk[block_id].prof.t);
+ dst = (u8 *)hw->blk[block_id].prof.t;
+ dst_len = hw->blk[block_id].prof.count *
+ sizeof(*hw->blk[block_id].prof.t);
+ break;
+ case ICE_SID_PROFID_REDIR_SW:
+ case ICE_SID_PROFID_REDIR_FD:
+ case ICE_SID_PROFID_REDIR_RSS:
+ case ICE_SID_PROFID_REDIR_ACL:
+ case ICE_SID_PROFID_REDIR_PE:
+ pr = (struct ice_prof_redir_section *)sect;
+ src = pr->redir_value;
+ sect_len = le16_to_cpu(pr->count) *
+ sizeof(*hw->blk[block_id].prof_redir.t);
+ dst = hw->blk[block_id].prof_redir.t;
+ dst_len = hw->blk[block_id].prof_redir.count *
+ sizeof(*hw->blk[block_id].prof_redir.t);
+ break;
+ case ICE_SID_FLD_VEC_SW:
+ case ICE_SID_FLD_VEC_FD:
+ case ICE_SID_FLD_VEC_RSS:
+ case ICE_SID_FLD_VEC_ACL:
+ case ICE_SID_FLD_VEC_PE:
+ es = (struct ice_sw_fv_section *)sect;
+ src = (u8 *)es->fv;
+ sect_len = (u32)(le16_to_cpu(es->count) *
+ hw->blk[block_id].es.fvw) *
+ sizeof(*hw->blk[block_id].es.t);
+ dst = (u8 *)hw->blk[block_id].es.t;
+ dst_len = (u32)(hw->blk[block_id].es.count *
+ hw->blk[block_id].es.fvw) *
+ sizeof(*hw->blk[block_id].es.t);
+ break;
+ default:
+ return;
+ }
+
+ /* if the section offset exceeds destination length, terminate
+ * table fill.
+ */
+ if (offset > dst_len)
+ return;
+
+ /* if the sum of section size and offset exceeds the destination size
+ * then we are out of bounds of the HW table size for that PF.
+ * Clamp the section length to fill only the remaining table space
+ * of that PF.
+ */
+ if ((offset + sect_len) > dst_len)
+ sect_len = dst_len - offset;
+
+ memcpy(dst + offset, src, sect_len);
+ offset += sect_len;
+ sect = ice_pkg_enum_section(NULL, &state, sid);
+ }
+}
+
+/**
+ * ice_fill_blk_tbls - Read package context for tables
+ * @hw: pointer to the hardware structure
+ *
+ * Reads the current package contents and populates the driver
+ * database with the data iteratively for all advanced feature
+ * blocks. Assumes that the HW tables have been allocated.
+ */
+void ice_fill_blk_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ enum ice_block blk_id = (enum ice_block)i;
+
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt1.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].xlt2.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].prof_redir.sid);
+ ice_fill_tbl(hw, blk_id, hw->blk[blk_id].es.sid);
+ }
+
+ ice_init_sw_db(hw);
+}
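+
+/* Call-order sketch (illustrative): ice_init_hw_tbls() allocates the shadow
+ * tables, a successful ice_init_pkg() sets hw->seg, and only then does
+ * ice_fill_blk_tbls() copy the package sections and hand off to
+ * ice_init_sw_db() to rebuild the PTG and VSIG lists.
+ */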
+
+/**
+ * ice_free_hw_tbls - free hardware table memory
+ * @hw: pointer to the hardware structure
+ */
+void ice_free_hw_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ hw->blk[i].is_list_init = false;
+
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptypes);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.ptg_tbl);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt1.t);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.t);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsig_tbl);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].xlt2.vsis);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof.t);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].prof_redir.t);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.t);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.ref_count);
+ devm_kfree(ice_hw_to_dev(hw), hw->blk[i].es.written);
+ }
+
+ memset(hw->blk, 0, sizeof(hw->blk));
+}
+
+/**
+ * ice_clear_hw_tbls - clear HW tables and flow profiles
+ * @hw: pointer to the hardware structure
+ */
+void ice_clear_hw_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_tcam *prof = &hw->blk[i].prof;
+ struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
+ struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
+ struct ice_es *es = &hw->blk[i].es;
+
+ memset(xlt1->ptypes, 0, xlt1->count * sizeof(*xlt1->ptypes));
+ memset(xlt1->ptg_tbl, 0,
+ ICE_MAX_PTGS * sizeof(*xlt1->ptg_tbl));
+ memset(xlt1->t, 0, xlt1->count * sizeof(*xlt1->t));
+
+ memset(xlt2->vsis, 0, xlt2->count * sizeof(*xlt2->vsis));
+ memset(xlt2->vsig_tbl, 0,
+ xlt2->count * sizeof(*xlt2->vsig_tbl));
+ memset(xlt2->t, 0, xlt2->count * sizeof(*xlt2->t));
+
+ memset(prof->t, 0, prof->count * sizeof(*prof->t));
+ memset(prof_redir->t, 0,
+ prof_redir->count * sizeof(*prof_redir->t));
+
+ memset(es->t, 0, es->count * sizeof(*es->t));
+ memset(es->ref_count, 0, es->count * sizeof(*es->ref_count));
+ memset(es->written, 0, es->count * sizeof(*es->written));
+ }
+}
+
+/**
+ * ice_init_hw_tbls - init hardware table memory
+ * @hw: pointer to the hardware structure
+ */
+enum ice_status ice_init_hw_tbls(struct ice_hw *hw)
+{
+ u8 i;
+
+ for (i = 0; i < ICE_BLK_COUNT; i++) {
+ struct ice_prof_redir *prof_redir = &hw->blk[i].prof_redir;
+ struct ice_prof_tcam *prof = &hw->blk[i].prof;
+ struct ice_xlt1 *xlt1 = &hw->blk[i].xlt1;
+ struct ice_xlt2 *xlt2 = &hw->blk[i].xlt2;
+ struct ice_es *es = &hw->blk[i].es;
+ u16 j;
+
+ if (hw->blk[i].is_list_init)
+ continue;
+
+ hw->blk[i].is_list_init = true;
+
+ hw->blk[i].overwrite = blk_sizes[i].overwrite;
+ es->reverse = blk_sizes[i].reverse;
+
+ xlt1->sid = ice_blk_sids[i][ICE_SID_XLT1_OFF];
+ xlt1->count = blk_sizes[i].xlt1;
+
+ xlt1->ptypes = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
+ sizeof(*xlt1->ptypes), GFP_KERNEL);
+
+ if (!xlt1->ptypes)
+ goto err;
+
+ xlt1->ptg_tbl = devm_kcalloc(ice_hw_to_dev(hw), ICE_MAX_PTGS,
+ sizeof(*xlt1->ptg_tbl),
+ GFP_KERNEL);
+
+ if (!xlt1->ptg_tbl)
+ goto err;
+
+ xlt1->t = devm_kcalloc(ice_hw_to_dev(hw), xlt1->count,
+ sizeof(*xlt1->t), GFP_KERNEL);
+ if (!xlt1->t)
+ goto err;
+
+ xlt2->sid = ice_blk_sids[i][ICE_SID_XLT2_OFF];
+ xlt2->count = blk_sizes[i].xlt2;
+
+ xlt2->vsis = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
+ sizeof(*xlt2->vsis), GFP_KERNEL);
+
+ if (!xlt2->vsis)
+ goto err;
+
+ xlt2->vsig_tbl = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
+ sizeof(*xlt2->vsig_tbl),
+ GFP_KERNEL);
+ if (!xlt2->vsig_tbl)
+ goto err;
+
+ for (j = 0; j < xlt2->count; j++)
+ INIT_LIST_HEAD(&xlt2->vsig_tbl[j].prop_lst);
+
+ xlt2->t = devm_kcalloc(ice_hw_to_dev(hw), xlt2->count,
+ sizeof(*xlt2->t), GFP_KERNEL);
+ if (!xlt2->t)
+ goto err;
+
+ prof->sid = ice_blk_sids[i][ICE_SID_PR_OFF];
+ prof->count = blk_sizes[i].prof_tcam;
+ prof->max_prof_id = blk_sizes[i].prof_id;
+ prof->cdid_bits = blk_sizes[i].prof_cdid_bits;
+ prof->t = devm_kcalloc(ice_hw_to_dev(hw), prof->count,
+ sizeof(*prof->t), GFP_KERNEL);
+
+ if (!prof->t)
+ goto err;
+
+ prof_redir->sid = ice_blk_sids[i][ICE_SID_PR_REDIR_OFF];
+ prof_redir->count = blk_sizes[i].prof_redir;
+ prof_redir->t = devm_kcalloc(ice_hw_to_dev(hw),
+ prof_redir->count,
+ sizeof(*prof_redir->t),
+ GFP_KERNEL);
+
+ if (!prof_redir->t)
+ goto err;
+
+ es->sid = ice_blk_sids[i][ICE_SID_ES_OFF];
+ es->count = blk_sizes[i].es;
+ es->fvw = blk_sizes[i].fvw;
+ es->t = devm_kcalloc(ice_hw_to_dev(hw),
+ (u32)(es->count * es->fvw),
+ sizeof(*es->t), GFP_KERNEL);
+ if (!es->t)
+ goto err;
+
+ es->ref_count = devm_kcalloc(ice_hw_to_dev(hw), es->count,
+ sizeof(*es->ref_count),
+ GFP_KERNEL);
+ if (!es->ref_count)
+ goto err;
+
+ es->written = devm_kcalloc(ice_hw_to_dev(hw), es->count,
+ sizeof(*es->written), GFP_KERNEL);
+ if (!es->written)
+ goto err;
+ }
+ return 0;
+
+err:
+ ice_free_hw_tbls(hw);
+ return ICE_ERR_NO_MEMORY;
+}
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_pipe.h b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
new file mode 100644
index 000000000000..37eb282742d1
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_flex_pipe.h
@@ -0,0 +1,29 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_FLEX_PIPE_H_
+#define _ICE_FLEX_PIPE_H_
+
+#include "ice_type.h"
+
+/* Package minimal version supported */
+#define ICE_PKG_SUPP_VER_MAJ 1
+#define ICE_PKG_SUPP_VER_MNR 3
+
+/* Package format version */
+#define ICE_PKG_FMT_VER_MAJ 1
+#define ICE_PKG_FMT_VER_MNR 0
+#define ICE_PKG_FMT_VER_UPD 0
+#define ICE_PKG_FMT_VER_DFT 0
+
+#define ICE_PKG_CNT 4
+
+enum ice_status ice_init_pkg(struct ice_hw *hw, u8 *buff, u32 len);
+enum ice_status
+ice_copy_and_init_pkg(struct ice_hw *hw, const u8 *buf, u32 len);
+enum ice_status ice_init_hw_tbls(struct ice_hw *hw);
+void ice_free_seg(struct ice_hw *hw);
+void ice_fill_blk_tbls(struct ice_hw *hw);
+void ice_clear_hw_tbls(struct ice_hw *hw);
+void ice_free_hw_tbls(struct ice_hw *hw);
+#endif /* _ICE_FLEX_PIPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_flex_type.h b/drivers/net/ethernet/intel/ice/ice_flex_type.h
new file mode 100644
index 000000000000..5d5a7eaffa30
--- /dev/null
+++ b/drivers/net/ethernet/intel/ice/ice_flex_type.h
@@ -0,0 +1,374 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2019, Intel Corporation. */
+
+#ifndef _ICE_FLEX_TYPE_H_
+#define _ICE_FLEX_TYPE_H_
+/* Extraction Sequence (Field Vector) Table */
+struct ice_fv_word {
+ u8 prot_id;
+ u16 off; /* Offset within the protocol header */
+ u8 resvrd;
+} __packed;
+
+#define ICE_MAX_FV_WORDS 48
+struct ice_fv {
+ struct ice_fv_word ew[ICE_MAX_FV_WORDS];
+};
+
+/* Package and segment headers and tables */
+struct ice_pkg_hdr {
+ struct ice_pkg_ver format_ver;
+ __le32 seg_count;
+ __le32 seg_offset[1];
+};
+
+/* generic segment */
+struct ice_generic_seg_hdr {
+#define SEGMENT_TYPE_METADATA 0x00000001
+#define SEGMENT_TYPE_ICE 0x00000010
+ __le32 seg_type;
+ struct ice_pkg_ver seg_ver;
+ __le32 seg_size;
+ char seg_name[ICE_PKG_NAME_SIZE];
+};
+
+/* ice specific segment */
+
+union ice_device_id {
+ struct {
+ __le16 device_id;
+ __le16 vendor_id;
+ } dev_vend_id;
+ __le32 id;
+};
+
+struct ice_device_id_entry {
+ union ice_device_id device;
+ union ice_device_id sub_device;
+};
+
+struct ice_seg {
+ struct ice_generic_seg_hdr hdr;
+ __le32 device_table_count;
+ struct ice_device_id_entry device_table[1];
+};
+
+struct ice_nvm_table {
+ __le32 table_count;
+ __le32 vers[1];
+};
+
+struct ice_buf {
+#define ICE_PKG_BUF_SIZE 4096
+ u8 buf[ICE_PKG_BUF_SIZE];
+};
+
+struct ice_buf_table {
+ __le32 buf_count;
+ struct ice_buf buf_array[1];
+};
+
+/* global metadata specific segment */
+struct ice_global_metadata_seg {
+ struct ice_generic_seg_hdr hdr;
+ struct ice_pkg_ver pkg_ver;
+ __le32 track_id;
+ char pkg_name[ICE_PKG_NAME_SIZE];
+};
+
+#define ICE_MIN_S_OFF 12
+#define ICE_MAX_S_OFF 4095
+#define ICE_MIN_S_SZ 1
+#define ICE_MAX_S_SZ 4084
+
+/* section information */
+struct ice_section_entry {
+ __le32 type;
+ __le16 offset;
+ __le16 size;
+};
+
+#define ICE_MIN_S_COUNT 1
+#define ICE_MAX_S_COUNT 511
+#define ICE_MIN_S_DATA_END 12
+#define ICE_MAX_S_DATA_END 4096
+
+#define ICE_METADATA_BUF 0x80000000
+
+struct ice_buf_hdr {
+ __le16 section_count;
+ __le16 data_end;
+ struct ice_section_entry section_entry[1];
+};
+
+#define ICE_MAX_ENTRIES_IN_BUF(hd_sz, ent_sz) ((ICE_PKG_BUF_SIZE - \
+ sizeof(struct ice_buf_hdr) - (hd_sz)) / (ent_sz))
+
+/* ice package section IDs */
+#define ICE_SID_XLT1_SW 12
+#define ICE_SID_XLT2_SW 13
+#define ICE_SID_PROFID_TCAM_SW 14
+#define ICE_SID_PROFID_REDIR_SW 15
+#define ICE_SID_FLD_VEC_SW 16
+
+#define ICE_SID_XLT1_ACL 22
+#define ICE_SID_XLT2_ACL 23
+#define ICE_SID_PROFID_TCAM_ACL 24
+#define ICE_SID_PROFID_REDIR_ACL 25
+#define ICE_SID_FLD_VEC_ACL 26
+
+#define ICE_SID_XLT1_FD 32
+#define ICE_SID_XLT2_FD 33
+#define ICE_SID_PROFID_TCAM_FD 34
+#define ICE_SID_PROFID_REDIR_FD 35
+#define ICE_SID_FLD_VEC_FD 36
+
+#define ICE_SID_XLT1_RSS 42
+#define ICE_SID_XLT2_RSS 43
+#define ICE_SID_PROFID_TCAM_RSS 44
+#define ICE_SID_PROFID_REDIR_RSS 45
+#define ICE_SID_FLD_VEC_RSS 46
+
+#define ICE_SID_RXPARSER_BOOST_TCAM 56
+
+#define ICE_SID_XLT1_PE 82
+#define ICE_SID_XLT2_PE 83
+#define ICE_SID_PROFID_TCAM_PE 84
+#define ICE_SID_PROFID_REDIR_PE 85
+#define ICE_SID_FLD_VEC_PE 86
+
+/* Label Metadata section IDs */
+#define ICE_SID_LBL_FIRST 0x80000010
+#define ICE_SID_LBL_RXPARSER_TMEM 0x80000018
+/* The following define MUST be updated to reflect the last label section ID */
+#define ICE_SID_LBL_LAST 0x80000038
+
+enum ice_block {
+ ICE_BLK_SW = 0,
+ ICE_BLK_ACL,
+ ICE_BLK_FD,
+ ICE_BLK_RSS,
+ ICE_BLK_PE,
+ ICE_BLK_COUNT
+};
+
+/* package labels */
+struct ice_label {
+ __le16 value;
+#define ICE_PKG_LABEL_SIZE 64
+ char name[ICE_PKG_LABEL_SIZE];
+};
+
+struct ice_label_section {
+ __le16 count;
+ struct ice_label label[1];
+};
+
+#define ICE_MAX_LABELS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
+ sizeof(struct ice_label_section) - sizeof(struct ice_label), \
+ sizeof(struct ice_label))
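+
+/* Worked example (illustrative, assuming natural alignment and no extra
+ * padding): sizeof(struct ice_label) is 66 bytes, the label section header
+ * adds 2 bytes and sizeof(struct ice_buf_hdr) is 12 bytes, so
+ * ICE_MAX_LABELS_IN_BUF evaluates to (4096 - 12 - 2) / 66 = 61.
+ */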
+
+struct ice_sw_fv_section {
+ __le16 count;
+ __le16 base_offset;
+ struct ice_fv fv[1];
+};
+
+/* The BOOST TCAM stores the match packet header in reverse order, meaning
+ * the fields are reversed; in addition, this means that the normally big endian
+ * fields of the packet are now little endian.
+ */
+struct ice_boost_key_value {
+#define ICE_BOOST_REMAINING_HV_KEY 15
+ u8 remaining_hv_key[ICE_BOOST_REMAINING_HV_KEY];
+ __le16 hv_dst_port_key;
+ __le16 hv_src_port_key;
+ u8 tcam_search_key;
+} __packed;
+
+struct ice_boost_key {
+ struct ice_boost_key_value key;
+ struct ice_boost_key_value key2;
+};
+
+/* package Boost TCAM entry */
+struct ice_boost_tcam_entry {
+ __le16 addr;
+ __le16 reserved;
+ /* break up the 40 bytes of key into different fields */
+ struct ice_boost_key key;
+ u8 boost_hit_index_group;
+ /* The following contains bitfields which are not on byte boundaries.
+ * These fields are currently unused by driver software.
+ */
+#define ICE_BOOST_BIT_FIELDS 43
+ u8 bit_fields[ICE_BOOST_BIT_FIELDS];
+};
+
+struct ice_boost_tcam_section {
+ __le16 count;
+ __le16 reserved;
+ struct ice_boost_tcam_entry tcam[1];
+};
+
+#define ICE_MAX_BST_TCAMS_IN_BUF ICE_MAX_ENTRIES_IN_BUF( \
+ sizeof(struct ice_boost_tcam_section) - \
+ sizeof(struct ice_boost_tcam_entry), \
+ sizeof(struct ice_boost_tcam_entry))
+
+struct ice_xlt1_section {
+ __le16 count;
+ __le16 offset;
+ u8 value[1];
+} __packed;
+
+struct ice_xlt2_section {
+ __le16 count;
+ __le16 offset;
+ __le16 value[1];
+};
+
+struct ice_prof_redir_section {
+ __le16 count;
+ __le16 offset;
+ u8 redir_value[1];
+};
+
+struct ice_pkg_enum {
+ struct ice_buf_table *buf_table;
+ u32 buf_idx;
+
+ u32 type;
+ struct ice_buf_hdr *buf;
+ u32 sect_idx;
+ void *sect;
+ u32 sect_type;
+
+ u32 entry_idx;
+ void *(*handler)(u32 sect_type, void *section, u32 index, u32 *offset);
+};
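+
+/* Usage sketch (illustrative): the enumeration state is zeroed, primed with
+ * the ICE segment on the first ice_pkg_enum_section() call and continued with
+ * a NULL segment thereafter, as ice_fill_tbl() does:
+ *
+ *   memset(&state, 0, sizeof(state));
+ *   sect = ice_pkg_enum_section(hw->seg, &state, sid);
+ *   while (sect)
+ *           sect = ice_pkg_enum_section(NULL, &state, sid);
+ */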
+
+struct ice_es {
+ u32 sid;
+ u16 count;
+ u16 fvw;
+ u16 *ref_count;
+ struct list_head prof_map;
+ struct ice_fv_word *t;
+ struct mutex prof_map_lock; /* protect access to profiles list */
+ u8 *written;
+ u8 reverse; /* set to true to reverse FV order */
+};
+
+/* PTYPE Group management */
+
+/* Note: the XLT1 table takes a 13-bit value as input and produces an 8-bit
+ * packet type group (PTG) ID as output.
+ *
+ * Note: PTG 0 is the default packet type group and it is assumed that all
+ * PTYPEs are part of this group until moved to a new PTG.
+ */
+#define ICE_DEFAULT_PTG 0
+
+struct ice_ptg_entry {
+ struct ice_ptg_ptype *first_ptype;
+ u8 in_use;
+};
+
+struct ice_ptg_ptype {
+ struct ice_ptg_ptype *next_ptype;
+ u8 ptg;
+};
+
+struct ice_vsig_entry {
+ struct list_head prop_lst;
+ struct ice_vsig_vsi *first_vsi;
+ u8 in_use;
+};
+
+struct ice_vsig_vsi {
+ struct ice_vsig_vsi *next_vsi;
+ u32 prop_mask;
+ u16 changed;
+ u16 vsig;
+};
+
+#define ICE_XLT1_CNT 1024
+#define ICE_MAX_PTGS 256
+
+/* XLT1 Table */
+struct ice_xlt1 {
+ struct ice_ptg_entry *ptg_tbl;
+ struct ice_ptg_ptype *ptypes;
+ u8 *t;
+ u32 sid;
+ u16 count;
+};
+
+#define ICE_XLT2_CNT 768
+#define ICE_MAX_VSIGS 768
+
+/* VSIG bit layout:
+ * [0:12]: incremental VSIG index 1 to ICE_MAX_VSIGS
+ * [13:15]: PF number of device
+ */
+#define ICE_VSIG_IDX_M (0x1FFF)
+#define ICE_PF_NUM_S 13
+#define ICE_PF_NUM_M (0x07 << ICE_PF_NUM_S)
+#define ICE_VSIG_VALUE(vsig, pf_id) \
+ (u16)((((u16)(vsig)) & ICE_VSIG_IDX_M) | \
+ (((u16)(pf_id) << ICE_PF_NUM_S) & ICE_PF_NUM_M))
+#define ICE_DEFAULT_VSIG 0
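+
+/* Worked example (illustrative): for PF 2 and VSIG index 5,
+ * ICE_VSIG_VALUE(5, 2) = (5 & 0x1FFF) | ((2 << 13) & 0xE000) = 0x4005.
+ */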
+
+/* XLT2 Table */
+struct ice_xlt2 {
+ struct ice_vsig_entry *vsig_tbl;
+ struct ice_vsig_vsi *vsis;
+ u16 *t;
+ u32 sid;
+ u16 count;
+};
+
+/* Keys are made up of two values, each one-half the size of the key.
+ * For TCAM, the entire key is 80 bits wide (or two 40-bit wide values).
+ */
+#define ICE_TCAM_KEY_VAL_SZ 5
+#define ICE_TCAM_KEY_SZ (2 * ICE_TCAM_KEY_VAL_SZ)
+
+struct ice_prof_tcam_entry {
+ __le16 addr;
+ u8 key[ICE_TCAM_KEY_SZ];
+ u8 prof_id;
+} __packed;
+
+struct ice_prof_id_section {
+ __le16 count;
+ struct ice_prof_tcam_entry entry[1];
+} __packed;
+
+struct ice_prof_tcam {
+ u32 sid;
+ u16 count;
+ u16 max_prof_id;
+ struct ice_prof_tcam_entry *t;
+ u8 cdid_bits; /* # CDID bits to use in key, 0, 2, 4, or 8 */
+};
+
+struct ice_prof_redir {
+ u8 *t;
+ u32 sid;
+ u16 count;
+};
+
+/* Tables per block */
+struct ice_blk_info {
+ struct ice_xlt1 xlt1;
+ struct ice_xlt2 xlt2;
+ struct ice_prof_tcam prof;
+ struct ice_prof_redir prof_redir;
+ struct ice_es es;
+ u8 overwrite; /* set to true to allow overwrite of table entries */
+ u8 is_list_init;
+};
+
+#endif /* _ICE_FLEX_TYPE_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
index 6f78ff5534af..152fbd556e9b 100644
--- a/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
+++ b/drivers/net/ethernet/intel/ice/ice_hw_autogen.h
@@ -55,6 +55,8 @@
#define PRTDCB_GENS 0x00083020
#define PRTDCB_GENS_DCBX_STATUS_S 0
#define PRTDCB_GENS_DCBX_STATUS_M ICE_M(0x7, 0)
+#define GL_PREEXT_L2_PMASK0(_i) (0x0020F0FC + ((_i) * 4))
+#define GL_PREEXT_L2_PMASK1(_i) (0x0020F108 + ((_i) * 4))
#define GLFLXP_RXDID_FLAGS(_i, _j) (0x0045D000 + ((_i) * 4 + (_j) * 256))
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_S 0
#define GLFLXP_RXDID_FLAGS_FLEXIFLAG_4N_M ICE_M(0x3F, 0)
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.c b/drivers/net/ethernet/intel/ice/ice_lib.c
index 7cd8c5d13bcc..cc755382df25 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.c
+++ b/drivers/net/ethernet/intel/ice/ice_lib.c
@@ -753,6 +753,17 @@ void ice_vsi_put_qs(struct ice_vsi *vsi)
}
/**
+ * ice_is_safe_mode - determine if the driver is in safe mode
+ * @pf: pointer to the PF struct
+ *
+ * Returns true if the driver is in safe mode, false otherwise.
+ */
+bool ice_is_safe_mode(struct ice_pf *pf)
+{
+ return !test_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
+}
+
+/**
* ice_rss_clean - Delete RSS related VSI structures that hold user inputs
* @vsi: the VSI being removed
*/
@@ -2629,15 +2640,17 @@ ice_vsi_setup(struct ice_pf *pf, struct ice_port_info *pi,
* DCB settings in the HW. Also, if the FW DCBX engine is not running
* then Rx LLDP packets need to be redirected up the stack.
*/
- if (vsi->type == ICE_VSI_PF) {
- ice_vsi_add_rem_eth_mac(vsi, true);
+ if (!ice_is_safe_mode(pf)) {
+ if (vsi->type == ICE_VSI_PF) {
+ ice_vsi_add_rem_eth_mac(vsi, true);
- /* Tx LLDP packets */
- ice_cfg_sw_lldp(vsi, true, true);
+ /* Tx LLDP packets */
+ ice_cfg_sw_lldp(vsi, true, true);
- /* Rx LLDP packets */
- if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, true);
+ /* Rx LLDP packets */
+ if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, true);
+ }
}
return vsi;
@@ -2905,8 +2918,11 @@ void ice_vsi_dis_irq(struct ice_vsi *vsi)
}
/* disable each interrupt */
- ice_for_each_q_vector(vsi, i)
+ ice_for_each_q_vector(vsi, i) {
+ if (!vsi->q_vectors[i])
+ continue;
wr32(hw, GLINT_DYN_CTL(vsi->q_vectors[i]->reg_idx), 0);
+ }
ice_flush(hw);
@@ -2975,14 +2991,16 @@ int ice_vsi_release(struct ice_vsi *vsi)
pf->num_avail_sw_msix += vsi->num_q_vectors;
}
- if (vsi->type == ICE_VSI_PF) {
- ice_vsi_add_rem_eth_mac(vsi, false);
- ice_cfg_sw_lldp(vsi, true, false);
- /* The Rx rule will only exist to remove if the LLDP FW
- * engine is currently stopped
- */
- if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
- ice_cfg_sw_lldp(vsi, false, false);
+ if (!ice_is_safe_mode(pf)) {
+ if (vsi->type == ICE_VSI_PF) {
+ ice_vsi_add_rem_eth_mac(vsi, false);
+ ice_cfg_sw_lldp(vsi, true, false);
+ /* The Rx rule will only exist to remove if the LLDP FW
+ * engine is currently stopped
+ */
+ if (!test_bit(ICE_FLAG_FW_LLDP_AGENT, pf->flags))
+ ice_cfg_sw_lldp(vsi, false, false);
+ }
}
ice_remove_vsi_fltr(&pf->hw, vsi->idx);
@@ -3169,48 +3187,6 @@ static void ice_vsi_update_q_map(struct ice_vsi *vsi, struct ice_vsi_ctx *ctx)
}
/**
- * ice_vsi_cfg_netdev_tc - Setup the netdev TC configuration
- * @vsi: the VSI being configured
- * @ena_tc: TC map to be enabled
- */
-static void ice_vsi_cfg_netdev_tc(struct ice_vsi *vsi, u8 ena_tc)
-{
- struct net_device *netdev = vsi->netdev;
- struct ice_pf *pf = vsi->back;
- struct ice_dcbx_cfg *dcbcfg;
- u8 netdev_tc;
- int i;
-
- if (!netdev)
- return;
-
- if (!ena_tc) {
- netdev_reset_tc(netdev);
- return;
- }
-
- if (netdev_set_num_tc(netdev, vsi->tc_cfg.numtc))
- return;
-
- dcbcfg = &pf->hw.port_info->local_dcbx_cfg;
-
- ice_for_each_traffic_class(i)
- if (vsi->tc_cfg.ena_tc & BIT(i))
- netdev_set_tc_queue(netdev,
- vsi->tc_cfg.tc_info[i].netdev_tc,
- vsi->tc_cfg.tc_info[i].qcount_tx,
- vsi->tc_cfg.tc_info[i].qoffset);
-
- for (i = 0; i < ICE_MAX_USER_PRIORITY; i++) {
- u8 ets_tc = dcbcfg->etscfg.prio_table[i];
-
- /* Get the mapped netdev TC# for the UP */
- netdev_tc = vsi->tc_cfg.tc_info[ets_tc].netdev_tc;
- netdev_set_prio_tc_map(netdev, i, netdev_tc);
- }
-}
-
-/**
* ice_vsi_cfg_tc - Configure VSI Tx Sched for given TC map
* @vsi: VSI to be configured
* @ena_tc: TC bitmap
@@ -3276,6 +3252,25 @@ out:
#endif /* CONFIG_DCB */
/**
+ * ice_nvm_version_str - format the NVM version string
+ * @hw: ptr to the hardware info
+ */
+char *ice_nvm_version_str(struct ice_hw *hw)
+{
+ u8 oem_ver, oem_patch, ver_hi, ver_lo;
+ static char buf[ICE_NVM_VER_LEN];
+ u16 oem_build;
+
+ ice_get_nvm_version(hw, &oem_ver, &oem_build, &oem_patch, &ver_hi,
+ &ver_lo);
+
+ snprintf(buf, sizeof(buf), "%x.%02x 0x%x %d.%d.%d", ver_hi, ver_lo,
+ hw->nvm.eetrack, oem_ver, oem_build, oem_patch);
+
+ return buf;
+}
+
+/**
* ice_vsi_cfg_mac_fltr - Add or remove a MAC address filter for a VSI
* @vsi: the VSI being configured MAC filter
* @macaddr: the MAC address to be added.
diff --git a/drivers/net/ethernet/intel/ice/ice_lib.h b/drivers/net/ethernet/intel/ice/ice_lib.h
index 7faf8db844f6..47bc033fff20 100644
--- a/drivers/net/ethernet/intel/ice/ice_lib.h
+++ b/drivers/net/ethernet/intel/ice/ice_lib.h
@@ -120,7 +120,10 @@ int ice_vsi_manage_rss_lut(struct ice_vsi *vsi, bool ena);
u32 ice_intrl_usec_to_reg(u8 intrl, u8 gran);
+char *ice_nvm_version_str(struct ice_hw *hw);
+
enum ice_status
ice_vsi_cfg_mac_fltr(struct ice_vsi *vsi, const u8 *macaddr, bool set);
+bool ice_is_safe_mode(struct ice_pf *pf);
#endif /* !_ICE_LIB_H_ */
diff --git a/drivers/net/ethernet/intel/ice/ice_main.c b/drivers/net/ethernet/intel/ice/ice_main.c
index f8be9ada2447..214cd6eca405 100644
--- a/drivers/net/ethernet/intel/ice/ice_main.c
+++ b/drivers/net/ethernet/intel/ice/ice_main.c
@@ -9,16 +9,27 @@
#include "ice_lib.h"
#include "ice_dcb_lib.h"
-#define DRV_VERSION "0.7.5-k"
+#define DRV_VERSION_MAJOR 0
+#define DRV_VERSION_MINOR 8
+#define DRV_VERSION_BUILD 1
+
+#define DRV_VERSION __stringify(DRV_VERSION_MAJOR) "." \
+ __stringify(DRV_VERSION_MINOR) "." \
+ __stringify(DRV_VERSION_BUILD) "-k"
#define DRV_SUMMARY "Intel(R) Ethernet Connection E800 Series Linux Driver"
const char ice_drv_ver[] = DRV_VERSION;
static const char ice_driver_string[] = DRV_SUMMARY;
static const char ice_copyright[] = "Copyright (c) 2018, Intel Corporation.";
+/* DDP Package file located in firmware search paths (e.g. /lib/firmware/) */
+#define ICE_DDP_PKG_PATH "intel/ice/ddp/"
+#define ICE_DDP_PKG_FILE ICE_DDP_PKG_PATH "ice.pkg"
+
MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
MODULE_DESCRIPTION(DRV_SUMMARY);
MODULE_LICENSE("GPL v2");
MODULE_VERSION(DRV_VERSION);
+MODULE_FIRMWARE(ICE_DDP_PKG_FILE);
static int debug = -1;
module_param(debug, int, 0644);
@@ -29,9 +40,10 @@ MODULE_PARM_DESC(debug, "netif level (0=none,...,16=all)");
#endif /* !CONFIG_DYNAMIC_DEBUG */
static struct workqueue_struct *ice_wq;
+static const struct net_device_ops ice_netdev_safe_mode_ops;
static const struct net_device_ops ice_netdev_ops;
-static void ice_rebuild(struct ice_pf *pf);
+static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type);
static void ice_vsi_release_all(struct ice_pf *pf);
@@ -491,6 +503,8 @@ ice_prepare_for_reset(struct ice_pf *pf)
for (i = 0; i < pf->num_alloc_vfs; i++)
ice_set_vf_state_qs_dis(&pf->vf[i]);
+ /* clear SW filtering DB */
+ ice_clear_hw_tbls(hw);
/* disable the VSIs and their queues that are not already DOWN */
ice_pf_dis_all_vsi(pf, false);
@@ -536,7 +550,7 @@ static void ice_do_reset(struct ice_pf *pf, enum ice_reset_req reset_type)
*/
if (reset_type == ICE_RESET_PFR) {
pf->pfr_count++;
- ice_rebuild(pf);
+ ice_rebuild(pf, reset_type);
clear_bit(__ICE_PREPARED_FOR_RESET, pf->state);
clear_bit(__ICE_PFR_REQ, pf->state);
ice_reset_all_vfs(pf, true);
@@ -580,7 +594,7 @@ static void ice_reset_subtask(struct ice_pf *pf)
} else {
/* done with reset. start rebuild */
pf->hw.reset_ongoing = false;
- ice_rebuild(pf);
+ ice_rebuild(pf, reset_type);
/* clear bit to resume normal operations, but
* ICE_NEEDS_RESTART bit is set in case rebuild failed
*/
@@ -1490,13 +1504,19 @@ static void ice_service_task(struct work_struct *work)
return;
}
+ ice_clean_adminq_subtask(pf);
ice_check_media_subtask(pf);
ice_check_for_hang_subtask(pf);
ice_sync_fltr_subtask(pf);
ice_handle_mdd_event(pf);
- ice_process_vflr_event(pf);
ice_watchdog_subtask(pf);
- ice_clean_adminq_subtask(pf);
+
+ if (ice_is_safe_mode(pf)) {
+ ice_service_task_complete(pf);
+ return;
+ }
+
+ ice_process_vflr_event(pf);
ice_clean_mailboxq_subtask(pf);
/* Clear __ICE_SERVICE_SCHED flag to allow scheduling next event */
@@ -1931,30 +1951,41 @@ static void ice_napi_add(struct ice_vsi *vsi)
}
/**
- * ice_cfg_netdev - Allocate, configure and register a netdev
- * @vsi: the VSI associated with the new netdev
- *
- * Returns 0 on success, negative value on failure
+ * ice_set_ops - set netdev and ethtools ops for the given netdev
+ * @netdev: netdev instance
*/
-static int ice_cfg_netdev(struct ice_vsi *vsi)
+static void ice_set_ops(struct net_device *netdev)
+{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
+
+ if (ice_is_safe_mode(pf)) {
+ netdev->netdev_ops = &ice_netdev_safe_mode_ops;
+ ice_set_ethtool_safe_mode_ops(netdev);
+ return;
+ }
+
+ netdev->netdev_ops = &ice_netdev_ops;
+ ice_set_ethtool_ops(netdev);
+}
+
+/**
+ * ice_set_netdev_features - set features for the given netdev
+ * @netdev: netdev instance
+ */
+static void ice_set_netdev_features(struct net_device *netdev)
{
+ struct ice_pf *pf = ice_netdev_to_pf(netdev);
netdev_features_t csumo_features;
netdev_features_t vlano_features;
netdev_features_t dflt_features;
netdev_features_t tso_features;
- struct ice_netdev_priv *np;
- struct net_device *netdev;
- u8 mac_addr[ETH_ALEN];
- int err;
-
- netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
- vsi->alloc_rxq);
- if (!netdev)
- return -ENOMEM;
- vsi->netdev = netdev;
- np = netdev_priv(netdev);
- np->vsi = vsi;
+ if (ice_is_safe_mode(pf)) {
+ /* safe mode */
+ netdev->features = NETIF_F_SG | NETIF_F_HIGHDMA;
+ netdev->hw_features = netdev->features;
+ return;
+ }
dflt_features = NETIF_F_SG |
NETIF_F_HIGHDMA |
@@ -1982,25 +2013,50 @@ static int ice_cfg_netdev(struct ice_vsi *vsi)
tso_features;
netdev->vlan_features |= dflt_features | csumo_features |
tso_features;
+}
+
+/**
+ * ice_cfg_netdev - Allocate, configure and register a netdev
+ * @vsi: the VSI associated with the new netdev
+ *
+ * Returns 0 on success, negative value on failure
+ */
+static int ice_cfg_netdev(struct ice_vsi *vsi)
+{
+ struct ice_pf *pf = vsi->back;
+ struct ice_netdev_priv *np;
+ struct net_device *netdev;
+ u8 mac_addr[ETH_ALEN];
+ int err;
+
+ netdev = alloc_etherdev_mqs(sizeof(*np), vsi->alloc_txq,
+ vsi->alloc_rxq);
+ if (!netdev)
+ return -ENOMEM;
+
+ vsi->netdev = netdev;
+ np = netdev_priv(netdev);
+ np->vsi = vsi;
+
+ ice_set_netdev_features(netdev);
+
+ ice_set_ops(netdev);
if (vsi->type == ICE_VSI_PF) {
- SET_NETDEV_DEV(netdev, &vsi->back->pdev->dev);
+ SET_NETDEV_DEV(netdev, &pf->pdev->dev);
ether_addr_copy(mac_addr, vsi->port_info->mac.perm_addr);
-
ether_addr_copy(netdev->dev_addr, mac_addr);
ether_addr_copy(netdev->perm_addr, mac_addr);
}
netdev->priv_flags |= IFF_UNICAST_FLT;
- /* assign netdev_ops */
- netdev->netdev_ops = &ice_netdev_ops;
+ /* Setup netdev TC information */
+ ice_vsi_cfg_netdev_tc(vsi, vsi->tc_cfg.ena_tc);
/* setup watchdog timeout value to be 5 second */
netdev->watchdog_timeo = 5 * HZ;
- ice_set_ethtool_ops(netdev);
-
netdev->min_mtu = ETH_MIN_MTU;
netdev->max_mtu = ICE_MAX_MTU;
@@ -2258,29 +2314,41 @@ static void ice_deinit_pf(struct ice_pf *pf)
}
/**
- * ice_init_pf - Initialize general software structures (struct ice_pf)
- * @pf: board private structure to initialize
+ * ice_set_pf_caps - set PFs capability flags
+ * @pf: pointer to the PF instance
*/
-static int ice_init_pf(struct ice_pf *pf)
+static void ice_set_pf_caps(struct ice_pf *pf)
{
- bitmap_zero(pf->flags, ICE_PF_FLAGS_NBITS);
- if (pf->hw.func_caps.common_cap.dcb)
+ struct ice_hw_func_caps *func_caps = &pf->hw.func_caps;
+
+ clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+ if (func_caps->common_cap.dcb)
set_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
#ifdef CONFIG_PCI_IOV
- if (pf->hw.func_caps.common_cap.sr_iov_1_1) {
- struct ice_hw *hw = &pf->hw;
-
+ clear_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
+ if (func_caps->common_cap.sr_iov_1_1) {
set_bit(ICE_FLAG_SRIOV_CAPABLE, pf->flags);
- pf->num_vfs_supported = min_t(int, hw->func_caps.num_allocd_vfs,
+ pf->num_vfs_supported = min_t(int, func_caps->num_allocd_vfs,
ICE_MAX_VF_COUNT);
}
#endif /* CONFIG_PCI_IOV */
+ clear_bit(ICE_FLAG_RSS_ENA, pf->flags);
+ if (func_caps->common_cap.rss_table_size)
+ set_bit(ICE_FLAG_RSS_ENA, pf->flags);
- mutex_init(&pf->sw_mutex);
- mutex_init(&pf->avail_q_mutex);
+ pf->max_pf_txqs = func_caps->common_cap.num_txq;
+ pf->max_pf_rxqs = func_caps->common_cap.num_rxq;
+}
- if (pf->hw.func_caps.common_cap.rss_table_size)
- set_bit(ICE_FLAG_RSS_ENA, pf->flags);
+/**
+ * ice_init_pf - Initialize general software structures (struct ice_pf)
+ * @pf: board private structure to initialize
+ */
+static int ice_init_pf(struct ice_pf *pf)
+{
+ ice_set_pf_caps(pf);
+
+ mutex_init(&pf->sw_mutex);
/* setup service timer and periodic service task */
timer_setup(&pf->serv_tmr, ice_service_timer, 0);
@@ -2288,9 +2356,7 @@ static int ice_init_pf(struct ice_pf *pf)
INIT_WORK(&pf->serv_task, ice_service_task);
clear_bit(__ICE_SERVICE_SCHED, pf->state);
- pf->max_pf_txqs = pf->hw.func_caps.common_cap.num_txq;
- pf->max_pf_rxqs = pf->hw.func_caps.common_cap.num_rxq;
-
+ mutex_init(&pf->avail_q_mutex);
pf->avail_txqs = bitmap_zalloc(pf->max_pf_txqs, GFP_KERNEL);
if (!pf->avail_txqs)
return -ENOMEM;
@@ -2444,6 +2510,163 @@ static int ice_init_interrupt_scheme(struct ice_pf *pf)
}
/**
+ * ice_log_pkg_init - log result of DDP package load
+ * @hw: pointer to hardware info
+ * @status: status of package load
+ */
+static void
+ice_log_pkg_init(struct ice_hw *hw, enum ice_status *status)
+{
+ struct ice_pf *pf = (struct ice_pf *)hw->back;
+ struct device *dev = &pf->pdev->dev;
+
+ switch (*status) {
+ case ICE_SUCCESS:
+ /* The package download AdminQ command returned success either
+ * because this download succeeded or because it reported
+ * ICE_ERR_AQ_NO_WORK, meaning a package is already loaded on
+ * the device.
+ */
+ if (hw->pkg_ver.major == hw->active_pkg_ver.major &&
+ hw->pkg_ver.minor == hw->active_pkg_ver.minor &&
+ hw->pkg_ver.update == hw->active_pkg_ver.update &&
+ hw->pkg_ver.draft == hw->active_pkg_ver.draft &&
+ !memcmp(hw->pkg_name, hw->active_pkg_name,
+ sizeof(hw->pkg_name))) {
+ if (hw->pkg_dwnld_status == ICE_AQ_RC_EEXIST)
+ dev_info(dev,
+ "DDP package already present on device: %s version %d.%d.%d.%d\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft);
+ else
+ dev_info(dev,
+ "The DDP package was successfully loaded: %s version %d.%d.%d.%d\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft);
+ } else if (hw->active_pkg_ver.major != ICE_PKG_SUPP_VER_MAJ ||
+ hw->active_pkg_ver.minor != ICE_PKG_SUPP_VER_MNR) {
+ dev_err(dev,
+ "The device has a DDP package that is not supported by the driver. The device has package '%s' version %d.%d.x.x. The driver requires version %d.%d.x.x. Entering Safe Mode.\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ *status = ICE_ERR_NOT_SUPPORTED;
+ } else if (hw->active_pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
+ hw->active_pkg_ver.minor == ICE_PKG_SUPP_VER_MNR) {
+ dev_info(dev,
+ "The driver could not load the DDP package file because a compatible DDP package is already present on the device. The device has package '%s' version %d.%d.%d.%d. The package file found by the driver: '%s' version %d.%d.%d.%d.\n",
+ hw->active_pkg_name,
+ hw->active_pkg_ver.major,
+ hw->active_pkg_ver.minor,
+ hw->active_pkg_ver.update,
+ hw->active_pkg_ver.draft,
+ hw->pkg_name,
+ hw->pkg_ver.major,
+ hw->pkg_ver.minor,
+ hw->pkg_ver.update,
+ hw->pkg_ver.draft);
+ } else {
+ dev_err(dev,
+ "An unknown error occurred when loading the DDP package, please reboot the system. If the problem persists, update the NVM. Entering Safe Mode.\n");
+ *status = ICE_ERR_NOT_SUPPORTED;
+ }
+ break;
+ case ICE_ERR_BUF_TOO_SHORT:
+ /* fall-through */
+ case ICE_ERR_CFG:
+ dev_err(dev,
+ "The DDP package file is invalid. Entering Safe Mode.\n");
+ break;
+ case ICE_ERR_NOT_SUPPORTED:
+ /* Package File version not supported */
+ if (hw->pkg_ver.major > ICE_PKG_SUPP_VER_MAJ ||
+ (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
+ hw->pkg_ver.minor > ICE_PKG_SUPP_VER_MNR))
+ dev_err(dev,
+ "The DDP package file version is higher than the driver supports. Please use an updated driver. Entering Safe Mode.\n");
+ else if (hw->pkg_ver.major < ICE_PKG_SUPP_VER_MAJ ||
+ (hw->pkg_ver.major == ICE_PKG_SUPP_VER_MAJ &&
+ hw->pkg_ver.minor < ICE_PKG_SUPP_VER_MNR))
+ dev_err(dev,
+ "The DDP package file version is lower than the driver supports. The driver requires version %d.%d.x.x. Please use an updated DDP Package file. Entering Safe Mode.\n",
+ ICE_PKG_SUPP_VER_MAJ, ICE_PKG_SUPP_VER_MNR);
+ break;
+ case ICE_ERR_AQ_ERROR:
+ switch (hw->adminq.sq_last_status) {
+ case ICE_AQ_RC_ENOSEC:
+ case ICE_AQ_RC_EBADSIG:
+ dev_err(dev,
+ "The DDP package could not be loaded because its signature is not valid. Please use a valid DDP Package. Entering Safe Mode.\n");
+ return;
+ case ICE_AQ_RC_ESVN:
+ dev_err(dev,
+ "The DDP Package could not be loaded because its security revision is too low. Please use an updated DDP Package. Entering Safe Mode.\n");
+ return;
+ case ICE_AQ_RC_EBADMAN:
+ case ICE_AQ_RC_EBADBUF:
+ dev_err(dev,
+ "An error occurred on the device while loading the DDP package. The device will be reset.\n");
+ return;
+ default:
+ break;
+ }
+ /* fall-through */
+ default:
+ dev_err(dev,
+ "An unknown error (%d) occurred when loading the DDP package. Entering Safe Mode.\n",
+ *status);
+ break;
+ }
+}
+
+/**
+ * ice_load_pkg - load/reload the DDP Package file
+ * @firmware: firmware structure when firmware requested or NULL for reload
+ * @pf: pointer to the PF instance
+ *
+ * Called on probe and post CORER/GLOBR rebuild to load DDP Package and
+ * initialize HW tables.
+ */
+static void
+ice_load_pkg(const struct firmware *firmware, struct ice_pf *pf)
+{
+ enum ice_status status = ICE_ERR_PARAM;
+ struct device *dev = &pf->pdev->dev;
+ struct ice_hw *hw = &pf->hw;
+
+ /* Load DDP Package */
+ if (firmware && !hw->pkg_copy) {
+ status = ice_copy_and_init_pkg(hw, firmware->data,
+ firmware->size);
+ ice_log_pkg_init(hw, &status);
+ } else if (!firmware && hw->pkg_copy) {
+ /* Reload package during rebuild after CORER/GLOBR reset */
+ status = ice_init_pkg(hw, hw->pkg_copy, hw->pkg_size);
+ ice_log_pkg_init(hw, &status);
+ } else {
+ dev_err(dev,
+ "The DDP package file failed to load. Entering Safe Mode.\n");
+ }
+
+ if (status) {
+ /* Safe Mode */
+ clear_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
+ return;
+ }
+
+ /* A successfully downloaded package is the precondition for advanced
+ * features, hence set the ICE_FLAG_ADV_FEATURES flag
+ */
+ set_bit(ICE_FLAG_ADV_FEATURES, pf->flags);
+}
+
+/**
* ice_verify_cacheline_size - verify driver's assumption of 64 Byte cache lines
* @pf: pointer to the PF structure
*
@@ -2460,6 +2683,105 @@ static void ice_verify_cacheline_size(struct ice_pf *pf)
}
/**
+ * ice_send_version - update firmware with driver version
+ * @pf: PF struct
+ *
+ * Returns ICE_SUCCESS on success, else error code
+ */
+static enum ice_status ice_send_version(struct ice_pf *pf)
+{
+ struct ice_driver_ver dv;
+
+ dv.major_ver = DRV_VERSION_MAJOR;
+ dv.minor_ver = DRV_VERSION_MINOR;
+ dv.build_ver = DRV_VERSION_BUILD;
+ dv.subbuild_ver = 0;
+ strscpy((char *)dv.driver_string, DRV_VERSION,
+ sizeof(dv.driver_string));
+ return ice_aq_send_driver_ver(&pf->hw, &dv, NULL);
+}
+
+/**
+ * ice_get_opt_fw_name - return optional firmware file name or NULL
+ * @pf: pointer to the PF instance
+ */
+static char *ice_get_opt_fw_name(struct ice_pf *pf)
+{
+ /* The optional firmware name is the same as the default, with an
+ * additional dash followed by an EUI-64 identifier (the PCIe Device
+ * Serial Number)
+ */
+ struct pci_dev *pdev = pf->pdev;
+ char *opt_fw_filename = NULL;
+ u32 dword;
+ u8 dsn[8];
+ int pos;
+
+ /* Determine the name of the optional file using the DSN (two
+ * dwords following the start of the DSN Capability).
+ */
+ pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_DSN);
+ if (pos) {
+ opt_fw_filename = kzalloc(NAME_MAX, GFP_KERNEL);
+ if (!opt_fw_filename)
+ return NULL;
+
+ pci_read_config_dword(pdev, pos + 4, &dword);
+ put_unaligned_le32(dword, &dsn[0]);
+ pci_read_config_dword(pdev, pos + 8, &dword);
+ put_unaligned_le32(dword, &dsn[4]);
+ snprintf(opt_fw_filename, NAME_MAX,
+ "%sice-%02x%02x%02x%02x%02x%02x%02x%02x.pkg",
+ ICE_DDP_PKG_PATH,
+ dsn[7], dsn[6], dsn[5], dsn[4],
+ dsn[3], dsn[2], dsn[1], dsn[0]);
+ }
+
+ return opt_fw_filename;
+}
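+
+/* Example (illustrative, hypothetical DSN): a device serial number of
+ * 0x0123456789abcdef yields "intel/ice/ddp/ice-0123456789abcdef.pkg",
+ * the DSN bytes being printed most-significant first (dsn[7]..dsn[0]).
+ */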
+
+/**
+ * ice_request_fw - request and load the DDP package file
+ * @pf: pointer to the PF instance
+ */
+static void ice_request_fw(struct ice_pf *pf)
+{
+ char *opt_fw_filename = ice_get_opt_fw_name(pf);
+ const struct firmware *firmware = NULL;
+ struct device *dev = &pf->pdev->dev;
+ int err = 0;
+
+ /* The optional device-specific DDP package (if present) overrides the
+ * default DDP package file. The kernel logs a debug message if the
+ * file doesn't exist and warning messages for other errors.
+ */
+ if (opt_fw_filename) {
+ err = firmware_request_nowarn(&firmware, opt_fw_filename, dev);
+ if (err) {
+ kfree(opt_fw_filename);
+ goto dflt_pkg_load;
+ }
+
+ /* request for firmware was successful. Download to device */
+ ice_load_pkg(firmware, pf);
+ kfree(opt_fw_filename);
+ release_firmware(firmware);
+ return;
+ }
+
+dflt_pkg_load:
+ err = request_firmware(&firmware, ICE_DDP_PKG_FILE, dev);
+ if (err) {
+ dev_err(dev,
+ "The DDP package file was not found or could not be read. Entering Safe Mode\n");
+ return;
+ }
+
+ /* request for firmware was successful. Download to device */
+ ice_load_pkg(firmware, pf);
+ release_firmware(firmware);
+}
+
+/**
* ice_probe - Device initialization routine
* @pdev: PCI device information struct
* @ent: entry in ice_pci_tbl
@@ -2533,9 +2855,27 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_exit_unroll;
}
- dev_info(dev, "firmware %d.%d.%05d api %d.%d\n",
- hw->fw_maj_ver, hw->fw_min_ver, hw->fw_build,
- hw->api_maj_ver, hw->api_min_ver);
+ dev_info(dev, "firmware %d.%d.%d api %d.%d.%d nvm %s build 0x%08x\n",
+ hw->fw_maj_ver, hw->fw_min_ver, hw->fw_patch,
+ hw->api_maj_ver, hw->api_min_ver, hw->api_patch,
+ ice_nvm_version_str(hw), hw->fw_build);
+
+ ice_request_fw(pf);
+
+ /* if ice_request_fw fails, the ICE_FLAG_ADV_FEATURES bit won't be
+ * set in pf->flags, which will cause ice_is_safe_mode to return
+ * true
+ */
+ if (ice_is_safe_mode(pf)) {
+ dev_err(dev,
+ "Package download failed. Advanced features disabled - Device now in Safe Mode\n");
+ /* we already got function/device capabilities but these don't
+ * reflect what the driver needs to do in safe mode. Instead of
+ * adding conditional logic everywhere to ignore these
+ * device/function capabilities, override them.
+ */
+ ice_set_safe_mode_caps(hw);
+ }
err = ice_init_pf(pf);
if (err) {
@@ -2543,16 +2883,6 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
goto err_init_pf_unroll;
}
- if (test_bit(ICE_FLAG_DCB_CAPABLE, pf->flags)) {
- /* Note: DCB init failure is non-fatal to load */
- if (ice_init_pf_dcb(pf, false)) {
- clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
- clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
- } else {
- ice_cfg_lldp_mib_change(&pf->hw, true);
- }
- }
-
pf->num_alloc_vsi = hw->func_caps.guar_num_vsi;
if (!pf->num_alloc_vsi) {
err = -EIO;
@@ -2612,6 +2942,15 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
clear_bit(__ICE_SERVICE_DIS, pf->state);
+ /* tell the firmware we are up */
+ err = ice_send_version(pf);
+ if (err) {
+ dev_err(dev,
+ "probe failed sending driver version %s. error: %d\n",
+ ice_drv_ver, err);
+ goto err_alloc_sw_unroll;
+ }
+
/* since everything is good, start the service timer */
mod_timer(&pf->serv_tmr, round_jiffies(jiffies + pf->serv_tmr_period));
@@ -2623,6 +2962,20 @@ ice_probe(struct pci_dev *pdev, const struct pci_device_id __always_unused *ent)
ice_verify_cacheline_size(pf);
+ /* If no DDP-driven features have to be set up, return here */
+ if (ice_is_safe_mode(pf))
+ return 0;
+
+ /* initialize DDP driven features */
+
+ /* Note: DCB init failure is non-fatal to load */
+ if (ice_init_pf_dcb(pf, false)) {
+ clear_bit(ICE_FLAG_DCB_CAPABLE, pf->flags);
+ clear_bit(ICE_FLAG_DCB_ENA, pf->flags);
+ } else {
+ ice_cfg_lldp_mib_change(&pf->hw, true);
+ }
+
return 0;
err_alloc_sw_unroll:
@@ -3075,6 +3428,13 @@ ice_set_features(struct net_device *netdev, netdev_features_t features)
struct ice_vsi *vsi = np->vsi;
int ret = 0;
+ /* Don't set any netdev advanced features with device in Safe Mode */
+ if (ice_is_safe_mode(vsi->back)) {
+ dev_err(&vsi->back->pdev->dev,
+ "Device is in Safe Mode - not enabling advanced netdev features\n");
+ return ret;
+ }
+
/* Multiple features can be changed in one call so keep features in
* separate if/else statements to guarantee each feature is checked
*/
@@ -3764,9 +4124,6 @@ static int ice_ena_vsi(struct ice_vsi *vsi, bool locked)
*/
#ifdef CONFIG_DCB
int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
-#else
-static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
-#endif /* CONFIG_DCB */
{
int v;
@@ -3777,94 +4134,107 @@ static int ice_pf_ena_all_vsi(struct ice_pf *pf, bool locked)
return 0;
}
+#endif /* CONFIG_DCB */
/**
- * ice_vsi_rebuild_all - rebuild all VSIs in PF
- * @pf: the PF
+ * ice_vsi_rebuild_by_type - Rebuild VSI of a given type
+ * @pf: pointer to the PF instance
+ * @type: VSI type to rebuild
+ *
+ * Iterates through the pf->vsi array and rebuilds VSIs of the requested type
*/
-static int ice_vsi_rebuild_all(struct ice_pf *pf)
+static int ice_vsi_rebuild_by_type(struct ice_pf *pf, enum ice_vsi_type type)
{
- int i;
+ enum ice_status status;
+ int i, err;
- /* loop through pf->vsi array and reinit the VSI if found */
ice_for_each_vsi(pf, i) {
struct ice_vsi *vsi = pf->vsi[i];
- int err;
- if (!vsi)
+ if (!vsi || vsi->type != type)
continue;
+ /* rebuild the VSI */
err = ice_vsi_rebuild(vsi);
if (err) {
dev_err(&pf->pdev->dev,
- "VSI at index %d rebuild failed\n",
- vsi->idx);
+ "rebuild VSI failed, err %d, VSI index %d, type %d\n",
+ err, vsi->idx, type);
+ return err;
+ }
+
+ /* replay filters for the VSI */
+ status = ice_replay_vsi(&pf->hw, vsi->idx);
+ if (status) {
+ dev_err(&pf->pdev->dev,
+ "replay VSI failed, status %d, VSI index %d, type %d\n",
+ status, vsi->idx, type);
+ return -EIO;
+ }
+
+ /* Re-map HW VSI number, using VSI handle that has been
+ * previously validated in ice_replay_vsi() call above
+ */
+ vsi->vsi_num = ice_get_hw_vsi_num(&pf->hw, vsi->idx);
+
+ /* enable the VSI */
+ err = ice_ena_vsi(vsi, false);
+ if (err) {
+ dev_err(&pf->pdev->dev,
+ "enable VSI failed, err %d, VSI index %d, type %d\n",
+ err, vsi->idx, type);
return err;
}
- dev_info(&pf->pdev->dev,
- "VSI at index %d rebuilt. vsi_num = 0x%x\n",
- vsi->idx, vsi->vsi_num);
+ dev_info(&pf->pdev->dev, "VSI rebuilt. VSI index %d, type %d\n",
+ vsi->idx, type);
}
return 0;
}
/**
- * ice_vsi_replay_all - replay all VSIs configuration in the PF
- * @pf: the PF
+ * ice_update_pf_netdev_link - Update PF netdev link status
+ * @pf: pointer to the PF instance
*/
-static int ice_vsi_replay_all(struct ice_pf *pf)
+static void ice_update_pf_netdev_link(struct ice_pf *pf)
{
- struct ice_hw *hw = &pf->hw;
- enum ice_status ret;
+ bool link_up;
int i;
- /* loop through pf->vsi array and replay the VSI if found */
ice_for_each_vsi(pf, i) {
struct ice_vsi *vsi = pf->vsi[i];
- if (!vsi)
- continue;
+ if (!vsi || vsi->type != ICE_VSI_PF)
+ return;
- ret = ice_replay_vsi(hw, vsi->idx);
- if (ret) {
- dev_err(&pf->pdev->dev,
- "VSI at index %d replay failed %d\n",
- vsi->idx, ret);
- return -EIO;
+ ice_get_link_status(pf->vsi[i]->port_info, &link_up);
+ if (link_up) {
+ netif_carrier_on(pf->vsi[i]->netdev);
+ netif_tx_wake_all_queues(pf->vsi[i]->netdev);
+ } else {
+ netif_carrier_off(pf->vsi[i]->netdev);
+ netif_tx_stop_all_queues(pf->vsi[i]->netdev);
}
-
- /* Re-map HW VSI number, using VSI handle that has been
- * previously validated in ice_replay_vsi() call above
- */
- vsi->vsi_num = ice_get_hw_vsi_num(hw, vsi->idx);
-
- dev_info(&pf->pdev->dev,
- "VSI at index %d filter replayed successfully - vsi_num %i\n",
- vsi->idx, vsi->vsi_num);
}
-
- /* Clean up replay filter after successful re-configuration */
- ice_replay_post(hw);
- return 0;
}
/**
* ice_rebuild - rebuild after reset
* @pf: PF to rebuild
+ * @reset_type: type of reset
*/
-static void ice_rebuild(struct ice_pf *pf)
+static void ice_rebuild(struct ice_pf *pf, enum ice_reset_req reset_type)
{
struct device *dev = &pf->pdev->dev;
struct ice_hw *hw = &pf->hw;
enum ice_status ret;
- int err, i;
+ int err;
if (test_bit(__ICE_DOWN, pf->state))
goto clear_recovery;
- dev_dbg(dev, "rebuilding PF\n");
+ dev_dbg(dev, "rebuilding PF after reset_type=%d\n", reset_type);
ret = ice_init_all_ctrlq(hw);
if (ret) {
@@ -3872,6 +4242,16 @@ static void ice_rebuild(struct ice_pf *pf)
goto err_init_ctrlq;
}
+ /* if DDP was previously loaded successfully */
+ if (!ice_is_safe_mode(pf)) {
+ /* reload the SW DB of filter tables */
+ if (reset_type == ICE_RESET_PFR)
+ ice_fill_blk_tbls(hw);
+ else
+ /* Reload DDP Package after CORER/GLOBR reset */
+ ice_load_pkg(NULL, pf);
+ }
+
ret = ice_clear_pf_cfg(hw);
if (ret) {
dev_err(dev, "clear PF configuration failed %d\n", ret);
@@ -3890,63 +4270,53 @@ static void ice_rebuild(struct ice_pf *pf)
if (err)
goto err_sched_init_port;
- ice_dcb_rebuild(pf);
-
- err = ice_vsi_rebuild_all(pf);
- if (err) {
- dev_err(dev, "ice_vsi_rebuild_all failed\n");
- goto err_vsi_rebuild;
- }
-
err = ice_update_link_info(hw->port_info);
if (err)
dev_err(&pf->pdev->dev, "Get link status error %d\n", err);
- /* Replay all VSIs Configuration, including filters after reset */
- if (ice_vsi_replay_all(pf)) {
- dev_err(&pf->pdev->dev,
- "error replaying VSI configurations with switch filter rules\n");
- goto err_vsi_rebuild;
- }
-
/* start misc vector */
err = ice_req_irq_msix_misc(pf);
if (err) {
dev_err(dev, "misc vector setup failed: %d\n", err);
- goto err_vsi_rebuild;
+ goto err_sched_init_port;
}
- /* restart the VSIs that were rebuilt and running before the reset */
- err = ice_pf_ena_all_vsi(pf, false);
+ if (test_bit(ICE_FLAG_DCB_ENA, pf->flags))
+ ice_dcb_rebuild(pf);
+
+ /* rebuild PF VSI */
+ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_PF);
if (err) {
- dev_err(&pf->pdev->dev, "error enabling VSIs\n");
- /* no need to disable VSIs in tear down path in ice_rebuild()
- * since its already taken care in ice_vsi_open()
- */
+ dev_err(dev, "PF VSI rebuild failed: %d\n", err);
goto err_vsi_rebuild;
}
- ice_for_each_vsi(pf, i) {
- bool link_up;
-
- if (!pf->vsi[i] || pf->vsi[i]->type != ICE_VSI_PF)
- continue;
- ice_get_link_status(pf->vsi[i]->port_info, &link_up);
- if (link_up) {
- netif_carrier_on(pf->vsi[i]->netdev);
- netif_tx_wake_all_queues(pf->vsi[i]->netdev);
- } else {
- netif_carrier_off(pf->vsi[i]->netdev);
- netif_tx_stop_all_queues(pf->vsi[i]->netdev);
+ if (test_bit(ICE_FLAG_SRIOV_ENA, pf->flags)) {
+ err = ice_vsi_rebuild_by_type(pf, ICE_VSI_VF);
+ if (err) {
+ dev_err(dev, "VF VSI rebuild failed: %d\n", err);
+ goto err_vsi_rebuild;
}
}
+ ice_update_pf_netdev_link(pf);
+
+ /* tell the firmware we are up */
+ ret = ice_send_version(pf);
+ if (ret) {
+ dev_err(dev,
+ "Rebuild failed due to error sending driver version: %d\n",
+ ret);
+ goto err_vsi_rebuild;
+ }
+
+ ice_replay_post(hw);
+
/* if we get here, reset flow is successful */
clear_bit(__ICE_RESET_FAILED, pf->state);
return;
err_vsi_rebuild:
- ice_vsi_release_all(pf);
err_sched_init_port:
ice_sched_cleanup_all(hw);
err_init_ctrlq:
@@ -4473,6 +4843,17 @@ out_rm_features:
return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
}
+static const struct net_device_ops ice_netdev_safe_mode_ops = {
+ .ndo_open = ice_open,
+ .ndo_stop = ice_stop,
+ .ndo_start_xmit = ice_start_xmit,
+ .ndo_set_mac_address = ice_set_mac_address,
+ .ndo_validate_addr = eth_validate_addr,
+ .ndo_change_mtu = ice_change_mtu,
+ .ndo_get_stats64 = ice_get_stats64,
+ .ndo_tx_timeout = ice_tx_timeout,
+};
+
static const struct net_device_ops ice_netdev_ops = {
.ndo_open = ice_open,
.ndo_stop = ice_stop,
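
The hunks above pair the full ice_netdev_ops table with a reduced ice_netdev_safe_mode_ops table, so a port whose DDP package could not be loaded still gets basic open/stop/xmit, MAC address, MTU, stats and Tx-timeout handling while the advanced callbacks stay unregistered. A minimal sketch of selecting between the two tables is shown below; the helper name is illustrative, since the ice function that actually performs the selection is not part of these hunks.

/* Sketch only, not the ice driver's own helper: pick the reduced ops table
 * when the device is in Safe Mode (no DDP package), the full one otherwise.
 */
static void example_set_netdev_ops(struct net_device *netdev, struct ice_pf *pf)
{
	if (ice_is_safe_mode(pf))
		netdev->netdev_ops = &ice_netdev_safe_mode_ops;
	else
		netdev->netdev_ops = &ice_netdev_ops;
}
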
diff --git a/drivers/net/ethernet/intel/ice/ice_type.h b/drivers/net/ethernet/intel/ice/ice_type.h
index 4501d50a7dcc..6667d17a4206 100644
--- a/drivers/net/ethernet/intel/ice/ice_type.h
+++ b/drivers/net/ethernet/intel/ice/ice_type.h
@@ -12,6 +12,7 @@
#include "ice_osdep.h"
#include "ice_controlq.h"
#include "ice_lan_tx_rx.h"
+#include "ice_flex_type.h"
static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
{
@@ -31,6 +32,7 @@ static inline bool ice_is_tc_ena(unsigned long bitmap, u8 tc)
#define ICE_DBG_LAN BIT_ULL(8)
#define ICE_DBG_SW BIT_ULL(13)
#define ICE_DBG_SCHED BIT_ULL(14)
+#define ICE_DBG_PKG BIT_ULL(16)
#define ICE_DBG_RES BIT_ULL(17)
#define ICE_DBG_AQ_MSG BIT_ULL(24)
#define ICE_DBG_AQ_CMD BIT_ULL(27)
@@ -53,6 +55,14 @@ enum ice_aq_res_access_type {
ICE_RES_WRITE
};
+struct ice_driver_ver {
+ u8 major_ver;
+ u8 minor_ver;
+ u8 build_ver;
+ u8 subbuild_ver;
+ u8 driver_string[32];
+};
+
enum ice_fc_mode {
ICE_FC_NONE = 0,
ICE_FC_RX_PAUSE,
@@ -222,6 +232,8 @@ struct ice_nvm_info {
u8 blank_nvm_mode; /* is NVM empty (no FW present) */
};
+#define ICE_NVM_VER_LEN 32
+
/* Max number of port to queue branches w.r.t topology */
#define ICE_MAX_TRAFFIC_CLASS 8
#define ICE_TXSCHED_MAX_BRANCHES ICE_MAX_TRAFFIC_CLASS
@@ -459,6 +471,30 @@ struct ice_hw {
u8 ucast_shared; /* true if VSIs can share unicast addr */
+ /* Active package version (currently active) */
+ struct ice_pkg_ver active_pkg_ver;
+ u8 active_pkg_name[ICE_PKG_NAME_SIZE];
+ u8 active_pkg_in_nvm;
+
+ enum ice_aq_err pkg_dwnld_status;
+
+ /* Driver's package ver - (from the Metadata seg) */
+ struct ice_pkg_ver pkg_ver;
+ u8 pkg_name[ICE_PKG_NAME_SIZE];
+
+ /* Driver's Ice package version (from the Ice seg) */
+ struct ice_pkg_ver ice_pkg_ver;
+ u8 ice_pkg_name[ICE_PKG_NAME_SIZE];
+
+ /* Pointer to the ice segment */
+ struct ice_seg *seg;
+
+ /* Pointer to allocated copy of pkg memory */
+ u8 *pkg_copy;
+ u32 pkg_size;
+
+ /* HW block tables */
+ struct ice_blk_info blk[ICE_BLK_COUNT];
};
/* Statistics collected by each port, VSI, VEB, and S-channel */
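
The new ice_driver_ver structure backs the "send driver version" admin queue command that ice_probe() and ice_rebuild() now issue through ice_send_version(). A hedged sketch of filling it follows; the version numbers and driver string are placeholders, and the AQ call itself is only referenced in a comment because its signature is not shown in this patch.

/* Sketch only: populate the structure ice_send_version() hands to firmware.
 * The numeric values and the driver string are illustrative placeholders.
 */
static void example_fill_driver_ver(struct ice_driver_ver *dv)
{
	memset(dv, 0, sizeof(*dv));
	dv->major_ver = 0;
	dv->minor_ver = 8;
	dv->build_ver = 1;
	dv->subbuild_ver = 0;
	strscpy((char *)dv->driver_string, "ice-0.8.1-k",
		sizeof(dv->driver_string));
	/* the driver-version AQ command is then issued with this structure */
}
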
diff --git a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
index 64de05ccbc47..b45797f39b2f 100644
--- a/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
+++ b/drivers/net/ethernet/intel/ice/ice_virtchnl_pf.c
@@ -1443,6 +1443,12 @@ int ice_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
struct ice_pf *pf = pci_get_drvdata(pdev);
+ if (ice_is_safe_mode(pf)) {
+ dev_err(&pf->pdev->dev,
+ "SR-IOV cannot be configured - Device is in Safe Mode\n");
+ return -EOPNOTSUPP;
+ }
+
if (num_vfs)
return ice_pci_sriov_ena(pf, num_vfs);
diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
index 99df595abfba..dc034f4e8cf6 100644
--- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
+++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c
@@ -8748,7 +8748,7 @@ static netdev_tx_t __ixgbe_xmit_frame(struct sk_buff *skb,
if (skb_put_padto(skb, 17))
return NETDEV_TX_OK;
- tx_ring = ring ? ring : adapter->tx_ring[skb->queue_mapping];
+ tx_ring = ring ? ring : adapter->tx_ring[skb_get_queue_mapping(skb)];
if (unlikely(test_bit(__IXGBE_TX_DISABLED, &tx_ring->state)))
return NETDEV_TX_BUSY;
@@ -9490,6 +9490,10 @@ static int ixgbe_configure_clsu32(struct ixgbe_adapter *adapter,
jump->mat = nexthdr[i].jump;
adapter->jump_tables[link_uhtid] = jump;
break;
+ } else {
+ kfree(mask);
+ kfree(input);
+ kfree(jump);
}
}
return 0;
diff --git a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
index 75e849a64db7..75e93ce2ed99 100644
--- a/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
+++ b/drivers/net/ethernet/intel/ixgbevf/ixgbevf_main.c
@@ -2261,12 +2261,14 @@ static void ixgbevf_init_last_counter_stats(struct ixgbevf_adapter *adapter)
static void ixgbevf_negotiate_api(struct ixgbevf_adapter *adapter)
{
struct ixgbe_hw *hw = &adapter->hw;
- int api[] = { ixgbe_mbox_api_14,
- ixgbe_mbox_api_13,
- ixgbe_mbox_api_12,
- ixgbe_mbox_api_11,
- ixgbe_mbox_api_10,
- ixgbe_mbox_api_unknown };
+ static const int api[] = {
+ ixgbe_mbox_api_14,
+ ixgbe_mbox_api_13,
+ ixgbe_mbox_api_12,
+ ixgbe_mbox_api_11,
+ ixgbe_mbox_api_10,
+ ixgbe_mbox_api_unknown
+ };
int err, idx = 0;
spin_lock_bh(&adapter->mbx_lock);
diff --git a/drivers/net/ethernet/mellanox/mlx4/catas.c b/drivers/net/ethernet/mellanox/mlx4/catas.c
index 87e90b5d4d7d..5b11557f1ae4 100644
--- a/drivers/net/ethernet/mellanox/mlx4/catas.c
+++ b/drivers/net/ethernet/mellanox/mlx4/catas.c
@@ -210,7 +210,7 @@ static void mlx4_handle_error_state(struct mlx4_dev_persistent *persist)
mutex_lock(&persist->interface_state_mutex);
if (persist->interface_state & MLX4_INTERFACE_STATE_UP &&
!(persist->interface_state & MLX4_INTERFACE_STATE_DELETION)) {
- err = mlx4_restart_one(persist->pdev, false, NULL);
+ err = mlx4_restart_one(persist->pdev);
mlx4_info(persist->dev, "mlx4_restart_one was ended, ret=%d\n",
err);
}
diff --git a/drivers/net/ethernet/mellanox/mlx4/main.c b/drivers/net/ethernet/mellanox/mlx4/main.c
index 07c204bd3fc4..ef3f3d06ff1e 100644
--- a/drivers/net/ethernet/mellanox/mlx4/main.c
+++ b/drivers/net/ethernet/mellanox/mlx4/main.c
@@ -3931,26 +3931,43 @@ static void mlx4_devlink_param_load_driverinit_values(struct devlink *devlink)
}
}
-static int mlx4_devlink_reload(struct devlink *devlink,
- struct netlink_ext_ack *extack)
+static void mlx4_restart_one_down(struct pci_dev *pdev);
+static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
+ struct devlink *devlink);
+
+static int mlx4_devlink_reload_down(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
struct mlx4_priv *priv = devlink_priv(devlink);
struct mlx4_dev *dev = &priv->dev;
struct mlx4_dev_persistent *persist = dev->persist;
- int err;
if (persist->num_vfs)
mlx4_warn(persist->dev, "Reload performed on PF, will cause reset on operating Virtual Functions\n");
- err = mlx4_restart_one(persist->pdev, true, devlink);
+ mlx4_restart_one_down(persist->pdev);
+ return 0;
+}
+
+static int mlx4_devlink_reload_up(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct mlx4_priv *priv = devlink_priv(devlink);
+ struct mlx4_dev *dev = &priv->dev;
+ struct mlx4_dev_persistent *persist = dev->persist;
+ int err;
+
+ err = mlx4_restart_one_up(persist->pdev, true, devlink);
if (err)
- mlx4_err(persist->dev, "mlx4_restart_one failed, ret=%d\n", err);
+ mlx4_err(persist->dev, "mlx4_restart_one_up failed, ret=%d\n",
+ err);
return err;
}
static const struct devlink_ops mlx4_devlink_ops = {
.port_type_set = mlx4_devlink_port_type_set,
- .reload = mlx4_devlink_reload,
+ .reload_down = mlx4_devlink_reload_down,
+ .reload_up = mlx4_devlink_reload_up,
};
static int mlx4_init_one(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -4163,7 +4180,13 @@ static int restore_current_port_types(struct mlx4_dev *dev,
return err;
}
-int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink)
+static void mlx4_restart_one_down(struct pci_dev *pdev)
+{
+ mlx4_unload_one(pdev);
+}
+
+static int mlx4_restart_one_up(struct pci_dev *pdev, bool reload,
+ struct devlink *devlink)
{
struct mlx4_dev_persistent *persist = pci_get_drvdata(pdev);
struct mlx4_dev *dev = persist->dev;
@@ -4175,7 +4198,6 @@ int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink)
total_vfs = dev->persist->num_vfs;
memcpy(nvfs, dev->persist->nvfs, sizeof(dev->persist->nvfs));
- mlx4_unload_one(pdev);
if (reload)
mlx4_devlink_param_load_driverinit_values(devlink);
err = mlx4_load_one(pdev, pci_dev_data, total_vfs, nvfs, priv, 1);
@@ -4194,6 +4216,12 @@ int mlx4_restart_one(struct pci_dev *pdev, bool reload, struct devlink *devlink)
return err;
}
+int mlx4_restart_one(struct pci_dev *pdev)
+{
+ mlx4_restart_one_down(pdev);
+ return mlx4_restart_one_up(pdev, false, NULL);
+}
+
#define MLX_SP(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_FORCE_SENSE_PORT }
#define MLX_VF(id) { PCI_VDEVICE(MELLANOX, id), MLX4_PCI_DEV_IS_VF }
#define MLX_GN(id) { PCI_VDEVICE(MELLANOX, id), 0 }
diff --git a/drivers/net/ethernet/mellanox/mlx4/mlx4.h b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
index 23f1b5b512c2..527b52e48276 100644
--- a/drivers/net/ethernet/mellanox/mlx4/mlx4.h
+++ b/drivers/net/ethernet/mellanox/mlx4/mlx4.h
@@ -1043,8 +1043,7 @@ int mlx4_catas_init(struct mlx4_dev *dev);
void mlx4_catas_end(struct mlx4_dev *dev);
int mlx4_crdump_init(struct mlx4_dev *dev);
void mlx4_crdump_end(struct mlx4_dev *dev);
-int mlx4_restart_one(struct pci_dev *pdev, bool reload,
- struct devlink *devlink);
+int mlx4_restart_one(struct pci_dev *pdev);
int mlx4_register_device(struct mlx4_dev *dev);
void mlx4_unregister_device(struct mlx4_dev *dev);
void mlx4_dispatch_event(struct mlx4_dev *dev, enum mlx4_dev_event type,
diff --git a/drivers/net/ethernet/mellanox/mlxsw/core.c b/drivers/net/ethernet/mellanox/mlxsw/core.c
index 963a2b4b61b1..3fa96076e8a5 100644
--- a/drivers/net/ethernet/mellanox/mlxsw/core.c
+++ b/drivers/net/ethernet/mellanox/mlxsw/core.c
@@ -80,7 +80,6 @@ struct mlxsw_core {
struct mlxsw_thermal *thermal;
struct mlxsw_core_port *ports;
unsigned int max_ports;
- bool reload_fail;
bool fw_flash_in_progress;
unsigned long driver_priv[0];
/* driver_priv has to be always the last item */
@@ -984,23 +983,29 @@ mlxsw_devlink_info_get(struct devlink *devlink, struct devlink_info_req *req,
return 0;
}
-static int mlxsw_devlink_core_bus_device_reload(struct devlink *devlink,
- struct netlink_ext_ack *extack)
+static int
+mlxsw_devlink_core_bus_device_reload_down(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
- int err;
if (!(mlxsw_core->bus->features & MLXSW_BUS_F_RESET))
return -EOPNOTSUPP;
mlxsw_core_bus_device_unregister(mlxsw_core, true);
- err = mlxsw_core_bus_device_register(mlxsw_core->bus_info,
- mlxsw_core->bus,
- mlxsw_core->bus_priv, true,
- devlink);
- mlxsw_core->reload_fail = !!err;
+ return 0;
+}
- return err;
+static int
+mlxsw_devlink_core_bus_device_reload_up(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ struct mlxsw_core *mlxsw_core = devlink_priv(devlink);
+
+ return mlxsw_core_bus_device_register(mlxsw_core->bus_info,
+ mlxsw_core->bus,
+ mlxsw_core->bus_priv, true,
+ devlink);
}
static int mlxsw_devlink_flash_update(struct devlink *devlink,
@@ -1066,7 +1071,8 @@ mlxsw_devlink_trap_group_init(struct devlink *devlink,
}
static const struct devlink_ops mlxsw_devlink_ops = {
- .reload = mlxsw_devlink_core_bus_device_reload,
+ .reload_down = mlxsw_devlink_core_bus_device_reload_down,
+ .reload_up = mlxsw_devlink_core_bus_device_reload_up,
.port_type_set = mlxsw_devlink_port_type_set,
.port_split = mlxsw_devlink_port_split,
.port_unsplit = mlxsw_devlink_port_unsplit,
@@ -1243,7 +1249,7 @@ void mlxsw_core_bus_device_unregister(struct mlxsw_core *mlxsw_core,
{
struct devlink *devlink = priv_to_devlink(mlxsw_core);
- if (mlxsw_core->reload_fail) {
+ if (devlink_is_reload_failed(devlink)) {
if (!reload)
/* Only the parts that were not de-initialized in the
* failed reload attempt need to be de-initialized.
diff --git a/drivers/net/netdevsim/dev.c b/drivers/net/netdevsim/dev.c
index 39cdb6c18ec0..7fba7b271a57 100644
--- a/drivers/net/netdevsim/dev.c
+++ b/drivers/net/netdevsim/dev.c
@@ -521,8 +521,14 @@ static void nsim_dev_traps_exit(struct devlink *devlink)
kfree(nsim_dev->trap_data);
}
-static int nsim_dev_reload(struct devlink *devlink,
- struct netlink_ext_ack *extack)
+static int nsim_dev_reload_down(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
+{
+ return 0;
+}
+
+static int nsim_dev_reload_up(struct devlink *devlink,
+ struct netlink_ext_ack *extack)
{
enum nsim_resource_id res_ids[] = {
NSIM_RESOURCE_IPV4_FIB, NSIM_RESOURCE_IPV4_FIB_RULES,
@@ -638,7 +644,8 @@ nsim_dev_devlink_trap_action_set(struct devlink *devlink,
}
static const struct devlink_ops nsim_dev_devlink_ops = {
- .reload = nsim_dev_reload,
+ .reload_down = nsim_dev_reload_down,
+ .reload_up = nsim_dev_reload_up,
.flash_update = nsim_dev_flash_update,
.trap_init = nsim_dev_devlink_trap_init,
.trap_action_set = nsim_dev_devlink_trap_action_set,
diff --git a/drivers/ptp/ptp_chardev.c b/drivers/ptp/ptp_chardev.c
index 18ffe449efdf..9c18476d8d10 100644
--- a/drivers/ptp/ptp_chardev.c
+++ b/drivers/ptp/ptp_chardev.c
@@ -126,7 +126,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
switch (cmd) {
case PTP_CLOCK_GETCAPS:
+ case PTP_CLOCK_GETCAPS2:
memset(&caps, 0, sizeof(caps));
+
caps.max_adj = ptp->info->max_adj;
caps.n_alarm = ptp->info->n_alarm;
caps.n_ext_ts = ptp->info->n_ext_ts;
@@ -139,11 +141,24 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
case PTP_EXTTS_REQUEST:
+ case PTP_EXTTS_REQUEST2:
+ memset(&req, 0, sizeof(req));
+
if (copy_from_user(&req.extts, (void __user *)arg,
sizeof(req.extts))) {
err = -EFAULT;
break;
}
+ if (((req.extts.flags & ~PTP_EXTTS_VALID_FLAGS) ||
+ req.extts.rsv[0] || req.extts.rsv[1]) &&
+ cmd == PTP_EXTTS_REQUEST2) {
+ err = -EINVAL;
+ break;
+ } else if (cmd == PTP_EXTTS_REQUEST) {
+ req.extts.flags &= ~PTP_EXTTS_VALID_FLAGS;
+ req.extts.rsv[0] = 0;
+ req.extts.rsv[1] = 0;
+ }
if (req.extts.index >= ops->n_ext_ts) {
err = -EINVAL;
break;
@@ -154,11 +169,27 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
case PTP_PEROUT_REQUEST:
+ case PTP_PEROUT_REQUEST2:
+ memset(&req, 0, sizeof(req));
+
if (copy_from_user(&req.perout, (void __user *)arg,
sizeof(req.perout))) {
err = -EFAULT;
break;
}
+ if (((req.perout.flags & ~PTP_PEROUT_VALID_FLAGS) ||
+ req.perout.rsv[0] || req.perout.rsv[1] ||
+ req.perout.rsv[2] || req.perout.rsv[3]) &&
+ cmd == PTP_PEROUT_REQUEST2) {
+ err = -EINVAL;
+ break;
+ } else if (cmd == PTP_PEROUT_REQUEST) {
+ req.perout.flags &= ~PTP_PEROUT_VALID_FLAGS;
+ req.perout.rsv[0] = 0;
+ req.perout.rsv[1] = 0;
+ req.perout.rsv[2] = 0;
+ req.perout.rsv[3] = 0;
+ }
if (req.perout.index >= ops->n_per_out) {
err = -EINVAL;
break;
@@ -169,6 +200,9 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
case PTP_ENABLE_PPS:
+ case PTP_ENABLE_PPS2:
+ memset(&req, 0, sizeof(req));
+
if (!capable(CAP_SYS_TIME))
return -EPERM;
req.type = PTP_CLK_REQ_PPS;
@@ -177,6 +211,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
case PTP_SYS_OFFSET_PRECISE:
+ case PTP_SYS_OFFSET_PRECISE2:
if (!ptp->info->getcrosststamp) {
err = -EOPNOTSUPP;
break;
@@ -201,6 +236,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
case PTP_SYS_OFFSET_EXTENDED:
+ case PTP_SYS_OFFSET_EXTENDED2:
if (!ptp->info->gettimex64) {
err = -EOPNOTSUPP;
break;
@@ -232,6 +268,7 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
case PTP_SYS_OFFSET:
+ case PTP_SYS_OFFSET2:
sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
if (IS_ERR(sysoff)) {
err = PTR_ERR(sysoff);
@@ -266,10 +303,23 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
case PTP_PIN_GETFUNC:
+ case PTP_PIN_GETFUNC2:
if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
err = -EFAULT;
break;
}
+ if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2]
+ || pd.rsv[3] || pd.rsv[4])
+ && cmd == PTP_PIN_GETFUNC2) {
+ err = -EINVAL;
+ break;
+ } else if (cmd == PTP_PIN_GETFUNC) {
+ pd.rsv[0] = 0;
+ pd.rsv[1] = 0;
+ pd.rsv[2] = 0;
+ pd.rsv[3] = 0;
+ pd.rsv[4] = 0;
+ }
pin_index = pd.index;
if (pin_index >= ops->n_pins) {
err = -EINVAL;
@@ -285,10 +335,23 @@ long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
break;
case PTP_PIN_SETFUNC:
+ case PTP_PIN_SETFUNC2:
if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
err = -EFAULT;
break;
}
+ if ((pd.rsv[0] || pd.rsv[1] || pd.rsv[2]
+ || pd.rsv[3] || pd.rsv[4])
+ && cmd == PTP_PIN_SETFUNC2) {
+ err = -EINVAL;
+ break;
+ } else if (cmd == PTP_PIN_SETFUNC) {
+ pd.rsv[0] = 0;
+ pd.rsv[1] = 0;
+ pd.rsv[2] = 0;
+ pd.rsv[3] = 0;
+ pd.rsv[4] = 0;
+ }
pin_index = pd.index;
if (pin_index >= ops->n_pins) {
err = -EINVAL;
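
The request-style second-generation ioctls added above (PTP_EXTTS_REQUEST2, PTP_PEROUT_REQUEST2, PTP_PIN_GETFUNC2, PTP_PIN_SETFUNC2) return -EINVAL on unknown flag bits or nonzero reserved words, while the legacy commands keep their old behaviour and simply clear those fields. A small userspace sketch using the stricter external-timestamp request follows; the device path and channel index are examples and error handling is kept minimal.

/* Userspace sketch: request external timestamps via PTP_EXTTS_REQUEST2,
 * which validates flags and reserved words instead of clearing them.
 * Returns the open fd so the caller can read() struct ptp_extts_event
 * records, or -1 on failure.
 */
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int enable_extts(const char *dev, unsigned int channel)
{
	struct ptp_extts_request req;
	int fd = open(dev, O_RDWR);

	if (fd < 0)
		return -1;

	memset(&req, 0, sizeof(req));	/* rsv[] must stay zero */
	req.index = channel;
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

	if (ioctl(fd, PTP_EXTTS_REQUEST2, &req)) {
		close(fd);
		return -1;
	}
	return fd;	/* e.g. enable_extts("/dev/ptp0", 0) */
}
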
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index 049aeb40fa35..77ebb61faf48 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -15,7 +15,6 @@
#include <linux/netdevice.h>
#include <net/net_namespace.h>
-#ifdef CONFIG_NETFILTER
static inline int NF_DROP_GETERR(int verdict)
{
return -(verdict >> NF_VERDICT_QBITS);
@@ -118,6 +117,7 @@ struct nf_hook_entries {
*/
};
+#ifdef CONFIG_NETFILTER
static inline struct nf_hook_ops **nf_hook_entries_get_hook_ops(const struct nf_hook_entries *e)
{
unsigned int n = e->num_hook_entries;
@@ -422,7 +422,7 @@ nf_nat_decode_session(struct sk_buff *skb, struct flowi *fl, u_int8_t family)
}
#endif /*CONFIG_NETFILTER*/
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <linux/netfilter/nf_conntrack_zones_common.h>
extern void (*ip_ct_attach)(struct sk_buff *, const struct sk_buff *) __rcu;
diff --git a/include/linux/netfilter/ipset/ip_set_getport.h b/include/linux/netfilter/ipset/ip_set_getport.h
index a906df06948b..d74cd112b88a 100644
--- a/include/linux/netfilter/ipset/ip_set_getport.h
+++ b/include/linux/netfilter/ipset/ip_set_getport.h
@@ -9,7 +9,7 @@
extern bool ip_set_get_ip4_port(const struct sk_buff *skb, bool src,
__be16 *port, u8 *proto);
-#if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE)
+#if IS_ENABLED(CONFIG_IP6_NF_IPTABLES)
extern bool ip_set_get_ip6_port(const struct sk_buff *skb, bool src,
__be16 *port, u8 *proto);
#else
diff --git a/include/linux/netfilter/nf_conntrack_common.h b/include/linux/netfilter/nf_conntrack_common.h
index e142b2b5f1ea..1db83c931d9c 100644
--- a/include/linux/netfilter/nf_conntrack_common.h
+++ b/include/linux/netfilter/nf_conntrack_common.h
@@ -2,6 +2,7 @@
#ifndef _NF_CONNTRACK_COMMON_H
#define _NF_CONNTRACK_COMMON_H
+#include <linux/atomic.h>
#include <uapi/linux/netfilter/nf_conntrack_common.h>
struct ip_conntrack_stat {
@@ -19,4 +20,23 @@ struct ip_conntrack_stat {
unsigned int search_restart;
};
+#define NFCT_INFOMASK 7UL
+#define NFCT_PTRMASK ~(NFCT_INFOMASK)
+
+struct nf_conntrack {
+ atomic_t use;
+};
+
+void nf_conntrack_destroy(struct nf_conntrack *nfct);
+static inline void nf_conntrack_put(struct nf_conntrack *nfct)
+{
+ if (nfct && atomic_dec_and_test(&nfct->use))
+ nf_conntrack_destroy(nfct);
+}
+static inline void nf_conntrack_get(struct nf_conntrack *nfct)
+{
+ if (nfct)
+ atomic_inc(&nfct->use);
+}
+
#endif /* _NF_CONNTRACK_COMMON_H */
diff --git a/include/linux/netfilter/x_tables.h b/include/linux/netfilter/x_tables.h
index ae62bf1c6824..1b261c51b3a3 100644
--- a/include/linux/netfilter/x_tables.h
+++ b/include/linux/netfilter/x_tables.h
@@ -35,15 +35,12 @@ struct xt_action_param {
union {
const void *matchinfo, *targinfo;
};
-#if IS_ENABLED(CONFIG_NETFILTER)
const struct nf_hook_state *state;
-#endif
int fragoff;
unsigned int thoff;
bool hotdrop;
};
-#if IS_ENABLED(CONFIG_NETFILTER)
static inline struct net *xt_net(const struct xt_action_param *par)
{
return par->state->net;
@@ -78,7 +75,6 @@ static inline u_int8_t xt_family(const struct xt_action_param *par)
{
return par->state->pf;
}
-#endif
/**
* struct xt_mtchk_param - parameters for match extensions'
@@ -340,7 +336,7 @@ void xt_free_table_info(struct xt_table_info *info);
/**
* xt_recseq - recursive seqcount for netfilter use
- *
+ *
* Packet processing changes the seqcount only if no recursion happened
* get_counters() can use read_seqcount_begin()/read_seqcount_retry(),
* because we use the normal seqcount convention :
@@ -450,9 +446,7 @@ xt_get_per_cpu_counter(struct xt_counters *cnt, unsigned int cpu)
return cnt;
}
-#if IS_ENABLED(CONFIG_NETFILTER)
struct nf_hook_ops *xt_hook_ops_alloc(const struct xt_table *, nf_hookfn *);
-#endif
#ifdef CONFIG_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter/xt_hashlimit.h b/include/linux/netfilter/xt_hashlimit.h
deleted file mode 100644
index 169d03983589..000000000000
--- a/include/linux/netfilter/xt_hashlimit.h
+++ /dev/null
@@ -1,11 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XT_HASHLIMIT_H
-#define _XT_HASHLIMIT_H
-
-#include <uapi/linux/netfilter/xt_hashlimit.h>
-
-#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
- XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \
- XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES |\
- XT_HASHLIMIT_RATE_MATCH)
-#endif /*_XT_HASHLIMIT_H*/
diff --git a/include/linux/netfilter/xt_physdev.h b/include/linux/netfilter/xt_physdev.h
deleted file mode 100644
index 4ca0593949cd..000000000000
--- a/include/linux/netfilter/xt_physdev.h
+++ /dev/null
@@ -1,8 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _XT_PHYSDEV_H
-#define _XT_PHYSDEV_H
-
-#include <linux/if.h>
-#include <uapi/linux/netfilter/xt_physdev.h>
-
-#endif /*_XT_PHYSDEV_H*/
diff --git a/include/linux/netfilter_arp/arp_tables.h b/include/linux/netfilter_arp/arp_tables.h
index 1b7b35bb9c27..e98028f00e47 100644
--- a/include/linux/netfilter_arp/arp_tables.h
+++ b/include/linux/netfilter_arp/arp_tables.h
@@ -49,7 +49,6 @@ struct arpt_error {
}
extern void *arpt_alloc_initial_table(const struct xt_table *);
-#if IS_ENABLED(CONFIG_NETFILTER)
int arpt_register_table(struct net *net, const struct xt_table *table,
const struct arpt_replace *repl,
const struct nf_hook_ops *ops, struct xt_table **res);
@@ -58,7 +57,6 @@ void arpt_unregister_table(struct net *net, struct xt_table *table,
extern unsigned int arpt_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct xt_table *table);
-#endif
#ifdef CONFIG_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_bridge.h b/include/linux/netfilter_bridge.h
index 5f2614d02e03..f980edfdd278 100644
--- a/include/linux/netfilter_bridge.h
+++ b/include/linux/netfilter_bridge.h
@@ -5,6 +5,13 @@
#include <uapi/linux/netfilter_bridge.h>
#include <linux/skbuff.h>
+struct nf_bridge_frag_data {
+ char mac[ETH_HLEN];
+ bool vlan_present;
+ u16 vlan_tci;
+ __be16 vlan_proto;
+};
+
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
int br_handle_frame_finish(struct net *net, struct sock *sk, struct sk_buff *skb);
diff --git a/include/linux/netfilter_bridge/ebt_802_3.h b/include/linux/netfilter_bridge/ebt_802_3.h
deleted file mode 100644
index c6147f9c0d80..000000000000
--- a/include/linux/netfilter_bridge/ebt_802_3.h
+++ /dev/null
@@ -1,12 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef __LINUX_BRIDGE_EBT_802_3_H
-#define __LINUX_BRIDGE_EBT_802_3_H
-
-#include <linux/skbuff.h>
-#include <uapi/linux/netfilter_bridge/ebt_802_3.h>
-
-static inline struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb)
-{
- return (struct ebt_802_3_hdr *)skb_mac_header(skb);
-}
-#endif
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index b5b2d371f0ef..162f59d0d17a 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -105,7 +105,7 @@ struct ebt_table {
#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
~(__alignof__(struct _xt_align)-1))
-#if IS_ENABLED(CONFIG_NETFILTER)
+
extern int ebt_register_table(struct net *net,
const struct ebt_table *table,
const struct nf_hook_ops *ops,
@@ -115,7 +115,6 @@ extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
extern unsigned int ebt_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct ebt_table *table);
-#endif
/* True if the hook mask denotes that the rule is in a base chain,
* used in the check() functions */
diff --git a/include/linux/netfilter_ipv4/ip_tables.h b/include/linux/netfilter_ipv4/ip_tables.h
index f40a65481df4..e9e1ed74cdf1 100644
--- a/include/linux/netfilter_ipv4/ip_tables.h
+++ b/include/linux/netfilter_ipv4/ip_tables.h
@@ -17,21 +17,16 @@
#include <linux/if.h>
#include <linux/in.h>
+#include <linux/init.h>
#include <linux/ip.h>
#include <linux/skbuff.h>
-
-#include <linux/init.h>
#include <uapi/linux/netfilter_ipv4/ip_tables.h>
-extern void ipt_init(void) __init;
-
-#if IS_ENABLED(CONFIG_NETFILTER)
int ipt_register_table(struct net *net, const struct xt_table *table,
const struct ipt_replace *repl,
const struct nf_hook_ops *ops, struct xt_table **res);
void ipt_unregister_table(struct net *net, struct xt_table *table,
const struct nf_hook_ops *ops);
-#endif
/* Standard entry. */
struct ipt_standard {
@@ -67,11 +62,9 @@ struct ipt_error {
}
extern void *ipt_alloc_initial_table(const struct xt_table *);
-#if IS_ENABLED(CONFIG_NETFILTER)
extern unsigned int ipt_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct xt_table *table);
-#endif
#ifdef CONFIG_COMPAT
#include <net/compat.h>
diff --git a/include/linux/netfilter_ipv6.h b/include/linux/netfilter_ipv6.h
index 7beb681e1ce5..aac42c28fe62 100644
--- a/include/linux/netfilter_ipv6.h
+++ b/include/linux/netfilter_ipv6.h
@@ -1,7 +1,7 @@
/* IPv6-specific defines for netfilter.
* (C)1998 Rusty Russell -- This code is GPL.
* (C)1999 David Jeffery
- * this header was blatantly ripped from netfilter_ipv4.h
+ * this header was blatantly ripped from netfilter_ipv4.h
* it's amazing what adding a bunch of 6s can do =8^)
*/
#ifndef __LINUX_IP6_NETFILTER_H
@@ -10,6 +10,18 @@
#include <uapi/linux/netfilter_ipv6.h>
#include <net/tcp.h>
+/* Check for an extension */
+static inline int
+nf_ip6_ext_hdr(u8 nexthdr)
+{ return (nexthdr == IPPROTO_HOPOPTS) ||
+ (nexthdr == IPPROTO_ROUTING) ||
+ (nexthdr == IPPROTO_FRAGMENT) ||
+ (nexthdr == IPPROTO_ESP) ||
+ (nexthdr == IPPROTO_AH) ||
+ (nexthdr == IPPROTO_NONE) ||
+ (nexthdr == IPPROTO_DSTOPTS);
+}
+
/* Extra routing may needed on local out, as the QUEUE target never returns
* control to the table.
*/
@@ -20,7 +32,7 @@ struct ip6_rt_info {
};
struct nf_queue_entry;
-struct nf_ct_bridge_frag_data;
+struct nf_bridge_frag_data;
/*
* Hook functions for ipv6 to allow xt_* modules to be built-in even
@@ -49,9 +61,9 @@ struct nf_ipv6_ops {
int (*br_defrag)(struct net *net, struct sk_buff *skb, u32 user);
int (*br_fragment)(struct net *net, struct sock *sk,
struct sk_buff *skb,
- struct nf_ct_bridge_frag_data *data,
+ struct nf_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
- const struct nf_ct_bridge_frag_data *data,
+ const struct nf_bridge_frag_data *data,
struct sk_buff *));
#endif
};
@@ -123,16 +135,16 @@ static inline int nf_ipv6_br_defrag(struct net *net, struct sk_buff *skb,
}
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
- struct nf_ct_bridge_frag_data *data,
+ struct nf_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
- const struct nf_ct_bridge_frag_data *data,
+ const struct nf_bridge_frag_data *data,
struct sk_buff *));
static inline int nf_br_ip6_fragment(struct net *net, struct sock *sk,
struct sk_buff *skb,
- struct nf_ct_bridge_frag_data *data,
+ struct nf_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
- const struct nf_ct_bridge_frag_data *data,
+ const struct nf_bridge_frag_data *data,
struct sk_buff *))
{
#if IS_MODULE(CONFIG_IPV6)
diff --git a/include/linux/netfilter_ipv6/ip6_tables.h b/include/linux/netfilter_ipv6/ip6_tables.h
index 53b7309613bf..78ab959c4575 100644
--- a/include/linux/netfilter_ipv6/ip6_tables.h
+++ b/include/linux/netfilter_ipv6/ip6_tables.h
@@ -17,16 +17,13 @@
#include <linux/if.h>
#include <linux/in6.h>
+#include <linux/init.h>
#include <linux/ipv6.h>
#include <linux/skbuff.h>
-
-#include <linux/init.h>
#include <uapi/linux/netfilter_ipv6/ip6_tables.h>
-extern void ip6t_init(void) __init;
-
extern void *ip6t_alloc_initial_table(const struct xt_table *);
-#if IS_ENABLED(CONFIG_NETFILTER)
+
int ip6t_register_table(struct net *net, const struct xt_table *table,
const struct ip6t_replace *repl,
const struct nf_hook_ops *ops, struct xt_table **res);
@@ -35,19 +32,6 @@ void ip6t_unregister_table(struct net *net, struct xt_table *table,
extern unsigned int ip6t_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct xt_table *table);
-#endif
-
-/* Check for an extension */
-static inline int
-ip6t_ext_hdr(u8 nexthdr)
-{ return (nexthdr == IPPROTO_HOPOPTS) ||
- (nexthdr == IPPROTO_ROUTING) ||
- (nexthdr == IPPROTO_FRAGMENT) ||
- (nexthdr == IPPROTO_ESP) ||
- (nexthdr == IPPROTO_AH) ||
- (nexthdr == IPPROTO_NONE) ||
- (nexthdr == IPPROTO_DSTOPTS);
-}
#ifdef CONFIG_COMPAT
#include <net/compat.h>
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 028e684fa974..907209c0794e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -37,6 +37,9 @@
#include <linux/in6.h>
#include <linux/if_packet.h>
#include <net/flow.h>
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+#include <linux/netfilter/nf_conntrack_common.h>
+#endif
/* The interface for checksum offload between the stack and networking drivers
* is as follows...
@@ -244,12 +247,6 @@ struct bpf_prog;
union bpf_attr;
struct skb_ext;
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-struct nf_conntrack {
- atomic_t use;
-};
-#endif
-
#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
struct nf_bridge_info {
enum {
@@ -914,7 +911,6 @@ static inline bool skb_pfmemalloc(const struct sk_buff *skb)
#define SKB_DST_NOREF 1UL
#define SKB_DST_PTRMASK ~(SKB_DST_NOREF)
-#define SKB_NFCT_PTRMASK ~(7UL)
/**
* skb_dst - returns skb dst_entry
* @skb: buffer
@@ -4040,25 +4036,27 @@ static inline void skb_remcsum_process(struct sk_buff *skb, void *ptr,
static inline struct nf_conntrack *skb_nfct(const struct sk_buff *skb)
{
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- return (void *)(skb->_nfct & SKB_NFCT_PTRMASK);
+ return (void *)(skb->_nfct & NFCT_PTRMASK);
#else
return NULL;
#endif
}
-#if defined(CONFIG_NF_CONNTRACK) || defined(CONFIG_NF_CONNTRACK_MODULE)
-void nf_conntrack_destroy(struct nf_conntrack *nfct);
-static inline void nf_conntrack_put(struct nf_conntrack *nfct)
+static inline unsigned long skb_get_nfct(const struct sk_buff *skb)
{
- if (nfct && atomic_dec_and_test(&nfct->use))
- nf_conntrack_destroy(nfct);
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ return skb->_nfct;
+#else
+ return 0UL;
+#endif
}
-static inline void nf_conntrack_get(struct nf_conntrack *nfct)
+
+static inline void skb_set_nfct(struct sk_buff *skb, unsigned long nfct)
{
- if (nfct)
- atomic_inc(&nfct->use);
-}
+#if IS_ENABLED(CONFIG_NF_CONNTRACK)
+ skb->_nfct = nfct;
#endif
+}
#ifdef CONFIG_SKB_EXTENSIONS
enum skb_ext_id {
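
skbuff.h now pulls the conntrack pointer helpers from nf_conntrack_common.h and exposes skb_get_nfct()/skb_set_nfct(), which compile down to 0 and a no-op when CONFIG_NF_CONNTRACK is disabled. The pointer/ctinfo packing previously hidden behind SKB_NFCT_PTRMASK is unchanged; the sketch below only restates what nf_ct_get()/nf_ct_set() in the conntrack header, further down in this patch, already do.

/* Sketch of the packing the helpers rely on: struct nf_conn is at least
 * 8-byte aligned, so the low three bits of skb->_nfct (NFCT_INFOMASK == 7)
 * carry the enum ip_conntrack_info value alongside the pointer.
 */
static struct nf_conn *example_unpack_nfct(const struct sk_buff *skb,
					   enum ip_conntrack_info *ctinfo)
{
	unsigned long nfct = skb_get_nfct(skb);

	*ctinfo = nfct & NFCT_INFOMASK;
	return (struct nf_conn *)(nfct & NFCT_PTRMASK);
}
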
diff --git a/include/net/devlink.h b/include/net/devlink.h
index 03e4d9244ff3..23e4b65ec9df 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -38,6 +38,7 @@ struct devlink {
struct device *dev;
possible_net_t _net;
struct mutex lock;
+ bool reload_failed;
char priv[0] __aligned(NETDEV_ALIGN);
};
@@ -642,7 +643,10 @@ enum devlink_trap_group_generic_id {
}
struct devlink_ops {
- int (*reload)(struct devlink *devlink, struct netlink_ext_ack *extack);
+ int (*reload_down)(struct devlink *devlink,
+ struct netlink_ext_ack *extack);
+ int (*reload_up)(struct devlink *devlink,
+ struct netlink_ext_ack *extack);
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
int (*port_split)(struct devlink *devlink, unsigned int port_index,
@@ -942,6 +946,8 @@ void
devlink_health_reporter_state_update(struct devlink_health_reporter *reporter,
enum devlink_health_reporter_state state);
+bool devlink_is_reload_failed(const struct devlink *devlink);
+
void devlink_flash_update_begin_notify(struct devlink *devlink);
void devlink_flash_update_end_notify(struct devlink *devlink);
void devlink_flash_update_status_notify(struct devlink *devlink,
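
The single .reload callback is split into a .reload_down/.reload_up pair, and devlink core now records a failed bring-up in the new reload_failed flag, which drivers can query with devlink_is_reload_failed() as the mlxsw conversion above does. A hypothetical driver skeleton using the split ops is sketched below; the foo_* names stand in for whatever teardown and bring-up helpers a real driver already has.

/* Hypothetical sketch of the split reload ops; foo_teardown() and
 * foo_bring_up() are placeholders for a driver's existing teardown and
 * init paths.
 */
static int foo_devlink_reload_down(struct devlink *devlink,
				   struct netlink_ext_ack *extack)
{
	foo_teardown(devlink_priv(devlink));
	return 0;
}

static int foo_devlink_reload_up(struct devlink *devlink,
				 struct netlink_ext_ack *extack)
{
	/* a nonzero return here is what marks the reload as failed */
	return foo_bring_up(devlink_priv(devlink));
}

static const struct devlink_ops foo_devlink_ops = {
	.reload_down = foo_devlink_reload_down,
	.reload_up = foo_devlink_reload_up,
};
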
diff --git a/include/net/inet_sock.h b/include/net/inet_sock.h
index 7769c9b36d75..34c4436fd18f 100644
--- a/include/net/inet_sock.h
+++ b/include/net/inet_sock.h
@@ -160,6 +160,7 @@ struct inet_cork {
char priority;
__u16 gso_size;
u64 transmit_time;
+ u32 mark;
};
struct inet_cork_full {
diff --git a/include/net/ip.h b/include/net/ip.h
index 29d89de39822..95bb77f95bcc 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -88,6 +88,7 @@ static inline void ipcm_init_sk(struct ipcm_cookie *ipcm,
{
ipcm_init(ipcm);
+ ipcm->sockc.mark = inet->sk.sk_mark;
ipcm->sockc.tsflags = inet->sk.sk_tsflags;
ipcm->oif = inet->sk.sk_bound_dev_if;
ipcm->addr = inet->inet_saddr;
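
With a mark field added to the inet cork and ipcm_init_sk() copying sk->sk_mark into the control-message cookie, the socket mark now follows IPv4 transmits that go through the cork path instead of being lost there. A userspace sketch of setting the mark is below; SO_MARK itself is long-standing API and requires CAP_NET_ADMIN; the only new part is the kernel-side propagation shown in the hunks above.

/* Userspace sketch: set the socket mark that ipcm_init_sk() now copies
 * into the cork for cork-based transmits.  Requires CAP_NET_ADMIN.
 */
#include <sys/socket.h>

static int set_mark(int fd, unsigned int mark)
{
	return setsockopt(fd, SOL_SOCKET, SO_MARK, &mark, sizeof(mark));
}
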
diff --git a/include/net/netfilter/br_netfilter.h b/include/net/netfilter/br_netfilter.h
index 2a613c84d49f..371696ec11b2 100644
--- a/include/net/netfilter/br_netfilter.h
+++ b/include/net/netfilter/br_netfilter.h
@@ -55,7 +55,6 @@ static inline struct rtable *bridge_parent_rtable(const struct net_device *dev)
struct net_device *setup_pre_routing(struct sk_buff *skb,
const struct net *net);
-#if IS_ENABLED(CONFIG_NETFILTER)
#if IS_ENABLED(CONFIG_IPV6)
int br_validate_ipv6(struct net *net, struct sk_buff *skb);
unsigned int br_nf_pre_routing_ipv6(void *priv,
@@ -68,12 +67,11 @@ static inline int br_validate_ipv6(struct net *net, struct sk_buff *skb)
}
static inline unsigned int
-br_nf_pre_routing_ipv6(const struct nf_hook_ops *ops, struct sk_buff *skb,
+br_nf_pre_routing_ipv6(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state)
{
return NF_ACCEPT;
}
#endif
-#endif
#endif /* _BR_NETFILTER_H_ */
diff --git a/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h b/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h
deleted file mode 100644
index c86895bc5eb6..000000000000
--- a/include/net/netfilter/ipv6/nf_conntrack_icmpv6.h
+++ /dev/null
@@ -1,21 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-/*
- * ICMPv6 tracking.
- *
- * 21 Apl 2004: Yasuyuki Kozakai @USAGI <yasuyuki.kozakai@toshiba.co.jp>
- * - separated from nf_conntrack_icmp.h
- *
- * Derived from include/linux/netfiter_ipv4/ip_conntrack_icmp.h
- */
-
-#ifndef _NF_CONNTRACK_ICMPV6_H
-#define _NF_CONNTRACK_ICMPV6_H
-
-#ifndef ICMPV6_NI_QUERY
-#define ICMPV6_NI_QUERY 139
-#endif
-#ifndef ICMPV6_NI_REPLY
-#define ICMPV6_NI_REPLY 140
-#endif
-
-#endif /* _NF_CONNTRACK_ICMPV6_H */
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 2cc304efe7f9..9f551f3b69c6 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -13,17 +13,14 @@
#ifndef _NF_CONNTRACK_H
#define _NF_CONNTRACK_H
-#include <linux/netfilter/nf_conntrack_common.h>
-
#include <linux/bitops.h>
#include <linux/compiler.h>
-#include <linux/atomic.h>
+#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tcp.h>
#include <linux/netfilter/nf_conntrack_dccp.h>
#include <linux/netfilter/nf_conntrack_sctp.h>
#include <linux/netfilter/nf_conntrack_proto_gre.h>
-#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
#include <net/netfilter/nf_conntrack_tuple.h>
@@ -59,7 +56,6 @@ struct nf_conntrack_net {
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
struct nf_conn {
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
/* Usage count in here is 1 for hash table, 1 per skb,
* plus 1 for any connection(s) we are `master' for
*
@@ -69,7 +65,6 @@ struct nf_conn {
* beware nf_ct_get() is different and don't inc refcnt.
*/
struct nf_conntrack ct_general;
-#endif
spinlock_t lock;
/* jiffies32 when this ct is considered dead */
@@ -150,18 +145,14 @@ void nf_conntrack_alter_reply(struct nf_conn *ct,
int nf_conntrack_tuple_taken(const struct nf_conntrack_tuple *tuple,
const struct nf_conn *ignored_conntrack);
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-
-#define NFCT_INFOMASK 7UL
-#define NFCT_PTRMASK ~(NFCT_INFOMASK)
-
/* Return conntrack_info and tuple hash for given skb. */
static inline struct nf_conn *
nf_ct_get(const struct sk_buff *skb, enum ip_conntrack_info *ctinfo)
{
- *ctinfo = skb->_nfct & NFCT_INFOMASK;
+ unsigned long nfct = skb_get_nfct(skb);
- return (struct nf_conn *)(skb->_nfct & NFCT_PTRMASK);
+ *ctinfo = nfct & NFCT_INFOMASK;
+ return (struct nf_conn *)(nfct & NFCT_PTRMASK);
}
/* decrement reference count on a conntrack */
@@ -171,8 +162,6 @@ static inline void nf_ct_put(struct nf_conn *ct)
nf_conntrack_put(&ct->ct_general);
}
-#endif
-
/* Protocol module loading */
int nf_ct_l3proto_try_module_get(unsigned short l3proto);
void nf_ct_l3proto_module_put(unsigned short l3proto);
@@ -324,16 +313,12 @@ void nf_ct_tmpl_free(struct nf_conn *tmpl);
u32 nf_ct_get_id(const struct nf_conn *ct);
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-
static inline void
nf_ct_set(struct sk_buff *skb, struct nf_conn *ct, enum ip_conntrack_info info)
{
- skb->_nfct = (unsigned long)ct | info;
+ skb_set_nfct(skb, (unsigned long)ct | info);
}
-#endif
-
#define NF_CT_STAT_INC(net, count) __this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_INC_ATOMIC(net, count) this_cpu_inc((net)->ct.stat->count)
#define NF_CT_STAT_ADD_ATOMIC(net, count, v) this_cpu_add((net)->ct.stat->count, (v))
diff --git a/include/net/netfilter/nf_conntrack_acct.h b/include/net/netfilter/nf_conntrack_acct.h
index ad9f2172dee1..f7a060c6eb28 100644
--- a/include/net/netfilter/nf_conntrack_acct.h
+++ b/include/net/netfilter/nf_conntrack_acct.h
@@ -45,7 +45,7 @@ struct nf_conn_acct *nf_ct_acct_ext_add(struct nf_conn *ct, gfp_t gfp)
#else
return NULL;
#endif
-};
+}
/* Check if connection tracking accounting is enabled */
static inline bool nf_ct_acct_enabled(struct net *net)
@@ -65,11 +65,9 @@ static inline void nf_ct_set_acct(struct net *net, bool enable)
#endif
}
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
void nf_conntrack_acct_pernet_init(struct net *net);
int nf_conntrack_acct_init(void);
void nf_conntrack_acct_fini(void);
-#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */
#endif /* _NF_CONNTRACK_ACCT_H */
diff --git a/include/net/netfilter/nf_conntrack_bridge.h b/include/net/netfilter/nf_conntrack_bridge.h
index 34c28f248b18..c564281ede5e 100644
--- a/include/net/netfilter/nf_conntrack_bridge.h
+++ b/include/net/netfilter/nf_conntrack_bridge.h
@@ -5,10 +5,10 @@
#include <linux/types.h>
#include <uapi/linux/if_ether.h>
+struct nf_hook_ops;
+
struct nf_ct_bridge_info {
-#if IS_ENABLED(CONFIG_NETFILTER)
struct nf_hook_ops *ops;
-#endif
unsigned int ops_size;
struct module *me;
};
@@ -16,11 +16,4 @@ struct nf_ct_bridge_info {
void nf_ct_bridge_register(struct nf_ct_bridge_info *info);
void nf_ct_bridge_unregister(struct nf_ct_bridge_info *info);
-struct nf_ct_bridge_frag_data {
- char mac[ETH_HLEN];
- bool vlan_present;
- u16 vlan_tci;
- __be16 vlan_proto;
-};
-
#endif
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 71a2d9cb64ea..09f2efea0b97 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -14,16 +14,16 @@
#define _NF_CONNTRACK_CORE_H
#include <linux/netfilter.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
+#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_ecache.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
/* This header is used to share core functionality between the
standalone connection tracking module, and the compatibility layer's use
of connection tracking. */
-#if IS_ENABLED(CONFIG_NETFILTER)
-unsigned int nf_conntrack_in(struct sk_buff *skb, const struct nf_hook_state *state);
-#endif
+unsigned int nf_conntrack_in(struct sk_buff *skb,
+ const struct nf_hook_state *state);
int nf_conntrack_init_net(struct net *net);
void nf_conntrack_cleanup_net(struct net *net);
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 52b44192b43f..eb81f9195e28 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -61,9 +61,10 @@ nf_ct_ecache_ext_add(struct nf_conn *ct, u16 ctmask, u16 expmask, gfp_t gfp)
#else
return NULL;
#endif
-};
+}
#ifdef CONFIG_NF_CONNTRACK_EVENTS
+
/* This structure is passed to event handler */
struct nf_ct_event {
struct nf_conn *ct;
@@ -84,9 +85,26 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct);
int nf_conntrack_eventmask_report(unsigned int eventmask, struct nf_conn *ct,
u32 portid, int report);
+#else
+
+static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct)
+{
+}
+
+static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
+ struct nf_conn *ct,
+ u32 portid,
+ int report)
+{
+ return 0;
+}
+
+#endif
+
static inline void
nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
struct net *net = nf_ct_net(ct);
struct nf_conntrack_ecache *e;
@@ -98,31 +116,42 @@ nf_conntrack_event_cache(enum ip_conntrack_events event, struct nf_conn *ct)
return;
set_bit(event, &e->cache);
+#endif
}
static inline int
nf_conntrack_event_report(enum ip_conntrack_events event, struct nf_conn *ct,
u32 portid, int report)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
const struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return 0;
return nf_conntrack_eventmask_report(1 << event, ct, portid, report);
+#else
+ return 0;
+#endif
}
static inline int
nf_conntrack_event(enum ip_conntrack_events event, struct nf_conn *ct)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
const struct net *net = nf_ct_net(ct);
if (!rcu_access_pointer(net->ct.nf_conntrack_event_cb))
return 0;
return nf_conntrack_eventmask_report(1 << event, ct, 0, 0);
+#else
+ return 0;
+#endif
}
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+
struct nf_exp_event {
struct nf_conntrack_expect *exp;
u32 portid;
@@ -148,41 +177,18 @@ void nf_conntrack_ecache_pernet_fini(struct net *net);
int nf_conntrack_ecache_init(void);
void nf_conntrack_ecache_fini(void);
-static inline void nf_conntrack_ecache_delayed_work(struct net *net)
+#else /* CONFIG_NF_CONNTRACK_EVENTS */
+
+static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
+ struct nf_conntrack_expect *exp,
+ u32 portid,
+ int report)
{
- if (!delayed_work_pending(&net->ct.ecache_dwork)) {
- schedule_delayed_work(&net->ct.ecache_dwork, HZ);
- net->ct.ecache_dwork_pending = true;
- }
}
-static inline void nf_conntrack_ecache_work(struct net *net)
+static inline void nf_conntrack_ecache_pernet_init(struct net *net)
{
- if (net->ct.ecache_dwork_pending) {
- net->ct.ecache_dwork_pending = false;
- mod_delayed_work(system_wq, &net->ct.ecache_dwork, 0);
- }
}
-#else /* CONFIG_NF_CONNTRACK_EVENTS */
-static inline void nf_conntrack_event_cache(enum ip_conntrack_events event,
- struct nf_conn *ct) {}
-static inline int nf_conntrack_eventmask_report(unsigned int eventmask,
- struct nf_conn *ct,
- u32 portid,
- int report) { return 0; }
-static inline int nf_conntrack_event(enum ip_conntrack_events event,
- struct nf_conn *ct) { return 0; }
-static inline int nf_conntrack_event_report(enum ip_conntrack_events event,
- struct nf_conn *ct,
- u32 portid,
- int report) { return 0; }
-static inline void nf_ct_deliver_cached_events(const struct nf_conn *ct) {}
-static inline void nf_ct_expect_event_report(enum ip_conntrack_expect_events e,
- struct nf_conntrack_expect *exp,
- u32 portid,
- int report) {}
-
-static inline void nf_conntrack_ecache_pernet_init(struct net *net) {}
static inline void nf_conntrack_ecache_pernet_fini(struct net *net)
{
@@ -197,14 +203,26 @@ static inline void nf_conntrack_ecache_fini(void)
{
}
+#endif /* CONFIG_NF_CONNTRACK_EVENTS */
+
static inline void nf_conntrack_ecache_delayed_work(struct net *net)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ if (!delayed_work_pending(&net->ct.ecache_dwork)) {
+ schedule_delayed_work(&net->ct.ecache_dwork, HZ);
+ net->ct.ecache_dwork_pending = true;
+ }
+#endif
}
static inline void nf_conntrack_ecache_work(struct net *net)
{
+#ifdef CONFIG_NF_CONNTRACK_EVENTS
+ if (net->ct.ecache_dwork_pending) {
+ net->ct.ecache_dwork_pending = false;
+ mod_delayed_work(system_wq, &net->ct.ecache_dwork, 0);
+ }
+#endif
}
-#endif /* CONFIG_NF_CONNTRACK_EVENTS */
#endif /*_NF_CONNTRACK_ECACHE_H*/
-
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 573429be4d59..0855b60fba17 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -126,7 +126,7 @@ void nf_ct_expect_init(struct nf_conntrack_expect *, unsigned int, u_int8_t,
const union nf_inet_addr *,
u_int8_t, const __be16 *, const __be16 *);
void nf_ct_expect_put(struct nf_conntrack_expect *exp);
-int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
+int nf_ct_expect_related_report(struct nf_conntrack_expect *expect,
u32 portid, int report, unsigned int flags);
static inline int nf_ct_expect_related(struct nf_conntrack_expect *expect,
unsigned int flags)
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 21f887c5058c..112a6f40dfaf 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -8,7 +8,7 @@
enum nf_ct_ext_id {
NF_CT_EXT_HELPER,
-#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
+#if IS_ENABLED(CONFIG_NF_NAT)
NF_CT_EXT_NAT,
#endif
NF_CT_EXT_SEQADJ,
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index c200b95d27ae..4cad1f0a327a 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -75,7 +75,6 @@ bool nf_conntrack_invert_icmp_tuple(struct nf_conntrack_tuple *tuple,
bool nf_conntrack_invert_icmpv6_tuple(struct nf_conntrack_tuple *tuple,
const struct nf_conntrack_tuple *orig);
-#if IS_ENABLED(CONFIG_NETFILTER)
int nf_conntrack_inet_error(struct nf_conn *tmpl, struct sk_buff *skb,
unsigned int dataoff,
const struct nf_hook_state *state,
@@ -132,7 +131,6 @@ int nf_conntrack_gre_packet(struct nf_conn *ct,
unsigned int dataoff,
enum ip_conntrack_info ctinfo,
const struct nf_hook_state *state);
-#endif
void nf_conntrack_generic_init_net(struct net *net);
void nf_conntrack_tcp_init_net(struct net *net);
@@ -181,41 +179,41 @@ void nf_ct_l4proto_log_invalid(const struct sk_buff *skb,
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
static inline struct nf_generic_net *nf_generic_pernet(struct net *net)
{
- return &net->ct.nf_ct_proto.generic;
+ return &net->ct.nf_ct_proto.generic;
}
static inline struct nf_tcp_net *nf_tcp_pernet(struct net *net)
{
- return &net->ct.nf_ct_proto.tcp;
+ return &net->ct.nf_ct_proto.tcp;
}
static inline struct nf_udp_net *nf_udp_pernet(struct net *net)
{
- return &net->ct.nf_ct_proto.udp;
+ return &net->ct.nf_ct_proto.udp;
}
static inline struct nf_icmp_net *nf_icmp_pernet(struct net *net)
{
- return &net->ct.nf_ct_proto.icmp;
+ return &net->ct.nf_ct_proto.icmp;
}
static inline struct nf_icmp_net *nf_icmpv6_pernet(struct net *net)
{
- return &net->ct.nf_ct_proto.icmpv6;
+ return &net->ct.nf_ct_proto.icmpv6;
}
#endif
#ifdef CONFIG_NF_CT_PROTO_DCCP
static inline struct nf_dccp_net *nf_dccp_pernet(struct net *net)
{
- return &net->ct.nf_ct_proto.dccp;
+ return &net->ct.nf_ct_proto.dccp;
}
#endif
#ifdef CONFIG_NF_CT_PROTO_SCTP
static inline struct nf_sctp_net *nf_sctp_pernet(struct net *net)
{
- return &net->ct.nf_ct_proto.sctp;
+ return &net->ct.nf_ct_proto.sctp;
}
#endif
diff --git a/include/net/netfilter/nf_conntrack_labels.h b/include/net/netfilter/nf_conntrack_labels.h
index 4eacce6f3bcc..ba916411c4e1 100644
--- a/include/net/netfilter/nf_conntrack_labels.h
+++ b/include/net/netfilter/nf_conntrack_labels.h
@@ -1,11 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#include <linux/types.h>
-#include <net/net_namespace.h>
+
+#ifndef _NF_CONNTRACK_LABELS_H
+#define _NF_CONNTRACK_LABELS_H
+
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <linux/types.h>
+#include <net/net_namespace.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
-
#include <uapi/linux/netfilter/xt_connlabel.h>
#define NF_CT_LABELS_MAX_SIZE ((XT_CONNLABEL_MAXBIT + 1) / BITS_PER_BYTE)
@@ -51,3 +54,5 @@ static inline void nf_conntrack_labels_fini(void) {}
static inline int nf_connlabels_get(struct net *net, unsigned int bit) { return 0; }
static inline void nf_connlabels_put(struct net *net) {}
#endif
+
+#endif /* _NF_CONNTRACK_LABELS_H */
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index 2f0171d24997..6a3ab081e4bf 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -32,6 +32,7 @@ static inline struct nf_conn_synproxy *nfct_synproxy_ext_add(struct nf_conn *ct)
static inline bool nf_ct_add_synproxy(struct nf_conn *ct,
const struct nf_conn *tmpl)
{
+#if IS_ENABLED(CONFIG_NETFILTER_SYNPROXY)
if (tmpl && nfct_synproxy(tmpl)) {
if (!nfct_seqadj_ext_add(ct))
return false;
@@ -39,47 +40,9 @@ static inline bool nf_ct_add_synproxy(struct nf_conn *ct,
if (!nfct_synproxy_ext_add(ct))
return false;
}
+#endif
return true;
}
-struct synproxy_stats {
- unsigned int syn_received;
- unsigned int cookie_invalid;
- unsigned int cookie_valid;
- unsigned int cookie_retrans;
- unsigned int conn_reopened;
-};
-
-struct synproxy_net {
- struct nf_conn *tmpl;
- struct synproxy_stats __percpu *stats;
- unsigned int hook_ref4;
- unsigned int hook_ref6;
-};
-
-extern unsigned int synproxy_net_id;
-static inline struct synproxy_net *synproxy_pernet(struct net *net)
-{
- return net_generic(net, synproxy_net_id);
-}
-
-struct synproxy_options {
- u8 options;
- u8 wscale;
- u16 mss_option;
- u16 mss_encode;
- u32 tsval;
- u32 tsecr;
-};
-
-struct tcphdr;
-struct nf_synproxy_info;
-bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
- const struct tcphdr *th,
- struct synproxy_options *opts);
-
-void synproxy_init_timestamp_cookie(const struct nf_synproxy_info *info,
- struct synproxy_options *opts);
-
#endif /* _NF_CONNTRACK_SYNPROXY_H */
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 00a8fbb2d735..6dd72396f534 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -32,6 +32,7 @@ struct nf_conn_timeout {
static inline unsigned int *
nf_ct_timeout_data(const struct nf_conn_timeout *t)
{
+#ifdef CONFIG_NF_CONNTRACK_TIMEOUT
struct nf_ct_timeout *timeout;
timeout = rcu_dereference(t->timeout);
@@ -39,6 +40,9 @@ nf_ct_timeout_data(const struct nf_conn_timeout *t)
return NULL;
return (unsigned int *)timeout->data;
+#else
+ return NULL;
+#endif
}
static inline
diff --git a/include/net/netfilter/nf_conntrack_timestamp.h b/include/net/netfilter/nf_conntrack_timestamp.h
index 2b8aeba649aa..820ea34b6029 100644
--- a/include/net/netfilter/nf_conntrack_timestamp.h
+++ b/include/net/netfilter/nf_conntrack_timestamp.h
@@ -38,22 +38,6 @@ struct nf_conn_tstamp *nf_ct_tstamp_ext_add(struct nf_conn *ct, gfp_t gfp)
#endif
};
-static inline bool nf_ct_tstamp_enabled(struct net *net)
-{
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- return net->ct.sysctl_tstamp != 0;
-#else
- return false;
-#endif
-}
-
-static inline void nf_ct_set_tstamp(struct net *net, bool enable)
-{
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
- net->ct.sysctl_tstamp = enable;
-#endif
-}
-
#ifdef CONFIG_NF_CONNTRACK_TIMESTAMP
void nf_conntrack_tstamp_pernet_init(struct net *net);
diff --git a/include/net/netfilter/nf_conntrack_tuple.h b/include/net/netfilter/nf_conntrack_tuple.h
index 480c87b44a96..9334371c94e2 100644
--- a/include/net/netfilter/nf_conntrack_tuple.h
+++ b/include/net/netfilter/nf_conntrack_tuple.h
@@ -121,10 +121,9 @@ struct nf_conntrack_tuple_hash {
struct nf_conntrack_tuple tuple;
};
-#if IS_ENABLED(CONFIG_NETFILTER)
static inline bool __nf_ct_tuple_src_equal(const struct nf_conntrack_tuple *t1,
const struct nf_conntrack_tuple *t2)
-{
+{
return (nf_inet_addr_cmp(&t1->src.u3, &t2->src.u3) &&
t1->src.u.all == t2->src.u.all &&
t1->src.l3num == t2->src.l3num);
@@ -184,6 +183,5 @@ nf_ct_tuple_mask_cmp(const struct nf_conntrack_tuple *t,
return nf_ct_tuple_src_mask_cmp(t, tuple, mask) &&
__nf_ct_tuple_dst_equal(t, tuple);
}
-#endif
#endif /* _NF_CONNTRACK_TUPLE_H */
diff --git a/include/net/netfilter/nf_conntrack_zones.h b/include/net/netfilter/nf_conntrack_zones.h
index 52950baa3ab5..48dbadb96fb3 100644
--- a/include/net/netfilter/nf_conntrack_zones.h
+++ b/include/net/netfilter/nf_conntrack_zones.h
@@ -3,9 +3,7 @@
#define _NF_CONNTRACK_ZONES_H
#include <linux/netfilter/nf_conntrack_zones_common.h>
-
-#if IS_ENABLED(CONFIG_NF_CONNTRACK)
-#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack.h>
static inline const struct nf_conntrack_zone *
nf_ct_zone(const struct nf_conn *ct)
@@ -87,5 +85,5 @@ static inline bool nf_ct_zone_equal_any(const struct nf_conn *a,
return true;
#endif
}
-#endif /* IS_ENABLED(CONFIG_NF_CONNTRACK) */
+
#endif /* _NF_CONNTRACK_ZONES_H */
diff --git a/include/net/netfilter/nf_dup_netdev.h b/include/net/netfilter/nf_dup_netdev.h
index 181672672160..b175d271aec9 100644
--- a/include/net/netfilter/nf_dup_netdev.h
+++ b/include/net/netfilter/nf_dup_netdev.h
@@ -7,4 +7,10 @@
void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif);
void nf_fwd_netdev_egress(const struct nft_pktinfo *pkt, int oif);
+struct nft_offload_ctx;
+struct nft_flow_rule;
+
+int nft_fwd_dup_netdev_offload(struct nft_offload_ctx *ctx,
+ struct nft_flow_rule *flow,
+ enum flow_action_id id, int oif);
#endif
diff --git a/include/net/netfilter/nf_flow_table.h b/include/net/netfilter/nf_flow_table.h
index 609df33b1209..b37a7d608134 100644
--- a/include/net/netfilter/nf_flow_table.h
+++ b/include/net/netfilter/nf_flow_table.h
@@ -17,9 +17,7 @@ struct nf_flowtable_type {
int family;
int (*init)(struct nf_flowtable *ft);
void (*free)(struct nf_flowtable *ft);
-#if IS_ENABLED(CONFIG_NETFILTER)
nf_hookfn *hook;
-#endif
struct module *owner;
};
@@ -117,14 +115,12 @@ struct flow_ports {
__be16 source, dest;
};
-#if IS_ENABLED(CONFIG_NETFILTER)
unsigned int nf_flow_offload_ip_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
unsigned int nf_flow_offload_ipv6_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
-#endif
#define MODULE_ALIAS_NF_FLOWTABLE(family) \
MODULE_ALIAS("nf-flowtable-" __stringify(family))
-#endif /* _FLOW_OFFLOAD_H */
+#endif /* _NF_FLOW_TABLE_H */
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index eec208fb9c23..0d412dd63707 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -1,9 +1,14 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _NF_NAT_H
#define _NF_NAT_H
+
+#include <linux/list.h>
#include <linux/netfilter_ipv4.h>
-#include <linux/netfilter/nf_nat.h>
+#include <linux/netfilter/nf_conntrack_pptp.h>
+#include <net/netfilter/nf_conntrack.h>
+#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_conntrack_tuple.h>
+#include <uapi/linux/netfilter/nf_nat.h>
enum nf_nat_manip_type {
NF_NAT_MANIP_SRC,
@@ -14,20 +19,14 @@ enum nf_nat_manip_type {
#define HOOK2MANIP(hooknum) ((hooknum) != NF_INET_POST_ROUTING && \
(hooknum) != NF_INET_LOCAL_IN)
-#include <linux/list.h>
-#include <linux/netfilter/nf_conntrack_pptp.h>
-#include <net/netfilter/nf_conntrack_extend.h>
-
/* per conntrack: nat application helper private data */
union nf_conntrack_nat_help {
/* insert nat helper private data here */
-#if defined(CONFIG_NF_NAT_PPTP) || defined(CONFIG_NF_NAT_PPTP_MODULE)
+#if IS_ENABLED(CONFIG_NF_NAT_PPTP)
struct nf_nat_pptp nat_pptp_info;
#endif
};
-struct nf_conn;
-
/* The structure embedded in the conntrack structure. */
struct nf_conn_nat {
union nf_conntrack_nat_help help;
@@ -48,7 +47,7 @@ struct nf_conn_nat *nf_ct_nat_ext_add(struct nf_conn *ct);
static inline struct nf_conn_nat *nfct_nat(const struct nf_conn *ct)
{
-#if defined(CONFIG_NF_NAT) || defined(CONFIG_NF_NAT_MODULE)
+#if IS_ENABLED(CONFIG_NF_NAT)
return nf_ct_ext_find(ct, NF_CT_EXT_NAT);
#else
return NULL;
@@ -69,12 +68,10 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum,
#endif
}
-#if IS_ENABLED(CONFIG_NETFILTER)
int nf_nat_register_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
const struct nf_hook_ops *nat_ops, unsigned int ops_count);
void nf_nat_unregister_fn(struct net *net, u8 pf, const struct nf_hook_ops *ops,
unsigned int ops_count);
-#endif
unsigned int nf_nat_packet(struct nf_conn *ct, enum ip_conntrack_info ctinfo,
unsigned int hooknum, struct sk_buff *skb);
@@ -94,7 +91,6 @@ int nf_nat_icmpv6_reply_translation(struct sk_buff *skb, struct nf_conn *ct,
enum ip_conntrack_info ctinfo,
unsigned int hooknum, unsigned int hdrlen);
-#if IS_ENABLED(CONFIG_NETFILTER)
int nf_nat_ipv4_register_fn(struct net *net, const struct nf_hook_ops *ops);
void nf_nat_ipv4_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
@@ -107,7 +103,6 @@ void nf_nat_inet_unregister_fn(struct net *net, const struct nf_hook_ops *ops);
unsigned int
nf_nat_inet_fn(void *priv, struct sk_buff *skb,
const struct nf_hook_state *state);
-#endif
int nf_xfrm_me_harder(struct net *n, struct sk_buff *s, unsigned int family);
diff --git a/include/net/netfilter/nf_nat_masquerade.h b/include/net/netfilter/nf_nat_masquerade.h
index 54a14d643c34..be7abc9d5f22 100644
--- a/include/net/netfilter/nf_nat_masquerade.h
+++ b/include/net/netfilter/nf_nat_masquerade.h
@@ -2,6 +2,7 @@
#ifndef _NF_NAT_MASQUERADE_H_
#define _NF_NAT_MASQUERADE_H_
+#include <linux/skbuff.h>
#include <net/netfilter/nf_nat.h>
unsigned int
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 80edb46a1bbc..47088083667b 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -15,9 +15,7 @@ struct nf_queue_entry {
unsigned int id;
unsigned int hook_index; /* index in hook_entries->hook[] */
-#if IS_ENABLED(CONFIG_NETFILTER)
struct nf_hook_state state;
-#endif
u16 size; /* sizeof(entry) + saved route keys */
/* extra space to store route keys */
@@ -123,9 +121,7 @@ nfqueue_hash(const struct sk_buff *skb, u16 queue, u16 queues_total, u8 family,
return queue;
}
-#if IS_ENABLED(CONFIG_NETFILTER)
int nf_queue(struct sk_buff *skb, struct nf_hook_state *state,
unsigned int index, unsigned int verdict);
-#endif
#endif /* _NF_QUEUE_H */
diff --git a/include/net/netfilter/nf_synproxy.h b/include/net/netfilter/nf_synproxy.h
index dc420b47e3aa..a336f9434e73 100644
--- a/include/net/netfilter/nf_synproxy.h
+++ b/include/net/netfilter/nf_synproxy.h
@@ -11,6 +11,44 @@
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_synproxy.h>
+struct synproxy_stats {
+ unsigned int syn_received;
+ unsigned int cookie_invalid;
+ unsigned int cookie_valid;
+ unsigned int cookie_retrans;
+ unsigned int conn_reopened;
+};
+
+struct synproxy_net {
+ struct nf_conn *tmpl;
+ struct synproxy_stats __percpu *stats;
+ unsigned int hook_ref4;
+ unsigned int hook_ref6;
+};
+
+extern unsigned int synproxy_net_id;
+static inline struct synproxy_net *synproxy_pernet(struct net *net)
+{
+ return net_generic(net, synproxy_net_id);
+}
+
+struct synproxy_options {
+ u8 options;
+ u8 wscale;
+ u16 mss_option;
+ u16 mss_encode;
+ u32 tsval;
+ u32 tsecr;
+};
+
+struct nf_synproxy_info;
+bool synproxy_parse_options(const struct sk_buff *skb, unsigned int doff,
+ const struct tcphdr *th,
+ struct synproxy_options *opts);
+
+void synproxy_init_timestamp_cookie(const struct nf_synproxy_info *info,
+ struct synproxy_options *opts);
+
void synproxy_send_client_synack(struct net *net, const struct sk_buff *skb,
const struct tcphdr *th,
const struct synproxy_options *opts);
@@ -20,10 +58,10 @@ bool synproxy_recv_client_ack(struct net *net,
const struct tcphdr *th,
struct synproxy_options *opts, u32 recv_seq);
-#if IS_ENABLED(CONFIG_NETFILTER)
+struct nf_hook_state;
+
unsigned int ipv4_synproxy_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *nhs);
-#endif
int nf_synproxy_ipv4_init(struct synproxy_net *snet, struct net *net);
void nf_synproxy_ipv4_fini(struct synproxy_net *snet, struct net *net);
@@ -37,10 +75,8 @@ bool synproxy_recv_client_ack_ipv6(struct net *net, const struct sk_buff *skb,
const struct tcphdr *th,
struct synproxy_options *opts, u32 recv_seq);
-#if IS_ENABLED(CONFIG_NETFILTER)
unsigned int ipv6_synproxy_hook(void *priv, struct sk_buff *skb,
const struct nf_hook_state *nhs);
-#endif
int nf_synproxy_ipv6_init(struct synproxy_net *snet, struct net *net);
void nf_synproxy_ipv6_fini(struct synproxy_net *snet, struct net *net);
#else
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 3d9e66aa0139..2655e03dbe1b 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -26,7 +26,6 @@ struct nft_pktinfo {
struct xt_action_param xt;
};
-#if IS_ENABLED(CONFIG_NETFILTER)
static inline struct net *nft_net(const struct nft_pktinfo *pkt)
{
return pkt->xt.state->net;
@@ -59,7 +58,6 @@ static inline void nft_set_pktinfo(struct nft_pktinfo *pkt,
pkt->skb = skb;
pkt->xt.state = state;
}
-#endif
static inline void nft_set_pktinfo_unspec(struct nft_pktinfo *pkt,
struct sk_buff *skb)
@@ -947,11 +945,9 @@ struct nft_chain_type {
int family;
struct module *owner;
unsigned int hook_mask;
-#if IS_ENABLED(CONFIG_NETFILTER)
nf_hookfn *hooks[NF_MAX_HOOKS];
int (*ops_register)(struct net *net, const struct nf_hook_ops *ops);
void (*ops_unregister)(struct net *net, const struct nf_hook_ops *ops);
-#endif
};
int nft_chain_validate_dependency(const struct nft_chain *chain,
@@ -977,9 +973,7 @@ struct nft_stats {
* @flow_block: flow block (for hardware offload)
*/
struct nft_base_chain {
-#if IS_ENABLED(CONFIG_NETFILTER)
struct nf_hook_ops ops;
-#endif
const struct nft_chain_type *type;
u8 policy;
u8 flags;
@@ -1179,9 +1173,7 @@ struct nft_flowtable {
use:30;
u64 handle;
/* runtime data below here */
-#if IS_ENABLED(CONFIG_NETFILTER)
struct nf_hook_ops *ops ____cacheline_aligned;
-#endif
struct nf_flowtable data;
};
diff --git a/include/net/netfilter/nf_tables_offload.h b/include/net/netfilter/nf_tables_offload.h
index db104665a9e4..03cf5856d76f 100644
--- a/include/net/netfilter/nf_tables_offload.h
+++ b/include/net/netfilter/nf_tables_offload.h
@@ -26,6 +26,7 @@ struct nft_offload_ctx {
u8 protonum;
} dep;
unsigned int num_actions;
+ struct net *net;
struct nft_offload_reg regs[NFT_REG32_15 + 1];
};
@@ -61,13 +62,9 @@ struct nft_flow_rule {
#define NFT_OFFLOAD_F_ACTION (1 << 0)
struct nft_rule;
-struct nft_flow_rule *nft_flow_rule_create(const struct nft_rule *rule);
+struct nft_flow_rule *nft_flow_rule_create(struct net *net, const struct nft_rule *rule);
void nft_flow_rule_destroy(struct nft_flow_rule *flow);
int nft_flow_rule_offload_commit(struct net *net);
-void nft_indr_block_get_and_ing_cmd(struct net_device *dev,
- flow_indr_block_bind_cb_t *cb,
- void *cb_priv,
- enum flow_block_command command);
#define NFT_OFFLOAD_MATCH(__key, __base, __field, __len, __reg) \
(__reg)->base_offset = \
@@ -80,4 +77,7 @@ void nft_indr_block_get_and_ing_cmd(struct net_device *dev,
int nft_chain_offload_priority(struct nft_base_chain *basechain);
+int nft_offload_init(void);
+void nft_offload_exit(void);
+
#endif
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 8da5365850cd..580b7a2e40e1 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -419,6 +419,8 @@ enum devlink_attr {
DEVLINK_ATTR_TRAP_METADATA, /* nested */
DEVLINK_ATTR_TRAP_GROUP_NAME, /* string */
+ DEVLINK_ATTR_RELOAD_FAILED, /* u8 0 or 1 */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 0ff932dadc8e..ed8881ad18ed 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -1481,7 +1481,8 @@ enum nft_ct_expectation_attributes {
#define NFT_OBJECT_CT_TIMEOUT 7
#define NFT_OBJECT_SECMARK 8
#define NFT_OBJECT_CT_EXPECT 9
-#define __NFT_OBJECT_MAX 10
+#define NFT_OBJECT_SYNPROXY 10
+#define __NFT_OBJECT_MAX 11
#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
/**
diff --git a/include/uapi/linux/ptp_clock.h b/include/uapi/linux/ptp_clock.h
index 1bc794ad957a..f16301015949 100644
--- a/include/uapi/linux/ptp_clock.h
+++ b/include/uapi/linux/ptp_clock.h
@@ -25,12 +25,22 @@
#include <linux/ioctl.h>
#include <linux/types.h>
-/* PTP_xxx bits, for the flags field within the request structures. */
+/*
+ * Bits of the ptp_extts_request.flags field:
+ */
#define PTP_ENABLE_FEATURE (1<<0)
#define PTP_RISING_EDGE (1<<1)
#define PTP_FALLING_EDGE (1<<2)
+#define PTP_EXTTS_VALID_FLAGS (PTP_ENABLE_FEATURE | \
+ PTP_RISING_EDGE | \
+ PTP_FALLING_EDGE)
/*
+ * Bits of the ptp_perout_request.flags field:
+ */
+#define PTP_PEROUT_ONE_SHOT (1<<0)
+#define PTP_PEROUT_VALID_FLAGS (PTP_PEROUT_ONE_SHOT)
+/*
* struct ptp_clock_time - represents a time value
*
* The sign of the seconds field applies to the whole value. The
@@ -67,7 +77,7 @@ struct ptp_perout_request {
struct ptp_clock_time start; /* Absolute start time. */
struct ptp_clock_time period; /* Desired period, zero means disable. */
unsigned int index; /* Which channel to configure. */
- unsigned int flags; /* Reserved for future use. */
+ unsigned int flags;
unsigned int rsv[4]; /* Reserved for future use. */
};
@@ -149,6 +159,18 @@ struct ptp_pin_desc {
#define PTP_SYS_OFFSET_EXTENDED \
_IOWR(PTP_CLK_MAGIC, 9, struct ptp_sys_offset_extended)
+#define PTP_CLOCK_GETCAPS2 _IOR(PTP_CLK_MAGIC, 10, struct ptp_clock_caps)
+#define PTP_EXTTS_REQUEST2 _IOW(PTP_CLK_MAGIC, 11, struct ptp_extts_request)
+#define PTP_PEROUT_REQUEST2 _IOW(PTP_CLK_MAGIC, 12, struct ptp_perout_request)
+#define PTP_ENABLE_PPS2 _IOW(PTP_CLK_MAGIC, 13, int)
+#define PTP_SYS_OFFSET2 _IOW(PTP_CLK_MAGIC, 14, struct ptp_sys_offset)
+#define PTP_PIN_GETFUNC2 _IOWR(PTP_CLK_MAGIC, 15, struct ptp_pin_desc)
+#define PTP_PIN_SETFUNC2 _IOW(PTP_CLK_MAGIC, 16, struct ptp_pin_desc)
+#define PTP_SYS_OFFSET_PRECISE2 \
+ _IOWR(PTP_CLK_MAGIC, 17, struct ptp_sys_offset_precise)
+#define PTP_SYS_OFFSET_EXTENDED2 \
+ _IOWR(PTP_CLK_MAGIC, 18, struct ptp_sys_offset_extended)
+
struct ptp_extts_event {
struct ptp_clock_time t; /* Time event occured. */
unsigned int index; /* Which channel produced the event. */
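The new *2 ioctls pair with the PTP_EXTTS_VALID_FLAGS and PTP_PEROUT_VALID_FLAGS masks: the kernel can reject reserved or unknown flag bits in these requests instead of passing them through unchecked, while the original ioctls keep their old behaviour for compatibility. A minimal user-space sketch of arming an external-timestamp channel via the new request (the device path and channel number are placeholders):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/ptp_clock.h>

int main(void)
{
	struct ptp_extts_request req;
	struct ptp_extts_event ev;
	int fd = open("/dev/ptp0", O_RDWR);	/* example device */

	if (fd < 0)
		return 1;

	memset(&req, 0, sizeof(req));
	req.index = 0;				/* channel 0, illustrative */
	req.flags = PTP_ENABLE_FEATURE | PTP_RISING_EDGE;

	/* PTP_EXTTS_REQUEST2 checks req.flags against
	 * PTP_EXTTS_VALID_FLAGS; the older PTP_EXTTS_REQUEST does not.
	 */
	if (ioctl(fd, PTP_EXTTS_REQUEST2, &req) < 0)
		return 1;

	/* External timestamp events are read back from the same fd. */
	if (read(fd, &ev, sizeof(ev)) == sizeof(ev))
		printf("extts ch%u at %lld.%09u\n", ev.index,
		       (long long)ev.t.sec, ev.t.nsec);

	close(fd);
	return 0;
}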
diff --git a/net/bridge/netfilter/ebt_802_3.c b/net/bridge/netfilter/ebt_802_3.c
index 2c8fe24400e5..68c2519bdc52 100644
--- a/net/bridge/netfilter/ebt_802_3.c
+++ b/net/bridge/netfilter/ebt_802_3.c
@@ -11,7 +11,13 @@
#include <linux/module.h>
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_bridge/ebtables.h>
-#include <linux/netfilter_bridge/ebt_802_3.h>
+#include <linux/skbuff.h>
+#include <uapi/linux/netfilter_bridge/ebt_802_3.h>
+
+static struct ebt_802_3_hdr *ebt_802_3_hdr(const struct sk_buff *skb)
+{
+ return (struct ebt_802_3_hdr *)skb_mac_header(skb);
+}
static bool
ebt_802_3_mt(const struct sk_buff *skb, struct xt_action_param *par)
diff --git a/net/bridge/netfilter/nf_conntrack_bridge.c b/net/bridge/netfilter/nf_conntrack_bridge.c
index 4f5444d2a526..8842798c29e6 100644
--- a/net/bridge/netfilter/nf_conntrack_bridge.c
+++ b/net/bridge/netfilter/nf_conntrack_bridge.c
@@ -17,7 +17,6 @@
#include <net/netfilter/nf_conntrack_bridge.h>
#include <linux/netfilter/nf_tables.h>
-#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netfilter/nf_tables.h>
#include "../br_private.h"
@@ -27,9 +26,9 @@
*/
static int nf_br_ip_fragment(struct net *net, struct sock *sk,
struct sk_buff *skb,
- struct nf_ct_bridge_frag_data *data,
+ struct nf_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
- const struct nf_ct_bridge_frag_data *data,
+ const struct nf_bridge_frag_data *data,
struct sk_buff *))
{
int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
@@ -279,7 +278,7 @@ static unsigned int nf_ct_bridge_pre(void *priv, struct sk_buff *skb,
}
static void nf_ct_bridge_frag_save(struct sk_buff *skb,
- struct nf_ct_bridge_frag_data *data)
+ struct nf_bridge_frag_data *data)
{
if (skb_vlan_tag_present(skb)) {
data->vlan_present = true;
@@ -294,10 +293,10 @@ static void nf_ct_bridge_frag_save(struct sk_buff *skb,
static unsigned int
nf_ct_bridge_refrag(struct sk_buff *skb, const struct nf_hook_state *state,
int (*output)(struct net *, struct sock *sk,
- const struct nf_ct_bridge_frag_data *data,
+ const struct nf_bridge_frag_data *data,
struct sk_buff *))
{
- struct nf_ct_bridge_frag_data data;
+ struct nf_bridge_frag_data data;
if (!BR_INPUT_SKB_CB(skb)->frag_max_size)
return NF_ACCEPT;
@@ -320,7 +319,7 @@ nf_ct_bridge_refrag(struct sk_buff *skb, const struct nf_hook_state *state,
/* Actually only slow path refragmentation needs this. */
static int nf_ct_bridge_frag_restore(struct sk_buff *skb,
- const struct nf_ct_bridge_frag_data *data)
+ const struct nf_bridge_frag_data *data)
{
int err;
@@ -341,7 +340,7 @@ static int nf_ct_bridge_frag_restore(struct sk_buff *skb,
}
static int nf_ct_bridge_refrag_post(struct net *net, struct sock *sk,
- const struct nf_ct_bridge_frag_data *data,
+ const struct nf_bridge_frag_data *data,
struct sk_buff *skb)
{
int err;
diff --git a/net/core/devlink.c b/net/core/devlink.c
index 4a2fb94c44cf..e48680efe54a 100644
--- a/net/core/devlink.c
+++ b/net/core/devlink.c
@@ -471,6 +471,8 @@ static int devlink_nl_fill(struct sk_buff *msg, struct devlink *devlink,
if (devlink_nl_put_handle(msg, devlink))
goto nla_put_failure;
+ if (nla_put_u8(msg, DEVLINK_ATTR_RELOAD_FAILED, devlink->reload_failed))
+ goto nla_put_failure;
genlmsg_end(msg, hdr);
return 0;
@@ -2672,12 +2674,32 @@ devlink_resources_validate(struct devlink *devlink,
return err;
}
+static bool devlink_reload_supported(struct devlink *devlink)
+{
+ return devlink->ops->reload_down && devlink->ops->reload_up;
+}
+
+static void devlink_reload_failed_set(struct devlink *devlink,
+ bool reload_failed)
+{
+ if (devlink->reload_failed == reload_failed)
+ return;
+ devlink->reload_failed = reload_failed;
+ devlink_notify(devlink, DEVLINK_CMD_NEW);
+}
+
+bool devlink_is_reload_failed(const struct devlink *devlink)
+{
+ return devlink->reload_failed;
+}
+EXPORT_SYMBOL_GPL(devlink_is_reload_failed);
+
static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
{
struct devlink *devlink = info->user_ptr[0];
int err;
- if (!devlink->ops->reload)
+ if (!devlink_reload_supported(devlink))
return -EOPNOTSUPP;
err = devlink_resources_validate(devlink, NULL, info);
@@ -2685,7 +2707,12 @@ static int devlink_nl_cmd_reload(struct sk_buff *skb, struct genl_info *info)
NL_SET_ERR_MSG_MOD(info->extack, "resources size validation failed");
return err;
}
- return devlink->ops->reload(devlink, info->extack);
+ err = devlink->ops->reload_down(devlink, info->extack);
+ if (err)
+ return err;
+ err = devlink->ops->reload_up(devlink, info->extack);
+ devlink_reload_failed_set(devlink, !!err);
+ return err;
}
static int devlink_nl_flash_update_fill(struct sk_buff *msg,
@@ -7150,7 +7177,7 @@ __devlink_param_driverinit_value_set(struct devlink *devlink,
int devlink_param_driverinit_value_get(struct devlink *devlink, u32 param_id,
union devlink_param_value *init_val)
{
- if (!devlink->ops->reload)
+ if (!devlink_reload_supported(devlink))
return -EOPNOTSUPP;
return __devlink_param_driverinit_value_get(&devlink->param_list,
@@ -7197,7 +7224,7 @@ int devlink_port_param_driverinit_value_get(struct devlink_port *devlink_port,
{
struct devlink *devlink = devlink_port->devlink;
- if (!devlink->ops->reload)
+ if (!devlink_reload_supported(devlink))
return -EOPNOTSUPP;
return __devlink_param_driverinit_value_get(&devlink_port->param_list,
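devlink_nl_cmd_reload() now drives the reload in two phases: ->reload_down tears the instance down, ->reload_up brings it back, and devlink_reload_failed_set() records and notifies whether the second half failed so DEVLINK_ATTR_RELOAD_FAILED (added in the uapi hunk above) can report it. A hedged sketch of the driver side, assuming a driver with its own teardown/init helpers; struct my_priv, my_teardown() and my_init() are placeholders, not from this patch:

/* Illustrative driver glue for the split reload path.  Both callbacks
 * must be provided, otherwise devlink_reload_supported() is false and
 * the reload command returns -EOPNOTSUPP.
 */
static int my_devlink_reload_down(struct devlink *devlink,
				  struct netlink_ext_ack *extack)
{
	struct my_priv *priv = devlink_priv(devlink);

	my_teardown(priv);		/* placeholder teardown helper */
	return 0;
}

static int my_devlink_reload_up(struct devlink *devlink,
				struct netlink_ext_ack *extack)
{
	struct my_priv *priv = devlink_priv(devlink);

	/* A non-zero return here is what sets DEVLINK_ATTR_RELOAD_FAILED. */
	return my_init(priv);		/* placeholder re-init helper */
}

static const struct devlink_ops my_devlink_ops = {
	.reload_down	= my_devlink_reload_down,
	.reload_up	= my_devlink_reload_up,
};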
diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c
index cc7ef0d05bbd..5eb73775c3f7 100644
--- a/net/ipv4/ip_output.c
+++ b/net/ipv4/ip_output.c
@@ -1266,6 +1266,7 @@ static int ip_setup_cork(struct sock *sk, struct inet_cork *cork,
cork->length = 0;
cork->ttl = ipc->ttl;
cork->tos = ipc->tos;
+ cork->mark = ipc->sockc.mark;
cork->priority = ipc->priority;
cork->transmit_time = ipc->sockc.transmit_time;
cork->tx_flags = 0;
@@ -1529,7 +1530,7 @@ struct sk_buff *__ip_make_skb(struct sock *sk,
}
skb->priority = (cork->tos != -1) ? cork->priority: sk->sk_priority;
- skb->mark = sk->sk_mark;
+ skb->mark = cork->mark;
skb->tstamp = cork->transmit_time;
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
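Together with the flowi mark changes in ping.c, raw.c and udp.c below, storing ipc.sockc.mark in the cork lets a mark supplied per sendmsg() override sk->sk_mark for the packets that call generates. A hedged user-space sketch of supplying such a mark through an SO_MARK control message (this assumes the SO_MARK cmsg path, which requires CAP_NET_ADMIN; destination and mark value are placeholders):

#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>

/* Send one UDP datagram whose skb->mark comes from the control message
 * rather than from a prior setsockopt(SO_MARK) on the socket.
 */
static ssize_t send_marked(int fd, const struct sockaddr_in *dst,
			   const void *buf, size_t len, unsigned int mark)
{
	union {
		char buf[CMSG_SPACE(sizeof(unsigned int))];
		struct cmsghdr align;
	} ctl;
	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
	struct msghdr msg = {
		.msg_name	= (void *)dst,
		.msg_namelen	= sizeof(*dst),
		.msg_iov	= &iov,
		.msg_iovlen	= 1,
		.msg_control	= ctl.buf,
		.msg_controllen	= sizeof(ctl.buf),
	};
	struct cmsghdr *cm = CMSG_FIRSTHDR(&msg);

	cm->cmsg_level = SOL_SOCKET;
	cm->cmsg_type  = SO_MARK;
	cm->cmsg_len   = CMSG_LEN(sizeof(mark));
	memcpy(CMSG_DATA(cm), &mark, sizeof(mark));

	return sendmsg(fd, &msg, 0);
}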
diff --git a/net/ipv4/netfilter/Kconfig b/net/ipv4/netfilter/Kconfig
index 69e76d677f9e..f17b402111ce 100644
--- a/net/ipv4/netfilter/Kconfig
+++ b/net/ipv4/netfilter/Kconfig
@@ -272,7 +272,7 @@ config IP_NF_TARGET_CLUSTERIP
The CLUSTERIP target allows you to build load-balancing clusters of
network servers without having a dedicated load-balancing
router/server/switch.
-
+
To compile it as a module, choose M here. If unsure, say N.
config IP_NF_TARGET_ECN
@@ -281,7 +281,7 @@ config IP_NF_TARGET_ECN
depends on NETFILTER_ADVANCED
---help---
This option adds a `ECN' target, which can be used in the iptables mangle
- table.
+ table.
You can use this target to remove the ECN bits from the IPv4 header of
an IP packet. This is particularly useful, if you need to work around
@@ -306,7 +306,7 @@ config IP_NF_RAW
This option adds a `raw' table to iptables. This table is the very
first in the netfilter framework and hooks in at the PREROUTING
and OUTPUT chains.
-
+
If you want to compile it as a module, say M here and read
<file:Documentation/kbuild/modules.rst>. If unsure, say `N'.
@@ -318,7 +318,7 @@ config IP_NF_SECURITY
help
This option adds a `security' table to iptables, for use
with Mandatory Access Control (MAC) policy.
-
+
If unsure, say N.
endif # IP_NF_IPTABLES
diff --git a/net/ipv4/netfilter/Makefile b/net/ipv4/netfilter/Makefile
index c50e0ec095d2..7c497c78105f 100644
--- a/net/ipv4/netfilter/Makefile
+++ b/net/ipv4/netfilter/Makefile
@@ -31,7 +31,7 @@ obj-$(CONFIG_NFT_DUP_IPV4) += nft_dup_ipv4.o
# flow table support
obj-$(CONFIG_NF_FLOW_TABLE_IPV4) += nf_flow_table_ipv4.o
-# generic IP tables
+# generic IP tables
obj-$(CONFIG_IP_NF_IPTABLES) += ip_tables.o
# the three instances of ip_tables
diff --git a/net/ipv4/ping.c b/net/ipv4/ping.c
index 9d24ef5c5d8f..535427292194 100644
--- a/net/ipv4/ping.c
+++ b/net/ipv4/ping.c
@@ -781,7 +781,7 @@ static int ping_v4_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
} else if (!ipc.oif)
ipc.oif = inet->uc_index;
- flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
+ flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
inet_sk_flowi_flags(sk), faddr, saddr, 0, 0,
sk->sk_uid);
diff --git a/net/ipv4/raw.c b/net/ipv4/raw.c
index 40a6abbc9cf6..80da5a66d5d7 100644
--- a/net/ipv4/raw.c
+++ b/net/ipv4/raw.c
@@ -375,7 +375,7 @@ static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
skb_reserve(skb, hlen);
skb->priority = sk->sk_priority;
- skb->mark = sk->sk_mark;
+ skb->mark = sockc->mark;
skb->tstamp = sockc->transmit_time;
skb_dst_set(skb, &rt->dst);
*rtp = NULL;
@@ -623,7 +623,7 @@ static int raw_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
}
}
- flowi4_init_output(&fl4, ipc.oif, sk->sk_mark, tos,
+ flowi4_init_output(&fl4, ipc.oif, ipc.sockc.mark, tos,
RT_SCOPE_UNIVERSE,
hdrincl ? IPPROTO_RAW : sk->sk_protocol,
inet_sk_flowi_flags(sk) |
diff --git a/net/ipv4/udp.c b/net/ipv4/udp.c
index d88821c794fb..fbcd9be3a470 100644
--- a/net/ipv4/udp.c
+++ b/net/ipv4/udp.c
@@ -1130,7 +1130,7 @@ int udp_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
fl4 = &fl4_stack;
- flowi4_init_output(fl4, ipc.oif, sk->sk_mark, tos,
+ flowi4_init_output(fl4, ipc.oif, ipc.sockc.mark, tos,
RT_SCOPE_UNIVERSE, sk->sk_protocol,
flow_flags,
faddr, saddr, dport, inet->inet_sport,
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 8e49fd62eea9..89a4c7c2e25d 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -1294,6 +1294,7 @@ static int ip6_setup_cork(struct sock *sk, struct inet_cork_full *cork,
cork->base.fragsize = mtu;
cork->base.gso_size = ipc6->gso_size;
cork->base.tx_flags = 0;
+ cork->base.mark = ipc6->sockc.mark;
sock_tx_timestamp(sk, ipc6->sockc.tsflags, &cork->base.tx_flags);
if (dst_allfrag(xfrm_dst_path(&rt->dst)))
@@ -1764,7 +1765,7 @@ struct sk_buff *__ip6_make_skb(struct sock *sk,
hdr->daddr = *final_dst;
skb->priority = sk->sk_priority;
- skb->mark = sk->sk_mark;
+ skb->mark = cork->base.mark;
skb->tstamp = cork->base.transmit_time;
diff --git a/net/ipv6/netfilter.c b/net/ipv6/netfilter.c
index 61819ed858b1..a9bff556d3b2 100644
--- a/net/ipv6/netfilter.c
+++ b/net/ipv6/netfilter.c
@@ -113,9 +113,9 @@ int __nf_ip6_route(struct net *net, struct dst_entry **dst,
EXPORT_SYMBOL_GPL(__nf_ip6_route);
int br_ip6_fragment(struct net *net, struct sock *sk, struct sk_buff *skb,
- struct nf_ct_bridge_frag_data *data,
+ struct nf_bridge_frag_data *data,
int (*output)(struct net *, struct sock *sk,
- const struct nf_ct_bridge_frag_data *data,
+ const struct nf_bridge_frag_data *data,
struct sk_buff *))
{
int frag_max_size = BR_INPUT_SKB_CB(skb)->frag_max_size;
diff --git a/net/ipv6/netfilter/ip6t_ipv6header.c b/net/ipv6/netfilter/ip6t_ipv6header.c
index 0fc6326ef499..c52ff929c93b 100644
--- a/net/ipv6/netfilter/ip6t_ipv6header.c
+++ b/net/ipv6/netfilter/ip6t_ipv6header.c
@@ -16,7 +16,7 @@
#include <net/ipv6.h>
#include <linux/netfilter/x_tables.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_ipv6/ip6t_ipv6header.h>
MODULE_LICENSE("GPL");
@@ -42,7 +42,7 @@ ipv6header_mt6(const struct sk_buff *skb, struct xt_action_param *par)
len = skb->len - ptr;
temp = 0;
- while (ip6t_ext_hdr(nexthdr)) {
+ while (nf_ip6_ext_hdr(nexthdr)) {
const struct ipv6_opt_hdr *hp;
struct ipv6_opt_hdr _hdr;
int hdrlen;
diff --git a/net/ipv6/netfilter/nf_log_ipv6.c b/net/ipv6/netfilter/nf_log_ipv6.c
index f53bd8f01219..22b80db6d882 100644
--- a/net/ipv6/netfilter/nf_log_ipv6.c
+++ b/net/ipv6/netfilter/nf_log_ipv6.c
@@ -18,7 +18,7 @@
#include <net/route.h>
#include <linux/netfilter.h>
-#include <linux/netfilter_ipv6/ip6_tables.h>
+#include <linux/netfilter_ipv6.h>
#include <linux/netfilter/xt_LOG.h>
#include <net/netfilter/nf_log.h>
@@ -70,7 +70,7 @@ static void dump_ipv6_packet(struct net *net, struct nf_log_buf *m,
fragment = 0;
ptr = ip6hoff + sizeof(struct ipv6hdr);
currenthdr = ih->nexthdr;
- while (currenthdr != NEXTHDR_NONE && ip6t_ext_hdr(currenthdr)) {
+ while (currenthdr != NEXTHDR_NONE && nf_ip6_ext_hdr(currenthdr)) {
struct ipv6_opt_hdr _hdr;
const struct ipv6_opt_hdr *hp;
diff --git a/net/ipv6/netfilter/nf_socket_ipv6.c b/net/ipv6/netfilter/nf_socket_ipv6.c
index 437d95545c31..b9df879c48d3 100644
--- a/net/ipv6/netfilter/nf_socket_ipv6.c
+++ b/net/ipv6/netfilter/nf_socket_ipv6.c
@@ -12,7 +12,6 @@
#include <net/sock.h>
#include <net/inet_sock.h>
#include <net/inet6_hashtables.h>
-#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#include <net/netfilter/nf_socket.h>
#if IS_ENABLED(CONFIG_NF_CONNTRACK)
#include <net/netfilter/nf_conntrack.h>
diff --git a/net/ipv6/raw.c b/net/ipv6/raw.c
index 8a6131991e38..6e1888ee4036 100644
--- a/net/ipv6/raw.c
+++ b/net/ipv6/raw.c
@@ -646,7 +646,7 @@ static int rawv6_send_hdrinc(struct sock *sk, struct msghdr *msg, int length,
skb->protocol = htons(ETH_P_IPV6);
skb->priority = sk->sk_priority;
- skb->mark = sk->sk_mark;
+ skb->mark = sockc->mark;
skb->tstamp = sockc->transmit_time;
skb_put(skb, length);
@@ -810,6 +810,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipcm6_init(&ipc6);
ipc6.sockc.tsflags = sk->sk_tsflags;
+ ipc6.sockc.mark = sk->sk_mark;
if (sin6) {
if (addr_len < SIN6_LEN_RFC2133)
@@ -891,6 +892,7 @@ static int rawv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
opt = ipv6_fixup_options(&opt_space, opt);
fl6.flowi6_proto = proto;
+ fl6.flowi6_mark = ipc6.sockc.mark;
if (!hdrincl) {
rfv.msg = msg;
diff --git a/net/ipv6/udp.c b/net/ipv6/udp.c
index 827fe7385078..2c8beb3896d1 100644
--- a/net/ipv6/udp.c
+++ b/net/ipv6/udp.c
@@ -1230,6 +1230,7 @@ int udpv6_sendmsg(struct sock *sk, struct msghdr *msg, size_t len)
ipcm6_init(&ipc6);
ipc6.gso_size = up->gso_size;
ipc6.sockc.tsflags = sk->sk_tsflags;
+ ipc6.sockc.mark = sk->sk_mark;
/* destination address check */
if (sin6) {
@@ -1352,7 +1353,7 @@ do_udp_sendmsg:
if (!fl6.flowi6_oif)
fl6.flowi6_oif = np->sticky_pktinfo.ipi6_ifindex;
- fl6.flowi6_mark = sk->sk_mark;
+ fl6.flowi6_mark = ipc6.sockc.mark;
fl6.flowi6_uid = sk->sk_uid;
if (msg->msg_controllen) {
diff --git a/net/netfilter/Kconfig b/net/netfilter/Kconfig
index 0d65f4d39494..34ec7afec116 100644
--- a/net/netfilter/Kconfig
+++ b/net/netfilter/Kconfig
@@ -20,7 +20,7 @@ config NETFILTER_FAMILY_ARP
bool
config NETFILTER_NETLINK_ACCT
-tristate "Netfilter NFACCT over NFNETLINK interface"
+ tristate "Netfilter NFACCT over NFNETLINK interface"
depends on NETFILTER_ADVANCED
select NETFILTER_NETLINK
help
@@ -34,7 +34,7 @@ config NETFILTER_NETLINK_QUEUE
help
If this option is enabled, the kernel will include support
for queueing packets via NFNETLINK.
-
+
config NETFILTER_NETLINK_LOG
tristate "Netfilter LOG over NFNETLINK interface"
default m if NETFILTER_ADVANCED=n
@@ -1502,7 +1502,7 @@ config NETFILTER_XT_MATCH_REALM
This option adds a `realm' match, which allows you to use the realm
key from the routing subsystem inside iptables.
- This match pretty much resembles the CONFIG_NET_CLS_ROUTE4 option
+ This match pretty much resembles the CONFIG_NET_CLS_ROUTE4 option
in tc world.
If you want to compile it as a module, say M here and read
@@ -1523,7 +1523,7 @@ config NETFILTER_XT_MATCH_SCTP
depends on NETFILTER_ADVANCED
default IP_SCTP
help
- With this option enabled, you will be able to use the
+ With this option enabled, you will be able to use the
`sctp' match in order to match on SCTP source/destination ports
and SCTP chunk types.
diff --git a/net/netfilter/Makefile b/net/netfilter/Makefile
index 9270a7fae484..4fc075b612fe 100644
--- a/net/netfilter/Makefile
+++ b/net/netfilter/Makefile
@@ -124,7 +124,7 @@ nf_flow_table-objs := nf_flow_table_core.o nf_flow_table_ip.o
obj-$(CONFIG_NF_FLOW_TABLE_INET) += nf_flow_table_inet.o
-# generic X tables
+# generic X tables
obj-$(CONFIG_NETFILTER_XTABLES) += x_tables.o xt_tcpudp.o
# combos
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index 5e2812ee2149..6fba74b5aaf7 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -24,6 +24,7 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_extend.h>
static DEFINE_MUTEX(nf_ct_ecache_mutex);
diff --git a/net/netfilter/nf_conntrack_expect.c b/net/netfilter/nf_conntrack_expect.c
index 65364de915d1..42557d2b6a90 100644
--- a/net/netfilter/nf_conntrack_expect.c
+++ b/net/netfilter/nf_conntrack_expect.c
@@ -25,8 +25,10 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_expect.h>
#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
#include <net/netfilter/nf_conntrack_zones.h>
diff --git a/net/netfilter/nf_conntrack_helper.c b/net/netfilter/nf_conntrack_helper.c
index 8d729e7c36ff..118f415928ae 100644
--- a/net/netfilter/nf_conntrack_helper.c
+++ b/net/netfilter/nf_conntrack_helper.c
@@ -21,10 +21,11 @@
#include <linux/rtnetlink.h>
#include <net/netfilter/nf_conntrack.h>
-#include <net/netfilter/nf_conntrack_l4proto.h>
-#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_ecache.h>
#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_helper.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_log.h>
static DEFINE_MUTEX(nf_ct_helper_mutex);
diff --git a/net/netfilter/nf_conntrack_proto_icmpv6.c b/net/netfilter/nf_conntrack_proto_icmpv6.c
index 7e317e6698ba..6f9144e1f1c1 100644
--- a/net/netfilter/nf_conntrack_proto_icmpv6.c
+++ b/net/netfilter/nf_conntrack_proto_icmpv6.c
@@ -22,7 +22,6 @@
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_timeout.h>
#include <net/netfilter/nf_conntrack_zones.h>
-#include <net/netfilter/ipv6/nf_conntrack_icmpv6.h>
#include <net/netfilter/nf_log.h>
static const unsigned int nf_ct_icmpv6_timeout = 30*HZ;
diff --git a/net/netfilter/nf_conntrack_standalone.c b/net/netfilter/nf_conntrack_standalone.c
index 88d4127df863..410809c669e1 100644
--- a/net/netfilter/nf_conntrack_standalone.c
+++ b/net/netfilter/nf_conntrack_standalone.c
@@ -1167,7 +1167,6 @@ static int __init nf_conntrack_standalone_init(void)
if (ret < 0)
goto out_start;
- BUILD_BUG_ON(SKB_NFCT_PTRMASK != NFCT_PTRMASK);
BUILD_BUG_ON(NFCT_INFOMASK <= IP_CT_NUMBER);
#ifdef CONFIG_SYSCTL
diff --git a/net/netfilter/nf_conntrack_timeout.c b/net/netfilter/nf_conntrack_timeout.c
index 13d0f4a92647..14387e0b8008 100644
--- a/net/netfilter/nf_conntrack_timeout.c
+++ b/net/netfilter/nf_conntrack_timeout.c
@@ -19,6 +19,7 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_extend.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_timeout.h>
struct nf_ct_timeout *
diff --git a/net/netfilter/nf_dup_netdev.c b/net/netfilter/nf_dup_netdev.c
index 5a35ef08c3cb..f108a76925dd 100644
--- a/net/netfilter/nf_dup_netdev.c
+++ b/net/netfilter/nf_dup_netdev.c
@@ -10,6 +10,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_dup_netdev.h>
static void nf_do_netdev_egress(struct sk_buff *skb, struct net_device *dev)
@@ -50,5 +51,25 @@ void nf_dup_netdev_egress(const struct nft_pktinfo *pkt, int oif)
}
EXPORT_SYMBOL_GPL(nf_dup_netdev_egress);
+int nft_fwd_dup_netdev_offload(struct nft_offload_ctx *ctx,
+ struct nft_flow_rule *flow,
+ enum flow_action_id id, int oif)
+{
+ struct flow_action_entry *entry;
+ struct net_device *dev;
+
+ /* nft_flow_rule_destroy() releases the reference on this device. */
+ dev = dev_get_by_index(ctx->net, oif);
+ if (!dev)
+ return -EOPNOTSUPP;
+
+ entry = &flow->rule->action.entries[ctx->num_actions++];
+ entry->id = id;
+ entry->dev = dev;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(nft_fwd_dup_netdev_offload);
+
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Pablo Neira Ayuso <pablo@netfilter.org>");
diff --git a/net/netfilter/nf_flow_table_core.c b/net/netfilter/nf_flow_table_core.c
index 80a8f9ae4c93..09310a1bd91f 100644
--- a/net/netfilter/nf_flow_table_core.c
+++ b/net/netfilter/nf_flow_table_core.c
@@ -11,6 +11,7 @@
#include <net/netfilter/nf_flow_table.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
+#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_tuple.h>
struct flow_offload_entry {
diff --git a/net/netfilter/nf_nat_core.c b/net/netfilter/nf_nat_core.c
index 3f6023ed4966..bfc555fcbc72 100644
--- a/net/netfilter/nf_nat_core.c
+++ b/net/netfilter/nf_nat_core.c
@@ -18,12 +18,12 @@
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
-#include <net/netfilter/nf_nat.h>
-#include <net/netfilter/nf_nat_helper.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_seqadj.h>
#include <net/netfilter/nf_conntrack_zones.h>
-#include <linux/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat.h>
+#include <net/netfilter/nf_nat_helper.h>
+#include <uapi/linux/netfilter/nf_nat.h>
#include "nf_internals.h"
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 013d28899cab..e4a68dc42694 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2853,7 +2853,7 @@ static int nf_tables_newrule(struct net *net, struct sock *nlsk,
return nft_table_validate(net, table);
if (chain->flags & NFT_CHAIN_HW_OFFLOAD) {
- flow = nft_flow_rule_create(rule);
+ flow = nft_flow_rule_create(net, rule);
if (IS_ERR(flow))
return PTR_ERR(flow);
@@ -5151,7 +5151,7 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
newobj = nft_obj_init(ctx, type, attr);
if (IS_ERR(newobj)) {
err = PTR_ERR(newobj);
- goto err1;
+ goto err_free_trans;
}
nft_trans_obj(trans) = obj;
@@ -5160,9 +5160,9 @@ static int nf_tables_updobj(const struct nft_ctx *ctx,
list_add_tail(&trans->list, &ctx->net->nft.commit_list);
return 0;
-err1:
+
+err_free_trans:
kfree(trans);
- kfree(newobj);
return err;
}
@@ -7669,11 +7669,6 @@ static struct pernet_operations nf_tables_net_ops = {
.exit = nf_tables_exit_net,
};
-static struct flow_indr_block_ing_entry block_ing_entry = {
- .cb = nft_indr_block_get_and_ing_cmd,
- .list = LIST_HEAD_INIT(block_ing_entry.list),
-};
-
static int __init nf_tables_module_init(void)
{
int err;
@@ -7699,14 +7694,20 @@ static int __init nf_tables_module_init(void)
if (err < 0)
goto err4;
+ err = nft_offload_init();
+ if (err < 0)
+ goto err5;
+
/* must be last */
err = nfnetlink_subsys_register(&nf_tables_subsys);
if (err < 0)
- goto err5;
+ goto err6;
nft_chain_route_init();
- flow_indr_add_block_ing_cb(&block_ing_entry);
+
return err;
+err6:
+ nft_offload_exit();
err5:
rhltable_destroy(&nft_objname_ht);
err4:
@@ -7722,8 +7723,8 @@ err1:
static void __exit nf_tables_module_exit(void)
{
- flow_indr_del_block_ing_cb(&block_ing_entry);
nfnetlink_subsys_unregister(&nf_tables_subsys);
+ nft_offload_exit();
unregister_netdevice_notifier(&nf_tables_flowtable_notifier);
nft_chain_filter_fini();
nft_chain_route_fini();
diff --git a/net/netfilter/nf_tables_offload.c b/net/netfilter/nf_tables_offload.c
index 3c2725ade61b..21bb772cb4b7 100644
--- a/net/netfilter/nf_tables_offload.c
+++ b/net/netfilter/nf_tables_offload.c
@@ -28,13 +28,10 @@ static struct nft_flow_rule *nft_flow_rule_alloc(int num_actions)
return flow;
}
-struct nft_flow_rule *nft_flow_rule_create(const struct nft_rule *rule)
+struct nft_flow_rule *nft_flow_rule_create(struct net *net,
+ const struct nft_rule *rule)
{
- struct nft_offload_ctx ctx = {
- .dep = {
- .type = NFT_OFFLOAD_DEP_UNSPEC,
- },
- };
+ struct nft_offload_ctx *ctx;
struct nft_flow_rule *flow;
int num_actions = 0, err;
struct nft_expr *expr;
@@ -52,21 +49,32 @@ struct nft_flow_rule *nft_flow_rule_create(const struct nft_rule *rule)
return ERR_PTR(-ENOMEM);
expr = nft_expr_first(rule);
+
+ ctx = kzalloc(sizeof(struct nft_offload_ctx), GFP_KERNEL);
+ if (!ctx) {
+ err = -ENOMEM;
+ goto err_out;
+ }
+ ctx->net = net;
+ ctx->dep.type = NFT_OFFLOAD_DEP_UNSPEC;
+
while (expr->ops && expr != nft_expr_last(rule)) {
if (!expr->ops->offload) {
err = -EOPNOTSUPP;
goto err_out;
}
- err = expr->ops->offload(&ctx, flow, expr);
+ err = expr->ops->offload(ctx, flow, expr);
if (err < 0)
goto err_out;
expr = nft_expr_next(expr);
}
- flow->proto = ctx.dep.l3num;
+ flow->proto = ctx->dep.l3num;
+ kfree(ctx);
return flow;
err_out:
+ kfree(ctx);
nft_flow_rule_destroy(flow);
return ERR_PTR(err);
@@ -74,6 +82,19 @@ err_out:
void nft_flow_rule_destroy(struct nft_flow_rule *flow)
{
+ struct flow_action_entry *entry;
+ int i;
+
+ flow_action_for_each(i, entry, &flow->rule->action) {
+ switch (entry->id) {
+ case FLOW_ACTION_REDIRECT:
+ case FLOW_ACTION_MIRRED:
+ dev_put(entry->dev);
+ break;
+ default:
+ break;
+ }
+ }
kfree(flow->rule);
kfree(flow);
}
@@ -134,20 +155,20 @@ int nft_chain_offload_priority(struct nft_base_chain *basechain)
return 0;
}
-static int nft_flow_offload_rule(struct nft_trans *trans,
+static int nft_flow_offload_rule(struct nft_chain *chain,
+ struct nft_rule *rule,
+ struct nft_flow_rule *flow,
enum flow_cls_command command)
{
- struct nft_flow_rule *flow = nft_trans_flow_rule(trans);
- struct nft_rule *rule = nft_trans_rule(trans);
struct flow_cls_offload cls_flow = {};
struct nft_base_chain *basechain;
struct netlink_ext_ack extack;
__be16 proto = ETH_P_ALL;
- if (!nft_is_base_chain(trans->ctx.chain))
+ if (!nft_is_base_chain(chain))
return -EOPNOTSUPP;
- basechain = nft_base_chain(trans->ctx.chain);
+ basechain = nft_base_chain(chain);
if (flow)
proto = flow->proto;
@@ -273,12 +294,13 @@ static int nft_indr_block_offload_cmd(struct nft_base_chain *chain,
#define FLOW_SETUP_BLOCK TC_SETUP_BLOCK
-static int nft_flow_offload_chain(struct nft_trans *trans,
+static int nft_flow_offload_chain(struct nft_chain *chain,
+ u8 *ppolicy,
enum flow_block_command cmd)
{
- struct nft_chain *chain = trans->ctx.chain;
struct nft_base_chain *basechain;
struct net_device *dev;
+ u8 policy;
if (!nft_is_base_chain(chain))
return -EOPNOTSUPP;
@@ -288,10 +310,10 @@ static int nft_flow_offload_chain(struct nft_trans *trans,
if (!dev)
return -EOPNOTSUPP;
+ policy = ppolicy ? *ppolicy : basechain->policy;
+
/* Only default policy to accept is supported for now. */
- if (cmd == FLOW_BLOCK_BIND &&
- nft_trans_chain_policy(trans) != -1 &&
- nft_trans_chain_policy(trans) != NF_ACCEPT)
+ if (cmd == FLOW_BLOCK_BIND && policy != -1 && policy != NF_ACCEPT)
return -EOPNOTSUPP;
if (dev->netdev_ops->ndo_setup_tc)
@@ -304,6 +326,7 @@ int nft_flow_rule_offload_commit(struct net *net)
{
struct nft_trans *trans;
int err = 0;
+ u8 policy;
list_for_each_entry(trans, &net->nft.commit_list, list) {
if (trans->ctx.family != NFPROTO_NETDEV)
@@ -314,13 +337,17 @@ int nft_flow_rule_offload_commit(struct net *net)
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- err = nft_flow_offload_chain(trans, FLOW_BLOCK_BIND);
+ policy = nft_trans_chain_policy(trans);
+ err = nft_flow_offload_chain(trans->ctx.chain, &policy,
+ FLOW_BLOCK_BIND);
break;
case NFT_MSG_DELCHAIN:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- err = nft_flow_offload_chain(trans, FLOW_BLOCK_UNBIND);
+ policy = nft_trans_chain_policy(trans);
+ err = nft_flow_offload_chain(trans->ctx.chain, &policy,
+ FLOW_BLOCK_BIND);
break;
case NFT_MSG_NEWRULE:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
@@ -330,14 +357,20 @@ int nft_flow_rule_offload_commit(struct net *net)
!(trans->ctx.flags & NLM_F_APPEND))
return -EOPNOTSUPP;
- err = nft_flow_offload_rule(trans, FLOW_CLS_REPLACE);
+ err = nft_flow_offload_rule(trans->ctx.chain,
+ nft_trans_rule(trans),
+ nft_trans_flow_rule(trans),
+ FLOW_CLS_REPLACE);
nft_flow_rule_destroy(nft_trans_flow_rule(trans));
break;
case NFT_MSG_DELRULE:
if (!(trans->ctx.chain->flags & NFT_CHAIN_HW_OFFLOAD))
continue;
- err = nft_flow_offload_rule(trans, FLOW_CLS_DESTROY);
+ err = nft_flow_offload_rule(trans->ctx.chain,
+ nft_trans_rule(trans),
+ nft_trans_flow_rule(trans),
+ FLOW_CLS_DESTROY);
break;
}
@@ -348,32 +381,103 @@ int nft_flow_rule_offload_commit(struct net *net)
return err;
}
-void nft_indr_block_get_and_ing_cmd(struct net_device *dev,
- flow_indr_block_bind_cb_t *cb,
- void *cb_priv,
- enum flow_block_command command)
+static struct nft_chain *__nft_offload_get_chain(struct net_device *dev)
{
+ struct nft_base_chain *basechain;
struct net *net = dev_net(dev);
const struct nft_table *table;
- const struct nft_chain *chain;
+ struct nft_chain *chain;
- list_for_each_entry_rcu(table, &net->nft.tables, list) {
+ list_for_each_entry(table, &net->nft.tables, list) {
if (table->family != NFPROTO_NETDEV)
continue;
- list_for_each_entry_rcu(chain, &table->chains, list) {
- if (nft_is_base_chain(chain)) {
- struct nft_base_chain *basechain;
-
- basechain = nft_base_chain(chain);
- if (!strncmp(basechain->dev_name, dev->name,
- IFNAMSIZ)) {
- nft_indr_block_ing_cmd(dev, basechain,
- cb, cb_priv,
- command);
- return;
- }
- }
+ list_for_each_entry(chain, &table->chains, list) {
+ if (!nft_is_base_chain(chain) ||
+ !(chain->flags & NFT_CHAIN_HW_OFFLOAD))
+ continue;
+
+ basechain = nft_base_chain(chain);
+ if (strncmp(basechain->dev_name, dev->name, IFNAMSIZ))
+ continue;
+
+ return chain;
}
}
+
+ return NULL;
+}
+
+static void nft_indr_block_cb(struct net_device *dev,
+ flow_indr_block_bind_cb_t *cb, void *cb_priv,
+ enum flow_block_command cmd)
+{
+ struct net *net = dev_net(dev);
+ struct nft_chain *chain;
+
+ mutex_lock(&net->nft.commit_mutex);
+ chain = __nft_offload_get_chain(dev);
+ if (chain) {
+ struct nft_base_chain *basechain;
+
+ basechain = nft_base_chain(chain);
+ nft_indr_block_ing_cmd(dev, basechain, cb, cb_priv, cmd);
+ }
+ mutex_unlock(&net->nft.commit_mutex);
+}
+
+static void nft_offload_chain_clean(struct nft_chain *chain)
+{
+ struct nft_rule *rule;
+
+ list_for_each_entry(rule, &chain->rules, list) {
+ nft_flow_offload_rule(chain, rule,
+ NULL, FLOW_CLS_DESTROY);
+ }
+
+ nft_flow_offload_chain(chain, NULL, FLOW_BLOCK_UNBIND);
+}
+
+static int nft_offload_netdev_event(struct notifier_block *this,
+ unsigned long event, void *ptr)
+{
+ struct net_device *dev = netdev_notifier_info_to_dev(ptr);
+ struct net *net = dev_net(dev);
+ struct nft_chain *chain;
+
+ mutex_lock(&net->nft.commit_mutex);
+ chain = __nft_offload_get_chain(dev);
+ if (chain)
+ nft_offload_chain_clean(chain);
+ mutex_unlock(&net->nft.commit_mutex);
+
+ return NOTIFY_DONE;
+}
+
+static struct flow_indr_block_ing_entry block_ing_entry = {
+ .cb = nft_indr_block_cb,
+ .list = LIST_HEAD_INIT(block_ing_entry.list),
+};
+
+static struct notifier_block nft_offload_netdev_notifier = {
+ .notifier_call = nft_offload_netdev_event,
+};
+
+int nft_offload_init(void)
+{
+ int err;
+
+ err = register_netdevice_notifier(&nft_offload_netdev_notifier);
+ if (err < 0)
+ return err;
+
+ flow_indr_add_block_ing_cb(&block_ing_entry);
+
+ return 0;
+}
+
+void nft_offload_exit(void)
+{
+ flow_indr_del_block_ing_cb(&block_ing_entry);
+ unregister_netdevice_notifier(&nft_offload_netdev_notifier);
}
diff --git a/net/netfilter/nft_dup_netdev.c b/net/netfilter/nft_dup_netdev.c
index c6052fdd2c40..c2e78c160fd7 100644
--- a/net/netfilter/nft_dup_netdev.c
+++ b/net/netfilter/nft_dup_netdev.c
@@ -10,6 +10,7 @@
#include <linux/netfilter.h>
#include <linux/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_dup_netdev.h>
struct nft_dup_netdev {
@@ -56,6 +57,16 @@ nla_put_failure:
return -1;
}
+static int nft_dup_netdev_offload(struct nft_offload_ctx *ctx,
+ struct nft_flow_rule *flow,
+ const struct nft_expr *expr)
+{
+ const struct nft_dup_netdev *priv = nft_expr_priv(expr);
+ int oif = ctx->regs[priv->sreg_dev].data.data[0];
+
+ return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_MIRRED, oif);
+}
+
static struct nft_expr_type nft_dup_netdev_type;
static const struct nft_expr_ops nft_dup_netdev_ops = {
.type = &nft_dup_netdev_type,
@@ -63,6 +74,7 @@ static const struct nft_expr_ops nft_dup_netdev_ops = {
.eval = nft_dup_netdev_eval,
.init = nft_dup_netdev_init,
.dump = nft_dup_netdev_dump,
+ .offload = nft_dup_netdev_offload,
};
static struct nft_expr_type nft_dup_netdev_type __read_mostly = {
diff --git a/net/netfilter/nft_flow_offload.c b/net/netfilter/nft_flow_offload.c
index 01705ad74a9a..22cf236eb5d5 100644
--- a/net/netfilter/nft_flow_offload.c
+++ b/net/netfilter/nft_flow_offload.c
@@ -6,12 +6,13 @@
#include <linux/netfilter.h>
#include <linux/workqueue.h>
#include <linux/spinlock.h>
+#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_tables.h>
#include <net/ip.h> /* for ipv4 options. */
#include <net/netfilter/nf_tables.h>
#include <net/netfilter/nf_tables_core.h>
#include <net/netfilter/nf_conntrack_core.h>
-#include <linux/netfilter/nf_conntrack_common.h>
+#include <net/netfilter/nf_conntrack_extend.h>
#include <net/netfilter/nf_flow_table.h>
struct nft_flow_offload {
diff --git a/net/netfilter/nft_fwd_netdev.c b/net/netfilter/nft_fwd_netdev.c
index 61b7f93ac681..aba11c2333f3 100644
--- a/net/netfilter/nft_fwd_netdev.c
+++ b/net/netfilter/nft_fwd_netdev.c
@@ -12,6 +12,7 @@
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <net/netfilter/nf_tables.h>
+#include <net/netfilter/nf_tables_offload.h>
#include <net/netfilter/nf_dup_netdev.h>
#include <net/neighbour.h>
#include <net/ip.h>
@@ -63,6 +64,16 @@ nla_put_failure:
return -1;
}
+static int nft_fwd_netdev_offload(struct nft_offload_ctx *ctx,
+ struct nft_flow_rule *flow,
+ const struct nft_expr *expr)
+{
+ const struct nft_fwd_netdev *priv = nft_expr_priv(expr);
+ int oif = ctx->regs[priv->sreg_dev].data.data[0];
+
+ return nft_fwd_dup_netdev_offload(ctx, flow, FLOW_ACTION_REDIRECT, oif);
+}
+
struct nft_fwd_neigh {
enum nft_registers sreg_dev:8;
enum nft_registers sreg_addr:8;
@@ -194,6 +205,7 @@ static const struct nft_expr_ops nft_fwd_netdev_ops = {
.eval = nft_fwd_netdev_eval,
.init = nft_fwd_netdev_init,
.dump = nft_fwd_netdev_dump,
+ .offload = nft_fwd_netdev_offload,
};
static const struct nft_expr_ops *
diff --git a/net/netfilter/nft_synproxy.c b/net/netfilter/nft_synproxy.c
index db4c23f5dfcb..e2c1fc608841 100644
--- a/net/netfilter/nft_synproxy.c
+++ b/net/netfilter/nft_synproxy.c
@@ -24,7 +24,7 @@ static void nft_synproxy_tcp_options(struct synproxy_options *opts,
const struct tcphdr *tcp,
struct synproxy_net *snet,
struct nf_synproxy_info *info,
- struct nft_synproxy *priv)
+ const struct nft_synproxy *priv)
{
this_cpu_inc(snet->stats->syn_received);
if (tcp->ece && tcp->cwr)
@@ -41,14 +41,13 @@ static void nft_synproxy_tcp_options(struct synproxy_options *opts,
NF_SYNPROXY_OPT_ECN);
}
-static void nft_synproxy_eval_v4(const struct nft_expr *expr,
+static void nft_synproxy_eval_v4(const struct nft_synproxy *priv,
struct nft_regs *regs,
const struct nft_pktinfo *pkt,
const struct tcphdr *tcp,
struct tcphdr *_tcph,
struct synproxy_options *opts)
{
- struct nft_synproxy *priv = nft_expr_priv(expr);
struct nf_synproxy_info info = priv->info;
struct net *net = nft_net(pkt);
struct synproxy_net *snet = synproxy_pernet(net);
@@ -73,14 +72,13 @@ static void nft_synproxy_eval_v4(const struct nft_expr *expr,
}
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
-static void nft_synproxy_eval_v6(const struct nft_expr *expr,
+static void nft_synproxy_eval_v6(const struct nft_synproxy *priv,
struct nft_regs *regs,
const struct nft_pktinfo *pkt,
const struct tcphdr *tcp,
struct tcphdr *_tcph,
struct synproxy_options *opts)
{
- struct nft_synproxy *priv = nft_expr_priv(expr);
struct nf_synproxy_info info = priv->info;
struct net *net = nft_net(pkt);
struct synproxy_net *snet = synproxy_pernet(net);
@@ -105,9 +103,9 @@ static void nft_synproxy_eval_v6(const struct nft_expr *expr,
}
#endif /* CONFIG_NF_TABLES_IPV6*/
-static void nft_synproxy_eval(const struct nft_expr *expr,
- struct nft_regs *regs,
- const struct nft_pktinfo *pkt)
+static void nft_synproxy_do_eval(const struct nft_synproxy *priv,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
{
struct synproxy_options opts = {};
struct sk_buff *skb = pkt->skb;
@@ -140,23 +138,22 @@ static void nft_synproxy_eval(const struct nft_expr *expr,
switch (skb->protocol) {
case htons(ETH_P_IP):
- nft_synproxy_eval_v4(expr, regs, pkt, tcp, &_tcph, &opts);
+ nft_synproxy_eval_v4(priv, regs, pkt, tcp, &_tcph, &opts);
return;
#if IS_ENABLED(CONFIG_NF_TABLES_IPV6)
case htons(ETH_P_IPV6):
- nft_synproxy_eval_v6(expr, regs, pkt, tcp, &_tcph, &opts);
+ nft_synproxy_eval_v6(priv, regs, pkt, tcp, &_tcph, &opts);
return;
#endif
}
regs->verdict.code = NFT_BREAK;
}
-static int nft_synproxy_init(const struct nft_ctx *ctx,
- const struct nft_expr *expr,
- const struct nlattr * const tb[])
+static int nft_synproxy_do_init(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[],
+ struct nft_synproxy *priv)
{
struct synproxy_net *snet = synproxy_pernet(ctx->net);
- struct nft_synproxy *priv = nft_expr_priv(expr);
u32 flags;
int err;
@@ -206,8 +203,7 @@ nf_ct_failure:
return err;
}
-static void nft_synproxy_destroy(const struct nft_ctx *ctx,
- const struct nft_expr *expr)
+static void nft_synproxy_do_destroy(const struct nft_ctx *ctx)
{
struct synproxy_net *snet = synproxy_pernet(ctx->net);
@@ -229,10 +225,8 @@ static void nft_synproxy_destroy(const struct nft_ctx *ctx,
nf_ct_netns_put(ctx->net, ctx->family);
}
-static int nft_synproxy_dump(struct sk_buff *skb, const struct nft_expr *expr)
+static int nft_synproxy_do_dump(struct sk_buff *skb, struct nft_synproxy *priv)
{
- const struct nft_synproxy *priv = nft_expr_priv(expr);
-
if (nla_put_be16(skb, NFTA_SYNPROXY_MSS, htons(priv->info.mss)) ||
nla_put_u8(skb, NFTA_SYNPROXY_WSCALE, priv->info.wscale) ||
nla_put_be32(skb, NFTA_SYNPROXY_FLAGS, htonl(priv->info.options)))
@@ -244,6 +238,15 @@ nla_put_failure:
return -1;
}
+static void nft_synproxy_eval(const struct nft_expr *expr,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_synproxy *priv = nft_expr_priv(expr);
+
+ nft_synproxy_do_eval(priv, regs, pkt);
+}
+
static int nft_synproxy_validate(const struct nft_ctx *ctx,
const struct nft_expr *expr,
const struct nft_data **data)
@@ -252,6 +255,28 @@ static int nft_synproxy_validate(const struct nft_ctx *ctx,
(1 << NF_INET_FORWARD));
}
+static int nft_synproxy_init(const struct nft_ctx *ctx,
+ const struct nft_expr *expr,
+ const struct nlattr * const tb[])
+{
+ struct nft_synproxy *priv = nft_expr_priv(expr);
+
+ return nft_synproxy_do_init(ctx, tb, priv);
+}
+
+static void nft_synproxy_destroy(const struct nft_ctx *ctx,
+ const struct nft_expr *expr)
+{
+ nft_synproxy_do_destroy(ctx);
+}
+
+static int nft_synproxy_dump(struct sk_buff *skb, const struct nft_expr *expr)
+{
+ struct nft_synproxy *priv = nft_expr_priv(expr);
+
+ return nft_synproxy_do_dump(skb, priv);
+}
+
static struct nft_expr_type nft_synproxy_type;
static const struct nft_expr_ops nft_synproxy_ops = {
.eval = nft_synproxy_eval,
@@ -271,14 +296,89 @@ static struct nft_expr_type nft_synproxy_type __read_mostly = {
.maxattr = NFTA_SYNPROXY_MAX,
};
+static int nft_synproxy_obj_init(const struct nft_ctx *ctx,
+ const struct nlattr * const tb[],
+ struct nft_object *obj)
+{
+ struct nft_synproxy *priv = nft_obj_data(obj);
+
+ return nft_synproxy_do_init(ctx, tb, priv);
+}
+
+static void nft_synproxy_obj_destroy(const struct nft_ctx *ctx,
+ struct nft_object *obj)
+{
+ nft_synproxy_do_destroy(ctx);
+}
+
+static int nft_synproxy_obj_dump(struct sk_buff *skb,
+ struct nft_object *obj, bool reset)
+{
+ struct nft_synproxy *priv = nft_obj_data(obj);
+
+ return nft_synproxy_do_dump(skb, priv);
+}
+
+static void nft_synproxy_obj_eval(struct nft_object *obj,
+ struct nft_regs *regs,
+ const struct nft_pktinfo *pkt)
+{
+ const struct nft_synproxy *priv = nft_obj_data(obj);
+
+ nft_synproxy_do_eval(priv, regs, pkt);
+}
+
+static void nft_synproxy_obj_update(struct nft_object *obj,
+ struct nft_object *newobj)
+{
+ struct nft_synproxy *newpriv = nft_obj_data(newobj);
+ struct nft_synproxy *priv = nft_obj_data(obj);
+
+ priv->info = newpriv->info;
+}
+
+static struct nft_object_type nft_synproxy_obj_type;
+static const struct nft_object_ops nft_synproxy_obj_ops = {
+ .type = &nft_synproxy_obj_type,
+ .size = sizeof(struct nft_synproxy),
+ .init = nft_synproxy_obj_init,
+ .destroy = nft_synproxy_obj_destroy,
+ .dump = nft_synproxy_obj_dump,
+ .eval = nft_synproxy_obj_eval,
+ .update = nft_synproxy_obj_update,
+};
+
+static struct nft_object_type nft_synproxy_obj_type __read_mostly = {
+ .type = NFT_OBJECT_SYNPROXY,
+ .ops = &nft_synproxy_obj_ops,
+ .maxattr = NFTA_SYNPROXY_MAX,
+ .policy = nft_synproxy_policy,
+ .owner = THIS_MODULE,
+};
+
static int __init nft_synproxy_module_init(void)
{
- return nft_register_expr(&nft_synproxy_type);
+ int err;
+
+ err = nft_register_obj(&nft_synproxy_obj_type);
+ if (err < 0)
+ return err;
+
+ err = nft_register_expr(&nft_synproxy_type);
+ if (err < 0)
+ goto err;
+
+ return 0;
+
+err:
+ nft_unregister_obj(&nft_synproxy_obj_type);
+ return err;
}
static void __exit nft_synproxy_module_exit(void)
{
- return nft_unregister_expr(&nft_synproxy_type);
+ nft_unregister_expr(&nft_synproxy_type);
+ nft_unregister_obj(&nft_synproxy_obj_type);
}
module_init(nft_synproxy_module_init);
@@ -287,3 +387,4 @@ module_exit(nft_synproxy_module_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Fernando Fernandez <ffmancera@riseup.net>");
MODULE_ALIAS_NFT_EXPR("synproxy");
+MODULE_ALIAS_NFT_OBJ(NFT_OBJECT_SYNPROXY);
diff --git a/net/netfilter/xt_connlimit.c b/net/netfilter/xt_connlimit.c
index bc6c8ab0fa62..46fcac75f726 100644
--- a/net/netfilter/xt_connlimit.c
+++ b/net/netfilter/xt_connlimit.c
@@ -13,6 +13,8 @@
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/ip.h>
+#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter/x_tables.h>
diff --git a/net/netfilter/xt_hashlimit.c b/net/netfilter/xt_hashlimit.c
index 2d2691dd51e0..ced3fc8fad7c 100644
--- a/net/netfilter/xt_hashlimit.c
+++ b/net/netfilter/xt_hashlimit.c
@@ -34,9 +34,14 @@
#include <linux/netfilter/x_tables.h>
#include <linux/netfilter_ipv4/ip_tables.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
-#include <linux/netfilter/xt_hashlimit.h>
#include <linux/mutex.h>
#include <linux/kernel.h>
+#include <uapi/linux/netfilter/xt_hashlimit.h>
+
+#define XT_HASHLIMIT_ALL (XT_HASHLIMIT_HASH_DIP | XT_HASHLIMIT_HASH_DPT | \
+ XT_HASHLIMIT_HASH_SIP | XT_HASHLIMIT_HASH_SPT | \
+ XT_HASHLIMIT_INVERT | XT_HASHLIMIT_BYTES |\
+ XT_HASHLIMIT_RATE_MATCH)
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Harald Welte <laforge@netfilter.org>");
diff --git a/net/netfilter/xt_physdev.c b/net/netfilter/xt_physdev.c
index b92b22ce8abd..ec6ed6fda96c 100644
--- a/net/netfilter/xt_physdev.c
+++ b/net/netfilter/xt_physdev.c
@@ -5,12 +5,13 @@
/* (C) 2001-2003 Bart De Schuymer <bdschuym@pandora.be>
*/
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/if.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/netfilter_bridge.h>
-#include <linux/netfilter/xt_physdev.h>
#include <linux/netfilter/x_tables.h>
-#include <net/netfilter/br_netfilter.h>
+#include <uapi/linux/netfilter/xt_physdev.h>
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Bart De Schuymer <bdschuym@pandora.be>");
diff --git a/net/sched/act_ct.c b/net/sched/act_ct.c
index cdd6f3818097..fcc46025e790 100644
--- a/net/sched/act_ct.c
+++ b/net/sched/act_ct.c
@@ -24,12 +24,12 @@
#include <uapi/linux/tc_act/tc_ct.h>
#include <net/tc_act/tc_ct.h>
-#include <linux/netfilter/nf_nat.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+#include <uapi/linux/netfilter/nf_nat.h>
static struct tc_action_ops act_ct_ops;
static unsigned int ct_net_id;