From 8e18be7610aebea50e9327c11afcd5eeaaa06644 Mon Sep 17 00:00:00 2001
From: Yang Li
Date: Mon, 7 Nov 2022 14:26:23 +0800
Subject: lib: Fix some kernel-doc comments

Change the description of @policy to @p in nla_policy_len() to clear
the warnings below:

lib/nlattr.c:660: warning: Function parameter or member 'p' not described in 'nla_policy_len'
lib/nlattr.c:660: warning: Excess function parameter 'policy' description in 'nla_policy_len'

Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=2736
Reported-by: Abaci Robot
Signed-off-by: Yang Li
Link: https://lore.kernel.org/r/20221107062623.6709-1-yang.lee@linux.alibaba.com
Signed-off-by: Jakub Kicinski
---
 lib/nlattr.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/nlattr.c b/lib/nlattr.c
index b67a53e29b8f..9055e8b4d144 100644
--- a/lib/nlattr.c
+++ b/lib/nlattr.c
@@ -646,7 +646,7 @@ EXPORT_SYMBOL(__nla_validate);
 
 /**
  * nla_policy_len - Determine the max. length of a policy
- * @policy: policy to use
+ * @p: policy to use
  * @n: number of policies
  *
  * Determines the max. length of the policy. It is currently used
--
cgit v1.2.3
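An aside on the rule the patch above enforces: scripts/kernel-doc matches the
name after '@' against the actual C parameter name, so the two warnings go
away as soon as they agree. A hypothetical declaration (illustrative only,
not taken from the patch) that satisfies the checker:

/**
 * example_policy_len - Determine the max. length of a policy
 * @p: policy to use
 * @n: number of policies
 *
 * The identifier after '@' must match the C parameter exactly;
 * writing @policy here while the parameter is named 'p' would
 * reproduce both warnings quoted in the commit message.
 */
int example_policy_len(const struct nla_policy *p, int n);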
From 21780f89d65837e23fef825c79aa836c1cb3a8e9 Mon Sep 17 00:00:00 2001
From: Andy Shevchenko
Date: Tue, 8 Nov 2022 16:11:08 +0200
Subject: mac_pton: Don't access memory over expected length

strlen() may read far past the expected end of the given string when
estimating its length. In some cases it may cross a memory boundary
and crash the system, as happened in commit 13a55372b64e ("ARM:
orion5x: Revert commit 4904dbda41c8.").

Rectify this by switching to strnlen() with the expected maximum
length of the string.

Signed-off-by: Andy Shevchenko
Link: https://lore.kernel.org/r/20221108141108.62974-1-andriy.shevchenko@linux.intel.com
Signed-off-by: Jakub Kicinski
---
 lib/net_utils.c | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

(limited to 'lib')

diff --git a/lib/net_utils.c b/lib/net_utils.c
index af525353395d..c17201df3d08 100644
--- a/lib/net_utils.c
+++ b/lib/net_utils.c
@@ -6,10 +6,11 @@
 
 bool mac_pton(const char *s, u8 *mac)
 {
+	size_t maxlen = 3 * ETH_ALEN - 1;
 	int i;
 
 	/* XX:XX:XX:XX:XX:XX */
-	if (strlen(s) < 3 * ETH_ALEN - 1)
+	if (strnlen(s, maxlen) < maxlen)
 		return false;
 
 	/* Don't dirty result unless string is valid MAC. */
--
cgit v1.2.3
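The point of the change above: mac_pton() only needs to know whether at
least 17 characters ("XX:XX:XX:XX:XX:XX") are present, so bounding the scan
with strnlen() gives the same answer without ever reading past that limit.
A minimal userspace sketch of the same check (illustrative only, not kernel
code; ETH_ALEN is defined locally because the kernel header is not used):

#include <assert.h>
#include <string.h>

#define ETH_ALEN 6	/* octets in one Ethernet address */

int main(void)
{
	const size_t maxlen = 3 * ETH_ALEN - 1;	/* 17 characters */

	/* strnlen() inspects at most maxlen bytes, so an overlong or
	 * unterminated buffer can no longer be over-read. */
	assert(strnlen("00:11:22:33:44:55", maxlen) == maxlen);
	assert(strnlen("00:11:22", maxlen) < maxlen);	/* too short */
	return 0;
}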
From 354259fa73e2aac92ae5e19522adb69a92c15b49 Mon Sep 17 00:00:00 2001
From: Eric Dumazet
Date: Wed, 9 Nov 2022 09:57:58 +0000
Subject: net: remove skb->vlan_present

skb->vlan_present seems redundant.

We can instead derive it from this boolean expression:

vlan_present = skb->vlan_proto != 0 || skb->vlan_tci != 0

Add a new union to access both fields in a single load/store
when possible:

	union {
		u32		vlan_all;
		struct {
			__be16	vlan_proto;
			__u16	vlan_tci;
		};
	};

This allows the following patch to remove a conditional test in the
GRO stack.

Note: We move remcsum_offload to keep TC_AT_INGRESS_MASK and
SKB_MONO_DELIVERY_TIME_MASK unchanged.

Signed-off-by: Eric Dumazet
Acked-by: Yonghong Song
Acked-by: Martin KaFai Lau
Signed-off-by: Jakub Kicinski
---
 arch/sparc/net/bpf_jit_comp_32.c                 | 10 +++++-----
 .../net/ethernet/marvell/octeontx2/nic/otx2_pf.c |  2 +-
 include/linux/if_vlan.h                          |  9 +++------
 include/linux/skbuff.h                           | 18 ++++++++++--------
 lib/test_bpf.c                                   |  1 -
 net/core/filter.c                                | 22 ++++++++++------------
 6 files changed, 29 insertions(+), 33 deletions(-)

(limited to 'lib')

diff --git a/arch/sparc/net/bpf_jit_comp_32.c b/arch/sparc/net/bpf_jit_comp_32.c
index b1dbf2fa8c0a..a74e5004c6c8 100644
--- a/arch/sparc/net/bpf_jit_comp_32.c
+++ b/arch/sparc/net/bpf_jit_comp_32.c
@@ -555,11 +555,11 @@ void bpf_jit_compile(struct bpf_prog *fp)
 			emit_skb_load16(vlan_tci, r_A);
 			break;
 		case BPF_ANC | SKF_AD_VLAN_TAG_PRESENT:
-			__emit_skb_load8(__pkt_vlan_present_offset, r_A);
-			if (PKT_VLAN_PRESENT_BIT)
-				emit_alu_K(SRL, PKT_VLAN_PRESENT_BIT);
-			if (PKT_VLAN_PRESENT_BIT < 7)
-				emit_andi(r_A, 1, r_A);
+			emit_skb_load32(vlan_all, r_A);
+			emit_cmpi(r_A, 0);
+			emit_branch_off(BE, 12);
+			emit_nop();
+			emit_loadimm(1, r_A);
 			break;
 		case BPF_LD | BPF_W | BPF_LEN:
 			emit_skb_load32(len, r_A);
diff --git a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
index 303930499a4c..c1ea60bc2630 100644
--- a/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
+++ b/drivers/net/ethernet/marvell/octeontx2/nic/otx2_pf.c
@@ -1973,7 +1973,7 @@ static u16 otx2_select_queue(struct net_device *netdev, struct sk_buff *skb,
 #endif
 
 #ifdef CONFIG_DCB
-	if (!skb->vlan_present)
+	if (!skb_vlan_tag_present(skb))
 		goto pick_tx;
 
 	vlan_prio = skb->vlan_tci >> 13;
diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h
index e00c4ee81ff7..6864b89ef868 100644
--- a/include/linux/if_vlan.h
+++ b/include/linux/if_vlan.h
@@ -76,7 +76,7 @@ static inline bool is_vlan_dev(const struct net_device *dev)
 	return dev->priv_flags & IFF_802_1Q_VLAN;
 }
 
-#define skb_vlan_tag_present(__skb)	((__skb)->vlan_present)
+#define skb_vlan_tag_present(__skb)	(!!(__skb)->vlan_all)
 #define skb_vlan_tag_get(__skb)		((__skb)->vlan_tci)
 #define skb_vlan_tag_get_id(__skb)	((__skb)->vlan_tci & VLAN_VID_MASK)
 #define skb_vlan_tag_get_cfi(__skb)	(!!((__skb)->vlan_tci & VLAN_CFI_MASK))
@@ -471,7 +471,7 @@ static inline struct sk_buff *vlan_insert_tag_set_proto(struct sk_buff *skb,
  */
 static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
 {
-	skb->vlan_present = 0;
+	skb->vlan_all = 0;
 }
 
 /**
@@ -483,9 +483,7 @@ static inline void __vlan_hwaccel_clear_tag(struct sk_buff *skb)
  */
 static inline void __vlan_hwaccel_copy_tag(struct sk_buff *dst, const struct sk_buff *src)
 {
-	dst->vlan_present = src->vlan_present;
-	dst->vlan_proto = src->vlan_proto;
-	dst->vlan_tci = src->vlan_tci;
+	dst->vlan_all = src->vlan_all;
 }
 
 /*
@@ -519,7 +517,6 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb,
 {
 	skb->vlan_proto = vlan_proto;
 	skb->vlan_tci = vlan_tci;
-	skb->vlan_present = 1;
 }
 
 /**
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 59c9fd55699d..4e464a27adaf 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -818,7 +818,7 @@ typedef unsigned char *sk_buff_data_t;
 *	@mark: Generic packet mark
 *	@reserved_tailroom: (aka @mark) number of bytes of free space available
 *		at the tail of an sk_buff
- *	@vlan_present: VLAN tag is present
+ *	@vlan_all: vlan fields (proto & tci)
 *	@vlan_proto: vlan encapsulation protocol
 *	@vlan_tci: vlan tag control information
 *	@inner_protocol: Protocol (encapsulation)
@@ -951,7 +951,7 @@ struct sk_buff {
 	/* private: */
 	__u8			__pkt_vlan_present_offset[0];
 	/* public: */
-	__u8			vlan_present:1;	/* See PKT_VLAN_PRESENT_BIT */
+	__u8			remcsum_offload:1;
 	__u8			csum_complete_sw:1;
 	__u8			csum_level:2;
 	__u8			dst_pending_confirm:1;
@@ -966,7 +966,6 @@ struct sk_buff {
 	__u8			ipvs_property:1;
 
 	__u8			inner_protocol_type:1;
-	__u8			remcsum_offload:1;
 #ifdef CONFIG_NET_SWITCHDEV
 	__u8			offload_fwd_mark:1;
 	__u8			offload_l3_fwd_mark:1;
@@ -999,8 +998,13 @@ struct sk_buff {
 	__u32			priority;
 	int			skb_iif;
 	__u32			hash;
-	__be16			vlan_proto;
-	__u16			vlan_tci;
+	union {
+		u32		vlan_all;
+		struct {
+			__be16	vlan_proto;
+			__u16	vlan_tci;
+		};
+	};
 #if defined(CONFIG_NET_RX_BUSY_POLL) || defined(CONFIG_XPS)
 	union {
 		unsigned int	napi_id;
@@ -1059,15 +1063,13 @@ struct sk_buff {
 #endif
 #define PKT_TYPE_OFFSET		offsetof(struct sk_buff, __pkt_type_offset)
 
-/* if you move pkt_vlan_present, tc_at_ingress, or mono_delivery_time
+/* if you move tc_at_ingress or mono_delivery_time
 * around, you also must adapt these constants.
 */
 #ifdef __BIG_ENDIAN_BITFIELD
-#define PKT_VLAN_PRESENT_BIT	7
 #define TC_AT_INGRESS_MASK		(1 << 0)
 #define SKB_MONO_DELIVERY_TIME_MASK	(1 << 2)
 #else
-#define PKT_VLAN_PRESENT_BIT	0
 #define TC_AT_INGRESS_MASK		(1 << 7)
 #define SKB_MONO_DELIVERY_TIME_MASK	(1 << 5)
 #endif
diff --git a/lib/test_bpf.c b/lib/test_bpf.c
index 5820704165a6..ade9ac672adb 100644
--- a/lib/test_bpf.c
+++ b/lib/test_bpf.c
@@ -14346,7 +14346,6 @@ static struct sk_buff *populate_skb(char *buf, int size)
 	skb->hash = SKB_HASH;
 	skb->queue_mapping = SKB_QUEUE_MAP;
 	skb->vlan_tci = SKB_VLAN_TCI;
-	skb->vlan_present = SKB_VLAN_PRESENT;
 	skb->vlan_proto = htons(ETH_P_IP);
 	dev_net_set(&dev, &init_net);
 	skb->dev = &dev;
diff --git a/net/core/filter.c b/net/core/filter.c
index bb0136e7a8e4..358d5e70671a 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -325,11 +325,11 @@ static u32 convert_skb_access(int skb_field, int dst_reg, int src_reg,
 				      offsetof(struct sk_buff, vlan_tci));
 		break;
 	case SKF_AD_VLAN_TAG_PRESENT:
-		*insn++ = BPF_LDX_MEM(BPF_B, dst_reg, src_reg, PKT_VLAN_PRESENT_OFFSET);
-		if (PKT_VLAN_PRESENT_BIT)
-			*insn++ = BPF_ALU32_IMM(BPF_RSH, dst_reg, PKT_VLAN_PRESENT_BIT);
-		if (PKT_VLAN_PRESENT_BIT < 7)
-			*insn++ = BPF_ALU32_IMM(BPF_AND, dst_reg, 1);
+		BUILD_BUG_ON(sizeof_field(struct sk_buff, vlan_all) != 4);
+		*insn++ = BPF_LDX_MEM(BPF_W, dst_reg, src_reg,
+				      offsetof(struct sk_buff, vlan_all));
+		*insn++ = BPF_JMP_IMM(BPF_JEQ, dst_reg, 0, 1);
+		*insn++ = BPF_ALU32_IMM(BPF_MOV, dst_reg, 1);
 		break;
 	}
 
@@ -9290,13 +9290,11 @@ static u32 bpf_convert_ctx_access(enum bpf_access_type type,
 		break;
 
 	case offsetof(struct __sk_buff, vlan_present):
-		*target_size = 1;
-		*insn++ = BPF_LDX_MEM(BPF_B, si->dst_reg, si->src_reg,
-				      PKT_VLAN_PRESENT_OFFSET);
-		if (PKT_VLAN_PRESENT_BIT)
-			*insn++ = BPF_ALU32_IMM(BPF_RSH, si->dst_reg, PKT_VLAN_PRESENT_BIT);
-		if (PKT_VLAN_PRESENT_BIT < 7)
-			*insn++ = BPF_ALU32_IMM(BPF_AND, si->dst_reg, 1);
+		*insn++ = BPF_LDX_MEM(BPF_W, si->dst_reg, si->src_reg,
+				      bpf_target_off(struct sk_buff,
+						     vlan_all, 4, target_size));
+		*insn++ = BPF_JMP_IMM(BPF_JEQ, si->dst_reg, 0, 1);
+		*insn++ = BPF_ALU32_IMM(BPF_MOV, si->dst_reg, 1);
 		break;
 
 	case offsetof(struct __sk_buff, vlan_tci):
--
cgit v1.2.3
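A quick way to convince yourself of the aliasing the new union relies on is
a userspace model (assumptions: C11 anonymous members, 16-bit halves, no
padding; in the kernel the BUILD_BUG_ON(sizeof_field(...) != 4) in the
patch checks the same thing, and the zero test works for either byte
order):

#include <assert.h>
#include <stdint.h>

union vlan_tag {
	uint32_t vlan_all;
	struct {
		uint16_t vlan_proto;	/* __be16 in struct sk_buff */
		uint16_t vlan_tci;
	};
};

int main(void)
{
	union vlan_tag tag = { .vlan_all = 0 };

	assert(sizeof(tag) == 4);	/* one 32-bit load/store */
	tag.vlan_proto = 0x0081;	/* any non-zero proto */
	assert(tag.vlan_all != 0);	/* skb_vlan_tag_present() */
	tag.vlan_all = 0;		/* __vlan_hwaccel_clear_tag() */
	assert(tag.vlan_proto == 0 && tag.vlan_tci == 0);
	return 0;
}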
From b084f6cc3563faf4f4d16c98852c0c734fe18914 Mon Sep 17 00:00:00 2001
From: Jiapeng Chong
Date: Wed, 23 Nov 2022 17:37:02 +0800
Subject: lib/test_rhashtable: Remove set but unused variable 'insert_retries'

The variable 'insert_retries' is accumulated but never used afterwards,
so delete it:

lib/test_rhashtable.c:437:18: warning: variable 'insert_retries' set but not used.

Link: https://bugzilla.openanolis.cn/show_bug.cgi?id=3242
Reported-by: Abaci Robot
Signed-off-by: Jiapeng Chong
Acked-by: Herbert Xu
Signed-off-by: David S. Miller
---
 lib/test_rhashtable.c | 6 ++----
 1 file changed, 2 insertions(+), 4 deletions(-)

(limited to 'lib')

diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c
index f2ba5787055a..3ae3399f3651 100644
--- a/lib/test_rhashtable.c
+++ b/lib/test_rhashtable.c
@@ -434,7 +434,7 @@ out_free:
 static int __init test_rhashtable_max(struct test_obj *array,
 				      unsigned int entries)
 {
-	unsigned int i, insert_retries = 0;
+	unsigned int i;
 	int err;
 
 	test_rht_params.max_size = roundup_pow_of_two(entries / 8);
@@ -447,9 +447,7 @@ static int __init test_rhashtable_max(struct test_obj *array,
 
 		obj->value.id = i * 2;
 		err = insert_retry(&ht, obj, test_rht_params);
-		if (err > 0)
-			insert_retries += err;
-		else if (err)
+		if (err < 0)
 			return err;
 	}
 
--
cgit v1.2.3
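Why the simplified check is equivalent: the removed branch shows that
insert_retry() returns a positive retry count on success and a negative
errno on failure, so once the count is no longer accumulated, 'err < 0' is
the only test that matters. A toy model of that convention (stub only, not
the real helper):

#include <assert.h>

/* hypothetical stand-in for insert_retry(): >= 0 retries on success,
 * negative errno on failure */
static int insert_retry_stub(int fail)
{
	return fail ? -12 /* -ENOMEM */ : 2 /* e.g. needed two retries */;
}

int main(void)
{
	int err = insert_retry_stub(0);

	assert(!(err < 0));	/* success; retry count now ignored */
	err = insert_retry_stub(1);
	assert(err < 0);	/* failures are still caught */
	return 0;
}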
From e47877c7aa821c413b45e05f804819579bdfa1a3 Mon Sep 17 00:00:00 2001
From: Tejun Heo
Date: Tue, 6 Dec 2022 11:36:32 -1000
Subject: rhashtable: Allow rhashtable to be used from irq-safe contexts

rhashtable currently only does bh-safe synchronization, making it
impossible to use from irq-safe contexts. Switch it to use irq-safe
synchronization to remove the restriction.

v2: Update the lock functions to return the ulong flags value and the
    unlock functions to take the value directly instead of passing a
    pointer around. Suggested by Linus.

Signed-off-by: Tejun Heo
Reviewed-by: David Vernet
Acked-by: Josh Don
Acked-by: Hao Luo
Acked-by: Barret Rhoden
Cc: Linus Torvalds
Signed-off-by: David S. Miller
---
 include/linux/rhashtable.h | 61 +++++++++++++++++++++++++++-------------------
 lib/rhashtable.c           | 16 +++++++-----
 2 files changed, 46 insertions(+), 31 deletions(-)

(limited to 'lib')

diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 68dab3e08aad..5b5357c0bd8c 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -323,29 +323,36 @@ static inline struct rhash_lock_head __rcu **rht_bucket_insert(
 * When we write to a bucket without unlocking, we use rht_assign_locked().
 */
 
-static inline void rht_lock(struct bucket_table *tbl,
-			    struct rhash_lock_head __rcu **bkt)
+static inline unsigned long rht_lock(struct bucket_table *tbl,
+				     struct rhash_lock_head __rcu **bkt)
 {
-	local_bh_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	bit_spin_lock(0, (unsigned long *)bkt);
 	lock_map_acquire(&tbl->dep_map);
+	return flags;
 }
 
-static inline void rht_lock_nested(struct bucket_table *tbl,
-				   struct rhash_lock_head __rcu **bucket,
-				   unsigned int subclass)
+static inline unsigned long rht_lock_nested(struct bucket_table *tbl,
+					    struct rhash_lock_head __rcu **bucket,
+					    unsigned int subclass)
 {
-	local_bh_disable();
+	unsigned long flags;
+
+	local_irq_save(flags);
 	bit_spin_lock(0, (unsigned long *)bucket);
 	lock_acquire_exclusive(&tbl->dep_map, subclass, 0, NULL, _THIS_IP_);
+	return flags;
 }
 
 static inline void rht_unlock(struct bucket_table *tbl,
-			      struct rhash_lock_head __rcu **bkt)
+			      struct rhash_lock_head __rcu **bkt,
+			      unsigned long flags)
 {
 	lock_map_release(&tbl->dep_map);
 	bit_spin_unlock(0, (unsigned long *)bkt);
-	local_bh_enable();
+	local_irq_restore(flags);
 }
 
 static inline struct rhash_head *__rht_ptr(
@@ -393,7 +400,8 @@ static inline void rht_assign_locked(struct rhash_lock_head __rcu **bkt,
 
 static inline void rht_assign_unlock(struct bucket_table *tbl,
 				     struct rhash_lock_head __rcu **bkt,
-				     struct rhash_head *obj)
+				     struct rhash_head *obj,
+				     unsigned long flags)
 {
 	if (rht_is_a_nulls(obj))
 		obj = NULL;
@@ -401,7 +409,7 @@ static inline void rht_assign_unlock(struct bucket_table *tbl,
 	rcu_assign_pointer(*bkt, (void *)obj);
 	preempt_enable();
 	__release(bitlock);
-	local_bh_enable();
+	local_irq_restore(flags);
 }
 
 /**
@@ -706,6 +714,7 @@ static inline void *__rhashtable_insert_fast(
 	struct rhash_head __rcu **pprev;
 	struct bucket_table *tbl;
 	struct rhash_head *head;
+	unsigned long flags;
 	unsigned int hash;
 	int elasticity;
 	void *data;
@@ -720,11 +729,11 @@ static inline void *__rhashtable_insert_fast(
 	if (!bkt)
 		goto out;
 	pprev = NULL;
-	rht_lock(tbl, bkt);
+	flags = rht_lock(tbl, bkt);
 
 	if (unlikely(rcu_access_pointer(tbl->future_tbl))) {
 slow_path:
-		rht_unlock(tbl, bkt);
+		rht_unlock(tbl, bkt, flags);
 		rcu_read_unlock();
 		return rhashtable_insert_slow(ht, key, obj);
 	}
@@ -756,9 +765,9 @@ slow_path:
 		RCU_INIT_POINTER(list->rhead.next, head);
 		if (pprev) {
 			rcu_assign_pointer(*pprev, obj);
-			rht_unlock(tbl, bkt);
+			rht_unlock(tbl, bkt, flags);
 		} else
-			rht_assign_unlock(tbl, bkt, obj);
+			rht_assign_unlock(tbl, bkt, obj, flags);
 		data = NULL;
 		goto out;
 	}
@@ -785,7 +794,7 @@ slow_path:
 	}
 
 	atomic_inc(&ht->nelems);
-	rht_assign_unlock(tbl, bkt, obj);
+	rht_assign_unlock(tbl, bkt, obj, flags);
 
 	if (rht_grow_above_75(ht, tbl))
 		schedule_work(&ht->run_work);
@@ -797,7 +806,7 @@ out:
 	return data;
 
 out_unlock:
-	rht_unlock(tbl, bkt);
+	rht_unlock(tbl, bkt, flags);
 	goto out;
 }
 
@@ -991,6 +1000,7 @@ static inline int __rhashtable_remove_fast_one(
 	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
+	unsigned long flags;
 	unsigned int hash;
 	int err = -ENOENT;
 
@@ -999,7 +1009,7 @@ static inline int __rhashtable_remove_fast_one(
 	if (!bkt)
 		return -ENOENT;
 	pprev = NULL;
-	rht_lock(tbl, bkt);
+	flags = rht_lock(tbl, bkt);
 
 	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
 		struct rhlist_head *list;
@@ -1043,14 +1053,14 @@ static inline int __rhashtable_remove_fast_one(
 
 		if (pprev) {
 			rcu_assign_pointer(*pprev, obj);
-			rht_unlock(tbl, bkt);
+			rht_unlock(tbl, bkt, flags);
 		} else {
-			rht_assign_unlock(tbl, bkt, obj);
+			rht_assign_unlock(tbl, bkt, obj, flags);
 		}
 		goto unlocked;
 	}
 
-	rht_unlock(tbl, bkt);
+	rht_unlock(tbl, bkt, flags);
 unlocked:
 	if (err > 0) {
 		atomic_dec(&ht->nelems);
@@ -1143,6 +1153,7 @@ static inline int __rhashtable_replace_fast(
 	struct rhash_lock_head __rcu **bkt;
 	struct rhash_head __rcu **pprev;
 	struct rhash_head *he;
+	unsigned long flags;
 	unsigned int hash;
 	int err = -ENOENT;
 
@@ -1158,7 +1169,7 @@ static inline int __rhashtable_replace_fast(
 		return -ENOENT;
 
 	pprev = NULL;
-	rht_lock(tbl, bkt);
+	flags = rht_lock(tbl, bkt);
 
 	rht_for_each_from(he, rht_ptr(bkt, tbl, hash), tbl, hash) {
 		if (he != obj_old) {
@@ -1169,15 +1180,15 @@ static inline int __rhashtable_replace_fast(
 		rcu_assign_pointer(obj_new->next, obj_old->next);
 		if (pprev) {
 			rcu_assign_pointer(*pprev, obj_new);
-			rht_unlock(tbl, bkt);
+			rht_unlock(tbl, bkt, flags);
 		} else {
-			rht_assign_unlock(tbl, bkt, obj_new);
+			rht_assign_unlock(tbl, bkt, obj_new, flags);
 		}
 		err = 0;
 		goto unlocked;
 	}
 
-	rht_unlock(tbl, bkt);
+	rht_unlock(tbl, bkt, flags);
 
 unlocked:
 	return err;
diff --git a/lib/rhashtable.c b/lib/rhashtable.c
index e12bbfb240b8..6ae2ba8e06a2 100644
--- a/lib/rhashtable.c
+++ b/lib/rhashtable.c
@@ -231,6 +231,7 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 	struct rhash_head *head, *next, *entry;
 	struct rhash_head __rcu **pprev = NULL;
 	unsigned int new_hash;
+	unsigned long flags;
 
 	if (new_tbl->nest)
 		goto out;
@@ -253,13 +254,14 @@ static int rhashtable_rehash_one(struct rhashtable *ht,
 
 	new_hash = head_hashfn(ht, new_tbl, entry);
 
-	rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash], SINGLE_DEPTH_NESTING);
+	flags = rht_lock_nested(new_tbl, &new_tbl->buckets[new_hash],
+				SINGLE_DEPTH_NESTING);
 
 	head = rht_ptr(new_tbl->buckets + new_hash, new_tbl, new_hash);
 
 	RCU_INIT_POINTER(entry->next, head);
 
-	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry);
+	rht_assign_unlock(new_tbl, &new_tbl->buckets[new_hash], entry, flags);
 
 	if (pprev)
 		rcu_assign_pointer(*pprev, next);
@@ -276,18 +278,19 @@ static int rhashtable_rehash_chain(struct rhashtable *ht,
 {
 	struct bucket_table *old_tbl = rht_dereference(ht->tbl, ht);
 	struct rhash_lock_head __rcu **bkt = rht_bucket_var(old_tbl, old_hash);
+	unsigned long flags;
 	int err;
 
 	if (!bkt)
 		return 0;
-	rht_lock(old_tbl, bkt);
+	flags = rht_lock(old_tbl, bkt);
 
 	while (!(err = rhashtable_rehash_one(ht, bkt, old_hash)))
 		;
 
 	if (err == -ENOENT)
 		err = 0;
-	rht_unlock(old_tbl, bkt);
+	rht_unlock(old_tbl, bkt, flags);
 
 	return err;
 }
@@ -590,6 +593,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 	struct bucket_table *new_tbl;
 	struct bucket_table *tbl;
 	struct rhash_lock_head __rcu **bkt;
+	unsigned long flags;
 	unsigned int hash;
 	void *data;
 
@@ -607,7 +611,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 			new_tbl = rht_dereference_rcu(tbl->future_tbl, ht);
 			data = ERR_PTR(-EAGAIN);
 		} else {
-			rht_lock(tbl, bkt);
+			flags = rht_lock(tbl, bkt);
 			data = rhashtable_lookup_one(ht, bkt, tbl,
 						     hash, key, obj);
 			new_tbl = rhashtable_insert_one(ht, bkt, tbl,
@@ -614,7 +618,7 @@ static void *rhashtable_try_insert(struct rhashtable *ht, const void *key,
 							hash, obj, data);
 			if (PTR_ERR(new_tbl) != -EEXIST)
 				data = ERR_CAST(new_tbl);
 
-			rht_unlock(tbl, bkt);
+			rht_unlock(tbl, bkt, flags);
 		}
 	} while (!IS_ERR_OR_NULL(new_tbl));
--
cgit v1.2.3
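The v2 note above is the interesting API detail: rht_lock() now hands the
saved interrupt state back by value, and rht_unlock()/rht_assign_unlock()
take it as a plain argument rather than threading a pointer through every
caller. A userspace sketch of that calling convention (stand-ins only;
save_state()/restore_state() model local_irq_save()/local_irq_restore()):

#include <stdio.h>

static unsigned long save_state(void)
{
	return 0xace;			/* pretend CPU flags word */
}

static void restore_state(unsigned long flags)
{
	(void)flags;			/* would re-enable interrupts */
}

static unsigned long toy_lock(void)
{
	unsigned long flags = save_state();	/* like local_irq_save() */
	/* ... take the per-bucket bit spinlock here ... */
	return flags;
}

static void toy_unlock(unsigned long flags)
{
	/* ... drop the per-bucket bit spinlock here ... */
	restore_state(flags);		/* like local_irq_restore() */
}

int main(void)
{
	unsigned long flags = toy_lock();
	/* critical section: safe even for callers in irq context */
	toy_unlock(flags);
	puts("ok");
	return 0;
}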
From 1280d4b76f3402645aa7075a53f49a3a14be07a8 Mon Sep 17 00:00:00 2001
From: Uladzislau Koshchanka
Date: Sat, 10 Dec 2022 03:44:23 +0300
Subject: lib: packing: replace bit_reverse() with bitrev8()

Remove the bit_reverse() function and use bitrev8() from
<linux/bitrev.h> plus a bit shift instead. This reduces code
repetition.

Signed-off-by: Uladzislau Koshchanka
Link: https://lore.kernel.org/r/20221210004423.32332-1-koshchanka@gmail.com
Signed-off-by: Jakub Kicinski
---
 lib/Kconfig   |  1 +
 lib/packing.c | 16 ++--------------
 2 files changed, 3 insertions(+), 14 deletions(-)

(limited to 'lib')

diff --git a/lib/Kconfig b/lib/Kconfig
index 9bbf8a4b2108..cc969ef58a2a 100644
--- a/lib/Kconfig
+++ b/lib/Kconfig
@@ -24,6 +24,7 @@ config LINEAR_RANGES
 
 config PACKING
 	bool "Generic bitfield packing and unpacking"
+	select BITREVERSE
 	default n
 	help
 	  This option provides the packing() helper function, which permits
diff --git a/lib/packing.c b/lib/packing.c
index 9a72f4bbf0e2..a96169237ae6 100644
--- a/lib/packing.c
+++ b/lib/packing.c
@@ -7,6 +7,7 @@
 #include <linux/packing.h>
 #include <linux/module.h>
 #include <linux/bitops.h>
+#include <linux/bitrev.h>
 
 static int get_le_offset(int offset)
 {
@@ -29,19 +30,6 @@ static int get_reverse_lsw32_offset(int offset, size_t len)
 	return word_index * 4 + offset;
 }
 
-static u64 bit_reverse(u64 val, unsigned int width)
-{
-	u64 new_val = 0;
-	unsigned int bit;
-	unsigned int i;
-
-	for (i = 0; i < width; i++) {
-		bit = (val & (1 << i)) != 0;
-		new_val |= (bit << (width - i - 1));
-	}
-	return new_val;
-}
-
 static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
 				       int *box_end_bit, u8 *box_mask)
 {
@@ -49,7 +37,7 @@ static void adjust_for_msb_right_quirk(u64 *to_write, int *box_start_bit,
 	int new_box_start_bit, new_box_end_bit;
 
 	*to_write >>= *box_end_bit;
-	*to_write = bit_reverse(*to_write, box_bit_width);
+	*to_write = bitrev8(*to_write) >> (8 - box_bit_width);
 	*to_write <<= *box_end_bit;
 
 	new_box_end_bit = box_bit_width - *box_start_bit - 1;
--
cgit v1.2.3
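The replacement leans on a small identity: for widths of at most eight bits
(the only case packing.c needs, since a "box" is one byte), reversing the
low 'width' bits of a value equals reversing all eight bits and shifting
right by 8 - width. A self-contained userspace check of that identity
(bitrev8_ref() is a local stand-in for the kernel's bitrev8()):

#include <assert.h>
#include <stdint.h>

static uint8_t bitrev8_ref(uint8_t b)
{
	uint8_t r = 0;

	for (int i = 0; i < 8; i++)
		if (b & (1u << i))
			r |= (uint8_t)(1u << (7 - i));
	return r;
}

/* the loop the patch removes, kept for comparison */
static uint64_t bit_reverse_old(uint64_t val, unsigned int width)
{
	uint64_t new_val = 0;

	for (unsigned int i = 0; i < width; i++) {
		unsigned int bit = (val & (1ull << i)) != 0;

		new_val |= ((uint64_t)bit << (width - i - 1));
	}
	return new_val;
}

int main(void)
{
	for (unsigned int width = 1; width <= 8; width++)
		for (uint32_t v = 0; v < (1u << width); v++)
			assert(bit_reverse_old(v, width) ==
			       (uint64_t)(bitrev8_ref((uint8_t)v) >> (8 - width)));
	return 0;
}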