author     Ondrej Mosnacek <omosnace@redhat.com>    2019-11-26 14:57:00 +0100
committer  Paul Moore <paul@paul-moore.com>         2019-12-09 16:14:51 -0500
commit     d97bd23c2d7d866e99eb3a927c742715c85a90ef (patch)
tree       d5c29d661384fbc83a0244623b9daac7f3715d5a /security/selinux/ss/sidtab.c
parent     66f8e2f03c02e812002f8e9e465681cc62edda5b (diff)
download   linux-d97bd23c2d7d866e99eb3a927c742715c85a90ef.tar.bz2
selinux: cache the SID -> context string translation
Translating a context struct to string can be quite slow, especially if the
context has a lot of category bits set. This can cause quite noticeable
performance impact in situations where the translation needs to be done
repeatedly. A common example is a UNIX datagram socket with the SO_PASSSEC
option enabled, which is used e.g. by systemd-journald when receiving log
messages via datagram socket. This scenario can be reproduced with:

  cat /dev/urandom | base64 | logger &
  timeout 30s perf record -p $(pidof systemd-journald) -a -g
  kill %1
  perf report -g none --pretty raw | grep security_secid_to_secctx

Before the caching introduced by this patch, computing the context string
(security_secid_to_secctx() function) takes up ~65% of systemd-journald's
CPU time (assuming a context with 1024 categories set and Fedora x86_64
release kernel configs). After this patch (assuming near-perfect cache hit
ratio) this overhead is reduced to just ~2%.

This patch addresses the issue by caching a certain number (compile-time
configurable) of recently used context strings to speed up repeated
translations of the same context, while using only a small amount of memory.

The cache is integrated into the existing sidtab table by adding a field to
each entry, which when not NULL contains an RCU-protected pointer to a cache
entry containing the cached string. The cache entries are kept in a linked
list sorted according to how recently they were used. On a cache miss when
the cache is full, the least recently used entry is removed to make space
for the new entry.

The patch migrates security_sid_to_context_core() to use the cache (also a
few other functions where it was possible without too much fuss, but these
mostly use the translation for logging in case of error, which is rare).

Link: https://bugzilla.redhat.com/show_bug.cgi?id=1733259
Cc: Michal Sekletar <msekleta@redhat.com>
Signed-off-by: Ondrej Mosnacek <omosnace@redhat.com>
Reviewed-by: Stephen Smalley <sds@tycho.nsa.gov>
Tested-by: Stephen Smalley <sds@tycho.nsa.gov>
Reviewed-by: Paul E. McKenney <paulmck@kernel.org>
[PM: lots of merge fixups due to collisions with other sidtab patches]
Signed-off-by: Paul Moore <paul@paul-moore.com>
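Note that this diff covers only sidtab.c; it relies on a companion sidtab.h change that is not shown on this page. A minimal sketch of the fields that change is assumed to add, inferred from the members dereferenced in the diff below (entry->cache, s->cache_free_slots, s->cache_lru_list, s->cache_lock) and from the CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE guard; existing members are elided and the exact layout may differ:

  /* Assumed sidtab.h counterpart (sketch only, not part of this diff). */
  struct sidtab_str_cache;

  struct sidtab_entry {
          u32 sid;
          struct context context;
  #if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
          struct sidtab_str_cache __rcu *cache;  /* cached context string, or NULL */
  #endif
          struct hlist_node list;
  };

  struct sidtab {
          /* existing members (count, roots, isids, context_to_sid, lock, ...) elided */
  #if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
          u32 cache_free_slots;                  /* remaining capacity before LRU eviction */
          struct list_head cache_lru_list;       /* most recently used entries first */
          spinlock_t cache_lock;                 /* serializes cache insert/evict */
  #endif
  };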
Diffstat (limited to 'security/selinux/ss/sidtab.c')
-rw-r--r--   security/selinux/ss/sidtab.c   175
1 file changed, 144 insertions(+), 31 deletions(-)
diff --git a/security/selinux/ss/sidtab.c b/security/selinux/ss/sidtab.c
index d9d8599e8e63..a308ce1e6a13 100644
--- a/security/selinux/ss/sidtab.c
+++ b/security/selinux/ss/sidtab.c
@@ -9,6 +9,8 @@
*/
#include <linux/errno.h>
#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
@@ -17,6 +19,14 @@
#include "security.h"
#include "sidtab.h"
+struct sidtab_str_cache {
+ struct rcu_head rcu_member;
+ struct list_head lru_member;
+ struct sidtab_entry *parent;
+ u32 len;
+ char str[];
+};
+
#define index_to_sid(index) (index + SECINITSID_NUM + 1)
#define sid_to_index(sid) (sid - (SECINITSID_NUM + 1))
@@ -34,12 +44,19 @@ int sidtab_init(struct sidtab *s)
hash_init(s->context_to_sid);
spin_lock_init(&s->lock);
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ s->cache_free_slots = CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE;
+ INIT_LIST_HEAD(&s->cache_lru_list);
+ spin_lock_init(&s->cache_lock);
+#endif
+
return 0;
}
static u32 context_to_sid(struct sidtab *s, struct context *context)
{
- struct sidtab_entry_leaf *entry;
+ struct sidtab_entry *entry;
u32 sid = 0;
rcu_read_lock();
@@ -56,19 +73,22 @@ static u32 context_to_sid(struct sidtab *s, struct context *context)
int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
{
- struct sidtab_isid_entry *entry;
+ struct sidtab_isid_entry *isid;
int rc;
if (sid == 0 || sid > SECINITSID_NUM)
return -EINVAL;
- entry = &s->isids[sid - 1];
+ isid = &s->isids[sid - 1];
- rc = context_cpy(&entry->leaf.context, context);
+ rc = context_cpy(&isid->entry.context, context);
if (rc)
return rc;
- entry->set = 1;
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ isid->entry.cache = NULL;
+#endif
+ isid->set = 1;
/*
* Multiple initial sids may map to the same context. Check that this
@@ -77,8 +97,8 @@ int sidtab_set_initial(struct sidtab *s, u32 sid, struct context *context)
* collision.
*/
if (!context_to_sid(s, context)) {
- entry->leaf.sid = sid;
- hash_add(s->context_to_sid, &entry->leaf.list, context->hash);
+ isid->entry.sid = sid;
+ hash_add(s->context_to_sid, &isid->entry.list, context->hash);
}
return 0;
@@ -92,7 +112,7 @@ int sidtab_hash_stats(struct sidtab *sidtab, char *page)
int entries = 0;
int max_chain_len = 0;
int cur_bucket = 0;
- struct sidtab_entry_leaf *entry;
+ struct sidtab_entry *entry;
rcu_read_lock();
hash_for_each_rcu(sidtab->context_to_sid, i, entry, list) {
@@ -151,8 +171,8 @@ static int sidtab_alloc_roots(struct sidtab *s, u32 level)
return 0;
}
-static struct sidtab_entry_leaf *sidtab_do_lookup(struct sidtab *s, u32 index,
- int alloc)
+static struct sidtab_entry *sidtab_do_lookup(struct sidtab *s, u32 index,
+ int alloc)
{
union sidtab_entry_inner *entry;
u32 level, capacity_shift, leaf_index = index / SIDTAB_LEAF_ENTRIES;
@@ -192,7 +212,7 @@ static struct sidtab_entry_leaf *sidtab_do_lookup(struct sidtab *s, u32 index,
return &entry->ptr_leaf->entries[index % SIDTAB_LEAF_ENTRIES];
}
-static struct context *sidtab_lookup(struct sidtab *s, u32 index)
+static struct sidtab_entry *sidtab_lookup(struct sidtab *s, u32 index)
{
/* read entries only after reading count */
u32 count = smp_load_acquire(&s->count);
@@ -200,36 +220,37 @@ static struct context *sidtab_lookup(struct sidtab *s, u32 index)
if (index >= count)
return NULL;
- return &sidtab_do_lookup(s, index, 0)->context;
+ return sidtab_do_lookup(s, index, 0);
}
-static struct context *sidtab_lookup_initial(struct sidtab *s, u32 sid)
+static struct sidtab_entry *sidtab_lookup_initial(struct sidtab *s, u32 sid)
{
- return s->isids[sid - 1].set ? &s->isids[sid - 1].leaf.context : NULL;
+ return s->isids[sid - 1].set ? &s->isids[sid - 1].entry : NULL;
}
-static struct context *sidtab_search_core(struct sidtab *s, u32 sid, int force)
+static struct sidtab_entry *sidtab_search_core(struct sidtab *s, u32 sid,
+ int force)
{
- struct context *context;
-
if (sid != 0) {
+ struct sidtab_entry *entry;
+
if (sid > SECINITSID_NUM)
- context = sidtab_lookup(s, sid_to_index(sid));
+ entry = sidtab_lookup(s, sid_to_index(sid));
else
- context = sidtab_lookup_initial(s, sid);
- if (context && (!context->len || force))
- return context;
+ entry = sidtab_lookup_initial(s, sid);
+ if (entry && (!entry->context.len || force))
+ return entry;
}
return sidtab_lookup_initial(s, SECINITSID_UNLABELED);
}
-struct context *sidtab_search(struct sidtab *s, u32 sid)
+struct sidtab_entry *sidtab_search_entry(struct sidtab *s, u32 sid)
{
return sidtab_search_core(s, sid, 0);
}
-struct context *sidtab_search_force(struct sidtab *s, u32 sid)
+struct sidtab_entry *sidtab_search_entry_force(struct sidtab *s, u32 sid)
{
return sidtab_search_core(s, sid, 1);
}
@@ -240,7 +261,7 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
unsigned long flags;
u32 count;
struct sidtab_convert_params *convert;
- struct sidtab_entry_leaf *dst, *dst_convert;
+ struct sidtab_entry *dst, *dst_convert;
int rc;
*sid = context_to_sid(s, context);
@@ -289,7 +310,7 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
}
rc = convert->func(context, &dst_convert->context,
- convert->args);
+ convert->args);
if (rc) {
context_destroy(&dst->context);
goto out_unlock;
@@ -298,7 +319,7 @@ int sidtab_context_to_sid(struct sidtab *s, struct context *context,
convert->target->count = count + 1;
hash_add_rcu(convert->target->context_to_sid,
- &dst_convert->list, dst_convert->context.hash);
+ &dst_convert->list, dst_convert->context.hash);
}
if (context->len)
@@ -319,7 +340,7 @@ out_unlock:
static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
{
- struct sidtab_entry_leaf *entry;
+ struct sidtab_entry *entry;
u32 i;
for (i = 0; i < count; i++) {
@@ -327,7 +348,7 @@ static void sidtab_convert_hashtable(struct sidtab *s, u32 count)
entry->sid = index_to_sid(i);
hash_add_rcu(s->context_to_sid, &entry->list,
- entry->context.hash);
+ entry->context.hash);
}
}
@@ -376,7 +397,6 @@ static int sidtab_convert_tree(union sidtab_entry_inner *edst,
}
cond_resched();
}
-
return 0;
}
@@ -439,6 +459,14 @@ int sidtab_convert(struct sidtab *s, struct sidtab_convert_params *params)
return 0;
}
+static void sidtab_destroy_entry(struct sidtab_entry *entry)
+{
+ context_destroy(&entry->context);
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+ kfree(rcu_dereference_raw(entry->cache));
+#endif
+}
+
static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
{
u32 i;
@@ -459,7 +487,7 @@ static void sidtab_destroy_tree(union sidtab_entry_inner entry, u32 level)
return;
for (i = 0; i < SIDTAB_LEAF_ENTRIES; i++)
- context_destroy(&node->entries[i].context);
+ sidtab_destroy_entry(&node->entries[i]);
kfree(node);
}
}
@@ -470,7 +498,7 @@ void sidtab_destroy(struct sidtab *s)
for (i = 0; i < SECINITSID_NUM; i++)
if (s->isids[i].set)
- context_destroy(&s->isids[i].leaf.context);
+ sidtab_destroy_entry(&s->isids[i].entry);
level = SIDTAB_MAX_LEVEL;
while (level && !s->roots[level].ptr_inner)
@@ -483,3 +511,88 @@ void sidtab_destroy(struct sidtab *s)
* to be cleaned up here.
*/
}
+
+#if CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0
+
+void sidtab_sid2str_put(struct sidtab *s, struct sidtab_entry *entry,
+ const char *str, u32 str_len)
+{
+ struct sidtab_str_cache *cache, *victim = NULL;
+
+ /* do not cache invalid contexts */
+ if (entry->context.len)
+ return;
+
+ /*
+ * Skip the put operation when in non-task context to avoid the need
+ * to disable interrupts while holding s->cache_lock.
+ */
+ if (!in_task())
+ return;
+
+ spin_lock(&s->cache_lock);
+
+ cache = rcu_dereference_protected(entry->cache,
+ lockdep_is_held(&s->cache_lock));
+ if (cache) {
+ /* entry in cache - just bump to the head of LRU list */
+ list_move(&cache->lru_member, &s->cache_lru_list);
+ goto out_unlock;
+ }
+
+ cache = kmalloc(sizeof(struct sidtab_str_cache) + str_len, GFP_ATOMIC);
+ if (!cache)
+ goto out_unlock;
+
+ if (s->cache_free_slots == 0) {
+ /* pop a cache entry from the tail and free it */
+ victim = container_of(s->cache_lru_list.prev,
+ struct sidtab_str_cache, lru_member);
+ list_del(&victim->lru_member);
+ rcu_assign_pointer(victim->parent->cache, NULL);
+ } else {
+ s->cache_free_slots--;
+ }
+ cache->parent = entry;
+ cache->len = str_len;
+ memcpy(cache->str, str, str_len);
+ list_add(&cache->lru_member, &s->cache_lru_list);
+
+ rcu_assign_pointer(entry->cache, cache);
+
+out_unlock:
+ spin_unlock(&s->cache_lock);
+ kfree_rcu(victim, rcu_member);
+}
+
+int sidtab_sid2str_get(struct sidtab *s, struct sidtab_entry *entry,
+ char **out, u32 *out_len)
+{
+ struct sidtab_str_cache *cache;
+ int rc = 0;
+
+ if (entry->context.len)
+ return -ENOENT; /* do not cache invalid contexts */
+
+ rcu_read_lock();
+
+ cache = rcu_dereference(entry->cache);
+ if (!cache) {
+ rc = -ENOENT;
+ } else {
+ *out_len = cache->len;
+ if (out) {
+ *out = kmemdup(cache->str, cache->len, GFP_ATOMIC);
+ if (!*out)
+ rc = -ENOMEM;
+ }
+ }
+
+ rcu_read_unlock();
+
+ if (!rc && out)
+ sidtab_sid2str_put(s, entry, *out, *out_len);
+ return rc;
+}
+
+#endif /* CONFIG_SECURITY_SELINUX_SID2STR_CACHE_SIZE > 0 */
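The caller side lives in security/selinux/ss/services.c and is outside this diff. Per the commit message, security_sid_to_context_core() is migrated to consult the cache: try sidtab_sid2str_get() first and, on a miss, fall back to building the string and feeding it back via sidtab_sid2str_put(). A minimal sketch of that pattern, with an illustrative helper name and assuming the era's context_struct_to_string(policydb, context, scontext, scontext_len) string builder:

  /* Sketch of the intended caller-side pattern (services.c, not in this diff).
   * The helper name is illustrative, not the function added by the patch. */
  static int sid_to_context_cached(struct policydb *p, struct sidtab *sidtab,
                                   u32 sid, char **scontext, u32 *scontext_len)
  {
          struct sidtab_entry *entry = sidtab_search_entry_force(sidtab, sid);
          int rc;

          if (!entry)
                  return -EINVAL;

          /* Fast path: -ENOENT means "not cached"; 0 or -ENOMEM are final. */
          rc = sidtab_sid2str_get(sidtab, entry, scontext, scontext_len);
          if (rc != -ENOENT)
                  return rc;

          /* Slow path: build the string, then populate the cache for next time. */
          rc = context_struct_to_string(p, &entry->context, scontext, scontext_len);
          if (!rc && scontext)
                  sidtab_sid2str_put(sidtab, entry, *scontext, *scontext_len);
          return rc;
  }

Note that sidtab_sid2str_get() already refreshes the LRU position on a hit (it calls sidtab_sid2str_put() with the copied string before returning), so callers only need the explicit put on the slow path.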