author     Linus Torvalds <torvalds@linux-foundation.org>  2020-06-04 10:17:59 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2020-06-04 10:17:59 -0700
commit     38b3a5aaf2fd35e997550b855cfb7460b077236a (patch)
tree       88a13c8dfc117a43da8ebabc155b514631f680c2 /tools/perf/util/hashmap.c
parent     6929f71e46bdddbf1c4d67c2728648176c67c555 (diff)
parent     3e9b26dc2268cfbeef85bee095f883264c18425c (diff)
download   linux-38b3a5aaf2fd35e997550b855cfb7460b077236a.tar.bz2
Merge tag 'perf-tools-2020-06-02' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux
Pull perf tooling updates from Arnaldo Carvalho de Melo:
"These are additional changes to the perf tools, on top of what Ingo
already submitted.
- Further Intel PT call-trace fixes
- Improve SELinux docs and tool warnings
- Fix race at exit in 'perf record' using eventfd.
- Add missing build tests to the default set of 'make -C tools/perf
build-test'
- Sync msr-index.h getting new AMD MSRs to decode and filter in 'perf
trace'.
- Fix fallback to libaudit in 'perf trace' for arches not using
per-arch *.tbl files.
- Fixes for 'perf ftrace'.
- Fixes and improvements for the 'perf stat' metrics.
- Use dummy event to get PERF_RECORD_{FORK,MMAP,etc} while
synthesizing those metadata events for pre-existing threads.
- Fix leaks detected using clang tooling.
- Improvements to PMU event metric testing.
- Report summary for 'perf stat' interval mode at the end, summing up
all the intervals.
- Improve pipe mode, i.e. this now works as expected, continuously
dumping samples:
# perf record -g -e raw_syscalls:sys_enter | perf --no-pager script
- Fixes for event grouping, detecting incompatible groups such as:
# perf stat -e '{cycles,power/energy-cores/}' -v
WARNING: group events cpu maps do not match, disabling group:
anon group { power/energy-cores/, cycles }
power/energy-cores/: 0
cycles: 0-7
- Fixes for 'perf probe': blacklist address checking, number of
kretprobe instances, etc.
- JIT processing improvements and fixes plus the addition of a 'perf
test' entry for the java demangler.
- Add support for synthesizing first/last level cache, TLB and remote
access events from HW tracing in the auxtrace code, the first user
being ARM SPE.
- Vendor events updates and fixes, including for POWER9 and Intel.
- Allow using ~/.perfconfig for removing the ',' separators in 'perf
stat' output (an illustrative snippet follows this message).
- Opt-in support for libpfm4"
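
As an illustration of the ~/.perfconfig item above, the stanza involved
would look something like the following; the 'stat.big-num' option name
is an assumption based on the description above, not something quoted
in this log:

    # ~/.perfconfig
    [stat]
            big-num = false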
* tag 'perf-tools-2020-06-02' of git://git.kernel.org/pub/scm/linux/kernel/git/acme/linux: (120 commits)
perf tools: Remove some duplicated includes
perf symbols: Fix kernel maps for kcore and eBPF
tools arch x86: Sync the msr-index.h copy with the kernel sources
perf stat: Ensure group is defined on top of the same cpu mask
perf libdw: Fix off-by 1 relative directory includes
perf arm-spe: Support synthetic events
perf auxtrace: Add four itrace options
perf tools: Move arm-spe-pkt-decoder.h/c to the new dir
perf test: Initialize memory in dwarf-unwind
perf tests: Don't tail call optimize in unwind test
tools compiler.h: Add attribute to disable tail calls
perf build: Add a LIBPFM4=1 build test entry
perf tools: Add optional support for libpfm4
perf tools: Correct license on jsmn JSON parser
perf jit: Fix inaccurate DWARF line table
perf jvmti: Remove redundant jitdump line table entries
perf build: Add NO_SDT=1 to the default set of build tests
perf build: Add NO_LIBCRYPTO=1 to the default set of build tests
perf build: Add NO_SYSCALL_TABLE=1 to the build tests
perf build: Remove libaudit from the default feature checks
...
Diffstat (limited to 'tools/perf/util/hashmap.c')
-rw-r--r--   tools/perf/util/hashmap.c | 238
1 file changed, 238 insertions(+), 0 deletions(-)
diff --git a/tools/perf/util/hashmap.c b/tools/perf/util/hashmap.c
new file mode 100644
index 000000000000..a405dad068f5
--- /dev/null
+++ b/tools/perf/util/hashmap.c
@@ -0,0 +1,238 @@
+// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
+
+/*
+ * Generic non-thread safe hash map implementation.
+ *
+ * Copyright (c) 2019 Facebook
+ */
+#include <stdint.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <linux/err.h>
+#include "hashmap.h"
+
+/* make sure libbpf doesn't use kernel-only integer typedefs */
+#pragma GCC poison u8 u16 u32 u64 s8 s16 s32 s64
+
+/* start with 4 buckets */
+#define HASHMAP_MIN_CAP_BITS 2
+
+static void hashmap_add_entry(struct hashmap_entry **pprev,
+                              struct hashmap_entry *entry)
+{
+        entry->next = *pprev;
+        *pprev = entry;
+}
+
+static void hashmap_del_entry(struct hashmap_entry **pprev,
+                              struct hashmap_entry *entry)
+{
+        *pprev = entry->next;
+        entry->next = NULL;
+}
+
+void hashmap__init(struct hashmap *map, hashmap_hash_fn hash_fn,
+                   hashmap_equal_fn equal_fn, void *ctx)
+{
+        map->hash_fn = hash_fn;
+        map->equal_fn = equal_fn;
+        map->ctx = ctx;
+
+        map->buckets = NULL;
+        map->cap = 0;
+        map->cap_bits = 0;
+        map->sz = 0;
+}
+
+struct hashmap *hashmap__new(hashmap_hash_fn hash_fn,
+                             hashmap_equal_fn equal_fn,
+                             void *ctx)
+{
+        struct hashmap *map = malloc(sizeof(struct hashmap));
+
+        if (!map)
+                return ERR_PTR(-ENOMEM);
+        hashmap__init(map, hash_fn, equal_fn, ctx);
+        return map;
+}
+
+void hashmap__clear(struct hashmap *map)
+{
+        struct hashmap_entry *cur, *tmp;
+        size_t bkt;
+
+        hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
+                free(cur);
+        }
+        free(map->buckets);
+        map->buckets = NULL;
+        map->cap = map->cap_bits = map->sz = 0;
+}
+
+void hashmap__free(struct hashmap *map)
+{
+        if (!map)
+                return;
+
+        hashmap__clear(map);
+        free(map);
+}
+
+size_t hashmap__size(const struct hashmap *map)
+{
+        return map->sz;
+}
+
+size_t hashmap__capacity(const struct hashmap *map)
+{
+        return map->cap;
+}
+
+static bool hashmap_needs_to_grow(struct hashmap *map)
+{
+        /* grow if empty or more than 75% filled */
+        return (map->cap == 0) || ((map->sz + 1) * 4 / 3 > map->cap);
+}
+
+static int hashmap_grow(struct hashmap *map)
+{
+        struct hashmap_entry **new_buckets;
+        struct hashmap_entry *cur, *tmp;
+        size_t new_cap_bits, new_cap;
+        size_t h, bkt;
+
+        new_cap_bits = map->cap_bits + 1;
+        if (new_cap_bits < HASHMAP_MIN_CAP_BITS)
+                new_cap_bits = HASHMAP_MIN_CAP_BITS;
+
+        new_cap = 1UL << new_cap_bits;
+        new_buckets = calloc(new_cap, sizeof(new_buckets[0]));
+        if (!new_buckets)
+                return -ENOMEM;
+
+        hashmap__for_each_entry_safe(map, cur, tmp, bkt) {
+                h = hash_bits(map->hash_fn(cur->key, map->ctx), new_cap_bits);
+                hashmap_add_entry(&new_buckets[h], cur);
+        }
+
+        map->cap = new_cap;
+        map->cap_bits = new_cap_bits;
+        free(map->buckets);
+        map->buckets = new_buckets;
+
+        return 0;
+}
+
+static bool hashmap_find_entry(const struct hashmap *map,
+                               const void *key, size_t hash,
+                               struct hashmap_entry ***pprev,
+                               struct hashmap_entry **entry)
+{
+        struct hashmap_entry *cur, **prev_ptr;
+
+        if (!map->buckets)
+                return false;
+
+        for (prev_ptr = &map->buckets[hash], cur = *prev_ptr;
+             cur;
+             prev_ptr = &cur->next, cur = cur->next) {
+                if (map->equal_fn(cur->key, key, map->ctx)) {
+                        if (pprev)
+                                *pprev = prev_ptr;
+                        *entry = cur;
+                        return true;
+                }
+        }
+
+        return false;
+}
+
+int hashmap__insert(struct hashmap *map, const void *key, void *value,
+                    enum hashmap_insert_strategy strategy,
+                    const void **old_key, void **old_value)
+{
+        struct hashmap_entry *entry;
+        size_t h;
+        int err;
+
+        if (old_key)
+                *old_key = NULL;
+        if (old_value)
+                *old_value = NULL;
+
+        h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+        if (strategy != HASHMAP_APPEND &&
+            hashmap_find_entry(map, key, h, NULL, &entry)) {
+                if (old_key)
+                        *old_key = entry->key;
+                if (old_value)
+                        *old_value = entry->value;
+
+                if (strategy == HASHMAP_SET || strategy == HASHMAP_UPDATE) {
+                        entry->key = key;
+                        entry->value = value;
+                        return 0;
+                } else if (strategy == HASHMAP_ADD) {
+                        return -EEXIST;
+                }
+        }
+
+        if (strategy == HASHMAP_UPDATE)
+                return -ENOENT;
+
+        if (hashmap_needs_to_grow(map)) {
+                err = hashmap_grow(map);
+                if (err)
+                        return err;
+                h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+        }
+
+        entry = malloc(sizeof(struct hashmap_entry));
+        if (!entry)
+                return -ENOMEM;
+
+        entry->key = key;
+        entry->value = value;
+        hashmap_add_entry(&map->buckets[h], entry);
+        map->sz++;
+
+        return 0;
+}
+
+bool hashmap__find(const struct hashmap *map, const void *key, void **value)
+{
+        struct hashmap_entry *entry;
+        size_t h;
+
+        h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+        if (!hashmap_find_entry(map, key, h, NULL, &entry))
+                return false;
+
+        if (value)
+                *value = entry->value;
+        return true;
+}
+
+bool hashmap__delete(struct hashmap *map, const void *key,
+                     const void **old_key, void **old_value)
+{
+        struct hashmap_entry **pprev, *entry;
+        size_t h;
+
+        h = hash_bits(map->hash_fn(key, map->ctx), map->cap_bits);
+        if (!hashmap_find_entry(map, key, h, &pprev, &entry))
+                return false;
+
+        if (old_key)
+                *old_key = entry->key;
+        if (old_value)
+                *old_value = entry->value;
+
+        hashmap_del_entry(pprev, entry);
+        free(entry);
+        map->sz--;
+
+        return true;
+}
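
The API added above (hashmap__new/insert/find/delete/free) is generic and
callback driven; the map doubles its bucket count and rehashes every entry
once it becomes more than 75% full. A minimal usage sketch follows. It is
illustrative only: it assumes the callback typedefs, HASHMAP_* insert
strategies and IS_ERR() declared in tools/perf/util/hashmap.h and
tools/include/linux/err.h, which are not part of this diff, and the
str_hash/str_equal callbacks are hypothetical, not code from the tree.

#include <stdio.h>
#include <string.h>
#include <stdbool.h>
#include <linux/err.h>          /* IS_ERR(), since hashmap__new() returns ERR_PTR() on failure */
#include "hashmap.h"

/* Hypothetical callbacks for NUL-terminated string keys. */
static size_t str_hash(const void *key, void *ctx)
{
        const char *s = key;
        size_t h = 0;

        while (*s)
                h = h * 31 + (unsigned char)*s++;
        return h;
}

static bool str_equal(const void *a, const void *b, void *ctx)
{
        return strcmp(a, b) == 0;
}

int main(void)
{
        struct hashmap *map = hashmap__new(str_hash, str_equal, NULL);
        void *val;

        if (IS_ERR(map))
                return 1;

        /* HASHMAP_ADD fails with -EEXIST if the key already exists ... */
        hashmap__insert(map, "cycles", (void *)(long)7, HASHMAP_ADD, NULL, NULL);
        /* ... while HASHMAP_SET inserts or overwrites unconditionally. */
        hashmap__insert(map, "cycles", (void *)(long)42, HASHMAP_SET, NULL, NULL);

        if (hashmap__find(map, "cycles", &val))
                printf("cycles -> %ld\n", (long)val);

        hashmap__delete(map, "cycles", NULL, NULL);
        hashmap__free(map);
        return 0;
}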