author		Linus Torvalds <torvalds@linux-foundation.org>	2019-11-25 19:23:46 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2019-11-25 19:23:46 -0800
commit		1b96a41b420972f8ba563373de8154d59d5b2208 (patch)
tree		068e16af7181c333b2d73f777a992188abceb9bb /kernel/trace
parent		9391edee8667733e22baa50d182191ba3a00d5e1 (diff)
parent		40363cf13999ee4fb3b5c1e67fa5e6f0e9da34bd (diff)
Merge branch 'for-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup
Pull cgroup updates from Tejun Heo:
 "There are several notable changes here:

   - Single thread migrating itself has been optimized so that it
     doesn't need threadgroup rwsem anymore.

   - Freezer optimization to avoid unnecessary frozen state changes.

   - cgroup ID unification so that cgroup fs ino is the only unique ID
     used for the cgroup and can be used to directly look up live
     cgroups through filehandle interface on 64bit ino archs. On 32bit
     archs, cgroup fs ino is still the only ID in use but it is only
     unique when combined with gen.

   - selftest and other changes"

* 'for-5.5' of git://git.kernel.org/pub/scm/linux/kernel/git/tj/cgroup: (24 commits)
  writeback: fix -Wformat compilation warnings
  docs: cgroup: mm: Fix spelling of "list"
  cgroup: fix incorrect WARN_ON_ONCE() in cgroup_setup_root()
  cgroup: use cgrp->kn->id as the cgroup ID
  kernfs: use 64bit inos if ino_t is 64bit
  kernfs: implement custom exportfs ops and fid type
  kernfs: combine ino/id lookup functions into kernfs_find_and_get_node_by_id()
  kernfs: convert kernfs_node->id from union kernfs_node_id to u64
  kernfs: kernfs_find_and_get_node_by_ino() should only look up activated nodes
  kernfs: use dumber locking for kernfs_find_and_get_node_by_ino()
  netprio: use css ID instead of cgroup ID
  writeback: use ino_t for inodes in tracepoints
  kernfs: fix ino wrap-around detection
  kselftests: cgroup: Avoid the reuse of fd after it is deallocated
  cgroup: freezer: don't change task and cgroups status unnecessarily
  cgroup: use cgroup->last_bstat instead of cgroup->bstat_pending for consistency
  cgroup: remove cgroup_enable_task_cg_lists() optimization
  cgroup: pids: use atomic64_t for pids->limit
  selftests: cgroup: Run test_core under interfering stress
  selftests: cgroup: Add task migration tests
  ...
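
As the message above notes, after this series a cgroup's ID on 64bit-ino archs is
simply the inode number of its cgroupfs directory. A minimal userspace sketch of
recovering it with plain stat(2); the cgroup2 mount point and the group name
"mygroup" are assumptions for illustration only:

	#include <stdio.h>
	#include <sys/stat.h>

	int main(void)
	{
		struct stat st;

		/* hypothetical cgroup path, for illustration only */
		if (stat("/sys/fs/cgroup/mygroup", &st))
			return 1;

		/*
		 * On 64bit-ino archs st_ino is the full cgroup ID; on
		 * 32bit-ino archs the ino alone is only unique when
		 * combined with the generation (see the commit message).
		 */
		printf("cgroup id: %llu\n", (unsigned long long)st.st_ino);
		return 0;
	}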
Diffstat (limited to 'kernel/trace')
-rw-r--r--	kernel/trace/blktrace.c	84
1 file changed, 46 insertions(+), 38 deletions(-)
diff --git a/kernel/trace/blktrace.c b/kernel/trace/blktrace.c
index 2d6e93ab0478..475e29498bca 100644
--- a/kernel/trace/blktrace.c
+++ b/kernel/trace/blktrace.c
@@ -64,8 +64,7 @@ static void blk_unregister_tracepoints(void);
* Send out a notify message.
*/
static void trace_note(struct blk_trace *bt, pid_t pid, int action,
- const void *data, size_t len,
- union kernfs_node_id *cgid)
+ const void *data, size_t len, u64 cgid)
{
struct blk_io_trace *t;
struct ring_buffer_event *event = NULL;
@@ -73,7 +72,7 @@ static void trace_note(struct blk_trace *bt, pid_t pid, int action,
int pc = 0;
int cpu = smp_processor_id();
bool blk_tracer = blk_tracer_enabled;
- ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
+ ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
if (blk_tracer) {
buffer = blk_tr->trace_buffer.buffer;
@@ -100,8 +99,8 @@ record_it:
t->pid = pid;
t->cpu = cpu;
t->pdu_len = len + cgid_len;
- if (cgid)
- memcpy((void *)t + sizeof(*t), cgid, cgid_len);
+ if (cgid_len)
+ memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
memcpy((void *) t + sizeof(*t) + cgid_len, data, len);
if (blk_tracer)
@@ -122,7 +121,7 @@ static void trace_note_tsk(struct task_struct *tsk)
spin_lock_irqsave(&running_trace_lock, flags);
list_for_each_entry(bt, &running_trace_list, running_list) {
trace_note(bt, tsk->pid, BLK_TN_PROCESS, tsk->comm,
- sizeof(tsk->comm), NULL);
+ sizeof(tsk->comm), 0);
}
spin_unlock_irqrestore(&running_trace_lock, flags);
}
@@ -139,7 +138,7 @@ static void trace_note_time(struct blk_trace *bt)
words[1] = now.tv_nsec;
local_irq_save(flags);
- trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), NULL);
+ trace_note(bt, 0, BLK_TN_TIMESTAMP, words, sizeof(words), 0);
local_irq_restore(flags);
}
@@ -172,9 +171,9 @@ void __trace_note_message(struct blk_trace *bt, struct blkcg *blkcg,
blkcg = NULL;
#ifdef CONFIG_BLK_CGROUP
trace_note(bt, 0, BLK_TN_MESSAGE, buf, n,
- blkcg ? cgroup_get_kernfs_id(blkcg->css.cgroup) : NULL);
+ blkcg ? cgroup_id(blkcg->css.cgroup) : 1);
#else
- trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, NULL);
+ trace_note(bt, 0, BLK_TN_MESSAGE, buf, n, 0);
#endif
local_irq_restore(flags);
}
@@ -212,7 +211,7 @@ static const u32 ddir_act[2] = { BLK_TC_ACT(BLK_TC_READ),
*/
static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
int op, int op_flags, u32 what, int error, int pdu_len,
- void *pdu_data, union kernfs_node_id *cgid)
+ void *pdu_data, u64 cgid)
{
struct task_struct *tsk = current;
struct ring_buffer_event *event = NULL;
@@ -223,7 +222,7 @@ static void __blk_add_trace(struct blk_trace *bt, sector_t sector, int bytes,
pid_t pid;
int cpu, pc = 0;
bool blk_tracer = blk_tracer_enabled;
- ssize_t cgid_len = cgid ? sizeof(*cgid) : 0;
+ ssize_t cgid_len = cgid ? sizeof(cgid) : 0;
if (unlikely(bt->trace_state != Blktrace_running && !blk_tracer))
return;
@@ -294,7 +293,7 @@ record_it:
t->pdu_len = pdu_len + cgid_len;
if (cgid_len)
- memcpy((void *)t + sizeof(*t), cgid, cgid_len);
+ memcpy((void *)t + sizeof(*t), &cgid, cgid_len);
if (pdu_len)
memcpy((void *)t + sizeof(*t) + cgid_len, pdu_data, pdu_len);
@@ -751,31 +750,29 @@ void blk_trace_shutdown(struct request_queue *q)
}
#ifdef CONFIG_BLK_CGROUP
-static union kernfs_node_id *
-blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
+static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
struct blk_trace *bt = q->blk_trace;
if (!bt || !(blk_tracer_flags.val & TRACE_BLK_OPT_CGROUP))
- return NULL;
+ return 0;
if (!bio->bi_blkg)
- return NULL;
- return cgroup_get_kernfs_id(bio_blkcg(bio)->css.cgroup);
+ return 0;
+ return cgroup_id(bio_blkcg(bio)->css.cgroup);
}
#else
-static union kernfs_node_id *
-blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
+static u64 blk_trace_bio_get_cgid(struct request_queue *q, struct bio *bio)
{
- return NULL;
+ return 0;
}
#endif
-static union kernfs_node_id *
+static u64
blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
{
if (!rq->bio)
- return NULL;
+ return 0;
/* Use the first bio */
return blk_trace_bio_get_cgid(q, rq->bio);
}
@@ -797,8 +794,7 @@ blk_trace_request_get_cgid(struct request_queue *q, struct request *rq)
*
**/
static void blk_add_trace_rq(struct request *rq, int error,
- unsigned int nr_bytes, u32 what,
- union kernfs_node_id *cgid)
+ unsigned int nr_bytes, u32 what, u64 cgid)
{
struct blk_trace *bt = rq->q->blk_trace;
@@ -913,7 +909,7 @@ static void blk_add_trace_getrq(void *ignore,
if (bt)
__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_GETRQ, 0, 0,
- NULL, NULL);
+ NULL, 0);
}
}
@@ -929,7 +925,7 @@ static void blk_add_trace_sleeprq(void *ignore,
if (bt)
__blk_add_trace(bt, 0, 0, rw, 0, BLK_TA_SLEEPRQ,
- 0, 0, NULL, NULL);
+ 0, 0, NULL, 0);
}
}
@@ -938,7 +934,7 @@ static void blk_add_trace_plug(void *ignore, struct request_queue *q)
struct blk_trace *bt = q->blk_trace;
if (bt)
- __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, NULL);
+ __blk_add_trace(bt, 0, 0, 0, 0, BLK_TA_PLUG, 0, 0, NULL, 0);
}
static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
@@ -955,7 +951,7 @@ static void blk_add_trace_unplug(void *ignore, struct request_queue *q,
else
what = BLK_TA_UNPLUG_TIMER;
- __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, NULL);
+ __blk_add_trace(bt, 0, 0, 0, 0, what, 0, sizeof(rpdu), &rpdu, 0);
}
}
@@ -1172,19 +1168,17 @@ const struct blk_io_trace *te_blk_io_trace(const struct trace_entry *ent)
static inline const void *pdu_start(const struct trace_entry *ent, bool has_cg)
{
- return (void *)(te_blk_io_trace(ent) + 1) +
- (has_cg ? sizeof(union kernfs_node_id) : 0);
+ return (void *)(te_blk_io_trace(ent) + 1) + (has_cg ? sizeof(u64) : 0);
}
-static inline const void *cgid_start(const struct trace_entry *ent)
+static inline u64 t_cgid(const struct trace_entry *ent)
{
- return (void *)(te_blk_io_trace(ent) + 1);
+ return *(u64 *)(te_blk_io_trace(ent) + 1);
}
static inline int pdu_real_len(const struct trace_entry *ent, bool has_cg)
{
- return te_blk_io_trace(ent)->pdu_len -
- (has_cg ? sizeof(union kernfs_node_id) : 0);
+ return te_blk_io_trace(ent)->pdu_len - (has_cg ? sizeof(u64) : 0);
}
static inline u32 t_action(const struct trace_entry *ent)
@@ -1257,7 +1251,7 @@ static void blk_log_action(struct trace_iterator *iter, const char *act,
fill_rwbs(rwbs, t);
if (has_cg) {
- const union kernfs_node_id *id = cgid_start(iter->ent);
+ u64 id = t_cgid(iter->ent);
if (blk_tracer_flags.val & TRACE_BLK_OPT_CGNAME) {
char blkcg_name_buf[NAME_MAX + 1] = "<...>";
@@ -1267,11 +1261,25 @@ static void blk_log_action(struct trace_iterator *iter, const char *act,
trace_seq_printf(&iter->seq, "%3d,%-3d %s %2s %3s ",
MAJOR(t->device), MINOR(t->device),
blkcg_name_buf, act, rwbs);
- } else
+ } else {
+ /*
+ * The cgid portion used to be "INO,GEN". Userland
+ * builds a FILEID_INO32_GEN fid out of them and
+ * opens the cgroup using open_by_handle_at(2).
+ * While 32bit ino setups are still the same, 64bit
+ * ones now use the 64bit ino as the whole ID and
+ * no longer use generation.
+ *
+ * Regardless of the content, always output
+ * "LOW32,HIGH32" so that FILEID_INO32_GEN fid can
+ * be mapped back to @id on both 64 and 32bit ino
+ * setups. See __kernfs_fh_to_dentry().
+ */
trace_seq_printf(&iter->seq,
- "%3d,%-3d %x,%-x %2s %3s ",
+ "%3d,%-3d %llx,%-llx %2s %3s ",
MAJOR(t->device), MINOR(t->device),
- id->ino, id->generation, act, rwbs);
+ id & U32_MAX, id >> 32, act, rwbs);
+ }
} else
trace_seq_printf(&iter->seq, "%3d,%-3d %2s %3s ",
MAJOR(t->device), MINOR(t->device), act, rwbs);
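
The comment added to blk_log_action() above explains how the "LOW32,HIGH32" cgid
pair maps back to a cgroup through a FILEID_INO32_GEN file handle. A userspace
sketch of that round trip, assuming a cgroup2 mount at /sys/fs/cgroup,
CAP_DAC_READ_SEARCH, and hex LOW32/HIGH32 values taken from the trace output
(FILEID_INO32_GEN is 1, from include/linux/exportfs.h):

	#define _GNU_SOURCE
	#include <fcntl.h>
	#include <stdint.h>
	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <unistd.h>

	#define FILEID_INO32_GEN 1	/* fid type handled by __kernfs_fh_to_dentry() */

	int main(int argc, char **argv)
	{
		struct file_handle *fh;
		uint32_t pair[2];
		int mnt_fd, cg_fd;

		if (argc != 3)
			return 1;
		pair[0] = strtoul(argv[1], NULL, 16);	/* LOW32: ino on 32bit-ino setups */
		pair[1] = strtoul(argv[2], NULL, 16);	/* HIGH32: gen, or upper ino bits */

		mnt_fd = open("/sys/fs/cgroup", O_RDONLY | O_DIRECTORY);
		if (mnt_fd < 0)
			return 1;

		fh = malloc(sizeof(*fh) + sizeof(pair));
		if (!fh)
			return 1;
		fh->handle_bytes = sizeof(pair);
		fh->handle_type = FILEID_INO32_GEN;
		memcpy(fh->f_handle, pair, sizeof(pair));

		/* __kernfs_fh_to_dentry() recombines the pair into the u64 ID */
		cg_fd = open_by_handle_at(mnt_fd, fh, O_RDONLY);
		if (cg_fd < 0) {
			perror("open_by_handle_at");
			return 1;
		}
		printf("opened cgroup, fd %d\n", cg_fd);
		close(cg_fd);
		close(mnt_fd);
		free(fh);
		return 0;
	}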