Diffstat (limited to 'kernel/events')
-rw-r--r--  kernel/events/core.c  10
1 file changed, 6 insertions, 4 deletions
diff --git a/kernel/events/core.c b/kernel/events/core.c
index fc9f5ebf4849..82e15c08edfb 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5639,8 +5639,10 @@ static void perf_mmap_close(struct vm_area_struct *vma)
 		perf_pmu_output_stop(event);
 
 		/* now it's safe to free the pages */
-		atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
-		atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
+		if (!rb->aux_mmap_locked)
+			atomic_long_sub(rb->aux_nr_pages, &mmap_user->locked_vm);
+		else
+			atomic64_sub(rb->aux_mmap_locked, &vma->vm_mm->pinned_vm);
 
 		/* this has to be the last one */
 		rb_free_aux(rb);
@@ -6979,7 +6981,7 @@ static void __perf_event_output_stop(struct perf_event *event, void *data)
 static int __perf_pmu_output_stop(void *info)
 {
 	struct perf_event *event = info;
-	struct pmu *pmu = event->pmu;
+	struct pmu *pmu = event->ctx->pmu;
 	struct perf_cpu_context *cpuctx = this_cpu_ptr(pmu->pmu_cpu_context);
 	struct remote_output ro = {
 		.rb	= event->rb,
@@ -10665,7 +10667,7 @@ static int perf_copy_attr(struct perf_event_attr __user *uattr,
 
 	attr->size = size;
 
-	if (attr->__reserved_1)
+	if (attr->__reserved_1 || attr->__reserved_2)
 		return -EINVAL;
 
 	if (attr->sample_type & ~(PERF_SAMPLE_MAX-1))