author     Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2014-07-13 15:26:47 -0700
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2014-07-13 15:26:47 -0700
commit     9f48c89862e39b7f33b44123fc425cf901c89428 (patch)
tree       373886606ada8c2e0c362afbcce490af27d21552 /arch/sparc/kernel/perf_event.c
parent     2a211f320ee3d86835b40efd2948642482d3c933 (diff)
parent     1795cd9b3a91d4b5473c97f491d63892442212ab (diff)
download   linux-9f48c89862e39b7f33b44123fc425cf901c89428.tar.bz2
Merge 3.16-rc5 into char-misc-next
This resolves a number of merge issues with changes in this tree and Linus's tree at the same time.

Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
Diffstat (limited to 'arch/sparc/kernel/perf_event.c')
-rw-r--r--  arch/sparc/kernel/perf_event.c | 23
1 file changed, 13 insertions(+), 10 deletions(-)
diff --git a/arch/sparc/kernel/perf_event.c b/arch/sparc/kernel/perf_event.c
index b5c38faa4ead..8efd33753ad3 100644
--- a/arch/sparc/kernel/perf_event.c
+++ b/arch/sparc/kernel/perf_event.c
@@ -110,7 +110,7 @@ struct cpu_hw_events {
 	unsigned int		group_flag;
 };
-DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
+static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
 /* An event map describes the characteristics of a performance
  * counter event. In particular it gives the encoding as well as
@@ -1153,7 +1153,7 @@ static void perf_stop_nmi_watchdog(void *unused)
 		cpuc->pcr[i] = pcr_ops->read_pcr(i);
 }
-void perf_event_grab_pmc(void)
+static void perf_event_grab_pmc(void)
 {
 	if (atomic_inc_not_zero(&active_events))
 		return;
@@ -1169,7 +1169,7 @@ void perf_event_grab_pmc(void)
 	mutex_unlock(&pmc_grab_mutex);
 }
-void perf_event_release_pmc(void)
+static void perf_event_release_pmc(void)
 {
 	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
 		if (atomic_read(&nmi_active) == 0)
@@ -1669,7 +1669,7 @@ static bool __init supported_pmu(void)
 	return false;
 }
-int __init init_hw_perf_events(void)
+static int __init init_hw_perf_events(void)
 {
 	pr_info("Performance events: ");
@@ -1742,10 +1742,11 @@ static void perf_callchain_user_64(struct perf_callchain_entry *entry,
 	ufp = regs->u_regs[UREG_I6] + STACK_BIAS;
 	do {
-		struct sparc_stackf *usf, sf;
+		struct sparc_stackf __user *usf;
+		struct sparc_stackf sf;
 		unsigned long pc;
-		usf = (struct sparc_stackf *) ufp;
+		usf = (struct sparc_stackf __user *)ufp;
 		if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
 			break;
@@ -1765,17 +1766,19 @@ static void perf_callchain_user_32(struct perf_callchain_entry *entry,
 		unsigned long pc;
 		if (thread32_stack_is_64bit(ufp)) {
-			struct sparc_stackf *usf, sf;
+			struct sparc_stackf __user *usf;
+			struct sparc_stackf sf;
 			ufp += STACK_BIAS;
-			usf = (struct sparc_stackf *) ufp;
+			usf = (struct sparc_stackf __user *)ufp;
 			if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
 				break;
 			pc = sf.callers_pc & 0xffffffff;
 			ufp = ((unsigned long) sf.fp) & 0xffffffff;
 		} else {
-			struct sparc_stackf32 *usf, sf;
-			usf = (struct sparc_stackf32 *) ufp;
+			struct sparc_stackf32 __user *usf;
+			struct sparc_stackf32 sf;
+			usf = (struct sparc_stackf32 __user *)ufp;
 			if (__copy_from_user_inatomic(&sf, usf, sizeof(sf)))
 				break;
 			pc = sf.callers_pc;
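
The callchain hunks above all apply the same pattern for reading a user-space stack frame: the raw user pointer is declared with the __user address-space annotation (so sparse can flag any direct dereference), a plain kernel-space copy of the frame is kept alongside it, and the frame is fetched with __copy_from_user_inatomic(), which returns non-zero if the access faults. A minimal sketch of that shape follows; the helper name is illustrative and not part of this patch, and struct sparc_stackf is assumed to come from asm/ptrace.h as used by the file being patched.

#include <linux/uaccess.h>	/* __copy_from_user_inatomic() */
#include <linux/compiler.h>	/* __user */
#include <asm/ptrace.h>		/* struct sparc_stackf (assumed location) */

/* Illustrative helper, not from the patch: copy one 64-bit user stack
 * frame into a kernel-space buffer.  Returns 0 on success and non-zero
 * if the user access faulted, in which case the caller stops walking.
 */
static unsigned long read_user_frame64(unsigned long ufp, struct sparc_stackf *sf)
{
	/* The cast keeps the __user annotation, so sparse checks every dereference. */
	struct sparc_stackf __user *usf = (struct sparc_stackf __user *)ufp;

	return __copy_from_user_inatomic(sf, usf, sizeof(*sf));
}

The 32-bit walker in perf_callchain_user_32() does the same thing, using struct sparc_stackf32 for compat frames.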