Diffstat (limited to 'arch/powerpc')
 arch/powerpc/kernel/process.c |  1 -
 arch/powerpc/mm/4xx_mmu.c     |  4 ----
 arch/powerpc/mm/hugetlbpage.c | 10 ++++------
 arch/powerpc/mm/mem.c         |  2 +-
 arch/powerpc/mm/tlb_32.c      |  6 ++++++
 arch/powerpc/mm/tlb_64.c      |  4 ++--
 6 files changed, 13 insertions(+), 14 deletions(-)
diff --git a/arch/powerpc/kernel/process.c b/arch/powerpc/kernel/process.c
index de69fb37c731..a5a7542a8ff3 100644
--- a/arch/powerpc/kernel/process.c
+++ b/arch/powerpc/kernel/process.c
@@ -457,7 +457,6 @@ void flush_thread(void)
if (t->flags & _TIF_ABI_PENDING)
t->flags ^= (_TIF_ABI_PENDING | _TIF_32BIT);
#endif
- kprobe_flush_task(current);
#ifndef CONFIG_SMP
if (last_task_used_math == current)
diff --git a/arch/powerpc/mm/4xx_mmu.c b/arch/powerpc/mm/4xx_mmu.c
index b7bcbc232f39..4d006aa1a0d1 100644
--- a/arch/powerpc/mm/4xx_mmu.c
+++ b/arch/powerpc/mm/4xx_mmu.c
@@ -110,13 +110,11 @@ unsigned long __init mmu_mapin_ram(void)
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_16M | _PAGE_HWEXEC | _PAGE_HWWRITE;
- spin_lock(&init_mm.page_table_lock);
pmdp = pmd_offset(pgd_offset_k(v), v);
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
pmd_val(*pmdp++) = val;
- spin_unlock(&init_mm.page_table_lock);
v += LARGE_PAGE_SIZE_16M;
p += LARGE_PAGE_SIZE_16M;
@@ -127,10 +125,8 @@ unsigned long __init mmu_mapin_ram(void)
pmd_t *pmdp;
unsigned long val = p | _PMD_SIZE_4M | _PAGE_HWEXEC | _PAGE_HWWRITE;
- spin_lock(&init_mm.page_table_lock);
pmdp = pmd_offset(pgd_offset_k(v), v);
pmd_val(*pmdp) = val;
- spin_unlock(&init_mm.page_table_lock);
v += LARGE_PAGE_SIZE_4M;
p += LARGE_PAGE_SIZE_4M;
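[The locks can go here because mmu_mapin_ram() runs single-threaded at boot, before any other CPU or context can touch the kernel page tables. A minimal sketch of the same unlocked setup with the four pmd stores folded into a loop; the bound 4 is taken from the four stores above (a 16M large page covers four 4M pmd slots), not from a named kernel constant:

    /* Boot-time only: one CPU, no concurrent page-table users,
     * so no spinlock is needed around the pmd stores. */
    pmd_t *pmdp = pmd_offset(pgd_offset_k(v), v);
    int i;

    for (i = 0; i < 4; i++)        /* 16M large page = 4 x 4M pmd slots */
            pmd_val(*pmdp++) = val;
]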
diff --git a/arch/powerpc/mm/hugetlbpage.c b/arch/powerpc/mm/hugetlbpage.c
index 426c269e552e..f867bba893ca 100644
--- a/arch/powerpc/mm/hugetlbpage.c
+++ b/arch/powerpc/mm/hugetlbpage.c
@@ -287,15 +287,15 @@ static int open_high_hpage_areas(struct mm_struct *mm, u16 newareas)
int prepare_hugepage_range(unsigned long addr, unsigned long len)
{
- int err;
+ int err = 0;
if ( (addr+len) < addr )
return -EINVAL;
- if ((addr + len) < 0x100000000UL)
+ if (addr < 0x100000000UL)
err = open_low_hpage_areas(current->mm,
LOW_ESID_MASK(addr, len));
- else
+ if ((addr + len) >= 0x100000000UL)
err = open_high_hpage_areas(current->mm,
HTLB_AREA_MASK(addr, len));
if (err) {
@@ -754,9 +754,7 @@ repeat:
}
/*
- * No need to use ldarx/stdcx here because all who
- * might be updating the pte will hold the
- * page_table_lock
+ * No need to use ldarx/stdcx here
*/
*ptep = __pte(new_pte & ~_PAGE_BUSY);
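[The if/else pair replaced above was the bug: a range that straddles the 4GB boundary satisfies both tests, so both the low and the high segment areas must be opened, and err is now initialised because the compiler can no longer see that exactly one branch assigns it. A hypothetical worked example with made-up numbers:

    /* A range straddling the 4GB boundary:
     *   addr       = 0xf0000000   -> addr < 0x100000000UL, low areas opened
     *   addr + len = 0x110000000  -> >= 0x100000000UL, high areas opened
     * The old else-branch skipped open_low_hpage_areas() for
     * exactly this case. */
    unsigned long addr = 0xf0000000UL;
    unsigned long len  = 0x20000000UL;
]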
diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index 4bd7b0a70996..ed6ed2e30dac 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -495,7 +495,7 @@ EXPORT_SYMBOL(flush_icache_user_range);
* We use it to preload an HPTE into the hash table corresponding to
* the updated linux PTE.
*
- * This must always be called with the mm->page_table_lock held
+ * This must always be called with the pte lock held.
*/
void update_mmu_cache(struct vm_area_struct *vma, unsigned long address,
pte_t pte)
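[For context, a hedged sketch of the caller-side contract the new comment describes; the helpers are the generic mm ones of this kernel era, not part of this patch. The pte lock is held across both the linux pte store and the hash preload:

    /* Hypothetical caller: the pte lock covers the pte update and
     * the preload, so update_mmu_cache() sees a stable linux pte. */
    spinlock_t *ptl;
    pte_t *ptep = pte_offset_map_lock(mm, pmd, address, &ptl);

    set_pte_at(mm, address, ptep, entry);
    update_mmu_cache(vma, address, entry);
    pte_unmap_unlock(ptep, ptl);
]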
diff --git a/arch/powerpc/mm/tlb_32.c b/arch/powerpc/mm/tlb_32.c
index 6c3dc3c44c86..ad580f3742e5 100644
--- a/arch/powerpc/mm/tlb_32.c
+++ b/arch/powerpc/mm/tlb_32.c
@@ -149,6 +149,12 @@ void flush_tlb_mm(struct mm_struct *mm)
return;
}
+ /*
+ * It is safe to go down the mm's list of vmas when called
+ * from dup_mmap, holding mmap_sem. It would also be safe from
+ * unmap_region or exit_mmap, but not from vmtruncate on SMP -
+ * but it seems dup_mmap is the only SMP case which gets here.
+ */
for (mp = mm->mmap; mp != NULL; mp = mp->vm_next)
flush_range(mp->vm_mm, mp->vm_start, mp->vm_end);
FINISH_FLUSH;
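[A small sketch of the serialisation the new comment relies on (hypothetical caller, not part of this patch): the vma list is only walked while something, here mmap_sem as in dup_mmap, keeps it from changing underneath:

    /* Hypothetical dup_mmap-style caller: holding mmap_sem for
     * write keeps mm->mmap stable while flush_tlb_mm() walks
     * the vma list. */
    down_write(&mm->mmap_sem);
    flush_tlb_mm(mm);
    up_write(&mm->mmap_sem);
]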
diff --git a/arch/powerpc/mm/tlb_64.c b/arch/powerpc/mm/tlb_64.c
index 53e31b834ace..859d29a0cac5 100644
--- a/arch/powerpc/mm/tlb_64.c
+++ b/arch/powerpc/mm/tlb_64.c
@@ -95,7 +95,7 @@ static void pte_free_submit(struct pte_freelist_batch *batch)
void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf)
{
- /* This is safe as we are holding page_table_lock */
+ /* This is safe since tlb_gather_mmu has disabled preemption */
cpumask_t local_cpumask = cpumask_of_cpu(smp_processor_id());
struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
@@ -206,7 +206,7 @@ void __flush_tlb_pending(struct ppc64_tlb_batch *batch)
void pte_free_finish(void)
{
- /* This is safe as we are holding page_table_lock */
+ /* This is safe since tlb_gather_mmu has disabled preemption */
struct pte_freelist_batch **batchp = &__get_cpu_var(pte_freelist_cur);
if (*batchp == NULL)
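[The per-cpu accesses in both functions are only safe if the task cannot migrate between CPUs mid-batch. A condensed, hypothetical view of the mmu_gather lifecycle that guarantees this, with signatures as in kernels of this era:

    /* tlb_gather_mmu() calls get_cpu(), disabling preemption, and
     * tlb_finish_mmu() re-enables it.  In between, __get_cpu_var()
     * always resolves to the same CPU's pte_freelist_cur batch. */
    struct mmu_gather *tlb = tlb_gather_mmu(mm, 0);

    pgtable_free_tlb(tlb, pgf);        /* per-cpu batch access is stable */
    tlb_finish_mmu(tlb, start, end);   /* preemption enabled again */
]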