| author | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2017-08-17 18:17:49 +0200 |
|---|---|---|
| committer | Martin Schwidefsky <schwidefsky@de.ibm.com> | 2017-09-06 09:24:43 +0200 |
| commit | f28a4b4ddf8e7181c6c0bc45603d65c4ab6b14f9 (patch) | |
| tree | ac57ff2985e8915e5cc54666bf6b400f3d5db587 /arch/s390 | |
| parent | 60f07c8ec5fae06c23e9fd7bab67dabce92b3414 (diff) | |
| download | linux-f28a4b4ddf8e7181c6c0bc45603d65c4ab6b14f9.tar.bz2 | |
s390/mm: use a single lock for the fields in mm_context_t
The three locks 'lock', 'pgtable_lock' and 'gmap_lock' in the
mm_context_t can be reduced to a single lock.
Signed-off-by: Martin Schwidefsky <schwidefsky@de.ibm.com>
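For orientation, here is a minimal sketch of the consolidated `mm_context_t` after this change and the locking pattern callers follow once `pgtable_lock` and `gmap_lock` are gone. The struct is abbreviated to the fields visible in the diff below (not the full kernel definition), and `example_walk_gmap_list()` is a hypothetical helper invented purely for illustration.

```c
#include <linux/spinlock.h>	/* spinlock_t, spin_lock(), spin_unlock() */
#include <linux/list.h>		/* struct list_head */

/*
 * Abbreviated sketch of mm_context_t after the patch: the former
 * pgtable_lock and gmap_lock are removed, and the single 'lock' now
 * serializes access to both pgtable_list and gmap_list.
 */
typedef struct {
	spinlock_t lock;		/* protects pgtable_list and gmap_list */
	struct list_head pgtable_list;	/* pages holding 2K page table fragments */
	struct list_head gmap_list;	/* guest address spaces (gmap) */
	unsigned long gmap_asce;
	unsigned long asce;
	/* ... remaining fields omitted ... */
} mm_context_t;

/* Hypothetical helper showing the pattern callers use after the change. */
static inline void example_walk_gmap_list(mm_context_t *ctx)
{
	spin_lock(&ctx->lock);		/* was: spin_lock(&ctx->gmap_lock) */
	/* ... inspect or modify ctx->gmap_list here ... */
	spin_unlock(&ctx->lock);
}
```

As the diff shows, the page table allocation paths in pgalloc.c take the same lock with the `_bh` variant (`spin_lock_bh`/`spin_unlock_bh`), while the gmap paths use the plain variant.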
Diffstat (limited to 'arch/s390')
| mode | path | lines changed |
|---|---|---|
| -rw-r--r-- | arch/s390/include/asm/mmu.h | 5 |
| -rw-r--r-- | arch/s390/include/asm/mmu_context.h | 2 |
| -rw-r--r-- | arch/s390/mm/gmap.c | 8 |
| -rw-r--r-- | arch/s390/mm/pgalloc.c | 16 |

4 files changed, 12 insertions, 19 deletions
```diff
diff --git a/arch/s390/include/asm/mmu.h b/arch/s390/include/asm/mmu.h
index 3525fe6e7e4c..3f46a6577b8d 100644
--- a/arch/s390/include/asm/mmu.h
+++ b/arch/s390/include/asm/mmu.h
@@ -9,9 +9,7 @@ typedef struct {
 	cpumask_t cpu_attach_mask;
 	atomic_t flush_count;
 	unsigned int flush_mm;
-	spinlock_t pgtable_lock;
 	struct list_head pgtable_list;
-	spinlock_t gmap_lock;
 	struct list_head gmap_list;
 	unsigned long gmap_asce;
 	unsigned long asce;
@@ -29,10 +27,7 @@ typedef struct {
 
 #define INIT_MM_CONTEXT(name)						   \
 	.context.lock = __SPIN_LOCK_UNLOCKED(name.context.lock),	   \
-	.context.pgtable_lock =						   \
-			__SPIN_LOCK_UNLOCKED(name.context.pgtable_lock),   \
 	.context.pgtable_list = LIST_HEAD_INIT(name.context.pgtable_list), \
-	.context.gmap_lock = __SPIN_LOCK_UNLOCKED(name.context.gmap_lock), \
 	.context.gmap_list = LIST_HEAD_INIT(name.context.gmap_list),
 
 static inline int tprot(unsigned long addr)
diff --git a/arch/s390/include/asm/mmu_context.h b/arch/s390/include/asm/mmu_context.h
index 484efe8f4234..3c9abedc323c 100644
--- a/arch/s390/include/asm/mmu_context.h
+++ b/arch/s390/include/asm/mmu_context.h
@@ -18,9 +18,7 @@ static inline int init_new_context(struct task_struct *tsk,
 				   struct mm_struct *mm)
 {
 	spin_lock_init(&mm->context.lock);
-	spin_lock_init(&mm->context.pgtable_lock);
 	INIT_LIST_HEAD(&mm->context.pgtable_list);
-	spin_lock_init(&mm->context.gmap_lock);
 	INIT_LIST_HEAD(&mm->context.gmap_list);
 	cpumask_clear(&mm->context.cpu_attach_mask);
 	atomic_set(&mm->context.flush_count, 0);
diff --git a/arch/s390/mm/gmap.c b/arch/s390/mm/gmap.c
index 9e1494e3d849..2f66290c9b92 100644
--- a/arch/s390/mm/gmap.c
+++ b/arch/s390/mm/gmap.c
@@ -100,14 +100,14 @@ struct gmap *gmap_create(struct mm_struct *mm, unsigned long limit)
 	if (!gmap)
 		return NULL;
 	gmap->mm = mm;
-	spin_lock(&mm->context.gmap_lock);
+	spin_lock(&mm->context.lock);
 	list_add_rcu(&gmap->list, &mm->context.gmap_list);
 	if (list_is_singular(&mm->context.gmap_list))
 		gmap_asce = gmap->asce;
 	else
 		gmap_asce = -1UL;
 	WRITE_ONCE(mm->context.gmap_asce, gmap_asce);
-	spin_unlock(&mm->context.gmap_lock);
+	spin_unlock(&mm->context.lock);
 	return gmap;
 }
 EXPORT_SYMBOL_GPL(gmap_create);
@@ -248,7 +248,7 @@ void gmap_remove(struct gmap *gmap)
 		spin_unlock(&gmap->shadow_lock);
 	}
 	/* Remove gmap from the pre-mm list */
-	spin_lock(&gmap->mm->context.gmap_lock);
+	spin_lock(&gmap->mm->context.lock);
 	list_del_rcu(&gmap->list);
 	if (list_empty(&gmap->mm->context.gmap_list))
 		gmap_asce = 0;
@@ -258,7 +258,7 @@ void gmap_remove(struct gmap *gmap)
 	else
 		gmap_asce = -1UL;
 	WRITE_ONCE(gmap->mm->context.gmap_asce, gmap_asce);
-	spin_unlock(&gmap->mm->context.gmap_lock);
+	spin_unlock(&gmap->mm->context.lock);
 	synchronize_rcu();
 	/* Put reference */
 	gmap_put(gmap);
diff --git a/arch/s390/mm/pgalloc.c b/arch/s390/mm/pgalloc.c
index 05b5b1b0a8d9..05f1f27e6708 100644
--- a/arch/s390/mm/pgalloc.c
+++ b/arch/s390/mm/pgalloc.c
@@ -188,7 +188,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 	/* Try to get a fragment of a 4K page as a 2K page table */
 	if (!mm_alloc_pgste(mm)) {
 		table = NULL;
-		spin_lock_bh(&mm->context.pgtable_lock);
+		spin_lock_bh(&mm->context.lock);
 		if (!list_empty(&mm->context.pgtable_list)) {
 			page = list_first_entry(&mm->context.pgtable_list,
 						struct page, lru);
@@ -203,7 +203,7 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 				list_del(&page->lru);
 			}
 		}
-		spin_unlock_bh(&mm->context.pgtable_lock);
+		spin_unlock_bh(&mm->context.lock);
 		if (table)
 			return table;
 	}
@@ -227,9 +227,9 @@ unsigned long *page_table_alloc(struct mm_struct *mm)
 		/* Return the first 2K fragment of the page */
 		atomic_set(&page->_mapcount, 1);
 		clear_table(table, _PAGE_INVALID, PAGE_SIZE);
-		spin_lock_bh(&mm->context.pgtable_lock);
+		spin_lock_bh(&mm->context.lock);
 		list_add(&page->lru, &mm->context.pgtable_list);
-		spin_unlock_bh(&mm->context.pgtable_lock);
+		spin_unlock_bh(&mm->context.lock);
 	}
 	return table;
 }
@@ -243,13 +243,13 @@ void page_table_free(struct mm_struct *mm, unsigned long *table)
 	if (!mm_alloc_pgste(mm)) {
 		/* Free 2K page table fragment of a 4K page */
 		bit = (__pa(table) & ~PAGE_MASK)/(PTRS_PER_PTE*sizeof(pte_t));
-		spin_lock_bh(&mm->context.pgtable_lock);
+		spin_lock_bh(&mm->context.lock);
 		mask = atomic_xor_bits(&page->_mapcount, 1U << bit);
 		if (mask & 3)
 			list_add(&page->lru, &mm->context.pgtable_list);
 		else
 			list_del(&page->lru);
-		spin_unlock_bh(&mm->context.pgtable_lock);
+		spin_unlock_bh(&mm->context.lock);
 		if (mask != 0)
 			return;
 	}
@@ -275,13 +275,13 @@ void page_table_free_rcu(struct mmu_gather *tlb, unsigned long *table,
 		return;
 	}
 	bit = (__pa(table) & ~PAGE_MASK) / (PTRS_PER_PTE*sizeof(pte_t));
-	spin_lock_bh(&mm->context.pgtable_lock);
+	spin_lock_bh(&mm->context.lock);
 	mask = atomic_xor_bits(&page->_mapcount, 0x11U << bit);
 	if (mask & 3)
 		list_add_tail(&page->lru, &mm->context.pgtable_list);
 	else
 		list_del(&page->lru);
-	spin_unlock_bh(&mm->context.pgtable_lock);
+	spin_unlock_bh(&mm->context.lock);
 	table = (unsigned long *) (__pa(table) | (1U << bit));
 	tlb_remove_table(tlb, table);
 }
```