author    Heiko Carstens <heiko.carstens@de.ibm.com>  2008-04-04 16:03:34 +0200
committer Avi Kivity <avi@qumranet.com>  2008-04-27 12:01:00 +0300
commit    c71799c1f404c6e4f34fa64e6be39cd6149e5019 (patch)
tree      cfc9b0d49e02156b0b9931363c21e957ff9caae1 /include/asm-s390
parent    f603f0731f43421403160f5f8b12e90f2e51f064 (diff)
download  linux-c71799c1f404c6e4f34fa64e6be39cd6149e5019.tar.bz2
KVM: s390: Improve pgste accesses
There is no need to use interlocked updates when the rcp lock is held.
Therefore the simple bitops variants can be used. This should improve
performance.

Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Carsten Otte <cotte@de.ibm.com>
Signed-off-by: Avi Kivity <avi@qumranet.com>
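The rationale generalizes beyond s390: when a lock already serializes every
access to a word, a plain read-modify-write is sufficient, and an interlocked
(atomic) update only adds cost. The sketch below is a minimal userspace
illustration of that trade-off, not the kernel code; rcp_word, set_bit_plain,
set_bit_atomic, the pthread mutex standing in for the rcp lock, and the bit
value chosen for RCP_GR_BIT are all hypothetical names for this example.

/*
 * Illustrative sketch: a plain bit set is safe while the lock is held;
 * the atomic variant is only needed when there is no external
 * serialization.
 */
#include <pthread.h>
#include <stdio.h>

#define RCP_GR_BIT 1UL			/* arbitrary bit number for the demo */

static pthread_mutex_t rcp_lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long rcp_word;

/* Non-atomic variant: correct only because the caller holds rcp_lock. */
static void set_bit_plain(unsigned long nr, unsigned long *word)
{
	*word |= 1UL << nr;		/* ordinary load / or / store */
}

/* Atomic variant: needed when concurrent updaters are not locked out. */
static void set_bit_atomic(unsigned long nr, unsigned long *word)
{
	__atomic_fetch_or(word, 1UL << nr, __ATOMIC_SEQ_CST);
}

int main(void)
{
	pthread_mutex_lock(&rcp_lock);
	set_bit_plain(RCP_GR_BIT, &rcp_word);	/* lock held: plain op suffices */
	pthread_mutex_unlock(&rcp_lock);

	set_bit_atomic(RCP_GR_BIT, &rcp_word);	/* no lock: must be atomic */

	printf("rcp_word = %#lx\n", rcp_word);
	return 0;
}

This mirrors the pattern in the patch: all pgste bit manipulation happens
between rcp_lock() and rcp_unlock(), so the non-atomic bitops variants can
replace the interlocked ones without changing behavior.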
Diffstat (limited to 'include/asm-s390')
-rw-r--r--  include/asm-s390/pgtable.h  12
1 file changed, 6 insertions, 6 deletions
diff --git a/include/asm-s390/pgtable.h b/include/asm-s390/pgtable.h
index 7fe5c4b6d82d..4c0698c0dda5 100644
--- a/include/asm-s390/pgtable.h
+++ b/include/asm-s390/pgtable.h
@@ -553,12 +553,12 @@ static inline void ptep_rcp_copy(pte_t *ptep)
 	skey = page_get_storage_key(page_to_phys(page));
 	if (skey & _PAGE_CHANGED)
-		set_bit(RCP_GC_BIT, pgste);
+		set_bit_simple(RCP_GC_BIT, pgste);
 	if (skey & _PAGE_REFERENCED)
-		set_bit(RCP_GR_BIT, pgste);
-	if (test_and_clear_bit(RCP_HC_BIT, pgste))
+		set_bit_simple(RCP_GR_BIT, pgste);
+	if (test_and_clear_bit_simple(RCP_HC_BIT, pgste))
 		SetPageDirty(page);
-	if (test_and_clear_bit(RCP_HR_BIT, pgste))
+	if (test_and_clear_bit_simple(RCP_HR_BIT, pgste))
 		SetPageReferenced(page);
 #endif
 }
@@ -732,8 +732,8 @@ static inline int ptep_test_and_clear_young(struct vm_area_struct *vma,
 	young = ((page_get_storage_key(physpage) & _PAGE_REFERENCED) != 0);
 	rcp_lock(ptep);
 	if (young)
-		set_bit(RCP_GR_BIT, pgste);
-	young |= test_and_clear_bit(RCP_HR_BIT, pgste);
+		set_bit_simple(RCP_GR_BIT, pgste);
+	young |= test_and_clear_bit_simple(RCP_HR_BIT, pgste);
 	rcp_unlock(ptep);
 	return young;
 #endif