| author | Eric Sandeen <sandeen@redhat.com> | 2008-09-13 15:23:29 -0400 |
|---|---|---|
| committer | Theodore Ts'o <tytso@mit.edu> | 2008-09-13 15:23:29 -0400 |
| commit | 730c213c79a638137b47a90624e4bac252f07ae7 (patch) | |
| tree | dd4e5ebd64045721d5834cea52e6edb4de2fe676 /fs/ext4 | |
| parent | 8eea80d52b9d87cfd771055534bd2c24f73704d7 (diff) | |
| download | linux-730c213c79a638137b47a90624e4bac252f07ae7.tar.bz2 | |
ext4: use percpu data structures for lg_prealloc_list
lg_prealloc_list seems to cry out for a per-cpu data structure; on a large
SMP system I think this should scale better. I've lightly tested this change
on a 4-cpu system.
Signed-off-by: Eric Sandeen <sandeen@redhat.com>
Acked-by: Aneesh Kumar K.V <aneesh.kumar@linux.vnet.ibm.com>
Signed-off-by: "Theodore Ts'o" <tytso@mit.edu>
Diffstat (limited to 'fs/ext4')
| -rw-r--r-- | fs/ext4/mballoc.c | 13 |
1 file changed, 5 insertions(+), 8 deletions(-)
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 9122271e3d65..14ebd572bea8 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -2541,17 +2541,16 @@ int ext4_mb_init(struct super_block *sb, int needs_recovery)
 	sbi->s_mb_history_filter = EXT4_MB_HISTORY_DEFAULT;
 	sbi->s_mb_group_prealloc = MB_DEFAULT_GROUP_PREALLOC;
 
-	i = sizeof(struct ext4_locality_group) * nr_cpu_ids;
-	sbi->s_locality_groups = kmalloc(i, GFP_KERNEL);
+	sbi->s_locality_groups = alloc_percpu(struct ext4_locality_group);
 	if (sbi->s_locality_groups == NULL) {
 		clear_opt(sbi->s_mount_opt, MBALLOC);
 		kfree(sbi->s_mb_offsets);
 		kfree(sbi->s_mb_maxs);
 		return -ENOMEM;
 	}
-	for (i = 0; i < nr_cpu_ids; i++) {
+	for_each_possible_cpu(i) {
 		struct ext4_locality_group *lg;
-		lg = &sbi->s_locality_groups[i];
+		lg = per_cpu_ptr(sbi->s_locality_groups, i);
 		mutex_init(&lg->lg_mutex);
 		for (j = 0; j < PREALLOC_TB_SIZE; j++)
 			INIT_LIST_HEAD(&lg->lg_prealloc_list[j]);
@@ -2648,8 +2647,7 @@ int ext4_mb_release(struct super_block *sb)
 				atomic_read(&sbi->s_mb_discarded));
 	}
 
-	kfree(sbi->s_locality_groups);
-
+	free_percpu(sbi->s_locality_groups);
 	ext4_mb_history_release(sb);
 	ext4_mb_destroy_per_dev_proc(sb);
 
@@ -4106,8 +4104,7 @@ static void ext4_mb_group_or_file(struct ext4_allocation_context *ac)
 	 * per cpu locality group is to reduce the contention between block
 	 * request from multiple CPUs.
 	 */
-	ac->ac_lg = &sbi->s_locality_groups[get_cpu()];
-	put_cpu();
+	ac->ac_lg = per_cpu_ptr(sbi->s_locality_groups, raw_smp_processor_id());
 
 	/* we're going to use group allocation */
 	ac->ac_flags |= EXT4_MB_HINT_GROUP_ALLOC;
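For readers not familiar with the kernel percpu API this patch switches to, the sketch below distills the allocate / initialize / look-up / free pattern it adopts. It is a minimal illustration only: `struct demo_group` and the `demo_*` function names are hypothetical stand-ins for ext4's `struct ext4_locality_group`, while the percpu calls themselves (`alloc_percpu()`, `for_each_possible_cpu()`, `per_cpu_ptr()`, `raw_smp_processor_id()`, `free_percpu()`) are the ones the patch uses.

```c
/*
 * Minimal sketch of the percpu pattern used by the patch above.
 * struct demo_group and the demo_* names are hypothetical; only the
 * percpu API calls mirror what the patch does.
 */
#include <linux/percpu.h>
#include <linux/mutex.h>
#include <linux/list.h>
#include <linux/smp.h>
#include <linux/errno.h>

struct demo_group {
	struct mutex	lock;
	struct list_head items;
};

static struct demo_group __percpu *demo_groups;

/* Allocate one struct demo_group per possible CPU and initialize each. */
static int demo_init(void)
{
	int cpu;

	demo_groups = alloc_percpu(struct demo_group);
	if (demo_groups == NULL)
		return -ENOMEM;

	for_each_possible_cpu(cpu) {
		struct demo_group *dg = per_cpu_ptr(demo_groups, cpu);

		mutex_init(&dg->lock);
		INIT_LIST_HEAD(&dg->items);
	}
	return 0;
}

/*
 * Pick the group of the CPU we happen to be running on.  As in the
 * patch, the CPU number is only a locality hint: the pointer stays
 * valid even if the task migrates afterwards, because dg->lock
 * serializes all access to the group.
 */
static struct demo_group *demo_current_group(void)
{
	return per_cpu_ptr(demo_groups, raw_smp_processor_id());
}

static void demo_exit(void)
{
	free_percpu(demo_groups);
}
```

The design point behind the last hunk is the same: the per-CPU locality group exists only to reduce contention, and access to it is serialized by lg_mutex, which is presumably why the patch can read the CPU number with raw_smp_processor_id() instead of pinning the task with get_cpu()/put_cpu().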