Diffstat (limited to 'drivers/gpu/drm/i915/i915_gemfs.c')
-rw-r--r--  drivers/gpu/drm/i915/i915_gemfs.c  22
1 file changed, 22 insertions, 0 deletions
diff --git a/drivers/gpu/drm/i915/i915_gemfs.c b/drivers/gpu/drm/i915/i915_gemfs.c
index 168d0bd98f60..e2993857df37 100644
--- a/drivers/gpu/drm/i915/i915_gemfs.c
+++ b/drivers/gpu/drm/i915/i915_gemfs.c
@@ -24,6 +24,7 @@
 
 #include <linux/fs.h>
 #include <linux/mount.h>
+#include <linux/pagemap.h>
 
 #include "i915_drv.h"
 #include "i915_gemfs.h"
@@ -41,6 +42,27 @@ int i915_gemfs_init(struct drm_i915_private *i915)
 	if (IS_ERR(gemfs))
 		return PTR_ERR(gemfs);
 
+	/*
+	 * Enable huge pages for objects that are at least HPAGE_PMD_SIZE,
+	 * most likely 2M. Note that within_size may over-allocate huge
+	 * pages: if, say, we allocate an object of size 2M + 4K, we may get
+	 * 2M + 2M, but under memory pressure shmem should split any huge
+	 * pages that can be shrunk.
+	 */
+
+	if (has_transparent_hugepage()) {
+		struct super_block *sb = gemfs->mnt_sb;
+		char options[] = "huge=within_size";
+		int flags = 0;
+		int err;
+
+		err = sb->s_op->remount_fs(sb, &flags, options);
+		if (err) {
+			kern_unmount(gemfs);
+			return err;
+		}
+	}
+
 	i915->mm.gemfs = gemfs;
 
 	return 0;
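
For context, huge=within_size is the same shmem/tmpfs mount option that can be set from userspace; the hunk above simply applies it by remounting the driver-private gemfs superblock. The following is a minimal userspace sketch of the same semantics, not part of the commit: the mount point and file name are made up for illustration, and it needs CAP_SYS_ADMIN plus a kernel built with transparent huge page support for shmem.

#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/mount.h>
#include <sys/stat.h>

int main(void)
{
	/* Hypothetical mount point and file, purely for illustration. */
	const char *dir = "/tmp/gemfs-demo";
	const char *path = "/tmp/gemfs-demo/obj";
	int fd;

	if (mkdir(dir, 0700) && errno != EEXIST) {
		perror("mkdir");
		return EXIT_FAILURE;
	}

	/*
	 * huge=within_size: shmem only uses huge pages that fit within
	 * i_size, mirroring the option the driver applies via remount in
	 * the diff above.
	 */
	if (mount("tmpfs", dir, "tmpfs", 0, "huge=within_size")) {
		perror("mount");
		return EXIT_FAILURE;
	}

	fd = open(path, O_RDWR | O_CREAT, 0600);
	if (fd < 0) {
		perror("open");
		return EXIT_FAILURE;
	}

	/*
	 * Size the file to 2M + 4K; as the comment in the diff notes,
	 * within_size may still round faulted-in memory up to whole huge
	 * pages, which shmem can later split under memory pressure.
	 */
	if (ftruncate(fd, (2 << 20) + 4096)) {
		perror("ftruncate");
		return EXIT_FAILURE;
	}

	close(fd);
	return EXIT_SUCCESS;
}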
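
One way to sanity-check whether shmem-backed huge pages are actually being used is to read the ShmemHugePages counter from /proc/meminfo before and after exercising the objects. This is a sketch under the assumption that the field is present, which it is only on kernels built with transparent huge page support.

#include <stdio.h>

/*
 * Return the ShmemHugePages value from /proc/meminfo in kB, or -1 if the
 * file cannot be read or the field is absent.
 */
static long shmem_huge_kb(void)
{
	char line[256];
	long kb = -1;
	FILE *f = fopen("/proc/meminfo", "r");

	if (!f)
		return -1;

	while (fgets(line, sizeof(line), f)) {
		if (sscanf(line, "ShmemHugePages: %ld kB", &kb) == 1)
			break;
	}

	fclose(f);
	return kb;
}

int main(void)
{
	printf("ShmemHugePages: %ld kB\n", shmem_huge_kb());
	return 0;
}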