author    Linus Torvalds <torvalds@linux-foundation.org>    2017-09-14 17:30:49 -0700
committer Linus Torvalds <torvalds@linux-foundation.org>    2017-09-14 17:30:49 -0700
commit    e7cdb60fd28b252f1c15a0e50f79a01906124915 (patch)
tree      d1d3865f6fe6af12b7f431a8a1347fde3df53d6b /fs
parent    a2bc8dea9e96872e16248884367ad0013e040089 (diff)
parent    87bf54bb43ddd385d2538b777324bf737f243042 (diff)
Merge branch 'zstd-minimal' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs
Pull zstd support from Chris Mason:
 "Nick Terrell's patch series to add zstd support to the kernel has been
  floating around for a while. After talking with Dave Sterba, Herbert
  and Phillip, we decided to send the whole thing in as one pull request.

  zstd is a big win in speed over zlib and in compression ratio over
  lzo, and the compression team here at FB has gotten great results
  using it in production. Nick will continue to update the kernel side
  with new improvements from the open source zstd userland code.

  Nick has a number of benchmarks for the main zstd code in his lib/zstd
  commit:

    I ran the benchmarks on a Ubuntu 14.04 VM with 2 cores and 4 GiB of
    RAM. The VM is running on a MacBook Pro with a 3.1 GHz Intel Core i7
    processor, 16 GB of RAM, and a SSD.

    I benchmarked using `silesia.tar` [3], which is 211,988,480 B large.
    Run the following commands for the benchmark:

        sudo modprobe zstd_compress_test
        sudo mknod zstd_compress_test c 245 0
        sudo cp silesia.tar zstd_compress_test

    The time is reported by the time of the userland `cp`. The MB/s is
    computed with 1,536,217,008 B / time(buffer size, hash), which
    includes the time to copy from userland. The Adjusted MB/s is
    computed with 1,536,217,088 B / (time(buffer size, hash) -
    time(buffer size, none)). The memory reported is the amount of
    memory the compressor requests.

    | Method   | Size (B)  | Time (s) | Ratio | MB/s    | Adj MB/s | Mem (MB) |
    |----------|-----------|----------|-------|---------|----------|----------|
    | none     | 211988480 | 0.100    | 1     | 2119.88 | -        | -        |
    | zstd -1  | 73645762  | 1.044    | 2.878 | 203.05  | 224.56   | 1.23     |
    | zstd -3  | 66988878  | 1.761    | 3.165 | 120.38  | 127.63   | 2.47     |
    | zstd -5  | 65001259  | 2.563    | 3.261 | 82.71   | 86.07    | 2.86     |
    | zstd -10 | 60165346  | 13.242   | 3.523 | 16.01   | 16.13    | 13.22    |
    | zstd -15 | 58009756  | 47.601   | 3.654 | 4.45    | 4.46     | 21.61    |
    | zstd -19 | 54014593  | 102.835  | 3.925 | 2.06    | 2.06     | 60.15    |
    | zlib -1  | 77260026  | 2.895    | 2.744 | 73.23   | 75.85    | 0.27     |
    | zlib -3  | 72972206  | 4.116    | 2.905 | 51.50   | 52.79    | 0.27     |
    | zlib -6  | 68190360  | 9.633    | 3.109 | 22.01   | 22.24    | 0.27     |
    | zlib -9  | 67613382  | 22.554   | 3.135 | 9.40    | 9.44     | 0.27     |

    I benchmarked zstd decompression using the same method on the same
    machine. The benchmark file is located in the upstream zstd repo
    under `contrib/linux-kernel/zstd_decompress_test.c` [4]. The memory
    reported is the amount of memory required to decompress data
    compressed with the given compression level. If you know the maximum
    size of your input, you can reduce the memory usage of decompression
    irrespective of the compression level.

    | Method   | Time (s) | MB/s    | Adjusted MB/s | Memory (MB) |
    |----------|----------|---------|---------------|-------------|
    | none     | 0.025    | 8479.54 | -             | -           |
    | zstd -1  | 0.358    | 592.15  | 636.60        | 0.84        |
    | zstd -3  | 0.396    | 535.32  | 571.40        | 1.46        |
    | zstd -5  | 0.396    | 535.32  | 571.40        | 1.46        |
    | zstd -10 | 0.374    | 566.81  | 607.42        | 2.51        |
    | zstd -15 | 0.379    | 559.34  | 598.84        | 4.61        |
    | zstd -19 | 0.412    | 514.54  | 547.77        | 8.80        |
    | zlib -1  | 0.940    | 225.52  | 231.68        | 0.04        |
    | zlib -3  | 0.883    | 240.08  | 247.07        | 0.04        |
    | zlib -6  | 0.844    | 251.17  | 258.84        | 0.04        |
    | zlib -9  | 0.837    | 253.27  | 287.64        | 0.04        |

  I ran a long series of tests and benchmarks on the btrfs side and the
  gains are very similar to the core benchmarks Nick ran"

* 'zstd-minimal' of git://git.kernel.org/pub/scm/linux/kernel/git/mason/linux-btrfs:
  squashfs: Add zstd support
  btrfs: Add zstd support
  lib: Add zstd modules
  lib: Add xxhash module
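As a sanity check on the compression table, the Ratio, MB/s and Adjusted MB/s columns can be reproduced from the reported sizes and times. The short standalone C sketch below does this for the zstd -1 row; it is not part of the patch, and it assumes the divisor is the 211,988,480 B silesia.tar size (which matches the table) rather than the 1,536,217,008 B figure quoted in the benchmark text.

/*
 * Standalone sketch (not part of the patch): recompute the zstd -1 row of
 * the compression benchmark table from the numbers reported above.
 * Assumes the divisor is the 211,988,480 B silesia.tar size.
 */
#include <stdio.h>

int main(void)
{
	const double src_bytes   = 211988480.0; /* uncompressed silesia.tar */
	const double none_time   = 0.100;       /* "none" row: plain copy time */
	const double zstd1_bytes = 73645762.0;  /* zstd -1 compressed size */
	const double zstd1_time  = 1.044;       /* zstd -1 copy time */

	double ratio    = src_bytes / zstd1_bytes;                    /* ~2.878 */
	double mbps     = src_bytes / zstd1_time / 1e6;               /* ~203.05 */
	double adj_mbps = src_bytes / (zstd1_time - none_time) / 1e6; /* ~224.56 */

	printf("zstd -1: ratio %.3f, %.2f MB/s, %.2f adjusted MB/s\n",
	       ratio, mbps, adj_mbps);
	return 0;
}

Built with any C compiler, this prints values matching the zstd -1 row of the table above.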
Diffstat (limited to 'fs')
-rw-r--r--  fs/btrfs/Kconfig              2
-rw-r--r--  fs/btrfs/Makefile             2
-rw-r--r--  fs/btrfs/compression.c        1
-rw-r--r--  fs/btrfs/compression.h        4
-rw-r--r--  fs/btrfs/ctree.h              1
-rw-r--r--  fs/btrfs/disk-io.c            2
-rw-r--r--  fs/btrfs/ioctl.c              6
-rw-r--r--  fs/btrfs/props.c              6
-rw-r--r--  fs/btrfs/super.c             12
-rw-r--r--  fs/btrfs/sysfs.c              2
-rw-r--r--  fs/btrfs/zstd.c             432
-rw-r--r--  fs/squashfs/Kconfig          14
-rw-r--r--  fs/squashfs/Makefile          1
-rw-r--r--  fs/squashfs/decompressor.c    7
-rw-r--r--  fs/squashfs/decompressor.h    4
-rw-r--r--  fs/squashfs/squashfs_fs.h     1
-rw-r--r--  fs/squashfs/zstd_wrapper.c  151
17 files changed, 644 insertions, 4 deletions
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
index 80e9c18ea64f..a26c63b4ad68 100644
--- a/fs/btrfs/Kconfig
+++ b/fs/btrfs/Kconfig
@@ -6,6 +6,8 @@ config BTRFS_FS
select ZLIB_DEFLATE
select LZO_COMPRESS
select LZO_DECOMPRESS
+ select ZSTD_COMPRESS
+ select ZSTD_DECOMPRESS
select RAID6_PQ
select XOR_BLOCKS
select SRCU
diff --git a/fs/btrfs/Makefile b/fs/btrfs/Makefile
index 128ce17a80b0..962a95aefb81 100644
--- a/fs/btrfs/Makefile
+++ b/fs/btrfs/Makefile
@@ -6,7 +6,7 @@ btrfs-y += super.o ctree.o extent-tree.o print-tree.o root-tree.o dir-item.o \
transaction.o inode.o file.o tree-defrag.o \
extent_map.o sysfs.o struct-funcs.o xattr.o ordered-data.o \
extent_io.o volumes.o async-thread.o ioctl.o locking.o orphan.o \
- export.o tree-log.o free-space-cache.o zlib.o lzo.o \
+ export.o tree-log.o free-space-cache.o zlib.o lzo.o zstd.o \
compression.o delayed-ref.o relocation.o delayed-inode.o scrub.o \
reada.o backref.o ulist.o qgroup.o send.o dev-replace.o raid56.o \
uuid-tree.o props.o hash.o free-space-tree.o
diff --git a/fs/btrfs/compression.c b/fs/btrfs/compression.c
index 883ecc58fd0d..b51d23f5cafa 100644
--- a/fs/btrfs/compression.c
+++ b/fs/btrfs/compression.c
@@ -704,6 +704,7 @@ static struct {
static const struct btrfs_compress_op * const btrfs_compress_op[] = {
&btrfs_zlib_compress,
&btrfs_lzo_compress,
+ &btrfs_zstd_compress,
};
void __init btrfs_init_compress(void)
diff --git a/fs/btrfs/compression.h b/fs/btrfs/compression.h
index 3b1b0ac15fdc..d2781ff8f994 100644
--- a/fs/btrfs/compression.h
+++ b/fs/btrfs/compression.h
@@ -99,7 +99,8 @@ enum btrfs_compression_type {
BTRFS_COMPRESS_NONE = 0,
BTRFS_COMPRESS_ZLIB = 1,
BTRFS_COMPRESS_LZO = 2,
- BTRFS_COMPRESS_TYPES = 2,
+ BTRFS_COMPRESS_ZSTD = 3,
+ BTRFS_COMPRESS_TYPES = 3,
};
struct btrfs_compress_op {
@@ -127,6 +128,7 @@ struct btrfs_compress_op {
extern const struct btrfs_compress_op btrfs_zlib_compress;
extern const struct btrfs_compress_op btrfs_lzo_compress;
+extern const struct btrfs_compress_op btrfs_zstd_compress;
int btrfs_compress_heuristic(struct inode *inode, u64 start, u64 end);
diff --git a/fs/btrfs/ctree.h b/fs/btrfs/ctree.h
index 2add002662f4..5a8933da39a7 100644
--- a/fs/btrfs/ctree.h
+++ b/fs/btrfs/ctree.h
@@ -270,6 +270,7 @@ struct btrfs_super_block {
BTRFS_FEATURE_INCOMPAT_MIXED_GROUPS | \
BTRFS_FEATURE_INCOMPAT_BIG_METADATA | \
BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO | \
+ BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD | \
BTRFS_FEATURE_INCOMPAT_RAID56 | \
BTRFS_FEATURE_INCOMPAT_EXTENDED_IREF | \
BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA | \
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 46329524dd5f..b6dc1d179d23 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -2828,6 +2828,8 @@ int open_ctree(struct super_block *sb,
features |= BTRFS_FEATURE_INCOMPAT_MIXED_BACKREF;
if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_LZO;
+ else if (fs_info->compress_type == BTRFS_COMPRESS_ZSTD)
+ features |= BTRFS_FEATURE_INCOMPAT_COMPRESS_ZSTD;
if (features & BTRFS_FEATURE_INCOMPAT_SKINNY_METADATA)
btrfs_info(fs_info, "has skinny extents");
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index ae8fbf9d3de2..0ebaf5d116bc 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -296,8 +296,10 @@ static int btrfs_ioctl_setflags(struct file *file, void __user *arg)
if (fs_info->compress_type == BTRFS_COMPRESS_LZO)
comp = "lzo";
- else
+ else if (fs_info->compress_type == BTRFS_COMPRESS_ZLIB)
comp = "zlib";
+ else
+ comp = "zstd";
ret = btrfs_set_prop(inode, "btrfs.compression",
comp, strlen(comp), 0);
if (ret)
@@ -1435,6 +1437,8 @@ int btrfs_defrag_file(struct inode *inode, struct file *file,
if (range->compress_type == BTRFS_COMPRESS_LZO) {
btrfs_set_fs_incompat(fs_info, COMPRESS_LZO);
+ } else if (range->compress_type == BTRFS_COMPRESS_ZSTD) {
+ btrfs_set_fs_incompat(fs_info, COMPRESS_ZSTD);
}
ret = defrag_count;
diff --git a/fs/btrfs/props.c b/fs/btrfs/props.c
index 09c0266f248d..f6a05f836629 100644
--- a/fs/btrfs/props.c
+++ b/fs/btrfs/props.c
@@ -390,6 +390,8 @@ static int prop_compression_validate(const char *value, size_t len)
return 0;
else if (!strncmp("zlib", value, len))
return 0;
+ else if (!strncmp("zstd", value, len))
+ return 0;
return -EINVAL;
}
@@ -412,6 +414,8 @@ static int prop_compression_apply(struct inode *inode,
type = BTRFS_COMPRESS_LZO;
else if (!strncmp("zlib", value, 4))
type = BTRFS_COMPRESS_ZLIB;
+ else if (!strncmp("zstd", value, len))
+ type = BTRFS_COMPRESS_ZSTD;
else
return -EINVAL;
@@ -429,6 +433,8 @@ static const char *prop_compression_extract(struct inode *inode)
return "zlib";
case BTRFS_COMPRESS_LZO:
return "lzo";
+ case BTRFS_COMPRESS_ZSTD:
+ return "zstd";
}
return NULL;
diff --git a/fs/btrfs/super.c b/fs/btrfs/super.c
index 0b7a1d8cd08b..2b13d1a69f0b 100644
--- a/fs/btrfs/super.c
+++ b/fs/btrfs/super.c
@@ -514,6 +514,14 @@ int btrfs_parse_options(struct btrfs_fs_info *info, char *options,
btrfs_clear_opt(info->mount_opt, NODATASUM);
btrfs_set_fs_incompat(info, COMPRESS_LZO);
no_compress = 0;
+ } else if (strcmp(args[0].from, "zstd") == 0) {
+ compress_type = "zstd";
+ info->compress_type = BTRFS_COMPRESS_ZSTD;
+ btrfs_set_opt(info->mount_opt, COMPRESS);
+ btrfs_clear_opt(info->mount_opt, NODATACOW);
+ btrfs_clear_opt(info->mount_opt, NODATASUM);
+ btrfs_set_fs_incompat(info, COMPRESS_ZSTD);
+ no_compress = 0;
} else if (strncmp(args[0].from, "no", 2) == 0) {
compress_type = "no";
btrfs_clear_opt(info->mount_opt, COMPRESS);
@@ -1230,8 +1238,10 @@ static int btrfs_show_options(struct seq_file *seq, struct dentry *dentry)
if (btrfs_test_opt(info, COMPRESS)) {
if (info->compress_type == BTRFS_COMPRESS_ZLIB)
compress_type = "zlib";
- else
+ else if (info->compress_type == BTRFS_COMPRESS_LZO)
compress_type = "lzo";
+ else
+ compress_type = "zstd";
if (btrfs_test_opt(info, FORCE_COMPRESS))
seq_printf(seq, ",compress-force=%s", compress_type);
else
diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c
index c2d5f3580b4c..2b6d37c09a81 100644
--- a/fs/btrfs/sysfs.c
+++ b/fs/btrfs/sysfs.c
@@ -200,6 +200,7 @@ BTRFS_FEAT_ATTR_INCOMPAT(mixed_backref, MIXED_BACKREF);
BTRFS_FEAT_ATTR_INCOMPAT(default_subvol, DEFAULT_SUBVOL);
BTRFS_FEAT_ATTR_INCOMPAT(mixed_groups, MIXED_GROUPS);
BTRFS_FEAT_ATTR_INCOMPAT(compress_lzo, COMPRESS_LZO);
+BTRFS_FEAT_ATTR_INCOMPAT(compress_zstd, COMPRESS_ZSTD);
BTRFS_FEAT_ATTR_INCOMPAT(big_metadata, BIG_METADATA);
BTRFS_FEAT_ATTR_INCOMPAT(extended_iref, EXTENDED_IREF);
BTRFS_FEAT_ATTR_INCOMPAT(raid56, RAID56);
@@ -212,6 +213,7 @@ static struct attribute *btrfs_supported_feature_attrs[] = {
BTRFS_FEAT_ATTR_PTR(default_subvol),
BTRFS_FEAT_ATTR_PTR(mixed_groups),
BTRFS_FEAT_ATTR_PTR(compress_lzo),
+ BTRFS_FEAT_ATTR_PTR(compress_zstd),
BTRFS_FEAT_ATTR_PTR(big_metadata),
BTRFS_FEAT_ATTR_PTR(extended_iref),
BTRFS_FEAT_ATTR_PTR(raid56),
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
new file mode 100644
index 000000000000..607ce47b483a
--- /dev/null
+++ b/fs/btrfs/zstd.c
@@ -0,0 +1,432 @@
+/*
+ * Copyright (c) 2016-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#include <linux/bio.h>
+#include <linux/err.h>
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pagemap.h>
+#include <linux/refcount.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/zstd.h>
+#include "compression.h"
+
+#define ZSTD_BTRFS_MAX_WINDOWLOG 17
+#define ZSTD_BTRFS_MAX_INPUT (1 << ZSTD_BTRFS_MAX_WINDOWLOG)
+#define ZSTD_BTRFS_DEFAULT_LEVEL 3
+
+static ZSTD_parameters zstd_get_btrfs_parameters(size_t src_len)
+{
+ ZSTD_parameters params = ZSTD_getParams(ZSTD_BTRFS_DEFAULT_LEVEL,
+ src_len, 0);
+
+ if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
+ params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
+ WARN_ON(src_len > ZSTD_BTRFS_MAX_INPUT);
+ return params;
+}
+
+struct workspace {
+ void *mem;
+ size_t size;
+ char *buf;
+ struct list_head list;
+};
+
+static void zstd_free_workspace(struct list_head *ws)
+{
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+
+ kvfree(workspace->mem);
+ kfree(workspace->buf);
+ kfree(workspace);
+}
+
+static struct list_head *zstd_alloc_workspace(void)
+{
+ ZSTD_parameters params =
+ zstd_get_btrfs_parameters(ZSTD_BTRFS_MAX_INPUT);
+ struct workspace *workspace;
+
+ workspace = kzalloc(sizeof(*workspace), GFP_KERNEL);
+ if (!workspace)
+ return ERR_PTR(-ENOMEM);
+
+ workspace->size = max_t(size_t,
+ ZSTD_CStreamWorkspaceBound(params.cParams),
+ ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
+ workspace->mem = kvmalloc(workspace->size, GFP_KERNEL);
+ workspace->buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!workspace->mem || !workspace->buf)
+ goto fail;
+
+ INIT_LIST_HEAD(&workspace->list);
+
+ return &workspace->list;
+fail:
+ zstd_free_workspace(&workspace->list);
+ return ERR_PTR(-ENOMEM);
+}
+
+static int zstd_compress_pages(struct list_head *ws,
+ struct address_space *mapping,
+ u64 start,
+ struct page **pages,
+ unsigned long *out_pages,
+ unsigned long *total_in,
+ unsigned long *total_out)
+{
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+ ZSTD_CStream *stream;
+ int ret = 0;
+ int nr_pages = 0;
+ struct page *in_page = NULL; /* The current page to read */
+ struct page *out_page = NULL; /* The current page to write to */
+ ZSTD_inBuffer in_buf = { NULL, 0, 0 };
+ ZSTD_outBuffer out_buf = { NULL, 0, 0 };
+ unsigned long tot_in = 0;
+ unsigned long tot_out = 0;
+ unsigned long len = *total_out;
+ const unsigned long nr_dest_pages = *out_pages;
+ unsigned long max_out = nr_dest_pages * PAGE_SIZE;
+ ZSTD_parameters params = zstd_get_btrfs_parameters(len);
+
+ *out_pages = 0;
+ *total_out = 0;
+ *total_in = 0;
+
+ /* Initialize the stream */
+ stream = ZSTD_initCStream(params, len, workspace->mem,
+ workspace->size);
+ if (!stream) {
+ pr_warn("BTRFS: ZSTD_initCStream failed\n");
+ ret = -EIO;
+ goto out;
+ }
+
+ /* map in the first page of input data */
+ in_page = find_get_page(mapping, start >> PAGE_SHIFT);
+ in_buf.src = kmap(in_page);
+ in_buf.pos = 0;
+ in_buf.size = min_t(size_t, len, PAGE_SIZE);
+
+
+ /* Allocate and map in the output buffer */
+ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (out_page == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pages[nr_pages++] = out_page;
+ out_buf.dst = kmap(out_page);
+ out_buf.pos = 0;
+ out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+
+ while (1) {
+ size_t ret2;
+
+ ret2 = ZSTD_compressStream(stream, &out_buf, &in_buf);
+ if (ZSTD_isError(ret2)) {
+ pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
+ ZSTD_getErrorCode(ret2));
+ ret = -EIO;
+ goto out;
+ }
+
+ /* Check to see if we are making it bigger */
+ if (tot_in + in_buf.pos > 8192 &&
+ tot_in + in_buf.pos <
+ tot_out + out_buf.pos) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ /* We've reached the end of our output range */
+ if (out_buf.pos >= max_out) {
+ tot_out += out_buf.pos;
+ ret = -E2BIG;
+ goto out;
+ }
+
+ /* Check if we need more output space */
+ if (out_buf.pos == out_buf.size) {
+ tot_out += PAGE_SIZE;
+ max_out -= PAGE_SIZE;
+ kunmap(out_page);
+ if (nr_pages == nr_dest_pages) {
+ out_page = NULL;
+ ret = -E2BIG;
+ goto out;
+ }
+ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (out_page == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pages[nr_pages++] = out_page;
+ out_buf.dst = kmap(out_page);
+ out_buf.pos = 0;
+ out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ }
+
+ /* We've reached the end of the input */
+ if (in_buf.pos >= len) {
+ tot_in += in_buf.pos;
+ break;
+ }
+
+ /* Check if we need more input */
+ if (in_buf.pos == in_buf.size) {
+ tot_in += PAGE_SIZE;
+ kunmap(in_page);
+ put_page(in_page);
+
+ start += PAGE_SIZE;
+ len -= PAGE_SIZE;
+ in_page = find_get_page(mapping, start >> PAGE_SHIFT);
+ in_buf.src = kmap(in_page);
+ in_buf.pos = 0;
+ in_buf.size = min_t(size_t, len, PAGE_SIZE);
+ }
+ }
+ while (1) {
+ size_t ret2;
+
+ ret2 = ZSTD_endStream(stream, &out_buf);
+ if (ZSTD_isError(ret2)) {
+ pr_debug("BTRFS: ZSTD_endStream returned %d\n",
+ ZSTD_getErrorCode(ret2));
+ ret = -EIO;
+ goto out;
+ }
+ if (ret2 == 0) {
+ tot_out += out_buf.pos;
+ break;
+ }
+ if (out_buf.pos >= max_out) {
+ tot_out += out_buf.pos;
+ ret = -E2BIG;
+ goto out;
+ }
+
+ tot_out += PAGE_SIZE;
+ max_out -= PAGE_SIZE;
+ kunmap(out_page);
+ if (nr_pages == nr_dest_pages) {
+ out_page = NULL;
+ ret = -E2BIG;
+ goto out;
+ }
+ out_page = alloc_page(GFP_NOFS | __GFP_HIGHMEM);
+ if (out_page == NULL) {
+ ret = -ENOMEM;
+ goto out;
+ }
+ pages[nr_pages++] = out_page;
+ out_buf.dst = kmap(out_page);
+ out_buf.pos = 0;
+ out_buf.size = min_t(size_t, max_out, PAGE_SIZE);
+ }
+
+ if (tot_out >= tot_in) {
+ ret = -E2BIG;
+ goto out;
+ }
+
+ ret = 0;
+ *total_in = tot_in;
+ *total_out = tot_out;
+out:
+ *out_pages = nr_pages;
+ /* Cleanup */
+ if (in_page) {
+ kunmap(in_page);
+ put_page(in_page);
+ }
+ if (out_page)
+ kunmap(out_page);
+ return ret;
+}
+
+static int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
+{
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+ struct page **pages_in = cb->compressed_pages;
+ u64 disk_start = cb->start;
+ struct bio *orig_bio = cb->orig_bio;
+ size_t srclen = cb->compressed_len;
+ ZSTD_DStream *stream;
+ int ret = 0;
+ unsigned long page_in_index = 0;
+ unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
+ unsigned long buf_start;
+ unsigned long total_out = 0;
+ ZSTD_inBuffer in_buf = { NULL, 0, 0 };
+ ZSTD_outBuffer out_buf = { NULL, 0, 0 };
+
+ stream = ZSTD_initDStream(
+ ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
+ if (!stream) {
+ pr_debug("BTRFS: ZSTD_initDStream failed\n");
+ ret = -EIO;
+ goto done;
+ }
+
+ in_buf.src = kmap(pages_in[page_in_index]);
+ in_buf.pos = 0;
+ in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
+
+ out_buf.dst = workspace->buf;
+ out_buf.pos = 0;
+ out_buf.size = PAGE_SIZE;
+
+ while (1) {
+ size_t ret2;
+
+ ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf);
+ if (ZSTD_isError(ret2)) {
+ pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
+ ZSTD_getErrorCode(ret2));
+ ret = -EIO;
+ goto done;
+ }
+ buf_start = total_out;
+ total_out += out_buf.pos;
+ out_buf.pos = 0;
+
+ ret = btrfs_decompress_buf2page(out_buf.dst, buf_start,
+ total_out, disk_start, orig_bio);
+ if (ret == 0)
+ break;
+
+ if (in_buf.pos >= srclen)
+ break;
+
+ /* Check if we've hit the end of a frame */
+ if (ret2 == 0)
+ break;
+
+ if (in_buf.pos == in_buf.size) {
+ kunmap(pages_in[page_in_index++]);
+ if (page_in_index >= total_pages_in) {
+ in_buf.src = NULL;
+ ret = -EIO;
+ goto done;
+ }
+ srclen -= PAGE_SIZE;
+ in_buf.src = kmap(pages_in[page_in_index]);
+ in_buf.pos = 0;
+ in_buf.size = min_t(size_t, srclen, PAGE_SIZE);
+ }
+ }
+ ret = 0;
+ zero_fill_bio(orig_bio);
+done:
+ if (in_buf.src)
+ kunmap(pages_in[page_in_index]);
+ return ret;
+}
+
+static int zstd_decompress(struct list_head *ws, unsigned char *data_in,
+ struct page *dest_page,
+ unsigned long start_byte,
+ size_t srclen, size_t destlen)
+{
+ struct workspace *workspace = list_entry(ws, struct workspace, list);
+ ZSTD_DStream *stream;
+ int ret = 0;
+ size_t ret2;
+ ZSTD_inBuffer in_buf = { NULL, 0, 0 };
+ ZSTD_outBuffer out_buf = { NULL, 0, 0 };
+ unsigned long total_out = 0;
+ unsigned long pg_offset = 0;
+ char *kaddr;
+
+ stream = ZSTD_initDStream(
+ ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
+ if (!stream) {
+ pr_warn("BTRFS: ZSTD_initDStream failed\n");
+ ret = -EIO;
+ goto finish;
+ }
+
+ destlen = min_t(size_t, destlen, PAGE_SIZE);
+
+ in_buf.src = data_in;
+ in_buf.pos = 0;
+ in_buf.size = srclen;
+
+ out_buf.dst = workspace->buf;
+ out_buf.pos = 0;
+ out_buf.size = PAGE_SIZE;
+
+ ret2 = 1;
+ while (pg_offset < destlen && in_buf.pos < in_buf.size) {
+ unsigned long buf_start;
+ unsigned long buf_offset;
+ unsigned long bytes;
+
+ /* Check if the frame is over and we still need more input */
+ if (ret2 == 0) {
+ pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
+ ret = -EIO;
+ goto finish;
+ }
+ ret2 = ZSTD_decompressStream(stream, &out_buf, &in_buf);
+ if (ZSTD_isError(ret2)) {
+ pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
+ ZSTD_getErrorCode(ret2));
+ ret = -EIO;
+ goto finish;
+ }
+
+ buf_start = total_out;
+ total_out += out_buf.pos;
+ out_buf.pos = 0;
+
+ if (total_out <= start_byte)
+ continue;
+
+ if (total_out > start_byte && buf_start < start_byte)
+ buf_offset = start_byte - buf_start;
+ else
+ buf_offset = 0;
+
+ bytes = min_t(unsigned long, destlen - pg_offset,
+ out_buf.size - buf_offset);
+
+ kaddr = kmap_atomic(dest_page);
+ memcpy(kaddr + pg_offset, out_buf.dst + buf_offset, bytes);
+ kunmap_atomic(kaddr);
+
+ pg_offset += bytes;
+ }
+ ret = 0;
+finish:
+ if (pg_offset < destlen) {
+ kaddr = kmap_atomic(dest_page);
+ memset(kaddr + pg_offset, 0, destlen - pg_offset);
+ kunmap_atomic(kaddr);
+ }
+ return ret;
+}
+
+const struct btrfs_compress_op btrfs_zstd_compress = {
+ .alloc_workspace = zstd_alloc_workspace,
+ .free_workspace = zstd_free_workspace,
+ .compress_pages = zstd_compress_pages,
+ .decompress_bio = zstd_decompress_bio,
+ .decompress = zstd_decompress,
+};
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
index ffb093e72b6c..1adb3346b9d6 100644
--- a/fs/squashfs/Kconfig
+++ b/fs/squashfs/Kconfig
@@ -165,6 +165,20 @@ config SQUASHFS_XZ
If unsure, say N.
+config SQUASHFS_ZSTD
+ bool "Include support for ZSTD compressed file systems"
+ depends on SQUASHFS
+ select ZSTD_DECOMPRESS
+ help
+ Saying Y here includes support for reading Squashfs file systems
+ compressed with ZSTD compression. ZSTD gives better compression than
+ the default ZLIB compression, while using less CPU.
+
+ ZSTD is not the standard compression used in Squashfs and so most
+ file systems will be readable without selecting this option.
+
+ If unsure, say N.
+
config SQUASHFS_4K_DEVBLK_SIZE
bool "Use 4K device block size?"
depends on SQUASHFS
diff --git a/fs/squashfs/Makefile b/fs/squashfs/Makefile
index 246a6f329d89..6655631c53ae 100644
--- a/fs/squashfs/Makefile
+++ b/fs/squashfs/Makefile
@@ -15,3 +15,4 @@ squashfs-$(CONFIG_SQUASHFS_LZ4) += lz4_wrapper.o
squashfs-$(CONFIG_SQUASHFS_LZO) += lzo_wrapper.o
squashfs-$(CONFIG_SQUASHFS_XZ) += xz_wrapper.o
squashfs-$(CONFIG_SQUASHFS_ZLIB) += zlib_wrapper.o
+squashfs-$(CONFIG_SQUASHFS_ZSTD) += zstd_wrapper.o
diff --git a/fs/squashfs/decompressor.c b/fs/squashfs/decompressor.c
index d2bc13636f79..836639810ea0 100644
--- a/fs/squashfs/decompressor.c
+++ b/fs/squashfs/decompressor.c
@@ -65,6 +65,12 @@ static const struct squashfs_decompressor squashfs_zlib_comp_ops = {
};
#endif
+#ifndef CONFIG_SQUASHFS_ZSTD
+static const struct squashfs_decompressor squashfs_zstd_comp_ops = {
+ NULL, NULL, NULL, NULL, ZSTD_COMPRESSION, "zstd", 0
+};
+#endif
+
static const struct squashfs_decompressor squashfs_unknown_comp_ops = {
NULL, NULL, NULL, NULL, 0, "unknown", 0
};
@@ -75,6 +81,7 @@ static const struct squashfs_decompressor *decompressor[] = {
&squashfs_lzo_comp_ops,
&squashfs_xz_comp_ops,
&squashfs_lzma_unsupported_comp_ops,
+ &squashfs_zstd_comp_ops,
&squashfs_unknown_comp_ops
};
diff --git a/fs/squashfs/decompressor.h b/fs/squashfs/decompressor.h
index a25713c031a5..0f5a8e4e58da 100644
--- a/fs/squashfs/decompressor.h
+++ b/fs/squashfs/decompressor.h
@@ -58,4 +58,8 @@ extern const struct squashfs_decompressor squashfs_lzo_comp_ops;
extern const struct squashfs_decompressor squashfs_zlib_comp_ops;
#endif
+#ifdef CONFIG_SQUASHFS_ZSTD
+extern const struct squashfs_decompressor squashfs_zstd_comp_ops;
+#endif
+
#endif
diff --git a/fs/squashfs/squashfs_fs.h b/fs/squashfs/squashfs_fs.h
index 506f4ba5b983..24d12fd14177 100644
--- a/fs/squashfs/squashfs_fs.h
+++ b/fs/squashfs/squashfs_fs.h
@@ -241,6 +241,7 @@ struct meta_index {
#define LZO_COMPRESSION 3
#define XZ_COMPRESSION 4
#define LZ4_COMPRESSION 5
+#define ZSTD_COMPRESSION 6
struct squashfs_super_block {
__le32 s_magic;
diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c
new file mode 100644
index 000000000000..eeaabf881159
--- /dev/null
+++ b/fs/squashfs/zstd_wrapper.c
@@ -0,0 +1,151 @@
+/*
+ * Squashfs - a compressed read only filesystem for Linux
+ *
+ * Copyright (c) 2016-present, Facebook, Inc.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version 2,
+ * or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * zstd_wrapper.c
+ */
+
+#include <linux/mutex.h>
+#include <linux/buffer_head.h>
+#include <linux/slab.h>
+#include <linux/zstd.h>
+#include <linux/vmalloc.h>
+
+#include "squashfs_fs.h"
+#include "squashfs_fs_sb.h"
+#include "squashfs.h"
+#include "decompressor.h"
+#include "page_actor.h"
+
+struct workspace {
+ void *mem;
+ size_t mem_size;
+ size_t window_size;
+};
+
+static void *zstd_init(struct squashfs_sb_info *msblk, void *buff)
+{
+ struct workspace *wksp = kmalloc(sizeof(*wksp), GFP_KERNEL);
+
+ if (wksp == NULL)
+ goto failed;
+ wksp->window_size = max_t(size_t,
+ msblk->block_size, SQUASHFS_METADATA_SIZE);
+ wksp->mem_size = ZSTD_DStreamWorkspaceBound(wksp->window_size);
+ wksp->mem = vmalloc(wksp->mem_size);
+ if (wksp->mem == NULL)
+ goto failed;
+
+ return wksp;
+
+failed:
+ ERROR("Failed to allocate zstd workspace\n");
+ kfree(wksp);
+ return ERR_PTR(-ENOMEM);
+}
+
+
+static void zstd_free(void *strm)
+{
+ struct workspace *wksp = strm;
+
+ if (wksp)
+ vfree(wksp->mem);
+ kfree(wksp);
+}
+
+
+static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
+ struct buffer_head **bh, int b, int offset, int length,
+ struct squashfs_page_actor *output)
+{
+ struct workspace *wksp = strm;
+ ZSTD_DStream *stream;
+ size_t total_out = 0;
+ size_t zstd_err;
+ int k = 0;
+ ZSTD_inBuffer in_buf = { NULL, 0, 0 };
+ ZSTD_outBuffer out_buf = { NULL, 0, 0 };
+
+ stream = ZSTD_initDStream(wksp->window_size, wksp->mem, wksp->mem_size);
+
+ if (!stream) {
+ ERROR("Failed to initialize zstd decompressor\n");
+ goto out;
+ }
+
+ out_buf.size = PAGE_SIZE;
+ out_buf.dst = squashfs_first_page(output);
+
+ do {
+ if (in_buf.pos == in_buf.size && k < b) {
+ int avail = min(length, msblk->devblksize - offset);
+
+ length -= avail;
+ in_buf.src = bh[k]->b_data + offset;
+ in_buf.size = avail;
+ in_buf.pos = 0;
+ offset = 0;
+ }
+
+ if (out_buf.pos == out_buf.size) {
+ out_buf.dst = squashfs_next_page(output);
+ if (out_buf.dst == NULL) {
+ /* Shouldn't run out of pages
+ * before stream is done.
+ */
+ squashfs_finish_page(output);
+ goto out;
+ }
+ out_buf.pos = 0;
+ out_buf.size = PAGE_SIZE;
+ }
+
+ total_out -= out_buf.pos;
+ zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf);
+ total_out += out_buf.pos; /* add the additional data produced */
+
+ if (in_buf.pos == in_buf.size && k < b)
+ put_bh(bh[k++]);
+ } while (zstd_err != 0 && !ZSTD_isError(zstd_err));
+
+ squashfs_finish_page(output);
+
+ if (ZSTD_isError(zstd_err)) {
+ ERROR("zstd decompression error: %d\n",
+ (int)ZSTD_getErrorCode(zstd_err));
+ goto out;
+ }
+
+ if (k < b)
+ goto out;
+
+ return (int)total_out;
+
+out:
+ for (; k < b; k++)
+ put_bh(bh[k]);
+
+ return -EIO;
+}
+
+const struct squashfs_decompressor squashfs_zstd_comp_ops = {
+ .init = zstd_init,
+ .free = zstd_free,
+ .decompress = zstd_uncompress,
+ .id = ZSTD_COMPRESSION,
+ .name = "zstd",
+ .supported = 1
+};