author		Yue Hu <huyue2@yulong.com>	2021-12-29 07:29:19 +0800
committer	Gao Xiang <hsiangkao@linux.alibaba.com>	2021-12-31 00:50:50 +0800
commit		cecf864d3d76d50e3d9c58145e286a0b8c284e92 (patch)
tree		05934d21aafe3b7fa7ac4cfab88cdad7ce8abda9 /fs/erofs/zdata.h
parent		ab749badf9f41f32509cd103391b81ea7e684b76 (diff)
download	linux-cecf864d3d76d50e3d9c58145e286a0b8c284e92.tar.bz2
erofs: support inline data decompression
Tail-packing inline is already supported for uncompressed files, so implement it for compressed files as well to save I/Os and storage space. Unlike normal pclusters, inline compressed data is available in advance because it is read in with other metadata I/Os; such pclusters therefore move directly into the bypass queue without extra I/O submission. This is the last compression feature before folio/subpage support.

Link: https://lore.kernel.org/r/20211228232919.21413-1-xiang@kernel.org
Reviewed-by: Chao Yu <chao@kernel.org>
Signed-off-by: Yue Hu <huyue2@yulong.com>
Signed-off-by: Gao Xiang <hsiangkao@linux.alibaba.com>
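[Editor's note] To make the new layout concrete, here is a minimal, self-contained userspace model of the union this patch introduces. It is illustrative only: struct toy_pcluster, is_inline(), and compressed_bytes() are invented names standing in for the real fields of struct z_erofs_pcluster shown in the diff below. A block index of zero marks a tail-packing pcluster, so the same unsigned short slot holds a byte count instead of a page count:

	#include <stdbool.h>
	#include <stdio.h>

	#define PAGE_SIZE 4096u

	/* toy stand-in for the fields this patch touches */
	struct toy_pcluster {
		unsigned int index;          /* stands in for pcl->obj.index */
		unsigned short pageofs_in;   /* page offset of inline compressed data */
		union {
			unsigned short pclusterpages;    /* normal: size in pages */
			unsigned short tailpacking_size; /* inline: size in bytes */
		};
	};

	/* mirrors z_erofs_is_inline_pcluster(): index 0 marks tail-packing */
	static bool is_inline(const struct toy_pcluster *pcl)
	{
		return !pcl->index;
	}

	static unsigned int compressed_bytes(const struct toy_pcluster *pcl)
	{
		return is_inline(pcl) ? pcl->tailpacking_size
				      : pcl->pclusterpages * PAGE_SIZE;
	}

	int main(void)
	{
		struct toy_pcluster normal = { .index = 42, .pclusterpages = 2 };
		struct toy_pcluster tail = { .index = 0, .pageofs_in = 4000,
					     .tailpacking_size = 96 };

		printf("normal pcluster: %u compressed bytes\n",
		       compressed_bytes(&normal));
		printf("inline pcluster: %u compressed bytes at page offset %u\n",
		       compressed_bytes(&tail), tail.pageofs_in);
		return 0;
	}

Overlaying the two counts in one slot is safe because a pcluster is either inline or on-disk, never both, and both quantities fit in an unsigned short.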
Diffstat (limited to 'fs/erofs/zdata.h')
-rw-r--r--	fs/erofs/zdata.h	24
1 file changed, 22 insertions(+), 2 deletions(-)
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
index 4a69515dea75..e043216b545f 100644
--- a/fs/erofs/zdata.h
+++ b/fs/erofs/zdata.h
@@ -62,8 +62,16 @@ struct z_erofs_pcluster {
 	/* A: lower limit of decompressed length and if full length or not */
 	unsigned int length;
 
-	/* I: physical cluster size in pages */
-	unsigned short pclusterpages;
+	/* I: page offset of inline compressed data */
+	unsigned short pageofs_in;
+
+	union {
+		/* I: physical cluster size in pages */
+		unsigned short pclusterpages;
+
+		/* I: tailpacking inline compressed size */
+		unsigned short tailpacking_size;
+	};
 
 	/* I: compression algorithm format */
 	unsigned char algorithmformat;
@@ -94,6 +102,18 @@ struct z_erofs_decompressqueue {
 	} u;
 };
 
+static inline bool z_erofs_is_inline_pcluster(struct z_erofs_pcluster *pcl)
+{
+	return !pcl->obj.index;
+}
+
+static inline unsigned int z_erofs_pclusterpages(struct z_erofs_pcluster *pcl)
+{
+	if (z_erofs_is_inline_pcluster(pcl))
+		return 1;
+	return pcl->pclusterpages;
+}
+
 #define Z_EROFS_ONLINEPAGE_COUNT_BITS 2
 #define Z_EROFS_ONLINEPAGE_COUNT_MASK ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
 #define Z_EROFS_ONLINEPAGE_INDEX_SHIFT (Z_EROFS_ONLINEPAGE_COUNT_BITS)
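
[Editor's note] And a hedged sketch of the submission-side consequence described in the commit message. This is again a userspace model under invented names (toy_submit() and friends); the real submission logic lives in fs/erofs/zdata.c, which this page does not show. Because an inline pcluster's compressed bytes arrive together with the metadata, it can be routed to the bypass queue with no extra read, while a normal pcluster needs z_erofs_pclusterpages() worth of device I/O:

	#include <stdbool.h>
	#include <stdio.h>

	struct toy_pcluster {
		unsigned int index;           /* 0 marks a tail-packing pcluster */
		unsigned short pclusterpages; /* normal: size in pages */
	};

	static bool toy_is_inline(const struct toy_pcluster *pcl)
	{
		return !pcl->index;
	}

	/* mirrors z_erofs_pclusterpages(): inline data sits in one (meta) page */
	static unsigned int toy_pclusterpages(const struct toy_pcluster *pcl)
	{
		return toy_is_inline(pcl) ? 1 : pcl->pclusterpages;
	}

	static void toy_submit(const struct toy_pcluster *pcl)
	{
		if (toy_is_inline(pcl))
			puts("bypass queue: data already read with metadata, no I/O");
		else
			printf("I/O queue: submit reads for %u page(s)\n",
			       toy_pclusterpages(pcl));
	}

	int main(void)
	{
		struct toy_pcluster tail = { .index = 0 };
		struct toy_pcluster normal = { .index = 7, .pclusterpages = 4 };

		toy_submit(&tail);
		toy_submit(&normal);
		return 0;
	}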