/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2018 HUAWEI, Inc.
 *             http://www.huawei.com/
 * Created by Gao Xiang <gaoxiang25@huawei.com>
 */
#ifndef __EROFS_FS_ZDATA_H
#define __EROFS_FS_ZDATA_H

#include "internal.h"
#include "zpvec.h"

#define Z_EROFS_NR_INLINE_PAGEVECS      3

/*
 * Structure fields follow one of the following exclusion rules.
 *
 * I: Modifiable by initialization/destruction paths and read-only
 *    for everyone else;
 *
 * L: Field should be protected by pageset lock;
 *
 * A: Field should be accessed / updated atomically by parallelized code.
 */
struct z_erofs_collection {
	struct mutex lock;

	/* I: page offset of start position of decompression */
	unsigned short pageofs;

	/* L: maximum relative page index in pagevec[] */
	unsigned short nr_pages;

	/* L: total number of pages in pagevec[] */
	unsigned int vcnt;

	union {
		/* L: inline a certain number of pagevecs for bootstrap */
		erofs_vtptr_t pagevec[Z_EROFS_NR_INLINE_PAGEVECS];

		/* I: can be used to free the pcluster by RCU. */
		struct rcu_head rcu;
	};
};
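
/*
 * Sketch (an assumption based on the freeing path in zdata.c, not part
 * of this header): once a pcluster becomes unreachable, the rcu head
 * above lets it be released after a grace period, roughly:
 *
 *	call_rcu(&cl->rcu, z_erofs_rcu_callback);
 *
 * where the callback uses container_of() to recover the collection and
 * then frees the owning pcluster.
 */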

#define Z_EROFS_PCLUSTER_FULL_LENGTH    0x00000001
#define Z_EROFS_PCLUSTER_LENGTH_BIT     1
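
/*
 * Hypothetical helpers (a sketch, not part of the original header) that
 * show how the "length" field of struct z_erofs_pcluster below is
 * expected to be packed with the two macros above; the helper names are
 * made up here for illustration.
 */
static inline unsigned int z_erofs_pack_length(unsigned int llen, bool full)
{
	/* shift the decompressed length up and keep the flag in bit 0 */
	return (llen << Z_EROFS_PCLUSTER_LENGTH_BIT) |
		(full ? Z_EROFS_PCLUSTER_FULL_LENGTH : 0);
}

static inline unsigned int z_erofs_unpack_length(unsigned int length)
{
	/* drop the flag bit to recover the decompressed length */
	return length >> Z_EROFS_PCLUSTER_LENGTH_BIT;
}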

/*
 * keep a dedicated type here in case another kind of
 * tagged pointer is introduced later.
 */
typedef void *z_erofs_next_pcluster_t;

struct z_erofs_pcluster {
	struct erofs_workgroup obj;
	struct z_erofs_collection primary_collection;

	/* A: point to next chained pcluster or TAILs */
	z_erofs_next_pcluster_t next;

	/* A: compressed pages (including multi-usage pages) */
	struct page *compressed_pages[Z_EROFS_CLUSTER_MAX_PAGES];

	/* A: lower limit of decompressed length and whether it is the full length */
	unsigned int length;

	/* I: compression algorithm format */
	unsigned char algorithmformat;
	/* I: bit shift of physical cluster size */
	unsigned char clusterbits;
};

#define z_erofs_primarycollection(pcluster) (&(pcluster)->primary_collection)

/* the magic values below are chosen to avoid colliding with valid 32-bit kernel addresses */

/* the chained workgroup hasn't submitted I/O yet (still open) */
#define Z_EROFS_PCLUSTER_TAIL           ((void *)0x5F0ECAFE)
/* the chained workgroup has already submitted I/O */
#define Z_EROFS_PCLUSTER_TAIL_CLOSED    ((void *)0x5F0EDEAD)

#define Z_EROFS_PCLUSTER_NIL            (NULL)
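
/*
 * Illustrative traversal of a pcluster chain (a sketch; "pos" and the
 * processing step are made up here, see zdata.c for the real consumers).
 * The chain stores the address of each pcluster's "next" member, so
 * container_of() recovers the owner:
 *
 *	z_erofs_next_pcluster_t pos = <chain head>;
 *
 *	while (pos != Z_EROFS_PCLUSTER_TAIL_CLOSED) {
 *		struct z_erofs_pcluster *pcl;
 *
 *		DBG_BUGON(pos == Z_EROFS_PCLUSTER_NIL);
 *		pcl = container_of(pos, struct z_erofs_pcluster, next);
 *		pos = READ_ONCE(pcl->next);
 *		<decompress pcl>
 *	}
 */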

#define Z_EROFS_WORKGROUP_SIZE  sizeof(struct z_erofs_pcluster)

struct z_erofs_unzip_io {
	/* A: number of bios still in flight for this decompression queue */
	atomic_t pending_bios;
	/* head of the chained pclusters to be decompressed */
	z_erofs_next_pcluster_t head;

	union {
		/* used by callers that wait for completion synchronously */
		wait_queue_head_t wait;
		/* used to schedule decompression work asynchronously */
		struct work_struct work;
	} u;
};

struct z_erofs_unzip_io_sb {
	struct z_erofs_unzip_io io;
	struct super_block *sb;
};

#define MNGD_MAPPING(sbi)	((sbi)->managed_cache->i_mapping)
static inline bool erofs_page_is_managed(const struct erofs_sb_info *sbi,
					 struct page *page)
{
	return page->mapping == MNGD_MAPPING(sbi);
}

#define Z_EROFS_ONLINEPAGE_COUNT_BITS   2
#define Z_EROFS_ONLINEPAGE_COUNT_MASK   ((1 << Z_EROFS_ONLINEPAGE_COUNT_BITS) - 1)
#define Z_EROFS_ONLINEPAGE_INDEX_SHIFT  (Z_EROFS_ONLINEPAGE_COUNT_BITS)

/*
 * waiters (a.k.a. ongoing users): the number of references that must be
 * dropped before the page can be unlocked;
 * sub-index: 0 for a partially-covered page, >= 1 for each sub-page of
 * a fully-covered page.
 */
typedef atomic_t z_erofs_onlinepage_t;
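
/*
 * Layout sketch of page_private() for an online page, following the
 * definitions above:
 *
 *	 MSB                              2 1 0
 *	+---------------------------------+-----+
 *	|     decompression sub-index     | cnt |
 *	+---------------------------------+-----+
 *
 * the low Z_EROFS_ONLINEPAGE_COUNT_BITS bits count outstanding users,
 * while the remaining bits hold the sub-index.
 */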

/* type punning to access page_private() as an atomic_t */
union z_erofs_onlinepage_converter {
	z_erofs_onlinepage_t *o;
	unsigned long *v;
};

static inline unsigned int z_erofs_onlinepage_index(struct page *page)
{
	union z_erofs_onlinepage_converter u;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	return atomic_read(u.o) >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
}

static inline void z_erofs_onlinepage_init(struct page *page)
{
	union {
		z_erofs_onlinepage_t o;
		unsigned long v;
	/* start at 1 to keep the page from being unlocked prematurely */
	} u = { .o = ATOMIC_INIT(1) };

	set_page_private(page, u.v);
	smp_wmb();
	SetPagePrivate(page);
}

static inline void z_erofs_onlinepage_fixup(struct page *page,
	uintptr_t index, bool down)
{
	unsigned long *p, o, v, id;
repeat:
	p = &page_private(page);
	o = READ_ONCE(*p);

	id = o >> Z_EROFS_ONLINEPAGE_INDEX_SHIFT;
	if (id) {
		/* a sub-index was set before; it must match @index if given */
		if (!index)
			return;

		DBG_BUGON(id != index);
	}

	/* pack the sub-index with the (possibly bumped) user count */
	v = (index << Z_EROFS_ONLINEPAGE_INDEX_SHIFT) |
		((o & Z_EROFS_ONLINEPAGE_COUNT_MASK) + (unsigned int)down);
	/* retry if page_private() was updated concurrently */
	if (cmpxchg(p, o, v) != o)
		goto repeat;
}

static inline void z_erofs_onlinepage_endio(struct page *page)
{
	union z_erofs_onlinepage_converter u;
	unsigned int v;

	DBG_BUGON(!PagePrivate(page));
	u.v = &page_private(page);

	v = atomic_dec_return(u.o);
	/* the last user is gone: mark the page and unlock it */
	if (!(v & Z_EROFS_ONLINEPAGE_COUNT_MASK)) {
		ClearPagePrivate(page);
		if (!PageError(page))
			SetPageUptodate(page);
		unlock_page(page);
	}
	debugln("%s, page %p value %x", __func__, page, atomic_read(u.o));
}
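
/*
 * Typical lifecycle of an online page (a sketch only; the real call
 * sites live in zdata.c):
 *
 *	z_erofs_onlinepage_init(page);			count = 1
 *	z_erofs_onlinepage_fixup(page, i, true);	count++ per sub-page user
 *	...
 *	z_erofs_onlinepage_endio(page);			count-- per dropped reference
 *
 * once the low count bits drop back to zero, the page is marked
 * uptodate (unless an error occurred) and unlocked.
 */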

#define Z_EROFS_VMAP_ONSTACK_PAGES	\
	min_t(unsigned int, THREAD_SIZE / 8 / sizeof(struct page *), 96U)
#define Z_EROFS_VMAP_GLOBAL_PAGES	2048
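
/*
 * Note (an assumption based on the decompression callers in zdata.c):
 * small pclusters use an on-stack page array of up to
 * Z_EROFS_VMAP_ONSTACK_PAGES entries (at most 1/8 of THREAD_SIZE of
 * kernel stack, capped at 96 pointers), while larger ones fall back to
 * a shared global array of Z_EROFS_VMAP_GLOBAL_PAGES entries.
 */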

#endif