author		David Howells <dhowells@redhat.com>	2022-01-27 16:02:58 +0000
committer	Steve French <stfrench@microsoft.com>	2022-02-01 10:29:18 -0600
commit		0174ee9947bd0f24fee2794b35258960d108b7aa (patch)
tree		83086393333773940fbdac71557aca27d17c66d2 /fs/cifs/file.c
parent		bee9f65523218e3baeeecde9295c8fbe9bc08e0a (diff)
download	linux-0174ee9947bd0f24fee2794b35258960d108b7aa.tar.bz2
cifs: Implement cache I/O by accessing the cache directly
Move cifs to using the fscache DIO API instead of the old upstream I/O API, as that has been removed. This is a stopgap solution, as the intention is that at some point in the future the cache will move to using larger blocks and won't be able to store individual pages, in order to deal with the potential for data corruption due to the backing filesystem being able to insert/remove bridging blocks of zeros into its extent list[1].

cifs then reads and writes cache pages synchronously, one page at a time. The preferred change would be to use the netfs lib, but the new I/O API can be used directly. It's just that as the cache now needs to track data for itself, caching blocks may exceed page size...

This code is somewhat borrowed from my "fallback I/O" patchset[2].

Signed-off-by: David Howells <dhowells@redhat.com>
cc: Steve French <smfrench@gmail.com>
cc: Shyam Prasad N <nspmangalore@gmail.com>
cc: linux-cifs@vger.kernel.org
cc: linux-cachefs@redhat.com
Link: https://lore.kernel.org/r/YO17ZNOcq+9PajfQ@mit.edu [1]
Link: https://lore.kernel.org/r/202112100957.2oEDT20W-lkp@intel.com/ [2]
Acked-by: Jeff Layton <jlayton@kernel.org>
Signed-off-by: Steve French <stfrench@microsoft.com>
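The upshot for the code below is that cifs_readpage_from_fscache() and cifs_readpage_to_fscache() now perform a synchronous, single-page transfer through the new API. A minimal sketch of the read side, modelled on the fallback I/O helpers (the real helper lives in fs/cifs/fscache.c, outside this diffstat, and the name here follows that patchset):

/*
 * Sketch: synchronous single-page read through the fscache DIO API.
 */
static int fscache_fallback_read_page(struct inode *inode, struct page *page)
{
	struct netfs_cache_resources cres;
	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
	struct iov_iter iter;
	struct bio_vec bvec[1];
	int ret;

	memset(&cres, 0, sizeof(cres));
	bvec[0].bv_page		= page;
	bvec[0].bv_offset	= 0;
	bvec[0].bv_len		= PAGE_SIZE;
	iov_iter_bvec(&iter, READ, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);

	ret = fscache_begin_read_operation(&cres, cookie);
	if (ret < 0)
		return ret;

	/* A NULL termination func makes fscache_read() synchronous;
	 * NETFS_READ_HOLE_FAIL reports a hole as an error rather than
	 * clearing the buffer.
	 */
	ret = fscache_read(&cres, page_offset(page), &iter,
			   NETFS_READ_HOLE_FAIL, NULL, NULL);
	fscache_end_operation(&cres);
	return ret;
}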
Diffstat (limited to 'fs/cifs/file.c')
-rw-r--r--	fs/cifs/file.c	55
1 file changed, 50 insertions, 5 deletions
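For context, the readahead rework in the diff below decides chunk by chunk whether the next pages should come from the cache or from the server, using cifs_fscache_query_occupancy(). That helper is added alongside this change in fs/cifs/fscache.c; a sketch of what it does, assuming the netfs cache ops expose query_occupancy() (the function name below is illustrative, not the real symbol):

/*
 * Sketch: ask the cache which pages in [first, first + nr_pages) it
 * holds; on success, *_data_first/*_data_nr_pages describe the next
 * cached chunk at or after @first.
 */
static int query_occupancy_sketch(struct inode *inode,
				  pgoff_t first, unsigned int nr_pages,
				  pgoff_t *_data_first,
				  unsigned int *_data_nr_pages)
{
	struct netfs_cache_resources cres;
	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
	loff_t start, data_start;
	size_t len, data_len;
	int ret;

	ret = fscache_begin_read_operation(&cres, cookie);
	if (ret < 0)
		return ret;

	start = first * PAGE_SIZE;
	len = nr_pages * PAGE_SIZE;
	ret = cres.ops->query_occupancy(&cres, start, len, PAGE_SIZE,
					&data_start, &data_len);
	if (ret == 0) {
		*_data_first = data_start / PAGE_SIZE;
		*_data_nr_pages = data_len / PAGE_SIZE;
	}

	fscache_end_operation(&cres);
	return ret;
}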
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index be62dc29dc54..a50912674915 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -4276,12 +4276,12 @@ cifs_readv_complete(struct work_struct *work)
} else
SetPageError(page);
- unlock_page(page);
-
if (rdata->result == 0 ||
(rdata->result == -EAGAIN && got_bytes))
cifs_readpage_to_fscache(rdata->mapping->host, page);
+ unlock_page(page);
+
got_bytes -= min_t(unsigned int, PAGE_SIZE, got_bytes);
put_page(page);
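This first hunk moves unlock_page() to after cifs_readpage_to_fscache(): with direct cache I/O the page's contents must stay stable while they are copied into the cache, so the page has to remain locked until the write has been issued. The write side pairs with the read sketch above, roughly (again assuming a fallback-style helper in fs/cifs/fscache.c):

/*
 * Sketch: synchronous single-page write to the cache.  The caller must
 * hold the page lock so the data is stable while it is copied out.
 */
static int fscache_fallback_write_page(struct inode *inode, struct page *page,
				       bool no_space_allocated_yet)
{
	struct netfs_cache_resources cres;
	struct fscache_cookie *cookie = cifs_inode_cookie(inode);
	struct iov_iter iter;
	struct bio_vec bvec[1];
	loff_t start = page_offset(page);
	size_t len = PAGE_SIZE;
	int ret;

	memset(&cres, 0, sizeof(cres));
	bvec[0].bv_page		= page;
	bvec[0].bv_offset	= 0;
	bvec[0].bv_len		= PAGE_SIZE;
	iov_iter_bvec(&iter, WRITE, bvec, ARRAY_SIZE(bvec), PAGE_SIZE);

	ret = fscache_begin_write_operation(&cres, cookie);
	if (ret < 0)
		return ret;

	/* Let the cache reserve space / round the range before writing. */
	ret = cres.ops->prepare_write(&cres, &start, &len, i_size_read(inode),
				      no_space_allocated_yet);
	if (ret == 0)
		ret = fscache_write(&cres, page_offset(page), &iter, NULL, NULL);
	fscache_end_operation(&cres);
	return ret;
}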
@@ -4396,7 +4396,11 @@ static void cifs_readahead(struct readahead_control *ractl)
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(ractl->file);
struct TCP_Server_Info *server;
pid_t pid;
- unsigned int xid, last_batch_size = 0;
+ unsigned int xid, nr_pages, last_batch_size = 0, cache_nr_pages = 0;
+ pgoff_t next_cached = ULONG_MAX;
+ bool caching = fscache_cookie_enabled(cifs_inode_cookie(ractl->mapping->host)) &&
+		cifs_inode_cookie(ractl->mapping->host)->cache_priv;
+ bool check_cache = caching;
xid = get_xid();
@@ -4414,12 +4418,52 @@ static void cifs_readahead(struct readahead_control *ractl)
/*
* Chop the readahead request up into rsize-sized read requests.
*/
- while (readahead_count(ractl) - last_batch_size) {
- unsigned int i, nr_pages, got, rsize;
+ while ((nr_pages = readahead_count(ractl) - last_batch_size)) {
+ unsigned int i, got, rsize;
struct page *page;
struct cifs_readdata *rdata;
struct cifs_credits credits_on_stack;
struct cifs_credits *credits = &credits_on_stack;
+ pgoff_t index = readahead_index(ractl) + last_batch_size;
+
+ /*
+ * Find out if we have anything cached in the range of
+ * interest, and if so, where the next chunk of cached data is.
+ */
+ if (caching) {
+ if (check_cache) {
+ rc = cifs_fscache_query_occupancy(
+ ractl->mapping->host, index, nr_pages,
+ &next_cached, &cache_nr_pages);
+ if (rc < 0)
+ caching = false;
+ check_cache = false;
+ }
+
+ if (index == next_cached) {
+ /*
+ * TODO: Send a whole batch of pages to be read
+ * by the cache.
+ */
+ page = readahead_page(ractl);
+
+ if (cifs_readpage_from_fscache(ractl->mapping->host,
+ page) < 0) {
+ /*
+ * TODO: Deal with cache read failure
+ * here, but for the moment, delegate
+ * that to readpage.
+ */
+ caching = false;
+ }
+ unlock_page(page);
+ next_cached++;
+ cache_nr_pages--;
+ if (cache_nr_pages == 0)
+ check_cache = true;
+ continue;
+ }
+ }
if (open_file->invalidHandle) {
rc = cifs_reopen_file(open_file, true);
@@ -4435,6 +4479,7 @@ static void cifs_readahead(struct readahead_control *ractl)
if (rc)
break;
nr_pages = min_t(size_t, rsize / PAGE_SIZE, readahead_count(ractl));
+ nr_pages = min_t(size_t, nr_pages, next_cached - index);
/*
* Give up immediately if rsize is too small to read an entire