author    David Howells <dhowells@redhat.com>   2017-03-16 16:27:44 +0000
committer David Howells <dhowells@redhat.com>   2017-03-16 16:27:44 +0000
commit    e8e581a88c5f5fc7cf1f636d122b77fbcfc8c2f6 (patch)
tree      f3a0da55163b43bbbc888c5638bb99e5cde1c301 /fs
parent    3448e6521755862446aed28e29abf12565d8844e (diff)
download  linux-e8e581a88c5f5fc7cf1f636d122b77fbcfc8c2f6.tar.bz2
afs: Handle a short write to an AFS page
Handle the situation where afs_write_begin() is told to expect that a
full-page write will be made, but this doesn't happen (EFAULT, CTRL-C,
etc.), and so afs_write_end() sees that a partial write took place.
Currently, no attempt is made to deal with the discrepancy.

Fix this by loading the gap from the server.

Reported-by: Al Viro <viro@ZenIV.linux.org.uk>
Signed-off-by: David Howells <dhowells@redhat.com>
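For context on how the discrepancy reaches afs_write_end(): the generic buffered write loop tells ->write_begin() how many bytes it intends to copy into the page, performs the copy from userspace, and then passes however many bytes actually landed in the page to ->write_end(). The fragment below is a trimmed paraphrase of that loop as it looked in kernels of this era (generic_perform_write() in mm/filemap.c); it is not a compilable unit on its own, and the surrounding loop control and error handling are omitted.

	/* Paraphrased sketch: if the copy faults partway through (unresolvable
	 * EFAULT, a fatal signal, and so on), "copied" comes back smaller than
	 * the "bytes" that ->write_begin() was told to expect, and it is up to
	 * ->write_end() to cope with the partly filled page.
	 */
	status = a_ops->write_begin(file, mapping, pos, bytes, flags,
				    &page, &fsdata);

	copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes);
	flush_dcache_page(page);

	status = a_ops->write_end(file, mapping, pos, bytes, copied,
				  page, fsdata);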
Diffstat (limited to 'fs')
-rw-r--r--	fs/afs/fsclient.c	 4
-rw-r--r--	fs/afs/internal.h	 2
-rw-r--r--	fs/afs/write.c		28
3 files changed, 23 insertions(+), 11 deletions(-)
diff --git a/fs/afs/fsclient.c b/fs/afs/fsclient.c
index bf8904a1a58f..6f917dd1238c 100644
--- a/fs/afs/fsclient.c
+++ b/fs/afs/fsclient.c
@@ -393,8 +393,10 @@ static int afs_deliver_fs_fetch_data(struct afs_call *call)
 			if (req->remain > 0) {
 				call->offset = 0;
 				req->index++;
-				if (req->index >= req->nr_pages)
+				if (req->index >= req->nr_pages) {
+					call->unmarshall = 4;
 					goto begin_discard;
+				}
 				goto begin_page;
 			}
 		}
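The reason call->unmarshall has to be set before the jump is that the deliver routine can run out of reply data, return to its caller, and be re-entered later, resuming at whichever case of its switch statement call->unmarshall records. Jumping forward into the discard code without recording the new phase would make a later re-entry restart in the page-extraction case instead. The standalone program below is only an illustration of that resumable-switch pattern; the state names and numbers are invented for the example and are not the real AFS unmarshalling states.

#include <stdio.h>

/* Illustrative only, not AFS code: a deliver-style function that may run out
 * of input, return to its caller, and later be re-entered, resuming at the
 * case recorded in *state.  Jumping forward into a later case with goto must
 * also update *state, which is the point of the "call->unmarshall = 4" above.
 */
enum { EXTRACT_PAGES = 3, DISCARD_EXCESS = 4, DONE = 5 };

static int deliver(int *state, int *avail)
{
	switch (*state) {
	case EXTRACT_PAGES:
		printf("extracting page data\n");
		*state = DISCARD_EXCESS;	/* record the new phase first */
		goto begin_discard;

	case DISCARD_EXCESS:
	begin_discard:
		if (*avail == 0)
			return -1;		/* no data yet; caller re-enters later */
		printf("discarding %d excess bytes\n", *avail);
		*avail = 0;
		*state = DONE;
		/* fall through */
	case DONE:
		printf("done\n");
	}
	return 0;
}

int main(void)
{
	int state = EXTRACT_PAGES, avail = 0;

	if (deliver(&state, &avail) < 0) {	/* first pass runs out of data */
		avail = 128;			/* more reply data arrives */
		deliver(&state, &avail);	/* resumes in DISCARD_EXCESS */
	}
	return 0;
}

Without the *state update before the goto, the second call would re-enter at EXTRACT_PAGES and repeat the extraction work instead of resuming the discard.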
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 7784a8bc375c..dc2cb486e127 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -130,7 +130,7 @@ struct afs_call_type {
  */
 struct afs_read {
 	loff_t			pos;		/* Where to start reading */
-	loff_t			len;		/* How much to read */
+	loff_t			len;		/* How much we're asking for */
 	loff_t			actual_len;	/* How much we're actually getting */
 	atomic_t		usage;
 	unsigned int		remain;		/* Amount remaining */
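A small, self-contained model of the len/actual_len distinction the renamed comment is pointing at: the caller asks for len bytes, the server can only supply actual_len of them (for instance because the range runs past EOF), and the receive path zero-fills the remainder so the whole requested span is defined. This is a toy sketch, not AFS code; struct toy_read and toy_fetch() are invented names.

#include <stdio.h>
#include <string.h>

/* Toy model of struct afs_read's length fields. */
struct toy_read {
	long long pos;		/* where to start reading */
	long long len;		/* how much we're asking for */
	long long actual_len;	/* how much the server actually returns */
};

static void toy_fetch(struct toy_read *req, char *buf, long long file_size)
{
	long long avail = file_size - req->pos;

	req->actual_len = avail < req->len ? (avail > 0 ? avail : 0) : req->len;
	memset(buf, 'D', req->actual_len);		/* "data" from the server */
	memset(buf + req->actual_len, 0,		/* clear past EOF, like the */
	       req->len - req->actual_len);		/* unmarshalling routine does */
}

int main(void)
{
	char buf[16];
	struct toy_read req = { .pos = 10, .len = sizeof(buf) };

	toy_fetch(&req, buf, 20);	/* 20-byte "file": only 10 bytes remain */
	printf("asked for %lld, got %lld\n", req.len, req.actual_len);
	return 0;
}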
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 3ac52f6a96ff..ea66890fc188 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -84,10 +84,9 @@ void afs_put_writeback(struct afs_writeback *wb)
  * partly or wholly fill a page that's under preparation for writing
  */
 static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
-			 loff_t pos, struct page *page)
+			 loff_t pos, unsigned int len, struct page *page)
 {
 	struct afs_read *req;
-	loff_t i_size;
 	int ret;
 
 	_enter(",,%llu", (unsigned long long)pos);
@@ -99,16 +98,11 @@ static int afs_fill_page(struct afs_vnode *vnode, struct key *key,
 	atomic_set(&req->usage, 1);
 	req->pos = pos;
+	req->len = len;
 	req->nr_pages = 1;
 	req->pages[0] = page;
 	get_page(page);
 
-	i_size = i_size_read(&vnode->vfs_inode);
-	if (pos + PAGE_SIZE > i_size)
-		req->len = i_size - pos;
-	else
-		req->len = PAGE_SIZE;
-
 	ret = afs_vnode_fetch_data(vnode, key, req);
 	afs_put_read(req);
 	if (ret < 0) {
@@ -164,7 +158,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
 	/* page won't leak in error case: it eventually gets cleaned off LRU */
 	if (!PageUptodate(page) && len != PAGE_SIZE) {
-		ret = afs_fill_page(vnode, key, index << PAGE_SHIFT, page);
+		ret = afs_fill_page(vnode, key, pos & PAGE_MASK, PAGE_SIZE, page);
 		if (ret < 0) {
 			kfree(candidate);
 			_leave(" = %d [prep]", ret);
@@ -258,7 +252,9 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 		  struct page *page, void *fsdata)
 {
 	struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
+	struct key *key = file->private_data;
 	loff_t i_size, maybe_i_size;
+	int ret;
 
 	_enter("{%x:%u},{%lx}",
 	       vnode->fid.vid, vnode->fid.vnode, page->index);
@@ -274,6 +270,20 @@ int afs_write_end(struct file *file, struct address_space *mapping,
 		spin_unlock(&vnode->writeback_lock);
 	}
 
+	if (!PageUptodate(page)) {
+		if (copied < len) {
+			/* Try and load any missing data from the server. The
+			 * unmarshalling routine will take care of clearing any
+			 * bits that are beyond the EOF.
+			 */
+			ret = afs_fill_page(vnode, key, pos + copied,
+					    len - copied, page);
+			if (ret < 0)
+				return ret;
+		}
+		SetPageUptodate(page);
+	}
+
 	set_page_dirty(page);
 	if (PageDirty(page))
 		_debug("dirtied");
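As a userspace-level illustration of the kind of situation the commit message describes, the program below hands write() a two-page buffer whose second page has been made unreadable; the kernel stops copying at the fault and returns a short count rather than failing the whole call. Whether a given page ends up partially filled (the copied < len case that afs_write_end() now repairs) depends on where the fault lands relative to page boundaries, so this only demonstrates the general short-write behaviour. The path /afs/test/file is a placeholder; any writable file shows the short count, but a file on a mounted AFS volume is needed to exercise this code path specifically.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	long page = sysconf(_SC_PAGESIZE);

	/* Two anonymous pages: the first readable, the second PROT_NONE, so
	 * the kernel faults when it tries to copy past the first page.
	 */
	char *buf = mmap(NULL, 2 * page, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED)
		return 1;
	memset(buf, 'x', page);
	mprotect(buf + page, page, PROT_NONE);

	/* "/afs/test/file" is a placeholder path used for illustration. */
	int fd = open("/afs/test/file", O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (fd < 0)
		return 1;

	/* Typically returns a short count (the readable first page) instead
	 * of writing the full two pages that were requested.
	 */
	ssize_t n = write(fd, buf, 2 * page);
	printf("asked for %ld bytes, write() returned %zd\n", 2 * page, n);

	close(fd);
	return 0;
}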