Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/vfs_addr.c | 83
-rw-r--r--  fs/9p/vfs_file.c | 20
-rw-r--r--  fs/afs/dir.c | 229
-rw-r--r--  fs/afs/dir_edit.c | 154
-rw-r--r--  fs/afs/file.c | 70
-rw-r--r--  fs/afs/internal.h | 46
-rw-r--r--  fs/afs/write.c | 347
-rw-r--r--  fs/attr.c | 4
-rw-r--r--  fs/btrfs/async-thread.c | 14
-rw-r--r--  fs/btrfs/disk-io.c | 14
-rw-r--r--  fs/btrfs/ioctl.c | 4
-rw-r--r--  fs/btrfs/lzo.c | 13
-rw-r--r--  fs/btrfs/scrub.c | 4
-rw-r--r--  fs/btrfs/volumes.c | 18
-rw-r--r--  fs/btrfs/zstd.c | 68
-rw-r--r--  fs/ceph/addr.c | 109
-rw-r--r--  fs/ceph/cache.c | 23
-rw-r--r--  fs/ceph/caps.c | 151
-rw-r--r--  fs/ceph/debugfs.c | 167
-rw-r--r--  fs/ceph/export.c | 12
-rw-r--r--  fs/ceph/file.c | 103
-rw-r--r--  fs/ceph/inode.c | 54
-rw-r--r--  fs/ceph/locks.c | 6
-rw-r--r--  fs/ceph/mds_client.c | 139
-rw-r--r--  fs/ceph/mdsmap.c | 4
-rw-r--r--  fs/ceph/metric.c | 128
-rw-r--r--  fs/ceph/metric.h | 88
-rw-r--r--  fs/ceph/super.c | 17
-rw-r--r--  fs/ceph/super.h | 18
-rw-r--r--  fs/cifs/cifs_debug.c | 7
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 59
-rw-r--r--  fs/cifs/cifs_fs_sb.h | 5
-rw-r--r--  fs/cifs/cifsglob.h | 47
-rw-r--r--  fs/cifs/cifsproto.h | 10
-rw-r--r--  fs/cifs/connect.c | 1468
-rw-r--r--  fs/cifs/dfs_cache.c | 46
-rw-r--r--  fs/cifs/file.c | 35
-rw-r--r--  fs/cifs/fs_context.c | 36
-rw-r--r--  fs/cifs/fs_context.h | 1
-rw-r--r--  fs/cifs/fscache.c | 8
-rw-r--r--  fs/cifs/misc.c | 64
-rw-r--r--  fs/cifs/ntlmssp.h | 4
-rw-r--r--  fs/cifs/sess.c | 240
-rw-r--r--  fs/cifs/smb2inode.c | 22
-rw-r--r--  fs/cifs/smb2ops.c | 10
-rw-r--r--  fs/cifs/smb2pdu.c | 52
-rw-r--r--  fs/cifs/transport.c | 3
-rw-r--r--  fs/erofs/zdata.c | 33
-rw-r--r--  fs/erofs/zdata.h | 1
-rw-r--r--  fs/erofs/zpvec.h | 13
-rw-r--r--  fs/f2fs/checkpoint.c | 8
-rw-r--r--  fs/f2fs/compress.c | 76
-rw-r--r--  fs/f2fs/data.c | 95
-rw-r--r--  fs/f2fs/f2fs.h | 54
-rw-r--r--  fs/f2fs/file.c | 6
-rw-r--r--  fs/f2fs/gc.c | 5
-rw-r--r--  fs/f2fs/inline.c | 2
-rw-r--r--  fs/f2fs/inode.c | 4
-rw-r--r--  fs/f2fs/namei.c | 32
-rw-r--r--  fs/f2fs/node.c | 1
-rw-r--r--  fs/f2fs/node.h | 5
-rw-r--r--  fs/f2fs/recovery.c | 14
-rw-r--r--  fs/f2fs/segment.c | 83
-rw-r--r--  fs/f2fs/segment.h | 1
-rw-r--r--  fs/f2fs/super.c | 41
-rw-r--r--  fs/f2fs/sysfs.c | 24
-rw-r--r--  fs/f2fs/verity.c | 2
-rw-r--r--  fs/f2fs/xattr.c | 2
-rw-r--r--  fs/gfs2/bmap.c | 2
-rw-r--r--  fs/gfs2/file.c | 9
-rw-r--r--  fs/gfs2/glock.c | 12
-rw-r--r--  fs/gfs2/super.c | 14
-rw-r--r--  fs/io-wq.c | 17
-rw-r--r--  fs/ksmbd/Kconfig | 2
-rw-r--r--  fs/ksmbd/auth.c | 11
-rw-r--r--  fs/ksmbd/connection.c | 11
-rw-r--r--  fs/ksmbd/ksmbd_work.c | 1
-rw-r--r--  fs/ksmbd/ksmbd_work.h | 4
-rw-r--r--  fs/ksmbd/oplock.c | 48
-rw-r--r--  fs/ksmbd/oplock.h | 2
-rw-r--r--  fs/ksmbd/server.c | 2
-rw-r--r--  fs/ksmbd/smb2misc.c | 16
-rw-r--r--  fs/ksmbd/smb2ops.c | 9
-rw-r--r--  fs/ksmbd/smb2pdu.c | 591
-rw-r--r--  fs/ksmbd/smb2pdu.h | 696
-rw-r--r--  fs/ksmbd/smb_common.c | 13
-rw-r--r--  fs/ksmbd/smb_common.h | 55
-rw-r--r--  fs/ksmbd/transport_rdma.c | 3
-rw-r--r--  fs/ksmbd/vfs.c | 8
-rw-r--r--  fs/ksmbd/vfs.h | 39
-rw-r--r--  fs/netfs/read_helper.c | 165
-rw-r--r--  fs/nfsd/nfs4xdr.c | 7
-rw-r--r--  fs/pstore/platform.c | 2
-rw-r--r--  fs/squashfs/zstd_wrapper.c | 16
-rw-r--r--  fs/udf/dir.c | 32
-rw-r--r--  fs/udf/namei.c | 3
-rw-r--r--  fs/udf/super.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_ag.c | 2
-rw-r--r--  fs/xfs/libxfs/xfs_ag.h | 8
-rw-r--r--  fs/xfs/libxfs/xfs_btree.c | 4
-rw-r--r--  fs/xfs/libxfs/xfs_da_btree.c | 5
101 files changed, 3198 insertions(+), 3339 deletions(-)
diff --git a/fs/9p/vfs_addr.c b/fs/9p/vfs_addr.c
index adafdf86f42f..fac918ccb305 100644
--- a/fs/9p/vfs_addr.c
+++ b/fs/9p/vfs_addr.c
@@ -108,7 +108,9 @@ static const struct netfs_read_request_ops v9fs_req_ops = {
*/
static int v9fs_vfs_readpage(struct file *file, struct page *page)
{
- return netfs_readpage(file, page, &v9fs_req_ops, NULL);
+ struct folio *folio = page_folio(page);
+
+ return netfs_readpage(file, folio, &v9fs_req_ops, NULL);
}
/**
@@ -130,13 +132,15 @@ static void v9fs_vfs_readahead(struct readahead_control *ractl)
static int v9fs_release_page(struct page *page, gfp_t gfp)
{
- if (PagePrivate(page))
+ struct folio *folio = page_folio(page);
+
+ if (folio_test_private(folio))
return 0;
#ifdef CONFIG_9P_FSCACHE
- if (PageFsCache(page)) {
+ if (folio_test_fscache(folio)) {
if (!(gfp & __GFP_DIRECT_RECLAIM) || !(gfp & __GFP_FS))
return 0;
- wait_on_page_fscache(page);
+ folio_wait_fscache(folio);
}
#endif
return 1;
@@ -152,55 +156,58 @@ static int v9fs_release_page(struct page *page, gfp_t gfp)
static void v9fs_invalidate_page(struct page *page, unsigned int offset,
unsigned int length)
{
- wait_on_page_fscache(page);
+ struct folio *folio = page_folio(page);
+
+ folio_wait_fscache(folio);
}
-static int v9fs_vfs_writepage_locked(struct page *page)
+static int v9fs_vfs_write_folio_locked(struct folio *folio)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio_inode(folio);
struct v9fs_inode *v9inode = V9FS_I(inode);
- loff_t start = page_offset(page);
- loff_t size = i_size_read(inode);
+ loff_t start = folio_pos(folio);
+ loff_t i_size = i_size_read(inode);
struct iov_iter from;
- int err, len;
+ size_t len = folio_size(folio);
+ int err;
+
+ if (start >= i_size)
+ return 0; /* Simultaneous truncation occurred */
- if (page->index == size >> PAGE_SHIFT)
- len = size & ~PAGE_MASK;
- else
- len = PAGE_SIZE;
+ len = min_t(loff_t, i_size - start, len);
- iov_iter_xarray(&from, WRITE, &page->mapping->i_pages, start, len);
+ iov_iter_xarray(&from, WRITE, &folio_mapping(folio)->i_pages, start, len);
/* We should have writeback_fid always set */
BUG_ON(!v9inode->writeback_fid);
- set_page_writeback(page);
+ folio_start_writeback(folio);
p9_client_write(v9inode->writeback_fid, start, &from, &err);
- end_page_writeback(page);
+ folio_end_writeback(folio);
return err;
}
static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
{
+ struct folio *folio = page_folio(page);
int retval;
- p9_debug(P9_DEBUG_VFS, "page %p\n", page);
+ p9_debug(P9_DEBUG_VFS, "folio %p\n", folio);
- retval = v9fs_vfs_writepage_locked(page);
+ retval = v9fs_vfs_write_folio_locked(folio);
if (retval < 0) {
if (retval == -EAGAIN) {
- redirty_page_for_writepage(wbc, page);
+ folio_redirty_for_writepage(wbc, folio);
retval = 0;
} else {
- SetPageError(page);
- mapping_set_error(page->mapping, retval);
+ mapping_set_error(folio_mapping(folio), retval);
}
} else
retval = 0;
- unlock_page(page);
+ folio_unlock(folio);
return retval;
}
@@ -213,14 +220,15 @@ static int v9fs_vfs_writepage(struct page *page, struct writeback_control *wbc)
static int v9fs_launder_page(struct page *page)
{
+ struct folio *folio = page_folio(page);
int retval;
- if (clear_page_dirty_for_io(page)) {
- retval = v9fs_vfs_writepage_locked(page);
+ if (folio_clear_dirty_for_io(folio)) {
+ retval = v9fs_vfs_write_folio_locked(folio);
if (retval)
return retval;
}
- wait_on_page_fscache(page);
+ folio_wait_fscache(folio);
return 0;
}
@@ -265,10 +273,10 @@ v9fs_direct_IO(struct kiocb *iocb, struct iov_iter *iter)
static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int flags,
- struct page **pagep, void **fsdata)
+ struct page **subpagep, void **fsdata)
{
int retval;
- struct page *page;
+ struct folio *folio;
struct v9fs_inode *v9inode = V9FS_I(mapping->host);
p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
@@ -279,31 +287,32 @@ static int v9fs_write_begin(struct file *filp, struct address_space *mapping,
* file. We need to do this before we get a lock on the page in case
* there's more than one writer competing for the same cache block.
*/
- retval = netfs_write_begin(filp, mapping, pos, len, flags, &page, fsdata,
+ retval = netfs_write_begin(filp, mapping, pos, len, flags, &folio, fsdata,
&v9fs_req_ops, NULL);
if (retval < 0)
return retval;
- *pagep = find_subpage(page, pos / PAGE_SIZE);
+ *subpagep = &folio->page;
return retval;
}
static int v9fs_write_end(struct file *filp, struct address_space *mapping,
loff_t pos, unsigned int len, unsigned int copied,
- struct page *page, void *fsdata)
+ struct page *subpage, void *fsdata)
{
loff_t last_pos = pos + copied;
- struct inode *inode = page->mapping->host;
+ struct folio *folio = page_folio(subpage);
+ struct inode *inode = mapping->host;
p9_debug(P9_DEBUG_VFS, "filp %p, mapping %p\n", filp, mapping);
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
if (unlikely(copied < len)) {
copied = 0;
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
/*
@@ -314,10 +323,10 @@ static int v9fs_write_end(struct file *filp, struct address_space *mapping,
inode_add_bytes(inode, last_pos - inode->i_size);
i_size_write(inode, last_pos);
}
- set_page_dirty(page);
+ folio_mark_dirty(folio);
out:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return copied;
}
diff --git a/fs/9p/vfs_file.c b/fs/9p/vfs_file.c
index 4244d48398ef..612e297f3763 100644
--- a/fs/9p/vfs_file.c
+++ b/fs/9p/vfs_file.c
@@ -528,13 +528,13 @@ static vm_fault_t
v9fs_vm_page_mkwrite(struct vm_fault *vmf)
{
struct v9fs_inode *v9inode;
- struct page *page = vmf->page;
+ struct folio *folio = page_folio(vmf->page);
struct file *filp = vmf->vma->vm_file;
struct inode *inode = file_inode(filp);
- p9_debug(P9_DEBUG_VFS, "page %p fid %lx\n",
- page, (unsigned long)filp->private_data);
+ p9_debug(P9_DEBUG_VFS, "folio %p fid %lx\n",
+ folio, (unsigned long)filp->private_data);
v9inode = V9FS_I(inode);
@@ -542,24 +542,24 @@ v9fs_vm_page_mkwrite(struct vm_fault *vmf)
* be modified. We then assume the entire page will need writing back.
*/
#ifdef CONFIG_9P_FSCACHE
- if (PageFsCache(page) &&
- wait_on_page_fscache_killable(page) < 0)
- return VM_FAULT_RETRY;
+ if (folio_test_fscache(folio) &&
+ folio_wait_fscache_killable(folio) < 0)
+ return VM_FAULT_NOPAGE;
#endif
/* Update file times before taking page lock */
file_update_time(filp);
BUG_ON(!v9inode->writeback_fid);
- if (lock_page_killable(page) < 0)
+ if (folio_lock_killable(folio) < 0)
return VM_FAULT_RETRY;
- if (page->mapping != inode->i_mapping)
+ if (folio_mapping(folio) != inode->i_mapping)
goto out_unlock;
- wait_for_stable_page(page);
+ folio_wait_stable(folio);
return VM_FAULT_LOCKED;
out_unlock:
- unlock_page(page);
+ folio_unlock(folio);
return VM_FAULT_NOPAGE;
}
diff --git a/fs/afs/dir.c b/fs/afs/dir.c
index 4579bbda4634..da9b4f8577a1 100644
--- a/fs/afs/dir.c
+++ b/fs/afs/dir.c
@@ -103,13 +103,13 @@ struct afs_lookup_cookie {
};
/*
- * Drop the refs that we're holding on the pages we were reading into. We've
+ * Drop the refs that we're holding on the folios we were reading into. We've
* got refs on the first nr_pages pages.
*/
static void afs_dir_read_cleanup(struct afs_read *req)
{
struct address_space *mapping = req->vnode->vfs_inode.i_mapping;
- struct page *page;
+ struct folio *folio;
pgoff_t last = req->nr_pages - 1;
XA_STATE(xas, &mapping->i_pages, 0);
@@ -118,65 +118,56 @@ static void afs_dir_read_cleanup(struct afs_read *req)
return;
rcu_read_lock();
- xas_for_each(&xas, page, last) {
- if (xas_retry(&xas, page))
+ xas_for_each(&xas, folio, last) {
+ if (xas_retry(&xas, folio))
continue;
- BUG_ON(xa_is_value(page));
- BUG_ON(PageCompound(page));
- ASSERTCMP(page->mapping, ==, mapping);
+ BUG_ON(xa_is_value(folio));
+ ASSERTCMP(folio_file_mapping(folio), ==, mapping);
- put_page(page);
+ folio_put(folio);
}
rcu_read_unlock();
}
/*
- * check that a directory page is valid
+ * check that a directory folio is valid
*/
-static bool afs_dir_check_page(struct afs_vnode *dvnode, struct page *page,
- loff_t i_size)
+static bool afs_dir_check_folio(struct afs_vnode *dvnode, struct folio *folio,
+ loff_t i_size)
{
- struct afs_xdr_dir_page *dbuf;
- loff_t latter, off;
- int tmp, qty;
+ union afs_xdr_dir_block *block;
+ size_t offset, size;
+ loff_t pos;
- /* Determine how many magic numbers there should be in this page, but
+ /* Determine how many magic numbers there should be in this folio, but
* we must take care because the directory may change size under us.
*/
- off = page_offset(page);
- if (i_size <= off)
+ pos = folio_pos(folio);
+ if (i_size <= pos)
goto checked;
- latter = i_size - off;
- if (latter >= PAGE_SIZE)
- qty = PAGE_SIZE;
- else
- qty = latter;
- qty /= sizeof(union afs_xdr_dir_block);
-
- /* check them */
- dbuf = kmap_atomic(page);
- for (tmp = 0; tmp < qty; tmp++) {
- if (dbuf->blocks[tmp].hdr.magic != AFS_DIR_MAGIC) {
- printk("kAFS: %s(%lx): bad magic %d/%d is %04hx\n",
- __func__, dvnode->vfs_inode.i_ino, tmp, qty,
- ntohs(dbuf->blocks[tmp].hdr.magic));
- trace_afs_dir_check_failed(dvnode, off, i_size);
- kunmap(page);
+ size = min_t(loff_t, folio_size(folio), i_size - pos);
+ for (offset = 0; offset < size; offset += sizeof(*block)) {
+ block = kmap_local_folio(folio, offset);
+ if (block->hdr.magic != AFS_DIR_MAGIC) {
+ printk("kAFS: %s(%lx): [%llx] bad magic %zx/%zx is %04hx\n",
+ __func__, dvnode->vfs_inode.i_ino,
+ pos, offset, size, ntohs(block->hdr.magic));
+ trace_afs_dir_check_failed(dvnode, pos + offset, i_size);
+ kunmap_local(block);
trace_afs_file_error(dvnode, -EIO, afs_file_error_dir_bad_magic);
goto error;
}
/* Make sure each block is NUL terminated so we can reasonably
- * use string functions on it. The filenames in the page
+ * use string functions on it. The filenames in the folio
* *should* be NUL-terminated anyway.
*/
- ((u8 *)&dbuf->blocks[tmp])[AFS_DIR_BLOCK_SIZE - 1] = 0;
- }
-
- kunmap_atomic(dbuf);
+ ((u8 *)block)[AFS_DIR_BLOCK_SIZE - 1] = 0;
+ kunmap_local(block);
+ }
checked:
afs_stat_v(dvnode, n_read_dir);
return true;
@@ -190,11 +181,11 @@ error:
*/
static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
{
- struct afs_xdr_dir_page *dbuf;
+ union afs_xdr_dir_block *block;
struct address_space *mapping = dvnode->vfs_inode.i_mapping;
- struct page *page;
- unsigned int i, qty = PAGE_SIZE / sizeof(union afs_xdr_dir_block);
+ struct folio *folio;
pgoff_t last = req->nr_pages - 1;
+ size_t offset, size;
XA_STATE(xas, &mapping->i_pages, 0);
@@ -205,30 +196,28 @@ static void afs_dir_dump(struct afs_vnode *dvnode, struct afs_read *req)
req->pos, req->nr_pages,
req->iter->iov_offset, iov_iter_count(req->iter));
- xas_for_each(&xas, page, last) {
- if (xas_retry(&xas, page))
+ xas_for_each(&xas, folio, last) {
+ if (xas_retry(&xas, folio))
continue;
- BUG_ON(PageCompound(page));
- BUG_ON(page->mapping != mapping);
-
- dbuf = kmap_atomic(page);
- for (i = 0; i < qty; i++) {
- union afs_xdr_dir_block *block = &dbuf->blocks[i];
+ BUG_ON(folio_file_mapping(folio) != mapping);
- pr_warn("[%02lx] %32phN\n", page->index * qty + i, block);
+ size = min_t(loff_t, folio_size(folio), req->actual_len - folio_pos(folio));
+ for (offset = 0; offset < size; offset += sizeof(*block)) {
+ block = kmap_local_folio(folio, offset);
+ pr_warn("[%02lx] %32phN\n", folio_index(folio) + offset, block);
+ kunmap_local(block);
}
- kunmap_atomic(dbuf);
}
}
/*
- * Check all the pages in a directory. All the pages are held pinned.
+ * Check all the blocks in a directory. All the folios are held pinned.
*/
static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
{
struct address_space *mapping = dvnode->vfs_inode.i_mapping;
- struct page *page;
+ struct folio *folio;
pgoff_t last = req->nr_pages - 1;
int ret = 0;
@@ -238,14 +227,13 @@ static int afs_dir_check(struct afs_vnode *dvnode, struct afs_read *req)
return 0;
rcu_read_lock();
- xas_for_each(&xas, page, last) {
- if (xas_retry(&xas, page))
+ xas_for_each(&xas, folio, last) {
+ if (xas_retry(&xas, folio))
continue;
- BUG_ON(PageCompound(page));
- BUG_ON(page->mapping != mapping);
+ BUG_ON(folio_file_mapping(folio) != mapping);
- if (!afs_dir_check_page(dvnode, page, req->file_size)) {
+ if (!afs_dir_check_folio(dvnode, folio, req->actual_len)) {
afs_dir_dump(dvnode, req);
ret = -EIO;
break;
@@ -274,15 +262,16 @@ static int afs_dir_open(struct inode *inode, struct file *file)
/*
* Read the directory into the pagecache in one go, scrubbing the previous
- * contents. The list of pages is returned, pinning them so that they don't
+ * contents. The list of folios is returned, pinning them so that they don't
* get reclaimed during the iteration.
*/
static struct afs_read *afs_read_dir(struct afs_vnode *dvnode, struct key *key)
__acquires(&dvnode->validate_lock)
{
+ struct address_space *mapping = dvnode->vfs_inode.i_mapping;
struct afs_read *req;
loff_t i_size;
- int nr_pages, i, n;
+ int nr_pages, i;
int ret;
_enter("");
@@ -320,43 +309,30 @@ expand:
req->iter = &req->def_iter;
/* Fill in any gaps that we might find where the memory reclaimer has
- * been at work and pin all the pages. If there are any gaps, we will
+ * been at work and pin all the folios. If there are any gaps, we will
* need to reread the entire directory contents.
*/
i = req->nr_pages;
while (i < nr_pages) {
- struct page *pages[8], *page;
-
- n = find_get_pages_contig(dvnode->vfs_inode.i_mapping, i,
- min_t(unsigned int, nr_pages - i,
- ARRAY_SIZE(pages)),
- pages);
- _debug("find %u at %u/%u", n, i, nr_pages);
-
- if (n == 0) {
- gfp_t gfp = dvnode->vfs_inode.i_mapping->gfp_mask;
+ struct folio *folio;
+ folio = filemap_get_folio(mapping, i);
+ if (!folio) {
if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
afs_stat_v(dvnode, n_inval);
ret = -ENOMEM;
- page = __page_cache_alloc(gfp);
- if (!page)
+ folio = __filemap_get_folio(mapping,
+ i, FGP_LOCK | FGP_CREAT,
+ mapping->gfp_mask);
+ if (!folio)
goto error;
- ret = add_to_page_cache_lru(page,
- dvnode->vfs_inode.i_mapping,
- i, gfp);
- if (ret < 0)
- goto error;
-
- attach_page_private(page, (void *)1);
- unlock_page(page);
- req->nr_pages++;
- i++;
- } else {
- req->nr_pages += n;
- i += n;
+ folio_attach_private(folio, (void *)1);
+ folio_unlock(folio);
}
+
+ req->nr_pages += folio_nr_pages(folio);
+ i += folio_nr_pages(folio);
}
/* If we're going to reload, we need to lock all the pages to prevent
@@ -424,7 +400,7 @@ static int afs_dir_iterate_block(struct afs_vnode *dvnode,
size_t nlen;
int tmp;
- _enter("%u,%x,%p,,",(unsigned)ctx->pos,blkoff,block);
+ _enter("%llx,%x", ctx->pos, blkoff);
curr = (ctx->pos - blkoff) / sizeof(union afs_xdr_dirent);
@@ -513,12 +489,10 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
struct key *key, afs_dataversion_t *_dir_version)
{
struct afs_vnode *dvnode = AFS_FS_I(dir);
- struct afs_xdr_dir_page *dbuf;
union afs_xdr_dir_block *dblock;
struct afs_read *req;
- struct page *page;
- unsigned blkoff, limit;
- void __rcu **slot;
+ struct folio *folio;
+ unsigned offset, size;
int ret;
_enter("{%lu},%u,,", dir->i_ino, (unsigned)ctx->pos);
@@ -540,43 +514,30 @@ static int afs_dir_iterate(struct inode *dir, struct dir_context *ctx,
/* walk through the blocks in sequence */
ret = 0;
while (ctx->pos < req->actual_len) {
- blkoff = ctx->pos & ~(sizeof(union afs_xdr_dir_block) - 1);
-
- /* Fetch the appropriate page from the directory and re-add it
+ /* Fetch the appropriate folio from the directory and re-add it
* to the LRU. We have all the pages pinned with an extra ref.
*/
- rcu_read_lock();
- page = NULL;
- slot = radix_tree_lookup_slot(&dvnode->vfs_inode.i_mapping->i_pages,
- blkoff / PAGE_SIZE);
- if (slot)
- page = radix_tree_deref_slot(slot);
- rcu_read_unlock();
- if (!page) {
+ folio = __filemap_get_folio(dir->i_mapping, ctx->pos / PAGE_SIZE,
+ FGP_ACCESSED, 0);
+ if (!folio) {
ret = afs_bad(dvnode, afs_file_error_dir_missing_page);
break;
}
- mark_page_accessed(page);
- limit = blkoff & ~(PAGE_SIZE - 1);
+ offset = round_down(ctx->pos, sizeof(*dblock)) - folio_file_pos(folio);
+ size = min_t(loff_t, folio_size(folio),
+ req->actual_len - folio_file_pos(folio));
- dbuf = kmap(page);
-
- /* deal with the individual blocks stashed on this page */
do {
- dblock = &dbuf->blocks[(blkoff % PAGE_SIZE) /
- sizeof(union afs_xdr_dir_block)];
- ret = afs_dir_iterate_block(dvnode, ctx, dblock, blkoff);
- if (ret != 1) {
- kunmap(page);
+ dblock = kmap_local_folio(folio, offset);
+ ret = afs_dir_iterate_block(dvnode, ctx, dblock,
+ folio_file_pos(folio) + offset);
+ kunmap_local(dblock);
+ if (ret != 1)
goto out;
- }
- blkoff += sizeof(union afs_xdr_dir_block);
+ } while (offset += sizeof(*dblock), offset < size);
- } while (ctx->pos < dir->i_size && blkoff < limit);
-
- kunmap(page);
ret = 0;
}
@@ -2037,42 +1998,42 @@ error:
}
/*
- * Release a directory page and clean up its private state if it's not busy
- * - return true if the page can now be released, false if not
+ * Release a directory folio and clean up its private state if it's not busy
+ * - return true if the folio can now be released, false if not
*/
-static int afs_dir_releasepage(struct page *page, gfp_t gfp_flags)
+static int afs_dir_releasepage(struct page *subpage, gfp_t gfp_flags)
{
- struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host);
+ struct folio *folio = page_folio(subpage);
+ struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
- _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, page->index);
+ _enter("{{%llx:%llu}[%lu]}", dvnode->fid.vid, dvnode->fid.vnode, folio_index(folio));
- detach_page_private(page);
+ folio_detach_private(folio);
/* The directory will need reloading. */
if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
afs_stat_v(dvnode, n_relpg);
- return 1;
+ return true;
}
/*
- * invalidate part or all of a page
- * - release a page and clean up its private data if offset is 0 (indicating
- * the entire page)
+ * Invalidate part or all of a folio.
*/
-static void afs_dir_invalidatepage(struct page *page, unsigned int offset,
+static void afs_dir_invalidatepage(struct page *subpage, unsigned int offset,
unsigned int length)
{
- struct afs_vnode *dvnode = AFS_FS_I(page->mapping->host);
+ struct folio *folio = page_folio(subpage);
+ struct afs_vnode *dvnode = AFS_FS_I(folio_inode(folio));
- _enter("{%lu},%u,%u", page->index, offset, length);
+ _enter("{%lu},%u,%u", folio_index(folio), offset, length);
- BUG_ON(!PageLocked(page));
+ BUG_ON(!folio_test_locked(folio));
/* The directory will need reloading. */
if (test_and_clear_bit(AFS_VNODE_DIR_VALID, &dvnode->flags))
afs_stat_v(dvnode, n_inval);
- /* we clean up only if the entire page is being invalidated */
- if (offset == 0 && length == thp_size(page))
- detach_page_private(page);
+ /* we clean up only if the entire folio is being invalidated */
+ if (offset == 0 && length == folio_size(folio))
+ folio_detach_private(folio);
}
diff --git a/fs/afs/dir_edit.c b/fs/afs/dir_edit.c
index 540b9fc96824..d98e109ecee9 100644
--- a/fs/afs/dir_edit.c
+++ b/fs/afs/dir_edit.c
@@ -105,6 +105,25 @@ static void afs_clear_contig_bits(union afs_xdr_dir_block *block,
}
/*
+ * Get a new directory folio.
+ */
+static struct folio *afs_dir_get_folio(struct afs_vnode *vnode, pgoff_t index)
+{
+ struct address_space *mapping = vnode->vfs_inode.i_mapping;
+ struct folio *folio;
+
+ folio = __filemap_get_folio(mapping, index,
+ FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
+ mapping->gfp_mask);
+ if (!folio)
+ clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ else if (folio && !folio_test_private(folio))
+ folio_attach_private(folio, (void *)1);
+
+ return folio;
+}
+
+/*
* Scan a directory block looking for a dirent of the right name.
*/
static int afs_dir_scan_block(union afs_xdr_dir_block *block, struct qstr *name,
@@ -188,13 +207,11 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
enum afs_edit_dir_reason why)
{
union afs_xdr_dir_block *meta, *block;
- struct afs_xdr_dir_page *meta_page, *dir_page;
union afs_xdr_dirent *de;
- struct page *page0, *page;
+ struct folio *folio0, *folio;
unsigned int need_slots, nr_blocks, b;
pgoff_t index;
loff_t i_size;
- gfp_t gfp;
int slot;
_enter(",,{%d,%s},", name->len, name->name);
@@ -206,10 +223,8 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
return;
}
- gfp = vnode->vfs_inode.i_mapping->gfp_mask;
- page0 = find_or_create_page(vnode->vfs_inode.i_mapping, 0, gfp);
- if (!page0) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ folio0 = afs_dir_get_folio(vnode, 0);
+ if (!folio0) {
_leave(" [fgp]");
return;
}
@@ -217,42 +232,35 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
/* Work out how many slots we're going to need. */
need_slots = afs_dir_calc_slots(name->len);
- meta_page = kmap(page0);
- meta = &meta_page->blocks[0];
+ meta = kmap_local_folio(folio0, 0);
if (i_size == 0)
goto new_directory;
nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
- /* Find a block that has sufficient slots available. Each VM page
+ /* Find a block that has sufficient slots available. Each folio
* contains two or more directory blocks.
*/
for (b = 0; b < nr_blocks + 1; b++) {
- /* If the directory extended into a new page, then we need to
- * tack a new page on the end.
+ /* If the directory extended into a new folio, then we need to
+ * tack a new folio on the end.
*/
index = b / AFS_DIR_BLOCKS_PER_PAGE;
- if (index == 0) {
- page = page0;
- dir_page = meta_page;
- } else {
- if (nr_blocks >= AFS_DIR_MAX_BLOCKS)
- goto error;
- gfp = vnode->vfs_inode.i_mapping->gfp_mask;
- page = find_or_create_page(vnode->vfs_inode.i_mapping,
- index, gfp);
- if (!page)
+ if (nr_blocks >= AFS_DIR_MAX_BLOCKS)
+ goto error;
+ if (index >= folio_nr_pages(folio0)) {
+ folio = afs_dir_get_folio(vnode, index);
+ if (!folio)
goto error;
- if (!PagePrivate(page))
- attach_page_private(page, (void *)1);
- dir_page = kmap(page);
+ } else {
+ folio = folio0;
}
+ block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_file_pos(folio));
+
/* Abandon the edit if we got a callback break. */
if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
goto invalidated;
- block = &dir_page->blocks[b % AFS_DIR_BLOCKS_PER_PAGE];
-
_debug("block %u: %2u %3u %u",
b,
(b < AFS_DIR_BLOCKS_WITH_CTR) ? meta->meta.alloc_ctrs[b] : 99,
@@ -266,7 +274,7 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
afs_set_i_size(vnode, (b + 1) * AFS_DIR_BLOCK_SIZE);
}
- /* Only lower dir pages have a counter in the header. */
+ /* Only lower dir blocks have a counter in the header. */
if (b >= AFS_DIR_BLOCKS_WITH_CTR ||
meta->meta.alloc_ctrs[b] >= need_slots) {
/* We need to try and find one or more consecutive
@@ -279,10 +287,10 @@ void afs_edit_dir_add(struct afs_vnode *vnode,
}
}
- if (page != page0) {
- unlock_page(page);
- kunmap(page);
- put_page(page);
+ kunmap_local(block);
+ if (folio != folio0) {
+ folio_unlock(folio);
+ folio_put(folio);
}
}
@@ -298,8 +306,8 @@ new_directory:
i_size = AFS_DIR_BLOCK_SIZE;
afs_set_i_size(vnode, i_size);
slot = AFS_DIR_RESV_BLOCKS0;
- page = page0;
- block = meta;
+ folio = folio0;
+ block = kmap_local_folio(folio, 0);
nr_blocks = 1;
b = 0;
@@ -318,10 +326,10 @@ found_space:
/* Adjust the bitmap. */
afs_set_contig_bits(block, slot, need_slots);
- if (page != page0) {
- unlock_page(page);
- kunmap(page);
- put_page(page);
+ kunmap_local(block);
+ if (folio != folio0) {
+ folio_unlock(folio);
+ folio_put(folio);
}
/* Adjust the allocation counter. */
@@ -333,18 +341,19 @@ found_space:
_debug("Insert %s in %u[%u]", name->name, b, slot);
out_unmap:
- unlock_page(page0);
- kunmap(page0);
- put_page(page0);
+ kunmap_local(meta);
+ folio_unlock(folio0);
+ folio_put(folio0);
_leave("");
return;
invalidated:
trace_afs_edit_dir(vnode, why, afs_edit_dir_create_inval, 0, 0, 0, 0, name->name);
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- if (page != page0) {
- kunmap(page);
- put_page(page);
+ kunmap_local(block);
+ if (folio != folio0) {
+ folio_unlock(folio);
+ folio_put(folio);
}
goto out_unmap;
@@ -364,10 +373,9 @@ error:
void afs_edit_dir_remove(struct afs_vnode *vnode,
struct qstr *name, enum afs_edit_dir_reason why)
{
- struct afs_xdr_dir_page *meta_page, *dir_page;
union afs_xdr_dir_block *meta, *block;
union afs_xdr_dirent *de;
- struct page *page0, *page;
+ struct folio *folio0, *folio;
unsigned int need_slots, nr_blocks, b;
pgoff_t index;
loff_t i_size;
@@ -384,9 +392,8 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
}
nr_blocks = i_size / AFS_DIR_BLOCK_SIZE;
- page0 = find_lock_page(vnode->vfs_inode.i_mapping, 0);
- if (!page0) {
- clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
+ folio0 = afs_dir_get_folio(vnode, 0);
+ if (!folio0) {
_leave(" [fgp]");
return;
}
@@ -394,30 +401,27 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
/* Work out how many slots we're going to discard. */
need_slots = afs_dir_calc_slots(name->len);
- meta_page = kmap(page0);
- meta = &meta_page->blocks[0];
+ meta = kmap_local_folio(folio0, 0);
- /* Find a page that has sufficient slots available. Each VM page
+ /* Find a block that has sufficient slots available. Each folio
* contains two or more directory blocks.
*/
for (b = 0; b < nr_blocks; b++) {
index = b / AFS_DIR_BLOCKS_PER_PAGE;
- if (index != 0) {
- page = find_lock_page(vnode->vfs_inode.i_mapping, index);
- if (!page)
+ if (index >= folio_nr_pages(folio0)) {
+ folio = afs_dir_get_folio(vnode, index);
+ if (!folio)
goto error;
- dir_page = kmap(page);
} else {
- page = page0;
- dir_page = meta_page;
+ folio = folio0;
}
+ block = kmap_local_folio(folio, b * AFS_DIR_BLOCK_SIZE - folio_file_pos(folio));
+
/* Abandon the edit if we got a callback break. */
if (!test_bit(AFS_VNODE_DIR_VALID, &vnode->flags))
goto invalidated;
- block = &dir_page->blocks[b % AFS_DIR_BLOCKS_PER_PAGE];
-
if (b > AFS_DIR_BLOCKS_WITH_CTR ||
meta->meta.alloc_ctrs[b] <= AFS_DIR_SLOTS_PER_BLOCK - 1 - need_slots) {
slot = afs_dir_scan_block(block, name, b);
@@ -425,10 +429,10 @@ void afs_edit_dir_remove(struct afs_vnode *vnode,
goto found_dirent;
}
- if (page != page0) {
- unlock_page(page);
- kunmap(page);
- put_page(page);
+ kunmap_local(block);
+ if (folio != folio0) {
+ folio_unlock(folio);
+ folio_put(folio);
}
}
@@ -449,10 +453,10 @@ found_dirent:
/* Adjust the bitmap. */
afs_clear_contig_bits(block, slot, need_slots);
- if (page != page0) {
- unlock_page(page);
- kunmap(page);
- put_page(page);
+ kunmap_local(block);
+ if (folio != folio0) {
+ folio_unlock(folio);
+ folio_put(folio);
}
/* Adjust the allocation counter. */
@@ -464,9 +468,9 @@ found_dirent:
_debug("Remove %s from %u[%u]", name->name, b, slot);
out_unmap:
- unlock_page(page0);
- kunmap(page0);
- put_page(page0);
+ kunmap_local(meta);
+ folio_unlock(folio0);
+ folio_put(folio0);
_leave("");
return;
@@ -474,10 +478,10 @@ invalidated:
trace_afs_edit_dir(vnode, why, afs_edit_dir_delete_inval,
0, 0, 0, 0, name->name);
clear_bit(AFS_VNODE_DIR_VALID, &vnode->flags);
- if (page != page0) {
- unlock_page(page);
- kunmap(page);
- put_page(page);
+ kunmap_local(block);
+ if (folio != folio0) {
+ folio_unlock(folio);
+ folio_put(folio);
}
goto out_unmap;
diff --git a/fs/afs/file.c b/fs/afs/file.c
index eb11d047c0ae..cb6ad61eec3b 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -324,21 +324,24 @@ static int afs_symlink_readpage(struct file *file, struct page *page)
{
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
struct afs_read *fsreq;
+ struct folio *folio = page_folio(page);
int ret;
fsreq = afs_alloc_read(GFP_NOFS);
if (!fsreq)
return -ENOMEM;
- fsreq->pos = page->index * PAGE_SIZE;
- fsreq->len = PAGE_SIZE;
+ fsreq->pos = folio_pos(folio);
+ fsreq->len = folio_size(folio);
fsreq->vnode = vnode;
fsreq->iter = &fsreq->def_iter;
iov_iter_xarray(&fsreq->def_iter, READ, &page->mapping->i_pages,
fsreq->pos, fsreq->len);
ret = afs_fetch_data(fsreq->vnode, fsreq);
- page_endio(page, false, ret);
+ if (ret == 0)
+ SetPageUptodate(page);
+ unlock_page(page);
return ret;
}
@@ -362,7 +365,7 @@ static int afs_begin_cache_operation(struct netfs_read_request *rreq)
}
static int afs_check_write_begin(struct file *file, loff_t pos, unsigned len,
- struct page *page, void **_fsdata)
+ struct folio *folio, void **_fsdata)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
@@ -385,7 +388,9 @@ const struct netfs_read_request_ops afs_req_ops = {
static int afs_readpage(struct file *file, struct page *page)
{
- return netfs_readpage(file, page, &afs_req_ops, NULL);
+ struct folio *folio = page_folio(page);
+
+ return netfs_readpage(file, folio, &afs_req_ops, NULL);
}
static void afs_readahead(struct readahead_control *ractl)
@@ -397,29 +402,29 @@ static void afs_readahead(struct readahead_control *ractl)
* Adjust the dirty region of the page on truncation or full invalidation,
* getting rid of the markers altogether if the region is entirely invalidated.
*/
-static void afs_invalidate_dirty(struct page *page, unsigned int offset,
+static void afs_invalidate_dirty(struct folio *folio, unsigned int offset,
unsigned int length)
{
- struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+ struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
unsigned long priv;
unsigned int f, t, end = offset + length;
- priv = page_private(page);
+ priv = (unsigned long)folio_get_private(folio);
/* we clean up only if the entire page is being invalidated */
- if (offset == 0 && length == thp_size(page))
+ if (offset == 0 && length == folio_size(folio))
goto full_invalidate;
/* If the page was dirtied by page_mkwrite(), the PTE stays writable
* and we don't get another notification to tell us to expand it
* again.
*/
- if (afs_is_page_dirty_mmapped(priv))
+ if (afs_is_folio_dirty_mmapped(priv))
return;
/* We may need to shorten the dirty region */
- f = afs_page_dirty_from(page, priv);
- t = afs_page_dirty_to(page, priv);
+ f = afs_folio_dirty_from(folio, priv);
+ t = afs_folio_dirty_to(folio, priv);
if (t <= offset || f >= end)
return; /* Doesn't overlap */
@@ -437,17 +442,17 @@ static void afs_invalidate_dirty(struct page *page, unsigned int offset,
if (f == t)
goto undirty;
- priv = afs_page_dirty(page, f, t);
- set_page_private(page, priv);
- trace_afs_page_dirty(vnode, tracepoint_string("trunc"), page);
+ priv = afs_folio_dirty(folio, f, t);
+ folio_change_private(folio, (void *)priv);
+ trace_afs_folio_dirty(vnode, tracepoint_string("trunc"), folio);
return;
undirty:
- trace_afs_page_dirty(vnode, tracepoint_string("undirty"), page);
- clear_page_dirty_for_io(page);
+ trace_afs_folio_dirty(vnode, tracepoint_string("undirty"), folio);
+ folio_clear_dirty_for_io(folio);
full_invalidate:
- trace_afs_page_dirty(vnode, tracepoint_string("inval"), page);
- detach_page_private(page);
+ trace_afs_folio_dirty(vnode, tracepoint_string("inval"), folio);
+ folio_detach_private(folio);
}
/*
@@ -458,14 +463,16 @@ full_invalidate:
static void afs_invalidatepage(struct page *page, unsigned int offset,
unsigned int length)
{
- _enter("{%lu},%u,%u", page->index, offset, length);
+ struct folio *folio = page_folio(page);
+
+ _enter("{%lu},%u,%u", folio_index(folio), offset, length);
BUG_ON(!PageLocked(page));
if (PagePrivate(page))
- afs_invalidate_dirty(page, offset, length);
+ afs_invalidate_dirty(folio, offset, length);
- wait_on_page_fscache(page);
+ folio_wait_fscache(folio);
_leave("");
}
@@ -475,30 +482,31 @@ static void afs_invalidatepage(struct page *page, unsigned int offset,
*/
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
- struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+ struct folio *folio = page_folio(page);
+ struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
_enter("{{%llx:%llu}[%lu],%lx},%x",
- vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
+ vnode->fid.vid, vnode->fid.vnode, folio_index(folio), folio->flags,
gfp_flags);
/* deny if page is being written to the cache and the caller hasn't
* elected to wait */
#ifdef CONFIG_AFS_FSCACHE
- if (PageFsCache(page)) {
+ if (folio_test_fscache(folio)) {
if (!(gfp_flags & __GFP_DIRECT_RECLAIM) || !(gfp_flags & __GFP_FS))
return false;
- wait_on_page_fscache(page);
+ folio_wait_fscache(folio);
}
#endif
- if (PagePrivate(page)) {
- trace_afs_page_dirty(vnode, tracepoint_string("rel"), page);
- detach_page_private(page);
+ if (folio_test_private(folio)) {
+ trace_afs_folio_dirty(vnode, tracepoint_string("rel"), folio);
+ folio_detach_private(folio);
}
- /* indicate that the page can be released */
+ /* Indicate that the folio can be released */
_leave(" = T");
- return 1;
+ return true;
}
static void afs_add_open_mmap(struct afs_vnode *vnode)
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 9357c53faa69..aa4c0d6c9780 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -876,59 +876,59 @@ struct afs_vnode_cache_aux {
} __packed;
/*
- * We use page->private to hold the amount of the page that we've written to,
+ * We use folio->private to hold the amount of the folio that we've written to,
* splitting the field into two parts. However, we need to represent a range
- * 0...PAGE_SIZE, so we reduce the resolution if the size of the page
+ * 0...FOLIO_SIZE, so we reduce the resolution if the size of the folio
* exceeds what we can encode.
*/
#ifdef CONFIG_64BIT
-#define __AFS_PAGE_PRIV_MASK 0x7fffffffUL
-#define __AFS_PAGE_PRIV_SHIFT 32
-#define __AFS_PAGE_PRIV_MMAPPED 0x80000000UL
+#define __AFS_FOLIO_PRIV_MASK 0x7fffffffUL
+#define __AFS_FOLIO_PRIV_SHIFT 32
+#define __AFS_FOLIO_PRIV_MMAPPED 0x80000000UL
#else
-#define __AFS_PAGE_PRIV_MASK 0x7fffUL
-#define __AFS_PAGE_PRIV_SHIFT 16
-#define __AFS_PAGE_PRIV_MMAPPED 0x8000UL
+#define __AFS_FOLIO_PRIV_MASK 0x7fffUL
+#define __AFS_FOLIO_PRIV_SHIFT 16
+#define __AFS_FOLIO_PRIV_MMAPPED 0x8000UL
#endif
-static inline unsigned int afs_page_dirty_resolution(struct page *page)
+static inline unsigned int afs_folio_dirty_resolution(struct folio *folio)
{
- int shift = thp_order(page) + PAGE_SHIFT - (__AFS_PAGE_PRIV_SHIFT - 1);
+ int shift = folio_shift(folio) - (__AFS_FOLIO_PRIV_SHIFT - 1);
return (shift > 0) ? shift : 0;
}
-static inline size_t afs_page_dirty_from(struct page *page, unsigned long priv)
+static inline size_t afs_folio_dirty_from(struct folio *folio, unsigned long priv)
{
- unsigned long x = priv & __AFS_PAGE_PRIV_MASK;
+ unsigned long x = priv & __AFS_FOLIO_PRIV_MASK;
/* The lower bound is inclusive */
- return x << afs_page_dirty_resolution(page);
+ return x << afs_folio_dirty_resolution(folio);
}
-static inline size_t afs_page_dirty_to(struct page *page, unsigned long priv)
+static inline size_t afs_folio_dirty_to(struct folio *folio, unsigned long priv)
{
- unsigned long x = (priv >> __AFS_PAGE_PRIV_SHIFT) & __AFS_PAGE_PRIV_MASK;
+ unsigned long x = (priv >> __AFS_FOLIO_PRIV_SHIFT) & __AFS_FOLIO_PRIV_MASK;
/* The upper bound is immediately beyond the region */
- return (x + 1) << afs_page_dirty_resolution(page);
+ return (x + 1) << afs_folio_dirty_resolution(folio);
}
-static inline unsigned long afs_page_dirty(struct page *page, size_t from, size_t to)
+static inline unsigned long afs_folio_dirty(struct folio *folio, size_t from, size_t to)
{
- unsigned int res = afs_page_dirty_resolution(page);
+ unsigned int res = afs_folio_dirty_resolution(folio);
from >>= res;
to = (to - 1) >> res;
- return (to << __AFS_PAGE_PRIV_SHIFT) | from;
+ return (to << __AFS_FOLIO_PRIV_SHIFT) | from;
}
-static inline unsigned long afs_page_dirty_mmapped(unsigned long priv)
+static inline unsigned long afs_folio_dirty_mmapped(unsigned long priv)
{
- return priv | __AFS_PAGE_PRIV_MMAPPED;
+ return priv | __AFS_FOLIO_PRIV_MMAPPED;
}
-static inline bool afs_is_page_dirty_mmapped(unsigned long priv)
+static inline bool afs_is_folio_dirty_mmapped(unsigned long priv)
{
- return priv & __AFS_PAGE_PRIV_MMAPPED;
+ return priv & __AFS_FOLIO_PRIV_MMAPPED;
}
#include <trace/events/afs.h>
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 8b1d9c2f6bec..ca4909baf5e6 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -32,7 +32,7 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
struct page **_page, void **fsdata)
{
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
- struct page *page;
+ struct folio *folio;
unsigned long priv;
unsigned f, from;
unsigned t, to;
@@ -46,12 +46,12 @@ int afs_write_begin(struct file *file, struct address_space *mapping,
* file. We need to do this before we get a lock on the page in case
* there's more than one writer competing for the same cache block.
*/
- ret = netfs_write_begin(file, mapping, pos, len, flags, &page, fsdata,
+ ret = netfs_write_begin(file, mapping, pos, len, flags, &folio, fsdata,
&afs_req_ops, NULL);
if (ret < 0)
return ret;
- index = page->index;
+ index = folio_index(folio);
from = pos - index * PAGE_SIZE;
to = from + len;
@@ -59,14 +59,14 @@ try_again:
/* See if this page is already partially written in a way that we can
* merge the new write with.
*/
- if (PagePrivate(page)) {
- priv = page_private(page);
- f = afs_page_dirty_from(page, priv);
- t = afs_page_dirty_to(page, priv);
+ if (folio_test_private(folio)) {
+ priv = (unsigned long)folio_get_private(folio);
+ f = afs_folio_dirty_from(folio, priv);
+ t = afs_folio_dirty_to(folio, priv);
ASSERTCMP(f, <=, t);
- if (PageWriteback(page)) {
- trace_afs_page_dirty(vnode, tracepoint_string("alrdy"), page);
+ if (folio_test_writeback(folio)) {
+ trace_afs_folio_dirty(vnode, tracepoint_string("alrdy"), folio);
goto flush_conflicting_write;
}
/* If the file is being filled locally, allow inter-write
@@ -78,7 +78,7 @@ try_again:
goto flush_conflicting_write;
}
- *_page = page;
+ *_page = &folio->page;
_leave(" = 0");
return 0;
@@ -87,17 +87,17 @@ try_again:
*/
flush_conflicting_write:
_debug("flush conflict");
- ret = write_one_page(page);
+ ret = folio_write_one(folio);
if (ret < 0)
goto error;
- ret = lock_page_killable(page);
+ ret = folio_lock_killable(folio);
if (ret < 0)
goto error;
goto try_again;
error:
- put_page(page);
+ folio_put(folio);
_leave(" = %d", ret);
return ret;
}
@@ -107,24 +107,25 @@ error:
*/
int afs_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct page *subpage, void *fsdata)
{
+ struct folio *folio = page_folio(subpage);
struct afs_vnode *vnode = AFS_FS_I(file_inode(file));
unsigned long priv;
- unsigned int f, from = pos & (thp_size(page) - 1);
+ unsigned int f, from = offset_in_folio(folio, pos);
unsigned int t, to = from + copied;
loff_t i_size, maybe_i_size;
_enter("{%llx:%llu},{%lx}",
- vnode->fid.vid, vnode->fid.vnode, page->index);
+ vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
if (copied < len) {
copied = 0;
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
if (copied == 0)
@@ -141,29 +142,29 @@ int afs_write_end(struct file *file, struct address_space *mapping,
write_sequnlock(&vnode->cb_lock);
}
- if (PagePrivate(page)) {
- priv = page_private(page);
- f = afs_page_dirty_from(page, priv);
- t = afs_page_dirty_to(page, priv);
+ if (folio_test_private(folio)) {
+ priv = (unsigned long)folio_get_private(folio);
+ f = afs_folio_dirty_from(folio, priv);
+ t = afs_folio_dirty_to(folio, priv);
if (from < f)
f = from;
if (to > t)
t = to;
- priv = afs_page_dirty(page, f, t);
- set_page_private(page, priv);
- trace_afs_page_dirty(vnode, tracepoint_string("dirty+"), page);
+ priv = afs_folio_dirty(folio, f, t);
+ folio_change_private(folio, (void *)priv);
+ trace_afs_folio_dirty(vnode, tracepoint_string("dirty+"), folio);
} else {
- priv = afs_page_dirty(page, from, to);
- attach_page_private(page, (void *)priv);
- trace_afs_page_dirty(vnode, tracepoint_string("dirty"), page);
+ priv = afs_folio_dirty(folio, from, to);
+ folio_attach_private(folio, (void *)priv);
+ trace_afs_folio_dirty(vnode, tracepoint_string("dirty"), folio);
}
- if (set_page_dirty(page))
- _debug("dirtied %lx", page->index);
+ if (folio_mark_dirty(folio))
+ _debug("dirtied %lx", folio_index(folio));
out:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return copied;
}
@@ -174,40 +175,32 @@ static void afs_kill_pages(struct address_space *mapping,
loff_t start, loff_t len)
{
struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct pagevec pv;
- unsigned int loop, psize;
+ struct folio *folio;
+ pgoff_t index = start / PAGE_SIZE;
+ pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
_enter("{%llx:%llu},%llx @%llx",
vnode->fid.vid, vnode->fid.vnode, len, start);
- pagevec_init(&pv);
-
do {
- _debug("kill %llx @%llx", len, start);
-
- pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
- PAGEVEC_SIZE, pv.pages);
- if (pv.nr == 0)
- break;
+ _debug("kill %lx (to %lx)", index, last);
- for (loop = 0; loop < pv.nr; loop++) {
- struct page *page = pv.pages[loop];
+ folio = filemap_get_folio(mapping, index);
+ if (!folio) {
+ next = index + 1;
+ continue;
+ }
- if (page->index * PAGE_SIZE >= start + len)
- break;
+ next = folio_next_index(folio);
- psize = thp_size(page);
- start += psize;
- len -= psize;
- ClearPageUptodate(page);
- end_page_writeback(page);
- lock_page(page);
- generic_error_remove_page(mapping, page);
- unlock_page(page);
- }
+ folio_clear_uptodate(folio);
+ folio_end_writeback(folio);
+ folio_lock(folio);
+ generic_error_remove_page(mapping, &folio->page);
+ folio_unlock(folio);
+ folio_put(folio);
- __pagevec_release(&pv);
- } while (len > 0);
+ } while (index = next, index <= last);
_leave("");
}
@@ -220,37 +213,27 @@ static void afs_redirty_pages(struct writeback_control *wbc,
loff_t start, loff_t len)
{
struct afs_vnode *vnode = AFS_FS_I(mapping->host);
- struct pagevec pv;
- unsigned int loop, psize;
+ struct folio *folio;
+ pgoff_t index = start / PAGE_SIZE;
+ pgoff_t last = (start + len - 1) / PAGE_SIZE, next;
_enter("{%llx:%llu},%llx @%llx",
vnode->fid.vid, vnode->fid.vnode, len, start);
- pagevec_init(&pv);
-
do {
_debug("redirty %llx @%llx", len, start);
- pv.nr = find_get_pages_contig(mapping, start / PAGE_SIZE,
- PAGEVEC_SIZE, pv.pages);
- if (pv.nr == 0)
- break;
-
- for (loop = 0; loop < pv.nr; loop++) {
- struct page *page = pv.pages[loop];
-
- if (page->index * PAGE_SIZE >= start + len)
- break;
-
- psize = thp_size(page);
- start += psize;
- len -= psize;
- redirty_page_for_writepage(wbc, page);
- end_page_writeback(page);
+ folio = filemap_get_folio(mapping, index);
+ if (!folio) {
+ next = index + 1;
+ continue;
}
- __pagevec_release(&pv);
- } while (len > 0);
+ next = index + folio_nr_pages(folio);
+ folio_redirty_for_writepage(wbc, folio);
+ folio_end_writeback(folio);
+ folio_put(folio);
+ } while (index = next, index <= last);
_leave("");
}
@@ -261,7 +244,7 @@ static void afs_redirty_pages(struct writeback_control *wbc,
static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsigned int len)
{
struct address_space *mapping = vnode->vfs_inode.i_mapping;
- struct page *page;
+ struct folio *folio;
pgoff_t end;
XA_STATE(xas, &mapping->i_pages, start / PAGE_SIZE);
@@ -272,15 +255,16 @@ static void afs_pages_written_back(struct afs_vnode *vnode, loff_t start, unsign
rcu_read_lock();
end = (start + len - 1) / PAGE_SIZE;
- xas_for_each(&xas, page, end) {
- if (!PageWriteback(page)) {
- kdebug("bad %x @%llx page %lx %lx", len, start, page->index, end);
- ASSERT(PageWriteback(page));
+ xas_for_each(&xas, folio, end) {
+ if (!folio_test_writeback(folio)) {
+ kdebug("bad %x @%llx page %lx %lx",
+ len, start, folio_index(folio), end);
+ ASSERT(folio_test_writeback(folio));
}
- trace_afs_page_dirty(vnode, tracepoint_string("clear"), page);
- detach_page_private(page);
- page_endio(page, true, 0);
+ trace_afs_folio_dirty(vnode, tracepoint_string("clear"), folio);
+ folio_detach_private(folio);
+ folio_end_writeback(folio);
}
rcu_read_unlock();
@@ -437,7 +421,7 @@ static void afs_extend_writeback(struct address_space *mapping,
unsigned int *_len)
{
struct pagevec pvec;
- struct page *page;
+ struct folio *folio;
unsigned long priv;
unsigned int psize, filler = 0;
unsigned int f, t;
@@ -456,43 +440,43 @@ static void afs_extend_writeback(struct address_space *mapping,
*/
rcu_read_lock();
- xas_for_each(&xas, page, ULONG_MAX) {
+ xas_for_each(&xas, folio, ULONG_MAX) {
stop = true;
- if (xas_retry(&xas, page))
+ if (xas_retry(&xas, folio))
continue;
- if (xa_is_value(page))
+ if (xa_is_value(folio))
break;
- if (page->index != index)
+ if (folio_index(folio) != index)
break;
- if (!page_cache_get_speculative(page)) {
+ if (!folio_try_get_rcu(folio)) {
xas_reset(&xas);
continue;
}
/* Has the page moved or been split? */
- if (unlikely(page != xas_reload(&xas))) {
- put_page(page);
+ if (unlikely(folio != xas_reload(&xas))) {
+ folio_put(folio);
break;
}
- if (!trylock_page(page)) {
- put_page(page);
+ if (!folio_trylock(folio)) {
+ folio_put(folio);
break;
}
- if (!PageDirty(page) || PageWriteback(page)) {
- unlock_page(page);
- put_page(page);
+ if (!folio_test_dirty(folio) || folio_test_writeback(folio)) {
+ folio_unlock(folio);
+ folio_put(folio);
break;
}
- psize = thp_size(page);
- priv = page_private(page);
- f = afs_page_dirty_from(page, priv);
- t = afs_page_dirty_to(page, priv);
+ psize = folio_size(folio);
+ priv = (unsigned long)folio_get_private(folio);
+ f = afs_folio_dirty_from(folio, priv);
+ t = afs_folio_dirty_to(folio, priv);
if (f != 0 && !new_content) {
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
break;
}
@@ -503,8 +487,8 @@ static void afs_extend_writeback(struct address_space *mapping,
else if (t == psize || new_content)
stop = false;
- index += thp_nr_pages(page);
- if (!pagevec_add(&pvec, page))
+ index += folio_nr_pages(folio);
+ if (!pagevec_add(&pvec, &folio->page))
break;
if (stop)
break;
@@ -521,16 +505,16 @@ static void afs_extend_writeback(struct address_space *mapping,
break;
for (i = 0; i < pagevec_count(&pvec); i++) {
- page = pvec.pages[i];
- trace_afs_page_dirty(vnode, tracepoint_string("store+"), page);
+ folio = page_folio(pvec.pages[i]);
+ trace_afs_folio_dirty(vnode, tracepoint_string("store+"), folio);
- if (!clear_page_dirty_for_io(page))
+ if (!folio_clear_dirty_for_io(folio))
BUG();
- if (test_set_page_writeback(page))
+ if (folio_start_writeback(folio))
BUG();
- *_count -= thp_nr_pages(page);
- unlock_page(page);
+ *_count -= folio_nr_pages(folio);
+ folio_unlock(folio);
}
pagevec_release(&pvec);
@@ -544,10 +528,10 @@ static void afs_extend_writeback(struct address_space *mapping,
* Synchronously write back the locked page and any subsequent non-locked dirty
* pages.
*/
-static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
- struct writeback_control *wbc,
- struct page *page,
- loff_t start, loff_t end)
+static ssize_t afs_write_back_from_locked_folio(struct address_space *mapping,
+ struct writeback_control *wbc,
+ struct folio *folio,
+ loff_t start, loff_t end)
{
struct afs_vnode *vnode = AFS_FS_I(mapping->host);
struct iov_iter iter;
@@ -558,22 +542,22 @@ static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
long count = wbc->nr_to_write;
int ret;
- _enter(",%lx,%llx-%llx", page->index, start, end);
+ _enter(",%lx,%llx-%llx", folio_index(folio), start, end);
- if (test_set_page_writeback(page))
+ if (folio_start_writeback(folio))
BUG();
- count -= thp_nr_pages(page);
+ count -= folio_nr_pages(folio);
/* Find all consecutive lockable dirty pages that have contiguous
* written regions, stopping when we find a page that is not
* immediately lockable, is not dirty or is missing, or we reach the
* end of the range.
*/
- priv = page_private(page);
- offset = afs_page_dirty_from(page, priv);
- to = afs_page_dirty_to(page, priv);
- trace_afs_page_dirty(vnode, tracepoint_string("store"), page);
+ priv = (unsigned long)folio_get_private(folio);
+ offset = afs_folio_dirty_from(folio, priv);
+ to = afs_folio_dirty_to(folio, priv);
+ trace_afs_folio_dirty(vnode, tracepoint_string("store"), folio);
len = to - offset;
start += offset;
@@ -586,7 +570,7 @@ static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
max_len = min_t(unsigned long long, max_len, i_size - start);
if (len < max_len &&
- (to == thp_size(page) || new_content))
+ (to == folio_size(folio) || new_content))
afs_extend_writeback(mapping, vnode, &count,
start, max_len, new_content, &len);
len = min_t(loff_t, len, max_len);
@@ -596,7 +580,7 @@ static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
* set; the first page is still locked at this point, but all the rest
* have been unlocked.
*/
- unlock_page(page);
+ folio_unlock(folio);
if (start < i_size) {
_debug("write back %x @%llx [%llx]", len, start, i_size);
@@ -657,16 +641,17 @@ static ssize_t afs_write_back_from_locked_page(struct address_space *mapping,
* write a page back to the server
* - the caller locked the page for us
*/
-int afs_writepage(struct page *page, struct writeback_control *wbc)
+int afs_writepage(struct page *subpage, struct writeback_control *wbc)
{
+ struct folio *folio = page_folio(subpage);
ssize_t ret;
loff_t start;
- _enter("{%lx},", page->index);
+ _enter("{%lx},", folio_index(folio));
- start = page->index * PAGE_SIZE;
- ret = afs_write_back_from_locked_page(page->mapping, wbc, page,
- start, LLONG_MAX - start);
+ start = folio_index(folio) * PAGE_SIZE;
+ ret = afs_write_back_from_locked_folio(folio_mapping(folio), wbc,
+ folio, start, LLONG_MAX - start);
if (ret < 0) {
_leave(" = %zd", ret);
return ret;
@@ -683,7 +668,8 @@ static int afs_writepages_region(struct address_space *mapping,
struct writeback_control *wbc,
loff_t start, loff_t end, loff_t *_next)
{
- struct page *page;
+ struct folio *folio;
+ struct page *head_page;
ssize_t ret;
int n;
@@ -693,13 +679,14 @@ static int afs_writepages_region(struct address_space *mapping,
pgoff_t index = start / PAGE_SIZE;
n = find_get_pages_range_tag(mapping, &index, end / PAGE_SIZE,
- PAGECACHE_TAG_DIRTY, 1, &page);
+ PAGECACHE_TAG_DIRTY, 1, &head_page);
if (!n)
break;
- start = (loff_t)page->index * PAGE_SIZE; /* May regress with THPs */
+ folio = page_folio(head_page);
+ start = folio_pos(folio); /* May regress with THPs */
- _debug("wback %lx", page->index);
+ _debug("wback %lx", folio_index(folio));
/* At this point we hold neither the i_pages lock nor the
* page lock: the page may be truncated or invalidated
@@ -707,37 +694,38 @@ static int afs_writepages_region(struct address_space *mapping,
* back from swapper_space to tmpfs file mapping
*/
if (wbc->sync_mode != WB_SYNC_NONE) {
- ret = lock_page_killable(page);
+ ret = folio_lock_killable(folio);
if (ret < 0) {
- put_page(page);
+ folio_put(folio);
return ret;
}
} else {
- if (!trylock_page(page)) {
- put_page(page);
+ if (!folio_trylock(folio)) {
+ folio_put(folio);
return 0;
}
}
- if (page->mapping != mapping || !PageDirty(page)) {
- start += thp_size(page);
- unlock_page(page);
- put_page(page);
+ if (folio_mapping(folio) != mapping ||
+ !folio_test_dirty(folio)) {
+ start += folio_size(folio);
+ folio_unlock(folio);
+ folio_put(folio);
continue;
}
- if (PageWriteback(page)) {
- unlock_page(page);
+ if (folio_test_writeback(folio)) {
+ folio_unlock(folio);
if (wbc->sync_mode != WB_SYNC_NONE)
- wait_on_page_writeback(page);
- put_page(page);
+ folio_wait_writeback(folio);
+ folio_put(folio);
continue;
}
- if (!clear_page_dirty_for_io(page))
+ if (!folio_clear_dirty_for_io(folio))
BUG();
- ret = afs_write_back_from_locked_page(mapping, wbc, page, start, end);
- put_page(page);
+ ret = afs_write_back_from_locked_folio(mapping, wbc, folio, start, end);
+ folio_put(folio);
if (ret < 0) {
_leave(" = %zd", ret);
return ret;
@@ -862,7 +850,6 @@ int afs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
{
struct folio *folio = page_folio(vmf->page);
- struct page *page = &folio->page;
struct file *file = vmf->vma->vm_file;
struct inode *inode = file_inode(file);
struct afs_vnode *vnode = AFS_FS_I(inode);
@@ -870,7 +857,7 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
unsigned long priv;
vm_fault_t ret = VM_FAULT_RETRY;
- _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, page->index);
+ _enter("{{%llx:%llu}},{%lx}", vnode->fid.vid, vnode->fid.vnode, folio_index(folio));
afs_validate(vnode, af->key);
@@ -880,18 +867,18 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
* be modified. We then assume the entire page will need writing back.
*/
#ifdef CONFIG_AFS_FSCACHE
- if (PageFsCache(page) &&
- wait_on_page_fscache_killable(page) < 0)
+ if (folio_test_fscache(folio) &&
+ folio_wait_fscache_killable(folio) < 0)
goto out;
#endif
if (folio_wait_writeback_killable(folio))
goto out;
- if (lock_page_killable(page) < 0)
+ if (folio_lock_killable(folio) < 0)
goto out;
- /* We mustn't change page->private until writeback is complete as that
+ /* We mustn't change folio->private until writeback is complete as that
* details the portion of the page we need to write back and we might
* need to redirty the page if there's a problem.
*/
@@ -900,14 +887,14 @@ vm_fault_t afs_page_mkwrite(struct vm_fault *vmf)
goto out;
}
- priv = afs_page_dirty(page, 0, thp_size(page));
- priv = afs_page_dirty_mmapped(priv);
- if (PagePrivate(page)) {
- set_page_private(page, priv);
- trace_afs_page_dirty(vnode, tracepoint_string("mkwrite+"), page);
+ priv = afs_folio_dirty(folio, 0, folio_size(folio));
+ priv = afs_folio_dirty_mmapped(priv);
+ if (folio_test_private(folio)) {
+ folio_change_private(folio, (void *)priv);
+ trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite+"), folio);
} else {
- attach_page_private(page, (void *)priv);
- trace_afs_page_dirty(vnode, tracepoint_string("mkwrite"), page);
+ folio_attach_private(folio, (void *)priv);
+ trace_afs_folio_dirty(vnode, tracepoint_string("mkwrite"), folio);
}
file_update_time(file);
@@ -948,38 +935,38 @@ void afs_prune_wb_keys(struct afs_vnode *vnode)
/*
* Clean up a page during invalidation.
*/
-int afs_launder_page(struct page *page)
+int afs_launder_page(struct page *subpage)
{
- struct address_space *mapping = page->mapping;
- struct afs_vnode *vnode = AFS_FS_I(mapping->host);
+ struct folio *folio = page_folio(subpage);
+ struct afs_vnode *vnode = AFS_FS_I(folio_inode(folio));
struct iov_iter iter;
struct bio_vec bv[1];
unsigned long priv;
unsigned int f, t;
int ret = 0;
- _enter("{%lx}", page->index);
+ _enter("{%lx}", folio_index(folio));
- priv = page_private(page);
- if (clear_page_dirty_for_io(page)) {
+ priv = (unsigned long)folio_get_private(folio);
+ if (folio_clear_dirty_for_io(folio)) {
f = 0;
- t = thp_size(page);
- if (PagePrivate(page)) {
- f = afs_page_dirty_from(page, priv);
- t = afs_page_dirty_to(page, priv);
+ t = folio_size(folio);
+ if (folio_test_private(folio)) {
+ f = afs_folio_dirty_from(folio, priv);
+ t = afs_folio_dirty_to(folio, priv);
}
- bv[0].bv_page = page;
+ bv[0].bv_page = &folio->page;
bv[0].bv_offset = f;
bv[0].bv_len = t - f;
iov_iter_bvec(&iter, WRITE, bv, 1, bv[0].bv_len);
- trace_afs_page_dirty(vnode, tracepoint_string("launder"), page);
- ret = afs_store_data(vnode, &iter, page_offset(page) + f, true);
+ trace_afs_folio_dirty(vnode, tracepoint_string("launder"), folio);
+ ret = afs_store_data(vnode, &iter, folio_pos(folio) + f, true);
}
- trace_afs_page_dirty(vnode, tracepoint_string("laundered"), page);
- detach_page_private(page);
- wait_on_page_fscache(page);
+ trace_afs_folio_dirty(vnode, tracepoint_string("laundered"), folio);
+ folio_detach_private(folio);
+ folio_wait_fscache(folio);
return ret;
}
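
The afs folio conversion above keeps the dirty byte range packed into the folio's private word (afs_folio_dirty() and friends), and the launder path unpacks it to build the iov_iter. A rough, hedged sketch of that general pattern using only generic folio helpers follows; the 32-bit from/to split and the example_* names are assumptions for illustration, not AFS's real encoding.

#include <linux/pagemap.h>

/* Hypothetical packing: low 32 bits = start offset, high 32 bits = end. */
static unsigned long example_pack_dirty(size_t from, size_t to)
{
	return ((unsigned long)to << 32) | from;	/* assumes 64-bit long */
}

static void example_mark_dirty_range(struct folio *folio, size_t from, size_t to)
{
	unsigned long priv = example_pack_dirty(from, to);

	/* Attach or update the private word without losing an existing one. */
	if (folio_test_private(folio))
		folio_change_private(folio, (void *)priv);
	else
		folio_attach_private(folio, (void *)priv);
}

static void example_dirty_range(struct folio *folio, size_t *from, size_t *to)
{
	unsigned long priv = (unsigned long)folio_get_private(folio);

	*from = priv & 0xffffffffUL;
	*to = priv >> 32;
}
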
diff --git a/fs/attr.c b/fs/attr.c
index 473d21b3a86d..66899b6e9bd8 100644
--- a/fs/attr.c
+++ b/fs/attr.c
@@ -35,7 +35,7 @@ static bool chown_ok(struct user_namespace *mnt_userns,
kuid_t uid)
{
kuid_t kuid = i_uid_into_mnt(mnt_userns, inode);
- if (uid_eq(current_fsuid(), kuid) && uid_eq(uid, kuid))
+ if (uid_eq(current_fsuid(), kuid) && uid_eq(uid, inode->i_uid))
return true;
if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN))
return true;
@@ -62,7 +62,7 @@ static bool chgrp_ok(struct user_namespace *mnt_userns,
{
kgid_t kgid = i_gid_into_mnt(mnt_userns, inode);
if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)) &&
- (in_group_p(gid) || gid_eq(gid, kgid)))
+ (in_group_p(gid) || gid_eq(gid, inode->i_gid)))
return true;
if (capable_wrt_inode_uidgid(mnt_userns, inode, CAP_CHOWN))
return true;
diff --git a/fs/btrfs/async-thread.c b/fs/btrfs/async-thread.c
index 309516e6a968..43c89952b7d2 100644
--- a/fs/btrfs/async-thread.c
+++ b/fs/btrfs/async-thread.c
@@ -234,6 +234,13 @@ static void run_ordered_work(struct __btrfs_workqueue *wq,
ordered_list);
if (!test_bit(WORK_DONE_BIT, &work->flags))
break;
+ /*
+	 * Orders all subsequent loads after reading WORK_DONE_BIT;
+	 * paired with the smp_mb__before_atomic in btrfs_work_helper,
+	 * this guarantees that the ordered function will see all
+	 * updates from the ordinary work function.
+ */
+ smp_rmb();
/*
* we are going to call the ordered done function, but
@@ -317,6 +324,13 @@ static void btrfs_work_helper(struct work_struct *normal_work)
thresh_exec_hook(wq);
work->func(work);
if (need_order) {
+ /*
+ * Ensures all memory accesses done in the work function are
+	 * ordered before setting the WORK_DONE_BIT, ensuring the thread
+	 * that is going to execute the ordered work sees them.
+ * Pairs with the smp_rmb in run_ordered_work.
+ */
+ smp_mb__before_atomic();
set_bit(WORK_DONE_BIT, &work->flags);
run_ordered_work(wq, work);
} else {
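
The two comments added above describe a classic publish/consume barrier pairing around WORK_DONE_BIT. Below is a minimal, self-contained sketch of that pairing; the example_* struct and bit are hypothetical and this is not the btrfs code itself.

#include <linux/atomic.h>
#include <linux/bitops.h>
#include <linux/errno.h>

#define EXAMPLE_DONE_BIT	0	/* hypothetical flag bit */

struct example_work {
	unsigned long flags;
	int result;			/* stands in for the work function's updates */
};

/* Writer: publish the update, then set the DONE bit. */
static void example_publish(struct example_work *w, int value)
{
	w->result = value;		/* ordinary work-function update */
	smp_mb__before_atomic();	/* order the update before the bit */
	set_bit(EXAMPLE_DONE_BIT, &w->flags);
}

/* Reader: observe the DONE bit, then read the update. */
static int example_consume(struct example_work *w)
{
	if (!test_bit(EXAMPLE_DONE_BIT, &w->flags))
		return -EAGAIN;
	smp_rmb();			/* order subsequent loads after the bit */
	return w->result;		/* guaranteed to see the writer's value */
}
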
diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c
index 59c3be8c1f4c..514ead6e93b6 100644
--- a/fs/btrfs/disk-io.c
+++ b/fs/btrfs/disk-io.c
@@ -3978,11 +3978,23 @@ static void btrfs_end_empty_barrier(struct bio *bio)
*/
static void write_dev_flush(struct btrfs_device *device)
{
- struct request_queue *q = bdev_get_queue(device->bdev);
struct bio *bio = device->flush_bio;
+#ifndef CONFIG_BTRFS_FS_CHECK_INTEGRITY
+ /*
+ * When a disk has write caching disabled, we skip submission of a bio
+ * with flush and sync requests before writing the superblock, since
+	 * it's not needed. However, when the integrity checker is enabled, this
+	 * results in reports that there are metadata blocks referred to by a
+	 * superblock that were not properly flushed. So, for simplicity, don't
+	 * skip the bio submission when the integrity checker is enabled, since
+	 * it is a debug tool not meant for use in non-debug builds.
+ */
+ struct request_queue *q = bdev_get_queue(device->bdev);
if (!test_bit(QUEUE_FLAG_WC, &q->queue_flags))
return;
+#endif
bio_reset(bio);
bio->bi_end_io = btrfs_end_empty_barrier;
diff --git a/fs/btrfs/ioctl.c b/fs/btrfs/ioctl.c
index fb8cc9642ac4..92138ac2a4e2 100644
--- a/fs/btrfs/ioctl.c
+++ b/fs/btrfs/ioctl.c
@@ -3985,6 +3985,10 @@ static long btrfs_ioctl_balance(struct file *file, void __user *arg)
bool need_unlock; /* for mut. excl. ops lock */
int ret;
+ if (!arg)
+ btrfs_warn(fs_info,
+ "IOC_BALANCE ioctl (v1) is deprecated and will be removed in kernel 5.18");
+
if (!capable(CAP_SYS_ADMIN))
return -EPERM;
diff --git a/fs/btrfs/lzo.c b/fs/btrfs/lzo.c
index 65cb0766e62d..9febb8025825 100644
--- a/fs/btrfs/lzo.c
+++ b/fs/btrfs/lzo.c
@@ -125,6 +125,7 @@ static inline size_t read_compress_length(const char *buf)
static int copy_compressed_data_to_page(char *compressed_data,
size_t compressed_size,
struct page **out_pages,
+ unsigned long max_nr_page,
u32 *cur_out,
const u32 sectorsize)
{
@@ -133,6 +134,9 @@ static int copy_compressed_data_to_page(char *compressed_data,
struct page *cur_page;
char *kaddr;
+ if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+ return -E2BIG;
+
/*
	 * We never allow a segment header to cross a sector boundary; the
	 * previous run should ensure we have enough space left inside the sector.
@@ -161,6 +165,10 @@ static int copy_compressed_data_to_page(char *compressed_data,
orig_out + compressed_size - *cur_out);
kunmap(cur_page);
+
+ if ((*cur_out / PAGE_SIZE) >= max_nr_page)
+ return -E2BIG;
+
cur_page = out_pages[*cur_out / PAGE_SIZE];
/* Allocate a new page */
if (!cur_page) {
@@ -203,6 +211,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
const u32 sectorsize = btrfs_sb(mapping->host->i_sb)->sectorsize;
struct page *page_in = NULL;
char *sizes_ptr;
+ const unsigned long max_nr_page = *out_pages;
int ret = 0;
/* Points to the file offset of input data */
u64 cur_in = start;
@@ -210,6 +219,7 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
u32 cur_out = 0;
u32 len = *total_out;
+ ASSERT(max_nr_page > 0);
*out_pages = 0;
*total_out = 0;
*total_in = 0;
@@ -248,7 +258,8 @@ int lzo_compress_pages(struct list_head *ws, struct address_space *mapping,
}
ret = copy_compressed_data_to_page(workspace->cbuf, out_len,
- pages, &cur_out, sectorsize);
+ pages, max_nr_page,
+ &cur_out, sectorsize);
if (ret < 0)
goto out;
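
The max_nr_page plumbing above bounds how far the output cursor may advance. A hedged sketch of the underlying check follows: the destination page index is derived from a running byte offset and must stay below the caller's page count. The example_* helper is hypothetical, not the btrfs function.

#include <linux/mm.h>
#include <linux/errno.h>

static int example_get_out_page(struct page **out_pages,
				unsigned long max_nr_page,
				u32 cur_out, struct page **ret_page)
{
	unsigned long index = cur_out / PAGE_SIZE;

	/* Refuse to index past the destination array instead of overflowing. */
	if (index >= max_nr_page)
		return -E2BIG;

	if (!out_pages[index]) {
		out_pages[index] = alloc_page(GFP_NOFS);
		if (!out_pages[index])
			return -ENOMEM;
	}
	*ret_page = out_pages[index];
	return 0;
}
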
diff --git a/fs/btrfs/scrub.c b/fs/btrfs/scrub.c
index cf82ea6f54fb..8f6ceea33969 100644
--- a/fs/btrfs/scrub.c
+++ b/fs/btrfs/scrub.c
@@ -73,8 +73,8 @@ struct scrub_page {
u64 physical_for_dev_replace;
atomic_t refs;
u8 mirror_num;
- int have_csum:1;
- int io_error:1;
+ unsigned int have_csum:1;
+ unsigned int io_error:1;
u8 csum[BTRFS_CSUM_SIZE];
struct scrub_recover *recover;
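
The have_csum/io_error change above matters because a plain int one-bit bitfield is signed under GCC's defaults, so it can only hold 0 or -1 and comparisons against 1 quietly fail. A tiny user-space illustration; the behaviour shown assumes GCC's signed treatment of plain int bitfields.

#include <stdio.h>

struct flags_signed   { int have_csum:1; };
struct flags_unsigned { unsigned int have_csum:1; };

int main(void)
{
	struct flags_signed   s = { .have_csum = 1 };	/* stored as -1 */
	struct flags_unsigned u = { .have_csum = 1 };	/* stored as  1 */

	printf("signed   == 1? %d (value %d)\n", s.have_csum == 1, s.have_csum);
	printf("unsigned == 1? %d (value %d)\n", u.have_csum == 1, u.have_csum);
	return 0;
}

With GCC this prints "signed == 1? 0 (value -1)" against "unsigned == 1? 1 (value 1)", which is why the unsigned form is the safe one for flag bits.
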
diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c
index 61ac57bcbf1a..0997e3cd74e9 100644
--- a/fs/btrfs/volumes.c
+++ b/fs/btrfs/volumes.c
@@ -7559,6 +7559,19 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
fs_info->fs_devices->total_rw_bytes = 0;
/*
+ * Lockdep complains about possible circular locking dependency between
+ * a disk's open_mutex (struct gendisk.open_mutex), the rw semaphores
+	 * used for freeze protection of a fs (struct super_block.s_writers),
+ * which we take when starting a transaction, and extent buffers of the
+ * chunk tree if we call read_one_dev() while holding a lock on an
+ * extent buffer of the chunk tree. Since we are mounting the filesystem
+ * and at this point there can't be any concurrent task modifying the
+ * chunk tree, to keep it simple, just skip locking on the chunk tree.
+ */
+ ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
+ path->skip_locking = 1;
+
+ /*
* Read all device items, and then all the chunk items. All
* device items are found before any chunk item (their object id
* is smaller than the lowest possible object id for a chunk
@@ -7583,10 +7596,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
goto error;
break;
}
- /*
- * The nodes on level 1 are not locked but we don't need to do
- * that during mount time as nothing else can access the tree
- */
node = path->nodes[1];
if (node) {
if (last_ra_node != node->start) {
@@ -7614,7 +7623,6 @@ int btrfs_read_chunk_tree(struct btrfs_fs_info *fs_info)
* requirement for chunk allocation, see the comment on
* top of btrfs_chunk_alloc() for details.
*/
- ASSERT(!test_bit(BTRFS_FS_OPEN, &fs_info->flags));
chunk = btrfs_item_ptr(leaf, slot, struct btrfs_chunk);
ret = read_one_chunk(&found_key, leaf, chunk);
if (ret)
diff --git a/fs/btrfs/zstd.c b/fs/btrfs/zstd.c
index f06b68040352..fc42dd0badd7 100644
--- a/fs/btrfs/zstd.c
+++ b/fs/btrfs/zstd.c
@@ -28,10 +28,10 @@
/* 307s to avoid pathologically clashing with transaction commit */
#define ZSTD_BTRFS_RECLAIM_JIFFIES (307 * HZ)
-static ZSTD_parameters zstd_get_btrfs_parameters(unsigned int level,
+static zstd_parameters zstd_get_btrfs_parameters(unsigned int level,
size_t src_len)
{
- ZSTD_parameters params = ZSTD_getParams(level, src_len, 0);
+ zstd_parameters params = zstd_get_params(level, src_len);
if (params.cParams.windowLog > ZSTD_BTRFS_MAX_WINDOWLOG)
params.cParams.windowLog = ZSTD_BTRFS_MAX_WINDOWLOG;
@@ -48,8 +48,8 @@ struct workspace {
unsigned long last_used; /* jiffies */
struct list_head list;
struct list_head lru_list;
- ZSTD_inBuffer in_buf;
- ZSTD_outBuffer out_buf;
+ zstd_in_buffer in_buf;
+ zstd_out_buffer out_buf;
};
/*
@@ -155,12 +155,12 @@ static void zstd_calc_ws_mem_sizes(void)
unsigned int level;
for (level = 1; level <= ZSTD_BTRFS_MAX_LEVEL; level++) {
- ZSTD_parameters params =
+ zstd_parameters params =
zstd_get_btrfs_parameters(level, ZSTD_BTRFS_MAX_INPUT);
size_t level_size =
max_t(size_t,
- ZSTD_CStreamWorkspaceBound(params.cParams),
- ZSTD_DStreamWorkspaceBound(ZSTD_BTRFS_MAX_INPUT));
+ zstd_cstream_workspace_bound(&params.cParams),
+ zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT));
max_size = max_t(size_t, max_size, level_size);
zstd_ws_mem_sizes[level - 1] = max_size;
@@ -371,7 +371,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
unsigned long *total_in, unsigned long *total_out)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
- ZSTD_CStream *stream;
+ zstd_cstream *stream;
int ret = 0;
int nr_pages = 0;
struct page *in_page = NULL; /* The current page to read */
@@ -381,7 +381,7 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
unsigned long len = *total_out;
const unsigned long nr_dest_pages = *out_pages;
unsigned long max_out = nr_dest_pages * PAGE_SIZE;
- ZSTD_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
+ zstd_parameters params = zstd_get_btrfs_parameters(workspace->req_level,
len);
*out_pages = 0;
@@ -389,10 +389,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
*total_in = 0;
/* Initialize the stream */
- stream = ZSTD_initCStream(params, len, workspace->mem,
+ stream = zstd_init_cstream(&params, len, workspace->mem,
workspace->size);
if (!stream) {
- pr_warn("BTRFS: ZSTD_initCStream failed\n");
+ pr_warn("BTRFS: zstd_init_cstream failed\n");
ret = -EIO;
goto out;
}
@@ -418,11 +418,11 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
while (1) {
size_t ret2;
- ret2 = ZSTD_compressStream(stream, &workspace->out_buf,
+ ret2 = zstd_compress_stream(stream, &workspace->out_buf,
&workspace->in_buf);
- if (ZSTD_isError(ret2)) {
- pr_debug("BTRFS: ZSTD_compressStream returned %d\n",
- ZSTD_getErrorCode(ret2));
+ if (zstd_is_error(ret2)) {
+ pr_debug("BTRFS: zstd_compress_stream returned %d\n",
+ zstd_get_error_code(ret2));
ret = -EIO;
goto out;
}
@@ -487,10 +487,10 @@ int zstd_compress_pages(struct list_head *ws, struct address_space *mapping,
while (1) {
size_t ret2;
- ret2 = ZSTD_endStream(stream, &workspace->out_buf);
- if (ZSTD_isError(ret2)) {
- pr_debug("BTRFS: ZSTD_endStream returned %d\n",
- ZSTD_getErrorCode(ret2));
+ ret2 = zstd_end_stream(stream, &workspace->out_buf);
+ if (zstd_is_error(ret2)) {
+ pr_debug("BTRFS: zstd_end_stream returned %d\n",
+ zstd_get_error_code(ret2));
ret = -EIO;
goto out;
}
@@ -548,17 +548,17 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
struct workspace *workspace = list_entry(ws, struct workspace, list);
struct page **pages_in = cb->compressed_pages;
size_t srclen = cb->compressed_len;
- ZSTD_DStream *stream;
+ zstd_dstream *stream;
int ret = 0;
unsigned long page_in_index = 0;
unsigned long total_pages_in = DIV_ROUND_UP(srclen, PAGE_SIZE);
unsigned long buf_start;
unsigned long total_out = 0;
- stream = ZSTD_initDStream(
+ stream = zstd_init_dstream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
if (!stream) {
- pr_debug("BTRFS: ZSTD_initDStream failed\n");
+ pr_debug("BTRFS: zstd_init_dstream failed\n");
ret = -EIO;
goto done;
}
@@ -574,11 +574,11 @@ int zstd_decompress_bio(struct list_head *ws, struct compressed_bio *cb)
while (1) {
size_t ret2;
- ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
+ ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
&workspace->in_buf);
- if (ZSTD_isError(ret2)) {
- pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
- ZSTD_getErrorCode(ret2));
+ if (zstd_is_error(ret2)) {
+ pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
+ zstd_get_error_code(ret2));
ret = -EIO;
goto done;
}
@@ -624,16 +624,16 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
size_t destlen)
{
struct workspace *workspace = list_entry(ws, struct workspace, list);
- ZSTD_DStream *stream;
+ zstd_dstream *stream;
int ret = 0;
size_t ret2;
unsigned long total_out = 0;
unsigned long pg_offset = 0;
- stream = ZSTD_initDStream(
+ stream = zstd_init_dstream(
ZSTD_BTRFS_MAX_INPUT, workspace->mem, workspace->size);
if (!stream) {
- pr_warn("BTRFS: ZSTD_initDStream failed\n");
+ pr_warn("BTRFS: zstd_init_dstream failed\n");
ret = -EIO;
goto finish;
}
@@ -657,15 +657,15 @@ int zstd_decompress(struct list_head *ws, unsigned char *data_in,
/* Check if the frame is over and we still need more input */
if (ret2 == 0) {
- pr_debug("BTRFS: ZSTD_decompressStream ended early\n");
+ pr_debug("BTRFS: zstd_decompress_stream ended early\n");
ret = -EIO;
goto finish;
}
- ret2 = ZSTD_decompressStream(stream, &workspace->out_buf,
+ ret2 = zstd_decompress_stream(stream, &workspace->out_buf,
&workspace->in_buf);
- if (ZSTD_isError(ret2)) {
- pr_debug("BTRFS: ZSTD_decompressStream returned %d\n",
- ZSTD_getErrorCode(ret2));
+ if (zstd_is_error(ret2)) {
+ pr_debug("BTRFS: zstd_decompress_stream returned %d\n",
+ zstd_get_error_code(ret2));
ret = -EIO;
goto finish;
}
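
The renames above switch btrfs to the kernel's zstd_* wrapper API. A hedged sketch of the decompression side using only the calls that appear in this diff follows: workspace sizing, stream init, and the streaming loop. ZSTD_BTRFS_MAX_INPUT stands in for the window-size bound defined earlier in zstd.c, and example_zstd_decompress() is a made-up helper, not a real function.

#include <linux/zstd.h>
#include <linux/mm.h>
#include <linux/errno.h>

static int example_zstd_decompress(const void *src, size_t srclen,
				   void *dst, size_t dstlen)
{
	size_t wksp_size = zstd_dstream_workspace_bound(ZSTD_BTRFS_MAX_INPUT);
	void *wksp = kvmalloc(wksp_size, GFP_KERNEL);
	zstd_in_buffer in_buf = { .src = src, .size = srclen, .pos = 0 };
	zstd_out_buffer out_buf = { .dst = dst, .size = dstlen, .pos = 0 };
	zstd_dstream *stream;
	int ret = 0;

	if (!wksp)
		return -ENOMEM;

	stream = zstd_init_dstream(ZSTD_BTRFS_MAX_INPUT, wksp, wksp_size);
	if (!stream) {
		ret = -EIO;
		goto out;
	}

	while (in_buf.pos < in_buf.size && out_buf.pos < out_buf.size) {
		size_t ret2 = zstd_decompress_stream(stream, &out_buf, &in_buf);

		if (zstd_is_error(ret2)) {
			ret = -EIO;
			break;
		}
		if (ret2 == 0)		/* whole frame consumed */
			break;
	}
out:
	kvfree(wksp);
	return ret;
}
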
diff --git a/fs/ceph/addr.c b/fs/ceph/addr.c
index 99b80b5c7a93..e53c8541f5b2 100644
--- a/fs/ceph/addr.c
+++ b/fs/ceph/addr.c
@@ -63,7 +63,7 @@
(CONGESTION_ON_THRESH(congestion_kb) >> 2))
static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
- struct page *page, void **_fsdata);
+ struct folio *folio, void **_fsdata);
static inline struct ceph_snap_context *page_snap_context(struct page *page)
{
@@ -317,13 +317,14 @@ static const struct netfs_read_request_ops ceph_netfs_read_ops = {
};
/* read a single page, without unlocking it. */
-static int ceph_readpage(struct file *file, struct page *page)
+static int ceph_readpage(struct file *file, struct page *subpage)
{
+ struct folio *folio = page_folio(subpage);
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_vino vino = ceph_vino(inode);
- u64 off = page_offset(page);
- u64 len = thp_size(page);
+ size_t len = folio_size(folio);
+ u64 off = folio_file_pos(folio);
if (ci->i_inline_version != CEPH_INLINE_NONE) {
/*
@@ -331,19 +332,19 @@ static int ceph_readpage(struct file *file, struct page *page)
* into page cache while getting Fcr caps.
*/
if (off == 0) {
- unlock_page(page);
+ folio_unlock(folio);
return -EINVAL;
}
- zero_user_segment(page, 0, thp_size(page));
- SetPageUptodate(page);
- unlock_page(page);
+ zero_user_segment(&folio->page, 0, folio_size(folio));
+ folio_mark_uptodate(folio);
+ folio_unlock(folio);
return 0;
}
- dout("readpage ino %llx.%llx file %p off %llu len %llu page %p index %lu\n",
- vino.ino, vino.snap, file, off, len, page, page->index);
+ dout("readpage ino %llx.%llx file %p off %llu len %zu folio %p index %lu\n",
+ vino.ino, vino.snap, file, off, len, folio, folio_index(folio));
- return netfs_readpage(file, page, &ceph_netfs_read_ops, NULL);
+ return netfs_readpage(file, folio, &ceph_netfs_read_ops, NULL);
}
static void ceph_readahead(struct readahead_control *ractl)
@@ -724,7 +725,7 @@ static int ceph_writepages_start(struct address_space *mapping,
wbc->sync_mode == WB_SYNC_NONE ? "NONE" :
(wbc->sync_mode == WB_SYNC_ALL ? "ALL" : "HOLD"));
- if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
+ if (ceph_inode_is_shutdown(inode)) {
if (ci->i_wrbuffer_ref > 0) {
pr_warn_ratelimited(
"writepage_start %p %lld forced umount\n",
@@ -1145,12 +1146,12 @@ static struct ceph_snap_context *
ceph_find_incompatible(struct page *page)
{
struct inode *inode = page->mapping->host;
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
struct ceph_inode_info *ci = ceph_inode(inode);
- if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
- dout(" page %p forced umount\n", page);
- return ERR_PTR(-EIO);
+ if (ceph_inode_is_shutdown(inode)) {
+ dout(" page %p %llx:%llx is shutdown\n", page,
+ ceph_vinop(inode));
+ return ERR_PTR(-ESTALE);
}
for (;;) {
@@ -1187,18 +1188,18 @@ ceph_find_incompatible(struct page *page)
}
static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned int len,
- struct page *page, void **_fsdata)
+ struct folio *folio, void **_fsdata)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_snap_context *snapc;
- snapc = ceph_find_incompatible(page);
+ snapc = ceph_find_incompatible(folio_page(folio, 0));
if (snapc) {
int r;
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
if (IS_ERR(snapc))
return PTR_ERR(snapc);
@@ -1216,12 +1217,12 @@ static int ceph_netfs_check_write_begin(struct file *file, loff_t pos, unsigned
* clean, or already dirty within the same snap context.
*/
static int ceph_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned len, unsigned flags,
+ loff_t pos, unsigned len, unsigned aop_flags,
struct page **pagep, void **fsdata)
{
struct inode *inode = file_inode(file);
struct ceph_inode_info *ci = ceph_inode(inode);
- struct page *page = NULL;
+ struct folio *folio = NULL;
pgoff_t index = pos >> PAGE_SHIFT;
int r;
@@ -1230,39 +1231,43 @@ static int ceph_write_begin(struct file *file, struct address_space *mapping,
* for inline_version sent to the MDS.
*/
if (ci->i_inline_version != CEPH_INLINE_NONE) {
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
+ unsigned int fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
+ if (aop_flags & AOP_FLAG_NOFS)
+ fgp_flags |= FGP_NOFS;
+ folio = __filemap_get_folio(mapping, index, fgp_flags,
+ mapping_gfp_mask(mapping));
+ if (!folio)
return -ENOMEM;
/*
* The inline_version on a new inode is set to 1. If that's the
- * case, then the page is brand new and isn't yet Uptodate.
+ * case, then the folio is brand new and isn't yet Uptodate.
*/
r = 0;
if (index == 0 && ci->i_inline_version != 1) {
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
WARN_ONCE(1, "ceph: write_begin called on still-inlined inode (inline_version %llu)!\n",
ci->i_inline_version);
r = -EINVAL;
}
goto out;
}
- zero_user_segment(page, 0, thp_size(page));
- SetPageUptodate(page);
+ zero_user_segment(&folio->page, 0, folio_size(folio));
+ folio_mark_uptodate(folio);
goto out;
}
- r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &page, NULL,
+ r = netfs_write_begin(file, inode->i_mapping, pos, len, 0, &folio, NULL,
&ceph_netfs_read_ops, NULL);
out:
if (r == 0)
- wait_on_page_fscache(page);
+ folio_wait_fscache(folio);
if (r < 0) {
- if (page)
- put_page(page);
+ if (folio)
+ folio_put(folio);
} else {
- WARN_ON_ONCE(!PageLocked(page));
- *pagep = page;
+ WARN_ON_ONCE(!folio_test_locked(folio));
+ *pagep = &folio->page;
}
return r;
}
@@ -1273,32 +1278,33 @@ out:
*/
static int ceph_write_end(struct file *file, struct address_space *mapping,
loff_t pos, unsigned len, unsigned copied,
- struct page *page, void *fsdata)
+ struct page *subpage, void *fsdata)
{
+ struct folio *folio = page_folio(subpage);
struct inode *inode = file_inode(file);
bool check_cap = false;
- dout("write_end file %p inode %p page %p %d~%d (%d)\n", file,
- inode, page, (int)pos, (int)copied, (int)len);
+ dout("write_end file %p inode %p folio %p %d~%d (%d)\n", file,
+ inode, folio, (int)pos, (int)copied, (int)len);
- if (!PageUptodate(page)) {
+ if (!folio_test_uptodate(folio)) {
/* just return that nothing was copied on a short copy */
if (copied < len) {
copied = 0;
goto out;
}
- SetPageUptodate(page);
+ folio_mark_uptodate(folio);
}
/* did file size increase? */
if (pos+copied > i_size_read(inode))
check_cap = ceph_inode_set_size(inode, pos+copied);
- set_page_dirty(page);
+ folio_mark_dirty(folio);
out:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
if (check_cap)
ceph_check_caps(ceph_inode(inode), CHECK_CAPS_AUTHONLY, NULL);
@@ -1306,17 +1312,6 @@ out:
return copied;
}
-/*
- * we set .direct_IO to indicate direct io is supported, but since we
- * intercept O_DIRECT reads and writes early, this function should
- * never get called.
- */
-static ssize_t ceph_direct_io(struct kiocb *iocb, struct iov_iter *iter)
-{
- WARN_ON(1);
- return -EINVAL;
-}
-
const struct address_space_operations ceph_aops = {
.readpage = ceph_readpage,
.readahead = ceph_readahead,
@@ -1327,7 +1322,7 @@ const struct address_space_operations ceph_aops = {
.set_page_dirty = ceph_set_page_dirty,
.invalidatepage = ceph_invalidatepage,
.releasepage = ceph_releasepage,
- .direct_IO = ceph_direct_io,
+ .direct_IO = noop_direct_IO,
};
static void ceph_block_sigs(sigset_t *oldset)
@@ -1356,6 +1351,9 @@ static vm_fault_t ceph_filemap_fault(struct vm_fault *vmf)
sigset_t oldset;
vm_fault_t ret = VM_FAULT_SIGBUS;
+ if (ceph_inode_is_shutdown(inode))
+ return ret;
+
ceph_block_sigs(&oldset);
dout("filemap_fault %p %llx.%llx %llu trying to get caps\n",
@@ -1447,6 +1445,9 @@ static vm_fault_t ceph_page_mkwrite(struct vm_fault *vmf)
sigset_t oldset;
vm_fault_t ret = VM_FAULT_SIGBUS;
+ if (ceph_inode_is_shutdown(inode))
+ return ret;
+
prealloc_cf = ceph_alloc_cap_flush();
if (!prealloc_cf)
return VM_FAULT_OOM;
diff --git a/fs/ceph/cache.c b/fs/ceph/cache.c
index 9cfadbb86568..457afda5498a 100644
--- a/fs/ceph/cache.c
+++ b/fs/ceph/cache.c
@@ -12,12 +12,6 @@
#include "super.h"
#include "cache.h"
-struct ceph_aux_inode {
- u64 version;
- u64 mtime_sec;
- u64 mtime_nsec;
-};
-
struct fscache_netfs ceph_cache_netfs = {
.name = "ceph",
.version = 0,
@@ -109,20 +103,14 @@ static enum fscache_checkaux ceph_fscache_inode_check_aux(
void *cookie_netfs_data, const void *data, uint16_t dlen,
loff_t object_size)
{
- struct ceph_aux_inode aux;
struct ceph_inode_info* ci = cookie_netfs_data;
struct inode* inode = &ci->vfs_inode;
- if (dlen != sizeof(aux) ||
+ if (dlen != sizeof(ci->i_version) ||
i_size_read(inode) != object_size)
return FSCACHE_CHECKAUX_OBSOLETE;
- memset(&aux, 0, sizeof(aux));
- aux.version = ci->i_version;
- aux.mtime_sec = inode->i_mtime.tv_sec;
- aux.mtime_nsec = inode->i_mtime.tv_nsec;
-
- if (memcmp(data, &aux, sizeof(aux)) != 0)
+ if (*(u64 *)data != ci->i_version)
return FSCACHE_CHECKAUX_OBSOLETE;
dout("ceph inode 0x%p cached okay\n", ci);
@@ -139,7 +127,6 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
- struct ceph_aux_inode aux;
/* No caching for filesystem */
if (!fsc->fscache)
@@ -151,14 +138,10 @@ void ceph_fscache_register_inode_cookie(struct inode *inode)
inode_lock_nested(inode, I_MUTEX_CHILD);
if (!ci->fscache) {
- memset(&aux, 0, sizeof(aux));
- aux.version = ci->i_version;
- aux.mtime_sec = inode->i_mtime.tv_sec;
- aux.mtime_nsec = inode->i_mtime.tv_nsec;
ci->fscache = fscache_acquire_cookie(fsc->fscache,
&ceph_fscache_inode_object_def,
&ci->i_vino, sizeof(ci->i_vino),
- &aux, sizeof(aux),
+ &ci->i_version, sizeof(ci->i_version),
ci, i_size_read(inode), false);
}
inode_unlock(inode);
diff --git a/fs/ceph/caps.c b/fs/ceph/caps.c
index 8f537f1d9d1d..b9460b6fb76f 100644
--- a/fs/ceph/caps.c
+++ b/fs/ceph/caps.c
@@ -1188,11 +1188,11 @@ void ceph_remove_cap(struct ceph_cap *cap, bool queue_release)
lockdep_assert_held(&ci->i_ceph_lock);
- fsc = ceph_sb_to_client(ci->vfs_inode.i_sb);
+ fsc = ceph_inode_to_client(&ci->vfs_inode);
WARN_ON_ONCE(ci->i_auth_cap == cap &&
!list_empty(&ci->i_dirty_item) &&
!fsc->blocklisted &&
- READ_ONCE(fsc->mount_state) != CEPH_MOUNT_SHUTDOWN);
+ !ceph_inode_is_shutdown(&ci->vfs_inode));
__ceph_remove_cap(cap, queue_release);
}
@@ -1968,8 +1968,8 @@ retry:
}
}
- dout("check_caps %p file_want %s used %s dirty %s flushing %s"
- " issued %s revoking %s retain %s %s%s\n", inode,
+ dout("check_caps %llx.%llx file_want %s used %s dirty %s flushing %s"
+ " issued %s revoking %s retain %s %s%s\n", ceph_vinop(inode),
ceph_cap_string(file_wanted),
ceph_cap_string(used), ceph_cap_string(ci->i_dirty_caps),
ceph_cap_string(ci->i_flushing_caps),
@@ -1990,7 +1990,8 @@ retry:
(revoking & (CEPH_CAP_FILE_CACHE|
CEPH_CAP_FILE_LAZYIO)) && /* or revoking cache */
!tried_invalidate) {
- dout("check_caps trying to invalidate on %p\n", inode);
+ dout("check_caps trying to invalidate on %llx.%llx\n",
+ ceph_vinop(inode));
if (try_nonblocking_invalidate(inode) < 0) {
dout("check_caps queuing invalidate\n");
queue_invalidate = true;
@@ -2629,9 +2630,9 @@ void ceph_take_cap_refs(struct ceph_inode_info *ci, int got,
*
* Returns 0 if caps were not able to be acquired (yet), 1 if succeed,
 * or a negative error code. There are 3 special error codes:
- * -EAGAIN: need to sleep but non-blocking is specified
- * -EFBIG: ask caller to call check_max_size() and try again.
- * -ESTALE: ask caller to call ceph_renew_caps() and try again.
+ * -EAGAIN: need to sleep but non-blocking is specified
+ * -EFBIG: ask caller to call check_max_size() and try again.
+ * -EUCLEAN: ask caller to call ceph_renew_caps() and try again.
*/
enum {
/* first 8 bits are reserved for CEPH_FILE_MODE_FOO */
@@ -2679,7 +2680,7 @@ again:
dout("get_cap_refs %p endoff %llu > maxsize %llu\n",
inode, endoff, ci->i_max_size);
if (endoff > ci->i_requested_max_size)
- ret = ci->i_auth_cap ? -EFBIG : -ESTALE;
+ ret = ci->i_auth_cap ? -EFBIG : -EUCLEAN;
goto out_unlock;
}
/*
@@ -2749,9 +2750,9 @@ again:
goto out_unlock;
}
- if (READ_ONCE(mdsc->fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
- dout("get_cap_refs %p forced umount\n", inode);
- ret = -EIO;
+ if (ceph_inode_is_shutdown(inode)) {
+ dout("get_cap_refs %p inode is shutdown\n", inode);
+ ret = -ESTALE;
goto out_unlock;
}
mds_wanted = __ceph_caps_mds_wanted(ci, false);
@@ -2759,7 +2760,7 @@ again:
dout("get_cap_refs %p need %s > mds_wanted %s\n",
inode, ceph_cap_string(need),
ceph_cap_string(mds_wanted));
- ret = -ESTALE;
+ ret = -EUCLEAN;
goto out_unlock;
}
@@ -2843,7 +2844,7 @@ int ceph_try_get_caps(struct inode *inode, int need, int want,
ret = try_get_cap_refs(inode, need, want, 0, flags, got);
/* three special error codes */
- if (ret == -EAGAIN || ret == -EFBIG || ret == -ESTALE)
+ if (ret == -EAGAIN || ret == -EFBIG || ret == -EUCLEAN)
ret = 0;
return ret;
}
@@ -2926,7 +2927,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
}
if (ret < 0) {
- if (ret == -EFBIG || ret == -ESTALE) {
+ if (ret == -EFBIG || ret == -EUCLEAN) {
int ret2 = ceph_wait_on_async_create(inode);
if (ret2 < 0)
return ret2;
@@ -2935,7 +2936,7 @@ int ceph_get_caps(struct file *filp, int need, int want, loff_t endoff, int *got
check_max_size(inode, endoff);
continue;
}
- if (ret == -ESTALE) {
+ if (ret == -EUCLEAN) {
/* session was killed, try renew caps */
ret = ceph_renew_caps(inode, flags);
if (ret == 0)
@@ -4315,7 +4316,7 @@ static void flush_dirty_session_caps(struct ceph_mds_session *s)
i_dirty_item);
inode = &ci->vfs_inode;
ihold(inode);
- dout("flush_dirty_caps %p\n", inode);
+ dout("flush_dirty_caps %llx.%llx\n", ceph_vinop(inode));
spin_unlock(&mdsc->cap_dirty_lock);
ceph_check_caps(ci, CHECK_CAPS_FLUSH, NULL);
iput(inode);
@@ -4560,3 +4561,119 @@ int ceph_encode_dentry_release(void **p, struct dentry *dentry,
spin_unlock(&dentry->d_lock);
return ret;
}
+
+static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct ceph_cap_snap *capsnap;
+ int capsnap_release = 0;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);
+
+ while (!list_empty(&ci->i_cap_snaps)) {
+ capsnap = list_first_entry(&ci->i_cap_snaps,
+ struct ceph_cap_snap, ci_item);
+ __ceph_remove_capsnap(inode, capsnap, NULL, NULL);
+ ceph_put_snap_context(capsnap->context);
+ ceph_put_cap_snap(capsnap);
+ capsnap_release++;
+ }
+ wake_up_all(&ci->i_cap_wq);
+ wake_up_all(&mdsc->cap_flushing_wq);
+ return capsnap_release;
+}
+
+int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate)
+{
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ struct ceph_mds_client *mdsc = fsc->mdsc;
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ bool is_auth;
+ bool dirty_dropped = false;
+ int iputs = 0;
+
+ lockdep_assert_held(&ci->i_ceph_lock);
+
+ dout("removing cap %p, ci is %p, inode is %p\n",
+ cap, ci, &ci->vfs_inode);
+
+ is_auth = (cap == ci->i_auth_cap);
+ __ceph_remove_cap(cap, false);
+ if (is_auth) {
+ struct ceph_cap_flush *cf;
+
+ if (ceph_inode_is_shutdown(inode)) {
+ if (inode->i_data.nrpages > 0)
+ *invalidate = true;
+ if (ci->i_wrbuffer_ref > 0)
+ mapping_set_error(&inode->i_data, -EIO);
+ }
+
+ spin_lock(&mdsc->cap_dirty_lock);
+
+ /* trash all of the cap flushes for this inode */
+ while (!list_empty(&ci->i_cap_flush_list)) {
+ cf = list_first_entry(&ci->i_cap_flush_list,
+ struct ceph_cap_flush, i_list);
+ list_del_init(&cf->g_list);
+ list_del_init(&cf->i_list);
+ if (!cf->is_capsnap)
+ ceph_free_cap_flush(cf);
+ }
+
+ if (!list_empty(&ci->i_dirty_item)) {
+ pr_warn_ratelimited(
+ " dropping dirty %s state for %p %lld\n",
+ ceph_cap_string(ci->i_dirty_caps),
+ inode, ceph_ino(inode));
+ ci->i_dirty_caps = 0;
+ list_del_init(&ci->i_dirty_item);
+ dirty_dropped = true;
+ }
+ if (!list_empty(&ci->i_flushing_item)) {
+ pr_warn_ratelimited(
+ " dropping dirty+flushing %s state for %p %lld\n",
+ ceph_cap_string(ci->i_flushing_caps),
+ inode, ceph_ino(inode));
+ ci->i_flushing_caps = 0;
+ list_del_init(&ci->i_flushing_item);
+ mdsc->num_cap_flushing--;
+ dirty_dropped = true;
+ }
+ spin_unlock(&mdsc->cap_dirty_lock);
+
+ if (dirty_dropped) {
+ mapping_set_error(inode->i_mapping, -EIO);
+
+ if (ci->i_wrbuffer_ref_head == 0 &&
+ ci->i_wr_ref == 0 &&
+ ci->i_dirty_caps == 0 &&
+ ci->i_flushing_caps == 0) {
+ ceph_put_snap_context(ci->i_head_snapc);
+ ci->i_head_snapc = NULL;
+ }
+ }
+
+ if (atomic_read(&ci->i_filelock_ref) > 0) {
+ /* make further file lock syscall return -EIO */
+ ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
+ pr_warn_ratelimited(" dropping file locks for %p %lld\n",
+ inode, ceph_ino(inode));
+ }
+
+ if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
+ cf = ci->i_prealloc_cap_flush;
+ ci->i_prealloc_cap_flush = NULL;
+ if (!cf->is_capsnap)
+ ceph_free_cap_flush(cf);
+ }
+
+ if (!list_empty(&ci->i_cap_snaps))
+ iputs = remove_capsnaps(mdsc, inode);
+ }
+ if (dirty_dropped)
+ ++iputs;
+ return iputs;
+}
diff --git a/fs/ceph/debugfs.c b/fs/ceph/debugfs.c
index 38b78b45811f..3cf7c9c1085b 100644
--- a/fs/ceph/debugfs.c
+++ b/fs/ceph/debugfs.c
@@ -146,82 +146,93 @@ static int mdsc_show(struct seq_file *s, void *p)
name, total, avg, _min, max, sum); \
}
-static int metric_show(struct seq_file *s, void *p)
+static int metrics_file_show(struct seq_file *s, void *p)
{
struct ceph_fs_client *fsc = s->private;
- struct ceph_mds_client *mdsc = fsc->mdsc;
- struct ceph_client_metric *m = &mdsc->metric;
- int nr_caps = 0;
- s64 total, sum, avg, min, max, sq;
- u64 sum_sz, avg_sz, min_sz, max_sz;
+ struct ceph_client_metric *m = &fsc->mdsc->metric;
- sum = percpu_counter_sum(&m->total_inodes);
seq_printf(s, "item total\n");
seq_printf(s, "------------------------------------------\n");
- seq_printf(s, "%-35s%lld / %lld\n", "opened files / total inodes",
- atomic64_read(&m->opened_files), sum);
- seq_printf(s, "%-35s%lld / %lld\n", "pinned i_caps / total inodes",
- atomic64_read(&m->total_caps), sum);
- seq_printf(s, "%-35s%lld / %lld\n", "opened inodes / total inodes",
- percpu_counter_sum(&m->opened_inodes), sum);
-
- seq_printf(s, "\n");
+ seq_printf(s, "%-35s%lld\n", "total inodes",
+ percpu_counter_sum(&m->total_inodes));
+ seq_printf(s, "%-35s%lld\n", "opened files",
+ atomic64_read(&m->opened_files));
+ seq_printf(s, "%-35s%lld\n", "pinned i_caps",
+ atomic64_read(&m->total_caps));
+ seq_printf(s, "%-35s%lld\n", "opened inodes",
+ percpu_counter_sum(&m->opened_inodes));
+ return 0;
+}
+
+static const char * const metric_str[] = {
+ "read",
+ "write",
+ "metadata",
+ "copyfrom"
+};
+static int metrics_latency_show(struct seq_file *s, void *p)
+{
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_client_metric *cm = &fsc->mdsc->metric;
+ struct ceph_metric *m;
+ s64 total, sum, avg, min, max, sq;
+ int i;
+
seq_printf(s, "item total avg_lat(us) min_lat(us) max_lat(us) stdev(us)\n");
seq_printf(s, "-----------------------------------------------------------------------------------\n");
- spin_lock(&m->read_metric_lock);
- total = m->total_reads;
- sum = m->read_latency_sum;
- avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
- min = m->read_latency_min;
- max = m->read_latency_max;
- sq = m->read_latency_sq_sum;
- spin_unlock(&m->read_metric_lock);
- CEPH_LAT_METRIC_SHOW("read", total, avg, min, max, sq);
-
- spin_lock(&m->write_metric_lock);
- total = m->total_writes;
- sum = m->write_latency_sum;
- avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
- min = m->write_latency_min;
- max = m->write_latency_max;
- sq = m->write_latency_sq_sum;
- spin_unlock(&m->write_metric_lock);
- CEPH_LAT_METRIC_SHOW("write", total, avg, min, max, sq);
-
- spin_lock(&m->metadata_metric_lock);
- total = m->total_metadatas;
- sum = m->metadata_latency_sum;
- avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
- min = m->metadata_latency_min;
- max = m->metadata_latency_max;
- sq = m->metadata_latency_sq_sum;
- spin_unlock(&m->metadata_metric_lock);
- CEPH_LAT_METRIC_SHOW("metadata", total, avg, min, max, sq);
-
- seq_printf(s, "\n");
+ for (i = 0; i < METRIC_MAX; i++) {
+ m = &cm->metric[i];
+ spin_lock(&m->lock);
+ total = m->total;
+ sum = m->latency_sum;
+ avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
+ min = m->latency_min;
+ max = m->latency_max;
+ sq = m->latency_sq_sum;
+ spin_unlock(&m->lock);
+ CEPH_LAT_METRIC_SHOW(metric_str[i], total, avg, min, max, sq);
+ }
+
+ return 0;
+}
+
+static int metrics_size_show(struct seq_file *s, void *p)
+{
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_client_metric *cm = &fsc->mdsc->metric;
+ struct ceph_metric *m;
+ s64 total;
+ u64 sum, avg, min, max;
+ int i;
+
seq_printf(s, "item total avg_sz(bytes) min_sz(bytes) max_sz(bytes) total_sz(bytes)\n");
seq_printf(s, "----------------------------------------------------------------------------------------\n");
- spin_lock(&m->read_metric_lock);
- total = m->total_reads;
- sum_sz = m->read_size_sum;
- avg_sz = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum_sz, total) : 0;
- min_sz = m->read_size_min;
- max_sz = m->read_size_max;
- spin_unlock(&m->read_metric_lock);
- CEPH_SZ_METRIC_SHOW("read", total, avg_sz, min_sz, max_sz, sum_sz);
-
- spin_lock(&m->write_metric_lock);
- total = m->total_writes;
- sum_sz = m->write_size_sum;
- avg_sz = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum_sz, total) : 0;
- min_sz = m->write_size_min;
- max_sz = m->write_size_max;
- spin_unlock(&m->write_metric_lock);
- CEPH_SZ_METRIC_SHOW("write", total, avg_sz, min_sz, max_sz, sum_sz);
-
- seq_printf(s, "\n");
+ for (i = 0; i < METRIC_MAX; i++) {
+ /* skip 'metadata' as it doesn't use the size metric */
+ if (i == METRIC_METADATA)
+ continue;
+ m = &cm->metric[i];
+ spin_lock(&m->lock);
+ total = m->total;
+ sum = m->size_sum;
+ avg = total > 0 ? DIV64_U64_ROUND_CLOSEST(sum, total) : 0;
+ min = m->size_min;
+ max = m->size_max;
+ spin_unlock(&m->lock);
+ CEPH_SZ_METRIC_SHOW(metric_str[i], total, avg, min, max, sum);
+ }
+
+ return 0;
+}
+
+static int metrics_caps_show(struct seq_file *s, void *p)
+{
+ struct ceph_fs_client *fsc = s->private;
+ struct ceph_client_metric *m = &fsc->mdsc->metric;
+ int nr_caps = 0;
+
seq_printf(s, "item total miss hit\n");
seq_printf(s, "-------------------------------------------------\n");
@@ -350,8 +361,11 @@ DEFINE_SHOW_ATTRIBUTE(mdsmap);
DEFINE_SHOW_ATTRIBUTE(mdsc);
DEFINE_SHOW_ATTRIBUTE(caps);
DEFINE_SHOW_ATTRIBUTE(mds_sessions);
-DEFINE_SHOW_ATTRIBUTE(metric);
DEFINE_SHOW_ATTRIBUTE(status);
+DEFINE_SHOW_ATTRIBUTE(metrics_file);
+DEFINE_SHOW_ATTRIBUTE(metrics_latency);
+DEFINE_SHOW_ATTRIBUTE(metrics_size);
+DEFINE_SHOW_ATTRIBUTE(metrics_caps);
/*
@@ -385,8 +399,9 @@ void ceph_fs_debugfs_cleanup(struct ceph_fs_client *fsc)
debugfs_remove(fsc->debugfs_mdsmap);
debugfs_remove(fsc->debugfs_mds_sessions);
debugfs_remove(fsc->debugfs_caps);
- debugfs_remove(fsc->debugfs_metric);
+ debugfs_remove(fsc->debugfs_status);
debugfs_remove(fsc->debugfs_mdsc);
+ debugfs_remove_recursive(fsc->debugfs_metrics_dir);
}
void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
@@ -426,12 +441,6 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
fsc,
&mdsc_fops);
- fsc->debugfs_metric = debugfs_create_file("metrics",
- 0400,
- fsc->client->debugfs_dir,
- fsc,
- &metric_fops);
-
fsc->debugfs_caps = debugfs_create_file("caps",
0400,
fsc->client->debugfs_dir,
@@ -443,6 +452,18 @@ void ceph_fs_debugfs_init(struct ceph_fs_client *fsc)
fsc->client->debugfs_dir,
fsc,
&status_fops);
+
+ fsc->debugfs_metrics_dir = debugfs_create_dir("metrics",
+ fsc->client->debugfs_dir);
+
+ debugfs_create_file("file", 0400, fsc->debugfs_metrics_dir, fsc,
+ &metrics_file_fops);
+ debugfs_create_file("latency", 0400, fsc->debugfs_metrics_dir, fsc,
+ &metrics_latency_fops);
+ debugfs_create_file("size", 0400, fsc->debugfs_metrics_dir, fsc,
+ &metrics_size_fops);
+ debugfs_create_file("caps", 0400, fsc->debugfs_metrics_dir, fsc,
+ &metrics_caps_fops);
}
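
The split into per-metric debugfs files above leans on the usual seq_file helper: DEFINE_SHOW_ATTRIBUTE() turns a *_show() function into the file_operations that debugfs_create_file() takes. A minimal sketch with made-up names follows; example_stats is hypothetical and not a ceph structure.

#include <linux/debugfs.h>
#include <linux/seq_file.h>

struct example_stats {
	u64 reads;
	u64 writes;
};

static int example_stats_show(struct seq_file *s, void *p)
{
	struct example_stats *st = s->private;	/* 'data' passed to debugfs_create_file() */

	seq_printf(s, "reads  %llu\n", (unsigned long long)st->reads);
	seq_printf(s, "writes %llu\n", (unsigned long long)st->writes);
	return 0;
}
DEFINE_SHOW_ATTRIBUTE(example_stats);		/* generates example_stats_fops */

static struct dentry *example_register(struct dentry *parent,
				       struct example_stats *st)
{
	struct dentry *dir = debugfs_create_dir("metrics", parent);

	debugfs_create_file("stats", 0400, dir, st, &example_stats_fops);
	return dir;	/* tear down later with debugfs_remove_recursive(dir) */
}
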
diff --git a/fs/ceph/export.c b/fs/ceph/export.c
index 1d65934c1262..e0fa66ac8b9f 100644
--- a/fs/ceph/export.c
+++ b/fs/ceph/export.c
@@ -157,6 +157,11 @@ static struct inode *__lookup_inode(struct super_block *sb, u64 ino)
ceph_mdsc_put_request(req);
if (!inode)
return err < 0 ? ERR_PTR(err) : ERR_PTR(-ESTALE);
+ } else {
+ if (ceph_inode_is_shutdown(inode)) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
}
return inode;
}
@@ -223,8 +228,13 @@ static struct dentry *__snapfh_to_dentry(struct super_block *sb,
return ERR_PTR(-ESTALE);
inode = ceph_find_inode(sb, vino);
- if (inode)
+ if (inode) {
+ if (ceph_inode_is_shutdown(inode)) {
+ iput(inode);
+ return ERR_PTR(-ESTALE);
+ }
return d_obtain_alias(inode);
+ }
req = ceph_mdsc_create_request(mdsc, CEPH_MDS_OP_LOOKUPINO,
USE_ANY_MDS);
diff --git a/fs/ceph/file.c b/fs/ceph/file.c
index b129ea551378..02a0a0fd9ccd 100644
--- a/fs/ceph/file.c
+++ b/fs/ceph/file.c
@@ -525,6 +525,7 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
if (result) {
struct dentry *dentry = req->r_dentry;
+ struct inode *inode = d_inode(dentry);
int pathlen = 0;
u64 base = 0;
char *path = ceph_mdsc_build_path(req->r_dentry, &pathlen,
@@ -534,7 +535,8 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
if (!d_unhashed(dentry))
d_drop(dentry);
- /* FIXME: start returning I/O errors on all accesses? */
+ ceph_inode_shutdown(inode);
+
pr_warn("ceph: async create failure path=(%llx)%s result=%d!\n",
base, IS_ERR(path) ? "<<bad>>" : path, result);
ceph_mdsc_free_path(path, pathlen);
@@ -556,7 +558,7 @@ static void ceph_async_create_cb(struct ceph_mds_client *mdsc,
}
ceph_kick_flushing_inode_caps(req->r_session, ci);
spin_unlock(&ci->i_ceph_lock);
- } else {
+ } else if (!result) {
pr_warn("%s: no req->r_target_inode for 0x%llx\n", __func__,
req->r_deleg_ino);
}
@@ -845,6 +847,7 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
ssize_t ret;
u64 off = iocb->ki_pos;
u64 len = iov_iter_count(to);
+ u64 i_size;
dout("sync_read on file %p %llu~%u %s\n", file, off, (unsigned)len,
(file->f_flags & O_DIRECT) ? "O_DIRECT" : "");
@@ -868,7 +871,6 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
struct page **pages;
int num_pages;
size_t page_off;
- u64 i_size;
bool more;
int idx;
size_t left;
@@ -951,11 +953,14 @@ static ssize_t ceph_sync_read(struct kiocb *iocb, struct iov_iter *to,
}
if (off > iocb->ki_pos) {
- if (ret >= 0 &&
- iov_iter_count(to) > 0 && off >= i_size_read(inode))
+ if (off >= i_size) {
*retry_op = CHECK_EOF;
- ret = off - iocb->ki_pos;
- iocb->ki_pos = off;
+ ret = i_size - iocb->ki_pos;
+ iocb->ki_pos = i_size;
+ } else {
+ ret = off - iocb->ki_pos;
+ iocb->ki_pos = off;
+ }
}
dout("sync_read result %zd retry_op %d\n", ret, *retry_op);
@@ -1526,6 +1531,9 @@ again:
dout("aio_read %p %llx.%llx %llu~%u trying to get caps on %p\n",
inode, ceph_vinop(inode), iocb->ki_pos, (unsigned)len, inode);
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
if (direct_lock)
ceph_start_io_direct(inode);
else
@@ -1678,6 +1686,9 @@ static ssize_t ceph_write_iter(struct kiocb *iocb, struct iov_iter *from)
loff_t pos;
loff_t limit = max(i_size_read(inode), fsc->max_file_size);
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
@@ -2200,6 +2211,54 @@ static int is_file_size_ok(struct inode *src_inode, struct inode *dst_inode,
return 0;
}
+static struct ceph_osd_request *
+ceph_alloc_copyfrom_request(struct ceph_osd_client *osdc,
+ u64 src_snapid,
+ struct ceph_object_id *src_oid,
+ struct ceph_object_locator *src_oloc,
+ struct ceph_object_id *dst_oid,
+ struct ceph_object_locator *dst_oloc,
+ u32 truncate_seq, u64 truncate_size)
+{
+ struct ceph_osd_request *req;
+ int ret;
+ u32 src_fadvise_flags =
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
+ CEPH_OSD_OP_FLAG_FADVISE_NOCACHE;
+ u32 dst_fadvise_flags =
+ CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
+ CEPH_OSD_OP_FLAG_FADVISE_DONTNEED;
+
+ req = ceph_osdc_alloc_request(osdc, NULL, 1, false, GFP_KERNEL);
+ if (!req)
+ return ERR_PTR(-ENOMEM);
+
+ req->r_flags = CEPH_OSD_FLAG_WRITE;
+
+ ceph_oloc_copy(&req->r_t.base_oloc, dst_oloc);
+ ceph_oid_copy(&req->r_t.base_oid, dst_oid);
+
+ ret = osd_req_op_copy_from_init(req, src_snapid, 0,
+ src_oid, src_oloc,
+ src_fadvise_flags,
+ dst_fadvise_flags,
+ truncate_seq,
+ truncate_size,
+ CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
+ if (ret)
+ goto out;
+
+ ret = ceph_osdc_alloc_messages(req, GFP_KERNEL);
+ if (ret)
+ goto out;
+
+ return req;
+
+out:
+ ceph_osdc_put_request(req);
+ return ERR_PTR(ret);
+}
+
static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off,
struct ceph_inode_info *dst_ci, u64 *dst_off,
struct ceph_fs_client *fsc,
@@ -2207,6 +2266,8 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
{
struct ceph_object_locator src_oloc, dst_oloc;
struct ceph_object_id src_oid, dst_oid;
+ struct ceph_osd_client *osdc;
+ struct ceph_osd_request *req;
size_t bytes = 0;
u64 src_objnum, src_objoff, dst_objnum, dst_objoff;
u32 src_objlen, dst_objlen;
@@ -2217,6 +2278,7 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
src_oloc.pool_ns = ceph_try_get_string(src_ci->i_layout.pool_ns);
dst_oloc.pool = dst_ci->i_layout.pool_id;
dst_oloc.pool_ns = ceph_try_get_string(dst_ci->i_layout.pool_ns);
+ osdc = &fsc->client->osdc;
while (len >= object_size) {
ceph_calc_file_object_mapping(&src_ci->i_layout, *src_off,
@@ -2232,17 +2294,22 @@ static ssize_t ceph_do_objects_copy(struct ceph_inode_info *src_ci, u64 *src_off
ceph_oid_printf(&dst_oid, "%llx.%08llx",
dst_ci->i_vino.ino, dst_objnum);
/* Do an object remote copy */
- ret = ceph_osdc_copy_from(&fsc->client->osdc,
- src_ci->i_vino.snap, 0,
- &src_oid, &src_oloc,
- CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
- CEPH_OSD_OP_FLAG_FADVISE_NOCACHE,
- &dst_oid, &dst_oloc,
- CEPH_OSD_OP_FLAG_FADVISE_SEQUENTIAL |
- CEPH_OSD_OP_FLAG_FADVISE_DONTNEED,
- dst_ci->i_truncate_seq,
- dst_ci->i_truncate_size,
- CEPH_OSD_COPY_FROM_FLAG_TRUNCATE_SEQ);
+ req = ceph_alloc_copyfrom_request(osdc, src_ci->i_vino.snap,
+ &src_oid, &src_oloc,
+ &dst_oid, &dst_oloc,
+ dst_ci->i_truncate_seq,
+ dst_ci->i_truncate_size);
+ if (IS_ERR(req))
+ ret = PTR_ERR(req);
+ else {
+ ceph_osdc_start_request(osdc, req, false);
+ ret = ceph_osdc_wait_request(osdc, req);
+ ceph_update_copyfrom_metrics(&fsc->mdsc->metric,
+ req->r_start_latency,
+ req->r_end_latency,
+ object_size, ret);
+ ceph_osdc_put_request(req);
+ }
if (ret) {
if (ret == -EOPNOTSUPP) {
fsc->have_copy_from2 = false;
diff --git a/fs/ceph/inode.c b/fs/ceph/inode.c
index 1c7574105478..e3322fcb2e8d 100644
--- a/fs/ceph/inode.c
+++ b/fs/ceph/inode.c
@@ -1841,15 +1841,14 @@ void ceph_queue_inode_work(struct inode *inode, int work_bit)
static void ceph_do_invalidate_pages(struct inode *inode)
{
struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
u32 orig_gen;
int check = 0;
mutex_lock(&ci->i_truncate_mutex);
- if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
- pr_warn_ratelimited("invalidate_pages %p %lld forced umount\n",
- inode, ceph_ino(inode));
+ if (ceph_inode_is_shutdown(inode)) {
+ pr_warn_ratelimited("%s: inode %llx.%llx is shut down\n",
+ __func__, ceph_vinop(inode));
mapping_set_error(inode->i_mapping, -EIO);
truncate_pagecache(inode, 0);
mutex_unlock(&ci->i_truncate_mutex);
@@ -1871,7 +1870,8 @@ static void ceph_do_invalidate_pages(struct inode *inode)
ceph_fscache_invalidate(inode);
if (invalidate_inode_pages2(inode->i_mapping) < 0) {
- pr_err("invalidate_pages %p fails\n", inode);
+ pr_err("invalidate_inode_pages2 %llx.%llx failed\n",
+ ceph_vinop(inode));
}
spin_lock(&ci->i_ceph_lock);
@@ -2103,12 +2103,14 @@ int __ceph_setattr(struct inode *inode, struct iattr *attr)
loff_t isize = i_size_read(inode);
dout("setattr %p size %lld -> %lld\n", inode, isize, attr->ia_size);
- if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size > isize) {
- i_size_write(inode, attr->ia_size);
- inode->i_blocks = calc_inode_blocks(attr->ia_size);
- ci->i_reported_size = attr->ia_size;
- dirtied |= CEPH_CAP_FILE_EXCL;
- ia_valid |= ATTR_MTIME;
+ if ((issued & CEPH_CAP_FILE_EXCL) && attr->ia_size >= isize) {
+ if (attr->ia_size > isize) {
+ i_size_write(inode, attr->ia_size);
+ inode->i_blocks = calc_inode_blocks(attr->ia_size);
+ ci->i_reported_size = attr->ia_size;
+ dirtied |= CEPH_CAP_FILE_EXCL;
+ ia_valid |= ATTR_MTIME;
+ }
} else if ((issued & CEPH_CAP_FILE_SHARED) == 0 ||
attr->ia_size != isize) {
req->r_args.setattr.size = cpu_to_le64(attr->ia_size);
@@ -2217,6 +2219,9 @@ int ceph_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
if (ceph_snap(inode) != CEPH_NOSNAP)
return -EROFS;
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
err = setattr_prepare(&init_user_ns, dentry, attr);
if (err != 0)
return err;
@@ -2347,6 +2352,9 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
u32 valid_mask = STATX_BASIC_STATS;
int err = 0;
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
/* Skip the getattr altogether if we're asked not to sync */
if (!(flags & AT_STATX_DONT_SYNC)) {
err = ceph_do_getattr(inode,
@@ -2394,3 +2402,27 @@ int ceph_getattr(struct user_namespace *mnt_userns, const struct path *path,
stat->result_mask = request_mask & valid_mask;
return err;
}
+
+void ceph_inode_shutdown(struct inode *inode)
+{
+ struct ceph_inode_info *ci = ceph_inode(inode);
+ struct rb_node *p;
+ int iputs = 0;
+ bool invalidate = false;
+
+ spin_lock(&ci->i_ceph_lock);
+ ci->i_ceph_flags |= CEPH_I_SHUTDOWN;
+ p = rb_first(&ci->i_caps);
+ while (p) {
+ struct ceph_cap *cap = rb_entry(p, struct ceph_cap, ci_node);
+
+ p = rb_next(p);
+ iputs += ceph_purge_inode_cap(inode, cap, &invalidate);
+ }
+ spin_unlock(&ci->i_ceph_lock);
+
+ if (invalidate)
+ ceph_queue_invalidate(inode);
+ while (iputs--)
+ iput(inode);
+}
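
ceph_inode_shutdown() above sets CEPH_I_SHUTDOWN and purges the inode's caps; the many new ceph_inode_is_shutdown() call sites then refuse further I/O with -ESTALE. The real predicate lives in fs/ceph/super.h and is not shown in this diff; the following is a rough, hedged sketch of what such a check amounts to, also treating a forced-umount mount state as shut down.

/* Hedged sketch only; assumes fs/ceph/super.h definitions (ceph_inode,
 * ceph_inode_to_client, CEPH_I_SHUTDOWN, CEPH_MOUNT_SHUTDOWN). */
static inline bool example_inode_is_shutdown(struct inode *inode)
{
	struct ceph_inode_info *ci = ceph_inode(inode);
	struct ceph_fs_client *fsc = ceph_inode_to_client(inode);

	return (READ_ONCE(ci->i_ceph_flags) & CEPH_I_SHUTDOWN) ||
	       READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN;
}
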
diff --git a/fs/ceph/locks.c b/fs/ceph/locks.c
index d8c31069fbf2..d1f154aec249 100644
--- a/fs/ceph/locks.c
+++ b/fs/ceph/locks.c
@@ -241,6 +241,9 @@ int ceph_lock(struct file *file, int cmd, struct file_lock *fl)
if (!(fl->fl_flags & FL_POSIX))
return -ENOLCK;
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
dout("ceph_lock, fl_owner: %p\n", fl->fl_owner);
/* set wait bit as appropriate, then make command as Ceph expects it*/
@@ -303,6 +306,9 @@ int ceph_flock(struct file *file, int cmd, struct file_lock *fl)
if (!(fl->fl_flags & FL_FLOCK))
return -ENOLCK;
+ if (ceph_inode_is_shutdown(inode))
+ return -ESTALE;
+
dout("ceph_flock, fl_file: %p\n", fl->fl_file);
spin_lock(&ci->i_ceph_lock);
diff --git a/fs/ceph/mds_client.c b/fs/ceph/mds_client.c
index d64413adc0fd..250aad330a10 100644
--- a/fs/ceph/mds_client.c
+++ b/fs/ceph/mds_client.c
@@ -1590,129 +1590,23 @@ out:
return ret;
}
-static int remove_capsnaps(struct ceph_mds_client *mdsc, struct inode *inode)
-{
- struct ceph_inode_info *ci = ceph_inode(inode);
- struct ceph_cap_snap *capsnap;
- int capsnap_release = 0;
-
- lockdep_assert_held(&ci->i_ceph_lock);
-
- dout("removing capsnaps, ci is %p, inode is %p\n", ci, inode);
-
- while (!list_empty(&ci->i_cap_snaps)) {
- capsnap = list_first_entry(&ci->i_cap_snaps,
- struct ceph_cap_snap, ci_item);
- __ceph_remove_capsnap(inode, capsnap, NULL, NULL);
- ceph_put_snap_context(capsnap->context);
- ceph_put_cap_snap(capsnap);
- capsnap_release++;
- }
- wake_up_all(&ci->i_cap_wq);
- wake_up_all(&mdsc->cap_flushing_wq);
- return capsnap_release;
-}
-
static int remove_session_caps_cb(struct inode *inode, struct ceph_cap *cap,
void *arg)
{
- struct ceph_fs_client *fsc = (struct ceph_fs_client *)arg;
- struct ceph_mds_client *mdsc = fsc->mdsc;
struct ceph_inode_info *ci = ceph_inode(inode);
- LIST_HEAD(to_remove);
- bool dirty_dropped = false;
bool invalidate = false;
- int capsnap_release = 0;
+ int iputs;
dout("removing cap %p, ci is %p, inode is %p\n",
cap, ci, &ci->vfs_inode);
spin_lock(&ci->i_ceph_lock);
- __ceph_remove_cap(cap, false);
- if (!ci->i_auth_cap) {
- struct ceph_cap_flush *cf;
-
- if (READ_ONCE(fsc->mount_state) >= CEPH_MOUNT_SHUTDOWN) {
- if (inode->i_data.nrpages > 0)
- invalidate = true;
- if (ci->i_wrbuffer_ref > 0)
- mapping_set_error(&inode->i_data, -EIO);
- }
-
- while (!list_empty(&ci->i_cap_flush_list)) {
- cf = list_first_entry(&ci->i_cap_flush_list,
- struct ceph_cap_flush, i_list);
- list_move(&cf->i_list, &to_remove);
- }
-
- spin_lock(&mdsc->cap_dirty_lock);
-
- list_for_each_entry(cf, &to_remove, i_list)
- list_del_init(&cf->g_list);
-
- if (!list_empty(&ci->i_dirty_item)) {
- pr_warn_ratelimited(
- " dropping dirty %s state for %p %lld\n",
- ceph_cap_string(ci->i_dirty_caps),
- inode, ceph_ino(inode));
- ci->i_dirty_caps = 0;
- list_del_init(&ci->i_dirty_item);
- dirty_dropped = true;
- }
- if (!list_empty(&ci->i_flushing_item)) {
- pr_warn_ratelimited(
- " dropping dirty+flushing %s state for %p %lld\n",
- ceph_cap_string(ci->i_flushing_caps),
- inode, ceph_ino(inode));
- ci->i_flushing_caps = 0;
- list_del_init(&ci->i_flushing_item);
- mdsc->num_cap_flushing--;
- dirty_dropped = true;
- }
- spin_unlock(&mdsc->cap_dirty_lock);
-
- if (dirty_dropped) {
- mapping_set_error(inode->i_mapping, -EIO);
-
- if (ci->i_wrbuffer_ref_head == 0 &&
- ci->i_wr_ref == 0 &&
- ci->i_dirty_caps == 0 &&
- ci->i_flushing_caps == 0) {
- ceph_put_snap_context(ci->i_head_snapc);
- ci->i_head_snapc = NULL;
- }
- }
-
- if (atomic_read(&ci->i_filelock_ref) > 0) {
- /* make further file lock syscall return -EIO */
- ci->i_ceph_flags |= CEPH_I_ERROR_FILELOCK;
- pr_warn_ratelimited(" dropping file locks for %p %lld\n",
- inode, ceph_ino(inode));
- }
-
- if (!ci->i_dirty_caps && ci->i_prealloc_cap_flush) {
- list_add(&ci->i_prealloc_cap_flush->i_list, &to_remove);
- ci->i_prealloc_cap_flush = NULL;
- }
-
- if (!list_empty(&ci->i_cap_snaps))
- capsnap_release = remove_capsnaps(mdsc, inode);
- }
+ iputs = ceph_purge_inode_cap(inode, cap, &invalidate);
spin_unlock(&ci->i_ceph_lock);
- while (!list_empty(&to_remove)) {
- struct ceph_cap_flush *cf;
- cf = list_first_entry(&to_remove,
- struct ceph_cap_flush, i_list);
- list_del_init(&cf->i_list);
- if (!cf->is_capsnap)
- ceph_free_cap_flush(cf);
- }
wake_up_all(&ci->i_cap_wq);
if (invalidate)
ceph_queue_invalidate(inode);
- if (dirty_dropped)
- iput(inode);
- while (capsnap_release--)
+ while (iputs--)
iput(inode);
return 0;
}
@@ -3467,9 +3361,14 @@ static void handle_session(struct ceph_mds_session *session,
if (msg_version >= 3) {
u32 len;
- /* version >= 2, metadata */
- if (__decode_session_metadata(&p, end, &blocklisted) < 0)
+ /* version >= 2 and < 5, decode metadata, skip otherwise
+ * as it's handled via flags.
+ */
+ if (msg_version >= 5)
+ ceph_decode_skip_map(&p, end, string, string, bad);
+ else if (__decode_session_metadata(&p, end, &blocklisted) < 0)
goto bad;
+
/* version >= 3, feature bits */
ceph_decode_32_safe(&p, end, len, bad);
if (len) {
@@ -3478,6 +3377,18 @@ static void handle_session(struct ceph_mds_session *session,
}
}
+ if (msg_version >= 5) {
+ u32 flags;
+ /* version >= 4, struct_v, struct_cv, len, metric_spec */
+ ceph_decode_skip_n(&p, end, 2 + sizeof(u32) * 2, bad);
+ /* version >= 5, flags */
+ ceph_decode_32_safe(&p, end, flags, bad);
+ if (flags & CEPH_SESSION_BLOCKLISTED) {
+ pr_warn("mds%d session blocklisted\n", session->s_mds);
+ blocklisted = true;
+ }
+ }
+
mutex_lock(&mdsc->mutex);
if (op == CEPH_SESSION_CLOSE) {
ceph_get_mds_session(session);
@@ -5072,7 +4983,8 @@ void ceph_mdsc_handle_fsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
return;
bad:
- pr_err("error decoding fsmap\n");
+ pr_err("error decoding fsmap %d. Shutting down mount.\n", err);
+ ceph_umount_begin(mdsc->fsc->sb);
err_out:
mutex_lock(&mdsc->mutex);
mdsc->mdsmap_err = err;
@@ -5139,7 +5051,8 @@ void ceph_mdsc_handle_mdsmap(struct ceph_mds_client *mdsc, struct ceph_msg *msg)
bad_unlock:
mutex_unlock(&mdsc->mutex);
bad:
- pr_err("error decoding mdsmap %d\n", err);
+ pr_err("error decoding mdsmap %d. Shutting down mount.\n", err);
+ ceph_umount_begin(mdsc->fsc->sb);
return;
}
diff --git a/fs/ceph/mdsmap.c b/fs/ceph/mdsmap.c
index 61d67cbcb367..30387733765d 100644
--- a/fs/ceph/mdsmap.c
+++ b/fs/ceph/mdsmap.c
@@ -263,10 +263,6 @@ struct ceph_mdsmap *ceph_mdsmap_decode(void **p, void *end, bool msgr2)
goto nomem;
for (j = 0; j < num_export_targets; j++) {
target = ceph_decode_32(&pexport_targets);
- if (target >= m->possible_max_rank) {
- err = -EIO;
- goto corrupt;
- }
info->export_targets[j] = target;
}
} else {
diff --git a/fs/ceph/metric.c b/fs/ceph/metric.c
index 04d5df29bbbf..c57699d8408d 100644
--- a/fs/ceph/metric.c
+++ b/fs/ceph/metric.c
@@ -62,7 +62,7 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
read->header.ver = 1;
read->header.compat = 1;
read->header.data_len = cpu_to_le32(sizeof(*read) - header_len);
- sum = m->read_latency_sum;
+ sum = m->metric[METRIC_READ].latency_sum;
jiffies_to_timespec64(sum, &ts);
read->sec = cpu_to_le32(ts.tv_sec);
read->nsec = cpu_to_le32(ts.tv_nsec);
@@ -74,7 +74,7 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
write->header.ver = 1;
write->header.compat = 1;
write->header.data_len = cpu_to_le32(sizeof(*write) - header_len);
- sum = m->write_latency_sum;
+ sum = m->metric[METRIC_WRITE].latency_sum;
jiffies_to_timespec64(sum, &ts);
write->sec = cpu_to_le32(ts.tv_sec);
write->nsec = cpu_to_le32(ts.tv_nsec);
@@ -86,7 +86,7 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
meta->header.ver = 1;
meta->header.compat = 1;
meta->header.data_len = cpu_to_le32(sizeof(*meta) - header_len);
- sum = m->metadata_latency_sum;
+ sum = m->metric[METRIC_METADATA].latency_sum;
jiffies_to_timespec64(sum, &ts);
meta->sec = cpu_to_le32(ts.tv_sec);
meta->nsec = cpu_to_le32(ts.tv_nsec);
@@ -141,8 +141,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
rsize->header.ver = 1;
rsize->header.compat = 1;
rsize->header.data_len = cpu_to_le32(sizeof(*rsize) - header_len);
- rsize->total_ops = cpu_to_le64(m->total_reads);
- rsize->total_size = cpu_to_le64(m->read_size_sum);
+ rsize->total_ops = cpu_to_le64(m->metric[METRIC_READ].total);
+ rsize->total_size = cpu_to_le64(m->metric[METRIC_READ].size_sum);
items++;
/* encode the write io size metric */
@@ -151,8 +151,8 @@ static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
wsize->header.ver = 1;
wsize->header.compat = 1;
wsize->header.data_len = cpu_to_le32(sizeof(*wsize) - header_len);
- wsize->total_ops = cpu_to_le64(m->total_writes);
- wsize->total_size = cpu_to_le64(m->write_size_sum);
+ wsize->total_ops = cpu_to_le64(m->metric[METRIC_WRITE].total);
+ wsize->total_size = cpu_to_le64(m->metric[METRIC_WRITE].size_sum);
items++;
put_unaligned_le32(items, &head->num);
@@ -220,7 +220,8 @@ static void metric_delayed_work(struct work_struct *work)
int ceph_metric_init(struct ceph_client_metric *m)
{
- int ret;
+ struct ceph_metric *metric;
+ int ret, i;
if (!m)
return -EINVAL;
@@ -243,32 +244,18 @@ int ceph_metric_init(struct ceph_client_metric *m)
if (ret)
goto err_i_caps_mis;
- spin_lock_init(&m->read_metric_lock);
- m->read_latency_sq_sum = 0;
- m->read_latency_min = KTIME_MAX;
- m->read_latency_max = 0;
- m->total_reads = 0;
- m->read_latency_sum = 0;
- m->read_size_min = U64_MAX;
- m->read_size_max = 0;
- m->read_size_sum = 0;
-
- spin_lock_init(&m->write_metric_lock);
- m->write_latency_sq_sum = 0;
- m->write_latency_min = KTIME_MAX;
- m->write_latency_max = 0;
- m->total_writes = 0;
- m->write_latency_sum = 0;
- m->write_size_min = U64_MAX;
- m->write_size_max = 0;
- m->write_size_sum = 0;
-
- spin_lock_init(&m->metadata_metric_lock);
- m->metadata_latency_sq_sum = 0;
- m->metadata_latency_min = KTIME_MAX;
- m->metadata_latency_max = 0;
- m->total_metadatas = 0;
- m->metadata_latency_sum = 0;
+ for (i = 0; i < METRIC_MAX; i++) {
+ metric = &m->metric[i];
+ spin_lock_init(&metric->lock);
+ metric->size_sum = 0;
+ metric->size_min = U64_MAX;
+ metric->size_max = 0;
+ metric->total = 0;
+ metric->latency_sum = 0;
+ metric->latency_sq_sum = 0;
+ metric->latency_min = KTIME_MAX;
+ metric->latency_max = 0;
+ }
atomic64_set(&m->opened_files, 0);
ret = percpu_counter_init(&m->opened_inodes, 0, GFP_KERNEL);
@@ -338,9 +325,9 @@ static inline void __update_stdev(ktime_t total, ktime_t lsum,
*sq_sump += sq;
}
-void ceph_update_read_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- unsigned int size, int rc)
+void ceph_update_metrics(struct ceph_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc)
{
ktime_t lat = ktime_sub(r_end, r_start);
ktime_t total;
@@ -348,63 +335,12 @@ void ceph_update_read_metrics(struct ceph_client_metric *m,
if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
return;
- spin_lock(&m->read_metric_lock);
- total = ++m->total_reads;
- m->read_size_sum += size;
- m->read_latency_sum += lat;
- METRIC_UPDATE_MIN_MAX(m->read_size_min,
- m->read_size_max,
- size);
- METRIC_UPDATE_MIN_MAX(m->read_latency_min,
- m->read_latency_max,
- lat);
- __update_stdev(total, m->read_latency_sum,
- &m->read_latency_sq_sum, lat);
- spin_unlock(&m->read_metric_lock);
-}
-
-void ceph_update_write_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- unsigned int size, int rc)
-{
- ktime_t lat = ktime_sub(r_end, r_start);
- ktime_t total;
-
- if (unlikely(rc && rc != -ETIMEDOUT))
- return;
-
- spin_lock(&m->write_metric_lock);
- total = ++m->total_writes;
- m->write_size_sum += size;
- m->write_latency_sum += lat;
- METRIC_UPDATE_MIN_MAX(m->write_size_min,
- m->write_size_max,
- size);
- METRIC_UPDATE_MIN_MAX(m->write_latency_min,
- m->write_latency_max,
- lat);
- __update_stdev(total, m->write_latency_sum,
- &m->write_latency_sq_sum, lat);
- spin_unlock(&m->write_metric_lock);
-}
-
-void ceph_update_metadata_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- int rc)
-{
- ktime_t lat = ktime_sub(r_end, r_start);
- ktime_t total;
-
- if (unlikely(rc && rc != -ENOENT))
- return;
-
- spin_lock(&m->metadata_metric_lock);
- total = ++m->total_metadatas;
- m->metadata_latency_sum += lat;
- METRIC_UPDATE_MIN_MAX(m->metadata_latency_min,
- m->metadata_latency_max,
- lat);
- __update_stdev(total, m->metadata_latency_sum,
- &m->metadata_latency_sq_sum, lat);
- spin_unlock(&m->metadata_metric_lock);
+ spin_lock(&m->lock);
+ total = ++m->total;
+ m->size_sum += size;
+ METRIC_UPDATE_MIN_MAX(m->size_min, m->size_max, size);
+ m->latency_sum += lat;
+ METRIC_UPDATE_MIN_MAX(m->latency_min, m->latency_max, lat);
+ __update_stdev(total, m->latency_sum, &m->latency_sq_sum, lat);
+ spin_unlock(&m->lock);
}
diff --git a/fs/ceph/metric.h b/fs/ceph/metric.h
index 0133955a3c6a..bb45608181e7 100644
--- a/fs/ceph/metric.h
+++ b/fs/ceph/metric.h
@@ -125,6 +125,26 @@ struct ceph_metric_head {
__le32 num; /* the number of metrics that will be sent */
} __packed;
+enum metric_type {
+ METRIC_READ,
+ METRIC_WRITE,
+ METRIC_METADATA,
+ METRIC_COPYFROM,
+ METRIC_MAX
+};
+
+struct ceph_metric {
+ spinlock_t lock;
+ u64 total;
+ u64 size_sum;
+ u64 size_min;
+ u64 size_max;
+ ktime_t latency_sum;
+ ktime_t latency_sq_sum;
+ ktime_t latency_min;
+ ktime_t latency_max;
+};
+
/* This is the global metrics */
struct ceph_client_metric {
atomic64_t total_dentries;
@@ -135,32 +155,7 @@ struct ceph_client_metric {
struct percpu_counter i_caps_hit;
struct percpu_counter i_caps_mis;
- spinlock_t read_metric_lock;
- u64 total_reads;
- u64 read_size_sum;
- u64 read_size_min;
- u64 read_size_max;
- ktime_t read_latency_sum;
- ktime_t read_latency_sq_sum;
- ktime_t read_latency_min;
- ktime_t read_latency_max;
-
- spinlock_t write_metric_lock;
- u64 total_writes;
- u64 write_size_sum;
- u64 write_size_min;
- u64 write_size_max;
- ktime_t write_latency_sum;
- ktime_t write_latency_sq_sum;
- ktime_t write_latency_min;
- ktime_t write_latency_max;
-
- spinlock_t metadata_metric_lock;
- u64 total_metadatas;
- ktime_t metadata_latency_sum;
- ktime_t metadata_latency_sq_sum;
- ktime_t metadata_latency_min;
- ktime_t metadata_latency_max;
+ struct ceph_metric metric[METRIC_MAX];
/* The total number of directories and files that are opened */
atomic64_t opened_files;
@@ -195,13 +190,36 @@ static inline void ceph_update_cap_mis(struct ceph_client_metric *m)
percpu_counter_inc(&m->i_caps_mis);
}
-extern void ceph_update_read_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- unsigned int size, int rc);
-extern void ceph_update_write_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- unsigned int size, int rc);
-extern void ceph_update_metadata_metrics(struct ceph_client_metric *m,
- ktime_t r_start, ktime_t r_end,
- int rc);
+extern void ceph_update_metrics(struct ceph_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc);
+
+static inline void ceph_update_read_metrics(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc)
+{
+ ceph_update_metrics(&m->metric[METRIC_READ],
+ r_start, r_end, size, rc);
+}
+static inline void ceph_update_write_metrics(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc)
+{
+ ceph_update_metrics(&m->metric[METRIC_WRITE],
+ r_start, r_end, size, rc);
+}
+static inline void ceph_update_metadata_metrics(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ int rc)
+{
+ ceph_update_metrics(&m->metric[METRIC_METADATA],
+ r_start, r_end, 0, rc);
+}
+static inline void ceph_update_copyfrom_metrics(struct ceph_client_metric *m,
+ ktime_t r_start, ktime_t r_end,
+ unsigned int size, int rc)
+{
+ ceph_update_metrics(&m->metric[METRIC_COPYFROM],
+ r_start, r_end, size, rc);
+}
#endif /* _FS_CEPH_MDS_METRIC_H */
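
The separate read/write/metadata bookkeeping above collapses into one ceph_metric slot per op type plus a single generic updater, with the old entry points kept as thin inline wrappers. A minimal sketch of what a caller looks like after this change, assuming the usual ceph headers and the existing fsc->mdsc->metric field (illustrative only, not part of the patch):

static void example_account_read(struct ceph_fs_client *fsc, ktime_t start,
				 unsigned int len, int ret)
{
	/* The wrapper just selects &metric[METRIC_READ]; locking, min/max and
	 * stdev bookkeeping now live in ceph_update_metrics() alone. */
	ceph_update_read_metrics(&fsc->mdsc->metric, start, ktime_get(), len, ret);
}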
diff --git a/fs/ceph/super.c b/fs/ceph/super.c
index fd8742bae847..bab61232dc5a 100644
--- a/fs/ceph/super.c
+++ b/fs/ceph/super.c
@@ -52,8 +52,7 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
struct ceph_fs_client *fsc = ceph_inode_to_client(d_inode(dentry));
struct ceph_mon_client *monc = &fsc->client->monc;
struct ceph_statfs st;
- u64 fsid;
- int err;
+ int i, err;
u64 data_pool;
if (fsc->mdsc->mdsmap->m_num_data_pg_pools == 1) {
@@ -99,12 +98,14 @@ static int ceph_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_namelen = NAME_MAX;
/* Must convert the fsid, for consistent values across arches */
+ buf->f_fsid.val[0] = 0;
mutex_lock(&monc->mutex);
- fsid = le64_to_cpu(*(__le64 *)(&monc->monmap->fsid)) ^
- le64_to_cpu(*((__le64 *)&monc->monmap->fsid + 1));
+ for (i = 0 ; i < sizeof(monc->monmap->fsid) / sizeof(__le32) ; ++i)
+ buf->f_fsid.val[0] ^= le32_to_cpu(((__le32 *)&monc->monmap->fsid)[i]);
mutex_unlock(&monc->mutex);
- buf->f_fsid = u64_to_fsid(fsid);
+ /* fold the fs_cluster_id into the upper bits */
+ buf->f_fsid.val[1] = monc->fs_cluster_id;
return 0;
}
@@ -577,8 +578,8 @@ static int ceph_show_options(struct seq_file *m, struct dentry *root)
if (fsopt->flags & CEPH_MOUNT_OPT_CLEANRECOVER)
seq_show_option(m, "recover_session", "clean");
- if (fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS)
- seq_puts(m, ",nowsync");
+ if (!(fsopt->flags & CEPH_MOUNT_OPT_ASYNC_DIROPS))
+ seq_puts(m, ",wsync");
if (fsopt->wsize != CEPH_MAX_WRITE_SIZE)
seq_printf(m, ",wsize=%u", fsopt->wsize);
@@ -842,7 +843,7 @@ static void __ceph_umount_begin(struct ceph_fs_client *fsc)
* ceph_umount_begin - initiate forced umount. Tear down the
* mount, skipping steps that may hang while waiting for server(s).
*/
-static void ceph_umount_begin(struct super_block *sb)
+void ceph_umount_begin(struct super_block *sb)
{
struct ceph_fs_client *fsc = ceph_sb_to_client(sb);
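
The statfs change above replaces the old 64-bit XOR of the fsid with a per-32-bit-word fold so f_fsid.val[0] comes out the same on 32- and 64-bit hosts, and stores the fs_cluster_id in val[1]. A standalone sketch of the folding, assuming a 16-byte fsid laid out as four little-endian 32-bit words (illustrative only, not part of the patch):

#include <stdint.h>

static void fold_fsid(const uint8_t fsid[16], uint32_t fs_cluster_id,
		      uint32_t out[2])
{
	uint32_t v = 0;

	/* XOR the four LE 32-bit words, mirroring the loop in ceph_statfs() */
	for (int i = 0; i < 16; i += 4)
		v ^= (uint32_t)fsid[i] | (uint32_t)fsid[i + 1] << 8 |
		     (uint32_t)fsid[i + 2] << 16 | (uint32_t)fsid[i + 3] << 24;

	out[0] = v;              /* arch-independent fold of the cluster fsid */
	out[1] = fs_cluster_id;  /* distinguishes filesystems within a cluster */
}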
diff --git a/fs/ceph/super.h b/fs/ceph/super.h
index 14f951cd5b61..ac331aa07cfa 100644
--- a/fs/ceph/super.h
+++ b/fs/ceph/super.h
@@ -48,7 +48,8 @@
#define CEPH_MOUNT_OPT_DEFAULT \
(CEPH_MOUNT_OPT_DCACHE | \
- CEPH_MOUNT_OPT_NOCOPYFROM)
+ CEPH_MOUNT_OPT_NOCOPYFROM | \
+ CEPH_MOUNT_OPT_ASYNC_DIROPS)
#define ceph_set_mount_opt(fsc, opt) \
(fsc)->mount_options->flags |= CEPH_MOUNT_OPT_##opt
@@ -128,9 +129,9 @@ struct ceph_fs_client {
struct dentry *debugfs_congestion_kb;
struct dentry *debugfs_bdi;
struct dentry *debugfs_mdsc, *debugfs_mdsmap;
- struct dentry *debugfs_metric;
struct dentry *debugfs_status;
struct dentry *debugfs_mds_sessions;
+ struct dentry *debugfs_metrics_dir;
#endif
#ifdef CONFIG_CEPH_FSCACHE
@@ -580,6 +581,7 @@ static inline struct inode *ceph_find_inode(struct super_block *sb,
#define CEPH_I_ODIRECT (1 << 11) /* inode in direct I/O mode */
#define CEPH_ASYNC_CREATE_BIT (12) /* async create in flight for this */
#define CEPH_I_ASYNC_CREATE (1 << CEPH_ASYNC_CREATE_BIT)
+#define CEPH_I_SHUTDOWN (1 << 13) /* inode is no longer usable */
/*
* Masks of ceph inode work.
@@ -939,6 +941,7 @@ extern void ceph_put_snapid_map(struct ceph_mds_client* mdsc,
struct ceph_snapid_map *sm);
extern void ceph_trim_snapid_map(struct ceph_mds_client *mdsc);
extern void ceph_cleanup_snapid_map(struct ceph_mds_client *mdsc);
+void ceph_umount_begin(struct super_block *sb);
/*
@@ -1027,6 +1030,16 @@ extern int ceph_setattr(struct user_namespace *mnt_userns,
extern int ceph_getattr(struct user_namespace *mnt_userns,
const struct path *path, struct kstat *stat,
u32 request_mask, unsigned int flags);
+void ceph_inode_shutdown(struct inode *inode);
+
+static inline bool ceph_inode_is_shutdown(struct inode *inode)
+{
+ unsigned long flags = READ_ONCE(ceph_inode(inode)->i_ceph_flags);
+ struct ceph_fs_client *fsc = ceph_inode_to_client(inode);
+ int state = READ_ONCE(fsc->mount_state);
+
+ return (flags & CEPH_I_SHUTDOWN) || state >= CEPH_MOUNT_SHUTDOWN;
+}
/* xattr.c */
int __ceph_setxattr(struct inode *, const char *, const void *, size_t, int);
@@ -1198,6 +1211,7 @@ extern int ceph_mmap(struct file *file, struct vm_area_struct *vma);
extern int ceph_uninline_data(struct file *filp, struct page *locked_page);
extern int ceph_pool_perm_check(struct inode *inode, int need);
extern void ceph_pool_perm_destroy(struct ceph_mds_client* mdsc);
+int ceph_purge_inode_cap(struct inode *inode, struct ceph_cap *cap, bool *invalidate);
/* file.c */
extern const struct file_operations ceph_file_fops;
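
ceph_inode_is_shutdown() above gives I/O paths a single cheap check covering both a purged inode (CEPH_I_SHUTDOWN) and a forcibly torn-down mount. A hedged sketch of how an entry point might use it; the function name and the -EIO choice are assumptions for illustration, as callers in the tree pick whichever errno fits their path:

static int example_ceph_io_entry(struct inode *inode)
{
	/* Bail out early instead of queueing work against a dead session */
	if (ceph_inode_is_shutdown(inode))
		return -EIO;

	/* ... normal read/write path continues here ... */
	return 0;
}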
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index de2c12bcfa4b..d282caf9f037 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -271,7 +271,8 @@ static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
c = 0;
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
- if (server->is_channel)
+ /* channel info will be printed as a part of sessions below */
+ if (CIFS_SERVER_IS_CHAN(server))
continue;
c++;
@@ -358,6 +359,8 @@ skip_rdma:
seq_printf(m, " signed");
if (server->posix_ext_supported)
seq_printf(m, " posix");
+ if (server->nosharesock)
+ seq_printf(m, " nosharesock");
if (server->rdma)
seq_printf(m, "\nRDMA ");
@@ -412,12 +415,14 @@ skip_rdma:
from_kuid(&init_user_ns, ses->linux_uid),
from_kuid(&init_user_ns, ses->cred_uid));
+ spin_lock(&ses->chan_lock);
if (ses->chan_count > 1) {
seq_printf(m, "\n\n\tExtra Channels: %zu ",
ses->chan_count-1);
for (j = 1; j < ses->chan_count; j++)
cifs_dump_channel(m, j, &ses->chans[j]);
}
+ spin_unlock(&ses->chan_lock);
seq_puts(m, "\n\n\tShares: ");
j = 0;
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index 007427ba75e5..b0864da9ef43 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -307,12 +307,8 @@ static struct vfsmount *cifs_dfs_do_mount(struct dentry *mntpt,
static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
{
struct cifs_sb_info *cifs_sb;
- struct cifs_ses *ses;
- struct cifs_tcon *tcon;
void *page;
- char *full_path, *root_path;
- unsigned int xid;
- int rc;
+ char *full_path;
struct vfsmount *mnt;
cifs_dbg(FYI, "in %s\n", __func__);
@@ -324,8 +320,6 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
* the double backslashes usually used in the UNC. This function
* gives us the latter, so we must adjust the result.
*/
- mnt = ERR_PTR(-ENOMEM);
-
cifs_sb = CIFS_SB(mntpt->d_sb);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) {
mnt = ERR_PTR(-EREMOTE);
@@ -341,60 +335,11 @@ static struct vfsmount *cifs_dfs_do_automount(struct dentry *mntpt)
}
convert_delimiter(full_path, '\\');
-
cifs_dbg(FYI, "%s: full_path: %s\n", __func__, full_path);
- if (!cifs_sb_master_tlink(cifs_sb)) {
- cifs_dbg(FYI, "%s: master tlink is NULL\n", __func__);
- goto free_full_path;
- }
-
- tcon = cifs_sb_master_tcon(cifs_sb);
- if (!tcon) {
- cifs_dbg(FYI, "%s: master tcon is NULL\n", __func__);
- goto free_full_path;
- }
-
- root_path = kstrdup(tcon->treeName, GFP_KERNEL);
- if (!root_path) {
- mnt = ERR_PTR(-ENOMEM);
- goto free_full_path;
- }
- cifs_dbg(FYI, "%s: root path: %s\n", __func__, root_path);
-
- ses = tcon->ses;
- xid = get_xid();
-
- /*
- * If DFS root has been expired, then unconditionally fetch it again to
- * refresh DFS referral cache.
- */
- rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
- root_path + 1, NULL, NULL);
- if (!rc) {
- rc = dfs_cache_find(xid, ses, cifs_sb->local_nls,
- cifs_remap(cifs_sb), full_path + 1,
- NULL, NULL);
- }
-
- free_xid(xid);
-
- if (rc) {
- mnt = ERR_PTR(rc);
- goto free_root_path;
- }
- /*
- * OK - we were able to get and cache a referral for @full_path.
- *
- * Now, pass it down to cifs_mount() and it will retry every available
- * node server in case of failures - no need to do it here.
- */
mnt = cifs_dfs_do_mount(mntpt, cifs_sb, full_path);
- cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__,
- full_path + 1, mnt);
+ cifs_dbg(FYI, "%s: cifs_dfs_do_mount:%s , mnt:%p\n", __func__, full_path + 1, mnt);
-free_root_path:
- kfree(root_path);
free_full_path:
free_dentry_path(page);
cdda_exit:
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index f97407520ea1..013a4bd65280 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -61,11 +61,6 @@ struct cifs_sb_info {
/* only used when CIFS_MOUNT_USE_PREFIX_PATH is set */
char *prepath;
- /*
- * Canonical DFS path initially provided by the mount call. We might connect to something
- * different via DFS but we want to keep it to do failover properly.
- */
- char *origin_fullpath; /* \\HOST\SHARE\[OPTIONAL PATH] */
/* randomly generated 128-bit number for indexing dfs mount groups in referral cache */
uuid_t dfs_mount_id;
/*
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index abff31dcd005..be74606724c7 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -15,6 +15,7 @@
#include <linux/slab.h>
#include <linux/mempool.h>
#include <linux/workqueue.h>
+#include <linux/utsname.h>
#include "cifs_fs_sb.h"
#include "cifsacl.h"
#include <crypto/internal/hash.h>
@@ -75,7 +76,8 @@
#define SMB_ECHO_INTERVAL_MAX 600
#define SMB_ECHO_INTERVAL_DEFAULT 60
-/* dns resolution interval in seconds */
+/* dns resolution intervals in seconds */
+#define SMB_DNS_RESOLVE_INTERVAL_MIN 120
#define SMB_DNS_RESOLVE_INTERVAL_DEFAULT 600
/* maximum number of PDUs in one compound */
@@ -99,6 +101,8 @@
#define XATTR_DOS_ATTRIB "user.DOSATTRIB"
#endif
+#define CIFS_MAX_WORKSTATION_LEN (__NEW_UTS_LEN + 1) /* reasonable max for client */
+
/*
* CIFS vfs client Status information (based on what we know.)
*/
@@ -592,6 +596,7 @@ struct TCP_Server_Info {
struct list_head pending_mid_q;
bool noblocksnd; /* use blocking sendmsg */
bool noautotune; /* do not autotune send buf sizes */
+ bool nosharesock;
bool tcp_nodelay;
unsigned int credits; /* send no more requests at once */
unsigned int max_credits; /* can override large 32000 default at mnt */
@@ -685,13 +690,34 @@ struct TCP_Server_Info {
*/
int nr_targets;
bool noblockcnt; /* use non-blocking connect() */
- bool is_channel; /* if a session channel */
+
+ /*
+ * If this is a session channel,
+ * primary_server holds the ref-counted
+ * pointer to primary channel connection for the session.
+ */
+#define CIFS_SERVER_IS_CHAN(server) (!!(server)->primary_server)
+ struct TCP_Server_Info *primary_server;
+
#ifdef CONFIG_CIFS_SWN_UPCALL
bool use_swn_dstaddr;
struct sockaddr_storage swn_dstaddr;
#endif
#ifdef CONFIG_CIFS_DFS_UPCALL
bool is_dfs_conn; /* if a dfs connection */
+ struct mutex refpath_lock; /* protects leaf_fullpath */
+ /*
+ * Canonical DFS full paths that were used to chase referrals in mount and reconnect.
+ *
+ * origin_fullpath: first or original referral path
+ * leaf_fullpath: last referral path (might be changed due to nested links in reconnect)
+ *
+ * current_fullpath: pointer to either origin_fullpath or leaf_fullpath
+ * NOTE: cannot be accessed outside cifs_reconnect() and smb2_reconnect()
+ *
+ * format: \\HOST\SHARE\[OPTIONAL PATH]
+ */
+ char *origin_fullpath, *leaf_fullpath, *current_fullpath;
#endif
};
@@ -908,6 +934,7 @@ struct cifs_ses {
and after mount option parsing we fill it */
char *domainName;
char *password;
+ char *workstation_name;
struct session_key auth_key;
struct ntlmssp_auth *ntlmssp; /* ciphertext, flags, server challenge */
enum securityEnum sectype; /* what security flavor was specified? */
@@ -933,16 +960,21 @@ struct cifs_ses {
* iface_lock should be taken when accessing any of these fields
*/
spinlock_t iface_lock;
+ /* ========= begin: protected by iface_lock ======== */
struct cifs_server_iface *iface_list;
size_t iface_count;
unsigned long iface_last_update; /* jiffies */
+ /* ========= end: protected by iface_lock ======== */
+ spinlock_t chan_lock;
+ /* ========= begin: protected by chan_lock ======== */
#define CIFS_MAX_CHANNELS 16
struct cifs_chan chans[CIFS_MAX_CHANNELS];
struct cifs_chan *binding_chan;
size_t chan_count;
size_t chan_max;
atomic_t chan_seq; /* round robin state */
+ /* ========= end: protected by chan_lock ======== */
};
/*
@@ -1091,7 +1123,6 @@ struct cifs_tcon {
struct cached_fid crfid; /* Cached root fid */
/* BB add field for back pointer to sb struct(s)? */
#ifdef CONFIG_CIFS_DFS_UPCALL
- char *dfs_path; /* canonical DFS path */
struct list_head ulist; /* cache update list */
#endif
};
@@ -1942,4 +1973,14 @@ static inline bool is_tcon_dfs(struct cifs_tcon *tcon)
tcon->share_flags & (SHI1005_FLAGS_DFS | SHI1005_FLAGS_DFS_ROOT);
}
+static inline bool cifs_is_referral_server(struct cifs_tcon *tcon,
+ const struct dfs_info3_param *ref)
+{
+ /*
+ * Check if all targets are capable of handling DFS referrals as per
+ * MS-DFSC 2.2.4 RESP_GET_DFS_REFERRAL.
+ */
+ return is_tcon_dfs(tcon) || (ref && (ref->flags & DFSREF_REFERRAL_SERVER));
+}
+
#endif /* _CIFS_GLOB_H */
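
The is_channel flag becomes a ref-counted primary_server back-pointer, and CIFS_SERVER_IS_CHAN() is simply a test of that pointer. A minimal sketch of the lifetime rule the connect.c changes below implement; the helper names are hypothetical, the real code open-codes this in cifs_get_tcp_session()/cifs_put_tcp_session(), and locking is elided here:

static void example_bind_channel(struct TCP_Server_Info *chan,
				 struct TCP_Server_Info *primary)
{
	++primary->srv_count;           /* secondary channel pins the primary */
	chan->primary_server = primary; /* makes CIFS_SERVER_IS_CHAN(chan) true */
}

static void example_release_channel(struct TCP_Server_Info *chan)
{
	if (CIFS_SERVER_IS_CHAN(chan))  /* drop the ref taken at bind time */
		cifs_put_tcp_session(chan->primary_server, 0);
}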
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index d0f85b666662..f3073a62ce57 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -269,8 +269,9 @@ extern void cifs_close_all_deferred_files(struct cifs_tcon *cifs_tcon);
extern void cifs_close_deferred_file_under_dentry(struct cifs_tcon *cifs_tcon,
const char *path);
-
-extern struct TCP_Server_Info *cifs_get_tcp_session(struct smb3_fs_context *ctx);
+extern struct TCP_Server_Info *
+cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ struct TCP_Server_Info *primary_server);
extern void cifs_put_tcp_session(struct TCP_Server_Info *server,
int from_reconnect);
extern void cifs_put_tcon(struct cifs_tcon *tcon);
@@ -607,7 +608,7 @@ int smb2_parse_query_directory(struct cifs_tcon *tcon, struct kvec *rsp_iov,
struct super_block *cifs_get_tcp_super(struct TCP_Server_Info *server);
void cifs_put_tcp_super(struct super_block *sb);
-int update_super_prepath(struct cifs_tcon *tcon, char *prefix);
+int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix);
char *extract_hostname(const char *unc);
char *extract_sharename(const char *unc);
@@ -634,4 +635,7 @@ static inline int cifs_create_options(struct cifs_sb_info *cifs_sb, int options)
return options;
}
+struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon);
+void cifs_put_tcon_super(struct super_block *sb);
+
#endif /* _CIFSPROTO_H */
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index 0abbff4e4135..82577a7a5bb1 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -61,6 +61,20 @@ extern bool disable_legacy_dialects;
/* Drop the connection to not overload the server */
#define NUM_STATUS_IO_TIMEOUT 5
+struct mount_ctx {
+ struct cifs_sb_info *cifs_sb;
+ struct smb3_fs_context *fs_ctx;
+ unsigned int xid;
+ struct TCP_Server_Info *server;
+ struct cifs_ses *ses;
+ struct cifs_tcon *tcon;
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ struct cifs_ses *root_ses;
+ uuid_t mount_id;
+ char *origin_fullpath, *leaf_fullpath;
+#endif
+};
+
static int ip_connect(struct TCP_Server_Info *server);
static int generic_ip_connect(struct TCP_Server_Info *server);
static void tlink_rb_insert(struct rb_root *root, struct tcon_link *new_tlink);
@@ -115,7 +129,7 @@ static int reconn_set_ipaddr_from_hostname(struct TCP_Server_Info *server)
* To make sure we don't use the cached entry, retry 1s
* after expiry.
*/
- ttl = (expiry - now + 1);
+ ttl = max_t(unsigned long, expiry - now, SMB_DNS_RESOLVE_INTERVAL_MIN) + 1;
}
rc = !rc ? -1 : 0;
@@ -148,139 +162,38 @@ static void cifs_resolve_server(struct work_struct *work)
mutex_unlock(&server->srv_mutex);
}
-#ifdef CONFIG_CIFS_DFS_UPCALL
-/* These functions must be called with server->srv_mutex held */
-static void reconn_set_next_dfs_target(struct TCP_Server_Info *server,
- struct cifs_sb_info *cifs_sb,
- struct dfs_cache_tgt_list *tgt_list,
- struct dfs_cache_tgt_iterator **tgt_it)
-{
- const char *name;
- int rc;
-
- if (!cifs_sb || !cifs_sb->origin_fullpath)
- return;
-
- if (!*tgt_it) {
- *tgt_it = dfs_cache_get_tgt_iterator(tgt_list);
- } else {
- *tgt_it = dfs_cache_get_next_tgt(tgt_list, *tgt_it);
- if (!*tgt_it)
- *tgt_it = dfs_cache_get_tgt_iterator(tgt_list);
- }
-
- cifs_dbg(FYI, "%s: UNC: %s\n", __func__, cifs_sb->origin_fullpath);
-
- name = dfs_cache_get_tgt_name(*tgt_it);
-
- kfree(server->hostname);
-
- server->hostname = extract_hostname(name);
- if (IS_ERR(server->hostname)) {
- cifs_dbg(FYI,
- "%s: failed to extract hostname from target: %ld\n",
- __func__, PTR_ERR(server->hostname));
- return;
- }
-
- rc = reconn_set_ipaddr_from_hostname(server);
- if (rc) {
- cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
- __func__, rc);
- }
-}
-
-static inline int reconn_setup_dfs_targets(struct cifs_sb_info *cifs_sb,
- struct dfs_cache_tgt_list *tl)
-{
- if (!cifs_sb->origin_fullpath)
- return -EOPNOTSUPP;
- return dfs_cache_noreq_find(cifs_sb->origin_fullpath + 1, NULL, tl);
-}
-#endif
-
-/*
- * cifs tcp session reconnection
+/**
+ * Mark all sessions and tcons for reconnect.
*
- * mark tcp session as reconnecting so temporarily locked
- * mark all smb sessions as reconnecting for tcp session
- * reconnect tcp session
- * wake up waiters on reconnection? - (not needed currently)
+ * @server needs to be previously set to CifsNeedReconnect.
*/
-int
-cifs_reconnect(struct TCP_Server_Info *server)
+static void cifs_mark_tcp_ses_conns_for_reconnect(struct TCP_Server_Info *server)
{
- int rc = 0;
- struct list_head *tmp, *tmp2;
struct cifs_ses *ses;
struct cifs_tcon *tcon;
- struct mid_q_entry *mid_entry;
+ struct mid_q_entry *mid, *nmid;
struct list_head retry_list;
-#ifdef CONFIG_CIFS_DFS_UPCALL
- struct super_block *sb = NULL;
- struct cifs_sb_info *cifs_sb = NULL;
- struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list);
- struct dfs_cache_tgt_iterator *tgt_it = NULL;
-#endif
+ struct TCP_Server_Info *pserver;
- spin_lock(&GlobalMid_Lock);
- server->nr_targets = 1;
-#ifdef CONFIG_CIFS_DFS_UPCALL
- spin_unlock(&GlobalMid_Lock);
- sb = cifs_get_tcp_super(server);
- if (IS_ERR(sb)) {
- rc = PTR_ERR(sb);
- cifs_dbg(FYI, "%s: will not do DFS failover: rc = %d\n",
- __func__, rc);
- sb = NULL;
- } else {
- cifs_sb = CIFS_SB(sb);
- rc = reconn_setup_dfs_targets(cifs_sb, &tgt_list);
- if (rc) {
- cifs_sb = NULL;
- if (rc != -EOPNOTSUPP) {
- cifs_server_dbg(VFS, "%s: no target servers for DFS failover\n",
- __func__);
- }
- } else {
- server->nr_targets = dfs_cache_get_nr_tgts(&tgt_list);
- }
- }
- cifs_dbg(FYI, "%s: will retry %d target(s)\n", __func__,
- server->nr_targets);
- spin_lock(&GlobalMid_Lock);
-#endif
- if (server->tcpStatus == CifsExiting) {
- /* the demux thread will exit normally
- next time through the loop */
- spin_unlock(&GlobalMid_Lock);
-#ifdef CONFIG_CIFS_DFS_UPCALL
- dfs_cache_free_tgts(&tgt_list);
- cifs_put_tcp_super(sb);
-#endif
- wake_up(&server->response_q);
- return rc;
- } else
- server->tcpStatus = CifsNeedReconnect;
- spin_unlock(&GlobalMid_Lock);
server->maxBuf = 0;
server->max_read = 0;
cifs_dbg(FYI, "Mark tcp session as need reconnect\n");
trace_smb3_reconnect(server->CurrentMid, server->conn_id, server->hostname);
+ /*
+ * before reconnecting the tcp session, mark the smb session (uid) and the tid bad so they
+ * are not used until reconnected.
+ */
+ cifs_dbg(FYI, "%s: marking sessions and tcons for reconnect\n", __func__);
+
+ /* If server is a channel, select the primary channel */
+ pserver = CIFS_SERVER_IS_CHAN(server) ? server->primary_server : server;
- /* before reconnecting the tcp session, mark the smb session (uid)
- and the tid bad so they are not used until reconnected */
- cifs_dbg(FYI, "%s: marking sessions and tcons for reconnect\n",
- __func__);
spin_lock(&cifs_tcp_ses_lock);
- list_for_each(tmp, &server->smb_ses_list) {
- ses = list_entry(tmp, struct cifs_ses, smb_ses_list);
+ list_for_each_entry(ses, &pserver->smb_ses_list, smb_ses_list) {
ses->need_reconnect = true;
- list_for_each(tmp2, &ses->tcon_list) {
- tcon = list_entry(tmp2, struct cifs_tcon, tcon_list);
+ list_for_each_entry(tcon, &ses->tcon_list, tcon_list)
tcon->need_reconnect = true;
- }
if (ses->tcon_ipc)
ses->tcon_ipc->need_reconnect = true;
}
@@ -290,11 +203,11 @@ cifs_reconnect(struct TCP_Server_Info *server)
cifs_dbg(FYI, "%s: tearing down socket\n", __func__);
mutex_lock(&server->srv_mutex);
if (server->ssocket) {
- cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n",
- server->ssocket->state, server->ssocket->flags);
+ cifs_dbg(FYI, "State: 0x%x Flags: 0x%lx\n", server->ssocket->state,
+ server->ssocket->flags);
kernel_sock_shutdown(server->ssocket, SHUT_WR);
- cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n",
- server->ssocket->state, server->ssocket->flags);
+ cifs_dbg(FYI, "Post shutdown state: 0x%x Flags: 0x%lx\n", server->ssocket->state,
+ server->ssocket->flags);
sock_release(server->ssocket);
server->ssocket = NULL;
}
@@ -309,23 +222,21 @@ cifs_reconnect(struct TCP_Server_Info *server)
INIT_LIST_HEAD(&retry_list);
cifs_dbg(FYI, "%s: moving mids to private list\n", __func__);
spin_lock(&GlobalMid_Lock);
- list_for_each_safe(tmp, tmp2, &server->pending_mid_q) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- kref_get(&mid_entry->refcount);
- if (mid_entry->mid_state == MID_REQUEST_SUBMITTED)
- mid_entry->mid_state = MID_RETRY_NEEDED;
- list_move(&mid_entry->qhead, &retry_list);
- mid_entry->mid_flags |= MID_DELETED;
+ list_for_each_entry_safe(mid, nmid, &server->pending_mid_q, qhead) {
+ kref_get(&mid->refcount);
+ if (mid->mid_state == MID_REQUEST_SUBMITTED)
+ mid->mid_state = MID_RETRY_NEEDED;
+ list_move(&mid->qhead, &retry_list);
+ mid->mid_flags |= MID_DELETED;
}
spin_unlock(&GlobalMid_Lock);
mutex_unlock(&server->srv_mutex);
cifs_dbg(FYI, "%s: issuing mid callbacks\n", __func__);
- list_for_each_safe(tmp, tmp2, &retry_list) {
- mid_entry = list_entry(tmp, struct mid_q_entry, qhead);
- list_del_init(&mid_entry->qhead);
- mid_entry->callback(mid_entry);
- cifs_mid_q_entry_release(mid_entry);
+ list_for_each_entry_safe(mid, nmid, &retry_list, qhead) {
+ list_del_init(&mid->qhead);
+ mid->callback(mid);
+ cifs_mid_q_entry_release(mid);
}
if (cifs_rdma_enabled(server)) {
@@ -333,38 +244,48 @@ cifs_reconnect(struct TCP_Server_Info *server)
smbd_destroy(server);
mutex_unlock(&server->srv_mutex);
}
+}
+
+static bool cifs_tcp_ses_needs_reconnect(struct TCP_Server_Info *server, int num_targets)
+{
+ spin_lock(&GlobalMid_Lock);
+ server->nr_targets = num_targets;
+ if (server->tcpStatus == CifsExiting) {
+ /* the demux thread will exit normally next time through the loop */
+ spin_unlock(&GlobalMid_Lock);
+ wake_up(&server->response_q);
+ return false;
+ }
+ server->tcpStatus = CifsNeedReconnect;
+ spin_unlock(&GlobalMid_Lock);
+ return true;
+}
+
+/*
+ * cifs tcp session reconnection
+ *
+ * mark tcp session as reconnecting so temporarily locked
+ * mark all smb sessions as reconnecting for tcp session
+ * reconnect tcp session
+ * wake up waiters on reconnection? - (not needed currently)
+ */
+static int __cifs_reconnect(struct TCP_Server_Info *server)
+{
+ int rc = 0;
+
+ if (!cifs_tcp_ses_needs_reconnect(server, 1))
+ return 0;
+
+ cifs_mark_tcp_ses_conns_for_reconnect(server);
do {
try_to_freeze();
-
mutex_lock(&server->srv_mutex);
-
if (!cifs_swn_set_server_dstaddr(server)) {
-#ifdef CONFIG_CIFS_DFS_UPCALL
- if (cifs_sb && cifs_sb->origin_fullpath)
- /*
- * Set up next DFS target server (if any) for reconnect. If DFS
- * feature is disabled, then we will retry last server we
- * connected to before.
- */
- reconn_set_next_dfs_target(server, cifs_sb, &tgt_list, &tgt_it);
- else {
-#endif
- /*
- * Resolve the hostname again to make sure that IP address is up-to-date.
- */
+ /* resolve the hostname again to make sure that IP address is up-to-date */
rc = reconn_set_ipaddr_from_hostname(server);
- if (rc) {
- cifs_dbg(FYI, "%s: failed to resolve hostname: %d\n",
- __func__, rc);
- }
-
-#ifdef CONFIG_CIFS_DFS_UPCALL
- }
-#endif
-
-
+ cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
}
if (cifs_rdma_enabled(server))
@@ -372,8 +293,8 @@ cifs_reconnect(struct TCP_Server_Info *server)
else
rc = generic_ip_connect(server);
if (rc) {
- cifs_dbg(FYI, "reconnect error %d\n", rc);
mutex_unlock(&server->srv_mutex);
+ cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
msleep(3000);
} else {
atomic_inc(&tcpSesReconnectCount);
@@ -387,19 +308,128 @@ cifs_reconnect(struct TCP_Server_Info *server)
}
} while (server->tcpStatus == CifsNeedReconnect);
+ if (server->tcpStatus == CifsNeedNegotiate)
+ mod_delayed_work(cifsiod_wq, &server->echo, 0);
+
+ wake_up(&server->response_q);
+ return rc;
+}
+
#ifdef CONFIG_CIFS_DFS_UPCALL
- if (tgt_it) {
- rc = dfs_cache_noreq_update_tgthint(cifs_sb->origin_fullpath + 1,
- tgt_it);
- if (rc) {
- cifs_server_dbg(VFS, "%s: failed to update DFS target hint: rc = %d\n",
- __func__, rc);
+static int __reconnect_target_unlocked(struct TCP_Server_Info *server, const char *target)
+{
+ int rc;
+ char *hostname;
+
+ if (!cifs_swn_set_server_dstaddr(server)) {
+ if (server->hostname != target) {
+ hostname = extract_hostname(target);
+ if (!IS_ERR(hostname)) {
+ kfree(server->hostname);
+ server->hostname = hostname;
+ } else {
+ cifs_dbg(FYI, "%s: couldn't extract hostname or address from dfs target: %ld\n",
+ __func__, PTR_ERR(hostname));
+ cifs_dbg(FYI, "%s: default to last target server: %s\n", __func__,
+ server->hostname);
+ }
}
- dfs_cache_free_tgts(&tgt_list);
+ /* resolve the hostname again to make sure that IP address is up-to-date. */
+ rc = reconn_set_ipaddr_from_hostname(server);
+ cifs_dbg(FYI, "%s: reconn_set_ipaddr_from_hostname: rc=%d\n", __func__, rc);
}
+ /* Reconnect the socket */
+ if (cifs_rdma_enabled(server))
+ rc = smbd_reconnect(server);
+ else
+ rc = generic_ip_connect(server);
- cifs_put_tcp_super(sb);
-#endif
+ return rc;
+}
+
+static int reconnect_target_unlocked(struct TCP_Server_Info *server, struct dfs_cache_tgt_list *tl,
+ struct dfs_cache_tgt_iterator **target_hint)
+{
+ int rc;
+ struct dfs_cache_tgt_iterator *tit;
+
+ *target_hint = NULL;
+
+ /* If dfs target list is empty, then reconnect to last server */
+ tit = dfs_cache_get_tgt_iterator(tl);
+ if (!tit)
+ return __reconnect_target_unlocked(server, server->hostname);
+
+ /* Otherwise, try every dfs target in @tl */
+ for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
+ rc = __reconnect_target_unlocked(server, dfs_cache_get_tgt_name(tit));
+ if (!rc) {
+ *target_hint = tit;
+ break;
+ }
+ }
+ return rc;
+}
+
+static int reconnect_dfs_server(struct TCP_Server_Info *server)
+{
+ int rc = 0;
+ const char *refpath = server->current_fullpath + 1;
+ struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+ struct dfs_cache_tgt_iterator *target_hint = NULL;
+ int num_targets = 0;
+
+ /*
+	 * Determine the number of dfs targets the referral path in @server resolves to.
+ *
+ * smb2_reconnect() needs to know how long it should wait based upon the number of dfs
+ * targets (server->nr_targets). It's also possible that the cached referral was cleared
+ * through /proc/fs/cifs/dfscache or the target list is empty due to server settings after
+ * refreshing the referral, so, in this case, default it to 1.
+ */
+ if (!dfs_cache_noreq_find(refpath, NULL, &tl))
+ num_targets = dfs_cache_get_nr_tgts(&tl);
+ if (!num_targets)
+ num_targets = 1;
+
+ if (!cifs_tcp_ses_needs_reconnect(server, num_targets))
+ return 0;
+
+ cifs_mark_tcp_ses_conns_for_reconnect(server);
+
+ do {
+ try_to_freeze();
+ mutex_lock(&server->srv_mutex);
+
+ rc = reconnect_target_unlocked(server, &tl, &target_hint);
+ if (rc) {
+ /* Failed to reconnect socket */
+ mutex_unlock(&server->srv_mutex);
+ cifs_dbg(FYI, "%s: reconnect error %d\n", __func__, rc);
+ msleep(3000);
+ continue;
+ }
+ /*
+ * Socket was created. Update tcp session status to CifsNeedNegotiate so that a
+ * process waiting for reconnect will know it needs to re-establish session and tcon
+ * through the reconnected target server.
+ */
+ atomic_inc(&tcpSesReconnectCount);
+ set_credits(server, 1);
+ spin_lock(&GlobalMid_Lock);
+ if (server->tcpStatus != CifsExiting)
+ server->tcpStatus = CifsNeedNegotiate;
+ spin_unlock(&GlobalMid_Lock);
+ cifs_swn_reset_server_dstaddr(server);
+ mutex_unlock(&server->srv_mutex);
+ } while (server->tcpStatus == CifsNeedReconnect);
+
+ if (target_hint)
+ dfs_cache_noreq_update_tgthint(refpath, target_hint);
+
+ dfs_cache_free_tgts(&tl);
+
+ /* Need to set up echo worker again once connection has been established */
if (server->tcpStatus == CifsNeedNegotiate)
mod_delayed_work(cifsiod_wq, &server->echo, 0);
@@ -407,6 +437,25 @@ cifs_reconnect(struct TCP_Server_Info *server)
return rc;
}
+int cifs_reconnect(struct TCP_Server_Info *server)
+{
+	/* If tcp session is not a dfs connection, then reconnect to last target server */
+ spin_lock(&cifs_tcp_ses_lock);
+ if (!server->is_dfs_conn || !server->origin_fullpath || !server->leaf_fullpath) {
+ spin_unlock(&cifs_tcp_ses_lock);
+ return __cifs_reconnect(server);
+ }
+ spin_unlock(&cifs_tcp_ses_lock);
+
+ return reconnect_dfs_server(server);
+}
+#else
+int cifs_reconnect(struct TCP_Server_Info *server)
+{
+ return __cifs_reconnect(server);
+}
+#endif
+
static void
cifs_echo_request(struct work_struct *work)
{
@@ -665,13 +714,14 @@ dequeue_mid(struct mid_q_entry *mid, bool malformed)
* Trying to handle/dequeue a mid after the send_recv()
* function has finished processing it is a bug.
*/
- if (mid->mid_flags & MID_DELETED)
+ if (mid->mid_flags & MID_DELETED) {
+ spin_unlock(&GlobalMid_Lock);
pr_warn_once("trying to dequeue a deleted mid\n");
- else {
+ } else {
list_del_init(&mid->qhead);
mid->mid_flags |= MID_DELETED;
+ spin_unlock(&GlobalMid_Lock);
}
- spin_unlock(&GlobalMid_Lock);
}
static unsigned int
@@ -794,6 +844,10 @@ static void clean_demultiplex_info(struct TCP_Server_Info *server)
*/
}
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ kfree(server->origin_fullpath);
+ kfree(server->leaf_fullpath);
+#endif
kfree(server);
length = atomic_dec_return(&tcpSesAllocCount);
@@ -1217,7 +1271,13 @@ static int match_server(struct TCP_Server_Info *server, struct smb3_fs_context *
{
struct sockaddr *addr = (struct sockaddr *)&ctx->dstaddr;
- if (ctx->nosharesock)
+ if (ctx->nosharesock) {
+ server->nosharesock = true;
+ return 0;
+ }
+
+ /* this server does not share socket */
+ if (server->nosharesock)
return 0;
/* If multidialect negotiation see if existing sessions match one */
@@ -1283,7 +1343,7 @@ cifs_find_tcp_session(struct smb3_fs_context *ctx)
* Skip ses channels since they're only handled in lower layers
* (e.g. cifs_send_recv).
*/
- if (server->is_channel || !match_server(server, ctx))
+ if (CIFS_SERVER_IS_CHAN(server) || !match_server(server, ctx))
continue;
++server->srv_count;
@@ -1314,6 +1374,10 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
list_del_init(&server->tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
+ /* For secondary channels, we pick up ref-count on the primary server */
+ if (CIFS_SERVER_IS_CHAN(server))
+ cifs_put_tcp_session(server->primary_server, from_reconnect);
+
cancel_delayed_work_sync(&server->echo);
cancel_delayed_work_sync(&server->resolve);
@@ -1333,7 +1397,10 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
spin_unlock(&GlobalMid_Lock);
cifs_crypto_secmech_release(server);
- cifs_fscache_release_client_cookie(server);
+
+ /* fscache server cookies are based on primary channel only */
+ if (!CIFS_SERVER_IS_CHAN(server))
+ cifs_fscache_release_client_cookie(server);
kfree(server->session_key.response);
server->session_key.response = NULL;
@@ -1346,7 +1413,8 @@ cifs_put_tcp_session(struct TCP_Server_Info *server, int from_reconnect)
}
struct TCP_Server_Info *
-cifs_get_tcp_session(struct smb3_fs_context *ctx)
+cifs_get_tcp_session(struct smb3_fs_context *ctx,
+ struct TCP_Server_Info *primary_server)
{
struct TCP_Server_Info *tcp_ses = NULL;
int rc;
@@ -1383,6 +1451,10 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx)
tcp_ses->in_flight = 0;
tcp_ses->max_in_flight = 0;
tcp_ses->credits = 1;
+ if (primary_server) {
+ ++primary_server->srv_count;
+ tcp_ses->primary_server = primary_server;
+ }
init_waitqueue_head(&tcp_ses->response_q);
init_waitqueue_head(&tcp_ses->request_q);
INIT_LIST_HEAD(&tcp_ses->pending_mid_q);
@@ -1403,6 +1475,9 @@ cifs_get_tcp_session(struct smb3_fs_context *ctx)
INIT_DELAYED_WORK(&tcp_ses->resolve, cifs_resolve_server);
INIT_DELAYED_WORK(&tcp_ses->reconnect, smb2_reconnect_server);
mutex_init(&tcp_ses->reconnect_mutex);
+#ifdef CONFIG_CIFS_DFS_UPCALL
+ mutex_init(&tcp_ses->refpath_lock);
+#endif
memcpy(&tcp_ses->srcaddr, &ctx->srcaddr,
sizeof(tcp_ses->srcaddr));
memcpy(&tcp_ses->dstaddr, &ctx->dstaddr,
@@ -1481,7 +1556,9 @@ smbd_connected:
list_add(&tcp_ses->tcp_ses_list, &cifs_tcp_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
- cifs_fscache_get_client_cookie(tcp_ses);
+ /* fscache server cookies are based on primary channel only */
+ if (!CIFS_SERVER_IS_CHAN(tcp_ses))
+ cifs_fscache_get_client_cookie(tcp_ses);
/* queue echo request delayed work */
queue_delayed_work(cifsiod_wq, &tcp_ses->echo, tcp_ses->echo_interval);
@@ -1501,6 +1578,8 @@ out_err_crypto_release:
out_err:
if (tcp_ses) {
+ if (CIFS_SERVER_IS_CHAN(tcp_ses))
+ cifs_put_tcp_session(tcp_ses->primary_server, false);
kfree(tcp_ses->hostname);
if (tcp_ses->ssocket)
sock_release(tcp_ses->ssocket);
@@ -1519,8 +1598,12 @@ static int match_session(struct cifs_ses *ses, struct smb3_fs_context *ctx)
* If an existing session is limited to less channels than
* requested, it should not be reused
*/
- if (ses->chan_max < ctx->max_channels)
+ spin_lock(&ses->chan_lock);
+ if (ses->chan_max < ctx->max_channels) {
+ spin_unlock(&ses->chan_lock);
return 0;
+ }
+ spin_unlock(&ses->chan_lock);
switch (ses->sectype) {
case Kerberos:
@@ -1655,6 +1738,7 @@ cifs_find_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
void cifs_put_smb_ses(struct cifs_ses *ses)
{
unsigned int rc, xid;
+ unsigned int chan_count;
struct TCP_Server_Info *server = ses->server;
cifs_dbg(FYI, "%s: ses_count=%d\n", __func__, ses->ses_count);
@@ -1696,12 +1780,24 @@ void cifs_put_smb_ses(struct cifs_ses *ses)
list_del_init(&ses->smb_ses_list);
spin_unlock(&cifs_tcp_ses_lock);
+ spin_lock(&ses->chan_lock);
+ chan_count = ses->chan_count;
+ spin_unlock(&ses->chan_lock);
+
/* close any extra channels */
- if (ses->chan_count > 1) {
+ if (chan_count > 1) {
int i;
- for (i = 1; i < ses->chan_count; i++)
+ for (i = 1; i < chan_count; i++) {
+ /*
+ * note: for now, we're okay accessing ses->chans
+ * without chan_lock. But when chans can go away, we'll
+ * need to introduce ref counting to make sure that chan
+ * is not freed from under us.
+ */
cifs_put_tcp_session(ses->chans[i].server, 0);
+ ses->chans[i].server = NULL;
+ }
}
sesInfoFree(ses);
@@ -1885,16 +1981,18 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
ses->status);
mutex_lock(&ses->session_mutex);
- rc = cifs_negotiate_protocol(xid, ses);
- if (rc) {
- mutex_unlock(&ses->session_mutex);
- /* problem -- put our ses reference */
- cifs_put_smb_ses(ses);
- free_xid(xid);
- return ERR_PTR(rc);
- }
if (ses->need_reconnect) {
cifs_dbg(FYI, "Session needs reconnect\n");
+
+ rc = cifs_negotiate_protocol(xid, ses);
+ if (rc) {
+ mutex_unlock(&ses->session_mutex);
+ /* problem -- put our ses reference */
+ cifs_put_smb_ses(ses);
+ free_xid(xid);
+ return ERR_PTR(rc);
+ }
+
rc = cifs_setup_session(xid, ses,
ctx->local_nls);
if (rc) {
@@ -1942,6 +2040,12 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
if (!ses->domainName)
goto get_ses_fail;
}
+ if (ctx->workstation_name) {
+ ses->workstation_name = kstrdup(ctx->workstation_name,
+ GFP_KERNEL);
+ if (!ses->workstation_name)
+ goto get_ses_fail;
+ }
if (ctx->domainauto)
ses->domainAuto = ctx->domainauto;
ses->cred_uid = ctx->cred_uid;
@@ -1952,9 +2056,11 @@ cifs_get_smb_ses(struct TCP_Server_Info *server, struct smb3_fs_context *ctx)
mutex_lock(&ses->session_mutex);
/* add server as first channel */
+ spin_lock(&ses->chan_lock);
ses->chans[0].server = server;
ses->chan_count = 1;
ses->chan_max = ctx->multichannel ? ctx->max_channels:1;
+ spin_unlock(&ses->chan_lock);
rc = cifs_negotiate_protocol(xid, ses);
if (!rc)
@@ -2286,8 +2392,6 @@ cifs_get_tcon(struct cifs_ses *ses, struct smb3_fs_context *ctx)
list_add(&tcon->tcon_list, &ses->tcon_list);
spin_unlock(&cifs_tcp_ses_lock);
- cifs_fscache_get_super_cookie(tcon);
-
return tcon;
out_fail:
@@ -2849,73 +2953,64 @@ int cifs_setup_cifs_sb(struct cifs_sb_info *cifs_sb)
}
/* Release all succeed connections */
-static inline void mount_put_conns(struct cifs_sb_info *cifs_sb,
- unsigned int xid,
- struct TCP_Server_Info *server,
- struct cifs_ses *ses, struct cifs_tcon *tcon)
+static inline void mount_put_conns(struct mount_ctx *mnt_ctx)
{
int rc = 0;
- if (tcon)
- cifs_put_tcon(tcon);
- else if (ses)
- cifs_put_smb_ses(ses);
- else if (server)
- cifs_put_tcp_session(server, 0);
- cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
- free_xid(xid);
+ if (mnt_ctx->tcon)
+ cifs_put_tcon(mnt_ctx->tcon);
+ else if (mnt_ctx->ses)
+ cifs_put_smb_ses(mnt_ctx->ses);
+ else if (mnt_ctx->server)
+ cifs_put_tcp_session(mnt_ctx->server, 0);
+ mnt_ctx->cifs_sb->mnt_cifs_flags &= ~CIFS_MOUNT_POSIX_PATHS;
+ free_xid(mnt_ctx->xid);
}
/* Get connections for tcp, ses and tcon */
-static int mount_get_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb,
- unsigned int *xid,
- struct TCP_Server_Info **nserver,
- struct cifs_ses **nses, struct cifs_tcon **ntcon)
+static int mount_get_conns(struct mount_ctx *mnt_ctx)
{
int rc = 0;
- struct TCP_Server_Info *server;
- struct cifs_ses *ses;
- struct cifs_tcon *tcon;
-
- *nserver = NULL;
- *nses = NULL;
- *ntcon = NULL;
+ struct TCP_Server_Info *server = NULL;
+ struct cifs_ses *ses = NULL;
+ struct cifs_tcon *tcon = NULL;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ unsigned int xid;
- *xid = get_xid();
+ xid = get_xid();
/* get a reference to a tcp session */
- server = cifs_get_tcp_session(ctx);
+ server = cifs_get_tcp_session(ctx, NULL);
if (IS_ERR(server)) {
rc = PTR_ERR(server);
- return rc;
+ server = NULL;
+ goto out;
}
- *nserver = server;
-
/* get a reference to a SMB session */
ses = cifs_get_smb_ses(server, ctx);
if (IS_ERR(ses)) {
rc = PTR_ERR(ses);
- return rc;
+ ses = NULL;
+ goto out;
}
- *nses = ses;
-
if ((ctx->persistent == true) && (!(ses->server->capabilities &
SMB2_GLOBAL_CAP_PERSISTENT_HANDLES))) {
cifs_server_dbg(VFS, "persistent handles not supported by server\n");
- return -EOPNOTSUPP;
+ rc = -EOPNOTSUPP;
+ goto out;
}
/* search for existing tcon to this server share */
tcon = cifs_get_tcon(ses, ctx);
if (IS_ERR(tcon)) {
rc = PTR_ERR(tcon);
- return rc;
+ tcon = NULL;
+ goto out;
}
- *ntcon = tcon;
-
/* if new SMB3.11 POSIX extensions are supported do not remap / and \ */
if (tcon->posix_extensions)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_POSIX_PATHS;
@@ -2926,17 +3021,19 @@ static int mount_get_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cif
* reset of caps checks mount to see if unix extensions disabled
* for just this mount.
*/
- reset_cifs_unix_caps(*xid, tcon, cifs_sb, ctx);
+ reset_cifs_unix_caps(xid, tcon, cifs_sb, ctx);
if ((tcon->ses->server->tcpStatus == CifsNeedReconnect) &&
(le64_to_cpu(tcon->fsUnixInfo.Capability) &
- CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP))
- return -EACCES;
+ CIFS_UNIX_TRANSPORT_ENCRYPTION_MANDATORY_CAP)) {
+ rc = -EACCES;
+ goto out;
+ }
} else
tcon->unix_ext = 0; /* server does not support them */
/* do not care if a following call succeed - informational */
if (!tcon->pipe && server->ops->qfs_tcon) {
- server->ops->qfs_tcon(*xid, tcon, cifs_sb);
+ server->ops->qfs_tcon(xid, tcon, cifs_sb);
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RO_CACHE) {
if (tcon->fsDevInfo.DeviceCharacteristics &
cpu_to_le32(FILE_READ_ONLY_DEVICE))
@@ -2946,6 +3043,12 @@ static int mount_get_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cif
cifs_dbg(VFS, "read only mount of RW share\n");
/* no need to log a RW mount of a typical RW share */
}
+ /*
+ * The cookie is initialized from volume info returned above.
+ * Inside cifs_fscache_get_super_cookie it checks
+	 * that we do not get the super cookie twice.
+ */
+ cifs_fscache_get_super_cookie(tcon);
}
/*
@@ -2960,7 +3063,13 @@ static int mount_get_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cif
(cifs_sb->ctx->rsize > server->ops->negotiate_rsize(tcon, ctx)))
cifs_sb->ctx->rsize = server->ops->negotiate_rsize(tcon, ctx);
- return 0;
+out:
+ mnt_ctx->server = server;
+ mnt_ctx->ses = ses;
+ mnt_ctx->tcon = tcon;
+ mnt_ctx->xid = xid;
+
+ return rc;
}
static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
@@ -2990,18 +3099,17 @@ static int mount_setup_tlink(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
}
#ifdef CONFIG_CIFS_DFS_UPCALL
-static int mount_get_dfs_conns(struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb,
- unsigned int *xid, struct TCP_Server_Info **nserver,
- struct cifs_ses **nses, struct cifs_tcon **ntcon)
+/* Get unique dfs connections */
+static int mount_get_dfs_conns(struct mount_ctx *mnt_ctx)
{
int rc;
- ctx->nosharesock = true;
- rc = mount_get_conns(ctx, cifs_sb, xid, nserver, nses, ntcon);
- if (*nserver) {
+ mnt_ctx->fs_ctx->nosharesock = true;
+ rc = mount_get_conns(mnt_ctx);
+ if (mnt_ctx->server) {
cifs_dbg(FYI, "%s: marking tcp session as a dfs connection\n", __func__);
spin_lock(&cifs_tcp_ses_lock);
- (*nserver)->is_dfs_conn = true;
+ mnt_ctx->server->is_dfs_conn = true;
spin_unlock(&cifs_tcp_ses_lock);
}
return rc;
@@ -3043,190 +3151,38 @@ build_unc_path_to_root(const struct smb3_fs_context *ctx,
}
/*
- * expand_dfs_referral - Perform a dfs referral query and update the cifs_sb
+ * expand_dfs_referral - Update cifs_sb from dfs referral path
*
- * If a referral is found, cifs_sb->ctx->mount_options will be (re-)allocated
- * to a string containing updated options for the submount. Otherwise it
- * will be left untouched.
- *
- * Returns the rc from get_dfs_path to the caller, which can be used to
- * determine whether there were referrals.
+ * cifs_sb->ctx->mount_options will be (re-)allocated to a string containing updated options for the
+ * submount. Otherwise it will be left untouched.
*/
-static int
-expand_dfs_referral(const unsigned int xid, struct cifs_ses *ses,
- struct smb3_fs_context *ctx, struct cifs_sb_info *cifs_sb,
- char *ref_path)
-{
- int rc;
- struct dfs_info3_param referral = {0};
- char *full_path = NULL, *mdata = NULL;
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
- return -EREMOTE;
-
- full_path = build_unc_path_to_root(ctx, cifs_sb, true);
- if (IS_ERR(full_path))
- return PTR_ERR(full_path);
-
- rc = dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
- ref_path, &referral, NULL);
- if (!rc) {
- char *fake_devname = NULL;
-
- mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options,
- full_path + 1, &referral,
- &fake_devname);
- free_dfs_info_param(&referral);
-
- if (IS_ERR(mdata)) {
- rc = PTR_ERR(mdata);
- mdata = NULL;
- } else {
- /*
- * We can not clear out the whole structure since we
- * no longer have an explicit function to parse
- * a mount-string. Instead we need to clear out the
- * individual fields that are no longer valid.
- */
- kfree(ctx->prepath);
- ctx->prepath = NULL;
- rc = cifs_setup_volume_info(ctx, mdata, fake_devname);
- }
- kfree(fake_devname);
- kfree(cifs_sb->ctx->mount_options);
- cifs_sb->ctx->mount_options = mdata;
- }
- kfree(full_path);
- return rc;
-}
-
-static int get_next_dfs_tgt(struct dfs_cache_tgt_list *tgt_list,
- struct dfs_cache_tgt_iterator **tgt_it)
-{
- if (!*tgt_it)
- *tgt_it = dfs_cache_get_tgt_iterator(tgt_list);
- else
- *tgt_it = dfs_cache_get_next_tgt(tgt_list, *tgt_it);
- return !*tgt_it ? -EHOSTDOWN : 0;
-}
-
-static int update_vol_info(const struct dfs_cache_tgt_iterator *tgt_it,
- struct smb3_fs_context *fake_ctx, struct smb3_fs_context *ctx)
-{
- const char *tgt = dfs_cache_get_tgt_name(tgt_it);
- int len = strlen(tgt) + 2;
- char *new_unc;
-
- new_unc = kmalloc(len, GFP_KERNEL);
- if (!new_unc)
- return -ENOMEM;
- scnprintf(new_unc, len, "\\%s", tgt);
-
- kfree(ctx->UNC);
- ctx->UNC = new_unc;
-
- if (fake_ctx->prepath) {
- kfree(ctx->prepath);
- ctx->prepath = fake_ctx->prepath;
- fake_ctx->prepath = NULL;
- }
- memcpy(&ctx->dstaddr, &fake_ctx->dstaddr, sizeof(ctx->dstaddr));
-
- return 0;
-}
-
-static int do_dfs_failover(const char *path, const char *full_path, struct cifs_sb_info *cifs_sb,
- struct smb3_fs_context *ctx, struct cifs_ses *root_ses,
- unsigned int *xid, struct TCP_Server_Info **server,
- struct cifs_ses **ses, struct cifs_tcon **tcon)
+static int expand_dfs_referral(struct mount_ctx *mnt_ctx, const char *full_path,
+ struct dfs_info3_param *referral)
{
int rc;
- char *npath = NULL;
- struct dfs_cache_tgt_list tgt_list = DFS_CACHE_TGT_LIST_INIT(tgt_list);
- struct dfs_cache_tgt_iterator *tgt_it = NULL;
- struct smb3_fs_context tmp_ctx = {NULL};
-
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS)
- return -EOPNOTSUPP;
-
- npath = dfs_cache_canonical_path(path, cifs_sb->local_nls, cifs_remap(cifs_sb));
- if (IS_ERR(npath))
- return PTR_ERR(npath);
-
- cifs_dbg(FYI, "%s: path=%s full_path=%s\n", __func__, npath, full_path);
-
- rc = dfs_cache_noreq_find(npath, NULL, &tgt_list);
- if (rc)
- goto out;
- /*
- * We use a 'tmp_ctx' here because we need pass it down to the mount_{get,put} functions to
- * test connection against new DFS targets.
- */
- rc = smb3_fs_context_dup(&tmp_ctx, ctx);
- if (rc)
- goto out;
-
- for (;;) {
- struct dfs_info3_param ref = {0};
- char *fake_devname = NULL, *mdata = NULL;
-
- /* Get next DFS target server - if any */
- rc = get_next_dfs_tgt(&tgt_list, &tgt_it);
- if (rc)
- break;
-
- rc = dfs_cache_get_tgt_referral(npath, tgt_it, &ref);
- if (rc)
- break;
-
- cifs_dbg(FYI, "%s: old ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
- tmp_ctx.prepath);
-
- mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, full_path + 1, &ref,
- &fake_devname);
- free_dfs_info_param(&ref);
-
- if (IS_ERR(mdata)) {
- rc = PTR_ERR(mdata);
- mdata = NULL;
- } else
- rc = cifs_setup_volume_info(&tmp_ctx, mdata, fake_devname);
-
- kfree(mdata);
- kfree(fake_devname);
-
- if (rc)
- break;
-
- cifs_dbg(FYI, "%s: new ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
- tmp_ctx.prepath);
-
- mount_put_conns(cifs_sb, *xid, *server, *ses, *tcon);
- rc = mount_get_dfs_conns(&tmp_ctx, cifs_sb, xid, server, ses, tcon);
- if (!rc || (*server && *ses)) {
- /*
- * We were able to connect to new target server. Update current context with
- * new target server.
- */
- rc = update_vol_info(tgt_it, &tmp_ctx, ctx);
- break;
- }
- }
- if (!rc) {
- cifs_dbg(FYI, "%s: final ctx: UNC=%s prepath=%s\n", __func__, tmp_ctx.UNC,
- tmp_ctx.prepath);
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ char *fake_devname = NULL, *mdata = NULL;
+
+ mdata = cifs_compose_mount_options(cifs_sb->ctx->mount_options, full_path + 1, referral,
+ &fake_devname);
+ if (IS_ERR(mdata)) {
+ rc = PTR_ERR(mdata);
+ mdata = NULL;
+ } else {
/*
- * Update DFS target hint in DFS referral cache with the target server we
- * successfully reconnected to.
+ * We can not clear out the whole structure since we no longer have an explicit
+ * function to parse a mount-string. Instead we need to clear out the individual
+ * fields that are no longer valid.
*/
- rc = dfs_cache_update_tgthint(*xid, root_ses ? root_ses : *ses, cifs_sb->local_nls,
- cifs_remap(cifs_sb), path, tgt_it);
+ kfree(ctx->prepath);
+ ctx->prepath = NULL;
+ rc = cifs_setup_volume_info(ctx, mdata, fake_devname);
}
+ kfree(fake_devname);
+ kfree(cifs_sb->ctx->mount_options);
+ cifs_sb->ctx->mount_options = mdata;
-out:
- kfree(npath);
- smb3_cleanup_fs_context_contents(&tmp_ctx);
- dfs_cache_free_tgts(&tgt_list);
return rc;
}
#endif
@@ -3333,12 +3289,14 @@ cifs_are_all_path_components_accessible(struct TCP_Server_Info *server,
* Check if path is remote (e.g. a DFS share). Return -EREMOTE if it is,
* otherwise 0.
*/
-static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
- const unsigned int xid,
- struct TCP_Server_Info *server,
- struct cifs_tcon *tcon)
+static int is_path_remote(struct mount_ctx *mnt_ctx)
{
int rc;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct TCP_Server_Info *server = mnt_ctx->server;
+ unsigned int xid = mnt_ctx->xid;
+ struct cifs_tcon *tcon = mnt_ctx->tcon;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
char *full_path;
if (!server->ops->is_path_accessible)
@@ -3376,280 +3334,289 @@ static int is_path_remote(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *
}
#ifdef CONFIG_CIFS_DFS_UPCALL
-static void set_root_ses(struct cifs_sb_info *cifs_sb, const uuid_t *mount_id, struct cifs_ses *ses,
- struct cifs_ses **root_ses)
+static void set_root_ses(struct mount_ctx *mnt_ctx)
{
- if (ses) {
+ if (mnt_ctx->ses) {
spin_lock(&cifs_tcp_ses_lock);
- ses->ses_count++;
+ mnt_ctx->ses->ses_count++;
spin_unlock(&cifs_tcp_ses_lock);
- dfs_cache_add_refsrv_session(mount_id, ses);
+ dfs_cache_add_refsrv_session(&mnt_ctx->mount_id, mnt_ctx->ses);
}
- *root_ses = ses;
+ mnt_ctx->root_ses = mnt_ctx->ses;
}
-/* Set up next dfs prefix path in @dfs_path */
-static int next_dfs_prepath(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx,
- const unsigned int xid, struct TCP_Server_Info *server,
- struct cifs_tcon *tcon, char **dfs_path)
+static int is_dfs_mount(struct mount_ctx *mnt_ctx, bool *isdfs, struct dfs_cache_tgt_list *root_tl)
{
- char *path, *npath;
- int added_treename = is_tcon_dfs(tcon);
int rc;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
- path = cifs_build_path_to_root(ctx, cifs_sb, tcon, added_treename);
- if (!path)
- return -ENOMEM;
+ *isdfs = true;
- rc = is_path_remote(cifs_sb, ctx, xid, server, tcon);
- if (rc == -EREMOTE) {
- struct smb3_fs_context v = {NULL};
- /* if @path contains a tree name, skip it in the prefix path */
- if (added_treename) {
- rc = smb3_parse_devname(path, &v);
- if (rc)
- goto out;
- npath = build_unc_path_to_root(&v, cifs_sb, true);
- smb3_cleanup_fs_context_contents(&v);
- } else {
- v.UNC = ctx->UNC;
- v.prepath = path + 1;
- npath = build_unc_path_to_root(&v, cifs_sb, true);
- }
+ rc = mount_get_conns(mnt_ctx);
+ /*
+ * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
+	 * try to get a DFS referral (even cached) to determine whether it is a DFS mount.
+ *
+ * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
+ * to respond with PATH_NOT_COVERED to requests that include the prefix.
+ */
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
+ dfs_cache_find(mnt_ctx->xid, mnt_ctx->ses, cifs_sb->local_nls, cifs_remap(cifs_sb),
+ ctx->UNC + 1, NULL, root_tl)) {
+ if (rc)
+ return rc;
+ /* Check if it is fully accessible and then mount it */
+ rc = is_path_remote(mnt_ctx);
+ if (!rc)
+ *isdfs = false;
+ else if (rc != -EREMOTE)
+ return rc;
+ }
+ return 0;
+}
- if (IS_ERR(npath)) {
- rc = PTR_ERR(npath);
- goto out;
- }
+static int connect_dfs_target(struct mount_ctx *mnt_ctx, const char *full_path,
+ const char *ref_path, struct dfs_cache_tgt_iterator *tit)
+{
+ int rc;
+ struct dfs_info3_param ref = {};
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ char *oldmnt = cifs_sb->ctx->mount_options;
- kfree(*dfs_path);
- *dfs_path = npath;
- rc = -EREMOTE;
+ rc = dfs_cache_get_tgt_referral(ref_path, tit, &ref);
+ if (rc)
+ goto out;
+
+ rc = expand_dfs_referral(mnt_ctx, full_path, &ref);
+ if (rc)
+ goto out;
+
+ /* Connect to new target only if we were redirected (e.g. mount options changed) */
+ if (oldmnt != cifs_sb->ctx->mount_options) {
+ mount_put_conns(mnt_ctx);
+ rc = mount_get_dfs_conns(mnt_ctx);
+ }
+ if (!rc) {
+ if (cifs_is_referral_server(mnt_ctx->tcon, &ref))
+ set_root_ses(mnt_ctx);
+ rc = dfs_cache_update_tgthint(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
+ cifs_remap(cifs_sb), ref_path, tit);
}
out:
- kfree(path);
+ free_dfs_info_param(&ref);
return rc;
}
-/* Check if resolved targets can handle any DFS referrals */
-static int is_referral_server(const char *ref_path, struct cifs_sb_info *cifs_sb,
- struct cifs_tcon *tcon, bool *ref_server)
+static int connect_dfs_root(struct mount_ctx *mnt_ctx, struct dfs_cache_tgt_list *root_tl)
{
int rc;
- struct dfs_info3_param ref = {0};
+ char *full_path;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ struct dfs_cache_tgt_iterator *tit;
- cifs_dbg(FYI, "%s: ref_path=%s\n", __func__, ref_path);
+ /* Put initial connections as they might be shared with other mounts. We need unique dfs
+	 * connections per mount to fail over properly, so mount_get_dfs_conns() must be used from
+ * now on.
+ */
+ mount_put_conns(mnt_ctx);
+ mount_get_dfs_conns(mnt_ctx);
- if (is_tcon_dfs(tcon)) {
- *ref_server = true;
- } else {
- char *npath;
+ full_path = build_unc_path_to_root(ctx, cifs_sb, true);
+ if (IS_ERR(full_path))
+ return PTR_ERR(full_path);
- npath = dfs_cache_canonical_path(ref_path, cifs_sb->local_nls, cifs_remap(cifs_sb));
- if (IS_ERR(npath))
- return PTR_ERR(npath);
+ mnt_ctx->origin_fullpath = dfs_cache_canonical_path(ctx->UNC, cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
+ if (IS_ERR(mnt_ctx->origin_fullpath)) {
+ rc = PTR_ERR(mnt_ctx->origin_fullpath);
+ mnt_ctx->origin_fullpath = NULL;
+ goto out;
+ }
- rc = dfs_cache_noreq_find(npath, &ref, NULL);
- kfree(npath);
- if (rc) {
- cifs_dbg(VFS, "%s: dfs_cache_noreq_find: failed (rc=%d)\n", __func__, rc);
- return rc;
+ /* Try all dfs root targets */
+ for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(root_tl);
+ tit; tit = dfs_cache_get_next_tgt(root_tl, tit)) {
+ rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->origin_fullpath + 1, tit);
+ if (!rc) {
+ mnt_ctx->leaf_fullpath = kstrdup(mnt_ctx->origin_fullpath, GFP_KERNEL);
+ if (!mnt_ctx->leaf_fullpath)
+ rc = -ENOMEM;
+ break;
}
- cifs_dbg(FYI, "%s: ref.flags=0x%x\n", __func__, ref.flags);
- /*
- * Check if all targets are capable of handling DFS referrals as per
- * MS-DFSC 2.2.4 RESP_GET_DFS_REFERRAL.
- */
- *ref_server = !!(ref.flags & DFSREF_REFERRAL_SERVER);
- free_dfs_info_param(&ref);
}
- return 0;
+
+out:
+ kfree(full_path);
+ return rc;
}
-int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+static int __follow_dfs_link(struct mount_ctx *mnt_ctx)
{
- int rc = 0;
- unsigned int xid;
- struct TCP_Server_Info *server = NULL;
- struct cifs_ses *ses = NULL, *root_ses = NULL;
- struct cifs_tcon *tcon = NULL;
- int count = 0;
- uuid_t mount_id = {0};
- char *ref_path = NULL, *full_path = NULL;
- char *oldmnt = NULL;
- bool ref_server = false;
+ int rc;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ char *full_path;
+ struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+ struct dfs_cache_tgt_iterator *tit;
- rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
- /*
- * If called with 'nodfs' mount option, then skip DFS resolving. Otherwise unconditionally
- * try to get an DFS referral (even cached) to determine whether it is an DFS mount.
- *
- * Skip prefix path to provide support for DFS referrals from w2k8 servers which don't seem
- * to respond with PATH_NOT_COVERED to requests that include the prefix.
- */
- if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_DFS) ||
- dfs_cache_find(xid, ses, cifs_sb->local_nls, cifs_remap(cifs_sb), ctx->UNC + 1, NULL,
- NULL)) {
- if (rc)
- goto error;
- /* Check if it is fully accessible and then mount it */
- rc = is_path_remote(cifs_sb, ctx, xid, server, tcon);
- if (!rc)
- goto out;
- if (rc != -EREMOTE)
- goto error;
+ full_path = build_unc_path_to_root(ctx, cifs_sb, true);
+ if (IS_ERR(full_path))
+ return PTR_ERR(full_path);
+
+ kfree(mnt_ctx->leaf_fullpath);
+ mnt_ctx->leaf_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
+ if (IS_ERR(mnt_ctx->leaf_fullpath)) {
+ rc = PTR_ERR(mnt_ctx->leaf_fullpath);
+ mnt_ctx->leaf_fullpath = NULL;
+ goto out;
}
- mount_put_conns(cifs_sb, xid, server, ses, tcon);
- /*
- * Ignore error check here because we may failover to other targets from cached a
- * referral.
- */
- (void)mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
+ /* Get referral from dfs link */
+ rc = dfs_cache_find(mnt_ctx->xid, mnt_ctx->root_ses, cifs_sb->local_nls,
+ cifs_remap(cifs_sb), mnt_ctx->leaf_fullpath + 1, NULL, &tl);
+ if (rc)
+ goto out;
- /* Get path of DFS root */
- ref_path = build_unc_path_to_root(ctx, cifs_sb, false);
- if (IS_ERR(ref_path)) {
- rc = PTR_ERR(ref_path);
- ref_path = NULL;
- goto error;
+ /* Try all dfs link targets */
+ for (rc = -ENOENT, tit = dfs_cache_get_tgt_iterator(&tl);
+ tit; tit = dfs_cache_get_next_tgt(&tl, tit)) {
+ rc = connect_dfs_target(mnt_ctx, full_path, mnt_ctx->leaf_fullpath + 1, tit);
+ if (!rc) {
+ rc = is_path_remote(mnt_ctx);
+ break;
+ }
+ }
+
+out:
+ kfree(full_path);
+ dfs_cache_free_tgts(&tl);
+ return rc;
+}
+
+static int follow_dfs_link(struct mount_ctx *mnt_ctx)
+{
+ int rc;
+ struct cifs_sb_info *cifs_sb = mnt_ctx->cifs_sb;
+ struct smb3_fs_context *ctx = mnt_ctx->fs_ctx;
+ char *full_path;
+ int num_links = 0;
+
+ full_path = build_unc_path_to_root(ctx, cifs_sb, true);
+ if (IS_ERR(full_path))
+ return PTR_ERR(full_path);
+
+ kfree(mnt_ctx->origin_fullpath);
+ mnt_ctx->origin_fullpath = dfs_cache_canonical_path(full_path, cifs_sb->local_nls,
+ cifs_remap(cifs_sb));
+ kfree(full_path);
+
+ if (IS_ERR(mnt_ctx->origin_fullpath)) {
+ rc = PTR_ERR(mnt_ctx->origin_fullpath);
+ mnt_ctx->origin_fullpath = NULL;
+ return rc;
}
- uuid_gen(&mount_id);
- set_root_ses(cifs_sb, &mount_id, ses, &root_ses);
do {
- /* Save full path of last DFS path we used to resolve final target server */
- kfree(full_path);
- full_path = build_unc_path_to_root(ctx, cifs_sb, !!count);
- if (IS_ERR(full_path)) {
- rc = PTR_ERR(full_path);
- full_path = NULL;
- break;
- }
- /* Chase referral */
- oldmnt = cifs_sb->ctx->mount_options;
- rc = expand_dfs_referral(xid, root_ses, ctx, cifs_sb, ref_path + 1);
- if (rc)
- break;
- /* Connect to new DFS target only if we were redirected */
- if (oldmnt != cifs_sb->ctx->mount_options) {
- mount_put_conns(cifs_sb, xid, server, ses, tcon);
- rc = mount_get_dfs_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
- }
- if (rc && !server && !ses) {
- /* Failed to connect. Try to connect to other targets in the referral. */
- rc = do_dfs_failover(ref_path + 1, full_path, cifs_sb, ctx, root_ses, &xid,
- &server, &ses, &tcon);
- }
- if (rc == -EACCES || rc == -EOPNOTSUPP || !server || !ses)
+ rc = __follow_dfs_link(mnt_ctx);
+ if (!rc || rc != -EREMOTE)
break;
- if (!tcon)
- continue;
+ } while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
- /* Make sure that requests go through new root servers */
- rc = is_referral_server(ref_path + 1, cifs_sb, tcon, &ref_server);
- if (rc)
- break;
- if (ref_server)
- set_root_ses(cifs_sb, &mount_id, ses, &root_ses);
+ return rc;
+}
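The loop above chases nested DFS links at most MAX_NESTED_LINKS times and converts exhaustion into -ELOOP through the comma operator in the while condition. A minimal user-space sketch of that bounding idiom follows; resolve_one() and the errno values are stand-ins for the sketch, not CIFS code.

	#include <stdio.h>

	#define EREMOTE	66	/* stand-in errno values for the sketch */
	#define ELOOP	40
	#define MAX_NESTED_LINKS 8

	/* Stand-in for __follow_dfs_link(): returns -EREMOTE while the path
	 * still resolves to another link, 0 once a real share is reached. */
	static int resolve_one(int *depth_left)
	{
		return --(*depth_left) > 0 ? -EREMOTE : 0;
	}

	static int follow_links(int link_depth)
	{
		int rc, num_links = 0;

		do {
			rc = resolve_one(&link_depth);
			if (!rc || rc != -EREMOTE)
				break;			/* success or a hard error */
		} while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
		/* if the loop condition fails, rc has already been set to -ELOOP */

		return rc;
	}

	int main(void)
	{
		printf("shallow chain: rc=%d\n", follow_links(3));   /* 0 */
		printf("deep chain:    rc=%d\n", follow_links(100)); /* -ELOOP */
		return 0;
	}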
- /* Get next dfs path and then continue chasing them if -EREMOTE */
- rc = next_dfs_prepath(cifs_sb, ctx, xid, server, tcon, &ref_path);
- /* Prevent recursion on broken link referrals */
- if (rc == -EREMOTE && ++count > MAX_NESTED_LINKS)
- rc = -ELOOP;
- } while (rc == -EREMOTE);
+/* Set up DFS referral paths for failover */
+static void setup_server_referral_paths(struct mount_ctx *mnt_ctx)
+{
+ struct TCP_Server_Info *server = mnt_ctx->server;
+
+ server->origin_fullpath = mnt_ctx->origin_fullpath;
+ server->leaf_fullpath = mnt_ctx->leaf_fullpath;
+ server->current_fullpath = mnt_ctx->leaf_fullpath;
+ mnt_ctx->origin_fullpath = mnt_ctx->leaf_fullpath = NULL;
+}
- if (rc || !tcon || !ses)
+int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
+{
+ int rc;
+ struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
+ struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+ bool isdfs;
+
+ rc = is_dfs_mount(&mnt_ctx, &isdfs, &tl);
+ if (rc)
goto error;
+ if (!isdfs)
+ goto out;
- kfree(ref_path);
- /*
- * Store DFS full path in both superblock and tree connect structures.
- *
- * For DFS root mounts, the prefix path (cifs_sb->prepath) is preserved during reconnect so
- * only the root path is set in cifs_sb->origin_fullpath and tcon->dfs_path. And for DFS
- * links, the prefix path is included in both and may be changed during reconnect. See
- * cifs_tree_connect().
- */
- ref_path = dfs_cache_canonical_path(full_path, cifs_sb->local_nls, cifs_remap(cifs_sb));
- kfree(full_path);
- full_path = NULL;
+ uuid_gen(&mnt_ctx.mount_id);
+ rc = connect_dfs_root(&mnt_ctx, &tl);
+ dfs_cache_free_tgts(&tl);
- if (IS_ERR(ref_path)) {
- rc = PTR_ERR(ref_path);
- ref_path = NULL;
+ if (rc)
goto error;
- }
- cifs_sb->origin_fullpath = ref_path;
- ref_path = kstrdup(cifs_sb->origin_fullpath, GFP_KERNEL);
- if (!ref_path) {
- rc = -ENOMEM;
+ rc = is_path_remote(&mnt_ctx);
+ if (rc == -EREMOTE)
+ rc = follow_dfs_link(&mnt_ctx);
+ if (rc)
goto error;
- }
- spin_lock(&cifs_tcp_ses_lock);
- tcon->dfs_path = ref_path;
- ref_path = NULL;
- spin_unlock(&cifs_tcp_ses_lock);
+ setup_server_referral_paths(&mnt_ctx);
/*
- * After reconnecting to a different server, unique ids won't
- * match anymore, so we disable serverino. This prevents
- * dentry revalidation to think the dentry are stale (ESTALE).
+	 * After reconnecting to a different server, unique ids won't match anymore, so we disable
+	 * serverino. This prevents dentry revalidation from treating the dentries as stale (ESTALE).
*/
cifs_autodisable_serverino(cifs_sb);
/*
- * Force the use of prefix path to support failover on DFS paths that
- * resolve to targets that have different prefix paths.
+ * Force the use of prefix path to support failover on DFS paths that resolve to targets
+ * that have different prefix paths.
*/
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
kfree(cifs_sb->prepath);
cifs_sb->prepath = ctx->prepath;
ctx->prepath = NULL;
- uuid_copy(&cifs_sb->dfs_mount_id, &mount_id);
+ uuid_copy(&cifs_sb->dfs_mount_id, &mnt_ctx.mount_id);
out:
- free_xid(xid);
- cifs_try_adding_channels(cifs_sb, ses);
- return mount_setup_tlink(cifs_sb, ses, tcon);
+ free_xid(mnt_ctx.xid);
+ cifs_try_adding_channels(cifs_sb, mnt_ctx.ses);
+ return mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
error:
- kfree(ref_path);
- kfree(full_path);
- kfree(cifs_sb->origin_fullpath);
- dfs_cache_put_refsrv_sessions(&mount_id);
- mount_put_conns(cifs_sb, xid, server, ses, tcon);
+ dfs_cache_put_refsrv_sessions(&mnt_ctx.mount_id);
+ kfree(mnt_ctx.origin_fullpath);
+ kfree(mnt_ctx.leaf_fullpath);
+ mount_put_conns(&mnt_ctx);
return rc;
}
#else
int cifs_mount(struct cifs_sb_info *cifs_sb, struct smb3_fs_context *ctx)
{
int rc = 0;
- unsigned int xid;
- struct cifs_ses *ses;
- struct cifs_tcon *tcon;
- struct TCP_Server_Info *server;
+ struct mount_ctx mnt_ctx = { .cifs_sb = cifs_sb, .fs_ctx = ctx, };
- rc = mount_get_conns(ctx, cifs_sb, &xid, &server, &ses, &tcon);
+ rc = mount_get_conns(&mnt_ctx);
if (rc)
goto error;
- if (tcon) {
- rc = is_path_remote(cifs_sb, ctx, xid, server, tcon);
+ if (mnt_ctx.tcon) {
+ rc = is_path_remote(&mnt_ctx);
if (rc == -EREMOTE)
rc = -EOPNOTSUPP;
if (rc)
goto error;
}
- free_xid(xid);
-
- return mount_setup_tlink(cifs_sb, ses, tcon);
+ free_xid(mnt_ctx.xid);
+ return mount_setup_tlink(cifs_sb, mnt_ctx.ses, mnt_ctx.tcon);
error:
- mount_put_conns(cifs_sb, xid, server, ses, tcon);
+ mount_put_conns(&mnt_ctx);
return rc;
}
#endif
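The DFS-enabled cifs_mount() above now drives a small set of helpers through a mount_ctx. The following user-space model outlines that control flow under stated assumptions: every helper is a stub standing in for the kernel function of the same name, and the one-field mount_ctx here is only a placeholder for the real structure.

	#include <stdio.h>
	#include <stdbool.h>

	#define EREMOTE 66

	struct mount_ctx { bool dfs_root_connected; };

	static int is_dfs_mount(struct mount_ctx *mc, bool *isdfs) { *isdfs = true; return 0; }
	static int connect_dfs_root(struct mount_ctx *mc) { mc->dfs_root_connected = true; return 0; }
	static int is_path_remote(struct mount_ctx *mc) { return -EREMOTE; } /* prefix crosses a link */
	static int follow_dfs_link(struct mount_ctx *mc) { return 0; }
	static void setup_server_referral_paths(struct mount_ctx *mc) { }

	static int mount_model(void)
	{
		struct mount_ctx mc = { 0 };
		bool isdfs;
		int rc;

		rc = is_dfs_mount(&mc, &isdfs);
		if (rc)
			return rc;
		if (!isdfs)
			return 0;			/* plain share: nothing more to do */

		rc = connect_dfs_root(&mc);		/* try every root target */
		if (rc)
			return rc;

		rc = is_path_remote(&mc);		/* does the prefix cross a link? */
		if (rc == -EREMOTE)
			rc = follow_dfs_link(&mc);	/* chase it, bounded by MAX_NESTED_LINKS */
		if (!rc)
			setup_server_referral_paths(&mc); /* hand origin/leaf paths to the server */
		return rc;
	}

	int main(void)
	{
		printf("rc=%d\n", mount_model());
		return 0;
	}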
@@ -3818,7 +3785,6 @@ cifs_umount(struct cifs_sb_info *cifs_sb)
kfree(cifs_sb->prepath);
#ifdef CONFIG_CIFS_DFS_UPCALL
dfs_cache_put_refsrv_sessions(&cifs_sb->dfs_mount_id);
- kfree(cifs_sb->origin_fullpath);
#endif
call_rcu(&cifs_sb->rcu, delayed_free);
}
@@ -4145,104 +4111,246 @@ cifs_prune_tlinks(struct work_struct *work)
}
#ifdef CONFIG_CIFS_DFS_UPCALL
-int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
+static void mark_tcon_tcp_ses_for_reconnect(struct cifs_tcon *tcon)
+{
+ int i;
+
+ for (i = 0; i < tcon->ses->chan_count; i++) {
+ spin_lock(&GlobalMid_Lock);
+ if (tcon->ses->chans[i].server->tcpStatus != CifsExiting)
+ tcon->ses->chans[i].server->tcpStatus = CifsNeedReconnect;
+ spin_unlock(&GlobalMid_Lock);
+ }
+}
+
+/* Update dfs referral path of superblock */
+static int update_server_fullpath(struct TCP_Server_Info *server, struct cifs_sb_info *cifs_sb,
+ const char *target)
+{
+ int rc = 0;
+ size_t len = strlen(target);
+ char *refpath, *npath;
+
+ if (unlikely(len < 2 || *target != '\\'))
+ return -EINVAL;
+
+ if (target[1] == '\\') {
+ len += 1;
+ refpath = kmalloc(len, GFP_KERNEL);
+ if (!refpath)
+ return -ENOMEM;
+
+ scnprintf(refpath, len, "%s", target);
+ } else {
+ len += sizeof("\\");
+ refpath = kmalloc(len, GFP_KERNEL);
+ if (!refpath)
+ return -ENOMEM;
+
+ scnprintf(refpath, len, "\\%s", target);
+ }
+
+ npath = dfs_cache_canonical_path(refpath, cifs_sb->local_nls, cifs_remap(cifs_sb));
+ kfree(refpath);
+
+ if (IS_ERR(npath)) {
+ rc = PTR_ERR(npath);
+ } else {
+ mutex_lock(&server->refpath_lock);
+ kfree(server->leaf_fullpath);
+ server->leaf_fullpath = npath;
+ mutex_unlock(&server->refpath_lock);
+ server->current_fullpath = server->leaf_fullpath;
+ }
+ return rc;
+}
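update_server_fullpath() normalizes the referral target into a UNC-style "\\host\share" string before canonicalizing it, since targets may arrive with either one or two leading backslashes. A stand-alone sketch of just that normalization step, using plain malloc/snprintf in place of the kernel allocators:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Return a newly allocated "\\host\share" copy of @target, which may be
	 * given as "\\host\share" or "\host\share"; NULL on bad input or OOM. */
	static char *normalize_target(const char *target)
	{
		size_t len = strlen(target);
		char *refpath;

		if (len < 2 || target[0] != '\\')
			return NULL;

		if (target[1] == '\\') {
			refpath = malloc(len + 1);		/* already "\\..." */
			if (refpath)
				snprintf(refpath, len + 1, "%s", target);
		} else {
			refpath = malloc(len + 2);		/* prepend one '\' */
			if (refpath)
				snprintf(refpath, len + 2, "\\%s", target);
		}
		return refpath;
	}

	int main(void)
	{
		char *a = normalize_target("\\\\srv\\share");
		char *b = normalize_target("\\srv\\share");

		printf("%s\n%s\n", a, b);	/* both print \\srv\share */
		free(a);
		free(b);
		return 0;
	}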
+
+static int target_share_matches_server(struct TCP_Server_Info *server, const char *tcp_host,
+ size_t tcp_host_len, char *share, bool *target_match)
+{
+ int rc = 0;
+ const char *dfs_host;
+ size_t dfs_host_len;
+
+ *target_match = true;
+ extract_unc_hostname(share, &dfs_host, &dfs_host_len);
+
+ /* Check if hostnames or addresses match */
+ if (dfs_host_len != tcp_host_len || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
+ cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", __func__, (int)dfs_host_len,
+ dfs_host, (int)tcp_host_len, tcp_host);
+ rc = match_target_ip(server, dfs_host, dfs_host_len, target_match);
+ if (rc)
+ cifs_dbg(VFS, "%s: failed to match target ip: %d\n", __func__, rc);
+ }
+ return rc;
+}
+
+static int __tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, char *tree, bool islink,
+ struct dfs_cache_tgt_list *tl)
{
int rc;
struct TCP_Server_Info *server = tcon->ses->server;
const struct smb_version_operations *ops = server->ops;
- struct dfs_cache_tgt_list tl;
- struct dfs_cache_tgt_iterator *it = NULL;
- char *tree;
+ struct cifs_tcon *ipc = tcon->ses->tcon_ipc;
+ char *share = NULL, *prefix = NULL;
const char *tcp_host;
size_t tcp_host_len;
- const char *dfs_host;
- size_t dfs_host_len;
- char *share = NULL, *prefix = NULL;
- struct dfs_info3_param ref = {0};
- bool isroot;
+ struct dfs_cache_tgt_iterator *tit;
+ bool target_match;
- tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
- if (!tree)
- return -ENOMEM;
+ extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
- /* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
- if (!tcon->dfs_path || dfs_cache_noreq_find(tcon->dfs_path + 1, &ref, &tl)) {
- if (tcon->ipc) {
- scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
- rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
- } else {
- rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, nlsc);
- }
+ tit = dfs_cache_get_tgt_iterator(tl);
+ if (!tit) {
+ rc = -ENOENT;
goto out;
}
- isroot = ref.server_type == DFS_TYPE_ROOT;
- free_dfs_info_param(&ref);
-
- extract_unc_hostname(server->hostname, &tcp_host, &tcp_host_len);
-
- for (it = dfs_cache_get_tgt_iterator(&tl); it; it = dfs_cache_get_next_tgt(&tl, it)) {
- bool target_match;
+ /* Try to tree connect to all dfs targets */
+ for (; tit; tit = dfs_cache_get_next_tgt(tl, tit)) {
+ const char *target = dfs_cache_get_tgt_name(tit);
+ struct dfs_cache_tgt_list ntl = DFS_CACHE_TGT_LIST_INIT(ntl);
kfree(share);
kfree(prefix);
- share = NULL;
- prefix = NULL;
+ share = prefix = NULL;
- rc = dfs_cache_get_tgt_share(tcon->dfs_path + 1, it, &share, &prefix);
+ /* Check if share matches with tcp ses */
+ rc = dfs_cache_get_tgt_share(server->current_fullpath + 1, tit, &share, &prefix);
if (rc) {
- cifs_dbg(VFS, "%s: failed to parse target share %d\n",
- __func__, rc);
- continue;
+ cifs_dbg(VFS, "%s: failed to parse target share: %d\n", __func__, rc);
+ break;
}
- extract_unc_hostname(share, &dfs_host, &dfs_host_len);
-
- if (dfs_host_len != tcp_host_len
- || strncasecmp(dfs_host, tcp_host, dfs_host_len) != 0) {
- cifs_dbg(FYI, "%s: %.*s doesn't match %.*s\n", __func__, (int)dfs_host_len,
- dfs_host, (int)tcp_host_len, tcp_host);
+ rc = target_share_matches_server(server, tcp_host, tcp_host_len, share,
+ &target_match);
+ if (rc)
+ break;
+ if (!target_match) {
+ rc = -EHOSTUNREACH;
+ continue;
+ }
- rc = match_target_ip(server, dfs_host, dfs_host_len, &target_match);
- if (rc) {
- cifs_dbg(VFS, "%s: failed to match target ip: %d\n", __func__, rc);
+ if (ipc->need_reconnect) {
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
+ rc = ops->tree_connect(xid, ipc->ses, tree, ipc, cifs_sb->local_nls);
+ if (rc)
break;
- }
-
- if (!target_match) {
- cifs_dbg(FYI, "%s: skipping target\n", __func__);
- continue;
- }
}
- if (tcon->ipc) {
- scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", share);
- rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
+ scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
+ if (!islink) {
+ rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
+ break;
+ }
+ /*
+		 * If no dfs referrals were returned from the link target, then just do a TREE_CONNECT
+		 * to it. Otherwise, cache the dfs referral and then mark the current tcp ses for
+		 * reconnect so that either the demultiplex thread or the echo worker will reconnect to
+		 * the newly resolved target.
+ */
+ if (dfs_cache_find(xid, tcon->ses, cifs_sb->local_nls, cifs_remap(cifs_sb), target,
+ NULL, &ntl)) {
+ rc = ops->tree_connect(xid, tcon->ses, tree, tcon, cifs_sb->local_nls);
+ if (rc)
+ continue;
+ rc = dfs_cache_noreq_update_tgthint(server->current_fullpath + 1, tit);
+ if (!rc)
+ rc = cifs_update_super_prepath(cifs_sb, prefix);
} else {
- scnprintf(tree, MAX_TREE_SIZE, "\\%s", share);
- rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
- /* Only handle prefix paths of DFS link targets */
- if (!rc && !isroot) {
- rc = update_super_prepath(tcon, prefix);
- break;
- }
+ /* Target is another dfs share */
+ rc = update_server_fullpath(server, cifs_sb, target);
+ dfs_cache_free_tgts(tl);
+
+ if (!rc) {
+ rc = -EREMOTE;
+ list_replace_init(&ntl.tl_list, &tl->tl_list);
+ } else
+ dfs_cache_free_tgts(&ntl);
}
- if (rc == -EREMOTE)
- break;
+ break;
}
+out:
kfree(share);
kfree(prefix);
- if (!rc) {
- if (it)
- rc = dfs_cache_noreq_update_tgthint(tcon->dfs_path + 1, it);
- else
- rc = -ENOENT;
+ return rc;
+}
+
+static int tree_connect_dfs_target(const unsigned int xid, struct cifs_tcon *tcon,
+ struct cifs_sb_info *cifs_sb, char *tree, bool islink,
+ struct dfs_cache_tgt_list *tl)
+{
+ int rc;
+ int num_links = 0;
+ struct TCP_Server_Info *server = tcon->ses->server;
+
+ do {
+ rc = __tree_connect_dfs_target(xid, tcon, cifs_sb, tree, islink, tl);
+ if (!rc || rc != -EREMOTE)
+ break;
+ } while (rc = -ELOOP, ++num_links < MAX_NESTED_LINKS);
+ /*
+	 * If we couldn't tree connect to any of the targets in the last referral path, then retry
+	 * from the original referral path.
+ */
+ if (rc && server->current_fullpath != server->origin_fullpath) {
+ server->current_fullpath = server->origin_fullpath;
+ mark_tcon_tcp_ses_for_reconnect(tcon);
}
- dfs_cache_free_tgts(&tl);
+
+ dfs_cache_free_tgts(tl);
+ return rc;
+}
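tree_connect_dfs_target() bounds link chasing the same way follow_dfs_link() does, and when nothing in the last referral path works it points the connection back at the original referral path and flags the sessions for reconnect. A compact user-space model of that fallback decision; the types and field names are stand-ins:

	#include <stdio.h>
	#include <stdbool.h>

	/* Model of the fallback above: if every target of the last referral path
	 * failed, point the connection back at the original referral path and
	 * ask for a reconnect so the next attempt starts from the top. */
	struct server_model {
		const char *origin_fullpath;
		const char *current_fullpath;
		bool need_reconnect;
	};

	static void maybe_fall_back(struct server_model *srv, int rc)
	{
		if (rc && srv->current_fullpath != srv->origin_fullpath) {
			srv->current_fullpath = srv->origin_fullpath;
			srv->need_reconnect = true;
		}
	}

	int main(void)
	{
		struct server_model srv = {
			.origin_fullpath  = "\\\\root\\dfs",
			.current_fullpath = "\\\\link\\target",
		};

		maybe_fall_back(&srv, -1 /* all targets failed */);
		printf("current=%s reconnect=%d\n", srv.current_fullpath, srv.need_reconnect);
		return 0;
	}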
+
+int cifs_tree_connect(const unsigned int xid, struct cifs_tcon *tcon, const struct nls_table *nlsc)
+{
+ int rc;
+ struct TCP_Server_Info *server = tcon->ses->server;
+ const struct smb_version_operations *ops = server->ops;
+ struct super_block *sb = NULL;
+ struct cifs_sb_info *cifs_sb;
+ struct dfs_cache_tgt_list tl = DFS_CACHE_TGT_LIST_INIT(tl);
+ char *tree;
+ struct dfs_info3_param ref = {0};
+
+ tree = kzalloc(MAX_TREE_SIZE, GFP_KERNEL);
+ if (!tree)
+ return -ENOMEM;
+
+ if (tcon->ipc) {
+ scnprintf(tree, MAX_TREE_SIZE, "\\\\%s\\IPC$", server->hostname);
+ rc = ops->tree_connect(xid, tcon->ses, tree, tcon, nlsc);
+ goto out;
+ }
+
+ sb = cifs_get_tcp_super(server);
+ if (IS_ERR(sb)) {
+ rc = PTR_ERR(sb);
+ cifs_dbg(VFS, "%s: could not find superblock: %d\n", __func__, rc);
+ goto out;
+ }
+
+ cifs_sb = CIFS_SB(sb);
+
+ /* If it is not dfs or there was no cached dfs referral, then reconnect to same share */
+ if (!server->current_fullpath ||
+ dfs_cache_noreq_find(server->current_fullpath + 1, &ref, &tl)) {
+ rc = ops->tree_connect(xid, tcon->ses, tcon->treeName, tcon, cifs_sb->local_nls);
+ goto out;
+ }
+
+ rc = tree_connect_dfs_target(xid, tcon, cifs_sb, tree, ref.server_type == DFS_TYPE_LINK,
+ &tl);
+ free_dfs_info_param(&ref);
+
out:
kfree(tree);
+ cifs_put_tcp_super(sb);
+
return rc;
}
#else
diff --git a/fs/cifs/dfs_cache.c b/fs/cifs/dfs_cache.c
index 283745592844..5c1259d2eeac 100644
--- a/fs/cifs/dfs_cache.c
+++ b/fs/cifs/dfs_cache.c
@@ -283,7 +283,7 @@ static int dfscache_proc_show(struct seq_file *m, void *v)
seq_printf(m,
"cache entry: path=%s,type=%s,ttl=%d,etime=%ld,hdr_flags=0x%x,ref_flags=0x%x,interlink=%s,path_consumed=%d,expired=%s\n",
ce->path, ce->srvtype == DFS_TYPE_ROOT ? "root" : "link",
- ce->ttl, ce->etime.tv_nsec, ce->ref_flags, ce->hdr_flags,
+ ce->ttl, ce->etime.tv_nsec, ce->hdr_flags, ce->ref_flags,
IS_DFS_INTERLINK(ce->hdr_flags) ? "yes" : "no",
ce->path_consumed, cache_entry_expired(ce) ? "yes" : "no");
@@ -1364,9 +1364,9 @@ static void mark_for_reconnect_if_needed(struct cifs_tcon *tcon, struct dfs_cach
}
/* Refresh dfs referral of tcon and mark it for reconnect if needed */
-static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool force_refresh)
+static int __refresh_tcon(const char *path, struct cifs_ses **sessions, struct cifs_tcon *tcon,
+ bool force_refresh)
{
- const char *path = tcon->dfs_path + 1;
struct cifs_ses *ses;
struct cache_entry *ce;
struct dfs_info3_param *refs = NULL;
@@ -1422,6 +1422,20 @@ out:
return rc;
}
+static int refresh_tcon(struct cifs_ses **sessions, struct cifs_tcon *tcon, bool force_refresh)
+{
+ struct TCP_Server_Info *server = tcon->ses->server;
+
+ mutex_lock(&server->refpath_lock);
+ if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
+ __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, force_refresh);
+ mutex_unlock(&server->refpath_lock);
+
+ __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, force_refresh);
+
+ return 0;
+}
+
/**
* dfs_cache_remount_fs - remount a DFS share
*
@@ -1435,6 +1449,7 @@ out:
int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
{
struct cifs_tcon *tcon;
+ struct TCP_Server_Info *server;
struct mount_group *mg;
struct cifs_ses *sessions[CACHE_MAX_ENTRIES + 1] = {NULL};
int rc;
@@ -1443,13 +1458,15 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
return -EINVAL;
tcon = cifs_sb_master_tcon(cifs_sb);
- if (!tcon->dfs_path) {
- cifs_dbg(FYI, "%s: not a dfs tcon\n", __func__);
+ server = tcon->ses->server;
+
+ if (!server->origin_fullpath) {
+ cifs_dbg(FYI, "%s: not a dfs mount\n", __func__);
return 0;
}
if (uuid_is_null(&cifs_sb->dfs_mount_id)) {
- cifs_dbg(FYI, "%s: tcon has no dfs mount group id\n", __func__);
+ cifs_dbg(FYI, "%s: no dfs mount group id\n", __func__);
return -EINVAL;
}
@@ -1457,7 +1474,7 @@ int dfs_cache_remount_fs(struct cifs_sb_info *cifs_sb)
mg = find_mount_group_locked(&cifs_sb->dfs_mount_id);
if (IS_ERR(mg)) {
mutex_unlock(&mount_group_list_lock);
- cifs_dbg(FYI, "%s: tcon has ipc session to refresh referral\n", __func__);
+ cifs_dbg(FYI, "%s: no ipc session for refreshing referral\n", __func__);
return PTR_ERR(mg);
}
kref_get(&mg->refcount);
@@ -1498,9 +1515,12 @@ static void refresh_mounts(struct cifs_ses **sessions)
spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
+ if (!server->is_dfs_conn)
+ continue;
+
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
list_for_each_entry(tcon, &ses->tcon_list, tcon_list) {
- if (tcon->dfs_path) {
+ if (!tcon->ipc && !tcon->need_reconnect) {
tcon->tc_count++;
list_add_tail(&tcon->ulist, &tcons);
}
@@ -1510,8 +1530,16 @@ static void refresh_mounts(struct cifs_ses **sessions)
spin_unlock(&cifs_tcp_ses_lock);
list_for_each_entry_safe(tcon, ntcon, &tcons, ulist) {
+ struct TCP_Server_Info *server = tcon->ses->server;
+
list_del_init(&tcon->ulist);
- refresh_tcon(sessions, tcon, false);
+
+ mutex_lock(&server->refpath_lock);
+ if (strcasecmp(server->leaf_fullpath, server->origin_fullpath))
+ __refresh_tcon(server->leaf_fullpath + 1, sessions, tcon, false);
+ mutex_unlock(&server->refpath_lock);
+
+ __refresh_tcon(server->origin_fullpath + 1, sessions, tcon, false);
cifs_put_tcon(tcon);
}
}
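refresh_mounts() and refresh_tcon() now refresh the referral for the leaf path only when it differs from the origin path, and always refresh the origin, with access to the leaf path serialized by refpath_lock. A small user-space model of that "refresh leaf if distinct, then origin" rule, with a pthread mutex standing in for the kernel mutex:

	#include <stdio.h>
	#include <strings.h>
	#include <pthread.h>

	struct server_model {
		pthread_mutex_t refpath_lock;
		const char *origin_fullpath;
		const char *leaf_fullpath;
	};

	static void refresh_path(const char *path)
	{
		printf("refreshing referral for %s\n", path + 1); /* skip leading '\' */
	}

	static void refresh_server(struct server_model *srv)
	{
		pthread_mutex_lock(&srv->refpath_lock);
		if (strcasecmp(srv->leaf_fullpath, srv->origin_fullpath) != 0)
			refresh_path(srv->leaf_fullpath);
		pthread_mutex_unlock(&srv->refpath_lock);

		refresh_path(srv->origin_fullpath);
	}

	int main(void)
	{
		struct server_model srv = {
			.refpath_lock    = PTHREAD_MUTEX_INITIALIZER,
			.origin_fullpath = "\\\\root\\dfs\\dir",
			.leaf_fullpath   = "\\\\link\\share\\dir",
		};

		refresh_server(&srv);
		return 0;
	}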
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 1b855fcb179e..9fee3af83a73 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -2692,12 +2692,23 @@ int cifs_strict_fsync(struct file *file, loff_t start, loff_t end,
tcon = tlink_tcon(smbfile->tlink);
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
server = tcon->ses->server;
- if (server->ops->flush)
- rc = server->ops->flush(xid, tcon, &smbfile->fid);
- else
+ if (server->ops->flush == NULL) {
rc = -ENOSYS;
+ goto strict_fsync_exit;
+ }
+
+ if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
+ smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
+ if (smbfile) {
+ rc = server->ops->flush(xid, tcon, &smbfile->fid);
+ cifsFileInfo_put(smbfile);
+ } else
+ cifs_dbg(FYI, "ignore fsync for file not open for write\n");
+ } else
+ rc = server->ops->flush(xid, tcon, &smbfile->fid);
}
+strict_fsync_exit:
free_xid(xid);
return rc;
}
@@ -2709,6 +2720,7 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
struct cifs_tcon *tcon;
struct TCP_Server_Info *server;
struct cifsFileInfo *smbfile = file->private_data;
+ struct inode *inode = file_inode(file);
struct cifs_sb_info *cifs_sb = CIFS_FILE_SB(file);
rc = file_write_and_wait_range(file, start, end);
@@ -2725,12 +2737,23 @@ int cifs_fsync(struct file *file, loff_t start, loff_t end, int datasync)
tcon = tlink_tcon(smbfile->tlink);
if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NOSSYNC)) {
server = tcon->ses->server;
- if (server->ops->flush)
- rc = server->ops->flush(xid, tcon, &smbfile->fid);
- else
+ if (server->ops->flush == NULL) {
rc = -ENOSYS;
+ goto fsync_exit;
+ }
+
+ if ((OPEN_FMODE(smbfile->f_flags) & FMODE_WRITE) == 0) {
+ smbfile = find_writable_file(CIFS_I(inode), FIND_WR_ANY);
+ if (smbfile) {
+ rc = server->ops->flush(xid, tcon, &smbfile->fid);
+ cifsFileInfo_put(smbfile);
+ } else
+ cifs_dbg(FYI, "ignore fsync for file not open for write\n");
+ } else
+ rc = server->ops->flush(xid, tcon, &smbfile->fid);
}
+fsync_exit:
free_xid(xid);
return rc;
}
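Both fsync paths above now cope with a descriptor that was opened read-only: rather than flushing through the read-only handle, they look up another handle on the same inode that is open for writing, flush through it, and drop the extra reference; if none exists, the server-side flush is skipped with a debug message. A user-space model of that handle-selection rule; the types and the lookup helper are stand-ins:

	#include <stdio.h>
	#include <stddef.h>

	#define FMODE_WRITE 0x2

	struct handle_model {
		int flags;		/* open mode of this handle */
		const char *name;
	};

	/* Stand-in for find_writable_file(): return another writable handle on
	 * the same inode, or NULL if nobody has the file open for writing. */
	static struct handle_model *find_writable(struct handle_model *others, size_t n)
	{
		for (size_t i = 0; i < n; i++)
			if (others[i].flags & FMODE_WRITE)
				return &others[i];
		return NULL;
	}

	static void fsync_model(struct handle_model *self,
				struct handle_model *others, size_t n)
	{
		struct handle_model *h = self;

		if (!(self->flags & FMODE_WRITE)) {
			h = find_writable(others, n);
			if (!h) {
				printf("ignore fsync for file not open for write\n");
				return;
			}
		}
		printf("server flush via handle %s\n", h->name);
	}

	int main(void)
	{
		struct handle_model ro = { 0, "ro-fd" };
		struct handle_model peers[] = { { FMODE_WRITE, "rw-fd" } };

		fsync_model(&ro, peers, 1);
		return 0;
	}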
diff --git a/fs/cifs/fs_context.c b/fs/cifs/fs_context.c
index 38d96a480745..6a179ae753c1 100644
--- a/fs/cifs/fs_context.c
+++ b/fs/cifs/fs_context.c
@@ -308,7 +308,9 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
new_ctx->nodename = NULL;
new_ctx->username = NULL;
new_ctx->password = NULL;
+ new_ctx->server_hostname = NULL;
new_ctx->domainname = NULL;
+ new_ctx->workstation_name = NULL;
new_ctx->UNC = NULL;
new_ctx->source = NULL;
new_ctx->iocharset = NULL;
@@ -323,6 +325,7 @@ smb3_fs_context_dup(struct smb3_fs_context *new_ctx, struct smb3_fs_context *ctx
DUP_CTX_STR(UNC);
DUP_CTX_STR(source);
DUP_CTX_STR(domainname);
+ DUP_CTX_STR(workstation_name);
DUP_CTX_STR(nodename);
DUP_CTX_STR(iocharset);
@@ -459,6 +462,7 @@ smb3_parse_devname(const char *devname, struct smb3_fs_context *ctx)
return -EINVAL;
/* record the server hostname */
+ kfree(ctx->server_hostname);
ctx->server_hostname = kstrndup(devname + 2, pos - devname - 2, GFP_KERNEL);
if (!ctx->server_hostname)
return -ENOMEM;
@@ -720,6 +724,11 @@ static int smb3_verify_reconfigure_ctx(struct fs_context *fc,
cifs_errorf(fc, "can not change domainname during remount\n");
return -EINVAL;
}
+ if (new_ctx->workstation_name &&
+ (!old_ctx->workstation_name || strcmp(new_ctx->workstation_name, old_ctx->workstation_name))) {
+ cifs_errorf(fc, "can not change workstation_name during remount\n");
+ return -EINVAL;
+ }
if (new_ctx->nodename &&
(!old_ctx->nodename || strcmp(new_ctx->nodename, old_ctx->nodename))) {
cifs_errorf(fc, "can not change nodename during remount\n");
@@ -753,7 +762,8 @@ static int smb3_reconfigure(struct fs_context *fc)
return rc;
/*
- * We can not change UNC/username/password/domainname/nodename/iocharset
+ * We can not change UNC/username/password/domainname/
+ * workstation_name/nodename/iocharset
* during reconnect so ignore what we have in the new context and
* just use what we already have in cifs_sb->ctx.
*/
@@ -762,6 +772,7 @@ static int smb3_reconfigure(struct fs_context *fc)
STEAL_STRING(cifs_sb, ctx, username);
STEAL_STRING(cifs_sb, ctx, password);
STEAL_STRING(cifs_sb, ctx, domainname);
+ STEAL_STRING(cifs_sb, ctx, workstation_name);
STEAL_STRING(cifs_sb, ctx, nodename);
STEAL_STRING(cifs_sb, ctx, iocharset);
@@ -1414,13 +1425,22 @@ static int smb3_fs_context_parse_param(struct fs_context *fc,
int smb3_init_fs_context(struct fs_context *fc)
{
+ int rc;
struct smb3_fs_context *ctx;
char *nodename = utsname()->nodename;
int i;
ctx = kzalloc(sizeof(struct smb3_fs_context), GFP_KERNEL);
- if (unlikely(!ctx))
- return -ENOMEM;
+ if (unlikely(!ctx)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
+
+ ctx->workstation_name = kstrdup(nodename, GFP_KERNEL);
+ if (unlikely(!ctx->workstation_name)) {
+ rc = -ENOMEM;
+ goto err_exit;
+ }
/*
* does not have to be perfect mapping since field is
@@ -1493,6 +1513,14 @@ int smb3_init_fs_context(struct fs_context *fc)
fc->fs_private = ctx;
fc->ops = &smb3_fs_context_ops;
return 0;
+
+err_exit:
+ if (ctx) {
+ kfree(ctx->workstation_name);
+ kfree(ctx);
+ }
+
+ return rc;
}
void
@@ -1518,6 +1546,8 @@ smb3_cleanup_fs_context_contents(struct smb3_fs_context *ctx)
ctx->source = NULL;
kfree(ctx->domainname);
ctx->domainname = NULL;
+ kfree(ctx->workstation_name);
+ ctx->workstation_name = NULL;
kfree(ctx->nodename);
ctx->nodename = NULL;
kfree(ctx->iocharset);
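The workstation name added above follows the same lifecycle as the other context strings: it defaults to the local node name at init, is duplicated on smb3_fs_context_dup(), may not change on remount, and is freed in cleanup and on the init error path. A rough user-space sketch of the init-with-cleanup part, using uname()/strdup() in place of the kernel helpers:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>
	#include <sys/utsname.h>

	/* Model of the init path above: the workstation name defaults to the
	 * local node name and must be freed on every error path afterwards. */
	struct ctx_model {
		char *workstation_name;
	};

	static struct ctx_model *ctx_init(void)
	{
		struct utsname un;
		struct ctx_model *ctx;

		if (uname(&un) != 0)
			return NULL;

		ctx = calloc(1, sizeof(*ctx));
		if (!ctx)
			return NULL;

		ctx->workstation_name = strdup(un.nodename);
		if (!ctx->workstation_name) {
			free(ctx);		/* mirrors the err_exit: label */
			return NULL;
		}
		return ctx;
	}

	int main(void)
	{
		struct ctx_model *ctx = ctx_init();

		if (ctx) {
			printf("workstation_name=%s\n", ctx->workstation_name);
			free(ctx->workstation_name);
			free(ctx);
		}
		return 0;
	}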
diff --git a/fs/cifs/fs_context.h b/fs/cifs/fs_context.h
index b2d22cf9cb18..e54090d9ef36 100644
--- a/fs/cifs/fs_context.h
+++ b/fs/cifs/fs_context.h
@@ -170,6 +170,7 @@ struct smb3_fs_context {
char *server_hostname;
char *UNC;
char *nodename;
+ char *workstation_name;
char *iocharset; /* local code page for mapping to and from Unicode */
char source_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* clnt nb name */
char target_rfc1001_name[RFC1001_NAME_LEN_WITH_NULL]; /* srvr nb name */
diff --git a/fs/cifs/fscache.c b/fs/cifs/fscache.c
index 8eedd20c44ab..7e409a38a2d7 100644
--- a/fs/cifs/fscache.c
+++ b/fs/cifs/fscache.c
@@ -87,6 +87,14 @@ void cifs_fscache_get_super_cookie(struct cifs_tcon *tcon)
char *sharename;
struct cifs_fscache_super_auxdata auxdata;
+ /*
+	 * Check if the cookie was already initialized; if so, don't reinitialize it.
+	 * In the future, as we integrate with newer fscache features,
+	 * we may instead want to check whether the cookie has changed.
+ */
+ if (tcon->fscache == NULL)
+ return;
+
sharename = extract_sharename(tcon->treeName);
if (IS_ERR(sharename)) {
cifs_dbg(FYI, "%s: couldn't extract sharename\n", __func__);
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index ba2c3e897b29..5148d48d6a35 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -75,6 +75,7 @@ sesInfoAlloc(void)
INIT_LIST_HEAD(&ret_buf->tcon_list);
mutex_init(&ret_buf->session_mutex);
spin_lock_init(&ret_buf->iface_lock);
+ spin_lock_init(&ret_buf->chan_lock);
}
return ret_buf;
}
@@ -94,6 +95,7 @@ sesInfoFree(struct cifs_ses *buf_to_free)
kfree_sensitive(buf_to_free->password);
kfree(buf_to_free->user_name);
kfree(buf_to_free->domainName);
+ kfree(buf_to_free->workstation_name);
kfree_sensitive(buf_to_free->auth_key.response);
kfree(buf_to_free->iface_list);
kfree_sensitive(buf_to_free);
@@ -138,9 +140,6 @@ tconInfoFree(struct cifs_tcon *buf_to_free)
kfree(buf_to_free->nativeFileSystem);
kfree_sensitive(buf_to_free->password);
kfree(buf_to_free->crfid.fid);
-#ifdef CONFIG_CIFS_DFS_UPCALL
- kfree(buf_to_free->dfs_path);
-#endif
kfree(buf_to_free);
}
@@ -1287,69 +1286,20 @@ out:
return rc;
}
-static void tcon_super_cb(struct super_block *sb, void *arg)
+int cifs_update_super_prepath(struct cifs_sb_info *cifs_sb, char *prefix)
{
- struct super_cb_data *sd = arg;
- struct cifs_tcon *tcon = sd->data;
- struct cifs_sb_info *cifs_sb;
-
- if (sd->sb)
- return;
-
- cifs_sb = CIFS_SB(sb);
- if (tcon->dfs_path && cifs_sb->origin_fullpath &&
- !strcasecmp(tcon->dfs_path, cifs_sb->origin_fullpath))
- sd->sb = sb;
-}
-
-static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
-{
- return __cifs_get_super(tcon_super_cb, tcon);
-}
-
-static inline void cifs_put_tcon_super(struct super_block *sb)
-{
- __cifs_put_super(sb);
-}
-#else
-static inline struct super_block *cifs_get_tcon_super(struct cifs_tcon *tcon)
-{
- return ERR_PTR(-EOPNOTSUPP);
-}
-
-static inline void cifs_put_tcon_super(struct super_block *sb)
-{
-}
-#endif
-
-int update_super_prepath(struct cifs_tcon *tcon, char *prefix)
-{
- struct super_block *sb;
- struct cifs_sb_info *cifs_sb;
- int rc = 0;
-
- sb = cifs_get_tcon_super(tcon);
- if (IS_ERR(sb))
- return PTR_ERR(sb);
-
- cifs_sb = CIFS_SB(sb);
-
kfree(cifs_sb->prepath);
if (prefix && *prefix) {
cifs_sb->prepath = kstrdup(prefix, GFP_ATOMIC);
- if (!cifs_sb->prepath) {
- rc = -ENOMEM;
- goto out;
- }
+ if (!cifs_sb->prepath)
+ return -ENOMEM;
convert_delimiter(cifs_sb->prepath, CIFS_DIR_SEP(cifs_sb));
} else
cifs_sb->prepath = NULL;
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_USE_PREFIX_PATH;
-
-out:
- cifs_put_tcon_super(sb);
- return rc;
+ return 0;
}
+#endif
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 25a2b8ef88b9..fe707f45da89 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -119,7 +119,9 @@ typedef struct _AUTHENTICATE_MESSAGE {
*/
int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len, struct cifs_ses *ses);
-void build_ntlmssp_negotiate_blob(unsigned char *pbuffer, struct cifs_ses *ses);
+int build_ntlmssp_negotiate_blob(unsigned char **pbuffer, u16 *buflen,
+ struct cifs_ses *ses,
+ const struct nls_table *nls_cp);
int build_ntlmssp_auth_blob(unsigned char **pbuffer, u16 *buflen,
struct cifs_ses *ses,
const struct nls_table *nls_cp);
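With the prototype change above, the negotiate blob is heap-allocated by the callee, just like the auth blob: callers pass pointers for the buffer and its length and free the buffer once the request has been assembled. A user-space model of that out-parameter convention, with a toy blob builder standing in for the real NTLMSSP routine:

	#include <stdio.h>
	#include <stdlib.h>
	#include <string.h>

	/* Model of the new calling convention: the callee sizes and allocates the
	 * blob, reports its length through *buflen, and the caller frees it. */
	static int build_blob(unsigned char **pbuffer, unsigned short *buflen)
	{
		static const unsigned char sig[8] = "NTLMSSP";	/* includes the NUL */
		unsigned short len = sizeof(sig) + 4;		/* signature + message type */

		*pbuffer = calloc(1, len);
		if (!*pbuffer) {
			*buflen = 0;
			return -1;
		}
		memcpy(*pbuffer, sig, sizeof(sig));
		*buflen = len;
		return 0;
	}

	int main(void)
	{
		unsigned char *blob = NULL;
		unsigned short blob_len = 0;

		if (build_blob(&blob, &blob_len) == 0) {
			printf("blob of %u bytes, signature=%s\n", (unsigned)blob_len, blob);
			free(blob);
		}
		return 0;
	}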
diff --git a/fs/cifs/sess.c b/fs/cifs/sess.c
index 23e02db7923f..2c10b186ed6e 100644
--- a/fs/cifs/sess.c
+++ b/fs/cifs/sess.c
@@ -54,41 +54,53 @@ bool is_ses_using_iface(struct cifs_ses *ses, struct cifs_server_iface *iface)
{
int i;
+ spin_lock(&ses->chan_lock);
for (i = 0; i < ses->chan_count; i++) {
- if (is_server_using_iface(ses->chans[i].server, iface))
+ if (is_server_using_iface(ses->chans[i].server, iface)) {
+ spin_unlock(&ses->chan_lock);
return true;
+ }
}
+ spin_unlock(&ses->chan_lock);
return false;
}
/* returns number of channels added */
int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
{
- int old_chan_count = ses->chan_count;
- int left = ses->chan_max - ses->chan_count;
+ int old_chan_count, new_chan_count;
+ int left;
int i = 0;
int rc = 0;
int tries = 0;
struct cifs_server_iface *ifaces = NULL;
size_t iface_count;
+ if (ses->server->dialect < SMB30_PROT_ID) {
+ cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
+ return 0;
+ }
+
+ spin_lock(&ses->chan_lock);
+
+ new_chan_count = old_chan_count = ses->chan_count;
+ left = ses->chan_max - ses->chan_count;
+
if (left <= 0) {
cifs_dbg(FYI,
"ses already at max_channels (%zu), nothing to open\n",
ses->chan_max);
- return 0;
- }
-
- if (ses->server->dialect < SMB30_PROT_ID) {
- cifs_dbg(VFS, "multichannel is not supported on this protocol version, use 3.0 or above\n");
+ spin_unlock(&ses->chan_lock);
return 0;
}
if (!(ses->server->capabilities & SMB2_GLOBAL_CAP_MULTI_CHANNEL)) {
cifs_dbg(VFS, "server %s does not support multichannel\n", ses->server->hostname);
ses->chan_max = 1;
+ spin_unlock(&ses->chan_lock);
return 0;
}
+ spin_unlock(&ses->chan_lock);
/*
* Make a copy of the iface list at the time and use that
@@ -142,10 +154,11 @@ int cifs_try_adding_channels(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses)
cifs_dbg(FYI, "successfully opened new channel on iface#%d\n",
i);
left--;
+ new_chan_count++;
}
kfree(ifaces);
- return ses->chan_count - old_chan_count;
+ return new_chan_count - old_chan_count;
}
/*
@@ -157,10 +170,14 @@ cifs_ses_find_chan(struct cifs_ses *ses, struct TCP_Server_Info *server)
{
int i;
+ spin_lock(&ses->chan_lock);
for (i = 0; i < ses->chan_count; i++) {
- if (ses->chans[i].server == server)
+ if (ses->chans[i].server == server) {
+ spin_unlock(&ses->chan_lock);
return &ses->chans[i];
+ }
}
+ spin_unlock(&ses->chan_lock);
return NULL;
}
@@ -168,6 +185,7 @@ static int
cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
struct cifs_server_iface *iface)
{
+ struct TCP_Server_Info *chan_server;
struct cifs_chan *chan;
struct smb3_fs_context ctx = {NULL};
static const char unc_fmt[] = "\\%s\\foo";
@@ -240,18 +258,19 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
SMB2_CLIENT_GUID_SIZE);
ctx.use_client_guid = true;
- mutex_lock(&ses->session_mutex);
+ chan_server = cifs_get_tcp_session(&ctx, ses->server);
+ mutex_lock(&ses->session_mutex);
+ spin_lock(&ses->chan_lock);
chan = ses->binding_chan = &ses->chans[ses->chan_count];
- chan->server = cifs_get_tcp_session(&ctx);
+ chan->server = chan_server;
if (IS_ERR(chan->server)) {
rc = PTR_ERR(chan->server);
chan->server = NULL;
+ spin_unlock(&ses->chan_lock);
goto out;
}
- spin_lock(&cifs_tcp_ses_lock);
- chan->server->is_channel = true;
- spin_unlock(&cifs_tcp_ses_lock);
+ spin_unlock(&ses->chan_lock);
/*
* We need to allocate the server crypto now as we will need
@@ -283,8 +302,11 @@ cifs_ses_add_channel(struct cifs_sb_info *cifs_sb, struct cifs_ses *ses,
* ses to the new server.
*/
+ spin_lock(&ses->chan_lock);
ses->chan_count++;
atomic_set(&ses->chan_seq, 0);
+ spin_unlock(&ses->chan_lock);
+
out:
ses->binding = false;
ses->binding_chan = NULL;
@@ -599,18 +621,85 @@ int decode_ntlmssp_challenge(char *bcc_ptr, int blob_len,
return 0;
}
+static int size_of_ntlmssp_blob(struct cifs_ses *ses, int base_size)
+{
+ int sz = base_size + ses->auth_key.len
+ - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
+
+ if (ses->domainName)
+ sz += sizeof(__le16) * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
+ else
+ sz += sizeof(__le16);
+
+ if (ses->user_name)
+ sz += sizeof(__le16) * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
+ else
+ sz += sizeof(__le16);
+
+ sz += sizeof(__le16) * strnlen(ses->workstation_name, CIFS_MAX_WORKSTATION_LEN);
+
+ return sz;
+}
+
+static inline void cifs_security_buffer_from_str(SECURITY_BUFFER *pbuf,
+ char *str_value,
+ int str_length,
+ unsigned char *pstart,
+ unsigned char **pcur,
+ const struct nls_table *nls_cp)
+{
+ unsigned char *tmp = pstart;
+ int len;
+
+ if (!pbuf)
+ return;
+
+ if (!pcur)
+ pcur = &tmp;
+
+ if (!str_value) {
+ pbuf->BufferOffset = cpu_to_le32(*pcur - pstart);
+ pbuf->Length = 0;
+ pbuf->MaximumLength = 0;
+ *pcur += sizeof(__le16);
+ } else {
+ len = cifs_strtoUTF16((__le16 *)*pcur,
+ str_value,
+ str_length,
+ nls_cp);
+ len *= sizeof(__le16);
+ pbuf->BufferOffset = cpu_to_le32(*pcur - pstart);
+ pbuf->Length = cpu_to_le16(len);
+ pbuf->MaximumLength = cpu_to_le16(len);
+ *pcur += len;
+ }
+}
+
/* BB Move to ntlmssp.c eventually */
-/* We do not malloc the blob, it is passed in pbuffer, because
- it is fixed size, and small, making this approach cleaner */
-void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
- struct cifs_ses *ses)
+int build_ntlmssp_negotiate_blob(unsigned char **pbuffer,
+ u16 *buflen,
+ struct cifs_ses *ses,
+ const struct nls_table *nls_cp)
{
+ int rc = 0;
struct TCP_Server_Info *server = cifs_ses_server(ses);
- NEGOTIATE_MESSAGE *sec_blob = (NEGOTIATE_MESSAGE *)pbuffer;
+ NEGOTIATE_MESSAGE *sec_blob;
__u32 flags;
+ unsigned char *tmp;
+ int len;
+
+ len = size_of_ntlmssp_blob(ses, sizeof(NEGOTIATE_MESSAGE));
+ *pbuffer = kmalloc(len, GFP_KERNEL);
+ if (!*pbuffer) {
+ rc = -ENOMEM;
+ cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
+ *buflen = 0;
+ goto setup_ntlm_neg_ret;
+ }
+ sec_blob = (NEGOTIATE_MESSAGE *)*pbuffer;
- memset(pbuffer, 0, sizeof(NEGOTIATE_MESSAGE));
+ memset(*pbuffer, 0, sizeof(NEGOTIATE_MESSAGE));
memcpy(sec_blob->Signature, NTLMSSP_SIGNATURE, 8);
sec_blob->MessageType = NtLmNegotiate;
@@ -624,34 +713,25 @@ void build_ntlmssp_negotiate_blob(unsigned char *pbuffer,
if (!server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
flags |= NTLMSSP_NEGOTIATE_KEY_XCH;
+ tmp = *pbuffer + sizeof(NEGOTIATE_MESSAGE);
sec_blob->NegotiateFlags = cpu_to_le32(flags);
- sec_blob->WorkstationName.BufferOffset = 0;
- sec_blob->WorkstationName.Length = 0;
- sec_blob->WorkstationName.MaximumLength = 0;
+	/* these fields should be null in the negotiate phase, per MS-NLMP 3.1.5.1.1 */
+ cifs_security_buffer_from_str(&sec_blob->DomainName,
+ NULL,
+ CIFS_MAX_DOMAINNAME_LEN,
+ *pbuffer, &tmp,
+ nls_cp);
- /* Domain name is sent on the Challenge not Negotiate NTLMSSP request */
- sec_blob->DomainName.BufferOffset = 0;
- sec_blob->DomainName.Length = 0;
- sec_blob->DomainName.MaximumLength = 0;
-}
-
-static int size_of_ntlmssp_blob(struct cifs_ses *ses)
-{
- int sz = sizeof(AUTHENTICATE_MESSAGE) + ses->auth_key.len
- - CIFS_SESS_KEY_SIZE + CIFS_CPHTXT_SIZE + 2;
-
- if (ses->domainName)
- sz += 2 * strnlen(ses->domainName, CIFS_MAX_DOMAINNAME_LEN);
- else
- sz += 2;
-
- if (ses->user_name)
- sz += 2 * strnlen(ses->user_name, CIFS_MAX_USERNAME_LEN);
- else
- sz += 2;
+ cifs_security_buffer_from_str(&sec_blob->WorkstationName,
+ NULL,
+ CIFS_MAX_WORKSTATION_LEN,
+ *pbuffer, &tmp,
+ nls_cp);
- return sz;
+ *buflen = tmp - *pbuffer;
+setup_ntlm_neg_ret:
+ return rc;
}
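cifs_security_buffer_from_str() packs each variable-length field as a (length, maximum length, offset) triple pointing into the blob and advances a cursor as it goes, while size_of_ntlmssp_blob() sizes the allocation assuming 2 bytes (UTF-16) per character of the domain, user and workstation names. A stand-alone sketch of that offset/length bookkeeping, with a trivial byte-widening loop standing in for cifs_strtoUTF16():

	#include <stdio.h>
	#include <stdint.h>

	/* Modeled after SECURITY_BUFFER: length, maximum length and offset of a
	 * field relative to the start of the blob. */
	struct sec_buf {
		uint16_t len;
		uint16_t maxlen;
		uint32_t offset;
	};

	/* Append @str to the blob at *cur (widened to 2 bytes per char as a
	 * stand-in for the UTF-16 conversion) and record where it landed. */
	static void pack_field(struct sec_buf *sb, const char *str,
			       unsigned char *start, unsigned char **cur)
	{
		uint16_t len = 0;

		sb->offset = (uint32_t)(*cur - start);
		if (str) {
			for (const char *p = str; *p; p++) {
				(*cur)[len++] = (unsigned char)*p;
				(*cur)[len++] = 0;
			}
		}
		sb->len = sb->maxlen = len;
		*cur += str ? len : 2;	/* empty fields still reserve 2 bytes */
	}

	int main(void)
	{
		unsigned char blob[256] = { 0 };
		unsigned char *cur = blob + 16;		/* pretend a 16-byte header */
		struct sec_buf domain, user, workstation;

		pack_field(&domain, NULL, blob, &cur);	/* null in negotiate phase */
		pack_field(&user, "alice", blob, &cur);
		pack_field(&workstation, "pc1", blob, &cur);

		printf("domain      off=%u len=%u\n", (unsigned)domain.offset, (unsigned)domain.len);
		printf("user        off=%u len=%u\n", (unsigned)user.offset, (unsigned)user.len);
		printf("workstation off=%u len=%u\n", (unsigned)workstation.offset, (unsigned)workstation.len);
		printf("total blob  len=%u\n", (unsigned)(cur - blob));
		return 0;
	}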
int build_ntlmssp_auth_blob(unsigned char **pbuffer,
@@ -663,6 +743,7 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
AUTHENTICATE_MESSAGE *sec_blob;
__u32 flags;
unsigned char *tmp;
+ int len;
rc = setup_ntlmv2_rsp(ses, nls_cp);
if (rc) {
@@ -670,7 +751,9 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
*buflen = 0;
goto setup_ntlmv2_ret;
}
- *pbuffer = kmalloc(size_of_ntlmssp_blob(ses), GFP_KERNEL);
+
+ len = size_of_ntlmssp_blob(ses, sizeof(AUTHENTICATE_MESSAGE));
+ *pbuffer = kmalloc(len, GFP_KERNEL);
if (!*pbuffer) {
rc = -ENOMEM;
cifs_dbg(VFS, "Error %d during NTLMSSP allocation\n", rc);
@@ -686,7 +769,7 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
NTLMSSP_REQUEST_TARGET | NTLMSSP_NEGOTIATE_TARGET_INFO |
NTLMSSP_NEGOTIATE_128 | NTLMSSP_NEGOTIATE_UNICODE |
NTLMSSP_NEGOTIATE_NTLM | NTLMSSP_NEGOTIATE_EXTENDED_SEC |
- NTLMSSP_NEGOTIATE_SEAL;
+ NTLMSSP_NEGOTIATE_SEAL | NTLMSSP_NEGOTIATE_WORKSTATION_SUPPLIED;
if (ses->server->sign)
flags |= NTLMSSP_NEGOTIATE_SIGN;
if (!ses->server->session_estab || ses->ntlmssp->sesskey_per_smbsess)
@@ -719,42 +802,23 @@ int build_ntlmssp_auth_blob(unsigned char **pbuffer,
sec_blob->NtChallengeResponse.MaximumLength = 0;
}
- if (ses->domainName == NULL) {
- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
- sec_blob->DomainName.Length = 0;
- sec_blob->DomainName.MaximumLength = 0;
- tmp += 2;
- } else {
- int len;
- len = cifs_strtoUTF16((__le16 *)tmp, ses->domainName,
- CIFS_MAX_DOMAINNAME_LEN, nls_cp);
- len *= 2; /* unicode is 2 bytes each */
- sec_blob->DomainName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
- sec_blob->DomainName.Length = cpu_to_le16(len);
- sec_blob->DomainName.MaximumLength = cpu_to_le16(len);
- tmp += len;
- }
+ cifs_security_buffer_from_str(&sec_blob->DomainName,
+ ses->domainName,
+ CIFS_MAX_DOMAINNAME_LEN,
+ *pbuffer, &tmp,
+ nls_cp);
- if (ses->user_name == NULL) {
- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
- sec_blob->UserName.Length = 0;
- sec_blob->UserName.MaximumLength = 0;
- tmp += 2;
- } else {
- int len;
- len = cifs_strtoUTF16((__le16 *)tmp, ses->user_name,
- CIFS_MAX_USERNAME_LEN, nls_cp);
- len *= 2; /* unicode is 2 bytes each */
- sec_blob->UserName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
- sec_blob->UserName.Length = cpu_to_le16(len);
- sec_blob->UserName.MaximumLength = cpu_to_le16(len);
- tmp += len;
- }
+ cifs_security_buffer_from_str(&sec_blob->UserName,
+ ses->user_name,
+ CIFS_MAX_USERNAME_LEN,
+ *pbuffer, &tmp,
+ nls_cp);
- sec_blob->WorkstationName.BufferOffset = cpu_to_le32(tmp - *pbuffer);
- sec_blob->WorkstationName.Length = 0;
- sec_blob->WorkstationName.MaximumLength = 0;
- tmp += 2;
+ cifs_security_buffer_from_str(&sec_blob->WorkstationName,
+ ses->workstation_name,
+ CIFS_MAX_WORKSTATION_LEN,
+ *pbuffer, &tmp,
+ nls_cp);
if (((ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_KEY_XCH) ||
(ses->ntlmssp->server_flags & NTLMSSP_NEGOTIATE_EXTENDED_SEC))
@@ -1230,6 +1294,7 @@ sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
struct cifs_ses *ses = sess_data->ses;
__u16 bytes_remaining;
char *bcc_ptr;
+ unsigned char *ntlmsspblob = NULL;
u16 blob_len;
cifs_dbg(FYI, "rawntlmssp session setup negotiate phase\n");
@@ -1253,10 +1318,15 @@ sess_auth_rawntlmssp_negotiate(struct sess_data *sess_data)
pSMB = (SESSION_SETUP_ANDX *)sess_data->iov[0].iov_base;
/* Build security blob before we assemble the request */
- build_ntlmssp_negotiate_blob(pSMB->req.SecurityBlob, ses);
- sess_data->iov[1].iov_len = sizeof(NEGOTIATE_MESSAGE);
- sess_data->iov[1].iov_base = pSMB->req.SecurityBlob;
- pSMB->req.SecurityBlobLength = cpu_to_le16(sizeof(NEGOTIATE_MESSAGE));
+ rc = build_ntlmssp_negotiate_blob(&ntlmsspblob,
+ &blob_len, ses,
+ sess_data->nls_cp);
+ if (rc)
+ goto out;
+
+ sess_data->iov[1].iov_len = blob_len;
+ sess_data->iov[1].iov_base = ntlmsspblob;
+ pSMB->req.SecurityBlobLength = cpu_to_le16(blob_len);
rc = _sess_auth_rawntlmssp_assemble_req(sess_data);
if (rc)
diff --git a/fs/cifs/smb2inode.c b/fs/cifs/smb2inode.c
index 8297703492ee..fe5bfa245fa7 100644
--- a/fs/cifs/smb2inode.c
+++ b/fs/cifs/smb2inode.c
@@ -46,6 +46,10 @@ struct cop_vars {
struct smb2_file_link_info link_info;
};
+/*
+ * note: If cfile is passed, the reference to it is dropped here.
+ * So make sure that you do not reuse cfile after this function returns.
+ */
static int
smb2_compound_op(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, const char *full_path,
@@ -536,10 +540,11 @@ smb2_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
create_options |= OPEN_REPARSE_POINT;
/* Failed on a symbolic link - query a reparse point info */
+ cifs_get_readable_path(tcon, full_path, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN,
create_options, ACL_NO_MODE,
- smb2_data, SMB2_OP_QUERY_INFO, NULL);
+ smb2_data, SMB2_OP_QUERY_INFO, cfile);
}
if (rc)
goto out;
@@ -587,10 +592,11 @@ smb311_posix_query_path_info(const unsigned int xid, struct cifs_tcon *tcon,
create_options |= OPEN_REPARSE_POINT;
/* Failed on a symbolic link - query a reparse point info */
+ cifs_get_readable_path(tcon, full_path, &cfile);
rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_READ_ATTRIBUTES, FILE_OPEN,
create_options, ACL_NO_MODE,
- smb2_data, SMB2_OP_POSIX_QUERY_INFO, NULL);
+ smb2_data, SMB2_OP_POSIX_QUERY_INFO, cfile);
}
if (rc)
goto out;
@@ -707,10 +713,12 @@ smb2_set_path_size(const unsigned int xid, struct cifs_tcon *tcon,
struct cifs_sb_info *cifs_sb, bool set_alloc)
{
__le64 eof = cpu_to_le64(size);
+ struct cifsFileInfo *cfile;
+ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
return smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_WRITE_DATA, FILE_OPEN, 0, ACL_NO_MODE,
- &eof, SMB2_OP_SET_EOF, NULL);
+ &eof, SMB2_OP_SET_EOF, cfile);
}
int
@@ -719,6 +727,8 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
{
struct cifs_sb_info *cifs_sb = CIFS_SB(inode->i_sb);
struct tcon_link *tlink;
+ struct cifs_tcon *tcon;
+ struct cifsFileInfo *cfile;
int rc;
if ((buf->CreationTime == 0) && (buf->LastAccessTime == 0) &&
@@ -729,10 +739,12 @@ smb2_set_file_info(struct inode *inode, const char *full_path,
tlink = cifs_sb_tlink(cifs_sb);
if (IS_ERR(tlink))
return PTR_ERR(tlink);
+ tcon = tlink_tcon(tlink);
- rc = smb2_compound_op(xid, tlink_tcon(tlink), cifs_sb, full_path,
+ cifs_get_writable_path(tcon, full_path, FIND_WR_ANY, &cfile);
+ rc = smb2_compound_op(xid, tcon, cifs_sb, full_path,
FILE_WRITE_ATTRIBUTES, FILE_OPEN,
- 0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, NULL);
+ 0, ACL_NO_MODE, buf, SMB2_OP_SET_INFO, cfile);
cifs_put_tlink(tlink);
return rc;
}
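The query/set paths above now pass an already-open handle (cfile) into smb2_compound_op() when one exists, so the compound request can reuse it instead of paying an extra open/close round trip; as the new comment notes, the reference is consumed by the callee. A small user-space model of that "reuse an open handle if available, otherwise open one" decision; all names here are stand-ins:

	#include <stdio.h>
	#include <stddef.h>

	struct handle_model {
		const char *path;
		int refcount;
	};

	/* Stand-in for cifs_get_readable_path()/cifs_get_writable_path(). */
	static struct handle_model *lookup_open_handle(struct handle_model *cached,
						       const char *path)
	{
		if (cached && cached->path == path) {
			cached->refcount++;	/* caller now owns a reference */
			return cached;
		}
		return NULL;
	}

	/* Stand-in for smb2_compound_op(): consumes the handle reference, if any. */
	static int compound_op(const char *path, struct handle_model *cfile)
	{
		if (cfile) {
			printf("compound on %s via existing handle\n", path);
			cfile->refcount--;	/* reference dropped here, do not reuse */
		} else {
			printf("compound on %s with open+op+close\n", path);
		}
		return 0;
	}

	int main(void)
	{
		const char *path = "\\dir\\file";
		struct handle_model cached = { .path = path, .refcount = 1 };
		struct handle_model *cfile = lookup_open_handle(&cached, path);

		compound_op(path, cfile);
		printf("cached refcount back to %d\n", cached.refcount);
		return 0;
	}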
diff --git a/fs/cifs/smb2ops.c b/fs/cifs/smb2ops.c
index 7acf71defea7..c5b1dea54ebc 100644
--- a/fs/cifs/smb2ops.c
+++ b/fs/cifs/smb2ops.c
@@ -2844,6 +2844,7 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
struct fsctl_get_dfs_referral_req *dfs_req = NULL;
struct get_dfs_referral_rsp *dfs_rsp = NULL;
u32 dfs_req_size = 0, dfs_rsp_size = 0;
+ int retry_count = 0;
cifs_dbg(FYI, "%s: path: %s\n", __func__, search_name);
@@ -2895,11 +2896,14 @@ smb2_get_dfs_refer(const unsigned int xid, struct cifs_ses *ses,
true /* is_fsctl */,
(char *)dfs_req, dfs_req_size, CIFSMaxBufSize,
(char **)&dfs_rsp, &dfs_rsp_size);
- } while (rc == -EAGAIN);
+ if (!is_retryable_error(rc))
+ break;
+ usleep_range(512, 2048);
+ } while (++retry_count < 5);
if (rc) {
- if ((rc != -ENOENT) && (rc != -EOPNOTSUPP))
- cifs_tcon_dbg(VFS, "ioctl error in %s rc=%d\n", __func__, rc);
+ if (!is_retryable_error(rc) && rc != -ENOENT && rc != -EOPNOTSUPP)
+ cifs_tcon_dbg(VFS, "%s: ioctl error: rc=%d\n", __func__, rc);
goto out;
}
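The DFS-referral ioctl above now retries at most five times, only for retryable errors, and sleeps briefly between attempts instead of looping on -EAGAIN indefinitely. A user-space model of that bounded retry with backoff; usleep() and the retryable test are stand-ins for the kernel helpers:

	#include <stdio.h>
	#include <stdbool.h>
	#include <unistd.h>

	#define EAGAIN 11

	static bool is_retryable(int rc)
	{
		return rc == -EAGAIN;		/* stand-in for is_retryable_error() */
	}

	/* Stand-in for the ioctl: fail twice with -EAGAIN, then succeed. */
	static int do_request(int *attempts)
	{
		return ++(*attempts) < 3 ? -EAGAIN : 0;
	}

	int main(void)
	{
		int rc, attempts = 0, retry_count = 0;

		do {
			rc = do_request(&attempts);
			if (!is_retryable(rc))
				break;			/* success or a hard error */
			usleep(1024);			/* short backoff between tries */
		} while (++retry_count < 5);

		printf("rc=%d after %d attempt(s)\n", rc, attempts);
		return 0;
	}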
diff --git a/fs/cifs/smb2pdu.c b/fs/cifs/smb2pdu.c
index d2ecb2ea37c0..2f5f2c4c6183 100644
--- a/fs/cifs/smb2pdu.c
+++ b/fs/cifs/smb2pdu.c
@@ -155,7 +155,11 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
if (tcon == NULL)
return 0;
- if (smb2_command == SMB2_TREE_CONNECT)
+ /*
+ * Need to also skip SMB2_IOCTL because it is used for checking nested dfs links in
+ * cifs_tree_connect().
+ */
+ if (smb2_command == SMB2_TREE_CONNECT || smb2_command == SMB2_IOCTL)
return 0;
if (tcon->tidStatus == CifsExiting) {
@@ -253,7 +257,7 @@ smb2_reconnect(__le16 smb2_command, struct cifs_tcon *tcon,
/*
* If we are reconnecting an extra channel, bind
*/
- if (server->is_channel) {
+ if (CIFS_SERVER_IS_CHAN(server)) {
ses->binding = true;
ses->binding_chan = cifs_ses_find_chan(ses, server);
}
@@ -1456,7 +1460,7 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
int rc;
struct cifs_ses *ses = sess_data->ses;
struct smb2_sess_setup_rsp *rsp = NULL;
- char *ntlmssp_blob = NULL;
+ unsigned char *ntlmssp_blob = NULL;
bool use_spnego = false; /* else use raw ntlmssp */
u16 blob_length = 0;
@@ -1475,22 +1479,17 @@ SMB2_sess_auth_rawntlmssp_negotiate(struct SMB2_sess_data *sess_data)
if (rc)
goto out_err;
- ntlmssp_blob = kmalloc(sizeof(struct _NEGOTIATE_MESSAGE),
- GFP_KERNEL);
- if (ntlmssp_blob == NULL) {
- rc = -ENOMEM;
- goto out;
- }
+ rc = build_ntlmssp_negotiate_blob(&ntlmssp_blob,
+ &blob_length, ses,
+ sess_data->nls_cp);
+ if (rc)
+ goto out_err;
- build_ntlmssp_negotiate_blob(ntlmssp_blob, ses);
if (use_spnego) {
/* BB eventually need to add this */
cifs_dbg(VFS, "spnego not supported for SMB2 yet\n");
rc = -EOPNOTSUPP;
goto out;
- } else {
- blob_length = sizeof(struct _NEGOTIATE_MESSAGE);
- /* with raw NTLMSSP we don't encapsulate in SPNEGO */
}
sess_data->iov[1].iov_base = ntlmssp_blob;
sess_data->iov[1].iov_len = blob_length;
@@ -1841,7 +1840,7 @@ SMB2_tcon(const unsigned int xid, struct cifs_ses *ses, const char *tree,
cifs_small_buf_release(req);
rsp = (struct smb2_tree_connect_rsp *)rsp_iov.iov_base;
trace_smb3_tcon(xid, tcon->tid, ses->Suid, tree, rc);
- if (rc != 0) {
+ if ((rc != 0) || (rsp == NULL)) {
cifs_stats_fail_inc(tcon, SMB2_TREE_CONNECT_HE);
tcon->need_reconnect = true;
goto tcon_error_exit;
@@ -2669,7 +2668,18 @@ int smb311_posix_mkdir(const unsigned int xid, struct inode *inode,
goto err_free_rsp_buf;
}
+ /*
+	 * Although it is unlikely for rsp to be null while rc is not set,
+	 * adding the check below is slightly safer long term (and quiets a
+	 * Coverity warning).
+ */
rsp = (struct smb2_create_rsp *)rsp_iov.iov_base;
+ if (rsp == NULL) {
+ rc = -EIO;
+ kfree(pc_buf);
+ goto err_free_req;
+ }
+
trace_smb3_posix_mkdir_done(xid, le64_to_cpu(rsp->PersistentFileId),
tcon->tid,
ses->Suid, CREATE_NOT_FILE,
@@ -2942,7 +2952,9 @@ SMB2_open(const unsigned int xid, struct cifs_open_parms *oparms, __le16 *path,
tcon->need_reconnect = true;
}
goto creat_exit;
- } else
+ } else if (rsp == NULL) /* unlikely to happen, but safer to check */
+ goto creat_exit;
+ else
trace_smb3_open_done(xid, le64_to_cpu(rsp->PersistentFileId),
tcon->tid,
ses->Suid, oparms->create_options,
@@ -3163,6 +3175,16 @@ SMB2_ioctl(const unsigned int xid, struct cifs_tcon *tcon, u64 persistent_fid,
if ((plen == NULL) || (out_data == NULL))
goto ioctl_exit;
+ /*
+	 * Although it is unlikely for rsp to be null while rc is not set,
+	 * adding the check below is slightly safer long term (and quiets a
+	 * Coverity warning).
+ */
+ if (rsp == NULL) {
+ rc = -EIO;
+ goto ioctl_exit;
+ }
+
*plen = le32_to_cpu(rsp->OutputCount);
/* We check for obvious errors in the output buffer length and offset */
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index b7379329b741..61ea3d3f95b4 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -1044,14 +1044,17 @@ struct TCP_Server_Info *cifs_pick_channel(struct cifs_ses *ses)
if (!ses)
return NULL;
+ spin_lock(&ses->chan_lock);
if (!ses->binding) {
/* round robin */
if (ses->chan_count > 1) {
index = (uint)atomic_inc_return(&ses->chan_seq);
index %= ses->chan_count;
}
+ spin_unlock(&ses->chan_lock);
return ses->chans[index].server;
} else {
+ spin_unlock(&ses->chan_lock);
return cifs_ses_server(ses);
}
}
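
The hunk above brackets the binding check and the chan_count/chan_seq round-robin computation with ses->chan_lock; the pick itself is still just an atomic counter reduced modulo the channel count. A minimal userspace sketch of that round-robin pattern (struct session, pick_channel and the pthread spinlock here are illustrative stand-ins, not the cifs code):

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

/* Illustrative sketch only: a hypothetical session with multiple channels. */
struct session {
	pthread_spinlock_t chan_lock;   /* guards chan_count/chans, like ses->chan_lock */
	atomic_uint chan_seq;           /* monotonically increasing pick counter */
	unsigned int chan_count;
	int chans[8];                   /* stand-ins for per-channel server handles */
};

/* Round-robin pick under the lock, mirroring the pattern in cifs_pick_channel(). */
static int pick_channel(struct session *s)
{
	unsigned int index = 0;
	int server;

	pthread_spin_lock(&s->chan_lock);
	if (s->chan_count > 1)
		index = atomic_fetch_add(&s->chan_seq, 1) % s->chan_count;
	server = s->chans[index];
	pthread_spin_unlock(&s->chan_lock);
	return server;
}

int main(void)
{
	struct session s = { .chan_count = 3, .chans = { 10, 11, 12 } };

	pthread_spin_init(&s.chan_lock, PTHREAD_PROCESS_PRIVATE);
	for (int i = 0; i < 5; i++)
		printf("picked server %d\n", pick_channel(&s));
	pthread_spin_destroy(&s.chan_lock);
	return 0;
}
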
diff --git a/fs/erofs/zdata.c b/fs/erofs/zdata.c
index bcb1b91b234f..9a249bfc2770 100644
--- a/fs/erofs/zdata.c
+++ b/fs/erofs/zdata.c
@@ -96,16 +96,9 @@ static void z_erofs_free_pcluster(struct z_erofs_pcluster *pcl)
DBG_BUGON(1);
}
-/*
- * a compressed_pages[] placeholder in order to avoid
- * being filled with file pages for in-place decompression.
- */
-#define PAGE_UNALLOCATED ((void *)0x5F0E4B1D)
-
/* how to allocate cached pages for a pcluster */
enum z_erofs_cache_alloctype {
DONTALLOC, /* don't allocate any cached pages */
- DELAYEDALLOC, /* delayed allocation (at the time of submitting io) */
/*
* try to use cached I/O if page allocation succeeds or fallback
* to in-place I/O instead to avoid any direct reclaim.
@@ -267,10 +260,6 @@ static void preload_compressed_pages(struct z_erofs_collector *clt,
/* I/O is needed, no possible to decompress directly */
standalone = false;
switch (type) {
- case DELAYEDALLOC:
- t = tagptr_init(compressed_page_t,
- PAGE_UNALLOCATED);
- break;
case TRYALLOC:
newpage = erofs_allocpage(pagepool, gfp);
if (!newpage)
@@ -371,8 +360,8 @@ static bool z_erofs_try_inplace_io(struct z_erofs_collector *clt,
/* callers must be with collection lock held */
static int z_erofs_attach_page(struct z_erofs_collector *clt,
- struct page *page,
- enum z_erofs_page_type type)
+ struct page *page, enum z_erofs_page_type type,
+ bool pvec_safereuse)
{
int ret;
@@ -382,9 +371,9 @@ static int z_erofs_attach_page(struct z_erofs_collector *clt,
z_erofs_try_inplace_io(clt, page))
return 0;
- ret = z_erofs_pagevec_enqueue(&clt->vector, page, type);
+ ret = z_erofs_pagevec_enqueue(&clt->vector, page, type,
+ pvec_safereuse);
clt->cl->vcnt += (unsigned int)ret;
-
return ret ? 0 : -EAGAIN;
}
@@ -727,7 +716,8 @@ hitted:
tight &= (clt->mode >= COLLECT_PRIMARY_FOLLOWED);
retry:
- err = z_erofs_attach_page(clt, page, page_type);
+ err = z_erofs_attach_page(clt, page, page_type,
+ clt->mode >= COLLECT_PRIMARY_FOLLOWED);
/* should allocate an additional short-lived page for pagevec */
if (err == -EAGAIN) {
struct page *const newpage =
@@ -735,7 +725,7 @@ retry:
set_page_private(newpage, Z_EROFS_SHORTLIVED_PAGE);
err = z_erofs_attach_page(clt, newpage,
- Z_EROFS_PAGE_TYPE_EXCLUSIVE);
+ Z_EROFS_PAGE_TYPE_EXCLUSIVE, true);
if (!err)
goto retry;
}
@@ -1089,15 +1079,6 @@ repeat:
if (!page)
goto out_allocpage;
- /*
- * the cached page has not been allocated and
- * an placeholder is out there, prepare it now.
- */
- if (page == PAGE_UNALLOCATED) {
- tocache = true;
- goto out_allocpage;
- }
-
/* process the target tagged pointer */
t = tagptr_init(compressed_page_t, page);
justfound = tagptr_unfold_tags(t);
diff --git a/fs/erofs/zdata.h b/fs/erofs/zdata.h
index 879df5362777..4a69515dea75 100644
--- a/fs/erofs/zdata.h
+++ b/fs/erofs/zdata.h
@@ -179,4 +179,3 @@ static inline void z_erofs_onlinepage_endio(struct page *page)
#define Z_EROFS_VMAP_GLOBAL_PAGES 2048
#endif
-
diff --git a/fs/erofs/zpvec.h b/fs/erofs/zpvec.h
index dfd7fe0503bb..b05464f4a808 100644
--- a/fs/erofs/zpvec.h
+++ b/fs/erofs/zpvec.h
@@ -106,11 +106,18 @@ static inline void z_erofs_pagevec_ctor_init(struct z_erofs_pagevec_ctor *ctor,
static inline bool z_erofs_pagevec_enqueue(struct z_erofs_pagevec_ctor *ctor,
struct page *page,
- enum z_erofs_page_type type)
+ enum z_erofs_page_type type,
+ bool pvec_safereuse)
{
- if (!ctor->next && type)
- if (ctor->index + 1 == ctor->nr)
+ if (!ctor->next) {
+ /* some pages cannot be reused as pvec safely without I/O */
+ if (type == Z_EROFS_PAGE_TYPE_EXCLUSIVE && !pvec_safereuse)
+ type = Z_EROFS_VLE_PAGE_TYPE_TAIL_SHARED;
+
+ if (type != Z_EROFS_PAGE_TYPE_EXCLUSIVE &&
+ ctor->index + 1 == ctor->nr)
return false;
+ }
if (ctor->index >= ctor->nr)
z_erofs_pagevec_ctor_pagedown(ctor, false);
diff --git a/fs/f2fs/checkpoint.c b/fs/f2fs/checkpoint.c
index 83e9bc0f91ff..f1693d45bb78 100644
--- a/fs/f2fs/checkpoint.c
+++ b/fs/f2fs/checkpoint.c
@@ -653,7 +653,7 @@ static int recover_orphan_inode(struct f2fs_sb_info *sbi, nid_t ino)
return PTR_ERR(inode);
}
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err) {
iput(inode);
goto err_out;
@@ -705,9 +705,6 @@ int f2fs_recover_orphan_inodes(struct f2fs_sb_info *sbi)
}
#ifdef CONFIG_QUOTA
- /* Needed for iput() to work correctly and not trash data */
- sbi->sb->s_flags |= SB_ACTIVE;
-
/*
* Turn on quotas which were not enabled for read-only mounts if
* filesystem has quota feature, so that they are updated correctly.
@@ -1162,7 +1159,8 @@ static bool __need_flush_quota(struct f2fs_sb_info *sbi)
if (!is_journalled_quota(sbi))
return false;
- down_write(&sbi->quota_sem);
+ if (!down_write_trylock(&sbi->quota_sem))
+ return true;
if (is_sbi_flag_set(sbi, SBI_QUOTA_SKIP_FLUSH)) {
ret = false;
} else if (is_sbi_flag_set(sbi, SBI_QUOTA_NEED_REPAIR)) {
diff --git a/fs/f2fs/compress.c b/fs/f2fs/compress.c
index 20a083dc9042..49121a21f749 100644
--- a/fs/f2fs/compress.c
+++ b/fs/f2fs/compress.c
@@ -336,8 +336,8 @@ static const struct f2fs_compress_ops f2fs_lz4_ops = {
static int zstd_init_compress_ctx(struct compress_ctx *cc)
{
- ZSTD_parameters params;
- ZSTD_CStream *stream;
+ zstd_parameters params;
+ zstd_cstream *stream;
void *workspace;
unsigned int workspace_size;
unsigned char level = F2FS_I(cc->inode)->i_compress_flag >>
@@ -346,17 +346,17 @@ static int zstd_init_compress_ctx(struct compress_ctx *cc)
if (!level)
level = F2FS_ZSTD_DEFAULT_CLEVEL;
- params = ZSTD_getParams(level, cc->rlen, 0);
- workspace_size = ZSTD_CStreamWorkspaceBound(params.cParams);
+ params = zstd_get_params(F2FS_ZSTD_DEFAULT_CLEVEL, cc->rlen);
+ workspace_size = zstd_cstream_workspace_bound(&params.cParams);
workspace = f2fs_kvmalloc(F2FS_I_SB(cc->inode),
workspace_size, GFP_NOFS);
if (!workspace)
return -ENOMEM;
- stream = ZSTD_initCStream(params, 0, workspace, workspace_size);
+ stream = zstd_init_cstream(&params, 0, workspace, workspace_size);
if (!stream) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initCStream failed\n",
+ printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_cstream failed\n",
KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
__func__);
kvfree(workspace);
@@ -379,9 +379,9 @@ static void zstd_destroy_compress_ctx(struct compress_ctx *cc)
static int zstd_compress_pages(struct compress_ctx *cc)
{
- ZSTD_CStream *stream = cc->private2;
- ZSTD_inBuffer inbuf;
- ZSTD_outBuffer outbuf;
+ zstd_cstream *stream = cc->private2;
+ zstd_in_buffer inbuf;
+ zstd_out_buffer outbuf;
int src_size = cc->rlen;
int dst_size = src_size - PAGE_SIZE - COMPRESS_HEADER_SIZE;
int ret;
@@ -394,19 +394,19 @@ static int zstd_compress_pages(struct compress_ctx *cc)
outbuf.dst = cc->cbuf->cdata;
outbuf.size = dst_size;
- ret = ZSTD_compressStream(stream, &outbuf, &inbuf);
- if (ZSTD_isError(ret)) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
+ ret = zstd_compress_stream(stream, &outbuf, &inbuf);
+ if (zstd_is_error(ret)) {
+ printk_ratelimited("%sF2FS-fs (%s): %s zstd_compress_stream failed, ret: %d\n",
KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
- __func__, ZSTD_getErrorCode(ret));
+ __func__, zstd_get_error_code(ret));
return -EIO;
}
- ret = ZSTD_endStream(stream, &outbuf);
- if (ZSTD_isError(ret)) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_endStream returned %d\n",
+ ret = zstd_end_stream(stream, &outbuf);
+ if (zstd_is_error(ret)) {
+ printk_ratelimited("%sF2FS-fs (%s): %s zstd_end_stream returned %d\n",
KERN_ERR, F2FS_I_SB(cc->inode)->sb->s_id,
- __func__, ZSTD_getErrorCode(ret));
+ __func__, zstd_get_error_code(ret));
return -EIO;
}
@@ -423,22 +423,22 @@ static int zstd_compress_pages(struct compress_ctx *cc)
static int zstd_init_decompress_ctx(struct decompress_io_ctx *dic)
{
- ZSTD_DStream *stream;
+ zstd_dstream *stream;
void *workspace;
unsigned int workspace_size;
unsigned int max_window_size =
MAX_COMPRESS_WINDOW_SIZE(dic->log_cluster_size);
- workspace_size = ZSTD_DStreamWorkspaceBound(max_window_size);
+ workspace_size = zstd_dstream_workspace_bound(max_window_size);
workspace = f2fs_kvmalloc(F2FS_I_SB(dic->inode),
workspace_size, GFP_NOFS);
if (!workspace)
return -ENOMEM;
- stream = ZSTD_initDStream(max_window_size, workspace, workspace_size);
+ stream = zstd_init_dstream(max_window_size, workspace, workspace_size);
if (!stream) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_initDStream failed\n",
+ printk_ratelimited("%sF2FS-fs (%s): %s zstd_init_dstream failed\n",
KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
__func__);
kvfree(workspace);
@@ -460,9 +460,9 @@ static void zstd_destroy_decompress_ctx(struct decompress_io_ctx *dic)
static int zstd_decompress_pages(struct decompress_io_ctx *dic)
{
- ZSTD_DStream *stream = dic->private2;
- ZSTD_inBuffer inbuf;
- ZSTD_outBuffer outbuf;
+ zstd_dstream *stream = dic->private2;
+ zstd_in_buffer inbuf;
+ zstd_out_buffer outbuf;
int ret;
inbuf.pos = 0;
@@ -473,11 +473,11 @@ static int zstd_decompress_pages(struct decompress_io_ctx *dic)
outbuf.dst = dic->rbuf;
outbuf.size = dic->rlen;
- ret = ZSTD_decompressStream(stream, &outbuf, &inbuf);
- if (ZSTD_isError(ret)) {
- printk_ratelimited("%sF2FS-fs (%s): %s ZSTD_compressStream failed, ret: %d\n",
+ ret = zstd_decompress_stream(stream, &outbuf, &inbuf);
+ if (zstd_is_error(ret)) {
+ printk_ratelimited("%sF2FS-fs (%s): %s zstd_decompress_stream failed, ret: %d\n",
KERN_ERR, F2FS_I_SB(dic->inode)->sb->s_id,
- __func__, ZSTD_getErrorCode(ret));
+ __func__, zstd_get_error_code(ret));
return -EIO;
}
@@ -882,6 +882,25 @@ bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index)
return is_page_in_cluster(cc, index);
}
+bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
+ int index, int nr_pages)
+{
+ unsigned long pgidx;
+ int i;
+
+ if (nr_pages - index < cc->cluster_size)
+ return false;
+
+ pgidx = pvec->pages[index]->index;
+
+ for (i = 1; i < cc->cluster_size; i++) {
+ if (pvec->pages[index + i]->index != pgidx + i)
+ return false;
+ }
+
+ return true;
+}
+
static bool cluster_has_invalid_data(struct compress_ctx *cc)
{
loff_t i_size = i_size_read(cc->inode);
@@ -1531,6 +1550,7 @@ int f2fs_write_multi_pages(struct compress_ctx *cc,
if (cluster_may_compress(cc)) {
err = f2fs_compress_pages(cc);
if (err == -EAGAIN) {
+ add_compr_block_stat(cc->inode, cc->cluster_size);
goto write;
} else if (err) {
f2fs_put_rpages_wbc(cc, wbc, true, 1);
diff --git a/fs/f2fs/data.c b/fs/f2fs/data.c
index f4fd6c246c9a..9f754aaef558 100644
--- a/fs/f2fs/data.c
+++ b/fs/f2fs/data.c
@@ -1465,10 +1465,15 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
struct extent_info ei = {0, };
block_t blkaddr;
unsigned int start_pgofs;
+ int bidx = 0;
if (!maxblocks)
return 0;
+ map->m_bdev = inode->i_sb->s_bdev;
+ map->m_multidev_dio =
+ f2fs_allow_multi_device_dio(F2FS_I_SB(inode), flag);
+
map->m_len = 0;
map->m_flags = 0;
@@ -1491,6 +1496,21 @@ int f2fs_map_blocks(struct inode *inode, struct f2fs_map_blocks *map,
if (flag == F2FS_GET_BLOCK_DIO)
f2fs_wait_on_block_writeback_range(inode,
map->m_pblk, map->m_len);
+
+ if (map->m_multidev_dio) {
+ block_t blk_addr = map->m_pblk;
+
+ bidx = f2fs_target_device_index(sbi, map->m_pblk);
+
+ map->m_bdev = FDEV(bidx).bdev;
+ map->m_pblk -= FDEV(bidx).start_blk;
+ map->m_len = min(map->m_len,
+ FDEV(bidx).end_blk + 1 - map->m_pblk);
+
+ if (map->m_may_create)
+ f2fs_update_device_state(sbi, inode->i_ino,
+ blk_addr, map->m_len);
+ }
goto out;
}
@@ -1609,6 +1629,9 @@ next_block:
if (flag == F2FS_GET_BLOCK_PRE_AIO)
goto skip;
+ if (map->m_multidev_dio)
+ bidx = f2fs_target_device_index(sbi, blkaddr);
+
if (map->m_len == 0) {
/* preallocated unwritten block should be mapped for fiemap. */
if (blkaddr == NEW_ADDR)
@@ -1617,10 +1640,15 @@ next_block:
map->m_pblk = blkaddr;
map->m_len = 1;
+
+ if (map->m_multidev_dio)
+ map->m_bdev = FDEV(bidx).bdev;
} else if ((map->m_pblk != NEW_ADDR &&
blkaddr == (map->m_pblk + ofs)) ||
(map->m_pblk == NEW_ADDR && blkaddr == NEW_ADDR) ||
flag == F2FS_GET_BLOCK_PRE_DIO) {
+ if (map->m_multidev_dio && map->m_bdev != FDEV(bidx).bdev)
+ goto sync_out;
ofs++;
map->m_len++;
} else {
@@ -1673,10 +1701,32 @@ skip:
sync_out:
- /* for hardware encryption, but to avoid potential issue in future */
- if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED)
+ if (flag == F2FS_GET_BLOCK_DIO && map->m_flags & F2FS_MAP_MAPPED) {
+ /*
+ * for hardware encryption, but to avoid a potential issue
+ * in the future
+ */
f2fs_wait_on_block_writeback_range(inode,
map->m_pblk, map->m_len);
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ map->m_pblk, map->m_pblk);
+
+ if (map->m_multidev_dio) {
+ block_t blk_addr = map->m_pblk;
+
+ bidx = f2fs_target_device_index(sbi, map->m_pblk);
+
+ map->m_bdev = FDEV(bidx).bdev;
+ map->m_pblk -= FDEV(bidx).start_blk;
+
+ if (map->m_may_create)
+ f2fs_update_device_state(sbi, inode->i_ino,
+ blk_addr, map->m_len);
+
+ f2fs_bug_on(sbi, blk_addr + map->m_len >
+ FDEV(bidx).end_blk + 1);
+ }
+ }
if (flag == F2FS_GET_BLOCK_PRECACHE) {
if (map->m_flags & F2FS_MAP_MAPPED) {
@@ -1696,7 +1746,7 @@ unlock_out:
f2fs_balance_fs(sbi, dn.node_changed);
}
out:
- trace_f2fs_map_blocks(inode, map, err);
+ trace_f2fs_map_blocks(inode, map, create, flag, err);
return err;
}
@@ -1755,6 +1805,9 @@ static int __get_data_block(struct inode *inode, sector_t iblock,
map_bh(bh, inode->i_sb, map.m_pblk);
bh->b_state = (bh->b_state & ~F2FS_MAP_FLAGS) | map.m_flags;
bh->b_size = blks_to_bytes(inode, map.m_len);
+
+ if (map.m_multidev_dio)
+ bh->b_bdev = map.m_bdev;
}
return err;
}
@@ -2989,6 +3042,10 @@ readd:
need_readd = false;
#ifdef CONFIG_F2FS_FS_COMPRESSION
if (f2fs_compressed_file(inode)) {
+ void *fsdata = NULL;
+ struct page *pagep;
+ int ret2;
+
ret = f2fs_init_compress_ctx(&cc);
if (ret) {
done = 1;
@@ -3007,27 +3064,23 @@ readd:
if (unlikely(f2fs_cp_error(sbi)))
goto lock_page;
- if (f2fs_cluster_is_empty(&cc)) {
- void *fsdata = NULL;
- struct page *pagep;
- int ret2;
+ if (!f2fs_cluster_is_empty(&cc))
+ goto lock_page;
- ret2 = f2fs_prepare_compress_overwrite(
+ ret2 = f2fs_prepare_compress_overwrite(
inode, &pagep,
page->index, &fsdata);
- if (ret2 < 0) {
- ret = ret2;
- done = 1;
- break;
- } else if (ret2 &&
- !f2fs_compress_write_end(inode,
- fsdata, page->index,
- 1)) {
- retry = 1;
- break;
- }
- } else {
- goto lock_page;
+ if (ret2 < 0) {
+ ret = ret2;
+ done = 1;
+ break;
+ } else if (ret2 &&
+ (!f2fs_compress_write_end(inode,
+ fsdata, page->index, 1) ||
+ !f2fs_all_cluster_page_loaded(&cc,
+ &pvec, i, nr_pages))) {
+ retry = 1;
+ break;
}
}
#endif
diff --git a/fs/f2fs/f2fs.h b/fs/f2fs/f2fs.h
index b339ae89c1ad..ce9fc9f13000 100644
--- a/fs/f2fs/f2fs.h
+++ b/fs/f2fs/f2fs.h
@@ -55,6 +55,7 @@ enum {
FAULT_DISCARD,
FAULT_WRITE_IO,
FAULT_SLAB_ALLOC,
+ FAULT_DQUOT_INIT,
FAULT_MAX,
};
@@ -561,6 +562,9 @@ enum {
#define MAX_DIR_RA_PAGES 4 /* maximum ra pages of dir */
+/* dirty segments threshold for triggering CP */
+#define DEFAULT_DIRTY_THRESHOLD 4
+
/* for in-memory extent cache entry */
#define F2FS_MIN_EXTENT_LEN 64 /* minimum extent length */
@@ -617,6 +621,7 @@ struct extent_tree {
F2FS_MAP_UNWRITTEN)
struct f2fs_map_blocks {
+ struct block_device *m_bdev; /* for multi-device dio */
block_t m_pblk;
block_t m_lblk;
unsigned int m_len;
@@ -625,6 +630,7 @@ struct f2fs_map_blocks {
pgoff_t *m_next_extent; /* point to next possible extent */
int m_seg_type;
bool m_may_create; /* indicate it is from write path */
+ bool m_multidev_dio; /* indicate it allows multi-device dio */
};
/* for flag in get_data_block */
@@ -1284,8 +1290,10 @@ enum {
};
enum {
- FS_MODE_ADAPTIVE, /* use both lfs/ssr allocation */
- FS_MODE_LFS, /* use lfs allocation only */
+ FS_MODE_ADAPTIVE, /* use both lfs/ssr allocation */
+ FS_MODE_LFS, /* use lfs allocation only */
+ FS_MODE_FRAGMENT_SEG, /* segment fragmentation mode */
+ FS_MODE_FRAGMENT_BLK, /* block fragmentation mode */
};
enum {
@@ -1728,12 +1736,15 @@ struct f2fs_sb_info {
/* For shrinker support */
struct list_head s_list;
+ struct mutex umount_mutex;
+ unsigned int shrinker_run_no;
+
+ /* For multi devices */
int s_ndevs; /* number of devices */
struct f2fs_dev_info *devs; /* for device list */
unsigned int dirty_device; /* for checkpoint data flush */
spinlock_t dev_lock; /* protect dirty_device */
- struct mutex umount_mutex;
- unsigned int shrinker_run_no;
+ bool aligned_blksize; /* all devices have the same logical blksize */
/* For write statistics */
u64 sectors_written_start;
@@ -1756,6 +1767,9 @@ struct f2fs_sb_info {
unsigned long seq_file_ra_mul; /* multiplier for ra_pages of seq. files in fadvise */
+ int max_fragment_chunk; /* max chunk size for block fragmentation mode */
+ int max_fragment_hole; /* max hole size for block fragmentation mode */
+
#ifdef CONFIG_F2FS_FS_COMPRESSION
struct kmem_cache *page_array_slab; /* page array entry */
unsigned int page_array_slab_size; /* default page array slab size */
@@ -3363,6 +3377,7 @@ static inline int f2fs_add_link(struct dentry *dentry, struct inode *inode)
*/
int f2fs_inode_dirtied(struct inode *inode, bool sync);
void f2fs_inode_synced(struct inode *inode);
+int f2fs_dquot_initialize(struct inode *inode);
int f2fs_enable_quota_files(struct f2fs_sb_info *sbi, bool rdonly);
int f2fs_quota_sync(struct super_block *sb, int type);
loff_t max_file_blocks(struct inode *inode);
@@ -3492,6 +3507,8 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
block_t old_blkaddr, block_t *new_blkaddr,
struct f2fs_summary *sum, int type,
struct f2fs_io_info *fio);
+void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
+ block_t blkaddr, unsigned int blkcnt);
void f2fs_wait_on_page_writeback(struct page *page,
enum page_type type, bool ordered, bool locked);
void f2fs_wait_on_block_writeback(struct inode *inode, block_t blkaddr);
@@ -3516,6 +3533,16 @@ unsigned int f2fs_usable_segs_in_sec(struct f2fs_sb_info *sbi,
unsigned int f2fs_usable_blks_in_seg(struct f2fs_sb_info *sbi,
unsigned int segno);
+#define DEF_FRAGMENT_SIZE 4
+#define MIN_FRAGMENT_SIZE 1
+#define MAX_FRAGMENT_SIZE 512
+
+static inline bool f2fs_need_rand_seg(struct f2fs_sb_info *sbi)
+{
+ return F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG ||
+ F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK;
+}
+
/*
* checkpoint.c
*/
@@ -4027,6 +4054,8 @@ void f2fs_end_read_compressed_page(struct page *page, bool failed,
block_t blkaddr);
bool f2fs_cluster_is_empty(struct compress_ctx *cc);
bool f2fs_cluster_can_merge_page(struct compress_ctx *cc, pgoff_t index);
+bool f2fs_all_cluster_page_loaded(struct compress_ctx *cc, struct pagevec *pvec,
+ int index, int nr_pages);
bool f2fs_sanity_check_cluster(struct dnode_of_data *dn);
void f2fs_compress_ctx_add_page(struct compress_ctx *cc, struct page *page);
int f2fs_write_multi_pages(struct compress_ctx *cc,
@@ -4152,8 +4181,7 @@ static inline bool f2fs_disable_compressed_file(struct inode *inode)
if (!f2fs_compressed_file(inode))
return true;
- if (S_ISREG(inode->i_mode) &&
- (get_dirty_pages(inode) || atomic_read(&fi->i_compr_blocks)))
+ if (S_ISREG(inode->i_mode) && F2FS_HAS_BLOCKS(inode))
return false;
fi->i_flags &= ~F2FS_COMPR_FL;
@@ -4302,6 +4330,16 @@ static inline int block_unaligned_IO(struct inode *inode,
return align & blocksize_mask;
}
+static inline bool f2fs_allow_multi_device_dio(struct f2fs_sb_info *sbi,
+ int flag)
+{
+ if (!f2fs_is_multi_device(sbi))
+ return false;
+ if (flag != F2FS_GET_BLOCK_DIO)
+ return false;
+ return sbi->aligned_blksize;
+}
+
static inline bool f2fs_force_buffered_io(struct inode *inode,
struct kiocb *iocb, struct iov_iter *iter)
{
@@ -4310,7 +4348,9 @@ static inline bool f2fs_force_buffered_io(struct inode *inode,
if (f2fs_post_read_required(inode))
return true;
- if (f2fs_is_multi_device(sbi))
+
+ /* disallow direct IO if any of the devices has an unaligned blksize */
+ if (f2fs_is_multi_device(sbi) && !sbi->aligned_blksize)
return true;
/*
* for blkzoned device, fallback direct IO to buffered IO, so
diff --git a/fs/f2fs/file.c b/fs/f2fs/file.c
index eb971e1e7227..92ec2699bc85 100644
--- a/fs/f2fs/file.c
+++ b/fs/f2fs/file.c
@@ -786,7 +786,7 @@ int f2fs_truncate(struct inode *inode)
return -EIO;
}
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
return err;
@@ -916,7 +916,7 @@ int f2fs_setattr(struct user_namespace *mnt_userns, struct dentry *dentry,
return err;
if (is_quota_modification(inode, attr)) {
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
return err;
}
@@ -3020,7 +3020,7 @@ static int f2fs_ioc_setproject(struct inode *inode, __u32 projid)
}
f2fs_put_page(ipage, 1);
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
return err;
diff --git a/fs/f2fs/gc.c b/fs/f2fs/gc.c
index 77391e3b7d68..a946ce0ead34 100644
--- a/fs/f2fs/gc.c
+++ b/fs/f2fs/gc.c
@@ -14,6 +14,7 @@
#include <linux/delay.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
+#include <linux/random.h>
#include "f2fs.h"
#include "node.h"
@@ -257,7 +258,9 @@ static void select_policy(struct f2fs_sb_info *sbi, int gc_type,
p->max_search = sbi->max_victim_search;
/* let's select beginning hot/small space first in no_heap mode */
- if (test_opt(sbi, NOHEAP) &&
+ if (f2fs_need_rand_seg(sbi))
+ p->offset = prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
+ else if (test_opt(sbi, NOHEAP) &&
(type == CURSEG_HOT_DATA || IS_NODESEG(type)))
p->offset = 0;
else
diff --git a/fs/f2fs/inline.c b/fs/f2fs/inline.c
index 56a20d5c15da..ea08f0dfa1bd 100644
--- a/fs/f2fs/inline.c
+++ b/fs/f2fs/inline.c
@@ -192,7 +192,7 @@ int f2fs_convert_inline_inode(struct inode *inode)
f2fs_hw_is_readonly(sbi) || f2fs_readonly(sbi->sb))
return 0;
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
return err;
diff --git a/fs/f2fs/inode.c b/fs/f2fs/inode.c
index 9141147b5bb0..0f8b2df3e1e0 100644
--- a/fs/f2fs/inode.c
+++ b/fs/f2fs/inode.c
@@ -527,7 +527,7 @@ make_now:
inode->i_op = &f2fs_dir_inode_operations;
inode->i_fop = &f2fs_dir_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
- inode_nohighmem(inode);
+ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
} else if (S_ISLNK(inode->i_mode)) {
if (file_is_encrypt(inode))
inode->i_op = &f2fs_encrypted_symlink_inode_operations;
@@ -754,7 +754,7 @@ void f2fs_evict_inode(struct inode *inode)
if (inode->i_nlink || is_bad_inode(inode))
goto no_delete;
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err) {
err = 0;
set_sbi_flag(sbi, SBI_QUOTA_NEED_REPAIR);
diff --git a/fs/f2fs/namei.c b/fs/f2fs/namei.c
index 9c528e583c9d..a728a0af9ce0 100644
--- a/fs/f2fs/namei.c
+++ b/fs/f2fs/namei.c
@@ -74,7 +74,7 @@ static struct inode *f2fs_new_inode(struct inode *dir, umode_t mode)
if (err)
goto fail_drop;
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
goto fail_drop;
@@ -345,7 +345,7 @@ static int f2fs_create(struct user_namespace *mnt_userns, struct inode *dir,
if (!f2fs_is_checkpoint_ready(sbi))
return -ENOSPC;
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -404,7 +404,7 @@ static int f2fs_link(struct dentry *old_dentry, struct inode *dir,
F2FS_I(old_dentry->d_inode)->i_projid)))
return -EXDEV;
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -460,7 +460,7 @@ static int __recover_dot_dentries(struct inode *dir, nid_t pino)
return 0;
}
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -598,10 +598,10 @@ static int f2fs_unlink(struct inode *dir, struct dentry *dentry)
goto fail;
}
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
goto fail;
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
goto fail;
@@ -675,7 +675,7 @@ static int f2fs_symlink(struct user_namespace *mnt_userns, struct inode *dir,
if (err)
return err;
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -746,7 +746,7 @@ static int f2fs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
if (unlikely(f2fs_cp_error(sbi)))
return -EIO;
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -757,7 +757,7 @@ static int f2fs_mkdir(struct user_namespace *mnt_userns, struct inode *dir,
inode->i_op = &f2fs_dir_inode_operations;
inode->i_fop = &f2fs_dir_operations;
inode->i_mapping->a_ops = &f2fs_dblock_aops;
- inode_nohighmem(inode);
+ mapping_set_gfp_mask(inode->i_mapping, GFP_NOFS);
set_inode_flag(inode, FI_INC_LINK);
f2fs_lock_op(sbi);
@@ -803,7 +803,7 @@ static int f2fs_mknod(struct user_namespace *mnt_userns, struct inode *dir,
if (!f2fs_is_checkpoint_ready(sbi))
return -ENOSPC;
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -841,7 +841,7 @@ static int __f2fs_tmpfile(struct inode *dir, struct dentry *dentry,
struct inode *inode;
int err;
- err = dquot_initialize(dir);
+ err = f2fs_dquot_initialize(dir);
if (err)
return err;
@@ -965,16 +965,16 @@ static int f2fs_rename(struct inode *old_dir, struct dentry *old_dentry,
return err;
}
- err = dquot_initialize(old_dir);
+ err = f2fs_dquot_initialize(old_dir);
if (err)
goto out;
- err = dquot_initialize(new_dir);
+ err = f2fs_dquot_initialize(new_dir);
if (err)
goto out;
if (new_inode) {
- err = dquot_initialize(new_inode);
+ err = f2fs_dquot_initialize(new_inode);
if (err)
goto out;
}
@@ -1138,11 +1138,11 @@ static int f2fs_cross_rename(struct inode *old_dir, struct dentry *old_dentry,
F2FS_I(new_dentry->d_inode)->i_projid)))
return -EXDEV;
- err = dquot_initialize(old_dir);
+ err = f2fs_dquot_initialize(old_dir);
if (err)
goto out;
- err = dquot_initialize(new_dir);
+ err = f2fs_dquot_initialize(new_dir);
if (err)
goto out;
diff --git a/fs/f2fs/node.c b/fs/f2fs/node.c
index e863136081b4..556fcd8457f3 100644
--- a/fs/f2fs/node.c
+++ b/fs/f2fs/node.c
@@ -1443,6 +1443,7 @@ page_hit:
nid, nid_of_node(page), ino_of_node(page),
ofs_of_node(page), cpver_of_node(page),
next_blkaddr_of_node(page));
+ set_sbi_flag(sbi, SBI_NEED_FSCK);
err = -EINVAL;
out_err:
ClearPageUptodate(page);
diff --git a/fs/f2fs/node.h b/fs/f2fs/node.h
index ff14a6e5ac1c..18b98cf0465b 100644
--- a/fs/f2fs/node.h
+++ b/fs/f2fs/node.h
@@ -138,11 +138,6 @@ static inline bool excess_cached_nats(struct f2fs_sb_info *sbi)
return NM_I(sbi)->nat_cnt[TOTAL_NAT] >= DEF_NAT_CACHE_THRESHOLD;
}
-static inline bool excess_dirty_nodes(struct f2fs_sb_info *sbi)
-{
- return get_pages(sbi, F2FS_DIRTY_NODES) >= sbi->blocks_per_seg * 8;
-}
-
enum mem_type {
FREE_NIDS, /* indicates the free nid list */
NAT_ENTRIES, /* indicates the cached nat entry */
diff --git a/fs/f2fs/recovery.c b/fs/f2fs/recovery.c
index 04655511d7f5..6a1b4668d933 100644
--- a/fs/f2fs/recovery.c
+++ b/fs/f2fs/recovery.c
@@ -81,7 +81,7 @@ static struct fsync_inode_entry *add_fsync_inode(struct f2fs_sb_info *sbi,
if (IS_ERR(inode))
return ERR_CAST(inode);
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
goto err_out;
@@ -203,7 +203,7 @@ retry:
goto out_put;
}
- err = dquot_initialize(einode);
+ err = f2fs_dquot_initialize(einode);
if (err) {
iput(einode);
goto out_put;
@@ -508,7 +508,7 @@ got_it:
if (IS_ERR(inode))
return PTR_ERR(inode);
- ret = dquot_initialize(inode);
+ ret = f2fs_dquot_initialize(inode);
if (ret) {
iput(inode);
return ret;
@@ -787,8 +787,6 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
}
#ifdef CONFIG_QUOTA
- /* Needed for iput() to work correctly and not trash data */
- sbi->sb->s_flags |= SB_ACTIVE;
/* Turn on quotas so that they are updated correctly */
quota_enabled = f2fs_enable_quota_files(sbi, s_flags & SB_RDONLY);
#endif
@@ -816,10 +814,8 @@ int f2fs_recover_fsync_data(struct f2fs_sb_info *sbi, bool check_only)
err = recover_data(sbi, &inode_list, &tmp_inode_list, &dir_list);
if (!err)
f2fs_bug_on(sbi, !list_empty(&inode_list));
- else {
- /* restore s_flags to let iput() trash data */
- sbi->sb->s_flags = s_flags;
- }
+ else
+ f2fs_bug_on(sbi, sbi->sb->s_flags & SB_ACTIVE);
skip:
fix_curseg_write_pointer = !check_only || list_empty(&inode_list);
diff --git a/fs/f2fs/segment.c b/fs/f2fs/segment.c
index a135d2247415..df9ed75f0b7a 100644
--- a/fs/f2fs/segment.c
+++ b/fs/f2fs/segment.c
@@ -15,6 +15,7 @@
#include <linux/timer.h>
#include <linux/freezer.h>
#include <linux/sched/signal.h>
+#include <linux/random.h>
#include "f2fs.h"
#include "segment.h"
@@ -529,6 +530,25 @@ void f2fs_balance_fs(struct f2fs_sb_info *sbi, bool need)
}
}
+static inline bool excess_dirty_threshold(struct f2fs_sb_info *sbi)
+{
+ int factor = rwsem_is_locked(&sbi->cp_rwsem) ? 3 : 2;
+ unsigned int dents = get_pages(sbi, F2FS_DIRTY_DENTS);
+ unsigned int qdata = get_pages(sbi, F2FS_DIRTY_QDATA);
+ unsigned int nodes = get_pages(sbi, F2FS_DIRTY_NODES);
+ unsigned int meta = get_pages(sbi, F2FS_DIRTY_META);
+ unsigned int imeta = get_pages(sbi, F2FS_DIRTY_IMETA);
+ unsigned int threshold = sbi->blocks_per_seg * factor *
+ DEFAULT_DIRTY_THRESHOLD;
+ unsigned int global_threshold = threshold * 3 / 2;
+
+ if (dents >= threshold || qdata >= threshold ||
+ nodes >= threshold || meta >= threshold ||
+ imeta >= threshold)
+ return true;
+ return dents + qdata + nodes + meta + imeta > global_threshold;
+}
+
void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
{
if (unlikely(is_sbi_flag_set(sbi, SBI_POR_DOING)))
@@ -547,8 +567,8 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
else
f2fs_build_free_nids(sbi, false, false);
- if (excess_dirty_nats(sbi) || excess_dirty_nodes(sbi) ||
- excess_prefree_segs(sbi))
+ if (excess_dirty_nats(sbi) || excess_dirty_threshold(sbi) ||
+ excess_prefree_segs(sbi) || !f2fs_space_for_roll_forward(sbi))
goto do_sync;
/* there is background inflight IO or foreground operation recently */
@@ -561,7 +581,7 @@ void f2fs_balance_fs_bg(struct f2fs_sb_info *sbi, bool from_bg)
goto do_sync;
/* checkpoint is the only way to shrink partial cached entries */
- if (f2fs_available_free_memory(sbi, NAT_ENTRIES) ||
+ if (f2fs_available_free_memory(sbi, NAT_ENTRIES) &&
f2fs_available_free_memory(sbi, INO_ENTRIES))
return;
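
For a rough sense of scale of the new helper above: with 512 blocks per segment (an assumed, typical value; the real one comes from the superblock) and DEFAULT_DIRTY_THRESHOLD = 4, the per-type threshold is 512 * 2 * 4 = 4096 dirty pages outside of checkpoint (factor 3, i.e. 6144, while cp_rwsem is held), and the combined threshold is 1.5x that. A standalone sketch of the same check, with the sbi plumbing replaced by a plain struct:

#include <stdbool.h>
#include <stdio.h>

#define DEFAULT_DIRTY_THRESHOLD 4

/* Illustrative only: per-type dirty page counts, mirroring the f2fs check. */
struct dirty_counts {
	unsigned int dents, qdata, nodes, meta, imeta;
};

static bool excess_dirty_threshold(const struct dirty_counts *c,
				   unsigned int blocks_per_seg, bool cp_locked)
{
	int factor = cp_locked ? 3 : 2;
	unsigned int threshold = blocks_per_seg * factor * DEFAULT_DIRTY_THRESHOLD;
	unsigned int global_threshold = threshold * 3 / 2;

	if (c->dents >= threshold || c->qdata >= threshold ||
	    c->nodes >= threshold || c->meta >= threshold ||
	    c->imeta >= threshold)
		return true;
	return c->dents + c->qdata + c->nodes + c->meta + c->imeta > global_threshold;
}

int main(void)
{
	struct dirty_counts c = { .dents = 1500, .nodes = 1500, .meta = 1500 };

	/* blocks_per_seg = 512 is assumed here for illustration */
	printf("trigger checkpoint: %s\n",
	       excess_dirty_threshold(&c, 512, false) ? "yes" : "no");
	return 0;
}
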
@@ -2630,6 +2650,8 @@ static unsigned int __get_next_segno(struct f2fs_sb_info *sbi, int type)
unsigned short seg_type = curseg->seg_type;
sanity_check_seg_type(sbi, seg_type);
+ if (f2fs_need_rand_seg(sbi))
+ return prandom_u32() % (MAIN_SECS(sbi) * sbi->segs_per_sec);
/* if segs_per_sec is larger than 1, we need to keep the original policy. */
if (__is_large_section(sbi))
@@ -2681,6 +2703,9 @@ static void new_curseg(struct f2fs_sb_info *sbi, int type, bool new_sec)
curseg->next_segno = segno;
reset_curseg(sbi, type, 1);
curseg->alloc_type = LFS;
+ if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
+ curseg->fragment_remained_chunk =
+ prandom_u32() % sbi->max_fragment_chunk + 1;
}
static int __next_free_blkoff(struct f2fs_sb_info *sbi,
@@ -2707,12 +2732,22 @@ static int __next_free_blkoff(struct f2fs_sb_info *sbi,
static void __refresh_next_blkoff(struct f2fs_sb_info *sbi,
struct curseg_info *seg)
{
- if (seg->alloc_type == SSR)
+ if (seg->alloc_type == SSR) {
seg->next_blkoff =
__next_free_blkoff(sbi, seg->segno,
seg->next_blkoff + 1);
- else
+ } else {
seg->next_blkoff++;
+ if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK) {
+ /* To allocate block chunks in different sizes, use a random number */
+ if (--seg->fragment_remained_chunk <= 0) {
+ seg->fragment_remained_chunk =
+ prandom_u32() % sbi->max_fragment_chunk + 1;
+ seg->next_blkoff +=
+ prandom_u32() % sbi->max_fragment_hole + 1;
+ }
+ }
+ }
}
bool f2fs_segment_has_free_slot(struct f2fs_sb_info *sbi, int segno)
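
The fragment:block pieces above deliberately fragment LFS allocation: each run of consecutive blocks has a random length up to max_fragment_chunk, and once it is used up the next write skips a random hole of up to max_fragment_hole blocks. A userspace sketch of that advance logic (prandom_u32() swapped for rand(); the struct, helper and main() are illustrative only):

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

/* Illustrative sketch of the fragment:block next-block-offset advance. */
struct curseg {
	int next_blkoff;
	int fragment_remained_chunk;
};

static void refresh_next_blkoff(struct curseg *seg,
				int max_fragment_chunk, int max_fragment_hole)
{
	seg->next_blkoff++;
	/* once the current chunk is exhausted, skip a random hole and
	 * start a new randomly sized chunk */
	if (--seg->fragment_remained_chunk <= 0) {
		seg->fragment_remained_chunk = rand() % max_fragment_chunk + 1;
		seg->next_blkoff += rand() % max_fragment_hole + 1;
	}
}

int main(void)
{
	struct curseg seg = { .next_blkoff = 0, .fragment_remained_chunk = 3 };

	srand((unsigned int)time(NULL));
	for (int i = 0; i < 10; i++) {
		refresh_next_blkoff(&seg, 4, 4);   /* DEF_FRAGMENT_SIZE = 4 */
		printf("write at block offset %d\n", seg.next_blkoff);
	}
	return 0;
}
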
@@ -3485,24 +3520,30 @@ void f2fs_allocate_data_block(struct f2fs_sb_info *sbi, struct page *page,
up_read(&SM_I(sbi)->curseg_lock);
}
-static void update_device_state(struct f2fs_io_info *fio)
+void f2fs_update_device_state(struct f2fs_sb_info *sbi, nid_t ino,
+ block_t blkaddr, unsigned int blkcnt)
{
- struct f2fs_sb_info *sbi = fio->sbi;
- unsigned int devidx;
-
if (!f2fs_is_multi_device(sbi))
return;
- devidx = f2fs_target_device_index(sbi, fio->new_blkaddr);
+ while (1) {
+ unsigned int devidx = f2fs_target_device_index(sbi, blkaddr);
+ unsigned int blks = FDEV(devidx).end_blk - blkaddr + 1;
- /* update device state for fsync */
- f2fs_set_dirty_device(sbi, fio->ino, devidx, FLUSH_INO);
+ /* update device state for fsync */
+ f2fs_set_dirty_device(sbi, ino, devidx, FLUSH_INO);
- /* update device state for checkpoint */
- if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
- spin_lock(&sbi->dev_lock);
- f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
- spin_unlock(&sbi->dev_lock);
+ /* update device state for checkpoint */
+ if (!f2fs_test_bit(devidx, (char *)&sbi->dirty_device)) {
+ spin_lock(&sbi->dev_lock);
+ f2fs_set_bit(devidx, (char *)&sbi->dirty_device);
+ spin_unlock(&sbi->dev_lock);
+ }
+
+ if (blkcnt <= blks)
+ break;
+ blkcnt -= blks;
+ blkaddr += blks;
}
}
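
f2fs_update_device_state() now takes a block count and walks the whole range, so a single extent that crosses device boundaries marks every device it touches dirty for fsync and checkpoint. The walk is just a clamp-against-end_blk loop; a standalone sketch with a made-up three-device layout:

#include <stdio.h>

/* Illustrative only: a tiny multi-device layout keyed by absolute block. */
struct dev_info {
	unsigned int start_blk;
	unsigned int end_blk;  /* inclusive */
};

static const struct dev_info devs[] = {
	{ .start_blk = 0,    .end_blk = 999  },
	{ .start_blk = 1000, .end_blk = 1999 },
	{ .start_blk = 2000, .end_blk = 2999 },
};

static unsigned int target_device_index(unsigned int blkaddr)
{
	for (unsigned int i = 0; i < sizeof(devs) / sizeof(devs[0]); i++)
		if (blkaddr >= devs[i].start_blk && blkaddr <= devs[i].end_blk)
			return i;
	return 0;
}

/* Mark every device touched by [blkaddr, blkaddr + blkcnt) as dirty. */
static void update_device_state(unsigned int blkaddr, unsigned int blkcnt)
{
	while (1) {
		unsigned int devidx = target_device_index(blkaddr);
		unsigned int blks = devs[devidx].end_blk - blkaddr + 1;

		printf("device %u dirtied for blocks %u..%u\n",
		       devidx, blkaddr,
		       blkaddr + (blkcnt <= blks ? blkcnt : blks) - 1);

		if (blkcnt <= blks)
			break;
		blkcnt -= blks;
		blkaddr += blks;
	}
}

int main(void)
{
	update_device_state(900, 1300);	/* spans devices 0, 1 and 2 */
	return 0;
}
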
@@ -3529,7 +3570,7 @@ reallocate:
goto reallocate;
}
- update_device_state(fio);
+ f2fs_update_device_state(fio->sbi, fio->ino, fio->new_blkaddr, 1);
if (keep_order)
up_read(&fio->sbi->io_order_lock);
@@ -3611,6 +3652,9 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
goto drop_bio;
}
+ invalidate_mapping_pages(META_MAPPING(sbi),
+ fio->new_blkaddr, fio->new_blkaddr);
+
stat_inc_inplace_blocks(fio->sbi);
if (fio->bio && !(SM_I(sbi)->ipu_policy & (1 << F2FS_IPU_NOCACHE)))
@@ -3618,7 +3662,8 @@ int f2fs_inplace_write_data(struct f2fs_io_info *fio)
else
err = f2fs_submit_page_bio(fio);
if (!err) {
- update_device_state(fio);
+ f2fs_update_device_state(fio->sbi, fio->ino,
+ fio->new_blkaddr, 1);
f2fs_update_iostat(fio->sbi, fio->io_type, F2FS_BLKSIZE);
}
diff --git a/fs/f2fs/segment.h b/fs/f2fs/segment.h
index 89fff258727d..46fde9f3f28e 100644
--- a/fs/f2fs/segment.h
+++ b/fs/f2fs/segment.h
@@ -314,6 +314,7 @@ struct curseg_info {
unsigned short next_blkoff; /* next block offset to write */
unsigned int zone; /* current zone number */
unsigned int next_segno; /* preallocated segment */
+ int fragment_remained_chunk; /* remaining blocks in the current chunk for block fragmentation mode */
bool inited; /* indicate inmem log is inited */
};
diff --git a/fs/f2fs/super.c b/fs/f2fs/super.c
index cf049a042482..040b6d02e1d8 100644
--- a/fs/f2fs/super.c
+++ b/fs/f2fs/super.c
@@ -58,6 +58,7 @@ const char *f2fs_fault_name[FAULT_MAX] = {
[FAULT_DISCARD] = "discard error",
[FAULT_WRITE_IO] = "write IO error",
[FAULT_SLAB_ALLOC] = "slab alloc",
+ [FAULT_DQUOT_INIT] = "dquot initialize",
};
void f2fs_build_fault_attr(struct f2fs_sb_info *sbi, unsigned int rate,
@@ -592,7 +593,7 @@ static int f2fs_set_zstd_level(struct f2fs_sb_info *sbi, const char *str)
if (kstrtouint(str + 1, 10, &level))
return -EINVAL;
- if (!level || level > ZSTD_maxCLevel()) {
+ if (!level || level > zstd_max_clevel()) {
f2fs_info(sbi, "invalid zstd compress level: %d", level);
return -EINVAL;
}
@@ -817,6 +818,10 @@ static int parse_options(struct super_block *sb, char *options, bool is_remount)
F2FS_OPTION(sbi).fs_mode = FS_MODE_ADAPTIVE;
} else if (!strcmp(name, "lfs")) {
F2FS_OPTION(sbi).fs_mode = FS_MODE_LFS;
+ } else if (!strcmp(name, "fragment:segment")) {
+ F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_SEG;
+ } else if (!strcmp(name, "fragment:block")) {
+ F2FS_OPTION(sbi).fs_mode = FS_MODE_FRAGMENT_BLK;
} else {
kfree(name);
return -EINVAL;
@@ -1292,7 +1297,7 @@ default_check:
/* Don't pass down write hints if the number of active logs is less
* than NR_CURSEG_PERSIST_TYPE.
*/
- if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_TYPE)
+ if (F2FS_OPTION(sbi).active_logs != NR_CURSEG_PERSIST_TYPE)
F2FS_OPTION(sbi).whint_mode = WHINT_MODE_OFF;
if (f2fs_sb_has_readonly(sbi) && !f2fs_readonly(sbi->sb)) {
@@ -1896,6 +1901,10 @@ static int f2fs_show_options(struct seq_file *seq, struct dentry *root)
seq_puts(seq, "adaptive");
else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_LFS)
seq_puts(seq, "lfs");
+ else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_SEG)
+ seq_puts(seq, "fragment:segment");
+ else if (F2FS_OPTION(sbi).fs_mode == FS_MODE_FRAGMENT_BLK)
+ seq_puts(seq, "fragment:block");
seq_printf(seq, ",active_logs=%u", F2FS_OPTION(sbi).active_logs);
if (test_opt(sbi, RESERVE_ROOT))
seq_printf(seq, ",reserve_root=%u,resuid=%u,resgid=%u",
@@ -2491,6 +2500,16 @@ retry:
return len - towrite;
}
+int f2fs_dquot_initialize(struct inode *inode)
+{
+ if (time_to_inject(F2FS_I_SB(inode), FAULT_DQUOT_INIT)) {
+ f2fs_show_injection_info(F2FS_I_SB(inode), FAULT_DQUOT_INIT);
+ return -ESRCH;
+ }
+
+ return dquot_initialize(inode);
+}
+
static struct dquot **f2fs_get_dquots(struct inode *inode)
{
return F2FS_I(inode)->i_dquot;
@@ -2875,6 +2894,11 @@ static const struct quotactl_ops f2fs_quotactl_ops = {
.get_nextdqblk = dquot_get_next_dqblk,
};
#else
+int f2fs_dquot_initialize(struct inode *inode)
+{
+ return 0;
+}
+
int f2fs_quota_sync(struct super_block *sb, int type)
{
return 0;
@@ -3486,7 +3510,7 @@ skip_cross:
NR_CURSEG_PERSIST_TYPE + nat_bits_blocks >= blocks_per_seg)) {
f2fs_warn(sbi, "Insane cp_payload: %u, nat_bits_blocks: %u)",
cp_payload, nat_bits_blocks);
- return -EFSCORRUPTED;
+ return 1;
}
if (unlikely(f2fs_cp_error(sbi))) {
@@ -3522,6 +3546,8 @@ static void init_sb_info(struct f2fs_sb_info *sbi)
sbi->max_victim_search = DEF_MAX_VICTIM_SEARCH;
sbi->migration_granularity = sbi->segs_per_sec;
sbi->seq_file_ra_mul = MIN_RA_MUL;
+ sbi->max_fragment_chunk = DEF_FRAGMENT_SIZE;
+ sbi->max_fragment_hole = DEF_FRAGMENT_SIZE;
sbi->dir_level = DEF_DIR_LEVEL;
sbi->interval_time[CP_TIME] = DEF_CP_INTERVAL;
@@ -3746,6 +3772,7 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
{
struct f2fs_super_block *raw_super = F2FS_RAW_SUPER(sbi);
unsigned int max_devices = MAX_DEVICES;
+ unsigned int logical_blksize;
int i;
/* Initialize single device information */
@@ -3766,6 +3793,9 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
if (!sbi->devs)
return -ENOMEM;
+ logical_blksize = bdev_logical_block_size(sbi->sb->s_bdev);
+ sbi->aligned_blksize = true;
+
for (i = 0; i < max_devices; i++) {
if (i > 0 && !RDEV(i).path[0])
@@ -3802,6 +3832,9 @@ static int f2fs_scan_devices(struct f2fs_sb_info *sbi)
/* to release errored devices */
sbi->s_ndevs = i + 1;
+ if (logical_blksize != bdev_logical_block_size(FDEV(i).bdev))
+ sbi->aligned_blksize = false;
+
#ifdef CONFIG_BLK_DEV_ZONED
if (bdev_zoned_model(FDEV(i).bdev) == BLK_ZONED_HM &&
!f2fs_sb_has_blkzoned(sbi)) {
@@ -4351,6 +4384,8 @@ free_node_inode:
free_stats:
f2fs_destroy_stats(sbi);
free_nm:
+ /* stop discard thread before destroying node manager */
+ f2fs_stop_discard_thread(sbi);
f2fs_destroy_node_manager(sbi);
free_sm:
f2fs_destroy_segment_manager(sbi);
diff --git a/fs/f2fs/sysfs.c b/fs/f2fs/sysfs.c
index a32fe31c33b8..7d289249cd7e 100644
--- a/fs/f2fs/sysfs.c
+++ b/fs/f2fs/sysfs.c
@@ -196,7 +196,7 @@ static ssize_t encoding_show(struct f2fs_attr *a,
struct super_block *sb = sbi->sb;
if (f2fs_sb_has_casefold(sbi))
- return snprintf(buf, PAGE_SIZE, "%s (%d.%d.%d)\n",
+ return sysfs_emit(buf, "%s (%d.%d.%d)\n",
sb->s_encoding->charset,
(sb->s_encoding->version >> 16) & 0xff,
(sb->s_encoding->version >> 8) & 0xff,
@@ -245,7 +245,7 @@ static ssize_t avg_vblocks_show(struct f2fs_attr *a,
static ssize_t main_blkaddr_show(struct f2fs_attr *a,
struct f2fs_sb_info *sbi, char *buf)
{
- return snprintf(buf, PAGE_SIZE, "%llu\n",
+ return sysfs_emit(buf, "%llu\n",
(unsigned long long)MAIN_BLKADDR(sbi));
}
@@ -551,6 +551,22 @@ out:
return count;
}
+ if (!strcmp(a->attr.name, "max_fragment_chunk")) {
+ if (t >= MIN_FRAGMENT_SIZE && t <= MAX_FRAGMENT_SIZE)
+ sbi->max_fragment_chunk = t;
+ else
+ return -EINVAL;
+ return count;
+ }
+
+ if (!strcmp(a->attr.name, "max_fragment_hole")) {
+ if (t >= MIN_FRAGMENT_SIZE && t <= MAX_FRAGMENT_SIZE)
+ sbi->max_fragment_hole = t;
+ else
+ return -EINVAL;
+ return count;
+ }
+
*ui = (unsigned int)t;
return count;
@@ -781,6 +797,8 @@ F2FS_RW_ATTR(ATGC_INFO, atgc_management, atgc_age_threshold, age_threshold);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, seq_file_ra_mul, seq_file_ra_mul);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_segment_mode, gc_segment_mode);
F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, gc_reclaimed_segments, gc_reclaimed_segs);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_fragment_chunk, max_fragment_chunk);
+F2FS_RW_ATTR(F2FS_SBI, f2fs_sb_info, max_fragment_hole, max_fragment_hole);
#define ATTR_LIST(name) (&f2fs_attr_##name.attr)
static struct attribute *f2fs_attrs[] = {
@@ -859,6 +877,8 @@ static struct attribute *f2fs_attrs[] = {
ATTR_LIST(seq_file_ra_mul),
ATTR_LIST(gc_segment_mode),
ATTR_LIST(gc_reclaimed_segments),
+ ATTR_LIST(max_fragment_chunk),
+ ATTR_LIST(max_fragment_hole),
NULL,
};
ATTRIBUTE_GROUPS(f2fs);
diff --git a/fs/f2fs/verity.c b/fs/f2fs/verity.c
index 03549b5ba204..fe5acdccaae1 100644
--- a/fs/f2fs/verity.c
+++ b/fs/f2fs/verity.c
@@ -136,7 +136,7 @@ static int f2fs_begin_enable_verity(struct file *filp)
* here and not rely on ->open() doing it. This must be done before
* evicting the inline data.
*/
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
return err;
diff --git a/fs/f2fs/xattr.c b/fs/f2fs/xattr.c
index 1d2d29dcd41c..e348f33bcb2b 100644
--- a/fs/f2fs/xattr.c
+++ b/fs/f2fs/xattr.c
@@ -773,7 +773,7 @@ int f2fs_setxattr(struct inode *inode, int index, const char *name,
if (!f2fs_is_checkpoint_ready(sbi))
return -ENOSPC;
- err = dquot_initialize(inode);
+ err = f2fs_dquot_initialize(inode);
if (err)
return err;
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 7235d539e969..d67108489148 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -940,7 +940,7 @@ do_alloc:
else if (height == ip->i_height)
ret = gfs2_hole_size(inode, lblock, len, mp, iomap);
else
- iomap->length = size - pos;
+ iomap->length = size - iomap->offset;
} else if (flags & IOMAP_WRITE) {
u64 alloc_size;
diff --git a/fs/gfs2/file.c b/fs/gfs2/file.c
index adafaaf7d24d..3e718cfc19a7 100644
--- a/fs/gfs2/file.c
+++ b/fs/gfs2/file.c
@@ -773,8 +773,8 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
size_t *prev_count,
size_t *window_size)
{
- char __user *p = i->iov[0].iov_base + i->iov_offset;
size_t count = iov_iter_count(i);
+ char __user *p;
int pages = 1;
if (likely(!count))
@@ -787,14 +787,14 @@ static inline bool should_fault_in_pages(ssize_t ret, struct iov_iter *i,
if (*prev_count != count || !*window_size) {
int pages, nr_dirtied;
- pages = min_t(int, BIO_MAX_VECS,
- DIV_ROUND_UP(iov_iter_count(i), PAGE_SIZE));
+ pages = min_t(int, BIO_MAX_VECS, DIV_ROUND_UP(count, PAGE_SIZE));
nr_dirtied = max(current->nr_dirtied_pause -
current->nr_dirtied, 1);
pages = min(pages, nr_dirtied);
}
*prev_count = count;
+ p = i->iov[0].iov_base + i->iov_offset;
*window_size = (size_t)PAGE_SIZE * pages - offset_in_page(p);
return true;
}
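
Aside from moving the dereference of p past the early returns, the window arithmetic kept at the end of this helper is unchanged: whole pages covering the request, capped at BIO_MAX_VECS and the remaining dirty-page allowance, minus the first byte's offset within its page. A quick standalone sketch of that arithmetic (PAGE_SIZE = 4096 and BIO_MAX_VECS = 256 are assumed values here):

#include <stdio.h>
#include <stddef.h>
#include <stdint.h>

#define PAGE_SIZE    4096UL
#define BIO_MAX_VECS 256          /* assumed value for illustration */

static size_t fault_in_window(size_t count, uintptr_t user_addr, int dirty_allowance)
{
	size_t pages = (count + PAGE_SIZE - 1) / PAGE_SIZE;   /* DIV_ROUND_UP */

	if (pages > BIO_MAX_VECS)
		pages = BIO_MAX_VECS;
	if (pages > (size_t)dirty_allowance)
		pages = (size_t)dirty_allowance;
	/* whole pages minus the sub-page offset of the first byte */
	return PAGE_SIZE * pages - (user_addr & (PAGE_SIZE - 1));
}

int main(void)
{
	/* e.g. a 10000-byte write starting 100 bytes into a page */
	printf("window = %zu bytes\n", fault_in_window(10000, 0x1000 + 100, 32));
	return 0;
}
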
@@ -1013,6 +1013,7 @@ static ssize_t gfs2_file_buffered_write(struct kiocb *iocb,
struct gfs2_sbd *sdp = GFS2_SB(inode);
struct gfs2_holder *statfs_gh = NULL;
size_t prev_count = 0, window_size = 0;
+ size_t orig_count = iov_iter_count(from);
size_t read = 0;
ssize_t ret;
@@ -1057,6 +1058,7 @@ retry_under_glock:
if (inode == sdp->sd_rindex)
gfs2_glock_dq_uninit(statfs_gh);
+ from->count = orig_count - read;
if (should_fault_in_pages(ret, from, &prev_count, &window_size)) {
size_t leftover;
@@ -1064,6 +1066,7 @@ retry_under_glock:
leftover = fault_in_iov_iter_readable(from, window_size);
gfs2_holder_disallow_demote(gh);
if (leftover != window_size) {
+ from->count = min(from->count, window_size - leftover);
if (!gfs2_holder_queued(gh)) {
if (read)
goto out_uninit;
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 19f38aee1b61..8dbd6fe66420 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -411,14 +411,14 @@ static void do_error(struct gfs2_glock *gl, const int ret)
static void demote_incompat_holders(struct gfs2_glock *gl,
struct gfs2_holder *new_gh)
{
- struct gfs2_holder *gh;
+ struct gfs2_holder *gh, *tmp;
/*
* Demote incompatible holders before we make ourselves eligible.
* (This holder may or may not allow auto-demoting, but we don't want
* to demote the new holder before it's even granted.)
*/
- list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+ list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
/*
* Since holders are at the front of the list, we stop when we
* find the first non-holder.
@@ -496,7 +496,7 @@ again:
* Since we unlock the lockref lock, we set a flag to indicate
* instantiate is in progress.
*/
- if (test_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
+ if (test_and_set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags)) {
wait_on_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG,
TASK_UNINTERRUPTIBLE);
/*
@@ -509,14 +509,10 @@ again:
goto again;
}
- set_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
-
ret = glops->go_instantiate(gh);
if (!ret)
clear_bit(GLF_INSTANTIATE_NEEDED, &gl->gl_flags);
- clear_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
- smp_mb__after_atomic();
- wake_up_bit(&gl->gl_flags, GLF_INSTANTIATE_IN_PROG);
+ clear_and_wake_up_bit(GLF_INSTANTIATE_IN_PROG, &gl->gl_flags);
return ret;
}
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 5b121371508a..0f93e8beca4d 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -1402,13 +1402,6 @@ out:
gfs2_ordered_del_inode(ip);
clear_inode(inode);
gfs2_dir_hash_inval(ip);
- if (ip->i_gl) {
- glock_clear_object(ip->i_gl, ip);
- wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
- gfs2_glock_add_to_lru(ip->i_gl);
- gfs2_glock_put_eventually(ip->i_gl);
- ip->i_gl = NULL;
- }
if (gfs2_holder_initialized(&ip->i_iopen_gh)) {
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
@@ -1421,6 +1414,13 @@ out:
gfs2_holder_uninit(&ip->i_iopen_gh);
gfs2_glock_put_eventually(gl);
}
+ if (ip->i_gl) {
+ glock_clear_object(ip->i_gl, ip);
+ wait_on_bit_io(&ip->i_flags, GIF_GLOP_PENDING, TASK_UNINTERRUPTIBLE);
+ gfs2_glock_add_to_lru(ip->i_gl);
+ gfs2_glock_put_eventually(ip->i_gl);
+ ip->i_gl = NULL;
+ }
}
static struct inode *gfs2_alloc_inode(struct super_block *sb)
diff --git a/fs/io-wq.c b/fs/io-wq.c
index afd955d53db9..88202de519f6 100644
--- a/fs/io-wq.c
+++ b/fs/io-wq.c
@@ -423,9 +423,10 @@ static inline unsigned int io_get_work_hash(struct io_wq_work *work)
return work->flags >> IO_WQ_HASH_SHIFT;
}
-static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
+static bool io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
{
struct io_wq *wq = wqe->wq;
+ bool ret = false;
spin_lock_irq(&wq->hash->wait.lock);
if (list_empty(&wqe->wait.entry)) {
@@ -433,9 +434,11 @@ static void io_wait_on_hash(struct io_wqe *wqe, unsigned int hash)
if (!test_bit(hash, &wq->hash->map)) {
__set_current_state(TASK_RUNNING);
list_del_init(&wqe->wait.entry);
+ ret = true;
}
}
spin_unlock_irq(&wq->hash->wait.lock);
+ return ret;
}
static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
@@ -475,14 +478,21 @@ static struct io_wq_work *io_get_next_work(struct io_wqe_acct *acct,
}
if (stall_hash != -1U) {
+ bool unstalled;
+
/*
* Set this before dropping the lock to avoid racing with new
* work being added and clearing the stalled bit.
*/
set_bit(IO_ACCT_STALLED_BIT, &acct->flags);
raw_spin_unlock(&wqe->lock);
- io_wait_on_hash(wqe, stall_hash);
+ unstalled = io_wait_on_hash(wqe, stall_hash);
raw_spin_lock(&wqe->lock);
+ if (unstalled) {
+ clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
+ if (wq_has_sleeper(&wqe->wq->hash->wait))
+ wake_up(&wqe->wq->hash->wait);
+ }
}
return NULL;
@@ -564,8 +574,11 @@ get_next:
io_wqe_enqueue(wqe, linked);
if (hash != -1U && !next_hashed) {
+ /* serialize hash clear with wake_up() */
+ spin_lock_irq(&wq->hash->wait.lock);
clear_bit(hash, &wq->hash->map);
clear_bit(IO_ACCT_STALLED_BIT, &acct->flags);
+ spin_unlock_irq(&wq->hash->wait.lock);
if (wq_has_sleeper(&wq->hash->wait))
wake_up(&wq->hash->wait);
raw_spin_lock(&wqe->lock);
diff --git a/fs/ksmbd/Kconfig b/fs/ksmbd/Kconfig
index b83cbd756ae5..e1fe17747ed6 100644
--- a/fs/ksmbd/Kconfig
+++ b/fs/ksmbd/Kconfig
@@ -6,7 +6,6 @@ config SMB_SERVER
select NLS
select NLS_UTF8
select CRYPTO
- select CRYPTO_MD4
select CRYPTO_MD5
select CRYPTO_HMAC
select CRYPTO_ECB
@@ -19,6 +18,7 @@ config SMB_SERVER
select CRYPTO_GCM
select ASN1
select OID_REGISTRY
+ select CRC32
default n
help
Choose Y here if you want to allow SMB3 compliant clients
diff --git a/fs/ksmbd/auth.c b/fs/ksmbd/auth.c
index 30a92ddc1817..3503b1c48cb4 100644
--- a/fs/ksmbd/auth.c
+++ b/fs/ksmbd/auth.c
@@ -873,9 +873,9 @@ int ksmbd_gen_preauth_integrity_hash(struct ksmbd_conn *conn, char *buf,
__u8 *pi_hash)
{
int rc;
- struct smb2_hdr *rcv_hdr = (struct smb2_hdr *)buf;
+ struct smb2_hdr *rcv_hdr = smb2_get_msg(buf);
char *all_bytes_msg = (char *)&rcv_hdr->ProtocolId;
- int msg_size = be32_to_cpu(rcv_hdr->smb2_buf_length);
+ int msg_size = get_rfc1002_len(buf);
struct ksmbd_crypto_ctx *ctx = NULL;
if (conn->preauth_info->Preauth_HashId !=
@@ -983,7 +983,7 @@ static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
u8 *sign)
{
struct scatterlist *sg;
- unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 24;
+ unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
int i, nr_entries[3] = {0}, total_entries = 0, sg_idx = 0;
if (!nvec)
@@ -1047,9 +1047,8 @@ static struct scatterlist *ksmbd_init_sg(struct kvec *iov, unsigned int nvec,
int ksmbd_crypt_message(struct ksmbd_conn *conn, struct kvec *iov,
unsigned int nvec, int enc)
{
- struct smb2_transform_hdr *tr_hdr =
- (struct smb2_transform_hdr *)iov[0].iov_base;
- unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 24;
+ struct smb2_transform_hdr *tr_hdr = smb2_get_msg(iov[0].iov_base);
+ unsigned int assoc_data_len = sizeof(struct smb2_transform_hdr) - 20;
int rc;
struct scatterlist *sg;
u8 sign[SMB2_SIGNATURE_SIZE] = {};
diff --git a/fs/ksmbd/connection.c b/fs/ksmbd/connection.c
index b57a0d8a392f..83a94d0bb480 100644
--- a/fs/ksmbd/connection.c
+++ b/fs/ksmbd/connection.c
@@ -158,26 +158,25 @@ void ksmbd_conn_wait_idle(struct ksmbd_conn *conn)
int ksmbd_conn_write(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb_hdr *rsp_hdr = work->response_buf;
size_t len = 0;
int sent;
struct kvec iov[3];
int iov_idx = 0;
ksmbd_conn_try_dequeue_request(work);
- if (!rsp_hdr) {
+ if (!work->response_buf) {
pr_err("NULL response header\n");
return -EINVAL;
}
if (work->tr_buf) {
iov[iov_idx] = (struct kvec) { work->tr_buf,
- sizeof(struct smb2_transform_hdr) };
+ sizeof(struct smb2_transform_hdr) + 4 };
len += iov[iov_idx++].iov_len;
}
if (work->aux_payload_sz) {
- iov[iov_idx] = (struct kvec) { rsp_hdr, work->resp_hdr_sz };
+ iov[iov_idx] = (struct kvec) { work->response_buf, work->resp_hdr_sz };
len += iov[iov_idx++].iov_len;
iov[iov_idx] = (struct kvec) { work->aux_payload_buf, work->aux_payload_sz };
len += iov[iov_idx++].iov_len;
@@ -185,8 +184,8 @@ int ksmbd_conn_write(struct ksmbd_work *work)
if (work->tr_buf)
iov[iov_idx].iov_len = work->resp_hdr_sz;
else
- iov[iov_idx].iov_len = get_rfc1002_len(rsp_hdr) + 4;
- iov[iov_idx].iov_base = rsp_hdr;
+ iov[iov_idx].iov_len = get_rfc1002_len(work->response_buf) + 4;
+ iov[iov_idx].iov_base = work->response_buf;
len += iov[iov_idx++].iov_len;
}
diff --git a/fs/ksmbd/ksmbd_work.c b/fs/ksmbd/ksmbd_work.c
index fd58eb4809f6..14b9caebf7a4 100644
--- a/fs/ksmbd/ksmbd_work.c
+++ b/fs/ksmbd/ksmbd_work.c
@@ -69,7 +69,6 @@ int ksmbd_workqueue_init(void)
void ksmbd_workqueue_destroy(void)
{
- flush_workqueue(ksmbd_wq);
destroy_workqueue(ksmbd_wq);
ksmbd_wq = NULL;
}
diff --git a/fs/ksmbd/ksmbd_work.h b/fs/ksmbd/ksmbd_work.h
index f7156bc50049..5ece58e40c97 100644
--- a/fs/ksmbd/ksmbd_work.h
+++ b/fs/ksmbd/ksmbd_work.h
@@ -92,7 +92,7 @@ struct ksmbd_work {
*/
static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
{
- return work->response_buf + work->next_smb2_rsp_hdr_off;
+ return work->response_buf + work->next_smb2_rsp_hdr_off + 4;
}
/**
@@ -101,7 +101,7 @@ static inline void *ksmbd_resp_buf_next(struct ksmbd_work *work)
*/
static inline void *ksmbd_req_buf_next(struct ksmbd_work *work)
{
- return work->request_buf + work->next_smb2_rcv_hdr_off;
+ return work->request_buf + work->next_smb2_rcv_hdr_off + 4;
}
struct ksmbd_work *ksmbd_alloc_work_struct(void);
diff --git a/fs/ksmbd/oplock.c b/fs/ksmbd/oplock.c
index f9dae6ef2115..077b8761d099 100644
--- a/fs/ksmbd/oplock.c
+++ b/fs/ksmbd/oplock.c
@@ -629,10 +629,10 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
return;
}
- rsp_hdr = work->response_buf;
+ rsp_hdr = smb2_get_msg(work->response_buf);
memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
- rsp_hdr->smb2_buf_length =
- cpu_to_be32(smb2_hdr_size_no_buflen(conn->vals));
+ *(__be32 *)work->response_buf =
+ cpu_to_be32(conn->vals->header_size);
rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->CreditRequest = cpu_to_le16(0);
@@ -645,7 +645,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
rsp_hdr->SessionId = 0;
memset(rsp_hdr->Signature, 0, 16);
- rsp = work->response_buf;
+ rsp = smb2_get_msg(work->response_buf);
rsp->StructureSize = cpu_to_le16(24);
if (!br_info->open_trunc &&
@@ -659,7 +659,7 @@ static void __smb2_oplock_break_noti(struct work_struct *wk)
rsp->PersistentFid = cpu_to_le64(fp->persistent_id);
rsp->VolatileFid = cpu_to_le64(fp->volatile_id);
- inc_rfc1001_len(rsp, 24);
+ inc_rfc1001_len(work->response_buf, 24);
ksmbd_debug(OPLOCK,
"sending oplock break v_id %llu p_id = %llu lock level = %d\n",
@@ -736,10 +736,10 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
return;
}
- rsp_hdr = work->response_buf;
+ rsp_hdr = smb2_get_msg(work->response_buf);
memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
- rsp_hdr->smb2_buf_length =
- cpu_to_be32(smb2_hdr_size_no_buflen(conn->vals));
+ *(__be32 *)work->response_buf =
+ cpu_to_be32(conn->vals->header_size);
rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->CreditRequest = cpu_to_le16(0);
@@ -752,7 +752,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
rsp_hdr->SessionId = 0;
memset(rsp_hdr->Signature, 0, 16);
- rsp = work->response_buf;
+ rsp = smb2_get_msg(work->response_buf);
rsp->StructureSize = cpu_to_le16(44);
rsp->Epoch = br_info->epoch;
rsp->Flags = 0;
@@ -768,7 +768,7 @@ static void __smb2_lease_break_noti(struct work_struct *wk)
rsp->AccessMaskHint = 0;
rsp->ShareMaskHint = 0;
- inc_rfc1001_len(rsp, 44);
+ inc_rfc1001_len(work->response_buf, 44);
ksmbd_conn_write(work);
ksmbd_free_work_struct(work);
@@ -1335,19 +1335,16 @@ __u8 smb2_map_lease_to_oplock(__le32 lease_state)
*/
void create_lease_buf(u8 *rbuf, struct lease *lease)
{
- char *LeaseKey = (char *)&lease->lease_key;
-
if (lease->version == 2) {
struct create_lease_v2 *buf = (struct create_lease_v2 *)rbuf;
- char *ParentLeaseKey = (char *)&lease->parent_lease_key;
memset(buf, 0, sizeof(struct create_lease_v2));
- buf->lcontext.LeaseKeyLow = *((__le64 *)LeaseKey);
- buf->lcontext.LeaseKeyHigh = *((__le64 *)(LeaseKey + 8));
+ memcpy(buf->lcontext.LeaseKey, lease->lease_key,
+ SMB2_LEASE_KEY_SIZE);
buf->lcontext.LeaseFlags = lease->flags;
buf->lcontext.LeaseState = lease->state;
- buf->lcontext.ParentLeaseKeyLow = *((__le64 *)ParentLeaseKey);
- buf->lcontext.ParentLeaseKeyHigh = *((__le64 *)(ParentLeaseKey + 8));
+ memcpy(buf->lcontext.ParentLeaseKey, lease->parent_lease_key,
+ SMB2_LEASE_KEY_SIZE);
buf->ccontext.DataOffset = cpu_to_le16(offsetof
(struct create_lease_v2, lcontext));
buf->ccontext.DataLength = cpu_to_le32(sizeof(struct lease_context_v2));
@@ -1362,8 +1359,7 @@ void create_lease_buf(u8 *rbuf, struct lease *lease)
struct create_lease *buf = (struct create_lease *)rbuf;
memset(buf, 0, sizeof(struct create_lease));
- buf->lcontext.LeaseKeyLow = *((__le64 *)LeaseKey);
- buf->lcontext.LeaseKeyHigh = *((__le64 *)(LeaseKey + 8));
+ memcpy(buf->lcontext.LeaseKey, lease->lease_key, SMB2_LEASE_KEY_SIZE);
buf->lcontext.LeaseFlags = lease->flags;
buf->lcontext.LeaseState = lease->state;
buf->ccontext.DataOffset = cpu_to_le16(offsetof
@@ -1398,7 +1394,7 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
if (!lreq)
return NULL;
- data_offset = (char *)req + 4 + le32_to_cpu(req->CreateContextsOffset);
+ data_offset = (char *)req + le32_to_cpu(req->CreateContextsOffset);
cc = (struct create_context *)data_offset;
do {
cc = (struct create_context *)((char *)cc + next);
@@ -1416,19 +1412,17 @@ struct lease_ctx_info *parse_lease_state(void *open_req)
if (sizeof(struct lease_context_v2) == le32_to_cpu(cc->DataLength)) {
struct create_lease_v2 *lc = (struct create_lease_v2 *)cc;
- *((__le64 *)lreq->lease_key) = lc->lcontext.LeaseKeyLow;
- *((__le64 *)(lreq->lease_key + 8)) = lc->lcontext.LeaseKeyHigh;
+ memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lreq->req_state = lc->lcontext.LeaseState;
lreq->flags = lc->lcontext.LeaseFlags;
lreq->duration = lc->lcontext.LeaseDuration;
- *((__le64 *)lreq->parent_lease_key) = lc->lcontext.ParentLeaseKeyLow;
- *((__le64 *)(lreq->parent_lease_key + 8)) = lc->lcontext.ParentLeaseKeyHigh;
+ memcpy(lreq->parent_lease_key, lc->lcontext.ParentLeaseKey,
+ SMB2_LEASE_KEY_SIZE);
lreq->version = 2;
} else {
struct create_lease *lc = (struct create_lease *)cc;
- *((__le64 *)lreq->lease_key) = lc->lcontext.LeaseKeyLow;
- *((__le64 *)(lreq->lease_key + 8)) = lc->lcontext.LeaseKeyHigh;
+ memcpy(lreq->lease_key, lc->lcontext.LeaseKey, SMB2_LEASE_KEY_SIZE);
lreq->req_state = lc->lcontext.LeaseState;
lreq->flags = lc->lcontext.LeaseFlags;
lreq->duration = lc->lcontext.LeaseDuration;
@@ -1462,7 +1456,7 @@ struct create_context *smb2_find_context_vals(void *open_req, const char *tag)
* CreateContextsOffset and CreateContextsLength are guaranteed to
* be valid because of ksmbd_smb2_check_message().
*/
- cc = (struct create_context *)((char *)req + 4 +
+ cc = (struct create_context *)((char *)req +
le32_to_cpu(req->CreateContextsOffset));
remain_len = le32_to_cpu(req->CreateContextsLength);
do {
diff --git a/fs/ksmbd/oplock.h b/fs/ksmbd/oplock.h
index 119b8047cfbd..0cf7a2b5bbc0 100644
--- a/fs/ksmbd/oplock.h
+++ b/fs/ksmbd/oplock.h
@@ -28,8 +28,6 @@
#define OPLOCK_WRITE_TO_NONE 0x04
#define OPLOCK_READ_TO_NONE 0x08
-#define SMB2_LEASE_KEY_SIZE 16
-
struct lease_ctx_info {
__u8 lease_key[SMB2_LEASE_KEY_SIZE];
__le32 req_state;
diff --git a/fs/ksmbd/server.c b/fs/ksmbd/server.c
index 2a2b2135bfde..2e12f6d8483b 100644
--- a/fs/ksmbd/server.c
+++ b/fs/ksmbd/server.c
@@ -622,7 +622,6 @@ MODULE_DESCRIPTION("Linux kernel CIFS/SMB SERVER");
MODULE_LICENSE("GPL");
MODULE_SOFTDEP("pre: ecb");
MODULE_SOFTDEP("pre: hmac");
-MODULE_SOFTDEP("pre: md4");
MODULE_SOFTDEP("pre: md5");
MODULE_SOFTDEP("pre: nls");
MODULE_SOFTDEP("pre: aes");
@@ -632,5 +631,6 @@ MODULE_SOFTDEP("pre: sha512");
MODULE_SOFTDEP("pre: aead2");
MODULE_SOFTDEP("pre: ccm");
MODULE_SOFTDEP("pre: gcm");
+MODULE_SOFTDEP("pre: crc32");
module_init(ksmbd_server_init)
module_exit(ksmbd_server_exit)
diff --git a/fs/ksmbd/smb2misc.c b/fs/ksmbd/smb2misc.c
index 030ca57c3784..50d0b1022289 100644
--- a/fs/ksmbd/smb2misc.c
+++ b/fs/ksmbd/smb2misc.c
@@ -6,7 +6,6 @@
#include "glob.h"
#include "nterr.h"
-#include "smb2pdu.h"
#include "smb_common.h"
#include "smbstatus.h"
#include "mgmt/user_session.h"
@@ -347,23 +346,16 @@ static int smb2_validate_credit_charge(struct ksmbd_conn *conn,
int ksmbd_smb2_check_message(struct ksmbd_work *work)
{
- struct smb2_pdu *pdu = work->request_buf;
+ struct smb2_pdu *pdu = ksmbd_req_buf_next(work);
struct smb2_hdr *hdr = &pdu->hdr;
int command;
__u32 clc_len; /* calculated length */
- __u32 len = get_rfc1002_len(pdu);
+ __u32 len = get_rfc1002_len(work->request_buf);
- if (work->next_smb2_rcv_hdr_off) {
- pdu = ksmbd_req_buf_next(work);
- hdr = &pdu->hdr;
- }
-
- if (le32_to_cpu(hdr->NextCommand) > 0) {
+ if (le32_to_cpu(hdr->NextCommand) > 0)
len = le32_to_cpu(hdr->NextCommand);
- } else if (work->next_smb2_rcv_hdr_off) {
+ else if (work->next_smb2_rcv_hdr_off)
len -= work->next_smb2_rcv_hdr_off;
- len = round_up(len, 8);
- }
if (check_smb2_hdr(hdr))
return 1;
diff --git a/fs/ksmbd/smb2ops.c b/fs/ksmbd/smb2ops.c
index fb6a65d23139..0a5d8450e835 100644
--- a/fs/ksmbd/smb2ops.c
+++ b/fs/ksmbd/smb2ops.c
@@ -6,7 +6,6 @@
#include <linux/slab.h>
#include "glob.h"
-#include "smb2pdu.h"
#include "auth.h"
#include "connection.h"
@@ -199,7 +198,7 @@ void init_smb2_1_server(struct ksmbd_conn *conn)
conn->cmds = smb2_0_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
conn->max_credits = SMB2_MAX_CREDITS;
- conn->signing_algorithm = SIGNING_ALG_HMAC_SHA256;
+ conn->signing_algorithm = SIGNING_ALG_HMAC_SHA256_LE;
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
@@ -217,7 +216,7 @@ void init_smb3_0_server(struct ksmbd_conn *conn)
conn->cmds = smb2_0_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
conn->max_credits = SMB2_MAX_CREDITS;
- conn->signing_algorithm = SIGNING_ALG_AES_CMAC;
+ conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
@@ -242,7 +241,7 @@ void init_smb3_02_server(struct ksmbd_conn *conn)
conn->cmds = smb2_0_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
conn->max_credits = SMB2_MAX_CREDITS;
- conn->signing_algorithm = SIGNING_ALG_AES_CMAC;
+ conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
@@ -267,7 +266,7 @@ int init_smb3_11_server(struct ksmbd_conn *conn)
conn->cmds = smb2_0_server_cmds;
conn->max_cmds = ARRAY_SIZE(smb2_0_server_cmds);
conn->max_credits = SMB2_MAX_CREDITS;
- conn->signing_algorithm = SIGNING_ALG_AES_CMAC;
+ conn->signing_algorithm = SIGNING_ALG_AES_CMAC_LE;
if (server_conf.flags & KSMBD_GLOBAL_FLAG_SMB2_LEASES)
conn->vals->capabilities |= SMB2_GLOBAL_CAP_LEASING;
diff --git a/fs/ksmbd/smb2pdu.c b/fs/ksmbd/smb2pdu.c
index 7e448df3f847..121f8e8c70ac 100644
--- a/fs/ksmbd/smb2pdu.c
+++ b/fs/ksmbd/smb2pdu.c
@@ -13,7 +13,6 @@
#include <linux/falloc.h>
#include "glob.h"
-#include "smb2pdu.h"
#include "smbfsctl.h"
#include "oplock.h"
#include "smbacl.h"
@@ -44,8 +43,8 @@ static void __wbuf(struct ksmbd_work *work, void **req, void **rsp)
*req = ksmbd_req_buf_next(work);
*rsp = ksmbd_resp_buf_next(work);
} else {
- *req = work->request_buf;
- *rsp = work->response_buf;
+ *req = smb2_get_msg(work->request_buf);
+ *rsp = smb2_get_msg(work->response_buf);
}
}
@@ -93,13 +92,14 @@ struct channel *lookup_chann_list(struct ksmbd_session *sess, struct ksmbd_conn
*/
int smb2_get_ksmbd_tcon(struct ksmbd_work *work)
{
- struct smb2_hdr *req_hdr = work->request_buf;
+ struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
+ unsigned int cmd = le16_to_cpu(req_hdr->Command);
int tree_id;
work->tcon = NULL;
- if (work->conn->ops->get_cmd_val(work) == SMB2_TREE_CONNECT_HE ||
- work->conn->ops->get_cmd_val(work) == SMB2_CANCEL_HE ||
- work->conn->ops->get_cmd_val(work) == SMB2_LOGOFF_HE) {
+ if (cmd == SMB2_TREE_CONNECT_HE ||
+ cmd == SMB2_CANCEL_HE ||
+ cmd == SMB2_LOGOFF_HE) {
ksmbd_debug(SMB, "skip to check tree connect request\n");
return 0;
}
@@ -130,7 +130,7 @@ void smb2_set_err_rsp(struct ksmbd_work *work)
if (work->next_smb2_rcv_hdr_off)
err_rsp = ksmbd_resp_buf_next(work);
else
- err_rsp = work->response_buf;
+ err_rsp = smb2_get_msg(work->response_buf);
if (err_rsp->hdr.Status != STATUS_STOPPED_ON_SYMLINK) {
err_rsp->StructureSize = SMB2_ERROR_STRUCTURE_SIZE2_LE;
@@ -150,7 +150,7 @@ void smb2_set_err_rsp(struct ksmbd_work *work)
*/
bool is_smb2_neg_cmd(struct ksmbd_work *work)
{
- struct smb2_hdr *hdr = work->request_buf;
+ struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
/* is it SMB2 header ? */
if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
@@ -174,7 +174,7 @@ bool is_smb2_neg_cmd(struct ksmbd_work *work)
*/
bool is_smb2_rsp(struct ksmbd_work *work)
{
- struct smb2_hdr *hdr = work->response_buf;
+ struct smb2_hdr *hdr = smb2_get_msg(work->response_buf);
/* is it SMB2 header ? */
if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
@@ -200,7 +200,7 @@ u16 get_smb2_cmd_val(struct ksmbd_work *work)
if (work->next_smb2_rcv_hdr_off)
rcv_hdr = ksmbd_req_buf_next(work);
else
- rcv_hdr = work->request_buf;
+ rcv_hdr = smb2_get_msg(work->request_buf);
return le16_to_cpu(rcv_hdr->Command);
}
@@ -216,7 +216,7 @@ void set_smb2_rsp_status(struct ksmbd_work *work, __le32 err)
if (work->next_smb2_rcv_hdr_off)
rsp_hdr = ksmbd_resp_buf_next(work);
else
- rsp_hdr = work->response_buf;
+ rsp_hdr = smb2_get_msg(work->response_buf);
rsp_hdr->Status = err;
smb2_set_err_rsp(work);
}
@@ -237,13 +237,11 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
if (conn->need_neg == false)
return -EINVAL;
- rsp_hdr = work->response_buf;
+ *(__be32 *)work->response_buf =
+ cpu_to_be32(conn->vals->header_size);
+ rsp_hdr = smb2_get_msg(work->response_buf);
memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
-
- rsp_hdr->smb2_buf_length =
- cpu_to_be32(smb2_hdr_size_no_buflen(conn->vals));
-
rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->CreditRequest = cpu_to_le16(2);
@@ -256,7 +254,7 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
rsp_hdr->SessionId = 0;
memset(rsp_hdr->Signature, 0, 16);
- rsp = work->response_buf;
+ rsp = smb2_get_msg(work->response_buf);
WARN_ON(ksmbd_conn_good(work));
@@ -277,12 +275,12 @@ int init_smb2_neg_rsp(struct ksmbd_work *work)
rsp->SecurityBufferOffset = cpu_to_le16(128);
rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
- ksmbd_copy_gss_neg_header(((char *)(&rsp->hdr) +
- sizeof(rsp->hdr.smb2_buf_length)) +
+ ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
le16_to_cpu(rsp->SecurityBufferOffset));
- inc_rfc1001_len(rsp, sizeof(struct smb2_negotiate_rsp) -
- sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
- AUTH_GSS_LENGTH);
+ inc_rfc1001_len(work->response_buf,
+ sizeof(struct smb2_negotiate_rsp) -
+ sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
+ AUTH_GSS_LENGTH);
rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
if (server_conf.signing == KSMBD_CONFIG_OPT_MANDATORY)
rsp->SecurityMode |= SMB2_NEGOTIATE_SIGNING_REQUIRED_LE;
@@ -387,8 +385,8 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
next_hdr_offset = le32_to_cpu(req->NextCommand);
new_len = ALIGN(len, 8);
- inc_rfc1001_len(work->response_buf, ((sizeof(struct smb2_hdr) - 4)
- + new_len - len));
+ inc_rfc1001_len(work->response_buf,
+ sizeof(struct smb2_hdr) + new_len - len);
rsp->NextCommand = cpu_to_le32(new_len);
work->next_smb2_rcv_hdr_off += next_hdr_offset;
@@ -406,7 +404,7 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
work->compound_fid = KSMBD_NO_FID;
work->compound_pfid = KSMBD_NO_FID;
}
- memset((char *)rsp_hdr + 4, 0, sizeof(struct smb2_hdr) + 2);
+ memset((char *)rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
rsp_hdr->ProtocolId = SMB2_PROTO_NUMBER;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->Command = rcv_hdr->Command;
@@ -432,7 +430,7 @@ static void init_chained_smb2_rsp(struct ksmbd_work *work)
*/
bool is_chained_smb2_message(struct ksmbd_work *work)
{
- struct smb2_hdr *hdr = work->request_buf;
+ struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
unsigned int len, next_cmd;
if (hdr->ProtocolId != SMB2_PROTO_NUMBER)
@@ -483,13 +481,13 @@ bool is_chained_smb2_message(struct ksmbd_work *work)
*/
int init_smb2_rsp_hdr(struct ksmbd_work *work)
{
- struct smb2_hdr *rsp_hdr = work->response_buf;
- struct smb2_hdr *rcv_hdr = work->request_buf;
+ struct smb2_hdr *rsp_hdr = smb2_get_msg(work->response_buf);
+ struct smb2_hdr *rcv_hdr = smb2_get_msg(work->request_buf);
struct ksmbd_conn *conn = work->conn;
memset(rsp_hdr, 0, sizeof(struct smb2_hdr) + 2);
- rsp_hdr->smb2_buf_length =
- cpu_to_be32(smb2_hdr_size_no_buflen(conn->vals));
+ *(__be32 *)work->response_buf =
+ cpu_to_be32(conn->vals->header_size);
rsp_hdr->ProtocolId = rcv_hdr->ProtocolId;
rsp_hdr->StructureSize = SMB2_HEADER_STRUCTURE_SIZE;
rsp_hdr->Command = rcv_hdr->Command;
@@ -522,7 +520,7 @@ int init_smb2_rsp_hdr(struct ksmbd_work *work)
*/
int smb2_allocate_rsp_buf(struct ksmbd_work *work)
{
- struct smb2_hdr *hdr = work->request_buf;
+ struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
size_t small_sz = MAX_CIFS_SMALL_BUFFER_SIZE;
size_t large_sz = small_sz + work->conn->vals->max_trans_size;
size_t sz = small_sz;
@@ -534,7 +532,7 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
if (cmd == SMB2_QUERY_INFO_HE) {
struct smb2_query_info_req *req;
- req = work->request_buf;
+ req = smb2_get_msg(work->request_buf);
if (req->InfoType == SMB2_O_INFO_FILE &&
(req->FileInfoClass == FILE_FULL_EA_INFORMATION ||
req->FileInfoClass == FILE_ALL_INFORMATION))
@@ -561,7 +559,7 @@ int smb2_allocate_rsp_buf(struct ksmbd_work *work)
*/
int smb2_check_user_session(struct ksmbd_work *work)
{
- struct smb2_hdr *req_hdr = work->request_buf;
+ struct smb2_hdr *req_hdr = smb2_get_msg(work->request_buf);
struct ksmbd_conn *conn = work->conn;
unsigned int cmd = conn->ops->get_cmd_val(work);
unsigned long long sess_id;
@@ -642,7 +640,7 @@ int setup_async_work(struct ksmbd_work *work, void (*fn)(void **), void **arg)
struct ksmbd_conn *conn = work->conn;
int id;
- rsp_hdr = work->response_buf;
+ rsp_hdr = smb2_get_msg(work->response_buf);
rsp_hdr->Flags |= SMB2_FLAGS_ASYNC_COMMAND;
id = ksmbd_acquire_async_msg_id(&conn->async_ida);
@@ -674,7 +672,7 @@ void smb2_send_interim_resp(struct ksmbd_work *work, __le32 status)
{
struct smb2_hdr *rsp_hdr;
- rsp_hdr = work->response_buf;
+ rsp_hdr = smb2_get_msg(work->response_buf);
smb2_set_err_rsp(work);
rsp_hdr->Status = status;
@@ -715,17 +713,17 @@ static int smb2_get_dos_mode(struct kstat *stat, int attribute)
int attr = 0;
if (S_ISDIR(stat->mode)) {
- attr = ATTR_DIRECTORY |
- (attribute & (ATTR_HIDDEN | ATTR_SYSTEM));
+ attr = FILE_ATTRIBUTE_DIRECTORY |
+ (attribute & (FILE_ATTRIBUTE_HIDDEN | FILE_ATTRIBUTE_SYSTEM));
} else {
- attr = (attribute & 0x00005137) | ATTR_ARCHIVE;
- attr &= ~(ATTR_DIRECTORY);
+ attr = (attribute & 0x00005137) | FILE_ATTRIBUTE_ARCHIVE;
+ attr &= ~(FILE_ATTRIBUTE_DIRECTORY);
if (S_ISREG(stat->mode) && (server_conf.share_fake_fscaps &
FILE_SUPPORTS_SPARSE_FILES))
- attr |= ATTR_SPARSE;
+ attr |= FILE_ATTRIBUTE_SPARSE_FILE;
if (smb2_get_reparse_tag_special_file(stat->mode))
- attr |= ATTR_REPARSE;
+ attr |= FILE_ATTRIBUTE_REPARSE_POINT;
}
return attr;
@@ -753,16 +751,16 @@ static void build_encrypt_ctxt(struct smb2_encryption_neg_context *pneg_ctxt,
pneg_ctxt->Ciphers[0] = cipher_type;
}
-static void build_compression_ctxt(struct smb2_compression_ctx *pneg_ctxt,
+static void build_compression_ctxt(struct smb2_compression_capabilities_context *pneg_ctxt,
__le16 comp_algo)
{
pneg_ctxt->ContextType = SMB2_COMPRESSION_CAPABILITIES;
pneg_ctxt->DataLength =
- cpu_to_le16(sizeof(struct smb2_compression_ctx)
+ cpu_to_le16(sizeof(struct smb2_compression_capabilities_context)
- sizeof(struct smb2_neg_context));
pneg_ctxt->Reserved = cpu_to_le32(0);
pneg_ctxt->CompressionAlgorithmCount = cpu_to_le16(1);
- pneg_ctxt->Reserved1 = cpu_to_le32(0);
+ pneg_ctxt->Flags = cpu_to_le32(0);
pneg_ctxt->CompressionAlgorithms[0] = comp_algo;
}
@@ -802,11 +800,11 @@ static void build_posix_ctxt(struct smb2_posix_neg_context *pneg_ctxt)
}
static void assemble_neg_contexts(struct ksmbd_conn *conn,
- struct smb2_negotiate_rsp *rsp)
+ struct smb2_negotiate_rsp *rsp,
+ void *smb2_buf_len)
{
- /* +4 is to account for the RFC1001 len field */
char *pneg_ctxt = (char *)rsp +
- le32_to_cpu(rsp->NegotiateContextOffset) + 4;
+ le32_to_cpu(rsp->NegotiateContextOffset);
int neg_ctxt_cnt = 1;
int ctxt_size;
@@ -815,7 +813,7 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
build_preauth_ctxt((struct smb2_preauth_neg_context *)pneg_ctxt,
conn->preauth_info->Preauth_HashId);
rsp->NegotiateContextCount = cpu_to_le16(neg_ctxt_cnt);
- inc_rfc1001_len(rsp, AUTH_GSS_PADDING);
+ inc_rfc1001_len(smb2_buf_len, AUTH_GSS_PADDING);
ctxt_size = sizeof(struct smb2_preauth_neg_context);
/* Round to 8 byte boundary */
pneg_ctxt += round_up(sizeof(struct smb2_preauth_neg_context), 8);
@@ -839,12 +837,12 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
ksmbd_debug(SMB,
"assemble SMB2_COMPRESSION_CAPABILITIES context\n");
/* Temporarily set to SMB3_COMPRESS_NONE */
- build_compression_ctxt((struct smb2_compression_ctx *)pneg_ctxt,
+ build_compression_ctxt((struct smb2_compression_capabilities_context *)pneg_ctxt,
conn->compress_algorithm);
rsp->NegotiateContextCount = cpu_to_le16(++neg_ctxt_cnt);
- ctxt_size += sizeof(struct smb2_compression_ctx) + 2;
+ ctxt_size += sizeof(struct smb2_compression_capabilities_context) + 2;
/* Round to 8 byte boundary */
- pneg_ctxt += round_up(sizeof(struct smb2_compression_ctx) + 2,
+ pneg_ctxt += round_up(sizeof(struct smb2_compression_capabilities_context) + 2,
8);
}
@@ -869,7 +867,7 @@ static void assemble_neg_contexts(struct ksmbd_conn *conn,
ctxt_size += sizeof(struct smb2_signing_capabilities) + 2;
}
- inc_rfc1001_len(rsp, ctxt_size);
+ inc_rfc1001_len(smb2_buf_len, ctxt_size);
}
static __le32 decode_preauth_ctxt(struct ksmbd_conn *conn,
@@ -918,7 +916,7 @@ static void decode_encrypt_ctxt(struct ksmbd_conn *conn,
}
static void decode_compress_ctxt(struct ksmbd_conn *conn,
- struct smb2_compression_ctx *pneg_ctxt)
+ struct smb2_compression_capabilities_context *pneg_ctxt)
{
conn->compress_algorithm = SMB3_COMPRESS_NONE;
}
@@ -939,8 +937,8 @@ static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
}
for (i = 0; i < sign_algo_cnt; i++) {
- if (pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_HMAC_SHA256 ||
- pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_AES_CMAC) {
+ if (pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_HMAC_SHA256_LE ||
+ pneg_ctxt->SigningAlgorithms[i] == SIGNING_ALG_AES_CMAC_LE) {
ksmbd_debug(SMB, "Signing Algorithm ID = 0x%x\n",
pneg_ctxt->SigningAlgorithms[i]);
conn->signing_negotiated = true;
@@ -952,14 +950,14 @@ static void decode_sign_cap_ctxt(struct ksmbd_conn *conn,
}
static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
- struct smb2_negotiate_req *req)
+ struct smb2_negotiate_req *req,
+ int len_of_smb)
{
/* +4 is to account for the RFC1001 len field */
- struct smb2_neg_context *pctx = (struct smb2_neg_context *)((char *)req + 4);
+ struct smb2_neg_context *pctx = (struct smb2_neg_context *)req;
int i = 0, len_of_ctxts;
int offset = le32_to_cpu(req->NegotiateContextOffset);
int neg_ctxt_cnt = le16_to_cpu(req->NegotiateContextCount);
- int len_of_smb = be32_to_cpu(req->hdr.smb2_buf_length);
__le32 status = STATUS_INVALID_PARAMETER;
ksmbd_debug(SMB, "decoding %d negotiate contexts\n", neg_ctxt_cnt);
@@ -1011,7 +1009,7 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
break;
decode_compress_ctxt(conn,
- (struct smb2_compression_ctx *)pctx);
+ (struct smb2_compression_capabilities_context *)pctx);
} else if (pctx->ContextType == SMB2_NETNAME_NEGOTIATE_CONTEXT_ID) {
ksmbd_debug(SMB,
"deassemble SMB2_NETNAME_NEGOTIATE_CONTEXT_ID context\n");
@@ -1044,8 +1042,8 @@ static __le32 deassemble_neg_contexts(struct ksmbd_conn *conn,
int smb2_handle_negotiate(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_negotiate_req *req = work->request_buf;
- struct smb2_negotiate_rsp *rsp = work->response_buf;
+ struct smb2_negotiate_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_negotiate_rsp *rsp = smb2_get_msg(work->response_buf);
int rc = 0;
unsigned int smb2_buf_len, smb2_neg_size;
__le32 status;
@@ -1066,7 +1064,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
}
smb2_buf_len = get_rfc1002_len(work->request_buf);
- smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects) - 4;
+ smb2_neg_size = offsetof(struct smb2_negotiate_req, Dialects);
if (smb2_neg_size > smb2_buf_len) {
rsp->hdr.Status = STATUS_INVALID_PARAMETER;
rc = -EINVAL;
@@ -1115,7 +1113,8 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
goto err_out;
}
- status = deassemble_neg_contexts(conn, req);
+ status = deassemble_neg_contexts(conn, req,
+ get_rfc1002_len(work->request_buf));
if (status != STATUS_SUCCESS) {
pr_err("deassemble_neg_contexts error(0x%x)\n",
status);
@@ -1135,7 +1134,7 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
conn->preauth_info->Preauth_HashValue);
rsp->NegotiateContextOffset =
cpu_to_le32(OFFSET_OF_NEG_CONTEXT);
- assemble_neg_contexts(conn, rsp);
+ assemble_neg_contexts(conn, rsp, work->response_buf);
break;
case SMB302_PROT_ID:
init_smb3_02_server(conn);
@@ -1183,10 +1182,9 @@ int smb2_handle_negotiate(struct ksmbd_work *work)
rsp->SecurityBufferOffset = cpu_to_le16(128);
rsp->SecurityBufferLength = cpu_to_le16(AUTH_GSS_LENGTH);
- ksmbd_copy_gss_neg_header(((char *)(&rsp->hdr) +
- sizeof(rsp->hdr.smb2_buf_length)) +
- le16_to_cpu(rsp->SecurityBufferOffset));
- inc_rfc1001_len(rsp, sizeof(struct smb2_negotiate_rsp) -
+ ksmbd_copy_gss_neg_header((char *)(&rsp->hdr) +
+ le16_to_cpu(rsp->SecurityBufferOffset));
+ inc_rfc1001_len(work->response_buf, sizeof(struct smb2_negotiate_rsp) -
sizeof(struct smb2_hdr) - sizeof(rsp->Buffer) +
AUTH_GSS_LENGTH);
rsp->SecurityMode = SMB2_NEGOTIATE_SIGNING_ENABLED_LE;
@@ -1278,7 +1276,7 @@ static int ntlm_negotiate(struct ksmbd_work *work,
struct negotiate_message *negblob,
size_t negblob_len)
{
- struct smb2_sess_setup_rsp *rsp = work->response_buf;
+ struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
struct challenge_message *chgblob;
unsigned char *spnego_blob = NULL;
u16 spnego_blob_len;
@@ -1386,8 +1384,8 @@ static struct ksmbd_user *session_user(struct ksmbd_conn *conn,
static int ntlm_authenticate(struct ksmbd_work *work)
{
- struct smb2_sess_setup_req *req = work->request_buf;
- struct smb2_sess_setup_rsp *rsp = work->response_buf;
+ struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_conn *conn = work->conn;
struct ksmbd_session *sess = work->sess;
struct channel *chann = NULL;
@@ -1410,7 +1408,7 @@ static int ntlm_authenticate(struct ksmbd_work *work)
memcpy((char *)&rsp->hdr.ProtocolId + sz, spnego_blob, spnego_blob_len);
rsp->SecurityBufferLength = cpu_to_le16(spnego_blob_len);
kfree(spnego_blob);
- inc_rfc1001_len(rsp, spnego_blob_len - 1);
+ inc_rfc1001_len(work->response_buf, spnego_blob_len - 1);
}
user = session_user(conn, req);
@@ -1522,8 +1520,8 @@ binding_session:
#ifdef CONFIG_SMB_SERVER_KERBEROS5
static int krb5_authenticate(struct ksmbd_work *work)
{
- struct smb2_sess_setup_req *req = work->request_buf;
- struct smb2_sess_setup_rsp *rsp = work->response_buf;
+ struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_conn *conn = work->conn;
struct ksmbd_session *sess = work->sess;
char *in_blob, *out_blob;
@@ -1538,8 +1536,7 @@ static int krb5_authenticate(struct ksmbd_work *work)
out_blob = (char *)&rsp->hdr.ProtocolId +
le16_to_cpu(rsp->SecurityBufferOffset);
out_len = work->response_sz -
- offsetof(struct smb2_hdr, smb2_buf_length) -
- le16_to_cpu(rsp->SecurityBufferOffset);
+ (le16_to_cpu(rsp->SecurityBufferOffset) + 4);
/* Check previous session */
prev_sess_id = le64_to_cpu(req->PreviousSessionId);
@@ -1556,7 +1553,7 @@ static int krb5_authenticate(struct ksmbd_work *work)
return -EINVAL;
}
rsp->SecurityBufferLength = cpu_to_le16(out_len);
- inc_rfc1001_len(rsp, out_len - 1);
+ inc_rfc1001_len(work->response_buf, out_len - 1);
if ((conn->sign || server_conf.enforced_signing) ||
(req->SecurityMode & SMB2_NEGOTIATE_SIGNING_REQUIRED))
@@ -1612,8 +1609,8 @@ static int krb5_authenticate(struct ksmbd_work *work)
int smb2_sess_setup(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_sess_setup_req *req = work->request_buf;
- struct smb2_sess_setup_rsp *rsp = work->response_buf;
+ struct smb2_sess_setup_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_sess_setup_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_session *sess;
struct negotiate_message *negblob;
unsigned int negblob_len, negblob_off;
@@ -1625,7 +1622,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
rsp->SessionFlags = 0;
rsp->SecurityBufferOffset = cpu_to_le16(72);
rsp->SecurityBufferLength = 0;
- inc_rfc1001_len(rsp, 9);
+ inc_rfc1001_len(work->response_buf, 9);
if (!req->hdr.SessionId) {
sess = ksmbd_smb2_session_create();
@@ -1699,7 +1696,7 @@ int smb2_sess_setup(struct ksmbd_work *work)
negblob_off = le16_to_cpu(req->SecurityBufferOffset);
negblob_len = le16_to_cpu(req->SecurityBufferLength);
- if (negblob_off < (offsetof(struct smb2_sess_setup_req, Buffer) - 4) ||
+ if (negblob_off < offsetof(struct smb2_sess_setup_req, Buffer) ||
negblob_len < offsetof(struct negotiate_message, NegotiateFlags))
return -EINVAL;
@@ -1739,7 +1736,8 @@ int smb2_sess_setup(struct ksmbd_work *work)
* Note: here total size -1 is done as an
* adjustment for 0 size blob
*/
- inc_rfc1001_len(rsp, le16_to_cpu(rsp->SecurityBufferLength) - 1);
+ inc_rfc1001_len(work->response_buf,
+ le16_to_cpu(rsp->SecurityBufferLength) - 1);
} else if (negblob->MessageType == NtLmAuthenticate) {
rc = ntlm_authenticate(work);
@@ -1828,8 +1826,8 @@ out_err:
int smb2_tree_connect(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_tree_connect_req *req = work->request_buf;
- struct smb2_tree_connect_rsp *rsp = work->response_buf;
+ struct smb2_tree_connect_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_tree_connect_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_session *sess = work->sess;
char *treename = NULL, *name = NULL;
struct ksmbd_tree_conn_status status;
@@ -1894,7 +1892,7 @@ out_err1:
rsp->Reserved = 0;
/* default manual caching */
rsp->ShareFlags = SMB2_SHAREFLAG_MANUAL_CACHING;
- inc_rfc1001_len(rsp, 16);
+ inc_rfc1001_len(work->response_buf, 16);
if (!IS_ERR(treename))
kfree(treename);
@@ -1999,17 +1997,18 @@ static int smb2_create_open_flags(bool file_present, __le32 access,
*/
int smb2_tree_disconnect(struct ksmbd_work *work)
{
- struct smb2_tree_disconnect_rsp *rsp = work->response_buf;
+ struct smb2_tree_disconnect_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_session *sess = work->sess;
struct ksmbd_tree_connect *tcon = work->tcon;
rsp->StructureSize = cpu_to_le16(4);
- inc_rfc1001_len(rsp, 4);
+ inc_rfc1001_len(work->response_buf, 4);
ksmbd_debug(SMB, "request\n");
if (!tcon) {
- struct smb2_tree_disconnect_req *req = work->request_buf;
+ struct smb2_tree_disconnect_req *req =
+ smb2_get_msg(work->request_buf);
ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
@@ -2031,11 +2030,11 @@ int smb2_tree_disconnect(struct ksmbd_work *work)
int smb2_session_logoff(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_logoff_rsp *rsp = work->response_buf;
+ struct smb2_logoff_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_session *sess = work->sess;
rsp->StructureSize = cpu_to_le16(4);
- inc_rfc1001_len(rsp, 4);
+ inc_rfc1001_len(work->response_buf, 4);
ksmbd_debug(SMB, "request\n");
@@ -2048,7 +2047,7 @@ int smb2_session_logoff(struct ksmbd_work *work)
ksmbd_conn_wait_idle(conn);
if (ksmbd_tree_conn_session_logoff(sess)) {
- struct smb2_logoff_req *req = work->request_buf;
+ struct smb2_logoff_req *req = smb2_get_msg(work->request_buf);
ksmbd_debug(SMB, "Invalid tid %d\n", req->hdr.Id.SyncId.TreeId);
rsp->hdr.Status = STATUS_NETWORK_NAME_DELETED;
@@ -2075,8 +2074,8 @@ int smb2_session_logoff(struct ksmbd_work *work)
*/
static noinline int create_smb2_pipe(struct ksmbd_work *work)
{
- struct smb2_create_rsp *rsp = work->response_buf;
- struct smb2_create_req *req = work->request_buf;
+ struct smb2_create_rsp *rsp = smb2_get_msg(work->response_buf);
+ struct smb2_create_req *req = smb2_get_msg(work->request_buf);
int id;
int err;
char *name;
@@ -2099,7 +2098,7 @@ static noinline int create_smb2_pipe(struct ksmbd_work *work)
rsp->hdr.Status = STATUS_SUCCESS;
rsp->StructureSize = cpu_to_le16(89);
rsp->OplockLevel = SMB2_OPLOCK_LEVEL_NONE;
- rsp->Reserved = 0;
+ rsp->Flags = 0;
rsp->CreateAction = cpu_to_le32(FILE_OPENED);
rsp->CreationTime = cpu_to_le64(0);
@@ -2107,14 +2106,14 @@ static noinline int create_smb2_pipe(struct ksmbd_work *work)
rsp->ChangeTime = cpu_to_le64(0);
rsp->AllocationSize = cpu_to_le64(0);
rsp->EndofFile = cpu_to_le64(0);
- rsp->FileAttributes = ATTR_NORMAL_LE;
+ rsp->FileAttributes = FILE_ATTRIBUTE_NORMAL_LE;
rsp->Reserved2 = 0;
rsp->VolatileFileId = cpu_to_le64(id);
rsp->PersistentFileId = 0;
rsp->CreateContextsOffset = 0;
rsp->CreateContextsLength = 0;
- inc_rfc1001_len(rsp, 88); /* StructureSize - 1*/
+ inc_rfc1001_len(work->response_buf, 88); /* StructureSize - 1*/
kfree(name);
return 0;
@@ -2353,7 +2352,7 @@ static void smb2_update_xattrs(struct ksmbd_tree_connect *tcon,
struct xattr_dos_attrib da;
int rc;
- fp->f_ci->m_fattr &= ~(ATTR_HIDDEN_LE | ATTR_SYSTEM_LE);
+ fp->f_ci->m_fattr &= ~(FILE_ATTRIBUTE_HIDDEN_LE | FILE_ATTRIBUTE_SYSTEM_LE);
/* get FileAttributes from XATTR_NAME_DOS_ATTRIBUTE */
if (!test_share_config_flag(tcon->share_conf,
@@ -2463,7 +2462,7 @@ int smb2_open(struct ksmbd_work *work)
struct ksmbd_session *sess = work->sess;
struct ksmbd_tree_connect *tcon = work->tcon;
struct smb2_create_req *req;
- struct smb2_create_rsp *rsp, *rsp_org;
+ struct smb2_create_rsp *rsp;
struct path path;
struct ksmbd_share_config *share = tcon->share_conf;
struct ksmbd_file *fp = NULL;
@@ -2489,7 +2488,6 @@ int smb2_open(struct ksmbd_work *work)
umode_t posix_mode = 0;
__le32 daccess, maximal_access = 0;
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
if (req->hdr.NextCommand && !work->next_smb2_rcv_hdr_off &&
@@ -2559,7 +2557,7 @@ int smb2_open(struct ksmbd_work *work)
if (req_op_level == SMB2_OPLOCK_LEVEL_LEASE)
lc = parse_lease_state(req);
- if (le32_to_cpu(req->ImpersonationLevel) > le32_to_cpu(IL_DELEGATE_LE)) {
+ if (le32_to_cpu(req->ImpersonationLevel) > le32_to_cpu(IL_DELEGATE)) {
pr_err("Invalid impersonationlevel : 0x%x\n",
le32_to_cpu(req->ImpersonationLevel));
rc = -EIO;
@@ -2567,7 +2565,7 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
}
- if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK)) {
+ if (req->CreateOptions && !(req->CreateOptions & CREATE_OPTIONS_MASK_LE)) {
pr_err("Invalid create options : 0x%x\n",
le32_to_cpu(req->CreateOptions));
rc = -EINVAL;
@@ -2609,7 +2607,7 @@ int smb2_open(struct ksmbd_work *work)
goto err_out1;
}
- if (req->FileAttributes && !(req->FileAttributes & ATTR_MASK_LE)) {
+ if (req->FileAttributes && !(req->FileAttributes & FILE_ATTRIBUTE_MASK_LE)) {
pr_err("Invalid file attribute : 0x%x\n",
le32_to_cpu(req->FileAttributes));
rc = -EINVAL;
@@ -2740,7 +2738,7 @@ int smb2_open(struct ksmbd_work *work)
}
if (req->CreateOptions & FILE_DIRECTORY_FILE_LE &&
- req->FileAttributes & ATTR_NORMAL_LE) {
+ req->FileAttributes & FILE_ATTRIBUTE_NORMAL_LE) {
rsp->hdr.Status = STATUS_NOT_A_DIRECTORY;
rc = -EIO;
}
@@ -3119,7 +3117,7 @@ int smb2_open(struct ksmbd_work *work)
opinfo = rcu_dereference(fp->f_opinfo);
rsp->OplockLevel = opinfo != NULL ? opinfo->level : 0;
rcu_read_unlock();
- rsp->Reserved = 0;
+ rsp->Flags = 0;
rsp->CreateAction = cpu_to_le32(file_info);
rsp->CreationTime = cpu_to_le64(fp->create_time);
time = ksmbd_UnixTimeToNT(stat.atime);
@@ -3140,7 +3138,7 @@ int smb2_open(struct ksmbd_work *work)
rsp->CreateContextsOffset = 0;
rsp->CreateContextsLength = 0;
- inc_rfc1001_len(rsp_org, 88); /* StructureSize - 1*/
+ inc_rfc1001_len(work->response_buf, 88); /* StructureSize - 1*/
/* If lease is request send lease context response */
if (opinfo && opinfo->is_lease) {
@@ -3155,7 +3153,8 @@ int smb2_open(struct ksmbd_work *work)
create_lease_buf(rsp->Buffer, opinfo->o_lease);
le32_add_cpu(&rsp->CreateContextsLength,
conn->vals->create_lease_size);
- inc_rfc1001_len(rsp_org, conn->vals->create_lease_size);
+ inc_rfc1001_len(work->response_buf,
+ conn->vals->create_lease_size);
next_ptr = &lease_ccontext->Next;
next_off = conn->vals->create_lease_size;
}
@@ -3175,7 +3174,8 @@ int smb2_open(struct ksmbd_work *work)
le32_to_cpu(maximal_access));
le32_add_cpu(&rsp->CreateContextsLength,
conn->vals->create_mxac_size);
- inc_rfc1001_len(rsp_org, conn->vals->create_mxac_size);
+ inc_rfc1001_len(work->response_buf,
+ conn->vals->create_mxac_size);
if (next_ptr)
*next_ptr = cpu_to_le32(next_off);
next_ptr = &mxac_ccontext->Next;
@@ -3193,7 +3193,8 @@ int smb2_open(struct ksmbd_work *work)
stat.ino, tcon->id);
le32_add_cpu(&rsp->CreateContextsLength,
conn->vals->create_disk_id_size);
- inc_rfc1001_len(rsp_org, conn->vals->create_disk_id_size);
+ inc_rfc1001_len(work->response_buf,
+ conn->vals->create_disk_id_size);
if (next_ptr)
*next_ptr = cpu_to_le32(next_off);
next_ptr = &disk_id_ccontext->Next;
@@ -3207,15 +3208,15 @@ int smb2_open(struct ksmbd_work *work)
fp);
le32_add_cpu(&rsp->CreateContextsLength,
conn->vals->create_posix_size);
- inc_rfc1001_len(rsp_org, conn->vals->create_posix_size);
+ inc_rfc1001_len(work->response_buf,
+ conn->vals->create_posix_size);
if (next_ptr)
*next_ptr = cpu_to_le32(next_off);
}
if (contxt_cnt > 0) {
rsp->CreateContextsOffset =
- cpu_to_le32(offsetof(struct smb2_create_rsp, Buffer)
- - 4);
+ cpu_to_le32(offsetof(struct smb2_create_rsp, Buffer));
}
err_out:
@@ -3422,9 +3423,9 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
ffdinfo->EaSize =
smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
if (ffdinfo->EaSize)
- ffdinfo->ExtFileAttributes = ATTR_REPARSE_POINT_LE;
+ ffdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
if (d_info->hide_dot_file && d_info->name[0] == '.')
- ffdinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+ ffdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
memcpy(ffdinfo->FileName, conv_name, conv_len);
ffdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
break;
@@ -3438,11 +3439,11 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
fbdinfo->EaSize =
smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
if (fbdinfo->EaSize)
- fbdinfo->ExtFileAttributes = ATTR_REPARSE_POINT_LE;
+ fbdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
fbdinfo->ShortNameLength = 0;
fbdinfo->Reserved = 0;
if (d_info->hide_dot_file && d_info->name[0] == '.')
- fbdinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+ fbdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
memcpy(fbdinfo->FileName, conv_name, conv_len);
fbdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
break;
@@ -3454,7 +3455,7 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
fdinfo = (struct file_directory_info *)kstat;
fdinfo->FileNameLength = cpu_to_le32(conv_len);
if (d_info->hide_dot_file && d_info->name[0] == '.')
- fdinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+ fdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
memcpy(fdinfo->FileName, conv_name, conv_len);
fdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
break;
@@ -3478,11 +3479,11 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
dinfo->EaSize =
smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
if (dinfo->EaSize)
- dinfo->ExtFileAttributes = ATTR_REPARSE_POINT_LE;
+ dinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
dinfo->Reserved = 0;
dinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
if (d_info->hide_dot_file && d_info->name[0] == '.')
- dinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+ dinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
memcpy(dinfo->FileName, conv_name, conv_len);
dinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
break;
@@ -3496,13 +3497,13 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
fibdinfo->EaSize =
smb2_get_reparse_tag_special_file(ksmbd_kstat->kstat->mode);
if (fibdinfo->EaSize)
- fibdinfo->ExtFileAttributes = ATTR_REPARSE_POINT_LE;
+ fibdinfo->ExtFileAttributes = FILE_ATTRIBUTE_REPARSE_POINT_LE;
fibdinfo->UniqueId = cpu_to_le64(ksmbd_kstat->kstat->ino);
fibdinfo->ShortNameLength = 0;
fibdinfo->Reserved = 0;
fibdinfo->Reserved2 = cpu_to_le16(0);
if (d_info->hide_dot_file && d_info->name[0] == '.')
- fibdinfo->ExtFileAttributes |= ATTR_HIDDEN_LE;
+ fibdinfo->ExtFileAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
memcpy(fibdinfo->FileName, conv_name, conv_len);
fibdinfo->NextEntryOffset = cpu_to_le32(next_entry_offset);
break;
@@ -3528,9 +3529,10 @@ static int smb2_populate_readdir_entry(struct ksmbd_conn *conn, int info_level,
posix_info->Mode = cpu_to_le32(ksmbd_kstat->kstat->mode);
posix_info->Inode = cpu_to_le64(ksmbd_kstat->kstat->ino);
posix_info->DosAttributes =
- S_ISDIR(ksmbd_kstat->kstat->mode) ? ATTR_DIRECTORY_LE : ATTR_ARCHIVE_LE;
+ S_ISDIR(ksmbd_kstat->kstat->mode) ?
+ FILE_ATTRIBUTE_DIRECTORY_LE : FILE_ATTRIBUTE_ARCHIVE_LE;
if (d_info->hide_dot_file && d_info->name[0] == '.')
- posix_info->DosAttributes |= ATTR_HIDDEN_LE;
+ posix_info->DosAttributes |= FILE_ATTRIBUTE_HIDDEN_LE;
id_to_sid(from_kuid_munged(&init_user_ns, ksmbd_kstat->kstat->uid),
SIDNFS_USER, (struct smb_sid *)&posix_info->SidBuffer[0]);
id_to_sid(from_kgid_munged(&init_user_ns, ksmbd_kstat->kstat->gid),
@@ -3816,7 +3818,7 @@ int smb2_query_dir(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
struct smb2_query_directory_req *req;
- struct smb2_query_directory_rsp *rsp, *rsp_org;
+ struct smb2_query_directory_rsp *rsp;
struct ksmbd_share_config *share = work->tcon->share_conf;
struct ksmbd_file *dir_fp = NULL;
struct ksmbd_dir_info d_info;
@@ -3826,7 +3828,6 @@ int smb2_query_dir(struct ksmbd_work *work)
int buffer_sz;
struct smb2_query_dir_private query_dir_private = {NULL, };
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
if (ksmbd_override_fsids(work)) {
@@ -3947,7 +3948,7 @@ int smb2_query_dir(struct ksmbd_work *work)
rsp->OutputBufferOffset = cpu_to_le16(0);
rsp->OutputBufferLength = cpu_to_le32(0);
rsp->Buffer[0] = 0;
- inc_rfc1001_len(rsp_org, 9);
+ inc_rfc1001_len(work->response_buf, 9);
} else {
((struct file_directory_info *)
((char *)rsp->Buffer + d_info.last_entry_offset))
@@ -3956,7 +3957,7 @@ int smb2_query_dir(struct ksmbd_work *work)
rsp->StructureSize = cpu_to_le16(9);
rsp->OutputBufferOffset = cpu_to_le16(72);
rsp->OutputBufferLength = cpu_to_le32(d_info.data_count);
- inc_rfc1001_len(rsp_org, 8 + d_info.data_count);
+ inc_rfc1001_len(work->response_buf, 8 + d_info.data_count);
}
kfree(srch_ptr);
@@ -3999,26 +4000,28 @@ err_out2:
* Return: 0 on success, otherwise error
*/
static int buffer_check_err(int reqOutputBufferLength,
- struct smb2_query_info_rsp *rsp, int infoclass_size)
+ struct smb2_query_info_rsp *rsp,
+ void *rsp_org, int infoclass_size)
{
if (reqOutputBufferLength < le32_to_cpu(rsp->OutputBufferLength)) {
if (reqOutputBufferLength < infoclass_size) {
pr_err("Invalid Buffer Size Requested\n");
rsp->hdr.Status = STATUS_INFO_LENGTH_MISMATCH;
- rsp->hdr.smb2_buf_length = cpu_to_be32(sizeof(struct smb2_hdr) - 4);
+ *(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr));
return -EINVAL;
}
ksmbd_debug(SMB, "Buffer Overflow\n");
rsp->hdr.Status = STATUS_BUFFER_OVERFLOW;
- rsp->hdr.smb2_buf_length = cpu_to_be32(sizeof(struct smb2_hdr) - 4 +
+ *(__be32 *)rsp_org = cpu_to_be32(sizeof(struct smb2_hdr) +
reqOutputBufferLength);
rsp->OutputBufferLength = cpu_to_le32(reqOutputBufferLength);
}
return 0;
}
-static void get_standard_info_pipe(struct smb2_query_info_rsp *rsp)
+static void get_standard_info_pipe(struct smb2_query_info_rsp *rsp,
+ void *rsp_org)
{
struct smb2_file_standard_info *sinfo;
@@ -4031,10 +4034,11 @@ static void get_standard_info_pipe(struct smb2_query_info_rsp *rsp)
sinfo->Directory = 0;
rsp->OutputBufferLength =
cpu_to_le32(sizeof(struct smb2_file_standard_info));
- inc_rfc1001_len(rsp, sizeof(struct smb2_file_standard_info));
+ inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_standard_info));
}
-static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num)
+static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num,
+ void *rsp_org)
{
struct smb2_file_internal_info *file_info;
@@ -4044,12 +4048,13 @@ static void get_internal_info_pipe(struct smb2_query_info_rsp *rsp, u64 num)
file_info->IndexNumber = cpu_to_le64(num | (1ULL << 63));
rsp->OutputBufferLength =
cpu_to_le32(sizeof(struct smb2_file_internal_info));
- inc_rfc1001_len(rsp, sizeof(struct smb2_file_internal_info));
+ inc_rfc1001_len(rsp_org, sizeof(struct smb2_file_internal_info));
}
static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
struct smb2_query_info_req *req,
- struct smb2_query_info_rsp *rsp)
+ struct smb2_query_info_rsp *rsp,
+ void *rsp_org)
{
u64 id;
int rc;
@@ -4067,14 +4072,16 @@ static int smb2_get_info_file_pipe(struct ksmbd_session *sess,
switch (req->FileInfoClass) {
case FILE_STANDARD_INFORMATION:
- get_standard_info_pipe(rsp);
+ get_standard_info_pipe(rsp, rsp_org);
rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
- rsp, FILE_STANDARD_INFORMATION_SIZE);
+ rsp, rsp_org,
+ FILE_STANDARD_INFORMATION_SIZE);
break;
case FILE_INTERNAL_INFORMATION:
- get_internal_info_pipe(rsp, id);
+ get_internal_info_pipe(rsp, id, rsp_org);
rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
- rsp, FILE_INTERNAL_INFORMATION_SIZE);
+ rsp, rsp_org,
+ FILE_INTERNAL_INFORMATION_SIZE);
break;
default:
ksmbd_debug(SMB, "smb2_info_file_pipe for %u not supported\n",
@@ -4688,7 +4695,7 @@ static int find_file_posix_info(struct smb2_query_info_rsp *rsp,
static int smb2_get_info_file(struct ksmbd_work *work,
struct smb2_query_info_req *req,
- struct smb2_query_info_rsp *rsp, void *rsp_org)
+ struct smb2_query_info_rsp *rsp)
{
struct ksmbd_file *fp;
int fileinfoclass = 0;
@@ -4699,7 +4706,8 @@ static int smb2_get_info_file(struct ksmbd_work *work,
if (test_share_config_flag(work->tcon->share_conf,
KSMBD_SHARE_FLAG_PIPE)) {
/* smb2 info file called for pipe */
- return smb2_get_info_file_pipe(work->sess, req, rsp);
+ return smb2_get_info_file_pipe(work->sess, req, rsp,
+ work->response_buf);
}
if (work->next_smb2_rcv_hdr_off) {
@@ -4724,77 +4732,77 @@ static int smb2_get_info_file(struct ksmbd_work *work,
switch (fileinfoclass) {
case FILE_ACCESS_INFORMATION:
- get_file_access_info(rsp, fp, rsp_org);
+ get_file_access_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_ACCESS_INFORMATION_SIZE;
break;
case FILE_BASIC_INFORMATION:
- rc = get_file_basic_info(rsp, fp, rsp_org);
+ rc = get_file_basic_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_BASIC_INFORMATION_SIZE;
break;
case FILE_STANDARD_INFORMATION:
- get_file_standard_info(rsp, fp, rsp_org);
+ get_file_standard_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_STANDARD_INFORMATION_SIZE;
break;
case FILE_ALIGNMENT_INFORMATION:
- get_file_alignment_info(rsp, rsp_org);
+ get_file_alignment_info(rsp, work->response_buf);
file_infoclass_size = FILE_ALIGNMENT_INFORMATION_SIZE;
break;
case FILE_ALL_INFORMATION:
- rc = get_file_all_info(work, rsp, fp, rsp_org);
+ rc = get_file_all_info(work, rsp, fp, work->response_buf);
file_infoclass_size = FILE_ALL_INFORMATION_SIZE;
break;
case FILE_ALTERNATE_NAME_INFORMATION:
- get_file_alternate_info(work, rsp, fp, rsp_org);
+ get_file_alternate_info(work, rsp, fp, work->response_buf);
file_infoclass_size = FILE_ALTERNATE_NAME_INFORMATION_SIZE;
break;
case FILE_STREAM_INFORMATION:
- get_file_stream_info(work, rsp, fp, rsp_org);
+ get_file_stream_info(work, rsp, fp, work->response_buf);
file_infoclass_size = FILE_STREAM_INFORMATION_SIZE;
break;
case FILE_INTERNAL_INFORMATION:
- get_file_internal_info(rsp, fp, rsp_org);
+ get_file_internal_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_INTERNAL_INFORMATION_SIZE;
break;
case FILE_NETWORK_OPEN_INFORMATION:
- rc = get_file_network_open_info(rsp, fp, rsp_org);
+ rc = get_file_network_open_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_NETWORK_OPEN_INFORMATION_SIZE;
break;
case FILE_EA_INFORMATION:
- get_file_ea_info(rsp, rsp_org);
+ get_file_ea_info(rsp, work->response_buf);
file_infoclass_size = FILE_EA_INFORMATION_SIZE;
break;
case FILE_FULL_EA_INFORMATION:
- rc = smb2_get_ea(work, fp, req, rsp, rsp_org);
+ rc = smb2_get_ea(work, fp, req, rsp, work->response_buf);
file_infoclass_size = FILE_FULL_EA_INFORMATION_SIZE;
break;
case FILE_POSITION_INFORMATION:
- get_file_position_info(rsp, fp, rsp_org);
+ get_file_position_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_POSITION_INFORMATION_SIZE;
break;
case FILE_MODE_INFORMATION:
- get_file_mode_info(rsp, fp, rsp_org);
+ get_file_mode_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_MODE_INFORMATION_SIZE;
break;
case FILE_COMPRESSION_INFORMATION:
- get_file_compression_info(rsp, fp, rsp_org);
+ get_file_compression_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_COMPRESSION_INFORMATION_SIZE;
break;
case FILE_ATTRIBUTE_TAG_INFORMATION:
- rc = get_file_attribute_tag_info(rsp, fp, rsp_org);
+ rc = get_file_attribute_tag_info(rsp, fp, work->response_buf);
file_infoclass_size = FILE_ATTRIBUTE_TAG_INFORMATION_SIZE;
break;
case SMB_FIND_FILE_POSIX_INFO:
@@ -4802,7 +4810,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
pr_err("client doesn't negotiate with SMB3.1.1 POSIX Extensions\n");
rc = -EOPNOTSUPP;
} else {
- rc = find_file_posix_info(rsp, fp, rsp_org);
+ rc = find_file_posix_info(rsp, fp, work->response_buf);
file_infoclass_size = sizeof(struct smb311_posix_qinfo);
}
break;
@@ -4813,7 +4821,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
}
if (!rc)
rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
- rsp,
+ rsp, work->response_buf,
file_infoclass_size);
ksmbd_fd_put(work, fp);
return rc;
@@ -4821,7 +4829,7 @@ static int smb2_get_info_file(struct ksmbd_work *work,
static int smb2_get_info_filesystem(struct ksmbd_work *work,
struct smb2_query_info_req *req,
- struct smb2_query_info_rsp *rsp, void *rsp_org)
+ struct smb2_query_info_rsp *rsp)
{
struct ksmbd_session *sess = work->sess;
struct ksmbd_conn *conn = sess->conn;
@@ -4857,7 +4865,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->DeviceType = cpu_to_le32(stfs.f_type);
info->DeviceCharacteristics = cpu_to_le32(0x00000020);
rsp->OutputBufferLength = cpu_to_le32(8);
- inc_rfc1001_len(rsp_org, 8);
+ inc_rfc1001_len(work->response_buf, 8);
fs_infoclass_size = FS_DEVICE_INFORMATION_SIZE;
break;
}
@@ -4883,7 +4891,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->FileSystemNameLen = cpu_to_le32(len);
sz = sizeof(struct filesystem_attribute_info) - 2 + len;
rsp->OutputBufferLength = cpu_to_le32(sz);
- inc_rfc1001_len(rsp_org, sz);
+ inc_rfc1001_len(work->response_buf, sz);
fs_infoclass_size = FS_ATTRIBUTE_INFORMATION_SIZE;
break;
}
@@ -4891,11 +4899,18 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
{
struct filesystem_vol_info *info;
size_t sz;
+ unsigned int serial_crc = 0;
info = (struct filesystem_vol_info *)(rsp->Buffer);
info->VolumeCreationTime = 0;
+ serial_crc = crc32_le(serial_crc, share->name,
+ strlen(share->name));
+ serial_crc = crc32_le(serial_crc, share->path,
+ strlen(share->path));
+ serial_crc = crc32_le(serial_crc, ksmbd_netbios_name(),
+ strlen(ksmbd_netbios_name()));
/* Taking dummy value of serial number*/
- info->SerialNumber = cpu_to_le32(0xbc3ac512);
+ info->SerialNumber = cpu_to_le32(serial_crc);
len = smbConvertToUTF16((__le16 *)info->VolumeLabel,
share->name, PATH_MAX,
conn->local_nls, 0);
@@ -4904,7 +4919,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->Reserved = 0;
sz = sizeof(struct filesystem_vol_info) - 2 + len;
rsp->OutputBufferLength = cpu_to_le32(sz);
- inc_rfc1001_len(rsp_org, sz);
+ inc_rfc1001_len(work->response_buf, sz);
fs_infoclass_size = FS_VOLUME_INFORMATION_SIZE;
break;
}
@@ -4918,7 +4933,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->SectorsPerAllocationUnit = cpu_to_le32(1);
info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
rsp->OutputBufferLength = cpu_to_le32(24);
- inc_rfc1001_len(rsp_org, 24);
+ inc_rfc1001_len(work->response_buf, 24);
fs_infoclass_size = FS_SIZE_INFORMATION_SIZE;
break;
}
@@ -4935,7 +4950,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->SectorsPerAllocationUnit = cpu_to_le32(1);
info->BytesPerSector = cpu_to_le32(stfs.f_bsize);
rsp->OutputBufferLength = cpu_to_le32(32);
- inc_rfc1001_len(rsp_org, 32);
+ inc_rfc1001_len(work->response_buf, 32);
fs_infoclass_size = FS_FULL_SIZE_INFORMATION_SIZE;
break;
}
@@ -4956,7 +4971,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->extended_info.rel_date = 0;
memcpy(info->extended_info.version_string, "1.1.0", strlen("1.1.0"));
rsp->OutputBufferLength = cpu_to_le32(64);
- inc_rfc1001_len(rsp_org, 64);
+ inc_rfc1001_len(work->response_buf, 64);
fs_infoclass_size = FS_OBJECT_ID_INFORMATION_SIZE;
break;
}
@@ -4977,7 +4992,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->ByteOffsetForSectorAlignment = 0;
info->ByteOffsetForPartitionAlignment = 0;
rsp->OutputBufferLength = cpu_to_le32(28);
- inc_rfc1001_len(rsp_org, 28);
+ inc_rfc1001_len(work->response_buf, 28);
fs_infoclass_size = FS_SECTOR_SIZE_INFORMATION_SIZE;
break;
}
@@ -4999,7 +5014,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->DefaultQuotaLimit = cpu_to_le64(SMB2_NO_FID);
info->Padding = 0;
rsp->OutputBufferLength = cpu_to_le32(48);
- inc_rfc1001_len(rsp_org, 48);
+ inc_rfc1001_len(work->response_buf, 48);
fs_infoclass_size = FS_CONTROL_INFORMATION_SIZE;
break;
}
@@ -5020,7 +5035,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
info->TotalFileNodes = cpu_to_le64(stfs.f_files);
info->FreeFileNodes = cpu_to_le64(stfs.f_ffree);
rsp->OutputBufferLength = cpu_to_le32(56);
- inc_rfc1001_len(rsp_org, 56);
+ inc_rfc1001_len(work->response_buf, 56);
fs_infoclass_size = FS_POSIX_INFORMATION_SIZE;
}
break;
@@ -5030,7 +5045,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
return -EOPNOTSUPP;
}
rc = buffer_check_err(le32_to_cpu(req->OutputBufferLength),
- rsp,
+ rsp, work->response_buf,
fs_infoclass_size);
path_put(&path);
return rc;
@@ -5038,7 +5053,7 @@ static int smb2_get_info_filesystem(struct ksmbd_work *work,
static int smb2_get_info_sec(struct ksmbd_work *work,
struct smb2_query_info_req *req,
- struct smb2_query_info_rsp *rsp, void *rsp_org)
+ struct smb2_query_info_rsp *rsp)
{
struct ksmbd_file *fp;
struct user_namespace *user_ns;
@@ -5065,7 +5080,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
secdesclen = sizeof(struct smb_ntsd);
rsp->OutputBufferLength = cpu_to_le32(secdesclen);
- inc_rfc1001_len(rsp_org, secdesclen);
+ inc_rfc1001_len(work->response_buf, secdesclen);
return 0;
}
@@ -5107,7 +5122,7 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
return rc;
rsp->OutputBufferLength = cpu_to_le32(secdesclen);
- inc_rfc1001_len(rsp_org, secdesclen);
+ inc_rfc1001_len(work->response_buf, secdesclen);
return 0;
}
@@ -5120,10 +5135,9 @@ static int smb2_get_info_sec(struct ksmbd_work *work,
int smb2_query_info(struct ksmbd_work *work)
{
struct smb2_query_info_req *req;
- struct smb2_query_info_rsp *rsp, *rsp_org;
+ struct smb2_query_info_rsp *rsp;
int rc = 0;
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
ksmbd_debug(SMB, "GOT query info request\n");
@@ -5131,15 +5145,15 @@ int smb2_query_info(struct ksmbd_work *work)
switch (req->InfoType) {
case SMB2_O_INFO_FILE:
ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILE\n");
- rc = smb2_get_info_file(work, req, rsp, (void *)rsp_org);
+ rc = smb2_get_info_file(work, req, rsp);
break;
case SMB2_O_INFO_FILESYSTEM:
ksmbd_debug(SMB, "GOT SMB2_O_INFO_FILESYSTEM\n");
- rc = smb2_get_info_filesystem(work, req, rsp, (void *)rsp_org);
+ rc = smb2_get_info_filesystem(work, req, rsp);
break;
case SMB2_O_INFO_SECURITY:
ksmbd_debug(SMB, "GOT SMB2_O_INFO_SECURITY\n");
- rc = smb2_get_info_sec(work, req, rsp, (void *)rsp_org);
+ rc = smb2_get_info_sec(work, req, rsp);
break;
default:
ksmbd_debug(SMB, "InfoType %d not supported yet\n",
@@ -5164,7 +5178,7 @@ int smb2_query_info(struct ksmbd_work *work)
}
rsp->StructureSize = cpu_to_le16(9);
rsp->OutputBufferOffset = cpu_to_le16(72);
- inc_rfc1001_len(rsp_org, 8);
+ inc_rfc1001_len(work->response_buf, 8);
return 0;
}
@@ -5177,8 +5191,8 @@ int smb2_query_info(struct ksmbd_work *work)
static noinline int smb2_close_pipe(struct ksmbd_work *work)
{
u64 id;
- struct smb2_close_req *req = work->request_buf;
- struct smb2_close_rsp *rsp = work->response_buf;
+ struct smb2_close_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_close_rsp *rsp = smb2_get_msg(work->response_buf);
id = le64_to_cpu(req->VolatileFileId);
ksmbd_session_rpc_close(work->sess, id);
@@ -5193,7 +5207,7 @@ static noinline int smb2_close_pipe(struct ksmbd_work *work)
rsp->AllocationSize = 0;
rsp->EndOfFile = 0;
rsp->Attributes = 0;
- inc_rfc1001_len(rsp, 60);
+ inc_rfc1001_len(work->response_buf, 60);
return 0;
}
@@ -5209,14 +5223,12 @@ int smb2_close(struct ksmbd_work *work)
u64 sess_id;
struct smb2_close_req *req;
struct smb2_close_rsp *rsp;
- struct smb2_close_rsp *rsp_org;
struct ksmbd_conn *conn = work->conn;
struct ksmbd_file *fp;
struct inode *inode;
u64 time;
int err = 0;
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
if (test_share_config_flag(work->tcon->share_conf,
@@ -5306,7 +5318,7 @@ out:
rsp->hdr.Status = STATUS_FILE_CLOSED;
smb2_set_err_rsp(work);
} else {
- inc_rfc1001_len(rsp_org, 60);
+ inc_rfc1001_len(work->response_buf, 60);
}
return 0;
@@ -5320,11 +5332,11 @@ out:
*/
int smb2_echo(struct ksmbd_work *work)
{
- struct smb2_echo_rsp *rsp = work->response_buf;
+ struct smb2_echo_rsp *rsp = smb2_get_msg(work->response_buf);
rsp->StructureSize = cpu_to_le16(4);
rsp->Reserved = 0;
- inc_rfc1001_len(rsp, 4);
+ inc_rfc1001_len(work->response_buf, 4);
return 0;
}
@@ -5566,14 +5578,14 @@ static int set_file_basic_info(struct ksmbd_file *fp,
if (file_info->Attributes) {
if (!S_ISDIR(inode->i_mode) &&
- file_info->Attributes & ATTR_DIRECTORY_LE) {
+ file_info->Attributes & FILE_ATTRIBUTE_DIRECTORY_LE) {
pr_err("can't change a file to a directory\n");
return -EINVAL;
}
- if (!(S_ISDIR(inode->i_mode) && file_info->Attributes == ATTR_NORMAL_LE))
+ if (!(S_ISDIR(inode->i_mode) && file_info->Attributes == FILE_ATTRIBUTE_NORMAL_LE))
fp->f_ci->m_fattr = file_info->Attributes |
- (fp->f_ci->m_fattr & ATTR_DIRECTORY_LE);
+ (fp->f_ci->m_fattr & FILE_ATTRIBUTE_DIRECTORY_LE);
}
if (test_share_config_flag(share, KSMBD_SHARE_FLAG_STORE_DOS_ATTRS) &&
@@ -5794,9 +5806,7 @@ static int set_file_mode_info(struct ksmbd_file *fp,
mode = file_info->Mode;
- if ((mode & ~FILE_MODE_INFO_MASK) ||
- (mode & FILE_SYNCHRONOUS_IO_ALERT_LE &&
- mode & FILE_SYNCHRONOUS_IO_NONALERT_LE)) {
+ if ((mode & ~FILE_MODE_INFO_MASK)) {
pr_err("Mode is not valid : 0x%x\n", le32_to_cpu(mode));
return -EINVAL;
}
@@ -5943,14 +5953,13 @@ static int smb2_set_info_sec(struct ksmbd_file *fp, int addition_info,
int smb2_set_info(struct ksmbd_work *work)
{
struct smb2_set_info_req *req;
- struct smb2_set_info_rsp *rsp, *rsp_org;
+ struct smb2_set_info_rsp *rsp;
struct ksmbd_file *fp;
int rc = 0;
unsigned int id = KSMBD_NO_FID, pid = KSMBD_NO_FID;
ksmbd_debug(SMB, "Received set info request\n");
- rsp_org = work->response_buf;
if (work->next_smb2_rcv_hdr_off) {
req = ksmbd_req_buf_next(work);
rsp = ksmbd_resp_buf_next(work);
@@ -5961,8 +5970,8 @@ int smb2_set_info(struct ksmbd_work *work)
pid = work->compound_pfid;
}
} else {
- req = work->request_buf;
- rsp = work->response_buf;
+ req = smb2_get_msg(work->request_buf);
+ rsp = smb2_get_msg(work->response_buf);
}
if (!has_file_id(id)) {
@@ -6002,7 +6011,7 @@ int smb2_set_info(struct ksmbd_work *work)
goto err_out;
rsp->StructureSize = cpu_to_le16(2);
- inc_rfc1001_len(rsp_org, 2);
+ inc_rfc1001_len(work->response_buf, 2);
ksmbd_fd_put(work, fp);
return 0;
@@ -6042,12 +6051,12 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
int nbytes = 0, err;
u64 id;
struct ksmbd_rpc_command *rpc_resp;
- struct smb2_read_req *req = work->request_buf;
- struct smb2_read_rsp *rsp = work->response_buf;
+ struct smb2_read_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_read_rsp *rsp = smb2_get_msg(work->response_buf);
id = le64_to_cpu(req->VolatileFileId);
- inc_rfc1001_len(rsp, 16);
+ inc_rfc1001_len(work->response_buf, 16);
rpc_resp = ksmbd_rpc_read(work->sess, id);
if (rpc_resp) {
if (rpc_resp->flags != KSMBD_RPC_OK) {
@@ -6066,7 +6075,7 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
rpc_resp->payload_sz);
nbytes = rpc_resp->payload_sz;
- work->resp_hdr_sz = get_rfc1002_len(rsp) + 4;
+ work->resp_hdr_sz = get_rfc1002_len(work->response_buf) + 4;
work->aux_payload_sz = nbytes;
kvfree(rpc_resp);
}
@@ -6076,8 +6085,8 @@ static noinline int smb2_read_pipe(struct ksmbd_work *work)
rsp->Reserved = 0;
rsp->DataLength = cpu_to_le32(nbytes);
rsp->DataRemaining = 0;
- rsp->Reserved2 = 0;
- inc_rfc1001_len(rsp, nbytes);
+ rsp->Flags = 0;
+ inc_rfc1001_len(work->response_buf, nbytes);
return 0;
out:
@@ -6127,14 +6136,13 @@ int smb2_read(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
struct smb2_read_req *req;
- struct smb2_read_rsp *rsp, *rsp_org;
+ struct smb2_read_rsp *rsp;
struct ksmbd_file *fp;
loff_t offset;
size_t length, mincount;
ssize_t nbytes = 0, remain_bytes = 0;
int err = 0;
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
if (test_share_config_flag(work->tcon->share_conf,
@@ -6215,11 +6223,11 @@ int smb2_read(struct ksmbd_work *work)
rsp->Reserved = 0;
rsp->DataLength = cpu_to_le32(nbytes);
rsp->DataRemaining = cpu_to_le32(remain_bytes);
- rsp->Reserved2 = 0;
- inc_rfc1001_len(rsp_org, 16);
- work->resp_hdr_sz = get_rfc1002_len(rsp_org) + 4;
+ rsp->Flags = 0;
+ inc_rfc1001_len(work->response_buf, 16);
+ work->resp_hdr_sz = get_rfc1002_len(work->response_buf) + 4;
work->aux_payload_sz = nbytes;
- inc_rfc1001_len(rsp_org, nbytes);
+ inc_rfc1001_len(work->response_buf, nbytes);
ksmbd_fd_put(work, fp);
return 0;
@@ -6254,8 +6262,8 @@ out:
*/
static noinline int smb2_write_pipe(struct ksmbd_work *work)
{
- struct smb2_write_req *req = work->request_buf;
- struct smb2_write_rsp *rsp = work->response_buf;
+ struct smb2_write_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_write_rsp *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_rpc_command *rpc_resp;
u64 id = 0;
int err = 0, ret = 0;
@@ -6266,13 +6274,14 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
id = le64_to_cpu(req->VolatileFileId);
if (le16_to_cpu(req->DataOffset) ==
- (offsetof(struct smb2_write_req, Buffer) - 4)) {
+ offsetof(struct smb2_write_req, Buffer)) {
data_buf = (char *)&req->Buffer[0];
} else {
- if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req)) {
+ if ((u64)le16_to_cpu(req->DataOffset) + length >
+ get_rfc1002_len(work->request_buf)) {
pr_err("invalid write data offset %u, smb_len %u\n",
le16_to_cpu(req->DataOffset),
- get_rfc1002_len(req));
+ get_rfc1002_len(work->request_buf));
err = -EINVAL;
goto out;
}
@@ -6304,7 +6313,7 @@ static noinline int smb2_write_pipe(struct ksmbd_work *work)
rsp->DataLength = cpu_to_le32(length);
rsp->DataRemaining = 0;
rsp->Reserved2 = 0;
- inc_rfc1001_len(rsp, 16);
+ inc_rfc1001_len(work->response_buf, 16);
return 0;
out:
if (err) {
@@ -6372,7 +6381,7 @@ static ssize_t smb2_write_rdma_channel(struct ksmbd_work *work,
int smb2_write(struct ksmbd_work *work)
{
struct smb2_write_req *req;
- struct smb2_write_rsp *rsp, *rsp_org;
+ struct smb2_write_rsp *rsp;
struct ksmbd_file *fp = NULL;
loff_t offset;
size_t length;
@@ -6381,7 +6390,6 @@ int smb2_write(struct ksmbd_work *work)
bool writethrough = false;
int err = 0;
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
if (test_share_config_flag(work->tcon->share_conf, KSMBD_SHARE_FLAG_PIPE)) {
@@ -6424,13 +6432,14 @@ int smb2_write(struct ksmbd_work *work)
if (req->Channel != SMB2_CHANNEL_RDMA_V1 &&
req->Channel != SMB2_CHANNEL_RDMA_V1_INVALIDATE) {
if (le16_to_cpu(req->DataOffset) ==
- (offsetof(struct smb2_write_req, Buffer) - 4)) {
+ offsetof(struct smb2_write_req, Buffer)) {
data_buf = (char *)&req->Buffer[0];
} else {
- if ((u64)le16_to_cpu(req->DataOffset) + length > get_rfc1002_len(req)) {
+ if ((u64)le16_to_cpu(req->DataOffset) + length >
+ get_rfc1002_len(work->request_buf)) {
pr_err("invalid write data offset %u, smb_len %u\n",
le16_to_cpu(req->DataOffset),
- get_rfc1002_len(req));
+ get_rfc1002_len(work->request_buf));
err = -EINVAL;
goto out;
}
@@ -6468,7 +6477,7 @@ int smb2_write(struct ksmbd_work *work)
rsp->DataLength = cpu_to_le32(nbytes);
rsp->DataRemaining = 0;
rsp->Reserved2 = 0;
- inc_rfc1001_len(rsp_org, 16);
+ inc_rfc1001_len(work->response_buf, 16);
ksmbd_fd_put(work, fp);
return 0;
@@ -6502,10 +6511,9 @@ out:
int smb2_flush(struct ksmbd_work *work)
{
struct smb2_flush_req *req;
- struct smb2_flush_rsp *rsp, *rsp_org;
+ struct smb2_flush_rsp *rsp;
int err;
- rsp_org = work->response_buf;
WORK_BUFFERS(work, req, rsp);
ksmbd_debug(SMB, "SMB2_FLUSH called for fid %llu\n",
@@ -6519,7 +6527,7 @@ int smb2_flush(struct ksmbd_work *work)
rsp->StructureSize = cpu_to_le16(4);
rsp->Reserved = 0;
- inc_rfc1001_len(rsp_org, 4);
+ inc_rfc1001_len(work->response_buf, 4);
return 0;
out:
@@ -6540,7 +6548,7 @@ out:
int smb2_cancel(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_hdr *hdr = work->request_buf;
+ struct smb2_hdr *hdr = smb2_get_msg(work->request_buf);
struct smb2_hdr *chdr;
struct ksmbd_work *cancel_work = NULL;
int canceled = 0;
@@ -6555,7 +6563,7 @@ int smb2_cancel(struct ksmbd_work *work)
spin_lock(&conn->request_lock);
list_for_each_entry(cancel_work, command_list,
async_request_entry) {
- chdr = cancel_work->request_buf;
+ chdr = smb2_get_msg(cancel_work->request_buf);
if (cancel_work->async_id !=
le64_to_cpu(hdr->Id.AsyncId))
@@ -6574,7 +6582,7 @@ int smb2_cancel(struct ksmbd_work *work)
spin_lock(&conn->request_lock);
list_for_each_entry(cancel_work, command_list, request_entry) {
- chdr = cancel_work->request_buf;
+ chdr = smb2_get_msg(cancel_work->request_buf);
if (chdr->MessageId != hdr->MessageId ||
cancel_work == work)
@@ -6709,8 +6717,8 @@ static inline bool lock_defer_pending(struct file_lock *fl)
*/
int smb2_lock(struct ksmbd_work *work)
{
- struct smb2_lock_req *req = work->request_buf;
- struct smb2_lock_rsp *rsp = work->response_buf;
+ struct smb2_lock_req *req = smb2_get_msg(work->request_buf);
+ struct smb2_lock_rsp *rsp = smb2_get_msg(work->response_buf);
struct smb2_lock_element *lock_ele;
struct ksmbd_file *fp = NULL;
struct file_lock *flock = NULL;
@@ -7017,7 +7025,7 @@ skip:
ksmbd_debug(SMB, "successful in taking lock\n");
rsp->hdr.Status = STATUS_SUCCESS;
rsp->Reserved = 0;
- inc_rfc1001_len(rsp, 4);
+ inc_rfc1001_len(work->response_buf, 4);
ksmbd_fd_put(work, fp);
return 0;
@@ -7312,7 +7320,7 @@ static int fsctl_validate_negotiate_info(struct ksmbd_conn *conn,
int ret = 0;
int dialect;
- if (in_buf_len < sizeof(struct validate_negotiate_info_req) +
+ if (in_buf_len < offsetof(struct validate_negotiate_info_req, Dialects) +
le16_to_cpu(neg_req->DialectCount) * sizeof(__le16))
return -EINVAL;
@@ -7435,9 +7443,9 @@ static inline int fsctl_set_sparse(struct ksmbd_work *work, u64 id,
old_fattr = fp->f_ci->m_fattr;
if (sparse->SetSparse)
- fp->f_ci->m_fattr |= ATTR_SPARSE_FILE_LE;
+ fp->f_ci->m_fattr |= FILE_ATTRIBUTE_SPARSE_FILE_LE;
else
- fp->f_ci->m_fattr &= ~ATTR_SPARSE_FILE_LE;
+ fp->f_ci->m_fattr &= ~FILE_ATTRIBUTE_SPARSE_FILE_LE;
if (fp->f_ci->m_fattr != old_fattr &&
test_share_config_flag(work->tcon->share_conf,
@@ -7490,13 +7498,12 @@ static int fsctl_request_resume_key(struct ksmbd_work *work,
int smb2_ioctl(struct ksmbd_work *work)
{
struct smb2_ioctl_req *req;
- struct smb2_ioctl_rsp *rsp, *rsp_org;
+ struct smb2_ioctl_rsp *rsp;
unsigned int cnt_code, nbytes = 0, out_buf_len, in_buf_len;
u64 id = KSMBD_NO_FID;
struct ksmbd_conn *conn = work->conn;
int ret = 0;
- rsp_org = work->response_buf;
if (work->next_smb2_rcv_hdr_off) {
req = ksmbd_req_buf_next(work);
rsp = ksmbd_resp_buf_next(work);
@@ -7506,8 +7513,8 @@ int smb2_ioctl(struct ksmbd_work *work)
id = work->compound_fid;
}
} else {
- req = work->request_buf;
- rsp = work->response_buf;
+ req = smb2_get_msg(work->request_buf);
+ rsp = smb2_get_msg(work->response_buf);
}
if (!has_file_id(id))
@@ -7787,7 +7794,7 @@ dup_ext_out:
rsp->Reserved = cpu_to_le16(0);
rsp->Flags = cpu_to_le32(0);
rsp->Reserved2 = cpu_to_le32(0);
- inc_rfc1001_len(rsp_org, 48 + nbytes);
+ inc_rfc1001_len(work->response_buf, 48 + nbytes);
return 0;
@@ -7814,8 +7821,8 @@ out:
*/
static void smb20_oplock_break_ack(struct ksmbd_work *work)
{
- struct smb2_oplock_break *req = work->request_buf;
- struct smb2_oplock_break *rsp = work->response_buf;
+ struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
+ struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
struct ksmbd_file *fp;
struct oplock_info *opinfo = NULL;
__le32 err = 0;
@@ -7922,7 +7929,7 @@ static void smb20_oplock_break_ack(struct ksmbd_work *work)
rsp->Reserved2 = 0;
rsp->VolatileFid = cpu_to_le64(volatile_id);
rsp->PersistentFid = cpu_to_le64(persistent_id);
- inc_rfc1001_len(rsp, 24);
+ inc_rfc1001_len(work->response_buf, 24);
return;
err_out:
@@ -7958,8 +7965,8 @@ static int check_lease_state(struct lease *lease, __le32 req_state)
static void smb21_lease_break_ack(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_lease_ack *req = work->request_buf;
- struct smb2_lease_ack *rsp = work->response_buf;
+ struct smb2_lease_ack *req = smb2_get_msg(work->request_buf);
+ struct smb2_lease_ack *rsp = smb2_get_msg(work->response_buf);
struct oplock_info *opinfo;
__le32 err = 0;
int ret = 0;
@@ -8071,7 +8078,7 @@ static void smb21_lease_break_ack(struct ksmbd_work *work)
memcpy(rsp->LeaseKey, req->LeaseKey, 16);
rsp->LeaseState = lease_state;
rsp->LeaseDuration = 0;
- inc_rfc1001_len(rsp, 36);
+ inc_rfc1001_len(work->response_buf, 36);
return;
err_out:
@@ -8092,8 +8099,8 @@ err_out:
*/
int smb2_oplock_break(struct ksmbd_work *work)
{
- struct smb2_oplock_break *req = work->request_buf;
- struct smb2_oplock_break *rsp = work->response_buf;
+ struct smb2_oplock_break *req = smb2_get_msg(work->request_buf);
+ struct smb2_oplock_break *rsp = smb2_get_msg(work->response_buf);
switch (le16_to_cpu(req->StructureSize)) {
case OP_BREAK_STRUCT_SIZE_20:
@@ -8120,8 +8127,8 @@ int smb2_oplock_break(struct ksmbd_work *work)
*/
int smb2_notify(struct ksmbd_work *work)
{
- struct smb2_notify_req *req;
- struct smb2_notify_rsp *rsp;
+ struct smb2_change_notify_req *req;
+ struct smb2_change_notify_rsp *rsp;
WORK_BUFFERS(work, req, rsp);
@@ -8145,7 +8152,7 @@ int smb2_notify(struct ksmbd_work *work)
*/
bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command)
{
- struct smb2_hdr *rcv_hdr2 = work->request_buf;
+ struct smb2_hdr *rcv_hdr2 = smb2_get_msg(work->request_buf);
if ((rcv_hdr2->Flags & SMB2_FLAGS_SIGNED) &&
command != SMB2_NEGOTIATE_HE &&
@@ -8164,22 +8171,22 @@ bool smb2_is_sign_req(struct ksmbd_work *work, unsigned int command)
*/
int smb2_check_sign_req(struct ksmbd_work *work)
{
- struct smb2_hdr *hdr, *hdr_org;
+ struct smb2_hdr *hdr;
char signature_req[SMB2_SIGNATURE_SIZE];
char signature[SMB2_HMACSHA256_SIZE];
struct kvec iov[1];
size_t len;
- hdr_org = hdr = work->request_buf;
+ hdr = smb2_get_msg(work->request_buf);
if (work->next_smb2_rcv_hdr_off)
hdr = ksmbd_req_buf_next(work);
if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off)
- len = be32_to_cpu(hdr_org->smb2_buf_length);
+ len = get_rfc1002_len(work->request_buf);
else if (hdr->NextCommand)
len = le32_to_cpu(hdr->NextCommand);
else
- len = be32_to_cpu(hdr_org->smb2_buf_length) -
+ len = get_rfc1002_len(work->request_buf) -
work->next_smb2_rcv_hdr_off;
memcpy(signature_req, hdr->Signature, SMB2_SIGNATURE_SIZE);
@@ -8207,25 +8214,26 @@ int smb2_check_sign_req(struct ksmbd_work *work)
*/
void smb2_set_sign_rsp(struct ksmbd_work *work)
{
- struct smb2_hdr *hdr, *hdr_org;
+ struct smb2_hdr *hdr;
struct smb2_hdr *req_hdr;
char signature[SMB2_HMACSHA256_SIZE];
struct kvec iov[2];
size_t len;
int n_vec = 1;
- hdr_org = hdr = work->response_buf;
+ hdr = smb2_get_msg(work->response_buf);
if (work->next_smb2_rsp_hdr_off)
hdr = ksmbd_resp_buf_next(work);
req_hdr = ksmbd_req_buf_next(work);
if (!work->next_smb2_rsp_hdr_off) {
- len = get_rfc1002_len(hdr_org);
+ len = get_rfc1002_len(work->response_buf);
if (req_hdr->NextCommand)
len = ALIGN(len, 8);
} else {
- len = get_rfc1002_len(hdr_org) - work->next_smb2_rsp_hdr_off;
+ len = get_rfc1002_len(work->response_buf) -
+ work->next_smb2_rsp_hdr_off;
len = ALIGN(len, 8);
}
@@ -8261,23 +8269,23 @@ int smb3_check_sign_req(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
char *signing_key;
- struct smb2_hdr *hdr, *hdr_org;
+ struct smb2_hdr *hdr;
struct channel *chann;
char signature_req[SMB2_SIGNATURE_SIZE];
char signature[SMB2_CMACAES_SIZE];
struct kvec iov[1];
size_t len;
- hdr_org = hdr = work->request_buf;
+ hdr = smb2_get_msg(work->request_buf);
if (work->next_smb2_rcv_hdr_off)
hdr = ksmbd_req_buf_next(work);
if (!hdr->NextCommand && !work->next_smb2_rcv_hdr_off)
- len = be32_to_cpu(hdr_org->smb2_buf_length);
+ len = get_rfc1002_len(work->request_buf);
else if (hdr->NextCommand)
len = le32_to_cpu(hdr->NextCommand);
else
- len = be32_to_cpu(hdr_org->smb2_buf_length) -
+ len = get_rfc1002_len(work->request_buf) -
work->next_smb2_rcv_hdr_off;
if (le16_to_cpu(hdr->Command) == SMB2_SESSION_SETUP_HE) {
@@ -8318,8 +8326,7 @@ int smb3_check_sign_req(struct ksmbd_work *work)
void smb3_set_sign_rsp(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_hdr *req_hdr;
- struct smb2_hdr *hdr, *hdr_org;
+ struct smb2_hdr *req_hdr, *hdr;
struct channel *chann;
char signature[SMB2_CMACAES_SIZE];
struct kvec iov[2];
@@ -8327,18 +8334,19 @@ void smb3_set_sign_rsp(struct ksmbd_work *work)
size_t len;
char *signing_key;
- hdr_org = hdr = work->response_buf;
+ hdr = smb2_get_msg(work->response_buf);
if (work->next_smb2_rsp_hdr_off)
hdr = ksmbd_resp_buf_next(work);
req_hdr = ksmbd_req_buf_next(work);
if (!work->next_smb2_rsp_hdr_off) {
- len = get_rfc1002_len(hdr_org);
+ len = get_rfc1002_len(work->response_buf);
if (req_hdr->NextCommand)
len = ALIGN(len, 8);
} else {
- len = get_rfc1002_len(hdr_org) - work->next_smb2_rsp_hdr_off;
+ len = get_rfc1002_len(work->response_buf) -
+ work->next_smb2_rsp_hdr_off;
len = ALIGN(len, 8);
}
@@ -8391,7 +8399,7 @@ void smb3_preauth_hash_rsp(struct ksmbd_work *work)
if (le16_to_cpu(req->Command) == SMB2_NEGOTIATE_HE &&
conn->preauth_info)
- ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp,
+ ksmbd_gen_preauth_integrity_hash(conn, work->response_buf,
conn->preauth_info->Preauth_HashValue);
if (le16_to_cpu(rsp->Command) == SMB2_SESSION_SETUP_HE && sess) {
@@ -8409,35 +8417,34 @@ void smb3_preauth_hash_rsp(struct ksmbd_work *work)
if (!hash_value)
return;
}
- ksmbd_gen_preauth_integrity_hash(conn, (char *)rsp,
+ ksmbd_gen_preauth_integrity_hash(conn, work->response_buf,
hash_value);
}
}
-static void fill_transform_hdr(struct smb2_transform_hdr *tr_hdr, char *old_buf,
- __le16 cipher_type)
+static void fill_transform_hdr(void *tr_buf, char *old_buf, __le16 cipher_type)
{
- struct smb2_hdr *hdr = (struct smb2_hdr *)old_buf;
+ struct smb2_transform_hdr *tr_hdr = tr_buf + 4;
+ struct smb2_hdr *hdr = smb2_get_msg(old_buf);
unsigned int orig_len = get_rfc1002_len(old_buf);
- memset(tr_hdr, 0, sizeof(struct smb2_transform_hdr));
+ memset(tr_buf, 0, sizeof(struct smb2_transform_hdr) + 4);
tr_hdr->ProtocolId = SMB2_TRANSFORM_PROTO_NUM;
tr_hdr->OriginalMessageSize = cpu_to_le32(orig_len);
- tr_hdr->Flags = cpu_to_le16(0x01);
+ tr_hdr->Flags = cpu_to_le16(TRANSFORM_FLAG_ENCRYPTED);
if (cipher_type == SMB2_ENCRYPTION_AES128_GCM ||
cipher_type == SMB2_ENCRYPTION_AES256_GCM)
get_random_bytes(&tr_hdr->Nonce, SMB3_AES_GCM_NONCE);
else
get_random_bytes(&tr_hdr->Nonce, SMB3_AES_CCM_NONCE);
memcpy(&tr_hdr->SessionId, &hdr->SessionId, 8);
- inc_rfc1001_len(tr_hdr, sizeof(struct smb2_transform_hdr) - 4);
- inc_rfc1001_len(tr_hdr, orig_len);
+ inc_rfc1001_len(tr_buf, sizeof(struct smb2_transform_hdr));
+ inc_rfc1001_len(tr_buf, orig_len);
}
int smb3_encrypt_resp(struct ksmbd_work *work)
{
char *buf = work->response_buf;
- struct smb2_transform_hdr *tr_hdr;
struct kvec iov[3];
int rc = -ENOMEM;
int buf_size = 0, rq_nvec = 2 + (work->aux_payload_sz ? 1 : 0);
@@ -8445,15 +8452,15 @@ int smb3_encrypt_resp(struct ksmbd_work *work)
if (ARRAY_SIZE(iov) < rq_nvec)
return -ENOMEM;
- tr_hdr = kzalloc(sizeof(struct smb2_transform_hdr), GFP_KERNEL);
- if (!tr_hdr)
+ work->tr_buf = kzalloc(sizeof(struct smb2_transform_hdr) + 4, GFP_KERNEL);
+ if (!work->tr_buf)
return rc;
/* fill transform header */
- fill_transform_hdr(tr_hdr, buf, work->conn->cipher_type);
+ fill_transform_hdr(work->tr_buf, buf, work->conn->cipher_type);
- iov[0].iov_base = tr_hdr;
- iov[0].iov_len = sizeof(struct smb2_transform_hdr);
+ iov[0].iov_base = work->tr_buf;
+ iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4;
buf_size += iov[0].iov_len - 4;
iov[1].iov_base = buf + 4;
@@ -8473,15 +8480,14 @@ int smb3_encrypt_resp(struct ksmbd_work *work)
return rc;
memmove(buf, iov[1].iov_base, iov[1].iov_len);
- tr_hdr->smb2_buf_length = cpu_to_be32(buf_size);
- work->tr_buf = tr_hdr;
+ *(__be32 *)work->tr_buf = cpu_to_be32(buf_size);
return rc;
}
bool smb3_is_transform_hdr(void *buf)
{
- struct smb2_transform_hdr *trhdr = buf;
+ struct smb2_transform_hdr *trhdr = smb2_get_msg(buf);
return trhdr->ProtocolId == SMB2_TRANSFORM_PROTO_NUM;
}
@@ -8491,12 +8497,10 @@ int smb3_decrypt_req(struct ksmbd_work *work)
struct ksmbd_conn *conn = work->conn;
struct ksmbd_session *sess;
char *buf = work->request_buf;
- struct smb2_hdr *hdr;
unsigned int pdu_length = get_rfc1002_len(buf);
struct kvec iov[2];
- int buf_data_size = pdu_length + 4 -
- sizeof(struct smb2_transform_hdr);
- struct smb2_transform_hdr *tr_hdr = (struct smb2_transform_hdr *)buf;
+ int buf_data_size = pdu_length - sizeof(struct smb2_transform_hdr);
+ struct smb2_transform_hdr *tr_hdr = smb2_get_msg(buf);
int rc = 0;
if (buf_data_size < sizeof(struct smb2_hdr)) {
@@ -8518,16 +8522,15 @@ int smb3_decrypt_req(struct ksmbd_work *work)
}
iov[0].iov_base = buf;
- iov[0].iov_len = sizeof(struct smb2_transform_hdr);
- iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr);
+ iov[0].iov_len = sizeof(struct smb2_transform_hdr) + 4;
+ iov[1].iov_base = buf + sizeof(struct smb2_transform_hdr) + 4;
iov[1].iov_len = buf_data_size;
rc = ksmbd_crypt_message(conn, iov, 2, 0);
if (rc)
return rc;
memmove(buf + 4, iov[1].iov_base, buf_data_size);
- hdr = (struct smb2_hdr *)buf;
- hdr->smb2_buf_length = cpu_to_be32(buf_data_size);
+ *(__be32 *)buf = cpu_to_be32(buf_data_size);
return rc;
}
@@ -8535,7 +8538,7 @@ int smb3_decrypt_req(struct ksmbd_work *work)
bool smb3_11_final_sess_setup_resp(struct ksmbd_work *work)
{
struct ksmbd_conn *conn = work->conn;
- struct smb2_hdr *rsp = work->response_buf;
+ struct smb2_hdr *rsp = smb2_get_msg(work->response_buf);
if (conn->dialect < SMB30_PROT_ID)
return false;
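
The smb2pdu.c hunks above all follow one pattern: raw casts of work->request_buf / work->response_buf become smb2_get_msg(), and inc_rfc1001_len() / get_rfc1002_len() take the raw buffer instead of the struct pointer, because the shared smb2_hdr no longer embeds the 4-byte RFC1002 length. The following is a minimal user-space sketch of that buffer layout, not kernel code: the helper names mirror the diff, but the types, the 64-byte header size constant, and the main() harness are illustrative assumptions only.

#include <stdint.h>
#include <stdio.h>

/* 4-byte big-endian RFC1002 length that precedes every SMB2 PDU on the wire. */
static uint32_t get_rfc1002_len(const void *buf)
{
	const uint8_t *p = buf;

	return ((uint32_t)p[0] << 24) | ((uint32_t)p[1] << 16) |
	       ((uint32_t)p[2] << 8) | (uint32_t)p[3];
}

static void set_rfc1002_len(void *buf, uint32_t len)
{
	uint8_t *p = buf;

	p[0] = len >> 24;
	p[1] = len >> 16;
	p[2] = len >> 8;
	p[3] = len;
}

/* Grow the RFC1002 length as response fields are appended. */
static void inc_rfc1001_len(void *buf, uint32_t count)
{
	set_rfc1002_len(buf, get_rfc1002_len(buf) + count);
}

/* Body of the SMB2 message, skipping the 4-byte length prefix. */
static void *smb2_get_msg(void *buf)
{
	return (uint8_t *)buf + 4;
}

int main(void)
{
	uint8_t response_buf[256] = { 0 };
	const uint32_t hdr_size = 64;	/* stand-in for sizeof(struct smb2_hdr) */

	set_rfc1002_len(response_buf, hdr_size);	/* header already present */
	inc_rfc1001_len(response_buf, 4);		/* e.g. an echo response body */

	printf("payload at offset %td, rfc1002 len = %u\n",
	       (uint8_t *)smb2_get_msg(response_buf) - response_buf,
	       get_rfc1002_len(response_buf));
	return 0;
}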
diff --git a/fs/ksmbd/smb2pdu.h b/fs/ksmbd/smb2pdu.h
index ff5a2f01d34a..4a3e4339d4c4 100644
--- a/fs/ksmbd/smb2pdu.h
+++ b/fs/ksmbd/smb2pdu.h
@@ -10,60 +10,6 @@
#include "ntlmssp.h"
#include "smbacl.h"
-/*
- * Note that, due to trying to use names similar to the protocol specifications,
- * there are many mixed case field names in the structures below. Although
- * this does not match typical Linux kernel style, it is necessary to be
- * able to match against the protocol specfication.
- *
- * SMB2 commands
- * Some commands have minimal (wct=0,bcc=0), or uninteresting, responses
- * (ie no useful data other than the SMB error code itself) and are marked such.
- * Knowing this helps avoid response buffer allocations and copy in some cases.
- */
-
-/* List of commands in host endian */
-#define SMB2_NEGOTIATE_HE 0x0000
-#define SMB2_SESSION_SETUP_HE 0x0001
-#define SMB2_LOGOFF_HE 0x0002 /* trivial request/resp */
-#define SMB2_TREE_CONNECT_HE 0x0003
-#define SMB2_TREE_DISCONNECT_HE 0x0004 /* trivial req/resp */
-#define SMB2_CREATE_HE 0x0005
-#define SMB2_CLOSE_HE 0x0006
-#define SMB2_FLUSH_HE 0x0007 /* trivial resp */
-#define SMB2_READ_HE 0x0008
-#define SMB2_WRITE_HE 0x0009
-#define SMB2_LOCK_HE 0x000A
-#define SMB2_IOCTL_HE 0x000B
-#define SMB2_CANCEL_HE 0x000C
-#define SMB2_ECHO_HE 0x000D
-#define SMB2_QUERY_DIRECTORY_HE 0x000E
-#define SMB2_CHANGE_NOTIFY_HE 0x000F
-#define SMB2_QUERY_INFO_HE 0x0010
-#define SMB2_SET_INFO_HE 0x0011
-#define SMB2_OPLOCK_BREAK_HE 0x0012
-
-/* The same list in little endian */
-#define SMB2_NEGOTIATE cpu_to_le16(SMB2_NEGOTIATE_HE)
-#define SMB2_SESSION_SETUP cpu_to_le16(SMB2_SESSION_SETUP_HE)
-#define SMB2_LOGOFF cpu_to_le16(SMB2_LOGOFF_HE)
-#define SMB2_TREE_CONNECT cpu_to_le16(SMB2_TREE_CONNECT_HE)
-#define SMB2_TREE_DISCONNECT cpu_to_le16(SMB2_TREE_DISCONNECT_HE)
-#define SMB2_CREATE cpu_to_le16(SMB2_CREATE_HE)
-#define SMB2_CLOSE cpu_to_le16(SMB2_CLOSE_HE)
-#define SMB2_FLUSH cpu_to_le16(SMB2_FLUSH_HE)
-#define SMB2_READ cpu_to_le16(SMB2_READ_HE)
-#define SMB2_WRITE cpu_to_le16(SMB2_WRITE_HE)
-#define SMB2_LOCK cpu_to_le16(SMB2_LOCK_HE)
-#define SMB2_IOCTL cpu_to_le16(SMB2_IOCTL_HE)
-#define SMB2_CANCEL cpu_to_le16(SMB2_CANCEL_HE)
-#define SMB2_ECHO cpu_to_le16(SMB2_ECHO_HE)
-#define SMB2_QUERY_DIRECTORY cpu_to_le16(SMB2_QUERY_DIRECTORY_HE)
-#define SMB2_CHANGE_NOTIFY cpu_to_le16(SMB2_CHANGE_NOTIFY_HE)
-#define SMB2_QUERY_INFO cpu_to_le16(SMB2_QUERY_INFO_HE)
-#define SMB2_SET_INFO cpu_to_le16(SMB2_SET_INFO_HE)
-#define SMB2_OPLOCK_BREAK cpu_to_le16(SMB2_OPLOCK_BREAK_HE)
-
/*Create Action Flags*/
#define FILE_SUPERSEDED 0x00000000
#define FILE_OPENED 0x00000001
@@ -96,9 +42,6 @@
/* SMB2 Max Credits */
#define SMB2_MAX_CREDITS 8192
-#define SMB2_CLIENT_GUID_SIZE 16
-#define SMB2_CREATE_GUID_SIZE 16
-
/* Maximum buffer size value we can send with 1 credit */
#define SMB2_MAX_BUFFER_SIZE 65536
@@ -107,9 +50,6 @@
/* BB FIXME - analyze following length BB */
#define MAX_SMB2_HDR_SIZE 0x78 /* 4 len + 64 hdr + (2*24 wct) + 2 bct + 2 pad */
-#define SMB2_PROTO_NUMBER cpu_to_le32(0x424d53fe) /* 'B''M''S' */
-#define SMB2_TRANSFORM_PROTO_NUM cpu_to_le32(0x424d53fd)
-
#define SMB21_DEFAULT_IOSIZE (1024 * 1024)
#define SMB3_DEFAULT_IOSIZE (4 * 1024 * 1024)
#define SMB3_DEFAULT_TRANS_SIZE (1024 * 1024)
@@ -117,78 +57,6 @@
#define SMB3_MAX_IOSIZE (8 * 1024 * 1024)
/*
- * SMB2 Header Definition
- *
- * "MBZ" : Must be Zero
- * "BB" : BugBug, Something to check/review/analyze later
- * "PDU" : "Protocol Data Unit" (ie a network "frame")
- *
- */
-
-#define __SMB2_HEADER_STRUCTURE_SIZE 64
-#define SMB2_HEADER_STRUCTURE_SIZE \
- cpu_to_le16(__SMB2_HEADER_STRUCTURE_SIZE)
-
-struct smb2_hdr {
- __be32 smb2_buf_length; /* big endian on wire */
- /*
- * length is only two or three bytes - with
- * one or two byte type preceding it that MBZ
- */
- __le32 ProtocolId; /* 0xFE 'S' 'M' 'B' */
- __le16 StructureSize; /* 64 */
- __le16 CreditCharge; /* MBZ */
- __le32 Status; /* Error from server */
- __le16 Command;
- __le16 CreditRequest; /* CreditResponse */
- __le32 Flags;
- __le32 NextCommand;
- __le64 MessageId;
- union {
- struct {
- __le32 ProcessId;
- __le32 TreeId;
- } __packed SyncId;
- __le64 AsyncId;
- } __packed Id;
- __le64 SessionId;
- __u8 Signature[16];
-} __packed;
-
-struct smb2_pdu {
- struct smb2_hdr hdr;
- __le16 StructureSize2; /* size of wct area (varies, request specific) */
-} __packed;
-
-#define SMB3_AES_CCM_NONCE 11
-#define SMB3_AES_GCM_NONCE 12
-
-struct smb2_transform_hdr {
- __be32 smb2_buf_length; /* big endian on wire */
- /*
- * length is only two or three bytes - with
- * one or two byte type preceding it that MBZ
- */
- __le32 ProtocolId; /* 0xFD 'S' 'M' 'B' */
- __u8 Signature[16];
- __u8 Nonce[16];
- __le32 OriginalMessageSize;
- __u16 Reserved1;
- __le16 Flags; /* EncryptionAlgorithm */
- __le64 SessionId;
-} __packed;
-
-/*
- * SMB2 flag definitions
- */
-#define SMB2_FLAGS_SERVER_TO_REDIR cpu_to_le32(0x00000001)
-#define SMB2_FLAGS_ASYNC_COMMAND cpu_to_le32(0x00000002)
-#define SMB2_FLAGS_RELATED_OPERATIONS cpu_to_le32(0x00000004)
-#define SMB2_FLAGS_SIGNED cpu_to_le32(0x00000008)
-#define SMB2_FLAGS_DFS_OPERATIONS cpu_to_le32(0x10000000)
-#define SMB2_FLAGS_REPLAY_OPERATIONS cpu_to_le32(0x20000000)
-
-/*
* Definitions for SMB2 Protocol Data Units (network frames)
*
* See MS-SMB2.PDF specification for protocol details.
@@ -209,425 +77,30 @@ struct smb2_err_rsp {
__u8 ErrorData[1]; /* variable length */
} __packed;
-struct smb2_negotiate_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 36 */
- __le16 DialectCount;
- __le16 SecurityMode;
- __le16 Reserved; /* MBZ */
- __le32 Capabilities;
- __u8 ClientGUID[SMB2_CLIENT_GUID_SIZE];
- /* In SMB3.02 and earlier next three were MBZ le64 ClientStartTime */
- __le32 NegotiateContextOffset; /* SMB3.1.1 only. MBZ earlier */
- __le16 NegotiateContextCount; /* SMB3.1.1 only. MBZ earlier */
- __le16 Reserved2;
- __le16 Dialects[1]; /* One dialect (vers=) at a time for now */
-} __packed;
-
-/* SecurityMode flags */
-#define SMB2_NEGOTIATE_SIGNING_ENABLED_LE cpu_to_le16(0x0001)
-#define SMB2_NEGOTIATE_SIGNING_REQUIRED 0x0002
-#define SMB2_NEGOTIATE_SIGNING_REQUIRED_LE cpu_to_le16(0x0002)
-/* Capabilities flags */
-#define SMB2_GLOBAL_CAP_DFS 0x00000001
-#define SMB2_GLOBAL_CAP_LEASING 0x00000002 /* Resp only New to SMB2.1 */
-#define SMB2_GLOBAL_CAP_LARGE_MTU 0X00000004 /* Resp only New to SMB2.1 */
-#define SMB2_GLOBAL_CAP_MULTI_CHANNEL 0x00000008 /* New to SMB3 */
-#define SMB2_GLOBAL_CAP_PERSISTENT_HANDLES 0x00000010 /* New to SMB3 */
-#define SMB2_GLOBAL_CAP_DIRECTORY_LEASING 0x00000020 /* New to SMB3 */
-#define SMB2_GLOBAL_CAP_ENCRYPTION 0x00000040 /* New to SMB3 */
-/* Internal types */
-#define SMB2_NT_FIND 0x00100000
-#define SMB2_LARGE_FILES 0x00200000
-
-#define SMB311_SALT_SIZE 32
-/* Hash Algorithm Types */
-#define SMB2_PREAUTH_INTEGRITY_SHA512 cpu_to_le16(0x0001)
-
-#define PREAUTH_HASHVALUE_SIZE 64
-
struct preauth_integrity_info {
/* PreAuth integrity Hash ID */
__le16 Preauth_HashId;
/* PreAuth integrity Hash Value */
- __u8 Preauth_HashValue[PREAUTH_HASHVALUE_SIZE];
+ __u8 Preauth_HashValue[SMB2_PREAUTH_HASH_SIZE];
};
-/* offset is sizeof smb2_negotiate_rsp - 4 but rounded up to 8 bytes. */
+/* offset is sizeof smb2_negotiate_rsp but rounded up to 8 bytes. */
#ifdef CONFIG_SMB_SERVER_KERBEROS5
-/* sizeof(struct smb2_negotiate_rsp) - 4 =
+/* sizeof(struct smb2_negotiate_rsp) =
* header(64) + response(64) + GSS_LENGTH(96) + GSS_PADDING(0)
*/
#define OFFSET_OF_NEG_CONTEXT 0xe0
#else
-/* sizeof(struct smb2_negotiate_rsp) - 4 =
+/* sizeof(struct smb2_negotiate_rsp) =
* header(64) + response(64) + GSS_LENGTH(74) + GSS_PADDING(6)
*/
#define OFFSET_OF_NEG_CONTEXT 0xd0
#endif
-#define SMB2_PREAUTH_INTEGRITY_CAPABILITIES cpu_to_le16(1)
-#define SMB2_ENCRYPTION_CAPABILITIES cpu_to_le16(2)
-#define SMB2_COMPRESSION_CAPABILITIES cpu_to_le16(3)
-#define SMB2_NETNAME_NEGOTIATE_CONTEXT_ID cpu_to_le16(5)
-#define SMB2_SIGNING_CAPABILITIES cpu_to_le16(8)
-#define SMB2_POSIX_EXTENSIONS_AVAILABLE cpu_to_le16(0x100)
-
-struct smb2_neg_context {
- __le16 ContextType;
- __le16 DataLength;
- __le32 Reserved;
- /* Followed by array of data */
-} __packed;
-
-struct smb2_preauth_neg_context {
- __le16 ContextType; /* 1 */
- __le16 DataLength;
- __le32 Reserved;
- __le16 HashAlgorithmCount; /* 1 */
- __le16 SaltLength;
- __le16 HashAlgorithms; /* HashAlgorithms[0] since only one defined */
- __u8 Salt[SMB311_SALT_SIZE];
-} __packed;
-
-/* Encryption Algorithms Ciphers */
-#define SMB2_ENCRYPTION_AES128_CCM cpu_to_le16(0x0001)
-#define SMB2_ENCRYPTION_AES128_GCM cpu_to_le16(0x0002)
-#define SMB2_ENCRYPTION_AES256_CCM cpu_to_le16(0x0003)
-#define SMB2_ENCRYPTION_AES256_GCM cpu_to_le16(0x0004)
-
-struct smb2_encryption_neg_context {
- __le16 ContextType; /* 2 */
- __le16 DataLength;
- __le32 Reserved;
- /* CipherCount usally 2, but can be 3 when AES256-GCM enabled */
- __le16 CipherCount; /* AES-128-GCM and AES-128-CCM by default */
- __le16 Ciphers[];
-} __packed;
-
-#define SMB3_COMPRESS_NONE cpu_to_le16(0x0000)
-#define SMB3_COMPRESS_LZNT1 cpu_to_le16(0x0001)
-#define SMB3_COMPRESS_LZ77 cpu_to_le16(0x0002)
-#define SMB3_COMPRESS_LZ77_HUFF cpu_to_le16(0x0003)
-
-struct smb2_compression_ctx {
- __le16 ContextType; /* 3 */
- __le16 DataLength;
- __le32 Reserved;
- __le16 CompressionAlgorithmCount;
- __u16 Padding;
- __le32 Reserved1;
- __le16 CompressionAlgorithms[];
-} __packed;
-
-#define POSIX_CTXT_DATA_LEN 16
-struct smb2_posix_neg_context {
- __le16 ContextType; /* 0x100 */
- __le16 DataLength;
- __le32 Reserved;
- __u8 Name[16]; /* POSIX ctxt GUID 93AD25509CB411E7B42383DE968BCD7C */
-} __packed;
-
-struct smb2_netname_neg_context {
- __le16 ContextType; /* 0x100 */
- __le16 DataLength;
- __le32 Reserved;
- __le16 NetName[]; /* hostname of target converted to UCS-2 */
-} __packed;
-
-/* Signing algorithms */
-#define SIGNING_ALG_HMAC_SHA256 cpu_to_le16(0)
-#define SIGNING_ALG_AES_CMAC cpu_to_le16(1)
-#define SIGNING_ALG_AES_GMAC cpu_to_le16(2)
-
-struct smb2_signing_capabilities {
- __le16 ContextType; /* 8 */
- __le16 DataLength;
- __le32 Reserved;
- __le16 SigningAlgorithmCount;
- __le16 SigningAlgorithms[];
-} __packed;
-
-struct smb2_negotiate_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 65 */
- __le16 SecurityMode;
- __le16 DialectRevision;
- __le16 NegotiateContextCount; /* Prior to SMB3.1.1 was Reserved & MBZ */
- __u8 ServerGUID[16];
- __le32 Capabilities;
- __le32 MaxTransactSize;
- __le32 MaxReadSize;
- __le32 MaxWriteSize;
- __le64 SystemTime; /* MBZ */
- __le64 ServerStartTime;
- __le16 SecurityBufferOffset;
- __le16 SecurityBufferLength;
- __le32 NegotiateContextOffset; /* Pre:SMB3.1.1 was reserved/ignored */
- __u8 Buffer[1]; /* variable length GSS security buffer */
-} __packed;
-
-/* Flags */
-#define SMB2_SESSION_REQ_FLAG_BINDING 0x01
-#define SMB2_SESSION_REQ_FLAG_ENCRYPT_DATA 0x04
-
#define SMB2_SESSION_EXPIRED (0)
#define SMB2_SESSION_IN_PROGRESS BIT(0)
#define SMB2_SESSION_VALID BIT(1)
-/* Flags */
-#define SMB2_SESSION_REQ_FLAG_BINDING 0x01
-#define SMB2_SESSION_REQ_FLAG_ENCRYPT_DATA 0x04
-
-struct smb2_sess_setup_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 25 */
- __u8 Flags;
- __u8 SecurityMode;
- __le32 Capabilities;
- __le32 Channel;
- __le16 SecurityBufferOffset;
- __le16 SecurityBufferLength;
- __le64 PreviousSessionId;
- __u8 Buffer[1]; /* variable length GSS security buffer */
-} __packed;
-
-/* Flags/Reserved for SMB3.1.1 */
-#define SMB2_SHAREFLAG_CLUSTER_RECONNECT 0x0001
-
-/* Currently defined SessionFlags */
-#define SMB2_SESSION_FLAG_IS_GUEST_LE cpu_to_le16(0x0001)
-#define SMB2_SESSION_FLAG_IS_NULL_LE cpu_to_le16(0x0002)
-#define SMB2_SESSION_FLAG_ENCRYPT_DATA_LE cpu_to_le16(0x0004)
-struct smb2_sess_setup_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 SessionFlags;
- __le16 SecurityBufferOffset;
- __le16 SecurityBufferLength;
- __u8 Buffer[1]; /* variable length GSS security buffer */
-} __packed;
-
-struct smb2_logoff_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_logoff_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_tree_connect_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 Reserved; /* Flags in SMB3.1.1 */
- __le16 PathOffset;
- __le16 PathLength;
- __u8 Buffer[1]; /* variable length */
-} __packed;
-
-struct smb2_tree_connect_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 16 */
- __u8 ShareType; /* see below */
- __u8 Reserved;
- __le32 ShareFlags; /* see below */
- __le32 Capabilities; /* see below */
- __le32 MaximalAccess;
-} __packed;
-
-/* Possible ShareType values */
-#define SMB2_SHARE_TYPE_DISK 0x01
-#define SMB2_SHARE_TYPE_PIPE 0x02
-#define SMB2_SHARE_TYPE_PRINT 0x03
-
-/*
- * Possible ShareFlags - exactly one and only one of the first 4 caching flags
- * must be set (any of the remaining, SHI1005, flags may be set individually
- * or in combination.
- */
-#define SMB2_SHAREFLAG_MANUAL_CACHING 0x00000000
-#define SMB2_SHAREFLAG_AUTO_CACHING 0x00000010
-#define SMB2_SHAREFLAG_VDO_CACHING 0x00000020
-#define SMB2_SHAREFLAG_NO_CACHING 0x00000030
-#define SHI1005_FLAGS_DFS 0x00000001
-#define SHI1005_FLAGS_DFS_ROOT 0x00000002
-#define SHI1005_FLAGS_RESTRICT_EXCLUSIVE_OPENS 0x00000100
-#define SHI1005_FLAGS_FORCE_SHARED_DELETE 0x00000200
-#define SHI1005_FLAGS_ALLOW_NAMESPACE_CACHING 0x00000400
-#define SHI1005_FLAGS_ACCESS_BASED_DIRECTORY_ENUM 0x00000800
-#define SHI1005_FLAGS_FORCE_LEVELII_OPLOCK 0x00001000
-#define SHI1005_FLAGS_ENABLE_HASH 0x00002000
-
-/* Possible share capabilities */
-#define SMB2_SHARE_CAP_DFS cpu_to_le32(0x00000008)
-
-struct smb2_tree_disconnect_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-struct smb2_tree_disconnect_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 4 */
- __le16 Reserved;
-} __packed;
-
-#define ATTR_READONLY_LE cpu_to_le32(ATTR_READONLY)
-#define ATTR_HIDDEN_LE cpu_to_le32(ATTR_HIDDEN)
-#define ATTR_SYSTEM_LE cpu_to_le32(ATTR_SYSTEM)
-#define ATTR_DIRECTORY_LE cpu_to_le32(ATTR_DIRECTORY)
-#define ATTR_ARCHIVE_LE cpu_to_le32(ATTR_ARCHIVE)
-#define ATTR_NORMAL_LE cpu_to_le32(ATTR_NORMAL)
-#define ATTR_TEMPORARY_LE cpu_to_le32(ATTR_TEMPORARY)
-#define ATTR_SPARSE_FILE_LE cpu_to_le32(ATTR_SPARSE)
-#define ATTR_REPARSE_POINT_LE cpu_to_le32(ATTR_REPARSE)
-#define ATTR_COMPRESSED_LE cpu_to_le32(ATTR_COMPRESSED)
-#define ATTR_OFFLINE_LE cpu_to_le32(ATTR_OFFLINE)
-#define ATTR_NOT_CONTENT_INDEXED_LE cpu_to_le32(ATTR_NOT_CONTENT_INDEXED)
-#define ATTR_ENCRYPTED_LE cpu_to_le32(ATTR_ENCRYPTED)
-#define ATTR_INTEGRITY_STREAML_LE cpu_to_le32(0x00008000)
-#define ATTR_NO_SCRUB_DATA_LE cpu_to_le32(0x00020000)
-#define ATTR_MASK_LE cpu_to_le32(0x00007FB7)
-
-/* Oplock levels */
-#define SMB2_OPLOCK_LEVEL_NONE 0x00
-#define SMB2_OPLOCK_LEVEL_II 0x01
-#define SMB2_OPLOCK_LEVEL_EXCLUSIVE 0x08
-#define SMB2_OPLOCK_LEVEL_BATCH 0x09
-#define SMB2_OPLOCK_LEVEL_LEASE 0xFF
-/* Non-spec internal type */
-#define SMB2_OPLOCK_LEVEL_NOCHANGE 0x99
-
-/* Desired Access Flags */
-#define FILE_READ_DATA_LE cpu_to_le32(0x00000001)
-#define FILE_LIST_DIRECTORY_LE cpu_to_le32(0x00000001)
-#define FILE_WRITE_DATA_LE cpu_to_le32(0x00000002)
-#define FILE_ADD_FILE_LE cpu_to_le32(0x00000002)
-#define FILE_APPEND_DATA_LE cpu_to_le32(0x00000004)
-#define FILE_ADD_SUBDIRECTORY_LE cpu_to_le32(0x00000004)
-#define FILE_READ_EA_LE cpu_to_le32(0x00000008)
-#define FILE_WRITE_EA_LE cpu_to_le32(0x00000010)
-#define FILE_EXECUTE_LE cpu_to_le32(0x00000020)
-#define FILE_TRAVERSE_LE cpu_to_le32(0x00000020)
-#define FILE_DELETE_CHILD_LE cpu_to_le32(0x00000040)
-#define FILE_READ_ATTRIBUTES_LE cpu_to_le32(0x00000080)
-#define FILE_WRITE_ATTRIBUTES_LE cpu_to_le32(0x00000100)
-#define FILE_DELETE_LE cpu_to_le32(0x00010000)
-#define FILE_READ_CONTROL_LE cpu_to_le32(0x00020000)
-#define FILE_WRITE_DAC_LE cpu_to_le32(0x00040000)
-#define FILE_WRITE_OWNER_LE cpu_to_le32(0x00080000)
-#define FILE_SYNCHRONIZE_LE cpu_to_le32(0x00100000)
-#define FILE_ACCESS_SYSTEM_SECURITY_LE cpu_to_le32(0x01000000)
-#define FILE_MAXIMAL_ACCESS_LE cpu_to_le32(0x02000000)
-#define FILE_GENERIC_ALL_LE cpu_to_le32(0x10000000)
-#define FILE_GENERIC_EXECUTE_LE cpu_to_le32(0x20000000)
-#define FILE_GENERIC_WRITE_LE cpu_to_le32(0x40000000)
-#define FILE_GENERIC_READ_LE cpu_to_le32(0x80000000)
-#define DESIRED_ACCESS_MASK cpu_to_le32(0xF21F01FF)
-
-/* ShareAccess Flags */
-#define FILE_SHARE_READ_LE cpu_to_le32(0x00000001)
-#define FILE_SHARE_WRITE_LE cpu_to_le32(0x00000002)
-#define FILE_SHARE_DELETE_LE cpu_to_le32(0x00000004)
-#define FILE_SHARE_ALL_LE cpu_to_le32(0x00000007)
-
-/* CreateDisposition Flags */
-#define FILE_SUPERSEDE_LE cpu_to_le32(0x00000000)
-#define FILE_OPEN_LE cpu_to_le32(0x00000001)
-#define FILE_CREATE_LE cpu_to_le32(0x00000002)
-#define FILE_OPEN_IF_LE cpu_to_le32(0x00000003)
-#define FILE_OVERWRITE_LE cpu_to_le32(0x00000004)
-#define FILE_OVERWRITE_IF_LE cpu_to_le32(0x00000005)
-#define FILE_CREATE_MASK_LE cpu_to_le32(0x00000007)
-
-#define FILE_READ_DESIRED_ACCESS_LE (FILE_READ_DATA_LE | \
- FILE_READ_EA_LE | \
- FILE_GENERIC_READ_LE)
-#define FILE_WRITE_DESIRE_ACCESS_LE (FILE_WRITE_DATA_LE | \
- FILE_APPEND_DATA_LE | \
- FILE_WRITE_EA_LE | \
- FILE_WRITE_ATTRIBUTES_LE | \
- FILE_GENERIC_WRITE_LE)
-
-/* Impersonation Levels */
-#define IL_ANONYMOUS_LE cpu_to_le32(0x00000000)
-#define IL_IDENTIFICATION_LE cpu_to_le32(0x00000001)
-#define IL_IMPERSONATION_LE cpu_to_le32(0x00000002)
-#define IL_DELEGATE_LE cpu_to_le32(0x00000003)
-
-/* Create Context Values */
-#define SMB2_CREATE_EA_BUFFER "ExtA" /* extended attributes */
-#define SMB2_CREATE_SD_BUFFER "SecD" /* security descriptor */
-#define SMB2_CREATE_DURABLE_HANDLE_REQUEST "DHnQ"
-#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT "DHnC"
-#define SMB2_CREATE_ALLOCATION_SIZE "AlSi"
-#define SMB2_CREATE_QUERY_MAXIMAL_ACCESS_REQUEST "MxAc"
-#define SMB2_CREATE_TIMEWARP_REQUEST "TWrp"
-#define SMB2_CREATE_QUERY_ON_DISK_ID "QFid"
-#define SMB2_CREATE_REQUEST_LEASE "RqLs"
-#define SMB2_CREATE_DURABLE_HANDLE_REQUEST_V2 "DH2Q"
-#define SMB2_CREATE_DURABLE_HANDLE_RECONNECT_V2 "DH2C"
-#define SMB2_CREATE_APP_INSTANCE_ID "\x45\xBC\xA6\x6A\xEF\xA7\xF7\x4A\x90\x08\xFA\x46\x2E\x14\x4D\x74"
- #define SMB2_CREATE_APP_INSTANCE_VERSION "\xB9\x82\xD0\xB7\x3B\x56\x07\x4F\xA0\x7B\x52\x4A\x81\x16\xA0\x10"
-#define SVHDX_OPEN_DEVICE_CONTEXT 0x83CE6F1AD851E0986E34401CC9BCFCE9
-#define SMB2_CREATE_TAG_POSIX "\x93\xAD\x25\x50\x9C\xB4\x11\xE7\xB4\x23\x83\xDE\x96\x8B\xCD\x7C"
-
-struct smb2_create_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 57 */
- __u8 SecurityFlags;
- __u8 RequestedOplockLevel;
- __le32 ImpersonationLevel;
- __le64 SmbCreateFlags;
- __le64 Reserved;
- __le32 DesiredAccess;
- __le32 FileAttributes;
- __le32 ShareAccess;
- __le32 CreateDisposition;
- __le32 CreateOptions;
- __le16 NameOffset;
- __le16 NameLength;
- __le32 CreateContextsOffset;
- __le32 CreateContextsLength;
- __u8 Buffer[0];
-} __packed;
-
-struct smb2_create_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 89 */
- __u8 OplockLevel;
- __u8 Reserved;
- __le32 CreateAction;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 AllocationSize;
- __le64 EndofFile;
- __le32 FileAttributes;
- __le32 Reserved2;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 CreateContextsOffset;
- __le32 CreateContextsLength;
- __u8 Buffer[1];
-} __packed;
-
-struct create_context {
- __le32 Next;
- __le16 NameOffset;
- __le16 NameLength;
- __le16 Reserved;
- __le16 DataOffset;
- __le32 DataLength;
- __u8 Buffer[0];
-} __packed;
-
struct create_durable_req_v2 {
struct create_context ccontext;
__u8 Name[8];
@@ -743,22 +216,21 @@ struct create_posix_rsp {
#define SMB2_LEASE_FLAG_BREAK_IN_PROGRESS_LE cpu_to_le32(0x02)
+#define SMB2_LEASE_KEY_SIZE 16
+
struct lease_context {
- __le64 LeaseKeyLow;
- __le64 LeaseKeyHigh;
+ __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
__le32 LeaseState;
__le32 LeaseFlags;
__le64 LeaseDuration;
} __packed;
struct lease_context_v2 {
- __le64 LeaseKeyLow;
- __le64 LeaseKeyHigh;
+ __u8 LeaseKey[SMB2_LEASE_KEY_SIZE];
__le32 LeaseState;
__le32 LeaseFlags;
__le64 LeaseDuration;
- __le64 ParentLeaseKeyLow;
- __le64 ParentLeaseKeyHigh;
+ __u8 ParentLeaseKey[SMB2_LEASE_KEY_SIZE];
__le16 Epoch;
__le16 Reserved;
} __packed;
@@ -776,114 +248,12 @@ struct create_lease_v2 {
__u8 Pad[4];
} __packed;
-/* Currently defined values for close flags */
-#define SMB2_CLOSE_FLAG_POSTQUERY_ATTRIB cpu_to_le16(0x0001)
-struct smb2_close_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 24 */
- __le16 Flags;
- __le32 Reserved;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
-} __packed;
-
-struct smb2_close_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* 60 */
- __le16 Flags;
- __le32 Reserved;
- __le64 CreationTime;
- __le64 LastAccessTime;
- __le64 LastWriteTime;
- __le64 ChangeTime;
- __le64 AllocationSize; /* Beginning of FILE_STANDARD_INFO equivalent */
- __le64 EndOfFile;
- __le32 Attributes;
-} __packed;
-
-struct smb2_flush_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 24 */
- __le16 Reserved1;
- __le32 Reserved2;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
-} __packed;
-
-struct smb2_flush_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize;
- __le16 Reserved;
-} __packed;
-
struct smb2_buffer_desc_v1 {
__le64 offset;
__le32 token;
__le32 length;
} __packed;
-#define SMB2_CHANNEL_NONE cpu_to_le32(0x00000000)
-#define SMB2_CHANNEL_RDMA_V1 cpu_to_le32(0x00000001)
-#define SMB2_CHANNEL_RDMA_V1_INVALIDATE cpu_to_le32(0x00000002)
-
-struct smb2_read_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 49 */
- __u8 Padding; /* offset from start of SMB2 header to place read */
- __u8 Reserved;
- __le32 Length;
- __le64 Offset;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 MinimumCount;
- __le32 Channel; /* Reserved MBZ */
- __le32 RemainingBytes;
- __le16 ReadChannelInfoOffset; /* Reserved MBZ */
- __le16 ReadChannelInfoLength; /* Reserved MBZ */
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_read_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 17 */
- __u8 DataOffset;
- __u8 Reserved;
- __le32 DataLength;
- __le32 DataRemaining;
- __u32 Reserved2;
- __u8 Buffer[1];
-} __packed;
-
-/* For write request Flags field below the following flag is defined: */
-#define SMB2_WRITEFLAG_WRITE_THROUGH 0x00000001
-
-struct smb2_write_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 49 */
- __le16 DataOffset; /* offset from start of SMB2 header to write data */
- __le32 Length;
- __le64 Offset;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __le32 Channel; /* Reserved MBZ */
- __le32 RemainingBytes;
- __le16 WriteChannelInfoOffset; /* Reserved MBZ */
- __le16 WriteChannelInfoLength; /* Reserved MBZ */
- __le32 Flags;
- __u8 Buffer[1];
-} __packed;
-
-struct smb2_write_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 17 */
- __u8 DataOffset;
- __u8 Reserved;
- __le32 DataLength;
- __le32 DataRemaining;
- __u32 Reserved2;
- __u8 Buffer[1];
-} __packed;
-
#define SMB2_0_IOCTL_IS_FSCTL 0x00000001
struct duplicate_extents_to_file {
@@ -1033,43 +403,6 @@ struct reparse_data_buffer {
__u8 DataBuffer[]; /* Variable Length */
} __packed;
-/* Completion Filter flags for Notify */
-#define FILE_NOTIFY_CHANGE_FILE_NAME 0x00000001
-#define FILE_NOTIFY_CHANGE_DIR_NAME 0x00000002
-#define FILE_NOTIFY_CHANGE_NAME 0x00000003
-#define FILE_NOTIFY_CHANGE_ATTRIBUTES 0x00000004
-#define FILE_NOTIFY_CHANGE_SIZE 0x00000008
-#define FILE_NOTIFY_CHANGE_LAST_WRITE 0x00000010
-#define FILE_NOTIFY_CHANGE_LAST_ACCESS 0x00000020
-#define FILE_NOTIFY_CHANGE_CREATION 0x00000040
-#define FILE_NOTIFY_CHANGE_EA 0x00000080
-#define FILE_NOTIFY_CHANGE_SECURITY 0x00000100
-#define FILE_NOTIFY_CHANGE_STREAM_NAME 0x00000200
-#define FILE_NOTIFY_CHANGE_STREAM_SIZE 0x00000400
-#define FILE_NOTIFY_CHANGE_STREAM_WRITE 0x00000800
-
-/* Flags */
-#define SMB2_WATCH_TREE 0x0001
-
-struct smb2_notify_req {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 32 */
- __le16 Flags;
- __le32 OutputBufferLength;
- __le64 PersistentFileId;
- __le64 VolatileFileId;
- __u32 CompletionFileter;
- __u32 Reserved;
-} __packed;
-
-struct smb2_notify_rsp {
- struct smb2_hdr hdr;
- __le16 StructureSize; /* Must be 9 */
- __le16 OutputBufferOffset;
- __le32 OutputBufferLength;
- __u8 Buffer[1];
-} __packed;
-
/* SMB2 Notify Action Flags */
#define FILE_ACTION_ADDED 0x00000001
#define FILE_ACTION_REMOVED 0x00000002
@@ -1528,7 +861,7 @@ struct smb2_file_pos_info {
__le64 CurrentByteOffset;
} __packed;
-#define FILE_MODE_INFO_MASK cpu_to_le32(0x0000103e)
+#define FILE_MODE_INFO_MASK cpu_to_le32(0x0000100e)
struct smb2_file_mode_info {
__le32 Mode;
@@ -1705,4 +1038,13 @@ int smb2_ioctl(struct ksmbd_work *work);
int smb2_oplock_break(struct ksmbd_work *work);
int smb2_notify(struct ksmbd_work *ksmbd_work);
+/*
+ * Get the body of the smb2 message, excluding the 4-byte RFC1002 length
+ * header, from the request/response buffer.
+ */
+static inline void *smb2_get_msg(void *buf)
+{
+ return buf + 4;
+}
+
#endif /* _SMB2PDU_H */
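
The same layout change shows up in the encryption path: smb3_encrypt_resp() now allocates work->tr_buf as sizeof(struct smb2_transform_hdr) + 4 and writes the RFC1002 length through *(__be32 *)work->tr_buf, since the shared transform header no longer carries smb2_buf_length. Below is a rough standalone sketch of that layout; the struct is a placeholder (native-endian, unpacked) rather than the real __packed kernel definition, and the lengths are made-up example values.

#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>

/* Simplified stand-in for struct smb2_transform_hdr (no embedded length field). */
struct transform_hdr {
	uint32_t ProtocolId;		/* 0xFD 'S' 'M' 'B' on the wire */
	uint8_t  Signature[16];
	uint8_t  Nonce[16];
	uint32_t OriginalMessageSize;
	uint16_t Reserved1;
	uint16_t Flags;
	uint64_t SessionId;
};

int main(void)
{
	uint32_t orig_len = 96;	/* pretend size of the plaintext SMB2 message */

	/* Encrypted response buffer: 4-byte length prefix + transform header. */
	uint8_t *tr_buf = calloc(1, sizeof(struct transform_hdr) + 4);
	struct transform_hdr *tr_hdr;

	if (!tr_buf)
		return 1;
	tr_hdr = (struct transform_hdr *)(tr_buf + 4);
	tr_hdr->OriginalMessageSize = orig_len;

	/* RFC1002 length covers the transform header plus the encrypted payload. */
	uint32_t rfc1002_len = sizeof(struct transform_hdr) + orig_len;
	tr_buf[0] = rfc1002_len >> 24;
	tr_buf[1] = rfc1002_len >> 16;
	tr_buf[2] = rfc1002_len >> 8;
	tr_buf[3] = rfc1002_len;

	printf("transform header at +4, rfc1002 length = %u\n", rfc1002_len);
	free(tr_buf);
	return 0;
}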
diff --git a/fs/ksmbd/smb_common.c b/fs/ksmbd/smb_common.c
index 707490ab1f4c..ef7f42b0290a 100644
--- a/fs/ksmbd/smb_common.c
+++ b/fs/ksmbd/smb_common.c
@@ -132,7 +132,7 @@ int ksmbd_lookup_protocol_idx(char *str)
*/
int ksmbd_verify_smb_message(struct ksmbd_work *work)
{
- struct smb2_hdr *smb2_hdr = work->request_buf + work->next_smb2_rcv_hdr_off;
+ struct smb2_hdr *smb2_hdr = ksmbd_req_buf_next(work);
struct smb_hdr *hdr;
if (smb2_hdr->ProtocolId == SMB2_PROTO_NUMBER)
@@ -239,14 +239,14 @@ int ksmbd_lookup_dialect_by_id(__le16 *cli_dialects, __le16 dialects_count)
static int ksmbd_negotiate_smb_dialect(void *buf)
{
int smb_buf_length = get_rfc1002_len(buf);
- __le32 proto = ((struct smb2_hdr *)buf)->ProtocolId;
+ __le32 proto = ((struct smb2_hdr *)smb2_get_msg(buf))->ProtocolId;
if (proto == SMB2_PROTO_NUMBER) {
struct smb2_negotiate_req *req;
int smb2_neg_size =
- offsetof(struct smb2_negotiate_req, Dialects) - 4;
+ offsetof(struct smb2_negotiate_req, Dialects);
- req = (struct smb2_negotiate_req *)buf;
+ req = (struct smb2_negotiate_req *)smb2_get_msg(buf);
if (smb2_neg_size > smb_buf_length)
goto err_out;
@@ -445,11 +445,12 @@ int ksmbd_smb_negotiate_common(struct ksmbd_work *work, unsigned int command)
struct ksmbd_conn *conn = work->conn;
int ret;
- conn->dialect = ksmbd_negotiate_smb_dialect(work->request_buf);
+ conn->dialect =
+ ksmbd_negotiate_smb_dialect(work->request_buf);
ksmbd_debug(SMB, "conn->dialect 0x%x\n", conn->dialect);
if (command == SMB2_NEGOTIATE_HE) {
- struct smb2_hdr *smb2_hdr = work->request_buf;
+ struct smb2_hdr *smb2_hdr = smb2_get_msg(work->request_buf);
if (smb2_hdr->ProtocolId != SMB2_PROTO_NUMBER) {
ksmbd_debug(SMB, "Downgrade to SMB1 negotiation\n");
diff --git a/fs/ksmbd/smb_common.h b/fs/ksmbd/smb_common.h
index 6e79e7577f6b..50590842b651 100644
--- a/fs/ksmbd/smb_common.h
+++ b/fs/ksmbd/smb_common.h
@@ -10,6 +10,7 @@
#include "glob.h"
#include "nterr.h"
+#include "../smbfs_common/smb2pdu.h"
#include "smb2pdu.h"
/* ksmbd's Specific ERRNO */
@@ -32,17 +33,6 @@
#define SMB302_VERSION_STRING "3.02"
#define SMB311_VERSION_STRING "3.1.1"
-/* Dialects */
-#define SMB10_PROT_ID 0x00
-#define SMB20_PROT_ID 0x0202
-#define SMB21_PROT_ID 0x0210
-/* multi-protocol negotiate request */
-#define SMB2X_PROT_ID 0x02FF
-#define SMB30_PROT_ID 0x0300
-#define SMB302_PROT_ID 0x0302
-#define SMB311_PROT_ID 0x0311
-#define BAD_PROT_ID 0xFFFF
-
#define SMB_ECHO_INTERVAL (60 * HZ)
#define CIFS_DEFAULT_IOSIZE (64 * 1024)
@@ -59,21 +49,6 @@
/*
* File Attribute flags
*/
-#define ATTR_READONLY 0x0001
-#define ATTR_HIDDEN 0x0002
-#define ATTR_SYSTEM 0x0004
-#define ATTR_VOLUME 0x0008
-#define ATTR_DIRECTORY 0x0010
-#define ATTR_ARCHIVE 0x0020
-#define ATTR_DEVICE 0x0040
-#define ATTR_NORMAL 0x0080
-#define ATTR_TEMPORARY 0x0100
-#define ATTR_SPARSE 0x0200
-#define ATTR_REPARSE 0x0400
-#define ATTR_COMPRESSED 0x0800
-#define ATTR_OFFLINE 0x1000
-#define ATTR_NOT_CONTENT_INDEXED 0x2000
-#define ATTR_ENCRYPTED 0x4000
#define ATTR_POSIX_SEMANTICS 0x01000000
#define ATTR_BACKUP_SEMANTICS 0x02000000
#define ATTR_DELETE_ON_CLOSE 0x04000000
@@ -82,23 +57,6 @@
#define ATTR_NO_BUFFERING 0x20000000
#define ATTR_WRITE_THROUGH 0x80000000
-#define ATTR_READONLY_LE cpu_to_le32(ATTR_READONLY)
-#define ATTR_HIDDEN_LE cpu_to_le32(ATTR_HIDDEN)
-#define ATTR_SYSTEM_LE cpu_to_le32(ATTR_SYSTEM)
-#define ATTR_DIRECTORY_LE cpu_to_le32(ATTR_DIRECTORY)
-#define ATTR_ARCHIVE_LE cpu_to_le32(ATTR_ARCHIVE)
-#define ATTR_NORMAL_LE cpu_to_le32(ATTR_NORMAL)
-#define ATTR_TEMPORARY_LE cpu_to_le32(ATTR_TEMPORARY)
-#define ATTR_SPARSE_FILE_LE cpu_to_le32(ATTR_SPARSE)
-#define ATTR_REPARSE_POINT_LE cpu_to_le32(ATTR_REPARSE)
-#define ATTR_COMPRESSED_LE cpu_to_le32(ATTR_COMPRESSED)
-#define ATTR_OFFLINE_LE cpu_to_le32(ATTR_OFFLINE)
-#define ATTR_NOT_CONTENT_INDEXED_LE cpu_to_le32(ATTR_NOT_CONTENT_INDEXED)
-#define ATTR_ENCRYPTED_LE cpu_to_le32(ATTR_ENCRYPTED)
-#define ATTR_INTEGRITY_STREAML_LE cpu_to_le32(0x00008000)
-#define ATTR_NO_SCRUB_DATA_LE cpu_to_le32(0x00020000)
-#define ATTR_MASK_LE cpu_to_le32(0x00007FB7)
-
/* List of FileSystemAttributes - see 2.5.1 of MS-FSCC */
#define FILE_SUPPORTS_SPARSE_VDL 0x10000000 /* faster nonsparse extend */
#define FILE_SUPPORTS_BLOCK_REFCOUNTING 0x08000000 /* allow ioctl dup extents */
@@ -160,11 +118,6 @@
/* file_execute, file_read_attributes*/
/* write_dac, and delete. */
-#define FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA | FILE_READ_ATTRIBUTES)
-#define FILE_WRITE_RIGHTS (FILE_WRITE_DATA | FILE_APPEND_DATA \
- | FILE_WRITE_EA | FILE_WRITE_ATTRIBUTES)
-#define FILE_EXEC_RIGHTS (FILE_EXECUTE)
-
#define SET_FILE_READ_RIGHTS (FILE_READ_DATA | FILE_READ_EA \
| FILE_READ_ATTRIBUTES \
| DELETE | READ_CONTROL | WRITE_DAC \
@@ -477,12 +430,6 @@ struct smb_version_cmds {
int (*proc)(struct ksmbd_work *swork);
};
-static inline size_t
-smb2_hdr_size_no_buflen(struct smb_version_values *vals)
-{
- return vals->header_size - 4;
-}
-
int ksmbd_min_protocol(void);
int ksmbd_max_protocol(void);
diff --git a/fs/ksmbd/transport_rdma.c b/fs/ksmbd/transport_rdma.c
index a2fd5a4d4cd5..7e57cbb0bb35 100644
--- a/fs/ksmbd/transport_rdma.c
+++ b/fs/ksmbd/transport_rdma.c
@@ -484,7 +484,7 @@ static int smb_direct_check_recvmsg(struct smb_direct_recvmsg *recvmsg)
struct smb_direct_data_transfer *req =
(struct smb_direct_data_transfer *)recvmsg->packet;
struct smb2_hdr *hdr = (struct smb2_hdr *)(recvmsg->packet
- + le32_to_cpu(req->data_offset) - 4);
+ + le32_to_cpu(req->data_offset));
ksmbd_debug(RDMA,
"CreditGranted: %u, CreditRequested: %u, DataLength: %u, RemainingDataLength: %u, SMB: %x, Command: %u\n",
le16_to_cpu(req->credits_granted),
@@ -2043,7 +2043,6 @@ int ksmbd_rdma_destroy(void)
smb_direct_listener.cm_id = NULL;
if (smb_direct_wq) {
- flush_workqueue(smb_direct_wq);
destroy_workqueue(smb_direct_wq);
smb_direct_wq = NULL;
}
diff --git a/fs/ksmbd/vfs.c b/fs/ksmbd/vfs.c
index 835b384b0895..19d36393974c 100644
--- a/fs/ksmbd/vfs.c
+++ b/fs/ksmbd/vfs.c
@@ -1013,7 +1013,7 @@ int ksmbd_vfs_zero_data(struct ksmbd_work *work, struct ksmbd_file *fp,
loff_t off, loff_t len)
{
smb_break_all_levII_oplock(work, fp, 1);
- if (fp->f_ci->m_fattr & ATTR_SPARSE_FILE_LE)
+ if (fp->f_ci->m_fattr & FILE_ATTRIBUTE_SPARSE_FILE_LE)
return vfs_fallocate(fp->filp,
FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
off, len);
@@ -1624,7 +1624,7 @@ void *ksmbd_vfs_init_kstat(char **p, struct ksmbd_kstat *ksmbd_kstat)
time = ksmbd_UnixTimeToNT(kstat->ctime);
info->ChangeTime = cpu_to_le64(time);
- if (ksmbd_kstat->file_attributes & ATTR_DIRECTORY_LE) {
+ if (ksmbd_kstat->file_attributes & FILE_ATTRIBUTE_DIRECTORY_LE) {
info->EndOfFile = 0;
info->AllocationSize = 0;
} else {
@@ -1654,9 +1654,9 @@ int ksmbd_vfs_fill_dentry_attrs(struct ksmbd_work *work,
* or that acl is disable in server's filesystem and the config is yes.
*/
if (S_ISDIR(ksmbd_kstat->kstat->mode))
- ksmbd_kstat->file_attributes = ATTR_DIRECTORY_LE;
+ ksmbd_kstat->file_attributes = FILE_ATTRIBUTE_DIRECTORY_LE;
else
- ksmbd_kstat->file_attributes = ATTR_ARCHIVE_LE;
+ ksmbd_kstat->file_attributes = FILE_ATTRIBUTE_ARCHIVE_LE;
if (test_share_config_flag(work->tcon->share_conf,
KSMBD_SHARE_FLAG_STORE_DOS_ATTRS)) {
diff --git a/fs/ksmbd/vfs.h b/fs/ksmbd/vfs.h
index b0d5b8feb4a3..adf94a4f22fa 100644
--- a/fs/ksmbd/vfs.h
+++ b/fs/ksmbd/vfs.h
@@ -25,48 +25,9 @@ enum {
};
/* CreateOptions */
-/* Flag is set, it must not be a file , valid for directory only */
-#define FILE_DIRECTORY_FILE_LE cpu_to_le32(0x00000001)
-#define FILE_WRITE_THROUGH_LE cpu_to_le32(0x00000002)
-#define FILE_SEQUENTIAL_ONLY_LE cpu_to_le32(0x00000004)
-
-/* Should not buffer on server*/
-#define FILE_NO_INTERMEDIATE_BUFFERING_LE cpu_to_le32(0x00000008)
-/* MBZ */
-#define FILE_SYNCHRONOUS_IO_ALERT_LE cpu_to_le32(0x00000010)
-/* MBZ */
-#define FILE_SYNCHRONOUS_IO_NONALERT_LE cpu_to_le32(0x00000020)
-
-/* Flaf must not be set for directory */
-#define FILE_NON_DIRECTORY_FILE_LE cpu_to_le32(0x00000040)
-
-/* Should be zero */
#define CREATE_TREE_CONNECTION cpu_to_le32(0x00000080)
-#define FILE_COMPLETE_IF_OPLOCKED_LE cpu_to_le32(0x00000100)
-#define FILE_NO_EA_KNOWLEDGE_LE cpu_to_le32(0x00000200)
-#define FILE_OPEN_REMOTE_INSTANCE cpu_to_le32(0x00000400)
-
-/**
- * Doc says this is obsolete "open for recovery" flag should be zero
- * in any case.
- */
-#define CREATE_OPEN_FOR_RECOVERY cpu_to_le32(0x00000400)
-#define FILE_RANDOM_ACCESS_LE cpu_to_le32(0x00000800)
-#define FILE_DELETE_ON_CLOSE_LE cpu_to_le32(0x00001000)
-#define FILE_OPEN_BY_FILE_ID_LE cpu_to_le32(0x00002000)
-#define FILE_OPEN_FOR_BACKUP_INTENT_LE cpu_to_le32(0x00004000)
-#define FILE_NO_COMPRESSION_LE cpu_to_le32(0x00008000)
-
-/* Should be zero*/
-#define FILE_OPEN_REQUIRING_OPLOCK cpu_to_le32(0x00010000)
-#define FILE_DISALLOW_EXCLUSIVE cpu_to_le32(0x00020000)
#define FILE_RESERVE_OPFILTER_LE cpu_to_le32(0x00100000)
-#define FILE_OPEN_REPARSE_POINT_LE cpu_to_le32(0x00200000)
-#define FILE_OPEN_NO_RECALL_LE cpu_to_le32(0x00400000)
-/* Should be zero */
-#define FILE_OPEN_FOR_FREE_SPACE_QUERY_LE cpu_to_le32(0x00800000)
-#define CREATE_OPTIONS_MASK cpu_to_le32(0x00FFFFFF)
#define CREATE_OPTION_READONLY 0x10000000
/* system. NB not sent over wire */
#define CREATE_OPTION_SPECIAL 0x20000000
diff --git a/fs/netfs/read_helper.c b/fs/netfs/read_helper.c
index 994ec22d4040..9320a42dfaf9 100644
--- a/fs/netfs/read_helper.c
+++ b/fs/netfs/read_helper.c
@@ -230,7 +230,7 @@ static void netfs_rreq_completed(struct netfs_read_request *rreq, bool was_async
/*
* Deal with the completion of writing the data to the cache. We have to clear
- * the PG_fscache bits on the pages involved and release the caller's ref.
+ * the PG_fscache bits on the folios involved and release the caller's ref.
*
* May be called in softirq mode and we inherit a ref from the caller.
*/
@@ -238,7 +238,7 @@ static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
bool was_async)
{
struct netfs_read_subrequest *subreq;
- struct page *page;
+ struct folio *folio;
pgoff_t unlocked = 0;
bool have_unlocked = false;
@@ -247,14 +247,14 @@ static void netfs_rreq_unmark_after_write(struct netfs_read_request *rreq,
list_for_each_entry(subreq, &rreq->subrequests, rreq_link) {
XA_STATE(xas, &rreq->mapping->i_pages, subreq->start / PAGE_SIZE);
- xas_for_each(&xas, page, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
+ xas_for_each(&xas, folio, (subreq->start + subreq->len - 1) / PAGE_SIZE) {
/* We might have multiple writes from the same huge
- * page, but we mustn't unlock a page more than once.
+ * folio, but we mustn't unlock a folio more than once.
*/
- if (have_unlocked && page->index <= unlocked)
+ if (have_unlocked && folio_index(folio) <= unlocked)
continue;
- unlocked = page->index;
- end_page_fscache(page);
+ unlocked = folio_index(folio);
+ folio_end_fscache(folio);
have_unlocked = true;
}
}
@@ -367,18 +367,17 @@ static void netfs_rreq_write_to_cache(struct netfs_read_request *rreq,
}
/*
- * Unlock the pages in a read operation. We need to set PG_fscache on any
- * pages we're going to write back before we unlock them.
+ * Unlock the folios in a read operation. We need to set PG_fscache on any
+ * folios we're going to write back before we unlock them.
*/
static void netfs_rreq_unlock(struct netfs_read_request *rreq)
{
struct netfs_read_subrequest *subreq;
- struct page *page;
+ struct folio *folio;
unsigned int iopos, account = 0;
pgoff_t start_page = rreq->start / PAGE_SIZE;
pgoff_t last_page = ((rreq->start + rreq->len) / PAGE_SIZE) - 1;
bool subreq_failed = false;
- int i;
XA_STATE(xas, &rreq->mapping->i_pages, start_page);
@@ -403,9 +402,9 @@ static void netfs_rreq_unlock(struct netfs_read_request *rreq)
trace_netfs_rreq(rreq, netfs_rreq_trace_unlock);
rcu_read_lock();
- xas_for_each(&xas, page, last_page) {
- unsigned int pgpos = (page->index - start_page) * PAGE_SIZE;
- unsigned int pgend = pgpos + thp_size(page);
+ xas_for_each(&xas, folio, last_page) {
+ unsigned int pgpos = (folio_index(folio) - start_page) * PAGE_SIZE;
+ unsigned int pgend = pgpos + folio_size(folio);
bool pg_failed = false;
for (;;) {
@@ -414,7 +413,7 @@ static void netfs_rreq_unlock(struct netfs_read_request *rreq)
break;
}
if (test_bit(NETFS_SREQ_WRITE_TO_CACHE, &subreq->flags))
- set_page_fscache(page);
+ folio_start_fscache(folio);
pg_failed |= subreq_failed;
if (pgend < iopos + subreq->len)
break;
@@ -433,17 +432,16 @@ static void netfs_rreq_unlock(struct netfs_read_request *rreq)
}
if (!pg_failed) {
- for (i = 0; i < thp_nr_pages(page); i++)
- flush_dcache_page(page);
- SetPageUptodate(page);
+ flush_dcache_folio(folio);
+ folio_mark_uptodate(folio);
}
- if (!test_bit(NETFS_RREQ_DONT_UNLOCK_PAGES, &rreq->flags)) {
- if (page->index == rreq->no_unlock_page &&
- test_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags))
+ if (!test_bit(NETFS_RREQ_DONT_UNLOCK_FOLIOS, &rreq->flags)) {
+ if (folio_index(folio) == rreq->no_unlock_folio &&
+ test_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags))
_debug("no unlock");
else
- unlock_page(page);
+ folio_unlock(folio);
}
}
rcu_read_unlock();
@@ -876,7 +874,6 @@ void netfs_readahead(struct readahead_control *ractl,
void *netfs_priv)
{
struct netfs_read_request *rreq;
- struct page *page;
unsigned int debug_index = 0;
int ret;
@@ -911,11 +908,11 @@ void netfs_readahead(struct readahead_control *ractl,
} while (rreq->submitted < rreq->len);
- /* Drop the refs on the pages here rather than in the cache or
+ /* Drop the refs on the folios here rather than in the cache or
* filesystem. The locks will be dropped in netfs_rreq_unlock().
*/
- while ((page = readahead_page(ractl)))
- put_page(page);
+ while (readahead_folio(ractl))
+ ;
/* If we decrement nr_rd_ops to 0, the ref belongs to us. */
if (atomic_dec_and_test(&rreq->nr_rd_ops))
@@ -935,7 +932,7 @@ EXPORT_SYMBOL(netfs_readahead);
/**
* netfs_readpage - Helper to manage a readpage request
* @file: The file to read from
- * @page: The page to read
+ * @folio: The folio to read
* @ops: The network filesystem's operations for the helper to use
* @netfs_priv: Private netfs data to be retained in the request
*
@@ -950,7 +947,7 @@ EXPORT_SYMBOL(netfs_readahead);
* This is usable whether or not caching is enabled.
*/
int netfs_readpage(struct file *file,
- struct page *page,
+ struct folio *folio,
const struct netfs_read_request_ops *ops,
void *netfs_priv)
{
@@ -958,23 +955,23 @@ int netfs_readpage(struct file *file,
unsigned int debug_index = 0;
int ret;
- _enter("%lx", page_index(page));
+ _enter("%lx", folio_index(folio));
rreq = netfs_alloc_read_request(ops, netfs_priv, file);
if (!rreq) {
if (netfs_priv)
- ops->cleanup(netfs_priv, page_file_mapping(page));
- unlock_page(page);
+ ops->cleanup(netfs_priv, folio_file_mapping(folio));
+ folio_unlock(folio);
return -ENOMEM;
}
- rreq->mapping = page_file_mapping(page);
- rreq->start = page_file_offset(page);
- rreq->len = thp_size(page);
+ rreq->mapping = folio_file_mapping(folio);
+ rreq->start = folio_file_pos(folio);
+ rreq->len = folio_size(folio);
if (ops->begin_cache_operation) {
ret = ops->begin_cache_operation(rreq);
if (ret == -ENOMEM || ret == -EINTR || ret == -ERESTARTSYS) {
- unlock_page(page);
+ folio_unlock(folio);
goto out;
}
}
@@ -1012,40 +1009,40 @@ out:
EXPORT_SYMBOL(netfs_readpage);
/**
- * netfs_skip_page_read - prep a page for writing without reading first
- * @page: page being prepared
+ * netfs_skip_folio_read - prep a folio for writing without reading first
+ * @folio: The folio being prepared
* @pos: starting position for the write
* @len: length of write
*
* In some cases, write_begin doesn't need to read at all:
- * - full page write
- * - write that lies in a page that is completely beyond EOF
- * - write that covers the the page from start to EOF or beyond it
+ * - full folio write
+ * - write that lies in a folio that is completely beyond EOF
+ * - write that covers the folio from start to EOF or beyond it
*
* If any of these criteria are met, then zero out the unwritten parts
- * of the page and return true. Otherwise, return false.
+ * of the folio and return true. Otherwise, return false.
*/
-static bool netfs_skip_page_read(struct page *page, loff_t pos, size_t len)
+static bool netfs_skip_folio_read(struct folio *folio, loff_t pos, size_t len)
{
- struct inode *inode = page->mapping->host;
+ struct inode *inode = folio_inode(folio);
loff_t i_size = i_size_read(inode);
- size_t offset = offset_in_thp(page, pos);
+ size_t offset = offset_in_folio(folio, pos);
- /* Full page write */
- if (offset == 0 && len >= thp_size(page))
+ /* Full folio write */
+ if (offset == 0 && len >= folio_size(folio))
return true;
- /* pos beyond last page in the file */
+ /* pos beyond last folio in the file */
if (pos - offset >= i_size)
goto zero_out;
- /* Write that covers from the start of the page to EOF or beyond */
+ /* Write that covers from the start of the folio to EOF or beyond */
if (offset == 0 && (pos + len) >= i_size)
goto zero_out;
return false;
zero_out:
- zero_user_segments(page, 0, offset, offset + len, thp_size(page));
+ zero_user_segments(&folio->page, 0, offset, offset + len, folio_size(folio));
return true;
}
@@ -1054,9 +1051,9 @@ zero_out:
* @file: The file to read from
* @mapping: The mapping to read from
* @pos: File position at which the write will begin
- * @len: The length of the write (may extend beyond the end of the page chosen)
- * @flags: AOP_* flags
- * @_page: Where to put the resultant page
+ * @len: The length of the write (may extend beyond the end of the folio chosen)
+ * @aop_flags: AOP_* flags
+ * @_folio: Where to put the resultant folio
* @_fsdata: Place for the netfs to store a cookie
* @ops: The network filesystem's operations for the helper to use
* @netfs_priv: Private netfs data to be retained in the request
@@ -1072,37 +1069,41 @@ zero_out:
* issue_op, is mandatory.
*
* The check_write_begin() operation can be provided to check for and flush
- * conflicting writes once the page is grabbed and locked. It is passed a
+ * conflicting writes once the folio is grabbed and locked. It is passed a
* pointer to the fsdata cookie that gets returned to the VM to be passed to
* write_end. It is permitted to sleep. It should return 0 if the request
- * should go ahead; unlock the page and return -EAGAIN to cause the page to be
- * regot; or return an error.
+ * should go ahead; unlock the folio and return -EAGAIN to cause the folio to
+ * be re-acquired; or return an error.
*
* This is usable whether or not caching is enabled.
*/
int netfs_write_begin(struct file *file, struct address_space *mapping,
- loff_t pos, unsigned int len, unsigned int flags,
- struct page **_page, void **_fsdata,
+ loff_t pos, unsigned int len, unsigned int aop_flags,
+ struct folio **_folio, void **_fsdata,
const struct netfs_read_request_ops *ops,
void *netfs_priv)
{
struct netfs_read_request *rreq;
- struct page *page, *xpage;
+ struct folio *folio;
struct inode *inode = file_inode(file);
- unsigned int debug_index = 0;
+ unsigned int debug_index = 0, fgp_flags;
pgoff_t index = pos >> PAGE_SHIFT;
int ret;
DEFINE_READAHEAD(ractl, file, NULL, mapping, index);
retry:
- page = grab_cache_page_write_begin(mapping, index, flags);
- if (!page)
+ fgp_flags = FGP_LOCK | FGP_WRITE | FGP_CREAT | FGP_STABLE;
+ if (aop_flags & AOP_FLAG_NOFS)
+ fgp_flags |= FGP_NOFS;
+ folio = __filemap_get_folio(mapping, index, fgp_flags,
+ mapping_gfp_mask(mapping));
+ if (!folio)
return -ENOMEM;
if (ops->check_write_begin) {
/* Allow the netfs (eg. ceph) to flush conflicts. */
- ret = ops->check_write_begin(file, pos, len, page, _fsdata);
+ ret = ops->check_write_begin(file, pos, len, folio, _fsdata);
if (ret < 0) {
trace_netfs_failure(NULL, NULL, ret, netfs_fail_check_write_begin);
if (ret == -EAGAIN)
@@ -1111,28 +1112,28 @@ retry:
}
}
- if (PageUptodate(page))
- goto have_page;
+ if (folio_test_uptodate(folio))
+ goto have_folio;
/* If the page is beyond the EOF, we want to clear it - unless it's
* within the cache granule containing the EOF, in which case we need
* to preload the granule.
*/
if (!ops->is_cache_enabled(inode) &&
- netfs_skip_page_read(page, pos, len)) {
+ netfs_skip_folio_read(folio, pos, len)) {
netfs_stat(&netfs_n_rh_write_zskip);
- goto have_page_no_wait;
+ goto have_folio_no_wait;
}
ret = -ENOMEM;
rreq = netfs_alloc_read_request(ops, netfs_priv, file);
if (!rreq)
goto error;
- rreq->mapping = page->mapping;
- rreq->start = page_offset(page);
- rreq->len = thp_size(page);
- rreq->no_unlock_page = page->index;
- __set_bit(NETFS_RREQ_NO_UNLOCK_PAGE, &rreq->flags);
+ rreq->mapping = folio_file_mapping(folio);
+ rreq->start = folio_file_pos(folio);
+ rreq->len = folio_size(folio);
+ rreq->no_unlock_folio = folio_index(folio);
+ __set_bit(NETFS_RREQ_NO_UNLOCK_FOLIO, &rreq->flags);
netfs_priv = NULL;
if (ops->begin_cache_operation) {
@@ -1147,14 +1148,14 @@ retry:
/* Expand the request to meet caching requirements and download
* preferences.
*/
- ractl._nr_pages = thp_nr_pages(page);
+ ractl._nr_pages = folio_nr_pages(folio);
netfs_rreq_expand(rreq, &ractl);
netfs_get_read_request(rreq);
- /* We hold the page locks, so we can drop the references */
- while ((xpage = readahead_page(&ractl)))
- if (xpage != page)
- put_page(xpage);
+ /* We hold the folio locks, so we can drop the references */
+ folio_get(folio);
+ while (readahead_folio(&ractl))
+ ;
atomic_set(&rreq->nr_rd_ops, 1);
do {
@@ -1184,22 +1185,22 @@ retry:
if (ret < 0)
goto error;
-have_page:
- ret = wait_on_page_fscache_killable(page);
+have_folio:
+ ret = folio_wait_fscache_killable(folio);
if (ret < 0)
goto error;
-have_page_no_wait:
+have_folio_no_wait:
if (netfs_priv)
ops->cleanup(netfs_priv, mapping);
- *_page = page;
+ *_folio = folio;
_leave(" = 0");
return 0;
error_put:
netfs_put_read_request(rreq, false);
error:
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
if (netfs_priv)
ops->cleanup(netfs_priv, mapping);
_leave(" = %d", ret);
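
With this conversion netfs_write_begin() hands back a struct folio, while the ->write_begin address_space operation in this kernel still traffics in struct page. A minimal caller-side sketch of the bridge a network filesystem might use; myfs_write_begin and myfs_req_ops are hypothetical names, not taken from this diff:

/* Sketch only: adapt netfs_write_begin()'s folio out-parameter to the
 * page-based ->write_begin signature.  myfs_req_ops stands in for a
 * filesystem's netfs_read_request_ops table, defined elsewhere.
 */
#include <linux/netfs.h>
#include <linux/pagemap.h>

static const struct netfs_read_request_ops myfs_req_ops;	/* placeholder */

static int myfs_write_begin(struct file *file, struct address_space *mapping,
			    loff_t pos, unsigned int len, unsigned int flags,
			    struct page **pagep, void **fsdata)
{
	struct folio *folio;
	int ret;

	ret = netfs_write_begin(file, mapping, pos, len, flags,
				&folio, fsdata, &myfs_req_ops, NULL);
	if (ret < 0)
		return ret;

	*pagep = &folio->page;	/* hand the locked folio back as a page */
	return 0;
}
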
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index b2a1d969a172..5a93a5db4fb0 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -288,11 +288,8 @@ nfsd4_decode_bitmap4(struct nfsd4_compoundargs *argp, u32 *bmval, u32 bmlen)
p = xdr_inline_decode(argp->xdr, count << 2);
if (!p)
return nfserr_bad_xdr;
- i = 0;
- while (i < count)
- bmval[i++] = be32_to_cpup(p++);
- while (i < bmlen)
- bmval[i++] = 0;
+ for (i = 0; i < bmlen; i++)
+ bmval[i] = (i < count) ? be32_to_cpup(p++) : 0;
return nfs_ok;
}
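
The rewrite above folds the copy loop and the zero-fill loop into a single pass over the caller's array. The same idea as a stand-alone helper, with hypothetical names:

/* Sketch only: single-pass decode of an XDR bitmap — copy "count"
 * big-endian words off the wire, zero the rest of the caller's array.
 */
#include <linux/types.h>
#include <asm/byteorder.h>

static void my_decode_bitmap(u32 *bmval, u32 bmlen,
			     const __be32 *p, u32 count)
{
	u32 i;

	for (i = 0; i < bmlen; i++)
		bmval[i] = (i < count) ? be32_to_cpup(p++) : 0;
}
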
diff --git a/fs/pstore/platform.c b/fs/pstore/platform.c
index b9614db48b1d..f243cb5e6a4f 100644
--- a/fs/pstore/platform.c
+++ b/fs/pstore/platform.c
@@ -218,7 +218,7 @@ static int zbufsize_842(size_t size)
#if IS_ENABLED(CONFIG_PSTORE_ZSTD_COMPRESS)
static int zbufsize_zstd(size_t size)
{
- return ZSTD_compressBound(size);
+ return zstd_compress_bound(size);
}
#endif
diff --git a/fs/squashfs/zstd_wrapper.c b/fs/squashfs/zstd_wrapper.c
index 0015cf8b5582..c40445dbf38c 100644
--- a/fs/squashfs/zstd_wrapper.c
+++ b/fs/squashfs/zstd_wrapper.c
@@ -34,7 +34,7 @@ static void *zstd_init(struct squashfs_sb_info *msblk, void *buff)
goto failed;
wksp->window_size = max_t(size_t,
msblk->block_size, SQUASHFS_METADATA_SIZE);
- wksp->mem_size = ZSTD_DStreamWorkspaceBound(wksp->window_size);
+ wksp->mem_size = zstd_dstream_workspace_bound(wksp->window_size);
wksp->mem = vmalloc(wksp->mem_size);
if (wksp->mem == NULL)
goto failed;
@@ -63,15 +63,15 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
struct squashfs_page_actor *output)
{
struct workspace *wksp = strm;
- ZSTD_DStream *stream;
+ zstd_dstream *stream;
size_t total_out = 0;
int error = 0;
- ZSTD_inBuffer in_buf = { NULL, 0, 0 };
- ZSTD_outBuffer out_buf = { NULL, 0, 0 };
+ zstd_in_buffer in_buf = { NULL, 0, 0 };
+ zstd_out_buffer out_buf = { NULL, 0, 0 };
struct bvec_iter_all iter_all = {};
struct bio_vec *bvec = bvec_init_iter_all(&iter_all);
- stream = ZSTD_initDStream(wksp->window_size, wksp->mem, wksp->mem_size);
+ stream = zstd_init_dstream(wksp->window_size, wksp->mem, wksp->mem_size);
if (!stream) {
ERROR("Failed to initialize zstd decompressor\n");
@@ -116,14 +116,14 @@ static int zstd_uncompress(struct squashfs_sb_info *msblk, void *strm,
}
total_out -= out_buf.pos;
- zstd_err = ZSTD_decompressStream(stream, &out_buf, &in_buf);
+ zstd_err = zstd_decompress_stream(stream, &out_buf, &in_buf);
total_out += out_buf.pos; /* add the additional data produced */
if (zstd_err == 0)
break;
- if (ZSTD_isError(zstd_err)) {
+ if (zstd_is_error(zstd_err)) {
ERROR("zstd decompression error: %d\n",
- (int)ZSTD_getErrorCode(zstd_err));
+ (int)zstd_get_error_code(zstd_err));
error = -EIO;
break;
}
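
This hunk, like the pstore one above, switches from the upstream ZSTD_* identifiers to the kernel's zstd_* wrapper API. A minimal sketch of a one-shot decompression against those wrappers; my_zstd_decompress, the workspace handling and the error mapping are illustrative assumptions rather than code from this diff:

/* Sketch only: one-shot decompression through the kernel's zstd_*
 * streaming wrappers.  Real users size the workspace for their maximum
 * window once and reuse it; error handling is simplified here.
 */
#include <linux/zstd.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>

static int my_zstd_decompress(void *dst, size_t dst_len,
			      const void *src, size_t src_len,
			      size_t window_size)
{
	size_t wksp_size = zstd_dstream_workspace_bound(window_size);
	void *wksp = vmalloc(wksp_size);
	zstd_in_buffer in = { src, src_len, 0 };
	zstd_out_buffer out = { dst, dst_len, 0 };
	zstd_dstream *stream;
	size_t ret;
	int err = 0;

	if (!wksp)
		return -ENOMEM;

	stream = zstd_init_dstream(window_size, wksp, wksp_size);
	if (!stream) {
		err = -EIO;
		goto out_free;
	}

	do {
		ret = zstd_decompress_stream(stream, &out, &in);
		if (zstd_is_error(ret)) {
			err = -EIO;	/* see zstd_get_error_code() */
			break;
		}
	} while (ret != 0 && in.pos < in.size);

out_free:
	vfree(wksp);
	return err;
}
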
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 70abdfad2df1..42e3e551fa4c 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -31,6 +31,7 @@
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bio.h>
+#include <linux/iversion.h>
#include "udf_i.h"
#include "udf_sb.h"
@@ -43,7 +44,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
struct fileIdentDesc *fi = NULL;
struct fileIdentDesc cfi;
udf_pblk_t block, iblock;
- loff_t nf_pos;
+ loff_t nf_pos, emit_pos = 0;
int flen;
unsigned char *fname = NULL, *copy_name = NULL;
unsigned char *nameptr;
@@ -57,6 +58,7 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
int i, num, ret = 0;
struct extent_position epos = { NULL, 0, {0, 0} };
struct super_block *sb = dir->i_sb;
+ bool pos_valid = false;
if (ctx->pos == 0) {
if (!dir_emit_dot(file, ctx))
@@ -67,6 +69,21 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
if (nf_pos >= size)
goto out;
+ /*
+ * Something changed since last readdir (either lseek was called or dir
+ * changed)? We need to verify the position correctly points at the
+ * beginning of some dir entry so that the directory parsing code does
+ * not get confused. Since UDF does not have any reliable way of
+ * identifying beginning of dir entry (names are under user control),
+ * we need to scan the directory from the beginning.
+ */
+ if (!inode_eq_iversion(dir, file->f_version)) {
+ emit_pos = nf_pos;
+ nf_pos = 0;
+ } else {
+ pos_valid = true;
+ }
+
fname = kmalloc(UDF_NAME_LEN, GFP_NOFS);
if (!fname) {
ret = -ENOMEM;
@@ -122,13 +139,21 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
while (nf_pos < size) {
struct kernel_lb_addr tloc;
+ loff_t cur_pos = nf_pos;
- ctx->pos = (nf_pos >> 2) + 1;
+ /* Update file position only if we got past the current one */
+ if (nf_pos >= emit_pos) {
+ ctx->pos = (nf_pos >> 2) + 1;
+ pos_valid = true;
+ }
fi = udf_fileident_read(dir, &nf_pos, &fibh, &cfi, &epos, &eloc,
&elen, &offset);
if (!fi)
goto out;
+ /* Still not at offset where user asked us to read from? */
+ if (cur_pos < emit_pos)
+ continue;
liu = le16_to_cpu(cfi.lengthOfImpUse);
lfi = cfi.lengthFileIdent;
@@ -186,8 +211,11 @@ static int udf_readdir(struct file *file, struct dir_context *ctx)
} /* end while */
ctx->pos = (nf_pos >> 2) + 1;
+ pos_valid = true;
out:
+ if (pos_valid)
+ file->f_version = inode_query_iversion(dir);
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
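
The readdir change depends on the directory's i_version being bumped whenever an entry is written (see the udf_write_fi() hunk below): if the counter no longer matches file->f_version, the cached position may point into the middle of an entry, so the walk restarts at zero and only resumes emitting once it reaches the requested offset again. The same handshake in a reduced, hypothetical readdir (myfs_readdir is not from this diff; entry iteration is elided):

/* Sketch only: validate a cached readdir position against the
 * directory's change counter before trusting it.
 */
#include <linux/fs.h>
#include <linux/iversion.h>

static int myfs_readdir(struct file *file, struct dir_context *ctx)
{
	struct inode *dir = file_inode(file);
	loff_t pos = ctx->pos;
	loff_t emit_pos = 0;

	if (!inode_eq_iversion(dir, file->f_version)) {
		/* Directory changed or f_pos was moved: the cached
		 * position may be mid-entry, so rescan from 0 and only
		 * start emitting once we pass the requested offset.
		 */
		emit_pos = pos;
		pos = 0;
	}

	/* ... walk entries from "pos", calling dir_emit() only for
	 * entries at or beyond "emit_pos" ...
	 */

	file->f_version = inode_query_iversion(dir);
	return 0;
}
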
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index caeef08efed2..0ed4861b038f 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -30,6 +30,7 @@
#include <linux/sched.h>
#include <linux/crc-itu-t.h>
#include <linux/exportfs.h>
+#include <linux/iversion.h>
static inline int udf_match(int len1, const unsigned char *name1, int len2,
const unsigned char *name2)
@@ -134,6 +135,8 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
mark_buffer_dirty_inode(fibh->ebh, inode);
mark_buffer_dirty_inode(fibh->sbh, inode);
}
+ inode_inc_iversion(inode);
+
return 0;
}
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 34247fba6df9..f26b5e0b84b6 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -57,6 +57,7 @@
#include <linux/crc-itu-t.h>
#include <linux/log2.h>
#include <asm/byteorder.h>
+#include <linux/iversion.h>
#include "udf_sb.h"
#include "udf_i.h"
@@ -149,6 +150,7 @@ static struct inode *udf_alloc_inode(struct super_block *sb)
init_rwsem(&ei->i_data_sem);
ei->cached_extent.lstart = -1;
spin_lock_init(&ei->i_extent_cache_lock);
+ inode_set_iversion(&ei->vfs_inode, 1);
return &ei->vfs_inode;
}
diff --git a/fs/xfs/libxfs/xfs_ag.c b/fs/xfs/libxfs/xfs_ag.c
index d7d875cef07a..1e4ee042d52f 100644
--- a/fs/xfs/libxfs/xfs_ag.c
+++ b/fs/xfs/libxfs/xfs_ag.c
@@ -248,6 +248,7 @@ xfs_initialize_perag(
spin_unlock(&mp->m_perag_lock);
radix_tree_preload_end();
+#ifdef __KERNEL__
/* Place kernel structure only init below this point. */
spin_lock_init(&pag->pag_ici_lock);
spin_lock_init(&pag->pagb_lock);
@@ -257,6 +258,7 @@ xfs_initialize_perag(
init_waitqueue_head(&pag->pagb_wait);
pag->pagb_count = 0;
pag->pagb_tree = RB_ROOT;
+#endif /* __KERNEL__ */
error = xfs_buf_hash_init(pag);
if (error)
diff --git a/fs/xfs/libxfs/xfs_ag.h b/fs/xfs/libxfs/xfs_ag.h
index 3f597cad2c33..e411d51c2589 100644
--- a/fs/xfs/libxfs/xfs_ag.h
+++ b/fs/xfs/libxfs/xfs_ag.h
@@ -64,6 +64,10 @@ struct xfs_perag {
/* Blocks reserved for the reverse mapping btree. */
struct xfs_ag_resv pag_rmapbt_resv;
+ /* for rcu-safe freeing */
+ struct rcu_head rcu_head;
+
+#ifdef __KERNEL__
/* -- kernel only structures below this line -- */
/*
@@ -90,9 +94,6 @@ struct xfs_perag {
spinlock_t pag_buf_lock; /* lock for pag_buf_hash */
struct rhashtable pag_buf_hash;
- /* for rcu-safe freeing */
- struct rcu_head rcu_head;
-
/* background prealloc block trimming */
struct delayed_work pag_blockgc_work;
@@ -102,6 +103,7 @@ struct xfs_perag {
* or have some other means to control concurrency.
*/
struct rhashtable pagi_unlinked_hash;
+#endif /* __KERNEL__ */
};
int xfs_initialize_perag(struct xfs_mount *mp, xfs_agnumber_t agcount,
diff --git a/fs/xfs/libxfs/xfs_btree.c b/fs/xfs/libxfs/xfs_btree.c
index b4e19aacb9de..f18a875f51c6 100644
--- a/fs/xfs/libxfs/xfs_btree.c
+++ b/fs/xfs/libxfs/xfs_btree.c
@@ -2785,6 +2785,7 @@ error0:
return error;
}
+#ifdef __KERNEL__
struct xfs_btree_split_args {
struct xfs_btree_cur *cur;
int level;
@@ -2870,6 +2871,9 @@ xfs_btree_split(
destroy_work_on_stack(&args.work);
return args.result;
}
+#else
+#define xfs_btree_split __xfs_btree_split
+#endif /* __KERNEL__ */
/*
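
The xfs hunks here and above gate kernel-only state and the workqueue-based xfs_btree_split() indirection behind #ifdef __KERNEL__, so the same libxfs sources can also be built into userspace xfsprogs. A minimal sketch of that pattern with hypothetical names:

/* Sketch only: one wrapper compiled for the kernel, a plain alias for
 * a userspace libxfs-style build.
 */
struct my_cursor;

int __my_split(struct my_cursor *cur);	/* the shared core routine */

#ifdef __KERNEL__
/* Kernel build: an indirection point where kernel-only glue (such as
 * deferring to a workqueue for a fresh stack) can live.
 */
int my_split(struct my_cursor *cur)
{
	return __my_split(cur);
}
#else
#define my_split	__my_split
#endif /* __KERNEL__ */
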
diff --git a/fs/xfs/libxfs/xfs_da_btree.c b/fs/xfs/libxfs/xfs_da_btree.c
index dd7a2dbce1d1..9dc1ecb9713d 100644
--- a/fs/xfs/libxfs/xfs_da_btree.c
+++ b/fs/xfs/libxfs/xfs_da_btree.c
@@ -864,7 +864,6 @@ xfs_da3_node_rebalance(
{
struct xfs_da_intnode *node1;
struct xfs_da_intnode *node2;
- struct xfs_da_intnode *tmpnode;
struct xfs_da_node_entry *btree1;
struct xfs_da_node_entry *btree2;
struct xfs_da_node_entry *btree_s;
@@ -894,9 +893,7 @@ xfs_da3_node_rebalance(
((be32_to_cpu(btree2[0].hashval) < be32_to_cpu(btree1[0].hashval)) ||
(be32_to_cpu(btree2[nodehdr2.count - 1].hashval) <
be32_to_cpu(btree1[nodehdr1.count - 1].hashval)))) {
- tmpnode = node1;
- node1 = node2;
- node2 = tmpnode;
+ swap(node1, node2);
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr1, node1);
xfs_da3_node_hdr_from_disk(dp->i_mount, &nodehdr2, node2);
btree1 = nodehdr1.btree;