Diffstat (limited to 'fs')
-rw-r--r--  fs/Kconfig | 8
-rw-r--r--  fs/Kconfig.binfmt | 4
-rw-r--r--  fs/afs/callback.c | 2
-rw-r--r--  fs/afs/inode.c | 2
-rw-r--r--  fs/afs/super.c | 2
-rw-r--r--  fs/aio.c | 4
-rw-r--r--  fs/binfmt_elf.c | 9
-rw-r--r--  fs/binfmt_elf_fdpic.c | 47
-rw-r--r--  fs/binfmt_flat.c | 8
-rw-r--r--  fs/block_dev.c | 8
-rw-r--r--  fs/cifs/AUTHORS | 1
-rw-r--r--  fs/cifs/CHANGES | 10
-rw-r--r--  fs/cifs/README | 5
-rw-r--r--  fs/cifs/TODO | 15
-rw-r--r--  fs/cifs/asn1.c | 14
-rw-r--r--  fs/cifs/cifs_dfs_ref.c | 49
-rw-r--r--  fs/cifs/cifs_fs_sb.h | 1
-rw-r--r--  fs/cifs/cifs_spnego.c | 3
-rw-r--r--  fs/cifs/cifsfs.c | 65
-rw-r--r--  fs/cifs/cifsfs.h | 3
-rw-r--r--  fs/cifs/cifsglob.h | 4
-rw-r--r--  fs/cifs/cifspdu.h | 48
-rw-r--r--  fs/cifs/cifsproto.h | 13
-rw-r--r--  fs/cifs/cifssmb.c | 339
-rw-r--r--  fs/cifs/connect.c | 73
-rw-r--r--  fs/cifs/dir.c | 34
-rw-r--r--  fs/cifs/dns_resolve.c | 9
-rw-r--r--  fs/cifs/file.c | 13
-rw-r--r--  fs/cifs/inode.c | 567
-rw-r--r--  fs/cifs/ioctl.c | 4
-rw-r--r--  fs/cifs/link.c | 43
-rw-r--r--  fs/cifs/misc.c | 3
-rw-r--r--  fs/cifs/netmisc.c | 6
-rw-r--r--  fs/cifs/ntlmssp.h | 4
-rw-r--r--  fs/cifs/readdir.c | 84
-rw-r--r--  fs/coda/psdev.c | 5
-rw-r--r--  fs/compat.c | 4
-rw-r--r--  fs/dcache.c | 102
-rw-r--r--  fs/dlm/lock.c | 3
-rw-r--r--  fs/dlm/lowcomms.c | 27
-rw-r--r--  fs/dlm/netlink.c | 2
-rw-r--r--  fs/dlm/plock.c | 2
-rw-r--r--  fs/dlm/user.c | 2
-rw-r--r--  fs/ecryptfs/crypto.c | 2
-rw-r--r--  fs/ecryptfs/ecryptfs_kernel.h | 2
-rw-r--r--  fs/ecryptfs/miscdev.c | 26
-rw-r--r--  fs/ecryptfs/read_write.c | 22
-rw-r--r--  fs/exec.c | 13
-rw-r--r--  fs/ext3/resize.c | 3
-rw-r--r--  fs/ext4/balloc.c | 83
-rw-r--r--  fs/ext4/ext4.h | 7
-rw-r--r--  fs/ext4/ext4_i.h | 2
-rw-r--r--  fs/ext4/ext4_sb.h | 2
-rw-r--r--  fs/ext4/inode.c | 20
-rw-r--r--  fs/ext4/mballoc.c | 86
-rw-r--r--  fs/ext4/resize.c | 3
-rw-r--r--  fs/ext4/super.c | 36
-rw-r--r--  fs/file.c | 152
-rw-r--r--  fs/fuse/inode.c | 7
-rw-r--r--  fs/gfs2/Kconfig | 18
-rw-r--r--  fs/gfs2/Makefile | 1
-rw-r--r--  fs/gfs2/glock.c | 1624
-rw-r--r--  fs/gfs2/glock.h | 9
-rw-r--r--  fs/gfs2/glops.c | 72
-rw-r--r--  fs/gfs2/incore.h | 36
-rw-r--r--  fs/gfs2/inode.c | 10
-rw-r--r--  fs/gfs2/locking.c | 52
-rw-r--r--  fs/gfs2/locking/dlm/lock.c | 356
-rw-r--r--  fs/gfs2/locking/dlm/lock_dlm.h | 12
-rw-r--r--  fs/gfs2/locking/dlm/mount.c | 4
-rw-r--r--  fs/gfs2/locking/dlm/thread.c | 324
-rw-r--r--  fs/gfs2/locking/nolock/Makefile | 3
-rw-r--r--  fs/gfs2/locking/nolock/main.c | 238
-rw-r--r--  fs/gfs2/log.c | 2
-rw-r--r--  fs/gfs2/log.h | 2
-rw-r--r--  fs/gfs2/main.c | 2
-rw-r--r--  fs/gfs2/meta_io.c | 20
-rw-r--r--  fs/gfs2/meta_io.h | 1
-rw-r--r--  fs/gfs2/ops_address.c | 40
-rw-r--r--  fs/gfs2/ops_file.c | 8
-rw-r--r--  fs/gfs2/ops_fstype.c | 9
-rw-r--r--  fs/gfs2/ops_super.c | 18
-rw-r--r--  fs/gfs2/recovery.c | 5
-rw-r--r--  fs/gfs2/rgrp.c | 2
-rw-r--r--  fs/gfs2/super.c | 3
-rw-r--r--  fs/hppfs/Makefile | 5
-rw-r--r--  fs/jbd2/commit.c | 4
-rw-r--r--  fs/jbd2/recovery.c | 12
-rw-r--r--  fs/jfs/jfs_debug.c | 62
-rw-r--r--  fs/jfs/jfs_debug.h | 10
-rw-r--r--  fs/jfs/jfs_dtree.h | 3
-rw-r--r--  fs/jfs/jfs_imap.c | 2
-rw-r--r--  fs/jfs/jfs_logmgr.c | 35
-rw-r--r--  fs/jfs/jfs_metapage.c | 36
-rw-r--r--  fs/jfs/jfs_txnmgr.c | 68
-rw-r--r--  fs/jfs/jfs_xtree.c | 36
-rw-r--r--  fs/jfs/namei.c | 2
-rw-r--r--  fs/jfs/super.c | 7
-rw-r--r--  fs/libfs.c | 18
-rw-r--r--  fs/namei.c | 12
-rw-r--r--  fs/nfs/callback.c | 2
-rw-r--r--  fs/nfs/callback_proc.c | 4
-rw-r--r--  fs/nfs/callback_xdr.c | 40
-rw-r--r--  fs/nfs/client.c | 8
-rw-r--r--  fs/nfs/delegation.c | 4
-rw-r--r--  fs/nfs/dir.c | 18
-rw-r--r--  fs/nfs/file.c | 2
-rw-r--r--  fs/nfs/inode.c | 14
-rw-r--r--  fs/nfs/namespace.c | 11
-rw-r--r--  fs/nfs/nfs3proc.c | 6
-rw-r--r--  fs/nfs/nfs4_fs.h | 1
-rw-r--r--  fs/nfs/nfs4namespace.c | 12
-rw-r--r--  fs/nfs/nfs4proc.c | 38
-rw-r--r--  fs/nfs/nfs4renewd.c | 10
-rw-r--r--  fs/nfs/nfs4state.c | 8
-rw-r--r--  fs/nfs/nfs4xdr.c | 100
-rw-r--r--  fs/nfs/proc.c | 8
-rw-r--r--  fs/nfs/read.c | 2
-rw-r--r--  fs/nfs/super.c | 12
-rw-r--r--  fs/nfs/write.c | 2
-rw-r--r--  fs/nfsd/nfs4callback.c | 2
-rw-r--r--  fs/nfsd/nfs4proc.c | 1
-rw-r--r--  fs/nfsd/nfs4state.c | 21
-rw-r--r--  fs/nfsd/nfs4xdr.c | 2
-rw-r--r--  fs/nfsd/nfsctl.c | 103
-rw-r--r--  fs/nfsd/nfssvc.c | 124
-rw-r--r--  fs/ntfs/upcase.c | 5
-rw-r--r--  fs/ocfs2/alloc.c | 4
-rw-r--r--  fs/ocfs2/cluster/netdebug.c | 8
-rw-r--r--  fs/ocfs2/cluster/nodemanager.c | 74
-rw-r--r--  fs/ocfs2/cluster/nodemanager.h | 4
-rw-r--r--  fs/ocfs2/cluster/tcp.c | 28
-rw-r--r--  fs/ocfs2/cluster/tcp.h | 12
-rw-r--r--  fs/ocfs2/cluster/tcp_internal.h | 32
-rw-r--r--  fs/ocfs2/dlm/dlmdebug.h | 12
-rw-r--r--  fs/ocfs2/dlmglue.c | 122
-rw-r--r--  fs/ocfs2/file.c | 2
-rw-r--r--  fs/ocfs2/inode.c | 4
-rw-r--r--  fs/ocfs2/ocfs2.h | 12
-rw-r--r--  fs/ocfs2/stack_o2cb.c | 41
-rw-r--r--  fs/ocfs2/stack_user.c | 56
-rw-r--r--  fs/ocfs2/stackglue.c | 119
-rw-r--r--  fs/ocfs2/stackglue.h | 19
-rw-r--r--  fs/ocfs2/super.c | 6
-rw-r--r--  fs/partitions/check.c | 2
-rw-r--r--  fs/proc/array.c | 2
-rw-r--r--  fs/proc/base.c | 44
-rw-r--r--  fs/proc/inode.c | 3
-rw-r--r--  fs/proc/proc_misc.c | 24
-rw-r--r--  fs/proc/task_mmu.c | 36
-rw-r--r--  fs/proc/task_nommu.c | 2
-rw-r--r--  fs/reiserfs/journal.c | 2
-rw-r--r--  fs/splice.c | 17
-rw-r--r--  fs/sysfs/dir.c | 6
-rw-r--r--  fs/sysfs/file.c | 13
-rw-r--r--  fs/sysfs/mount.c | 2
-rw-r--r--  fs/sysfs/sysfs.h | 1
-rw-r--r--  fs/udf/super.c | 2
-rw-r--r--  fs/xfs/linux-2.6/kmem.c | 6
-rw-r--r--  fs/xfs/linux-2.6/kmem.h | 4
-rw-r--r--  fs/xfs/linux-2.6/sema.h | 8
-rw-r--r--  fs/xfs/linux-2.6/xfs_aops.c | 5
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.c | 42
-rw-r--r--  fs/xfs/linux-2.6/xfs_buf.h | 21
-rw-r--r--  fs/xfs/linux-2.6/xfs_export.c | 2
-rw-r--r--  fs/xfs/linux-2.6/xfs_file.c | 17
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.c | 74
-rw-r--r--  fs/xfs/linux-2.6/xfs_iops.h | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_linux.h | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 566
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.h | 3
-rw-r--r--  fs/xfs/linux-2.6/xfs_vnode.h | 8
-rw-r--r--  fs/xfs/quota/xfs_dquot.c | 3
-rw-r--r--  fs/xfs/quota/xfs_dquot.h | 2
-rw-r--r--  fs/xfs/quota/xfs_dquot_item.c | 4
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 14
-rw-r--r--  fs/xfs/quota/xfs_qm_syscalls.c | 8
-rw-r--r--  fs/xfs/support/ktrace.c | 4
-rw-r--r--  fs/xfs/xfs_attr.c | 158
-rw-r--r--  fs/xfs/xfs_attr.h | 2
-rw-r--r--  fs/xfs/xfs_attr_leaf.c | 38
-rw-r--r--  fs/xfs/xfs_attr_sf.h | 10
-rw-r--r--  fs/xfs/xfs_bmap.c | 2
-rw-r--r--  fs/xfs/xfs_buf_item.c | 8
-rw-r--r--  fs/xfs/xfs_clnt.h | 1
-rw-r--r--  fs/xfs/xfs_da_btree.c | 46
-rw-r--r--  fs/xfs/xfs_da_btree.h | 36
-rw-r--r--  fs/xfs/xfs_dfrag.c | 4
-rw-r--r--  fs/xfs/xfs_dir2.c | 125
-rw-r--r--  fs/xfs/xfs_dir2.h | 6
-rw-r--r--  fs/xfs/xfs_dir2_block.c | 56
-rw-r--r--  fs/xfs/xfs_dir2_data.c | 5
-rw-r--r--  fs/xfs/xfs_dir2_leaf.c | 80
-rw-r--r--  fs/xfs/xfs_dir2_node.c | 387
-rw-r--r--  fs/xfs/xfs_dir2_sf.c | 83
-rw-r--r--  fs/xfs/xfs_dir2_trace.c | 20
-rw-r--r--  fs/xfs/xfs_error.c | 5
-rw-r--r--  fs/xfs/xfs_extfree_item.c | 6
-rw-r--r--  fs/xfs/xfs_fs.h | 1
-rw-r--r--  fs/xfs/xfs_fsops.c | 4
-rw-r--r--  fs/xfs/xfs_inode.c | 104
-rw-r--r--  fs/xfs/xfs_inode.h | 3
-rw-r--r--  fs/xfs/xfs_inode_item.c | 7
-rw-r--r--  fs/xfs/xfs_itable.c | 6
-rw-r--r--  fs/xfs/xfs_log.c | 49
-rw-r--r--  fs/xfs/xfs_log_priv.h | 6
-rw-r--r--  fs/xfs/xfs_log_recover.c | 21
-rw-r--r--  fs/xfs/xfs_mount.c | 92
-rw-r--r--  fs/xfs/xfs_mount.h | 15
-rw-r--r--  fs/xfs/xfs_mru_cache.c | 8
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 2
-rw-r--r--  fs/xfs/xfs_sb.h | 17
-rw-r--r--  fs/xfs/xfs_trans.c | 4
-rw-r--r--  fs/xfs/xfs_trans_inode.c | 2
-rw-r--r--  fs/xfs/xfs_trans_item.c | 8
-rw-r--r--  fs/xfs/xfs_vfsops.c | 479
-rw-r--r--  fs/xfs/xfs_vfsops.h | 5
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 146
-rw-r--r--  fs/xfs/xfs_vnodeops.h | 5
219 files changed, 4877 insertions, 4549 deletions
diff --git a/fs/Kconfig b/fs/Kconfig
index cf12c403b8c7..c264610ca27c 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -470,6 +470,14 @@ config OCFS2_FS_USERSPACE_CLUSTER
It is safe to say Y, as the clustering method is run-time
selectable.
+config OCFS2_FS_STATS
+ bool "OCFS2 statistics"
+ depends on OCFS2_FS
+ default y
+ help
+ This option allows some fs statistics to be captured. Enabling
+ this option may increase the memory consumption.
+
config OCFS2_DEBUG_MASKLOG
bool "OCFS2 logging support"
depends on OCFS2_FS
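The new OCFS2_FS_STATS option is a plain compile-time switch; the extra memory the help text mentions comes from counters that only exist when the option is enabled. A minimal, hypothetical sketch of that pattern (example_lock_stats and example_count_acquire are illustrative names, not ocfs2 code):

struct example_lock_stats {
#ifdef CONFIG_OCFS2_FS_STATS
	unsigned long num_acquires;	/* only present when stats are enabled */
#endif
};

#ifdef CONFIG_OCFS2_FS_STATS
#define example_count_acquire(s)	((s)->num_acquires++)
#else
#define example_count_acquire(s)	do { } while (0)	/* compiles away */
#endif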
diff --git a/fs/Kconfig.binfmt b/fs/Kconfig.binfmt
index 55e8ee1900a5..4a551af6f3fc 100644
--- a/fs/Kconfig.binfmt
+++ b/fs/Kconfig.binfmt
@@ -30,7 +30,7 @@ config COMPAT_BINFMT_ELF
config BINFMT_ELF_FDPIC
bool "Kernel support for FDPIC ELF binaries"
default y
- depends on (FRV || BLACKFIN)
+ depends on (FRV || BLACKFIN || (SUPERH32 && !MMU))
help
ELF FDPIC binaries are based on ELF, but allow the individual load
segments of a binary to be located in memory independently of each
@@ -42,7 +42,7 @@ config BINFMT_ELF_FDPIC
config BINFMT_FLAT
bool "Kernel support for flat binaries"
- depends on !MMU
+ depends on !MMU && (!FRV || BROKEN)
help
Support uClinux FLAT format binaries.
diff --git a/fs/afs/callback.c b/fs/afs/callback.c
index a78d5b236bb1..587ef5123cd8 100644
--- a/fs/afs/callback.c
+++ b/fs/afs/callback.c
@@ -8,7 +8,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Authors: David Woodhouse <dwmw2@infradead.org>
* David Howells <dhowells@redhat.com>
*
*/
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index 08db82e1343a..bb47217f6a18 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -8,7 +8,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
- * Authors: David Woodhouse <dwmw2@cambridge.redhat.com>
+ * Authors: David Woodhouse <dwmw2@infradead.org>
* David Howells <dhowells@redhat.com>
*
*/
diff --git a/fs/afs/super.c b/fs/afs/super.c
index 4b572b801d8d..7e3faeef6818 100644
--- a/fs/afs/super.c
+++ b/fs/afs/super.c
@@ -10,7 +10,7 @@
* Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
*
* Authors: David Howells <dhowells@redhat.com>
- * David Woodhouse <dwmw2@redhat.com>
+ * David Woodhouse <dwmw2@infradead.org>
*
*/
diff --git a/fs/aio.c b/fs/aio.c
index b5253e77eb2f..0fb3117ddd93 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -591,10 +591,6 @@ static void use_mm(struct mm_struct *mm)
atomic_inc(&mm->mm_count);
tsk->mm = mm;
tsk->active_mm = mm;
- /*
- * Note that on UML this *requires* PF_BORROWED_MM to be set, otherwise
- * it won't work. Update it accordingly if you change it here
- */
switch_mm(active_mm, mm, tsk);
task_unlock(tsk);
diff --git a/fs/binfmt_elf.c b/fs/binfmt_elf.c
index b25707fee2cc..0fa95b198e6e 100644
--- a/fs/binfmt_elf.c
+++ b/fs/binfmt_elf.c
@@ -256,7 +256,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
return -EFAULT;
len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
- return 0;
+ return -EINVAL;
p += len;
}
if (__put_user(0, argv))
@@ -268,7 +268,7 @@ create_elf_tables(struct linux_binprm *bprm, struct elfhdr *exec,
return -EFAULT;
len = strnlen_user((void __user *)p, MAX_ARG_STRLEN);
if (!len || len > MAX_ARG_STRLEN)
- return 0;
+ return -EINVAL;
p += len;
}
if (__put_user(0, envp))
@@ -1900,7 +1900,7 @@ static int elf_core_dump(long signr, struct pt_regs *regs, struct file *file, un
/* alloc memory for large data structures: too large to be on stack */
elf = kmalloc(sizeof(*elf), GFP_KERNEL);
if (!elf)
- goto cleanup;
+ goto out;
segs = current->mm->map_count;
#ifdef ELF_CORE_EXTRA_PHDRS
@@ -2034,8 +2034,9 @@ end_coredump:
set_fs(fs);
cleanup:
- kfree(elf);
free_note_info(&info);
+ kfree(elf);
+out:
return has_dumped;
}
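The elf_core_dump() hunk above reorders the error path so that free_note_info() runs before kfree(elf), and a failed allocation jumps to the new out: label without touching state that was never set up. A generic sketch of that unwind ladder, with hypothetical names and sizes (not code from this patch):

#include <linux/slab.h>

static int example_dump(void)
{
	int ret = -ENOMEM;
	void *hdr, *notes;

	hdr = kmalloc(128, GFP_KERNEL);
	if (!hdr)
		goto out;		/* nothing else to undo yet */

	notes = kmalloc(256, GFP_KERNEL);
	if (!notes)
		goto free_hdr;

	/* ... produce the dump ... */
	ret = 0;

	kfree(notes);			/* release in reverse order */
free_hdr:
	kfree(hdr);
out:
	return ret;
}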
diff --git a/fs/binfmt_elf_fdpic.c b/fs/binfmt_elf_fdpic.c
index ddd35d873391..54a526e57f9f 100644
--- a/fs/binfmt_elf_fdpic.c
+++ b/fs/binfmt_elf_fdpic.c
@@ -390,7 +390,7 @@ static int load_elf_fdpic_binary(struct linux_binprm *bprm,
}
/* expand the stack mapping to use up the entire allocation granule */
- fullsize = ksize((char *) current->mm->start_brk);
+ fullsize = kobjsize((char *) current->mm->start_brk);
if (!IS_ERR_VALUE(do_mremap(current->mm->start_brk, stack_size,
fullsize, 0, 0)))
stack_size = fullsize;
@@ -477,6 +477,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
char __user *u_platform, *p;
long hwcap;
int loop;
+ int nr; /* reset for each csp adjustment */
/* we're going to shovel a whole load of stuff onto the stack */
#ifdef CONFIG_MMU
@@ -549,10 +550,7 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
/* force 16 byte _final_ alignment here for generality */
#define DLINFO_ITEMS 13
- nitems = 1 + DLINFO_ITEMS + (k_platform ? 1 : 0);
-#ifdef DLINFO_ARCH_ITEMS
- nitems += DLINFO_ARCH_ITEMS;
-#endif
+ nitems = 1 + DLINFO_ITEMS + (k_platform ? 1 : 0) + AT_VECTOR_SIZE_ARCH;
csp = sp;
sp -= nitems * 2 * sizeof(unsigned long);
@@ -564,39 +562,46 @@ static int create_elf_fdpic_tables(struct linux_binprm *bprm,
sp -= sp & 15UL;
/* put the ELF interpreter info on the stack */
-#define NEW_AUX_ENT(nr, id, val) \
+#define NEW_AUX_ENT(id, val) \
do { \
struct { unsigned long _id, _val; } __user *ent; \
\
ent = (void __user *) csp; \
__put_user((id), &ent[nr]._id); \
__put_user((val), &ent[nr]._val); \
+ nr++; \
} while (0)
+ nr = 0;
csp -= 2 * sizeof(unsigned long);
- NEW_AUX_ENT(0, AT_NULL, 0);
+ NEW_AUX_ENT(AT_NULL, 0);
if (k_platform) {
+ nr = 0;
csp -= 2 * sizeof(unsigned long);
- NEW_AUX_ENT(0, AT_PLATFORM,
+ NEW_AUX_ENT(AT_PLATFORM,
(elf_addr_t) (unsigned long) u_platform);
}
+ nr = 0;
csp -= DLINFO_ITEMS * 2 * sizeof(unsigned long);
- NEW_AUX_ENT( 0, AT_HWCAP, hwcap);
- NEW_AUX_ENT( 1, AT_PAGESZ, PAGE_SIZE);
- NEW_AUX_ENT( 2, AT_CLKTCK, CLOCKS_PER_SEC);
- NEW_AUX_ENT( 3, AT_PHDR, exec_params->ph_addr);
- NEW_AUX_ENT( 4, AT_PHENT, sizeof(struct elf_phdr));
- NEW_AUX_ENT( 5, AT_PHNUM, exec_params->hdr.e_phnum);
- NEW_AUX_ENT( 6, AT_BASE, interp_params->elfhdr_addr);
- NEW_AUX_ENT( 7, AT_FLAGS, 0);
- NEW_AUX_ENT( 8, AT_ENTRY, exec_params->entry_addr);
- NEW_AUX_ENT( 9, AT_UID, (elf_addr_t) current->uid);
- NEW_AUX_ENT(10, AT_EUID, (elf_addr_t) current->euid);
- NEW_AUX_ENT(11, AT_GID, (elf_addr_t) current->gid);
- NEW_AUX_ENT(12, AT_EGID, (elf_addr_t) current->egid);
+ NEW_AUX_ENT(AT_HWCAP, hwcap);
+ NEW_AUX_ENT(AT_PAGESZ, PAGE_SIZE);
+ NEW_AUX_ENT(AT_CLKTCK, CLOCKS_PER_SEC);
+ NEW_AUX_ENT(AT_PHDR, exec_params->ph_addr);
+ NEW_AUX_ENT(AT_PHENT, sizeof(struct elf_phdr));
+ NEW_AUX_ENT(AT_PHNUM, exec_params->hdr.e_phnum);
+ NEW_AUX_ENT(AT_BASE, interp_params->elfhdr_addr);
+ NEW_AUX_ENT(AT_FLAGS, 0);
+ NEW_AUX_ENT(AT_ENTRY, exec_params->entry_addr);
+ NEW_AUX_ENT(AT_UID, (elf_addr_t) current->uid);
+ NEW_AUX_ENT(AT_EUID, (elf_addr_t) current->euid);
+ NEW_AUX_ENT(AT_GID, (elf_addr_t) current->gid);
+ NEW_AUX_ENT(AT_EGID, (elf_addr_t) current->egid);
#ifdef ARCH_DLINFO
+ nr = 0;
+ csp -= AT_VECTOR_SIZE_ARCH * 2 * sizeof(unsigned long);
+
/* ARCH_DLINFO must come last so platform specific code can enforce
* special alignment requirements on the AUXV if necessary (eg. PPC).
*/
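The NEW_AUX_ENT() rework above replaces hand-numbered slots with a running index that the macro advances itself, so AT_* entries can be added or reordered without renumbering every line. A standalone illustration of the same technique, using hypothetical names (pair, PUT_PAIR, fill_pairs); the macro keeps its own slot counter, just as NEW_AUX_ENT now bumps nr:

struct pair { unsigned long id, val; };

#define PUT_PAIR(id_, val_)			\
	do {					\
		pairs[nr].id  = (id_);		\
		pairs[nr].val = (val_);		\
		nr++;				\
	} while (0)

static void fill_pairs(struct pair *pairs)
{
	int nr = 0;			/* reset once per block of entries */

	PUT_PAIR(10, 0x1000);
	PUT_PAIR(11, 0x2000);		/* no explicit slot numbers needed */
}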
diff --git a/fs/binfmt_flat.c b/fs/binfmt_flat.c
index 3b40d45a3a16..2cb1acda3a82 100644
--- a/fs/binfmt_flat.c
+++ b/fs/binfmt_flat.c
@@ -548,7 +548,7 @@ static int load_flat_file(struct linux_binprm * bprm,
PROT_READ|PROT_WRITE|PROT_EXEC, MAP_PRIVATE, 0);
/* Remap to use all availabe slack region space */
if (realdatastart && (realdatastart < (unsigned long)-4096)) {
- reallen = ksize((void *)realdatastart);
+ reallen = kobjsize((void *)realdatastart);
if (reallen > len) {
realdatastart = do_mremap(realdatastart, len,
reallen, MREMAP_FIXED, realdatastart);
@@ -600,7 +600,7 @@ static int load_flat_file(struct linux_binprm * bprm,
PROT_READ | PROT_EXEC | PROT_WRITE, MAP_PRIVATE, 0);
/* Remap to use all availabe slack region space */
if (textpos && (textpos < (unsigned long) -4096)) {
- reallen = ksize((void *)textpos);
+ reallen = kobjsize((void *)textpos);
if (reallen > len) {
textpos = do_mremap(textpos, len, reallen,
MREMAP_FIXED, textpos);
@@ -683,7 +683,7 @@ static int load_flat_file(struct linux_binprm * bprm,
*/
current->mm->start_brk = datapos + data_len + bss_len;
current->mm->brk = (current->mm->start_brk + 3) & ~3;
- current->mm->context.end_brk = memp + ksize((void *) memp) - stack_len;
+ current->mm->context.end_brk = memp + kobjsize((void *) memp) - stack_len;
}
if (flags & FLAT_FLAG_KTRACE)
@@ -790,7 +790,7 @@ static int load_flat_file(struct linux_binprm * bprm,
/* zero the BSS, BRK and stack areas */
memset((void*)(datapos + data_len), 0, bss_len +
- (memp + ksize((void *) memp) - stack_len - /* end brk */
+ (memp + kobjsize((void *) memp) - stack_len - /* end brk */
libinfo->lib_list[id].start_brk) + /* start brk */
stack_len);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 7d822fae7765..470c10ceb0fb 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -12,6 +12,7 @@
#include <linux/kmod.h>
#include <linux/major.h>
#include <linux/smp_lock.h>
+#include <linux/device_cgroup.h>
#include <linux/highmem.h>
#include <linux/blkdev.h>
#include <linux/module.h>
@@ -928,9 +929,14 @@ static int do_open(struct block_device *bdev, struct file *file, int for_part)
{
struct module *owner = NULL;
struct gendisk *disk;
- int ret = -ENXIO;
+ int ret;
int part;
+ ret = devcgroup_inode_permission(bdev->bd_inode, file->f_mode);
+ if (ret != 0)
+ return ret;
+
+ ret = -ENXIO;
file->f_mapping = bdev->bd_inode->i_mapping;
lock_kernel();
disk = get_gendisk(bdev->bd_dev, &part);
diff --git a/fs/cifs/AUTHORS b/fs/cifs/AUTHORS
index 8848e4dfa026..9c136d7803d9 100644
--- a/fs/cifs/AUTHORS
+++ b/fs/cifs/AUTHORS
@@ -36,6 +36,7 @@ Miklos Szeredi
Kazeon team for various fixes especially for 2.4 version.
Asser Ferno (Change Notify support)
Shaggy (Dave Kleikamp) for inumerable small fs suggestions and some good cleanup
+Igor Mammedov (DFS support)
Test case and Bug Report contributors
-------------------------------------
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 8355e918fddf..1f3465201fdf 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -1,5 +1,12 @@
Version 1.53
------------
+DFS support added (Microsoft Distributed File System client support needed
+for referrals which enable a hierarchical name space among servers).
+Disable temporary caching of mode bits to servers which do not support
+storing of mode (e.g. Windows servers, when client mounts without cifsacl
+mount option) and add new "dynperm" mount option to enable temporary caching
+of mode (enable old behavior). Fix hang on mount caused when server crashes
+tcp session during negotiate protocol.
Version 1.52
------------
@@ -12,7 +19,8 @@ Add ability to modify cifs acls for handling chmod (when mounted with
cifsacl flag). Fix prefixpath path separator so we can handle mounts
with prefixpaths longer than one directory (one path component) when
mounted to Windows servers. Fix slow file open when cifsacl
-enabled.
+enabled. Fix memory leak in FindNext when the SMB call returns -EBADF.
+
Version 1.51
------------
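The "dynperm" behaviour described in the 1.53 entry is carried by the new CIFS_MOUNT_DYNPERM flag added in cifs_fs_sb.h and connect.c further down in this diff; the mode-caching policy itself lives outside the hunks shown here. A hedged sketch of the rule the CHANGES text describes, not the actual cifs code:

static bool may_cache_mode_locally(const struct cifs_sb_info *cifs_sb)
{
	/* with Unix extensions or cifsacl the server can persist the mode */
	if (cifs_sb->tcon->unix_ext ||
	    (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL))
		return false;

	/* otherwise fake, in-memory-only mode bits require "dynperm" */
	return (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) != 0;
}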
diff --git a/fs/cifs/README b/fs/cifs/README
index 621aa1a85971..2bd6fe556f88 100644
--- a/fs/cifs/README
+++ b/fs/cifs/README
@@ -483,6 +483,11 @@ A partial list of the supported mount options follows:
sign Must use packet signing (helps avoid unwanted data modification
by intermediate systems in the route). Note that signing
does not work with lanman or plaintext authentication.
+ seal Must seal (encrypt) all data on this mounted share before
+ sending on the network. Requires support for Unix Extensions.
+ Note that this differs from the sign mount option in that it
+ causes encryption of data sent over this mounted share but other
+ shares mounted to the same server are unaffected.
sec Security mode. Allowed values are:
none attempt to connection as a null user (no name)
krb5 Use Kerberos version 5 authentication
diff --git a/fs/cifs/TODO b/fs/cifs/TODO
index 92c9feac440f..5aff46c61e52 100644
--- a/fs/cifs/TODO
+++ b/fs/cifs/TODO
@@ -1,4 +1,4 @@
-Version 1.52 January 3, 2008
+Version 1.53 May 20, 2008
A Partial List of Missing Features
==================================
@@ -20,20 +20,21 @@ d) Cleanup now unneeded SessSetup code in
fs/cifs/connect.c and add back in NTLMSSP code if any servers
need it
-e) ms-dfs and ms-dfs host name resolution cleanup
-
-f) fix NTLMv2 signing when two mounts with different users to same
+e) fix NTLMv2 signing when two mounts with different users to same
server.
-g) Directory entry caching relies on a 1 second timer, rather than
+f) Directory entry caching relies on a 1 second timer, rather than
using FindNotify or equivalent. - (started)
-h) quota support (needs minor kernel change since quota calls
+g) quota support (needs minor kernel change since quota calls
to make it to network filesystems or deviceless filesystems)
-i) investigate sync behavior (including syncpage) and check
+h) investigate sync behavior (including syncpage) and check
for proper behavior of intr/nointr
+i) improve support for very old servers (OS/2 and Win9x for example)
+Including support for changing the time remotely (utimes command).
+
j) hook lower into the sockets api (as NFS/SunRPC does) to avoid the
extra copy in/out of the socket buffers in some cases.
diff --git a/fs/cifs/asn1.c b/fs/cifs/asn1.c
index cb52cbbe45ff..f58e41d3ba48 100644
--- a/fs/cifs/asn1.c
+++ b/fs/cifs/asn1.c
@@ -186,6 +186,11 @@ asn1_length_decode(struct asn1_ctx *ctx, unsigned int *def, unsigned int *len)
}
}
}
+
+ /* don't trust len bigger than ctx buffer */
+ if (*len > ctx->end - ctx->pointer)
+ return 0;
+
return 1;
}
@@ -203,6 +208,10 @@ asn1_header_decode(struct asn1_ctx *ctx,
if (!asn1_length_decode(ctx, &def, &len))
return 0;
+ /* primitive shall be definite, indefinite shall be constructed */
+ if (*con == ASN1_PRI && !def)
+ return 0;
+
if (def)
*eoc = ctx->pointer + len;
else
@@ -389,6 +398,11 @@ asn1_oid_decode(struct asn1_ctx *ctx,
unsigned long *optr;
size = eoc - ctx->pointer + 1;
+
+ /* first subid actually encodes first two subids */
+ if (size < 2 || size > ULONG_MAX/sizeof(unsigned long))
+ return 0;
+
*oid = kmalloc(size * sizeof(unsigned long), GFP_ATOMIC);
if (*oid == NULL)
return 0;
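The asn1.c additions are defensive-decoding checks: a decoded length must not exceed what remains of the receive buffer, and the OID element count must not overflow the kmalloc() size computation. A minimal sketch of those two checks in isolation (alloc_subids is a hypothetical helper, not cifs code):

#include <linux/kernel.h>
#include <linux/slab.h>

static unsigned long *alloc_subids(unsigned long count,
				   const unsigned char *cur,
				   const unsigned char *end)
{
	if (count > (unsigned long)(end - cur))		/* longer than remaining data */
		return NULL;
	if (count > ULONG_MAX / sizeof(unsigned long))	/* kmalloc size would overflow */
		return NULL;
	return kmalloc(count * sizeof(unsigned long), GFP_ATOMIC);
}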
diff --git a/fs/cifs/cifs_dfs_ref.c b/fs/cifs/cifs_dfs_ref.c
index f6fdecf6598c..d82374c9e329 100644
--- a/fs/cifs/cifs_dfs_ref.c
+++ b/fs/cifs/cifs_dfs_ref.c
@@ -219,53 +219,6 @@ static struct vfsmount *cifs_dfs_do_refmount(const struct vfsmount *mnt_parent,
}
-static char *build_full_dfs_path_from_dentry(struct dentry *dentry)
-{
- char *full_path = NULL;
- char *search_path;
- char *tmp_path;
- size_t l_max_len;
- struct cifs_sb_info *cifs_sb;
-
- if (dentry->d_inode == NULL)
- return NULL;
-
- cifs_sb = CIFS_SB(dentry->d_inode->i_sb);
-
- if (cifs_sb->tcon == NULL)
- return NULL;
-
- search_path = build_path_from_dentry(dentry);
- if (search_path == NULL)
- return NULL;
-
- if (cifs_sb->tcon->Flags & SMB_SHARE_IS_IN_DFS) {
- int i;
- /* we should use full path name for correct working with DFS */
- l_max_len = strnlen(cifs_sb->tcon->treeName, MAX_TREE_SIZE+1) +
- strnlen(search_path, MAX_PATHCONF) + 1;
- tmp_path = kmalloc(l_max_len, GFP_KERNEL);
- if (tmp_path == NULL) {
- kfree(search_path);
- return NULL;
- }
- strncpy(tmp_path, cifs_sb->tcon->treeName, l_max_len);
- tmp_path[l_max_len-1] = 0;
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
- for (i = 0; i < l_max_len; i++) {
- if (tmp_path[i] == '\\')
- tmp_path[i] = '/';
- }
- strncat(tmp_path, search_path, l_max_len - strlen(tmp_path));
-
- full_path = tmp_path;
- kfree(search_path);
- } else {
- full_path = search_path;
- }
- return full_path;
-}
-
static int add_mount_helper(struct vfsmount *newmnt, struct nameidata *nd,
struct list_head *mntlist)
{
@@ -333,7 +286,7 @@ cifs_dfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
goto out_err;
}
- full_path = build_full_dfs_path_from_dentry(dentry);
+ full_path = build_path_from_dentry(dentry);
if (full_path == NULL) {
rc = -ENOMEM;
goto out_err;
diff --git a/fs/cifs/cifs_fs_sb.h b/fs/cifs/cifs_fs_sb.h
index 8ad2330ba061..877c85409f1f 100644
--- a/fs/cifs/cifs_fs_sb.h
+++ b/fs/cifs/cifs_fs_sb.h
@@ -30,6 +30,7 @@
#define CIFS_MOUNT_CIFS_ACL 0x200 /* send ACL requests to non-POSIX srv */
#define CIFS_MOUNT_OVERR_UID 0x400 /* override uid returned from server */
#define CIFS_MOUNT_OVERR_GID 0x800 /* override gid returned from server */
+#define CIFS_MOUNT_DYNPERM 0x1000 /* allow in-memory only mode setting */
struct cifs_sb_info {
struct cifsTconInfo *tcon; /* primary mount */
diff --git a/fs/cifs/cifs_spnego.c b/fs/cifs/cifs_spnego.c
index 6653e29637a7..7013aaff6aed 100644
--- a/fs/cifs/cifs_spnego.c
+++ b/fs/cifs/cifs_spnego.c
@@ -119,6 +119,9 @@ cifs_get_spnego_key(struct cifsSesInfo *sesInfo)
dp = description + strlen(description);
sprintf(dp, ";uid=0x%x", sesInfo->linux_uid);
+ dp = description + strlen(description);
+ sprintf(dp, ";user=%s", sesInfo->userName);
+
cFYI(1, ("key description = %s", description));
spnego_key = request_key(&cifs_spnego_key_type, description, "");
diff --git a/fs/cifs/cifsfs.c b/fs/cifs/cifsfs.c
index 427a7c695896..86b4d5f405ae 100644
--- a/fs/cifs/cifsfs.c
+++ b/fs/cifs/cifsfs.c
@@ -1,7 +1,7 @@
/*
* fs/cifs/cifsfs.c
*
- * Copyright (C) International Business Machines Corp., 2002,2007
+ * Copyright (C) International Business Machines Corp., 2002,2008
* Author(s): Steve French (sfrench@us.ibm.com)
*
* Common Internet FileSystem (CIFS) client
@@ -97,9 +97,6 @@ cifs_read_super(struct super_block *sb, void *data,
{
struct inode *inode;
struct cifs_sb_info *cifs_sb;
-#ifdef CONFIG_CIFS_DFS_UPCALL
- int len;
-#endif
int rc = 0;
/* BB should we make this contingent on mount parm? */
@@ -117,15 +114,17 @@ cifs_read_super(struct super_block *sb, void *data,
* complex operation (mount), and in case of fail
* just exit instead of doing mount and attempting
* undo it if this copy fails?*/
- len = strlen(data);
- cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
- if (cifs_sb->mountdata == NULL) {
- kfree(sb->s_fs_info);
- sb->s_fs_info = NULL;
- return -ENOMEM;
+ if (data) {
+ int len = strlen(data);
+ cifs_sb->mountdata = kzalloc(len + 1, GFP_KERNEL);
+ if (cifs_sb->mountdata == NULL) {
+ kfree(sb->s_fs_info);
+ sb->s_fs_info = NULL;
+ return -ENOMEM;
+ }
+ strncpy(cifs_sb->mountdata, data, len + 1);
+ cifs_sb->mountdata[len] = '\0';
}
- strncpy(cifs_sb->mountdata, data, len + 1);
- cifs_sb->mountdata[len] = '\0';
#endif
rc = cifs_mount(sb, cifs_sb, data, devname);
@@ -353,9 +352,41 @@ cifs_show_options(struct seq_file *s, struct vfsmount *m)
if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_OVERR_GID) ||
!(cifs_sb->tcon->unix_ext))
seq_printf(s, ",gid=%d", cifs_sb->mnt_gid);
+ if (!cifs_sb->tcon->unix_ext) {
+ seq_printf(s, ",file_mode=0%o,dir_mode=0%o",
+ cifs_sb->mnt_file_mode,
+ cifs_sb->mnt_dir_mode);
+ }
+ if (cifs_sb->tcon->seal)
+ seq_printf(s, ",seal");
+ if (cifs_sb->tcon->nocase)
+ seq_printf(s, ",nocase");
+ if (cifs_sb->tcon->retry)
+ seq_printf(s, ",hard");
}
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
seq_printf(s, ",posixpaths");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)
+ seq_printf(s, ",setuids");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ seq_printf(s, ",serverino");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DIRECT_IO)
+ seq_printf(s, ",directio");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_XATTR)
+ seq_printf(s, ",nouser_xattr");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR)
+ seq_printf(s, ",mapchars");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)
+ seq_printf(s, ",sfu");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_NO_BRL)
+ seq_printf(s, ",nobrl");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
+ seq_printf(s, ",cifsacl");
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)
+ seq_printf(s, ",dynperm");
+ if (m->mnt_sb->s_flags & MS_POSIXACL)
+ seq_printf(s, ",acl");
+
seq_printf(s, ",rsize=%d", cifs_sb->rsize);
seq_printf(s, ",wsize=%d", cifs_sb->wsize);
}
@@ -657,7 +688,7 @@ const struct file_operations cifs_file_ops = {
.splice_read = generic_file_splice_read,
.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
- .ioctl = cifs_ioctl,
+ .unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
#ifdef CONFIG_CIFS_EXPERIMENTAL
@@ -677,7 +708,7 @@ const struct file_operations cifs_file_direct_ops = {
.flush = cifs_flush,
.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
- .ioctl = cifs_ioctl,
+ .unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
@@ -697,7 +728,7 @@ const struct file_operations cifs_file_nobrl_ops = {
.splice_read = generic_file_splice_read,
.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_POSIX
- .ioctl = cifs_ioctl,
+ .unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
#ifdef CONFIG_CIFS_EXPERIMENTAL
@@ -716,7 +747,7 @@ const struct file_operations cifs_file_direct_nobrl_ops = {
.flush = cifs_flush,
.splice_read = generic_file_splice_read,
#ifdef CONFIG_CIFS_POSIX
- .ioctl = cifs_ioctl,
+ .unlocked_ioctl = cifs_ioctl,
#endif /* CONFIG_CIFS_POSIX */
.llseek = cifs_llseek,
#ifdef CONFIG_CIFS_EXPERIMENTAL
@@ -731,7 +762,7 @@ const struct file_operations cifs_dir_ops = {
#ifdef CONFIG_CIFS_EXPERIMENTAL
.dir_notify = cifs_dir_notify,
#endif /* CONFIG_CIFS_EXPERIMENTAL */
- .ioctl = cifs_ioctl,
+ .unlocked_ioctl = cifs_ioctl,
};
static void
diff --git a/fs/cifs/cifsfs.h b/fs/cifs/cifsfs.h
index cd1301a09b3b..25a6cbd15529 100644
--- a/fs/cifs/cifsfs.h
+++ b/fs/cifs/cifsfs.h
@@ -95,8 +95,7 @@ extern int cifs_setxattr(struct dentry *, const char *, const void *,
size_t, int);
extern ssize_t cifs_getxattr(struct dentry *, const char *, void *, size_t);
extern ssize_t cifs_listxattr(struct dentry *, char *, size_t);
-extern int cifs_ioctl(struct inode *inode, struct file *filep,
- unsigned int command, unsigned long arg);
+extern long cifs_ioctl(struct file *filep, unsigned int cmd, unsigned long arg);
#ifdef CONFIG_CIFS_EXPERIMENTAL
extern const struct export_operations cifs_export_ops;
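The prototype change above goes together with the .ioctl to .unlocked_ioctl switch in cifsfs.c: the handler now returns long, is no longer passed the inode, and is called without the BKL held. A generic sketch of what such a conversion looks like (foo_ioctl is a hypothetical handler, not the cifs implementation):

#include <linux/fs.h>

static long foo_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
{
	/* the inode is no longer an argument; recover it from the file */
	struct inode *inode = filep->f_path.dentry->d_inode;
	long rc = -ENOTTY;

	/* the BKL is no longer taken for us -- lock explicitly if needed */
	mutex_lock(&inode->i_mutex);
	switch (cmd) {
	default:
		break;
	}
	mutex_unlock(&inode->i_mutex);
	return rc;
}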
diff --git a/fs/cifs/cifsglob.h b/fs/cifs/cifsglob.h
index b7d9f698e63e..9cfcf326ead3 100644
--- a/fs/cifs/cifsglob.h
+++ b/fs/cifs/cifsglob.h
@@ -281,6 +281,7 @@ struct cifsTconInfo {
bool ipc:1; /* set if connection to IPC$ eg for RPC/PIPES */
bool retry:1;
bool nocase:1;
+ bool seal:1; /* transport encryption for this mounted share */
bool unix_ext:1; /* if false disable Linux extensions to CIFS protocol
for this mount even if server would support */
/* BB add field for back pointer to sb struct(s)? */
@@ -332,7 +333,6 @@ struct cifsFileInfo {
bool messageMode:1; /* for pipes: message vs byte mode */
atomic_t wrtPending; /* handle in use - defer close */
struct semaphore fh_sem; /* prevents reopen race after dead ses*/
- char *search_resume_name; /* BB removeme BB */
struct cifs_search_info srch_inf;
};
@@ -625,7 +625,7 @@ GLOBAL_EXTERN atomic_t tcpSesAllocCount;
GLOBAL_EXTERN atomic_t tcpSesReconnectCount;
GLOBAL_EXTERN atomic_t tconInfoReconnectCount;
-/* Various Debug counters to remove someday (BB) */
+/* Various Debug counters */
GLOBAL_EXTERN atomic_t bufAllocCount; /* current number allocated */
#ifdef CONFIG_CIFS_STATS2
GLOBAL_EXTERN atomic_t totBufAllocCount; /* total allocated over all time */
diff --git a/fs/cifs/cifspdu.h b/fs/cifs/cifspdu.h
index c43bf4b7a556..0f327c224da3 100644
--- a/fs/cifs/cifspdu.h
+++ b/fs/cifs/cifspdu.h
@@ -79,6 +79,19 @@
#define TRANS2_GET_DFS_REFERRAL 0x10
#define TRANS2_REPORT_DFS_INCOSISTENCY 0x11
+/* SMB Transact (Named Pipe) subcommand codes */
+#define TRANS_SET_NMPIPE_STATE 0x0001
+#define TRANS_RAW_READ_NMPIPE 0x0011
+#define TRANS_QUERY_NMPIPE_STATE 0x0021
+#define TRANS_QUERY_NMPIPE_INFO 0x0022
+#define TRANS_PEEK_NMPIPE 0x0023
+#define TRANS_TRANSACT_NMPIPE 0x0026
+#define TRANS_RAW_WRITE_NMPIPE 0x0031
+#define TRANS_READ_NMPIPE 0x0036
+#define TRANS_WRITE_NMPIPE 0x0037
+#define TRANS_WAIT_NMPIPE 0x0053
+#define TRANS_CALL_NMPIPE 0x0054
+
/* NT Transact subcommand codes */
#define NT_TRANSACT_CREATE 0x01
#define NT_TRANSACT_IOCTL 0x02
@@ -328,12 +341,13 @@
#define CREATE_COMPLETE_IF_OPLK 0x00000100 /* should be zero */
#define CREATE_NO_EA_KNOWLEDGE 0x00000200
#define CREATE_EIGHT_DOT_THREE 0x00000400 /* doc says this is obsolete
- open for recovery flag - should
- be zero */
+ "open for recovery" flag - should
+ be zero in any case */
+#define CREATE_OPEN_FOR_RECOVERY 0x00000400
#define CREATE_RANDOM_ACCESS 0x00000800
#define CREATE_DELETE_ON_CLOSE 0x00001000
#define CREATE_OPEN_BY_ID 0x00002000
-#define CREATE_OPEN_BACKUP_INTN 0x00004000
+#define CREATE_OPEN_BACKUP_INTENT 0x00004000
#define CREATE_NO_COMPRESSION 0x00008000
#define CREATE_RESERVE_OPFILTER 0x00100000 /* should be zero */
#define OPEN_REPARSE_POINT 0x00200000
@@ -722,7 +736,6 @@ typedef struct smb_com_tconx_rsp_ext {
#define SMB_CSC_CACHE_AUTO_REINT 0x0004
#define SMB_CSC_CACHE_VDO 0x0008
#define SMB_CSC_NO_CACHING 0x000C
-
#define SMB_UNIQUE_FILE_NAME 0x0010
#define SMB_EXTENDED_SIGNATURES 0x0020
@@ -806,7 +819,7 @@ typedef struct smb_com_findclose_req {
#define ICOUNT_MASK 0x00FF
#define PIPE_READ_MODE 0x0100
#define NAMED_PIPE_TYPE 0x0400
-#define PIPE_END_POINT 0x0800
+#define PIPE_END_POINT 0x4000
#define BLOCKING_NAMED_PIPE 0x8000
typedef struct smb_com_open_req { /* also handles create */
@@ -1904,19 +1917,26 @@ typedef struct smb_com_transaction2_get_dfs_refer_req {
char RequestFileName[1];
} __attribute__((packed)) TRANSACTION2_GET_DFS_REFER_REQ;
+#define DFS_VERSION cpu_to_le16(0x0003)
+
+/* DFS server target type */
+#define DFS_TYPE_LINK 0x0000 /* also for sysvol targets */
+#define DFS_TYPE_ROOT 0x0001
+
+/* Referral Entry Flags */
+#define DFS_NAME_LIST_REF 0x0200
+
typedef struct dfs_referral_level_3 {
__le16 VersionNumber;
- __le16 ReferralSize;
- __le16 ServerType; /* 0x0001 = CIFS server */
- __le16 ReferralFlags; /* or proximity - not clear which since it is
- always set to zero - SNIA spec says 0x01
- means strip off PathConsumed chars before
- submitting RequestFileName to remote node */
- __le16 TimeToLive;
- __le16 Proximity;
+ __le16 Size;
+ __le16 ServerType; /* 0x0001 = root targets; 0x0000 = link targets */
+ __le16 ReferralEntryFlags; /* 0x0200 bit set only for domain
+ or DC referral responce */
+ __le32 TimeToLive;
__le16 DfsPathOffset;
__le16 DfsAlternatePathOffset;
- __le16 NetworkAddressOffset;
+ __le16 NetworkAddressOffset; /* offset of the link target */
+ __le16 ServiceSiteGuid;
} __attribute__((packed)) REFERRAL3;
typedef struct smb_com_transaction_get_dfs_refer_rsp {
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index d481f6c5a2be..b9f5e935f821 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -93,7 +93,7 @@ extern struct timespec cnvrtDosUnixTm(__u16 date, __u16 time);
extern int cifs_get_inode_info(struct inode **pinode,
const unsigned char *search_path,
- FILE_ALL_INFO * pfile_info,
+ FILE_ALL_INFO *pfile_info,
struct super_block *sb, int xid, const __u16 *pfid);
extern int cifs_get_inode_info_unix(struct inode **pinode,
const unsigned char *search_path,
@@ -130,7 +130,7 @@ extern int CIFSFindClose(const int, struct cifsTconInfo *tcon,
extern int CIFSSMBQPathInfo(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName,
- FILE_ALL_INFO * findData,
+ FILE_ALL_INFO *findData,
int legacy /* whether to use old info level */,
const struct nls_table *nls_codepage, int remap);
extern int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
@@ -141,18 +141,15 @@ extern int SMBQueryInformation(const int xid, struct cifsTconInfo *tcon,
extern int CIFSSMBUnixQPathInfo(const int xid,
struct cifsTconInfo *tcon,
const unsigned char *searchName,
- FILE_UNIX_BASIC_INFO * pFindData,
+ FILE_UNIX_BASIC_INFO *pFindData,
const struct nls_table *nls_codepage, int remap);
extern int CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
const unsigned char *searchName,
- unsigned char **targetUNCs,
- unsigned int *number_of_UNC_in_array,
+ struct dfs_info3_param **target_nodes,
+ unsigned int *number_of_nodes_in_array,
const struct nls_table *nls_codepage, int remap);
-extern int connect_to_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
- const char *old_path,
- const struct nls_table *nls_codepage, int remap);
extern int get_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
const char *old_path,
const struct nls_table *nls_codepage,
diff --git a/fs/cifs/cifssmb.c b/fs/cifs/cifssmb.c
index 95fbba4ea7d4..4511b708f0f3 100644
--- a/fs/cifs/cifssmb.c
+++ b/fs/cifs/cifssmb.c
@@ -81,6 +81,40 @@ static struct {
#endif /* CONFIG_CIFS_WEAK_PW_HASH */
#endif /* CIFS_POSIX */
+/* Allocates buffer into dst and copies smb string from src to it.
+ * caller is responsible for freeing dst if function returned 0.
+ * returns:
+ * on success - 0
+ * on failure - errno
+ */
+static int
+cifs_strncpy_to_host(char **dst, const char *src, const int maxlen,
+ const bool is_unicode, const struct nls_table *nls_codepage)
+{
+ int plen;
+
+ if (is_unicode) {
+ plen = UniStrnlen((wchar_t *)src, maxlen);
+ *dst = kmalloc(plen + 2, GFP_KERNEL);
+ if (!*dst)
+ goto cifs_strncpy_to_host_ErrExit;
+ cifs_strfromUCS_le(*dst, (__le16 *)src, plen, nls_codepage);
+ } else {
+ plen = strnlen(src, maxlen);
+ *dst = kmalloc(plen + 2, GFP_KERNEL);
+ if (!*dst)
+ goto cifs_strncpy_to_host_ErrExit;
+ strncpy(*dst, src, plen);
+ }
+ (*dst)[plen] = 0;
+ (*dst)[plen+1] = 0; /* harmless for ASCII case, needed for Unicode */
+ return 0;
+
+cifs_strncpy_to_host_ErrExit:
+ cERROR(1, ("Failed to allocate buffer for string\n"));
+ return -ENOMEM;
+}
+
/* Mark as invalid, all open files on tree connections since they
were closed when session to server was lost */
@@ -1166,6 +1200,20 @@ static __u16 convert_disposition(int disposition)
return ofun;
}
+static int
+access_flags_to_smbopen_mode(const int access_flags)
+{
+ int masked_flags = access_flags & (GENERIC_READ | GENERIC_WRITE);
+
+ if (masked_flags == GENERIC_READ)
+ return SMBOPEN_READ;
+ else if (masked_flags == GENERIC_WRITE)
+ return SMBOPEN_WRITE;
+
+ /* just go for read/write */
+ return SMBOPEN_READWRITE;
+}
+
int
SMBLegacyOpen(const int xid, struct cifsTconInfo *tcon,
const char *fileName, const int openDisposition,
@@ -1207,13 +1255,7 @@ OldOpenRetry:
pSMB->OpenFlags = cpu_to_le16(REQ_BATCHOPLOCK);
pSMB->OpenFlags |= cpu_to_le16(REQ_MORE_INFO);
- /* BB fixme add conversion for access_flags to bits 0 - 2 of mode */
- /* 0 = read
- 1 = write
- 2 = rw
- 3 = execute
- */
- pSMB->Mode = cpu_to_le16(2);
+ pSMB->Mode = cpu_to_le16(access_flags_to_smbopen_mode(access_flags));
pSMB->Mode |= cpu_to_le16(0x40); /* deny none */
/* set file as system file if special file such
as fifo and server expecting SFU style and
@@ -1247,7 +1289,7 @@ OldOpenRetry:
} else {
/* BB verify if wct == 15 */
-/* *pOplock = pSMBr->OplockLevel; */ /* BB take from action field BB */
+/* *pOplock = pSMBr->OplockLevel; */ /* BB take from action field*/
*netfid = pSMBr->Fid; /* cifs fid stays in le */
/* Let caller know file was created so we can set the mode. */
@@ -1686,7 +1728,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
{
int rc = 0;
LOCK_REQ *pSMB = NULL;
- LOCK_RSP *pSMBr = NULL;
+/* LOCK_RSP *pSMBr = NULL; */ /* No response data other than rc to parse */
int bytes_returned;
int timeout = 0;
__u16 count;
@@ -1697,8 +1739,6 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
if (rc)
return rc;
- pSMBr = (LOCK_RSP *)pSMB; /* BB removeme BB */
-
if (lockType == LOCKING_ANDX_OPLOCK_RELEASE) {
timeout = CIFS_ASYNC_OP; /* no response expected */
pSMB->Timeout = 0;
@@ -1732,7 +1772,7 @@ CIFSSMBLock(const int xid, struct cifsTconInfo *tcon,
if (waitFlag) {
rc = SendReceiveBlockingLock(xid, tcon, (struct smb_hdr *) pSMB,
- (struct smb_hdr *) pSMBr, &bytes_returned);
+ (struct smb_hdr *) pSMB, &bytes_returned);
cifs_small_buf_release(pSMB);
} else {
rc = SendReceiveNoRsp(xid, tcon->ses, (struct smb_hdr *)pSMB,
@@ -1767,7 +1807,7 @@ CIFSSMBPosixLock(const int xid, struct cifsTconInfo *tcon,
cFYI(1, ("Posix Lock"));
if (pLockData == NULL)
- return EINVAL;
+ return -EINVAL;
rc = small_smb_init(SMB_COM_TRANSACTION2, 15, tcon, (void **) &pSMB);
@@ -1944,7 +1984,7 @@ renameRetry:
/* protocol requires ASCII signature byte on Unicode string */
pSMB->OldFileName[name_len + 1] = 0x00;
name_len2 =
- cifsConvertToUCS((__le16 *) &pSMB->OldFileName[name_len + 2],
+ cifsConvertToUCS((__le16 *)&pSMB->OldFileName[name_len + 2],
toName, PATH_MAX, nls_codepage, remap);
name_len2 += 1 /* trailing null */ + 1 /* Signature word */ ;
name_len2 *= 2; /* convert to bytes */
@@ -2117,8 +2157,7 @@ copyRetry:
cFYI(1, ("Send error in copy = %d with %d files copied",
rc, le16_to_cpu(pSMBr->CopyCount)));
}
- if (pSMB)
- cifs_buf_release(pSMB);
+ cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto copyRetry;
@@ -2207,8 +2246,7 @@ createSymLinkRetry:
if (rc)
cFYI(1, ("Send error in SetPathInfo create symlink = %d", rc));
- if (pSMB)
- cifs_buf_release(pSMB);
+ cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto createSymLinkRetry;
@@ -2925,7 +2963,8 @@ setAclRetry:
}
params = 6 + name_len;
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB size from sess */
+ /* BB find max SMB size from sess */
+ pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -3322,7 +3361,8 @@ QPathInfoRetry:
params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */
+ /* BB find exact max SMB PDU from sess structure BB */
+ pSMB->MaxDataCount = cpu_to_le16(4000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -3388,7 +3428,7 @@ QPathInfoRetry:
int
CIFSSMBUnixQPathInfo(const int xid, struct cifsTconInfo *tcon,
const unsigned char *searchName,
- FILE_UNIX_BASIC_INFO * pFindData,
+ FILE_UNIX_BASIC_INFO *pFindData,
const struct nls_table *nls_codepage, int remap)
{
/* SMB_QUERY_FILE_UNIX_BASIC */
@@ -3679,6 +3719,7 @@ int CIFSFindNext(const int xid, struct cifsTconInfo *tcon,
if (rc) {
if (rc == -EBADF) {
psrch_inf->endOfSearch = true;
+ cifs_buf_release(pSMB);
rc = 0; /* search probably was closed at end of search*/
} else
cFYI(1, ("FindNext returned = %d", rc));
@@ -3856,25 +3897,112 @@ GetInodeNumOut:
return rc;
}
+/* parses DFS refferal V3 structure
+ * caller is responsible for freeing target_nodes
+ * returns:
+ * on success - 0
+ * on failure - errno
+ */
+static int
+parse_DFS_referrals(TRANSACTION2_GET_DFS_REFER_RSP *pSMBr,
+ unsigned int *num_of_nodes,
+ struct dfs_info3_param **target_nodes,
+ const struct nls_table *nls_codepage)
+{
+ int i, rc = 0;
+ char *data_end;
+ bool is_unicode;
+ struct dfs_referral_level_3 *ref;
+
+ is_unicode = pSMBr->hdr.Flags2 & SMBFLG2_UNICODE;
+ *num_of_nodes = le16_to_cpu(pSMBr->NumberOfReferrals);
+
+ if (*num_of_nodes < 1) {
+ cERROR(1, ("num_referrals: must be at least > 0,"
+ "but we get num_referrals = %d\n", *num_of_nodes));
+ rc = -EINVAL;
+ goto parse_DFS_referrals_exit;
+ }
+
+ ref = (struct dfs_referral_level_3 *) &(pSMBr->referrals);
+ if (ref->VersionNumber != cpu_to_le16(3)) {
+ cERROR(1, ("Referrals of V%d version are not supported,"
+ "should be V3", le16_to_cpu(ref->VersionNumber)));
+ rc = -EINVAL;
+ goto parse_DFS_referrals_exit;
+ }
+
+ /* get the upper boundary of the resp buffer */
+ data_end = (char *)(&(pSMBr->PathConsumed)) +
+ le16_to_cpu(pSMBr->t2.DataCount);
+
+ cFYI(1, ("num_referrals: %d dfs flags: 0x%x ... \n",
+ *num_of_nodes,
+ le16_to_cpu(pSMBr->DFSFlags)));
+
+ *target_nodes = kzalloc(sizeof(struct dfs_info3_param) *
+ *num_of_nodes, GFP_KERNEL);
+ if (*target_nodes == NULL) {
+ cERROR(1, ("Failed to allocate buffer for target_nodes\n"));
+ rc = -ENOMEM;
+ goto parse_DFS_referrals_exit;
+ }
+
+ /* collect neccessary data from referrals */
+ for (i = 0; i < *num_of_nodes; i++) {
+ char *temp;
+ int max_len;
+ struct dfs_info3_param *node = (*target_nodes)+i;
+
+ node->flags = le16_to_cpu(pSMBr->DFSFlags);
+ node->path_consumed = le16_to_cpu(pSMBr->PathConsumed);
+ node->server_type = le16_to_cpu(ref->ServerType);
+ node->ref_flag = le16_to_cpu(ref->ReferralEntryFlags);
+
+ /* copy DfsPath */
+ temp = (char *)ref + le16_to_cpu(ref->DfsPathOffset);
+ max_len = data_end - temp;
+ rc = cifs_strncpy_to_host(&(node->path_name), temp,
+ max_len, is_unicode, nls_codepage);
+ if (rc)
+ goto parse_DFS_referrals_exit;
+
+ /* copy link target UNC */
+ temp = (char *)ref + le16_to_cpu(ref->NetworkAddressOffset);
+ max_len = data_end - temp;
+ rc = cifs_strncpy_to_host(&(node->node_name), temp,
+ max_len, is_unicode, nls_codepage);
+ if (rc)
+ goto parse_DFS_referrals_exit;
+
+ ref += le16_to_cpu(ref->Size);
+ }
+
+parse_DFS_referrals_exit:
+ if (rc) {
+ free_dfs_info_array(*target_nodes, *num_of_nodes);
+ *target_nodes = NULL;
+ *num_of_nodes = 0;
+ }
+ return rc;
+}
+
int
CIFSGetDFSRefer(const int xid, struct cifsSesInfo *ses,
const unsigned char *searchName,
- unsigned char **targetUNCs,
- unsigned int *number_of_UNC_in_array,
+ struct dfs_info3_param **target_nodes,
+ unsigned int *num_of_nodes,
const struct nls_table *nls_codepage, int remap)
{
/* TRANS2_GET_DFS_REFERRAL */
TRANSACTION2_GET_DFS_REFER_REQ *pSMB = NULL;
TRANSACTION2_GET_DFS_REFER_RSP *pSMBr = NULL;
- struct dfs_referral_level_3 *referrals = NULL;
int rc = 0;
int bytes_returned;
int name_len;
- unsigned int i;
- char *temp;
__u16 params, byte_count;
- *number_of_UNC_in_array = 0;
- *targetUNCs = NULL;
+ *num_of_nodes = 0;
+ *target_nodes = NULL;
cFYI(1, ("In GetDFSRefer the path %s", searchName));
if (ses == NULL)
@@ -3921,7 +4049,8 @@ getDFSRetry:
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->MaxParameterCount = 0;
- pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */
+ /* BB find exact max SMB PDU from sess structure BB */
+ pSMB->MaxDataCount = cpu_to_le16(4000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -3943,103 +4072,26 @@ getDFSRetry:
(struct smb_hdr *) pSMBr, &bytes_returned, 0);
if (rc) {
cFYI(1, ("Send error in GetDFSRefer = %d", rc));
- } else { /* decode response */
-/* BB Add logic to parse referrals here */
- rc = validate_t2((struct smb_t2_rsp *)pSMBr);
+ goto GetDFSRefExit;
+ }
+ rc = validate_t2((struct smb_t2_rsp *)pSMBr);
- /* BB Also check if enough total bytes returned? */
- if (rc || (pSMBr->ByteCount < 17))
- rc = -EIO; /* bad smb */
- else {
- __u16 data_offset = le16_to_cpu(pSMBr->t2.DataOffset);
- __u16 data_count = le16_to_cpu(pSMBr->t2.DataCount);
+ /* BB Also check if enough total bytes returned? */
+ if (rc || (pSMBr->ByteCount < 17)) {
+ rc = -EIO; /* bad smb */
+ goto GetDFSRefExit;
+ }
- cFYI(1,
- ("Decoding GetDFSRefer response BCC: %d Offset %d",
- pSMBr->ByteCount, data_offset));
- referrals =
- (struct dfs_referral_level_3 *)
- (8 /* sizeof start of data block */ +
- data_offset +
- (char *) &pSMBr->hdr.Protocol);
- cFYI(1, ("num_referrals: %d dfs flags: 0x%x ... \n"
- "for referral one refer size: 0x%x srv "
- "type: 0x%x refer flags: 0x%x ttl: 0x%x",
- le16_to_cpu(pSMBr->NumberOfReferrals),
- le16_to_cpu(pSMBr->DFSFlags),
- le16_to_cpu(referrals->ReferralSize),
- le16_to_cpu(referrals->ServerType),
- le16_to_cpu(referrals->ReferralFlags),
- le16_to_cpu(referrals->TimeToLive)));
- /* BB This field is actually two bytes in from start of
- data block so we could do safety check that DataBlock
- begins at address of pSMBr->NumberOfReferrals */
- *number_of_UNC_in_array =
- le16_to_cpu(pSMBr->NumberOfReferrals);
-
- /* BB Fix below so can return more than one referral */
- if (*number_of_UNC_in_array > 1)
- *number_of_UNC_in_array = 1;
-
- /* get the length of the strings describing refs */
- name_len = 0;
- for (i = 0; i < *number_of_UNC_in_array; i++) {
- /* make sure that DfsPathOffset not past end */
- __u16 offset =
- le16_to_cpu(referrals->DfsPathOffset);
- if (offset > data_count) {
- /* if invalid referral, stop here and do
- not try to copy any more */
- *number_of_UNC_in_array = i;
- break;
- }
- temp = ((char *)referrals) + offset;
+ cFYI(1, ("Decoding GetDFSRefer response BCC: %d Offset %d",
+ pSMBr->ByteCount,
+ le16_to_cpu(pSMBr->t2.DataOffset)));
- if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
- name_len += UniStrnlen((wchar_t *)temp,
- data_count);
- } else {
- name_len += strnlen(temp, data_count);
- }
- referrals++;
- /* BB add check that referral pointer does
- not fall off end PDU */
- }
- /* BB add check for name_len bigger than bcc */
- *targetUNCs =
- kmalloc(name_len+1+(*number_of_UNC_in_array),
- GFP_KERNEL);
- if (*targetUNCs == NULL) {
- rc = -ENOMEM;
- goto GetDFSRefExit;
- }
- /* copy the ref strings */
- referrals = (struct dfs_referral_level_3 *)
- (8 /* sizeof data hdr */ + data_offset +
- (char *) &pSMBr->hdr.Protocol);
-
- for (i = 0; i < *number_of_UNC_in_array; i++) {
- temp = ((char *)referrals) +
- le16_to_cpu(referrals->DfsPathOffset);
- if (pSMBr->hdr.Flags2 & SMBFLG2_UNICODE) {
- cifs_strfromUCS_le(*targetUNCs,
- (__le16 *) temp,
- name_len,
- nls_codepage);
- } else {
- strncpy(*targetUNCs, temp, name_len);
- }
- /* BB update target_uncs pointers */
- referrals++;
- }
- temp = *targetUNCs;
- temp[name_len] = 0;
- }
+ /* parse returned result into more usable form */
+ rc = parse_DFS_referrals(pSMBr, num_of_nodes,
+ target_nodes, nls_codepage);
- }
GetDFSRefExit:
- if (pSMB)
- cifs_buf_release(pSMB);
+ cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto getDFSRetry;
@@ -4229,7 +4281,8 @@ QFSAttributeRetry:
params = 2; /* level */
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find exact max SMB PDU from sess structure BB */
+ /* BB find exact max SMB PDU from sess structure BB */
+ pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -4298,7 +4351,8 @@ QFSDeviceRetry:
params = 2; /* level */
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find exact max SMB PDU from sess structure BB */
+ /* BB find exact max SMB PDU from sess structure BB */
+ pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -4369,7 +4423,8 @@ QFSUnixRetry:
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(100); /* BB find exact max SMB PDU from sess structure BB */
+ /* BB find exact max SMB PDU from sess structure BB */
+ pSMB->MaxDataCount = cpu_to_le16(100);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -4444,7 +4499,8 @@ SETFSUnixRetry:
offset = param_offset + params;
pSMB->MaxParameterCount = cpu_to_le16(4);
- pSMB->MaxDataCount = cpu_to_le16(100); /* BB find exact max SMB PDU from sess structure BB */
+ /* BB find exact max SMB PDU from sess structure BB */
+ pSMB->MaxDataCount = cpu_to_le16(100);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FS_INFORMATION);
@@ -4512,7 +4568,8 @@ QFSPosixRetry:
pSMB->DataCount = 0;
pSMB->DataOffset = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(100); /* BB find exact max SMB PDU from sess structure BB */
+ /* BB find exact max SMB PDU from sess structure BB */
+ pSMB->MaxDataCount = cpu_to_le16(100);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -4702,7 +4759,8 @@ CIFSSMBSetFileSize(const int xid, struct cifsTconInfo *tcon, __u64 size,
count = sizeof(struct file_end_of_file_info);
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
+ /* BB find exact max SMB PDU from sess structure BB */
+ pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
@@ -4789,7 +4847,8 @@ CIFSSMBSetFileTimes(const int xid, struct cifsTconInfo *tcon,
count = sizeof(FILE_BASIC_INFO);
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB PDU from sess */
+ /* BB find max SMB PDU from sess */
+ pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->SetupCount = 1;
pSMB->Reserved3 = 0;
pSMB->SubCommand = cpu_to_le16(TRANS2_SET_FILE_INFORMATION);
@@ -4856,7 +4915,8 @@ SetTimesRetry:
params = 6 + name_len;
count = sizeof(FILE_BASIC_INFO);
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find exact max SMB PDU from sess structure BB */
+ /* BB find max SMB PDU from sess structure BB */
+ pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -4986,7 +5046,8 @@ setPermsRetry:
params = 6 + name_len;
count = sizeof(FILE_UNIX_BASIC_INFO);
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find exact max SMB PDU from sess structure BB */
+ /* BB find max SMB PDU from sess structure BB */
+ pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -5051,8 +5112,7 @@ setPermsRetry:
if (rc)
cFYI(1, ("SetPathInfo (perms) returned %d", rc));
- if (pSMB)
- cifs_buf_release(pSMB);
+ cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto setPermsRetry;
return rc;
@@ -5169,7 +5229,8 @@ QAllEAsRetry:
params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */
+ /* BB find exact max SMB PDU from sess structure BB */
+ pSMB->MaxDataCount = cpu_to_le16(4000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -5273,8 +5334,7 @@ QAllEAsRetry:
}
}
}
- if (pSMB)
- cifs_buf_release(pSMB);
+ cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QAllEAsRetry;
@@ -5317,7 +5377,8 @@ QEARetry:
params = 2 /* level */ + 4 /* reserved */ + name_len /* includes NUL */;
pSMB->TotalDataCount = 0;
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(4000); /* BB find exact max SMB PDU from sess structure BB */
+ /* BB find exact max SMB PDU from sess structure BB */
+ pSMB->MaxDataCount = cpu_to_le16(4000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
@@ -5422,8 +5483,7 @@ QEARetry:
}
}
}
- if (pSMB)
- cifs_buf_release(pSMB);
+ cifs_buf_release(pSMB);
if (rc == -EAGAIN)
goto QEARetry;
@@ -5475,7 +5535,8 @@ SetEARetry:
count = sizeof(*parm_data) + ea_value_len + name_len;
pSMB->MaxParameterCount = cpu_to_le16(2);
- pSMB->MaxDataCount = cpu_to_le16(1000); /* BB find max SMB size from sess */
+ /* BB find max SMB PDU from sess */
+ pSMB->MaxDataCount = cpu_to_le16(1000);
pSMB->MaxSetupCount = 0;
pSMB->Reserved = 0;
pSMB->Flags = 0;
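With the reworked CIFSGetDFSRefer() interface the caller gets back an allocated array of struct dfs_info3_param instead of a packed string blob, and releases it with free_dfs_info_array(), exactly as the parser's error path above does. A hedged caller sketch (example_lookup is hypothetical; only the called functions come from this patch):

static void example_lookup(int xid, struct cifsSesInfo *ses,
			   const unsigned char *search_path,
			   const struct nls_table *nls_codepage, int remap)
{
	struct dfs_info3_param *refs = NULL;
	unsigned int num_refs = 0;
	int rc;

	rc = CIFSGetDFSRefer(xid, ses, search_path, &refs, &num_refs,
			     nls_codepage, remap);
	if (rc)
		return;		/* on failure refs stays NULL, num_refs 0 */

	if (num_refs > 0)
		cFYI(1, ("first DFS target: %s", refs[0].node_name));

	free_dfs_info_array(refs, num_refs);
}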
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index f428bf3bf1a9..e8fa46c7cff2 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -60,7 +60,7 @@ struct smb_vol {
char *domainname;
char *UNC;
char *UNCip;
- char *in6_addr; /* ipv6 address as human readable form of in6_addr */
+ char *in6_addr; /* ipv6 address as human readable form of in6_addr */
char *iocharset; /* local code page for mapping to and from Unicode */
char source_rfc1001_name[16]; /* netbios name of client */
char target_rfc1001_name[16]; /* netbios name of server for Win9x/ME */
@@ -75,19 +75,21 @@ struct smb_vol {
bool setuids:1;
bool override_uid:1;
bool override_gid:1;
+ bool dynperm:1;
bool noperm:1;
bool no_psx_acl:1; /* set if posix acl support should be disabled */
bool cifs_acl:1;
bool no_xattr:1; /* set if xattr (EA) support should be disabled*/
bool server_ino:1; /* use inode numbers from server ie UniqueId */
bool direct_io:1;
- bool remap:1; /* set to remap seven reserved chars in filenames */
- bool posix_paths:1; /* unset to not ask for posix pathnames. */
+ bool remap:1; /* set to remap seven reserved chars in filenames */
+ bool posix_paths:1; /* unset to not ask for posix pathnames. */
bool no_linux_ext:1;
bool sfu_emul:1;
- bool nullauth:1; /* attempt to authenticate with null user */
- unsigned nocase; /* request case insensitive filenames */
- unsigned nobrl; /* disable sending byte range locks to srv */
+ bool nullauth:1; /* attempt to authenticate with null user */
+ bool nocase:1; /* request case insensitive filenames */
+ bool nobrl:1; /* disable sending byte range locks to srv */
+ bool seal:1; /* request transport encryption on share */
unsigned int rsize;
unsigned int wsize;
unsigned int sockopt;
@@ -651,6 +653,7 @@ multi_t2_fnd:
spin_lock(&GlobalMid_Lock);
server->tcpStatus = CifsExiting;
spin_unlock(&GlobalMid_Lock);
+ wake_up_all(&server->response_q);
/* don't exit until kthread_stop is called */
set_current_state(TASK_UNINTERRUPTIBLE);
@@ -1246,6 +1249,10 @@ cifs_parse_mount_options(char *options, const char *devname,
vol->setuids = 1;
} else if (strnicmp(data, "nosetuids", 9) == 0) {
vol->setuids = 0;
+ } else if (strnicmp(data, "dynperm", 7) == 0) {
+ vol->dynperm = true;
+ } else if (strnicmp(data, "nodynperm", 9) == 0) {
+ vol->dynperm = false;
} else if (strnicmp(data, "nohard", 6) == 0) {
vol->retry = 0;
} else if (strnicmp(data, "nosoft", 6) == 0) {
@@ -1268,8 +1275,12 @@ cifs_parse_mount_options(char *options, const char *devname,
vol->no_psx_acl = 1;
} else if (strnicmp(data, "sign", 4) == 0) {
vol->secFlg |= CIFSSEC_MUST_SIGN;
-/* } else if (strnicmp(data, "seal",4) == 0) {
- vol->secFlg |= CIFSSEC_MUST_SEAL; */
+ } else if (strnicmp(data, "seal", 4) == 0) {
+ /* we do not do the following in secFlags because seal
+ is a per tree connection (mount) not a per socket
+ or per-smb connection option in the protocol */
+ /* vol->secFlg |= CIFSSEC_MUST_SEAL; */
+ vol->seal = 1;
} else if (strnicmp(data, "direct", 6) == 0) {
vol->direct_io = 1;
} else if (strnicmp(data, "forcedirectio", 13) == 0) {
@@ -1414,34 +1425,12 @@ find_unc(__be32 new_target_ip_addr, char *uncName, char *userName)
}
int
-connect_to_dfs_path(int xid, struct cifsSesInfo *pSesInfo,
- const char *old_path, const struct nls_table *nls_codepage,
- int remap)
-{
- struct dfs_info3_param *referrals = NULL;
- unsigned int num_referrals;
- int rc = 0;
-
- rc = get_dfs_path(xid, pSesInfo, old_path, nls_codepage,
- &num_referrals, &referrals, remap);
-
- /* BB Add in code to: if valid refrl, if not ip address contact
- the helper that resolves tcp names, mount to it, try to
- tcon to it unmount it if fail */
-
- kfree(referrals);
-
- return rc;
-}
-
-int
get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
const struct nls_table *nls_codepage, unsigned int *pnum_referrals,
struct dfs_info3_param **preferrals, int remap)
{
char *temp_unc;
int rc = 0;
- unsigned char *targetUNCs;
*pnum_referrals = 0;
*preferrals = NULL;
@@ -1464,7 +1453,7 @@ get_dfs_path(int xid, struct cifsSesInfo *pSesInfo, const char *old_path,
kfree(temp_unc);
}
if (rc == 0)
- rc = CIFSGetDFSRefer(xid, pSesInfo, old_path, &targetUNCs,
+ rc = CIFSGetDFSRefer(xid, pSesInfo, old_path, preferrals,
pnum_referrals, nls_codepage, remap);
/* BB map targetUNCs to dfs_info3 structures, here or
in CIFSGetDFSRefer BB */
@@ -1815,7 +1804,7 @@ convert_delimiter(char *path, char delim)
if (path == NULL)
return;
- if (delim == '/')
+ if (delim == '/')
old_delim = '\\';
else
old_delim = '/';
@@ -2125,11 +2114,17 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_UID;
if (volume_info.override_gid)
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_OVERR_GID;
+ if (volume_info.dynperm)
+ cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DYNPERM;
if (volume_info.direct_io) {
cFYI(1, ("mounting share using direct i/o"));
cifs_sb->mnt_cifs_flags |= CIFS_MOUNT_DIRECT_IO;
}
+ if ((volume_info.cifs_acl) && (volume_info.dynperm))
+ cERROR(1, ("mount option dynperm ignored if cifsacl "
+ "mount option supported"));
+
tcon =
find_unc(sin_server.sin_addr.s_addr, volume_info.UNC,
volume_info.username);
@@ -2141,6 +2136,9 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
for the retry flag is used */
tcon->retry = volume_info.retry;
tcon->nocase = volume_info.nocase;
+ if (tcon->seal != volume_info.seal)
+ cERROR(1, ("transport encryption setting "
+ "conflicts with existing tid"));
} else {
tcon = tconInfoAlloc();
if (tcon == NULL)
@@ -2154,10 +2152,11 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
if ((strchr(volume_info.UNC + 3, '\\') == NULL)
&& (strchr(volume_info.UNC + 3, '/') ==
NULL)) {
- rc = connect_to_dfs_path(xid, pSesInfo,
+/* rc = connect_to_dfs_path(xid, pSesInfo,
"", cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
+ CIFS_MOUNT_MAP_SPECIAL_CHR);*/
+ cFYI(1, ("DFS root not supported"));
rc = -ENODEV;
goto out;
} else {
@@ -2173,6 +2172,7 @@ cifs_mount(struct super_block *sb, struct cifs_sb_info *cifs_sb,
atomic_inc(&pSesInfo->inUse);
tcon->retry = volume_info.retry;
tcon->nocase = volume_info.nocase;
+ tcon->seal = volume_info.seal;
}
}
}
@@ -2314,9 +2314,10 @@ CIFSSessSetup(unsigned int xid, struct cifsSesInfo *ses,
user = ses->userName;
domain = ses->domainName;
smb_buffer = cifs_buf_get();
- if (smb_buffer == NULL) {
+
+ if (smb_buffer == NULL)
return -ENOMEM;
- }
+
smb_buffer_response = smb_buffer;
pSMBr = pSMB = (SESSION_SETUP_ANDX *) smb_buffer;
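The connect.c hunk above extends the strnicmp-based option scanner with "dynperm"/"nodynperm" and makes "seal" a per-tree-connection flag instead of a secFlg bit. A minimal userspace sketch of that same prefix-match pattern follows, using strncasecmp in place of the kernel's strnicmp and an illustrative stand-in struct (names here are assumptions, not the real parser):

#include <stdio.h>
#include <string.h>
#include <strings.h>
#include <stdbool.h>

struct vol_opts {                 /* illustrative stand-in for struct smb_vol */
	bool dynperm;
	bool seal;
};

static void parse_opts(char *options, struct vol_opts *vol)
{
	char *data;

	/* walk the comma-separated option list, matching known prefixes */
	for (data = strtok(options, ","); data; data = strtok(NULL, ",")) {
		if (strncasecmp(data, "dynperm", 7) == 0)
			vol->dynperm = true;
		else if (strncasecmp(data, "nodynperm", 9) == 0)
			vol->dynperm = false;
		else if (strncasecmp(data, "seal", 4) == 0)
			vol->seal = true;   /* per tree connection, not a secFlg */
	}
}

int main(void)
{
	char opts[] = "seal,dynperm,nodynperm";
	struct vol_opts vol = { 0 };

	parse_opts(opts, &vol);
	printf("dynperm=%d seal=%d\n", vol.dynperm, vol.seal);
	return 0;
}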
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index e4e0078a0526..fb69c1fa85c9 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -49,18 +49,25 @@ build_path_from_dentry(struct dentry *direntry)
struct dentry *temp;
int namelen;
int pplen;
+ int dfsplen;
char *full_path;
char dirsep;
+ struct cifs_sb_info *cifs_sb;
if (direntry == NULL)
return NULL; /* not much we can do if dentry is freed and
we need to reopen the file after it was closed implicitly
when the server crashed */
- dirsep = CIFS_DIR_SEP(CIFS_SB(direntry->d_sb));
- pplen = CIFS_SB(direntry->d_sb)->prepathlen;
+ cifs_sb = CIFS_SB(direntry->d_sb);
+ dirsep = CIFS_DIR_SEP(cifs_sb);
+ pplen = cifs_sb->prepathlen;
+ if (cifs_sb->tcon && (cifs_sb->tcon->Flags & SMB_SHARE_IS_IN_DFS))
+ dfsplen = strnlen(cifs_sb->tcon->treeName, MAX_TREE_SIZE + 1);
+ else
+ dfsplen = 0;
cifs_bp_rename_retry:
- namelen = pplen;
+ namelen = pplen + dfsplen;
for (temp = direntry; !IS_ROOT(temp);) {
namelen += (1 + temp->d_name.len);
temp = temp->d_parent;
@@ -91,7 +98,7 @@ cifs_bp_rename_retry:
return NULL;
}
}
- if (namelen != pplen) {
+ if (namelen != pplen + dfsplen) {
cERROR(1,
("did not end path lookup where expected namelen is %d",
namelen));
@@ -107,7 +114,18 @@ cifs_bp_rename_retry:
since the '\' is a valid posix character so we can not switch
those safely to '/' if any are found in the middle of the prepath */
/* BB test paths to Windows with '/' in the midst of prepath */
- strncpy(full_path, CIFS_SB(direntry->d_sb)->prepath, pplen);
+
+ if (dfsplen) {
+ strncpy(full_path, cifs_sb->tcon->treeName, dfsplen);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS) {
+ int i;
+ for (i = 0; i < dfsplen; i++) {
+ if (full_path[i] == '\\')
+ full_path[i] = '/';
+ }
+ }
+ }
+ strncpy(full_path + dfsplen, CIFS_SB(direntry->d_sb)->prepath, pplen);
return full_path;
}
@@ -242,7 +260,9 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
buf, inode->i_sb, xid,
&fileHandle);
if (newinode) {
- newinode->i_mode = mode;
+ if (cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_DYNPERM)
+ newinode->i_mode = mode;
if ((oplock & CIFS_CREATE_ACTION) &&
(cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_SET_UID)) {
@@ -590,7 +610,7 @@ static int cifs_ci_compare(struct dentry *dentry, struct qstr *a,
* case take precedence. If a is not a negative dentry, this
* should have no side effects
*/
- memcpy(a->name, b->name, a->len);
+ memcpy((void *)a->name, b->name, a->len);
return 0;
}
return 1;
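In the dir.c hunks, build_path_from_dentry() now reserves room for the tcon treeName when the share is a DFS junction and flips the copied '\' separators to '/' when POSIX paths are in use. A standalone sketch of just that prefixing step (the tree name, relative path and POSIX flag are made-up inputs, not the cifs_sb fields):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdbool.h>

/* Prepend a DFS tree name to a share-relative path, optionally
 * converting the backslashes in the prefix to POSIX separators. */
static char *build_full_path(const char *tree, const char *path, bool posix)
{
	size_t tlen = strlen(tree);
	size_t plen = strlen(path);
	char *full = malloc(tlen + plen + 1);
	size_t i;

	if (!full)
		return NULL;
	memcpy(full, tree, tlen);
	if (posix)
		for (i = 0; i < tlen; i++)
			if (full[i] == '\\')
				full[i] = '/';
	memcpy(full + tlen, path, plen + 1);     /* copy the trailing NUL too */
	return full;
}

int main(void)
{
	char *p = build_full_path("\\\\server\\share", "/dir/file", true);

	printf("%s\n", p ? p : "(alloc failed)"); /* //server/share/dir/file */
	free(p);
	return 0;
}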
diff --git a/fs/cifs/dns_resolve.c b/fs/cifs/dns_resolve.c
index 939e256f8497..f730ef35499e 100644
--- a/fs/cifs/dns_resolve.c
+++ b/fs/cifs/dns_resolve.c
@@ -134,10 +134,6 @@ dns_resolve_server_name_to_ip(const char *unc, char **ip_addr)
rkey = request_key(&key_type_dns_resolver, name, "");
if (!IS_ERR(rkey)) {
data = rkey->payload.data;
- cFYI(1, ("%s: resolved: %s to %s", __func__,
- rkey->description,
- *ip_addr
- ));
} else {
cERROR(1, ("%s: unable to resolve: %s", __func__, name));
goto out;
@@ -150,6 +146,11 @@ skip_upcall:
if (*ip_addr) {
memcpy(*ip_addr, data, len);
(*ip_addr)[len] = '\0';
+ if (!IS_ERR(rkey))
+ cFYI(1, ("%s: resolved: %s to %s", __func__,
+ name,
+ *ip_addr
+ ));
rc = 0;
} else {
rc = -ENOMEM;
diff --git a/fs/cifs/file.c b/fs/cifs/file.c
index 31a0a33b9d95..0aac824371a5 100644
--- a/fs/cifs/file.c
+++ b/fs/cifs/file.c
@@ -75,7 +75,11 @@ static inline int cifs_convert_flags(unsigned int flags)
return (GENERIC_READ | GENERIC_WRITE);
}
- return 0x20197;
+ return (READ_CONTROL | FILE_WRITE_ATTRIBUTES | FILE_READ_ATTRIBUTES |
+ FILE_WRITE_EA | FILE_APPEND_DATA | FILE_WRITE_DATA |
+ FILE_READ_DATA);
+
+
}
static inline int cifs_get_disposition(unsigned int flags)
@@ -542,7 +546,6 @@ int cifs_close(struct inode *inode, struct file *file)
msleep(timeout);
timeout *= 8;
}
- kfree(pSMBFile->search_resume_name);
kfree(file->private_data);
file->private_data = NULL;
} else
@@ -601,12 +604,6 @@ int cifs_closedir(struct inode *inode, struct file *file)
else
cifs_buf_release(ptmp);
}
- ptmp = pCFileStruct->search_resume_name;
- if (ptmp) {
- cFYI(1, ("closedir free resume name"));
- pCFileStruct->search_resume_name = NULL;
- kfree(ptmp);
- }
kfree(file->private_data);
file->private_data = NULL;
}
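The file.c change replaces the magic default access mask 0x20197 with named bits. Assuming the usual SMB/NT access-mask values (the same constants cifspdu.h uses), the OR of the named flags reproduces the old constant, which this small check verifies:

#include <assert.h>
#include <stdio.h>

/* Standard NT access-mask bits; values assumed from the SMB spec. */
#define FILE_READ_DATA        0x00000001
#define FILE_WRITE_DATA       0x00000002
#define FILE_APPEND_DATA      0x00000004
#define FILE_WRITE_EA         0x00000010
#define FILE_READ_ATTRIBUTES  0x00000080
#define FILE_WRITE_ATTRIBUTES 0x00000100
#define READ_CONTROL          0x00020000

int main(void)
{
	unsigned int mask = READ_CONTROL | FILE_WRITE_ATTRIBUTES |
			    FILE_READ_ATTRIBUTES | FILE_WRITE_EA |
			    FILE_APPEND_DATA | FILE_WRITE_DATA |
			    FILE_READ_DATA;

	assert(mask == 0x20197);          /* same value, now self-describing */
	printf("default open mask = 0x%x\n", mask);
	return 0;
}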
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index fcbdbb6ad7bf..722be543ceec 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -161,118 +161,115 @@ static void cifs_unix_info_to_inode(struct inode *inode,
spin_unlock(&inode->i_lock);
}
-static const unsigned char *cifs_get_search_path(struct cifs_sb_info *cifs_sb,
- const char *search_path)
-{
- int tree_len;
- int path_len;
- int i;
- char *tmp_path;
- struct cifsTconInfo *pTcon = cifs_sb->tcon;
-
- if (!(pTcon->Flags & SMB_SHARE_IS_IN_DFS))
- return search_path;
-
- /* use full path name for working with DFS */
- tree_len = strnlen(pTcon->treeName, MAX_TREE_SIZE + 1);
- path_len = strnlen(search_path, MAX_PATHCONF);
- tmp_path = kmalloc(tree_len+path_len+1, GFP_KERNEL);
- if (tmp_path == NULL)
- return search_path;
+/*
+ * Needed to setup inode data for the directory which is the
+ * junction to the new submount (ie to setup the fake directory
+ * which represents a DFS referral)
+ */
+static void fill_fake_finddataunix(FILE_UNIX_BASIC_INFO *pfnd_dat,
+ struct super_block *sb)
+{
+ struct inode *pinode = NULL;
+
+ memset(pfnd_dat, 0, sizeof(FILE_UNIX_BASIC_INFO));
+
+/* __le64 pfnd_dat->EndOfFile = cpu_to_le64(0);
+ __le64 pfnd_dat->NumOfBytes = cpu_to_le64(0);
+ __u64 UniqueId = 0; */
+ pfnd_dat->LastStatusChange =
+ cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ pfnd_dat->LastAccessTime =
+ cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ pfnd_dat->LastModificationTime =
+ cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ pfnd_dat->Type = cpu_to_le32(UNIX_DIR);
+ pfnd_dat->Permissions = cpu_to_le64(S_IXUGO | S_IRWXU);
+ pfnd_dat->Nlinks = cpu_to_le64(2);
+ if (sb->s_root)
+ pinode = sb->s_root->d_inode;
+ if (pinode == NULL)
+ return;
- strncpy(tmp_path, pTcon->treeName, tree_len);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_POSIX_PATHS)
- for (i = 0; i < tree_len; i++) {
- if (tmp_path[i] == '\\')
- tmp_path[i] = '/';
- }
- strncpy(tmp_path+tree_len, search_path, path_len);
- tmp_path[tree_len+path_len] = 0;
- return tmp_path;
+ /* fill in default values for the remaining based on root
+ inode since we can not query the server for this inode info */
+ pfnd_dat->DevMajor = cpu_to_le64(MAJOR(pinode->i_rdev));
+ pfnd_dat->DevMinor = cpu_to_le64(MINOR(pinode->i_rdev));
+ pfnd_dat->Uid = cpu_to_le64(pinode->i_uid);
+ pfnd_dat->Gid = cpu_to_le64(pinode->i_gid);
}
int cifs_get_inode_info_unix(struct inode **pinode,
- const unsigned char *search_path, struct super_block *sb, int xid)
+ const unsigned char *full_path, struct super_block *sb, int xid)
{
int rc = 0;
- FILE_UNIX_BASIC_INFO findData;
+ FILE_UNIX_BASIC_INFO find_data;
struct cifsTconInfo *pTcon;
struct inode *inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- const unsigned char *full_path;
bool is_dfs_referral = false;
+ struct cifsInodeInfo *cifsInfo;
+ __u64 num_of_bytes;
+ __u64 end_of_file;
pTcon = cifs_sb->tcon;
- cFYI(1, ("Getting info on %s", search_path));
-
- full_path = cifs_get_search_path(cifs_sb, search_path);
+ cFYI(1, ("Getting info on %s", full_path));
-try_again_CIFSSMBUnixQPathInfo:
/* could have done a find first instead but this returns more info */
- rc = CIFSSMBUnixQPathInfo(xid, pTcon, full_path, &findData,
+ rc = CIFSSMBUnixQPathInfo(xid, pTcon, full_path, &find_data,
cifs_sb->local_nls, cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
-/* dump_mem("\nUnixQPathInfo return data", &findData,
- sizeof(findData)); */
if (rc) {
if (rc == -EREMOTE && !is_dfs_referral) {
is_dfs_referral = true;
- if (full_path != search_path) {
- kfree(full_path);
- full_path = search_path;
- }
- goto try_again_CIFSSMBUnixQPathInfo;
+ cFYI(DBG2, ("DFS ref"));
+ /* for DFS, server does not give us real inode data */
+ fill_fake_finddataunix(&find_data, sb);
+ rc = 0;
}
- goto cgiiu_exit;
- } else {
- struct cifsInodeInfo *cifsInfo;
- __u64 num_of_bytes = le64_to_cpu(findData.NumOfBytes);
- __u64 end_of_file = le64_to_cpu(findData.EndOfFile);
+ }
+ num_of_bytes = le64_to_cpu(find_data.NumOfBytes);
+ end_of_file = le64_to_cpu(find_data.EndOfFile);
- /* get new inode */
+ /* get new inode */
+ if (*pinode == NULL) {
+ *pinode = new_inode(sb);
if (*pinode == NULL) {
- *pinode = new_inode(sb);
- if (*pinode == NULL) {
- rc = -ENOMEM;
- goto cgiiu_exit;
- }
- /* Is an i_ino of zero legal? */
- /* Are there sanity checks we can use to ensure that
- the server is really filling in that field? */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
- (*pinode)->i_ino =
- (unsigned long)findData.UniqueId;
- } /* note ino incremented to unique num in new_inode */
- if (sb->s_flags & MS_NOATIME)
- (*pinode)->i_flags |= S_NOATIME | S_NOCMTIME;
-
- insert_inode_hash(*pinode);
+ rc = -ENOMEM;
+ goto cgiiu_exit;
}
+ /* Is an i_ino of zero legal? */
+ /* note ino incremented to unique num in new_inode */
+ /* Are there sanity checks we can use to ensure that
+ the server is really filling in that field? */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM)
+ (*pinode)->i_ino = (unsigned long)find_data.UniqueId;
- inode = *pinode;
- cifsInfo = CIFS_I(inode);
+ if (sb->s_flags & MS_NOATIME)
+ (*pinode)->i_flags |= S_NOATIME | S_NOCMTIME;
- cFYI(1, ("Old time %ld", cifsInfo->time));
- cifsInfo->time = jiffies;
- cFYI(1, ("New time %ld", cifsInfo->time));
- /* this is ok to set on every inode revalidate */
- atomic_set(&cifsInfo->inUse, 1);
+ insert_inode_hash(*pinode);
+ }
+
+ inode = *pinode;
+ cifsInfo = CIFS_I(inode);
- cifs_unix_info_to_inode(inode, &findData, 0);
+ cFYI(1, ("Old time %ld", cifsInfo->time));
+ cifsInfo->time = jiffies;
+ cFYI(1, ("New time %ld", cifsInfo->time));
+ /* this is ok to set on every inode revalidate */
+ atomic_set(&cifsInfo->inUse, 1);
+ cifs_unix_info_to_inode(inode, &find_data, 0);
- if (num_of_bytes < end_of_file)
- cFYI(1, ("allocation size less than end of file"));
- cFYI(1, ("Size %ld and blocks %llu",
- (unsigned long) inode->i_size,
- (unsigned long long)inode->i_blocks));
+ if (num_of_bytes < end_of_file)
+ cFYI(1, ("allocation size less than end of file"));
+ cFYI(1, ("Size %ld and blocks %llu",
+ (unsigned long) inode->i_size,
+ (unsigned long long)inode->i_blocks));
- cifs_set_ops(inode, is_dfs_referral);
- }
+ cifs_set_ops(inode, is_dfs_referral);
cgiiu_exit:
- if (full_path != search_path)
- kfree(full_path);
return rc;
}
@@ -379,21 +376,52 @@ static int get_sfu_mode(struct inode *inode,
#endif
}
+/*
+ * Needed to setup inode data for the directory which is the
+ * junction to the new submount (ie to setup the fake directory
+ * which represents a DFS referral)
+ */
+static void fill_fake_finddata(FILE_ALL_INFO *pfnd_dat,
+ struct super_block *sb)
+{
+ memset(pfnd_dat, 0, sizeof(FILE_ALL_INFO));
+
+/* __le64 pfnd_dat->AllocationSize = cpu_to_le64(0);
+ __le64 pfnd_dat->EndOfFile = cpu_to_le64(0);
+ __u8 pfnd_dat->DeletePending = 0;
+ __u8 pfnd_data->Directory = 0;
+ __le32 pfnd_dat->EASize = 0;
+ __u64 pfnd_dat->IndexNumber = 0;
+ __u64 pfnd_dat->IndexNumber1 = 0; */
+ pfnd_dat->CreationTime =
+ cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ pfnd_dat->LastAccessTime =
+ cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ pfnd_dat->LastWriteTime =
+ cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ pfnd_dat->ChangeTime =
+ cpu_to_le64(cifs_UnixTimeToNT(CURRENT_TIME));
+ pfnd_dat->Attributes = cpu_to_le32(ATTR_DIRECTORY);
+ pfnd_dat->NumberOfLinks = cpu_to_le32(2);
+}
+
int cifs_get_inode_info(struct inode **pinode,
- const unsigned char *search_path, FILE_ALL_INFO *pfindData,
+ const unsigned char *full_path, FILE_ALL_INFO *pfindData,
struct super_block *sb, int xid, const __u16 *pfid)
{
int rc = 0;
+ __u32 attr;
+ struct cifsInodeInfo *cifsInfo;
struct cifsTconInfo *pTcon;
struct inode *inode;
struct cifs_sb_info *cifs_sb = CIFS_SB(sb);
- const unsigned char *full_path = NULL;
char *buf = NULL;
bool adjustTZ = false;
bool is_dfs_referral = false;
+ umode_t default_mode;
pTcon = cifs_sb->tcon;
- cFYI(1, ("Getting info on %s", search_path));
+ cFYI(1, ("Getting info on %s", full_path));
if ((pfindData == NULL) && (*pinode != NULL)) {
if (CIFS_I(*pinode)->clientCanCacheRead) {
@@ -409,9 +437,6 @@ int cifs_get_inode_info(struct inode **pinode,
return -ENOMEM;
pfindData = (FILE_ALL_INFO *)buf;
- full_path = cifs_get_search_path(cifs_sb, search_path);
-
-try_again_CIFSSMBQPathInfo:
/* could do find first instead but this returns more info */
rc = CIFSSMBQPathInfo(xid, pTcon, full_path, pfindData,
0 /* not legacy */,
@@ -429,178 +454,163 @@ try_again_CIFSSMBQPathInfo:
}
}
/* dump_mem("\nQPathInfo return data",&findData, sizeof(findData)); */
- if (rc) {
- if (rc == -EREMOTE && !is_dfs_referral) {
- is_dfs_referral = true;
- if (full_path != search_path) {
- kfree(full_path);
- full_path = search_path;
- }
- goto try_again_CIFSSMBQPathInfo;
- }
+ if (rc == -EREMOTE) {
+ is_dfs_referral = true;
+ fill_fake_finddata(pfindData, sb);
+ rc = 0;
+ } else if (rc)
goto cgii_exit;
- } else {
- struct cifsInodeInfo *cifsInfo;
- __u32 attr = le32_to_cpu(pfindData->Attributes);
-
- /* get new inode */
- if (*pinode == NULL) {
- *pinode = new_inode(sb);
- if (*pinode == NULL) {
- rc = -ENOMEM;
- goto cgii_exit;
- }
- /* Is an i_ino of zero legal? Can we use that to check
- if the server supports returning inode numbers? Are
- there other sanity checks we can use to ensure that
- the server is really filling in that field? */
-
- /* We can not use the IndexNumber field by default from
- Windows or Samba (in ALL_INFO buf) but we can request
- it explicitly. It may not be unique presumably if
- the server has multiple devices mounted under one
- share */
- /* There may be higher info levels that work but are
- there Windows server or network appliances for which
- IndexNumber field is not guaranteed unique? */
+ attr = le32_to_cpu(pfindData->Attributes);
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
- int rc1 = 0;
- __u64 inode_num;
-
- rc1 = CIFSGetSrvInodeNumber(xid, pTcon,
- search_path, &inode_num,
+ /* get new inode */
+ if (*pinode == NULL) {
+ *pinode = new_inode(sb);
+ if (*pinode == NULL) {
+ rc = -ENOMEM;
+ goto cgii_exit;
+ }
+ /* Is an i_ino of zero legal? Can we use that to check
+ if the server supports returning inode numbers? Are
+ there other sanity checks we can use to ensure that
+ the server is really filling in that field? */
+
+ /* We can not use the IndexNumber field by default from
+ Windows or Samba (in ALL_INFO buf) but we can request
+ it explicitly. It may not be unique presumably if
+ the server has multiple devices mounted under one share */
+
+ /* There may be higher info levels that work but are
+ there Windows server or network appliances for which
+ IndexNumber field is not guaranteed unique? */
+
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SERVER_INUM) {
+ int rc1 = 0;
+ __u64 inode_num;
+
+ rc1 = CIFSGetSrvInodeNumber(xid, pTcon,
+ full_path, &inode_num,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
- if (rc1) {
- cFYI(1, ("GetSrvInodeNum rc %d", rc1));
- /* BB EOPNOSUPP disable SERVER_INUM? */
- } else /* do we need cast or hash to ino? */
- (*pinode)->i_ino = inode_num;
- } /* else ino incremented to unique num in new_inode*/
- if (sb->s_flags & MS_NOATIME)
- (*pinode)->i_flags |= S_NOATIME | S_NOCMTIME;
- insert_inode_hash(*pinode);
- }
- inode = *pinode;
- cifsInfo = CIFS_I(inode);
- cifsInfo->cifsAttrs = attr;
- cFYI(1, ("Old time %ld", cifsInfo->time));
- cifsInfo->time = jiffies;
- cFYI(1, ("New time %ld", cifsInfo->time));
-
- /* blksize needs to be multiple of two. So safer to default to
- blksize and blkbits set in superblock so 2**blkbits and blksize
- will match rather than setting to:
- (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
-
- /* Linux can not store file creation time so ignore it */
- if (pfindData->LastAccessTime)
- inode->i_atime = cifs_NTtimeToUnix
- (le64_to_cpu(pfindData->LastAccessTime));
- else /* do not need to use current_fs_time - time not stored */
- inode->i_atime = CURRENT_TIME;
- inode->i_mtime =
+ if (rc1) {
+ cFYI(1, ("GetSrvInodeNum rc %d", rc1));
+ /* BB EOPNOSUPP disable SERVER_INUM? */
+ } else /* do we need cast or hash to ino? */
+ (*pinode)->i_ino = inode_num;
+ } /* else ino incremented to unique num in new_inode*/
+ if (sb->s_flags & MS_NOATIME)
+ (*pinode)->i_flags |= S_NOATIME | S_NOCMTIME;
+ insert_inode_hash(*pinode);
+ }
+ inode = *pinode;
+ cifsInfo = CIFS_I(inode);
+ cifsInfo->cifsAttrs = attr;
+ cFYI(1, ("Old time %ld", cifsInfo->time));
+ cifsInfo->time = jiffies;
+ cFYI(1, ("New time %ld", cifsInfo->time));
+
+ /* blksize needs to be multiple of two. So safer to default to
+ blksize and blkbits set in superblock so 2**blkbits and blksize
+ will match rather than setting to:
+ (pTcon->ses->server->maxBuf - MAX_CIFS_HDR_SIZE) & 0xFFFFFE00;*/
+
+ /* Linux can not store file creation time so ignore it */
+ if (pfindData->LastAccessTime)
+ inode->i_atime = cifs_NTtimeToUnix
+ (le64_to_cpu(pfindData->LastAccessTime));
+ else /* do not need to use current_fs_time - time not stored */
+ inode->i_atime = CURRENT_TIME;
+ inode->i_mtime =
cifs_NTtimeToUnix(le64_to_cpu(pfindData->LastWriteTime));
- inode->i_ctime =
- cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
- cFYI(0, ("Attributes came in as 0x%x", attr));
- if (adjustTZ && (pTcon->ses) && (pTcon->ses->server)) {
- inode->i_ctime.tv_sec += pTcon->ses->server->timeAdj;
- inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj;
- }
+ inode->i_ctime =
+ cifs_NTtimeToUnix(le64_to_cpu(pfindData->ChangeTime));
+ cFYI(DBG2, ("Attributes came in as 0x%x", attr));
+ if (adjustTZ && (pTcon->ses) && (pTcon->ses->server)) {
+ inode->i_ctime.tv_sec += pTcon->ses->server->timeAdj;
+ inode->i_mtime.tv_sec += pTcon->ses->server->timeAdj;
+ }
- /* set default mode. will override for dirs below */
- if (atomic_read(&cifsInfo->inUse) == 0)
- /* new inode, can safely set these fields */
- inode->i_mode = cifs_sb->mnt_file_mode;
- else /* since we set the inode type below we need to mask off
- to avoid strange results if type changes and both
- get orred in */
+ /* get default inode mode */
+ if (attr & ATTR_DIRECTORY)
+ default_mode = cifs_sb->mnt_dir_mode;
+ else
+ default_mode = cifs_sb->mnt_file_mode;
+
+ /* set permission bits */
+ if (atomic_read(&cifsInfo->inUse) == 0 ||
+ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0)
+ inode->i_mode = default_mode;
+ else {
+ /* just reenable write bits if !ATTR_READONLY */
+ if ((inode->i_mode & S_IWUGO) == 0 &&
+ (attr & ATTR_READONLY) == 0)
+ inode->i_mode |= (S_IWUGO & default_mode);
inode->i_mode &= ~S_IFMT;
-/* if (attr & ATTR_REPARSE) */
- /* We no longer handle these as symlinks because we could not
- follow them due to the absolute path with drive letter */
- if (attr & ATTR_DIRECTORY) {
- /* override default perms since we do not do byte range locking
- on dirs */
- inode->i_mode = cifs_sb->mnt_dir_mode;
- inode->i_mode |= S_IFDIR;
- } else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) &&
- (cifsInfo->cifsAttrs & ATTR_SYSTEM) &&
- /* No need to le64 convert size of zero */
- (pfindData->EndOfFile == 0)) {
- inode->i_mode = cifs_sb->mnt_file_mode;
+ }
+ /* clear write bits if ATTR_READONLY is set */
+ if (attr & ATTR_READONLY)
+ inode->i_mode &= ~S_IWUGO;
+
+ /* set inode type */
+ if ((attr & ATTR_SYSTEM) &&
+ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) {
+ /* no need to fix endianness on 0 */
+ if (pfindData->EndOfFile == 0)
inode->i_mode |= S_IFIFO;
-/* BB Finish for SFU style symlinks and devices */
- } else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) &&
- (cifsInfo->cifsAttrs & ATTR_SYSTEM)) {
- if (decode_sfu_inode(inode,
- le64_to_cpu(pfindData->EndOfFile),
- search_path,
- cifs_sb, xid))
- cFYI(1, ("Unrecognized sfu inode type"));
-
- cFYI(1, ("sfu mode 0%o", inode->i_mode));
- } else {
+ else if (decode_sfu_inode(inode,
+ le64_to_cpu(pfindData->EndOfFile),
+ full_path, cifs_sb, xid))
+ cFYI(1, ("unknown SFU file type\n"));
+ } else {
+ if (attr & ATTR_DIRECTORY)
+ inode->i_mode |= S_IFDIR;
+ else
inode->i_mode |= S_IFREG;
- /* treat the dos attribute of read-only as read-only
- mode e.g. 555 */
- if (cifsInfo->cifsAttrs & ATTR_READONLY)
- inode->i_mode &= ~(S_IWUGO);
- else if ((inode->i_mode & S_IWUGO) == 0)
- /* the ATTR_READONLY flag may have been */
- /* changed on server -- set any w bits */
- /* allowed by mnt_file_mode */
- inode->i_mode |= (S_IWUGO &
- cifs_sb->mnt_file_mode);
- /* BB add code here -
- validate if device or weird share or device type? */
- }
+ }
- spin_lock(&inode->i_lock);
- if (is_size_safe_to_change(cifsInfo,
- le64_to_cpu(pfindData->EndOfFile))) {
- /* can not safely shrink the file size here if the
- client is writing to it due to potential races */
- i_size_write(inode, le64_to_cpu(pfindData->EndOfFile));
-
- /* 512 bytes (2**9) is the fake blocksize that must be
- used for this calculation */
- inode->i_blocks = (512 - 1 + le64_to_cpu(
- pfindData->AllocationSize)) >> 9;
- }
- spin_unlock(&inode->i_lock);
+ spin_lock(&inode->i_lock);
+ if (is_size_safe_to_change(cifsInfo,
+ le64_to_cpu(pfindData->EndOfFile))) {
+ /* can not safely shrink the file size here if the
+ client is writing to it due to potential races */
+ i_size_write(inode, le64_to_cpu(pfindData->EndOfFile));
+
+ /* 512 bytes (2**9) is the fake blocksize that must be
+ used for this calculation */
+ inode->i_blocks = (512 - 1 + le64_to_cpu(
+ pfindData->AllocationSize)) >> 9;
+ }
+ spin_unlock(&inode->i_lock);
- inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks);
+ inode->i_nlink = le32_to_cpu(pfindData->NumberOfLinks);
- /* BB fill in uid and gid here? with help from winbind?
- or retrieve from NTFS stream extended attribute */
+ /* BB fill in uid and gid here? with help from winbind?
+ or retrieve from NTFS stream extended attribute */
#ifdef CONFIG_CIFS_EXPERIMENTAL
- /* fill in 0777 bits from ACL */
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
- cFYI(1, ("Getting mode bits from ACL"));
- acl_to_uid_mode(inode, search_path, pfid);
- }
+ /* fill in 0777 bits from ACL */
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL) {
+ cFYI(1, ("Getting mode bits from ACL"));
+ acl_to_uid_mode(inode, full_path, pfid);
+ }
#endif
- if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
- /* fill in remaining high mode bits e.g. SUID, VTX */
- get_sfu_mode(inode, search_path, cifs_sb, xid);
- } else if (atomic_read(&cifsInfo->inUse) == 0) {
- inode->i_uid = cifs_sb->mnt_uid;
- inode->i_gid = cifs_sb->mnt_gid;
- /* set so we do not keep refreshing these fields with
- bad data after user has changed them in memory */
- atomic_set(&cifsInfo->inUse, 1);
- }
-
- cifs_set_ops(inode, is_dfs_referral);
+ if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) {
+ /* fill in remaining high mode bits e.g. SUID, VTX */
+ get_sfu_mode(inode, full_path, cifs_sb, xid);
+ } else if (atomic_read(&cifsInfo->inUse) == 0) {
+ inode->i_uid = cifs_sb->mnt_uid;
+ inode->i_gid = cifs_sb->mnt_gid;
+ /* set so we do not keep refreshing these fields with
+ bad data after user has changed them in memory */
+ atomic_set(&cifsInfo->inUse, 1);
}
+
+ cifs_set_ops(inode, is_dfs_referral);
+
+
+
+
cgii_exit:
- if (full_path != search_path)
- kfree(full_path);
kfree(buf);
return rc;
}
@@ -1005,8 +1015,11 @@ mkdir_get_info:
CIFS_MOUNT_MAP_SPECIAL_CHR);
}
if (direntry->d_inode) {
- direntry->d_inode->i_mode = mode;
- direntry->d_inode->i_mode |= S_IFDIR;
+ if (cifs_sb->mnt_cifs_flags &
+ CIFS_MOUNT_DYNPERM)
+ direntry->d_inode->i_mode =
+ (mode | S_IFDIR);
+
if (cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_SET_UID) {
direntry->d_inode->i_uid =
@@ -1502,8 +1515,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
int oplock = 0;
rc = SMBLegacyOpen(xid, pTcon, full_path,
- FILE_OPEN,
- SYNCHRONIZE | FILE_WRITE_ATTRIBUTES,
+ FILE_OPEN, GENERIC_WRITE,
CREATE_NOT_DIR, &netfid, &oplock,
NULL, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
@@ -1534,13 +1546,26 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
} else
goto cifs_setattr_exit;
}
- if (attrs->ia_valid & ATTR_UID) {
- cFYI(1, ("UID changed to %d", attrs->ia_uid));
- uid = attrs->ia_uid;
- }
- if (attrs->ia_valid & ATTR_GID) {
- cFYI(1, ("GID changed to %d", attrs->ia_gid));
- gid = attrs->ia_gid;
+
+ /*
+ * Without unix extensions we can't send ownership changes to the
+ * server, so silently ignore them. This is consistent with how
+ * local DOS/Windows filesystems behave (VFAT, NTFS, etc). With
+ * CIFSACL support + proper Windows to Unix idmapping, we may be
+ * able to support this in the future.
+ */
+ if (!pTcon->unix_ext &&
+ !(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_SET_UID)) {
+ attrs->ia_valid &= ~(ATTR_UID | ATTR_GID);
+ } else {
+ if (attrs->ia_valid & ATTR_UID) {
+ cFYI(1, ("UID changed to %d", attrs->ia_uid));
+ uid = attrs->ia_uid;
+ }
+ if (attrs->ia_valid & ATTR_GID) {
+ cFYI(1, ("GID changed to %d", attrs->ia_gid));
+ gid = attrs->ia_gid;
+ }
}
time_buf.Attributes = 0;
@@ -1550,7 +1575,7 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
attrs->ia_valid &= ~ATTR_MODE;
if (attrs->ia_valid & ATTR_MODE) {
- cFYI(1, ("Mode changed to 0x%x", attrs->ia_mode));
+ cFYI(1, ("Mode changed to 0%o", attrs->ia_mode));
mode = attrs->ia_mode;
}
@@ -1565,18 +1590,18 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
#ifdef CONFIG_CIFS_EXPERIMENTAL
if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_CIFS_ACL)
rc = mode_to_acl(inode, full_path, mode);
- else if ((mode & S_IWUGO) == 0) {
-#else
- if ((mode & S_IWUGO) == 0) {
+ else
#endif
- /* not writeable */
- if ((cifsInode->cifsAttrs & ATTR_READONLY) == 0) {
- set_dosattr = true;
- time_buf.Attributes =
- cpu_to_le32(cifsInode->cifsAttrs |
- ATTR_READONLY);
- }
- } else if (cifsInode->cifsAttrs & ATTR_READONLY) {
+ if (((mode & S_IWUGO) == 0) &&
+ (cifsInode->cifsAttrs & ATTR_READONLY) == 0) {
+ set_dosattr = true;
+ time_buf.Attributes = cpu_to_le32(cifsInode->cifsAttrs |
+ ATTR_READONLY);
+ /* fix up mode if we're not using dynperm */
+ if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0)
+ attrs->ia_mode = inode->i_mode & ~S_IWUGO;
+ } else if ((mode & S_IWUGO) &&
+ (cifsInode->cifsAttrs & ATTR_READONLY)) {
/* If file is readonly on server, we would
not be able to write to it - so if any write
bit is enabled for user or group or other we
@@ -1587,6 +1612,20 @@ int cifs_setattr(struct dentry *direntry, struct iattr *attrs)
/* Windows ignores set to zero */
if (time_buf.Attributes == 0)
time_buf.Attributes |= cpu_to_le32(ATTR_NORMAL);
+
+ /* reset local inode permissions to normal */
+ if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) {
+ attrs->ia_mode &= ~(S_IALLUGO);
+ if (S_ISDIR(inode->i_mode))
+ attrs->ia_mode |=
+ cifs_sb->mnt_dir_mode;
+ else
+ attrs->ia_mode |=
+ cifs_sb->mnt_file_mode;
+ }
+ } else if (!(cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM)) {
+ /* ignore mode change - ATTR_READONLY hasn't changed */
+ attrs->ia_valid &= ~ATTR_MODE;
}
}
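The inode.c rework above centralizes how the mode is derived: pick a default (dir vs file mode), keep locally-set bits only under CIFS_MOUNT_DYNPERM, and let ATTR_READONLY strip the write bits last. A compact restatement of that decision as plain C, a sketch only (the attr constants mirror the on-wire DOS values, the function name is invented):

#include <stdio.h>
#include <stdbool.h>
#include <sys/stat.h>

#define ATTR_READONLY  0x0001     /* DOS attribute bits as sent on the wire */
#define ATTR_DIRECTORY 0x0010

#ifndef S_IWUGO
#define S_IWUGO (S_IWUSR | S_IWGRP | S_IWOTH)
#endif

/* Derive local mode bits from the DOS attributes, mimicking the
 * cifs_get_inode_info() logic in the patch above. */
static mode_t cifs_mode(unsigned attr, mode_t cur, mode_t dir_mode,
			mode_t file_mode, bool dynperm, bool fresh_inode)
{
	mode_t def = (attr & ATTR_DIRECTORY) ? dir_mode : file_mode;
	mode_t mode;

	if (fresh_inode || !dynperm)
		mode = def;
	else {
		mode = cur;
		if ((mode & S_IWUGO) == 0 && !(attr & ATTR_READONLY))
			mode |= (S_IWUGO & def);   /* re-enable write bits */
	}
	if (attr & ATTR_READONLY)
		mode &= ~S_IWUGO;                  /* read-only always wins */
	return mode;
}

int main(void)
{
	printf("%04o\n", (unsigned)cifs_mode(ATTR_READONLY, 0, 0755, 0644,
					     false, true));     /* 0444 */
	printf("%04o\n", (unsigned)cifs_mode(0, 0600, 0755, 0644,
					     true, false));     /* 0600 */
	return 0;
}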
diff --git a/fs/cifs/ioctl.c b/fs/cifs/ioctl.c
index 5c792df13d62..0088a5b52564 100644
--- a/fs/cifs/ioctl.c
+++ b/fs/cifs/ioctl.c
@@ -30,9 +30,9 @@
#define CIFS_IOC_CHECKUMOUNT _IO(0xCF, 2)
-int cifs_ioctl(struct inode *inode, struct file *filep,
- unsigned int command, unsigned long arg)
+long cifs_ioctl(struct file *filep, unsigned int command, unsigned long arg)
{
+ struct inode *inode = filep->f_dentry->d_inode;
int rc = -ENOTTY; /* strange error - but the precedent */
int xid;
struct cifs_sb_info *cifs_sb;
diff --git a/fs/cifs/link.c b/fs/cifs/link.c
index 1c2c3ce5020b..63f644000ce5 100644
--- a/fs/cifs/link.c
+++ b/fs/cifs/link.c
@@ -234,7 +234,6 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
struct cifs_sb_info *cifs_sb;
struct cifsTconInfo *pTcon;
char *full_path = NULL;
- char *tmp_path = NULL;
char *tmpbuffer;
int len;
__u16 fid;
@@ -295,45 +294,9 @@ cifs_readlink(struct dentry *direntry, char __user *pBuffer, int buflen)
cFYI(1, ("Error closing junction point "
"(open for ioctl)"));
}
- /* BB unwind this long, nested function, or remove BB */
- if (rc == -EIO) {
- /* Query if DFS Junction */
- unsigned int num_referrals = 0;
- struct dfs_info3_param *refs = NULL;
- tmp_path =
- kmalloc(MAX_TREE_SIZE + MAX_PATHCONF + 1,
- GFP_KERNEL);
- if (tmp_path) {
- strncpy(tmp_path, pTcon->treeName,
- MAX_TREE_SIZE);
- strncat(tmp_path, full_path,
- MAX_PATHCONF);
- rc = get_dfs_path(xid, pTcon->ses,
- tmp_path,
- cifs_sb->local_nls,
- &num_referrals, &refs,
- cifs_sb->mnt_cifs_flags &
- CIFS_MOUNT_MAP_SPECIAL_CHR);
- cFYI(1, ("Get DFS for %s rc = %d ",
- tmp_path, rc));
- if ((num_referrals == 0) && (rc == 0))
- rc = -EACCES;
- else {
- cFYI(1, ("num referral: %d",
- num_referrals));
- if (refs && refs->path_name) {
- strncpy(tmpbuffer,
- refs->path_name,
- len-1);
- }
- }
- kfree(refs);
- kfree(tmp_path);
-}
- /* BB add code like else decode referrals
- then memcpy to tmpbuffer and free referrals
- string array BB */
- }
+ /* If it is a DFS junction, we would have gotten
+ PATH_NOT_COVERED back from the server earlier, so we do
+ not need to request the DFS info here */
}
}
/* BB Anything else to do to handle recursive links? */
diff --git a/fs/cifs/misc.c b/fs/cifs/misc.c
index 1d69b8014e0b..4b17f8fe3157 100644
--- a/fs/cifs/misc.c
+++ b/fs/cifs/misc.c
@@ -519,8 +519,7 @@ is_valid_oplock_break(struct smb_hdr *buf, struct TCP_Server_Info *srv)
pnotify = (struct file_notify_information *)
((char *)&pSMBr->hdr.Protocol + data_offset);
cFYI(1, ("dnotify on %s Action: 0x%x",
- pnotify->FileName,
- pnotify->Action)); /* BB removeme BB */
+ pnotify->FileName, pnotify->Action));
/* cifs_dump_mem("Rcvd notify Data: ",buf,
sizeof(struct smb_hdr)+60); */
return true;
diff --git a/fs/cifs/netmisc.c b/fs/cifs/netmisc.c
index 00f4cff400b3..8703d68f5b20 100644
--- a/fs/cifs/netmisc.c
+++ b/fs/cifs/netmisc.c
@@ -141,11 +141,11 @@ cifs_inet_pton(const int address_family, const char *cp, void *dst)
int ret = 0;
/* calculate length by finding first slash or NULL */
- if (address_family == AF_INET) {
+ if (address_family == AF_INET)
ret = in4_pton(cp, -1 /* len */, dst, '\\', NULL);
- } else if (address_family == AF_INET6) {
+ else if (address_family == AF_INET6)
ret = in6_pton(cp, -1 /* len */, dst , '\\', NULL);
- }
+
cFYI(DBG2, ("address conversion returned %d for %s", ret, cp));
if (ret > 0)
ret = 1;
diff --git a/fs/cifs/ntlmssp.h b/fs/cifs/ntlmssp.h
index 7170a9b70f1e..c377d8065d99 100644
--- a/fs/cifs/ntlmssp.h
+++ b/fs/cifs/ntlmssp.h
@@ -64,7 +64,7 @@ typedef struct _SECURITY_BUFFER {
} __attribute__((packed)) SECURITY_BUFFER;
typedef struct _NEGOTIATE_MESSAGE {
- __u8 Signature[sizeof (NTLMSSP_SIGNATURE)];
+ __u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
__le32 MessageType; /* 1 */
__le32 NegotiateFlags;
SECURITY_BUFFER DomainName; /* RFC 1001 style and ASCII */
@@ -74,7 +74,7 @@ typedef struct _NEGOTIATE_MESSAGE {
} __attribute__((packed)) NEGOTIATE_MESSAGE, *PNEGOTIATE_MESSAGE;
typedef struct _CHALLENGE_MESSAGE {
- __u8 Signature[sizeof (NTLMSSP_SIGNATURE)];
+ __u8 Signature[sizeof(NTLMSSP_SIGNATURE)];
__le32 MessageType; /* 2 */
SECURITY_BUFFER TargetName;
__le32 NegotiateFlags;
diff --git a/fs/cifs/readdir.c b/fs/cifs/readdir.c
index 34ec32100c72..83f306954883 100644
--- a/fs/cifs/readdir.c
+++ b/fs/cifs/readdir.c
@@ -132,6 +132,7 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
__u32 attr;
__u64 allocation_size;
__u64 end_of_file;
+ umode_t default_mode;
/* save mtime and size */
local_mtime = tmp_inode->i_mtime;
@@ -187,48 +188,54 @@ static void fill_in_inode(struct inode *tmp_inode, int new_buf_type,
if (atomic_read(&cifsInfo->inUse) == 0) {
tmp_inode->i_uid = cifs_sb->mnt_uid;
tmp_inode->i_gid = cifs_sb->mnt_gid;
- /* set default mode. will override for dirs below */
- tmp_inode->i_mode = cifs_sb->mnt_file_mode;
- } else {
- /* mask off the type bits since it gets set
- below and we do not want to get two type
- bits set */
+ }
+
+ if (attr & ATTR_DIRECTORY)
+ default_mode = cifs_sb->mnt_dir_mode;
+ else
+ default_mode = cifs_sb->mnt_file_mode;
+
+ /* set initial permissions */
+ if ((atomic_read(&cifsInfo->inUse) == 0) ||
+ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_DYNPERM) == 0)
+ tmp_inode->i_mode = default_mode;
+ else {
+ /* just reenable write bits if !ATTR_READONLY */
+ if ((tmp_inode->i_mode & S_IWUGO) == 0 &&
+ (attr & ATTR_READONLY) == 0)
+ tmp_inode->i_mode |= (S_IWUGO & default_mode);
+
tmp_inode->i_mode &= ~S_IFMT;
}
- if (attr & ATTR_DIRECTORY) {
- *pobject_type = DT_DIR;
- /* override default perms since we do not lock dirs */
- if (atomic_read(&cifsInfo->inUse) == 0)
- tmp_inode->i_mode = cifs_sb->mnt_dir_mode;
- tmp_inode->i_mode |= S_IFDIR;
- } else if ((cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL) &&
- (attr & ATTR_SYSTEM)) {
+ /* clear write bits if ATTR_READONLY is set */
+ if (attr & ATTR_READONLY)
+ tmp_inode->i_mode &= ~S_IWUGO;
+
+ /* set inode type */
+ if ((attr & ATTR_SYSTEM) &&
+ (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_UNX_EMUL)) {
if (end_of_file == 0) {
- *pobject_type = DT_FIFO;
tmp_inode->i_mode |= S_IFIFO;
+ *pobject_type = DT_FIFO;
} else {
- /* rather than get the type here, we mark the
- inode as needing revalidate and get the real type
- (blk vs chr vs. symlink) later ie in lookup */
- *pobject_type = DT_REG;
+ /*
+ * trying to get the type can be slow, so just call
+ * this a regular file for now, and mark for reval
+ */
tmp_inode->i_mode |= S_IFREG;
+ *pobject_type = DT_REG;
cifsInfo->time = 0;
}
-/* we no longer mark these because we could not follow them */
-/* } else if (attr & ATTR_REPARSE) {
- *pobject_type = DT_LNK;
- tmp_inode->i_mode |= S_IFLNK; */
} else {
- *pobject_type = DT_REG;
- tmp_inode->i_mode |= S_IFREG;
- if (attr & ATTR_READONLY)
- tmp_inode->i_mode &= ~(S_IWUGO);
- else if ((tmp_inode->i_mode & S_IWUGO) == 0)
- /* the ATTR_READONLY flag may have been changed on */
- /* server -- set any w bits allowed by mnt_file_mode */
- tmp_inode->i_mode |= (S_IWUGO & cifs_sb->mnt_file_mode);
- } /* could add code here - to validate if device or weird share type? */
+ if (attr & ATTR_DIRECTORY) {
+ tmp_inode->i_mode |= S_IFDIR;
+ *pobject_type = DT_DIR;
+ } else {
+ tmp_inode->i_mode |= S_IFREG;
+ *pobject_type = DT_REG;
+ }
+ }
/* can not fill in nlink here as in qpathinfo version and Unx search */
if (atomic_read(&cifsInfo->inUse) == 0)
@@ -670,10 +677,11 @@ static int find_cifs_entry(const int xid, struct cifsTconInfo *pTcon,
(index_to_find < first_entry_in_buffer)) {
/* close and restart search */
cFYI(1, ("search backing up - close and restart search"));
- cifsFile->invalidHandle = true;
- CIFSFindClose(xid, pTcon, cifsFile->netfid);
- kfree(cifsFile->search_resume_name);
- cifsFile->search_resume_name = NULL;
+ if (!cifsFile->srch_inf.endOfSearch &&
+ !cifsFile->invalidHandle) {
+ cifsFile->invalidHandle = true;
+ CIFSFindClose(xid, pTcon, cifsFile->netfid);
+ }
if (cifsFile->srch_inf.ntwrk_buf_start) {
cFYI(1, ("freeing SMB ff cache buf on search rewind"));
if (cifsFile->srch_inf.smallBuf)
@@ -1040,9 +1048,7 @@ int cifs_readdir(struct file *file, void *direntry, filldir_t filldir)
} /* else {
cifsFile->invalidHandle = true;
CIFSFindClose(xid, pTcon, cifsFile->netfid);
- }
- kfree(cifsFile->search_resume_name);
- cifsFile->search_resume_name = NULL; */
+ } */
rc = find_cifs_entry(xid, pTcon, file,
&current_entry, &num_to_fill);
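The readdir.c hunks split "pick the entry type" from "pick the permissions" the same way as the inode.c code: ATTR_SYSTEM with a zero size becomes a FIFO under SFU emulation, otherwise ATTR_DIRECTORY versus regular file decides both the i_mode type bits and the readdir d_type. A tiny sketch of that mapping (attribute values are the assumed DOS ones, the function name is invented):

#include <stdio.h>
#include <stdbool.h>

#define ATTR_DIRECTORY 0x0010
#define ATTR_SYSTEM    0x0004

/* Mirror of the object-type choice made while filling readdir entries. */
static const char *dos_attr_to_type(unsigned attr, unsigned long long size,
				    bool sfu_emul)
{
	if ((attr & ATTR_SYSTEM) && sfu_emul && size == 0)
		return "DT_FIFO";   /* SFU-style named pipe */
	if (attr & ATTR_DIRECTORY)
		return "DT_DIR";
	return "DT_REG";            /* real SFU type resolved later in lookup */
}

int main(void)
{
	printf("%s %s %s\n",
	       dos_attr_to_type(ATTR_DIRECTORY, 0, false),
	       dos_attr_to_type(ATTR_SYSTEM, 0, true),
	       dos_attr_to_type(0, 42, false));
	return 0;
}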
diff --git a/fs/coda/psdev.c b/fs/coda/psdev.c
index e3eb3556622b..40c36f7352a6 100644
--- a/fs/coda/psdev.c
+++ b/fs/coda/psdev.c
@@ -362,8 +362,9 @@ static int init_coda_psdev(void)
goto out_chrdev;
}
for (i = 0; i < MAX_CODADEVS; i++)
- device_create(coda_psdev_class, NULL,
- MKDEV(CODA_PSDEV_MAJOR,i), "cfs%d", i);
+ device_create_drvdata(coda_psdev_class, NULL,
+ MKDEV(CODA_PSDEV_MAJOR, i),
+ NULL, "cfs%d", i);
coda_sysctl_init();
goto out;
diff --git a/fs/compat.c b/fs/compat.c
index 332a869d2c53..ed43e17a5dc6 100644
--- a/fs/compat.c
+++ b/fs/compat.c
@@ -1405,7 +1405,7 @@ int compat_do_execve(char * filename,
/* execve success */
security_bprm_free(bprm);
acct_update_integrals(current);
- kfree(bprm);
+ free_bprm(bprm);
return retval;
}
@@ -1424,7 +1424,7 @@ out_file:
}
out_kfree:
- kfree(bprm);
+ free_bprm(bprm);
out_ret:
return retval;
diff --git a/fs/dcache.c b/fs/dcache.c
index 3ee588d5f585..40e4d511c4ea 100644
--- a/fs/dcache.c
+++ b/fs/dcache.c
@@ -1191,6 +1191,107 @@ struct dentry *d_splice_alias(struct inode *inode, struct dentry *dentry)
return new;
}
+/**
+ * d_add_ci - lookup or allocate new dentry with case-exact name
+ * @inode: the inode case-insensitive lookup has found
+ * @dentry: the negative dentry that was passed to the parent's lookup func
+ * @name: the case-exact name to be associated with the returned dentry
+ *
+ * This is to avoid filling the dcache with case-insensitive names to the
+ * same inode; only the actual correct case is stored in the dcache for
+ * case-insensitive filesystems.
+ *
+ * For a case-insensitive lookup match, if the case-exact dentry
+ * already exists in the dcache, use it and return it.
+ *
+ * If no entry exists with the exact case name, allocate a new dentry with
+ * the exact case, and return the spliced entry.
+ */
+struct dentry *d_add_ci(struct inode *inode, struct dentry *dentry,
+ struct qstr *name)
+{
+ int error;
+ struct dentry *found;
+ struct dentry *new;
+
+ /* Does a dentry matching the name exist already? */
+ found = d_hash_and_lookup(dentry->d_parent, name);
+ /* If not, create it now and return */
+ if (!found) {
+ new = d_alloc(dentry->d_parent, name);
+ if (!new) {
+ error = -ENOMEM;
+ goto err_out;
+ }
+ found = d_splice_alias(inode, new);
+ if (found) {
+ dput(new);
+ return found;
+ }
+ return new;
+ }
+ /* Matching dentry exists, check if it is negative. */
+ if (found->d_inode) {
+ if (unlikely(found->d_inode != inode)) {
+ /* This can't happen because bad inodes are unhashed. */
+ BUG_ON(!is_bad_inode(inode));
+ BUG_ON(!is_bad_inode(found->d_inode));
+ }
+ /*
+ * Already have the inode and the dentry attached, decrement
+ * the reference count to balance the iget() done
+ * earlier on. We found the dentry using d_lookup() so it
+ * cannot be disconnected and thus we do not need to worry
+ * about any NFS/disconnectedness issues here.
+ */
+ iput(inode);
+ return found;
+ }
+ /*
+ * Negative dentry: instantiate it unless the inode is a directory and
+ * has a 'disconnected' dentry (i.e. IS_ROOT and DCACHE_DISCONNECTED),
+ * in which case we d_move() it in place of the found dentry.
+ */
+ if (!S_ISDIR(inode->i_mode)) {
+ /* Not a directory; everything is easy. */
+ d_instantiate(found, inode);
+ return found;
+ }
+ spin_lock(&dcache_lock);
+ if (list_empty(&inode->i_dentry)) {
+ /*
+ * Directory without a 'disconnected' dentry; we need to do
+ * d_instantiate() by hand because it takes dcache_lock which
+ * we already hold.
+ */
+ list_add(&found->d_alias, &inode->i_dentry);
+ found->d_inode = inode;
+ spin_unlock(&dcache_lock);
+ security_d_instantiate(found, inode);
+ return found;
+ }
+ /*
+ * Directory with a 'disconnected' dentry; get a reference to the
+ * 'disconnected' dentry.
+ */
+ new = list_entry(inode->i_dentry.next, struct dentry, d_alias);
+ dget_locked(new);
+ spin_unlock(&dcache_lock);
+ /* Do security voodoo. */
+ security_d_instantiate(found, inode);
+ /* Move new in place of found. */
+ d_move(new, found);
+ /* Balance the iget() we did above. */
+ iput(inode);
+ /* Throw away found. */
+ dput(found);
+ /* Use new as the actual dentry. */
+ return new;
+
+err_out:
+ iput(inode);
+ return ERR_PTR(error);
+}
/**
* d_lookup - search for a dentry
@@ -2228,6 +2329,7 @@ EXPORT_SYMBOL(d_path);
EXPORT_SYMBOL(d_prune_aliases);
EXPORT_SYMBOL(d_rehash);
EXPORT_SYMBOL(d_splice_alias);
+EXPORT_SYMBOL(d_add_ci);
EXPORT_SYMBOL(d_validate);
EXPORT_SYMBOL(dget_locked);
EXPORT_SYMBOL(dput);
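The new d_add_ci() implements "reuse the case-exact dentry if one is already hashed, otherwise allocate and splice a new one". Outside the dcache this is just lookup-or-insert keyed on the exact-case spelling; the toy sketch below only illustrates that control flow with a fixed-size table and is in no way the real dcache API:

#include <stdio.h>
#include <string.h>

#define MAX_ENTRIES 16

struct entry {
	char name[64];      /* exact-case name, the only spelling we keep */
	int  inode;         /* stand-in for the attached inode */
};

static struct entry table[MAX_ENTRIES];
static int nr_entries;

/* Return the entry for the exact-case name, creating it if needed. */
static struct entry *add_ci(const char *exact_name, int inode)
{
	int i;

	for (i = 0; i < nr_entries; i++)
		if (strcmp(table[i].name, exact_name) == 0)
			return &table[i];            /* already cached */
	if (nr_entries == MAX_ENTRIES)
		return NULL;                         /* table full */
	snprintf(table[nr_entries].name, sizeof(table[0].name), "%s",
		 exact_name);
	table[nr_entries].inode = inode;
	return &table[nr_entries++];
}

int main(void)
{
	add_ci("ReadMe.TXT", 7);                     /* first lookup creates */
	struct entry *e = add_ci("ReadMe.TXT", 7);   /* second lookup reuses */

	printf("%s -> inode %d (entries=%d)\n", e->name, e->inode, nr_entries);
	return 0;
}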
diff --git a/fs/dlm/lock.c b/fs/dlm/lock.c
index 2d3d1027ce2b..7ba9586a0943 100644
--- a/fs/dlm/lock.c
+++ b/fs/dlm/lock.c
@@ -1782,7 +1782,8 @@ static void grant_pending_locks(struct dlm_rsb *r)
list_for_each_entry_safe(lkb, s, &r->res_grantqueue, lkb_statequeue) {
if (lkb->lkb_bastfn && lock_requires_bast(lkb, high, cw)) {
- if (cw && high == DLM_LOCK_PR)
+ if (cw && high == DLM_LOCK_PR &&
+ lkb->lkb_grmode == DLM_LOCK_PR)
queue_bast(r, lkb, DLM_LOCK_CW);
else
queue_bast(r, lkb, high);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 7c1e5e5cccd8..637018c891ef 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -50,6 +50,7 @@
#include <linux/pagemap.h>
#include <linux/idr.h>
#include <linux/file.h>
+#include <linux/mutex.h>
#include <linux/sctp.h>
#include <net/sctp/user.h>
@@ -138,7 +139,7 @@ static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;
static DEFINE_IDR(connections_idr);
-static DECLARE_MUTEX(connections_lock);
+static DEFINE_MUTEX(connections_lock);
static int max_nodeid;
static struct kmem_cache *con_cache;
@@ -205,9 +206,9 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
struct connection *con;
- down(&connections_lock);
+ mutex_lock(&connections_lock);
con = __nodeid2con(nodeid, allocation);
- up(&connections_lock);
+ mutex_unlock(&connections_lock);
return con;
}
@@ -218,15 +219,15 @@ static struct connection *assoc2con(int assoc_id)
int i;
struct connection *con;
- down(&connections_lock);
+ mutex_lock(&connections_lock);
for (i=0; i<=max_nodeid; i++) {
con = __nodeid2con(i, 0);
if (con && con->sctp_assoc == assoc_id) {
- up(&connections_lock);
+ mutex_unlock(&connections_lock);
return con;
}
}
- up(&connections_lock);
+ mutex_unlock(&connections_lock);
return NULL;
}
@@ -381,7 +382,7 @@ static void sctp_init_failed(void)
int i;
struct connection *con;
- down(&connections_lock);
+ mutex_lock(&connections_lock);
for (i=1; i<=max_nodeid; i++) {
con = __nodeid2con(i, 0);
if (!con)
@@ -393,7 +394,7 @@ static void sctp_init_failed(void)
}
}
}
- up(&connections_lock);
+ mutex_unlock(&connections_lock);
}
/* Something happened to an association */
@@ -930,7 +931,7 @@ out_err:
* errors we try again until the max number of retries is reached.
*/
if (result != -EHOSTUNREACH && result != -ENETUNREACH &&
- result != -ENETDOWN && result != EINVAL
+ result != -ENETDOWN && result != -EINVAL
&& result != -EPROTONOSUPPORT) {
lowcomms_connect_sock(con);
result = 0;
@@ -1417,7 +1418,7 @@ void dlm_lowcomms_stop(void)
/* Set all the flags to prevent any
socket activity.
*/
- down(&connections_lock);
+ mutex_lock(&connections_lock);
for (i = 0; i <= max_nodeid; i++) {
con = __nodeid2con(i, 0);
if (con) {
@@ -1426,11 +1427,11 @@ void dlm_lowcomms_stop(void)
con->sock->sk->sk_user_data = NULL;
}
}
- up(&connections_lock);
+ mutex_unlock(&connections_lock);
work_stop();
- down(&connections_lock);
+ mutex_lock(&connections_lock);
clean_writequeues();
for (i = 0; i <= max_nodeid; i++) {
@@ -1443,7 +1444,7 @@ void dlm_lowcomms_stop(void)
}
}
max_nodeid = 0;
- up(&connections_lock);
+ mutex_unlock(&connections_lock);
kmem_cache_destroy(con_cache);
idr_init(&connections_idr);
}
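The lowcomms.c change swaps the old DECLARE_MUTEX semaphore for a proper struct mutex (plus the -EINVAL comparison fix). The locking pattern around the connection table stays lock/lookup/unlock; a pthread rendition of that pattern, with a table and types invented purely for the sketch:

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_NODES 8

struct connection {
	int nodeid;
};

static struct connection *connections[MAX_NODES];
static pthread_mutex_t connections_lock = PTHREAD_MUTEX_INITIALIZER;

/* Look up (or lazily create) the connection for a node id while
 * holding the table lock, mirroring the nodeid2con() shape. */
static struct connection *nodeid2con(int nodeid)
{
	struct connection *con;

	if (nodeid < 0 || nodeid >= MAX_NODES)
		return NULL;
	pthread_mutex_lock(&connections_lock);
	if (!connections[nodeid]) {
		connections[nodeid] = calloc(1, sizeof(*connections[nodeid]));
		if (connections[nodeid])
			connections[nodeid]->nodeid = nodeid;
	}
	con = connections[nodeid];
	pthread_mutex_unlock(&connections_lock);
	return con;
}

int main(void)
{
	struct connection *con = nodeid2con(3);

	printf("con for node %d: %p\n", con ? con->nodeid : -1, (void *)con);
	return 0;
}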
diff --git a/fs/dlm/netlink.c b/fs/dlm/netlink.c
index 714593621f4f..18bda83cc892 100644
--- a/fs/dlm/netlink.c
+++ b/fs/dlm/netlink.c
@@ -95,7 +95,7 @@ int __init dlm_netlink_init(void)
return rv;
}
-void __exit dlm_netlink_exit(void)
+void dlm_netlink_exit(void)
{
genl_unregister_ops(&family, &dlm_nl_ops);
genl_unregister_family(&family);
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index d6d6e370f89c..78878c5781ca 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -379,7 +379,7 @@ static ssize_t dev_write(struct file *file, const char __user *u, size_t count,
struct plock_xop *xop;
xop = (struct plock_xop *)op;
if (xop->callback)
- count = dlm_plock_callback(op);
+ dlm_plock_callback(op);
else
wake_up(&recv_wq);
} else
diff --git a/fs/dlm/user.c b/fs/dlm/user.c
index f976f303c196..929e48ae7591 100644
--- a/fs/dlm/user.c
+++ b/fs/dlm/user.c
@@ -539,7 +539,7 @@ static ssize_t device_write(struct file *file, const char __user *buf,
/* do we really need this? can a write happen after a close? */
if ((kbuf->cmd == DLM_USER_LOCK || kbuf->cmd == DLM_USER_UNLOCK) &&
- test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags))
+ (proc && test_bit(DLM_PROC_FLAGS_CLOSING, &proc->flags)))
return -EINVAL;
sigfillset(&allsigs);
diff --git a/fs/ecryptfs/crypto.c b/fs/ecryptfs/crypto.c
index cd62d75b2cc0..e2832bc7869a 100644
--- a/fs/ecryptfs/crypto.c
+++ b/fs/ecryptfs/crypto.c
@@ -1906,9 +1906,9 @@ int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
goto out;
}
}
- mutex_unlock(&key_tfm_list_mutex);
(*tfm) = key_tfm->key_tfm;
(*tfm_mutex) = &key_tfm->key_tfm_mutex;
out:
+ mutex_unlock(&key_tfm_list_mutex);
return rc;
}
diff --git a/fs/ecryptfs/ecryptfs_kernel.h b/fs/ecryptfs/ecryptfs_kernel.h
index 951ee33a022d..c15c25745e05 100644
--- a/fs/ecryptfs/ecryptfs_kernel.h
+++ b/fs/ecryptfs/ecryptfs_kernel.h
@@ -660,8 +660,6 @@ int ecryptfs_get_tfm_and_mutex_for_cipher_name(struct crypto_blkcipher **tfm,
int ecryptfs_keyring_auth_tok_for_sig(struct key **auth_tok_key,
struct ecryptfs_auth_tok **auth_tok,
char *sig);
-int ecryptfs_write_zeros(struct file *file, pgoff_t index, int start,
- int num_zeros);
int ecryptfs_write_lower(struct inode *ecryptfs_inode, char *data,
loff_t offset, size_t size);
int ecryptfs_write_lower_page_segment(struct inode *ecryptfs_inode,
diff --git a/fs/ecryptfs/miscdev.c b/fs/ecryptfs/miscdev.c
index 6560da1a58ce..50c994a249a5 100644
--- a/fs/ecryptfs/miscdev.c
+++ b/fs/ecryptfs/miscdev.c
@@ -243,7 +243,6 @@ ecryptfs_miscdev_read(struct file *file, char __user *buf, size_t count,
struct ecryptfs_daemon *daemon;
struct ecryptfs_msg_ctx *msg_ctx;
size_t packet_length_size;
- u32 counter_nbo;
char packet_length[3];
size_t i;
size_t total_length;
@@ -328,20 +327,18 @@ check_list:
"pending message\n", __func__, count, total_length);
goto out_unlock_msg_ctx;
}
- i = 0;
- buf[i++] = msg_ctx->type;
- counter_nbo = cpu_to_be32(msg_ctx->counter);
- memcpy(&buf[i], (char *)&counter_nbo, 4);
- i += 4;
+ rc = -EFAULT;
+ if (put_user(msg_ctx->type, buf))
+ goto out_unlock_msg_ctx;
+ if (put_user(cpu_to_be32(msg_ctx->counter), (__be32 __user *)(buf + 1)))
+ goto out_unlock_msg_ctx;
+ i = 5;
if (msg_ctx->msg) {
- memcpy(&buf[i], packet_length, packet_length_size);
+ if (copy_to_user(&buf[i], packet_length, packet_length_size))
+ goto out_unlock_msg_ctx;
i += packet_length_size;
- rc = copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size);
- if (rc) {
- printk(KERN_ERR "%s: copy_to_user returned error "
- "[%d]\n", __func__, rc);
+ if (copy_to_user(&buf[i], msg_ctx->msg, msg_ctx->msg_size))
goto out_unlock_msg_ctx;
- }
i += msg_ctx->msg_size;
}
rc = i;
@@ -452,7 +449,8 @@ static ssize_t
ecryptfs_miscdev_write(struct file *file, const char __user *buf,
size_t count, loff_t *ppos)
{
- u32 counter_nbo, seq;
+ __be32 counter_nbo;
+ u32 seq;
size_t packet_size, packet_size_length, i;
ssize_t sz = 0;
char *data;
@@ -485,7 +483,7 @@ ecryptfs_miscdev_write(struct file *file, const char __user *buf,
count);
goto out_free;
}
- memcpy((char *)&counter_nbo, &data[i], 4);
+ memcpy(&counter_nbo, &data[i], 4);
seq = be32_to_cpu(counter_nbo);
i += 4;
rc = ecryptfs_parse_packet_length(&data[i], &packet_size,
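The miscdev read path now copies the reply header straight to userspace with put_user()/copy_to_user(): one byte of message type, a 4-byte big-endian counter, then the length-prefixed payload. That layout can be sketched in userspace with htonl(); the variable-length packet-length encoding is eCryptfs-specific and deliberately elided here:

#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <arpa/inet.h>

/* Pack the start of a miscdev-style reply: type byte, then the 32-bit
 * counter in network (big-endian) byte order, then the raw payload.
 * The real code also inserts a packet-length field before the payload. */
static size_t pack_reply(unsigned char *buf, size_t buflen,
			 uint8_t type, uint32_t counter,
			 const void *msg, size_t msg_len)
{
	uint32_t be_counter = htonl(counter);

	if (buflen < 1 + sizeof(be_counter) + msg_len)
		return 0;
	buf[0] = type;
	memcpy(buf + 1, &be_counter, sizeof(be_counter));
	memcpy(buf + 1 + sizeof(be_counter), msg, msg_len);
	return 1 + sizeof(be_counter) + msg_len;
}

int main(void)
{
	unsigned char buf[64];
	size_t n = pack_reply(buf, sizeof(buf), 2, 0x1234, "hi", 2);

	printf("packed %zu bytes, counter bytes %02x %02x %02x %02x\n",
	       n, buf[1], buf[2], buf[3], buf[4]);
	return 0;
}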
diff --git a/fs/ecryptfs/read_write.c b/fs/ecryptfs/read_write.c
index ebf55150be56..75c2ea9fee35 100644
--- a/fs/ecryptfs/read_write.c
+++ b/fs/ecryptfs/read_write.c
@@ -157,20 +157,6 @@ int ecryptfs_write(struct file *ecryptfs_file, char *data, loff_t offset,
ecryptfs_page_idx, rc);
goto out;
}
- if (start_offset_in_page) {
- /* Read in the page from the lower
- * into the eCryptfs inode page cache,
- * decrypting */
- rc = ecryptfs_decrypt_page(ecryptfs_page);
- if (rc) {
- printk(KERN_ERR "%s: Error decrypting "
- "page; rc = [%d]\n",
- __func__, rc);
- ClearPageUptodate(ecryptfs_page);
- page_cache_release(ecryptfs_page);
- goto out;
- }
- }
ecryptfs_page_virt = kmap_atomic(ecryptfs_page, KM_USER0);
/*
@@ -349,14 +335,6 @@ int ecryptfs_read(char *data, loff_t offset, size_t size,
ecryptfs_page_idx, rc);
goto out;
}
- rc = ecryptfs_decrypt_page(ecryptfs_page);
- if (rc) {
- printk(KERN_ERR "%s: Error decrypting "
- "page; rc = [%d]\n", __func__, rc);
- ClearPageUptodate(ecryptfs_page);
- page_cache_release(ecryptfs_page);
- goto out;
- }
ecryptfs_page_virt = kmap_atomic(ecryptfs_page, KM_USER0);
memcpy((data + data_offset),
((char *)ecryptfs_page_virt + start_offset_in_page),
diff --git a/fs/exec.c b/fs/exec.c
index 1f8a24aa1f8b..9448f1b50b4a 100644
--- a/fs/exec.c
+++ b/fs/exec.c
@@ -860,6 +860,7 @@ static int de_thread(struct task_struct *tsk)
no_thread_group:
exit_itimers(sig);
+ flush_itimer_signals();
if (leader)
release_task(leader);
@@ -1251,6 +1252,12 @@ int search_binary_handler(struct linux_binprm *bprm,struct pt_regs *regs)
EXPORT_SYMBOL(search_binary_handler);
+void free_bprm(struct linux_binprm *bprm)
+{
+ free_arg_pages(bprm);
+ kfree(bprm);
+}
+
/*
* sys_execve() executes a new program.
*/
@@ -1320,17 +1327,15 @@ int do_execve(char * filename,
retval = search_binary_handler(bprm,regs);
if (retval >= 0) {
/* execve success */
- free_arg_pages(bprm);
security_bprm_free(bprm);
acct_update_integrals(current);
- kfree(bprm);
+ free_bprm(bprm);
if (displaced)
put_files_struct(displaced);
return retval;
}
out:
- free_arg_pages(bprm);
if (bprm->security)
security_bprm_free(bprm);
@@ -1344,7 +1349,7 @@ out_file:
fput(bprm->file);
}
out_kfree:
- kfree(bprm);
+ free_bprm(bprm);
out_files:
if (displaced)
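free_bprm() pairs the argument-page teardown with the kfree() so that do_execve() and compat_do_execve() release everything through one helper on every exit path. The general pattern is: when a structure owns secondary allocations, give it a single destructor. A trivial userspace sketch of that pattern (the struct and helpers are illustrative only):

#include <stdio.h>
#include <stdlib.h>

struct binprm {                 /* illustrative, not the kernel struct */
	char *arg_pages;        /* secondary allocation owned by binprm */
};

static struct binprm *alloc_binprm(size_t arg_bytes)
{
	struct binprm *b = calloc(1, sizeof(*b));

	if (!b)
		return NULL;
	b->arg_pages = malloc(arg_bytes);
	if (!b->arg_pages) {
		free(b);
		return NULL;
	}
	return b;
}

/* Single teardown helper so no caller can forget the inner buffer. */
static void free_binprm(struct binprm *b)
{
	if (!b)
		return;
	free(b->arg_pages);
	free(b);
}

int main(void)
{
	struct binprm *b = alloc_binprm(4096);

	free_binprm(b);
	puts("done");
	return 0;
}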
diff --git a/fs/ext3/resize.c b/fs/ext3/resize.c
index 28cfd0b40527..77278e947e94 100644
--- a/fs/ext3/resize.c
+++ b/fs/ext3/resize.c
@@ -580,7 +580,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
}
blk = EXT3_SB(sb)->s_sbh->b_blocknr + 1 + EXT3_SB(sb)->s_gdb_count;
- data = (__le32 *)dind->b_data + EXT3_SB(sb)->s_gdb_count;
+ data = (__le32 *)dind->b_data + (EXT3_SB(sb)->s_gdb_count %
+ EXT3_ADDR_PER_BLOCK(sb));
end = (__le32 *)dind->b_data + EXT3_ADDR_PER_BLOCK(sb);
/* Get each reserved primary GDT block and verify it holds backups */
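The resize fix indexes the doubly-indirect block with s_gdb_count modulo EXT3_ADDR_PER_BLOCK, because on a large filesystem the group-descriptor-block count can exceed the number of block pointers a single block holds. A quick arithmetic check, assuming a 4K block size (1024 32-bit pointers per block) and a hypothetical descriptor count:

#include <stdio.h>

int main(void)
{
	unsigned addr_per_block = 4096 / 4;  /* EXT3_ADDR_PER_BLOCK, assumed 4K blocks */
	unsigned gdb_count = 1300;           /* hypothetical descriptor-block count */

	/* The old code indexed dind->b_data + gdb_count and could walk off
	 * the end of the block; the fix wraps the index into the block. */
	printf("raw index   = %u (past the %u-entry block!)\n",
	       gdb_count, addr_per_block);
	printf("fixed index = %u\n", gdb_count % addr_per_block);
	return 0;
}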
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 30494c5da843..b2bdef0234c5 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -43,6 +43,46 @@ void ext4_get_group_no_and_offset(struct super_block *sb, ext4_fsblk_t blocknr,
}
+static int ext4_block_in_group(struct super_block *sb, ext4_fsblk_t block,
+ ext4_group_t block_group)
+{
+ ext4_group_t actual_group;
+ ext4_get_group_no_and_offset(sb, block, &actual_group, 0);
+ if (actual_group == block_group)
+ return 1;
+ return 0;
+}
+
+static int ext4_group_used_meta_blocks(struct super_block *sb,
+ ext4_group_t block_group)
+{
+ ext4_fsblk_t tmp;
+ struct ext4_sb_info *sbi = EXT4_SB(sb);
+ /* block bitmap, inode bitmap, and inode table blocks */
+ int used_blocks = sbi->s_itb_per_group + 2;
+
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb, EXT4_FEATURE_INCOMPAT_FLEX_BG)) {
+ struct ext4_group_desc *gdp;
+ struct buffer_head *bh;
+
+ gdp = ext4_get_group_desc(sb, block_group, &bh);
+ if (!ext4_block_in_group(sb, ext4_block_bitmap(sb, gdp),
+ block_group))
+ used_blocks--;
+
+ if (!ext4_block_in_group(sb, ext4_inode_bitmap(sb, gdp),
+ block_group))
+ used_blocks--;
+
+ tmp = ext4_inode_table(sb, gdp);
+ for (; tmp < ext4_inode_table(sb, gdp) +
+ sbi->s_itb_per_group; tmp++) {
+ if (!ext4_block_in_group(sb, tmp, block_group))
+ used_blocks -= 1;
+ }
+ }
+ return used_blocks;
+}
/* Initializes an uninitialized block bitmap if given, and returns the
* number of blocks free in the group. */
unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
@@ -105,20 +145,34 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
free_blocks = group_blocks - bit_max;
if (bh) {
- ext4_fsblk_t start;
+ ext4_fsblk_t start, tmp;
+ int flex_bg = 0;
for (bit = 0; bit < bit_max; bit++)
ext4_set_bit(bit, bh->b_data);
start = ext4_group_first_block_no(sb, block_group);
- /* Set bits for block and inode bitmaps, and inode table */
- ext4_set_bit(ext4_block_bitmap(sb, gdp) - start, bh->b_data);
- ext4_set_bit(ext4_inode_bitmap(sb, gdp) - start, bh->b_data);
- for (bit = (ext4_inode_table(sb, gdp) - start),
- bit_max = bit + sbi->s_itb_per_group; bit < bit_max; bit++)
- ext4_set_bit(bit, bh->b_data);
+ if (EXT4_HAS_INCOMPAT_FEATURE(sb,
+ EXT4_FEATURE_INCOMPAT_FLEX_BG))
+ flex_bg = 1;
+ /* Set bits for block and inode bitmaps, and inode table */
+ tmp = ext4_block_bitmap(sb, gdp);
+ if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+ ext4_set_bit(tmp - start, bh->b_data);
+
+ tmp = ext4_inode_bitmap(sb, gdp);
+ if (!flex_bg || ext4_block_in_group(sb, tmp, block_group))
+ ext4_set_bit(tmp - start, bh->b_data);
+
+ tmp = ext4_inode_table(sb, gdp);
+ for (; tmp < ext4_inode_table(sb, gdp) +
+ sbi->s_itb_per_group; tmp++) {
+ if (!flex_bg ||
+ ext4_block_in_group(sb, tmp, block_group))
+ ext4_set_bit(tmp - start, bh->b_data);
+ }
/*
* Also if the number of blocks within the group is
* less than the blocksize * 8 ( which is the size
@@ -126,8 +180,7 @@ unsigned ext4_init_block_bitmap(struct super_block *sb, struct buffer_head *bh,
*/
mark_bitmap_end(group_blocks, sb->s_blocksize * 8, bh->b_data);
}
-
- return free_blocks - sbi->s_itb_per_group - 2;
+ return free_blocks - ext4_group_used_meta_blocks(sb, block_group);
}
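The new ext4_group_used_meta_blocks() above only charges a group for the bitmap and inode-table blocks that actually fall inside that group, since FLEX_BG may place them in another group. A small user-space model of that accounting; BLOCKS_PER_GROUP, ITB_PER_GROUP and the sample block numbers are invented for illustration, not kernel values:

#include <stdio.h>

/* Hypothetical geometry: 100 blocks per group, 4 inode-table blocks per group. */
#define BLOCKS_PER_GROUP 100
#define ITB_PER_GROUP    4

static int block_in_group(unsigned long block, unsigned int group)
{
	return block / BLOCKS_PER_GROUP == group;
}

/* Count only the metadata blocks that actually live inside @group. */
static int group_used_meta_blocks(unsigned int group, unsigned long block_bitmap,
				  unsigned long inode_bitmap, unsigned long inode_table)
{
	int used = ITB_PER_GROUP + 2;	/* both bitmaps plus the inode table */
	unsigned long blk;

	if (!block_in_group(block_bitmap, group))
		used--;
	if (!block_in_group(inode_bitmap, group))
		used--;
	for (blk = inode_table; blk < inode_table + ITB_PER_GROUP; blk++)
		if (!block_in_group(blk, group))
			used--;
	return used;
}

int main(void)
{
	/* FLEX_BG-style layout: group 1's metadata packed into group 0. */
	printf("group 1 charged for %d metadata blocks\n",
	       group_used_meta_blocks(1, 10, 11, 12));
	/* Traditional layout: metadata inside the group itself. */
	printf("group 1 charged for %d metadata blocks\n",
	       group_used_meta_blocks(1, 110, 111, 112));
	return 0;
}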
@@ -356,8 +409,7 @@ restart:
prev = rsv;
}
printk("Window map complete.\n");
- if (bad)
- BUG();
+ BUG_ON(bad);
}
#define rsv_window_dump(root, verbose) \
__rsv_window_dump((root), (verbose), __func__)
@@ -1883,7 +1935,8 @@ ext4_fsblk_t ext4_new_block(handle_t *handle, struct inode *inode,
}
ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
- ext4_fsblk_t goal, unsigned long *count, int *errp)
+ ext4_lblk_t iblock, ext4_fsblk_t goal,
+ unsigned long *count, int *errp)
{
struct ext4_allocation_request ar;
ext4_fsblk_t ret;
@@ -1897,6 +1950,12 @@ ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
ar.inode = inode;
ar.goal = goal;
ar.len = *count;
+ ar.logical = iblock;
+ if (S_ISREG(inode->i_mode))
+ ar.flags = EXT4_MB_HINT_DATA;
+ else
+ /* disable in-core preallocation for non-regular files */
+ ar.flags = 0;
ret = ext4_mb_new_blocks(handle, &ar, errp);
*count = ar.len;
return ret;
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index 8158083f7ac0..b7ffb91eb832 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -22,7 +22,7 @@
#include "ext4_i.h"
/*
- * The second extended filesystem constants/structures
+ * The fourth extended filesystem constants/structures
*/
/*
@@ -960,8 +960,9 @@ extern unsigned long ext4_bg_num_gdb(struct super_block *sb,
ext4_group_t group);
extern ext4_fsblk_t ext4_new_block (handle_t *handle, struct inode *inode,
ext4_fsblk_t goal, int *errp);
-extern ext4_fsblk_t ext4_new_blocks (handle_t *handle, struct inode *inode,
- ext4_fsblk_t goal, unsigned long *count, int *errp);
+extern ext4_fsblk_t ext4_new_blocks(handle_t *handle, struct inode *inode,
+ ext4_lblk_t iblock, ext4_fsblk_t goal,
+ unsigned long *count, int *errp);
extern ext4_fsblk_t ext4_new_blocks_old(handle_t *handle, struct inode *inode,
ext4_fsblk_t goal, unsigned long *count, int *errp);
extern void ext4_free_blocks (handle_t *handle, struct inode *inode,
diff --git a/fs/ext4/ext4_i.h b/fs/ext4/ext4_i.h
index 26a4ae255d79..abf2744164e0 100644
--- a/fs/ext4/ext4_i.h
+++ b/fs/ext4/ext4_i.h
@@ -79,7 +79,7 @@ struct ext4_ext_cache {
};
/*
- * third extended file system inode data in memory
+ * fourth extended file system inode data in memory
*/
struct ext4_inode_info {
__le32 i_data[15]; /* unconverted */
diff --git a/fs/ext4/ext4_sb.h b/fs/ext4/ext4_sb.h
index 5802e69f2191..4de9a75ca6af 100644
--- a/fs/ext4/ext4_sb.h
+++ b/fs/ext4/ext4_sb.h
@@ -25,7 +25,7 @@
#include <linux/rbtree.h>
/*
- * third extended-fs super-block data in memory
+ * fourth extended-fs super-block data in memory
*/
struct ext4_sb_info {
unsigned long s_desc_size; /* Size of a group descriptor in bytes */
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index 8d9707746413..0d1923e51bff 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -508,8 +508,9 @@ static int ext4_blks_to_allocate(Indirect *branch, int k, unsigned long blks,
* direct blocks
*/
static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
- ext4_fsblk_t goal, int indirect_blks, int blks,
- ext4_fsblk_t new_blocks[4], int *err)
+ ext4_lblk_t iblock, ext4_fsblk_t goal,
+ int indirect_blks, int blks,
+ ext4_fsblk_t new_blocks[4], int *err)
{
int target, i;
unsigned long count = 0;
@@ -530,7 +531,8 @@ static int ext4_alloc_blocks(handle_t *handle, struct inode *inode,
while (1) {
count = target;
/* allocating blocks for indirect blocks and direct blocks */
- current_block = ext4_new_blocks(handle,inode,goal,&count,err);
+ current_block = ext4_new_blocks(handle, inode, iblock,
+ goal, &count, err);
if (*err)
goto failed_out;
@@ -584,8 +586,9 @@ failed_out:
* as described above and return 0.
*/
static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
- int indirect_blks, int *blks, ext4_fsblk_t goal,
- ext4_lblk_t *offsets, Indirect *branch)
+ ext4_lblk_t iblock, int indirect_blks,
+ int *blks, ext4_fsblk_t goal,
+ ext4_lblk_t *offsets, Indirect *branch)
{
int blocksize = inode->i_sb->s_blocksize;
int i, n = 0;
@@ -595,7 +598,7 @@ static int ext4_alloc_branch(handle_t *handle, struct inode *inode,
ext4_fsblk_t new_blocks[4];
ext4_fsblk_t current_block;
- num = ext4_alloc_blocks(handle, inode, goal, indirect_blks,
+ num = ext4_alloc_blocks(handle, inode, iblock, goal, indirect_blks,
*blks, new_blocks, &err);
if (err)
return err;
@@ -855,8 +858,9 @@ int ext4_get_blocks_handle(handle_t *handle, struct inode *inode,
/*
* Block out ext4_truncate while we alter the tree
*/
- err = ext4_alloc_branch(handle, inode, indirect_blks, &count, goal,
- offsets + (partial - chain), partial);
+ err = ext4_alloc_branch(handle, inode, iblock, indirect_blks,
+ &count, goal,
+ offsets + (partial - chain), partial);
/*
* The ext4_splice_branch call will free and forget any buffers
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 873ad9b3418c..789719384be7 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -1730,10 +1730,6 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
ac->ac_g_ex.fe_start = sbi->s_mb_last_start;
spin_unlock(&sbi->s_md_lock);
}
-
- /* searching for the right group start from the goal value specified */
- group = ac->ac_g_ex.fe_group;
-
/* Let's just scan groups to find more-less suitable blocks */
cr = ac->ac_2order ? 0 : 1;
/*
@@ -1743,6 +1739,12 @@ ext4_mb_regular_allocator(struct ext4_allocation_context *ac)
repeat:
for (; cr < 4 && ac->ac_status == AC_STATUS_CONTINUE; cr++) {
ac->ac_criteria = cr;
+ /*
+ * searching for the right group start
+ * from the goal value specified
+ */
+ group = ac->ac_g_ex.fe_group;
+
for (i = 0; i < EXT4_SB(sb)->s_groups_count; group++, i++) {
struct ext4_group_info *grp;
struct ext4_group_desc *desc;
@@ -2575,25 +2577,24 @@ ext4_mb_free_committed_blocks(struct super_block *sb)
-#define MB_PROC_VALUE_READ(name) \
-static int ext4_mb_read_##name(char *page, char **start, \
- off_t off, int count, int *eof, void *data) \
+#define MB_PROC_FOPS(name) \
+static int ext4_mb_##name##_proc_show(struct seq_file *m, void *v) \
{ \
- struct ext4_sb_info *sbi = data; \
- int len; \
- *eof = 1; \
- if (off != 0) \
- return 0; \
- len = sprintf(page, "%ld\n", sbi->s_mb_##name); \
- *start = page; \
- return len; \
-}
-
-#define MB_PROC_VALUE_WRITE(name) \
-static int ext4_mb_write_##name(struct file *file, \
- const char __user *buf, unsigned long cnt, void *data) \
+ struct ext4_sb_info *sbi = m->private; \
+ \
+ seq_printf(m, "%ld\n", sbi->s_mb_##name); \
+ return 0; \
+} \
+ \
+static int ext4_mb_##name##_proc_open(struct inode *inode, struct file *file)\
+{ \
+ return single_open(file, ext4_mb_##name##_proc_show, PDE(inode)->data);\
+} \
+ \
+static ssize_t ext4_mb_##name##_proc_write(struct file *file, \
+ const char __user *buf, size_t cnt, loff_t *ppos) \
{ \
- struct ext4_sb_info *sbi = data; \
+ struct ext4_sb_info *sbi = PDE(file->f_path.dentry->d_inode)->data;\
char str[32]; \
long value; \
if (cnt >= sizeof(str)) \
@@ -2605,31 +2606,32 @@ static int ext4_mb_write_##name(struct file *file, \
return -ERANGE; \
sbi->s_mb_##name = value; \
return cnt; \
-}
+} \
+ \
+static const struct file_operations ext4_mb_##name##_proc_fops = { \
+ .owner = THIS_MODULE, \
+ .open = ext4_mb_##name##_proc_open, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+ .write = ext4_mb_##name##_proc_write, \
+};
-MB_PROC_VALUE_READ(stats);
-MB_PROC_VALUE_WRITE(stats);
-MB_PROC_VALUE_READ(max_to_scan);
-MB_PROC_VALUE_WRITE(max_to_scan);
-MB_PROC_VALUE_READ(min_to_scan);
-MB_PROC_VALUE_WRITE(min_to_scan);
-MB_PROC_VALUE_READ(order2_reqs);
-MB_PROC_VALUE_WRITE(order2_reqs);
-MB_PROC_VALUE_READ(stream_request);
-MB_PROC_VALUE_WRITE(stream_request);
-MB_PROC_VALUE_READ(group_prealloc);
-MB_PROC_VALUE_WRITE(group_prealloc);
+MB_PROC_FOPS(stats);
+MB_PROC_FOPS(max_to_scan);
+MB_PROC_FOPS(min_to_scan);
+MB_PROC_FOPS(order2_reqs);
+MB_PROC_FOPS(stream_request);
+MB_PROC_FOPS(group_prealloc);
#define MB_PROC_HANDLER(name, var) \
do { \
- proc = create_proc_entry(name, mode, sbi->s_mb_proc); \
+ proc = proc_create_data(name, mode, sbi->s_mb_proc, \
+ &ext4_mb_##var##_proc_fops, sbi); \
if (proc == NULL) { \
printk(KERN_ERR "EXT4-fs: can't to create %s\n", name); \
goto err_out; \
} \
- proc->data = sbi; \
- proc->read_proc = ext4_mb_read_##var ; \
- proc->write_proc = ext4_mb_write_##var; \
} while (0)
static int ext4_mb_init_per_dev_proc(struct super_block *sb)
@@ -2745,8 +2747,6 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
sbi = EXT4_SB(sb);
es = sbi->s_es;
- ext4_debug("using block group %lu(%d)\n", ac->ac_b_ex.fe_group,
- gdp->bg_free_blocks_count);
err = -EIO;
bitmap_bh = read_block_bitmap(sb, ac->ac_b_ex.fe_group);
@@ -2762,6 +2762,9 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
if (!gdp)
goto out_err;
+ ext4_debug("using block group %lu(%d)\n", ac->ac_b_ex.fe_group,
+ gdp->bg_free_blocks_count);
+
err = ext4_journal_get_write_access(handle, gdp_bh);
if (err)
goto out_err;
@@ -3094,8 +3097,7 @@ static void ext4_mb_use_inode_pa(struct ext4_allocation_context *ac,
static void ext4_mb_use_group_pa(struct ext4_allocation_context *ac,
struct ext4_prealloc_space *pa)
{
- unsigned len = ac->ac_o_ex.fe_len;
-
+ unsigned int len = ac->ac_o_ex.fe_len;
ext4_get_group_no_and_offset(ac->ac_sb, pa->pa_pstart,
&ac->ac_b_ex.fe_group,
&ac->ac_b_ex.fe_start);
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index 9f086a6a472b..9ecb92f68543 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -563,7 +563,8 @@ static int reserve_backup_gdb(handle_t *handle, struct inode *inode,
}
blk = EXT4_SB(sb)->s_sbh->b_blocknr + 1 + EXT4_SB(sb)->s_gdb_count;
- data = (__le32 *)dind->b_data + EXT4_SB(sb)->s_gdb_count;
+ data = (__le32 *)dind->b_data + (EXT4_SB(sb)->s_gdb_count %
+ EXT4_ADDR_PER_BLOCK(sb));
end = (__le32 *)dind->b_data + EXT4_ADDR_PER_BLOCK(sb);
/* Get each reserved primary GDT block and verify it holds backups */
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index 09d9359c8055..cb96f127c366 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -671,6 +671,7 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
unsigned long def_mount_opts;
struct super_block *sb = vfs->mnt_sb;
struct ext4_sb_info *sbi = EXT4_SB(sb);
+ journal_t *journal = sbi->s_journal;
struct ext4_super_block *es = sbi->s_es;
def_mount_opts = le32_to_cpu(es->s_default_mount_opts);
@@ -729,8 +730,15 @@ static int ext4_show_options(struct seq_file *seq, struct vfsmount *vfs)
seq_printf(seq, ",commit=%u",
(unsigned) (sbi->s_commit_interval / HZ));
}
- if (test_opt(sb, BARRIER))
- seq_puts(seq, ",barrier=1");
+ /*
+ * We're changing the default of barrier mount option, so
+ * let's always display its mount state so it's clear what its
+ * status is.
+ */
+ seq_puts(seq, ",barrier=");
+ seq_puts(seq, test_opt(sb, BARRIER) ? "1" : "0");
+ if (test_opt(sb, JOURNAL_ASYNC_COMMIT))
+ seq_puts(seq, ",journal_async_commit");
if (test_opt(sb, NOBH))
seq_puts(seq, ",nobh");
if (!test_opt(sb, EXTENTS))
@@ -1907,6 +1915,7 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
sbi->s_resgid = le16_to_cpu(es->s_def_resgid);
set_opt(sbi->s_mount_opt, RESERVATION);
+ set_opt(sbi->s_mount_opt, BARRIER);
/*
* turn on extents feature by default in ext4 filesystem
@@ -2189,6 +2198,29 @@ static int ext4_fill_super (struct super_block *sb, void *data, int silent)
EXT4_HAS_COMPAT_FEATURE(sb, EXT4_FEATURE_COMPAT_HAS_JOURNAL)) {
if (ext4_load_journal(sb, es, journal_devnum))
goto failed_mount3;
+ if (!(sb->s_flags & MS_RDONLY) &&
+ EXT4_SB(sb)->s_journal->j_failed_commit) {
+ printk(KERN_CRIT "EXT4-fs error (device %s): "
+ "ext4_fill_super: Journal transaction "
+ "%u is corrupt\n", sb->s_id,
+ EXT4_SB(sb)->s_journal->j_failed_commit);
+ if (test_opt (sb, ERRORS_RO)) {
+ printk (KERN_CRIT
+ "Mounting filesystem read-only\n");
+ sb->s_flags |= MS_RDONLY;
+ EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+ es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
+ }
+ if (test_opt(sb, ERRORS_PANIC)) {
+ EXT4_SB(sb)->s_mount_state |= EXT4_ERROR_FS;
+ es->s_state |= cpu_to_le16(EXT4_ERROR_FS);
+ ext4_commit_super(sb, es, 1);
+ printk(KERN_CRIT
+ "EXT4-fs (device %s): mount failed\n",
+ sb->s_id);
+ goto failed_mount4;
+ }
+ }
} else if (journal_inum) {
if (ext4_create_journal(sb, es, journal_inum))
goto failed_mount3;
diff --git a/fs/file.c b/fs/file.c
index 4c6f0ea12c41..7b3887e054d0 100644
--- a/fs/file.c
+++ b/fs/file.c
@@ -26,6 +26,8 @@ struct fdtable_defer {
};
int sysctl_nr_open __read_mostly = 1024*1024;
+int sysctl_nr_open_min = BITS_PER_LONG;
+int sysctl_nr_open_max = 1024 * 1024; /* raised later */
/*
* We use this list to defer free fdtables that have vmalloced
@@ -119,8 +121,6 @@ static void copy_fdtable(struct fdtable *nfdt, struct fdtable *ofdt)
unsigned int cpy, set;
BUG_ON(nfdt->max_fds < ofdt->max_fds);
- if (ofdt->max_fds == 0)
- return;
cpy = ofdt->max_fds * sizeof(struct file *);
set = (nfdt->max_fds - ofdt->max_fds) * sizeof(struct file *);
@@ -261,6 +261,139 @@ int expand_files(struct files_struct *files, int nr)
return expand_fdtable(files, nr);
}
+static int count_open_files(struct fdtable *fdt)
+{
+ int size = fdt->max_fds;
+ int i;
+
+ /* Find the last open fd */
+ for (i = size/(8*sizeof(long)); i > 0; ) {
+ if (fdt->open_fds->fds_bits[--i])
+ break;
+ }
+ i = (i+1) * 8 * sizeof(long);
+ return i;
+}
+
+/*
+ * Allocate a new files structure and copy contents from the
+ * passed in files structure.
+ * errorp will be valid only when the returned files_struct is NULL.
+ */
+struct files_struct *dup_fd(struct files_struct *oldf, int *errorp)
+{
+ struct files_struct *newf;
+ struct file **old_fds, **new_fds;
+ int open_files, size, i;
+ struct fdtable *old_fdt, *new_fdt;
+
+ *errorp = -ENOMEM;
+ newf = kmem_cache_alloc(files_cachep, GFP_KERNEL);
+ if (!newf)
+ goto out;
+
+ atomic_set(&newf->count, 1);
+
+ spin_lock_init(&newf->file_lock);
+ newf->next_fd = 0;
+ new_fdt = &newf->fdtab;
+ new_fdt->max_fds = NR_OPEN_DEFAULT;
+ new_fdt->close_on_exec = (fd_set *)&newf->close_on_exec_init;
+ new_fdt->open_fds = (fd_set *)&newf->open_fds_init;
+ new_fdt->fd = &newf->fd_array[0];
+ INIT_RCU_HEAD(&new_fdt->rcu);
+ new_fdt->next = NULL;
+
+ spin_lock(&oldf->file_lock);
+ old_fdt = files_fdtable(oldf);
+ open_files = count_open_files(old_fdt);
+
+ /*
+ * Check whether we need to allocate a larger fd array and fd set.
+ */
+ while (unlikely(open_files > new_fdt->max_fds)) {
+ spin_unlock(&oldf->file_lock);
+
+ if (new_fdt != &newf->fdtab) {
+ free_fdarr(new_fdt);
+ free_fdset(new_fdt);
+ kfree(new_fdt);
+ }
+
+ new_fdt = alloc_fdtable(open_files - 1);
+ if (!new_fdt) {
+ *errorp = -ENOMEM;
+ goto out_release;
+ }
+
+ /* beyond sysctl_nr_open; nothing to do */
+ if (unlikely(new_fdt->max_fds < open_files)) {
+ free_fdarr(new_fdt);
+ free_fdset(new_fdt);
+ kfree(new_fdt);
+ *errorp = -EMFILE;
+ goto out_release;
+ }
+
+ /*
+	 * Reacquire the oldf lock and a pointer to its fd table;
+	 * it may have grown a new, bigger fd table in the meantime,
+	 * so we need the latest pointer.
+ */
+ spin_lock(&oldf->file_lock);
+ old_fdt = files_fdtable(oldf);
+ open_files = count_open_files(old_fdt);
+ }
+
+ old_fds = old_fdt->fd;
+ new_fds = new_fdt->fd;
+
+ memcpy(new_fdt->open_fds->fds_bits,
+ old_fdt->open_fds->fds_bits, open_files/8);
+ memcpy(new_fdt->close_on_exec->fds_bits,
+ old_fdt->close_on_exec->fds_bits, open_files/8);
+
+ for (i = open_files; i != 0; i--) {
+ struct file *f = *old_fds++;
+ if (f) {
+ get_file(f);
+ } else {
+ /*
+ * The fd may be claimed in the fd bitmap but not yet
+ * instantiated in the files array if a sibling thread
+ * is partway through open(). So make sure that this
+ * fd is available to the new process.
+ */
+ FD_CLR(open_files - i, new_fdt->open_fds);
+ }
+ rcu_assign_pointer(*new_fds++, f);
+ }
+ spin_unlock(&oldf->file_lock);
+
+ /* compute the remainder to be cleared */
+ size = (new_fdt->max_fds - open_files) * sizeof(struct file *);
+
+	/* This is long-word aligned, thus we could use an optimized version */
+ memset(new_fds, 0, size);
+
+ if (new_fdt->max_fds > open_files) {
+ int left = (new_fdt->max_fds-open_files)/8;
+ int start = open_files / (8 * sizeof(unsigned long));
+
+ memset(&new_fdt->open_fds->fds_bits[start], 0, left);
+ memset(&new_fdt->close_on_exec->fds_bits[start], 0, left);
+ }
+
+ rcu_assign_pointer(newf->fdt, new_fdt);
+
+ return newf;
+
+out_release:
+ kmem_cache_free(files_cachep, newf);
+out:
+ return NULL;
+}
+
static void __devinit fdtable_defer_list_init(int cpu)
{
struct fdtable_defer *fddef = &per_cpu(fdtable_defer_list, cpu);
@@ -274,4 +407,19 @@ void __init files_defer_init(void)
int i;
for_each_possible_cpu(i)
fdtable_defer_list_init(i);
+ sysctl_nr_open_max = min((size_t)INT_MAX, ~(size_t)0/sizeof(void *)) &
+ -BITS_PER_LONG;
}
+
+struct files_struct init_files = {
+ .count = ATOMIC_INIT(1),
+ .fdt = &init_files.fdtab,
+ .fdtab = {
+ .max_fds = NR_OPEN_DEFAULT,
+ .fd = &init_files.fd_array[0],
+ .close_on_exec = (fd_set *)&init_files.close_on_exec_init,
+ .open_fds = (fd_set *)&init_files.open_fds_init,
+ .rcu = RCU_HEAD_INIT,
+ },
+ .file_lock = __SPIN_LOCK_UNLOCKED(init_task.file_lock),
+};
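The cap computed in files_defer_init() above bounds how far sysctl_nr_open can be raised: the array of struct file pointers must still fit in a size_t-sized allocation, the result is clamped to INT_MAX, and it is rounded down to a whole bitmap word. A stand-alone sketch of the same arithmetic; the BITS_PER_LONG stand-in is an assumption for user space:

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

/* User-space stand-in for the kernel's BITS_PER_LONG. */
#define BITS_PER_LONG (sizeof(long) * 8)

int main(void)
{
	/* Most 'struct file *' slots whose array size still fits in size_t. */
	size_t by_mem = ~(size_t)0 / sizeof(void *);
	size_t capped = by_mem < (size_t)INT_MAX ? by_mem : (size_t)INT_MAX;
	/* Round down to a whole number of open_fds bitmap words. */
	size_t nr_open_max = capped & ~(BITS_PER_LONG - 1);

	printf("nr_open could be raised to at most %zu here\n", nr_open_max);
	return 0;
}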
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index fb77e0962132..43e99513334a 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -488,7 +488,12 @@ static struct fuse_conn *new_conn(struct super_block *sb)
err = bdi_init(&fc->bdi);
if (err)
goto error_kfree;
- err = bdi_register_dev(&fc->bdi, fc->dev);
+ if (sb->s_bdev) {
+ err = bdi_register(&fc->bdi, NULL, "%u:%u-fuseblk",
+ MAJOR(fc->dev), MINOR(fc->dev));
+ } else {
+ err = bdi_register_dev(&fc->bdi, fc->dev);
+ }
if (err)
goto error_bdi_destroy;
/*
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index 7f7947e3dfbb..ab2f57e3fb87 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -14,23 +14,11 @@ config GFS2_FS
GFS is perfect consistency -- changes made to the filesystem on one
machine show up immediately on all other machines in the cluster.
- To use the GFS2 filesystem, you will need to enable one or more of
- the below locking modules. Documentation and utilities for GFS2 can
+ To use the GFS2 filesystem in a cluster, you will need to enable
+ the locking module below. Documentation and utilities for GFS2 can
be found here: http://sources.redhat.com/cluster
-config GFS2_FS_LOCKING_NOLOCK
- tristate "GFS2 \"nolock\" locking module"
- depends on GFS2_FS
- help
- Single node locking module for GFS2.
-
- Use this module if you want to use GFS2 on a single node without
- its clustering features. You can still take advantage of the
- large file support, and upgrade to running a full cluster later on
- if required.
-
- If you will only be using GFS2 in cluster mode, you do not need this
- module.
+ The "nolock" lock module is now built in to GFS2 by default.
config GFS2_FS_LOCKING_DLM
tristate "GFS2 DLM locking module"
diff --git a/fs/gfs2/Makefile b/fs/gfs2/Makefile
index e2350df02a07..ec65851ec80a 100644
--- a/fs/gfs2/Makefile
+++ b/fs/gfs2/Makefile
@@ -5,6 +5,5 @@ gfs2-y := acl.o bmap.o daemon.o dir.o eaops.o eattr.o glock.o \
ops_fstype.o ops_inode.o ops_super.o quota.o \
recovery.o rgrp.o super.o sys.o trans.o util.o
-obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += locking/nolock/
obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += locking/dlm/
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index d636b3e80f5d..be7ed503f012 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -45,21 +45,19 @@ struct gfs2_gl_hash_bucket {
struct hlist_head hb_list;
};
-struct glock_iter {
- int hash; /* hash bucket index */
- struct gfs2_sbd *sdp; /* incore superblock */
- struct gfs2_glock *gl; /* current glock struct */
- struct seq_file *seq; /* sequence file for debugfs */
- char string[512]; /* scratch space */
+struct gfs2_glock_iter {
+ int hash; /* hash bucket index */
+ struct gfs2_sbd *sdp; /* incore superblock */
+ struct gfs2_glock *gl; /* current glock struct */
+ char string[512]; /* scratch space */
};
typedef void (*glock_examiner) (struct gfs2_glock * gl);
static int gfs2_dump_lockstate(struct gfs2_sbd *sdp);
-static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl);
-static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh);
-static void gfs2_glock_drop_th(struct gfs2_glock *gl);
-static void run_queue(struct gfs2_glock *gl);
+static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl);
+#define GLOCK_BUG_ON(gl,x) do { if (unlikely(x)) { __dump_glock(NULL, gl); BUG(); } } while(0)
+static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target);
static DECLARE_RWSEM(gfs2_umount_flush_sem);
static struct dentry *gfs2_root;
@@ -123,33 +121,6 @@ static inline rwlock_t *gl_lock_addr(unsigned int x)
#endif
/**
- * relaxed_state_ok - is a requested lock compatible with the current lock mode?
- * @actual: the current state of the lock
- * @requested: the lock state that was requested by the caller
- * @flags: the modifier flags passed in by the caller
- *
- * Returns: 1 if the locks are compatible, 0 otherwise
- */
-
-static inline int relaxed_state_ok(unsigned int actual, unsigned requested,
- int flags)
-{
- if (actual == requested)
- return 1;
-
- if (flags & GL_EXACT)
- return 0;
-
- if (actual == LM_ST_EXCLUSIVE && requested == LM_ST_SHARED)
- return 1;
-
- if (actual != LM_ST_UNLOCKED && (flags & LM_FLAG_ANY))
- return 1;
-
- return 0;
-}
-
-/**
* gl_hash() - Turn glock number into hash bucket number
* @lock: The glock number
*
@@ -182,7 +153,7 @@ static void glock_free(struct gfs2_glock *gl)
struct gfs2_sbd *sdp = gl->gl_sbd;
struct inode *aspace = gl->gl_aspace;
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+ if (sdp->sd_lockstruct.ls_ops->lm_put_lock)
sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);
if (aspace)
@@ -211,17 +182,14 @@ static void gfs2_glock_hold(struct gfs2_glock *gl)
int gfs2_glock_put(struct gfs2_glock *gl)
{
int rv = 0;
- struct gfs2_sbd *sdp = gl->gl_sbd;
write_lock(gl_lock_addr(gl->gl_hash));
if (atomic_dec_and_test(&gl->gl_ref)) {
hlist_del(&gl->gl_list);
write_unlock(gl_lock_addr(gl->gl_hash));
- gfs2_assert(sdp, gl->gl_state == LM_ST_UNLOCKED);
- gfs2_assert(sdp, list_empty(&gl->gl_reclaim));
- gfs2_assert(sdp, list_empty(&gl->gl_holders));
- gfs2_assert(sdp, list_empty(&gl->gl_waiters1));
- gfs2_assert(sdp, list_empty(&gl->gl_waiters3));
+ GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED);
+ GLOCK_BUG_ON(gl, !list_empty(&gl->gl_reclaim));
+ GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
glock_free(gl);
rv = 1;
goto out;
@@ -281,22 +249,394 @@ static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
return gl;
}
+/**
+ * may_grant - check if it's ok to grant a new lock
+ * @gl: The glock
+ * @gh: The lock request which we wish to grant
+ *
+ * Returns: true if it's ok to grant the lock
+ */
+
+static inline int may_grant(const struct gfs2_glock *gl, const struct gfs2_holder *gh)
+{
+ const struct gfs2_holder *gh_head = list_entry(gl->gl_holders.next, const struct gfs2_holder, gh_list);
+ if ((gh->gh_state == LM_ST_EXCLUSIVE ||
+ gh_head->gh_state == LM_ST_EXCLUSIVE) && gh != gh_head)
+ return 0;
+ if (gl->gl_state == gh->gh_state)
+ return 1;
+ if (gh->gh_flags & GL_EXACT)
+ return 0;
+ if (gh->gh_state == LM_ST_SHARED && gl->gl_state == LM_ST_EXCLUSIVE)
+ return 1;
+ if (gl->gl_state != LM_ST_UNLOCKED && (gh->gh_flags & LM_FLAG_ANY))
+ return 1;
+ return 0;
+}
+
+static void gfs2_holder_wake(struct gfs2_holder *gh)
+{
+ clear_bit(HIF_WAIT, &gh->gh_iflags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&gh->gh_iflags, HIF_WAIT);
+}
+
+/**
+ * do_promote - promote as many requests as possible on the current queue
+ * @gl: The glock
+ *
+ * Returns: true if there is a blocked holder at the head of the list
+ */
+
+static int do_promote(struct gfs2_glock *gl)
+{
+ const struct gfs2_glock_operations *glops = gl->gl_ops;
+ struct gfs2_holder *gh, *tmp;
+ int ret;
+
+restart:
+ list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
+ if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+ continue;
+ if (may_grant(gl, gh)) {
+ if (gh->gh_list.prev == &gl->gl_holders &&
+ glops->go_lock) {
+ spin_unlock(&gl->gl_spin);
+ /* FIXME: eliminate this eventually */
+ ret = glops->go_lock(gh);
+ spin_lock(&gl->gl_spin);
+ if (ret) {
+ gh->gh_error = ret;
+ list_del_init(&gh->gh_list);
+ gfs2_holder_wake(gh);
+ goto restart;
+ }
+ set_bit(HIF_HOLDER, &gh->gh_iflags);
+ gfs2_holder_wake(gh);
+ goto restart;
+ }
+ set_bit(HIF_HOLDER, &gh->gh_iflags);
+ gfs2_holder_wake(gh);
+ continue;
+ }
+ if (gh->gh_list.prev == &gl->gl_holders)
+ return 1;
+ break;
+ }
+ return 0;
+}
+
+/**
+ * do_error - Something unexpected has happened during a lock request
+ *
+ */
+
+static inline void do_error(struct gfs2_glock *gl, const int ret)
+{
+ struct gfs2_holder *gh, *tmp;
+
+ list_for_each_entry_safe(gh, tmp, &gl->gl_holders, gh_list) {
+ if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+ continue;
+ if (ret & LM_OUT_ERROR)
+ gh->gh_error = -EIO;
+ else if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
+ gh->gh_error = GLR_TRYFAILED;
+ else
+ continue;
+ list_del_init(&gh->gh_list);
+ gfs2_holder_wake(gh);
+ }
+}
+
+/**
+ * find_first_waiter - find the first gh that's waiting for the glock
+ * @gl: the glock
+ */
+
+static inline struct gfs2_holder *find_first_waiter(const struct gfs2_glock *gl)
+{
+ struct gfs2_holder *gh;
+
+ list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+ if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
+ return gh;
+ }
+ return NULL;
+}
+
+/**
+ * state_change - record that the glock is now in a different state
+ * @gl: the glock
+ * @new_state: the new state
+ *
+ */
+
+static void state_change(struct gfs2_glock *gl, unsigned int new_state)
+{
+ int held1, held2;
+
+ held1 = (gl->gl_state != LM_ST_UNLOCKED);
+ held2 = (new_state != LM_ST_UNLOCKED);
+
+ if (held1 != held2) {
+ if (held2)
+ gfs2_glock_hold(gl);
+ else
+ gfs2_glock_put(gl);
+ }
+
+ gl->gl_state = new_state;
+ gl->gl_tchange = jiffies;
+}
+
+static void gfs2_demote_wake(struct gfs2_glock *gl)
+{
+ gl->gl_demote_state = LM_ST_EXCLUSIVE;
+ clear_bit(GLF_DEMOTE, &gl->gl_flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
+}
+
+/**
+ * finish_xmote - The DLM has replied to one of our lock requests
+ * @gl: The glock
+ * @ret: The status from the DLM
+ *
+ */
+
+static void finish_xmote(struct gfs2_glock *gl, unsigned int ret)
+{
+ const struct gfs2_glock_operations *glops = gl->gl_ops;
+ struct gfs2_holder *gh;
+ unsigned state = ret & LM_OUT_ST_MASK;
+
+ spin_lock(&gl->gl_spin);
+ state_change(gl, state);
+ gh = find_first_waiter(gl);
+
+ /* Demote to UN request arrived during demote to SH or DF */
+ if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags) &&
+ state != LM_ST_UNLOCKED && gl->gl_demote_state == LM_ST_UNLOCKED)
+ gl->gl_target = LM_ST_UNLOCKED;
+
+ /* Check for state != intended state */
+ if (unlikely(state != gl->gl_target)) {
+ if (gh && !test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags)) {
+ /* move to back of queue and try next entry */
+ if (ret & LM_OUT_CANCELED) {
+ if ((gh->gh_flags & LM_FLAG_PRIORITY) == 0)
+ list_move_tail(&gh->gh_list, &gl->gl_holders);
+ gh = find_first_waiter(gl);
+ gl->gl_target = gh->gh_state;
+ goto retry;
+ }
+ /* Some error or failed "try lock" - report it */
+ if ((ret & LM_OUT_ERROR) ||
+ (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))) {
+ gl->gl_target = gl->gl_state;
+ do_error(gl, ret);
+ goto out;
+ }
+ }
+ switch(state) {
+ /* Unlocked due to conversion deadlock, try again */
+ case LM_ST_UNLOCKED:
+retry:
+ do_xmote(gl, gh, gl->gl_target);
+ break;
+ /* Conversion fails, unlock and try again */
+ case LM_ST_SHARED:
+ case LM_ST_DEFERRED:
+ do_xmote(gl, gh, LM_ST_UNLOCKED);
+ break;
+ default: /* Everything else */
+ printk(KERN_ERR "GFS2: wanted %u got %u\n", gl->gl_target, state);
+ GLOCK_BUG_ON(gl, 1);
+ }
+ spin_unlock(&gl->gl_spin);
+ gfs2_glock_put(gl);
+ return;
+ }
+
+ /* Fast path - we got what we asked for */
+ if (test_and_clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
+ gfs2_demote_wake(gl);
+ if (state != LM_ST_UNLOCKED) {
+ if (glops->go_xmote_bh) {
+ int rv;
+ spin_unlock(&gl->gl_spin);
+ rv = glops->go_xmote_bh(gl, gh);
+ if (rv == -EAGAIN)
+ return;
+ spin_lock(&gl->gl_spin);
+ if (rv) {
+ do_error(gl, rv);
+ goto out;
+ }
+ }
+ do_promote(gl);
+ }
+out:
+ clear_bit(GLF_LOCK, &gl->gl_flags);
+ spin_unlock(&gl->gl_spin);
+ gfs2_glock_put(gl);
+}
+
+static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
+ unsigned int cur_state, unsigned int req_state,
+ unsigned int flags)
+{
+ int ret = LM_OUT_ERROR;
+
+ if (!sdp->sd_lockstruct.ls_ops->lm_lock)
+ return req_state == LM_ST_UNLOCKED ? 0 : req_state;
+
+ if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+ ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
+ req_state, flags);
+ return ret;
+}
+
+/**
+ * do_xmote - Calls the DLM to change the state of a lock
+ * @gl: The lock state
+ * @gh: The holder (only for promotes)
+ * @target: The target lock state
+ *
+ */
+
+static void do_xmote(struct gfs2_glock *gl, struct gfs2_holder *gh, unsigned int target)
+{
+ const struct gfs2_glock_operations *glops = gl->gl_ops;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ unsigned int lck_flags = gh ? gh->gh_flags : 0;
+ int ret;
+
+ lck_flags &= (LM_FLAG_TRY | LM_FLAG_TRY_1CB | LM_FLAG_NOEXP |
+ LM_FLAG_PRIORITY);
+ BUG_ON(gl->gl_state == target);
+ BUG_ON(gl->gl_state == gl->gl_target);
+ if ((target == LM_ST_UNLOCKED || target == LM_ST_DEFERRED) &&
+ glops->go_inval) {
+ set_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+ do_error(gl, 0); /* Fail queued try locks */
+ }
+ spin_unlock(&gl->gl_spin);
+ if (glops->go_xmote_th)
+ glops->go_xmote_th(gl);
+ if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
+ glops->go_inval(gl, target == LM_ST_DEFERRED ? 0 : DIO_METADATA);
+ clear_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags);
+
+ gfs2_glock_hold(gl);
+ if (target != LM_ST_UNLOCKED && (gl->gl_state == LM_ST_SHARED ||
+ gl->gl_state == LM_ST_DEFERRED) &&
+ !(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+ lck_flags |= LM_FLAG_TRY_1CB;
+ ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags);
+
+ if (!(ret & LM_OUT_ASYNC)) {
+ finish_xmote(gl, ret);
+ gfs2_glock_hold(gl);
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ gfs2_glock_put(gl);
+ } else {
+ GLOCK_BUG_ON(gl, ret != LM_OUT_ASYNC);
+ }
+ spin_lock(&gl->gl_spin);
+}
+
+/**
+ * find_first_holder - find the first "holder" gh
+ * @gl: the glock
+ */
+
+static inline struct gfs2_holder *find_first_holder(const struct gfs2_glock *gl)
+{
+ struct gfs2_holder *gh;
+
+ if (!list_empty(&gl->gl_holders)) {
+ gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
+ if (test_bit(HIF_HOLDER, &gh->gh_iflags))
+ return gh;
+ }
+ return NULL;
+}
+
+/**
+ * run_queue - do all outstanding tasks related to a glock
+ * @gl: The glock in question
+ * @nonblock: True if we must not block in run_queue
+ *
+ */
+
+static void run_queue(struct gfs2_glock *gl, const int nonblock)
+{
+ struct gfs2_holder *gh = NULL;
+
+ if (test_and_set_bit(GLF_LOCK, &gl->gl_flags))
+ return;
+
+ GLOCK_BUG_ON(gl, test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags));
+
+ if (test_bit(GLF_DEMOTE, &gl->gl_flags) &&
+ gl->gl_demote_state != gl->gl_state) {
+ if (find_first_holder(gl))
+ goto out;
+ if (nonblock)
+ goto out_sched;
+ set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
+ gl->gl_target = gl->gl_demote_state;
+ } else {
+ if (test_bit(GLF_DEMOTE, &gl->gl_flags))
+ gfs2_demote_wake(gl);
+ if (do_promote(gl) == 0)
+ goto out;
+ gh = find_first_waiter(gl);
+ gl->gl_target = gh->gh_state;
+ if (!(gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
+ do_error(gl, 0); /* Fail queued try locks */
+ }
+ do_xmote(gl, gh, gl->gl_target);
+ return;
+
+out_sched:
+ gfs2_glock_hold(gl);
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ gfs2_glock_put(gl);
+out:
+ clear_bit(GLF_LOCK, &gl->gl_flags);
+}
+
static void glock_work_func(struct work_struct *work)
{
+ unsigned long delay = 0;
struct gfs2_glock *gl = container_of(work, struct gfs2_glock, gl_work.work);
+ if (test_and_clear_bit(GLF_REPLY_PENDING, &gl->gl_flags))
+ finish_xmote(gl, gl->gl_reply);
spin_lock(&gl->gl_spin);
- if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags))
- set_bit(GLF_DEMOTE, &gl->gl_flags);
- run_queue(gl);
+ if (test_and_clear_bit(GLF_PENDING_DEMOTE, &gl->gl_flags)) {
+ unsigned long holdtime, now = jiffies;
+ holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
+ if (time_before(now, holdtime))
+ delay = holdtime - now;
+ set_bit(delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE, &gl->gl_flags);
+ }
+ run_queue(gl, 0);
spin_unlock(&gl->gl_spin);
- gfs2_glock_put(gl);
+ if (!delay ||
+ queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
+ gfs2_glock_put(gl);
}
static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
void **lockp)
{
int error = -EIO;
+ if (!sdp->sd_lockstruct.ls_ops->lm_get_lock)
+ return 0;
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
sdp->sd_lockstruct.ls_lockspace, name, lockp);
@@ -342,12 +682,10 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_name = name;
atomic_set(&gl->gl_ref, 1);
gl->gl_state = LM_ST_UNLOCKED;
+ gl->gl_target = LM_ST_UNLOCKED;
gl->gl_demote_state = LM_ST_EXCLUSIVE;
gl->gl_hash = hash;
- gl->gl_owner_pid = NULL;
- gl->gl_ip = 0;
gl->gl_ops = glops;
- gl->gl_req_gh = NULL;
gl->gl_stamp = jiffies;
gl->gl_tchange = jiffies;
gl->gl_object = NULL;
@@ -447,13 +785,6 @@ void gfs2_holder_uninit(struct gfs2_holder *gh)
gh->gh_ip = 0;
}
-static void gfs2_holder_wake(struct gfs2_holder *gh)
-{
- clear_bit(HIF_WAIT, &gh->gh_iflags);
- smp_mb__after_clear_bit();
- wake_up_bit(&gh->gh_iflags, HIF_WAIT);
-}
-
static int just_schedule(void *word)
{
schedule();
@@ -466,14 +797,6 @@ static void wait_on_holder(struct gfs2_holder *gh)
wait_on_bit(&gh->gh_iflags, HIF_WAIT, just_schedule, TASK_UNINTERRUPTIBLE);
}
-static void gfs2_demote_wake(struct gfs2_glock *gl)
-{
- gl->gl_demote_state = LM_ST_EXCLUSIVE;
- clear_bit(GLF_DEMOTE, &gl->gl_flags);
- smp_mb__after_clear_bit();
- wake_up_bit(&gl->gl_flags, GLF_DEMOTE);
-}
-
static void wait_on_demote(struct gfs2_glock *gl)
{
might_sleep();
@@ -481,217 +804,6 @@ static void wait_on_demote(struct gfs2_glock *gl)
}
/**
- * rq_mutex - process a mutex request in the queue
- * @gh: the glock holder
- *
- * Returns: 1 if the queue is blocked
- */
-
-static int rq_mutex(struct gfs2_holder *gh)
-{
- struct gfs2_glock *gl = gh->gh_gl;
-
- list_del_init(&gh->gh_list);
- /* gh->gh_error never examined. */
- set_bit(GLF_LOCK, &gl->gl_flags);
- clear_bit(HIF_WAIT, &gh->gh_iflags);
- smp_mb();
- wake_up_bit(&gh->gh_iflags, HIF_WAIT);
-
- return 1;
-}
-
-/**
- * rq_promote - process a promote request in the queue
- * @gh: the glock holder
- *
- * Acquire a new inter-node lock, or change a lock state to more restrictive.
- *
- * Returns: 1 if the queue is blocked
- */
-
-static int rq_promote(struct gfs2_holder *gh)
-{
- struct gfs2_glock *gl = gh->gh_gl;
-
- if (!relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
- if (list_empty(&gl->gl_holders)) {
- gl->gl_req_gh = gh;
- set_bit(GLF_LOCK, &gl->gl_flags);
- spin_unlock(&gl->gl_spin);
- gfs2_glock_xmote_th(gh->gh_gl, gh);
- spin_lock(&gl->gl_spin);
- }
- return 1;
- }
-
- if (list_empty(&gl->gl_holders)) {
- set_bit(HIF_FIRST, &gh->gh_iflags);
- set_bit(GLF_LOCK, &gl->gl_flags);
- } else {
- struct gfs2_holder *next_gh;
- if (gh->gh_state == LM_ST_EXCLUSIVE)
- return 1;
- next_gh = list_entry(gl->gl_holders.next, struct gfs2_holder,
- gh_list);
- if (next_gh->gh_state == LM_ST_EXCLUSIVE)
- return 1;
- }
-
- list_move_tail(&gh->gh_list, &gl->gl_holders);
- gh->gh_error = 0;
- set_bit(HIF_HOLDER, &gh->gh_iflags);
-
- gfs2_holder_wake(gh);
-
- return 0;
-}
-
-/**
- * rq_demote - process a demote request in the queue
- * @gh: the glock holder
- *
- * Returns: 1 if the queue is blocked
- */
-
-static int rq_demote(struct gfs2_glock *gl)
-{
- if (!list_empty(&gl->gl_holders))
- return 1;
-
- if (gl->gl_state == gl->gl_demote_state ||
- gl->gl_state == LM_ST_UNLOCKED) {
- gfs2_demote_wake(gl);
- return 0;
- }
-
- set_bit(GLF_LOCK, &gl->gl_flags);
- set_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-
- if (gl->gl_demote_state == LM_ST_UNLOCKED ||
- gl->gl_state != LM_ST_EXCLUSIVE) {
- spin_unlock(&gl->gl_spin);
- gfs2_glock_drop_th(gl);
- } else {
- spin_unlock(&gl->gl_spin);
- gfs2_glock_xmote_th(gl, NULL);
- }
-
- spin_lock(&gl->gl_spin);
- clear_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags);
-
- return 0;
-}
-
-/**
- * run_queue - process holder structures on a glock
- * @gl: the glock
- *
- */
-static void run_queue(struct gfs2_glock *gl)
-{
- struct gfs2_holder *gh;
- int blocked = 1;
-
- for (;;) {
- if (test_bit(GLF_LOCK, &gl->gl_flags))
- break;
-
- if (!list_empty(&gl->gl_waiters1)) {
- gh = list_entry(gl->gl_waiters1.next,
- struct gfs2_holder, gh_list);
- blocked = rq_mutex(gh);
- } else if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
- blocked = rq_demote(gl);
- if (test_bit(GLF_WAITERS2, &gl->gl_flags) &&
- !blocked) {
- set_bit(GLF_DEMOTE, &gl->gl_flags);
- gl->gl_demote_state = LM_ST_UNLOCKED;
- }
- clear_bit(GLF_WAITERS2, &gl->gl_flags);
- } else if (!list_empty(&gl->gl_waiters3)) {
- gh = list_entry(gl->gl_waiters3.next,
- struct gfs2_holder, gh_list);
- blocked = rq_promote(gh);
- } else
- break;
-
- if (blocked)
- break;
- }
-}
-
-/**
- * gfs2_glmutex_lock - acquire a local lock on a glock
- * @gl: the glock
- *
- * Gives caller exclusive access to manipulate a glock structure.
- */
-
-static void gfs2_glmutex_lock(struct gfs2_glock *gl)
-{
- spin_lock(&gl->gl_spin);
- if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
- struct gfs2_holder gh;
-
- gfs2_holder_init(gl, 0, 0, &gh);
- set_bit(HIF_WAIT, &gh.gh_iflags);
- list_add_tail(&gh.gh_list, &gl->gl_waiters1);
- spin_unlock(&gl->gl_spin);
- wait_on_holder(&gh);
- gfs2_holder_uninit(&gh);
- } else {
- gl->gl_owner_pid = get_pid(task_pid(current));
- gl->gl_ip = (unsigned long)__builtin_return_address(0);
- spin_unlock(&gl->gl_spin);
- }
-}
-
-/**
- * gfs2_glmutex_trylock - try to acquire a local lock on a glock
- * @gl: the glock
- *
- * Returns: 1 if the glock is acquired
- */
-
-static int gfs2_glmutex_trylock(struct gfs2_glock *gl)
-{
- int acquired = 1;
-
- spin_lock(&gl->gl_spin);
- if (test_and_set_bit(GLF_LOCK, &gl->gl_flags)) {
- acquired = 0;
- } else {
- gl->gl_owner_pid = get_pid(task_pid(current));
- gl->gl_ip = (unsigned long)__builtin_return_address(0);
- }
- spin_unlock(&gl->gl_spin);
-
- return acquired;
-}
-
-/**
- * gfs2_glmutex_unlock - release a local lock on a glock
- * @gl: the glock
- *
- */
-
-static void gfs2_glmutex_unlock(struct gfs2_glock *gl)
-{
- struct pid *pid;
-
- spin_lock(&gl->gl_spin);
- clear_bit(GLF_LOCK, &gl->gl_flags);
- pid = gl->gl_owner_pid;
- gl->gl_owner_pid = NULL;
- gl->gl_ip = 0;
- run_queue(gl);
- spin_unlock(&gl->gl_spin);
-
- put_pid(pid);
-}
-
-/**
* handle_callback - process a demote request
* @gl: the glock
* @state: the state the caller wants us to change to
@@ -705,398 +817,45 @@ static void handle_callback(struct gfs2_glock *gl, unsigned int state,
{
int bit = delay ? GLF_PENDING_DEMOTE : GLF_DEMOTE;
- spin_lock(&gl->gl_spin);
set_bit(bit, &gl->gl_flags);
if (gl->gl_demote_state == LM_ST_EXCLUSIVE) {
gl->gl_demote_state = state;
gl->gl_demote_time = jiffies;
if (remote && gl->gl_ops->go_type == LM_TYPE_IOPEN &&
- gl->gl_object) {
+ gl->gl_object)
gfs2_glock_schedule_for_reclaim(gl);
- spin_unlock(&gl->gl_spin);
- return;
- }
} else if (gl->gl_demote_state != LM_ST_UNLOCKED &&
gl->gl_demote_state != state) {
- if (test_bit(GLF_DEMOTE_IN_PROGRESS, &gl->gl_flags))
- set_bit(GLF_WAITERS2, &gl->gl_flags);
- else
- gl->gl_demote_state = LM_ST_UNLOCKED;
- }
- spin_unlock(&gl->gl_spin);
-}
-
-/**
- * state_change - record that the glock is now in a different state
- * @gl: the glock
- * @new_state the new state
- *
- */
-
-static void state_change(struct gfs2_glock *gl, unsigned int new_state)
-{
- int held1, held2;
-
- held1 = (gl->gl_state != LM_ST_UNLOCKED);
- held2 = (new_state != LM_ST_UNLOCKED);
-
- if (held1 != held2) {
- if (held2)
- gfs2_glock_hold(gl);
- else
- gfs2_glock_put(gl);
- }
-
- gl->gl_state = new_state;
- gl->gl_tchange = jiffies;
-}
-
-/**
- * drop_bh - Called after a lock module unlock completes
- * @gl: the glock
- * @ret: the return status
- *
- * Doesn't wake up the process waiting on the struct gfs2_holder (if any)
- * Doesn't drop the reference on the glock the top half took out
- *
- */
-
-static void drop_bh(struct gfs2_glock *gl, unsigned int ret)
-{
- struct gfs2_sbd *sdp = gl->gl_sbd;
- struct gfs2_holder *gh = gl->gl_req_gh;
-
- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
- gfs2_assert_warn(sdp, !ret);
-
- state_change(gl, LM_ST_UNLOCKED);
-
- if (test_and_clear_bit(GLF_CONV_DEADLK, &gl->gl_flags)) {
- spin_lock(&gl->gl_spin);
- gh->gh_error = 0;
- spin_unlock(&gl->gl_spin);
- gfs2_glock_xmote_th(gl, gl->gl_req_gh);
- gfs2_glock_put(gl);
- return;
- }
-
- spin_lock(&gl->gl_spin);
- gfs2_demote_wake(gl);
- clear_bit(GLF_LOCK, &gl->gl_flags);
- spin_unlock(&gl->gl_spin);
- gfs2_glock_put(gl);
-}
-
-/**
- * xmote_bh - Called after the lock module is done acquiring a lock
- * @gl: The glock in question
- * @ret: the int returned from the lock module
- *
- */
-
-static void xmote_bh(struct gfs2_glock *gl, unsigned int ret)
-{
- struct gfs2_sbd *sdp = gl->gl_sbd;
- const struct gfs2_glock_operations *glops = gl->gl_ops;
- struct gfs2_holder *gh = gl->gl_req_gh;
- int op_done = 1;
-
- if (!gh && (ret & LM_OUT_ST_MASK) == LM_ST_UNLOCKED) {
- drop_bh(gl, ret);
- return;
- }
-
- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
- gfs2_assert_warn(sdp, !(ret & LM_OUT_ASYNC));
-
- state_change(gl, ret & LM_OUT_ST_MASK);
-
- /* Deal with each possible exit condition */
-
- if (!gh) {
- gl->gl_stamp = jiffies;
- if (ret & LM_OUT_CANCELED) {
- op_done = 0;
- } else {
- spin_lock(&gl->gl_spin);
- if (gl->gl_state != gl->gl_demote_state) {
- spin_unlock(&gl->gl_spin);
- gfs2_glock_drop_th(gl);
- gfs2_glock_put(gl);
- return;
- }
- gfs2_demote_wake(gl);
- spin_unlock(&gl->gl_spin);
- }
- } else {
- spin_lock(&gl->gl_spin);
- if (ret & LM_OUT_CONV_DEADLK) {
- gh->gh_error = 0;
- set_bit(GLF_CONV_DEADLK, &gl->gl_flags);
- spin_unlock(&gl->gl_spin);
- gfs2_glock_drop_th(gl);
- gfs2_glock_put(gl);
- return;
- }
- list_del_init(&gh->gh_list);
- gh->gh_error = -EIO;
- if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- goto out;
- gh->gh_error = GLR_CANCELED;
- if (ret & LM_OUT_CANCELED)
- goto out;
- if (relaxed_state_ok(gl->gl_state, gh->gh_state, gh->gh_flags)) {
- list_add_tail(&gh->gh_list, &gl->gl_holders);
- gh->gh_error = 0;
- set_bit(HIF_HOLDER, &gh->gh_iflags);
- set_bit(HIF_FIRST, &gh->gh_iflags);
- op_done = 0;
- goto out;
- }
- gh->gh_error = GLR_TRYFAILED;
- if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB))
- goto out;
- gh->gh_error = -EINVAL;
- if (gfs2_assert_withdraw(sdp, 0) == -1)
- fs_err(sdp, "ret = 0x%.8X\n", ret);
-out:
- spin_unlock(&gl->gl_spin);
- }
-
- if (glops->go_xmote_bh)
- glops->go_xmote_bh(gl);
-
- if (op_done) {
- spin_lock(&gl->gl_spin);
- gl->gl_req_gh = NULL;
- clear_bit(GLF_LOCK, &gl->gl_flags);
- spin_unlock(&gl->gl_spin);
- }
-
- gfs2_glock_put(gl);
-
- if (gh)
- gfs2_holder_wake(gh);
-}
-
-static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
- unsigned int cur_state, unsigned int req_state,
- unsigned int flags)
-{
- int ret = 0;
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
- req_state, flags);
- return ret;
-}
-
-/**
- * gfs2_glock_xmote_th - Call into the lock module to acquire or change a glock
- * @gl: The glock in question
- * @state: the requested state
- * @flags: modifier flags to the lock call
- *
- */
-
-static void gfs2_glock_xmote_th(struct gfs2_glock *gl, struct gfs2_holder *gh)
-{
- struct gfs2_sbd *sdp = gl->gl_sbd;
- int flags = gh ? gh->gh_flags : 0;
- unsigned state = gh ? gh->gh_state : gl->gl_demote_state;
- const struct gfs2_glock_operations *glops = gl->gl_ops;
- int lck_flags = flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB |
- LM_FLAG_NOEXP | LM_FLAG_ANY |
- LM_FLAG_PRIORITY);
- unsigned int lck_ret;
-
- if (glops->go_xmote_th)
- glops->go_xmote_th(gl);
- if (state == LM_ST_DEFERRED && glops->go_inval)
- glops->go_inval(gl, DIO_METADATA);
-
- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
- gfs2_assert_warn(sdp, state != LM_ST_UNLOCKED);
- gfs2_assert_warn(sdp, state != gl->gl_state);
-
- gfs2_glock_hold(gl);
-
- lck_ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, state, lck_flags);
-
- if (gfs2_assert_withdraw(sdp, !(lck_ret & LM_OUT_ERROR)))
- return;
-
- if (lck_ret & LM_OUT_ASYNC)
- gfs2_assert_warn(sdp, lck_ret == LM_OUT_ASYNC);
- else
- xmote_bh(gl, lck_ret);
-}
-
-static unsigned int gfs2_lm_unlock(struct gfs2_sbd *sdp, void *lock,
- unsigned int cur_state)
-{
- int ret = 0;
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- ret = sdp->sd_lockstruct.ls_ops->lm_unlock(lock, cur_state);
- return ret;
-}
-
-/**
- * gfs2_glock_drop_th - call into the lock module to unlock a lock
- * @gl: the glock
- *
- */
-
-static void gfs2_glock_drop_th(struct gfs2_glock *gl)
-{
- struct gfs2_sbd *sdp = gl->gl_sbd;
- const struct gfs2_glock_operations *glops = gl->gl_ops;
- unsigned int ret;
-
- if (glops->go_xmote_th)
- glops->go_xmote_th(gl);
- if (glops->go_inval)
- glops->go_inval(gl, DIO_METADATA);
-
- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
- gfs2_assert_warn(sdp, list_empty(&gl->gl_holders));
- gfs2_assert_warn(sdp, gl->gl_state != LM_ST_UNLOCKED);
-
- gfs2_glock_hold(gl);
-
- ret = gfs2_lm_unlock(sdp, gl->gl_lock, gl->gl_state);
-
- if (gfs2_assert_withdraw(sdp, !(ret & LM_OUT_ERROR)))
- return;
-
- if (!ret)
- drop_bh(gl, ret);
- else
- gfs2_assert_warn(sdp, ret == LM_OUT_ASYNC);
-}
-
-/**
- * do_cancels - cancel requests for locks stuck waiting on an expire flag
- * @gh: the LM_FLAG_PRIORITY holder waiting to acquire the lock
- *
- * Don't cancel GL_NOCANCEL requests.
- */
-
-static void do_cancels(struct gfs2_holder *gh)
-{
- struct gfs2_glock *gl = gh->gh_gl;
- struct gfs2_sbd *sdp = gl->gl_sbd;
-
- spin_lock(&gl->gl_spin);
-
- while (gl->gl_req_gh != gh &&
- !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
- !list_empty(&gh->gh_list)) {
- if (!(gl->gl_req_gh && (gl->gl_req_gh->gh_flags & GL_NOCANCEL))) {
- spin_unlock(&gl->gl_spin);
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
- msleep(100);
- spin_lock(&gl->gl_spin);
- } else {
- spin_unlock(&gl->gl_spin);
- msleep(100);
- spin_lock(&gl->gl_spin);
- }
+ gl->gl_demote_state = LM_ST_UNLOCKED;
}
-
- spin_unlock(&gl->gl_spin);
}
/**
- * glock_wait_internal - wait on a glock acquisition
+ * gfs2_glock_wait - wait on a glock acquisition
* @gh: the glock holder
*
* Returns: 0 on success
*/
-static int glock_wait_internal(struct gfs2_holder *gh)
+int gfs2_glock_wait(struct gfs2_holder *gh)
{
- struct gfs2_glock *gl = gh->gh_gl;
- struct gfs2_sbd *sdp = gl->gl_sbd;
- const struct gfs2_glock_operations *glops = gl->gl_ops;
-
- if (test_bit(HIF_ABORTED, &gh->gh_iflags))
- return -EIO;
-
- if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
- spin_lock(&gl->gl_spin);
- if (gl->gl_req_gh != gh &&
- !test_bit(HIF_HOLDER, &gh->gh_iflags) &&
- !list_empty(&gh->gh_list)) {
- list_del_init(&gh->gh_list);
- gh->gh_error = GLR_TRYFAILED;
- run_queue(gl);
- spin_unlock(&gl->gl_spin);
- return gh->gh_error;
- }
- spin_unlock(&gl->gl_spin);
- }
-
- if (gh->gh_flags & LM_FLAG_PRIORITY)
- do_cancels(gh);
-
wait_on_holder(gh);
- if (gh->gh_error)
- return gh->gh_error;
-
- gfs2_assert_withdraw(sdp, test_bit(HIF_HOLDER, &gh->gh_iflags));
- gfs2_assert_withdraw(sdp, relaxed_state_ok(gl->gl_state, gh->gh_state,
- gh->gh_flags));
-
- if (test_bit(HIF_FIRST, &gh->gh_iflags)) {
- gfs2_assert_warn(sdp, test_bit(GLF_LOCK, &gl->gl_flags));
-
- if (glops->go_lock) {
- gh->gh_error = glops->go_lock(gh);
- if (gh->gh_error) {
- spin_lock(&gl->gl_spin);
- list_del_init(&gh->gh_list);
- spin_unlock(&gl->gl_spin);
- }
- }
-
- spin_lock(&gl->gl_spin);
- gl->gl_req_gh = NULL;
- clear_bit(GLF_LOCK, &gl->gl_flags);
- run_queue(gl);
- spin_unlock(&gl->gl_spin);
- }
-
return gh->gh_error;
}
-static inline struct gfs2_holder *
-find_holder_by_owner(struct list_head *head, struct pid *pid)
-{
- struct gfs2_holder *gh;
-
- list_for_each_entry(gh, head, gh_list) {
- if (gh->gh_owner_pid == pid)
- return gh;
- }
-
- return NULL;
-}
-
-static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
+void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...)
{
va_list args;
va_start(args, fmt);
- if (gi) {
+ if (seq) {
+ struct gfs2_glock_iter *gi = seq->private;
vsprintf(gi->string, fmt, args);
- seq_printf(gi->seq, gi->string);
- }
- else
+ seq_printf(seq, gi->string);
+ } else {
+ printk(KERN_ERR " ");
vprintk(fmt, args);
+ }
va_end(args);
}
@@ -1104,50 +863,76 @@ static void print_dbg(struct glock_iter *gi, const char *fmt, ...)
* add_to_queue - Add a holder to the wait queue (but look for recursion)
* @gh: the holder structure to add
*
+ * Eventually we should move the recursive locking trap to a
+ * debugging option or something like that. This is the fast
+ * path and needs to have the minimum number of distractions.
+ *
*/
-static void add_to_queue(struct gfs2_holder *gh)
+static inline void add_to_queue(struct gfs2_holder *gh)
{
struct gfs2_glock *gl = gh->gh_gl;
- struct gfs2_holder *existing;
+ struct gfs2_sbd *sdp = gl->gl_sbd;
+ struct list_head *insert_pt = NULL;
+ struct gfs2_holder *gh2;
+ int try_lock = 0;
BUG_ON(gh->gh_owner_pid == NULL);
if (test_and_set_bit(HIF_WAIT, &gh->gh_iflags))
BUG();
- if (!(gh->gh_flags & GL_FLOCK)) {
- existing = find_holder_by_owner(&gl->gl_holders,
- gh->gh_owner_pid);
- if (existing) {
- print_symbol(KERN_WARNING "original: %s\n",
- existing->gh_ip);
- printk(KERN_INFO "pid : %d\n",
- pid_nr(existing->gh_owner_pid));
- printk(KERN_INFO "lock type : %d lock state : %d\n",
- existing->gh_gl->gl_name.ln_type,
- existing->gh_gl->gl_state);
- print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
- printk(KERN_INFO "pid : %d\n",
- pid_nr(gh->gh_owner_pid));
- printk(KERN_INFO "lock type : %d lock state : %d\n",
- gl->gl_name.ln_type, gl->gl_state);
- BUG();
- }
-
- existing = find_holder_by_owner(&gl->gl_waiters3,
- gh->gh_owner_pid);
- if (existing) {
- print_symbol(KERN_WARNING "original: %s\n",
- existing->gh_ip);
- print_symbol(KERN_WARNING "new: %s\n", gh->gh_ip);
- BUG();
+ if (gh->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) {
+ if (test_bit(GLF_LOCK, &gl->gl_flags))
+ try_lock = 1;
+ if (test_bit(GLF_INVALIDATE_IN_PROGRESS, &gl->gl_flags))
+ goto fail;
+ }
+
+ list_for_each_entry(gh2, &gl->gl_holders, gh_list) {
+ if (unlikely(gh2->gh_owner_pid == gh->gh_owner_pid &&
+ (gh->gh_gl->gl_ops->go_type != LM_TYPE_FLOCK)))
+ goto trap_recursive;
+ if (try_lock &&
+ !(gh2->gh_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)) &&
+ !may_grant(gl, gh)) {
+fail:
+ gh->gh_error = GLR_TRYFAILED;
+ gfs2_holder_wake(gh);
+ return;
}
+ if (test_bit(HIF_HOLDER, &gh2->gh_iflags))
+ continue;
+ if (unlikely((gh->gh_flags & LM_FLAG_PRIORITY) && !insert_pt))
+ insert_pt = &gh2->gh_list;
+ }
+ if (likely(insert_pt == NULL)) {
+ list_add_tail(&gh->gh_list, &gl->gl_holders);
+ if (unlikely(gh->gh_flags & LM_FLAG_PRIORITY))
+ goto do_cancel;
+ return;
}
+ list_add_tail(&gh->gh_list, insert_pt);
+do_cancel:
+ gh = list_entry(gl->gl_holders.next, struct gfs2_holder, gh_list);
+ if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
+ spin_unlock(&gl->gl_spin);
+ if (sdp->sd_lockstruct.ls_ops->lm_cancel)
+ sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
+ spin_lock(&gl->gl_spin);
+ }
+ return;
- if (gh->gh_flags & LM_FLAG_PRIORITY)
- list_add(&gh->gh_list, &gl->gl_waiters3);
- else
- list_add_tail(&gh->gh_list, &gl->gl_waiters3);
+trap_recursive:
+ print_symbol(KERN_ERR "original: %s\n", gh2->gh_ip);
+ printk(KERN_ERR "pid: %d\n", pid_nr(gh2->gh_owner_pid));
+ printk(KERN_ERR "lock type: %d req lock state : %d\n",
+ gh2->gh_gl->gl_name.ln_type, gh2->gh_state);
+ print_symbol(KERN_ERR "new: %s\n", gh->gh_ip);
+ printk(KERN_ERR "pid: %d\n", pid_nr(gh->gh_owner_pid));
+ printk(KERN_ERR "lock type: %d req lock state : %d\n",
+ gh->gh_gl->gl_name.ln_type, gh->gh_state);
+ __dump_glock(NULL, gl);
+ BUG();
}
/**
@@ -1165,24 +950,16 @@ int gfs2_glock_nq(struct gfs2_holder *gh)
struct gfs2_sbd *sdp = gl->gl_sbd;
int error = 0;
-restart:
- if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags))) {
- set_bit(HIF_ABORTED, &gh->gh_iflags);
+ if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
return -EIO;
- }
spin_lock(&gl->gl_spin);
add_to_queue(gh);
- run_queue(gl);
+ run_queue(gl, 1);
spin_unlock(&gl->gl_spin);
- if (!(gh->gh_flags & GL_ASYNC)) {
- error = glock_wait_internal(gh);
- if (error == GLR_CANCELED) {
- msleep(100);
- goto restart;
- }
- }
+ if (!(gh->gh_flags & GL_ASYNC))
+ error = gfs2_glock_wait(gh);
return error;
}
@@ -1196,48 +973,7 @@ restart:
int gfs2_glock_poll(struct gfs2_holder *gh)
{
- struct gfs2_glock *gl = gh->gh_gl;
- int ready = 0;
-
- spin_lock(&gl->gl_spin);
-
- if (test_bit(HIF_HOLDER, &gh->gh_iflags))
- ready = 1;
- else if (list_empty(&gh->gh_list)) {
- if (gh->gh_error == GLR_CANCELED) {
- spin_unlock(&gl->gl_spin);
- msleep(100);
- if (gfs2_glock_nq(gh))
- return 1;
- return 0;
- } else
- ready = 1;
- }
-
- spin_unlock(&gl->gl_spin);
-
- return ready;
-}
-
-/**
- * gfs2_glock_wait - wait for a lock acquisition that ended in a GLR_ASYNC
- * @gh: the holder structure
- *
- * Returns: 0, GLR_TRYFAILED, or errno on failure
- */
-
-int gfs2_glock_wait(struct gfs2_holder *gh)
-{
- int error;
-
- error = glock_wait_internal(gh);
- if (error == GLR_CANCELED) {
- msleep(100);
- gh->gh_flags &= ~GL_ASYNC;
- error = gfs2_glock_nq(gh);
- }
-
- return error;
+ return test_bit(HIF_WAIT, &gh->gh_iflags) ? 0 : 1;
}
/**
@@ -1251,26 +987,30 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
struct gfs2_glock *gl = gh->gh_gl;
const struct gfs2_glock_operations *glops = gl->gl_ops;
unsigned delay = 0;
+ int fast_path = 0;
+ spin_lock(&gl->gl_spin);
if (gh->gh_flags & GL_NOCACHE)
handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
- gfs2_glmutex_lock(gl);
-
- spin_lock(&gl->gl_spin);
list_del_init(&gh->gh_list);
-
- if (list_empty(&gl->gl_holders)) {
+ if (find_first_holder(gl) == NULL) {
if (glops->go_unlock) {
+ GLOCK_BUG_ON(gl, test_and_set_bit(GLF_LOCK, &gl->gl_flags));
spin_unlock(&gl->gl_spin);
glops->go_unlock(gh);
spin_lock(&gl->gl_spin);
+ clear_bit(GLF_LOCK, &gl->gl_flags);
}
gl->gl_stamp = jiffies;
+ if (list_empty(&gl->gl_holders) &&
+ !test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
+ !test_bit(GLF_DEMOTE, &gl->gl_flags))
+ fast_path = 1;
}
-
- clear_bit(GLF_LOCK, &gl->gl_flags);
spin_unlock(&gl->gl_spin);
+ if (likely(fast_path))
+ return;
gfs2_glock_hold(gl);
if (test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
@@ -1454,6 +1194,8 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
{
int error = -EIO;
+ if (!sdp->sd_lockstruct.ls_ops->lm_hold_lvb)
+ return 0;
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
return error;
@@ -1469,20 +1211,14 @@ int gfs2_lvb_hold(struct gfs2_glock *gl)
{
int error;
- gfs2_glmutex_lock(gl);
-
if (!atomic_read(&gl->gl_lvb_count)) {
error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
- if (error) {
- gfs2_glmutex_unlock(gl);
+ if (error)
return error;
- }
gfs2_glock_hold(gl);
}
atomic_inc(&gl->gl_lvb_count);
- gfs2_glmutex_unlock(gl);
-
return 0;
}
@@ -1497,17 +1233,13 @@ void gfs2_lvb_unhold(struct gfs2_glock *gl)
struct gfs2_sbd *sdp = gl->gl_sbd;
gfs2_glock_hold(gl);
- gfs2_glmutex_lock(gl);
-
gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
if (atomic_dec_and_test(&gl->gl_lvb_count)) {
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+ if (sdp->sd_lockstruct.ls_ops->lm_unhold_lvb)
sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
gl->gl_lvb = NULL;
gfs2_glock_put(gl);
}
-
- gfs2_glmutex_unlock(gl);
gfs2_glock_put(gl);
}
@@ -1527,7 +1259,9 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
if (time_before(now, holdtime))
delay = holdtime - now;
+ spin_lock(&gl->gl_spin);
handle_callback(gl, state, 1, delay);
+ spin_unlock(&gl->gl_spin);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, delay) == 0)
gfs2_glock_put(gl);
}
@@ -1568,7 +1302,8 @@ void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
gl = gfs2_glock_find(sdp, &async->lc_name);
if (gfs2_assert_warn(sdp, gl))
return;
- xmote_bh(gl, async->lc_ret);
+ gl->gl_reply = async->lc_ret;
+ set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
gfs2_glock_put(gl);
up_read(&gfs2_umount_flush_sem);
@@ -1646,6 +1381,7 @@ void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl)
void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
{
struct gfs2_glock *gl;
+ int done_callback = 0;
spin_lock(&sdp->sd_reclaim_lock);
if (list_empty(&sdp->sd_reclaim_list)) {
@@ -1660,14 +1396,16 @@ void gfs2_reclaim_glock(struct gfs2_sbd *sdp)
atomic_dec(&sdp->sd_reclaim_count);
atomic_inc(&sdp->sd_reclaimed);
- if (gfs2_glmutex_trylock(gl)) {
- if (list_empty(&gl->gl_holders) &&
- gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
- handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
- gfs2_glmutex_unlock(gl);
+ spin_lock(&gl->gl_spin);
+ if (find_first_holder(gl) == NULL &&
+ gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl)) {
+ handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
+ done_callback = 1;
}
-
- gfs2_glock_put(gl);
+ spin_unlock(&gl->gl_spin);
+ if (!done_callback ||
+ queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ gfs2_glock_put(gl);
}
/**
@@ -1724,18 +1462,14 @@ static void scan_glock(struct gfs2_glock *gl)
{
if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object)
return;
+ if (test_bit(GLF_LOCK, &gl->gl_flags))
+ return;
- if (gfs2_glmutex_trylock(gl)) {
- if (list_empty(&gl->gl_holders) &&
- gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
- goto out_schedule;
- gfs2_glmutex_unlock(gl);
- }
- return;
-
-out_schedule:
- gfs2_glmutex_unlock(gl);
- gfs2_glock_schedule_for_reclaim(gl);
+ spin_lock(&gl->gl_spin);
+ if (find_first_holder(gl) == NULL &&
+ gl->gl_state != LM_ST_UNLOCKED && demote_ok(gl))
+ gfs2_glock_schedule_for_reclaim(gl);
+ spin_unlock(&gl->gl_spin);
}
/**
@@ -1760,12 +1494,13 @@ static void clear_glock(struct gfs2_glock *gl)
spin_unlock(&sdp->sd_reclaim_lock);
}
- if (gfs2_glmutex_trylock(gl)) {
- if (list_empty(&gl->gl_holders) &&
- gl->gl_state != LM_ST_UNLOCKED)
- handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
- gfs2_glmutex_unlock(gl);
- }
+ spin_lock(&gl->gl_spin);
+ if (find_first_holder(gl) == NULL && gl->gl_state != LM_ST_UNLOCKED)
+ handle_callback(gl, LM_ST_UNLOCKED, 0, 0);
+ spin_unlock(&gl->gl_spin);
+ gfs2_glock_hold(gl);
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ gfs2_glock_put(gl);
}
/**
@@ -1810,180 +1545,164 @@ void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait)
}
}
-/*
- * Diagnostic routines to help debug distributed deadlock
- */
-
-static void gfs2_print_symbol(struct glock_iter *gi, const char *fmt,
- unsigned long address)
+static const char *state2str(unsigned state)
{
- char buffer[KSYM_SYMBOL_LEN];
-
- sprint_symbol(buffer, address);
- print_dbg(gi, fmt, buffer);
+ switch(state) {
+ case LM_ST_UNLOCKED:
+ return "UN";
+ case LM_ST_SHARED:
+ return "SH";
+ case LM_ST_DEFERRED:
+ return "DF";
+ case LM_ST_EXCLUSIVE:
+ return "EX";
+ }
+ return "??";
+}
+
+static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
+{
+ char *p = buf;
+ if (flags & LM_FLAG_TRY)
+ *p++ = 't';
+ if (flags & LM_FLAG_TRY_1CB)
+ *p++ = 'T';
+ if (flags & LM_FLAG_NOEXP)
+ *p++ = 'e';
+ if (flags & LM_FLAG_ANY)
+ *p++ = 'a';
+ if (flags & LM_FLAG_PRIORITY)
+ *p++ = 'p';
+ if (flags & GL_ASYNC)
+ *p++ = 'a';
+ if (flags & GL_EXACT)
+ *p++ = 'E';
+ if (flags & GL_ATIME)
+ *p++ = 'a';
+ if (flags & GL_NOCACHE)
+ *p++ = 'c';
+ if (test_bit(HIF_HOLDER, &iflags))
+ *p++ = 'H';
+ if (test_bit(HIF_WAIT, &iflags))
+ *p++ = 'W';
+ if (test_bit(HIF_FIRST, &iflags))
+ *p++ = 'F';
+ *p = 0;
+ return buf;
}
/**
* dump_holder - print information about a glock holder
- * @str: a string naming the type of holder
+ * @seq: the seq_file struct
* @gh: the glock holder
*
* Returns: 0 on success, -ENOBUFS when we run out of space
*/
-static int dump_holder(struct glock_iter *gi, char *str,
- struct gfs2_holder *gh)
+static int dump_holder(struct seq_file *seq, const struct gfs2_holder *gh)
{
- unsigned int x;
- struct task_struct *gh_owner;
+ struct task_struct *gh_owner = NULL;
+ char buffer[KSYM_SYMBOL_LEN];
+ char flags_buf[32];
- print_dbg(gi, " %s\n", str);
- if (gh->gh_owner_pid) {
- print_dbg(gi, " owner = %ld ",
- (long)pid_nr(gh->gh_owner_pid));
+ sprint_symbol(buffer, gh->gh_ip);
+ if (gh->gh_owner_pid)
gh_owner = pid_task(gh->gh_owner_pid, PIDTYPE_PID);
- if (gh_owner)
- print_dbg(gi, "(%s)\n", gh_owner->comm);
- else
- print_dbg(gi, "(ended)\n");
- } else
- print_dbg(gi, " owner = -1\n");
- print_dbg(gi, " gh_state = %u\n", gh->gh_state);
- print_dbg(gi, " gh_flags =");
- for (x = 0; x < 32; x++)
- if (gh->gh_flags & (1 << x))
- print_dbg(gi, " %u", x);
- print_dbg(gi, " \n");
- print_dbg(gi, " error = %d\n", gh->gh_error);
- print_dbg(gi, " gh_iflags =");
- for (x = 0; x < 32; x++)
- if (test_bit(x, &gh->gh_iflags))
- print_dbg(gi, " %u", x);
- print_dbg(gi, " \n");
- gfs2_print_symbol(gi, " initialized at: %s\n", gh->gh_ip);
-
+ gfs2_print_dbg(seq, " H: s:%s f:%s e:%d p:%ld [%s] %s\n",
+ state2str(gh->gh_state),
+ hflags2str(flags_buf, gh->gh_flags, gh->gh_iflags),
+ gh->gh_error,
+ gh->gh_owner_pid ? (long)pid_nr(gh->gh_owner_pid) : -1,
+ gh_owner ? gh_owner->comm : "(ended)", buffer);
return 0;
}
-/**
- * dump_inode - print information about an inode
- * @ip: the inode
- *
- * Returns: 0 on success, -ENOBUFS when we run out of space
- */
-
-static int dump_inode(struct glock_iter *gi, struct gfs2_inode *ip)
-{
- unsigned int x;
-
- print_dbg(gi, " Inode:\n");
- print_dbg(gi, " num = %llu/%llu\n",
- (unsigned long long)ip->i_no_formal_ino,
- (unsigned long long)ip->i_no_addr);
- print_dbg(gi, " type = %u\n", IF2DT(ip->i_inode.i_mode));
- print_dbg(gi, " i_flags =");
- for (x = 0; x < 32; x++)
- if (test_bit(x, &ip->i_flags))
- print_dbg(gi, " %u", x);
- print_dbg(gi, " \n");
- return 0;
+static const char *gflags2str(char *buf, const unsigned long *gflags)
+{
+ char *p = buf;
+ if (test_bit(GLF_LOCK, gflags))
+ *p++ = 'l';
+ if (test_bit(GLF_STICKY, gflags))
+ *p++ = 's';
+ if (test_bit(GLF_DEMOTE, gflags))
+ *p++ = 'D';
+ if (test_bit(GLF_PENDING_DEMOTE, gflags))
+ *p++ = 'd';
+ if (test_bit(GLF_DEMOTE_IN_PROGRESS, gflags))
+ *p++ = 'p';
+ if (test_bit(GLF_DIRTY, gflags))
+ *p++ = 'y';
+ if (test_bit(GLF_LFLUSH, gflags))
+ *p++ = 'f';
+ if (test_bit(GLF_INVALIDATE_IN_PROGRESS, gflags))
+ *p++ = 'i';
+ if (test_bit(GLF_REPLY_PENDING, gflags))
+ *p++ = 'r';
+ *p = 0;
+ return buf;
}
/**
- * dump_glock - print information about a glock
+ * __dump_glock - print information about a glock
+ * @seq: The seq_file struct
* @gl: the glock
- * @count: where we are in the buffer
+ *
+ * The file format is as follows:
+ * One line per object, capital letters are used to indicate objects
+ * G = glock, I = Inode, R = rgrp, H = holder. Glocks are not indented,
+ * other objects are indented by a single space and follow the glock to
+ * which they are related. Fields are indicated by lower case letters
+ * followed by a colon and the field value, except for strings which are in
+ * [] so that it's possible to see if they are composed of spaces for
+ * example. The fields are n = number (id of the object), f = flags,
+ * t = type, s = state, r = refcount, e = error, p = pid.
*
* Returns: 0 on success, -ENOBUFS when we run out of space
*/
-static int dump_glock(struct glock_iter *gi, struct gfs2_glock *gl)
+static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
{
- struct gfs2_holder *gh;
- unsigned int x;
- int error = -ENOBUFS;
- struct task_struct *gl_owner;
+ const struct gfs2_glock_operations *glops = gl->gl_ops;
+ unsigned long long dtime;
+ const struct gfs2_holder *gh;
+ char gflags_buf[32];
+ int error = 0;
- spin_lock(&gl->gl_spin);
+ dtime = jiffies - gl->gl_demote_time;
+ dtime *= 1000000/HZ; /* demote time in uSec */
+ if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
+ dtime = 0;
+ gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu l:%d a:%d r:%d\n",
+ state2str(gl->gl_state),
+ gl->gl_name.ln_type,
+ (unsigned long long)gl->gl_name.ln_number,
+ gflags2str(gflags_buf, &gl->gl_flags),
+ state2str(gl->gl_target),
+ state2str(gl->gl_demote_state), dtime,
+ atomic_read(&gl->gl_lvb_count),
+ atomic_read(&gl->gl_ail_count),
+ atomic_read(&gl->gl_ref));
- print_dbg(gi, "Glock 0x%p (%u, 0x%llx)\n", gl, gl->gl_name.ln_type,
- (unsigned long long)gl->gl_name.ln_number);
- print_dbg(gi, " gl_flags =");
- for (x = 0; x < 32; x++) {
- if (test_bit(x, &gl->gl_flags))
- print_dbg(gi, " %u", x);
- }
- if (!test_bit(GLF_LOCK, &gl->gl_flags))
- print_dbg(gi, " (unlocked)");
- print_dbg(gi, " \n");
- print_dbg(gi, " gl_ref = %d\n", atomic_read(&gl->gl_ref));
- print_dbg(gi, " gl_state = %u\n", gl->gl_state);
- if (gl->gl_owner_pid) {
- gl_owner = pid_task(gl->gl_owner_pid, PIDTYPE_PID);
- if (gl_owner)
- print_dbg(gi, " gl_owner = pid %d (%s)\n",
- pid_nr(gl->gl_owner_pid), gl_owner->comm);
- else
- print_dbg(gi, " gl_owner = %d (ended)\n",
- pid_nr(gl->gl_owner_pid));
- } else
- print_dbg(gi, " gl_owner = -1\n");
- print_dbg(gi, " gl_ip = %lu\n", gl->gl_ip);
- print_dbg(gi, " req_gh = %s\n", (gl->gl_req_gh) ? "yes" : "no");
- print_dbg(gi, " lvb_count = %d\n", atomic_read(&gl->gl_lvb_count));
- print_dbg(gi, " object = %s\n", (gl->gl_object) ? "yes" : "no");
- print_dbg(gi, " reclaim = %s\n",
- (list_empty(&gl->gl_reclaim)) ? "no" : "yes");
- if (gl->gl_aspace)
- print_dbg(gi, " aspace = 0x%p nrpages = %lu\n", gl->gl_aspace,
- gl->gl_aspace->i_mapping->nrpages);
- else
- print_dbg(gi, " aspace = no\n");
- print_dbg(gi, " ail = %d\n", atomic_read(&gl->gl_ail_count));
- if (gl->gl_req_gh) {
- error = dump_holder(gi, "Request", gl->gl_req_gh);
- if (error)
- goto out;
- }
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
- error = dump_holder(gi, "Holder", gh);
+ error = dump_holder(seq, gh);
if (error)
goto out;
}
- list_for_each_entry(gh, &gl->gl_waiters1, gh_list) {
- error = dump_holder(gi, "Waiter1", gh);
- if (error)
- goto out;
- }
- list_for_each_entry(gh, &gl->gl_waiters3, gh_list) {
- error = dump_holder(gi, "Waiter3", gh);
- if (error)
- goto out;
- }
- if (test_bit(GLF_DEMOTE, &gl->gl_flags)) {
- print_dbg(gi, " Demotion req to state %u (%llu uS ago)\n",
- gl->gl_demote_state, (unsigned long long)
- (jiffies - gl->gl_demote_time)*(1000000/HZ));
- }
- if (gl->gl_ops == &gfs2_inode_glops && gl->gl_object) {
- if (!test_bit(GLF_LOCK, &gl->gl_flags) &&
- list_empty(&gl->gl_holders)) {
- error = dump_inode(gi, gl->gl_object);
- if (error)
- goto out;
- } else {
- error = -ENOBUFS;
- print_dbg(gi, " Inode: busy\n");
- }
- }
-
- error = 0;
-
+ if (gl->gl_state != LM_ST_UNLOCKED && glops->go_dump)
+ error = glops->go_dump(seq, gl);
out:
- spin_unlock(&gl->gl_spin);
return error;
}
+static int dump_glock(struct seq_file *seq, struct gfs2_glock *gl)
+{
+ int ret;
+ spin_lock(&gl->gl_spin);
+ ret = __dump_glock(seq, gl);
+ spin_unlock(&gl->gl_spin);
+ return ret;
+}
+
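For illustration of the format described in the comment above (every value below is invented), a dump of a shared inode glock with a single granted holder might look roughly like:

	G: s:SH n:2/1055 f:l t:SH d:UN/0 l:0 a:1 r:4
	 H: s:SH f:H e:0 p:2847 [cat] gfs2_readpage+0x5a/0x190 [gfs2]
	 I: n:17/1055 t:8 f:0x00000000

The f: letters come from gflags2str()/hflags2str() above; an rgrp glock would carry an " R: n:<rgrp address>" line in place of the " I:" line.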
/**
* gfs2_dump_lockstate - print out the current lockstate
* @sdp: the filesystem
@@ -2086,7 +1805,7 @@ void gfs2_glock_exit(void)
module_param(scand_secs, uint, S_IRUGO|S_IWUSR);
MODULE_PARM_DESC(scand_secs, "The number of seconds between scand runs");
-static int gfs2_glock_iter_next(struct glock_iter *gi)
+static int gfs2_glock_iter_next(struct gfs2_glock_iter *gi)
{
struct gfs2_glock *gl;
@@ -2104,7 +1823,7 @@ restart:
gfs2_glock_put(gl);
if (gl && gi->gl == NULL)
gi->hash++;
- while(gi->gl == NULL) {
+ while (gi->gl == NULL) {
if (gi->hash >= GFS2_GL_HASH_SIZE)
return 1;
read_lock(gl_lock_addr(gi->hash));
@@ -2122,58 +1841,34 @@ restart:
return 0;
}
-static void gfs2_glock_iter_free(struct glock_iter *gi)
+static void gfs2_glock_iter_free(struct gfs2_glock_iter *gi)
{
if (gi->gl)
gfs2_glock_put(gi->gl);
- kfree(gi);
-}
-
-static struct glock_iter *gfs2_glock_iter_init(struct gfs2_sbd *sdp)
-{
- struct glock_iter *gi;
-
- gi = kmalloc(sizeof (*gi), GFP_KERNEL);
- if (!gi)
- return NULL;
-
- gi->sdp = sdp;
- gi->hash = 0;
- gi->seq = NULL;
gi->gl = NULL;
- memset(gi->string, 0, sizeof(gi->string));
-
- if (gfs2_glock_iter_next(gi)) {
- gfs2_glock_iter_free(gi);
- return NULL;
- }
-
- return gi;
}
-static void *gfs2_glock_seq_start(struct seq_file *file, loff_t *pos)
+static void *gfs2_glock_seq_start(struct seq_file *seq, loff_t *pos)
{
- struct glock_iter *gi;
+ struct gfs2_glock_iter *gi = seq->private;
loff_t n = *pos;
- gi = gfs2_glock_iter_init(file->private);
- if (!gi)
- return NULL;
+ gi->hash = 0;
- while(n--) {
+ do {
if (gfs2_glock_iter_next(gi)) {
gfs2_glock_iter_free(gi);
return NULL;
}
- }
+ } while (n--);
- return gi;
+ return gi->gl;
}
-static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
+static void *gfs2_glock_seq_next(struct seq_file *seq, void *iter_ptr,
loff_t *pos)
{
- struct glock_iter *gi = iter_ptr;
+ struct gfs2_glock_iter *gi = seq->private;
(*pos)++;
@@ -2182,24 +1877,18 @@ static void *gfs2_glock_seq_next(struct seq_file *file, void *iter_ptr,
return NULL;
}
- return gi;
+ return gi->gl;
}
-static void gfs2_glock_seq_stop(struct seq_file *file, void *iter_ptr)
+static void gfs2_glock_seq_stop(struct seq_file *seq, void *iter_ptr)
{
- struct glock_iter *gi = iter_ptr;
- if (gi)
- gfs2_glock_iter_free(gi);
+ struct gfs2_glock_iter *gi = seq->private;
+ gfs2_glock_iter_free(gi);
}
-static int gfs2_glock_seq_show(struct seq_file *file, void *iter_ptr)
+static int gfs2_glock_seq_show(struct seq_file *seq, void *iter_ptr)
{
- struct glock_iter *gi = iter_ptr;
-
- gi->seq = file;
- dump_glock(gi, gi->gl);
-
- return 0;
+ return dump_glock(seq, iter_ptr);
}
static const struct seq_operations gfs2_glock_seq_ops = {
@@ -2211,17 +1900,14 @@ static const struct seq_operations gfs2_glock_seq_ops = {
static int gfs2_debugfs_open(struct inode *inode, struct file *file)
{
- struct seq_file *seq;
- int ret;
-
- ret = seq_open(file, &gfs2_glock_seq_ops);
- if (ret)
- return ret;
-
- seq = file->private_data;
- seq->private = inode->i_private;
-
- return 0;
+ int ret = seq_open_private(file, &gfs2_glock_seq_ops,
+ sizeof(struct gfs2_glock_iter));
+ if (ret == 0) {
+ struct seq_file *seq = file->private_data;
+ struct gfs2_glock_iter *gi = seq->private;
+ gi->sdp = inode->i_private;
+ }
+ return ret;
}
static const struct file_operations gfs2_debug_fops = {
@@ -2229,7 +1915,7 @@ static const struct file_operations gfs2_debug_fops = {
.open = gfs2_debugfs_open,
.read = seq_read,
.llseek = seq_lseek,
- .release = seq_release
+ .release = seq_release_private,
};
int gfs2_create_debugfs_file(struct gfs2_sbd *sdp)
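As an aside (not part of the patch): the debugfs hooks above follow the stock seq_file pattern, where seq_open_private() allocates a zeroed per-open iterator alongside the struct seq_file and seq_release_private() frees it again on close. A minimal sketch of the same pairing for a hypothetical debugfs file; all names are invented and example_seq_ops is assumed to be an ordinary seq_operations table defined elsewhere:

	#include <linux/fs.h>
	#include <linux/module.h>
	#include <linux/seq_file.h>

	struct example_iter {
		void *priv;			/* per-open context */
	};

	static int example_open(struct inode *inode, struct file *file)
	{
		int ret = seq_open_private(file, &example_seq_ops,
					   sizeof(struct example_iter));
		if (ret == 0) {
			struct seq_file *seq = file->private_data;
			struct example_iter *it = seq->private;

			it->priv = inode->i_private;	/* stash the context */
		}
		return ret;
	}

	static const struct file_operations example_fops = {
		.owner   = THIS_MODULE,
		.open    = example_open,
		.read    = seq_read,
		.llseek  = seq_lseek,
		.release = seq_release_private,
	};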
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index cdad3e6f8150..7389f8ef0a31 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -26,11 +26,8 @@
#define GL_SKIP 0x00000100
#define GL_ATIME 0x00000200
#define GL_NOCACHE 0x00000400
-#define GL_FLOCK 0x00000800
-#define GL_NOCANCEL 0x00001000
#define GLR_TRYFAILED 13
-#define GLR_CANCELED 14
static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *gl)
{
@@ -41,6 +38,8 @@ static inline struct gfs2_holder *gfs2_glock_is_locked_by_me(struct gfs2_glock *
spin_lock(&gl->gl_spin);
pid = task_pid(current);
list_for_each_entry(gh, &gl->gl_holders, gh_list) {
+ if (!test_bit(HIF_HOLDER, &gh->gh_iflags))
+ break;
if (gh->gh_owner_pid == pid)
goto out;
}
@@ -70,7 +69,7 @@ static inline int gfs2_glock_is_blocking(struct gfs2_glock *gl)
{
int ret;
spin_lock(&gl->gl_spin);
- ret = test_bit(GLF_DEMOTE, &gl->gl_flags) || !list_empty(&gl->gl_waiters3);
+ ret = test_bit(GLF_DEMOTE, &gl->gl_flags);
spin_unlock(&gl->gl_spin);
return ret;
}
@@ -98,6 +97,7 @@ int gfs2_glock_nq_num(struct gfs2_sbd *sdp,
int gfs2_glock_nq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_m(unsigned int num_gh, struct gfs2_holder *ghs);
void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs);
+void gfs2_print_dbg(struct seq_file *seq, const char *fmt, ...);
/**
* gfs2_glock_nq_init - initialize a holder and enqueue it on a glock
@@ -130,7 +130,6 @@ int gfs2_lvb_hold(struct gfs2_glock *gl);
void gfs2_lvb_unhold(struct gfs2_glock *gl);
void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
-
void gfs2_glock_schedule_for_reclaim(struct gfs2_glock *gl);
void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp, int wait);
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index d31badadef8f..c6c318c2a0f6 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -13,6 +13,7 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/lm_interface.h>
+#include <linux/bio.h>
#include "gfs2.h"
#include "incore.h"
@@ -172,26 +173,6 @@ static void inode_go_sync(struct gfs2_glock *gl)
}
/**
- * inode_go_xmote_bh - After promoting/demoting a glock
- * @gl: the glock
- *
- */
-
-static void inode_go_xmote_bh(struct gfs2_glock *gl)
-{
- struct gfs2_holder *gh = gl->gl_req_gh;
- struct buffer_head *bh;
- int error;
-
- if (gl->gl_state != LM_ST_UNLOCKED &&
- (!gh || !(gh->gh_flags & GL_SKIP))) {
- error = gfs2_meta_read(gl, gl->gl_name.ln_number, 0, &bh);
- if (!error)
- brelse(bh);
- }
-}
-
-/**
* inode_go_inval - prepare an inode glock to be released
* @gl: the glock
* @flags:
@@ -249,7 +230,7 @@ static int inode_go_lock(struct gfs2_holder *gh)
struct gfs2_inode *ip = gl->gl_object;
int error = 0;
- if (!ip)
+ if (!ip || (gh->gh_flags & GL_SKIP))
return 0;
if (test_bit(GIF_INVALID, &ip->i_flags)) {
@@ -267,6 +248,26 @@ static int inode_go_lock(struct gfs2_holder *gh)
}
/**
+ * inode_go_dump - print information about an inode
+ * @seq: The iterator
+ * @ip: the inode
+ *
+ * Returns: 0 on success, -ENOBUFS when we run out of space
+ */
+
+static int inode_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+{
+ const struct gfs2_inode *ip = gl->gl_object;
+ if (ip == NULL)
+ return 0;
+ gfs2_print_dbg(seq, " I: n:%llu/%llu t:%u f:0x%08lx\n",
+ (unsigned long long)ip->i_no_formal_ino,
+ (unsigned long long)ip->i_no_addr,
+ IF2DT(ip->i_inode.i_mode), ip->i_flags);
+ return 0;
+}
+
+/**
* rgrp_go_demote_ok - Check to see if it's ok to unlock a RG's glock
* @gl: the glock
*
@@ -306,6 +307,22 @@ static void rgrp_go_unlock(struct gfs2_holder *gh)
}
/**
+ * rgrp_go_dump - print out an rgrp
+ * @seq: The iterator
+ * @gl: The glock in question
+ *
+ */
+
+static int rgrp_go_dump(struct seq_file *seq, const struct gfs2_glock *gl)
+{
+ const struct gfs2_rgrpd *rgd = gl->gl_object;
+ if (rgd == NULL)
+ return 0;
+ gfs2_print_dbg(seq, " R: n:%llu\n", (unsigned long long)rgd->rd_addr);
+ return 0;
+}
+
+/**
* trans_go_sync - promote/demote the transaction glock
* @gl: the glock
* @state: the requested state
@@ -330,7 +347,7 @@ static void trans_go_sync(struct gfs2_glock *gl)
*
*/
-static void trans_go_xmote_bh(struct gfs2_glock *gl)
+static int trans_go_xmote_bh(struct gfs2_glock *gl, struct gfs2_holder *gh)
{
struct gfs2_sbd *sdp = gl->gl_sbd;
struct gfs2_inode *ip = GFS2_I(sdp->sd_jdesc->jd_inode);
@@ -338,8 +355,7 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
struct gfs2_log_header_host head;
int error;
- if (gl->gl_state != LM_ST_UNLOCKED &&
- test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
+ if (test_bit(SDF_JOURNAL_LIVE, &sdp->sd_flags)) {
j_gl->gl_ops->go_inval(j_gl, DIO_METADATA);
error = gfs2_find_jhead(sdp->sd_jdesc, &head);
@@ -354,6 +370,7 @@ static void trans_go_xmote_bh(struct gfs2_glock *gl)
gfs2_log_pointers_init(sdp, head.lh_blkno);
}
}
+ return 0;
}
/**
@@ -375,12 +392,12 @@ const struct gfs2_glock_operations gfs2_meta_glops = {
const struct gfs2_glock_operations gfs2_inode_glops = {
.go_xmote_th = inode_go_sync,
- .go_xmote_bh = inode_go_xmote_bh,
.go_inval = inode_go_inval,
.go_demote_ok = inode_go_demote_ok,
.go_lock = inode_go_lock,
+ .go_dump = inode_go_dump,
.go_type = LM_TYPE_INODE,
- .go_min_hold_time = HZ / 10,
+ .go_min_hold_time = HZ / 5,
};
const struct gfs2_glock_operations gfs2_rgrp_glops = {
@@ -389,8 +406,9 @@ const struct gfs2_glock_operations gfs2_rgrp_glops = {
.go_demote_ok = rgrp_go_demote_ok,
.go_lock = rgrp_go_lock,
.go_unlock = rgrp_go_unlock,
+ .go_dump = rgrp_go_dump,
.go_type = LM_TYPE_RGRP,
- .go_min_hold_time = HZ / 10,
+ .go_min_hold_time = HZ / 5,
};
const struct gfs2_glock_operations gfs2_trans_glops = {
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 9c2c0b90b22a..4b734c6e34f0 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -128,20 +128,20 @@ struct gfs2_bufdata {
struct gfs2_glock_operations {
void (*go_xmote_th) (struct gfs2_glock *gl);
- void (*go_xmote_bh) (struct gfs2_glock *gl);
+ int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
void (*go_inval) (struct gfs2_glock *gl, int flags);
int (*go_demote_ok) (struct gfs2_glock *gl);
int (*go_lock) (struct gfs2_holder *gh);
void (*go_unlock) (struct gfs2_holder *gh);
+ int (*go_dump)(struct seq_file *seq, const struct gfs2_glock *gl);
const int go_type;
const unsigned long go_min_hold_time;
};
enum {
/* States */
- HIF_HOLDER = 6,
+ HIF_HOLDER = 6, /* Set for gh that "holds" the glock */
HIF_FIRST = 7,
- HIF_ABORTED = 9,
HIF_WAIT = 10,
};
@@ -154,20 +154,20 @@ struct gfs2_holder {
unsigned gh_flags;
int gh_error;
- unsigned long gh_iflags;
+ unsigned long gh_iflags; /* HIF_... */
unsigned long gh_ip;
};
enum {
- GLF_LOCK = 1,
- GLF_STICKY = 2,
- GLF_DEMOTE = 3,
- GLF_PENDING_DEMOTE = 4,
- GLF_DIRTY = 5,
- GLF_DEMOTE_IN_PROGRESS = 6,
- GLF_LFLUSH = 7,
- GLF_WAITERS2 = 8,
- GLF_CONV_DEADLK = 9,
+ GLF_LOCK = 1,
+ GLF_STICKY = 2,
+ GLF_DEMOTE = 3,
+ GLF_PENDING_DEMOTE = 4,
+ GLF_DEMOTE_IN_PROGRESS = 5,
+ GLF_DIRTY = 6,
+ GLF_LFLUSH = 7,
+ GLF_INVALIDATE_IN_PROGRESS = 8,
+ GLF_REPLY_PENDING = 9,
};
struct gfs2_glock {
@@ -179,19 +179,14 @@ struct gfs2_glock {
spinlock_t gl_spin;
unsigned int gl_state;
+ unsigned int gl_target;
+ unsigned int gl_reply;
unsigned int gl_hash;
unsigned int gl_demote_state; /* state requested by remote node */
unsigned long gl_demote_time; /* time of first demote request */
- struct pid *gl_owner_pid;
- unsigned long gl_ip;
struct list_head gl_holders;
- struct list_head gl_waiters1; /* HIF_MUTEX */
- struct list_head gl_waiters3; /* HIF_PROMOTE */
const struct gfs2_glock_operations *gl_ops;
-
- struct gfs2_holder *gl_req_gh;
-
void *gl_lock;
char *gl_lvb;
atomic_t gl_lvb_count;
@@ -236,6 +231,7 @@ enum {
GIF_INVALID = 0,
GIF_QD_LOCKED = 1,
GIF_SW_PAGED = 3,
+ GIF_USER = 4, /* user inode, not metadata addr space */
};
struct gfs2_dinode_host {
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 3a9ef526c308..09453d057e41 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -47,8 +47,7 @@ static int iget_test(struct inode *inode, void *opaque)
struct gfs2_inode *ip = GFS2_I(inode);
u64 *no_addr = opaque;
- if (ip->i_no_addr == *no_addr &&
- inode->i_private != NULL)
+ if (ip->i_no_addr == *no_addr && test_bit(GIF_USER, &ip->i_flags))
return 1;
return 0;
@@ -61,6 +60,7 @@ static int iget_set(struct inode *inode, void *opaque)
inode->i_ino = (unsigned long)*no_addr;
ip->i_no_addr = *no_addr;
+ set_bit(GIF_USER, &ip->i_flags);
return 0;
}
@@ -86,7 +86,7 @@ static int iget_skip_test(struct inode *inode, void *opaque)
struct gfs2_inode *ip = GFS2_I(inode);
struct gfs2_skip_data *data = opaque;
- if (ip->i_no_addr == data->no_addr && inode->i_private != NULL){
+ if (ip->i_no_addr == data->no_addr && test_bit(GIF_USER, &ip->i_flags)){
if (inode->i_state & (I_FREEING|I_CLEAR|I_WILL_FREE)){
data->skipped = 1;
return 0;
@@ -105,6 +105,7 @@ static int iget_skip_set(struct inode *inode, void *opaque)
return 1;
inode->i_ino = (unsigned long)(data->no_addr);
ip->i_no_addr = data->no_addr;
+ set_bit(GIF_USER, &ip->i_flags);
return 0;
}
@@ -166,7 +167,7 @@ void gfs2_set_iop(struct inode *inode)
* Returns: A VFS inode, or an error
*/
-struct inode *gfs2_inode_lookup(struct super_block *sb,
+struct inode *gfs2_inode_lookup(struct super_block *sb,
unsigned int type,
u64 no_addr,
u64 no_formal_ino, int skip_freeing)
@@ -187,7 +188,6 @@ struct inode *gfs2_inode_lookup(struct super_block *sb,
if (inode->i_state & I_NEW) {
struct gfs2_sbd *sdp = GFS2_SB(inode);
- inode->i_private = ip;
ip->i_no_formal_ino = no_formal_ino;
error = gfs2_glock_get(sdp, no_addr, &gfs2_inode_glops, CREATE, &ip->i_gl);
diff --git a/fs/gfs2/locking.c b/fs/gfs2/locking.c
index 663fee728783..523243a13a21 100644
--- a/fs/gfs2/locking.c
+++ b/fs/gfs2/locking.c
@@ -23,12 +23,54 @@ struct lmh_wrapper {
const struct lm_lockops *lw_ops;
};
+static int nolock_mount(char *table_name, char *host_data,
+ lm_callback_t cb, void *cb_data,
+ unsigned int min_lvb_size, int flags,
+ struct lm_lockstruct *lockstruct,
+ struct kobject *fskobj);
+
/* List of registered low-level locking protocols. A file system selects one
of them by name at mount time, e.g. lock_nolock, lock_dlm. */
+static const struct lm_lockops nolock_ops = {
+ .lm_proto_name = "lock_nolock",
+ .lm_mount = nolock_mount,
+};
+
+static struct lmh_wrapper nolock_proto = {
+ .lw_list = LIST_HEAD_INIT(nolock_proto.lw_list),
+ .lw_ops = &nolock_ops,
+};
+
static LIST_HEAD(lmh_list);
static DEFINE_MUTEX(lmh_lock);
+static int nolock_mount(char *table_name, char *host_data,
+ lm_callback_t cb, void *cb_data,
+ unsigned int min_lvb_size, int flags,
+ struct lm_lockstruct *lockstruct,
+ struct kobject *fskobj)
+{
+ char *c;
+ unsigned int jid;
+
+ c = strstr(host_data, "jid=");
+ if (!c)
+ jid = 0;
+ else {
+ c += 4;
+ sscanf(c, "%u", &jid);
+ }
+
+ lockstruct->ls_jid = jid;
+ lockstruct->ls_first = 1;
+ lockstruct->ls_lvb_size = min_lvb_size;
+ lockstruct->ls_ops = &nolock_ops;
+ lockstruct->ls_flags = LM_LSFLAG_LOCAL;
+
+ return 0;
+}
+
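For illustration (values invented, not part of the patch): the parsing above means a host_data string containing "jid=2" yields journal id 2, while a string without a "jid=" substring falls back to journal 0. The same logic in a tiny standalone C sketch:

	#include <stdio.h>
	#include <string.h>

	/* Mirrors the strstr()/sscanf() logic used by nolock_mount() above. */
	static unsigned int parse_jid(const char *host_data)
	{
		const char *c = strstr(host_data, "jid=");
		unsigned int jid = 0;

		if (c)
			sscanf(c + 4, "%u", &jid);
		return jid;
	}

	int main(void)
	{
		printf("%u\n", parse_jid("jid=2"));	/* prints 2 */
		printf("%u\n", parse_jid(""));		/* prints 0 */
		return 0;
	}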
/**
* gfs2_register_lockproto - Register a low-level locking protocol
* @proto: the protocol definition
@@ -116,9 +158,13 @@ int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data,
int try = 0;
int error, found;
+
retry:
mutex_lock(&lmh_lock);
+ if (list_empty(&nolock_proto.lw_list))
+ list_add(&nolock_proto.lw_list, &lmh_list);
+
found = 0;
list_for_each_entry(lw, &lmh_list, lw_list) {
if (!strcmp(lw->lw_ops->lm_proto_name, proto_name)) {
@@ -139,7 +185,8 @@ retry:
goto out;
}
- if (!try_module_get(lw->lw_ops->lm_owner)) {
+ if (lw->lw_ops->lm_owner &&
+ !try_module_get(lw->lw_ops->lm_owner)) {
try = 0;
mutex_unlock(&lmh_lock);
msleep(1000);
@@ -158,7 +205,8 @@ out:
void gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct)
{
mutex_lock(&lmh_lock);
- lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace);
+ if (lockstruct->ls_ops->lm_unmount)
+ lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace);
if (lockstruct->ls_ops->lm_owner)
module_put(lockstruct->ls_ops->lm_owner);
mutex_unlock(&lmh_lock);
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
index cf7ea8abec87..871ffc9578f2 100644
--- a/fs/gfs2/locking/dlm/lock.c
+++ b/fs/gfs2/locking/dlm/lock.c
@@ -11,46 +11,63 @@
static char junk_lvb[GDLM_LVB_SIZE];
-static void queue_complete(struct gdlm_lock *lp)
+
+/* convert dlm lock-mode to gfs lock-state */
+
+static s16 gdlm_make_lmstate(s16 dlmmode)
{
- struct gdlm_ls *ls = lp->ls;
+ switch (dlmmode) {
+ case DLM_LOCK_IV:
+ case DLM_LOCK_NL:
+ return LM_ST_UNLOCKED;
+ case DLM_LOCK_EX:
+ return LM_ST_EXCLUSIVE;
+ case DLM_LOCK_CW:
+ return LM_ST_DEFERRED;
+ case DLM_LOCK_PR:
+ return LM_ST_SHARED;
+ }
+ gdlm_assert(0, "unknown DLM mode %d", dlmmode);
+ return -1;
+}
- clear_bit(LFL_ACTIVE, &lp->flags);
+/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
+ thread gets to it. */
+
+static void queue_submit(struct gdlm_lock *lp)
+{
+ struct gdlm_ls *ls = lp->ls;
spin_lock(&ls->async_lock);
- list_add_tail(&lp->clist, &ls->complete);
+ list_add_tail(&lp->delay_list, &ls->submit);
spin_unlock(&ls->async_lock);
wake_up(&ls->thread_wait);
}
-static inline void gdlm_ast(void *astarg)
+static void wake_up_ast(struct gdlm_lock *lp)
{
- queue_complete(astarg);
+ clear_bit(LFL_AST_WAIT, &lp->flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&lp->flags, LFL_AST_WAIT);
}
-static inline void gdlm_bast(void *astarg, int mode)
+static void gdlm_delete_lp(struct gdlm_lock *lp)
{
- struct gdlm_lock *lp = astarg;
struct gdlm_ls *ls = lp->ls;
- if (!mode) {
- printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number);
- return;
- }
-
spin_lock(&ls->async_lock);
- if (!lp->bast_mode) {
- list_add_tail(&lp->blist, &ls->blocking);
- lp->bast_mode = mode;
- } else if (lp->bast_mode < mode)
- lp->bast_mode = mode;
+ if (!list_empty(&lp->delay_list))
+ list_del_init(&lp->delay_list);
+ gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
+ (unsigned long long)lp->lockname.ln_number);
+ list_del_init(&lp->all_list);
+ ls->all_locks_count--;
spin_unlock(&ls->async_lock);
- wake_up(&ls->thread_wait);
+
+ kfree(lp);
}
-void gdlm_queue_delayed(struct gdlm_lock *lp)
+static void gdlm_queue_delayed(struct gdlm_lock *lp)
{
struct gdlm_ls *ls = lp->ls;
@@ -59,6 +76,249 @@ void gdlm_queue_delayed(struct gdlm_lock *lp)
spin_unlock(&ls->async_lock);
}
+static void process_complete(struct gdlm_lock *lp)
+{
+ struct gdlm_ls *ls = lp->ls;
+ struct lm_async_cb acb;
+ s16 prev_mode = lp->cur;
+
+ memset(&acb, 0, sizeof(acb));
+
+ if (lp->lksb.sb_status == -DLM_ECANCEL) {
+ log_info("complete dlm cancel %x,%llx flags %lx",
+ lp->lockname.ln_type,
+ (unsigned long long)lp->lockname.ln_number,
+ lp->flags);
+
+ lp->req = lp->cur;
+ acb.lc_ret |= LM_OUT_CANCELED;
+ if (lp->cur == DLM_LOCK_IV)
+ lp->lksb.sb_lkid = 0;
+ goto out;
+ }
+
+ if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
+ if (lp->lksb.sb_status != -DLM_EUNLOCK) {
+ log_info("unlock sb_status %d %x,%llx flags %lx",
+ lp->lksb.sb_status, lp->lockname.ln_type,
+ (unsigned long long)lp->lockname.ln_number,
+ lp->flags);
+ return;
+ }
+
+ lp->cur = DLM_LOCK_IV;
+ lp->req = DLM_LOCK_IV;
+ lp->lksb.sb_lkid = 0;
+
+ if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
+ gdlm_delete_lp(lp);
+ return;
+ }
+ goto out;
+ }
+
+ if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
+ memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
+
+ if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
+ if (lp->req == DLM_LOCK_PR)
+ lp->req = DLM_LOCK_CW;
+ else if (lp->req == DLM_LOCK_CW)
+ lp->req = DLM_LOCK_PR;
+ }
+
+ /*
+ * A canceled lock request. The lock was just taken off the delayed
+ * list and was never even submitted to dlm.
+ */
+
+ if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
+ log_info("complete internal cancel %x,%llx",
+ lp->lockname.ln_type,
+ (unsigned long long)lp->lockname.ln_number);
+ lp->req = lp->cur;
+ acb.lc_ret |= LM_OUT_CANCELED;
+ goto out;
+ }
+
+ /*
+ * An error occurred.
+ */
+
+ if (lp->lksb.sb_status) {
+ /* a "normal" error */
+ if ((lp->lksb.sb_status == -EAGAIN) &&
+ (lp->lkf & DLM_LKF_NOQUEUE)) {
+ lp->req = lp->cur;
+ if (lp->cur == DLM_LOCK_IV)
+ lp->lksb.sb_lkid = 0;
+ goto out;
+ }
+
+ /* this could only happen with cancels I think */
+ log_info("ast sb_status %d %x,%llx flags %lx",
+ lp->lksb.sb_status, lp->lockname.ln_type,
+ (unsigned long long)lp->lockname.ln_number,
+ lp->flags);
+ if (lp->lksb.sb_status == -EDEADLOCK &&
+ lp->ls->fsflags & LM_MFLAG_CONV_NODROP) {
+ lp->req = lp->cur;
+ acb.lc_ret |= LM_OUT_CONV_DEADLK;
+ if (lp->cur == DLM_LOCK_IV)
+ lp->lksb.sb_lkid = 0;
+ goto out;
+ } else
+ return;
+ }
+
+ /*
+ * This is an AST for an EX->EX conversion for sync_lvb from GFS.
+ */
+
+ if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
+ wake_up_ast(lp);
+ return;
+ }
+
+ /*
+ * A lock has been demoted to NL because it initially completed during
+ * BLOCK_LOCKS. Now it must be requested in the originally requested
+ * mode.
+ */
+
+ if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
+ gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
+ lp->lockname.ln_type,
+ (unsigned long long)lp->lockname.ln_number);
+ gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
+ lp->lockname.ln_type,
+ (unsigned long long)lp->lockname.ln_number);
+
+ lp->cur = DLM_LOCK_NL;
+ lp->req = lp->prev_req;
+ lp->prev_req = DLM_LOCK_IV;
+ lp->lkf &= ~DLM_LKF_CONVDEADLK;
+
+ set_bit(LFL_NOCACHE, &lp->flags);
+
+ if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
+ !test_bit(LFL_NOBLOCK, &lp->flags))
+ gdlm_queue_delayed(lp);
+ else
+ queue_submit(lp);
+ return;
+ }
+
+ /*
+ * A request is granted during dlm recovery. It may be granted
+ * because the locks of a failed node were cleared. In that case,
+ * there may be inconsistent data beneath this lock and we must wait
+ * for recovery to complete to use it. When gfs recovery is done this
+ * granted lock will be converted to NL and then reacquired in this
+ * granted state.
+ */
+
+ if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
+ !test_bit(LFL_NOBLOCK, &lp->flags) &&
+ lp->req != DLM_LOCK_NL) {
+
+ lp->cur = lp->req;
+ lp->prev_req = lp->req;
+ lp->req = DLM_LOCK_NL;
+ lp->lkf |= DLM_LKF_CONVERT;
+ lp->lkf &= ~DLM_LKF_CONVDEADLK;
+
+ log_debug("rereq %x,%llx id %x %d,%d",
+ lp->lockname.ln_type,
+ (unsigned long long)lp->lockname.ln_number,
+ lp->lksb.sb_lkid, lp->cur, lp->req);
+
+ set_bit(LFL_REREQUEST, &lp->flags);
+ queue_submit(lp);
+ return;
+ }
+
+ /*
+ * DLM demoted the lock to NL before it was granted so GFS must be
+ * told it cannot cache data for this lock.
+ */
+
+ if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
+ set_bit(LFL_NOCACHE, &lp->flags);
+
+out:
+ /*
+ * This is an internal lock_dlm lock
+ */
+
+ if (test_bit(LFL_INLOCK, &lp->flags)) {
+ clear_bit(LFL_NOBLOCK, &lp->flags);
+ lp->cur = lp->req;
+ wake_up_ast(lp);
+ return;
+ }
+
+ /*
+ * Normal completion of a lock request. Tell GFS it now has the lock.
+ */
+
+ clear_bit(LFL_NOBLOCK, &lp->flags);
+ lp->cur = lp->req;
+
+ acb.lc_name = lp->lockname;
+ acb.lc_ret |= gdlm_make_lmstate(lp->cur);
+
+ if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
+ (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
+ acb.lc_ret |= LM_OUT_CACHEABLE;
+
+ ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
+}
+
+static void gdlm_ast(void *astarg)
+{
+ struct gdlm_lock *lp = astarg;
+ clear_bit(LFL_ACTIVE, &lp->flags);
+ process_complete(lp);
+}
+
+static void process_blocking(struct gdlm_lock *lp, int bast_mode)
+{
+ struct gdlm_ls *ls = lp->ls;
+ unsigned int cb = 0;
+
+ switch (gdlm_make_lmstate(bast_mode)) {
+ case LM_ST_EXCLUSIVE:
+ cb = LM_CB_NEED_E;
+ break;
+ case LM_ST_DEFERRED:
+ cb = LM_CB_NEED_D;
+ break;
+ case LM_ST_SHARED:
+ cb = LM_CB_NEED_S;
+ break;
+ default:
+ gdlm_assert(0, "unknown bast mode %u", bast_mode);
+ }
+
+ ls->fscb(ls->sdp, cb, &lp->lockname);
+}
+
+
+static void gdlm_bast(void *astarg, int mode)
+{
+ struct gdlm_lock *lp = astarg;
+
+ if (!mode) {
+ printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
+ lp->lockname.ln_type,
+ (unsigned long long)lp->lockname.ln_number);
+ return;
+ }
+
+ process_blocking(lp, mode);
+}
+
/* convert gfs lock-state to dlm lock-mode */
static s16 make_mode(s16 lmstate)
@@ -77,24 +337,6 @@ static s16 make_mode(s16 lmstate)
return -1;
}
-/* convert dlm lock-mode to gfs lock-state */
-
-s16 gdlm_make_lmstate(s16 dlmmode)
-{
- switch (dlmmode) {
- case DLM_LOCK_IV:
- case DLM_LOCK_NL:
- return LM_ST_UNLOCKED;
- case DLM_LOCK_EX:
- return LM_ST_EXCLUSIVE;
- case DLM_LOCK_CW:
- return LM_ST_DEFERRED;
- case DLM_LOCK_PR:
- return LM_ST_SHARED;
- }
- gdlm_assert(0, "unknown DLM mode %d", dlmmode);
- return -1;
-}
/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
@@ -173,10 +415,6 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
make_strname(name, &lp->strname);
lp->ls = ls;
lp->cur = DLM_LOCK_IV;
- lp->lvb = NULL;
- lp->hold_null = NULL;
- INIT_LIST_HEAD(&lp->clist);
- INIT_LIST_HEAD(&lp->blist);
INIT_LIST_HEAD(&lp->delay_list);
spin_lock(&ls->async_lock);
@@ -188,26 +426,6 @@ static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
return 0;
}
-void gdlm_delete_lp(struct gdlm_lock *lp)
-{
- struct gdlm_ls *ls = lp->ls;
-
- spin_lock(&ls->async_lock);
- if (!list_empty(&lp->clist))
- list_del_init(&lp->clist);
- if (!list_empty(&lp->blist))
- list_del_init(&lp->blist);
- if (!list_empty(&lp->delay_list))
- list_del_init(&lp->delay_list);
- gdlm_assert(!list_empty(&lp->all_list), "%x,%llx", lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number);
- list_del_init(&lp->all_list);
- ls->all_locks_count--;
- spin_unlock(&ls->async_lock);
-
- kfree(lp);
-}
-
int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
void **lockp)
{
@@ -261,7 +479,7 @@ unsigned int gdlm_do_lock(struct gdlm_lock *lp)
if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
lp->lksb.sb_status = -EAGAIN;
- queue_complete(lp);
+ gdlm_ast(lp);
error = 0;
}
@@ -308,6 +526,12 @@ unsigned int gdlm_lock(void *lock, unsigned int cur_state,
{
struct gdlm_lock *lp = lock;
+ if (req_state == LM_ST_UNLOCKED)
+ return gdlm_unlock(lock, cur_state);
+
clear_bit(LFL_DLM_CANCEL, &lp->flags);
if (flags & LM_FLAG_NOEXP)
set_bit(LFL_NOBLOCK, &lp->flags);
@@ -351,7 +575,7 @@ void gdlm_cancel(void *lock)
if (delay_list) {
set_bit(LFL_CANCEL, &lp->flags);
set_bit(LFL_ACTIVE, &lp->flags);
- queue_complete(lp);
+ gdlm_ast(lp);
return;
}
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
index a243cf69c54e..ad944c64eab1 100644
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ b/fs/gfs2/locking/dlm/lock_dlm.h
@@ -72,15 +72,12 @@ struct gdlm_ls {
int recover_jid_done;
int recover_jid_status;
spinlock_t async_lock;
- struct list_head complete;
- struct list_head blocking;
struct list_head delayed;
struct list_head submit;
struct list_head all_locks;
u32 all_locks_count;
wait_queue_head_t wait_control;
- struct task_struct *thread1;
- struct task_struct *thread2;
+ struct task_struct *thread;
wait_queue_head_t thread_wait;
unsigned long drop_time;
int drop_locks_count;
@@ -117,10 +114,6 @@ struct gdlm_lock {
u32 lkf; /* dlm flags DLM_LKF_ */
unsigned long flags; /* lock_dlm flags LFL_ */
- int bast_mode; /* protected by async_lock */
-
- struct list_head clist; /* complete */
- struct list_head blist; /* blocking */
struct list_head delay_list; /* delayed */
struct list_head all_list; /* all locks for the fs */
struct gdlm_lock *hold_null; /* NL lock for hold_lvb */
@@ -159,11 +152,8 @@ void gdlm_release_threads(struct gdlm_ls *);
/* lock.c */
-s16 gdlm_make_lmstate(s16);
-void gdlm_queue_delayed(struct gdlm_lock *);
void gdlm_submit_delayed(struct gdlm_ls *);
int gdlm_release_all_locks(struct gdlm_ls *);
-void gdlm_delete_lp(struct gdlm_lock *);
unsigned int gdlm_do_lock(struct gdlm_lock *);
int gdlm_get_lock(void *, struct lm_lockname *, void **);
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
index 470bdf650b50..0628520a445f 100644
--- a/fs/gfs2/locking/dlm/mount.c
+++ b/fs/gfs2/locking/dlm/mount.c
@@ -28,15 +28,11 @@ static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
ls->sdp = sdp;
ls->fsflags = flags;
spin_lock_init(&ls->async_lock);
- INIT_LIST_HEAD(&ls->complete);
- INIT_LIST_HEAD(&ls->blocking);
INIT_LIST_HEAD(&ls->delayed);
INIT_LIST_HEAD(&ls->submit);
INIT_LIST_HEAD(&ls->all_locks);
init_waitqueue_head(&ls->thread_wait);
init_waitqueue_head(&ls->wait_control);
- ls->thread1 = NULL;
- ls->thread2 = NULL;
ls->drop_time = jiffies;
ls->jid = -1;
diff --git a/fs/gfs2/locking/dlm/thread.c b/fs/gfs2/locking/dlm/thread.c
index e53db6fd28ab..f30350abd62f 100644
--- a/fs/gfs2/locking/dlm/thread.c
+++ b/fs/gfs2/locking/dlm/thread.c
@@ -9,255 +9,12 @@
#include "lock_dlm.h"
-/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
- thread gets to it. */
-
-static void queue_submit(struct gdlm_lock *lp)
-{
- struct gdlm_ls *ls = lp->ls;
-
- spin_lock(&ls->async_lock);
- list_add_tail(&lp->delay_list, &ls->submit);
- spin_unlock(&ls->async_lock);
- wake_up(&ls->thread_wait);
-}
-
-static void process_blocking(struct gdlm_lock *lp, int bast_mode)
-{
- struct gdlm_ls *ls = lp->ls;
- unsigned int cb = 0;
-
- switch (gdlm_make_lmstate(bast_mode)) {
- case LM_ST_EXCLUSIVE:
- cb = LM_CB_NEED_E;
- break;
- case LM_ST_DEFERRED:
- cb = LM_CB_NEED_D;
- break;
- case LM_ST_SHARED:
- cb = LM_CB_NEED_S;
- break;
- default:
- gdlm_assert(0, "unknown bast mode %u", lp->bast_mode);
- }
-
- ls->fscb(ls->sdp, cb, &lp->lockname);
-}
-
-static void wake_up_ast(struct gdlm_lock *lp)
-{
- clear_bit(LFL_AST_WAIT, &lp->flags);
- smp_mb__after_clear_bit();
- wake_up_bit(&lp->flags, LFL_AST_WAIT);
-}
-
-static void process_complete(struct gdlm_lock *lp)
-{
- struct gdlm_ls *ls = lp->ls;
- struct lm_async_cb acb;
- s16 prev_mode = lp->cur;
-
- memset(&acb, 0, sizeof(acb));
-
- if (lp->lksb.sb_status == -DLM_ECANCEL) {
- log_info("complete dlm cancel %x,%llx flags %lx",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number,
- lp->flags);
-
- lp->req = lp->cur;
- acb.lc_ret |= LM_OUT_CANCELED;
- if (lp->cur == DLM_LOCK_IV)
- lp->lksb.sb_lkid = 0;
- goto out;
- }
-
- if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
- if (lp->lksb.sb_status != -DLM_EUNLOCK) {
- log_info("unlock sb_status %d %x,%llx flags %lx",
- lp->lksb.sb_status, lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number,
- lp->flags);
- return;
- }
-
- lp->cur = DLM_LOCK_IV;
- lp->req = DLM_LOCK_IV;
- lp->lksb.sb_lkid = 0;
-
- if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
- gdlm_delete_lp(lp);
- return;
- }
- goto out;
- }
-
- if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
- memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
-
- if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
- if (lp->req == DLM_LOCK_PR)
- lp->req = DLM_LOCK_CW;
- else if (lp->req == DLM_LOCK_CW)
- lp->req = DLM_LOCK_PR;
- }
-
- /*
- * A canceled lock request. The lock was just taken off the delayed
- * list and was never even submitted to dlm.
- */
-
- if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
- log_info("complete internal cancel %x,%llx",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number);
- lp->req = lp->cur;
- acb.lc_ret |= LM_OUT_CANCELED;
- goto out;
- }
-
- /*
- * An error occured.
- */
-
- if (lp->lksb.sb_status) {
- /* a "normal" error */
- if ((lp->lksb.sb_status == -EAGAIN) &&
- (lp->lkf & DLM_LKF_NOQUEUE)) {
- lp->req = lp->cur;
- if (lp->cur == DLM_LOCK_IV)
- lp->lksb.sb_lkid = 0;
- goto out;
- }
-
- /* this could only happen with cancels I think */
- log_info("ast sb_status %d %x,%llx flags %lx",
- lp->lksb.sb_status, lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number,
- lp->flags);
- if (lp->lksb.sb_status == -EDEADLOCK &&
- lp->ls->fsflags & LM_MFLAG_CONV_NODROP) {
- lp->req = lp->cur;
- acb.lc_ret |= LM_OUT_CONV_DEADLK;
- if (lp->cur == DLM_LOCK_IV)
- lp->lksb.sb_lkid = 0;
- goto out;
- } else
- return;
- }
-
- /*
- * This is an AST for an EX->EX conversion for sync_lvb from GFS.
- */
-
- if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
- wake_up_ast(lp);
- return;
- }
-
- /*
- * A lock has been demoted to NL because it initially completed during
- * BLOCK_LOCKS. Now it must be requested in the originally requested
- * mode.
- */
-
- if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
- gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number);
- gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number);
-
- lp->cur = DLM_LOCK_NL;
- lp->req = lp->prev_req;
- lp->prev_req = DLM_LOCK_IV;
- lp->lkf &= ~DLM_LKF_CONVDEADLK;
-
- set_bit(LFL_NOCACHE, &lp->flags);
-
- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
- !test_bit(LFL_NOBLOCK, &lp->flags))
- gdlm_queue_delayed(lp);
- else
- queue_submit(lp);
- return;
- }
-
- /*
- * A request is granted during dlm recovery. It may be granted
- * because the locks of a failed node were cleared. In that case,
- * there may be inconsistent data beneath this lock and we must wait
- * for recovery to complete to use it. When gfs recovery is done this
- * granted lock will be converted to NL and then reacquired in this
- * granted state.
- */
-
- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
- !test_bit(LFL_NOBLOCK, &lp->flags) &&
- lp->req != DLM_LOCK_NL) {
-
- lp->cur = lp->req;
- lp->prev_req = lp->req;
- lp->req = DLM_LOCK_NL;
- lp->lkf |= DLM_LKF_CONVERT;
- lp->lkf &= ~DLM_LKF_CONVDEADLK;
-
- log_debug("rereq %x,%llx id %x %d,%d",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number,
- lp->lksb.sb_lkid, lp->cur, lp->req);
-
- set_bit(LFL_REREQUEST, &lp->flags);
- queue_submit(lp);
- return;
- }
-
- /*
- * DLM demoted the lock to NL before it was granted so GFS must be
- * told it cannot cache data for this lock.
- */
-
- if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
- set_bit(LFL_NOCACHE, &lp->flags);
-
-out:
- /*
- * This is an internal lock_dlm lock
- */
-
- if (test_bit(LFL_INLOCK, &lp->flags)) {
- clear_bit(LFL_NOBLOCK, &lp->flags);
- lp->cur = lp->req;
- wake_up_ast(lp);
- return;
- }
-
- /*
- * Normal completion of a lock request. Tell GFS it now has the lock.
- */
-
- clear_bit(LFL_NOBLOCK, &lp->flags);
- lp->cur = lp->req;
-
- acb.lc_name = lp->lockname;
- acb.lc_ret |= gdlm_make_lmstate(lp->cur);
-
- if (!test_and_clear_bit(LFL_NOCACHE, &lp->flags) &&
- (lp->cur > DLM_LOCK_NL) && (prev_mode > DLM_LOCK_NL))
- acb.lc_ret |= LM_OUT_CACHEABLE;
-
- ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
-}
-
-static inline int no_work(struct gdlm_ls *ls, int blocking)
+static inline int no_work(struct gdlm_ls *ls)
{
int ret;
spin_lock(&ls->async_lock);
- ret = list_empty(&ls->complete) && list_empty(&ls->submit);
- if (ret && blocking)
- ret = list_empty(&ls->blocking);
+ ret = list_empty(&ls->submit);
spin_unlock(&ls->async_lock);
return ret;
@@ -276,100 +33,55 @@ static inline int check_drop(struct gdlm_ls *ls)
return 0;
}
-static int gdlm_thread(void *data, int blist)
+static int gdlm_thread(void *data)
{
struct gdlm_ls *ls = (struct gdlm_ls *) data;
struct gdlm_lock *lp = NULL;
- uint8_t complete, blocking, submit, drop;
-
- /* Only thread1 is allowed to do blocking callbacks since gfs
- may wait for a completion callback within a blocking cb. */
while (!kthread_should_stop()) {
wait_event_interruptible(ls->thread_wait,
- !no_work(ls, blist) || kthread_should_stop());
-
- complete = blocking = submit = drop = 0;
+ !no_work(ls) || kthread_should_stop());
spin_lock(&ls->async_lock);
- if (blist && !list_empty(&ls->blocking)) {
- lp = list_entry(ls->blocking.next, struct gdlm_lock,
- blist);
- list_del_init(&lp->blist);
- blocking = lp->bast_mode;
- lp->bast_mode = 0;
- } else if (!list_empty(&ls->complete)) {
- lp = list_entry(ls->complete.next, struct gdlm_lock,
- clist);
- list_del_init(&lp->clist);
- complete = 1;
- } else if (!list_empty(&ls->submit)) {
+ if (!list_empty(&ls->submit)) {
lp = list_entry(ls->submit.next, struct gdlm_lock,
delay_list);
list_del_init(&lp->delay_list);
- submit = 1;
- }
-
- drop = check_drop(ls);
- spin_unlock(&ls->async_lock);
-
- if (complete)
- process_complete(lp);
-
- else if (blocking)
- process_blocking(lp, blocking);
-
- else if (submit)
+ spin_unlock(&ls->async_lock);
gdlm_do_lock(lp);
-
- if (drop)
+ spin_lock(&ls->async_lock);
+ }
+ /* Does this ever happen these days? I hope not anyway */
+ if (check_drop(ls)) {
+ spin_unlock(&ls->async_lock);
ls->fscb(ls->sdp, LM_CB_DROPLOCKS, NULL);
-
- schedule();
+ spin_lock(&ls->async_lock);
+ }
+ spin_unlock(&ls->async_lock);
}
return 0;
}
-static int gdlm_thread1(void *data)
-{
- return gdlm_thread(data, 1);
-}
-
-static int gdlm_thread2(void *data)
-{
- return gdlm_thread(data, 0);
-}
-
int gdlm_init_threads(struct gdlm_ls *ls)
{
struct task_struct *p;
int error;
- p = kthread_run(gdlm_thread1, ls, "lock_dlm1");
- error = IS_ERR(p);
- if (error) {
- log_error("can't start lock_dlm1 thread %d", error);
- return error;
- }
- ls->thread1 = p;
-
- p = kthread_run(gdlm_thread2, ls, "lock_dlm2");
+ p = kthread_run(gdlm_thread, ls, "lock_dlm");
error = IS_ERR(p);
if (error) {
- log_error("can't start lock_dlm2 thread %d", error);
- kthread_stop(ls->thread1);
+ log_error("can't start lock_dlm thread %d", error);
return error;
}
- ls->thread2 = p;
+ ls->thread = p;
return 0;
}
void gdlm_release_threads(struct gdlm_ls *ls)
{
- kthread_stop(ls->thread1);
- kthread_stop(ls->thread2);
+ kthread_stop(ls->thread);
}
diff --git a/fs/gfs2/locking/nolock/Makefile b/fs/gfs2/locking/nolock/Makefile
deleted file mode 100644
index 35e9730bc3a8..000000000000
--- a/fs/gfs2/locking/nolock/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_GFS2_FS_LOCKING_NOLOCK) += lock_nolock.o
-lock_nolock-y := main.o
-
diff --git a/fs/gfs2/locking/nolock/main.c b/fs/gfs2/locking/nolock/main.c
deleted file mode 100644
index 284a5ece8d94..000000000000
--- a/fs/gfs2/locking/nolock/main.c
+++ /dev/null
@@ -1,238 +0,0 @@
-/*
- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/init.h>
-#include <linux/types.h>
-#include <linux/fs.h>
-#include <linux/lm_interface.h>
-
-struct nolock_lockspace {
- unsigned int nl_lvb_size;
-};
-
-static const struct lm_lockops nolock_ops;
-
-static int nolock_mount(char *table_name, char *host_data,
- lm_callback_t cb, void *cb_data,
- unsigned int min_lvb_size, int flags,
- struct lm_lockstruct *lockstruct,
- struct kobject *fskobj)
-{
- char *c;
- unsigned int jid;
- struct nolock_lockspace *nl;
-
- c = strstr(host_data, "jid=");
- if (!c)
- jid = 0;
- else {
- c += 4;
- sscanf(c, "%u", &jid);
- }
-
- nl = kzalloc(sizeof(struct nolock_lockspace), GFP_KERNEL);
- if (!nl)
- return -ENOMEM;
-
- nl->nl_lvb_size = min_lvb_size;
-
- lockstruct->ls_jid = jid;
- lockstruct->ls_first = 1;
- lockstruct->ls_lvb_size = min_lvb_size;
- lockstruct->ls_lockspace = nl;
- lockstruct->ls_ops = &nolock_ops;
- lockstruct->ls_flags = LM_LSFLAG_LOCAL;
-
- return 0;
-}
-
-static void nolock_others_may_mount(void *lockspace)
-{
-}
-
-static void nolock_unmount(void *lockspace)
-{
- struct nolock_lockspace *nl = lockspace;
- kfree(nl);
-}
-
-static void nolock_withdraw(void *lockspace)
-{
-}
-
-/**
- * nolock_get_lock - get a lm_lock_t given a descripton of the lock
- * @lockspace: the lockspace the lock lives in
- * @name: the name of the lock
- * @lockp: return the lm_lock_t here
- *
- * Returns: 0 on success, -EXXX on failure
- */
-
-static int nolock_get_lock(void *lockspace, struct lm_lockname *name,
- void **lockp)
-{
- *lockp = lockspace;
- return 0;
-}
-
-/**
- * nolock_put_lock - get rid of a lock structure
- * @lock: the lock to throw away
- *
- */
-
-static void nolock_put_lock(void *lock)
-{
-}
-
-/**
- * nolock_lock - acquire a lock
- * @lock: the lock to manipulate
- * @cur_state: the current state
- * @req_state: the requested state
- * @flags: modifier flags
- *
- * Returns: A bitmap of LM_OUT_*
- */
-
-static unsigned int nolock_lock(void *lock, unsigned int cur_state,
- unsigned int req_state, unsigned int flags)
-{
- return req_state | LM_OUT_CACHEABLE;
-}
-
-/**
- * nolock_unlock - unlock a lock
- * @lock: the lock to manipulate
- * @cur_state: the current state
- *
- * Returns: 0
- */
-
-static unsigned int nolock_unlock(void *lock, unsigned int cur_state)
-{
- return 0;
-}
-
-static void nolock_cancel(void *lock)
-{
-}
-
-/**
- * nolock_hold_lvb - hold on to a lock value block
- * @lock: the lock the LVB is associated with
- * @lvbp: return the lm_lvb_t here
- *
- * Returns: 0 on success, -EXXX on failure
- */
-
-static int nolock_hold_lvb(void *lock, char **lvbp)
-{
- struct nolock_lockspace *nl = lock;
- int error = 0;
-
- *lvbp = kzalloc(nl->nl_lvb_size, GFP_NOFS);
- if (!*lvbp)
- error = -ENOMEM;
-
- return error;
-}
-
-/**
- * nolock_unhold_lvb - release a LVB
- * @lock: the lock the LVB is associated with
- * @lvb: the lock value block
- *
- */
-
-static void nolock_unhold_lvb(void *lock, char *lvb)
-{
- kfree(lvb);
-}
-
-static int nolock_plock_get(void *lockspace, struct lm_lockname *name,
- struct file *file, struct file_lock *fl)
-{
- posix_test_lock(file, fl);
-
- return 0;
-}
-
-static int nolock_plock(void *lockspace, struct lm_lockname *name,
- struct file *file, int cmd, struct file_lock *fl)
-{
- int error;
- error = posix_lock_file_wait(file, fl);
- return error;
-}
-
-static int nolock_punlock(void *lockspace, struct lm_lockname *name,
- struct file *file, struct file_lock *fl)
-{
- int error;
- error = posix_lock_file_wait(file, fl);
- return error;
-}
-
-static void nolock_recovery_done(void *lockspace, unsigned int jid,
- unsigned int message)
-{
-}
-
-static const struct lm_lockops nolock_ops = {
- .lm_proto_name = "lock_nolock",
- .lm_mount = nolock_mount,
- .lm_others_may_mount = nolock_others_may_mount,
- .lm_unmount = nolock_unmount,
- .lm_withdraw = nolock_withdraw,
- .lm_get_lock = nolock_get_lock,
- .lm_put_lock = nolock_put_lock,
- .lm_lock = nolock_lock,
- .lm_unlock = nolock_unlock,
- .lm_cancel = nolock_cancel,
- .lm_hold_lvb = nolock_hold_lvb,
- .lm_unhold_lvb = nolock_unhold_lvb,
- .lm_plock_get = nolock_plock_get,
- .lm_plock = nolock_plock,
- .lm_punlock = nolock_punlock,
- .lm_recovery_done = nolock_recovery_done,
- .lm_owner = THIS_MODULE,
-};
-
-static int __init init_nolock(void)
-{
- int error;
-
- error = gfs2_register_lockproto(&nolock_ops);
- if (error) {
- printk(KERN_WARNING
- "lock_nolock: can't register protocol: %d\n", error);
- return error;
- }
-
- printk(KERN_INFO
- "Lock_Nolock (built %s %s) installed\n", __DATE__, __TIME__);
- return 0;
-}
-
-static void __exit exit_nolock(void)
-{
- gfs2_unregister_lockproto(&nolock_ops);
-}
-
-module_init(init_nolock);
-module_exit(exit_nolock);
-
-MODULE_DESCRIPTION("GFS Nolock Locking Module");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index 548264b1836d..6c6af9f5e3ab 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -87,6 +87,8 @@ void gfs2_remove_from_ail(struct gfs2_bufdata *bd)
*/
static void gfs2_ail1_start_one(struct gfs2_sbd *sdp, struct gfs2_ail *ai)
+__releases(&sdp->sd_log_lock)
+__acquires(&sdp->sd_log_lock)
{
struct gfs2_bufdata *bd, *s;
struct buffer_head *bh;
diff --git a/fs/gfs2/log.h b/fs/gfs2/log.h
index 771152816508..7c64510ccfd2 100644
--- a/fs/gfs2/log.h
+++ b/fs/gfs2/log.h
@@ -21,6 +21,7 @@
*/
static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
+__acquires(&sdp->sd_log_lock)
{
spin_lock(&sdp->sd_log_lock);
}
@@ -32,6 +33,7 @@ static inline void gfs2_log_lock(struct gfs2_sbd *sdp)
*/
static inline void gfs2_log_unlock(struct gfs2_sbd *sdp)
+__releases(&sdp->sd_log_lock)
{
spin_unlock(&sdp->sd_log_lock);
}
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 053e2ebbbd50..bcc668d0fadd 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -40,8 +40,6 @@ static void gfs2_init_glock_once(struct kmem_cache *cachep, void *foo)
INIT_HLIST_NODE(&gl->gl_list);
spin_lock_init(&gl->gl_spin);
INIT_LIST_HEAD(&gl->gl_holders);
- INIT_LIST_HEAD(&gl->gl_waiters1);
- INIT_LIST_HEAD(&gl->gl_waiters3);
gl->gl_lvb = NULL;
atomic_set(&gl->gl_lvb_count, 0);
INIT_LIST_HEAD(&gl->gl_reclaim);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 85aea27b4a86..09853620c951 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
@@ -69,13 +69,15 @@ static const struct address_space_operations aspace_aops = {
struct inode *gfs2_aspace_get(struct gfs2_sbd *sdp)
{
struct inode *aspace;
+ struct gfs2_inode *ip;
aspace = new_inode(sdp->sd_vfs);
if (aspace) {
mapping_set_gfp_mask(aspace->i_mapping, GFP_NOFS);
aspace->i_mapping->a_ops = &aspace_aops;
aspace->i_size = ~0ULL;
- aspace->i_private = NULL;
+ ip = GFS2_I(aspace);
+ clear_bit(GIF_USER, &ip->i_flags);
insert_inode_hash(aspace);
}
return aspace;
@@ -127,7 +129,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
}
/**
- * getbuf - Get a buffer with a given address space
+ * gfs2_getbuf - Get a buffer with a given address space
* @gl: the glock
* @blkno: the block number (filesystem scope)
* @create: 1 if the buffer should be created
@@ -135,7 +137,7 @@ void gfs2_meta_sync(struct gfs2_glock *gl)
* Returns: the buffer
*/
-static struct buffer_head *getbuf(struct gfs2_glock *gl, u64 blkno, int create)
+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
{
struct address_space *mapping = gl->gl_aspace->i_mapping;
struct gfs2_sbd *sdp = gl->gl_sbd;
@@ -203,7 +205,7 @@ static void meta_prep_new(struct buffer_head *bh)
struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
{
struct buffer_head *bh;
- bh = getbuf(gl, blkno, CREATE);
+ bh = gfs2_getbuf(gl, blkno, CREATE);
meta_prep_new(bh);
return bh;
}
@@ -221,7 +223,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno)
int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno, int flags,
struct buffer_head **bhp)
{
- *bhp = getbuf(gl, blkno, CREATE);
+ *bhp = gfs2_getbuf(gl, blkno, CREATE);
if (!buffer_uptodate(*bhp)) {
ll_rw_block(READ_META, 1, bhp);
if (flags & DIO_WAIT) {
@@ -344,7 +346,7 @@ void gfs2_meta_wipe(struct gfs2_inode *ip, u64 bstart, u32 blen)
struct buffer_head *bh;
while (blen) {
- bh = getbuf(ip->i_gl, bstart, NO_CREATE);
+ bh = gfs2_getbuf(ip->i_gl, bstart, NO_CREATE);
if (bh) {
lock_buffer(bh);
gfs2_log_lock(sdp);
@@ -419,7 +421,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
if (extlen > max_ra)
extlen = max_ra;
- first_bh = getbuf(gl, dblock, CREATE);
+ first_bh = gfs2_getbuf(gl, dblock, CREATE);
if (buffer_uptodate(first_bh))
goto out;
@@ -430,7 +432,7 @@ struct buffer_head *gfs2_meta_ra(struct gfs2_glock *gl, u64 dblock, u32 extlen)
extlen--;
while (extlen) {
- bh = getbuf(gl, dblock, CREATE);
+ bh = gfs2_getbuf(gl, dblock, CREATE);
if (!buffer_uptodate(bh) && !buffer_locked(bh))
ll_rw_block(READA, 1, &bh);
diff --git a/fs/gfs2/meta_io.h b/fs/gfs2/meta_io.h
index 73e3b1c76fe1..b1a5f3674d43 100644
--- a/fs/gfs2/meta_io.h
+++ b/fs/gfs2/meta_io.h
@@ -47,6 +47,7 @@ struct buffer_head *gfs2_meta_new(struct gfs2_glock *gl, u64 blkno);
int gfs2_meta_read(struct gfs2_glock *gl, u64 blkno,
int flags, struct buffer_head **bhp);
int gfs2_meta_wait(struct gfs2_sbd *sdp, struct buffer_head *bh);
+struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create);
void gfs2_attach_bufdata(struct gfs2_glock *gl, struct buffer_head *bh,
int meta);
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index f55394e57cb2..e64a1b04117a 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -499,34 +499,34 @@ static int __gfs2_readpage(void *file, struct page *page)
* @file: The file to read
* @page: The page of the file
*
- * This deals with the locking required. We use a trylock in order to
- * avoid the page lock / glock ordering problems returning AOP_TRUNCATED_PAGE
- * in the event that we are unable to get the lock.
+ * This deals with the locking required. We have to unlock and
+ * relock the page in order to get the locking in the right
+ * order.
*/
static int gfs2_readpage(struct file *file, struct page *page)
{
- struct gfs2_inode *ip = GFS2_I(page->mapping->host);
- struct gfs2_holder *gh;
+ struct address_space *mapping = page->mapping;
+ struct gfs2_inode *ip = GFS2_I(mapping->host);
+ struct gfs2_holder gh;
int error;
- gh = gfs2_glock_is_locked_by_me(ip->i_gl);
- if (!gh) {
- gh = kmalloc(sizeof(struct gfs2_holder), GFP_NOFS);
- if (!gh)
- return -ENOBUFS;
- gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, gh);
+ unlock_page(page);
+ gfs2_holder_init(ip->i_gl, LM_ST_SHARED, GL_ATIME, &gh);
+ error = gfs2_glock_nq_atime(&gh);
+ if (unlikely(error))
+ goto out;
+ error = AOP_TRUNCATED_PAGE;
+ lock_page(page);
+ if (page->mapping == mapping && !PageUptodate(page))
+ error = __gfs2_readpage(file, page);
+ else
unlock_page(page);
- error = gfs2_glock_nq_atime(gh);
- if (likely(error != 0))
- goto out;
- return AOP_TRUNCATED_PAGE;
- }
- error = __gfs2_readpage(file, page);
- gfs2_glock_dq(gh);
+ gfs2_glock_dq(&gh);
out:
- gfs2_holder_uninit(gh);
- kfree(gh);
+ gfs2_holder_uninit(&gh);
+ if (error && error != AOP_TRUNCATED_PAGE)
+ lock_page(page);
return error;
}
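
The rewritten gfs2_readpage() enters with the page locked but must take the glock first, so it drops the page lock, takes the glock, retakes the page lock, and then revalidates the page before reading it. A user-space pthread sketch of that drop/reacquire/revalidate ordering (all names are illustrative, not GFS2 code):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t glock = PTHREAD_MUTEX_INITIALIZER;      /* outer lock */
static pthread_mutex_t page_lock = PTHREAD_MUTEX_INITIALIZER;  /* inner lock */
static int page_uptodate;       /* may change while the page lock is dropped */

/* Called with page_lock held; returns with both locks released. */
static int read_page_in_order(void)
{
        int did_read = 0;

        pthread_mutex_unlock(&page_lock);       /* drop the inner lock */
        pthread_mutex_lock(&glock);             /* take the outer lock first */
        pthread_mutex_lock(&page_lock);         /* retake the inner lock */

        if (!page_uptodate) {                   /* revalidate after relocking */
                page_uptodate = 1;              /* stands in for __gfs2_readpage() */
                did_read = 1;
        }

        pthread_mutex_unlock(&page_lock);
        pthread_mutex_unlock(&glock);
        return did_read;
}

int main(void)
{
        pthread_mutex_lock(&page_lock);         /* caller enters with page locked */
        printf("read performed: %d\n", read_page_in_order());
        return 0;
}
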
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index e1b7d525a066..0ff512a11925 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -669,8 +669,7 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
int error = 0;
state = (fl->fl_type == F_WRLCK) ? LM_ST_EXCLUSIVE : LM_ST_SHARED;
- flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE
- | GL_FLOCK;
+ flags = (IS_SETLKW(cmd) ? 0 : LM_FLAG_TRY) | GL_EXACT | GL_NOCACHE;
mutex_lock(&fp->f_fl_mutex);
@@ -683,9 +682,8 @@ static int do_flock(struct file *file, int cmd, struct file_lock *fl)
gfs2_glock_dq_wait(fl_gh);
gfs2_holder_reinit(state, flags, fl_gh);
} else {
- error = gfs2_glock_get(GFS2_SB(&ip->i_inode),
- ip->i_no_addr, &gfs2_flock_glops,
- CREATE, &gl);
+ error = gfs2_glock_get(GFS2_SB(&ip->i_inode), ip->i_no_addr,
+ &gfs2_flock_glops, CREATE, &gl);
if (error)
goto out;
gfs2_holder_init(gl, state, flags, fl_gh);
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index ef9c6c4f80f6..9bd97c5543bd 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -142,8 +142,8 @@ static int init_names(struct gfs2_sbd *sdp, int silent)
if (!table[0])
table = sdp->sd_vfs->s_id;
- snprintf(sdp->sd_proto_name, GFS2_FSNAME_LEN, "%s", proto);
- snprintf(sdp->sd_table_name, GFS2_FSNAME_LEN, "%s", table);
+ strlcpy(sdp->sd_proto_name, proto, GFS2_FSNAME_LEN);
+ strlcpy(sdp->sd_table_name, table, GFS2_FSNAME_LEN);
table = sdp->sd_table_name;
while ((table = strchr(table, '/')))
@@ -364,6 +364,8 @@ static int map_journal_extents(struct gfs2_sbd *sdp)
static void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp)
{
+ if (!sdp->sd_lockstruct.ls_ops->lm_others_may_mount)
+ return;
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
sdp->sd_lockstruct.ls_ops->lm_others_may_mount(
sdp->sd_lockstruct.ls_lockspace);
@@ -741,8 +743,7 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
goto out;
}
- if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lockspace) ||
- gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
+ if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >=
GFS2_MIN_LVB_SIZE)) {
gfs2_unmount_lockproto(&sdp->sd_lockstruct);
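
The snprintf(..., "%s", ...) to strlcpy() change in init_names() keeps the same truncation behaviour: strlcpy() copies at most size-1 bytes, always NUL-terminates, and returns the length of the source so truncation is detectable, without going through printf format parsing. A user-space comparison using a local copy of the semantics (glibc does not ship strlcpy; the kernel's version lives in lib/string.c):

#include <stdio.h>
#include <string.h>

static size_t demo_strlcpy(char *dst, const char *src, size_t size)
{
        size_t len = strlen(src);

        if (size) {
                size_t n = len >= size ? size - 1 : len;

                memcpy(dst, src, n);
                dst[n] = '\0';
        }
        return len;     /* length it tried to create, like snprintf's return */
}

int main(void)
{
        const char *src = "lock_dlm_table_name";
        char a[8], b[8];
        size_t want = demo_strlcpy(a, src, sizeof(a));
        int want2 = snprintf(b, sizeof(b), "%s", src);

        printf("strlcpy-style: \"%s\" (wanted %zu bytes)\n", a, want);
        printf("snprintf:      \"%s\" (wanted %d bytes)\n", b, want2);
        return 0;
}
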
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 2278c68b7e35..66907922109f 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -1,6 +1,6 @@
/*
* Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
+ * Copyright (C) 2004-2008 Red Hat, Inc. All rights reserved.
*
* This copyrighted material is made available to anyone wishing to use,
* modify, copy, or redistribute it subject to the terms and conditions
@@ -52,7 +52,7 @@ static int gfs2_write_inode(struct inode *inode, int sync)
struct gfs2_inode *ip = GFS2_I(inode);
/* Check this is a "normal" inode */
- if (inode->i_private) {
+ if (test_bit(GIF_USER, &ip->i_flags)) {
if (current->flags & PF_MEMALLOC)
return 0;
if (sync)
@@ -155,7 +155,7 @@ static void gfs2_write_super(struct super_block *sb)
static int gfs2_sync_fs(struct super_block *sb, int wait)
{
sb->s_dirt = 0;
- if (wait)
+ if (wait && sb->s_fs_info)
gfs2_log_flush(sb->s_fs_info, NULL);
return 0;
}
@@ -297,8 +297,9 @@ static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
*/
static void gfs2_drop_inode(struct inode *inode)
{
- if (inode->i_private && inode->i_nlink) {
- struct gfs2_inode *ip = GFS2_I(inode);
+ struct gfs2_inode *ip = GFS2_I(inode);
+
+ if (test_bit(GIF_USER, &ip->i_flags) && inode->i_nlink) {
struct gfs2_glock *gl = ip->i_iopen_gh.gh_gl;
if (gl && test_bit(GLF_DEMOTE, &gl->gl_flags))
clear_nlink(inode);
@@ -314,12 +315,13 @@ static void gfs2_drop_inode(struct inode *inode)
static void gfs2_clear_inode(struct inode *inode)
{
+ struct gfs2_inode *ip = GFS2_I(inode);
+
/* This tells us its a "real" inode and not one which only
* serves to contain an address space (see rgrp.c, meta_io.c)
* which therefore doesn't have its own glocks.
*/
- if (inode->i_private) {
- struct gfs2_inode *ip = GFS2_I(inode);
+ if (test_bit(GIF_USER, &ip->i_flags)) {
ip->i_gl->gl_object = NULL;
gfs2_glock_schedule_for_reclaim(ip->i_gl);
gfs2_glock_put(ip->i_gl);
@@ -419,7 +421,7 @@ static void gfs2_delete_inode(struct inode *inode)
struct gfs2_holder gh;
int error;
- if (!inode->i_private)
+ if (!test_bit(GIF_USER, &ip->i_flags))
goto out;
error = gfs2_glock_nq_init(ip->i_gl, LM_ST_EXCLUSIVE, 0, &gh);
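
The GIF_USER bit used throughout ops_super.c takes over the role inode->i_private played before: it is set on ordinary user-visible inodes and left clear on the internal address-space inodes built in meta_io.c, so the superblock callbacks can tell the two apart. A small stand-alone illustration of the flag test, with demo_* names standing in for the real structures:

#include <stdio.h>

enum { GIF_USER = 0 };          /* bit index, mirroring the gfs2 flag */

struct demo_inode {
        unsigned long i_flags;
};

static int is_user_inode(const struct demo_inode *ip)
{
        return !!(ip->i_flags & (1UL << GIF_USER));
}

int main(void)
{
        struct demo_inode aspace  = { .i_flags = 0 };               /* internal */
        struct demo_inode regular = { .i_flags = 1UL << GIF_USER }; /* user */

        printf("aspace:  user=%d\n", is_user_inode(&aspace));
        printf("regular: user=%d\n", is_user_inode(&regular));
        return 0;
}
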
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index 2888e4b4b1c5..d5e91f4f6a0b 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -428,6 +428,9 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
static void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
unsigned int message)
{
+ if (!sdp->sd_lockstruct.ls_ops->lm_recovery_done)
+ return;
+
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
sdp->sd_lockstruct.ls_ops->lm_recovery_done(
sdp->sd_lockstruct.ls_lockspace, jid, message);
@@ -505,7 +508,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_SHARED,
LM_FLAG_NOEXP | LM_FLAG_PRIORITY |
- GL_NOCANCEL | GL_NOCACHE, &t_gh);
+ GL_NOCACHE, &t_gh);
if (error)
goto fail_gunlock_ji;
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 7e8f0b1d6c6e..6387523a3153 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -1495,7 +1495,7 @@ u64 gfs2_alloc_block(struct gfs2_inode *ip, unsigned int *n)
al->al_alloced += *n;
- gfs2_statfs_change(sdp, 0, -*n, 0);
+ gfs2_statfs_change(sdp, 0, -(s64)*n, 0);
gfs2_quota_change(ip, *n, ip->i_inode.i_uid, ip->i_inode.i_gid);
spin_lock(&sdp->sd_rindex_spin);
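
The (s64) cast matters because *n is unsigned: negating it before widening wraps around to a huge positive value, while widening first yields the intended negative free-block delta for gfs2_statfs_change(). A short demonstration:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
        unsigned int n = 3;

        int64_t wrong = -n;             /* -n wraps to UINT_MAX - 2 first */
        int64_t right = -(int64_t)n;    /* widen to 64 bits, then negate */

        printf("without cast: %lld\n", (long long)wrong);
        printf("with cast:    %lld\n", (long long)right);
        return 0;
}
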
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 7aeacbc65f35..12fe38fe498f 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -941,8 +941,7 @@ static int gfs2_lock_fs_check_clean(struct gfs2_sbd *sdp,
}
error = gfs2_glock_nq_init(sdp->sd_trans_gl, LM_ST_DEFERRED,
- LM_FLAG_PRIORITY | GL_NOCACHE,
- t_gh);
+ GL_NOCACHE, t_gh);
list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
error = gfs2_jdesc_check(jd);
diff --git a/fs/hppfs/Makefile b/fs/hppfs/Makefile
index 8a1f50344368..3a982bd975d2 100644
--- a/fs/hppfs/Makefile
+++ b/fs/hppfs/Makefile
@@ -3,7 +3,4 @@
# Licensed under the GPL
#
-hppfs-objs := hppfs.o
-
-obj-y =
-obj-$(CONFIG_HPPFS) += $(hppfs-objs)
+obj-$(CONFIG_HPPFS) += hppfs.o
diff --git a/fs/jbd2/commit.c b/fs/jbd2/commit.c
index 4d99685fdce4..92b6ac3df8ab 100644
--- a/fs/jbd2/commit.c
+++ b/fs/jbd2/commit.c
@@ -112,6 +112,7 @@ static int journal_submit_commit_record(journal_t *journal,
struct buffer_head *bh;
int ret;
int barrier_done = 0;
+ struct timespec now = current_kernel_time();
if (is_journal_aborted(journal))
return 0;
@@ -126,6 +127,8 @@ static int journal_submit_commit_record(journal_t *journal,
tmp->h_magic = cpu_to_be32(JBD2_MAGIC_NUMBER);
tmp->h_blocktype = cpu_to_be32(JBD2_COMMIT_BLOCK);
tmp->h_sequence = cpu_to_be32(commit_transaction->t_tid);
+ tmp->h_commit_sec = cpu_to_be64(now.tv_sec);
+ tmp->h_commit_nsec = cpu_to_be32(now.tv_nsec);
if (JBD2_HAS_COMPAT_FEATURE(journal,
JBD2_FEATURE_COMPAT_CHECKSUM)) {
@@ -168,6 +171,7 @@ static int journal_submit_commit_record(journal_t *journal,
spin_unlock(&journal->j_state_lock);
/* And try again, without the barrier */
+ lock_buffer(bh);
set_buffer_uptodate(bh);
set_buffer_dirty(bh);
ret = submit_bh(WRITE, bh);
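
The new h_commit_sec/h_commit_nsec fields record the commit time in on-disk big-endian byte order. A user-space sketch of the same capture-and-convert step, assuming a hypothetical demo_commit_header layout (the kernel path uses current_kernel_time() and cpu_to_be64()/cpu_to_be32() rather than the libc helpers shown here):

#include <stdio.h>
#include <stdint.h>
#include <time.h>
#include <endian.h>

struct demo_commit_header {
        uint64_t h_commit_sec;          /* big-endian seconds */
        uint32_t h_commit_nsec;         /* big-endian nanoseconds */
};

int main(void)
{
        struct timespec now;
        struct demo_commit_header hdr;

        clock_gettime(CLOCK_REALTIME, &now);
        hdr.h_commit_sec  = htobe64((uint64_t)now.tv_sec);
        hdr.h_commit_nsec = htobe32((uint32_t)now.tv_nsec);

        printf("sec=%llu nsec=%u (stored big-endian)\n",
               (unsigned long long)be64toh(hdr.h_commit_sec),
               be32toh(hdr.h_commit_nsec));
        return 0;
}
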
diff --git a/fs/jbd2/recovery.c b/fs/jbd2/recovery.c
index 5d0405a9e7ca..058f50f65b76 100644
--- a/fs/jbd2/recovery.c
+++ b/fs/jbd2/recovery.c
@@ -344,6 +344,7 @@ static int calc_chksums(journal_t *journal, struct buffer_head *bh,
*crc32_sum = crc32_be(*crc32_sum, (void *)obh->b_data,
obh->b_size);
}
+ put_bh(obh);
}
return 0;
}
@@ -610,9 +611,8 @@ static int do_one_pass(journal_t *journal,
chksum_err = chksum_seen = 0;
if (info->end_transaction) {
- printk(KERN_ERR "JBD: Transaction %u "
- "found to be corrupt.\n",
- next_commit_ID - 1);
+ journal->j_failed_commit =
+ info->end_transaction;
brelse(bh);
break;
}
@@ -643,10 +643,8 @@ static int do_one_pass(journal_t *journal,
if (!JBD2_HAS_INCOMPAT_FEATURE(journal,
JBD2_FEATURE_INCOMPAT_ASYNC_COMMIT)){
- printk(KERN_ERR
- "JBD: Transaction %u "
- "found to be corrupt.\n",
- next_commit_ID);
+ journal->j_failed_commit =
+ next_commit_ID;
brelse(bh);
break;
}
diff --git a/fs/jfs/jfs_debug.c b/fs/jfs/jfs_debug.c
index bf6ab19b86ee..6a73de84bcef 100644
--- a/fs/jfs/jfs_debug.c
+++ b/fs/jfs/jfs_debug.c
@@ -21,6 +21,7 @@
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
#include <asm/uaccess.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
@@ -30,29 +31,19 @@
static struct proc_dir_entry *base;
#ifdef CONFIG_JFS_DEBUG
-static int loglevel_read(char *page, char **start, off_t off,
- int count, int *eof, void *data)
+static int jfs_loglevel_proc_show(struct seq_file *m, void *v)
{
- int len;
-
- len = sprintf(page, "%d\n", jfsloglevel);
-
- len -= off;
- *start = page + off;
-
- if (len > count)
- len = count;
- else
- *eof = 1;
-
- if (len < 0)
- len = 0;
+ seq_printf(m, "%d\n", jfsloglevel);
+ return 0;
+}
- return len;
+static int jfs_loglevel_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, jfs_loglevel_proc_show, NULL);
}
-static int loglevel_write(struct file *file, const char __user *buffer,
- unsigned long count, void *data)
+static ssize_t jfs_loglevel_proc_write(struct file *file,
+ const char __user *buffer, size_t count, loff_t *ppos)
{
char c;
@@ -65,22 +56,30 @@ static int loglevel_write(struct file *file, const char __user *buffer,
jfsloglevel = c - '0';
return count;
}
+
+static const struct file_operations jfs_loglevel_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = jfs_loglevel_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ .write = jfs_loglevel_proc_write,
+};
#endif
static struct {
const char *name;
- read_proc_t *read_fn;
- write_proc_t *write_fn;
+ const struct file_operations *proc_fops;
} Entries[] = {
#ifdef CONFIG_JFS_STATISTICS
- { "lmstats", jfs_lmstats_read, },
- { "txstats", jfs_txstats_read, },
- { "xtstat", jfs_xtstat_read, },
- { "mpstat", jfs_mpstat_read, },
+ { "lmstats", &jfs_lmstats_proc_fops, },
+ { "txstats", &jfs_txstats_proc_fops, },
+ { "xtstat", &jfs_xtstat_proc_fops, },
+ { "mpstat", &jfs_mpstat_proc_fops, },
#endif
#ifdef CONFIG_JFS_DEBUG
- { "TxAnchor", jfs_txanchor_read, },
- { "loglevel", loglevel_read, loglevel_write }
+ { "TxAnchor", &jfs_txanchor_proc_fops, },
+ { "loglevel", &jfs_loglevel_proc_fops }
#endif
};
#define NPROCENT ARRAY_SIZE(Entries)
@@ -93,13 +92,8 @@ void jfs_proc_init(void)
return;
base->owner = THIS_MODULE;
- for (i = 0; i < NPROCENT; i++) {
- struct proc_dir_entry *p;
- if ((p = create_proc_entry(Entries[i].name, 0, base))) {
- p->read_proc = Entries[i].read_fn;
- p->write_proc = Entries[i].write_fn;
- }
- }
+ for (i = 0; i < NPROCENT; i++)
+ proc_create(Entries[i].name, 0, base, Entries[i].proc_fops);
}
void jfs_proc_clean(void)
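
The pattern applied to every JFS /proc file here (and repeated in jfs_logmgr.c, jfs_metapage.c, jfs_txnmgr.c and jfs_xtree.c below) is the standard single_open() conversion: a show routine prints the whole file through seq_printf(), and seq_read()/seq_lseek()/single_release() replace the hand-rolled offset bookkeeping of the old read_proc handlers. A minimal sketch of the pattern as a stand-alone module (the "demo" names are illustrative; the interfaces are the ones the patch itself uses):

#include <linux/module.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>

static int demo_value = 42;

static int demo_proc_show(struct seq_file *m, void *v)
{
        seq_printf(m, "%d\n", demo_value);      /* emit the whole file */
        return 0;
}

static int demo_proc_open(struct inode *inode, struct file *file)
{
        return single_open(file, demo_proc_show, NULL);
}

static const struct file_operations demo_proc_fops = {
        .owner          = THIS_MODULE,
        .open           = demo_proc_open,
        .read           = seq_read,     /* seq_file does the offset handling */
        .llseek         = seq_lseek,
        .release        = single_release,
};

static int __init demo_init(void)
{
        proc_create("demo", 0, NULL, &demo_proc_fops);
        return 0;
}

static void __exit demo_exit(void)
{
        remove_proc_entry("demo", NULL);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");
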
diff --git a/fs/jfs/jfs_debug.h b/fs/jfs/jfs_debug.h
index 044c1e654cc0..eafd1300a00b 100644
--- a/fs/jfs/jfs_debug.h
+++ b/fs/jfs/jfs_debug.h
@@ -62,7 +62,7 @@ extern void jfs_proc_clean(void);
extern int jfsloglevel;
-extern int jfs_txanchor_read(char *, char **, off_t, int, int *, void *);
+extern const struct file_operations jfs_txanchor_proc_fops;
/* information message: e.g., configuration, major event */
#define jfs_info(fmt, arg...) do { \
@@ -105,10 +105,10 @@ extern int jfs_txanchor_read(char *, char **, off_t, int, int *, void *);
* ----------
*/
#ifdef CONFIG_JFS_STATISTICS
-extern int jfs_lmstats_read(char *, char **, off_t, int, int *, void *);
-extern int jfs_txstats_read(char *, char **, off_t, int, int *, void *);
-extern int jfs_mpstat_read(char *, char **, off_t, int, int *, void *);
-extern int jfs_xtstat_read(char *, char **, off_t, int, int *, void *);
+extern const struct file_operations jfs_lmstats_proc_fops;
+extern const struct file_operations jfs_txstats_proc_fops;
+extern const struct file_operations jfs_mpstat_proc_fops;
+extern const struct file_operations jfs_xtstat_proc_fops;
#define INCREMENT(x) ((x)++)
#define DECREMENT(x) ((x)--)
diff --git a/fs/jfs/jfs_dtree.h b/fs/jfs/jfs_dtree.h
index cdac2d5bafeb..2545bb317235 100644
--- a/fs/jfs/jfs_dtree.h
+++ b/fs/jfs/jfs_dtree.h
@@ -243,9 +243,6 @@ typedef union {
#define JFS_REMOVE 3
#define JFS_RENAME 4
-#define DIRENTSIZ(namlen) \
- ( (sizeof(struct dirent) - 2*(JFS_NAME_MAX+1) + 2*((namlen)+1) + 3) &~ 3 )
-
/*
* Maximum file offset for directories.
*/
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 734ec916beaf..d6363d8309d0 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -1520,7 +1520,7 @@ int diAlloc(struct inode *pip, bool dir, struct inode *ip)
jfs_error(ip->i_sb,
"diAlloc: can't find free bit "
"in wmap");
- return EIO;
+ return -EIO;
}
/* determine the inode number within the
diff --git a/fs/jfs/jfs_logmgr.c b/fs/jfs/jfs_logmgr.c
index 325a9679b95a..cd2ec2988b59 100644
--- a/fs/jfs/jfs_logmgr.c
+++ b/fs/jfs/jfs_logmgr.c
@@ -69,6 +69,7 @@
#include <linux/freezer.h>
#include <linux/delay.h>
#include <linux/mutex.h>
+#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
@@ -2503,13 +2504,9 @@ exit:
}
#ifdef CONFIG_JFS_STATISTICS
-int jfs_lmstats_read(char *buffer, char **start, off_t offset, int length,
- int *eof, void *data)
+static int jfs_lmstats_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
- off_t begin;
-
- len += sprintf(buffer,
+ seq_printf(m,
"JFS Logmgr stats\n"
"================\n"
"commits = %d\n"
@@ -2522,19 +2519,19 @@ int jfs_lmstats_read(char *buffer, char **start, off_t offset, int length,
lmStat.pagedone,
lmStat.full_page,
lmStat.partial_page);
+ return 0;
+}
- begin = offset;
- *start = buffer + begin;
- len -= begin;
-
- if (len > length)
- len = length;
- else
- *eof = 1;
-
- if (len < 0)
- len = 0;
-
- return len;
+static int jfs_lmstats_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, jfs_lmstats_proc_show, NULL);
}
+
+const struct file_operations jfs_lmstats_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = jfs_lmstats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif /* CONFIG_JFS_STATISTICS */
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index d1e64f2f2fcd..854ff0ec574f 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -19,10 +19,12 @@
#include <linux/fs.h>
#include <linux/mm.h>
+#include <linux/module.h>
#include <linux/bio.h>
#include <linux/init.h>
#include <linux/buffer_head.h>
#include <linux/mempool.h>
+#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_superblock.h"
#include "jfs_filsys.h"
@@ -804,13 +806,9 @@ void __invalidate_metapages(struct inode *ip, s64 addr, int len)
}
#ifdef CONFIG_JFS_STATISTICS
-int jfs_mpstat_read(char *buffer, char **start, off_t offset, int length,
- int *eof, void *data)
+static int jfs_mpstat_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
- off_t begin;
-
- len += sprintf(buffer,
+ seq_printf(m,
"JFS Metapage statistics\n"
"=======================\n"
"page allocations = %d\n"
@@ -819,19 +817,19 @@ int jfs_mpstat_read(char *buffer, char **start, off_t offset, int length,
mpStat.pagealloc,
mpStat.pagefree,
mpStat.lockwait);
+ return 0;
+}
- begin = offset;
- *start = buffer + begin;
- len -= begin;
-
- if (len > length)
- len = length;
- else
- *eof = 1;
-
- if (len < 0)
- len = 0;
-
- return len;
+static int jfs_mpstat_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, jfs_mpstat_proc_show, NULL);
}
+
+const struct file_operations jfs_mpstat_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = jfs_mpstat_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
diff --git a/fs/jfs/jfs_txnmgr.c b/fs/jfs/jfs_txnmgr.c
index e7c60ae6b5b2..f26e4d03ada5 100644
--- a/fs/jfs/jfs_txnmgr.c
+++ b/fs/jfs/jfs_txnmgr.c
@@ -49,6 +49,7 @@
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kthread.h>
+#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_inode.h"
#include "jfs_filsys.h"
@@ -3009,11 +3010,8 @@ int jfs_sync(void *arg)
}
#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_DEBUG)
-int jfs_txanchor_read(char *buffer, char **start, off_t offset, int length,
- int *eof, void *data)
+static int jfs_txanchor_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
- off_t begin;
char *freewait;
char *freelockwait;
char *lowlockwait;
@@ -3025,7 +3023,7 @@ int jfs_txanchor_read(char *buffer, char **start, off_t offset, int length,
lowlockwait =
waitqueue_active(&TxAnchor.lowlockwait) ? "active" : "empty";
- len += sprintf(buffer,
+ seq_printf(m,
"JFS TxAnchor\n"
"============\n"
"freetid = %d\n"
@@ -3044,31 +3042,27 @@ int jfs_txanchor_read(char *buffer, char **start, off_t offset, int length,
TxAnchor.tlocksInUse,
jfs_tlocks_low,
list_empty(&TxAnchor.unlock_queue) ? "" : "not ");
+ return 0;
+}
- begin = offset;
- *start = buffer + begin;
- len -= begin;
-
- if (len > length)
- len = length;
- else
- *eof = 1;
-
- if (len < 0)
- len = 0;
-
- return len;
+static int jfs_txanchor_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, jfs_txanchor_proc_show, NULL);
}
+
+const struct file_operations jfs_txanchor_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = jfs_txanchor_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
#if defined(CONFIG_PROC_FS) && defined(CONFIG_JFS_STATISTICS)
-int jfs_txstats_read(char *buffer, char **start, off_t offset, int length,
- int *eof, void *data)
+static int jfs_txstats_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
- off_t begin;
-
- len += sprintf(buffer,
+ seq_printf(m,
"JFS TxStats\n"
"===========\n"
"calls to txBegin = %d\n"
@@ -3089,19 +3083,19 @@ int jfs_txstats_read(char *buffer, char **start, off_t offset, int length,
TxStat.txBeginAnon_lockslow,
TxStat.txLockAlloc,
TxStat.txLockAlloc_freelock);
+ return 0;
+}
- begin = offset;
- *start = buffer + begin;
- len -= begin;
-
- if (len > length)
- len = length;
- else
- *eof = 1;
-
- if (len < 0)
- len = 0;
-
- return len;
+static int jfs_txstats_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, jfs_txstats_proc_show, NULL);
}
+
+const struct file_operations jfs_txstats_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = jfs_txstats_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index 5a61ebf2cbcc..ae3acafb447b 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -20,7 +20,9 @@
*/
#include <linux/fs.h>
+#include <linux/module.h>
#include <linux/quotaops.h>
+#include <linux/seq_file.h>
#include "jfs_incore.h"
#include "jfs_filsys.h"
#include "jfs_metapage.h"
@@ -4134,13 +4136,9 @@ s64 xtTruncate_pmap(tid_t tid, struct inode *ip, s64 committed_size)
}
#ifdef CONFIG_JFS_STATISTICS
-int jfs_xtstat_read(char *buffer, char **start, off_t offset, int length,
- int *eof, void *data)
+static int jfs_xtstat_proc_show(struct seq_file *m, void *v)
{
- int len = 0;
- off_t begin;
-
- len += sprintf(buffer,
+ seq_printf(m,
"JFS Xtree statistics\n"
"====================\n"
"searches = %d\n"
@@ -4149,19 +4147,19 @@ int jfs_xtstat_read(char *buffer, char **start, off_t offset, int length,
xtStat.search,
xtStat.fastSearch,
xtStat.split);
+ return 0;
+}
- begin = offset;
- *start = buffer + begin;
- len -= begin;
-
- if (len > length)
- len = length;
- else
- *eof = 1;
-
- if (len < 0)
- len = 0;
-
- return len;
+static int jfs_xtstat_proc_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, jfs_xtstat_proc_show, NULL);
}
+
+const struct file_operations jfs_xtstat_proc_fops = {
+ .owner = THIS_MODULE,
+ .open = jfs_xtstat_proc_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
#endif
diff --git a/fs/jfs/namei.c b/fs/jfs/namei.c
index 0ba6778edaa2..2aba82386810 100644
--- a/fs/jfs/namei.c
+++ b/fs/jfs/namei.c
@@ -1455,7 +1455,7 @@ static struct dentry *jfs_lookup(struct inode *dip, struct dentry *dentry, struc
free_UCSname(&key);
if (rc == -ENOENT) {
d_add(dentry, NULL);
- return ERR_PTR(0);
+ return NULL;
} else if (rc) {
jfs_err("jfs_lookup: dtSearch returned %d", rc);
return ERR_PTR(rc);
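
ERR_PTR(0) is simply NULL, so returning NULL after d_add() states the intent of jfs_lookup() directly. A user-space copy of the ERR_PTR()/IS_ERR()/PTR_ERR() convention, mirroring include/linux/err.h, that shows both cases:

#include <stdio.h>

#define MAX_ERRNO       4095
#define IS_ERR_VALUE(x) ((x) >= (unsigned long)-MAX_ERRNO)

static inline void *ERR_PTR(long error)
{
        return (void *)error;
}

static inline long PTR_ERR(const void *ptr)
{
        return (long)ptr;
}

static inline long IS_ERR(const void *ptr)
{
        return IS_ERR_VALUE((unsigned long)ptr);
}

int main(void)
{
        void *ok  = ERR_PTR(0);                 /* same bits as NULL */
        void *bad = ERR_PTR(-2 /* -ENOENT */);  /* encodes an errno */

        printf("ERR_PTR(0) == NULL: %d\n", ok == NULL);
        printf("IS_ERR(ERR_PTR(-ENOENT)) = %ld, PTR_ERR = %ld\n",
               IS_ERR(bad), PTR_ERR(bad));
        return 0;
}
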
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index 50ea65451732..0288e6d7936a 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -499,7 +499,7 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
inode = jfs_iget(sb, ROOT_I);
if (IS_ERR(inode)) {
ret = PTR_ERR(inode);
- goto out_no_root;
+ goto out_no_rw;
}
sb->s_root = d_alloc_root(inode);
if (!sb->s_root)
@@ -521,9 +521,8 @@ static int jfs_fill_super(struct super_block *sb, void *data, int silent)
return 0;
out_no_root:
- jfs_err("jfs_read_super: get root inode failed");
- if (inode)
- iput(inode);
+ jfs_err("jfs_read_super: get root dentry failed");
+ iput(inode);
out_no_rw:
rc = jfs_umount(sb);
diff --git a/fs/libfs.c b/fs/libfs.c
index b004dfadd891..892d41cb3382 100644
--- a/fs/libfs.c
+++ b/fs/libfs.c
@@ -528,6 +528,23 @@ ssize_t simple_read_from_buffer(void __user *to, size_t count, loff_t *ppos,
return count;
}
+ssize_t memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
+ const void *from, size_t available)
+{
+ loff_t pos = *ppos;
+
+ if (pos < 0)
+ return -EINVAL;
+ if (pos >= available)
+ return 0;
+ if (count > available - pos)
+ count = available - pos;
+ memcpy(to, from + pos, count);
+ *ppos = pos + count;
+
+ return count;
+}
+
/*
* Transaction based IO.
* The file expects a single write which triggers the transaction, and then
@@ -800,6 +817,7 @@ EXPORT_SYMBOL(simple_statfs);
EXPORT_SYMBOL(simple_sync_file);
EXPORT_SYMBOL(simple_unlink);
EXPORT_SYMBOL(simple_read_from_buffer);
+EXPORT_SYMBOL(memory_read_from_buffer);
EXPORT_SYMBOL(simple_transaction_get);
EXPORT_SYMBOL(simple_transaction_read);
EXPORT_SYMBOL(simple_transaction_release);
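
The new memory_read_from_buffer() mirrors simple_read_from_buffer() but copies into a kernel-side buffer with memcpy() instead of copy_to_user(). A stand-alone demo of the same bounds logic (loff_t spelled out as long long so it builds in user space, and -EINVAL written as -22):

#include <stdio.h>
#include <string.h>

typedef long long loff_t;

static long memory_read_from_buffer(void *to, size_t count, loff_t *ppos,
                                    const void *from, size_t available)
{
        loff_t pos = *ppos;

        if (pos < 0)
                return -22;                     /* -EINVAL */
        if ((size_t)pos >= available)
                return 0;                       /* EOF */
        if (count > available - pos)
                count = available - pos;        /* clamp to what is left */
        memcpy(to, (const char *)from + pos, count);
        *ppos = pos + count;

        return count;
}

int main(void)
{
        const char src[] = "firmware blob";
        char dst[8];
        loff_t pos = 0;
        long n;

        while ((n = memory_read_from_buffer(dst, sizeof(dst) - 1, &pos,
                                            src, strlen(src))) > 0) {
                dst[n] = '\0';
                printf("read %ld bytes: \"%s\" (pos now %lld)\n", n, dst, pos);
        }
        return 0;
}
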
diff --git a/fs/namei.c b/fs/namei.c
index 32fd9655485b..c7e43536c49a 100644
--- a/fs/namei.c
+++ b/fs/namei.c
@@ -2003,18 +2003,22 @@ struct dentry *lookup_create(struct nameidata *nd, int is_dir)
if (IS_ERR(dentry))
goto fail;
+ if (dentry->d_inode)
+ goto eexist;
/*
* Special case - lookup gave negative, but... we had foo/bar/
* From the vfs_mknod() POV we just have a negative dentry -
* all is fine. Let's be bastards - you had / on the end, you've
* been asking for (non-existent) directory. -ENOENT for you.
*/
- if (!is_dir && nd->last.name[nd->last.len] && !dentry->d_inode)
- goto enoent;
+ if (unlikely(!is_dir && nd->last.name[nd->last.len])) {
+ dput(dentry);
+ dentry = ERR_PTR(-ENOENT);
+ }
return dentry;
-enoent:
+eexist:
dput(dentry);
- dentry = ERR_PTR(-ENOENT);
+ dentry = ERR_PTR(-EEXIST);
fail:
return dentry;
}
diff --git a/fs/nfs/callback.c b/fs/nfs/callback.c
index 5606ae3d72d3..c1e7c8300629 100644
--- a/fs/nfs/callback.c
+++ b/fs/nfs/callback.c
@@ -182,7 +182,7 @@ static int nfs_callback_authenticate(struct svc_rqst *rqstp)
if (clp == NULL)
return SVC_DROP;
- dprintk("%s: %s NFSv4 callback!\n", __FUNCTION__,
+ dprintk("%s: %s NFSv4 callback!\n", __func__,
svc_print_addr(rqstp, buf, sizeof(buf)));
nfs_put_client(clp);
diff --git a/fs/nfs/callback_proc.c b/fs/nfs/callback_proc.c
index 15f7785048d3..f7e83e23cf9f 100644
--- a/fs/nfs/callback_proc.c
+++ b/fs/nfs/callback_proc.c
@@ -57,7 +57,7 @@ out_iput:
out_putclient:
nfs_put_client(clp);
out:
- dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(res->status));
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(res->status));
return res->status;
}
@@ -98,6 +98,6 @@ __be32 nfs4_callback_recall(struct cb_recallargs *args, void *dummy)
nfs_put_client(prev);
} while (clp != NULL);
out:
- dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(res));
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(res));
return res;
}
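
The NFS changes that follow repeatedly swap __FUNCTION__ for __func__: the former is a GCC extension kept only for compatibility, the latter is standard C99, and both expand to the enclosing function's name. A two-line check:

#include <stdio.h>

static void show_name(void)
{
        printf("standard C99:  %s\n", __func__);
#ifdef __GNUC__
        printf("GCC extension: %s\n", __FUNCTION__);
#endif
}

int main(void)
{
        show_name();
        return 0;
}
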
diff --git a/fs/nfs/callback_xdr.c b/fs/nfs/callback_xdr.c
index 13619d24f023..dd0ef34b5845 100644
--- a/fs/nfs/callback_xdr.c
+++ b/fs/nfs/callback_xdr.c
@@ -141,7 +141,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
/* We do not like overly long tags! */
if (hdr->taglen > CB_OP_TAGLEN_MAXSZ - 12) {
printk("NFSv4 CALLBACK %s: client sent tag of length %u\n",
- __FUNCTION__, hdr->taglen);
+ __func__, hdr->taglen);
return htonl(NFS4ERR_RESOURCE);
}
p = read_buf(xdr, 12);
@@ -151,7 +151,7 @@ static __be32 decode_compound_hdr_arg(struct xdr_stream *xdr, struct cb_compound
/* Check minor version is zero. */
if (minor_version != 0) {
printk(KERN_WARNING "%s: NFSv4 server callback with illegal minor version %u!\n",
- __FUNCTION__, minor_version);
+ __func__, minor_version);
return htonl(NFS4ERR_MINOR_VERS_MISMATCH);
}
hdr->callback_ident = ntohl(*p++);
@@ -179,7 +179,7 @@ static __be32 decode_getattr_args(struct svc_rqst *rqstp, struct xdr_stream *xdr
args->addr = svc_addr(rqstp);
status = decode_bitmap(xdr, args->bitmap);
out:
- dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(status));
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
return status;
}
@@ -200,7 +200,7 @@ static __be32 decode_recall_args(struct svc_rqst *rqstp, struct xdr_stream *xdr,
args->truncate = ntohl(*p);
status = decode_fh(xdr, &args->fh);
out:
- dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(status));
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
return status;
}
@@ -349,7 +349,7 @@ static __be32 encode_getattr_res(struct svc_rqst *rqstp, struct xdr_stream *xdr,
status = encode_attr_mtime(xdr, res->bitmap, &res->mtime);
*savep = htonl((unsigned int)((char *)xdr->p - (char *)(savep+1)));
out:
- dprintk("%s: exit with status = %d\n", __FUNCTION__, ntohl(status));
+ dprintk("%s: exit with status = %d\n", __func__, ntohl(status));
return status;
}
@@ -363,7 +363,7 @@ static __be32 process_op(struct svc_rqst *rqstp,
long maxlen;
__be32 res;
- dprintk("%s: start\n", __FUNCTION__);
+ dprintk("%s: start\n", __func__);
status = decode_op_hdr(xdr_in, &op_nr);
if (likely(status == 0)) {
switch (op_nr) {
@@ -392,7 +392,7 @@ static __be32 process_op(struct svc_rqst *rqstp,
status = res;
if (op->encode_res != NULL && status == 0)
status = op->encode_res(rqstp, xdr_out, resp);
- dprintk("%s: done, status = %d\n", __FUNCTION__, ntohl(status));
+ dprintk("%s: done, status = %d\n", __func__, ntohl(status));
return status;
}
@@ -401,37 +401,37 @@ static __be32 process_op(struct svc_rqst *rqstp,
*/
static __be32 nfs4_callback_compound(struct svc_rqst *rqstp, void *argp, void *resp)
{
- struct cb_compound_hdr_arg hdr_arg;
- struct cb_compound_hdr_res hdr_res;
+ struct cb_compound_hdr_arg hdr_arg = { 0 };
+ struct cb_compound_hdr_res hdr_res = { NULL };
struct xdr_stream xdr_in, xdr_out;
__be32 *p;
__be32 status;
- unsigned int nops = 1;
+ unsigned int nops = 0;
- dprintk("%s: start\n", __FUNCTION__);
+ dprintk("%s: start\n", __func__);
xdr_init_decode(&xdr_in, &rqstp->rq_arg, rqstp->rq_arg.head[0].iov_base);
p = (__be32*)((char *)rqstp->rq_res.head[0].iov_base + rqstp->rq_res.head[0].iov_len);
xdr_init_encode(&xdr_out, &rqstp->rq_res, p);
- decode_compound_hdr_arg(&xdr_in, &hdr_arg);
+ status = decode_compound_hdr_arg(&xdr_in, &hdr_arg);
+ if (status == __constant_htonl(NFS4ERR_RESOURCE))
+ return rpc_garbage_args;
+
hdr_res.taglen = hdr_arg.taglen;
hdr_res.tag = hdr_arg.tag;
- hdr_res.nops = NULL;
- encode_compound_hdr_res(&xdr_out, &hdr_res);
+ if (encode_compound_hdr_res(&xdr_out, &hdr_res) != 0)
+ return rpc_system_err;
- for (;;) {
+ while (status == 0 && nops != hdr_arg.nops) {
status = process_op(rqstp, &xdr_in, argp, &xdr_out, resp);
- if (status != 0)
- break;
- if (nops == hdr_arg.nops)
- break;
nops++;
}
+
*hdr_res.status = status;
*hdr_res.nops = htonl(nops);
- dprintk("%s: done, status = %u\n", __FUNCTION__, ntohl(status));
+ dprintk("%s: done, status = %u\n", __func__, ntohl(status));
return rpc_success;
}
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 89ac5bb0401c..f2a092ca69b5 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -488,7 +488,7 @@ static int nfs_create_rpc_client(struct nfs_client *clp,
clnt = rpc_create(&args);
if (IS_ERR(clnt)) {
dprintk("%s: cannot create RPC client. Error = %ld\n",
- __FUNCTION__, PTR_ERR(clnt));
+ __func__, PTR_ERR(clnt));
return PTR_ERR(clnt);
}
@@ -576,7 +576,7 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
server->client = rpc_clone_client(clp->cl_rpcclient);
if (IS_ERR(server->client)) {
- dprintk("%s: couldn't create rpc_client!\n", __FUNCTION__);
+ dprintk("%s: couldn't create rpc_client!\n", __func__);
return PTR_ERR(server->client);
}
@@ -590,7 +590,7 @@ static int nfs_init_server_rpcclient(struct nfs_server *server,
auth = rpcauth_create(pseudoflavour, server->client);
if (IS_ERR(auth)) {
- dprintk("%s: couldn't create credcache!\n", __FUNCTION__);
+ dprintk("%s: couldn't create credcache!\n", __func__);
return PTR_ERR(auth);
}
}
@@ -985,7 +985,7 @@ static int nfs4_init_client(struct nfs_client *clp,
error = nfs_idmap_new(clp);
if (error < 0) {
dprintk("%s: failed to create idmapper. Error = %d\n",
- __FUNCTION__, error);
+ __func__, error);
goto error;
}
__set_bit(NFS_CS_IDMAP, &clp->cl_res_state);
diff --git a/fs/nfs/delegation.c b/fs/nfs/delegation.c
index 00a5e4405e16..cc563cfa6940 100644
--- a/fs/nfs/delegation.c
+++ b/fs/nfs/delegation.c
@@ -60,7 +60,7 @@ static int nfs_delegation_claim_locks(struct nfs_open_context *ctx, struct nfs4_
switch (status) {
default:
printk(KERN_ERR "%s: unhandled error %d.\n",
- __FUNCTION__, status);
+ __func__, status);
case -NFS4ERR_EXPIRED:
/* kill_proc(fl->fl_pid, SIGLOST, 1); */
case -NFS4ERR_STALE_CLIENTID:
@@ -186,7 +186,7 @@ int nfs_inode_set_delegation(struct inode *inode, struct rpc_cred *cred, struct
*/
dfprintk(FILE, "%s: server %s handed out "
"a duplicate delegation!\n",
- __FUNCTION__, clp->cl_hostname);
+ __func__, clp->cl_hostname);
if (delegation->type <= nfsi->delegation->type) {
freeme = delegation;
delegation = NULL;
diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c
index f288b3ecab4a..58d43daec084 100644
--- a/fs/nfs/dir.c
+++ b/fs/nfs/dir.c
@@ -180,7 +180,7 @@ int nfs_readdir_filler(nfs_readdir_descriptor_t *desc, struct page *page)
int error;
dfprintk(DIRCACHE, "NFS: %s: reading cookie %Lu into page %lu\n",
- __FUNCTION__, (long long)desc->entry->cookie,
+ __func__, (long long)desc->entry->cookie,
page->index);
again:
@@ -256,7 +256,7 @@ int find_dirent(nfs_readdir_descriptor_t *desc)
while((status = dir_decode(desc)) == 0) {
dfprintk(DIRCACHE, "NFS: %s: examining cookie %Lu\n",
- __FUNCTION__, (unsigned long long)entry->cookie);
+ __func__, (unsigned long long)entry->cookie);
if (entry->prev_cookie == *desc->dir_cookie)
break;
if (loop_count++ > 200) {
@@ -315,7 +315,7 @@ int find_dirent_page(nfs_readdir_descriptor_t *desc)
int status;
dfprintk(DIRCACHE, "NFS: %s: searching page %ld for target %Lu\n",
- __FUNCTION__, desc->page_index,
+ __func__, desc->page_index,
(long long) *desc->dir_cookie);
/* If we find the page in the page_cache, we cannot be sure
@@ -339,7 +339,7 @@ int find_dirent_page(nfs_readdir_descriptor_t *desc)
if (status < 0)
dir_page_release(desc);
out:
- dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __FUNCTION__, status);
+ dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, status);
return status;
}
@@ -380,7 +380,7 @@ int readdir_search_pagecache(nfs_readdir_descriptor_t *desc)
}
}
- dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __FUNCTION__, res);
+ dfprintk(DIRCACHE, "NFS: %s: returns %d\n", __func__, res);
return res;
}
@@ -506,7 +506,7 @@ int uncached_readdir(nfs_readdir_descriptor_t *desc, void *dirent,
desc->entry->eof = 0;
out:
dfprintk(DIRCACHE, "NFS: %s: returns %d\n",
- __FUNCTION__, status);
+ __func__, status);
return status;
out_release:
dir_page_release(desc);
@@ -780,7 +780,7 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
if (is_bad_inode(inode)) {
dfprintk(LOOKUPCACHE, "%s: %s/%s has dud inode\n",
- __FUNCTION__, dentry->d_parent->d_name.name,
+ __func__, dentry->d_parent->d_name.name,
dentry->d_name.name);
goto out_bad;
}
@@ -808,7 +808,7 @@ static int nfs_lookup_revalidate(struct dentry * dentry, struct nameidata *nd)
unlock_kernel();
dput(parent);
dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is valid\n",
- __FUNCTION__, dentry->d_parent->d_name.name,
+ __func__, dentry->d_parent->d_name.name,
dentry->d_name.name);
return 1;
out_zap_parent:
@@ -827,7 +827,7 @@ out_zap_parent:
unlock_kernel();
dput(parent);
dfprintk(LOOKUPCACHE, "NFS: %s(%s/%s) is invalid\n",
- __FUNCTION__, dentry->d_parent->d_name.name,
+ __func__, dentry->d_parent->d_name.name,
dentry->d_name.name);
return 0;
}
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 3536b01164f9..d84a3d8f32af 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -526,7 +526,7 @@ static int do_vfs_lock(struct file *file, struct file_lock *fl)
if (res < 0)
dprintk(KERN_WARNING "%s: VFS is out of sync with lock manager"
" - error %d!\n",
- __FUNCTION__, res);
+ __func__, res);
return res;
}
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 5cb3345eb694..596c5d8e86f4 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -541,8 +541,7 @@ static void __put_nfs_open_context(struct nfs_open_context *ctx, int wait)
}
if (ctx->cred != NULL)
put_rpccred(ctx->cred);
- dput(ctx->path.dentry);
- mntput(ctx->path.mnt);
+ path_put(&ctx->path);
kfree(ctx);
}
@@ -707,6 +706,13 @@ int nfs_attribute_timeout(struct inode *inode)
if (nfs_have_delegation(inode, FMODE_READ))
return 0;
+ /*
+ * Special case: if the attribute timeout is set to 0, then always
+ * treat the cache as having expired (unless holding
+ * a delegation).
+ */
+ if (nfsi->attrtimeo == 0)
+ return 1;
return !time_in_range(jiffies, nfsi->read_cache_jiffies, nfsi->read_cache_jiffies + nfsi->attrtimeo);
}
@@ -995,7 +1001,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
unsigned long now = jiffies;
dfprintk(VFS, "NFS: %s(%s/%ld ct=%d info=0x%x)\n",
- __FUNCTION__, inode->i_sb->s_id, inode->i_ino,
+ __func__, inode->i_sb->s_id, inode->i_ino,
atomic_read(&inode->i_count), fattr->valid);
if (nfsi->fileid != fattr->fileid)
@@ -1119,7 +1125,7 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
* Big trouble! The inode has become a different object.
*/
printk(KERN_DEBUG "%s: inode %ld mode changed, %07o to %07o\n",
- __FUNCTION__, inode->i_ino, inode->i_mode, fattr->mode);
+ __func__, inode->i_ino, inode->i_mode, fattr->mode);
out_err:
/*
* No need to worry about unhashing the dentry, as the
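
Several NFS paths in this patch replace a dput()/mntput() pair with path_put(). A user-space sketch of why the single helper is preferred (all demo_* names are illustrative; in the kernel, path_put() performs dput(path->dentry) followed by mntput(path->mnt), so the two reference drops cannot get out of step):

#include <stdio.h>

struct demo_ref { int count; };

struct demo_path {
        struct demo_ref *dentry;
        struct demo_ref *mnt;
};

static void demo_put(struct demo_ref *r)
{
        r->count--;
}

static void demo_path_put(struct demo_path *p)
{
        demo_put(p->dentry);    /* dput()   */
        demo_put(p->mnt);       /* mntput() */
}

int main(void)
{
        struct demo_ref d = { 1 }, m = { 1 };
        struct demo_path path = { &d, &m };

        demo_path_put(&path);
        printf("dentry refs=%d, mnt refs=%d\n", d.count, m.count);
        return 0;
}
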
diff --git a/fs/nfs/namespace.c b/fs/nfs/namespace.c
index af4d0f1e402c..2f285ef76399 100644
--- a/fs/nfs/namespace.c
+++ b/fs/nfs/namespace.c
@@ -106,7 +106,7 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
dprintk("--> nfs_follow_mountpoint()\n");
BUG_ON(IS_ROOT(dentry));
- dprintk("%s: enter\n", __FUNCTION__);
+ dprintk("%s: enter\n", __func__);
dput(nd->path.dentry);
nd->path.dentry = dget(dentry);
@@ -137,13 +137,12 @@ static void * nfs_follow_mountpoint(struct dentry *dentry, struct nameidata *nd)
goto out_follow;
goto out_err;
}
- mntput(nd->path.mnt);
- dput(nd->path.dentry);
+ path_put(&nd->path);
nd->path.mnt = mnt;
nd->path.dentry = dget(mnt->mnt_root);
schedule_delayed_work(&nfs_automount_task, nfs_mountpoint_expiry_timeout);
out:
- dprintk("%s: done, returned %d\n", __FUNCTION__, err);
+ dprintk("%s: done, returned %d\n", __func__, err);
dprintk("<-- nfs_follow_mountpoint() = %d\n", err);
return ERR_PTR(err);
@@ -230,7 +229,7 @@ static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
dprintk("--> nfs_do_submount()\n");
- dprintk("%s: submounting on %s/%s\n", __FUNCTION__,
+ dprintk("%s: submounting on %s/%s\n", __func__,
dentry->d_parent->d_name.name,
dentry->d_name.name);
if (page == NULL)
@@ -243,7 +242,7 @@ static struct vfsmount *nfs_do_submount(const struct vfsmount *mnt_parent,
free_page:
free_page((unsigned long)page);
out:
- dprintk("%s: done\n", __FUNCTION__);
+ dprintk("%s: done\n", __func__);
dprintk("<-- nfs_do_submount() = %p\n", mnt);
return mnt;
diff --git a/fs/nfs/nfs3proc.c b/fs/nfs/nfs3proc.c
index 549dbce714a4..c3523ad03ed1 100644
--- a/fs/nfs/nfs3proc.c
+++ b/fs/nfs/nfs3proc.c
@@ -63,15 +63,15 @@ do_proc_get_root(struct rpc_clnt *client, struct nfs_fh *fhandle,
};
int status;
- dprintk("%s: call fsinfo\n", __FUNCTION__);
+ dprintk("%s: call fsinfo\n", __func__);
nfs_fattr_init(info->fattr);
status = rpc_call_sync(client, &msg, 0);
- dprintk("%s: reply fsinfo: %d\n", __FUNCTION__, status);
+ dprintk("%s: reply fsinfo: %d\n", __func__, status);
if (!(info->fattr->valid & NFS_ATTR_FATTR)) {
msg.rpc_proc = &nfs3_procedures[NFS3PROC_GETATTR];
msg.rpc_resp = info->fattr;
status = rpc_call_sync(client, &msg, 0);
- dprintk("%s: reply getattr: %d\n", __FUNCTION__, status);
+ dprintk("%s: reply getattr: %d\n", __func__, status);
}
return status;
}
diff --git a/fs/nfs/nfs4_fs.h b/fs/nfs/nfs4_fs.h
index bd1b9d663fb9..ea790645fda6 100644
--- a/fs/nfs/nfs4_fs.h
+++ b/fs/nfs/nfs4_fs.h
@@ -206,7 +206,6 @@ struct rpc_cred *nfs4_get_renew_cred(struct nfs_client *clp);
extern struct nfs4_state_owner * nfs4_get_state_owner(struct nfs_server *, struct rpc_cred *);
extern void nfs4_put_state_owner(struct nfs4_state_owner *);
-extern void nfs4_drop_state_owner(struct nfs4_state_owner *);
extern struct nfs4_state * nfs4_get_open_state(struct inode *, struct nfs4_state_owner *);
extern void nfs4_put_open_state(struct nfs4_state *);
extern void nfs4_close_state(struct path *, struct nfs4_state *, mode_t);
diff --git a/fs/nfs/nfs4namespace.c b/fs/nfs/nfs4namespace.c
index 5f9ba41ed5bf..b112857301f7 100644
--- a/fs/nfs/nfs4namespace.c
+++ b/fs/nfs/nfs4namespace.c
@@ -86,7 +86,7 @@ static int nfs4_validate_fspath(const struct vfsmount *mnt_parent,
if (strncmp(path, fs_path, strlen(fs_path)) != 0) {
dprintk("%s: path %s does not begin with fsroot %s\n",
- __FUNCTION__, path, fs_path);
+ __func__, path, fs_path);
return -ENOENT;
}
@@ -134,7 +134,7 @@ static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
if (locations == NULL || locations->nlocations <= 0)
goto out;
- dprintk("%s: referral at %s/%s\n", __FUNCTION__,
+ dprintk("%s: referral at %s/%s\n", __func__,
dentry->d_parent->d_name.name, dentry->d_name.name);
page = (char *) __get_free_page(GFP_USER);
@@ -204,7 +204,7 @@ static struct vfsmount *nfs_follow_referral(const struct vfsmount *mnt_parent,
out:
free_page((unsigned long) page);
free_page((unsigned long) page2);
- dprintk("%s: done\n", __FUNCTION__);
+ dprintk("%s: done\n", __func__);
return mnt;
}
@@ -223,7 +223,7 @@ struct vfsmount *nfs_do_refmount(const struct vfsmount *mnt_parent, struct dentr
int err;
/* BUG_ON(IS_ROOT(dentry)); */
- dprintk("%s: enter\n", __FUNCTION__);
+ dprintk("%s: enter\n", __func__);
page = alloc_page(GFP_KERNEL);
if (page == NULL)
@@ -238,7 +238,7 @@ struct vfsmount *nfs_do_refmount(const struct vfsmount *mnt_parent, struct dentr
parent = dget_parent(dentry);
dprintk("%s: getting locations for %s/%s\n",
- __FUNCTION__, parent->d_name.name, dentry->d_name.name);
+ __func__, parent->d_name.name, dentry->d_name.name);
err = nfs4_proc_fs_locations(parent->d_inode, &dentry->d_name, fs_locations, page);
dput(parent);
@@ -252,6 +252,6 @@ out_free:
__free_page(page);
kfree(fs_locations);
out:
- dprintk("%s: done\n", __FUNCTION__);
+ dprintk("%s: done\n", __func__);
return mnt;
}
diff --git a/fs/nfs/nfs4proc.c b/fs/nfs/nfs4proc.c
index dbc09271af02..1293e0acd82b 100644
--- a/fs/nfs/nfs4proc.c
+++ b/fs/nfs/nfs4proc.c
@@ -73,7 +73,7 @@ int nfs4_map_errors(int err)
{
if (err < -1000) {
dprintk("%s could not handle NFSv4 error %d\n",
- __FUNCTION__, -err);
+ __func__, -err);
return -EIO;
}
return err;
@@ -306,8 +306,7 @@ static void nfs4_opendata_free(struct kref *kref)
nfs4_put_open_state(p->state);
nfs4_put_state_owner(p->owner);
dput(p->dir);
- dput(p->path.dentry);
- mntput(p->path.mnt);
+ path_put(&p->path);
kfree(p);
}
@@ -1210,8 +1209,7 @@ static void nfs4_free_closedata(void *data)
nfs4_put_open_state(calldata->state);
nfs_free_seqid(calldata->arg.seqid);
nfs4_put_state_owner(sp);
- dput(calldata->path.dentry);
- mntput(calldata->path.mnt);
+ path_put(&calldata->path);
kfree(calldata);
}
@@ -1578,7 +1576,7 @@ static int nfs4_get_referral(struct inode *dir, const struct qstr *name, struct
goto out;
/* Make sure server returned a different fsid for the referral */
if (nfs_fsid_equal(&NFS_SERVER(dir)->fsid, &locations->fattr.fsid)) {
- dprintk("%s: server did not return a different fsid for a referral at %s\n", __FUNCTION__, name->name);
+ dprintk("%s: server did not return a different fsid for a referral at %s\n", __func__, name->name);
status = -EIO;
goto out;
}
@@ -2211,7 +2209,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
};
int status;
- dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __FUNCTION__,
+ dprintk("%s: dentry = %s/%s, cookie = %Lu\n", __func__,
dentry->d_parent->d_name.name,
dentry->d_name.name,
(unsigned long long)cookie);
@@ -2223,7 +2221,7 @@ static int _nfs4_proc_readdir(struct dentry *dentry, struct rpc_cred *cred,
nfs_invalidate_atime(dir);
- dprintk("%s: returns %d\n", __FUNCTION__, status);
+ dprintk("%s: returns %d\n", __func__, status);
return status;
}
@@ -3342,7 +3340,7 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
struct nfs4_lockdata *data = calldata;
struct nfs4_state *state = data->lsp->ls_state;
- dprintk("%s: begin!\n", __FUNCTION__);
+ dprintk("%s: begin!\n", __func__);
if (nfs_wait_on_sequence(data->arg.lock_seqid, task) != 0)
return;
/* Do we need to do an open_to_lock_owner? */
@@ -3356,14 +3354,14 @@ static void nfs4_lock_prepare(struct rpc_task *task, void *calldata)
data->arg.new_lock_owner = 0;
data->timestamp = jiffies;
rpc_call_start(task);
- dprintk("%s: done!, ret = %d\n", __FUNCTION__, data->rpc_status);
+ dprintk("%s: done!, ret = %d\n", __func__, data->rpc_status);
}
static void nfs4_lock_done(struct rpc_task *task, void *calldata)
{
struct nfs4_lockdata *data = calldata;
- dprintk("%s: begin!\n", __FUNCTION__);
+ dprintk("%s: begin!\n", __func__);
data->rpc_status = task->tk_status;
if (RPC_ASSASSINATED(task))
@@ -3381,14 +3379,14 @@ static void nfs4_lock_done(struct rpc_task *task, void *calldata)
renew_lease(NFS_SERVER(data->ctx->path.dentry->d_inode), data->timestamp);
}
out:
- dprintk("%s: done, ret = %d!\n", __FUNCTION__, data->rpc_status);
+ dprintk("%s: done, ret = %d!\n", __func__, data->rpc_status);
}
static void nfs4_lock_release(void *calldata)
{
struct nfs4_lockdata *data = calldata;
- dprintk("%s: begin!\n", __FUNCTION__);
+ dprintk("%s: begin!\n", __func__);
nfs_free_seqid(data->arg.open_seqid);
if (data->cancelled != 0) {
struct rpc_task *task;
@@ -3396,13 +3394,13 @@ static void nfs4_lock_release(void *calldata)
data->arg.lock_seqid);
if (!IS_ERR(task))
rpc_put_task(task);
- dprintk("%s: cancelling lock!\n", __FUNCTION__);
+ dprintk("%s: cancelling lock!\n", __func__);
} else
nfs_free_seqid(data->arg.lock_seqid);
nfs4_put_lock_state(data->lsp);
put_nfs_open_context(data->ctx);
kfree(data);
- dprintk("%s: done!\n", __FUNCTION__);
+ dprintk("%s: done!\n", __func__);
}
static const struct rpc_call_ops nfs4_lock_ops = {
@@ -3428,7 +3426,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
};
int ret;
- dprintk("%s: begin!\n", __FUNCTION__);
+ dprintk("%s: begin!\n", __func__);
data = nfs4_alloc_lockdata(fl, nfs_file_open_context(fl->fl_file),
fl->fl_u.nfs4_fl.owner);
if (data == NULL)
@@ -3451,7 +3449,7 @@ static int _nfs4_do_setlk(struct nfs4_state *state, int cmd, struct file_lock *f
} else
data->cancelled = 1;
rpc_put_task(task);
- dprintk("%s: done, ret = %d!\n", __FUNCTION__, ret);
+ dprintk("%s: done, ret = %d!\n", __func__, ret);
return ret;
}
@@ -3527,7 +3525,7 @@ static int _nfs4_proc_setlk(struct nfs4_state *state, int cmd, struct file_lock
/* Note: we always want to sleep here! */
request->fl_flags = fl_flags | FL_SLEEP;
if (do_vfs_lock(request->fl_file, request) < 0)
- printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __FUNCTION__);
+ printk(KERN_WARNING "%s: VFS is out of sync with lock manager!\n", __func__);
out_unlock:
up_read(&clp->cl_sem);
out:
@@ -3665,12 +3663,12 @@ int nfs4_proc_fs_locations(struct inode *dir, const struct qstr *name,
};
int status;
- dprintk("%s: start\n", __FUNCTION__);
+ dprintk("%s: start\n", __func__);
nfs_fattr_init(&fs_locations->fattr);
fs_locations->server = server;
fs_locations->nlocations = 0;
status = rpc_call_sync(server->client, &msg, 0);
- dprintk("%s: returned status = %d\n", __FUNCTION__, status);
+ dprintk("%s: returned status = %d\n", __func__, status);
return status;
}
diff --git a/fs/nfs/nfs4renewd.c b/fs/nfs/nfs4renewd.c
index 5e2e4af1a0e6..3305acbbe2ae 100644
--- a/fs/nfs/nfs4renewd.c
+++ b/fs/nfs/nfs4renewd.c
@@ -66,7 +66,7 @@ nfs4_renew_state(struct work_struct *work)
unsigned long last, now;
down_read(&clp->cl_sem);
- dprintk("%s: start\n", __FUNCTION__);
+ dprintk("%s: start\n", __func__);
/* Are there any active superblocks? */
if (list_empty(&clp->cl_superblocks))
goto out;
@@ -92,17 +92,17 @@ nfs4_renew_state(struct work_struct *work)
spin_lock(&clp->cl_lock);
} else
dprintk("%s: failed to call renewd. Reason: lease not expired \n",
- __FUNCTION__);
+ __func__);
if (timeout < 5 * HZ) /* safeguard */
timeout = 5 * HZ;
dprintk("%s: requeueing work. Lease period = %ld\n",
- __FUNCTION__, (timeout + HZ - 1) / HZ);
+ __func__, (timeout + HZ - 1) / HZ);
cancel_delayed_work(&clp->cl_renewd);
schedule_delayed_work(&clp->cl_renewd, timeout);
spin_unlock(&clp->cl_lock);
out:
up_read(&clp->cl_sem);
- dprintk("%s: done\n", __FUNCTION__);
+ dprintk("%s: done\n", __func__);
}
/* Must be called with clp->cl_sem locked for writes */
@@ -117,7 +117,7 @@ nfs4_schedule_state_renewal(struct nfs_client *clp)
if (timeout < 5 * HZ)
timeout = 5 * HZ;
dprintk("%s: requeueing work. Lease period = %ld\n",
- __FUNCTION__, (timeout + HZ - 1) / HZ);
+ __func__, (timeout + HZ - 1) / HZ);
cancel_delayed_work(&clp->cl_renewd);
schedule_delayed_work(&clp->cl_renewd, timeout);
set_bit(NFS_CS_RENEWD, &clp->cl_res_state);
diff --git a/fs/nfs/nfs4state.c b/fs/nfs/nfs4state.c
index 46eb624e4f16..856a8934f610 100644
--- a/fs/nfs/nfs4state.c
+++ b/fs/nfs/nfs4state.c
@@ -282,7 +282,7 @@ nfs4_alloc_state_owner(void)
return sp;
}
-void
+static void
nfs4_drop_state_owner(struct nfs4_state_owner *sp)
{
if (!RB_EMPTY_NODE(&sp->so_client_node)) {
@@ -828,7 +828,7 @@ static int nfs4_reclaim_locks(struct nfs4_state_recovery_ops *ops, struct nfs4_s
switch (status) {
default:
printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
- __FUNCTION__, status);
+ __func__, status);
case -NFS4ERR_EXPIRED:
case -NFS4ERR_NO_GRACE:
case -NFS4ERR_RECLAIM_BAD:
@@ -869,14 +869,14 @@ static int nfs4_reclaim_open_state(struct nfs4_state_recovery_ops *ops, struct n
list_for_each_entry(lock, &state->lock_states, ls_locks) {
if (!(lock->ls_flags & NFS_LOCK_INITIALIZED))
printk("%s: Lock reclaim failed!\n",
- __FUNCTION__);
+ __func__);
}
continue;
}
switch (status) {
default:
printk(KERN_ERR "%s: unhandled error %d. Zeroing state\n",
- __FUNCTION__, status);
+ __func__, status);
case -ENOENT:
case -NFS4ERR_RECLAIM_BAD:
case -NFS4ERR_RECLAIM_CONFLICT:
diff --git a/fs/nfs/nfs4xdr.c b/fs/nfs/nfs4xdr.c
index 5a2d64927b35..b916297d2334 100644
--- a/fs/nfs/nfs4xdr.c
+++ b/fs/nfs/nfs4xdr.c
@@ -1831,7 +1831,7 @@ static int nfs4_xdr_enc_readdir(struct rpc_rqst *req, __be32 *p, const struct nf
xdr_inline_pages(&req->rq_rcv_buf, replen, args->pages,
args->pgbase, args->count);
dprintk("%s: inlined page args = (%u, %p, %u, %u)\n",
- __FUNCTION__, replen, args->pages,
+ __func__, replen, args->pages,
args->pgbase, args->count);
out:
@@ -2192,9 +2192,9 @@ out:
p = xdr_inline_decode(xdr, nbytes); \
if (unlikely(!p)) { \
dprintk("nfs: %s: prematurely hit end of receive" \
- " buffer\n", __FUNCTION__); \
+ " buffer\n", __func__); \
dprintk("nfs: %s: xdr->p=%p, bytes=%u, xdr->end=%p\n", \
- __FUNCTION__, xdr->p, nbytes, xdr->end); \
+ __func__, xdr->p, nbytes, xdr->end); \
return -EIO; \
} \
} while (0)
@@ -2306,12 +2306,12 @@ static int decode_attr_type(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *
READ_BUF(4);
READ32(*type);
if (*type < NF4REG || *type > NF4NAMEDATTR) {
- dprintk("%s: bad type %d\n", __FUNCTION__, *type);
+ dprintk("%s: bad type %d\n", __func__, *type);
return -EIO;
}
bitmap[0] &= ~FATTR4_WORD0_TYPE;
}
- dprintk("%s: type=0%o\n", __FUNCTION__, nfs_type2fmt[*type].nfs2type);
+ dprintk("%s: type=0%o\n", __func__, nfs_type2fmt[*type].nfs2type);
return 0;
}
@@ -2327,7 +2327,7 @@ static int decode_attr_change(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t
READ64(*change);
bitmap[0] &= ~FATTR4_WORD0_CHANGE;
}
- dprintk("%s: change attribute=%Lu\n", __FUNCTION__,
+ dprintk("%s: change attribute=%Lu\n", __func__,
(unsigned long long)*change);
return 0;
}
@@ -2344,7 +2344,7 @@ static int decode_attr_size(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t *
READ64(*size);
bitmap[0] &= ~FATTR4_WORD0_SIZE;
}
- dprintk("%s: file size=%Lu\n", __FUNCTION__, (unsigned long long)*size);
+ dprintk("%s: file size=%Lu\n", __func__, (unsigned long long)*size);
return 0;
}
@@ -2360,7 +2360,7 @@ static int decode_attr_link_support(struct xdr_stream *xdr, uint32_t *bitmap, ui
READ32(*res);
bitmap[0] &= ~FATTR4_WORD0_LINK_SUPPORT;
}
- dprintk("%s: link support=%s\n", __FUNCTION__, *res == 0 ? "false" : "true");
+ dprintk("%s: link support=%s\n", __func__, *res == 0 ? "false" : "true");
return 0;
}
@@ -2376,7 +2376,7 @@ static int decode_attr_symlink_support(struct xdr_stream *xdr, uint32_t *bitmap,
READ32(*res);
bitmap[0] &= ~FATTR4_WORD0_SYMLINK_SUPPORT;
}
- dprintk("%s: symlink support=%s\n", __FUNCTION__, *res == 0 ? "false" : "true");
+ dprintk("%s: symlink support=%s\n", __func__, *res == 0 ? "false" : "true");
return 0;
}
@@ -2394,7 +2394,7 @@ static int decode_attr_fsid(struct xdr_stream *xdr, uint32_t *bitmap, struct nfs
READ64(fsid->minor);
bitmap[0] &= ~FATTR4_WORD0_FSID;
}
- dprintk("%s: fsid=(0x%Lx/0x%Lx)\n", __FUNCTION__,
+ dprintk("%s: fsid=(0x%Lx/0x%Lx)\n", __func__,
(unsigned long long)fsid->major,
(unsigned long long)fsid->minor);
return 0;
@@ -2412,7 +2412,7 @@ static int decode_attr_lease_time(struct xdr_stream *xdr, uint32_t *bitmap, uint
READ32(*res);
bitmap[0] &= ~FATTR4_WORD0_LEASE_TIME;
}
- dprintk("%s: file size=%u\n", __FUNCTION__, (unsigned int)*res);
+ dprintk("%s: file size=%u\n", __func__, (unsigned int)*res);
return 0;
}
@@ -2428,7 +2428,7 @@ static int decode_attr_aclsupport(struct xdr_stream *xdr, uint32_t *bitmap, uint
READ32(*res);
bitmap[0] &= ~FATTR4_WORD0_ACLSUPPORT;
}
- dprintk("%s: ACLs supported=%u\n", __FUNCTION__, (unsigned int)*res);
+ dprintk("%s: ACLs supported=%u\n", __func__, (unsigned int)*res);
return 0;
}
@@ -2444,7 +2444,7 @@ static int decode_attr_fileid(struct xdr_stream *xdr, uint32_t *bitmap, uint64_t
READ64(*fileid);
bitmap[0] &= ~FATTR4_WORD0_FILEID;
}
- dprintk("%s: fileid=%Lu\n", __FUNCTION__, (unsigned long long)*fileid);
+ dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
return 0;
}
@@ -2460,7 +2460,7 @@ static int decode_attr_mounted_on_fileid(struct xdr_stream *xdr, uint32_t *bitma
READ64(*fileid);
bitmap[1] &= ~FATTR4_WORD1_MOUNTED_ON_FILEID;
}
- dprintk("%s: fileid=%Lu\n", __FUNCTION__, (unsigned long long)*fileid);
+ dprintk("%s: fileid=%Lu\n", __func__, (unsigned long long)*fileid);
return 0;
}
@@ -2477,7 +2477,7 @@ static int decode_attr_files_avail(struct xdr_stream *xdr, uint32_t *bitmap, uin
READ64(*res);
bitmap[0] &= ~FATTR4_WORD0_FILES_AVAIL;
}
- dprintk("%s: files avail=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+ dprintk("%s: files avail=%Lu\n", __func__, (unsigned long long)*res);
return status;
}
@@ -2494,7 +2494,7 @@ static int decode_attr_files_free(struct xdr_stream *xdr, uint32_t *bitmap, uint
READ64(*res);
bitmap[0] &= ~FATTR4_WORD0_FILES_FREE;
}
- dprintk("%s: files free=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+ dprintk("%s: files free=%Lu\n", __func__, (unsigned long long)*res);
return status;
}
@@ -2511,7 +2511,7 @@ static int decode_attr_files_total(struct xdr_stream *xdr, uint32_t *bitmap, uin
READ64(*res);
bitmap[0] &= ~FATTR4_WORD0_FILES_TOTAL;
}
- dprintk("%s: files total=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+ dprintk("%s: files total=%Lu\n", __func__, (unsigned long long)*res);
return status;
}
@@ -2569,7 +2569,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
status = 0;
if (unlikely(!(bitmap[0] & FATTR4_WORD0_FS_LOCATIONS)))
goto out;
- dprintk("%s: fsroot ", __FUNCTION__);
+ dprintk("%s: fsroot ", __func__);
status = decode_pathname(xdr, &res->fs_path);
if (unlikely(status != 0))
goto out;
@@ -2586,7 +2586,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
READ32(m);
loc->nservers = 0;
- dprintk("%s: servers ", __FUNCTION__);
+ dprintk("%s: servers ", __func__);
while (loc->nservers < m) {
struct nfs4_string *server = &loc->servers[loc->nservers];
status = decode_opaque_inline(xdr, &server->len, &server->data);
@@ -2599,7 +2599,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
unsigned int i;
dprintk("%s: using first %u of %u servers "
"returned for location %u\n",
- __FUNCTION__,
+ __func__,
NFS4_FS_LOCATION_MAXSERVERS,
m, res->nlocations);
for (i = loc->nservers; i < m; i++) {
@@ -2618,7 +2618,7 @@ static int decode_attr_fs_locations(struct xdr_stream *xdr, uint32_t *bitmap, st
res->nlocations++;
}
out:
- dprintk("%s: fs_locations done, error = %d\n", __FUNCTION__, status);
+ dprintk("%s: fs_locations done, error = %d\n", __func__, status);
return status;
out_eio:
status = -EIO;
@@ -2638,7 +2638,7 @@ static int decode_attr_maxfilesize(struct xdr_stream *xdr, uint32_t *bitmap, uin
READ64(*res);
bitmap[0] &= ~FATTR4_WORD0_MAXFILESIZE;
}
- dprintk("%s: maxfilesize=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+ dprintk("%s: maxfilesize=%Lu\n", __func__, (unsigned long long)*res);
return status;
}
@@ -2655,7 +2655,7 @@ static int decode_attr_maxlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
READ32(*maxlink);
bitmap[0] &= ~FATTR4_WORD0_MAXLINK;
}
- dprintk("%s: maxlink=%u\n", __FUNCTION__, *maxlink);
+ dprintk("%s: maxlink=%u\n", __func__, *maxlink);
return status;
}
@@ -2672,7 +2672,7 @@ static int decode_attr_maxname(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
READ32(*maxname);
bitmap[0] &= ~FATTR4_WORD0_MAXNAME;
}
- dprintk("%s: maxname=%u\n", __FUNCTION__, *maxname);
+ dprintk("%s: maxname=%u\n", __func__, *maxname);
return status;
}
@@ -2693,7 +2693,7 @@ static int decode_attr_maxread(struct xdr_stream *xdr, uint32_t *bitmap, uint32_
*res = (uint32_t)maxread;
bitmap[0] &= ~FATTR4_WORD0_MAXREAD;
}
- dprintk("%s: maxread=%lu\n", __FUNCTION__, (unsigned long)*res);
+ dprintk("%s: maxread=%lu\n", __func__, (unsigned long)*res);
return status;
}
@@ -2714,7 +2714,7 @@ static int decode_attr_maxwrite(struct xdr_stream *xdr, uint32_t *bitmap, uint32
*res = (uint32_t)maxwrite;
bitmap[0] &= ~FATTR4_WORD0_MAXWRITE;
}
- dprintk("%s: maxwrite=%lu\n", __FUNCTION__, (unsigned long)*res);
+ dprintk("%s: maxwrite=%lu\n", __func__, (unsigned long)*res);
return status;
}
@@ -2731,7 +2731,7 @@ static int decode_attr_mode(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t *
*mode &= ~S_IFMT;
bitmap[1] &= ~FATTR4_WORD1_MODE;
}
- dprintk("%s: file mode=0%o\n", __FUNCTION__, (unsigned int)*mode);
+ dprintk("%s: file mode=0%o\n", __func__, (unsigned int)*mode);
return 0;
}
@@ -2747,7 +2747,7 @@ static int decode_attr_nlink(struct xdr_stream *xdr, uint32_t *bitmap, uint32_t
READ32(*nlink);
bitmap[1] &= ~FATTR4_WORD1_NUMLINKS;
}
- dprintk("%s: nlink=%u\n", __FUNCTION__, (unsigned int)*nlink);
+ dprintk("%s: nlink=%u\n", __func__, (unsigned int)*nlink);
return 0;
}
@@ -2766,13 +2766,13 @@ static int decode_attr_owner(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
if (len < XDR_MAX_NETOBJ) {
if (nfs_map_name_to_uid(clp, (char *)p, len, uid) != 0)
dprintk("%s: nfs_map_name_to_uid failed!\n",
- __FUNCTION__);
+ __func__);
} else
dprintk("%s: name too long (%u)!\n",
- __FUNCTION__, len);
+ __func__, len);
bitmap[1] &= ~FATTR4_WORD1_OWNER;
}
- dprintk("%s: uid=%d\n", __FUNCTION__, (int)*uid);
+ dprintk("%s: uid=%d\n", __func__, (int)*uid);
return 0;
}
@@ -2791,13 +2791,13 @@ static int decode_attr_group(struct xdr_stream *xdr, uint32_t *bitmap, struct nf
if (len < XDR_MAX_NETOBJ) {
if (nfs_map_group_to_gid(clp, (char *)p, len, gid) != 0)
dprintk("%s: nfs_map_group_to_gid failed!\n",
- __FUNCTION__);
+ __func__);
} else
dprintk("%s: name too long (%u)!\n",
- __FUNCTION__, len);
+ __func__, len);
bitmap[1] &= ~FATTR4_WORD1_OWNER_GROUP;
}
- dprintk("%s: gid=%d\n", __FUNCTION__, (int)*gid);
+ dprintk("%s: gid=%d\n", __func__, (int)*gid);
return 0;
}
@@ -2820,7 +2820,7 @@ static int decode_attr_rdev(struct xdr_stream *xdr, uint32_t *bitmap, dev_t *rde
*rdev = tmp;
bitmap[1] &= ~ FATTR4_WORD1_RAWDEV;
}
- dprintk("%s: rdev=(0x%x:0x%x)\n", __FUNCTION__, major, minor);
+ dprintk("%s: rdev=(0x%x:0x%x)\n", __func__, major, minor);
return 0;
}
@@ -2837,7 +2837,7 @@ static int decode_attr_space_avail(struct xdr_stream *xdr, uint32_t *bitmap, uin
READ64(*res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_AVAIL;
}
- dprintk("%s: space avail=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+ dprintk("%s: space avail=%Lu\n", __func__, (unsigned long long)*res);
return status;
}
@@ -2854,7 +2854,7 @@ static int decode_attr_space_free(struct xdr_stream *xdr, uint32_t *bitmap, uint
READ64(*res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_FREE;
}
- dprintk("%s: space free=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+ dprintk("%s: space free=%Lu\n", __func__, (unsigned long long)*res);
return status;
}
@@ -2871,7 +2871,7 @@ static int decode_attr_space_total(struct xdr_stream *xdr, uint32_t *bitmap, uin
READ64(*res);
bitmap[1] &= ~FATTR4_WORD1_SPACE_TOTAL;
}
- dprintk("%s: space total=%Lu\n", __FUNCTION__, (unsigned long long)*res);
+ dprintk("%s: space total=%Lu\n", __func__, (unsigned long long)*res);
return status;
}
@@ -2887,7 +2887,7 @@ static int decode_attr_space_used(struct xdr_stream *xdr, uint32_t *bitmap, uint
READ64(*used);
bitmap[1] &= ~FATTR4_WORD1_SPACE_USED;
}
- dprintk("%s: space used=%Lu\n", __FUNCTION__,
+ dprintk("%s: space used=%Lu\n", __func__,
(unsigned long long)*used);
return 0;
}
@@ -2918,7 +2918,7 @@ static int decode_attr_time_access(struct xdr_stream *xdr, uint32_t *bitmap, str
status = decode_attr_time(xdr, time);
bitmap[1] &= ~FATTR4_WORD1_TIME_ACCESS;
}
- dprintk("%s: atime=%ld\n", __FUNCTION__, (long)time->tv_sec);
+ dprintk("%s: atime=%ld\n", __func__, (long)time->tv_sec);
return status;
}
@@ -2934,7 +2934,7 @@ static int decode_attr_time_metadata(struct xdr_stream *xdr, uint32_t *bitmap, s
status = decode_attr_time(xdr, time);
bitmap[1] &= ~FATTR4_WORD1_TIME_METADATA;
}
- dprintk("%s: ctime=%ld\n", __FUNCTION__, (long)time->tv_sec);
+ dprintk("%s: ctime=%ld\n", __func__, (long)time->tv_sec);
return status;
}
@@ -2950,7 +2950,7 @@ static int decode_attr_time_modify(struct xdr_stream *xdr, uint32_t *bitmap, str
status = decode_attr_time(xdr, time);
bitmap[1] &= ~FATTR4_WORD1_TIME_MODIFY;
}
- dprintk("%s: mtime=%ld\n", __FUNCTION__, (long)time->tv_sec);
+ dprintk("%s: mtime=%ld\n", __func__, (long)time->tv_sec);
return status;
}
@@ -2962,7 +2962,7 @@ static int verify_attr_len(struct xdr_stream *xdr, __be32 *savep, uint32_t attrl
if (unlikely(attrwords != nwords)) {
dprintk("%s: server returned incorrect attribute length: "
"%u %c %u\n",
- __FUNCTION__,
+ __func__,
attrwords << 2,
(attrwords < nwords) ? '<' : '>',
nwords << 2);
@@ -3067,7 +3067,7 @@ static int decode_server_caps(struct xdr_stream *xdr, struct nfs4_server_caps_re
goto xdr_error;
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
- dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status);
+ dprintk("%s: xdr returned %d!\n", __func__, -status);
return status;
}
@@ -3100,7 +3100,7 @@ static int decode_statfs(struct xdr_stream *xdr, struct nfs_fsstat *fsstat)
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
- dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status);
+ dprintk("%s: xdr returned %d!\n", __func__, -status);
return status;
}
@@ -3125,7 +3125,7 @@ static int decode_pathconf(struct xdr_stream *xdr, struct nfs_pathconf *pathconf
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
- dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status);
+ dprintk("%s: xdr returned %d!\n", __func__, -status);
return status;
}
@@ -3193,7 +3193,7 @@ static int decode_getfattr(struct xdr_stream *xdr, struct nfs_fattr *fattr, cons
if ((status = verify_attr_len(xdr, savep, attrlen)) == 0)
fattr->valid = NFS_ATTR_FATTR | NFS_ATTR_FATTR_V3 | NFS_ATTR_FATTR_V4;
xdr_error:
- dprintk("%s: xdr returned %d\n", __FUNCTION__, -status);
+ dprintk("%s: xdr returned %d\n", __func__, -status);
return status;
}
@@ -3226,7 +3226,7 @@ static int decode_fsinfo(struct xdr_stream *xdr, struct nfs_fsinfo *fsinfo)
status = verify_attr_len(xdr, savep, attrlen);
xdr_error:
- dprintk("%s: xdr returned %d!\n", __FUNCTION__, -status);
+ dprintk("%s: xdr returned %d!\n", __func__, -status);
return status;
}
@@ -3418,7 +3418,7 @@ static int decode_open(struct xdr_stream *xdr, struct nfs_openres *res)
return decode_delegation(xdr, res);
xdr_error:
- dprintk("%s: Bitmap too large! Length = %u\n", __FUNCTION__, bmlen);
+ dprintk("%s: Bitmap too large! Length = %u\n", __func__, bmlen);
return -EIO;
}
@@ -3575,7 +3575,7 @@ short_pkt:
* the call was successful, but incomplete. The caller can retry the
* readdir starting at the last cookie.
*/
- dprintk("%s: short packet at entry %d\n", __FUNCTION__, nr);
+ dprintk("%s: short packet at entry %d\n", __func__, nr);
entry[0] = entry[1] = 0;
if (nr)
goto out;
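The hunks above, together with the proc.c, read.c and super.c hunks that follow, are a mechanical substitution of the gcc-specific __FUNCTION__ with the standard C99 __func__ predefined identifier; gcc keeps __FUNCTION__ only as an alias, so the change is purely textual. A minimal standalone sketch of what the identifier provides (decode_example() is a made-up name, not kernel code):

#include <stdio.h>

/* __func__ (C99 6.4.2.2) behaves, inside every function body, like a
 * static const char[] holding the enclosing function's name. */
static void decode_example(void)
{
        printf("%s: link support=%s\n", __func__, "true");
}

int main(void)
{
        decode_example();       /* prints "decode_example: link support=true" */
        return 0;
}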
diff --git a/fs/nfs/proc.c b/fs/nfs/proc.c
index 5ccf7faee19c..03599bfe81cf 100644
--- a/fs/nfs/proc.c
+++ b/fs/nfs/proc.c
@@ -63,17 +63,17 @@ nfs_proc_get_root(struct nfs_server *server, struct nfs_fh *fhandle,
};
int status;
- dprintk("%s: call getattr\n", __FUNCTION__);
+ dprintk("%s: call getattr\n", __func__);
nfs_fattr_init(fattr);
status = rpc_call_sync(server->nfs_client->cl_rpcclient, &msg, 0);
- dprintk("%s: reply getattr: %d\n", __FUNCTION__, status);
+ dprintk("%s: reply getattr: %d\n", __func__, status);
if (status)
return status;
- dprintk("%s: call statfs\n", __FUNCTION__);
+ dprintk("%s: call statfs\n", __func__);
msg.rpc_proc = &nfs_procedures[NFSPROC_STATFS];
msg.rpc_resp = &fsinfo;
status = rpc_call_sync(server->nfs_client->cl_rpcclient, &msg, 0);
- dprintk("%s: reply statfs: %d\n", __FUNCTION__, status);
+ dprintk("%s: reply statfs: %d\n", __func__, status);
if (status)
return status;
info->rtmax = NFS_MAXDATA;
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index 16f57e0af999..40d17987d0e8 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -329,7 +329,7 @@ int nfs_readpage_result(struct rpc_task *task, struct nfs_read_data *data)
{
int status;
- dprintk("NFS: %s: %5u, (status %d)\n", __FUNCTION__, task->tk_pid,
+ dprintk("NFS: %s: %5u, (status %d)\n", __func__, task->tk_pid,
task->tk_status);
status = NFS_PROTO(data->inode)->read_done(task, data);
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index 7226a506f3ca..2a4a024a4e7b 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -405,7 +405,7 @@ static int nfs_statfs(struct dentry *dentry, struct kstatfs *buf)
return 0;
out_err:
- dprintk("%s: statfs error = %d\n", __FUNCTION__, -error);
+ dprintk("%s: statfs error = %d\n", __func__, -error);
unlock_kernel();
return error;
}
@@ -2015,6 +2015,10 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
goto error_splat_super;
}
+ error = security_sb_set_mnt_opts(s, &data.lsm_opts);
+ if (error)
+ goto error_splat_root;
+
s->s_flags |= MS_ACTIVE;
mnt->mnt_sb = s;
mnt->mnt_root = mntroot;
@@ -2031,6 +2035,8 @@ out_free:
nfs_free_server(server);
goto out;
+error_splat_root:
+ dput(mntroot);
error_splat_super:
up_write(&s->s_umount);
deactivate_super(s);
@@ -2114,6 +2120,8 @@ static int nfs4_xdev_get_sb(struct file_system_type *fs_type, int flags,
mnt->mnt_sb = s;
mnt->mnt_root = mntroot;
+ security_sb_clone_mnt_opts(data->sb, s);
+
dprintk("<-- nfs4_xdev_get_sb() = 0\n");
return 0;
@@ -2197,6 +2205,8 @@ static int nfs4_referral_get_sb(struct file_system_type *fs_type, int flags,
mnt->mnt_sb = s;
mnt->mnt_root = mntroot;
+ security_sb_clone_mnt_opts(data->sb, s);
+
dprintk("<-- nfs4_referral_get_sb() = 0\n");
return 0;
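The nfs4_get_sb() hunk above adds a security_sb_set_mnt_opts() call and a matching error_splat_root label that dput()s mntroot before falling through to the pre-existing error_splat_super unwinding, so a failure releases everything acquired so far in reverse order. Below is a generic userspace sketch of that unwind-ladder convention; setup() and the resources a, b and c are hypothetical stand-ins, not NFS code:

#include <stdio.h>
#include <stdlib.h>

/* Every new acquisition adds a new error label *above* the existing ones,
 * so a failure at any step releases exactly the earlier resources. */
static int setup(void)
{
        char *a, *b, *c;

        a = malloc(16);
        if (!a)
                goto out;
        b = malloc(16);
        if (!b)
                goto err_free_a;
        c = malloc(16);
        if (!c)
                goto err_free_b;

        /* A real setup() would hand a, b and c off to its caller; freeing
         * them here just keeps the demo leak-free. */
        free(c);
        free(b);
        free(a);
        return 0;

err_free_b:
        free(b);
err_free_a:
        free(a);
out:
        return -1;
}

int main(void)
{
        return setup() ? EXIT_FAILURE : EXIT_SUCCESS;
}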
diff --git a/fs/nfs/write.c b/fs/nfs/write.c
index 1ade11d1ba07..6d8ace3e3259 100644
--- a/fs/nfs/write.c
+++ b/fs/nfs/write.c
@@ -415,7 +415,7 @@ nfs_dirty_request(struct nfs_page *req)
if (page == NULL || test_bit(PG_NEED_COMMIT, &req->wb_flags))
return 0;
- return !PageWriteback(req->wb_page);
+ return !PageWriteback(page);
}
#if defined(CONFIG_NFS_V3) || defined(CONFIG_NFS_V4)
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index 0b3ffa9840c2..4d4760e687c3 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -419,9 +419,9 @@ static int do_probe_callback(void *data)
out_release_client:
rpc_shutdown_client(client);
out_err:
- put_nfs4_client(clp);
dprintk("NFSD: warning: no callback path to client %.*s\n",
(int)clp->cl_name.len, clp->cl_name.data);
+ put_nfs4_client(clp);
return status;
}
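The do_probe_callback() hunk moves put_nfs4_client() below the dprintk() because the message still dereferences clp->cl_name; dropping the last reference first could free the client and turn the print into a use-after-free. A standalone sketch of the ordering rule with a toy refcounted object (struct obj, obj_put() and report_and_release() are illustrative names only):

#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct obj {                            /* stands in for struct nfs4_client */
        int refcount;
        char name[32];
};

static void obj_put(struct obj *o)
{
        if (--o->refcount == 0)
                free(o);                /* any later access to *o is a bug */
}

static void report_and_release(struct obj *o)
{
        /* Correct order, matching the hunk above: use the object first,
         * drop the reference last. */
        fprintf(stderr, "warning: no callback path to client %s\n", o->name);
        obj_put(o);
}

int main(void)
{
        struct obj *o = calloc(1, sizeof(*o));

        if (!o)
                return 1;
        o->refcount = 1;
        snprintf(o->name, sizeof(o->name), "client-a");
        report_and_release(o);
        return 0;
}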
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index c309c881bd4e..313484380a9b 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -952,6 +952,7 @@ encode_op:
out:
nfsd4_release_compoundargs(args);
cstate_free(cstate);
+ dprintk("nfsv4 compound returned %d\n", ntohl(status));
return status;
}
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 8799b8708188..bf11d6879ab4 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -1579,8 +1579,8 @@ nfs4_upgrade_open(struct svc_rqst *rqstp, struct svc_fh *cur_fh, struct nfs4_sta
}
/* remember the open */
filp->f_mode |= open->op_share_access;
- set_bit(open->op_share_access, &stp->st_access_bmap);
- set_bit(open->op_share_deny, &stp->st_deny_bmap);
+ __set_bit(open->op_share_access, &stp->st_access_bmap);
+ __set_bit(open->op_share_deny, &stp->st_deny_bmap);
return nfs_ok;
}
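nfs4_upgrade_open() switches from set_bit() to __set_bit(), the non-atomic variant. The diff itself does not say why; the usual justification, assumed here, is that the stateid bitmaps are already serialized by an outer lock, so the LOCK-prefixed read-modify-write of set_bit() adds nothing. A kernel-context sketch of that reasoning (example_lock, access_bmap and remember_share() are hypothetical names, not nfsd symbols):

#include <linux/bitops.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_lock);      /* hypothetical outer lock */
static unsigned long access_bmap;       /* hypothetical bitmap */

static void remember_share(unsigned int access)
{
        mutex_lock(&example_lock);
        /*
         * __set_bit() is a plain load/or/store. It is only safe because
         * every writer of access_bmap holds example_lock; without such
         * serialization the atomic set_bit() would still be required.
         */
        __set_bit(access, &access_bmap);
        mutex_unlock(&example_lock);
}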
@@ -3249,12 +3249,14 @@ nfs4_state_shutdown(void)
nfs4_unlock_state();
}
+/*
+ * user_recovery_dirname is protected by the nfsd_mutex since it's only
+ * accessed when nfsd is starting.
+ */
static void
nfs4_set_recdir(char *recdir)
{
- nfs4_lock_state();
strcpy(user_recovery_dirname, recdir);
- nfs4_unlock_state();
}
/*
@@ -3278,6 +3280,12 @@ nfs4_reset_recoverydir(char *recdir)
return status;
}
+char *
+nfs4_recoverydir(void)
+{
+ return user_recovery_dirname;
+}
+
/*
* Called when leasetime is changed.
*
@@ -3286,11 +3294,12 @@ nfs4_reset_recoverydir(char *recdir)
* we start to register any changes in lease time. If the administrator
* really wants to change the lease time *now*, they can go ahead and bring
* nfsd down and then back up again after changing the lease time.
+ *
+ * user_lease_time is protected by nfsd_mutex since it's only really accessed
+ * when nfsd is starting.
*/
void
nfs4_reset_lease(time_t leasetime)
{
- lock_kernel();
user_lease_time = leasetime;
- unlock_kernel();
}
diff --git a/fs/nfsd/nfs4xdr.c b/fs/nfsd/nfs4xdr.c
index c513bbdf2d36..9547ab636274 100644
--- a/fs/nfsd/nfs4xdr.c
+++ b/fs/nfsd/nfs4xdr.c
@@ -1201,7 +1201,7 @@ nfsd4_decode_compound(struct nfsd4_compoundargs *argp)
*p++ = htonl((u32)((n) >> 32)); \
*p++ = htonl((u32)(n)); \
} while (0)
-#define WRITEMEM(ptr,nbytes) do { \
+#define WRITEMEM(ptr,nbytes) do if (nbytes > 0) { \
*(p + XDR_QUADLEN(nbytes) -1) = 0; \
memcpy(p, ptr, nbytes); \
p += XDR_QUADLEN(nbytes); \
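With nbytes == 0 the old WRITEMEM body still executed *(p + XDR_QUADLEN(0) - 1) = 0, i.e. it zeroed p[-1] and clobbered the previously encoded XDR word; the added "do if (nbytes > 0)" guard skips the body entirely for zero-length writes, which appears to be the point of the change. The standalone sketch below rebuilds the guarded macro with p passed explicitly (the kernel macro captures a local p), purely to show the behaviour:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define XDR_QUADLEN(l)  (((l) + 3) >> 2)        /* bytes -> 4-byte XDR words */

/* Guarded form: for nbytes == 0 nothing runs, so the pre-zeroing of the
 * last word can no longer touch p[-1]. */
#define WRITEMEM(p, ptr, nbytes) do if ((nbytes) > 0) {         \
        (p)[XDR_QUADLEN(nbytes) - 1] = 0;                       \
        memcpy((p), (ptr), (nbytes));                           \
        (p) += XDR_QUADLEN(nbytes);                             \
} while (0)

int main(void)
{
        uint32_t buf[4] = { 0xdeadbeef, 0, 0, 0 };
        uint32_t *p = &buf[1];          /* word 0 already holds encoded data */

        WRITEMEM(p, "", 0);             /* no-op: buf[0] stays intact */
        WRITEMEM(p, "abcde", 5);        /* two words, tail zero-padded */
        printf("buf[0]=%#x words-used=%ld\n", buf[0], (long)(p - buf));
        return 0;
}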
diff --git a/fs/nfsd/nfsctl.c b/fs/nfsd/nfsctl.c
index 5ac00c4fee91..2c2eb8796c10 100644
--- a/fs/nfsd/nfsctl.c
+++ b/fs/nfsd/nfsctl.c
@@ -450,22 +450,26 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size)
int i;
int rv;
int len;
- int npools = nfsd_nrpools();
+ int npools;
int *nthreads;
+ mutex_lock(&nfsd_mutex);
+ npools = nfsd_nrpools();
if (npools == 0) {
/*
* NFS is shut down. The admin can start it by
* writing to the threads file but NOT the pool_threads
* file, sorry. Report zero threads.
*/
+ mutex_unlock(&nfsd_mutex);
strcpy(buf, "0\n");
return strlen(buf);
}
nthreads = kcalloc(npools, sizeof(int), GFP_KERNEL);
+ rv = -ENOMEM;
if (nthreads == NULL)
- return -ENOMEM;
+ goto out_free;
if (size > 0) {
for (i = 0; i < npools; i++) {
@@ -496,14 +500,16 @@ static ssize_t write_pool_threads(struct file *file, char *buf, size_t size)
mesg += len;
}
+ mutex_unlock(&nfsd_mutex);
return (mesg-buf);
out_free:
kfree(nthreads);
+ mutex_unlock(&nfsd_mutex);
return rv;
}
-static ssize_t write_versions(struct file *file, char *buf, size_t size)
+static ssize_t __write_versions(struct file *file, char *buf, size_t size)
{
/*
* Format:
@@ -566,14 +572,23 @@ static ssize_t write_versions(struct file *file, char *buf, size_t size)
return len;
}
-static ssize_t write_ports(struct file *file, char *buf, size_t size)
+static ssize_t write_versions(struct file *file, char *buf, size_t size)
+{
+ ssize_t rv;
+
+ mutex_lock(&nfsd_mutex);
+ rv = __write_versions(file, buf, size);
+ mutex_unlock(&nfsd_mutex);
+ return rv;
+}
+
+static ssize_t __write_ports(struct file *file, char *buf, size_t size)
{
if (size == 0) {
int len = 0;
- lock_kernel();
+
if (nfsd_serv)
len = svc_xprt_names(nfsd_serv, buf, 0);
- unlock_kernel();
return len;
}
/* Either a single 'fd' number is written, in which
@@ -603,9 +618,7 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size)
/* Decrease the count, but don't shut down
* the service
*/
- lock_kernel();
nfsd_serv->sv_nrthreads--;
- unlock_kernel();
}
return err < 0 ? err : 0;
}
@@ -614,10 +627,8 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size)
int len = 0;
if (!toclose)
return -ENOMEM;
- lock_kernel();
if (nfsd_serv)
len = svc_sock_names(buf, nfsd_serv, toclose);
- unlock_kernel();
if (len >= 0)
lockd_down();
kfree(toclose);
@@ -655,7 +666,6 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size)
if (sscanf(&buf[1], "%15s %4d", transport, &port) == 2) {
if (port == 0)
return -EINVAL;
- lock_kernel();
if (nfsd_serv) {
xprt = svc_find_xprt(nfsd_serv, transport,
AF_UNSPEC, port);
@@ -666,13 +676,23 @@ static ssize_t write_ports(struct file *file, char *buf, size_t size)
} else
err = -ENOTCONN;
}
- unlock_kernel();
return err < 0 ? err : 0;
}
}
return -EINVAL;
}
+static ssize_t write_ports(struct file *file, char *buf, size_t size)
+{
+ ssize_t rv;
+
+ mutex_lock(&nfsd_mutex);
+ rv = __write_ports(file, buf, size);
+ mutex_unlock(&nfsd_mutex);
+ return rv;
+}
+
+
int nfsd_max_blksize;
static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
@@ -691,13 +711,13 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
if (bsize > NFSSVC_MAXBLKSIZE)
bsize = NFSSVC_MAXBLKSIZE;
bsize &= ~(1024-1);
- lock_kernel();
+ mutex_lock(&nfsd_mutex);
if (nfsd_serv && nfsd_serv->sv_nrthreads) {
- unlock_kernel();
+ mutex_unlock(&nfsd_mutex);
return -EBUSY;
}
nfsd_max_blksize = bsize;
- unlock_kernel();
+ mutex_unlock(&nfsd_mutex);
}
return sprintf(buf, "%d\n", nfsd_max_blksize);
}
@@ -705,16 +725,17 @@ static ssize_t write_maxblksize(struct file *file, char *buf, size_t size)
#ifdef CONFIG_NFSD_V4
extern time_t nfs4_leasetime(void);
-static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
+static ssize_t __write_leasetime(struct file *file, char *buf, size_t size)
{
/* if size > 10 seconds, call
* nfs4_reset_lease() then write out the new lease (seconds) as reply
*/
char *mesg = buf;
- int rv;
+ int rv, lease;
if (size > 0) {
- int lease;
+ if (nfsd_serv)
+ return -EBUSY;
rv = get_int(&mesg, &lease);
if (rv)
return rv;
@@ -726,24 +747,52 @@ static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
return strlen(buf);
}
-static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
+static ssize_t write_leasetime(struct file *file, char *buf, size_t size)
+{
+ ssize_t rv;
+
+ mutex_lock(&nfsd_mutex);
+ rv = __write_leasetime(file, buf, size);
+ mutex_unlock(&nfsd_mutex);
+ return rv;
+}
+
+extern char *nfs4_recoverydir(void);
+
+static ssize_t __write_recoverydir(struct file *file, char *buf, size_t size)
{
char *mesg = buf;
char *recdir;
int len, status;
- if (size == 0 || size > PATH_MAX || buf[size-1] != '\n')
- return -EINVAL;
- buf[size-1] = 0;
+ if (size > 0) {
+ if (nfsd_serv)
+ return -EBUSY;
+ if (size > PATH_MAX || buf[size-1] != '\n')
+ return -EINVAL;
+ buf[size-1] = 0;
- recdir = mesg;
- len = qword_get(&mesg, recdir, size);
- if (len <= 0)
- return -EINVAL;
+ recdir = mesg;
+ len = qword_get(&mesg, recdir, size);
+ if (len <= 0)
+ return -EINVAL;
- status = nfs4_reset_recoverydir(recdir);
+ status = nfs4_reset_recoverydir(recdir);
+ }
+ sprintf(buf, "%s\n", nfs4_recoverydir());
return strlen(buf);
}
+
+static ssize_t write_recoverydir(struct file *file, char *buf, size_t size)
+{
+ ssize_t rv;
+
+ mutex_lock(&nfsd_mutex);
+ rv = __write_recoverydir(file, buf, size);
+ mutex_unlock(&nfsd_mutex);
+ return rv;
+}
+
#endif
/*----------------------------------------------------------------------------*/
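The nfsctl.c changes all follow one pattern: the old handler body is renamed __write_xxx() and a thin write_xxx() wrapper takes nfsd_mutex around it, replacing the scattered lock_kernel()/unlock_kernel() pairs. A minimal kernel-context sketch of the wrapper shape (example_mutex and __write_example()/write_example() are placeholder names):

#include <linux/fs.h>
#include <linux/mutex.h>

static DEFINE_MUTEX(example_mutex);     /* stands in for nfsd_mutex */

/* Original body, renamed with a leading "__": runs with the mutex held. */
static ssize_t __write_example(struct file *file, char *buf, size_t size)
{
        /* ... parse buf, update state that example_mutex protects ... */
        return size;
}

/* Thin public wrapper: all locking lives here, so early returns inside the
 * helper can never leak the lock. */
static ssize_t write_example(struct file *file, char *buf, size_t size)
{
        ssize_t rv;

        mutex_lock(&example_mutex);
        rv = __write_example(file, buf, size);
        mutex_unlock(&example_mutex);
        return rv;
}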
diff --git a/fs/nfsd/nfssvc.c b/fs/nfsd/nfssvc.c
index 941041f4b136..26c81149d49a 100644
--- a/fs/nfsd/nfssvc.c
+++ b/fs/nfsd/nfssvc.c
@@ -21,6 +21,7 @@
#include <linux/smp_lock.h>
#include <linux/freezer.h>
#include <linux/fs_struct.h>
+#include <linux/kthread.h>
#include <linux/sunrpc/types.h>
#include <linux/sunrpc/stats.h>
@@ -44,20 +45,39 @@
* when not handling a request. i.e. when waiting
*/
#define SHUTDOWN_SIGS (sigmask(SIGKILL) | sigmask(SIGHUP) | sigmask(SIGINT) | sigmask(SIGQUIT))
-/* if the last thread dies with SIGHUP, then the exports table is
- * left unchanged ( like 2.4-{0-9} ). Any other signal will clear
- * the exports table (like 2.2).
- */
-#define SIG_NOCLEAN SIGHUP
extern struct svc_program nfsd_program;
-static void nfsd(struct svc_rqst *rqstp);
+static int nfsd(void *vrqstp);
struct timeval nfssvc_boot;
- struct svc_serv *nfsd_serv;
static atomic_t nfsd_busy;
static unsigned long nfsd_last_call;
static DEFINE_SPINLOCK(nfsd_call_lock);
+/*
+ * nfsd_mutex protects nfsd_serv -- both the pointer itself and the members
+ * of the svc_serv struct. In particular ->sv_nrthreads, but also to some
+ * extent ->sv_temp_socks and ->sv_permsocks. It also protects nfsdstats.th_cnt.
+ *
+ * If (outside the lock) nfsd_serv is non-NULL, then it must point to a
+ * properly initialised 'struct svc_serv' with ->sv_nrthreads > 0. That number
+ * of nfsd threads must exist and each must be listed in ->sp_all_threads in each
+ * entry of ->sv_pools[].
+ *
+ * Transitions of the thread count between zero and non-zero are of particular
+ * interest since the svc_serv needs to be created and initialized at that
+ * point, or freed.
+ *
+ * Finally, the nfsd_mutex also protects some of the global variables that are
+ * accessed when nfsd starts and that are settable via the write_* routines in
+ * nfsctl.c. In particular:
+ *
+ * user_recovery_dirname
+ * user_lease_time
+ * nfsd_versions
+ */
+DEFINE_MUTEX(nfsd_mutex);
+struct svc_serv *nfsd_serv;
+
#if defined(CONFIG_NFSD_V2_ACL) || defined(CONFIG_NFSD_V3_ACL)
static struct svc_stat nfsd_acl_svcstats;
static struct svc_version * nfsd_acl_version[] = {
@@ -151,7 +171,6 @@ int nfsd_nrthreads(void)
return nfsd_serv->sv_nrthreads;
}
-static int killsig; /* signal that was used to kill last nfsd */
static void nfsd_last_thread(struct svc_serv *serv)
{
/* When last nfsd thread exits we need to do some clean-up */
@@ -162,11 +181,9 @@ static void nfsd_last_thread(struct svc_serv *serv)
nfsd_racache_shutdown();
nfs4_state_shutdown();
- printk(KERN_WARNING "nfsd: last server has exited\n");
- if (killsig != SIG_NOCLEAN) {
- printk(KERN_WARNING "nfsd: unexporting all filesystems\n");
- nfsd_export_flush();
- }
+ printk(KERN_WARNING "nfsd: last server has exited, flushing export "
+ "cache\n");
+ nfsd_export_flush();
}
void nfsd_reset_versions(void)
@@ -190,13 +207,14 @@ void nfsd_reset_versions(void)
}
}
+
int nfsd_create_serv(void)
{
int err = 0;
- lock_kernel();
+
+ WARN_ON(!mutex_is_locked(&nfsd_mutex));
if (nfsd_serv) {
svc_get(nfsd_serv);
- unlock_kernel();
return 0;
}
if (nfsd_max_blksize == 0) {
@@ -217,13 +235,11 @@ int nfsd_create_serv(void)
}
atomic_set(&nfsd_busy, 0);
- nfsd_serv = svc_create_pooled(&nfsd_program,
- nfsd_max_blksize,
- nfsd_last_thread,
- nfsd, SIG_NOCLEAN, THIS_MODULE);
+ nfsd_serv = svc_create_pooled(&nfsd_program, nfsd_max_blksize,
+ nfsd_last_thread, nfsd, THIS_MODULE);
if (nfsd_serv == NULL)
err = -ENOMEM;
- unlock_kernel();
+
do_gettimeofday(&nfssvc_boot); /* record boot time */
return err;
}
@@ -282,6 +298,8 @@ int nfsd_set_nrthreads(int n, int *nthreads)
int tot = 0;
int err = 0;
+ WARN_ON(!mutex_is_locked(&nfsd_mutex));
+
if (nfsd_serv == NULL || n <= 0)
return 0;
@@ -316,7 +334,6 @@ int nfsd_set_nrthreads(int n, int *nthreads)
nthreads[0] = 1;
/* apply the new numbers */
- lock_kernel();
svc_get(nfsd_serv);
for (i = 0; i < n; i++) {
err = svc_set_num_threads(nfsd_serv, &nfsd_serv->sv_pools[i],
@@ -325,7 +342,6 @@ int nfsd_set_nrthreads(int n, int *nthreads)
break;
}
svc_destroy(nfsd_serv);
- unlock_kernel();
return err;
}
@@ -334,8 +350,8 @@ int
nfsd_svc(unsigned short port, int nrservs)
{
int error;
-
- lock_kernel();
+
+ mutex_lock(&nfsd_mutex);
dprintk("nfsd: creating service\n");
error = -EINVAL;
if (nrservs <= 0)
@@ -363,7 +379,7 @@ nfsd_svc(unsigned short port, int nrservs)
failure:
svc_destroy(nfsd_serv); /* Release server */
out:
- unlock_kernel();
+ mutex_unlock(&nfsd_mutex);
return error;
}
@@ -391,18 +407,19 @@ update_thread_usage(int busy_threads)
/*
* This is the NFS server kernel thread
*/
-static void
-nfsd(struct svc_rqst *rqstp)
+static int
+nfsd(void *vrqstp)
{
+ struct svc_rqst *rqstp = (struct svc_rqst *) vrqstp;
struct fs_struct *fsp;
- int err;
sigset_t shutdown_mask, allowed_mask;
+ int err, preverr = 0;
+ unsigned int signo;
/* Lock module and set up kernel thread */
- lock_kernel();
- daemonize("nfsd");
+ mutex_lock(&nfsd_mutex);
- /* After daemonize() this kernel thread shares current->fs
+ /* At this point, the thread shares current->fs
* with the init process. We need to create files with a
* umask of 0 instead of init's umask. */
fsp = copy_fs_struct(current->fs);
@@ -417,11 +434,17 @@ nfsd(struct svc_rqst *rqstp)
siginitsetinv(&shutdown_mask, SHUTDOWN_SIGS);
siginitsetinv(&allowed_mask, ALLOWED_SIGS);
- nfsdstats.th_cnt++;
-
- rqstp->rq_task = current;
+ /*
+ * thread is spawned with all signals set to SIG_IGN, re-enable
+ * the ones that matter
+ */
+ for (signo = 1; signo <= _NSIG; signo++) {
+ if (!sigismember(&shutdown_mask, signo))
+ allow_signal(signo);
+ }
- unlock_kernel();
+ nfsdstats.th_cnt++;
+ mutex_unlock(&nfsd_mutex);
/*
* We want less throttling in balance_dirty_pages() so that nfs to
@@ -444,15 +467,25 @@ nfsd(struct svc_rqst *rqstp)
*/
while ((err = svc_recv(rqstp, 60*60*HZ)) == -EAGAIN)
;
- if (err < 0)
+ if (err == -EINTR)
break;
+ else if (err < 0) {
+ if (err != preverr) {
+ printk(KERN_WARNING "%s: unexpected error "
+ "from svc_recv (%d)\n", __func__, -err);
+ preverr = err;
+ }
+ schedule_timeout_uninterruptible(HZ);
+ continue;
+ }
+
update_thread_usage(atomic_read(&nfsd_busy));
atomic_inc(&nfsd_busy);
/* Lock the export hash tables for reading. */
exp_readlock();
- /* Process request with signals blocked. */
+ /* Process request with signals blocked. */
sigprocmask(SIG_SETMASK, &allowed_mask, NULL);
svc_process(rqstp);
@@ -463,22 +496,10 @@ nfsd(struct svc_rqst *rqstp)
atomic_dec(&nfsd_busy);
}
- if (err != -EINTR) {
- printk(KERN_WARNING "nfsd: terminating on error %d\n", -err);
- } else {
- unsigned int signo;
-
- for (signo = 1; signo <= _NSIG; signo++)
- if (sigismember(&current->pending.signal, signo) &&
- !sigismember(&current->blocked, signo))
- break;
- killsig = signo;
- }
/* Clear signals before calling svc_exit_thread() */
flush_signals(current);
- lock_kernel();
-
+ mutex_lock(&nfsd_mutex);
nfsdstats.th_cnt --;
out:
@@ -486,8 +507,9 @@ out:
svc_exit_thread(rqstp);
/* Release module */
- unlock_kernel();
+ mutex_unlock(&nfsd_mutex);
module_put_and_exit(0);
+ return 0;
}
static __be32 map_new_errors(u32 vers, __be32 nfserr)
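The rewritten nfsd() loop treats -EINTR from svc_recv() as the shutdown request and, for any other failure, logs a warning only when the error value changes (preverr) and then sleeps a second before retrying, so a persistent transport error can neither flood the log nor spin the CPU. The sketch below shows the same policy in a generic kthread; example_recv(), example_thread() and the kthread_should_stop() test are placeholders -- the real nfsd thread is driven by signals, not kthread_stop():

#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/sched.h>

/* Placeholder for a blocking receive such as svc_recv(); a real one would
 * sleep until work or a signal arrives. */
static int example_recv(void)
{
        return -EINTR;
}

static int example_thread(void *unused)
{
        int err, preverr = 0;

        while (!kthread_should_stop()) {
                err = example_recv();
                if (err == -EINTR)
                        break;          /* asked to shut down */
                if (err < 0) {
                        /* log each *new* error once, then back off for a
                         * second instead of spinning on the failure */
                        if (err != preverr) {
                                printk(KERN_WARNING "%s: unexpected error %d\n",
                                       __func__, -err);
                                preverr = err;
                        }
                        schedule_timeout_uninterruptible(HZ);
                        continue;
                }
                /* ... handle the received request ... */
        }
        return 0;
}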
diff --git a/fs/ntfs/upcase.c b/fs/ntfs/upcase.c
index 9101807dc81a..e2f72ca98037 100644
--- a/fs/ntfs/upcase.c
+++ b/fs/ntfs/upcase.c
@@ -77,11 +77,10 @@ ntfschar *generate_default_upcase(void)
uc[i] = cpu_to_le16(i);
for (r = 0; uc_run_table[r][0]; r++)
for (i = uc_run_table[r][0]; i < uc_run_table[r][1]; i++)
- uc[i] = cpu_to_le16(le16_to_cpu(uc[i]) +
- uc_run_table[r][2]);
+ le16_add_cpu(&uc[i], uc_run_table[r][2]);
for (r = 0; uc_dup_table[r][0]; r++)
for (i = uc_dup_table[r][0]; i < uc_dup_table[r][1]; i += 2)
- uc[i + 1] = cpu_to_le16(le16_to_cpu(uc[i + 1]) - 1);
+ le16_add_cpu(&uc[i + 1], -1);
for (r = 0; uc_word_table[r][0]; r++)
uc[uc_word_table[r][0]] = cpu_to_le16(uc_word_table[r][1]);
return uc;
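le16_add_cpu(&x, v) is shorthand for x = cpu_to_le16(le16_to_cpu(x) + v): convert the on-disk little-endian value to CPU order, add, convert back. The standalone demo below re-implements the helpers under demo_* names only so it can run in userspace; in the kernel they come from the byteorder headers:

#include <stdint.h>
#include <stdio.h>

static uint16_t demo_le16_to_cpu(uint16_t v)
{
        const uint8_t *b = (const uint8_t *)&v;

        return (uint16_t)(b[0] | (b[1] << 8));
}

static uint16_t demo_cpu_to_le16(uint16_t v)
{
        uint16_t out;
        uint8_t *b = (uint8_t *)&out;

        b[0] = v & 0xff;
        b[1] = v >> 8;
        return out;
}

/* le16_add_cpu(&var, val) is exactly this read-modify-write. */
static void demo_le16_add_cpu(uint16_t *var, int16_t val)
{
        *var = demo_cpu_to_le16((uint16_t)(demo_le16_to_cpu(*var) + val));
}

int main(void)
{
        uint16_t uc = demo_cpu_to_le16(0x0061);         /* 'a', little-endian */

        demo_le16_add_cpu(&uc, -32);                    /* 'a' -> 'A' */
        printf("0x%04x\n", demo_le16_to_cpu(uc));       /* prints 0x0041 */
        return 0;
}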
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index 41f84c92094f..10bfb466e068 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -2788,7 +2788,7 @@ static int ocfs2_merge_rec_right(struct inode *inode,
BUG_ON(index >= le16_to_cpu(el->l_next_free_rec));
left_rec = &el->l_recs[index];
- if (index == le16_to_cpu(el->l_next_free_rec - 1) &&
+ if (index == le16_to_cpu(el->l_next_free_rec) - 1 &&
le16_to_cpu(el->l_next_free_rec) == le16_to_cpu(el->l_count)) {
/* we meet with a cross extent block merge. */
ret = ocfs2_get_right_path(inode, left_path, &right_path);
@@ -2802,7 +2802,7 @@ static int ocfs2_merge_rec_right(struct inode *inode,
BUG_ON(next_free <= 0);
right_rec = &right_el->l_recs[0];
if (ocfs2_is_empty_extent(right_rec)) {
- BUG_ON(le16_to_cpu(next_free) <= 1);
+ BUG_ON(next_free <= 1);
right_rec = &right_el->l_recs[1];
}
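The moved parenthesis matters on big-endian hardware: the subtraction must happen after le16_to_cpu(), on the CPU-order value, not on the raw little-endian bytes; the second hunk likewise drops a le16_to_cpu() around next_free, which is already a converted local. On little-endian machines both spellings happen to agree, which is presumably how this survived until now. A standalone demonstration, where swab16() stands in for le16_to_cpu() on a big-endian CPU:

#include <stdint.h>
#include <stdio.h>

static uint16_t swab16(uint16_t v)
{
        return (uint16_t)((v >> 8) | (v << 8));
}

int main(void)
{
        /* A disk field holding the value 1 in little-endian byte order, as a
         * big-endian CPU sees it when read raw. */
        uint16_t raw = 0x0100;

        uint16_t wrong = swab16((uint16_t)(raw - 1));   /* le16_to_cpu(x - 1) */
        uint16_t right = (uint16_t)(swab16(raw) - 1);   /* le16_to_cpu(x) - 1 */

        printf("wrong=%u right=%u\n", wrong, right);    /* wrong=65280 right=0 */
        return 0;
}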
diff --git a/fs/ocfs2/cluster/netdebug.c b/fs/ocfs2/cluster/netdebug.c
index 7bf3c0ea7bd9..d8bfa0eb41b2 100644
--- a/fs/ocfs2/cluster/netdebug.c
+++ b/fs/ocfs2/cluster/netdebug.c
@@ -146,8 +146,10 @@ static int nst_seq_show(struct seq_file *seq, void *v)
nst->st_task->comm, nst->st_node,
nst->st_sc, nst->st_id, nst->st_msg_type,
nst->st_msg_key,
- nst->st_sock_time.tv_sec, nst->st_sock_time.tv_usec,
- nst->st_send_time.tv_sec, nst->st_send_time.tv_usec,
+ nst->st_sock_time.tv_sec,
+ (unsigned long)nst->st_sock_time.tv_usec,
+ nst->st_send_time.tv_sec,
+ (unsigned long)nst->st_send_time.tv_usec,
nst->st_status_time.tv_sec,
nst->st_status_time.tv_usec);
}
@@ -274,7 +276,7 @@ static void *sc_seq_next(struct seq_file *seq, void *v, loff_t *pos)
return sc; /* unused, just needs to be null when done */
}
-#define TV_SEC_USEC(TV) TV.tv_sec, TV.tv_usec
+#define TV_SEC_USEC(TV) TV.tv_sec, (unsigned long)TV.tv_usec
static int sc_seq_show(struct seq_file *seq, void *v)
{
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index cf9401e8cd0b..cfdb08b484ed 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -21,7 +21,6 @@
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/sysctl.h>
#include <linux/configfs.h>
#include "tcp.h"
@@ -36,65 +35,6 @@
* cluster references throughout where nodes are looked up */
struct o2nm_cluster *o2nm_single_cluster = NULL;
-#define OCFS2_MAX_HB_CTL_PATH 256
-static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
-
-static ctl_table ocfs2_nm_table[] = {
- {
- .ctl_name = 1,
- .procname = "hb_ctl_path",
- .data = ocfs2_hb_ctl_path,
- .maxlen = OCFS2_MAX_HB_CTL_PATH,
- .mode = 0644,
- .proc_handler = &proc_dostring,
- .strategy = &sysctl_string,
- },
- { .ctl_name = 0 }
-};
-
-static ctl_table ocfs2_mod_table[] = {
- {
- .ctl_name = FS_OCFS2_NM,
- .procname = "nm",
- .data = NULL,
- .maxlen = 0,
- .mode = 0555,
- .child = ocfs2_nm_table
- },
- { .ctl_name = 0}
-};
-
-static ctl_table ocfs2_kern_table[] = {
- {
- .ctl_name = FS_OCFS2,
- .procname = "ocfs2",
- .data = NULL,
- .maxlen = 0,
- .mode = 0555,
- .child = ocfs2_mod_table
- },
- { .ctl_name = 0}
-};
-
-static ctl_table ocfs2_root_table[] = {
- {
- .ctl_name = CTL_FS,
- .procname = "fs",
- .data = NULL,
- .maxlen = 0,
- .mode = 0555,
- .child = ocfs2_kern_table
- },
- { .ctl_name = 0 }
-};
-
-static struct ctl_table_header *ocfs2_table_header = NULL;
-
-const char *o2nm_get_hb_ctl_path(void)
-{
- return ocfs2_hb_ctl_path;
-}
-EXPORT_SYMBOL_GPL(o2nm_get_hb_ctl_path);
struct o2nm_node *o2nm_get_node_by_num(u8 node_num)
{
@@ -941,9 +881,6 @@ void o2nm_undepend_this_node(void)
static void __exit exit_o2nm(void)
{
- if (ocfs2_table_header)
- unregister_sysctl_table(ocfs2_table_header);
-
/* XXX sync with hb callbacks and shut down hb? */
o2net_unregister_hb_callbacks();
configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
@@ -964,16 +901,9 @@ static int __init init_o2nm(void)
if (ret)
goto out;
- ocfs2_table_header = register_sysctl_table(ocfs2_root_table);
- if (!ocfs2_table_header) {
- printk(KERN_ERR "nodemanager: unable to register sysctl\n");
- ret = -ENOMEM; /* or something. */
- goto out_o2net;
- }
-
ret = o2net_register_hb_callbacks();
if (ret)
- goto out_sysctl;
+ goto out_o2net;
config_group_init(&o2nm_cluster_group.cs_subsys.su_group);
mutex_init(&o2nm_cluster_group.cs_subsys.su_mutex);
@@ -990,8 +920,6 @@ static int __init init_o2nm(void)
configfs_unregister_subsystem(&o2nm_cluster_group.cs_subsys);
out_callbacks:
o2net_unregister_hb_callbacks();
-out_sysctl:
- unregister_sysctl_table(ocfs2_table_header);
out_o2net:
o2net_exit();
out:
diff --git a/fs/ocfs2/cluster/nodemanager.h b/fs/ocfs2/cluster/nodemanager.h
index 7c860361b8dd..c992ea0da4ad 100644
--- a/fs/ocfs2/cluster/nodemanager.h
+++ b/fs/ocfs2/cluster/nodemanager.h
@@ -33,10 +33,6 @@
#include <linux/configfs.h>
#include <linux/rbtree.h>
-#define FS_OCFS2_NM 1
-
-const char *o2nm_get_hb_ctl_path(void);
-
struct o2nm_node {
spinlock_t nd_lock;
struct config_item nd_item;
diff --git a/fs/ocfs2/cluster/tcp.c b/fs/ocfs2/cluster/tcp.c
index 1e44ad14881a..a27d61581bd6 100644
--- a/fs/ocfs2/cluster/tcp.c
+++ b/fs/ocfs2/cluster/tcp.c
@@ -142,53 +142,43 @@ static void o2net_idle_timer(unsigned long data);
static void o2net_sc_postpone_idle(struct o2net_sock_container *sc);
static void o2net_sc_reset_idle_timer(struct o2net_sock_container *sc);
-static void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
- u32 msgkey, struct task_struct *task, u8 node)
-{
#ifdef CONFIG_DEBUG_FS
+void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
+ u32 msgkey, struct task_struct *task, u8 node)
+{
INIT_LIST_HEAD(&nst->st_net_debug_item);
nst->st_task = task;
nst->st_msg_type = msgtype;
nst->st_msg_key = msgkey;
nst->st_node = node;
-#endif
}
-static void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
+void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
{
-#ifdef CONFIG_DEBUG_FS
do_gettimeofday(&nst->st_sock_time);
-#endif
}
-static void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
+void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
{
-#ifdef CONFIG_DEBUG_FS
do_gettimeofday(&nst->st_send_time);
-#endif
}
-static void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
+void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
{
-#ifdef CONFIG_DEBUG_FS
do_gettimeofday(&nst->st_status_time);
-#endif
}
-static void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
+void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
struct o2net_sock_container *sc)
{
-#ifdef CONFIG_DEBUG_FS
nst->st_sc = sc;
-#endif
}
-static void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id)
+void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id)
{
-#ifdef CONFIG_DEBUG_FS
nst->st_id = msg_id;
-#endif
}
+#endif /* CONFIG_DEBUG_FS */
static inline int o2net_reconnect_delay(void)
{
diff --git a/fs/ocfs2/cluster/tcp.h b/fs/ocfs2/cluster/tcp.h
index a705d5d19036..fd6179eb26d4 100644
--- a/fs/ocfs2/cluster/tcp.h
+++ b/fs/ocfs2/cluster/tcp.h
@@ -128,23 +128,23 @@ void o2net_debug_del_nst(struct o2net_send_tracking *nst);
void o2net_debug_add_sc(struct o2net_sock_container *sc);
void o2net_debug_del_sc(struct o2net_sock_container *sc);
#else
-static int o2net_debugfs_init(void)
+static inline int o2net_debugfs_init(void)
{
return 0;
}
-static void o2net_debugfs_exit(void)
+static inline void o2net_debugfs_exit(void)
{
}
-static void o2net_debug_add_nst(struct o2net_send_tracking *nst)
+static inline void o2net_debug_add_nst(struct o2net_send_tracking *nst)
{
}
-static void o2net_debug_del_nst(struct o2net_send_tracking *nst)
+static inline void o2net_debug_del_nst(struct o2net_send_tracking *nst)
{
}
-static void o2net_debug_add_sc(struct o2net_sock_container *sc)
+static inline void o2net_debug_add_sc(struct o2net_sock_container *sc)
{
}
-static void o2net_debug_del_sc(struct o2net_sock_container *sc)
+static inline void o2net_debug_del_sc(struct o2net_sock_container *sc)
{
}
#endif /* CONFIG_DEBUG_FS */
diff --git a/fs/ocfs2/cluster/tcp_internal.h b/fs/ocfs2/cluster/tcp_internal.h
index 8d58cfe410b1..18307ff81b77 100644
--- a/fs/ocfs2/cluster/tcp_internal.h
+++ b/fs/ocfs2/cluster/tcp_internal.h
@@ -224,10 +224,42 @@ struct o2net_send_tracking {
struct timeval st_send_time;
struct timeval st_status_time;
};
+
+void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
+ u32 msgkey, struct task_struct *task, u8 node);
+void o2net_set_nst_sock_time(struct o2net_send_tracking *nst);
+void o2net_set_nst_send_time(struct o2net_send_tracking *nst);
+void o2net_set_nst_status_time(struct o2net_send_tracking *nst);
+void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
+ struct o2net_sock_container *sc);
+void o2net_set_nst_msg_id(struct o2net_send_tracking *nst, u32 msg_id);
+
#else
struct o2net_send_tracking {
u32 dummy;
};
+
+static inline void o2net_init_nst(struct o2net_send_tracking *nst, u32 msgtype,
+ u32 msgkey, struct task_struct *task, u8 node)
+{
+}
+static inline void o2net_set_nst_sock_time(struct o2net_send_tracking *nst)
+{
+}
+static inline void o2net_set_nst_send_time(struct o2net_send_tracking *nst)
+{
+}
+static inline void o2net_set_nst_status_time(struct o2net_send_tracking *nst)
+{
+}
+static inline void o2net_set_nst_sock_container(struct o2net_send_tracking *nst,
+ struct o2net_sock_container *sc)
+{
+}
+static inline void o2net_set_nst_msg_id(struct o2net_send_tracking *nst,
+ u32 msg_id)
+{
+}
#endif /* CONFIG_DEBUG_FS */
#endif /* O2CLUSTER_TCP_INTERNAL_H */
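The debugfs helpers in tcp.h and dlmdebug.h gain the inline keyword, and tcp_internal.h grows a matching set of empty static inline stubs for the !CONFIG_DEBUG_FS case: a plain static definition in a header emits an unused copy (and a warning) in every file that includes it, while a static inline stub compiles away to nothing. A small sketch of the idiom (CONFIG_EXAMPLE_DEBUG and debug_add_item() are made-up names):

#include <stdio.h>

#ifdef CONFIG_EXAMPLE_DEBUG
void debug_add_item(int id);            /* real version lives in a .c file */
#else
static inline void debug_add_item(int id)
{
        /* no debug support: calls vanish, no "defined but not used" warning */
}
#endif

int main(void)
{
        debug_add_item(42);
        return 0;
}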
diff --git a/fs/ocfs2/dlm/dlmdebug.h b/fs/ocfs2/dlm/dlmdebug.h
index d34a62a3a625..8c686d22f9c7 100644
--- a/fs/ocfs2/dlm/dlmdebug.h
+++ b/fs/ocfs2/dlm/dlmdebug.h
@@ -60,25 +60,25 @@ void dlm_destroy_debugfs_root(void);
#else
-static int dlm_debug_init(struct dlm_ctxt *dlm)
+static inline int dlm_debug_init(struct dlm_ctxt *dlm)
{
return 0;
}
-static void dlm_debug_shutdown(struct dlm_ctxt *dlm)
+static inline void dlm_debug_shutdown(struct dlm_ctxt *dlm)
{
}
-static int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
+static inline int dlm_create_debugfs_subroot(struct dlm_ctxt *dlm)
{
return 0;
}
-static void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
+static inline void dlm_destroy_debugfs_subroot(struct dlm_ctxt *dlm)
{
}
-static int dlm_create_debugfs_root(void)
+static inline int dlm_create_debugfs_root(void)
{
return 0;
}
-static void dlm_destroy_debugfs_root(void)
+static inline void dlm_destroy_debugfs_root(void)
{
}
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index 394d25a131a5..1c6da696d213 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -31,6 +31,7 @@
#include <linux/pagemap.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/time.h>
#define MLOG_MASK_PREFIX ML_DLM_GLUE
#include <cluster/masklog.h>
@@ -59,6 +60,9 @@ struct ocfs2_mask_waiter {
struct completion mw_complete;
unsigned long mw_mask;
unsigned long mw_goal;
+#ifdef CONFIG_OCFS2_FS_STATS
+ unsigned long long mw_lock_start;
+#endif
};
static struct ocfs2_super *ocfs2_get_dentry_osb(struct ocfs2_lock_res *lockres);
@@ -366,6 +370,75 @@ static void ocfs2_remove_lockres_tracking(struct ocfs2_lock_res *res)
spin_unlock(&ocfs2_dlm_tracking_lock);
}
+#ifdef CONFIG_OCFS2_FS_STATS
+static void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
+{
+ res->l_lock_num_prmode = 0;
+ res->l_lock_num_prmode_failed = 0;
+ res->l_lock_total_prmode = 0;
+ res->l_lock_max_prmode = 0;
+ res->l_lock_num_exmode = 0;
+ res->l_lock_num_exmode_failed = 0;
+ res->l_lock_total_exmode = 0;
+ res->l_lock_max_exmode = 0;
+ res->l_lock_refresh = 0;
+}
+
+static void ocfs2_update_lock_stats(struct ocfs2_lock_res *res, int level,
+ struct ocfs2_mask_waiter *mw, int ret)
+{
+ unsigned long long *num, *sum;
+ unsigned int *max, *failed;
+ struct timespec ts = current_kernel_time();
+ unsigned long long time = timespec_to_ns(&ts) - mw->mw_lock_start;
+
+ if (level == LKM_PRMODE) {
+ num = &res->l_lock_num_prmode;
+ sum = &res->l_lock_total_prmode;
+ max = &res->l_lock_max_prmode;
+ failed = &res->l_lock_num_prmode_failed;
+ } else if (level == LKM_EXMODE) {
+ num = &res->l_lock_num_exmode;
+ sum = &res->l_lock_total_exmode;
+ max = &res->l_lock_max_exmode;
+ failed = &res->l_lock_num_exmode_failed;
+ } else
+ return;
+
+ (*num)++;
+ (*sum) += time;
+ if (time > *max)
+ *max = time;
+ if (ret)
+ (*failed)++;
+}
+
+static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
+{
+ lockres->l_lock_refresh++;
+}
+
+static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
+{
+ struct timespec ts = current_kernel_time();
+ mw->mw_lock_start = timespec_to_ns(&ts);
+}
+#else
+static inline void ocfs2_init_lock_stats(struct ocfs2_lock_res *res)
+{
+}
+static inline void ocfs2_update_lock_stats(struct ocfs2_lock_res *res,
+ int level, struct ocfs2_mask_waiter *mw, int ret)
+{
+}
+static inline void ocfs2_track_lock_refresh(struct ocfs2_lock_res *lockres)
+{
+}
+static inline void ocfs2_init_start_time(struct ocfs2_mask_waiter *mw)
+{
+}
+#endif
+
static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
struct ocfs2_lock_res *res,
enum ocfs2_lock_type type,
@@ -385,6 +458,8 @@ static void ocfs2_lock_res_init_common(struct ocfs2_super *osb,
res->l_flags = OCFS2_LOCK_INITIALIZED;
ocfs2_add_lockres_tracking(res, osb->osb_dlm_debug);
+
+ ocfs2_init_lock_stats(res);
}
void ocfs2_lock_res_init_once(struct ocfs2_lock_res *res)
@@ -1048,6 +1123,7 @@ static void ocfs2_init_mask_waiter(struct ocfs2_mask_waiter *mw)
{
INIT_LIST_HEAD(&mw->mw_item);
init_completion(&mw->mw_complete);
+ ocfs2_init_start_time(mw);
}
static int ocfs2_wait_for_mask(struct ocfs2_mask_waiter *mw)
@@ -1254,6 +1330,7 @@ out:
goto again;
mlog_errno(ret);
}
+ ocfs2_update_lock_stats(lockres, level, &mw, ret);
mlog_exit(ret);
return ret;
@@ -1983,6 +2060,7 @@ static int ocfs2_inode_lock_update(struct inode *inode,
le32_to_cpu(fe->i_flags));
ocfs2_refresh_inode(inode, fe);
+ ocfs2_track_lock_refresh(lockres);
}
status = 0;
@@ -2267,6 +2345,7 @@ int ocfs2_super_lock(struct ocfs2_super *osb,
if (status < 0)
mlog_errno(status);
+ ocfs2_track_lock_refresh(lockres);
}
bail:
mlog_exit(status);
@@ -2461,7 +2540,7 @@ static void *ocfs2_dlm_seq_next(struct seq_file *m, void *v, loff_t *pos)
}
/* So that debugfs.ocfs2 can determine which format is being used */
-#define OCFS2_DLM_DEBUG_STR_VERSION 1
+#define OCFS2_DLM_DEBUG_STR_VERSION 2
static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
{
int i;
@@ -2502,6 +2581,47 @@ static int ocfs2_dlm_seq_show(struct seq_file *m, void *v)
for(i = 0; i < DLM_LVB_LEN; i++)
seq_printf(m, "0x%x\t", lvb[i]);
+#ifdef CONFIG_OCFS2_FS_STATS
+# define lock_num_prmode(_l) (_l)->l_lock_num_prmode
+# define lock_num_exmode(_l) (_l)->l_lock_num_exmode
+# define lock_num_prmode_failed(_l) (_l)->l_lock_num_prmode_failed
+# define lock_num_exmode_failed(_l) (_l)->l_lock_num_exmode_failed
+# define lock_total_prmode(_l) (_l)->l_lock_total_prmode
+# define lock_total_exmode(_l) (_l)->l_lock_total_exmode
+# define lock_max_prmode(_l) (_l)->l_lock_max_prmode
+# define lock_max_exmode(_l) (_l)->l_lock_max_exmode
+# define lock_refresh(_l) (_l)->l_lock_refresh
+#else
+# define lock_num_prmode(_l) (0ULL)
+# define lock_num_exmode(_l) (0ULL)
+# define lock_num_prmode_failed(_l) (0)
+# define lock_num_exmode_failed(_l) (0)
+# define lock_total_prmode(_l) (0ULL)
+# define lock_total_exmode(_l) (0ULL)
+# define lock_max_prmode(_l) (0)
+# define lock_max_exmode(_l) (0)
+# define lock_refresh(_l) (0)
+#endif
+ /* The following seq_print was added in version 2 of this output */
+ seq_printf(m, "%llu\t"
+ "%llu\t"
+ "%u\t"
+ "%u\t"
+ "%llu\t"
+ "%llu\t"
+ "%u\t"
+ "%u\t"
+ "%u\t",
+ lock_num_prmode(lockres),
+ lock_num_exmode(lockres),
+ lock_num_prmode_failed(lockres),
+ lock_num_exmode_failed(lockres),
+ lock_total_prmode(lockres),
+ lock_total_exmode(lockres),
+ lock_max_prmode(lockres),
+ lock_max_exmode(lockres),
+ lock_refresh(lockres));
+
/* End the line */
seq_printf(m, "\n");
return 0;
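The new statistics stamp the mask waiter when it is initialized and compute the elapsed wait when the lock attempt finishes, using current_kernel_time() converted with timespec_to_ns(); that clock is tick-resolution, which seems to be considered sufficient for wait statistics. A kernel-context sketch of just the measurement (struct example_waiter and its helpers are illustrative, not ocfs2 symbols):

#include <linux/time.h>

struct example_waiter {
        unsigned long long start_ns;
};

static void example_waiter_init(struct example_waiter *w)
{
        struct timespec ts = current_kernel_time();

        w->start_ns = timespec_to_ns(&ts);
}

static unsigned long long example_waiter_elapsed_ns(struct example_waiter *w)
{
        struct timespec ts = current_kernel_time();

        return timespec_to_ns(&ts) - w->start_ns;
}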
diff --git a/fs/ocfs2/file.c b/fs/ocfs2/file.c
index 57e0d30cde98..e8514e8b6ce8 100644
--- a/fs/ocfs2/file.c
+++ b/fs/ocfs2/file.c
@@ -2202,7 +2202,7 @@ static ssize_t ocfs2_file_aio_read(struct kiocb *iocb,
ret = generic_file_aio_read(iocb, iov, nr_segs, iocb->ki_pos);
if (ret == -EINVAL)
- mlog(ML_ERROR, "generic_file_aio_read returned -EINVAL\n");
+ mlog(0, "generic_file_aio_read returned -EINVAL\n");
/* buffered aio wouldn't have proper lock coverage today */
BUG_ON(ret == -EIOCBQUEUED && !(filp->f_flags & O_DIRECT));
diff --git a/fs/ocfs2/inode.c b/fs/ocfs2/inode.c
index 7e9e4c79aec7..64211fc74266 100644
--- a/fs/ocfs2/inode.c
+++ b/fs/ocfs2/inode.c
@@ -1062,10 +1062,6 @@ void ocfs2_clear_inode(struct inode *inode)
(unsigned long long)oi->ip_blkno);
mutex_unlock(&oi->ip_io_mutex);
- /*
- * down_trylock() returns 0, down_write_trylock() returns 1
- * kernel 1, world 0
- */
mlog_bug_on_msg(!down_write_trylock(&oi->ip_alloc_sem),
"Clear inode of %llu, alloc_sem is locked\n",
(unsigned long long)oi->ip_blkno);
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index 31692379c170..1cb814be8ef1 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -132,6 +132,18 @@ struct ocfs2_lock_res {
wait_queue_head_t l_event;
struct list_head l_debug_list;
+
+#ifdef CONFIG_OCFS2_FS_STATS
+ unsigned long long l_lock_num_prmode; /* PR acquires */
+ unsigned long long l_lock_num_exmode; /* EX acquires */
+ unsigned int l_lock_num_prmode_failed; /* Failed PR gets */
+ unsigned int l_lock_num_exmode_failed; /* Failed EX gets */
+ unsigned long long l_lock_total_prmode; /* Tot wait for PR */
+ unsigned long long l_lock_total_exmode; /* Tot wait for EX */
+ unsigned int l_lock_max_prmode; /* Max wait for PR */
+ unsigned int l_lock_max_exmode; /* Max wait for EX */
+ unsigned int l_lock_refresh; /* Disk refreshes */
+#endif
};
struct ocfs2_dlm_debug {
diff --git a/fs/ocfs2/stack_o2cb.c b/fs/ocfs2/stack_o2cb.c
index bbd1667aa7d3..fcd120f1493a 100644
--- a/fs/ocfs2/stack_o2cb.c
+++ b/fs/ocfs2/stack_o2cb.c
@@ -317,8 +317,7 @@ out:
return rc;
}
-static int o2cb_cluster_disconnect(struct ocfs2_cluster_connection *conn,
- int hangup_pending)
+static int o2cb_cluster_disconnect(struct ocfs2_cluster_connection *conn)
{
struct dlm_ctxt *dlm = conn->cc_lockspace;
struct o2dlm_private *priv = conn->cc_private;
@@ -333,43 +332,6 @@ static int o2cb_cluster_disconnect(struct ocfs2_cluster_connection *conn,
return 0;
}
-static void o2hb_stop(const char *group)
-{
- int ret;
- char *argv[5], *envp[3];
-
- argv[0] = (char *)o2nm_get_hb_ctl_path();
- argv[1] = "-K";
- argv[2] = "-u";
- argv[3] = (char *)group;
- argv[4] = NULL;
-
- mlog(0, "Run: %s %s %s %s\n", argv[0], argv[1], argv[2], argv[3]);
-
- /* minimal command environment taken from cpu_run_sbin_hotplug */
- envp[0] = "HOME=/";
- envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
- envp[2] = NULL;
-
- ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
- if (ret < 0)
- mlog_errno(ret);
-}
-
-/*
- * Hangup is a hack for tools compatibility. Older ocfs2-tools software
- * expects the filesystem to call "ocfs2_hb_ctl" during unmount. This
- * happens regardless of whether the DLM got started, so we can't do it
- * in ocfs2_cluster_disconnect(). We bring the o2hb_stop() function into
- * the glue and provide a "hangup" API for super.c to call.
- *
- * Other stacks will eventually provide a NULL ->hangup() pointer.
- */
-static void o2cb_cluster_hangup(const char *group, int grouplen)
-{
- o2hb_stop(group);
-}
-
static int o2cb_cluster_this_node(unsigned int *node)
{
int node_num;
@@ -388,7 +350,6 @@ static int o2cb_cluster_this_node(unsigned int *node)
static struct ocfs2_stack_operations o2cb_stack_ops = {
.connect = o2cb_cluster_connect,
.disconnect = o2cb_cluster_disconnect,
- .hangup = o2cb_cluster_hangup,
.this_node = o2cb_cluster_this_node,
.dlm_lock = o2cb_dlm_lock,
.dlm_unlock = o2cb_dlm_unlock,
diff --git a/fs/ocfs2/stack_user.c b/fs/ocfs2/stack_user.c
index cd120011104d..353fc35c6748 100644
--- a/fs/ocfs2/stack_user.c
+++ b/fs/ocfs2/stack_user.c
@@ -62,7 +62,7 @@
* negotiated by the client. The client negotiates based on the maximum
* version advertised in /sys/fs/ocfs2/max_locking_protocol. The major
* number from the "SETV" message must match
- * user_stack.sp_proto->lp_max_version.pv_major, and the minor number
+ * ocfs2_user_plugin.sp_proto->lp_max_version.pv_major, and the minor number
* must be less than or equal to ...->lp_max_version.pv_minor.
*
* Once this information has been set, mounts will be allowed. From this
@@ -154,7 +154,7 @@ union ocfs2_control_message {
struct ocfs2_control_message_down u_down;
};
-static struct ocfs2_stack_plugin user_stack;
+static struct ocfs2_stack_plugin ocfs2_user_plugin;
static atomic_t ocfs2_control_opened;
static int ocfs2_control_this_node = -1;
@@ -400,7 +400,7 @@ static int ocfs2_control_do_setversion_msg(struct file *file,
char *ptr = NULL;
struct ocfs2_control_private *p = file->private_data;
struct ocfs2_protocol_version *max =
- &user_stack.sp_proto->lp_max_version;
+ &ocfs2_user_plugin.sp_proto->lp_max_version;
if (ocfs2_control_get_handshake_state(file) !=
OCFS2_CONTROL_HANDSHAKE_PROTOCOL)
@@ -550,26 +550,17 @@ static ssize_t ocfs2_control_read(struct file *file,
size_t count,
loff_t *ppos)
{
- char *proto_string = OCFS2_CONTROL_PROTO;
- size_t to_write = 0;
-
- if (*ppos >= OCFS2_CONTROL_PROTO_LEN)
- return 0;
-
- to_write = OCFS2_CONTROL_PROTO_LEN - *ppos;
- if (to_write > count)
- to_write = count;
- if (copy_to_user(buf, proto_string + *ppos, to_write))
- return -EFAULT;
+ ssize_t ret;
- *ppos += to_write;
+ ret = simple_read_from_buffer(buf, count, ppos,
+ OCFS2_CONTROL_PROTO, OCFS2_CONTROL_PROTO_LEN);
/* Have we read the whole protocol list? */
- if (*ppos >= OCFS2_CONTROL_PROTO_LEN)
+ if (ret > 0 && *ppos >= OCFS2_CONTROL_PROTO_LEN)
ocfs2_control_set_handshake_state(file,
OCFS2_CONTROL_HANDSHAKE_READ);
- return to_write;
+ return ret;
}
static int ocfs2_control_release(struct inode *inode, struct file *file)
@@ -683,7 +674,7 @@ static void fsdlm_lock_ast_wrapper(void *astarg)
struct dlm_lksb *lksb = fsdlm_astarg_to_lksb(astarg);
int status = lksb->sb_status;
- BUG_ON(user_stack.sp_proto == NULL);
+ BUG_ON(ocfs2_user_plugin.sp_proto == NULL);
/*
* For now we're punting on the issue of other non-standard errors
@@ -696,16 +687,16 @@ static void fsdlm_lock_ast_wrapper(void *astarg)
*/
if (status == -DLM_EUNLOCK || status == -DLM_ECANCEL)
- user_stack.sp_proto->lp_unlock_ast(astarg, 0);
+ ocfs2_user_plugin.sp_proto->lp_unlock_ast(astarg, 0);
else
- user_stack.sp_proto->lp_lock_ast(astarg);
+ ocfs2_user_plugin.sp_proto->lp_lock_ast(astarg);
}
static void fsdlm_blocking_ast_wrapper(void *astarg, int level)
{
- BUG_ON(user_stack.sp_proto == NULL);
+ BUG_ON(ocfs2_user_plugin.sp_proto == NULL);
- user_stack.sp_proto->lp_blocking_ast(astarg, level);
+ ocfs2_user_plugin.sp_proto->lp_blocking_ast(astarg, level);
}
static int user_dlm_lock(struct ocfs2_cluster_connection *conn,
@@ -819,8 +810,7 @@ out:
return rc;
}
-static int user_cluster_disconnect(struct ocfs2_cluster_connection *conn,
- int hangup_pending)
+static int user_cluster_disconnect(struct ocfs2_cluster_connection *conn)
{
dlm_release_lockspace(conn->cc_lockspace, 2);
conn->cc_lockspace = NULL;
@@ -841,7 +831,7 @@ static int user_cluster_this_node(unsigned int *this_node)
return 0;
}
-static struct ocfs2_stack_operations user_stack_ops = {
+static struct ocfs2_stack_operations ocfs2_user_plugin_ops = {
.connect = user_cluster_connect,
.disconnect = user_cluster_disconnect,
.this_node = user_cluster_this_node,
@@ -852,20 +842,20 @@ static struct ocfs2_stack_operations user_stack_ops = {
.dump_lksb = user_dlm_dump_lksb,
};
-static struct ocfs2_stack_plugin user_stack = {
+static struct ocfs2_stack_plugin ocfs2_user_plugin = {
.sp_name = "user",
- .sp_ops = &user_stack_ops,
+ .sp_ops = &ocfs2_user_plugin_ops,
.sp_owner = THIS_MODULE,
};
-static int __init user_stack_init(void)
+static int __init ocfs2_user_plugin_init(void)
{
int rc;
rc = ocfs2_control_init();
if (!rc) {
- rc = ocfs2_stack_glue_register(&user_stack);
+ rc = ocfs2_stack_glue_register(&ocfs2_user_plugin);
if (rc)
ocfs2_control_exit();
}
@@ -873,14 +863,14 @@ static int __init user_stack_init(void)
return rc;
}
-static void __exit user_stack_exit(void)
+static void __exit ocfs2_user_plugin_exit(void)
{
- ocfs2_stack_glue_unregister(&user_stack);
+ ocfs2_stack_glue_unregister(&ocfs2_user_plugin);
ocfs2_control_exit();
}
MODULE_AUTHOR("Oracle");
MODULE_DESCRIPTION("ocfs2 driver for userspace cluster stacks");
MODULE_LICENSE("GPL");
-module_init(user_stack_init);
-module_exit(user_stack_exit);
+module_init(ocfs2_user_plugin_init);
+module_exit(ocfs2_user_plugin_exit);
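ocfs2_control_read() above collapses its hand-rolled bounds check, clamp and copy_to_user() into one simple_read_from_buffer() call, keeping only the *ppos test that flips the handshake state once the whole protocol string has been consumed. A minimal kernel-context sketch of the helper (example_proto and example_read() are placeholder names):

#include <linux/fs.h>
#include <linux/uaccess.h>

static const char example_proto[] = "T01\n";

/* simple_read_from_buffer() handles the *ppos bounds check, short-copy
 * clamping and copy_to_user() failure; it returns the bytes copied, 0 at
 * EOF, or -EFAULT. */
static ssize_t example_read(struct file *file, char __user *buf,
                            size_t count, loff_t *ppos)
{
        return simple_read_from_buffer(buf, count, ppos,
                                       example_proto,
                                       sizeof(example_proto) - 1);
}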
diff --git a/fs/ocfs2/stackglue.c b/fs/ocfs2/stackglue.c
index 119f60cea9cc..10e149ae5e3a 100644
--- a/fs/ocfs2/stackglue.c
+++ b/fs/ocfs2/stackglue.c
@@ -26,6 +26,7 @@
#include <linux/fs.h>
#include <linux/kobject.h>
#include <linux/sysfs.h>
+#include <linux/sysctl.h>
#include "ocfs2_fs.h"
@@ -33,11 +34,13 @@
#define OCFS2_STACK_PLUGIN_O2CB "o2cb"
#define OCFS2_STACK_PLUGIN_USER "user"
+#define OCFS2_MAX_HB_CTL_PATH 256
static struct ocfs2_locking_protocol *lproto;
static DEFINE_SPINLOCK(ocfs2_stack_lock);
static LIST_HEAD(ocfs2_stack_list);
static char cluster_stack_name[OCFS2_STACK_LABEL_LEN + 1];
+static char ocfs2_hb_ctl_path[OCFS2_MAX_HB_CTL_PATH] = "/sbin/ocfs2_hb_ctl";
/*
* The stack currently in use. If not null, active_stack->sp_count > 0,
@@ -349,7 +352,7 @@ int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
BUG_ON(conn == NULL);
- ret = active_stack->sp_ops->disconnect(conn, hangup_pending);
+ ret = active_stack->sp_ops->disconnect(conn);
/* XXX Should we free it anyway? */
if (!ret) {
@@ -362,13 +365,48 @@ int ocfs2_cluster_disconnect(struct ocfs2_cluster_connection *conn,
}
EXPORT_SYMBOL_GPL(ocfs2_cluster_disconnect);
+/*
+ * Leave the group for this filesystem. This is executed by a userspace
+ * program (stored in ocfs2_hb_ctl_path).
+ */
+static void ocfs2_leave_group(const char *group)
+{
+ int ret;
+ char *argv[5], *envp[3];
+
+ argv[0] = ocfs2_hb_ctl_path;
+ argv[1] = "-K";
+ argv[2] = "-u";
+ argv[3] = (char *)group;
+ argv[4] = NULL;
+
+ /* minimal command environment taken from cpu_run_sbin_hotplug */
+ envp[0] = "HOME=/";
+ envp[1] = "PATH=/sbin:/bin:/usr/sbin:/usr/bin";
+ envp[2] = NULL;
+
+ ret = call_usermodehelper(argv[0], argv, envp, UMH_WAIT_PROC);
+ if (ret < 0) {
+ printk(KERN_ERR
+ "ocfs2: Error %d running user helper "
+ "\"%s %s %s %s\"\n",
+ ret, argv[0], argv[1], argv[2], argv[3]);
+ }
+}
+
+/*
+ * Hangup is a required post-umount step. ocfs2-tools software expects the
+ * filesystem to call "ocfs2_hb_ctl" during unmount. This happens
+ * regardless of whether the DLM got started, so we can't do it
+ * in ocfs2_cluster_disconnect(). The ocfs2_leave_group() function does
+ * the actual work.
+ */
void ocfs2_cluster_hangup(const char *group, int grouplen)
{
BUG_ON(group == NULL);
BUG_ON(group[grouplen] != '\0');
- if (active_stack->sp_ops->hangup)
- active_stack->sp_ops->hangup(group, grouplen);
+ ocfs2_leave_group(group);
/* cluster_disconnect() was called with hangup_pending==1 */
ocfs2_stack_driver_put();
@@ -548,10 +586,83 @@ error:
return ret;
}
+/*
+ * Sysctl bits
+ *
+ * The sysctl lives at /proc/sys/fs/ocfs2/nm/hb_ctl_path. The 'nm' doesn't
+ * make as much sense in a multiple cluster stack world, but it's safer
+ * and easier to preserve the name.
+ */
+
+#define FS_OCFS2_NM 1
+
+static ctl_table ocfs2_nm_table[] = {
+ {
+ .ctl_name = 1,
+ .procname = "hb_ctl_path",
+ .data = ocfs2_hb_ctl_path,
+ .maxlen = OCFS2_MAX_HB_CTL_PATH,
+ .mode = 0644,
+ .proc_handler = &proc_dostring,
+ .strategy = &sysctl_string,
+ },
+ { .ctl_name = 0 }
+};
+
+static ctl_table ocfs2_mod_table[] = {
+ {
+ .ctl_name = FS_OCFS2_NM,
+ .procname = "nm",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0555,
+ .child = ocfs2_nm_table
+ },
+ { .ctl_name = 0}
+};
+
+static ctl_table ocfs2_kern_table[] = {
+ {
+ .ctl_name = FS_OCFS2,
+ .procname = "ocfs2",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0555,
+ .child = ocfs2_mod_table
+ },
+ { .ctl_name = 0}
+};
+
+static ctl_table ocfs2_root_table[] = {
+ {
+ .ctl_name = CTL_FS,
+ .procname = "fs",
+ .data = NULL,
+ .maxlen = 0,
+ .mode = 0555,
+ .child = ocfs2_kern_table
+ },
+ { .ctl_name = 0 }
+};
+
+static struct ctl_table_header *ocfs2_table_header = NULL;
+
+
+/*
+ * Initialization
+ */
+
static int __init ocfs2_stack_glue_init(void)
{
strcpy(cluster_stack_name, OCFS2_STACK_PLUGIN_O2CB);
+ ocfs2_table_header = register_sysctl_table(ocfs2_root_table);
+ if (!ocfs2_table_header) {
+ printk(KERN_ERR
+ "ocfs2 stack glue: unable to register sysctl\n");
+ return -ENOMEM; /* or something. */
+ }
+
return ocfs2_sysfs_init();
}
@@ -559,6 +670,8 @@ static void __exit ocfs2_stack_glue_exit(void)
{
lproto = NULL;
ocfs2_sysfs_exit();
+ if (ocfs2_table_header)
+ unregister_sysctl_table(ocfs2_table_header);
}
MODULE_AUTHOR("Oracle");
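[Editorial sketch, not part of the patch.] The tables above expose the helper path as /proc/sys/fs/ocfs2/nm/hb_ctl_path via proc_dostring, so the value can be inspected or overridden from userspace once the stack glue module is loaded (writing requires root). A minimal illustration, where the replacement path is purely hypothetical:

/* Sketch: query and override the ocfs2_hb_ctl helper path through the
 * sysctl registered above.  The alternate binary path is an example only. */
#include <stdio.h>

#define HB_CTL_SYSCTL "/proc/sys/fs/ocfs2/nm/hb_ctl_path"

int main(void)
{
	char path[256];	/* mirrors OCFS2_MAX_HB_CTL_PATH */
	FILE *f = fopen(HB_CTL_SYSCTL, "r");

	if (!f) {
		perror("open " HB_CTL_SYSCTL);
		return 1;
	}
	if (fgets(path, sizeof(path), f))
		printf("current helper: %s", path);
	fclose(f);

	/* Writing needs root; the target path below is hypothetical. */
	f = fopen(HB_CTL_SYSCTL, "w");
	if (f) {
		fputs("/usr/local/sbin/ocfs2_hb_ctl\n", f);
		fclose(f);
	}
	return 0;
}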
diff --git a/fs/ocfs2/stackglue.h b/fs/ocfs2/stackglue.h
index 005e4f170e0f..db56281dd1be 100644
--- a/fs/ocfs2/stackglue.h
+++ b/fs/ocfs2/stackglue.h
@@ -134,22 +134,10 @@ struct ocfs2_stack_operations {
* be freed. Thus, a stack must not return from ->disconnect()
* until it will no longer reference the conn pointer.
*
- * If hangup_pending is zero, ocfs2_cluster_disconnect() will also
- * be dropping the reference on the module.
+ * Once this call returns, the stack glue will be dropping this
+ * connection's reference on the module.
*/
- int (*disconnect)(struct ocfs2_cluster_connection *conn,
- int hangup_pending);
-
- /*
- * ocfs2_cluster_hangup() exists for compatibility with older
- * ocfs2 tools. Only the classic stack really needs it. As such
- * ->hangup() is not required of all stacks. See the comment by
- * ocfs2_cluster_hangup() for more details.
- *
- * Note that ocfs2_cluster_hangup() can only be called if
- * hangup_pending was passed to ocfs2_cluster_disconnect().
- */
- void (*hangup)(const char *group, int grouplen);
+ int (*disconnect)(struct ocfs2_cluster_connection *conn);
/*
* ->this_node() returns the cluster's unique identifier for the
@@ -258,4 +246,5 @@ void ocfs2_stack_glue_set_locking_protocol(struct ocfs2_locking_protocol *proto)
/* Used by stack plugins */
int ocfs2_stack_glue_register(struct ocfs2_stack_plugin *plugin);
void ocfs2_stack_glue_unregister(struct ocfs2_stack_plugin *plugin);
+
#endif /* STACKGLUE_H */
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index df63ba20ae90..ccecfe5094fa 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -1703,7 +1703,11 @@ static int ocfs2_check_volume(struct ocfs2_super *osb)
local = ocfs2_mount_local(osb);
/* will play back anything left in the journal. */
- ocfs2_journal_load(osb->journal, local);
+ status = ocfs2_journal_load(osb->journal, local);
+ if (status < 0) {
+ mlog(ML_ERROR, "ocfs2 journal load failed! %d\n", status);
+ goto finally;
+ }
if (dirty) {
/* recover my local alloc if we didn't unmount cleanly. */
diff --git a/fs/partitions/check.c b/fs/partitions/check.c
index 6149e4b58c88..efef715135d3 100644
--- a/fs/partitions/check.c
+++ b/fs/partitions/check.c
@@ -401,7 +401,7 @@ void register_disk(struct gendisk *disk)
disk->dev.parent = disk->driverfs_dev;
disk->dev.devt = MKDEV(disk->major, disk->first_minor);
- strlcpy(disk->dev.bus_id, disk->disk_name, KOBJ_NAME_LEN);
+ strlcpy(disk->dev.bus_id, disk->disk_name, BUS_ID_SIZE);
/* ewww... some of these buggers have / in the name... */
s = strchr(disk->dev.bus_id, '/');
if (s)
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 9e3b8c33c24b..797d775e0354 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -288,7 +288,7 @@ static void render_cap_t(struct seq_file *m, const char *header,
seq_printf(m, "%s", header);
CAP_FOR_EACH_U32(__capi) {
seq_printf(m, "%08x",
- a->cap[(_LINUX_CAPABILITY_U32S-1) - __capi]);
+ a->cap[(_KERNEL_CAPABILITY_U32S-1) - __capi]);
}
seq_printf(m, "\n");
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 808cbdc193d3..58c3e6a8e15e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -127,6 +127,25 @@ struct pid_entry {
NULL, &proc_single_file_operations, \
{ .proc_show = &proc_##OTYPE } )
+/*
+ * Count the number of hardlinks for the pid_entry table, excluding the .
+ * and .. links.
+ */
+static unsigned int pid_entry_count_dirs(const struct pid_entry *entries,
+ unsigned int n)
+{
+ unsigned int i;
+ unsigned int count;
+
+ count = 0;
+ for (i = 0; i < n; ++i) {
+ if (S_ISDIR(entries[i].mode))
+ ++count;
+ }
+
+ return count;
+}
+
int maps_protect;
EXPORT_SYMBOL(maps_protect);
@@ -214,7 +233,7 @@ static int check_mem_permission(struct task_struct *task)
*/
if (task->parent == current && (task->ptrace & PT_PTRACED) &&
task_is_stopped_or_traced(task) &&
- ptrace_may_attach(task))
+ ptrace_may_access(task, PTRACE_MODE_ATTACH))
return 0;
/*
@@ -232,7 +251,8 @@ struct mm_struct *mm_for_maps(struct task_struct *task)
task_lock(task);
if (task->mm != mm)
goto out;
- if (task->mm != current->mm && __ptrace_may_attach(task) < 0)
+ if (task->mm != current->mm &&
+ __ptrace_may_access(task, PTRACE_MODE_READ) < 0)
goto out;
task_unlock(task);
return mm;
@@ -499,7 +519,7 @@ static int proc_fd_access_allowed(struct inode *inode)
*/
task = get_proc_task(inode);
if (task) {
- allowed = ptrace_may_attach(task);
+ allowed = ptrace_may_access(task, PTRACE_MODE_READ);
put_task_struct(task);
}
return allowed;
@@ -885,7 +905,7 @@ static ssize_t environ_read(struct file *file, char __user *buf,
if (!task)
goto out_no_task;
- if (!ptrace_may_attach(task))
+ if (!ptrace_may_access(task, PTRACE_MODE_READ))
goto out;
ret = -ENOMEM;
@@ -2441,7 +2461,7 @@ static const struct pid_entry tgid_base_stuff[] = {
REG("oom_adj", S_IRUGO|S_IWUSR, oom_adjust),
#ifdef CONFIG_AUDITSYSCALL
REG("loginuid", S_IWUSR|S_IRUGO, loginuid),
- REG("sessionid", S_IRUSR, sessionid),
+ REG("sessionid", S_IRUGO, sessionid),
#endif
#ifdef CONFIG_FAULT_INJECTION
REG("make-it-fail", S_IRUGO|S_IWUSR, fault_inject),
@@ -2585,10 +2605,9 @@ static struct dentry *proc_pid_instantiate(struct inode *dir,
inode->i_op = &proc_tgid_base_inode_operations;
inode->i_fop = &proc_tgid_base_operations;
inode->i_flags|=S_IMMUTABLE;
- inode->i_nlink = 5;
-#ifdef CONFIG_SECURITY
- inode->i_nlink += 1;
-#endif
+
+ inode->i_nlink = 2 + pid_entry_count_dirs(tgid_base_stuff,
+ ARRAY_SIZE(tgid_base_stuff));
dentry->d_op = &pid_dentry_operations;
@@ -2816,10 +2835,9 @@ static struct dentry *proc_task_instantiate(struct inode *dir,
inode->i_op = &proc_tid_base_inode_operations;
inode->i_fop = &proc_tid_base_operations;
inode->i_flags|=S_IMMUTABLE;
- inode->i_nlink = 4;
-#ifdef CONFIG_SECURITY
- inode->i_nlink += 1;
-#endif
+
+ inode->i_nlink = 2 + pid_entry_count_dirs(tid_base_stuff,
+ ARRAY_SIZE(tid_base_stuff));
dentry->d_op = &pid_dentry_operations;
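[Editorial sketch, not part of the patch.] With these hunks the link count of a /proc/<pid> directory is derived from the pid_entry tables themselves: two links for "." and the parent's reference, plus one for each S_ISDIR() entry in tgid_base_stuff or tid_base_stuff, instead of a hard-coded 4 or 5. The visible effect can be checked from userspace:

/* Sketch: print the link count of a /proc/<pid> directory, which after this
 * change should equal 2 + the number of subdirectories actually compiled in. */
#include <stdio.h>
#include <sys/stat.h>

int main(int argc, char **argv)
{
	const char *dir = (argc > 1) ? argv[1] : "/proc/1";
	struct stat st;

	if (stat(dir, &st) != 0) {
		perror("stat");
		return 1;
	}
	printf("%s: nlink=%lu\n", dir, (unsigned long)st.st_nlink);
	return 0;
}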
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 6f4e8dc97da1..b08d10017911 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -425,7 +425,8 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
}
}
unlock_new_inode(inode);
- }
+ } else
+ module_put(de->owner);
return inode;
out_ino:
diff --git a/fs/proc/proc_misc.c b/fs/proc/proc_misc.c
index 74a323d2b850..c652d469dc08 100644
--- a/fs/proc/proc_misc.c
+++ b/fs/proc/proc_misc.c
@@ -123,6 +123,11 @@ static int uptime_read_proc(char *page, char **start, off_t off,
return proc_calc_metrics(page, start, off, count, eof, len);
}
+int __attribute__((weak)) arch_report_meminfo(char *page)
+{
+ return 0;
+}
+
static int meminfo_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
@@ -139,7 +144,7 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
#define K(x) ((x) << (PAGE_SHIFT - 10))
si_meminfo(&i);
si_swapinfo(&i);
- committed = atomic_read(&vm_committed_space);
+ committed = atomic_long_read(&vm_committed_space);
allowed = ((totalram_pages - hugetlb_total_pages())
* sysctl_overcommit_ratio / 100) + total_swap_pages;
@@ -221,6 +226,8 @@ static int meminfo_read_proc(char *page, char **start, off_t off,
len += hugetlb_report_meminfo(page + len);
+ len += arch_report_meminfo(page + len);
+
return proc_calc_metrics(page, start, off, count, eof, len);
#undef K
}
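[Editorial sketch, not part of the patch.] arch_report_meminfo() is defined with __attribute__((weak)) so that architectures with nothing extra to report fall back to a stub returning 0, while an architecture object file providing a strong definition overrides it at link time. A standalone illustration of the weak-default pattern, assuming GCC semantics:

/* Sketch of the weak-symbol default used by arch_report_meminfo() above:
 * the weak stub is used unless another object file supplies a strong
 * definition of the same symbol. */
#include <stdio.h>

/* Weak default: reports nothing (returns 0 bytes written). */
int __attribute__((weak)) arch_report_meminfo(char *page)
{
	(void)page;
	return 0;
}

int main(void)
{
	char page[128] = "";
	int len = arch_report_meminfo(page);

	printf("arch reported %d bytes: \"%s\"\n", len, page);
	return 0;
}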
@@ -472,6 +479,13 @@ static const struct file_operations proc_vmalloc_operations = {
};
#endif
+#ifndef arch_irq_stat_cpu
+#define arch_irq_stat_cpu(cpu) 0
+#endif
+#ifndef arch_irq_stat
+#define arch_irq_stat() 0
+#endif
+
static int show_stat(struct seq_file *p, void *v)
{
int i;
@@ -509,7 +523,9 @@ static int show_stat(struct seq_file *p, void *v)
sum += temp;
per_irq_sum[j] += temp;
}
+ sum += arch_irq_stat_cpu(i);
}
+ sum += arch_irq_stat();
seq_printf(p, "cpu %llu %llu %llu %llu %llu %llu %llu %llu %llu\n",
(unsigned long long)cputime64_to_clock_t(user),
@@ -716,7 +732,7 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
pfn = src / KPMSIZE;
count = min_t(size_t, count, (max_pfn * KPMSIZE) - src);
if (src & KPMMASK || count & KPMMASK)
- return -EIO;
+ return -EINVAL;
while (count > 0) {
ppage = NULL;
@@ -726,7 +742,7 @@ static ssize_t kpagecount_read(struct file *file, char __user *buf,
if (!ppage)
pcount = 0;
else
- pcount = atomic_read(&ppage->_count);
+ pcount = page_mapcount(ppage);
if (put_user(pcount, out++)) {
ret = -EFAULT;
@@ -782,7 +798,7 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
pfn = src / KPMSIZE;
count = min_t(unsigned long, count, (max_pfn * KPMSIZE) - src);
if (src & KPMMASK || count & KPMMASK)
- return -EIO;
+ return -EINVAL;
while (count > 0) {
ppage = NULL;
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 88717c0f941b..160f30f3532a 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -210,7 +210,7 @@ static int show_map(struct seq_file *m, void *v)
dev_t dev = 0;
int len;
- if (maps_protect && !ptrace_may_attach(task))
+ if (maps_protect && !ptrace_may_access(task, PTRACE_MODE_READ))
return -EACCES;
if (file) {
@@ -496,7 +496,7 @@ const struct file_operations proc_clear_refs_operations = {
};
struct pagemapread {
- char __user *out, *end;
+ u64 __user *out, *end;
};
#define PM_ENTRY_BYTES sizeof(u64)
@@ -519,21 +519,11 @@ struct pagemapread {
static int add_to_pagemap(unsigned long addr, u64 pfn,
struct pagemapread *pm)
{
- /*
- * Make sure there's room in the buffer for an
- * entire entry. Otherwise, only copy part of
- * the pfn.
- */
- if (pm->out + PM_ENTRY_BYTES >= pm->end) {
- if (copy_to_user(pm->out, &pfn, pm->end - pm->out))
- return -EFAULT;
- pm->out = pm->end;
- return PM_END_OF_BUFFER;
- }
-
if (put_user(pfn, pm->out))
return -EFAULT;
- pm->out += PM_ENTRY_BYTES;
+ pm->out++;
+ if (pm->out >= pm->end)
+ return PM_END_OF_BUFFER;
return 0;
}
@@ -629,12 +619,12 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
goto out;
ret = -EACCES;
- if (!ptrace_may_attach(task))
+ if (!ptrace_may_access(task, PTRACE_MODE_READ))
goto out_task;
ret = -EINVAL;
/* file position must be aligned */
- if (*ppos % PM_ENTRY_BYTES)
+ if ((*ppos % PM_ENTRY_BYTES) || (count % PM_ENTRY_BYTES))
goto out_task;
ret = 0;
@@ -664,10 +654,10 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
goto out_pages;
}
- pm.out = buf;
- pm.end = buf + count;
+ pm.out = (u64 *)buf;
+ pm.end = (u64 *)(buf + count);
- if (!ptrace_may_attach(task)) {
+ if (!ptrace_may_access(task, PTRACE_MODE_READ)) {
ret = -EIO;
} else {
unsigned long src = *ppos;
@@ -690,9 +680,9 @@ static ssize_t pagemap_read(struct file *file, char __user *buf,
if (ret == PM_END_OF_BUFFER)
ret = 0;
/* don't need mmap_sem for these, but this looks cleaner */
- *ppos += pm.out - buf;
+ *ppos += (char *)pm.out - buf;
if (!ret)
- ret = pm.out - buf;
+ ret = (char *)pm.out - buf;
}
out_pages:
@@ -726,7 +716,7 @@ static int show_numa_map_checked(struct seq_file *m, void *v)
struct proc_maps_private *priv = m->private;
struct task_struct *task = priv->task;
- if (maps_protect && !ptrace_may_attach(task))
+ if (maps_protect && !ptrace_may_access(task, PTRACE_MODE_READ))
return -EACCES;
return show_numa_map(m, v);
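[Editorial sketch, not part of the patch.] The pagemap changes treat the user buffer as an array of u64 entries: add_to_pagemap() now writes one entry at a time with put_user(), and pagemap_read() rejects a file position or byte count that is not a multiple of PM_ENTRY_BYTES with -EINVAL. A rough userspace read that stays aligned (eight bytes per page, offset computed from the virtual page number) could look like this; interpreting the entry bits is outside the scope of this hunk:

/* Sketch: read one 8-byte pagemap entry for an address in our own mapping,
 * keeping offset and length multiples of the entry size as the patched
 * pagemap_read() requires. */
#define _FILE_OFFSET_BITS 64
#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	long page_size = sysconf(_SC_PAGESIZE);
	void *buf = malloc(page_size);
	uintptr_t vaddr = (uintptr_t)buf;
	off_t offset = (off_t)(vaddr / page_size) * sizeof(uint64_t);
	uint64_t entry;
	int fd;

	fd = open("/proc/self/pagemap", O_RDONLY);
	if (fd < 0) {
		perror("open /proc/self/pagemap");
		return 1;
	}
	/* 8-byte aligned offset and 8-byte count: accepted by the new checks */
	if (pread(fd, &entry, sizeof(entry), offset) != sizeof(entry)) {
		perror("pread");
		close(fd);
		return 1;
	}
	printf("pagemap entry for %p: 0x%016llx\n",
	       buf, (unsigned long long)entry);
	close(fd);
	free(buf);
	return 0;
}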
diff --git a/fs/proc/task_nommu.c b/fs/proc/task_nommu.c
index 4b4f9cc2f186..5d84e7121df8 100644
--- a/fs/proc/task_nommu.c
+++ b/fs/proc/task_nommu.c
@@ -113,7 +113,7 @@ static int show_map(struct seq_file *m, void *_vml)
struct proc_maps_private *priv = m->private;
struct task_struct *task = priv->task;
- if (maps_protect && !ptrace_may_attach(task))
+ if (maps_protect && !ptrace_may_access(task, PTRACE_MODE_READ))
return -EACCES;
return nommu_vma_show(m, vml->vma);
diff --git a/fs/reiserfs/journal.c b/fs/reiserfs/journal.c
index e396b2fa4743..9ec7d66c24a5 100644
--- a/fs/reiserfs/journal.c
+++ b/fs/reiserfs/journal.c
@@ -1412,7 +1412,7 @@ static int flush_journal_list(struct super_block *s,
/* if flushall == 0, the lock is already held */
if (flushall) {
down(&journal->j_flush_sem);
- } else if (!down_trylock(&journal->j_flush_sem)) {
+ } else if (down_try(&journal->j_flush_sem)) {
BUG();
}
diff --git a/fs/splice.c b/fs/splice.c
index 78150038b584..aa5f6f60b305 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -58,8 +58,8 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
*/
wait_on_page_writeback(page);
- if (PagePrivate(page))
- try_to_release_page(page, GFP_KERNEL);
+ if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
+ goto out_unlock;
/*
* If we succeeded in removing the mapping, set LRU flag
@@ -75,6 +75,7 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
* Raced with truncate or failed to remove page from current
* address space, unlock and return failure.
*/
+out_unlock:
unlock_page(page);
return 1;
}
@@ -983,7 +984,7 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
while (len) {
size_t read_len;
- loff_t pos = sd->pos;
+ loff_t pos = sd->pos, prev_pos = pos;
ret = do_splice_to(in, &pos, pipe, len, flags);
if (unlikely(ret <= 0))
@@ -998,15 +999,19 @@ ssize_t splice_direct_to_actor(struct file *in, struct splice_desc *sd,
* could get stuck data in the internal pipe:
*/
ret = actor(pipe, sd);
- if (unlikely(ret <= 0))
+ if (unlikely(ret <= 0)) {
+ sd->pos = prev_pos;
goto out_release;
+ }
bytes += ret;
len -= ret;
sd->pos = pos;
- if (ret < read_len)
+ if (ret < read_len) {
+ sd->pos = prev_pos + ret;
goto out_release;
+ }
}
done:
@@ -1072,7 +1077,7 @@ long do_splice_direct(struct file *in, loff_t *ppos, struct file *out,
ret = splice_direct_to_actor(in, &sd, direct_splice_actor);
if (ret > 0)
- *ppos += ret;
+ *ppos = sd.pos;
return ret;
}
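[Editorial sketch, not part of the patch.] The splice_direct_to_actor() change makes sd->pos, and therefore *ppos in do_splice_direct(), advance only by what the actor actually consumed, so a short write no longer moves the source offset past data that was never delivered. For sendfile(2), which on kernels of this era is built on do_splice_direct(), the observable result is that the source file offset matches the returned byte count even after a partial transfer. A rough check, assuming a kernel that accepts a regular file as the sendfile destination (older ones require a socket):

/* Sketch: after a (possibly partial) sendfile() with offset == NULL, the
 * source descriptor's file position should have advanced by exactly the
 * number of bytes sendfile() reported. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/sendfile.h>
#include <unistd.h>

int main(int argc, char **argv)
{
	int in_fd, out_fd;
	ssize_t sent;
	off_t pos;

	if (argc != 3) {
		fprintf(stderr, "usage: %s <infile> <outfile>\n", argv[0]);
		return 1;
	}
	in_fd = open(argv[1], O_RDONLY);
	out_fd = open(argv[2], O_WRONLY | O_CREAT | O_TRUNC, 0644);
	if (in_fd < 0 || out_fd < 0) {
		perror("open");
		return 1;
	}
	sent = sendfile(out_fd, in_fd, NULL, 1 << 20);
	pos = lseek(in_fd, 0, SEEK_CUR);
	printf("sendfile returned %zd, source offset now %lld\n",
	       sent, (long long)pos);
	return 0;
}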
diff --git a/fs/sysfs/dir.c b/fs/sysfs/dir.c
index a1c3a1fab7f0..8c0e4b92574f 100644
--- a/fs/sysfs/dir.c
+++ b/fs/sysfs/dir.c
@@ -419,12 +419,8 @@ void sysfs_addrm_start(struct sysfs_addrm_cxt *acxt,
*/
int sysfs_add_one(struct sysfs_addrm_cxt *acxt, struct sysfs_dirent *sd)
{
- if (sysfs_find_dirent(acxt->parent_sd, sd->s_name)) {
- printk(KERN_WARNING "sysfs: duplicate filename '%s' "
- "can not be created\n", sd->s_name);
- WARN_ON(1);
+ if (sysfs_find_dirent(acxt->parent_sd, sd->s_name))
return -EEXIST;
- }
sd->s_parent = sysfs_get(acxt->parent_sd);
diff --git a/fs/sysfs/file.c b/fs/sysfs/file.c
index e7735f643cd1..9db95a710e92 100644
--- a/fs/sysfs/file.c
+++ b/fs/sysfs/file.c
@@ -18,10 +18,18 @@
#include <linux/poll.h>
#include <linux/list.h>
#include <linux/mutex.h>
+#include <linux/limits.h>
#include <asm/uaccess.h>
#include "sysfs.h"
+/* used in crash dumps to help with debugging */
+static char last_sysfs_file[PATH_MAX];
+void sysfs_printk_last_file(void)
+{
+ printk(KERN_EMERG "last sysfs file: %s\n", last_sysfs_file);
+}
+
/*
* There's one sysfs_buffer for each open file and one
* sysfs_open_dirent for each sysfs_dirent with one or more open
@@ -327,6 +335,11 @@ static int sysfs_open_file(struct inode *inode, struct file *file)
struct sysfs_buffer *buffer;
struct sysfs_ops *ops;
int error = -EACCES;
+ char *p;
+
+ p = d_path(&file->f_path, last_sysfs_file, sizeof(last_sysfs_file));
+ if (p)
+ memmove(last_sysfs_file, p, strlen(p) + 1);
/* need attr_sd for attr and ops, its parent for kobj */
if (!sysfs_get_active_two(attr_sd))
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index 14f0023984d7..9f328d2420e7 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -22,7 +22,7 @@
/* Random magic number */
#define SYSFS_MAGIC 0x62656572
-static struct vfsmount *sysfs_mount;
+struct vfsmount *sysfs_mount;
struct super_block * sysfs_sb = NULL;
struct kmem_cache *sysfs_dir_cachep;
diff --git a/fs/sysfs/sysfs.h b/fs/sysfs/sysfs.h
index ce4e15f8aaeb..2915959a4499 100644
--- a/fs/sysfs/sysfs.h
+++ b/fs/sysfs/sysfs.h
@@ -91,6 +91,7 @@ struct sysfs_addrm_cxt {
extern struct sysfs_dirent sysfs_root;
extern struct super_block *sysfs_sb;
extern struct kmem_cache *sysfs_dir_cachep;
+extern struct vfsmount *sysfs_mount;
/*
* dir.c
diff --git a/fs/udf/super.c b/fs/udf/super.c
index 7a5f69be6ac2..dd295b8a8e7a 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -1770,6 +1770,7 @@ static void udf_open_lvid(struct super_block *sb)
le16_to_cpu(lvid->descTag.descCRCLength)));
lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
+ set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
}
@@ -1805,6 +1806,7 @@ static void udf_close_lvid(struct super_block *sb)
le16_to_cpu(lvid->descTag.descCRCLength)));
lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
+ set_buffer_uptodate(bh);
mark_buffer_dirty(bh);
}
diff --git a/fs/xfs/linux-2.6/kmem.c b/fs/xfs/linux-2.6/kmem.c
index 9b1bb17a0501..1cd3b55ee3d2 100644
--- a/fs/xfs/linux-2.6/kmem.c
+++ b/fs/xfs/linux-2.6/kmem.c
@@ -90,7 +90,7 @@ kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
}
void
-kmem_free(void *ptr, size_t size)
+kmem_free(const void *ptr)
{
if (!is_vmalloc_addr(ptr)) {
kfree(ptr);
@@ -100,7 +100,7 @@ kmem_free(void *ptr, size_t size)
}
void *
-kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
+kmem_realloc(const void *ptr, size_t newsize, size_t oldsize,
unsigned int __nocast flags)
{
void *new;
@@ -110,7 +110,7 @@ kmem_realloc(void *ptr, size_t newsize, size_t oldsize,
if (new)
memcpy(new, ptr,
((oldsize < newsize) ? oldsize : newsize));
- kmem_free(ptr, oldsize);
+ kmem_free(ptr);
}
return new;
}
diff --git a/fs/xfs/linux-2.6/kmem.h b/fs/xfs/linux-2.6/kmem.h
index 5e9564902976..3c9910103c52 100644
--- a/fs/xfs/linux-2.6/kmem.h
+++ b/fs/xfs/linux-2.6/kmem.h
@@ -57,8 +57,8 @@ kmem_flags_convert(unsigned int __nocast flags)
extern void *kmem_alloc(size_t, unsigned int __nocast);
extern void *kmem_zalloc(size_t, unsigned int __nocast);
extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast);
-extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
-extern void kmem_free(void *, size_t);
+extern void *kmem_realloc(const void *, size_t, size_t, unsigned int __nocast);
+extern void kmem_free(const void *);
/*
* Zone interfaces
diff --git a/fs/xfs/linux-2.6/sema.h b/fs/xfs/linux-2.6/sema.h
index 3abe7e9ceb33..7d20f0422c97 100644
--- a/fs/xfs/linux-2.6/sema.h
+++ b/fs/xfs/linux-2.6/sema.h
@@ -36,17 +36,15 @@ typedef struct semaphore sema_t;
static inline int issemalocked(sema_t *sp)
{
- return down_trylock(sp) || (up(sp), 0);
+ return !down_try(sp) || (up(sp), 0);
}
/*
- * Map cpsema (try to get the sema) to down_trylock. We need to switch
- * the return values since cpsema returns 1 (acquired) 0 (failed) and
- * down_trylock returns the reverse 0 (acquired) 1 (failed).
+ * Map cpsema (try to get the sema) to down_try.
*/
static inline int cpsema(sema_t *sp)
{
- return down_trylock(sp) ? 0 : 1;
+ return down_try(sp);
}
#endif /* __XFS_SUPPORT_SEMA_H__ */
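[Editorial note, not part of the patch.] This hunk captures the whole point of the down_trylock() to down_try() conversion in this series: down_trylock() returns 0 when the semaphore was acquired, while down_try(), as used here, returns nonzero on success, so every call site has its test inverted. The same inversion trap exists with userspace try-lock APIs; a standalone analogue using pthread_mutex_trylock(), which also returns 0 on success, is sketched below (compile with -pthread):

/* Sketch: pthread_mutex_trylock() follows the down_trylock() convention
 * (0 == acquired), so the "did I get it?" test must be written as == 0,
 * exactly the kind of inversion the down_try() conversion above removes. */
#include <pthread.h>
#include <stdio.h>

int main(void)
{
	pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

	if (pthread_mutex_trylock(&lock) == 0) {
		/* acquired: analogous to the new "if (down_try(sp))" form */
		printf("lock acquired on first try\n");

		/* a second trylock on the held mutex reports EBUSY */
		if (pthread_mutex_trylock(&lock) != 0)
			printf("second trylock failed, as expected\n");

		pthread_mutex_unlock(&lock);
	}
	return 0;
}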
diff --git a/fs/xfs/linux-2.6/xfs_aops.c b/fs/xfs/linux-2.6/xfs_aops.c
index a55c3b26d840..0b211cba1909 100644
--- a/fs/xfs/linux-2.6/xfs_aops.c
+++ b/fs/xfs/linux-2.6/xfs_aops.c
@@ -409,7 +409,6 @@ xfs_start_buffer_writeback(
STATIC void
xfs_start_page_writeback(
struct page *page,
- struct writeback_control *wbc,
int clear_dirty,
int buffers)
{
@@ -858,7 +857,7 @@ xfs_convert_page(
done = 1;
}
}
- xfs_start_page_writeback(page, wbc, !page_dirty, count);
+ xfs_start_page_writeback(page, !page_dirty, count);
}
return done;
@@ -1130,7 +1129,7 @@ xfs_page_state_convert(
SetPageUptodate(page);
if (startio)
- xfs_start_page_writeback(page, wbc, 1, count);
+ xfs_start_page_writeback(page, 1, count);
if (ioend && iomap_valid) {
offset = (iomap.iomap_offset + iomap.iomap_bsize - 1) >>
diff --git a/fs/xfs/linux-2.6/xfs_buf.c b/fs/xfs/linux-2.6/xfs_buf.c
index 5105015a75ad..09e50cdcc3b3 100644
--- a/fs/xfs/linux-2.6/xfs_buf.c
+++ b/fs/xfs/linux-2.6/xfs_buf.c
@@ -310,8 +310,7 @@ _xfs_buf_free_pages(
xfs_buf_t *bp)
{
if (bp->b_pages != bp->b_page_array) {
- kmem_free(bp->b_pages,
- bp->b_page_count * sizeof(struct page *));
+ kmem_free(bp->b_pages);
}
}
@@ -387,6 +386,8 @@ _xfs_buf_lookup_pages(
if (unlikely(page == NULL)) {
if (flags & XBF_READ_AHEAD) {
bp->b_page_count = i;
+ for (i = 0; i < bp->b_page_count; i++)
+ unlock_page(bp->b_pages[i]);
return -ENOMEM;
}
@@ -416,17 +417,24 @@ _xfs_buf_lookup_pages(
ASSERT(!PagePrivate(page));
if (!PageUptodate(page)) {
page_count--;
- if (blocksize < PAGE_CACHE_SIZE && !PagePrivate(page)) {
+ if (blocksize >= PAGE_CACHE_SIZE) {
+ if (flags & XBF_READ)
+ bp->b_flags |= _XBF_PAGE_LOCKED;
+ } else if (!PagePrivate(page)) {
if (test_page_region(page, offset, nbytes))
page_count++;
}
}
- unlock_page(page);
bp->b_pages[i] = page;
offset = 0;
}
+ if (!(bp->b_flags & _XBF_PAGE_LOCKED)) {
+ for (i = 0; i < bp->b_page_count; i++)
+ unlock_page(bp->b_pages[i]);
+ }
+
if (page_count == bp->b_page_count)
bp->b_flags |= XBF_DONE;
@@ -530,7 +538,7 @@ found:
* if this does not work then we need to drop the
* spinlock and do a hard attempt on the semaphore.
*/
- if (down_trylock(&bp->b_sema)) {
+ if (!down_try(&bp->b_sema)) {
if (!(flags & XBF_TRYLOCK)) {
/* wait for buffer ownership */
XB_TRACE(bp, "get_lock", 0);
@@ -746,6 +754,7 @@ xfs_buf_associate_memory(
bp->b_count_desired = len;
bp->b_buffer_length = buflen;
bp->b_flags |= XBF_MAPPED;
+ bp->b_flags &= ~_XBF_PAGE_LOCKED;
return 0;
}
@@ -873,7 +882,7 @@ xfs_buf_cond_lock(
{
int locked;
- locked = down_trylock(&bp->b_sema) == 0;
+ locked = down_try(&bp->b_sema);
if (locked) {
XB_SET_OWNER(bp);
}
@@ -1093,8 +1102,10 @@ _xfs_buf_ioend(
xfs_buf_t *bp,
int schedule)
{
- if (atomic_dec_and_test(&bp->b_io_remaining) == 1)
+ if (atomic_dec_and_test(&bp->b_io_remaining) == 1) {
+ bp->b_flags &= ~_XBF_PAGE_LOCKED;
xfs_buf_ioend(bp, schedule);
+ }
}
STATIC void
@@ -1125,6 +1136,9 @@ xfs_buf_bio_end_io(
if (--bvec >= bio->bi_io_vec)
prefetchw(&bvec->bv_page->flags);
+
+ if (bp->b_flags & _XBF_PAGE_LOCKED)
+ unlock_page(page);
} while (bvec >= bio->bi_io_vec);
_xfs_buf_ioend(bp, 1);
@@ -1163,7 +1177,8 @@ _xfs_buf_ioapply(
* filesystem block size is not smaller than the page size.
*/
if ((bp->b_buffer_length < PAGE_CACHE_SIZE) &&
- (bp->b_flags & XBF_READ) &&
+ ((bp->b_flags & (XBF_READ|_XBF_PAGE_LOCKED)) ==
+ (XBF_READ|_XBF_PAGE_LOCKED)) &&
(blocksize >= PAGE_CACHE_SIZE)) {
bio = bio_alloc(GFP_NOIO, 1);
@@ -1382,7 +1397,7 @@ STATIC void
xfs_free_bufhash(
xfs_buftarg_t *btp)
{
- kmem_free(btp->bt_hash, (1<<btp->bt_hashshift) * sizeof(xfs_bufhash_t));
+ kmem_free(btp->bt_hash);
btp->bt_hash = NULL;
}
@@ -1412,13 +1427,10 @@ xfs_unregister_buftarg(
void
xfs_free_buftarg(
- xfs_buftarg_t *btp,
- int external)
+ xfs_buftarg_t *btp)
{
xfs_flush_buftarg(btp, 1);
xfs_blkdev_issue_flush(btp);
- if (external)
- xfs_blkdev_put(btp->bt_bdev);
xfs_free_bufhash(btp);
iput(btp->bt_mapping->host);
@@ -1428,7 +1440,7 @@ xfs_free_buftarg(
xfs_unregister_buftarg(btp);
kthread_stop(btp->bt_task);
- kmem_free(btp, sizeof(*btp));
+ kmem_free(btp);
}
STATIC int
@@ -1559,7 +1571,7 @@ xfs_alloc_buftarg(
return btp;
error:
- kmem_free(btp, sizeof(*btp));
+ kmem_free(btp);
return NULL;
}
diff --git a/fs/xfs/linux-2.6/xfs_buf.h b/fs/xfs/linux-2.6/xfs_buf.h
index 841d7883528d..29d1d4adc078 100644
--- a/fs/xfs/linux-2.6/xfs_buf.h
+++ b/fs/xfs/linux-2.6/xfs_buf.h
@@ -66,6 +66,25 @@ typedef enum {
_XBF_PAGES = (1 << 18), /* backed by refcounted pages */
_XBF_RUN_QUEUES = (1 << 19),/* run block device task queue */
_XBF_DELWRI_Q = (1 << 21), /* buffer on delwri queue */
+
+ /*
+ * Special flag for supporting metadata blocks smaller than a FSB.
+ *
+ * In this case we can have multiple xfs_buf_t on a single page and
+ * need to lock out concurrent xfs_buf_t readers as they only
+ * serialise access to the buffer.
+ *
+ * In the FSB size >= PAGE_CACHE_SIZE case, we have no serialisation
+ * between reads of the page. Hence we can have one thread read the
+ * page and modify it, but then race with another thread that thinks
+ * the page is not up-to-date and hence reads it again.
+ *
+ * The result is that the first modification to the page is lost.
+ * This sort of AGF/AGI reading race can happen when unlinking inodes
+ * that require truncation and results in the AGI unlinked list
+ * modifications being lost.
+ */
+ _XBF_PAGE_LOCKED = (1 << 22),
} xfs_buf_flags_t;
typedef enum {
@@ -410,7 +429,7 @@ static inline void xfs_bdwrite(void *mp, xfs_buf_t *bp)
* Handling of buftargs.
*/
extern xfs_buftarg_t *xfs_alloc_buftarg(struct block_device *, int);
-extern void xfs_free_buftarg(xfs_buftarg_t *, int);
+extern void xfs_free_buftarg(xfs_buftarg_t *);
extern void xfs_wait_buftarg(xfs_buftarg_t *);
extern int xfs_setsize_buftarg(xfs_buftarg_t *, unsigned int, unsigned int);
extern int xfs_flush_buftarg(xfs_buftarg_t *, int);
diff --git a/fs/xfs/linux-2.6/xfs_export.c b/fs/xfs/linux-2.6/xfs_export.c
index c672b3238b14..987fe84f7b13 100644
--- a/fs/xfs/linux-2.6/xfs_export.c
+++ b/fs/xfs/linux-2.6/xfs_export.c
@@ -215,7 +215,7 @@ xfs_fs_get_parent(
struct xfs_inode *cip;
struct dentry *parent;
- error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip);
+ error = xfs_lookup(XFS_I(child->d_inode), &xfs_name_dotdot, &cip, NULL);
if (unlikely(error))
return ERR_PTR(-error);
diff --git a/fs/xfs/linux-2.6/xfs_file.c b/fs/xfs/linux-2.6/xfs_file.c
index 65e78c13d4ae..5f60363b9343 100644
--- a/fs/xfs/linux-2.6/xfs_file.c
+++ b/fs/xfs/linux-2.6/xfs_file.c
@@ -184,19 +184,24 @@ xfs_file_release(
return -xfs_release(XFS_I(inode));
}
+/*
+ * We ignore the datasync flag here because a datasync is effectively
+ * identical to an fsync. That is, datasync implies that we need to write
+ * only the metadata needed to be able to access the data that is written
+ * if we crash after the call completes. Hence if we are writing beyond
+ * EOF we have to log the inode size change as well, which makes it a
+ * full fsync. If we don't write beyond EOF, the inode core will be
+ * clean in memory and so we don't need to log the inode, just like
+ * fsync.
+ */
STATIC int
xfs_file_fsync(
struct file *filp,
struct dentry *dentry,
int datasync)
{
- int flags = FSYNC_WAIT;
-
- if (datasync)
- flags |= FSYNC_DATA;
xfs_iflags_clear(XFS_I(dentry->d_inode), XFS_ITRUNCATED);
- return -xfs_fsync(XFS_I(dentry->d_inode), flags,
- (xfs_off_t)0, (xfs_off_t)-1);
+ return -xfs_fsync(XFS_I(dentry->d_inode));
}
/*
diff --git a/fs/xfs/linux-2.6/xfs_iops.c b/fs/xfs/linux-2.6/xfs_iops.c
index 2bf287ef5489..b022251114d9 100644
--- a/fs/xfs/linux-2.6/xfs_iops.c
+++ b/fs/xfs/linux-2.6/xfs_iops.c
@@ -382,7 +382,7 @@ xfs_vn_lookup(
return ERR_PTR(-ENAMETOOLONG);
xfs_dentry_to_name(&name, dentry);
- error = xfs_lookup(XFS_I(dir), &name, &cip);
+ error = xfs_lookup(XFS_I(dir), &name, &cip, NULL);
if (unlikely(error)) {
if (unlikely(error != ENOENT))
return ERR_PTR(-error);
@@ -393,6 +393,46 @@ xfs_vn_lookup(
return d_splice_alias(cip->i_vnode, dentry);
}
+STATIC struct dentry *
+xfs_vn_ci_lookup(
+ struct inode *dir,
+ struct dentry *dentry,
+ struct nameidata *nd)
+{
+ struct xfs_inode *ip;
+ struct xfs_name xname;
+ struct xfs_name ci_name;
+ struct qstr dname;
+ int error;
+
+ if (dentry->d_name.len >= MAXNAMELEN)
+ return ERR_PTR(-ENAMETOOLONG);
+
+ xfs_dentry_to_name(&xname, dentry);
+ error = xfs_lookup(XFS_I(dir), &xname, &ip, &ci_name);
+ if (unlikely(error)) {
+ if (unlikely(error != ENOENT))
+ return ERR_PTR(-error);
+ /*
+ * call d_add(dentry, NULL) here when d_drop_negative_children
+ * is called in xfs_vn_mknod (i.e. allow negative dentries
+ * with CI filesystems).
+ */
+ return NULL;
+ }
+
+ /* if exact match, just splice and exit */
+ if (!ci_name.name)
+ return d_splice_alias(ip->i_vnode, dentry);
+
+ /* else case-insensitive match... */
+ dname.name = ci_name.name;
+ dname.len = ci_name.len;
+ dentry = d_add_ci(ip->i_vnode, dentry, &dname);
+ kmem_free(ci_name.name);
+ return dentry;
+}
+
STATIC int
xfs_vn_link(
struct dentry *old_dentry,
@@ -740,15 +780,11 @@ xfs_vn_setxattr(
char *attr = (char *)name;
attrnames_t *namesp;
int xflags = 0;
- int error;
namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT);
if (!namesp)
return -EOPNOTSUPP;
attr += namesp->attr_namelen;
- error = namesp->attr_capable(vp, NULL);
- if (error)
- return error;
/* Convert Linux syscall to XFS internal ATTR flags */
if (flags & XATTR_CREATE)
@@ -770,15 +806,11 @@ xfs_vn_getxattr(
char *attr = (char *)name;
attrnames_t *namesp;
int xflags = 0;
- ssize_t error;
namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT);
if (!namesp)
return -EOPNOTSUPP;
attr += namesp->attr_namelen;
- error = namesp->attr_capable(vp, NULL);
- if (error)
- return error;
/* Convert Linux syscall to XFS internal ATTR flags */
if (!size) {
@@ -818,15 +850,12 @@ xfs_vn_removexattr(
char *attr = (char *)name;
attrnames_t *namesp;
int xflags = 0;
- int error;
namesp = attr_lookup_namespace(attr, attr_namespaces, ATTR_NAMECOUNT);
if (!namesp)
return -EOPNOTSUPP;
attr += namesp->attr_namelen;
- error = namesp->attr_capable(vp, NULL);
- if (error)
- return error;
+
xflags |= namesp->attr_flag;
return namesp->attr_remove(vp, attr, xflags);
}
@@ -904,6 +933,25 @@ const struct inode_operations xfs_dir_inode_operations = {
.removexattr = xfs_vn_removexattr,
};
+const struct inode_operations xfs_dir_ci_inode_operations = {
+ .create = xfs_vn_create,
+ .lookup = xfs_vn_ci_lookup,
+ .link = xfs_vn_link,
+ .unlink = xfs_vn_unlink,
+ .symlink = xfs_vn_symlink,
+ .mkdir = xfs_vn_mkdir,
+ .rmdir = xfs_vn_rmdir,
+ .mknod = xfs_vn_mknod,
+ .rename = xfs_vn_rename,
+ .permission = xfs_vn_permission,
+ .getattr = xfs_vn_getattr,
+ .setattr = xfs_vn_setattr,
+ .setxattr = xfs_vn_setxattr,
+ .getxattr = xfs_vn_getxattr,
+ .listxattr = xfs_vn_listxattr,
+ .removexattr = xfs_vn_removexattr,
+};
+
const struct inode_operations xfs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = xfs_vn_follow_link,
diff --git a/fs/xfs/linux-2.6/xfs_iops.h b/fs/xfs/linux-2.6/xfs_iops.h
index 14d0deb7afff..3b4df5863e4a 100644
--- a/fs/xfs/linux-2.6/xfs_iops.h
+++ b/fs/xfs/linux-2.6/xfs_iops.h
@@ -20,6 +20,7 @@
extern const struct inode_operations xfs_inode_operations;
extern const struct inode_operations xfs_dir_inode_operations;
+extern const struct inode_operations xfs_dir_ci_inode_operations;
extern const struct inode_operations xfs_symlink_inode_operations;
extern const struct file_operations xfs_file_operations;
diff --git a/fs/xfs/linux-2.6/xfs_linux.h b/fs/xfs/linux-2.6/xfs_linux.h
index 4edc46915b57..aded57321b12 100644
--- a/fs/xfs/linux-2.6/xfs_linux.h
+++ b/fs/xfs/linux-2.6/xfs_linux.h
@@ -76,6 +76,7 @@
#include <linux/log2.h>
#include <linux/spinlock.h>
#include <linux/random.h>
+#include <linux/ctype.h>
#include <asm/page.h>
#include <asm/div64.h>
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 742b2c7852c1..79c11aaf5423 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -52,6 +52,7 @@
#include "xfs_version.h"
#include "xfs_log_priv.h"
#include "xfs_trans_priv.h"
+#include "xfs_filestream.h"
#include <linux/namei.h>
#include <linux/init.h>
@@ -74,7 +75,10 @@ xfs_args_allocate(
{
struct xfs_mount_args *args;
- args = kmem_zalloc(sizeof(struct xfs_mount_args), KM_SLEEP);
+ args = kzalloc(sizeof(struct xfs_mount_args), GFP_KERNEL);
+ if (!args)
+ return NULL;
+
args->logbufs = args->logbufsize = -1;
strncpy(args->fsname, sb->s_id, MAXNAMELEN);
@@ -314,6 +318,7 @@ xfs_parseargs(
args->flags |= XFSMNT_ATTR2;
} else if (!strcmp(this_char, MNTOPT_NOATTR2)) {
args->flags &= ~XFSMNT_ATTR2;
+ args->flags |= XFSMNT_NOATTR2;
} else if (!strcmp(this_char, MNTOPT_FILESTREAM)) {
args->flags2 |= XFSMNT2_FILESTREAMS;
} else if (!strcmp(this_char, MNTOPT_NOQUOTA)) {
@@ -564,7 +569,10 @@ xfs_set_inodeops(
inode->i_mapping->a_ops = &xfs_address_space_operations;
break;
case S_IFDIR:
- inode->i_op = &xfs_dir_inode_operations;
+ if (xfs_sb_version_hasasciici(&XFS_M(inode->i_sb)->m_sb))
+ inode->i_op = &xfs_dir_ci_inode_operations;
+ else
+ inode->i_op = &xfs_dir_inode_operations;
inode->i_fop = &xfs_dir_file_operations;
break;
case S_IFLNK:
@@ -764,6 +772,137 @@ xfs_blkdev_issue_flush(
blkdev_issue_flush(buftarg->bt_bdev, NULL);
}
+STATIC void
+xfs_close_devices(
+ struct xfs_mount *mp)
+{
+ if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
+ xfs_free_buftarg(mp->m_logdev_targp);
+ xfs_blkdev_put(mp->m_logdev_targp->bt_bdev);
+ }
+ if (mp->m_rtdev_targp) {
+ xfs_free_buftarg(mp->m_rtdev_targp);
+ xfs_blkdev_put(mp->m_rtdev_targp->bt_bdev);
+ }
+ xfs_free_buftarg(mp->m_ddev_targp);
+}
+
+/*
+ * The file system configurations are:
+ * (1) device (partition) with data and internal log
+ * (2) logical volume with data and log subvolumes.
+ * (3) logical volume with data, log, and realtime subvolumes.
+ *
+ * We only have to handle opening the log and realtime volumes here if
+ * they are present. The data subvolume has already been opened by
+ * get_sb_bdev() and is stored in sb->s_bdev.
+ */
+STATIC int
+xfs_open_devices(
+ struct xfs_mount *mp,
+ struct xfs_mount_args *args)
+{
+ struct block_device *ddev = mp->m_super->s_bdev;
+ struct block_device *logdev = NULL, *rtdev = NULL;
+ int error;
+
+ /*
+ * Open real time and log devices - order is important.
+ */
+ if (args->logname[0]) {
+ error = xfs_blkdev_get(mp, args->logname, &logdev);
+ if (error)
+ goto out;
+ }
+
+ if (args->rtname[0]) {
+ error = xfs_blkdev_get(mp, args->rtname, &rtdev);
+ if (error)
+ goto out_close_logdev;
+
+ if (rtdev == ddev || rtdev == logdev) {
+ cmn_err(CE_WARN,
+ "XFS: Cannot mount filesystem with identical rtdev and ddev/logdev.");
+ error = EINVAL;
+ goto out_close_rtdev;
+ }
+ }
+
+ /*
+ * Setup xfs_mount buffer target pointers
+ */
+ error = ENOMEM;
+ mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0);
+ if (!mp->m_ddev_targp)
+ goto out_close_rtdev;
+
+ if (rtdev) {
+ mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1);
+ if (!mp->m_rtdev_targp)
+ goto out_free_ddev_targ;
+ }
+
+ if (logdev && logdev != ddev) {
+ mp->m_logdev_targp = xfs_alloc_buftarg(logdev, 1);
+ if (!mp->m_logdev_targp)
+ goto out_free_rtdev_targ;
+ } else {
+ mp->m_logdev_targp = mp->m_ddev_targp;
+ }
+
+ return 0;
+
+ out_free_rtdev_targ:
+ if (mp->m_rtdev_targp)
+ xfs_free_buftarg(mp->m_rtdev_targp);
+ out_free_ddev_targ:
+ xfs_free_buftarg(mp->m_ddev_targp);
+ out_close_rtdev:
+ if (rtdev)
+ xfs_blkdev_put(rtdev);
+ out_close_logdev:
+ if (logdev && logdev != ddev)
+ xfs_blkdev_put(logdev);
+ out:
+ return error;
+}
+
+/*
+ * Setup xfs_mount buffer target pointers based on superblock
+ */
+STATIC int
+xfs_setup_devices(
+ struct xfs_mount *mp)
+{
+ int error;
+
+ error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
+ mp->m_sb.sb_sectsize);
+ if (error)
+ return error;
+
+ if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp) {
+ unsigned int log_sector_size = BBSIZE;
+
+ if (xfs_sb_version_hassector(&mp->m_sb))
+ log_sector_size = mp->m_sb.sb_logsectsize;
+ error = xfs_setsize_buftarg(mp->m_logdev_targp,
+ mp->m_sb.sb_blocksize,
+ log_sector_size);
+ if (error)
+ return error;
+ }
+ if (mp->m_rtdev_targp) {
+ error = xfs_setsize_buftarg(mp->m_rtdev_targp,
+ mp->m_sb.sb_blocksize,
+ mp->m_sb.sb_sectsize);
+ if (error)
+ return error;
+ }
+
+ return 0;
+}
+
/*
* XFS AIL push thread support
*/
@@ -1074,7 +1213,7 @@ xfssyncd(
list_del(&work->w_list);
if (work == &mp->m_sync_work)
continue;
- kmem_free(work, sizeof(struct bhv_vfs_sync_work));
+ kmem_free(work);
}
}
@@ -1086,14 +1225,63 @@ xfs_fs_put_super(
struct super_block *sb)
{
struct xfs_mount *mp = XFS_M(sb);
+ struct xfs_inode *rip = mp->m_rootip;
+ int unmount_event_flags = 0;
int error;
kthread_stop(mp->m_sync_task);
xfs_sync(mp, SYNC_ATTR | SYNC_DELWRI);
- error = xfs_unmount(mp, 0, NULL);
- if (error)
- printk("XFS: unmount got error=%d\n", error);
+
+#ifdef HAVE_DMAPI
+ if (mp->m_flags & XFS_MOUNT_DMAPI) {
+ unmount_event_flags =
+ (mp->m_dmevmask & (1 << DM_EVENT_UNMOUNT)) ?
+ 0 : DM_FLAGS_UNWANTED;
+ /*
+ * Ignore errors from dmapi here: first, unmount is not allowed
+ * to fail anyway, and second, we wouldn't want to fail an
+ * unmount because of dmapi.
+ */
+ XFS_SEND_PREUNMOUNT(mp, rip, DM_RIGHT_NULL, rip, DM_RIGHT_NULL,
+ NULL, NULL, 0, 0, unmount_event_flags);
+ }
+#endif
+
+ /*
+ * Blow away any referenced inode in the filestreams cache.
+ * This can and will cause log traffic as inodes go inactive
+ * here.
+ */
+ xfs_filestream_unmount(mp);
+
+ XFS_bflush(mp->m_ddev_targp);
+ error = xfs_unmount_flush(mp, 0);
+ WARN_ON(error);
+
+ IRELE(rip);
+
+ /*
+ * If we're forcing a shutdown, typically because of a media error,
+ * we want to make sure we invalidate dirty pages that belong to
+ * referenced vnodes as well.
+ */
+ if (XFS_FORCED_SHUTDOWN(mp)) {
+ error = xfs_sync(mp, SYNC_WAIT | SYNC_CLOSE);
+ ASSERT(error != EFSCORRUPTED);
+ }
+
+ if (mp->m_flags & XFS_MOUNT_DMAPI) {
+ XFS_SEND_UNMOUNT(mp, rip, DM_RIGHT_NULL, 0, 0,
+ unmount_event_flags);
+ }
+
+ xfs_unmountfs(mp);
+ xfs_icsb_destroy_counters(mp);
+ xfs_close_devices(mp);
+ xfs_qmops_put(mp);
+ xfs_dmops_put(mp);
+ kfree(mp);
}
STATIC void
@@ -1216,13 +1404,35 @@ xfs_fs_remount(
char *options)
{
struct xfs_mount *mp = XFS_M(sb);
- struct xfs_mount_args *args = xfs_args_allocate(sb, 0);
+ struct xfs_mount_args *args;
int error;
+ args = xfs_args_allocate(sb, 0);
+ if (!args)
+ return -ENOMEM;
+
error = xfs_parseargs(mp, options, args, 1);
- if (!error)
- error = xfs_mntupdate(mp, flags, args);
- kmem_free(args, sizeof(*args));
+ if (error)
+ goto out_free_args;
+
+ if (!(*flags & MS_RDONLY)) { /* rw/ro -> rw */
+ if (mp->m_flags & XFS_MOUNT_RDONLY)
+ mp->m_flags &= ~XFS_MOUNT_RDONLY;
+ if (args->flags & XFSMNT_BARRIER) {
+ mp->m_flags |= XFS_MOUNT_BARRIER;
+ xfs_mountfs_check_barriers(mp);
+ } else {
+ mp->m_flags &= ~XFS_MOUNT_BARRIER;
+ }
+ } else if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { /* rw -> ro */
+ xfs_filestream_flush(mp);
+ xfs_sync(mp, SYNC_DATA_QUIESCE);
+ xfs_attr_quiesce(mp);
+ mp->m_flags |= XFS_MOUNT_RDONLY;
+ }
+
+ out_free_args:
+ kfree(args);
return -error;
}
@@ -1300,6 +1510,225 @@ xfs_fs_setxquota(
Q_XSETPQLIM), id, (caddr_t)fdq);
}
+/*
+ * This function fills in xfs_mount_t fields based on mount args.
+ * Note: the superblock has _not_ yet been read in.
+ */
+STATIC int
+xfs_start_flags(
+ struct xfs_mount_args *ap,
+ struct xfs_mount *mp)
+{
+ /* Values are in BBs */
+ if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) {
+ /*
+ * At this point the superblock has not been read
+ * in, therefore we do not know the block size.
+ * Before the mount call ends we will convert
+ * these to FSBs.
+ */
+ mp->m_dalign = ap->sunit;
+ mp->m_swidth = ap->swidth;
+ }
+
+ if (ap->logbufs != -1 &&
+ ap->logbufs != 0 &&
+ (ap->logbufs < XLOG_MIN_ICLOGS ||
+ ap->logbufs > XLOG_MAX_ICLOGS)) {
+ cmn_err(CE_WARN,
+ "XFS: invalid logbufs value: %d [not %d-%d]",
+ ap->logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
+ return XFS_ERROR(EINVAL);
+ }
+ mp->m_logbufs = ap->logbufs;
+ if (ap->logbufsize != -1 &&
+ ap->logbufsize != 0 &&
+ (ap->logbufsize < XLOG_MIN_RECORD_BSIZE ||
+ ap->logbufsize > XLOG_MAX_RECORD_BSIZE ||
+ !is_power_of_2(ap->logbufsize))) {
+ cmn_err(CE_WARN,
+ "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
+ ap->logbufsize);
+ return XFS_ERROR(EINVAL);
+ }
+ mp->m_logbsize = ap->logbufsize;
+ mp->m_fsname_len = strlen(ap->fsname) + 1;
+ mp->m_fsname = kmem_alloc(mp->m_fsname_len, KM_SLEEP);
+ strcpy(mp->m_fsname, ap->fsname);
+ if (ap->rtname[0]) {
+ mp->m_rtname = kmem_alloc(strlen(ap->rtname) + 1, KM_SLEEP);
+ strcpy(mp->m_rtname, ap->rtname);
+ }
+ if (ap->logname[0]) {
+ mp->m_logname = kmem_alloc(strlen(ap->logname) + 1, KM_SLEEP);
+ strcpy(mp->m_logname, ap->logname);
+ }
+
+ if (ap->flags & XFSMNT_WSYNC)
+ mp->m_flags |= XFS_MOUNT_WSYNC;
+#if XFS_BIG_INUMS
+ if (ap->flags & XFSMNT_INO64) {
+ mp->m_flags |= XFS_MOUNT_INO64;
+ mp->m_inoadd = XFS_INO64_OFFSET;
+ }
+#endif
+ if (ap->flags & XFSMNT_RETERR)
+ mp->m_flags |= XFS_MOUNT_RETERR;
+ if (ap->flags & XFSMNT_NOALIGN)
+ mp->m_flags |= XFS_MOUNT_NOALIGN;
+ if (ap->flags & XFSMNT_SWALLOC)
+ mp->m_flags |= XFS_MOUNT_SWALLOC;
+ if (ap->flags & XFSMNT_OSYNCISOSYNC)
+ mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
+ if (ap->flags & XFSMNT_32BITINODES)
+ mp->m_flags |= XFS_MOUNT_32BITINODES;
+
+ if (ap->flags & XFSMNT_IOSIZE) {
+ if (ap->iosizelog > XFS_MAX_IO_LOG ||
+ ap->iosizelog < XFS_MIN_IO_LOG) {
+ cmn_err(CE_WARN,
+ "XFS: invalid log iosize: %d [not %d-%d]",
+ ap->iosizelog, XFS_MIN_IO_LOG,
+ XFS_MAX_IO_LOG);
+ return XFS_ERROR(EINVAL);
+ }
+
+ mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
+ mp->m_readio_log = mp->m_writeio_log = ap->iosizelog;
+ }
+
+ if (ap->flags & XFSMNT_IKEEP)
+ mp->m_flags |= XFS_MOUNT_IKEEP;
+ if (ap->flags & XFSMNT_DIRSYNC)
+ mp->m_flags |= XFS_MOUNT_DIRSYNC;
+ if (ap->flags & XFSMNT_ATTR2)
+ mp->m_flags |= XFS_MOUNT_ATTR2;
+ if (ap->flags & XFSMNT_NOATTR2)
+ mp->m_flags |= XFS_MOUNT_NOATTR2;
+
+ if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE)
+ mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
+
+ /*
+ * no recovery flag requires a read-only mount
+ */
+ if (ap->flags & XFSMNT_NORECOVERY) {
+ if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
+ cmn_err(CE_WARN,
+ "XFS: tried to mount a FS read-write without recovery!");
+ return XFS_ERROR(EINVAL);
+ }
+ mp->m_flags |= XFS_MOUNT_NORECOVERY;
+ }
+
+ if (ap->flags & XFSMNT_NOUUID)
+ mp->m_flags |= XFS_MOUNT_NOUUID;
+ if (ap->flags & XFSMNT_BARRIER)
+ mp->m_flags |= XFS_MOUNT_BARRIER;
+ else
+ mp->m_flags &= ~XFS_MOUNT_BARRIER;
+
+ if (ap->flags2 & XFSMNT2_FILESTREAMS)
+ mp->m_flags |= XFS_MOUNT_FILESTREAMS;
+
+ if (ap->flags & XFSMNT_DMAPI)
+ mp->m_flags |= XFS_MOUNT_DMAPI;
+ return 0;
+}
+
+/*
+ * This function fills in xfs_mount_t fields based on mount args.
+ * Note: the superblock _has_ now been read in.
+ */
+STATIC int
+xfs_finish_flags(
+ struct xfs_mount_args *ap,
+ struct xfs_mount *mp)
+{
+ int ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
+
+ /* Fail a mount where the logbuf is smaller than the log stripe */
+ if (xfs_sb_version_haslogv2(&mp->m_sb)) {
+ if ((ap->logbufsize <= 0) &&
+ (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) {
+ mp->m_logbsize = mp->m_sb.sb_logsunit;
+ } else if (ap->logbufsize > 0 &&
+ ap->logbufsize < mp->m_sb.sb_logsunit) {
+ cmn_err(CE_WARN,
+ "XFS: logbuf size must be greater than or equal to log stripe size");
+ return XFS_ERROR(EINVAL);
+ }
+ } else {
+ /* Fail a mount if the logbuf is larger than 32K */
+ if (ap->logbufsize > XLOG_BIG_RECORD_BSIZE) {
+ cmn_err(CE_WARN,
+ "XFS: logbuf size for version 1 logs must be 16K or 32K");
+ return XFS_ERROR(EINVAL);
+ }
+ }
+
+ /*
+ * mkfs'ed attr2 will turn on attr2 mount unless explicitly
+ * told by noattr2 to turn it off
+ */
+ if (xfs_sb_version_hasattr2(&mp->m_sb) &&
+ !(ap->flags & XFSMNT_NOATTR2))
+ mp->m_flags |= XFS_MOUNT_ATTR2;
+
+ /*
+ * prohibit r/w mounts of read-only filesystems
+ */
+ if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
+ cmn_err(CE_WARN,
+ "XFS: cannot mount a read-only filesystem as read-write");
+ return XFS_ERROR(EROFS);
+ }
+
+ /*
+ * check for shared mount.
+ */
+ if (ap->flags & XFSMNT_SHARED) {
+ if (!xfs_sb_version_hasshared(&mp->m_sb))
+ return XFS_ERROR(EINVAL);
+
+ /*
+ * For IRIX 6.5, shared mounts must have the shared
+ * version bit set, have the persistent readonly
+ * field set, must be version 0 and can only be mounted
+ * read-only.
+ */
+ if (!ronly || !(mp->m_sb.sb_flags & XFS_SBF_READONLY) ||
+ (mp->m_sb.sb_shared_vn != 0))
+ return XFS_ERROR(EINVAL);
+
+ mp->m_flags |= XFS_MOUNT_SHARED;
+
+ /*
+ * Shared XFS V0 can't deal with DMI. Return EINVAL.
+ */
+ if (mp->m_sb.sb_shared_vn == 0 && (ap->flags & XFSMNT_DMAPI))
+ return XFS_ERROR(EINVAL);
+ }
+
+ if (ap->flags & XFSMNT_UQUOTA) {
+ mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
+ if (ap->flags & XFSMNT_UQUOTAENF)
+ mp->m_qflags |= XFS_UQUOTA_ENFD;
+ }
+
+ if (ap->flags & XFSMNT_GQUOTA) {
+ mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
+ if (ap->flags & XFSMNT_GQUOTAENF)
+ mp->m_qflags |= XFS_OQUOTA_ENFD;
+ } else if (ap->flags & XFSMNT_PQUOTA) {
+ mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
+ if (ap->flags & XFSMNT_PQUOTAENF)
+ mp->m_qflags |= XFS_OQUOTA_ENFD;
+ }
+
+ return 0;
+}
+
STATIC int
xfs_fs_fill_super(
struct super_block *sb,
@@ -1308,11 +1737,21 @@ xfs_fs_fill_super(
{
struct inode *root;
struct xfs_mount *mp = NULL;
- struct xfs_mount_args *args = xfs_args_allocate(sb, silent);
- int error;
+ struct xfs_mount_args *args;
+ int flags = 0, error = ENOMEM;
+
+ args = xfs_args_allocate(sb, silent);
+ if (!args)
+ return -ENOMEM;
- mp = xfs_mount_init();
+ mp = kzalloc(sizeof(struct xfs_mount), GFP_KERNEL);
+ if (!mp)
+ goto out_free_args;
+ spin_lock_init(&mp->m_sb_lock);
+ mutex_init(&mp->m_ilock);
+ mutex_init(&mp->m_growlock);
+ atomic_set(&mp->m_active_trans, 0);
INIT_LIST_HEAD(&mp->m_sync_list);
spin_lock_init(&mp->m_sync_lock);
init_waitqueue_head(&mp->m_wait_single_sync_task);
@@ -1325,16 +1764,59 @@ xfs_fs_fill_super(
error = xfs_parseargs(mp, (char *)data, args, 0);
if (error)
- goto fail_vfsop;
+ goto out_free_mp;
sb_min_blocksize(sb, BBSIZE);
sb->s_export_op = &xfs_export_operations;
sb->s_qcop = &xfs_quotactl_operations;
sb->s_op = &xfs_super_operations;
- error = xfs_mount(mp, args, NULL);
+ error = xfs_dmops_get(mp, args);
+ if (error)
+ goto out_free_mp;
+ error = xfs_qmops_get(mp, args);
+ if (error)
+ goto out_put_dmops;
+
+ if (args->flags & XFSMNT_QUIET)
+ flags |= XFS_MFSI_QUIET;
+
+ error = xfs_open_devices(mp, args);
+ if (error)
+ goto out_put_qmops;
+
+ if (xfs_icsb_init_counters(mp))
+ mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
+
+ /*
+ * Setup flags based on mount(2) options and then the superblock
+ */
+ error = xfs_start_flags(args, mp);
+ if (error)
+ goto out_destroy_counters;
+ error = xfs_readsb(mp, flags);
+ if (error)
+ goto out_destroy_counters;
+ error = xfs_finish_flags(args, mp);
+ if (error)
+ goto out_free_sb;
+
+ error = xfs_setup_devices(mp);
+ if (error)
+ goto out_free_sb;
+
+ if (mp->m_flags & XFS_MOUNT_BARRIER)
+ xfs_mountfs_check_barriers(mp);
+
+ error = xfs_filestream_mount(mp);
+ if (error)
+ goto out_free_sb;
+
+ error = xfs_mountfs(mp, flags);
if (error)
- goto fail_vfsop;
+ goto out_filestream_unmount;
+
+ XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, args->mtpt, args->fsname);
sb->s_dirt = 1;
sb->s_magic = XFS_SB_MAGIC;
@@ -1369,10 +1851,27 @@ xfs_fs_fill_super(
xfs_itrace_exit(XFS_I(sb->s_root->d_inode));
- kmem_free(args, sizeof(*args));
+ kfree(args);
return 0;
-fail_vnrele:
+ out_filestream_unmount:
+ xfs_filestream_unmount(mp);
+ out_free_sb:
+ xfs_freesb(mp);
+ out_destroy_counters:
+ xfs_icsb_destroy_counters(mp);
+ xfs_close_devices(mp);
+ out_put_qmops:
+ xfs_qmops_put(mp);
+ out_put_dmops:
+ xfs_dmops_put(mp);
+ out_free_mp:
+ kfree(mp);
+ out_free_args:
+ kfree(args);
+ return -error;
+
+ fail_vnrele:
if (sb->s_root) {
dput(sb->s_root);
sb->s_root = NULL;
@@ -1380,12 +1879,22 @@ fail_vnrele:
iput(root);
}
-fail_unmount:
- xfs_unmount(mp, 0, NULL);
+ fail_unmount:
+ /*
+ * Blow away any referenced inode in the filestreams cache.
+ * This can and will cause log traffic as inodes go inactive
+ * here.
+ */
+ xfs_filestream_unmount(mp);
-fail_vfsop:
- kmem_free(args, sizeof(*args));
- return -error;
+ XFS_bflush(mp->m_ddev_targp);
+ error = xfs_unmount_flush(mp, 0);
+ WARN_ON(error);
+
+ IRELE(mp->m_rootip);
+
+ xfs_unmountfs(mp);
+ goto out_destroy_counters;
}
STATIC int
@@ -1396,8 +1905,17 @@ xfs_fs_get_sb(
void *data,
struct vfsmount *mnt)
{
- return get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
+ int error;
+
+ error = get_sb_bdev(fs_type, flags, dev_name, data, xfs_fs_fill_super,
mnt);
+ if (!error) {
+ xfs_mount_t *mp = XFS_M(mnt->mnt_sb);
+
+ mp->m_vfsmount = mnt;
+ }
+
+ return error;
}
static struct super_operations xfs_super_operations = {
diff --git a/fs/xfs/linux-2.6/xfs_super.h b/fs/xfs/linux-2.6/xfs_super.h
index 3efb7c6d3303..212bdc7a7897 100644
--- a/fs/xfs/linux-2.6/xfs_super.h
+++ b/fs/xfs/linux-2.6/xfs_super.h
@@ -107,9 +107,6 @@ extern void xfs_initialize_vnode(struct xfs_mount *mp, bhv_vnode_t *vp,
extern void xfs_flush_inode(struct xfs_inode *);
extern void xfs_flush_device(struct xfs_inode *);
-extern int xfs_blkdev_get(struct xfs_mount *, const char *,
- struct block_device **);
-extern void xfs_blkdev_put(struct block_device *);
extern void xfs_blkdev_issue_flush(struct xfs_buftarg *);
extern const struct export_operations xfs_export_operations;
diff --git a/fs/xfs/linux-2.6/xfs_vnode.h b/fs/xfs/linux-2.6/xfs_vnode.h
index 9d73cb5c0fc7..25eb2a9e8d9b 100644
--- a/fs/xfs/linux-2.6/xfs_vnode.h
+++ b/fs/xfs/linux-2.6/xfs_vnode.h
@@ -230,14 +230,6 @@ static inline void vn_atime_to_time_t(bhv_vnode_t *vp, time_t *tt)
#define ATTR_NOSIZETOK 0x400 /* Don't get the SIZE token */
/*
- * Flags to vop_fsync/reclaim.
- */
-#define FSYNC_NOWAIT 0 /* asynchronous flush */
-#define FSYNC_WAIT 0x1 /* synchronous fsync or forced reclaim */
-#define FSYNC_INVAL 0x2 /* flush and invalidate cached data */
-#define FSYNC_DATA 0x4 /* synchronous fsync of data only */
-
-/*
* Tracking vnode activity.
*/
#if defined(XFS_INODE_TRACE)
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 85df3288efd5..fc9f3fb39b7b 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -1435,8 +1435,7 @@ xfs_dqlock2(
/* ARGSUSED */
int
xfs_qm_dqpurge(
- xfs_dquot_t *dqp,
- uint flags)
+ xfs_dquot_t *dqp)
{
xfs_dqhash_t *thishash;
xfs_mount_t *mp = dqp->q_mount;
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index 5c371a92e3e2..f7393bba4e95 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -164,7 +164,7 @@ extern void xfs_qm_dqprint(xfs_dquot_t *);
extern void xfs_qm_dqdestroy(xfs_dquot_t *);
extern int xfs_qm_dqflush(xfs_dquot_t *, uint);
-extern int xfs_qm_dqpurge(xfs_dquot_t *, uint);
+extern int xfs_qm_dqpurge(xfs_dquot_t *);
extern void xfs_qm_dqunpin_wait(xfs_dquot_t *);
extern int xfs_qm_dqlock_nowait(xfs_dquot_t *);
extern int xfs_qm_dqflock_nowait(xfs_dquot_t *);
diff --git a/fs/xfs/quota/xfs_dquot_item.c b/fs/xfs/quota/xfs_dquot_item.c
index 36e05ca78412..08d2fc89e6a1 100644
--- a/fs/xfs/quota/xfs_dquot_item.c
+++ b/fs/xfs/quota/xfs_dquot_item.c
@@ -576,8 +576,8 @@ xfs_qm_qoffend_logitem_committed(
* xfs_trans_delete_ail() drops the AIL lock.
*/
xfs_trans_delete_ail(qfs->qql_item.li_mountp, (xfs_log_item_t *)qfs);
- kmem_free(qfs, sizeof(xfs_qoff_logitem_t));
- kmem_free(qfe, sizeof(xfs_qoff_logitem_t));
+ kmem_free(qfs);
+ kmem_free(qfe);
return (xfs_lsn_t)-1;
}
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index d31cce1165c5..26370a3128f5 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -192,8 +192,8 @@ xfs_qm_destroy(
xfs_qm_list_destroy(&(xqm->qm_usr_dqhtable[i]));
xfs_qm_list_destroy(&(xqm->qm_grp_dqhtable[i]));
}
- kmem_free(xqm->qm_usr_dqhtable, hsize * sizeof(xfs_dqhash_t));
- kmem_free(xqm->qm_grp_dqhtable, hsize * sizeof(xfs_dqhash_t));
+ kmem_free(xqm->qm_usr_dqhtable);
+ kmem_free(xqm->qm_grp_dqhtable);
xqm->qm_usr_dqhtable = NULL;
xqm->qm_grp_dqhtable = NULL;
xqm->qm_dqhashmask = 0;
@@ -201,7 +201,7 @@ xfs_qm_destroy(
#ifdef DEBUG
mutex_destroy(&qcheck_lock);
#endif
- kmem_free(xqm, sizeof(xfs_qm_t));
+ kmem_free(xqm);
}
/*
@@ -631,7 +631,7 @@ xfs_qm_dqpurge_int(
* freelist in INACTIVE state.
*/
nextdqp = dqp->MPL_NEXT;
- nmisses += xfs_qm_dqpurge(dqp, flags);
+ nmisses += xfs_qm_dqpurge(dqp);
dqp = nextdqp;
}
xfs_qm_mplist_unlock(mp);
@@ -1134,7 +1134,7 @@ xfs_qm_init_quotainfo(
* and change the superblock accordingly.
*/
if ((error = xfs_qm_init_quotainos(mp))) {
- kmem_free(qinf, sizeof(xfs_quotainfo_t));
+ kmem_free(qinf);
mp->m_quotainfo = NULL;
return error;
}
@@ -1248,7 +1248,7 @@ xfs_qm_destroy_quotainfo(
qi->qi_gquotaip = NULL;
}
mutex_destroy(&qi->qi_quotaofflock);
- kmem_free(qi, sizeof(xfs_quotainfo_t));
+ kmem_free(qi);
mp->m_quotainfo = NULL;
}
@@ -1623,7 +1623,7 @@ xfs_qm_dqiterate(
break;
} while (nmaps > 0);
- kmem_free(map, XFS_DQITER_MAP_SIZE * sizeof(*map));
+ kmem_free(map);
return error;
}
diff --git a/fs/xfs/quota/xfs_qm_syscalls.c b/fs/xfs/quota/xfs_qm_syscalls.c
index 768a3b27d2b6..413671523cb5 100644
--- a/fs/xfs/quota/xfs_qm_syscalls.c
+++ b/fs/xfs/quota/xfs_qm_syscalls.c
@@ -1449,14 +1449,14 @@ xfs_qm_internalqcheck(
for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
xfs_dqtest_cmp(d);
e = (xfs_dqtest_t *) d->HL_NEXT;
- kmem_free(d, sizeof(xfs_dqtest_t));
+ kmem_free(d);
d = e;
}
h1 = &qmtest_gdqtab[i];
for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
xfs_dqtest_cmp(d);
e = (xfs_dqtest_t *) d->HL_NEXT;
- kmem_free(d, sizeof(xfs_dqtest_t));
+ kmem_free(d);
d = e;
}
}
@@ -1467,8 +1467,8 @@ xfs_qm_internalqcheck(
} else {
cmn_err(CE_DEBUG, "******** quotacheck successful! ********");
}
- kmem_free(qmtest_udqtab, qmtest_hashmask * sizeof(xfs_dqhash_t));
- kmem_free(qmtest_gdqtab, qmtest_hashmask * sizeof(xfs_dqhash_t));
+ kmem_free(qmtest_udqtab);
+ kmem_free(qmtest_gdqtab);
mutex_unlock(&qcheck_lock);
return (qmtest_nfails);
}
diff --git a/fs/xfs/support/ktrace.c b/fs/xfs/support/ktrace.c
index 0b75d302508f..a34ef05489b1 100644
--- a/fs/xfs/support/ktrace.c
+++ b/fs/xfs/support/ktrace.c
@@ -89,7 +89,7 @@ ktrace_alloc(int nentries, unsigned int __nocast sleep)
if (sleep & KM_SLEEP)
panic("ktrace_alloc: NULL memory on KM_SLEEP request!");
- kmem_free(ktp, sizeof(*ktp));
+ kmem_free(ktp);
return NULL;
}
@@ -126,7 +126,7 @@ ktrace_free(ktrace_t *ktp)
} else {
entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));
- kmem_free(ktp->kt_entries, entries_size);
+ kmem_free(ktp->kt_entries);
}
kmem_zone_free(ktrace_hdr_zone, ktp);
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index df151a859186..557dad611de0 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -241,8 +241,7 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
args.firstblock = &firstblock;
args.flist = &flist;
args.whichfork = XFS_ATTR_FORK;
- args.addname = 1;
- args.oknoent = 1;
+ args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
/*
* Determine space new attribute will use, and if it would be
@@ -974,7 +973,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
xfs_da_brelse(args->trans, bp);
return(retval);
}
- args->rename = 1; /* an atomic rename */
+ args->op_flags |= XFS_DA_OP_RENAME; /* an atomic rename */
args->blkno2 = args->blkno; /* set 2nd entry info*/
args->index2 = args->index;
args->rmtblkno2 = args->rmtblkno;
@@ -1054,7 +1053,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* so that one disappears and one appears atomically. Then we
* must remove the "old" attribute/value pair.
*/
- if (args->rename) {
+ if (args->op_flags & XFS_DA_OP_RENAME) {
/*
* In a separate transaction, set the incomplete flag on the
* "old" attr and clear the incomplete flag on the "new" attr.
@@ -1307,7 +1306,7 @@ restart:
} else if (retval == EEXIST) {
if (args->flags & ATTR_CREATE)
goto out;
- args->rename = 1; /* atomic rename op */
+ args->op_flags |= XFS_DA_OP_RENAME; /* atomic rename op */
args->blkno2 = args->blkno; /* set 2nd entry info*/
args->index2 = args->index;
args->rmtblkno2 = args->rmtblkno;
@@ -1425,7 +1424,7 @@ restart:
* so that one disappears and one appears atomically. Then we
* must remove the "old" attribute/value pair.
*/
- if (args->rename) {
+ if (args->op_flags & XFS_DA_OP_RENAME) {
/*
* In a separate transaction, set the incomplete flag on the
* "old" attr and clear the incomplete flag on the "new" attr.
@@ -2300,23 +2299,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
void
xfs_attr_trace_l_c(char *where, struct xfs_attr_list_context *context)
{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where,
- (__psunsigned_t)context->dp,
- (__psunsigned_t)context->cursor->hashval,
- (__psunsigned_t)context->cursor->blkno,
- (__psunsigned_t)context->cursor->offset,
- (__psunsigned_t)context->alist,
- (__psunsigned_t)context->bufsize,
- (__psunsigned_t)context->count,
- (__psunsigned_t)context->firstu,
- (__psunsigned_t)
- ((context->count > 0) &&
- !(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
- ? (ATTR_ENTRY(context->alist,
- context->count-1)->a_valuelen)
- : 0,
- (__psunsigned_t)context->dupcnt,
- (__psunsigned_t)context->flags,
+ xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_C, where, context,
(__psunsigned_t)NULL,
(__psunsigned_t)NULL,
(__psunsigned_t)NULL);
@@ -2329,23 +2312,7 @@ void
xfs_attr_trace_l_cn(char *where, struct xfs_attr_list_context *context,
struct xfs_da_intnode *node)
{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where,
- (__psunsigned_t)context->dp,
- (__psunsigned_t)context->cursor->hashval,
- (__psunsigned_t)context->cursor->blkno,
- (__psunsigned_t)context->cursor->offset,
- (__psunsigned_t)context->alist,
- (__psunsigned_t)context->bufsize,
- (__psunsigned_t)context->count,
- (__psunsigned_t)context->firstu,
- (__psunsigned_t)
- ((context->count > 0) &&
- !(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
- ? (ATTR_ENTRY(context->alist,
- context->count-1)->a_valuelen)
- : 0,
- (__psunsigned_t)context->dupcnt,
- (__psunsigned_t)context->flags,
+ xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CN, where, context,
(__psunsigned_t)be16_to_cpu(node->hdr.count),
(__psunsigned_t)be32_to_cpu(node->btree[0].hashval),
(__psunsigned_t)be32_to_cpu(node->btree[
@@ -2359,23 +2326,7 @@ void
xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
struct xfs_da_node_entry *btree)
{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where,
- (__psunsigned_t)context->dp,
- (__psunsigned_t)context->cursor->hashval,
- (__psunsigned_t)context->cursor->blkno,
- (__psunsigned_t)context->cursor->offset,
- (__psunsigned_t)context->alist,
- (__psunsigned_t)context->bufsize,
- (__psunsigned_t)context->count,
- (__psunsigned_t)context->firstu,
- (__psunsigned_t)
- ((context->count > 0) &&
- !(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
- ? (ATTR_ENTRY(context->alist,
- context->count-1)->a_valuelen)
- : 0,
- (__psunsigned_t)context->dupcnt,
- (__psunsigned_t)context->flags,
+ xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CB, where, context,
(__psunsigned_t)be32_to_cpu(btree->hashval),
(__psunsigned_t)be32_to_cpu(btree->before),
(__psunsigned_t)NULL);
@@ -2388,23 +2339,7 @@ void
xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
struct xfs_attr_leafblock *leaf)
{
- xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where,
- (__psunsigned_t)context->dp,
- (__psunsigned_t)context->cursor->hashval,
- (__psunsigned_t)context->cursor->blkno,
- (__psunsigned_t)context->cursor->offset,
- (__psunsigned_t)context->alist,
- (__psunsigned_t)context->bufsize,
- (__psunsigned_t)context->count,
- (__psunsigned_t)context->firstu,
- (__psunsigned_t)
- ((context->count > 0) &&
- !(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
- ? (ATTR_ENTRY(context->alist,
- context->count-1)->a_valuelen)
- : 0,
- (__psunsigned_t)context->dupcnt,
- (__psunsigned_t)context->flags,
+ xfs_attr_trace_enter(XFS_ATTR_KTRACE_L_CL, where, context,
(__psunsigned_t)be16_to_cpu(leaf->hdr.count),
(__psunsigned_t)be32_to_cpu(leaf->entries[0].hashval),
(__psunsigned_t)be32_to_cpu(leaf->entries[
@@ -2417,22 +2352,30 @@ xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
*/
void
xfs_attr_trace_enter(int type, char *where,
- __psunsigned_t a2, __psunsigned_t a3,
- __psunsigned_t a4, __psunsigned_t a5,
- __psunsigned_t a6, __psunsigned_t a7,
- __psunsigned_t a8, __psunsigned_t a9,
- __psunsigned_t a10, __psunsigned_t a11,
- __psunsigned_t a12, __psunsigned_t a13,
- __psunsigned_t a14, __psunsigned_t a15)
+ struct xfs_attr_list_context *context,
+ __psunsigned_t a13, __psunsigned_t a14,
+ __psunsigned_t a15)
{
ASSERT(xfs_attr_trace_buf);
ktrace_enter(xfs_attr_trace_buf, (void *)((__psunsigned_t)type),
- (void *)where,
- (void *)a2, (void *)a3, (void *)a4,
- (void *)a5, (void *)a6, (void *)a7,
- (void *)a8, (void *)a9, (void *)a10,
- (void *)a11, (void *)a12, (void *)a13,
- (void *)a14, (void *)a15);
+ (void *)((__psunsigned_t)where),
+ (void *)((__psunsigned_t)context->dp),
+ (void *)((__psunsigned_t)context->cursor->hashval),
+ (void *)((__psunsigned_t)context->cursor->blkno),
+ (void *)((__psunsigned_t)context->cursor->offset),
+ (void *)((__psunsigned_t)context->alist),
+ (void *)((__psunsigned_t)context->bufsize),
+ (void *)((__psunsigned_t)context->count),
+ (void *)((__psunsigned_t)context->firstu),
+ (void *)((__psunsigned_t)
+ (((context->count > 0) &&
+ !(context->flags & (ATTR_KERNAMELS|ATTR_KERNOVAL)))
+ ? (ATTR_ENTRY(context->alist,
+ context->count-1)->a_valuelen)
+ : 0)),
+ (void *)((__psunsigned_t)context->dupcnt),
+ (void *)((__psunsigned_t)context->flags),
+ (void *)a13, (void *)a14, (void *)a15);
}
#endif /* XFS_ATTR_TRACE */
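The four xfs_attr_trace_l_* wrappers above used to unpack a dozen context fields at every call site; now they hand the list context pointer to xfs_attr_trace_enter() and the unpacking happens once, inside the helper. A minimal sketch of that shape (hypothetical standalone names, not the XFS tracing code):

#include <stdio.h>

struct list_ctx { int count; int firstu; unsigned flags; };

/* One place that knows how to flatten the context for the trace ring. */
static void trace_enter(int type, const char *where,
			const struct list_ctx *ctx,
			unsigned long a13, unsigned long a14,
			unsigned long a15)
{
	printf("%d %s %d %d %#x %lu %lu %lu\n", type, where,
	       ctx->count, ctx->firstu, ctx->flags, a13, a14, a15);
}

int main(void)
{
	struct list_ctx ctx = { 3, 128, 0x2 };

	trace_enter(1, "sf E-O-F", &ctx, 0, 0, 0);
	return 0;
}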
@@ -2622,43 +2565,6 @@ attr_lookup_namespace(
return NULL;
}
-/*
- * Some checks to prevent people abusing EAs to get over quota:
- * - Don't allow modifying user EAs on devices/symlinks;
- * - Don't allow modifying user EAs if sticky bit set;
- */
-STATIC int
-attr_user_capable(
- bhv_vnode_t *vp,
- cred_t *cred)
-{
- struct inode *inode = vn_to_inode(vp);
-
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- return -EPERM;
- if (!S_ISREG(inode->i_mode) && !S_ISDIR(inode->i_mode) &&
- !capable(CAP_SYS_ADMIN))
- return -EPERM;
- if (S_ISDIR(inode->i_mode) && (inode->i_mode & S_ISVTX) &&
- (current_fsuid(cred) != inode->i_uid) && !capable(CAP_FOWNER))
- return -EPERM;
- return 0;
-}
-
-STATIC int
-attr_trusted_capable(
- bhv_vnode_t *vp,
- cred_t *cred)
-{
- struct inode *inode = vn_to_inode(vp);
-
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode))
- return -EPERM;
- if (!capable(CAP_SYS_ADMIN))
- return -EPERM;
- return 0;
-}
-
STATIC int
attr_system_set(
bhv_vnode_t *vp, char *name, void *data, size_t size, int xflags)
@@ -2709,7 +2615,6 @@ struct attrnames attr_system = {
.attr_get = attr_system_get,
.attr_set = attr_system_set,
.attr_remove = attr_system_remove,
- .attr_capable = (attrcapable_t)fs_noerr,
};
struct attrnames attr_trusted = {
@@ -2719,7 +2624,6 @@ struct attrnames attr_trusted = {
.attr_get = attr_generic_get,
.attr_set = attr_generic_set,
.attr_remove = attr_generic_remove,
- .attr_capable = attr_trusted_capable,
};
struct attrnames attr_secure = {
@@ -2729,7 +2633,6 @@ struct attrnames attr_secure = {
.attr_get = attr_generic_get,
.attr_set = attr_generic_set,
.attr_remove = attr_generic_remove,
- .attr_capable = (attrcapable_t)fs_noerr,
};
struct attrnames attr_user = {
@@ -2738,7 +2641,6 @@ struct attrnames attr_user = {
.attr_get = attr_generic_get,
.attr_set = attr_generic_set,
.attr_remove = attr_generic_remove,
- .attr_capable = attr_user_capable,
};
struct attrnames *attr_namespaces[] =
diff --git a/fs/xfs/xfs_attr.h b/fs/xfs/xfs_attr.h
index 6cfc9384fe35..9b96d171b75c 100644
--- a/fs/xfs/xfs_attr.h
+++ b/fs/xfs/xfs_attr.h
@@ -42,7 +42,6 @@ typedef int (*attrset_t)(bhv_vnode_t *, char *, void *, size_t, int);
typedef int (*attrget_t)(bhv_vnode_t *, char *, void *, size_t, int);
typedef int (*attrremove_t)(bhv_vnode_t *, char *, int);
typedef int (*attrexists_t)(bhv_vnode_t *);
-typedef int (*attrcapable_t)(bhv_vnode_t *, struct cred *);
typedef struct attrnames {
char * attr_name;
@@ -52,7 +51,6 @@ typedef struct attrnames {
attrset_t attr_set;
attrremove_t attr_remove;
attrexists_t attr_exists;
- attrcapable_t attr_capable;
} attrnames_t;
#define ATTR_NAMECOUNT 4
diff --git a/fs/xfs/xfs_attr_leaf.c b/fs/xfs/xfs_attr_leaf.c
index 303d41e4217b..cb345e6e4850 100644
--- a/fs/xfs/xfs_attr_leaf.c
+++ b/fs/xfs/xfs_attr_leaf.c
@@ -369,9 +369,10 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
* Fix up the start offset of the attribute fork
*/
totsize -= size;
- if (totsize == sizeof(xfs_attr_sf_hdr_t) && !args->addname &&
- (mp->m_flags & XFS_MOUNT_ATTR2) &&
- (dp->i_d.di_format != XFS_DINODE_FMT_BTREE)) {
+ if (totsize == sizeof(xfs_attr_sf_hdr_t) &&
+ !(args->op_flags & XFS_DA_OP_ADDNAME) &&
+ (mp->m_flags & XFS_MOUNT_ATTR2) &&
+ (dp->i_d.di_format != XFS_DINODE_FMT_BTREE)) {
/*
* Last attribute now removed, revert to original
* inode format making all literal area available
@@ -389,9 +390,10 @@ xfs_attr_shortform_remove(xfs_da_args_t *args)
xfs_idata_realloc(dp, -size, XFS_ATTR_FORK);
dp->i_d.di_forkoff = xfs_attr_shortform_bytesfit(dp, totsize);
ASSERT(dp->i_d.di_forkoff);
- ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) || args->addname ||
- !(mp->m_flags & XFS_MOUNT_ATTR2) ||
- dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
+ ASSERT(totsize > sizeof(xfs_attr_sf_hdr_t) ||
+ (args->op_flags & XFS_DA_OP_ADDNAME) ||
+ !(mp->m_flags & XFS_MOUNT_ATTR2) ||
+ dp->i_d.di_format == XFS_DINODE_FMT_BTREE);
dp->i_afp->if_ext_max =
XFS_IFORK_ASIZE(dp) / (uint)sizeof(xfs_bmbt_rec_t);
dp->i_df.if_ext_max =
@@ -531,7 +533,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
- nargs.oknoent = 1;
+ nargs.op_flags = XFS_DA_OP_OKNOENT;
sfe = &sf->list[0];
for (i = 0; i < sf->hdr.count; i++) {
@@ -555,7 +557,7 @@ xfs_attr_shortform_to_leaf(xfs_da_args_t *args)
out:
if(bp)
xfs_da_buf_done(bp);
- kmem_free(tmpbuffer, size);
+ kmem_free(tmpbuffer);
return(error);
}
@@ -676,7 +678,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
XFS_ERRLEVEL_LOW,
context->dp->i_mount, sfe);
xfs_attr_trace_l_c("sf corrupted", context);
- kmem_free(sbuf, sbsize);
+ kmem_free(sbuf);
return XFS_ERROR(EFSCORRUPTED);
}
if (!xfs_attr_namesp_match_overrides(context->flags, sfe->flags)) {
@@ -717,7 +719,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
}
}
if (i == nsbuf) {
- kmem_free(sbuf, sbsize);
+ kmem_free(sbuf);
xfs_attr_trace_l_c("blk end", context);
return(0);
}
@@ -747,7 +749,7 @@ xfs_attr_shortform_list(xfs_attr_list_context_t *context)
cursor->offset++;
}
- kmem_free(sbuf, sbsize);
+ kmem_free(sbuf);
xfs_attr_trace_l_c("sf E-O-F", context);
return(0);
}
@@ -853,7 +855,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
nargs.total = args->total;
nargs.whichfork = XFS_ATTR_FORK;
nargs.trans = args->trans;
- nargs.oknoent = 1;
+ nargs.op_flags = XFS_DA_OP_OKNOENT;
entry = &leaf->entries[0];
for (i = 0; i < be16_to_cpu(leaf->hdr.count); entry++, i++) {
if (entry->flags & XFS_ATTR_INCOMPLETE)
@@ -873,7 +875,7 @@ xfs_attr_leaf_to_shortform(xfs_dabuf_t *bp, xfs_da_args_t *args, int forkoff)
error = 0;
out:
- kmem_free(tmpbuffer, XFS_LBSIZE(dp->i_mount));
+ kmem_free(tmpbuffer);
return(error);
}
@@ -1155,7 +1157,7 @@ xfs_attr_leaf_add_work(xfs_dabuf_t *bp, xfs_da_args_t *args, int mapindex)
entry->hashval = cpu_to_be32(args->hashval);
entry->flags = tmp ? XFS_ATTR_LOCAL : 0;
entry->flags |= XFS_ATTR_NSP_ARGS_TO_ONDISK(args->flags);
- if (args->rename) {
+ if (args->op_flags & XFS_DA_OP_RENAME) {
entry->flags |= XFS_ATTR_INCOMPLETE;
if ((args->blkno2 == args->blkno) &&
(args->index2 <= args->index)) {
@@ -1271,7 +1273,7 @@ xfs_attr_leaf_compact(xfs_trans_t *trans, xfs_dabuf_t *bp)
be16_to_cpu(hdr_s->count), mp);
xfs_da_log_buf(trans, bp, 0, XFS_LBSIZE(mp) - 1);
- kmem_free(tmpbuffer, XFS_LBSIZE(mp));
+ kmem_free(tmpbuffer);
}
/*
@@ -1921,7 +1923,7 @@ xfs_attr_leaf_unbalance(xfs_da_state_t *state, xfs_da_state_blk_t *drop_blk,
be16_to_cpu(drop_hdr->count), mp);
}
memcpy((char *)save_leaf, (char *)tmp_leaf, state->blocksize);
- kmem_free(tmpbuffer, state->blocksize);
+ kmem_free(tmpbuffer);
}
xfs_da_log_buf(state->args->trans, save_blk->bp, 0,
@@ -2451,7 +2453,7 @@ xfs_attr_leaf_list_int(xfs_dabuf_t *bp, xfs_attr_list_context_t *context)
(int)name_rmt->namelen,
valuelen,
(char*)args.value);
- kmem_free(args.value, valuelen);
+ kmem_free(args.value);
}
else {
retval = context->put_listent(context,
@@ -2954,7 +2956,7 @@ xfs_attr_leaf_inactive(xfs_trans_t **trans, xfs_inode_t *dp, xfs_dabuf_t *bp)
error = tmp; /* save only the 1st errno */
}
- kmem_free((xfs_caddr_t)list, size);
+ kmem_free((xfs_caddr_t)list);
return(error);
}
diff --git a/fs/xfs/xfs_attr_sf.h b/fs/xfs/xfs_attr_sf.h
index f67f917803b1..ea22839caed2 100644
--- a/fs/xfs/xfs_attr_sf.h
+++ b/fs/xfs/xfs_attr_sf.h
@@ -97,13 +97,9 @@ void xfs_attr_trace_l_cb(char *where, struct xfs_attr_list_context *context,
void xfs_attr_trace_l_cl(char *where, struct xfs_attr_list_context *context,
struct xfs_attr_leafblock *leaf);
void xfs_attr_trace_enter(int type, char *where,
- __psunsigned_t a2, __psunsigned_t a3,
- __psunsigned_t a4, __psunsigned_t a5,
- __psunsigned_t a6, __psunsigned_t a7,
- __psunsigned_t a8, __psunsigned_t a9,
- __psunsigned_t a10, __psunsigned_t a11,
- __psunsigned_t a12, __psunsigned_t a13,
- __psunsigned_t a14, __psunsigned_t a15);
+ struct xfs_attr_list_context *context,
+ __psunsigned_t a13, __psunsigned_t a14,
+ __psunsigned_t a15);
#else
#define xfs_attr_trace_l_c(w,c)
#define xfs_attr_trace_l_cn(w,c,n)
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 53c259f5a5af..a612a90aae4a 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -5970,7 +5970,7 @@ unlock_and_return:
xfs_iunlock_map_shared(ip, lock);
xfs_iunlock(ip, XFS_IOLOCK_SHARED);
- kmem_free(map, subnex * sizeof(*map));
+ kmem_free(map);
return error;
}
diff --git a/fs/xfs/xfs_buf_item.c b/fs/xfs/xfs_buf_item.c
index 53a71c62025d..d86ca2c03a70 100644
--- a/fs/xfs/xfs_buf_item.c
+++ b/fs/xfs/xfs_buf_item.c
@@ -889,9 +889,9 @@ xfs_buf_item_relse(
}
#ifdef XFS_TRANS_DEBUG
- kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp));
+ kmem_free(bip->bli_orig);
bip->bli_orig = NULL;
- kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY);
+ kmem_free(bip->bli_logged);
bip->bli_logged = NULL;
#endif /* XFS_TRANS_DEBUG */
@@ -1138,9 +1138,9 @@ xfs_buf_iodone(
xfs_trans_delete_ail(mp, (xfs_log_item_t *)bip);
#ifdef XFS_TRANS_DEBUG
- kmem_free(bip->bli_orig, XFS_BUF_COUNT(bp));
+ kmem_free(bip->bli_orig);
bip->bli_orig = NULL;
- kmem_free(bip->bli_logged, XFS_BUF_COUNT(bp) / NBBY);
+ kmem_free(bip->bli_logged);
bip->bli_logged = NULL;
#endif /* XFS_TRANS_DEBUG */
diff --git a/fs/xfs/xfs_clnt.h b/fs/xfs/xfs_clnt.h
index d5d1e60ee224..d2ce5dd70d87 100644
--- a/fs/xfs/xfs_clnt.h
+++ b/fs/xfs/xfs_clnt.h
@@ -78,6 +78,7 @@ struct xfs_mount_args {
#define XFSMNT_IOSIZE 0x00002000 /* optimize for I/O size */
#define XFSMNT_OSYNCISOSYNC 0x00004000 /* o_sync is REALLY o_sync */
/* (osyncisdsync is default) */
+#define XFSMNT_NOATTR2 0x00008000 /* turn off ATTR2 EA format */
#define XFSMNT_32BITINODES 0x00200000 /* restrict inodes to 32
* bits of address space */
#define XFSMNT_GQUOTA 0x00400000 /* group quota accounting */
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index 021a8f7e563f..edc0aef4e51e 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1431,7 +1431,7 @@ xfs_da_path_shift(xfs_da_state_t *state, xfs_da_state_path_t *path,
}
if (level < 0) {
*result = XFS_ERROR(ENOENT); /* we're out of our tree */
- ASSERT(args->oknoent);
+ ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
return(0);
}
@@ -1530,6 +1530,28 @@ xfs_da_hashname(const uchar_t *name, int namelen)
}
}
+enum xfs_dacmp
+xfs_da_compname(
+ struct xfs_da_args *args,
+ const char *name,
+ int len)
+{
+ return (args->namelen == len && memcmp(args->name, name, len) == 0) ?
+ XFS_CMP_EXACT : XFS_CMP_DIFFERENT;
+}
+
+static xfs_dahash_t
+xfs_default_hashname(
+ struct xfs_name *name)
+{
+ return xfs_da_hashname(name->name, name->len);
+}
+
+const struct xfs_nameops xfs_default_nameops = {
+ .hashname = xfs_default_hashname,
+ .compname = xfs_da_compname
+};
+
/*
* Add a block to the btree ahead of the file.
* Return the new block number to the caller.
@@ -1598,7 +1620,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
args->firstblock, args->total,
&mapp[mapi], &nmap, args->flist,
NULL))) {
- kmem_free(mapp, sizeof(*mapp) * count);
+ kmem_free(mapp);
return error;
}
if (nmap < 1)
@@ -1620,11 +1642,11 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
bno + count) {
if (mapp != &map)
- kmem_free(mapp, sizeof(*mapp) * count);
+ kmem_free(mapp);
return XFS_ERROR(ENOSPC);
}
if (mapp != &map)
- kmem_free(mapp, sizeof(*mapp) * count);
+ kmem_free(mapp);
*new_blkno = (xfs_dablk_t)bno;
return 0;
}
@@ -2090,10 +2112,10 @@ xfs_da_do_buf(
}
}
if (bplist) {
- kmem_free(bplist, sizeof(*bplist) * nmap);
+ kmem_free(bplist);
}
if (mapp != &map) {
- kmem_free(mapp, sizeof(*mapp) * nfsb);
+ kmem_free(mapp);
}
if (bpp)
*bpp = rbp;
@@ -2102,11 +2124,11 @@ exit1:
if (bplist) {
for (i = 0; i < nbplist; i++)
xfs_trans_brelse(trans, bplist[i]);
- kmem_free(bplist, sizeof(*bplist) * nmap);
+ kmem_free(bplist);
}
exit0:
if (mapp != &map)
- kmem_free(mapp, sizeof(*mapp) * nfsb);
+ kmem_free(mapp);
if (bpp)
*bpp = NULL;
return error;
@@ -2315,7 +2337,7 @@ xfs_da_buf_done(xfs_dabuf_t *dabuf)
if (dabuf->dirty)
xfs_da_buf_clean(dabuf);
if (dabuf->nbuf > 1)
- kmem_free(dabuf->data, BBTOB(dabuf->bbcount));
+ kmem_free(dabuf->data);
#ifdef XFS_DABUF_DEBUG
{
spin_lock(&xfs_dabuf_global_lock);
@@ -2332,7 +2354,7 @@ xfs_da_buf_done(xfs_dabuf_t *dabuf)
if (dabuf->nbuf == 1)
kmem_zone_free(xfs_dabuf_zone, dabuf);
else
- kmem_free(dabuf, XFS_DA_BUF_SIZE(dabuf->nbuf));
+ kmem_free(dabuf);
}
/*
@@ -2403,7 +2425,7 @@ xfs_da_brelse(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
for (i = 0; i < nbuf; i++)
xfs_trans_brelse(tp, bplist[i]);
if (bplist != &bp)
- kmem_free(bplist, nbuf * sizeof(*bplist));
+ kmem_free(bplist);
}
/*
@@ -2429,7 +2451,7 @@ xfs_da_binval(xfs_trans_t *tp, xfs_dabuf_t *dabuf)
for (i = 0; i < nbuf; i++)
xfs_trans_binval(tp, bplist[i]);
if (bplist != &bp)
- kmem_free(bplist, nbuf * sizeof(*bplist));
+ kmem_free(bplist);
}
/*
diff --git a/fs/xfs/xfs_da_btree.h b/fs/xfs/xfs_da_btree.h
index 7facf86f74f9..8be0b00ede9a 100644
--- a/fs/xfs/xfs_da_btree.h
+++ b/fs/xfs/xfs_da_btree.h
@@ -99,6 +99,15 @@ typedef struct xfs_da_node_entry xfs_da_node_entry_t;
*========================================================================*/
/*
+ * Search comparison results
+ */
+enum xfs_dacmp {
+ XFS_CMP_DIFFERENT, /* names are completely different */
+ XFS_CMP_EXACT, /* names are exactly the same */
+ XFS_CMP_CASE /* names are same but differ in case */
+};
+
+/*
* Structure to ease passing around component names.
*/
typedef struct xfs_da_args {
@@ -123,13 +132,20 @@ typedef struct xfs_da_args {
int index2; /* index of 2nd attr in blk */
xfs_dablk_t rmtblkno2; /* remote attr value starting blkno */
int rmtblkcnt2; /* remote attr value block count */
- unsigned char justcheck; /* T/F: check for ok with no space */
- unsigned char rename; /* T/F: this is an atomic rename op */
- unsigned char addname; /* T/F: this is an add operation */
- unsigned char oknoent; /* T/F: ok to return ENOENT, else die */
+ int op_flags; /* operation flags */
+ enum xfs_dacmp cmpresult; /* name compare result for lookups */
} xfs_da_args_t;
/*
+ * Operation flags:
+ */
+#define XFS_DA_OP_JUSTCHECK 0x0001 /* check for ok with no space */
+#define XFS_DA_OP_RENAME 0x0002 /* this is an atomic rename op */
+#define XFS_DA_OP_ADDNAME 0x0004 /* this is an add operation */
+#define XFS_DA_OP_OKNOENT 0x0008 /* lookup/add op, ENOENT ok, else die */
+#define XFS_DA_OP_CILOOKUP 0x0010 /* lookup to return CI name if found */
+
+/*
* Structure to describe buffer(s) for a block.
* This is needed in the directory version 2 format case, when
* multiple non-contiguous fsblocks might be needed to cover one
@@ -201,6 +217,14 @@ typedef struct xfs_da_state {
(uint)(XFS_DA_LOGOFF(BASE, ADDR)), \
(uint)(XFS_DA_LOGOFF(BASE, ADDR)+(SIZE)-1)
+/*
+ * Name ops for directory and/or attr name operations
+ */
+struct xfs_nameops {
+ xfs_dahash_t (*hashname)(struct xfs_name *);
+ enum xfs_dacmp (*compname)(struct xfs_da_args *, const char *, int);
+};
+
#ifdef __KERNEL__
/*========================================================================
@@ -249,6 +273,10 @@ int xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
xfs_dabuf_t *dead_buf);
uint xfs_da_hashname(const uchar_t *name_string, int name_length);
+enum xfs_dacmp xfs_da_compname(struct xfs_da_args *args,
+ const char *name, int len);
+
+
xfs_da_state_t *xfs_da_state_alloc(void);
void xfs_da_state_free(xfs_da_state_t *state);
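The separate justcheck/rename/addname/oknoent booleans in xfs_da_args collapse into a single op_flags word tested with the XFS_DA_OP_* masks above; that is what all the `args->op_flags & ...` hunks elsewhere in the patch are doing. A self-contained sketch of the flag handling, using the same values the header defines:

#include <assert.h>

#define XFS_DA_OP_JUSTCHECK	0x0001
#define XFS_DA_OP_RENAME	0x0002
#define XFS_DA_OP_ADDNAME	0x0004
#define XFS_DA_OP_OKNOENT	0x0008
#define XFS_DA_OP_CILOOKUP	0x0010

struct args { int op_flags; };

int main(void)
{
	struct args args = { 0 };

	/* was: args.addname = 1; args.oknoent = 1; */
	args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;

	/* was: if (args.rename) ... */
	assert(!(args.op_flags & XFS_DA_OP_RENAME));
	assert(args.op_flags & XFS_DA_OP_OKNOENT);
	return 0;
}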
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index 5f3647cb9885..2211e885ef24 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -116,7 +116,7 @@ xfs_swapext(
out_put_file:
fput(file);
out_free_sxp:
- kmem_free(sxp, sizeof(xfs_swapext_t));
+ kmem_free(sxp);
out:
return error;
}
@@ -381,6 +381,6 @@ xfs_swap_extents(
xfs_iunlock(tip, lock_flags);
}
if (tempifp != NULL)
- kmem_free(tempifp, sizeof(xfs_ifork_t));
+ kmem_free(tempifp);
return error;
}
diff --git a/fs/xfs/xfs_dir2.c b/fs/xfs/xfs_dir2.c
index 7cb26529766b..80e0dc51361c 100644
--- a/fs/xfs/xfs_dir2.c
+++ b/fs/xfs/xfs_dir2.c
@@ -46,6 +46,54 @@
struct xfs_name xfs_name_dotdot = {"..", 2};
+extern const struct xfs_nameops xfs_default_nameops;
+
+/*
+ * ASCII case-insensitive (ie. A-Z) support for directories that was
+ * used in IRIX.
+ */
+STATIC xfs_dahash_t
+xfs_ascii_ci_hashname(
+ struct xfs_name *name)
+{
+ xfs_dahash_t hash;
+ int i;
+
+ for (i = 0, hash = 0; i < name->len; i++)
+ hash = tolower(name->name[i]) ^ rol32(hash, 7);
+
+ return hash;
+}
+
+STATIC enum xfs_dacmp
+xfs_ascii_ci_compname(
+ struct xfs_da_args *args,
+ const char *name,
+ int len)
+{
+ enum xfs_dacmp result;
+ int i;
+
+ if (args->namelen != len)
+ return XFS_CMP_DIFFERENT;
+
+ result = XFS_CMP_EXACT;
+ for (i = 0; i < len; i++) {
+ if (args->name[i] == name[i])
+ continue;
+ if (tolower(args->name[i]) != tolower(name[i]))
+ return XFS_CMP_DIFFERENT;
+ result = XFS_CMP_CASE;
+ }
+
+ return result;
+}
+
+static struct xfs_nameops xfs_ascii_ci_nameops = {
+ .hashname = xfs_ascii_ci_hashname,
+ .compname = xfs_ascii_ci_compname,
+};
+
void
xfs_dir_mount(
xfs_mount_t *mp)
@@ -65,6 +113,10 @@ xfs_dir_mount(
(mp->m_dirblksize - (uint)sizeof(xfs_da_node_hdr_t)) /
(uint)sizeof(xfs_da_node_entry_t);
mp->m_dir_magicpct = (mp->m_dirblksize * 37) / 100;
+ if (xfs_sb_version_hasasciici(&mp->m_sb))
+ mp->m_dirnameops = &xfs_ascii_ci_nameops;
+ else
+ mp->m_dirnameops = &xfs_default_nameops;
}
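xfs_dir_mount() now picks a struct xfs_nameops per filesystem: the IRIX-style ASCII case-insensitive hash and compare when the superblock has the asciici feature, the plain ones otherwise. A standalone sketch of the two operations (the user-space rol32 helper and the names are assumptions, not the kernel code, and it assumes the lengths already match, which the kernel checks first):

#include <ctype.h>
#include <stdint.h>
#include <stdio.h>

enum cmp { CMP_DIFFERENT, CMP_EXACT, CMP_CASE };

static uint32_t rol32(uint32_t x, int n)
{
	return (x << n) | (x >> (32 - n));
}

/* Fold case before hashing so "Foo" and "foo" land on the same hash
 * chain, mirroring xfs_ascii_ci_hashname() above. */
static uint32_t ci_hash(const char *name, int len)
{
	uint32_t hash = 0;
	int i;

	for (i = 0; i < len; i++)
		hash = tolower((unsigned char)name[i]) ^ rol32(hash, 7);
	return hash;
}

/* Three-way result, as in xfs_ascii_ci_compname(): exact, same name
 * differing only in case, or different. */
static enum cmp ci_cmp(const char *a, const char *b, int len)
{
	enum cmp res = CMP_EXACT;
	int i;

	for (i = 0; i < len; i++) {
		if (a[i] == b[i])
			continue;
		if (tolower((unsigned char)a[i]) != tolower((unsigned char)b[i]))
			return CMP_DIFFERENT;
		res = CMP_CASE;
	}
	return res;
}

int main(void)
{
	printf("%u %u\n", ci_hash("Makefile", 8), ci_hash("makefile", 8));
	printf("%d\n", ci_cmp("Makefile", "makefile", 8));	/* CMP_CASE */
	return 0;
}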
/*
@@ -162,9 +214,10 @@ xfs_dir_createname(
return rval;
XFS_STATS_INC(xs_dir_create);
+ memset(&args, 0, sizeof(xfs_da_args_t));
args.name = name->name;
args.namelen = name->len;
- args.hashval = xfs_da_hashname(name->name, name->len);
+ args.hashval = dp->i_mount->m_dirnameops->hashname(name);
args.inumber = inum;
args.dp = dp;
args.firstblock = first;
@@ -172,8 +225,7 @@ xfs_dir_createname(
args.total = total;
args.whichfork = XFS_DATA_FORK;
args.trans = tp;
- args.justcheck = 0;
- args.addname = args.oknoent = 1;
+ args.op_flags = XFS_DA_OP_ADDNAME | XFS_DA_OP_OKNOENT;
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
rval = xfs_dir2_sf_addname(&args);
@@ -191,14 +243,43 @@ xfs_dir_createname(
}
/*
+ * If doing a CI lookup and case-insensitive match, dup actual name into
+ * args.value. Return EEXIST for success (ie. name found) or an error.
+ */
+int
+xfs_dir_cilookup_result(
+ struct xfs_da_args *args,
+ const char *name,
+ int len)
+{
+ if (args->cmpresult == XFS_CMP_DIFFERENT)
+ return ENOENT;
+ if (args->cmpresult != XFS_CMP_CASE ||
+ !(args->op_flags & XFS_DA_OP_CILOOKUP))
+ return EEXIST;
+
+ args->value = kmem_alloc(len, KM_MAYFAIL);
+ if (!args->value)
+ return ENOMEM;
+
+ memcpy(args->value, name, len);
+ args->valuelen = len;
+ return EEXIST;
+}
+
+/*
* Lookup a name in a directory, give back the inode number.
+ * If ci_name is not NULL, returns the actual name in ci_name if it differs
+ * from name, or ci_name->name is set to NULL for an exact match.
*/
+
int
xfs_dir_lookup(
xfs_trans_t *tp,
xfs_inode_t *dp,
struct xfs_name *name,
- xfs_ino_t *inum) /* out: inode number */
+ xfs_ino_t *inum, /* out: inode number */
+ struct xfs_name *ci_name) /* out: actual name if CI match */
{
xfs_da_args_t args;
int rval;
@@ -206,15 +287,17 @@ xfs_dir_lookup(
ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
XFS_STATS_INC(xs_dir_lookup);
- memset(&args, 0, sizeof(xfs_da_args_t));
+ memset(&args, 0, sizeof(xfs_da_args_t));
args.name = name->name;
args.namelen = name->len;
- args.hashval = xfs_da_hashname(name->name, name->len);
+ args.hashval = dp->i_mount->m_dirnameops->hashname(name);
args.dp = dp;
args.whichfork = XFS_DATA_FORK;
args.trans = tp;
- args.oknoent = 1;
+ args.op_flags = XFS_DA_OP_OKNOENT;
+ if (ci_name)
+ args.op_flags |= XFS_DA_OP_CILOOKUP;
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
rval = xfs_dir2_sf_lookup(&args);
@@ -230,8 +313,13 @@ xfs_dir_lookup(
rval = xfs_dir2_node_lookup(&args);
if (rval == EEXIST)
rval = 0;
- if (rval == 0)
+ if (!rval) {
*inum = args.inumber;
+ if (ci_name) {
+ ci_name->name = args.value;
+ ci_name->len = args.valuelen;
+ }
+ }
return rval;
}
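A lookup that passes a non-NULL ci_name (which sets XFS_DA_OP_CILOOKUP) gets back the on-disk spelling whenever the only hit was case-insensitive; xfs_dir_cilookup_result() above dups that name into args.value and the caller owns the copy. A user-space model of that contract (names and the malloc/free pairing are illustrative; the kernel uses kmem_alloc with KM_MAYFAIL):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

enum cmp { CMP_DIFFERENT, CMP_EXACT, CMP_CASE };

struct lookup {
	enum cmp cmpresult;
	int cilookup;		/* caller asked for the actual name */
	char *value;		/* dup of the on-disk name on a CI hit */
	int valuelen;
};

/* Same shape as xfs_dir_cilookup_result(): EEXIST means "found"; only
 * a case-insensitive hit with CILOOKUP set dups the on-disk spelling. */
static int cilookup_result(struct lookup *lk, const char *name, int len)
{
	if (lk->cmpresult == CMP_DIFFERENT)
		return ENOENT;
	if (lk->cmpresult != CMP_CASE || !lk->cilookup)
		return EEXIST;

	lk->value = malloc(len);
	if (!lk->value)
		return ENOMEM;
	memcpy(lk->value, name, len);
	lk->valuelen = len;
	return EEXIST;
}

int main(void)
{
	struct lookup lk = { .cmpresult = CMP_CASE, .cilookup = 1 };

	if (cilookup_result(&lk, "README", 6) == EEXIST && lk.value) {
		printf("matched as %.*s\n", lk.valuelen, lk.value);
		free(lk.value);		/* the caller owns the dup'd name */
	}
	return 0;
}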
@@ -255,9 +343,10 @@ xfs_dir_removename(
ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
XFS_STATS_INC(xs_dir_remove);
+ memset(&args, 0, sizeof(xfs_da_args_t));
args.name = name->name;
args.namelen = name->len;
- args.hashval = xfs_da_hashname(name->name, name->len);
+ args.hashval = dp->i_mount->m_dirnameops->hashname(name);
args.inumber = ino;
args.dp = dp;
args.firstblock = first;
@@ -265,7 +354,6 @@ xfs_dir_removename(
args.total = total;
args.whichfork = XFS_DATA_FORK;
args.trans = tp;
- args.justcheck = args.addname = args.oknoent = 0;
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
rval = xfs_dir2_sf_removename(&args);
@@ -338,9 +426,10 @@ xfs_dir_replace(
if ((rval = xfs_dir_ino_validate(tp->t_mountp, inum)))
return rval;
+ memset(&args, 0, sizeof(xfs_da_args_t));
args.name = name->name;
args.namelen = name->len;
- args.hashval = xfs_da_hashname(name->name, name->len);
+ args.hashval = dp->i_mount->m_dirnameops->hashname(name);
args.inumber = inum;
args.dp = dp;
args.firstblock = first;
@@ -348,7 +437,6 @@ xfs_dir_replace(
args.total = total;
args.whichfork = XFS_DATA_FORK;
args.trans = tp;
- args.justcheck = args.addname = args.oknoent = 0;
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
rval = xfs_dir2_sf_replace(&args);
@@ -384,15 +472,16 @@ xfs_dir_canenter(
return 0;
ASSERT((dp->i_d.di_mode & S_IFMT) == S_IFDIR);
- memset(&args, 0, sizeof(xfs_da_args_t));
+ memset(&args, 0, sizeof(xfs_da_args_t));
args.name = name->name;
args.namelen = name->len;
- args.hashval = xfs_da_hashname(name->name, name->len);
+ args.hashval = dp->i_mount->m_dirnameops->hashname(name);
args.dp = dp;
args.whichfork = XFS_DATA_FORK;
args.trans = tp;
- args.justcheck = args.addname = args.oknoent = 1;
+ args.op_flags = XFS_DA_OP_JUSTCHECK | XFS_DA_OP_ADDNAME |
+ XFS_DA_OP_OKNOENT;
if (dp->i_d.di_format == XFS_DINODE_FMT_LOCAL)
rval = xfs_dir2_sf_addname(&args);
@@ -493,7 +582,7 @@ xfs_dir2_grow_inode(
args->firstblock, args->total,
&mapp[mapi], &nmap, args->flist,
NULL))) {
- kmem_free(mapp, sizeof(*mapp) * count);
+ kmem_free(mapp);
return error;
}
if (nmap < 1)
@@ -525,14 +614,14 @@ xfs_dir2_grow_inode(
mapp[mapi - 1].br_startoff + mapp[mapi - 1].br_blockcount !=
bno + count) {
if (mapp != &map)
- kmem_free(mapp, sizeof(*mapp) * count);
+ kmem_free(mapp);
return XFS_ERROR(ENOSPC);
}
/*
* Done with the temporary mapping table.
*/
if (mapp != &map)
- kmem_free(mapp, sizeof(*mapp) * count);
+ kmem_free(mapp);
*dbp = xfs_dir2_da_to_db(mp, (xfs_dablk_t)bno);
/*
* Update file's size if this is the data space and it grew.
diff --git a/fs/xfs/xfs_dir2.h b/fs/xfs/xfs_dir2.h
index 6392f939029f..1d9ef96f33aa 100644
--- a/fs/xfs/xfs_dir2.h
+++ b/fs/xfs/xfs_dir2.h
@@ -74,7 +74,8 @@ extern int xfs_dir_createname(struct xfs_trans *tp, struct xfs_inode *dp,
xfs_fsblock_t *first,
struct xfs_bmap_free *flist, xfs_extlen_t tot);
extern int xfs_dir_lookup(struct xfs_trans *tp, struct xfs_inode *dp,
- struct xfs_name *name, xfs_ino_t *inum);
+ struct xfs_name *name, xfs_ino_t *inum,
+ struct xfs_name *ci_name);
extern int xfs_dir_removename(struct xfs_trans *tp, struct xfs_inode *dp,
struct xfs_name *name, xfs_ino_t ino,
xfs_fsblock_t *first,
@@ -99,4 +100,7 @@ extern int xfs_dir2_isleaf(struct xfs_trans *tp, struct xfs_inode *dp,
extern int xfs_dir2_shrink_inode(struct xfs_da_args *args, xfs_dir2_db_t db,
struct xfs_dabuf *bp);
+extern int xfs_dir_cilookup_result(struct xfs_da_args *args, const char *name,
+ int len);
+
#endif /* __XFS_DIR2_H__ */
diff --git a/fs/xfs/xfs_dir2_block.c b/fs/xfs/xfs_dir2_block.c
index fb5a556725b3..e2fa0a1d8e96 100644
--- a/fs/xfs/xfs_dir2_block.c
+++ b/fs/xfs/xfs_dir2_block.c
@@ -215,7 +215,7 @@ xfs_dir2_block_addname(
/*
* If this isn't a real add, we're done with the buffer.
*/
- if (args->justcheck)
+ if (args->op_flags & XFS_DA_OP_JUSTCHECK)
xfs_da_brelse(tp, bp);
/*
* If we don't have space for the new entry & leaf ...
@@ -225,7 +225,7 @@ xfs_dir2_block_addname(
* Not trying to actually do anything, or don't have
* a space reservation: return no-space.
*/
- if (args->justcheck || args->total == 0)
+ if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0)
return XFS_ERROR(ENOSPC);
/*
* Convert to the next larger format.
@@ -240,7 +240,7 @@ xfs_dir2_block_addname(
/*
* Just checking, and it would work, so say so.
*/
- if (args->justcheck)
+ if (args->op_flags & XFS_DA_OP_JUSTCHECK)
return 0;
needlog = needscan = 0;
/*
@@ -610,14 +610,15 @@ xfs_dir2_block_lookup(
/*
* Get the offset from the leaf entry, to point to the data.
*/
- dep = (xfs_dir2_data_entry_t *)
- ((char *)block + xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
+ dep = (xfs_dir2_data_entry_t *)((char *)block +
+ xfs_dir2_dataptr_to_off(mp, be32_to_cpu(blp[ent].address)));
/*
- * Fill in inode number, release the block.
+ * Fill in inode number, CI name if appropriate, release the block.
*/
args->inumber = be64_to_cpu(dep->inumber);
+ error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
xfs_da_brelse(args->trans, bp);
- return XFS_ERROR(EEXIST);
+ return XFS_ERROR(error);
}
/*
@@ -643,6 +644,7 @@ xfs_dir2_block_lookup_int(
int mid; /* binary search current idx */
xfs_mount_t *mp; /* filesystem mount point */
xfs_trans_t *tp; /* transaction pointer */
+ enum xfs_dacmp cmp; /* comparison result */
dp = args->dp;
tp = args->trans;
@@ -673,7 +675,7 @@ xfs_dir2_block_lookup_int(
else
high = mid - 1;
if (low > high) {
- ASSERT(args->oknoent);
+ ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
xfs_da_brelse(tp, bp);
return XFS_ERROR(ENOENT);
}
@@ -697,20 +699,31 @@ xfs_dir2_block_lookup_int(
dep = (xfs_dir2_data_entry_t *)
((char *)block + xfs_dir2_dataptr_to_off(mp, addr));
/*
- * Compare, if it's right give back buffer & entry number.
+ * Compare name and if it's an exact match, return the index
+ * and buffer. If it's the first case-insensitive match, store
+ * the index and buffer and continue looking for an exact match.
*/
- if (dep->namelen == args->namelen &&
- dep->name[0] == args->name[0] &&
- memcmp(dep->name, args->name, args->namelen) == 0) {
+ cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen);
+ if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
+ args->cmpresult = cmp;
*bpp = bp;
*entno = mid;
- return 0;
+ if (cmp == XFS_CMP_EXACT)
+ return 0;
}
- } while (++mid < be32_to_cpu(btp->count) && be32_to_cpu(blp[mid].hashval) == hash);
+ } while (++mid < be32_to_cpu(btp->count) &&
+ be32_to_cpu(blp[mid].hashval) == hash);
+
+ ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
+ /*
+ * Here, we can only be doing a lookup (not a rename or replace).
+ * If a case-insensitive match was found earlier, return success.
+ */
+ if (args->cmpresult == XFS_CMP_CASE)
+ return 0;
/*
* No match, release the buffer and return ENOENT.
*/
- ASSERT(args->oknoent);
xfs_da_brelse(tp, bp);
return XFS_ERROR(ENOENT);
}
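The block, leaf and node lookups all switch to the same scan discipline: remember the first case-insensitive hit, keep walking entries with the same hash in case an exact match follows, and only fall back to the CI hit at the end. The pattern in miniature, over a flat array (strcasecmp standing in for the nameops compare):

#include <stdio.h>
#include <string.h>
#include <strings.h>

/* Exact match wins immediately; otherwise return the first
 * case-insensitive hit, or -1 if nothing matched at all. */
static int scan(const char *const *names, int n, const char *want)
{
	int ci = -1;
	int i;

	for (i = 0; i < n; i++) {
		if (strcmp(names[i], want) == 0)
			return i;
		if (ci < 0 && strcasecmp(names[i], want) == 0)
			ci = i;
	}
	return ci;
}

int main(void)
{
	const char *names[] = { "readme", "README", "Readme" };

	printf("%d\n", scan(names, 3, "README"));	/* 1: exact match */
	printf("%d\n", scan(names, 3, "ReadMe"));	/* 0: first CI hit */
	return 0;
}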
@@ -1033,6 +1046,7 @@ xfs_dir2_sf_to_block(
xfs_dir2_sf_t *sfp; /* shortform structure */
__be16 *tagp; /* end of data entry */
xfs_trans_t *tp; /* transaction pointer */
+ struct xfs_name name;
xfs_dir2_trace_args("sf_to_block", args);
dp = args->dp;
@@ -1071,7 +1085,7 @@ xfs_dir2_sf_to_block(
*/
error = xfs_dir2_grow_inode(args, XFS_DIR2_DATA_SPACE, &blkno);
if (error) {
- kmem_free(buf, buf_len);
+ kmem_free(buf);
return error;
}
/*
@@ -1079,7 +1093,7 @@ xfs_dir2_sf_to_block(
*/
error = xfs_dir2_data_init(args, blkno, &bp);
if (error) {
- kmem_free(buf, buf_len);
+ kmem_free(buf);
return error;
}
block = bp->data;
@@ -1187,8 +1201,10 @@ xfs_dir2_sf_to_block(
tagp = xfs_dir2_data_entry_tag_p(dep);
*tagp = cpu_to_be16((char *)dep - (char *)block);
xfs_dir2_data_log_entry(tp, bp, dep);
- blp[2 + i].hashval = cpu_to_be32(xfs_da_hashname(
- (char *)sfep->name, sfep->namelen));
+ name.name = sfep->name;
+ name.len = sfep->namelen;
+ blp[2 + i].hashval = cpu_to_be32(mp->m_dirnameops->
+ hashname(&name));
blp[2 + i].address = cpu_to_be32(xfs_dir2_byte_to_dataptr(mp,
(char *)dep - (char *)block));
offset = (int)((char *)(tagp + 1) - (char *)block);
@@ -1198,7 +1214,7 @@ xfs_dir2_sf_to_block(
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
}
/* Done with the temporary buffer */
- kmem_free(buf, buf_len);
+ kmem_free(buf);
/*
* Sort the leaf entries by hash value.
*/
diff --git a/fs/xfs/xfs_dir2_data.c b/fs/xfs/xfs_dir2_data.c
index fb8c9e08b23d..498f8d694330 100644
--- a/fs/xfs/xfs_dir2_data.c
+++ b/fs/xfs/xfs_dir2_data.c
@@ -65,6 +65,7 @@ xfs_dir2_data_check(
xfs_mount_t *mp; /* filesystem mount point */
char *p; /* current data position */
int stale; /* count of stale leaves */
+ struct xfs_name name;
mp = dp->i_mount;
d = bp->data;
@@ -140,7 +141,9 @@ xfs_dir2_data_check(
addr = xfs_dir2_db_off_to_dataptr(mp, mp->m_dirdatablk,
(xfs_dir2_data_aoff_t)
((char *)dep - (char *)d));
- hash = xfs_da_hashname((char *)dep->name, dep->namelen);
+ name.name = dep->name;
+ name.len = dep->namelen;
+ hash = mp->m_dirnameops->hashname(&name);
for (i = 0; i < be32_to_cpu(btp->count); i++) {
if (be32_to_cpu(lep[i].address) == addr &&
be32_to_cpu(lep[i].hashval) == hash)
diff --git a/fs/xfs/xfs_dir2_leaf.c b/fs/xfs/xfs_dir2_leaf.c
index bc52b803d79b..f110242d6dfc 100644
--- a/fs/xfs/xfs_dir2_leaf.c
+++ b/fs/xfs/xfs_dir2_leaf.c
@@ -263,20 +263,21 @@ xfs_dir2_leaf_addname(
* If we don't have enough free bytes but we can make enough
* by compacting out stale entries, we'll do that.
*/
- if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] < needbytes &&
- be16_to_cpu(leaf->hdr.stale) > 1) {
+ if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] <
+ needbytes && be16_to_cpu(leaf->hdr.stale) > 1) {
compact = 1;
}
/*
* Otherwise if we don't have enough free bytes we need to
* convert to node form.
*/
- else if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(leaf->hdr.count)] <
- needbytes) {
+ else if ((char *)bestsp - (char *)&leaf->ents[be16_to_cpu(
+ leaf->hdr.count)] < needbytes) {
/*
* Just checking or no space reservation, give up.
*/
- if (args->justcheck || args->total == 0) {
+ if ((args->op_flags & XFS_DA_OP_JUSTCHECK) ||
+ args->total == 0) {
xfs_da_brelse(tp, lbp);
return XFS_ERROR(ENOSPC);
}
@@ -301,7 +302,7 @@ xfs_dir2_leaf_addname(
* If just checking, then it will fit unless we needed to allocate
* a new data block.
*/
- if (args->justcheck) {
+ if (args->op_flags & XFS_DA_OP_JUSTCHECK) {
xfs_da_brelse(tp, lbp);
return use_block == -1 ? XFS_ERROR(ENOSPC) : 0;
}
@@ -1110,7 +1111,7 @@ xfs_dir2_leaf_getdents(
*offset = XFS_DIR2_MAX_DATAPTR;
else
*offset = xfs_dir2_byte_to_dataptr(mp, curoff);
- kmem_free(map, map_size * sizeof(*map));
+ kmem_free(map);
if (bp)
xfs_da_brelse(NULL, bp);
return error;
@@ -1298,12 +1299,13 @@ xfs_dir2_leaf_lookup(
((char *)dbp->data +
xfs_dir2_dataptr_to_off(dp->i_mount, be32_to_cpu(lep->address)));
/*
- * Return the found inode number.
+ * Return the found inode number & CI name if appropriate
*/
args->inumber = be64_to_cpu(dep->inumber);
+ error = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
xfs_da_brelse(tp, dbp);
xfs_da_brelse(tp, lbp);
- return XFS_ERROR(EEXIST);
+ return XFS_ERROR(error);
}
/*
@@ -1331,6 +1333,8 @@ xfs_dir2_leaf_lookup_int(
xfs_mount_t *mp; /* filesystem mount point */
xfs_dir2_db_t newdb; /* new data block number */
xfs_trans_t *tp; /* transaction pointer */
+ xfs_dabuf_t *cbp; /* case match data buffer */
+ enum xfs_dacmp cmp; /* name compare result */
dp = args->dp;
tp = args->trans;
@@ -1354,9 +1358,11 @@ xfs_dir2_leaf_lookup_int(
* Loop over all the entries with the right hash value
* looking to match the name.
*/
+ cbp = NULL;
for (lep = &leaf->ents[index], dbp = NULL, curdb = -1;
- index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval;
- lep++, index++) {
+ index < be16_to_cpu(leaf->hdr.count) &&
+ be32_to_cpu(lep->hashval) == args->hashval;
+ lep++, index++) {
/*
* Skip over stale leaf entries.
*/
@@ -1371,12 +1377,12 @@ xfs_dir2_leaf_lookup_int(
* need to pitch the old one and read the new one.
*/
if (newdb != curdb) {
- if (dbp)
+ if (dbp != cbp)
xfs_da_brelse(tp, dbp);
- if ((error =
- xfs_da_read_buf(tp, dp,
- xfs_dir2_db_to_da(mp, newdb), -1, &dbp,
- XFS_DATA_FORK))) {
+ error = xfs_da_read_buf(tp, dp,
+ xfs_dir2_db_to_da(mp, newdb),
+ -1, &dbp, XFS_DATA_FORK);
+ if (error) {
xfs_da_brelse(tp, lbp);
return error;
}
@@ -1386,24 +1392,46 @@ xfs_dir2_leaf_lookup_int(
/*
* Point to the data entry.
*/
- dep = (xfs_dir2_data_entry_t *)
- ((char *)dbp->data +
- xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
+ dep = (xfs_dir2_data_entry_t *)((char *)dbp->data +
+ xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
/*
- * If it matches then return it.
+ * Compare name and if it's an exact match, return the index
+ * and buffer. If it's the first case-insensitive match, store
+ * the index and buffer and continue looking for an exact match.
*/
- if (dep->namelen == args->namelen &&
- dep->name[0] == args->name[0] &&
- memcmp(dep->name, args->name, args->namelen) == 0) {
- *dbpp = dbp;
+ cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen);
+ if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
+ args->cmpresult = cmp;
*indexp = index;
- return 0;
+ /*
+ * case exact match: release the stored CI buffer if it
+ * exists and return the current buffer.
+ */
+ if (cmp == XFS_CMP_EXACT) {
+ if (cbp && cbp != dbp)
+ xfs_da_brelse(tp, cbp);
+ *dbpp = dbp;
+ return 0;
+ }
+ cbp = dbp;
}
}
+ ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
+ /*
+ * Here, we can only be doing a lookup (not a rename or replace).
+ * If a case-insensitive match was found earlier, release the current
+ * buffer and return the stored CI matching buffer.
+ */
+ if (args->cmpresult == XFS_CMP_CASE) {
+ if (cbp != dbp)
+ xfs_da_brelse(tp, dbp);
+ *dbpp = cbp;
+ return 0;
+ }
/*
* No match found, return ENOENT.
*/
- ASSERT(args->oknoent);
+ ASSERT(cbp == NULL);
if (dbp)
xfs_da_brelse(tp, dbp);
xfs_da_brelse(tp, lbp);
diff --git a/fs/xfs/xfs_dir2_node.c b/fs/xfs/xfs_dir2_node.c
index 8dade711f099..1b5430223461 100644
--- a/fs/xfs/xfs_dir2_node.c
+++ b/fs/xfs/xfs_dir2_node.c
@@ -226,7 +226,7 @@ xfs_dir2_leafn_add(
ASSERT(index == be16_to_cpu(leaf->hdr.count) ||
be32_to_cpu(leaf->ents[index].hashval) >= args->hashval);
- if (args->justcheck)
+ if (args->op_flags & XFS_DA_OP_JUSTCHECK)
return 0;
/*
@@ -387,28 +387,26 @@ xfs_dir2_leafn_lasthash(
}
/*
- * Look up a leaf entry in a node-format leaf block.
- * If this is an addname then the extrablk in state is a freespace block,
- * otherwise it's a data block.
+ * Look up a leaf entry for space to add a name in a node-format leaf block.
+ * The extrablk in state is a freespace block.
*/
-int
-xfs_dir2_leafn_lookup_int(
+STATIC int
+xfs_dir2_leafn_lookup_for_addname(
xfs_dabuf_t *bp, /* leaf buffer */
xfs_da_args_t *args, /* operation arguments */
int *indexp, /* out: leaf entry index */
xfs_da_state_t *state) /* state to fill in */
{
- xfs_dabuf_t *curbp; /* current data/free buffer */
- xfs_dir2_db_t curdb; /* current data block number */
- xfs_dir2_db_t curfdb; /* current free block number */
- xfs_dir2_data_entry_t *dep; /* data block entry */
+ xfs_dabuf_t *curbp = NULL; /* current data/free buffer */
+ xfs_dir2_db_t curdb = -1; /* current data block number */
+ xfs_dir2_db_t curfdb = -1; /* current free block number */
xfs_inode_t *dp; /* incore directory inode */
int error; /* error return value */
int fi; /* free entry index */
- xfs_dir2_free_t *free=NULL; /* free block structure */
+ xfs_dir2_free_t *free = NULL; /* free block structure */
int index; /* leaf entry index */
xfs_dir2_leaf_t *leaf; /* leaf structure */
- int length=0; /* length of new data entry */
+ int length; /* length of new data entry */
xfs_dir2_leaf_entry_t *lep; /* leaf entry */
xfs_mount_t *mp; /* filesystem mount point */
xfs_dir2_db_t newdb; /* new data block number */
@@ -431,33 +429,20 @@ xfs_dir2_leafn_lookup_int(
/*
* Do we have a buffer coming in?
*/
- if (state->extravalid)
+ if (state->extravalid) {
+ /* If so, it's a free block buffer, get the block number. */
curbp = state->extrablk.bp;
- else
- curbp = NULL;
- /*
- * For addname, it's a free block buffer, get the block number.
- */
- if (args->addname) {
- curfdb = curbp ? state->extrablk.blkno : -1;
- curdb = -1;
- length = xfs_dir2_data_entsize(args->namelen);
- if ((free = (curbp ? curbp->data : NULL)))
- ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
- }
- /*
- * For others, it's a data block buffer, get the block number.
- */
- else {
- curfdb = -1;
- curdb = curbp ? state->extrablk.blkno : -1;
+ curfdb = state->extrablk.blkno;
+ free = curbp->data;
+ ASSERT(be32_to_cpu(free->hdr.magic) == XFS_DIR2_FREE_MAGIC);
}
+ length = xfs_dir2_data_entsize(args->namelen);
/*
* Loop over leaf entries with the right hash value.
*/
- for (lep = &leaf->ents[index];
- index < be16_to_cpu(leaf->hdr.count) && be32_to_cpu(lep->hashval) == args->hashval;
- lep++, index++) {
+ for (lep = &leaf->ents[index]; index < be16_to_cpu(leaf->hdr.count) &&
+ be32_to_cpu(lep->hashval) == args->hashval;
+ lep++, index++) {
/*
* Skip stale leaf entries.
*/
@@ -471,158 +456,224 @@ xfs_dir2_leafn_lookup_int(
* For addname, we're looking for a place to put the new entry.
* We want to use a data block with an entry of equal
* hash value to ours if there is one with room.
+ *
+ * If this block isn't the data block we already have
+ * in hand, take a look at it.
*/
- if (args->addname) {
+ if (newdb != curdb) {
+ curdb = newdb;
/*
- * If this block isn't the data block we already have
- * in hand, take a look at it.
+ * Convert the data block to the free block
+ * holding its freespace information.
*/
- if (newdb != curdb) {
- curdb = newdb;
- /*
- * Convert the data block to the free block
- * holding its freespace information.
- */
- newfdb = xfs_dir2_db_to_fdb(mp, newdb);
- /*
- * If it's not the one we have in hand,
- * read it in.
- */
- if (newfdb != curfdb) {
- /*
- * If we had one before, drop it.
- */
- if (curbp)
- xfs_da_brelse(tp, curbp);
- /*
- * Read the free block.
- */
- if ((error = xfs_da_read_buf(tp, dp,
- xfs_dir2_db_to_da(mp,
- newfdb),
- -1, &curbp,
- XFS_DATA_FORK))) {
- return error;
- }
- free = curbp->data;
- ASSERT(be32_to_cpu(free->hdr.magic) ==
- XFS_DIR2_FREE_MAGIC);
- ASSERT((be32_to_cpu(free->hdr.firstdb) %
- XFS_DIR2_MAX_FREE_BESTS(mp)) ==
- 0);
- ASSERT(be32_to_cpu(free->hdr.firstdb) <= curdb);
- ASSERT(curdb <
- be32_to_cpu(free->hdr.firstdb) +
- be32_to_cpu(free->hdr.nvalid));
- }
- /*
- * Get the index for our entry.
- */
- fi = xfs_dir2_db_to_fdindex(mp, curdb);
- /*
- * If it has room, return it.
- */
- if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) {
- XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
- XFS_ERRLEVEL_LOW, mp);
- if (curfdb != newfdb)
- xfs_da_brelse(tp, curbp);
- return XFS_ERROR(EFSCORRUPTED);
- }
- curfdb = newfdb;
- if (be16_to_cpu(free->bests[fi]) >= length) {
- *indexp = index;
- state->extravalid = 1;
- state->extrablk.bp = curbp;
- state->extrablk.blkno = curfdb;
- state->extrablk.index = fi;
- state->extrablk.magic =
- XFS_DIR2_FREE_MAGIC;
- ASSERT(args->oknoent);
- return XFS_ERROR(ENOENT);
- }
- }
- }
- /*
- * Not adding a new entry, so we really want to find
- * the name given to us.
- */
- else {
+ newfdb = xfs_dir2_db_to_fdb(mp, newdb);
/*
- * If it's a different data block, go get it.
+ * If it's not the one we have in hand, read it in.
*/
- if (newdb != curdb) {
+ if (newfdb != curfdb) {
/*
- * If we had a block before, drop it.
+ * If we had one before, drop it.
*/
if (curbp)
xfs_da_brelse(tp, curbp);
/*
- * Read the data block.
+ * Read the free block.
*/
- if ((error =
- xfs_da_read_buf(tp, dp,
- xfs_dir2_db_to_da(mp, newdb), -1,
- &curbp, XFS_DATA_FORK))) {
+ error = xfs_da_read_buf(tp, dp,
+ xfs_dir2_db_to_da(mp, newfdb),
+ -1, &curbp, XFS_DATA_FORK);
+ if (error)
return error;
- }
- xfs_dir2_data_check(dp, curbp);
- curdb = newdb;
+ free = curbp->data;
+ ASSERT(be32_to_cpu(free->hdr.magic) ==
+ XFS_DIR2_FREE_MAGIC);
+ ASSERT((be32_to_cpu(free->hdr.firstdb) %
+ XFS_DIR2_MAX_FREE_BESTS(mp)) == 0);
+ ASSERT(be32_to_cpu(free->hdr.firstdb) <= curdb);
+ ASSERT(curdb < be32_to_cpu(free->hdr.firstdb) +
+ be32_to_cpu(free->hdr.nvalid));
}
/*
- * Point to the data entry.
+ * Get the index for our entry.
*/
- dep = (xfs_dir2_data_entry_t *)
- ((char *)curbp->data +
- xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
+ fi = xfs_dir2_db_to_fdindex(mp, curdb);
/*
- * Compare the entry, return it if it matches.
+ * If it has room, return it.
*/
- if (dep->namelen == args->namelen &&
- dep->name[0] == args->name[0] &&
- memcmp(dep->name, args->name, args->namelen) == 0) {
- args->inumber = be64_to_cpu(dep->inumber);
- *indexp = index;
- state->extravalid = 1;
- state->extrablk.bp = curbp;
- state->extrablk.blkno = curdb;
- state->extrablk.index =
- (int)((char *)dep -
- (char *)curbp->data);
- state->extrablk.magic = XFS_DIR2_DATA_MAGIC;
- return XFS_ERROR(EEXIST);
+ if (unlikely(be16_to_cpu(free->bests[fi]) == NULLDATAOFF)) {
+ XFS_ERROR_REPORT("xfs_dir2_leafn_lookup_int",
+ XFS_ERRLEVEL_LOW, mp);
+ if (curfdb != newfdb)
+ xfs_da_brelse(tp, curbp);
+ return XFS_ERROR(EFSCORRUPTED);
}
+ curfdb = newfdb;
+ if (be16_to_cpu(free->bests[fi]) >= length)
+ goto out;
}
}
+ /* Didn't find any space */
+ fi = -1;
+out:
+ ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
+ if (curbp) {
+ /* Giving back a free block. */
+ state->extravalid = 1;
+ state->extrablk.bp = curbp;
+ state->extrablk.index = fi;
+ state->extrablk.blkno = curfdb;
+ state->extrablk.magic = XFS_DIR2_FREE_MAGIC;
+ } else {
+ state->extravalid = 0;
+ }
/*
- * Didn't find a match.
- * If we are holding a buffer, give it back in case our caller
- * finds it useful.
+ * Return the index, that will be the insertion point.
*/
- if ((state->extravalid = (curbp != NULL))) {
- state->extrablk.bp = curbp;
- state->extrablk.index = -1;
+ *indexp = index;
+ return XFS_ERROR(ENOENT);
+}
+
+/*
+ * Look up a leaf entry in a node-format leaf block.
+ * The extrablk in state is a data block.
+ */
+STATIC int
+xfs_dir2_leafn_lookup_for_entry(
+ xfs_dabuf_t *bp, /* leaf buffer */
+ xfs_da_args_t *args, /* operation arguments */
+ int *indexp, /* out: leaf entry index */
+ xfs_da_state_t *state) /* state to fill in */
+{
+ xfs_dabuf_t *curbp = NULL; /* current data/free buffer */
+ xfs_dir2_db_t curdb = -1; /* current data block number */
+ xfs_dir2_data_entry_t *dep; /* data block entry */
+ xfs_inode_t *dp; /* incore directory inode */
+ int error; /* error return value */
+ int di = -1; /* data entry index */
+ int index; /* leaf entry index */
+ xfs_dir2_leaf_t *leaf; /* leaf structure */
+ xfs_dir2_leaf_entry_t *lep; /* leaf entry */
+ xfs_mount_t *mp; /* filesystem mount point */
+ xfs_dir2_db_t newdb; /* new data block number */
+ xfs_trans_t *tp; /* transaction pointer */
+ enum xfs_dacmp cmp; /* comparison result */
+
+ dp = args->dp;
+ tp = args->trans;
+ mp = dp->i_mount;
+ leaf = bp->data;
+ ASSERT(be16_to_cpu(leaf->hdr.info.magic) == XFS_DIR2_LEAFN_MAGIC);
+#ifdef __KERNEL__
+ ASSERT(be16_to_cpu(leaf->hdr.count) > 0);
+#endif
+ xfs_dir2_leafn_check(dp, bp);
+ /*
+ * Look up the hash value in the leaf entries.
+ */
+ index = xfs_dir2_leaf_search_hash(args, bp);
+ /*
+ * Do we have a buffer coming in?
+ */
+ if (state->extravalid) {
+ curbp = state->extrablk.bp;
+ curdb = state->extrablk.blkno;
+ di = state->extrablk.index;
+ }
+ /*
+ * Loop over leaf entries with the right hash value.
+ */
+ for (lep = &leaf->ents[index]; index < be16_to_cpu(leaf->hdr.count) &&
+ be32_to_cpu(lep->hashval) == args->hashval;
+ lep++, index++) {
/*
- * For addname, giving back a free block.
+ * Skip stale leaf entries.
+ */
+ if (be32_to_cpu(lep->address) == XFS_DIR2_NULL_DATAPTR)
+ continue;
+ /*
+ * Pull the data block number from the entry.
+ */
+ newdb = xfs_dir2_dataptr_to_db(mp, be32_to_cpu(lep->address));
+ /*
+ * Not adding a new entry, so we really want to find
+ * the name given to us.
+ *
+ * If it's a different data block, go get it.
*/
- if (args->addname) {
- state->extrablk.blkno = curfdb;
- state->extrablk.magic = XFS_DIR2_FREE_MAGIC;
+ if (newdb != curdb) {
+ /*
+ * If we had a block before, drop it.
+ */
+ if (curbp)
+ xfs_da_brelse(tp, curbp);
+ /*
+ * Read the data block.
+ */
+ error = xfs_da_read_buf(tp, dp, xfs_dir2_db_to_da(mp,
+ newdb), -1, &curbp, XFS_DATA_FORK);
+ if (error)
+ return error;
+ xfs_dir2_data_check(dp, curbp);
+ curdb = newdb;
}
/*
- * For other callers, giving back a data block.
+ * Point to the data entry.
*/
- else {
- state->extrablk.blkno = curdb;
- state->extrablk.magic = XFS_DIR2_DATA_MAGIC;
+ dep = (xfs_dir2_data_entry_t *)((char *)curbp->data +
+ xfs_dir2_dataptr_to_off(mp, be32_to_cpu(lep->address)));
+ /*
+ * Compare the entry and if it's an exact match, return
+ * EEXIST immediately. If it's the first case-insensitive
+ * match, store the inode number and continue looking.
+ */
+ cmp = mp->m_dirnameops->compname(args, dep->name, dep->namelen);
+ if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
+ args->cmpresult = cmp;
+ args->inumber = be64_to_cpu(dep->inumber);
+ di = (int)((char *)dep - (char *)curbp->data);
+ error = EEXIST;
+ if (cmp == XFS_CMP_EXACT)
+ goto out;
}
}
+ /* Didn't find an exact match. */
+ error = ENOENT;
+ ASSERT(index == be16_to_cpu(leaf->hdr.count) ||
+ (args->op_flags & XFS_DA_OP_OKNOENT));
+out:
+ if (curbp) {
+ /* Giving back a data block. */
+ state->extravalid = 1;
+ state->extrablk.bp = curbp;
+ state->extrablk.index = di;
+ state->extrablk.blkno = curdb;
+ state->extrablk.magic = XFS_DIR2_DATA_MAGIC;
+ } else {
+ state->extravalid = 0;
+ }
/*
- * Return the final index, that will be the insertion point.
+ * Return the index, that will be the deletion point for remove/replace.
*/
*indexp = index;
- ASSERT(index == be16_to_cpu(leaf->hdr.count) || args->oknoent);
- return XFS_ERROR(ENOENT);
+ return XFS_ERROR(error);
+}
+
+/*
+ * Look up a leaf entry in a node-format leaf block.
+ * If this is an addname then the extrablk in state is a freespace block,
+ * otherwise it's a data block.
+ */
+int
+xfs_dir2_leafn_lookup_int(
+ xfs_dabuf_t *bp, /* leaf buffer */
+ xfs_da_args_t *args, /* operation arguments */
+ int *indexp, /* out: leaf entry index */
+ xfs_da_state_t *state) /* state to fill in */
+{
+ if (args->op_flags & XFS_DA_OP_ADDNAME)
+ return xfs_dir2_leafn_lookup_for_addname(bp, args, indexp,
+ state);
+ return xfs_dir2_leafn_lookup_for_entry(bp, args, indexp, state);
}
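With the split above, the two paths return different kinds of buffers through state->extrablk, and the magic field tells the caller which one it got: a freespace block for addname, a data block for plain lookup/remove. A tiny tagged-result sketch of that convention (illustrative names only):

#include <assert.h>

enum blk_kind { BLK_NONE, BLK_FREE, BLK_DATA };

struct extrablk {
	enum blk_kind kind;	/* stands in for extrablk.magic */
	int blkno;
	int index;
};

static void give_back_free_block(struct extrablk *x, int fdb, int fi)
{
	x->kind = BLK_FREE;	/* addname path: freespace block */
	x->blkno = fdb;
	x->index = fi;
}

static void give_back_data_block(struct extrablk *x, int db, int di)
{
	x->kind = BLK_DATA;	/* lookup/remove path: data block */
	x->blkno = db;
	x->index = di;
}

int main(void)
{
	struct extrablk x = { BLK_NONE, 0, 0 };

	give_back_free_block(&x, 7, 2);
	assert(x.kind == BLK_FREE);
	give_back_data_block(&x, 3, 64);
	assert(x.kind == BLK_DATA && x.index == 64);
	return 0;
}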
/*
@@ -823,9 +874,10 @@ xfs_dir2_leafn_rebalance(
*/
if (!state->inleaf)
blk2->index = blk1->index - be16_to_cpu(leaf1->hdr.count);
-
- /*
- * Finally sanity check just to make sure we are not returning a negative index
+
+ /*
+ * Finally sanity check just to make sure we are not returning a
+ * negative index
*/
if(blk2->index < 0) {
state->inleaf = 1;
@@ -1332,7 +1384,7 @@ xfs_dir2_node_addname(
/*
* It worked, fix the hash values up the btree.
*/
- if (!args->justcheck)
+ if (!(args->op_flags & XFS_DA_OP_JUSTCHECK))
xfs_da_fixhashpath(state, &state->path);
} else {
/*
@@ -1515,7 +1567,8 @@ xfs_dir2_node_addname_int(
/*
* Not allowed to allocate, return failure.
*/
- if (args->justcheck || args->total == 0) {
+ if ((args->op_flags & XFS_DA_OP_JUSTCHECK) ||
+ args->total == 0) {
/*
* Drop the freespace buffer unless it came from our
* caller.
@@ -1661,7 +1714,7 @@ xfs_dir2_node_addname_int(
/*
* If just checking, we succeeded.
*/
- if (args->justcheck) {
+ if (args->op_flags & XFS_DA_OP_JUSTCHECK) {
if ((fblk == NULL || fblk->bp == NULL) && fbp != NULL)
xfs_da_buf_done(fbp);
return 0;
@@ -1767,6 +1820,14 @@ xfs_dir2_node_lookup(
error = xfs_da_node_lookup_int(state, &rval);
if (error)
rval = error;
+ else if (rval == ENOENT && args->cmpresult == XFS_CMP_CASE) {
+ /* If a CI match, dup the actual name and return EEXIST */
+ xfs_dir2_data_entry_t *dep;
+
+ dep = (xfs_dir2_data_entry_t *)((char *)state->extrablk.bp->
+ data + state->extrablk.index);
+ rval = xfs_dir_cilookup_result(args, dep->name, dep->namelen);
+ }
/*
* Release the btree blocks and leaf block.
*/
@@ -1810,9 +1871,8 @@ xfs_dir2_node_removename(
* Look up the entry we're deleting, set up the cursor.
*/
error = xfs_da_node_lookup_int(state, &rval);
- if (error) {
+ if (error)
rval = error;
- }
/*
* Didn't find it, upper layer screwed up.
*/
@@ -1829,9 +1889,8 @@ xfs_dir2_node_removename(
*/
error = xfs_dir2_leafn_remove(args, blk->bp, blk->index,
&state->extrablk, &rval);
- if (error) {
+ if (error)
return error;
- }
/*
* Fix the hash values up the btree.
*/
diff --git a/fs/xfs/xfs_dir2_sf.c b/fs/xfs/xfs_dir2_sf.c
index 919d275a1cef..b46af0013ec9 100644
--- a/fs/xfs/xfs_dir2_sf.c
+++ b/fs/xfs/xfs_dir2_sf.c
@@ -255,7 +255,7 @@ xfs_dir2_block_to_sf(
xfs_dir2_sf_check(args);
out:
xfs_trans_log_inode(args->trans, dp, logflags);
- kmem_free(block, mp->m_dirblksize);
+ kmem_free(block);
return error;
}
@@ -332,7 +332,7 @@ xfs_dir2_sf_addname(
/*
* Just checking or no space reservation, it doesn't fit.
*/
- if (args->justcheck || args->total == 0)
+ if ((args->op_flags & XFS_DA_OP_JUSTCHECK) || args->total == 0)
return XFS_ERROR(ENOSPC);
/*
* Convert to block form then add the name.
@@ -345,7 +345,7 @@ xfs_dir2_sf_addname(
/*
* Just checking, it fits.
*/
- if (args->justcheck)
+ if (args->op_flags & XFS_DA_OP_JUSTCHECK)
return 0;
/*
* Do it the easy way - just add it at the end.
@@ -512,7 +512,7 @@ xfs_dir2_sf_addname_hard(
sfep = xfs_dir2_sf_nextentry(sfp, sfep);
memcpy(sfep, oldsfep, old_isize - nbytes);
}
- kmem_free(buf, old_isize);
+ kmem_free(buf);
dp->i_d.di_size = new_isize;
xfs_dir2_sf_check(args);
}
@@ -812,8 +812,11 @@ xfs_dir2_sf_lookup(
{
xfs_inode_t *dp; /* incore directory inode */
int i; /* entry index */
+ int error;
xfs_dir2_sf_entry_t *sfep; /* shortform directory entry */
xfs_dir2_sf_t *sfp; /* shortform structure */
+ enum xfs_dacmp cmp; /* comparison result */
+ xfs_dir2_sf_entry_t *ci_sfep; /* case-insens. entry */
xfs_dir2_trace_args("sf_lookup", args);
xfs_dir2_sf_check(args);
@@ -836,6 +839,7 @@ xfs_dir2_sf_lookup(
*/
if (args->namelen == 1 && args->name[0] == '.') {
args->inumber = dp->i_ino;
+ args->cmpresult = XFS_CMP_EXACT;
return XFS_ERROR(EEXIST);
}
/*
@@ -844,28 +848,41 @@ xfs_dir2_sf_lookup(
if (args->namelen == 2 &&
args->name[0] == '.' && args->name[1] == '.') {
args->inumber = xfs_dir2_sf_get_inumber(sfp, &sfp->hdr.parent);
+ args->cmpresult = XFS_CMP_EXACT;
return XFS_ERROR(EEXIST);
}
/*
* Loop over all the entries trying to match ours.
*/
- for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
- i < sfp->hdr.count;
- i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
- if (sfep->namelen == args->namelen &&
- sfep->name[0] == args->name[0] &&
- memcmp(args->name, sfep->name, args->namelen) == 0) {
- args->inumber =
- xfs_dir2_sf_get_inumber(sfp,
- xfs_dir2_sf_inumberp(sfep));
- return XFS_ERROR(EEXIST);
+ ci_sfep = NULL;
+ for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count;
+ i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
+ /*
+ * Compare name and if it's an exact match, return the inode
+ * number. If it's the first case-insensitive match, store the
+ * inode number and continue looking for an exact match.
+ */
+ cmp = dp->i_mount->m_dirnameops->compname(args, sfep->name,
+ sfep->namelen);
+ if (cmp != XFS_CMP_DIFFERENT && cmp != args->cmpresult) {
+ args->cmpresult = cmp;
+ args->inumber = xfs_dir2_sf_get_inumber(sfp,
+ xfs_dir2_sf_inumberp(sfep));
+ if (cmp == XFS_CMP_EXACT)
+ return XFS_ERROR(EEXIST);
+ ci_sfep = sfep;
}
}
+ ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
/*
- * Didn't find it.
+ * Here, we can only be doing a lookup (not a rename or replace).
+ * If a case-insensitive match was not found, return ENOENT.
*/
- ASSERT(args->oknoent);
- return XFS_ERROR(ENOENT);
+ if (!ci_sfep)
+ return XFS_ERROR(ENOENT);
+ /* otherwise process the CI match as required by the caller */
+ error = xfs_dir_cilookup_result(args, ci_sfep->name, ci_sfep->namelen);
+ return XFS_ERROR(error);
}
/*
@@ -904,24 +921,21 @@ xfs_dir2_sf_removename(
* Loop over the old directory entries.
* Find the one we're deleting.
*/
- for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
- i < sfp->hdr.count;
- i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
- if (sfep->namelen == args->namelen &&
- sfep->name[0] == args->name[0] &&
- memcmp(sfep->name, args->name, args->namelen) == 0) {
+ for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp); i < sfp->hdr.count;
+ i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
+ if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
+ XFS_CMP_EXACT) {
ASSERT(xfs_dir2_sf_get_inumber(sfp,
- xfs_dir2_sf_inumberp(sfep)) ==
- args->inumber);
+ xfs_dir2_sf_inumberp(sfep)) ==
+ args->inumber);
break;
}
}
/*
* Didn't find it.
*/
- if (i == sfp->hdr.count) {
+ if (i == sfp->hdr.count)
return XFS_ERROR(ENOENT);
- }
/*
* Calculate sizes.
*/
@@ -1042,11 +1056,10 @@ xfs_dir2_sf_replace(
*/
else {
for (i = 0, sfep = xfs_dir2_sf_firstentry(sfp);
- i < sfp->hdr.count;
- i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
- if (sfep->namelen == args->namelen &&
- sfep->name[0] == args->name[0] &&
- memcmp(args->name, sfep->name, args->namelen) == 0) {
+ i < sfp->hdr.count;
+ i++, sfep = xfs_dir2_sf_nextentry(sfp, sfep)) {
+ if (xfs_da_compname(args, sfep->name, sfep->namelen) ==
+ XFS_CMP_EXACT) {
#if XFS_BIG_INUMS || defined(DEBUG)
ino = xfs_dir2_sf_get_inumber(sfp,
xfs_dir2_sf_inumberp(sfep));
@@ -1061,7 +1074,7 @@ xfs_dir2_sf_replace(
* Didn't find it.
*/
if (i == sfp->hdr.count) {
- ASSERT(args->oknoent);
+ ASSERT(args->op_flags & XFS_DA_OP_OKNOENT);
#if XFS_BIG_INUMS
if (i8elevated)
xfs_dir2_sf_toino4(args);
@@ -1174,7 +1187,7 @@ xfs_dir2_sf_toino4(
/*
* Clean up the inode.
*/
- kmem_free(buf, oldsize);
+ kmem_free(buf);
dp->i_d.di_size = newsize;
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
}
@@ -1251,7 +1264,7 @@ xfs_dir2_sf_toino8(
/*
* Clean up the inode.
*/
- kmem_free(buf, oldsize);
+ kmem_free(buf);
dp->i_d.di_size = newsize;
xfs_trans_log_inode(args->trans, dp, XFS_ILOG_CORE | XFS_ILOG_DDATA);
}
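/*
 * A minimal sketch (not part of this patch) of the interface change behind
 * the many kmem_free() hunks in this diff: freeing no longer takes the
 * allocation size, because the allocator tracks it internally.  The
 * *_sketch names and the malloc/free backing are illustrative assumptions.
 */
#include <stdlib.h>

static inline void *kmem_alloc_sketch(size_t size)
{
	return malloc(size);	/* allocator records the size itself */
}

static inline void kmem_free_sketch(void *ptr)
{
	free(ptr);		/* no size argument required on free */
}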
diff --git a/fs/xfs/xfs_dir2_trace.c b/fs/xfs/xfs_dir2_trace.c
index f3fb2ffd6f5c..6cc7c0c681ac 100644
--- a/fs/xfs/xfs_dir2_trace.c
+++ b/fs/xfs/xfs_dir2_trace.c
@@ -85,7 +85,8 @@ xfs_dir2_trace_args(
(void *)((unsigned long)(args->inumber >> 32)),
(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
(void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)args->justcheck, NULL, NULL);
+ (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
+ NULL, NULL);
}
void
@@ -100,7 +101,7 @@ xfs_dir2_trace_args_b(
(void *)((unsigned long)(args->inumber >> 32)),
(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
(void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)args->justcheck,
+ (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
(void *)(bp ? bp->bps[0] : NULL), NULL);
}
@@ -117,7 +118,7 @@ xfs_dir2_trace_args_bb(
(void *)((unsigned long)(args->inumber >> 32)),
(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
(void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)args->justcheck,
+ (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
(void *)(lbp ? lbp->bps[0] : NULL),
(void *)(dbp ? dbp->bps[0] : NULL));
}
@@ -157,8 +158,8 @@ xfs_dir2_trace_args_db(
(void *)((unsigned long)(args->inumber >> 32)),
(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
(void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)args->justcheck, (void *)(long)db,
- (void *)dbp);
+ (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
+ (void *)(long)db, (void *)dbp);
}
void
@@ -173,7 +174,7 @@ xfs_dir2_trace_args_i(
(void *)((unsigned long)(args->inumber >> 32)),
(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
(void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)args->justcheck,
+ (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
(void *)((unsigned long)(i >> 32)),
(void *)((unsigned long)(i & 0xFFFFFFFF)));
}
@@ -190,7 +191,8 @@ xfs_dir2_trace_args_s(
(void *)((unsigned long)(args->inumber >> 32)),
(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
(void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)args->justcheck, (void *)(long)s, NULL);
+ (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
+ (void *)(long)s, NULL);
}
void
@@ -208,7 +210,7 @@ xfs_dir2_trace_args_sb(
(void *)((unsigned long)(args->inumber >> 32)),
(void *)((unsigned long)(args->inumber & 0xFFFFFFFF)),
(void *)args->dp, (void *)args->trans,
- (void *)(unsigned long)args->justcheck, (void *)(long)s,
- (void *)dbp);
+ (void *)(unsigned long)(args->op_flags & XFS_DA_OP_JUSTCHECK),
+ (void *)(long)s, (void *)dbp);
}
#endif /* XFS_DIR2_TRACE */
diff --git a/fs/xfs/xfs_error.c b/fs/xfs/xfs_error.c
index 05e5365d3c31..7380a00644c8 100644
--- a/fs/xfs/xfs_error.c
+++ b/fs/xfs/xfs_error.c
@@ -150,8 +150,7 @@ xfs_errortag_clearall(xfs_mount_t *mp, int loud)
xfs_etest[i]);
xfs_etest[i] = 0;
xfs_etest_fsid[i] = 0LL;
- kmem_free(xfs_etest_fsname[i],
- strlen(xfs_etest_fsname[i]) + 1);
+ kmem_free(xfs_etest_fsname[i]);
xfs_etest_fsname[i] = NULL;
}
}
@@ -175,7 +174,7 @@ xfs_fs_vcmn_err(int level, xfs_mount_t *mp, char *fmt, va_list ap)
newfmt = kmem_alloc(len, KM_SLEEP);
sprintf(newfmt, "Filesystem \"%s\": %s", mp->m_fsname, fmt);
icmn_err(level, newfmt, ap);
- kmem_free(newfmt, len);
+ kmem_free(newfmt);
} else {
icmn_err(level, fmt, ap);
}
diff --git a/fs/xfs/xfs_extfree_item.c b/fs/xfs/xfs_extfree_item.c
index 132bd07b9bb8..8aa28f751b2a 100644
--- a/fs/xfs/xfs_extfree_item.c
+++ b/fs/xfs/xfs_extfree_item.c
@@ -41,8 +41,7 @@ xfs_efi_item_free(xfs_efi_log_item_t *efip)
int nexts = efip->efi_format.efi_nextents;
if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
- kmem_free(efip, sizeof(xfs_efi_log_item_t) +
- (nexts - 1) * sizeof(xfs_extent_t));
+ kmem_free(efip);
} else {
kmem_zone_free(xfs_efi_zone, efip);
}
@@ -374,8 +373,7 @@ xfs_efd_item_free(xfs_efd_log_item_t *efdp)
int nexts = efdp->efd_format.efd_nextents;
if (nexts > XFS_EFD_MAX_FAST_EXTENTS) {
- kmem_free(efdp, sizeof(xfs_efd_log_item_t) +
- (nexts - 1) * sizeof(xfs_extent_t));
+ kmem_free(efdp);
} else {
kmem_zone_free(xfs_efd_zone, efdp);
}
diff --git a/fs/xfs/xfs_fs.h b/fs/xfs/xfs_fs.h
index 3bed6433d050..6ca749897c58 100644
--- a/fs/xfs/xfs_fs.h
+++ b/fs/xfs/xfs_fs.h
@@ -239,6 +239,7 @@ typedef struct xfs_fsop_resblks {
#define XFS_FSOP_GEOM_FLAGS_LOGV2 0x0100 /* log format version 2 */
#define XFS_FSOP_GEOM_FLAGS_SECTOR 0x0200 /* sector sizes >1BB */
#define XFS_FSOP_GEOM_FLAGS_ATTR2 0x0400 /* inline attributes rework */
+#define XFS_FSOP_GEOM_FLAGS_DIRV2CI 0x1000 /* ASCII only CI names */
#define XFS_FSOP_GEOM_FLAGS_LAZYSB 0x4000 /* lazy superblock counters */
diff --git a/fs/xfs/xfs_fsops.c b/fs/xfs/xfs_fsops.c
index 381ebda4f7bc..84583cf73db3 100644
--- a/fs/xfs/xfs_fsops.c
+++ b/fs/xfs/xfs_fsops.c
@@ -95,6 +95,8 @@ xfs_fs_geometry(
XFS_FSOP_GEOM_FLAGS_DIRV2 : 0) |
(xfs_sb_version_hassector(&mp->m_sb) ?
XFS_FSOP_GEOM_FLAGS_SECTOR : 0) |
+ (xfs_sb_version_hasasciici(&mp->m_sb) ?
+ XFS_FSOP_GEOM_FLAGS_DIRV2CI : 0) |
(xfs_sb_version_haslazysbcount(&mp->m_sb) ?
XFS_FSOP_GEOM_FLAGS_LAZYSB : 0) |
(xfs_sb_version_hasattr2(&mp->m_sb) ?
@@ -625,7 +627,7 @@ xfs_fs_goingdown(
xfs_force_shutdown(mp, SHUTDOWN_FORCE_UMOUNT);
thaw_bdev(sb->s_bdev, sb);
}
-
+
break;
}
case XFS_FSOP_GOING_FLAGS_LOGFLUSH:
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index cf0bb9c1d621..199a36ac8e2d 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -1763,67 +1763,6 @@ xfs_itruncate_finish(
return 0;
}
-
-/*
- * xfs_igrow_start
- *
- * Do the first part of growing a file: zero any data in the last
- * block that is beyond the old EOF. We need to do this before
- * the inode is joined to the transaction to modify the i_size.
- * That way we can drop the inode lock and call into the buffer
- * cache to get the buffer mapping the EOF.
- */
-int
-xfs_igrow_start(
- xfs_inode_t *ip,
- xfs_fsize_t new_size,
- cred_t *credp)
-{
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
- ASSERT(new_size > ip->i_size);
-
- /*
- * Zero any pages that may have been created by
- * xfs_write_file() beyond the end of the file
- * and any blocks between the old and new file sizes.
- */
- return xfs_zero_eof(ip, new_size, ip->i_size);
-}
-
-/*
- * xfs_igrow_finish
- *
- * This routine is called to extend the size of a file.
- * The inode must have both the iolock and the ilock locked
- * for update and it must be a part of the current transaction.
- * The xfs_igrow_start() function must have been called previously.
- * If the change_flag is not zero, the inode change timestamp will
- * be updated.
- */
-void
-xfs_igrow_finish(
- xfs_trans_t *tp,
- xfs_inode_t *ip,
- xfs_fsize_t new_size,
- int change_flag)
-{
- ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL|XFS_IOLOCK_EXCL));
- ASSERT(ip->i_transp == tp);
- ASSERT(new_size > ip->i_size);
-
- /*
- * Update the file size. Update the inode change timestamp
- * if change_flag set.
- */
- ip->i_d.di_size = new_size;
- ip->i_size = new_size;
- if (change_flag)
- xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
- xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
-
-}
-
-
/*
* This is called when the inode's link count goes to 0.
* We place the on-disk inode on a list in the AGI. It
@@ -2258,7 +2197,7 @@ xfs_ifree_cluster(
xfs_trans_binval(tp, bp);
}
- kmem_free(ip_found, ninodes * sizeof(xfs_inode_t *));
+ kmem_free(ip_found);
xfs_put_perag(mp, pag);
}
@@ -2470,7 +2409,7 @@ xfs_iroot_realloc(
(int)new_size);
memcpy(np, op, new_max * (uint)sizeof(xfs_dfsbno_t));
}
- kmem_free(ifp->if_broot, ifp->if_broot_bytes);
+ kmem_free(ifp->if_broot);
ifp->if_broot = new_broot;
ifp->if_broot_bytes = (int)new_size;
ASSERT(ifp->if_broot_bytes <=
@@ -2514,7 +2453,7 @@ xfs_idata_realloc(
if (new_size == 0) {
if (ifp->if_u1.if_data != ifp->if_u2.if_inline_data) {
- kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
+ kmem_free(ifp->if_u1.if_data);
}
ifp->if_u1.if_data = NULL;
real_size = 0;
@@ -2529,7 +2468,7 @@ xfs_idata_realloc(
ASSERT(ifp->if_real_bytes != 0);
memcpy(ifp->if_u2.if_inline_data, ifp->if_u1.if_data,
new_size);
- kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
+ kmem_free(ifp->if_u1.if_data);
ifp->if_u1.if_data = ifp->if_u2.if_inline_data;
}
real_size = 0;
@@ -2636,7 +2575,7 @@ xfs_idestroy_fork(
ifp = XFS_IFORK_PTR(ip, whichfork);
if (ifp->if_broot != NULL) {
- kmem_free(ifp->if_broot, ifp->if_broot_bytes);
+ kmem_free(ifp->if_broot);
ifp->if_broot = NULL;
}
@@ -2650,7 +2589,7 @@ xfs_idestroy_fork(
if ((ifp->if_u1.if_data != ifp->if_u2.if_inline_data) &&
(ifp->if_u1.if_data != NULL)) {
ASSERT(ifp->if_real_bytes != 0);
- kmem_free(ifp->if_u1.if_data, ifp->if_real_bytes);
+ kmem_free(ifp->if_u1.if_data);
ifp->if_u1.if_data = NULL;
ifp->if_real_bytes = 0;
}
@@ -2974,6 +2913,7 @@ xfs_iflush_cluster(
xfs_mount_t *mp = ip->i_mount;
xfs_perag_t *pag = xfs_get_perag(mp, ip->i_ino);
unsigned long first_index, mask;
+ unsigned long inodes_per_cluster;
int ilist_size;
xfs_inode_t **ilist;
xfs_inode_t *iq;
@@ -2985,8 +2925,9 @@ xfs_iflush_cluster(
ASSERT(pag->pagi_inodeok);
ASSERT(pag->pag_ici_init);
- ilist_size = XFS_INODE_CLUSTER_SIZE(mp) * sizeof(xfs_inode_t *);
- ilist = kmem_alloc(ilist_size, KM_MAYFAIL);
+ inodes_per_cluster = XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog;
+ ilist_size = inodes_per_cluster * sizeof(xfs_inode_t *);
+ ilist = kmem_alloc(ilist_size, KM_MAYFAIL|KM_NOFS);
if (!ilist)
return 0;
@@ -2995,8 +2936,7 @@ xfs_iflush_cluster(
read_lock(&pag->pag_ici_lock);
/* really need a gang lookup range call here */
nr_found = radix_tree_gang_lookup(&pag->pag_ici_root, (void**)ilist,
- first_index,
- XFS_INODE_CLUSTER_SIZE(mp));
+ first_index, inodes_per_cluster);
if (nr_found == 0)
goto out_free;
@@ -3057,7 +2997,7 @@ xfs_iflush_cluster(
out_free:
read_unlock(&pag->pag_ici_lock);
- kmem_free(ilist, ilist_size);
+ kmem_free(ilist);
return 0;
@@ -3101,7 +3041,7 @@ cluster_corrupt_out:
* Unlocks the flush lock
*/
xfs_iflush_abort(iq);
- kmem_free(ilist, ilist_size);
+ kmem_free(ilist);
return XFS_ERROR(EFSCORRUPTED);
}
@@ -3835,7 +3775,7 @@ xfs_iext_add_indirect_multi(
erp = xfs_iext_irec_new(ifp, erp_idx);
}
memmove(&erp->er_extbuf[i], nex2_ep, byte_diff);
- kmem_free(nex2_ep, byte_diff);
+ kmem_free(nex2_ep);
erp->er_extcount += nex2;
xfs_iext_irec_update_extoffs(ifp, erp_idx + 1, nex2);
}
@@ -4111,7 +4051,7 @@ xfs_iext_direct_to_inline(
*/
memcpy(ifp->if_u2.if_inline_ext, ifp->if_u1.if_extents,
nextents * sizeof(xfs_bmbt_rec_t));
- kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
+ kmem_free(ifp->if_u1.if_extents);
ifp->if_u1.if_extents = ifp->if_u2.if_inline_ext;
ifp->if_real_bytes = 0;
}
@@ -4185,7 +4125,7 @@ xfs_iext_indirect_to_direct(
ASSERT(ifp->if_real_bytes == XFS_IEXT_BUFSZ);
ep = ifp->if_u1.if_ext_irec->er_extbuf;
- kmem_free(ifp->if_u1.if_ext_irec, sizeof(xfs_ext_irec_t));
+ kmem_free(ifp->if_u1.if_ext_irec);
ifp->if_flags &= ~XFS_IFEXTIREC;
ifp->if_u1.if_extents = ep;
ifp->if_bytes = size;
@@ -4211,7 +4151,7 @@ xfs_iext_destroy(
}
ifp->if_flags &= ~XFS_IFEXTIREC;
} else if (ifp->if_real_bytes) {
- kmem_free(ifp->if_u1.if_extents, ifp->if_real_bytes);
+ kmem_free(ifp->if_u1.if_extents);
} else if (ifp->if_bytes) {
memset(ifp->if_u2.if_inline_ext, 0, XFS_INLINE_EXTS *
sizeof(xfs_bmbt_rec_t));
@@ -4482,7 +4422,7 @@ xfs_iext_irec_remove(
if (erp->er_extbuf) {
xfs_iext_irec_update_extoffs(ifp, erp_idx + 1,
-erp->er_extcount);
- kmem_free(erp->er_extbuf, XFS_IEXT_BUFSZ);
+ kmem_free(erp->er_extbuf);
}
/* Compact extent records */
erp = ifp->if_u1.if_ext_irec;
@@ -4500,8 +4440,7 @@ xfs_iext_irec_remove(
xfs_iext_realloc_indirect(ifp,
nlists * sizeof(xfs_ext_irec_t));
} else {
- kmem_free(ifp->if_u1.if_ext_irec,
- sizeof(xfs_ext_irec_t));
+ kmem_free(ifp->if_u1.if_ext_irec);
}
ifp->if_real_bytes = nlists * XFS_IEXT_BUFSZ;
}
@@ -4570,7 +4509,7 @@ xfs_iext_irec_compact_pages(
* so er_extoffs don't get modified in
* xfs_iext_irec_remove.
*/
- kmem_free(erp_next->er_extbuf, XFS_IEXT_BUFSZ);
+ kmem_free(erp_next->er_extbuf);
erp_next->er_extbuf = NULL;
xfs_iext_irec_remove(ifp, erp_idx + 1);
nlists = ifp->if_real_bytes / XFS_IEXT_BUFSZ;
@@ -4613,8 +4552,7 @@ xfs_iext_irec_compact_full(
* so er_extoffs don't get modified in
* xfs_iext_irec_remove.
*/
- kmem_free(erp_next->er_extbuf,
- erp_next->er_extcount * sizeof(xfs_bmbt_rec_t));
+ kmem_free(erp_next->er_extbuf);
erp_next->er_extbuf = NULL;
xfs_iext_irec_remove(ifp, erp_idx + 1);
erp = &ifp->if_u1.if_ext_irec[erp_idx];
diff --git a/fs/xfs/xfs_inode.h b/fs/xfs/xfs_inode.h
index 0a999fee4f03..17a04b6321ed 100644
--- a/fs/xfs/xfs_inode.h
+++ b/fs/xfs/xfs_inode.h
@@ -507,9 +507,6 @@ int xfs_itruncate_start(xfs_inode_t *, uint, xfs_fsize_t);
int xfs_itruncate_finish(struct xfs_trans **, xfs_inode_t *,
xfs_fsize_t, int, int);
int xfs_iunlink(struct xfs_trans *, xfs_inode_t *);
-int xfs_igrow_start(xfs_inode_t *, xfs_fsize_t, struct cred *);
-void xfs_igrow_finish(struct xfs_trans *, xfs_inode_t *,
- xfs_fsize_t, int);
void xfs_idestroy_fork(xfs_inode_t *, int);
void xfs_idestroy(xfs_inode_t *);
diff --git a/fs/xfs/xfs_inode_item.c b/fs/xfs/xfs_inode_item.c
index 167b33f15772..0eee08a32c26 100644
--- a/fs/xfs/xfs_inode_item.c
+++ b/fs/xfs/xfs_inode_item.c
@@ -686,7 +686,7 @@ xfs_inode_item_unlock(
ASSERT(ip->i_d.di_nextents > 0);
ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_DEXT);
ASSERT(ip->i_df.if_bytes > 0);
- kmem_free(iip->ili_extents_buf, ip->i_df.if_bytes);
+ kmem_free(iip->ili_extents_buf);
iip->ili_extents_buf = NULL;
}
if (iip->ili_aextents_buf != NULL) {
@@ -694,7 +694,7 @@ xfs_inode_item_unlock(
ASSERT(ip->i_d.di_anextents > 0);
ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_AEXT);
ASSERT(ip->i_afp->if_bytes > 0);
- kmem_free(iip->ili_aextents_buf, ip->i_afp->if_bytes);
+ kmem_free(iip->ili_aextents_buf);
iip->ili_aextents_buf = NULL;
}
@@ -957,8 +957,7 @@ xfs_inode_item_destroy(
{
#ifdef XFS_TRANS_DEBUG
if (ip->i_itemp->ili_root_size != 0) {
- kmem_free(ip->i_itemp->ili_orig_root,
- ip->i_itemp->ili_root_size);
+ kmem_free(ip->i_itemp->ili_orig_root);
}
#endif
kmem_zone_free(xfs_ili_zone, ip->i_itemp);
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index 419de15aeb43..9a3ef9dcaeb9 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -257,7 +257,7 @@ xfs_bulkstat_one(
*ubused = error;
out_free:
- kmem_free(buf, sizeof(*buf));
+ kmem_free(buf);
return error;
}
@@ -708,7 +708,7 @@ xfs_bulkstat(
/*
* Done, we're either out of filesystem or space to put the data.
*/
- kmem_free(irbuf, irbsize);
+ kmem_free(irbuf);
*ubcountp = ubelem;
/*
* Found some inodes, return them now and return the error next time.
@@ -914,7 +914,7 @@ xfs_inumbers(
}
*lastino = XFS_AGINO_TO_INO(mp, agno, agino);
}
- kmem_free(buffer, bcount * sizeof(*buffer));
+ kmem_free(buffer);
if (cur)
xfs_btree_del_cursor(cur, (error ? XFS_BTREE_ERROR :
XFS_BTREE_NOERROR));
diff --git a/fs/xfs/xfs_log.c b/fs/xfs/xfs_log.c
index afaee301b0ee..2497de885b0a 100644
--- a/fs/xfs/xfs_log.c
+++ b/fs/xfs/xfs_log.c
@@ -226,20 +226,24 @@ xlog_grant_sub_space(struct log *log, int bytes)
static void
xlog_grant_add_space_write(struct log *log, int bytes)
{
- log->l_grant_write_bytes += bytes;
- if (log->l_grant_write_bytes > log->l_logsize) {
- log->l_grant_write_bytes -= log->l_logsize;
+ int tmp = log->l_logsize - log->l_grant_write_bytes;
+ if (tmp > bytes)
+ log->l_grant_write_bytes += bytes;
+ else {
log->l_grant_write_cycle++;
+ log->l_grant_write_bytes = bytes - tmp;
}
}
static void
xlog_grant_add_space_reserve(struct log *log, int bytes)
{
- log->l_grant_reserve_bytes += bytes;
- if (log->l_grant_reserve_bytes > log->l_logsize) {
- log->l_grant_reserve_bytes -= log->l_logsize;
+ int tmp = log->l_logsize - log->l_grant_reserve_bytes;
+ if (tmp > bytes)
+ log->l_grant_reserve_bytes += bytes;
+ else {
log->l_grant_reserve_cycle++;
+ log->l_grant_reserve_bytes = bytes - tmp;
}
}
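/*
 * A minimal sketch (not part of this patch) of the wrap-around arithmetic
 * shared by the two grant-head helpers above: if the space left in the
 * current cycle covers the request, just advance the byte count; otherwise
 * bump the cycle and carry the overflow into the new one.  The struct and
 * function names below are illustrative assumptions.
 */
struct grant_head_sketch {
	int cycle;		/* number of times the head has wrapped */
	int bytes;		/* offset within the current cycle */
};

static void grant_add_space_sketch(struct grant_head_sketch *head,
				   int logsize, int bytes)
{
	int tmp = logsize - head->bytes;	/* room left before wrapping */

	if (tmp > bytes) {
		head->bytes += bytes;
	} else {
		head->cycle++;			/* wrapped past the log end */
		head->bytes = bytes - tmp;	/* carry into the new cycle */
	}
}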
@@ -1228,7 +1232,7 @@ xlog_alloc_log(xfs_mount_t *mp,
spin_lock_init(&log->l_icloglock);
spin_lock_init(&log->l_grant_lock);
- initnsema(&log->l_flushsema, 0, "ic-flush");
+ sv_init(&log->l_flush_wait, 0, "flush_wait");
/* log record size must be multiple of BBSIZE; see xlog_rec_header_t */
ASSERT((XFS_BUF_SIZE(bp) & BBMASK) == 0);
@@ -1570,10 +1574,9 @@ xlog_dealloc_log(xlog_t *log)
}
#endif
next_iclog = iclog->ic_next;
- kmem_free(iclog, sizeof(xlog_in_core_t));
+ kmem_free(iclog);
iclog = next_iclog;
}
- freesema(&log->l_flushsema);
spinlock_destroy(&log->l_icloglock);
spinlock_destroy(&log->l_grant_lock);
@@ -1587,7 +1590,7 @@ xlog_dealloc_log(xlog_t *log)
}
#endif
log->l_mp->m_log = NULL;
- kmem_free(log, sizeof(xlog_t));
+ kmem_free(log);
} /* xlog_dealloc_log */
/*
@@ -2097,6 +2100,7 @@ xlog_state_do_callback(
int funcdidcallbacks; /* flag: function did callbacks */
int repeats; /* for issuing console warnings if
* looping too many times */
+ int wake = 0;
spin_lock(&log->l_icloglock);
first_iclog = iclog = log->l_iclog;
@@ -2278,15 +2282,13 @@ xlog_state_do_callback(
}
#endif
- flushcnt = 0;
- if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR)) {
- flushcnt = log->l_flushcnt;
- log->l_flushcnt = 0;
- }
+ if (log->l_iclog->ic_state & (XLOG_STATE_ACTIVE|XLOG_STATE_IOERROR))
+ wake = 1;
spin_unlock(&log->l_icloglock);
- while (flushcnt--)
- vsema(&log->l_flushsema);
-} /* xlog_state_do_callback */
+
+ if (wake)
+ sv_broadcast(&log->l_flush_wait);
+}
/*
@@ -2384,16 +2386,15 @@ restart:
}
iclog = log->l_iclog;
- if (! (iclog->ic_state == XLOG_STATE_ACTIVE)) {
- log->l_flushcnt++;
- spin_unlock(&log->l_icloglock);
+ if (iclog->ic_state != XLOG_STATE_ACTIVE) {
xlog_trace_iclog(iclog, XLOG_TRACE_SLEEP_FLUSH);
XFS_STATS_INC(xs_log_noiclogs);
- /* Ensure that log writes happen */
- psema(&log->l_flushsema, PINOD);
+
+ /* Wait for log writes to have flushed */
+ sv_wait(&log->l_flush_wait, 0, &log->l_icloglock, 0);
goto restart;
}
- ASSERT(iclog->ic_state == XLOG_STATE_ACTIVE);
+
head = &iclog->ic_header;
atomic_inc(&iclog->ic_refcnt); /* prevents sync */
diff --git a/fs/xfs/xfs_log_priv.h b/fs/xfs/xfs_log_priv.h
index 8952a392b5f3..6245913196b4 100644
--- a/fs/xfs/xfs_log_priv.h
+++ b/fs/xfs/xfs_log_priv.h
@@ -423,10 +423,8 @@ typedef struct log {
int l_logBBsize; /* size of log in BB chunks */
/* The following block of fields are changed while holding icloglock */
- sema_t l_flushsema ____cacheline_aligned_in_smp;
- /* iclog flushing semaphore */
- int l_flushcnt; /* # of procs waiting on this
- * sema */
+ sv_t l_flush_wait ____cacheline_aligned_in_smp;
+ /* waiting for iclog flush */
int l_covered_state;/* state of "covering disk
* log entries" */
xlog_in_core_t *l_iclog; /* head log queue */
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index e65ab4af0955..9eb722ec744e 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -1715,8 +1715,7 @@ xlog_check_buffer_cancelled(
} else {
prevp->bc_next = bcp->bc_next;
}
- kmem_free(bcp,
- sizeof(xfs_buf_cancel_t));
+ kmem_free(bcp);
}
}
return 1;
@@ -2519,7 +2518,7 @@ write_inode_buffer:
error:
if (need_free)
- kmem_free(in_f, sizeof(*in_f));
+ kmem_free(in_f);
return XFS_ERROR(error);
}
@@ -2830,16 +2829,14 @@ xlog_recover_free_trans(
item = item->ri_next;
/* Free the regions in the item. */
for (i = 0; i < free_item->ri_cnt; i++) {
- kmem_free(free_item->ri_buf[i].i_addr,
- free_item->ri_buf[i].i_len);
+ kmem_free(free_item->ri_buf[i].i_addr);
}
/* Free the item itself */
- kmem_free(free_item->ri_buf,
- (free_item->ri_total * sizeof(xfs_log_iovec_t)));
- kmem_free(free_item, sizeof(xlog_recover_item_t));
+ kmem_free(free_item->ri_buf);
+ kmem_free(free_item);
} while (first_item != item);
/* Free the transaction recover structure */
- kmem_free(trans, sizeof(xlog_recover_t));
+ kmem_free(trans);
}
STATIC int
@@ -3786,8 +3783,7 @@ xlog_do_log_recovery(
error = xlog_do_recovery_pass(log, head_blk, tail_blk,
XLOG_RECOVER_PASS1);
if (error != 0) {
- kmem_free(log->l_buf_cancel_table,
- XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
+ kmem_free(log->l_buf_cancel_table);
log->l_buf_cancel_table = NULL;
return error;
}
@@ -3806,8 +3802,7 @@ xlog_do_log_recovery(
}
#endif /* DEBUG */
- kmem_free(log->l_buf_cancel_table,
- XLOG_BC_TABLE_SIZE * sizeof(xfs_buf_cancel_t*));
+ kmem_free(log->l_buf_cancel_table);
log->l_buf_cancel_table = NULL;
return error;
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index da3988453b71..1bfaa204f689 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -47,12 +47,10 @@
STATIC int xfs_mount_log_sb(xfs_mount_t *, __int64_t);
STATIC int xfs_uuid_mount(xfs_mount_t *);
-STATIC void xfs_uuid_unmount(xfs_mount_t *mp);
STATIC void xfs_unmountfs_wait(xfs_mount_t *);
#ifdef HAVE_PERCPU_SB
-STATIC void xfs_icsb_destroy_counters(xfs_mount_t *);
STATIC void xfs_icsb_balance_counter(xfs_mount_t *, xfs_sb_field_t,
int);
STATIC void xfs_icsb_balance_counter_locked(xfs_mount_t *, xfs_sb_field_t,
@@ -63,7 +61,6 @@ STATIC void xfs_icsb_disable_counter(xfs_mount_t *, xfs_sb_field_t);
#else
-#define xfs_icsb_destroy_counters(mp) do { } while (0)
#define xfs_icsb_balance_counter(mp, a, b) do { } while (0)
#define xfs_icsb_balance_counter_locked(mp, a, b) do { } while (0)
#define xfs_icsb_modify_counters(mp, a, b, c) do { } while (0)
@@ -126,33 +123,11 @@ static const struct {
};
/*
- * Return a pointer to an initialized xfs_mount structure.
- */
-xfs_mount_t *
-xfs_mount_init(void)
-{
- xfs_mount_t *mp;
-
- mp = kmem_zalloc(sizeof(xfs_mount_t), KM_SLEEP);
-
- if (xfs_icsb_init_counters(mp)) {
- mp->m_flags |= XFS_MOUNT_NO_PERCPU_SB;
- }
-
- spin_lock_init(&mp->m_sb_lock);
- mutex_init(&mp->m_ilock);
- mutex_init(&mp->m_growlock);
- atomic_set(&mp->m_active_trans, 0);
-
- return mp;
-}
-
-/*
* Free up the resources associated with a mount structure. Assume that
* the structure was initially zeroed, so we can tell which fields got
* initialized.
*/
-void
+STATIC void
xfs_mount_free(
xfs_mount_t *mp)
{
@@ -161,11 +136,8 @@ xfs_mount_free(
for (agno = 0; agno < mp->m_maxagi; agno++)
if (mp->m_perag[agno].pagb_list)
- kmem_free(mp->m_perag[agno].pagb_list,
- sizeof(xfs_perag_busy_t) *
- XFS_PAGB_NUM_SLOTS);
- kmem_free(mp->m_perag,
- sizeof(xfs_perag_t) * mp->m_sb.sb_agcount);
+ kmem_free(mp->m_perag[agno].pagb_list);
+ kmem_free(mp->m_perag);
}
spinlock_destroy(&mp->m_ail_lock);
@@ -176,13 +148,11 @@ xfs_mount_free(
XFS_QM_DONE(mp);
if (mp->m_fsname != NULL)
- kmem_free(mp->m_fsname, mp->m_fsname_len);
+ kmem_free(mp->m_fsname);
if (mp->m_rtname != NULL)
- kmem_free(mp->m_rtname, strlen(mp->m_rtname) + 1);
+ kmem_free(mp->m_rtname);
if (mp->m_logname != NULL)
- kmem_free(mp->m_logname, strlen(mp->m_logname) + 1);
-
- xfs_icsb_destroy_counters(mp);
+ kmem_free(mp->m_logname);
}
/*
@@ -994,9 +964,19 @@ xfs_mountfs(
* Re-check for ATTR2 in case it was found in bad_features2
* slot.
*/
- if (xfs_sb_version_hasattr2(&mp->m_sb))
+ if (xfs_sb_version_hasattr2(&mp->m_sb) &&
+ !(mp->m_flags & XFS_MOUNT_NOATTR2))
mp->m_flags |= XFS_MOUNT_ATTR2;
+ }
+
+ if (xfs_sb_version_hasattr2(&mp->m_sb) &&
+ (mp->m_flags & XFS_MOUNT_NOATTR2)) {
+ xfs_sb_version_removeattr2(&mp->m_sb);
+ update_flags |= XFS_SB_FEATURES2;
+ /* update sb_versionnum for the clearing of the morebits */
+ if (!sbp->sb_features2)
+ update_flags |= XFS_SB_VERSIONNUM;
}
/*
@@ -1255,15 +1235,13 @@ xfs_mountfs(
error2:
for (agno = 0; agno < sbp->sb_agcount; agno++)
if (mp->m_perag[agno].pagb_list)
- kmem_free(mp->m_perag[agno].pagb_list,
- sizeof(xfs_perag_busy_t) * XFS_PAGB_NUM_SLOTS);
- kmem_free(mp->m_perag, sbp->sb_agcount * sizeof(xfs_perag_t));
+ kmem_free(mp->m_perag[agno].pagb_list);
+ kmem_free(mp->m_perag);
mp->m_perag = NULL;
/* FALLTHROUGH */
error1:
if (uuid_mounted)
- xfs_uuid_unmount(mp);
- xfs_freesb(mp);
+ uuid_table_remove(&mp->m_sb.sb_uuid);
return error;
}
@@ -1274,7 +1252,7 @@ xfs_mountfs(
* log and makes sure that incore structures are freed.
*/
int
-xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
+xfs_unmountfs(xfs_mount_t *mp)
{
__uint64_t resblks;
int error = 0;
@@ -1341,9 +1319,8 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
*/
ASSERT(mp->m_inodes == NULL);
- xfs_unmountfs_close(mp, cr);
if ((mp->m_flags & XFS_MOUNT_NOUUID) == 0)
- xfs_uuid_unmount(mp);
+ uuid_table_remove(&mp->m_sb.sb_uuid);
#if defined(DEBUG) || defined(INDUCE_IO_ERROR)
xfs_errortag_clearall(mp, 0);
@@ -1352,16 +1329,6 @@ xfs_unmountfs(xfs_mount_t *mp, struct cred *cr)
return 0;
}
-void
-xfs_unmountfs_close(xfs_mount_t *mp, struct cred *cr)
-{
- if (mp->m_logdev_targp && mp->m_logdev_targp != mp->m_ddev_targp)
- xfs_free_buftarg(mp->m_logdev_targp, 1);
- if (mp->m_rtdev_targp)
- xfs_free_buftarg(mp->m_rtdev_targp, 1);
- xfs_free_buftarg(mp->m_ddev_targp, 0);
-}
-
STATIC void
xfs_unmountfs_wait(xfs_mount_t *mp)
{
@@ -1905,16 +1872,6 @@ xfs_uuid_mount(
}
/*
- * Remove filesystem from the UUID table.
- */
-STATIC void
-xfs_uuid_unmount(
- xfs_mount_t *mp)
-{
- uuid_table_remove(&mp->m_sb.sb_uuid);
-}
-
-/*
* Used to log changes to the superblock unit and width fields which could
* be altered by the mount options, as well as any potential sb_features2
* fixup. Only the first superblock is updated.
@@ -1928,7 +1885,8 @@ xfs_mount_log_sb(
int error;
ASSERT(fields & (XFS_SB_UNIT | XFS_SB_WIDTH | XFS_SB_UUID |
- XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2));
+ XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2 |
+ XFS_SB_VERSIONNUM));
tp = xfs_trans_alloc(mp, XFS_TRANS_SB_UNIT);
error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128, 0, 0,
@@ -2109,7 +2067,7 @@ xfs_icsb_reinit_counters(
xfs_icsb_unlock(mp);
}
-STATIC void
+void
xfs_icsb_destroy_counters(
xfs_mount_t *mp)
{
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index 63e0693a358a..dbba68f8c771 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -61,6 +61,7 @@ struct xfs_bmap_free;
struct xfs_extdelta;
struct xfs_swapext;
struct xfs_mru_cache;
+struct xfs_nameops;
/*
* Prototypes and functions for the Data Migration subsystem.
@@ -210,12 +211,14 @@ typedef struct xfs_icsb_cnts {
extern int xfs_icsb_init_counters(struct xfs_mount *);
extern void xfs_icsb_reinit_counters(struct xfs_mount *);
+extern void xfs_icsb_destroy_counters(struct xfs_mount *);
extern void xfs_icsb_sync_counters(struct xfs_mount *, int);
extern void xfs_icsb_sync_counters_locked(struct xfs_mount *, int);
#else
-#define xfs_icsb_init_counters(mp) (0)
-#define xfs_icsb_reinit_counters(mp) do { } while (0)
+#define xfs_icsb_init_counters(mp) (0)
+#define xfs_icsb_destroy_counters(mp) do { } while (0)
+#define xfs_icsb_reinit_counters(mp) do { } while (0)
#define xfs_icsb_sync_counters(mp, flags) do { } while (0)
#define xfs_icsb_sync_counters_locked(mp, flags) do { } while (0)
#endif
@@ -313,6 +316,7 @@ typedef struct xfs_mount {
__uint8_t m_inode_quiesce;/* call quiesce on new inodes.
field governed by m_ilock */
__uint8_t m_sectbb_log; /* sectlog - BBSHIFT */
+ const struct xfs_nameops *m_dirnameops; /* vector of dir name ops */
int m_dirblksize; /* directory block sz--bytes */
int m_dirblkfsbs; /* directory block sz--fsbs */
xfs_dablk_t m_dirdatablk; /* blockno of dir data v2 */
@@ -337,6 +341,7 @@ typedef struct xfs_mount {
spinlock_t m_sync_lock; /* work item list lock */
int m_sync_seq; /* sync thread generation no. */
wait_queue_head_t m_wait_single_sync_task;
+ struct vfsmount *m_vfsmount;
} xfs_mount_t;
/*
@@ -378,6 +383,7 @@ typedef struct xfs_mount {
counters */
#define XFS_MOUNT_FILESTREAMS (1ULL << 24) /* enable the filestreams
allocator */
+#define XFS_MOUNT_NOATTR2 (1ULL << 25) /* disable use of attr2 format */
/*
@@ -510,15 +516,12 @@ typedef struct xfs_mod_sb {
#define XFS_MOUNT_ILOCK(mp) mutex_lock(&((mp)->m_ilock))
#define XFS_MOUNT_IUNLOCK(mp) mutex_unlock(&((mp)->m_ilock))
-extern xfs_mount_t *xfs_mount_init(void);
extern void xfs_mod_sb(xfs_trans_t *, __int64_t);
extern int xfs_log_sbcount(xfs_mount_t *, uint);
-extern void xfs_mount_free(xfs_mount_t *mp);
extern int xfs_mountfs(xfs_mount_t *mp, int);
extern void xfs_mountfs_check_barriers(xfs_mount_t *mp);
-extern int xfs_unmountfs(xfs_mount_t *, struct cred *);
-extern void xfs_unmountfs_close(xfs_mount_t *, struct cred *);
+extern int xfs_unmountfs(xfs_mount_t *);
extern int xfs_unmountfs_writesb(xfs_mount_t *);
extern int xfs_unmount_flush(xfs_mount_t *, int);
extern int xfs_mod_incore_sb(xfs_mount_t *, xfs_sb_field_t, int64_t, int);
diff --git a/fs/xfs/xfs_mru_cache.c b/fs/xfs/xfs_mru_cache.c
index a0b2c0a2589a..26d14a1e0e14 100644
--- a/fs/xfs/xfs_mru_cache.c
+++ b/fs/xfs/xfs_mru_cache.c
@@ -382,9 +382,9 @@ xfs_mru_cache_create(
exit:
if (err && mru && mru->lists)
- kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
+ kmem_free(mru->lists);
if (err && mru)
- kmem_free(mru, sizeof(*mru));
+ kmem_free(mru);
return err;
}
@@ -424,8 +424,8 @@ xfs_mru_cache_destroy(
xfs_mru_cache_flush(mru);
- kmem_free(mru->lists, mru->grp_count * sizeof(*mru->lists));
- kmem_free(mru, sizeof(*mru));
+ kmem_free(mru->lists);
+ kmem_free(mru);
}
/*
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index a0dc6e5bc5b9..bf87a5913504 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -2062,7 +2062,7 @@ xfs_growfs_rt(
/*
* Free the fake mp structure.
*/
- kmem_free(nmp, sizeof(*nmp));
+ kmem_free(nmp);
return error;
}
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h
index d904efe7f871..3f8cf1587f4c 100644
--- a/fs/xfs/xfs_sb.h
+++ b/fs/xfs/xfs_sb.h
@@ -46,10 +46,12 @@ struct xfs_mount;
#define XFS_SB_VERSION_SECTORBIT 0x0800
#define XFS_SB_VERSION_EXTFLGBIT 0x1000
#define XFS_SB_VERSION_DIRV2BIT 0x2000
+#define XFS_SB_VERSION_BORGBIT 0x4000 /* ASCII only case-insens. */
#define XFS_SB_VERSION_MOREBITSBIT 0x8000
#define XFS_SB_VERSION_OKSASHFBITS \
(XFS_SB_VERSION_EXTFLGBIT | \
- XFS_SB_VERSION_DIRV2BIT)
+ XFS_SB_VERSION_DIRV2BIT | \
+ XFS_SB_VERSION_BORGBIT)
#define XFS_SB_VERSION_OKREALFBITS \
(XFS_SB_VERSION_ATTRBIT | \
XFS_SB_VERSION_NLINKBIT | \
@@ -437,6 +439,12 @@ static inline int xfs_sb_version_hassector(xfs_sb_t *sbp)
((sbp)->sb_versionnum & XFS_SB_VERSION_SECTORBIT);
}
+static inline int xfs_sb_version_hasasciici(xfs_sb_t *sbp)
+{
+ return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
+ (sbp->sb_versionnum & XFS_SB_VERSION_BORGBIT);
+}
+
static inline int xfs_sb_version_hasmorebits(xfs_sb_t *sbp)
{
return (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4) && \
@@ -473,6 +481,13 @@ static inline void xfs_sb_version_addattr2(xfs_sb_t *sbp)
((sbp)->sb_features2 | XFS_SB_VERSION2_ATTR2BIT)));
}
+static inline void xfs_sb_version_removeattr2(xfs_sb_t *sbp)
+{
+ sbp->sb_features2 &= ~XFS_SB_VERSION2_ATTR2BIT;
+ if (!sbp->sb_features2)
+ sbp->sb_versionnum &= ~XFS_SB_VERSION_MOREBITSBIT;
+}
+
/*
* end of superblock version macros
*/
diff --git a/fs/xfs/xfs_trans.c b/fs/xfs/xfs_trans.c
index 140386434aa3..e4ebddd3c500 100644
--- a/fs/xfs/xfs_trans.c
+++ b/fs/xfs/xfs_trans.c
@@ -889,7 +889,7 @@ shut_us_down:
tp->t_commit_lsn = commit_lsn;
if (nvec > XFS_TRANS_LOGVEC_COUNT) {
- kmem_free(log_vector, nvec * sizeof(xfs_log_iovec_t));
+ kmem_free(log_vector);
}
/*
@@ -1265,7 +1265,7 @@ xfs_trans_committed(
ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
xfs_trans_chunk_committed(licp, tp->t_lsn, abortflag);
next_licp = licp->lic_next;
- kmem_free(licp, sizeof(xfs_log_item_chunk_t));
+ kmem_free(licp);
licp = next_licp;
}
diff --git a/fs/xfs/xfs_trans_inode.c b/fs/xfs/xfs_trans_inode.c
index 4c70bf5e9985..2a1c0f071f91 100644
--- a/fs/xfs/xfs_trans_inode.c
+++ b/fs/xfs/xfs_trans_inode.c
@@ -291,7 +291,7 @@ xfs_trans_inode_broot_debug(
iip = ip->i_itemp;
if (iip->ili_root_size != 0) {
ASSERT(iip->ili_orig_root != NULL);
- kmem_free(iip->ili_orig_root, iip->ili_root_size);
+ kmem_free(iip->ili_orig_root);
iip->ili_root_size = 0;
iip->ili_orig_root = NULL;
}
diff --git a/fs/xfs/xfs_trans_item.c b/fs/xfs/xfs_trans_item.c
index 66a09f0d894b..db5c83595526 100644
--- a/fs/xfs/xfs_trans_item.c
+++ b/fs/xfs/xfs_trans_item.c
@@ -161,7 +161,7 @@ xfs_trans_free_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
licpp = &((*licpp)->lic_next);
}
*licpp = licp->lic_next;
- kmem_free(licp, sizeof(xfs_log_item_chunk_t));
+ kmem_free(licp);
tp->t_items_free -= XFS_LIC_NUM_SLOTS;
}
}
@@ -314,7 +314,7 @@ xfs_trans_free_items(
ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
(void) xfs_trans_unlock_chunk(licp, 1, abort, NULLCOMMITLSN);
next_licp = licp->lic_next;
- kmem_free(licp, sizeof(xfs_log_item_chunk_t));
+ kmem_free(licp);
licp = next_licp;
}
@@ -363,7 +363,7 @@ xfs_trans_unlock_items(xfs_trans_t *tp, xfs_lsn_t commit_lsn)
next_licp = licp->lic_next;
if (XFS_LIC_ARE_ALL_FREE(licp)) {
*licpp = next_licp;
- kmem_free(licp, sizeof(xfs_log_item_chunk_t));
+ kmem_free(licp);
freed -= XFS_LIC_NUM_SLOTS;
} else {
licpp = &(licp->lic_next);
@@ -530,7 +530,7 @@ xfs_trans_free_busy(xfs_trans_t *tp)
lbcp = tp->t_busy.lbc_next;
while (lbcp != NULL) {
lbcq = lbcp->lbc_next;
- kmem_free(lbcp, sizeof(xfs_log_busy_chunk_t));
+ kmem_free(lbcp);
lbcp = lbcq;
}
diff --git a/fs/xfs/xfs_vfsops.c b/fs/xfs/xfs_vfsops.c
index 30bacd8bb0e5..8b5a3376c2f7 100644
--- a/fs/xfs/xfs_vfsops.c
+++ b/fs/xfs/xfs_vfsops.c
@@ -189,455 +189,6 @@ xfs_cleanup(void)
kmem_zone_destroy(xfs_log_ticket_zone);
}
-/*
- * xfs_start_flags
- *
- * This function fills in xfs_mount_t fields based on mount args.
- * Note: the superblock has _not_ yet been read in.
- */
-STATIC int
-xfs_start_flags(
- struct xfs_mount_args *ap,
- struct xfs_mount *mp)
-{
- /* Values are in BBs */
- if ((ap->flags & XFSMNT_NOALIGN) != XFSMNT_NOALIGN) {
- /*
- * At this point the superblock has not been read
- * in, therefore we do not know the block size.
- * Before the mount call ends we will convert
- * these to FSBs.
- */
- mp->m_dalign = ap->sunit;
- mp->m_swidth = ap->swidth;
- }
-
- if (ap->logbufs != -1 &&
- ap->logbufs != 0 &&
- (ap->logbufs < XLOG_MIN_ICLOGS ||
- ap->logbufs > XLOG_MAX_ICLOGS)) {
- cmn_err(CE_WARN,
- "XFS: invalid logbufs value: %d [not %d-%d]",
- ap->logbufs, XLOG_MIN_ICLOGS, XLOG_MAX_ICLOGS);
- return XFS_ERROR(EINVAL);
- }
- mp->m_logbufs = ap->logbufs;
- if (ap->logbufsize != -1 &&
- ap->logbufsize != 0 &&
- (ap->logbufsize < XLOG_MIN_RECORD_BSIZE ||
- ap->logbufsize > XLOG_MAX_RECORD_BSIZE ||
- !is_power_of_2(ap->logbufsize))) {
- cmn_err(CE_WARN,
- "XFS: invalid logbufsize: %d [not 16k,32k,64k,128k or 256k]",
- ap->logbufsize);
- return XFS_ERROR(EINVAL);
- }
- mp->m_logbsize = ap->logbufsize;
- mp->m_fsname_len = strlen(ap->fsname) + 1;
- mp->m_fsname = kmem_alloc(mp->m_fsname_len, KM_SLEEP);
- strcpy(mp->m_fsname, ap->fsname);
- if (ap->rtname[0]) {
- mp->m_rtname = kmem_alloc(strlen(ap->rtname) + 1, KM_SLEEP);
- strcpy(mp->m_rtname, ap->rtname);
- }
- if (ap->logname[0]) {
- mp->m_logname = kmem_alloc(strlen(ap->logname) + 1, KM_SLEEP);
- strcpy(mp->m_logname, ap->logname);
- }
-
- if (ap->flags & XFSMNT_WSYNC)
- mp->m_flags |= XFS_MOUNT_WSYNC;
-#if XFS_BIG_INUMS
- if (ap->flags & XFSMNT_INO64) {
- mp->m_flags |= XFS_MOUNT_INO64;
- mp->m_inoadd = XFS_INO64_OFFSET;
- }
-#endif
- if (ap->flags & XFSMNT_RETERR)
- mp->m_flags |= XFS_MOUNT_RETERR;
- if (ap->flags & XFSMNT_NOALIGN)
- mp->m_flags |= XFS_MOUNT_NOALIGN;
- if (ap->flags & XFSMNT_SWALLOC)
- mp->m_flags |= XFS_MOUNT_SWALLOC;
- if (ap->flags & XFSMNT_OSYNCISOSYNC)
- mp->m_flags |= XFS_MOUNT_OSYNCISOSYNC;
- if (ap->flags & XFSMNT_32BITINODES)
- mp->m_flags |= XFS_MOUNT_32BITINODES;
-
- if (ap->flags & XFSMNT_IOSIZE) {
- if (ap->iosizelog > XFS_MAX_IO_LOG ||
- ap->iosizelog < XFS_MIN_IO_LOG) {
- cmn_err(CE_WARN,
- "XFS: invalid log iosize: %d [not %d-%d]",
- ap->iosizelog, XFS_MIN_IO_LOG,
- XFS_MAX_IO_LOG);
- return XFS_ERROR(EINVAL);
- }
-
- mp->m_flags |= XFS_MOUNT_DFLT_IOSIZE;
- mp->m_readio_log = mp->m_writeio_log = ap->iosizelog;
- }
-
- if (ap->flags & XFSMNT_IKEEP)
- mp->m_flags |= XFS_MOUNT_IKEEP;
- if (ap->flags & XFSMNT_DIRSYNC)
- mp->m_flags |= XFS_MOUNT_DIRSYNC;
- if (ap->flags & XFSMNT_ATTR2)
- mp->m_flags |= XFS_MOUNT_ATTR2;
-
- if (ap->flags2 & XFSMNT2_COMPAT_IOSIZE)
- mp->m_flags |= XFS_MOUNT_COMPAT_IOSIZE;
-
- /*
- * no recovery flag requires a read-only mount
- */
- if (ap->flags & XFSMNT_NORECOVERY) {
- if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
- cmn_err(CE_WARN,
- "XFS: tried to mount a FS read-write without recovery!");
- return XFS_ERROR(EINVAL);
- }
- mp->m_flags |= XFS_MOUNT_NORECOVERY;
- }
-
- if (ap->flags & XFSMNT_NOUUID)
- mp->m_flags |= XFS_MOUNT_NOUUID;
- if (ap->flags & XFSMNT_BARRIER)
- mp->m_flags |= XFS_MOUNT_BARRIER;
- else
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
-
- if (ap->flags2 & XFSMNT2_FILESTREAMS)
- mp->m_flags |= XFS_MOUNT_FILESTREAMS;
-
- if (ap->flags & XFSMNT_DMAPI)
- mp->m_flags |= XFS_MOUNT_DMAPI;
- return 0;
-}
-
-/*
- * This function fills in xfs_mount_t fields based on mount args.
- * Note: the superblock _has_ now been read in.
- */
-STATIC int
-xfs_finish_flags(
- struct xfs_mount_args *ap,
- struct xfs_mount *mp)
-{
- int ronly = (mp->m_flags & XFS_MOUNT_RDONLY);
-
- /* Fail a mount where the logbuf is smaller then the log stripe */
- if (xfs_sb_version_haslogv2(&mp->m_sb)) {
- if ((ap->logbufsize <= 0) &&
- (mp->m_sb.sb_logsunit > XLOG_BIG_RECORD_BSIZE)) {
- mp->m_logbsize = mp->m_sb.sb_logsunit;
- } else if (ap->logbufsize > 0 &&
- ap->logbufsize < mp->m_sb.sb_logsunit) {
- cmn_err(CE_WARN,
- "XFS: logbuf size must be greater than or equal to log stripe size");
- return XFS_ERROR(EINVAL);
- }
- } else {
- /* Fail a mount if the logbuf is larger than 32K */
- if (ap->logbufsize > XLOG_BIG_RECORD_BSIZE) {
- cmn_err(CE_WARN,
- "XFS: logbuf size for version 1 logs must be 16K or 32K");
- return XFS_ERROR(EINVAL);
- }
- }
-
- if (xfs_sb_version_hasattr2(&mp->m_sb))
- mp->m_flags |= XFS_MOUNT_ATTR2;
-
- /*
- * prohibit r/w mounts of read-only filesystems
- */
- if ((mp->m_sb.sb_flags & XFS_SBF_READONLY) && !ronly) {
- cmn_err(CE_WARN,
- "XFS: cannot mount a read-only filesystem as read-write");
- return XFS_ERROR(EROFS);
- }
-
- /*
- * check for shared mount.
- */
- if (ap->flags & XFSMNT_SHARED) {
- if (!xfs_sb_version_hasshared(&mp->m_sb))
- return XFS_ERROR(EINVAL);
-
- /*
- * For IRIX 6.5, shared mounts must have the shared
- * version bit set, have the persistent readonly
- * field set, must be version 0 and can only be mounted
- * read-only.
- */
- if (!ronly || !(mp->m_sb.sb_flags & XFS_SBF_READONLY) ||
- (mp->m_sb.sb_shared_vn != 0))
- return XFS_ERROR(EINVAL);
-
- mp->m_flags |= XFS_MOUNT_SHARED;
-
- /*
- * Shared XFS V0 can't deal with DMI. Return EINVAL.
- */
- if (mp->m_sb.sb_shared_vn == 0 && (ap->flags & XFSMNT_DMAPI))
- return XFS_ERROR(EINVAL);
- }
-
- if (ap->flags & XFSMNT_UQUOTA) {
- mp->m_qflags |= (XFS_UQUOTA_ACCT | XFS_UQUOTA_ACTIVE);
- if (ap->flags & XFSMNT_UQUOTAENF)
- mp->m_qflags |= XFS_UQUOTA_ENFD;
- }
-
- if (ap->flags & XFSMNT_GQUOTA) {
- mp->m_qflags |= (XFS_GQUOTA_ACCT | XFS_GQUOTA_ACTIVE);
- if (ap->flags & XFSMNT_GQUOTAENF)
- mp->m_qflags |= XFS_OQUOTA_ENFD;
- } else if (ap->flags & XFSMNT_PQUOTA) {
- mp->m_qflags |= (XFS_PQUOTA_ACCT | XFS_PQUOTA_ACTIVE);
- if (ap->flags & XFSMNT_PQUOTAENF)
- mp->m_qflags |= XFS_OQUOTA_ENFD;
- }
-
- return 0;
-}
-
-/*
- * xfs_mount
- *
- * The file system configurations are:
- * (1) device (partition) with data and internal log
- * (2) logical volume with data and log subvolumes.
- * (3) logical volume with data, log, and realtime subvolumes.
- *
- * We only have to handle opening the log and realtime volumes here if
- * they are present. The data subvolume has already been opened by
- * get_sb_bdev() and is stored in vfsp->vfs_super->s_bdev.
- */
-int
-xfs_mount(
- struct xfs_mount *mp,
- struct xfs_mount_args *args,
- cred_t *credp)
-{
- struct block_device *ddev, *logdev, *rtdev;
- int flags = 0, error;
-
- ddev = mp->m_super->s_bdev;
- logdev = rtdev = NULL;
-
- error = xfs_dmops_get(mp, args);
- if (error)
- return error;
- error = xfs_qmops_get(mp, args);
- if (error)
- return error;
-
- if (args->flags & XFSMNT_QUIET)
- flags |= XFS_MFSI_QUIET;
-
- /*
- * Open real time and log devices - order is important.
- */
- if (args->logname[0]) {
- error = xfs_blkdev_get(mp, args->logname, &logdev);
- if (error)
- return error;
- }
- if (args->rtname[0]) {
- error = xfs_blkdev_get(mp, args->rtname, &rtdev);
- if (error) {
- xfs_blkdev_put(logdev);
- return error;
- }
-
- if (rtdev == ddev || rtdev == logdev) {
- cmn_err(CE_WARN,
- "XFS: Cannot mount filesystem with identical rtdev and ddev/logdev.");
- xfs_blkdev_put(logdev);
- xfs_blkdev_put(rtdev);
- return EINVAL;
- }
- }
-
- /*
- * Setup xfs_mount buffer target pointers
- */
- error = ENOMEM;
- mp->m_ddev_targp = xfs_alloc_buftarg(ddev, 0);
- if (!mp->m_ddev_targp) {
- xfs_blkdev_put(logdev);
- xfs_blkdev_put(rtdev);
- return error;
- }
- if (rtdev) {
- mp->m_rtdev_targp = xfs_alloc_buftarg(rtdev, 1);
- if (!mp->m_rtdev_targp) {
- xfs_blkdev_put(logdev);
- xfs_blkdev_put(rtdev);
- goto error0;
- }
- }
- mp->m_logdev_targp = (logdev && logdev != ddev) ?
- xfs_alloc_buftarg(logdev, 1) : mp->m_ddev_targp;
- if (!mp->m_logdev_targp) {
- xfs_blkdev_put(logdev);
- xfs_blkdev_put(rtdev);
- goto error0;
- }
-
- /*
- * Setup flags based on mount(2) options and then the superblock
- */
- error = xfs_start_flags(args, mp);
- if (error)
- goto error1;
- error = xfs_readsb(mp, flags);
- if (error)
- goto error1;
- error = xfs_finish_flags(args, mp);
- if (error)
- goto error2;
-
- /*
- * Setup xfs_mount buffer target pointers based on superblock
- */
- error = xfs_setsize_buftarg(mp->m_ddev_targp, mp->m_sb.sb_blocksize,
- mp->m_sb.sb_sectsize);
- if (!error && logdev && logdev != ddev) {
- unsigned int log_sector_size = BBSIZE;
-
- if (xfs_sb_version_hassector(&mp->m_sb))
- log_sector_size = mp->m_sb.sb_logsectsize;
- error = xfs_setsize_buftarg(mp->m_logdev_targp,
- mp->m_sb.sb_blocksize,
- log_sector_size);
- }
- if (!error && rtdev)
- error = xfs_setsize_buftarg(mp->m_rtdev_targp,
- mp->m_sb.sb_blocksize,
- mp->m_sb.sb_sectsize);
- if (error)
- goto error2;
-
- if (mp->m_flags & XFS_MOUNT_BARRIER)
- xfs_mountfs_check_barriers(mp);
-
- if ((error = xfs_filestream_mount(mp)))
- goto error2;
-
- error = xfs_mountfs(mp, flags);
- if (error)
- goto error2;
-
- XFS_SEND_MOUNT(mp, DM_RIGHT_NULL, args->mtpt, args->fsname);
-
- return 0;
-
-error2:
- if (mp->m_sb_bp)
- xfs_freesb(mp);
-error1:
- xfs_binval(mp->m_ddev_targp);
- if (logdev && logdev != ddev)
- xfs_binval(mp->m_logdev_targp);
- if (rtdev)
- xfs_binval(mp->m_rtdev_targp);
-error0:
- xfs_unmountfs_close(mp, credp);
- xfs_qmops_put(mp);
- xfs_dmops_put(mp);
- return error;
-}
-
-int
-xfs_unmount(
- xfs_mount_t *mp,
- int flags,
- cred_t *credp)
-{
- xfs_inode_t *rip;
- bhv_vnode_t *rvp;
- int unmount_event_wanted = 0;
- int unmount_event_flags = 0;
- int xfs_unmountfs_needed = 0;
- int error;
-
- rip = mp->m_rootip;
- rvp = XFS_ITOV(rip);
-
-#ifdef HAVE_DMAPI
- if (mp->m_flags & XFS_MOUNT_DMAPI) {
- error = XFS_SEND_PREUNMOUNT(mp,
- rip, DM_RIGHT_NULL, rip, DM_RIGHT_NULL,
- NULL, NULL, 0, 0,
- (mp->m_dmevmask & (1<<DM_EVENT_PREUNMOUNT))?
- 0:DM_FLAGS_UNWANTED);
- if (error)
- return XFS_ERROR(error);
- unmount_event_wanted = 1;
- unmount_event_flags = (mp->m_dmevmask & (1<<DM_EVENT_UNMOUNT))?
- 0 : DM_FLAGS_UNWANTED;
- }
-#endif
-
- /*
- * Blow away any referenced inode in the filestreams cache.
- * This can and will cause log traffic as inodes go inactive
- * here.
- */
- xfs_filestream_unmount(mp);
-
- XFS_bflush(mp->m_ddev_targp);
- error = xfs_unmount_flush(mp, 0);
- if (error)
- goto out;
-
- ASSERT(vn_count(rvp) == 1);
-
- /*
- * Drop the reference count
- */
- IRELE(rip);
-
- /*
- * If we're forcing a shutdown, typically because of a media error,
- * we want to make sure we invalidate dirty pages that belong to
- * referenced vnodes as well.
- */
- if (XFS_FORCED_SHUTDOWN(mp)) {
- error = xfs_sync(mp, SYNC_WAIT | SYNC_CLOSE);
- ASSERT(error != EFSCORRUPTED);
- }
- xfs_unmountfs_needed = 1;
-
-out:
- /* Send DMAPI event, if required.
- * Then do xfs_unmountfs() if needed.
- * Then return error (or zero).
- */
- if (unmount_event_wanted) {
- /* Note: mp structure must still exist for
- * XFS_SEND_UNMOUNT() call.
- */
- XFS_SEND_UNMOUNT(mp, error == 0 ? rip : NULL,
- DM_RIGHT_NULL, 0, error, unmount_event_flags);
- }
- if (xfs_unmountfs_needed) {
- /*
- * Call common unmount function to flush to disk
- * and free the super block buffer & mount structures.
- */
- xfs_unmountfs(mp, credp);
- xfs_qmops_put(mp);
- xfs_dmops_put(mp);
- kmem_free(mp, sizeof(xfs_mount_t));
- }
-
- return XFS_ERROR(error);
-}
-
STATIC void
xfs_quiesce_fs(
xfs_mount_t *mp)
@@ -694,30 +245,6 @@ xfs_attr_quiesce(
xfs_unmountfs_writesb(mp);
}
-int
-xfs_mntupdate(
- struct xfs_mount *mp,
- int *flags,
- struct xfs_mount_args *args)
-{
- if (!(*flags & MS_RDONLY)) { /* rw/ro -> rw */
- if (mp->m_flags & XFS_MOUNT_RDONLY)
- mp->m_flags &= ~XFS_MOUNT_RDONLY;
- if (args->flags & XFSMNT_BARRIER) {
- mp->m_flags |= XFS_MOUNT_BARRIER;
- xfs_mountfs_check_barriers(mp);
- } else {
- mp->m_flags &= ~XFS_MOUNT_BARRIER;
- }
- } else if (!(mp->m_flags & XFS_MOUNT_RDONLY)) { /* rw -> ro */
- xfs_filestream_flush(mp);
- xfs_sync(mp, SYNC_DATA_QUIESCE);
- xfs_attr_quiesce(mp);
- mp->m_flags |= XFS_MOUNT_RDONLY;
- }
- return 0;
-}
-
/*
* xfs_unmount_flush implements a set of flush operation on special
* inodes, which are needed as a separate set of operations so that
@@ -1048,7 +575,7 @@ xfs_sync_inodes(
if (XFS_FORCED_SHUTDOWN(mp) && !(flags & SYNC_CLOSE)) {
XFS_MOUNT_IUNLOCK(mp);
- kmem_free(ipointer, sizeof(xfs_iptr_t));
+ kmem_free(ipointer);
return 0;
}
@@ -1194,7 +721,7 @@ xfs_sync_inodes(
}
XFS_MOUNT_IUNLOCK(mp);
ASSERT(ipointer_in == B_FALSE);
- kmem_free(ipointer, sizeof(xfs_iptr_t));
+ kmem_free(ipointer);
return XFS_ERROR(error);
}
@@ -1224,7 +751,7 @@ xfs_sync_inodes(
ASSERT(ipointer_in == B_FALSE);
- kmem_free(ipointer, sizeof(xfs_iptr_t));
+ kmem_free(ipointer);
return XFS_ERROR(last_error);
}
diff --git a/fs/xfs/xfs_vfsops.h b/fs/xfs/xfs_vfsops.h
index 1688817c55ed..a74b05087da4 100644
--- a/fs/xfs/xfs_vfsops.h
+++ b/fs/xfs/xfs_vfsops.h
@@ -8,11 +8,6 @@ struct kstatfs;
struct xfs_mount;
struct xfs_mount_args;
-int xfs_mount(struct xfs_mount *mp, struct xfs_mount_args *args,
- struct cred *credp);
-int xfs_unmount(struct xfs_mount *mp, int flags, struct cred *credp);
-int xfs_mntupdate(struct xfs_mount *mp, int *flags,
- struct xfs_mount_args *args);
int xfs_sync(struct xfs_mount *mp, int flags);
void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
int lnnum);
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index 70702a60b4bb..b6a065eb25a5 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -444,7 +444,13 @@ xfs_setattr(
code = 0;
if ((vap->va_size > ip->i_size) &&
(flags & ATTR_NOSIZETOK) == 0) {
- code = xfs_igrow_start(ip, vap->va_size, credp);
+ /*
+ * Do the first part of growing a file: zero any data
+ * in the last block that is beyond the old EOF. We
+ * need to do this before the inode is joined to the
+ * transaction to modify the i_size.
+ */
+ code = xfs_zero_eof(ip, vap->va_size, ip->i_size);
}
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -512,8 +518,11 @@ xfs_setattr(
timeflags |= XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG;
if (vap->va_size > ip->i_size) {
- xfs_igrow_finish(tp, ip, vap->va_size,
- !(flags & ATTR_DMI));
+ ip->i_d.di_size = vap->va_size;
+ ip->i_size = vap->va_size;
+ if (!(flags & ATTR_DMI))
+ xfs_ichgtime(ip, XFS_ICHGTIME_CHG);
+ xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
} else if ((vap->va_size <= ip->i_size) ||
((vap->va_size == 0) && ip->i_d.di_nextents)) {
/*
@@ -856,18 +865,14 @@ xfs_readlink(
/*
* xfs_fsync
*
- * This is called to sync the inode and its data out to disk.
- * We need to hold the I/O lock while flushing the data, and
- * the inode lock while flushing the inode. The inode lock CANNOT
- * be held while flushing the data, so acquire after we're done
- * with that.
+ * This is called to sync the inode and its data out to disk. We need to hold
+ * the I/O lock while flushing the data, and the inode lock while flushing the
+ * inode. The inode lock CANNOT be held while flushing the data, so acquire
+ * it after we're done with that.
*/
int
xfs_fsync(
- xfs_inode_t *ip,
- int flag,
- xfs_off_t start,
- xfs_off_t stop)
+ xfs_inode_t *ip)
{
xfs_trans_t *tp;
int error;
@@ -875,103 +880,79 @@ xfs_fsync(
xfs_itrace_entry(ip);
- ASSERT(start >= 0 && stop >= -1);
-
if (XFS_FORCED_SHUTDOWN(ip->i_mount))
return XFS_ERROR(EIO);
- if (flag & FSYNC_DATA)
- filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping);
+ /* capture size updates in I/O completion before writing the inode. */
+ error = filemap_fdatawait(vn_to_inode(XFS_ITOV(ip))->i_mapping);
+ if (error)
+ return XFS_ERROR(error);
/*
- * We always need to make sure that the required inode state
- * is safe on disk. The vnode might be clean but because
- * of committed transactions that haven't hit the disk yet.
- * Likewise, there could be unflushed non-transactional
- * changes to the inode core that have to go to disk.
+ * We always need to make sure that the required inode state is safe on
+ * disk. The vnode might be clean but we still might need to force the
+ * log because of committed transactions that haven't hit the disk yet.
+ * Likewise, there could be unflushed non-transactional changes to the
+ * inode core that have to go to disk and this requires us to issue
+ * a synchronous transaction to capture these changes correctly.
*
- * The following code depends on one assumption: that
- * any transaction that changes an inode logs the core
- * because it has to change some field in the inode core
- * (typically nextents or nblocks). That assumption
- * implies that any transactions against an inode will
- * catch any non-transactional updates. If inode-altering
- * transactions exist that violate this assumption, the
- * code breaks. Right now, it figures that if the involved
- * update_* field is clear and the inode is unpinned, the
- * inode is clean. Either it's been flushed or it's been
- * committed and the commit has hit the disk unpinning the inode.
- * (Note that xfs_inode_item_format() called at commit clears
- * the update_* fields.)
+ * This code relies on the assumption that if the update_* fields
+ * of the inode are clear and the inode is unpinned then it is clean
+ * and no action is required.
*/
xfs_ilock(ip, XFS_ILOCK_SHARED);
- /* If we are flushing data then we care about update_size
- * being set, otherwise we care about update_core
- */
- if ((flag & FSYNC_DATA) ?
- (ip->i_update_size == 0) :
- (ip->i_update_core == 0)) {
+ if (!(ip->i_update_size || ip->i_update_core)) {
/*
- * Timestamps/size haven't changed since last inode
- * flush or inode transaction commit. That means
- * either nothing got written or a transaction
- * committed which caught the updates. If the
- * latter happened and the transaction hasn't
- * hit the disk yet, the inode will be still
- * be pinned. If it is, force the log.
+ * Timestamps/size haven't changed since last inode flush or
+ * inode transaction commit. That means either nothing got
+ * written or a transaction committed which caught the updates.
+ * If the latter happened and the transaction hasn't hit the
+ * disk yet, the inode will still be pinned. If it is,
+ * force the log.
*/
xfs_iunlock(ip, XFS_ILOCK_SHARED);
if (xfs_ipincount(ip)) {
- _xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
- XFS_LOG_FORCE |
- ((flag & FSYNC_WAIT)
- ? XFS_LOG_SYNC : 0),
+ error = _xfs_log_force(ip->i_mount, (xfs_lsn_t)0,
+ XFS_LOG_FORCE | XFS_LOG_SYNC,
&log_flushed);
} else {
/*
- * If the inode is not pinned and nothing
- * has changed we don't need to flush the
- * cache.
+ * If the inode is not pinned and nothing has changed
+ * we don't need to flush the cache.
*/
changed = 0;
}
- error = 0;
} else {
/*
- * Kick off a transaction to log the inode
- * core to get the updates. Make it
- * sync if FSYNC_WAIT is passed in (which
- * is done by everybody but specfs). The
- * sync transaction will also force the log.
+ * Kick off a transaction to log the inode core to get the
+ * updates. The sync transaction will also force the log.
*/
xfs_iunlock(ip, XFS_ILOCK_SHARED);
tp = xfs_trans_alloc(ip->i_mount, XFS_TRANS_FSYNC_TS);
- if ((error = xfs_trans_reserve(tp, 0,
- XFS_FSYNC_TS_LOG_RES(ip->i_mount),
- 0, 0, 0))) {
+ error = xfs_trans_reserve(tp, 0,
+ XFS_FSYNC_TS_LOG_RES(ip->i_mount), 0, 0, 0);
+ if (error) {
xfs_trans_cancel(tp, 0);
return error;
}
xfs_ilock(ip, XFS_ILOCK_EXCL);
/*
- * Note - it's possible that we might have pushed
- * ourselves out of the way during trans_reserve
- * which would flush the inode. But there's no
- * guarantee that the inode buffer has actually
- * gone out yet (it's delwri). Plus the buffer
- * could be pinned anyway if it's part of an
- * inode in another recent transaction. So we
- * play it safe and fire off the transaction anyway.
+ * Note - it's possible that we might have pushed ourselves out
+ * of the way during trans_reserve which would flush the inode.
+ * But there's no guarantee that the inode buffer has actually
+ * gone out yet (it's delwri). Plus the buffer could be pinned
+ * anyway if it's part of an inode in another recent
+ * transaction. So we play it safe and fire off the
+ * transaction anyway.
*/
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
- if (flag & FSYNC_WAIT)
- xfs_trans_set_sync(tp);
+ xfs_trans_set_sync(tp);
error = _xfs_trans_commit(tp, 0, &log_flushed);
xfs_iunlock(ip, XFS_ILOCK_EXCL);
@@ -1629,12 +1610,18 @@ xfs_inactive(
return VN_INACTIVE_CACHE;
}
-
+/*
+ * Looks up an inode from "name". If ci_name is not NULL, then a CI match
+ * is allowed, otherwise it has to be an exact match. If a CI match is found,
+ * ci_name->name will point to the actual name (caller must free) or
+ * will be set to NULL if an exact match is found.
+ */
int
xfs_lookup(
xfs_inode_t *dp,
struct xfs_name *name,
- xfs_inode_t **ipp)
+ xfs_inode_t **ipp,
+ struct xfs_name *ci_name)
{
xfs_ino_t inum;
int error;
@@ -1646,7 +1633,7 @@ xfs_lookup(
return XFS_ERROR(EIO);
lock_mode = xfs_ilock_map_shared(dp);
- error = xfs_dir_lookup(NULL, dp, name, &inum);
+ error = xfs_dir_lookup(NULL, dp, name, &inum, ci_name);
xfs_iunlock_map_shared(dp, lock_mode);
if (error)
@@ -1654,12 +1641,15 @@ xfs_lookup(
error = xfs_iget(dp->i_mount, NULL, inum, 0, 0, ipp, 0);
if (error)
- goto out;
+ goto out_free_name;
xfs_itrace_ref(*ipp);
return 0;
- out:
+out_free_name:
+ if (ci_name)
+ kmem_free(ci_name->name);
+out:
*ipp = NULL;
return error;
}
diff --git a/fs/xfs/xfs_vnodeops.h b/fs/xfs/xfs_vnodeops.h
index 8abe8f186e20..7e9a8b241f21 100644
--- a/fs/xfs/xfs_vnodeops.h
+++ b/fs/xfs/xfs_vnodeops.h
@@ -18,12 +18,11 @@ int xfs_open(struct xfs_inode *ip);
int xfs_setattr(struct xfs_inode *ip, struct bhv_vattr *vap, int flags,
struct cred *credp);
int xfs_readlink(struct xfs_inode *ip, char *link);
-int xfs_fsync(struct xfs_inode *ip, int flag, xfs_off_t start,
- xfs_off_t stop);
+int xfs_fsync(struct xfs_inode *ip);
int xfs_release(struct xfs_inode *ip);
int xfs_inactive(struct xfs_inode *ip);
int xfs_lookup(struct xfs_inode *dp, struct xfs_name *name,
- struct xfs_inode **ipp);
+ struct xfs_inode **ipp, struct xfs_name *ci_name);
int xfs_create(struct xfs_inode *dp, struct xfs_name *name, mode_t mode,
xfs_dev_t rdev, struct xfs_inode **ipp, struct cred *credp);
int xfs_remove(struct xfs_inode *dp, struct xfs_name *name,