Diffstat (limited to 'fs')
-rw-r--r--  fs/9p/Kconfig | 10
-rw-r--r--  fs/Kconfig | 1406
-rw-r--r--  fs/Makefile | 2
-rw-r--r--  fs/adfs/Kconfig | 27
-rw-r--r--  fs/affs/Kconfig | 21
-rw-r--r--  fs/afs/Kconfig | 30
-rw-r--r--  fs/afs/Makefile | 3
-rw-r--r--  fs/afs/cache.c | 503
-rw-r--r--  fs/afs/cache.h | 15
-rw-r--r--  fs/afs/cell.c | 16
-rw-r--r--  fs/afs/file.c | 220
-rw-r--r--  fs/afs/inode.c | 31
-rw-r--r--  fs/afs/internal.h | 53
-rw-r--r--  fs/afs/main.c | 27
-rw-r--r--  fs/afs/mntpt.c | 4
-rw-r--r--  fs/afs/proc.c | 1
-rw-r--r--  fs/afs/vlocation.c | 25
-rw-r--r--  fs/afs/volume.c | 14
-rw-r--r--  fs/afs/write.c | 21
-rw-r--r--  fs/autofs/Kconfig | 21
-rw-r--r--  fs/autofs4/Kconfig | 20
-rw-r--r--  fs/befs/Kconfig | 26
-rw-r--r--  fs/bfs/Kconfig | 19
-rw-r--r--  fs/bio-integrity.c | 26
-rw-r--r--  fs/btrfs/Kconfig | 18
-rw-r--r--  fs/btrfs/locking.c | 10
-rw-r--r--  fs/cachefiles/Kconfig | 39
-rw-r--r--  fs/cachefiles/Makefile | 18
-rw-r--r--  fs/cachefiles/cf-bind.c | 286
-rw-r--r--  fs/cachefiles/cf-daemon.c | 754
-rw-r--r--  fs/cachefiles/cf-interface.c | 449
-rw-r--r--  fs/cachefiles/cf-internal.h | 360
-rw-r--r--  fs/cachefiles/cf-key.c | 159
-rw-r--r--  fs/cachefiles/cf-main.c | 106
-rw-r--r--  fs/cachefiles/cf-namei.c | 772
-rw-r--r--  fs/cachefiles/cf-proc.c | 134
-rw-r--r--  fs/cachefiles/cf-rdwr.c | 853
-rw-r--r--  fs/cachefiles/cf-security.c | 116
-rw-r--r--  fs/cachefiles/cf-xattr.c | 291
-rw-r--r--  fs/cifs/CHANGES | 4
-rw-r--r--  fs/cifs/cifs_debug.c | 1
-rw-r--r--  fs/cifs/cifsencrypt.c | 18
-rw-r--r--  fs/cifs/cifsproto.h | 4
-rw-r--r--  fs/cifs/connect.c | 30
-rw-r--r--  fs/cifs/dir.c | 56
-rw-r--r--  fs/cifs/inode.c | 5
-rw-r--r--  fs/cifs/md5.c | 38
-rw-r--r--  fs/cifs/md5.h | 6
-rw-r--r--  fs/cifs/transport.c | 127
-rw-r--r--  fs/coda/Kconfig | 21
-rw-r--r--  fs/compat_ioctl.c | 7
-rw-r--r--  fs/configfs/Kconfig | 11
-rw-r--r--  fs/configfs/dir.c | 59
-rw-r--r--  fs/cramfs/Kconfig | 19
-rw-r--r--  fs/dlm/dir.c | 18
-rw-r--r--  fs/dlm/dlm_internal.h | 2
-rw-r--r--  fs/dlm/lockspace.c | 2
-rw-r--r--  fs/dlm/lowcomms.c | 180
-rw-r--r--  fs/dlm/plock.c | 6
-rw-r--r--  fs/dquot.c | 514
-rw-r--r--  fs/ecryptfs/Kconfig | 11
-rw-r--r--  fs/efs/Kconfig | 14
-rw-r--r--  fs/eventpoll.c | 22
-rw-r--r--  fs/ext2/inode.c | 2
-rw-r--r--  fs/ext3/inode.c | 9
-rw-r--r--  fs/ext3/namei.c | 20
-rw-r--r--  fs/ext3/super.c | 44
-rw-r--r--  fs/ext4/balloc.c | 6
-rw-r--r--  fs/ext4/ext4.h | 18
-rw-r--r--  fs/ext4/ext4_i.h | 3
-rw-r--r--  fs/ext4/ext4_sb.h | 4
-rw-r--r--  fs/ext4/extents.c | 2
-rw-r--r--  fs/ext4/inode.c | 45
-rw-r--r--  fs/ext4/mballoc.c | 46
-rw-r--r--  fs/ext4/mballoc.h | 1
-rw-r--r--  fs/ext4/namei.c | 21
-rw-r--r--  fs/ext4/resize.c | 3
-rw-r--r--  fs/ext4/super.c | 48
-rw-r--r--  fs/fat/Kconfig | 97
-rw-r--r--  fs/fcntl.c | 65
-rw-r--r--  fs/freevxfs/Kconfig | 16
-rw-r--r--  fs/fscache/Kconfig | 56
-rw-r--r--  fs/fscache/Makefile | 19
-rw-r--r--  fs/fscache/fsc-cache.c | 415
-rw-r--r--  fs/fscache/fsc-cookie.c | 498
-rw-r--r--  fs/fscache/fsc-fsdef.c | 144
-rw-r--r--  fs/fscache/fsc-histogram.c | 109
-rw-r--r--  fs/fscache/fsc-internal.h | 380
-rw-r--r--  fs/fscache/fsc-main.c | 124
-rw-r--r--  fs/fscache/fsc-netfs.c | 103
-rw-r--r--  fs/fscache/fsc-object.c | 810
-rw-r--r--  fs/fscache/fsc-operation.c | 459
-rw-r--r--  fs/fscache/fsc-page.c | 771
-rw-r--r--  fs/fscache/fsc-proc.c | 68
-rw-r--r--  fs/fscache/fsc-stats.c | 212
-rw-r--r--  fs/fuse/Kconfig | 15
-rw-r--r--  fs/fuse/dev.c | 16
-rw-r--r--  fs/fuse/file.c | 2
-rw-r--r--  fs/fuse/inode.c | 30
-rw-r--r--  fs/gfs2/Kconfig | 17
-rw-r--r--  fs/gfs2/Makefile | 4
-rw-r--r--  fs/gfs2/acl.c | 1
-rw-r--r--  fs/gfs2/bmap.c | 1
-rw-r--r--  fs/gfs2/dir.c | 1
-rw-r--r--  fs/gfs2/eaops.c | 1
-rw-r--r--  fs/gfs2/eattr.c | 1
-rw-r--r--  fs/gfs2/glock.c | 251
-rw-r--r--  fs/gfs2/glock.h | 127
-rw-r--r--  fs/gfs2/glops.c | 14
-rw-r--r--  fs/gfs2/incore.h | 67
-rw-r--r--  fs/gfs2/inode.c | 13
-rw-r--r--  fs/gfs2/inode.h | 22
-rw-r--r--  fs/gfs2/lock_dlm.c | 240
-rw-r--r--  fs/gfs2/locking.c | 232
-rw-r--r--  fs/gfs2/locking/dlm/Makefile | 3
-rw-r--r--  fs/gfs2/locking/dlm/lock.c | 708
-rw-r--r--  fs/gfs2/locking/dlm/lock_dlm.h | 166
-rw-r--r--  fs/gfs2/locking/dlm/main.c | 48
-rw-r--r--  fs/gfs2/locking/dlm/mount.c | 276
-rw-r--r--  fs/gfs2/locking/dlm/sysfs.c | 226
-rw-r--r--  fs/gfs2/locking/dlm/thread.c | 68
-rw-r--r--  fs/gfs2/log.c | 1
-rw-r--r--  fs/gfs2/lops.c | 1
-rw-r--r--  fs/gfs2/main.c | 13
-rw-r--r--  fs/gfs2/meta_io.c | 1
-rw-r--r--  fs/gfs2/mount.c | 112
-rw-r--r--  fs/gfs2/mount.h | 17
-rw-r--r--  fs/gfs2/ops_address.c | 2
-rw-r--r--  fs/gfs2/ops_dentry.c | 1
-rw-r--r--  fs/gfs2/ops_export.c | 1
-rw-r--r--  fs/gfs2/ops_file.c | 74
-rw-r--r--  fs/gfs2/ops_fstype.c | 149
-rw-r--r--  fs/gfs2/ops_inode.c | 1
-rw-r--r--  fs/gfs2/ops_super.c | 42
-rw-r--r--  fs/gfs2/quota.c | 203
-rw-r--r--  fs/gfs2/quota.h | 2
-rw-r--r--  fs/gfs2/recovery.c | 28
-rw-r--r--  fs/gfs2/rgrp.c | 1
-rw-r--r--  fs/gfs2/super.c | 1
-rw-r--r--  fs/gfs2/super.h | 26
-rw-r--r--  fs/gfs2/sys.c | 156
-rw-r--r--  fs/gfs2/trans.c | 3
-rw-r--r--  fs/gfs2/util.c | 11
-rw-r--r--  fs/hfs/Kconfig | 12
-rw-r--r--  fs/hfsplus/Kconfig | 13
-rw-r--r--  fs/hpfs/Kconfig | 14
-rw-r--r--  fs/ioctl.c | 25
-rw-r--r--  fs/isofs/Kconfig | 39
-rw-r--r--  fs/jbd2/journal.c | 6
-rw-r--r--  fs/jfs/Kconfig | 50
-rw-r--r--  fs/jfs/jfs_debug.c | 1
-rw-r--r--  fs/jfs/jfs_extent.c | 63
-rw-r--r--  fs/jfs/jfs_imap.c | 10
-rw-r--r--  fs/jfs/jfs_metapage.c | 18
-rw-r--r--  fs/jfs/jfs_types.h | 29
-rw-r--r--  fs/jfs/jfs_xtree.c | 263
-rw-r--r--  fs/jfs/jfs_xtree.h | 2
-rw-r--r--  fs/jfs/super.c | 4
-rw-r--r--  fs/minix/Kconfig | 17
-rw-r--r--  fs/ncpfs/Kconfig | 21
-rw-r--r--  fs/nfs/Kconfig | 95
-rw-r--r--  fs/nfs/Makefile | 1
-rw-r--r--  fs/nfs/client.c | 16
-rw-r--r--  fs/nfs/file.c | 37
-rw-r--r--  fs/nfs/fscache-index.c | 337
-rw-r--r--  fs/nfs/fscache.c | 521
-rw-r--r--  fs/nfs/fscache.h | 208
-rw-r--r--  fs/nfs/inode.c | 17
-rw-r--r--  fs/nfs/internal.h | 4
-rw-r--r--  fs/nfs/iostat.h | 18
-rw-r--r--  fs/nfs/read.c | 27
-rw-r--r--  fs/nfs/super.c | 45
-rw-r--r--  fs/nfsd/Kconfig | 81
-rw-r--r--  fs/nfsd/auth.c | 3
-rw-r--r--  fs/nfsd/nfs3proc.c | 5
-rw-r--r--  fs/nfsd/nfs4callback.c | 7
-rw-r--r--  fs/nfsd/nfs4proc.c | 27
-rw-r--r--  fs/nfsd/nfs4state.c | 168
-rw-r--r--  fs/nfsd/vfs.c | 5
-rw-r--r--  fs/notify/inotify/inotify_user.c | 135
-rw-r--r--  fs/ntfs/Kconfig | 78
-rw-r--r--  fs/ocfs2/Kconfig | 85
-rw-r--r--  fs/ocfs2/alloc.c | 3
-rw-r--r--  fs/ocfs2/cluster/heartbeat.c | 96
-rw-r--r--  fs/ocfs2/cluster/heartbeat.h | 3
-rw-r--r--  fs/ocfs2/cluster/nodemanager.c | 9
-rw-r--r--  fs/ocfs2/dcache.c | 42
-rw-r--r--  fs/ocfs2/dcache.h | 9
-rw-r--r--  fs/ocfs2/dlmglue.c | 4
-rw-r--r--  fs/ocfs2/journal.c | 12
-rw-r--r--  fs/ocfs2/journal.h | 11
-rw-r--r--  fs/ocfs2/localalloc.c | 86
-rw-r--r--  fs/ocfs2/ocfs2.h | 12
-rw-r--r--  fs/ocfs2/quota_global.c | 173
-rw-r--r--  fs/ocfs2/super.c | 179
-rw-r--r--  fs/ocfs2/xattr.c | 17
-rw-r--r--  fs/omfs/Kconfig | 13
-rw-r--r--  fs/proc/Makefile | 1
-rw-r--r--  fs/proc/automount.c | 28
-rw-r--r--  fs/proc/base.c | 26
-rw-r--r--  fs/proc/generic.c | 110
-rw-r--r--  fs/proc/inode-alloc.txt | 14
-rw-r--r--  fs/proc/inode.c | 22
-rw-r--r--  fs/proc/internal.h | 16
-rw-r--r--  fs/proc/proc_net.c | 235
-rw-r--r--  fs/proc/proc_tty.c | 1
-rw-r--r--  fs/qnx4/Kconfig | 25
-rw-r--r--  fs/reiserfs/Kconfig | 85
-rw-r--r--  fs/reiserfs/procfs.c | 5
-rw-r--r--  fs/reiserfs/super.c | 58
-rw-r--r--  fs/romfs/Kconfig | 16
-rw-r--r--  fs/smbfs/Kconfig | 55
-rw-r--r--  fs/splice.c | 3
-rw-r--r--  fs/squashfs/Kconfig | 51
-rw-r--r--  fs/super.c | 1
-rw-r--r--  fs/sysfs/Kconfig | 23
-rw-r--r--  fs/sysfs/bin.c | 6
-rw-r--r--  fs/sysfs/mount.c | 5
-rw-r--r--  fs/sysv/Kconfig | 36
-rw-r--r--  fs/ubifs/budget.c | 35
-rw-r--r--  fs/ubifs/debug.c | 122
-rw-r--r--  fs/ubifs/debug.h | 36
-rw-r--r--  fs/ubifs/dir.c | 96
-rw-r--r--  fs/ubifs/file.c | 9
-rw-r--r--  fs/ubifs/gc.c | 28
-rw-r--r--  fs/ubifs/io.c | 22
-rw-r--r--  fs/ubifs/journal.c | 2
-rw-r--r--  fs/ubifs/lprops.c | 12
-rw-r--r--  fs/ubifs/lpt_commit.c | 44
-rw-r--r--  fs/ubifs/master.c | 2
-rw-r--r--  fs/ubifs/orphan.c | 38
-rw-r--r--  fs/ubifs/super.c | 195
-rw-r--r--  fs/ubifs/tnc.c | 12
-rw-r--r--  fs/ubifs/ubifs.h | 26
-rw-r--r--  fs/udf/Kconfig | 18
-rw-r--r--  fs/udf/balloc.c | 113
-rw-r--r--  fs/udf/dir.c | 14
-rw-r--r--  fs/udf/directory.c | 38
-rw-r--r--  fs/udf/ecma_167.h | 416
-rw-r--r--  fs/udf/ialloc.c | 4
-rw-r--r--  fs/udf/inode.c | 213
-rw-r--r--  fs/udf/misc.c | 29
-rw-r--r--  fs/udf/namei.c | 86
-rw-r--r--  fs/udf/osta_udf.h | 22
-rw-r--r--  fs/udf/partition.c | 2
-rw-r--r--  fs/udf/super.c | 164
-rw-r--r--  fs/udf/truncate.c | 44
-rw-r--r--  fs/udf/udf_i.h | 6
-rw-r--r--  fs/udf/udf_sb.h | 4
-rw-r--r--  fs/udf/udfdecl.h | 46
-rw-r--r--  fs/udf/udfend.h | 28
-rw-r--r--  fs/udf/udftime.c | 6
-rw-r--r--  fs/udf/unicode.c | 62
-rw-r--r--  fs/ufs/Kconfig | 43
-rw-r--r--  fs/xfs/Kconfig | 1
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.c | 305
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl.h | 15
-rw-r--r--  fs/xfs/linux-2.6/xfs_ioctl32.c | 184
-rw-r--r--  fs/xfs/linux-2.6/xfs_super.c | 17
-rw-r--r--  fs/xfs/linux-2.6/xfs_sync.c | 6
-rw-r--r--  fs/xfs/quota/xfs_dquot.c | 38
-rw-r--r--  fs/xfs/quota/xfs_dquot.h | 10
-rw-r--r--  fs/xfs/quota/xfs_qm.c | 9
-rw-r--r--  fs/xfs/xfs_ag.h | 6
-rw-r--r--  fs/xfs/xfs_alloc_btree.c | 2
-rw-r--r--  fs/xfs/xfs_attr.c | 26
-rw-r--r--  fs/xfs/xfs_bmap.c | 166
-rw-r--r--  fs/xfs/xfs_bmap.h | 2
-rw-r--r--  fs/xfs/xfs_bmap_btree.c | 10
-rw-r--r--  fs/xfs/xfs_bmap_btree.h | 4
-rw-r--r--  fs/xfs/xfs_btree.c | 16
-rw-r--r--  fs/xfs/xfs_da_btree.c | 8
-rw-r--r--  fs/xfs/xfs_dfrag.c | 10
-rw-r--r--  fs/xfs/xfs_extfree_item.h | 6
-rw-r--r--  fs/xfs/xfs_ialloc.c | 6
-rw-r--r--  fs/xfs/xfs_ialloc.h | 2
-rw-r--r--  fs/xfs/xfs_ialloc_btree.h | 1
-rw-r--r--  fs/xfs/xfs_inode.c | 19
-rw-r--r--  fs/xfs/xfs_inode_item.h | 6
-rw-r--r--  fs/xfs/xfs_iomap.c | 10
-rw-r--r--  fs/xfs/xfs_itable.c | 6
-rw-r--r--  fs/xfs/xfs_log_recover.c | 31
-rw-r--r--  fs/xfs/xfs_mount.c | 26
-rw-r--r--  fs/xfs/xfs_mount.h | 9
-rw-r--r--  fs/xfs/xfs_rename.c | 2
-rw-r--r--  fs/xfs/xfs_rtalloc.c | 2
-rw-r--r--  fs/xfs/xfs_rw.h | 1
-rw-r--r--  fs/xfs/xfs_sb.h | 2
-rw-r--r--  fs/xfs/xfs_vnodeops.c | 20
289 files changed, 16002 insertions, 7112 deletions
diff --git a/fs/9p/Kconfig b/fs/9p/Kconfig
new file mode 100644
index 000000000000..74e0723e90bc
--- /dev/null
+++ b/fs/9p/Kconfig
@@ -0,0 +1,10 @@
+config 9P_FS
+ tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)"
+ depends on INET && NET_9P && EXPERIMENTAL
+ help
+ If you say Y here, you will get experimental support for
+ Plan 9 resource sharing via the 9P2000 protocol.
+
+ See <http://v9fs.sf.net> for more information.
+
+ If unsure, say N.
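
The new fs/9p/Kconfig above is the template for the whole commit: each filesystem's options move out of the monolithic fs/Kconfig into a per-filesystem file, and fs/Kconfig pulls each one back in with a source directive. A minimal sketch of the resulting shape of fs/Kconfig (the 9P line itself is an assumption, since the corresponding hunk is not visible in this excerpt):

    # fs/Kconfig after the split: one source line per filesystem
    source "fs/xfs/Kconfig"
    source "fs/gfs2/Kconfig"
    source "fs/ocfs2/Kconfig"
    source "fs/btrfs/Kconfig"
    source "fs/9p/Kconfig"      # assumed; hunk not shown in this excerpt

Each sourced file is self-contained, so adding or dropping a filesystem no longer means editing the central menu file.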
diff --git a/fs/Kconfig b/fs/Kconfig
index 37bbec255c79..7ffcd52efa38 100644
--- a/fs/Kconfig
+++ b/fs/Kconfig
@@ -27,141 +27,8 @@ config FS_MBCACHE
default y if EXT4_FS=y && EXT4_FS_XATTR
default m if EXT2_FS_XATTR || EXT3_FS_XATTR || EXT4_FS_XATTR
-config REISERFS_FS
- tristate "Reiserfs support"
- help
- Stores not just filenames but the files themselves in a balanced
- tree. Uses journalling.
-
- Balanced trees are more efficient than traditional file system
- architectural foundations.
-
- In general, ReiserFS is as fast as ext2, but is very efficient with
- large directories and small files. Additional patches are needed
- for NFS and quotas, please see <http://www.namesys.com/> for links.
-
- It is more easily extended to have features currently found in
- database and keyword search systems than block allocation based file
- systems are. The next version will be so extended, and will support
- plugins consistent with our motto ``It takes more than a license to
- make source code open.''
-
- Read <http://www.namesys.com/> to learn more about reiserfs.
-
- Sponsored by Threshold Networks, Emusic.com, and Bigstorage.com.
-
- If you like it, you can pay us to add new features to it that you
- need, buy a support contract, or pay us to port it to another OS.
-
-config REISERFS_CHECK
- bool "Enable reiserfs debug mode"
- depends on REISERFS_FS
- help
- If you set this to Y, then ReiserFS will perform every check it can
- possibly imagine of its internal consistency throughout its
- operation. It will also go substantially slower. More than once we
- have forgotten that this was on, and then gone despondent over the
- latest benchmarks.:-) Use of this option allows our team to go all
- out in checking for consistency when debugging without fear of its
- effect on end users. If you are on the verge of sending in a bug
- report, say Y and you might get a useful error message. Almost
- everyone should say N.
-
-config REISERFS_PROC_INFO
- bool "Stats in /proc/fs/reiserfs"
- depends on REISERFS_FS && PROC_FS
- help
- Create under /proc/fs/reiserfs a hierarchy of files, displaying
- various ReiserFS statistics and internal data at the expense of
- making your kernel or module slightly larger (+8 KB). This also
- increases the amount of kernel memory required for each mount.
- Almost everyone but ReiserFS developers and people fine-tuning
- reiserfs or tracing problems should say N.
-
-config REISERFS_FS_XATTR
- bool "ReiserFS extended attributes"
- depends on REISERFS_FS
- help
- Extended attributes are name:value pairs associated with inodes by
- the kernel or by users (see the attr(5) manual page, or visit
- <http://acl.bestbits.at/> for details).
-
- If unsure, say N.
-
-config REISERFS_FS_POSIX_ACL
- bool "ReiserFS POSIX Access Control Lists"
- depends on REISERFS_FS_XATTR
- select FS_POSIX_ACL
- help
- Posix Access Control Lists (ACLs) support permissions for users and
- groups beyond the owner/group/world scheme.
-
- To learn more about Access Control Lists, visit the Posix ACLs for
- Linux website <http://acl.bestbits.at/>.
-
- If you don't know what Access Control Lists are, say N
-
-config REISERFS_FS_SECURITY
- bool "ReiserFS Security Labels"
- depends on REISERFS_FS_XATTR
- help
- Security labels support alternative access control models
- implemented by security modules like SELinux. This option
- enables an extended attribute handler for file security
- labels in the ReiserFS filesystem.
-
- If you are not using a security module that requires using
- extended attributes for file security labels, say N.
-
-config JFS_FS
- tristate "JFS filesystem support"
- select NLS
- help
- This is a port of IBM's Journaled Filesystem. More information is
- available in the file <file:Documentation/filesystems/jfs.txt>.
-
- If you do not intend to use the JFS filesystem, say N.
-
-config JFS_POSIX_ACL
- bool "JFS POSIX Access Control Lists"
- depends on JFS_FS
- select FS_POSIX_ACL
- help
- Posix Access Control Lists (ACLs) support permissions for users and
- groups beyond the owner/group/world scheme.
-
- To learn more about Access Control Lists, visit the Posix ACLs for
- Linux website <http://acl.bestbits.at/>.
-
- If you don't know what Access Control Lists are, say N
-
-config JFS_SECURITY
- bool "JFS Security Labels"
- depends on JFS_FS
- help
- Security labels support alternative access control models
- implemented by security modules like SELinux. This option
- enables an extended attribute handler for file security
- labels in the jfs filesystem.
-
- If you are not using a security module that requires using
- extended attributes for file security labels, say N.
-
-config JFS_DEBUG
- bool "JFS debugging"
- depends on JFS_FS
- help
- If you are experiencing any problems with the JFS filesystem, say
- Y here. This will result in additional debugging messages to be
- written to the system log. Under normal circumstances, this
- results in very little overhead.
-
-config JFS_STATISTICS
- bool "JFS statistics"
- depends on JFS_FS
- help
- Enabling this option will cause statistics from the JFS file system
- to be made available to the user in the /proc/fs/jfs/ directory.
+source "fs/reiserfs/Kconfig"
+source "fs/jfs/Kconfig"
config FS_POSIX_ACL
# Posix ACL utility routines (for now, only ext2/ext3/jfs/reiserfs/nfs4)
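
The Reiserfs and JFS blocks deleted above reappear as fs/reiserfs/Kconfig (85 lines in the diffstat) and fs/jfs/Kconfig (50 lines), roughly the size of the removed text, so the move is presumably verbatim. A sketch of how the new fs/reiserfs/Kconfig starts, assuming no rewording:

    # fs/reiserfs/Kconfig (sketch of the moved content)
    config REISERFS_FS
            tristate "Reiserfs support"
            help
              Stores not just filenames but the files themselves in a
              balanced tree. Uses journalling.

    config REISERFS_CHECK
            bool "Enable reiserfs debug mode"
            depends on REISERFS_FS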
@@ -182,111 +49,8 @@ config FILE_LOCKING
source "fs/xfs/Kconfig"
source "fs/gfs2/Kconfig"
-
-config OCFS2_FS
- tristate "OCFS2 file system support"
- depends on NET && SYSFS
- select CONFIGFS_FS
- select JBD2
- select CRC32
- select QUOTA
- select QUOTA_TREE
- help
- OCFS2 is a general purpose extent based shared disk cluster file
- system with many similarities to ext3. It supports 64 bit inode
- numbers, and has automatically extending metadata groups which may
- also make it attractive for non-clustered use.
-
- You'll want to install the ocfs2-tools package in order to at least
- get "mount.ocfs2".
-
- Project web page: http://oss.oracle.com/projects/ocfs2
- Tools web page: http://oss.oracle.com/projects/ocfs2-tools
- OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
-
- For more information on OCFS2, see the file
- <file:Documentation/filesystems/ocfs2.txt>.
-
-config OCFS2_FS_O2CB
- tristate "O2CB Kernelspace Clustering"
- depends on OCFS2_FS
- default y
- help
- OCFS2 includes a simple kernelspace clustering package, the OCFS2
- Cluster Base. It only requires a very small userspace component
- to configure it. This comes with the standard ocfs2-tools package.
- O2CB is limited to maintaining a cluster for OCFS2 file systems.
- It cannot manage any other cluster applications.
-
- It is always safe to say Y here, as the clustering method is
- run-time selectable.
-
-config OCFS2_FS_USERSPACE_CLUSTER
- tristate "OCFS2 Userspace Clustering"
- depends on OCFS2_FS && DLM
- default y
- help
- This option will allow OCFS2 to use userspace clustering services
- in conjunction with the DLM in fs/dlm. If you are using a
- userspace cluster manager, say Y here.
-
- It is safe to say Y, as the clustering method is run-time
- selectable.
-
-config OCFS2_FS_STATS
- bool "OCFS2 statistics"
- depends on OCFS2_FS
- default y
- help
- This option allows some fs statistics to be captured. Enabling
- this option may increase the memory consumption.
-
-config OCFS2_DEBUG_MASKLOG
- bool "OCFS2 logging support"
- depends on OCFS2_FS
- default y
- help
- The ocfs2 filesystem has an extensive logging system. The system
- allows selection of events to log via files in /sys/o2cb/logmask/.
- This option will enlarge your kernel, but it allows debugging of
- ocfs2 filesystem issues.
-
-config OCFS2_DEBUG_FS
- bool "OCFS2 expensive checks"
- depends on OCFS2_FS
- default n
- help
- This option will enable expensive consistency checks. Enable
- this option for debugging only as it is likely to decrease
- performance of the filesystem.
-
-config OCFS2_FS_POSIX_ACL
- bool "OCFS2 POSIX Access Control Lists"
- depends on OCFS2_FS
- select FS_POSIX_ACL
- default n
- help
- Posix Access Control Lists (ACLs) support permissions for users and
- groups beyond the owner/group/world scheme.
-
-config BTRFS_FS
- tristate "Btrfs filesystem (EXPERIMENTAL) Unstable disk format"
- depends on EXPERIMENTAL
- select LIBCRC32C
- select ZLIB_INFLATE
- select ZLIB_DEFLATE
- help
- Btrfs is a new filesystem with extents, writable snapshotting,
- support for multiple devices and many more features.
-
- Btrfs is highly experimental, and THE DISK FORMAT IS NOT YET
- FINALIZED. You should say N here unless you are interested in
- testing Btrfs with non-critical data.
-
- To compile this file system support as a module, choose M here. The
- module will be called btrfs.
-
- If unsure, say N.
+source "fs/ocfs2/Kconfig"
+source "fs/btrfs/Kconfig"
endif # BLOCK
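
OCFS2 and Btrfs move the same way. fs/btrfs/Kconfig is 18 lines in the diffstat, which matches the block deleted above almost exactly; assuming a verbatim move, it begins:

    # fs/btrfs/Kconfig (assumed verbatim move of the deleted block)
    config BTRFS_FS
            tristate "Btrfs filesystem (EXPERIMENTAL) Unstable disk format"
            depends on EXPERIMENTAL
            select LIBCRC32C
            select ZLIB_INFLATE
            select ZLIB_DEFLATE
            help
              Btrfs is a new filesystem with extents, writable snapshotting,
              support for multiple devices and many more features.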
@@ -348,130 +112,26 @@ config QUOTACTL
depends on XFS_QUOTA || QUOTA
default y
-config AUTOFS_FS
- tristate "Kernel automounter support"
- help
- The automounter is a tool to automatically mount remote file systems
- on demand. This implementation is partially kernel-based to reduce
- overhead in the already-mounted case; this is unlike the BSD
- automounter (amd), which is a pure user space daemon.
-
- To use the automounter you need the user-space tools from the autofs
- package; you can find the location in <file:Documentation/Changes>.
- You also want to answer Y to "NFS file system support", below.
-
- If you want to use the newer version of the automounter with more
- features, say N here and say Y to "Kernel automounter v4 support",
- below.
-
- To compile this support as a module, choose M here: the module will be
- called autofs.
-
- If you are not a part of a fairly large, distributed network, you
- probably do not need an automounter, and can say N here.
-
-config AUTOFS4_FS
- tristate "Kernel automounter version 4 support (also supports v3)"
- help
- The automounter is a tool to automatically mount remote file systems
- on demand. This implementation is partially kernel-based to reduce
- overhead in the already-mounted case; this is unlike the BSD
- automounter (amd), which is a pure user space daemon.
-
- To use the automounter you need the user-space tools from
- <ftp://ftp.kernel.org/pub/linux/daemons/autofs/v4/>; you also
- want to answer Y to "NFS file system support", below.
-
- To compile this support as a module, choose M here: the module will be
- called autofs4. You will need to add "alias autofs autofs4" to your
- modules configuration file.
-
- If you are not a part of a fairly large, distributed network or
- don't have a laptop which needs to dynamically reconfigure to the
- local network, you probably do not need an automounter, and can say
- N here.
-
-config FUSE_FS
- tristate "FUSE (Filesystem in Userspace) support"
- help
- With FUSE it is possible to implement a fully functional filesystem
- in a userspace program.
-
- There's also companion library: libfuse. This library along with
- utilities is available from the FUSE homepage:
- <http://fuse.sourceforge.net/>
-
- See <file:Documentation/filesystems/fuse.txt> for more information.
- See <file:Documentation/Changes> for needed library/utility version.
-
- If you want to develop a userspace FS, or if you want to use
- a filesystem based on FUSE, answer Y or M.
+source "fs/autofs/Kconfig"
+source "fs/autofs4/Kconfig"
+source "fs/fuse/Kconfig"
config GENERIC_ACL
bool
select FS_POSIX_ACL
-if BLOCK
-menu "CD-ROM/DVD Filesystems"
+menu "Caches"
-config ISO9660_FS
- tristate "ISO 9660 CDROM file system support"
- help
- This is the standard file system used on CD-ROMs. It was previously
- known as "High Sierra File System" and is called "hsfs" on other
- Unix systems. The so-called Rock-Ridge extensions which allow for
- long Unix filenames and symbolic links are also supported by this
- driver. If you have a CD-ROM drive and want to do more with it than
- just listen to audio CDs and watch its LEDs, say Y (and read
- <file:Documentation/filesystems/isofs.txt> and the CD-ROM-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>), thereby
- enlarging your kernel by about 27 KB; otherwise say N.
-
- To compile this file system support as a module, choose M here: the
- module will be called isofs.
-
-config JOLIET
- bool "Microsoft Joliet CDROM extensions"
- depends on ISO9660_FS
- select NLS
- help
- Joliet is a Microsoft extension for the ISO 9660 CD-ROM file system
- which allows for long filenames in unicode format (unicode is the
- new 16 bit character code, successor to ASCII, which encodes the
- characters of almost all languages of the world; see
- <http://www.unicode.org/> for more information). Say Y here if you
- want to be able to read Joliet CD-ROMs under Linux.
-
-config ZISOFS
- bool "Transparent decompression extension"
- depends on ISO9660_FS
- select ZLIB_INFLATE
- help
- This is a Linux-specific extension to RockRidge which lets you store
- data in compressed form on a CD-ROM and have it transparently
- decompressed when the CD-ROM is accessed. See
- <http://www.kernel.org/pub/linux/utils/fs/zisofs/> for the tools
- necessary to create such a filesystem. Say Y here if you want to be
- able to read such compressed CD-ROMs.
-
-config UDF_FS
- tristate "UDF file system support"
- select CRC_ITU_T
- help
- This is the new file system used on some CD-ROMs and DVDs. Say Y if
- you intend to mount DVD discs or CDRW's written in packet mode, or
- if written to by other UDF utilities, such as DirectCD.
- Please read <file:Documentation/filesystems/udf.txt>.
+source "fs/fscache/Kconfig"
+source "fs/cachefiles/Kconfig"
- To compile this file system support as a module, choose M here: the
- module will be called udf.
+endmenu
- If unsure, say N.
+if BLOCK
+menu "CD-ROM/DVD Filesystems"
-config UDF_NLS
- bool
- default y
- depends on (UDF_FS=m && NLS) || (UDF_FS=y && NLS=y)
+source "fs/isofs/Kconfig"
+source "fs/udf/Kconfig"
endmenu
endif # BLOCK
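
The hunk above is the one place fs/Kconfig gains genuinely new structure: a "Caches" menu for the FS-Cache and CacheFiles code this commit introduces (fs/fscache/ and fs/cachefiles/ in the diffstat). The new Kconfig files themselves are not shown in this excerpt, so the option names below are an assumption based on the directory names:

    # Sketch only; option names assumed, not taken from the diff
    menu "Caches"

    config FSCACHE
            tristate "General filesystem local caching manager"

    config CACHEFILES
            tristate "Filesystem caching on files"
            depends on FSCACHE

    endmenu

CacheFiles backs the generic FS-Cache layer with a directory on a local filesystem; network filesystems such as AFS and NFS, whose cache glue also appears in the diffstat, then talk to FS-Cache rather than managing a cache themselves.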
@@ -479,182 +139,8 @@ endif # BLOCK
if BLOCK
menu "DOS/FAT/NT Filesystems"
-config FAT_FS
- tristate
- select NLS
- help
- If you want to use one of the FAT-based file systems (the MS-DOS and
- VFAT (Windows 95) file systems), then you must say Y or M here
- to include FAT support. You will then be able to mount partitions or
- diskettes with FAT-based file systems and transparently access the
- files on them, i.e. MSDOS files will look and behave just like all
- other Unix files.
-
- This FAT support is not a file system in itself, it only provides
- the foundation for the other file systems. You will have to say Y or
- M to at least one of "MSDOS fs support" or "VFAT fs support" in
- order to make use of it.
-
- Another way to read and write MSDOS floppies and hard drive
- partitions from within Linux (but not transparently) is with the
- mtools ("man mtools") program suite. You don't need to say Y here in
- order to do that.
-
- If you need to move large files on floppies between a DOS and a
- Linux box, say Y here, mount the floppy under Linux with an MSDOS
- file system and use GNU tar's M option. GNU tar is a program
- available for Unix and DOS ("man tar" or "info tar").
-
- The FAT support will enlarge your kernel by about 37 KB. If unsure,
- say Y.
-
- To compile this as a module, choose M here: the module will be called
- fat. Note that if you compile the FAT support as a module, you
- cannot compile any of the FAT-based file systems into the kernel
- -- they will have to be modules as well.
-
-config MSDOS_FS
- tristate "MSDOS fs support"
- select FAT_FS
- help
- This allows you to mount MSDOS partitions of your hard drive (unless
- they are compressed; to access compressed MSDOS partitions under
- Linux, you can either use the DOS emulator DOSEMU, described in the
- DOSEMU-HOWTO, available from
- <http://www.tldp.org/docs.html#howto>, or try dmsdosfs in
- <ftp://ibiblio.org/pub/Linux/system/filesystems/dosfs/>. If you
- intend to use dosemu with a non-compressed MSDOS partition, say Y
- here) and MSDOS floppies. This means that file access becomes
- transparent, i.e. the MSDOS files look and behave just like all
- other Unix files.
-
- If you have Windows 95 or Windows NT installed on your MSDOS
- partitions, you should use the VFAT file system (say Y to "VFAT fs
- support" below), or you will not be able to see the long filenames
- generated by Windows 95 / Windows NT.
-
- This option will enlarge your kernel by about 7 KB. If unsure,
- answer Y. This will only work if you said Y to "DOS FAT fs support"
- as well. To compile this as a module, choose M here: the module will
- be called msdos.
-
-config VFAT_FS
- tristate "VFAT (Windows-95) fs support"
- select FAT_FS
- help
- This option provides support for normal Windows file systems with
- long filenames. That includes non-compressed FAT-based file systems
- used by Windows 95, Windows 98, Windows NT 4.0, and the Unix
- programs from the mtools package.
-
- The VFAT support enlarges your kernel by about 10 KB and it only
- works if you said Y to the "DOS FAT fs support" above. Please read
- the file <file:Documentation/filesystems/vfat.txt> for details. If
- unsure, say Y.
-
- To compile this as a module, choose M here: the module will be called
- vfat.
-
-config FAT_DEFAULT_CODEPAGE
- int "Default codepage for FAT"
- depends on MSDOS_FS || VFAT_FS
- default 437
- help
- This option should be set to the codepage of your FAT filesystems.
- It can be overridden with the "codepage" mount option.
- See <file:Documentation/filesystems/vfat.txt> for more information.
-
-config FAT_DEFAULT_IOCHARSET
- string "Default iocharset for FAT"
- depends on VFAT_FS
- default "iso8859-1"
- help
- Set this to the default input/output character set you'd
- like FAT to use. It should probably match the character set
- that most of your FAT filesystems use, and can be overridden
- with the "iocharset" mount option for FAT filesystems.
- Note that "utf8" is not recommended for FAT filesystems.
- If unsure, you shouldn't set "utf8" here.
- See <file:Documentation/filesystems/vfat.txt> for more information.
-
-config NTFS_FS
- tristate "NTFS file system support"
- select NLS
- help
- NTFS is the file system of Microsoft Windows NT, 2000, XP and 2003.
-
- Saying Y or M here enables read support. There is partial, but
- safe, write support available. For write support you must also
- say Y to "NTFS write support" below.
-
- There are also a number of user-space tools available, called
- ntfsprogs. These include ntfsundelete and ntfsresize, that work
- without NTFS support enabled in the kernel.
-
- This is a rewrite from scratch of Linux NTFS support and replaced
- the old NTFS code starting with Linux 2.5.11. A backport to
- the Linux 2.4 kernel series is separately available as a patch
- from the project web site.
-
- For more information see <file:Documentation/filesystems/ntfs.txt>
- and <http://www.linux-ntfs.org/>.
-
- To compile this file system support as a module, choose M here: the
- module will be called ntfs.
-
- If you are not using Windows NT, 2000, XP or 2003 in addition to
- Linux on your computer it is safe to say N.
-
-config NTFS_DEBUG
- bool "NTFS debugging support"
- depends on NTFS_FS
- help
- If you are experiencing any problems with the NTFS file system, say
- Y here. This will result in additional consistency checks to be
- performed by the driver as well as additional debugging messages to
- be written to the system log. Note that debugging messages are
- disabled by default. To enable them, supply the option debug_msgs=1
- at the kernel command line when booting the kernel or as an option
- to insmod when loading the ntfs module. Once the driver is active,
- you can enable debugging messages by doing (as root):
- echo 1 > /proc/sys/fs/ntfs-debug
- Replacing the "1" with "0" would disable debug messages.
-
- If you leave debugging messages disabled, this results in little
- overhead, but enabling debug messages results in very significant
- slowdown of the system.
-
- When reporting bugs, please try to have available a full dump of
- debugging messages while the misbehaviour was occurring.
-
-config NTFS_RW
- bool "NTFS write support"
- depends on NTFS_FS
- help
- This enables the partial, but safe, write support in the NTFS driver.
-
- The only supported operation is overwriting existing files, without
- changing the file length. No file or directory creation, deletion or
- renaming is possible. Note only non-resident files can be written to
- so you may find that some very small files (<500 bytes or so) cannot
- be written to.
-
- While we cannot guarantee that it will not damage any data, we have
- so far not received a single report where the driver would have
- damaged someone's data, so we assume it is perfectly safe to use.
-
- Note: While write support is safe in this version (a rewrite from
- scratch of the NTFS support), it should be noted that the old NTFS
- write support, included in Linux 2.5.10 and before (since 1997),
- is not safe.
-
- This is currently useful with TopologiLinux. TopologiLinux is run
- on top of any DOS/Microsoft Windows system without partitioning your
- hard disk. Unlike other Linux distributions TopologiLinux does not
- need its own partition. For more information see
- <http://topologi-linux.sourceforge.net/>
-
- It is perfectly safe to say N here.
+source "fs/fat/Kconfig"
+source "fs/ntfs/Kconfig"
endmenu
endif # BLOCK
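
One detail worth keeping in mind from the FAT block deleted above: FAT_FS itself is a promptless tristate, and the user-visible MSDOS_FS and VFAT_FS options pull it in via select. Assuming the moved fs/fat/Kconfig keeps that structure, the dependency chain is:

    # Dependency chain carried over from the deleted block
    config FAT_FS
            tristate
            select NLS

    config MSDOS_FS
            tristate "MSDOS fs support"
            select FAT_FS

    config VFAT_FS
            tristate "VFAT (Windows-95) fs support"
            select FAT_FS

Because select forces FAT_FS on rather than merely depending on it, enabling either front end is sufficient; there is no separate FAT prompt to remember.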
@@ -662,30 +148,7 @@ endif # BLOCK
menu "Pseudo filesystems"
source "fs/proc/Kconfig"
-
-config SYSFS
- bool "sysfs file system support" if EMBEDDED
- default y
- help
- The sysfs filesystem is a virtual filesystem that the kernel uses to
- export internal kernel objects, their attributes, and their
- relationships to one another.
-
- Users can use sysfs to ascertain useful information about the running
- kernel, such as the devices the kernel has discovered on each bus and
- which driver each is bound to. sysfs can also be used to tune devices
- and other kernel subsystems.
-
- Some system agents rely on the information in sysfs to operate.
- /sbin/hotplug uses device and object attributes in sysfs to assist in
- delegating policy decisions, like persistently naming devices.
-
- sysfs is currently used by the block subsystem to mount the root
- partition. If sysfs is disabled you must specify the boot device on
- the kernel boot command line via its major and minor numbers. For
- example, "root=03:01" for /dev/hda1.
-
- Designers of embedded systems may wish to say N here to conserve space.
+source "fs/sysfs/Kconfig"
config TMPFS
bool "Virtual memory file system support (former shm fs)"
@@ -726,17 +189,7 @@ config HUGETLBFS
config HUGETLB_PAGE
def_bool HUGETLBFS
-config CONFIGFS_FS
- tristate "Userspace-driven configuration filesystem"
- depends on SYSFS
- help
- configfs is a ram-based filesystem that provides the converse
- of sysfs's functionality. Where sysfs is a filesystem-based
- view of kernel objects, configfs is a filesystem-based manager
- of kernel objects, or config_items.
-
- Both sysfs and configfs can and should exist together on the
- same system. One is not a replacement for the other.
+source "fs/configfs/Kconfig"
endmenu
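
fs/configfs/Kconfig is listed at 11 lines in the diffstat, again about the size of the block deleted above, so a verbatim move is likely:

    # fs/configfs/Kconfig (assumed verbatim move)
    config CONFIGFS_FS
            tristate "Userspace-driven configuration filesystem"
            depends on SYSFS

Note the depends on SYSFS: if sysfs is configured out (only possible when EMBEDDED is set, per the block moved just above), configfs goes with it.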
@@ -755,425 +208,27 @@ menuconfig MISC_FILESYSTEMS
if MISC_FILESYSTEMS
-config ADFS_FS
- tristate "ADFS file system support (EXPERIMENTAL)"
- depends on BLOCK && EXPERIMENTAL
- help
- The Acorn Disc Filing System is the standard file system of the
- RiscOS operating system which runs on Acorn's ARM-based Risc PC
- systems and the Acorn Archimedes range of machines. If you say Y
- here, Linux will be able to read from ADFS partitions on hard drives
- and from ADFS-formatted floppy discs. If you also want to be able to
- write to those devices, say Y to "ADFS write support" below.
-
- The ADFS partition should be the first partition (i.e.,
- /dev/[hs]d?1) on each of your drives. Please read the file
- <file:Documentation/filesystems/adfs.txt> for further details.
-
- To compile this code as a module, choose M here: the module will be
- called adfs.
-
- If unsure, say N.
-
-config ADFS_FS_RW
- bool "ADFS write support (DANGEROUS)"
- depends on ADFS_FS
- help
- If you say Y here, you will be able to write to ADFS partitions on
- hard drives and ADFS-formatted floppy disks. This is experimental
- code, so if you're unsure, say N.
-
-config AFFS_FS
- tristate "Amiga FFS file system support (EXPERIMENTAL)"
- depends on BLOCK && EXPERIMENTAL
- help
- The Fast File System (FFS) is the common file system used on hard
- disks by Amiga(tm) systems since AmigaOS Version 1.3 (34.20). Say Y
- if you want to be able to read and write files from and to an Amiga
- FFS partition on your hard drive. Amiga floppies however cannot be
- read with this driver due to an incompatibility of the floppy
- controller used in an Amiga and the standard floppy controller in
- PCs and workstations. Read <file:Documentation/filesystems/affs.txt>
- and <file:fs/affs/Changes>.
-
- With this driver you can also mount disk files used by Bernd
- Schmidt's Un*X Amiga Emulator
- (<http://www.freiburg.linux.de/~uae/>).
- If you want to do this, you will also need to say Y or M to "Loop
- device support", above.
-
- To compile this file system support as a module, choose M here: the
- module will be called affs. If unsure, say N.
-
-config ECRYPT_FS
- tristate "eCrypt filesystem layer support (EXPERIMENTAL)"
- depends on EXPERIMENTAL && KEYS && CRYPTO && NET
- help
- Encrypted filesystem that operates on the VFS layer. See
- <file:Documentation/filesystems/ecryptfs.txt> to learn more about
- eCryptfs. Userspace components are required and can be
- obtained from <http://ecryptfs.sf.net>.
-
- To compile this file system support as a module, choose M here: the
- module will be called ecryptfs.
-
-config HFS_FS
- tristate "Apple Macintosh file system support (EXPERIMENTAL)"
- depends on BLOCK && EXPERIMENTAL
- select NLS
- help
- If you say Y here, you will be able to mount Macintosh-formatted
- floppy disks and hard drive partitions with full read-write access.
- Please read <file:Documentation/filesystems/hfs.txt> to learn about
- the available mount options.
-
- To compile this file system support as a module, choose M here: the
- module will be called hfs.
-
-config HFSPLUS_FS
- tristate "Apple Extended HFS file system support"
- depends on BLOCK
- select NLS
- select NLS_UTF8
- help
- If you say Y here, you will be able to mount extended format
- Macintosh-formatted hard drive partitions with full read-write access.
-
- This file system is often called HFS+ and was introduced with
- MacOS 8. It includes all Mac specific filesystem data such as
- data forks and creator codes, but it also has several UNIX
- style features such as file ownership and permissions.
-
-config BEFS_FS
- tristate "BeOS file system (BeFS) support (read only) (EXPERIMENTAL)"
- depends on BLOCK && EXPERIMENTAL
- select NLS
- help
- The BeOS File System (BeFS) is the native file system of Be, Inc's
- BeOS. Notable features include support for arbitrary attributes
- on files and directories, and database-like indices on selected
- attributes. (Also note that this driver doesn't make those features
- available at this time). It is a 64 bit filesystem, so it supports
- extremely large volumes and files.
-
- If you use this filesystem, you should also say Y to at least one
- of the NLS (native language support) options below.
-
- If you don't know what this is about, say N.
-
- To compile this as a module, choose M here: the module will be
- called befs.
-
-config BEFS_DEBUG
- bool "Debug BeFS"
- depends on BEFS_FS
- help
- If you say Y here, you can use the 'debug' mount option to enable
- debugging output from the driver.
-
-config BFS_FS
- tristate "BFS file system support (EXPERIMENTAL)"
- depends on BLOCK && EXPERIMENTAL
- help
- Boot File System (BFS) is a file system used under SCO UnixWare to
- allow the bootloader access to the kernel image and other important
- files during the boot process. It is usually mounted under /stand
- and corresponds to the slice marked as "STAND" in the UnixWare
- partition. You should say Y if you want to read or write the files
- on your /stand slice from within Linux. You then also need to say Y
- to "UnixWare slices support", below. More information about the BFS
- file system is contained in the file
- <file:Documentation/filesystems/bfs.txt>.
-
- If you don't know what this is about, say N.
-
- To compile this as a module, choose M here: the module will be called
- bfs. Note that the file system of your root partition (the one
- containing the directory /) cannot be compiled as a module.
-
-
-
-config EFS_FS
- tristate "EFS file system support (read only) (EXPERIMENTAL)"
- depends on BLOCK && EXPERIMENTAL
- help
- EFS is an older file system used for non-ISO9660 CD-ROMs and hard
- disk partitions by SGI's IRIX operating system (IRIX 6.0 and newer
- uses the XFS file system for hard disk partitions however).
-
- This implementation only offers read-only access. If you don't know
- what all this is about, it's safe to say N. For more information
- about EFS see its home page at <http://aeschi.ch.eu.org/efs/>.
-
- To compile the EFS file system support as a module, choose M here: the
- module will be called efs.
-
+source "fs/adfs/Kconfig"
+source "fs/affs/Kconfig"
+source "fs/ecryptfs/Kconfig"
+source "fs/hfs/Kconfig"
+source "fs/hfsplus/Kconfig"
+source "fs/befs/Kconfig"
+source "fs/bfs/Kconfig"
+source "fs/efs/Kconfig"
source "fs/jffs2/Kconfig"
# UBIFS File system configuration
source "fs/ubifs/Kconfig"
-
-config CRAMFS
- tristate "Compressed ROM file system support (cramfs)"
- depends on BLOCK
- select ZLIB_INFLATE
- help
- Saying Y here includes support for CramFs (Compressed ROM File
- System). CramFs is designed to be a simple, small, and compressed
- file system for ROM based embedded systems. CramFs is read-only,
- limited to 256MB file systems (with 16MB files), and doesn't support
- 16/32 bits uid/gid, hard links and timestamps.
-
- See <file:Documentation/filesystems/cramfs.txt> and
- <file:fs/cramfs/README> for further information.
-
- To compile this as a module, choose M here: the module will be called
- cramfs. Note that the root file system (the one containing the
- directory /) cannot be compiled as a module.
-
- If unsure, say N.
-
-config SQUASHFS
- tristate "SquashFS 4.0 - Squashed file system support"
- depends on BLOCK
- select ZLIB_INFLATE
- help
- Saying Y here includes support for SquashFS 4.0 (a Compressed
- Read-Only File System). Squashfs is a highly compressed read-only
- filesystem for Linux. It uses zlib compression to compress both
- files, inodes and directories. Inodes in the system are very small
- and all blocks are packed to minimise data overhead. Block sizes
- greater than 4K are supported up to a maximum of 1 Mbytes (default
- block size 128K). SquashFS 4.0 supports 64 bit filesystems and files
- (larger than 4GB), full uid/gid information, hard links and
- timestamps.
-
- Squashfs is intended for general read-only filesystem use, for
- archival use (i.e. in cases where a .tar.gz file may be used), and in
- embedded systems where low overhead is needed. Further information
- and tools are available from http://squashfs.sourceforge.net.
-
- If you want to compile this as a module ( = code which can be
- inserted in and removed from the running kernel whenever you want),
- say M here and read <file:Documentation/modules.txt>. The module
- will be called squashfs. Note that the root file system (the one
- containing the directory /) cannot be compiled as a module.
-
- If unsure, say N.
-
-config SQUASHFS_EMBEDDED
-
- bool "Additional option for memory-constrained systems"
- depends on SQUASHFS
- default n
- help
- Saying Y here allows you to specify cache size.
-
- If unsure, say N.
-
-config SQUASHFS_FRAGMENT_CACHE_SIZE
- int "Number of fragments cached" if SQUASHFS_EMBEDDED
- depends on SQUASHFS
- default "3"
- help
- By default SquashFS caches the last 3 fragments read from
- the filesystem. Increasing this amount may mean SquashFS
- has to re-read fragments less often from disk, at the expense
- of extra system memory. Decreasing this amount will mean
- SquashFS uses less memory at the expense of extra reads from disk.
-
- Note there must be at least one cached fragment. Anything
- much more than three will probably not make much difference.
-
-config VXFS_FS
- tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)"
- depends on BLOCK
- help
- FreeVxFS is a file system driver that support the VERITAS VxFS(TM)
- file system format. VERITAS VxFS(TM) is the standard file system
- of SCO UnixWare (and possibly others) and optionally available
- for Sunsoft Solaris, HP-UX and many other operating systems.
- Currently only readonly access is supported.
-
- NOTE: the file system type as used by mount(1), mount(2) and
- fstab(5) is 'vxfs' as it describes the file system format, not
- the actual driver.
-
- To compile this as a module, choose M here: the module will be
- called freevxfs. If unsure, say N.
-
-config MINIX_FS
- tristate "Minix file system support"
- depends on BLOCK
- help
- Minix is a simple operating system used in many classes about OS's.
- The minix file system (method to organize files on a hard disk
- partition or a floppy disk) was the original file system for Linux,
- but has been superseded by the second extended file system ext2fs.
- You don't want to use the minix file system on your hard disk
- because of certain built-in restrictions, but it is sometimes found
- on older Linux floppy disks. This option will enlarge your kernel
- by about 28 KB. If unsure, say N.
-
- To compile this file system support as a module, choose M here: the
- module will be called minix. Note that the file system of your root
- partition (the one containing the directory /) cannot be compiled as
- a module.
-
-config OMFS_FS
- tristate "SonicBlue Optimized MPEG File System support"
- depends on BLOCK
- select CRC_ITU_T
- help
- This is the proprietary file system used by the Rio Karma music
- player and ReplayTV DVR. Despite the name, this filesystem is not
- more efficient than a standard FS for MPEG files, in fact likely
- the opposite is true. Say Y if you have either of these devices
- and wish to mount its disk.
-
- To compile this file system support as a module, choose M here: the
- module will be called omfs. If unsure, say N.
-
-config HPFS_FS
- tristate "OS/2 HPFS file system support"
- depends on BLOCK
- help
- OS/2 is IBM's operating system for PC's, the same as Warp, and HPFS
- is the file system used for organizing files on OS/2 hard disk
- partitions. Say Y if you want to be able to read files from and
- write files to an OS/2 HPFS partition on your hard drive. OS/2
- floppies however are in regular MSDOS format, so you don't need this
- option in order to be able to read them. Read
- <file:Documentation/filesystems/hpfs.txt>.
-
- To compile this file system support as a module, choose M here: the
- module will be called hpfs. If unsure, say N.
-
-
-config QNX4FS_FS
- tristate "QNX4 file system support (read only)"
- depends on BLOCK
- help
- This is the file system used by the real-time operating systems
- QNX 4 and QNX 6 (the latter is also called QNX RTP).
- Further information is available at <http://www.qnx.com/>.
- Say Y if you intend to mount QNX hard disks or floppies.
- Unless you say Y to "QNX4FS read-write support" below, you will
- only be able to read these file systems.
-
- To compile this file system support as a module, choose M here: the
- module will be called qnx4.
-
- If you don't know whether you need it, then you don't need it:
- answer N.
-
-config QNX4FS_RW
- bool "QNX4FS write support (DANGEROUS)"
- depends on QNX4FS_FS && EXPERIMENTAL && BROKEN
- help
- Say Y if you want to test write support for QNX4 file systems.
-
- It's currently broken, so for now:
- answer N.
-
-config ROMFS_FS
- tristate "ROM file system support"
- depends on BLOCK
- ---help---
- This is a very small read-only file system mainly intended for
- initial ram disks of installation disks, but it could be used for
- other read-only media as well. Read
- <file:Documentation/filesystems/romfs.txt> for details.
-
- To compile this file system support as a module, choose M here: the
- module will be called romfs. Note that the file system of your
- root partition (the one containing the directory /) cannot be a
- module.
-
- If you don't know whether you need it, then you don't need it:
- answer N.
-
-
-config SYSV_FS
- tristate "System V/Xenix/V7/Coherent file system support"
- depends on BLOCK
- help
- SCO, Xenix and Coherent are commercial Unix systems for Intel
- machines, and Version 7 was used on the DEC PDP-11. Saying Y
- here would allow you to read from their floppies and hard disk
- partitions.
-
- If you have floppies or hard disk partitions like that, it is likely
- that they contain binaries from those other Unix systems; in order
- to run these binaries, you will want to install linux-abi which is
- a set of kernel modules that lets you run SCO, Xenix, Wyse,
- UnixWare, Dell Unix and System V programs under Linux. It is
- available via FTP (user: ftp) from
- <ftp://ftp.openlinux.org/pub/people/hch/linux-abi/>).
- NOTE: that will work only for binaries from Intel-based systems;
- PDP ones will have to wait until somebody ports Linux to -11 ;-)
-
- If you only intend to mount files from some other Unix over the
- network using NFS, you don't need the System V file system support
- (but you need NFS file system support obviously).
-
- Note that this option is generally not needed for floppies, since a
- good portable way to transport files and directories between unixes
- (and even other operating systems) is given by the tar program ("man
- tar" or preferably "info tar"). Note also that this option has
- nothing whatsoever to do with the option "System V IPC". Read about
- the System V file system in
- <file:Documentation/filesystems/sysv-fs.txt>.
- Saying Y here will enlarge your kernel by about 27 KB.
-
- To compile this as a module, choose M here: the module will be called
- sysv.
-
- If you haven't heard about all of this before, it's safe to say N.
-
-
-config UFS_FS
- tristate "UFS file system support (read only)"
- depends on BLOCK
- help
- BSD and derivative versions of Unix (such as SunOS, FreeBSD, NetBSD,
- OpenBSD and NeXTstep) use a file system called UFS. Some System V
- Unixes can create and mount hard disk partitions and diskettes using
- this file system as well. Saying Y here will allow you to read from
- these partitions; if you also want to write to them, say Y to the
- experimental "UFS file system write support", below. Please read the
- file <file:Documentation/filesystems/ufs.txt> for more information.
-
- The recently released UFS2 variant (used in FreeBSD 5.x) is
- READ-ONLY supported.
-
- Note that this option is generally not needed for floppies, since a
- good portable way to transport files and directories between unixes
- (and even other operating systems) is given by the tar program ("man
- tar" or preferably "info tar").
-
- When accessing NeXTstep files, you may need to convert them from the
- NeXT character set to the Latin1 character set; use the program
- recode ("info recode") for this purpose.
-
- To compile the UFS file system support as a module, choose M here: the
- module will be called ufs.
-
- If you haven't heard about all of this before, it's safe to say N.
-
-config UFS_FS_WRITE
- bool "UFS file system write support (DANGEROUS)"
- depends on UFS_FS && EXPERIMENTAL
- help
- Say Y here if you want to try writing to UFS partitions. This is
- experimental, so you should back up your UFS partitions beforehand.
-
-config UFS_DEBUG
- bool "UFS debugging"
- depends on UFS_FS
- help
- If you are experiencing any problems with the UFS filesystem, say
- Y here. This will result in _many_ additional debugging messages to be
- written to the system log.
+source "fs/cramfs/Kconfig"
+source "fs/squashfs/Kconfig"
+source "fs/freevxfs/Kconfig"
+source "fs/minix/Kconfig"
+source "fs/omfs/Kconfig"
+source "fs/hpfs/Kconfig"
+source "fs/qnx4/Kconfig"
+source "fs/romfs/Kconfig"
+source "fs/sysv/Kconfig"
+source "fs/ufs/Kconfig"
source "fs/exofs/Kconfig"
@@ -1195,173 +250,8 @@ menuconfig NETWORK_FILESYSTEMS
if NETWORK_FILESYSTEMS
-config NFS_FS
- tristate "NFS client support"
- depends on INET
- select LOCKD
- select SUNRPC
- select NFS_ACL_SUPPORT if NFS_V3_ACL
- help
- Choose Y here if you want to access files residing on other
- computers using Sun's Network File System protocol. To compile
- this file system support as a module, choose M here: the module
- will be called nfs.
-
- To mount file systems exported by NFS servers, you also need to
- install the user space mount.nfs command which can be found in
- the Linux nfs-utils package, available from http://linux-nfs.org/.
- Information about using the mount command is available in the
- mount(8) man page. More detail about the Linux NFS client
- implementation is available via the nfs(5) man page.
-
- Below you can choose which versions of the NFS protocol are
- available in the kernel to mount NFS servers. Support for NFS
- version 2 (RFC 1094) is always available when NFS_FS is selected.
-
- To configure a system which mounts its root file system via NFS
- at boot time, say Y here, select "Kernel level IP
- autoconfiguration" in the NETWORK menu, and select "Root file
- system on NFS" below. You cannot compile this file system as a
- module in this case.
-
- If unsure, say N.
-
-config NFS_V3
- bool "NFS client support for NFS version 3"
- depends on NFS_FS
- help
- This option enables support for version 3 of the NFS protocol
- (RFC 1813) in the kernel's NFS client.
-
- If unsure, say Y.
-
-config NFS_V3_ACL
- bool "NFS client support for the NFSv3 ACL protocol extension"
- depends on NFS_V3
- help
- Some NFS servers support an auxiliary NFSv3 ACL protocol that
- Sun added to Solaris but never became an official part of the
- NFS version 3 protocol. This protocol extension allows
- applications on NFS clients to manipulate POSIX Access Control
- Lists on files residing on NFS servers. NFS servers enforce
- ACLs on local files whether this protocol is available or not.
-
- Choose Y here if your NFS server supports the Solaris NFSv3 ACL
- protocol extension and you want your NFS client to allow
- applications to access and modify ACLs on files on the server.
-
- Most NFS servers don't support the Solaris NFSv3 ACL protocol
- extension. You can choose N here or specify the "noacl" mount
- option to prevent your NFS client from trying to use the NFSv3
- ACL protocol.
-
- If unsure, say N.
-
-config NFS_V4
- bool "NFS client support for NFS version 4 (EXPERIMENTAL)"
- depends on NFS_FS && EXPERIMENTAL
- select RPCSEC_GSS_KRB5
- help
- This option enables support for version 4 of the NFS protocol
- (RFC 3530) in the kernel's NFS client.
-
- To mount NFS servers using NFSv4, you also need to install user
- space programs which can be found in the Linux nfs-utils package,
- available from http://linux-nfs.org/.
-
- If unsure, say N.
-
-config ROOT_NFS
- bool "Root file system on NFS"
- depends on NFS_FS=y && IP_PNP
- help
- If you want your system to mount its root file system via NFS,
- choose Y here. This is common practice for managing systems
- without local permanent storage. For details, read
- <file:Documentation/filesystems/nfsroot.txt>.
-
- Most people say N here.
-
-config NFSD
- tristate "NFS server support"
- depends on INET
- select LOCKD
- select SUNRPC
- select EXPORTFS
- select NFS_ACL_SUPPORT if NFSD_V2_ACL
- help
- Choose Y here if you want to allow other computers to access
- files residing on this system using Sun's Network File System
- protocol. To compile the NFS server support as a module,
- choose M here: the module will be called nfsd.
-
- You may choose to use a user-space NFS server instead, in which
- case you can choose N here.
-
- To export local file systems using NFS, you also need to install
- user space programs which can be found in the Linux nfs-utils
- package, available from http://linux-nfs.org/. More detail about
- the Linux NFS server implementation is available via the
- exports(5) man page.
-
- Below you can choose which versions of the NFS protocol are
- available to clients mounting the NFS server on this system.
- Support for NFS version 2 (RFC 1094) is always available when
- CONFIG_NFSD is selected.
-
- If unsure, say N.
-
-config NFSD_V2_ACL
- bool
- depends on NFSD
-
-config NFSD_V3
- bool "NFS server support for NFS version 3"
- depends on NFSD
- help
- This option enables support in your system's NFS server for
- version 3 of the NFS protocol (RFC 1813).
-
- If unsure, say Y.
-
-config NFSD_V3_ACL
- bool "NFS server support for the NFSv3 ACL protocol extension"
- depends on NFSD_V3
- select NFSD_V2_ACL
- help
- Solaris NFS servers support an auxiliary NFSv3 ACL protocol that
- never became an official part of the NFS version 3 protocol.
- This protocol extension allows applications on NFS clients to
- manipulate POSIX Access Control Lists on files residing on NFS
- servers. NFS servers enforce POSIX ACLs on local files whether
- this protocol is available or not.
-
- This option enables support in your system's NFS server for the
- NFSv3 ACL protocol extension allowing NFS clients to manipulate
- POSIX ACLs on files exported by your system's NFS server. NFS
- clients which support the Solaris NFSv3 ACL protocol can then
- access and modify ACLs on your NFS server.
-
- To store ACLs on your NFS server, you also need to enable ACL-
- related CONFIG options for your local file systems of choice.
-
- If unsure, say N.
-
-config NFSD_V4
- bool "NFS server support for NFS version 4 (EXPERIMENTAL)"
- depends on NFSD && PROC_FS && EXPERIMENTAL
- select NFSD_V3
- select FS_POSIX_ACL
- select RPCSEC_GSS_KRB5
- help
- This option enables support in your system's NFS server for
- version 4 of the NFS protocol (RFC 3530).
-
- To export files using NFSv4, you need to install additional user
- space programs which can be found in the Linux nfs-utils package,
- available from http://linux-nfs.org/.
-
- If unsure, say N.
+source "fs/nfs/Kconfig"
+source "fs/nfsd/Kconfig"
config LOCKD
tristate
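
fs/nfs/Kconfig enters the diffstat at 95 lines, noticeably larger than the NFS client block removed above, and the same commit adds fs/nfs/fscache.c, fscache.h and fscache-index.c. A reasonable guess, not visible in this excerpt, is that the moved file also gains an option wiring the NFS client to the new FS-Cache layer, along the lines of:

    # Assumed addition to fs/nfs/Kconfig; name and wording are a guess
    config NFS_FSCACHE
            bool "Provide NFS client caching support (EXPERIMENTAL)"
            depends on EXPERIMENTAL
            depends on NFS_FS=m && FSCACHE || NFS_FS=y && FSCACHE=y
            help
              Say Y here if you want NFS data to be cached locally on disk
              through the general filesystem cache manager.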
@@ -1383,221 +273,13 @@ config NFS_COMMON
depends on NFSD || NFS_FS
default y
-config SUNRPC
- tristate
-
-config SUNRPC_GSS
- tristate
-
-config SUNRPC_XPRT_RDMA
- tristate
- depends on SUNRPC && INFINIBAND && EXPERIMENTAL
- default SUNRPC && INFINIBAND
- help
- This option enables an RPC client transport capability that
- allows the NFS client to mount servers via an RDMA-enabled
- transport.
-
- To compile RPC client RDMA transport support as a module,
- choose M here: the module will be called xprtrdma.
-
- If unsure, say N.
-
-config SUNRPC_REGISTER_V4
- bool "Register local RPC services via rpcbind v4 (EXPERIMENTAL)"
- depends on SUNRPC && EXPERIMENTAL
- default n
- help
- Sun added support for registering RPC services at an IPv6
- address by creating two new versions of the rpcbind protocol
- (RFC 1833).
-
- This option enables support in the kernel RPC server for
- registering kernel RPC services via version 4 of the rpcbind
- protocol. If you enable this option, you must run a portmapper
- daemon that supports rpcbind protocol version 4.
-
- Serving NFS over IPv6 from knfsd (the kernel's NFS server)
- requires that you enable this option and use a portmapper that
- supports rpcbind version 4.
-
- If unsure, say N to get traditional behavior (register kernel
- RPC services using only rpcbind version 2). Distributions
- using the legacy Linux portmapper daemon must say N here.
-
-config RPCSEC_GSS_KRB5
- tristate "Secure RPC: Kerberos V mechanism (EXPERIMENTAL)"
- depends on SUNRPC && EXPERIMENTAL
- select SUNRPC_GSS
- select CRYPTO
- select CRYPTO_MD5
- select CRYPTO_DES
- select CRYPTO_CBC
- help
- Choose Y here to enable Secure RPC using the Kerberos version 5
- GSS-API mechanism (RFC 1964).
-
- Secure RPC calls with Kerberos require an auxiliary user-space
- daemon which may be found in the Linux nfs-utils package
- available from http://linux-nfs.org/. In addition, user-space
- Kerberos support should be installed.
-
- If unsure, say N.
-
-config RPCSEC_GSS_SPKM3
- tristate "Secure RPC: SPKM3 mechanism (EXPERIMENTAL)"
- depends on SUNRPC && EXPERIMENTAL
- select SUNRPC_GSS
- select CRYPTO
- select CRYPTO_MD5
- select CRYPTO_DES
- select CRYPTO_CAST5
- select CRYPTO_CBC
- help
- Choose Y here to enable Secure RPC using the SPKM3 public key
- GSS-API mechanism (RFC 2025).
-
- Secure RPC calls with SPKM3 require an auxiliary userspace
- daemon which may be found in the Linux nfs-utils package
- available from http://linux-nfs.org/.
-
- If unsure, say N.
-
-config SMB_FS
- tristate "SMB file system support (OBSOLETE, please use CIFS)"
- depends on INET
- select NLS
- help
- SMB (Server Message Block) is the protocol Windows for Workgroups
- (WfW), Windows 95/98, Windows NT and OS/2 Lan Manager use to share
- files and printers over local networks. Saying Y here allows you to
- mount their file systems (often called "shares" in this context) and
- access them just like any other Unix directory. Currently, this
- works only if the Windows machines use TCP/IP as the underlying
- transport protocol, and not NetBEUI. For details, read
- <file:Documentation/filesystems/smbfs.txt> and the SMB-HOWTO,
- available from <http://www.tldp.org/docs.html#howto>.
-
- Note: if you just want your box to act as an SMB *server* and make
- files and printing services available to Windows clients (which need
- to have a TCP/IP stack), you don't need to say Y here; you can use
- the program SAMBA (available from <ftp://ftp.samba.org/pub/samba/>)
- for that.
-
- General information about how to connect Linux, Windows machines and
- Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>.
-
- To compile the SMB support as a module, choose M here:
- the module will be called smbfs. Most people say N, however.
-
-config SMB_NLS_DEFAULT
- bool "Use a default NLS"
- depends on SMB_FS
- help
- Enabling this will make smbfs use nls translations by default. You
- need to specify the local charset (CONFIG_NLS_DEFAULT) in the nls
- settings and you need to give the default nls for the SMB server as
- CONFIG_SMB_NLS_REMOTE.
-
- The nls settings can be changed at mount time, if your smbmount
- supports that, using the codepage and iocharset parameters.
-
- smbmount from samba 2.2.0 or later supports this.
-
-config SMB_NLS_REMOTE
- string "Default Remote NLS Option"
- depends on SMB_NLS_DEFAULT
- default "cp437"
- help
- This setting allows you to specify a default value for which
- codepage the server uses. If this field is left blank no
- translations will be done by default. The local codepage/charset
- default to CONFIG_NLS_DEFAULT.
-
- The nls settings can be changed at mount time, if your smbmount
- supports that, using the codepage and iocharset parameters.
-
- smbmount from samba 2.2.0 or later supports this.
-
+source "net/sunrpc/Kconfig"
+source "fs/smbfs/Kconfig"
source "fs/cifs/Kconfig"
-
-config NCP_FS
- tristate "NCP file system support (to mount NetWare volumes)"
- depends on IPX!=n || INET
- help
- NCP (NetWare Core Protocol) is a protocol that runs over IPX and is
- used by Novell NetWare clients to talk to file servers. It is to
- IPX what NFS is to TCP/IP, if that helps. Saying Y here allows you
- to mount NetWare file server volumes and to access them just like
- any other Unix directory. For details, please read the file
- <file:Documentation/filesystems/ncpfs.txt> in the kernel source and
- the IPX-HOWTO from <http://www.tldp.org/docs.html#howto>.
-
- You do not have to say Y here if you want your Linux box to act as a
- file *server* for Novell NetWare clients.
-
- General information about how to connect Linux, Windows machines and
- Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>.
-
- To compile this as a module, choose M here: the module will be called
- ncpfs. Say N unless you are connected to a Novell network.
-
source "fs/ncpfs/Kconfig"
-
-config CODA_FS
- tristate "Coda file system support (advanced network fs)"
- depends on INET
- help
- Coda is an advanced network file system, similar to NFS in that it
- enables you to mount file systems of a remote server and access them
- with regular Unix commands as if they were sitting on your hard
- disk. Coda has several advantages over NFS: support for
- disconnected operation (e.g. for laptops), read/write server
- replication, security model for authentication and encryption,
- persistent client caches and write back caching.
-
- If you say Y here, your Linux box will be able to act as a Coda
- *client*. You will need user level code as well, both for the
- client and server. Servers are currently user level, i.e. they need
- no kernel support. Please read
- <file:Documentation/filesystems/coda.txt> and check out the Coda
- home page <http://www.coda.cs.cmu.edu/>.
-
- To compile the coda client support as a module, choose M here: the
- module will be called coda.
-
-config AFS_FS
- tristate "Andrew File System support (AFS) (EXPERIMENTAL)"
- depends on INET && EXPERIMENTAL
- select AF_RXRPC
- help
- If you say Y here, you will get an experimental Andrew File System
- driver. It currently only supports unsecured read-only AFS access.
-
- See <file:Documentation/filesystems/afs.txt> for more information.
-
- If unsure, say N.
-
-config AFS_DEBUG
- bool "AFS dynamic debugging"
- depends on AFS_FS
- help
- Say Y here to make runtime controllable debugging messages appear.
-
- See <file:Documentation/filesystems/afs.txt> for more information.
-
- If unsure, say N.
-
-config 9P_FS
- tristate "Plan 9 Resource Sharing Support (9P2000) (Experimental)"
- depends on INET && NET_9P && EXPERIMENTAL
- help
- If you say Y here, you will get experimental support for
- Plan 9 resource sharing via the 9P2000 protocol.
-
- See <http://v9fs.sf.net> for more information.
-
- If unsure, say N.
+source "fs/coda/Kconfig"
+source "fs/afs/Kconfig"
+source "fs/9p/Kconfig"
endif # NETWORK_FILESYSTEMS
diff --git a/fs/Makefile b/fs/Makefile
index c907161258cb..f689d059ebf1 100644
--- a/fs/Makefile
+++ b/fs/Makefile
@@ -67,6 +67,7 @@ obj-$(CONFIG_PROFILING) += dcookies.o
obj-$(CONFIG_DLM) += dlm/
# Do not add any filesystems before this line
+obj-$(CONFIG_FSCACHE) += fscache/
obj-$(CONFIG_REISERFS_FS) += reiserfs/
obj-$(CONFIG_EXT3_FS) += ext3/ # Before ext2 so root fs can be ext3
obj-$(CONFIG_EXT4_FS) += ext4/ # Before ext2 so root fs can be ext4
@@ -118,6 +119,7 @@ obj-$(CONFIG_AFS_FS) += afs/
obj-$(CONFIG_BEFS_FS) += befs/
obj-$(CONFIG_HOSTFS) += hostfs/
obj-$(CONFIG_HPPFS) += hppfs/
+obj-$(CONFIG_CACHEFILES) += cachefiles/
obj-$(CONFIG_DEBUG_FS) += debugfs/
obj-$(CONFIG_OCFS2_FS) += ocfs2/
obj-$(CONFIG_BTRFS_FS) += btrfs/
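
A note on how these Makefile hooks meet the Kconfig symbols added elsewhere in this patch: obj-$(CONFIG_FSCACHE) expands to obj-y, obj-m or nothing according to the symbol's tristate value, and the same symbol is visible to C code as a preprocessor define (a tristate built as a module defines CONFIG_<SYM>_MODULE instead). A minimal sketch; the function below is hypothetical, not part of this patch:

/* Sketch: a bool symbol such as CONFIG_AFS_FSCACHE is either defined or
 * absent at compile time, so code can be conditionally compiled on it. */
#include <linux/kernel.h>

static void example_report_caching(void)
{
#ifdef CONFIG_AFS_FSCACHE
	printk(KERN_INFO "AFS built with local caching support\n");
#else
	printk(KERN_INFO "AFS built without local caching support\n");
#endif
}
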
diff --git a/fs/adfs/Kconfig b/fs/adfs/Kconfig
new file mode 100644
index 000000000000..e55182a74605
--- /dev/null
+++ b/fs/adfs/Kconfig
@@ -0,0 +1,27 @@
+config ADFS_FS
+ tristate "ADFS file system support (EXPERIMENTAL)"
+ depends on BLOCK && EXPERIMENTAL
+ help
+ The Acorn Disc Filing System is the standard file system of the
+ RiscOS operating system which runs on Acorn's ARM-based Risc PC
+ systems and the Acorn Archimedes range of machines. If you say Y
+ here, Linux will be able to read from ADFS partitions on hard drives
+ and from ADFS-formatted floppy discs. If you also want to be able to
+ write to those devices, say Y to "ADFS write support" below.
+
+ The ADFS partition should be the first partition (i.e.,
+ /dev/[hs]d?1) on each of your drives. Please read the file
+ <file:Documentation/filesystems/adfs.txt> for further details.
+
+ To compile this code as a module, choose M here: the module will be
+ called adfs.
+
+ If unsure, say N.
+
+config ADFS_FS_RW
+ bool "ADFS write support (DANGEROUS)"
+ depends on ADFS_FS
+ help
+ If you say Y here, you will be able to write to ADFS partitions on
+ hard drives and ADFS-formatted floppy disks. This is experimental
+ code, so if you're unsure, say N.
diff --git a/fs/affs/Kconfig b/fs/affs/Kconfig
new file mode 100644
index 000000000000..cfad9afb4762
--- /dev/null
+++ b/fs/affs/Kconfig
@@ -0,0 +1,21 @@
+config AFFS_FS
+ tristate "Amiga FFS file system support (EXPERIMENTAL)"
+ depends on BLOCK && EXPERIMENTAL
+ help
+ The Fast File System (FFS) is the common file system used on hard
+ disks by Amiga(tm) systems since AmigaOS Version 1.3 (34.20). Say Y
+ if you want to be able to read and write files from and to an Amiga
+ FFS partition on your hard drive. Amiga floppies however cannot be
+ read with this driver due to an incompatibility of the floppy
+ controller used in an Amiga and the standard floppy controller in
+ PCs and workstations. Read <file:Documentation/filesystems/affs.txt>
+ and <file:fs/affs/Changes>.
+
+ With this driver you can also mount disk files used by Bernd
+ Schmidt's Un*X Amiga Emulator
+ (<http://www.freiburg.linux.de/~uae/>).
+ If you want to do this, you will also need to say Y or M to "Loop
+ device support", above.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called affs. If unsure, say N.
diff --git a/fs/afs/Kconfig b/fs/afs/Kconfig
new file mode 100644
index 000000000000..47f8179e54ae
--- /dev/null
+++ b/fs/afs/Kconfig
@@ -0,0 +1,30 @@
+config AFS_FS
+ tristate "Andrew File System support (AFS) (EXPERIMENTAL)"
+ depends on INET && EXPERIMENTAL
+ select AF_RXRPC
+ help
+ If you say Y here, you will get an experimental Andrew File System
+ driver. It currently only supports unsecured read-only AFS access.
+
+ See <file:Documentation/filesystems/afs.txt> for more information.
+
+ If unsure, say N.
+
+config AFS_DEBUG
+ bool "AFS dynamic debugging"
+ depends on AFS_FS
+ help
+ Say Y here to make runtime controllable debugging messages appear.
+
+ See <file:Documentation/filesystems/afs.txt> for more information.
+
+ If unsure, say N.
+
+config AFS_FSCACHE
+ bool "Provide AFS client caching support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on AFS_FS=m && FSCACHE || AFS_FS=y && FSCACHE=y
+ help
+ Say Y here if you want AFS data to be cached locally on disk through
+ the generic filesystem cache manager.
+
diff --git a/fs/afs/Makefile b/fs/afs/Makefile
index a66671082cfb..4f64b95d57bd 100644
--- a/fs/afs/Makefile
+++ b/fs/afs/Makefile
@@ -2,7 +2,10 @@
# Makefile for Red Hat Linux AFS client.
#
+afs-cache-$(CONFIG_AFS_FSCACHE) := cache.o
+
kafs-objs := \
+ $(afs-cache-y) \
callback.o \
cell.o \
cmservice.o \
diff --git a/fs/afs/cache.c b/fs/afs/cache.c
index de0d7de69edc..e2b1d3f16519 100644
--- a/fs/afs/cache.c
+++ b/fs/afs/cache.c
@@ -1,6 +1,6 @@
/* AFS caching stuff
*
- * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -9,248 +9,395 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_cell_cache_match(void *target,
- const void *entry);
-static void afs_cell_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_cache_cell_index_def = {
- .name = "cell_ix",
- .data_size = sizeof(struct afs_cache_cell),
- .keys[0] = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
- .match = afs_cell_cache_match,
- .update = afs_cell_cache_update,
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include "internal.h"
+
+static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t buflen);
+static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data,
+ void *buffer, uint16_t buflen);
+static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data,
+ const void *buffer,
+ uint16_t buflen);
+
+static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t buflen);
+static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
+ void *buffer, uint16_t buflen);
+static enum fscache_checkaux afs_vlocation_cache_check_aux(
+ void *cookie_netfs_data, const void *buffer, uint16_t buflen);
+
+static uint16_t afs_volume_cache_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t buflen);
+
+static uint16_t afs_vnode_cache_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t buflen);
+static void afs_vnode_cache_get_attr(const void *cookie_netfs_data,
+ uint64_t *size);
+static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
+ void *buffer, uint16_t buflen);
+static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
+ const void *buffer,
+ uint16_t buflen);
+static void afs_vnode_cache_now_uncached(void *cookie_netfs_data);
+
+struct fscache_netfs afs_cache_netfs = {
+ .name = "afs",
+ .version = 0,
+};
+
+struct fscache_cookie_def afs_cell_cache_index_def = {
+ .name = "AFS.cell",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+ .get_key = afs_cell_cache_get_key,
+ .get_aux = afs_cell_cache_get_aux,
+ .check_aux = afs_cell_cache_check_aux,
+};
+
+struct fscache_cookie_def afs_vlocation_cache_index_def = {
+ .name = "AFS.vldb",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+ .get_key = afs_vlocation_cache_get_key,
+ .get_aux = afs_vlocation_cache_get_aux,
+ .check_aux = afs_vlocation_cache_check_aux,
+};
+
+struct fscache_cookie_def afs_volume_cache_index_def = {
+ .name = "AFS.volume",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+ .get_key = afs_volume_cache_get_key,
+};
+
+struct fscache_cookie_def afs_vnode_cache_index_def = {
+ .name = "AFS.vnode",
+ .type = FSCACHE_COOKIE_TYPE_DATAFILE,
+ .get_key = afs_vnode_cache_get_key,
+ .get_attr = afs_vnode_cache_get_attr,
+ .get_aux = afs_vnode_cache_get_aux,
+ .check_aux = afs_vnode_cache_check_aux,
+ .now_uncached = afs_vnode_cache_now_uncached,
};
-#endif
/*
- * match a cell record obtained from the cache
+ * set the key for the index entry
*/
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_cell_cache_match(void *target,
- const void *entry)
+static uint16_t afs_cell_cache_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
{
- const struct afs_cache_cell *ccell = entry;
- struct afs_cell *cell = target;
+ const struct afs_cell *cell = cookie_netfs_data;
+ uint16_t klen;
- _enter("{%s},{%s}", ccell->name, cell->name);
+ _enter("%p,%p,%u", cell, buffer, bufmax);
- if (strncmp(ccell->name, cell->name, sizeof(ccell->name)) == 0) {
- _leave(" = SUCCESS");
- return CACHEFS_MATCH_SUCCESS;
- }
+ klen = strlen(cell->name);
+ if (klen > bufmax)
+ return 0;
- _leave(" = FAILED");
- return CACHEFS_MATCH_FAILED;
+ memcpy(buffer, cell->name, klen);
+ return klen;
}
-#endif
/*
- * update a cell record in the cache
+ * provide new auxiliary cache data
*/
-#ifdef AFS_CACHING_SUPPORT
-static void afs_cell_cache_update(void *source, void *entry)
+static uint16_t afs_cell_cache_get_aux(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
{
- struct afs_cache_cell *ccell = entry;
- struct afs_cell *cell = source;
+ const struct afs_cell *cell = cookie_netfs_data;
+ uint16_t dlen;
- _enter("%p,%p", source, entry);
+ _enter("%p,%p,%u", cell, buffer, bufmax);
- strncpy(ccell->name, cell->name, sizeof(ccell->name));
+ dlen = cell->vl_naddrs * sizeof(cell->vl_addrs[0]);
+ dlen = min(dlen, bufmax);
+ dlen &= ~(sizeof(cell->vl_addrs[0]) - 1);
- memcpy(ccell->vl_servers,
- cell->vl_addrs,
- min(sizeof(ccell->vl_servers), sizeof(cell->vl_addrs)));
+ memcpy(buffer, cell->vl_addrs, dlen);
+ return dlen;
+}
+/*
+ * check that the auxiliary data indicates that the entry is still valid
+ */
+static enum fscache_checkaux afs_cell_cache_check_aux(void *cookie_netfs_data,
+ const void *buffer,
+ uint16_t buflen)
+{
+ _leave(" = OKAY");
+ return FSCACHE_CHECKAUX_OKAY;
}
-#endif
-
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vlocation_cache_match(void *target,
- const void *entry);
-static void afs_vlocation_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_vlocation_cache_index_def = {
- .name = "vldb",
- .data_size = sizeof(struct afs_cache_vlocation),
- .keys[0] = { CACHEFS_INDEX_KEYS_ASCIIZ, 64 },
- .match = afs_vlocation_cache_match,
- .update = afs_vlocation_cache_update,
-};
-#endif
+/*****************************************************************************/
/*
- * match a VLDB record stored in the cache
- * - may also load target from entry
+ * set the key for the index entry
*/
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vlocation_cache_match(void *target,
- const void *entry)
+static uint16_t afs_vlocation_cache_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
{
- const struct afs_cache_vlocation *vldb = entry;
- struct afs_vlocation *vlocation = target;
+ const struct afs_vlocation *vlocation = cookie_netfs_data;
+ uint16_t klen;
+
+ _enter("{%s},%p,%u", vlocation->vldb.name, buffer, bufmax);
+
+ klen = strnlen(vlocation->vldb.name, sizeof(vlocation->vldb.name));
+ if (klen > bufmax)
+ return 0;
- _enter("{%s},{%s}", vlocation->vldb.name, vldb->name);
+ memcpy(buffer, vlocation->vldb.name, klen);
- if (strncmp(vlocation->vldb.name, vldb->name, sizeof(vldb->name)) == 0
- ) {
- if (!vlocation->valid ||
- vlocation->vldb.rtime == vldb->rtime
+ _leave(" = %u", klen);
+ return klen;
+}
+
+/*
+ * provide new auxiliary cache data
+ */
+static uint16_t afs_vlocation_cache_get_aux(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ const struct afs_vlocation *vlocation = cookie_netfs_data;
+ uint16_t dlen;
+
+ _enter("{%s},%p,%u", vlocation->vldb.name, buffer, bufmax);
+
+ dlen = sizeof(struct afs_cache_vlocation);
+ dlen -= offsetof(struct afs_cache_vlocation, nservers);
+ if (dlen > bufmax)
+ return 0;
+
+ memcpy(buffer, (uint8_t *)&vlocation->vldb.nservers, dlen);
+
+ _leave(" = %u", dlen);
+ return dlen;
+}
+
+/*
+ * check that the auxiliary data indicates that the entry is still valid
+ */
+static
+enum fscache_checkaux afs_vlocation_cache_check_aux(void *cookie_netfs_data,
+ const void *buffer,
+ uint16_t buflen)
+{
+ const struct afs_cache_vlocation *cvldb;
+ struct afs_vlocation *vlocation = cookie_netfs_data;
+ uint16_t dlen;
+
+ _enter("{%s},%p,%u", vlocation->vldb.name, buffer, buflen);
+
+ /* check the size of the data is what we're expecting */
+ dlen = sizeof(struct afs_cache_vlocation);
+ dlen -= offsetof(struct afs_cache_vlocation, nservers);
+ if (dlen != buflen)
+ return FSCACHE_CHECKAUX_OBSOLETE;
+
+ cvldb = container_of(buffer, struct afs_cache_vlocation, nservers);
+
+ /* if what's on disk is more valid than what's in memory, then use the
+ * VL record from the cache */
+ if (!vlocation->valid || vlocation->vldb.rtime == cvldb->rtime) {
+ memcpy((uint8_t *)&vlocation->vldb.nservers, buffer, dlen);
+ vlocation->valid = 1;
+ _leave(" = SUCCESS [c->m]");
+ return FSCACHE_CHECKAUX_OKAY;
+ }
+
+ /* need to update the cache if the cached info differs */
+ if (memcmp(&vlocation->vldb, buffer, dlen) != 0) {
+ /* delete if the volume IDs for this name differ */
+ if (memcmp(&vlocation->vldb.vid, &cvldb->vid,
+ sizeof(cvldb->vid)) != 0
) {
- vlocation->vldb = *vldb;
- vlocation->valid = 1;
- _leave(" = SUCCESS [c->m]");
- return CACHEFS_MATCH_SUCCESS;
- } else if (memcmp(&vlocation->vldb, vldb, sizeof(*vldb)) != 0) {
- /* delete if VIDs for this name differ */
- if (memcmp(&vlocation->vldb.vid,
- &vldb->vid,
- sizeof(vldb->vid)) != 0) {
- _leave(" = DELETE");
- return CACHEFS_MATCH_SUCCESS_DELETE;
- }
-
- _leave(" = UPDATE");
- return CACHEFS_MATCH_SUCCESS_UPDATE;
- } else {
- _leave(" = SUCCESS");
- return CACHEFS_MATCH_SUCCESS;
+ _leave(" = OBSOLETE");
+ return FSCACHE_CHECKAUX_OBSOLETE;
}
+
+ _leave(" = UPDATE");
+ return FSCACHE_CHECKAUX_NEEDS_UPDATE;
}
- _leave(" = FAILED");
- return CACHEFS_MATCH_FAILED;
+ _leave(" = OKAY");
+ return FSCACHE_CHECKAUX_OKAY;
}
-#endif
+/*****************************************************************************/
/*
- * update a VLDB record stored in the cache
+ * set the key for the volume index entry
*/
-#ifdef AFS_CACHING_SUPPORT
-static void afs_vlocation_cache_update(void *source, void *entry)
+static uint16_t afs_volume_cache_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
{
- struct afs_cache_vlocation *vldb = entry;
- struct afs_vlocation *vlocation = source;
+ const struct afs_volume *volume = cookie_netfs_data;
+ uint16_t klen;
+
+ _enter("{%u},%p,%u", volume->type, buffer, bufmax);
+
+ klen = sizeof(volume->type);
+ if (klen > bufmax)
+ return 0;
- _enter("");
+ memcpy(buffer, &volume->type, sizeof(volume->type));
+
+ _leave(" = %u", klen);
+ return klen;
- *vldb = vlocation->vldb;
}
-#endif
-
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_volume_cache_match(void *target,
- const void *entry);
-static void afs_volume_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_volume_cache_index_def = {
- .name = "volume",
- .data_size = sizeof(struct afs_cache_vhash),
- .keys[0] = { CACHEFS_INDEX_KEYS_BIN, 1 },
- .keys[1] = { CACHEFS_INDEX_KEYS_BIN, 1 },
- .match = afs_volume_cache_match,
- .update = afs_volume_cache_update,
-};
-#endif
+/*****************************************************************************/
/*
- * match a volume hash record stored in the cache
+ * set the key for the index entry
*/
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_volume_cache_match(void *target,
- const void *entry)
+static uint16_t afs_vnode_cache_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
{
- const struct afs_cache_vhash *vhash = entry;
- struct afs_volume *volume = target;
+ const struct afs_vnode *vnode = cookie_netfs_data;
+ uint16_t klen;
- _enter("{%u},{%u}", volume->type, vhash->vtype);
+ _enter("{%x,%x,%llx},%p,%u",
+ vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
+ buffer, bufmax);
- if (volume->type == vhash->vtype) {
- _leave(" = SUCCESS");
- return CACHEFS_MATCH_SUCCESS;
- }
+ klen = sizeof(vnode->fid.vnode);
+ if (klen > bufmax)
+ return 0;
+
+ memcpy(buffer, &vnode->fid.vnode, sizeof(vnode->fid.vnode));
- _leave(" = FAILED");
- return CACHEFS_MATCH_FAILED;
+ _leave(" = %u", klen);
+ return klen;
}
-#endif
/*
- * update a volume hash record stored in the cache
+ * provide updated file attributes
*/
-#ifdef AFS_CACHING_SUPPORT
-static void afs_volume_cache_update(void *source, void *entry)
+static void afs_vnode_cache_get_attr(const void *cookie_netfs_data,
+ uint64_t *size)
{
- struct afs_cache_vhash *vhash = entry;
- struct afs_volume *volume = source;
+ const struct afs_vnode *vnode = cookie_netfs_data;
- _enter("");
+ _enter("{%x,%x,%llx},",
+ vnode->fid.vnode, vnode->fid.unique,
+ vnode->status.data_version);
- vhash->vtype = volume->type;
+ *size = vnode->status.size;
}
-#endif
-
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vnode_cache_match(void *target,
- const void *entry);
-static void afs_vnode_cache_update(void *source, void *entry);
-
-struct cachefs_index_def afs_vnode_cache_index_def = {
- .name = "vnode",
- .data_size = sizeof(struct afs_cache_vnode),
- .keys[0] = { CACHEFS_INDEX_KEYS_BIN, 4 },
- .match = afs_vnode_cache_match,
- .update = afs_vnode_cache_update,
-};
-#endif
/*
- * match a vnode record stored in the cache
+ * provide new auxiliary cache data
+ */
+static uint16_t afs_vnode_cache_get_aux(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ const struct afs_vnode *vnode = cookie_netfs_data;
+ uint16_t dlen;
+
+ _enter("{%x,%x,%Lx},%p,%u",
+ vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
+ buffer, bufmax);
+
+ dlen = sizeof(vnode->fid.unique) + sizeof(vnode->status.data_version);
+ if (dlen > bufmax)
+ return 0;
+
+ memcpy(buffer, &vnode->fid.unique, sizeof(vnode->fid.unique));
+ buffer += sizeof(vnode->fid.unique);
+ memcpy(buffer, &vnode->status.data_version,
+ sizeof(vnode->status.data_version));
+
+ _leave(" = %u", dlen);
+ return dlen;
+}
+
+/*
+ * check that the auxiliary data indicates that the entry is still valid
*/
-#ifdef AFS_CACHING_SUPPORT
-static cachefs_match_val_t afs_vnode_cache_match(void *target,
- const void *entry)
+static enum fscache_checkaux afs_vnode_cache_check_aux(void *cookie_netfs_data,
+ const void *buffer,
+ uint16_t buflen)
{
- const struct afs_cache_vnode *cvnode = entry;
- struct afs_vnode *vnode = target;
-
- _enter("{%x,%x,%Lx},{%x,%x,%Lx}",
- vnode->fid.vnode,
- vnode->fid.unique,
- vnode->status.version,
- cvnode->vnode_id,
- cvnode->vnode_unique,
- cvnode->data_version);
-
- if (vnode->fid.vnode != cvnode->vnode_id) {
- _leave(" = FAILED");
- return CACHEFS_MATCH_FAILED;
+ struct afs_vnode *vnode = cookie_netfs_data;
+ uint16_t dlen;
+
+ _enter("{%x,%x,%llx},%p,%u",
+ vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version,
+ buffer, buflen);
+
+ /* check the size of the data is what we're expecting */
+ dlen = sizeof(vnode->fid.unique) + sizeof(vnode->status.data_version);
+ if (dlen != buflen) {
+ _leave(" = OBSOLETE [len %hx != %hx]", dlen, buflen);
+ return FSCACHE_CHECKAUX_OBSOLETE;
}
- if (vnode->fid.unique != cvnode->vnode_unique ||
- vnode->status.version != cvnode->data_version) {
- _leave(" = DELETE");
- return CACHEFS_MATCH_SUCCESS_DELETE;
+ if (memcmp(buffer,
+ &vnode->fid.unique,
+ sizeof(vnode->fid.unique)
+ ) != 0) {
+ unsigned unique;
+
+ memcpy(&unique, buffer, sizeof(unique));
+
+ _leave(" = OBSOLETE [uniq %x != %x]",
+ unique, vnode->fid.unique);
+ return FSCACHE_CHECKAUX_OBSOLETE;
+ }
+
+ if (memcmp(buffer + sizeof(vnode->fid.unique),
+ &vnode->status.data_version,
+ sizeof(vnode->status.data_version)
+ ) != 0) {
+ afs_dataversion_t version;
+
+ memcpy(&version, buffer + sizeof(vnode->fid.unique),
+ sizeof(version));
+
+ _leave(" = OBSOLETE [vers %llx != %llx]",
+ version, vnode->status.data_version);
+ return FSCACHE_CHECKAUX_OBSOLETE;
}
_leave(" = SUCCESS");
- return CACHEFS_MATCH_SUCCESS;
+ return FSCACHE_CHECKAUX_OKAY;
}
-#endif
/*
- * update a vnode record stored in the cache
+ * indication that the cookie is no longer cached
+ * - this function is called when the backing store currently caching a cookie
+ * is removed
+ * - the netfs should use this to clean up any markers indicating cached pages
+ * - this is mandatory for any object that may have data
*/
-#ifdef AFS_CACHING_SUPPORT
-static void afs_vnode_cache_update(void *source, void *entry)
+static void afs_vnode_cache_now_uncached(void *cookie_netfs_data)
{
- struct afs_cache_vnode *cvnode = entry;
- struct afs_vnode *vnode = source;
+ struct afs_vnode *vnode = cookie_netfs_data;
+ struct pagevec pvec;
+ pgoff_t first;
+ int loop, nr_pages;
+
+ _enter("{%x,%x,%Lx}",
+ vnode->fid.vnode, vnode->fid.unique, vnode->status.data_version);
+
+ pagevec_init(&pvec, 0);
+ first = 0;
+
+ for (;;) {
+ /* grab a bunch of pages to clean */
+ nr_pages = pagevec_lookup(&pvec, vnode->vfs_inode.i_mapping,
+ first,
+ PAGEVEC_SIZE - pagevec_count(&pvec));
+ if (!nr_pages)
+ break;
- _enter("");
+ for (loop = 0; loop < nr_pages; loop++)
+ ClearPageFsCache(pvec.pages[loop]);
+
+ first = pvec.pages[nr_pages - 1]->index + 1;
+
+ pvec.nr = nr_pages;
+ pagevec_release(&pvec);
+ cond_resched();
+ }
- cvnode->vnode_id = vnode->fid.vnode;
- cvnode->vnode_unique = vnode->fid.unique;
- cvnode->data_version = vnode->status.version;
+ _leave("");
}
-#endif
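
All four converted cookie definitions above follow the same key/aux buffer contract: FS-Cache hands the callback a buffer of at most bufmax bytes; the callback copies its key material in and returns the length used, or returns 0 if the key cannot fit. A minimal sketch of that contract against a hypothetical object type (not part of this patch):

#include <linux/string.h>
#include <linux/fscache.h>

struct example_object {
	char name[64];		/* hypothetical netfs-private object */
};

static uint16_t example_cache_get_key(const void *cookie_netfs_data,
				      void *buffer, uint16_t bufmax)
{
	const struct example_object *obj = cookie_netfs_data;
	uint16_t klen = strlen(obj->name);

	if (klen > bufmax)
		return 0;	/* key does not fit; give up gracefully */

	memcpy(buffer, obj->name, klen);
	return klen;		/* number of key bytes actually used */
}
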
diff --git a/fs/afs/cache.h b/fs/afs/cache.h
index 36a3642cf90e..5c4f6b499e90 100644
--- a/fs/afs/cache.h
+++ b/fs/afs/cache.h
@@ -1,6 +1,6 @@
/* AFS local cache management interface
*
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -9,15 +9,4 @@
* 2 of the License, or (at your option) any later version.
*/
-#ifndef AFS_CACHE_H
-#define AFS_CACHE_H
-
-#undef AFS_CACHING_SUPPORT
-
-#include <linux/mm.h>
-#ifdef AFS_CACHING_SUPPORT
-#include <linux/cachefs.h>
-#endif
-#include "types.h"
-
-#endif /* AFS_CACHE_H */
+#include <linux/fscache.h>
diff --git a/fs/afs/cell.c b/fs/afs/cell.c
index 5e1df14e16b1..e19c13f059ed 100644
--- a/fs/afs/cell.c
+++ b/fs/afs/cell.c
@@ -147,12 +147,11 @@ struct afs_cell *afs_cell_create(const char *name, char *vllist)
if (ret < 0)
goto error;
-#ifdef AFS_CACHING_SUPPORT
- /* put it up for caching */
- cachefs_acquire_cookie(afs_cache_netfs.primary_index,
- &afs_vlocation_cache_index_def,
- cell,
- &cell->cache);
+#ifdef CONFIG_AFS_FSCACHE
+ /* put it up for caching (this never returns an error) */
+ cell->cache = fscache_acquire_cookie(afs_cache_netfs.primary_index,
+ &afs_cell_cache_index_def,
+ cell);
#endif
/* add to the cell lists */
@@ -362,10 +361,9 @@ static void afs_cell_destroy(struct afs_cell *cell)
list_del_init(&cell->proc_link);
up_write(&afs_proc_cells_sem);
-#ifdef AFS_CACHING_SUPPORT
- cachefs_relinquish_cookie(cell->cache, 0);
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_relinquish_cookie(cell->cache, 0);
#endif
-
key_put(cell->anonymous_key);
kfree(cell);
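
The cell conversion above shows the cookie lifecycle that recurs throughout this patch: acquire against a parent index when the object is created, relinquish when it is destroyed. A condensed sketch with placeholder names:

#include <linux/fscache.h>

static struct fscache_cookie *example_attach(struct fscache_cookie *parent,
					     struct fscache_cookie_def *def,
					     void *netfs_data)
{
	/* never fails outright; returns NULL if no cache is available,
	 * and NULL is accepted by the rest of the FS-Cache API */
	return fscache_acquire_cookie(parent, def, netfs_data);
}

static void example_detach(struct fscache_cookie *cookie)
{
	/* 0 = keep the cached data for next time; 1 = retire (discard) it */
	fscache_relinquish_cookie(cookie, 0);
}
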
diff --git a/fs/afs/file.c b/fs/afs/file.c
index a3901769a96c..ad12c2475b08 100644
--- a/fs/afs/file.c
+++ b/fs/afs/file.c
@@ -23,6 +23,9 @@ static void afs_invalidatepage(struct page *page, unsigned long offset);
static int afs_releasepage(struct page *page, gfp_t gfp_flags);
static int afs_launder_page(struct page *page);
+static int afs_readpages(struct file *filp, struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages);
+
const struct file_operations afs_file_operations = {
.open = afs_open,
.release = afs_release,
@@ -46,6 +49,7 @@ const struct inode_operations afs_file_inode_operations = {
const struct address_space_operations afs_fs_aops = {
.readpage = afs_readpage,
+ .readpages = afs_readpages,
.set_page_dirty = afs_set_page_dirty,
.launder_page = afs_launder_page,
.releasepage = afs_releasepage,
@@ -101,37 +105,18 @@ int afs_release(struct inode *inode, struct file *file)
/*
* deal with notification that a page was read from the cache
*/
-#ifdef AFS_CACHING_SUPPORT
-static void afs_readpage_read_complete(void *cookie_data,
- struct page *page,
- void *data,
- int error)
+static void afs_file_readpage_read_complete(struct page *page,
+ void *data,
+ int error)
{
- _enter("%p,%p,%p,%d", cookie_data, page, data, error);
+ _enter("%p,%p,%d", page, data, error);
- if (error)
- SetPageError(page);
- else
+ /* if the read completes with an error, we just unlock the page and let
+ * the VM reissue the readpage */
+ if (!error)
SetPageUptodate(page);
unlock_page(page);
-
}
-#endif
-
-/*
- * deal with notification that a page was written to the cache
- */
-#ifdef AFS_CACHING_SUPPORT
-static void afs_readpage_write_complete(void *cookie_data,
- struct page *page,
- void *data,
- int error)
-{
- _enter("%p,%p,%p,%d", cookie_data, page, data, error);
-
- unlock_page(page);
-}
-#endif
/*
* AFS read page from file, directory or symlink
@@ -161,9 +146,9 @@ static int afs_readpage(struct file *file, struct page *page)
if (test_bit(AFS_VNODE_DELETED, &vnode->flags))
goto error;
-#ifdef AFS_CACHING_SUPPORT
/* is it cached? */
- ret = cachefs_read_or_alloc_page(vnode->cache,
+#ifdef CONFIG_AFS_FSCACHE
+ ret = fscache_read_or_alloc_page(vnode->cache,
page,
afs_file_readpage_read_complete,
NULL,
@@ -171,20 +156,21 @@ static int afs_readpage(struct file *file, struct page *page)
#else
ret = -ENOBUFS;
#endif
-
switch (ret) {
- /* read BIO submitted and wb-journal entry found */
- case 1:
- BUG(); // TODO - handle wb-journal match
-
/* read BIO submitted (page in cache) */
case 0:
break;
- /* no page available in cache */
- case -ENOBUFS:
+ /* page not yet cached */
case -ENODATA:
+ _debug("cache said ENODATA");
+ goto go_on;
+
+ /* page will not be cached */
+ case -ENOBUFS:
+ _debug("cache said ENOBUFS");
default:
+ go_on:
offset = page->index << PAGE_CACHE_SHIFT;
len = min_t(size_t, i_size_read(inode) - offset, PAGE_SIZE);
@@ -198,27 +184,25 @@ static int afs_readpage(struct file *file, struct page *page)
set_bit(AFS_VNODE_DELETED, &vnode->flags);
ret = -ESTALE;
}
-#ifdef AFS_CACHING_SUPPORT
- cachefs_uncache_page(vnode->cache, page);
+
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_uncache_page(vnode->cache, page);
#endif
+ BUG_ON(PageFsCache(page));
goto error;
}
SetPageUptodate(page);
-#ifdef AFS_CACHING_SUPPORT
- if (cachefs_write_page(vnode->cache,
- page,
- afs_file_readpage_write_complete,
- NULL,
- GFP_KERNEL) != 0
- ) {
- cachefs_uncache_page(vnode->cache, page);
- unlock_page(page);
+ /* send the page to the cache */
+#ifdef CONFIG_AFS_FSCACHE
+ if (PageFsCache(page) &&
+ fscache_write_page(vnode->cache, page, GFP_KERNEL) != 0) {
+ fscache_uncache_page(vnode->cache, page);
+ BUG_ON(PageFsCache(page));
}
-#else
- unlock_page(page);
#endif
+ unlock_page(page);
}
_leave(" = 0");
@@ -232,34 +216,59 @@ error:
}
/*
- * invalidate part or all of a page
+ * read a set of pages
*/
-static void afs_invalidatepage(struct page *page, unsigned long offset)
+static int afs_readpages(struct file *file, struct address_space *mapping,
+ struct list_head *pages, unsigned nr_pages)
{
- int ret = 1;
+ struct afs_vnode *vnode;
+ int ret = 0;
- _enter("{%lu},%lu", page->index, offset);
+ _enter(",{%lu},,%d", mapping->host->i_ino, nr_pages);
- BUG_ON(!PageLocked(page));
+ vnode = AFS_FS_I(mapping->host);
+ if (test_bit(AFS_VNODE_DELETED, &vnode->flags)) {
+ _leave(" = -ESTALE");
+ return -ESTALE;
+ }
- if (PagePrivate(page)) {
- /* We release buffers only if the entire page is being
- * invalidated.
- * The get_block cached value has been unconditionally
- * invalidated, so real IO is not possible anymore.
- */
- if (offset == 0) {
- BUG_ON(!PageLocked(page));
-
- ret = 0;
- if (!PageWriteback(page))
- ret = page->mapping->a_ops->releasepage(page,
- 0);
- /* possibly should BUG_ON(!ret); - neilb */
- }
+ /* attempt to read as many of the pages as possible */
+#ifdef CONFIG_AFS_FSCACHE
+ ret = fscache_read_or_alloc_pages(vnode->cache,
+ mapping,
+ pages,
+ &nr_pages,
+ afs_file_readpage_read_complete,
+ NULL,
+ mapping_gfp_mask(mapping));
+#else
+ ret = -ENOBUFS;
+#endif
+
+ switch (ret) {
+ /* all pages are being read from the cache */
+ case 0:
+ BUG_ON(!list_empty(pages));
+ BUG_ON(nr_pages != 0);
+ _leave(" = 0 [reading all]");
+ return 0;
+
+ /* there were pages that couldn't be read from the cache */
+ case -ENODATA:
+ case -ENOBUFS:
+ break;
+
+ /* other error */
+ default:
+ _leave(" = %d", ret);
+ return ret;
}
- _leave(" = %d", ret);
+ /* load the missing pages from the network */
+ ret = read_cache_pages(mapping, pages, (void *) afs_readpage, file);
+
+ _leave(" = %d [netting]", ret);
+ return ret;
}
/*
@@ -273,25 +282,82 @@ static int afs_launder_page(struct page *page)
}
/*
- * release a page and cleanup its private data
+ * invalidate part or all of a page
+ * - release a page and clean up its private data if offset is 0 (indicating
+ * the entire page)
+ */
+static void afs_invalidatepage(struct page *page, unsigned long offset)
+{
+ struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
+ struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
+
+ _enter("{%lu},%lu", page->index, offset);
+
+ BUG_ON(!PageLocked(page));
+
+ /* we clean up only if the entire page is being invalidated */
+ if (offset == 0) {
+#ifdef CONFIG_AFS_FSCACHE
+ if (PageFsCache(page)) {
+ wait_on_page_fscache_write(page);
+ fscache_uncache_page(vnode->cache, page);
+ ClearPageFsCache(page);
+ }
+#endif
+
+ if (PagePrivate(page)) {
+ if (wb && !PageWriteback(page)) {
+ set_page_private(page, 0);
+ afs_put_writeback(wb);
+ }
+
+ if (!page_private(page))
+ ClearPagePrivate(page);
+ }
+ }
+
+ _leave("");
+}
+
+/*
+ * release a page and clean up its private state if it's not busy
+ * - return true if the page can now be released, false if not
*/
static int afs_releasepage(struct page *page, gfp_t gfp_flags)
{
+ struct afs_writeback *wb = (struct afs_writeback *) page_private(page);
struct afs_vnode *vnode = AFS_FS_I(page->mapping->host);
- struct afs_writeback *wb;
_enter("{{%x:%u}[%lu],%lx},%x",
vnode->fid.vid, vnode->fid.vnode, page->index, page->flags,
gfp_flags);
+ /* deny if page is being written to the cache and the caller hasn't
+ * elected to wait */
+#ifdef CONFIG_AFS_FSCACHE
+ if (PageFsCache(page)) {
+ if (PageFsCacheWrite(page)) {
+ if (!(gfp_flags & __GFP_WAIT)) {
+ _leave(" = F [cache busy]");
+ return 0;
+ }
+ wait_on_page_fscache_write(page);
+ }
+
+ fscache_uncache_page(vnode->cache, page);
+ ClearPageFsCache(page);
+ }
+#endif
+
if (PagePrivate(page)) {
- wb = (struct afs_writeback *) page_private(page);
- ASSERT(wb != NULL);
- set_page_private(page, 0);
+ if (wb) {
+ set_page_private(page, 0);
+ afs_put_writeback(wb);
+ }
ClearPagePrivate(page);
- afs_put_writeback(wb);
}
- _leave(" = 0");
- return 0;
+ /* indicate that the page can be released */
+ _leave(" = T");
+ return 1;
}
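
The readpage/readpages conversions above rely on the return-code convention of fscache_read_or_alloc_page(): 0 means the cache owns the page and the completion callback will unlock it; -ENODATA and -ENOBUFS both mean the netfs must fetch the data from the server itself. A compressed sketch of that pattern; the fetch helper is a hypothetical stand-in:

#include <linux/fscache.h>
#include <linux/pagemap.h>

static void example_read_complete(struct page *page, void *data, int error)
{
	/* on error, leave the page !Uptodate so the VM retries ->readpage */
	if (!error)
		SetPageUptodate(page);
	unlock_page(page);
}

static int example_fetch_from_server(struct page *page)
{
	return 0;	/* stand-in for a real network fetch */
}

static int example_readpage(struct fscache_cookie *cookie, struct page *page)
{
	int ret = fscache_read_or_alloc_page(cookie, page,
					     example_read_complete, NULL,
					     GFP_KERNEL);
	switch (ret) {
	case 0:		/* cache read submitted; callback completes it */
		return 0;
	case -ENODATA:	/* a cache slot exists but holds no data yet */
	case -ENOBUFS:	/* the cache declined this page entirely */
	default:
		return example_fetch_from_server(page);
	}
}
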
diff --git a/fs/afs/inode.c b/fs/afs/inode.c
index bb47217f6a18..c048f0658751 100644
--- a/fs/afs/inode.c
+++ b/fs/afs/inode.c
@@ -61,6 +61,11 @@ static int afs_inode_map_status(struct afs_vnode *vnode, struct key *key)
return -EBADMSG;
}
+#ifdef CONFIG_AFS_FSCACHE
+ if (vnode->status.size != inode->i_size)
+ fscache_attr_changed(vnode->cache);
+#endif
+
inode->i_nlink = vnode->status.nlink;
inode->i_uid = vnode->status.owner;
inode->i_gid = 0;
@@ -149,15 +154,6 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
return inode;
}
-#ifdef AFS_CACHING_SUPPORT
- /* set up caching before reading the status, as fetch-status reads the
- * first page of symlinks to see if they're really mntpts */
- cachefs_acquire_cookie(vnode->volume->cache,
- NULL,
- vnode,
- &vnode->cache);
-#endif
-
if (!status) {
/* it's a remotely extant inode */
set_bit(AFS_VNODE_CB_BROKEN, &vnode->flags);
@@ -183,6 +179,15 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
}
}
+ /* set up caching before mapping the status, as map-status reads the
+ * first page of symlinks to see if they're really mountpoints */
+ inode->i_size = vnode->status.size;
+#ifdef CONFIG_AFS_FSCACHE
+ vnode->cache = fscache_acquire_cookie(vnode->volume->cache,
+ &afs_vnode_cache_index_def,
+ vnode);
+#endif
+
ret = afs_inode_map_status(vnode, key);
if (ret < 0)
goto bad_inode;
@@ -196,6 +201,10 @@ struct inode *afs_iget(struct super_block *sb, struct key *key,
/* failure */
bad_inode:
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_relinquish_cookie(vnode->cache, 0);
+ vnode->cache = NULL;
+#endif
iget_failed(inode);
_leave(" = %d [bad]", ret);
return ERR_PTR(ret);
@@ -340,8 +349,8 @@ void afs_clear_inode(struct inode *inode)
ASSERT(list_empty(&vnode->writebacks));
ASSERT(!vnode->cb_promised);
-#ifdef AFS_CACHING_SUPPORT
- cachefs_relinquish_cookie(vnode->cache, 0);
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_relinquish_cookie(vnode->cache, 0);
vnode->cache = NULL;
#endif
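
The new fscache_attr_changed() call above is how a netfs tells FS-Cache that an object's attributes (principally its size) have changed behind its back, so the backing file can be resized. A small sketch under the same assumptions, with illustrative names:

#include <linux/fs.h>
#include <linux/fscache.h>

static void example_note_remote_size(struct inode *inode,
				     struct fscache_cookie *cookie,
				     loff_t remote_size)
{
	/* only poke the cache when the server's size disagrees with ours */
	if (remote_size != i_size_read(inode))
		fscache_attr_changed(cookie);
}
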
diff --git a/fs/afs/internal.h b/fs/afs/internal.h
index 67f259d99cd6..106be66dafd2 100644
--- a/fs/afs/internal.h
+++ b/fs/afs/internal.h
@@ -21,6 +21,7 @@
#include "afs.h"
#include "afs_vl.h"
+#include "cache.h"
#define AFS_CELL_MAX_ADDRS 15
@@ -193,8 +194,8 @@ struct afs_cell {
struct key *anonymous_key; /* anonymous user key for this cell */
struct list_head proc_link; /* /proc cell list link */
struct proc_dir_entry *proc_dir; /* /proc dir for this cell */
-#ifdef AFS_CACHING_SUPPORT
- struct cachefs_cookie *cache; /* caching cookie */
+#ifdef CONFIG_AFS_FSCACHE
+ struct fscache_cookie *cache; /* caching cookie */
#endif
/* server record management */
@@ -249,8 +250,8 @@ struct afs_vlocation {
struct list_head grave; /* link in master graveyard list */
struct list_head update; /* link in master update list */
struct afs_cell *cell; /* cell to which volume belongs */
-#ifdef AFS_CACHING_SUPPORT
- struct cachefs_cookie *cache; /* caching cookie */
+#ifdef CONFIG_AFS_FSCACHE
+ struct fscache_cookie *cache; /* caching cookie */
#endif
struct afs_cache_vlocation vldb; /* volume information DB record */
struct afs_volume *vols[3]; /* volume access record pointer (index by type) */
@@ -302,8 +303,8 @@ struct afs_volume {
atomic_t usage;
struct afs_cell *cell; /* cell to which belongs (unrefd ptr) */
struct afs_vlocation *vlocation; /* volume location */
-#ifdef AFS_CACHING_SUPPORT
- struct cachefs_cookie *cache; /* caching cookie */
+#ifdef CONFIG_AFS_FSCACHE
+ struct fscache_cookie *cache; /* caching cookie */
#endif
afs_volid_t vid; /* volume ID */
afs_voltype_t type; /* type of volume */
@@ -333,8 +334,8 @@ struct afs_vnode {
struct afs_server *server; /* server currently supplying this file */
struct afs_fid fid; /* the file identifier for this inode */
struct afs_file_status status; /* AFS status info for this file */
-#ifdef AFS_CACHING_SUPPORT
- struct cachefs_cookie *cache; /* caching cookie */
+#ifdef CONFIG_AFS_FSCACHE
+ struct fscache_cookie *cache; /* caching cookie */
#endif
struct afs_permits *permits; /* cache of permits so far obtained */
struct mutex permits_lock; /* lock for altering permits list */
@@ -428,6 +429,22 @@ struct afs_uuid {
/*****************************************************************************/
/*
+ * cache.c
+ */
+#ifdef CONFIG_AFS_FSCACHE
+extern struct fscache_netfs afs_cache_netfs;
+extern struct fscache_cookie_def afs_cell_cache_index_def;
+extern struct fscache_cookie_def afs_vlocation_cache_index_def;
+extern struct fscache_cookie_def afs_volume_cache_index_def;
+extern struct fscache_cookie_def afs_vnode_cache_index_def;
+#else
+#define afs_cell_cache_index_def (*(struct fscache_cookie_def *) NULL)
+#define afs_vlocation_cache_index_def (*(struct fscache_cookie_def *) NULL)
+#define afs_volume_cache_index_def (*(struct fscache_cookie_def *) NULL)
+#define afs_vnode_cache_index_def (*(struct fscache_cookie_def *) NULL)
+#endif
+
+/*
* callback.c
*/
extern void afs_init_callback_state(struct afs_server *);
@@ -446,9 +463,6 @@ extern void afs_callback_update_kill(void);
*/
extern struct rw_semaphore afs_proc_cells_sem;
extern struct list_head afs_proc_cells;
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_index_def afs_cache_cell_index_def;
-#endif
#define afs_get_cell(C) do { atomic_inc(&(C)->usage); } while(0)
extern int afs_cell_init(char *);
@@ -554,9 +568,6 @@ extern void afs_clear_inode(struct inode *);
* main.c
*/
extern struct afs_uuid afs_uuid;
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_netfs afs_cache_netfs;
-#endif
/*
* misc.c
@@ -637,10 +648,6 @@ extern int afs_get_MAC_address(u8 *, size_t);
/*
* vlclient.c
*/
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_index_def afs_vlocation_cache_index_def;
-#endif
-
extern int afs_vl_get_entry_by_name(struct in_addr *, struct key *,
const char *, struct afs_cache_vlocation *,
const struct afs_wait_mode *);
@@ -664,12 +671,6 @@ extern void afs_vlocation_purge(void);
/*
* vnode.c
*/
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_index_def afs_vnode_cache_index_def;
-#endif
-
-extern struct afs_timer_ops afs_vnode_cb_timed_out_ops;
-
static inline struct afs_vnode *AFS_FS_I(struct inode *inode)
{
return container_of(inode, struct afs_vnode, vfs_inode);
@@ -711,10 +712,6 @@ extern int afs_vnode_release_lock(struct afs_vnode *, struct key *);
/*
* volume.c
*/
-#ifdef AFS_CACHING_SUPPORT
-extern struct cachefs_index_def afs_volume_cache_index_def;
-#endif
-
#define afs_get_volume(V) do { atomic_inc(&(V)->usage); } while(0)
extern void afs_put_volume(struct afs_volume *);
diff --git a/fs/afs/main.c b/fs/afs/main.c
index 2d3e5d4fb9f7..66d54d348c55 100644
--- a/fs/afs/main.c
+++ b/fs/afs/main.c
@@ -1,6 +1,6 @@
/* AFS client file system
*
- * Copyright (C) 2002 Red Hat, Inc. All Rights Reserved.
+ * Copyright (C) 2002,5 Red Hat, Inc. All Rights Reserved.
* Written by David Howells (dhowells@redhat.com)
*
* This program is free software; you can redistribute it and/or
@@ -29,18 +29,6 @@ static char *rootcell;
module_param(rootcell, charp, 0);
MODULE_PARM_DESC(rootcell, "root AFS cell name and VL server IP addr list");
-#ifdef AFS_CACHING_SUPPORT
-static struct cachefs_netfs_operations afs_cache_ops = {
- .get_page_cookie = afs_cache_get_page_cookie,
-};
-
-struct cachefs_netfs afs_cache_netfs = {
- .name = "afs",
- .version = 0,
- .ops = &afs_cache_ops,
-};
-#endif
-
struct afs_uuid afs_uuid;
/*
@@ -104,10 +92,9 @@ static int __init afs_init(void)
if (ret < 0)
return ret;
-#ifdef AFS_CACHING_SUPPORT
+#ifdef CONFIG_AFS_FSCACHE
/* we want to be able to cache */
- ret = cachefs_register_netfs(&afs_cache_netfs,
- &afs_cache_cell_index_def);
+ ret = fscache_register_netfs(&afs_cache_netfs);
if (ret < 0)
goto error_cache;
#endif
@@ -142,8 +129,8 @@ error_fs:
error_open_socket:
error_vl_update_init:
error_cell_init:
-#ifdef AFS_CACHING_SUPPORT
- cachefs_unregister_netfs(&afs_cache_netfs);
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_unregister_netfs(&afs_cache_netfs);
error_cache:
#endif
afs_callback_update_kill();
@@ -175,8 +162,8 @@ static void __exit afs_exit(void)
afs_vlocation_purge();
flush_scheduled_work();
afs_cell_purge();
-#ifdef AFS_CACHING_SUPPORT
- cachefs_unregister_netfs(&afs_cache_netfs);
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_unregister_netfs(&afs_cache_netfs);
#endif
afs_proc_cleanup();
rcu_barrier();
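
The main.c hunks above convert AFS to the netfs registration pattern: one fscache_netfs per filesystem, registered at module init and torn down at exit. A self-contained sketch; the module and netfs names here are illustrative, not part of this patch:

#include <linux/module.h>
#include <linux/fscache.h>

static struct fscache_netfs example_netfs = {
	.name		= "examplefs",	/* hypothetical netfs name */
	.version	= 0,
};

static int __init example_init(void)
{
	/* registration can fail, e.g. on a duplicate netfs name */
	return fscache_register_netfs(&example_netfs);
}

static void __exit example_exit(void)
{
	fscache_unregister_netfs(&example_netfs);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
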
diff --git a/fs/afs/mntpt.c b/fs/afs/mntpt.c
index 78db4953a800..2b9e2d03a390 100644
--- a/fs/afs/mntpt.c
+++ b/fs/afs/mntpt.c
@@ -173,9 +173,9 @@ static struct vfsmount *afs_mntpt_do_automount(struct dentry *mntpt)
if (PageError(page))
goto error;
- buf = kmap(page);
+ buf = kmap_atomic(page, KM_USER0);
memcpy(devname, buf, size);
- kunmap(page);
+ kunmap_atomic(buf, KM_USER0);
page_cache_release(page);
page = NULL;
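
The mntpt.c hunk swaps kmap() for kmap_atomic() because the mapping is held only across a memcpy() and never sleeps, so the cheaper per-CPU atomic slot (KM_USER0 in this kernel generation) suffices. A sketch of the pattern:

#include <linux/highmem.h>
#include <linux/string.h>

static void example_copy_from_page(struct page *page, void *dst, size_t len)
{
	void *src = kmap_atomic(page, KM_USER0);

	memcpy(dst, src, len);	/* must not sleep while the map is held */
	kunmap_atomic(src, KM_USER0);
}
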
diff --git a/fs/afs/proc.c b/fs/afs/proc.c
index 7578c1ab9e0b..8630615e57fe 100644
--- a/fs/afs/proc.c
+++ b/fs/afs/proc.c
@@ -146,7 +146,6 @@ int afs_proc_init(void)
proc_afs = proc_mkdir("fs/afs", NULL);
if (!proc_afs)
goto error_dir;
- proc_afs->owner = THIS_MODULE;
p = proc_create("cells", 0, proc_afs, &afs_proc_cells_fops);
if (!p)
diff --git a/fs/afs/vlocation.c b/fs/afs/vlocation.c
index 849fc3160cb5..ec2a7431e458 100644
--- a/fs/afs/vlocation.c
+++ b/fs/afs/vlocation.c
@@ -281,9 +281,8 @@ static void afs_vlocation_apply_update(struct afs_vlocation *vl,
vl->vldb = *vldb;
-#ifdef AFS_CACHING_SUPPORT
- /* update volume entry in local cache */
- cachefs_update_cookie(vl->cache);
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_update_cookie(vl->cache);
#endif
}
@@ -304,11 +303,9 @@ static int afs_vlocation_fill_in_record(struct afs_vlocation *vl,
memset(&vldb, 0, sizeof(vldb));
/* see if we have an in-cache copy (will set vl->valid if there is) */
-#ifdef AFS_CACHING_SUPPORT
- cachefs_acquire_cookie(cell->cache,
- &afs_volume_cache_index_def,
- vlocation,
- &vl->cache);
+#ifdef CONFIG_AFS_FSCACHE
+ vl->cache = fscache_acquire_cookie(vl->cell->cache,
+ &afs_vlocation_cache_index_def, vl);
#endif
if (vl->valid) {
@@ -420,6 +417,11 @@ fill_in_record:
spin_unlock(&vl->lock);
wake_up(&vl->waitq);
+ /* update volume entry in local cache */
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_update_cookie(vl->cache);
+#endif
+
/* schedule for regular updates */
afs_vlocation_queue_for_updates(vl);
goto success;
@@ -465,7 +467,7 @@ found_in_memory:
spin_unlock(&vl->lock);
success:
- _leave(" = %p",vl);
+ _leave(" = %p", vl);
return vl;
error_abandon:
@@ -523,10 +525,9 @@ static void afs_vlocation_destroy(struct afs_vlocation *vl)
{
_enter("%p", vl);
-#ifdef AFS_CACHING_SUPPORT
- cachefs_relinquish_cookie(vl->cache, 0);
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_relinquish_cookie(vl->cache, 0);
#endif
-
afs_put_cell(vl->cell);
kfree(vl);
}
diff --git a/fs/afs/volume.c b/fs/afs/volume.c
index 8bab0e3437f9..a353e69e2391 100644
--- a/fs/afs/volume.c
+++ b/fs/afs/volume.c
@@ -124,13 +124,11 @@ struct afs_volume *afs_volume_lookup(struct afs_mount_params *params)
}
/* attach the cache and volume location */
-#ifdef AFS_CACHING_SUPPORT
- cachefs_acquire_cookie(vlocation->cache,
- &afs_vnode_cache_index_def,
- volume,
- &volume->cache);
+#ifdef CONFIG_AFS_FSCACHE
+ volume->cache = fscache_acquire_cookie(vlocation->cache,
+ &afs_volume_cache_index_def,
+ volume);
#endif
-
afs_get_vlocation(vlocation);
volume->vlocation = vlocation;
@@ -194,8 +192,8 @@ void afs_put_volume(struct afs_volume *volume)
up_write(&vlocation->cell->vl_sem);
/* finish cleaning up the volume */
-#ifdef AFS_CACHING_SUPPORT
- cachefs_relinquish_cookie(volume->cache, 0);
+#ifdef CONFIG_AFS_FSCACHE
+ fscache_relinquish_cookie(volume->cache, 0);
#endif
afs_put_vlocation(vlocation);
diff --git a/fs/afs/write.c b/fs/afs/write.c
index 3fb36d433621..788451866c1f 100644
--- a/fs/afs/write.c
+++ b/fs/afs/write.c
@@ -780,3 +780,24 @@ int afs_fsync(struct file *file, struct dentry *dentry, int datasync)
_leave(" = %d", ret);
return ret;
}
+
+/*
+ * notification that a previously read-only page is about to become writable
+ * - if it returns an error, the caller will deliver a bus error signal
+ */
+int afs_page_mkwrite(struct vm_area_struct *vma, struct page *page)
+{
+ struct afs_vnode *vnode = AFS_FS_I(vma->vm_file->f_mapping->host);
+
+ _enter("{{%x:%u}},{%lx}",
+ vnode->fid.vid, vnode->fid.vnode, page->index);
+
+ /* wait for the page to be written to the cache before we allow it to
+ * be modified */
+#ifdef CONFIG_AFS_FSCACHE
+ wait_on_page_fscache_write(page);
+#endif
+
+ _leave(" = 0");
+ return 0;
+}
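
A handler like the afs_page_mkwrite() added above is typically plugged into a vm_operations_struct; the wiring below is illustrative only, since this patch does not show AFS's mmap setup:

#include <linux/mm.h>

static int example_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
	/* a writable-fault hook: returning 0 lets the fault proceed to
	 * make the page writable; an error delivers SIGBUS instead */
	return 0;
}

static const struct vm_operations_struct example_vm_ops = {
	.fault		= filemap_fault,
	.page_mkwrite	= example_page_mkwrite,
};
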
diff --git a/fs/autofs/Kconfig b/fs/autofs/Kconfig
new file mode 100644
index 000000000000..5f3bea90911e
--- /dev/null
+++ b/fs/autofs/Kconfig
@@ -0,0 +1,21 @@
+config AUTOFS_FS
+ tristate "Kernel automounter support"
+ help
+ The automounter is a tool to automatically mount remote file systems
+ on demand. This implementation is partially kernel-based to reduce
+ overhead in the already-mounted case; this is unlike the BSD
+ automounter (amd), which is a pure user space daemon.
+
+ To use the automounter you need the user-space tools from the autofs
+ package; you can find the location in <file:Documentation/Changes>.
+ You also want to answer Y to "NFS file system support", below.
+
+ If you want to use the newer version of the automounter with more
+ features, say N here and say Y to "Kernel automounter v4 support",
+ below.
+
+ To compile this support as a module, choose M here: the module will be
+ called autofs.
+
+ If you are not a part of a fairly large, distributed network, you
+ probably do not need an automounter, and can say N here.
diff --git a/fs/autofs4/Kconfig b/fs/autofs4/Kconfig
new file mode 100644
index 000000000000..1204d6384d39
--- /dev/null
+++ b/fs/autofs4/Kconfig
@@ -0,0 +1,20 @@
+config AUTOFS4_FS
+ tristate "Kernel automounter version 4 support (also supports v3)"
+ help
+ The automounter is a tool to automatically mount remote file systems
+ on demand. This implementation is partially kernel-based to reduce
+ overhead in the already-mounted case; this is unlike the BSD
+ automounter (amd), which is a pure user space daemon.
+
+ To use the automounter you need the user-space tools from
+ <ftp://ftp.kernel.org/pub/linux/daemons/autofs/v4/>; you also
+ want to answer Y to "NFS file system support", below.
+
+ To compile this support as a module, choose M here: the module will be
+ called autofs4. You will need to add "alias autofs autofs4" to your
+ modules configuration file.
+
+ If you are not a part of a fairly large, distributed network or
+ don't have a laptop which needs to dynamically reconfigure to the
+ local network, you probably do not need an automounter, and can say
+ N here.
diff --git a/fs/befs/Kconfig b/fs/befs/Kconfig
new file mode 100644
index 000000000000..7835d30f211f
--- /dev/null
+++ b/fs/befs/Kconfig
@@ -0,0 +1,26 @@
+config BEFS_FS
+ tristate "BeOS file system (BeFS) support (read only) (EXPERIMENTAL)"
+ depends on BLOCK && EXPERIMENTAL
+ select NLS
+ help
+ The BeOS File System (BeFS) is the native file system of Be, Inc's
+ BeOS. Notable features include support for arbitrary attributes
+ on files and directories, and database-like indices on selected
+ attributes. (Also note that this driver doesn't make those features
+ available at this time). It is a 64 bit filesystem, so it supports
+ extremely large volumes and files.
+
+ If you use this filesystem, you should also say Y to at least one
+ of the NLS (native language support) options below.
+
+ If you don't know what this is about, say N.
+
+ To compile this as a module, choose M here: the module will be
+ called befs.
+
+config BEFS_DEBUG
+ bool "Debug BeFS"
+ depends on BEFS_FS
+ help
+ If you say Y here, you can use the 'debug' mount option to enable
+ debugging output from the driver.
diff --git a/fs/bfs/Kconfig b/fs/bfs/Kconfig
new file mode 100644
index 000000000000..c2336c62024f
--- /dev/null
+++ b/fs/bfs/Kconfig
@@ -0,0 +1,19 @@
+config BFS_FS
+ tristate "BFS file system support (EXPERIMENTAL)"
+ depends on BLOCK && EXPERIMENTAL
+ help
+ Boot File System (BFS) is a file system used under SCO UnixWare to
+ allow the bootloader access to the kernel image and other important
+ files during the boot process. It is usually mounted under /stand
+ and corresponds to the slice marked as "STAND" in the UnixWare
+ partition. You should say Y if you want to read or write the files
+ on your /stand slice from within Linux. You then also need to say Y
+ to "UnixWare slices support", below. More information about the BFS
+ file system is contained in the file
+ <file:Documentation/filesystems/bfs.txt>.
+
+ If you don't know what this is about, say N.
+
+ To compile this as a module, choose M here: the module will be called
+ bfs. Note that the file system of your root partition (the one
+ containing the directory /) cannot be compiled as a module.
diff --git a/fs/bio-integrity.c b/fs/bio-integrity.c
index 77ebc3c263d6..549b0144da11 100644
--- a/fs/bio-integrity.c
+++ b/fs/bio-integrity.c
@@ -140,7 +140,6 @@ int bio_integrity_add_page(struct bio *bio, struct page *page,
iv = bip_vec_idx(bip, bip->bip_vcnt);
BUG_ON(iv == NULL);
- BUG_ON(iv->bv_page != NULL);
iv->bv_page = page;
iv->bv_len = len;
@@ -465,7 +464,7 @@ static int bio_integrity_verify(struct bio *bio)
if (ret) {
kunmap_atomic(kaddr, KM_USER0);
- break;
+ return ret;
}
sectors = bv->bv_len / bi->sector_size;
@@ -493,18 +492,13 @@ static void bio_integrity_verify_fn(struct work_struct *work)
struct bio_integrity_payload *bip =
container_of(work, struct bio_integrity_payload, bip_work);
struct bio *bio = bip->bip_bio;
- int error = bip->bip_error;
+ int error;
- if (bio_integrity_verify(bio)) {
- clear_bit(BIO_UPTODATE, &bio->bi_flags);
- error = -EIO;
- }
+ error = bio_integrity_verify(bio);
/* Restore original bio completion handler */
bio->bi_end_io = bip->bip_end_io;
-
- if (bio->bi_end_io)
- bio->bi_end_io(bio, error);
+ bio_endio(bio, error);
}
/**
@@ -525,7 +519,17 @@ void bio_integrity_endio(struct bio *bio, int error)
BUG_ON(bip->bip_bio != bio);
- bip->bip_error = error;
+ /* In case of an I/O error there is no point in verifying the
+ * integrity metadata. Restore original bio end_io handler
+ * and run it.
+ */
+ if (error) {
+ bio->bi_end_io = bip->bip_end_io;
+ bio_endio(bio, error);
+
+ return;
+ }
+
INIT_WORK(&bip->bip_work, bio_integrity_verify_fn);
queue_work(kintegrityd_wq, &bip->bip_work);
}
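
The bio-integrity rework above adopts an early-completion pattern: on an I/O error there is nothing worth verifying, so the saved completion handler is restored and the bio finished immediately rather than queuing verification work. A condensed sketch with illustrative structure and field names:

#include <linux/bio.h>

struct example_payload {
	struct bio	*bio;
	bio_end_io_t	*saved_end_io;	/* original ->bi_end_io */
};

static void example_endio(struct bio *bio, int error,
			  struct example_payload *p)
{
	if (error) {
		bio->bi_end_io = p->saved_end_io;
		bio_endio(bio, error);	/* complete now, skip verification */
		return;
	}
	/* otherwise defer to a workqueue for integrity verification */
}
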
diff --git a/fs/btrfs/Kconfig b/fs/btrfs/Kconfig
new file mode 100644
index 000000000000..f8fcf999ea1b
--- /dev/null
+++ b/fs/btrfs/Kconfig
@@ -0,0 +1,18 @@
+config BTRFS_FS
+ tristate "Btrfs filesystem (EXPERIMENTAL) Unstable disk format"
+ depends on EXPERIMENTAL
+ select LIBCRC32C
+ select ZLIB_INFLATE
+ select ZLIB_DEFLATE
+ help
+ Btrfs is a new filesystem with extents, writable snapshotting,
+ support for multiple devices and many more features.
+
+ Btrfs is highly experimental, and THE DISK FORMAT IS NOT YET
+ FINALIZED. You should say N here unless you are interested in
+ testing Btrfs with non-critical data.
+
+ To compile this file system support as a module, choose M here. The
+ module will be called btrfs.
+
+ If unsure, say N.
diff --git a/fs/btrfs/locking.c b/fs/btrfs/locking.c
index 39bae7761db6..40ba8e8962f8 100644
--- a/fs/btrfs/locking.c
+++ b/fs/btrfs/locking.c
@@ -37,16 +37,6 @@
int btrfs_tree_lock(struct extent_buffer *eb)
{
- int i;
-
- if (mutex_trylock(&eb->mutex))
- return 0;
- for (i = 0; i < 512; i++) {
- cpu_relax();
- if (mutex_trylock(&eb->mutex))
- return 0;
- }
- cpu_relax();
mutex_lock_nested(&eb->mutex, BTRFS_MAX_LEVEL - btrfs_header_level(eb));
return 0;
}
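
The deleted lines implemented a bounded adaptive spin: try the mutex, busy-wait up to 512 iterations with cpu_relax() in the hope the holder drops it quickly, then fall back to blocking. The retained mutex_lock_nested() call supplies a lockdep subclass derived from the node's tree level, so taking a child's lock while holding the parent's does not trip lockdep's recursion checks. For reference, the removed spin-then-block shape, as a hedged userspace sketch using pthreads (the 512 bound mirrors the deleted loop; a compiler barrier roughly stands in for cpu_relax()):

#include <pthread.h>

/* Userspace approximation of the adaptive-spin pattern the patch removes:
 * try the lock, spin briefly hoping the holder releases it, then block. */
static void spin_then_block(pthread_mutex_t *m)
{
	int i;

	if (pthread_mutex_trylock(m) == 0)
		return;
	for (i = 0; i < 512; i++) {
		__asm__ volatile("" ::: "memory"); /* rough cpu_relax() */
		if (pthread_mutex_trylock(m) == 0)
			return;
	}
	pthread_mutex_lock(m); /* give up spinning and sleep */
}
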
diff --git a/fs/cachefiles/Kconfig b/fs/cachefiles/Kconfig
new file mode 100644
index 000000000000..80e9c6167f0b
--- /dev/null
+++ b/fs/cachefiles/Kconfig
@@ -0,0 +1,39 @@
+
+config CACHEFILES
+ tristate "Filesystem caching on files"
+ depends on FSCACHE && BLOCK
+ help
+ This permits use of a mounted filesystem as a cache for other
+ filesystems - primarily networking filesystems - thus allowing fast
+ local disk to enhance the speed of slower devices.
+
+ See Documentation/filesystems/caching/cachefiles.txt for more
+ information.
+
+config CACHEFILES_DEBUG
+ bool "Debug CacheFiles"
+ depends on CACHEFILES
+ help
+ This permits debugging to be dynamically enabled in the filesystem
+ caching on files module. If this is set, the debugging output may be
+ enabled by setting bits in /sys/module/cachefiles/parameters/debug or
+ by including a debugging specifier in /etc/cachefilesd.conf.
+
+config CACHEFILES_HISTOGRAM
+ bool "Gather latency information on CacheFiles"
+ depends on CACHEFILES && PROC_FS
+ help
+ This option causes latency information to be gathered on CacheFiles
+ operation and exported through file:
+
+ /proc/fs/cachefiles/histogram
+
+ The generation of this histogram adds a certain amount of overhead to
+ execution as there are a number of points at which data is gathered,
+ and on a multi-CPU system these may be on cachelines that keep
+ bouncing between CPUs. On the other hand, the histogram may be
+ useful for debugging purposes. Saying 'N' here is recommended.
+
+ See Documentation/filesystems/caching/cachefiles.txt for more
+ information.
diff --git a/fs/cachefiles/Makefile b/fs/cachefiles/Makefile
new file mode 100644
index 000000000000..8a9c1bdc205f
--- /dev/null
+++ b/fs/cachefiles/Makefile
@@ -0,0 +1,18 @@
+#
+# Makefile for caching in a mounted filesystem
+#
+
+cachefiles-y := \
+ cf-bind.o \
+ cf-daemon.o \
+ cf-interface.o \
+ cf-key.o \
+ cf-main.o \
+ cf-namei.o \
+ cf-rdwr.o \
+ cf-security.o \
+ cf-xattr.o
+
+cachefiles-$(CONFIG_CACHEFILES_HISTOGRAM) += cf-proc.o
+
+obj-$(CONFIG_CACHEFILES) := cachefiles.o
diff --git a/fs/cachefiles/cf-bind.c b/fs/cachefiles/cf-bind.c
new file mode 100644
index 000000000000..5a8b30fd1a3e
--- /dev/null
+++ b/fs/cachefiles/cf-bind.c
@@ -0,0 +1,286 @@
+/* Bind and unbind a cache from the filesystem backing it
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/statfs.h>
+#include <linux/ctype.h>
+#include "cf-internal.h"
+
+static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache);
+
+/*
+ * bind a directory as a cache
+ */
+int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args)
+{
+ _enter("{%u,%u,%u,%u,%u,%u},%s",
+ cache->frun_percent,
+ cache->fcull_percent,
+ cache->fstop_percent,
+ cache->brun_percent,
+ cache->bcull_percent,
+ cache->bstop_percent,
+ args);
+
+ /* start by checking things over */
+ ASSERT(cache->fstop_percent >= 0 &&
+ cache->fstop_percent < cache->fcull_percent &&
+ cache->fcull_percent < cache->frun_percent &&
+ cache->frun_percent < 100);
+
+ ASSERT(cache->bstop_percent >= 0 &&
+ cache->bstop_percent < cache->bcull_percent &&
+ cache->bcull_percent < cache->brun_percent &&
+ cache->brun_percent < 100);
+
+ if (*args) {
+ kerror("'bind' command doesn't take an argument");
+ return -EINVAL;
+ }
+
+ if (!cache->rootdirname) {
+ kerror("No cache directory specified");
+ return -EINVAL;
+ }
+
+ /* don't permit already bound caches to be re-bound */
+ if (test_bit(CACHEFILES_READY, &cache->flags)) {
+ kerror("Cache already bound");
+ return -EBUSY;
+ }
+
+ /* make sure we have copies of the tag and dirname strings */
+ if (!cache->tag) {
+ /* the tag string is released by the fops->release()
+ * function, so we don't release it on error here */
+ cache->tag = kstrdup("CacheFiles", GFP_KERNEL);
+ if (!cache->tag)
+ return -ENOMEM;
+ }
+
+ /* add the cache */
+ return cachefiles_daemon_add_cache(cache);
+}
+
+/*
+ * add a cache
+ */
+static int cachefiles_daemon_add_cache(struct cachefiles_cache *cache)
+{
+ struct cachefiles_object *fsdef;
+ struct nameidata nd;
+ struct kstatfs stats;
+ struct dentry *graveyard, *cachedir, *root;
+ const struct cred *saved_cred;
+ int ret;
+
+ _enter("");
+
+ /* we want to work under the module's security ID */
+ ret = cachefiles_get_security_ID(cache);
+ if (ret < 0)
+ return ret;
+
+ cachefiles_begin_secure(cache, &saved_cred);
+
+ /* allocate the root index object */
+ ret = -ENOMEM;
+
+ fsdef = kmem_cache_alloc(cachefiles_object_jar, GFP_KERNEL);
+ if (!fsdef)
+ goto error_root_object;
+
+ ASSERTCMP(fsdef->backer, ==, NULL);
+
+ atomic_set(&fsdef->usage, 1);
+ fsdef->type = FSCACHE_COOKIE_TYPE_INDEX;
+
+ _debug("- fsdef %p", fsdef);
+
+ /* look up the directory at the root of the cache */
+ memset(&nd, 0, sizeof(nd));
+
+ ret = path_lookup(cache->rootdirname, LOOKUP_DIRECTORY, &nd);
+ if (ret < 0)
+ goto error_open_root;
+
+ cache->mnt = mntget(nd.path.mnt);
+ root = dget(nd.path.dentry);
+ path_put(&nd.path);
+
+ /* check parameters */
+ ret = -EOPNOTSUPP;
+ if (!root->d_inode ||
+ !root->d_inode->i_op ||
+ !root->d_inode->i_op->lookup ||
+ !root->d_inode->i_op->mkdir ||
+ !root->d_inode->i_op->setxattr ||
+ !root->d_inode->i_op->getxattr ||
+ !root->d_sb ||
+ !root->d_sb->s_op ||
+ !root->d_sb->s_op->statfs ||
+ !root->d_sb->s_op->sync_fs)
+ goto error_unsupported;
+
+ ret = -EROFS;
+ if (root->d_sb->s_flags & MS_RDONLY)
+ goto error_unsupported;
+
+ /* determine the security of the on-disk cache as this governs
+ * security ID of files we create */
+ ret = cachefiles_determine_cache_security(cache, root, &saved_cred);
+ if (ret < 0)
+ goto error_unsupported;
+
+ /* get the cache size and blocksize */
+ ret = vfs_statfs(root, &stats);
+ if (ret < 0)
+ goto error_unsupported;
+
+ ret = -ERANGE;
+ if (stats.f_bsize <= 0)
+ goto error_unsupported;
+
+ ret = -EOPNOTSUPP;
+ if (stats.f_bsize > PAGE_SIZE)
+ goto error_unsupported;
+
+ cache->bsize = stats.f_bsize;
+ cache->bshift = 0;
+ if (stats.f_bsize < PAGE_SIZE)
+ cache->bshift = PAGE_SHIFT - ilog2(stats.f_bsize);
+
+ _debug("blksize %u (shift %u)",
+ cache->bsize, cache->bshift);
+
+ _debug("size %llu, avail %llu",
+ (unsigned long long) stats.f_blocks,
+ (unsigned long long) stats.f_bavail);
+
+ /* set up caching limits */
+ do_div(stats.f_files, 100);
+ cache->fstop = stats.f_files * cache->fstop_percent;
+ cache->fcull = stats.f_files * cache->fcull_percent;
+ cache->frun = stats.f_files * cache->frun_percent;
+
+ _debug("limits {%llu,%llu,%llu} files",
+ (unsigned long long) cache->frun,
+ (unsigned long long) cache->fcull,
+ (unsigned long long) cache->fstop);
+
+ stats.f_blocks >>= cache->bshift;
+ do_div(stats.f_blocks, 100);
+ cache->bstop = stats.f_blocks * cache->bstop_percent;
+ cache->bcull = stats.f_blocks * cache->bcull_percent;
+ cache->brun = stats.f_blocks * cache->brun_percent;
+
+ _debug("limits {%llu,%llu,%llu} blocks",
+ (unsigned long long) cache->brun,
+ (unsigned long long) cache->bcull,
+ (unsigned long long) cache->bstop);
+
+ /* get the cache directory and check its type */
+ cachedir = cachefiles_get_directory(cache, root, "cache");
+ if (IS_ERR(cachedir)) {
+ ret = PTR_ERR(cachedir);
+ goto error_unsupported;
+ }
+
+ fsdef->dentry = cachedir;
+ fsdef->fscache.cookie = NULL;
+
+ ret = cachefiles_check_object_type(fsdef);
+ if (ret < 0)
+ goto error_unsupported;
+
+ /* get the graveyard directory */
+ graveyard = cachefiles_get_directory(cache, root, "graveyard");
+ if (IS_ERR(graveyard)) {
+ ret = PTR_ERR(graveyard);
+ goto error_unsupported;
+ }
+
+ cache->graveyard = graveyard;
+
+ /* publish the cache */
+ fscache_init_cache(&cache->cache,
+ &cachefiles_cache_ops,
+ "%s",
+ fsdef->dentry->d_sb->s_id);
+
+ fscache_object_init(&fsdef->fscache, NULL, &cache->cache);
+
+ ret = fscache_add_cache(&cache->cache, &fsdef->fscache, cache->tag);
+ if (ret < 0)
+ goto error_add_cache;
+
+ /* done */
+ set_bit(CACHEFILES_READY, &cache->flags);
+ dput(root);
+
+ printk(KERN_INFO "CacheFiles:"
+ " File cache on %s registered\n",
+ cache->cache.identifier);
+
+ /* check how much space the cache has */
+ cachefiles_has_space(cache, 0, 0);
+ cachefiles_end_secure(cache, saved_cred);
+ return 0;
+
+error_add_cache:
+ dput(cache->graveyard);
+ cache->graveyard = NULL;
+error_unsupported:
+ mntput(cache->mnt);
+ cache->mnt = NULL;
+ dput(fsdef->dentry);
+ fsdef->dentry = NULL;
+ dput(root);
+error_open_root:
+ kmem_cache_free(cachefiles_object_jar, fsdef);
+error_root_object:
+ cachefiles_end_secure(cache, saved_cred);
+ kerror("Failed to register: %d", ret);
+ return ret;
+}
+
+/*
+ * unbind a cache on fd release
+ */
+void cachefiles_daemon_unbind(struct cachefiles_cache *cache)
+{
+ _enter("");
+
+ if (test_bit(CACHEFILES_READY, &cache->flags)) {
+ printk(KERN_INFO "CacheFiles:"
+ " File cache on %s unregistering\n",
+ cache->cache.identifier);
+
+ fscache_withdraw_cache(&cache->cache);
+ }
+
+ dput(cache->graveyard);
+ mntput(cache->mnt);
+
+ kfree(cache->rootdirname);
+ kfree(cache->secctx);
+ kfree(cache->tag);
+
+ _leave("");
+}
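
The limit computation above deliberately divides the statfs totals by 100 before multiplying by the configured percentages, keeping the intermediate product within 64 bits even on very large filesystems (at the cost of rounding down to the nearest 1%). A small standalone illustration of that ordering, with an illustrative inode count:

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	uint64_t f_files = 52428800; /* illustrative total inode count */
	unsigned fstop = 1, fcull = 5, frun = 7; /* default percentages */

	f_files /= 100; /* mirrors do_div(stats.f_files, 100) */
	printf("fstop=%" PRIu64 " fcull=%" PRIu64 " frun=%" PRIu64 "\n",
	       f_files * fstop, f_files * fcull, f_files * frun);
	return 0;
}
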
diff --git a/fs/cachefiles/cf-daemon.c b/fs/cachefiles/cf-daemon.c
new file mode 100644
index 000000000000..6aa6e6527b77
--- /dev/null
+++ b/fs/cachefiles/cf-daemon.c
@@ -0,0 +1,754 @@
+/* Daemon interface
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/namei.h>
+#include <linux/poll.h>
+#include <linux/mount.h>
+#include <linux/statfs.h>
+#include <linux/ctype.h>
+#include "cf-internal.h"
+
+static int cachefiles_daemon_open(struct inode *, struct file *);
+static int cachefiles_daemon_release(struct inode *, struct file *);
+static ssize_t cachefiles_daemon_read(struct file *, char __user *, size_t,
+ loff_t *);
+static ssize_t cachefiles_daemon_write(struct file *, const char __user *,
+ size_t, loff_t *);
+static unsigned int cachefiles_daemon_poll(struct file *,
+ struct poll_table_struct *);
+static int cachefiles_daemon_frun(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_fcull(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_fstop(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_brun(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_bcull(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_bstop(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_cull(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_debug(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_dir(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_inuse(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_secctx(struct cachefiles_cache *, char *);
+static int cachefiles_daemon_tag(struct cachefiles_cache *, char *);
+
+static unsigned long cachefiles_open;
+
+const struct file_operations cachefiles_daemon_fops = {
+ .owner = THIS_MODULE,
+ .open = cachefiles_daemon_open,
+ .release = cachefiles_daemon_release,
+ .read = cachefiles_daemon_read,
+ .write = cachefiles_daemon_write,
+ .poll = cachefiles_daemon_poll,
+};
+
+struct cachefiles_daemon_cmd {
+ char name[8];
+ int (*handler)(struct cachefiles_cache *cache, char *args);
+};
+
+static const struct cachefiles_daemon_cmd cachefiles_daemon_cmds[] = {
+ { "bind", cachefiles_daemon_bind },
+ { "brun", cachefiles_daemon_brun },
+ { "bcull", cachefiles_daemon_bcull },
+ { "bstop", cachefiles_daemon_bstop },
+ { "cull", cachefiles_daemon_cull },
+ { "debug", cachefiles_daemon_debug },
+ { "dir", cachefiles_daemon_dir },
+ { "frun", cachefiles_daemon_frun },
+ { "fcull", cachefiles_daemon_fcull },
+ { "fstop", cachefiles_daemon_fstop },
+ { "inuse", cachefiles_daemon_inuse },
+ { "secctx", cachefiles_daemon_secctx },
+ { "tag", cachefiles_daemon_tag },
+ { "", NULL }
+};
+
+
+/*
+ * do various checks
+ */
+static int cachefiles_daemon_open(struct inode *inode, struct file *file)
+{
+ struct cachefiles_cache *cache;
+
+ _enter("");
+
+ /* only the superuser may do this */
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+
+ /* the cachefiles device may only be open once at a time */
+ if (xchg(&cachefiles_open, 1) == 1)
+ return -EBUSY;
+
+ /* allocate a cache record */
+ cache = kzalloc(sizeof(struct cachefiles_cache), GFP_KERNEL);
+ if (!cache) {
+ cachefiles_open = 0;
+ return -ENOMEM;
+ }
+
+ mutex_init(&cache->daemon_mutex);
+ cache->active_nodes = RB_ROOT;
+ rwlock_init(&cache->active_lock);
+ init_waitqueue_head(&cache->daemon_pollwq);
+
+ /* set default caching limits
+ * - stop allocating at 1% free space and/or free files
+ * - start culling below 5% free space and/or free files
+ * - cease culling above 7% free space and/or free files
+ */
+ cache->frun_percent = 7;
+ cache->fcull_percent = 5;
+ cache->fstop_percent = 1;
+ cache->brun_percent = 7;
+ cache->bcull_percent = 5;
+ cache->bstop_percent = 1;
+
+ file->private_data = cache;
+ cache->cachefilesd = file;
+ return 0;
+}
+
+/*
+ * release a cache
+ */
+static int cachefiles_daemon_release(struct inode *inode, struct file *file)
+{
+ struct cachefiles_cache *cache = file->private_data;
+
+ _enter("");
+
+ ASSERT(cache);
+
+ set_bit(CACHEFILES_DEAD, &cache->flags);
+
+ cachefiles_daemon_unbind(cache);
+
+ ASSERT(!cache->active_nodes.rb_node);
+
+ /* clean up the control file interface */
+ cache->cachefilesd = NULL;
+ file->private_data = NULL;
+ cachefiles_open = 0;
+
+ kfree(cache);
+
+ _leave("");
+ return 0;
+}
+
+/*
+ * read the cache state
+ */
+static ssize_t cachefiles_daemon_read(struct file *file, char __user *_buffer,
+ size_t buflen, loff_t *pos)
+{
+ struct cachefiles_cache *cache = file->private_data;
+ char buffer[256];
+ int n;
+
+ //_enter(",,%zu,", buflen);
+
+ if (!test_bit(CACHEFILES_READY, &cache->flags))
+ return 0;
+
+ /* check how much space the cache has */
+ cachefiles_has_space(cache, 0, 0);
+
+ /* summarise */
+ clear_bit(CACHEFILES_STATE_CHANGED, &cache->flags);
+
+ n = snprintf(buffer, sizeof(buffer),
+ "cull=%c"
+ " frun=%llx"
+ " fcull=%llx"
+ " fstop=%llx"
+ " brun=%llx"
+ " bcull=%llx"
+ " bstop=%llx",
+ test_bit(CACHEFILES_CULLING, &cache->flags) ? '1' : '0',
+ (unsigned long long) cache->frun,
+ (unsigned long long) cache->fcull,
+ (unsigned long long) cache->fstop,
+ (unsigned long long) cache->brun,
+ (unsigned long long) cache->bcull,
+ (unsigned long long) cache->bstop
+ );
+
+ if (n > buflen)
+ return -EMSGSIZE;
+
+ if (copy_to_user(_buffer, buffer, n) != 0)
+ return -EFAULT;
+
+ return n;
+}
+
+/*
+ * command the cache
+ */
+static ssize_t cachefiles_daemon_write(struct file *file,
+ const char __user *_data,
+ size_t datalen,
+ loff_t *pos)
+{
+ const struct cachefiles_daemon_cmd *cmd;
+ struct cachefiles_cache *cache = file->private_data;
+ ssize_t ret;
+ char *data, *args, *cp;
+
+ //_enter(",,%zu,", datalen);
+
+ ASSERT(cache);
+
+ if (test_bit(CACHEFILES_DEAD, &cache->flags))
+ return -EIO;
+
+ if (datalen > PAGE_SIZE - 1)
+ return -EOPNOTSUPP;
+
+ /* drag the command string into the kernel so we can parse it */
+ data = kmalloc(datalen + 1, GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+
+ ret = -EFAULT;
+ if (copy_from_user(data, _data, datalen) != 0)
+ goto error;
+
+ data[datalen] = '\0';
+
+ ret = -EINVAL;
+ if (memchr(data, '\0', datalen))
+ goto error;
+
+ /* strip any newline */
+ cp = memchr(data, '\n', datalen);
+ if (cp) {
+ if (cp == data)
+ goto error;
+
+ *cp = '\0';
+ }
+
+ /* parse the command */
+ ret = -EOPNOTSUPP;
+
+ for (args = data; *args; args++)
+ if (isspace(*args))
+ break;
+ if (*args) {
+ if (args == data)
+ goto error;
+ *args = '\0';
+ for (args++; isspace(*args); args++)
+ continue;
+ }
+
+ /* run the appropriate command handler */
+ for (cmd = cachefiles_daemon_cmds; cmd->name[0]; cmd++)
+ if (strcmp(cmd->name, data) == 0)
+ goto found_command;
+
+error:
+ kfree(data);
+ //_leave(" = %zd", ret);
+ return ret;
+
+found_command:
+ mutex_lock(&cache->daemon_mutex);
+
+ ret = -EIO;
+ if (!test_bit(CACHEFILES_DEAD, &cache->flags))
+ ret = cmd->handler(cache, args);
+
+ mutex_unlock(&cache->daemon_mutex);
+
+ if (ret == 0)
+ ret = datalen;
+ goto error;
+}
+
+/*
+ * poll for culling state
+ * - use POLLOUT to indicate culling state
+ */
+static unsigned int cachefiles_daemon_poll(struct file *file,
+ struct poll_table_struct *poll)
+{
+ struct cachefiles_cache *cache = file->private_data;
+ unsigned int mask;
+
+ poll_wait(file, &cache->daemon_pollwq, poll);
+ mask = 0;
+
+ if (test_bit(CACHEFILES_STATE_CHANGED, &cache->flags))
+ mask |= POLLIN;
+
+ if (test_bit(CACHEFILES_CULLING, &cache->flags))
+ mask |= POLLOUT;
+
+ return mask;
+}
+
+/*
+ * give a range error for cache space constraints
+ * - can be tail-called
+ */
+static int cachefiles_daemon_range_error(struct cachefiles_cache *cache,
+ char *args)
+{
+ kerror("Free space limits must be in range"
+ " 0%%<=stop<cull<run<100%%");
+
+ return -EINVAL;
+}
+
+/*
+ * set the percentage of files at which to stop culling
+ * - command: "frun <N>%"
+ */
+static int cachefiles_daemon_frun(struct cachefiles_cache *cache, char *args)
+{
+ unsigned long frun;
+
+ _enter(",%s", args);
+
+ if (!*args)
+ return -EINVAL;
+
+ frun = simple_strtoul(args, &args, 10);
+ if (args[0] != '%' || args[1] != '\0')
+ return -EINVAL;
+
+ if (frun <= cache->fcull_percent || frun >= 100)
+ return cachefiles_daemon_range_error(cache, args);
+
+ cache->frun_percent = frun;
+ return 0;
+}
+
+/*
+ * set the percentage of files at which to start culling
+ * - command: "fcull <N>%"
+ */
+static int cachefiles_daemon_fcull(struct cachefiles_cache *cache, char *args)
+{
+ unsigned long fcull;
+
+ _enter(",%s", args);
+
+ if (!*args)
+ return -EINVAL;
+
+ fcull = simple_strtoul(args, &args, 10);
+ if (args[0] != '%' || args[1] != '\0')
+ return -EINVAL;
+
+ if (fcull <= cache->fstop_percent || fcull >= cache->frun_percent)
+ return cachefiles_daemon_range_error(cache, args);
+
+ cache->fcull_percent = fcull;
+ return 0;
+}
+
+/*
+ * set the percentage of files at which to stop allocating
+ * - command: "fstop <N>%"
+ */
+static int cachefiles_daemon_fstop(struct cachefiles_cache *cache, char *args)
+{
+ unsigned long fstop;
+
+ _enter(",%s", args);
+
+ if (!*args)
+ return -EINVAL;
+
+ fstop = simple_strtoul(args, &args, 10);
+ if (args[0] != '%' || args[1] != '\0')
+ return -EINVAL;
+
+ if (fstop >= cache->fcull_percent)
+ return cachefiles_daemon_range_error(cache, args);
+
+ cache->fstop_percent = fstop;
+ return 0;
+}
+
+/*
+ * set the percentage of blocks at which to stop culling
+ * - command: "brun <N>%"
+ */
+static int cachefiles_daemon_brun(struct cachefiles_cache *cache, char *args)
+{
+ unsigned long brun;
+
+ _enter(",%s", args);
+
+ if (!*args)
+ return -EINVAL;
+
+ brun = simple_strtoul(args, &args, 10);
+ if (args[0] != '%' || args[1] != '\0')
+ return -EINVAL;
+
+ if (brun <= cache->bcull_percent || brun >= 100)
+ return cachefiles_daemon_range_error(cache, args);
+
+ cache->brun_percent = brun;
+ return 0;
+}
+
+/*
+ * set the percentage of blocks at which to start culling
+ * - command: "bcull <N>%"
+ */
+static int cachefiles_daemon_bcull(struct cachefiles_cache *cache, char *args)
+{
+ unsigned long bcull;
+
+ _enter(",%s", args);
+
+ if (!*args)
+ return -EINVAL;
+
+ bcull = simple_strtoul(args, &args, 10);
+ if (args[0] != '%' || args[1] != '\0')
+ return -EINVAL;
+
+ if (bcull <= cache->bstop_percent || bcull >= cache->brun_percent)
+ return cachefiles_daemon_range_error(cache, args);
+
+ cache->bcull_percent = bcull;
+ return 0;
+}
+
+/*
+ * set the percentage of blocks at which to stop allocating
+ * - command: "bstop <N>%"
+ */
+static int cachefiles_daemon_bstop(struct cachefiles_cache *cache, char *args)
+{
+ unsigned long bstop;
+
+ _enter(",%s", args);
+
+ if (!*args)
+ return -EINVAL;
+
+ bstop = simple_strtoul(args, &args, 10);
+ if (args[0] != '%' || args[1] != '\0')
+ return -EINVAL;
+
+ if (bstop >= cache->bcull_percent)
+ return cachefiles_daemon_range_error(cache, args);
+
+ cache->bstop_percent = bstop;
+ return 0;
+}
+
+/*
+ * set the cache directory
+ * - command: "dir <name>"
+ */
+static int cachefiles_daemon_dir(struct cachefiles_cache *cache, char *args)
+{
+ char *dir;
+
+ _enter(",%s", args);
+
+ if (!*args) {
+ kerror("Empty directory specified");
+ return -EINVAL;
+ }
+
+ if (cache->rootdirname) {
+ kerror("Second cache directory specified");
+ return -EEXIST;
+ }
+
+ dir = kstrdup(args, GFP_KERNEL);
+ if (!dir)
+ return -ENOMEM;
+
+ cache->rootdirname = dir;
+ return 0;
+}
+
+/*
+ * set the cache security context
+ * - command: "secctx <ctx>"
+ */
+static int cachefiles_daemon_secctx(struct cachefiles_cache *cache, char *args)
+{
+ char *secctx;
+
+ _enter(",%s", args);
+
+ if (!*args) {
+ kerror("Empty security context specified");
+ return -EINVAL;
+ }
+
+ if (cache->secctx) {
+ kerror("Second security context specified");
+ return -EINVAL;
+ }
+
+ secctx = kstrdup(args, GFP_KERNEL);
+ if (!secctx)
+ return -ENOMEM;
+
+ cache->secctx = secctx;
+ return 0;
+}
+
+/*
+ * set the cache tag
+ * - command: "tag <name>"
+ */
+static int cachefiles_daemon_tag(struct cachefiles_cache *cache, char *args)
+{
+ char *tag;
+
+ _enter(",%s", args);
+
+ if (!*args) {
+ kerror("Empty tag specified");
+ return -EINVAL;
+ }
+
+ if (cache->tag)
+ return -EEXIST;
+
+ tag = kstrdup(args, GFP_KERNEL);
+ if (!tag)
+ return -ENOMEM;
+
+ cache->tag = tag;
+ return 0;
+}
+
+/*
+ * request a node in the cache be culled from the current working directory
+ * - command: "cull <name>"
+ */
+static int cachefiles_daemon_cull(struct cachefiles_cache *cache, char *args)
+{
+ struct fs_struct *fs;
+ struct dentry *dir;
+ const struct cred *saved_cred;
+ int ret;
+
+ _enter(",%s", args);
+
+ if (strchr(args, '/'))
+ goto inval;
+
+ if (!test_bit(CACHEFILES_READY, &cache->flags)) {
+ kerror("cull applied to unready cache");
+ return -EIO;
+ }
+
+ if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+ kerror("cull applied to dead cache");
+ return -EIO;
+ }
+
+ /* extract the directory dentry from the cwd */
+ fs = current->fs;
+ read_lock(&fs->lock);
+ dir = dget(fs->pwd.dentry);
+ read_unlock(&fs->lock);
+
+ if (!S_ISDIR(dir->d_inode->i_mode))
+ goto notdir;
+
+ cachefiles_begin_secure(cache, &saved_cred);
+ ret = cachefiles_cull(cache, dir, args);
+ cachefiles_end_secure(cache, saved_cred);
+
+ dput(dir);
+ _leave(" = %d", ret);
+ return ret;
+
+notdir:
+ dput(dir);
+ kerror("cull command requires dirfd to be a directory");
+ return -ENOTDIR;
+
+inval:
+ kerror("cull command requires dirfd and filename");
+ return -EINVAL;
+}
+
+/*
+ * set debugging mode
+ * - command: "debug <mask>"
+ */
+static int cachefiles_daemon_debug(struct cachefiles_cache *cache, char *args)
+{
+ unsigned long mask;
+
+ _enter(",%s", args);
+
+ mask = simple_strtoul(args, &args, 0);
+ if (args[0] != '\0')
+ goto inval;
+
+ cachefiles_debug = mask;
+ _leave(" = 0");
+ return 0;
+
+inval:
+ kerror("debug command requires mask");
+ return -EINVAL;
+}
+
+/*
+ * find out whether an object in the current working directory is in use or not
+ * - command: "inuse <name>"
+ */
+static int cachefiles_daemon_inuse(struct cachefiles_cache *cache, char *args)
+{
+ struct fs_struct *fs;
+ struct dentry *dir;
+ const struct cred *saved_cred;
+ int ret;
+
+ //_enter(",%s", args);
+
+ if (strchr(args, '/'))
+ goto inval;
+
+ if (!test_bit(CACHEFILES_READY, &cache->flags)) {
+ kerror("inuse applied to unready cache");
+ return -EIO;
+ }
+
+ if (test_bit(CACHEFILES_DEAD, &cache->flags)) {
+ kerror("inuse applied to dead cache");
+ return -EIO;
+ }
+
+ /* extract the directory dentry from the cwd */
+ fs = current->fs;
+ read_lock(&fs->lock);
+ dir = dget(fs->pwd.dentry);
+ read_unlock(&fs->lock);
+
+ if (!S_ISDIR(dir->d_inode->i_mode))
+ goto notdir;
+
+ cachefiles_begin_secure(cache, &saved_cred);
+ ret = cachefiles_check_in_use(cache, dir, args);
+ cachefiles_end_secure(cache, saved_cred);
+
+ dput(dir);
+ //_leave(" = %d", ret);
+ return ret;
+
+notdir:
+ dput(dir);
+ kerror("inuse command requires dirfd to be a directory");
+ return -ENOTDIR;
+
+inval:
+ kerror("inuse command requires dirfd and filename");
+ return -EINVAL;
+}
+
+/*
+ * see if we have space for a number of pages and/or a number of files in the
+ * cache
+ */
+int cachefiles_has_space(struct cachefiles_cache *cache,
+ unsigned fnr, unsigned bnr)
+{
+ struct kstatfs stats;
+ int ret;
+
+ //_enter("{%llu,%llu,%llu,%llu,%llu,%llu},%u,%u",
+ // (unsigned long long) cache->frun,
+ // (unsigned long long) cache->fcull,
+ // (unsigned long long) cache->fstop,
+ // (unsigned long long) cache->brun,
+ // (unsigned long long) cache->bcull,
+ // (unsigned long long) cache->bstop,
+ // fnr, bnr);
+
+ /* find out how many pages of blockdev are available */
+ memset(&stats, 0, sizeof(stats));
+
+ ret = vfs_statfs(cache->mnt->mnt_root, &stats);
+ if (ret < 0) {
+ if (ret == -EIO)
+ cachefiles_io_error(cache, "statfs failed");
+ _leave(" = %d", ret);
+ return ret;
+ }
+
+ stats.f_bavail >>= cache->bshift;
+
+ //_debug("avail %llu,%llu",
+ // (unsigned long long) stats.f_ffree,
+ // (unsigned long long) stats.f_bavail);
+
+ /* see if there is sufficient space */
+ if (stats.f_ffree > fnr)
+ stats.f_ffree -= fnr;
+ else
+ stats.f_ffree = 0;
+
+ if (stats.f_bavail > bnr)
+ stats.f_bavail -= bnr;
+ else
+ stats.f_bavail = 0;
+
+ ret = -ENOBUFS;
+ if (stats.f_ffree < cache->fstop ||
+ stats.f_bavail < cache->bstop)
+ goto begin_cull;
+
+ ret = 0;
+ if (stats.f_ffree < cache->fcull ||
+ stats.f_bavail < cache->bcull)
+ goto begin_cull;
+
+ if (test_bit(CACHEFILES_CULLING, &cache->flags) &&
+ stats.f_ffree >= cache->frun &&
+ stats.f_bavail >= cache->brun &&
+ test_and_clear_bit(CACHEFILES_CULLING, &cache->flags)
+ ) {
+ _debug("cease culling");
+ cachefiles_state_changed(cache);
+ }
+
+ //_leave(" = 0");
+ return 0;
+
+begin_cull:
+ if (!test_and_set_bit(CACHEFILES_CULLING, &cache->flags)) {
+ _debug("### CULL CACHE ###");
+ cachefiles_state_changed(cache);
+ }
+
+ _leave(" = %d", ret);
+ return ret;
+}
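
The command table near the top of this file defines the daemon protocol: each write(2) carries one ASCII command, keyword first, arguments after a single space, with an optional trailing newline; a successful handler makes the write return the full length. A minimal sketch of a userspace binder built on that protocol, assuming the misc device surfaces as /dev/cachefiles (it is registered under the name "cachefiles" in cf-main.c) and using an illustrative cache path and tag:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Send one command to the cachefiles daemon interface. */
static int cf_cmd(int fd, const char *cmd)
{
	if (write(fd, cmd, strlen(cmd)) < 0) {
		perror(cmd);
		return -1;
	}
	return 0;
}

int main(void)
{
	/* CAP_SYS_ADMIN required; only one opener at a time
	 * (see cachefiles_daemon_open above). */
	int fd = open("/dev/cachefiles", O_RDWR);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	/* "dir" must precede "bind"; "tag" is optional and defaults to
	 * "CacheFiles". The path below is illustrative. */
	if (cf_cmd(fd, "dir /var/cache/fscache") ||
	    cf_cmd(fd, "tag mycache") ||
	    cf_cmd(fd, "bind"))
		return 1;
	pause(); /* the cache unbinds when this descriptor is released */
	return 0;
}
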
diff --git a/fs/cachefiles/cf-interface.c b/fs/cachefiles/cf-interface.c
new file mode 100644
index 000000000000..1c9e70521a39
--- /dev/null
+++ b/fs/cachefiles/cf-interface.c
@@ -0,0 +1,449 @@
+/* FS-Cache interface to CacheFiles
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/mount.h>
+#include <linux/buffer_head.h>
+#include "cf-internal.h"
+
+#define list_to_page(head) (list_entry((head)->prev, struct page, lru))
+
+struct cachefiles_lookup_data {
+ struct cachefiles_xattr *auxdata; /* auxiliary data */
+ char *key; /* key path */
+};
+
+static int cachefiles_attr_changed(struct fscache_object *_object);
+
+/*
+ * allocate an object record for a cookie lookup and prepare the lookup data
+ */
+static struct fscache_object *cachefiles_alloc_object(
+ struct fscache_cache *_cache,
+ struct fscache_cookie *cookie)
+{
+ struct cachefiles_lookup_data *lookup_data;
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
+ struct cachefiles_xattr *auxdata;
+ unsigned keylen, auxlen;
+ void *buffer;
+ char *key;
+
+ cache = container_of(_cache, struct cachefiles_cache, cache);
+
+ _enter("{%s},%p,", cache->cache.identifier, cookie);
+
+ lookup_data = kmalloc(sizeof(*lookup_data), GFP_KERNEL);
+ if (!lookup_data)
+ goto nomem_lookup_data;
+
+ /* create a new object record and a temporary leaf image */
+ object = kmem_cache_alloc(cachefiles_object_jar, GFP_KERNEL);
+ if (!object)
+ goto nomem_object;
+
+ ASSERTCMP(object->backer, ==, NULL);
+
+ BUG_ON(test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags));
+ atomic_set(&object->usage, 1);
+
+ fscache_object_init(&object->fscache, cookie, &cache->cache);
+
+ object->type = cookie->def->type;
+
+ /* get hold of the raw key
+ * - stick the length on the front and leave space on the back for the
+ * encoder
+ */
+ buffer = kmalloc((2 + 512) + 3, GFP_KERNEL);
+ if (!buffer)
+ goto nomem_buffer;
+
+ keylen = cookie->def->get_key(cookie->netfs_data, buffer + 2, 512);
+ ASSERTCMP(keylen, <, 512);
+
+ *(uint16_t *)buffer = keylen;
+ ((char *)buffer)[keylen + 2] = 0;
+ ((char *)buffer)[keylen + 3] = 0;
+ ((char *)buffer)[keylen + 4] = 0;
+
+ /* turn the raw key into something we can work with as a filename */
+ key = cachefiles_cook_key(buffer, keylen + 2, object->type);
+ if (!key)
+ goto nomem_key;
+
+ /* get hold of the auxiliary data and prepend the object type */
+ auxdata = buffer;
+ auxlen = 0;
+ if (cookie->def->get_aux) {
+ auxlen = cookie->def->get_aux(cookie->netfs_data,
+ auxdata->data, 511);
+ ASSERTCMP(auxlen, <, 511);
+ }
+
+ auxdata->len = auxlen + 1;
+ auxdata->type = cookie->def->type;
+
+ lookup_data->auxdata = auxdata;
+ lookup_data->key = key;
+ object->lookup_data = lookup_data;
+
+ _leave(" = %p [%p]", &object->fscache, lookup_data);
+ return &object->fscache;
+
+nomem_key:
+ kfree(buffer);
+nomem_buffer:
+ BUG_ON(test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags));
+ kmem_cache_free(cachefiles_object_jar, object);
+ fscache_object_destroyed(&cache->cache);
+nomem_object:
+ kfree(lookup_data);
+nomem_lookup_data:
+ _leave(" = -ENOMEM");
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * attempt to look up the nominated node in this cache
+ */
+static void cachefiles_lookup_object(struct fscache_object *_object)
+{
+ struct cachefiles_lookup_data *lookup_data;
+ struct cachefiles_object *parent, *object;
+ struct cachefiles_cache *cache;
+ const struct cred *saved_cred;
+ int ret;
+
+ _enter("{OBJ%x}", _object->debug_id);
+
+ cache = container_of(_object->cache, struct cachefiles_cache, cache);
+ parent = container_of(_object->parent,
+ struct cachefiles_object, fscache);
+ object = container_of(_object, struct cachefiles_object, fscache);
+ lookup_data = object->lookup_data;
+
+ ASSERTCMP(lookup_data, !=, NULL);
+
+ /* look up the key, creating any missing bits */
+ cachefiles_begin_secure(cache, &saved_cred);
+ ret = cachefiles_walk_to_object(parent, object,
+ lookup_data->key,
+ lookup_data->auxdata);
+ cachefiles_end_secure(cache, saved_cred);
+
+ /* polish off by setting the attributes of non-index files */
+ if (ret == 0 &&
+ object->fscache.cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX)
+ cachefiles_attr_changed(&object->fscache);
+
+ if (ret < 0) {
+ printk(KERN_WARNING "CacheFiles: Lookup failed error %d\n",
+ ret);
+ fscache_object_lookup_error(&object->fscache);
+ }
+
+ _leave(" [%d]", ret);
+}
+
+/*
+ * indication of lookup completion
+ */
+static void cachefiles_lookup_complete(struct fscache_object *_object)
+{
+ struct cachefiles_object *object;
+
+ object = container_of(_object, struct cachefiles_object, fscache);
+
+ _enter("{OBJ%x,%p}", object->fscache.debug_id, object->lookup_data);
+
+ if (object->lookup_data) {
+ kfree(object->lookup_data->key);
+ kfree(object->lookup_data->auxdata);
+ kfree(object->lookup_data);
+ object->lookup_data = NULL;
+ }
+}
+
+/*
+ * increment the usage count on an inode object (may fail if unmounting)
+ */
+static
+struct fscache_object *cachefiles_grab_object(struct fscache_object *_object)
+{
+ struct cachefiles_object *object =
+ container_of(_object, struct cachefiles_object, fscache);
+
+ _enter("{OBJ%x,%d}", _object->debug_id, atomic_read(&object->usage));
+
+#ifdef CACHEFILES_DEBUG_SLAB
+ ASSERT((atomic_read(&object->usage) & 0xffff0000) != 0x6b6b0000);
+#endif
+
+ atomic_inc(&object->usage);
+ return &object->fscache;
+}
+
+/*
+ * update the auxiliary data for an object on disk
+ */
+static void cachefiles_update_object(struct fscache_object *_object)
+{
+ struct cachefiles_object *object;
+ struct cachefiles_xattr *auxdata;
+ struct cachefiles_cache *cache;
+ struct fscache_cookie *cookie;
+ const struct cred *saved_cred;
+ unsigned auxlen;
+
+ _enter("{OBJ%x}", _object->debug_id);
+
+ object = container_of(_object, struct cachefiles_object, fscache);
+ cache = container_of(object->fscache.cache, struct cachefiles_cache,
+ cache);
+ cookie = object->fscache.cookie;
+
+ if (!cookie->def->get_aux) {
+ _leave(" [no aux]");
+ return;
+ }
+
+ auxdata = kmalloc(2 + 512 + 3, GFP_KERNEL);
+ if (!auxdata) {
+ _leave(" [nomem]");
+ return;
+ }
+
+ auxlen = cookie->def->get_aux(cookie->netfs_data, auxdata->data, 511);
+ ASSERTCMP(auxlen, <, 511);
+
+ auxdata->len = auxlen + 1;
+ auxdata->type = cookie->def->type;
+
+ cachefiles_begin_secure(cache, &saved_cred);
+ cachefiles_update_object_xattr(object, auxdata);
+ cachefiles_end_secure(cache, saved_cred);
+ kfree(auxdata);
+ _leave("");
+}
+
+/*
+ * discard the resources pinned by an object and effect retirement if
+ * requested
+ */
+static void cachefiles_drop_object(struct fscache_object *_object)
+{
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
+ const struct cred *saved_cred;
+
+ ASSERT(_object);
+
+ object = container_of(_object, struct cachefiles_object, fscache);
+
+ _enter("{OBJ%x,%d}",
+ object->fscache.debug_id, atomic_read(&object->usage));
+
+ cache = container_of(object->fscache.cache,
+ struct cachefiles_cache, cache);
+
+#ifdef CACHEFILES_DEBUG_SLAB
+ ASSERT((atomic_read(&object->usage) & 0xffff0000) != 0x6b6b0000);
+#endif
+
+ /* delete retired objects */
+ if (object->fscache.state == FSCACHE_OBJECT_RECYCLING &&
+ _object != cache->cache.fsdef
+ ) {
+ _debug("- retire object OBJ%x", object->fscache.debug_id);
+ cachefiles_begin_secure(cache, &saved_cred);
+ cachefiles_delete_object(cache, object);
+ cachefiles_end_secure(cache, saved_cred);
+ }
+
+ /* close the filesystem stuff attached to the object */
+ if (object->backer != object->dentry)
+ dput(object->backer);
+ object->backer = NULL;
+
+ /* note that the object is now inactive */
+ if (test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags)) {
+ write_lock(&cache->active_lock);
+ if (!test_and_clear_bit(CACHEFILES_OBJECT_ACTIVE,
+ &object->flags))
+ BUG();
+ rb_erase(&object->active_node, &cache->active_nodes);
+ wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
+ write_unlock(&cache->active_lock);
+ }
+
+ dput(object->dentry);
+ object->dentry = NULL;
+
+ _leave("");
+}
+
+/*
+ * dispose of a reference to an object
+ */
+static void cachefiles_put_object(struct fscache_object *_object)
+{
+ struct cachefiles_object *object;
+ struct fscache_cache *cache;
+
+ ASSERT(_object);
+
+ object = container_of(_object, struct cachefiles_object, fscache);
+
+ _enter("{OBJ%x,%d}",
+ object->fscache.debug_id, atomic_read(&object->usage));
+
+#ifdef CACHEFILES_DEBUG_SLAB
+ ASSERT((atomic_read(&object->usage) & 0xffff0000) != 0x6b6b0000);
+#endif
+
+ ASSERTIFCMP(object->fscache.parent,
+ object->fscache.parent->n_children, >, 0);
+
+ if (atomic_dec_and_test(&object->usage)) {
+ _debug("- kill object OBJ%x", object->fscache.debug_id);
+
+ ASSERT(!test_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags));
+ ASSERTCMP(object->fscache.parent, ==, NULL);
+ ASSERTCMP(object->backer, ==, NULL);
+ ASSERTCMP(object->dentry, ==, NULL);
+ ASSERTCMP(object->fscache.n_ops, ==, 0);
+ ASSERTCMP(object->fscache.n_children, ==, 0);
+
+ if (object->lookup_data) {
+ kfree(object->lookup_data->key);
+ kfree(object->lookup_data->auxdata);
+ kfree(object->lookup_data);
+ object->lookup_data = NULL;
+ }
+
+ cache = object->fscache.cache;
+ kmem_cache_free(cachefiles_object_jar, object);
+ fscache_object_destroyed(cache);
+ }
+
+ _leave("");
+}
+
+/*
+ * sync a cache
+ */
+static void cachefiles_sync_cache(struct fscache_cache *_cache)
+{
+ struct cachefiles_cache *cache;
+ const struct cred *saved_cred;
+ int ret;
+
+ _enter("%p", _cache);
+
+ cache = container_of(_cache, struct cachefiles_cache, cache);
+
+ /* make sure all pages pinned by operations on behalf of the netfs are
+ * written to disc */
+ cachefiles_begin_secure(cache, &saved_cred);
+ ret = fsync_super(cache->mnt->mnt_sb);
+ cachefiles_end_secure(cache, saved_cred);
+
+ if (ret == -EIO)
+ cachefiles_io_error(cache,
+ "Attempt to sync backing fs superblock"
+ " returned error %d",
+ ret);
+}
+
+/*
+ * notification the attributes on an object have changed
+ * - called with reads/writes excluded by FS-Cache
+ */
+static int cachefiles_attr_changed(struct fscache_object *_object)
+{
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
+ const struct cred *saved_cred;
+ struct iattr newattrs;
+ uint64_t ni_size;
+ loff_t oi_size;
+ int ret;
+
+ _object->cookie->def->get_attr(_object->cookie->netfs_data, &ni_size);
+
+ _enter("{OBJ%x},[%llu]",
+ _object->debug_id, (unsigned long long) ni_size);
+
+ object = container_of(_object, struct cachefiles_object, fscache);
+ cache = container_of(object->fscache.cache,
+ struct cachefiles_cache, cache);
+
+ if (ni_size == object->i_size)
+ return 0;
+
+ if (!object->backer)
+ return -ENOBUFS;
+
+ ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+
+ fscache_set_store_limit(&object->fscache, ni_size);
+
+ oi_size = i_size_read(object->backer->d_inode);
+ if (oi_size == ni_size)
+ return 0;
+
+ newattrs.ia_size = ni_size;
+ newattrs.ia_valid = ATTR_SIZE;
+
+ cachefiles_begin_secure(cache, &saved_cred);
+ mutex_lock(&object->backer->d_inode->i_mutex);
+ ret = notify_change(object->backer, &newattrs);
+ mutex_unlock(&object->backer->d_inode->i_mutex);
+ cachefiles_end_secure(cache, saved_cred);
+
+ if (ret == -EIO) {
+ fscache_set_store_limit(&object->fscache, 0);
+ cachefiles_io_error_obj(object, "Size set failed");
+ ret = -ENOBUFS;
+ }
+
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * dissociate a cache from all the pages it was backing
+ */
+static void cachefiles_dissociate_pages(struct fscache_cache *cache)
+{
+ _enter("");
+}
+
+const struct fscache_cache_ops cachefiles_cache_ops = {
+ .name = "cachefiles",
+ .alloc_object = cachefiles_alloc_object,
+ .lookup_object = cachefiles_lookup_object,
+ .lookup_complete = cachefiles_lookup_complete,
+ .grab_object = cachefiles_grab_object,
+ .update_object = cachefiles_update_object,
+ .drop_object = cachefiles_drop_object,
+ .put_object = cachefiles_put_object,
+ .sync_cache = cachefiles_sync_cache,
+ .attr_changed = cachefiles_attr_changed,
+ .read_or_alloc_page = cachefiles_read_or_alloc_page,
+ .read_or_alloc_pages = cachefiles_read_or_alloc_pages,
+ .allocate_page = cachefiles_allocate_page,
+ .allocate_pages = cachefiles_allocate_pages,
+ .write_page = cachefiles_write_page,
+ .uncache_page = cachefiles_uncache_page,
+ .dissociate_pages = cachefiles_dissociate_pages,
+};
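
Every operation in the table above receives a generic fscache_object or fscache_cache pointer and immediately converts it back to the embedding CacheFiles structure with container_of(); the generic structs are embedded by value, so the conversion is pure pointer arithmetic. A trimmed, runnable sketch of that embedding pattern (structures reduced to two fields; the macro is the usual offsetof formulation):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fscache_object { int debug_id; };

struct cachefiles_object {
	struct fscache_object fscache; /* embedded generic handle */
	int usage;
};

int main(void)
{
	struct cachefiles_object obj = {
		.fscache = { .debug_id = 0x42 },
		.usage = 1,
	};
	/* What an op receives... */
	struct fscache_object *handle = &obj.fscache;
	/* ...and how it recovers the containing object. */
	struct cachefiles_object *back =
		container_of(handle, struct cachefiles_object, fscache);

	printf("OBJ%x usage=%d\n", back->fscache.debug_id, back->usage);
	return back != &obj; /* 0 on success */
}
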
diff --git a/fs/cachefiles/cf-internal.h b/fs/cachefiles/cf-internal.h
new file mode 100644
index 000000000000..19218e1463d6
--- /dev/null
+++ b/fs/cachefiles/cf-internal.h
@@ -0,0 +1,360 @@
+/* General netfs cache on cache files internal defs
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/fscache-cache.h>
+#include <linux/timer.h>
+#include <linux/wait.h>
+#include <linux/workqueue.h>
+#include <linux/security.h>
+
+struct cachefiles_cache;
+struct cachefiles_object;
+
+extern unsigned cachefiles_debug;
+#define CACHEFILES_DEBUG_KENTER 1
+#define CACHEFILES_DEBUG_KLEAVE 2
+#define CACHEFILES_DEBUG_KDEBUG 4
+
+/*
+ * node records
+ */
+struct cachefiles_object {
+ struct fscache_object fscache; /* fscache handle */
+ struct cachefiles_lookup_data *lookup_data; /* cached lookup data */
+ struct dentry *dentry; /* the file/dir representing this object */
+ struct dentry *backer; /* backing file */
+ loff_t i_size; /* object size */
+ unsigned long flags;
+#define CACHEFILES_OBJECT_ACTIVE 0 /* T if marked active */
+ atomic_t usage; /* object usage count */
+ uint8_t type; /* object type */
+ uint8_t new; /* T if object new */
+ spinlock_t work_lock;
+ struct rb_node active_node; /* link in active tree (dentry is key) */
+};
+
+extern struct kmem_cache *cachefiles_object_jar;
+
+/*
+ * Cache files cache definition
+ */
+struct cachefiles_cache {
+ struct fscache_cache cache; /* FS-Cache record */
+ struct vfsmount *mnt; /* mountpoint holding the cache */
+ struct dentry *graveyard; /* directory into which dead objects go */
+ struct file *cachefilesd; /* manager daemon handle */
+ const struct cred *cache_cred; /* security override for accessing cache */
+ struct mutex daemon_mutex; /* command serialisation mutex */
+ wait_queue_head_t daemon_pollwq; /* poll waitqueue for daemon */
+ struct rb_root active_nodes; /* active nodes (can't be culled) */
+ rwlock_t active_lock; /* lock for active_nodes */
+ atomic_t gravecounter; /* graveyard uniquifier */
+ unsigned frun_percent; /* when to stop culling (% files) */
+ unsigned fcull_percent; /* when to start culling (% files) */
+ unsigned fstop_percent; /* when to stop allocating (% files) */
+ unsigned brun_percent; /* when to stop culling (% blocks) */
+ unsigned bcull_percent; /* when to start culling (% blocks) */
+ unsigned bstop_percent; /* when to stop allocating (% blocks) */
+ unsigned bsize; /* cache's block size */
+ unsigned bshift; /* ilog2(PAGE_SIZE / bsize) */
+ uint64_t frun; /* when to stop culling */
+ uint64_t fcull; /* when to start culling */
+ uint64_t fstop; /* when to stop allocating */
+ sector_t brun; /* when to stop culling */
+ sector_t bcull; /* when to start culling */
+ sector_t bstop; /* when to stop allocating */
+ unsigned long flags;
+#define CACHEFILES_READY 0 /* T if cache prepared */
+#define CACHEFILES_DEAD 1 /* T if cache dead */
+#define CACHEFILES_CULLING 2 /* T if cull engaged */
+#define CACHEFILES_STATE_CHANGED 3 /* T if state changed (poll trigger) */
+ char *rootdirname; /* name of cache root directory */
+ char *secctx; /* LSM security context */
+ char *tag; /* cache binding tag */
+};
+
+/*
+ * backing file read tracking
+ */
+struct cachefiles_one_read {
+ wait_queue_t monitor; /* link into monitored waitqueue */
+ struct page *back_page; /* backing file page we're waiting for */
+ struct page *netfs_page; /* netfs page we're going to fill */
+ struct fscache_retrieval *op; /* retrieval op covering this */
+ struct list_head op_link; /* link in op's todo list */
+};
+
+/*
+ * backing file write tracking
+ */
+struct cachefiles_one_write {
+ struct page *netfs_page; /* netfs page to copy */
+ struct cachefiles_object *object;
+ struct list_head obj_link; /* link in object's lists */
+ fscache_rw_complete_t end_io_func;
+ void *context;
+};
+
+/*
+ * auxiliary data xattr buffer
+ */
+struct cachefiles_xattr {
+ uint16_t len;
+ uint8_t type;
+ uint8_t data[];
+};
+
+/*
+ * note change of state for daemon
+ */
+static inline void cachefiles_state_changed(struct cachefiles_cache *cache)
+{
+ set_bit(CACHEFILES_STATE_CHANGED, &cache->flags);
+ wake_up_all(&cache->daemon_pollwq);
+}
+
+/*
+ * cf-bind.c
+ */
+extern int cachefiles_daemon_bind(struct cachefiles_cache *cache, char *args);
+extern void cachefiles_daemon_unbind(struct cachefiles_cache *cache);
+
+/*
+ * cf-daemon.c
+ */
+extern const struct file_operations cachefiles_daemon_fops;
+
+extern int cachefiles_has_space(struct cachefiles_cache *cache,
+ unsigned fnr, unsigned bnr);
+
+/*
+ * cf-interface.c
+ */
+extern const struct fscache_cache_ops cachefiles_cache_ops;
+
+/*
+ * cf-key.c
+ */
+extern char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type);
+
+/*
+ * cf-namei.c
+ */
+extern int cachefiles_delete_object(struct cachefiles_cache *cache,
+ struct cachefiles_object *object);
+extern int cachefiles_walk_to_object(struct cachefiles_object *parent,
+ struct cachefiles_object *object,
+ const char *key,
+ struct cachefiles_xattr *auxdata);
+extern struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
+ struct dentry *dir,
+ const char *name);
+
+extern int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
+ char *filename);
+
+extern int cachefiles_check_in_use(struct cachefiles_cache *cache,
+ struct dentry *dir, char *filename);
+
+/*
+ * cf-proc.c
+ */
+#ifdef CONFIG_CACHEFILES_HISTOGRAM
+extern atomic_t cachefiles_lookup_histogram[HZ];
+extern atomic_t cachefiles_mkdir_histogram[HZ];
+extern atomic_t cachefiles_create_histogram[HZ];
+
+extern int __init cachefiles_proc_init(void);
+extern void cachefiles_proc_cleanup(void);
+static inline
+void cachefiles_hist(atomic_t histogram[], unsigned long start_jif)
+{
+ unsigned long jif = jiffies - start_jif;
+ if (jif >= HZ)
+ jif = HZ - 1;
+ atomic_inc(&histogram[jif]);
+}
+
+#else
+#define cachefiles_proc_init() (0)
+#define cachefiles_proc_cleanup() do {} while (0)
+#define cachefiles_hist(hist, start_jif) do {} while (0)
+#endif
+
+/*
+ * cf-rdwr.c
+ */
+extern int cachefiles_read_or_alloc_page(struct fscache_retrieval *,
+ struct page *, gfp_t);
+extern int cachefiles_read_or_alloc_pages(struct fscache_retrieval *,
+ struct list_head *, unsigned *,
+ gfp_t);
+extern int cachefiles_allocate_page(struct fscache_retrieval *, struct page *,
+ gfp_t);
+extern int cachefiles_allocate_pages(struct fscache_retrieval *,
+ struct list_head *, unsigned *, gfp_t);
+extern int cachefiles_write_page(struct fscache_storage *, struct page *);
+extern void cachefiles_uncache_page(struct fscache_object *, struct page *);
+
+/*
+ * cf-security.c
+ */
+extern int cachefiles_get_security_ID(struct cachefiles_cache *cache);
+extern int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
+ struct dentry *root,
+ const struct cred **_saved_cred);
+
+static inline void cachefiles_begin_secure(struct cachefiles_cache *cache,
+ const struct cred **_saved_cred)
+{
+ *_saved_cred = override_creds(cache->cache_cred);
+}
+
+static inline void cachefiles_end_secure(struct cachefiles_cache *cache,
+ const struct cred *saved_cred)
+{
+ revert_creds(saved_cred);
+}
+
+/*
+ * cf-xattr.c
+ */
+extern int cachefiles_check_object_type(struct cachefiles_object *object);
+extern int cachefiles_set_object_xattr(struct cachefiles_object *object,
+ struct cachefiles_xattr *auxdata);
+extern int cachefiles_update_object_xattr(struct cachefiles_object *object,
+ struct cachefiles_xattr *auxdata);
+extern int cachefiles_check_object_xattr(struct cachefiles_object *object,
+ struct cachefiles_xattr *auxdata);
+extern int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
+ struct dentry *dentry);
+
+
+/*
+ * error handling
+ */
+#define kerror(FMT, ...) printk(KERN_ERR "CacheFiles: "FMT"\n", ##__VA_ARGS__)
+
+#define cachefiles_io_error(___cache, FMT, ...) \
+do { \
+ kerror("I/O Error: " FMT, ##__VA_ARGS__); \
+ fscache_io_error(&(___cache)->cache); \
+ set_bit(CACHEFILES_DEAD, &(___cache)->flags); \
+} while (0)
+
+#define cachefiles_io_error_obj(object, FMT, ...) \
+do { \
+ struct cachefiles_cache *___cache; \
+ \
+ ___cache = container_of((object)->fscache.cache, \
+ struct cachefiles_cache, cache); \
+ cachefiles_io_error(___cache, FMT, ##__VA_ARGS__); \
+} while (0)
+
+
+/*
+ * debug tracing
+ */
+#define dbgprintk(FMT, ...) \
+ printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
+
+/* make sure we maintain the format strings, even when debugging is disabled */
+static inline void _dbprintk(const char *fmt, ...)
+ __attribute__((format(printf, 1, 2)));
+static inline void _dbprintk(const char *fmt, ...)
+{
+}
+
+#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
+
+
+#if defined(__KDEBUG)
+#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
+#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
+#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
+
+#elif defined(CONFIG_CACHEFILES_DEBUG)
+#define _enter(FMT, ...) \
+do { \
+ if (cachefiles_debug & CACHEFILES_DEBUG_KENTER) \
+ kenter(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#define _leave(FMT, ...) \
+do { \
+ if (cachefiles_debug & CACHEFILES_DEBUG_KLEAVE) \
+ kleave(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#define _debug(FMT, ...) \
+do { \
+ if (cachefiles_debug & CACHEFILES_DEBUG_KDEBUG) \
+ kdebug(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#else
+#define _enter(FMT, ...) _dbprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define _leave(FMT, ...) _dbprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define _debug(FMT, ...) _dbprintk(FMT, ##__VA_ARGS__)
+#endif
+
+#if 1 /* defined(__KDEBUGALL) */
+
+#define ASSERT(X) \
+do { \
+ if (unlikely(!(X))) { \
+ printk(KERN_ERR "\n"); \
+ printk(KERN_ERR "CacheFiles: Assertion failed\n"); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTCMP(X, OP, Y) \
+do { \
+ if (unlikely(!((X) OP (Y)))) { \
+ printk(KERN_ERR "\n"); \
+ printk(KERN_ERR "CacheFiles: Assertion failed\n"); \
+ printk(KERN_ERR "%lx " #OP " %lx is false\n", \
+ (unsigned long)(X), (unsigned long)(Y)); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTIF(C, X) \
+do { \
+ if (unlikely((C) && !(X))) { \
+ printk(KERN_ERR "\n"); \
+ printk(KERN_ERR "CacheFiles: Assertion failed\n"); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTIFCMP(C, X, OP, Y) \
+do { \
+ if (unlikely((C) && !((X) OP (Y)))) { \
+ printk(KERN_ERR "\n"); \
+ printk(KERN_ERR "CacheFiles: Assertion failed\n"); \
+ printk(KERN_ERR "%lx " #OP " %lx is false\n", \
+ (unsigned long)(X), (unsigned long)(Y)); \
+ BUG(); \
+ } \
+} while (0)
+
+#else
+
+#define ASSERT(X) do {} while (0)
+#define ASSERTCMP(X, OP, Y) do {} while (0)
+#define ASSERTIF(C, X) do {} while (0)
+#define ASSERTIFCMP(C, X, OP, Y) do {} while (0)
+
+#endif
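
The cachefiles_hist() helper expects callers to sample jiffies before an operation and pass the start value afterwards; the elapsed tick count selects one of HZ buckets, with anything at or beyond a full second clamped into the last bucket. A userspace analogue of that clamped-bucket scheme (the HZ value here is illustrative):

#include <stdio.h>

#define HZ 100 /* illustrative tick rate */

static unsigned histogram[HZ];

/* Analogue of cachefiles_hist(): bucket an elapsed tick count,
 * clamping overlong operations into the final bucket. */
static void hist_add(unsigned histo[], unsigned long start, unsigned long now)
{
	unsigned long ticks = now - start;

	if (ticks >= HZ)
		ticks = HZ - 1;
	histo[ticks]++;
}

int main(void)
{
	hist_add(histogram, 0, 3);    /* a 3-tick operation */
	hist_add(histogram, 10, 500); /* overlong, lands in the last bucket */
	printf("bucket[3]=%u bucket[%d]=%u\n",
	       histogram[3], HZ - 1, histogram[HZ - 1]);
	return 0;
}
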
diff --git a/fs/cachefiles/cf-key.c b/fs/cachefiles/cf-key.c
new file mode 100644
index 000000000000..f3fa75f475d6
--- /dev/null
+++ b/fs/cachefiles/cf-key.c
@@ -0,0 +1,159 @@
+/* Key to pathname encoder
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/slab.h>
+#include "cf-internal.h"
+
+static const char cachefiles_charmap[64] =
+ "0123456789" /* 0 - 9 */
+ "abcdefghijklmnopqrstuvwxyz" /* 10 - 35 */
+ "ABCDEFGHIJKLMNOPQRSTUVWXYZ" /* 36 - 61 */
+ "_-" /* 62 - 63 */
+ ;
+
+static const char cachefiles_filecharmap[256] = {
+ /* we skip space and tab and control chars */
+ [33 ... 46] = 1, /* '!' -> '.' */
+ /* we skip '/' as it's significant to pathwalk */
+ [48 ... 127] = 1, /* '0' -> '~' */
+};
+
+/*
+ * turn the raw key into something cooked
+ * - the raw key should include the length in the two bytes at the front
+ * - the key may be up to 514 bytes in length (including the length word)
+ * - "base64" encode the strange keys, mapping 3 bytes of raw to four of
+ * cooked
+ * - need to cut the cooked key into 252-char segments (189 raw bytes each when base64'd)
+ */
+char *cachefiles_cook_key(const u8 *raw, int keylen, uint8_t type)
+{
+ unsigned char csum, ch;
+ unsigned int acc;
+ char *key;
+ int loop, len, max, seg, mark, print;
+
+ _enter(",%d", keylen);
+
+ BUG_ON(keylen < 2 || keylen > 514);
+
+ csum = raw[0] + raw[1];
+ print = 1;
+ for (loop = 2; loop < keylen; loop++) {
+ ch = raw[loop];
+ csum += ch;
+ print &= cachefiles_filecharmap[ch];
+ }
+
+ if (print) {
+ /* if the path is usable ASCII, then we render it directly */
+ max = keylen - 2;
+ max += 2; /* two base64'd length chars on the front */
+ max += 5; /* @checksum/M */
+ max += 3 * 2; /* maximum number of segment dividers (".../M")
+ * is ((514 + 251) / 252) = 3
+ */
+ max += 1; /* NUL on end */
+ } else {
+ /* calculate the maximum length of the cooked key */
+ keylen = (keylen + 2) / 3;
+
+ max = keylen * 4;
+ max += 5; /* @checksum/M */
+ max += 3 * 2; /* maximum number of segment dividers (".../M")
+ * is ((514 + 188) / 189) = 3
+ */
+ max += 1; /* NUL on end */
+ }
+
+ max += 1; /* 2nd NUL on end */
+
+ _debug("max: %d", max);
+
+ key = kmalloc(max, GFP_KERNEL);
+ if (!key)
+ return NULL;
+
+ len = 0;
+
+ /* build the cooked key */
+ sprintf(key, "@%02x%c+", (unsigned) csum, 0);
+ len = 5;
+ mark = len - 1;
+
+ if (print) {
+ acc = *(uint16_t *) raw;
+ raw += 2;
+
+ key[len + 1] = cachefiles_charmap[acc & 63];
+ acc >>= 6;
+ key[len] = cachefiles_charmap[acc & 63];
+ len += 2;
+
+ seg = 250;
+ for (loop = keylen; loop > 0; loop--) {
+ if (seg <= 0) {
+ key[len++] = '\0';
+ mark = len;
+ key[len++] = '+';
+ seg = 252;
+ }
+
+ key[len++] = *raw++;
+ ASSERT(len < max);
+ }
+
+ switch (type) {
+ case FSCACHE_COOKIE_TYPE_INDEX: type = 'I'; break;
+ case FSCACHE_COOKIE_TYPE_DATAFILE: type = 'D'; break;
+ default: type = 'S'; break;
+ }
+ } else {
+ seg = 252;
+ for (loop = keylen; loop > 0; loop--) {
+ if (seg <= 0) {
+ key[len++] = '\0';
+ mark = len;
+ key[len++] = '+';
+ seg = 252;
+ }
+
+ acc = *raw++;
+ acc |= *raw++ << 8;
+ acc |= *raw++ << 16;
+
+ _debug("acc: %06x", acc);
+
+ key[len++] = cachefiles_charmap[acc & 63];
+ acc >>= 6;
+ key[len++] = cachefiles_charmap[acc & 63];
+ acc >>= 6;
+ key[len++] = cachefiles_charmap[acc & 63];
+ acc >>= 6;
+ key[len++] = cachefiles_charmap[acc & 63];
+
+ ASSERT(len < max);
+ }
+
+ switch (type) {
+ case FSCACHE_COOKIE_TYPE_INDEX: type = 'J'; break;
+ case FSCACHE_COOKIE_TYPE_DATAFILE: type = 'E'; break;
+ default: type = 'T'; break;
+ }
+ }
+
+ key[mark] = type;
+ key[len++] = 0;
+ key[len] = 0;
+
+ _leave(" = %p %d", key, len);
+ return key;
+}
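
The non-printable branch above packs three raw bytes little-endian into a 24-bit accumulator and emits four filename-safe characters, six bits at a time, low bits first. A standalone sketch of just that inner step, with the charmap copied from the top of the file:

#include <stdio.h>

static const char charmap[64] =
	"0123456789"
	"abcdefghijklmnopqrstuvwxyz"
	"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
	"_-";

/* Encode one 3-byte group the way cachefiles_cook_key() does. */
static void encode3(const unsigned char raw[3], char out[4])
{
	unsigned int acc = raw[0] | raw[1] << 8 | raw[2] << 16;
	int i;

	for (i = 0; i < 4; i++) {
		out[i] = charmap[acc & 63];
		acc >>= 6;
	}
}

int main(void)
{
	const unsigned char raw[3] = { 0x01, 0x02, 0x03 };
	char out[5] = { 0 };

	encode3(raw, out);
	printf("%s\n", out); /* three raw bytes -> four cooked chars */
	return 0;
}
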
diff --git a/fs/cachefiles/cf-main.c b/fs/cachefiles/cf-main.c
new file mode 100644
index 000000000000..87c5676ad636
--- /dev/null
+++ b/fs/cachefiles/cf-main.c
@@ -0,0 +1,106 @@
+/* Network filesystem caching backend to use cache files on a premounted
+ * filesystem
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/namei.h>
+#include <linux/mount.h>
+#include <linux/statfs.h>
+#include <linux/sysctl.h>
+#include <linux/miscdevice.h>
+#include "cf-internal.h"
+
+unsigned cachefiles_debug;
+module_param_named(debug, cachefiles_debug, uint, S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(cachefiles_debug, "CacheFiles debugging mask");
+
+MODULE_DESCRIPTION("Mounted-filesystem based cache");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+struct kmem_cache *cachefiles_object_jar;
+
+static struct miscdevice cachefiles_dev = {
+ .minor = MISC_DYNAMIC_MINOR,
+ .name = "cachefiles",
+ .fops = &cachefiles_daemon_fops,
+};
+
+static void cachefiles_object_init_once(void *_object)
+{
+ struct cachefiles_object *object = _object;
+
+ memset(object, 0, sizeof(*object));
+ spin_lock_init(&object->work_lock);
+}
+
+/*
+ * initialise the fs caching module
+ */
+static int __init cachefiles_init(void)
+{
+ int ret;
+
+ ret = misc_register(&cachefiles_dev);
+ if (ret < 0)
+ goto error_dev;
+
+ /* create an object jar */
+ ret = -ENOMEM;
+ cachefiles_object_jar =
+ kmem_cache_create("cachefiles_object_jar",
+ sizeof(struct cachefiles_object),
+ 0,
+ SLAB_HWCACHE_ALIGN,
+ cachefiles_object_init_once);
+ if (!cachefiles_object_jar) {
+ printk(KERN_NOTICE
+ "CacheFiles: Failed to allocate an object jar\n");
+ goto error_object_jar;
+ }
+
+ ret = cachefiles_proc_init();
+ if (ret < 0)
+ goto error_proc;
+
+ printk(KERN_INFO "CacheFiles: Loaded\n");
+ return 0;
+
+error_proc:
+ kmem_cache_destroy(cachefiles_object_jar);
+error_object_jar:
+ misc_deregister(&cachefiles_dev);
+error_dev:
+ kerror("failed to register: %d", ret);
+ return ret;
+}
+
+fs_initcall(cachefiles_init);
+
+/*
+ * clean up on module removal
+ */
+static void __exit cachefiles_exit(void)
+{
+ printk(KERN_INFO "CacheFiles: Unloading\n");
+
+ cachefiles_proc_cleanup();
+ kmem_cache_destroy(cachefiles_object_jar);
+ misc_deregister(&cachefiles_dev);
+}
+
+module_exit(cachefiles_exit);
diff --git a/fs/cachefiles/cf-namei.c b/fs/cachefiles/cf-namei.c
new file mode 100644
index 000000000000..70afdce26d07
--- /dev/null
+++ b/fs/cachefiles/cf-namei.c
@@ -0,0 +1,772 @@
+/* CacheFiles path walking and related routines
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/quotaops.h>
+#include <linux/xattr.h>
+#include <linux/mount.h>
+#include <linux/namei.h>
+#include <linux/security.h>
+#include "cf-internal.h"
+
+static int cachefiles_wait_bit(void *flags)
+{
+ schedule();
+ return 0;
+}
+
+/*
+ * record the fact that an object is now active
+ */
+static void cachefiles_mark_object_active(struct cachefiles_cache *cache,
+ struct cachefiles_object *object)
+{
+ struct cachefiles_object *xobject;
+ struct rb_node **_p, *_parent = NULL;
+ struct dentry *dentry;
+
+ _enter(",%p", object);
+
+try_again:
+ write_lock(&cache->active_lock);
+
+ if (test_and_set_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags))
+ BUG();
+
+ dentry = object->dentry;
+ _p = &cache->active_nodes.rb_node;
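+ /* the tree of active objects is keyed on the backing dentry pointer;
+ * a match means another object is already using the same file */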
+ while (*_p) {
+ _parent = *_p;
+ xobject = rb_entry(_parent,
+ struct cachefiles_object, active_node);
+
+ ASSERT(xobject != object);
+
+ if (xobject->dentry > dentry)
+ _p = &(*_p)->rb_left;
+ else if (xobject->dentry < dentry)
+ _p = &(*_p)->rb_right;
+ else
+ goto wait_for_old_object;
+ }
+
+ rb_link_node(&object->active_node, _parent, _p);
+ rb_insert_color(&object->active_node, &cache->active_nodes);
+
+ write_unlock(&cache->active_lock);
+ _leave("");
+ return;
+
+ /* an old object from a previous incarnation is hogging the slot - we
+ * need to wait for it to be destroyed */
+wait_for_old_object:
+ if (xobject->fscache.state < FSCACHE_OBJECT_DYING) {
+ printk(KERN_ERR "\n");
+ printk(KERN_ERR "CacheFiles: Error:"
+ " Unexpected object collision\n");
+ printk(KERN_ERR "xobject: OBJ%x\n",
+ xobject->fscache.debug_id);
+ printk(KERN_ERR "xobjstate=%s\n",
+ fscache_object_states[xobject->fscache.state]);
+ printk(KERN_ERR "xobjflags=%lx\n", xobject->fscache.flags);
+ printk(KERN_ERR "xobjevent=%lx [%lx]\n",
+ xobject->fscache.events, xobject->fscache.event_mask);
+ printk(KERN_ERR "xops=%u inp=%u exc=%u\n",
+ xobject->fscache.n_ops, xobject->fscache.n_in_progress,
+ xobject->fscache.n_exclusive);
+ printk(KERN_ERR "xcookie=%p [pr=%p nd=%p fl=%lx]\n",
+ xobject->fscache.cookie,
+ xobject->fscache.cookie->parent,
+ xobject->fscache.cookie->netfs_data,
+ xobject->fscache.cookie->flags);
+ printk(KERN_ERR "xparent=%p\n",
+ xobject->fscache.parent);
+ printk(KERN_ERR "object: OBJ%x\n",
+ object->fscache.debug_id);
+ printk(KERN_ERR "cookie=%p [pr=%p nd=%p fl=%lx]\n",
+ object->fscache.cookie,
+ object->fscache.cookie->parent,
+ object->fscache.cookie->netfs_data,
+ object->fscache.cookie->flags);
+ printk(KERN_ERR "parent=%p\n",
+ object->fscache.parent);
+ BUG();
+ }
+ atomic_inc(&xobject->usage);
+ write_unlock(&cache->active_lock);
+
+ _debug(">>> wait");
+ wait_on_bit(&xobject->flags, CACHEFILES_OBJECT_ACTIVE,
+ cachefiles_wait_bit, TASK_UNINTERRUPTIBLE);
+ _debug("<<< waited");
+
+ cache->cache.ops->put_object(&xobject->fscache);
+ goto try_again;
+}
+
+/*
+ * delete an object representation from the cache
+ * - file backed objects are unlinked
+ * - directory backed objects are stuffed into the graveyard for userspace to
+ * delete
+ * - unlocks the directory mutex
+ */
+static int cachefiles_bury_object(struct cachefiles_cache *cache,
+ struct dentry *dir,
+ struct dentry *rep)
+{
+ struct dentry *grave, *trap;
+ char nbuffer[8 + 8 + 1];
+ int ret;
+
+ _enter(",'%*.*s','%*.*s'",
+ dir->d_name.len, dir->d_name.len, dir->d_name.name,
+ rep->d_name.len, rep->d_name.len, rep->d_name.name);
+
+ /* non-directories can just be unlinked */
+ if (!S_ISDIR(rep->d_inode->i_mode)) {
+ _debug("unlink stale object");
+ ret = vfs_unlink(dir->d_inode, rep);
+
+ mutex_unlock(&dir->d_inode->i_mutex);
+
+ if (ret == -EIO)
+ cachefiles_io_error(cache, "Unlink failed");
+
+ _leave(" = %d", ret);
+ return ret;
+ }
+
+ /* directories have to be moved to the graveyard */
+ _debug("move stale object to graveyard");
+ mutex_unlock(&dir->d_inode->i_mutex);
+
+try_again:
+ /* first step is to make up a grave dentry in the graveyard */
+ sprintf(nbuffer, "%08x%08x",
+ (uint32_t) get_seconds(),
+ (uint32_t) atomic_inc_return(&cache->gravecounter));
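+ /* the grave name combines a timestamp with an ever-increasing counter
+ * so that a retry (see try_again) gets a fresh, unique name */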
+
+ /* do the multiway lock magic */
+ trap = lock_rename(cache->graveyard, dir);
+
+ /* do some checks before getting the grave dentry */
+ if (rep->d_parent != dir) {
+ /* the entry was probably culled when we dropped the parent dir
+ * lock */
+ unlock_rename(cache->graveyard, dir);
+ _leave(" = 0 [culled?]");
+ return 0;
+ }
+
+ if (!S_ISDIR(cache->graveyard->d_inode->i_mode)) {
+ unlock_rename(cache->graveyard, dir);
+ cachefiles_io_error(cache, "Graveyard no longer a directory");
+ return -EIO;
+ }
+
+ if (trap == rep) {
+ unlock_rename(cache->graveyard, dir);
+ cachefiles_io_error(cache, "May not make directory loop");
+ return -EIO;
+ }
+
+ if (d_mountpoint(rep)) {
+ unlock_rename(cache->graveyard, dir);
+ cachefiles_io_error(cache, "Mountpoint in cache");
+ return -EIO;
+ }
+
+ grave = lookup_one_len(nbuffer, cache->graveyard, strlen(nbuffer));
+ if (IS_ERR(grave)) {
+ unlock_rename(cache->graveyard, dir);
+
+ if (PTR_ERR(grave) == -ENOMEM) {
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ cachefiles_io_error(cache, "Lookup error %ld",
+ PTR_ERR(grave));
+ return -EIO;
+ }
+
+ if (grave->d_inode) {
+ unlock_rename(cache->graveyard, dir);
+ dput(grave);
+ grave = NULL;
+ cond_resched();
+ goto try_again;
+ }
+
+ if (d_mountpoint(grave)) {
+ unlock_rename(cache->graveyard, dir);
+ dput(grave);
+ cachefiles_io_error(cache, "Mountpoint in graveyard");
+ return -EIO;
+ }
+
+ /* target should not be an ancestor of source */
+ if (trap == grave) {
+ unlock_rename(cache->graveyard, dir);
+ dput(grave);
+ cachefiles_io_error(cache, "May not make directory loop");
+ return -EIO;
+ }
+
+ /* attempt the rename */
+ ret = vfs_rename(dir->d_inode, rep, cache->graveyard->d_inode, grave);
+ if (ret != 0 && ret != -ENOMEM)
+ cachefiles_io_error(cache, "Rename failed with error %d", ret);
+
+ unlock_rename(cache->graveyard, dir);
+ dput(grave);
+ _leave(" = 0");
+ return 0;
+}
+
+/*
+ * delete an object representation from the cache
+ */
+int cachefiles_delete_object(struct cachefiles_cache *cache,
+ struct cachefiles_object *object)
+{
+ struct dentry *dir;
+ int ret;
+
+ _enter(",{%p}", object->dentry);
+
+ ASSERT(object->dentry);
+ ASSERT(object->dentry->d_inode);
+ ASSERT(object->dentry->d_parent);
+
+ dir = dget_parent(object->dentry);
+
+ mutex_lock(&dir->d_inode->i_mutex);
+ ret = cachefiles_bury_object(cache, dir, object->dentry);
+
+ dput(dir);
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * walk from the parent object to the child object through the backing
+ * filesystem, creating directories as we go
+ */
+int cachefiles_walk_to_object(struct cachefiles_object *parent,
+ struct cachefiles_object *object,
+ const char *key,
+ struct cachefiles_xattr *auxdata)
+{
+ struct cachefiles_cache *cache;
+ struct dentry *dir, *next = NULL;
+ unsigned long start;
+ const char *name;
+ int ret, nlen;
+
+ _enter("{%p},,%s,", parent->dentry, key);
+
+ cache = container_of(parent->fscache.cache,
+ struct cachefiles_cache, cache);
+
+ ASSERT(parent->dentry);
+ ASSERT(parent->dentry->d_inode);
+
+ if (!(S_ISDIR(parent->dentry->d_inode->i_mode))) {
+ // TODO: convert file to dir
+ _leave("looking up in none directory");
+ return -ENOBUFS;
+ }
+
+ dir = dget(parent->dentry);
+
+advance:
+ /* attempt to transit the first directory component */
+ name = key;
+ nlen = strlen(key);
+
+ /* key ends in a double NUL */
+ key = key + nlen + 1;
+ if (!*key)
+ key = NULL;
+
+lookup_again:
+ /* search the current directory for the element name */
+ _debug("lookup '%s'", name);
+
+ mutex_lock(&dir->d_inode->i_mutex);
+
+ start = jiffies;
+ next = lookup_one_len(name, dir, nlen);
+ cachefiles_hist(cachefiles_lookup_histogram, start);
+ if (IS_ERR(next))
+ goto lookup_error;
+
+ _debug("next -> %p %s", next, next->d_inode ? "positive" : "negative");
+
+ if (!key)
+ object->new = !next->d_inode;
+
+ /* if this element of the path doesn't exist, then the lookup phase
+ * failed, and we can release any readers in the certain knowledge that
+ * there's nothing for them to actually read */
+ if (!next->d_inode)
+ fscache_object_lookup_negative(&object->fscache);
+
+ /* we need to create the object if it's negative */
+ if (key || object->type == FSCACHE_COOKIE_TYPE_INDEX) {
+ /* index objects and intervening tree levels must be subdirs */
+ if (!next->d_inode) {
+ ret = cachefiles_has_space(cache, 1, 0);
+ if (ret < 0)
+ goto create_error;
+
+ start = jiffies;
+ ret = vfs_mkdir(dir->d_inode, next, 0);
+ cachefiles_hist(cachefiles_mkdir_histogram, start);
+ if (ret < 0)
+ goto create_error;
+
+ ASSERT(next->d_inode);
+
+ _debug("mkdir -> %p{%p{ino=%lu}}",
+ next, next->d_inode, next->d_inode->i_ino);
+
+ } else if (!S_ISDIR(next->d_inode->i_mode)) {
+ kerror("inode %lu is not a directory",
+ next->d_inode->i_ino);
+ ret = -ENOBUFS;
+ goto error;
+ }
+
+ } else {
+ /* non-index objects start out life as files */
+ if (!next->d_inode) {
+ ret = cachefiles_has_space(cache, 1, 0);
+ if (ret < 0)
+ goto create_error;
+
+ start = jiffies;
+ ret = vfs_create(dir->d_inode, next, S_IFREG, NULL);
+ cachefiles_hist(cachefiles_create_histogram, start);
+ if (ret < 0)
+ goto create_error;
+
+ ASSERT(next->d_inode);
+
+ _debug("create -> %p{%p{ino=%lu}}",
+ next, next->d_inode, next->d_inode->i_ino);
+
+ } else if (!S_ISDIR(next->d_inode->i_mode) &&
+ !S_ISREG(next->d_inode->i_mode)
+ ) {
+ kerror("inode %lu is not a file or directory",
+ next->d_inode->i_ino);
+ ret = -ENOBUFS;
+ goto error;
+ }
+ }
+
+ /* process the next component */
+ if (key) {
+ _debug("advance");
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(dir);
+ dir = next;
+ next = NULL;
+ goto advance;
+ }
+
+ /* we've found the object we were looking for */
+ object->dentry = next;
+
+ /* if we've found that the terminal object exists, then we need to
+ * check its attributes and delete it if it's out of date */
+ if (!object->new) {
+ _debug("validate '%*.*s'",
+ next->d_name.len, next->d_name.len, next->d_name.name);
+
+ ret = cachefiles_check_object_xattr(object, auxdata);
+ if (ret == -ESTALE) {
+ /* delete the object (the deleter drops the directory
+ * mutex) */
+ object->dentry = NULL;
+
+ ret = cachefiles_bury_object(cache, dir, next);
+ dput(next);
+ next = NULL;
+
+ if (ret < 0)
+ goto delete_error;
+
+ _debug("redo lookup");
+ goto lookup_again;
+ }
+ }
+
+ /* note that we're now using this object */
+ cachefiles_mark_object_active(cache, object);
+
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(dir);
+ dir = NULL;
+
+ _debug("=== OBTAINED_OBJECT ===");
+
+ if (object->new) {
+ /* attach data to a newly constructed terminal object */
+ ret = cachefiles_set_object_xattr(object, auxdata);
+ if (ret < 0)
+ goto check_error;
+ } else {
+ /* always update the atime on an object we've just looked up
+ * (this is used to keep track of culling, and atimes are only
+ * updated by read, write and readdir but not lookup or
+ * open) */
+ touch_atime(cache->mnt, next);
+ }
+
+ /* open a file interface onto a data file */
+ if (object->type != FSCACHE_COOKIE_TYPE_INDEX) {
+ if (S_ISREG(object->dentry->d_inode->i_mode)) {
+ const struct address_space_operations *aops;
+
+ ret = -EPERM;
+ aops = object->dentry->d_inode->i_mapping->a_ops;
+ if (!aops->bmap ||
+ !aops->write_one_page)
+ goto check_error;
+
+ object->backer = object->dentry;
+ } else {
+ BUG(); // TODO: open file in data-class subdir
+ }
+ }
+
+ object->new = 0;
+ fscache_obtained_object(&object->fscache);
+
+ _leave(" = 0 [%lu]", object->dentry->d_inode->i_ino);
+ return 0;
+
+create_error:
+ _debug("create error %d", ret);
+ if (ret == -EIO)
+ cachefiles_io_error(cache, "Create/mkdir failed");
+ goto error;
+
+check_error:
+ _debug("check error %d", ret);
+ write_lock(&cache->active_lock);
+ rb_erase(&object->active_node, &cache->active_nodes);
+ clear_bit(CACHEFILES_OBJECT_ACTIVE, &object->flags);
+ wake_up_bit(&object->flags, CACHEFILES_OBJECT_ACTIVE);
+ write_unlock(&cache->active_lock);
+
+ dput(object->dentry);
+ object->dentry = NULL;
+ goto error_out;
+
+delete_error:
+ _debug("delete error %d", ret);
+ goto error_out2;
+
+lookup_error:
+ _debug("lookup error %ld", PTR_ERR(next));
+ ret = PTR_ERR(next);
+ if (ret == -EIO)
+ cachefiles_io_error(cache, "Lookup failed");
+ next = NULL;
+error:
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(next);
+error_out2:
+ dput(dir);
+error_out:
+ if (ret == -ENOSPC)
+ ret = -ENOBUFS;
+
+ _leave(" = error %d", -ret);
+ return ret;
+}
+
+/*
+ * get a subdirectory
+ */
+struct dentry *cachefiles_get_directory(struct cachefiles_cache *cache,
+ struct dentry *dir,
+ const char *dirname)
+{
+ struct dentry *subdir;
+ unsigned long start;
+ int ret;
+
+ _enter(",,%s", dirname);
+
+ /* search the current directory for the element name */
+ mutex_lock(&dir->d_inode->i_mutex);
+
+ start = jiffies;
+ subdir = lookup_one_len(dirname, dir, strlen(dirname));
+ cachefiles_hist(cachefiles_lookup_histogram, start);
+ if (IS_ERR(subdir)) {
+ if (PTR_ERR(subdir) == -ENOMEM)
+ goto nomem_d_alloc;
+ goto lookup_error;
+ }
+
+ _debug("subdir -> %p %s",
+ subdir, subdir->d_inode ? "positive" : "negative");
+
+ /* we need to create the subdir if it doesn't exist yet */
+ if (!subdir->d_inode) {
+ ret = cachefiles_has_space(cache, 1, 0);
+ if (ret < 0)
+ goto mkdir_error;
+
+ _debug("attempt mkdir");
+
+ ret = vfs_mkdir(dir->d_inode, subdir, 0700);
+ if (ret < 0)
+ goto mkdir_error;
+
+ ASSERT(subdir->d_inode);
+
+ _debug("mkdir -> %p{%p{ino=%lu}}",
+ subdir,
+ subdir->d_inode,
+ subdir->d_inode->i_ino);
+ }
+
+ mutex_unlock(&dir->d_inode->i_mutex);
+
+ /* we need to make sure the subdir is a directory */
+ ASSERT(subdir->d_inode);
+
+ if (!S_ISDIR(subdir->d_inode->i_mode)) {
+ kerror("%s is not a directory", dirname);
+ ret = -EIO;
+ goto check_error;
+ }
+
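+ /* the backing filesystem must support xattrs and the full set of
+ * directory operations checked below for the cache to be usable */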
+ ret = -EPERM;
+ if (!subdir->d_inode->i_op ||
+ !subdir->d_inode->i_op->setxattr ||
+ !subdir->d_inode->i_op->getxattr ||
+ !subdir->d_inode->i_op->lookup ||
+ !subdir->d_inode->i_op->mkdir ||
+ !subdir->d_inode->i_op->create ||
+ !subdir->d_inode->i_op->rename ||
+ !subdir->d_inode->i_op->rmdir ||
+ !subdir->d_inode->i_op->unlink)
+ goto check_error;
+
+ _leave(" = [%lu]", subdir->d_inode->i_ino);
+ return subdir;
+
+check_error:
+ dput(subdir);
+ _leave(" = %d [check]", ret);
+ return ERR_PTR(ret);
+
+mkdir_error:
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(subdir);
+ kerror("mkdir %s failed with error %d", dirname, ret);
+ return ERR_PTR(ret);
+
+lookup_error:
+ mutex_unlock(&dir->d_inode->i_mutex);
+ ret = PTR_ERR(subdir);
+ kerror("Lookup %s failed with error %d", dirname, ret);
+ return ERR_PTR(ret);
+
+nomem_d_alloc:
+ mutex_unlock(&dir->d_inode->i_mutex);
+ _leave(" = -ENOMEM");
+ return ERR_PTR(-ENOMEM);
+}
+
+/*
+ * find out if an object is in use or not
+ * - if it finds the object and it's not in use:
+ * - returns the victim dentry with a reference held on it
+ * - returns with the directory locked
+ */
+static struct dentry *cachefiles_check_active(struct cachefiles_cache *cache,
+ struct dentry *dir,
+ char *filename)
+{
+ struct cachefiles_object *object;
+ struct rb_node *_n;
+ struct dentry *victim;
+ unsigned long start;
+ int ret;
+
+ //_enter(",%*.*s/,%s",
+ // dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);
+
+ /* look up the victim */
+ mutex_lock_nested(&dir->d_inode->i_mutex, 1);
+
+ start = jiffies;
+ victim = lookup_one_len(filename, dir, strlen(filename));
+ cachefiles_hist(cachefiles_lookup_histogram, start);
+ if (IS_ERR(victim))
+ goto lookup_error;
+
+ //_debug("victim -> %p %s",
+ // victim, victim->d_inode ? "positive" : "negative");
+
+ /* if the object is no longer there then we probably retired the object
+ * at the netfs's request whilst the cull was in progress
+ */
+ if (!victim->d_inode) {
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(victim);
+ _leave(" = -ENOENT [absent]");
+ return ERR_PTR(-ENOENT);
+ }
+
+ /* check to see if we're using this object */
+ read_lock(&cache->active_lock);
+
+ _n = cache->active_nodes.rb_node;
+
+ while (_n) {
+ object = rb_entry(_n, struct cachefiles_object, active_node);
+
+ if (object->dentry > victim)
+ _n = _n->rb_left;
+ else if (object->dentry < victim)
+ _n = _n->rb_right;
+ else
+ goto object_in_use;
+ }
+
+ read_unlock(&cache->active_lock);
+
+ //_leave(" = %p", victim);
+ return victim;
+
+object_in_use:
+ read_unlock(&cache->active_lock);
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(victim);
+ //_leave(" = -EBUSY [in use]");
+ return ERR_PTR(-EBUSY);
+
+lookup_error:
+ mutex_unlock(&dir->d_inode->i_mutex);
+ ret = PTR_ERR(victim);
+ if (ret == -ENOENT) {
+ /* file or dir now absent - probably retired by netfs */
+ _leave(" = -ESTALE [absent]");
+ return ERR_PTR(-ESTALE);
+ }
+
+ if (ret == -EIO) {
+ cachefiles_io_error(cache, "Lookup failed");
+ } else if (ret != -ENOMEM) {
+ kerror("Internal error: %d", ret);
+ ret = -EIO;
+ }
+
+ _leave(" = %d", ret);
+ return ERR_PTR(ret);
+}
+
+/*
+ * cull an object if it's not in use
+ * - called only by cache manager daemon
+ */
+int cachefiles_cull(struct cachefiles_cache *cache, struct dentry *dir,
+ char *filename)
+{
+ struct dentry *victim;
+ int ret;
+
+ _enter(",%*.*s/,%s",
+ dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);
+
+ victim = cachefiles_check_active(cache, dir, filename);
+ if (IS_ERR(victim))
+ return PTR_ERR(victim);
+
+ _debug("victim -> %p %s",
+ victim, victim->d_inode ? "positive" : "negative");
+
+ /* okay... the victim is not being used so we can cull it
+ * - start by marking it as stale
+ */
+ _debug("victim is cullable");
+
+ ret = cachefiles_remove_object_xattr(cache, victim);
+ if (ret < 0)
+ goto error_unlock;
+
+ /* actually remove the victim (drops the dir mutex) */
+ _debug("bury");
+
+ ret = cachefiles_bury_object(cache, dir, victim);
+ if (ret < 0)
+ goto error;
+
+ dput(victim);
+ _leave(" = 0");
+ return 0;
+
+error_unlock:
+ mutex_unlock(&dir->d_inode->i_mutex);
+error:
+ dput(victim);
+ if (ret == -ENOENT) {
+ /* file or dir now absent - probably retired by netfs */
+ _leave(" = -ESTALE [absent]");
+ return -ESTALE;
+ }
+
+ if (ret != -ENOMEM) {
+ kerror("Internal error: %d", ret);
+ ret = -EIO;
+ }
+
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * find out if an object is in use or not
+ * - called only by cache manager daemon
+ * - returns -EBUSY or 0 to indicate whether an object is in use or not
+ */
+int cachefiles_check_in_use(struct cachefiles_cache *cache, struct dentry *dir,
+ char *filename)
+{
+ struct dentry *victim;
+
+ //_enter(",%*.*s/,%s",
+ // dir->d_name.len, dir->d_name.len, dir->d_name.name, filename);
+
+ victim = cachefiles_check_active(cache, dir, filename);
+ if (IS_ERR(victim))
+ return PTR_ERR(victim);
+
+ mutex_unlock(&dir->d_inode->i_mutex);
+ dput(victim);
+ //_leave(" = 0");
+ return 0;
+}
diff --git a/fs/cachefiles/cf-proc.c b/fs/cachefiles/cf-proc.c
new file mode 100644
index 000000000000..d09beb5d5870
--- /dev/null
+++ b/fs/cachefiles/cf-proc.c
@@ -0,0 +1,134 @@
+/* CacheFiles statistics
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "cf-internal.h"
+
+atomic_t cachefiles_lookup_histogram[HZ];
+atomic_t cachefiles_mkdir_histogram[HZ];
+atomic_t cachefiles_create_histogram[HZ];
+
+/*
+ * display the latency histogram
+ */
+static int cachefiles_histogram_show(struct seq_file *m, void *v)
+{
+ unsigned long index;
+ unsigned x, y, z, t;
+
+ switch ((unsigned long) v) {
+ case 1:
+ seq_puts(m, "JIFS SECS LOOKUPS MKDIRS CREATES\n");
+ return 0;
+ case 2:
+ seq_puts(m, "===== ===== ========= ========= =========\n");
+ return 0;
+ default:
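+ /* rows 1 and 2 are the column headings, so jiffy bucket N
+ * appears at row N + 3 */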
+ index = (unsigned long) v - 3;
+ x = atomic_read(&cachefiles_lookup_histogram[index]);
+ y = atomic_read(&cachefiles_mkdir_histogram[index]);
+ z = atomic_read(&cachefiles_create_histogram[index]);
+ if (x == 0 && y == 0 && z == 0)
+ return 0;
+
+ t = (index * 1000) / HZ;
+
+ seq_printf(m, "%4lu 0.%03u %9u %9u %9u\n", index, t, x, y, z);
+ return 0;
+ }
+}
+
+/*
+ * set up the iterator to start reading from the first line
+ */
+static void *cachefiles_histogram_start(struct seq_file *m, loff_t *_pos)
+{
+ if ((unsigned long long)*_pos >= HZ + 2)
+ return NULL;
+ if (*_pos == 0)
+ *_pos = 1;
+ return (void *)(unsigned long) *_pos;
+}
+
+/*
+ * move to the next line
+ */
+static void *cachefiles_histogram_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return (unsigned long long)*pos > HZ + 2 ?
+ NULL : (void *)(unsigned long) *pos;
+}
+
+/*
+ * clean up after reading
+ */
+static void cachefiles_histogram_stop(struct seq_file *m, void *v)
+{
+}
+
+static const struct seq_operations cachefiles_histogram_ops = {
+ .start = cachefiles_histogram_start,
+ .stop = cachefiles_histogram_stop,
+ .next = cachefiles_histogram_next,
+ .show = cachefiles_histogram_show,
+};
+
+/*
+ * open "/proc/fs/cachefiles/XXX" which provide statistics summaries
+ */
+static int cachefiles_histogram_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &cachefiles_histogram_ops);
+}
+
+static const struct file_operations cachefiles_histogram_fops = {
+ .owner = THIS_MODULE,
+ .open = cachefiles_histogram_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
+
+/*
+ * initialise the /proc/fs/cachefiles/ directory
+ */
+int __init cachefiles_proc_init(void)
+{
+ _enter("");
+
+ if (!proc_mkdir("fs/cachefiles", NULL))
+ goto error_dir;
+
+ if (!proc_create("fs/cachefiles/histogram", S_IFREG | 0444, NULL,
+ &cachefiles_histogram_fops))
+ goto error_histogram;
+
+ _leave(" = 0");
+ return 0;
+
+error_histogram:
+ remove_proc_entry("fs/cachefiles", NULL);
+error_dir:
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+}
+
+/*
+ * clean up the /proc/fs/cachefiles/ directory
+ */
+void cachefiles_proc_cleanup(void)
+{
+ remove_proc_entry("fs/cachefiles/histogram", NULL);
+ remove_proc_entry("fs/cachefiles", NULL);
+}
diff --git a/fs/cachefiles/cf-rdwr.c b/fs/cachefiles/cf-rdwr.c
new file mode 100644
index 000000000000..bed3e2852e17
--- /dev/null
+++ b/fs/cachefiles/cf-rdwr.c
@@ -0,0 +1,853 @@
+/* Storage object read/write
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include "cf-internal.h"
+
+/*
+ * detect wake up events generated by the unlocking of pages in which we're
+ * interested
+ * - we use this to detect read completion of backing pages
+ * - the caller holds the waitqueue lock
+ */
+static int cachefiles_read_waiter(wait_queue_t *wait, unsigned mode,
+ int sync, void *_key)
+{
+ struct cachefiles_one_read *monitor =
+ container_of(wait, struct cachefiles_one_read, monitor);
+ struct cachefiles_object *object;
+ struct wait_bit_key *key = _key;
+ struct page *page = wait->private;
+
+ ASSERT(key);
+
+ _enter("{%lu},%u,%d,{%p,%u}",
+ monitor->netfs_page->index, mode, sync,
+ key->flags, key->bit_nr);
+
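+ /* only respond to an unlock wakeup for the page we're monitoring;
+ * other wakeups delivered on this queue are not ours */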
+ if (key->flags != &page->flags ||
+ key->bit_nr != PG_locked)
+ return 0;
+
+ _debug("--- monitor %p %lx ---", page, page->flags);
+
+ if (!PageUptodate(page) && !PageError(page))
+ dump_stack();
+
+ /* remove from the waitqueue */
+ list_del(&wait->task_list);
+
+ /* move onto the action list and queue for FS-Cache thread pool */
+ ASSERT(monitor->op);
+
+ object = container_of(monitor->op->op.object,
+ struct cachefiles_object, fscache);
+
+ spin_lock(&object->work_lock);
+ list_add_tail(&monitor->op_link, &monitor->op->to_do);
+ spin_unlock(&object->work_lock);
+
+ fscache_enqueue_retrieval(monitor->op);
+ return 0;
+}
+
+/*
+ * copy data from backing pages to netfs pages to complete a read operation
+ * - driven by FS-Cache's thread pool
+ */
+static void cachefiles_read_copier(struct fscache_operation *_op)
+{
+ struct cachefiles_one_read *monitor;
+ struct cachefiles_object *object;
+ struct fscache_retrieval *op;
+ struct pagevec pagevec;
+ int error, max;
+
+ op = container_of(_op, struct fscache_retrieval, op);
+ object = container_of(op->op.object,
+ struct cachefiles_object, fscache);
+
+ _enter("{ino=%lu}", object->backer->d_inode->i_ino);
+
+ pagevec_init(&pagevec, 0);
+
+ max = 8;
+ spin_lock_irq(&object->work_lock);
+
+ while (!list_empty(&op->to_do)) {
+ monitor = list_entry(op->to_do.next,
+ struct cachefiles_one_read, op_link);
+ list_del(&monitor->op_link);
+
+ spin_unlock_irq(&object->work_lock);
+
+ _debug("- copy {%lu}", monitor->back_page->index);
+
+ error = -EIO;
+ if (PageUptodate(monitor->back_page)) {
+ copy_highpage(monitor->netfs_page, monitor->back_page);
+
+ pagevec_add(&pagevec, monitor->netfs_page);
+ fscache_mark_pages_cached(monitor->op, &pagevec);
+ error = 0;
+ }
+
+ if (error)
+ cachefiles_io_error_obj(
+ object,
+ "Readpage failed on backing file %lx",
+ (unsigned long) monitor->back_page->flags);
+
+ page_cache_release(monitor->back_page);
+
+ fscache_end_io(op, monitor->netfs_page, error);
+ page_cache_release(monitor->netfs_page);
+ fscache_put_retrieval(op);
+ kfree(monitor);
+
+ /* let the thread pool have some air occasionally */
+ max--;
+ if (max < 0 || need_resched()) {
+ if (!list_empty(&op->to_do))
+ fscache_enqueue_retrieval(op);
+ _leave(" [maxed out]");
+ return;
+ }
+
+ spin_lock_irq(&object->work_lock);
+ }
+
+ spin_unlock_irq(&object->work_lock);
+ _leave("");
+}
+
+/*
+ * read the backing-file page that corresponds to the given netfs page
+ * - an uncertain page is simply discarded, to be tried again another time
+ */
+static int cachefiles_read_backing_file_one(struct cachefiles_object *object,
+ struct fscache_retrieval *op,
+ struct page *netpage,
+ struct pagevec *pagevec)
+{
+ struct cachefiles_one_read *monitor;
+ struct address_space *bmapping;
+ struct page *newpage, *backpage;
+ int ret;
+
+ _enter("");
+
+ pagevec_reinit(pagevec);
+
+ _debug("read back %p{%lu,%d}",
+ netpage, netpage->index, page_count(netpage));
+
+ monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
+ if (!monitor)
+ goto nomem;
+
+ monitor->netfs_page = netpage;
+ monitor->op = fscache_get_retrieval(op);
+
+ init_waitqueue_func_entry(&monitor->monitor, cachefiles_read_waiter);
+
+ /* attempt to get hold of the backing page */
+ bmapping = object->backer->d_inode->i_mapping;
+ newpage = NULL;
+
+ for (;;) {
+ backpage = find_get_page(bmapping, netpage->index);
+ if (backpage)
+ goto backing_page_already_present;
+
+ if (!newpage) {
+ newpage = page_cache_alloc_cold(bmapping);
+ if (!newpage)
+ goto nomem_monitor;
+ }
+
+ ret = add_to_page_cache(newpage, bmapping,
+ netpage->index, GFP_KERNEL);
+ if (ret == 0)
+ goto installed_new_backing_page;
+ if (ret != -EEXIST)
+ goto nomem_page;
+ }
+
+ /* we've installed a new backing page, so now we need to add it
+ * to the LRU list and start it reading */
+installed_new_backing_page:
+ _debug("- new %p", newpage);
+
+ backpage = newpage;
+ newpage = NULL;
+
+ page_cache_get(backpage);
+ pagevec_add(pagevec, backpage);
+ __pagevec_lru_add_file(pagevec);
+
+read_backing_page:
+ ret = bmapping->a_ops->readpage(NULL, backpage);
+ if (ret < 0)
+ goto read_error;
+
+ /* set the monitor to transfer the data across */
+monitor_backing_page:
+ _debug("- monitor add");
+
+ /* install the monitor */
+ page_cache_get(monitor->netfs_page);
+ page_cache_get(backpage);
+ monitor->back_page = backpage;
+ monitor->monitor.private = backpage;
+ add_page_wait_queue(backpage, &monitor->monitor);
+ monitor = NULL;
+
+ /* but the page may have been read before the monitor was installed, so
+ * the monitor may miss the event - so we have to ensure that we do get
+ * one in such a case */
+ if (trylock_page(backpage)) {
+ _debug("jumpstart %p {%lx}", backpage, backpage->flags);
+ unlock_page(backpage);
+ }
+ goto success;
+
+ /* if the backing page is already present, it can be in one of
+ * three states: read in progress, read failed or read okay */
+backing_page_already_present:
+ _debug("- present");
+
+ if (newpage) {
+ page_cache_release(newpage);
+ newpage = NULL;
+ }
+
+ if (PageError(backpage))
+ goto io_error;
+
+ if (PageUptodate(backpage))
+ goto backing_page_already_uptodate;
+
+ if (!trylock_page(backpage))
+ goto monitor_backing_page;
+ _debug("read %p {%lx}", backpage, backpage->flags);
+ goto read_backing_page;
+
+ /* the backing page is already up to date, attach the netfs
+ * page to the pagecache and LRU and copy the data across */
+backing_page_already_uptodate:
+ _debug("- uptodate");
+
+ pagevec_add(pagevec, netpage);
+ fscache_mark_pages_cached(op, pagevec);
+
+ copy_highpage(netpage, backpage);
+ fscache_end_io(op, netpage, 0);
+
+success:
+ _debug("success");
+ ret = 0;
+
+out:
+ if (backpage)
+ page_cache_release(backpage);
+ if (monitor) {
+ fscache_put_retrieval(monitor->op);
+ kfree(monitor);
+ }
+ _leave(" = %d", ret);
+ return ret;
+
+read_error:
+ _debug("read error %d", ret);
+ if (ret == -ENOMEM)
+ goto out;
+io_error:
+ cachefiles_io_error_obj(object, "Page read error on backing file");
+ ret = -ENOBUFS;
+ goto out;
+
+nomem_page:
+ page_cache_release(newpage);
+nomem_monitor:
+ fscache_put_retrieval(monitor->op);
+ kfree(monitor);
+nomem:
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+}
+
+/*
+ * read a page from the cache or allocate a block in which to store it
+ * - cache withdrawal is prevented by the caller
+ * - returns -EINTR if interrupted
+ * - returns -ENOMEM if we ran out of memory
+ * - returns -ENOBUFS if no buffers can be made available
+ * - returns -ENOBUFS if the page is beyond EOF
+ * - if the page is backed by a block in the cache:
+ * - a read will be started which will call the callback on completion
+ * - 0 will be returned
+ * - else if the page is unbacked:
+ * - the metadata will be retained
+ * - -ENODATA will be returned
+ */
+int cachefiles_read_or_alloc_page(struct fscache_retrieval *op,
+ struct page *page,
+ gfp_t gfp)
+{
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
+ struct pagevec pagevec;
+ struct inode *inode;
+ sector_t block0, block;
+ unsigned shift;
+ int ret;
+
+ object = container_of(op->op.object,
+ struct cachefiles_object, fscache);
+ cache = container_of(object->fscache.cache,
+ struct cachefiles_cache, cache);
+
+ _enter("{%p},{%lx},,,", object, page->index);
+
+ if (!object->backer)
+ return -ENOBUFS;
+
+ inode = object->backer->d_inode;
+ ASSERT(S_ISREG(inode->i_mode));
+ ASSERT(inode->i_mapping->a_ops->bmap);
+ ASSERT(inode->i_mapping->a_ops->readpages);
+
+ /* calculate the shift required to use bmap */
+ if (inode->i_sb->s_blocksize > PAGE_SIZE)
+ return -ENOBUFS;
+
+ shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
+
+ op->op.flags = FSCACHE_OP_FAST;
+ op->op.processor = cachefiles_read_copier;
+
+ pagevec_init(&pagevec, 0);
+
+ /* we assume the absence or presence of the first block is a good
+ * enough indication for the page as a whole
+ * - TODO: don't use bmap() for this as it is _not_ actually good
+ * enough: it doesn't indicate errors, but it's all we've got
+ * for the moment
+ */
+ block0 = page->index;
+ block0 <<= shift;
+
+ block = inode->i_mapping->a_ops->bmap(inode->i_mapping, block0);
+ _debug("%llx -> %llx",
+ (unsigned long long) block0,
+ (unsigned long long) block);
+
+ if (block) {
+ /* submit the apparently valid page to the backing fs to be
+ * read from disk */
+ ret = cachefiles_read_backing_file_one(object, op, page,
+ &pagevec);
+ } else if (cachefiles_has_space(cache, 0, 1) == 0) {
+ /* there's space in the cache we can use */
+ pagevec_add(&pagevec, page);
+ fscache_mark_pages_cached(op, &pagevec);
+ ret = -ENODATA;
+ } else {
+ ret = -ENOBUFS;
+ }
+
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * read the backing-file pages corresponding to the given set of netfs pages
+ * - any uncertain pages are simply discarded, to be tried again another time
+ */
+static int cachefiles_read_backing_file(struct cachefiles_object *object,
+ struct fscache_retrieval *op,
+ struct list_head *list,
+ struct pagevec *mark_pvec)
+{
+ struct cachefiles_one_read *monitor = NULL;
+ struct address_space *bmapping = object->backer->d_inode->i_mapping;
+ struct pagevec lru_pvec;
+ struct page *newpage = NULL, *netpage, *_n, *backpage = NULL;
+ int ret = 0;
+
+ _enter("");
+
+ pagevec_init(&lru_pvec, 0);
+
+ list_for_each_entry_safe(netpage, _n, list, lru) {
+ list_del(&netpage->lru);
+
+ _debug("read back %p{%lu,%d}",
+ netpage, netpage->index, page_count(netpage));
+
+ if (!monitor) {
+ monitor = kzalloc(sizeof(*monitor), GFP_KERNEL);
+ if (!monitor)
+ goto nomem;
+
+ monitor->op = fscache_get_retrieval(op);
+ init_waitqueue_func_entry(&monitor->monitor,
+ cachefiles_read_waiter);
+ }
+
+ for (;;) {
+ backpage = find_get_page(bmapping, netpage->index);
+ if (backpage)
+ goto backing_page_already_present;
+
+ if (!newpage) {
+ newpage = page_cache_alloc_cold(bmapping);
+ if (!newpage)
+ goto nomem;
+ }
+
+ ret = add_to_page_cache(newpage, bmapping,
+ netpage->index, GFP_KERNEL);
+ if (ret == 0)
+ goto installed_new_backing_page;
+ if (ret != -EEXIST)
+ goto nomem;
+ }
+
+ /* we've installed a new backing page, so now we need to add it
+ * to the LRU list and start it reading */
+ installed_new_backing_page:
+ _debug("- new %p", newpage);
+
+ backpage = newpage;
+ newpage = NULL;
+
+ page_cache_get(backpage);
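+ /* batch the LRU insertions: pagevec_add() returns 0 when the vec
+ * is full, at which point the batch is spilled onto the file LRU */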
+ if (!pagevec_add(&lru_pvec, backpage))
+ __pagevec_lru_add_file(&lru_pvec);
+
+ reread_backing_page:
+ ret = bmapping->a_ops->readpage(NULL, backpage);
+ if (ret < 0)
+ goto read_error;
+
+ /* add the netfs page to the pagecache and LRU, and set the
+ * monitor to transfer the data across */
+ monitor_backing_page:
+ _debug("- monitor add");
+
+ ret = add_to_page_cache(netpage, op->mapping, netpage->index,
+ GFP_KERNEL);
+ if (ret < 0) {
+ if (ret == -EEXIST) {
+ page_cache_release(netpage);
+ continue;
+ }
+ goto nomem;
+ }
+
+ page_cache_get(netpage);
+ if (!pagevec_add(&lru_pvec, netpage))
+ __pagevec_lru_add_file(&lru_pvec);
+
+ /* install a monitor */
+ page_cache_get(netpage);
+ monitor->netfs_page = netpage;
+
+ page_cache_get(backpage);
+ monitor->back_page = backpage;
+ monitor->monitor.private = backpage;
+ add_page_wait_queue(backpage, &monitor->monitor);
+ monitor = NULL;
+
+ /* but the page may have been read before the monitor was
+ * installed, so the monitor may miss the event - so we have to
+ * ensure that we do get one in such a case */
+ if (trylock_page(backpage)) {
+ _debug("2unlock %p {%lx}", backpage, backpage->flags);
+ unlock_page(backpage);
+ }
+
+ page_cache_release(backpage);
+ backpage = NULL;
+
+ page_cache_release(netpage);
+ netpage = NULL;
+ continue;
+
+ /* if the backing page is already present, it can be in one of
+ * three states: read in progress, read failed or read okay */
+ backing_page_already_present:
+ _debug("- present %p", backpage);
+
+ if (PageError(backpage))
+ goto io_error;
+
+ if (PageUptodate(backpage))
+ goto backing_page_already_uptodate;
+
+ _debug("- not ready %p{%lx}", backpage, backpage->flags);
+
+ if (!trylock_page(backpage))
+ goto monitor_backing_page;
+
+ if (PageError(backpage)) {
+ _debug("error %lx", backpage->flags);
+ unlock_page(backpage);
+ goto io_error;
+ }
+
+ if (PageUptodate(backpage))
+ goto backing_page_already_uptodate_unlock;
+
+ /* we've locked a page that's neither up to date nor erroneous,
+ * so we need to attempt to read it again */
+ goto reread_backing_page;
+
+ /* the backing page is already up to date, attach the netfs
+ * page to the pagecache and LRU and copy the data across */
+ backing_page_already_uptodate_unlock:
+ _debug("uptodate %lx", backpage->flags);
+ unlock_page(backpage);
+ backing_page_already_uptodate:
+ _debug("- uptodate");
+
+ ret = add_to_page_cache(netpage, op->mapping, netpage->index,
+ GFP_KERNEL);
+ if (ret < 0) {
+ if (ret == -EEXIST) {
+ page_cache_release(netpage);
+ continue;
+ }
+ goto nomem;
+ }
+
+ copy_highpage(netpage, backpage);
+
+ page_cache_release(backpage);
+ backpage = NULL;
+
+ if (!pagevec_add(mark_pvec, netpage))
+ fscache_mark_pages_cached(op, mark_pvec);
+
+ page_cache_get(netpage);
+ if (!pagevec_add(&lru_pvec, netpage))
+ __pagevec_lru_add_file(&lru_pvec);
+
+ fscache_end_io(op, netpage, 0);
+ page_cache_release(netpage);
+ netpage = NULL;
+ continue;
+ }
+
+ netpage = NULL;
+
+ _debug("out");
+
+out:
+ /* tidy up */
+ pagevec_lru_add_file(&lru_pvec);
+
+ if (newpage)
+ page_cache_release(newpage);
+ if (netpage)
+ page_cache_release(netpage);
+ if (backpage)
+ page_cache_release(backpage);
+ if (monitor) {
+ fscache_put_retrieval(op);
+ kfree(monitor);
+ }
+
+ list_for_each_entry_safe(netpage, _n, list, lru) {
+ list_del(&netpage->lru);
+ page_cache_release(netpage);
+ }
+
+ _leave(" = %d", ret);
+ return ret;
+
+nomem:
+ _debug("nomem");
+ ret = -ENOMEM;
+ goto out;
+
+read_error:
+ _debug("read error %d", ret);
+ if (ret == -ENOMEM)
+ goto out;
+io_error:
+ cachefiles_io_error_obj(object, "Page read error on backing file");
+ ret = -ENOBUFS;
+ goto out;
+}
+
+/*
+ * read a list of pages from the cache or allocate blocks in which to store
+ * them
+ */
+int cachefiles_read_or_alloc_pages(struct fscache_retrieval *op,
+ struct list_head *pages,
+ unsigned *nr_pages,
+ gfp_t gfp)
+{
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
+ struct list_head backpages;
+ struct pagevec pagevec;
+ struct inode *inode;
+ struct page *page, *_n;
+ unsigned shift, nrbackpages;
+ int ret, ret2, space;
+
+ object = container_of(op->op.object,
+ struct cachefiles_object, fscache);
+ cache = container_of(object->fscache.cache,
+ struct cachefiles_cache, cache);
+
+ _enter("{OBJ%x,%d},,%d,,",
+ object->fscache.debug_id, atomic_read(&op->op.usage),
+ *nr_pages);
+
+ if (!object->backer)
+ return -ENOBUFS;
+
+ space = 1;
+ if (cachefiles_has_space(cache, 0, *nr_pages) < 0)
+ space = 0;
+
+ inode = object->backer->d_inode;
+ ASSERT(S_ISREG(inode->i_mode));
+ ASSERT(inode->i_mapping->a_ops->bmap);
+ ASSERT(inode->i_mapping->a_ops->readpages);
+
+ /* calculate the shift required to use bmap */
+ if (inode->i_sb->s_blocksize > PAGE_SIZE)
+ return -ENOBUFS;
+
+ shift = PAGE_SHIFT - inode->i_sb->s_blocksize_bits;
+
+ pagevec_init(&pagevec, 0);
+
+ op->op.flags = FSCACHE_OP_FAST;
+ op->op.processor = cachefiles_read_copier;
+
+ INIT_LIST_HEAD(&backpages);
+ nrbackpages = 0;
+
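+ /* assume no page is backed yet: -ENODATA tells the netfs that it may
+ * store the pages in the cache later; -ENOBUFS that there's no room */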
+ ret = space ? -ENODATA : -ENOBUFS;
+ list_for_each_entry_safe(page, _n, pages, lru) {
+ sector_t block0, block;
+
+ /* we assume the absence or presence of the first block is a
+ * good enough indication for the page as a whole
+ * - TODO: don't use bmap() for this as it is _not_ actually
+ * good enough: it doesn't indicate errors, but it's all
+ * we've got for the moment
+ */
+ block0 = page->index;
+ block0 <<= shift;
+
+ block = inode->i_mapping->a_ops->bmap(inode->i_mapping,
+ block0);
+ _debug("%llx -> %llx",
+ (unsigned long long) block0,
+ (unsigned long long) block);
+
+ if (block) {
+ /* we have data - add it to the list to give to the
+ * backing fs */
+ list_move(&page->lru, &backpages);
+ (*nr_pages)--;
+ nrbackpages++;
+ } else if (space && pagevec_add(&pagevec, page) == 0) {
+ fscache_mark_pages_cached(op, &pagevec);
+ ret = -ENODATA;
+ }
+ }
+
+ if (pagevec_count(&pagevec) > 0)
+ fscache_mark_pages_cached(op, &pagevec);
+
+ if (list_empty(pages))
+ ret = 0;
+
+ /* submit the apparently valid pages to the backing fs to be read from
+ * disk */
+ if (nrbackpages > 0) {
+ ret2 = cachefiles_read_backing_file(object, op, &backpages,
+ &pagevec);
+ if (ret2 == -ENOMEM || ret2 == -EINTR)
+ ret = ret2;
+ }
+
+ if (pagevec_count(&pagevec) > 0)
+ fscache_mark_pages_cached(op, &pagevec);
+
+ _leave(" = %d [nr=%u%s]",
+ ret, *nr_pages, list_empty(pages) ? " empty" : "");
+ return ret;
+}
+
+/*
+ * allocate a block in the cache in which to store a page
+ * - cache withdrawal is prevented by the caller
+ * - returns -EINTR if interrupted
+ * - returns -ENOMEM if we ran out of memory
+ * - returns -ENOBUFS if no buffers can be made available
+ * - returns -ENOBUFS if the page is beyond EOF
+ * - otherwise:
+ * - the metadata will be retained
+ * - 0 will be returned
+ */
+int cachefiles_allocate_page(struct fscache_retrieval *op,
+ struct page *page,
+ gfp_t gfp)
+{
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
+ struct pagevec pagevec;
+ int ret;
+
+ object = container_of(op->op.object,
+ struct cachefiles_object, fscache);
+ cache = container_of(object->fscache.cache,
+ struct cachefiles_cache, cache);
+
+ _enter("%p,{%lx},", object, page->index);
+
+ ret = cachefiles_has_space(cache, 0, 1);
+ if (ret == 0) {
+ pagevec_init(&pagevec, 0);
+ pagevec_add(&pagevec, page);
+ fscache_mark_pages_cached(op, &pagevec);
+ } else {
+ ret = -ENOBUFS;
+ }
+
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * allocate blocks in the cache in which to store a set of pages
+ * - cache withdrawal is prevented by the caller
+ * - returns -EINTR if interrupted
+ * - returns -ENOMEM if we ran out of memory
+ * - returns -ENOBUFS if some buffers couldn't be made available
+ * - returns -ENOBUFS if some pages are beyond EOF
+ * - otherwise:
+ * - -ENODATA will be returned
+ * - metadata will be retained for any page marked
+ */
+int cachefiles_allocate_pages(struct fscache_retrieval *op,
+ struct list_head *pages,
+ unsigned *nr_pages,
+ gfp_t gfp)
+{
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
+ struct pagevec pagevec;
+ struct page *page;
+ int ret;
+
+ object = container_of(op->op.object,
+ struct cachefiles_object, fscache);
+ cache = container_of(object->fscache.cache,
+ struct cachefiles_cache, cache);
+
+ _enter("%p,,,%d,", object, *nr_pages);
+
+ ret = cachefiles_has_space(cache, 0, *nr_pages);
+ if (ret == 0) {
+ pagevec_init(&pagevec, 0);
+
+ list_for_each_entry(page, pages, lru) {
+ if (pagevec_add(&pagevec, page) == 0)
+ fscache_mark_pages_cached(op, &pagevec);
+ }
+
+ if (pagevec_count(&pagevec) > 0)
+ fscache_mark_pages_cached(op, &pagevec);
+ ret = -ENODATA;
+ } else {
+ ret = -ENOBUFS;
+ }
+
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * request a page be stored in the cache
+ * - cache withdrawal is prevented by the caller
+ * - this request may be ignored if there's no cache block available, in which
+ * case -ENOBUFS will be returned
+ * - if the op is in progress, 0 will be returned
+ */
+int cachefiles_write_page(struct fscache_storage *op, struct page *page)
+{
+ struct cachefiles_object *object;
+ struct address_space *mapping;
+ int ret;
+
+ ASSERT(op != NULL);
+ ASSERT(page != NULL);
+
+ object = container_of(op->op.object,
+ struct cachefiles_object, fscache);
+
+ _enter("%p,%p{%lx},,,", object, page, page->index);
+
+ if (!object->backer) {
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+ }
+
+ ASSERT(S_ISREG(object->backer->d_inode->i_mode));
+
+ /* copy the page to the backing filesystem (typically ext3) and let it
+ * store it in its own time */
+ mapping = object->backer->d_inode->i_mapping;
+ ret = -EIO;
+ if (mapping->a_ops->write_one_page) {
+ ret = mapping->a_ops->write_one_page(mapping, page->index,
+ page);
+ _debug("write_one_page -> %d", ret);
+ }
+
+ if (ret < 0) {
+ if (ret == -EIO)
+ cachefiles_io_error_obj(
+ object, "Write page to backing file failed");
+ ret = -ENOBUFS;
+ }
+
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * detach a backing block from a page
+ * - cache withdrawal is prevented by the caller
+ */
+void cachefiles_uncache_page(struct fscache_object *_object, struct page *page)
+{
+ struct cachefiles_object *object;
+ struct cachefiles_cache *cache;
+
+ object = container_of(_object, struct cachefiles_object, fscache);
+ cache = container_of(object->fscache.cache,
+ struct cachefiles_cache, cache);
+
+ _enter("%p,{%lu}", object, page->index);
+
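+ /* note: the caller takes the cookie lock before invoking this
+ * operation, so it is only released here, never acquired */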
+ spin_unlock(&object->fscache.cookie->lock);
+}
diff --git a/fs/cachefiles/cf-security.c b/fs/cachefiles/cf-security.c
new file mode 100644
index 000000000000..8cdbc26f3571
--- /dev/null
+++ b/fs/cachefiles/cf-security.c
@@ -0,0 +1,116 @@
+/* CacheFiles security management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/fs.h>
+#include <linux/cred.h>
+#include "cf-internal.h"
+
+/*
+ * determine the security context within which we access the cache from within
+ * the kernel
+ */
+int cachefiles_get_security_ID(struct cachefiles_cache *cache)
+{
+ struct cred *new;
+ int ret;
+
+ _enter("{%s}", cache->secctx);
+
+ new = prepare_kernel_cred(current);
+ if (!new) {
+ ret = -ENOMEM;
+ goto error;
+ }
+
+ if (cache->secctx) {
+ ret = set_security_override_from_ctx(new, cache->secctx);
+ if (ret < 0) {
+ put_cred(new);
+ printk(KERN_ERR "CacheFiles:"
+ " Security denies permission to nominate"
+ " security context: error %d\n",
+ ret);
+ goto error;
+ }
+ }
+
+ cache->cache_cred = new;
+ ret = 0;
+error:
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * see if mkdir and create can be performed in the root directory
+ */
+static int cachefiles_check_cache_dir(struct cachefiles_cache *cache,
+ struct dentry *root)
+{
+ int ret;
+
+ ret = security_inode_mkdir(root->d_inode, root, 0);
+ if (ret < 0) {
+ printk(KERN_ERR "CacheFiles:"
+ " Security denies permission to make dirs: error %d",
+ ret);
+ return ret;
+ }
+
+ ret = security_inode_create(root->d_inode, root, 0);
+ if (ret < 0)
+ printk(KERN_ERR "CacheFiles:"
+ " Security denies permission to create files: error %d",
+ ret);
+
+ return ret;
+}
+
+/*
+ * check the security details of the on-disk cache
+ * - must be called with security override in force
+ */
+int cachefiles_determine_cache_security(struct cachefiles_cache *cache,
+ struct dentry *root,
+ const struct cred **_saved_cred)
+{
+ struct cred *new;
+ int ret;
+
+ _enter("");
+
+ /* duplicate the cache creds for COW (the override is currently in
+ * force, so we can use prepare_creds() to do this) */
+ new = prepare_creds();
+ if (!new)
+ return -ENOMEM;
+
+ cachefiles_end_secure(cache, *_saved_cred);
+
+ /* use the cache root dir's security context as the basis with
+ * which to create files */
+ ret = set_create_files_as(new, root->d_inode);
+ if (ret < 0) {
+ _leave(" = %d [cfa]", ret);
+ return ret;
+ }
+
+ put_cred(cache->cache_cred);
+ cache->cache_cred = new;
+
+ cachefiles_begin_secure(cache, _saved_cred);
+ ret = cachefiles_check_cache_dir(cache, root);
+
+ if (ret == -EOPNOTSUPP)
+ ret = 0;
+ _leave(" = %d", ret);
+ return ret;
+}
diff --git a/fs/cachefiles/cf-xattr.c b/fs/cachefiles/cf-xattr.c
new file mode 100644
index 000000000000..37c84f915205
--- /dev/null
+++ b/fs/cachefiles/cf-xattr.c
@@ -0,0 +1,291 @@
+/* CacheFiles extended attribute management
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fsnotify.h>
+#include <linux/quotaops.h>
+#include <linux/xattr.h>
+#include "cf-internal.h"
+
+static const char cachefiles_xattr_cache[] =
+ XATTR_USER_PREFIX "CacheFiles.cache";
+
+/*
+ * check the type label on an object
+ * - done using xattrs
+ */
+int cachefiles_check_object_type(struct cachefiles_object *object)
+{
+ struct dentry *dentry = object->dentry;
+ char type[3], xtype[3];
+ int ret;
+
+ ASSERT(dentry);
+ ASSERT(dentry->d_inode);
+
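+ /* an object with no cookie is presumably the cache's own root index,
+ * so it gets the fixed type label "C3" */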
+ if (!object->fscache.cookie)
+ strcpy(type, "C3");
+ else
+ snprintf(type, 3, "%02x", object->fscache.cookie->def->type);
+
+ _enter("%p{%s}", object, type);
+
+ /* attempt to install a type label directly */
+ ret = vfs_setxattr(dentry, cachefiles_xattr_cache, type, 2,
+ XATTR_CREATE);
+ if (ret == 0) {
+ _debug("SET"); /* we succeeded */
+ goto error;
+ }
+
+ if (ret != -EEXIST) {
+ kerror("Can't set xattr on %*.*s [%lu] (err %d)",
+ dentry->d_name.len, dentry->d_name.len,
+ dentry->d_name.name, dentry->d_inode->i_ino,
+ -ret);
+ goto error;
+ }
+
+ /* read the current type label */
+ ret = vfs_getxattr(dentry, cachefiles_xattr_cache, xtype, 3);
+ if (ret < 0) {
+ if (ret == -ERANGE)
+ goto bad_type_length;
+
+ kerror("Can't read xattr on %*.*s [%lu] (err %d)",
+ dentry->d_name.len, dentry->d_name.len,
+ dentry->d_name.name, dentry->d_inode->i_ino,
+ -ret);
+ goto error;
+ }
+
+ /* check the type is what we're expecting */
+ if (ret != 2)
+ goto bad_type_length;
+
+ if (xtype[0] != type[0] || xtype[1] != type[1])
+ goto bad_type;
+
+ ret = 0;
+
+error:
+ _leave(" = %d", ret);
+ return ret;
+
+bad_type_length:
+ kerror("Cache object %lu type xattr length incorrect",
+ dentry->d_inode->i_ino);
+ ret = -EIO;
+ goto error;
+
+bad_type:
+ xtype[2] = 0;
+ kerror("Cache object %*.*s [%lu] type %s not %s",
+ dentry->d_name.len, dentry->d_name.len,
+ dentry->d_name.name, dentry->d_inode->i_ino,
+ xtype, type);
+ ret = -EIO;
+ goto error;
+}
+
+/*
+ * set the state xattr on a cache file
+ */
+int cachefiles_set_object_xattr(struct cachefiles_object *object,
+ struct cachefiles_xattr *auxdata)
+{
+ struct dentry *dentry = object->dentry;
+ int ret;
+
+ ASSERT(object->fscache.cookie);
+ ASSERT(dentry);
+
+ _enter("%p,#%d", object, auxdata->len);
+
+ /* attempt to install the cache metadata directly */
+ _debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);
+
+ ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
+ &auxdata->type, auxdata->len,
+ XATTR_CREATE);
+ if (ret < 0 && ret != -ENOMEM)
+ cachefiles_io_error_obj(
+ object,
+ "Failed to set xattr with error %d", ret);
+
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * update the state xattr on a cache file
+ */
+int cachefiles_update_object_xattr(struct cachefiles_object *object,
+ struct cachefiles_xattr *auxdata)
+{
+ struct dentry *dentry = object->dentry;
+ int ret;
+
+ ASSERT(object->fscache.cookie);
+ ASSERT(dentry);
+
+ _enter("%p,#%d", object, auxdata->len);
+
+ /* attempt to install the cache metadata directly */
+ _debug("SET %s #%u", object->fscache.cookie->def->name, auxdata->len);
+
+ ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
+ &auxdata->type, auxdata->len,
+ XATTR_REPLACE);
+ if (ret < 0 && ret != -ENOMEM)
+ cachefiles_io_error_obj(
+ object,
+ "Failed to update xattr with error %d", ret);
+
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * check the state xattr on a cache file
+ * - return -ESTALE if the object should be deleted
+ */
+int cachefiles_check_object_xattr(struct cachefiles_object *object,
+ struct cachefiles_xattr *auxdata)
+{
+ struct cachefiles_xattr *auxbuf;
+ struct dentry *dentry = object->dentry;
+ int ret;
+
+ _enter("%p,#%d", object, auxdata->len);
+
+ ASSERT(dentry);
+ ASSERT(dentry->d_inode);
+
+ auxbuf = kmalloc(sizeof(struct cachefiles_xattr) + 512, GFP_KERNEL);
+ if (!auxbuf) {
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ /* read the current cache metadata: the type byte followed by the
+ * netfs's auxiliary data */
+ ret = vfs_getxattr(dentry, cachefiles_xattr_cache,
+ &auxbuf->type, 512 + 1);
+ if (ret < 0) {
+ if (ret == -ENODATA)
+ goto stale; /* no attribute - power went off
+ * mid-cull? */
+
+ if (ret == -ERANGE)
+ goto bad_type_length;
+
+ cachefiles_io_error_obj(object,
+ "Can't read xattr on %lu (err %d)",
+ dentry->d_inode->i_ino, -ret);
+ goto error;
+ }
+
+ /* check the on-disk object */
+ if (ret < 1)
+ goto bad_type_length;
+
+ if (auxbuf->type != auxdata->type)
+ goto stale;
+
+ auxbuf->len = ret;
+
+ /* consult the netfs */
+ if (object->fscache.cookie->def->check_aux) {
+ enum fscache_checkaux result;
+ unsigned int dlen;
+
+ dlen = auxbuf->len - 1;
+
+ _debug("checkaux %s #%u",
+ object->fscache.cookie->def->name, dlen);
+
+ result = fscache_check_aux(&object->fscache,
+ &auxbuf->data, dlen);
+
+ switch (result) {
+ /* entry okay as is */
+ case FSCACHE_CHECKAUX_OKAY:
+ goto okay;
+
+ /* entry requires update */
+ case FSCACHE_CHECKAUX_NEEDS_UPDATE:
+ break;
+
+ /* entry requires deletion */
+ case FSCACHE_CHECKAUX_OBSOLETE:
+ goto stale;
+
+ default:
+ BUG();
+ }
+
+ /* update the current label */
+ ret = vfs_setxattr(dentry, cachefiles_xattr_cache,
+ &auxdata->type, auxdata->len,
+ XATTR_REPLACE);
+ if (ret < 0) {
+ cachefiles_io_error_obj(object,
+ "Can't update xattr on %lu"
+ " (error %d)",
+ dentry->d_inode->i_ino, -ret);
+ goto error;
+ }
+ }
+
+okay:
+ ret = 0;
+
+error:
+ kfree(auxbuf);
+ _leave(" = %d", ret);
+ return ret;
+
+bad_type_length:
+ kerror("Cache object %lu xattr length incorrect",
+ dentry->d_inode->i_ino);
+ ret = -EIO;
+ goto error;
+
+stale:
+ ret = -ESTALE;
+ goto error;
+}
+
+/*
+ * remove the object's xattr to mark it stale
+ */
+int cachefiles_remove_object_xattr(struct cachefiles_cache *cache,
+ struct dentry *dentry)
+{
+ int ret;
+
+ ret = vfs_removexattr(dentry, cachefiles_xattr_cache);
+ if (ret < 0) {
+ if (ret == -ENOENT || ret == -ENODATA)
+ ret = 0;
+ else if (ret != -ENOMEM)
+ cachefiles_io_error(cache,
+ "Can't remove xattr from %lu"
+ " (error %d)",
+ dentry->d_inode->i_ino, -ret);
+ }
+
+ _leave(" = %d", ret);
+ return ret;
+}
diff --git a/fs/cifs/CHANGES b/fs/cifs/CHANGES
index 080703a15f44..73ac7ebd1dfc 100644
--- a/fs/cifs/CHANGES
+++ b/fs/cifs/CHANGES
@@ -5,7 +5,9 @@ rather than posix (advisory) byte range locks, even though server would
support posix byte range locks. Fix query of root inode when prefixpath
specified and user does not have access to query information about the
top of the share. Fix problem in 2.6.28 resolving DFS paths to
-Samba servers (worked to Windows).
+Samba servers (worked to Windows). Fix rmdir so that pending search
+(readdir) requests do not get invalid results which include the now
+removed directory.
Version 1.55
------------
diff --git a/fs/cifs/cifs_debug.c b/fs/cifs/cifs_debug.c
index 490e34bbf27a..949c36ed7818 100644
--- a/fs/cifs/cifs_debug.c
+++ b/fs/cifs/cifs_debug.c
@@ -402,7 +402,6 @@ cifs_proc_init(void)
if (proc_fs_cifs == NULL)
return;
- proc_fs_cifs->owner = THIS_MODULE;
proc_create("DebugData", 0, proc_fs_cifs, &cifs_debug_data_proc_fops);
#ifdef CONFIG_CIFS_STATS
diff --git a/fs/cifs/cifsencrypt.c b/fs/cifs/cifsencrypt.c
index d4839cf0cb2c..7c9809523f42 100644
--- a/fs/cifs/cifsencrypt.c
+++ b/fs/cifs/cifsencrypt.c
@@ -48,11 +48,11 @@ static int cifs_calculate_signature(const struct smb_hdr *cifs_pdu,
if ((cifs_pdu == NULL) || (signature == NULL) || (key == NULL))
return -EINVAL;
- MD5Init(&context);
- MD5Update(&context, (char *)&key->data, key->len);
- MD5Update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
+ cifs_MD5_init(&context);
+ cifs_MD5_update(&context, (char *)&key->data, key->len);
+ cifs_MD5_update(&context, cifs_pdu->Protocol, cifs_pdu->smb_buf_length);
- MD5Final(signature, &context);
+ cifs_MD5_final(signature, &context);
return 0;
}
@@ -96,8 +96,8 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
if ((iov == NULL) || (signature == NULL) || (key == NULL))
return -EINVAL;
- MD5Init(&context);
- MD5Update(&context, (char *)&key->data, key->len);
+ cifs_MD5_init(&context);
+ cifs_MD5_update(&context, (char *)&key->data, key->len);
for (i = 0; i < n_vec; i++) {
if (iov[i].iov_len == 0)
continue;
@@ -110,13 +110,13 @@ static int cifs_calc_signature2(const struct kvec *iov, int n_vec,
if (i == 0) {
if (iov[0].iov_len <= 8) /* cmd field at offset 9 */
break; /* nothing to sign or corrupt header */
- MD5Update(&context, iov[0].iov_base+4,
+ cifs_MD5_update(&context, iov[0].iov_base+4,
iov[0].iov_len-4);
} else
- MD5Update(&context, iov[i].iov_base, iov[i].iov_len);
+ cifs_MD5_update(&context, iov[i].iov_base, iov[i].iov_len);
}
- MD5Final(signature, &context);
+ cifs_MD5_final(signature, &context);
return 0;
}
diff --git a/fs/cifs/cifsproto.h b/fs/cifs/cifsproto.h
index 06f6779988bf..382ba6298809 100644
--- a/fs/cifs/cifsproto.h
+++ b/fs/cifs/cifsproto.h
@@ -35,8 +35,8 @@ extern struct smb_hdr *cifs_buf_get(void);
extern void cifs_buf_release(void *);
extern struct smb_hdr *cifs_small_buf_get(void);
extern void cifs_small_buf_release(void *);
-extern int smb_send(struct socket *, struct smb_hdr *,
- unsigned int /* length */ , struct sockaddr *, bool);
+extern int smb_send(struct TCP_Server_Info *, struct smb_hdr *,
+ unsigned int /* length */);
extern unsigned int _GetXid(void);
extern void _FreeXid(unsigned int);
#define GetXid() (int)_GetXid(); cFYI(1,("CIFS VFS: in %s as Xid: %d with uid: %d",__func__, xid,current_fsuid()));
diff --git a/fs/cifs/connect.c b/fs/cifs/connect.c
index e9ea394ee075..005df85219a8 100644
--- a/fs/cifs/connect.c
+++ b/fs/cifs/connect.c
@@ -23,7 +23,6 @@
#include <linux/string.h>
#include <linux/list.h>
#include <linux/wait.h>
-#include <linux/ipv6.h>
#include <linux/pagemap.h>
#include <linux/ctype.h>
#include <linux/utsname.h>
@@ -35,6 +34,7 @@
#include <linux/freezer.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
+#include <net/ipv6.h>
#include "cifspdu.h"
#include "cifsglob.h"
#include "cifsproto.h"
@@ -1354,7 +1354,7 @@ cifs_parse_mount_options(char *options, const char *devname,
}
static struct TCP_Server_Info *
-cifs_find_tcp_session(struct sockaddr *addr)
+cifs_find_tcp_session(struct sockaddr_storage *addr)
{
struct list_head *tmp;
struct TCP_Server_Info *server;
@@ -1374,13 +1374,13 @@ cifs_find_tcp_session(struct sockaddr *addr)
if (server->tcpStatus == CifsNew)
continue;
- if (addr->sa_family == AF_INET &&
+ if (addr->ss_family == AF_INET &&
(addr4->sin_addr.s_addr !=
server->addr.sockAddr.sin_addr.s_addr))
continue;
- else if (addr->sa_family == AF_INET6 &&
- memcmp(&server->addr.sockAddr6.sin6_addr,
- &addr6->sin6_addr, sizeof(addr6->sin6_addr)))
+ else if (addr->ss_family == AF_INET6 &&
+ !ipv6_addr_equal(&server->addr.sockAddr6.sin6_addr,
+ &addr6->sin6_addr))
continue;
++server->srv_count;
@@ -1419,12 +1419,12 @@ static struct TCP_Server_Info *
cifs_get_tcp_session(struct smb_vol *volume_info)
{
struct TCP_Server_Info *tcp_ses = NULL;
- struct sockaddr addr;
+ struct sockaddr_storage addr;
struct sockaddr_in *sin_server = (struct sockaddr_in *) &addr;
struct sockaddr_in6 *sin_server6 = (struct sockaddr_in6 *) &addr;
int rc;
- memset(&addr, 0, sizeof(struct sockaddr));
+ memset(&addr, 0, sizeof(struct sockaddr_storage));
if (volume_info->UNCip && volume_info->UNC) {
rc = cifs_inet_pton(AF_INET, volume_info->UNCip,
@@ -1435,9 +1435,9 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
rc = cifs_inet_pton(AF_INET6, volume_info->UNCip,
&sin_server6->sin6_addr.in6_u);
if (rc > 0)
- addr.sa_family = AF_INET6;
+ addr.ss_family = AF_INET6;
} else {
- addr.sa_family = AF_INET;
+ addr.ss_family = AF_INET;
}
if (rc <= 0) {
@@ -1502,7 +1502,7 @@ cifs_get_tcp_session(struct smb_vol *volume_info)
tcp_ses->tcpStatus = CifsNew;
++tcp_ses->srv_count;
- if (addr.sa_family == AF_INET6) {
+ if (addr.ss_family == AF_INET6) {
cFYI(1, ("attempting ipv6 connect"));
/* BB should we allow ipv6 on port 139? */
/* other OS never observed in Wild doing 139 with v6 */
@@ -1802,7 +1802,7 @@ ipv4_connect(struct TCP_Server_Info *server)
* user space buffer
*/
socket->sk->sk_rcvtimeo = 7 * HZ;
- socket->sk->sk_sndtimeo = 3 * HZ;
+ socket->sk->sk_sndtimeo = 5 * HZ;
/* make the bufsizes depend on wsize/rsize and max requests */
if (server->noautotune) {
@@ -1860,9 +1860,7 @@ ipv4_connect(struct TCP_Server_Info *server)
smb_buf = (struct smb_hdr *)ses_init_buf;
/* sizeof RFC1002_SESSION_REQUEST with no scope */
smb_buf->smb_buf_length = 0x81000044;
- rc = smb_send(socket, smb_buf, 0x44,
- (struct sockaddr *) &server->addr.sockAddr,
- server->noblocksnd);
+ rc = smb_send(server, smb_buf, 0x44);
kfree(ses_init_buf);
msleep(1); /* RFC1001 layer in at least one server
requires very short break before negprot
@@ -1955,7 +1953,7 @@ ipv6_connect(struct TCP_Server_Info *server)
* user space buffer
*/
socket->sk->sk_rcvtimeo = 7 * HZ;
- socket->sk->sk_sndtimeo = 3 * HZ;
+ socket->sk->sk_sndtimeo = 5 * HZ;
server->ssocket = socket;
return rc;
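
The key fix above is switching from struct sockaddr (16 bytes, too small to
hold an IPv6 address) to struct sockaddr_storage, then dispatching on
ss_family. A self-contained userspace sketch of the same comparison logic
(names are illustrative; IN6_ARE_ADDR_EQUAL is the userspace analogue of the
kernel's ipv6_addr_equal()):

#include <stdbool.h>
#include <netinet/in.h>

static bool addr_equal(const struct sockaddr_storage *a,
                       const struct sockaddr_storage *b)
{
        if (a->ss_family != b->ss_family)
                return false;
        if (a->ss_family == AF_INET) {
                const struct sockaddr_in *a4 = (const void *)a;
                const struct sockaddr_in *b4 = (const void *)b;
                return a4->sin_addr.s_addr == b4->sin_addr.s_addr;
        }
        if (a->ss_family == AF_INET6) {
                const struct sockaddr_in6 *a6 = (const void *)a;
                const struct sockaddr_in6 *b6 = (const void *)b;
                return IN6_ARE_ADDR_EQUAL(&a6->sin6_addr, &b6->sin6_addr);
        }
        return false;
}
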
diff --git a/fs/cifs/dir.c b/fs/cifs/dir.c
index 838d9c720a5c..964aad03c5ad 100644
--- a/fs/cifs/dir.c
+++ b/fs/cifs/dir.c
@@ -129,6 +129,17 @@ cifs_bp_rename_retry:
return full_path;
}
+static void setup_cifs_dentry(struct cifsTconInfo *tcon,
+ struct dentry *direntry,
+ struct inode *newinode)
+{
+ if (tcon->nocase)
+ direntry->d_op = &cifs_ci_dentry_ops;
+ else
+ direntry->d_op = &cifs_dentry_ops;
+ d_instantiate(direntry, newinode);
+}
+
/* Inode operations in similar order to how they appear in Linux file fs.h */
int
@@ -139,14 +150,14 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
int xid;
int create_options = CREATE_NOT_DIR;
int oplock = 0;
+ /* BB below access is too much for the mknod to request */
int desiredAccess = GENERIC_READ | GENERIC_WRITE;
__u16 fileHandle;
struct cifs_sb_info *cifs_sb;
- struct cifsTconInfo *pTcon;
+ struct cifsTconInfo *tcon;
char *full_path = NULL;
FILE_ALL_INFO *buf = NULL;
struct inode *newinode = NULL;
- struct cifsFileInfo *pCifsFile = NULL;
struct cifsInodeInfo *pCifsInode;
int disposition = FILE_OVERWRITE_IF;
bool write_only = false;
@@ -154,7 +165,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
xid = GetXid();
cifs_sb = CIFS_SB(inode->i_sb);
- pTcon = cifs_sb->tcon;
+ tcon = cifs_sb->tcon;
full_path = build_path_from_dentry(direntry);
if (full_path == NULL) {
@@ -162,6 +173,8 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
return -ENOMEM;
}
+ mode &= ~current->fs->umask;
+
if (nd && (nd->flags & LOOKUP_OPEN)) {
int oflags = nd->intent.open.flags;
@@ -196,17 +209,15 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
return -ENOMEM;
}
- mode &= ~current->fs->umask;
-
/*
* if we're not using unix extensions, see if we need to set
* ATTR_READONLY on the create call
*/
- if (!pTcon->unix_ext && (mode & S_IWUGO) == 0)
+ if (!tcon->unix_ext && (mode & S_IWUGO) == 0)
create_options |= CREATE_OPTION_READONLY;
if (cifs_sb->tcon->ses->capabilities & CAP_NT_SMBS)
- rc = CIFSSMBOpen(xid, pTcon, full_path, disposition,
+ rc = CIFSSMBOpen(xid, tcon, full_path, disposition,
desiredAccess, create_options,
&fileHandle, &oplock, buf, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -215,7 +226,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
if (rc == -EIO) {
/* old server, retry the open legacy style */
- rc = SMBLegacyOpen(xid, pTcon, full_path, disposition,
+ rc = SMBLegacyOpen(xid, tcon, full_path, disposition,
desiredAccess, create_options,
&fileHandle, &oplock, buf, cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags & CIFS_MOUNT_MAP_SPECIAL_CHR);
@@ -225,7 +236,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
} else {
/* If Open reported that we actually created a file
then we now have to set the mode if possible */
- if ((pTcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) {
+ if ((tcon->unix_ext) && (oplock & CIFS_CREATE_ACTION)) {
struct cifs_unix_set_info_args args = {
.mode = mode,
.ctime = NO_CHANGE_64,
@@ -244,20 +255,20 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
args.uid = NO_CHANGE_64;
args.gid = NO_CHANGE_64;
}
- CIFSSMBUnixSetInfo(xid, pTcon, full_path, &args,
+ CIFSSMBUnixSetInfo(xid, tcon, full_path, &args,
cifs_sb->local_nls,
cifs_sb->mnt_cifs_flags &
CIFS_MOUNT_MAP_SPECIAL_CHR);
} else {
/* BB implement mode setting via Windows security
descriptors e.g. */
- /* CIFSSMBWinSetPerms(xid,pTcon,path,mode,-1,-1,nls);*/
+ /* CIFSSMBWinSetPerms(xid,tcon,path,mode,-1,-1,nls);*/
/* Could set r/o dos attribute if mode & 0222 == 0 */
}
/* server might mask mode so we have to query for it */
- if (pTcon->unix_ext)
+ if (tcon->unix_ext)
rc = cifs_get_inode_info_unix(&newinode, full_path,
inode->i_sb, xid);
else {
@@ -283,22 +294,17 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
}
if (rc != 0) {
- cFYI(1,
- ("Create worked but get_inode_info failed rc = %d",
- rc));
- } else {
- if (pTcon->nocase)
- direntry->d_op = &cifs_ci_dentry_ops;
- else
- direntry->d_op = &cifs_dentry_ops;
- d_instantiate(direntry, newinode);
- }
+ cFYI(1, ("Create worked, get_inode_info failed rc = %d",
+ rc));
+ } else
+ setup_cifs_dentry(tcon, direntry, newinode);
+
if ((nd == NULL /* nfsd case - nfs srv does not set nd */) ||
(!(nd->flags & LOOKUP_OPEN))) {
/* mknod case - do not leave file open */
- CIFSSMBClose(xid, pTcon, fileHandle);
+ CIFSSMBClose(xid, tcon, fileHandle);
} else if (newinode) {
- pCifsFile =
+ struct cifsFileInfo *pCifsFile =
kzalloc(sizeof(struct cifsFileInfo), GFP_KERNEL);
if (pCifsFile == NULL)
@@ -316,7 +322,7 @@ cifs_create(struct inode *inode, struct dentry *direntry, int mode,
/* set the following in open now
pCifsFile->pfile = file; */
write_lock(&GlobalSMBSeslock);
- list_add(&pCifsFile->tlist, &pTcon->openFileList);
+ list_add(&pCifsFile->tlist, &tcon->openFileList);
pCifsInode = CIFS_I(newinode);
if (pCifsInode) {
/* if readable file instance put first in list*/
diff --git a/fs/cifs/inode.c b/fs/cifs/inode.c
index 5ab9896fdcb2..bcf7b5184664 100644
--- a/fs/cifs/inode.c
+++ b/fs/cifs/inode.c
@@ -1285,6 +1285,11 @@ int cifs_rmdir(struct inode *inode, struct dentry *direntry)
cifsInode = CIFS_I(direntry->d_inode);
cifsInode->time = 0; /* force revalidate to go get info when
needed */
+
+ cifsInode = CIFS_I(inode);
+ cifsInode->time = 0; /* force revalidate to get parent dir info
+ since cached search results now invalid */
+
direntry->d_inode->i_ctime = inode->i_ctime = inode->i_mtime =
current_fs_time(inode->i_sb);
diff --git a/fs/cifs/md5.c b/fs/cifs/md5.c
index 462bbfefd4b6..98b66a54c319 100644
--- a/fs/cifs/md5.c
+++ b/fs/cifs/md5.c
@@ -10,8 +10,8 @@
* with every copy.
*
* To compute the message digest of a chunk of bytes, declare an
- * MD5Context structure, pass it to MD5Init, call MD5Update as
- * needed on buffers full of bytes, and then call MD5Final, which
+ * MD5Context structure, pass it to cifs_MD5_init, call cifs_MD5_update as
+ * needed on buffers full of bytes, and then call cifs_MD5_final, which
* will fill a supplied 16-byte array with the digest.
*/
@@ -45,7 +45,7 @@ byteReverse(unsigned char *buf, unsigned longs)
* initialization constants.
*/
void
-MD5Init(struct MD5Context *ctx)
+cifs_MD5_init(struct MD5Context *ctx)
{
ctx->buf[0] = 0x67452301;
ctx->buf[1] = 0xefcdab89;
@@ -61,7 +61,7 @@ MD5Init(struct MD5Context *ctx)
* of bytes.
*/
void
-MD5Update(struct MD5Context *ctx, unsigned char const *buf, unsigned len)
+cifs_MD5_update(struct MD5Context *ctx, unsigned char const *buf, unsigned len)
{
register __u32 t;
@@ -110,7 +110,7 @@ MD5Update(struct MD5Context *ctx, unsigned char const *buf, unsigned len)
* 1 0* (64-bit count of bits processed, MSB-first)
*/
void
-MD5Final(unsigned char digest[16], struct MD5Context *ctx)
+cifs_MD5_final(unsigned char digest[16], struct MD5Context *ctx)
{
unsigned int count;
unsigned char *p;
@@ -165,7 +165,7 @@ MD5Final(unsigned char digest[16], struct MD5Context *ctx)
/*
* The core of the MD5 algorithm, this alters an existing MD5 hash to
- * reflect the addition of 16 longwords of new data. MD5Update blocks
+ * reflect the addition of 16 longwords of new data. cifs_MD5_update blocks
* the data and converts bytes into longwords for this routine.
*/
static void
@@ -267,9 +267,9 @@ hmac_md5_init_rfc2104(unsigned char *key, int key_len,
unsigned char tk[16];
struct MD5Context tctx;
- MD5Init(&tctx);
- MD5Update(&tctx, key, key_len);
- MD5Final(tk, &tctx);
+ cifs_MD5_init(&tctx);
+ cifs_MD5_update(&tctx, key, key_len);
+ cifs_MD5_final(tk, &tctx);
key = tk;
key_len = 16;
@@ -287,8 +287,8 @@ hmac_md5_init_rfc2104(unsigned char *key, int key_len,
ctx->k_opad[i] ^= 0x5c;
}
- MD5Init(&ctx->ctx);
- MD5Update(&ctx->ctx, ctx->k_ipad, 64);
+ cifs_MD5_init(&ctx->ctx);
+ cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64);
}
#endif
@@ -317,8 +317,8 @@ hmac_md5_init_limK_to_64(const unsigned char *key, int key_len,
ctx->k_opad[i] ^= 0x5c;
}
- MD5Init(&ctx->ctx);
- MD5Update(&ctx->ctx, ctx->k_ipad, 64);
+ cifs_MD5_init(&ctx->ctx);
+ cifs_MD5_update(&ctx->ctx, ctx->k_ipad, 64);
}
/***********************************************************************
@@ -328,7 +328,7 @@ void
hmac_md5_update(const unsigned char *text, int text_len,
struct HMACMD5Context *ctx)
{
- MD5Update(&ctx->ctx, text, text_len); /* then text of datagram */
+ cifs_MD5_update(&ctx->ctx, text, text_len); /* then text of datagram */
}
/***********************************************************************
@@ -339,12 +339,12 @@ hmac_md5_final(unsigned char *digest, struct HMACMD5Context *ctx)
{
struct MD5Context ctx_o;
- MD5Final(digest, &ctx->ctx);
+ cifs_MD5_final(digest, &ctx->ctx);
- MD5Init(&ctx_o);
- MD5Update(&ctx_o, ctx->k_opad, 64);
- MD5Update(&ctx_o, digest, 16);
- MD5Final(digest, &ctx_o);
+ cifs_MD5_init(&ctx_o);
+ cifs_MD5_update(&ctx_o, ctx->k_opad, 64);
+ cifs_MD5_update(&ctx_o, digest, 16);
+ cifs_MD5_final(digest, &ctx_o);
}
/***********************************************************
diff --git a/fs/cifs/md5.h b/fs/cifs/md5.h
index f7d4f4197bac..6fba8cb402fd 100644
--- a/fs/cifs/md5.h
+++ b/fs/cifs/md5.h
@@ -20,10 +20,10 @@ struct HMACMD5Context {
};
#endif /* _HMAC_MD5_H */
-void MD5Init(struct MD5Context *context);
-void MD5Update(struct MD5Context *context, unsigned char const *buf,
+void cifs_MD5_init(struct MD5Context *context);
+void cifs_MD5_update(struct MD5Context *context, unsigned char const *buf,
unsigned len);
-void MD5Final(unsigned char digest[16], struct MD5Context *context);
+void cifs_MD5_final(unsigned char digest[16], struct MD5Context *context);
/* The following definitions come from lib/hmacmd5.c */
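
The header comment above spells out the intended calling sequence for the
renamed helpers. A short sketch of that sequence as an in-tree caller would
use it (assumes the md5.h from this patch; the helper name is illustrative):

#include "md5.h"

/* Digest one buffer: init, update as needed, then final, exactly as
 * the header comment describes. Update may be called repeatedly to
 * feed additional data before final. */
static void digest_buf(unsigned char const *buf, unsigned int len,
                       unsigned char digest[16])
{
        struct MD5Context ctx;

        cifs_MD5_init(&ctx);
        cifs_MD5_update(&ctx, buf, len);
        cifs_MD5_final(digest, &ctx);
}

The rename itself changes no behaviour; it only gives the symbols a cifs_
prefix so they cannot clash with other MD5 implementations in the kernel.
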
diff --git a/fs/cifs/transport.c b/fs/cifs/transport.c
index 7ebe6599ed3a..0ad3e2d116a6 100644
--- a/fs/cifs/transport.c
+++ b/fs/cifs/transport.c
@@ -154,81 +154,8 @@ void DeleteTconOplockQEntries(struct cifsTconInfo *tcon)
spin_unlock(&GlobalMid_Lock);
}
-int
-smb_send(struct socket *ssocket, struct smb_hdr *smb_buffer,
- unsigned int smb_buf_length, struct sockaddr *sin, bool noblocksnd)
-{
- int rc = 0;
- int i = 0;
- struct msghdr smb_msg;
- struct kvec iov;
- unsigned len = smb_buf_length + 4;
-
- if (ssocket == NULL)
- return -ENOTSOCK; /* BB eventually add reconnect code here */
- iov.iov_base = smb_buffer;
- iov.iov_len = len;
-
- smb_msg.msg_name = sin;
- smb_msg.msg_namelen = sizeof(struct sockaddr);
- smb_msg.msg_control = NULL;
- smb_msg.msg_controllen = 0;
- if (noblocksnd)
- smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
- else
- smb_msg.msg_flags = MSG_NOSIGNAL;
-
- /* smb header is converted in header_assemble. bcc and rest of SMB word
- area, and byte area if necessary, is converted to littleendian in
- cifssmb.c and RFC1001 len is converted to bigendian in smb_send
- Flags2 is converted in SendReceive */
-
- smb_buffer->smb_buf_length = cpu_to_be32(smb_buffer->smb_buf_length);
- cFYI(1, ("Sending smb of length %d", smb_buf_length));
- dump_smb(smb_buffer, len);
-
- while (len > 0) {
- rc = kernel_sendmsg(ssocket, &smb_msg, &iov, 1, len);
- if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
- i++;
- /* smaller timeout here than send2 since smaller size */
- /* Although it may not be required, this also is smaller
- oplock break time */
- if (i > 12) {
- cERROR(1,
- ("sends on sock %p stuck for 7 seconds",
- ssocket));
- rc = -EAGAIN;
- break;
- }
- msleep(1 << i);
- continue;
- }
- if (rc < 0)
- break;
- else
- i = 0; /* reset i after each successful send */
- iov.iov_base += rc;
- iov.iov_len -= rc;
- len -= rc;
- }
-
- if (rc < 0) {
- cERROR(1, ("Error %d sending data on socket to server", rc));
- } else {
- rc = 0;
- }
-
- /* Don't want to modify the buffer as a
- side effect of this call. */
- smb_buffer->smb_buf_length = smb_buf_length;
-
- return rc;
-}
-
static int
-smb_send2(struct TCP_Server_Info *server, struct kvec *iov, int n_vec,
- struct sockaddr *sin, bool noblocksnd)
+smb_sendv(struct TCP_Server_Info *server, struct kvec *iov, int n_vec)
{
int rc = 0;
int i = 0;
@@ -243,11 +170,11 @@ smb_send2(struct TCP_Server_Info *server, struct kvec *iov, int n_vec,
if (ssocket == NULL)
return -ENOTSOCK; /* BB eventually add reconnect code here */
- smb_msg.msg_name = sin;
+ smb_msg.msg_name = (struct sockaddr *) &server->addr.sockAddr;
smb_msg.msg_namelen = sizeof(struct sockaddr);
smb_msg.msg_control = NULL;
smb_msg.msg_controllen = 0;
- if (noblocksnd)
+ if (server->noblocksnd)
smb_msg.msg_flags = MSG_DONTWAIT + MSG_NOSIGNAL;
else
smb_msg.msg_flags = MSG_NOSIGNAL;
@@ -272,7 +199,25 @@ smb_send2(struct TCP_Server_Info *server, struct kvec *iov, int n_vec,
n_vec - first_vec, total_len);
if ((rc == -ENOSPC) || (rc == -EAGAIN)) {
i++;
- if (i >= 14) {
+ /* If this is a blocking send we try 3 times, since each
+ attempt can block for 5 seconds. For a nonblocking send
+ we have to try more times, waiting increasing amounts of
+ time to allow the socket to clear. The overall time we
+ wait in either case to send on the socket is about 15
+ seconds. Similarly we wait about 15 seconds in
+ SendReceive[2] for the server to send a response back
+ for most types of requests (except SMB writes past end
+ of file, which can be slow, and blocking lock
+ operations). NFS waits slightly longer than CIFS, but
+ 15 seconds lets nonresponsive servers be detected
+ sooner and is still more than enough time for modern
+ networks to deliver a packet. In most cases, if we fail
+ to send after the retries we kill the socket and
+ reconnect, which may clear the network problem.
+ */
+ if ((i >= 14) || (!server->noblocksnd && (i > 2))) {
cERROR(1,
("sends on sock %p stuck for 15 seconds",
ssocket));
@@ -339,6 +284,18 @@ smb_send2(struct TCP_Server_Info *server, struct kvec *iov, int n_vec,
return rc;
}
+int
+smb_send(struct TCP_Server_Info *server, struct smb_hdr *smb_buffer,
+ unsigned int smb_buf_length)
+{
+ struct kvec iov;
+
+ iov.iov_base = smb_buffer;
+ iov.iov_len = smb_buf_length + 4;
+
+ return smb_sendv(server, &iov, 1);
+}
+
static int wait_for_free_request(struct cifsSesInfo *ses, const int long_op)
{
if (long_op == CIFS_ASYNC_OP) {
@@ -540,9 +497,7 @@ SendReceive2(const unsigned int xid, struct cifsSesInfo *ses,
#ifdef CONFIG_CIFS_STATS2
atomic_inc(&ses->server->inSend);
#endif
- rc = smb_send2(ses->server, iov, n_vec,
- (struct sockaddr *) &(ses->server->addr.sockAddr),
- ses->server->noblocksnd);
+ rc = smb_sendv(ses->server, iov, n_vec);
#ifdef CONFIG_CIFS_STATS2
atomic_dec(&ses->server->inSend);
midQ->when_sent = jiffies;
@@ -736,9 +691,7 @@ SendReceive(const unsigned int xid, struct cifsSesInfo *ses,
#ifdef CONFIG_CIFS_STATS2
atomic_inc(&ses->server->inSend);
#endif
- rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
- (struct sockaddr *) &(ses->server->addr.sockAddr),
- ses->server->noblocksnd);
+ rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length);
#ifdef CONFIG_CIFS_STATS2
atomic_dec(&ses->server->inSend);
midQ->when_sent = jiffies;
@@ -879,9 +832,7 @@ send_nt_cancel(struct cifsTconInfo *tcon, struct smb_hdr *in_buf,
mutex_unlock(&ses->server->srv_mutex);
return rc;
}
- rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
- (struct sockaddr *) &(ses->server->addr.sockAddr),
- ses->server->noblocksnd);
+ rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length);
mutex_unlock(&ses->server->srv_mutex);
return rc;
}
@@ -973,9 +924,7 @@ SendReceiveBlockingLock(const unsigned int xid, struct cifsTconInfo *tcon,
#ifdef CONFIG_CIFS_STATS2
atomic_inc(&ses->server->inSend);
#endif
- rc = smb_send(ses->server->ssocket, in_buf, in_buf->smb_buf_length,
- (struct sockaddr *) &(ses->server->addr.sockAddr),
- ses->server->noblocksnd);
+ rc = smb_send(ses->server, in_buf, in_buf->smb_buf_length);
#ifdef CONFIG_CIFS_STATS2
atomic_dec(&ses->server->inSend);
midQ->when_sent = jiffies;
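
The retry policy described in the long comment above -- exponential backoff
capped so the total wait is roughly 15 seconds -- is easy to see in
isolation. A hedged userspace sketch of the same loop shape, where try_send
is a stand-in for kernel_sendmsg():

#include <errno.h>
#include <time.h>
#include <sys/types.h>

/* Stand-in: returns bytes sent, or -1 with errno set to EAGAIN or
 * ENOSPC when the socket is momentarily full. */
extern ssize_t try_send(const void *buf, size_t len);

static int send_all(const char *buf, size_t len)
{
        int i = 0;

        while (len > 0) {
                ssize_t n = try_send(buf, len);

                if (n < 0 && (errno == EAGAIN || errno == ENOSPC)) {
                        if (++i >= 14)          /* stuck ~15 seconds */
                                return -EAGAIN;
                        /* sleep 1 << i milliseconds, like msleep() */
                        struct timespec ts = {
                                .tv_sec = (1 << i) / 1000,
                                .tv_nsec = ((1 << i) % 1000) * 1000000L,
                        };
                        nanosleep(&ts, NULL);
                        continue;
                }
                if (n < 0)
                        return -errno;
                i = 0;          /* reset after each successful send */
                buf += n;
                len -= n;
        }
        return 0;
}
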
diff --git a/fs/coda/Kconfig b/fs/coda/Kconfig
new file mode 100644
index 000000000000..c0e5a7fad06d
--- /dev/null
+++ b/fs/coda/Kconfig
@@ -0,0 +1,21 @@
+config CODA_FS
+ tristate "Coda file system support (advanced network fs)"
+ depends on INET
+ help
+ Coda is an advanced network file system, similar to NFS in that it
+ enables you to mount file systems from a remote server and access
+ them with regular Unix commands as if they were sitting on your
+ hard disk. Coda has several advantages over NFS: support for
+ disconnected operation (e.g. for laptops), read/write server
+ replication, a security model for authentication and encryption,
+ persistent client caches, and write-back caching.
+
+ If you say Y here, your Linux box will be able to act as a Coda
+ *client*. You will need user level code as well, both for the
+ client and server. Servers are currently user level, i.e. they need
+ no kernel support. Please read
+ <file:Documentation/filesystems/coda.txt> and check out the Coda
+ home page <http://www.coda.cs.cmu.edu/>.
+
+ To compile the coda client support as a module, choose M here: the
+ module will be called coda.
diff --git a/fs/compat_ioctl.c b/fs/compat_ioctl.c
index 5235c67e7594..c8f8d5904f5e 100644
--- a/fs/compat_ioctl.c
+++ b/fs/compat_ioctl.c
@@ -538,6 +538,7 @@ static int dev_ifsioc(unsigned int fd, unsigned int cmd, unsigned long arg)
* cannot be fixed without breaking all existing apps.
*/
case TUNSETIFF:
+ case TUNGETIFF:
case SIOCGIFFLAGS:
case SIOCGIFMETRIC:
case SIOCGIFMTU:
@@ -1982,6 +1983,11 @@ COMPATIBLE_IOCTL(TUNSETNOCSUM)
COMPATIBLE_IOCTL(TUNSETDEBUG)
COMPATIBLE_IOCTL(TUNSETPERSIST)
COMPATIBLE_IOCTL(TUNSETOWNER)
+COMPATIBLE_IOCTL(TUNSETLINK)
+COMPATIBLE_IOCTL(TUNSETGROUP)
+COMPATIBLE_IOCTL(TUNGETFEATURES)
+COMPATIBLE_IOCTL(TUNSETOFFLOAD)
+COMPATIBLE_IOCTL(TUNSETTXFILTER)
/* Big V */
COMPATIBLE_IOCTL(VT_SETMODE)
COMPATIBLE_IOCTL(VT_GETMODE)
@@ -2573,6 +2579,7 @@ HANDLE_IOCTL(SIOCGIFPFLAGS, dev_ifsioc)
HANDLE_IOCTL(SIOCGIFTXQLEN, dev_ifsioc)
HANDLE_IOCTL(SIOCSIFTXQLEN, dev_ifsioc)
HANDLE_IOCTL(TUNSETIFF, dev_ifsioc)
+HANDLE_IOCTL(TUNGETIFF, dev_ifsioc)
HANDLE_IOCTL(SIOCETHTOOL, ethtool_ioctl)
HANDLE_IOCTL(SIOCBONDENSLAVE, bond_ioctl)
HANDLE_IOCTL(SIOCBONDRELEASE, bond_ioctl)
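
TUNGETIFF gains both a dev_ifsioc() translation entry and a compat listing
here, so 32-bit tools on 64-bit kernels can read back a tun device's name
and flags. A minimal userspace check that exercises it (needs CAP_NET_ADMIN;
error handling trimmed to the essentials):

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/if.h>
#include <linux/if_tun.h>

int main(void)
{
        struct ifreq ifr;
        int fd = open("/dev/net/tun", O_RDWR);

        if (fd < 0)
                return perror("open"), 1;
        memset(&ifr, 0, sizeof(ifr));
        ifr.ifr_flags = IFF_TUN | IFF_NO_PI;
        if (ioctl(fd, TUNSETIFF, &ifr) < 0)     /* create/attach */
                return perror("TUNSETIFF"), 1;
        if (ioctl(fd, TUNGETIFF, &ifr) < 0)     /* read back name+flags */
                return perror("TUNGETIFF"), 1;
        printf("attached to %s, flags 0x%x\n", ifr.ifr_name, ifr.ifr_flags);
        return 0;
}
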
diff --git a/fs/configfs/Kconfig b/fs/configfs/Kconfig
new file mode 100644
index 000000000000..13587cc97a0b
--- /dev/null
+++ b/fs/configfs/Kconfig
@@ -0,0 +1,11 @@
+config CONFIGFS_FS
+ tristate "Userspace-driven configuration filesystem"
+ depends on SYSFS
+ help
+ configfs is a ram-based filesystem that provides the converse
+ of sysfs's functionality. Where sysfs is a filesystem-based
+ view of kernel objects, configfs is a filesystem-based manager
+ of kernel objects, or config_items.
+
+ Both sysfs and configfs can and should exist together on the
+ same system. One is not a replacement for the other.
diff --git a/fs/configfs/dir.c b/fs/configfs/dir.c
index 8e93341f3e82..9c2358391147 100644
--- a/fs/configfs/dir.c
+++ b/fs/configfs/dir.c
@@ -553,12 +553,24 @@ static void detach_groups(struct config_group *group)
child = sd->s_dentry;
+ /*
+ * Note: we hide this from lockdep since we have no way
+ * to teach lockdep about recursive
+ * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path
+ * in an inode tree, which are valid as soon as
+ * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a
+ * parent inode to one of its children.
+ */
+ lockdep_off();
mutex_lock(&child->d_inode->i_mutex);
+ lockdep_on();
configfs_detach_group(sd->s_element);
child->d_inode->i_flags |= S_DEAD;
+ lockdep_off();
mutex_unlock(&child->d_inode->i_mutex);
+ lockdep_on();
d_delete(child);
dput(child);
@@ -748,11 +760,22 @@ static int configfs_attach_item(struct config_item *parent_item,
* We are going to remove an inode and its dentry but
* the VFS may already have hit and used them. Thus,
* we must lock them as rmdir() would.
+ *
+ * Note: we hide this from lockdep since we have no way
+ * to teach lockdep about recursive
+ * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path
+ * in an inode tree, which are valid as soon as
+ * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a
+ * parent inode to one of its children.
*/
+ lockdep_off();
mutex_lock(&dentry->d_inode->i_mutex);
+ lockdep_on();
configfs_remove_dir(item);
dentry->d_inode->i_flags |= S_DEAD;
+ lockdep_off();
mutex_unlock(&dentry->d_inode->i_mutex);
+ lockdep_on();
d_delete(dentry);
}
}
@@ -787,14 +810,25 @@ static int configfs_attach_group(struct config_item *parent_item,
*
* We must also lock the inode to remove it safely in case of
* error, as rmdir() would.
+ *
+ * Note: we hide this from lockdep since we have no way
+ * to teach lockdep about recursive
+ * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path
+ * in an inode tree, which are valid as soon as
+ * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a
+ * parent inode to one of its children.
*/
+ lockdep_off();
mutex_lock_nested(&dentry->d_inode->i_mutex, I_MUTEX_CHILD);
+ lockdep_on();
ret = populate_groups(to_config_group(item));
if (ret) {
configfs_detach_item(item);
dentry->d_inode->i_flags |= S_DEAD;
}
+ lockdep_off();
mutex_unlock(&dentry->d_inode->i_mutex);
+ lockdep_on();
if (ret)
d_delete(dentry);
}
@@ -956,7 +990,17 @@ static int configfs_depend_prep(struct dentry *origin,
BUG_ON(!origin || !sd);
/* Lock this guy on the way down */
+ /*
+ * Note: we hide this from lockdep since we have no way
+ * to teach lockdep about recursive
+ * I_MUTEX_PARENT -> I_MUTEX_CHILD patterns along a path
+ * in an inode tree, which are valid as soon as
+ * I_MUTEX_PARENT -> I_MUTEX_CHILD is valid from a
+ * parent inode to one of its children.
+ */
+ lockdep_off();
mutex_lock(&sd->s_dentry->d_inode->i_mutex);
+ lockdep_on();
if (sd->s_element == target) /* Boo-yah */
goto out;
@@ -970,7 +1014,9 @@ static int configfs_depend_prep(struct dentry *origin,
}
/* We looped all our children and didn't find target */
+ lockdep_off();
mutex_unlock(&sd->s_dentry->d_inode->i_mutex);
+ lockdep_on();
ret = -ENOENT;
out:
@@ -990,11 +1036,16 @@ static void configfs_depend_rollback(struct dentry *origin,
struct dentry *dentry = item->ci_dentry;
while (dentry != origin) {
+ /* See comments in configfs_depend_prep() */
+ lockdep_off();
mutex_unlock(&dentry->d_inode->i_mutex);
+ lockdep_on();
dentry = dentry->d_parent;
}
+ lockdep_off();
mutex_unlock(&origin->d_inode->i_mutex);
+ lockdep_on();
}
int configfs_depend_item(struct configfs_subsystem *subsys,
@@ -1329,8 +1380,16 @@ static int configfs_rmdir(struct inode *dir, struct dentry *dentry)
}
/* Wait until the racing operation terminates */
+ /*
+ * Note: we hide this from lockdep since we are locked
+ * with subclass I_MUTEX_NORMAL from vfs_rmdir() (why
+ * not I_MUTEX_CHILD?), and I_MUTEX_XATTR or
+ * I_MUTEX_QUOTA are not relevant for the locked inode.
+ */
+ lockdep_off();
mutex_lock(wait_mutex);
mutex_unlock(wait_mutex);
+ lockdep_on();
}
} while (ret == -EAGAIN);
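
The pattern repeated throughout this file -- blind lockdep only for the one
acquisition it cannot model, and keep the blinded window as narrow as
possible -- is compact enough to show on its own. A sketch of the idiom, not
configfs code (detach_one() is a placeholder):

/* The recursive I_MUTEX_PARENT -> I_MUTEX_CHILD chain down an inode
 * tree is valid but inexpressible to lockdep, so only the lock and
 * unlock calls themselves are hidden; everything done while the lock
 * is held remains fully checked. */
static void detach_locked(struct dentry *child)
{
        lockdep_off();
        mutex_lock(&child->d_inode->i_mutex);
        lockdep_on();

        detach_one(child);                      /* checked by lockdep */
        child->d_inode->i_flags |= S_DEAD;

        lockdep_off();
        mutex_unlock(&child->d_inode->i_mutex);
        lockdep_on();
}

Where a fixed nesting depth suffices, mutex_lock_nested() with a subclass is
the preferred tool; lockdep_off() is the fallback for chains of unbounded
depth like this one.
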
diff --git a/fs/cramfs/Kconfig b/fs/cramfs/Kconfig
new file mode 100644
index 000000000000..cd06466f365e
--- /dev/null
+++ b/fs/cramfs/Kconfig
@@ -0,0 +1,19 @@
+config CRAMFS
+ tristate "Compressed ROM file system support (cramfs)"
+ depends on BLOCK
+ select ZLIB_INFLATE
+ help
+ Saying Y here includes support for CramFs (Compressed ROM File
+ System). CramFs is designed to be a simple, small, and compressed
+ file system for ROM-based embedded systems. CramFs is read-only,
+ limited to 256MB file systems (with 16MB files), and doesn't support
+ 16/32-bit uids/gids, hard links, or timestamps.
+
+ See <file:Documentation/filesystems/cramfs.txt> and
+ <file:fs/cramfs/README> for further information.
+
+ To compile this as a module, choose M here: the module will be called
+ cramfs. Note that the root file system (the one containing the
+ directory /) cannot be compiled as a module.
+
+ If unsure, say N.
diff --git a/fs/dlm/dir.c b/fs/dlm/dir.c
index 92969f879a17..858fba14aaa6 100644
--- a/fs/dlm/dir.c
+++ b/fs/dlm/dir.c
@@ -156,7 +156,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen
bucket = dir_hash(ls, name, namelen);
- write_lock(&ls->ls_dirtbl[bucket].lock);
+ spin_lock(&ls->ls_dirtbl[bucket].lock);
de = search_bucket(ls, name, namelen, bucket);
@@ -173,7 +173,7 @@ void dlm_dir_remove_entry(struct dlm_ls *ls, int nodeid, char *name, int namelen
list_del(&de->list);
kfree(de);
out:
- write_unlock(&ls->ls_dirtbl[bucket].lock);
+ spin_unlock(&ls->ls_dirtbl[bucket].lock);
}
void dlm_dir_clear(struct dlm_ls *ls)
@@ -185,14 +185,14 @@ void dlm_dir_clear(struct dlm_ls *ls)
DLM_ASSERT(list_empty(&ls->ls_recover_list), );
for (i = 0; i < ls->ls_dirtbl_size; i++) {
- write_lock(&ls->ls_dirtbl[i].lock);
+ spin_lock(&ls->ls_dirtbl[i].lock);
head = &ls->ls_dirtbl[i].list;
while (!list_empty(head)) {
de = list_entry(head->next, struct dlm_direntry, list);
list_del(&de->list);
put_free_de(ls, de);
}
- write_unlock(&ls->ls_dirtbl[i].lock);
+ spin_unlock(&ls->ls_dirtbl[i].lock);
}
}
@@ -307,17 +307,17 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
bucket = dir_hash(ls, name, namelen);
- write_lock(&ls->ls_dirtbl[bucket].lock);
+ spin_lock(&ls->ls_dirtbl[bucket].lock);
de = search_bucket(ls, name, namelen, bucket);
if (de) {
*r_nodeid = de->master_nodeid;
- write_unlock(&ls->ls_dirtbl[bucket].lock);
+ spin_unlock(&ls->ls_dirtbl[bucket].lock);
if (*r_nodeid == nodeid)
return -EEXIST;
return 0;
}
- write_unlock(&ls->ls_dirtbl[bucket].lock);
+ spin_unlock(&ls->ls_dirtbl[bucket].lock);
if (namelen > DLM_RESNAME_MAXLEN)
return -EINVAL;
@@ -330,7 +330,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
de->length = namelen;
memcpy(de->name, name, namelen);
- write_lock(&ls->ls_dirtbl[bucket].lock);
+ spin_lock(&ls->ls_dirtbl[bucket].lock);
tmp = search_bucket(ls, name, namelen, bucket);
if (tmp) {
kfree(de);
@@ -339,7 +339,7 @@ static int get_entry(struct dlm_ls *ls, int nodeid, char *name,
list_add_tail(&de->list, &ls->ls_dirtbl[bucket].list);
}
*r_nodeid = de->master_nodeid;
- write_unlock(&ls->ls_dirtbl[bucket].lock);
+ spin_unlock(&ls->ls_dirtbl[bucket].lock);
return 0;
}
diff --git a/fs/dlm/dlm_internal.h b/fs/dlm/dlm_internal.h
index 076e86f38bc8..d01ca0a711db 100644
--- a/fs/dlm/dlm_internal.h
+++ b/fs/dlm/dlm_internal.h
@@ -99,7 +99,7 @@ struct dlm_direntry {
struct dlm_dirtable {
struct list_head list;
- rwlock_t lock;
+ spinlock_t lock;
};
struct dlm_rsbtable {
diff --git a/fs/dlm/lockspace.c b/fs/dlm/lockspace.c
index aa32e5f02493..cd8e2df3c295 100644
--- a/fs/dlm/lockspace.c
+++ b/fs/dlm/lockspace.c
@@ -487,7 +487,7 @@ static int new_lockspace(char *name, int namelen, void **lockspace,
goto out_lkbfree;
for (i = 0; i < size; i++) {
INIT_LIST_HEAD(&ls->ls_dirtbl[i].list);
- rwlock_init(&ls->ls_dirtbl[i].lock);
+ spin_lock_init(&ls->ls_dirtbl[i].lock);
}
INIT_LIST_HEAD(&ls->ls_waiters);
diff --git a/fs/dlm/lowcomms.c b/fs/dlm/lowcomms.c
index 103a5ebd1371..d0e640c78374 100644
--- a/fs/dlm/lowcomms.c
+++ b/fs/dlm/lowcomms.c
@@ -2,7 +2,7 @@
*******************************************************************************
**
** Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
-** Copyright (C) 2004-2007 Red Hat, Inc. All rights reserved.
+** Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
**
** This copyrighted material is made available to anyone wishing to use,
** modify, copy, or redistribute it subject to the terms and conditions
@@ -21,7 +21,7 @@
*
* Cluster nodes are referred to by their nodeids. nodeids are
* simply 32 bit numbers to the locking module - if they need to
- * be expanded for the cluster infrastructure then that is it's
+ * be expanded for the cluster infrastructure then that is its
* responsibility. It is this layer's
* responsibility to resolve these into IP address or
* whatever it needs for inter-node communication.
@@ -36,9 +36,9 @@
* of high load. Also, this way, the sending thread can collect together
* messages bound for one node and send them in one block.
*
- * lowcomms will choose to use wither TCP or SCTP as its transport layer
+ * lowcomms will choose to use either TCP or SCTP as its transport layer
* depending on the configuration variable 'protocol'. This should be set
- * to 0 (default) for TCP or 1 for SCTP. It shouldbe configured using a
+ * to 0 (default) for TCP or 1 for SCTP. It should be configured using a
* cluster-wide mechanism as it must be the same on all nodes of the cluster
* for the DLM to function.
*
@@ -48,11 +48,11 @@
#include <net/sock.h>
#include <net/tcp.h>
#include <linux/pagemap.h>
-#include <linux/idr.h>
#include <linux/file.h>
#include <linux/mutex.h>
#include <linux/sctp.h>
#include <net/sctp/user.h>
+#include <net/ipv6.h>
#include "dlm_internal.h"
#include "lowcomms.h"
@@ -60,6 +60,7 @@
#include "config.h"
#define NEEDED_RMEM (4*1024*1024)
+#define CONN_HASH_SIZE 32
struct cbuf {
unsigned int base;
@@ -114,6 +115,7 @@ struct connection {
int retries;
#define MAX_CONNECT_RETRIES 3
int sctp_assoc;
+ struct hlist_node list;
struct connection *othercon;
struct work_struct rwork; /* Receive workqueue */
struct work_struct swork; /* Send workqueue */
@@ -138,14 +140,37 @@ static int dlm_local_count;
static struct workqueue_struct *recv_workqueue;
static struct workqueue_struct *send_workqueue;
-static DEFINE_IDR(connections_idr);
+static struct hlist_head connection_hash[CONN_HASH_SIZE];
static DEFINE_MUTEX(connections_lock);
-static int max_nodeid;
static struct kmem_cache *con_cache;
static void process_recv_sockets(struct work_struct *work);
static void process_send_sockets(struct work_struct *work);
+
+/* This is deliberately very simple because most clusters have simple
+ sequential nodeids, so we should be able to go straight to a connection
+ struct in the array */
+static inline int nodeid_hash(int nodeid)
+{
+ return nodeid & (CONN_HASH_SIZE-1);
+}
+
+static struct connection *__find_con(int nodeid)
+{
+ int r;
+ struct hlist_node *h;
+ struct connection *con;
+
+ r = nodeid_hash(nodeid);
+
+ hlist_for_each_entry(con, h, &connection_hash[r], list) {
+ if (con->nodeid == nodeid)
+ return con;
+ }
+ return NULL;
+}
+
/*
* If 'allocation' is zero then we don't attempt to create a new
* connection structure for this node.
@@ -154,31 +179,17 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
{
struct connection *con = NULL;
int r;
- int n;
- con = idr_find(&connections_idr, nodeid);
+ con = __find_con(nodeid);
if (con || !alloc)
return con;
- r = idr_pre_get(&connections_idr, alloc);
- if (!r)
- return NULL;
-
con = kmem_cache_zalloc(con_cache, alloc);
if (!con)
return NULL;
- r = idr_get_new_above(&connections_idr, con, nodeid, &n);
- if (r) {
- kmem_cache_free(con_cache, con);
- return NULL;
- }
-
- if (n != nodeid) {
- idr_remove(&connections_idr, n);
- kmem_cache_free(con_cache, con);
- return NULL;
- }
+ r = nodeid_hash(nodeid);
+ hlist_add_head(&con->list, &connection_hash[r]);
con->nodeid = nodeid;
mutex_init(&con->sock_mutex);
@@ -189,19 +200,30 @@ static struct connection *__nodeid2con(int nodeid, gfp_t alloc)
/* Setup action pointers for child sockets */
if (con->nodeid) {
- struct connection *zerocon = idr_find(&connections_idr, 0);
+ struct connection *zerocon = __find_con(0);
con->connect_action = zerocon->connect_action;
if (!con->rx_action)
con->rx_action = zerocon->rx_action;
}
- if (nodeid > max_nodeid)
- max_nodeid = nodeid;
-
return con;
}
+/* Loop round all connections */
+static void foreach_conn(void (*conn_func)(struct connection *c))
+{
+ int i;
+ struct hlist_node *h, *n;
+ struct connection *con;
+
+ for (i = 0; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry_safe(con, h, n, &connection_hash[i], list){
+ conn_func(con);
+ }
+ }
+}
+
static struct connection *nodeid2con(int nodeid, gfp_t allocation)
{
struct connection *con;
@@ -217,14 +239,17 @@ static struct connection *nodeid2con(int nodeid, gfp_t allocation)
static struct connection *assoc2con(int assoc_id)
{
int i;
+ struct hlist_node *h;
struct connection *con;
mutex_lock(&connections_lock);
- for (i=0; i<=max_nodeid; i++) {
- con = __nodeid2con(i, 0);
- if (con && con->sctp_assoc == assoc_id) {
- mutex_unlock(&connections_lock);
- return con;
+
+ for (i = 0 ; i < CONN_HASH_SIZE; i++) {
+ hlist_for_each_entry(con, h, &connection_hash[i], list) {
+ if (con && con->sctp_assoc == assoc_id) {
+ mutex_unlock(&connections_lock);
+ return con;
+ }
}
}
mutex_unlock(&connections_lock);
@@ -250,8 +275,7 @@ static int nodeid_to_addr(int nodeid, struct sockaddr *retaddr)
} else {
struct sockaddr_in6 *in6 = (struct sockaddr_in6 *) &addr;
struct sockaddr_in6 *ret6 = (struct sockaddr_in6 *) retaddr;
- memcpy(&ret6->sin6_addr, &in6->sin6_addr,
- sizeof(in6->sin6_addr));
+ ipv6_addr_copy(&ret6->sin6_addr, &in6->sin6_addr);
}
return 0;
@@ -376,25 +400,23 @@ static void sctp_send_shutdown(sctp_assoc_t associd)
log_print("send EOF to node failed: %d", ret);
}
+static void sctp_init_failed_foreach(struct connection *con)
+{
+ con->sctp_assoc = 0;
+ if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
+ if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags))
+ queue_work(send_workqueue, &con->swork);
+ }
+}
+
/* INIT failed but we don't know which node...
restart INIT on all pending nodes */
static void sctp_init_failed(void)
{
- int i;
- struct connection *con;
-
mutex_lock(&connections_lock);
- for (i=1; i<=max_nodeid; i++) {
- con = __nodeid2con(i, 0);
- if (!con)
- continue;
- con->sctp_assoc = 0;
- if (test_and_clear_bit(CF_CONNECT_PENDING, &con->flags)) {
- if (!test_and_set_bit(CF_WRITE_PENDING, &con->flags)) {
- queue_work(send_workqueue, &con->swork);
- }
- }
- }
+
+ foreach_conn(sctp_init_failed_foreach);
+
mutex_unlock(&connections_lock);
}
@@ -1313,13 +1335,10 @@ out_connect:
static void clean_one_writequeue(struct connection *con)
{
- struct list_head *list;
- struct list_head *temp;
+ struct writequeue_entry *e, *safe;
spin_lock(&con->writequeue_lock);
- list_for_each_safe(list, temp, &con->writequeue) {
- struct writequeue_entry *e =
- list_entry(list, struct writequeue_entry, list);
+ list_for_each_entry_safe(e, safe, &con->writequeue, list) {
list_del(&e->list);
free_entry(e);
}
@@ -1369,14 +1388,7 @@ static void process_send_sockets(struct work_struct *work)
/* Discard all entries on the write queues */
static void clean_writequeues(void)
{
- int nodeid;
-
- for (nodeid = 1; nodeid <= max_nodeid; nodeid++) {
- struct connection *con = __nodeid2con(nodeid, 0);
-
- if (con)
- clean_one_writequeue(con);
- }
+ foreach_conn(clean_one_writequeue);
}
static void work_stop(void)
@@ -1406,23 +1418,28 @@ static int work_start(void)
return 0;
}
-void dlm_lowcomms_stop(void)
+static void stop_conn(struct connection *con)
{
- int i;
- struct connection *con;
+ con->flags |= 0x0F;
+ if (con->sock)
+ con->sock->sk->sk_user_data = NULL;
+}
+static void free_conn(struct connection *con)
+{
+ close_connection(con, true);
+ if (con->othercon)
+ kmem_cache_free(con_cache, con->othercon);
+ kmem_cache_free(con_cache, con);
+}
+
+void dlm_lowcomms_stop(void)
+{
/* Set all the flags to prevent any
socket activity.
*/
mutex_lock(&connections_lock);
- for (i = 0; i <= max_nodeid; i++) {
- con = __nodeid2con(i, 0);
- if (con) {
- con->flags |= 0x0F;
- if (con->sock)
- con->sock->sk->sk_user_data = NULL;
- }
- }
+ foreach_conn(stop_conn);
mutex_unlock(&connections_lock);
work_stop();
@@ -1430,25 +1447,20 @@ void dlm_lowcomms_stop(void)
mutex_lock(&connections_lock);
clean_writequeues();
- for (i = 0; i <= max_nodeid; i++) {
- con = __nodeid2con(i, 0);
- if (con) {
- close_connection(con, true);
- if (con->othercon)
- kmem_cache_free(con_cache, con->othercon);
- kmem_cache_free(con_cache, con);
- }
- }
- max_nodeid = 0;
+ foreach_conn(free_conn);
+
mutex_unlock(&connections_lock);
kmem_cache_destroy(con_cache);
- idr_init(&connections_idr);
}
int dlm_lowcomms_start(void)
{
int error = -EINVAL;
struct connection *con;
+ int i;
+
+ for (i = 0; i < CONN_HASH_SIZE; i++)
+ INIT_HLIST_HEAD(&connection_hash[i]);
init_local();
if (!dlm_local_count) {
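
The IDR-to-hash-table conversion above relies on nothing more than masking
the nodeid into a power-of-two bucket count. A self-contained sketch of the
same structure in plain C (singly linked buckets stand in for hlist, and
locking is omitted):

#include <stdio.h>
#include <stdlib.h>

#define CONN_HASH_SIZE 32       /* must be a power of two for the mask */

struct connection {
        int nodeid;
        struct connection *next;
};

static struct connection *connection_hash[CONN_HASH_SIZE];

static inline int nodeid_hash(int nodeid)
{
        return nodeid & (CONN_HASH_SIZE - 1);
}

static struct connection *find_con(int nodeid)
{
        struct connection *con;

        for (con = connection_hash[nodeid_hash(nodeid)]; con; con = con->next)
                if (con->nodeid == nodeid)
                        return con;
        return NULL;
}

static struct connection *get_con(int nodeid)
{
        struct connection *con = find_con(nodeid);

        if (con)
                return con;
        con = calloc(1, sizeof(*con));
        if (!con)
                return NULL;
        con->nodeid = nodeid;
        con->next = connection_hash[nodeid_hash(nodeid)];
        connection_hash[nodeid_hash(nodeid)] = con;     /* add at head */
        return con;
}

int main(void)
{
        get_con(1);
        get_con(33);            /* collides with node 1 in bucket 1 */
        printf("node 33 %sfound\n", find_con(33) ? "" : "not ");
        return 0;
}

Sequential nodeids hash to distinct buckets, so lookups are effectively
direct; only unusual numbering ever walks a chain.
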
diff --git a/fs/dlm/plock.c b/fs/dlm/plock.c
index eba87ff3177b..894a32d438d5 100644
--- a/fs/dlm/plock.c
+++ b/fs/dlm/plock.c
@@ -168,7 +168,7 @@ static int dlm_plock_callback(struct plock_op *op)
notify = xop->callback;
if (op->info.rv) {
- notify(flc, NULL, op->info.rv);
+ notify(fl, NULL, op->info.rv);
goto out;
}
@@ -187,7 +187,7 @@ static int dlm_plock_callback(struct plock_op *op)
(unsigned long long)op->info.number, file, fl);
}
- rv = notify(flc, NULL, 0);
+ rv = notify(fl, NULL, 0);
if (rv) {
/* XXX: We need to cancel the fs lock here: */
log_print("dlm_plock_callback: lock granted after lock request "
@@ -304,7 +304,9 @@ int dlm_posix_get(dlm_lockspace_t *lockspace, u64 number, struct file *file,
if (rv == -ENOENT)
rv = 0;
else if (rv > 0) {
+ locks_init_lock(fl);
fl->fl_type = (op->info.ex) ? F_WRLCK : F_RDLCK;
+ fl->fl_flags = FL_POSIX;
fl->fl_pid = op->info.pid;
fl->fl_start = op->info.start;
fl->fl_end = op->info.end;
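
Two distinct fixes land in plock.c: the callback is invoked on fl, the lock
actually passed in, rather than flc, and the file_lock returned by a
successful GETLK is fully reinitialized before being populated. A sketch of
the init-before-fill pattern (the helper is illustrative):

/* Never hand back a struct file_lock with leftover state:
 * locks_init_lock() resets list linkage and callbacks, so only the
 * fields assigned below carry meaning for the caller. */
static void fill_getlk_result(struct file_lock *fl, int ex, pid_t pid,
                              loff_t start, loff_t end)
{
        locks_init_lock(fl);
        fl->fl_type = ex ? F_WRLCK : F_RDLCK;
        fl->fl_flags = FL_POSIX;
        fl->fl_pid = pid;
        fl->fl_start = start;
        fl->fl_end = end;
}
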
diff --git a/fs/dquot.c b/fs/dquot.c
index 48c0571f831d..28aa14667602 100644
--- a/fs/dquot.c
+++ b/fs/dquot.c
@@ -87,14 +87,17 @@
#define __DQUOT_PARANOIA
/*
- * There are two quota SMP locks. dq_list_lock protects all lists with quotas
- * and quota formats and also dqstats structure containing statistics about the
- * lists. dq_data_lock protects data from dq_dqb and also mem_dqinfo structures
- * and also guards consistency of dquot->dq_dqb with inode->i_blocks, i_bytes.
+ * There are three quota SMP locks. dq_list_lock protects all lists with quotas
+ * and quota formats, and the dqstats structure containing statistics about
+ * the lists. dq_data_lock protects data from dq_dqb and also mem_dqinfo
+ * structures, and also guards consistency of dquot->dq_dqb with
+ * inode->i_blocks, i_bytes.
* i_blocks and i_bytes updates itself are guarded by i_lock acquired directly
- * in inode_add_bytes() and inode_sub_bytes().
+ * in inode_add_bytes() and inode_sub_bytes(). dq_state_lock protects
+ * modifications of quota state (on quotaon and quotaoff) and readers who care
+ * about latest values take it as well.
*
- * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock
+ * The spinlock ordering is hence: dq_data_lock > dq_list_lock > i_lock,
+ * dq_list_lock > dq_state_lock
*
* Note that some things (eg. sb pointer, type, id) doesn't change during
* the life of the dquot structure and so needn't to be protected by a lock
@@ -103,12 +106,7 @@
* operation is just reading pointers from inode (or not using them at all) the
* read lock is enough. If pointers are altered function must hold write lock
* (these locking rules also apply for S_NOQUOTA flag in the inode - note that
- * for altering the flag i_mutex is also needed). If operation is holding
- * reference to dquot in other way (e.g. quotactl ops) it must be guarded by
- * dqonoff_mutex.
- * This locking assures that:
- * a) update/access to dquot pointers in inode is serialized
- * b) everyone is guarded against invalidate_dquots()
+ * for altering the flag i_mutex is also needed).
*
* Each dquot has its dq_lock mutex. Locked dquots might not be referenced
* from inodes (dquot_alloc_space() and such don't check the dq_lock).
@@ -122,11 +120,19 @@
* Lock ordering (including related VFS locks) is the following:
* i_mutex > dqonoff_sem > journal_lock > dqptr_sem > dquot->dq_lock >
* dqio_mutex
+ * The lock ordering of dqptr_sem imposed by quota code is only dqonoff_sem >
+ * dqptr_sem. But a filesystem has to account for the fact that functions such
+ * as dquot_alloc_space() acquire dqptr_sem and usually have to be called from
+ * inside a transaction to keep filesystem consistency after a crash. Also,
+ * filesystems usually want to do some IO on a dquot from ->mark_dirty, which
+ * is called with dqptr_sem held.
* i_mutex on quota files is special (it's below dqio_mutex)
*/
static DEFINE_SPINLOCK(dq_list_lock);
+static DEFINE_SPINLOCK(dq_state_lock);
DEFINE_SPINLOCK(dq_data_lock);
+EXPORT_SYMBOL(dq_data_lock);
static char *quotatypes[] = INITQFNAMES;
static struct quota_format_type *quota_formats; /* List of registered formats */
@@ -143,6 +149,7 @@ int register_quota_format(struct quota_format_type *fmt)
spin_unlock(&dq_list_lock);
return 0;
}
+EXPORT_SYMBOL(register_quota_format);
void unregister_quota_format(struct quota_format_type *fmt)
{
@@ -154,6 +161,7 @@ void unregister_quota_format(struct quota_format_type *fmt)
*actqf = (*actqf)->qf_next;
spin_unlock(&dq_list_lock);
}
+EXPORT_SYMBOL(unregister_quota_format);
static struct quota_format_type *find_quota_format(int id)
{
@@ -210,6 +218,7 @@ static unsigned int dq_hash_bits, dq_hash_mask;
static struct hlist_head *dquot_hash;
struct dqstats dqstats;
+EXPORT_SYMBOL(dqstats);
static inline unsigned int
hashfn(const struct super_block *sb, unsigned int id, int type)
@@ -304,6 +313,7 @@ int dquot_mark_dquot_dirty(struct dquot *dquot)
spin_unlock(&dq_list_lock);
return 0;
}
+EXPORT_SYMBOL(dquot_mark_dquot_dirty);
/* This function needs dq_list_lock */
static inline int clear_dquot_dirty(struct dquot *dquot)
@@ -355,6 +365,7 @@ out_iolock:
mutex_unlock(&dquot->dq_lock);
return ret;
}
+EXPORT_SYMBOL(dquot_acquire);
/*
* Write dquot to disk
@@ -384,6 +395,7 @@ out_sem:
mutex_unlock(&dqopt->dqio_mutex);
return ret;
}
+EXPORT_SYMBOL(dquot_commit);
/*
* Release dquot
@@ -412,6 +424,7 @@ out_dqlock:
mutex_unlock(&dquot->dq_lock);
return ret;
}
+EXPORT_SYMBOL(dquot_release);
void dquot_destroy(struct dquot *dquot)
{
@@ -428,7 +441,7 @@ static inline void do_destroy_dquot(struct dquot *dquot)
* quota is disabled and pointers from inodes removed so there cannot be new
* quota users. There can still be some users of quotas due to inodes being
* just deleted or pruned by prune_icache() (those are not attached to any
- * list). We have to wait for such users.
+ * list) or by a parallel quotactl call. We have to wait for such users.
*/
static void invalidate_dquots(struct super_block *sb, int type)
{
@@ -511,6 +524,7 @@ out:
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return ret;
}
+EXPORT_SYMBOL(dquot_scan_active);
int vfs_quota_sync(struct super_block *sb, int type)
{
@@ -558,6 +572,7 @@ int vfs_quota_sync(struct super_block *sb, int type)
return 0;
}
+EXPORT_SYMBOL(vfs_quota_sync);
/* Free unused dquots from cache */
static void prune_dqcache(int count)
@@ -600,7 +615,6 @@ static struct shrinker dqcache_shrinker = {
/*
* Put reference to dquot
* NOTE: If you change this function please check whether dqput_blocks() works right...
- * MUST be called with either dqptr_sem or dqonoff_mutex held
*/
void dqput(struct dquot *dquot)
{
@@ -668,6 +682,7 @@ we_slept:
put_dquot_last(dquot);
spin_unlock(&dq_list_lock);
}
+EXPORT_SYMBOL(dqput);
struct dquot *dquot_alloc(struct super_block *sb, int type)
{
@@ -697,36 +712,30 @@ static struct dquot *get_empty_dquot(struct super_block *sb, int type)
}
/*
- * Check whether dquot is in memory.
- * MUST be called with either dqptr_sem or dqonoff_mutex held
- */
-int dquot_is_cached(struct super_block *sb, unsigned int id, int type)
-{
- unsigned int hashent = hashfn(sb, id, type);
- int ret = 0;
-
- if (!sb_has_quota_active(sb, type))
- return 0;
- spin_lock(&dq_list_lock);
- if (find_dquot(hashent, sb, id, type) != NODQUOT)
- ret = 1;
- spin_unlock(&dq_list_lock);
- return ret;
-}
-
-/*
* Get reference to dquot
- * MUST be called with either dqptr_sem or dqonoff_mutex held
+ *
+ * Locking is slightly tricky here. We are guarded from parallel quotaoff()
+ * destroying our dquot by:
+ * a) checking for quota flags under dq_list_lock and
+ * b) getting a reference to dquot before we release dq_list_lock
*/
struct dquot *dqget(struct super_block *sb, unsigned int id, int type)
{
unsigned int hashent = hashfn(sb, id, type);
- struct dquot *dquot, *empty = NODQUOT;
+ struct dquot *dquot = NODQUOT, *empty = NODQUOT;
if (!sb_has_quota_active(sb, type))
return NODQUOT;
we_slept:
spin_lock(&dq_list_lock);
+ spin_lock(&dq_state_lock);
+ if (!sb_has_quota_active(sb, type)) {
+ spin_unlock(&dq_state_lock);
+ spin_unlock(&dq_list_lock);
+ goto out;
+ }
+ spin_unlock(&dq_state_lock);
+
if ((dquot = find_dquot(hashent, sb, id, type)) == NODQUOT) {
if (empty == NODQUOT) {
spin_unlock(&dq_list_lock);
@@ -735,6 +744,7 @@ we_slept:
goto we_slept;
}
dquot = empty;
+ empty = NODQUOT;
dquot->dq_id = id;
/* all dquots go on the inuse_list */
put_inuse(dquot);
@@ -749,8 +759,6 @@ we_slept:
dqstats.cache_hits++;
dqstats.lookups++;
spin_unlock(&dq_list_lock);
- if (empty)
- do_destroy_dquot(empty);
}
/* Wait for dq_lock - after this we know that either dquot_release() is already
* finished or it will be canceled due to dq_count > 1 test */
@@ -758,14 +766,19 @@ we_slept:
/* Read the dquot and instantiate it (everything done only if needed) */
if (!test_bit(DQ_ACTIVE_B, &dquot->dq_flags) && sb->dq_op->acquire_dquot(dquot) < 0) {
dqput(dquot);
- return NODQUOT;
+ dquot = NODQUOT;
+ goto out;
}
#ifdef __DQUOT_PARANOIA
BUG_ON(!dquot->dq_sb); /* Has somebody invalidated entry under us? */
#endif
+out:
+ if (empty)
+ do_destroy_dquot(empty);
return dquot;
}
+EXPORT_SYMBOL(dqget);
static int dqinit_needed(struct inode *inode, int type)
{
@@ -898,6 +911,28 @@ static inline void dquot_incr_space(struct dquot *dquot, qsize_t number)
dquot->dq_dqb.dqb_curspace += number;
}
+static inline void dquot_resv_space(struct dquot *dquot, qsize_t number)
+{
+ dquot->dq_dqb.dqb_rsvspace += number;
+}
+
+/*
+ * Claim reserved quota space
+ */
+static void dquot_claim_reserved_space(struct dquot *dquot,
+ qsize_t number)
+{
+ WARN_ON(dquot->dq_dqb.dqb_rsvspace < number);
+ dquot->dq_dqb.dqb_curspace += number;
+ dquot->dq_dqb.dqb_rsvspace -= number;
+}
+
+static inline
+void dquot_free_reserved_space(struct dquot *dquot, qsize_t number)
+{
+ dquot->dq_dqb.dqb_rsvspace -= number;
+}
+
static inline void dquot_decr_inodes(struct dquot *dquot, qsize_t number)
{
if (sb_dqopt(dquot->dq_sb)->flags & DQUOT_NEGATIVE_USAGE ||
@@ -1067,7 +1102,11 @@ err_out:
kfree_skb(skb);
}
#endif
-
+/*
+ * Write warnings to the console and send warning messages over netlink.
+ *
+ * Note that this function can sleep.
+ */
static inline void flush_warnings(struct dquot * const *dquots, char *warntype)
{
int i;
@@ -1128,13 +1167,18 @@ static int check_idq(struct dquot *dquot, qsize_t inodes, char *warntype)
/* needs dq_data_lock */
static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *warntype)
{
+ qsize_t tspace;
+
*warntype = QUOTA_NL_NOWARN;
if (!sb_has_quota_limits_enabled(dquot->dq_sb, dquot->dq_type) ||
test_bit(DQ_FAKE_B, &dquot->dq_flags))
return QUOTA_OK;
+ tspace = dquot->dq_dqb.dqb_curspace + dquot->dq_dqb.dqb_rsvspace
+ + space;
+
if (dquot->dq_dqb.dqb_bhardlimit &&
- dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bhardlimit &&
+ tspace > dquot->dq_dqb.dqb_bhardlimit &&
!ignore_hardlimit(dquot)) {
if (!prealloc)
*warntype = QUOTA_NL_BHARDWARN;
@@ -1142,7 +1186,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
- dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit &&
+ tspace > dquot->dq_dqb.dqb_bsoftlimit &&
dquot->dq_dqb.dqb_btime && get_seconds() >= dquot->dq_dqb.dqb_btime &&
!ignore_hardlimit(dquot)) {
if (!prealloc)
@@ -1151,7 +1195,7 @@ static int check_bdq(struct dquot *dquot, qsize_t space, int prealloc, char *war
}
if (dquot->dq_dqb.dqb_bsoftlimit &&
- dquot->dq_dqb.dqb_curspace + space > dquot->dq_dqb.dqb_bsoftlimit &&
+ tspace > dquot->dq_dqb.dqb_bsoftlimit &&
dquot->dq_dqb.dqb_btime == 0) {
if (!prealloc) {
*warntype = QUOTA_NL_BSOFTWARN;
@@ -1198,65 +1242,80 @@ static int info_bdq_free(struct dquot *dquot, qsize_t space)
}
/*
* Initialize quota pointers in inode
- * Transaction must be started at entry
+ * We do things in a slightly complicated way, but that way we avoid
+ * calling dqget(), and thus filesystem callbacks, under dqptr_sem.
*/
int dquot_initialize(struct inode *inode, int type)
{
unsigned int id = 0;
int cnt, ret = 0;
+ struct dquot *got[MAXQUOTAS] = { NODQUOT, NODQUOT };
+ struct super_block *sb = inode->i_sb;
/* First test before acquiring mutex - solves deadlocks when we
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return 0;
- down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+ /* First get references to structures we might need. */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (type != -1 && cnt != type)
+ continue;
+ switch (cnt) {
+ case USRQUOTA:
+ id = inode->i_uid;
+ break;
+ case GRPQUOTA:
+ id = inode->i_gid;
+ break;
+ }
+ got[cnt] = dqget(sb, id, cnt);
+ }
+
+ down_write(&sb_dqopt(sb)->dqptr_sem);
/* Having dqptr_sem we know NOQUOTA flags can't be altered... */
if (IS_NOQUOTA(inode))
goto out_err;
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (type != -1 && cnt != type)
continue;
+ /* Avoid races with quotaoff() */
+ if (!sb_has_quota_active(sb, cnt))
+ continue;
if (inode->i_dquot[cnt] == NODQUOT) {
- switch (cnt) {
- case USRQUOTA:
- id = inode->i_uid;
- break;
- case GRPQUOTA:
- id = inode->i_gid;
- break;
- }
- inode->i_dquot[cnt] = dqget(inode->i_sb, id, cnt);
+ inode->i_dquot[cnt] = got[cnt];
+ got[cnt] = NODQUOT;
}
}
out_err:
- up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ up_write(&sb_dqopt(sb)->dqptr_sem);
+ /* Drop unused references */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ dqput(got[cnt]);
return ret;
}
+EXPORT_SYMBOL(dquot_initialize);
/*
* Release all quotas referenced by inode
- * Transaction must be started at an entry
*/
-int dquot_drop_locked(struct inode *inode)
+int dquot_drop(struct inode *inode)
{
int cnt;
+ struct dquot *put[MAXQUOTAS];
+ down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] != NODQUOT) {
- dqput(inode->i_dquot[cnt]);
- inode->i_dquot[cnt] = NODQUOT;
- }
+ put[cnt] = inode->i_dquot[cnt];
+ inode->i_dquot[cnt] = NODQUOT;
}
- return 0;
-}
-
-int dquot_drop(struct inode *inode)
-{
- down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
- dquot_drop_locked(inode);
up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ dqput(put[cnt]);
return 0;
}
+EXPORT_SYMBOL(dquot_drop);
/* Wrapper to remove references to quota structures from inode */
void vfs_dq_drop(struct inode *inode)
@@ -1279,6 +1338,7 @@ void vfs_dq_drop(struct inode *inode)
inode->i_sb->dq_op->drop(inode);
}
}
+EXPORT_SYMBOL(vfs_dq_drop);
/*
* Following four functions update i_blocks+i_bytes fields and
@@ -1292,51 +1352,93 @@ void vfs_dq_drop(struct inode *inode)
/*
* This operation can block, but only after everything is updated
*/
-int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+int __dquot_alloc_space(struct inode *inode, qsize_t number,
+ int warn, int reserve)
{
- int cnt, ret = NO_QUOTA;
+ int cnt, ret = QUOTA_OK;
char warntype[MAXQUOTAS];
- /* First test before acquiring mutex - solves deadlocks when we
- * re-enter the quota code and are already holding the mutex */
- if (IS_NOQUOTA(inode)) {
-out_add:
- inode_add_bytes(inode, number);
- return QUOTA_OK;
- }
for (cnt = 0; cnt < MAXQUOTAS; cnt++)
warntype[cnt] = QUOTA_NL_NOWARN;
- down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- if (IS_NOQUOTA(inode)) { /* Now we can do reliable test... */
- up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
- goto out_add;
- }
spin_lock(&dq_data_lock);
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (inode->i_dquot[cnt] == NODQUOT)
continue;
- if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt) == NO_QUOTA)
- goto warn_put_all;
+ if (check_bdq(inode->i_dquot[cnt], number, warn, warntype+cnt)
+ == NO_QUOTA) {
+ ret = NO_QUOTA;
+ goto out_unlock;
+ }
}
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (inode->i_dquot[cnt] == NODQUOT)
continue;
- dquot_incr_space(inode->i_dquot[cnt], number);
+ if (reserve)
+ dquot_resv_space(inode->i_dquot[cnt], number);
+ else
+ dquot_incr_space(inode->i_dquot[cnt], number);
}
- inode_add_bytes(inode, number);
- ret = QUOTA_OK;
-warn_put_all:
+ if (!reserve)
+ inode_add_bytes(inode, number);
+out_unlock:
spin_unlock(&dq_data_lock);
- if (ret == QUOTA_OK)
- /* Dirtify all the dquots - this can block when journalling */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (inode->i_dquot[cnt])
- mark_dquot_dirty(inode->i_dquot[cnt]);
flush_warnings(inode->i_dquot, warntype);
+ return ret;
+}
+
+int dquot_alloc_space(struct inode *inode, qsize_t number, int warn)
+{
+ int cnt, ret = QUOTA_OK;
+
+ /*
+ * First test before acquiring mutex - solves deadlocks when we
+ * re-enter the quota code and are already holding the mutex
+ */
+ if (IS_NOQUOTA(inode)) {
+ inode_add_bytes(inode, number);
+ goto out;
+ }
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ inode_add_bytes(inode, number);
+ goto out_unlock;
+ }
+
+ ret = __dquot_alloc_space(inode, number, warn, 0);
+ if (ret == NO_QUOTA)
+ goto out_unlock;
+
+ /* Dirtify all the dquots - this can block when journalling */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (inode->i_dquot[cnt])
+ mark_dquot_dirty(inode->i_dquot[cnt]);
+out_unlock:
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(dquot_alloc_space);
+
+int dquot_reserve_space(struct inode *inode, qsize_t number, int warn)
+{
+ int ret = QUOTA_OK;
+
+ if (IS_NOQUOTA(inode))
+ goto out;
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode))
+ goto out_unlock;
+
+ ret = __dquot_alloc_space(inode, number, warn, 1);
+out_unlock:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
return ret;
}
+EXPORT_SYMBOL(dquot_reserve_space);
/*
* This operation can block, but only after everything is updated
@@ -1382,6 +1484,73 @@ warn_put_all:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return ret;
}
+EXPORT_SYMBOL(dquot_alloc_inode);
+
+int dquot_claim_space(struct inode *inode, qsize_t number)
+{
+ int cnt;
+ int ret = QUOTA_OK;
+
+ if (IS_NOQUOTA(inode)) {
+ inode_add_bytes(inode, number);
+ goto out;
+ }
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode)) {
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ inode_add_bytes(inode, number);
+ goto out;
+ }
+
+ spin_lock(&dq_data_lock);
+	/* Convert reserved quota space into allocated quota space */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (inode->i_dquot[cnt] != NODQUOT)
+ dquot_claim_reserved_space(inode->i_dquot[cnt],
+ number);
+ }
+ /* Update inode bytes */
+ inode_add_bytes(inode, number);
+ spin_unlock(&dq_data_lock);
+ /* Dirtify all the dquots - this can block when journalling */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ if (inode->i_dquot[cnt])
+ mark_dquot_dirty(inode->i_dquot[cnt]);
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+ return ret;
+}
+EXPORT_SYMBOL(dquot_claim_space);
+
+/*
+ * Release reserved quota space
+ */
+void dquot_release_reserved_space(struct inode *inode, qsize_t number)
+{
+ int cnt;
+
+ if (IS_NOQUOTA(inode))
+ goto out;
+
+ down_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ if (IS_NOQUOTA(inode))
+ goto out_unlock;
+
+ spin_lock(&dq_data_lock);
+ /* Release reserved dquots */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
+ if (inode->i_dquot[cnt] != NODQUOT)
+ dquot_free_reserved_space(inode->i_dquot[cnt], number);
+ }
+ spin_unlock(&dq_data_lock);
+
+out_unlock:
+ up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
+out:
+ return;
+}
+EXPORT_SYMBOL(dquot_release_reserved_space);
/*
* This operation can block, but only after everything is updated
@@ -1422,6 +1591,7 @@ out_sub:
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
+EXPORT_SYMBOL(dquot_free_space);
/*
* This operation can block, but only after everything is updated
@@ -1458,6 +1628,20 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
up_read(&sb_dqopt(inode->i_sb)->dqptr_sem);
return QUOTA_OK;
}
+EXPORT_SYMBOL(dquot_free_inode);
+
+/*
+ * Callback function: get the reserved quota space from the underlying fs
+ */
+qsize_t dquot_get_reserved_space(struct inode *inode)
+{
+ qsize_t reserved_space = 0;
+
+ if (sb_any_quota_active(inode->i_sb) &&
+ inode->i_sb->dq_op->get_reserved_space)
+ reserved_space = inode->i_sb->dq_op->get_reserved_space(inode);
+ return reserved_space;
+}
/*
* Transfer the number of inode and blocks from one diskquota to an other.
@@ -1467,11 +1651,13 @@ int dquot_free_inode(const struct inode *inode, qsize_t number)
*/
int dquot_transfer(struct inode *inode, struct iattr *iattr)
{
- qsize_t space;
+ qsize_t space, cur_space;
+ qsize_t rsv_space = 0;
struct dquot *transfer_from[MAXQUOTAS];
struct dquot *transfer_to[MAXQUOTAS];
- int cnt, ret = NO_QUOTA, chuid = (iattr->ia_valid & ATTR_UID) && inode->i_uid != iattr->ia_uid,
- chgid = (iattr->ia_valid & ATTR_GID) && inode->i_gid != iattr->ia_gid;
+ int cnt, ret = QUOTA_OK;
+ int chuid = iattr->ia_valid & ATTR_UID && inode->i_uid != iattr->ia_uid,
+ chgid = iattr->ia_valid & ATTR_GID && inode->i_gid != iattr->ia_gid;
char warntype_to[MAXQUOTAS];
char warntype_from_inodes[MAXQUOTAS], warntype_from_space[MAXQUOTAS];
@@ -1479,21 +1665,11 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
* re-enter the quota code and are already holding the mutex */
if (IS_NOQUOTA(inode))
return QUOTA_OK;
- /* Clear the arrays */
+ /* Initialize the arrays */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- transfer_to[cnt] = transfer_from[cnt] = NODQUOT;
+ transfer_from[cnt] = NODQUOT;
+ transfer_to[cnt] = NODQUOT;
warntype_to[cnt] = QUOTA_NL_NOWARN;
- }
- down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
- /* Now recheck reliably when holding dqptr_sem */
- if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
- up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
- return QUOTA_OK;
- }
- /* First build the transfer_to list - here we can block on
- * reading/instantiating of dquots. We know that the transaction for
- * us was already started so we don't violate lock ranking here */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
switch (cnt) {
case USRQUOTA:
if (!chuid)
@@ -1507,8 +1683,17 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
break;
}
}
+
+ down_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ /* Now recheck reliably when holding dqptr_sem */
+ if (IS_NOQUOTA(inode)) { /* File without quota accounting? */
+ up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ goto put_all;
+ }
spin_lock(&dq_data_lock);
- space = inode_get_bytes(inode);
+ cur_space = inode_get_bytes(inode);
+ rsv_space = dquot_get_reserved_space(inode);
+ space = cur_space + rsv_space;
/* Build the transfer_from list and check the limits */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (transfer_to[cnt] == NODQUOT)
@@ -1517,7 +1702,7 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
if (check_idq(transfer_to[cnt], 1, warntype_to + cnt) ==
NO_QUOTA || check_bdq(transfer_to[cnt], space, 0,
warntype_to + cnt) == NO_QUOTA)
- goto warn_put_all;
+ goto over_quota;
}
/*
@@ -1537,37 +1722,50 @@ int dquot_transfer(struct inode *inode, struct iattr *iattr)
warntype_from_space[cnt] =
info_bdq_free(transfer_from[cnt], space);
dquot_decr_inodes(transfer_from[cnt], 1);
- dquot_decr_space(transfer_from[cnt], space);
+ dquot_decr_space(transfer_from[cnt], cur_space);
+ dquot_free_reserved_space(transfer_from[cnt],
+ rsv_space);
}
dquot_incr_inodes(transfer_to[cnt], 1);
- dquot_incr_space(transfer_to[cnt], space);
+ dquot_incr_space(transfer_to[cnt], cur_space);
+ dquot_resv_space(transfer_to[cnt], rsv_space);
inode->i_dquot[cnt] = transfer_to[cnt];
}
- ret = QUOTA_OK;
-warn_put_all:
spin_unlock(&dq_data_lock);
+ up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+
/* Dirtify all the dquots - this can block when journalling */
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
if (transfer_from[cnt])
mark_dquot_dirty(transfer_from[cnt]);
- if (transfer_to[cnt])
+ if (transfer_to[cnt]) {
mark_dquot_dirty(transfer_to[cnt]);
+ /* The reference we got is transferred to the inode */
+ transfer_to[cnt] = NODQUOT;
+ }
}
+warn_put_all:
flush_warnings(transfer_to, warntype_to);
flush_warnings(transfer_from, warntype_from_inodes);
flush_warnings(transfer_from, warntype_from_space);
-
+put_all:
for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (ret == QUOTA_OK && transfer_from[cnt] != NODQUOT)
- dqput(transfer_from[cnt]);
- if (ret == NO_QUOTA && transfer_to[cnt] != NODQUOT)
- dqput(transfer_to[cnt]);
+ dqput(transfer_from[cnt]);
+ dqput(transfer_to[cnt]);
}
- up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
return ret;
+over_quota:
+ spin_unlock(&dq_data_lock);
+ up_write(&sb_dqopt(inode->i_sb)->dqptr_sem);
+ /* Clear dquot pointers we don't want to dqput() */
+ for (cnt = 0; cnt < MAXQUOTAS; cnt++)
+ transfer_from[cnt] = NODQUOT;
+ ret = NO_QUOTA;
+ goto warn_put_all;
}
+EXPORT_SYMBOL(dquot_transfer);
/* Wrapper for transferring ownership of an inode */
int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
@@ -1579,7 +1777,7 @@ int vfs_dq_transfer(struct inode *inode, struct iattr *iattr)
}
return 0;
}
-
+EXPORT_SYMBOL(vfs_dq_transfer);
/*
* Write info of quota file to disk
@@ -1594,6 +1792,7 @@ int dquot_commit_info(struct super_block *sb, int type)
mutex_unlock(&dqopt->dqio_mutex);
return ret;
}
+EXPORT_SYMBOL(dquot_commit_info);
/*
* Definitions of diskquota operations.
@@ -1651,19 +1850,24 @@ int vfs_quota_disable(struct super_block *sb, int type, unsigned int flags)
continue;
if (flags & DQUOT_SUSPENDED) {
+ spin_lock(&dq_state_lock);
dqopt->flags |=
dquot_state_flag(DQUOT_SUSPENDED, cnt);
+ spin_unlock(&dq_state_lock);
} else {
+ spin_lock(&dq_state_lock);
dqopt->flags &= ~dquot_state_flag(flags, cnt);
/* Turning off suspended quotas? */
if (!sb_has_quota_loaded(sb, cnt) &&
sb_has_quota_suspended(sb, cnt)) {
dqopt->flags &= ~dquot_state_flag(
DQUOT_SUSPENDED, cnt);
+ spin_unlock(&dq_state_lock);
iput(dqopt->files[cnt]);
dqopt->files[cnt] = NULL;
continue;
}
+ spin_unlock(&dq_state_lock);
}
/* We still have to keep quota loaded? */
@@ -1741,13 +1945,14 @@ put_inodes:
}
return ret;
}
+EXPORT_SYMBOL(vfs_quota_disable);
int vfs_quota_off(struct super_block *sb, int type, int remount)
{
return vfs_quota_disable(sb, type, remount ? DQUOT_SUSPENDED :
(DQUOT_USAGE_ENABLED | DQUOT_LIMITS_ENABLED));
}
-
+EXPORT_SYMBOL(vfs_quota_off);
/*
* Turn quotas on on a device
*/
@@ -1830,7 +2035,9 @@ static int vfs_load_quota_inode(struct inode *inode, int type, int format_id,
}
mutex_unlock(&dqopt->dqio_mutex);
mutex_unlock(&inode->i_mutex);
+ spin_lock(&dq_state_lock);
dqopt->flags |= dquot_state_flag(flags, type);
+ spin_unlock(&dq_state_lock);
add_dquot_ref(sb, type);
mutex_unlock(&dqopt->dqonoff_mutex);
@@ -1872,9 +2079,11 @@ static int vfs_quota_on_remount(struct super_block *sb, int type)
}
inode = dqopt->files[type];
dqopt->files[type] = NULL;
+ spin_lock(&dq_state_lock);
flags = dqopt->flags & dquot_state_flag(DQUOT_USAGE_ENABLED |
DQUOT_LIMITS_ENABLED, type);
dqopt->flags &= ~dquot_state_flag(DQUOT_STATE_FLAGS, type);
+ spin_unlock(&dq_state_lock);
mutex_unlock(&dqopt->dqonoff_mutex);
flags = dquot_generic_flag(flags, type);
@@ -1900,6 +2109,7 @@ int vfs_quota_on_path(struct super_block *sb, int type, int format_id,
DQUOT_LIMITS_ENABLED);
return error;
}
+EXPORT_SYMBOL(vfs_quota_on_path);
int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
int remount)
@@ -1917,6 +2127,7 @@ int vfs_quota_on(struct super_block *sb, int type, int format_id, char *name,
}
return error;
}
+EXPORT_SYMBOL(vfs_quota_on);
/*
* More powerful function for turning on quotas allowing setting
@@ -1952,7 +2163,9 @@ int vfs_quota_enable(struct inode *inode, int type, int format_id,
ret = -EBUSY;
goto out_lock;
}
+ spin_lock(&dq_state_lock);
sb_dqopt(sb)->flags |= dquot_state_flag(flags, type);
+ spin_unlock(&dq_state_lock);
out_lock:
mutex_unlock(&dqopt->dqonoff_mutex);
return ret;
@@ -1961,6 +2174,7 @@ out_lock:
load_quota:
return vfs_load_quota_inode(inode, type, format_id, flags);
}
+EXPORT_SYMBOL(vfs_quota_enable);
/*
* This function is used when filesystem needs to initialize quotas
@@ -1990,6 +2204,7 @@ out:
dput(dentry);
return error;
}
+EXPORT_SYMBOL(vfs_quota_on_mount);
/* Wrapper to turn on quotas when remounting rw */
int vfs_dq_quota_on_remount(struct super_block *sb)
@@ -2006,6 +2221,7 @@ int vfs_dq_quota_on_remount(struct super_block *sb)
}
return ret;
}
+EXPORT_SYMBOL(vfs_dq_quota_on_remount);
static inline qsize_t qbtos(qsize_t blocks)
{
@@ -2025,7 +2241,7 @@ static void do_get_dqblk(struct dquot *dquot, struct if_dqblk *di)
spin_lock(&dq_data_lock);
di->dqb_bhardlimit = stoqb(dm->dqb_bhardlimit);
di->dqb_bsoftlimit = stoqb(dm->dqb_bsoftlimit);
- di->dqb_curspace = dm->dqb_curspace;
+ di->dqb_curspace = dm->dqb_curspace + dm->dqb_rsvspace;
di->dqb_ihardlimit = dm->dqb_ihardlimit;
di->dqb_isoftlimit = dm->dqb_isoftlimit;
di->dqb_curinodes = dm->dqb_curinodes;
@@ -2039,16 +2255,15 @@ int vfs_get_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
{
struct dquot *dquot;
- mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
- if (!(dquot = dqget(sb, id, type))) {
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+ dquot = dqget(sb, id, type);
+ if (dquot == NODQUOT)
return -ESRCH;
- }
do_get_dqblk(dquot, di);
dqput(dquot);
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
+
return 0;
}
+EXPORT_SYMBOL(vfs_get_dqblk);
/* Generic routine for setting common part of quota structure */
static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
@@ -2067,7 +2282,7 @@ static int do_set_dqblk(struct dquot *dquot, struct if_dqblk *di)
spin_lock(&dq_data_lock);
if (di->dqb_valid & QIF_SPACE) {
- dm->dqb_curspace = di->dqb_curspace;
+ dm->dqb_curspace = di->dqb_curspace - dm->dqb_rsvspace;
check_blim = 1;
__set_bit(DQ_LASTSET_B + QIF_SPACE_B, &dquot->dq_flags);
}
@@ -2130,7 +2345,6 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
struct dquot *dquot;
int rc;
- mutex_lock(&sb_dqopt(sb)->dqonoff_mutex);
dquot = dqget(sb, id, type);
if (!dquot) {
rc = -ESRCH;
@@ -2139,9 +2353,9 @@ int vfs_set_dqblk(struct super_block *sb, int type, qid_t id, struct if_dqblk *d
rc = do_set_dqblk(dquot, di);
dqput(dquot);
out:
- mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return rc;
}
+EXPORT_SYMBOL(vfs_set_dqblk);
/* Generic routine for getting common part of quota file information */
int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
@@ -2163,6 +2377,7 @@ int vfs_get_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return 0;
}
+EXPORT_SYMBOL(vfs_get_dqinfo);
/* Generic routine for setting common part of quota file information */
int vfs_set_dqinfo(struct super_block *sb, int type, struct if_dqinfo *ii)
@@ -2191,6 +2406,7 @@ out:
mutex_unlock(&sb_dqopt(sb)->dqonoff_mutex);
return err;
}
+EXPORT_SYMBOL(vfs_set_dqinfo);
struct quotactl_ops vfs_quotactl_ops = {
.quota_on = vfs_quota_on,
@@ -2346,39 +2562,3 @@ static int __init dquot_init(void)
return 0;
}
module_init(dquot_init);
-
-EXPORT_SYMBOL(register_quota_format);
-EXPORT_SYMBOL(unregister_quota_format);
-EXPORT_SYMBOL(dqstats);
-EXPORT_SYMBOL(dq_data_lock);
-EXPORT_SYMBOL(vfs_quota_enable);
-EXPORT_SYMBOL(vfs_quota_on);
-EXPORT_SYMBOL(vfs_quota_on_path);
-EXPORT_SYMBOL(vfs_quota_on_mount);
-EXPORT_SYMBOL(vfs_quota_disable);
-EXPORT_SYMBOL(vfs_quota_off);
-EXPORT_SYMBOL(dquot_scan_active);
-EXPORT_SYMBOL(vfs_quota_sync);
-EXPORT_SYMBOL(vfs_get_dqinfo);
-EXPORT_SYMBOL(vfs_set_dqinfo);
-EXPORT_SYMBOL(vfs_get_dqblk);
-EXPORT_SYMBOL(vfs_set_dqblk);
-EXPORT_SYMBOL(dquot_commit);
-EXPORT_SYMBOL(dquot_commit_info);
-EXPORT_SYMBOL(dquot_acquire);
-EXPORT_SYMBOL(dquot_release);
-EXPORT_SYMBOL(dquot_mark_dquot_dirty);
-EXPORT_SYMBOL(dquot_initialize);
-EXPORT_SYMBOL(dquot_drop);
-EXPORT_SYMBOL(dquot_drop_locked);
-EXPORT_SYMBOL(vfs_dq_drop);
-EXPORT_SYMBOL(dqget);
-EXPORT_SYMBOL(dqput);
-EXPORT_SYMBOL(dquot_is_cached);
-EXPORT_SYMBOL(dquot_alloc_space);
-EXPORT_SYMBOL(dquot_alloc_inode);
-EXPORT_SYMBOL(dquot_free_space);
-EXPORT_SYMBOL(dquot_free_inode);
-EXPORT_SYMBOL(dquot_transfer);
-EXPORT_SYMBOL(vfs_dq_transfer);
-EXPORT_SYMBOL(vfs_dq_quota_on_remount);
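
The three reservation entry points added above (dquot_reserve_space(), dquot_claim_space(), dquot_release_reserved_space()) compose into a reserve/claim/release lifecycle. A minimal sketch, not from this patch, of how a filesystem might drive it ("myfs_charge_delalloc" is hypothetical; error handling trimmed):

	/* Hedged sketch: reserve quota before blocks exist, convert the
	 * reservation when blocks are really allocated, or hand it back
	 * if the reservation is abandoned. */
	static int myfs_charge_delalloc(struct inode *inode, qsize_t bytes,
					int committed)
	{
		if (dquot_reserve_space(inode, bytes, 1) == NO_QUOTA)
			return -EDQUOT;		/* over block limits */

		if (committed)
			dquot_claim_space(inode, bytes); /* rsvspace -> curspace */
		else
			dquot_release_reserved_space(inode, bytes);
		return 0;
	}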
diff --git a/fs/ecryptfs/Kconfig b/fs/ecryptfs/Kconfig
new file mode 100644
index 000000000000..0c754e64232b
--- /dev/null
+++ b/fs/ecryptfs/Kconfig
@@ -0,0 +1,11 @@
+config ECRYPT_FS
+ tristate "eCrypt filesystem layer support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL && KEYS && CRYPTO && NET
+ help
+ Encrypted filesystem that operates on the VFS layer. See
+ <file:Documentation/filesystems/ecryptfs.txt> to learn more about
+ eCryptfs. Userspace components are required and can be
+ obtained from <http://ecryptfs.sf.net>.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called ecryptfs.
diff --git a/fs/efs/Kconfig b/fs/efs/Kconfig
new file mode 100644
index 000000000000..6ebfc1c207a8
--- /dev/null
+++ b/fs/efs/Kconfig
@@ -0,0 +1,14 @@
+config EFS_FS
+ tristate "EFS file system support (read only) (EXPERIMENTAL)"
+ depends on BLOCK && EXPERIMENTAL
+ help
+ EFS is an older file system used for non-ISO9660 CD-ROMs and hard
+	  disk partitions by SGI's IRIX operating system (IRIX 6.0 and newer,
+	  however, use the XFS file system for hard disk partitions).
+
+ This implementation only offers read-only access. If you don't know
+ what all this is about, it's safe to say N. For more information
+ about EFS see its home page at <http://aeschi.ch.eu.org/efs/>.
+
+ To compile the EFS file system support as a module, choose M here: the
+ module will be called efs.
diff --git a/fs/eventpoll.c b/fs/eventpoll.c
index ba2f9ec71192..011b9b8c90c6 100644
--- a/fs/eventpoll.c
+++ b/fs/eventpoll.c
@@ -234,8 +234,6 @@ struct ep_pqueue {
/*
* Configuration options available inside /proc/sys/fs/epoll/
*/
-/* Maximum number of epoll devices, per user */
-static int max_user_instances __read_mostly;
/* Maximum number of epoll watched descriptors, per user */
static int max_user_watches __read_mostly;
@@ -261,14 +259,6 @@ static int zero;
ctl_table epoll_table[] = {
{
- .procname = "max_user_instances",
- .data = &max_user_instances,
- .maxlen = sizeof(int),
- .mode = 0644,
- .proc_handler = &proc_dointvec_minmax,
- .extra1 = &zero,
- },
- {
.procname = "max_user_watches",
.data = &max_user_watches,
.maxlen = sizeof(int),
@@ -491,7 +481,6 @@ static void ep_free(struct eventpoll *ep)
mutex_unlock(&epmutex);
mutex_destroy(&ep->mtx);
- atomic_dec(&ep->user->epoll_devs);
free_uid(ep->user);
kfree(ep);
}
@@ -581,10 +570,6 @@ static int ep_alloc(struct eventpoll **pep)
struct eventpoll *ep;
user = get_current_user();
- error = -EMFILE;
- if (unlikely(atomic_read(&user->epoll_devs) >=
- max_user_instances))
- goto free_uid;
error = -ENOMEM;
ep = kzalloc(sizeof(*ep), GFP_KERNEL);
if (unlikely(!ep))
@@ -1141,7 +1126,6 @@ SYSCALL_DEFINE1(epoll_create1, int, flags)
flags & O_CLOEXEC);
if (fd < 0)
ep_free(ep);
- atomic_inc(&ep->user->epoll_devs);
error_return:
DNPRINTK(3, (KERN_INFO "[%p] eventpoll: sys_epoll_create(%d) = %d\n",
@@ -1366,8 +1350,10 @@ static int __init eventpoll_init(void)
struct sysinfo si;
si_meminfo(&si);
- max_user_instances = 128;
- max_user_watches = (((si.totalram - si.totalhigh) / 32) << PAGE_SHIFT) /
+ /*
+	 * Allows the top 4% of lowmem to be allocated for epoll watches (per user).
+ */
+ max_user_watches = (((si.totalram - si.totalhigh) / 25) << PAGE_SHIFT) /
EP_ITEM_COST;
/* Initialize the structure used to perform safe poll wait head wake ups */
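
For a sense of scale of the new default, a hedged back-of-envelope (EP_ITEM_COST is the per-watch bookkeeping size; the 128-byte figure below is illustrative, not taken from this patch):

	/*
	 * totalram/totalhigh are in pages, and 1/25 == 4% of lowmem.
	 * With 1 GiB of lowmem (262144 4 KiB pages) and an assumed
	 * EP_ITEM_COST of 128 bytes:
	 *
	 *	(262144 / 25) << PAGE_SHIFT  = 10485 pages ~= 41 MiB
	 *	41 MiB / 128 B              ~= 335000 watches per user
	 */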
diff --git a/fs/ext2/inode.c b/fs/ext2/inode.c
index 23fff2f87783..b46b68ac98b6 100644
--- a/fs/ext2/inode.c
+++ b/fs/ext2/inode.c
@@ -817,6 +817,7 @@ const struct address_space_operations ext2_nobh_aops = {
.direct_IO = ext2_direct_IO,
.writepages = ext2_writepages,
.migratepage = buffer_migrate_page,
+	.write_one_page		= generic_file_buffered_write_one_page,
};
/*
diff --git a/fs/ext3/inode.c b/fs/ext3/inode.c
index 5fa453b49a64..2bbb2764187c 100644
--- a/fs/ext3/inode.c
+++ b/fs/ext3/inode.c
@@ -1231,7 +1231,7 @@ static int ext3_generic_write_end(struct file *file,
loff_t pos, unsigned len, unsigned copied,
struct page *page, void *fsdata)
{
- struct inode *inode = file->f_mapping->host;
+ struct inode *inode = mapping->host;
copied = block_write_end(file, mapping, pos, len, copied, page, fsdata);
@@ -1256,7 +1256,7 @@ static int ext3_ordered_write_end(struct file *file,
struct page *page, void *fsdata)
{
handle_t *handle = ext3_journal_current_handle();
- struct inode *inode = file->f_mapping->host;
+ struct inode *inode = mapping->host;
unsigned from, to;
int ret = 0, ret2;
@@ -1298,7 +1298,7 @@ static int ext3_writeback_write_end(struct file *file,
struct page *page, void *fsdata)
{
handle_t *handle = ext3_journal_current_handle();
- struct inode *inode = file->f_mapping->host;
+ struct inode *inode = mapping->host;
int ret = 0, ret2;
loff_t new_i_size;
@@ -1795,6 +1795,7 @@ static const struct address_space_operations ext3_ordered_aops = {
.direct_IO = ext3_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
+ .write_one_page = generic_file_buffered_write_one_page,
};
static const struct address_space_operations ext3_writeback_aops = {
@@ -1810,6 +1811,7 @@ static const struct address_space_operations ext3_writeback_aops = {
.direct_IO = ext3_direct_IO,
.migratepage = buffer_migrate_page,
.is_partially_uptodate = block_is_partially_uptodate,
+ .write_one_page = generic_file_buffered_write_one_page,
};
static const struct address_space_operations ext3_journalled_aops = {
@@ -1824,6 +1826,7 @@ static const struct address_space_operations ext3_journalled_aops = {
.invalidatepage = ext3_invalidatepage,
.releasepage = ext3_releasepage,
.is_partially_uptodate = block_is_partially_uptodate,
+ .write_one_page = generic_file_buffered_write_one_page,
};
void ext3_set_aops(struct inode *inode)
diff --git a/fs/ext3/namei.c b/fs/ext3/namei.c
index 69a3d19ca9fd..4db4ffa1edad 100644
--- a/fs/ext3/namei.c
+++ b/fs/ext3/namei.c
@@ -1358,7 +1358,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
struct fake_dirent *fde;
blocksize = dir->i_sb->s_blocksize;
- dxtrace(printk("Creating index\n"));
+ dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
retval = ext3_journal_get_write_access(handle, bh);
if (retval) {
ext3_std_error(dir->i_sb, retval);
@@ -1367,6 +1367,19 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
}
root = (struct dx_root *) bh->b_data;
+ /* The 0th block becomes the root, move the dirents out */
+ fde = &root->dotdot;
+ de = (struct ext3_dir_entry_2 *)((char *)fde +
+ ext3_rec_len_from_disk(fde->rec_len));
+ if ((char *) de >= (((char *) root) + blocksize)) {
+ ext3_error(dir->i_sb, __func__,
+ "invalid rec_len for '..' in inode %lu",
+ dir->i_ino);
+ brelse(bh);
+ return -EIO;
+ }
+ len = ((char *) root) + blocksize - (char *) de;
+
bh2 = ext3_append (handle, dir, &block, &retval);
if (!(bh2)) {
brelse(bh);
@@ -1375,11 +1388,6 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
EXT3_I(dir)->i_flags |= EXT3_INDEX_FL;
data1 = bh2->b_data;
- /* The 0th block becomes the root, move the dirents out */
- fde = &root->dotdot;
- de = (struct ext3_dir_entry_2 *)((char *)fde +
- ext3_rec_len_from_disk(fde->rec_len));
- len = ((char *) root) + blocksize - (char *) de;
memcpy (data1, de, len);
de = (struct ext3_dir_entry_2 *) data1;
top = data1 + len;
diff --git a/fs/ext3/super.c b/fs/ext3/super.c
index b70d90e08a3c..ec410a9afef6 100644
--- a/fs/ext3/super.c
+++ b/fs/ext3/super.c
@@ -707,8 +707,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page,
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
#define QTYPE2MOPT(on, t) ((t)==USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
-static int ext3_dquot_initialize(struct inode *inode, int type);
-static int ext3_dquot_drop(struct inode *inode);
static int ext3_write_dquot(struct dquot *dquot);
static int ext3_acquire_dquot(struct dquot *dquot);
static int ext3_release_dquot(struct dquot *dquot);
@@ -723,8 +721,8 @@ static ssize_t ext3_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
static struct dquot_operations ext3_quota_operations = {
- .initialize = ext3_dquot_initialize,
- .drop = ext3_dquot_drop,
+ .initialize = dquot_initialize,
+ .drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
@@ -2713,44 +2711,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
}
-static int ext3_dquot_initialize(struct inode *inode, int type)
-{
- handle_t *handle;
- int ret, err;
-
- /* We may create quota structure so we need to reserve enough blocks */
- handle = ext3_journal_start(inode, 2*EXT3_QUOTA_INIT_BLOCKS(inode->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- ret = dquot_initialize(inode, type);
- err = ext3_journal_stop(handle);
- if (!ret)
- ret = err;
- return ret;
-}
-
-static int ext3_dquot_drop(struct inode *inode)
-{
- handle_t *handle;
- int ret, err;
-
- /* We may delete quota structure so we need to reserve enough blocks */
- handle = ext3_journal_start(inode, 2*EXT3_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (IS_ERR(handle)) {
- /*
- * We call dquot_drop() anyway to at least release references
- * to quota structures so that umount does not hang.
- */
- dquot_drop(inode);
- return PTR_ERR(handle);
- }
- ret = dquot_drop(inode);
- err = ext3_journal_stop(handle);
- if (!ret)
- ret = err;
- return ret;
-}
-
static int ext3_write_dquot(struct dquot *dquot)
{
int ret, err;
diff --git a/fs/ext4/balloc.c b/fs/ext4/balloc.c
index 6bba06b09dd1..9a50b8052dcf 100644
--- a/fs/ext4/balloc.c
+++ b/fs/ext4/balloc.c
@@ -684,15 +684,15 @@ ext4_fsblk_t ext4_count_free_blocks(struct super_block *sb)
gdp = ext4_get_group_desc(sb, i, NULL);
if (!gdp)
continue;
- desc_count += le16_to_cpu(gdp->bg_free_blocks_count);
+ desc_count += ext4_free_blks_count(sb, gdp);
brelse(bitmap_bh);
bitmap_bh = ext4_read_block_bitmap(sb, i);
if (bitmap_bh == NULL)
continue;
x = ext4_count_free(bitmap_bh, sb->s_blocksize);
- printk(KERN_DEBUG "group %lu: stored = %d, counted = %u\n",
- i, le16_to_cpu(gdp->bg_free_blocks_count), x);
+ printk(KERN_DEBUG "group %u: stored = %d, counted = %u\n",
+ i, ext4_free_blks_count(sb, gdp), x);
bitmap_count += x;
}
brelse(bitmap_bh);
diff --git a/fs/ext4/ext4.h b/fs/ext4/ext4.h
index c668e4377d76..55636f728398 100644
--- a/fs/ext4/ext4.h
+++ b/fs/ext4/ext4.h
@@ -32,14 +32,6 @@
#undef EXT4FS_DEBUG
/*
- * Define EXT4_RESERVATION to reserve data blocks for expanding files
- */
-#define EXT4_DEFAULT_RESERVE_BLOCKS 8
-/*max window size: 1024(direct blocks) + 3([t,d]indirect blocks) */
-#define EXT4_MAX_RESERVE_BLOCKS 1027
-#define EXT4_RESERVE_WINDOW_NOT_ALLOCATED 0
-
-/*
* Debug code
*/
#ifdef EXT4FS_DEBUG
@@ -53,8 +45,6 @@
#define ext4_debug(f, a...) do {} while (0)
#endif
-#define EXT4_MULTIBLOCK_ALLOCATOR 1
-
/* prefer goal again. length */
#define EXT4_MB_HINT_MERGE 1
/* blocks already reserved */
@@ -1098,6 +1088,7 @@ extern int ext4_chunk_trans_blocks(struct inode *, int nrblocks);
extern int ext4_block_truncate_page(handle_t *handle,
struct address_space *mapping, loff_t from);
extern int ext4_page_mkwrite(struct vm_area_struct *vma, struct page *page);
+extern unsigned long long ext4_get_reserved_space(struct inode *inode);
/* ioctl.c */
extern long ext4_ioctl(struct file *, unsigned int, unsigned long);
@@ -1206,8 +1197,11 @@ static inline void ext4_r_blocks_count_set(struct ext4_super_block *es,
static inline loff_t ext4_isize(struct ext4_inode *raw_inode)
{
- return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
- le32_to_cpu(raw_inode->i_size_lo);
+ if (S_ISREG(le16_to_cpu(raw_inode->i_mode)))
+ return ((loff_t)le32_to_cpu(raw_inode->i_size_high) << 32) |
+ le32_to_cpu(raw_inode->i_size_lo);
+ else
+ return (loff_t) le32_to_cpu(raw_inode->i_size_lo);
}
static inline void ext4_isize_set(struct ext4_inode *raw_inode, loff_t i_size)
diff --git a/fs/ext4/ext4_i.h b/fs/ext4/ext4_i.h
index e69acc16f5c4..2d516c0a22af 100644
--- a/fs/ext4/ext4_i.h
+++ b/fs/ext4/ext4_i.h
@@ -33,9 +33,6 @@ typedef __u32 ext4_lblk_t;
/* data type for block group number */
typedef unsigned int ext4_group_t;
-#define rsv_start rsv_window._rsv_start
-#define rsv_end rsv_window._rsv_end
-
/*
* storage for cached extent
*/
diff --git a/fs/ext4/ext4_sb.h b/fs/ext4/ext4_sb.h
index 039b6ea1a042..e318f486cc24 100644
--- a/fs/ext4/ext4_sb.h
+++ b/fs/ext4/ext4_sb.h
@@ -65,10 +65,6 @@ struct ext4_sb_info {
struct blockgroup_lock s_blockgroup_lock;
struct proc_dir_entry *s_proc;
- /* root of the per fs reservation window tree */
- spinlock_t s_rsv_window_lock;
- struct rb_root s_rsv_window_root;
-
/* Journaling */
struct inode *s_journal_inode;
struct journal_s *s_journal;
diff --git a/fs/ext4/extents.c b/fs/ext4/extents.c
index 54bf0623a9ae..e2eab196875f 100644
--- a/fs/ext4/extents.c
+++ b/fs/ext4/extents.c
@@ -3048,7 +3048,7 @@ retry:
WARN_ON(ret <= 0);
printk(KERN_ERR "%s: ext4_ext_get_blocks "
"returned error inode#%lu, block=%u, "
- "max_blocks=%lu", __func__,
+ "max_blocks=%u", __func__,
inode->i_ino, block, max_blocks);
#endif
ext4_mark_inode_dirty(handle, inode);
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index a6444cee0c7e..5e426a7662c5 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -360,9 +360,9 @@ static int ext4_block_to_path(struct inode *inode,
final = ptrs;
} else {
ext4_warning(inode->i_sb, "ext4_block_to_path",
- "block %lu > max",
+ "block %lu > max in inode %lu",
i_block + direct_blocks +
- indirect_blocks + double_blocks);
+ indirect_blocks + double_blocks, inode->i_ino);
}
if (boundary)
*boundary = final - 1 - (i_block & (ptrs - 1));
@@ -973,6 +973,17 @@ out:
return err;
}
+unsigned long long ext4_get_reserved_space(struct inode *inode)
+{
+ unsigned long long total;
+
+ spin_lock(&EXT4_I(inode)->i_block_reservation_lock);
+ total = EXT4_I(inode)->i_reserved_data_blocks +
+ EXT4_I(inode)->i_reserved_meta_blocks;
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+ return total;
+}
/*
* Calculate the number of metadata blocks need to reserve
* to allocate @blocks for non extent file based file
@@ -1034,8 +1045,14 @@ static void ext4_da_update_reserve_space(struct inode *inode, int used)
/* update per-inode reservations */
BUG_ON(used > EXT4_I(inode)->i_reserved_data_blocks);
EXT4_I(inode)->i_reserved_data_blocks -= used;
-
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+ /*
+	 * Free the over-booked quota that was reserved for metadata blocks
+ */
+
+ if (mdb_free)
+ vfs_dq_release_reservation_block(inode, mdb_free);
}
/*
@@ -1547,8 +1564,8 @@ static int ext4_journalled_write_end(struct file *file,
static int ext4_da_reserve_space(struct inode *inode, int nrblocks)
{
int retries = 0;
- struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
- unsigned long md_needed, mdblocks, total = 0;
+ struct ext4_sb_info *sbi = EXT4_SB(inode->i_sb);
+ unsigned long md_needed, mdblocks, total = 0;
/*
* recalculate the amount of metadata blocks to reserve
@@ -1564,12 +1581,23 @@ repeat:
md_needed = mdblocks - EXT4_I(inode)->i_reserved_meta_blocks;
total = md_needed + nrblocks;
+ /*
+ * Make quota reservation here to prevent quota overflow
+ * later. Real quota accounting is done at pages writeout
+ * time.
+ */
+ if (vfs_dq_reserve_block(inode, total)) {
+ spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+ return -EDQUOT;
+ }
+
if (ext4_claim_free_blocks(sbi, total)) {
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
if (ext4_should_retry_alloc(inode->i_sb, &retries)) {
yield();
goto repeat;
}
+ vfs_dq_release_reservation_block(inode, total);
return -ENOSPC;
}
EXT4_I(inode)->i_reserved_data_blocks += nrblocks;
@@ -1623,6 +1651,8 @@ static void ext4_da_release_space(struct inode *inode, int to_free)
BUG_ON(mdb > EXT4_I(inode)->i_reserved_meta_blocks);
EXT4_I(inode)->i_reserved_meta_blocks = mdb;
spin_unlock(&EXT4_I(inode)->i_block_reservation_lock);
+
+ vfs_dq_release_reservation_block(inode, release);
}
static void ext4_da_page_release_reservation(struct page *page,
@@ -2821,9 +2851,6 @@ static sector_t ext4_bmap(struct address_space *mapping, sector_t block)
filemap_write_and_wait(mapping);
}
- BUG_ON(!EXT4_JOURNAL(inode) &&
- EXT4_I(inode)->i_state & EXT4_STATE_JDATA);
-
if (EXT4_JOURNAL(inode) && EXT4_I(inode)->i_state & EXT4_STATE_JDATA) {
/*
* This is a REALLY heavyweight approach, but the use of
@@ -3622,7 +3649,7 @@ static void ext4_free_data(handle_t *handle, struct inode *inode,
* block pointed to itself, it would have been detached when
* the block was cleared. Check for this instead of OOPSing.
*/
- if (bh2jh(this_bh))
+ if ((EXT4_JOURNAL(inode) == NULL) || bh2jh(this_bh))
ext4_handle_dirty_metadata(handle, inode, this_bh);
else
ext4_error(inode->i_sb, __func__,
diff --git a/fs/ext4/mballoc.c b/fs/ext4/mballoc.c
index 918aec0c8a11..a66f5df1d902 100644
--- a/fs/ext4/mballoc.c
+++ b/fs/ext4/mballoc.c
@@ -3025,7 +3025,7 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
goto out_err;
ext4_debug("using block group %u(%d)\n", ac->ac_b_ex.fe_group,
- gdp->bg_free_blocks_count);
+ ext4_free_blks_count(sb, gdp));
err = ext4_journal_get_write_access(handle, gdp_bh);
if (err)
@@ -3086,9 +3086,12 @@ ext4_mb_mark_diskspace_used(struct ext4_allocation_context *ac,
if (!(ac->ac_flags & EXT4_MB_DELALLOC_RESERVED))
/* release all the reserved blocks if non delalloc */
percpu_counter_sub(&sbi->s_dirtyblocks_counter, reserv_blks);
- else
+ else {
percpu_counter_sub(&sbi->s_dirtyblocks_counter,
ac->ac_b_ex.fe_len);
+ /* convert reserved quota blocks to real quota blocks */
+ vfs_dq_claim_block(ac->ac_inode, ac->ac_b_ex.fe_len);
+ }
if (sbi->s_log_groups_per_flex) {
ext4_group_t flex_group = ext4_flex_group(sbi,
@@ -4533,7 +4536,7 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
struct ext4_sb_info *sbi;
struct super_block *sb;
ext4_fsblk_t block = 0;
- unsigned int inquota;
+ unsigned int inquota = 0;
unsigned int reserv_blks = 0;
sb = ar->inode->i_sb;
@@ -4551,9 +4554,17 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
(unsigned long long) ar->pleft,
(unsigned long long) ar->pright);
- if (!EXT4_I(ar->inode)->i_delalloc_reserved_flag) {
- /*
- * With delalloc we already reserved the blocks
+ /*
+ * For delayed allocation, we could skip the ENOSPC and
+	 * EDQUOT checks, as blocks and quota have already been
+	 * reserved when the data was copied into the pagecache.
+ */
+ if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
+ ar->flags |= EXT4_MB_DELALLOC_RESERVED;
+ else {
+ /* Without delayed allocation we need to verify
+ * there is enough free blocks to do block allocation
+ * and verify allocation doesn't exceed the quota limits.
*/
while (ar->len && ext4_claim_free_blocks(sbi, ar->len)) {
/* let others to free the space */
@@ -4565,19 +4576,16 @@ ext4_fsblk_t ext4_mb_new_blocks(handle_t *handle,
return 0;
}
reserv_blks = ar->len;
+ while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
+ ar->flags |= EXT4_MB_HINT_NOPREALLOC;
+ ar->len--;
+ }
+ inquota = ar->len;
+ if (ar->len == 0) {
+ *errp = -EDQUOT;
+ goto out3;
+ }
}
- while (ar->len && DQUOT_ALLOC_BLOCK(ar->inode, ar->len)) {
- ar->flags |= EXT4_MB_HINT_NOPREALLOC;
- ar->len--;
- }
- if (ar->len == 0) {
- *errp = -EDQUOT;
- goto out3;
- }
- inquota = ar->len;
-
- if (EXT4_I(ar->inode)->i_delalloc_reserved_flag)
- ar->flags |= EXT4_MB_DELALLOC_RESERVED;
ac = kmem_cache_alloc(ext4_ac_cachep, GFP_NOFS);
if (!ac) {
@@ -4643,7 +4651,7 @@ repeat:
out2:
kmem_cache_free(ext4_ac_cachep, ac);
out1:
- if (ar->len < inquota)
+ if (inquota && ar->len < inquota)
DQUOT_FREE_BLOCK(ar->inode, inquota - ar->len);
out3:
if (!ar->len) {
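
Taken together, the ext4 hunks above wire delayed allocation into the quota reservation API; a condensed summary of the lifecycle, in code-comment form (function names as they appear in the hunks above):

	/*
	 * write(2), delalloc path:
	 *	ext4_da_reserve_space()       -> vfs_dq_reserve_block()
	 * writeback really allocates blocks:
	 *	ext4_mb_mark_diskspace_used() -> vfs_dq_claim_block()
	 * reservation shrinks or is dropped:
	 *	ext4_da_release_space()       -> vfs_dq_release_reservation_block()
	 */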
diff --git a/fs/ext4/mballoc.h b/fs/ext4/mballoc.h
index 10a2921baf14..7acf5ef24537 100644
--- a/fs/ext4/mballoc.h
+++ b/fs/ext4/mballoc.h
@@ -247,7 +247,6 @@ static inline void ext4_mb_store_history(struct ext4_allocation_context *ac)
#define in_range(b, first, len) ((b) >= (first) && (b) <= (first) + (len) - 1)
-struct buffer_head *read_block_bitmap(struct super_block *, ext4_group_t);
static inline ext4_fsblk_t ext4_grp_offs_to_block(struct super_block *sb,
struct ext4_free_extent *fex)
{
diff --git a/fs/ext4/namei.c b/fs/ext4/namei.c
index fec0b4c2f5f1..ba702bd7910d 100644
--- a/fs/ext4/namei.c
+++ b/fs/ext4/namei.c
@@ -1368,7 +1368,7 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
struct fake_dirent *fde;
blocksize = dir->i_sb->s_blocksize;
- dxtrace(printk(KERN_DEBUG "Creating index\n"));
+ dxtrace(printk(KERN_DEBUG "Creating index: inode %lu\n", dir->i_ino));
retval = ext4_journal_get_write_access(handle, bh);
if (retval) {
ext4_std_error(dir->i_sb, retval);
@@ -1377,6 +1377,20 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
}
root = (struct dx_root *) bh->b_data;
+ /* The 0th block becomes the root, move the dirents out */
+ fde = &root->dotdot;
+ de = (struct ext4_dir_entry_2 *)((char *)fde +
+ ext4_rec_len_from_disk(fde->rec_len));
+ if ((char *) de >= (((char *) root) + blocksize)) {
+ ext4_error(dir->i_sb, __func__,
+ "invalid rec_len for '..' in inode %lu",
+ dir->i_ino);
+ brelse(bh);
+ return -EIO;
+ }
+ len = ((char *) root) + blocksize - (char *) de;
+
+ /* Allocate new block for the 0th block's dirents */
bh2 = ext4_append(handle, dir, &block, &retval);
if (!(bh2)) {
brelse(bh);
@@ -1385,11 +1399,6 @@ static int make_indexed_dir(handle_t *handle, struct dentry *dentry,
EXT4_I(dir)->i_flags |= EXT4_INDEX_FL;
data1 = bh2->b_data;
- /* The 0th block becomes the root, move the dirents out */
- fde = &root->dotdot;
- de = (struct ext4_dir_entry_2 *)((char *)fde +
- ext4_rec_len_from_disk(fde->rec_len));
- len = ((char *) root) + blocksize - (char *) de;
memcpy (data1, de, len);
de = (struct ext4_dir_entry_2 *) data1;
top = data1 + len;
diff --git a/fs/ext4/resize.c b/fs/ext4/resize.c
index c328be5d6885..c06886abd658 100644
--- a/fs/ext4/resize.c
+++ b/fs/ext4/resize.c
@@ -861,12 +861,13 @@ int ext4_group_add(struct super_block *sb, struct ext4_new_group_data *input)
gdp = (struct ext4_group_desc *)((char *)primary->b_data +
gdb_off * EXT4_DESC_SIZE(sb));
+ memset(gdp, 0, EXT4_DESC_SIZE(sb));
ext4_block_bitmap_set(sb, gdp, input->block_bitmap); /* LV FIXME */
ext4_inode_bitmap_set(sb, gdp, input->inode_bitmap); /* LV FIXME */
ext4_inode_table_set(sb, gdp, input->inode_table); /* LV FIXME */
ext4_free_blks_set(sb, gdp, input->free_blocks_count);
ext4_free_inodes_set(sb, gdp, EXT4_INODES_PER_GROUP(sb));
- gdp->bg_flags |= cpu_to_le16(EXT4_BG_INODE_ZEROED);
+ gdp->bg_flags = cpu_to_le16(EXT4_BG_INODE_ZEROED);
gdp->bg_checksum = ext4_group_desc_csum(sbi, input->group, gdp);
/*
diff --git a/fs/ext4/super.c b/fs/ext4/super.c
index e5f06a5f045e..439ab9d6cb95 100644
--- a/fs/ext4/super.c
+++ b/fs/ext4/super.c
@@ -926,8 +926,6 @@ static int bdev_try_to_free_page(struct super_block *sb, struct page *page, gfp_
#define QTYPE2NAME(t) ((t) == USRQUOTA ? "user" : "group")
#define QTYPE2MOPT(on, t) ((t) == USRQUOTA?((on)##USRJQUOTA):((on)##GRPJQUOTA))
-static int ext4_dquot_initialize(struct inode *inode, int type);
-static int ext4_dquot_drop(struct inode *inode);
static int ext4_write_dquot(struct dquot *dquot);
static int ext4_acquire_dquot(struct dquot *dquot);
static int ext4_release_dquot(struct dquot *dquot);
@@ -942,9 +940,13 @@ static ssize_t ext4_quota_write(struct super_block *sb, int type,
const char *data, size_t len, loff_t off);
static struct dquot_operations ext4_quota_operations = {
- .initialize = ext4_dquot_initialize,
- .drop = ext4_dquot_drop,
+ .initialize = dquot_initialize,
+ .drop = dquot_drop,
.alloc_space = dquot_alloc_space,
+ .reserve_space = dquot_reserve_space,
+ .claim_space = dquot_claim_space,
+ .release_rsv = dquot_release_reserved_space,
+ .get_reserved_space = ext4_get_reserved_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
.free_inode = dquot_free_inode,
@@ -3378,44 +3380,6 @@ static inline struct inode *dquot_to_inode(struct dquot *dquot)
return sb_dqopt(dquot->dq_sb)->files[dquot->dq_type];
}
-static int ext4_dquot_initialize(struct inode *inode, int type)
-{
- handle_t *handle;
- int ret, err;
-
- /* We may create quota structure so we need to reserve enough blocks */
- handle = ext4_journal_start(inode, 2*EXT4_QUOTA_INIT_BLOCKS(inode->i_sb));
- if (IS_ERR(handle))
- return PTR_ERR(handle);
- ret = dquot_initialize(inode, type);
- err = ext4_journal_stop(handle);
- if (!ret)
- ret = err;
- return ret;
-}
-
-static int ext4_dquot_drop(struct inode *inode)
-{
- handle_t *handle;
- int ret, err;
-
- /* We may delete quota structure so we need to reserve enough blocks */
- handle = ext4_journal_start(inode, 2*EXT4_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (IS_ERR(handle)) {
- /*
- * We call dquot_drop() anyway to at least release references
- * to quota structures so that umount does not hang.
- */
- dquot_drop(inode);
- return PTR_ERR(handle);
- }
- ret = dquot_drop(inode);
- err = ext4_journal_stop(handle);
- if (!ret)
- ret = err;
- return ret;
-}
-
static int ext4_write_dquot(struct dquot *dquot)
{
int ret, err;
diff --git a/fs/fat/Kconfig b/fs/fat/Kconfig
new file mode 100644
index 000000000000..d0a69ff25375
--- /dev/null
+++ b/fs/fat/Kconfig
@@ -0,0 +1,97 @@
+config FAT_FS
+ tristate
+ select NLS
+ help
+ If you want to use one of the FAT-based file systems (the MS-DOS and
+ VFAT (Windows 95) file systems), then you must say Y or M here
+ to include FAT support. You will then be able to mount partitions or
+ diskettes with FAT-based file systems and transparently access the
+ files on them, i.e. MSDOS files will look and behave just like all
+ other Unix files.
+
+ This FAT support is not a file system in itself, it only provides
+ the foundation for the other file systems. You will have to say Y or
+ M to at least one of "MSDOS fs support" or "VFAT fs support" in
+ order to make use of it.
+
+ Another way to read and write MSDOS floppies and hard drive
+ partitions from within Linux (but not transparently) is with the
+ mtools ("man mtools") program suite. You don't need to say Y here in
+ order to do that.
+
+ If you need to move large files on floppies between a DOS and a
+ Linux box, say Y here, mount the floppy under Linux with an MSDOS
+ file system and use GNU tar's M option. GNU tar is a program
+ available for Unix and DOS ("man tar" or "info tar").
+
+ The FAT support will enlarge your kernel by about 37 KB. If unsure,
+ say Y.
+
+ To compile this as a module, choose M here: the module will be called
+ fat. Note that if you compile the FAT support as a module, you
+ cannot compile any of the FAT-based file systems into the kernel
+ -- they will have to be modules as well.
+
+config MSDOS_FS
+ tristate "MSDOS fs support"
+ select FAT_FS
+ help
+ This allows you to mount MSDOS partitions of your hard drive (unless
+ they are compressed; to access compressed MSDOS partitions under
+ Linux, you can either use the DOS emulator DOSEMU, described in the
+ DOSEMU-HOWTO, available from
+ <http://www.tldp.org/docs.html#howto>, or try dmsdosfs in
+ <ftp://ibiblio.org/pub/Linux/system/filesystems/dosfs/>. If you
+ intend to use dosemu with a non-compressed MSDOS partition, say Y
+ here) and MSDOS floppies. This means that file access becomes
+ transparent, i.e. the MSDOS files look and behave just like all
+ other Unix files.
+
+ If you have Windows 95 or Windows NT installed on your MSDOS
+ partitions, you should use the VFAT file system (say Y to "VFAT fs
+ support" below), or you will not be able to see the long filenames
+ generated by Windows 95 / Windows NT.
+
+ This option will enlarge your kernel by about 7 KB. If unsure,
+ answer Y. This will only work if you said Y to "DOS FAT fs support"
+ as well. To compile this as a module, choose M here: the module will
+ be called msdos.
+
+config VFAT_FS
+ tristate "VFAT (Windows-95) fs support"
+ select FAT_FS
+ help
+ This option provides support for normal Windows file systems with
+ long filenames. That includes non-compressed FAT-based file systems
+ used by Windows 95, Windows 98, Windows NT 4.0, and the Unix
+ programs from the mtools package.
+
+ The VFAT support enlarges your kernel by about 10 KB and it only
+ works if you said Y to the "DOS FAT fs support" above. Please read
+ the file <file:Documentation/filesystems/vfat.txt> for details. If
+ unsure, say Y.
+
+ To compile this as a module, choose M here: the module will be called
+ vfat.
+
+config FAT_DEFAULT_CODEPAGE
+ int "Default codepage for FAT"
+ depends on MSDOS_FS || VFAT_FS
+ default 437
+ help
+ This option should be set to the codepage of your FAT filesystems.
+ It can be overridden with the "codepage" mount option.
+ See <file:Documentation/filesystems/vfat.txt> for more information.
+
+config FAT_DEFAULT_IOCHARSET
+ string "Default iocharset for FAT"
+ depends on VFAT_FS
+ default "iso8859-1"
+ help
+ Set this to the default input/output character set you'd
+ like FAT to use. It should probably match the character set
+ that most of your FAT filesystems use, and can be overridden
+ with the "iocharset" mount option for FAT filesystems.
+ Note that "utf8" is not recommended for FAT filesystems.
+ If unsure, you shouldn't set "utf8" here.
+ See <file:Documentation/filesystems/vfat.txt> for more information.
diff --git a/fs/fcntl.c b/fs/fcntl.c
index bd215cc791da..bbf609280f80 100644
--- a/fs/fcntl.c
+++ b/fs/fcntl.c
@@ -19,12 +19,16 @@
#include <linux/signal.h>
#include <linux/rcupdate.h>
#include <linux/pid_namespace.h>
-#include <linux/smp_lock.h>
+#include <linux/mutex.h>
#include <asm/poll.h>
#include <asm/siginfo.h>
#include <asm/uaccess.h>
+/* Serialize access to file->f_flags */
+DEFINE_SPINLOCK(file_flags_lock);
+EXPORT_SYMBOL(file_flags_lock);
+
void set_close_on_exec(unsigned int fd, int flag)
{
struct files_struct *files = current->files;
@@ -141,7 +145,7 @@ SYSCALL_DEFINE1(dup, unsigned int, fildes)
return ret;
}
-#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | FASYNC | O_DIRECT | O_NOATIME)
+#define SETFL_MASK (O_APPEND | O_NONBLOCK | O_NDELAY | O_DIRECT | O_NOATIME)
static int setfl(int fd, struct file * filp, unsigned long arg)
{
@@ -176,25 +180,60 @@ static int setfl(int fd, struct file * filp, unsigned long arg)
if (error)
return error;
- /*
- * We still need a lock here for now to keep multiple FASYNC calls
- * from racing with each other.
- */
- lock_kernel();
if ((arg ^ filp->f_flags) & FASYNC) {
- if (filp->f_op && filp->f_op->fasync) {
- error = filp->f_op->fasync(fd, filp, (arg & FASYNC) != 0);
- if (error < 0)
- goto out;
- }
+ error = fasync_change(fd, filp, (arg & FASYNC) != 0);
+ if (error == -ENOTTY)
+ /*
+ * ABI compatibility: fcntl() has traditionally returned
+ * zero in this case (but ioctl() does not).
+ */
+ error = 0;
+ else if (error < 0)
+ goto out;
}
+ lock_file_flags();
filp->f_flags = (arg & SETFL_MASK) | (filp->f_flags & ~SETFL_MASK);
+ unlock_file_flags();
out:
- unlock_kernel();
return error;
}
+
+/*
+ * Change the FASYNC setting and let the driver know.
+ * Not static because ioctl_fioasync() uses it too.
+ */
+int fasync_change(int fd, struct file *filp, int on)
+{
+ int ret = 0;
+ static DEFINE_MUTEX(fasync_mutex);
+
+ if (filp->f_op->fasync == NULL)
+ return -ENOTTY;
+
+ mutex_lock(&fasync_mutex);
+ /* Can test without flags lock, nobody else will change it */
+ if (((filp->f_flags & FASYNC) == 0) == (on == 0))
+ goto out;
+ ret = filp->f_op->fasync(fd, filp, on);
+ if (ret >= 0) {
+ lock_file_flags();
+ if (on)
+ filp->f_flags |= FASYNC;
+ else
+ filp->f_flags &= ~FASYNC;
+ unlock_file_flags();
+ }
+ out:
+ mutex_unlock(&fasync_mutex);
+ return ret;
+}
+
static void f_modown(struct file *filp, struct pid *pid, enum pid_type type,
uid_t uid, uid_t euid, int force)
{
diff --git a/fs/freevxfs/Kconfig b/fs/freevxfs/Kconfig
new file mode 100644
index 000000000000..8dc1cd5c1efe
--- /dev/null
+++ b/fs/freevxfs/Kconfig
@@ -0,0 +1,16 @@
+config VXFS_FS
+ tristate "FreeVxFS file system support (VERITAS VxFS(TM) compatible)"
+ depends on BLOCK
+ help
+	  FreeVxFS is a file system driver that supports the VERITAS VxFS(TM)
+ file system format. VERITAS VxFS(TM) is the standard file system
+ of SCO UnixWare (and possibly others) and optionally available
+ for Sunsoft Solaris, HP-UX and many other operating systems.
+	  Currently only read-only access is supported.
+
+ NOTE: the file system type as used by mount(1), mount(2) and
+ fstab(5) is 'vxfs' as it describes the file system format, not
+ the actual driver.
+
+ To compile this as a module, choose M here: the module will be
+ called freevxfs. If unsure, say N.
diff --git a/fs/fscache/Kconfig b/fs/fscache/Kconfig
new file mode 100644
index 000000000000..9bbb8ce7bea0
--- /dev/null
+++ b/fs/fscache/Kconfig
@@ -0,0 +1,56 @@
+
+config FSCACHE
+ tristate "General filesystem local caching manager"
+ depends on EXPERIMENTAL
+ select SLOW_WORK
+ help
+ This option enables a generic filesystem caching manager that can be
+ used by various network and other filesystems to cache data locally.
+ Different sorts of caches can be plugged in, depending on the
+ resources available.
+
+ See Documentation/filesystems/caching/fscache.txt for more information.
+
+config FSCACHE_STATS
+ bool "Gather statistical information on local caching"
+ depends on FSCACHE && PROC_FS
+ help
+ This option causes statistical information to be gathered on local
+ caching and exported through file:
+
+ /proc/fs/fscache/stats
+
+ The gathering of statistics adds a certain amount of overhead to
+	  execution as there are quite a few stats gathered, and on a
+ multi-CPU system these may be on cachelines that keep bouncing
+ between CPUs. On the other hand, the stats are very useful for
+ debugging purposes. Saying 'Y' here is recommended.
+
+ See Documentation/filesystems/caching/fscache.txt for more information.
+
+config FSCACHE_HISTOGRAM
+ bool "Gather latency information on local caching"
+ depends on FSCACHE && PROC_FS
+ help
+ This option causes latency information to be gathered on local
+ caching and exported through file:
+
+ /proc/fs/fscache/histogram
+
+ The generation of this histogram adds a certain amount of overhead to
+ execution as there are a number of points at which data is gathered,
+ and on a multi-CPU system these may be on cachelines that keep
+ bouncing between CPUs. On the other hand, the histogram may be
+ useful for debugging purposes. Saying 'N' here is recommended.
+
+ See Documentation/filesystems/caching/fscache.txt for more information.
+
+config FSCACHE_DEBUG
+ bool "Debug FS-Cache"
+ depends on FSCACHE
+ help
+ This permits debugging to be dynamically enabled in the local caching
+ management module. If this is set, the debugging output may be
+	  enabled by setting bits in /sys/module/fscache/parameters/debug.
+
+ See Documentation/filesystems/caching/fscache.txt for more information.
diff --git a/fs/fscache/Makefile b/fs/fscache/Makefile
new file mode 100644
index 000000000000..3f6a198058c1
--- /dev/null
+++ b/fs/fscache/Makefile
@@ -0,0 +1,19 @@
+#
+# Makefile for general filesystem caching code
+#
+
+fscache-y := \
+ fsc-cache.o \
+ fsc-cookie.o \
+ fsc-fsdef.o \
+ fsc-main.o \
+ fsc-netfs.o \
+ fsc-object.o \
+ fsc-operation.o \
+ fsc-page.o
+
+fscache-$(CONFIG_PROC_FS) += fsc-proc.o
+fscache-$(CONFIG_FSCACHE_STATS) += fsc-stats.o
+fscache-$(CONFIG_FSCACHE_HISTOGRAM) += fsc-histogram.o
+
+obj-$(CONFIG_FSCACHE) := fscache.o
diff --git a/fs/fscache/fsc-cache.c b/fs/fscache/fsc-cache.c
new file mode 100644
index 000000000000..f35831d51e84
--- /dev/null
+++ b/fs/fscache/fsc-cache.c
@@ -0,0 +1,415 @@
+/* FS-Cache cache handling
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define FSCACHE_DEBUG_LEVEL CACHE
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "fsc-internal.h"
+
+LIST_HEAD(fscache_cache_list);
+DECLARE_RWSEM(fscache_addremove_sem);
+DECLARE_WAIT_QUEUE_HEAD(fscache_cache_cleared_wq);
+EXPORT_SYMBOL(fscache_cache_cleared_wq);
+
+static LIST_HEAD(fscache_cache_tag_list);
+
+/*
+ * look up a cache tag
+ */
+struct fscache_cache_tag *__fscache_lookup_cache_tag(const char *name)
+{
+ struct fscache_cache_tag *tag, *xtag;
+
+ /* firstly check for the existence of the tag under read lock */
+ down_read(&fscache_addremove_sem);
+
+ list_for_each_entry(tag, &fscache_cache_tag_list, link) {
+ if (strcmp(tag->name, name) == 0) {
+ atomic_inc(&tag->usage);
+ up_read(&fscache_addremove_sem);
+ return tag;
+ }
+ }
+
+ up_read(&fscache_addremove_sem);
+
+ /* the tag does not exist - create a candidate */
+ xtag = kzalloc(sizeof(*xtag) + strlen(name) + 1, GFP_KERNEL);
+ if (!xtag)
+ /* return a dummy tag if out of memory */
+ return ERR_PTR(-ENOMEM);
+
+ atomic_set(&xtag->usage, 1);
+ strcpy(xtag->name, name);
+
+ /* write lock, search again and add if still not present */
+ down_write(&fscache_addremove_sem);
+
+ list_for_each_entry(tag, &fscache_cache_tag_list, link) {
+ if (strcmp(tag->name, name) == 0) {
+ atomic_inc(&tag->usage);
+ up_write(&fscache_addremove_sem);
+ kfree(xtag);
+ return tag;
+ }
+ }
+
+ list_add_tail(&xtag->link, &fscache_cache_tag_list);
+ up_write(&fscache_addremove_sem);
+ return xtag;
+}
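
Later in this file the netfs's ->select_cache() hook is consulted; a minimal sketch (all "mynetfs" names hypothetical) of such a callback pinning a tag with the helper above, which returns ERR_PTR(-ENOMEM) as a dummy tag on OOM:

	/* Hedged sketch: a netfs cookie definition's ->select_cache()
	 * resolving its administrator-configured cache tag by name. */
	static struct fscache_cache_tag *
	mynetfs_select_cache(const void *parent_netfs_data,
			     const void *cookie_netfs_data)
	{
		return __fscache_lookup_cache_tag("mynetfs_cache");
	}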
+
+/*
+ * release a reference to a cache tag
+ */
+void __fscache_release_cache_tag(struct fscache_cache_tag *tag)
+{
+ if (tag != ERR_PTR(-ENOMEM)) {
+ down_write(&fscache_addremove_sem);
+
+ if (atomic_dec_and_test(&tag->usage))
+ list_del_init(&tag->link);
+ else
+ tag = NULL;
+
+ up_write(&fscache_addremove_sem);
+
+ kfree(tag);
+ }
+}
+
+/*
+ * select a cache in which to store an object
+ * - the cache addremove semaphore must be at least read-locked by the caller
+ * - the object will never be an index
+ */
+struct fscache_cache *fscache_select_cache_for_object(
+ struct fscache_cookie *cookie)
+{
+ struct fscache_cache_tag *tag;
+ struct fscache_object *object;
+ struct fscache_cache *cache;
+
+ _enter("");
+
+ if (list_empty(&fscache_cache_list)) {
+ _leave(" = NULL [no cache]");
+ return NULL;
+ }
+
+ /* we check the parent to determine the cache to use */
+ spin_lock(&cookie->lock);
+
+ /* the first in the parent's backing list should be the preferred
+ * cache */
+ if (!hlist_empty(&cookie->backing_objects)) {
+ object = hlist_entry(cookie->backing_objects.first,
+ struct fscache_object, cookie_link);
+
+ cache = object->cache;
+ if (object->state >= FSCACHE_OBJECT_DYING ||
+ test_bit(FSCACHE_IOERROR, &cache->flags))
+ cache = NULL;
+
+ spin_unlock(&cookie->lock);
+ _leave(" = %p [parent]", cache);
+ return cache;
+ }
+
+ /* the parent is unbacked */
+ if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
+ /* cookie not an index and is unbacked */
+ spin_unlock(&cookie->lock);
+ _leave(" = NULL [cookie ub,ni]");
+ return NULL;
+ }
+
+ spin_unlock(&cookie->lock);
+
+ if (!cookie->def->select_cache)
+ goto no_preference;
+
+ /* ask the netfs for its preference */
+ tag = cookie->def->select_cache(cookie->parent->netfs_data,
+ cookie->netfs_data);
+ if (!tag)
+ goto no_preference;
+
+ if (tag == ERR_PTR(-ENOMEM)) {
+ _leave(" = NULL [nomem tag]");
+ return NULL;
+ }
+
+ if (!tag->cache) {
+ _leave(" = NULL [unbacked tag]");
+ return NULL;
+ }
+
+ if (test_bit(FSCACHE_IOERROR, &tag->cache->flags))
+ return NULL;
+
+ _leave(" = %p [specific]", tag->cache);
+ return tag->cache;
+
+no_preference:
+ /* netfs has no preference - just select first cache */
+ cache = list_entry(fscache_cache_list.next,
+ struct fscache_cache, link);
+ _leave(" = %p [first]", cache);
+ return cache;
+}
+
+/**
+ * fscache_init_cache - Initialise a cache record
+ * @cache: The cache record to be initialised
+ * @ops: The cache operations to be installed in that record
+ * @idfmt: Format string to define identifier
+ * @...: sprintf-style arguments
+ *
+ * Initialise a record of a cache and fill in the name.
+ *
+ * See Documentation/filesystems/caching/backend-api.txt for a complete
+ * description.
+ */
+void fscache_init_cache(struct fscache_cache *cache,
+ const struct fscache_cache_ops *ops,
+ const char *idfmt,
+ ...)
+{
+ va_list va;
+
+ memset(cache, 0, sizeof(*cache));
+
+ cache->ops = ops;
+
+ va_start(va, idfmt);
+ vsnprintf(cache->identifier, sizeof(cache->identifier), idfmt, va);
+ va_end(va);
+
+ INIT_WORK(&cache->op_gc, fscache_operation_gc);
+ INIT_LIST_HEAD(&cache->link);
+ INIT_LIST_HEAD(&cache->object_list);
+ INIT_LIST_HEAD(&cache->op_gc_list);
+ spin_lock_init(&cache->object_list_lock);
+ spin_lock_init(&cache->op_gc_list_lock);
+}
+EXPORT_SYMBOL(fscache_init_cache);
+
+/**
+ * fscache_add_cache - Declare a cache as being open for business
+ * @cache: The record describing the cache
+ * @ifsdef: The record of the cache object describing the top-level index
+ * @tagname: The tag describing this cache
+ *
+ * Add a cache to the system, making it available for netfses to use.
+ *
+ * See Documentation/filesystems/caching/backend-api.txt for a complete
+ * description.
+ */
+int fscache_add_cache(struct fscache_cache *cache,
+ struct fscache_object *ifsdef,
+ const char *tagname)
+{
+ struct fscache_cache_tag *tag;
+
+ BUG_ON(!cache->ops);
+ BUG_ON(!ifsdef);
+
+ cache->flags = 0;
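+ /* the top-level index object starts out active and is interested in
+ * all events bar "cleared" */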
+ ifsdef->event_mask = ULONG_MAX & ~(1 << FSCACHE_OBJECT_EV_CLEARED);
+ ifsdef->state = FSCACHE_OBJECT_ACTIVE;
+
+ if (!tagname)
+ tagname = cache->identifier;
+
+ BUG_ON(!tagname[0]);
+
+ _enter("{%s.%s},,%s", cache->ops->name, cache->identifier, tagname);
+
+ /* we use the cache tag to uniquely identify caches */
+ tag = __fscache_lookup_cache_tag(tagname);
+ if (IS_ERR(tag))
+ goto nomem;
+
+ if (test_and_set_bit(FSCACHE_TAG_RESERVED, &tag->flags))
+ goto tag_in_use;
+
+ cache->kobj = kobject_create_and_add(tagname, fscache_root);
+ if (!cache->kobj)
+ goto error;
+
+ ifsdef->cookie = &fscache_fsdef_index;
+ ifsdef->cache = cache;
+ cache->fsdef = ifsdef;
+
+ down_write(&fscache_addremove_sem);
+
+ tag->cache = cache;
+ cache->tag = tag;
+
+ /* add the cache to the list */
+ list_add(&cache->link, &fscache_cache_list);
+
+ /* add the cache's netfs definition index object to the cache's
+ * list */
+ spin_lock(&cache->object_list_lock);
+ list_add_tail(&ifsdef->cache_link, &cache->object_list);
+ spin_unlock(&cache->object_list_lock);
+
+ /* add the cache's netfs definition index object to the top level index
+ * cookie as a known backing object */
+ spin_lock(&fscache_fsdef_index.lock);
+
+ hlist_add_head(&ifsdef->cookie_link,
+ &fscache_fsdef_index.backing_objects);
+
+ atomic_inc(&fscache_fsdef_index.usage);
+
+ /* done */
+ spin_unlock(&fscache_fsdef_index.lock);
+ up_write(&fscache_addremove_sem);
+
+ printk(KERN_NOTICE "FS-Cache: Cache \"%s\" added (type %s)\n",
+ cache->tag->name, cache->ops->name);
+ kobject_uevent(cache->kobj, KOBJ_ADD);
+
+ _leave(" = 0 [%s]", cache->identifier);
+ return 0;
+
+tag_in_use:
+ printk(KERN_ERR "FS-Cache: Cache tag '%s' already in use\n", tagname);
+ __fscache_release_cache_tag(tag);
+ _leave(" = -EXIST");
+ return -EEXIST;
+
+error:
+ __fscache_release_cache_tag(tag);
+ _leave(" = -EINVAL");
+ return -EINVAL;
+
+nomem:
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(fscache_add_cache);
+
+/**
+ * fscache_io_error - Note a cache I/O error
+ * @cache: The record describing the cache
+ *
+ * Note that an I/O error occurred in a cache and that it should no longer be
+ * used for anything. This also reports the error into the kernel log.
+ *
+ * See Documentation/filesystems/caching/backend-api.txt for a complete
+ * description.
+ */
+void fscache_io_error(struct fscache_cache *cache)
+{
+ set_bit(FSCACHE_IOERROR, &cache->flags);
+
+ printk(KERN_ERR "FS-Cache: Cache %s stopped due to I/O error\n",
+ cache->ops->name);
+}
+EXPORT_SYMBOL(fscache_io_error);
+
+/*
+ * request withdrawal of all the objects in a cache
+ * - all the objects being withdrawn are moved onto the supplied list
+ */
+static void fscache_withdraw_all_objects(struct fscache_cache *cache,
+ struct list_head *dying_objects)
+{
+ struct fscache_object *object;
+
+ spin_lock(&cache->object_list_lock);
+
+ while (!list_empty(&cache->object_list)) {
+ object = list_entry(cache->object_list.next,
+ struct fscache_object, cache_link);
+ list_move_tail(&object->cache_link, dying_objects);
+
+ _debug("withdraw %p", object->cookie);
+
+ spin_lock(&object->lock);
+ spin_unlock(&cache->object_list_lock);
+ fscache_raise_event(object, FSCACHE_OBJECT_EV_WITHDRAW);
+ spin_unlock(&object->lock);
+
+ cond_resched();
+ spin_lock(&cache->object_list_lock);
+ }
+
+ spin_unlock(&cache->object_list_lock);
+}
+
+/**
+ * fscache_withdraw_cache - Withdraw a cache from the active service
+ * @cache: The record describing the cache
+ *
+ * Withdraw a cache from service, unbinding all its cache objects from the
+ * netfs cookies they're currently representing.
+ *
+ * See Documentation/filesystems/caching/backend-api.txt for a complete
+ * description.
+ */
+void fscache_withdraw_cache(struct fscache_cache *cache)
+{
+ LIST_HEAD(dying_objects);
+
+ _enter("");
+
+ printk(KERN_NOTICE "FS-Cache: Withdrawing cache \"%s\"\n",
+ cache->tag->name);
+
+ /* make the cache unavailable for cookie acquisition */
+ if (test_and_set_bit(FSCACHE_CACHE_WITHDRAWN, &cache->flags))
+ BUG();
+
+ down_write(&fscache_addremove_sem);
+ list_del_init(&cache->link);
+ cache->tag->cache = NULL;
+ up_write(&fscache_addremove_sem);
+
+ /* make sure all pages pinned by operations on behalf of the netfs are
+ * written to disk */
+ cache->ops->sync_cache(cache);
+
+ /* dissociate all the netfs pages backed by this cache from the block
+ * mappings in the cache */
+ cache->ops->dissociate_pages(cache);
+
+ /* we now have to destroy all the active objects pertaining to this
+ * cache - which we do by passing them off to the thread pool to be
+ * disposed of */
+ _debug("destroy");
+
+ fscache_withdraw_all_objects(cache, &dying_objects);
+
+ /* wait for all extant objects to finish their outstanding operations
+ * and go away */
+ _debug("wait for finish");
+ wait_event(fscache_cache_cleared_wq,
+ atomic_read(&cache->object_count) == 0);
+ _debug("wait for clearance");
+ wait_event(fscache_cache_cleared_wq,
+ list_empty(&cache->object_list));
+ _debug("cleared");
+ ASSERT(list_empty(&dying_objects));
+
+ kobject_put(cache->kobj);
+
+ clear_bit(FSCACHE_TAG_RESERVED, &cache->tag->flags);
+ fscache_release_cache_tag(cache->tag);
+ cache->tag = NULL;
+
+ _leave("");
+}
+EXPORT_SYMBOL(fscache_withdraw_cache);
diff --git a/fs/fscache/fsc-cookie.c b/fs/fscache/fsc-cookie.c
new file mode 100644
index 000000000000..111bba2a79c3
--- /dev/null
+++ b/fs/fscache/fsc-cookie.c
@@ -0,0 +1,498 @@
+/* netfs cookie management
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * See Documentation/filesystems/caching/netfs-api.txt for more information on
+ * the netfs API.
+ */
+
+#define FSCACHE_DEBUG_LEVEL COOKIE
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "fsc-internal.h"
+
+struct kmem_cache *fscache_cookie_jar;
+
+static atomic_t fscache_object_debug_id = ATOMIC_INIT(0);
+
+static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie);
+static int fscache_alloc_object(struct fscache_cache *cache,
+ struct fscache_cookie *cookie);
+static int fscache_attach_object(struct fscache_cookie *cookie,
+ struct fscache_object *object);
+
+/*
+ * initialise a cookie jar slab element prior to any use
+ */
+void fscache_cookie_init_once(void *_cookie)
+{
+ struct fscache_cookie *cookie = _cookie;
+
+ memset(cookie, 0, sizeof(*cookie));
+ spin_lock_init(&cookie->lock);
+ INIT_HLIST_HEAD(&cookie->backing_objects);
+}
+
+/*
+ * request a cookie to represent an object (index, datafile, xattr, etc)
+ * - parent specifies the parent object
+ * - the top level index cookie for each netfs is stored in the fscache_netfs
+ * struct upon registration
+ * - def points to the definition
+ * - the netfs_data will be passed to the functions pointed to in *def
+ * - all attached caches will be searched to see if they contain this object
+ * - index objects aren't stored on disk until there's a dependent file that
+ * needs storing
+ * - other objects are stored in a selected cache immediately, and all the
+ * indices forming the path to it are instantiated if necessary
+ * - we never let on to the netfs about errors
+ * - we may set a negative cookie pointer, but that's okay
+ */
+struct fscache_cookie *__fscache_acquire_cookie(
+ struct fscache_cookie *parent,
+ const struct fscache_cookie_def *def,
+ void *netfs_data)
+{
+ struct fscache_cookie *cookie;
+
+ BUG_ON(!def);
+
+ _enter("{%s},{%s},%p",
+ parent ? (char *) parent->def->name : "<no-parent>",
+ def->name, netfs_data);
+
+ fscache_stat(&fscache_n_acquires);
+
+ /* if there's no parent cookie, then we don't create one here either */
+ if (!parent) {
+ fscache_stat(&fscache_n_acquires_null);
+ _leave(" [no parent]");
+ return NULL;
+ }
+
+ /* validate the definition */
+ BUG_ON(!def->get_key);
+ BUG_ON(!def->name[0]);
+
+ BUG_ON(def->type == FSCACHE_COOKIE_TYPE_INDEX &&
+ parent->def->type != FSCACHE_COOKIE_TYPE_INDEX);
+
+ /* allocate and initialise a cookie */
+ cookie = kmem_cache_alloc(fscache_cookie_jar, GFP_KERNEL);
+ if (!cookie) {
+ fscache_stat(&fscache_n_acquires_oom);
+ _leave(" [ENOMEM]");
+ return NULL;
+ }
+
+ atomic_set(&cookie->usage, 1);
+ atomic_set(&cookie->n_children, 0);
+
+ atomic_inc(&parent->usage);
+ atomic_inc(&parent->n_children);
+
+ cookie->def = def;
+ cookie->parent = parent;
+ cookie->netfs_data = netfs_data;
+ cookie->flags = 0;
+
+ switch (cookie->def->type) {
+ case FSCACHE_COOKIE_TYPE_INDEX:
+ fscache_stat(&fscache_n_cookie_index);
+ break;
+ case FSCACHE_COOKIE_TYPE_DATAFILE:
+ fscache_stat(&fscache_n_cookie_data);
+ break;
+ default:
+ fscache_stat(&fscache_n_cookie_special);
+ break;
+ }
+
+ /* if the object is an index then we need do nothing more here - we
+ * create indices on disk when we need them as an index may exist in
+ * multiple caches */
+ if (cookie->def->type != FSCACHE_COOKIE_TYPE_INDEX) {
+ if (fscache_acquire_non_index_cookie(cookie) < 0) {
+ atomic_dec(&parent->n_children);
+ __fscache_cookie_put(cookie);
+ fscache_stat(&fscache_n_acquires_nobufs);
+ _leave(" = NULL");
+ return NULL;
+ }
+ }
+
+ fscache_stat(&fscache_n_acquires_ok);
+ _leave(" = %p", cookie);
+ return cookie;
+}
+EXPORT_SYMBOL(__fscache_acquire_cookie);
+
+/*
+ * acquire a non-index cookie
+ * - this must make sure the index chain is instantiated and instantiate the
+ * object representation too
+ */
+static int fscache_acquire_non_index_cookie(struct fscache_cookie *cookie)
+{
+ struct fscache_object *object;
+ struct fscache_cache *cache;
+ uint64_t i_size;
+ int ret;
+
+ _enter("");
+
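+ /* assume the cookie will be unavailable until lookup is under way */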
+ cookie->flags = 1 << FSCACHE_COOKIE_UNAVAILABLE;
+
+ /* now we need to see whether any backing objects for this cookie can
+ * exist yet - if there are no caches, there'll be nothing to search */
+ down_read(&fscache_addremove_sem);
+
+ if (list_empty(&fscache_cache_list)) {
+ up_read(&fscache_addremove_sem);
+ _leave(" = 0 [no caches]");
+ return 0;
+ }
+
+ /* select a cache in which to store the object */
+ cache = fscache_select_cache_for_object(cookie->parent);
+ if (!cache) {
+ up_read(&fscache_addremove_sem);
+ fscache_stat(&fscache_n_acquires_no_cache);
+ _leave(" = -ENOMEDIUM [no cache]");
+ return -ENOMEDIUM;
+ }
+
+ _debug("cache %s", cache->tag->name);
+
+ cookie->flags =
+ (1 << FSCACHE_COOKIE_LOOKING_UP) |
+ (1 << FSCACHE_COOKIE_CREATING) |
+ (1 << FSCACHE_COOKIE_NO_DATA_YET);
+
+ /* ask the cache to allocate objects for this cookie and its parent
+ * chain */
+ ret = fscache_alloc_object(cache, cookie);
+ if (ret < 0) {
+ up_read(&fscache_addremove_sem);
+ _leave(" = %d", ret);
+ return ret;
+ }
+
+ /* pass on how big the object we're caching is supposed to be */
+ cookie->def->get_attr(cookie->netfs_data, &i_size);
+
+ spin_lock(&cookie->lock);
+ if (hlist_empty(&cookie->backing_objects)) {
+ spin_unlock(&cookie->lock);
+ goto unavailable;
+ }
+
+ object = hlist_entry(cookie->backing_objects.first,
+ struct fscache_object, cookie_link);
+
+ fscache_set_store_limit(object, i_size);
+
+ /* initiate the process of looking up all the objects in the chain
+ * (done by fscache_initialise_object()) */
+ fscache_enqueue_object(object);
+
+ spin_unlock(&cookie->lock);
+
+ /* we may be required to wait for lookup to complete at this point */
+ if (!fscache_defer_lookup) {
+ _debug("non-deferred lookup %p", &cookie->flags);
+ wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
+ fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+ _debug("complete");
+ if (test_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags))
+ goto unavailable;
+ }
+
+ up_read(&fscache_addremove_sem);
+ _leave(" = 0 [deferred]");
+ return 0;
+
+unavailable:
+ up_read(&fscache_addremove_sem);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+}
+
+/*
+ * recursively allocate cache object records for a cookie/cache combination
+ * - caller must be holding the addremove sem
+ */
+static int fscache_alloc_object(struct fscache_cache *cache,
+ struct fscache_cookie *cookie)
+{
+ struct fscache_object *object;
+ struct hlist_node *_n;
+ int ret;
+
+ _enter("%p,%p{%s}", cache, cookie, cookie->def->name);
+
+ spin_lock(&cookie->lock);
+ hlist_for_each_entry(object, _n, &cookie->backing_objects,
+ cookie_link) {
+ if (object->cache == cache)
+ goto object_already_extant;
+ }
+ spin_unlock(&cookie->lock);
+
+ /* ask the cache to allocate an object (we may end up with duplicate
+ * objects at this stage, but we sort that out later) */
+ object = cache->ops->alloc_object(cache, cookie);
+ if (IS_ERR(object)) {
+ fscache_stat(&fscache_n_object_no_alloc);
+ ret = PTR_ERR(object);
+ goto error;
+ }
+
+ fscache_stat(&fscache_n_object_alloc);
+
+ object->debug_id = atomic_inc_return(&fscache_object_debug_id);
+
+ _debug("ALLOC OBJ%x: %s {%lx}",
+ object->debug_id, cookie->def->name, object->events);
+
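+ /* recurse to make sure each index in the parent chain also has an
+ * object allocated in this cache */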
+ ret = fscache_alloc_object(cache, cookie->parent);
+ if (ret < 0)
+ goto error_put;
+
+ /* only attach if we managed to allocate all we needed, otherwise
+ * discard the object we just allocated and instead use the one
+ * attached to the cookie */
+ if (fscache_attach_object(cookie, object) < 0)
+ cache->ops->put_object(object);
+
+ _leave(" = 0");
+ return 0;
+
+object_already_extant:
+ ret = -ENOBUFS;
+ if (object->state >= FSCACHE_OBJECT_DYING) {
+ spin_unlock(&cookie->lock);
+ goto error;
+ }
+ spin_unlock(&cookie->lock);
+ _leave(" = 0 [found]");
+ return 0;
+
+error_put:
+ cache->ops->put_object(object);
+error:
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * attach a cache object to a cookie
+ */
+static int fscache_attach_object(struct fscache_cookie *cookie,
+ struct fscache_object *object)
+{
+ struct fscache_object *p;
+ struct fscache_cache *cache = object->cache;
+ struct hlist_node *_n;
+ int ret;
+
+ _enter("{%s},{OBJ%x}", cookie->def->name, object->debug_id);
+
+ spin_lock(&cookie->lock);
+
+ /* there may be multiple initial creations of this object, but we only
+ * want one */
+ ret = -EEXIST;
+ hlist_for_each_entry(p, _n, &cookie->backing_objects, cookie_link) {
+ if (p->cache == object->cache) {
+ if (p->state >= FSCACHE_OBJECT_DYING)
+ ret = -ENOBUFS;
+ goto cant_attach_object;
+ }
+ }
+
+ /* pin the parent object */
+ spin_lock_nested(&cookie->parent->lock, 1);
+ hlist_for_each_entry(p, _n, &cookie->parent->backing_objects,
+ cookie_link) {
+ if (p->cache == object->cache) {
+ if (p->state >= FSCACHE_OBJECT_DYING) {
+ ret = -ENOBUFS;
+ spin_unlock(&cookie->parent->lock);
+ goto cant_attach_object;
+ }
+ object->parent = p;
+ spin_lock(&p->lock);
+ p->n_children++;
+ spin_unlock(&p->lock);
+ break;
+ }
+ }
+ spin_unlock(&cookie->parent->lock);
+
+ /* attach to the cache's object list */
+ if (list_empty(&object->cache_link)) {
+ spin_lock(&cache->object_list_lock);
+ list_add(&object->cache_link, &cache->object_list);
+ spin_unlock(&cache->object_list_lock);
+ }
+
+ /* attach to the cookie */
+ object->cookie = cookie;
+ atomic_inc(&cookie->usage);
+ hlist_add_head(&object->cookie_link, &cookie->backing_objects);
+ ret = 0;
+
+cant_attach_object:
+ spin_unlock(&cookie->lock);
+ _leave(" = %d", ret);
+ return ret;
+}
+
+/*
+ * update the index entries backing a cookie
+ */
+void __fscache_update_cookie(struct fscache_cookie *cookie)
+{
+ struct fscache_object *object;
+ struct hlist_node *_p;
+
+ fscache_stat(&fscache_n_updates);
+
+ if (!cookie) {
+ fscache_stat(&fscache_n_updates_null);
+ _leave(" [no cookie]");
+ return;
+ }
+
+ _enter("{%s}", cookie->def->name);
+
+ BUG_ON(!cookie->def->get_aux);
+
+ spin_lock(&cookie->lock);
+
+ /* update the index entry on disk in each cache backing this cookie */
+ hlist_for_each_entry(object, _p,
+ &cookie->backing_objects, cookie_link) {
+ fscache_raise_event(object, FSCACHE_OBJECT_EV_UPDATE);
+ }
+
+ spin_unlock(&cookie->lock);
+ _leave("");
+}
+EXPORT_SYMBOL(__fscache_update_cookie);
+
+/*
+ * release a cookie back to the cache
+ * - the object will be marked as recyclable on disk if retire is true
+ * - all dependents of this cookie must have already been unregistered
+ * (indices/files/pages)
+ */
+void __fscache_relinquish_cookie(struct fscache_cookie *cookie, int retire)
+{
+ struct fscache_cache *cache;
+ struct fscache_object *object;
+ unsigned long event;
+
+ fscache_stat(&fscache_n_relinquishes);
+
+ if (!cookie) {
+ fscache_stat(&fscache_n_relinquishes_null);
+ _leave(" [no cookie]");
+ return;
+ }
+
+ _enter("%p{%s,%p},%d",
+ cookie, cookie->def->name, cookie->netfs_data, retire);
+
+ if (atomic_read(&cookie->n_children) != 0) {
+ printk(KERN_ERR "FS-Cache: Cookie '%s' still has children\n",
+ cookie->def->name);
+ BUG();
+ }
+
+ /* wait for the cookie to finish being instantiated (or to fail) */
+ if (test_bit(FSCACHE_COOKIE_CREATING, &cookie->flags)) {
+ fscache_stat(&fscache_n_relinquishes_waitcrt);
+ wait_on_bit(&cookie->flags, FSCACHE_COOKIE_CREATING,
+ fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+ }
+
+ event = retire ? FSCACHE_OBJECT_EV_RETIRE : FSCACHE_OBJECT_EV_RELEASE;
+
+ /* detach pointers back to the netfs */
+ spin_lock(&cookie->lock);
+
+ cookie->netfs_data = NULL;
+ cookie->def = NULL;
+
+ /* break links with all the active objects */
+ while (!hlist_empty(&cookie->backing_objects)) {
+ object = hlist_entry(cookie->backing_objects.first,
+ struct fscache_object,
+ cookie_link);
+
+ _debug("RELEASE OBJ%x", object->debug_id);
+
+ /* detach each cache object from the object cookie */
+ spin_lock(&object->lock);
+ hlist_del_init(&object->cookie_link);
+
+ cache = object->cache;
+ object->cookie = NULL;
+ fscache_raise_event(object, event);
+ spin_unlock(&object->lock);
+
+ if (atomic_dec_and_test(&cookie->usage))
+ /* the cookie refcount shouldn't be reduced to 0 yet */
+ BUG();
+ }
+
+ spin_unlock(&cookie->lock);
+
+ if (cookie->parent) {
+ ASSERTCMP(atomic_read(&cookie->parent->usage), >, 0);
+ ASSERTCMP(atomic_read(&cookie->parent->n_children), >, 0);
+ atomic_dec(&cookie->parent->n_children);
+ }
+
+ /* finally dispose of the cookie */
+ ASSERTCMP(atomic_read(&cookie->usage), >, 0);
+ fscache_cookie_put(cookie);
+
+ _leave("");
+}
+EXPORT_SYMBOL(__fscache_relinquish_cookie);
+
+/*
+ * destroy a cookie
+ */
+void __fscache_cookie_put(struct fscache_cookie *cookie)
+{
+ struct fscache_cookie *parent;
+
+ _enter("%p", cookie);
+
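+ /* free this cookie, then walk up the chain dropping the references
+ * it held on its ancestors, stopping at the first one still in use */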
+ for (;;) {
+ _debug("FREE COOKIE %p", cookie);
+ parent = cookie->parent;
+ BUG_ON(!hlist_empty(&cookie->backing_objects));
+ kmem_cache_free(fscache_cookie_jar, cookie);
+
+ if (!parent)
+ break;
+
+ cookie = parent;
+ BUG_ON(atomic_read(&cookie->usage) <= 0);
+ if (!atomic_dec_and_test(&cookie->usage))
+ break;
+ }
+
+ _leave("");
+}
diff --git a/fs/fscache/fsc-fsdef.c b/fs/fscache/fsc-fsdef.c
new file mode 100644
index 000000000000..f408684cdf2c
--- /dev/null
+++ b/fs/fscache/fsc-fsdef.c
@@ -0,0 +1,144 @@
+/* Filesystem index definition
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define FSCACHE_DEBUG_LEVEL CACHE
+#include <linux/module.h>
+#include "fsc-internal.h"
+
+static uint16_t fscache_fsdef_netfs_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax);
+
+static uint16_t fscache_fsdef_netfs_get_aux(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax);
+
+static
+enum fscache_checkaux fscache_fsdef_netfs_check_aux(void *cookie_netfs_data,
+ const void *data,
+ uint16_t datalen);
+
+/*
+ * The root index is owned by FS-Cache itself.
+ *
+ * When a netfs requests caching facilities, FS-Cache will, if one doesn't
+ * already exist, create an entry in the root index with the key being the name
+ * of the netfs ("AFS" for example), and the auxiliary data holding the index
+ * structure version supplied by the netfs:
+ *
+ * FSDEF
+ * |
+ * +-----------+
+ * | |
+ * NFS AFS
+ * [v=1] [v=1]
+ *
+ * If an entry with the appropriate name does already exist, the version is
+ * compared. If the version is different, the entire subtree from that entry
+ * will be discarded and a new entry created.
+ *
+ * The new entry will be an index, and a cookie referring to it will be passed
+ * to the netfs. This is then the root handle by which the netfs accesses the
+ * cache. It can create whatever objects it likes in that index, including
+ * further indices.
+ */
+static struct fscache_cookie_def fscache_fsdef_index_def = {
+ .name = ".FS-Cache",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+};
+
+struct fscache_cookie fscache_fsdef_index = {
+ .usage = ATOMIC_INIT(1),
+ .lock = __SPIN_LOCK_UNLOCKED(fscache_fsdef_index.lock),
+ .backing_objects = HLIST_HEAD_INIT,
+ .def = &fscache_fsdef_index_def,
+};
+EXPORT_SYMBOL(fscache_fsdef_index);
+
+/*
+ * Definition of an entry in the root index. Each entry is an index, keyed to
+ * a specific netfs and only applicable to a particular version of the index
+ * structure used by that netfs.
+ */
+struct fscache_cookie_def fscache_fsdef_netfs_def = {
+ .name = "FSDEF.netfs",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+ .get_key = fscache_fsdef_netfs_get_key,
+ .get_aux = fscache_fsdef_netfs_get_aux,
+ .check_aux = fscache_fsdef_netfs_check_aux,
+};
+
+/*
+ * get the key data for an FSDEF index record - this is the name of the netfs
+ * for which this entry is created
+ */
+static uint16_t fscache_fsdef_netfs_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ const struct fscache_netfs *netfs = cookie_netfs_data;
+ unsigned klen;
+
+ _enter("{%s.%u},", netfs->name, netfs->version);
+
+ klen = strlen(netfs->name);
+ if (klen > bufmax)
+ return 0;
+
+ memcpy(buffer, netfs->name, klen);
+ return klen;
+}
+
+/*
+ * get the auxiliary data for an FSDEF index record - this is the index
+ * structure version number of the netfs for which this entry is created
+ */
+static uint16_t fscache_fsdef_netfs_get_aux(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ const struct fscache_netfs *netfs = cookie_netfs_data;
+ unsigned dlen;
+
+ _enter("{%s.%u},", netfs->name, netfs->version);
+
+ dlen = sizeof(uint32_t);
+ if (dlen > bufmax)
+ return 0;
+
+ memcpy(buffer, &netfs->version, dlen);
+ return dlen;
+}
+
+/*
+ * check that the index structure version number stored in the auxiliary data
+ * matches the one the netfs gave us
+ */
+static enum fscache_checkaux fscache_fsdef_netfs_check_aux(
+ void *cookie_netfs_data,
+ const void *data,
+ uint16_t datalen)
+{
+ struct fscache_netfs *netfs = cookie_netfs_data;
+ uint32_t version;
+
+ _enter("{%s},,%hu", netfs->name, datalen);
+
+ if (datalen != sizeof(version)) {
+ _leave(" = OBSOLETE [dl=%d v=%zu]", datalen, sizeof(version));
+ return FSCACHE_CHECKAUX_OBSOLETE;
+ }
+
+ memcpy(&version, data, sizeof(version));
+ if (version != netfs->version) {
+ _leave(" = OBSOLETE [ver=%x net=%x]", version, netfs->version);
+ return FSCACHE_CHECKAUX_OBSOLETE;
+ }
+
+ _leave(" = OKAY");
+ return FSCACHE_CHECKAUX_OKAY;
+}
diff --git a/fs/fscache/fsc-histogram.c b/fs/fscache/fsc-histogram.c
new file mode 100644
index 000000000000..d1cbbc373b25
--- /dev/null
+++ b/fs/fscache/fsc-histogram.c
@@ -0,0 +1,109 @@
+/* FS-Cache latency histogram
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define FSCACHE_DEBUG_LEVEL THREAD
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "fsc-internal.h"
+
+atomic_t fscache_obj_instantiate_histogram[HZ];
+atomic_t fscache_objs_histogram[HZ];
+atomic_t fscache_ops_histogram[HZ];
+atomic_t fscache_retrieval_delay_histogram[HZ];
+atomic_t fscache_retrieval_histogram[HZ];
+
+/*
+ * display the time-taken histogram
+ */
+static int fscache_histogram_show(struct seq_file *m, void *v)
+{
+ unsigned long index;
+ unsigned n[5], t;
+
+ switch ((unsigned long) v) {
+ case 1:
+ seq_puts(m, "JIFS SECS OBJ INST OP RUNS OBJ RUNS "
+ " RETRV DLY RETRIEVLS\n");
+ return 0;
+ case 2:
+ seq_puts(m, "===== ===== ========= ========= ========="
+ " ========= =========\n");
+ return 0;
+ default:
+ index = (unsigned long) v - 3;
+ n[0] = atomic_read(&fscache_obj_instantiate_histogram[index]);
+ n[1] = atomic_read(&fscache_ops_histogram[index]);
+ n[2] = atomic_read(&fscache_objs_histogram[index]);
+ n[3] = atomic_read(&fscache_retrieval_delay_histogram[index]);
+ n[4] = atomic_read(&fscache_retrieval_histogram[index]);
+ if (!(n[0] | n[1] | n[2] | n[3] | n[4]))
+ return 0;
+
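+ /* convert the jiffy index to milliseconds for display as
+ * fractional seconds */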
+ t = (index * 1000) / HZ;
+
+ seq_printf(m, "%4lu 0.%03u %9u %9u %9u %9u %9u\n",
+ index, t, n[0], n[1], n[2], n[3], n[4]);
+ return 0;
+ }
+}
+
+/*
+ * set up the iterator to start reading from the first line
+ */
+static void *fscache_histogram_start(struct seq_file *m, loff_t *_pos)
+{
+ if ((unsigned long long)*_pos >= HZ + 2)
+ return NULL;
+ if (*_pos == 0)
+ *_pos = 1;
+ return (void *)(unsigned long) *_pos;
+}
+
+/*
+ * move to the next line
+ */
+static void *fscache_histogram_next(struct seq_file *m, void *v, loff_t *pos)
+{
+ (*pos)++;
+ return (unsigned long long)*pos > HZ + 2 ?
+ NULL : (void *)(unsigned long) *pos;
+}
+
+/*
+ * clean up after reading
+ */
+static void fscache_histogram_stop(struct seq_file *m, void *v)
+{
+}
+
+static const struct seq_operations fscache_histogram_ops = {
+ .start = fscache_histogram_start,
+ .stop = fscache_histogram_stop,
+ .next = fscache_histogram_next,
+ .show = fscache_histogram_show,
+};
+
+/*
+ * open "/proc/fs/fscache/histogram" to provide latency data
+ */
+static int fscache_histogram_open(struct inode *inode, struct file *file)
+{
+ return seq_open(file, &fscache_histogram_ops);
+}
+
+const struct file_operations fscache_histogram_fops = {
+ .owner = THIS_MODULE,
+ .open = fscache_histogram_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
diff --git a/fs/fscache/fsc-internal.h b/fs/fscache/fsc-internal.h
new file mode 100644
index 000000000000..e0cbd16f6dc9
--- /dev/null
+++ b/fs/fscache/fsc-internal.h
@@ -0,0 +1,380 @@
+/* Internal definitions for FS-Cache
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+/*
+ * Lock order, in the order in which multiple locks should be obtained:
+ * - fscache_addremove_sem
+ * - cookie->lock
+ * - cookie->parent->lock
+ * - cache->object_list_lock
+ * - object->lock
+ * - object->parent->lock
+ * - fscache_thread_lock
+ */
+
+#include <linux/fscache-cache.h>
+#include <linux/sched.h>
+
+#define FSCACHE_MIN_THREADS 4
+#define FSCACHE_MAX_THREADS 32
+
+/*
+ * fsc-cache.c
+ */
+extern struct list_head fscache_cache_list;
+extern struct rw_semaphore fscache_addremove_sem;
+
+extern struct fscache_cache *fscache_select_cache_for_object(
+ struct fscache_cookie *);
+
+/*
+ * fsc-cookie.c
+ */
+extern struct kmem_cache *fscache_cookie_jar;
+
+extern void fscache_cookie_init_once(void *);
+extern void __fscache_cookie_put(struct fscache_cookie *);
+
+/*
+ * fsc-fsdef.c
+ */
+extern struct fscache_cookie fscache_fsdef_index;
+extern struct fscache_cookie_def fscache_fsdef_netfs_def;
+
+/*
+ * fsc-histogram.c
+ */
+#ifdef CONFIG_FSCACHE_HISTOGRAM
+extern atomic_t fscache_obj_instantiate_histogram[HZ];
+extern atomic_t fscache_objs_histogram[HZ];
+extern atomic_t fscache_ops_histogram[HZ];
+extern atomic_t fscache_retrieval_delay_histogram[HZ];
+extern atomic_t fscache_retrieval_histogram[HZ];
+
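+/* account the time elapsed since start_jif in the given histogram, clamping
+ * overlong intervals into the last bucket */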
+static inline void fscache_hist(atomic_t histogram[], unsigned long start_jif)
+{
+ unsigned long jif = jiffies - start_jif;
+ if (jif >= HZ)
+ jif = HZ - 1;
+ atomic_inc(&histogram[jif]);
+}
+
+extern const struct file_operations fscache_histogram_fops;
+
+#else
+#define fscache_hist(hist, start_jif) do {} while (0)
+#endif
+
+/*
+ * fsc-main.c
+ */
+extern unsigned fscache_defer_lookup;
+extern unsigned fscache_defer_create;
+extern unsigned fscache_debug;
+extern struct kobject *fscache_root;
+
+extern int fscache_wait_bit(void *);
+extern int fscache_wait_bit_interruptible(void *);
+
+/*
+ * fsc-object.c
+ */
+extern void fscache_withdrawing_object(struct fscache_cache *,
+ struct fscache_object *);
+extern void fscache_enqueue_object(struct fscache_object *);
+
+/*
+ * fsc-operation.c
+ */
+extern int fscache_submit_exclusive_op(struct fscache_object *,
+ struct fscache_operation *);
+extern int fscache_submit_op(struct fscache_object *,
+ struct fscache_operation *);
+extern void fscache_abort_object(struct fscache_object *);
+extern void fscache_start_operations(struct fscache_object *);
+extern void fscache_operation_gc(struct work_struct *);
+
+/*
+ * fsc-proc.c
+ */
+#ifdef CONFIG_PROC_FS
+extern int __init fscache_proc_init(void);
+extern void fscache_proc_cleanup(void);
+#else
+#define fscache_proc_init() (0)
+#define fscache_proc_cleanup() do {} while (0)
+#endif
+
+/*
+ * fsc-stats.c
+ */
+#ifdef CONFIG_FSCACHE_STATS
+extern atomic_t fscache_n_ops_processed[FSCACHE_MAX_THREADS];
+extern atomic_t fscache_n_objs_processed[FSCACHE_MAX_THREADS];
+
+extern atomic_t fscache_n_op_pend;
+extern atomic_t fscache_n_op_run;
+extern atomic_t fscache_n_op_enqueue;
+extern atomic_t fscache_n_op_deferred_release;
+extern atomic_t fscache_n_op_release;
+extern atomic_t fscache_n_op_gc;
+
+extern atomic_t fscache_n_attr_changed;
+extern atomic_t fscache_n_attr_changed_ok;
+extern atomic_t fscache_n_attr_changed_nobufs;
+extern atomic_t fscache_n_attr_changed_nomem;
+extern atomic_t fscache_n_attr_changed_calls;
+
+extern atomic_t fscache_n_allocs;
+extern atomic_t fscache_n_allocs_ok;
+extern atomic_t fscache_n_allocs_wait;
+extern atomic_t fscache_n_allocs_nobufs;
+extern atomic_t fscache_n_alloc_ops;
+extern atomic_t fscache_n_alloc_op_waits;
+
+extern atomic_t fscache_n_retrievals;
+extern atomic_t fscache_n_retrievals_ok;
+extern atomic_t fscache_n_retrievals_wait;
+extern atomic_t fscache_n_retrievals_nodata;
+extern atomic_t fscache_n_retrievals_nobufs;
+extern atomic_t fscache_n_retrievals_intr;
+extern atomic_t fscache_n_retrievals_nomem;
+extern atomic_t fscache_n_retrieval_ops;
+extern atomic_t fscache_n_retrieval_op_waits;
+
+extern atomic_t fscache_n_stores;
+extern atomic_t fscache_n_stores_ok;
+extern atomic_t fscache_n_stores_again;
+extern atomic_t fscache_n_stores_nobufs;
+extern atomic_t fscache_n_stores_oom;
+extern atomic_t fscache_n_store_ops;
+extern atomic_t fscache_n_store_calls;
+
+extern atomic_t fscache_n_marks;
+extern atomic_t fscache_n_uncaches;
+
+extern atomic_t fscache_n_acquires;
+extern atomic_t fscache_n_acquires_null;
+extern atomic_t fscache_n_acquires_no_cache;
+extern atomic_t fscache_n_acquires_ok;
+extern atomic_t fscache_n_acquires_nobufs;
+extern atomic_t fscache_n_acquires_oom;
+
+extern atomic_t fscache_n_updates;
+extern atomic_t fscache_n_updates_null;
+extern atomic_t fscache_n_updates_run;
+
+extern atomic_t fscache_n_relinquishes;
+extern atomic_t fscache_n_relinquishes_null;
+extern atomic_t fscache_n_relinquishes_waitcrt;
+
+extern atomic_t fscache_n_cookie_index;
+extern atomic_t fscache_n_cookie_data;
+extern atomic_t fscache_n_cookie_special;
+
+extern atomic_t fscache_n_object_alloc;
+extern atomic_t fscache_n_object_no_alloc;
+extern atomic_t fscache_n_object_lookups;
+extern atomic_t fscache_n_object_lookups_negative;
+extern atomic_t fscache_n_object_lookups_positive;
+extern atomic_t fscache_n_object_created;
+extern atomic_t fscache_n_object_avail;
+extern atomic_t fscache_n_object_dead;
+
+extern atomic_t fscache_n_checkaux_none;
+extern atomic_t fscache_n_checkaux_okay;
+extern atomic_t fscache_n_checkaux_update;
+extern atomic_t fscache_n_checkaux_obsolete;
+
+static inline void fscache_stat(atomic_t *stat)
+{
+ atomic_inc(stat);
+}
+
+extern const struct file_operations fscache_stats_fops;
+#else
+
+#define fscache_stat(stat) do {} while (0)
+#endif
+
+/*
+ * raise an event on an object
+ * - if the event is not masked for that object, then the object is
+ * queued for attention by the thread pool.
+ */
+static inline void fscache_raise_event(struct fscache_object *object,
+ unsigned event)
+{
+ if (!test_and_set_bit(event, &object->events) &&
+ test_bit(event, &object->event_mask))
+ fscache_enqueue_object(object);
+}
+
+/*
+ * drop a reference to a cookie
+ */
+static inline void fscache_cookie_put(struct fscache_cookie *cookie)
+{
+ BUG_ON(atomic_read(&cookie->usage) <= 0);
+ if (atomic_dec_and_test(&cookie->usage))
+ __fscache_cookie_put(cookie);
+}
+
+/*
+ * get an extra reference to a netfs retrieval context
+ */
+static inline
+void *fscache_get_context(struct fscache_cookie *cookie, void *context)
+{
+ if (cookie->def->get_context)
+ cookie->def->get_context(cookie->netfs_data, context);
+ return context;
+}
+
+/*
+ * release a reference to a netfs retrieval context
+ */
+static inline
+void fscache_put_context(struct fscache_cookie *cookie, void *context)
+{
+ if (cookie->def->put_context)
+ cookie->def->put_context(cookie->netfs_data, context);
+}
+
+/*****************************************************************************/
+/*
+ * debug tracing
+ */
+#define dbgprintk(FMT, ...) \
+ printk(KERN_DEBUG "[%-6.6s] "FMT"\n", current->comm, ##__VA_ARGS__)
+
+/* make sure we maintain the format strings, even when debugging is disabled */
+static inline __attribute__((format(printf, 1, 2)))
+void _dbprintk(const char *fmt, ...)
+{
+}
+
+#define kenter(FMT, ...) dbgprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define kleave(FMT, ...) dbgprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define kdebug(FMT, ...) dbgprintk(FMT, ##__VA_ARGS__)
+
+#define kjournal(FMT, ...) _dbprintk(FMT, ##__VA_ARGS__)
+
+#ifdef __KDEBUG
+#define _enter(FMT, ...) kenter(FMT, ##__VA_ARGS__)
+#define _leave(FMT, ...) kleave(FMT, ##__VA_ARGS__)
+#define _debug(FMT, ...) kdebug(FMT, ##__VA_ARGS__)
+
+#elif defined(CONFIG_FSCACHE_DEBUG)
+#define _enter(FMT, ...) \
+do { \
+ if (__do_kdebug(ENTER)) \
+ kenter(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#define _leave(FMT, ...) \
+do { \
+ if (__do_kdebug(LEAVE)) \
+ kleave(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#define _debug(FMT, ...) \
+do { \
+ if (__do_kdebug(DEBUG)) \
+ kdebug(FMT, ##__VA_ARGS__); \
+} while (0)
+
+#else
+#define _enter(FMT, ...) _dbprintk("==> %s("FMT")", __func__, ##__VA_ARGS__)
+#define _leave(FMT, ...) _dbprintk("<== %s()"FMT"", __func__, ##__VA_ARGS__)
+#define _debug(FMT, ...) _dbprintk(FMT, ##__VA_ARGS__)
+#endif
+
+/*
+ * determine whether a particular optional debugging point should be logged
+ * - we need to go through three steps to persuade cpp to correctly join the
+ * shorthand in FSCACHE_DEBUG_LEVEL with its prefix
+ */
+#define ____do_kdebug(LEVEL, POINT) \
+ unlikely((fscache_debug & \
+ (FSCACHE_POINT_##POINT << (FSCACHE_DEBUG_ ## LEVEL * 3))))
+#define ___do_kdebug(LEVEL, POINT) \
+ ____do_kdebug(LEVEL, POINT)
+#define __do_kdebug(POINT) \
+ ___do_kdebug(FSCACHE_DEBUG_LEVEL, POINT)
+
+#define FSCACHE_DEBUG_CACHE 0
+#define FSCACHE_DEBUG_COOKIE 1
+#define FSCACHE_DEBUG_PAGE 2
+#define FSCACHE_DEBUG_OPERATION 3
+
+#define FSCACHE_POINT_ENTER 1
+#define FSCACHE_POINT_LEAVE 2
+#define FSCACHE_POINT_DEBUG 4
+
+#ifndef FSCACHE_DEBUG_LEVEL
+#define FSCACHE_DEBUG_LEVEL CACHE
+#endif
+
+/*
+ * assertions
+ */
+#if 1 /* defined(__KDEBUGALL) */
+
+#define ASSERT(X) \
+do { \
+ if (unlikely(!(X))) { \
+ printk(KERN_ERR "\n"); \
+ printk(KERN_ERR "FS-Cache: Assertion failed\n"); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTCMP(X, OP, Y) \
+do { \
+ if (unlikely(!((X) OP (Y)))) { \
+ printk(KERN_ERR "\n"); \
+ printk(KERN_ERR "FS-Cache: Assertion failed\n"); \
+ printk(KERN_ERR "%lx " #OP " %lx is false\n", \
+ (unsigned long)(X), (unsigned long)(Y)); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTIF(C, X) \
+do { \
+ if (unlikely((C) && !(X))) { \
+ printk(KERN_ERR "\n"); \
+ printk(KERN_ERR "FS-Cache: Assertion failed\n"); \
+ BUG(); \
+ } \
+} while (0)
+
+#define ASSERTIFCMP(C, X, OP, Y) \
+do { \
+ if (unlikely((C) && !((X) OP (Y)))) { \
+ printk(KERN_ERR "\n"); \
+ printk(KERN_ERR "FS-Cache: Assertion failed\n"); \
+ printk(KERN_ERR "%lx " #OP " %lx is false\n", \
+ (unsigned long)(X), (unsigned long)(Y)); \
+ BUG(); \
+ } \
+} while (0)
+
+#else
+
+#define ASSERT(X) do {} while (0)
+#define ASSERTCMP(X, OP, Y) do {} while (0)
+#define ASSERTIF(C, X) do {} while (0)
+#define ASSERTIFCMP(C, X, OP, Y) do {} while (0)
+
+#endif /* assert or not */
diff --git a/fs/fscache/fsc-main.c b/fs/fscache/fsc-main.c
new file mode 100644
index 000000000000..714c0ef2978c
--- /dev/null
+++ b/fs/fscache/fsc-main.c
@@ -0,0 +1,124 @@
+/* General filesystem local caching manager
+ *
+ * Copyright (C) 2004-2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define FSCACHE_DEBUG_LEVEL CACHE
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include "fsc-internal.h"
+
+MODULE_DESCRIPTION("FS Cache Manager");
+MODULE_AUTHOR("Red Hat, Inc.");
+MODULE_LICENSE("GPL");
+
+unsigned fscache_defer_lookup = 1;
+module_param_named(defer_lookup, fscache_defer_lookup, uint,
+ S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(fscache_defer_lookup,
+ "Defer cookie lookup to background thread");
+
+unsigned fscache_defer_create = 1;
+module_param_named(defer_create, fscache_defer_create, uint,
+ S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(fscache_defer_create,
+ "Defer cookie creation to background thread");
+
+unsigned fscache_debug;
+module_param_named(debug, fscache_debug, uint,
+ S_IWUSR | S_IRUGO);
+MODULE_PARM_DESC(fscache_debug,
+ "FS-Cache debugging mask");
+
+struct kobject *fscache_root;
+
+/*
+ * initialise the fs caching module
+ */
+static int __init fscache_init(void)
+{
+ int ret;
+
+ ret = slow_work_register_user();
+ if (ret < 0)
+ goto error_slow_work;
+
+ ret = fscache_proc_init();
+ if (ret < 0)
+ goto error_proc;
+
+ fscache_cookie_jar = kmem_cache_create("fscache_cookie_jar",
+ sizeof(struct fscache_cookie),
+ 0,
+ 0,
+ fscache_cookie_init_once);
+ if (!fscache_cookie_jar) {
+ printk(KERN_NOTICE
+ "FS-Cache: Failed to allocate a cookie jar\n");
+ ret = -ENOMEM;
+ goto error_cookie_jar;
+ }
+
+ fscache_root = kobject_create_and_add("fscache", kernel_kobj);
+ if (!fscache_root) {
+ ret = -ENOMEM;
+ goto error_kobj;
+ }
+
+ printk(KERN_NOTICE "FS-Cache: Loaded\n");
+ return 0;
+
+error_kobj:
+ kmem_cache_destroy(fscache_cookie_jar);
+error_cookie_jar:
+ fscache_proc_cleanup();
+error_proc:
+ slow_work_unregister_user();
+error_slow_work:
+ return ret;
+}
+
+fs_initcall(fscache_init);
+
+/*
+ * clean up on module removal
+ */
+static void __exit fscache_exit(void)
+{
+ _enter("");
+
+ kobject_put(fscache_root);
+ kmem_cache_destroy(fscache_cookie_jar);
+ fscache_proc_cleanup();
+ slow_work_unregister_user();
+ printk(KERN_NOTICE "FS-Cache: Unloaded\n");
+}
+
+module_exit(fscache_exit);
+
+/*
+ * wait_on_bit() sleep function for uninterruptible waiting
+ */
+int fscache_wait_bit(void *flags)
+{
+ schedule();
+ return 0;
+}
+EXPORT_SYMBOL(fscache_wait_bit);
+
+/*
+ * wait_on_bit() sleep function for interruptible waiting
+ */
+int fscache_wait_bit_interruptible(void *flags)
+{
+ schedule();
+ return signal_pending(current);
+}
+EXPORT_SYMBOL(fscache_wait_bit_interruptible);
diff --git a/fs/fscache/fsc-netfs.c b/fs/fscache/fsc-netfs.c
new file mode 100644
index 000000000000..7f871f16d0ee
--- /dev/null
+++ b/fs/fscache/fsc-netfs.c
@@ -0,0 +1,103 @@
+/* FS-Cache netfs (client) registration
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#define FSCACHE_DEBUG_LEVEL COOKIE
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "fsc-internal.h"
+
+static LIST_HEAD(fscache_netfs_list);
+
+/*
+ * register a network filesystem for caching
+ */
+int __fscache_register_netfs(struct fscache_netfs *netfs)
+{
+ struct fscache_netfs *ptr;
+ int ret;
+
+ _enter("{%s}", netfs->name);
+
+ INIT_LIST_HEAD(&netfs->link);
+
+ /* allocate a cookie for the primary index */
+ netfs->primary_index =
+ kmem_cache_zalloc(fscache_cookie_jar, GFP_KERNEL);
+
+ if (!netfs->primary_index) {
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ /* initialise the primary index cookie */
+ atomic_set(&netfs->primary_index->usage, 1);
+ atomic_set(&netfs->primary_index->n_children, 0);
+
+ netfs->primary_index->def = &fscache_fsdef_netfs_def;
+ netfs->primary_index->parent = &fscache_fsdef_index;
+ netfs->primary_index->netfs_data = netfs;
+
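+ /* the primary index is a child of the FSDEF index, which it pins */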
+ atomic_inc(&netfs->primary_index->parent->usage);
+ atomic_inc(&netfs->primary_index->parent->n_children);
+
+ spin_lock_init(&netfs->primary_index->lock);
+ INIT_HLIST_HEAD(&netfs->primary_index->backing_objects);
+
+ /* check the netfs type is not already present */
+ down_write(&fscache_addremove_sem);
+
+ ret = -EEXIST;
+ list_for_each_entry(ptr, &fscache_netfs_list, link) {
+ if (strcmp(ptr->name, netfs->name) == 0)
+ goto already_registered;
+ }
+
+ list_add(&netfs->link, &fscache_netfs_list);
+ ret = 0;
+
+ printk(KERN_NOTICE "FS-Cache: Netfs '%s' registered for caching\n",
+ netfs->name);
+
+already_registered:
+ up_write(&fscache_addremove_sem);
+
+ if (ret < 0) {
+ netfs->primary_index->parent = NULL;
+ __fscache_cookie_put(netfs->primary_index);
+ netfs->primary_index = NULL;
+ }
+
+ _leave(" = %d", ret);
+ return ret;
+}
+EXPORT_SYMBOL(__fscache_register_netfs);
+
+/*
+ * unregister a network filesystem from the cache
+ * - all cookies must have been released first
+ */
+void __fscache_unregister_netfs(struct fscache_netfs *netfs)
+{
+ _enter("{%s.%u}", netfs->name, netfs->version);
+
+ down_write(&fscache_addremove_sem);
+
+ list_del(&netfs->link);
+ fscache_relinquish_cookie(netfs->primary_index, 0);
+
+ up_write(&fscache_addremove_sem);
+
+ printk(KERN_NOTICE "FS-Cache: Netfs '%s' unregistered from caching\n",
+ netfs->name);
+
+ _leave("");
+}
+EXPORT_SYMBOL(__fscache_unregister_netfs);
diff --git a/fs/fscache/fsc-object.c b/fs/fscache/fsc-object.c
new file mode 100644
index 000000000000..46a3486e9dfe
--- /dev/null
+++ b/fs/fscache/fsc-object.c
@@ -0,0 +1,810 @@
+/* FS-Cache object state machine handler
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * See Documentation/filesystems/caching/object.txt for a description of the
+ * object state machine and the in-kernel representations.
+ */
+
+#define FSCACHE_DEBUG_LEVEL COOKIE
+#include <linux/module.h>
+#include "fsc-internal.h"
+
+const char *fscache_object_states[] = {
+ [FSCACHE_OBJECT_INIT] = "OBJECT_INIT",
+ [FSCACHE_OBJECT_LOOKING_UP] = "OBJECT_LOOKING_UP",
+ [FSCACHE_OBJECT_CREATING] = "OBJECT_CREATING",
+ [FSCACHE_OBJECT_AVAILABLE] = "OBJECT_AVAILABLE",
+ [FSCACHE_OBJECT_ACTIVE] = "OBJECT_ACTIVE",
+ [FSCACHE_OBJECT_UPDATING] = "OBJECT_UPDATING",
+ [FSCACHE_OBJECT_DYING] = "OBJECT_DYING",
+ [FSCACHE_OBJECT_LC_DYING] = "OBJECT_LC_DYING",
+ [FSCACHE_OBJECT_ABORT_INIT] = "OBJECT_ABORT_INIT",
+ [FSCACHE_OBJECT_RELEASING] = "OBJECT_RELEASING",
+ [FSCACHE_OBJECT_RECYCLING] = "OBJECT_RECYCLING",
+ [FSCACHE_OBJECT_WITHDRAWING] = "OBJECT_WITHDRAWING",
+ [FSCACHE_OBJECT_DEAD] = "OBJECT_DEAD",
+};
+EXPORT_SYMBOL(fscache_object_states);
+
+static void fscache_object_slow_work_put_ref(struct slow_work *);
+static int fscache_object_slow_work_get_ref(struct slow_work *);
+static void fscache_object_slow_work_execute(struct slow_work *);
+static void fscache_initialise_object(struct fscache_object *);
+static void fscache_lookup_object(struct fscache_object *);
+static void fscache_object_available(struct fscache_object *);
+static void fscache_release_object(struct fscache_object *);
+static void fscache_withdraw_object(struct fscache_object *);
+static void fscache_enqueue_dependents(struct fscache_object *);
+static void fscache_dequeue_object(struct fscache_object *);
+
+const struct slow_work_ops fscache_object_slow_work_ops = {
+ .get_ref = fscache_object_slow_work_get_ref,
+ .put_ref = fscache_object_slow_work_put_ref,
+ .execute = fscache_object_slow_work_execute,
+};
+EXPORT_SYMBOL(fscache_object_slow_work_ops);
+
+/*
+ * notify the parent when one of the operations we had outstanding upon it
+ * completes
+ */
+static inline void fscache_done_parent_op(struct fscache_object *object)
+{
+ struct fscache_object *parent = object->parent;
+
+ _enter("OBJ%x {OBJ%x,%x}",
+ object->debug_id, parent->debug_id, parent->n_ops);
+
+ spin_lock_nested(&parent->lock, 1);
+ parent->n_ops--;
+ parent->n_obj_ops--;
+ if (parent->n_ops == 0)
+ fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
+ spin_unlock(&parent->lock);
+}
+
+/*
+ * process events that have been sent to an object's state machine
+ * - initiates parent lookup
+ * - does object lookup
+ * - does object creation
+ * - does object recycling and retirement
+ * - does object withdrawal
+ */
+static void fscache_object_state_machine(struct fscache_object *object)
+{
+ enum fscache_object_state new_state;
+
+ ASSERT(object != NULL);
+
+ _enter("{OBJ%x,%s,%lx}",
+ object->debug_id, fscache_object_states[object->state],
+ object->events);
+
+ switch (object->state) {
+ /* wait for the parent object to become ready */
+ case FSCACHE_OBJECT_INIT:
+ object->event_mask =
+ ULONG_MAX & ~(1 << FSCACHE_OBJECT_EV_CLEARED);
+ fscache_initialise_object(object);
+ goto done;
+
+ /* look up the object metadata on disk */
+ case FSCACHE_OBJECT_LOOKING_UP:
+ fscache_lookup_object(object);
+ goto lookup_transit;
+
+ /* create the object metadata on disk */
+ case FSCACHE_OBJECT_CREATING:
+ fscache_lookup_object(object);
+ goto lookup_transit;
+
+ /* handle an object becoming available; start pending
+ * operations and queue dependent operations for processing */
+ case FSCACHE_OBJECT_AVAILABLE:
+ fscache_object_available(object);
+ goto active_transit;
+
+ /* normal running state */
+ case FSCACHE_OBJECT_ACTIVE:
+ goto active_transit;
+
+ /* update the object metadata on disk */
+ case FSCACHE_OBJECT_UPDATING:
+ clear_bit(FSCACHE_OBJECT_EV_UPDATE, &object->events);
+ fscache_stat(&fscache_n_updates_run);
+ object->cache->ops->update_object(object);
+ goto active_transit;
+
+ /* handle an object dying during lookup or creation */
+ case FSCACHE_OBJECT_LC_DYING:
+ object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
+ object->cache->ops->lookup_complete(object);
+
+ spin_lock(&object->lock);
+ object->state = FSCACHE_OBJECT_DYING;
+ if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
+ &object->cookie->flags))
+ wake_up_bit(&object->cookie->flags,
+ FSCACHE_COOKIE_CREATING);
+ spin_unlock(&object->lock);
+
+ fscache_done_parent_op(object);
+
+ /* wait for completion of all active operations on this object
+ * and the death of all child objects of this object */
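+ /* fall through */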
+ case FSCACHE_OBJECT_DYING:
+ dying:
+ clear_bit(FSCACHE_OBJECT_EV_CLEARED, &object->events);
+ spin_lock(&object->lock);
+ _debug("dying OBJ%x {%d,%d}",
+ object->debug_id, object->n_ops, object->n_children);
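+ /* once the object is quiescent, listen only for terminal
+ * instructions; until then, wait for outstanding ops and child
+ * objects to be cleared away */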
+ if (object->n_ops == 0 && object->n_children == 0) {
+ object->event_mask &=
+ ~(1 << FSCACHE_OBJECT_EV_CLEARED);
+ object->event_mask |=
+ (1 << FSCACHE_OBJECT_EV_WITHDRAW) |
+ (1 << FSCACHE_OBJECT_EV_RETIRE) |
+ (1 << FSCACHE_OBJECT_EV_RELEASE) |
+ (1 << FSCACHE_OBJECT_EV_ERROR);
+ } else {
+ object->event_mask &=
+ ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) |
+ (1 << FSCACHE_OBJECT_EV_RETIRE) |
+ (1 << FSCACHE_OBJECT_EV_RELEASE) |
+ (1 << FSCACHE_OBJECT_EV_ERROR));
+ object->event_mask |=
+ 1 << FSCACHE_OBJECT_EV_CLEARED;
+ }
+ spin_unlock(&object->lock);
+ fscache_enqueue_dependents(object);
+ goto terminal_transit;
+
+ /* handle an abort during initialisation */
+ case FSCACHE_OBJECT_ABORT_INIT:
+ _debug("handle abort init %lx", object->events);
+ object->event_mask &= ~(1 << FSCACHE_OBJECT_EV_UPDATE);
+
+ spin_lock(&object->lock);
+ fscache_dequeue_object(object);
+
+ object->state = FSCACHE_OBJECT_DYING;
+ if (test_and_clear_bit(FSCACHE_COOKIE_CREATING,
+ &object->cookie->flags))
+ wake_up_bit(&object->cookie->flags,
+ FSCACHE_COOKIE_CREATING);
+ spin_unlock(&object->lock);
+ goto dying;
+
+ /* handle the netfs releasing an object and possibly marking it
+ * obsolete too */
+ case FSCACHE_OBJECT_RELEASING:
+ case FSCACHE_OBJECT_RECYCLING:
+ object->event_mask &=
+ ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) |
+ (1 << FSCACHE_OBJECT_EV_RETIRE) |
+ (1 << FSCACHE_OBJECT_EV_RELEASE) |
+ (1 << FSCACHE_OBJECT_EV_ERROR));
+ fscache_release_object(object);
+ spin_lock(&object->lock);
+ object->state = FSCACHE_OBJECT_DEAD;
+ spin_unlock(&object->lock);
+ fscache_stat(&fscache_n_object_dead);
+ goto terminal_transit;
+
+ /* handle the parent cache of this object being withdrawn from
+ * active service */
+ case FSCACHE_OBJECT_WITHDRAWING:
+ object->event_mask &=
+ ~((1 << FSCACHE_OBJECT_EV_WITHDRAW) |
+ (1 << FSCACHE_OBJECT_EV_RETIRE) |
+ (1 << FSCACHE_OBJECT_EV_RELEASE) |
+ (1 << FSCACHE_OBJECT_EV_ERROR));
+ fscache_withdraw_object(object);
+ spin_lock(&object->lock);
+ object->state = FSCACHE_OBJECT_DEAD;
+ spin_unlock(&object->lock);
+ fscache_stat(&fscache_n_object_dead);
+ goto terminal_transit;
+
+ /* complain about the object being woken up once it is
+ * deceased */
+ case FSCACHE_OBJECT_DEAD:
+ printk(KERN_ERR "FS-Cache:"
+ " Unexpected event in dead state %lx\n",
+ object->events & object->event_mask);
+ BUG();
+
+ default:
+ printk(KERN_ERR "FS-Cache: Unknown object state %u\n",
+ object->state);
+ BUG();
+ }
+
+ /* determine the transition from a lookup state */
+lookup_transit:
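+ /* fls() - 1 gives the highest-numbered pending event, or -1 if no
+ * events are pending */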
+ switch (fls(object->events & object->event_mask) - 1) {
+ case FSCACHE_OBJECT_EV_WITHDRAW:
+ case FSCACHE_OBJECT_EV_RETIRE:
+ case FSCACHE_OBJECT_EV_RELEASE:
+ case FSCACHE_OBJECT_EV_ERROR:
+ new_state = FSCACHE_OBJECT_LC_DYING;
+ goto change_state;
+ case FSCACHE_OBJECT_EV_REQUEUE:
+ goto done;
+ case -1:
+ goto done; /* sleep until event */
+ default:
+ goto unsupported_event;
+ }
+
+ /* determine the transition from an active state */
+active_transit:
+ switch (fls(object->events & object->event_mask) - 1) {
+ case FSCACHE_OBJECT_EV_WITHDRAW:
+ case FSCACHE_OBJECT_EV_RETIRE:
+ case FSCACHE_OBJECT_EV_RELEASE:
+ case FSCACHE_OBJECT_EV_ERROR:
+ new_state = FSCACHE_OBJECT_DYING;
+ goto change_state;
+ case FSCACHE_OBJECT_EV_UPDATE:
+ new_state = FSCACHE_OBJECT_UPDATING;
+ goto change_state;
+ case -1:
+ new_state = FSCACHE_OBJECT_ACTIVE;
+ goto change_state; /* sleep until event */
+ default:
+ goto unsupported_event;
+ }
+
+ /* determine the transition from a terminal state */
+terminal_transit:
+ switch (fls(object->events & object->event_mask) - 1) {
+ case FSCACHE_OBJECT_EV_WITHDRAW:
+ new_state = FSCACHE_OBJECT_WITHDRAWING;
+ goto change_state;
+ case FSCACHE_OBJECT_EV_RETIRE:
+ new_state = FSCACHE_OBJECT_RECYCLING;
+ goto change_state;
+ case FSCACHE_OBJECT_EV_RELEASE:
+ new_state = FSCACHE_OBJECT_RELEASING;
+ goto change_state;
+ case FSCACHE_OBJECT_EV_ERROR:
+ new_state = FSCACHE_OBJECT_WITHDRAWING;
+ goto change_state;
+ case FSCACHE_OBJECT_EV_CLEARED:
+ new_state = FSCACHE_OBJECT_DYING;
+ goto change_state;
+ case -1:
+ goto done; /* sleep until event */
+ default:
+ goto unsupported_event;
+ }
+
+change_state:
+ spin_lock(&object->lock);
+ object->state = new_state;
+ spin_unlock(&object->lock);
+
+done:
+ _leave(" [->%s]", fscache_object_states[object->state]);
+ return;
+
+unsupported_event:
+ printk(KERN_ERR "FS-Cache:"
+ " Unsupported event %lx [mask %lx] in state %s\n",
+ object->events, object->event_mask,
+ fscache_object_states[object->state]);
+ BUG();
+}
+
+/*
+ * execute an object
+ */
+static void fscache_object_slow_work_execute(struct slow_work *work)
+{
+ struct fscache_object *object =
+ container_of(work, struct fscache_object, work);
+ unsigned long start;
+
+ _enter("{OBJ%x}", object->debug_id);
+
+ clear_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+
+ start = jiffies;
+ fscache_object_state_machine(object);
+ fscache_hist(fscache_objs_histogram, start);
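+ /* if further events were raised whilst we were busy, requeue the
+ * object for another pass */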
+ if (object->events & object->event_mask)
+ fscache_enqueue_object(object);
+}
+
+/*
+ * initialise an object
+ * - check the specified object's parent to see if we can make use of it
+ * immediately to do a creation
+ * - we may need to start the process of creating a parent and we need to wait
+ * for the parent's lookup and creation to complete if it's not there yet
+ * - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the
+ * leaf-most cookies of the object and all its children
+ */
+static void fscache_initialise_object(struct fscache_object *object)
+{
+ struct fscache_object *parent;
+
+ _enter("");
+ ASSERT(object->cookie != NULL);
+ ASSERT(object->cookie->parent != NULL);
+ ASSERT(list_empty(&object->work.link));
+
+ if (object->events & ((1 << FSCACHE_OBJECT_EV_ERROR) |
+ (1 << FSCACHE_OBJECT_EV_RELEASE) |
+ (1 << FSCACHE_OBJECT_EV_RETIRE) |
+ (1 << FSCACHE_OBJECT_EV_WITHDRAW))) {
+ _debug("abort init %lx", object->events);
+ spin_lock(&object->lock);
+ object->state = FSCACHE_OBJECT_ABORT_INIT;
+ spin_unlock(&object->lock);
+ return;
+ }
+
+ spin_lock(&object->cookie->lock);
+ spin_lock_nested(&object->cookie->parent->lock, 1);
+
+ parent = object->parent;
+ if (!parent) {
+ _debug("no parent");
+ set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
+ } else {
+ spin_lock(&object->lock);
+ spin_lock_nested(&parent->lock, 1);
+ _debug("parent %s", fscache_object_states[parent->state]);
+
+ if (parent->state >= FSCACHE_OBJECT_DYING) {
+ _debug("bad parent");
+ set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
+ } else if (parent->state < FSCACHE_OBJECT_AVAILABLE) {
+ _debug("wait");
+
+ /* we may get woken up in this state by child objects
+ * binding on to us, so we need to make sure we don't
+ * add ourself to the list multiple times */
+ if (list_empty(&object->dep_link)) {
+ object->cache->ops->grab_object(object);
+ list_add(&object->dep_link,
+ &parent->dependents);
+
+ /* fscache_acquire_non_index_cookie() uses this
+ * to wake the chain up */
+ if (parent->state == FSCACHE_OBJECT_INIT)
+ fscache_enqueue_object(parent);
+ }
+ } else {
+ _debug("go");
+ parent->n_ops++;
+ parent->n_obj_ops++;
+ object->lookup_jif = jiffies;
+ object->state = FSCACHE_OBJECT_LOOKING_UP;
+ set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+ }
+
+ spin_unlock(&parent->lock);
+ spin_unlock(&object->lock);
+ }
+
+ spin_unlock(&object->cookie->parent->lock);
+ spin_unlock(&object->cookie->lock);
+ _leave("");
+}
+
+/*
+ * look an object up in the cache from which it was allocated
+ * - we hold an "access lock" on the parent object, so the parent object cannot
+ * be withdrawn by either party till we've finished
+ * - an object's cookie is pinned until we clear FSCACHE_COOKIE_CREATING on the
+ * leaf-most cookies of the object and all its children
+ */
+static void fscache_lookup_object(struct fscache_object *object)
+{
+ struct fscache_cookie *cookie = object->cookie;
+ struct fscache_object *parent;
+
+ _enter("");
+
+ parent = object->parent;
+ ASSERT(parent != NULL);
+ ASSERTCMP(parent->n_ops, >, 0);
+ ASSERTCMP(parent->n_obj_ops, >, 0);
+
+ /* make sure the parent is still available */
+ ASSERTCMP(parent->state, >=, FSCACHE_OBJECT_AVAILABLE);
+
+ if (parent->state >= FSCACHE_OBJECT_DYING ||
+ test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
+ _debug("unavailable");
+ set_bit(FSCACHE_OBJECT_EV_WITHDRAW, &object->events);
+ _leave("");
+ return;
+ }
+
+ _debug("LOOKUP \"%s/%s\" in \"%s\"",
+ parent->cookie->def->name, cookie->def->name,
+ object->cache->tag->name);
+
+ fscache_stat(&fscache_n_object_lookups);
+ object->cache->ops->lookup_object(object);
+
+ if (test_bit(FSCACHE_OBJECT_EV_ERROR, &object->events))
+ set_bit(FSCACHE_COOKIE_UNAVAILABLE, &cookie->flags);
+
+ _leave("");
+}
+
+/**
+ * fscache_object_lookup_negative - Note negative cookie lookup
+ * @object: Object pointing to cookie to mark
+ *
+ * Note negative lookup, permitting those waiting to read data from an already
+ * existing backing object to continue as there's no data for them to read.
+ */
+void fscache_object_lookup_negative(struct fscache_object *object)
+{
+ struct fscache_cookie *cookie = object->cookie;
+
+ _enter("{OBJ%x,%s}",
+ object->debug_id, fscache_object_states[object->state]);
+
+ spin_lock(&object->lock);
+ if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
+ fscache_stat(&fscache_n_object_lookups_negative);
+
+ /* transit here to allow write requests to begin stacking up
+ * and read requests to begin returning ENODATA */
+ object->state = FSCACHE_OBJECT_CREATING;
+ spin_unlock(&object->lock);
+
+ set_bit(FSCACHE_COOKIE_PENDING_FILL, &cookie->flags);
+ set_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+
+ _debug("wake up lookup %p", &cookie->flags);
+ smp_mb__before_clear_bit();
+ clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
+ set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+ } else {
+ ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
+ spin_unlock(&object->lock);
+ }
+
+ _leave("");
+}
+EXPORT_SYMBOL(fscache_object_lookup_negative);
+
+/**
+ * fscache_obtained_object - Note successful object lookup or creation
+ * @object: Object pointing to cookie to mark
+ *
+ * Note successful lookup and/or creation, permitting those waiting to write
+ * data to a backing object to continue.
+ *
+ * Note that after calling this, an object's cookie may be relinquished by the
+ * netfs, and so must be accessed with object lock held.
+ */
+void fscache_obtained_object(struct fscache_object *object)
+{
+ struct fscache_cookie *cookie = object->cookie;
+
+ _enter("{OBJ%x,%s}",
+ object->debug_id, fscache_object_states[object->state]);
+
+ /* if we were still looking up, then we must have a positive lookup
+ * result, in which case there may be data available */
+ spin_lock(&object->lock);
+ if (object->state == FSCACHE_OBJECT_LOOKING_UP) {
+ fscache_stat(&fscache_n_object_lookups_positive);
+
+ clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+
+ object->state = FSCACHE_OBJECT_AVAILABLE;
+ spin_unlock(&object->lock);
+
+ smp_mb__before_clear_bit();
+ clear_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP);
+ set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+ } else {
+ ASSERTCMP(object->state, ==, FSCACHE_OBJECT_CREATING);
+ fscache_stat(&fscache_n_object_created);
+
+ object->state = FSCACHE_OBJECT_AVAILABLE;
+ spin_unlock(&object->lock);
+ set_bit(FSCACHE_OBJECT_EV_REQUEUE, &object->events);
+ smp_wmb();
+ }
+
+ if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &cookie->flags))
+ wake_up_bit(&cookie->flags, FSCACHE_COOKIE_CREATING);
+
+ _leave("");
+}
+EXPORT_SYMBOL(fscache_obtained_object);
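+
+/* Sketch of how a cache backend's ->lookup_object() op might use the two
+ * notifications above (the mycache_* names are purely illustrative):
+ *
+ *	static void mycache_lookup_object(struct fscache_object *object)
+ *	{
+ *		if (!mycache_look_up_on_disk(object)) {
+ *			fscache_object_lookup_negative(object);
+ *			mycache_create_on_disk(object);
+ *		}
+ *		fscache_obtained_object(object);
+ *	}
+ */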
+
+/*
+ * handle an object that has just become available
+ */
+static void fscache_object_available(struct fscache_object *object)
+{
+ _enter("{OBJ%x}", object->debug_id);
+
+ spin_lock(&object->lock);
+
+ if (test_and_clear_bit(FSCACHE_COOKIE_CREATING, &object->cookie->flags))
+ wake_up_bit(&object->cookie->flags, FSCACHE_COOKIE_CREATING);
+
+ fscache_done_parent_op(object);
+ if (object->n_in_progress == 0) {
+ if (object->n_ops > 0) {
+ ASSERTCMP(object->n_ops, >=, object->n_obj_ops);
+ ASSERTIF(object->n_ops > object->n_obj_ops,
+ !list_empty(&object->pending_ops));
+ fscache_start_operations(object);
+ } else {
+ ASSERT(list_empty(&object->pending_ops));
+ }
+ }
+ spin_unlock(&object->lock);
+
+ object->cache->ops->lookup_complete(object);
+ fscache_enqueue_dependents(object);
+
+ fscache_hist(fscache_obj_instantiate_histogram, object->lookup_jif);
+ fscache_stat(&fscache_n_object_avail);
+
+ _leave("");
+}
+
+/*
+ * drop an object's attachments
+ */
+static void fscache_drop_object(struct fscache_object *object)
+{
+ struct fscache_object *parent = object->parent;
+ struct fscache_cache *cache = object->cache;
+
+ _enter("{OBJ%x,%d}", object->debug_id, object->n_children);
+
+ spin_lock(&cache->object_list_lock);
+ list_del_init(&object->cache_link);
+ spin_unlock(&cache->object_list_lock);
+
+ cache->ops->drop_object(object);
+
+ if (parent) {
+ _debug("release parent OBJ%x {%d}",
+ parent->debug_id, parent->n_children);
+
+ spin_lock(&parent->lock);
+ parent->n_children--;
+ if (parent->n_children == 0)
+ fscache_raise_event(parent, FSCACHE_OBJECT_EV_CLEARED);
+ spin_unlock(&parent->lock);
+ object->parent = NULL;
+ }
+
+ /* this just shifts the object release to the slow work processor */
+ object->cache->ops->put_object(object);
+
+ _leave("");
+}
+
+/*
+ * release or recycle an object that the netfs has discarded
+ */
+static void fscache_release_object(struct fscache_object *object)
+{
+ _enter("");
+
+ fscache_drop_object(object);
+}
+
+/*
+ * withdraw an object from active service
+ */
+static void fscache_withdraw_object(struct fscache_object *object)
+{
+ struct fscache_cookie *cookie;
+ bool detached;
+
+ _enter("");
+
+ spin_lock(&object->lock);
+ cookie = object->cookie;
+ if (cookie) {
+ /* need to get the cookie lock before the object lock, starting
+ * from the object pointer */
+ atomic_inc(&cookie->usage);
+ spin_unlock(&object->lock);
+
+ detached = false;
+ spin_lock(&cookie->lock);
+ spin_lock(&object->lock);
+
+ if (object->cookie == cookie) {
+ hlist_del_init(&object->cookie_link);
+ object->cookie = NULL;
+ detached = true;
+ }
+ spin_unlock(&cookie->lock);
+ fscache_cookie_put(cookie);
+ if (detached)
+ fscache_cookie_put(cookie);
+ }
+
+ spin_unlock(&object->lock);
+
+ fscache_drop_object(object);
+}
+
+/*
+ * withdraw an object from active service at the behest of the cache
+ * - need to break the links to a cached object's cookie
+ * - called under two situations:
+ * (1) recycler decides to reclaim an in-use object
+ * (2) a cache is unmounted
+ * - have to take care as the netfs may be relinquishing the cookie
+ *   simultaneously
+ * - the object is pinned by the caller holding a refcount on it
+ */
+void fscache_withdrawing_object(struct fscache_cache *cache,
+ struct fscache_object *object)
+{
+ bool enqueue = false;
+
+ _enter(",OBJ%x", object->debug_id);
+
+ spin_lock(&object->lock);
+ if (object->state < FSCACHE_OBJECT_WITHDRAWING) {
+ object->state = FSCACHE_OBJECT_WITHDRAWING;
+ enqueue = true;
+ }
+ spin_unlock(&object->lock);
+
+ if (enqueue)
+ fscache_enqueue_object(object);
+
+ _leave("");
+}
+
+/*
+ * allow the slow work item processor to get a ref on an object
+ */
+static int fscache_object_slow_work_get_ref(struct slow_work *work)
+{
+ struct fscache_object *object =
+ container_of(work, struct fscache_object, work);
+
+ return object->cache->ops->grab_object(object) ? 0 : -EAGAIN;
+}
+
+/*
+ * allow the slow work item processor to discard a ref on a work item
+ */
+static void fscache_object_slow_work_put_ref(struct slow_work *work)
+{
+ struct fscache_object *object =
+ container_of(work, struct fscache_object, work);
+
+ object->cache->ops->put_object(object);
+}
+
+/*
+ * enqueue an object for metadata-type processing
+ */
+void fscache_enqueue_object(struct fscache_object *object)
+{
+ _enter("{OBJ%x}", object->debug_id);
+
+ slow_work_enqueue(&object->work);
+}
+
+/*
+ * enqueue the dependents of an object for metadata-type processing
+ * - the object's lock is taken here, so the caller must not hold it
+ * - this may cause an already locked object to wind up being processed again
+ */
+static void fscache_enqueue_dependents(struct fscache_object *object)
+{
+ struct fscache_object *dep;
+
+ _enter("{OBJ%x}", object->debug_id);
+
+ if (list_empty(&object->dependents))
+ return;
+
+ spin_lock(&object->lock);
+
+ while (!list_empty(&object->dependents)) {
+ dep = list_entry(object->dependents.next,
+ struct fscache_object, dep_link);
+ list_del_init(&dep->dep_link);
+
+ /* sort onto appropriate lists */
+ fscache_enqueue_object(dep);
+ dep->cache->ops->put_object(dep);
+
+ if (!list_empty(&object->dependents))
+ cond_resched_lock(&object->lock);
+ }
+
+ spin_unlock(&object->lock);
+}
+
+/*
+ * remove an object from whatever queue it's waiting on
+ * - the caller must hold object->lock
+ */
+void fscache_dequeue_object(struct fscache_object *object)
+{
+ _enter("{OBJ%x}", object->debug_id);
+
+ if (!list_empty(&object->dep_link)) {
+ spin_lock(&object->parent->lock);
+ list_del_init(&object->dep_link);
+ spin_unlock(&object->parent->lock);
+ }
+
+ _leave("");
+}
+
+/**
+ * fscache_check_aux - Ask the netfs whether an object on disk is still valid
+ * @object: The object to ask about
+ * @data: The auxiliary data for the object
+ * @datalen: The size of the auxiliary data
+ *
+ * This function consults the netfs about the coherency state of an object
+ */
+enum fscache_checkaux fscache_check_aux(struct fscache_object *object,
+ const void *data, uint16_t datalen)
+{
+ enum fscache_checkaux result;
+
+ if (!object->cookie->def->check_aux) {
+ fscache_stat(&fscache_n_checkaux_none);
+ return FSCACHE_CHECKAUX_OKAY;
+ }
+
+ result = object->cookie->def->check_aux(object->cookie->netfs_data,
+ data, datalen);
+ switch (result) {
+ /* entry okay as is */
+ case FSCACHE_CHECKAUX_OKAY:
+ fscache_stat(&fscache_n_checkaux_okay);
+ break;
+
+ /* entry requires update */
+ case FSCACHE_CHECKAUX_NEEDS_UPDATE:
+ fscache_stat(&fscache_n_checkaux_update);
+ break;
+
+ /* entry requires deletion */
+ case FSCACHE_CHECKAUX_OBSOLETE:
+ fscache_stat(&fscache_n_checkaux_obsolete);
+ break;
+
+ default:
+ BUG();
+ }
+
+ return result;
+}
+EXPORT_SYMBOL(fscache_check_aux);
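+
+/* Sketch of the netfs side of the above: a check_aux callback comparing an
+ * invented version number kept in the netfs's own inode (the myfs_* names
+ * are hypothetical):
+ *
+ *	static enum fscache_checkaux myfs_check_aux(void *netfs_data,
+ *						    const void *data,
+ *						    uint16_t datalen)
+ *	{
+ *		struct myfs_inode *mi = netfs_data;
+ *
+ *		if (datalen != sizeof(mi->data_version) ||
+ *		    memcmp(data, &mi->data_version, datalen) != 0)
+ *			return FSCACHE_CHECKAUX_OBSOLETE;
+ *		return FSCACHE_CHECKAUX_OKAY;
+ *	}
+ */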
diff --git a/fs/fscache/fsc-operation.c b/fs/fscache/fsc-operation.c
new file mode 100644
index 000000000000..0ee9c4e7ba1b
--- /dev/null
+++ b/fs/fscache/fsc-operation.c
@@ -0,0 +1,459 @@
+/* FS-Cache worker operation management routines
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * See Documentation/filesystems/caching/operations.txt
+ */
+
+#define FSCACHE_DEBUG_LEVEL OPERATION
+#include <linux/module.h>
+#include "fsc-internal.h"
+
+atomic_t fscache_op_debug_id;
+EXPORT_SYMBOL(fscache_op_debug_id);
+
+/**
+ * fscache_enqueue_operation - Enqueue an operation for processing
+ * @op: The operation to enqueue
+ *
+ * Enqueue an operation for processing by the FS-Cache thread pool.
+ *
+ * This will get its own ref on the object.
+ */
+void fscache_enqueue_operation(struct fscache_operation *op)
+{
+ _enter("{OBJ%x OP%x,%u}",
+ op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+
+ ASSERT(op->processor != NULL);
+ ASSERTCMP(op->object->state, >=, FSCACHE_OBJECT_AVAILABLE);
+ ASSERTCMP(atomic_read(&op->usage), >, 0);
+
+ if (list_empty(&op->pend_link)) {
+ switch (op->flags & FSCACHE_OP_TYPE) {
+ case FSCACHE_OP_FAST:
+ _debug("queue fast");
+ atomic_inc(&op->usage);
+ if (!schedule_work(&op->fast_work))
+ fscache_put_operation(op);
+ break;
+ case FSCACHE_OP_SLOW:
+ _debug("queue slow");
+ slow_work_enqueue(&op->slow_work);
+ break;
+ case FSCACHE_OP_MYTHREAD:
+ _debug("queue for caller's attention");
+ break;
+ default:
+ printk(KERN_ERR "FS-Cache: Unexpected op type %lx\n",
+ op->flags);
+ BUG();
+ break;
+ }
+ fscache_stat(&fscache_n_op_enqueue);
+ }
+}
+EXPORT_SYMBOL(fscache_enqueue_operation);
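+
+/* For reference, the op types set up elsewhere in this patch:
+ * - attribute-change ops: FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE)
+ * - retrieval ops:        FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING)
+ * - storage ops:          FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING)
+ */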
+
+/*
+ * start an op running
+ */
+static void fscache_run_op(struct fscache_object *object,
+ struct fscache_operation *op)
+{
+ object->n_in_progress++;
+ if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
+ wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+ if (op->processor)
+ fscache_enqueue_operation(op);
+ fscache_stat(&fscache_n_op_run);
+}
+
+/*
+ * submit an exclusive operation for an object
+ * - other ops are excluded from running simultaneously with this one
+ * - this gets any extra refs it needs on an op
+ */
+int fscache_submit_exclusive_op(struct fscache_object *object,
+ struct fscache_operation *op)
+{
+ int ret;
+
+ _enter("{OBJ%x OP%x},", object->debug_id, op->debug_id);
+
+ spin_lock(&object->lock);
+ ASSERTCMP(object->n_ops, >=, object->n_in_progress);
+ ASSERTCMP(object->n_ops, >=, object->n_exclusive);
+
+ ret = -ENOBUFS;
+ if (fscache_object_is_active(object)) {
+ op->object = object;
+ object->n_ops++;
+ object->n_exclusive++; /* reads and writes must wait */
+
+ if (object->n_in_progress > 0) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+ fscache_stat(&fscache_n_op_pend);
+ } else if (!list_empty(&object->pending_ops)) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+ fscache_stat(&fscache_n_op_pend);
+ fscache_start_operations(object);
+ } else {
+ ASSERTCMP(object->n_in_progress, ==, 0);
+ fscache_run_op(object, op);
+ }
+
+ /* need to issue a new write op after this */
+ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+ ret = 0;
+ } else if (object->state == FSCACHE_OBJECT_CREATING) {
+ op->object = object;
+ object->n_ops++;
+ object->n_exclusive++; /* reads and writes must wait */
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+ fscache_stat(&fscache_n_op_pend);
+ ret = 0;
+ } else {
+ /* not allowed to submit ops in any other state */
+ BUG();
+ }
+
+ spin_unlock(&object->lock);
+ return ret;
+}
+
+/*
+ * report an unexpected submission
+ */
+static void fscache_report_unexpected_submission(struct fscache_object *object,
+ struct fscache_operation *op,
+ unsigned long ostate)
+{
+ static bool once_only;
+ struct fscache_operation *p;
+ unsigned n;
+
+ if (once_only)
+ return;
+ once_only = true;
+
+ kdebug("unexpected submission OP%x [OBJ%x %s]",
+ op->debug_id, object->debug_id,
+ fscache_object_states[object->state]);
+ kdebug("objstate=%s [%s]",
+ fscache_object_states[object->state],
+ fscache_object_states[ostate]);
+ kdebug("objflags=%lx", object->flags);
+ kdebug("objevent=%lx [%lx]", object->events, object->event_mask);
+ kdebug("ops=%u inp=%u exc=%u",
+ object->n_ops, object->n_in_progress, object->n_exclusive);
+
+ if (!list_empty(&object->pending_ops)) {
+ n = 0;
+ list_for_each_entry(p, &object->pending_ops, pend_link) {
+ ASSERTCMP(p->object, ==, object);
+ kdebug("%p %p", op->processor, op->release);
+ n++;
+ }
+
+ kdebug("n=%u", n);
+ }
+
+ dump_stack();
+}
+
+/*
+ * submit an operation for an object
+ * - objects may be submitted only in the following states:
+ * - during object creation (write ops may be submitted)
+ * - whilst the object is active
+ * - after an I/O error incurred in one of the two above states (op rejected)
+ * - this gets any extra refs it needs on an op
+ */
+int fscache_submit_op(struct fscache_object *object,
+ struct fscache_operation *op)
+{
+ unsigned long ostate;
+ int ret;
+
+ _enter("{OBJ%x OP%x},{%u}",
+ object->debug_id, op->debug_id, atomic_read(&op->usage));
+
+ ASSERTCMP(atomic_read(&op->usage), >, 0);
+
+ spin_lock(&object->lock);
+ ASSERTCMP(object->n_ops, >=, object->n_in_progress);
+ ASSERTCMP(object->n_ops, >=, object->n_exclusive);
+
+ ostate = object->state;
+ smp_rmb();
+
+ if (fscache_object_is_active(object)) {
+ op->object = object;
+ object->n_ops++;
+
+ if (object->n_exclusive > 0) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+ fscache_stat(&fscache_n_op_pend);
+ } else if (!list_empty(&object->pending_ops)) {
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+ fscache_stat(&fscache_n_op_pend);
+ fscache_start_operations(object);
+ } else {
+ ASSERTCMP(object->n_exclusive, ==, 0);
+ fscache_run_op(object, op);
+ }
+ ret = 0;
+ } else if (object->state == FSCACHE_OBJECT_CREATING) {
+ op->object = object;
+ object->n_ops++;
+ atomic_inc(&op->usage);
+ list_add_tail(&op->pend_link, &object->pending_ops);
+ fscache_stat(&fscache_n_op_pend);
+ ret = 0;
+ } else if (!test_bit(FSCACHE_IOERROR, &object->cache->flags)) {
+ fscache_report_unexpected_submission(object, op, ostate);
+ ASSERT(!fscache_object_is_active(object));
+ ret = -ENOBUFS;
+ } else {
+ ret = -ENOBUFS;
+ }
+
+ spin_unlock(&object->lock);
+ return ret;
+}
+
+/*
+ * queue an object for withdrawal on error, aborting all following asynchronous
+ * operations
+ */
+void fscache_abort_object(struct fscache_object *object)
+{
+ _enter("{OBJ%x}", object->debug_id);
+
+ fscache_raise_event(object, FSCACHE_OBJECT_EV_ERROR);
+}
+
+/*
+ * jump start the operation processing on an object
+ * - caller must hold object->lock
+ */
+void fscache_start_operations(struct fscache_object *object)
+{
+ struct fscache_operation *op;
+ bool stop = false;
+
+ while (!list_empty(&object->pending_ops) && !stop) {
+ op = list_entry(object->pending_ops.next,
+ struct fscache_operation, pend_link);
+
+ if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
+ if (object->n_in_progress > 0)
+ break;
+ stop = true;
+ }
+ list_del_init(&op->pend_link);
+ object->n_in_progress++;
+
+ if (test_and_clear_bit(FSCACHE_OP_WAITING, &op->flags))
+ wake_up_bit(&op->flags, FSCACHE_OP_WAITING);
+ if (op->processor)
+ fscache_enqueue_operation(op);
+
+ /* the pending queue was holding a ref on the op */
+ fscache_put_operation(op);
+ }
+
+ ASSERTCMP(object->n_in_progress, <=, object->n_ops);
+
+ _debug("woke %d ops on OBJ%x",
+ object->n_in_progress, object->debug_id);
+}
+
+/*
+ * release an operation
+ * - queues pending ops if this is the last in-progress op
+ */
+void fscache_put_operation(struct fscache_operation *op)
+{
+ struct fscache_object *object;
+ struct fscache_cache *cache;
+
+ _enter("{OBJ%x OP%x,%d}",
+ op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+
+ ASSERTCMP(atomic_read(&op->usage), >, 0);
+
+ if (!atomic_dec_and_test(&op->usage))
+ return;
+
+ _debug("PUT OP");
+ if (test_and_set_bit(FSCACHE_OP_DEAD, &op->flags))
+ BUG();
+
+ fscache_stat(&fscache_n_op_release);
+
+ if (op->release) {
+ op->release(op);
+ op->release = NULL;
+ }
+
+ object = op->object;
+
+ /* now... we may get called with the object spinlock held, so we
+ * complete the cleanup here only if we can immediately acquire the
+ * lock, and defer it otherwise */
+ if (!spin_trylock(&object->lock)) {
+ _debug("defer put");
+ fscache_stat(&fscache_n_op_deferred_release);
+
+ cache = object->cache;
+ spin_lock(&cache->op_gc_list_lock);
+ list_add_tail(&op->pend_link, &cache->op_gc_list);
+ spin_unlock(&cache->op_gc_list_lock);
+ schedule_work(&cache->op_gc);
+ _leave(" [defer]");
+ return;
+ }
+
+ if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
+ ASSERTCMP(object->n_exclusive, >, 0);
+ object->n_exclusive--;
+ }
+
+ ASSERTCMP(object->n_in_progress, >, 0);
+ object->n_in_progress--;
+ if (object->n_in_progress == 0)
+ fscache_start_operations(object);
+
+ ASSERTCMP(object->n_ops, >, 0);
+ object->n_ops--;
+ if (object->n_ops == 0)
+ fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
+
+ spin_unlock(&object->lock);
+
+ kfree(op);
+ _leave(" [done]");
+}
+EXPORT_SYMBOL(fscache_put_operation);
+
+/*
+ * garbage collect operations that have had their release deferred
+ */
+void fscache_operation_gc(struct work_struct *work)
+{
+ struct fscache_operation *op;
+ struct fscache_object *object;
+ struct fscache_cache *cache =
+ container_of(work, struct fscache_cache, op_gc);
+ int count = 0;
+
+ _enter("");
+
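+ /* process at most a small batch per invocation; anything left over
+ * gets the work item rescheduled below */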
+ do {
+ spin_lock(&cache->op_gc_list_lock);
+ if (list_empty(&cache->op_gc_list)) {
+ spin_unlock(&cache->op_gc_list_lock);
+ break;
+ }
+
+ op = list_entry(cache->op_gc_list.next,
+ struct fscache_operation, pend_link);
+ list_del(&op->pend_link);
+ spin_unlock(&cache->op_gc_list_lock);
+
+ object = op->object;
+
+ _debug("GC DEFERRED REL OBJ%x OP%x",
+ object->debug_id, op->debug_id);
+ fscache_stat(&fscache_n_op_gc);
+
+ ASSERTCMP(atomic_read(&op->usage), ==, 0);
+
+ spin_lock(&object->lock);
+ if (test_bit(FSCACHE_OP_EXCLUSIVE, &op->flags)) {
+ ASSERTCMP(object->n_exclusive, >, 0);
+ object->n_exclusive--;
+ }
+
+ ASSERTCMP(object->n_in_progress, >, 0);
+ object->n_in_progress--;
+ if (object->n_in_progress == 0)
+ fscache_start_operations(object);
+
+ ASSERTCMP(object->n_ops, >, 0);
+ object->n_ops--;
+ if (object->n_ops == 0)
+ fscache_raise_event(object, FSCACHE_OBJECT_EV_CLEARED);
+
+ spin_unlock(&object->lock);
+
+ } while (count++ < 20);
+
+ if (!list_empty(&cache->op_gc_list))
+ schedule_work(&cache->op_gc);
+
+ _leave("");
+}
+
+/*
+ * allow the slow work item processor to get a ref on an operation
+ */
+static int fscache_op_get_ref(struct slow_work *work)
+{
+ struct fscache_operation *op =
+ container_of(work, struct fscache_operation, slow_work);
+
+ atomic_inc(&op->usage);
+ return 0;
+}
+
+/*
+ * allow the slow work item processor to discard a ref on an operation
+ */
+static void fscache_op_put_ref(struct slow_work *work)
+{
+ struct fscache_operation *op =
+ container_of(work, struct fscache_operation, slow_work);
+
+ fscache_put_operation(op);
+}
+
+/*
+ * execute an operation using the slow thread pool to provide processing context
+ * - the caller holds a ref to this object, so we don't need to hold one
+ */
+static void fscache_op_execute(struct slow_work *work)
+{
+ struct fscache_operation *op =
+ container_of(work, struct fscache_operation, slow_work);
+ unsigned long start;
+
+ _enter("{OBJ%x OP%x,%d}",
+ op->object->debug_id, op->debug_id, atomic_read(&op->usage));
+
+ ASSERT(op->processor != NULL);
+ start = jiffies;
+ op->processor(op);
+ fscache_hist(fscache_ops_histogram, start);
+
+ _leave("");
+}
+
+const struct slow_work_ops fscache_op_slow_work_ops = {
+ .get_ref = fscache_op_get_ref,
+ .put_ref = fscache_op_put_ref,
+ .execute = fscache_op_execute,
+};
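+
+/* Operations set up with fscache_operation_init_slow() are expected to have
+ * their slow_work item bound to these ops, so slow_work_enqueue() on such an
+ * op ends up in fscache_op_execute() above. */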
diff --git a/fs/fscache/fsc-page.c b/fs/fscache/fsc-page.c
new file mode 100644
index 000000000000..54fc2b577049
--- /dev/null
+++ b/fs/fscache/fsc-page.c
@@ -0,0 +1,771 @@
+/* Cache page management and data I/O routines
+ *
+ * Copyright (C) 2004-2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define FSCACHE_DEBUG_LEVEL PAGE
+#include <linux/module.h>
+#include <linux/fscache-cache.h>
+#include <linux/buffer_head.h>
+#include <linux/pagevec.h>
+#include "fsc-internal.h"
+
+/*
+ * actually apply the changed attributes to a cache object
+ */
+static void fscache_attr_changed_op(struct fscache_operation *op)
+{
+ struct fscache_object *object = op->object;
+
+ _enter("{OBJ%x OP%x}", object->debug_id, op->debug_id);
+
+ fscache_stat(&fscache_n_attr_changed_calls);
+
+ if (fscache_object_is_active(object) &&
+ object->cache->ops->attr_changed(object) < 0)
+ fscache_abort_object(object);
+
+ _leave("");
+}
+
+/*
+ * notification that the attributes on an object have changed
+ */
+int __fscache_attr_changed(struct fscache_cookie *cookie)
+{
+ struct fscache_operation *op;
+ struct fscache_object *object;
+
+ _enter("%p", cookie);
+
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+
+ fscache_stat(&fscache_n_attr_changed);
+
+ op = kzalloc(sizeof(*op), GFP_KERNEL);
+ if (!op) {
+ fscache_stat(&fscache_n_attr_changed_nomem);
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ fscache_operation_init(op, NULL);
+ fscache_operation_init_slow(op, fscache_attr_changed_op);
+ op->flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_EXCLUSIVE);
+
+ spin_lock(&cookie->lock);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+ object = hlist_entry(cookie->backing_objects.first,
+ struct fscache_object, cookie_link);
+
+ if (fscache_submit_exclusive_op(object, op) < 0)
+ goto nobufs;
+ spin_unlock(&cookie->lock);
+ fscache_stat(&fscache_n_attr_changed_ok);
+ fscache_put_operation(op);
+ _leave(" = 0");
+ return 0;
+
+nobufs:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+ fscache_stat(&fscache_n_attr_changed_nobufs);
+ _leave(" = %d", -ENOBUFS);
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(__fscache_attr_changed);
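+
+/* Sketch: a netfs would invoke this (through the fscache_attr_changed()
+ * wrapper) after changing an object's attributes, e.g. on truncate, so the
+ * cache can resize its backing store:
+ *
+ *	if (fscache_attr_changed(cookie) < 0)
+ *		the cache couldn't be updated; treat it as unreliable
+ */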
+
+/*
+ * handle secondary execution given to a retrieval op on behalf of the
+ * cache
+ */
+static void fscache_retrieval_work(struct work_struct *work)
+{
+ struct fscache_retrieval *op =
+ container_of(work, struct fscache_retrieval, op.fast_work);
+ unsigned long start;
+
+ _enter("{OP%x}", op->op.debug_id);
+
+ start = jiffies;
+ op->op.processor(&op->op);
+ fscache_hist(fscache_ops_histogram, start);
+ fscache_put_operation(&op->op);
+}
+
+/*
+ * release a retrieval op reference
+ */
+static void fscache_release_retrieval_op(struct fscache_operation *_op)
+{
+ struct fscache_retrieval *op =
+ container_of(_op, struct fscache_retrieval, op);
+
+ _enter("{OP%x}", op->op.debug_id);
+
+ fscache_hist(fscache_retrieval_histogram, op->start_time);
+ if (op->context)
+ fscache_put_context(op->op.object->cookie, op->context);
+
+ _leave("");
+}
+
+/*
+ * allocate a retrieval op
+ */
+static struct fscache_retrieval *fscache_alloc_retrieval(
+ struct address_space *mapping,
+ fscache_rw_complete_t end_io_func,
+ void *context)
+{
+ struct fscache_retrieval *op;
+
+ /* allocate a retrieval operation and attempt to submit it */
+ op = kzalloc(sizeof(*op), GFP_NOIO);
+ if (!op) {
+ fscache_stat(&fscache_n_retrievals_nomem);
+ return NULL;
+ }
+
+ fscache_operation_init(&op->op, fscache_release_retrieval_op);
+ op->op.flags = FSCACHE_OP_MYTHREAD | (1 << FSCACHE_OP_WAITING);
+ op->mapping = mapping;
+ op->end_io_func = end_io_func;
+ op->context = context;
+ op->start_time = jiffies;
+ INIT_WORK(&op->op.fast_work, fscache_retrieval_work);
+ INIT_LIST_HEAD(&op->to_do);
+ return op;
+}
+
+/*
+ * wait for a deferred lookup to complete
+ */
+static int fscache_wait_for_deferred_lookup(struct fscache_cookie *cookie)
+{
+ unsigned long jif;
+
+ _enter("");
+
+ if (!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags)) {
+ _leave(" = 0 [imm]");
+ return 0;
+ }
+
+ fscache_stat(&fscache_n_retrievals_wait);
+
+ jif = jiffies;
+ if (wait_on_bit(&cookie->flags, FSCACHE_COOKIE_LOOKING_UP,
+ fscache_wait_bit_interruptible,
+ TASK_INTERRUPTIBLE) != 0) {
+ fscache_stat(&fscache_n_retrievals_intr);
+ _leave(" = -ERESTARTSYS");
+ return -ERESTARTSYS;
+ }
+
+ ASSERT(!test_bit(FSCACHE_COOKIE_LOOKING_UP, &cookie->flags));
+
+ smp_rmb();
+ fscache_hist(fscache_retrieval_delay_histogram, jif);
+ _leave(" = 0 [dly]");
+ return 0;
+}
+
+/*
+ * read a page from the cache or allocate a block in which to store it
+ * - we return:
+ * -ENOMEM - out of memory, nothing done
+ * -ERESTARTSYS - interrupted
+ * -ENOBUFS - no backing object available in which to cache the block
+ * -ENODATA - no data available in the backing object for this block
+ * 0 - dispatched a read - it'll call end_io_func() when finished
+ */
+int __fscache_read_or_alloc_page(struct fscache_cookie *cookie,
+ struct page *page,
+ fscache_rw_complete_t end_io_func,
+ void *context,
+ gfp_t gfp)
+{
+ struct fscache_retrieval *op;
+ struct fscache_object *object;
+ int ret;
+
+ _enter("%p,%p,,,", cookie, page);
+
+ fscache_stat(&fscache_n_retrievals);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ ASSERTCMP(page, !=, NULL);
+
+ if (fscache_wait_for_deferred_lookup(cookie) < 0)
+ return -ERESTARTSYS;
+
+ op = fscache_alloc_retrieval(page->mapping, end_io_func, context);
+ if (!op) {
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+ }
+
+ spin_lock(&cookie->lock);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs_unlock;
+ object = hlist_entry(cookie->backing_objects.first,
+ struct fscache_object, cookie_link);
+
+ ASSERTCMP(object->state, >, FSCACHE_OBJECT_LOOKING_UP);
+
+ if (fscache_submit_op(object, &op->op) < 0)
+ goto nobufs_unlock;
+ spin_unlock(&cookie->lock);
+
+ fscache_stat(&fscache_n_retrieval_ops);
+
+ /* pin the netfs read context in case we need to do the actual netfs
+ * read because we've encountered a cache read failure */
+ fscache_get_context(object->cookie, op->context);
+
+ /* we wait for the operation to become active, and then process it
+ * *here*, in this thread, and not in the thread pool */
+ if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
+ _debug(">>> WT");
+ fscache_stat(&fscache_n_retrieval_op_waits);
+ wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+ fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+ _debug("<<< GO");
+ }
+
+ /* ask the cache to honour the operation */
+ if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags)) {
+ ret = object->cache->ops->allocate_page(op, page, gfp);
+ if (ret == 0)
+ ret = -ENODATA;
+ } else {
+ ret = object->cache->ops->read_or_alloc_page(op, page, gfp);
+ }
+
+ if (ret == -ENOMEM)
+ fscache_stat(&fscache_n_retrievals_nomem);
+ else if (ret == -ERESTARTSYS)
+ fscache_stat(&fscache_n_retrievals_intr);
+ else if (ret == -ENODATA)
+ fscache_stat(&fscache_n_retrievals_nodata);
+ else if (ret < 0)
+ fscache_stat(&fscache_n_retrievals_nobufs);
+ else
+ fscache_stat(&fscache_n_retrievals_ok);
+
+ fscache_put_retrieval(op);
+ _leave(" = %d", ret);
+ return ret;
+
+nobufs_unlock:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+nobufs:
+ fscache_stat(&fscache_n_retrievals_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(__fscache_read_or_alloc_page);
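+
+/* Sketch: typical netfs ->readpage() usage through the
+ * fscache_read_or_alloc_page() wrapper (the myfs_* names are hypothetical):
+ *
+ *	ret = fscache_read_or_alloc_page(cookie, page, myfs_read_done,
+ *					 ctx, GFP_KERNEL);
+ *	if (ret == 0)
+ *		return 0;	read dispatched; myfs_read_done() finishes it
+ *	if (ret == -ENODATA)
+ *		a block was reserved: read the page from the server, then
+ *		hand the result back with fscache_write_page()
+ *	on -ENOBUFS or any other error, just read from the server
+ */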
+
+/*
+ * read a list of pages from the cache or allocate blocks in which to store
+ * them
+ * - we return:
+ * -ENOMEM - out of memory, some pages may be being read
+ * -ERESTARTSYS - interrupted, some pages may be being read
+ * -ENOBUFS - no backing object or space available in which to cache any
+ * pages not being read
+ * -ENODATA - no data available in the backing object for some or all of
+ * the pages
+ * 0 - dispatched a read on all pages
+ *
+ * end_io_func() will be called for each page read from the cache as it
+ * finishes being read
+ *
+ * any pages for which a read is dispatched will be removed from pages and
+ * nr_pages
+ */
+int __fscache_read_or_alloc_pages(struct fscache_cookie *cookie,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages,
+ fscache_rw_complete_t end_io_func,
+ void *context,
+ gfp_t gfp)
+{
+ fscache_pages_retrieval_func_t func;
+ struct fscache_retrieval *op;
+ struct fscache_object *object;
+ int ret;
+
+ _enter("%p,,%d,,,", cookie, *nr_pages);
+
+ fscache_stat(&fscache_n_retrievals);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ ASSERTCMP(*nr_pages, >, 0);
+ ASSERT(!list_empty(pages));
+
+ if (fscache_wait_for_deferred_lookup(cookie) < 0)
+ return -ERESTARTSYS;
+
+ op = fscache_alloc_retrieval(mapping, end_io_func, context);
+ if (!op)
+ return -ENOMEM;
+
+ spin_lock(&cookie->lock);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs_unlock;
+ object = hlist_entry(cookie->backing_objects.first,
+ struct fscache_object, cookie_link);
+
+ if (fscache_submit_op(object, &op->op) < 0)
+ goto nobufs_unlock;
+ spin_unlock(&cookie->lock);
+
+ fscache_stat(&fscache_n_retrieval_ops);
+
+ /* pin the netfs read context in case we need to do the actual netfs
+ * read because we've encountered a cache read failure */
+ fscache_get_context(object->cookie, op->context);
+
+ /* we wait for the operation to become active, and then process it
+ * *here*, in this thread, and not in the thread pool */
+ if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
+ _debug(">>> WT");
+ fscache_stat(&fscache_n_retrieval_op_waits);
+ wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+ fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+ _debug("<<< GO");
+ }
+
+ /* ask the cache to honour the operation */
+ if (test_bit(FSCACHE_COOKIE_NO_DATA_YET, &object->cookie->flags))
+ func = object->cache->ops->allocate_pages;
+ else
+ func = object->cache->ops->read_or_alloc_pages;
+ ret = func(op, pages, nr_pages, gfp);
+
+ if (ret == -ENOMEM)
+ fscache_stat(&fscache_n_retrievals_nomem);
+ else if (ret == -ERESTARTSYS)
+ fscache_stat(&fscache_n_retrievals_intr);
+ else if (ret == -ENODATA)
+ fscache_stat(&fscache_n_retrievals_nodata);
+ else if (ret < 0)
+ fscache_stat(&fscache_n_retrievals_nobufs);
+ else
+ fscache_stat(&fscache_n_retrievals_ok);
+
+ fscache_put_retrieval(op);
+ _leave(" = %d", ret);
+ return ret;
+
+nobufs_unlock:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+nobufs:
+ fscache_stat(&fscache_n_retrievals_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(__fscache_read_or_alloc_pages);
+
+/*
+ * allocate a block in the cache on which to store a page
+ * - we return:
+ * -ENOMEM - out of memory, nothing done
+ * -ERESTARTSYS - interrupted
+ * -ENOBUFS - no backing object available in which to cache the block
+ * 0 - block allocated
+ */
+int __fscache_alloc_page(struct fscache_cookie *cookie,
+ struct page *page,
+ gfp_t gfp)
+{
+ struct fscache_retrieval *op;
+ struct fscache_object *object;
+ int ret;
+
+ _enter("%p,%p,,,", cookie, page);
+
+ fscache_stat(&fscache_n_allocs);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ ASSERTCMP(page, !=, NULL);
+
+ if (fscache_wait_for_deferred_lookup(cookie) < 0)
+ return -ERESTARTSYS;
+
+ op = fscache_alloc_retrieval(page->mapping, NULL, NULL);
+ if (!op)
+ return -ENOMEM;
+
+ spin_lock(&cookie->lock);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs_unlock;
+ object = hlist_entry(cookie->backing_objects.first,
+ struct fscache_object, cookie_link);
+
+ if (fscache_submit_op(object, &op->op) < 0)
+ goto nobufs_unlock;
+ spin_unlock(&cookie->lock);
+
+ fscache_stat(&fscache_n_alloc_ops);
+
+ if (test_bit(FSCACHE_OP_WAITING, &op->op.flags)) {
+ _debug(">>> WT");
+ fscache_stat(&fscache_n_alloc_op_waits);
+ wait_on_bit(&op->op.flags, FSCACHE_OP_WAITING,
+ fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+ _debug("<<< GO");
+ }
+
+ /* ask the cache to honour the operation */
+ ret = object->cache->ops->allocate_page(op, page, gfp);
+
+ if (ret < 0)
+ fscache_stat(&fscache_n_allocs_nobufs);
+ else
+ fscache_stat(&fscache_n_allocs_ok);
+
+ fscache_put_retrieval(op);
+ _leave(" = %d", ret);
+ return ret;
+
+nobufs_unlock:
+ spin_unlock(&cookie->lock);
+ kfree(op);
+nobufs:
+ fscache_stat(&fscache_n_allocs_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+}
+EXPORT_SYMBOL(__fscache_alloc_page);
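+
+/* Sketch: a netfs that is about to completely overwrite a page can skip the
+ * read and merely reserve cache space for it (hypothetical usage):
+ *
+ *	if (fscache_alloc_page(cookie, page, GFP_KERNEL) == 0)
+ *		the block is reserved; store the final data later with
+ *		fscache_write_page()
+ */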
+
+/*
+ * release a write op reference
+ */
+static void fscache_release_write_op(struct fscache_operation *_op)
+{
+ _enter("{OP%x}", _op->debug_id);
+}
+
+/*
+ * perform the background storage of a page into the cache
+ */
+static void fscache_write_op(struct fscache_operation *_op)
+{
+ struct fscache_storage *op =
+ container_of(_op, struct fscache_storage, op);
+ struct fscache_object *object = op->op.object;
+ struct page *page;
+ unsigned n;
+ void *results[1];
+ int ret;
+
+ _enter("{OP%x,%d}", op->op.debug_id, atomic_read(&op->op.usage));
+
+ spin_lock(&object->lock);
+
+ if (!fscache_object_is_active(object)) {
+ spin_unlock(&object->lock);
+ _leave("");
+ return;
+ }
+
+ fscache_stat(&fscache_n_store_calls);
+
+ /* find a page to store */
+ page = NULL;
+ n = radix_tree_gang_lookup(&object->stores, results, 0, 1);
+ if (n == 1) {
+ page = results[0];
+ _debug("gang %d [%lx]", n, page->index);
+ if (page->index <= op->store_limit)
+ radix_tree_delete(&object->stores, page->index);
+ else
+ goto superseded;
+ } else {
+ goto superseded;
+ }
+
+ spin_unlock(&object->lock);
+
+ if (page) {
+ ret = object->cache->ops->write_page(op, page);
+ end_page_fscache_write(page);
+ page_cache_release(page);
+ if (ret < 0)
+ fscache_abort_object(object);
+ else
+ fscache_enqueue_operation(&op->op);
+ }
+
+ _leave("");
+ return;
+
+superseded:
+ /* this writer is going away and there aren't any more things to
+ * write */
+ _debug("cease");
+ clear_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags);
+ spin_unlock(&object->lock);
+ _leave("");
+}
+
+/*
+ * request a page be stored in the cache
+ * - returns:
+ * -ENOMEM - out of memory, nothing done
+ * -ENOBUFS - no backing object available in which to cache the page
+ * 0 - dispatched a write - it'll call end_io_func() when finished
+ *
+ * if the cookie still has a backing object at this point, that object can be
+ * in one of a few states with respect to storage processing:
+ *
+ * (1) negative lookup, object not yet created (FSCACHE_COOKIE_CREATING is
+ * set)
+ *
+ * (a) no writes yet (set FSCACHE_COOKIE_PENDING_FILL and queue deferred
+ * fill op)
+ *
+ * (b) writes deferred till post-creation (mark page for writing and
+ * return immediately)
+ *
+ * (2) negative lookup, object created, initial fill being made from netfs
+ * (FSCACHE_COOKIE_INITIAL_FILL is set)
+ *
+ * (a) fill point not yet reached this page (mark page for writing and
+ * return)
+ *
+ * (b) fill point passed this page (queue op to store this page)
+ *
+ * (3) object extant (queue op to store this page)
+ *
+ * any other state is invalid
+ */
+int __fscache_write_page(struct fscache_cookie *cookie,
+ struct page *page,
+ gfp_t gfp)
+{
+ struct fscache_storage *op;
+ struct fscache_object *object;
+ int ret;
+
+ _enter("%p,%x,", cookie, (u32) page->flags);
+
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ ASSERT(PageFsCache(page));
+
+ fscache_stat(&fscache_n_stores);
+
+ op = kzalloc(sizeof(*op), GFP_NOIO);
+ if (!op)
+ goto nomem;
+
+ fscache_operation_init(&op->op, fscache_release_write_op);
+ fscache_operation_init_slow(&op->op, fscache_write_op);
+ op->op.flags = FSCACHE_OP_SLOW | (1 << FSCACHE_OP_WAITING);
+
+ ret = radix_tree_preload(gfp & ~__GFP_HIGHMEM);
+ if (ret < 0)
+ goto nomem_free;
+
+ ret = -ENOBUFS;
+ spin_lock(&cookie->lock);
+
+ if (hlist_empty(&cookie->backing_objects))
+ goto nobufs;
+ object = hlist_entry(cookie->backing_objects.first,
+ struct fscache_object, cookie_link);
+ if (test_bit(FSCACHE_IOERROR, &object->cache->flags))
+ goto nobufs;
+
+ /* add the page to the pending-storage radix tree on the backing
+ * object */
+ spin_lock(&object->lock);
+
+ _debug("store limit %llx", (unsigned long long) object->store_limit);
+
+ ret = radix_tree_insert(&object->stores, page->index, page);
+ if (ret < 0) {
+ if (ret == -EEXIST)
+ goto already_queued;
+ _debug("insert failed %d", ret);
+ goto nobufs_unlock_obj;
+ }
+
+ page_cache_get(page);
+ if (TestSetPageFsCacheWrite(page))
+ BUG();
+
+ /* we only want one writer at a time, but we do need to queue new
+ * writers after exclusive ops */
+ if (test_and_set_bit(FSCACHE_OBJECT_PENDING_WRITE, &object->flags))
+ goto already_pending;
+
+ spin_unlock(&object->lock);
+
+ op->op.debug_id = atomic_inc_return(&fscache_op_debug_id);
+ op->store_limit = object->store_limit;
+
+ if (fscache_submit_op(object, &op->op) < 0)
+ goto submit_failed;
+
+ spin_unlock(&cookie->lock);
+ radix_tree_preload_end();
+ fscache_stat(&fscache_n_store_ops);
+ fscache_stat(&fscache_n_stores_ok);
+
+ /* the slow work queue now carries its own ref on the object */
+ fscache_put_operation(&op->op);
+ _leave(" = 0");
+ return 0;
+
+already_queued:
+ fscache_stat(&fscache_n_stores_again);
+already_pending:
+ spin_unlock(&object->lock);
+ spin_unlock(&cookie->lock);
+ radix_tree_preload_end();
+ kfree(op);
+ fscache_stat(&fscache_n_stores_ok);
+ _leave(" = 0");
+ return 0;
+
+submit_failed:
+ radix_tree_delete(&object->stores, page->index);
+ end_page_fscache_write(page);
+ page_cache_release(page);
+ ret = -ENOBUFS;
+ goto nobufs;
+
+nobufs_unlock_obj:
+ spin_unlock(&object->lock);
+nobufs:
+ spin_unlock(&cookie->lock);
+ radix_tree_preload_end();
+ kfree(op);
+ fscache_stat(&fscache_n_stores_nobufs);
+ _leave(" = -ENOBUFS");
+ return -ENOBUFS;
+
+nomem_free:
+ kfree(op);
+nomem:
+ fscache_stat(&fscache_n_stores_oom);
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(__fscache_write_page);
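+
+/* Sketch: the netfs side of the above, storing a page it has just fetched
+ * from the server (error handling per the netfs's own policy):
+ *
+ *	if (fscache_write_page(cookie, page, GFP_KERNEL) != 0)
+ *		fscache_uncache_page(cookie, page);
+ */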
+
+/*
+ * remove a page from the cache
+ */
+void __fscache_uncache_page(struct fscache_cookie *cookie, struct page *page)
+{
+ struct fscache_object *object;
+
+ _enter(",%p", page);
+
+ ASSERTCMP(cookie->def->type, !=, FSCACHE_COOKIE_TYPE_INDEX);
+ ASSERTCMP(page, !=, NULL);
+
+ fscache_stat(&fscache_n_uncaches);
+
+ /* cache withdrawal may beat us to it */
+ if (!PageFsCache(page))
+ goto done;
+
+ /* get the object */
+ spin_lock(&cookie->lock);
+
+ if (hlist_empty(&cookie->backing_objects)) {
+ ClearPageFsCache(page);
+ goto done_unlock;
+ }
+
+ object = hlist_entry(cookie->backing_objects.first,
+ struct fscache_object, cookie_link);
+
+ /* there might now be stuff on disk we could read */
+ clear_bit(FSCACHE_COOKIE_NO_DATA_YET, &cookie->flags);
+
+ /* only invoke the cache backend if we managed to mark the page
+ * uncached here; this deals with synchronisation vs withdrawal */
+ if (TestClearPageFsCache(page) &&
+ object->cache->ops->uncache_page) {
+ /* the cache backend releases the cookie lock */
+ object->cache->ops->uncache_page(object, page);
+ goto done;
+ }
+
+done_unlock:
+ spin_unlock(&cookie->lock);
+done:
+ _leave("");
+}
+EXPORT_SYMBOL(__fscache_uncache_page);
+
+/**
+ * fscache_mark_pages_cached - Mark pages as being cached
+ * @op: The retrieval op pages are being marked for
+ * @pagevec: The pages to be marked
+ *
+ * Mark a bunch of netfs pages as being cached. After this is called,
+ * the netfs must call fscache_uncache_page() to remove the mark.
+ */
+void fscache_mark_pages_cached(struct fscache_retrieval *op,
+ struct pagevec *pagevec)
+{
+ struct fscache_cookie *cookie = op->op.object->cookie;
+ unsigned long loop;
+
+#ifdef CONFIG_FSCACHE_STATS
+ atomic_add(pagevec->nr, &fscache_n_marks);
+#endif
+
+ for (loop = 0; loop < pagevec->nr; loop++) {
+ struct page *page = pagevec->pages[loop];
+
+ _debug("- mark %p{%lx}", page, page->index);
+ if (TestSetPageFsCache(page)) {
+ static bool once_only;
+ if (!once_only) {
+ once_only = true;
+ printk(KERN_WARNING "FS-Cache:"
+ " Cookie type %s marked page %lx"
+ " multiple times\n",
+ cookie->def->name, page->index);
+ }
+ }
+ }
+
+ if (cookie->def->mark_pages_cached)
+ cookie->def->mark_pages_cached(cookie->netfs_data,
+ op->mapping, pagevec);
+ pagevec_reinit(pagevec);
+}
+EXPORT_SYMBOL(fscache_mark_pages_cached);
diff --git a/fs/fscache/fsc-proc.c b/fs/fscache/fsc-proc.c
new file mode 100644
index 000000000000..2f549f33b462
--- /dev/null
+++ b/fs/fscache/fsc-proc.c
@@ -0,0 +1,68 @@
+/* FS-Cache statistics viewing interface
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define FSCACHE_DEBUG_LEVEL OPERATION
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "fsc-internal.h"
+
+/*
+ * initialise the /proc/fs/fscache/ directory
+ */
+int __init fscache_proc_init(void)
+{
+ _enter("");
+
+ if (!proc_mkdir("fs/fscache", NULL))
+ goto error_dir;
+
+#ifdef CONFIG_FSCACHE_STATS
+ if (!proc_create("fs/fscache/stats", S_IFREG | 0444, NULL,
+ &fscache_stats_fops))
+ goto error_stats;
+#endif
+
+#ifdef CONFIG_FSCACHE_HISTOGRAM
+ if (!proc_create("fs/fscache/histogram", S_IFREG | 0444, NULL,
+ &fscache_histogram_fops))
+ goto error_histogram;
+#endif
+
+ _leave(" = 0");
+ return 0;
+
+#ifdef CONFIG_FSCACHE_HISTOGRAM
+error_histogram:
+#endif
+#ifdef CONFIG_FSCACHE_STATS
+ remove_proc_entry("fs/fscache/stats", NULL);
+error_stats:
+#endif
+ remove_proc_entry("fs/fscache", NULL);
+error_dir:
+ _leave(" = -ENOMEM");
+ return -ENOMEM;
+}
+
+/*
+ * clean up the /proc/fs/fscache/ directory
+ */
+void fscache_proc_cleanup(void)
+{
+#ifdef CONFIG_FSCACHE_HISTOGRAM
+ remove_proc_entry("fs/fscache/histogram", NULL);
+#endif
+#ifdef CONFIG_FSCACHE_STATS
+ remove_proc_entry("fs/fscache/stats", NULL);
+#endif
+ remove_proc_entry("fs/fscache", NULL);
+}
diff --git a/fs/fscache/fsc-stats.c b/fs/fscache/fsc-stats.c
new file mode 100644
index 000000000000..93ffc8f1a481
--- /dev/null
+++ b/fs/fscache/fsc-stats.c
@@ -0,0 +1,212 @@
+/* FS-Cache statistics
+ *
+ * Copyright (C) 2007 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#define FSCACHE_DEBUG_LEVEL THREAD
+#include <linux/module.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include "fsc-internal.h"
+
+/*
+ * operation counters
+ */
+atomic_t fscache_n_op_pend;
+atomic_t fscache_n_op_run;
+atomic_t fscache_n_op_enqueue;
+atomic_t fscache_n_op_requeue;
+atomic_t fscache_n_op_deferred_release;
+atomic_t fscache_n_op_release;
+atomic_t fscache_n_op_gc;
+
+atomic_t fscache_n_attr_changed;
+atomic_t fscache_n_attr_changed_ok;
+atomic_t fscache_n_attr_changed_nobufs;
+atomic_t fscache_n_attr_changed_nomem;
+atomic_t fscache_n_attr_changed_calls;
+
+atomic_t fscache_n_allocs;
+atomic_t fscache_n_allocs_ok;
+atomic_t fscache_n_allocs_wait;
+atomic_t fscache_n_allocs_nobufs;
+atomic_t fscache_n_alloc_ops;
+atomic_t fscache_n_alloc_op_waits;
+
+atomic_t fscache_n_retrievals;
+atomic_t fscache_n_retrievals_ok;
+atomic_t fscache_n_retrievals_wait;
+atomic_t fscache_n_retrievals_nodata;
+atomic_t fscache_n_retrievals_nobufs;
+atomic_t fscache_n_retrievals_intr;
+atomic_t fscache_n_retrievals_nomem;
+atomic_t fscache_n_retrieval_ops;
+atomic_t fscache_n_retrieval_op_waits;
+
+atomic_t fscache_n_stores;
+atomic_t fscache_n_stores_ok;
+atomic_t fscache_n_stores_again;
+atomic_t fscache_n_stores_nobufs;
+atomic_t fscache_n_stores_oom;
+atomic_t fscache_n_store_ops;
+atomic_t fscache_n_store_calls;
+
+atomic_t fscache_n_marks;
+atomic_t fscache_n_uncaches;
+
+atomic_t fscache_n_acquires;
+atomic_t fscache_n_acquires_null;
+atomic_t fscache_n_acquires_no_cache;
+atomic_t fscache_n_acquires_ok;
+atomic_t fscache_n_acquires_nobufs;
+atomic_t fscache_n_acquires_oom;
+
+atomic_t fscache_n_updates;
+atomic_t fscache_n_updates_null;
+atomic_t fscache_n_updates_run;
+
+atomic_t fscache_n_relinquishes;
+atomic_t fscache_n_relinquishes_null;
+atomic_t fscache_n_relinquishes_waitcrt;
+
+atomic_t fscache_n_cookie_index;
+atomic_t fscache_n_cookie_data;
+atomic_t fscache_n_cookie_special;
+
+atomic_t fscache_n_object_alloc;
+atomic_t fscache_n_object_no_alloc;
+atomic_t fscache_n_object_lookups;
+atomic_t fscache_n_object_lookups_negative;
+atomic_t fscache_n_object_lookups_positive;
+atomic_t fscache_n_object_created;
+atomic_t fscache_n_object_avail;
+atomic_t fscache_n_object_dead;
+
+atomic_t fscache_n_checkaux_none;
+atomic_t fscache_n_checkaux_okay;
+atomic_t fscache_n_checkaux_update;
+atomic_t fscache_n_checkaux_obsolete;
+
+/*
+ * display the general statistics
+ */
+static int fscache_stats_show(struct seq_file *m, void *v)
+{
+ seq_puts(m, "FS-Cache statistics\n");
+
+ seq_printf(m, "Cookies: idx=%u dat=%u spc=%u\n",
+ atomic_read(&fscache_n_cookie_index),
+ atomic_read(&fscache_n_cookie_data),
+ atomic_read(&fscache_n_cookie_special));
+
+ seq_printf(m, "Objects: alc=%u nal=%u avl=%u ded=%u\n",
+ atomic_read(&fscache_n_object_alloc),
+ atomic_read(&fscache_n_object_no_alloc),
+ atomic_read(&fscache_n_object_avail),
+ atomic_read(&fscache_n_object_dead));
+ seq_printf(m, "ChkAux : non=%u ok=%u upd=%u obs=%u\n",
+ atomic_read(&fscache_n_checkaux_none),
+ atomic_read(&fscache_n_checkaux_okay),
+ atomic_read(&fscache_n_checkaux_update),
+ atomic_read(&fscache_n_checkaux_obsolete));
+
+ seq_printf(m, "Pages : mrk=%u unc=%u\n",
+ atomic_read(&fscache_n_marks),
+ atomic_read(&fscache_n_uncaches));
+
+ seq_printf(m, "Acquire: n=%u nul=%u noc=%u ok=%u nbf=%u"
+ " oom=%u\n",
+ atomic_read(&fscache_n_acquires),
+ atomic_read(&fscache_n_acquires_null),
+ atomic_read(&fscache_n_acquires_no_cache),
+ atomic_read(&fscache_n_acquires_ok),
+ atomic_read(&fscache_n_acquires_nobufs),
+ atomic_read(&fscache_n_acquires_oom));
+
+ seq_printf(m, "Lookups: n=%u neg=%u pos=%u crt=%u\n",
+ atomic_read(&fscache_n_object_lookups),
+ atomic_read(&fscache_n_object_lookups_negative),
+ atomic_read(&fscache_n_object_lookups_positive),
+ atomic_read(&fscache_n_object_created));
+
+ seq_printf(m, "Updates: n=%u nul=%u run=%u\n",
+ atomic_read(&fscache_n_updates),
+ atomic_read(&fscache_n_updates_null),
+ atomic_read(&fscache_n_updates_run));
+
+ seq_printf(m, "Relinqs: n=%u nul=%u wcr=%u\n",
+ atomic_read(&fscache_n_relinquishes),
+ atomic_read(&fscache_n_relinquishes_null),
+ atomic_read(&fscache_n_relinquishes_waitcrt));
+
+ seq_printf(m, "AttrChg: n=%u ok=%u nbf=%u oom=%u run=%u\n",
+ atomic_read(&fscache_n_attr_changed),
+ atomic_read(&fscache_n_attr_changed_ok),
+ atomic_read(&fscache_n_attr_changed_nobufs),
+ atomic_read(&fscache_n_attr_changed_nomem),
+ atomic_read(&fscache_n_attr_changed_calls));
+
+ seq_printf(m, "Allocs : n=%u ok=%u wt=%u nbf=%u\n",
+ atomic_read(&fscache_n_allocs),
+ atomic_read(&fscache_n_allocs_ok),
+ atomic_read(&fscache_n_allocs_wait),
+ atomic_read(&fscache_n_allocs_nobufs));
+ seq_printf(m, "Allocs : ops=%u owt=%u\n",
+ atomic_read(&fscache_n_alloc_ops),
+ atomic_read(&fscache_n_alloc_op_waits));
+
+ seq_printf(m, "Retrvls: n=%u ok=%u wt=%u nod=%u nbf=%u"
+ " int=%u oom=%u\n",
+ atomic_read(&fscache_n_retrievals),
+ atomic_read(&fscache_n_retrievals_ok),
+ atomic_read(&fscache_n_retrievals_wait),
+ atomic_read(&fscache_n_retrievals_nodata),
+ atomic_read(&fscache_n_retrievals_nobufs),
+ atomic_read(&fscache_n_retrievals_intr),
+ atomic_read(&fscache_n_retrievals_nomem));
+ seq_printf(m, "Retrvls: ops=%u owt=%u\n",
+ atomic_read(&fscache_n_retrieval_ops),
+ atomic_read(&fscache_n_retrieval_op_waits));
+
+ seq_printf(m, "Stores : n=%u ok=%u agn=%u nbf=%u oom=%u\n",
+ atomic_read(&fscache_n_stores),
+ atomic_read(&fscache_n_stores_ok),
+ atomic_read(&fscache_n_stores_again),
+ atomic_read(&fscache_n_stores_nobufs),
+ atomic_read(&fscache_n_stores_oom));
+ seq_printf(m, "Stores : ops=%u run=%u\n",
+ atomic_read(&fscache_n_store_ops),
+ atomic_read(&fscache_n_store_calls));
+
+ seq_printf(m, "Ops : pend=%u run=%u enq=%u\n",
+ atomic_read(&fscache_n_op_pend),
+ atomic_read(&fscache_n_op_run),
+ atomic_read(&fscache_n_op_enqueue));
+ seq_printf(m, "Ops : dfr=%u rel=%u gc=%u\n",
+ atomic_read(&fscache_n_op_deferred_release),
+ atomic_read(&fscache_n_op_release),
+ atomic_read(&fscache_n_op_gc));
+ return 0;
+}
+
+/*
+ * open "/proc/fs/fscache/stats" allowing provision of a statistical summary
+ */
+static int fscache_stats_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, fscache_stats_show, NULL);
+}
+
+const struct file_operations fscache_stats_fops = {
+ .owner = THIS_MODULE,
+ .open = fscache_stats_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = seq_release,
+};
diff --git a/fs/fuse/Kconfig b/fs/fuse/Kconfig
new file mode 100644
index 000000000000..0cf160a94eda
--- /dev/null
+++ b/fs/fuse/Kconfig
@@ -0,0 +1,15 @@
+config FUSE_FS
+ tristate "FUSE (Filesystem in Userspace) support"
+ help
+ With FUSE it is possible to implement a fully functional filesystem
+ in a userspace program.
+
+ There's also a companion library: libfuse. This library, along with
+ the associated utilities, is available from the FUSE homepage:
+ <http://fuse.sourceforge.net/>
+
+ See <file:Documentation/filesystems/fuse.txt> for more information.
+ See <file:Documentation/Changes> for needed library/utility version.
+
+ If you want to develop a userspace FS, or if you want to use
+ a filesystem based on FUSE, answer Y or M.
diff --git a/fs/fuse/dev.c b/fs/fuse/dev.c
index e0c7ada08a1f..ba76b68c52ff 100644
--- a/fs/fuse/dev.c
+++ b/fs/fuse/dev.c
@@ -281,7 +281,8 @@ __releases(&fc->lock)
fc->blocked = 0;
wake_up_all(&fc->blocked_waitq);
}
- if (fc->num_background == FUSE_CONGESTION_THRESHOLD) {
+ if (fc->num_background == FUSE_CONGESTION_THRESHOLD &&
+ fc->connected) {
clear_bdi_congested(&fc->bdi, READ);
clear_bdi_congested(&fc->bdi, WRITE);
}
@@ -825,16 +826,21 @@ static int fuse_notify_poll(struct fuse_conn *fc, unsigned int size,
struct fuse_copy_state *cs)
{
struct fuse_notify_poll_wakeup_out outarg;
- int err;
+ int err = -EINVAL;
if (size != sizeof(outarg))
- return -EINVAL;
+ goto err;
err = fuse_copy_one(cs, &outarg, sizeof(outarg));
if (err)
- return err;
+ goto err;
+ fuse_copy_finish(cs);
return fuse_notify_poll_wakeup(fc, &outarg);
+
+err:
+ fuse_copy_finish(cs);
+ return err;
}
static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
@@ -845,6 +851,7 @@ static int fuse_notify(struct fuse_conn *fc, enum fuse_notify_code code,
return fuse_notify_poll(fc, size, cs);
default:
+ fuse_copy_finish(cs);
return -EINVAL;
}
}
@@ -923,7 +930,6 @@ static ssize_t fuse_dev_write(struct kiocb *iocb, const struct iovec *iov,
*/
if (!oh.unique) {
err = fuse_notify(fc, oh.error, nbytes - sizeof(oh), &cs);
- fuse_copy_finish(&cs);
return err ? err : nbytes;
}
diff --git a/fs/fuse/file.c b/fs/fuse/file.c
index e8162646a9b5..d9fdb7cec538 100644
--- a/fs/fuse/file.c
+++ b/fs/fuse/file.c
@@ -54,7 +54,7 @@ struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
ff->reserved_req = fuse_request_alloc();
if (!ff->reserved_req) {
kfree(ff);
- ff = NULL;
+ return NULL;
} else {
INIT_LIST_HEAD(&ff->write_entry);
atomic_set(&ff->count, 0);
diff --git a/fs/fuse/inode.c b/fs/fuse/inode.c
index 47c96fdca1ac..459b73dd45e1 100644
--- a/fs/fuse/inode.c
+++ b/fs/fuse/inode.c
@@ -292,6 +292,7 @@ static void fuse_put_super(struct super_block *sb)
list_del(&fc->entry);
fuse_ctl_remove_conn(fc);
mutex_unlock(&fuse_mutex);
+ bdi_destroy(&fc->bdi);
fuse_conn_put(fc);
}
@@ -532,7 +533,6 @@ void fuse_conn_put(struct fuse_conn *fc)
if (fc->destroy_req)
fuse_request_free(fc->destroy_req);
mutex_destroy(&fc->inst_mutex);
- bdi_destroy(&fc->bdi);
fc->release(fc);
}
}
@@ -805,16 +805,18 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
int err;
int is_bdev = sb->s_bdev != NULL;
+ err = -EINVAL;
if (sb->s_flags & MS_MANDLOCK)
- return -EINVAL;
+ goto err;
if (!parse_fuse_opt((char *) data, &d, is_bdev))
- return -EINVAL;
+ goto err;
if (is_bdev) {
#ifdef CONFIG_BLOCK
+ err = -EINVAL;
if (!sb_set_blocksize(sb, d.blksize))
- return -EINVAL;
+ goto err;
#endif
} else {
sb->s_blocksize = PAGE_CACHE_SIZE;
@@ -826,20 +828,22 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
sb->s_export_op = &fuse_export_operations;
file = fget(d.fd);
+ err = -EINVAL;
if (!file)
- return -EINVAL;
+ goto err;
if (file->f_op != &fuse_dev_operations)
- return -EINVAL;
+ goto err_fput;
fc = kmalloc(sizeof(*fc), GFP_KERNEL);
+ err = -ENOMEM;
if (!fc)
- return -ENOMEM;
+ goto err_fput;
err = fuse_conn_init(fc, sb);
if (err) {
kfree(fc);
- return err;
+ goto err_fput;
}
fc->release = fuse_free_conn;
@@ -854,12 +858,12 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
err = -ENOMEM;
root = fuse_get_root_inode(sb, d.rootmode);
if (!root)
- goto err;
+ goto err_put_conn;
root_dentry = d_alloc_root(root);
if (!root_dentry) {
iput(root);
- goto err;
+ goto err_put_conn;
}
init_req = fuse_request_alloc();
@@ -903,9 +907,11 @@ static int fuse_fill_super(struct super_block *sb, void *data, int silent)
fuse_request_free(init_req);
err_put_root:
dput(root_dentry);
- err:
- fput(file);
+ err_put_conn:
fuse_conn_put(fc);
+ err_fput:
+ fput(file);
+ err:
return err;
}
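The fuse_fill_super() rework above is the classic goto-unwind pattern: resources are acquired in order (file, connection, root) and the relabelled exits release them in strict reverse order. bdi_destroy() also moves from the last fuse_conn_put() to fuse_put_super(), so the bdi is torn down when the filesystem is unmounted rather than whenever the device fd is finally released. A minimal model of the unwind ordering, with invented resource names:

/* Minimal model of the reverse-order unwind used above. */
#include <stdio.h>

static int fill_super_model(int fail_at)
{
	int err = -1;

	if (fail_at == 1)
		goto err;		/* nothing acquired yet */
	/* "file" acquired here */
	if (fail_at == 2)
		goto err_fput;
	/* "conn" acquired here */
	if (fail_at == 3)
		goto err_put_conn;
	return 0;

err_put_conn:
	puts("put conn");		/* last acquired, first released */
err_fput:
	puts("fput");
err:
	return err;
}

int main(void)
{
	fill_super_model(3);		/* prints "put conn" then "fput" */
	return 0;
}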
diff --git a/fs/gfs2/Kconfig b/fs/gfs2/Kconfig
index e563a6449811..3a981b7f64ca 100644
--- a/fs/gfs2/Kconfig
+++ b/fs/gfs2/Kconfig
@@ -1,6 +1,10 @@
config GFS2_FS
tristate "GFS2 file system support"
depends on EXPERIMENTAL && (64BIT || LBD)
+ select DLM if GFS2_FS_LOCKING_DLM
+ select CONFIGFS_FS if GFS2_FS_LOCKING_DLM
+ select SYSFS if GFS2_FS_LOCKING_DLM
+ select IP_SCTP if DLM_SCTP
select FS_POSIX_ACL
select CRC32
help
@@ -18,17 +22,16 @@ config GFS2_FS
the locking module below. Documentation and utilities for GFS2 can
be found here: http://sources.redhat.com/cluster
- The "nolock" lock module is now built in to GFS2 by default.
+ The "nolock" lock module is now built in to GFS2 by default. If
+ you want to use the DLM, be sure to enable HOTPLUG and IPv4/6
+ networking.
config GFS2_FS_LOCKING_DLM
- tristate "GFS2 DLM locking module"
- depends on GFS2_FS && SYSFS && NET && INET && (IPV6 || IPV6=n)
- select IP_SCTP if DLM_SCTP
- select CONFIGFS_FS
- select DLM
+ bool "GFS2 DLM locking"
+ depends on (GFS2_FS!=n) && NET && INET && (IPV6 || IPV6=n) && HOTPLUG
help
Multiple node locking module for GFS2
- Most users of GFS2 will require this module. It provides the locking
+ Most users of GFS2 will require this. It provides the locking
interface between GFS2 and the DLM, which is required to use GFS2
in a cluster environment.
diff --git a/fs/gfs2/Makefile b/fs/gfs2/Makefile
index c1b4ec6a9650..a851ea4bdf70 100644
--- a/fs/gfs2/Makefile
+++ b/fs/gfs2/Makefile
@@ -1,9 +1,9 @@
obj-$(CONFIG_GFS2_FS) += gfs2.o
gfs2-y := acl.o bmap.o dir.o eaops.o eattr.o glock.o \
- glops.o inode.o log.o lops.o locking.o main.o meta_io.o \
+ glops.o inode.o log.o lops.o main.o meta_io.o \
mount.o ops_address.o ops_dentry.o ops_export.o ops_file.o \
ops_fstype.o ops_inode.o ops_super.o quota.o \
recovery.o rgrp.o super.o sys.o trans.o util.o
-obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += locking/dlm/
+gfs2-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o
diff --git a/fs/gfs2/acl.c b/fs/gfs2/acl.c
index e335dceb6a4f..43764f4fa763 100644
--- a/fs/gfs2/acl.c
+++ b/fs/gfs2/acl.c
@@ -15,7 +15,6 @@
#include <linux/posix_acl.h>
#include <linux/posix_acl_xattr.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include "gfs2.h"
#include "incore.h"
diff --git a/fs/gfs2/bmap.c b/fs/gfs2/bmap.c
index 11ffc56f1f81..3a5d3f883e10 100644
--- a/fs/gfs2/bmap.c
+++ b/fs/gfs2/bmap.c
@@ -13,7 +13,6 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
-#include <linux/lm_interface.h>
#include "gfs2.h"
#include "incore.h"
diff --git a/fs/gfs2/dir.c b/fs/gfs2/dir.c
index b7c8e5c70791..aef4d0c06748 100644
--- a/fs/gfs2/dir.c
+++ b/fs/gfs2/dir.c
@@ -60,7 +60,6 @@
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
#include <linux/vmalloc.h>
-#include <linux/lm_interface.h>
#include "gfs2.h"
#include "incore.h"
diff --git a/fs/gfs2/eaops.c b/fs/gfs2/eaops.c
index f114ba2b3557..dee9b03e5b37 100644
--- a/fs/gfs2/eaops.c
+++ b/fs/gfs2/eaops.c
@@ -14,7 +14,6 @@
#include <linux/capability.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include <asm/uaccess.h>
#include "gfs2.h"
diff --git a/fs/gfs2/eattr.c b/fs/gfs2/eattr.c
index 0d1c76d906ae..899763aed217 100644
--- a/fs/gfs2/eattr.c
+++ b/fs/gfs2/eattr.c
@@ -13,7 +13,6 @@
#include <linux/buffer_head.h>
#include <linux/xattr.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include <asm/uaccess.h>
#include "gfs2.h"
diff --git a/fs/gfs2/glock.c b/fs/gfs2/glock.c
index 6b983aef785d..173e59ce9ad3 100644
--- a/fs/gfs2/glock.c
+++ b/fs/gfs2/glock.c
@@ -10,7 +10,6 @@
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/delay.h>
#include <linux/sort.h>
@@ -18,7 +17,6 @@
#include <linux/kallsyms.h>
#include <linux/gfs2_ondisk.h>
#include <linux/list.h>
-#include <linux/lm_interface.h>
#include <linux/wait.h>
#include <linux/module.h>
#include <linux/rwsem.h>
@@ -155,13 +153,10 @@ static void glock_free(struct gfs2_glock *gl)
struct gfs2_sbd *sdp = gl->gl_sbd;
struct inode *aspace = gl->gl_aspace;
- if (sdp->sd_lockstruct.ls_ops->lm_put_lock)
- sdp->sd_lockstruct.ls_ops->lm_put_lock(gl->gl_lock);
-
if (aspace)
gfs2_aspace_put(aspace);
- kmem_cache_free(gfs2_glock_cachep, gl);
+ sdp->sd_lockstruct.ls_ops->lm_put_lock(gfs2_glock_cachep, gl);
}
/**
@@ -211,7 +206,6 @@ int gfs2_glock_put(struct gfs2_glock *gl)
atomic_dec(&lru_count);
}
spin_unlock(&lru_lock);
- GLOCK_BUG_ON(gl, gl->gl_state != LM_ST_UNLOCKED);
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_lru));
GLOCK_BUG_ON(gl, !list_empty(&gl->gl_holders));
glock_free(gl);
@@ -256,27 +250,6 @@ static struct gfs2_glock *search_bucket(unsigned int hash,
}
/**
- * gfs2_glock_find() - Find glock by lock number
- * @sdp: The GFS2 superblock
- * @name: The lock name
- *
- * Returns: NULL, or the struct gfs2_glock with the requested number
- */
-
-static struct gfs2_glock *gfs2_glock_find(const struct gfs2_sbd *sdp,
- const struct lm_lockname *name)
-{
- unsigned int hash = gl_hash(sdp, name);
- struct gfs2_glock *gl;
-
- read_lock(gl_lock_addr(hash));
- gl = search_bucket(hash, sdp, name);
- read_unlock(gl_lock_addr(hash));
-
- return gl;
-}
-
-/**
* may_grant - check if it's ok to grant a new lock
* @gl: The glock
* @gh: The lock request which we wish to grant
@@ -523,7 +496,7 @@ out_locked:
}
static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
- unsigned int cur_state, unsigned int req_state,
+ unsigned int req_state,
unsigned int flags)
{
int ret = LM_OUT_ERROR;
@@ -532,7 +505,7 @@ static unsigned int gfs2_lm_lock(struct gfs2_sbd *sdp, void *lock,
if (!sdp->sd_lockstruct.ls_ops->lm_lock)
return req_state == LM_ST_UNLOCKED ? 0 : req_state;
if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock, cur_state,
+ ret = sdp->sd_lockstruct.ls_ops->lm_lock(lock,
req_state, flags);
return ret;
}
@@ -575,7 +548,7 @@ __acquires(&gl->gl_spin)
gl->gl_state == LM_ST_DEFERRED) &&
!(lck_flags & (LM_FLAG_TRY | LM_FLAG_TRY_1CB)))
lck_flags |= LM_FLAG_TRY_1CB;
- ret = gfs2_lm_lock(sdp, gl->gl_lock, gl->gl_state, target, lck_flags);
+ ret = gfs2_lm_lock(sdp, gl, target, lck_flags);
if (!(ret & LM_OUT_ASYNC)) {
finish_xmote(gl, ret);
@@ -681,18 +654,6 @@ static void glock_work_func(struct work_struct *work)
gfs2_glock_put(gl);
}
-static int gfs2_lm_get_lock(struct gfs2_sbd *sdp, struct lm_lockname *name,
- void **lockp)
-{
- int error = -EIO;
- if (!sdp->sd_lockstruct.ls_ops->lm_get_lock)
- return 0;
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- error = sdp->sd_lockstruct.ls_ops->lm_get_lock(
- sdp->sd_lockstruct.ls_lockspace, name, lockp);
- return error;
-}
-
/**
* gfs2_glock_get() - Get a glock, or create one if one doesn't exist
* @sdp: The GFS2 superblock
@@ -736,7 +697,9 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
gl->gl_demote_state = LM_ST_EXCLUSIVE;
gl->gl_hash = hash;
gl->gl_ops = glops;
- gl->gl_stamp = jiffies;
+ snprintf(gl->gl_strname, GDLM_STRNAME_BYTES, "%8x%16llx",
+          name.ln_type, (unsigned long long)number);
+ memset(&gl->gl_lksb, 0, sizeof(struct dlm_lksb));
+ gl->gl_lksb.sb_lvbptr = gl->gl_lvb;
gl->gl_tchange = jiffies;
gl->gl_object = NULL;
gl->gl_sbd = sdp;
@@ -753,10 +716,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
}
}
- error = gfs2_lm_get_lock(sdp, &name, &gl->gl_lock);
- if (error)
- goto fail_aspace;
-
write_lock(gl_lock_addr(hash));
tmp = search_bucket(hash, sdp, &name);
if (tmp) {
@@ -772,9 +731,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
return 0;
-fail_aspace:
- if (gl->gl_aspace)
- gfs2_aspace_put(gl->gl_aspace);
fail:
kmem_cache_free(gfs2_glock_cachep, gl);
return error;
@@ -966,7 +922,7 @@ do_cancel:
if (!(gh->gh_flags & LM_FLAG_PRIORITY)) {
spin_unlock(&gl->gl_spin);
if (sdp->sd_lockstruct.ls_ops->lm_cancel)
- sdp->sd_lockstruct.ls_ops->lm_cancel(gl->gl_lock);
+ sdp->sd_lockstruct.ls_ops->lm_cancel(gl);
spin_lock(&gl->gl_spin);
}
return;
@@ -1051,7 +1007,6 @@ void gfs2_glock_dq(struct gfs2_holder *gh)
spin_lock(&gl->gl_spin);
clear_bit(GLF_LOCK, &gl->gl_flags);
}
- gl->gl_stamp = jiffies;
if (list_empty(&gl->gl_holders) &&
!test_bit(GLF_PENDING_DEMOTE, &gl->gl_flags) &&
!test_bit(GLF_DEMOTE, &gl->gl_flags))
@@ -1240,70 +1195,13 @@ void gfs2_glock_dq_uninit_m(unsigned int num_gh, struct gfs2_holder *ghs)
gfs2_glock_dq_uninit(&ghs[x]);
}
-static int gfs2_lm_hold_lvb(struct gfs2_sbd *sdp, void *lock, char **lvbp)
+void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state)
{
- int error = -EIO;
- if (!sdp->sd_lockstruct.ls_ops->lm_hold_lvb)
- return 0;
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- error = sdp->sd_lockstruct.ls_ops->lm_hold_lvb(lock, lvbp);
- return error;
-}
-
-/**
- * gfs2_lvb_hold - attach a LVB from a glock
- * @gl: The glock in question
- *
- */
-
-int gfs2_lvb_hold(struct gfs2_glock *gl)
-{
- int error;
-
- if (!atomic_read(&gl->gl_lvb_count)) {
- error = gfs2_lm_hold_lvb(gl->gl_sbd, gl->gl_lock, &gl->gl_lvb);
- if (error)
- return error;
- gfs2_glock_hold(gl);
- }
- atomic_inc(&gl->gl_lvb_count);
-
- return 0;
-}
-
-/**
- * gfs2_lvb_unhold - detach a LVB from a glock
- * @gl: The glock in question
- *
- */
-
-void gfs2_lvb_unhold(struct gfs2_glock *gl)
-{
- struct gfs2_sbd *sdp = gl->gl_sbd;
-
- gfs2_glock_hold(gl);
- gfs2_assert(gl->gl_sbd, atomic_read(&gl->gl_lvb_count) > 0);
- if (atomic_dec_and_test(&gl->gl_lvb_count)) {
- if (sdp->sd_lockstruct.ls_ops->lm_unhold_lvb)
- sdp->sd_lockstruct.ls_ops->lm_unhold_lvb(gl->gl_lock, gl->gl_lvb);
- gl->gl_lvb = NULL;
- gfs2_glock_put(gl);
- }
- gfs2_glock_put(gl);
-}
-
-static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
- unsigned int state)
-{
- struct gfs2_glock *gl;
unsigned long delay = 0;
unsigned long holdtime;
unsigned long now = jiffies;
- gl = gfs2_glock_find(sdp, name);
- if (!gl)
- return;
-
+ gfs2_glock_hold(gl);
holdtime = gl->gl_tchange + gl->gl_ops->go_min_hold_time;
if (time_before(now, holdtime))
delay = holdtime - now;
@@ -1317,74 +1215,37 @@ static void blocking_cb(struct gfs2_sbd *sdp, struct lm_lockname *name,
gfs2_glock_put(gl);
}
-static void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid)
-{
- struct gfs2_jdesc *jd;
-
- spin_lock(&sdp->sd_jindex_spin);
- list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
- if (jd->jd_jid != jid)
- continue;
- jd->jd_dirty = 1;
- break;
- }
- spin_unlock(&sdp->sd_jindex_spin);
-}
-
/**
- * gfs2_glock_cb - Callback used by locking module
- * @sdp: Pointer to the superblock
- * @type: Type of callback
- * @data: Type dependent data pointer
+ * gfs2_glock_complete - Callback used by locking
+ * @gl: Pointer to the glock
+ * @ret: The return value from the dlm
*
- * Called by the locking module when it wants to tell us something.
- * Either we need to drop a lock, one of our ASYNC requests completed, or
- * a journal from another client needs to be recovered.
*/
-void gfs2_glock_cb(void *cb_data, unsigned int type, void *data)
+void gfs2_glock_complete(struct gfs2_glock *gl, int ret)
{
- struct gfs2_sbd *sdp = cb_data;
-
- switch (type) {
- case LM_CB_NEED_E:
- blocking_cb(sdp, data, LM_ST_UNLOCKED);
- return;
-
- case LM_CB_NEED_D:
- blocking_cb(sdp, data, LM_ST_DEFERRED);
- return;
-
- case LM_CB_NEED_S:
- blocking_cb(sdp, data, LM_ST_SHARED);
- return;
-
- case LM_CB_ASYNC: {
- struct lm_async_cb *async = data;
- struct gfs2_glock *gl;
-
- down_read(&gfs2_umount_flush_sem);
- gl = gfs2_glock_find(sdp, &async->lc_name);
- if (gfs2_assert_warn(sdp, gl))
+ struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+ down_read(&gfs2_umount_flush_sem);
+ gl->gl_reply = ret;
+ if (unlikely(test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))) {
+ struct gfs2_holder *gh;
+ spin_lock(&gl->gl_spin);
+ gh = find_first_waiter(gl);
+ if ((!(gh && (gh->gh_flags & LM_FLAG_NOEXP)) &&
+ (gl->gl_target != LM_ST_UNLOCKED)) ||
+ ((ret & ~LM_OUT_ST_MASK) != 0))
+ set_bit(GLF_FROZEN, &gl->gl_flags);
+ spin_unlock(&gl->gl_spin);
+ if (test_bit(GLF_FROZEN, &gl->gl_flags)) {
+ up_read(&gfs2_umount_flush_sem);
return;
- gl->gl_reply = async->lc_ret;
- set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
- if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
- gfs2_glock_put(gl);
- up_read(&gfs2_umount_flush_sem);
- return;
- }
-
- case LM_CB_NEED_RECOVERY:
- gfs2_jdesc_make_dirty(sdp, *(unsigned int *)data);
- if (sdp->sd_recoverd_process)
- wake_up_process(sdp->sd_recoverd_process);
- return;
-
- default:
- gfs2_assert_warn(sdp, 0);
- return;
+ }
}
+ set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+ gfs2_glock_hold(gl);
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ gfs2_glock_put(gl);
+ up_read(&gfs2_umount_flush_sem);
}
/**
@@ -1515,6 +1376,27 @@ out:
return has_entries;
}
+
+/**
+ * thaw_glock - thaw out a glock which has an unprocessed reply waiting
+ * @gl: The glock to thaw
+ *
+ * N.B. When we freeze a glock, we leave a ref to the glock outstanding,
+ * so this has to result in the ref count being dropped by one.
+ */
+
+static void thaw_glock(struct gfs2_glock *gl)
+{
+ if (!test_and_clear_bit(GLF_FROZEN, &gl->gl_flags))
+ return;
+ down_read(&gfs2_umount_flush_sem);
+ set_bit(GLF_REPLY_PENDING, &gl->gl_flags);
+ gfs2_glock_hold(gl);
+ if (queue_delayed_work(glock_workqueue, &gl->gl_work, 0) == 0)
+ gfs2_glock_put(gl);
+ up_read(&gfs2_umount_flush_sem);
+}
+
/**
* clear_glock - look at a glock and see if we can free it from glock cache
* @gl: the glock to look at
@@ -1540,6 +1422,20 @@ static void clear_glock(struct gfs2_glock *gl)
}
/**
+ * gfs2_glock_thaw - Thaw any frozen glocks
+ * @sdp: The super block
+ *
+ */
+
+void gfs2_glock_thaw(struct gfs2_sbd *sdp)
+{
+ unsigned x;
+
+ for (x = 0; x < GFS2_GL_HASH_SIZE; x++)
+ examine_bucket(thaw_glock, sdp, x);
+}
+
+/**
* gfs2_gl_hash_clear - Empty out the glock hash table
* @sdp: the filesystem
* @wait: wait until it's all gone
@@ -1619,7 +1515,7 @@ static const char *hflags2str(char *buf, unsigned flags, unsigned long iflags)
if (flags & LM_FLAG_NOEXP)
*p++ = 'e';
if (flags & LM_FLAG_ANY)
- *p++ = 'a';
+ *p++ = 'A';
if (flags & LM_FLAG_PRIORITY)
*p++ = 'p';
if (flags & GL_ASYNC)
@@ -1683,6 +1579,10 @@ static const char *gflags2str(char *buf, const unsigned long *gflags)
*p++ = 'i';
if (test_bit(GLF_REPLY_PENDING, gflags))
*p++ = 'r';
+ if (test_bit(GLF_INITIAL, gflags))
+ *p++ = 'I';
+ if (test_bit(GLF_FROZEN, gflags))
+ *p++ = 'F';
*p = 0;
return buf;
}
@@ -1717,14 +1617,13 @@ static int __dump_glock(struct seq_file *seq, const struct gfs2_glock *gl)
dtime *= 1000000/HZ; /* demote time in uSec */
if (!test_bit(GLF_DEMOTE, &gl->gl_flags))
dtime = 0;
- gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu l:%d a:%d r:%d\n",
+ gfs2_print_dbg(seq, "G: s:%s n:%u/%llu f:%s t:%s d:%s/%llu a:%d r:%d\n",
state2str(gl->gl_state),
gl->gl_name.ln_type,
(unsigned long long)gl->gl_name.ln_number,
gflags2str(gflags_buf, &gl->gl_flags),
state2str(gl->gl_target),
state2str(gl->gl_demote_state), dtime,
- atomic_read(&gl->gl_lvb_count),
atomic_read(&gl->gl_ail_count),
atomic_read(&gl->gl_ref));
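The new reply path above parks completions that arrive while the lockspace has DFL_BLOCK_LOCKS set: gfs2_glock_complete() marks such glocks GLF_FROZEN and returns, and gfs2_glock_thaw()/thaw_glock() later clear the bit and requeue the glock work. A standalone model of that bit protocol (flag names abbreviated; the real code additionally holds gfs2_umount_flush_sem and a glock reference):

/* Userspace model of the freeze/requeue protocol above. */
#include <stdio.h>

#define FROZEN  (1u << 0)	/* GLF_FROZEN: reply parked */
#define PENDING (1u << 1)	/* GLF_REPLY_PENDING: work queued */

static void glock_complete(unsigned int *flags, int block_locks)
{
	if (block_locks) {	/* DFL_BLOCK_LOCKS: recovery in progress */
		*flags |= FROZEN;
		return;
	}
	*flags |= PENDING;
}

static void glock_thaw(unsigned int *flags)
{
	if (!(*flags & FROZEN))
		return;
	*flags &= ~FROZEN;
	*flags |= PENDING;	/* reprocess the parked reply */
}

int main(void)
{
	unsigned int flags = 0;

	glock_complete(&flags, 1);	/* reply arrives during recovery */
	glock_thaw(&flags);		/* recovery done */
	printf("pending=%d frozen=%d\n",
	       !!(flags & PENDING), !!(flags & FROZEN));	/* 1 0 */
	return 0;
}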
diff --git a/fs/gfs2/glock.h b/fs/gfs2/glock.h
index 543ec7ecfbda..a602a28f6f08 100644
--- a/fs/gfs2/glock.h
+++ b/fs/gfs2/glock.h
@@ -11,15 +11,130 @@
#define __GLOCK_DOT_H__
#include <linux/sched.h>
+#include <linux/parser.h>
#include "incore.h"
-/* Flags for lock requests; used in gfs2_holder gh_flag field.
- From lm_interface.h:
+/* Options for hostdata parser */
+
+enum {
+ Opt_jid,
+ Opt_id,
+ Opt_first,
+ Opt_nodir,
+ Opt_err,
+};
+
+/*
+ * lm_lockname types
+ */
+
+#define LM_TYPE_RESERVED 0x00
+#define LM_TYPE_NONDISK 0x01
+#define LM_TYPE_INODE 0x02
+#define LM_TYPE_RGRP 0x03
+#define LM_TYPE_META 0x04
+#define LM_TYPE_IOPEN 0x05
+#define LM_TYPE_FLOCK 0x06
+#define LM_TYPE_PLOCK 0x07
+#define LM_TYPE_QUOTA 0x08
+#define LM_TYPE_JOURNAL 0x09
+
+/*
+ * lm_lock() states
+ *
+ * SHARED is compatible with SHARED, not with DEFERRED or EX.
+ * DEFERRED is compatible with DEFERRED, not with SHARED or EX.
+ */
+
+#define LM_ST_UNLOCKED 0
+#define LM_ST_EXCLUSIVE 1
+#define LM_ST_DEFERRED 2
+#define LM_ST_SHARED 3
+
+/*
+ * lm_lock() flags
+ *
+ * LM_FLAG_TRY
+ * Don't wait to acquire the lock if it can't be granted immediately.
+ *
+ * LM_FLAG_TRY_1CB
+ * Send one blocking callback if TRY is set and the lock is not granted.
+ *
+ * LM_FLAG_NOEXP
+ * GFS sets this flag on lock requests it makes while doing journal recovery.
+ * These special requests should not be blocked due to the recovery like
+ * ordinary locks would be.
+ *
+ * LM_FLAG_ANY
+ * A SHARED request may also be granted in DEFERRED, or a DEFERRED request may
+ * also be granted in SHARED. The preferred state is whichever is compatible
+ * with other granted locks, or the specified state if no other locks exist.
+ *
+ * LM_FLAG_PRIORITY
+ * Override fairness considerations. Suppose a lock is held in a shared state
+ * and there is a pending request for the deferred state. A shared lock
+ * request with the priority flag would be allowed to bypass the deferred
+ * request and directly join the other shared lock. A shared lock request
+ * without the priority flag might be forced to wait until the deferred
+ * request had acquired and released the lock.
+ */
+
#define LM_FLAG_TRY 0x00000001
#define LM_FLAG_TRY_1CB 0x00000002
#define LM_FLAG_NOEXP 0x00000004
#define LM_FLAG_ANY 0x00000008
-#define LM_FLAG_PRIORITY 0x00000010 */
+#define LM_FLAG_PRIORITY 0x00000010
+#define GL_ASYNC 0x00000040
+#define GL_EXACT 0x00000080
+#define GL_SKIP 0x00000100
+#define GL_ATIME 0x00000200
+#define GL_NOCACHE 0x00000400
+
+/*
+ * lm_lock() and lm_async_cb return flags
+ *
+ * LM_OUT_ST_MASK
+ * Masks the lower two bits of lock state in the returned value.
+ *
+ * LM_OUT_CANCELED
+ * The lock request was canceled.
+ *
+ * LM_OUT_ASYNC
+ * The result of the request will be returned in an LM_CB_ASYNC callback.
+ *
+ */
+
+#define LM_OUT_ST_MASK 0x00000003
+#define LM_OUT_CANCELED 0x00000008
+#define LM_OUT_ASYNC 0x00000080
+#define LM_OUT_ERROR 0x00000100
+
+/*
+ * lm_recovery_done() messages
+ */
+
+#define LM_RD_GAVEUP 308
+#define LM_RD_SUCCESS 309
+
+#define GLR_TRYFAILED 13
+
+struct lm_lockops {
+ const char *lm_proto_name;
+ int (*lm_mount) (struct gfs2_sbd *sdp, const char *fsname);
+ void (*lm_unmount) (struct gfs2_sbd *sdp);
+ void (*lm_withdraw) (struct gfs2_sbd *sdp);
+ void (*lm_put_lock) (struct kmem_cache *cachep, void *gl);
+ unsigned int (*lm_lock) (struct gfs2_glock *gl,
+ unsigned int req_state, unsigned int flags);
+ void (*lm_cancel) (struct gfs2_glock *gl);
+ const match_table_t *lm_tokens;
+};
+
+#define LM_FLAG_TRY 0x00000001
+#define LM_FLAG_TRY_1CB 0x00000002
+#define LM_FLAG_NOEXP 0x00000004
+#define LM_FLAG_ANY 0x00000008
+#define LM_FLAG_PRIORITY 0x00000010
#define GL_ASYNC 0x00000040
#define GL_EXACT 0x00000080
@@ -128,10 +243,12 @@ static inline int gfs2_glock_nq_init(struct gfs2_glock *gl,
int gfs2_lvb_hold(struct gfs2_glock *gl);
void gfs2_lvb_unhold(struct gfs2_glock *gl);
-void gfs2_glock_cb(void *cb_data, unsigned int type, void *data);
+void gfs2_glock_cb(struct gfs2_glock *gl, unsigned int state);
+void gfs2_glock_complete(struct gfs2_glock *gl, int ret);
void gfs2_reclaim_glock(struct gfs2_sbd *sdp);
void gfs2_gl_hash_clear(struct gfs2_sbd *sdp);
void gfs2_glock_finish_truncate(struct gfs2_inode *ip);
+void gfs2_glock_thaw(struct gfs2_sbd *sdp);
int __init gfs2_glock_init(void);
void gfs2_glock_exit(void);
@@ -141,4 +258,6 @@ void gfs2_delete_debugfs_file(struct gfs2_sbd *sdp);
int gfs2_register_debugfs(void);
void gfs2_unregister_debugfs(void);
+extern const struct lm_lockops gfs2_dlm_ops;
+
#endif /* __GLOCK_DOT_H__ */
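With the pluggable lock harness gone, a locking backend is now just a const struct lm_lockops compiled into GFS2 and selected at mount time. Purely to illustrate the shape of the new interface, here is a hypothetical minimal backend; it is not the in-tree nolock handling (which lives elsewhere in this series) and may differ from it in detail:

/*
 * Hypothetical minimal backend, shown only to illustrate the ops table.
 */
#include <linux/slab.h>
#include "glock.h"

static void example_put_lock(struct kmem_cache *cachep, void *gl)
{
	kmem_cache_free(cachep, gl);	/* no lock-manager state to tear down */
}

static const struct lm_lockops example_ops = {
	.lm_proto_name	= "lock_example",
	.lm_put_lock	= example_put_lock,
	/*
	 * .lm_lock left NULL: judging by gfs2_lm_lock() above, the core
	 * then grants requests locally without calling out.
	 */
};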
diff --git a/fs/gfs2/glops.c b/fs/gfs2/glops.c
index 8522d3aa64fc..f07ede8cb9ba 100644
--- a/fs/gfs2/glops.c
+++ b/fs/gfs2/glops.c
@@ -12,7 +12,6 @@
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include <linux/bio.h>
#include "gfs2.h"
@@ -390,18 +389,6 @@ static int trans_go_demote_ok(const struct gfs2_glock *gl)
return 0;
}
-/**
- * quota_go_demote_ok - Check to see if it's ok to unlock a quota glock
- * @gl: the glock
- *
- * Returns: 1 if it's ok
- */
-
-static int quota_go_demote_ok(const struct gfs2_glock *gl)
-{
- return !atomic_read(&gl->gl_lvb_count);
-}
-
const struct gfs2_glock_operations gfs2_meta_glops = {
.go_xmote_th = meta_go_sync,
.go_type = LM_TYPE_META,
@@ -448,7 +435,6 @@ const struct gfs2_glock_operations gfs2_nondisk_glops = {
};
const struct gfs2_glock_operations gfs2_quota_glops = {
- .go_demote_ok = quota_go_demote_ok,
.go_type = LM_TYPE_QUOTA,
};
diff --git a/fs/gfs2/incore.h b/fs/gfs2/incore.h
index 608849d00021..8fe0675120ac 100644
--- a/fs/gfs2/incore.h
+++ b/fs/gfs2/incore.h
@@ -12,6 +12,8 @@
#include <linux/fs.h>
#include <linux/workqueue.h>
+#include <linux/dlm.h>
+#include <linux/buffer_head.h>
#define DIO_WAIT 0x00000010
#define DIO_METADATA 0x00000020
@@ -26,6 +28,7 @@ struct gfs2_trans;
struct gfs2_ail;
struct gfs2_jdesc;
struct gfs2_sbd;
+struct lm_lockops;
typedef void (*gfs2_glop_bh_t) (struct gfs2_glock *gl, unsigned int ret);
@@ -121,6 +124,28 @@ struct gfs2_bufdata {
struct list_head bd_ail_gl_list;
};
+/*
+ * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
+ * prefix of lock_dlm_ gets awkward.
+ */
+
+#define GDLM_STRNAME_BYTES 25
+#define GDLM_LVB_SIZE 32
+
+enum {
+ DFL_BLOCK_LOCKS = 0,
+};
+
+struct lm_lockname {
+ u64 ln_number;
+ unsigned int ln_type;
+};
+
+#define lm_name_equal(name1, name2) \
+ (((name1)->ln_number == (name2)->ln_number) && \
+ ((name1)->ln_type == (name2)->ln_type))
+
+
struct gfs2_glock_operations {
void (*go_xmote_th) (struct gfs2_glock *gl);
int (*go_xmote_bh) (struct gfs2_glock *gl, struct gfs2_holder *gh);
@@ -162,6 +187,8 @@ enum {
GLF_LFLUSH = 7,
GLF_INVALIDATE_IN_PROGRESS = 8,
GLF_REPLY_PENDING = 9,
+ GLF_INITIAL = 10,
+ GLF_FROZEN = 11,
};
struct gfs2_glock {
@@ -181,11 +208,9 @@ struct gfs2_glock {
struct list_head gl_holders;
const struct gfs2_glock_operations *gl_ops;
- void *gl_lock;
- char *gl_lvb;
- atomic_t gl_lvb_count;
-
- unsigned long gl_stamp;
+ char gl_strname[GDLM_STRNAME_BYTES];
+ struct dlm_lksb gl_lksb;
+ char gl_lvb[32];
unsigned long gl_tchange;
void *gl_object;
@@ -283,7 +308,9 @@ enum {
struct gfs2_quota_data {
struct list_head qd_list;
- unsigned int qd_count;
+ struct list_head qd_reclaim;
+
+ atomic_t qd_count;
u32 qd_id;
unsigned long qd_flags; /* QDF_... */
@@ -303,7 +330,6 @@ struct gfs2_quota_data {
u64 qd_sync_gen;
unsigned long qd_last_warn;
- unsigned long qd_last_touched;
};
struct gfs2_trans {
@@ -406,7 +432,6 @@ struct gfs2_tune {
unsigned int gt_quota_warn_period; /* Secs between quota warn msgs */
unsigned int gt_quota_scale_num; /* Numerator */
unsigned int gt_quota_scale_den; /* Denominator */
- unsigned int gt_quota_cache_secs;
unsigned int gt_quota_quantum; /* Secs between syncs to quota file */
unsigned int gt_new_files_jdata;
unsigned int gt_max_readahead; /* Max bytes to read-ahead from disk */
@@ -447,6 +472,30 @@ struct gfs2_sb_host {
char sb_locktable[GFS2_LOCKNAME_LEN];
};
+/*
+ * lm_mount() return values
+ *
+ * ls_jid - the journal ID this node should use
+ * ls_first - this node is the first to mount the file system
+ * ls_lockspace - lock module's context for this file system
+ * ls_ops - lock module's functions
+ */
+
+struct lm_lockstruct {
+ u32 ls_id;
+ unsigned int ls_jid;
+ unsigned int ls_first;
+ unsigned int ls_first_done;
+ unsigned int ls_nodir;
+ const struct lm_lockops *ls_ops;
+ unsigned long ls_flags;
+ dlm_lockspace_t *ls_dlm;
+
+ int ls_recover_jid;
+ int ls_recover_jid_done;
+ int ls_recover_jid_status;
+};
+
struct gfs2_sbd {
struct super_block *sd_vfs;
struct kobject sd_kobj;
@@ -520,7 +569,6 @@ struct gfs2_sbd {
spinlock_t sd_jindex_spin;
struct mutex sd_jindex_mutex;
unsigned int sd_journals;
- unsigned long sd_jindex_refresh_time;
struct gfs2_jdesc *sd_jdesc;
struct gfs2_holder sd_journal_gh;
@@ -540,7 +588,6 @@ struct gfs2_sbd {
struct list_head sd_quota_list;
atomic_t sd_quota_count;
- spinlock_t sd_quota_spin;
struct mutex sd_quota_mutex;
wait_queue_head_t sd_quota_wait;
struct list_head sd_trunc_list;
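The DLM resource name for a glock is built with "%8x%16llx": 8 hex digits of lock type plus 16 of lock number, 24 characters in all, which is why GDLM_STRNAME_BYTES grows to 25 here (24 plus the terminating NUL; the old module's 24-byte buffer left no room for it and passed an explicit length instead). A standalone check:

/* Standalone check of the 24-character resource name format. */
#include <stdio.h>
#include <string.h>

int main(void)
{
	char name[25];			/* GDLM_STRNAME_BYTES */
	unsigned int type = 0x02;	/* LM_TYPE_INODE */
	unsigned long long number = 0x1234ULL;

	snprintf(name, sizeof(name), "%8x%16llx", type, number);
	printf("\"%s\" len=%zu\n", name, strlen(name));	/* len=24 */
	return 0;
}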
diff --git a/fs/gfs2/inode.c b/fs/gfs2/inode.c
index 3b87c188da41..7b277d449155 100644
--- a/fs/gfs2/inode.c
+++ b/fs/gfs2/inode.c
@@ -16,7 +16,6 @@
#include <linux/sort.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
-#include <linux/lm_interface.h>
#include <linux/security.h>
#include <linux/time.h>
@@ -137,16 +136,16 @@ void gfs2_set_iop(struct inode *inode)
if (S_ISREG(mode)) {
inode->i_op = &gfs2_file_iops;
- if (sdp->sd_args.ar_localflocks)
- inode->i_fop = &gfs2_file_fops_nolock;
+ if (gfs2_localflocks(sdp))
+ inode->i_fop = gfs2_file_fops_nolock;
else
- inode->i_fop = &gfs2_file_fops;
+ inode->i_fop = gfs2_file_fops;
} else if (S_ISDIR(mode)) {
inode->i_op = &gfs2_dir_iops;
- if (sdp->sd_args.ar_localflocks)
- inode->i_fop = &gfs2_dir_fops_nolock;
+ if (gfs2_localflocks(sdp))
+ inode->i_fop = gfs2_dir_fops_nolock;
else
- inode->i_fop = &gfs2_dir_fops;
+ inode->i_fop = gfs2_dir_fops;
} else if (S_ISLNK(mode)) {
inode->i_op = &gfs2_symlink_iops;
} else {
diff --git a/fs/gfs2/inode.h b/fs/gfs2/inode.h
index d5329364cdff..dca4fee3078b 100644
--- a/fs/gfs2/inode.h
+++ b/fs/gfs2/inode.h
@@ -101,12 +101,26 @@ void gfs2_dinode_print(const struct gfs2_inode *ip);
extern const struct inode_operations gfs2_file_iops;
extern const struct inode_operations gfs2_dir_iops;
extern const struct inode_operations gfs2_symlink_iops;
-extern const struct file_operations gfs2_file_fops;
-extern const struct file_operations gfs2_dir_fops;
-extern const struct file_operations gfs2_file_fops_nolock;
-extern const struct file_operations gfs2_dir_fops_nolock;
+extern const struct file_operations *gfs2_file_fops_nolock;
+extern const struct file_operations *gfs2_dir_fops_nolock;
extern void gfs2_set_inode_flags(struct inode *inode);
+
+#ifdef CONFIG_GFS2_FS_LOCKING_DLM
+extern const struct file_operations *gfs2_file_fops;
+extern const struct file_operations *gfs2_dir_fops;
+static inline int gfs2_localflocks(const struct gfs2_sbd *sdp)
+{
+ return sdp->sd_args.ar_localflocks;
+}
+#else /* Single node only */
+#define gfs2_file_fops NULL
+#define gfs2_dir_fops NULL
+static inline int gfs2_localflocks(const struct gfs2_sbd *sdp)
+{
+ return 1;
+}
+#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
#endif /* __INODE_DOT_H__ */
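The #ifdef above lets single-node builds drop the cluster file_operations entirely: the fops pointers collapse to NULL and gfs2_localflocks() becomes a compile-time constant 1, so callers such as gfs2_set_iop() always take the nolock variants without consulting mount arguments. A userspace model of the pattern, with invented names:

/* Userspace model of the compile-time fops switch above. */
#include <stdio.h>

struct fops { const char *name; };

static const struct fops nolock_fops = { "nolock" };

#ifdef WITH_CLUSTER
static const struct fops cluster_fops = { "cluster" };
static const struct fops *file_fops = &cluster_fops;
static int localflocks(void) { return 0; }
#else
#define file_fops ((const struct fops *)NULL)	/* never dereferenced */
static int localflocks(void) { return 1; }
#endif

int main(void)
{
	const struct fops *f = localflocks() ? &nolock_fops : file_fops;

	printf("%s\n", f->name);	/* "nolock" without WITH_CLUSTER */
	return 0;
}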
diff --git a/fs/gfs2/lock_dlm.c b/fs/gfs2/lock_dlm.c
new file mode 100644
index 000000000000..a0bb7d2251a0
--- /dev/null
+++ b/fs/gfs2/lock_dlm.c
@@ -0,0 +1,240 @@
+/*
+ * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
+ * Copyright (C) 2004-2009 Red Hat, Inc. All rights reserved.
+ *
+ * This copyrighted material is made available to anyone wishing to use,
+ * modify, copy, or redistribute it subject to the terms and conditions
+ * of the GNU General Public License version 2.
+ */
+
+#include <linux/fs.h>
+#include <linux/dlm.h>
+#include <linux/types.h>
+#include <linux/gfs2_ondisk.h>
+
+#include "incore.h"
+#include "glock.h"
+#include "util.h"
+
+
+static void gdlm_ast(void *arg)
+{
+ struct gfs2_glock *gl = arg;
+ unsigned ret = gl->gl_state;
+
+ BUG_ON(gl->gl_lksb.sb_flags & DLM_SBF_DEMOTED);
+
+ if (gl->gl_lksb.sb_flags & DLM_SBF_VALNOTVALID)
+ memset(gl->gl_lvb, 0, GDLM_LVB_SIZE);
+
+ switch (gl->gl_lksb.sb_status) {
+ case -DLM_EUNLOCK: /* Unlocked, so glock can be freed */
+ kmem_cache_free(gfs2_glock_cachep, gl);
+ return;
+ case -DLM_ECANCEL: /* Cancel while getting lock */
+ ret |= LM_OUT_CANCELED;
+ goto out;
+ case -EAGAIN: /* Try lock fails */
+ goto out;
+ case -EINVAL: /* Invalid */
+ case -ENOMEM: /* Out of memory */
+ ret |= LM_OUT_ERROR;
+ goto out;
+ case 0: /* Success */
+ break;
+ default: /* Something unexpected */
+ BUG();
+ }
+
+ ret = gl->gl_target;
+ if (gl->gl_lksb.sb_flags & DLM_SBF_ALTMODE) {
+ if (gl->gl_target == LM_ST_SHARED)
+ ret = LM_ST_DEFERRED;
+ else if (gl->gl_target == LM_ST_DEFERRED)
+ ret = LM_ST_SHARED;
+ else
+ BUG();
+ }
+
+ set_bit(GLF_INITIAL, &gl->gl_flags);
+ gfs2_glock_complete(gl, ret);
+ return;
+out:
+ if (!test_bit(GLF_INITIAL, &gl->gl_flags))
+ gl->gl_lksb.sb_lkid = 0;
+ gfs2_glock_complete(gl, ret);
+}
+
+static void gdlm_bast(void *arg, int mode)
+{
+ struct gfs2_glock *gl = arg;
+
+ switch (mode) {
+ case DLM_LOCK_EX:
+ gfs2_glock_cb(gl, LM_ST_UNLOCKED);
+ break;
+ case DLM_LOCK_CW:
+ gfs2_glock_cb(gl, LM_ST_DEFERRED);
+ break;
+ case DLM_LOCK_PR:
+ gfs2_glock_cb(gl, LM_ST_SHARED);
+ break;
+ default:
+ printk(KERN_ERR "unknown bast mode %d", mode);
+ BUG();
+ }
+}
+
+/* convert gfs lock-state to dlm lock-mode */
+
+static int make_mode(const unsigned int lmstate)
+{
+ switch (lmstate) {
+ case LM_ST_UNLOCKED:
+ return DLM_LOCK_NL;
+ case LM_ST_EXCLUSIVE:
+ return DLM_LOCK_EX;
+ case LM_ST_DEFERRED:
+ return DLM_LOCK_CW;
+ case LM_ST_SHARED:
+ return DLM_LOCK_PR;
+ }
+ printk(KERN_ERR "unknown LM state %d", lmstate);
+ BUG();
+ return -1;
+}
+
+static u32 make_flags(const u32 lkid, const unsigned int gfs_flags,
+ const int req)
+{
+ u32 lkf = 0;
+
+ if (gfs_flags & LM_FLAG_TRY)
+ lkf |= DLM_LKF_NOQUEUE;
+
+ if (gfs_flags & LM_FLAG_TRY_1CB) {
+ lkf |= DLM_LKF_NOQUEUE;
+ lkf |= DLM_LKF_NOQUEUEBAST;
+ }
+
+ if (gfs_flags & LM_FLAG_PRIORITY) {
+ lkf |= DLM_LKF_NOORDER;
+ lkf |= DLM_LKF_HEADQUE;
+ }
+
+ if (gfs_flags & LM_FLAG_ANY) {
+ if (req == DLM_LOCK_PR)
+ lkf |= DLM_LKF_ALTCW;
+ else if (req == DLM_LOCK_CW)
+ lkf |= DLM_LKF_ALTPR;
+ else
+ BUG();
+ }
+
+ if (lkid != 0)
+ lkf |= DLM_LKF_CONVERT;
+
+ lkf |= DLM_LKF_VALBLK;
+
+ return lkf;
+}
+
+static unsigned int gdlm_lock(struct gfs2_glock *gl,
+ unsigned int req_state, unsigned int flags)
+{
+ struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+ int error;
+ int req;
+ u32 lkf;
+
+ req = make_mode(req_state);
+ lkf = make_flags(gl->gl_lksb.sb_lkid, flags, req);
+
+ /*
+ * Submit the actual lock request.
+ */
+
+ error = dlm_lock(ls->ls_dlm, req, &gl->gl_lksb, lkf, gl->gl_strname,
+ GDLM_STRNAME_BYTES - 1, 0, gdlm_ast, gl, gdlm_bast);
+ if (error == -EAGAIN)
+ return 0;
+ if (error)
+ return LM_OUT_ERROR;
+ return LM_OUT_ASYNC;
+}
+
+static void gdlm_put_lock(struct kmem_cache *cachep, void *ptr)
+{
+ struct gfs2_glock *gl = ptr;
+ struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+ int error;
+
+ if (gl->gl_lksb.sb_lkid == 0) {
+ kmem_cache_free(cachep, gl);
+ return;
+ }
+
+ error = dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_VALBLK,
+ NULL, gl);
+ if (error) {
+ printk(KERN_ERR "gdlm_unlock %x,%llx err=%d\n",
+ gl->gl_name.ln_type,
+ (unsigned long long)gl->gl_name.ln_number, error);
+ return;
+ }
+}
+
+static void gdlm_cancel(struct gfs2_glock *gl)
+{
+ struct lm_lockstruct *ls = &gl->gl_sbd->sd_lockstruct;
+ dlm_unlock(ls->ls_dlm, gl->gl_lksb.sb_lkid, DLM_LKF_CANCEL, NULL, gl);
+}
+
+static int gdlm_mount(struct gfs2_sbd *sdp, const char *fsname)
+{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ int error;
+
+ if (fsname == NULL) {
+ fs_info(sdp, "no fsname found\n");
+ return -EINVAL;
+ }
+
+ error = dlm_new_lockspace(fsname, strlen(fsname), &ls->ls_dlm,
+ DLM_LSFL_FS | DLM_LSFL_NEWEXCL |
+ (ls->ls_nodir ? DLM_LSFL_NODIR : 0),
+ GDLM_LVB_SIZE);
+ if (error)
+ printk(KERN_ERR "dlm_new_lockspace error %d", error);
+
+ return error;
+}
+
+static void gdlm_unmount(struct gfs2_sbd *sdp)
+{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+
+ if (ls->ls_dlm) {
+ dlm_release_lockspace(ls->ls_dlm, 2);
+ ls->ls_dlm = NULL;
+ }
+}
+
+static const match_table_t dlm_tokens = {
+ { Opt_jid, "jid=%d"},
+ { Opt_id, "id=%d"},
+ { Opt_first, "first=%d"},
+ { Opt_nodir, "nodir=%d"},
+ { Opt_err, NULL },
+};
+
+const struct lm_lockops gfs2_dlm_ops = {
+ .lm_proto_name = "lock_dlm",
+ .lm_mount = gdlm_mount,
+ .lm_unmount = gdlm_unmount,
+ .lm_put_lock = gdlm_put_lock,
+ .lm_lock = gdlm_lock,
+ .lm_cancel = gdlm_cancel,
+ .lm_tokens = &dlm_tokens,
+};
+
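gdlm_lock() above reduces a GFS2 request to a DLM mode plus a flag word: make_mode() maps the four LM_ST_* states onto NL/EX/CW/PR, and make_flags() ORs in the queueing and conversion behaviour. A standalone example of the translation for a SHARED try-request on a lock that already has a DLM lkid (the DLM_* values mirror linux/dlm.h and are repeated only so the example stands alone):

/* Standalone illustration of the make_mode()/make_flags() translation. */
#include <stdio.h>

#define DLM_LOCK_PR	3		/* make_mode(LM_ST_SHARED) */
#define DLM_LKF_NOQUEUE	0x00000001
#define DLM_LKF_CONVERT	0x00000004
#define DLM_LKF_VALBLK	0x00000008

int main(void)
{
	unsigned int req = DLM_LOCK_PR;
	unsigned int lkf = DLM_LKF_VALBLK;	/* always set by make_flags() */

	lkf |= DLM_LKF_NOQUEUE;			/* LM_FLAG_TRY */
	lkf |= DLM_LKF_CONVERT;			/* sb_lkid != 0: conversion */

	printf("dlm_lock(req=%u, lkf=0x%x)\n", req, lkf);	/* 3, 0xd */
	return 0;
}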
diff --git a/fs/gfs2/locking.c b/fs/gfs2/locking.c
deleted file mode 100644
index 523243a13a21..000000000000
--- a/fs/gfs2/locking.c
+++ /dev/null
@@ -1,232 +0,0 @@
-/*
- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/wait.h>
-#include <linux/sched.h>
-#include <linux/kmod.h>
-#include <linux/fs.h>
-#include <linux/delay.h>
-#include <linux/lm_interface.h>
-
-struct lmh_wrapper {
- struct list_head lw_list;
- const struct lm_lockops *lw_ops;
-};
-
-static int nolock_mount(char *table_name, char *host_data,
- lm_callback_t cb, void *cb_data,
- unsigned int min_lvb_size, int flags,
- struct lm_lockstruct *lockstruct,
- struct kobject *fskobj);
-
-/* List of registered low-level locking protocols. A file system selects one
- of them by name at mount time, e.g. lock_nolock, lock_dlm. */
-
-static const struct lm_lockops nolock_ops = {
- .lm_proto_name = "lock_nolock",
- .lm_mount = nolock_mount,
-};
-
-static struct lmh_wrapper nolock_proto = {
- .lw_list = LIST_HEAD_INIT(nolock_proto.lw_list),
- .lw_ops = &nolock_ops,
-};
-
-static LIST_HEAD(lmh_list);
-static DEFINE_MUTEX(lmh_lock);
-
-static int nolock_mount(char *table_name, char *host_data,
- lm_callback_t cb, void *cb_data,
- unsigned int min_lvb_size, int flags,
- struct lm_lockstruct *lockstruct,
- struct kobject *fskobj)
-{
- char *c;
- unsigned int jid;
-
- c = strstr(host_data, "jid=");
- if (!c)
- jid = 0;
- else {
- c += 4;
- sscanf(c, "%u", &jid);
- }
-
- lockstruct->ls_jid = jid;
- lockstruct->ls_first = 1;
- lockstruct->ls_lvb_size = min_lvb_size;
- lockstruct->ls_ops = &nolock_ops;
- lockstruct->ls_flags = LM_LSFLAG_LOCAL;
-
- return 0;
-}
-
-/**
- * gfs2_register_lockproto - Register a low-level locking protocol
- * @proto: the protocol definition
- *
- * Returns: 0 on success, -EXXX on failure
- */
-
-int gfs2_register_lockproto(const struct lm_lockops *proto)
-{
- struct lmh_wrapper *lw;
-
- mutex_lock(&lmh_lock);
-
- list_for_each_entry(lw, &lmh_list, lw_list) {
- if (!strcmp(lw->lw_ops->lm_proto_name, proto->lm_proto_name)) {
- mutex_unlock(&lmh_lock);
- printk(KERN_INFO "GFS2: protocol %s already exists\n",
- proto->lm_proto_name);
- return -EEXIST;
- }
- }
-
- lw = kzalloc(sizeof(struct lmh_wrapper), GFP_KERNEL);
- if (!lw) {
- mutex_unlock(&lmh_lock);
- return -ENOMEM;
- }
-
- lw->lw_ops = proto;
- list_add(&lw->lw_list, &lmh_list);
-
- mutex_unlock(&lmh_lock);
-
- return 0;
-}
-
-/**
- * gfs2_unregister_lockproto - Unregister a low-level locking protocol
- * @proto: the protocol definition
- *
- */
-
-void gfs2_unregister_lockproto(const struct lm_lockops *proto)
-{
- struct lmh_wrapper *lw;
-
- mutex_lock(&lmh_lock);
-
- list_for_each_entry(lw, &lmh_list, lw_list) {
- if (!strcmp(lw->lw_ops->lm_proto_name, proto->lm_proto_name)) {
- list_del(&lw->lw_list);
- mutex_unlock(&lmh_lock);
- kfree(lw);
- return;
- }
- }
-
- mutex_unlock(&lmh_lock);
-
- printk(KERN_WARNING "GFS2: can't unregister lock protocol %s\n",
- proto->lm_proto_name);
-}
-
-/**
- * gfs2_mount_lockproto - Mount a lock protocol
- * @proto_name - the name of the protocol
- * @table_name - the name of the lock space
- * @host_data - data specific to this host
- * @cb - the callback to the code using the lock module
- * @sdp - The GFS2 superblock
- * @min_lvb_size - the mininum LVB size that the caller can deal with
- * @flags - LM_MFLAG_*
- * @lockstruct - a structure returned describing the mount
- *
- * Returns: 0 on success, -EXXX on failure
- */
-
-int gfs2_mount_lockproto(char *proto_name, char *table_name, char *host_data,
- lm_callback_t cb, void *cb_data,
- unsigned int min_lvb_size, int flags,
- struct lm_lockstruct *lockstruct,
- struct kobject *fskobj)
-{
- struct lmh_wrapper *lw = NULL;
- int try = 0;
- int error, found;
-
-
-retry:
- mutex_lock(&lmh_lock);
-
- if (list_empty(&nolock_proto.lw_list))
- list_add(&nolock_proto.lw_list, &lmh_list);
-
- found = 0;
- list_for_each_entry(lw, &lmh_list, lw_list) {
- if (!strcmp(lw->lw_ops->lm_proto_name, proto_name)) {
- found = 1;
- break;
- }
- }
-
- if (!found) {
- if (!try && capable(CAP_SYS_MODULE)) {
- try = 1;
- mutex_unlock(&lmh_lock);
- request_module(proto_name);
- goto retry;
- }
- printk(KERN_INFO "GFS2: can't find protocol %s\n", proto_name);
- error = -ENOENT;
- goto out;
- }
-
- if (lw->lw_ops->lm_owner &&
- !try_module_get(lw->lw_ops->lm_owner)) {
- try = 0;
- mutex_unlock(&lmh_lock);
- msleep(1000);
- goto retry;
- }
-
- error = lw->lw_ops->lm_mount(table_name, host_data, cb, cb_data,
- min_lvb_size, flags, lockstruct, fskobj);
- if (error)
- module_put(lw->lw_ops->lm_owner);
-out:
- mutex_unlock(&lmh_lock);
- return error;
-}
-
-void gfs2_unmount_lockproto(struct lm_lockstruct *lockstruct)
-{
- mutex_lock(&lmh_lock);
- if (lockstruct->ls_ops->lm_unmount)
- lockstruct->ls_ops->lm_unmount(lockstruct->ls_lockspace);
- if (lockstruct->ls_ops->lm_owner)
- module_put(lockstruct->ls_ops->lm_owner);
- mutex_unlock(&lmh_lock);
-}
-
-/**
- * gfs2_withdraw_lockproto - abnormally unmount a lock module
- * @lockstruct: the lockstruct passed into mount
- *
- */
-
-void gfs2_withdraw_lockproto(struct lm_lockstruct *lockstruct)
-{
- mutex_lock(&lmh_lock);
- lockstruct->ls_ops->lm_withdraw(lockstruct->ls_lockspace);
- if (lockstruct->ls_ops->lm_owner)
- module_put(lockstruct->ls_ops->lm_owner);
- mutex_unlock(&lmh_lock);
-}
-
-EXPORT_SYMBOL_GPL(gfs2_register_lockproto);
-EXPORT_SYMBOL_GPL(gfs2_unregister_lockproto);
-
diff --git a/fs/gfs2/locking/dlm/Makefile b/fs/gfs2/locking/dlm/Makefile
deleted file mode 100644
index 2609bb6cd013..000000000000
--- a/fs/gfs2/locking/dlm/Makefile
+++ /dev/null
@@ -1,3 +0,0 @@
-obj-$(CONFIG_GFS2_FS_LOCKING_DLM) += lock_dlm.o
-lock_dlm-y := lock.o main.o mount.o sysfs.o thread.o
-
diff --git a/fs/gfs2/locking/dlm/lock.c b/fs/gfs2/locking/dlm/lock.c
deleted file mode 100644
index 2482c9047505..000000000000
--- a/fs/gfs2/locking/dlm/lock.c
+++ /dev/null
@@ -1,708 +0,0 @@
-/*
- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#include "lock_dlm.h"
-
-static char junk_lvb[GDLM_LVB_SIZE];
-
-
-/* convert dlm lock-mode to gfs lock-state */
-
-static s16 gdlm_make_lmstate(s16 dlmmode)
-{
- switch (dlmmode) {
- case DLM_LOCK_IV:
- case DLM_LOCK_NL:
- return LM_ST_UNLOCKED;
- case DLM_LOCK_EX:
- return LM_ST_EXCLUSIVE;
- case DLM_LOCK_CW:
- return LM_ST_DEFERRED;
- case DLM_LOCK_PR:
- return LM_ST_SHARED;
- }
- gdlm_assert(0, "unknown DLM mode %d", dlmmode);
- return -1;
-}
-
-/* A lock placed on this queue is re-submitted to DLM as soon as the lock_dlm
- thread gets to it. */
-
-static void queue_submit(struct gdlm_lock *lp)
-{
- struct gdlm_ls *ls = lp->ls;
-
- spin_lock(&ls->async_lock);
- list_add_tail(&lp->delay_list, &ls->submit);
- spin_unlock(&ls->async_lock);
- wake_up(&ls->thread_wait);
-}
-
-static void wake_up_ast(struct gdlm_lock *lp)
-{
- clear_bit(LFL_AST_WAIT, &lp->flags);
- smp_mb__after_clear_bit();
- wake_up_bit(&lp->flags, LFL_AST_WAIT);
-}
-
-static void gdlm_delete_lp(struct gdlm_lock *lp)
-{
- struct gdlm_ls *ls = lp->ls;
-
- spin_lock(&ls->async_lock);
- if (!list_empty(&lp->delay_list))
- list_del_init(&lp->delay_list);
- ls->all_locks_count--;
- spin_unlock(&ls->async_lock);
-
- kfree(lp);
-}
-
-static void gdlm_queue_delayed(struct gdlm_lock *lp)
-{
- struct gdlm_ls *ls = lp->ls;
-
- spin_lock(&ls->async_lock);
- list_add_tail(&lp->delay_list, &ls->delayed);
- spin_unlock(&ls->async_lock);
-}
-
-static void process_complete(struct gdlm_lock *lp)
-{
- struct gdlm_ls *ls = lp->ls;
- struct lm_async_cb acb;
-
- memset(&acb, 0, sizeof(acb));
-
- if (lp->lksb.sb_status == -DLM_ECANCEL) {
- log_info("complete dlm cancel %x,%llx flags %lx",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number,
- lp->flags);
-
- lp->req = lp->cur;
- acb.lc_ret |= LM_OUT_CANCELED;
- if (lp->cur == DLM_LOCK_IV)
- lp->lksb.sb_lkid = 0;
- goto out;
- }
-
- if (test_and_clear_bit(LFL_DLM_UNLOCK, &lp->flags)) {
- if (lp->lksb.sb_status != -DLM_EUNLOCK) {
- log_info("unlock sb_status %d %x,%llx flags %lx",
- lp->lksb.sb_status, lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number,
- lp->flags);
- return;
- }
-
- lp->cur = DLM_LOCK_IV;
- lp->req = DLM_LOCK_IV;
- lp->lksb.sb_lkid = 0;
-
- if (test_and_clear_bit(LFL_UNLOCK_DELETE, &lp->flags)) {
- gdlm_delete_lp(lp);
- return;
- }
- goto out;
- }
-
- if (lp->lksb.sb_flags & DLM_SBF_VALNOTVALID)
- memset(lp->lksb.sb_lvbptr, 0, GDLM_LVB_SIZE);
-
- if (lp->lksb.sb_flags & DLM_SBF_ALTMODE) {
- if (lp->req == DLM_LOCK_PR)
- lp->req = DLM_LOCK_CW;
- else if (lp->req == DLM_LOCK_CW)
- lp->req = DLM_LOCK_PR;
- }
-
- /*
- * A canceled lock request. The lock was just taken off the delayed
- * list and was never even submitted to dlm.
- */
-
- if (test_and_clear_bit(LFL_CANCEL, &lp->flags)) {
- log_info("complete internal cancel %x,%llx",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number);
- lp->req = lp->cur;
- acb.lc_ret |= LM_OUT_CANCELED;
- goto out;
- }
-
- /*
- * An error occurred.
- */
-
- if (lp->lksb.sb_status) {
- /* a "normal" error */
- if ((lp->lksb.sb_status == -EAGAIN) &&
- (lp->lkf & DLM_LKF_NOQUEUE)) {
- lp->req = lp->cur;
- if (lp->cur == DLM_LOCK_IV)
- lp->lksb.sb_lkid = 0;
- goto out;
- }
-
- /* this could only happen with cancels I think */
- log_info("ast sb_status %d %x,%llx flags %lx",
- lp->lksb.sb_status, lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number,
- lp->flags);
- return;
- }
-
- /*
- * This is an AST for an EX->EX conversion for sync_lvb from GFS.
- */
-
- if (test_and_clear_bit(LFL_SYNC_LVB, &lp->flags)) {
- wake_up_ast(lp);
- return;
- }
-
- /*
- * A lock has been demoted to NL because it initially completed during
- * BLOCK_LOCKS. Now it must be requested in the originally requested
- * mode.
- */
-
- if (test_and_clear_bit(LFL_REREQUEST, &lp->flags)) {
- gdlm_assert(lp->req == DLM_LOCK_NL, "%x,%llx",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number);
- gdlm_assert(lp->prev_req > DLM_LOCK_NL, "%x,%llx",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number);
-
- lp->cur = DLM_LOCK_NL;
- lp->req = lp->prev_req;
- lp->prev_req = DLM_LOCK_IV;
- lp->lkf &= ~DLM_LKF_CONVDEADLK;
-
- set_bit(LFL_NOCACHE, &lp->flags);
-
- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
- !test_bit(LFL_NOBLOCK, &lp->flags))
- gdlm_queue_delayed(lp);
- else
- queue_submit(lp);
- return;
- }
-
- /*
- * A request is granted during dlm recovery. It may be granted
- * because the locks of a failed node were cleared. In that case,
- * there may be inconsistent data beneath this lock and we must wait
- * for recovery to complete to use it. When gfs recovery is done this
- * granted lock will be converted to NL and then reacquired in this
- * granted state.
- */
-
- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
- !test_bit(LFL_NOBLOCK, &lp->flags) &&
- lp->req != DLM_LOCK_NL) {
-
- lp->cur = lp->req;
- lp->prev_req = lp->req;
- lp->req = DLM_LOCK_NL;
- lp->lkf |= DLM_LKF_CONVERT;
- lp->lkf &= ~DLM_LKF_CONVDEADLK;
-
- log_debug("rereq %x,%llx id %x %d,%d",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number,
- lp->lksb.sb_lkid, lp->cur, lp->req);
-
- set_bit(LFL_REREQUEST, &lp->flags);
- queue_submit(lp);
- return;
- }
-
- /*
- * DLM demoted the lock to NL before it was granted so GFS must be
- * told it cannot cache data for this lock.
- */
-
- if (lp->lksb.sb_flags & DLM_SBF_DEMOTED)
- set_bit(LFL_NOCACHE, &lp->flags);
-
-out:
- /*
- * This is an internal lock_dlm lock
- */
-
- if (test_bit(LFL_INLOCK, &lp->flags)) {
- clear_bit(LFL_NOBLOCK, &lp->flags);
- lp->cur = lp->req;
- wake_up_ast(lp);
- return;
- }
-
- /*
- * Normal completion of a lock request. Tell GFS it now has the lock.
- */
-
- clear_bit(LFL_NOBLOCK, &lp->flags);
- lp->cur = lp->req;
-
- acb.lc_name = lp->lockname;
- acb.lc_ret |= gdlm_make_lmstate(lp->cur);
-
- ls->fscb(ls->sdp, LM_CB_ASYNC, &acb);
-}
-
-static void gdlm_ast(void *astarg)
-{
- struct gdlm_lock *lp = astarg;
- clear_bit(LFL_ACTIVE, &lp->flags);
- process_complete(lp);
-}
-
-static void process_blocking(struct gdlm_lock *lp, int bast_mode)
-{
- struct gdlm_ls *ls = lp->ls;
- unsigned int cb = 0;
-
- switch (gdlm_make_lmstate(bast_mode)) {
- case LM_ST_EXCLUSIVE:
- cb = LM_CB_NEED_E;
- break;
- case LM_ST_DEFERRED:
- cb = LM_CB_NEED_D;
- break;
- case LM_ST_SHARED:
- cb = LM_CB_NEED_S;
- break;
- default:
- gdlm_assert(0, "unknown bast mode %u", bast_mode);
- }
-
- ls->fscb(ls->sdp, cb, &lp->lockname);
-}
-
-
-static void gdlm_bast(void *astarg, int mode)
-{
- struct gdlm_lock *lp = astarg;
-
- if (!mode) {
- printk(KERN_INFO "lock_dlm: bast mode zero %x,%llx\n",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number);
- return;
- }
-
- process_blocking(lp, mode);
-}
-
-/* convert gfs lock-state to dlm lock-mode */
-
-static s16 make_mode(s16 lmstate)
-{
- switch (lmstate) {
- case LM_ST_UNLOCKED:
- return DLM_LOCK_NL;
- case LM_ST_EXCLUSIVE:
- return DLM_LOCK_EX;
- case LM_ST_DEFERRED:
- return DLM_LOCK_CW;
- case LM_ST_SHARED:
- return DLM_LOCK_PR;
- }
- gdlm_assert(0, "unknown LM state %d", lmstate);
- return -1;
-}
-
-
-/* verify agreement with GFS on the current lock state, NB: DLM_LOCK_NL and
- DLM_LOCK_IV are both considered LM_ST_UNLOCKED by GFS. */
-
-static void check_cur_state(struct gdlm_lock *lp, unsigned int cur_state)
-{
- s16 cur = make_mode(cur_state);
- if (lp->cur != DLM_LOCK_IV)
- gdlm_assert(lp->cur == cur, "%d, %d", lp->cur, cur);
-}
-
-static inline unsigned int make_flags(struct gdlm_lock *lp,
- unsigned int gfs_flags,
- s16 cur, s16 req)
-{
- unsigned int lkf = 0;
-
- if (gfs_flags & LM_FLAG_TRY)
- lkf |= DLM_LKF_NOQUEUE;
-
- if (gfs_flags & LM_FLAG_TRY_1CB) {
- lkf |= DLM_LKF_NOQUEUE;
- lkf |= DLM_LKF_NOQUEUEBAST;
- }
-
- if (gfs_flags & LM_FLAG_PRIORITY) {
- lkf |= DLM_LKF_NOORDER;
- lkf |= DLM_LKF_HEADQUE;
- }
-
- if (gfs_flags & LM_FLAG_ANY) {
- if (req == DLM_LOCK_PR)
- lkf |= DLM_LKF_ALTCW;
- else if (req == DLM_LOCK_CW)
- lkf |= DLM_LKF_ALTPR;
- }
-
- if (lp->lksb.sb_lkid != 0) {
- lkf |= DLM_LKF_CONVERT;
- }
-
- if (lp->lvb)
- lkf |= DLM_LKF_VALBLK;
-
- return lkf;
-}
-
-/* make_strname - convert GFS lock numbers to a string */
-
-static inline void make_strname(const struct lm_lockname *lockname,
- struct gdlm_strname *str)
-{
- sprintf(str->name, "%8x%16llx", lockname->ln_type,
- (unsigned long long)lockname->ln_number);
- str->namelen = GDLM_STRNAME_BYTES;
-}
-
-static int gdlm_create_lp(struct gdlm_ls *ls, struct lm_lockname *name,
- struct gdlm_lock **lpp)
-{
- struct gdlm_lock *lp;
-
- lp = kzalloc(sizeof(struct gdlm_lock), GFP_NOFS);
- if (!lp)
- return -ENOMEM;
-
- lp->lockname = *name;
- make_strname(name, &lp->strname);
- lp->ls = ls;
- lp->cur = DLM_LOCK_IV;
- INIT_LIST_HEAD(&lp->delay_list);
-
- spin_lock(&ls->async_lock);
- ls->all_locks_count++;
- spin_unlock(&ls->async_lock);
-
- *lpp = lp;
- return 0;
-}
-
-int gdlm_get_lock(void *lockspace, struct lm_lockname *name,
- void **lockp)
-{
- struct gdlm_lock *lp;
- int error;
-
- error = gdlm_create_lp(lockspace, name, &lp);
-
- *lockp = lp;
- return error;
-}
-
-void gdlm_put_lock(void *lock)
-{
- gdlm_delete_lp(lock);
-}
-
-unsigned int gdlm_do_lock(struct gdlm_lock *lp)
-{
- struct gdlm_ls *ls = lp->ls;
- int error, bast = 1;
-
- /*
- * When recovery is in progress, delay lock requests for submission
- * once recovery is done. Requests for recovery (NOEXP) and unlocks
- * can pass.
- */
-
- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags) &&
- !test_bit(LFL_NOBLOCK, &lp->flags) && lp->req != DLM_LOCK_NL) {
- gdlm_queue_delayed(lp);
- return LM_OUT_ASYNC;
- }
-
- /*
- * Submit the actual lock request.
- */
-
- if (test_bit(LFL_NOBAST, &lp->flags))
- bast = 0;
-
- set_bit(LFL_ACTIVE, &lp->flags);
-
- log_debug("lk %x,%llx id %x %d,%d %x", lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number, lp->lksb.sb_lkid,
- lp->cur, lp->req, lp->lkf);
-
- error = dlm_lock(ls->dlm_lockspace, lp->req, &lp->lksb, lp->lkf,
- lp->strname.name, lp->strname.namelen, 0, gdlm_ast,
- lp, bast ? gdlm_bast : NULL);
-
- if ((error == -EAGAIN) && (lp->lkf & DLM_LKF_NOQUEUE)) {
- lp->lksb.sb_status = -EAGAIN;
- gdlm_ast(lp);
- error = 0;
- }
-
- if (error) {
- log_error("%s: gdlm_lock %x,%llx err=%d cur=%d req=%d lkf=%x "
- "flags=%lx", ls->fsname, lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number, error,
- lp->cur, lp->req, lp->lkf, lp->flags);
- return LM_OUT_ERROR;
- }
- return LM_OUT_ASYNC;
-}
-
-static unsigned int gdlm_do_unlock(struct gdlm_lock *lp)
-{
- struct gdlm_ls *ls = lp->ls;
- unsigned int lkf = 0;
- int error;
-
- set_bit(LFL_DLM_UNLOCK, &lp->flags);
- set_bit(LFL_ACTIVE, &lp->flags);
-
- if (lp->lvb)
- lkf = DLM_LKF_VALBLK;
-
- log_debug("un %x,%llx %x %d %x", lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number,
- lp->lksb.sb_lkid, lp->cur, lkf);
-
- error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, lkf, NULL, lp);
-
- if (error) {
- log_error("%s: gdlm_unlock %x,%llx err=%d cur=%d req=%d lkf=%x "
- "flags=%lx", ls->fsname, lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number, error,
- lp->cur, lp->req, lp->lkf, lp->flags);
- return LM_OUT_ERROR;
- }
- return LM_OUT_ASYNC;
-}
-
-unsigned int gdlm_lock(void *lock, unsigned int cur_state,
- unsigned int req_state, unsigned int flags)
-{
- struct gdlm_lock *lp = lock;
-
- if (req_state == LM_ST_UNLOCKED)
- return gdlm_unlock(lock, cur_state);
-
- clear_bit(LFL_DLM_CANCEL, &lp->flags);
- if (flags & LM_FLAG_NOEXP)
- set_bit(LFL_NOBLOCK, &lp->flags);
-
- check_cur_state(lp, cur_state);
- lp->req = make_mode(req_state);
- lp->lkf = make_flags(lp, flags, lp->cur, lp->req);
-
- return gdlm_do_lock(lp);
-}
-
-unsigned int gdlm_unlock(void *lock, unsigned int cur_state)
-{
- struct gdlm_lock *lp = lock;
-
- clear_bit(LFL_DLM_CANCEL, &lp->flags);
- if (lp->cur == DLM_LOCK_IV)
- return 0;
- return gdlm_do_unlock(lp);
-}
-
-void gdlm_cancel(void *lock)
-{
- struct gdlm_lock *lp = lock;
- struct gdlm_ls *ls = lp->ls;
- int error, delay_list = 0;
-
- if (test_bit(LFL_DLM_CANCEL, &lp->flags))
- return;
-
- log_info("gdlm_cancel %x,%llx flags %lx", lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number, lp->flags);
-
- spin_lock(&ls->async_lock);
- if (!list_empty(&lp->delay_list)) {
- list_del_init(&lp->delay_list);
- delay_list = 1;
- }
- spin_unlock(&ls->async_lock);
-
- if (delay_list) {
- set_bit(LFL_CANCEL, &lp->flags);
- set_bit(LFL_ACTIVE, &lp->flags);
- gdlm_ast(lp);
- return;
- }
-
- if (!test_bit(LFL_ACTIVE, &lp->flags) ||
- test_bit(LFL_DLM_UNLOCK, &lp->flags)) {
- log_info("gdlm_cancel skip %x,%llx flags %lx",
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number, lp->flags);
- return;
- }
-
- /* the lock is blocked in the dlm */
-
- set_bit(LFL_DLM_CANCEL, &lp->flags);
- set_bit(LFL_ACTIVE, &lp->flags);
-
- error = dlm_unlock(ls->dlm_lockspace, lp->lksb.sb_lkid, DLM_LKF_CANCEL,
- NULL, lp);
-
- log_info("gdlm_cancel rv %d %x,%llx flags %lx", error,
- lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number, lp->flags);
-
- if (error == -EBUSY)
- clear_bit(LFL_DLM_CANCEL, &lp->flags);
-}
-
-static int gdlm_add_lvb(struct gdlm_lock *lp)
-{
- char *lvb;
-
- lvb = kzalloc(GDLM_LVB_SIZE, GFP_NOFS);
- if (!lvb)
- return -ENOMEM;
-
- lp->lksb.sb_lvbptr = lvb;
- lp->lvb = lvb;
- return 0;
-}
-
-static void gdlm_del_lvb(struct gdlm_lock *lp)
-{
- kfree(lp->lvb);
- lp->lvb = NULL;
- lp->lksb.sb_lvbptr = NULL;
-}
-
-static int gdlm_ast_wait(void *word)
-{
- schedule();
- return 0;
-}
-
-/* This can do a synchronous dlm request (requiring a lock_dlm thread to get
- the completion) because gfs won't call hold_lvb() during a callback (from
- the context of a lock_dlm thread). */
-
-static int hold_null_lock(struct gdlm_lock *lp)
-{
- struct gdlm_lock *lpn = NULL;
- int error;
-
- if (lp->hold_null) {
- printk(KERN_INFO "lock_dlm: lvb already held\n");
- return 0;
- }
-
- error = gdlm_create_lp(lp->ls, &lp->lockname, &lpn);
- if (error)
- goto out;
-
- lpn->lksb.sb_lvbptr = junk_lvb;
- lpn->lvb = junk_lvb;
-
- lpn->req = DLM_LOCK_NL;
- lpn->lkf = DLM_LKF_VALBLK | DLM_LKF_EXPEDITE;
- set_bit(LFL_NOBAST, &lpn->flags);
- set_bit(LFL_INLOCK, &lpn->flags);
- set_bit(LFL_AST_WAIT, &lpn->flags);
-
- gdlm_do_lock(lpn);
- wait_on_bit(&lpn->flags, LFL_AST_WAIT, gdlm_ast_wait, TASK_UNINTERRUPTIBLE);
- error = lpn->lksb.sb_status;
- if (error) {
- printk(KERN_INFO "lock_dlm: hold_null_lock dlm error %d\n",
- error);
- gdlm_delete_lp(lpn);
- lpn = NULL;
- }
-out:
- lp->hold_null = lpn;
- return error;
-}
-
-/* This cannot do a synchronous dlm request (requiring a lock_dlm thread to get
- the completion) because gfs may call unhold_lvb() during a callback (from
- the context of a lock_dlm thread) which could cause a deadlock since the
- other lock_dlm thread could be engaged in recovery. */
-
-static void unhold_null_lock(struct gdlm_lock *lp)
-{
- struct gdlm_lock *lpn = lp->hold_null;
-
- gdlm_assert(lpn, "%x,%llx", lp->lockname.ln_type,
- (unsigned long long)lp->lockname.ln_number);
- lpn->lksb.sb_lvbptr = NULL;
- lpn->lvb = NULL;
- set_bit(LFL_UNLOCK_DELETE, &lpn->flags);
- gdlm_do_unlock(lpn);
- lp->hold_null = NULL;
-}
-
-/* Acquire a NL lock because gfs requires the value block to remain
- intact on the resource while the lvb is "held" even if it's holding no locks
- on the resource. */
-
-int gdlm_hold_lvb(void *lock, char **lvbp)
-{
- struct gdlm_lock *lp = lock;
- int error;
-
- error = gdlm_add_lvb(lp);
- if (error)
- return error;
-
- *lvbp = lp->lvb;
-
- error = hold_null_lock(lp);
- if (error)
- gdlm_del_lvb(lp);
-
- return error;
-}
-
-void gdlm_unhold_lvb(void *lock, char *lvb)
-{
- struct gdlm_lock *lp = lock;
-
- unhold_null_lock(lp);
- gdlm_del_lvb(lp);
-}
-
-void gdlm_submit_delayed(struct gdlm_ls *ls)
-{
- struct gdlm_lock *lp, *safe;
-
- spin_lock(&ls->async_lock);
- list_for_each_entry_safe(lp, safe, &ls->delayed, delay_list) {
- list_del_init(&lp->delay_list);
- list_add_tail(&lp->delay_list, &ls->submit);
- }
- spin_unlock(&ls->async_lock);
- wake_up(&ls->thread_wait);
-}
-
diff --git a/fs/gfs2/locking/dlm/lock_dlm.h b/fs/gfs2/locking/dlm/lock_dlm.h
deleted file mode 100644
index 3c98e7c6f93b..000000000000
--- a/fs/gfs2/locking/dlm/lock_dlm.h
+++ /dev/null
@@ -1,166 +0,0 @@
-/*
- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#ifndef LOCK_DLM_DOT_H
-#define LOCK_DLM_DOT_H
-
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/spinlock.h>
-#include <linux/types.h>
-#include <linux/string.h>
-#include <linux/list.h>
-#include <linux/socket.h>
-#include <linux/delay.h>
-#include <linux/kthread.h>
-#include <linux/kobject.h>
-#include <linux/fcntl.h>
-#include <linux/wait.h>
-#include <net/sock.h>
-
-#include <linux/dlm.h>
-#include <linux/dlm_plock.h>
-#include <linux/lm_interface.h>
-
-/*
- * Internally, we prefix things with gdlm_ and GDLM_ (for gfs-dlm) since a
- * prefix of lock_dlm_ gets awkward. Externally, GFS refers to this module
- * as "lock_dlm".
- */
-
-#define GDLM_STRNAME_BYTES 24
-#define GDLM_LVB_SIZE 32
-#define GDLM_DROP_COUNT 0
-#define GDLM_DROP_PERIOD 60
-#define GDLM_NAME_LEN 128
-
-/* GFS uses 12 bytes to identify a resource (32 bit type + 64 bit number).
- We sprintf these numbers into a 24 byte string of hex values to make them
- human-readable (to make debugging simpler.) */
-
-struct gdlm_strname {
- unsigned char name[GDLM_STRNAME_BYTES];
- unsigned short namelen;
-};
-
-enum {
- DFL_BLOCK_LOCKS = 0,
- DFL_SPECTATOR = 1,
- DFL_WITHDRAW = 2,
-};
-
-struct gdlm_ls {
- u32 id;
- int jid;
- int first;
- int first_done;
- unsigned long flags;
- struct kobject kobj;
- char clustername[GDLM_NAME_LEN];
- char fsname[GDLM_NAME_LEN];
- int fsflags;
- dlm_lockspace_t *dlm_lockspace;
- lm_callback_t fscb;
- struct gfs2_sbd *sdp;
- int recover_jid;
- int recover_jid_done;
- int recover_jid_status;
- spinlock_t async_lock;
- struct list_head delayed;
- struct list_head submit;
- u32 all_locks_count;
- wait_queue_head_t wait_control;
- struct task_struct *thread;
- wait_queue_head_t thread_wait;
-};
-
-enum {
- LFL_NOBLOCK = 0,
- LFL_NOCACHE = 1,
- LFL_DLM_UNLOCK = 2,
- LFL_DLM_CANCEL = 3,
- LFL_SYNC_LVB = 4,
- LFL_FORCE_PROMOTE = 5,
- LFL_REREQUEST = 6,
- LFL_ACTIVE = 7,
- LFL_INLOCK = 8,
- LFL_CANCEL = 9,
- LFL_NOBAST = 10,
- LFL_HEADQUE = 11,
- LFL_UNLOCK_DELETE = 12,
- LFL_AST_WAIT = 13,
-};
-
-struct gdlm_lock {
- struct gdlm_ls *ls;
- struct lm_lockname lockname;
- struct gdlm_strname strname;
- char *lvb;
- struct dlm_lksb lksb;
-
- s16 cur;
- s16 req;
- s16 prev_req;
- u32 lkf; /* dlm flags DLM_LKF_ */
- unsigned long flags; /* lock_dlm flags LFL_ */
-
- struct list_head delay_list; /* delayed */
- struct gdlm_lock *hold_null; /* NL lock for hold_lvb */
-};
-
-#define gdlm_assert(assertion, fmt, args...) \
-do { \
- if (unlikely(!(assertion))) { \
- printk(KERN_EMERG "lock_dlm: fatal assertion failed \"%s\"\n" \
- "lock_dlm: " fmt "\n", \
- #assertion, ##args); \
- BUG(); \
- } \
-} while (0)
-
-#define log_print(lev, fmt, arg...) printk(lev "lock_dlm: " fmt "\n" , ## arg)
-#define log_info(fmt, arg...) log_print(KERN_INFO , fmt , ## arg)
-#define log_error(fmt, arg...) log_print(KERN_ERR , fmt , ## arg)
-#ifdef LOCK_DLM_LOG_DEBUG
-#define log_debug(fmt, arg...) log_print(KERN_DEBUG , fmt , ## arg)
-#else
-#define log_debug(fmt, arg...)
-#endif
-
-/* sysfs.c */
-
-int gdlm_sysfs_init(void);
-void gdlm_sysfs_exit(void);
-int gdlm_kobject_setup(struct gdlm_ls *, struct kobject *);
-void gdlm_kobject_release(struct gdlm_ls *);
-
-/* thread.c */
-
-int gdlm_init_threads(struct gdlm_ls *);
-void gdlm_release_threads(struct gdlm_ls *);
-
-/* lock.c */
-
-void gdlm_submit_delayed(struct gdlm_ls *);
-unsigned int gdlm_do_lock(struct gdlm_lock *);
-
-int gdlm_get_lock(void *, struct lm_lockname *, void **);
-void gdlm_put_lock(void *);
-unsigned int gdlm_lock(void *, unsigned int, unsigned int, unsigned int);
-unsigned int gdlm_unlock(void *, unsigned int);
-void gdlm_cancel(void *);
-int gdlm_hold_lvb(void *, char **);
-void gdlm_unhold_lvb(void *, char *);
-
-/* mount.c */
-
-extern const struct lm_lockops gdlm_ops;
-
-#endif
-
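The gdlm_strname comment above describes the 12-byte resource id (32-bit type
plus 64-bit number) being printed as 24 hex characters. A userspace sketch of
that encoding follows; the "%8x%16llx" format string is an assumption for
illustration, not taken from this diff:

	#include <stdio.h>

	#define GDLM_STRNAME_BYTES 24

	int main(void)
	{
		unsigned int ln_type = 2;                  /* invented value */
		unsigned long long ln_number = 0x12345ULL; /* invented value */
		char name[GDLM_STRNAME_BYTES + 1];         /* +1 for the NUL */

		int len = snprintf(name, sizeof(name), "%8x%16llx",
				   ln_type, ln_number);
		printf("\"%s\" (%d chars)\n", name, len);  /* 24, space padded */
		return 0;
	}
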
diff --git a/fs/gfs2/locking/dlm/main.c b/fs/gfs2/locking/dlm/main.c
deleted file mode 100644
index b9a03a7ff801..000000000000
--- a/fs/gfs2/locking/dlm/main.c
+++ /dev/null
@@ -1,48 +0,0 @@
-/*
- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#include <linux/init.h>
-
-#include "lock_dlm.h"
-
-static int __init init_lock_dlm(void)
-{
- int error;
-
- error = gfs2_register_lockproto(&gdlm_ops);
- if (error) {
- printk(KERN_WARNING "lock_dlm: can't register protocol: %d\n",
- error);
- return error;
- }
-
- error = gdlm_sysfs_init();
- if (error) {
- gfs2_unregister_lockproto(&gdlm_ops);
- return error;
- }
-
- printk(KERN_INFO
- "Lock_DLM (built %s %s) installed\n", __DATE__, __TIME__);
- return 0;
-}
-
-static void __exit exit_lock_dlm(void)
-{
- gdlm_sysfs_exit();
- gfs2_unregister_lockproto(&gdlm_ops);
-}
-
-module_init(init_lock_dlm);
-module_exit(exit_lock_dlm);
-
-MODULE_DESCRIPTION("GFS DLM Locking Module");
-MODULE_AUTHOR("Red Hat, Inc.");
-MODULE_LICENSE("GPL");
-
diff --git a/fs/gfs2/locking/dlm/mount.c b/fs/gfs2/locking/dlm/mount.c
deleted file mode 100644
index 1aa7eb6a0226..000000000000
--- a/fs/gfs2/locking/dlm/mount.c
+++ /dev/null
@@ -1,276 +0,0 @@
-/*
- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#include "lock_dlm.h"
-
-const struct lm_lockops gdlm_ops;
-
-
-static struct gdlm_ls *init_gdlm(lm_callback_t cb, struct gfs2_sbd *sdp,
- int flags, char *table_name)
-{
- struct gdlm_ls *ls;
- char buf[256], *p;
-
- ls = kzalloc(sizeof(struct gdlm_ls), GFP_KERNEL);
- if (!ls)
- return NULL;
-
- ls->fscb = cb;
- ls->sdp = sdp;
- ls->fsflags = flags;
- spin_lock_init(&ls->async_lock);
- INIT_LIST_HEAD(&ls->delayed);
- INIT_LIST_HEAD(&ls->submit);
- init_waitqueue_head(&ls->thread_wait);
- init_waitqueue_head(&ls->wait_control);
- ls->jid = -1;
-
- strncpy(buf, table_name, 256);
- buf[255] = '\0';
-
- p = strchr(buf, ':');
- if (!p) {
- log_info("invalid table_name \"%s\"", table_name);
- kfree(ls);
- return NULL;
- }
- *p = '\0';
- p++;
-
- strncpy(ls->clustername, buf, GDLM_NAME_LEN);
- strncpy(ls->fsname, p, GDLM_NAME_LEN);
-
- return ls;
-}
-
-static int make_args(struct gdlm_ls *ls, char *data_arg, int *nodir)
-{
- char data[256];
- char *options, *x, *y;
- int error = 0;
-
- memset(data, 0, 256);
- strncpy(data, data_arg, 255);
-
- if (!strlen(data)) {
- log_error("no mount options, (u)mount helpers not installed");
- return -EINVAL;
- }
-
- for (options = data; (x = strsep(&options, ":")); ) {
- if (!*x)
- continue;
-
- y = strchr(x, '=');
- if (y)
- *y++ = 0;
-
- if (!strcmp(x, "jid")) {
- if (!y) {
- log_error("need argument to jid");
- error = -EINVAL;
- break;
- }
- sscanf(y, "%u", &ls->jid);
-
- } else if (!strcmp(x, "first")) {
- if (!y) {
- log_error("need argument to first");
- error = -EINVAL;
- break;
- }
- sscanf(y, "%u", &ls->first);
-
- } else if (!strcmp(x, "id")) {
- if (!y) {
- log_error("need argument to id");
- error = -EINVAL;
- break;
- }
- sscanf(y, "%u", &ls->id);
-
- } else if (!strcmp(x, "nodir")) {
- if (!y) {
- log_error("need argument to nodir");
- error = -EINVAL;
- break;
- }
- sscanf(y, "%u", nodir);
-
- } else {
- log_error("unkonwn option: %s", x);
- error = -EINVAL;
- break;
- }
- }
-
- return error;
-}
-
-static int gdlm_mount(char *table_name, char *host_data,
- lm_callback_t cb, void *cb_data,
- unsigned int min_lvb_size, int flags,
- struct lm_lockstruct *lockstruct,
- struct kobject *fskobj)
-{
- struct gdlm_ls *ls;
- int error = -ENOMEM, nodir = 0;
-
- if (min_lvb_size > GDLM_LVB_SIZE)
- goto out;
-
- ls = init_gdlm(cb, cb_data, flags, table_name);
- if (!ls)
- goto out;
-
- error = make_args(ls, host_data, &nodir);
- if (error)
- goto out;
-
- error = gdlm_init_threads(ls);
- if (error)
- goto out_free;
-
- error = gdlm_kobject_setup(ls, fskobj);
- if (error)
- goto out_thread;
-
- error = dlm_new_lockspace(ls->fsname, strlen(ls->fsname),
- &ls->dlm_lockspace,
- DLM_LSFL_FS | DLM_LSFL_NEWEXCL |
- (nodir ? DLM_LSFL_NODIR : 0),
- GDLM_LVB_SIZE);
- if (error) {
- log_error("dlm_new_lockspace error %d", error);
- goto out_kobj;
- }
-
- lockstruct->ls_jid = ls->jid;
- lockstruct->ls_first = ls->first;
- lockstruct->ls_lockspace = ls;
- lockstruct->ls_ops = &gdlm_ops;
- lockstruct->ls_flags = 0;
- lockstruct->ls_lvb_size = GDLM_LVB_SIZE;
- return 0;
-
-out_kobj:
- gdlm_kobject_release(ls);
-out_thread:
- gdlm_release_threads(ls);
-out_free:
- kfree(ls);
-out:
- return error;
-}
-
-static void gdlm_unmount(void *lockspace)
-{
- struct gdlm_ls *ls = lockspace;
-
- log_debug("unmount flags %lx", ls->flags);
-
- /* FIXME: serialize unmount and withdraw in case they
- happen at once. Also, if unmount follows withdraw,
- wait for withdraw to finish. */
-
- if (test_bit(DFL_WITHDRAW, &ls->flags))
- goto out;
-
- gdlm_kobject_release(ls);
- dlm_release_lockspace(ls->dlm_lockspace, 2);
- gdlm_release_threads(ls);
- BUG_ON(ls->all_locks_count);
-out:
- kfree(ls);
-}
-
-static void gdlm_recovery_done(void *lockspace, unsigned int jid,
- unsigned int message)
-{
- char env_jid[20];
- char env_status[20];
- char *envp[] = { env_jid, env_status, NULL };
- struct gdlm_ls *ls = lockspace;
- ls->recover_jid_done = jid;
- ls->recover_jid_status = message;
- sprintf(env_jid, "JID=%d", jid);
- sprintf(env_status, "RECOVERY=%s",
- message == LM_RD_SUCCESS ? "Done" : "Failed");
- kobject_uevent_env(&ls->kobj, KOBJ_CHANGE, envp);
-}
-
-static void gdlm_others_may_mount(void *lockspace)
-{
- char *message = "FIRSTMOUNT=Done";
- char *envp[] = { message, NULL };
- struct gdlm_ls *ls = lockspace;
- ls->first_done = 1;
- kobject_uevent_env(&ls->kobj, KOBJ_CHANGE, envp);
-}
-
-/* Userspace gets the offline uevent, blocks new gfs locks on
- other mounters, and lets us know (sets WITHDRAW flag). Then,
- userspace leaves the mount group while we leave the lockspace. */
-
-static void gdlm_withdraw(void *lockspace)
-{
- struct gdlm_ls *ls = lockspace;
-
- kobject_uevent(&ls->kobj, KOBJ_OFFLINE);
-
- wait_event_interruptible(ls->wait_control,
- test_bit(DFL_WITHDRAW, &ls->flags));
-
- dlm_release_lockspace(ls->dlm_lockspace, 2);
- gdlm_release_threads(ls);
- gdlm_kobject_release(ls);
-}
-
-static int gdlm_plock(void *lockspace, struct lm_lockname *name,
- struct file *file, int cmd, struct file_lock *fl)
-{
- struct gdlm_ls *ls = lockspace;
- return dlm_posix_lock(ls->dlm_lockspace, name->ln_number, file, cmd, fl);
-}
-
-static int gdlm_punlock(void *lockspace, struct lm_lockname *name,
- struct file *file, struct file_lock *fl)
-{
- struct gdlm_ls *ls = lockspace;
- return dlm_posix_unlock(ls->dlm_lockspace, name->ln_number, file, fl);
-}
-
-static int gdlm_plock_get(void *lockspace, struct lm_lockname *name,
- struct file *file, struct file_lock *fl)
-{
- struct gdlm_ls *ls = lockspace;
- return dlm_posix_get(ls->dlm_lockspace, name->ln_number, file, fl);
-}
-
-const struct lm_lockops gdlm_ops = {
- .lm_proto_name = "lock_dlm",
- .lm_mount = gdlm_mount,
- .lm_others_may_mount = gdlm_others_may_mount,
- .lm_unmount = gdlm_unmount,
- .lm_withdraw = gdlm_withdraw,
- .lm_get_lock = gdlm_get_lock,
- .lm_put_lock = gdlm_put_lock,
- .lm_lock = gdlm_lock,
- .lm_unlock = gdlm_unlock,
- .lm_plock = gdlm_plock,
- .lm_punlock = gdlm_punlock,
- .lm_plock_get = gdlm_plock_get,
- .lm_cancel = gdlm_cancel,
- .lm_hold_lvb = gdlm_hold_lvb,
- .lm_unhold_lvb = gdlm_unhold_lvb,
- .lm_recovery_done = gdlm_recovery_done,
- .lm_owner = THIS_MODULE,
-};
-
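make_args() above splits the host_data string on ':' and each piece on '=',
so an input looks like "jid=0:id=12345:first=1:nodir=0". A runnable userspace
sketch of the same strsep()/strchr() split (the sample values are invented):

	#define _DEFAULT_SOURCE   /* for strsep() on glibc */
	#include <stdio.h>
	#include <string.h>

	int main(void)
	{
		char data[] = "jid=0:id=12345:first=1:nodir=0";
		char *options = data, *x, *y;

		while ((x = strsep(&options, ":")) != NULL) {
			if (!*x)
				continue;            /* skip empty "::" pieces */
			y = strchr(x, '=');
			if (y)
				*y++ = '\0';         /* split key from value */
			printf("option \"%s\" -> \"%s\"\n", x, y ? y : "(none)");
		}
		return 0;
	}
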
diff --git a/fs/gfs2/locking/dlm/sysfs.c b/fs/gfs2/locking/dlm/sysfs.c
deleted file mode 100644
index 9b7edcf7bd49..000000000000
--- a/fs/gfs2/locking/dlm/sysfs.c
+++ /dev/null
@@ -1,226 +0,0 @@
-/*
- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#include <linux/ctype.h>
-#include <linux/stat.h>
-
-#include "lock_dlm.h"
-
-static ssize_t proto_name_show(struct gdlm_ls *ls, char *buf)
-{
- return sprintf(buf, "%s\n", gdlm_ops.lm_proto_name);
-}
-
-static ssize_t block_show(struct gdlm_ls *ls, char *buf)
-{
- ssize_t ret;
- int val = 0;
-
- if (test_bit(DFL_BLOCK_LOCKS, &ls->flags))
- val = 1;
- ret = sprintf(buf, "%d\n", val);
- return ret;
-}
-
-static ssize_t block_store(struct gdlm_ls *ls, const char *buf, size_t len)
-{
- ssize_t ret = len;
- int val;
-
- val = simple_strtol(buf, NULL, 0);
-
- if (val == 1)
- set_bit(DFL_BLOCK_LOCKS, &ls->flags);
- else if (val == 0) {
- clear_bit(DFL_BLOCK_LOCKS, &ls->flags);
- gdlm_submit_delayed(ls);
- } else {
- ret = -EINVAL;
- }
- return ret;
-}
-
-static ssize_t withdraw_show(struct gdlm_ls *ls, char *buf)
-{
- ssize_t ret;
- int val = 0;
-
- if (test_bit(DFL_WITHDRAW, &ls->flags))
- val = 1;
- ret = sprintf(buf, "%d\n", val);
- return ret;
-}
-
-static ssize_t withdraw_store(struct gdlm_ls *ls, const char *buf, size_t len)
-{
- ssize_t ret = len;
- int val;
-
- val = simple_strtol(buf, NULL, 0);
-
- if (val == 1)
- set_bit(DFL_WITHDRAW, &ls->flags);
- else
- ret = -EINVAL;
- wake_up(&ls->wait_control);
- return ret;
-}
-
-static ssize_t id_show(struct gdlm_ls *ls, char *buf)
-{
- return sprintf(buf, "%u\n", ls->id);
-}
-
-static ssize_t jid_show(struct gdlm_ls *ls, char *buf)
-{
- return sprintf(buf, "%d\n", ls->jid);
-}
-
-static ssize_t first_show(struct gdlm_ls *ls, char *buf)
-{
- return sprintf(buf, "%d\n", ls->first);
-}
-
-static ssize_t first_done_show(struct gdlm_ls *ls, char *buf)
-{
- return sprintf(buf, "%d\n", ls->first_done);
-}
-
-static ssize_t recover_show(struct gdlm_ls *ls, char *buf)
-{
- return sprintf(buf, "%d\n", ls->recover_jid);
-}
-
-static ssize_t recover_store(struct gdlm_ls *ls, const char *buf, size_t len)
-{
- ls->recover_jid = simple_strtol(buf, NULL, 0);
- ls->fscb(ls->sdp, LM_CB_NEED_RECOVERY, &ls->recover_jid);
- return len;
-}
-
-static ssize_t recover_done_show(struct gdlm_ls *ls, char *buf)
-{
- return sprintf(buf, "%d\n", ls->recover_jid_done);
-}
-
-static ssize_t recover_status_show(struct gdlm_ls *ls, char *buf)
-{
- return sprintf(buf, "%d\n", ls->recover_jid_status);
-}
-
-struct gdlm_attr {
- struct attribute attr;
- ssize_t (*show)(struct gdlm_ls *, char *);
- ssize_t (*store)(struct gdlm_ls *, const char *, size_t);
-};
-
-#define GDLM_ATTR(_name,_mode,_show,_store) \
-static struct gdlm_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)
-
-GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
-GDLM_ATTR(block, 0644, block_show, block_store);
-GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
-GDLM_ATTR(id, 0444, id_show, NULL);
-GDLM_ATTR(jid, 0444, jid_show, NULL);
-GDLM_ATTR(first, 0444, first_show, NULL);
-GDLM_ATTR(first_done, 0444, first_done_show, NULL);
-GDLM_ATTR(recover, 0644, recover_show, recover_store);
-GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
-GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);
-
-static struct attribute *gdlm_attrs[] = {
- &gdlm_attr_proto_name.attr,
- &gdlm_attr_block.attr,
- &gdlm_attr_withdraw.attr,
- &gdlm_attr_id.attr,
- &gdlm_attr_jid.attr,
- &gdlm_attr_first.attr,
- &gdlm_attr_first_done.attr,
- &gdlm_attr_recover.attr,
- &gdlm_attr_recover_done.attr,
- &gdlm_attr_recover_status.attr,
- NULL,
-};
-
-static ssize_t gdlm_attr_show(struct kobject *kobj, struct attribute *attr,
- char *buf)
-{
- struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj);
- struct gdlm_attr *a = container_of(attr, struct gdlm_attr, attr);
- return a->show ? a->show(ls, buf) : 0;
-}
-
-static ssize_t gdlm_attr_store(struct kobject *kobj, struct attribute *attr,
- const char *buf, size_t len)
-{
- struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj);
- struct gdlm_attr *a = container_of(attr, struct gdlm_attr, attr);
- return a->store ? a->store(ls, buf, len) : len;
-}
-
-static struct sysfs_ops gdlm_attr_ops = {
- .show = gdlm_attr_show,
- .store = gdlm_attr_store,
-};
-
-static struct kobj_type gdlm_ktype = {
- .default_attrs = gdlm_attrs,
- .sysfs_ops = &gdlm_attr_ops,
-};
-
-static struct kset *gdlm_kset;
-
-int gdlm_kobject_setup(struct gdlm_ls *ls, struct kobject *fskobj)
-{
- int error;
-
- ls->kobj.kset = gdlm_kset;
- error = kobject_init_and_add(&ls->kobj, &gdlm_ktype, fskobj,
- "lock_module");
- if (error)
- log_error("can't register kobj %d", error);
- kobject_uevent(&ls->kobj, KOBJ_ADD);
-
- return error;
-}
-
-void gdlm_kobject_release(struct gdlm_ls *ls)
-{
- kobject_put(&ls->kobj);
-}
-
-static int gdlm_uevent(struct kset *kset, struct kobject *kobj,
- struct kobj_uevent_env *env)
-{
- struct gdlm_ls *ls = container_of(kobj, struct gdlm_ls, kobj);
- add_uevent_var(env, "LOCKTABLE=%s:%s", ls->clustername, ls->fsname);
- add_uevent_var(env, "LOCKPROTO=lock_dlm");
- return 0;
-}
-
-static struct kset_uevent_ops gdlm_uevent_ops = {
- .uevent = gdlm_uevent,
-};
-
-
-int gdlm_sysfs_init(void)
-{
- gdlm_kset = kset_create_and_add("lock_dlm", &gdlm_uevent_ops, kernel_kobj);
- if (!gdlm_kset) {
- printk(KERN_WARNING "%s: can not create kset\n", __func__);
- return -ENOMEM;
- }
- return 0;
-}
-
-void gdlm_sysfs_exit(void)
-{
- kset_unregister(gdlm_kset);
-}
-
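The show/store dispatchers above use container_of() to get from the embedded
kobject back to the enclosing gdlm_ls. A userspace sketch of that pointer
arithmetic; the structs are cut down to the bare shape needed for the demo:

	#include <stddef.h>
	#include <stdio.h>

	#define container_of(ptr, type, member) \
		((type *)((char *)(ptr) - offsetof(type, member)))

	struct kobject { int unused; };

	struct gdlm_ls_demo {          /* stand-in for struct gdlm_ls */
		int jid;
		struct kobject kobj;       /* embedded, never a pointer */
	};

	int main(void)
	{
		struct gdlm_ls_demo ls = { .jid = 3 };
		struct kobject *kobj = &ls.kobj;   /* what sysfs hands us */
		struct gdlm_ls_demo *back =
			container_of(kobj, struct gdlm_ls_demo, kobj);

		printf("jid = %d\n", back->jid);   /* prints 3 */
		return 0;
	}
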
diff --git a/fs/gfs2/locking/dlm/thread.c b/fs/gfs2/locking/dlm/thread.c
deleted file mode 100644
index 38823efd698c..000000000000
--- a/fs/gfs2/locking/dlm/thread.c
+++ /dev/null
@@ -1,68 +0,0 @@
-/*
- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2005 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#include "lock_dlm.h"
-
-static inline int no_work(struct gdlm_ls *ls)
-{
- int ret;
-
- spin_lock(&ls->async_lock);
- ret = list_empty(&ls->submit);
- spin_unlock(&ls->async_lock);
-
- return ret;
-}
-
-static int gdlm_thread(void *data)
-{
- struct gdlm_ls *ls = (struct gdlm_ls *) data;
- struct gdlm_lock *lp = NULL;
-
- while (!kthread_should_stop()) {
- wait_event_interruptible(ls->thread_wait,
- !no_work(ls) || kthread_should_stop());
-
- spin_lock(&ls->async_lock);
-
- if (!list_empty(&ls->submit)) {
- lp = list_entry(ls->submit.next, struct gdlm_lock,
- delay_list);
- list_del_init(&lp->delay_list);
- spin_unlock(&ls->async_lock);
- gdlm_do_lock(lp);
- spin_lock(&ls->async_lock);
- }
- spin_unlock(&ls->async_lock);
- }
-
- return 0;
-}
-
-int gdlm_init_threads(struct gdlm_ls *ls)
-{
- struct task_struct *p;
- int error;
-
- p = kthread_run(gdlm_thread, ls, "lock_dlm");
- error = IS_ERR(p);
- if (error) {
- log_error("can't start lock_dlm thread %d", error);
- return error;
- }
- ls->thread = p;
-
- return 0;
-}
-
-void gdlm_release_threads(struct gdlm_ls *ls)
-{
- kthread_stop(ls->thread);
-}
-
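gdlm_thread() above is the classic one-item-at-a-time worker: sleep until
work or a stop request arrives, dequeue under the lock, and drop the lock
before the slow call into the DLM. A pthread sketch of the same shape
(compile with -lpthread; the details are illustrative, not kernel code):

	#include <pthread.h>
	#include <stdio.h>
	#include <unistd.h>

	static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
	static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	static int pending, stopping;

	static void *worker(void *arg)
	{
		(void)arg;
		pthread_mutex_lock(&lock);
		while (!stopping) {
			while (!pending && !stopping)
				pthread_cond_wait(&cond, &lock); /* like wait_event */
			if (pending) {
				pending--;
				pthread_mutex_unlock(&lock);     /* work outside lock */
				printf("processed one request\n"); /* like gdlm_do_lock */
				pthread_mutex_lock(&lock);
			}
		}
		pthread_mutex_unlock(&lock);
		return NULL;
	}

	int main(void)
	{
		pthread_t t;
		pthread_create(&t, NULL, worker, NULL);

		pthread_mutex_lock(&lock);
		pending = 3;                    /* queue some work */
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);

		sleep(1);
		pthread_mutex_lock(&lock);
		stopping = 1;                   /* like kthread_should_stop() */
		pthread_cond_broadcast(&cond);
		pthread_mutex_unlock(&lock);
		pthread_join(t, NULL);
		return 0;
	}
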
diff --git a/fs/gfs2/log.c b/fs/gfs2/log.c
index ad305854bdc6..98918a756410 100644
--- a/fs/gfs2/log.c
+++ b/fs/gfs2/log.c
@@ -14,7 +14,6 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
-#include <linux/lm_interface.h>
#include <linux/delay.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
diff --git a/fs/gfs2/lops.c b/fs/gfs2/lops.c
index 4390f6f4047d..80e4f5f898bb 100644
--- a/fs/gfs2/lops.c
+++ b/fs/gfs2/lops.c
@@ -13,7 +13,6 @@
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include "gfs2.h"
#include "incore.h"
diff --git a/fs/gfs2/main.c b/fs/gfs2/main.c
index 7cacfde32194..a6892ed0840a 100644
--- a/fs/gfs2/main.c
+++ b/fs/gfs2/main.c
@@ -14,7 +14,6 @@
#include <linux/module.h>
#include <linux/init.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include <asm/atomic.h>
#include "gfs2.h"
@@ -23,6 +22,12 @@
#include "sys.h"
#include "util.h"
#include "glock.h"
+#include "quota.h"
+
+static struct shrinker qd_shrinker = {
+ .shrink = gfs2_shrink_qd_memory,
+ .seeks = DEFAULT_SEEKS,
+};
static void gfs2_init_inode_once(void *foo)
{
@@ -41,8 +46,6 @@ static void gfs2_init_glock_once(void *foo)
INIT_HLIST_NODE(&gl->gl_list);
spin_lock_init(&gl->gl_spin);
INIT_LIST_HEAD(&gl->gl_holders);
- gl->gl_lvb = NULL;
- atomic_set(&gl->gl_lvb_count, 0);
INIT_LIST_HEAD(&gl->gl_lru);
INIT_LIST_HEAD(&gl->gl_ail_list);
atomic_set(&gl->gl_ail_count, 0);
@@ -100,6 +103,8 @@ static int __init init_gfs2_fs(void)
if (!gfs2_quotad_cachep)
goto fail;
+ register_shrinker(&qd_shrinker);
+
error = register_filesystem(&gfs2_fs_type);
if (error)
goto fail;
@@ -117,6 +122,7 @@ static int __init init_gfs2_fs(void)
fail_unregister:
unregister_filesystem(&gfs2_fs_type);
fail:
+ unregister_shrinker(&qd_shrinker);
gfs2_glock_exit();
if (gfs2_quotad_cachep)
@@ -145,6 +151,7 @@ fail:
static void __exit exit_gfs2_fs(void)
{
+ unregister_shrinker(&qd_shrinker);
gfs2_glock_exit();
gfs2_unregister_debugfs();
unregister_filesystem(&gfs2_fs_type);
diff --git a/fs/gfs2/meta_io.c b/fs/gfs2/meta_io.c
index 09853620c951..870d65ae7ae2 100644
--- a/fs/gfs2/meta_io.c
+++ b/fs/gfs2/meta_io.c
@@ -19,7 +19,6 @@
#include <linux/delay.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include "gfs2.h"
#include "incore.h"
diff --git a/fs/gfs2/mount.c b/fs/gfs2/mount.c
index 3cb0a44ba023..fba502aa8b2d 100644
--- a/fs/gfs2/mount.c
+++ b/fs/gfs2/mount.c
@@ -12,12 +12,11 @@
#include <linux/completion.h>
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include <linux/parser.h>
#include "gfs2.h"
#include "incore.h"
-#include "mount.h"
+#include "super.h"
#include "sys.h"
#include "util.h"
@@ -77,101 +76,46 @@ static const match_table_t tokens = {
* Return: errno
*/
-int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount)
+int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *options)
{
- struct gfs2_args *args = &sdp->sd_args;
- char *data = data_arg;
- char *options, *o, *v;
- int error = 0;
-
- if (!remount) {
- /* Set some defaults */
- args->ar_quota = GFS2_QUOTA_DEFAULT;
- args->ar_data = GFS2_DATA_DEFAULT;
- }
+ char *o;
+ int token;
+ substring_t tmp[MAX_OPT_ARGS];
/* Split the options into tokens with the "," character and
process them */
- for (options = data; (o = strsep(&options, ",")); ) {
- int token;
- substring_t tmp[MAX_OPT_ARGS];
-
- if (!*o)
+ while (1) {
+ o = strsep(&options, ",");
+ if (o == NULL)
+ break;
+ if (*o == '\0')
continue;
token = match_token(o, tokens, tmp);
switch (token) {
case Opt_lockproto:
- v = match_strdup(&tmp[0]);
- if (!v) {
- fs_info(sdp, "no memory for lockproto\n");
- error = -ENOMEM;
- goto out_error;
- }
-
- if (remount && strcmp(v, args->ar_lockproto)) {
- kfree(v);
- goto cant_remount;
- }
-
- strncpy(args->ar_lockproto, v, GFS2_LOCKNAME_LEN);
- args->ar_lockproto[GFS2_LOCKNAME_LEN - 1] = 0;
- kfree(v);
+ match_strlcpy(args->ar_lockproto, &tmp[0],
+ GFS2_LOCKNAME_LEN);
break;
case Opt_locktable:
- v = match_strdup(&tmp[0]);
- if (!v) {
- fs_info(sdp, "no memory for locktable\n");
- error = -ENOMEM;
- goto out_error;
- }
-
- if (remount && strcmp(v, args->ar_locktable)) {
- kfree(v);
- goto cant_remount;
- }
-
- strncpy(args->ar_locktable, v, GFS2_LOCKNAME_LEN);
- args->ar_locktable[GFS2_LOCKNAME_LEN - 1] = 0;
- kfree(v);
+ match_strlcpy(args->ar_locktable, &tmp[0],
+ GFS2_LOCKNAME_LEN);
break;
case Opt_hostdata:
- v = match_strdup(&tmp[0]);
- if (!v) {
- fs_info(sdp, "no memory for hostdata\n");
- error = -ENOMEM;
- goto out_error;
- }
-
- if (remount && strcmp(v, args->ar_hostdata)) {
- kfree(v);
- goto cant_remount;
- }
-
- strncpy(args->ar_hostdata, v, GFS2_LOCKNAME_LEN);
- args->ar_hostdata[GFS2_LOCKNAME_LEN - 1] = 0;
- kfree(v);
+ match_strlcpy(args->ar_hostdata, &tmp[0],
+ GFS2_LOCKNAME_LEN);
break;
case Opt_spectator:
- if (remount && !args->ar_spectator)
- goto cant_remount;
args->ar_spectator = 1;
- sdp->sd_vfs->s_flags |= MS_RDONLY;
break;
case Opt_ignore_local_fs:
- if (remount && !args->ar_ignore_local_fs)
- goto cant_remount;
args->ar_ignore_local_fs = 1;
break;
case Opt_localflocks:
- if (remount && !args->ar_localflocks)
- goto cant_remount;
args->ar_localflocks = 1;
break;
case Opt_localcaching:
- if (remount && !args->ar_localcaching)
- goto cant_remount;
args->ar_localcaching = 1;
break;
case Opt_debug:
@@ -181,17 +125,13 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount)
args->ar_debug = 0;
break;
case Opt_upgrade:
- if (remount && !args->ar_upgrade)
- goto cant_remount;
args->ar_upgrade = 1;
break;
case Opt_acl:
args->ar_posix_acl = 1;
- sdp->sd_vfs->s_flags |= MS_POSIXACL;
break;
case Opt_noacl:
args->ar_posix_acl = 0;
- sdp->sd_vfs->s_flags &= ~MS_POSIXACL;
break;
case Opt_quota_off:
args->ar_quota = GFS2_QUOTA_OFF;
@@ -215,29 +155,15 @@ int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount)
args->ar_data = GFS2_DATA_ORDERED;
break;
case Opt_meta:
- if (remount && args->ar_meta != 1)
- goto cant_remount;
args->ar_meta = 1;
break;
case Opt_err:
default:
- fs_info(sdp, "unknown option: %s\n", o);
- error = -EINVAL;
- goto out_error;
+ fs_info(sdp, "invalid mount option: %s\n", o);
+ return -EINVAL;
}
}
-out_error:
- if (error)
- fs_info(sdp, "invalid mount option(s)\n");
-
- if (data != data_arg)
- kfree(data);
-
- return error;
-
-cant_remount:
- fs_info(sdp, "can't remount with option %s\n", o);
- return -EINVAL;
+ return 0;
}
diff --git a/fs/gfs2/mount.h b/fs/gfs2/mount.h
deleted file mode 100644
index 401288acfdf3..000000000000
--- a/fs/gfs2/mount.h
+++ /dev/null
@@ -1,17 +0,0 @@
-/*
- * Copyright (C) Sistina Software, Inc. 1997-2003 All rights reserved.
- * Copyright (C) 2004-2006 Red Hat, Inc. All rights reserved.
- *
- * This copyrighted material is made available to anyone wishing to use,
- * modify, copy, or redistribute it subject to the terms and conditions
- * of the GNU General Public License version 2.
- */
-
-#ifndef __MOUNT_DOT_H__
-#define __MOUNT_DOT_H__
-
-struct gfs2_sbd;
-
-int gfs2_mount_args(struct gfs2_sbd *sdp, char *data_arg, int remount);
-
-#endif /* __MOUNT_DOT_H__ */
diff --git a/fs/gfs2/ops_address.c b/fs/gfs2/ops_address.c
index 4ddab67867eb..a6d00e8ffe10 100644
--- a/fs/gfs2/ops_address.c
+++ b/fs/gfs2/ops_address.c
@@ -19,7 +19,6 @@
#include <linux/writeback.h>
#include <linux/swap.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include <linux/backing-dev.h>
#include "gfs2.h"
@@ -442,6 +441,7 @@ static int stuffed_readpage(struct gfs2_inode *ip, struct page *page)
*/
if (unlikely(page->index)) {
zero_user(page, 0, PAGE_CACHE_SIZE);
+ SetPageUptodate(page);
return 0;
}
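The one-line SetPageUptodate() addition above matters because a readpage
implementation that fills (here: zeroes) a page but never marks it uptodate
leaves the read path re-issuing the read forever. A toy model of that flag,
not the real page cache (the retry count is bounded just for the demo):

	#include <stdbool.h>
	#include <stdio.h>
	#include <string.h>

	struct page { char data[8]; bool uptodate; };

	/* Model of a readpage that zero-fills past the stuffed data. */
	static void readpage(struct page *p, bool set_flag)
	{
		memset(p->data, 0, sizeof(p->data));
		if (set_flag)
			p->uptodate = true;
	}

	static int read_cached(struct page *p, bool fixed)
	{
		int attempts = 0;
		while (!p->uptodate && attempts < 3) { /* bounded for demo */
			readpage(p, fixed);
			attempts++;
		}
		return attempts;
	}

	int main(void)
	{
		struct page a = { .uptodate = false }, b = a;
		printf("buggy: %d readpage calls\n", read_cached(&a, false)); /* 3 */
		printf("fixed: %d readpage call\n",  read_cached(&b, true));  /* 1 */
		return 0;
	}
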
diff --git a/fs/gfs2/ops_dentry.c b/fs/gfs2/ops_dentry.c
index c2ad36330ca3..5eb57b044382 100644
--- a/fs/gfs2/ops_dentry.c
+++ b/fs/gfs2/ops_dentry.c
@@ -13,7 +13,6 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
-#include <linux/lm_interface.h>
#include "gfs2.h"
#include "incore.h"
diff --git a/fs/gfs2/ops_export.c b/fs/gfs2/ops_export.c
index 7fdeb14ddd1a..9200ef221716 100644
--- a/fs/gfs2/ops_export.c
+++ b/fs/gfs2/ops_export.c
@@ -14,7 +14,6 @@
#include <linux/exportfs.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
-#include <linux/lm_interface.h>
#include "gfs2.h"
#include "incore.h"
diff --git a/fs/gfs2/ops_file.c b/fs/gfs2/ops_file.c
index 93fe41b67f97..99d726f1c7a6 100644
--- a/fs/gfs2/ops_file.c
+++ b/fs/gfs2/ops_file.c
@@ -20,9 +20,10 @@
#include <linux/gfs2_ondisk.h>
#include <linux/ext2_fs.h>
#include <linux/crc32.h>
-#include <linux/lm_interface.h>
#include <linux/writeback.h>
#include <asm/uaccess.h>
+#include <linux/dlm.h>
+#include <linux/dlm_plock.h>
#include "gfs2.h"
#include "incore.h"
@@ -560,57 +561,24 @@ static int gfs2_fsync(struct file *file, struct dentry *dentry, int datasync)
return ret;
}
+#ifdef CONFIG_GFS2_FS_LOCKING_DLM
+
/**
* gfs2_setlease - acquire/release a file lease
* @file: the file pointer
* @arg: lease type
* @fl: file lock
*
+ * We don't currently have a way to enforce a lease across the whole
+ * cluster; until we do, disable leases (by just returning -EINVAL),
+ * unless the administrator has requested purely local locking.
+ *
* Returns: errno
*/
static int gfs2_setlease(struct file *file, long arg, struct file_lock **fl)
{
- struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
-
- /*
- * We don't currently have a way to enforce a lease across the whole
- * cluster; until we do, disable leases (by just returning -EINVAL),
- * unless the administrator has requested purely local locking.
- */
- if (!sdp->sd_args.ar_localflocks)
- return -EINVAL;
- return generic_setlease(file, arg, fl);
-}
-
-static int gfs2_lm_plock_get(struct gfs2_sbd *sdp, struct lm_lockname *name,
- struct file *file, struct file_lock *fl)
-{
- int error = -EIO;
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- error = sdp->sd_lockstruct.ls_ops->lm_plock_get(
- sdp->sd_lockstruct.ls_lockspace, name, file, fl);
- return error;
-}
-
-static int gfs2_lm_plock(struct gfs2_sbd *sdp, struct lm_lockname *name,
- struct file *file, int cmd, struct file_lock *fl)
-{
- int error = -EIO;
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- error = sdp->sd_lockstruct.ls_ops->lm_plock(
- sdp->sd_lockstruct.ls_lockspace, name, file, cmd, fl);
- return error;
-}
-
-static int gfs2_lm_punlock(struct gfs2_sbd *sdp, struct lm_lockname *name,
- struct file *file, struct file_lock *fl)
-{
- int error = -EIO;
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- error = sdp->sd_lockstruct.ls_ops->lm_punlock(
- sdp->sd_lockstruct.ls_lockspace, name, file, fl);
- return error;
+ return -EINVAL;
}
/**
@@ -626,9 +594,7 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
{
struct gfs2_inode *ip = GFS2_I(file->f_mapping->host);
struct gfs2_sbd *sdp = GFS2_SB(file->f_mapping->host);
- struct lm_lockname name =
- { .ln_number = ip->i_no_addr,
- .ln_type = LM_TYPE_PLOCK };
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
if (!(fl->fl_flags & FL_POSIX))
return -ENOLCK;
@@ -640,12 +606,14 @@ static int gfs2_lock(struct file *file, int cmd, struct file_lock *fl)
cmd = F_SETLK;
fl->fl_type = F_UNLCK;
}
+ if (unlikely(test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
+ return -EIO;
if (IS_GETLK(cmd))
- return gfs2_lm_plock_get(sdp, &name, file, fl);
+ return dlm_posix_get(ls->ls_dlm, ip->i_no_addr, file, fl);
else if (fl->fl_type == F_UNLCK)
- return gfs2_lm_punlock(sdp, &name, file, fl);
+ return dlm_posix_unlock(ls->ls_dlm, ip->i_no_addr, file, fl);
else
- return gfs2_lm_plock(sdp, &name, file, cmd, fl);
+ return dlm_posix_lock(ls->ls_dlm, ip->i_no_addr, file, cmd, fl);
}
static int do_flock(struct file *file, int cmd, struct file_lock *fl)
@@ -732,7 +700,7 @@ static int gfs2_flock(struct file *file, int cmd, struct file_lock *fl)
}
}
-const struct file_operations gfs2_file_fops = {
+const struct file_operations *gfs2_file_fops = &(const struct file_operations){
.llseek = gfs2_llseek,
.read = do_sync_read,
.aio_read = generic_file_aio_read,
@@ -750,7 +718,7 @@ const struct file_operations gfs2_file_fops = {
.setlease = gfs2_setlease,
};
-const struct file_operations gfs2_dir_fops = {
+const struct file_operations *gfs2_dir_fops = &(const struct file_operations){
.readdir = gfs2_readdir,
.unlocked_ioctl = gfs2_ioctl,
.open = gfs2_open,
@@ -760,7 +728,9 @@ const struct file_operations gfs2_dir_fops = {
.flock = gfs2_flock,
};
-const struct file_operations gfs2_file_fops_nolock = {
+#endif /* CONFIG_GFS2_FS_LOCKING_DLM */
+
+const struct file_operations gfs2_file_fops_nolock = {
.llseek = gfs2_llseek,
.read = do_sync_read,
.aio_read = generic_file_aio_read,
@@ -773,10 +743,10 @@ const struct file_operations gfs2_file_fops_nolock = {
.fsync = gfs2_fsync,
.splice_read = generic_file_splice_read,
.splice_write = generic_file_splice_write,
- .setlease = gfs2_setlease,
+ .setlease = generic_setlease,
};
-const struct file_operations gfs2_dir_fops_nolock = {
+const struct file_operations gfs2_dir_fops_nolock = {
.readdir = gfs2_readdir,
.unlocked_ioctl = gfs2_ioctl,
.open = gfs2_open,
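gfs2_lock() above now triages the request itself before calling into the
DLM: F_GETLK first, then F_UNLCK, and only then a real lock request. A
userspace sketch of that classification order (illustrative only; it does
not take any locks):

	#include <fcntl.h>
	#include <stdio.h>

	static const char *classify(int cmd, const struct flock *fl)
	{
		if (cmd == F_GETLK)
			return "query  -> dlm_posix_get()";
		if (fl->l_type == F_UNLCK)
			return "unlock -> dlm_posix_unlock()";
		return "lock   -> dlm_posix_lock()";
	}

	int main(void)
	{
		struct flock fl = { .l_type = F_WRLCK, .l_whence = SEEK_SET };

		printf("%s\n", classify(F_SETLK, &fl));
		fl.l_type = F_UNLCK;
		printf("%s\n", classify(F_SETLK, &fl));
		printf("%s\n", classify(F_GETLK, &fl));
		return 0;
	}
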
diff --git a/fs/gfs2/ops_fstype.c b/fs/gfs2/ops_fstype.c
index f91eebdde581..e502b379a4da 100644
--- a/fs/gfs2/ops_fstype.c
+++ b/fs/gfs2/ops_fstype.c
@@ -17,7 +17,6 @@
#include <linux/namei.h>
#include <linux/mount.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include "gfs2.h"
#include "incore.h"
@@ -25,7 +24,6 @@
#include "glock.h"
#include "glops.h"
#include "inode.h"
-#include "mount.h"
#include "recovery.h"
#include "rgrp.h"
#include "super.h"
@@ -64,7 +62,6 @@ static void gfs2_tune_init(struct gfs2_tune *gt)
gt->gt_quota_warn_period = 10;
gt->gt_quota_scale_num = 1;
gt->gt_quota_scale_den = 1;
- gt->gt_quota_cache_secs = 300;
gt->gt_quota_quantum = 60;
gt->gt_new_files_jdata = 0;
gt->gt_max_readahead = 1 << 18;
@@ -100,7 +97,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
mutex_init(&sdp->sd_jindex_mutex);
INIT_LIST_HEAD(&sdp->sd_quota_list);
- spin_lock_init(&sdp->sd_quota_spin);
mutex_init(&sdp->sd_quota_mutex);
init_waitqueue_head(&sdp->sd_quota_wait);
INIT_LIST_HEAD(&sdp->sd_trunc_list);
@@ -630,13 +626,13 @@ static int map_journal_extents(struct gfs2_sbd *sdp)
return rc;
}
-static void gfs2_lm_others_may_mount(struct gfs2_sbd *sdp)
+static void gfs2_others_may_mount(struct gfs2_sbd *sdp)
{
- if (!sdp->sd_lockstruct.ls_ops->lm_others_may_mount)
- return;
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- sdp->sd_lockstruct.ls_ops->lm_others_may_mount(
- sdp->sd_lockstruct.ls_lockspace);
+ char *message = "FIRSTMOUNT=Done";
+ char *envp[] = { message, NULL };
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ ls->ls_first_done = 1;
+ kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
}
/**
@@ -796,7 +792,7 @@ static int init_journal(struct gfs2_sbd *sdp, int undo)
}
}
- gfs2_lm_others_may_mount(sdp);
+ gfs2_others_may_mount(sdp);
} else if (!sdp->sd_args.ar_spectator) {
error = gfs2_recover_journal(sdp->sd_jdesc);
if (error) {
@@ -1005,7 +1001,6 @@ static int init_threads(struct gfs2_sbd *sdp, int undo)
goto fail_quotad;
sdp->sd_log_flush_time = jiffies;
- sdp->sd_jindex_refresh_time = jiffies;
p = kthread_run(gfs2_logd, sdp, "gfs2_logd");
error = IS_ERR(p);
@@ -1033,6 +1028,17 @@ fail:
return error;
}
+static const match_table_t nolock_tokens = {
+ { Opt_jid, "jid=%d\n", },
+ { Opt_err, NULL },
+};
+
+static const struct lm_lockops nolock_ops = {
+ .lm_proto_name = "lock_nolock",
+ .lm_put_lock = kmem_cache_free,
+ .lm_tokens = &nolock_tokens,
+};
+
/**
* gfs2_lm_mount - mount a locking protocol
* @sdp: the filesystem
@@ -1044,31 +1050,73 @@ fail:
static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
{
- char *proto = sdp->sd_proto_name;
- char *table = sdp->sd_table_name;
- int flags = LM_MFLAG_CONV_NODROP;
- int error;
+ const struct lm_lockops *lm;
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ struct gfs2_args *args = &sdp->sd_args;
+ const char *proto = sdp->sd_proto_name;
+ const char *table = sdp->sd_table_name;
+ const char *fsname;
+ char *o, *options;
+ int ret;
- if (sdp->sd_args.ar_spectator)
- flags |= LM_MFLAG_SPECTATOR;
+ if (!strcmp("lock_nolock", proto)) {
+ lm = &nolock_ops;
+ sdp->sd_args.ar_localflocks = 1;
+ sdp->sd_args.ar_localcaching = 1;
+#ifdef CONFIG_GFS2_FS_LOCKING_DLM
+ } else if (!strcmp("lock_dlm", proto)) {
+ lm = &gfs2_dlm_ops;
+#endif
+ } else {
+ printk(KERN_INFO "GFS2: can't find protocol %s\n", proto);
+ return -ENOENT;
+ }
fs_info(sdp, "Trying to join cluster \"%s\", \"%s\"\n", proto, table);
- error = gfs2_mount_lockproto(proto, table, sdp->sd_args.ar_hostdata,
- gfs2_glock_cb, sdp,
- GFS2_MIN_LVB_SIZE, flags,
- &sdp->sd_lockstruct, &sdp->sd_kobj);
- if (error) {
- fs_info(sdp, "can't mount proto=%s, table=%s, hostdata=%s\n",
- proto, table, sdp->sd_args.ar_hostdata);
- goto out;
- }
+ ls->ls_ops = lm;
+ ls->ls_first = 1;
+ ls->ls_id = 0;
- if (gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_ops) ||
- gfs2_assert_warn(sdp, sdp->sd_lockstruct.ls_lvb_size >=
- GFS2_MIN_LVB_SIZE)) {
- gfs2_unmount_lockproto(&sdp->sd_lockstruct);
- goto out;
+ for (options = args->ar_hostdata; (o = strsep(&options, ":")); ) {
+ substring_t tmp[MAX_OPT_ARGS];
+ int token, option;
+
+ if (!o || !*o)
+ continue;
+
+ token = match_token(o, *lm->lm_tokens, tmp);
+ switch (token) {
+ case Opt_jid:
+ ret = match_int(&tmp[0], &option);
+ if (ret || option < 0)
+ goto hostdata_error;
+ ls->ls_jid = option;
+ break;
+ case Opt_id:
+ ret = match_int(&tmp[0], &option);
+ if (ret)
+ goto hostdata_error;
+ ls->ls_id = option;
+ break;
+ case Opt_first:
+ ret = match_int(&tmp[0], &option);
+ if (ret || (option != 0 && option != 1))
+ goto hostdata_error;
+ ls->ls_first = option;
+ break;
+ case Opt_nodir:
+ ret = match_int(&tmp[0], &option);
+ if (ret || (option != 0 && option != 1))
+ goto hostdata_error;
+ ls->ls_nodir = option;
+ break;
+ case Opt_err:
+ default:
+hostdata_error:
+ fs_info(sdp, "unknown hostdata (%s)\n", o);
+ return -EINVAL;
+ }
}
if (sdp->sd_args.ar_spectator)
@@ -1077,22 +1125,25 @@ static int gfs2_lm_mount(struct gfs2_sbd *sdp, int silent)
snprintf(sdp->sd_fsname, GFS2_FSNAME_LEN, "%s.%u", table,
sdp->sd_lockstruct.ls_jid);
- fs_info(sdp, "Joined cluster. Now mounting FS...\n");
-
- if ((sdp->sd_lockstruct.ls_flags & LM_LSFLAG_LOCAL) &&
- !sdp->sd_args.ar_ignore_local_fs) {
- sdp->sd_args.ar_localflocks = 1;
- sdp->sd_args.ar_localcaching = 1;
+ fsname = strchr(table, ':');
+ if (fsname)
+ fsname++;
+ if (lm->lm_mount == NULL) {
+ fs_info(sdp, "Now mounting FS...\n");
+ return 0;
}
-
-out:
- return error;
+ ret = lm->lm_mount(sdp, fsname);
+ if (ret == 0)
+ fs_info(sdp, "Joined cluster. Now mounting FS...\n");
+ return ret;
}
void gfs2_lm_unmount(struct gfs2_sbd *sdp)
{
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- gfs2_unmount_lockproto(&sdp->sd_lockstruct);
+ const struct lm_lockops *lm = sdp->sd_lockstruct.ls_ops;
+ if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)) &&
+ lm->lm_unmount)
+ lm->lm_unmount(sdp);
}
/**
@@ -1116,12 +1167,20 @@ static int fill_super(struct super_block *sb, void *data, int silent)
return -ENOMEM;
}
- error = gfs2_mount_args(sdp, (char *)data, 0);
+ sdp->sd_args.ar_quota = GFS2_QUOTA_DEFAULT;
+ sdp->sd_args.ar_data = GFS2_DATA_DEFAULT;
+
+ error = gfs2_mount_args(sdp, &sdp->sd_args, data);
if (error) {
printk(KERN_WARNING "GFS2: can't parse mount arguments\n");
goto fail;
}
+ if (sdp->sd_args.ar_spectator)
+ sb->s_flags |= MS_RDONLY;
+ if (sdp->sd_args.ar_posix_acl)
+ sb->s_flags |= MS_POSIXACL;
+
sb->s_magic = GFS2_MAGIC;
sb->s_op = &gfs2_super_ops;
sb->s_export_op = &gfs2_export_ops;
@@ -1199,6 +1258,8 @@ fail_sb:
dput(sdp->sd_root_dir);
if (sdp->sd_master_dir)
dput(sdp->sd_master_dir);
+ if (sb->s_root)
+ dput(sb->s_root);
sb->s_root = NULL;
fail_locking:
init_locking(sdp, &mount_gh, UNDO);
diff --git a/fs/gfs2/ops_inode.c b/fs/gfs2/ops_inode.c
index 49877546beb9..abd5429ae285 100644
--- a/fs/gfs2/ops_inode.c
+++ b/fs/gfs2/ops_inode.c
@@ -18,7 +18,6 @@
#include <linux/posix_acl.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
-#include <linux/lm_interface.h>
#include <linux/fiemap.h>
#include <asm/uaccess.h>
diff --git a/fs/gfs2/ops_super.c b/fs/gfs2/ops_super.c
index 320323d03479..4ecdad026eaf 100644
--- a/fs/gfs2/ops_super.c
+++ b/fs/gfs2/ops_super.c
@@ -19,7 +19,6 @@
#include <linux/delay.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
-#include <linux/lm_interface.h>
#include <linux/time.h>
#include "gfs2.h"
@@ -27,7 +26,6 @@
#include "glock.h"
#include "inode.h"
#include "log.h"
-#include "mount.h"
#include "quota.h"
#include "recovery.h"
#include "rgrp.h"
@@ -40,6 +38,8 @@
#include "bmap.h"
#include "meta_io.h"
+#define args_neq(a1, a2, x) ((a1)->ar_##x != (a2)->ar_##x)
+
/**
* gfs2_write_inode - Make sure the inode is stable on the disk
* @inode: The inode
@@ -435,25 +435,45 @@ static int gfs2_statfs(struct dentry *dentry, struct kstatfs *buf)
static int gfs2_remount_fs(struct super_block *sb, int *flags, char *data)
{
struct gfs2_sbd *sdp = sb->s_fs_info;
+ struct gfs2_args args = sdp->sd_args; /* Default to current settings */
int error;
- error = gfs2_mount_args(sdp, data, 1);
+ error = gfs2_mount_args(sdp, &args, data);
if (error)
return error;
+ /* Not allowed to change locking details */
+ if (strcmp(args.ar_lockproto, sdp->sd_args.ar_lockproto) ||
+ strcmp(args.ar_locktable, sdp->sd_args.ar_locktable) ||
+ strcmp(args.ar_hostdata, sdp->sd_args.ar_hostdata))
+ return -EINVAL;
+
+ /* Some flags must not be changed */
+ if (args_neq(&args, &sdp->sd_args, spectator) ||
+ args_neq(&args, &sdp->sd_args, ignore_local_fs) ||
+ args_neq(&args, &sdp->sd_args, localflocks) ||
+ args_neq(&args, &sdp->sd_args, localcaching) ||
+ args_neq(&args, &sdp->sd_args, meta))
+ return -EINVAL;
+
if (sdp->sd_args.ar_spectator)
*flags |= MS_RDONLY;
- else {
- if (*flags & MS_RDONLY) {
- if (!(sb->s_flags & MS_RDONLY))
- error = gfs2_make_fs_ro(sdp);
- } else if (!(*flags & MS_RDONLY) &&
- (sb->s_flags & MS_RDONLY)) {
+
+ if ((sb->s_flags ^ *flags) & MS_RDONLY) {
+ if (*flags & MS_RDONLY)
+ error = gfs2_make_fs_ro(sdp);
+ else
error = gfs2_make_fs_rw(sdp);
- }
+ if (error)
+ return error;
}
- return error;
+ sdp->sd_args = args;
+ if (sdp->sd_args.ar_posix_acl)
+ sb->s_flags |= MS_POSIXACL;
+ else
+ sb->s_flags &= ~MS_POSIXACL;
+ return 0;
}
/**
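The rewritten remount path above parses into a scratch copy of gfs2_args,
rejects any change to the locking options or to the flags guarded by
args_neq(), and only then commits the copy. A minimal sketch of that
parse-validate-commit pattern (the field names are invented for the example):

	#include <stdio.h>
	#include <string.h>

	struct args { int spectator; int posix_acl; char locktable[32]; };

	#define args_neq(a1, a2, x) ((a1)->x != (a2)->x)

	static int remount(struct args *cur, const struct args *want)
	{
		if (strcmp(cur->locktable, want->locktable))
			return -1;             /* locking details are fixed */
		if (args_neq(cur, want, spectator))
			return -1;             /* immutable flag */
		*cur = *want;                  /* commit the whole copy */
		return 0;
	}

	int main(void)
	{
		struct args cur = { 0, 0, "mycluster:fs1" };
		struct args want = cur;

		want.posix_acl = 1;                              /* may change */
		printf("remount: %d\n", remount(&cur, &want));   /* 0  */
		want.spectator = 1;                              /* may not   */
		printf("remount: %d\n", remount(&cur, &want));   /* -1 */
		return 0;
	}
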
diff --git a/fs/gfs2/quota.c b/fs/gfs2/quota.c
index b08d09696b3e..8d53f66b5bcc 100644
--- a/fs/gfs2/quota.c
+++ b/fs/gfs2/quota.c
@@ -45,7 +45,6 @@
#include <linux/fs.h>
#include <linux/bio.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
@@ -80,6 +79,51 @@ struct gfs2_quota_change_host {
u32 qc_id;
};
+static LIST_HEAD(qd_lru_list);
+static atomic_t qd_lru_count = ATOMIC_INIT(0);
+static DEFINE_SPINLOCK(qd_lru_lock);
+
+int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask)
+{
+ struct gfs2_quota_data *qd;
+ struct gfs2_sbd *sdp;
+
+ if (nr == 0)
+ goto out;
+
+ if (!(gfp_mask & __GFP_FS))
+ return -1;
+
+ spin_lock(&qd_lru_lock);
+ while (nr && !list_empty(&qd_lru_list)) {
+ qd = list_entry(qd_lru_list.next,
+ struct gfs2_quota_data, qd_reclaim);
+ sdp = qd->qd_gl->gl_sbd;
+
+ /* Free from the filesystem-specific list */
+ list_del(&qd->qd_list);
+
+ gfs2_assert_warn(sdp, !qd->qd_change);
+ gfs2_assert_warn(sdp, !qd->qd_slot_count);
+ gfs2_assert_warn(sdp, !qd->qd_bh_count);
+
+ gfs2_glock_put(qd->qd_gl);
+ atomic_dec(&sdp->sd_quota_count);
+
+ /* Delete it from the common reclaim list */
+ list_del_init(&qd->qd_reclaim);
+ atomic_dec(&qd_lru_count);
+ spin_unlock(&qd_lru_lock);
+ kmem_cache_free(gfs2_quotad_cachep, qd);
+ spin_lock(&qd_lru_lock);
+ nr--;
+ }
+ spin_unlock(&qd_lru_lock);
+
+out:
+ return (atomic_read(&qd_lru_count) * sysctl_vfs_cache_pressure) / 100;
+}
+
static u64 qd2offset(struct gfs2_quota_data *qd)
{
u64 offset;
@@ -100,22 +144,18 @@ static int qd_alloc(struct gfs2_sbd *sdp, int user, u32 id,
if (!qd)
return -ENOMEM;
- qd->qd_count = 1;
+ atomic_set(&qd->qd_count, 1);
qd->qd_id = id;
if (user)
set_bit(QDF_USER, &qd->qd_flags);
qd->qd_slot = -1;
+ INIT_LIST_HEAD(&qd->qd_reclaim);
error = gfs2_glock_get(sdp, 2 * (u64)id + !user,
&gfs2_quota_glops, CREATE, &qd->qd_gl);
if (error)
goto fail;
- error = gfs2_lvb_hold(qd->qd_gl);
- gfs2_glock_put(qd->qd_gl);
- if (error)
- goto fail;
-
*qdp = qd;
return 0;
@@ -135,11 +175,17 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
for (;;) {
found = 0;
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
if (qd->qd_id == id &&
!test_bit(QDF_USER, &qd->qd_flags) == !user) {
- qd->qd_count++;
+ if (!atomic_read(&qd->qd_count) &&
+ !list_empty(&qd->qd_reclaim)) {
+ /* Remove it from reclaim list */
+ list_del_init(&qd->qd_reclaim);
+ atomic_dec(&qd_lru_count);
+ }
+ atomic_inc(&qd->qd_count);
found = 1;
break;
}
@@ -155,11 +201,11 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
new_qd = NULL;
}
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
if (qd || !create) {
if (new_qd) {
- gfs2_lvb_unhold(new_qd->qd_gl);
+ gfs2_glock_put(new_qd->qd_gl);
kmem_cache_free(gfs2_quotad_cachep, new_qd);
}
*qdp = qd;
@@ -175,21 +221,18 @@ static int qd_get(struct gfs2_sbd *sdp, int user, u32 id, int create,
static void qd_hold(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
-
- spin_lock(&sdp->sd_quota_spin);
- gfs2_assert(sdp, qd->qd_count);
- qd->qd_count++;
- spin_unlock(&sdp->sd_quota_spin);
+ gfs2_assert(sdp, atomic_read(&qd->qd_count));
+ atomic_inc(&qd->qd_count);
}
static void qd_put(struct gfs2_quota_data *qd)
{
- struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
- spin_lock(&sdp->sd_quota_spin);
- gfs2_assert(sdp, qd->qd_count);
- if (!--qd->qd_count)
- qd->qd_last_touched = jiffies;
- spin_unlock(&sdp->sd_quota_spin);
+ if (atomic_dec_and_lock(&qd->qd_count, &qd_lru_lock)) {
+ /* Add to the reclaim list */
+ list_add_tail(&qd->qd_reclaim, &qd_lru_list);
+ atomic_inc(&qd_lru_count);
+ spin_unlock(&qd_lru_lock);
+ }
}
static int slot_get(struct gfs2_quota_data *qd)
@@ -198,10 +241,10 @@ static int slot_get(struct gfs2_quota_data *qd)
unsigned int c, o = 0, b;
unsigned char byte = 0;
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
if (qd->qd_slot_count++) {
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
return 0;
}
@@ -225,13 +268,13 @@ found:
sdp->sd_quota_bitmap[c][o] |= 1 << b;
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
return 0;
fail:
qd->qd_slot_count--;
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
return -ENOSPC;
}
@@ -239,23 +282,23 @@ static void slot_hold(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
gfs2_assert(sdp, qd->qd_slot_count);
qd->qd_slot_count++;
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
}
static void slot_put(struct gfs2_quota_data *qd)
{
struct gfs2_sbd *sdp = qd->qd_gl->gl_sbd;
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
gfs2_assert(sdp, qd->qd_slot_count);
if (!--qd->qd_slot_count) {
gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, qd->qd_slot, 0);
qd->qd_slot = -1;
}
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
}
static int bh_get(struct gfs2_quota_data *qd)
@@ -330,7 +373,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
if (sdp->sd_vfs->s_flags & MS_RDONLY)
return 0;
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
list_for_each_entry(qd, &sdp->sd_quota_list, qd_list) {
if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
@@ -341,8 +384,8 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
set_bit(QDF_LOCKED, &qd->qd_flags);
- gfs2_assert_warn(sdp, qd->qd_count);
- qd->qd_count++;
+ gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+ atomic_inc(&qd->qd_count);
qd->qd_change_sync = qd->qd_change;
gfs2_assert_warn(sdp, qd->qd_slot_count);
qd->qd_slot_count++;
@@ -354,7 +397,7 @@ static int qd_fish(struct gfs2_sbd *sdp, struct gfs2_quota_data **qdp)
if (!found)
qd = NULL;
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
if (qd) {
gfs2_assert_warn(sdp, qd->qd_change_sync);
@@ -379,24 +422,24 @@ static int qd_trylock(struct gfs2_quota_data *qd)
if (sdp->sd_vfs->s_flags & MS_RDONLY)
return 0;
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
if (test_bit(QDF_LOCKED, &qd->qd_flags) ||
!test_bit(QDF_CHANGE, &qd->qd_flags)) {
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
return 0;
}
list_move_tail(&qd->qd_list, &sdp->sd_quota_list);
set_bit(QDF_LOCKED, &qd->qd_flags);
- gfs2_assert_warn(sdp, qd->qd_count);
- qd->qd_count++;
+ gfs2_assert_warn(sdp, atomic_read(&qd->qd_count));
+ atomic_inc(&qd->qd_count);
qd->qd_change_sync = qd->qd_change;
gfs2_assert_warn(sdp, qd->qd_slot_count);
qd->qd_slot_count++;
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
gfs2_assert_warn(sdp, qd->qd_change_sync);
if (bh_get(qd)) {
@@ -556,9 +599,9 @@ static void do_qc(struct gfs2_quota_data *qd, s64 change)
x = be64_to_cpu(qc->qc_change) + change;
qc->qc_change = cpu_to_be64(x);
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
qd->qd_change = x;
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
if (!x) {
gfs2_assert_warn(sdp, test_bit(QDF_CHANGE, &qd->qd_flags));
@@ -802,8 +845,8 @@ restart:
loff_t pos;
gfs2_glock_dq_uninit(q_gh);
error = gfs2_glock_nq_init(qd->qd_gl,
- LM_ST_EXCLUSIVE, GL_NOCACHE,
- q_gh);
+ LM_ST_EXCLUSIVE, GL_NOCACHE,
+ q_gh);
if (error)
return error;
@@ -820,7 +863,6 @@ restart:
gfs2_glock_dq_uninit(&i_gh);
-
gfs2_quota_in(&q, buf);
qlvb = (struct gfs2_quota_lvb *)qd->qd_gl->gl_lvb;
qlvb->qb_magic = cpu_to_be32(GFS2_MAGIC);
@@ -890,9 +932,9 @@ static int need_sync(struct gfs2_quota_data *qd)
if (!qd->qd_qb.qb_limit)
return 0;
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
value = qd->qd_change;
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
spin_lock(&gt->gt_spin);
num = gt->gt_quota_scale_num;
@@ -985,9 +1027,9 @@ int gfs2_quota_check(struct gfs2_inode *ip, u32 uid, u32 gid)
continue;
value = (s64)be64_to_cpu(qd->qd_qb.qb_value);
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
value += qd->qd_change;
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
if (be64_to_cpu(qd->qd_qb.qb_limit) && (s64)be64_to_cpu(qd->qd_qb.qb_limit) < value) {
print_message(qd, "exceeded");
@@ -1171,13 +1213,12 @@ int gfs2_quota_init(struct gfs2_sbd *sdp)
qd->qd_change = qc.qc_change;
qd->qd_slot = slot;
qd->qd_slot_count = 1;
- qd->qd_last_touched = jiffies;
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
gfs2_icbit_munge(sdp, sdp->sd_quota_bitmap, slot, 1);
list_add(&qd->qd_list, &sdp->sd_quota_list);
atomic_inc(&sdp->sd_quota_count);
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
found++;
}
@@ -1197,73 +1238,48 @@ fail:
return error;
}
-static void gfs2_quota_scan(struct gfs2_sbd *sdp)
-{
- struct gfs2_quota_data *qd, *safe;
- LIST_HEAD(dead);
-
- spin_lock(&sdp->sd_quota_spin);
- list_for_each_entry_safe(qd, safe, &sdp->sd_quota_list, qd_list) {
- if (!qd->qd_count &&
- time_after_eq(jiffies, qd->qd_last_touched +
- gfs2_tune_get(sdp, gt_quota_cache_secs) * HZ)) {
- list_move(&qd->qd_list, &dead);
- gfs2_assert_warn(sdp,
- atomic_read(&sdp->sd_quota_count) > 0);
- atomic_dec(&sdp->sd_quota_count);
- }
- }
- spin_unlock(&sdp->sd_quota_spin);
-
- while (!list_empty(&dead)) {
- qd = list_entry(dead.next, struct gfs2_quota_data, qd_list);
- list_del(&qd->qd_list);
-
- gfs2_assert_warn(sdp, !qd->qd_change);
- gfs2_assert_warn(sdp, !qd->qd_slot_count);
- gfs2_assert_warn(sdp, !qd->qd_bh_count);
-
- gfs2_lvb_unhold(qd->qd_gl);
- kmem_cache_free(gfs2_quotad_cachep, qd);
- }
-}
-
void gfs2_quota_cleanup(struct gfs2_sbd *sdp)
{
struct list_head *head = &sdp->sd_quota_list;
struct gfs2_quota_data *qd;
unsigned int x;
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
while (!list_empty(head)) {
qd = list_entry(head->prev, struct gfs2_quota_data, qd_list);
- if (qd->qd_count > 1 ||
- (qd->qd_count && !test_bit(QDF_CHANGE, &qd->qd_flags))) {
+ if (atomic_read(&qd->qd_count) > 1 ||
+ (atomic_read(&qd->qd_count) &&
+ !test_bit(QDF_CHANGE, &qd->qd_flags))) {
list_move(&qd->qd_list, head);
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
schedule();
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
continue;
}
list_del(&qd->qd_list);
+ /* Also remove if this qd exists in the reclaim list */
+ if (!list_empty(&qd->qd_reclaim)) {
+ list_del_init(&qd->qd_reclaim);
+ atomic_dec(&qd_lru_count);
+ }
atomic_dec(&sdp->sd_quota_count);
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
- if (!qd->qd_count) {
+ if (!atomic_read(&qd->qd_count)) {
gfs2_assert_warn(sdp, !qd->qd_change);
gfs2_assert_warn(sdp, !qd->qd_slot_count);
} else
gfs2_assert_warn(sdp, qd->qd_slot_count == 1);
gfs2_assert_warn(sdp, !qd->qd_bh_count);
- gfs2_lvb_unhold(qd->qd_gl);
+ gfs2_glock_put(qd->qd_gl);
kmem_cache_free(gfs2_quotad_cachep, qd);
- spin_lock(&sdp->sd_quota_spin);
+ spin_lock(&qd_lru_lock);
}
- spin_unlock(&sdp->sd_quota_spin);
+ spin_unlock(&qd_lru_lock);
gfs2_assert_warn(sdp, !atomic_read(&sdp->sd_quota_count));
@@ -1341,9 +1357,6 @@ int gfs2_quotad(void *data)
quotad_check_timeo(sdp, "sync", gfs2_quota_sync, t,
&quotad_timeo, &tune->gt_quota_quantum);
- /* FIXME: This should be turned into a shrinker */
- gfs2_quota_scan(sdp);
-
/* Check for & recover partially truncated inodes */
quotad_check_trunc_list(sdp);
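qd_put() above relies on atomic_dec_and_lock(): only the caller that drops
the final reference acquires qd_lru_lock, and it returns with the lock held,
so the object can be linked onto the LRU with no window in which another
qd_get() could resurrect it unseen. A C11 userspace sketch of the idiom
(simplified; compile with -lpthread):

	#include <pthread.h>
	#include <stdatomic.h>
	#include <stdio.h>

	static pthread_mutex_t lru_lock = PTHREAD_MUTEX_INITIALIZER;
	static int lru_len;

	struct qd { atomic_int count; };

	static int dec_and_lock(atomic_int *v, pthread_mutex_t *m)
	{
		/* Fast path: not the last reference, no lock taken. */
		int old = atomic_load(v);
		while (old > 1)
			if (atomic_compare_exchange_weak(v, &old, old - 1))
				return 0;
		/* Might be last: take the lock, re-check under it. */
		pthread_mutex_lock(m);
		if (atomic_fetch_sub(v, 1) == 1)
			return 1;       /* hit zero, lock still held */
		pthread_mutex_unlock(m);
		return 0;
	}

	int main(void)
	{
		struct qd qd = { 2 };

		if (dec_and_lock(&qd.count, &lru_lock)) {
			lru_len++;
			pthread_mutex_unlock(&lru_lock);
		}
		if (dec_and_lock(&qd.count, &lru_lock)) {
			lru_len++;
			pthread_mutex_unlock(&lru_lock);
		}
		printf("on lru: %d\n", lru_len);  /* 1: only the final put */
		return 0;
	}
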
diff --git a/fs/gfs2/quota.h b/fs/gfs2/quota.h
index cec9032be97d..0fa5fa63d0e8 100644
--- a/fs/gfs2/quota.h
+++ b/fs/gfs2/quota.h
@@ -49,4 +49,6 @@ static inline int gfs2_quota_lock_check(struct gfs2_inode *ip)
return ret;
}
+extern int gfs2_shrink_qd_memory(int nr, gfp_t gfp_mask);
+
#endif /* __QUOTA_DOT_H__ */
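gfs2_shrink_qd_memory(), declared above, follows the shrinker contract of
this era: called with nr == 0 it only reports, otherwise it frees up to nr
objects; either way it returns the remaining reclaimable count scaled by
sysctl_vfs_cache_pressure (100 means report the true count). A toy userspace
model of that contract (the numbers are invented):

	#include <stdio.h>

	static int lru_count = 40;            /* objects on the LRU */
	static int vfs_cache_pressure = 100;  /* default tunable */

	static int shrink_qd_memory(int nr)
	{
		while (nr > 0 && lru_count > 0) { /* free oldest first */
			lru_count--;
			nr--;
		}
		return (lru_count * vfs_cache_pressure) / 100;
	}

	int main(void)
	{
		printf("estimate: %d\n", shrink_qd_memory(0));  /* query: 40 */
		printf("estimate: %d\n", shrink_qd_memory(16)); /* reclaim: 24 */
		return 0;
	}
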
diff --git a/fs/gfs2/recovery.c b/fs/gfs2/recovery.c
index efd09c3d2b26..247e8f7d6b3d 100644
--- a/fs/gfs2/recovery.c
+++ b/fs/gfs2/recovery.c
@@ -13,7 +13,6 @@
#include <linux/buffer_head.h>
#include <linux/gfs2_ondisk.h>
#include <linux/crc32.h>
-#include <linux/lm_interface.h>
#include <linux/kthread.h>
#include <linux/freezer.h>
@@ -427,20 +426,23 @@ static int clean_journal(struct gfs2_jdesc *jd, struct gfs2_log_header_host *hea
}
-static void gfs2_lm_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
- unsigned int message)
+static void gfs2_recovery_done(struct gfs2_sbd *sdp, unsigned int jid,
+ unsigned int message)
{
- if (!sdp->sd_lockstruct.ls_ops->lm_recovery_done)
- return;
-
- if (likely(!test_bit(SDF_SHUTDOWN, &sdp->sd_flags)))
- sdp->sd_lockstruct.ls_ops->lm_recovery_done(
- sdp->sd_lockstruct.ls_lockspace, jid, message);
+ char env_jid[20];
+ char env_status[20];
+ char *envp[] = { env_jid, env_status, NULL };
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ ls->ls_recover_jid_done = jid;
+ ls->ls_recover_jid_status = message;
+ sprintf(env_jid, "JID=%d", jid);
+ sprintf(env_status, "RECOVERY=%s",
+ message == LM_RD_SUCCESS ? "Done" : "Failed");
+ kobject_uevent_env(&sdp->sd_kobj, KOBJ_CHANGE, envp);
}
-
/**
- * gfs2_recover_journal - recovery a given journal
+ * gfs2_recover_journal - recover a given journal
* @jd: the struct gfs2_jdesc describing the journal
*
* Acquire the journal's lock, check to see if the journal is clean, and
@@ -561,7 +563,7 @@ int gfs2_recover_journal(struct gfs2_jdesc *jd)
if (jd->jd_jid != sdp->sd_lockstruct.ls_jid)
gfs2_glock_dq_uninit(&ji_gh);
- gfs2_lm_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS);
+ gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_SUCCESS);
if (jd->jd_jid != sdp->sd_lockstruct.ls_jid)
gfs2_glock_dq_uninit(&j_gh);
@@ -581,7 +583,7 @@ fail_gunlock_j:
fs_info(sdp, "jid=%u: %s\n", jd->jd_jid, (error) ? "Failed" : "Done");
fail:
- gfs2_lm_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP);
+ gfs2_recovery_done(sdp, jd->jd_jid, LM_RD_GAVEUP);
return error;
}
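gfs2_recovery_done() above now raises the CHANGE uevent directly; the payload
is nothing more than an array of "KEY=value" strings terminated by a NULL
sentinel, which is exactly what userspace helpers receive. A userspace sketch
of building that envp (the values are invented):

	#include <stdio.h>

	int main(void)
	{
		unsigned int jid = 0;
		int success = 1;
		char env_jid[20], env_status[20];
		char *envp[] = { env_jid, env_status, NULL };

		snprintf(env_jid, sizeof(env_jid), "JID=%u", jid);
		snprintf(env_status, sizeof(env_status), "RECOVERY=%s",
			 success ? "Done" : "Failed");

		for (char **p = envp; *p; p++)   /* what the helper sees */
			printf("%s\n", *p);
		return 0;
	}
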
diff --git a/fs/gfs2/rgrp.c b/fs/gfs2/rgrp.c
index 8b01c635d925..ba5a021b1c57 100644
--- a/fs/gfs2/rgrp.c
+++ b/fs/gfs2/rgrp.c
@@ -13,7 +13,6 @@
#include <linux/buffer_head.h>
#include <linux/fs.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include <linux/prefetch.h>
#include "gfs2.h"
diff --git a/fs/gfs2/super.c b/fs/gfs2/super.c
index 141b781f2fcc..7cf302b135ce 100644
--- a/fs/gfs2/super.c
+++ b/fs/gfs2/super.c
@@ -15,7 +15,6 @@
#include <linux/crc32.h>
#include <linux/gfs2_ondisk.h>
#include <linux/bio.h>
-#include <linux/lm_interface.h>
#include "gfs2.h"
#include "incore.h"
diff --git a/fs/gfs2/super.h b/fs/gfs2/super.h
index f6b8b00ad881..91abdbedcc86 100644
--- a/fs/gfs2/super.h
+++ b/fs/gfs2/super.h
@@ -14,7 +14,7 @@
#include <linux/dcache.h>
#include "incore.h"
-void gfs2_lm_unmount(struct gfs2_sbd *sdp);
+extern void gfs2_lm_unmount(struct gfs2_sbd *sdp);
static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
{
@@ -27,21 +27,23 @@ static inline unsigned int gfs2_jindex_size(struct gfs2_sbd *sdp)
void gfs2_jindex_free(struct gfs2_sbd *sdp);
-struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
-int gfs2_jdesc_check(struct gfs2_jdesc *jd);
+extern int gfs2_mount_args(struct gfs2_sbd *sdp, struct gfs2_args *args, char *data);
-int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
- struct gfs2_inode **ipp);
+extern struct gfs2_jdesc *gfs2_jdesc_find(struct gfs2_sbd *sdp, unsigned int jid);
+extern int gfs2_jdesc_check(struct gfs2_jdesc *jd);
-int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
+extern int gfs2_lookup_in_master_dir(struct gfs2_sbd *sdp, char *filename,
+ struct gfs2_inode **ipp);
-int gfs2_statfs_init(struct gfs2_sbd *sdp);
-void gfs2_statfs_change(struct gfs2_sbd *sdp,
- s64 total, s64 free, s64 dinodes);
-int gfs2_statfs_sync(struct gfs2_sbd *sdp);
+extern int gfs2_make_fs_rw(struct gfs2_sbd *sdp);
-int gfs2_freeze_fs(struct gfs2_sbd *sdp);
-void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
+extern int gfs2_statfs_init(struct gfs2_sbd *sdp);
+extern void gfs2_statfs_change(struct gfs2_sbd *sdp, s64 total, s64 free,
+ s64 dinodes);
+extern int gfs2_statfs_sync(struct gfs2_sbd *sdp);
+
+extern int gfs2_freeze_fs(struct gfs2_sbd *sdp);
+extern void gfs2_unfreeze_fs(struct gfs2_sbd *sdp);
extern struct file_system_type gfs2_fs_type;
extern struct file_system_type gfs2meta_fs_type;
diff --git a/fs/gfs2/sys.c b/fs/gfs2/sys.c
index 26c1fa777a95..a78997ea5037 100644
--- a/fs/gfs2/sys.c
+++ b/fs/gfs2/sys.c
@@ -14,9 +14,8 @@
#include <linux/buffer_head.h>
#include <linux/module.h>
#include <linux/kobject.h>
-#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include <asm/uaccess.h>
+#include <linux/gfs2_ondisk.h>
#include "gfs2.h"
#include "incore.h"
@@ -224,14 +223,145 @@ static struct lockstruct_attr lockstruct_attr_##name = __ATTR_RO(name)
LOCKSTRUCT_ATTR(jid, "%u\n");
LOCKSTRUCT_ATTR(first, "%u\n");
-LOCKSTRUCT_ATTR(lvb_size, "%u\n");
-LOCKSTRUCT_ATTR(flags, "%d\n");
static struct attribute *lockstruct_attrs[] = {
&lockstruct_attr_jid.attr,
&lockstruct_attr_first.attr,
- &lockstruct_attr_lvb_size.attr,
- &lockstruct_attr_flags.attr,
+ NULL,
+};
+
+/*
+ * lock_module. Originally from lock_dlm
+ */
+
+static ssize_t proto_name_show(struct gfs2_sbd *sdp, char *buf)
+{
+ const struct lm_lockops *ops = sdp->sd_lockstruct.ls_ops;
+ return sprintf(buf, "%s\n", ops->lm_proto_name);
+}
+
+static ssize_t block_show(struct gfs2_sbd *sdp, char *buf)
+{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ ssize_t ret;
+ int val = 0;
+
+ if (test_bit(DFL_BLOCK_LOCKS, &ls->ls_flags))
+ val = 1;
+ ret = sprintf(buf, "%d\n", val);
+ return ret;
+}
+
+static ssize_t block_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
+{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ ssize_t ret = len;
+ int val;
+
+ val = simple_strtol(buf, NULL, 0);
+
+ if (val == 1)
+ set_bit(DFL_BLOCK_LOCKS, &ls->ls_flags);
+ else if (val == 0) {
+ clear_bit(DFL_BLOCK_LOCKS, &ls->ls_flags);
+ smp_mb__after_clear_bit();
+ gfs2_glock_thaw(sdp);
+ } else {
+ ret = -EINVAL;
+ }
+ return ret;
+}
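
block_show() and block_store() back a single boolean sysfs file. A rough userspace sketch of toggling it follows (not part of this patch; the /sys/fs/gfs2/<fsname>/lock_module/block path is an assumption based on the "lock_module" group name registered later in this patch):

	/* Hypothetical userspace helper; the sysfs path is an assumption. */
	#include <fcntl.h>
	#include <stdio.h>
	#include <unistd.h>

	static int gfs2_set_block(const char *fsname, int on)
	{
		char path[256];
		int fd, ret;

		snprintf(path, sizeof(path),
			 "/sys/fs/gfs2/%s/lock_module/block", fsname);
		fd = open(path, O_WRONLY);
		if (fd < 0)
			return -1;
		/* block_store() parses "0" or "1" via simple_strtol() */
		ret = (write(fd, on ? "1" : "0", 1) == 1) ? 0 : -1;
		close(fd);
		return ret;
	}
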
+
+static ssize_t lkid_show(struct gfs2_sbd *sdp, char *buf)
+{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ return sprintf(buf, "%u\n", ls->ls_id);
+}
+
+static ssize_t lkfirst_show(struct gfs2_sbd *sdp, char *buf)
+{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ return sprintf(buf, "%d\n", ls->ls_first);
+}
+
+static ssize_t first_done_show(struct gfs2_sbd *sdp, char *buf)
+{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ return sprintf(buf, "%d\n", ls->ls_first_done);
+}
+
+static ssize_t recover_show(struct gfs2_sbd *sdp, char *buf)
+{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ return sprintf(buf, "%d\n", ls->ls_recover_jid);
+}
+
+static void gfs2_jdesc_make_dirty(struct gfs2_sbd *sdp, unsigned int jid)
+{
+ struct gfs2_jdesc *jd;
+
+ spin_lock(&sdp->sd_jindex_spin);
+ list_for_each_entry(jd, &sdp->sd_jindex_list, jd_list) {
+ if (jd->jd_jid != jid)
+ continue;
+ jd->jd_dirty = 1;
+ break;
+ }
+ spin_unlock(&sdp->sd_jindex_spin);
+}
+
+static ssize_t recover_store(struct gfs2_sbd *sdp, const char *buf, size_t len)
+{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ ls->ls_recover_jid = simple_strtol(buf, NULL, 0);
+ gfs2_jdesc_make_dirty(sdp, ls->ls_recover_jid);
+ if (sdp->sd_recoverd_process)
+ wake_up_process(sdp->sd_recoverd_process);
+ return len;
+}
+
+static ssize_t recover_done_show(struct gfs2_sbd *sdp, char *buf)
+{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ return sprintf(buf, "%d\n", ls->ls_recover_jid_done);
+}
+
+static ssize_t recover_status_show(struct gfs2_sbd *sdp, char *buf)
+{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ return sprintf(buf, "%d\n", ls->ls_recover_jid_status);
+}
+
+struct gdlm_attr {
+ struct attribute attr;
+ ssize_t (*show)(struct gfs2_sbd *sdp, char *);
+ ssize_t (*store)(struct gfs2_sbd *sdp, const char *, size_t);
+};
+
+#define GDLM_ATTR(_name,_mode,_show,_store) \
+static struct gdlm_attr gdlm_attr_##_name = __ATTR(_name,_mode,_show,_store)
+
+GDLM_ATTR(proto_name, 0444, proto_name_show, NULL);
+GDLM_ATTR(block, 0644, block_show, block_store);
+GDLM_ATTR(withdraw, 0644, withdraw_show, withdraw_store);
+GDLM_ATTR(id, 0444, lkid_show, NULL);
+GDLM_ATTR(first, 0444, lkfirst_show, NULL);
+GDLM_ATTR(first_done, 0444, first_done_show, NULL);
+GDLM_ATTR(recover, 0644, recover_show, recover_store);
+GDLM_ATTR(recover_done, 0444, recover_done_show, NULL);
+GDLM_ATTR(recover_status, 0444, recover_status_show, NULL);
+
+static struct attribute *lock_module_attrs[] = {
+ &gdlm_attr_proto_name.attr,
+ &gdlm_attr_block.attr,
+ &gdlm_attr_withdraw.attr,
+ &gdlm_attr_id.attr,
+ &lockstruct_attr_jid.attr,
+ &gdlm_attr_first.attr,
+ &gdlm_attr_first_done.attr,
+ &gdlm_attr_recover.attr,
+ &gdlm_attr_recover_done.attr,
+ &gdlm_attr_recover_status.attr,
NULL,
};
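
The gdlm_attr wrappers rely on the usual sysfs dispatch pattern: the kobject's sysfs_ops recovers the containing attribute and superblock with container_of() and forwards to the typed show/store callback. A sketch of such a show dispatcher (illustrative only; gfs2's actual sysfs_ops live elsewhere in sys.c and may differ):

	/* Illustrative sysfs show dispatcher for the gdlm_attr wrappers. */
	static ssize_t gdlm_attr_show(struct kobject *kobj,
				      struct attribute *attr, char *buf)
	{
		struct gfs2_sbd *sdp = container_of(kobj, struct gfs2_sbd,
						    sd_kobj);
		struct gdlm_attr *a = container_of(attr, struct gdlm_attr,
						   attr);

		return a->show ? a->show(sdp, buf) : 0;
	}
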
@@ -373,7 +503,6 @@ TUNE_ATTR(complain_secs, 0);
TUNE_ATTR(statfs_slow, 0);
TUNE_ATTR(new_files_jdata, 0);
TUNE_ATTR(quota_simul_sync, 1);
-TUNE_ATTR(quota_cache_secs, 1);
TUNE_ATTR(stall_secs, 1);
TUNE_ATTR(statfs_quantum, 1);
TUNE_ATTR_DAEMON(recoverd_secs, recoverd_process);
@@ -389,7 +518,6 @@ static struct attribute *tune_attrs[] = {
&tune_attr_complain_secs.attr,
&tune_attr_statfs_slow.attr,
&tune_attr_quota_simul_sync.attr,
- &tune_attr_quota_cache_secs.attr,
&tune_attr_stall_secs.attr,
&tune_attr_statfs_quantum.attr,
&tune_attr_recoverd_secs.attr,
@@ -414,6 +542,11 @@ static struct attribute_group tune_group = {
.attrs = tune_attrs,
};
+static struct attribute_group lock_module_group = {
+ .name = "lock_module",
+ .attrs = lock_module_attrs,
+};
+
int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
{
int error;
@@ -436,9 +569,15 @@ int gfs2_sys_fs_add(struct gfs2_sbd *sdp)
if (error)
goto fail_args;
+ error = sysfs_create_group(&sdp->sd_kobj, &lock_module_group);
+ if (error)
+ goto fail_tune;
+
kobject_uevent(&sdp->sd_kobj, KOBJ_ADD);
return 0;
+fail_tune:
+ sysfs_remove_group(&sdp->sd_kobj, &tune_group);
fail_args:
sysfs_remove_group(&sdp->sd_kobj, &args_group);
fail_lockstruct:
@@ -455,6 +594,7 @@ void gfs2_sys_fs_del(struct gfs2_sbd *sdp)
sysfs_remove_group(&sdp->sd_kobj, &tune_group);
sysfs_remove_group(&sdp->sd_kobj, &args_group);
sysfs_remove_group(&sdp->sd_kobj, &lockstruct_group);
+ sysfs_remove_group(&sdp->sd_kobj, &lock_module_group);
kobject_put(&sdp->sd_kobj);
}
diff --git a/fs/gfs2/trans.c b/fs/gfs2/trans.c
index f677b8a83f0c..33cd523ec97e 100644
--- a/fs/gfs2/trans.c
+++ b/fs/gfs2/trans.c
@@ -12,9 +12,8 @@
#include <linux/spinlock.h>
#include <linux/completion.h>
#include <linux/buffer_head.h>
-#include <linux/gfs2_ondisk.h>
#include <linux/kallsyms.h>
-#include <linux/lm_interface.h>
+#include <linux/gfs2_ondisk.h>
#include "gfs2.h"
#include "incore.h"
diff --git a/fs/gfs2/util.c b/fs/gfs2/util.c
index 374f50e95496..9d12b1118ba0 100644
--- a/fs/gfs2/util.c
+++ b/fs/gfs2/util.c
@@ -13,7 +13,6 @@
#include <linux/buffer_head.h>
#include <linux/crc32.h>
#include <linux/gfs2_ondisk.h>
-#include <linux/lm_interface.h>
#include <asm/uaccess.h>
#include "gfs2.h"
@@ -35,6 +34,8 @@ void gfs2_assert_i(struct gfs2_sbd *sdp)
int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
{
+ struct lm_lockstruct *ls = &sdp->sd_lockstruct;
+ const struct lm_lockops *lm = ls->ls_ops;
va_list args;
if (test_and_set_bit(SDF_SHUTDOWN, &sdp->sd_flags))
@@ -47,8 +48,12 @@ int gfs2_lm_withdraw(struct gfs2_sbd *sdp, char *fmt, ...)
fs_err(sdp, "about to withdraw this file system\n");
BUG_ON(sdp->sd_args.ar_debug);
- fs_err(sdp, "telling LM to withdraw\n");
- gfs2_withdraw_lockproto(&sdp->sd_lockstruct);
+ kobject_uevent(&sdp->sd_kobj, KOBJ_OFFLINE);
+
+ if (lm->lm_unmount) {
+ fs_err(sdp, "telling LM to unmount\n");
+ lm->lm_unmount(sdp);
+ }
fs_err(sdp, "withdrawn\n");
dump_stack();
diff --git a/fs/hfs/Kconfig b/fs/hfs/Kconfig
new file mode 100644
index 000000000000..b77c5bc20f8a
--- /dev/null
+++ b/fs/hfs/Kconfig
@@ -0,0 +1,12 @@
+config HFS_FS
+ tristate "Apple Macintosh file system support (EXPERIMENTAL)"
+ depends on BLOCK && EXPERIMENTAL
+ select NLS
+ help
+ If you say Y here, you will be able to mount Macintosh-formatted
+ floppy disks and hard drive partitions with full read-write access.
+ Please read <file:Documentation/filesystems/hfs.txt> to learn about
+ the available mount options.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called hfs.
diff --git a/fs/hfsplus/Kconfig b/fs/hfsplus/Kconfig
new file mode 100644
index 000000000000..a63371815aab
--- /dev/null
+++ b/fs/hfsplus/Kconfig
@@ -0,0 +1,13 @@
+config HFSPLUS_FS
+ tristate "Apple Extended HFS file system support"
+ depends on BLOCK
+ select NLS
+ select NLS_UTF8
+ help
+ If you say Y here, you will be able to mount extended format
+ Macintosh-formatted hard drive partitions with full read-write access.
+
+ This file system is often called HFS+ and was introduced with
+ MacOS 8. It includes all Mac-specific filesystem data such as
+ data forks and creator codes, but it also has several UNIX-style
+ features such as file ownership and permissions.
diff --git a/fs/hpfs/Kconfig b/fs/hpfs/Kconfig
new file mode 100644
index 000000000000..56bd15c5bf6c
--- /dev/null
+++ b/fs/hpfs/Kconfig
@@ -0,0 +1,14 @@
+config HPFS_FS
+ tristate "OS/2 HPFS file system support"
+ depends on BLOCK
+ help
+ OS/2 is IBM's operating system for PCs, the same as Warp, and HPFS
+ is the file system used for organizing files on OS/2 hard disk
+ partitions. Say Y if you want to be able to read files from and
+ write files to an OS/2 HPFS partition on your hard drive. OS/2
+ floppies however are in regular MSDOS format, so you don't need this
+ option in order to be able to read them. Read
+ <file:Documentation/filesystems/hpfs.txt>.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called hpfs. If unsure, say N.
diff --git a/fs/ioctl.c b/fs/ioctl.c
index 240ec63984cb..f8e8606a6abf 100644
--- a/fs/ioctl.c
+++ b/fs/ioctl.c
@@ -404,10 +404,12 @@ static int ioctl_fionbio(struct file *filp, int __user *argp)
if (O_NONBLOCK != O_NDELAY)
flag |= O_NDELAY;
#endif
+ lock_file_flags();
if (on)
filp->f_flags |= flag;
else
filp->f_flags &= ~flag;
+ unlock_file_flags();
return error;
}
@@ -423,20 +425,9 @@ static int ioctl_fioasync(unsigned int fd, struct file *filp,
flag = on ? FASYNC : 0;
/* Did FASYNC state change ? */
- if ((flag ^ filp->f_flags) & FASYNC) {
- if (filp->f_op && filp->f_op->fasync)
- error = filp->f_op->fasync(fd, filp, on);
- else
- error = -ENOTTY;
- }
- if (error)
- return error;
-
- if (on)
- filp->f_flags |= FASYNC;
- else
- filp->f_flags &= ~FASYNC;
- return error;
+ if ((flag ^ filp->f_flags) & FASYNC)
+ return fasync_change(fd, filp, on);
+ return 0;
}
static int ioctl_fsfreeze(struct file *filp)
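
The FASYNC bookkeeping now lives in a single helper, fasync_change(), added in the fs/fcntl.c part of this series (its body is not shown here). A plausible sketch, under the assumption that it serialises f_flags updates the same way ioctl_fionbio() above now does; the logic mirrors the code removed above:

	/* Hypothetical sketch of fasync_change(); the real helper is in
	 * fs/fcntl.c and may differ. */
	static int fasync_change(unsigned int fd, struct file *filp, int on)
	{
		int error = -ENOTTY;

		if (filp->f_op && filp->f_op->fasync)
			error = filp->f_op->fasync(fd, filp, on);
		if (error)
			return error;

		lock_file_flags();
		if (on)
			filp->f_flags |= FASYNC;
		else
			filp->f_flags &= ~FASYNC;
		unlock_file_flags();
		return 0;
	}
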
@@ -499,17 +490,11 @@ int do_vfs_ioctl(struct file *filp, unsigned int fd, unsigned int cmd,
break;
case FIONBIO:
- /* BKL needed to avoid races tweaking f_flags */
- lock_kernel();
error = ioctl_fionbio(filp, argp);
- unlock_kernel();
break;
case FIOASYNC:
- /* BKL needed to avoid races tweaking f_flags */
- lock_kernel();
error = ioctl_fioasync(fd, filp, argp);
- unlock_kernel();
break;
case FIOQSIZE:
diff --git a/fs/isofs/Kconfig b/fs/isofs/Kconfig
new file mode 100644
index 000000000000..8ab9878e3671
--- /dev/null
+++ b/fs/isofs/Kconfig
@@ -0,0 +1,39 @@
+config ISO9660_FS
+ tristate "ISO 9660 CDROM file system support"
+ help
+ This is the standard file system used on CD-ROMs. It was previously
+ known as "High Sierra File System" and is called "hsfs" on other
+ Unix systems. The so-called Rock-Ridge extensions which allow for
+ long Unix filenames and symbolic links are also supported by this
+ driver. If you have a CD-ROM drive and want to do more with it than
+ just listen to audio CDs and watch its LEDs, say Y (and read
+ <file:Documentation/filesystems/isofs.txt> and the CD-ROM-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>), thereby
+ enlarging your kernel by about 27 KB; otherwise say N.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called isofs.
+
+config JOLIET
+ bool "Microsoft Joliet CDROM extensions"
+ depends on ISO9660_FS
+ select NLS
+ help
+ Joliet is a Microsoft extension for the ISO 9660 CD-ROM file system
+ which allows for long filenames in Unicode format (Unicode is the
+ 16-bit character code, successor to ASCII, which encodes the
+ characters of almost all languages of the world; see
+ <http://www.unicode.org/> for more information). Say Y here if you
+ want to be able to read Joliet CD-ROMs under Linux.
+
+config ZISOFS
+ bool "Transparent decompression extension"
+ depends on ISO9660_FS
+ select ZLIB_INFLATE
+ help
+ This is a Linux-specific extension to Rock Ridge which lets you store
+ data in compressed form on a CD-ROM and have it transparently
+ decompressed when the CD-ROM is accessed. See
+ <http://www.kernel.org/pub/linux/utils/fs/zisofs/> for the tools
+ necessary to create such a filesystem. Say Y here if you want to be
+ able to read such compressed CD-ROMs.
diff --git a/fs/jbd2/journal.c b/fs/jbd2/journal.c
index 56675306ed81..eb343008eded 100644
--- a/fs/jbd2/journal.c
+++ b/fs/jbd2/journal.c
@@ -37,10 +37,10 @@
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
+#include <linux/math64.h>
#include <asm/uaccess.h>
#include <asm/page.h>
-#include <asm/div64.h>
EXPORT_SYMBOL(jbd2_journal_start);
EXPORT_SYMBOL(jbd2_journal_restart);
@@ -846,8 +846,8 @@ static int jbd2_seq_info_show(struct seq_file *seq, void *v)
jiffies_to_msecs(s->stats->u.run.rs_flushing / s->stats->ts_tid));
seq_printf(seq, " %ums logging transaction\n",
jiffies_to_msecs(s->stats->u.run.rs_logging / s->stats->ts_tid));
- seq_printf(seq, " %luus average transaction commit time\n",
- do_div(s->journal->j_average_commit_time, 1000));
+ seq_printf(seq, " %lluus average transaction commit time\n",
+ div_u64(s->journal->j_average_commit_time, 1000));
seq_printf(seq, " %lu handles per transaction\n",
s->stats->u.run.rs_handle_count / s->stats->ts_tid);
seq_printf(seq, " %lu blocks per transaction\n",
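
This hunk fixes a genuine misuse: do_div(n, base) divides n in place and evaluates to the remainder, so the old code printed the remainder and silently clobbered j_average_commit_time as a side effect. div_u64() returns the quotient and leaves its argument alone. In miniature:

	/* do_div() comes from <asm/div64.h>, div_u64() from <linux/math64.h>. */
	static void div_demo(void)
	{
		u64 n = 2500;
		u32 rem;

		rem = do_div(n, 1000);		/* n becomes 2, rem becomes 500 */
		n = div_u64(2500, 1000);	/* returns 2, no side effects */
		(void)rem;
	}
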
diff --git a/fs/jfs/Kconfig b/fs/jfs/Kconfig
new file mode 100644
index 000000000000..57cef19951db
--- /dev/null
+++ b/fs/jfs/Kconfig
@@ -0,0 +1,50 @@
+config JFS_FS
+ tristate "JFS filesystem support"
+ select NLS
+ select CRC32
+ help
+ This is a port of IBM's Journaled Filesystem. More information is
+ available in the file <file:Documentation/filesystems/jfs.txt>.
+
+ If you do not intend to use the JFS filesystem, say N.
+
+config JFS_POSIX_ACL
+ bool "JFS POSIX Access Control Lists"
+ depends on JFS_FS
+ select FS_POSIX_ACL
+ help
+ POSIX Access Control Lists (ACLs) support permissions for users and
+ groups beyond the owner/group/world scheme.
+
+ To learn more about Access Control Lists, visit the POSIX ACLs for
+ Linux website <http://acl.bestbits.at/>.
+
+ If you don't know what Access Control Lists are, say N.
+
+config JFS_SECURITY
+ bool "JFS Security Labels"
+ depends on JFS_FS
+ help
+ Security labels support alternative access control models
+ implemented by security modules like SELinux. This option
+ enables an extended attribute handler for file security
+ labels in the jfs filesystem.
+
+ If you are not using a security module that requires using
+ extended attributes for file security labels, say N.
+
+config JFS_DEBUG
+ bool "JFS debugging"
+ depends on JFS_FS
+ help
+ If you are experiencing any problems with the JFS filesystem, say
+ Y here. This will result in additional debugging messages being
+ written to the system log. Under normal circumstances, this
+ results in very little overhead.
+
+config JFS_STATISTICS
+ bool "JFS statistics"
+ depends on JFS_FS
+ help
+ Enabling this option will cause statistics from the JFS file system
+ to be made available to the user in the /proc/fs/jfs/ directory.
diff --git a/fs/jfs/jfs_debug.c b/fs/jfs/jfs_debug.c
index 6a73de84bcef..dd824d9b0b1a 100644
--- a/fs/jfs/jfs_debug.c
+++ b/fs/jfs/jfs_debug.c
@@ -90,7 +90,6 @@ void jfs_proc_init(void)
if (!(base = proc_mkdir("fs/jfs", NULL)))
return;
- base->owner = THIS_MODULE;
for (i = 0; i < NPROCENT; i++)
proc_create(Entries[i].name, 0, base, Entries[i].proc_fops);
diff --git a/fs/jfs/jfs_extent.c b/fs/jfs/jfs_extent.c
index 7ae1e3281de9..16bc326eeca4 100644
--- a/fs/jfs/jfs_extent.c
+++ b/fs/jfs/jfs_extent.c
@@ -362,11 +362,12 @@ exit:
int extHint(struct inode *ip, s64 offset, xad_t * xp)
{
struct super_block *sb = ip->i_sb;
- struct xadlist xadl;
- struct lxdlist lxdl;
- lxd_t lxd;
+ int nbperpage = JFS_SBI(sb)->nbperpage;
s64 prev;
- int rc, nbperpage = JFS_SBI(sb)->nbperpage;
+ int rc = 0;
+ s64 xaddr;
+ int xlen;
+ int xflag;
/* init the hint as "no hint provided" */
XADaddress(xp, 0);
@@ -376,46 +377,30 @@ int extHint(struct inode *ip, s64 offset, xad_t * xp)
*/
prev = ((offset & ~POFFSET) >> JFS_SBI(sb)->l2bsize) - nbperpage;
- /* if the offsets in the first page of the file,
- * no hint provided.
+ /* if the offset is in the first page of the file, no hint provided.
*/
if (prev < 0)
- return (0);
-
- /* prepare to lookup the previous page's extent info */
- lxdl.maxnlxd = 1;
- lxdl.nlxd = 1;
- lxdl.lxd = &lxd;
- LXDoffset(&lxd, prev)
- LXDlength(&lxd, nbperpage);
-
- xadl.maxnxad = 1;
- xadl.nxad = 0;
- xadl.xad = xp;
-
- /* perform the lookup */
- if ((rc = xtLookupList(ip, &lxdl, &xadl, 0)))
- return (rc);
-
- /* check if no extent exists for the previous page.
- * this is possible for sparse files.
- */
- if (xadl.nxad == 0) {
-// assert(ISSPARSE(ip));
- return (0);
- }
+ goto out;
- /* only preserve the abnr flag within the xad flags
- * of the returned hint.
- */
- xp->flag &= XAD_NOTRECORDED;
+ rc = xtLookup(ip, prev, nbperpage, &xflag, &xaddr, &xlen, 0);
- if(xadl.nxad != 1 || lengthXAD(xp) != nbperpage) {
- jfs_error(ip->i_sb, "extHint: corrupt xtree");
- return -EIO;
- }
+ if ((rc == 0) && xlen) {
+ if (xlen != nbperpage) {
+ jfs_error(ip->i_sb, "extHint: corrupt xtree");
+ rc = -EIO;
+ }
+ XADaddress(xp, xaddr);
+ XADlength(xp, xlen);
+ /*
+ * only preserve the abnr flag within the xad flags
+ * of the returned hint.
+ */
+ xp->flag = xflag & XAD_NOTRECORDED;
+ } else
+ rc = 0;
- return (0);
+out:
+ return (rc);
}
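
extHint() now needs only a single-extent lookup, so it calls xtLookup() directly instead of building one-entry lxd/xad lists for xtLookupList() (removed further down). The prototype kept in jfs_xtree.h shows the calling convention; a minimal caller modelled on the new code path:

	/* Minimal xtLookup() caller, mirroring the new extHint() path. The
	 * meaning of the final flag argument (0 here) follows extHint(). */
	static int lookup_prev_page(struct inode *ip, s64 prev, int nbperpage)
	{
		s64 xaddr;
		int xlen, xflag, rc;

		rc = xtLookup(ip, prev, nbperpage, &xflag, &xaddr, &xlen, 0);
		if (rc)
			return rc;
		if (xlen != nbperpage)	/* hole or short extent */
			return -ENOENT;
		/* xaddr now holds the backing block address */
		return 0;
	}
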
diff --git a/fs/jfs/jfs_imap.c b/fs/jfs/jfs_imap.c
index 0f94381ca6d0..346057218edc 100644
--- a/fs/jfs/jfs_imap.c
+++ b/fs/jfs/jfs_imap.c
@@ -57,12 +57,6 @@
#include "jfs_debug.h"
/*
- * __mark_inode_dirty expects inodes to be hashed. Since we don't want
- * special inodes in the fileset inode space, we make them appear hashed,
- * but do not put on any lists.
- */
-
-/*
* imap locks
*/
/* iag free list lock */
@@ -497,7 +491,9 @@ struct inode *diReadSpecial(struct super_block *sb, ino_t inum, int secondary)
release_metapage(mp);
/*
- * that will look hashed, but won't be on any list; hlist_del()
+ * __mark_inode_dirty expects inodes to be hashed. Since we don't
+ * want special inodes in the fileset inode space, we make them
+ * appear hashed, but do not put on any lists. hlist_del()
* will work fine and require no locking.
*/
ip->i_hash.pprev = &ip->i_hash.next;
diff --git a/fs/jfs/jfs_metapage.c b/fs/jfs/jfs_metapage.c
index c350057087dd..07b6c5dfb4b6 100644
--- a/fs/jfs/jfs_metapage.c
+++ b/fs/jfs/jfs_metapage.c
@@ -369,6 +369,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
unsigned long bio_bytes = 0;
unsigned long bio_offset = 0;
int offset;
+ int bad_blocks = 0;
page_start = (sector_t)page->index <<
(PAGE_CACHE_SHIFT - inode->i_blkbits);
@@ -394,6 +395,7 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
}
clear_bit(META_dirty, &mp->flag);
+ set_bit(META_io, &mp->flag);
block_offset = offset >> inode->i_blkbits;
lblock = page_start + block_offset;
if (bio) {
@@ -402,7 +404,6 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
len = min(xlen, blocks_per_mp);
xlen -= len;
bio_bytes += len << inode->i_blkbits;
- set_bit(META_io, &mp->flag);
continue;
}
/* Not contiguous */
@@ -424,12 +425,14 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
xlen = (PAGE_CACHE_SIZE - offset) >> inode->i_blkbits;
pblock = metapage_get_blocks(inode, lblock, &xlen);
if (!pblock) {
- /* Need better error handling */
printk(KERN_ERR "JFS: metapage_get_blocks failed\n");
- dec_io(page, last_write_complete);
+ /*
+ * We already called inc_io(), but can't cancel it
+ * with dec_io() until we're done with the page
+ */
+ bad_blocks++;
continue;
}
- set_bit(META_io, &mp->flag);
len = min(xlen, (int)JFS_SBI(inode->i_sb)->nbperpage);
bio = bio_alloc(GFP_NOFS, 1);
@@ -459,6 +462,9 @@ static int metapage_writepage(struct page *page, struct writeback_control *wbc)
unlock_page(page);
+ if (bad_blocks)
+ goto err_out;
+
if (nr_underway == 0)
end_page_writeback(page);
@@ -474,7 +480,9 @@ skip:
bio_put(bio);
unlock_page(page);
dec_io(page, last_write_complete);
-
+err_out:
+ while (bad_blocks--)
+ dec_io(page, last_write_complete);
return -EIO;
}
diff --git a/fs/jfs/jfs_types.h b/fs/jfs/jfs_types.h
index 649f9817accd..43ea3713c083 100644
--- a/fs/jfs/jfs_types.h
+++ b/fs/jfs/jfs_types.h
@@ -58,35 +58,6 @@ struct timestruc_t {
#define ONES 0xffffffffu /* all bit on */
/*
- * logical xd (lxd)
- */
-typedef struct {
- unsigned len:24;
- unsigned off1:8;
- u32 off2;
-} lxd_t;
-
-/* lxd_t field construction */
-#define LXDlength(lxd, length32) ( (lxd)->len = length32 )
-#define LXDoffset(lxd, offset64)\
-{\
- (lxd)->off1 = ((s64)offset64) >> 32;\
- (lxd)->off2 = (offset64) & 0xffffffff;\
-}
-
-/* lxd_t field extraction */
-#define lengthLXD(lxd) ( (lxd)->len )
-#define offsetLXD(lxd)\
- ( ((s64)((lxd)->off1)) << 32 | (lxd)->off2 )
-
-/* lxd list */
-struct lxdlist {
- s16 maxnlxd;
- s16 nlxd;
- lxd_t *lxd;
-};
-
-/*
* physical xd (pxd)
*/
typedef struct {
diff --git a/fs/jfs/jfs_xtree.c b/fs/jfs/jfs_xtree.c
index ae3acafb447b..44380fb0d7c2 100644
--- a/fs/jfs/jfs_xtree.c
+++ b/fs/jfs/jfs_xtree.c
@@ -164,11 +164,8 @@ int xtLookup(struct inode *ip, s64 lstart,
/* is lookup offset beyond eof ? */
size = ((u64) ip->i_size + (JFS_SBI(ip->i_sb)->bsize - 1)) >>
JFS_SBI(ip->i_sb)->l2bsize;
- if (lstart >= size) {
- jfs_err("xtLookup: lstart (0x%lx) >= size (0x%lx)",
- (ulong) lstart, (ulong) size);
+ if (lstart >= size)
return 0;
- }
}
/*
@@ -220,264 +217,6 @@ int xtLookup(struct inode *ip, s64 lstart,
return rc;
}
-
-/*
- * xtLookupList()
- *
- * function: map a single logical extent into a list of physical extent;
- *
- * parameter:
- * struct inode *ip,
- * struct lxdlist *lxdlist, lxd list (in)
- * struct xadlist *xadlist, xad list (in/out)
- * int flag)
- *
- * coverage of lxd by xad under assumption of
- * . lxd's are ordered and disjoint.
- * . xad's are ordered and disjoint.
- *
- * return:
- * 0: success
- *
- * note: a page being written (even a single byte) is backed fully,
- * except the last page which is only backed with blocks
- * required to cover the last byte;
- * the extent backing a page is fully contained within an xad;
- */
-int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
- struct xadlist * xadlist, int flag)
-{
- int rc = 0;
- struct btstack btstack;
- int cmp;
- s64 bn;
- struct metapage *mp;
- xtpage_t *p;
- int index;
- lxd_t *lxd;
- xad_t *xad, *pxd;
- s64 size, lstart, lend, xstart, xend, pstart;
- s64 llen, xlen, plen;
- s64 xaddr, paddr;
- int nlxd, npxd, maxnpxd;
-
- npxd = xadlist->nxad = 0;
- maxnpxd = xadlist->maxnxad;
- pxd = xadlist->xad;
-
- nlxd = lxdlist->nlxd;
- lxd = lxdlist->lxd;
-
- lstart = offsetLXD(lxd);
- llen = lengthLXD(lxd);
- lend = lstart + llen;
-
- size = (ip->i_size + (JFS_SBI(ip->i_sb)->bsize - 1)) >>
- JFS_SBI(ip->i_sb)->l2bsize;
-
- /*
- * search for the xad entry covering the logical extent
- */
- search:
- if (lstart >= size)
- return 0;
-
- if ((rc = xtSearch(ip, lstart, NULL, &cmp, &btstack, 0)))
- return rc;
-
- /*
- * compute the physical extent covering logical extent
- *
- * N.B. search may have failed (e.g., hole in sparse file),
- * and returned the index of the next entry.
- */
-//map:
- /* retrieve search result */
- XT_GETSEARCH(ip, btstack.top, bn, mp, p, index);
-
- /* is xad on the next sibling page ? */
- if (index == le16_to_cpu(p->header.nextindex)) {
- if (p->header.flag & BT_ROOT)
- goto mapend;
-
- if ((bn = le64_to_cpu(p->header.next)) == 0)
- goto mapend;
-
- XT_PUTPAGE(mp);
-
- /* get next sibling page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
-
- index = XTENTRYSTART;
- }
-
- xad = &p->xad[index];
-
- /*
- * is lxd covered by xad ?
- */
- compare:
- xstart = offsetXAD(xad);
- xlen = lengthXAD(xad);
- xend = xstart + xlen;
- xaddr = addressXAD(xad);
-
- compare1:
- if (xstart < lstart)
- goto compare2;
-
- /* (lstart <= xstart) */
-
- /* lxd is NOT covered by xad */
- if (lend <= xstart) {
- /*
- * get next lxd
- */
- if (--nlxd == 0)
- goto mapend;
- lxd++;
-
- lstart = offsetLXD(lxd);
- llen = lengthLXD(lxd);
- lend = lstart + llen;
- if (lstart >= size)
- goto mapend;
-
- /* compare with the current xad */
- goto compare1;
- }
- /* lxd is covered by xad */
- else { /* (xstart < lend) */
-
- /* initialize new pxd */
- pstart = xstart;
- plen = min(lend - xstart, xlen);
- paddr = xaddr;
-
- goto cover;
- }
-
- /* (xstart < lstart) */
- compare2:
- /* lxd is covered by xad */
- if (lstart < xend) {
- /* initialize new pxd */
- pstart = lstart;
- plen = min(xend - lstart, llen);
- paddr = xaddr + (lstart - xstart);
-
- goto cover;
- }
- /* lxd is NOT covered by xad */
- else { /* (xend <= lstart) */
-
- /*
- * get next xad
- *
- * linear search next xad covering lxd on
- * the current xad page, and then tree search
- */
- if (index == le16_to_cpu(p->header.nextindex) - 1) {
- if (p->header.flag & BT_ROOT)
- goto mapend;
-
- XT_PUTPAGE(mp);
- goto search;
- } else {
- index++;
- xad++;
-
- /* compare with new xad */
- goto compare;
- }
- }
-
- /*
- * lxd is covered by xad and a new pxd has been initialized
- * (lstart <= xstart < lend) or (xstart < lstart < xend)
- */
- cover:
- /* finalize pxd corresponding to current xad */
- XT_PUTENTRY(pxd, xad->flag, pstart, plen, paddr);
-
- if (++npxd >= maxnpxd)
- goto mapend;
- pxd++;
-
- /*
- * lxd is fully covered by xad
- */
- if (lend <= xend) {
- /*
- * get next lxd
- */
- if (--nlxd == 0)
- goto mapend;
- lxd++;
-
- lstart = offsetLXD(lxd);
- llen = lengthLXD(lxd);
- lend = lstart + llen;
- if (lstart >= size)
- goto mapend;
-
- /*
- * test for old xad covering new lxd
- * (old xstart < new lstart)
- */
- goto compare2;
- }
- /*
- * lxd is partially covered by xad
- */
- else { /* (xend < lend) */
-
- /*
- * get next xad
- *
- * linear search next xad covering lxd on
- * the current xad page, and then next xad page search
- */
- if (index == le16_to_cpu(p->header.nextindex) - 1) {
- if (p->header.flag & BT_ROOT)
- goto mapend;
-
- if ((bn = le64_to_cpu(p->header.next)) == 0)
- goto mapend;
-
- XT_PUTPAGE(mp);
-
- /* get next sibling page */
- XT_GETPAGE(ip, bn, mp, PSIZE, p, rc);
- if (rc)
- return rc;
-
- index = XTENTRYSTART;
- xad = &p->xad[index];
- } else {
- index++;
- xad++;
- }
-
- /*
- * test for new xad covering old lxd
- * (old lstart < new xstart)
- */
- goto compare;
- }
-
- mapend:
- xadlist->nxad = npxd;
-
-//out:
- XT_PUTPAGE(mp);
-
- return rc;
-}
-
-
/*
* xtSearch()
*
diff --git a/fs/jfs/jfs_xtree.h b/fs/jfs/jfs_xtree.h
index 70815c8a3d6a..08c0c749b986 100644
--- a/fs/jfs/jfs_xtree.h
+++ b/fs/jfs/jfs_xtree.h
@@ -110,8 +110,6 @@ typedef union {
*/
extern int xtLookup(struct inode *ip, s64 lstart, s64 llen,
int *pflag, s64 * paddr, int *plen, int flag);
-extern int xtLookupList(struct inode *ip, struct lxdlist * lxdlist,
- struct xadlist * xadlist, int flag);
extern void xtInitRoot(tid_t tid, struct inode *ip);
extern int xtInsert(tid_t tid, struct inode *ip,
int xflag, s64 xoff, int xlen, s64 * xaddrp, int flag);
diff --git a/fs/jfs/super.c b/fs/jfs/super.c
index b37d1f78b854..6f21adf9479a 100644
--- a/fs/jfs/super.c
+++ b/fs/jfs/super.c
@@ -29,6 +29,7 @@
#include <linux/posix_acl.h>
#include <linux/buffer_head.h>
#include <linux/exportfs.h>
+#include <linux/crc32.h>
#include <asm/uaccess.h>
#include <linux/seq_file.h>
@@ -168,6 +169,9 @@ static int jfs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_files = maxinodes;
buf->f_ffree = maxinodes - (atomic_read(&imap->im_numinos) -
atomic_read(&imap->im_numfree));
+ buf->f_fsid.val[0] = (u32)crc32_le(0, sbi->uuid, sizeof(sbi->uuid)/2);
+ buf->f_fsid.val[1] = (u32)crc32_le(0, sbi->uuid + sizeof(sbi->uuid)/2,
+ sizeof(sbi->uuid)/2);
buf->f_namelen = JFS_NAME_MAX;
return 0;
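
The new f_fsid is derived by CRC-32 folding each half of the superblock's 16-byte uuid, giving an id that is stable across mounts without exposing the raw uuid. The same derivation in isolation:

	/* Standalone sketch of the f_fsid derivation above (kernel context;
	 * crc32_le() is declared in <linux/crc32.h>). */
	static void uuid_to_fsid(const u8 uuid[16], u32 fsid[2])
	{
		fsid[0] = crc32_le(0, uuid, 8);
		fsid[1] = crc32_le(0, uuid + 8, 8);
	}
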
diff --git a/fs/minix/Kconfig b/fs/minix/Kconfig
new file mode 100644
index 000000000000..0fd7ca994264
--- /dev/null
+++ b/fs/minix/Kconfig
@@ -0,0 +1,17 @@
+config MINIX_FS
+ tristate "Minix file system support"
+ depends on BLOCK
+ help
+ Minix is a simple operating system used in many classes about
+ operating systems.
+ The minix file system (method to organize files on a hard disk
+ partition or a floppy disk) was the original file system for Linux,
+ but has been superseded by the second extended file system ext2fs.
+ You don't want to use the minix file system on your hard disk
+ because of certain built-in restrictions, but it is sometimes found
+ on older Linux floppy disks. This option will enlarge your kernel
+ by about 28 KB. If unsure, say N.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called minix. Note that the file system of your root
+ partition (the one containing the directory /) cannot be compiled as
+ a module.
diff --git a/fs/ncpfs/Kconfig b/fs/ncpfs/Kconfig
index 142808427b25..c931cf22a1f6 100644
--- a/fs/ncpfs/Kconfig
+++ b/fs/ncpfs/Kconfig
@@ -1,6 +1,27 @@
#
# NCP Filesystem configuration
#
+config NCP_FS
+ tristate "NCP file system support (to mount NetWare volumes)"
+ depends on IPX!=n || INET
+ help
+ NCP (NetWare Core Protocol) is a protocol that runs over IPX and is
+ used by Novell NetWare clients to talk to file servers. It is to
+ IPX what NFS is to TCP/IP, if that helps. Saying Y here allows you
+ to mount NetWare file server volumes and to access them just like
+ any other Unix directory. For details, please read the file
+ <file:Documentation/filesystems/ncpfs.txt> in the kernel source and
+ the IPX-HOWTO from <http://www.tldp.org/docs.html#howto>.
+
+ You do not have to say Y here if you want your Linux box to act as a
+ file *server* for Novell NetWare clients.
+
+ General information about how to connect Linux, Windows machines and
+ Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>.
+
+ To compile this as a module, choose M here: the module will be called
+ ncpfs. Say N unless you are connected to a Novell network.
+
config NCPFS_PACKET_SIGNING
bool "Packet signatures"
depends on NCP_FS
diff --git a/fs/nfs/Kconfig b/fs/nfs/Kconfig
new file mode 100644
index 000000000000..0272437dc990
--- /dev/null
+++ b/fs/nfs/Kconfig
@@ -0,0 +1,95 @@
+config NFS_FS
+ tristate "NFS client support"
+ depends on INET
+ select LOCKD
+ select SUNRPC
+ select NFS_ACL_SUPPORT if NFS_V3_ACL
+ help
+ Choose Y here if you want to access files residing on other
+ computers using Sun's Network File System protocol. To compile
+ this file system support as a module, choose M here: the module
+ will be called nfs.
+
+ To mount file systems exported by NFS servers, you also need to
+ install the user space mount.nfs command which can be found in
+ the Linux nfs-utils package, available from http://linux-nfs.org/.
+ Information about using the mount command is available in the
+ mount(8) man page. More detail about the Linux NFS client
+ implementation is available via the nfs(5) man page.
+
+ Below you can choose which versions of the NFS protocol are
+ available in the kernel to mount NFS servers. Support for NFS
+ version 2 (RFC 1094) is always available when NFS_FS is selected.
+
+ To configure a system which mounts its root file system via NFS
+ at boot time, say Y here, select "Kernel level IP
+ autoconfiguration" in the NETWORK menu, and select "Root file
+ system on NFS" below. You cannot compile this file system as a
+ module in this case.
+
+ If unsure, say N.
+
+config NFS_V3
+ bool "NFS client support for NFS version 3"
+ depends on NFS_FS
+ help
+ This option enables support for version 3 of the NFS protocol
+ (RFC 1813) in the kernel's NFS client.
+
+ If unsure, say Y.
+
+config NFS_V3_ACL
+ bool "NFS client support for the NFSv3 ACL protocol extension"
+ depends on NFS_V3
+ help
+ Some NFS servers support an auxiliary NFSv3 ACL protocol that
+ Sun added to Solaris but that never became an official part of the
+ NFS version 3 protocol. This protocol extension allows
+ applications on NFS clients to manipulate POSIX Access Control
+ Lists on files residing on NFS servers. NFS servers enforce
+ ACLs on local files whether this protocol is available or not.
+
+ Choose Y here if your NFS server supports the Solaris NFSv3 ACL
+ protocol extension and you want your NFS client to allow
+ applications to access and modify ACLs on files on the server.
+
+ Most NFS servers don't support the Solaris NFSv3 ACL protocol
+ extension. You can choose N here or specify the "noacl" mount
+ option to prevent your NFS client from trying to use the NFSv3
+ ACL protocol.
+
+ If unsure, say N.
+
+config NFS_V4
+ bool "NFS client support for NFS version 4 (EXPERIMENTAL)"
+ depends on NFS_FS && EXPERIMENTAL
+ select RPCSEC_GSS_KRB5
+ help
+ This option enables support for version 4 of the NFS protocol
+ (RFC 3530) in the kernel's NFS client.
+
+ To mount NFS servers using NFSv4, you also need to install user
+ space programs which can be found in the Linux nfs-utils package,
+ available from http://linux-nfs.org/.
+
+ If unsure, say N.
+
+config ROOT_NFS
+ bool "Root file system on NFS"
+ depends on NFS_FS=y && IP_PNP
+ help
+ If you want your system to mount its root file system via NFS,
+ choose Y here. This is common practice for managing systems
+ without local permanent storage. For details, read
+ <file:Documentation/filesystems/nfsroot.txt>.
+
+ Most people say N here.
+
+config NFS_FSCACHE
+ bool "Provide NFS client caching support (EXPERIMENTAL)"
+ depends on EXPERIMENTAL
+ depends on NFS_FS=m && FSCACHE || NFS_FS=y && FSCACHE=y
+ help
+ Say Y here if you want NFS data to be cached locally on disc through
+ the general filesystem cache manager.
+
diff --git a/fs/nfs/Makefile b/fs/nfs/Makefile
index ac6170c594a3..845159814de2 100644
--- a/fs/nfs/Makefile
+++ b/fs/nfs/Makefile
@@ -15,3 +15,4 @@ nfs-$(CONFIG_NFS_V4) += nfs4proc.o nfs4xdr.o nfs4state.o nfs4renewd.o \
callback.o callback_xdr.o callback_proc.o \
nfs4namespace.o
nfs-$(CONFIG_SYSCTL) += sysctl.o
+nfs-$(CONFIG_NFS_FSCACHE) += fscache.o fscache-index.o
diff --git a/fs/nfs/client.c b/fs/nfs/client.c
index 9b728f3565a1..a2e93f2ab5e1 100644
--- a/fs/nfs/client.c
+++ b/fs/nfs/client.c
@@ -45,6 +45,7 @@
#include "delegation.h"
#include "iostat.h"
#include "internal.h"
+#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_CLIENT
@@ -154,6 +155,8 @@ static struct nfs_client *nfs_alloc_client(const struct nfs_client_initdata *cl_
if (!IS_ERR(cred))
clp->cl_machine_cred = cred;
+ nfs_fscache_get_client_cookie(clp);
+
return clp;
error_3:
@@ -187,6 +190,8 @@ static void nfs_free_client(struct nfs_client *clp)
nfs4_shutdown_client(clp);
+ nfs_fscache_release_client_cookie(clp);
+
/* -EIO all pending I/O */
if (!IS_ERR(clp->cl_rpcclient))
rpc_shutdown_client(clp->cl_rpcclient);
@@ -701,6 +706,7 @@ static int nfs_init_server(struct nfs_server *server,
/* Initialise the client representation from the mount data */
server->flags = data->flags;
+ server->options = data->options;
if (data->rsize)
server->rsize = nfs_block_size(data->rsize, NULL);
@@ -1089,6 +1095,7 @@ static int nfs4_init_server(struct nfs_server *server,
/* Initialise the client representation from the mount data */
server->flags = data->flags;
server->caps |= NFS_CAP_ATOMIC_OPEN;
+ server->options = data->options;
/* Get a client record */
error = nfs4_set_client(server,
@@ -1500,7 +1507,7 @@ static int nfs_volume_list_show(struct seq_file *m, void *v)
/* display header on line 1 */
if (v == &nfs_volume_list) {
- seq_puts(m, "NV SERVER PORT DEV FSID\n");
+ seq_puts(m, "NV SERVER PORT DEV FSID FSC\n");
return 0;
}
/* display one transport per line on subsequent lines */
@@ -1514,12 +1521,13 @@ static int nfs_volume_list_show(struct seq_file *m, void *v)
(unsigned long long) server->fsid.major,
(unsigned long long) server->fsid.minor);
- seq_printf(m, "v%u %s %s %-7s %-17s\n",
+ seq_printf(m, "v%u %s %s %-7s %-17s %s\n",
clp->rpc_ops->version,
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_ADDR),
rpc_peeraddr2str(clp->cl_rpcclient, RPC_DISPLAY_HEX_PORT),
dev,
- fsid);
+ fsid,
+ nfs_server_fscache_state(server));
return 0;
}
@@ -1535,8 +1543,6 @@ int __init nfs_fs_proc_init(void)
if (!proc_fs_nfs)
goto error_0;
- proc_fs_nfs->owner = THIS_MODULE;
-
/* a file of servers with which we're dealing */
p = proc_create("servers", S_IFREG|S_IRUGO,
proc_fs_nfs, &nfs_server_list_fops);
diff --git a/fs/nfs/file.c b/fs/nfs/file.c
index 90f292b520d2..dda531744826 100644
--- a/fs/nfs/file.c
+++ b/fs/nfs/file.c
@@ -35,6 +35,7 @@
#include "delegation.h"
#include "internal.h"
#include "iostat.h"
+#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_FILE
@@ -409,6 +410,13 @@ static int nfs_write_end(struct file *file, struct address_space *mapping,
return copied;
}
+/*
+ * Partially or wholly invalidate a page
+ * - Release the private state associated with a page if undergoing complete
+ * page invalidation
+ * - Called if either PG_private or PG_fscache is set on the page
+ * - Caller holds page lock
+ */
static void nfs_invalidate_page(struct page *page, unsigned long offset)
{
dfprintk(PAGECACHE, "NFS: invalidate_page(%p, %lu)\n", page, offset);
@@ -417,16 +425,34 @@ static void nfs_invalidate_page(struct page *page, unsigned long offset)
return;
/* Cancel any unstarted writes on this page */
nfs_wb_page_cancel(page->mapping->host, page);
+
+ nfs_fscache_invalidate_page(page, page->mapping->host);
}
+/*
+ * Attempt to release the private state associated with a page
+ * - Called if either PG_private or PG_fscache is set on the page
+ * - Caller holds page lock
+ * - Return true (may release page) or false (may not)
+ */
static int nfs_release_page(struct page *page, gfp_t gfp)
{
dfprintk(PAGECACHE, "NFS: release_page(%p)\n", page);
/* If PagePrivate() is set, then the page is not freeable */
- return 0;
+ if (PagePrivate(page))
+ return 0;
+ return nfs_fscache_release_page(page, gfp);
}
+/*
+ * Attempt to clear the private state associated with a page when an error
+ * occurs that requires the cached contents of an inode to be written back or
+ * destroyed
+ * - Called if either PG_private or PG_fscache is set on the page
+ * - Caller holds page lock
+ * - Return 0 if successful, -error otherwise
+ */
static int nfs_launder_page(struct page *page)
{
struct inode *inode = page->mapping->host;
@@ -434,6 +460,7 @@ static int nfs_launder_page(struct page *page)
dfprintk(PAGECACHE, "NFS: launder_page(%ld, %llu)\n",
inode->i_ino, (long long)page_offset(page));
+ wait_on_page_fscache_write(page);
return nfs_wb_page(inode, page);
}
@@ -451,6 +478,11 @@ const struct address_space_operations nfs_file_aops = {
.launder_page = nfs_launder_page,
};
+/*
+ * Notification that a PTE pointing to an NFS page is about to be made
+ * writable, implying that someone is about to modify the page through a
+ * shared-writable mapping
+ */
static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
{
struct file *filp = vma->vm_file;
@@ -464,6 +496,9 @@ static int nfs_vm_page_mkwrite(struct vm_area_struct *vma, struct page *page)
filp->f_mapping->host->i_ino,
(long long)page_offset(page));
+ /* make sure the cache has finished storing the page */
+ wait_on_page_fscache_write(page);
+
lock_page(page);
mapping = page->mapping;
if (mapping != dentry->d_inode->i_mapping)
diff --git a/fs/nfs/fscache-index.c b/fs/nfs/fscache-index.c
new file mode 100644
index 000000000000..5b1006480bc2
--- /dev/null
+++ b/fs/nfs/fscache-index.c
@@ -0,0 +1,337 @@
+/* NFS FS-Cache index structure definition
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/in6.h>
+
+#include "internal.h"
+#include "fscache.h"
+
+#define NFSDBG_FACILITY NFSDBG_FSCACHE
+
+/*
+ * Define the NFS filesystem for FS-Cache. Upon registration FS-Cache sticks
+ * the cookie for the top-level index object for NFS into here. The top-level
+ * index can then have other cache objects inserted into it.
+ */
+struct fscache_netfs nfs_fscache_netfs = {
+ .name = "nfs",
+ .version = 0,
+};
+
+/*
+ * Register NFS for caching
+ */
+int nfs_fscache_register(void)
+{
+ return fscache_register_netfs(&nfs_fscache_netfs);
+}
+
+/*
+ * Unregister NFS for caching
+ */
+void nfs_fscache_unregister(void)
+{
+ fscache_unregister_netfs(&nfs_fscache_netfs);
+}
+
+/*
+ * Layout of the key for an NFS server cache object.
+ */
+struct nfs_server_key {
+ uint16_t nfsversion; /* NFS protocol version */
+ uint16_t family; /* address family */
+ uint16_t port; /* IP port */
+ union {
+ struct in_addr ipv4_addr; /* IPv4 address */
+ struct in6_addr ipv6_addr; /* IPv6 address */
+ } addr[0];
+};
+
+/*
+ * Generate a key to describe a server in the main NFS index
+ * - We return the length of the key, or 0 if we can't generate one
+ */
+static uint16_t nfs_server_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ const struct nfs_client *clp = cookie_netfs_data;
+ const struct sockaddr_in6 *sin6 = (struct sockaddr_in6 *) &clp->cl_addr;
+ const struct sockaddr_in *sin = (struct sockaddr_in *) &clp->cl_addr;
+ struct nfs_server_key *key = buffer;
+ uint16_t len = sizeof(struct nfs_server_key);
+
+ memset(key, 0, len);
+
+ key->nfsversion = clp->rpc_ops->version;
+ key->family = clp->cl_addr.ss_family;
+
+ switch (clp->cl_addr.ss_family) {
+ case AF_INET:
+ key->port = sin->sin_port;
+ key->addr[0].ipv4_addr = sin->sin_addr;
+ len += sizeof(key->addr[0].ipv4_addr);
+ break;
+
+ case AF_INET6:
+ key->port = sin6->sin6_port;
+ key->addr[0].ipv6_addr = sin6->sin6_addr;
+ len += sizeof(key->addr[0].ipv6_addr);
+ break;
+
+ default:
+ printk(KERN_WARNING "NFS: Unknown network family '%d'\n",
+ clp->cl_addr.ss_family);
+ len = 0;
+ break;
+ }
+
+ return len;
+}
+
+/*
+ * Define the server object for FS-Cache. This is used to describe a server
+ * object to fscache_acquire_cookie(). It is keyed by the NFS protocol and
+ * server address parameters.
+ */
+const struct fscache_cookie_def nfs_fscache_server_index_def = {
+ .name = "NFS.server",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+ .get_key = nfs_server_get_key,
+};
+
+/*
+ * Generate a key to describe a superblock key in the main NFS index
+ */
+static uint16_t nfs_super_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ const struct nfs_fscache_key *key;
+ const struct nfs_server *nfss = cookie_netfs_data;
+ uint16_t len;
+
+ key = nfss->fscache_key;
+ len = sizeof(key->key) + key->key.uniq_len;
+ if (len > bufmax) {
+ len = 0;
+ } else {
+ memcpy(buffer, &key->key, sizeof(key->key));
+ memcpy(buffer + sizeof(key->key),
+ key->key.uniquifier, key->key.uniq_len);
+ }
+
+ return len;
+}
+
+/*
+ * Define the superblock object for FS-Cache. This is used to describe a
+ * superblock object to fscache_acquire_cookie(). It is keyed by all the NFS
+ * parameters that might cause a separate superblock.
+ */
+const struct fscache_cookie_def nfs_fscache_super_index_def = {
+ .name = "NFS.super",
+ .type = FSCACHE_COOKIE_TYPE_INDEX,
+ .get_key = nfs_super_get_key,
+};
+
+/*
+ * Definition of the auxiliary data attached to NFS inode storage objects
+ * within the cache.
+ *
+ * The contents of this struct are recorded in the on-disk local cache in the
+ * auxiliary data attached to the data storage object backing an inode. This
+ * permits coherency to be managed when a new inode binds to an already extant
+ * cache object.
+ */
+struct nfs_fscache_inode_auxdata {
+ struct timespec mtime;
+ struct timespec ctime;
+ loff_t size;
+ u64 change_attr;
+};
+
+/*
+ * Generate a key to describe an NFS inode in an NFS server's index
+ */
+static uint16_t nfs_fscache_inode_get_key(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ const struct nfs_inode *nfsi = cookie_netfs_data;
+ uint16_t nsize;
+
+ /* use the inode's NFS filehandle as the key */
+ nsize = nfsi->fh.size;
+ if (nsize > bufmax)
+ return 0;
+ memcpy(buffer, nfsi->fh.data, nsize);
+ return nsize;
+}
+
+/*
+ * Get certain file attributes from the netfs data
+ * - This function can be absent for an index
+ * - Not permitted to return an error
+ * - The netfs data from the cookie being used as the source is presented
+ */
+static void nfs_fscache_inode_get_attr(const void *cookie_netfs_data,
+ uint64_t *size)
+{
+ const struct nfs_inode *nfsi = cookie_netfs_data;
+
+ *size = nfsi->vfs_inode.i_size;
+}
+
+/*
+ * Get the auxiliary data from netfs data
+ * - This function can be absent if the index carries no state data
+ * - Should store the auxiliary data in the buffer
+ * - Should return the amount of data stored
+ * - Not permitted to return an error
+ * - The netfs data from the cookie being used as the source is presented
+ */
+static uint16_t nfs_fscache_inode_get_aux(const void *cookie_netfs_data,
+ void *buffer, uint16_t bufmax)
+{
+ struct nfs_fscache_inode_auxdata auxdata;
+ const struct nfs_inode *nfsi = cookie_netfs_data;
+
+ memset(&auxdata, 0, sizeof(auxdata));
+ auxdata.size = nfsi->vfs_inode.i_size;
+ auxdata.mtime = nfsi->vfs_inode.i_mtime;
+ auxdata.ctime = nfsi->vfs_inode.i_ctime;
+
+ if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
+ auxdata.change_attr = nfsi->change_attr;
+
+ if (bufmax > sizeof(auxdata))
+ bufmax = sizeof(auxdata);
+
+ memcpy(buffer, &auxdata, bufmax);
+ return bufmax;
+}
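
The memset() before filling auxdata matters because nfs_fscache_inode_check_aux() below compares the stored and freshly built copies bytewise with memcmp(): any compiler padding inside struct nfs_fscache_inode_auxdata must be zeroed identically on both sides, or coherency checks would spuriously report FSCACHE_CHECKAUX_OBSOLETE. A minimal illustration (the padding layout assumes a common 64-bit ABI):

	/* Padding bytes are indeterminate unless the struct is zeroed first. */
	struct demo_aux {
		u32 a;	/* typically followed by 4 padding bytes ... */
		u64 b;	/* ... so that this member is 8-byte aligned */
	};

	static int demo_aux_match(const struct demo_aux *stored, u32 a, u64 b)
	{
		struct demo_aux cur;

		memset(&cur, 0, sizeof(cur));	/* zero the padding too */
		cur.a = a;
		cur.b = b;
		return memcmp(&cur, stored, sizeof(cur)) == 0;
	}
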
+
+/*
+ * Consult the netfs about the state of an object
+ * - This function can be absent if the index carries no state data
+ * - The netfs data from the cookie being used as the target is
+ * presented, as is the auxiliary data
+ */
+static
+enum fscache_checkaux nfs_fscache_inode_check_aux(void *cookie_netfs_data,
+ const void *data,
+ uint16_t datalen)
+{
+ struct nfs_fscache_inode_auxdata auxdata;
+ struct nfs_inode *nfsi = cookie_netfs_data;
+
+ if (datalen != sizeof(auxdata))
+ return FSCACHE_CHECKAUX_OBSOLETE;
+
+ memset(&auxdata, 0, sizeof(auxdata));
+ auxdata.size = nfsi->vfs_inode.i_size;
+ auxdata.mtime = nfsi->vfs_inode.i_mtime;
+ auxdata.ctime = nfsi->vfs_inode.i_ctime;
+
+ if (NFS_SERVER(&nfsi->vfs_inode)->nfs_client->rpc_ops->version == 4)
+ auxdata.change_attr = nfsi->change_attr;
+
+ if (memcmp(data, &auxdata, datalen) != 0)
+ return FSCACHE_CHECKAUX_OBSOLETE;
+
+ return FSCACHE_CHECKAUX_OKAY;
+}
+
+/*
+ * Indication from FS-Cache that the cookie is no longer cached
+ * - This function is called when the backing store currently caching a cookie
+ * is removed
+ * - The netfs should use this to clean up any markers indicating cached pages
+ * - This is mandatory for any object that may have data
+ */
+static void nfs_fscache_inode_now_uncached(void *cookie_netfs_data)
+{
+ struct nfs_inode *nfsi = cookie_netfs_data;
+ struct pagevec pvec;
+ pgoff_t first;
+ int loop, nr_pages;
+
+ pagevec_init(&pvec, 0);
+ first = 0;
+
+ dprintk("NFS: nfs_inode_now_uncached: nfs_inode 0x%p\n", nfsi);
+
+ for (;;) {
+ /* grab a bunch of pages to unmark */
+ nr_pages = pagevec_lookup(&pvec,
+ nfsi->vfs_inode.i_mapping,
+ first,
+ PAGEVEC_SIZE - pagevec_count(&pvec));
+ if (!nr_pages)
+ break;
+
+ for (loop = 0; loop < nr_pages; loop++)
+ ClearPageFsCache(pvec.pages[loop]);
+
+ first = pvec.pages[nr_pages - 1]->index + 1;
+
+ pvec.nr = nr_pages;
+ pagevec_release(&pvec);
+ cond_resched();
+ }
+}
+
+/*
+ * Get an extra reference on a read context.
+ * - This function can be absent if the completion function doesn't require a
+ * context.
+ * - The read context is passed back to NFS in the event that a data read on the
+ * cache fails with EIO - in which case the server must be contacted to
+ * retrieve the data, which requires the read context for security.
+ */
+static void nfs_fh_get_context(void *cookie_netfs_data, void *context)
+{
+ get_nfs_open_context(context);
+}
+
+/*
+ * Release an extra reference on a read context.
+ * - This function can be absent if the completion function doesn't require a
+ * context.
+ */
+static void nfs_fh_put_context(void *cookie_netfs_data, void *context)
+{
+ if (context)
+ put_nfs_open_context(context);
+}
+
+/*
+ * Define the inode object for FS-Cache. This is used to describe an inode
+ * object to fscache_acquire_cookie(). It is keyed by the NFS file handle for
+ * an inode.
+ *
+ * Coherency is managed by comparing the copies of i_size, i_mtime and i_ctime
+ * held in the cache auxiliary data for the data storage object with those in
+ * the inode struct in memory.
+ */
+const struct fscache_cookie_def nfs_fscache_inode_object_def = {
+ .name = "NFS.fh",
+ .type = FSCACHE_COOKIE_TYPE_DATAFILE,
+ .get_key = nfs_fscache_inode_get_key,
+ .get_attr = nfs_fscache_inode_get_attr,
+ .get_aux = nfs_fscache_inode_get_aux,
+ .check_aux = nfs_fscache_inode_check_aux,
+ .now_uncached = nfs_fscache_inode_now_uncached,
+ .get_context = nfs_fh_get_context,
+ .put_context = nfs_fh_put_context,
+};
diff --git a/fs/nfs/fscache.c b/fs/nfs/fscache.c
new file mode 100644
index 000000000000..968cf5d99b3f
--- /dev/null
+++ b/fs/nfs/fscache.c
@@ -0,0 +1,521 @@
+/* NFS filesystem cache interface
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#include <linux/init.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/nfs_fs.h>
+#include <linux/nfs_fs_sb.h>
+#include <linux/in6.h>
+#include <linux/seq_file.h>
+
+#include "internal.h"
+#include "iostat.h"
+#include "fscache.h"
+
+#define NFSDBG_FACILITY NFSDBG_FSCACHE
+
+static struct rb_root nfs_fscache_keys = RB_ROOT;
+static DEFINE_SPINLOCK(nfs_fscache_keys_lock);
+
+/*
+ * Get the per-client index cookie for an NFS client if the appropriate mount
+ * flag was set
+ * - We always try to get an index cookie for the client, but get filehandle
+ * cookies on a per-superblock basis, depending on the mount flags
+ */
+void nfs_fscache_get_client_cookie(struct nfs_client *clp)
+{
+ /* create a cache index for looking up filehandles */
+ clp->fscache = fscache_acquire_cookie(nfs_fscache_netfs.primary_index,
+ &nfs_fscache_server_index_def,
+ clp);
+ dfprintk(FSCACHE, "NFS: get client cookie (0x%p/0x%p)\n",
+ clp, clp->fscache);
+}
+
+/*
+ * Dispose of a per-client cookie
+ */
+void nfs_fscache_release_client_cookie(struct nfs_client *clp)
+{
+ dfprintk(FSCACHE, "NFS: releasing client cookie (0x%p/0x%p)\n",
+ clp, clp->fscache);
+
+ fscache_relinquish_cookie(clp->fscache, 0);
+ clp->fscache = NULL;
+}
+
+/*
+ * Get the cache cookie for an NFS superblock. We have to handle
+ * uniquification here because the cache doesn't do it for us.
+ */
+void nfs_fscache_get_super_cookie(struct super_block *sb,
+ struct nfs_parsed_mount_data *data)
+{
+ struct nfs_fscache_key *key, *xkey;
+ struct nfs_server *nfss = NFS_SB(sb);
+ struct rb_node **p, *parent;
+ const char *uniq = data->fscache_uniq ?: "";
+ int diff, ulen;
+
+ ulen = strlen(uniq);
+ key = kzalloc(sizeof(*key) + ulen, GFP_KERNEL);
+ if (!key)
+ return;
+
+ key->nfs_client = nfss->nfs_client;
+ key->key.super.s_flags = sb->s_flags & NFS_MS_MASK;
+ key->key.nfs_server.flags = nfss->flags;
+ key->key.nfs_server.rsize = nfss->rsize;
+ key->key.nfs_server.wsize = nfss->wsize;
+ key->key.nfs_server.acregmin = nfss->acregmin;
+ key->key.nfs_server.acregmax = nfss->acregmax;
+ key->key.nfs_server.acdirmin = nfss->acdirmin;
+ key->key.nfs_server.acdirmax = nfss->acdirmax;
+ key->key.nfs_server.fsid = nfss->fsid;
+ key->key.rpc_auth.au_flavor = nfss->client->cl_auth->au_flavor;
+
+ key->key.uniq_len = ulen;
+ memcpy(key->key.uniquifier, uniq, ulen);
+
+ spin_lock(&nfs_fscache_keys_lock);
+ p = &nfs_fscache_keys.rb_node;
+ parent = NULL;
+ while (*p) {
+ parent = *p;
+ xkey = rb_entry(parent, struct nfs_fscache_key, node);
+
+ if (key->nfs_client < xkey->nfs_client)
+ goto go_left;
+ if (key->nfs_client > xkey->nfs_client)
+ goto go_right;
+
+ diff = memcmp(&key->key, &xkey->key, sizeof(key->key));
+ if (diff < 0)
+ goto go_left;
+ if (diff > 0)
+ goto go_right;
+
+ if (key->key.uniq_len == 0)
+ goto non_unique;
+ diff = memcmp(key->key.uniquifier,
+ xkey->key.uniquifier,
+ key->key.uniq_len);
+ if (diff < 0)
+ goto go_left;
+ if (diff > 0)
+ goto go_right;
+ goto non_unique;
+
+ go_left:
+ p = &(*p)->rb_left;
+ continue;
+ go_right:
+ p = &(*p)->rb_right;
+ }
+
+ rb_link_node(&key->node, parent, p);
+ rb_insert_color(&key->node, &nfs_fscache_keys);
+ spin_unlock(&nfs_fscache_keys_lock);
+ nfss->fscache_key = key;
+
+ /* create a cache index for looking up filehandles */
+ nfss->fscache = fscache_acquire_cookie(nfss->nfs_client->fscache,
+ &nfs_fscache_super_index_def,
+ nfss);
+ dfprintk(FSCACHE, "NFS: get superblock cookie (0x%p/0x%p)\n",
+ nfss, nfss->fscache);
+ return;
+
+non_unique:
+ spin_unlock(&nfs_fscache_keys_lock);
+ kfree(key);
+ nfss->fscache_key = NULL;
+ nfss->fscache = NULL;
+ printk(KERN_WARNING "NFS:"
+ " Cache request denied due to non-unique superblock keys\n");
+}
+
+/*
+ * release a per-superblock cookie
+ */
+void nfs_fscache_release_super_cookie(struct super_block *sb)
+{
+ struct nfs_server *nfss = NFS_SB(sb);
+
+ dfprintk(FSCACHE, "NFS: releasing superblock cookie (0x%p/0x%p)\n",
+ nfss, nfss->fscache);
+
+ fscache_relinquish_cookie(nfss->fscache, 0);
+ nfss->fscache = NULL;
+
+ if (nfss->fscache_key) {
+ spin_lock(&nfs_fscache_keys_lock);
+ rb_erase(&nfss->fscache_key->node, &nfs_fscache_keys);
+ spin_unlock(&nfs_fscache_keys_lock);
+ kfree(nfss->fscache_key);
+ nfss->fscache_key = NULL;
+ }
+}
+
+/*
+ * Initialise the per-inode cache cookie pointer for an NFS inode.
+ */
+void nfs_fscache_init_inode_cookie(struct inode *inode)
+{
+ NFS_I(inode)->fscache = NULL;
+ if (S_ISREG(inode->i_mode))
+ set_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
+}
+
+/*
+ * Get the per-inode cache cookie for an NFS inode.
+ */
+static void nfs_fscache_enable_inode_cookie(struct inode *inode)
+{
+ struct super_block *sb = inode->i_sb;
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ if (nfsi->fscache || !NFS_FSCACHE(inode))
+ return;
+
+ if ((NFS_SB(sb)->options & NFS_OPTION_FSCACHE)) {
+ nfsi->fscache = fscache_acquire_cookie(
+ NFS_SB(sb)->fscache,
+ &nfs_fscache_inode_object_def,
+ nfsi);
+
+ dfprintk(FSCACHE, "NFS: get FH cookie (0x%p/0x%p/0x%p)\n",
+ sb, nfsi, nfsi->fscache);
+ }
+}
+
+/*
+ * Release a per-inode cookie.
+ */
+void nfs_fscache_release_inode_cookie(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ dfprintk(FSCACHE, "NFS: clear cookie (0x%p/0x%p)\n",
+ nfsi, nfsi->fscache);
+
+ fscache_relinquish_cookie(nfsi->fscache, 0);
+ nfsi->fscache = NULL;
+}
+
+/*
+ * Retire a per-inode cookie, destroying the data attached to it.
+ */
+void nfs_fscache_zap_inode_cookie(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ dfprintk(FSCACHE, "NFS: zapping cookie (0x%p/0x%p)\n",
+ nfsi, nfsi->fscache);
+
+ fscache_relinquish_cookie(nfsi->fscache, 1);
+ nfsi->fscache = NULL;
+}
+
+/*
+ * Turn off the cache with regard to a per-inode cookie if opened for writing,
+ * invalidating all the pages in the page cache relating to the associated
+ * inode to clear the per-page caching.
+ */
+static void nfs_fscache_disable_inode_cookie(struct inode *inode)
+{
+ clear_bit(NFS_INO_FSCACHE, &NFS_I(inode)->flags);
+
+ if (NFS_I(inode)->fscache) {
+ dfprintk(FSCACHE,
+ "NFS: nfsi 0x%p turning cache off\n", NFS_I(inode));
+
+ /* Need to invalidate any mapped pages that were read in before
+ * turning off the cache.
+ */
+ if (inode->i_mapping && inode->i_mapping->nrpages)
+ invalidate_inode_pages2(inode->i_mapping);
+
+ nfs_fscache_zap_inode_cookie(inode);
+ }
+}
+
+/*
+ * wait_on_bit() sleep function for uninterruptible waiting
+ */
+static int nfs_fscache_wait_bit(void *flags)
+{
+ schedule();
+ return 0;
+}
+
+/*
+ * Lock against someone else trying to also acquire or relinquish a cookie
+ */
+static inline void nfs_fscache_inode_lock(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ while (test_and_set_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags))
+ wait_on_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK,
+ nfs_fscache_wait_bit, TASK_UNINTERRUPTIBLE);
+}
+
+/*
+ * Unlock cookie management lock
+ */
+static inline void nfs_fscache_inode_unlock(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ smp_mb__before_clear_bit();
+ clear_bit(NFS_INO_FSCACHE_LOCK, &nfsi->flags);
+ smp_mb__after_clear_bit();
+ wake_up_bit(&nfsi->flags, NFS_INO_FSCACHE_LOCK);
+}
+
+/*
+ * Decide if we should enable or disable local caching for this inode.
+ * - For now, with NFS, only regular files that are open read-only will be able
+ * to use the cache.
+ * - May be invoked concurrently by parallel nfs_open() calls.
+ */
+void nfs_fscache_set_inode_cookie(struct inode *inode, struct file *filp)
+{
+ if (NFS_FSCACHE(inode)) {
+ nfs_fscache_inode_lock(inode);
+ if ((filp->f_flags & O_ACCMODE) != O_RDONLY)
+ nfs_fscache_disable_inode_cookie(inode);
+ else
+ nfs_fscache_enable_inode_cookie(inode);
+ nfs_fscache_inode_unlock(inode);
+ }
+}
+
+/*
+ * Replace a per-inode cookie due to revalidation detecting a file having
+ * changed on the server.
+ */
+void nfs_fscache_reset_inode_cookie(struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+ struct nfs_server *nfss = NFS_SERVER(inode);
+ struct fscache_cookie *old = nfsi->fscache;
+
+ nfs_fscache_inode_lock(inode);
+ if (nfsi->fscache) {
+ /* retire the current fscache cache and get a new one */
+ fscache_relinquish_cookie(nfsi->fscache, 1);
+
+ nfsi->fscache = fscache_acquire_cookie(
+ nfss->nfs_client->fscache,
+ &nfs_fscache_inode_object_def,
+ nfsi);
+
+ dfprintk(FSCACHE,
+ "NFS: revalidation new cookie (0x%p/0x%p/0x%p/0x%p)\n",
+ nfss, nfsi, old, nfsi->fscache);
+ }
+ nfs_fscache_inode_unlock(inode);
+}
+
+/*
+ * Release the caching state associated with a page, if the page isn't busy
+ * interacting with the cache.
+ * - Returns true (can release page) or false (page busy).
+ */
+int nfs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+ if (PageFsCacheWrite(page)) {
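+ /* the page is still being written to the cache; we can only
+ * wait for that to finish if the caller allows us to sleep */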
+ if (!(gfp & __GFP_WAIT))
+ return 0;
+ wait_on_page_fscache_write(page);
+ }
+
+ if (PageFsCache(page)) {
+ struct nfs_inode *nfsi = NFS_I(page->mapping->host);
+
+ BUG_ON(!nfsi->fscache);
+
+ dfprintk(FSCACHE, "NFS: fscache releasepage (0x%p/0x%p/0x%p)\n",
+ nfsi->fscache, page, nfsi);
+
+ fscache_uncache_page(nfsi->fscache, page);
+ nfs_add_fscache_stats(page->mapping->host,
+ NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
+ }
+
+ return 1;
+}
+
+/*
+ * Release the caching state associated with a page if undergoing complete page
+ * invalidation.
+ */
+void __nfs_fscache_invalidate_page(struct page *page, struct inode *inode)
+{
+ struct nfs_inode *nfsi = NFS_I(inode);
+
+ BUG_ON(!nfsi->fscache);
+
+ dfprintk(FSCACHE, "NFS: fscache invalidatepage (0x%p/0x%p/0x%p)\n",
+ nfsi->fscache, page, nfsi);
+
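+ /* wait for any in-flight write to the cache to complete before the
+ * page is uncached */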
+ wait_on_page_fscache_write(page);
+
+ BUG_ON(!PageLocked(page));
+ fscache_uncache_page(nfsi->fscache, page);
+ nfs_add_fscache_stats(page->mapping->host,
+ NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
+}
+
+/*
+ * Handle completion of a page being read from the cache.
+ * - Called in process (keventd) context.
+ */
+static void nfs_readpage_from_fscache_complete(struct page *page,
+ void *context,
+ int error)
+{
+ dfprintk(FSCACHE,
+ "NFS: readpage_from_fscache_complete (0x%p/0x%p/%d)\n",
+ page, context, error);
+
+ /* if the read completed successfully, mark the page up to date;
+ * otherwise fall back to reading from the server, and if that too
+ * fails, unlock the page and let the VM reissue the readpage */
+ if (!error) {
+ SetPageUptodate(page);
+ unlock_page(page);
+ } else {
+ error = nfs_readpage_async(context, page->mapping->host, page);
+ if (error)
+ unlock_page(page);
+ }
+}
+
+/*
+ * Retrieve a page from fscache
+ */
+int __nfs_readpage_from_fscache(struct nfs_open_context *ctx,
+ struct inode *inode, struct page *page)
+{
+ int ret;
+
+ dfprintk(FSCACHE,
+ "NFS: readpage_from_fscache(fsc:%p/p:%p(i:%lx f:%lx)/0x%p)\n",
+ NFS_I(inode)->fscache, page, page->index, page->flags, inode);
+
+ ret = fscache_read_or_alloc_page(NFS_I(inode)->fscache,
+ page,
+ nfs_readpage_from_fscache_complete,
+ ctx,
+ GFP_KERNEL);
+
+ switch (ret) {
+ case 0: /* read BIO submitted (page in fscache) */
+ dfprintk(FSCACHE,
+ "NFS: readpage_from_fscache: BIO submitted\n");
+ nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK, 1);
+ return ret;
+
+ case -ENOBUFS: /* inode not in cache */
+ case -ENODATA: /* page not in cache */
+ nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL, 1);
+ dfprintk(FSCACHE,
+ "NFS: readpage_from_fscache %d\n", ret);
+ return 1;
+
+ default:
+ dfprintk(FSCACHE, "NFS: readpage_from_fscache %d\n", ret);
+ nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL, 1);
+ }
+ return ret;
+}
+
+/*
+ * Retrieve a set of pages from fscache
+ */
+int __nfs_readpages_from_fscache(struct nfs_open_context *ctx,
+ struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages)
+{
+ int ret, npages = *nr_pages;
+
+ dfprintk(FSCACHE, "NFS: nfs_getpages_from_fscache (0x%p/%u/0x%p)\n",
+ NFS_I(inode)->fscache, npages, inode);
+
+ ret = fscache_read_or_alloc_pages(NFS_I(inode)->fscache,
+ mapping, pages, nr_pages,
+ nfs_readpage_from_fscache_complete,
+ ctx,
+ mapping_gfp_mask(mapping));
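+ /* fscache removes each page it reads (or allocates) from the list and
+ * decrements *nr_pages to match; anything left on the list still has
+ * to be fetched from the server */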
+ if (*nr_pages < npages)
+ nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_OK,
+ npages);
+ if (*nr_pages > 0)
+ nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_READ_FAIL,
+ *nr_pages);
+
+ switch (ret) {
+ case 0: /* read submitted to the cache for all pages */
+ BUG_ON(!list_empty(pages));
+ BUG_ON(*nr_pages != 0);
+ dfprintk(FSCACHE,
+ "NFS: nfs_getpages_from_fscache: submitted\n");
+
+ return ret;
+
+ case -ENOBUFS: /* some pages aren't cached and can't be */
+ case -ENODATA: /* some pages aren't cached */
+ dfprintk(FSCACHE,
+ "NFS: nfs_getpages_from_fscache: no page: %d\n", ret);
+ return 1;
+
+ default:
+ dfprintk(FSCACHE,
+ "NFS: nfs_getpages_from_fscache: ret %d\n", ret);
+ }
+
+ return ret;
+}
+
+/*
+ * Store a newly fetched page in fscache
+ * - PG_fscache must be set on the page
+ */
+void __nfs_readpage_to_fscache(struct inode *inode, struct page *page, int sync)
+{
+ int ret;
+
+ dfprintk(FSCACHE,
+ "NFS: readpage_to_fscache(fsc:%p/p:%p(i:%lx f:%lx)/%d)\n",
+ NFS_I(inode)->fscache, page, page->index, page->flags, sync);
+
+ ret = fscache_write_page(NFS_I(inode)->fscache, page, GFP_KERNEL);
+ dfprintk(FSCACHE,
+ "NFS: readpage_to_fscache: p:%p(i:%lu f:%lx) ret %d\n",
+ page, page->index, page->flags, ret);
+
+ if (ret != 0) {
+ fscache_uncache_page(NFS_I(inode)->fscache, page);
+ nfs_add_fscache_stats(inode,
+ NFSIOS_FSCACHE_PAGES_WRITTEN_FAIL, 1);
+ nfs_add_fscache_stats(inode, NFSIOS_FSCACHE_PAGES_UNCACHED, 1);
+ } else {
+ nfs_add_fscache_stats(inode,
+ NFSIOS_FSCACHE_PAGES_WRITTEN_OK, 1);
+ }
+}
diff --git a/fs/nfs/fscache.h b/fs/nfs/fscache.h
new file mode 100644
index 000000000000..2d43b67b6da3
--- /dev/null
+++ b/fs/nfs/fscache.h
@@ -0,0 +1,208 @@
+/* NFS filesystem cache interface definitions
+ *
+ * Copyright (C) 2008 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public Licence
+ * as published by the Free Software Foundation; either version
+ * 2 of the Licence, or (at your option) any later version.
+ */
+
+#ifndef _NFS_FSCACHE_H
+#define _NFS_FSCACHE_H
+
+#include <linux/nfs_fs.h>
+#include <linux/nfs_mount.h>
+#include <linux/nfs4_mount.h>
+#include <linux/fscache.h>
+
+#ifdef CONFIG_NFS_FSCACHE
+
+/*
+ * set of NFS FS-Cache objects that form a superblock key
+ */
+struct nfs_fscache_key {
+ struct rb_node node;
+ struct nfs_client *nfs_client; /* the server */
+
+ /* the elements of the unique key - as used by nfs_compare_super() and
+ * nfs_compare_mount_options() to distinguish superblocks */
+ struct {
+ struct {
+ unsigned long s_flags; /* various flags
+ * (& NFS_MS_MASK) */
+ } super;
+
+ struct {
+ struct nfs_fsid fsid;
+ int flags;
+ unsigned int rsize; /* read size */
+ unsigned int wsize; /* write size */
+ unsigned int acregmin; /* attr cache timeouts */
+ unsigned int acregmax;
+ unsigned int acdirmin;
+ unsigned int acdirmax;
+ } nfs_server;
+
+ struct {
+ rpc_authflavor_t au_flavor;
+ } rpc_auth;
+
+ /* uniquifier - can be used if nfs_server.flags includes
+ * NFS_MOUNT_UNSHARED */
+ u8 uniq_len;
+ char uniquifier[0];
+ } key;
+};
+
+/*
+ * fscache-index.c
+ */
+extern struct fscache_netfs nfs_fscache_netfs;
+extern const struct fscache_cookie_def nfs_fscache_server_index_def;
+extern const struct fscache_cookie_def nfs_fscache_super_index_def;
+extern const struct fscache_cookie_def nfs_fscache_inode_object_def;
+
+extern int nfs_fscache_register(void);
+extern void nfs_fscache_unregister(void);
+
+/*
+ * fscache.c
+ */
+extern void nfs_fscache_get_client_cookie(struct nfs_client *);
+extern void nfs_fscache_release_client_cookie(struct nfs_client *);
+
+extern void nfs_fscache_get_super_cookie(struct super_block *,
+ struct nfs_parsed_mount_data *);
+extern void nfs_fscache_release_super_cookie(struct super_block *);
+
+extern void nfs_fscache_init_inode_cookie(struct inode *);
+extern void nfs_fscache_release_inode_cookie(struct inode *);
+extern void nfs_fscache_zap_inode_cookie(struct inode *);
+extern void nfs_fscache_set_inode_cookie(struct inode *, struct file *);
+extern void nfs_fscache_reset_inode_cookie(struct inode *);
+
+extern void __nfs_fscache_invalidate_page(struct page *, struct inode *);
+extern int nfs_fscache_release_page(struct page *, gfp_t);
+
+extern int __nfs_readpage_from_fscache(struct nfs_open_context *,
+ struct inode *, struct page *);
+extern int __nfs_readpages_from_fscache(struct nfs_open_context *,
+ struct inode *, struct address_space *,
+ struct list_head *, unsigned *);
+extern void __nfs_readpage_to_fscache(struct inode *, struct page *, int);
+
+/*
+ * release the caching state associated with a page if undergoing complete page
+ * invalidation
+ */
+static inline void nfs_fscache_invalidate_page(struct page *page,
+ struct inode *inode)
+{
+ if (PageFsCache(page))
+ __nfs_fscache_invalidate_page(page, inode);
+}
+
+/*
+ * Retrieve a page from an inode data storage object.
+ */
+static inline int nfs_readpage_from_fscache(struct nfs_open_context *ctx,
+ struct inode *inode,
+ struct page *page)
+{
+ if (NFS_I(inode)->fscache)
+ return __nfs_readpage_from_fscache(ctx, inode, page);
+ return -ENOBUFS;
+}
+
+/*
+ * Retrieve a set of pages from an inode data storage object.
+ */
+static inline int nfs_readpages_from_fscache(struct nfs_open_context *ctx,
+ struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages)
+{
+ if (NFS_I(inode)->fscache)
+ return __nfs_readpages_from_fscache(ctx, inode, mapping, pages,
+ nr_pages);
+ return -ENOBUFS;
+}
+
+/*
+ * Store a page newly fetched from the server in an inode data storage object
+ * in the cache.
+ */
+static inline void nfs_readpage_to_fscache(struct inode *inode,
+ struct page *page,
+ int sync)
+{
+ if (PageFsCache(page))
+ __nfs_readpage_to_fscache(inode, page, sync);
+}
+
+/*
+ * indicate the client caching state as readable text
+ */
+static inline const char *nfs_server_fscache_state(struct nfs_server *server)
+{
+ if (server->fscache && (server->options & NFS_OPTION_FSCACHE))
+ return "yes";
+ return "no ";
+}
+
+#else /* CONFIG_NFS_FSCACHE */
+static inline int nfs_fscache_register(void) { return 0; }
+static inline void nfs_fscache_unregister(void) {}
+
+static inline void nfs_fscache_get_client_cookie(struct nfs_client *clp) {}
+static inline void nfs_fscache_release_client_cookie(struct nfs_client *clp) {}
+
+static inline void nfs_fscache_get_super_cookie(
+ struct super_block *sb,
+ struct nfs_parsed_mount_data *data)
+{
+}
+static inline void nfs_fscache_release_super_cookie(struct super_block *sb) {}
+
+static inline void nfs_fscache_init_inode_cookie(struct inode *inode) {}
+static inline void nfs_fscache_release_inode_cookie(struct inode *inode) {}
+static inline void nfs_fscache_zap_inode_cookie(struct inode *inode) {}
+static inline void nfs_fscache_set_inode_cookie(struct inode *inode,
+ struct file *filp) {}
+static inline void nfs_fscache_reset_inode_cookie(struct inode *inode) {}
+
+static inline int nfs_fscache_release_page(struct page *page, gfp_t gfp)
+{
+ return 1; /* True: may release page */
+}
+static inline void nfs_fscache_invalidate_page(struct page *page,
+ struct inode *inode) {}
+
+static inline int nfs_readpage_from_fscache(struct nfs_open_context *ctx,
+ struct inode *inode,
+ struct page *page)
+{
+ return -ENOBUFS;
+}
+static inline int nfs_readpages_from_fscache(struct nfs_open_context *ctx,
+ struct inode *inode,
+ struct address_space *mapping,
+ struct list_head *pages,
+ unsigned *nr_pages)
+{
+ return -ENOBUFS;
+}
+static inline void nfs_readpage_to_fscache(struct inode *inode,
+ struct page *page, int sync) {}
+
+static inline const char *nfs_server_fscache_state(struct nfs_server *server)
+{
+ return "no ";
+}
+
+#endif /* CONFIG_NFS_FSCACHE */
+#endif /* _NFS_FSCACHE_H */
diff --git a/fs/nfs/inode.c b/fs/nfs/inode.c
index 0c381686171e..381031810235 100644
--- a/fs/nfs/inode.c
+++ b/fs/nfs/inode.c
@@ -46,6 +46,7 @@
#include "delegation.h"
#include "iostat.h"
#include "internal.h"
+#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -109,6 +110,7 @@ void nfs_clear_inode(struct inode *inode)
BUG_ON(!list_empty(&NFS_I(inode)->open_files));
nfs_zap_acl_cache(inode);
nfs_access_zap_cache(inode);
+ nfs_fscache_release_inode_cookie(inode);
}
/**
@@ -328,6 +330,8 @@ nfs_fhget(struct super_block *sb, struct nfs_fh *fh, struct nfs_fattr *fattr)
memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
nfsi->access_cache = RB_ROOT;
+ nfs_fscache_init_inode_cookie(inode);
+
unlock_new_inode(inode);
} else
nfs_refresh_inode(inode, fattr);
@@ -642,6 +646,7 @@ int nfs_open(struct inode *inode, struct file *filp)
ctx->mode = filp->f_mode;
nfs_file_set_open_context(filp, ctx);
put_nfs_open_context(ctx);
+ nfs_fscache_set_inode_cookie(inode, filp);
return 0;
}
@@ -745,6 +750,7 @@ static int nfs_invalidate_mapping_nolock(struct inode *inode, struct address_spa
memset(nfsi->cookieverf, 0, sizeof(nfsi->cookieverf));
spin_unlock(&inode->i_lock);
nfs_inc_stats(inode, NFSIOS_DATAINVALIDATE);
+ nfs_fscache_reset_inode_cookie(inode);
dfprintk(PAGECACHE, "NFS: (%s/%Ld) data cache invalidated\n",
inode->i_sb->s_id, (long long)NFS_FILEID(inode));
return 0;
@@ -975,6 +981,7 @@ int nfs_refresh_inode(struct inode *inode, struct nfs_fattr *fattr)
spin_lock(&inode->i_lock);
status = nfs_refresh_inode_locked(inode, fattr);
spin_unlock(&inode->i_lock);
+
return status;
}
@@ -1121,7 +1128,8 @@ static int nfs_update_inode(struct inode *inode, struct nfs_fattr *fattr)
} else if (nfsi->change_attr != fattr->change_attr) {
dprintk("NFS: change_attr change on server for file %s/%ld\n",
inode->i_sb->s_id, inode->i_ino);
- invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA|NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
+ invalid |= NFS_INO_INVALID_ATTR|NFS_INO_INVALID_DATA |
+ NFS_INO_INVALID_ACCESS|NFS_INO_INVALID_ACL;
if (S_ISDIR(inode->i_mode))
nfs_force_lookup_revalidate(inode);
}
@@ -1337,6 +1345,10 @@ static int __init init_nfs_fs(void)
{
int err;
+ err = nfs_fscache_register();
+ if (err < 0)
+ goto out7;
+
err = nfsiod_start();
if (err)
goto out6;
@@ -1389,6 +1401,8 @@ out4:
out5:
nfsiod_stop();
out6:
+ nfs_fscache_unregister();
+out7:
return err;
}
@@ -1399,6 +1413,7 @@ static void __exit exit_nfs_fs(void)
nfs_destroy_readpagecache();
nfs_destroy_inodecache();
nfs_destroy_nfspagecache();
+ nfs_fscache_unregister();
#ifdef CONFIG_PROC_FS
rpc_proc_unregister("nfs");
#endif
diff --git a/fs/nfs/internal.h b/fs/nfs/internal.h
index 340ede8f608f..004269776ca1 100644
--- a/fs/nfs/internal.h
+++ b/fs/nfs/internal.h
@@ -5,6 +5,8 @@
#include <linux/mount.h>
#include <linux/security.h>
+#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
+
struct nfs_string;
/* Maximum number of readahead requests
@@ -37,10 +39,12 @@ struct nfs_parsed_mount_data {
int acregmin, acregmax,
acdirmin, acdirmax;
int namlen;
+ unsigned int options;
unsigned int bsize;
unsigned int auth_flavor_len;
rpc_authflavor_t auth_flavors[1];
char *client_address;
+ char *fscache_uniq;
struct {
struct sockaddr_storage address;
diff --git a/fs/nfs/iostat.h b/fs/nfs/iostat.h
index a36952810032..a2ab2529b5ca 100644
--- a/fs/nfs/iostat.h
+++ b/fs/nfs/iostat.h
@@ -16,6 +16,9 @@
struct nfs_iostats {
unsigned long long bytes[__NFSIOS_BYTESMAX];
+#ifdef CONFIG_NFS_FSCACHE
+ unsigned long long fscache[__NFSIOS_FSCACHEMAX];
+#endif
unsigned long events[__NFSIOS_COUNTSMAX];
} ____cacheline_aligned;
@@ -57,6 +60,21 @@ static inline void nfs_add_stats(const struct inode *inode,
nfs_add_server_stats(NFS_SERVER(inode), stat, addend);
}
+#ifdef CONFIG_NFS_FSCACHE
+static inline void nfs_add_fscache_stats(struct inode *inode,
+ enum nfs_stat_fscachecounters stat,
+ unsigned long addend)
+{
+ struct nfs_iostats *iostats;
+ int cpu;
+
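+ /* the counters are per-cpu; pin this cpu while updating them */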
+ cpu = get_cpu();
+ iostats = per_cpu_ptr(NFS_SERVER(inode)->io_stats, cpu);
+ iostats->fscache[stat] += addend;
+ put_cpu_no_resched();
+}
+#endif
+
static inline struct nfs_iostats *nfs_alloc_iostats(void)
{
return alloc_percpu(struct nfs_iostats);
diff --git a/fs/nfs/read.c b/fs/nfs/read.c
index f856004bb7fa..4ace3c50a8eb 100644
--- a/fs/nfs/read.c
+++ b/fs/nfs/read.c
@@ -24,6 +24,7 @@
#include "internal.h"
#include "iostat.h"
+#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_PAGECACHE
@@ -111,8 +112,8 @@ static void nfs_readpage_truncate_uninitialised_page(struct nfs_read_data *data)
}
}
-static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
- struct page *page)
+int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
+ struct page *page)
{
LIST_HEAD(one_request);
struct nfs_page *new;
@@ -139,6 +140,11 @@ static int nfs_readpage_async(struct nfs_open_context *ctx, struct inode *inode,
static void nfs_readpage_release(struct nfs_page *req)
{
+ struct inode *d_inode = req->wb_context->path.dentry->d_inode;
+
+ if (PageUptodate(req->wb_page))
+ nfs_readpage_to_fscache(d_inode, req->wb_page, 0);
+
unlock_page(req->wb_page);
dprintk("NFS: read done (%s/%Ld %d@%Ld)\n",
@@ -510,8 +516,15 @@ int nfs_readpage(struct file *file, struct page *page)
} else
ctx = get_nfs_open_context(nfs_file_open_context(file));
+ if (!IS_SYNC(inode)) {
+ error = nfs_readpage_from_fscache(ctx, inode, page);
+ if (error == 0)
+ goto out;
+ }
+
error = nfs_readpage_async(ctx, inode, page);
+out:
put_nfs_open_context(ctx);
return error;
out_unlock:
@@ -584,6 +597,15 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
return -EBADF;
} else
desc.ctx = get_nfs_open_context(nfs_file_open_context(filp));
+
+ /* attempt to read as many of the pages as possible from the cache
+ * - this returns -ENOBUFS immediately if the cookie is negative
+ */
+ ret = nfs_readpages_from_fscache(desc.ctx, inode, mapping,
+ pages, &nr_pages);
+ if (ret == 0)
+ goto read_complete; /* all pages were read */
+
if (rsize < PAGE_CACHE_SIZE)
nfs_pageio_init(&pgio, inode, nfs_pagein_multi, rsize, 0);
else
@@ -594,6 +616,7 @@ int nfs_readpages(struct file *filp, struct address_space *mapping,
nfs_pageio_complete(&pgio);
npages = (pgio.pg_bytes_written + PAGE_CACHE_SIZE - 1) >> PAGE_CACHE_SHIFT;
nfs_add_stats(inode, NFSIOS_READPAGES, npages);
+read_complete:
put_nfs_open_context(desc.ctx);
out:
return ret;
diff --git a/fs/nfs/super.c b/fs/nfs/super.c
index d6686f4786dc..83cad89e0e09 100644
--- a/fs/nfs/super.c
+++ b/fs/nfs/super.c
@@ -60,6 +60,7 @@
#include "delegation.h"
#include "iostat.h"
#include "internal.h"
+#include "fscache.h"
#define NFSDBG_FACILITY NFSDBG_VFS
@@ -76,6 +77,7 @@ enum {
Opt_rdirplus, Opt_nordirplus,
Opt_sharecache, Opt_nosharecache,
Opt_resvport, Opt_noresvport,
+ Opt_fscache, Opt_nofscache,
/* Mount options that take integer arguments */
Opt_port,
@@ -93,6 +95,7 @@ enum {
Opt_sec, Opt_proto, Opt_mountproto, Opt_mounthost,
Opt_addr, Opt_mountaddr, Opt_clientaddr,
Opt_lookupcache,
+ Opt_fscache_uniq,
/* Special mount options */
Opt_userspace, Opt_deprecated, Opt_sloppy,
@@ -132,6 +135,9 @@ static const match_table_t nfs_mount_option_tokens = {
{ Opt_nosharecache, "nosharecache" },
{ Opt_resvport, "resvport" },
{ Opt_noresvport, "noresvport" },
+ { Opt_fscache, "fsc" },
+ { Opt_fscache_uniq, "fsc=%s" },
+ { Opt_nofscache, "nofsc" },
{ Opt_port, "port=%u" },
{ Opt_rsize, "rsize=%u" },
@@ -563,6 +569,8 @@ static void nfs_show_mount_options(struct seq_file *m, struct nfs_server *nfss,
if (clp->rpc_ops->version == 4)
seq_printf(m, ",clientaddr=%s", clp->cl_ipaddr);
#endif
+ if (nfss->options & NFS_OPTION_FSCACHE)
+ seq_printf(m, ",fsc");
}
/*
@@ -641,6 +649,10 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
totals.events[i] += stats->events[i];
for (i = 0; i < __NFSIOS_BYTESMAX; i++)
totals.bytes[i] += stats->bytes[i];
+#ifdef CONFIG_NFS_FSCACHE
+ for (i = 0; i < __NFSIOS_FSCACHEMAX; i++)
+ totals.fscache[i] += stats->fscache[i];
+#endif
preempt_enable();
}
@@ -651,6 +663,13 @@ static int nfs_show_stats(struct seq_file *m, struct vfsmount *mnt)
seq_printf(m, "\n\tbytes:\t");
for (i = 0; i < __NFSIOS_BYTESMAX; i++)
seq_printf(m, "%Lu ", totals.bytes[i]);
+#ifdef CONFIG_NFS_FSCACHE
+ if (nfss->options & NFS_OPTION_FSCACHE) {
+ seq_printf(m, "\n\tfsc:\t");
+ for (i = 0; i < __NFSIOS_FSCACHEMAX; i++)
+ seq_printf(m, "%Lu ", totals.fscache[i]);
+ }
+#endif
seq_printf(m, "\n");
rpc_print_iostats(m, nfss->client);
@@ -1043,6 +1062,24 @@ static int nfs_parse_mount_options(char *raw,
case Opt_noresvport:
mnt->flags |= NFS_MOUNT_NORESVPORT;
break;
+ case Opt_fscache:
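+ /* a bare "fsc" reverts to the default cache key,
+ * discarding any uniquifier from an earlier "fsc=" */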
+ mnt->options |= NFS_OPTION_FSCACHE;
+ kfree(mnt->fscache_uniq);
+ mnt->fscache_uniq = NULL;
+ break;
+ case Opt_nofscache:
+ mnt->options &= ~NFS_OPTION_FSCACHE;
+ kfree(mnt->fscache_uniq);
+ mnt->fscache_uniq = NULL;
+ break;
+ case Opt_fscache_uniq:
+ string = match_strdup(args);
+ if (!string)
+ goto out_nomem;
+ kfree(mnt->fscache_uniq);
+ mnt->fscache_uniq = string;
+ mnt->options |= NFS_OPTION_FSCACHE;
+ break;
/*
* options that take numeric values
@@ -1868,8 +1905,6 @@ static void nfs_clone_super(struct super_block *sb,
nfs_initialise_sb(sb);
}
-#define NFS_MS_MASK (MS_RDONLY|MS_NOSUID|MS_NODEV|MS_NOEXEC|MS_SYNCHRONOUS)
-
static int nfs_compare_mount_options(const struct super_block *s, const struct nfs_server *b, int flags)
{
const struct nfs_server *a = s->s_fs_info;
@@ -2034,6 +2069,7 @@ static int nfs_get_sb(struct file_system_type *fs_type,
if (!s->s_root) {
/* initial superblock/root creation */
nfs_fill_super(s, data);
+ nfs_fscache_get_super_cookie(s, data);
}
mntroot = nfs_get_root(s, mntfh);
@@ -2054,6 +2090,7 @@ static int nfs_get_sb(struct file_system_type *fs_type,
out:
kfree(data->nfs_server.hostname);
kfree(data->mount_server.hostname);
+ kfree(data->fscache_uniq);
security_free_mnt_opts(&data->lsm_opts);
out_free_fh:
kfree(mntfh);
@@ -2081,6 +2118,7 @@ static void nfs_kill_super(struct super_block *s)
bdi_unregister(&server->backing_dev_info);
kill_anon_super(s);
+ nfs_fscache_release_super_cookie(s);
nfs_free_server(server);
}
@@ -2388,6 +2426,7 @@ static int nfs4_get_sb(struct file_system_type *fs_type,
if (!s->s_root) {
/* initial superblock/root creation */
nfs4_fill_super(s);
+ nfs_fscache_get_super_cookie(s, data);
}
mntroot = nfs4_get_root(s, mntfh);
@@ -2409,6 +2448,7 @@ out:
kfree(data->client_address);
kfree(data->nfs_server.export_path);
kfree(data->nfs_server.hostname);
+ kfree(data->fscache_uniq);
security_free_mnt_opts(&data->lsm_opts);
out_free_fh:
kfree(mntfh);
@@ -2435,6 +2475,7 @@ static void nfs4_kill_super(struct super_block *sb)
kill_anon_super(sb);
nfs4_renewd_prepare_shutdown(server);
+ nfs_fscache_release_super_cookie(sb);
nfs_free_server(server);
}
diff --git a/fs/nfsd/Kconfig b/fs/nfsd/Kconfig
new file mode 100644
index 000000000000..503b9da159a3
--- /dev/null
+++ b/fs/nfsd/Kconfig
@@ -0,0 +1,81 @@
+config NFSD
+ tristate "NFS server support"
+ depends on INET
+ depends on FILE_LOCKING
+ select LOCKD
+ select SUNRPC
+ select EXPORTFS
+ select NFS_ACL_SUPPORT if NFSD_V2_ACL
+ help
+ Choose Y here if you want to allow other computers to access
+ files residing on this system using Sun's Network File System
+ protocol. To compile the NFS server support as a module,
+ choose M here: the module will be called nfsd.
+
+ You may choose to use a user-space NFS server instead, in which
+ case you can choose N here.
+
+ To export local file systems using NFS, you also need to install
+ user space programs which can be found in the Linux nfs-utils
+ package, available from http://linux-nfs.org/. More detail about
+ the Linux NFS server implementation is available via the
+ exports(5) man page.
+
+ Below you can choose which versions of the NFS protocol are
+ available to clients mounting the NFS server on this system.
+ Support for NFS version 2 (RFC 1094) is always available when
+ CONFIG_NFSD is selected.
+
+ If unsure, say N.
+
+config NFSD_V2_ACL
+ bool
+ depends on NFSD
+
+config NFSD_V3
+ bool "NFS server support for NFS version 3"
+ depends on NFSD
+ help
+ This option enables support in your system's NFS server for
+ version 3 of the NFS protocol (RFC 1813).
+
+ If unsure, say Y.
+
+config NFSD_V3_ACL
+ bool "NFS server support for the NFSv3 ACL protocol extension"
+ depends on NFSD_V3
+ select NFSD_V2_ACL
+ help
+ Solaris NFS servers support an auxiliary NFSv3 ACL protocol that
+ never became an official part of the NFS version 3 protocol.
+ This protocol extension allows applications on NFS clients to
+ manipulate POSIX Access Control Lists on files residing on NFS
+ servers. NFS servers enforce POSIX ACLs on local files whether
+ this protocol is available or not.
+
+ This option enables support in your system's NFS server for the
+ NFSv3 ACL protocol extension allowing NFS clients to manipulate
+ POSIX ACLs on files exported by your system's NFS server. NFS
+ clients which support the Solaris NFSv3 ACL protocol can then
+ access and modify ACLs on your NFS server.
+
+ To store ACLs on your NFS server, you also need to enable ACL-
+ related CONFIG options for your local file systems of choice.
+
+ If unsure, say N.
+
+config NFSD_V4
+ bool "NFS server support for NFS version 4 (EXPERIMENTAL)"
+ depends on NFSD && PROC_FS && EXPERIMENTAL
+ select NFSD_V3
+ select FS_POSIX_ACL
+ select RPCSEC_GSS_KRB5
+ help
+ This option enables support in your system's NFS server for
+ version 4 of the NFS protocol (RFC 3530).
+
+ To export files using NFSv4, you need to install additional user
+ space programs which can be found in the Linux nfs-utils package,
+ available from http://linux-nfs.org/.
+
+ If unsure, say N.
diff --git a/fs/nfsd/auth.c b/fs/nfsd/auth.c
index c903e04aa217..5573508f707f 100644
--- a/fs/nfsd/auth.c
+++ b/fs/nfsd/auth.c
@@ -49,6 +49,8 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
new->fsuid = exp->ex_anon_uid;
new->fsgid = exp->ex_anon_gid;
gi = groups_alloc(0);
+ if (!gi)
+ goto oom;
} else if (flags & NFSEXP_ROOTSQUASH) {
if (!new->fsuid)
new->fsuid = exp->ex_anon_uid;
@@ -85,6 +87,7 @@ int nfsd_setuser(struct svc_rqst *rqstp, struct svc_export *exp)
new->cap_effective = cap_raise_nfsd_set(new->cap_effective,
new->cap_permitted);
put_cred(override_creds(new));
+ put_cred(new);
return 0;
oom:
diff --git a/fs/nfsd/nfs3proc.c b/fs/nfsd/nfs3proc.c
index 9dbd2eb91281..579ce8c69daa 100644
--- a/fs/nfsd/nfs3proc.c
+++ b/fs/nfsd/nfs3proc.c
@@ -18,6 +18,7 @@
#include <linux/unistd.h>
#include <linux/slab.h>
#include <linux/major.h>
+#include <linux/magic.h>
#include <linux/sunrpc/svc.h>
#include <linux/nfsd/nfsd.h>
@@ -569,7 +570,7 @@ nfsd3_proc_fsinfo(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
struct super_block *sb = argp->fh.fh_dentry->d_inode->i_sb;
/* Note that we don't care for remote fs's here */
- if (sb->s_magic == 0x4d44 /* MSDOS_SUPER_MAGIC */) {
+ if (sb->s_magic == MSDOS_SUPER_MAGIC) {
resp->f_properties = NFS3_FSF_BILLYBOY;
}
resp->f_maxfilesize = sb->s_maxbytes;
@@ -610,7 +611,7 @@ nfsd3_proc_pathconf(struct svc_rqst * rqstp, struct nfsd_fhandle *argp,
resp->p_link_max = EXT2_LINK_MAX;
resp->p_name_max = EXT2_NAME_LEN;
break;
- case 0x4d44: /* MSDOS_SUPER_MAGIC */
+ case MSDOS_SUPER_MAGIC:
resp->p_case_insensitive = 1;
resp->p_case_preserving = 0;
break;
diff --git a/fs/nfsd/nfs4callback.c b/fs/nfsd/nfs4callback.c
index c464181b5994..3ddc9fb2e358 100644
--- a/fs/nfsd/nfs4callback.c
+++ b/fs/nfsd/nfs4callback.c
@@ -218,7 +218,7 @@ static int
encode_cb_recall(struct xdr_stream *xdr, struct nfs4_cb_recall *cb_rec)
{
__be32 *p;
- int len = cb_rec->cbr_fhlen;
+ int len = cb_rec->cbr_fh.fh_size;
RESERVE_SPACE(12+sizeof(cb_rec->cbr_stateid) + len);
WRITE32(OP_CB_RECALL);
@@ -226,7 +226,7 @@ encode_cb_recall(struct xdr_stream *xdr, struct nfs4_cb_recall *cb_rec)
WRITEMEM(&cb_rec->cbr_stateid.si_opaque, sizeof(stateid_opaque_t));
WRITE32(cb_rec->cbr_trunc);
WRITE32(len);
- WRITEMEM(cb_rec->cbr_fhval, len);
+ WRITEMEM(&cb_rec->cbr_fh.fh_base, len);
return 0;
}
@@ -451,7 +451,6 @@ nfsd4_probe_callback(struct nfs4_client *clp)
/*
* called with dp->dl_count inc'ed.
- * nfs4_lock_state() may or may not have been called.
*/
void
nfsd4_cb_recall(struct nfs4_delegation *dp)
@@ -491,7 +490,9 @@ out_put_cred:
* Success or failure, now we're either waiting for lease expiration
* or deleg_return.
*/
+ nfs4_lock_state();
put_nfs4_client(clp);
nfs4_put_delegation(dp);
+ nfs4_unlock_state();
return;
}
diff --git a/fs/nfsd/nfs4proc.c b/fs/nfsd/nfs4proc.c
index 9fa60a3ad48c..af66073ed423 100644
--- a/fs/nfsd/nfs4proc.c
+++ b/fs/nfsd/nfs4proc.c
@@ -103,11 +103,13 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
(u32 *)open->op_verf.data,
&open->op_truncate, &created);
- /* If we ever decide to use different attrs to store the
- * verifier in nfsd_create_v3, then we'll need to change this
+ /*
+ * Following rfc 3530 14.2.16, use the returned bitmask
+ * to indicate which attributes we used to store the
+ * verifier:
*/
if (open->op_createmode == NFS4_CREATE_EXCLUSIVE && status == 0)
- open->op_bmval[1] |= (FATTR4_WORD1_TIME_ACCESS |
+ open->op_bmval[1] = (FATTR4_WORD1_TIME_ACCESS |
FATTR4_WORD1_TIME_MODIFY);
} else {
status = nfsd_lookup(rqstp, current_fh,
@@ -118,13 +120,11 @@ do_open_lookup(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_o
goto out;
set_change_info(&open->op_cinfo, current_fh);
-
- /* set reply cache */
fh_dup2(current_fh, &resfh);
- open->op_stateowner->so_replay.rp_openfh_len = resfh.fh_handle.fh_size;
- memcpy(open->op_stateowner->so_replay.rp_openfh,
- &resfh.fh_handle.fh_base, resfh.fh_handle.fh_size);
+ /* set reply cache */
+ fh_copy_shallow(&open->op_stateowner->so_replay.rp_openfh,
+ &resfh.fh_handle);
if (!created)
status = do_open_permission(rqstp, current_fh, open,
NFSD_MAY_NOP);
@@ -150,10 +150,8 @@ do_open_fhandle(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nfsd4_
memset(&open->op_cinfo, 0, sizeof(struct nfsd4_change_info));
/* set replay cache */
- open->op_stateowner->so_replay.rp_openfh_len = current_fh->fh_handle.fh_size;
- memcpy(open->op_stateowner->so_replay.rp_openfh,
- &current_fh->fh_handle.fh_base,
- current_fh->fh_handle.fh_size);
+ fh_copy_shallow(&open->op_stateowner->so_replay.rp_openfh,
+ &current_fh->fh_handle);
open->op_truncate = (open->op_iattr.ia_valid & ATTR_SIZE) &&
(open->op_iattr.ia_size == 0);
@@ -185,9 +183,8 @@ nfsd4_open(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
if (status == nfserr_replay_me) {
struct nfs4_replay *rp = &open->op_stateowner->so_replay;
fh_put(&cstate->current_fh);
- cstate->current_fh.fh_handle.fh_size = rp->rp_openfh_len;
- memcpy(&cstate->current_fh.fh_handle.fh_base, rp->rp_openfh,
- rp->rp_openfh_len);
+ fh_copy_shallow(&cstate->current_fh.fh_handle,
+ &rp->rp_openfh);
status = fh_verify(rqstp, &cstate->current_fh, 0, NFSD_MAY_NOP);
if (status)
dprintk("nfsd4_open: replay failed"
diff --git a/fs/nfsd/nfs4state.c b/fs/nfsd/nfs4state.c
index 88db7d3ec120..7f616e928a57 100644
--- a/fs/nfsd/nfs4state.c
+++ b/fs/nfsd/nfs4state.c
@@ -75,7 +75,6 @@ static stateid_t onestateid; /* bits all 1 */
/* forward declarations */
static struct nfs4_stateid * find_stateid(stateid_t *stid, int flags);
static struct nfs4_delegation * find_delegation_stateid(struct inode *ino, stateid_t *stid);
-static void release_stateid_lockowners(struct nfs4_stateid *open_stp);
static char user_recovery_dirname[PATH_MAX] = "/var/lib/nfs/v4recovery";
static void nfs4_set_recdir(char *recdir);
@@ -117,10 +116,6 @@ opaque_hashval(const void *ptr, int nbytes)
return x;
}
-/* forward declarations */
-static void release_stateowner(struct nfs4_stateowner *sop);
-static void release_stateid(struct nfs4_stateid *stp, int flags);
-
/*
* Delegation state
*/
@@ -220,9 +215,7 @@ alloc_init_deleg(struct nfs4_client *clp, struct nfs4_stateid *stp, struct svc_f
dp->dl_stateid.si_stateownerid = current_delegid++;
dp->dl_stateid.si_fileid = 0;
dp->dl_stateid.si_generation = 0;
- dp->dl_fhlen = current_fh->fh_handle.fh_size;
- memcpy(dp->dl_fhval, &current_fh->fh_handle.fh_base,
- current_fh->fh_handle.fh_size);
+ fh_copy_shallow(&dp->dl_fh, &current_fh->fh_handle);
dp->dl_time = 0;
atomic_set(&dp->dl_count, 1);
list_add(&dp->dl_perfile, &fp->fi_delegations);
@@ -311,6 +304,90 @@ static struct list_head unconf_id_hashtbl[CLIENT_HASH_SIZE];
static struct list_head client_lru;
static struct list_head close_lru;
+static void unhash_generic_stateid(struct nfs4_stateid *stp)
+{
+ list_del(&stp->st_hash);
+ list_del(&stp->st_perfile);
+ list_del(&stp->st_perstateowner);
+}
+
+static void free_generic_stateid(struct nfs4_stateid *stp)
+{
+ put_nfs4_file(stp->st_file);
+ kmem_cache_free(stateid_slab, stp);
+}
+
+static void release_lock_stateid(struct nfs4_stateid *stp)
+{
+ unhash_generic_stateid(stp);
+ locks_remove_posix(stp->st_vfs_file, (fl_owner_t)stp->st_stateowner);
+ free_generic_stateid(stp);
+}
+
+static void unhash_lockowner(struct nfs4_stateowner *sop)
+{
+ struct nfs4_stateid *stp;
+
+ list_del(&sop->so_idhash);
+ list_del(&sop->so_strhash);
+ list_del(&sop->so_perstateid);
+ while (!list_empty(&sop->so_stateids)) {
+ stp = list_first_entry(&sop->so_stateids,
+ struct nfs4_stateid, st_perstateowner);
+ release_lock_stateid(stp);
+ }
+}
+
+static void release_lockowner(struct nfs4_stateowner *sop)
+{
+ unhash_lockowner(sop);
+ nfs4_put_stateowner(sop);
+}
+
+static void
+release_stateid_lockowners(struct nfs4_stateid *open_stp)
+{
+ struct nfs4_stateowner *lock_sop;
+
+ while (!list_empty(&open_stp->st_lockowners)) {
+ lock_sop = list_entry(open_stp->st_lockowners.next,
+ struct nfs4_stateowner, so_perstateid);
+ /* list_del(&open_stp->st_lockowners); */
+ BUG_ON(lock_sop->so_is_open_owner);
+ release_lockowner(lock_sop);
+ }
+}
+
+static void release_open_stateid(struct nfs4_stateid *stp)
+{
+ unhash_generic_stateid(stp);
+ release_stateid_lockowners(stp);
+ nfsd_close(stp->st_vfs_file);
+ free_generic_stateid(stp);
+}
+
+static void unhash_openowner(struct nfs4_stateowner *sop)
+{
+ struct nfs4_stateid *stp;
+
+ list_del(&sop->so_idhash);
+ list_del(&sop->so_strhash);
+ list_del(&sop->so_perclient);
+ list_del(&sop->so_perstateid); /* XXX: necessary? */
+ while (!list_empty(&sop->so_stateids)) {
+ stp = list_first_entry(&sop->so_stateids,
+ struct nfs4_stateid, st_perstateowner);
+ release_open_stateid(stp);
+ }
+}
+
+static void release_openowner(struct nfs4_stateowner *sop)
+{
+ unhash_openowner(sop);
+ list_del(&sop->so_close_lru);
+ nfs4_put_stateowner(sop);
+}
+
static inline void
renew_client(struct nfs4_client *clp)
{
@@ -420,7 +497,7 @@ expire_client(struct nfs4_client *clp)
list_del(&clp->cl_lru);
while (!list_empty(&clp->cl_openowners)) {
sop = list_entry(clp->cl_openowners.next, struct nfs4_stateowner, so_perclient);
- release_stateowner(sop);
+ release_openowner(sop);
}
put_nfs4_client(clp);
}
@@ -1037,48 +1114,6 @@ alloc_init_open_stateowner(unsigned int strhashval, struct nfs4_client *clp, str
return sop;
}
-static void
-release_stateid_lockowners(struct nfs4_stateid *open_stp)
-{
- struct nfs4_stateowner *lock_sop;
-
- while (!list_empty(&open_stp->st_lockowners)) {
- lock_sop = list_entry(open_stp->st_lockowners.next,
- struct nfs4_stateowner, so_perstateid);
- /* list_del(&open_stp->st_lockowners); */
- BUG_ON(lock_sop->so_is_open_owner);
- release_stateowner(lock_sop);
- }
-}
-
-static void
-unhash_stateowner(struct nfs4_stateowner *sop)
-{
- struct nfs4_stateid *stp;
-
- list_del(&sop->so_idhash);
- list_del(&sop->so_strhash);
- if (sop->so_is_open_owner)
- list_del(&sop->so_perclient);
- list_del(&sop->so_perstateid);
- while (!list_empty(&sop->so_stateids)) {
- stp = list_entry(sop->so_stateids.next,
- struct nfs4_stateid, st_perstateowner);
- if (sop->so_is_open_owner)
- release_stateid(stp, OPEN_STATE);
- else
- release_stateid(stp, LOCK_STATE);
- }
-}
-
-static void
-release_stateowner(struct nfs4_stateowner *sop)
-{
- unhash_stateowner(sop);
- list_del(&sop->so_close_lru);
- nfs4_put_stateowner(sop);
-}
-
static inline void
init_stateid(struct nfs4_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *open) {
struct nfs4_stateowner *sop = open->op_stateowner;
@@ -1106,24 +1141,6 @@ init_stateid(struct nfs4_stateid *stp, struct nfs4_file *fp, struct nfsd4_open *
}
static void
-release_stateid(struct nfs4_stateid *stp, int flags)
-{
- struct file *filp = stp->st_vfs_file;
-
- list_del(&stp->st_hash);
- list_del(&stp->st_perfile);
- list_del(&stp->st_perstateowner);
- if (flags & OPEN_STATE) {
- release_stateid_lockowners(stp);
- stp->st_vfs_file = NULL;
- nfsd_close(filp);
- } else if (flags & LOCK_STATE)
- locks_remove_posix(filp, (fl_owner_t) stp->st_stateowner);
- put_nfs4_file(stp->st_file);
- kmem_cache_free(stateid_slab, stp);
-}
-
-static void
move_to_close_lru(struct nfs4_stateowner *sop)
{
dprintk("NFSD: move_to_close_lru nfs4_stateowner %p\n", sop);
@@ -1435,7 +1452,7 @@ nfsd4_process_open1(struct nfsd4_open *open)
if (!sop->so_confirmed) {
/* Replace unconfirmed owners without checking for replay. */
clp = sop->so_client;
- release_stateowner(sop);
+ release_openowner(sop);
open->op_stateowner = NULL;
goto renew;
}
@@ -1764,7 +1781,7 @@ nfsd4_process_open2(struct svc_rqst *rqstp, struct svc_fh *current_fh, struct nf
init_stateid(stp, fp, open);
status = nfsd4_truncate(rqstp, current_fh, open);
if (status) {
- release_stateid(stp, OPEN_STATE);
+ release_open_stateid(stp);
goto out;
}
}
@@ -1898,7 +1915,7 @@ nfs4_laundromat(void)
}
dprintk("NFSD: purging unused open stateowner (so_id %d)\n",
sop->so_id);
- release_stateowner(sop);
+ release_openowner(sop);
}
if (clientid_val < NFSD_LAUNDROMAT_MINTIMEOUT)
clientid_val = NFSD_LAUNDROMAT_MINTIMEOUT;
@@ -2373,7 +2390,7 @@ nfsd4_close(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
memcpy(&close->cl_stateid, &stp->st_stateid, sizeof(stateid_t));
/* release_stateid() calls nfsd_close() if needed */
- release_stateid(stp, OPEN_STATE);
+ release_open_stateid(stp);
/* place unused nfs4_stateowners on so_close_lru list to be
* released by the laundromat service after the lease period
@@ -2788,7 +2805,7 @@ nfsd4_lock(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
}
out:
if (status && lock->lk_is_new && lock_sop)
- release_stateowner(lock_sop);
+ release_lockowner(lock_sop);
if (lock->lk_replay_owner) {
nfs4_get_stateowner(lock->lk_replay_owner);
cstate->replay_owner = lock->lk_replay_owner;
@@ -2871,7 +2888,6 @@ nfsd4_lockt(struct svc_rqst *rqstp, struct nfsd4_compound_state *cstate,
file_lock.fl_owner = (fl_owner_t)lockt->lt_stateowner;
file_lock.fl_pid = current->tgid;
file_lock.fl_flags = FL_POSIX;
- file_lock.fl_lmops = &nfsd_posix_mng_ops;
file_lock.fl_start = lockt->lt_offset;
file_lock.fl_end = last_byte_offset(lockt->lt_offset, lockt->lt_length);
@@ -3038,7 +3054,7 @@ nfsd4_release_lockowner(struct svc_rqst *rqstp,
/* unhash_stateowner deletes so_perclient only
* for openowners. */
list_del(&sop->so_perclient);
- release_stateowner(sop);
+ release_lockowner(sop);
}
out:
nfs4_unlock_state();
diff --git a/fs/nfsd/vfs.c b/fs/nfsd/vfs.c
index 6e50aaa56ca2..4965a5aba572 100644
--- a/fs/nfsd/vfs.c
+++ b/fs/nfsd/vfs.c
@@ -998,8 +998,11 @@ nfsd_vfs_write(struct svc_rqst *rqstp, struct svc_fh *fhp, struct file *file,
if (!EX_ISSYNC(exp))
stable = 0;
- if (stable && !EX_WGATHER(exp))
+ if (stable && !EX_WGATHER(exp)) {
+ lock_file_flags();
file->f_flags |= O_SYNC;
+ unlock_file_flags();
+ }
/* Write the data. */
oldfs = get_fs(); set_fs(KERNEL_DS);
diff --git a/fs/notify/inotify/inotify_user.c b/fs/notify/inotify/inotify_user.c
index d53a1838d6e8..bed766e435b5 100644
--- a/fs/notify/inotify/inotify_user.c
+++ b/fs/notify/inotify/inotify_user.c
@@ -427,10 +427,61 @@ static unsigned int inotify_poll(struct file *file, poll_table *wait)
return ret;
}
+/*
+ * Get an inotify_kernel_event if one exists and is small
+ * enough to fit in "count". Return an error pointer if the
+ * buffer is too small to hold the next event.
+ *
+ * Called with the device ev_mutex held.
+ */
+static struct inotify_kernel_event *get_one_event(struct inotify_device *dev,
+ size_t count)
+{
+ size_t event_size = sizeof(struct inotify_event);
+ struct inotify_kernel_event *kevent;
+
+ if (list_empty(&dev->events))
+ return NULL;
+
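+ /* peek at the head of the queue; it is only dequeued below once
+ * we know it fits in the caller's buffer */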
+ kevent = inotify_dev_get_event(dev);
+ if (kevent->name)
+ event_size += kevent->event.len;
+
+ if (event_size > count)
+ return ERR_PTR(-EINVAL);
+
+ remove_kevent(dev, kevent);
+ return kevent;
+}
+
+/*
+ * Copy an event to user space, returning how much we copied.
+ *
+ * get_one_event() has already checked that the event fits in
+ * the buffer we were given.
+ */
+static ssize_t copy_event_to_user(struct inotify_kernel_event *kevent,
+ char __user *buf)
+{
+ size_t event_size = sizeof(struct inotify_event);
+
+ if (copy_to_user(buf, &kevent->event, event_size))
+ return -EFAULT;
+
+ if (kevent->name) {
+ buf += event_size;
+
+ if (copy_to_user(buf, kevent->name, kevent->event.len))
+ return -EFAULT;
+
+ event_size += kevent->event.len;
+ }
+ return event_size;
+}
+
static ssize_t inotify_read(struct file *file, char __user *buf,
size_t count, loff_t *pos)
{
- size_t event_size = sizeof (struct inotify_event);
struct inotify_device *dev;
char __user *start;
int ret;
@@ -440,81 +491,43 @@ static ssize_t inotify_read(struct file *file, char __user *buf,
dev = file->private_data;
while (1) {
+ struct inotify_kernel_event *kevent;
prepare_to_wait(&dev->wq, &wait, TASK_INTERRUPTIBLE);
mutex_lock(&dev->ev_mutex);
- if (!list_empty(&dev->events)) {
- ret = 0;
- break;
- }
+ kevent = get_one_event(dev, count);
mutex_unlock(&dev->ev_mutex);
- if (file->f_flags & O_NONBLOCK) {
- ret = -EAGAIN;
- break;
- }
-
- if (signal_pending(current)) {
- ret = -EINTR;
- break;
+ if (kevent) {
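+ /* NULL means no event was queued; an ERR_PTR means the next
+ * event does not fit in the remaining buffer space */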
+ ret = PTR_ERR(kevent);
+ if (IS_ERR(kevent))
+ break;
+ ret = copy_event_to_user(kevent, buf);
+ free_kevent(kevent);
+ if (ret < 0)
+ break;
+ buf += ret;
+ count -= ret;
+ continue;
}
- schedule();
- }
-
- finish_wait(&dev->wq, &wait);
- if (ret)
- return ret;
-
- while (1) {
- struct inotify_kernel_event *kevent;
-
- ret = buf - start;
- if (list_empty(&dev->events))
+ ret = -EAGAIN;
+ if (file->f_flags & O_NONBLOCK)
break;
-
- kevent = inotify_dev_get_event(dev);
- if (event_size + kevent->event.len > count) {
- if (ret == 0 && count > 0) {
- /*
- * could not get a single event because we
- * didn't have enough buffer space.
- */
- ret = -EINVAL;
- }
+ ret = -EINTR;
+ if (signal_pending(current))
break;
- }
- remove_kevent(dev, kevent);
- /*
- * Must perform the copy_to_user outside the mutex in order
- * to avoid a lock order reversal with mmap_sem.
- */
- mutex_unlock(&dev->ev_mutex);
-
- if (copy_to_user(buf, &kevent->event, event_size)) {
- ret = -EFAULT;
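+ /* only block if we have not copied anything out yet */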
+ if (start != buf)
break;
- }
- buf += event_size;
- count -= event_size;
-
- if (kevent->name) {
- if (copy_to_user(buf, kevent->name, kevent->event.len)){
- ret = -EFAULT;
- break;
- }
- buf += kevent->event.len;
- count -= kevent->event.len;
- }
-
- free_kevent(kevent);
- mutex_lock(&dev->ev_mutex);
+ schedule();
}
- mutex_unlock(&dev->ev_mutex);
+ finish_wait(&dev->wq, &wait);
+ if (start != buf && ret != -EFAULT)
+ ret = buf - start;
return ret;
}
diff --git a/fs/ntfs/Kconfig b/fs/ntfs/Kconfig
new file mode 100644
index 000000000000..f5a868cc9152
--- /dev/null
+++ b/fs/ntfs/Kconfig
@@ -0,0 +1,78 @@
+config NTFS_FS
+ tristate "NTFS file system support"
+ select NLS
+ help
+ NTFS is the file system of Microsoft Windows NT, 2000, XP and 2003.
+
+ Saying Y or M here enables read support. There is partial, but
+ safe, write support available. For write support you must also
+ say Y to "NTFS write support" below.
+
+ There are also a number of user-space tools available, called
+ ntfsprogs. These include ntfsundelete and ntfsresize, which work
+ without NTFS support enabled in the kernel.
+
+ This is a rewrite from scratch of Linux NTFS support and replaced
+ the old NTFS code starting with Linux 2.5.11. A backport to
+ the Linux 2.4 kernel series is separately available as a patch
+ from the project web site.
+
+ For more information see <file:Documentation/filesystems/ntfs.txt>
+ and <http://www.linux-ntfs.org/>.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called ntfs.
+
+ If you are not using Windows NT, 2000, XP or 2003 in addition to
+ Linux on your computer it is safe to say N.
+
+config NTFS_DEBUG
+ bool "NTFS debugging support"
+ depends on NTFS_FS
+ help
+ If you are experiencing any problems with the NTFS file system, say
+ Y here. This will result in additional consistency checks being
+ performed by the driver as well as additional debugging messages
+ being written to the system log. Note that debugging messages are
+ disabled by default. To enable them, supply the option debug_msgs=1
+ at the kernel command line when booting the kernel or as an option
+ to insmod when loading the ntfs module. Once the driver is active,
+ you can enable debugging messages by doing (as root):
+ echo 1 > /proc/sys/fs/ntfs-debug
+ Replacing the "1" with "0" would disable debug messages.
+
+ If you leave debugging messages disabled, this results in little
+ overhead, but enabling debug messages results in very significant
+ slowdown of the system.
+
+ When reporting bugs, please try to have available a full dump of
+ debugging messages while the misbehaviour was occurring.
+
+config NTFS_RW
+ bool "NTFS write support"
+ depends on NTFS_FS
+ help
+ This enables the partial, but safe, write support in the NTFS driver.
+
+ The only supported operation is overwriting existing files, without
+ changing the file length. No file or directory creation, deletion or
+ renaming is possible. Note only non-resident files can be written to
+ so you may find that some very small files (<500 bytes or so) cannot
+ be written to.
+
+ While we cannot guarantee that it will not damage any data, we have
+ so far not received a single report where the driver would have
+ damaged someone's data, so we assume it is perfectly safe to use.
+
+ Note: While write support is safe in this version (a rewrite from
+ scratch of the NTFS support), it should be noted that the old NTFS
+ write support, included in Linux 2.5.10 and before (since 1997),
+ is not safe.
+
+ This is currently useful with TopologiLinux. TopologiLinux runs
+ on top of any DOS/Microsoft Windows system without partitioning your
+ hard disk. Unlike other Linux distributions TopologiLinux does not
+ need its own partition. For more information see
+ <http://topologi-linux.sourceforge.net/>
+
+ It is perfectly safe to say N here.
diff --git a/fs/ocfs2/Kconfig b/fs/ocfs2/Kconfig
new file mode 100644
index 000000000000..701b7a3a872e
--- /dev/null
+++ b/fs/ocfs2/Kconfig
@@ -0,0 +1,85 @@
+config OCFS2_FS
+ tristate "OCFS2 file system support"
+ depends on NET && SYSFS
+ select CONFIGFS_FS
+ select JBD2
+ select CRC32
+ select QUOTA
+ select QUOTA_TREE
+ help
+ OCFS2 is a general purpose extent based shared disk cluster file
+ system with many similarities to ext3. It supports 64 bit inode
+ numbers, and has automatically extending metadata groups which may
+ also make it attractive for non-clustered use.
+
+ You'll want to install the ocfs2-tools package in order to at least
+ get "mount.ocfs2".
+
+ Project web page: http://oss.oracle.com/projects/ocfs2
+ Tools web page: http://oss.oracle.com/projects/ocfs2-tools
+ OCFS2 mailing lists: http://oss.oracle.com/projects/ocfs2/mailman/
+
+ For more information on OCFS2, see the file
+ <file:Documentation/filesystems/ocfs2.txt>.
+
+config OCFS2_FS_O2CB
+ tristate "O2CB Kernelspace Clustering"
+ depends on OCFS2_FS
+ default y
+ help
+ OCFS2 includes a simple kernelspace clustering package, the OCFS2
+ Cluster Base. It only requires a very small userspace component
+ to configure it. This comes with the standard ocfs2-tools package.
+ O2CB is limited to maintaining a cluster for OCFS2 file systems.
+ It cannot manage any other cluster applications.
+
+ It is always safe to say Y here, as the clustering method is
+ run-time selectable.
+
+config OCFS2_FS_USERSPACE_CLUSTER
+ tristate "OCFS2 Userspace Clustering"
+ depends on OCFS2_FS && DLM
+ default y
+ help
+ This option will allow OCFS2 to use userspace clustering services
+ in conjunction with the DLM in fs/dlm. If you are using a
+ userspace cluster manager, say Y here.
+
+ It is safe to say Y, as the clustering method is run-time
+ selectable.
+
+config OCFS2_FS_STATS
+ bool "OCFS2 statistics"
+ depends on OCFS2_FS
+ default y
+ help
+ This option allows some fs statistics to be captured. Enabling
+ it may increase memory consumption.
+
+config OCFS2_DEBUG_MASKLOG
+ bool "OCFS2 logging support"
+ depends on OCFS2_FS
+ default y
+ help
+ The ocfs2 filesystem has an extensive logging system. The system
+ allows selection of events to log via files in /sys/o2cb/logmask/.
+ This option will enlarge your kernel, but it allows debugging of
+ ocfs2 filesystem issues.
+
+config OCFS2_DEBUG_FS
+ bool "OCFS2 expensive checks"
+ depends on OCFS2_FS
+ default n
+ help
+ This option will enable expensive consistency checks. Enable
+ this option for debugging only as it is likely to decrease
+ performance of the filesystem.
+
+config OCFS2_FS_POSIX_ACL
+ bool "OCFS2 POSIX Access Control Lists"
+ depends on OCFS2_FS
+ select FS_POSIX_ACL
+ default n
+ help
+ Posix Access Control Lists (ACLs) support permissions for users and
+ groups beyond the owner/group/world scheme.
diff --git a/fs/ocfs2/alloc.c b/fs/ocfs2/alloc.c
index d861096c9d81..60fe74035db5 100644
--- a/fs/ocfs2/alloc.c
+++ b/fs/ocfs2/alloc.c
@@ -5390,6 +5390,9 @@ int ocfs2_remove_btree_range(struct inode *inode,
goto out;
}
+ vfs_dq_free_space_nodirty(inode,
+ ocfs2_clusters_to_bytes(inode->i_sb, len));
+
ret = ocfs2_remove_extent(inode, et, cpos, len, handle, meta_ac,
dealloc);
if (ret) {
diff --git a/fs/ocfs2/cluster/heartbeat.c b/fs/ocfs2/cluster/heartbeat.c
index 04697ba7f73e..4f85eceab376 100644
--- a/fs/ocfs2/cluster/heartbeat.c
+++ b/fs/ocfs2/cluster/heartbeat.c
@@ -33,6 +33,7 @@
#include <linux/random.h>
#include <linux/crc32.h>
#include <linux/time.h>
+#include <linux/debugfs.h>
#include "heartbeat.h"
#include "tcp.h"
@@ -60,6 +61,11 @@ static unsigned long o2hb_live_node_bitmap[BITS_TO_LONGS(O2NM_MAX_NODES)];
static LIST_HEAD(o2hb_node_events);
static DECLARE_WAIT_QUEUE_HEAD(o2hb_steady_queue);
+#define O2HB_DEBUG_DIR "o2hb"
+#define O2HB_DEBUG_LIVENODES "livenodes"
+static struct dentry *o2hb_debug_dir;
+static struct dentry *o2hb_debug_livenodes;
+
static LIST_HEAD(o2hb_all_regions);
static struct o2hb_callback {
@@ -905,7 +911,77 @@ static int o2hb_thread(void *data)
return 0;
}
-void o2hb_init(void)
+#ifdef CONFIG_DEBUG_FS
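+/* format the set of currently live (heartbeating) nodes into a
+ * page-sized buffer that lives for as long as the file is held open */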
+static int o2hb_debug_open(struct inode *inode, struct file *file)
+{
+ unsigned long map[BITS_TO_LONGS(O2NM_MAX_NODES)];
+ char *buf = NULL;
+ int i = -1;
+ int out = 0;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ goto bail;
+
+ o2hb_fill_node_map(map, sizeof(map));
+
+ while ((i = find_next_bit(map, O2NM_MAX_NODES, i + 1)) < O2NM_MAX_NODES)
+ out += snprintf(buf + out, PAGE_SIZE - out, "%d ", i);
+ out += snprintf(buf + out, PAGE_SIZE - out, "\n");
+
+ i_size_write(inode, out);
+
+ file->private_data = buf;
+
+ return 0;
+bail:
+ return -ENOMEM;
+}
+
+static int o2hb_debug_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+ i_size_read(file->f_mapping->host));
+}
+#else
+static int o2hb_debug_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+static int o2hb_debug_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+static ssize_t o2hb_debug_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static struct file_operations o2hb_debug_fops = {
+ .open = o2hb_debug_open,
+ .release = o2hb_debug_release,
+ .read = o2hb_debug_read,
+ .llseek = generic_file_llseek,
+};
+
+void o2hb_exit(void)
+{
+ if (o2hb_debug_livenodes)
+ debugfs_remove(o2hb_debug_livenodes);
+ if (o2hb_debug_dir)
+ debugfs_remove(o2hb_debug_dir);
+}
+
+int o2hb_init(void)
{
int i;
@@ -918,6 +994,24 @@ void o2hb_init(void)
INIT_LIST_HEAD(&o2hb_node_events);
memset(o2hb_live_node_bitmap, 0, sizeof(o2hb_live_node_bitmap));
+
+ o2hb_debug_dir = debugfs_create_dir(O2HB_DEBUG_DIR, NULL);
+ if (!o2hb_debug_dir) {
+ mlog_errno(-ENOMEM);
+ return -ENOMEM;
+ }
+
+ o2hb_debug_livenodes = debugfs_create_file(O2HB_DEBUG_LIVENODES,
+ S_IFREG|S_IRUSR,
+ o2hb_debug_dir, NULL,
+ &o2hb_debug_fops);
+ if (!o2hb_debug_livenodes) {
+ mlog_errno(-ENOMEM);
+ debugfs_remove(o2hb_debug_dir);
+ return -ENOMEM;
+ }
+
+ return 0;
}
/* if we're already in a callback then we're already serialized by the sem */
diff --git a/fs/ocfs2/cluster/heartbeat.h b/fs/ocfs2/cluster/heartbeat.h
index e511339886b3..2f1649253b49 100644
--- a/fs/ocfs2/cluster/heartbeat.h
+++ b/fs/ocfs2/cluster/heartbeat.h
@@ -75,7 +75,8 @@ void o2hb_unregister_callback(const char *region_uuid,
struct o2hb_callback_func *hc);
void o2hb_fill_node_map(unsigned long *map,
unsigned bytes);
-void o2hb_init(void);
+void o2hb_exit(void);
+int o2hb_init(void);
int o2hb_check_node_heartbeating(u8 node_num);
int o2hb_check_node_heartbeating_from_callback(u8 node_num);
int o2hb_check_local_node_heartbeating(void);
diff --git a/fs/ocfs2/cluster/nodemanager.c b/fs/ocfs2/cluster/nodemanager.c
index 70e8fa9e2539..7ee6188bc79a 100644
--- a/fs/ocfs2/cluster/nodemanager.c
+++ b/fs/ocfs2/cluster/nodemanager.c
@@ -881,6 +881,7 @@ static void __exit exit_o2nm(void)
o2cb_sys_shutdown();
o2net_exit();
+ o2hb_exit();
}
static int __init init_o2nm(void)
@@ -889,11 +890,13 @@ static int __init init_o2nm(void)
cluster_print_version();
- o2hb_init();
+ ret = o2hb_init();
+ if (ret)
+ goto out;
ret = o2net_init();
if (ret)
- goto out;
+ goto out_o2hb;
ret = o2net_register_hb_callbacks();
if (ret)
@@ -916,6 +919,8 @@ out_callbacks:
o2net_unregister_hb_callbacks();
out_o2net:
o2net_exit();
+out_o2hb:
+ o2hb_exit();
out:
return ret;
}
diff --git a/fs/ocfs2/dcache.c b/fs/ocfs2/dcache.c
index b1cc7c381e88..e9d7c2038c0f 100644
--- a/fs/ocfs2/dcache.c
+++ b/fs/ocfs2/dcache.c
@@ -38,6 +38,7 @@
#include "dlmglue.h"
#include "file.h"
#include "inode.h"
+#include "super.h"
static int ocfs2_dentry_revalidate(struct dentry *dentry,
@@ -294,6 +295,34 @@ out_attach:
return ret;
}
+static DEFINE_SPINLOCK(dentry_list_lock);
+
+/* We limit the number of dentry locks to drop in one go. We have
+ * this limit so that we don't starve other users of ocfs2_wq. */
+#define DL_INODE_DROP_COUNT 64
+
+/* Drop inode references from dentry locks */
+void ocfs2_drop_dl_inodes(struct work_struct *work)
+{
+ struct ocfs2_super *osb = container_of(work, struct ocfs2_super,
+ dentry_lock_work);
+ struct ocfs2_dentry_lock *dl;
+ int drop_count = DL_INODE_DROP_COUNT;
+
+ spin_lock(&dentry_list_lock);
+ while (osb->dentry_lock_list && drop_count--) {
+ dl = osb->dentry_lock_list;
+ osb->dentry_lock_list = dl->dl_next;
+ spin_unlock(&dentry_list_lock);
+ iput(dl->dl_inode);
+ kfree(dl);
+ spin_lock(&dentry_list_lock);
+ }
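+ /* more locks may have been queued while we dropped the list lock;
+ * reschedule ourselves to deal with them */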
+ if (osb->dentry_lock_list)
+ queue_work(ocfs2_wq, &osb->dentry_lock_work);
+ spin_unlock(&dentry_list_lock);
+}
+
/*
* ocfs2_dentry_iput() and friends.
*
@@ -318,16 +347,23 @@ out_attach:
static void ocfs2_drop_dentry_lock(struct ocfs2_super *osb,
struct ocfs2_dentry_lock *dl)
{
- iput(dl->dl_inode);
ocfs2_simple_drop_lockres(osb, &dl->dl_lockres);
ocfs2_lock_res_free(&dl->dl_lockres);
- kfree(dl);
+
+ /* We leave dropping of inode reference to ocfs2_wq as that can
+ * possibly lead to inode deletion which gets tricky */
+ spin_lock(&dentry_list_lock);
+ if (!osb->dentry_lock_list)
+ queue_work(ocfs2_wq, &osb->dentry_lock_work);
+ dl->dl_next = osb->dentry_lock_list;
+ osb->dentry_lock_list = dl;
+ spin_unlock(&dentry_list_lock);
}
void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
struct ocfs2_dentry_lock *dl)
{
- int unlock = 0;
+ int unlock;
BUG_ON(dl->dl_count == 0);
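Taken together, the two hunks above form a standard deferred-release pattern: ocfs2_drop_dentry_lock() prepends to a singly linked list under dentry_list_lock and kicks ocfs2_wq only on the empty-to-non-empty transition, while ocfs2_drop_dl_inodes() drains in batches of DL_INODE_DROP_COUNT, dropping the spinlock around iput() because the final iput() can enter inode deletion and sleep. A compact restatement of the same shape, with hypothetical names (the item type, lock, list and work item below are placeholders, not ocfs2 symbols):

	struct deferred_put {
		struct deferred_put *next;
		struct inode *inode;
	};

	static DEFINE_SPINLOCK(defer_lock);
	static struct deferred_put *defer_list;
	static struct work_struct defer_work;	/* INIT_WORK(&defer_work, defer_drain) */

	/* producer: cheap, never sleeps, kicks the worker only on 0 -> 1 */
	static void defer_iput(struct deferred_put *item)
	{
		spin_lock(&defer_lock);
		if (!defer_list)
			queue_work(ocfs2_wq, &defer_work);
		item->next = defer_list;
		defer_list = item;
		spin_unlock(&defer_lock);
	}

	/* consumer: bounded batch, lock dropped around the sleeping iput() */
	static void defer_drain(struct work_struct *work)
	{
		int batch = 64;

		spin_lock(&defer_lock);
		while (defer_list && batch--) {
			struct deferred_put *item = defer_list;

			defer_list = item->next;
			spin_unlock(&defer_lock);
			iput(item->inode);
			kfree(item);
			spin_lock(&defer_lock);
		}
		if (defer_list)
			queue_work(ocfs2_wq, &defer_work);
		spin_unlock(&defer_lock);
	}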
diff --git a/fs/ocfs2/dcache.h b/fs/ocfs2/dcache.h
index c091c34d9883..d06e16c06640 100644
--- a/fs/ocfs2/dcache.h
+++ b/fs/ocfs2/dcache.h
@@ -29,8 +29,13 @@
extern struct dentry_operations ocfs2_dentry_ops;
struct ocfs2_dentry_lock {
+ /* Use count of dentry lock */
unsigned int dl_count;
- u64 dl_parent_blkno;
+ union {
+ /* Linked list of dentry locks to release */
+ struct ocfs2_dentry_lock *dl_next;
+ u64 dl_parent_blkno;
+ };
/*
* The ocfs2_dentry_lock keeps an inode reference until
@@ -47,6 +52,8 @@ int ocfs2_dentry_attach_lock(struct dentry *dentry, struct inode *inode,
void ocfs2_dentry_lock_put(struct ocfs2_super *osb,
struct ocfs2_dentry_lock *dl);
+void ocfs2_drop_dl_inodes(struct work_struct *work);
+
struct dentry *ocfs2_find_local_alias(struct inode *inode, u64 parent_blkno,
int skip_unhashed);
diff --git a/fs/ocfs2/dlmglue.c b/fs/ocfs2/dlmglue.c
index b0c4cadd4c45..206a2370876a 100644
--- a/fs/ocfs2/dlmglue.c
+++ b/fs/ocfs2/dlmglue.c
@@ -2860,6 +2860,10 @@ static void ocfs2_unlock_ast(void *opaque, int error)
case OCFS2_UNLOCK_CANCEL_CONVERT:
mlog(0, "Cancel convert success for %s\n", lockres->l_name);
lockres->l_action = OCFS2_AST_INVALID;
+ /* The downconvert thread may have requeued this lock; we
+ * need to wake it. */
+ if (lockres->l_flags & OCFS2_LOCK_BLOCKED)
+ ocfs2_wake_downconvert_thread(ocfs2_get_lockres_osb(lockres));
break;
case OCFS2_UNLOCK_DROP_LOCK:
lockres->l_level = DLM_LOCK_IV;
diff --git a/fs/ocfs2/journal.c b/fs/ocfs2/journal.c
index 57d7d25a2b9a..4c8f3557fe93 100644
--- a/fs/ocfs2/journal.c
+++ b/fs/ocfs2/journal.c
@@ -76,18 +76,6 @@ static inline int ocfs2_wait_on_quotas(struct ocfs2_super *osb)
return __ocfs2_wait_on_mount(osb, 1);
}
-
-
-/*
- * The recovery_list is a simple linked list of node numbers to recover.
- * It is protected by the recovery_lock.
- */
-
-struct ocfs2_recovery_map {
- unsigned int rm_used;
- unsigned int *rm_entries;
-};
-
int ocfs2_recovery_init(struct ocfs2_super *osb)
{
struct ocfs2_recovery_map *rm;
diff --git a/fs/ocfs2/journal.h b/fs/ocfs2/journal.h
index 3c3532e1307c..c4847caf4cc1 100644
--- a/fs/ocfs2/journal.h
+++ b/fs/ocfs2/journal.h
@@ -38,6 +38,17 @@ enum ocfs2_journal_state {
struct ocfs2_super;
struct ocfs2_dinode;
+/*
+ * The recovery_list is a simple linked list of node numbers to recover.
+ * It is protected by the recovery_lock.
+ */
+
+struct ocfs2_recovery_map {
+ unsigned int rm_used;
+ unsigned int *rm_entries;
+};
+
+
struct ocfs2_journal {
enum ocfs2_journal_state j_state; /* Journals current state */
diff --git a/fs/ocfs2/localalloc.c b/fs/ocfs2/localalloc.c
index ec70cdbe77fc..bac7e6abaf47 100644
--- a/fs/ocfs2/localalloc.c
+++ b/fs/ocfs2/localalloc.c
@@ -28,7 +28,6 @@
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/bitops.h>
-#include <linux/debugfs.h>
#define MLOG_MASK_PREFIX ML_DISK_ALLOC
#include <cluster/masklog.h>
@@ -75,84 +74,6 @@ static int ocfs2_local_alloc_new_window(struct ocfs2_super *osb,
static int ocfs2_local_alloc_slide_window(struct ocfs2_super *osb,
struct inode *local_alloc_inode);
-#ifdef CONFIG_OCFS2_FS_STATS
-
-static int ocfs2_la_debug_open(struct inode *inode, struct file *file)
-{
- file->private_data = inode->i_private;
- return 0;
-}
-
-#define LA_DEBUG_BUF_SZ PAGE_CACHE_SIZE
-#define LA_DEBUG_VER 1
-static ssize_t ocfs2_la_debug_read(struct file *file, char __user *userbuf,
- size_t count, loff_t *ppos)
-{
- static DEFINE_MUTEX(la_debug_mutex);
- struct ocfs2_super *osb = file->private_data;
- int written, ret;
- char *buf = osb->local_alloc_debug_buf;
-
- mutex_lock(&la_debug_mutex);
- memset(buf, 0, LA_DEBUG_BUF_SZ);
-
- written = snprintf(buf, LA_DEBUG_BUF_SZ,
- "0x%x\t0x%llx\t%u\t%u\t0x%x\n",
- LA_DEBUG_VER,
- (unsigned long long)osb->la_last_gd,
- osb->local_alloc_default_bits,
- osb->local_alloc_bits, osb->local_alloc_state);
-
- ret = simple_read_from_buffer(userbuf, count, ppos, buf, written);
-
- mutex_unlock(&la_debug_mutex);
- return ret;
-}
-
-static const struct file_operations ocfs2_la_debug_fops = {
- .open = ocfs2_la_debug_open,
- .read = ocfs2_la_debug_read,
-};
-
-static void ocfs2_init_la_debug(struct ocfs2_super *osb)
-{
- osb->local_alloc_debug_buf = kmalloc(LA_DEBUG_BUF_SZ, GFP_NOFS);
- if (!osb->local_alloc_debug_buf)
- return;
-
- osb->local_alloc_debug = debugfs_create_file("local_alloc_stats",
- S_IFREG|S_IRUSR,
- osb->osb_debug_root,
- osb,
- &ocfs2_la_debug_fops);
- if (!osb->local_alloc_debug) {
- kfree(osb->local_alloc_debug_buf);
- osb->local_alloc_debug_buf = NULL;
- }
-}
-
-static void ocfs2_shutdown_la_debug(struct ocfs2_super *osb)
-{
- if (osb->local_alloc_debug)
- debugfs_remove(osb->local_alloc_debug);
-
- if (osb->local_alloc_debug_buf)
- kfree(osb->local_alloc_debug_buf);
-
- osb->local_alloc_debug_buf = NULL;
- osb->local_alloc_debug = NULL;
-}
-#else /* CONFIG_OCFS2_FS_STATS */
-static void ocfs2_init_la_debug(struct ocfs2_super *osb)
-{
- return;
-}
-static void ocfs2_shutdown_la_debug(struct ocfs2_super *osb)
-{
- return;
-}
-#endif
-
static inline int ocfs2_la_state_enabled(struct ocfs2_super *osb)
{
return (osb->local_alloc_state == OCFS2_LA_THROTTLED ||
@@ -226,8 +147,6 @@ int ocfs2_load_local_alloc(struct ocfs2_super *osb)
mlog_entry_void();
- ocfs2_init_la_debug(osb);
-
if (osb->local_alloc_bits == 0)
goto bail;
@@ -299,9 +218,6 @@ bail:
if (inode)
iput(inode);
- if (status < 0)
- ocfs2_shutdown_la_debug(osb);
-
mlog(0, "Local alloc window bits = %d\n", osb->local_alloc_bits);
mlog_exit(status);
@@ -331,8 +247,6 @@ void ocfs2_shutdown_local_alloc(struct ocfs2_super *osb)
cancel_delayed_work(&osb->la_enable_wq);
flush_workqueue(ocfs2_wq);
- ocfs2_shutdown_la_debug(osb);
-
if (osb->local_alloc_state == OCFS2_LA_UNUSED)
goto out;
diff --git a/fs/ocfs2/ocfs2.h b/fs/ocfs2/ocfs2.h
index ad5c24a29edd..166e30479dc3 100644
--- a/fs/ocfs2/ocfs2.h
+++ b/fs/ocfs2/ocfs2.h
@@ -210,6 +210,7 @@ struct ocfs2_journal;
struct ocfs2_slot_info;
struct ocfs2_recovery_map;
struct ocfs2_quota_recovery;
+struct ocfs2_dentry_lock;
struct ocfs2_super
{
struct task_struct *commit_task;
@@ -286,11 +287,6 @@ struct ocfs2_super
u64 la_last_gd;
-#ifdef CONFIG_OCFS2_FS_STATS
- struct dentry *local_alloc_debug;
- char *local_alloc_debug_buf;
-#endif
-
/* Next three fields are for local node slot recovery during
* mount. */
int dirty;
@@ -307,6 +303,7 @@ struct ocfs2_super
struct ocfs2_dlm_debug *osb_dlm_debug;
struct dentry *osb_debug_root;
+ struct dentry *osb_ctxt;
wait_queue_head_t recovery_event;
@@ -325,6 +322,11 @@ struct ocfs2_super
struct list_head blocked_lock_list;
unsigned long blocked_lock_count;
+ /* List of dentry locks to release. Anyone can add locks to
+ * the list; ocfs2_wq processes the list. */
+ struct ocfs2_dentry_lock *dentry_lock_list;
+ struct work_struct dentry_lock_work;
+
wait_queue_head_t osb_mount_event;
/* Truncate log info */
diff --git a/fs/ocfs2/quota_global.c b/fs/ocfs2/quota_global.c
index 6aff8f2d3e49..1ed0f7c86869 100644
--- a/fs/ocfs2/quota_global.c
+++ b/fs/ocfs2/quota_global.c
@@ -754,7 +754,9 @@ static int ocfs2_mark_dquot_dirty(struct dquot *dquot)
if (dquot->dq_flags & mask)
sync = 1;
spin_unlock(&dq_data_lock);
- if (!sync) {
+ /* This is a slight hack, but we can't afford to take the global quota
+ * lock if we already have a transaction started. */
+ if (!sync || journal_current_handle()) {
status = ocfs2_write_dquot(dquot);
goto out;
}
@@ -810,171 +812,6 @@ out:
return status;
}
-/* This is difficult. We have to lock quota inode and start transaction
- * in this function but we don't want to take the penalty of exlusive
- * quota file lock when we are just going to use cached structures. So
- * we just take read lock check whether we have dquot cached and if so,
- * we don't have to take the write lock... */
-static int ocfs2_dquot_initialize(struct inode *inode, int type)
-{
- handle_t *handle = NULL;
- int status = 0;
- struct super_block *sb = inode->i_sb;
- struct ocfs2_mem_dqinfo *oinfo;
- int exclusive = 0;
- int cnt;
- qid_t id;
-
- mlog_entry_void();
-
- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (type != -1 && cnt != type)
- continue;
- if (!sb_has_quota_active(sb, cnt))
- continue;
- oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
- status = ocfs2_lock_global_qf(oinfo, 0);
- if (status < 0)
- goto out;
- /* This is just a performance optimization not a reliable test.
- * Since we hold an inode lock, noone can actually release
- * the structure until we are finished with initialization. */
- if (inode->i_dquot[cnt] != NODQUOT) {
- ocfs2_unlock_global_qf(oinfo, 0);
- continue;
- }
- /* When we have inode lock, we know that no dquot_release() can
- * run and thus we can safely check whether we need to
- * read+modify global file to get quota information or whether
- * our node already has it. */
- if (cnt == USRQUOTA)
- id = inode->i_uid;
- else if (cnt == GRPQUOTA)
- id = inode->i_gid;
- else
- BUG();
- /* Obtain exclusion from quota off... */
- down_write(&sb_dqopt(sb)->dqptr_sem);
- exclusive = !dquot_is_cached(sb, id, cnt);
- up_write(&sb_dqopt(sb)->dqptr_sem);
- if (exclusive) {
- status = ocfs2_lock_global_qf(oinfo, 1);
- if (status < 0) {
- exclusive = 0;
- mlog_errno(status);
- goto out_ilock;
- }
- handle = ocfs2_start_trans(OCFS2_SB(sb),
- ocfs2_calc_qinit_credits(sb, cnt));
- if (IS_ERR(handle)) {
- status = PTR_ERR(handle);
- mlog_errno(status);
- goto out_ilock;
- }
- }
- dquot_initialize(inode, cnt);
- if (exclusive) {
- ocfs2_commit_trans(OCFS2_SB(sb), handle);
- ocfs2_unlock_global_qf(oinfo, 1);
- }
- ocfs2_unlock_global_qf(oinfo, 0);
- }
- mlog_exit(0);
- return 0;
-out_ilock:
- if (exclusive)
- ocfs2_unlock_global_qf(oinfo, 1);
- ocfs2_unlock_global_qf(oinfo, 0);
-out:
- mlog_exit(status);
- return status;
-}
-
-static int ocfs2_dquot_drop_slow(struct inode *inode)
-{
- int status = 0;
- int cnt;
- int got_lock[MAXQUOTAS] = {0, 0};
- handle_t *handle;
- struct super_block *sb = inode->i_sb;
- struct ocfs2_mem_dqinfo *oinfo;
-
- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (!sb_has_quota_active(sb, cnt))
- continue;
- oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
- status = ocfs2_lock_global_qf(oinfo, 1);
- if (status < 0)
- goto out;
- got_lock[cnt] = 1;
- }
- handle = ocfs2_start_trans(OCFS2_SB(sb),
- ocfs2_calc_qinit_credits(sb, USRQUOTA) +
- ocfs2_calc_qinit_credits(sb, GRPQUOTA));
- if (IS_ERR(handle)) {
- status = PTR_ERR(handle);
- mlog_errno(status);
- goto out;
- }
- dquot_drop(inode);
- ocfs2_commit_trans(OCFS2_SB(sb), handle);
-out:
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (got_lock[cnt]) {
- oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
- ocfs2_unlock_global_qf(oinfo, 1);
- }
- return status;
-}
-
-/* See the comment before ocfs2_dquot_initialize. */
-static int ocfs2_dquot_drop(struct inode *inode)
-{
- int status = 0;
- struct super_block *sb = inode->i_sb;
- struct ocfs2_mem_dqinfo *oinfo;
- int exclusive = 0;
- int cnt;
- int got_lock[MAXQUOTAS] = {0, 0};
-
- mlog_entry_void();
- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (!sb_has_quota_active(sb, cnt))
- continue;
- oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
- status = ocfs2_lock_global_qf(oinfo, 0);
- if (status < 0)
- goto out;
- got_lock[cnt] = 1;
- }
- /* Lock against anyone releasing references so that when when we check
- * we know we are not going to be last ones to release dquot */
- down_write(&sb_dqopt(sb)->dqptr_sem);
- /* Urgh, this is a terrible hack :( */
- for (cnt = 0; cnt < MAXQUOTAS; cnt++) {
- if (inode->i_dquot[cnt] != NODQUOT &&
- atomic_read(&inode->i_dquot[cnt]->dq_count) > 1) {
- exclusive = 1;
- break;
- }
- }
- if (!exclusive)
- dquot_drop_locked(inode);
- up_write(&sb_dqopt(sb)->dqptr_sem);
-out:
- for (cnt = 0; cnt < MAXQUOTAS; cnt++)
- if (got_lock[cnt]) {
- oinfo = sb_dqinfo(sb, cnt)->dqi_priv;
- ocfs2_unlock_global_qf(oinfo, 0);
- }
- /* In case we bailed out because we had to do expensive locking
- * do it now... */
- if (exclusive)
- status = ocfs2_dquot_drop_slow(inode);
- mlog_exit(status);
- return status;
-}
-
static struct dquot *ocfs2_alloc_dquot(struct super_block *sb, int type)
{
struct ocfs2_dquot *dquot =
@@ -991,8 +828,8 @@ static void ocfs2_destroy_dquot(struct dquot *dquot)
}
struct dquot_operations ocfs2_quota_operations = {
- .initialize = ocfs2_dquot_initialize,
- .drop = ocfs2_dquot_drop,
+ .initialize = dquot_initialize,
+ .drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
diff --git a/fs/ocfs2/super.c b/fs/ocfs2/super.c
index 43ed11345b59..1986b810bab6 100644
--- a/fs/ocfs2/super.c
+++ b/fs/ocfs2/super.c
@@ -201,6 +201,170 @@ static const match_table_t tokens = {
{Opt_err, NULL}
};
+#ifdef CONFIG_DEBUG_FS
+static int ocfs2_osb_dump(struct ocfs2_super *osb, char *buf, int len)
+{
+ int out = 0;
+ int i;
+ struct ocfs2_cluster_connection *cconn = osb->cconn;
+ struct ocfs2_recovery_map *rm = osb->recovery_map;
+
+ out += snprintf(buf + out, len - out,
+ "%10s => Id: %-s Uuid: %-s Gen: 0x%X Label: %-s\n",
+ "Device", osb->dev_str, osb->uuid_str,
+ osb->fs_generation, osb->vol_label);
+
+ out += snprintf(buf + out, len - out,
+ "%10s => State: %d Flags: 0x%lX\n", "Volume",
+ atomic_read(&osb->vol_state), osb->osb_flags);
+
+ out += snprintf(buf + out, len - out,
+ "%10s => Block: %lu Cluster: %d\n", "Sizes",
+ osb->sb->s_blocksize, osb->s_clustersize);
+
+ out += snprintf(buf + out, len - out,
+ "%10s => Compat: 0x%X Incompat: 0x%X "
+ "ROcompat: 0x%X\n",
+ "Features", osb->s_feature_compat,
+ osb->s_feature_incompat, osb->s_feature_ro_compat);
+
+ out += snprintf(buf + out, len - out,
+ "%10s => Opts: 0x%lX AtimeQuanta: %u\n", "Mount",
+ osb->s_mount_opt, osb->s_atime_quantum);
+
+ out += snprintf(buf + out, len - out,
+ "%10s => Stack: %s Name: %*s Version: %d.%d\n",
+ "Cluster",
+ (*osb->osb_cluster_stack == '\0' ?
+ "o2cb" : osb->osb_cluster_stack),
+ cconn->cc_namelen, cconn->cc_name,
+ cconn->cc_version.pv_major, cconn->cc_version.pv_minor);
+
+ spin_lock(&osb->dc_task_lock);
+ out += snprintf(buf + out, len - out,
+ "%10s => Pid: %d Count: %lu WakeSeq: %lu "
+ "WorkSeq: %lu\n", "DownCnvt",
+ task_pid_nr(osb->dc_task), osb->blocked_lock_count,
+ osb->dc_wake_sequence, osb->dc_work_sequence);
+ spin_unlock(&osb->dc_task_lock);
+
+ spin_lock(&osb->osb_lock);
+ out += snprintf(buf + out, len - out, "%10s => Pid: %d Nodes:",
+ "Recovery",
+ (osb->recovery_thread_task ?
+ task_pid_nr(osb->recovery_thread_task) : -1));
+ if (rm->rm_used == 0)
+ out += snprintf(buf + out, len - out, " None\n");
+ else {
+ for (i = 0; i < rm->rm_used; i++)
+ out += snprintf(buf + out, len - out, " %d",
+ rm->rm_entries[i]);
+ out += snprintf(buf + out, len - out, "\n");
+ }
+ spin_unlock(&osb->osb_lock);
+
+ out += snprintf(buf + out, len - out,
+ "%10s => Pid: %d Interval: %lu Needs: %d\n", "Commit",
+ task_pid_nr(osb->commit_task), osb->osb_commit_interval,
+ atomic_read(&osb->needs_checkpoint));
+
+ out += snprintf(buf + out, len - out,
+ "%10s => State: %d NumTxns: %d TxnId: %lu\n",
+ "Journal", osb->journal->j_state,
+ atomic_read(&osb->journal->j_num_trans),
+ osb->journal->j_trans_id);
+
+ out += snprintf(buf + out, len - out,
+ "%10s => GlobalAllocs: %d LocalAllocs: %d "
+ "SubAllocs: %d LAWinMoves: %d SAExtends: %d\n",
+ "Stats",
+ atomic_read(&osb->alloc_stats.bitmap_data),
+ atomic_read(&osb->alloc_stats.local_data),
+ atomic_read(&osb->alloc_stats.bg_allocs),
+ atomic_read(&osb->alloc_stats.moves),
+ atomic_read(&osb->alloc_stats.bg_extends));
+
+ out += snprintf(buf + out, len - out,
+ "%10s => State: %u Descriptor: %llu Size: %u bits "
+ "Default: %u bits\n",
+ "LocalAlloc", osb->local_alloc_state,
+ (unsigned long long)osb->la_last_gd,
+ osb->local_alloc_bits, osb->local_alloc_default_bits);
+
+ spin_lock(&osb->osb_lock);
+ out += snprintf(buf + out, len - out,
+ "%10s => Slot: %d NumStolen: %d\n", "Steal",
+ osb->s_inode_steal_slot,
+ atomic_read(&osb->s_num_inodes_stolen));
+ spin_unlock(&osb->osb_lock);
+
+ out += snprintf(buf + out, len - out, "%10s => %3s %10s\n",
+ "Slots", "Num", "RecoGen");
+
+ for (i = 0; i < osb->max_slots; ++i) {
+ out += snprintf(buf + out, len - out,
+ "%10s %c %3d %10d\n",
+ " ",
+ (i == osb->slot_num ? '*' : ' '),
+ i, osb->slot_recovery_generations[i]);
+ }
+
+ return out;
+}
+
+static int ocfs2_osb_debug_open(struct inode *inode, struct file *file)
+{
+ struct ocfs2_super *osb = inode->i_private;
+ char *buf = NULL;
+
+ buf = kmalloc(PAGE_SIZE, GFP_KERNEL);
+ if (!buf)
+ goto bail;
+
+ i_size_write(inode, ocfs2_osb_dump(osb, buf, PAGE_SIZE));
+
+ file->private_data = buf;
+
+ return 0;
+bail:
+ return -ENOMEM;
+}
+
+static int ocfs2_debug_release(struct inode *inode, struct file *file)
+{
+ kfree(file->private_data);
+ return 0;
+}
+
+static ssize_t ocfs2_debug_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return simple_read_from_buffer(buf, nbytes, ppos, file->private_data,
+ i_size_read(file->f_mapping->host));
+}
+#else
+static int ocfs2_osb_debug_open(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+static int ocfs2_debug_release(struct inode *inode, struct file *file)
+{
+ return 0;
+}
+static ssize_t ocfs2_debug_read(struct file *file, char __user *buf,
+ size_t nbytes, loff_t *ppos)
+{
+ return 0;
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static struct file_operations ocfs2_osb_debug_fops = {
+ .open = ocfs2_osb_debug_open,
+ .release = ocfs2_debug_release,
+ .read = ocfs2_debug_read,
+ .llseek = generic_file_llseek,
+};
+
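The fs_state file above uses a snapshot-on-open scheme: open() renders the whole report into a freshly allocated page, records the rendered length as the inode size, and read() serves byte ranges from that frozen copy, so a slow reader never sees a half-updated mix of fields. The same scheme in isolation, as a hedged sketch with hypothetical snap_* names:

	static int snap_open(struct inode *inode, struct file *file)
	{
		char *buf = kmalloc(PAGE_SIZE, GFP_KERNEL);

		if (!buf)
			return -ENOMEM;
		/* render once; readers consume this stable copy */
		i_size_write(inode, snprintf(buf, PAGE_SIZE, "example: %d\n", 42));
		file->private_data = buf;
		return 0;
	}

	static ssize_t snap_read(struct file *file, char __user *ubuf,
				 size_t nbytes, loff_t *ppos)
	{
		return simple_read_from_buffer(ubuf, nbytes, ppos,
					       file->private_data,
					       i_size_read(file->f_mapping->host));
	}

	static int snap_release(struct inode *inode, struct file *file)
	{
		kfree(file->private_data);	/* buffer lives as long as the open file */
		return 0;
	}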
/*
* write_super and sync_fs ripped right out of ext3.
*/
@@ -926,6 +1090,16 @@ static int ocfs2_fill_super(struct super_block *sb, void *data, int silent)
goto read_super_error;
}
+ osb->osb_ctxt = debugfs_create_file("fs_state", S_IFREG|S_IRUSR,
+ osb->osb_debug_root,
+ osb,
+ &ocfs2_osb_debug_fops);
+ if (!osb->osb_ctxt) {
+ status = -EINVAL;
+ mlog_errno(status);
+ goto read_super_error;
+ }
+
status = ocfs2_mount_volume(sb);
if (osb->root_inode)
inode = igrab(osb->root_inode);
@@ -1613,6 +1787,8 @@ static void ocfs2_dismount_volume(struct super_block *sb, int mnt_err)
osb = OCFS2_SB(sb);
BUG_ON(!osb);
+ debugfs_remove(osb->osb_ctxt);
+
ocfs2_disable_quotas(osb);
ocfs2_shutdown_local_alloc(osb);
@@ -1887,6 +2063,9 @@ static int ocfs2_initialize_super(struct super_block *sb,
INIT_WORK(&journal->j_recovery_work, ocfs2_complete_recovery);
journal->j_state = OCFS2_JOURNAL_FREE;
+ INIT_WORK(&osb->dentry_lock_work, ocfs2_drop_dl_inodes);
+ osb->dentry_lock_list = NULL;
+
/* get some pseudo constants for clustersize bits */
osb->s_clustersize_bits =
le32_to_cpu(di->id2.i_super.s_clustersize_bits);
diff --git a/fs/ocfs2/xattr.c b/fs/ocfs2/xattr.c
index e1d638af6ac3..915039fffe6e 100644
--- a/fs/ocfs2/xattr.c
+++ b/fs/ocfs2/xattr.c
@@ -4729,13 +4729,6 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
vb.vb_xv = (struct ocfs2_xattr_value_root *)
(vb.vb_bh->b_data + offset % blocksize);
- ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket,
- OCFS2_JOURNAL_ACCESS_WRITE);
- if (ret) {
- mlog_errno(ret);
- goto out;
- }
-
/*
* From here on out we have to dirty the bucket. The generic
* value calls only modify one of the bucket's bhs, but we need
@@ -4748,12 +4741,18 @@ static int ocfs2_xattr_bucket_value_truncate(struct inode *inode,
ret = ocfs2_xattr_value_truncate(inode, &vb, len, ctxt);
if (ret) {
mlog_errno(ret);
- goto out_dirty;
+ goto out;
+ }
+
+ ret = ocfs2_xattr_bucket_journal_access(ctxt->handle, bucket,
+ OCFS2_JOURNAL_ACCESS_WRITE);
+ if (ret) {
+ mlog_errno(ret);
+ goto out;
}
xe->xe_value_size = cpu_to_le64(len);
-out_dirty:
ocfs2_xattr_bucket_journal_dirty(ctxt->handle, bucket);
out:
diff --git a/fs/omfs/Kconfig b/fs/omfs/Kconfig
new file mode 100644
index 000000000000..b1b9a0aba6fd
--- /dev/null
+++ b/fs/omfs/Kconfig
@@ -0,0 +1,13 @@
+config OMFS_FS
+ tristate "SonicBlue Optimized MPEG File System support"
+ depends on BLOCK
+ select CRC_ITU_T
+ help
+ This is the proprietary file system used by the Rio Karma music
+ player and ReplayTV DVR. Despite the name, this filesystem is not
+ more efficient than a standard FS for MPEG files; in fact, the
+ opposite is likely true. Say Y if you have either of these devices
+ and wish to mount its disk.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called omfs. If unsure, say N.
diff --git a/fs/proc/Makefile b/fs/proc/Makefile
index 63d965193b22..757f7c11461c 100644
--- a/fs/proc/Makefile
+++ b/fs/proc/Makefile
@@ -9,6 +9,7 @@ proc-$(CONFIG_MMU) := mmu.o task_mmu.o
proc-y += inode.o root.o base.o generic.o array.o \
proc_tty.o
+proc-y += automount.o
proc-y += cmdline.o
proc-y += cpuinfo.o
proc-y += devices.o
diff --git a/fs/proc/automount.c b/fs/proc/automount.c
new file mode 100644
index 000000000000..5d22b5aa442c
--- /dev/null
+++ b/fs/proc/automount.c
@@ -0,0 +1,28 @@
+#include <linux/list.h>
+#include <linux/mount.h>
+#include <linux/workqueue.h>
+#include "internal.h"
+
+LIST_HEAD(proc_automounts);
+
+static void proc_expire_automounts(struct work_struct *work);
+
+static DECLARE_DELAYED_WORK(proc_automount_task, proc_expire_automounts);
+static int proc_automount_timeout = 500 * HZ;
+
+void proc_shrink_automounts(void)
+{
+ struct list_head *list = &proc_automounts;
+
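+	/* Two back-to-back passes: the first marks unused mounts for
+	 * expiry, the second unmounts those that stayed unused. */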
+ mark_mounts_for_expiry(list);
+ mark_mounts_for_expiry(list);
+ if (list_empty(list))
+ return;
+
+ schedule_delayed_work(&proc_automount_task, proc_automount_timeout);
+}
+
+static void proc_expire_automounts(struct work_struct *work)
+{
+ proc_shrink_automounts();
+}
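Mounts are only subject to this aging if they were added to proc_automounts in the first place; the producer side appears in proc_net_follow_link() later in this patch. As a hedged sketch of that registration step (the helper name is hypothetical), a shrinkable mount joins the list like so:

	/* hypothetical helper: put an already-created mount at ``path'' on
	 * proc_automounts so proc_shrink_automounts() can expire it */
	static int proc_attach_automount(struct vfsmount *mnt, struct path *path)
	{
		return do_add_mount(mntget(mnt), path, MNT_SHRINKABLE,
				    &proc_automounts);
	}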
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 0c9de19a1633..baf850c09609 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -126,6 +126,8 @@ struct pid_entry {
NOD(NAME, (S_IFREG|(MODE)), \
NULL, &proc_single_file_operations, \
{ .proc_show = show } )
+#define MNT(NAME, MODE, iops) \
+ NOD(NAME, (S_IFDIR|(MODE)), &iops, NULL, {})
/*
* Count the number of hardlinks for the pid_entry table, excluding the .
@@ -1514,6 +1516,7 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
struct inode *inode = dentry->d_inode;
struct task_struct *task = get_proc_task(inode);
const struct cred *cred;
+ int ret = 0;
if (task) {
if ((inode->i_mode == (S_IFDIR|S_IRUGO|S_IXUGO)) ||
@@ -1528,12 +1531,14 @@ static int pid_revalidate(struct dentry *dentry, struct nameidata *nd)
inode->i_gid = 0;
}
inode->i_mode &= ~(S_ISUID | S_ISGID);
- security_task_to_inode(task, inode);
+ ret = proc_net_revalidate(task, dentry, nd);
+ if (ret == 1)
+ security_task_to_inode(task, inode);
put_task_struct(task);
- return 1;
}
- d_drop(dentry);
- return 0;
+ if (ret == 0)
+ d_drop(dentry);
+ return ret;
}
static int pid_delete_dentry(struct dentry * dentry)
@@ -2488,7 +2493,7 @@ static const struct pid_entry tgid_base_stuff[] = {
DIR("fd", S_IRUSR|S_IXUSR, proc_fd_inode_operations, proc_fd_operations),
DIR("fdinfo", S_IRUSR|S_IXUSR, proc_fdinfo_inode_operations, proc_fdinfo_operations),
#ifdef CONFIG_NET
- DIR("net", S_IRUGO|S_IXUGO, proc_net_inode_operations, proc_net_operations),
+ MNT("net", S_IRUGO|S_IXUGO, proc_net_inode_operations),
#endif
REG("environ", S_IRUSR, proc_environ_operations),
INF("auxv", S_IRUSR, proc_pid_auxv),
@@ -2591,15 +2596,11 @@ static void proc_flush_task_mnt(struct vfsmount *mnt, pid_t pid, pid_t tgid)
name.len = snprintf(buf, sizeof(buf), "%d", pid);
dentry = d_hash_and_lookup(mnt->mnt_root, &name);
if (dentry) {
- if (!(current->flags & PF_EXITING))
- shrink_dcache_parent(dentry);
+ shrink_dcache_parent(dentry);
d_drop(dentry);
dput(dentry);
}
- if (tgid == 0)
- goto out;
-
name.name = buf;
name.len = snprintf(buf, sizeof(buf), "%d", tgid);
leader = d_hash_and_lookup(mnt->mnt_root, &name);
@@ -2660,13 +2661,12 @@ void proc_flush_task(struct task_struct *task)
struct upid *upid;
pid = task_pid(task);
- if (thread_group_leader(task))
- tgid = task_tgid(task);
+ tgid = task_tgid(task);
for (i = 0; i <= pid->level; i++) {
upid = &pid->numbers[i];
proc_flush_task_mnt(upid->ns->proc_mnt, upid->nr,
- tgid ? tgid->numbers[i].nr : 0);
+ tgid->numbers[i].nr);
}
upid = &pid->numbers[pid->level];
diff --git a/fs/proc/generic.c b/fs/proc/generic.c
index db7fa5cab988..65cc1eefeae3 100644
--- a/fs/proc/generic.c
+++ b/fs/proc/generic.c
@@ -37,7 +37,7 @@ static int proc_match(int len, const char *name, struct proc_dir_entry *de)
#define PROC_BLOCK_SIZE (PAGE_SIZE - 1024)
static ssize_t
-proc_file_read(struct file *file, char __user *buf, size_t nbytes,
+__proc_file_read(struct file *file, char __user *buf, size_t nbytes,
loff_t *ppos)
{
struct inode * inode = file->f_path.dentry->d_inode;
@@ -183,19 +183,47 @@ proc_file_read(struct file *file, char __user *buf, size_t nbytes,
}
static ssize_t
+proc_file_read(struct file *file, char __user *buf, size_t nbytes,
+ loff_t *ppos)
+{
+ struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ ssize_t rv = -EIO;
+
+ spin_lock(&pde->pde_unload_lock);
+ if (!pde->proc_fops) {
+ spin_unlock(&pde->pde_unload_lock);
+ return rv;
+ }
+ pde->pde_users++;
+ spin_unlock(&pde->pde_unload_lock);
+
+ rv = __proc_file_read(file, buf, nbytes, ppos);
+
+ pde_users_dec(pde);
+ return rv;
+}
+
+static ssize_t
proc_file_write(struct file *file, const char __user *buffer,
size_t count, loff_t *ppos)
{
- struct inode *inode = file->f_path.dentry->d_inode;
- struct proc_dir_entry * dp;
-
- dp = PDE(inode);
-
- if (!dp->write_proc)
- return -EIO;
+ struct proc_dir_entry *pde = PDE(file->f_path.dentry->d_inode);
+ ssize_t rv = -EIO;
+
+ if (pde->write_proc) {
+ spin_lock(&pde->pde_unload_lock);
+ if (!pde->proc_fops) {
+ spin_unlock(&pde->pde_unload_lock);
+ return rv;
+ }
+ pde->pde_users++;
+ spin_unlock(&pde->pde_unload_lock);
- /* FIXME: does this routine need ppos? probably... */
- return dp->write_proc(file, buffer, count, dp->data);
+ /* FIXME: does this routine need ppos? probably... */
+ rv = pde->write_proc(file, buffer, count, pde->data);
+ pde_users_dec(pde);
+ }
+ return rv;
}
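Both wrappers follow the same use-count discipline: under pde_unload_lock, check that ->proc_fops is still set (remove_proc_entry() clears it to turn away new callers), then bump pde_users so removal waits for in-flight calls, and drop the pin with pde_users_dec() afterwards. The guard, factored into a helper as a sketch — pde_enter() is a hypothetical name, not part of this patch:

	/* returns 0 with the entry pinned, or -EIO if it is being removed;
	 * pair each success with pde_users_dec(pde) */
	static int pde_enter(struct proc_dir_entry *pde)
	{
		spin_lock(&pde->pde_unload_lock);
		if (!pde->proc_fops) {
			spin_unlock(&pde->pde_unload_lock);
			return -EIO;
		}
		pde->pde_users++;
		spin_unlock(&pde->pde_unload_lock);
		return 0;
	}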
@@ -307,6 +335,21 @@ static DEFINE_SPINLOCK(proc_inum_lock); /* protects the above */
/*
* Return an inode number between PROC_DYNAMIC_FIRST and
* 0xffffffff, or zero on failure.
+ *
+ * Current inode allocations in the proc-fs (hex-numbers):
+ *
+ * 00000000 reserved
+ * 00000001-00000fff static entries (goners)
+ * 001 root-ino
+ *
+ * 00001000-00001fff unused
+ * 0001xxxx-7fffxxxx pid-dir entries for pid 1-7fff
+ * 80000000-efffffff unused
+ * f0000000-ffffffff dynamic entries
+ *
+ * Goal:
+ * Once we split the thing into several virtual filesystems,
+ * we will get rid of magical ranges (and this comment, BTW).
*/
static unsigned int get_inode_number(void)
{
@@ -528,7 +571,6 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
dp->proc_fops = &proc_dir_operations;
dp->proc_iops = &proc_dir_inode_operations;
}
- dir->nlink++;
} else if (S_ISLNK(dp->mode)) {
if (dp->proc_iops == NULL)
dp->proc_iops = &proc_link_inode_operations;
@@ -551,6 +593,8 @@ static int proc_register(struct proc_dir_entry * dir, struct proc_dir_entry * dp
dp->next = dir->subdir;
dp->parent = dir;
dir->subdir = dp;
+ if (S_ISDIR(dp->mode))
+ dir->nlink++;
spin_unlock(&proc_subdir_lock);
return 0;
@@ -595,6 +639,24 @@ static struct proc_dir_entry *__proc_create(struct proc_dir_entry **parent,
return ent;
}
+struct proc_dir_entry *proc_create_root(void)
+{
+ struct proc_dir_entry *ent, *parent = NULL;
+
+ ent = __proc_create(&parent, "..", S_IFDIR | S_IRUGO | S_IXUGO, 2);
+ if (ent) {
+ ent->proc_fops = &proc_dir_operations;
+ ent->proc_iops = &proc_dir_inode_operations;
+ ent->low_ino = get_inode_number();
+ ent->parent = ent;
+ if (!ent->low_ino) {
+ kfree(ent);
+ ent = NULL;
+ }
+ }
+ return ent;
+}
+
struct proc_dir_entry *proc_symlink(const char *name,
struct proc_dir_entry *parent, const char *dest)
{
@@ -635,23 +697,6 @@ struct proc_dir_entry *proc_mkdir_mode(const char *name, mode_t mode,
return ent;
}
-struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
- struct proc_dir_entry *parent)
-{
- struct proc_dir_entry *ent;
-
- ent = __proc_create(&parent, name, S_IFDIR | S_IRUGO | S_IXUGO, 2);
- if (ent) {
- ent->data = net;
- if (proc_register(parent, ent) < 0) {
- kfree(ent);
- ent = NULL;
- }
- }
- return ent;
-}
-EXPORT_SYMBOL_GPL(proc_net_mkdir);
-
struct proc_dir_entry *proc_mkdir(const char *name,
struct proc_dir_entry *parent)
{
@@ -754,6 +799,8 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
de = *p;
*p = de->next;
de->next = NULL;
+ if (S_ISDIR(de->mode))
+ parent->nlink--;
break;
}
}
@@ -761,6 +808,11 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
if (!de)
return;
+ release_proc_entry(de);
+}
+
+void release_proc_entry(struct proc_dir_entry *de)
+{
spin_lock(&de->pde_unload_lock);
/*
* Stop accepting new callers into module. If you're
@@ -796,8 +848,6 @@ continue_removing:
}
spin_unlock(&de->pde_unload_lock);
- if (S_ISDIR(de->mode))
- parent->nlink--;
de->nlink = 0;
WARN(de->subdir, KERN_WARNING "%s: removing non-empty directory "
"'%s/%s', leaking at least '%s'\n", __func__,
diff --git a/fs/proc/inode-alloc.txt b/fs/proc/inode-alloc.txt
deleted file mode 100644
index 77212f938c2c..000000000000
--- a/fs/proc/inode-alloc.txt
+++ /dev/null
@@ -1,14 +0,0 @@
-Current inode allocations in the proc-fs (hex-numbers):
-
- 00000000 reserved
- 00000001-00000fff static entries (goners)
- 001 root-ino
-
- 00001000-00001fff unused
- 0001xxxx-7fffxxxx pid-dir entries for pid 1-7fff
- 80000000-efffffff unused
- f0000000-ffffffff dynamic entries
-
-Goal:
- a) once we'll split the thing into several virtual filesystems we
- will get rid of magical ranges (and this file, BTW).
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index 3e76bb9b3ad6..5660b9c5d414 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -58,11 +58,8 @@ static void proc_delete_inode(struct inode *inode)
/* Let go of any associated proc directory entry */
de = PROC_I(inode)->pde;
- if (de) {
- if (de->owner)
- module_put(de->owner);
+ if (de)
de_put(de);
- }
if (PROC_I(inode)->sysctl)
sysctl_head_put(PROC_I(inode)->sysctl);
clear_inode(inode);
@@ -112,7 +109,7 @@ void __init proc_init_inodecache(void)
init_once);
}
-static const struct super_operations proc_sops = {
+const struct super_operations proc_sops = {
.alloc_inode = proc_alloc_inode,
.destroy_inode = proc_destroy_inode,
.drop_inode = generic_delete_inode,
@@ -127,7 +124,7 @@ static void __pde_users_dec(struct proc_dir_entry *pde)
complete(pde->pde_unload_completion);
}
-static void pde_users_dec(struct proc_dir_entry *pde)
+void pde_users_dec(struct proc_dir_entry *pde)
{
spin_lock(&pde->pde_unload_lock);
__pde_users_dec(pde);
@@ -449,12 +446,9 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
{
struct inode * inode;
- if (!try_module_get(de->owner))
- goto out_mod;
-
inode = iget_locked(sb, ino);
if (!inode)
- goto out_ino;
+ return NULL;
if (inode->i_state & I_NEW) {
inode->i_mtime = inode->i_atime = inode->i_ctime = CURRENT_TIME;
PROC_I(inode)->fd = 0;
@@ -485,14 +479,8 @@ struct inode *proc_get_inode(struct super_block *sb, unsigned int ino,
}
}
unlock_new_inode(inode);
- } else
- module_put(de->owner);
+ }
return inode;
-
-out_ino:
- module_put(de->owner);
-out_mod:
- return NULL;
}
int proc_fill_super(struct super_block *s)
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index cd53ff838498..5b890ba33b4a 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -58,9 +58,17 @@ extern const struct file_operations proc_numa_maps_operations;
extern const struct file_operations proc_smaps_operations;
extern const struct file_operations proc_clear_refs_operations;
extern const struct file_operations proc_pagemap_operations;
-extern const struct file_operations proc_net_operations;
extern const struct inode_operations proc_net_inode_operations;
+#ifdef CONFIG_NET
+int proc_net_revalidate(struct task_struct *tsk, struct dentry *dentry, struct nameidata *nd);
+#else
+static inline int proc_net_revalidate(struct task_struct *tsk, struct dentry *dentry, struct nameidata *nd)
+{
+ return 1;
+}
+#endif
+
void free_proc_entry(struct proc_dir_entry *de);
void proc_init_inodecache(void);
@@ -84,6 +92,9 @@ struct dentry *proc_lookup_de(struct proc_dir_entry *de, struct inode *ino,
struct dentry *dentry);
int proc_readdir_de(struct proc_dir_entry *de, struct file *filp, void *dirent,
filldir_t filldir);
+struct proc_dir_entry *proc_create_root(void);
+void release_proc_entry(struct proc_dir_entry *de);
+extern const struct super_operations proc_sops;
struct pde_opener {
struct inode *inode;
@@ -91,3 +102,6 @@ struct pde_opener {
int (*release)(struct inode *, struct file *);
struct list_head lh;
};
+void pde_users_dec(struct proc_dir_entry *pde);
+
+extern struct list_head proc_automounts;
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 04d1270f1c38..baaddad8436c 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -20,15 +20,17 @@
#include <linux/bitops.h>
#include <linux/mount.h>
#include <linux/nsproxy.h>
+#include <linux/namei.h>
#include <net/net_namespace.h>
#include <linux/seq_file.h>
#include "internal.h"
+static struct file_system_type proc_net_fs_type;
static struct net *get_proc_net(const struct inode *inode)
{
- return maybe_get_net(PDE_NET(PDE(inode)));
+ return maybe_get_net(inode->i_sb->s_fs_info);
}
int seq_open_net(struct inode *ino, struct file *f,
@@ -117,66 +119,91 @@ static struct net *get_proc_task_net(struct inode *dir)
return net;
}
-static struct dentry *proc_tgid_net_lookup(struct inode *dir,
- struct dentry *dentry, struct nameidata *nd)
+void *proc_net_follow_link(struct dentry *dentry, struct nameidata *nd)
{
- struct dentry *de;
+ /* Follow to a mount point of the proper network namespace. */
+ struct vfsmount *mnt;
struct net *net;
-
- de = ERR_PTR(-ENOENT);
- net = get_proc_task_net(dir);
- if (net != NULL) {
- de = proc_lookup_de(net->proc_net, dir, dentry);
- put_net(net);
- }
- return de;
-}
-
-static int proc_tgid_net_getattr(struct vfsmount *mnt, struct dentry *dentry,
- struct kstat *stat)
-{
- struct inode *inode = dentry->d_inode;
- struct net *net;
-
- net = get_proc_task_net(inode);
-
- generic_fillattr(inode, stat);
-
- if (net != NULL) {
- stat->nlink = net->proc_net->nlink;
- put_net(net);
+ int err = -ENOENT;
+
+ net = get_proc_task_net(dentry->d_inode);
+ if (!net)
+ goto out_err;
+
+ mnt = kern_mount_data(&proc_net_fs_type, net);
+ if (IS_ERR(mnt))
+ goto out_err;
+
+ dput(nd->path.dentry);
+ nd->path.dentry = dget(dentry);
+
+ err = do_add_mount(mntget(mnt), &nd->path, MNT_SHRINKABLE,
+ &proc_automounts);
+ if (err < 0) {
+ mntput(mnt);
+ if (err == -EBUSY)
+ goto out_follow;
+ goto out_err;
}
-
- return 0;
+ err = 0;
+ path_put(&nd->path);
+ nd->path.mnt = mnt;
+ nd->path.dentry = dget(mnt->mnt_root);
+ put_net(net);
+out:
+ return ERR_PTR(err);
+out_err:
+ path_put(&nd->path);
+ goto out;
+out_follow:
+ /* We raced with ourselves, so just walk the mounts */
+ while (d_mountpoint(nd->path.dentry) &&
+ follow_down(&nd->path.mnt, &nd->path.dentry))
+ ;
+ err = 0;
+ goto out;
}
const struct inode_operations proc_net_inode_operations = {
- .lookup = proc_tgid_net_lookup,
- .getattr = proc_tgid_net_getattr,
+ .follow_link = proc_net_follow_link,
};
-static int proc_tgid_net_readdir(struct file *filp, void *dirent,
- filldir_t filldir)
+
+int proc_net_revalidate(struct task_struct *task, struct dentry *dentry,
+ struct nameidata *nd)
{
- int ret;
- struct net *net;
+ struct inode *inode = dentry->d_inode;
+ struct dentry *tdentry;
+ struct vfsmount *tmnt;
+ int ret = 1;
- ret = -EINVAL;
- net = get_proc_task_net(filp->f_path.dentry->d_inode);
- if (net != NULL) {
- ret = proc_readdir_de(net->proc_net, filp, dirent, filldir);
- put_net(net);
+ /* Are we talking about a proc/net mount point? */
+ if (!nd || inode->i_op != &proc_net_inode_operations)
+ goto out;
+
+ /*
+ * If the wrong filesystem is mounted on /proc/<pid>/net, report that
+ * the dentry is invalid.
+ */
+ tmnt = mntget(nd->path.mnt);
+ tdentry = dget(dentry);
+ if (follow_down(&tmnt, &tdentry)) {
+ struct nsproxy *ns;
+
+ rcu_read_lock();
+ ns = task_nsproxy(task);
+ if ((ns == NULL) ||
+ (tmnt->mnt_sb->s_magic != PROC_NET_SUPER_MAGIC) ||
+ (tmnt->mnt_sb->s_fs_info != ns->net_ns))
+ ret = 0;
+ rcu_read_unlock();
}
+ dput(tdentry);
+ mntput(tmnt);
+out:
return ret;
}
-const struct file_operations proc_net_operations = {
- .llseek = generic_file_llseek,
- .read = generic_read_dir,
- .readdir = proc_tgid_net_readdir,
-};
-
-
struct proc_dir_entry *proc_net_fops_create(struct net *net,
const char *name, mode_t mode, const struct file_operations *fops)
{
@@ -184,28 +211,108 @@ struct proc_dir_entry *proc_net_fops_create(struct net *net,
}
EXPORT_SYMBOL_GPL(proc_net_fops_create);
+struct proc_dir_entry *proc_net_mkdir(struct net *net, const char *name,
+ struct proc_dir_entry *parent)
+{
+ if (!parent)
+ parent = net->proc_net;
+ return proc_mkdir(name, parent);
+}
+EXPORT_SYMBOL_GPL(proc_net_mkdir);
+
void proc_net_remove(struct net *net, const char *name)
{
remove_proc_entry(name, net->proc_net);
}
EXPORT_SYMBOL_GPL(proc_net_remove);
+static int proc_net_fill_super(struct super_block *sb)
+{
+ struct net *net = sb->s_fs_info;
+ struct proc_dir_entry *netd = net->proc_net;
+ struct inode *root_inode = NULL;
+
+ sb->s_flags |= MS_NODIRATIME | MS_NOSUID | MS_NOEXEC;
+ sb->s_blocksize = PAGE_SIZE;
+ sb->s_blocksize_bits = PAGE_SHIFT;
+ sb->s_magic = PROC_NET_SUPER_MAGIC;
+ sb->s_op = &proc_sops;
+ sb->s_time_gran = 1;
+
+ de_get(netd);
+ root_inode = proc_get_inode(sb, netd->low_ino, netd);
+ if (!root_inode)
+ goto out_no_root;
+ root_inode->i_uid = 0;
+ root_inode->i_gid = 0;
+ sb->s_root = d_alloc_root(root_inode);
+ if (!sb->s_root)
+ goto out_no_root;
+ return 0;
+
+out_no_root:
+ printk(KERN_ERR "%s: get root inode failed\n", __func__);
+ iput(root_inode);
+ de_put(netd);
+ return -ENOMEM;
+}
+
+static int proc_net_test_super(struct super_block *sb, void *data)
+{
+ return sb->s_fs_info == data;
+}
+
+static int proc_net_set_super(struct super_block *sb, void *data)
+{
+ sb->s_fs_info = data;
+ return set_anon_super(sb, NULL);
+}
+
+static int proc_net_get_sb(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data, struct vfsmount *mnt)
+{
+ struct super_block *sb;
+
+ if (!(flags & MS_KERNMOUNT))
+ data = current->nsproxy->net_ns;
+
+ sb = sget(fs_type, proc_net_test_super, proc_net_set_super, data);
+ if (IS_ERR(sb))
+ return PTR_ERR(sb);
+
+ if (!sb->s_root) {
+ int err;
+ sb->s_flags = flags;
+ err = proc_net_fill_super(sb);
+ if (err) {
+ up_write(&sb->s_umount);
+ deactivate_super(sb);
+ return err;
+ }
+
+ sb->s_flags |= MS_ACTIVE;
+ }
+
+ return simple_set_mnt(mnt, sb);
+}
+
+static struct file_system_type proc_net_fs_type = {
+ .name = "proc/net",
+ .get_sb = proc_net_get_sb,
+ .kill_sb = kill_litter_super,
+};
+
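Because proc_net_set_super() stores the struct net pointer in s_fs_info and proc_net_test_super() matches on it, every mount request for the same network namespace resolves to the same superblock. A hedged illustration, wrapped in a hypothetical demo function:

	/* hypothetical demo: two kernel mounts keyed by the same net share
	 * one superblock, found by sget() via proc_net_test_super() */
	static int proc_net_mount_demo(struct net *net)
	{
		struct vfsmount *m1, *m2;
		int shared;

		m1 = kern_mount_data(&proc_net_fs_type, net);
		if (IS_ERR(m1))
			return PTR_ERR(m1);
		m2 = kern_mount_data(&proc_net_fs_type, net);
		if (IS_ERR(m2)) {
			mntput(m1);
			return PTR_ERR(m2);
		}
		shared = (m1->mnt_sb == m2->mnt_sb);	/* expected: true */
		mntput(m2);
		mntput(m1);
		return shared;
	}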
static __net_init int proc_net_ns_init(struct net *net)
{
struct proc_dir_entry *netd, *net_statd;
+ struct vfsmount *mnt;
int err;
err = -ENOMEM;
- netd = kzalloc(sizeof(*netd), GFP_KERNEL);
+ netd = proc_create_root();
if (!netd)
goto out;
- netd->data = net;
- netd->nlink = 2;
- netd->name = "net";
- netd->namelen = 3;
- netd->parent = &proc_root;
-
err = -EEXIST;
net_statd = proc_net_mkdir(net, "stat", netd);
if (!net_statd)
@@ -213,8 +320,17 @@ static __net_init int proc_net_ns_init(struct net *net)
net->proc_net = netd;
net->proc_net_stat = net_statd;
+
+ mnt = kern_mount_data(&proc_net_fs_type, net);
+ if (IS_ERR(mnt))
+ goto free_stat;
+
+ net->proc_mnt = mnt;
+
return 0;
+free_stat:
+ remove_proc_entry("stat", netd);
free_net:
kfree(netd);
out:
@@ -224,7 +340,14 @@ out:
static __net_exit void proc_net_ns_exit(struct net *net)
{
remove_proc_entry("stat", net->proc_net);
- kfree(net->proc_net);
+ release_proc_entry(net->proc_net);
+ /*
+ * We won't be looking up this super block any more, so set s_fs_info to
+ * NULL to ensure it doesn't conflict with network namespaces allocated
+ * in the future at the same address.
+ */
+ net->proc_mnt->mnt_sb->s_fs_info = NULL;
+ mntput(net->proc_mnt);
}
static struct pernet_operations __net_initdata proc_net_ns_ops = {
@@ -235,6 +358,6 @@ static struct pernet_operations __net_initdata proc_net_ns_ops = {
int __init proc_net_init(void)
{
proc_symlink("net", NULL, "self/net");
-
+ register_filesystem(&proc_net_fs_type);
return register_pernet_subsys(&proc_net_ns_ops);
}
diff --git a/fs/proc/proc_tty.c b/fs/proc/proc_tty.c
index d153946d6d15..4a9e0f65ae60 100644
--- a/fs/proc/proc_tty.c
+++ b/fs/proc/proc_tty.c
@@ -152,7 +152,6 @@ void proc_tty_register_driver(struct tty_driver *driver)
if (!ent)
return;
ent->read_proc = driver->ops->read_proc;
- ent->owner = driver->owner;
ent->data = driver;
driver->proc_entry = ent;
diff --git a/fs/qnx4/Kconfig b/fs/qnx4/Kconfig
new file mode 100644
index 000000000000..be8e0e1445b6
--- /dev/null
+++ b/fs/qnx4/Kconfig
@@ -0,0 +1,25 @@
+config QNX4FS_FS
+ tristate "QNX4 file system support (read only)"
+ depends on BLOCK
+ help
+ This is the file system used by the real-time operating systems
+ QNX 4 and QNX 6 (the latter is also called QNX RTP).
+ Further information is available at <http://www.qnx.com/>.
+ Say Y if you intend to mount QNX hard disks or floppies.
+ Unless you say Y to "QNX4FS read-write support" below, you will
+ only be able to read these file systems.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called qnx4.
+
+ If you don't know whether you need it, then you don't need it:
+ answer N.
+
+config QNX4FS_RW
+ bool "QNX4FS write support (DANGEROUS)"
+ depends on QNX4FS_FS && EXPERIMENTAL && BROKEN
+ help
+ Say Y if you want to test write support for QNX4 file systems.
+
+ It's currently broken, so for now:
+ answer N.
diff --git a/fs/reiserfs/Kconfig b/fs/reiserfs/Kconfig
new file mode 100644
index 000000000000..949b8c6addc8
--- /dev/null
+++ b/fs/reiserfs/Kconfig
@@ -0,0 +1,85 @@
+config REISERFS_FS
+ tristate "Reiserfs support"
+ help
+ ReiserFS stores not just filenames but the files themselves in a
+ balanced tree, and it uses journalling.
+
+ Balanced trees are more efficient than traditional file system
+ architectural foundations.
+
+ In general, ReiserFS is as fast as ext2, but is very efficient with
+ large directories and small files. Additional patches are needed
+ for NFS and quotas; please see <http://www.namesys.com/> for links.
+
+ It is more easily extended to have features currently found in
+ database and keyword search systems than block-allocation-based file
+ systems are. The next version will be so extended, and will support
+ plugins consistent with our motto "It takes more than a license to
+ make source code open."
+
+ Read <http://www.namesys.com/> to learn more about reiserfs.
+
+ Sponsored by Threshold Networks, Emusic.com, and Bigstorage.com.
+
+ If you like it, you can pay us to add new features to it that you
+ need, buy a support contract, or pay us to port it to another OS.
+
+config REISERFS_CHECK
+ bool "Enable reiserfs debug mode"
+ depends on REISERFS_FS
+ help
+ If you set this to Y, then ReiserFS will perform every internal
+ consistency check it can possibly imagine throughout its operation.
+ It will also go substantially slower. More than once we have
+ forgotten that this was on, and then gone despondent over the latest
+ benchmarks. :-) Use of this option allows our team to go all out in
+ checking for consistency when debugging without fear of its effect
+ on end users. If you are on the verge of sending in a bug report,
+ say Y and you might get a useful error message. Almost everyone
+ should say N.
+
+config REISERFS_PROC_INFO
+ bool "Stats in /proc/fs/reiserfs"
+ depends on REISERFS_FS && PROC_FS
+ help
+ Create under /proc/fs/reiserfs a hierarchy of files, displaying
+ various ReiserFS statistics and internal data at the expense of
+ making your kernel or module slightly larger (+8 KB). This also
+ increases the amount of kernel memory required for each mount.
+ Almost everyone but ReiserFS developers and people fine-tuning
+ reiserfs or tracing problems should say N.
+
+config REISERFS_FS_XATTR
+ bool "ReiserFS extended attributes"
+ depends on REISERFS_FS
+ help
+ Extended attributes are name:value pairs associated with inodes by
+ the kernel or by users (see the attr(5) manual page, or visit
+ <http://acl.bestbits.at/> for details).
+
+ If unsure, say N.
+
+config REISERFS_FS_POSIX_ACL
+ bool "ReiserFS POSIX Access Control Lists"
+ depends on REISERFS_FS_XATTR
+ select FS_POSIX_ACL
+ help
+ Posix Access Control Lists (ACLs) support permissions for users and
+ groups beyond the owner/group/world scheme.
+
+ To learn more about Access Control Lists, visit the Posix ACLs for
+ Linux website <http://acl.bestbits.at/>.
+
+ If you don't know what Access Control Lists are, say N.
+
+config REISERFS_FS_SECURITY
+ bool "ReiserFS Security Labels"
+ depends on REISERFS_FS_XATTR
+ help
+ Security labels support alternative access control models
+ implemented by security modules like SELinux. This option
+ enables an extended attribute handler for file security
+ labels in the ReiserFS filesystem.
+
+ If you are not using a security module that requires using
+ extended attributes for file security labels, say N.
diff --git a/fs/reiserfs/procfs.c b/fs/reiserfs/procfs.c
index 37173fa07d15..d79a913e7bfe 100644
--- a/fs/reiserfs/procfs.c
+++ b/fs/reiserfs/procfs.c
@@ -492,7 +492,6 @@ int reiserfs_proc_info_init(struct super_block *sb)
spin_lock_init(&__PINFO(sb).lock);
REISERFS_SB(sb)->procdir = proc_mkdir(b, proc_info_root);
if (REISERFS_SB(sb)->procdir) {
- REISERFS_SB(sb)->procdir->owner = THIS_MODULE;
REISERFS_SB(sb)->procdir->data = sb;
add_file(sb, "version", show_version);
add_file(sb, "super", show_super);
@@ -556,9 +555,7 @@ int reiserfs_proc_info_global_init(void)
{
if (proc_info_root == NULL) {
proc_info_root = proc_mkdir(proc_info_root_name, NULL);
- if (proc_info_root) {
- proc_info_root->owner = THIS_MODULE;
- } else {
+ if (!proc_info_root) {
reiserfs_warning(NULL,
"reiserfs: cannot create /proc/%s",
proc_info_root_name);
diff --git a/fs/reiserfs/super.c b/fs/reiserfs/super.c
index f3c820b75829..317c0352163c 100644
--- a/fs/reiserfs/super.c
+++ b/fs/reiserfs/super.c
@@ -629,8 +629,6 @@ static const struct super_operations reiserfs_sops = {
#ifdef CONFIG_QUOTA
#define QTYPE2NAME(t) ((t)==USRQUOTA?"user":"group")
-static int reiserfs_dquot_initialize(struct inode *, int);
-static int reiserfs_dquot_drop(struct inode *);
static int reiserfs_write_dquot(struct dquot *);
static int reiserfs_acquire_dquot(struct dquot *);
static int reiserfs_release_dquot(struct dquot *);
@@ -639,8 +637,8 @@ static int reiserfs_write_info(struct super_block *, int);
static int reiserfs_quota_on(struct super_block *, int, int, char *, int);
static struct dquot_operations reiserfs_quota_operations = {
- .initialize = reiserfs_dquot_initialize,
- .drop = reiserfs_dquot_drop,
+ .initialize = dquot_initialize,
+ .drop = dquot_drop,
.alloc_space = dquot_alloc_space,
.alloc_inode = dquot_alloc_inode,
.free_space = dquot_free_space,
@@ -1896,58 +1894,6 @@ static int reiserfs_statfs(struct dentry *dentry, struct kstatfs *buf)
}
#ifdef CONFIG_QUOTA
-static int reiserfs_dquot_initialize(struct inode *inode, int type)
-{
- struct reiserfs_transaction_handle th;
- int ret, err;
-
- /* We may create quota structure so we need to reserve enough blocks */
- reiserfs_write_lock(inode->i_sb);
- ret =
- journal_begin(&th, inode->i_sb,
- 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
- if (ret)
- goto out;
- ret = dquot_initialize(inode, type);
- err =
- journal_end(&th, inode->i_sb,
- 2 * REISERFS_QUOTA_INIT_BLOCKS(inode->i_sb));
- if (!ret && err)
- ret = err;
- out:
- reiserfs_write_unlock(inode->i_sb);
- return ret;
-}
-
-static int reiserfs_dquot_drop(struct inode *inode)
-{
- struct reiserfs_transaction_handle th;
- int ret, err;
-
- /* We may delete quota structure so we need to reserve enough blocks */
- reiserfs_write_lock(inode->i_sb);
- ret =
- journal_begin(&th, inode->i_sb,
- 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (ret) {
- /*
- * We call dquot_drop() anyway to at least release references
- * to quota structures so that umount does not hang.
- */
- dquot_drop(inode);
- goto out;
- }
- ret = dquot_drop(inode);
- err =
- journal_end(&th, inode->i_sb,
- 2 * REISERFS_QUOTA_DEL_BLOCKS(inode->i_sb));
- if (!ret && err)
- ret = err;
- out:
- reiserfs_write_unlock(inode->i_sb);
- return ret;
-}
-
static int reiserfs_write_dquot(struct dquot *dquot)
{
struct reiserfs_transaction_handle th;
diff --git a/fs/romfs/Kconfig b/fs/romfs/Kconfig
new file mode 100644
index 000000000000..1a17020f9faf
--- /dev/null
+++ b/fs/romfs/Kconfig
@@ -0,0 +1,16 @@
+config ROMFS_FS
+ tristate "ROM file system support"
+ depends on BLOCK
+ ---help---
+ This is a very small read-only file system mainly intended for
+ initial ram disks of installation disks, but it could be used for
+ other read-only media as well. Read
+ <file:Documentation/filesystems/romfs.txt> for details.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called romfs. Note that the file system of your
+ root partition (the one containing the directory /) cannot be a
+ module.
+
+ If you don't know whether you need it, then you don't need it:
+ answer N.
diff --git a/fs/smbfs/Kconfig b/fs/smbfs/Kconfig
new file mode 100644
index 000000000000..e668127c8b2e
--- /dev/null
+++ b/fs/smbfs/Kconfig
@@ -0,0 +1,55 @@
+config SMB_FS
+ tristate "SMB file system support (OBSOLETE, please use CIFS)"
+ depends on INET
+ select NLS
+ help
+ SMB (Server Message Block) is the protocol Windows for Workgroups
+ (WfW), Windows 95/98, Windows NT and OS/2 Lan Manager use to share
+ files and printers over local networks. Saying Y here allows you to
+ mount their file systems (often called "shares" in this context) and
+ access them just like any other Unix directory. Currently, this
+ works only if the Windows machines use TCP/IP as the underlying
+ transport protocol, and not NetBEUI. For details, read
+ <file:Documentation/filesystems/smbfs.txt> and the SMB-HOWTO,
+ available from <http://www.tldp.org/docs.html#howto>.
+
+ Note: if you just want your box to act as an SMB *server* and make
+ files and printing services available to Windows clients (which need
+ to have a TCP/IP stack), you don't need to say Y here; you can use
+ the program SAMBA (available from <ftp://ftp.samba.org/pub/samba/>)
+ for that.
+
+ General information about how to connect Linux, Windows machines and
+ Macs is on the WWW at <http://www.eats.com/linux_mac_win.html>.
+
+ To compile the SMB support as a module, choose M here:
+ the module will be called smbfs. Most people say N, however.
+
+config SMB_NLS_DEFAULT
+ bool "Use a default NLS"
+ depends on SMB_FS
+ help
+ Enabling this will make smbfs use nls translations by default. You
+ need to specify the local charset (CONFIG_NLS_DEFAULT) in the nls
+ settings and you need to give the default nls for the SMB server as
+ CONFIG_SMB_NLS_REMOTE.
+
+ The nls settings can be changed at mount time, if your smbmount
+ supports that, using the codepage and iocharset parameters.
+
+ smbmount from samba 2.2.0 or later supports this.
+
+config SMB_NLS_REMOTE
+ string "Default Remote NLS Option"
+ depends on SMB_NLS_DEFAULT
+ default "cp437"
+ help
+ This setting allows you to specify a default value for which
+ codepage the server uses. If this field is left blank, no
+ translations will be done by default. The local codepage/charset
+ defaults to CONFIG_NLS_DEFAULT.
+
+ The nls settings can be changed at mount time, if your smbmount
+ supports that, using the codepage and iocharset parameters.
+
+ smbmount from samba 2.2.0 or later supports this.
diff --git a/fs/splice.c b/fs/splice.c
index 4ed0ba44a966..dd727d43e5b7 100644
--- a/fs/splice.c
+++ b/fs/splice.c
@@ -59,7 +59,8 @@ static int page_cache_pipe_buf_steal(struct pipe_inode_info *pipe,
*/
wait_on_page_writeback(page);
- if (PagePrivate(page) && !try_to_release_page(page, GFP_KERNEL))
+ if (page_has_private(page) &&
+ !try_to_release_page(page, GFP_KERNEL))
goto out_unlock;
/*
diff --git a/fs/squashfs/Kconfig b/fs/squashfs/Kconfig
new file mode 100644
index 000000000000..25a00d19d686
--- /dev/null
+++ b/fs/squashfs/Kconfig
@@ -0,0 +1,51 @@
+config SQUASHFS
+ tristate "SquashFS 4.0 - Squashed file system support"
+ depends on BLOCK
+ select ZLIB_INFLATE
+ help
+ Saying Y here includes support for SquashFS 4.0 (a Compressed
+ Read-Only File System). Squashfs is a highly compressed read-only
+ filesystem for Linux. It uses zlib compression to compress both
+ files, inodes and directories. Inodes in the system are very small
+ and all blocks are packed to minimise data overhead. Block sizes
+ greater than 4K are supported up to a maximum of 1 Mbytes (default
+ block size 128K). SquashFS 4.0 supports 64 bit filesystems and files
+ (larger than 4GB), full uid/gid information, hard links and
+ timestamps.
+
+ Squashfs is intended for general read-only filesystem use, for
+ archival use (i.e. in cases where a .tar.gz file may be used), and in
+ embedded systems where low overhead is needed. Further information
+ and tools are available from <http://squashfs.sourceforge.net>.
+
+ If you want to compile this as a module (= code which can be
+ inserted in and removed from the running kernel whenever you want),
+ say M here and read <file:Documentation/modules.txt>. The module
+ will be called squashfs. Note that the root file system (the one
+ containing the directory /) cannot be compiled as a module.
+
+ If unsure, say N.
+
+config SQUASHFS_EMBEDDED
+ bool "Additional option for memory-constrained systems"
+ depends on SQUASHFS
+ default n
+ help
+ Saying Y here allows you to specify cache size.
+
+ If unsure, say N.
+
+config SQUASHFS_FRAGMENT_CACHE_SIZE
+ int "Number of fragments cached" if SQUASHFS_EMBEDDED
+ depends on SQUASHFS
+ default "3"
+ help
+ By default SquashFS caches the last 3 fragments read from
+ the filesystem. Increasing this amount may mean SquashFS
+ has to re-read fragments less often from disk, at the expense
+ of extra system memory. Decreasing this amount will mean
+ SquashFS uses less memory at the expense of extra reads from disk.
+
+ Note there must be at least one cached fragment. Anything
+ much more than three will probably not make much difference.
diff --git a/fs/super.c b/fs/super.c
index 645e5403f2a0..7352fe82e7d2 100644
--- a/fs/super.c
+++ b/fs/super.c
@@ -272,6 +272,7 @@ int fsync_super(struct super_block *sb)
__fsync_super(sb);
return sync_blockdev(sb->s_bdev);
}
+EXPORT_SYMBOL_GPL(fsync_super);
/**
* generic_shutdown_super - common helper for ->kill_sb()
diff --git a/fs/sysfs/Kconfig b/fs/sysfs/Kconfig
new file mode 100644
index 000000000000..f4b67588b9d6
--- /dev/null
+++ b/fs/sysfs/Kconfig
@@ -0,0 +1,23 @@
+config SYSFS
+ bool "sysfs file system support" if EMBEDDED
+ default y
+ help
+ The sysfs filesystem is a virtual filesystem that the kernel uses to
+ export internal kernel objects, their attributes, and their
+ relationships to one another.
+
+ Users can use sysfs to ascertain useful information about the running
+ kernel, such as the devices the kernel has discovered on each bus and
+ which driver each is bound to. sysfs can also be used to tune devices
+ and other kernel subsystems.
+
+ Some system agents rely on the information in sysfs to operate.
+ /sbin/hotplug uses device and object attributes in sysfs to assist in
+ delegating policy decisions, like persistently naming devices.
+
+ sysfs is currently used by the block subsystem to mount the root
+ partition. If sysfs is disabled you must specify the boot device on
+ the kernel boot command line via its major and minor numbers. For
+ example, "root=03:01" for /dev/hda1.
+
+ Designers of embedded systems may wish to say N here to conserve space.
diff --git a/fs/sysfs/bin.c b/fs/sysfs/bin.c
index 66f6e58a7e4b..f2c478c3424e 100644
--- a/fs/sysfs/bin.c
+++ b/fs/sysfs/bin.c
@@ -63,6 +63,9 @@ read(struct file *file, char __user *userbuf, size_t bytes, loff_t *off)
int count = min_t(size_t, bytes, PAGE_SIZE);
char *temp;
+ if (!bytes)
+ return 0;
+
if (size) {
if (offs > size)
return 0;
@@ -131,6 +134,9 @@ static ssize_t write(struct file *file, const char __user *userbuf,
int count = min_t(size_t, bytes, PAGE_SIZE);
char *temp;
+ if (!bytes)
+ return 0;
+
if (size) {
if (offs > size)
return 0;
diff --git a/fs/sysfs/mount.c b/fs/sysfs/mount.c
index ab343e371d64..84ef378673a8 100644
--- a/fs/sysfs/mount.c
+++ b/fs/sysfs/mount.c
@@ -17,11 +17,10 @@
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/module.h>
+#include <linux/magic.h>
#include "sysfs.h"
-/* Random magic number */
-#define SYSFS_MAGIC 0x62656572
static struct vfsmount *sysfs_mount;
struct super_block * sysfs_sb = NULL;
@@ -53,7 +52,9 @@ static int sysfs_fill_super(struct super_block *sb, void *data, int silent)
sysfs_sb = sb;
/* get root inode, initialize and unlock it */
+ mutex_lock(&sysfs_mutex);
inode = sysfs_get_inode(&sysfs_root);
+ mutex_unlock(&sysfs_mutex);
if (!inode) {
pr_debug("sysfs: could not get root inode\n");
return -ENOMEM;
diff --git a/fs/sysv/Kconfig b/fs/sysv/Kconfig
new file mode 100644
index 000000000000..33aeb4b75db1
--- /dev/null
+++ b/fs/sysv/Kconfig
@@ -0,0 +1,36 @@
+config SYSV_FS
+ tristate "System V/Xenix/V7/Coherent file system support"
+ depends on BLOCK
+ help
+ SCO, Xenix and Coherent are commercial Unix systems for Intel
+ machines, and Version 7 was used on the DEC PDP-11. Saying Y
+ here would allow you to read from their floppies and hard disk
+ partitions.
+
+ If you have floppies or hard disk partitions like that, it is likely
+ that they contain binaries from those other Unix systems; in order
+ to run these binaries, you will want to install linux-abi which is
+ a set of kernel modules that lets you run SCO, Xenix, Wyse,
+ UnixWare, Dell Unix and System V programs under Linux. It is
+ available via FTP (user: ftp) from
+ <ftp://ftp.openlinux.org/pub/people/hch/linux-abi/>.
+ NOTE: that will work only for binaries from Intel-based systems;
+ PDP ones will have to wait until somebody ports Linux to -11 ;-)
+
+ If you only intend to mount files from some other Unix over the
+ network using NFS, you don't need the System V file system support
+ (but you need NFS file system support obviously).
+
+ Note that this option is generally not needed for floppies, since a
+ good portable way to transport files and directories between unixes
+ (and even other operating systems) is given by the tar program ("man
+ tar" or preferably "info tar"). Note also that this option has
+ nothing whatsoever to do with the option "System V IPC". Read about
+ the System V file system in
+ <file:Documentation/filesystems/sysv-fs.txt>.
+ Saying Y here will enlarge your kernel by about 27 KB.
+
+ To compile this as a module, choose M here: the module will be called
+ sysv.
+
+ If you haven't heard about all of this before, it's safe to say N.
diff --git a/fs/ubifs/budget.c b/fs/ubifs/budget.c
index 175f9c590b77..f393620890ee 100644
--- a/fs/ubifs/budget.c
+++ b/fs/ubifs/budget.c
@@ -689,7 +689,7 @@ long long ubifs_reported_space(const struct ubifs_info *c, long long free)
}
/**
- * ubifs_get_free_space - return amount of free space.
+ * ubifs_get_free_space_nolock - return amount of free space.
* @c: UBIFS file-system description object
*
* This function calculates amount of free space to report to user-space.
@@ -704,16 +704,14 @@ long long ubifs_reported_space(const struct ubifs_info *c, long long free)
* traditional file-systems, because they have way less overhead than UBIFS.
* So, to keep users happy, UBIFS tries to take the overhead into account.
*/
-long long ubifs_get_free_space(struct ubifs_info *c)
+long long ubifs_get_free_space_nolock(struct ubifs_info *c)
{
- int min_idx_lebs, rsvd_idx_lebs, lebs;
+ int rsvd_idx_lebs, lebs;
long long available, outstanding, free;
- spin_lock(&c->space_lock);
- min_idx_lebs = c->min_idx_lebs;
- ubifs_assert(min_idx_lebs == ubifs_calc_min_idx_lebs(c));
+ ubifs_assert(c->min_idx_lebs == ubifs_calc_min_idx_lebs(c));
outstanding = c->budg_data_growth + c->budg_dd_growth;
- available = ubifs_calc_available(c, min_idx_lebs);
+ available = ubifs_calc_available(c, c->min_idx_lebs);
/*
* When reporting free space to user-space, UBIFS guarantees that it is
@@ -726,15 +724,14 @@ long long ubifs_get_free_space(struct ubifs_info *c)
* Note, the calculations below are similar to what we have in
* 'do_budget_space()', so refer there for comments.
*/
- if (min_idx_lebs > c->lst.idx_lebs)
- rsvd_idx_lebs = min_idx_lebs - c->lst.idx_lebs;
+ if (c->min_idx_lebs > c->lst.idx_lebs)
+ rsvd_idx_lebs = c->min_idx_lebs - c->lst.idx_lebs;
else
rsvd_idx_lebs = 0;
lebs = c->lst.empty_lebs + c->freeable_cnt + c->idx_gc_cnt -
c->lst.taken_empty_lebs;
lebs -= rsvd_idx_lebs;
available += lebs * (c->dark_wm - c->leb_overhead);
- spin_unlock(&c->space_lock);
if (available > outstanding)
free = ubifs_reported_space(c, available - outstanding);
@@ -742,3 +739,21 @@ long long ubifs_get_free_space(struct ubifs_info *c)
free = 0;
return free;
}
+
+/**
+ * ubifs_get_free_space - return amount of free space.
+ * @c: UBIFS file-system description object
+ *
+ * This function calculates and returns the amount of free space to report
+ * to user-space.
+ */
+long long ubifs_get_free_space(struct ubifs_info *c)
+{
+ long long free;
+
+ spin_lock(&c->space_lock);
+ free = ubifs_get_free_space_nolock(c);
+ spin_unlock(&c->space_lock);
+
+ return free;
+}
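
This is the usual lock-splitting pattern: the locked helper delegates to a _nolock variant for callers that already hold @c->space_lock, such as the debugging code added later in this series. A sketch of such a caller (the function itself is hypothetical):

	static long long sample_space_probe(struct ubifs_info *c)
	{
		long long free;

		spin_lock(&c->space_lock);
		/* ... other accounting work done under the lock ... */
		free = ubifs_get_free_space_nolock(c);
		spin_unlock(&c->space_lock);

		return free;
	}

Calling ubifs_get_free_space() here instead would self-deadlock on the spinlock, which is the whole reason for the split.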
diff --git a/fs/ubifs/debug.c b/fs/ubifs/debug.c
index 792c5a16c182..e975bd82f38b 100644
--- a/fs/ubifs/debug.c
+++ b/fs/ubifs/debug.c
@@ -620,9 +620,11 @@ void dbg_dump_budg(struct ubifs_info *c)
c->dark_wm, c->dead_wm, c->max_idx_node_sz);
printk(KERN_DEBUG "\tgc_lnum %d, ihead_lnum %d\n",
c->gc_lnum, c->ihead_lnum);
- for (i = 0; i < c->jhead_cnt; i++)
- printk(KERN_DEBUG "\tjhead %d\t LEB %d\n",
- c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum);
+ /* If we are in R/O mode, journal heads do not exist */
+ if (c->jheads)
+ for (i = 0; i < c->jhead_cnt; i++)
+ printk(KERN_DEBUG "\tjhead %d\t LEB %d\n",
+ c->jheads[i].wbuf.jhead, c->jheads[i].wbuf.lnum);
for (rb = rb_first(&c->buds); rb; rb = rb_next(rb)) {
bud = rb_entry(rb, struct ubifs_bud, rb);
printk(KERN_DEBUG "\tbud LEB %d\n", bud->lnum);
@@ -637,10 +639,7 @@ void dbg_dump_budg(struct ubifs_info *c)
/* Print budgeting predictions */
available = ubifs_calc_available(c, c->min_idx_lebs);
outstanding = c->budg_data_growth + c->budg_dd_growth;
- if (available > outstanding)
- free = ubifs_reported_space(c, available - outstanding);
- else
- free = 0;
+ free = ubifs_get_free_space_nolock(c);
printk(KERN_DEBUG "Budgeting predictions:\n");
printk(KERN_DEBUG "\tavailable: %lld, outstanding %lld, free %lld\n",
available, outstanding, free);
@@ -861,6 +860,65 @@ void dbg_dump_index(struct ubifs_info *c)
}
/**
+ * dbg_save_space_info - save information about flash space.
+ * @c: UBIFS file-system description object
+ *
+ * This function saves information about UBIFS free space, dirty space, etc, in
+ * order to check it later.
+ */
+void dbg_save_space_info(struct ubifs_info *c)
+{
+ struct ubifs_debug_info *d = c->dbg;
+
+ ubifs_get_lp_stats(c, &d->saved_lst);
+
+ spin_lock(&c->space_lock);
+ d->saved_free = ubifs_get_free_space_nolock(c);
+ spin_unlock(&c->space_lock);
+}
+
+/**
+ * dbg_check_space_info - check flash space information.
+ * @c: UBIFS file-system description object
+ *
+ * This function compares current flash space information with the information
+ * which was saved when the 'dbg_save_space_info()' function was called.
+ * Returns zero if the information has not changed, and %-EINVAL if it has
+ * changed.
+ */
+int dbg_check_space_info(struct ubifs_info *c)
+{
+ struct ubifs_debug_info *d = c->dbg;
+ struct ubifs_lp_stats lst;
+ long long avail, free;
+
+ spin_lock(&c->space_lock);
+ avail = ubifs_calc_available(c, c->min_idx_lebs);
+ spin_unlock(&c->space_lock);
+ free = ubifs_get_free_space(c);
+
+ if (free != d->saved_free) {
+ ubifs_err("free space changed from %lld to %lld",
+ d->saved_free, free);
+ goto out;
+ }
+
+ return 0;
+
+out:
+ ubifs_msg("saved lprops statistics dump");
+ dbg_dump_lstats(&d->saved_lst);
+ ubifs_get_lp_stats(c, &lst);
+ ubifs_msg("current lprops statistics dump");
+ dbg_dump_lstats(&lst);
+ spin_lock(&c->space_lock);
+ dbg_dump_budg(c);
+ spin_unlock(&c->space_lock);
+ dump_stack();
+ return -EINVAL;
+}
+
+/**
* dbg_check_synced_i_size - check synchronized inode size.
* @inode: inode to check
*
@@ -1349,7 +1407,7 @@ int dbg_check_tnc(struct ubifs_info *c, int extra)
* @c: UBIFS file-system description object
* @leaf_cb: called for each leaf node
* @znode_cb: called for each indexing node
- * @priv: private date which is passed to callbacks
+ * @priv: private data which is passed to callbacks
*
* This function walks the UBIFS index and calls the @leaf_cb for each leaf
* node and @znode_cb for each indexing node. Returns zero in case of success
@@ -2409,7 +2467,7 @@ void ubifs_debugging_exit(struct ubifs_info *c)
* Root directory for UBIFS stuff in debugfs. Contains sub-directories which
* contain the stuff specific to particular file-system mounts.
*/
-static struct dentry *debugfs_rootdir;
+static struct dentry *dfs_rootdir;
/**
* dbg_debugfs_init - initialize debugfs file-system.
@@ -2421,9 +2479,9 @@ static struct dentry *debugfs_rootdir;
*/
int dbg_debugfs_init(void)
{
- debugfs_rootdir = debugfs_create_dir("ubifs", NULL);
- if (IS_ERR(debugfs_rootdir)) {
- int err = PTR_ERR(debugfs_rootdir);
+ dfs_rootdir = debugfs_create_dir("ubifs", NULL);
+ if (IS_ERR(dfs_rootdir)) {
+ int err = PTR_ERR(dfs_rootdir);
ubifs_err("cannot create \"ubifs\" debugfs directory, "
"error %d\n", err);
return err;
@@ -2437,7 +2495,7 @@ int dbg_debugfs_init(void)
*/
void dbg_debugfs_exit(void)
{
- debugfs_remove(debugfs_rootdir);
+ debugfs_remove(dfs_rootdir);
}
static int open_debugfs_file(struct inode *inode, struct file *file)
@@ -2452,13 +2510,13 @@ static ssize_t write_debugfs_file(struct file *file, const char __user *buf,
struct ubifs_info *c = file->private_data;
struct ubifs_debug_info *d = c->dbg;
- if (file->f_path.dentry == d->dump_lprops)
+ if (file->f_path.dentry == d->dfs_dump_lprops)
dbg_dump_lprops(c);
- else if (file->f_path.dentry == d->dump_budg) {
+ else if (file->f_path.dentry == d->dfs_dump_budg) {
spin_lock(&c->space_lock);
dbg_dump_budg(c);
spin_unlock(&c->space_lock);
- } else if (file->f_path.dentry == d->dump_tnc) {
+ } else if (file->f_path.dentry == d->dfs_dump_tnc) {
mutex_lock(&c->tnc_mutex);
dbg_dump_tnc(c);
mutex_unlock(&c->tnc_mutex);
@@ -2469,7 +2527,7 @@ static ssize_t write_debugfs_file(struct file *file, const char __user *buf,
return count;
}
-static const struct file_operations debugfs_fops = {
+static const struct file_operations dfs_fops = {
.open = open_debugfs_file,
.write = write_debugfs_file,
.owner = THIS_MODULE,
@@ -2494,36 +2552,32 @@ int dbg_debugfs_init_fs(struct ubifs_info *c)
struct dentry *dent;
struct ubifs_debug_info *d = c->dbg;
- sprintf(d->debugfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
- d->debugfs_dir = debugfs_create_dir(d->debugfs_dir_name,
- debugfs_rootdir);
- if (IS_ERR(d->debugfs_dir)) {
- err = PTR_ERR(d->debugfs_dir);
+ sprintf(d->dfs_dir_name, "ubi%d_%d", c->vi.ubi_num, c->vi.vol_id);
+ d->dfs_dir = debugfs_create_dir(d->dfs_dir_name, dfs_rootdir);
+ if (IS_ERR(d->dfs_dir)) {
+ err = PTR_ERR(d->dfs_dir);
ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
- d->debugfs_dir_name, err);
+ d->dfs_dir_name, err);
goto out;
}
fname = "dump_lprops";
- dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c,
- &debugfs_fops);
+ dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops);
if (IS_ERR(dent))
goto out_remove;
- d->dump_lprops = dent;
+ d->dfs_dump_lprops = dent;
fname = "dump_budg";
- dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c,
- &debugfs_fops);
+ dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops);
if (IS_ERR(dent))
goto out_remove;
- d->dump_budg = dent;
+ d->dfs_dump_budg = dent;
fname = "dump_tnc";
- dent = debugfs_create_file(fname, S_IWUGO, d->debugfs_dir, c,
- &debugfs_fops);
+ dent = debugfs_create_file(fname, S_IWUGO, d->dfs_dir, c, &dfs_fops);
if (IS_ERR(dent))
goto out_remove;
- d->dump_tnc = dent;
+ d->dfs_dump_tnc = dent;
return 0;
@@ -2531,7 +2585,7 @@ out_remove:
err = PTR_ERR(dent);
ubifs_err("cannot create \"%s\" debugfs directory, error %d\n",
fname, err);
- debugfs_remove_recursive(d->debugfs_dir);
+ debugfs_remove_recursive(d->dfs_dir);
out:
return err;
}
@@ -2542,7 +2596,7 @@ out:
*/
void dbg_debugfs_exit_fs(struct ubifs_info *c)
{
- debugfs_remove_recursive(c->dbg->debugfs_dir);
+ debugfs_remove_recursive(c->dbg->dfs_dir);
}
#endif /* CONFIG_UBIFS_FS_DEBUG */
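
The two helpers above are intended as a snapshot/compare pair around operations that must leave space accounting untouched; roughly (a sketch, with do_remount_work() standing in for the real operation):

	static int checked_operation(struct ubifs_info *c)
	{
		int err;

		dbg_save_space_info(c);		/* snapshot lprops stats and free space */
		err = do_remount_work(c);	/* hypothetical operation under test */
		if (err)
			return err;

		return dbg_check_space_info(c);	/* -EINVAL if accounting drifted */
	}

The super.c hunks later in this patch wire exactly this pairing into the R/O and R/W re-mount paths.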
diff --git a/fs/ubifs/debug.h b/fs/ubifs/debug.h
index 9820d6999f7e..c1cd73b2e06e 100644
--- a/fs/ubifs/debug.h
+++ b/fs/ubifs/debug.h
@@ -41,15 +41,17 @@
* @chk_lpt_wastage: used by LPT tree size checker
* @chk_lpt_lebs: used by LPT tree size checker
* @new_nhead_offs: used by LPT tree size checker
- * @new_ihead_lnum: used by debugging to check ihead_lnum
- * @new_ihead_offs: used by debugging to check ihead_offs
+ * @new_ihead_lnum: used by debugging to check @c->ihead_lnum
+ * @new_ihead_offs: used by debugging to check @c->ihead_offs
*
- * debugfs_dir_name: name of debugfs directory containing this file-system's
- * files
- * debugfs_dir: direntry object of the file-system debugfs directory
- * dump_lprops: "dump lprops" debugfs knob
- * dump_budg: "dump budgeting information" debugfs knob
- * dump_tnc: "dump TNC" debugfs knob
+ * @saved_lst: saved lprops statistics (used by 'dbg_save_space_info()')
+ * @saved_free: saved free space (used by 'dbg_save_space_info()')
+ *
+ * dfs_dir_name: name of debugfs directory containing this file-system's files
+ * dfs_dir: direntry object of the file-system debugfs directory
+ * dfs_dump_lprops: "dump lprops" debugfs knob
+ * dfs_dump_budg: "dump budgeting information" debugfs knob
+ * dfs_dump_tnc: "dump TNC" debugfs knob
*/
struct ubifs_debug_info {
void *buf;
@@ -69,11 +71,14 @@ struct ubifs_debug_info {
int new_ihead_lnum;
int new_ihead_offs;
- char debugfs_dir_name[100];
- struct dentry *debugfs_dir;
- struct dentry *dump_lprops;
- struct dentry *dump_budg;
- struct dentry *dump_tnc;
+ struct ubifs_lp_stats saved_lst;
+ long long saved_free;
+
+ char dfs_dir_name[100];
+ struct dentry *dfs_dir;
+ struct dentry *dfs_dump_lprops;
+ struct dentry *dfs_dump_budg;
+ struct dentry *dfs_dump_tnc;
};
#define ubifs_assert(expr) do { \
@@ -297,7 +302,8 @@ int dbg_walk_index(struct ubifs_info *c, dbg_leaf_callback leaf_cb,
dbg_znode_callback znode_cb, void *priv);
/* Checking functions */
-
+void dbg_save_space_info(struct ubifs_info *c);
+int dbg_check_space_info(struct ubifs_info *c);
int dbg_check_lprops(struct ubifs_info *c);
int dbg_old_index_check_init(struct ubifs_info *c, struct ubifs_zbranch *zroot);
int dbg_check_old_index(struct ubifs_info *c, struct ubifs_zbranch *zroot);
@@ -439,6 +445,8 @@ void dbg_debugfs_exit_fs(struct ubifs_info *c);
#define dbg_walk_index(c, leaf_cb, znode_cb, priv) 0
#define dbg_old_index_check_init(c, zroot) 0
+#define dbg_save_space_info(c) ({})
+#define dbg_check_space_info(c) 0
#define dbg_check_old_index(c, zroot) 0
#define dbg_check_cats(c) 0
#define dbg_check_ltab(c) 0
diff --git a/fs/ubifs/dir.c b/fs/ubifs/dir.c
index f448ab1f9c38..f55d523c52bb 100644
--- a/fs/ubifs/dir.c
+++ b/fs/ubifs/dir.c
@@ -482,30 +482,29 @@ static int ubifs_dir_release(struct inode *dir, struct file *file)
}
/**
- * lock_2_inodes - lock two UBIFS inodes.
+ * lock_2_inodes - a wrapper for locking two UBIFS inodes.
* @inode1: first inode
* @inode2: second inode
+ *
+ * We do not implement any tricks to guarantee strict lock ordering, because
+ * VFS has already done it for us on the @i_mutex. So this is just a simple
+ * wrapper function.
*/
static void lock_2_inodes(struct inode *inode1, struct inode *inode2)
{
- if (inode1->i_ino < inode2->i_ino) {
- mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_2);
- mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_3);
- } else {
- mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
- mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_3);
- }
+ mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
+ mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
}
/**
- * unlock_2_inodes - unlock two UBIFS inodes inodes.
+ * unlock_2_inodes - a wrapper for unlocking two UBIFS inodes.
* @inode1: first inode
* @inode2: second inode
*/
static void unlock_2_inodes(struct inode *inode1, struct inode *inode2)
{
- mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
+ mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
}
static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
@@ -527,6 +526,8 @@ static int ubifs_link(struct dentry *old_dentry, struct inode *dir,
dbg_gen("dent '%.*s' to ino %lu (nlink %d) in dir ino %lu",
dentry->d_name.len, dentry->d_name.name, inode->i_ino,
inode->i_nlink, dir->i_ino);
+ ubifs_assert(mutex_is_locked(&dir->i_mutex));
+ ubifs_assert(mutex_is_locked(&inode->i_mutex));
err = dbg_check_synced_i_size(inode);
if (err)
return err;
@@ -580,6 +581,8 @@ static int ubifs_unlink(struct inode *dir, struct dentry *dentry)
dbg_gen("dent '%.*s' from ino %lu (nlink %d) in dir ino %lu",
dentry->d_name.len, dentry->d_name.name, inode->i_ino,
inode->i_nlink, dir->i_ino);
+ ubifs_assert(mutex_is_locked(&dir->i_mutex));
+ ubifs_assert(mutex_is_locked(&inode->i_mutex));
err = dbg_check_synced_i_size(inode);
if (err)
return err;
@@ -667,7 +670,8 @@ static int ubifs_rmdir(struct inode *dir, struct dentry *dentry)
dbg_gen("directory '%.*s', ino %lu in dir ino %lu", dentry->d_name.len,
dentry->d_name.name, inode->i_ino, dir->i_ino);
-
+ ubifs_assert(mutex_is_locked(&dir->i_mutex));
+ ubifs_assert(mutex_is_locked(&inode->i_mutex));
err = check_dir_empty(c, dentry->d_inode);
if (err)
return err;
@@ -922,59 +926,30 @@ out_budg:
}
/**
- * lock_3_inodes - lock three UBIFS inodes for rename.
+ * lock_3_inodes - a wrapper for locking three UBIFS inodes.
* @inode1: first inode
* @inode2: second inode
* @inode3: third inode
*
- * For 'ubifs_rename()', @inode1 may be the same as @inode2 whereas @inode3 may
- * be null.
+ * This function is used for 'ubifs_rename()' and @inode1 may be the same as
+ * @inode2 whereas @inode3 may be %NULL.
+ *
+ * We do not implement any tricks to guarantee strict lock ordering, because
+ * VFS has already done it for us on the @i_mutex. So this is just a simple
+ * wrapper function.
*/
static void lock_3_inodes(struct inode *inode1, struct inode *inode2,
struct inode *inode3)
{
- struct inode *i1, *i2, *i3;
-
- if (!inode3) {
- if (inode1 != inode2) {
- lock_2_inodes(inode1, inode2);
- return;
- }
- mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
- return;
- }
-
- if (inode1 == inode2) {
- lock_2_inodes(inode1, inode3);
- return;
- }
-
- /* 3 different inodes */
- if (inode1 < inode2) {
- i3 = inode2;
- if (inode1 < inode3) {
- i1 = inode1;
- i2 = inode3;
- } else {
- i1 = inode3;
- i2 = inode1;
- }
- } else {
- i3 = inode1;
- if (inode2 < inode3) {
- i1 = inode2;
- i2 = inode3;
- } else {
- i1 = inode3;
- i2 = inode2;
- }
- }
- mutex_lock_nested(&ubifs_inode(i1)->ui_mutex, WB_MUTEX_1);
- lock_2_inodes(i2, i3);
+ mutex_lock_nested(&ubifs_inode(inode1)->ui_mutex, WB_MUTEX_1);
+ if (inode2 != inode1)
+ mutex_lock_nested(&ubifs_inode(inode2)->ui_mutex, WB_MUTEX_2);
+ if (inode3)
+ mutex_lock_nested(&ubifs_inode(inode3)->ui_mutex, WB_MUTEX_3);
}
/**
- * unlock_3_inodes - unlock three UBIFS inodes for rename.
+ * unlock_3_inodes - a wrapper for unlocking three UBIFS inodes for rename.
* @inode1: first inode
* @inode2: second inode
* @inode3: third inode
@@ -982,11 +957,11 @@ static void lock_3_inodes(struct inode *inode1, struct inode *inode2,
static void unlock_3_inodes(struct inode *inode1, struct inode *inode2,
struct inode *inode3)
{
- mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
- if (inode1 != inode2)
- mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
if (inode3)
mutex_unlock(&ubifs_inode(inode3)->ui_mutex);
+ if (inode1 != inode2)
+ mutex_unlock(&ubifs_inode(inode2)->ui_mutex);
+ mutex_unlock(&ubifs_inode(inode1)->ui_mutex);
}
static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
@@ -1020,6 +995,11 @@ static int ubifs_rename(struct inode *old_dir, struct dentry *old_dentry,
"dir ino %lu", old_dentry->d_name.len, old_dentry->d_name.name,
old_inode->i_ino, old_dir->i_ino, new_dentry->d_name.len,
new_dentry->d_name.name, new_dir->i_ino);
+ ubifs_assert(mutex_is_locked(&old_dir->i_mutex));
+ ubifs_assert(mutex_is_locked(&new_dir->i_mutex));
+ if (unlink)
+ ubifs_assert(mutex_is_locked(&new_inode->i_mutex));
+
if (unlink && is_dir) {
err = check_dir_empty(c, new_inode);
@@ -1199,7 +1179,7 @@ int ubifs_getattr(struct vfsmount *mnt, struct dentry *dentry,
return 0;
}
-struct inode_operations ubifs_dir_inode_operations = {
+const struct inode_operations ubifs_dir_inode_operations = {
.lookup = ubifs_lookup,
.create = ubifs_create,
.link = ubifs_link,
@@ -1219,7 +1199,7 @@ struct inode_operations ubifs_dir_inode_operations = {
#endif
};
-struct file_operations ubifs_dir_operations = {
+const struct file_operations ubifs_dir_operations = {
.llseek = ubifs_dir_llseek,
.release = ubifs_dir_release,
.read = generic_read_dir,
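
The simplified wrappers lean on the VFS, which already serializes these paths under i_mutex in a well-defined order (hence the new assertions); the WB_MUTEX_* arguments are lockdep subclasses marking the fixed nesting as intentional. A minimal standalone illustration of the idiom (generic mutexes, made-up subclass names):

	#include <linux/mutex.h>

	enum { SUBCLASS_FIRST, SUBCLASS_SECOND };	/* illustrative */

	static void lock_pair(struct mutex *a, struct mutex *b)
	{
		/* Distinct subclasses tell lockdep that taking @b while
		 * holding @a is a deliberate fixed ordering, not a bug. */
		mutex_lock_nested(a, SUBCLASS_FIRST);
		mutex_lock_nested(b, SUBCLASS_SECOND);
	}

	static void unlock_pair(struct mutex *a, struct mutex *b)
	{
		mutex_unlock(b);	/* release in reverse order */
		mutex_unlock(a);
	}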
diff --git a/fs/ubifs/file.c b/fs/ubifs/file.c
index bf37374567fa..93b6de51f261 100644
--- a/fs/ubifs/file.c
+++ b/fs/ubifs/file.c
@@ -432,7 +432,6 @@ static int ubifs_write_begin(struct file *file, struct address_space *mapping,
int uninitialized_var(err), appending = !!(pos + len > inode->i_size);
struct page *page;
-
ubifs_assert(ubifs_inode(inode)->ui_size == inode->i_size);
if (unlikely(c->ro_media))
@@ -1541,7 +1540,7 @@ static int ubifs_file_mmap(struct file *file, struct vm_area_struct *vma)
return 0;
}
-struct address_space_operations ubifs_file_address_operations = {
+const struct address_space_operations ubifs_file_address_operations = {
.readpage = ubifs_readpage,
.writepage = ubifs_writepage,
.write_begin = ubifs_write_begin,
@@ -1551,7 +1550,7 @@ struct address_space_operations ubifs_file_address_operations = {
.releasepage = ubifs_releasepage,
};
-struct inode_operations ubifs_file_inode_operations = {
+const struct inode_operations ubifs_file_inode_operations = {
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
#ifdef CONFIG_UBIFS_FS_XATTR
@@ -1562,14 +1561,14 @@ struct inode_operations ubifs_file_inode_operations = {
#endif
};
-struct inode_operations ubifs_symlink_inode_operations = {
+const struct inode_operations ubifs_symlink_inode_operations = {
.readlink = generic_readlink,
.follow_link = ubifs_follow_link,
.setattr = ubifs_setattr,
.getattr = ubifs_getattr,
};
-struct file_operations ubifs_file_operations = {
+const struct file_operations ubifs_file_operations = {
.llseek = generic_file_llseek,
.read = do_sync_read,
.write = do_sync_write,
diff --git a/fs/ubifs/gc.c b/fs/ubifs/gc.c
index 9832f9abe28e..a711d33b3d3e 100644
--- a/fs/ubifs/gc.c
+++ b/fs/ubifs/gc.c
@@ -31,6 +31,26 @@
* to be reused. Garbage collection will cause the number of dirty index nodes
* to grow, however sufficient space is reserved for the index to ensure the
* commit will never run out of space.
+ *
+ * Notes about the dead watermark. In the current UBIFS implementation we
+ * assume that LEBs which have less than @c->dead_wm bytes of free + dirty
+ * space are full and not worth garbage-collecting. The dead watermark is
+ * one min. I/O unit size, or the min. UBIFS node size, whichever is
+ * greater. Indeed, the UBIFS Garbage Collector has to synchronize the GC
+ * head's write buffer before returning, so this amounts to wasting one
+ * min. I/O unit. However, UBIFS GC can actually reclaim even very small
+ * pieces of dirty space by garbage-collecting enough dirty LEBs, but we do
+ * not bother doing this in this implementation.
+ *
+ * Notes about the dark watermark. The results of GC work depend on how big
+ * the UBIFS nodes GC deals with are. Large nodes make GC waste more space.
+ * Indeed, if GC moves data from LEB A to LEB B and the nodes in LEB A are
+ * large, GC would have to waste large pieces of free space at the end of
+ * LEB B, because nodes from LEB A would not fit. And the worst situation is
+ * when all nodes are of maximum size. So the dark watermark is the amount
+ * of free + dirty space in a LEB which is guaranteed to be reclaimable. If
+ * a LEB has less space, the GC might be unable to reclaim it. So, LEBs with
+ * free + dirty greater than the dark watermark are "good" LEBs from GC's
+ * point of view. The other LEBs are not so good, and GC takes extra care
+ * when moving them.
*/
#include <linux/pagemap.h>
@@ -381,7 +401,7 @@ int ubifs_garbage_collect_leb(struct ubifs_info *c, struct ubifs_lprops *lp)
/*
* Don't release the LEB until after the next commit, because
- * it may contain date which is needed for recovery. So
+ * it may contain data which is needed for recovery. So
* although we freed this LEB, it will become usable only after
* the commit.
*/
@@ -810,8 +830,9 @@ out:
* ubifs_destroy_idx_gc - destroy idx_gc list.
* @c: UBIFS file-system description object
*
- * This function destroys the idx_gc list. It is called when unmounting or
- * remounting read-only so locks are not needed.
+ * This function destroys the @c->idx_gc list. It is called when unmounting,
+ * so locks are not needed.
*/
void ubifs_destroy_idx_gc(struct ubifs_info *c)
{
@@ -824,7 +845,6 @@ void ubifs_destroy_idx_gc(struct ubifs_info *c)
list_del(&idx_gc->list);
kfree(idx_gc);
}
-
}
/**
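
The watermark notes added above boil down to two ALIGN() computations (see the init_constants_early() hunk in super.c later in this patch). A worked example with illustrative numbers (the node-size constants here are assumptions, not the real UBIFS values):

	#define ALIGN(x, a) (((x) + (a) - 1) & ~((a) - 1))

	int min_io_size = 2048;			/* assumed flash page size */
	int dead_wm = ALIGN(56, min_io_size);	/* min. node size -> 2048 */
	int dark_wm = ALIGN(4144, min_io_size);	/* max. node size -> 6144 */

	/* free + dirty <  dead_wm: LEB is treated as full, not GC'ed;
	 * free + dirty >= dark_wm: the space is guaranteed reclaimable. */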
diff --git a/fs/ubifs/io.c b/fs/ubifs/io.c
index 01682713af69..e8e632a1dcdf 100644
--- a/fs/ubifs/io.c
+++ b/fs/ubifs/io.c
@@ -29,7 +29,7 @@
* would have been wasted for padding to the nearest minimal I/O unit boundary.
* Instead, data first goes to the write-buffer and is flushed when the
* buffer is full or when it is not used for some time (by timer). This is
- * similarto the mechanism is used by JFFS2.
+ * similar to the mechanism used by JFFS2.
*
* Write-buffers are defined by 'struct ubifs_wbuf' objects and protected by
* mutexes defined inside these objects. Since sometimes upper-level code
@@ -75,7 +75,7 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
* @lnum: logical eraseblock number
* @offs: offset within the logical eraseblock
* @quiet: print no messages
- * @chk_crc: indicates whether to always check the CRC
+ * @must_chk_crc: indicates whether to always check the CRC
*
* This function checks node magic number and CRC checksum. This function also
* validates node length to prevent UBIFS from becoming crazy when an attacker
@@ -83,11 +83,17 @@ void ubifs_ro_mode(struct ubifs_info *c, int err)
* node length in the common header could cause UBIFS to read memory outside of
* allocated buffer when checking the CRC checksum.
*
- * This function returns zero in case of success %-EUCLEAN in case of bad CRC
- * or magic.
+ * This function may skip checking the CRCs of data nodes if
+ * @c->no_chk_data_crc is true, which is controlled by the corresponding
+ * UBIFS mount option. However, if @must_chk_crc is true, then
+ * @c->no_chk_data_crc is ignored and the CRC is checked. Similarly, if
+ * @c->always_chk_crc is true, @c->no_chk_data_crc is ignored and the CRC is
+ * checked.
+ *
+ * This function returns zero in case of success and %-EUCLEAN in case of bad
+ * CRC or magic.
*/
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
- int offs, int quiet, int chk_crc)
+ int offs, int quiet, int must_chk_crc)
{
int err = -EINVAL, type, node_len;
uint32_t crc, node_crc, magic;
@@ -123,9 +129,9 @@ int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
node_len > c->ranges[type].max_len)
goto out_len;
- if (!chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc)
- if (c->no_chk_data_crc)
- return 0;
+ if (!must_chk_crc && type == UBIFS_DATA_NODE && !c->always_chk_crc &&
+ c->no_chk_data_crc)
+ return 0;
crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
node_crc = le32_to_cpu(ch->crc);
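
After the rewrite the skip decision is one flat conjunction. Spelled out as a standalone predicate (a sketch mirroring the condition above):

	static int may_skip_crc(int must_chk_crc, int is_data_node,
				int always_chk_crc, int no_chk_data_crc)
	{
		return !must_chk_crc &&		/* caller did not force checking */
		       is_data_node &&		/* only data nodes may be skipped */
		       !always_chk_crc &&	/* e.g. not during recovery */
		       no_chk_data_crc;		/* mount option asks for it */
	}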
diff --git a/fs/ubifs/journal.c b/fs/ubifs/journal.c
index 9b7c54e0cd2a..a11ca0958a23 100644
--- a/fs/ubifs/journal.c
+++ b/fs/ubifs/journal.c
@@ -208,7 +208,7 @@ again:
offs = 0;
out:
- err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, UBI_SHORTTERM);
+ err = ubifs_wbuf_seek_nolock(wbuf, lnum, offs, wbuf->dtype);
if (err)
goto out_unlock;
diff --git a/fs/ubifs/lprops.c b/fs/ubifs/lprops.c
index dfd2bcece27a..4cdd284dea56 100644
--- a/fs/ubifs/lprops.c
+++ b/fs/ubifs/lprops.c
@@ -635,10 +635,10 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c,
* @c: UBIFS file-system description object
* @st: return statistics
*/
-void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *st)
+void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst)
{
spin_lock(&c->space_lock);
- memcpy(st, &c->lst, sizeof(struct ubifs_lp_stats));
+ memcpy(lst, &c->lst, sizeof(struct ubifs_lp_stats));
spin_unlock(&c->space_lock);
}
@@ -678,6 +678,9 @@ int ubifs_change_one_lp(struct ubifs_info *c, int lnum, int free, int dirty,
out:
ubifs_release_lprops(c);
+ if (err)
+ ubifs_err("cannot change properties of LEB %d, error %d",
+ lnum, err);
return err;
}
@@ -714,6 +717,9 @@ int ubifs_update_one_lp(struct ubifs_info *c, int lnum, int free, int dirty,
out:
ubifs_release_lprops(c);
+ if (err)
+ ubifs_err("cannot update properties of LEB %d, error %d",
+ lnum, err);
return err;
}
@@ -737,6 +743,8 @@ int ubifs_read_one_lp(struct ubifs_info *c, int lnum, struct ubifs_lprops *lp)
lpp = ubifs_lpt_lookup(c, lnum);
if (IS_ERR(lpp)) {
err = PTR_ERR(lpp);
+ ubifs_err("cannot read properties of LEB %d, error %d",
+ lnum, err);
goto out;
}
diff --git a/fs/ubifs/lpt_commit.c b/fs/ubifs/lpt_commit.c
index 96ca95707175..3216a1f277f8 100644
--- a/fs/ubifs/lpt_commit.c
+++ b/fs/ubifs/lpt_commit.c
@@ -556,23 +556,23 @@ no_space:
}
/**
- * next_pnode - find next pnode.
+ * next_pnode_to_dirty - find next pnode to dirty.
* @c: UBIFS file-system description object
* @pnode: pnode
*
- * This function returns the next pnode or %NULL if there are no more pnodes.
+ * This function returns the next pnode to dirty or %NULL if there are no more
+ * pnodes. Note that pnodes that have never been written (lnum == 0) are
+ * skipped.
*/
-static struct ubifs_pnode *next_pnode(struct ubifs_info *c,
- struct ubifs_pnode *pnode)
+static struct ubifs_pnode *next_pnode_to_dirty(struct ubifs_info *c,
+ struct ubifs_pnode *pnode)
{
struct ubifs_nnode *nnode;
int iip;
/* Try to go right */
nnode = pnode->parent;
- iip = pnode->iip + 1;
- if (iip < UBIFS_LPT_FANOUT) {
- /* We assume here that LEB zero is never an LPT LEB */
+ for (iip = pnode->iip + 1; iip < UBIFS_LPT_FANOUT; iip++) {
if (nnode->nbranch[iip].lnum)
return ubifs_get_pnode(c, nnode, iip);
}
@@ -583,8 +583,11 @@ static struct ubifs_pnode *next_pnode(struct ubifs_info *c,
nnode = nnode->parent;
if (!nnode)
return NULL;
- /* We assume here that LEB zero is never an LPT LEB */
- } while (iip >= UBIFS_LPT_FANOUT || !nnode->nbranch[iip].lnum);
+ for (; iip < UBIFS_LPT_FANOUT; iip++) {
+ if (nnode->nbranch[iip].lnum)
+ break;
+ }
+ } while (iip >= UBIFS_LPT_FANOUT);
/* Go right */
nnode = ubifs_get_nnode(c, nnode, iip);
@@ -593,12 +596,29 @@ static struct ubifs_pnode *next_pnode(struct ubifs_info *c,
/* Go down to level 1 */
while (nnode->level > 1) {
- nnode = ubifs_get_nnode(c, nnode, 0);
+ for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++) {
+ if (nnode->nbranch[iip].lnum)
+ break;
+ }
+ if (iip >= UBIFS_LPT_FANOUT) {
+ /*
+ * Should not happen, but we need to keep going
+ * if it does.
+ */
+ iip = 0;
+ }
+ nnode = ubifs_get_nnode(c, nnode, iip);
if (IS_ERR(nnode))
return (void *)nnode;
}
- return ubifs_get_pnode(c, nnode, 0);
+ for (iip = 0; iip < UBIFS_LPT_FANOUT; iip++)
+ if (nnode->nbranch[iip].lnum)
+ break;
+ if (iip >= UBIFS_LPT_FANOUT)
+ /* Should not happen, but we need to keep going if it does */
+ iip = 0;
+ return ubifs_get_pnode(c, nnode, iip);
}
/**
@@ -688,7 +708,7 @@ static int make_tree_dirty(struct ubifs_info *c)
pnode = pnode_lookup(c, 0);
while (pnode) {
do_make_pnode_dirty(c, pnode);
- pnode = next_pnode(c, pnode);
+ pnode = next_pnode_to_dirty(c, pnode);
if (IS_ERR(pnode))
return PTR_ERR(pnode);
}
diff --git a/fs/ubifs/master.c b/fs/ubifs/master.c
index 71d5493bf565..a88f33801b98 100644
--- a/fs/ubifs/master.c
+++ b/fs/ubifs/master.c
@@ -354,7 +354,7 @@ int ubifs_write_master(struct ubifs_info *c)
int err, lnum, offs, len;
if (c->ro_media)
- return -EINVAL;
+ return -EROFS;
lnum = UBIFS_MST_LNUM;
offs = c->mst_offs + c->mst_node_alsz;
diff --git a/fs/ubifs/orphan.c b/fs/ubifs/orphan.c
index 9e6f403f170e..152a7b34a141 100644
--- a/fs/ubifs/orphan.c
+++ b/fs/ubifs/orphan.c
@@ -46,7 +46,7 @@
* Orphans are accumulated in a rb-tree. When an inode's link count drops to
* zero, the inode number is added to the rb-tree. It is removed from the tree
* when the inode is deleted. Any new orphans that are in the orphan tree when
- * the commit is run, are written to the orphan area in 1 or more orph nodes.
+ * the commit is run are written to the orphan area in 1 or more orphan nodes.
* If the orphan area is full, it is consolidated to make space. There is
* always enough space because validation prevents the user from creating more
* than the maximum number of orphans allowed.
@@ -231,7 +231,7 @@ static int tot_avail_orphs(struct ubifs_info *c)
}
/**
- * do_write_orph_node - write a node
+ * do_write_orph_node - write a node to the orphan head.
* @c: UBIFS file-system description object
* @len: length of node
* @atomic: write atomically
@@ -264,11 +264,11 @@ static int do_write_orph_node(struct ubifs_info *c, int len, int atomic)
}
/**
- * write_orph_node - write an orph node
+ * write_orph_node - write an orphan node.
* @c: UBIFS file-system description object
* @atomic: write atomically
*
- * This function builds an orph node from the cnext list and writes it to the
+ * This function builds an orphan node from the cnext list and writes it to the
* orphan head. On success, %0 is returned, otherwise a negative error code
* is returned.
*/
@@ -326,11 +326,11 @@ static int write_orph_node(struct ubifs_info *c, int atomic)
}
/**
- * write_orph_nodes - write orph nodes until there are no more to commit
+ * write_orph_nodes - write orphan nodes until there are no more to commit.
* @c: UBIFS file-system description object
* @atomic: write atomically
*
- * This function writes orph nodes for all the orphans to commit. On success,
+ * This function writes orphan nodes for all the orphans to commit. On success,
* %0 is returned, otherwise a negative error code is returned.
*/
static int write_orph_nodes(struct ubifs_info *c, int atomic)
@@ -478,14 +478,14 @@ int ubifs_orphan_end_commit(struct ubifs_info *c)
}
/**
- * clear_orphans - erase all LEBs used for orphans.
+ * ubifs_clear_orphans - erase all LEBs used for orphans.
* @c: UBIFS file-system description object
*
* If recovery is not required, then the orphans from the previous session
* are not needed. This function locates the LEBs used to record
* orphans, and un-maps them.
*/
-static int clear_orphans(struct ubifs_info *c)
+int ubifs_clear_orphans(struct ubifs_info *c)
{
int lnum, err;
@@ -547,9 +547,9 @@ static int insert_dead_orphan(struct ubifs_info *c, ino_t inum)
* do_kill_orphans - remove orphan inodes from the index.
* @c: UBIFS file-system description object
* @sleb: scanned LEB
- * @last_cmt_no: cmt_no of last orph node read is passed and returned here
+ * @last_cmt_no: cmt_no of last orphan node read is passed and returned here
* @outofdate: whether the LEB is out of date is returned here
- * @last_flagged: whether the end orph node is encountered
+ * @last_flagged: whether the end orphan node is encountered
*
* This function is a helper to the 'kill_orphans()' function. It goes through
* every orphan node in a LEB and for every inode number recorded, removes
@@ -580,8 +580,8 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
/*
* The commit number on the master node may be less, because
* of a failed commit. If there are several failed commits in a
- * row, the commit number written on orph nodes will continue to
- * increase (because the commit number is adjusted here) even
+ * row, the commit number written on orphan nodes will continue
+ * to increase (because the commit number is adjusted here) even
* though the commit number on the master node stays the same
* because the master node has not been re-written.
*/
@@ -589,9 +589,9 @@ static int do_kill_orphans(struct ubifs_info *c, struct ubifs_scan_leb *sleb,
c->cmt_no = cmt_no;
if (cmt_no < *last_cmt_no && *last_flagged) {
/*
- * The last orph node had a higher commit number and was
- * flagged as the last written for that commit number.
- * That makes this orph node, out of date.
+ * The last orphan node had a higher commit number and
+ * was flagged as the last written for that commit
+ * number. That makes this orphan node out of date.
*/
if (!first) {
ubifs_err("out of order commit number %llu in "
@@ -658,10 +658,10 @@ static int kill_orphans(struct ubifs_info *c)
/*
* Orph nodes always start at c->orph_first and are written to each
* successive LEB in turn. Generally unused LEBs will have been unmapped
- * but may contain out of date orph nodes if the unmap didn't go
- * through. In addition, the last orph node written for each commit is
+ * but may contain out of date orphan nodes if the unmap didn't go
+ * through. In addition, the last orphan node written for each commit is
* marked (top bit of orph->cmt_no is set to 1). It is possible that
- * there are orph nodes from the next commit (i.e. the commit did not
+ * there are orphan nodes from the next commit (i.e. the commit did not
* complete successfully). In that case, no orphans will have been lost
* due to the way that orphans are written, and any orphans added will
* be valid orphans anyway and so can be deleted.
@@ -718,7 +718,7 @@ int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only)
if (unclean)
err = kill_orphans(c);
else if (!read_only)
- err = clear_orphans(c);
+ err = ubifs_clear_orphans(c);
return err;
}
diff --git a/fs/ubifs/super.c b/fs/ubifs/super.c
index 89556ee72518..1182b66a5491 100644
--- a/fs/ubifs/super.c
+++ b/fs/ubifs/super.c
@@ -397,6 +397,7 @@ static int ubifs_statfs(struct dentry *dentry, struct kstatfs *buf)
buf->f_namelen = UBIFS_MAX_NLEN;
buf->f_fsid.val[0] = le32_to_cpu(uuid[0]) ^ le32_to_cpu(uuid[2]);
buf->f_fsid.val[1] = le32_to_cpu(uuid[1]) ^ le32_to_cpu(uuid[3]);
+ ubifs_assert(buf->f_bfree <= c->block_cnt);
return 0;
}
@@ -432,33 +433,24 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
int i, err;
struct ubifs_info *c = sb->s_fs_info;
struct writeback_control wbc = {
- .sync_mode = wait ? WB_SYNC_ALL : WB_SYNC_NONE,
+ .sync_mode = WB_SYNC_ALL,
.range_start = 0,
.range_end = LLONG_MAX,
.nr_to_write = LONG_MAX,
};
/*
- * Note by akpm about WB_SYNC_NONE used above: zero @wait is just an
- * advisory thing to help the file system shove lots of data into the
- * queues. If some gets missed then it'll be picked up on the second
+ * Zero @wait is just an advisory thing to help the file system shove
+ * lots of data into the queues, and there will be a second
* '->sync_fs()' call, with non-zero @wait.
*/
+ if (!wait)
+ return 0;
if (sb->s_flags & MS_RDONLY)
return 0;
/*
- * Synchronize write buffers, because 'ubifs_run_commit()' does not
- * do this if it waits for an already running commit.
- */
- for (i = 0; i < c->jhead_cnt; i++) {
- err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
- if (err)
- return err;
- }
-
- /*
* VFS calls '->sync_fs()' before synchronizing all dirty inodes and
* pages, so synchronize them first, then commit the journal. Strictly
* speaking, it is not necessary to commit the journal here,
@@ -469,6 +461,16 @@ static int ubifs_sync_fs(struct super_block *sb, int wait)
*/
generic_sync_sb_inodes(sb, &wbc);
+ /*
+ * Synchronize write buffers, because 'ubifs_run_commit()' does not
+ * do this if it waits for an already running commit.
+ */
+ for (i = 0; i < c->jhead_cnt; i++) {
+ err = ubifs_wbuf_sync(&c->jheads[i].wbuf);
+ if (err)
+ return err;
+ }
+
err = ubifs_run_commit(c);
if (err)
return err;
@@ -572,15 +574,8 @@ static int init_constants_early(struct ubifs_info *c)
c->ranges[UBIFS_IDX_NODE].max_len = INT_MAX;
/*
- * Initialize dead and dark LEB space watermarks.
- *
- * Dead space is the space which cannot be used. Its watermark is
- * equivalent to min. I/O unit or minimum node size if it is greater
- * then min. I/O unit.
- *
- * Dark space is the space which might be used, or might not, depending
- * on which node should be written to the LEB. Its watermark is
- * equivalent to maximum UBIFS node size.
+ * Initialize dead and dark LEB space watermarks. See gc.c for comments
+ * about these values.
*/
c->dead_wm = ALIGN(MIN_WRITE_SZ, c->min_io_size);
c->dark_wm = ALIGN(UBIFS_MAX_NODE_SZ, c->min_io_size);
@@ -741,12 +736,12 @@ static void init_constants_master(struct ubifs_info *c)
* take_gc_lnum - reserve GC LEB.
* @c: UBIFS file-system description object
*
- * This function ensures that the LEB reserved for garbage collection is
- * unmapped and is marked as "taken" in lprops. We also have to set free space
- * to LEB size and dirty space to zero, because lprops may contain out-of-date
- * information if the file-system was un-mounted before it has been committed.
- * This function returns zero in case of success and a negative error code in
- * case of failure.
+ * This function ensures that the LEB reserved for garbage collection is marked
+ * as "taken" in lprops. We also have to set free space to LEB size and dirty
+ * space to zero, because lprops may contain out-of-date information if the
+ * file-system was un-mounted before it has been committed. This function
+ * returns zero in case of success and a negative error code in case of
+ * failure.
*/
static int take_gc_lnum(struct ubifs_info *c)
{
@@ -757,10 +752,6 @@ static int take_gc_lnum(struct ubifs_info *c)
return -EINVAL;
}
- err = ubifs_leb_unmap(c, c->gc_lnum);
- if (err)
- return err;
-
/* And we have to tell lprops that this LEB is taken */
err = ubifs_change_one_lp(c, c->gc_lnum, c->leb_size, 0,
LPROPS_TAKEN, 0, 0);
@@ -966,13 +957,16 @@ static int ubifs_parse_options(struct ubifs_info *c, char *options,
token = match_token(p, tokens, args);
switch (token) {
+ /*
+ * The %Opt_fast_unmount and %Opt_norm_unmount options are ignored. We
+ * accept them in order to be backward-compatible, but this should be
+ * removed at some point.
+ * should be removed at some point.
+ */
case Opt_fast_unmount:
c->mount_opts.unmount_mode = 2;
- c->fast_unmount = 1;
break;
case Opt_norm_unmount:
c->mount_opts.unmount_mode = 1;
- c->fast_unmount = 0;
break;
case Opt_bulk_read:
c->mount_opts.bulk_read = 2;
@@ -1094,12 +1088,7 @@ static int check_free_space(struct ubifs_info *c)
ubifs_err("insufficient free space to mount in read/write mode");
dbg_dump_budg(c);
dbg_dump_lprops(c);
- /*
- * We return %-EINVAL instead of %-ENOSPC because it seems to
- * be the closest error code mentioned in the mount function
- * documentation.
- */
- return -EINVAL;
+ return -ENOSPC;
}
return 0;
}
@@ -1286,10 +1275,19 @@ static int mount_ubifs(struct ubifs_info *c)
if (err)
goto out_orphans;
err = ubifs_rcvry_gc_commit(c);
- } else
+ } else {
err = take_gc_lnum(c);
- if (err)
- goto out_orphans;
+ if (err)
+ goto out_orphans;
+
+ /*
+ * GC LEB may contain garbage if there was an unclean
+ * reboot, and it should be un-mapped.
+ */
+ err = ubifs_leb_unmap(c, c->gc_lnum);
+ if (err)
+ return err;
+ }
err = dbg_check_lprops(c);
if (err)
@@ -1298,6 +1296,16 @@ static int mount_ubifs(struct ubifs_info *c)
err = ubifs_recover_size(c);
if (err)
goto out_orphans;
+ } else {
+ /*
+ * Even if we mount read-only, we have to set space in GC LEB
+ * to proper value because this affects UBIFS free space
+ * reporting. We do not want to have a situation when
+ * re-mounting from R/O to R/W changes amount of free space.
+ */
+ err = take_gc_lnum(c);
+ if (err)
+ goto out_orphans;
}
spin_lock(&ubifs_infos_lock);
@@ -1310,14 +1318,17 @@ static int mount_ubifs(struct ubifs_info *c)
else {
c->need_recovery = 0;
ubifs_msg("recovery completed");
+ /* GC LEB has to be empty and taken at this point */
+ ubifs_assert(c->lst.taken_empty_lebs == 1);
}
- }
+ } else
+ ubifs_assert(c->lst.taken_empty_lebs == 1);
- err = dbg_debugfs_init_fs(c);
+ err = dbg_check_filesystem(c);
if (err)
goto out_infos;
- err = dbg_check_filesystem(c);
+ err = dbg_debugfs_init_fs(c);
if (err)
goto out_infos;
@@ -1351,7 +1362,6 @@ static int mount_ubifs(struct ubifs_info *c)
c->uuid[4], c->uuid[5], c->uuid[6], c->uuid[7],
c->uuid[8], c->uuid[9], c->uuid[10], c->uuid[11],
c->uuid[12], c->uuid[13], c->uuid[14], c->uuid[15]);
- dbg_msg("fast unmount: %d", c->fast_unmount);
dbg_msg("big_lpt %d", c->big_lpt);
dbg_msg("log LEBs: %d (%d - %d)",
c->log_lebs, UBIFS_LOG_LNUM, c->log_last);
@@ -1475,10 +1485,8 @@ static int ubifs_remount_rw(struct ubifs_info *c)
{
int err, lnum;
- if (c->ro_media)
- return -EINVAL;
-
mutex_lock(&c->umount_mutex);
+ dbg_save_space_info(c);
c->remounting_rw = 1;
c->always_chk_crc = 1;
@@ -1514,6 +1522,12 @@ static int ubifs_remount_rw(struct ubifs_info *c)
err = ubifs_recover_inl_heads(c, c->sbuf);
if (err)
goto out;
+ } else {
+ /* A readonly mount is not allowed to have orphans */
+ ubifs_assert(c->tot_orphans == 0);
+ err = ubifs_clear_orphans(c);
+ if (err)
+ goto out;
}
if (!(c->mst_node->flags & cpu_to_le32(UBIFS_MST_DIRTY))) {
@@ -1569,7 +1583,7 @@ static int ubifs_remount_rw(struct ubifs_info *c)
if (c->need_recovery)
err = ubifs_rcvry_gc_commit(c);
else
- err = take_gc_lnum(c);
+ err = ubifs_leb_unmap(c, c->gc_lnum);
if (err)
goto out;
@@ -1582,8 +1596,9 @@ static int ubifs_remount_rw(struct ubifs_info *c)
c->vfs_sb->s_flags &= ~MS_RDONLY;
c->remounting_rw = 0;
c->always_chk_crc = 0;
+ err = dbg_check_space_info(c);
mutex_unlock(&c->umount_mutex);
- return 0;
+ return err;
out:
vfree(c->orph_buf);
@@ -1603,43 +1618,18 @@ out:
}
/**
- * commit_on_unmount - commit the journal when un-mounting.
- * @c: UBIFS file-system description object
- *
- * This function is called during un-mounting and re-mounting, and it commits
- * the journal unless the "fast unmount" mode is enabled.
- */
-static void commit_on_unmount(struct ubifs_info *c)
-{
- struct super_block *sb = c->vfs_sb;
- long long bud_bytes;
-
- /*
- * This function is called before the background thread is stopped, so
- * we may race with ongoing commit, which means we have to take
- * @c->bud_lock to access @c->bud_bytes.
- */
- spin_lock(&c->buds_lock);
- bud_bytes = c->bud_bytes;
- spin_unlock(&c->buds_lock);
-
- if (!c->fast_unmount && !(sb->s_flags & MS_RDONLY) && bud_bytes)
- ubifs_run_commit(c);
-}
-
-/**
* ubifs_remount_ro - re-mount in read-only mode.
* @c: UBIFS file-system description object
*
- * We rely on VFS to have stopped writing. Possibly the background thread could
- * be running a commit, however kthread_stop will wait in that case.
+ * We assume VFS has stopped writing. Possibly the background thread could be
+ * running a commit, however kthread_stop will wait in that case.
*/
static void ubifs_remount_ro(struct ubifs_info *c)
{
int i, err;
ubifs_assert(!c->need_recovery);
- commit_on_unmount(c);
+ ubifs_assert(!(c->vfs_sb->s_flags & MS_RDONLY));
mutex_lock(&c->umount_mutex);
if (c->bgt) {
@@ -1647,27 +1637,29 @@ static void ubifs_remount_ro(struct ubifs_info *c)
c->bgt = NULL;
}
+ dbg_save_space_info(c);
+
for (i = 0; i < c->jhead_cnt; i++) {
ubifs_wbuf_sync(&c->jheads[i].wbuf);
del_timer_sync(&c->jheads[i].wbuf.timer);
}
- if (!c->ro_media) {
- c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
- c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
- c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum);
- err = ubifs_write_master(c);
- if (err)
- ubifs_ro_mode(c, err);
- }
+ c->mst_node->flags &= ~cpu_to_le32(UBIFS_MST_DIRTY);
+ c->mst_node->flags |= cpu_to_le32(UBIFS_MST_NO_ORPHS);
+ c->mst_node->gc_lnum = cpu_to_le32(c->gc_lnum);
+ err = ubifs_write_master(c);
+ if (err)
+ ubifs_ro_mode(c, err);
- ubifs_destroy_idx_gc(c);
free_wbufs(c);
vfree(c->orph_buf);
c->orph_buf = NULL;
vfree(c->ileb_buf);
c->ileb_buf = NULL;
ubifs_lpt_free(c, 1);
+ err = dbg_check_space_info(c);
+ if (err)
+ ubifs_ro_mode(c, err);
mutex_unlock(&c->umount_mutex);
}
@@ -1760,11 +1752,20 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
}
if ((sb->s_flags & MS_RDONLY) && !(*flags & MS_RDONLY)) {
+ if (c->ro_media) {
+ ubifs_msg("cannot re-mount due to prior errors");
+ return -EROFS;
+ }
err = ubifs_remount_rw(c);
if (err)
return err;
- } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY))
+ } else if (!(sb->s_flags & MS_RDONLY) && (*flags & MS_RDONLY)) {
+ if (c->ro_media) {
+ ubifs_msg("cannot re-mount due to prior errors");
+ return -EROFS;
+ }
ubifs_remount_ro(c);
+ }
if (c->bulk_read == 1)
bu_init(c);
@@ -1774,10 +1775,11 @@ static int ubifs_remount_fs(struct super_block *sb, int *flags, char *data)
c->bu.buf = NULL;
}
+ ubifs_assert(c->lst.taken_empty_lebs == 1);
return 0;
}
-struct super_operations ubifs_super_operations = {
+const struct super_operations ubifs_super_operations = {
.alloc_inode = ubifs_alloc_inode,
.destroy_inode = ubifs_destroy_inode,
.put_super = ubifs_put_super,
@@ -2044,15 +2046,6 @@ out_close:
static void ubifs_kill_sb(struct super_block *sb)
{
- struct ubifs_info *c = sb->s_fs_info;
-
- /*
- * We do 'commit_on_unmount()' here instead of 'ubifs_put_super()'
- * in order to be outside BKL.
- */
- if (sb->s_root)
- commit_on_unmount(c);
- /* The un-mount routine is actually done in put_super() */
generic_shutdown_super(sb);
}
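
The reworked '->sync_fs()' encodes the VFS contract directly: the zero-@wait call is advisory and may do nothing, because a @wait == 1 call always follows. The resulting shape, much simplified (a sketch, not the full UBIFS function):

	static int sketch_sync_fs(struct super_block *sb, int wait)
	{
		if (!wait)
			return 0;	/* the @wait == 1 call does the work */
		if (sb->s_flags & MS_RDONLY)
			return 0;	/* nothing can be dirty on a R/O mount */

		/* 1. write back dirty inodes and pages;
		 * 2. sync the journal-head write-buffers (write-back may
		 *    have dirtied them, hence this order);
		 * 3. commit the journal. */
		return 0;
	}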
diff --git a/fs/ubifs/tnc.c b/fs/ubifs/tnc.c
index f7e36f545527..fa28a84c6a1b 100644
--- a/fs/ubifs/tnc.c
+++ b/fs/ubifs/tnc.c
@@ -443,6 +443,11 @@ static int tnc_read_node_nm(struct ubifs_info *c, struct ubifs_zbranch *zbr,
* This function performs that same function as ubifs_read_node except that
* it does not require that there is actually a node present and instead
* the return code indicates if a node was read.
+ *
+ * Note, this function does not check CRC of data nodes if @c->no_chk_data_crc
+ * is true (it is controlled by corresponding mount option). However, if
+ * @c->always_chk_crc is true, @c->no_chk_data_crc is ignored and CRC is always
+ * checked.
*/
static int try_read_node(const struct ubifs_info *c, void *buf, int type,
int len, int lnum, int offs)
@@ -470,9 +475,8 @@ static int try_read_node(const struct ubifs_info *c, void *buf, int type,
if (node_len != len)
return 0;
- if (type == UBIFS_DATA_NODE && !c->always_chk_crc)
- if (c->no_chk_data_crc)
- return 0;
+ if (type == UBIFS_DATA_NODE && !c->always_chk_crc && c->no_chk_data_crc)
+ return 1;
crc = crc32(UBIFS_CRC32_INIT, buf + 8, node_len - 8);
node_crc = le32_to_cpu(ch->crc);
@@ -1506,7 +1510,7 @@ out:
*
* Note, if the bulk-read buffer length (@bu->buf_len) is known, this function
* makes sure bulk-read nodes fit the buffer. Otherwise, this function prepares
- * maxumum possible amount of nodes for bulk-read.
+ * maximum possible amount of nodes for bulk-read.
*/
int ubifs_tnc_get_bu_keys(struct ubifs_info *c, struct bu_info *bu)
{
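
The change from 'return 0' to 'return 1' in try_read_node() matters because the return code means "was a valid node read": with the old code, skipping the CRC made a perfectly good node look absent. A caller-side sketch of the contract (fallback_read() is hypothetical):

	/* try_read_node() contract:
	 *   1  - a valid node was read (CRC possibly skipped by mount option)
	 *   0  - no valid node at that position
	 *  <0  - I/O error
	 */
	err = try_read_node(c, buf, UBIFS_DATA_NODE, len, lnum, offs);
	if (err < 0)
		return err;		/* propagate I/O failures */
	if (err == 0)
		return fallback_read();	/* hypothetical slow path */
	/* err == 1: the node is in @buf and may be used */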
diff --git a/fs/ubifs/ubifs.h b/fs/ubifs/ubifs.h
index fc2a4cc66d03..039a68bee29a 100644
--- a/fs/ubifs/ubifs.h
+++ b/fs/ubifs/ubifs.h
@@ -426,9 +426,9 @@ struct ubifs_unclean_leb {
* LEB properties flags.
*
* LPROPS_UNCAT: not categorized
- * LPROPS_DIRTY: dirty > 0, not index
+ * LPROPS_DIRTY: dirty > free, dirty >= @c->dead_wm, not index
* LPROPS_DIRTY_IDX: dirty + free > @c->min_idx_node_sze and index
- * LPROPS_FREE: free > 0, not empty, not index
+ * LPROPS_FREE: free > 0, dirty < @c->dead_wm, not empty, not index
* LPROPS_HEAP_CNT: number of heaps used for storing categorized LEBs
* LPROPS_EMPTY: LEB is empty, not taken
* LPROPS_FREEABLE: free + dirty == leb_size, not index, not taken
@@ -961,7 +961,6 @@ struct ubifs_debug_info;
* @cs_lock: commit state lock
* @cmt_wq: wait queue to sleep on if the log is full and a commit is running
*
- * @fast_unmount: do not run journal commit before un-mounting
* @big_lpt: flag that LPT is too big to write whole during commit
* @no_chk_data_crc: do not check CRCs when reading data nodes (except during
* recovery)
@@ -1202,7 +1201,6 @@ struct ubifs_info {
spinlock_t cs_lock;
wait_queue_head_t cmt_wq;
- unsigned int fast_unmount:1;
unsigned int big_lpt:1;
unsigned int no_chk_data_crc:1;
unsigned int bulk_read:1;
@@ -1405,13 +1403,13 @@ extern struct list_head ubifs_infos;
extern spinlock_t ubifs_infos_lock;
extern atomic_long_t ubifs_clean_zn_cnt;
extern struct kmem_cache *ubifs_inode_slab;
-extern struct super_operations ubifs_super_operations;
-extern struct address_space_operations ubifs_file_address_operations;
-extern struct file_operations ubifs_file_operations;
-extern struct inode_operations ubifs_file_inode_operations;
-extern struct file_operations ubifs_dir_operations;
-extern struct inode_operations ubifs_dir_inode_operations;
-extern struct inode_operations ubifs_symlink_inode_operations;
+extern const struct super_operations ubifs_super_operations;
+extern const struct address_space_operations ubifs_file_address_operations;
+extern const struct file_operations ubifs_file_operations;
+extern const struct inode_operations ubifs_file_inode_operations;
+extern const struct file_operations ubifs_dir_operations;
+extern const struct inode_operations ubifs_dir_inode_operations;
+extern const struct inode_operations ubifs_symlink_inode_operations;
extern struct backing_dev_info ubifs_backing_dev_info;
extern struct ubifs_compressor *ubifs_compressors[UBIFS_COMPR_TYPES_CNT];
@@ -1428,7 +1426,7 @@ int ubifs_read_node_wbuf(struct ubifs_wbuf *wbuf, void *buf, int type, int len,
int ubifs_write_node(struct ubifs_info *c, void *node, int len, int lnum,
int offs, int dtype);
int ubifs_check_node(const struct ubifs_info *c, const void *buf, int lnum,
- int offs, int quiet, int chk_crc);
+ int offs, int quiet, int must_chk_crc);
void ubifs_prepare_node(struct ubifs_info *c, void *buf, int len, int pad);
void ubifs_prep_grp_node(struct ubifs_info *c, void *node, int len, int last);
int ubifs_io_init(struct ubifs_info *c);
@@ -1495,6 +1493,7 @@ void ubifs_release_ino_dirty(struct ubifs_info *c, struct inode *inode,
void ubifs_cancel_ino_op(struct ubifs_info *c, struct inode *inode,
struct ubifs_budget_req *req);
long long ubifs_get_free_space(struct ubifs_info *c);
+long long ubifs_get_free_space_nolock(struct ubifs_info *c);
int ubifs_calc_min_idx_lebs(struct ubifs_info *c);
void ubifs_convert_page_budget(struct ubifs_info *c);
long long ubifs_reported_space(const struct ubifs_info *c, long long free);
@@ -1603,6 +1602,7 @@ void ubifs_delete_orphan(struct ubifs_info *c, ino_t inum);
int ubifs_orphan_start_commit(struct ubifs_info *c);
int ubifs_orphan_end_commit(struct ubifs_info *c);
int ubifs_mount_orphans(struct ubifs_info *c, int unclean, int read_only);
+int ubifs_clear_orphans(struct ubifs_info *c);
/* lpt.c */
int ubifs_calc_lpt_geom(struct ubifs_info *c);
@@ -1646,7 +1646,7 @@ const struct ubifs_lprops *ubifs_change_lp(struct ubifs_info *c,
const struct ubifs_lprops *lp,
int free, int dirty, int flags,
int idx_gc_cnt);
-void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *stats);
+void ubifs_get_lp_stats(struct ubifs_info *c, struct ubifs_lp_stats *lst);
void ubifs_add_to_cat(struct ubifs_info *c, struct ubifs_lprops *lprops,
int cat);
void ubifs_replace_cat(struct ubifs_info *c, struct ubifs_lprops *old_lprops,
diff --git a/fs/udf/Kconfig b/fs/udf/Kconfig
new file mode 100644
index 000000000000..0e0e99bd6bce
--- /dev/null
+++ b/fs/udf/Kconfig
@@ -0,0 +1,18 @@
+config UDF_FS
+ tristate "UDF file system support"
+ select CRC_ITU_T
+ help
+ This is the new file system used on some CD-ROMs and DVDs. Say Y if
+ you intend to mount DVD discs or CD-RW discs written in packet mode,
+ or discs written by other UDF utilities, such as DirectCD.
+ Please read <file:Documentation/filesystems/udf.txt>.
+
+ To compile this file system support as a module, choose M here: the
+ module will be called udf.
+
+ If unsure, say N.
+
+config UDF_NLS
+ bool
+ default y
+ depends on (UDF_FS=m && NLS) || (UDF_FS=y && NLS=y)
diff --git a/fs/udf/balloc.c b/fs/udf/balloc.c
index 1b809bd494bd..a208e3760b08 100644
--- a/fs/udf/balloc.c
+++ b/fs/udf/balloc.c
@@ -87,12 +87,12 @@ static int read_block_bitmap(struct super_block *sb,
{
struct buffer_head *bh = NULL;
int retval = 0;
- kernel_lb_addr loc;
+ struct kernel_lb_addr loc;
loc.logicalBlockNum = bitmap->s_extPosition;
loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
- bh = udf_tread(sb, udf_get_lb_pblock(sb, loc, block));
+ bh = udf_tread(sb, udf_get_lb_pblock(sb, &loc, block));
if (!bh)
retval = -EIO;
@@ -156,11 +156,13 @@ static bool udf_add_free_space(struct udf_sb_info *sbi,
static void udf_bitmap_free_blocks(struct super_block *sb,
struct inode *inode,
struct udf_bitmap *bitmap,
- kernel_lb_addr bloc, uint32_t offset,
+ struct kernel_lb_addr *bloc,
+ uint32_t offset,
uint32_t count)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct buffer_head *bh = NULL;
+ struct udf_part_map *partmap;
unsigned long block;
unsigned long block_group;
unsigned long bit;
@@ -169,17 +171,17 @@ static void udf_bitmap_free_blocks(struct super_block *sb,
unsigned long overflow;
mutex_lock(&sbi->s_alloc_mutex);
- if (bloc.logicalBlockNum < 0 ||
- (bloc.logicalBlockNum + count) >
- sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
+ partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
+ if (bloc->logicalBlockNum < 0 ||
+ (bloc->logicalBlockNum + count) >
+ partmap->s_partition_len) {
udf_debug("%d < %d || %d + %d > %d\n",
- bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
- sbi->s_partmaps[bloc.partitionReferenceNum].
- s_partition_len);
+ bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
+ count, partmap->s_partition_len);
goto error_return;
}
- block = bloc.logicalBlockNum + offset +
+ block = bloc->logicalBlockNum + offset +
(sizeof(struct spaceBitmapDesc) << 3);
do {
@@ -425,26 +427,28 @@ error_return:
static void udf_table_free_blocks(struct super_block *sb,
struct inode *inode,
struct inode *table,
- kernel_lb_addr bloc, uint32_t offset,
+ struct kernel_lb_addr *bloc,
+ uint32_t offset,
uint32_t count)
{
struct udf_sb_info *sbi = UDF_SB(sb);
+ struct udf_part_map *partmap;
uint32_t start, end;
uint32_t elen;
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
struct extent_position oepos, epos;
int8_t etype;
int i;
struct udf_inode_info *iinfo;
mutex_lock(&sbi->s_alloc_mutex);
- if (bloc.logicalBlockNum < 0 ||
- (bloc.logicalBlockNum + count) >
- sbi->s_partmaps[bloc.partitionReferenceNum].s_partition_len) {
+ partmap = &sbi->s_partmaps[bloc->partitionReferenceNum];
+ if (bloc->logicalBlockNum < 0 ||
+ (bloc->logicalBlockNum + count) >
+ partmap->s_partition_len) {
udf_debug("%d < %d || %d + %d > %d\n",
- bloc.logicalBlockNum, 0, bloc.logicalBlockNum, count,
- sbi->s_partmaps[bloc.partitionReferenceNum].
- s_partition_len);
+ bloc->logicalBlockNum, 0, bloc->logicalBlockNum,
+ count, partmap->s_partition_len);
goto error_return;
}
@@ -456,8 +460,8 @@ static void udf_table_free_blocks(struct super_block *sb,
if (udf_add_free_space(sbi, sbi->s_partition, count))
mark_buffer_dirty(sbi->s_lvid_bh);
- start = bloc.logicalBlockNum + offset;
- end = bloc.logicalBlockNum + offset + count - 1;
+ start = bloc->logicalBlockNum + offset;
+ end = bloc->logicalBlockNum + offset + count - 1;
epos.offset = oepos.offset = sizeof(struct unallocSpaceEntry);
elen = 0;
@@ -483,7 +487,7 @@ static void udf_table_free_blocks(struct super_block *sb,
start += count;
count = 0;
}
- udf_write_aext(table, &oepos, eloc, elen, 1);
+ udf_write_aext(table, &oepos, &eloc, elen, 1);
} else if (eloc.logicalBlockNum == (end + 1)) {
if ((0x3FFFFFFF - elen) <
(count << sb->s_blocksize_bits)) {
@@ -502,7 +506,7 @@ static void udf_table_free_blocks(struct super_block *sb,
end -= count;
count = 0;
}
- udf_write_aext(table, &oepos, eloc, elen, 1);
+ udf_write_aext(table, &oepos, &eloc, elen, 1);
}
if (epos.bh != oepos.bh) {
@@ -532,8 +536,8 @@ static void udf_table_free_blocks(struct super_block *sb,
*/
int adsize;
- short_ad *sad = NULL;
- long_ad *lad = NULL;
+ struct short_ad *sad = NULL;
+ struct long_ad *lad = NULL;
struct allocExtDesc *aed;
eloc.logicalBlockNum = start;
@@ -541,9 +545,9 @@ static void udf_table_free_blocks(struct super_block *sb,
(count << sb->s_blocksize_bits);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- adsize = sizeof(short_ad);
+ adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- adsize = sizeof(long_ad);
+ adsize = sizeof(struct long_ad);
else {
brelse(oepos.bh);
brelse(epos.bh);
@@ -563,7 +567,7 @@ static void udf_table_free_blocks(struct super_block *sb,
elen -= sb->s_blocksize;
epos.bh = udf_tread(sb,
- udf_get_lb_pblock(sb, epos.block, 0));
+ udf_get_lb_pblock(sb, &epos.block, 0));
if (!epos.bh) {
brelse(oepos.bh);
goto error_return;
@@ -601,15 +605,15 @@ static void udf_table_free_blocks(struct super_block *sb,
if (sbi->s_udfrev >= 0x0200)
udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
3, 1, epos.block.logicalBlockNum,
- sizeof(tag));
+ sizeof(struct tag));
else
udf_new_tag(epos.bh->b_data, TAG_IDENT_AED,
2, 1, epos.block.logicalBlockNum,
- sizeof(tag));
+ sizeof(struct tag));
switch (iinfo->i_alloc_type) {
case ICBTAG_FLAG_AD_SHORT:
- sad = (short_ad *)sptr;
+ sad = (struct short_ad *)sptr;
sad->extLength = cpu_to_le32(
EXT_NEXT_EXTENT_ALLOCDECS |
sb->s_blocksize);
@@ -617,7 +621,7 @@ static void udf_table_free_blocks(struct super_block *sb,
cpu_to_le32(epos.block.logicalBlockNum);
break;
case ICBTAG_FLAG_AD_LONG:
- lad = (long_ad *)sptr;
+ lad = (struct long_ad *)sptr;
lad->extLength = cpu_to_le32(
EXT_NEXT_EXTENT_ALLOCDECS |
sb->s_blocksize);
@@ -635,7 +639,7 @@ static void udf_table_free_blocks(struct super_block *sb,
/* It's possible that stealing the block emptied the extent */
if (elen) {
- udf_write_aext(table, &epos, eloc, elen, 1);
+ udf_write_aext(table, &epos, &eloc, elen, 1);
if (!epos.bh) {
iinfo->i_lenAlloc += adsize;
@@ -666,7 +670,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
struct udf_sb_info *sbi = UDF_SB(sb);
int alloc_count = 0;
uint32_t elen, adsize;
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
struct extent_position epos;
int8_t etype = -1;
struct udf_inode_info *iinfo;
@@ -677,9 +681,9 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
iinfo = UDF_I(table);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- adsize = sizeof(short_ad);
+ adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- adsize = sizeof(long_ad);
+ adsize = sizeof(struct long_ad);
else
return 0;
@@ -707,7 +711,7 @@ static int udf_table_prealloc_blocks(struct super_block *sb,
alloc_count = block_count;
eloc.logicalBlockNum += alloc_count;
elen -= (alloc_count << sb->s_blocksize_bits);
- udf_write_aext(table, &epos, eloc,
+ udf_write_aext(table, &epos, &eloc,
(etype << 30) | elen, 1);
} else
udf_delete_aext(table, epos, eloc,
@@ -735,7 +739,7 @@ static int udf_table_new_block(struct super_block *sb,
uint32_t spread = 0xFFFFFFFF, nspread = 0xFFFFFFFF;
uint32_t newblock = 0, adsize;
uint32_t elen, goal_elen = 0;
- kernel_lb_addr eloc, uninitialized_var(goal_eloc);
+ struct kernel_lb_addr eloc, uninitialized_var(goal_eloc);
struct extent_position epos, goal_epos;
int8_t etype;
struct udf_inode_info *iinfo = UDF_I(table);
@@ -743,9 +747,9 @@ static int udf_table_new_block(struct super_block *sb,
*err = -ENOSPC;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- adsize = sizeof(short_ad);
+ adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- adsize = sizeof(long_ad);
+ adsize = sizeof(struct long_ad);
else
return newblock;
@@ -814,7 +818,7 @@ static int udf_table_new_block(struct super_block *sb,
}
if (goal_elen)
- udf_write_aext(table, &goal_epos, goal_eloc, goal_elen, 1);
+ udf_write_aext(table, &goal_epos, &goal_eloc, goal_elen, 1);
else
udf_delete_aext(table, goal_epos, goal_eloc, goal_elen);
brelse(goal_epos.bh);
@@ -828,32 +832,25 @@ static int udf_table_new_block(struct super_block *sb,
return newblock;
}
-inline void udf_free_blocks(struct super_block *sb,
- struct inode *inode,
- kernel_lb_addr bloc, uint32_t offset,
- uint32_t count)
+void udf_free_blocks(struct super_block *sb, struct inode *inode,
+ struct kernel_lb_addr *bloc, uint32_t offset,
+ uint32_t count)
{
- uint16_t partition = bloc.partitionReferenceNum;
+ uint16_t partition = bloc->partitionReferenceNum;
struct udf_part_map *map = &UDF_SB(sb)->s_partmaps[partition];
if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_BITMAP) {
- return udf_bitmap_free_blocks(sb, inode,
- map->s_uspace.s_bitmap,
- bloc, offset, count);
+ udf_bitmap_free_blocks(sb, inode, map->s_uspace.s_bitmap,
+ bloc, offset, count);
} else if (map->s_partition_flags & UDF_PART_FLAG_UNALLOC_TABLE) {
- return udf_table_free_blocks(sb, inode,
- map->s_uspace.s_table,
- bloc, offset, count);
+ udf_table_free_blocks(sb, inode, map->s_uspace.s_table,
+ bloc, offset, count);
} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_BITMAP) {
- return udf_bitmap_free_blocks(sb, inode,
- map->s_fspace.s_bitmap,
- bloc, offset, count);
+ udf_bitmap_free_blocks(sb, inode, map->s_fspace.s_bitmap,
+ bloc, offset, count);
} else if (map->s_partition_flags & UDF_PART_FLAG_FREED_TABLE) {
- return udf_table_free_blocks(sb, inode,
- map->s_fspace.s_table,
- bloc, offset, count);
- } else {
- return;
+ udf_table_free_blocks(sb, inode, map->s_fspace.s_table,
+ bloc, offset, count);
}
}
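
Throughout balloc.c the patch stops passing struct kernel_lb_addr by value and hands a pointer down instead, so the helpers share the caller's copy. A runnable userspace sketch of the convention change (all names are local stand-ins and the block mapping is a toy, not the kernel's):

#include <stdint.h>
#include <stdio.h>

struct kernel_lb_addr {
	uint32_t logicalBlockNum;
	uint16_t partitionReferenceNum;
};

/* was: uint32_t get_pblock(struct kernel_lb_addr loc, uint32_t offset) */
static uint32_t get_pblock(const struct kernel_lb_addr *loc, uint32_t offset)
{
	/* toy mapping; the real code adds the partition's starting block */
	return loc->logicalBlockNum + offset;
}

int main(void)
{
	struct kernel_lb_addr loc = { 100, 0 };

	printf("%u\n", get_pblock(&loc, 5));	/* prints 105 */
	return 0;
}
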
diff --git a/fs/udf/dir.c b/fs/udf/dir.c
index 62dc270c69d1..2efd4d5291b6 100644
--- a/fs/udf/dir.c
+++ b/fs/udf/dir.c
@@ -51,7 +51,7 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
uint8_t lfi;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
struct buffer_head *tmp, *bha[16];
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
int i, num, ret = 0;
@@ -80,13 +80,13 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
ret = -ENOENT;
goto out;
}
- block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
+ block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- epos.offset -= sizeof(short_ad);
+ epos.offset -= sizeof(struct short_ad);
else if (iinfo->i_alloc_type ==
ICBTAG_FLAG_AD_LONG)
- epos.offset -= sizeof(long_ad);
+ epos.offset -= sizeof(struct long_ad);
} else {
offset = 0;
}
@@ -101,7 +101,7 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
if (i + offset > (elen >> dir->i_sb->s_blocksize_bits))
i = (elen >> dir->i_sb->s_blocksize_bits) - offset;
for (num = 0; i > 0; i--) {
- block = udf_get_lb_pblock(dir->i_sb, eloc, offset + i);
+ block = udf_get_lb_pblock(dir->i_sb, &eloc, offset + i);
tmp = udf_tgetblk(dir->i_sb, block);
if (tmp && !buffer_uptodate(tmp) && !buffer_locked(tmp))
bha[num++] = tmp;
@@ -161,9 +161,9 @@ static int do_udf_readdir(struct inode *dir, struct file *filp,
memcpy(fname, "..", flen);
dt_type = DT_DIR;
} else {
- kernel_lb_addr tloc = lelb_to_cpu(cfi.icb.extLocation);
+ struct kernel_lb_addr tloc = lelb_to_cpu(cfi.icb.extLocation);
- iblock = udf_get_lb_pblock(dir->i_sb, tloc, 0);
+ iblock = udf_get_lb_pblock(dir->i_sb, &tloc, 0);
flen = udf_get_filename(dir->i_sb, nameptr, fname, lfi);
dt_type = DT_UNKNOWN;
}
diff --git a/fs/udf/directory.c b/fs/udf/directory.c
index 2820f8fcf4cc..1d2c570704c8 100644
--- a/fs/udf/directory.c
+++ b/fs/udf/directory.c
@@ -20,7 +20,7 @@
#if 0
static uint8_t *udf_filead_read(struct inode *dir, uint8_t *tmpad,
- uint8_t ad_size, kernel_lb_addr fe_loc,
+ uint8_t ad_size, struct kernel_lb_addr fe_loc,
int *pos, int *offset, struct buffer_head **bh,
int *error)
{
@@ -75,7 +75,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
struct udf_fileident_bh *fibh,
struct fileIdentDesc *cfi,
struct extent_position *epos,
- kernel_lb_addr *eloc, uint32_t *elen,
+ struct kernel_lb_addr *eloc, uint32_t *elen,
sector_t *offset)
{
struct fileIdentDesc *fi;
@@ -111,7 +111,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
(EXT_RECORDED_ALLOCATED >> 30))
return NULL;
- block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset);
+ block = udf_get_lb_pblock(dir->i_sb, eloc, *offset);
(*offset)++;
@@ -131,7 +131,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
if (i + *offset > (*elen >> blocksize_bits))
i = (*elen >> blocksize_bits)-*offset;
for (num = 0; i > 0; i--) {
- block = udf_get_lb_pblock(dir->i_sb, *eloc,
+ block = udf_get_lb_pblock(dir->i_sb, eloc,
*offset + i);
tmp = udf_tgetblk(dir->i_sb, block);
if (tmp && !buffer_uptodate(tmp) &&
@@ -169,7 +169,7 @@ struct fileIdentDesc *udf_fileident_read(struct inode *dir, loff_t *nf_pos,
(EXT_RECORDED_ALLOCATED >> 30))
return NULL;
- block = udf_get_lb_pblock(dir->i_sb, *eloc, *offset);
+ block = udf_get_lb_pblock(dir->i_sb, eloc, *offset);
(*offset)++;
@@ -249,9 +249,9 @@ struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize, int *offset)
}
#if 0
-static extent_ad *udf_get_fileextent(void *buffer, int bufsize, int *offset)
+static struct extent_ad *udf_get_fileextent(void *buffer, int bufsize, int *offset)
{
- extent_ad *ext;
+ struct extent_ad *ext;
struct fileEntry *fe;
uint8_t *ptr;
@@ -274,54 +274,54 @@ static extent_ad *udf_get_fileextent(void *buffer, int bufsize, int *offset)
if ((*offset > 0) && (*offset < le32_to_cpu(fe->lengthAllocDescs)))
ptr += *offset;
- ext = (extent_ad *)ptr;
+ ext = (struct extent_ad *)ptr;
- *offset = *offset + sizeof(extent_ad);
+ *offset = *offset + sizeof(struct extent_ad);
return ext;
}
#endif
-short_ad *udf_get_fileshortad(uint8_t *ptr, int maxoffset, uint32_t *offset,
+struct short_ad *udf_get_fileshortad(uint8_t *ptr, int maxoffset, uint32_t *offset,
int inc)
{
- short_ad *sa;
+ struct short_ad *sa;
if ((!ptr) || (!offset)) {
printk(KERN_ERR "udf: udf_get_fileshortad() invalidparms\n");
return NULL;
}
- if ((*offset + sizeof(short_ad)) > maxoffset)
+ if ((*offset + sizeof(struct short_ad)) > maxoffset)
return NULL;
else {
- sa = (short_ad *)ptr;
+ sa = (struct short_ad *)ptr;
if (sa->extLength == 0)
return NULL;
}
if (inc)
- *offset += sizeof(short_ad);
+ *offset += sizeof(struct short_ad);
return sa;
}
-long_ad *udf_get_filelongad(uint8_t *ptr, int maxoffset, uint32_t *offset, int inc)
+struct long_ad *udf_get_filelongad(uint8_t *ptr, int maxoffset, uint32_t *offset, int inc)
{
- long_ad *la;
+ struct long_ad *la;
if ((!ptr) || (!offset)) {
printk(KERN_ERR "udf: udf_get_filelongad() invalidparms\n");
return NULL;
}
- if ((*offset + sizeof(long_ad)) > maxoffset)
+ if ((*offset + sizeof(struct long_ad)) > maxoffset)
return NULL;
else {
- la = (long_ad *)ptr;
+ la = (struct long_ad *)ptr;
if (la->extLength == 0)
return NULL;
}
if (inc)
- *offset += sizeof(long_ad);
+ *offset += sizeof(struct long_ad);
return la;
}
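
udf_get_fileshortad() and udf_get_filelongad() above show the bounds-check-before-cast idiom for walking descriptors in a raw buffer: refuse to cast unless a whole record fits, and advance the offset only on success. A self-contained analog, assuming an invented short_ad layout and using extLength == 0 as the terminator, as the real code does:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct short_ad {
	uint32_t extLength;
	uint32_t extPosition;
};

static const struct short_ad *get_short_ad(const uint8_t *buf, size_t buflen,
					   size_t *offset)
{
	const struct short_ad *sa;

	if (*offset + sizeof(struct short_ad) > buflen)
		return NULL;			/* would read past the buffer */
	sa = (const struct short_ad *)(buf + *offset);
	if (sa->extLength == 0)
		return NULL;			/* end-of-list sentinel */
	*offset += sizeof(struct short_ad);
	return sa;
}

int main(void)
{
	uint8_t buf[16];
	size_t off = 0;
	struct short_ad ad = { 2048, 7 };

	memcpy(buf, &ad, sizeof(ad));
	memset(buf + 8, 0, 8);			/* terminator */
	while (get_short_ad(buf, sizeof(buf), &off))
		;
	printf("consumed %zu bytes\n", off);	/* consumed 8 bytes */
	return 0;
}
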
diff --git a/fs/udf/ecma_167.h b/fs/udf/ecma_167.h
index a0974df82b31..4792b771aa80 100644
--- a/fs/udf/ecma_167.h
+++ b/fs/udf/ecma_167.h
@@ -38,10 +38,10 @@
#define _ECMA_167_H 1
/* Character set specification (ECMA 167r3 1/7.2.1) */
-typedef struct {
+struct charspec {
uint8_t charSetType;
uint8_t charSetInfo[63];
-} __attribute__ ((packed)) charspec;
+} __attribute__ ((packed));
/* Character Set Type (ECMA 167r3 1/7.2.1.1) */
#define CHARSPEC_TYPE_CS0 0x00 /* (1/7.2.2) */
@@ -57,7 +57,7 @@ typedef struct {
typedef uint8_t dstring;
/* Timestamp (ECMA 167r3 1/7.3) */
-typedef struct {
+struct timestamp {
__le16 typeAndTimezone;
__le16 year;
uint8_t month;
@@ -68,7 +68,7 @@ typedef struct {
uint8_t centiseconds;
uint8_t hundredsOfMicroseconds;
uint8_t microseconds;
-} __attribute__ ((packed)) timestamp;
+} __attribute__ ((packed));
/* Type and Time Zone (ECMA 167r3 1/7.3.1) */
#define TIMESTAMP_TYPE_MASK 0xF000
@@ -78,11 +78,11 @@ typedef struct {
#define TIMESTAMP_TIMEZONE_MASK 0x0FFF
/* Entity identifier (ECMA 167r3 1/7.4) */
-typedef struct {
+struct regid {
uint8_t flags;
uint8_t ident[23];
uint8_t identSuffix[8];
-} __attribute__ ((packed)) regid;
+} __attribute__ ((packed));
/* Flags (ECMA 167r3 1/7.4.1) */
#define ENTITYID_FLAGS_DIRTY 0x00
@@ -126,38 +126,38 @@ struct terminatingExtendedAreaDesc {
/* Boot Descriptor (ECMA 167r3 2/9.4) */
struct bootDesc {
- uint8_t structType;
- uint8_t stdIdent[VSD_STD_ID_LEN];
- uint8_t structVersion;
- uint8_t reserved1;
- regid archType;
- regid bootIdent;
- __le32 bootExtLocation;
- __le32 bootExtLength;
- __le64 loadAddress;
- __le64 startAddress;
- timestamp descCreationDateAndTime;
- __le16 flags;
- uint8_t reserved2[32];
- uint8_t bootUse[1906];
+ uint8_t structType;
+ uint8_t stdIdent[VSD_STD_ID_LEN];
+ uint8_t structVersion;
+ uint8_t reserved1;
+ struct regid archType;
+ struct regid bootIdent;
+ __le32 bootExtLocation;
+ __le32 bootExtLength;
+ __le64 loadAddress;
+ __le64 startAddress;
+ struct timestamp descCreationDateAndTime;
+ __le16 flags;
+ uint8_t reserved2[32];
+ uint8_t bootUse[1906];
} __attribute__ ((packed));
/* Flags (ECMA 167r3 2/9.4.12) */
#define BOOT_FLAGS_ERASE 0x01
/* Extent Descriptor (ECMA 167r3 3/7.1) */
-typedef struct {
+struct extent_ad {
__le32 extLength;
__le32 extLocation;
-} __attribute__ ((packed)) extent_ad;
+} __attribute__ ((packed));
-typedef struct {
+struct kernel_extent_ad {
uint32_t extLength;
uint32_t extLocation;
-} kernel_extent_ad;
+};
/* Descriptor Tag (ECMA 167r3 3/7.2) */
-typedef struct {
+struct tag {
__le16 tagIdent;
__le16 descVersion;
uint8_t tagChecksum;
@@ -166,7 +166,7 @@ typedef struct {
__le16 descCRC;
__le16 descCRCLength;
__le32 tagLocation;
-} __attribute__ ((packed)) tag;
+} __attribute__ ((packed));
/* Tag Identifier (ECMA 167r3 3/7.2.1) */
#define TAG_IDENT_PVD 0x0001
@@ -190,28 +190,28 @@ struct NSRDesc {
/* Primary Volume Descriptor (ECMA 167r3 3/10.1) */
struct primaryVolDesc {
- tag descTag;
- __le32 volDescSeqNum;
- __le32 primaryVolDescNum;
- dstring volIdent[32];
- __le16 volSeqNum;
- __le16 maxVolSeqNum;
- __le16 interchangeLvl;
- __le16 maxInterchangeLvl;
- __le32 charSetList;
- __le32 maxCharSetList;
- dstring volSetIdent[128];
- charspec descCharSet;
- charspec explanatoryCharSet;
- extent_ad volAbstract;
- extent_ad volCopyright;
- regid appIdent;
- timestamp recordingDateAndTime;
- regid impIdent;
- uint8_t impUse[64];
- __le32 predecessorVolDescSeqLocation;
- __le16 flags;
- uint8_t reserved[22];
+ struct tag descTag;
+ __le32 volDescSeqNum;
+ __le32 primaryVolDescNum;
+ dstring volIdent[32];
+ __le16 volSeqNum;
+ __le16 maxVolSeqNum;
+ __le16 interchangeLvl;
+ __le16 maxInterchangeLvl;
+ __le32 charSetList;
+ __le32 maxCharSetList;
+ dstring volSetIdent[128];
+ struct charspec descCharSet;
+ struct charspec explanatoryCharSet;
+ struct extent_ad volAbstract;
+ struct extent_ad volCopyright;
+ struct regid appIdent;
+ struct timestamp recordingDateAndTime;
+ struct regid impIdent;
+ uint8_t impUse[64];
+ __le32 predecessorVolDescSeqLocation;
+ __le16 flags;
+ uint8_t reserved[22];
} __attribute__ ((packed));
/* Flags (ECMA 167r3 3/10.1.21) */
@@ -219,40 +219,40 @@ struct primaryVolDesc {
/* Anchor Volume Descriptor Pointer (ECMA 167r3 3/10.2) */
struct anchorVolDescPtr {
- tag descTag;
- extent_ad mainVolDescSeqExt;
- extent_ad reserveVolDescSeqExt;
- uint8_t reserved[480];
+ struct tag descTag;
+ struct extent_ad mainVolDescSeqExt;
+ struct extent_ad reserveVolDescSeqExt;
+ uint8_t reserved[480];
} __attribute__ ((packed));
/* Volume Descriptor Pointer (ECMA 167r3 3/10.3) */
struct volDescPtr {
- tag descTag;
- __le32 volDescSeqNum;
- extent_ad nextVolDescSeqExt;
- uint8_t reserved[484];
+ struct tag descTag;
+ __le32 volDescSeqNum;
+ struct extent_ad nextVolDescSeqExt;
+ uint8_t reserved[484];
} __attribute__ ((packed));
/* Implementation Use Volume Descriptor (ECMA 167r3 3/10.4) */
struct impUseVolDesc {
- tag descTag;
+ struct tag descTag;
__le32 volDescSeqNum;
- regid impIdent;
+ struct regid impIdent;
uint8_t impUse[460];
} __attribute__ ((packed));
/* Partition Descriptor (ECMA 167r3 3/10.5) */
struct partitionDesc {
- tag descTag;
+ struct tag descTag;
__le32 volDescSeqNum;
__le16 partitionFlags;
__le16 partitionNumber;
- regid partitionContents;
+ struct regid partitionContents;
uint8_t partitionContentsUse[128];
__le32 accessType;
__le32 partitionStartingLocation;
__le32 partitionLength;
- regid impIdent;
+ struct regid impIdent;
uint8_t impUse[128];
uint8_t reserved[156];
} __attribute__ ((packed));
@@ -278,19 +278,19 @@ struct partitionDesc {
/* Logical Volume Descriptor (ECMA 167r3 3/10.6) */
struct logicalVolDesc {
- tag descTag;
- __le32 volDescSeqNum;
- charspec descCharSet;
- dstring logicalVolIdent[128];
- __le32 logicalBlockSize;
- regid domainIdent;
- uint8_t logicalVolContentsUse[16];
- __le32 mapTableLength;
- __le32 numPartitionMaps;
- regid impIdent;
- uint8_t impUse[128];
- extent_ad integritySeqExt;
- uint8_t partitionMaps[0];
+ struct tag descTag;
+ __le32 volDescSeqNum;
+ struct charspec descCharSet;
+ dstring logicalVolIdent[128];
+ __le32 logicalBlockSize;
+ struct regid domainIdent;
+ uint8_t logicalVolContentsUse[16];
+ __le32 mapTableLength;
+ __le32 numPartitionMaps;
+ struct regid impIdent;
+ uint8_t impUse[128];
+ struct extent_ad integritySeqExt;
+ uint8_t partitionMaps[0];
} __attribute__ ((packed));
/* Generic Partition Map (ECMA 167r3 3/10.7.1) */
@@ -322,30 +322,30 @@ struct genericPartitionMap2 {
/* Unallocated Space Descriptor (ECMA 167r3 3/10.8) */
struct unallocSpaceDesc {
- tag descTag;
- __le32 volDescSeqNum;
- __le32 numAllocDescs;
- extent_ad allocDescs[0];
+ struct tag descTag;
+ __le32 volDescSeqNum;
+ __le32 numAllocDescs;
+ struct extent_ad allocDescs[0];
} __attribute__ ((packed));
/* Terminating Descriptor (ECMA 167r3 3/10.9) */
struct terminatingDesc {
- tag descTag;
+ struct tag descTag;
uint8_t reserved[496];
} __attribute__ ((packed));
/* Logical Volume Integrity Descriptor (ECMA 167r3 3/10.10) */
struct logicalVolIntegrityDesc {
- tag descTag;
- timestamp recordingDateAndTime;
- __le32 integrityType;
- extent_ad nextIntegrityExt;
- uint8_t logicalVolContentsUse[32];
- __le32 numOfPartitions;
- __le32 lengthOfImpUse;
- __le32 freeSpaceTable[0];
- __le32 sizeTable[0];
- uint8_t impUse[0];
+ struct tag descTag;
+ struct timestamp recordingDateAndTime;
+ __le32 integrityType;
+ struct extent_ad nextIntegrityExt;
+ uint8_t logicalVolContentsUse[32];
+ __le32 numOfPartitions;
+ __le32 lengthOfImpUse;
+ __le32 freeSpaceTable[0];
+ __le32 sizeTable[0];
+ uint8_t impUse[0];
} __attribute__ ((packed));
/* Integrity Type (ECMA 167r3 3/10.10.3) */
@@ -353,50 +353,50 @@ struct logicalVolIntegrityDesc {
#define LVID_INTEGRITY_TYPE_CLOSE 0x00000001
/* Recorded Address (ECMA 167r3 4/7.1) */
-typedef struct {
+struct lb_addr {
__le32 logicalBlockNum;
__le16 partitionReferenceNum;
-} __attribute__ ((packed)) lb_addr;
+} __attribute__ ((packed));
/* ... and its in-core analog */
-typedef struct {
+struct kernel_lb_addr {
uint32_t logicalBlockNum;
uint16_t partitionReferenceNum;
-} kernel_lb_addr;
+};
/* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */
-typedef struct {
+struct short_ad {
__le32 extLength;
__le32 extPosition;
-} __attribute__ ((packed)) short_ad;
+} __attribute__ ((packed));
/* Long Allocation Descriptor (ECMA 167r3 4/14.14.2) */
-typedef struct {
+struct long_ad {
__le32 extLength;
- lb_addr extLocation;
+ struct lb_addr extLocation;
uint8_t impUse[6];
-} __attribute__ ((packed)) long_ad;
+} __attribute__ ((packed));
-typedef struct {
- uint32_t extLength;
- kernel_lb_addr extLocation;
- uint8_t impUse[6];
-} kernel_long_ad;
+struct kernel_long_ad {
+ uint32_t extLength;
+ struct kernel_lb_addr extLocation;
+ uint8_t impUse[6];
+};
/* Extended Allocation Descriptor (ECMA 167r3 4/14.14.3) */
-typedef struct {
+struct ext_ad {
__le32 extLength;
__le32 recordedLength;
__le32 informationLength;
- lb_addr extLocation;
-} __attribute__ ((packed)) ext_ad;
+ struct lb_addr extLocation;
+} __attribute__ ((packed));
-typedef struct {
- uint32_t extLength;
- uint32_t recordedLength;
- uint32_t informationLength;
- kernel_lb_addr extLocation;
-} kernel_ext_ad;
+struct kernel_ext_ad {
+ uint32_t extLength;
+ uint32_t recordedLength;
+ uint32_t informationLength;
+ struct kernel_lb_addr extLocation;
+};
/* Descriptor Tag (ECMA 167r3 4/7.2 - See 3/7.2) */
@@ -415,44 +415,44 @@ typedef struct {
/* File Set Descriptor (ECMA 167r3 4/14.1) */
struct fileSetDesc {
- tag descTag;
- timestamp recordingDateAndTime;
- __le16 interchangeLvl;
- __le16 maxInterchangeLvl;
- __le32 charSetList;
- __le32 maxCharSetList;
- __le32 fileSetNum;
- __le32 fileSetDescNum;
- charspec logicalVolIdentCharSet;
- dstring logicalVolIdent[128];
- charspec fileSetCharSet;
- dstring fileSetIdent[32];
- dstring copyrightFileIdent[32];
- dstring abstractFileIdent[32];
- long_ad rootDirectoryICB;
- regid domainIdent;
- long_ad nextExt;
- long_ad streamDirectoryICB;
- uint8_t reserved[32];
+ struct tag descTag;
+ struct timestamp recordingDateAndTime;
+ __le16 interchangeLvl;
+ __le16 maxInterchangeLvl;
+ __le32 charSetList;
+ __le32 maxCharSetList;
+ __le32 fileSetNum;
+ __le32 fileSetDescNum;
+ struct charspec logicalVolIdentCharSet;
+ dstring logicalVolIdent[128];
+ struct charspec fileSetCharSet;
+ dstring fileSetIdent[32];
+ dstring copyrightFileIdent[32];
+ dstring abstractFileIdent[32];
+ struct long_ad rootDirectoryICB;
+ struct regid domainIdent;
+ struct long_ad nextExt;
+ struct long_ad streamDirectoryICB;
+ uint8_t reserved[32];
} __attribute__ ((packed));
/* Partition Header Descriptor (ECMA 167r3 4/14.3) */
struct partitionHeaderDesc {
- short_ad unallocSpaceTable;
- short_ad unallocSpaceBitmap;
- short_ad partitionIntegrityTable;
- short_ad freedSpaceTable;
- short_ad freedSpaceBitmap;
+ struct short_ad unallocSpaceTable;
+ struct short_ad unallocSpaceBitmap;
+ struct short_ad partitionIntegrityTable;
+ struct short_ad freedSpaceTable;
+ struct short_ad freedSpaceBitmap;
uint8_t reserved[88];
} __attribute__ ((packed));
/* File Identifier Descriptor (ECMA 167r3 4/14.4) */
struct fileIdentDesc {
- tag descTag;
+ struct tag descTag;
__le16 fileVersionNum;
uint8_t fileCharacteristics;
uint8_t lengthFileIdent;
- long_ad icb;
+ struct long_ad icb;
__le16 lengthOfImpUse;
uint8_t impUse[0];
uint8_t fileIdent[0];
@@ -468,22 +468,22 @@ struct fileIdentDesc {
/* Allocation Ext Descriptor (ECMA 167r3 4/14.5) */
struct allocExtDesc {
- tag descTag;
+ struct tag descTag;
__le32 previousAllocExtLocation;
__le32 lengthAllocDescs;
} __attribute__ ((packed));
/* ICB Tag (ECMA 167r3 4/14.6) */
-typedef struct {
+struct icbtag {
__le32 priorRecordedNumDirectEntries;
__le16 strategyType;
__le16 strategyParameter;
__le16 numEntries;
uint8_t reserved;
uint8_t fileType;
- lb_addr parentICBLocation;
+ struct lb_addr parentICBLocation;
__le16 flags;
-} __attribute__ ((packed)) icbtag;
+} __attribute__ ((packed));
/* Strategy Type (ECMA 167r3 4/14.6.2) */
#define ICBTAG_STRATEGY_TYPE_UNDEF 0x0000
@@ -528,41 +528,41 @@ typedef struct {
/* Indirect Entry (ECMA 167r3 4/14.7) */
struct indirectEntry {
- tag descTag;
- icbtag icbTag;
- long_ad indirectICB;
+ struct tag descTag;
+ struct icbtag icbTag;
+ struct long_ad indirectICB;
} __attribute__ ((packed));
/* Terminal Entry (ECMA 167r3 4/14.8) */
struct terminalEntry {
- tag descTag;
- icbtag icbTag;
+ struct tag descTag;
+ struct icbtag icbTag;
} __attribute__ ((packed));
/* File Entry (ECMA 167r3 4/14.9) */
struct fileEntry {
- tag descTag;
- icbtag icbTag;
- __le32 uid;
- __le32 gid;
- __le32 permissions;
- __le16 fileLinkCount;
- uint8_t recordFormat;
- uint8_t recordDisplayAttr;
- __le32 recordLength;
- __le64 informationLength;
- __le64 logicalBlocksRecorded;
- timestamp accessTime;
- timestamp modificationTime;
- timestamp attrTime;
- __le32 checkpoint;
- long_ad extendedAttrICB;
- regid impIdent;
- __le64 uniqueID;
- __le32 lengthExtendedAttr;
- __le32 lengthAllocDescs;
- uint8_t extendedAttr[0];
- uint8_t allocDescs[0];
+ struct tag descTag;
+ struct icbtag icbTag;
+ __le32 uid;
+ __le32 gid;
+ __le32 permissions;
+ __le16 fileLinkCount;
+ uint8_t recordFormat;
+ uint8_t recordDisplayAttr;
+ __le32 recordLength;
+ __le64 informationLength;
+ __le64 logicalBlocksRecorded;
+ struct timestamp accessTime;
+ struct timestamp modificationTime;
+ struct timestamp attrTime;
+ __le32 checkpoint;
+ struct long_ad extendedAttrICB;
+ struct regid impIdent;
+ __le64 uniqueID;
+ __le32 lengthExtendedAttr;
+ __le32 lengthAllocDescs;
+ uint8_t extendedAttr[0];
+ uint8_t allocDescs[0];
} __attribute__ ((packed));
/* Permissions (ECMA 167r3 4/14.9.5) */
@@ -604,7 +604,7 @@ struct fileEntry {
/* Extended Attribute Header Descriptor (ECMA 167r3 4/14.10.1) */
struct extendedAttrHeaderDesc {
- tag descTag;
+ struct tag descTag;
__le32 impAttrLocation;
__le32 appAttrLocation;
} __attribute__ ((packed));
@@ -687,7 +687,7 @@ struct impUseExtAttr {
uint8_t reserved[3];
__le32 attrLength;
__le32 impUseLength;
- regid impIdent;
+ struct regid impIdent;
uint8_t impUse[0];
} __attribute__ ((packed));
@@ -698,7 +698,7 @@ struct appUseExtAttr {
uint8_t reserved[3];
__le32 attrLength;
__le32 appUseLength;
- regid appIdent;
+ struct regid appIdent;
uint8_t appUse[0];
} __attribute__ ((packed));
@@ -712,15 +712,15 @@ struct appUseExtAttr {
/* Unallocated Space Entry (ECMA 167r3 4/14.11) */
struct unallocSpaceEntry {
- tag descTag;
- icbtag icbTag;
+ struct tag descTag;
+ struct icbtag icbTag;
__le32 lengthAllocDescs;
uint8_t allocDescs[0];
} __attribute__ ((packed));
/* Space Bitmap Descriptor (ECMA 167r3 4/14.12) */
struct spaceBitmapDesc {
- tag descTag;
+ struct tag descTag;
__le32 numOfBits;
__le32 numOfBytes;
uint8_t bitmap[0];
@@ -728,13 +728,13 @@ struct spaceBitmapDesc {
/* Partition Integrity Entry (ECMA 167r3 4/14.13) */
struct partitionIntegrityEntry {
- tag descTag;
- icbtag icbTag;
- timestamp recordingDateAndTime;
- uint8_t integrityType;
- uint8_t reserved[175];
- regid impIdent;
- uint8_t impUse[256];
+ struct tag descTag;
+ struct icbtag icbTag;
+ struct timestamp recordingDateAndTime;
+ uint8_t integrityType;
+ uint8_t reserved[175];
+ struct regid impIdent;
+ uint8_t impUse[256];
} __attribute__ ((packed));
/* Short Allocation Descriptor (ECMA 167r3 4/14.14.1) */
@@ -765,32 +765,32 @@ struct pathComponent {
/* File Entry (ECMA 167r3 4/14.17) */
struct extendedFileEntry {
- tag descTag;
- icbtag icbTag;
- __le32 uid;
- __le32 gid;
- __le32 permissions;
- __le16 fileLinkCount;
- uint8_t recordFormat;
- uint8_t recordDisplayAttr;
- __le32 recordLength;
- __le64 informationLength;
- __le64 objectSize;
- __le64 logicalBlocksRecorded;
- timestamp accessTime;
- timestamp modificationTime;
- timestamp createTime;
- timestamp attrTime;
- __le32 checkpoint;
- __le32 reserved;
- long_ad extendedAttrICB;
- long_ad streamDirectoryICB;
- regid impIdent;
- __le64 uniqueID;
- __le32 lengthExtendedAttr;
- __le32 lengthAllocDescs;
- uint8_t extendedAttr[0];
- uint8_t allocDescs[0];
+ struct tag descTag;
+ struct icbtag icbTag;
+ __le32 uid;
+ __le32 gid;
+ __le32 permissions;
+ __le16 fileLinkCount;
+ uint8_t recordFormat;
+ uint8_t recordDisplayAttr;
+ __le32 recordLength;
+ __le64 informationLength;
+ __le64 objectSize;
+ __le64 logicalBlocksRecorded;
+ struct timestamp accessTime;
+ struct timestamp modificationTime;
+ struct timestamp createTime;
+ struct timestamp attrTime;
+ __le32 checkpoint;
+ __le32 reserved;
+ struct long_ad extendedAttrICB;
+ struct long_ad streamDirectoryICB;
+ struct regid impIdent;
+ __le64 uniqueID;
+ __le32 lengthExtendedAttr;
+ __le32 lengthAllocDescs;
+ uint8_t extendedAttr[0];
+ uint8_t allocDescs[0];
} __attribute__ ((packed));
#endif /* _ECMA_167_H */
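
The typedef-to-struct conversion keeps the header's two struct families visibly distinct: packed, little-endian on-disk records (struct lb_addr) and their native-order in-core analogs (struct kernel_lb_addr). A hedged sketch of the decode step that helpers like lelb_to_cpu() perform; the decoder below is illustrative, not the kernel's:

#include <stdint.h>
#include <stdio.h>

struct kernel_lb_addr {		/* in-core: native byte order */
	uint32_t logicalBlockNum;
	uint16_t partitionReferenceNum;
};

static struct kernel_lb_addr decode_lb_addr(const uint8_t raw[6])
{
	struct kernel_lb_addr out;

	out.logicalBlockNum = (uint32_t)raw[0] | (uint32_t)raw[1] << 8 |
			      (uint32_t)raw[2] << 16 | (uint32_t)raw[3] << 24;
	out.partitionReferenceNum = (uint16_t)(raw[4] | raw[5] << 8);
	return out;
}

int main(void)
{
	/* lb_addr as it appears on disk: block 0x1234, partition 1 */
	const uint8_t raw[6] = { 0x34, 0x12, 0x00, 0x00, 0x01, 0x00 };
	struct kernel_lb_addr a = decode_lb_addr(raw);

	printf("block=%u partition=%u\n",
	       a.logicalBlockNum, a.partitionReferenceNum);	/* 4660 1 */
	return 0;
}
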
diff --git a/fs/udf/ialloc.c b/fs/udf/ialloc.c
index 31fc84297ddb..76f9b3635a23 100644
--- a/fs/udf/ialloc.c
+++ b/fs/udf/ialloc.c
@@ -54,7 +54,7 @@ void udf_free_inode(struct inode *inode)
}
mutex_unlock(&sbi->s_alloc_mutex);
- udf_free_blocks(sb, NULL, UDF_I(inode)->i_location, 0, 1);
+ udf_free_blocks(sb, NULL, &UDF_I(inode)->i_location, 0, 1);
}
struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
@@ -138,7 +138,7 @@ struct inode *udf_new_inode(struct inode *dir, int mode, int *err)
iinfo->i_location.logicalBlockNum = block;
iinfo->i_location.partitionReferenceNum =
dinfo->i_location.partitionReferenceNum;
- inode->i_ino = udf_get_lb_pblock(sb, iinfo->i_location, 0);
+ inode->i_ino = udf_get_lb_pblock(sb, &iinfo->i_location, 0);
inode->i_blocks = 0;
iinfo->i_lenEAttr = 0;
iinfo->i_lenAlloc = 0;
diff --git a/fs/udf/inode.c b/fs/udf/inode.c
index 30ebde490f7f..e7533f785636 100644
--- a/fs/udf/inode.c
+++ b/fs/udf/inode.c
@@ -55,15 +55,15 @@ static int udf_alloc_i_data(struct inode *inode, size_t size);
static struct buffer_head *inode_getblk(struct inode *, sector_t, int *,
sector_t *, int *);
static int8_t udf_insert_aext(struct inode *, struct extent_position,
- kernel_lb_addr, uint32_t);
+ struct kernel_lb_addr, uint32_t);
static void udf_split_extents(struct inode *, int *, int, int,
- kernel_long_ad[EXTENT_MERGE_SIZE], int *);
+ struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_prealloc_extents(struct inode *, int, int,
- kernel_long_ad[EXTENT_MERGE_SIZE], int *);
+ struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_merge_extents(struct inode *,
- kernel_long_ad[EXTENT_MERGE_SIZE], int *);
+ struct kernel_long_ad[EXTENT_MERGE_SIZE], int *);
static void udf_update_extents(struct inode *,
- kernel_long_ad[EXTENT_MERGE_SIZE], int, int,
+ struct kernel_long_ad[EXTENT_MERGE_SIZE], int, int,
struct extent_position *);
static int udf_get_block(struct inode *, sector_t, struct buffer_head *, int);
@@ -200,7 +200,7 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
{
int newblock;
struct buffer_head *dbh = NULL;
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
uint32_t elen;
uint8_t alloctype;
struct extent_position epos;
@@ -281,7 +281,7 @@ struct buffer_head *udf_expand_dir_adinicb(struct inode *inode, int *block,
epos.bh = NULL;
epos.block = iinfo->i_location;
epos.offset = udf_file_entry_alloc_offset(inode);
- udf_add_aext(inode, &epos, eloc, elen, 0);
+ udf_add_aext(inode, &epos, &eloc, elen, 0);
/* UniqueID stuff */
brelse(epos.bh);
@@ -359,12 +359,12 @@ static struct buffer_head *udf_getblk(struct inode *inode, long block,
/* Extend the file by 'blocks' blocks, return the number of extents added */
int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
- kernel_long_ad *last_ext, sector_t blocks)
+ struct kernel_long_ad *last_ext, sector_t blocks)
{
sector_t add;
int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
struct super_block *sb = inode->i_sb;
- kernel_lb_addr prealloc_loc = {};
+ struct kernel_lb_addr prealloc_loc = {};
int prealloc_len = 0;
struct udf_inode_info *iinfo;
@@ -411,11 +411,11 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
}
if (fake) {
- udf_add_aext(inode, last_pos, last_ext->extLocation,
+ udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
count++;
} else
- udf_write_aext(inode, last_pos, last_ext->extLocation,
+ udf_write_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1);
/* Managed to do everything necessary? */
@@ -432,7 +432,7 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
/* Create enough extents to cover the whole hole */
while (blocks > add) {
blocks -= add;
- if (udf_add_aext(inode, last_pos, last_ext->extLocation,
+ if (udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1) == -1)
return -1;
count++;
@@ -440,7 +440,7 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
if (blocks) {
last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
(blocks << sb->s_blocksize_bits);
- if (udf_add_aext(inode, last_pos, last_ext->extLocation,
+ if (udf_add_aext(inode, last_pos, &last_ext->extLocation,
last_ext->extLength, 1) == -1)
return -1;
count++;
@@ -449,7 +449,7 @@ int udf_extend_file(struct inode *inode, struct extent_position *last_pos,
out:
/* Do we have some preallocated blocks saved? */
if (prealloc_len) {
- if (udf_add_aext(inode, last_pos, prealloc_loc,
+ if (udf_add_aext(inode, last_pos, &prealloc_loc,
prealloc_len, 1) == -1)
return -1;
last_ext->extLocation = prealloc_loc;
@@ -459,9 +459,9 @@ out:
/* last_pos should point to the last written extent... */
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- last_pos->offset -= sizeof(short_ad);
+ last_pos->offset -= sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- last_pos->offset -= sizeof(long_ad);
+ last_pos->offset -= sizeof(struct long_ad);
else
return -1;
@@ -473,11 +473,11 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
{
static sector_t last_block;
struct buffer_head *result = NULL;
- kernel_long_ad laarr[EXTENT_MERGE_SIZE];
+ struct kernel_long_ad laarr[EXTENT_MERGE_SIZE];
struct extent_position prev_epos, cur_epos, next_epos;
int count = 0, startnum = 0, endnum = 0;
uint32_t elen = 0, tmpelen;
- kernel_lb_addr eloc, tmpeloc;
+ struct kernel_lb_addr eloc, tmpeloc;
int c = 1;
loff_t lbcount = 0, b_off = 0;
uint32_t newblocknum, newblock;
@@ -550,12 +550,12 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
elen = EXT_RECORDED_ALLOCATED |
((elen + inode->i_sb->s_blocksize - 1) &
~(inode->i_sb->s_blocksize - 1));
- etype = udf_write_aext(inode, &cur_epos, eloc, elen, 1);
+ etype = udf_write_aext(inode, &cur_epos, &eloc, elen, 1);
}
brelse(prev_epos.bh);
brelse(cur_epos.bh);
brelse(next_epos.bh);
- newblock = udf_get_lb_pblock(inode->i_sb, eloc, offset);
+ newblock = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
*phys = newblock;
return NULL;
}
@@ -572,7 +572,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
} else {
/* Create a fake extent when there's not one */
memset(&laarr[0].extLocation, 0x00,
- sizeof(kernel_lb_addr));
+ sizeof(struct kernel_lb_addr));
laarr[0].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED;
/* Will udf_extend_file() create real extent from
a fake one? */
@@ -602,7 +602,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
laarr[c].extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
inode->i_sb->s_blocksize;
memset(&laarr[c].extLocation, 0x00,
- sizeof(kernel_lb_addr));
+ sizeof(struct kernel_lb_addr));
count++;
endnum++;
}
@@ -699,7 +699,7 @@ static struct buffer_head *inode_getblk(struct inode *inode, sector_t block,
static void udf_split_extents(struct inode *inode, int *c, int offset,
int newblocknum,
- kernel_long_ad laarr[EXTENT_MERGE_SIZE],
+ struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
int *endnum)
{
unsigned long blocksize = inode->i_sb->s_blocksize;
@@ -726,7 +726,7 @@ static void udf_split_extents(struct inode *inode, int *c, int offset,
if (offset) {
if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
udf_free_blocks(inode->i_sb, inode,
- laarr[curr].extLocation,
+ &laarr[curr].extLocation,
0, offset);
laarr[curr].extLength =
EXT_NOT_RECORDED_NOT_ALLOCATED |
@@ -763,7 +763,7 @@ static void udf_split_extents(struct inode *inode, int *c, int offset,
}
static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
- kernel_long_ad laarr[EXTENT_MERGE_SIZE],
+ struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
int *endnum)
{
int start, length = 0, currlength = 0, i;
@@ -817,7 +817,7 @@ static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
inode->i_sb->s_blocksize_bits);
else {
memmove(&laarr[c + 2], &laarr[c + 1],
- sizeof(long_ad) * (*endnum - (c + 1)));
+ sizeof(struct long_ad) * (*endnum - (c + 1)));
(*endnum)++;
laarr[c + 1].extLocation.logicalBlockNum = next;
laarr[c + 1].extLocation.partitionReferenceNum =
@@ -846,7 +846,7 @@ static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
if (*endnum > (i + 1))
memmove(&laarr[i],
&laarr[i + 1],
- sizeof(long_ad) *
+ sizeof(struct long_ad) *
(*endnum - (i + 1)));
i--;
(*endnum)--;
@@ -859,7 +859,7 @@ static void udf_prealloc_extents(struct inode *inode, int c, int lastblock,
}
static void udf_merge_extents(struct inode *inode,
- kernel_long_ad laarr[EXTENT_MERGE_SIZE],
+ struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
int *endnum)
{
int i;
@@ -867,8 +867,8 @@ static void udf_merge_extents(struct inode *inode,
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
for (i = 0; i < (*endnum - 1); i++) {
- kernel_long_ad *li /*l[i]*/ = &laarr[i];
- kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
+ struct kernel_long_ad *li /*l[i]*/ = &laarr[i];
+ struct kernel_long_ad *lip1 /*l[i plus 1]*/ = &laarr[i + 1];
if (((li->extLength >> 30) == (lip1->extLength >> 30)) &&
(((li->extLength >> 30) ==
@@ -902,7 +902,7 @@ static void udf_merge_extents(struct inode *inode,
blocksize - 1) & ~(blocksize - 1));
if (*endnum > (i + 2))
memmove(&laarr[i + 1], &laarr[i + 2],
- sizeof(long_ad) *
+ sizeof(struct long_ad) *
(*endnum - (i + 2)));
i--;
(*endnum)--;
@@ -911,7 +911,7 @@ static void udf_merge_extents(struct inode *inode,
(EXT_NOT_RECORDED_ALLOCATED >> 30)) &&
((lip1->extLength >> 30) ==
(EXT_NOT_RECORDED_NOT_ALLOCATED >> 30))) {
- udf_free_blocks(inode->i_sb, inode, li->extLocation, 0,
+ udf_free_blocks(inode->i_sb, inode, &li->extLocation, 0,
((li->extLength &
UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) >> blocksize_bits);
@@ -937,7 +937,7 @@ static void udf_merge_extents(struct inode *inode,
blocksize - 1) & ~(blocksize - 1));
if (*endnum > (i + 2))
memmove(&laarr[i + 1], &laarr[i + 2],
- sizeof(long_ad) *
+ sizeof(struct long_ad) *
(*endnum - (i + 2)));
i--;
(*endnum)--;
@@ -945,7 +945,7 @@ static void udf_merge_extents(struct inode *inode,
} else if ((li->extLength >> 30) ==
(EXT_NOT_RECORDED_ALLOCATED >> 30)) {
udf_free_blocks(inode->i_sb, inode,
- li->extLocation, 0,
+ &li->extLocation, 0,
((li->extLength &
UDF_EXTENT_LENGTH_MASK) +
blocksize - 1) >> blocksize_bits);
@@ -959,12 +959,12 @@ static void udf_merge_extents(struct inode *inode,
}
static void udf_update_extents(struct inode *inode,
- kernel_long_ad laarr[EXTENT_MERGE_SIZE],
+ struct kernel_long_ad laarr[EXTENT_MERGE_SIZE],
int startnum, int endnum,
struct extent_position *epos)
{
int start = 0, i;
- kernel_lb_addr tmploc;
+ struct kernel_lb_addr tmploc;
uint32_t tmplen;
if (startnum > endnum) {
@@ -983,7 +983,7 @@ static void udf_update_extents(struct inode *inode,
for (i = start; i < endnum; i++) {
udf_next_aext(inode, epos, &tmploc, &tmplen, 0);
- udf_write_aext(inode, epos, laarr[i].extLocation,
+ udf_write_aext(inode, epos, &laarr[i].extLocation,
laarr[i].extLength, 1);
}
}
@@ -1076,7 +1076,7 @@ static void __udf_read_inode(struct inode *inode)
* i_nlink = 1
* i_op = NULL;
*/
- bh = udf_read_ptagged(inode->i_sb, iinfo->i_location, 0, &ident);
+ bh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 0, &ident);
if (!bh) {
printk(KERN_ERR "udf: udf_read_inode(ino %ld) failed !bh\n",
inode->i_ino);
@@ -1098,24 +1098,24 @@ static void __udf_read_inode(struct inode *inode)
if (fe->icbTag.strategyType == cpu_to_le16(4096)) {
struct buffer_head *ibh;
- ibh = udf_read_ptagged(inode->i_sb, iinfo->i_location, 1,
+ ibh = udf_read_ptagged(inode->i_sb, &iinfo->i_location, 1,
&ident);
if (ident == TAG_IDENT_IE && ibh) {
struct buffer_head *nbh = NULL;
- kernel_lb_addr loc;
+ struct kernel_lb_addr loc;
struct indirectEntry *ie;
ie = (struct indirectEntry *)ibh->b_data;
loc = lelb_to_cpu(ie->indirectICB.extLocation);
if (ie->indirectICB.extLength &&
- (nbh = udf_read_ptagged(inode->i_sb, loc, 0,
+ (nbh = udf_read_ptagged(inode->i_sb, &loc, 0,
&ident))) {
if (ident == TAG_IDENT_FE ||
ident == TAG_IDENT_EFE) {
memcpy(&iinfo->i_location,
&loc,
- sizeof(kernel_lb_addr));
+ sizeof(struct kernel_lb_addr));
brelse(bh);
brelse(ibh);
brelse(nbh);
@@ -1222,8 +1222,15 @@ static void udf_fill_inode(struct inode *inode, struct buffer_head *bh)
inode->i_size = le64_to_cpu(fe->informationLength);
iinfo->i_lenExtents = inode->i_size;
- inode->i_mode = udf_convert_permissions(fe);
- inode->i_mode &= ~UDF_SB(inode->i_sb)->s_umask;
+ if (fe->icbTag.fileType != ICBTAG_FILE_TYPE_DIRECTORY &&
+ sbi->s_fmode != UDF_INVALID_MODE)
+ inode->i_mode = sbi->s_fmode;
+ else if (fe->icbTag.fileType == ICBTAG_FILE_TYPE_DIRECTORY &&
+ sbi->s_dmode != UDF_INVALID_MODE)
+ inode->i_mode = sbi->s_dmode;
+ else
+ inode->i_mode = udf_convert_permissions(fe);
+ inode->i_mode &= ~sbi->s_umask;
if (iinfo->i_efe == 0) {
inode->i_blocks = le64_to_cpu(fe->logicalBlocksRecorded) <<
@@ -1396,7 +1403,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
bh = udf_tread(inode->i_sb,
udf_get_lb_pblock(inode->i_sb,
- iinfo->i_location, 0));
+ &iinfo->i_location, 0));
if (!bh) {
udf_debug("bread failure\n");
return -EIO;
@@ -1416,13 +1423,13 @@ static int udf_update_inode(struct inode *inode, int do_sync)
iinfo->i_ext.i_data, inode->i_sb->s_blocksize -
sizeof(struct unallocSpaceEntry));
crclen = sizeof(struct unallocSpaceEntry) +
- iinfo->i_lenAlloc - sizeof(tag);
+ iinfo->i_lenAlloc - sizeof(struct tag);
use->descTag.tagLocation = cpu_to_le32(
iinfo->i_location.
logicalBlockNum);
use->descTag.descCRCLength = cpu_to_le16(crclen);
use->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)use +
- sizeof(tag),
+ sizeof(struct tag),
crclen));
use->descTag.tagChecksum = udf_tag_checksum(&use->descTag);
@@ -1459,23 +1466,23 @@ static int udf_update_inode(struct inode *inode, int do_sync)
fe->informationLength = cpu_to_le64(inode->i_size);
if (S_ISCHR(inode->i_mode) || S_ISBLK(inode->i_mode)) {
- regid *eid;
+ struct regid *eid;
struct deviceSpec *dsea =
(struct deviceSpec *)udf_get_extendedattr(inode, 12, 1);
if (!dsea) {
dsea = (struct deviceSpec *)
udf_add_extendedattr(inode,
sizeof(struct deviceSpec) +
- sizeof(regid), 12, 0x3);
+ sizeof(struct regid), 12, 0x3);
dsea->attrType = cpu_to_le32(12);
dsea->attrSubtype = 1;
dsea->attrLength = cpu_to_le32(
sizeof(struct deviceSpec) +
- sizeof(regid));
- dsea->impUseLength = cpu_to_le32(sizeof(regid));
+ sizeof(struct regid));
+ dsea->impUseLength = cpu_to_le32(sizeof(struct regid));
}
- eid = (regid *)dsea->impUse;
- memset(eid, 0, sizeof(regid));
+ eid = (struct regid *)dsea->impUse;
+ memset(eid, 0, sizeof(struct regid));
strcpy(eid->ident, UDF_ID_DEVELOPER);
eid->identSuffix[0] = UDF_OS_CLASS_UNIX;
eid->identSuffix[1] = UDF_OS_ID_LINUX;
@@ -1494,7 +1501,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
udf_time_to_disk_stamp(&fe->accessTime, inode->i_atime);
udf_time_to_disk_stamp(&fe->modificationTime, inode->i_mtime);
udf_time_to_disk_stamp(&fe->attrTime, inode->i_ctime);
- memset(&(fe->impIdent), 0, sizeof(regid));
+ memset(&(fe->impIdent), 0, sizeof(struct regid));
strcpy(fe->impIdent.ident, UDF_ID_DEVELOPER);
fe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
fe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
@@ -1533,7 +1540,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
udf_time_to_disk_stamp(&efe->createTime, iinfo->i_crtime);
udf_time_to_disk_stamp(&efe->attrTime, inode->i_ctime);
- memset(&(efe->impIdent), 0, sizeof(regid));
+ memset(&(efe->impIdent), 0, sizeof(struct regid));
strcpy(efe->impIdent.ident, UDF_ID_DEVELOPER);
efe->impIdent.identSuffix[0] = UDF_OS_CLASS_UNIX;
efe->impIdent.identSuffix[1] = UDF_OS_ID_LINUX;
@@ -1584,9 +1591,9 @@ static int udf_update_inode(struct inode *inode, int do_sync)
fe->descTag.tagLocation = cpu_to_le32(
iinfo->i_location.logicalBlockNum);
crclen += iinfo->i_lenEAttr + iinfo->i_lenAlloc -
- sizeof(tag);
+ sizeof(struct tag);
fe->descTag.descCRCLength = cpu_to_le16(crclen);
- fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(tag),
+ fe->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)fe + sizeof(struct tag),
crclen));
fe->descTag.tagChecksum = udf_tag_checksum(&fe->descTag);
@@ -1606,7 +1613,7 @@ static int udf_update_inode(struct inode *inode, int do_sync)
return err;
}
-struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
+struct inode *udf_iget(struct super_block *sb, struct kernel_lb_addr *ino)
{
unsigned long block = udf_get_lb_pblock(sb, ino, 0);
struct inode *inode = iget_locked(sb, block);
@@ -1615,7 +1622,7 @@ struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
return NULL;
if (inode->i_state & I_NEW) {
- memcpy(&UDF_I(inode)->i_location, &ino, sizeof(kernel_lb_addr));
+ memcpy(&UDF_I(inode)->i_location, ino, sizeof(struct kernel_lb_addr));
__udf_read_inode(inode);
unlock_new_inode(inode);
}
@@ -1623,10 +1630,10 @@ struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
if (is_bad_inode(inode))
goto out_iput;
- if (ino.logicalBlockNum >= UDF_SB(sb)->
- s_partmaps[ino.partitionReferenceNum].s_partition_len) {
+ if (ino->logicalBlockNum >= UDF_SB(sb)->
+ s_partmaps[ino->partitionReferenceNum].s_partition_len) {
udf_debug("block=%d, partition=%d out of range\n",
- ino.logicalBlockNum, ino.partitionReferenceNum);
+ ino->logicalBlockNum, ino->partitionReferenceNum);
make_bad_inode(inode);
goto out_iput;
}
@@ -1639,11 +1646,11 @@ struct inode *udf_iget(struct super_block *sb, kernel_lb_addr ino)
}
int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
- kernel_lb_addr eloc, uint32_t elen, int inc)
+ struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
int adsize;
- short_ad *sad = NULL;
- long_ad *lad = NULL;
+ struct short_ad *sad = NULL;
+ struct long_ad *lad = NULL;
struct allocExtDesc *aed;
int8_t etype;
uint8_t *ptr;
@@ -1657,9 +1664,9 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
ptr = epos->bh->b_data + epos->offset;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- adsize = sizeof(short_ad);
+ adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- adsize = sizeof(long_ad);
+ adsize = sizeof(struct long_ad);
else
return -1;
@@ -1667,7 +1674,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
char *sptr, *dptr;
struct buffer_head *nbh;
int err, loffset;
- kernel_lb_addr obloc = epos->block;
+ struct kernel_lb_addr obloc = epos->block;
epos->block.logicalBlockNum = udf_new_block(inode->i_sb, NULL,
obloc.partitionReferenceNum,
@@ -1675,7 +1682,7 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
if (!epos->block.logicalBlockNum)
return -1;
nbh = udf_tgetblk(inode->i_sb, udf_get_lb_pblock(inode->i_sb,
- epos->block,
+ &epos->block,
0));
if (!nbh)
return -1;
@@ -1712,20 +1719,20 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
}
if (UDF_SB(inode->i_sb)->s_udfrev >= 0x0200)
udf_new_tag(nbh->b_data, TAG_IDENT_AED, 3, 1,
- epos->block.logicalBlockNum, sizeof(tag));
+ epos->block.logicalBlockNum, sizeof(struct tag));
else
udf_new_tag(nbh->b_data, TAG_IDENT_AED, 2, 1,
- epos->block.logicalBlockNum, sizeof(tag));
+ epos->block.logicalBlockNum, sizeof(struct tag));
switch (iinfo->i_alloc_type) {
case ICBTAG_FLAG_AD_SHORT:
- sad = (short_ad *)sptr;
+ sad = (struct short_ad *)sptr;
sad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
inode->i_sb->s_blocksize);
sad->extPosition =
cpu_to_le32(epos->block.logicalBlockNum);
break;
case ICBTAG_FLAG_AD_LONG:
- lad = (long_ad *)sptr;
+ lad = (struct long_ad *)sptr;
lad->extLength = cpu_to_le32(EXT_NEXT_EXTENT_ALLOCDECS |
inode->i_sb->s_blocksize);
lad->extLocation = cpu_to_lelb(epos->block);
@@ -1769,12 +1776,12 @@ int8_t udf_add_aext(struct inode *inode, struct extent_position *epos,
}
int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
- kernel_lb_addr eloc, uint32_t elen, int inc)
+ struct kernel_lb_addr *eloc, uint32_t elen, int inc)
{
int adsize;
uint8_t *ptr;
- short_ad *sad;
- long_ad *lad;
+ struct short_ad *sad;
+ struct long_ad *lad;
struct udf_inode_info *iinfo = UDF_I(inode);
if (!epos->bh)
@@ -1786,17 +1793,17 @@ int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
switch (iinfo->i_alloc_type) {
case ICBTAG_FLAG_AD_SHORT:
- sad = (short_ad *)ptr;
+ sad = (struct short_ad *)ptr;
sad->extLength = cpu_to_le32(elen);
- sad->extPosition = cpu_to_le32(eloc.logicalBlockNum);
- adsize = sizeof(short_ad);
+ sad->extPosition = cpu_to_le32(eloc->logicalBlockNum);
+ adsize = sizeof(struct short_ad);
break;
case ICBTAG_FLAG_AD_LONG:
- lad = (long_ad *)ptr;
+ lad = (struct long_ad *)ptr;
lad->extLength = cpu_to_le32(elen);
- lad->extLocation = cpu_to_lelb(eloc);
+ lad->extLocation = cpu_to_lelb(*eloc);
memset(lad->impUse, 0x00, sizeof(lad->impUse));
- adsize = sizeof(long_ad);
+ adsize = sizeof(struct long_ad);
break;
default:
return -1;
@@ -1823,7 +1830,7 @@ int8_t udf_write_aext(struct inode *inode, struct extent_position *epos,
}
int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
- kernel_lb_addr *eloc, uint32_t *elen, int inc)
+ struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
int8_t etype;
@@ -1833,7 +1840,7 @@ int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
epos->block = *eloc;
epos->offset = sizeof(struct allocExtDesc);
brelse(epos->bh);
- block = udf_get_lb_pblock(inode->i_sb, epos->block, 0);
+ block = udf_get_lb_pblock(inode->i_sb, &epos->block, 0);
epos->bh = udf_tread(inode->i_sb, block);
if (!epos->bh) {
udf_debug("reading block %d failed!\n", block);
@@ -1845,13 +1852,13 @@ int8_t udf_next_aext(struct inode *inode, struct extent_position *epos,
}
int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
- kernel_lb_addr *eloc, uint32_t *elen, int inc)
+ struct kernel_lb_addr *eloc, uint32_t *elen, int inc)
{
int alen;
int8_t etype;
uint8_t *ptr;
- short_ad *sad;
- long_ad *lad;
+ struct short_ad *sad;
+ struct long_ad *lad;
struct udf_inode_info *iinfo = UDF_I(inode);
if (!epos->bh) {
@@ -1900,9 +1907,9 @@ int8_t udf_current_aext(struct inode *inode, struct extent_position *epos,
}
static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
- kernel_lb_addr neloc, uint32_t nelen)
+ struct kernel_lb_addr neloc, uint32_t nelen)
{
- kernel_lb_addr oeloc;
+ struct kernel_lb_addr oeloc;
uint32_t oelen;
int8_t etype;
@@ -1910,18 +1917,18 @@ static int8_t udf_insert_aext(struct inode *inode, struct extent_position epos,
get_bh(epos.bh);
while ((etype = udf_next_aext(inode, &epos, &oeloc, &oelen, 0)) != -1) {
- udf_write_aext(inode, &epos, neloc, nelen, 1);
+ udf_write_aext(inode, &epos, &neloc, nelen, 1);
neloc = oeloc;
nelen = (etype << 30) | oelen;
}
- udf_add_aext(inode, &epos, neloc, nelen, 1);
+ udf_add_aext(inode, &epos, &neloc, nelen, 1);
brelse(epos.bh);
return (nelen >> 30);
}
int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
- kernel_lb_addr eloc, uint32_t elen)
+ struct kernel_lb_addr eloc, uint32_t elen)
{
struct extent_position oepos;
int adsize;
@@ -1936,9 +1943,9 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
iinfo = UDF_I(inode);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- adsize = sizeof(short_ad);
+ adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- adsize = sizeof(long_ad);
+ adsize = sizeof(struct long_ad);
else
adsize = 0;
@@ -1947,7 +1954,7 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
return -1;
while ((etype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
- udf_write_aext(inode, &oepos, eloc, (etype << 30) | elen, 1);
+ udf_write_aext(inode, &oepos, &eloc, (etype << 30) | elen, 1);
if (oepos.bh != epos.bh) {
oepos.block = epos.block;
brelse(oepos.bh);
@@ -1956,13 +1963,13 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
oepos.offset = epos.offset - adsize;
}
}
- memset(&eloc, 0x00, sizeof(kernel_lb_addr));
+ memset(&eloc, 0x00, sizeof(struct kernel_lb_addr));
elen = 0;
if (epos.bh != oepos.bh) {
- udf_free_blocks(inode->i_sb, inode, epos.block, 0, 1);
- udf_write_aext(inode, &oepos, eloc, elen, 1);
- udf_write_aext(inode, &oepos, eloc, elen, 1);
+ udf_free_blocks(inode->i_sb, inode, &epos.block, 0, 1);
+ udf_write_aext(inode, &oepos, &eloc, elen, 1);
+ udf_write_aext(inode, &oepos, &eloc, elen, 1);
if (!oepos.bh) {
iinfo->i_lenAlloc -= (adsize * 2);
mark_inode_dirty(inode);
@@ -1979,7 +1986,7 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
mark_buffer_dirty_inode(oepos.bh, inode);
}
} else {
- udf_write_aext(inode, &oepos, eloc, elen, 1);
+ udf_write_aext(inode, &oepos, &eloc, elen, 1);
if (!oepos.bh) {
iinfo->i_lenAlloc -= adsize;
mark_inode_dirty(inode);
@@ -2004,7 +2011,7 @@ int8_t udf_delete_aext(struct inode *inode, struct extent_position epos,
}
int8_t inode_bmap(struct inode *inode, sector_t block,
- struct extent_position *pos, kernel_lb_addr *eloc,
+ struct extent_position *pos, struct kernel_lb_addr *eloc,
uint32_t *elen, sector_t *offset)
{
unsigned char blocksize_bits = inode->i_sb->s_blocksize_bits;
@@ -2036,7 +2043,7 @@ int8_t inode_bmap(struct inode *inode, sector_t block,
long udf_block_map(struct inode *inode, sector_t block)
{
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
struct extent_position epos = {};
@@ -2046,7 +2053,7 @@ long udf_block_map(struct inode *inode, sector_t block)
if (inode_bmap(inode, block, &epos, &eloc, &elen, &offset) ==
(EXT_RECORDED_ALLOCATED >> 30))
- ret = udf_get_lb_pblock(inode->i_sb, eloc, offset);
+ ret = udf_get_lb_pblock(inode->i_sb, &eloc, offset);
else
ret = 0;
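
udf_block_map() above resolves a file-relative block to a physical block by walking the inode's extent list via inode_bmap(). A toy, self-contained version of that walk, with structures and values invented for illustration:

#include <stdint.h>
#include <stdio.h>

struct extent {
	uint32_t start_block;	/* first physical block of the extent */
	uint32_t len_bytes;	/* extent length in bytes */
};

static long block_map(const struct extent *ext, int next,
		      uint32_t block, uint32_t bs)
{
	uint64_t want = (uint64_t)block * bs, pos = 0;
	int i;

	for (i = 0; i < next; i++) {
		if (want < pos + ext[i].len_bytes)
			return ext[i].start_block + (want - pos) / bs;
		pos += ext[i].len_bytes;
	}
	return 0;	/* hole or out of range, like udf_block_map() */
}

int main(void)
{
	struct extent ext[] = { { 100, 8192 }, { 500, 4096 } };

	printf("%ld\n", block_map(ext, 2, 2, 2048));	/* 102: extent 0 */
	printf("%ld\n", block_map(ext, 2, 5, 2048));	/* 501: extent 1 */
	return 0;
}
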
diff --git a/fs/udf/misc.c b/fs/udf/misc.c
index 84bf0fd4a4f1..9215700c00a4 100644
--- a/fs/udf/misc.c
+++ b/fs/udf/misc.c
@@ -134,10 +134,10 @@ struct genericFormat *udf_add_extendedattr(struct inode *inode, uint32_t size,
}
}
/* rewrite CRC + checksum of eahd */
- crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(tag);
+ crclen = sizeof(struct extendedAttrHeaderDesc) - sizeof(struct tag);
eahd->descTag.descCRCLength = cpu_to_le16(crclen);
eahd->descTag.descCRC = cpu_to_le16(crc_itu_t(0, (char *)eahd +
- sizeof(tag), crclen));
+ sizeof(struct tag), crclen));
eahd->descTag.tagChecksum = udf_tag_checksum(&eahd->descTag);
iinfo->i_lenEAttr += size;
return (struct genericFormat *)&ea[offset];
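
The CRC fixups above all follow the same UDF rule: descCRC covers the descriptor body starting just past the 16-byte tag. A self-contained sketch of that computation, with a bitwise CRC-ITU-T (polynomial 0x1021, matching the kernel's crc_itu_t) standing in for the library call; the buffer contents are invented:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

#define UDF_TAG_SIZE 16 /* on-disk size of struct tag */

/* bitwise CRC-ITU-T, same polynomial (0x1021) as lib/crc-itu-t.c */
static uint16_t crc_itu_t(uint16_t crc, const uint8_t *buf, size_t len)
{
	while (len--) {
		crc ^= (uint16_t)(*buf++) << 8;
		for (int i = 0; i < 8; i++)
			crc = (crc & 0x8000) ? (crc << 1) ^ 0x1021
					     : (uint16_t)(crc << 1);
	}
	return crc;
}

int main(void)
{
	uint8_t desc[32] = { 0 }; /* 16-byte tag followed by the body */

	desc[UDF_TAG_SIZE] = 0xab; /* sample body byte */
	/* descCRC covers everything after the tag */
	printf("descCRC = %04x\n",
	       crc_itu_t(0, desc + UDF_TAG_SIZE,
			 sizeof(desc) - UDF_TAG_SIZE));
	return 0;
}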
@@ -202,7 +202,7 @@ struct genericFormat *udf_get_extendedattr(struct inode *inode, uint32_t type,
struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
uint32_t location, uint16_t *ident)
{
- tag *tag_p;
+ struct tag *tag_p;
struct buffer_head *bh = NULL;
/* Read the block */
@@ -216,7 +216,7 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
return NULL;
}
- tag_p = (tag *)(bh->b_data);
+ tag_p = (struct tag *)(bh->b_data);
*ident = le16_to_cpu(tag_p->tagIdent);
@@ -241,9 +241,9 @@ struct buffer_head *udf_read_tagged(struct super_block *sb, uint32_t block,
}
/* Verify the descriptor CRC */
- if (le16_to_cpu(tag_p->descCRCLength) + sizeof(tag) > sb->s_blocksize ||
+ if (le16_to_cpu(tag_p->descCRCLength) + sizeof(struct tag) > sb->s_blocksize ||
le16_to_cpu(tag_p->descCRC) == crc_itu_t(0,
- bh->b_data + sizeof(tag),
+ bh->b_data + sizeof(struct tag),
le16_to_cpu(tag_p->descCRCLength)))
return bh;
@@ -255,27 +255,28 @@ error_out:
return NULL;
}
-struct buffer_head *udf_read_ptagged(struct super_block *sb, kernel_lb_addr loc,
+struct buffer_head *udf_read_ptagged(struct super_block *sb,
+ struct kernel_lb_addr *loc,
uint32_t offset, uint16_t *ident)
{
return udf_read_tagged(sb, udf_get_lb_pblock(sb, loc, offset),
- loc.logicalBlockNum + offset, ident);
+ loc->logicalBlockNum + offset, ident);
}
void udf_update_tag(char *data, int length)
{
- tag *tptr = (tag *)data;
- length -= sizeof(tag);
+ struct tag *tptr = (struct tag *)data;
+ length -= sizeof(struct tag);
tptr->descCRCLength = cpu_to_le16(length);
- tptr->descCRC = cpu_to_le16(crc_itu_t(0, data + sizeof(tag), length));
+ tptr->descCRC = cpu_to_le16(crc_itu_t(0, data + sizeof(struct tag), length));
tptr->tagChecksum = udf_tag_checksum(tptr);
}
void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
uint32_t loc, int length)
{
- tag *tptr = (tag *)data;
+ struct tag *tptr = (struct tag *)data;
tptr->tagIdent = cpu_to_le16(ident);
tptr->descVersion = cpu_to_le16(version);
tptr->tagSerialNum = cpu_to_le16(snum);
@@ -283,12 +284,12 @@ void udf_new_tag(char *data, uint16_t ident, uint16_t version, uint16_t snum,
udf_update_tag(data, length);
}
-u8 udf_tag_checksum(const tag *t)
+u8 udf_tag_checksum(const struct tag *t)
{
u8 *data = (u8 *)t;
u8 checksum = 0;
int i;
- for (i = 0; i < sizeof(tag); ++i)
+ for (i = 0; i < sizeof(struct tag); ++i)
if (i != 4) /* position of checksum */
checksum += data[i];
return checksum;
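
udf_tag_checksum above is a plain byte sum over the 16-byte tag that skips offset 4, where the checksum itself is stored. A standalone sketch of the same algorithm with invented tag bytes:

#include <stdint.h>
#include <stdio.h>

#define UDF_TAG_SIZE 16

/* sum every tag byte except the checksum slot at offset 4 */
static uint8_t tag_checksum(const uint8_t *tag)
{
	uint8_t sum = 0;

	for (int i = 0; i < UDF_TAG_SIZE; i++)
		if (i != 4)
			sum += tag[i];
	return sum;
}

int main(void)
{
	uint8_t tag[UDF_TAG_SIZE] = { 2, 0, 3, 0 }; /* sample tag bytes */

	tag[4] = tag_checksum(tag); /* stamp the checksum in place */
	printf("checksum = %u\n", tag[4]);
	return 0;
}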
diff --git a/fs/udf/namei.c b/fs/udf/namei.c
index f84bfaa8d941..6a29fa34c478 100644
--- a/fs/udf/namei.c
+++ b/fs/udf/namei.c
@@ -47,7 +47,7 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
struct fileIdentDesc *sfi, struct udf_fileident_bh *fibh,
uint8_t *impuse, uint8_t *fileident)
{
- uint16_t crclen = fibh->eoffset - fibh->soffset - sizeof(tag);
+ uint16_t crclen = fibh->eoffset - fibh->soffset - sizeof(struct tag);
uint16_t crc;
int offset;
uint16_t liu = le16_to_cpu(cfi->lengthOfImpUse);
@@ -99,18 +99,18 @@ int udf_write_fi(struct inode *inode, struct fileIdentDesc *cfi,
memset(fibh->ebh->b_data, 0x00, padlen + offset);
}
- crc = crc_itu_t(0, (uint8_t *)cfi + sizeof(tag),
- sizeof(struct fileIdentDesc) - sizeof(tag));
+ crc = crc_itu_t(0, (uint8_t *)cfi + sizeof(struct tag),
+ sizeof(struct fileIdentDesc) - sizeof(struct tag));
if (fibh->sbh == fibh->ebh) {
crc = crc_itu_t(crc, (uint8_t *)sfi->impUse,
- crclen + sizeof(tag) -
+ crclen + sizeof(struct tag) -
sizeof(struct fileIdentDesc));
} else if (sizeof(struct fileIdentDesc) >= -fibh->soffset) {
crc = crc_itu_t(crc, fibh->ebh->b_data +
sizeof(struct fileIdentDesc) +
fibh->soffset,
- crclen + sizeof(tag) -
+ crclen + sizeof(struct tag) -
sizeof(struct fileIdentDesc));
} else {
crc = crc_itu_t(crc, (uint8_t *)sfi->impUse,
@@ -154,7 +154,7 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
uint8_t lfi;
uint16_t liu;
loff_t size;
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
struct extent_position epos = {};
@@ -171,12 +171,12 @@ static struct fileIdentDesc *udf_find_entry(struct inode *dir,
if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
&eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30))
goto out_err;
- block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
+ block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- epos.offset -= sizeof(short_ad);
+ epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- epos.offset -= sizeof(long_ad);
+ epos.offset -= sizeof(struct long_ad);
} else
offset = 0;
@@ -268,7 +268,7 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
#ifdef UDF_RECOVERY
/* temporary shorthand for specifying files by inode number */
if (!strncmp(dentry->d_name.name, ".B=", 3)) {
- kernel_lb_addr lb = {
+ struct kernel_lb_addr lb = {
.logicalBlockNum = 0,
.partitionReferenceNum =
simple_strtoul(dentry->d_name.name + 3,
@@ -283,11 +283,14 @@ static struct dentry *udf_lookup(struct inode *dir, struct dentry *dentry,
#endif /* UDF_RECOVERY */
if (udf_find_entry(dir, &dentry->d_name, &fibh, &cfi)) {
+ struct kernel_lb_addr loc;
+
if (fibh.sbh != fibh.ebh)
brelse(fibh.ebh);
brelse(fibh.sbh);
- inode = udf_iget(dir->i_sb, lelb_to_cpu(cfi.icb.extLocation));
+ loc = lelb_to_cpu(cfi.icb.extLocation);
+ inode = udf_iget(dir->i_sb, &loc);
if (!inode) {
unlock_kernel();
return ERR_PTR(-EACCES);
@@ -313,7 +316,7 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
uint8_t lfi;
uint16_t liu;
int block;
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
uint32_t elen = 0;
sector_t offset;
struct extent_position epos = {};
@@ -351,16 +354,16 @@ static struct fileIdentDesc *udf_add_entry(struct inode *dir,
if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits, &epos,
&eloc, &elen, &offset) != (EXT_RECORDED_ALLOCATED >> 30)) {
block = udf_get_lb_pblock(dir->i_sb,
- dinfo->i_location, 0);
+ &dinfo->i_location, 0);
fibh->soffset = fibh->eoffset = sb->s_blocksize;
goto add;
}
- block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
+ block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- epos.offset -= sizeof(short_ad);
+ epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- epos.offset -= sizeof(long_ad);
+ epos.offset -= sizeof(struct long_ad);
} else
offset = 0;
@@ -409,10 +412,10 @@ add:
if (dinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB && elen) {
elen = (elen + sb->s_blocksize - 1) & ~(sb->s_blocksize - 1);
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- epos.offset -= sizeof(short_ad);
+ epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- epos.offset -= sizeof(long_ad);
- udf_write_aext(dir, &epos, eloc, elen, 1);
+ epos.offset -= sizeof(struct long_ad);
+ udf_write_aext(dir, &epos, &eloc, elen, 1);
}
f_pos += nfidlen;
@@ -494,10 +497,10 @@ add:
memset(cfi, 0, sizeof(struct fileIdentDesc));
if (UDF_SB(sb)->s_udfrev >= 0x0200)
udf_new_tag((char *)cfi, TAG_IDENT_FID, 3, 1, block,
- sizeof(tag));
+ sizeof(struct tag));
else
udf_new_tag((char *)cfi, TAG_IDENT_FID, 2, 1, block,
- sizeof(tag));
+ sizeof(struct tag));
cfi->fileVersionNum = cpu_to_le16(1);
cfi->lengthFileIdent = namelen;
cfi->lengthOfImpUse = cpu_to_le16(0);
@@ -530,7 +533,7 @@ static int udf_delete_entry(struct inode *inode, struct fileIdentDesc *fi,
cfi->fileCharacteristics |= FID_FILE_CHAR_DELETED;
if (UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT))
- memset(&(cfi->icb), 0x00, sizeof(long_ad));
+ memset(&(cfi->icb), 0x00, sizeof(struct long_ad));
return udf_write_fi(inode, cfi, fi, fibh, NULL, NULL);
}
@@ -710,7 +713,7 @@ static int empty_dir(struct inode *dir)
loff_t f_pos;
loff_t size = udf_ext0_offset(dir) + dir->i_size;
int block;
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
uint32_t elen;
sector_t offset;
struct extent_position epos = {};
@@ -724,12 +727,12 @@ static int empty_dir(struct inode *dir)
else if (inode_bmap(dir, f_pos >> dir->i_sb->s_blocksize_bits,
&epos, &eloc, &elen, &offset) ==
(EXT_RECORDED_ALLOCATED >> 30)) {
- block = udf_get_lb_pblock(dir->i_sb, eloc, offset);
+ block = udf_get_lb_pblock(dir->i_sb, &eloc, offset);
if ((++offset << dir->i_sb->s_blocksize_bits) < elen) {
if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- epos.offset -= sizeof(short_ad);
+ epos.offset -= sizeof(struct short_ad);
else if (dinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- epos.offset -= sizeof(long_ad);
+ epos.offset -= sizeof(struct long_ad);
} else
offset = 0;
@@ -778,7 +781,7 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
struct inode *inode = dentry->d_inode;
struct udf_fileident_bh fibh;
struct fileIdentDesc *fi, cfi;
- kernel_lb_addr tloc;
+ struct kernel_lb_addr tloc;
retval = -ENOENT;
lock_kernel();
@@ -788,7 +791,7 @@ static int udf_rmdir(struct inode *dir, struct dentry *dentry)
retval = -EIO;
tloc = lelb_to_cpu(cfi.icb.extLocation);
- if (udf_get_lb_pblock(dir->i_sb, tloc, 0) != inode->i_ino)
+ if (udf_get_lb_pblock(dir->i_sb, &tloc, 0) != inode->i_ino)
goto end_rmdir;
retval = -ENOTEMPTY;
if (!empty_dir(inode))
@@ -824,7 +827,7 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry)
struct udf_fileident_bh fibh;
struct fileIdentDesc *fi;
struct fileIdentDesc cfi;
- kernel_lb_addr tloc;
+ struct kernel_lb_addr tloc;
retval = -ENOENT;
lock_kernel();
@@ -834,7 +837,7 @@ static int udf_unlink(struct inode *dir, struct dentry *dentry)
retval = -EIO;
tloc = lelb_to_cpu(cfi.icb.extLocation);
- if (udf_get_lb_pblock(dir->i_sb, tloc, 0) != inode->i_ino)
+ if (udf_get_lb_pblock(dir->i_sb, &tloc, 0) != inode->i_ino)
goto end_unlink;
if (!inode->i_nlink) {
@@ -897,7 +900,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
inode->i_op = &page_symlink_inode_operations;
if (iinfo->i_alloc_type != ICBTAG_FLAG_AD_IN_ICB) {
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
uint32_t bsize;
block = udf_new_block(inode->i_sb, inode,
@@ -913,7 +916,7 @@ static int udf_symlink(struct inode *dir, struct dentry *dentry,
iinfo->i_location.partitionReferenceNum;
bsize = inode->i_sb->s_blocksize;
iinfo->i_lenExtents = bsize;
- udf_add_aext(inode, &epos, eloc, bsize, 0);
+ udf_add_aext(inode, &epos, &eloc, bsize, 0);
brelse(epos.bh);
block = udf_get_pblock(inode->i_sb, block,
@@ -1108,7 +1111,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
struct fileIdentDesc ocfi, ncfi;
struct buffer_head *dir_bh = NULL;
int retval = -ENOENT;
- kernel_lb_addr tloc;
+ struct kernel_lb_addr tloc;
struct udf_inode_info *old_iinfo = UDF_I(old_inode);
lock_kernel();
@@ -1119,7 +1122,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
brelse(ofibh.sbh);
}
tloc = lelb_to_cpu(ocfi.icb.extLocation);
- if (!ofi || udf_get_lb_pblock(old_dir->i_sb, tloc, 0)
+ if (!ofi || udf_get_lb_pblock(old_dir->i_sb, &tloc, 0)
!= old_inode->i_ino)
goto end_rename;
@@ -1158,7 +1161,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
if (!dir_fi)
goto end_rename;
tloc = lelb_to_cpu(dir_fi->icb.extLocation);
- if (udf_get_lb_pblock(old_inode->i_sb, tloc, 0) !=
+ if (udf_get_lb_pblock(old_inode->i_sb, &tloc, 0) !=
old_dir->i_ino)
goto end_rename;
@@ -1187,7 +1190,7 @@ static int udf_rename(struct inode *old_dir, struct dentry *old_dentry,
*/
ncfi.fileVersionNum = ocfi.fileVersionNum;
ncfi.fileCharacteristics = ocfi.fileCharacteristics;
- memcpy(&(ncfi.icb), &(ocfi.icb), sizeof(long_ad));
+ memcpy(&(ncfi.icb), &(ocfi.icb), sizeof(struct long_ad));
udf_write_fi(new_dir, &ncfi, nfi, &nfibh, NULL, NULL);
/* The old fid may have moved - find it again */
@@ -1242,6 +1245,7 @@ end_rename:
static struct dentry *udf_get_parent(struct dentry *child)
{
+ struct kernel_lb_addr tloc;
struct inode *inode = NULL;
struct qstr dotdot = {.name = "..", .len = 2};
struct fileIdentDesc cfi;
@@ -1255,8 +1259,8 @@ static struct dentry *udf_get_parent(struct dentry *child)
brelse(fibh.ebh);
brelse(fibh.sbh);
- inode = udf_iget(child->d_inode->i_sb,
- lelb_to_cpu(cfi.icb.extLocation));
+ tloc = lelb_to_cpu(cfi.icb.extLocation);
+ inode = udf_iget(child->d_inode->i_sb, &tloc);
if (!inode)
goto out_unlock;
unlock_kernel();
@@ -1272,14 +1276,14 @@ static struct dentry *udf_nfs_get_inode(struct super_block *sb, u32 block,
u16 partref, __u32 generation)
{
struct inode *inode;
- kernel_lb_addr loc;
+ struct kernel_lb_addr loc;
if (block == 0)
return ERR_PTR(-ESTALE);
loc.logicalBlockNum = block;
loc.partitionReferenceNum = partref;
- inode = udf_iget(sb, loc);
+ inode = udf_iget(sb, &loc);
if (inode == NULL)
return ERR_PTR(-ENOMEM);
@@ -1318,7 +1322,7 @@ static int udf_encode_fh(struct dentry *de, __u32 *fh, int *lenp,
{
int len = *lenp;
struct inode *inode = de->d_inode;
- kernel_lb_addr location = UDF_I(inode)->i_location;
+ struct kernel_lb_addr location = UDF_I(inode)->i_location;
struct fid *fid = (struct fid *)fh;
int type = FILEID_UDF_WITHOUT_PARENT;
diff --git a/fs/udf/osta_udf.h b/fs/udf/osta_udf.h
index 65ff47902bd2..fbff74654df2 100644
--- a/fs/udf/osta_udf.h
+++ b/fs/udf/osta_udf.h
@@ -85,7 +85,7 @@ struct appIdentSuffix {
/* Logical Volume Integrity Descriptor (UDF 2.50 2.2.6) */
/* Implementation Use (UDF 2.50 2.2.6.4) */
struct logicalVolIntegrityDescImpUse {
- regid impIdent;
+ struct regid impIdent;
__le32 numFiles;
__le32 numDirs;
__le16 minUDFReadRev;
@@ -97,12 +97,12 @@ struct logicalVolIntegrityDescImpUse {
/* Implementation Use Volume Descriptor (UDF 2.50 2.2.7) */
/* Implementation Use (UDF 2.50 2.2.7.2) */
struct impUseVolDescImpUse {
- charspec LVICharset;
+ struct charspec LVICharset;
dstring logicalVolIdent[128];
dstring LVInfo1[36];
dstring LVInfo2[36];
dstring LVInfo3[36];
- regid impIdent;
+ struct regid impIdent;
uint8_t impUse[128];
} __attribute__ ((packed));
@@ -110,7 +110,7 @@ struct udfPartitionMap2 {
uint8_t partitionMapType;
uint8_t partitionMapLength;
uint8_t reserved1[2];
- regid partIdent;
+ struct regid partIdent;
__le16 volSeqNum;
__le16 partitionNum;
} __attribute__ ((packed));
@@ -120,7 +120,7 @@ struct virtualPartitionMap {
uint8_t partitionMapType;
uint8_t partitionMapLength;
uint8_t reserved1[2];
- regid partIdent;
+ struct regid partIdent;
__le16 volSeqNum;
__le16 partitionNum;
uint8_t reserved2[24];
@@ -131,7 +131,7 @@ struct sparablePartitionMap {
uint8_t partitionMapType;
uint8_t partitionMapLength;
uint8_t reserved1[2];
- regid partIdent;
+ struct regid partIdent;
__le16 volSeqNum;
__le16 partitionNum;
__le16 packetLength;
@@ -146,7 +146,7 @@ struct metadataPartitionMap {
uint8_t partitionMapType;
uint8_t partitionMapLength;
uint8_t reserved1[2];
- regid partIdent;
+ struct regid partIdent;
__le16 volSeqNum;
__le16 partitionNum;
__le32 metadataFileLoc;
@@ -161,7 +161,7 @@ struct metadataPartitionMap {
/* Virtual Allocation Table (UDF 1.5 2.2.10) */
struct virtualAllocationTable15 {
__le32 VirtualSector[0];
- regid vatIdent;
+ struct regid vatIdent;
__le32 previousVATICBLoc;
} __attribute__ ((packed));
@@ -192,8 +192,8 @@ struct sparingEntry {
} __attribute__ ((packed));
struct sparingTable {
- tag descTag;
- regid sparingIdent;
+ struct tag descTag;
+ struct regid sparingIdent;
__le16 reallocationTableLen;
__le16 reserved;
__le32 sequenceNum;
@@ -206,7 +206,7 @@ struct sparingTable {
#define ICBTAG_FILE_TYPE_MIRROR 0xFB
#define ICBTAG_FILE_TYPE_BITMAP 0xFC
-/* struct long_ad ICB - ADImpUse (UDF 2.50 2.2.4.3) */
+/* struct long_ad ICB - ADImpUse (UDF 2.50 2.2.4.3) */

struct allocDescImpUse {
__le16 flags;
uint8_t impUse[4];
diff --git a/fs/udf/partition.c b/fs/udf/partition.c
index 96dfd207c3d6..4b540ee632d5 100644
--- a/fs/udf/partition.c
+++ b/fs/udf/partition.c
@@ -273,7 +273,7 @@ static uint32_t udf_try_read_meta(struct inode *inode, uint32_t block,
{
struct super_block *sb = inode->i_sb;
struct udf_part_map *map;
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
uint32_t elen;
sector_t ext_offset;
struct extent_position epos = {};
diff --git a/fs/udf/super.c b/fs/udf/super.c
index e25e7010627b..4457da0e0bc9 100644
--- a/fs/udf/super.c
+++ b/fs/udf/super.c
@@ -85,12 +85,12 @@ static void udf_write_super(struct super_block *);
static int udf_remount_fs(struct super_block *, int *, char *);
static int udf_check_valid(struct super_block *, int, int);
static int udf_vrs(struct super_block *sb, int silent);
-static void udf_load_logicalvolint(struct super_block *, kernel_extent_ad);
+static void udf_load_logicalvolint(struct super_block *, struct kernel_extent_ad);
static void udf_find_anchor(struct super_block *);
-static int udf_find_fileset(struct super_block *, kernel_lb_addr *,
- kernel_lb_addr *);
+static int udf_find_fileset(struct super_block *, struct kernel_lb_addr *,
+ struct kernel_lb_addr *);
static void udf_load_fileset(struct super_block *, struct buffer_head *,
- kernel_lb_addr *);
+ struct kernel_lb_addr *);
static void udf_open_lvid(struct super_block *);
static void udf_close_lvid(struct super_block *);
static unsigned int udf_count_free(struct super_block *);
@@ -201,6 +201,8 @@ struct udf_options {
mode_t umask;
gid_t gid;
uid_t uid;
+ mode_t fmode;
+ mode_t dmode;
struct nls_table *nls_map;
};
@@ -282,6 +284,10 @@ static int udf_show_options(struct seq_file *seq, struct vfsmount *mnt)
seq_printf(seq, ",gid=%u", sbi->s_gid);
if (sbi->s_umask != 0)
seq_printf(seq, ",umask=%o", sbi->s_umask);
+ if (sbi->s_fmode != UDF_INVALID_MODE)
+ seq_printf(seq, ",mode=%o", sbi->s_fmode);
+ if (sbi->s_dmode != UDF_INVALID_MODE)
+ seq_printf(seq, ",dmode=%o", sbi->s_dmode);
if (UDF_QUERY_FLAG(sb, UDF_FLAG_SESSION_SET))
seq_printf(seq, ",session=%u", sbi->s_session);
if (UDF_QUERY_FLAG(sb, UDF_FLAG_LASTBLOCK_SET))
@@ -317,6 +323,8 @@ static int udf_show_options(struct seq_file *seq, struct vfsmount *mnt)
*
* gid= Set the default group.
* umask= Set the default umask.
+ * mode= Set the default file permissions.
+ * dmode= Set the default directory permissions.
* uid= Set the default user.
* bs= Set the block size.
* unhide Show otherwise hidden files.
@@ -366,7 +374,8 @@ enum {
Opt_gid, Opt_uid, Opt_umask, Opt_session, Opt_lastblock,
Opt_anchor, Opt_volume, Opt_partition, Opt_fileset,
Opt_rootdir, Opt_utf8, Opt_iocharset,
- Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore
+ Opt_err, Opt_uforget, Opt_uignore, Opt_gforget, Opt_gignore,
+ Opt_fmode, Opt_dmode
};
static const match_table_t tokens = {
@@ -395,6 +404,8 @@ static const match_table_t tokens = {
{Opt_rootdir, "rootdir=%u"},
{Opt_utf8, "utf8"},
{Opt_iocharset, "iocharset=%s"},
+ {Opt_fmode, "mode=%o"},
+ {Opt_dmode, "dmode=%o"},
{Opt_err, NULL}
};
@@ -531,6 +542,16 @@ static int udf_parse_options(char *options, struct udf_options *uopt,
case Opt_gforget:
uopt->flags |= (1 << UDF_FLAG_GID_FORGET);
break;
+ case Opt_fmode:
+ if (match_octal(args, &option))
+ return 0;
+ uopt->fmode = option & 0777;
+ break;
+ case Opt_dmode:
+ if (match_octal(args, &option))
+ return 0;
+ uopt->dmode = option & 0777;
+ break;
default:
printk(KERN_ERR "udf: bad mount option \"%s\" "
"or missing value\n", p);
@@ -560,6 +581,8 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
uopt.uid = sbi->s_uid;
uopt.gid = sbi->s_gid;
uopt.umask = sbi->s_umask;
+ uopt.fmode = sbi->s_fmode;
+ uopt.dmode = sbi->s_dmode;
if (!udf_parse_options(options, &uopt, true))
return -EINVAL;
@@ -568,6 +591,8 @@ static int udf_remount_fs(struct super_block *sb, int *flags, char *options)
sbi->s_uid = uopt.uid;
sbi->s_gid = uopt.gid;
sbi->s_umask = uopt.umask;
+ sbi->s_fmode = uopt.fmode;
+ sbi->s_dmode = uopt.dmode;
if (sbi->s_lvid_bh) {
int write_rev = le16_to_cpu(udf_sb_lvidiu(sbi)->minUDFWriteRev);
@@ -706,13 +731,18 @@ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock)
sector_t last[6];
int i;
struct udf_sb_info *sbi = UDF_SB(sb);
-
- last[0] = lastblock;
- last[1] = last[0] - 1;
- last[2] = last[0] + 1;
- last[3] = last[0] - 2;
- last[4] = last[0] - 150;
- last[5] = last[0] - 152;
+ int last_count = 0;
+
+ last[last_count++] = lastblock;
+ if (lastblock >= 1)
+ last[last_count++] = lastblock - 1;
+ last[last_count++] = lastblock + 1;
+ if (lastblock >= 2)
+ last[last_count++] = lastblock - 2;
+ if (lastblock >= 150)
+ last[last_count++] = lastblock - 150;
+ if (lastblock >= 152)
+ last[last_count++] = lastblock - 152;
/* according to spec, anchor is in either:
* block 256
@@ -720,9 +750,7 @@ static sector_t udf_scan_anchors(struct super_block *sb, sector_t lastblock)
* lastblock
* however, if the disc isn't closed, it could be 512 */
- for (i = 0; i < ARRAY_SIZE(last); i++) {
- if (last[i] < 0)
- continue;
+ for (i = 0; i < last_count; i++) {
if (last[i] >= sb->s_bdev->bd_inode->i_size >>
sb->s_blocksize_bits)
continue;
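
The rewrite above matters because sector_t is unsigned: on a short medium, lastblock - 152 wraps to a huge value rather than going negative, so the old "last[i] < 0" filter could never trigger. A minimal demonstration, with uint64_t standing in for sector_t:

#include <stdint.h>
#include <stdio.h>

typedef uint64_t sector_t; /* sector_t is unsigned in the kernel */

int main(void)
{
	sector_t lastblock = 100;	/* fewer than 152 blocks */
	sector_t wrapped = lastblock - 152;

	/* wraps to 2^64 - 52; "wrapped < 0" is always false */
	printf("%llu\n", (unsigned long long)wrapped);

	/* the fixed code guards before subtracting instead */
	if (lastblock >= 152)
		printf("safe: %llu\n",
		       (unsigned long long)(lastblock - 152));
	return 0;
}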
@@ -810,8 +838,8 @@ check_anchor:
}
static int udf_find_fileset(struct super_block *sb,
- kernel_lb_addr *fileset,
- kernel_lb_addr *root)
+ struct kernel_lb_addr *fileset,
+ struct kernel_lb_addr *root)
{
struct buffer_head *bh = NULL;
long lastblock;
@@ -820,7 +848,7 @@ static int udf_find_fileset(struct super_block *sb,
if (fileset->logicalBlockNum != 0xFFFFFFFF ||
fileset->partitionReferenceNum != 0xFFFF) {
- bh = udf_read_ptagged(sb, *fileset, 0, &ident);
+ bh = udf_read_ptagged(sb, fileset, 0, &ident);
if (!bh) {
return 1;
@@ -834,7 +862,7 @@ static int udf_find_fileset(struct super_block *sb,
sbi = UDF_SB(sb);
if (!bh) {
/* Search backwards through the partitions */
- kernel_lb_addr newfileset;
+ struct kernel_lb_addr newfileset;
/* --> cvg: FIXME - is it reasonable? */
return 1;
@@ -850,7 +878,7 @@ static int udf_find_fileset(struct super_block *sb,
newfileset.logicalBlockNum = 0;
do {
- bh = udf_read_ptagged(sb, newfileset, 0,
+ bh = udf_read_ptagged(sb, &newfileset, 0,
&ident);
if (!bh) {
newfileset.logicalBlockNum++;
@@ -902,14 +930,23 @@ static int udf_find_fileset(struct super_block *sb,
static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
{
struct primaryVolDesc *pvoldesc;
- struct ustr instr;
- struct ustr outstr;
+ struct ustr *instr, *outstr;
struct buffer_head *bh;
uint16_t ident;
+ int ret = 1;
+
+ instr = kmalloc(sizeof(struct ustr), GFP_NOFS);
+ if (!instr)
+ return 1;
+
+ outstr = kmalloc(sizeof(struct ustr), GFP_NOFS);
+ if (!outstr)
+ goto out1;
bh = udf_read_tagged(sb, block, block, &ident);
if (!bh)
- return 1;
+ goto out2;
+
BUG_ON(ident != TAG_IDENT_PVD);
pvoldesc = (struct primaryVolDesc *)bh->b_data;
@@ -917,7 +954,7 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
if (udf_disk_stamp_to_time(&UDF_SB(sb)->s_record_time,
pvoldesc->recordingDateAndTime)) {
#ifdef UDFFS_DEBUG
- timestamp *ts = &pvoldesc->recordingDateAndTime;
+ struct timestamp *ts = &pvoldesc->recordingDateAndTime;
udf_debug("recording time %04u/%02u/%02u"
" %02u:%02u (%x)\n",
le16_to_cpu(ts->year), ts->month, ts->day, ts->hour,
@@ -925,20 +962,25 @@ static int udf_load_pvoldesc(struct super_block *sb, sector_t block)
#endif
}
- if (!udf_build_ustr(&instr, pvoldesc->volIdent, 32))
- if (udf_CS0toUTF8(&outstr, &instr)) {
- strncpy(UDF_SB(sb)->s_volume_ident, outstr.u_name,
- outstr.u_len > 31 ? 31 : outstr.u_len);
+ if (!udf_build_ustr(instr, pvoldesc->volIdent, 32))
+ if (udf_CS0toUTF8(outstr, instr)) {
+ strncpy(UDF_SB(sb)->s_volume_ident, outstr->u_name,
+ outstr->u_len > 31 ? 31 : outstr->u_len);
udf_debug("volIdent[] = '%s'\n",
UDF_SB(sb)->s_volume_ident);
}
- if (!udf_build_ustr(&instr, pvoldesc->volSetIdent, 128))
- if (udf_CS0toUTF8(&outstr, &instr))
- udf_debug("volSetIdent[] = '%s'\n", outstr.u_name);
+ if (!udf_build_ustr(instr, pvoldesc->volSetIdent, 128))
+ if (udf_CS0toUTF8(outstr, instr))
+ udf_debug("volSetIdent[] = '%s'\n", outstr->u_name);
brelse(bh);
- return 0;
+ ret = 0;
+out2:
+ kfree(outstr);
+out1:
+ kfree(instr);
+ return ret;
}
static int udf_load_metadata_files(struct super_block *sb, int partition)
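
Moving the two struct ustr buffers off the stack follows the usual kernel pattern for large temporaries: allocate each one on the heap and unwind through a goto ladder so every exit frees exactly what was allocated. A user-space sketch of the shape (malloc standing in for kmalloc, a 256-byte buffer standing in for struct ustr, and the conversion step elided):

#include <stdlib.h>
#include <string.h>

#define USTR_SIZE 256 /* stand-in for sizeof(struct ustr) */

/* allocate both buffers up front; each failure frees its elders */
static int load_volume_ident(char *out, size_t outlen)
{
	char *instr, *outstr;
	int ret = 1; /* pessimistic default, cleared on success */

	instr = malloc(USTR_SIZE);
	if (!instr)
		return 1;
	outstr = malloc(USTR_SIZE);
	if (!outstr)
		goto out1;

	/* ... the real code converts CS0 to UTF-8 here ... */
	strncpy(out, "volume", outlen);
	ret = 0;

	free(outstr);
out1:
	free(instr);
	return ret;
}

int main(void)
{
	char ident[32];

	return load_volume_ident(ident, sizeof(ident));
}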
@@ -946,7 +988,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map;
struct udf_meta_data *mdata;
- kernel_lb_addr addr;
+ struct kernel_lb_addr addr;
int fe_error = 0;
map = &sbi->s_partmaps[partition];
@@ -959,7 +1001,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
udf_debug("Metadata file location: block = %d part = %d\n",
addr.logicalBlockNum, addr.partitionReferenceNum);
- mdata->s_metadata_fe = udf_iget(sb, addr);
+ mdata->s_metadata_fe = udf_iget(sb, &addr);
if (mdata->s_metadata_fe == NULL) {
udf_warning(sb, __func__, "metadata inode efe not found, "
@@ -981,7 +1023,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
udf_debug("Mirror metadata file location: block = %d part = %d\n",
addr.logicalBlockNum, addr.partitionReferenceNum);
- mdata->s_mirror_fe = udf_iget(sb, addr);
+ mdata->s_mirror_fe = udf_iget(sb, &addr);
if (mdata->s_mirror_fe == NULL) {
if (fe_error) {
@@ -1013,7 +1055,7 @@ static int udf_load_metadata_files(struct super_block *sb, int partition)
udf_debug("Bitmap file location: block = %d part = %d\n",
addr.logicalBlockNum, addr.partitionReferenceNum);
- mdata->s_bitmap_fe = udf_iget(sb, addr);
+ mdata->s_bitmap_fe = udf_iget(sb, &addr);
if (mdata->s_bitmap_fe == NULL) {
if (sb->s_flags & MS_RDONLY)
@@ -1037,7 +1079,7 @@ error_exit:
}
static void udf_load_fileset(struct super_block *sb, struct buffer_head *bh,
- kernel_lb_addr *root)
+ struct kernel_lb_addr *root)
{
struct fileSetDesc *fset;
@@ -1119,13 +1161,13 @@ static int udf_fill_partdesc_info(struct super_block *sb,
phd = (struct partitionHeaderDesc *)p->partitionContentsUse;
if (phd->unallocSpaceTable.extLength) {
- kernel_lb_addr loc = {
+ struct kernel_lb_addr loc = {
.logicalBlockNum = le32_to_cpu(
phd->unallocSpaceTable.extPosition),
.partitionReferenceNum = p_index,
};
- map->s_uspace.s_table = udf_iget(sb, loc);
+ map->s_uspace.s_table = udf_iget(sb, &loc);
if (!map->s_uspace.s_table) {
udf_debug("cannot load unallocSpaceTable (part %d)\n",
p_index);
@@ -1154,13 +1196,13 @@ static int udf_fill_partdesc_info(struct super_block *sb,
udf_debug("partitionIntegrityTable (part %d)\n", p_index);
if (phd->freedSpaceTable.extLength) {
- kernel_lb_addr loc = {
+ struct kernel_lb_addr loc = {
.logicalBlockNum = le32_to_cpu(
phd->freedSpaceTable.extPosition),
.partitionReferenceNum = p_index,
};
- map->s_fspace.s_table = udf_iget(sb, loc);
+ map->s_fspace.s_table = udf_iget(sb, &loc);
if (!map->s_fspace.s_table) {
udf_debug("cannot load freedSpaceTable (part %d)\n",
p_index);
@@ -1192,7 +1234,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
{
struct udf_sb_info *sbi = UDF_SB(sb);
struct udf_part_map *map = &sbi->s_partmaps[p_index];
- kernel_lb_addr ino;
+ struct kernel_lb_addr ino;
struct buffer_head *bh = NULL;
struct udf_inode_info *vati;
uint32_t pos;
@@ -1201,7 +1243,7 @@ static int udf_load_vat(struct super_block *sb, int p_index, int type1_index)
/* VAT file entry is in the last recorded block */
ino.partitionReferenceNum = type1_index;
ino.logicalBlockNum = sbi->s_last_block - map->s_partition_root;
- sbi->s_vat_inode = udf_iget(sb, ino);
+ sbi->s_vat_inode = udf_iget(sb, &ino);
if (!sbi->s_vat_inode)
return 1;
@@ -1322,7 +1364,7 @@ out_bh:
}
static int udf_load_logicalvol(struct super_block *sb, sector_t block,
- kernel_lb_addr *fileset)
+ struct kernel_lb_addr *fileset)
{
struct logicalVolDesc *lvd;
int i, j, offset;
@@ -1471,7 +1513,7 @@ static int udf_load_logicalvol(struct super_block *sb, sector_t block,
}
if (fileset) {
- long_ad *la = (long_ad *)&(lvd->logicalVolContentsUse[0]);
+ struct long_ad *la = (struct long_ad *)&(lvd->logicalVolContentsUse[0]);
*fileset = lelb_to_cpu(la->extLocation);
udf_debug("FileSet found in LogicalVolDesc at block=%d, "
@@ -1490,7 +1532,7 @@ out_bh:
* udf_load_logicalvolint
*
*/
-static void udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
+static void udf_load_logicalvolint(struct super_block *sb, struct kernel_extent_ad loc)
{
struct buffer_head *bh = NULL;
uint16_t ident;
@@ -1533,7 +1575,7 @@ static void udf_load_logicalvolint(struct super_block *sb, kernel_extent_ad loc)
* Written, tested, and released.
*/
static noinline int udf_process_sequence(struct super_block *sb, long block,
- long lastblock, kernel_lb_addr *fileset)
+ long lastblock, struct kernel_lb_addr *fileset)
{
struct buffer_head *bh = NULL;
struct udf_vds_record vds[VDS_POS_LENGTH];
@@ -1678,7 +1720,7 @@ static int udf_check_valid(struct super_block *sb, int novrs, int silent)
return !block;
}
-static int udf_load_sequence(struct super_block *sb, kernel_lb_addr *fileset)
+static int udf_load_sequence(struct super_block *sb, struct kernel_lb_addr *fileset)
{
struct anchorVolDescPtr *anchor;
uint16_t ident;
@@ -1755,7 +1797,7 @@ static void udf_open_lvid(struct super_block *sb)
lvid->integrityType = LVID_INTEGRITY_TYPE_OPEN;
lvid->descTag.descCRC = cpu_to_le16(
- crc_itu_t(0, (char *)lvid + sizeof(tag),
+ crc_itu_t(0, (char *)lvid + sizeof(struct tag),
le16_to_cpu(lvid->descTag.descCRCLength)));
lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
@@ -1790,7 +1832,7 @@ static void udf_close_lvid(struct super_block *sb)
lvid->integrityType = cpu_to_le32(LVID_INTEGRITY_TYPE_CLOSE);
lvid->descTag.descCRC = cpu_to_le16(
- crc_itu_t(0, (char *)lvid + sizeof(tag),
+ crc_itu_t(0, (char *)lvid + sizeof(struct tag),
le16_to_cpu(lvid->descTag.descCRCLength)));
lvid->descTag.tagChecksum = udf_tag_checksum(&lvid->descTag);
@@ -1848,13 +1890,15 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
int i;
struct inode *inode = NULL;
struct udf_options uopt;
- kernel_lb_addr rootdir, fileset;
+ struct kernel_lb_addr rootdir, fileset;
struct udf_sb_info *sbi;
uopt.flags = (1 << UDF_FLAG_USE_AD_IN_ICB) | (1 << UDF_FLAG_STRICT);
uopt.uid = -1;
uopt.gid = -1;
uopt.umask = 0;
+ uopt.fmode = UDF_INVALID_MODE;
+ uopt.dmode = UDF_INVALID_MODE;
sbi = kzalloc(sizeof(struct udf_sb_info), GFP_KERNEL);
if (!sbi)
@@ -1892,6 +1936,8 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
sbi->s_uid = uopt.uid;
sbi->s_gid = uopt.gid;
sbi->s_umask = uopt.umask;
+ sbi->s_fmode = uopt.fmode;
+ sbi->s_dmode = uopt.dmode;
sbi->s_nls_map = uopt.nls_map;
/* Set the block size for all transfers */
@@ -1978,7 +2024,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
}
if (!silent) {
- timestamp ts;
+ struct timestamp ts;
udf_time_to_disk_stamp(&ts, sbi->s_record_time);
udf_info("UDF: Mounting volume '%s', "
"timestamp %04u/%02u/%02u %02u:%02u (%x)\n",
@@ -1991,7 +2037,7 @@ static int udf_fill_super(struct super_block *sb, void *options, int silent)
/* Assign the root inode */
/* assign inodes by physical block number */
/* perhaps it's not extensible enough, but for now ... */
- inode = udf_iget(sb, rootdir);
+ inode = udf_iget(sb, &rootdir);
if (!inode) {
printk(KERN_ERR "UDF-fs: Error in udf_iget, block=%d, "
"partition=%d\n",
@@ -2086,6 +2132,7 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
struct super_block *sb = dentry->d_sb;
struct udf_sb_info *sbi = UDF_SB(sb);
struct logicalVolIntegrityDescImpUse *lvidiu;
+ u64 id = huge_encode_dev(sb->s_bdev->bd_dev);
if (sbi->s_lvid_bh != NULL)
lvidiu = udf_sb_lvidiu(sbi);
@@ -2101,8 +2148,9 @@ static int udf_statfs(struct dentry *dentry, struct kstatfs *buf)
le32_to_cpu(lvidiu->numDirs)) : 0)
+ buf->f_bfree;
buf->f_ffree = buf->f_bfree;
- /* __kernel_fsid_t f_fsid */
buf->f_namelen = UDF_NAME_LEN - 2;
+ buf->f_fsid.val[0] = (u32)id;
+ buf->f_fsid.val[1] = (u32)(id >> 32);
return 0;
}
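
The new f_fsid value packs the 64-bit encoded block device number into the two 32-bit halves of the fsid, the same scheme other filesystems use with huge_encode_dev. The split itself is plain shifting and truncation, shown here with an invented id:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint64_t id = 0x123456789abcdef0ULL; /* sample encoded dev_t */
	uint32_t val[2];

	val[0] = (uint32_t)id;		/* low 32 bits */
	val[1] = (uint32_t)(id >> 32);	/* high 32 bits */
	printf("f_fsid = %08x:%08x\n", val[1], val[0]);
	return 0;
}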
@@ -2114,7 +2162,7 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb,
unsigned int accum = 0;
int index;
int block = 0, newblock;
- kernel_lb_addr loc;
+ struct kernel_lb_addr loc;
uint32_t bytes;
uint8_t *ptr;
uint16_t ident;
@@ -2124,7 +2172,7 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb,
loc.logicalBlockNum = bitmap->s_extPosition;
loc.partitionReferenceNum = UDF_SB(sb)->s_partition;
- bh = udf_read_ptagged(sb, loc, 0, &ident);
+ bh = udf_read_ptagged(sb, &loc, 0, &ident);
if (!bh) {
printk(KERN_ERR "udf: udf_count_free failed\n");
@@ -2147,7 +2195,7 @@ static unsigned int udf_count_free_bitmap(struct super_block *sb,
bytes -= cur_bytes;
if (bytes) {
brelse(bh);
- newblock = udf_get_lb_pblock(sb, loc, ++block);
+ newblock = udf_get_lb_pblock(sb, &loc, ++block);
bh = udf_tread(sb, newblock);
if (!bh) {
udf_debug("read failed\n");
@@ -2170,7 +2218,7 @@ static unsigned int udf_count_free_table(struct super_block *sb,
{
unsigned int accum = 0;
uint32_t elen;
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
int8_t etype;
struct extent_position epos;
diff --git a/fs/udf/truncate.c b/fs/udf/truncate.c
index 65e19b4f9424..225527cdc885 100644
--- a/fs/udf/truncate.c
+++ b/fs/udf/truncate.c
@@ -28,10 +28,10 @@
#include "udf_sb.h"
static void extent_trunc(struct inode *inode, struct extent_position *epos,
- kernel_lb_addr eloc, int8_t etype, uint32_t elen,
+ struct kernel_lb_addr *eloc, int8_t etype, uint32_t elen,
uint32_t nelen)
{
- kernel_lb_addr neloc = {};
+ struct kernel_lb_addr neloc = {};
int last_block = (elen + inode->i_sb->s_blocksize - 1) >>
inode->i_sb->s_blocksize_bits;
int first_block = (nelen + inode->i_sb->s_blocksize - 1) >>
@@ -43,12 +43,12 @@ static void extent_trunc(struct inode *inode, struct extent_position *epos,
last_block);
etype = (EXT_NOT_RECORDED_NOT_ALLOCATED >> 30);
} else
- neloc = eloc;
+ neloc = *eloc;
nelen = (etype << 30) | nelen;
}
if (elen != nelen) {
- udf_write_aext(inode, epos, neloc, nelen, 0);
+ udf_write_aext(inode, epos, &neloc, nelen, 0);
if (last_block - first_block > 0) {
if (etype == (EXT_RECORDED_ALLOCATED >> 30))
mark_inode_dirty(inode);
@@ -68,7 +68,7 @@ static void extent_trunc(struct inode *inode, struct extent_position *epos,
void udf_truncate_tail_extent(struct inode *inode)
{
struct extent_position epos = {};
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
uint32_t elen, nelen;
uint64_t lbcount = 0;
int8_t etype = -1, netype;
@@ -83,9 +83,9 @@ void udf_truncate_tail_extent(struct inode *inode)
return;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- adsize = sizeof(short_ad);
+ adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- adsize = sizeof(long_ad);
+ adsize = sizeof(struct long_ad);
else
BUG();
@@ -106,7 +106,7 @@ void udf_truncate_tail_extent(struct inode *inode)
(unsigned)elen);
nelen = elen - (lbcount - inode->i_size);
epos.offset -= adsize;
- extent_trunc(inode, &epos, eloc, etype, elen, nelen);
+ extent_trunc(inode, &epos, &eloc, etype, elen, nelen);
epos.offset += adsize;
if (udf_next_aext(inode, &epos, &eloc, &elen, 1) != -1)
printk(KERN_ERR "udf_truncate_tail_extent(): "
@@ -124,7 +124,7 @@ void udf_truncate_tail_extent(struct inode *inode)
void udf_discard_prealloc(struct inode *inode)
{
struct extent_position epos = { NULL, 0, {0, 0} };
- kernel_lb_addr eloc;
+ struct kernel_lb_addr eloc;
uint32_t elen;
uint64_t lbcount = 0;
int8_t etype = -1, netype;
@@ -136,9 +136,9 @@ void udf_discard_prealloc(struct inode *inode)
return;
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- adsize = sizeof(short_ad);
+ adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- adsize = sizeof(long_ad);
+ adsize = sizeof(struct long_ad);
else
adsize = 0;
@@ -152,7 +152,7 @@ void udf_discard_prealloc(struct inode *inode)
if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
epos.offset -= adsize;
lbcount -= elen;
- extent_trunc(inode, &epos, eloc, etype, elen, 0);
+ extent_trunc(inode, &epos, &eloc, etype, elen, 0);
if (!epos.bh) {
iinfo->i_lenAlloc =
epos.offset -
@@ -200,7 +200,7 @@ static void udf_update_alloc_ext_desc(struct inode *inode,
void udf_truncate_extents(struct inode *inode)
{
struct extent_position epos;
- kernel_lb_addr eloc, neloc = {};
+ struct kernel_lb_addr eloc, neloc = {};
uint32_t elen, nelen = 0, indirect_ext_len = 0, lenalloc;
int8_t etype;
struct super_block *sb = inode->i_sb;
@@ -210,9 +210,9 @@ void udf_truncate_extents(struct inode *inode)
struct udf_inode_info *iinfo = UDF_I(inode);
if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
- adsize = sizeof(short_ad);
+ adsize = sizeof(struct short_ad);
else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
- adsize = sizeof(long_ad);
+ adsize = sizeof(struct long_ad);
else
BUG();
@@ -221,7 +221,7 @@ void udf_truncate_extents(struct inode *inode)
(inode->i_size & (sb->s_blocksize - 1));
if (etype != -1) {
epos.offset -= adsize;
- extent_trunc(inode, &epos, eloc, etype, elen, byte_offset);
+ extent_trunc(inode, &epos, &eloc, etype, elen, byte_offset);
epos.offset += adsize;
if (byte_offset)
lenalloc = epos.offset;
@@ -236,12 +236,12 @@ void udf_truncate_extents(struct inode *inode)
while ((etype = udf_current_aext(inode, &epos, &eloc,
&elen, 0)) != -1) {
if (etype == (EXT_NEXT_EXTENT_ALLOCDECS >> 30)) {
- udf_write_aext(inode, &epos, neloc, nelen, 0);
+ udf_write_aext(inode, &epos, &neloc, nelen, 0);
if (indirect_ext_len) {
/* We managed to free all extents in the
* indirect extent - free it too */
BUG_ON(!epos.bh);
- udf_free_blocks(sb, inode, epos.block,
+ udf_free_blocks(sb, inode, &epos.block,
0, indirect_ext_len);
} else if (!epos.bh) {
iinfo->i_lenAlloc = lenalloc;
@@ -253,7 +253,7 @@ void udf_truncate_extents(struct inode *inode)
epos.offset = sizeof(struct allocExtDesc);
epos.block = eloc;
epos.bh = udf_tread(sb,
- udf_get_lb_pblock(sb, eloc, 0));
+ udf_get_lb_pblock(sb, &eloc, 0));
if (elen)
indirect_ext_len =
(elen + sb->s_blocksize - 1) >>
@@ -261,7 +261,7 @@ void udf_truncate_extents(struct inode *inode)
else
indirect_ext_len = 1;
} else {
- extent_trunc(inode, &epos, eloc, etype,
+ extent_trunc(inode, &epos, &eloc, etype,
elen, 0);
epos.offset += adsize;
}
@@ -269,7 +269,7 @@ void udf_truncate_extents(struct inode *inode)
if (indirect_ext_len) {
BUG_ON(!epos.bh);
- udf_free_blocks(sb, inode, epos.block, 0,
+ udf_free_blocks(sb, inode, &epos.block, 0,
indirect_ext_len);
} else if (!epos.bh) {
iinfo->i_lenAlloc = lenalloc;
@@ -278,7 +278,7 @@ void udf_truncate_extents(struct inode *inode)
udf_update_alloc_ext_desc(inode, &epos, lenalloc);
} else if (inode->i_size) {
if (byte_offset) {
- kernel_long_ad extent;
+ struct kernel_long_ad extent;
/*
* OK, there is no extent covering inode->i_size and
diff --git a/fs/udf/udf_i.h b/fs/udf/udf_i.h
index 4f86b1d98a5d..e58d1de41073 100644
--- a/fs/udf/udf_i.h
+++ b/fs/udf/udf_i.h
@@ -4,7 +4,7 @@
struct udf_inode_info {
struct timespec i_crtime;
/* Physical address of inode */
- kernel_lb_addr i_location;
+ struct kernel_lb_addr i_location;
__u64 i_unique;
__u32 i_lenEAttr;
__u32 i_lenAlloc;
@@ -17,8 +17,8 @@ struct udf_inode_info {
unsigned i_strat4096 : 1;
unsigned reserved : 26;
union {
- short_ad *i_sad;
- long_ad *i_lad;
+ struct short_ad *i_sad;
+ struct long_ad *i_lad;
__u8 *i_data;
} i_ext;
struct inode vfs_inode;
diff --git a/fs/udf/udf_sb.h b/fs/udf/udf_sb.h
index 1c1c514a9725..158221ecdc42 100644
--- a/fs/udf/udf_sb.h
+++ b/fs/udf/udf_sb.h
@@ -48,6 +48,8 @@
#define UDF_SPARABLE_MAP15 0x1522U
#define UDF_METADATA_MAP25 0x2511U
+#define UDF_INVALID_MODE ((mode_t)-1)
+
#pragma pack(1) /* XXX(hch): Why? This file just defines in-core structures */
struct udf_meta_data {
@@ -123,6 +125,8 @@ struct udf_sb_info {
mode_t s_umask;
gid_t s_gid;
uid_t s_uid;
+ mode_t s_fmode;
+ mode_t s_dmode;
/* Root Info */
struct timespec s_record_time;
diff --git a/fs/udf/udfdecl.h b/fs/udf/udfdecl.h
index 8ec865de5f13..9a2a9b61413e 100644
--- a/fs/udf/udfdecl.h
+++ b/fs/udf/udfdecl.h
@@ -62,10 +62,8 @@ static inline size_t udf_ext0_offset(struct inode *inode)
return 0;
}
-#define udf_get_lb_pblock(sb,loc,offset) udf_get_pblock((sb), (loc).logicalBlockNum, (loc).partitionReferenceNum, (offset))
-
/* computes tag checksum */
-u8 udf_tag_checksum(const tag *t);
+u8 udf_tag_checksum(const struct tag *t);
struct dentry;
struct inode;
@@ -95,7 +93,7 @@ struct udf_vds_record {
};
struct generic_desc {
- tag descTag;
+ struct tag descTag;
__le32 volDescSeqNum;
};
@@ -108,7 +106,7 @@ struct ustr {
struct extent_position {
struct buffer_head *bh;
uint32_t offset;
- kernel_lb_addr block;
+ struct kernel_lb_addr block;
};
/* super.c */
@@ -124,7 +122,7 @@ extern int udf_ioctl(struct inode *, struct file *, unsigned int,
unsigned long);
/* inode.c */
-extern struct inode *udf_iget(struct super_block *, kernel_lb_addr);
+extern struct inode *udf_iget(struct super_block *, struct kernel_lb_addr *);
extern int udf_sync_inode(struct inode *);
extern void udf_expand_file_adinicb(struct inode *, int, int *);
extern struct buffer_head *udf_expand_dir_adinicb(struct inode *, int *, int *);
@@ -136,19 +134,19 @@ extern void udf_clear_inode(struct inode *);
extern int udf_write_inode(struct inode *, int);
extern long udf_block_map(struct inode *, sector_t);
extern int udf_extend_file(struct inode *, struct extent_position *,
- kernel_long_ad *, sector_t);
+ struct kernel_long_ad *, sector_t);
extern int8_t inode_bmap(struct inode *, sector_t, struct extent_position *,
- kernel_lb_addr *, uint32_t *, sector_t *);
+ struct kernel_lb_addr *, uint32_t *, sector_t *);
extern int8_t udf_add_aext(struct inode *, struct extent_position *,
- kernel_lb_addr, uint32_t, int);
+ struct kernel_lb_addr *, uint32_t, int);
extern int8_t udf_write_aext(struct inode *, struct extent_position *,
- kernel_lb_addr, uint32_t, int);
+ struct kernel_lb_addr *, uint32_t, int);
extern int8_t udf_delete_aext(struct inode *, struct extent_position,
- kernel_lb_addr, uint32_t);
+ struct kernel_lb_addr, uint32_t);
extern int8_t udf_next_aext(struct inode *, struct extent_position *,
- kernel_lb_addr *, uint32_t *, int);
+ struct kernel_lb_addr *, uint32_t *, int);
extern int8_t udf_current_aext(struct inode *, struct extent_position *,
- kernel_lb_addr *, uint32_t *, int);
+ struct kernel_lb_addr *, uint32_t *, int);
/* misc.c */
extern struct buffer_head *udf_tgetblk(struct super_block *, int);
@@ -160,7 +158,7 @@ extern struct genericFormat *udf_get_extendedattr(struct inode *, uint32_t,
extern struct buffer_head *udf_read_tagged(struct super_block *, uint32_t,
uint32_t, uint16_t *);
extern struct buffer_head *udf_read_ptagged(struct super_block *,
- kernel_lb_addr, uint32_t,
+ struct kernel_lb_addr *, uint32_t,
uint16_t *);
extern void udf_update_tag(char *, int);
extern void udf_new_tag(char *, uint16_t, uint16_t, uint16_t, uint32_t, int);
@@ -182,6 +180,14 @@ extern uint32_t udf_get_pblock_meta25(struct super_block *, uint32_t, uint16_t,
uint32_t);
extern int udf_relocate_blocks(struct super_block *, long, long *);
+static inline uint32_t
+udf_get_lb_pblock(struct super_block *sb, struct kernel_lb_addr *loc,
+ uint32_t offset)
+{
+ return udf_get_pblock(sb, loc->logicalBlockNum,
+ loc->partitionReferenceNum, offset);
+}
+
/* unicode.c */
extern int udf_get_filename(struct super_block *, uint8_t *, uint8_t *, int);
extern int udf_put_filename(struct super_block *, const uint8_t *, uint8_t *,
@@ -200,7 +206,7 @@ extern void udf_truncate_extents(struct inode *);
/* balloc.c */
extern void udf_free_blocks(struct super_block *, struct inode *,
- kernel_lb_addr, uint32_t, uint32_t);
+ struct kernel_lb_addr *, uint32_t, uint32_t);
extern int udf_prealloc_blocks(struct super_block *, struct inode *, uint16_t,
uint32_t, uint32_t);
extern int udf_new_block(struct super_block *, struct inode *, uint16_t,
@@ -214,16 +220,16 @@ extern struct fileIdentDesc *udf_fileident_read(struct inode *, loff_t *,
struct udf_fileident_bh *,
struct fileIdentDesc *,
struct extent_position *,
- kernel_lb_addr *, uint32_t *,
+ struct kernel_lb_addr *, uint32_t *,
sector_t *);
extern struct fileIdentDesc *udf_get_fileident(void *buffer, int bufsize,
int *offset);
-extern long_ad *udf_get_filelongad(uint8_t *, int, uint32_t *, int);
-extern short_ad *udf_get_fileshortad(uint8_t *, int, uint32_t *, int);
+extern struct long_ad *udf_get_filelongad(uint8_t *, int, uint32_t *, int);
+extern struct short_ad *udf_get_fileshortad(uint8_t *, int, uint32_t *, int);
/* udftime.c */
extern struct timespec *udf_disk_stamp_to_time(struct timespec *dest,
- timestamp src);
-extern timestamp *udf_time_to_disk_stamp(timestamp *dest, struct timespec src);
+ struct timestamp src);
+extern struct timestamp *udf_time_to_disk_stamp(struct timestamp *dest, struct timespec src);
#endif /* __UDF_DECL_H */
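
Replacing the udf_get_lb_pblock macro with a static inline trades textual substitution for a real function: arguments are evaluated once and the pointer type is checked, with identical behavior. A minimal before/after sketch, using a toy udf_get_pblock that ignores the partition map:

#include <stdint.h>
#include <stdio.h>

struct kernel_lb_addr {
	uint32_t logicalBlockNum;
	uint16_t partitionReferenceNum;
};

static uint32_t udf_get_pblock(uint32_t block, uint16_t part,
			       uint32_t offset)
{
	return block + offset; /* toy mapping; ignores the partition */
}

/* old style: no type checking, arguments expanded textually */
#define udf_get_lb_pblock_macro(loc, offset) \
	udf_get_pblock((loc).logicalBlockNum, \
		       (loc).partitionReferenceNum, (offset))

/* new style: type-checked, single evaluation of each argument */
static inline uint32_t udf_get_lb_pblock(const struct kernel_lb_addr *loc,
					 uint32_t offset)
{
	return udf_get_pblock(loc->logicalBlockNum,
			      loc->partitionReferenceNum, offset);
}

int main(void)
{
	struct kernel_lb_addr loc = { 10, 0 };

	printf("%u %u\n", udf_get_lb_pblock_macro(loc, 5),
	       udf_get_lb_pblock(&loc, 5));
	return 0;
}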
diff --git a/fs/udf/udfend.h b/fs/udf/udfend.h
index 489f52fb428c..6a9f3a9cc428 100644
--- a/fs/udf/udfend.h
+++ b/fs/udf/udfend.h
@@ -4,9 +4,9 @@
#include <asm/byteorder.h>
#include <linux/string.h>
-static inline kernel_lb_addr lelb_to_cpu(lb_addr in)
+static inline struct kernel_lb_addr lelb_to_cpu(struct lb_addr in)
{
- kernel_lb_addr out;
+ struct kernel_lb_addr out;
out.logicalBlockNum = le32_to_cpu(in.logicalBlockNum);
out.partitionReferenceNum = le16_to_cpu(in.partitionReferenceNum);
@@ -14,9 +14,9 @@ static inline kernel_lb_addr lelb_to_cpu(lb_addr in)
return out;
}
-static inline lb_addr cpu_to_lelb(kernel_lb_addr in)
+static inline struct lb_addr cpu_to_lelb(struct kernel_lb_addr in)
{
- lb_addr out;
+ struct lb_addr out;
out.logicalBlockNum = cpu_to_le32(in.logicalBlockNum);
out.partitionReferenceNum = cpu_to_le16(in.partitionReferenceNum);
@@ -24,9 +24,9 @@ static inline lb_addr cpu_to_lelb(kernel_lb_addr in)
return out;
}
-static inline short_ad lesa_to_cpu(short_ad in)
+static inline struct short_ad lesa_to_cpu(struct short_ad in)
{
- short_ad out;
+ struct short_ad out;
out.extLength = le32_to_cpu(in.extLength);
out.extPosition = le32_to_cpu(in.extPosition);
@@ -34,9 +34,9 @@ static inline short_ad lesa_to_cpu(short_ad in)
return out;
}
-static inline short_ad cpu_to_lesa(short_ad in)
+static inline struct short_ad cpu_to_lesa(struct short_ad in)
{
- short_ad out;
+ struct short_ad out;
out.extLength = cpu_to_le32(in.extLength);
out.extPosition = cpu_to_le32(in.extPosition);
@@ -44,9 +44,9 @@ static inline short_ad cpu_to_lesa(short_ad in)
return out;
}
-static inline kernel_long_ad lela_to_cpu(long_ad in)
+static inline struct kernel_long_ad lela_to_cpu(struct long_ad in)
{
- kernel_long_ad out;
+ struct kernel_long_ad out;
out.extLength = le32_to_cpu(in.extLength);
out.extLocation = lelb_to_cpu(in.extLocation);
@@ -54,9 +54,9 @@ static inline kernel_long_ad lela_to_cpu(long_ad in)
return out;
}
-static inline long_ad cpu_to_lela(kernel_long_ad in)
+static inline struct long_ad cpu_to_lela(struct kernel_long_ad in)
{
- long_ad out;
+ struct long_ad out;
out.extLength = cpu_to_le32(in.extLength);
out.extLocation = cpu_to_lelb(in.extLocation);
@@ -64,9 +64,9 @@ static inline long_ad cpu_to_lela(kernel_long_ad in)
return out;
}
-static inline kernel_extent_ad leea_to_cpu(extent_ad in)
+static inline struct kernel_extent_ad leea_to_cpu(struct extent_ad in)
{
- kernel_extent_ad out;
+ struct kernel_extent_ad out;
out.extLength = le32_to_cpu(in.extLength);
out.extLocation = le32_to_cpu(in.extLocation);
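
These helpers convert between the on-disk little-endian layout (lb_addr and friends) and the CPU-order in-core forms, one field at a time. A user-space sketch of the underlying idea, with a byte-assembling stand-in for the kernel's le32_to_cpu:

#include <stdint.h>
#include <stdio.h>

/* assemble from bytes so the result is host-order on any endianness */
static uint32_t le32_to_cpu_sketch(const uint8_t *p)
{
	return (uint32_t)p[0] | ((uint32_t)p[1] << 8) |
	       ((uint32_t)p[2] << 16) | ((uint32_t)p[3] << 24);
}

int main(void)
{
	/* 0x00000102 stored little-endian on disk */
	uint8_t disk[4] = { 0x02, 0x01, 0x00, 0x00 };

	printf("cpu value = %u\n", le32_to_cpu_sketch(disk)); /* 258 */
	return 0;
}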
diff --git a/fs/udf/udftime.c b/fs/udf/udftime.c
index 5f811655c9b5..b8c828c4d200 100644
--- a/fs/udf/udftime.c
+++ b/fs/udf/udftime.c
@@ -85,7 +85,8 @@ extern struct timezone sys_tz;
#define SECS_PER_HOUR (60 * 60)
#define SECS_PER_DAY (SECS_PER_HOUR * 24)
-struct timespec *udf_disk_stamp_to_time(struct timespec *dest, timestamp src)
+struct timespec *
+udf_disk_stamp_to_time(struct timespec *dest, struct timestamp src)
{
int yday;
u16 typeAndTimezone = le16_to_cpu(src.typeAndTimezone);
@@ -116,7 +117,8 @@ struct timespec *udf_disk_stamp_to_time(struct timespec *dest, timestamp src)
return dest;
}
-timestamp *udf_time_to_disk_stamp(timestamp *dest, struct timespec ts)
+struct timestamp *
+udf_time_to_disk_stamp(struct timestamp *dest, struct timespec ts)
{
long int days, rem, y;
const unsigned short int *ip;
diff --git a/fs/udf/unicode.c b/fs/udf/unicode.c
index 9fdf8c93c58e..cefa8c8913e6 100644
--- a/fs/udf/unicode.c
+++ b/fs/udf/unicode.c
@@ -254,7 +254,7 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
{
const uint8_t *ocu;
uint8_t cmp_id, ocu_len;
- int i;
+ int i, len;
ocu_len = ocu_i->u_len;
@@ -279,8 +279,13 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
if (cmp_id == 16)
c = (c << 8) | ocu[i++];
- utf_o->u_len += nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
- UDF_NAME_LEN - utf_o->u_len);
+ len = nls->uni2char(c, &utf_o->u_name[utf_o->u_len],
+ UDF_NAME_LEN - utf_o->u_len);
+ /* Valid character? */
+ if (len >= 0)
+ utf_o->u_len += len;
+ else
+ utf_o->u_name[utf_o->u_len++] = '?';
}
utf_o->u_cmpID = 8;
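
The fix above distinguishes a conversion error (negative return from nls->uni2char) from a valid result: errors now emit a literal '?' instead of corrupting the running length by adding a negative value. A user-space sketch of the pattern, with a toy converter that rejects non-ASCII code points:

#include <stdio.h>

/* toy uni2char: writes one ASCII byte, fails on wide code points */
static int uni2char(unsigned int c, char *out, int outlen)
{
	if (outlen < 1)
		return -12; /* -ENOMEM, as the NLS tables return */
	if (c > 0x7f)
		return -22; /* -EINVAL: not representable */
	*out = (char)c;
	return 1; /* number of bytes written */
}

int main(void)
{
	unsigned int in[3] = { 'a', 0x263a, 'b' }; /* one bad char */
	char name[8];
	int used = 0;

	for (int i = 0; i < 3; i++) {
		int len = uni2char(in[i], &name[used],
				   (int)sizeof(name) - used);
		if (len >= 0)
			used += len; /* valid character */
		else
			name[used++] = '?'; /* replace, don't corrupt */
	}
	printf("%.*s\n", used, name); /* prints a?b */
	return 0;
}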
@@ -290,7 +295,8 @@ static int udf_CS0toNLS(struct nls_table *nls, struct ustr *utf_o,
static int udf_NLStoCS0(struct nls_table *nls, dstring *ocu, struct ustr *uni,
int length)
{
- unsigned len, i, max_val;
+ int len;
+ unsigned i, max_val;
uint16_t uni_char;
int u_len;
@@ -302,8 +308,13 @@ try_again:
u_len = 0U;
for (i = 0U; i < uni->u_len; i++) {
len = nls->char2uni(&uni->u_name[i], uni->u_len - i, &uni_char);
- if (len <= 0)
+ if (!len)
continue;
+ /* Invalid character, deal with it */
+ if (len < 0) {
+ len = 1;
+ uni_char = '?';
+ }
if (uni_char > max_val) {
max_val = 0xffffU;
@@ -324,34 +335,43 @@ try_again:
int udf_get_filename(struct super_block *sb, uint8_t *sname, uint8_t *dname,
int flen)
{
- struct ustr filename, unifilename;
- int len;
+ struct ustr *filename, *unifilename;
+ int len = 0;
- if (udf_build_ustr_exact(&unifilename, sname, flen))
+ filename = kmalloc(sizeof(struct ustr), GFP_NOFS);
+ if (!filename)
return 0;
+ unifilename = kmalloc(sizeof(struct ustr), GFP_NOFS);
+ if (!unifilename)
+ goto out1;
+
+ if (udf_build_ustr_exact(unifilename, sname, flen))
+ goto out2;
+
if (UDF_QUERY_FLAG(sb, UDF_FLAG_UTF8)) {
- if (!udf_CS0toUTF8(&filename, &unifilename)) {
+ if (!udf_CS0toUTF8(filename, unifilename)) {
udf_debug("Failed in udf_get_filename: sname = %s\n",
sname);
- return 0;
+ goto out2;
}
} else if (UDF_QUERY_FLAG(sb, UDF_FLAG_NLS_MAP)) {
- if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, &filename,
- &unifilename)) {
+ if (!udf_CS0toNLS(UDF_SB(sb)->s_nls_map, filename,
+ unifilename)) {
udf_debug("Failed in udf_get_filename: sname = %s\n",
sname);
- return 0;
+ goto out2;
}
} else
- return 0;
-
- len = udf_translate_to_linux(dname, filename.u_name, filename.u_len,
- unifilename.u_name, unifilename.u_len);
- if (len)
- return len;
-
- return 0;
+ goto out2;
+
+ len = udf_translate_to_linux(dname, filename->u_name, filename->u_len,
+ unifilename->u_name, unifilename->u_len);
+out2:
+ kfree(unifilename);
+out1:
+ kfree(filename);
+ return len;
}
int udf_put_filename(struct super_block *sb, const uint8_t *sname,
diff --git a/fs/ufs/Kconfig b/fs/ufs/Kconfig
new file mode 100644
index 000000000000..e4f10a40768a
--- /dev/null
+++ b/fs/ufs/Kconfig
@@ -0,0 +1,43 @@
+config UFS_FS
+ tristate "UFS file system support (read only)"
+ depends on BLOCK
+ help
+ BSD and derivative versions of Unix (such as SunOS, FreeBSD, NetBSD,
+ OpenBSD and NeXTstep) use a file system called UFS. Some System V
+ Unixes can create and mount hard disk partitions and diskettes using
+ this file system as well. Saying Y here will allow you to read from
+ these partitions; if you also want to write to them, say Y to the
+ experimental "UFS file system write support", below. Please read the
+ file <file:Documentation/filesystems/ufs.txt> for more information.
+
+ The recently released UFS2 variant (used in FreeBSD 5.x) is
+ supported read-only.
+
+ Note that this option is generally not needed for floppies, since a
+ good portable way to transport files and directories between unixes
+ (and even other operating systems) is given by the tar program ("man
+ tar" or preferably "info tar").
+
+ When accessing NeXTstep files, you may need to convert them from the
+ NeXT character set to the Latin1 character set; use the program
+ recode ("info recode") for this purpose.
+
+ To compile the UFS file system support as a module, choose M here: the
+ module will be called ufs.
+
+ If you haven't heard about all of this before, it's safe to say N.
+
+config UFS_FS_WRITE
+ bool "UFS file system write support (DANGEROUS)"
+ depends on UFS_FS && EXPERIMENTAL
+ help
+ Say Y here if you want to try writing to UFS partitions. This is
+ experimental, so you should back up your UFS partitions beforehand.
+
+config UFS_DEBUG
+ bool "UFS debugging"
+ depends on UFS_FS
+ help
+ If you are experiencing any problems with the UFS filesystem, say
+ Y here. This will result in _many_ additional debugging messages
+ being written to the system log.
diff --git a/fs/xfs/Kconfig b/fs/xfs/Kconfig
index 3f53dd101f99..29228f5899cd 100644
--- a/fs/xfs/Kconfig
+++ b/fs/xfs/Kconfig
@@ -1,6 +1,7 @@
config XFS_FS
tristate "XFS filesystem support"
depends on BLOCK
+ select EXPORTFS
help
XFS is a high performance journaling filesystem which originated
on the SGI IRIX platform. It is completely multi-threaded, can
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.c b/fs/xfs/linux-2.6/xfs_ioctl.c
index e5be1e0be802..4bd112313f33 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl.c
@@ -50,12 +50,14 @@
#include "xfs_vnodeops.h"
#include "xfs_quota.h"
#include "xfs_inode_item.h"
+#include "xfs_export.h"
#include <linux/capability.h>
#include <linux/dcache.h>
#include <linux/mount.h>
#include <linux/namei.h>
#include <linux/pagemap.h>
+#include <linux/exportfs.h>
/*
* xfs_find_handle maps from userspace xfs_fsop_handlereq structure to
@@ -164,97 +166,69 @@ xfs_find_handle(
return 0;
}
-
/*
- * Convert userspace handle data into inode.
- *
- * We use the fact that all the fsop_handlereq ioctl calls have a data
- * structure argument whose first component is always a xfs_fsop_handlereq_t,
- * so we can pass that sub structure into this handy, shared routine.
- *
- * If no error, caller must always iput the returned inode.
+ * No need to do permission checks on the various pathname components
+ * as the handle operations are privileged.
*/
STATIC int
-xfs_vget_fsop_handlereq(
- xfs_mount_t *mp,
- struct inode *parinode, /* parent inode pointer */
- xfs_fsop_handlereq_t *hreq,
- struct inode **inode)
+xfs_handle_acceptable(
+ void *context,
+ struct dentry *dentry)
+{
+ return 1;
+}
+
+/*
+ * Convert userspace handle data into a dentry.
+ */
+struct dentry *
+xfs_handle_to_dentry(
+ struct file *parfilp,
+ void __user *uhandle,
+ u32 hlen)
{
- void __user *hanp;
- size_t hlen;
- xfs_fid_t *xfid;
- xfs_handle_t *handlep;
xfs_handle_t handle;
- xfs_inode_t *ip;
- xfs_ino_t ino;
- __u32 igen;
- int error;
+ struct xfs_fid64 fid;
/*
* Only allow handle opens under a directory.
*/
- if (!S_ISDIR(parinode->i_mode))
- return XFS_ERROR(ENOTDIR);
-
- hanp = hreq->ihandle;
- hlen = hreq->ihandlen;
- handlep = &handle;
-
- if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
- return XFS_ERROR(EINVAL);
- if (copy_from_user(handlep, hanp, hlen))
- return XFS_ERROR(EFAULT);
- if (hlen < sizeof(*handlep))
- memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
- if (hlen > sizeof(handlep->ha_fsid)) {
- if (handlep->ha_fid.fid_len !=
- (hlen - sizeof(handlep->ha_fsid) -
- sizeof(handlep->ha_fid.fid_len)) ||
- handlep->ha_fid.fid_pad)
- return XFS_ERROR(EINVAL);
- }
-
- /*
- * Crack the handle, obtain the inode # & generation #
- */
- xfid = (struct xfs_fid *)&handlep->ha_fid;
- if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) {
- ino = xfid->fid_ino;
- igen = xfid->fid_gen;
- } else {
- return XFS_ERROR(EINVAL);
- }
-
- /*
- * Get the XFS inode, building a Linux inode to go with it.
- */
- error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
- if (error)
- return error;
- if (ip == NULL)
- return XFS_ERROR(EIO);
- if (ip->i_d.di_gen != igen) {
- xfs_iput_new(ip, XFS_ILOCK_SHARED);
- return XFS_ERROR(ENOENT);
- }
-
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
+ if (!S_ISDIR(parfilp->f_path.dentry->d_inode->i_mode))
+ return ERR_PTR(-ENOTDIR);
+
+ if (hlen != sizeof(xfs_handle_t))
+ return ERR_PTR(-EINVAL);
+ if (copy_from_user(&handle, uhandle, hlen))
+ return ERR_PTR(-EFAULT);
+ if (handle.ha_fid.fid_len !=
+ sizeof(handle.ha_fid) - sizeof(handle.ha_fid.fid_len))
+ return ERR_PTR(-EINVAL);
+
+ memset(&fid, 0, sizeof(struct fid));
+ fid.ino = handle.ha_fid.fid_ino;
+ fid.gen = handle.ha_fid.fid_gen;
+
+ return exportfs_decode_fh(parfilp->f_path.mnt, (struct fid *)&fid, 3,
+ FILEID_INO32_GEN | XFS_FILEID_TYPE_64FLAG,
+ xfs_handle_acceptable, NULL);
+}
- *inode = VFS_I(ip);
- return 0;
+STATIC struct dentry *
+xfs_handlereq_to_dentry(
+ struct file *parfilp,
+ xfs_fsop_handlereq_t *hreq)
+{
+ return xfs_handle_to_dentry(parfilp, hreq->ihandle, hreq->ihandlen);
}
int
xfs_open_by_handle(
- xfs_mount_t *mp,
- xfs_fsop_handlereq_t *hreq,
struct file *parfilp,
- struct inode *parinode)
+ xfs_fsop_handlereq_t *hreq)
{
const struct cred *cred = current_cred();
int error;
- int new_fd;
+ int fd;
int permflag;
struct file *filp;
struct inode *inode;
@@ -263,19 +237,21 @@ xfs_open_by_handle(
if (!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM);
- error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode);
- if (error)
- return -error;
+ dentry = xfs_handlereq_to_dentry(parfilp, hreq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ inode = dentry->d_inode;
/* Restrict xfs_open_by_handle to directories & regular files. */
if (!(S_ISREG(inode->i_mode) || S_ISDIR(inode->i_mode))) {
- iput(inode);
- return -XFS_ERROR(EINVAL);
+ error = -XFS_ERROR(EPERM);
+ goto out_dput;
}
#if BITS_PER_LONG != 32
hreq->oflags |= O_LARGEFILE;
#endif
+
/* Put open permission in namei format. */
permflag = hreq->oflags;
if ((permflag+1) & O_ACCMODE)
@@ -285,50 +261,45 @@ xfs_open_by_handle(
if ((!(permflag & O_APPEND) || (permflag & O_TRUNC)) &&
(permflag & FMODE_WRITE) && IS_APPEND(inode)) {
- iput(inode);
- return -XFS_ERROR(EPERM);
+ error = -XFS_ERROR(EPERM);
+ goto out_dput;
}
if ((permflag & FMODE_WRITE) && IS_IMMUTABLE(inode)) {
- iput(inode);
- return -XFS_ERROR(EACCES);
+ error = -XFS_ERROR(EACCES);
+ goto out_dput;
}
/* Can't write directories. */
- if ( S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
- iput(inode);
- return -XFS_ERROR(EISDIR);
+ if (S_ISDIR(inode->i_mode) && (permflag & FMODE_WRITE)) {
+ error = -XFS_ERROR(EISDIR);
+ goto out_dput;
}
- if ((new_fd = get_unused_fd()) < 0) {
- iput(inode);
- return new_fd;
+ fd = get_unused_fd();
+ if (fd < 0) {
+ error = fd;
+ goto out_dput;
}
- dentry = d_obtain_alias(inode);
- if (IS_ERR(dentry)) {
- put_unused_fd(new_fd);
- return PTR_ERR(dentry);
- }
-
- /* Ensure umount returns EBUSY on umounts while this file is open. */
- mntget(parfilp->f_path.mnt);
-
- /* Create file pointer. */
- filp = dentry_open(dentry, parfilp->f_path.mnt, hreq->oflags, cred);
+ filp = dentry_open(dentry, mntget(parfilp->f_path.mnt),
+ hreq->oflags, cred);
if (IS_ERR(filp)) {
- put_unused_fd(new_fd);
- return -XFS_ERROR(-PTR_ERR(filp));
+ put_unused_fd(fd);
+ return PTR_ERR(filp);
}
if (inode->i_mode & S_IFREG) {
- /* invisible operation should not change atime */
filp->f_flags |= O_NOATIME;
filp->f_mode |= FMODE_NOCMTIME;
}
- fd_install(new_fd, filp);
- return new_fd;
+ fd_install(fd, filp);
+ return fd;
+
+ out_dput:
+ dput(dentry);
+ return error;
}
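
[Editor's note: the rewrite above replaces the per-branch iput() calls with a single dput() at an out_dput label, the usual kernel single-exit cleanup idiom. A minimal, runnable userspace sketch of the pattern — all names here are hypothetical, not XFS code:

	#include <errno.h>
	#include <stdio.h>
	#include <stdlib.h>

	/* Acquire two resources; on any failure, unwind only what was
	 * actually taken by jumping to the matching label. */
	static int do_work(void)
	{
		char *buf, *tmp;
		int error;

		buf = malloc(64);
		if (!buf)
			return -ENOMEM;

		tmp = malloc(16);
		if (!tmp) {
			error = -ENOMEM;
			goto out_free_buf;
		}

		error = snprintf(tmp, 16, "ok") < 0 ? -EIO : 0;

		free(tmp);
	out_free_buf:
		free(buf);
		return error;
	}

	int main(void)
	{
		return do_work() ? EXIT_FAILURE : EXIT_SUCCESS;
	}

The win over scattered cleanups is that each exit path frees exactly the resources acquired so far, which is why the patch can drop four separate iput() calls.]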
/*
@@ -359,11 +330,10 @@ do_readlink(
int
xfs_readlink_by_handle(
- xfs_mount_t *mp,
- xfs_fsop_handlereq_t *hreq,
- struct inode *parinode)
+ struct file *parfilp,
+ xfs_fsop_handlereq_t *hreq)
{
- struct inode *inode;
+ struct dentry *dentry;
__u32 olen;
void *link;
int error;
@@ -371,26 +341,28 @@ xfs_readlink_by_handle(
if (!capable(CAP_SYS_ADMIN))
return -XFS_ERROR(EPERM);
- error = xfs_vget_fsop_handlereq(mp, parinode, hreq, &inode);
- if (error)
- return -error;
+ dentry = xfs_handlereq_to_dentry(parfilp, hreq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
/* Restrict this handle operation to symlinks only. */
- if (!S_ISLNK(inode->i_mode)) {
+ if (!S_ISLNK(dentry->d_inode->i_mode)) {
error = -XFS_ERROR(EINVAL);
- goto out_iput;
+ goto out_dput;
}
if (copy_from_user(&olen, hreq->ohandlen, sizeof(__u32))) {
error = -XFS_ERROR(EFAULT);
- goto out_iput;
+ goto out_dput;
}
link = kmalloc(MAXPATHLEN+1, GFP_KERNEL);
- if (!link)
- goto out_iput;
+ if (!link) {
+ error = -XFS_ERROR(ENOMEM);
+ goto out_dput;
+ }
- error = -xfs_readlink(XFS_I(inode), link);
+ error = -xfs_readlink(XFS_I(dentry->d_inode), link);
if (error)
goto out_kfree;
error = do_readlink(hreq->ohandle, olen, link);
@@ -399,32 +371,31 @@ xfs_readlink_by_handle(
out_kfree:
kfree(link);
- out_iput:
- iput(inode);
+ out_dput:
+ dput(dentry);
return error;
}
STATIC int
xfs_fssetdm_by_handle(
- xfs_mount_t *mp,
- void __user *arg,
- struct inode *parinode)
+ struct file *parfilp,
+ void __user *arg)
{
int error;
struct fsdmidata fsd;
xfs_fsop_setdm_handlereq_t dmhreq;
- struct inode *inode;
+ struct dentry *dentry;
if (!capable(CAP_MKNOD))
return -XFS_ERROR(EPERM);
if (copy_from_user(&dmhreq, arg, sizeof(xfs_fsop_setdm_handlereq_t)))
return -XFS_ERROR(EFAULT);
- error = xfs_vget_fsop_handlereq(mp, parinode, &dmhreq.hreq, &inode);
- if (error)
- return -error;
+ dentry = xfs_handlereq_to_dentry(parfilp, &dmhreq.hreq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
+ if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) {
error = -XFS_ERROR(EPERM);
goto out;
}
@@ -434,24 +405,23 @@ xfs_fssetdm_by_handle(
goto out;
}
- error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask,
+ error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask,
fsd.fsd_dmstate);
out:
- iput(inode);
+ dput(dentry);
return error;
}
STATIC int
xfs_attrlist_by_handle(
- xfs_mount_t *mp,
- void __user *arg,
- struct inode *parinode)
+ struct file *parfilp,
+ void __user *arg)
{
- int error;
+ int error = -ENOMEM;
attrlist_cursor_kern_t *cursor;
xfs_fsop_attrlist_handlereq_t al_hreq;
- struct inode *inode;
+ struct dentry *dentry;
char *kbuf;
if (!capable(CAP_SYS_ADMIN))
@@ -467,16 +437,16 @@ xfs_attrlist_by_handle(
if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
return -XFS_ERROR(EINVAL);
- error = xfs_vget_fsop_handlereq(mp, parinode, &al_hreq.hreq, &inode);
- if (error)
- goto out;
+ dentry = xfs_handlereq_to_dentry(parfilp, &al_hreq.hreq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
if (!kbuf)
- goto out_vn_rele;
+ goto out_dput;
cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
- error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen,
+ error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
al_hreq.flags, cursor);
if (error)
goto out_kfree;
@@ -486,10 +456,9 @@ xfs_attrlist_by_handle(
out_kfree:
kfree(kbuf);
- out_vn_rele:
- iput(inode);
- out:
- return -error;
+ out_dput:
+ dput(dentry);
+ return error;
}
int
@@ -564,15 +533,13 @@ xfs_attrmulti_attr_remove(
STATIC int
xfs_attrmulti_by_handle(
- xfs_mount_t *mp,
- void __user *arg,
struct file *parfilp,
- struct inode *parinode)
+ void __user *arg)
{
int error;
xfs_attr_multiop_t *ops;
xfs_fsop_attrmulti_handlereq_t am_hreq;
- struct inode *inode;
+ struct dentry *dentry;
unsigned int i, size;
char *attr_name;
@@ -581,19 +548,19 @@ xfs_attrmulti_by_handle(
if (copy_from_user(&am_hreq, arg, sizeof(xfs_fsop_attrmulti_handlereq_t)))
return -XFS_ERROR(EFAULT);
- error = xfs_vget_fsop_handlereq(mp, parinode, &am_hreq.hreq, &inode);
- if (error)
- goto out;
+ dentry = xfs_handlereq_to_dentry(parfilp, &am_hreq.hreq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
error = E2BIG;
size = am_hreq.opcount * sizeof(xfs_attr_multiop_t);
if (!size || size > 16 * PAGE_SIZE)
- goto out_vn_rele;
+ goto out_dput;
error = ENOMEM;
ops = kmalloc(size, GFP_KERNEL);
if (!ops)
- goto out_vn_rele;
+ goto out_dput;
error = EFAULT;
if (copy_from_user(ops, am_hreq.ops, size))
@@ -615,25 +582,28 @@ xfs_attrmulti_by_handle(
switch (ops[i].am_opcode) {
case ATTR_OP_GET:
- ops[i].am_error = xfs_attrmulti_attr_get(inode,
- attr_name, ops[i].am_attrvalue,
- &ops[i].am_length, ops[i].am_flags);
+ ops[i].am_error = xfs_attrmulti_attr_get(
+ dentry->d_inode, attr_name,
+ ops[i].am_attrvalue, &ops[i].am_length,
+ ops[i].am_flags);
break;
case ATTR_OP_SET:
ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
if (ops[i].am_error)
break;
- ops[i].am_error = xfs_attrmulti_attr_set(inode,
- attr_name, ops[i].am_attrvalue,
- ops[i].am_length, ops[i].am_flags);
+ ops[i].am_error = xfs_attrmulti_attr_set(
+ dentry->d_inode, attr_name,
+ ops[i].am_attrvalue, ops[i].am_length,
+ ops[i].am_flags);
mnt_drop_write(parfilp->f_path.mnt);
break;
case ATTR_OP_REMOVE:
ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
if (ops[i].am_error)
break;
- ops[i].am_error = xfs_attrmulti_attr_remove(inode,
- attr_name, ops[i].am_flags);
+ ops[i].am_error = xfs_attrmulti_attr_remove(
+ dentry->d_inode, attr_name,
+ ops[i].am_flags);
mnt_drop_write(parfilp->f_path.mnt);
break;
default:
@@ -647,9 +617,8 @@ xfs_attrmulti_by_handle(
kfree(attr_name);
out_kfree_ops:
kfree(ops);
- out_vn_rele:
- iput(inode);
- out:
+ out_dput:
+ dput(dentry);
return -error;
}
@@ -1440,23 +1409,23 @@ xfs_file_ioctl(
if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
return -XFS_ERROR(EFAULT);
- return xfs_open_by_handle(mp, &hreq, filp, inode);
+ return xfs_open_by_handle(filp, &hreq);
}
case XFS_IOC_FSSETDM_BY_HANDLE:
- return xfs_fssetdm_by_handle(mp, arg, inode);
+ return xfs_fssetdm_by_handle(filp, arg);
case XFS_IOC_READLINK_BY_HANDLE: {
xfs_fsop_handlereq_t hreq;
if (copy_from_user(&hreq, arg, sizeof(xfs_fsop_handlereq_t)))
return -XFS_ERROR(EFAULT);
- return xfs_readlink_by_handle(mp, &hreq, inode);
+ return xfs_readlink_by_handle(filp, &hreq);
}
case XFS_IOC_ATTRLIST_BY_HANDLE:
- return xfs_attrlist_by_handle(mp, arg, inode);
+ return xfs_attrlist_by_handle(filp, arg);
case XFS_IOC_ATTRMULTI_BY_HANDLE:
- return xfs_attrmulti_by_handle(mp, arg, filp, inode);
+ return xfs_attrmulti_by_handle(filp, arg);
case XFS_IOC_SWAPEXT: {
struct xfs_swapext sxp;
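
[Editor's note: the ioctl paths above now funnel every *_BY_HANDLE operation through xfs_handle_to_dentry(): the fixed-size handle is copied in and validated, the inode number/generation pair is repacked into a struct fid, and exportfs_decode_fh() performs the actual lookup (the always-true acceptable callback is safe because the callers are CAP_SYS_ADMIN-only). A hedged sketch of the same shape for a hypothetical filesystem — the myfs_* names are assumptions; this uses the plain 32-bit ino/gen handle type, where the hunk above shows XFS's 64-bit variant via XFS_FILEID_TYPE_64FLAG:

	#include <linux/err.h>
	#include <linux/exportfs.h>
	#include <linux/fs.h>
	#include <linux/types.h>
	#include <linux/uaccess.h>

	static int myfs_acceptable(void *context, struct dentry *dentry)
	{
		return 1;	/* callers are privileged: accept any alias */
	}

	/* Sketch: decode a fixed-size userspace handle through the
	 * generic exportfs code instead of an open-coded inode lookup. */
	static struct dentry *
	myfs_handle_to_dentry(struct file *parfilp, void __user *uhandle, u32 hlen)
	{
		struct fid fid = { };

		if (hlen != sizeof(fid.i32.ino) + sizeof(fid.i32.gen))
			return ERR_PTR(-EINVAL);
		if (copy_from_user(&fid.i32, uhandle, hlen))
			return ERR_PTR(-EFAULT);

		/* fh_len is counted in 32-bit words: ino + gen == 2 */
		return exportfs_decode_fh(parfilp->f_path.mnt, &fid, 2,
					  FILEID_INO32_GEN, myfs_acceptable,
					  NULL);
	}

Delegating to exportfs also explains the reference-counting switch throughout this patch: the helper returns a dentry, so every caller now releases with dput() instead of iput().]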
diff --git a/fs/xfs/linux-2.6/xfs_ioctl.h b/fs/xfs/linux-2.6/xfs_ioctl.h
index 8c16bf2d7e03..7bd7c6afc1eb 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl.h
+++ b/fs/xfs/linux-2.6/xfs_ioctl.h
@@ -34,16 +34,13 @@ xfs_find_handle(
extern int
xfs_open_by_handle(
- xfs_mount_t *mp,
- xfs_fsop_handlereq_t *hreq,
struct file *parfilp,
- struct inode *parinode);
+ xfs_fsop_handlereq_t *hreq);
extern int
xfs_readlink_by_handle(
- xfs_mount_t *mp,
- xfs_fsop_handlereq_t *hreq,
- struct inode *parinode);
+ struct file *parfilp,
+ xfs_fsop_handlereq_t *hreq);
extern int
xfs_attrmulti_attr_get(
@@ -67,6 +64,12 @@ xfs_attrmulti_attr_remove(
char *name,
__uint32_t flags);
+extern struct dentry *
+xfs_handle_to_dentry(
+ struct file *parfilp,
+ void __user *uhandle,
+ u32 hlen);
+
extern long
xfs_file_ioctl(
struct file *filp,
diff --git a/fs/xfs/linux-2.6/xfs_ioctl32.c b/fs/xfs/linux-2.6/xfs_ioctl32.c
index 50903ad3182e..c70c4e3db790 100644
--- a/fs/xfs/linux-2.6/xfs_ioctl32.c
+++ b/fs/xfs/linux-2.6/xfs_ioctl32.c
@@ -17,6 +17,7 @@
*/
#include <linux/compat.h>
#include <linux/ioctl.h>
+#include <linux/mount.h>
#include <asm/uaccess.h>
#include "xfs.h"
#include "xfs_fs.h"
@@ -340,96 +341,24 @@ xfs_compat_handlereq_copyin(
return 0;
}
-/*
- * Convert userspace handle data into inode.
- *
- * We use the fact that all the fsop_handlereq ioctl calls have a data
- * structure argument whose first component is always a xfs_fsop_handlereq_t,
- * so we can pass that sub structure into this handy, shared routine.
- *
- * If no error, caller must always iput the returned inode.
- */
-STATIC int
-xfs_vget_fsop_handlereq_compat(
- xfs_mount_t *mp,
- struct inode *parinode, /* parent inode pointer */
- compat_xfs_fsop_handlereq_t *hreq,
- struct inode **inode)
+STATIC struct dentry *
+xfs_compat_handlereq_to_dentry(
+ struct file *parfilp,
+ compat_xfs_fsop_handlereq_t *hreq)
{
- void __user *hanp;
- size_t hlen;
- xfs_fid_t *xfid;
- xfs_handle_t *handlep;
- xfs_handle_t handle;
- xfs_inode_t *ip;
- xfs_ino_t ino;
- __u32 igen;
- int error;
-
- /*
- * Only allow handle opens under a directory.
- */
- if (!S_ISDIR(parinode->i_mode))
- return XFS_ERROR(ENOTDIR);
-
- hanp = compat_ptr(hreq->ihandle);
- hlen = hreq->ihandlen;
- handlep = &handle;
-
- if (hlen < sizeof(handlep->ha_fsid) || hlen > sizeof(*handlep))
- return XFS_ERROR(EINVAL);
- if (copy_from_user(handlep, hanp, hlen))
- return XFS_ERROR(EFAULT);
- if (hlen < sizeof(*handlep))
- memset(((char *)handlep) + hlen, 0, sizeof(*handlep) - hlen);
- if (hlen > sizeof(handlep->ha_fsid)) {
- if (handlep->ha_fid.fid_len !=
- (hlen - sizeof(handlep->ha_fsid) -
- sizeof(handlep->ha_fid.fid_len)) ||
- handlep->ha_fid.fid_pad)
- return XFS_ERROR(EINVAL);
- }
-
- /*
- * Crack the handle, obtain the inode # & generation #
- */
- xfid = (struct xfs_fid *)&handlep->ha_fid;
- if (xfid->fid_len == sizeof(*xfid) - sizeof(xfid->fid_len)) {
- ino = xfid->fid_ino;
- igen = xfid->fid_gen;
- } else {
- return XFS_ERROR(EINVAL);
- }
-
- /*
- * Get the XFS inode, building a Linux inode to go with it.
- */
- error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip, 0);
- if (error)
- return error;
- if (ip == NULL)
- return XFS_ERROR(EIO);
- if (ip->i_d.di_gen != igen) {
- xfs_iput_new(ip, XFS_ILOCK_SHARED);
- return XFS_ERROR(ENOENT);
- }
-
- xfs_iunlock(ip, XFS_ILOCK_SHARED);
-
- *inode = VFS_I(ip);
- return 0;
+ return xfs_handle_to_dentry(parfilp,
+ compat_ptr(hreq->ihandle), hreq->ihandlen);
}
STATIC int
xfs_compat_attrlist_by_handle(
- xfs_mount_t *mp,
- void __user *arg,
- struct inode *parinode)
+ struct file *parfilp,
+ void __user *arg)
{
int error;
attrlist_cursor_kern_t *cursor;
compat_xfs_fsop_attrlist_handlereq_t al_hreq;
- struct inode *inode;
+ struct dentry *dentry;
char *kbuf;
if (!capable(CAP_SYS_ADMIN))
@@ -446,17 +375,17 @@ xfs_compat_attrlist_by_handle(
if (al_hreq.flags & ~(ATTR_ROOT | ATTR_SECURE))
return -XFS_ERROR(EINVAL);
- error = xfs_vget_fsop_handlereq_compat(mp, parinode, &al_hreq.hreq,
- &inode);
- if (error)
- goto out;
+ dentry = xfs_compat_handlereq_to_dentry(parfilp, &al_hreq.hreq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
+ error = -ENOMEM;
kbuf = kmalloc(al_hreq.buflen, GFP_KERNEL);
if (!kbuf)
- goto out_vn_rele;
+ goto out_dput;
cursor = (attrlist_cursor_kern_t *)&al_hreq.pos;
- error = xfs_attr_list(XFS_I(inode), kbuf, al_hreq.buflen,
+ error = -xfs_attr_list(XFS_I(dentry->d_inode), kbuf, al_hreq.buflen,
al_hreq.flags, cursor);
if (error)
goto out_kfree;
@@ -466,22 +395,20 @@ xfs_compat_attrlist_by_handle(
out_kfree:
kfree(kbuf);
- out_vn_rele:
- iput(inode);
- out:
- return -error;
+ out_dput:
+ dput(dentry);
+ return error;
}
STATIC int
xfs_compat_attrmulti_by_handle(
- xfs_mount_t *mp,
- void __user *arg,
- struct inode *parinode)
+ struct file *parfilp,
+ void __user *arg)
{
int error;
compat_xfs_attr_multiop_t *ops;
compat_xfs_fsop_attrmulti_handlereq_t am_hreq;
- struct inode *inode;
+ struct dentry *dentry;
unsigned int i, size;
char *attr_name;
@@ -491,20 +418,19 @@ xfs_compat_attrmulti_by_handle(
sizeof(compat_xfs_fsop_attrmulti_handlereq_t)))
return -XFS_ERROR(EFAULT);
- error = xfs_vget_fsop_handlereq_compat(mp, parinode, &am_hreq.hreq,
- &inode);
- if (error)
- goto out;
+ dentry = xfs_compat_handlereq_to_dentry(parfilp, &am_hreq.hreq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
error = E2BIG;
size = am_hreq.opcount * sizeof(compat_xfs_attr_multiop_t);
if (!size || size > 16 * PAGE_SIZE)
- goto out_vn_rele;
+ goto out_dput;
error = ENOMEM;
ops = kmalloc(size, GFP_KERNEL);
if (!ops)
- goto out_vn_rele;
+ goto out_dput;
error = EFAULT;
if (copy_from_user(ops, compat_ptr(am_hreq.ops), size))
@@ -527,20 +453,29 @@ xfs_compat_attrmulti_by_handle(
switch (ops[i].am_opcode) {
case ATTR_OP_GET:
- ops[i].am_error = xfs_attrmulti_attr_get(inode,
- attr_name,
+ ops[i].am_error = xfs_attrmulti_attr_get(
+ dentry->d_inode, attr_name,
compat_ptr(ops[i].am_attrvalue),
&ops[i].am_length, ops[i].am_flags);
break;
case ATTR_OP_SET:
- ops[i].am_error = xfs_attrmulti_attr_set(inode,
- attr_name,
+ ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
+ if (ops[i].am_error)
+ break;
+ ops[i].am_error = xfs_attrmulti_attr_set(
+ dentry->d_inode, attr_name,
compat_ptr(ops[i].am_attrvalue),
ops[i].am_length, ops[i].am_flags);
+ mnt_drop_write(parfilp->f_path.mnt);
break;
case ATTR_OP_REMOVE:
- ops[i].am_error = xfs_attrmulti_attr_remove(inode,
- attr_name, ops[i].am_flags);
+ ops[i].am_error = mnt_want_write(parfilp->f_path.mnt);
+ if (ops[i].am_error)
+ break;
+ ops[i].am_error = xfs_attrmulti_attr_remove(
+ dentry->d_inode, attr_name,
+ ops[i].am_flags);
+ mnt_drop_write(parfilp->f_path.mnt);
break;
default:
ops[i].am_error = EINVAL;
@@ -553,22 +488,20 @@ xfs_compat_attrmulti_by_handle(
kfree(attr_name);
out_kfree_ops:
kfree(ops);
- out_vn_rele:
- iput(inode);
- out:
+ out_dput:
+ dput(dentry);
return -error;
}
STATIC int
xfs_compat_fssetdm_by_handle(
- xfs_mount_t *mp,
- void __user *arg,
- struct inode *parinode)
+ struct file *parfilp,
+ void __user *arg)
{
int error;
struct fsdmidata fsd;
compat_xfs_fsop_setdm_handlereq_t dmhreq;
- struct inode *inode;
+ struct dentry *dentry;
if (!capable(CAP_MKNOD))
return -XFS_ERROR(EPERM);
@@ -576,12 +509,11 @@ xfs_compat_fssetdm_by_handle(
sizeof(compat_xfs_fsop_setdm_handlereq_t)))
return -XFS_ERROR(EFAULT);
- error = xfs_vget_fsop_handlereq_compat(mp, parinode, &dmhreq.hreq,
- &inode);
- if (error)
- return -error;
+ dentry = xfs_compat_handlereq_to_dentry(parfilp, &dmhreq.hreq);
+ if (IS_ERR(dentry))
+ return PTR_ERR(dentry);
- if (IS_IMMUTABLE(inode) || IS_APPEND(inode)) {
+ if (IS_IMMUTABLE(dentry->d_inode) || IS_APPEND(dentry->d_inode)) {
error = -XFS_ERROR(EPERM);
goto out;
}
@@ -591,11 +523,11 @@ xfs_compat_fssetdm_by_handle(
goto out;
}
- error = -xfs_set_dmattrs(XFS_I(inode), fsd.fsd_dmevmask,
+ error = -xfs_set_dmattrs(XFS_I(dentry->d_inode), fsd.fsd_dmevmask,
fsd.fsd_dmstate);
out:
- iput(inode);
+ dput(dentry);
return error;
}
@@ -722,21 +654,21 @@ xfs_file_compat_ioctl(
if (xfs_compat_handlereq_copyin(&hreq, arg))
return -XFS_ERROR(EFAULT);
- return xfs_open_by_handle(mp, &hreq, filp, inode);
+ return xfs_open_by_handle(filp, &hreq);
}
case XFS_IOC_READLINK_BY_HANDLE_32: {
struct xfs_fsop_handlereq hreq;
if (xfs_compat_handlereq_copyin(&hreq, arg))
return -XFS_ERROR(EFAULT);
- return xfs_readlink_by_handle(mp, &hreq, inode);
+ return xfs_readlink_by_handle(filp, &hreq);
}
case XFS_IOC_ATTRLIST_BY_HANDLE_32:
- return xfs_compat_attrlist_by_handle(mp, arg, inode);
+ return xfs_compat_attrlist_by_handle(filp, arg);
case XFS_IOC_ATTRMULTI_BY_HANDLE_32:
- return xfs_compat_attrmulti_by_handle(mp, arg, inode);
+ return xfs_compat_attrmulti_by_handle(filp, arg);
case XFS_IOC_FSSETDM_BY_HANDLE_32:
- return xfs_compat_fssetdm_by_handle(mp, arg, inode);
+ return xfs_compat_fssetdm_by_handle(filp, arg);
default:
return -XFS_ERROR(ENOIOCTLCMD);
}
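
[Editor's note: the compat path above gains the same mnt_want_write()/mnt_drop_write() bracketing around ATTR_OP_SET and ATTR_OP_REMOVE as the native path, so mutations through the 32-bit ioctl are refused on a read-only mount and the mount's writer count stays balanced. The pattern in isolation — a sketch, not a buildable module; my_do_update() is a hypothetical stand-in for the real work:

	#include <linux/fs.h>
	#include <linux/mount.h>

	static int my_do_update(struct inode *inode);	/* hypothetical mutation */

	/* Bracket every mutation with write-access accounting on the
	 * vfsmount, so a read-only (re)mount either refuses new writers
	 * or waits for in-flight ones to drain. */
	static int my_write_op(struct vfsmount *mnt, struct inode *inode)
	{
		int error;

		error = mnt_want_write(mnt);	/* -EROFS if the mount is r/o */
		if (error)
			return error;
		error = my_do_update(inode);
		mnt_drop_write(mnt);		/* always balance the writer count */
		return error;
	}
]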
diff --git a/fs/xfs/linux-2.6/xfs_super.c b/fs/xfs/linux-2.6/xfs_super.c
index 95a971080368..c71e226da7f5 100644
--- a/fs/xfs/linux-2.6/xfs_super.c
+++ b/fs/xfs/linux-2.6/xfs_super.c
@@ -1197,6 +1197,7 @@ xfs_fs_remount(
struct xfs_mount *mp = XFS_M(sb);
substring_t args[MAX_OPT_ARGS];
char *p;
+ int error;
while ((p = strsep(&options, ",")) != NULL) {
int token;
@@ -1247,11 +1248,25 @@ xfs_fs_remount(
}
}
- /* rw/ro -> rw */
+ /* ro -> rw */
if ((mp->m_flags & XFS_MOUNT_RDONLY) && !(*flags & MS_RDONLY)) {
mp->m_flags &= ~XFS_MOUNT_RDONLY;
if (mp->m_flags & XFS_MOUNT_BARRIER)
xfs_mountfs_check_barriers(mp);
+
+ /*
+ * If this is the first remount to writeable state, we
+ * might have some superblock changes to update.
+ */
+ if (mp->m_update_flags) {
+ error = xfs_mount_log_sb(mp, mp->m_update_flags);
+ if (error) {
+ cmn_err(CE_WARN,
+ "XFS: failed to write sb changes");
+ return error;
+ }
+ mp->m_update_flags = 0;
+ }
}
/* rw -> ro */
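
[Editor's note: the ro→rw hook above is the consumer half of the m_update_flags pattern — changes that cannot reach the disk while the filesystem is read-only are recorded as flag bits and flushed by the first remount to read-write. A hedged sketch of the producer half; the helper name is hypothetical, while xfs_mount_log_sb() and the fields are as used in the hunk above:

	/* Record a superblock field update, deferring it while the
	 * filesystem is mounted read-only. */
	static int my_note_sb_change(struct xfs_mount *mp, __int64_t fields)
	{
		if (mp->m_flags & XFS_MOUNT_RDONLY) {
			mp->m_update_flags |= fields;	/* flushed on ro -> rw */
			return 0;
		}
		return xfs_mount_log_sb(mp, fields);	/* writeable: log now */
	}
]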
diff --git a/fs/xfs/linux-2.6/xfs_sync.c b/fs/xfs/linux-2.6/xfs_sync.c
index 2ed035354c26..a608e72fa405 100644
--- a/fs/xfs/linux-2.6/xfs_sync.c
+++ b/fs/xfs/linux-2.6/xfs_sync.c
@@ -371,7 +371,11 @@ xfs_quiesce_attr(
/* flush inodes and push all remaining buffers out to disk */
xfs_quiesce_fs(mp);
- ASSERT_ALWAYS(atomic_read(&mp->m_active_trans) == 0);
+ /*
+ * Just warn here until the VFS can correctly support
+ * read-only remount without racing.
+ */
+ WARN_ON(atomic_read(&mp->m_active_trans) != 0);
/* Push the superblock and write an unmount record */
error = xfs_log_sbcount(mp, 1);
diff --git a/fs/xfs/quota/xfs_dquot.c b/fs/xfs/quota/xfs_dquot.c
index 591ca6602bfb..6543c0b29753 100644
--- a/fs/xfs/quota/xfs_dquot.c
+++ b/fs/xfs/quota/xfs_dquot.c
@@ -73,6 +73,8 @@ int xfs_dqreq_num;
int xfs_dqerror_mod = 33;
#endif
+static struct lock_class_key xfs_dquot_other_class;
+
/*
* Allocate and initialize a dquot. We don't always allocate fresh memory;
* we try to reclaim a free dquot if the number of incore dquots is above
@@ -139,7 +141,15 @@ xfs_qm_dqinit(
ASSERT(dqp->q_trace);
xfs_dqtrace_entry(dqp, "DQRECLAIMED_INIT");
#endif
- }
+ }
+
+ /*
+ * In either case we need to make sure group quotas have a different
+ * lock class than user quotas, to make sure lockdep knows we can hold
+ * locks of one of each at the same time.
+ */
+ if (!(type & XFS_DQ_USER))
+ lockdep_set_class(&dqp->q_qlock, &xfs_dquot_other_class);
/*
* log item gets initialized later
@@ -421,7 +431,7 @@ xfs_qm_dqalloc(
/*
* Initialize the bmap freelist prior to calling bmapi code.
*/
- XFS_BMAP_INIT(&flist, &firstblock);
+ xfs_bmap_init(&flist, &firstblock);
xfs_ilock(quotip, XFS_ILOCK_EXCL);
/*
* Return if this type of quotas is turned off while we didn't
@@ -1383,6 +1393,12 @@ xfs_dqunlock_nonotify(
mutex_unlock(&(dqp->q_qlock));
}
+/*
+ * Lock two xfs_dquot structures.
+ *
+ * To avoid deadlocks we always lock the quota structure with
+ * the lower id first.
+ */
void
xfs_dqlock2(
xfs_dquot_t *d1,
@@ -1392,18 +1408,16 @@ xfs_dqlock2(
ASSERT(d1 != d2);
if (be32_to_cpu(d1->q_core.d_id) >
be32_to_cpu(d2->q_core.d_id)) {
- xfs_dqlock(d2);
- xfs_dqlock(d1);
+ mutex_lock(&d2->q_qlock);
+ mutex_lock_nested(&d1->q_qlock, XFS_QLOCK_NESTED);
} else {
- xfs_dqlock(d1);
- xfs_dqlock(d2);
- }
- } else {
- if (d1) {
- xfs_dqlock(d1);
- } else if (d2) {
- xfs_dqlock(d2);
+ mutex_lock(&d1->q_qlock);
+ mutex_lock_nested(&d2->q_qlock, XFS_QLOCK_NESTED);
}
+ } else if (d1) {
+ mutex_lock(&d1->q_qlock);
+ } else if (d2) {
+ mutex_lock(&d2->q_qlock);
}
}
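
[Editor's note: two lockdep techniques meet in this file. lockdep_set_class() gives non-user dquots a different lock class from user dquots, so holding one of each simultaneously is legal, and xfs_dqlock2() imposes a total order by id, taking the higher-id lock with mutex_lock_nested() so lockdep treats it as intentional nesting rather than recursion. A condensed sketch of both with a hypothetical object type:

	#include <linux/kernel.h>
	#include <linux/lockdep.h>
	#include <linux/mutex.h>

	struct my_obj {
		int		id;
		struct mutex	lock;
	};

	static struct lock_class_key my_other_class;

	static void my_obj_init(struct my_obj *obj, bool is_user)
	{
		mutex_init(&obj->lock);
		if (!is_user)	/* the second kind gets its own class */
			lockdep_set_class(&obj->lock, &my_other_class);
	}

	/* Lock two objects of the same kind without deadlocking:
	 * always take the lower id first, and flag the second
	 * acquisition as a one-level nesting for lockdep. */
	static void my_lock2(struct my_obj *a, struct my_obj *b)
	{
		if (a->id > b->id)
			swap(a, b);
		mutex_lock(&a->lock);
		mutex_lock_nested(&b->lock, SINGLE_DEPTH_NESTING);
	}

The id ordering guarantees no two tasks ever wait on each other's second lock, which is the whole deadlock-avoidance argument in the comment above.]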
diff --git a/fs/xfs/quota/xfs_dquot.h b/fs/xfs/quota/xfs_dquot.h
index 7e455337e2ba..d443e93b4331 100644
--- a/fs/xfs/quota/xfs_dquot.h
+++ b/fs/xfs/quota/xfs_dquot.h
@@ -97,6 +97,16 @@ typedef struct xfs_dquot {
#define dq_hashlist q_lists.dqm_hashlist
#define dq_flags q_lists.dqm_flags
+/*
+ * Lock hierarchy for q_qlock:
+ * XFS_QLOCK_NORMAL is the implicit default,
+ * XFS_QLOCK_NESTED is used for the dquot with the higher id in xfs_dqlock2
+ */
+enum {
+ XFS_QLOCK_NORMAL = 0,
+ XFS_QLOCK_NESTED,
+};
+
#define XFS_DQHOLD(dqp) ((dqp)->q_nrefs++)
#ifdef DEBUG
diff --git a/fs/xfs/quota/xfs_qm.c b/fs/xfs/quota/xfs_qm.c
index 6b13960cf318..7a2beb64314f 100644
--- a/fs/xfs/quota/xfs_qm.c
+++ b/fs/xfs/quota/xfs_qm.c
@@ -1070,6 +1070,13 @@ xfs_qm_sync(
return 0;
}
+/*
+ * The hash chains and the mplist use the same xfs_dqhash structure as
+ * their list head, but we can take the mplist qh_lock and one of the
+ * hash qh_locks at the same time without any problem as they aren't
+ * related.
+ */
+static struct lock_class_key xfs_quota_mplist_class;
/*
* This initializes all the quota information that's kept in the
@@ -1105,6 +1112,8 @@ xfs_qm_init_quotainfo(
}
xfs_qm_list_init(&qinf->qi_dqlist, "mpdqlist", 0);
+ lockdep_set_class(&qinf->qi_dqlist.qh_lock, &xfs_quota_mplist_class);
+
qinf->qi_dqreclaims = 0;
/* mutex used to serialize quotaoffs */
diff --git a/fs/xfs/xfs_ag.h b/fs/xfs/xfs_ag.h
index d3b3cf742999..143d63ecb20a 100644
--- a/fs/xfs/xfs_ag.h
+++ b/fs/xfs/xfs_ag.h
@@ -244,8 +244,8 @@ typedef struct xfs_perag
#define XFS_AG_CHECK_DADDR(mp,d,len) \
((len) == 1 ? \
ASSERT((d) == XFS_SB_DADDR || \
- XFS_DADDR_TO_AGBNO(mp, d) != XFS_SB_DADDR) : \
- ASSERT(XFS_DADDR_TO_AGNO(mp, d) == \
- XFS_DADDR_TO_AGNO(mp, (d) + (len) - 1)))
+ xfs_daddr_to_agbno(mp, d) != XFS_SB_DADDR) : \
+ ASSERT(xfs_daddr_to_agno(mp, d) == \
+ xfs_daddr_to_agno(mp, (d) + (len) - 1)))
#endif /* __XFS_AG_H__ */
diff --git a/fs/xfs/xfs_alloc_btree.c b/fs/xfs/xfs_alloc_btree.c
index 733cb75a8c5d..c10c3a292d30 100644
--- a/fs/xfs/xfs_alloc_btree.c
+++ b/fs/xfs/xfs_alloc_btree.c
@@ -115,7 +115,7 @@ xfs_allocbt_free_block(
xfs_agblock_t bno;
int error;
- bno = XFS_DADDR_TO_AGBNO(cur->bc_mp, XFS_BUF_ADDR(bp));
+ bno = xfs_daddr_to_agbno(cur->bc_mp, XFS_BUF_ADDR(bp));
error = xfs_alloc_put_freelist(cur->bc_tp, agbp, NULL, bno, 1);
if (error)
return error;
diff --git a/fs/xfs/xfs_attr.c b/fs/xfs/xfs_attr.c
index f7cdc28aff41..5fde1654b430 100644
--- a/fs/xfs/xfs_attr.c
+++ b/fs/xfs/xfs_attr.c
@@ -374,7 +374,7 @@ xfs_attr_set_int(xfs_inode_t *dp, struct xfs_name *name,
* It won't fit in the shortform, transform to a leaf block.
* GROT: another possible req'mt for a double-split btree op.
*/
- XFS_BMAP_INIT(args.flist, args.firstblock);
+ xfs_bmap_init(args.flist, args.firstblock);
error = xfs_attr_shortform_to_leaf(&args);
if (!error) {
error = xfs_bmap_finish(&args.trans, args.flist,
@@ -956,7 +956,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* Commit that transaction so that the node_addname() call
* can manage its own transactions.
*/
- XFS_BMAP_INIT(args->flist, args->firstblock);
+ xfs_bmap_init(args->flist, args->firstblock);
error = xfs_attr_leaf_to_node(args);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
@@ -1057,7 +1057,7 @@ xfs_attr_leaf_addname(xfs_da_args_t *args)
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- XFS_BMAP_INIT(args->flist, args->firstblock);
+ xfs_bmap_init(args->flist, args->firstblock);
error = xfs_attr_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error) {
@@ -1135,7 +1135,7 @@ xfs_attr_leaf_removename(xfs_da_args_t *args)
* If the result is small enough, shrink it all into the inode.
*/
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- XFS_BMAP_INIT(args->flist, args->firstblock);
+ xfs_bmap_init(args->flist, args->firstblock);
error = xfs_attr_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error) {
@@ -1290,7 +1290,7 @@ restart:
* have been a b-tree.
*/
xfs_da_state_free(state);
- XFS_BMAP_INIT(args->flist, args->firstblock);
+ xfs_bmap_init(args->flist, args->firstblock);
error = xfs_attr_leaf_to_node(args);
if (!error) {
error = xfs_bmap_finish(&args->trans,
@@ -1331,7 +1331,7 @@ restart:
* in the index/blkno/rmtblkno/rmtblkcnt fields and
* in the index2/blkno2/rmtblkno2/rmtblkcnt2 fields.
*/
- XFS_BMAP_INIT(args->flist, args->firstblock);
+ xfs_bmap_init(args->flist, args->firstblock);
error = xfs_da_split(state);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
@@ -1443,7 +1443,7 @@ restart:
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
- XFS_BMAP_INIT(args->flist, args->firstblock);
+ xfs_bmap_init(args->flist, args->firstblock);
error = xfs_da_join(state);
if (!error) {
error = xfs_bmap_finish(&args->trans,
@@ -1579,7 +1579,7 @@ xfs_attr_node_removename(xfs_da_args_t *args)
* Check to see if the tree needs to be collapsed.
*/
if (retval && (state->path.active > 1)) {
- XFS_BMAP_INIT(args->flist, args->firstblock);
+ xfs_bmap_init(args->flist, args->firstblock);
error = xfs_da_join(state);
if (!error) {
error = xfs_bmap_finish(&args->trans, args->flist,
@@ -1630,7 +1630,7 @@ xfs_attr_node_removename(xfs_da_args_t *args)
== XFS_ATTR_LEAF_MAGIC);
if ((forkoff = xfs_attr_shortform_allfit(bp, dp))) {
- XFS_BMAP_INIT(args->flist, args->firstblock);
+ xfs_bmap_init(args->flist, args->firstblock);
error = xfs_attr_leaf_to_shortform(bp, args, forkoff);
/* bp is gone due to xfs_da_shrink_inode */
if (!error) {
@@ -2069,7 +2069,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
/*
* Allocate a single extent, up to the size of the value.
*/
- XFS_BMAP_INIT(args->flist, args->firstblock);
+ xfs_bmap_init(args->flist, args->firstblock);
nmap = 1;
error = xfs_bmapi(args->trans, dp, (xfs_fileoff_t)lblkno,
blkcnt,
@@ -2123,7 +2123,7 @@ xfs_attr_rmtval_set(xfs_da_args_t *args)
/*
* Try to remember where we decided to put the value.
*/
- XFS_BMAP_INIT(args->flist, args->firstblock);
+ xfs_bmap_init(args->flist, args->firstblock);
nmap = 1;
error = xfs_bmapi(NULL, dp, (xfs_fileoff_t)lblkno,
args->rmtblkcnt,
@@ -2188,7 +2188,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
/*
* Try to remember where we decided to put the value.
*/
- XFS_BMAP_INIT(args->flist, args->firstblock);
+ xfs_bmap_init(args->flist, args->firstblock);
nmap = 1;
error = xfs_bmapi(NULL, args->dp, (xfs_fileoff_t)lblkno,
args->rmtblkcnt,
@@ -2229,7 +2229,7 @@ xfs_attr_rmtval_remove(xfs_da_args_t *args)
blkcnt = args->rmtblkcnt;
done = 0;
while (!done) {
- XFS_BMAP_INIT(args->flist, args->firstblock);
+ xfs_bmap_init(args->flist, args->firstblock);
error = xfs_bunmapi(args->trans, args->dp, lblkno, blkcnt,
XFS_BMAPI_ATTRFORK | XFS_BMAPI_METADATA,
1, args->firstblock, args->flist,
diff --git a/fs/xfs/xfs_bmap.c b/fs/xfs/xfs_bmap.c
index 138308e70d14..c852cd65aaea 100644
--- a/fs/xfs/xfs_bmap.c
+++ b/fs/xfs/xfs_bmap.c
@@ -595,9 +595,9 @@ xfs_bmap_add_extent(
xfs_iext_insert(ifp, 0, 1, new);
ASSERT(cur == NULL);
ifp->if_lastex = 0;
- if (!ISNULLSTARTBLOCK(new->br_startblock)) {
+ if (!isnullstartblock(new->br_startblock)) {
XFS_IFORK_NEXT_SET(ip, whichfork, 1);
- logflags = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+ logflags = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else
logflags = 0;
/* DELTA: single new extent */
@@ -613,7 +613,7 @@ xfs_bmap_add_extent(
/*
* Any kind of new delayed allocation goes here.
*/
- else if (ISNULLSTARTBLOCK(new->br_startblock)) {
+ else if (isnullstartblock(new->br_startblock)) {
if (cur)
ASSERT((cur->bc_private.b.flags &
XFS_BTCUR_BPRV_WASDEL) == 0);
@@ -644,11 +644,11 @@ xfs_bmap_add_extent(
* in a delayed or unwritten allocation with a real one, or
* converting real back to unwritten.
*/
- if (!ISNULLSTARTBLOCK(new->br_startblock) &&
+ if (!isnullstartblock(new->br_startblock) &&
new->br_startoff + new->br_blockcount > prev.br_startoff) {
if (prev.br_state != XFS_EXT_UNWRITTEN &&
- ISNULLSTARTBLOCK(prev.br_startblock)) {
- da_old = STARTBLOCKVAL(prev.br_startblock);
+ isnullstartblock(prev.br_startblock)) {
+ da_old = startblockval(prev.br_startblock);
if (cur)
ASSERT(cur->bc_private.b.flags &
XFS_BTCUR_BPRV_WASDEL);
@@ -803,7 +803,7 @@ xfs_bmap_add_extent_delay_real(
*/
if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
- STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
+ STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock));
}
STATE_SET(LEFT_CONTIG,
STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
@@ -820,7 +820,7 @@ xfs_bmap_add_extent_delay_real(
idx <
ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
- STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
+ STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock));
}
STATE_SET(RIGHT_CONTIG,
STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
@@ -1019,8 +1019,8 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- STARTBLOCKVAL(PREV.br_startblock));
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+ startblockval(PREV.br_startblock));
+ xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
XFS_BMAP_TRACE_POST_UPDATE("LF|LC", ip, idx, XFS_DATA_FORK);
*dnew = temp;
/* DELTA: The boundary between two in-core extents moved. */
@@ -1067,10 +1067,10 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- STARTBLOCKVAL(PREV.br_startblock) -
+ startblockval(PREV.br_startblock) -
(cur ? cur->bc_private.b.allocated : 0));
ep = xfs_iext_get_ext(ifp, idx + 1);
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+ xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
XFS_BMAP_TRACE_POST_UPDATE("LF", ip, idx + 1, XFS_DATA_FORK);
*dnew = temp;
/* DELTA: One in-core extent is split in two. */
@@ -1110,8 +1110,8 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- STARTBLOCKVAL(PREV.br_startblock));
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+ startblockval(PREV.br_startblock));
+ xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
XFS_BMAP_TRACE_POST_UPDATE("RF|RC", ip, idx, XFS_DATA_FORK);
*dnew = temp;
/* DELTA: The boundary between two in-core extents moved. */
@@ -1157,10 +1157,10 @@ xfs_bmap_add_extent_delay_real(
goto done;
}
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
- STARTBLOCKVAL(PREV.br_startblock) -
+ startblockval(PREV.br_startblock) -
(cur ? cur->bc_private.b.allocated : 0));
ep = xfs_iext_get_ext(ifp, idx);
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+ xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
XFS_BMAP_TRACE_POST_UPDATE("RF", ip, idx, XFS_DATA_FORK);
*dnew = temp;
/* DELTA: One in-core extent is split in two. */
@@ -1213,7 +1213,7 @@ xfs_bmap_add_extent_delay_real(
}
temp = xfs_bmap_worst_indlen(ip, temp);
temp2 = xfs_bmap_worst_indlen(ip, temp2);
- diff = (int)(temp + temp2 - STARTBLOCKVAL(PREV.br_startblock) -
+ diff = (int)(temp + temp2 - startblockval(PREV.br_startblock) -
(cur ? cur->bc_private.b.allocated : 0));
if (diff > 0 &&
xfs_mod_incore_sb(ip->i_mount, XFS_SBS_FDBLOCKS, -((int64_t)diff), rsvd)) {
@@ -1241,11 +1241,11 @@ xfs_bmap_add_extent_delay_real(
}
}
ep = xfs_iext_get_ext(ifp, idx);
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+ xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx, XFS_DATA_FORK);
XFS_BMAP_TRACE_PRE_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx + 2),
- NULLSTARTBLOCK((int)temp2));
+ nullstartblock((int)temp2));
XFS_BMAP_TRACE_POST_UPDATE("0", ip, idx + 2, XFS_DATA_FORK);
*dnew = temp + temp2;
/* DELTA: One in-core extent is split in three. */
@@ -1365,7 +1365,7 @@ xfs_bmap_add_extent_unwritten_real(
*/
if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &LEFT);
- STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(LEFT.br_startblock));
+ STATE_SET(LEFT_DELAY, isnullstartblock(LEFT.br_startblock));
}
STATE_SET(LEFT_CONTIG,
STATE_TEST(LEFT_VALID) && !STATE_TEST(LEFT_DELAY) &&
@@ -1382,7 +1382,7 @@ xfs_bmap_add_extent_unwritten_real(
idx <
ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t) - 1)) {
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx + 1), &RIGHT);
- STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(RIGHT.br_startblock));
+ STATE_SET(RIGHT_DELAY, isnullstartblock(RIGHT.br_startblock));
}
STATE_SET(RIGHT_CONTIG,
STATE_TEST(RIGHT_VALID) && !STATE_TEST(RIGHT_DELAY) &&
@@ -1889,13 +1889,13 @@ xfs_bmap_add_extent_hole_delay(
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
ep = xfs_iext_get_ext(ifp, idx);
state = 0;
- ASSERT(ISNULLSTARTBLOCK(new->br_startblock));
+ ASSERT(isnullstartblock(new->br_startblock));
/*
* Check and set flags if this segment has a left neighbor
*/
if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
- STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
+ STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock));
}
/*
* Check and set flags if the current (right) segment exists.
@@ -1905,7 +1905,7 @@ xfs_bmap_add_extent_hole_delay(
idx <
ip->i_df.if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
xfs_bmbt_get_all(ep, &right);
- STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock));
+ STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock));
}
/*
* Set contiguity flags on the left and right neighbors.
@@ -1938,12 +1938,12 @@ xfs_bmap_add_extent_hole_delay(
XFS_BMAP_TRACE_PRE_UPDATE("LC|RC", ip, idx - 1,
XFS_DATA_FORK);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
- oldlen = STARTBLOCKVAL(left.br_startblock) +
- STARTBLOCKVAL(new->br_startblock) +
- STARTBLOCKVAL(right.br_startblock);
+ oldlen = startblockval(left.br_startblock) +
+ startblockval(new->br_startblock) +
+ startblockval(right.br_startblock);
newlen = xfs_bmap_worst_indlen(ip, temp);
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
- NULLSTARTBLOCK((int)newlen));
+ nullstartblock((int)newlen));
XFS_BMAP_TRACE_POST_UPDATE("LC|RC", ip, idx - 1,
XFS_DATA_FORK);
XFS_BMAP_TRACE_DELETE("LC|RC", ip, idx, 1, XFS_DATA_FORK);
@@ -1964,11 +1964,11 @@ xfs_bmap_add_extent_hole_delay(
XFS_BMAP_TRACE_PRE_UPDATE("LC", ip, idx - 1,
XFS_DATA_FORK);
xfs_bmbt_set_blockcount(xfs_iext_get_ext(ifp, idx - 1), temp);
- oldlen = STARTBLOCKVAL(left.br_startblock) +
- STARTBLOCKVAL(new->br_startblock);
+ oldlen = startblockval(left.br_startblock) +
+ startblockval(new->br_startblock);
newlen = xfs_bmap_worst_indlen(ip, temp);
xfs_bmbt_set_startblock(xfs_iext_get_ext(ifp, idx - 1),
- NULLSTARTBLOCK((int)newlen));
+ nullstartblock((int)newlen));
XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1,
XFS_DATA_FORK);
ip->i_df.if_lastex = idx - 1;
@@ -1985,11 +1985,11 @@ xfs_bmap_add_extent_hole_delay(
*/
XFS_BMAP_TRACE_PRE_UPDATE("RC", ip, idx, XFS_DATA_FORK);
temp = new->br_blockcount + right.br_blockcount;
- oldlen = STARTBLOCKVAL(new->br_startblock) +
- STARTBLOCKVAL(right.br_startblock);
+ oldlen = startblockval(new->br_startblock) +
+ startblockval(right.br_startblock);
newlen = xfs_bmap_worst_indlen(ip, temp);
xfs_bmbt_set_allf(ep, new->br_startoff,
- NULLSTARTBLOCK((int)newlen), temp, right.br_state);
+ nullstartblock((int)newlen), temp, right.br_state);
XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, XFS_DATA_FORK);
ip->i_df.if_lastex = idx;
/* DELTA: One in-core extent grew into a hole. */
@@ -2085,7 +2085,7 @@ xfs_bmap_add_extent_hole_real(
*/
if (STATE_SET_TEST(LEFT_VALID, idx > 0)) {
xfs_bmbt_get_all(xfs_iext_get_ext(ifp, idx - 1), &left);
- STATE_SET(LEFT_DELAY, ISNULLSTARTBLOCK(left.br_startblock));
+ STATE_SET(LEFT_DELAY, isnullstartblock(left.br_startblock));
}
/*
* Check and set flags if this segment has a current value.
@@ -2095,7 +2095,7 @@ xfs_bmap_add_extent_hole_real(
idx <
ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t))) {
xfs_bmbt_get_all(ep, &right);
- STATE_SET(RIGHT_DELAY, ISNULLSTARTBLOCK(right.br_startblock));
+ STATE_SET(RIGHT_DELAY, isnullstartblock(right.br_startblock));
}
/*
* We're inserting a real allocation between "left" and "right".
@@ -2143,7 +2143,7 @@ xfs_bmap_add_extent_hole_real(
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
if (cur == NULL) {
- rval = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+ rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else {
rval = XFS_ILOG_CORE;
if ((error = xfs_bmbt_lookup_eq(cur,
@@ -2185,7 +2185,7 @@ xfs_bmap_add_extent_hole_real(
XFS_BMAP_TRACE_POST_UPDATE("LC", ip, idx - 1, whichfork);
ifp->if_lastex = idx - 1;
if (cur == NULL) {
- rval = XFS_ILOG_FEXT(whichfork);
+ rval = xfs_ilog_fext(whichfork);
} else {
rval = 0;
if ((error = xfs_bmbt_lookup_eq(cur,
@@ -2220,7 +2220,7 @@ xfs_bmap_add_extent_hole_real(
XFS_BMAP_TRACE_POST_UPDATE("RC", ip, idx, whichfork);
ifp->if_lastex = idx;
if (cur == NULL) {
- rval = XFS_ILOG_FEXT(whichfork);
+ rval = xfs_ilog_fext(whichfork);
} else {
rval = 0;
if ((error = xfs_bmbt_lookup_eq(cur,
@@ -2254,7 +2254,7 @@ xfs_bmap_add_extent_hole_real(
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
if (cur == NULL) {
- rval = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+ rval = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
} else {
rval = XFS_ILOG_CORE;
if ((error = xfs_bmbt_lookup_eq(cur,
@@ -2482,7 +2482,7 @@ xfs_bmap_adjacent(
* try to use it's last block as our starting point.
*/
if (ap->eof && ap->prevp->br_startoff != NULLFILEOFF &&
- !ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
+ !isnullstartblock(ap->prevp->br_startblock) &&
ISVALID(ap->prevp->br_startblock + ap->prevp->br_blockcount,
ap->prevp->br_startblock)) {
ap->rval = ap->prevp->br_startblock + ap->prevp->br_blockcount;
@@ -2511,7 +2511,7 @@ xfs_bmap_adjacent(
* start block based on it.
*/
if (ap->prevp->br_startoff != NULLFILEOFF &&
- !ISNULLSTARTBLOCK(ap->prevp->br_startblock) &&
+ !isnullstartblock(ap->prevp->br_startblock) &&
(prevbno = ap->prevp->br_startblock +
ap->prevp->br_blockcount) &&
ISVALID(prevbno, ap->prevp->br_startblock)) {
@@ -2552,7 +2552,7 @@ xfs_bmap_adjacent(
* If there's a following (right) block, select a requested
* start block based on it.
*/
- if (!ISNULLSTARTBLOCK(ap->gotp->br_startblock)) {
+ if (!isnullstartblock(ap->gotp->br_startblock)) {
/*
* Calculate gap to start of next block.
*/
@@ -3082,7 +3082,7 @@ xfs_bmap_btree_to_extents(
ASSERT(ifp->if_broot == NULL);
ASSERT((ifp->if_flags & XFS_IFBROOT) == 0);
XFS_IFORK_FMT_SET(ip, whichfork, XFS_DINODE_FMT_EXTENTS);
- *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FEXT(whichfork);
+ *logflagsp = XFS_ILOG_CORE | xfs_ilog_fext(whichfork);
return 0;
}
@@ -3136,8 +3136,8 @@ xfs_bmap_del_extent(
del_endoff = del->br_startoff + del->br_blockcount;
got_endoff = got.br_startoff + got.br_blockcount;
ASSERT(got_endoff >= del_endoff);
- delay = ISNULLSTARTBLOCK(got.br_startblock);
- ASSERT(ISNULLSTARTBLOCK(del->br_startblock) == delay);
+ delay = isnullstartblock(got.br_startblock);
+ ASSERT(isnullstartblock(del->br_startblock) == delay);
flags = 0;
qfield = 0;
error = 0;
@@ -3189,7 +3189,7 @@ xfs_bmap_del_extent(
}
da_old = da_new = 0;
} else {
- da_old = STARTBLOCKVAL(got.br_startblock);
+ da_old = startblockval(got.br_startblock);
da_new = 0;
nblks = 0;
do_fx = 0;
@@ -3213,7 +3213,7 @@ xfs_bmap_del_extent(
XFS_IFORK_NEXTENTS(ip, whichfork) - 1);
flags |= XFS_ILOG_CORE;
if (!cur) {
- flags |= XFS_ILOG_FEXT(whichfork);
+ flags |= xfs_ilog_fext(whichfork);
break;
}
if ((error = xfs_btree_delete(cur, &i)))
@@ -3233,7 +3233,7 @@ xfs_bmap_del_extent(
if (delay) {
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
da_old);
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+ xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx,
whichfork);
da_new = temp;
@@ -3242,7 +3242,7 @@ xfs_bmap_del_extent(
xfs_bmbt_set_startblock(ep, del_endblock);
XFS_BMAP_TRACE_POST_UPDATE("2", ip, idx, whichfork);
if (!cur) {
- flags |= XFS_ILOG_FEXT(whichfork);
+ flags |= xfs_ilog_fext(whichfork);
break;
}
if ((error = xfs_bmbt_update(cur, del_endoff, del_endblock,
@@ -3262,7 +3262,7 @@ xfs_bmap_del_extent(
if (delay) {
temp = XFS_FILBLKS_MIN(xfs_bmap_worst_indlen(ip, temp),
da_old);
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+ xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx,
whichfork);
da_new = temp;
@@ -3270,7 +3270,7 @@ xfs_bmap_del_extent(
}
XFS_BMAP_TRACE_POST_UPDATE("1", ip, idx, whichfork);
if (!cur) {
- flags |= XFS_ILOG_FEXT(whichfork);
+ flags |= xfs_ilog_fext(whichfork);
break;
}
if ((error = xfs_bmbt_update(cur, got.br_startoff,
@@ -3345,22 +3345,22 @@ xfs_bmap_del_extent(
}
XFS_WANT_CORRUPTED_GOTO(i == 1, done);
} else
- flags |= XFS_ILOG_FEXT(whichfork);
+ flags |= xfs_ilog_fext(whichfork);
XFS_IFORK_NEXT_SET(ip, whichfork,
XFS_IFORK_NEXTENTS(ip, whichfork) + 1);
} else {
ASSERT(whichfork == XFS_DATA_FORK);
temp = xfs_bmap_worst_indlen(ip, temp);
- xfs_bmbt_set_startblock(ep, NULLSTARTBLOCK((int)temp));
+ xfs_bmbt_set_startblock(ep, nullstartblock((int)temp));
temp2 = xfs_bmap_worst_indlen(ip, temp2);
- new.br_startblock = NULLSTARTBLOCK((int)temp2);
+ new.br_startblock = nullstartblock((int)temp2);
da_new = temp + temp2;
while (da_new > da_old) {
if (temp) {
temp--;
da_new--;
xfs_bmbt_set_startblock(ep,
- NULLSTARTBLOCK((int)temp));
+ nullstartblock((int)temp));
}
if (da_new == da_old)
break;
@@ -3368,7 +3368,7 @@ xfs_bmap_del_extent(
temp2--;
da_new--;
new.br_startblock =
- NULLSTARTBLOCK((int)temp2);
+ nullstartblock((int)temp2);
}
}
}
@@ -3545,7 +3545,7 @@ xfs_bmap_extents_to_btree(
nextents = ifp->if_bytes / (uint)sizeof(xfs_bmbt_rec_t);
for (cnt = i = 0; i < nextents; i++) {
ep = xfs_iext_get_ext(ifp, i);
- if (!ISNULLSTARTBLOCK(xfs_bmbt_get_startblock(ep))) {
+ if (!isnullstartblock(xfs_bmbt_get_startblock(ep))) {
arp->l0 = cpu_to_be64(ep->l0);
arp->l1 = cpu_to_be64(ep->l1);
arp++; cnt++;
@@ -3572,7 +3572,7 @@ xfs_bmap_extents_to_btree(
xfs_btree_log_recs(cur, abp, 1, be16_to_cpu(ablock->bb_numrecs));
ASSERT(*curp == NULL);
*curp = cur;
- *logflagsp = XFS_ILOG_CORE | XFS_ILOG_FBROOT(whichfork);
+ *logflagsp = XFS_ILOG_CORE | xfs_ilog_fbroot(whichfork);
return 0;
}
@@ -3676,7 +3676,7 @@ xfs_bmap_local_to_extents(
ip->i_d.di_nblocks = 1;
XFS_TRANS_MOD_DQUOT_BYINO(args.mp, tp, ip,
XFS_TRANS_DQ_BCOUNT, 1L);
- flags |= XFS_ILOG_FEXT(whichfork);
+ flags |= xfs_ilog_fext(whichfork);
} else {
ASSERT(XFS_IFORK_NEXTENTS(ip, whichfork) == 0);
xfs_bmap_forkoff_reset(ip->i_mount, ip, whichfork);
@@ -4082,7 +4082,7 @@ xfs_bmap_add_attrfork(
XFS_IFORK_ASIZE(ip) / (uint)sizeof(xfs_bmbt_rec_t);
ip->i_afp->if_flags = XFS_IFEXTENTS;
logflags = 0;
- XFS_BMAP_INIT(&flist, &firstblock);
+ xfs_bmap_init(&flist, &firstblock);
switch (ip->i_d.di_format) {
case XFS_DINODE_FMT_LOCAL:
error = xfs_bmap_add_attrfork_local(tp, ip, &firstblock, &flist,
@@ -4162,7 +4162,7 @@ xfs_bmap_add_free(
ASSERT(bno != NULLFSBLOCK);
ASSERT(len > 0);
ASSERT(len <= MAXEXTLEN);
- ASSERT(!ISNULLSTARTBLOCK(bno));
+ ASSERT(!isnullstartblock(bno));
agno = XFS_FSB_TO_AGNO(mp, bno);
agbno = XFS_FSB_TO_AGBNO(mp, bno);
ASSERT(agno < mp->m_sb.sb_agcount);
@@ -4909,7 +4909,7 @@ xfs_bmapi(
got.br_startoff = end;
inhole = eof || got.br_startoff > bno;
wasdelay = wr && !inhole && !(flags & XFS_BMAPI_DELAY) &&
- ISNULLSTARTBLOCK(got.br_startblock);
+ isnullstartblock(got.br_startblock);
/*
* First, deal with the hole before the allocated space
* that we found, if any.
@@ -5028,7 +5028,7 @@ xfs_bmapi(
}
ip->i_delayed_blks += alen;
- abno = NULLSTARTBLOCK(indlen);
+ abno = nullstartblock(indlen);
} else {
/*
* If first time, allocate and fill in
@@ -5144,8 +5144,8 @@ xfs_bmapi(
aoff + alen);
#ifdef DEBUG
if (flags & XFS_BMAPI_DELAY) {
- ASSERT(ISNULLSTARTBLOCK(got.br_startblock));
- ASSERT(STARTBLOCKVAL(got.br_startblock) > 0);
+ ASSERT(isnullstartblock(got.br_startblock));
+ ASSERT(startblockval(got.br_startblock) > 0);
}
ASSERT(got.br_state == XFS_EXT_NORM ||
got.br_state == XFS_EXT_UNWRITTEN);
@@ -5179,7 +5179,7 @@ xfs_bmapi(
ASSERT((bno >= obno) || (n == 0));
ASSERT(bno < end);
mval->br_startoff = bno;
- if (ISNULLSTARTBLOCK(got.br_startblock)) {
+ if (isnullstartblock(got.br_startblock)) {
ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
mval->br_startblock = DELAYSTARTBLOCK;
} else
@@ -5201,7 +5201,7 @@ xfs_bmapi(
ASSERT(mval->br_blockcount <= len);
} else {
*mval = got;
- if (ISNULLSTARTBLOCK(mval->br_startblock)) {
+ if (isnullstartblock(mval->br_startblock)) {
ASSERT(!wr || (flags & XFS_BMAPI_DELAY));
mval->br_startblock = DELAYSTARTBLOCK;
}
@@ -5329,12 +5329,12 @@ error0:
* Log everything. Do this after conversion, there's no point in
* logging the extent records if we've converted to btree format.
*/
- if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
+ if ((logflags & xfs_ilog_fext(whichfork)) &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
- logflags &= ~XFS_ILOG_FEXT(whichfork);
- else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
+ logflags &= ~xfs_ilog_fext(whichfork);
+ else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
- logflags &= ~XFS_ILOG_FBROOT(whichfork);
+ logflags &= ~xfs_ilog_fbroot(whichfork);
/*
* Log whatever the flags say, even if error. Otherwise we might miss
* detecting a case where the data is changed, there's an error,
@@ -5411,7 +5411,7 @@ xfs_bmapi_single(
*fsb = NULLFSBLOCK;
return 0;
}
- ASSERT(!ISNULLSTARTBLOCK(got.br_startblock));
+ ASSERT(!isnullstartblock(got.br_startblock));
ASSERT(bno < got.br_startoff + got.br_blockcount);
*fsb = got.br_startblock + (bno - got.br_startoff);
ifp->if_lastex = lastx;
@@ -5543,7 +5543,7 @@ xfs_bunmapi(
*/
ASSERT(ep != NULL);
del = got;
- wasdel = ISNULLSTARTBLOCK(del.br_startblock);
+ wasdel = isnullstartblock(del.br_startblock);
if (got.br_startoff < start) {
del.br_startoff = start;
del.br_blockcount -= start - got.br_startoff;
@@ -5638,7 +5638,7 @@ xfs_bunmapi(
xfs_bmbt_get_all(xfs_iext_get_ext(ifp,
lastx - 1), &prev);
ASSERT(prev.br_state == XFS_EXT_NORM);
- ASSERT(!ISNULLSTARTBLOCK(prev.br_startblock));
+ ASSERT(!isnullstartblock(prev.br_startblock));
ASSERT(del.br_startblock ==
prev.br_startblock + prev.br_blockcount);
if (prev.br_startoff < start) {
@@ -5666,7 +5666,7 @@ xfs_bunmapi(
}
}
if (wasdel) {
- ASSERT(STARTBLOCKVAL(del.br_startblock) > 0);
+ ASSERT(startblockval(del.br_startblock) > 0);
/* Update realtime/data freespace, unreserve quota */
if (isrt) {
xfs_filblks_t rtexts;
@@ -5782,12 +5782,12 @@ error0:
* Log everything. Do this after conversion, there's no point in
* logging the extent records if we've converted to btree format.
*/
- if ((logflags & XFS_ILOG_FEXT(whichfork)) &&
+ if ((logflags & xfs_ilog_fext(whichfork)) &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_EXTENTS)
- logflags &= ~XFS_ILOG_FEXT(whichfork);
- else if ((logflags & XFS_ILOG_FBROOT(whichfork)) &&
+ logflags &= ~xfs_ilog_fext(whichfork);
+ else if ((logflags & xfs_ilog_fbroot(whichfork)) &&
XFS_IFORK_FORMAT(ip, whichfork) != XFS_DINODE_FMT_BTREE)
- logflags &= ~XFS_ILOG_FBROOT(whichfork);
+ logflags &= ~xfs_ilog_fbroot(whichfork);
/*
* Log inode even in the error case, if the transaction
* is dirty we'll need to shut down the filesystem.
@@ -5838,7 +5838,7 @@ xfs_getbmapx_fix_eof_hole(
if (startblock == DELAYSTARTBLOCK)
out->bmv_block = -2;
else
- out->bmv_block = XFS_FSB_TO_DB(ip, startblock);
+ out->bmv_block = xfs_fsb_to_db(ip, startblock);
fileblock = XFS_BB_TO_FSB(ip->i_mount, out->bmv_offset);
ifp = XFS_IFORK_PTR(ip, XFS_DATA_FORK);
if (xfs_iext_bno_to_ext(ifp, fileblock, &lastx) &&
@@ -5979,7 +5979,7 @@ xfs_getbmap(
if (nex > XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1)
nex = XFS_IFORK_NEXTENTS(ip, whichfork) * 2 + 1;
- bmapi_flags = XFS_BMAPI_AFLAG(whichfork) |
+ bmapi_flags = xfs_bmapi_aflag(whichfork) |
((iflags & BMV_IF_PREALLOC) ? 0 : XFS_BMAPI_IGSTATE);
/*
@@ -6098,7 +6098,7 @@ xfs_bmap_isaeof(
*/
*aeof = (off >= s.br_startoff &&
off < s.br_startoff + s.br_blockcount &&
- ISNULLSTARTBLOCK(s.br_startblock)) ||
+ isnullstartblock(s.br_startblock)) ||
off >= s.br_startoff + s.br_blockcount;
return 0;
}
diff --git a/fs/xfs/xfs_bmap.h b/fs/xfs/xfs_bmap.h
index 284571c05ed0..be2979d88d32 100644
--- a/fs/xfs/xfs_bmap.h
+++ b/fs/xfs/xfs_bmap.h
@@ -95,7 +95,6 @@ typedef struct xfs_bmap_free
/* need write cache flushing and no */
/* additional allocation alignments */
-#define XFS_BMAPI_AFLAG(w) xfs_bmapi_aflag(w)
static inline int xfs_bmapi_aflag(int w)
{
return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0);
@@ -107,7 +106,6 @@ static inline int xfs_bmapi_aflag(int w)
#define DELAYSTARTBLOCK ((xfs_fsblock_t)-1LL)
#define HOLESTARTBLOCK ((xfs_fsblock_t)-2LL)
-#define XFS_BMAP_INIT(flp,fbp) xfs_bmap_init(flp,fbp)
static inline void xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
{
((flp)->xbf_first = NULL, (flp)->xbf_count = 0, \
diff --git a/fs/xfs/xfs_bmap_btree.c b/fs/xfs/xfs_bmap_btree.c
index ba6b08c2fb02..0760d352586f 100644
--- a/fs/xfs/xfs_bmap_btree.c
+++ b/fs/xfs/xfs_bmap_btree.c
@@ -121,7 +121,7 @@ __xfs_bmbt_get_all(
b = (((xfs_dfsbno_t)l0 & xfs_mask64lo(9)) << 43) |
(((xfs_dfsbno_t)l1) >> 21);
- ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
+ ASSERT((b >> 32) == 0 || isnulldstartblock(b));
s->br_startblock = (xfs_fsblock_t)b;
}
#else /* !DEBUG */
@@ -172,7 +172,7 @@ xfs_bmbt_get_startblock(
b = (((xfs_dfsbno_t)r->l0 & xfs_mask64lo(9)) << 43) |
(((xfs_dfsbno_t)r->l1) >> 21);
- ASSERT((b >> 32) == 0 || ISNULLDSTARTBLOCK(b));
+ ASSERT((b >> 32) == 0 || isnulldstartblock(b));
return (xfs_fsblock_t)b;
#else /* !DEBUG */
return (xfs_fsblock_t)(((xfs_dfsbno_t)r->l1) >> 21);
@@ -261,7 +261,7 @@ xfs_bmbt_set_allf(
((xfs_bmbt_rec_base_t)blockcount &
(xfs_bmbt_rec_base_t)xfs_mask64lo(21));
#else /* !XFS_BIG_BLKNOS */
- if (ISNULLSTARTBLOCK(startblock)) {
+ if (isnullstartblock(startblock)) {
r->l0 = ((xfs_bmbt_rec_base_t)extent_flag << 63) |
((xfs_bmbt_rec_base_t)startoff << 9) |
(xfs_bmbt_rec_base_t)xfs_mask64lo(9);
@@ -321,7 +321,7 @@ xfs_bmbt_disk_set_allf(
((xfs_bmbt_rec_base_t)blockcount &
(xfs_bmbt_rec_base_t)xfs_mask64lo(21)));
#else /* !XFS_BIG_BLKNOS */
- if (ISNULLSTARTBLOCK(startblock)) {
+ if (isnullstartblock(startblock)) {
r->l0 = cpu_to_be64(
((xfs_bmbt_rec_base_t)extent_flag << 63) |
((xfs_bmbt_rec_base_t)startoff << 9) |
@@ -382,7 +382,7 @@ xfs_bmbt_set_startblock(
r->l1 = (r->l1 & (xfs_bmbt_rec_base_t)xfs_mask64lo(21)) |
(xfs_bmbt_rec_base_t)(v << 21);
#else /* !XFS_BIG_BLKNOS */
- if (ISNULLSTARTBLOCK(v)) {
+ if (isnullstartblock(v)) {
r->l0 |= (xfs_bmbt_rec_base_t)xfs_mask64lo(9);
r->l1 = (xfs_bmbt_rec_base_t)xfs_mask64hi(11) |
((xfs_bmbt_rec_base_t)v << 21) |
diff --git a/fs/xfs/xfs_bmap_btree.h b/fs/xfs/xfs_bmap_btree.h
index a4555abb6622..0e8df007615e 100644
--- a/fs/xfs/xfs_bmap_btree.h
+++ b/fs/xfs/xfs_bmap_btree.h
@@ -76,26 +76,22 @@ typedef struct xfs_bmbt_rec_host {
#define DSTARTBLOCKMASK \
(((((xfs_dfsbno_t)1) << DSTARTBLOCKMASKBITS) - 1) << STARTBLOCKVALBITS)
-#define ISNULLSTARTBLOCK(x) isnullstartblock(x)
static inline int isnullstartblock(xfs_fsblock_t x)
{
return ((x) & STARTBLOCKMASK) == STARTBLOCKMASK;
}
-#define ISNULLDSTARTBLOCK(x) isnulldstartblock(x)
static inline int isnulldstartblock(xfs_dfsbno_t x)
{
return ((x) & DSTARTBLOCKMASK) == DSTARTBLOCKMASK;
}
-#define NULLSTARTBLOCK(k) nullstartblock(k)
static inline xfs_fsblock_t nullstartblock(int k)
{
ASSERT(k < (1 << STARTBLOCKVALBITS));
return STARTBLOCKMASK | (k);
}
-#define STARTBLOCKVAL(x) startblockval(x)
static inline xfs_filblks_t startblockval(xfs_fsblock_t x)
{
return (xfs_filblks_t)((x) & ~STARTBLOCKMASK);
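
[Editor's note: with the compatibility macros gone, the static inlines above are the only interface to the "null startblock" encoding — an extent whose startblock has all mask bits set is a delayed allocation, and the low bits carry the worst-case indirect-block reservation. A runnable userspace sketch of the same arithmetic; the 17-bit value width is assumed here for illustration rather than taken from the kernel's STARTBLOCKVALBITS:

	#include <assert.h>
	#include <stdint.h>
	#include <stdio.h>

	#define VALBITS	17				/* assumed value width */
	#define MASK	(~((UINT64_C(1) << VALBITS) - 1))

	static inline uint64_t nullstartblock(int k)
	{
		assert(k < (1 << VALBITS));
		return MASK | (uint64_t)k;	/* all high bits set == "null" */
	}

	static inline int isnullstartblock(uint64_t x)
	{
		return (x & MASK) == MASK;
	}

	static inline uint64_t startblockval(uint64_t x)
	{
		return x & ~MASK;		/* recover the reservation count */
	}

	int main(void)
	{
		uint64_t b = nullstartblock(42);

		printf("%d %llu\n", isnullstartblock(b),
		       (unsigned long long)startblockval(b));	/* prints: 1 42 */
		return 0;
	}

Packing the reservation into an otherwise-impossible block number lets a single 64-bit field distinguish real, delayed, and hole extents without widening the in-core extent record.]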
diff --git a/fs/xfs/xfs_btree.c b/fs/xfs/xfs_btree.c
index 2c3ef20f8842..e73c332eb23f 100644
--- a/fs/xfs/xfs_btree.c
+++ b/fs/xfs/xfs_btree.c
@@ -843,7 +843,7 @@ xfs_btree_ptr_is_null(
union xfs_btree_ptr *ptr)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
- return be64_to_cpu(ptr->l) == NULLFSBLOCK;
+ return be64_to_cpu(ptr->l) == NULLDFSBNO;
else
return be32_to_cpu(ptr->s) == NULLAGBLOCK;
}
@@ -854,7 +854,7 @@ xfs_btree_set_ptr_null(
union xfs_btree_ptr *ptr)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS)
- ptr->l = cpu_to_be64(NULLFSBLOCK);
+ ptr->l = cpu_to_be64(NULLDFSBNO);
else
ptr->s = cpu_to_be32(NULLAGBLOCK);
}
@@ -918,8 +918,8 @@ xfs_btree_init_block(
new->bb_numrecs = cpu_to_be16(numrecs);
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
- new->bb_u.l.bb_leftsib = cpu_to_be64(NULLFSBLOCK);
- new->bb_u.l.bb_rightsib = cpu_to_be64(NULLFSBLOCK);
+ new->bb_u.l.bb_leftsib = cpu_to_be64(NULLDFSBNO);
+ new->bb_u.l.bb_rightsib = cpu_to_be64(NULLDFSBNO);
} else {
new->bb_u.s.bb_leftsib = cpu_to_be32(NULLAGBLOCK);
new->bb_u.s.bb_rightsib = cpu_to_be32(NULLAGBLOCK);
@@ -960,7 +960,7 @@ xfs_btree_buf_to_ptr(
ptr->l = cpu_to_be64(XFS_DADDR_TO_FSB(cur->bc_mp,
XFS_BUF_ADDR(bp)));
else {
- ptr->s = cpu_to_be32(XFS_DADDR_TO_AGBNO(cur->bc_mp,
+ ptr->s = cpu_to_be32(xfs_daddr_to_agbno(cur->bc_mp,
XFS_BUF_ADDR(bp)));
}
}
@@ -971,7 +971,7 @@ xfs_btree_ptr_to_daddr(
union xfs_btree_ptr *ptr)
{
if (cur->bc_flags & XFS_BTREE_LONG_PTRS) {
- ASSERT(be64_to_cpu(ptr->l) != NULLFSBLOCK);
+ ASSERT(be64_to_cpu(ptr->l) != NULLDFSBNO);
return XFS_FSB_TO_DADDR(cur->bc_mp, be64_to_cpu(ptr->l));
} else {
@@ -2454,7 +2454,7 @@ xfs_btree_new_iroot(
xfs_btree_log_ptrs(cur, cbp, 1, be16_to_cpu(cblock->bb_numrecs));
*logflags |=
- XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork);
+ XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork);
*stat = 1;
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
return 0;
@@ -3048,7 +3048,7 @@ xfs_btree_kill_iroot(
cur->bc_bufs[level - 1] = NULL;
be16_add_cpu(&block->bb_level, -1);
xfs_trans_log_inode(cur->bc_tp, ip,
- XFS_ILOG_CORE | XFS_ILOG_FBROOT(cur->bc_private.b.whichfork));
+ XFS_ILOG_CORE | xfs_ilog_fbroot(cur->bc_private.b.whichfork));
cur->bc_nlevels--;
out0:
XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
diff --git a/fs/xfs/xfs_da_btree.c b/fs/xfs/xfs_da_btree.c
index a11a8390bf6c..c45f74ff1a5b 100644
--- a/fs/xfs/xfs_da_btree.c
+++ b/fs/xfs/xfs_da_btree.c
@@ -1597,7 +1597,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
nmap = 1;
ASSERT(args->firstblock != NULL);
if ((error = xfs_bmapi(tp, dp, bno, count,
- XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
+ xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|XFS_BMAPI_METADATA|
XFS_BMAPI_CONTIG,
args->firstblock, args->total, &map, &nmap,
args->flist, NULL))) {
@@ -1618,7 +1618,7 @@ xfs_da_grow_inode(xfs_da_args_t *args, xfs_dablk_t *new_blkno)
nmap = MIN(XFS_BMAP_MAX_NMAP, count);
c = (int)(bno + count - b);
if ((error = xfs_bmapi(tp, dp, b, c,
- XFS_BMAPI_AFLAG(w)|XFS_BMAPI_WRITE|
+ xfs_bmapi_aflag(w)|XFS_BMAPI_WRITE|
XFS_BMAPI_METADATA,
args->firstblock, args->total,
&mapp[mapi], &nmap, args->flist,
@@ -1882,7 +1882,7 @@ xfs_da_shrink_inode(xfs_da_args_t *args, xfs_dablk_t dead_blkno,
* the last block to the place we want to kill.
*/
if ((error = xfs_bunmapi(tp, dp, dead_blkno, count,
- XFS_BMAPI_AFLAG(w)|XFS_BMAPI_METADATA,
+ xfs_bmapi_aflag(w)|XFS_BMAPI_METADATA,
0, args->firstblock, args->flist, NULL,
&done)) == ENOSPC) {
if (w != XFS_DATA_FORK)
@@ -1987,7 +1987,7 @@ xfs_da_do_buf(
if ((error = xfs_bmapi(trans, dp, (xfs_fileoff_t)bno,
nfsb,
XFS_BMAPI_METADATA |
- XFS_BMAPI_AFLAG(whichfork),
+ xfs_bmapi_aflag(whichfork),
NULL, 0, mapp, &nmap, NULL, NULL)))
goto exit0;
}
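For reference, the lowercase helper these xfs_da_btree.c hunks switch to is, going by the headers this series touches, the inline equivalent of the old XFS_BMAPI_AFLAG macro:

	static inline int xfs_bmapi_aflag(int w)
	{
		/* Map the fork selector onto the bmapi flag: attribute-fork
		 * operations need XFS_BMAPI_ATTRFORK, data-fork ones need
		 * no extra flag. */
		return (w == XFS_ATTR_FORK ? XFS_BMAPI_ATTRFORK : 0);
	}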
diff --git a/fs/xfs/xfs_dfrag.c b/fs/xfs/xfs_dfrag.c
index b4c1ee713492..f8278cfcc1d3 100644
--- a/fs/xfs/xfs_dfrag.c
+++ b/fs/xfs/xfs_dfrag.c
@@ -55,17 +55,11 @@ xfs_swapext(
struct file *file, *target_file;
int error = 0;
- sxp = kmem_alloc(sizeof(xfs_swapext_t), KM_MAYFAIL);
- if (!sxp) {
- error = XFS_ERROR(ENOMEM);
- goto out;
- }
-
/* Pull information for the target fd */
file = fget((int)sxp->sx_fdtarget);
if (!file) {
error = XFS_ERROR(EINVAL);
- goto out_free_sxp;
+ goto out;
}
if (!(file->f_mode & FMODE_WRITE) || (file->f_flags & O_APPEND)) {
@@ -109,8 +103,6 @@ xfs_swapext(
fput(target_file);
out_put_file:
fput(file);
- out_free_sxp:
- kmem_free(sxp);
out:
return error;
}
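With the kmem_alloc() gone, xfs_swapext() evidently receives sxp from its caller. A plausible caller-side shape, illustrative only since the ioctl plumbing is outside this hunk:

	xfs_swapext_t	sx;
	int		error;

	/* Copy the user's request onto the stack and hand the callee a
	 * ready-to-use pointer, so it no longer allocates or frees one. */
	if (copy_from_user(&sx, user_arg, sizeof(sx)))
		return -EFAULT;
	error = xfs_swapext(&sx);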
diff --git a/fs/xfs/xfs_extfree_item.h b/fs/xfs/xfs_extfree_item.h
index 2f049f63e85f..0d22c56fdf64 100644
--- a/fs/xfs/xfs_extfree_item.h
+++ b/fs/xfs/xfs_extfree_item.h
@@ -33,12 +33,10 @@ typedef struct xfs_extent {
* conversion routine.
*/
-#ifndef HAVE_FORMAT32
typedef struct xfs_extent_32 {
__uint64_t ext_start;
__uint32_t ext_len;
} __attribute__((packed)) xfs_extent_32_t;
-#endif
typedef struct xfs_extent_64 {
__uint64_t ext_start;
@@ -59,7 +57,6 @@ typedef struct xfs_efi_log_format {
xfs_extent_t efi_extents[1]; /* array of extents to free */
} xfs_efi_log_format_t;
-#ifndef HAVE_FORMAT32
typedef struct xfs_efi_log_format_32 {
__uint16_t efi_type; /* efi log item type */
__uint16_t efi_size; /* size of this item */
@@ -67,7 +64,6 @@ typedef struct xfs_efi_log_format_32 {
__uint64_t efi_id; /* efi identifier */
xfs_extent_32_t efi_extents[1]; /* array of extents to free */
} __attribute__((packed)) xfs_efi_log_format_32_t;
-#endif
typedef struct xfs_efi_log_format_64 {
__uint16_t efi_type; /* efi log item type */
@@ -90,7 +86,6 @@ typedef struct xfs_efd_log_format {
xfs_extent_t efd_extents[1]; /* array of extents freed */
} xfs_efd_log_format_t;
-#ifndef HAVE_FORMAT32
typedef struct xfs_efd_log_format_32 {
__uint16_t efd_type; /* efd log item type */
__uint16_t efd_size; /* size of this item */
@@ -98,7 +93,6 @@ typedef struct xfs_efd_log_format_32 {
__uint64_t efd_efi_id; /* id of corresponding efi */
xfs_extent_32_t efd_extents[1]; /* array of extents freed */
} __attribute__((packed)) xfs_efd_log_format_32_t;
-#endif
typedef struct xfs_efd_log_format_64 {
__uint16_t efd_type; /* efd log item type */
diff --git a/fs/xfs/xfs_ialloc.c b/fs/xfs/xfs_ialloc.c
index e6ebbaeb4dc6..ab016e5ae7be 100644
--- a/fs/xfs/xfs_ialloc.c
+++ b/fs/xfs/xfs_ialloc.c
@@ -357,7 +357,7 @@ xfs_ialloc_ag_alloc(
int ioffset = i << args.mp->m_sb.sb_inodelog;
uint isize = sizeof(struct xfs_dinode);
- free = XFS_MAKE_IPTR(args.mp, fbuf, i);
+ free = xfs_make_iptr(args.mp, fbuf, i);
free->di_magic = cpu_to_be16(XFS_DINODE_MAGIC);
free->di_version = version;
free->di_gen = cpu_to_be32(gen);
@@ -937,7 +937,7 @@ nextag:
}
}
}
- offset = XFS_IALLOC_FIND_FREE(&rec.ir_free);
+ offset = xfs_ialloc_find_free(&rec.ir_free);
ASSERT(offset >= 0);
ASSERT(offset < XFS_INODES_PER_CHUNK);
ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
@@ -1279,7 +1279,7 @@ xfs_imap(
offset = XFS_INO_TO_OFFSET(mp, ino);
ASSERT(offset < mp->m_sb.sb_inopblock);
- cluster_agbno = XFS_DADDR_TO_AGBNO(mp, imap->im_blkno);
+ cluster_agbno = xfs_daddr_to_agbno(mp, imap->im_blkno);
offset += (agbno - cluster_agbno) * mp->m_sb.sb_inopblock;
imap->im_len = XFS_FSB_TO_BB(mp, blks_per_cluster);
diff --git a/fs/xfs/xfs_ialloc.h b/fs/xfs/xfs_ialloc.h
index 50f558a4e0a8..aeee8278f92c 100644
--- a/fs/xfs/xfs_ialloc.h
+++ b/fs/xfs/xfs_ialloc.h
@@ -39,7 +39,6 @@ struct xfs_trans;
/*
* Make an inode pointer out of the buffer/offset.
*/
-#define XFS_MAKE_IPTR(mp,b,o) xfs_make_iptr(mp,b,o)
static inline struct xfs_dinode *
xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o)
{
@@ -50,7 +49,6 @@ xfs_make_iptr(struct xfs_mount *mp, struct xfs_buf *b, int o)
/*
* Find a free (set) bit in the inode bitmask.
*/
-#define XFS_IALLOC_FIND_FREE(fp) xfs_ialloc_find_free(fp)
static inline int xfs_ialloc_find_free(xfs_inofree_t *fp)
{
return xfs_lowbit64(*fp);
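A quick worked example of the now-unwrapped helper: xfs_lowbit64() returns the index of the lowest set bit, and in the inode-free mask a set bit means "free", so the result is the chunk offset of the first free inode (mask value hypothetical):

	xfs_inofree_t mask = 0x8;			/* only inode 3 of the chunk free */
	int offset = xfs_ialloc_find_free(&mask);	/* lowest set bit -> 3 */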
diff --git a/fs/xfs/xfs_ialloc_btree.h b/fs/xfs/xfs_ialloc_btree.h
index 37e5dd01a577..5580e255ff06 100644
--- a/fs/xfs/xfs_ialloc_btree.h
+++ b/fs/xfs/xfs_ialloc_btree.h
@@ -36,7 +36,6 @@ typedef __uint64_t xfs_inofree_t;
#define XFS_INODES_PER_CHUNK_LOG (XFS_NBBYLOG + 3)
#define XFS_INOBT_ALL_FREE ((xfs_inofree_t)-1)
-#define XFS_INOBT_MASKN(i,n) xfs_inobt_maskn(i,n)
static inline xfs_inofree_t xfs_inobt_maskn(int i, int n)
{
return (((n) >= XFS_INODES_PER_CHUNK ? \
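The body is truncated above; the effect is a run of n set bits starting at bit i, clamped at the chunk size. Worked example under that reading:

	/* xfs_inobt_maskn(2, 3): ((1 << 3) - 1) << 2 = 0x7 << 2 = 0x1c,
	 * i.e. inodes 2, 3 and 4 of the chunk selected. */
	xfs_inofree_t m = xfs_inobt_maskn(2, 3);	/* == 0x1c */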
diff --git a/fs/xfs/xfs_inode.c b/fs/xfs/xfs_inode.c
index 5a5e035e5d38..e7ae08d1df48 100644
--- a/fs/xfs/xfs_inode.c
+++ b/fs/xfs/xfs_inode.c
@@ -424,6 +424,19 @@ xfs_iformat(
case XFS_DINODE_FMT_LOCAL:
atp = (xfs_attr_shortform_t *)XFS_DFORK_APTR(dip);
size = be16_to_cpu(atp->hdr.totsize);
+
+ if (unlikely(size < sizeof(struct xfs_attr_sf_hdr))) {
+ xfs_fs_repair_cmn_err(CE_WARN, ip->i_mount,
+ "corrupt inode %Lu "
+ "(bad attr fork size %Ld).",
+ (unsigned long long) ip->i_ino,
+ (long long) size);
+ XFS_CORRUPTION_ERROR("xfs_iformat(8)",
+ XFS_ERRLEVEL_LOW,
+ ip->i_mount, dip);
+ return XFS_ERROR(EFSCORRUPTED);
+ }
+
error = xfs_iformat_local(ip, dip, XFS_ATTR_FORK, size);
break;
case XFS_DINODE_FMT_EXTENTS:
@@ -1601,10 +1614,10 @@ xfs_itruncate_finish(
* in this file with garbage in them once recovery
* runs.
*/
- XFS_BMAP_INIT(&free_list, &first_block);
+ xfs_bmap_init(&free_list, &first_block);
error = xfs_bunmapi(ntp, ip,
first_unmap_block, unmap_len,
- XFS_BMAPI_AFLAG(fork) |
+ xfs_bmapi_aflag(fork) |
(sync ? 0 : XFS_BMAPI_ASYNC),
XFS_ITRUNC_MAX_EXTENTS,
&first_block, &free_list,
@@ -2557,7 +2570,7 @@ xfs_iextents_copy(
for (i = 0; i < nrecs; i++) {
xfs_bmbt_rec_host_t *ep = xfs_iext_get_ext(ifp, i);
start_block = xfs_bmbt_get_startblock(ep);
- if (ISNULLSTARTBLOCK(start_block)) {
+ if (isnullstartblock(start_block)) {
/*
* It's a delayed allocation extent, so skip it.
*/
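isnullstartblock() and startblockval() belong to the same cleanup: a delayed-allocation extent carries a sentinel startblock whose high STARTBLOCKMASK bits are set, with the block reservation stashed in the low bits. A sketch of how the pair is used together (the exact bit split is the headers' business):

	xfs_filblks_t resv;

	if (isnullstartblock(start_block))
		/* Delayed allocation: the low bits hold the indirect-block
		 * reservation, not a real disk block number. */
		resv = startblockval(start_block);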
diff --git a/fs/xfs/xfs_inode_item.h b/fs/xfs/xfs_inode_item.h
index 1ff04cc323ad..a52ac125f055 100644
--- a/fs/xfs/xfs_inode_item.h
+++ b/fs/xfs/xfs_inode_item.h
@@ -40,7 +40,6 @@ typedef struct xfs_inode_log_format {
__int32_t ilf_boffset; /* off of inode in buffer */
} xfs_inode_log_format_t;
-#ifndef HAVE_FORMAT32
typedef struct xfs_inode_log_format_32 {
__uint16_t ilf_type; /* inode log item type */
__uint16_t ilf_size; /* size of this item */
@@ -56,7 +55,6 @@ typedef struct xfs_inode_log_format_32 {
__int32_t ilf_len; /* len of inode buffer */
__int32_t ilf_boffset; /* off of inode in buffer */
} __attribute__((packed)) xfs_inode_log_format_32_t;
-#endif
typedef struct xfs_inode_log_format_64 {
__uint16_t ilf_type; /* inode log item type */
@@ -111,20 +109,16 @@ typedef struct xfs_inode_log_format_64 {
#define XFS_ILI_IOLOCKED_ANY (XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED)
-
-#define XFS_ILOG_FBROOT(w) xfs_ilog_fbroot(w)
static inline int xfs_ilog_fbroot(int w)
{
return (w == XFS_DATA_FORK ? XFS_ILOG_DBROOT : XFS_ILOG_ABROOT);
}
-#define XFS_ILOG_FEXT(w) xfs_ilog_fext(w)
static inline int xfs_ilog_fext(int w)
{
return (w == XFS_DATA_FORK ? XFS_ILOG_DEXT : XFS_ILOG_AEXT);
}
-#define XFS_ILOG_FDATA(w) xfs_ilog_fdata(w)
static inline int xfs_ilog_fdata(int w)
{
return (w == XFS_DATA_FORK ? XFS_ILOG_DDATA : XFS_ILOG_ADATA);
diff --git a/fs/xfs/xfs_iomap.c b/fs/xfs/xfs_iomap.c
index 911062cf73a6..08ce72316bfe 100644
--- a/fs/xfs/xfs_iomap.c
+++ b/fs/xfs/xfs_iomap.c
@@ -155,7 +155,7 @@ xfs_imap_to_bmap(
iomapp->iomap_bn = IOMAP_DADDR_NULL;
iomapp->iomap_flags |= IOMAP_DELAY;
} else {
- iomapp->iomap_bn = XFS_FSB_TO_DB(ip, start_block);
+ iomapp->iomap_bn = xfs_fsb_to_db(ip, start_block);
if (ISUNWRITTEN(imap))
iomapp->iomap_flags |= IOMAP_UNWRITTEN;
}
@@ -261,7 +261,7 @@ xfs_iomap(
xfs_iunlock(ip, lockmode);
lockmode = 0;
- if (nimaps && !ISNULLSTARTBLOCK(imap.br_startblock)) {
+ if (nimaps && !isnullstartblock(imap.br_startblock)) {
xfs_iomap_map_trace(XFS_IOMAP_WRITE_MAP, ip,
offset, count, iomapp, &imap, flags);
break;
@@ -491,7 +491,7 @@ xfs_iomap_write_direct(
/*
* Issue the xfs_bmapi() call to allocate the blocks
*/
- XFS_BMAP_INIT(&free_list, &firstfsb);
+ xfs_bmap_init(&free_list, &firstfsb);
nimaps = 1;
error = xfs_bmapi(tp, ip, offset_fsb, count_fsb, bmapi_flag,
&firstfsb, 0, &imap, &nimaps, &free_list, NULL);
@@ -751,7 +751,7 @@ xfs_iomap_write_allocate(
xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
xfs_trans_ihold(tp, ip);
- XFS_BMAP_INIT(&free_list, &first_block);
+ xfs_bmap_init(&free_list, &first_block);
/*
* it is possible that the extents have changed since
@@ -911,7 +911,7 @@ xfs_iomap_write_unwritten(
/*
* Modify the unwritten extent state of the buffer.
*/
- XFS_BMAP_INIT(&free_list, &firstfsb);
+ xfs_bmap_init(&free_list, &firstfsb);
nimaps = 1;
error = xfs_bmapi(tp, ip, offset_fsb, count_fsb,
XFS_BMAPI_WRITE|XFS_BMAPI_CONVERT, &firstfsb,
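xfs_bmap_init() is the inline successor of XFS_BMAP_INIT; going by the old macro, it resets the free-list bookkeeping and the first-block cookie in one step (field names as in xfs_bmap.h of this era, quoted from memory):

	static inline void
	xfs_bmap_init(xfs_bmap_free_t *flp, xfs_fsblock_t *fbp)
	{
		flp->xbf_first = NULL;		/* no extents queued for freeing */
		flp->xbf_count = 0;
		flp->xbf_low = 0;		/* not in low-space mode */
		*fbp = NULLFSBLOCK;		/* allocator may pick any AG */
	}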
diff --git a/fs/xfs/xfs_itable.c b/fs/xfs/xfs_itable.c
index e19d0a8d5618..cf98a805ec90 100644
--- a/fs/xfs/xfs_itable.c
+++ b/fs/xfs/xfs_itable.c
@@ -453,7 +453,7 @@ xfs_bulkstat(
(chunkidx = agino - gino + 1) <
XFS_INODES_PER_CHUNK &&
/* there are some left allocated */
- XFS_INOBT_MASKN(chunkidx,
+ xfs_inobt_maskn(chunkidx,
XFS_INODES_PER_CHUNK - chunkidx) & ~gfree) {
/*
* Grab the chunk record. Mark all the
@@ -464,7 +464,7 @@ xfs_bulkstat(
if (XFS_INOBT_MASK(i) & ~gfree)
gcnt++;
}
- gfree |= XFS_INOBT_MASKN(0, chunkidx);
+ gfree |= xfs_inobt_maskn(0, chunkidx);
irbp->ir_startino = gino;
irbp->ir_freecount = gcnt;
irbp->ir_free = gfree;
@@ -535,7 +535,7 @@ xfs_bulkstat(
chunkidx < XFS_INODES_PER_CHUNK;
chunkidx += nicluster,
agbno += nbcluster) {
- if (XFS_INOBT_MASKN(chunkidx,
+ if (xfs_inobt_maskn(chunkidx,
nicluster) & ~gfree)
xfs_btree_reada_bufs(mp, agno,
agbno, nbcluster);
diff --git a/fs/xfs/xfs_log_recover.c b/fs/xfs/xfs_log_recover.c
index 35cca98bd94c..b1047de2fffd 100644
--- a/fs/xfs/xfs_log_recover.c
+++ b/fs/xfs/xfs_log_recover.c
@@ -70,16 +70,21 @@ STATIC void xlog_recover_check_summary(xlog_t *);
xfs_buf_t *
xlog_get_bp(
xlog_t *log,
- int num_bblks)
+ int nbblks)
{
- ASSERT(num_bblks > 0);
+ if (nbblks <= 0 || nbblks > log->l_logBBsize) {
+ xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
+ XFS_ERROR_REPORT("xlog_get_bp(1)",
+ XFS_ERRLEVEL_HIGH, log->l_mp);
+ return NULL;
+ }
if (log->l_sectbb_log) {
- if (num_bblks > 1)
- num_bblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
- num_bblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, num_bblks);
+ if (nbblks > 1)
+ nbblks += XLOG_SECTOR_ROUNDUP_BBCOUNT(log, 1);
+ nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
}
- return xfs_buf_get_noaddr(BBTOB(num_bblks), log->l_mp->m_logdev_targp);
+ return xfs_buf_get_noaddr(BBTOB(nbblks), log->l_mp->m_logdev_targp);
}
void
@@ -102,6 +107,13 @@ xlog_bread(
{
int error;
+ if (nbblks <= 0 || nbblks > log->l_logBBsize) {
+ xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
+ XFS_ERROR_REPORT("xlog_bread(1)",
+ XFS_ERRLEVEL_HIGH, log->l_mp);
+ return EFSCORRUPTED;
+ }
+
if (log->l_sectbb_log) {
blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
@@ -139,6 +151,13 @@ xlog_bwrite(
{
int error;
+ if (nbblks <= 0 || nbblks > log->l_logBBsize) {
+ xlog_warn("XFS: Invalid block length (0x%x) given for buffer", nbblks);
+ XFS_ERROR_REPORT("xlog_bwrite(1)",
+ XFS_ERRLEVEL_HIGH, log->l_mp);
+ return EFSCORRUPTED;
+ }
+
if (log->l_sectbb_log) {
blk_no = XLOG_SECTOR_ROUNDDOWN_BLKNO(log, blk_no);
nbblks = XLOG_SECTOR_ROUNDUP_BBCOUNT(log, nbblks);
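All three recovery entry points now reject block counts outside (0, l_logBBsize] before touching a buffer, so a corrupted log record cannot drive a zero-sized or oversized allocation or I/O. The caller-side effect looks roughly like this (call sites assumed):

	bp = xlog_get_bp(log, nbblks);	/* NULL instead of a bogus buffer */
	if (!bp)
		return ENOMEM;

	error = xlog_bread(log, blk_no, nbblks, bp);	/* EFSCORRUPTED on bad len */
	if (error)
		goto out;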
diff --git a/fs/xfs/xfs_mount.c b/fs/xfs/xfs_mount.c
index 3c97c6463a4e..35300250e86d 100644
--- a/fs/xfs/xfs_mount.c
+++ b/fs/xfs/xfs_mount.c
@@ -45,7 +45,6 @@
#include "xfs_fsops.h"
#include "xfs_utils.h"
-STATIC int xfs_mount_log_sb(xfs_mount_t *, __int64_t);
STATIC int xfs_uuid_mount(xfs_mount_t *);
STATIC void xfs_unmountfs_wait(xfs_mount_t *);
@@ -682,7 +681,7 @@ xfs_initialize_perag_data(xfs_mount_t *mp, xfs_agnumber_t agcount)
* Update alignment values based on mount options and sb values
*/
STATIC int
-xfs_update_alignment(xfs_mount_t *mp, __uint64_t *update_flags)
+xfs_update_alignment(xfs_mount_t *mp)
{
xfs_sb_t *sbp = &(mp->m_sb);
@@ -736,11 +735,11 @@ xfs_update_alignment(xfs_mount_t *mp, __uint64_t *update_flags)
if (xfs_sb_version_hasdalign(sbp)) {
if (sbp->sb_unit != mp->m_dalign) {
sbp->sb_unit = mp->m_dalign;
- *update_flags |= XFS_SB_UNIT;
+ mp->m_update_flags |= XFS_SB_UNIT;
}
if (sbp->sb_width != mp->m_swidth) {
sbp->sb_width = mp->m_swidth;
- *update_flags |= XFS_SB_WIDTH;
+ mp->m_update_flags |= XFS_SB_WIDTH;
}
}
} else if ((mp->m_flags & XFS_MOUNT_NOALIGN) != XFS_MOUNT_NOALIGN &&
@@ -905,7 +904,6 @@ xfs_mountfs(
xfs_sb_t *sbp = &(mp->m_sb);
xfs_inode_t *rip;
__uint64_t resblks;
- __int64_t update_flags = 0LL;
uint quotamount, quotaflags;
int uuid_mounted = 0;
int error = 0;
@@ -933,7 +931,7 @@ xfs_mountfs(
"XFS: correcting sb_features alignment problem");
sbp->sb_features2 |= sbp->sb_bad_features2;
sbp->sb_bad_features2 = sbp->sb_features2;
- update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
+ mp->m_update_flags |= XFS_SB_FEATURES2 | XFS_SB_BAD_FEATURES2;
/*
* Re-check for ATTR2 in case it was found in bad_features2
@@ -947,11 +945,11 @@ xfs_mountfs(
if (xfs_sb_version_hasattr2(&mp->m_sb) &&
(mp->m_flags & XFS_MOUNT_NOATTR2)) {
xfs_sb_version_removeattr2(&mp->m_sb);
- update_flags |= XFS_SB_FEATURES2;
+ mp->m_update_flags |= XFS_SB_FEATURES2;
/* update sb_versionnum for the clearing of the morebits */
if (!sbp->sb_features2)
- update_flags |= XFS_SB_VERSIONNUM;
+ mp->m_update_flags |= XFS_SB_VERSIONNUM;
}
/*
@@ -960,7 +958,7 @@ xfs_mountfs(
* allocator alignment is within an ag, therefore ag has
* to be aligned at stripe boundary.
*/
- error = xfs_update_alignment(mp, &update_flags);
+ error = xfs_update_alignment(mp);
if (error)
goto error1;
@@ -1137,10 +1135,12 @@ xfs_mountfs(
}
/*
- * If fs is not mounted readonly, then update the superblock changes.
+ * If this is a read-only mount, defer the superblock updates until
+ * the next remount into writeable mode; otherwise we would never
+ * perform the update, e.g. for the root filesystem.
*/
- if (update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
- error = xfs_mount_log_sb(mp, update_flags);
+ if (mp->m_update_flags && !(mp->m_flags & XFS_MOUNT_RDONLY)) {
+ error = xfs_mount_log_sb(mp, mp->m_update_flags);
if (error) {
cmn_err(CE_WARN, "XFS: failed to write sb changes");
goto error4;
@@ -1820,7 +1820,7 @@ xfs_uuid_mount(
* be altered by the mount options, as well as any potential sb_features2
* fixup. Only the first superblock is updated.
*/
-STATIC int
+int
xfs_mount_log_sb(
xfs_mount_t *mp,
__int64_t fields)
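xfs_mount_log_sb() loses its STATIC so the remount path can flush the deferred flags once the filesystem becomes writeable; the companion change presumably looks roughly like this in the remount,rw handler (exact site in xfs_super.c assumed):

	/* Apply superblock updates we had to skip on a read-only mount. */
	if (mp->m_update_flags) {
		error = xfs_mount_log_sb(mp, mp->m_update_flags);
		if (error) {
			cmn_err(CE_WARN, "XFS: failed to write sb changes");
			return error;
		}
		mp->m_update_flags = 0LL;
	}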
diff --git a/fs/xfs/xfs_mount.h b/fs/xfs/xfs_mount.h
index c1e028467327..f5e9937f9bdb 100644
--- a/fs/xfs/xfs_mount.h
+++ b/fs/xfs/xfs_mount.h
@@ -44,9 +44,9 @@ typedef struct xfs_trans_reservations {
#ifndef __KERNEL__
-#define XFS_DADDR_TO_AGNO(mp,d) \
+#define xfs_daddr_to_agno(mp,d) \
((xfs_agnumber_t)(XFS_BB_TO_FSBT(mp, d) / (mp)->m_sb.sb_agblocks))
-#define XFS_DADDR_TO_AGBNO(mp,d) \
+#define xfs_daddr_to_agbno(mp,d) \
((xfs_agblock_t)(XFS_BB_TO_FSBT(mp, d) % (mp)->m_sb.sb_agblocks))
#else /* __KERNEL__ */
@@ -327,6 +327,8 @@ typedef struct xfs_mount {
spinlock_t m_sync_lock; /* work item list lock */
int m_sync_seq; /* sync thread generation no. */
wait_queue_head_t m_wait_single_sync_task;
+ __int64_t m_update_flags; /* sb flags we need to update
+ on the next remount,rw */
} xfs_mount_t;
/*
@@ -439,7 +441,6 @@ void xfs_do_force_shutdown(struct xfs_mount *mp, int flags, char *fname,
*/
#define XFS_MFSI_QUIET 0x40 /* Be silent if mount errors found */
-#define XFS_DADDR_TO_AGNO(mp,d) xfs_daddr_to_agno(mp,d)
static inline xfs_agnumber_t
xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
{
@@ -448,7 +449,6 @@ xfs_daddr_to_agno(struct xfs_mount *mp, xfs_daddr_t d)
return (xfs_agnumber_t) ld;
}
-#define XFS_DADDR_TO_AGBNO(mp,d) xfs_daddr_to_agbno(mp,d)
static inline xfs_agblock_t
xfs_daddr_to_agbno(struct xfs_mount *mp, xfs_daddr_t d)
{
@@ -514,6 +514,7 @@ extern int xfs_mod_incore_sb_unlocked(xfs_mount_t *, xfs_sb_field_t,
int64_t, int);
extern int xfs_mod_incore_sb_batch(xfs_mount_t *, xfs_mod_sb_t *,
uint, int);
+extern int xfs_mount_log_sb(xfs_mount_t *, __int64_t);
extern struct xfs_buf *xfs_getsb(xfs_mount_t *, int);
extern int xfs_readsb(xfs_mount_t *, int);
extern void xfs_freesb(xfs_mount_t *);
diff --git a/fs/xfs/xfs_rename.c b/fs/xfs/xfs_rename.c
index 86471bb40fd4..58f85e9cd11d 100644
--- a/fs/xfs/xfs_rename.c
+++ b/fs/xfs/xfs_rename.c
@@ -147,7 +147,7 @@ xfs_rename(
xfs_sort_for_rename(src_dp, target_dp, src_ip, target_ip,
inodes, &num_inodes);
- XFS_BMAP_INIT(&free_list, &first_block);
+ xfs_bmap_init(&free_list, &first_block);
tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
spaceres = XFS_RENAME_SPACE_RES(mp, target_name->len);
diff --git a/fs/xfs/xfs_rtalloc.c b/fs/xfs/xfs_rtalloc.c
index edf12c7b834c..c5bb86f3ec05 100644
--- a/fs/xfs/xfs_rtalloc.c
+++ b/fs/xfs/xfs_rtalloc.c
@@ -120,7 +120,7 @@ xfs_growfs_rt_alloc(
if ((error = xfs_trans_iget(mp, tp, ino, 0,
XFS_ILOCK_EXCL, &ip)))
goto error_cancel;
- XFS_BMAP_INIT(&flist, &firstblock);
+ xfs_bmap_init(&flist, &firstblock);
/*
* Allocate blocks to the bitmap file.
*/
diff --git a/fs/xfs/xfs_rw.h b/fs/xfs/xfs_rw.h
index f87db5344ce6..f76c003ec55d 100644
--- a/fs/xfs/xfs_rw.h
+++ b/fs/xfs/xfs_rw.h
@@ -28,7 +28,6 @@ struct xfs_mount;
* file is a real time file or not, because the bmap code
* does.
*/
-#define XFS_FSB_TO_DB(ip,fsb) xfs_fsb_to_db(ip,fsb)
static inline xfs_daddr_t
xfs_fsb_to_db(struct xfs_inode *ip, xfs_fsblock_t fsb)
{
diff --git a/fs/xfs/xfs_sb.h b/fs/xfs/xfs_sb.h
index 1ed71916e4c9..1b017c657494 100644
--- a/fs/xfs/xfs_sb.h
+++ b/fs/xfs/xfs_sb.h
@@ -505,7 +505,7 @@ static inline void xfs_sb_version_removeattr2(xfs_sb_t *sbp)
#define XFS_HDR_BLOCK(mp,d) ((xfs_agblock_t)XFS_BB_TO_FSBT(mp,d))
#define XFS_DADDR_TO_FSB(mp,d) XFS_AGB_TO_FSB(mp, \
- XFS_DADDR_TO_AGNO(mp,d), XFS_DADDR_TO_AGBNO(mp,d))
+ xfs_daddr_to_agno(mp,d), xfs_daddr_to_agbno(mp,d))
#define XFS_FSB_TO_DADDR(mp,fsbno) XFS_AGB_TO_DADDR(mp, \
XFS_FSB_TO_AGNO(mp,fsbno), XFS_FSB_TO_AGBNO(mp,fsbno))
diff --git a/fs/xfs/xfs_vnodeops.c b/fs/xfs/xfs_vnodeops.c
index f07bf8768c3a..0e55c5d7db5f 100644
--- a/fs/xfs/xfs_vnodeops.c
+++ b/fs/xfs/xfs_vnodeops.c
@@ -862,7 +862,7 @@ xfs_inactive_symlink_rmt(
* Find the block(s) so we can inval and unmap them.
*/
done = 0;
- XFS_BMAP_INIT(&free_list, &first_block);
+ xfs_bmap_init(&free_list, &first_block);
nmaps = ARRAY_SIZE(mval);
if ((error = xfs_bmapi(tp, ip, 0, XFS_B_TO_FSB(mp, size),
XFS_BMAPI_METADATA, &first_block, 0, mval, &nmaps,
@@ -1288,7 +1288,7 @@ xfs_inactive(
/*
* Free the inode.
*/
- XFS_BMAP_INIT(&free_list, &first_block);
+ xfs_bmap_init(&free_list, &first_block);
error = xfs_ifree(tp, ip, &free_list);
if (error) {
/*
@@ -1461,7 +1461,7 @@ xfs_create(
xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
unlock_dp_on_error = B_TRUE;
- XFS_BMAP_INIT(&free_list, &first_block);
+ xfs_bmap_init(&free_list, &first_block);
ASSERT(ip == NULL);
@@ -1879,7 +1879,7 @@ xfs_remove(
}
}
- XFS_BMAP_INIT(&free_list, &first_block);
+ xfs_bmap_init(&free_list, &first_block);
error = xfs_dir_removename(tp, dp, name, ip->i_ino,
&first_block, &free_list, resblks);
if (error) {
@@ -2059,7 +2059,7 @@ xfs_link(
if (error)
goto error_return;
- XFS_BMAP_INIT(&free_list, &first_block);
+ xfs_bmap_init(&free_list, &first_block);
error = xfs_dir_createname(tp, tdp, target_name, sip->i_ino,
&first_block, &free_list, resblks);
@@ -2231,7 +2231,7 @@ xfs_mkdir(
xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
unlock_dp_on_error = B_FALSE;
- XFS_BMAP_INIT(&free_list, &first_block);
+ xfs_bmap_init(&free_list, &first_block);
error = xfs_dir_createname(tp, dp, dir_name, cdp->i_ino,
&first_block, &free_list, resblks ?
@@ -2438,7 +2438,7 @@ xfs_symlink(
* Initialize the bmap freelist prior to calling either
* bmapi or the directory create code.
*/
- XFS_BMAP_INIT(&free_list, &first_block);
+ xfs_bmap_init(&free_list, &first_block);
/*
* Allocate an inode for the symlink.
@@ -2860,7 +2860,7 @@ retry:
/*
* Issue the xfs_bmapi() call to allocate the blocks
*/
- XFS_BMAP_INIT(&free_list, &firstfsb);
+ xfs_bmap_init(&free_list, &firstfsb);
error = xfs_bmapi(tp, ip, startoffset_fsb,
allocatesize_fsb, bmapi_flag,
&firstfsb, 0, imapp, &nimaps,
@@ -2980,7 +2980,7 @@ xfs_zero_remaining_bytes(
XFS_BUF_UNDONE(bp);
XFS_BUF_UNWRITE(bp);
XFS_BUF_READ(bp);
- XFS_BUF_SET_ADDR(bp, XFS_FSB_TO_DB(ip, imap.br_startblock));
+ XFS_BUF_SET_ADDR(bp, xfs_fsb_to_db(ip, imap.br_startblock));
xfsbdstrat(mp, bp);
error = xfs_iowait(bp);
if (error) {
@@ -3186,7 +3186,7 @@ xfs_free_file_space(
/*
* issue the bunmapi() call to free the blocks
*/
- XFS_BMAP_INIT(&free_list, &firstfsb);
+ xfs_bmap_init(&free_list, &firstfsb);
error = xfs_bunmapi(tp, ip, startoffset_fsb,
endoffset_fsb - startoffset_fsb,
0, 2, &firstfsb, &free_list, NULL, &done);