// SPDX-License-Identifier: GPL-2.0-only #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include #include "internal.h" #include #include /* * Performs necessary checks before doing a clone. * * Can adjust amount of bytes to clone via @req_count argument. * Returns appropriate error code that caller should return or * zero in case the clone should be allowed. */ static int generic_remap_checks(struct file *file_in, loff_t pos_in, struct file *file_out, loff_t pos_out, loff_t *req_count, unsigned int remap_flags) { struct inode *inode_in = file_in->f_mapping->host; struct inode *inode_out = file_out->f_mapping->host; uint64_t count = *req_count; uint64_t bcount; loff_t size_in, size_out; loff_t bs = inode_out->i_sb->s_blocksize; int ret; /* The start of both ranges must be aligned to an fs block. */ if (!IS_ALIGNED(pos_in, bs) || !IS_ALIGNED(pos_out, bs)) return -EINVAL; /* Ensure offsets don't wrap. */ if (pos_in + count < pos_in || pos_out + count < pos_out) return -EINVAL; size_in = i_size_read(inode_in); size_out = i_size_read(inode_out); /* Dedupe requires both ranges to be within EOF. */ if ((remap_flags & REMAP_FILE_DEDUP) && (pos_in >= size_in || pos_in + count > size_in || pos_out >= size_out || pos_out + count > size_out)) return -EINVAL; /* Ensure the infile range is within the infile. */ if (pos_in >= size_in) return -EINVAL; count = min(count, size_in - (uint64_t)pos_in); ret = generic_write_check_limits(file_out, pos_out, &count); if (ret) return ret; /* * If the user wanted us to link to the infile's EOF, round up to the * next block boundary for this check. * * Otherwise, make sure the count is also block-aligned, having * already confirmed the starting offsets' block alignment. 
	 */
	if (pos_in + count == size_in) {
		bcount = ALIGN(size_in, bs) - pos_in;
	} else {
		if (!IS_ALIGNED(count, bs))
			count = ALIGN_DOWN(count, bs);
		bcount = count;
	}

	/* Don't allow overlapped cloning within the same file. */
	if (inode_in == inode_out &&
	    pos_out + bcount > pos_in &&
	    pos_out < pos_in + bcount)
		return -EINVAL;

	/*
	 * We shortened the request but the caller can't deal with that, so
	 * bounce the request back to userspace.
	 */
	if (*req_count != count && !(remap_flags & REMAP_FILE_CAN_SHORTEN))
		return -EINVAL;

	*req_count = count;
	return 0;
}

/*
 * Validate that a remap range is sane and that the caller has permission
 * to access it: no negative offsets/lengths, no loff_t wraparound, honors
 * mandatory locks if enabled, then asks the LSM for read/write permission.
 */
static int remap_verify_area(struct file *file, loff_t pos, loff_t len,
			     bool write)
{
	struct inode *inode = file_inode(file);

	if (unlikely(pos < 0 || len < 0))
		return -EINVAL;

	if (unlikely((loff_t) (pos + len) < 0))
		return -EINVAL;

	/* Mandatory locking: block on conflicting record locks. */
	if (unlikely(inode->i_flctx && mandatory_lock(inode))) {
		loff_t end = len ? pos + len - 1 : OFFSET_MAX;
		int retval;

		retval = locks_mandatory_area(inode, file, pos, end,
				write ? F_WRLCK : F_RDLCK);
		if (retval < 0)
			return retval;
	}

	return security_file_permission(file, write ? MAY_WRITE : MAY_READ);
}

/*
 * Ensure that we don't remap a partial EOF block in the middle of something
 * else.  Assume that the offsets have already been checked for block
 * alignment.
 *
 * For clone we only link a partial EOF block above or at the destination
 * file's EOF.  For deduplication we accept a partial EOF block only if it
 * ends at the destination file's EOF (can not link it into the middle of a
 * file).
 *
 * Shorten the request if possible.
 */
static int generic_remap_check_len(struct inode *inode_in,
				   struct inode *inode_out,
				   loff_t pos_out,
				   loff_t *len,
				   unsigned int remap_flags)
{
	u64 blkmask = i_blocksize(inode_in) - 1;
	loff_t new_len = *len;

	/* Block-aligned length: nothing to worry about. */
	if ((*len & blkmask) == 0)
		return 0;

	/* Partial tail would land inside file2 — trim to a full block. */
	if (pos_out + *len < i_size_read(inode_out))
		new_len &= ~blkmask;

	if (new_len == *len)
		return 0;

	if (remap_flags & REMAP_FILE_CAN_SHORTEN) {
		*len = new_len;
		return 0;
	}

	/* Caller can't shorten: dedupe reports "differs", clone "invalid". */
	return (remap_flags & REMAP_FILE_DEDUP) ?
		-EBADE : -EINVAL;
}

/* Read a page's worth of file data into the page cache. */
static struct page *vfs_dedupe_get_page(struct inode *inode, loff_t offset)
{
	struct page *page;

	page = read_mapping_page(inode->i_mapping, offset >> PAGE_SHIFT, NULL);
	if (IS_ERR(page))
		return page;
	/* Read completed but the page isn't valid: treat as I/O error. */
	if (!PageUptodate(page)) {
		put_page(page);
		return ERR_PTR(-EIO);
	}
	return page;
}

/*
 * Lock two pages, ensuring that we lock in offset order if the pages are from
 * the same file.
 */
static void vfs_lock_two_pages(struct page *page1, struct page *page2)
{
	/* Always lock in order of increasing index. */
	if (page1->index > page2->index)
		swap(page1, page2);

	lock_page(page1);
	if (page1 != page2)
		lock_page(page2);
}

/* Unlock two pages, being careful not to unlock the same page twice. */
static void vfs_unlock_two_pages(struct page *page1, struct page *page2)
{
	unlock_page(page1);
	if (page1 != page2)
		unlock_page(page2);
}

/*
 * Compare extents of two files to see if they are the same.
 * Caller must have locked both inodes to prevent write races.
 *
 * On success returns 0 with *is_same saying whether the ranges matched
 * byte for byte; a negative errno is returned on read failure.
 */
static int vfs_dedupe_file_range_compare(struct inode *src, loff_t srcoff,
					 struct inode *dest, loff_t destoff,
					 loff_t len, bool *is_same)
{
	loff_t src_poff;
	loff_t dest_poff;
	void *src_addr;
	void *dest_addr;
	struct page *src_page;
	struct page *dest_page;
	loff_t cmp_len;
	bool same;
	int error;

	error = -EINVAL;
	same = true;
	/* Walk the range a page (or less) at a time. */
	while (len) {
		src_poff = srcoff & (PAGE_SIZE - 1);
		dest_poff = destoff & (PAGE_SIZE - 1);
		cmp_len = min(PAGE_SIZE - src_poff,
			      PAGE_SIZE - dest_poff);
		cmp_len = min(cmp_len, len);
		if (cmp_len <= 0)
			goto out_error;

		src_page = vfs_dedupe_get_page(src, srcoff);
		if (IS_ERR(src_page)) {
			error = PTR_ERR(src_page);
			goto out_error;
		}
		dest_page = vfs_dedupe_get_page(dest, destoff);
		if (IS_ERR(dest_page)) {
			error = PTR_ERR(dest_page);
			put_page(src_page);
			goto out_error;
		}

		vfs_lock_two_pages(src_page, dest_page);

		/*
		 * Now that we've locked both pages, make sure they're still
		 * mapped to the file data we're interested in.
		 * If not,
		 * someone is invalidating pages on us and we lose.
		 */
		if (!PageUptodate(src_page) || !PageUptodate(dest_page) ||
		    src_page->mapping != src->i_mapping ||
		    dest_page->mapping != dest->i_mapping) {
			same = false;
			goto unlock;
		}

		src_addr = kmap_atomic(src_page);
		dest_addr = kmap_atomic(dest_page);

		flush_dcache_page(src_page);
		flush_dcache_page(dest_page);

		if (memcmp(src_addr + src_poff, dest_addr + dest_poff,
			   cmp_len))
			same = false;

		kunmap_atomic(dest_addr);
		kunmap_atomic(src_addr);

unlock:
		vfs_unlock_two_pages(src_page, dest_page);
		put_page(dest_page);
		put_page(src_page);

		if (!same)
			break;

		srcoff += cmp_len;
		destoff += cmp_len;
		len -= cmp_len;
	}

	*is_same = same;
	return 0;

out_error:
	return error;
}

/*
 * Check that the two inodes are eligible for cloning, the ranges make
 * sense, and then flush all dirty data.  Caller must ensure that the
 * inodes have been locked against any other modifications.
 *
 * If there's an error, then the usual negative error code is returned.
 * Otherwise returns 0 with *len set to the request length.
 */
int generic_remap_file_range_prep(struct file *file_in, loff_t pos_in,
				  struct file *file_out, loff_t pos_out,
				  loff_t *len, unsigned int remap_flags)
{
	struct inode *inode_in = file_inode(file_in);
	struct inode *inode_out = file_inode(file_out);
	bool same_inode = (inode_in == inode_out);
	int ret;

	/* Don't touch certain kinds of inodes */
	if (IS_IMMUTABLE(inode_out))
		return -EPERM;

	if (IS_SWAPFILE(inode_in) || IS_SWAPFILE(inode_out))
		return -ETXTBSY;

	/* Don't reflink dirs, pipes, sockets... */
	if (S_ISDIR(inode_in->i_mode) || S_ISDIR(inode_out->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode_in->i_mode) || !S_ISREG(inode_out->i_mode))
		return -EINVAL;

	/*
	 * Zero length dedupe exits immediately; reflink goes to EOF.
	 */
	if (*len == 0) {
		loff_t isize = i_size_read(inode_in);

		if ((remap_flags & REMAP_FILE_DEDUP) || pos_in == isize)
			return 0;
		if (pos_in > isize)
			return -EINVAL;
		/* Reflink of length 0 means "clone to the end of file_in". */
		*len = isize - pos_in;
		if (*len == 0)
			return 0;
	}

	/* Check that we don't violate system file offset limits. */
	ret = generic_remap_checks(file_in, pos_in, file_out, pos_out, len,
			remap_flags);
	if (ret)
		return ret;

	/* Wait for the completion of any pending IOs on both files */
	inode_dio_wait(inode_in);
	if (!same_inode)
		inode_dio_wait(inode_out);

	/* Flush dirty pagecache so the remap sees stable data. */
	ret = filemap_write_and_wait_range(inode_in->i_mapping,
			pos_in, pos_in + *len - 1);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(inode_out->i_mapping,
			pos_out, pos_out + *len - 1);
	if (ret)
		return ret;

	/*
	 * Check that the extents are the same.
	 */
	if (remap_flags & REMAP_FILE_DEDUP) {
		bool is_same = false;

		ret = vfs_dedupe_file_range_compare(inode_in, pos_in,
				inode_out, pos_out, *len, &is_same);
		if (ret)
			return ret;
		if (!is_same)
			return -EBADE;
	}

	ret = generic_remap_check_len(inode_in, inode_out, pos_out, len,
			remap_flags);
	if (ret)
		return ret;

	/*
	 * Dedupe may not alter the file contents, so only a clone needs the
	 * timestamp/privilege update done by file_modified().
	 */
	if (!(remap_flags & REMAP_FILE_DEDUP))
		ret = file_modified(file_out);

	return ret;
}
EXPORT_SYMBOL(generic_remap_file_range_prep);

/*
 * Clone (reflink) a range from file_in to file_out via the filesystem's
 * ->remap_file_range method.  Caller is expected to hold the write access
 * on file_out (see vfs_clone_file_range).
 */
loff_t do_clone_file_range(struct file *file_in, loff_t pos_in,
			   struct file *file_out, loff_t pos_out,
			   loff_t len, unsigned int remap_flags)
{
	loff_t ret;

	WARN_ON_ONCE(remap_flags & REMAP_FILE_DEDUP);

	/*
	 * FICLONE/FICLONERANGE ioctls enforce that src and dest files are on
	 * the same mount. Practically, they only need to be on the same file
	 * system.
	 */
	if (file_inode(file_in)->i_sb != file_inode(file_out)->i_sb)
		return -EXDEV;

	ret = generic_file_rw_checks(file_in, file_out);
	if (ret < 0)
		return ret;

	if (!file_in->f_op->remap_file_range)
		return -EOPNOTSUPP;

	ret = remap_verify_area(file_in, pos_in, len, false);
	if (ret)
		return ret;

	ret = remap_verify_area(file_out, pos_out, len, true);
	if (ret)
		return ret;

	ret = file_in->f_op->remap_file_range(file_in, pos_in,
			file_out, pos_out, len, remap_flags);
	if (ret < 0)
		return ret;

	/* Tell watchers the source was read and the destination changed. */
	fsnotify_access(file_in);
	fsnotify_modify(file_out);
	return ret;
}
EXPORT_SYMBOL(do_clone_file_range);

/*
 * Wrapper around do_clone_file_range() that takes write access to the
 * destination file for the duration of the clone.
 */
loff_t vfs_clone_file_range(struct file *file_in, loff_t pos_in,
			    struct file *file_out, loff_t pos_out,
			    loff_t len, unsigned int remap_flags)
{
	loff_t ret;

	file_start_write(file_out);
	ret = do_clone_file_range(file_in, pos_in, file_out, pos_out, len,
				  remap_flags);
	file_end_write(file_out);

	return ret;
}
EXPORT_SYMBOL(vfs_clone_file_range);

/* Check whether we are allowed to dedupe the destination file */
static bool allow_file_dedupe(struct file *file)
{
	struct user_namespace *mnt_userns = file_mnt_user_ns(file);
	struct inode *inode = file_inode(file);

	if (capable(CAP_SYS_ADMIN))
		return true;
	if (file->f_mode & FMODE_WRITE)
		return true;
	if (uid_eq(current_fsuid(), i_uid_into_mnt(mnt_userns, inode)))
		return true;
	if (!inode_permission(mnt_userns, inode, MAY_WRITE))
		return true;
	return false;
}

/*
 * Deduplicate a single source range against one destination file via the
 * filesystem's ->remap_file_range method with REMAP_FILE_DEDUP set.
 */
loff_t vfs_dedupe_file_range_one(struct file *src_file, loff_t src_pos,
				 struct file *dst_file, loff_t dst_pos,
				 loff_t len, unsigned int remap_flags)
{
	loff_t ret;

	WARN_ON_ONCE(remap_flags & ~(REMAP_FILE_DEDUP |
				     REMAP_FILE_CAN_SHORTEN));

	ret = mnt_want_write_file(dst_file);
	if (ret)
		return ret;

	/*
	 * This is redundant if called from vfs_dedupe_file_range(), but other
	 * callers need it and it's not performance sensitive...
	 */
	ret = remap_verify_area(src_file, src_pos, len, false);
	if (ret)
		goto out_drop_write;

	ret = remap_verify_area(dst_file, dst_pos, len, true);
	if (ret)
		goto out_drop_write;

	ret = -EPERM;
	if (!allow_file_dedupe(dst_file))
		goto out_drop_write;

	ret = -EXDEV;
	if (src_file->f_path.mnt != dst_file->f_path.mnt)
		goto out_drop_write;

	ret = -EISDIR;
	if (S_ISDIR(file_inode(dst_file)->i_mode))
		goto out_drop_write;

	ret = -EINVAL;
	if (!dst_file->f_op->remap_file_range)
		goto out_drop_write;

	/* Zero-length dedupe is a successful no-op. */
	if (len == 0) {
		ret = 0;
		goto out_drop_write;
	}

	ret = dst_file->f_op->remap_file_range(src_file, src_pos, dst_file,
			dst_pos, len, remap_flags | REMAP_FILE_DEDUP);
out_drop_write:
	mnt_drop_write_file(dst_file);

	return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range_one);

/*
 * Service a FIDEDUPERANGE request: dedupe one source range against each of
 * the destinations in @same->info[], recording per-destination status and
 * bytes_deduped.  Returns 0 unless the initial validation fails; individual
 * destination failures are reported via info[i].status.
 */
int vfs_dedupe_file_range(struct file *file, struct file_dedupe_range *same)
{
	struct file_dedupe_range_info *info;
	struct inode *src = file_inode(file);
	u64 off;
	u64 len;
	int i;
	int ret;
	u16 count = same->dest_count;
	loff_t deduped;

	if (!(file->f_mode & FMODE_READ))
		return -EINVAL;

	if (same->reserved1 || same->reserved2)
		return -EINVAL;

	off = same->src_offset;
	len = same->src_length;

	if (S_ISDIR(src->i_mode))
		return -EISDIR;

	if (!S_ISREG(src->i_mode))
		return -EINVAL;

	if (!file->f_op->remap_file_range)
		return -EOPNOTSUPP;

	ret = remap_verify_area(file, off, len, false);
	if (ret < 0)
		return ret;
	ret = 0;

	if (off + len > i_size_read(src))
		return -EINVAL;

	/*
	 * Arbitrary 1G limit on a single dedupe request, can be raised.
	 */
	len = min_t(u64, len, 1 << 30);

	/* pre-format output fields to sane values */
	for (i = 0; i < count; i++) {
		same->info[i].bytes_deduped = 0ULL;
		same->info[i].status = FILE_DEDUPE_RANGE_SAME;
	}

	for (i = 0, info = same->info; i < count; i++, info++) {
		struct fd dst_fd = fdget(info->dest_fd);
		struct file *dst_file = dst_fd.file;

		if (!dst_file) {
			info->status = -EBADF;
			goto next_loop;
		}

		if (info->reserved) {
			info->status = -EINVAL;
			goto next_fdput;
		}

		deduped = vfs_dedupe_file_range_one(file, off, dst_file,
						    info->dest_offset, len,
						    REMAP_FILE_CAN_SHORTEN);
		/* -EBADE means "contents differ", not a hard error. */
		if (deduped == -EBADE)
			info->status = FILE_DEDUPE_RANGE_DIFFERS;
		else if (deduped < 0)
			info->status = deduped;
		else
			info->bytes_deduped = len;

next_fdput:
		fdput(dst_fd);
next_loop:
		if (fatal_signal_pending(current))
			break;
	}
	return ret;
}
EXPORT_SYMBOL(vfs_dedupe_file_range);

/* Performs necessary checks before doing a range exchange. */
static int generic_xchg_file_range_checks(struct file *file1,
					  struct file *file2,
					  const struct file_xchg_range *fxr,
					  unsigned int blocksize)
{
	struct inode *inode1 = file1->f_mapping->host;
	struct inode *inode2 = file2->f_mapping->host;
	int64_t test_len;
	uint64_t blen;
	loff_t size1, size2;
	int ret;

	if (fxr->length < 0)
		return -EINVAL;

	/* The start of both ranges must be aligned to an fs block. */
	if (!IS_ALIGNED(fxr->file1_offset, blocksize) ||
	    !IS_ALIGNED(fxr->file2_offset, blocksize))
		return -EINVAL;

	/* Ensure offsets don't wrap. */
	if (fxr->file1_offset + fxr->length < fxr->file1_offset ||
	    fxr->file2_offset + fxr->length < fxr->file2_offset)
		return -EINVAL;

	size1 = i_size_read(inode1);
	size2 = i_size_read(inode2);

	/*
	 * We require both ranges to be within EOF, unless we're exchanging
	 * to EOF.  generic_xchg_range_prep already checked that both
	 * fxr->file1_offset and fxr->file2_offset are within EOF.
	 */
	if (!(fxr->flags & FILE_XCHG_RANGE_TO_EOF) &&
	    (fxr->file1_offset + fxr->length > size1 ||
	     fxr->file2_offset + fxr->length > size2))
		return -EINVAL;

	/*
	 * Make sure we don't hit any file size limits.  If we hit any size
	 * limits such that test_length was adjusted, we abort the whole
	 * operation.
	 */
	test_len = fxr->length;
	ret = generic_write_check_limits(file2, fxr->file2_offset, &test_len);
	if (ret)
		return ret;
	ret = generic_write_check_limits(file1, fxr->file1_offset, &test_len);
	if (ret)
		return ret;
	if (test_len != fxr->length)
		return -EINVAL;

	/*
	 * If the user wanted us to exchange up to the infile's EOF, round up
	 * to the next block boundary for this check.  Do the same for the
	 * outfile.
	 *
	 * Otherwise, reject the range length if it's not block aligned.  We
	 * already confirmed the starting offsets' block alignment.
	 */
	if (fxr->file1_offset + fxr->length == size1)
		blen = ALIGN(size1, blocksize) - fxr->file1_offset;
	else if (fxr->file2_offset + fxr->length == size2)
		blen = ALIGN(size2, blocksize) - fxr->file2_offset;
	else if (!IS_ALIGNED(fxr->length, blocksize))
		return -EINVAL;
	else
		blen = fxr->length;

	/* Don't allow overlapped exchanges within the same file. */
	if (inode1 == inode2 &&
	    fxr->file2_offset + blen > fxr->file1_offset &&
	    fxr->file1_offset + blen > fxr->file2_offset)
		return -EINVAL;

	return 0;
}

/*
 * Check that the two inodes are eligible for range exchanges, the ranges make
 * sense, and then flush all dirty data.  Caller must ensure that the inodes
 * have been locked against any other modifications.
 */
int generic_xchg_file_range_prep(struct file *file1, struct file *file2,
				 struct file_xchg_range *fxr,
				 unsigned int blocksize)
{
	struct inode *inode1 = file_inode(file1);
	struct inode *inode2 = file_inode(file2);
	u64 blkmask = blocksize - 1;
	bool same_inode = (inode1 == inode2);
	int ret;

	/* Don't touch certain kinds of inodes */
	if (IS_IMMUTABLE(inode1) || IS_IMMUTABLE(inode2))
		return -EPERM;
	if (IS_SWAPFILE(inode1) || IS_SWAPFILE(inode2))
		return -ETXTBSY;

	/* Don't reflink dirs, pipes, sockets... */
	if (S_ISDIR(inode1->i_mode) || S_ISDIR(inode2->i_mode))
		return -EISDIR;
	if (!S_ISREG(inode1->i_mode) || !S_ISREG(inode2->i_mode))
		return -EINVAL;

	/* Ranges cannot start after EOF. */
	if (fxr->file1_offset > i_size_read(inode1) ||
	    fxr->file2_offset > i_size_read(inode2))
		return -EINVAL;

	/*
	 * If the caller said to exchange to EOF, we set the length of the
	 * request large enough to cover everything to the end of both files.
	 */
	if (fxr->flags & FILE_XCHG_RANGE_TO_EOF)
		fxr->length = max_t(int64_t,
				    i_size_read(inode1) - fxr->file1_offset,
				    i_size_read(inode2) - fxr->file2_offset);

	/* Zero length exchange exits immediately. */
	if (fxr->length == 0)
		return 0;

	/* Check that we don't violate system file offset limits. */
	ret = generic_xchg_file_range_checks(file1, file2, fxr, blocksize);
	if (ret)
		return ret;

	/*
	 * Ensure that we don't exchange a partial EOF block into the middle of
	 * another file.
	 */
	if (fxr->length & blkmask) {
		loff_t new_length = fxr->length;

		/* A partial tail may only land at each file's EOF. */
		if (fxr->file2_offset + new_length < i_size_read(inode2))
			new_length &= ~blkmask;

		if (fxr->file1_offset + new_length < i_size_read(inode1))
			new_length &= ~blkmask;

		if (new_length != fxr->length)
			return -EINVAL;
	}

	/* Wait for the completion of any pending IOs on both files */
	inode_dio_wait(inode1);
	if (!same_inode)
		inode_dio_wait(inode2);

	/* Flush dirty pagecache before the exchange touches the ranges. */
	ret = filemap_write_and_wait_range(inode1->i_mapping,
			fxr->file1_offset,
			fxr->file1_offset + fxr->length - 1);
	if (ret)
		return ret;

	ret = filemap_write_and_wait_range(inode2->i_mapping,
			fxr->file2_offset,
			fxr->file2_offset + fxr->length - 1);
	if (ret)
		return ret;

	/*
	 * If the files or inodes involved require synchronous writes, amend
	 * the request to force the filesystem to flush all data and metadata
	 * to disk after the operation completes.
	 */
	if (((file1->f_flags | file2->f_flags) & (__O_SYNC | O_DSYNC)) ||
	    IS_SYNC(file_inode(file1)) || IS_SYNC(file_inode(file2)))
		fxr->flags |= FILE_XCHG_RANGE_FSYNC;

	/* Remove privilege bits from both files. */
	ret = file_remove_privs(file1);
	if (ret)
		return ret;
	return file_remove_privs(file2);
}
EXPORT_SYMBOL(generic_xchg_file_range_prep);

/*
 * Check that both files' metadata agree with the snapshot that we took for
 * the range exchange request.
 *
 * This should be called after the filesystem has locked /all/ inode metadata
 * against modification.
 */
int generic_xchg_file_range_check_fresh(struct inode *inode1,
					struct inode *inode2,
					const struct file_xchg_range *fxr)
{
	/* Check that the offset/length values cover all of both files */
	if ((fxr->flags & FILE_XCHG_RANGE_FULL_FILES) &&
	    (fxr->file1_offset != 0 || fxr->file2_offset != 0 ||
	     fxr->length != i_size_read(inode1) ||
	     fxr->length != i_size_read(inode2)))
		return -EDOM;

	/*
	 * Check that file2 hasn't otherwise been modified.
	 */
	if ((fxr->flags & FILE_XCHG_RANGE_FILE2_FRESH) &&
	    (fxr->file2_ino != inode2->i_ino ||
	     fxr->file2_ctime != inode2->i_ctime.tv_sec ||
	     fxr->file2_ctime_nsec != inode2->i_ctime.tv_nsec ||
	     fxr->file2_mtime != inode2->i_mtime.tv_sec ||
	     fxr->file2_mtime_nsec != inode2->i_mtime.tv_nsec))
		return -EBUSY;

	return 0;
}
EXPORT_SYMBOL(generic_xchg_file_range_check_fresh);

/*
 * Validate one side of an exchange request via remap_verify_area(), clamping
 * the checked length to EOF when the caller asked to exchange to EOF.
 */
static inline int xchg_range_verify_area(struct file *file, loff_t pos,
					 struct file_xchg_range *fxr)
{
	int64_t len = fxr->length;

	if (fxr->flags & FILE_XCHG_RANGE_TO_EOF)
		len = min_t(int64_t, len,
			    i_size_read(file_inode(file)) - pos);

	return remap_verify_area(file, pos, len, true);
}

/*
 * Exchange ranges between two files via the filesystem's ->xchg_file_range
 * method.  Caller is expected to hold write access to file2 (see
 * vfs_xchg_file_range).
 */
int do_xchg_file_range(struct file *file1, struct file *file2,
		       struct file_xchg_range *fxr)
{
	int ret;

	/* Reject unknown flags and nonzero padding. */
	if ((fxr->flags & ~FILE_XCHG_RANGE_ALL_FLAGS) ||
	    memchr_inv(&fxr->pad, 0, sizeof(fxr->pad)))
		return -EINVAL;

	/* FULL_FILES and TO_EOF are mutually exclusive. */
	if ((fxr->flags & FILE_XCHG_RANGE_FULL_FILES) &&
	    (fxr->flags & FILE_XCHG_RANGE_TO_EOF))
		return -EINVAL;

	/*
	 * The ioctl enforces that src and dest files are on the same mount.
	 * Practically, they only need to be on the same file system.
	 */
	if (file_inode(file1)->i_sb != file_inode(file2)->i_sb)
		return -EXDEV;

	/* Both files must be readable and writable; check both directions. */
	ret = generic_file_rw_checks(file1, file2);
	if (ret < 0)
		return ret;

	ret = generic_file_rw_checks(file2, file1);
	if (ret < 0)
		return ret;

	if (!file1->f_op->xchg_file_range)
		return -EOPNOTSUPP;

	ret = xchg_range_verify_area(file1, fxr->file1_offset, fxr);
	if (ret)
		return ret;

	ret = xchg_range_verify_area(file2, fxr->file2_offset, fxr);
	if (ret)
		return ret;

	ret = file2->f_op->xchg_file_range(file1, file2, fxr);
	if (ret)
		return ret;

	/* Both files' contents changed. */
	fsnotify_modify(file1);
	fsnotify_modify(file2);
	return 0;
}
EXPORT_SYMBOL(do_xchg_file_range);

/*
 * Wrapper around do_xchg_file_range() that takes write access to file2 for
 * the duration of the exchange.
 */
int vfs_xchg_file_range(struct file *file1, struct file *file2,
			struct file_xchg_range *fxr)
{
	int ret;

	file_start_write(file2);
	ret = do_xchg_file_range(file1, file2, fxr);
	file_end_write(file2);

	return ret;
}
EXPORT_SYMBOL(vfs_xchg_file_range);