Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/array.c        |   2
-rw-r--r--  fs/proc/base.c         |  31
-rw-r--r--  fs/proc/fd.c           |  14
-rw-r--r--  fs/proc/internal.h     |   2
-rw-r--r--  fs/proc/interrupts.c   |   4
-rw-r--r--  fs/proc/kcore.c        |  46
-rw-r--r--  fs/proc/meminfo.c      |   2
-rw-r--r--  fs/proc/proc_sysctl.c  | 122
-rw-r--r--  fs/proc/softirqs.c     |   2
-rw-r--r--  fs/proc/stat.c         |   4
-rw-r--r--  fs/proc/task_mmu.c     |  22
-rw-r--r--  fs/proc/vmcore.c       |   9
12 files changed, 153 insertions, 107 deletions
diff --git a/fs/proc/array.c b/fs/proc/array.c
index 34a47fb0c57f..55ed3510d2bb 100644
--- a/fs/proc/array.c
+++ b/fs/proc/array.c
@@ -109,7 +109,7 @@ void proc_task_name(struct seq_file *m, struct task_struct *p, bool escape)
else if (p->flags & PF_KTHREAD)
get_kthread_comm(tcomm, sizeof(tcomm), p);
else
- __get_task_comm(tcomm, sizeof(tcomm), p);
+ get_task_comm(tcomm, p);
if (escape)
seq_escape_str(m, tcomm, ESCAPE_SPACE | ESCAPE_SPECIAL, "\n\\");
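The switch to get_task_comm() drops the explicit length argument because the helper is a macro that checks the buffer size at compile time. As an approximate shape of that wrapper (an illustration, not the verbatim kernel definition):

	/*
	 * Approximate sketch: the macro takes the array itself, so the length
	 * check happens at build time instead of being passed by the caller.
	 */
	#define get_task_comm(buf, tsk) ({				\
		BUILD_BUG_ON(sizeof(buf) < TASK_COMM_LEN);		\
		strscpy_pad(buf, (tsk)->comm, sizeof(buf));		\
	})
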
diff --git a/fs/proc/base.c b/fs/proc/base.c
index b31283d81c52..0edf14a9840e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -58,7 +58,6 @@
#include <linux/init.h>
#include <linux/capability.h>
#include <linux/file.h>
-#include <linux/fdtable.h>
#include <linux/generic-radix-tree.h>
#include <linux/string.h>
#include <linux/seq_file.h>
@@ -832,19 +831,21 @@ static const struct file_operations proc_single_file_operations = {
struct mm_struct *proc_mem_open(struct inode *inode, unsigned int mode)
{
struct task_struct *task = get_proc_task(inode);
- struct mm_struct *mm = ERR_PTR(-ESRCH);
+ struct mm_struct *mm;
- if (task) {
- mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
- put_task_struct(task);
+ if (!task)
+ return ERR_PTR(-ESRCH);
- if (!IS_ERR_OR_NULL(mm)) {
- /* ensure this mm_struct can't be freed */
- mmgrab(mm);
- /* but do not pin its memory */
- mmput(mm);
- }
- }
+ mm = mm_access(task, mode | PTRACE_MODE_FSCREDS);
+ put_task_struct(task);
+
+ if (IS_ERR(mm))
+ return mm == ERR_PTR(-ESRCH) ? NULL : mm;
+
+ /* ensure this mm_struct can't be freed */
+ mmgrab(mm);
+ /* but do not pin its memory */
+ mmput(mm);
return mm;
}
@@ -2208,7 +2209,7 @@ static int map_files_d_revalidate(struct dentry *dentry, unsigned int flags)
goto out_notask;
mm = mm_access(task, PTRACE_MODE_READ_FSCREDS);
- if (IS_ERR_OR_NULL(mm))
+ if (IS_ERR(mm))
goto out;
if (!dname_to_vma_addr(dentry, &vm_start, &vm_end)) {
@@ -2553,8 +2554,8 @@ static int show_timer(struct seq_file *m, void *v)
seq_printf(m, "ID: %d\n", timer->it_id);
seq_printf(m, "signal: %d/%px\n",
- timer->sigq->info.si_signo,
- timer->sigq->info.si_value.sival_ptr);
+ timer->sigq.info.si_signo,
+ timer->sigq.info.si_value.sival_ptr);
seq_printf(m, "notify: %s/%s.%d\n",
nstr[notify & ~SIGEV_THREAD_ID],
(notify & SIGEV_THREAD_ID) ? "tid" : "pid",
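With the proc_mem_open() rework above, only the "task has no mm" case (-ESRCH) is mapped to NULL; every other mm_access() error is propagated, and on success the mm_struct is kept alive with mmgrab() without pinning its memory. A rough caller sketch under those assumptions:

	/* illustrative caller; error handling follows the new contract */
	struct mm_struct *mm = proc_mem_open(inode, PTRACE_MODE_READ);

	if (IS_ERR(mm))
		return PTR_ERR(mm);	/* permission or other real error */
	if (!mm)
		return -ESRCH;		/* task exited / has no mm */

	if (mmget_not_zero(mm)) {	/* pin the memory only while accessing it */
		/* ... read from mm ... */
		mmput(mm);
	}
	mmdrop(mm);			/* balance the mmgrab() in proc_mem_open() */
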
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 1f54a54bfb91..24baf23e864f 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -77,7 +77,7 @@ static int seq_fdinfo_open(struct inode *inode, struct file *file)
return single_open(file, seq_show, inode);
}
-/**
+/*
* Shared /proc/pid/fdinfo and /proc/pid/fdinfo/fd permission helper to ensure
* that the current task has PTRACE_MODE_READ in addition to the normal
* POSIX-like checks.
@@ -116,9 +116,7 @@ static bool tid_fd_mode(struct task_struct *task, unsigned fd, fmode_t *mode)
{
struct file *file;
- rcu_read_lock();
- file = task_lookup_fdget_rcu(task, fd);
- rcu_read_unlock();
+ file = fget_task(task, fd);
if (file) {
*mode = file->f_mode;
fput(file);
@@ -258,19 +256,17 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
if (!dir_emit_dots(file, ctx))
goto out;
- rcu_read_lock();
for (fd = ctx->pos - 2;; fd++) {
struct file *f;
struct fd_data data;
char name[10 + 1];
unsigned int len;
- f = task_lookup_next_fdget_rcu(p, &fd);
+ f = fget_task_next(p, &fd);
ctx->pos = fd + 2LL;
if (!f)
break;
data.mode = f->f_mode;
- rcu_read_unlock();
fput(f);
data.fd = fd;
@@ -278,11 +274,9 @@ static int proc_readfd_common(struct file *file, struct dir_context *ctx,
if (!proc_fill_cache(file, ctx,
name, len, instantiate, p,
&data))
- goto out;
+ break;
cond_resched();
- rcu_read_lock();
}
- rcu_read_unlock();
out:
put_task_struct(p);
return 0;
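The fd.c changes replace the RCU lookup helpers with fget_task()/fget_task_next(), which take their own reference on the file, so the rcu_read_lock()/unlock() bracketing around the loop goes away. An illustrative iteration, assuming the semantics used in the hunk above (a referenced file for the next in-use descriptor >= *fd, or NULL):

	unsigned int fd = 0;
	struct file *f;

	while ((f = fget_task_next(task, &fd))) {
		pr_info("fd %u: f_mode %x\n", fd, f->f_mode);
		fput(f);	/* the helper took its own reference */
		fd++;
	}
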
diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index 87e4d6282025..1695509370b8 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -102,7 +102,7 @@ struct proc_inode {
union proc_op op;
struct proc_dir_entry *pde;
struct ctl_table_header *sysctl;
- struct ctl_table *sysctl_entry;
+ const struct ctl_table *sysctl_entry;
struct hlist_node sibling_inodes;
const struct proc_ns_operations *ns_ops;
struct inode vfs_inode;
diff --git a/fs/proc/interrupts.c b/fs/proc/interrupts.c
index cb0edc7cbf09..714a22ded8a8 100644
--- a/fs/proc/interrupts.c
+++ b/fs/proc/interrupts.c
@@ -11,13 +11,13 @@
*/
static void *int_seq_start(struct seq_file *f, loff_t *pos)
{
- return (*pos <= nr_irqs) ? pos : NULL;
+ return *pos <= irq_get_nr_irqs() ? pos : NULL;
}
static void *int_seq_next(struct seq_file *f, void *v, loff_t *pos)
{
(*pos)++;
- if (*pos > nr_irqs)
+ if (*pos > irq_get_nr_irqs())
return NULL;
return pos;
}
diff --git a/fs/proc/kcore.c b/fs/proc/kcore.c
index 7d0acdad74e2..7a85735d584f 100644
--- a/fs/proc/kcore.c
+++ b/fs/proc/kcore.c
@@ -50,6 +50,20 @@ static struct proc_dir_entry *proc_root_kcore;
#define kc_offset_to_vaddr(o) ((o) + PAGE_OFFSET)
#endif
+#ifndef kc_xlate_dev_mem_ptr
+#define kc_xlate_dev_mem_ptr kc_xlate_dev_mem_ptr
+static inline void *kc_xlate_dev_mem_ptr(phys_addr_t phys)
+{
+ return __va(phys);
+}
+#endif
+#ifndef kc_unxlate_dev_mem_ptr
+#define kc_unxlate_dev_mem_ptr kc_unxlate_dev_mem_ptr
+static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
+{
+}
+#endif
+
static LIST_HEAD(kclist_head);
static DECLARE_RWSEM(kclist_lock);
static int kcore_need_update = 1;
@@ -471,19 +485,21 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
while (buflen) {
struct page *page;
unsigned long pfn;
+ phys_addr_t phys;
+ void *__start;
/*
* If this is the first iteration or the address is not within
* the previous entry, search for a matching entry.
*/
if (!m || start < m->addr || start >= m->addr + m->size) {
- struct kcore_list *iter;
+ struct kcore_list *pos;
m = NULL;
- list_for_each_entry(iter, &kclist_head, list) {
- if (start >= iter->addr &&
- start < iter->addr + iter->size) {
- m = iter;
+ list_for_each_entry(pos, &kclist_head, list) {
+ if (start >= pos->addr &&
+ start < pos->addr + pos->size) {
+ m = pos;
break;
}
}
@@ -537,7 +553,8 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
}
break;
case KCORE_RAM:
- pfn = __pa(start) >> PAGE_SHIFT;
+ phys = __pa(start);
+ pfn = phys >> PAGE_SHIFT;
page = pfn_to_online_page(pfn);
/*
@@ -557,13 +574,28 @@ static ssize_t read_kcore_iter(struct kiocb *iocb, struct iov_iter *iter)
fallthrough;
case KCORE_VMEMMAP:
case KCORE_TEXT:
+ if (m->type == KCORE_RAM) {
+ __start = kc_xlate_dev_mem_ptr(phys);
+ if (!__start) {
+ ret = -ENOMEM;
+ if (iov_iter_zero(tsz, iter) != tsz)
+ ret = -EFAULT;
+ goto out;
+ }
+ } else {
+ __start = (void *)start;
+ }
+
/*
* Sadly we must use a bounce buffer here to be able to
* make use of copy_from_kernel_nofault(), as these
* memory regions might not always be mapped on all
* architectures.
*/
- if (copy_from_kernel_nofault(buf, (void *)start, tsz)) {
+ ret = copy_from_kernel_nofault(buf, __start, tsz);
+ if (m->type == KCORE_RAM)
+ kc_unxlate_dev_mem_ptr(phys, __start);
+ if (ret) {
if (iov_iter_zero(tsz, iter) != tsz) {
ret = -EFAULT;
goto out;
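The new kc_xlate_dev_mem_ptr()/kc_unxlate_dev_mem_ptr() hooks let an architecture map KCORE_RAM pages through something other than the linear map (the generic fallbacks above are plain __va() and a no-op). A hypothetical arch override, purely as an illustration of the hook shape:

	/* hypothetical override in an arch header; the memremap() body is only
	 * an example of mapping the page through an alternate alias */
	#define kc_xlate_dev_mem_ptr kc_xlate_dev_mem_ptr
	static inline void *kc_xlate_dev_mem_ptr(phys_addr_t phys)
	{
		return memremap(phys, PAGE_SIZE, MEMREMAP_WB);
	}

	#define kc_unxlate_dev_mem_ptr kc_unxlate_dev_mem_ptr
	static inline void kc_unxlate_dev_mem_ptr(phys_addr_t phys, void *virt)
	{
		memunmap(virt);
	}
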
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 245171d9164b..8ba9b1472390 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -91,7 +91,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
#ifdef CONFIG_ZSWAP
show_val_kb(m, "Zswap: ", zswap_total_pages());
seq_printf(m, "Zswapped: %8lu kB\n",
- (unsigned long)atomic_read(&zswap_stored_pages) <<
+ (unsigned long)atomic_long_read(&zswap_stored_pages) <<
(PAGE_SHIFT - 10));
#endif
show_val_kb(m, "Dirty: ",
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 9553e77c9d31..27a283d85a6e 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -17,6 +17,7 @@
#include <linux/bpf-cgroup.h>
#include <linux/mount.h>
#include <linux/kmemleak.h>
+#include <linux/lockdep.h>
#include "internal.h"
#define list_for_each_table_entry(entry, header) \
@@ -29,8 +30,13 @@ static const struct inode_operations proc_sys_inode_operations;
static const struct file_operations proc_sys_dir_file_operations;
static const struct inode_operations proc_sys_dir_operations;
-/* Support for permanently empty directories */
-static struct ctl_table sysctl_mount_point[] = { };
+/*
+ * Support for permanently empty directories.
+ * Must be non-empty to avoid sharing an address with other tables.
+ */
+static const struct ctl_table sysctl_mount_point[] = {
+ { }
+};
/**
* register_sysctl_mount_point() - registers a sysctl mount point
@@ -42,7 +48,7 @@ static struct ctl_table sysctl_mount_point[] = { };
*/
struct ctl_table_header *register_sysctl_mount_point(const char *path)
{
- return register_sysctl(path, sysctl_mount_point);
+ return register_sysctl_sz(path, sysctl_mount_point, 0);
}
EXPORT_SYMBOL(register_sysctl_mount_point);
@@ -62,7 +68,7 @@ void proc_sys_poll_notify(struct ctl_table_poll *poll)
wake_up_interruptible(&poll->wait);
}
-static struct ctl_table root_table[] = {
+static const struct ctl_table root_table[] = {
{
.procname = "",
.mode = S_IFDIR|S_IRUGO|S_IXUGO,
@@ -83,7 +89,7 @@ static DEFINE_SPINLOCK(sysctl_lock);
static void drop_sysctl_table(struct ctl_table_header *header);
static int sysctl_follow_link(struct ctl_table_header **phead,
- struct ctl_table **pentry);
+ const struct ctl_table **pentry);
static int insert_links(struct ctl_table_header *head);
static void put_links(struct ctl_table_header *header);
@@ -104,14 +110,15 @@ static int namecmp(const char *name1, int len1, const char *name2, int len2)
return cmp;
}
-/* Called under sysctl_lock */
-static struct ctl_table *find_entry(struct ctl_table_header **phead,
+static const struct ctl_table *find_entry(struct ctl_table_header **phead,
struct ctl_dir *dir, const char *name, int namelen)
{
struct ctl_table_header *head;
- struct ctl_table *entry;
+ const struct ctl_table *entry;
struct rb_node *node = dir->root.rb_node;
+ lockdep_assert_held(&sysctl_lock);
+
while (node)
{
struct ctl_node *ctl_node;
@@ -136,7 +143,7 @@ static struct ctl_table *find_entry(struct ctl_table_header **phead,
return NULL;
}
-static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry)
+static int insert_entry(struct ctl_table_header *head, const struct ctl_table *entry)
{
struct rb_node *node = &head->node[entry - head->ctl_table].node;
struct rb_node **p = &head->parent->root.rb_node;
@@ -146,7 +153,7 @@ static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry)
while (*p) {
struct ctl_table_header *parent_head;
- struct ctl_table *parent_entry;
+ const struct ctl_table *parent_entry;
struct ctl_node *parent_node;
const char *parent_name;
int cmp;
@@ -175,7 +182,7 @@ static int insert_entry(struct ctl_table_header *head, struct ctl_table *entry)
return 0;
}
-static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry)
+static void erase_entry(struct ctl_table_header *head, const struct ctl_table *entry)
{
struct rb_node *node = &head->node[entry - head->ctl_table].node;
@@ -184,7 +191,7 @@ static void erase_entry(struct ctl_table_header *head, struct ctl_table *entry)
static void init_header(struct ctl_table_header *head,
struct ctl_table_root *root, struct ctl_table_set *set,
- struct ctl_node *node, struct ctl_table *table, size_t table_size)
+ struct ctl_node *node, const struct ctl_table *table, size_t table_size)
{
head->ctl_table = table;
head->ctl_table_size = table_size;
@@ -199,7 +206,7 @@ static void init_header(struct ctl_table_header *head,
head->node = node;
INIT_HLIST_HEAD(&head->inodes);
if (node) {
- struct ctl_table *entry;
+ const struct ctl_table *entry;
list_for_each_table_entry(entry, head) {
node->header = head;
@@ -212,7 +219,7 @@ static void init_header(struct ctl_table_header *head,
static void erase_header(struct ctl_table_header *head)
{
- struct ctl_table *entry;
+ const struct ctl_table *entry;
list_for_each_table_entry(entry, head)
erase_entry(head, entry);
@@ -220,7 +227,7 @@ static void erase_header(struct ctl_table_header *head)
static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header)
{
- struct ctl_table *entry;
+ const struct ctl_table *entry;
struct ctl_table_header *dir_h = &dir->header;
int err;
@@ -258,18 +265,20 @@ fail_links:
return err;
}
-/* called under sysctl_lock */
static int use_table(struct ctl_table_header *p)
{
+ lockdep_assert_held(&sysctl_lock);
+
if (unlikely(p->unregistering))
return 0;
p->used++;
return 1;
}
-/* called under sysctl_lock */
static void unuse_table(struct ctl_table_header *p)
{
+ lockdep_assert_held(&sysctl_lock);
+
if (!--p->used)
if (unlikely(p->unregistering))
complete(p->unregistering);
@@ -280,9 +289,11 @@ static void proc_sys_invalidate_dcache(struct ctl_table_header *head)
proc_invalidate_siblings_dcache(&head->inodes, &sysctl_lock);
}
-/* called under sysctl_lock, will reacquire if has to wait */
static void start_unregistering(struct ctl_table_header *p)
{
+ /* will reacquire if has to wait */
+ lockdep_assert_held(&sysctl_lock);
+
/*
* if p->used is 0, nobody will ever touch that entry again;
* we'll eliminate all paths to it before dropping sysctl_lock
@@ -339,12 +350,12 @@ lookup_header_set(struct ctl_table_root *root)
return set;
}
-static struct ctl_table *lookup_entry(struct ctl_table_header **phead,
- struct ctl_dir *dir,
- const char *name, int namelen)
+static const struct ctl_table *lookup_entry(struct ctl_table_header **phead,
+ struct ctl_dir *dir,
+ const char *name, int namelen)
{
struct ctl_table_header *head;
- struct ctl_table *entry;
+ const struct ctl_table *entry;
spin_lock(&sysctl_lock);
entry = find_entry(&head, dir, name, namelen);
@@ -369,10 +380,10 @@ static struct ctl_node *first_usable_entry(struct rb_node *node)
}
static void first_entry(struct ctl_dir *dir,
- struct ctl_table_header **phead, struct ctl_table **pentry)
+ struct ctl_table_header **phead, const struct ctl_table **pentry)
{
struct ctl_table_header *head = NULL;
- struct ctl_table *entry = NULL;
+ const struct ctl_table *entry = NULL;
struct ctl_node *ctl_node;
spin_lock(&sysctl_lock);
@@ -386,10 +397,10 @@ static void first_entry(struct ctl_dir *dir,
*pentry = entry;
}
-static void next_entry(struct ctl_table_header **phead, struct ctl_table **pentry)
+static void next_entry(struct ctl_table_header **phead, const struct ctl_table **pentry)
{
struct ctl_table_header *head = *phead;
- struct ctl_table *entry = *pentry;
+ const struct ctl_table *entry = *pentry;
struct ctl_node *ctl_node = &head->node[entry - head->ctl_table];
spin_lock(&sysctl_lock);
@@ -422,7 +433,7 @@ static int test_perm(int mode, int op)
return -EACCES;
}
-static int sysctl_perm(struct ctl_table_header *head, struct ctl_table *table, int op)
+static int sysctl_perm(struct ctl_table_header *head, const struct ctl_table *table, int op)
{
struct ctl_table_root *root = head->root;
int mode;
@@ -436,7 +447,7 @@ static int sysctl_perm(struct ctl_table_header *head, struct ctl_table *table, int op)
}
static struct inode *proc_sys_make_inode(struct super_block *sb,
- struct ctl_table_header *head, struct ctl_table *table)
+ struct ctl_table_header *head, const struct ctl_table *table)
{
struct ctl_table_root *root = head->root;
struct inode *inode;
@@ -507,7 +518,7 @@ static struct dentry *proc_sys_lookup(struct inode *dir, struct dentry *dentry,
struct ctl_table_header *head = grab_header(dir);
struct ctl_table_header *h = NULL;
const struct qstr *name = &dentry->d_name;
- struct ctl_table *p;
+ const struct ctl_table *p;
struct inode *inode;
struct dentry *err = ERR_PTR(-ENOENT);
struct ctl_dir *ctl_dir;
@@ -545,7 +556,7 @@ static ssize_t proc_sys_call_handler(struct kiocb *iocb, struct iov_iter *iter,
{
struct inode *inode = file_inode(iocb->ki_filp);
struct ctl_table_header *head = grab_header(inode);
- struct ctl_table *table = PROC_I(inode)->sysctl_entry;
+ const struct ctl_table *table = PROC_I(inode)->sysctl_entry;
size_t count = iov_iter_count(iter);
char *kbuf;
ssize_t error;
@@ -619,7 +630,7 @@ static ssize_t proc_sys_write(struct kiocb *iocb, struct iov_iter *iter)
static int proc_sys_open(struct inode *inode, struct file *filp)
{
struct ctl_table_header *head = grab_header(inode);
- struct ctl_table *table = PROC_I(inode)->sysctl_entry;
+ const struct ctl_table *table = PROC_I(inode)->sysctl_entry;
/* sysctl was unregistered */
if (IS_ERR(head))
@@ -637,7 +648,7 @@ static __poll_t proc_sys_poll(struct file *filp, poll_table *wait)
{
struct inode *inode = file_inode(filp);
struct ctl_table_header *head = grab_header(inode);
- struct ctl_table *table = PROC_I(inode)->sysctl_entry;
+ const struct ctl_table *table = PROC_I(inode)->sysctl_entry;
__poll_t ret = DEFAULT_POLLMASK;
unsigned long event;
@@ -668,7 +679,7 @@ out:
static bool proc_sys_fill_cache(struct file *file,
struct dir_context *ctx,
struct ctl_table_header *head,
- struct ctl_table *table)
+ const struct ctl_table *table)
{
struct dentry *child, *dir = file->f_path.dentry;
struct inode *inode;
@@ -693,11 +704,11 @@ static bool proc_sys_fill_cache(struct file *file,
res = d_splice_alias(inode, child);
d_lookup_done(child);
if (unlikely(res)) {
- if (IS_ERR(res)) {
- dput(child);
- return false;
- }
dput(child);
+
+ if (IS_ERR(res))
+ return false;
+
child = res;
}
}
@@ -712,7 +723,7 @@ static bool proc_sys_fill_cache(struct file *file,
static bool proc_sys_link_fill_cache(struct file *file,
struct dir_context *ctx,
struct ctl_table_header *head,
- struct ctl_table *table)
+ const struct ctl_table *table)
{
bool ret = true;
@@ -730,7 +741,7 @@ out:
return ret;
}
-static int scan(struct ctl_table_header *head, struct ctl_table *table,
+static int scan(struct ctl_table_header *head, const struct ctl_table *table,
unsigned long *pos, struct file *file,
struct dir_context *ctx)
{
@@ -754,7 +765,7 @@ static int proc_sys_readdir(struct file *file, struct dir_context *ctx)
{
struct ctl_table_header *head = grab_header(file_inode(file));
struct ctl_table_header *h = NULL;
- struct ctl_table *entry;
+ const struct ctl_table *entry;
struct ctl_dir *ctl_dir;
unsigned long pos;
@@ -787,7 +798,7 @@ static int proc_sys_permission(struct mnt_idmap *idmap,
* are _NOT_ writeable, capabilities or not.
*/
struct ctl_table_header *head;
- struct ctl_table *table;
+ const struct ctl_table *table;
int error;
/* Executable files are not allowed under /proc/sys/ */
@@ -831,7 +842,7 @@ static int proc_sys_getattr(struct mnt_idmap *idmap,
{
struct inode *inode = d_inode(path->dentry);
struct ctl_table_header *head = grab_header(inode);
- struct ctl_table *table = PROC_I(inode)->sysctl_entry;
+ const struct ctl_table *table = PROC_I(inode)->sysctl_entry;
if (IS_ERR(head))
return PTR_ERR(head);
@@ -930,7 +941,7 @@ static struct ctl_dir *find_subdir(struct ctl_dir *dir,
const char *name, int namelen)
{
struct ctl_table_header *head;
- struct ctl_table *entry;
+ const struct ctl_table *entry;
entry = find_entry(&head, dir, name, namelen);
if (!entry)
@@ -1041,12 +1052,12 @@ static struct ctl_dir *xlate_dir(struct ctl_table_set *set, struct ctl_dir *dir)
}
static int sysctl_follow_link(struct ctl_table_header **phead,
- struct ctl_table **pentry)
+ const struct ctl_table **pentry)
{
struct ctl_table_header *head;
+ const struct ctl_table *entry;
struct ctl_table_root *root;
struct ctl_table_set *set;
- struct ctl_table *entry;
struct ctl_dir *dir;
int ret;
@@ -1073,7 +1084,7 @@ static int sysctl_follow_link(struct ctl_table_header **phead,
return ret;
}
-static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...)
+static int sysctl_err(const char *path, const struct ctl_table *table, char *fmt, ...)
{
struct va_format vaf;
va_list args;
@@ -1089,7 +1100,7 @@ static int sysctl_err(const char *path, struct ctl_table *table, char *fmt, ...)
return -EINVAL;
}
-static int sysctl_check_table_array(const char *path, struct ctl_table *table)
+static int sysctl_check_table_array(const char *path, const struct ctl_table *table)
{
unsigned int extra;
int err = 0;
@@ -1128,7 +1139,7 @@ static int sysctl_check_table_array(const char *path, struct ctl_table *table)
static int sysctl_check_table(const char *path, struct ctl_table_header *header)
{
- struct ctl_table *entry;
+ const struct ctl_table *entry;
int err = 0;
list_for_each_table_entry(entry, header) {
if (!entry->procname)
@@ -1164,8 +1175,9 @@ static int sysctl_check_table(const char *path, struct ctl_table_header *header)
static struct ctl_table_header *new_links(struct ctl_dir *dir, struct ctl_table_header *head)
{
- struct ctl_table *link_table, *entry, *link;
+ struct ctl_table *link_table, *link;
struct ctl_table_header *links;
+ const struct ctl_table *entry;
struct ctl_node *node;
char *link_name;
int name_bytes;
@@ -1210,7 +1222,7 @@ static bool get_links(struct ctl_dir *dir,
struct ctl_table_root *link_root)
{
struct ctl_table_header *tmp_head;
- struct ctl_table *entry, *link;
+ const struct ctl_table *entry, *link;
if (header->ctl_table_size == 0 ||
sysctl_is_perm_empty_ctl_header(header))
@@ -1353,7 +1365,7 @@ static struct ctl_dir *sysctl_mkdir_p(struct ctl_dir *dir, const char *path)
*/
struct ctl_table_header *__register_sysctl_table(
struct ctl_table_set *set,
- const char *path, struct ctl_table *table, size_t table_size)
+ const char *path, const struct ctl_table *table, size_t table_size)
{
struct ctl_table_root *root = set->dir.header.root;
struct ctl_table_header *header;
@@ -1414,7 +1426,7 @@ fail:
*
* See __register_sysctl_table for more details.
*/
-struct ctl_table_header *register_sysctl_sz(const char *path, struct ctl_table *table,
+struct ctl_table_header *register_sysctl_sz(const char *path, const struct ctl_table *table,
size_t table_size)
{
return __register_sysctl_table(&sysctl_table_root.default_set,
@@ -1443,7 +1455,7 @@ EXPORT_SYMBOL(register_sysctl_sz);
*
* Context: if your base directory does not exist it will be created for you.
*/
-void __init __register_sysctl_init(const char *path, struct ctl_table *table,
+void __init __register_sysctl_init(const char *path, const struct ctl_table *table,
const char *table_name, size_t table_size)
{
struct ctl_table_header *hdr = register_sysctl_sz(path, table, table_size);
@@ -1461,7 +1473,7 @@ static void put_links(struct ctl_table_header *header)
struct ctl_table_root *root = header->root;
struct ctl_dir *parent = header->parent;
struct ctl_dir *core_parent;
- struct ctl_table *entry;
+ const struct ctl_table *entry;
if (header->set == root_set)
return;
@@ -1472,7 +1484,7 @@ static void put_links(struct ctl_table_header *header)
list_for_each_table_entry(entry, header) {
struct ctl_table_header *link_head;
- struct ctl_table *link;
+ const struct ctl_table *link;
const char *name = entry->procname;
link = find_entry(&link_head, core_parent, name, strlen(name));
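Because the sysctl code now takes const struct ctl_table pointers end to end, callers can declare their tables const and register them through the const-qualified register_sysctl_sz() shown above. An illustrative registration (the value, path, and names are made up for the example):

	static int example_value;

	static const struct ctl_table example_table[] = {
		{
			.procname	= "example_value",
			.data		= &example_value,
			.maxlen		= sizeof(int),
			.mode		= 0644,
			.proc_handler	= proc_dointvec,
		},
	};

	static struct ctl_table_header *example_header;

	static int __init example_sysctl_init(void)
	{
		example_header = register_sysctl_sz("kernel/example",
						    example_table,
						    ARRAY_SIZE(example_table));
		return example_header ? 0 : -ENOMEM;
	}
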
diff --git a/fs/proc/softirqs.c b/fs/proc/softirqs.c
index f4616083faef..04bb29721419 100644
--- a/fs/proc/softirqs.c
+++ b/fs/proc/softirqs.c
@@ -20,7 +20,7 @@ static int show_softirqs(struct seq_file *p, void *v)
for (i = 0; i < NR_SOFTIRQS; i++) {
seq_printf(p, "%12s:", softirq_to_name[i]);
for_each_possible_cpu(j)
- seq_printf(p, " %10u", kstat_softirqs_cpu(i, j));
+ seq_put_decimal_ull_width(p, " ", kstat_softirqs_cpu(i, j), 10);
seq_putc(p, '\n');
}
return 0;
diff --git a/fs/proc/stat.c b/fs/proc/stat.c
index da60956b2915..8b444e862319 100644
--- a/fs/proc/stat.c
+++ b/fs/proc/stat.c
@@ -76,7 +76,7 @@ static void show_all_irqs(struct seq_file *p)
seq_put_decimal_ull(p, " ", kstat_irqs_usr(i));
next = i + 1;
}
- show_irq_gap(p, nr_irqs - next);
+ show_irq_gap(p, irq_get_nr_irqs() - next);
}
static int show_stat(struct seq_file *p, void *v)
@@ -196,7 +196,7 @@ static int stat_open(struct inode *inode, struct file *file)
unsigned int size = 1024 + 128 * num_online_cpus();
/* minimum size to display an interrupt count : 2 bytes */
- size += 2 * nr_irqs;
+ size += 2 * irq_get_nr_irqs();
return single_open_size(file, show_stat, NULL, size);
}
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 72f14fd59c2d..38a5a3e9cba2 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -909,8 +909,15 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
{
/*
* Don't forget to update Documentation/ on changes.
+ *
+ * The length of the second argument of mnemonics[]
+ * needs to be 3 instead of previously set 2
+ * (i.e. from [BITS_PER_LONG][2] to [BITS_PER_LONG][3])
+ * to avoid spurious
+ * -Werror=unterminated-string-initialization warning
+ * with GCC 15
*/
- static const char mnemonics[BITS_PER_LONG][2] = {
+ static const char mnemonics[BITS_PER_LONG][3] = {
/*
* In case if we meet a flag we don't know about.
*/
@@ -971,7 +978,7 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
#ifdef CONFIG_HAVE_ARCH_USERFAULTFD_MINOR
[ilog2(VM_UFFD_MINOR)] = "ui",
#endif /* CONFIG_HAVE_ARCH_USERFAULTFD_MINOR */
-#ifdef CONFIG_X86_USER_SHADOW_STACK
+#ifdef CONFIG_ARCH_HAS_USER_SHADOW_STACK
[ilog2(VM_SHADOW_STACK)] = "ss",
#endif
#if defined(CONFIG_64BIT) || defined(CONFIG_PPC32)
@@ -987,11 +994,8 @@ static void show_smap_vma_flags(struct seq_file *m, struct vm_area_struct *vma)
for (i = 0; i < BITS_PER_LONG; i++) {
if (!mnemonics[i][0])
continue;
- if (vma->vm_flags & (1UL << i)) {
- seq_putc(m, mnemonics[i][0]);
- seq_putc(m, mnemonics[i][1]);
- seq_putc(m, ' ');
- }
+ if (vma->vm_flags & (1UL << i))
+ seq_printf(m, "%s ", mnemonics[i]);
}
seq_putc(m, '\n');
}
@@ -2661,8 +2665,10 @@ static int pagemap_scan_get_args(struct pm_scan_arg *arg,
return -EFAULT;
if (!arg->vec && arg->vec_len)
return -EINVAL;
+ if (UINT_MAX == SIZE_MAX && arg->vec_len > SIZE_MAX)
+ return -EINVAL;
if (arg->vec && !access_ok((void __user *)(long)arg->vec,
- arg->vec_len * sizeof(struct page_region)))
+ size_mul(arg->vec_len, sizeof(struct page_region))))
return -EFAULT;
/* Fixup default values */
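The pagemap_scan_get_args() hunk adds two guards: on 32-bit kernels (where UINT_MAX == SIZE_MAX) a 64-bit vec_len larger than SIZE_MAX is rejected outright, and the byte count handed to access_ok() is computed with size_mul(), which saturates to SIZE_MAX on overflow instead of wrapping. A minimal sketch of the latter, assuming the linux/overflow.h semantics:

	/* saturates rather than wraps, so access_ok() rejects an oversized
	 * vec_len instead of accepting a truncated range */
	size_t bytes = size_mul(arg->vec_len, sizeof(struct page_region));

	if (!access_ok((void __user *)(long)arg->vec, bytes))
		return -EFAULT;
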
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index b52d85f8ad59..b4521b096058 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -457,10 +457,6 @@ static vm_fault_t mmap_vmcore_fault(struct vm_fault *vmf)
#endif
}
-static const struct vm_operations_struct vmcore_mmap_ops = {
- .fault = mmap_vmcore_fault,
-};
-
/**
* vmcore_alloc_buf - allocate buffer in vmalloc memory
* @size: size of buffer
@@ -488,6 +484,11 @@ static inline char *vmcore_alloc_buf(size_t size)
* virtually contiguous user-space in ELF layout.
*/
#ifdef CONFIG_MMU
+
+static const struct vm_operations_struct vmcore_mmap_ops = {
+ .fault = mmap_vmcore_fault,
+};
+
/*
* remap_oldmem_pfn_checked - do remap_oldmem_pfn_range replacing all pages
* reported as not being ram with the zero page.