Diffstat (limited to 'ipc/sem.c')
 ipc/sem.c | 47 +++++++++++++++++++++++++++++++++--------------
 1 file changed, 33 insertions(+), 14 deletions(-)
diff --git a/ipc/sem.c b/ipc/sem.c
index 06be75d9217a..5af1943ad782 100644
--- a/ipc/sem.c
+++ b/ipc/sem.c
@@ -70,6 +70,7 @@
* The worst-case behavior is nevertheless O(N^2) for N wakeups.
*/
+#include <linux/compat.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/init.h>
@@ -84,6 +85,7 @@
#include <linux/nsproxy.h>
#include <linux/ipc_namespace.h>
#include <linux/sched/wake_q.h>
+#include <linux/nospec.h>
#include <linux/uaccess.h>
#include "util.h"
@@ -104,7 +106,7 @@ struct sem {
/* that alter the semaphore */
struct list_head pending_const; /* pending single-sop operations */
/* that do not alter the semaphore*/
- time_t sem_otime; /* candidate for sem_otime */
+ time64_t sem_otime; /* candidate for sem_otime */
} ____cacheline_aligned_in_smp;
/* One sem_array data structure for each set of semaphores in the system. */
@@ -367,6 +369,7 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
int nsops)
{
struct sem *sem;
+ int idx;
if (nsops != 1) {
/* Complex operation - acquire a full lock */
@@ -384,7 +387,8 @@ static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
*
* Both facts are tracked by use_global_mode.
*/
- sem = &sma->sems[sops->sem_num];
+ idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
+ sem = &sma->sems[idx];
/*
* Initial check for use_global_lock. Just an optimization,
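A minimal sketch (not part of the patch) of the pattern introduced in the hunk above; sem_fetch() is a hypothetical helper, assuming the sem_array layout from this file. array_index_nospec() clamps a user-controlled index to [0, size) even under speculative execution, so a mispredicted bounds check cannot be turned into an out-of-bounds load (Spectre v1):

#include <linux/nospec.h>

/* Hypothetical helper, for illustration only: speculation-safe lookup. */
static struct sem *sem_fetch(struct sem_array *sma, unsigned short sem_num)
{
	int idx;

	if (sem_num >= sma->sem_nsems)	/* architectural bounds check */
		return NULL;
	/* yields sem_num, or 0 if sem_num is speculatively out of range */
	idx = array_index_nospec(sem_num, sma->sem_nsems);
	return &sma->sems[idx];
}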
@@ -637,7 +641,8 @@ static int perform_atomic_semop_slow(struct sem_array *sma, struct sem_queue *q)
un = q->undo;
for (sop = sops; sop < sops + nsops; sop++) {
- curr = &sma->sems[sop->sem_num];
+ int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
+ curr = &sma->sems[idx];
sem_op = sop->sem_op;
result = curr->semval;
@@ -717,7 +722,9 @@ static int perform_atomic_semop(struct sem_array *sma, struct sem_queue *q)
* until the operations can go through.
*/
for (sop = sops; sop < sops + nsops; sop++) {
- curr = &sma->sems[sop->sem_num];
+ int idx = array_index_nospec(sop->sem_num, sma->sem_nsems);
+
+ curr = &sma->sems[idx];
sem_op = sop->sem_op;
result = curr->semval;
@@ -984,10 +991,10 @@ again:
static void set_semotime(struct sem_array *sma, struct sembuf *sops)
{
if (sops == NULL) {
- sma->sems[0].sem_otime = get_seconds();
+ sma->sems[0].sem_otime = ktime_get_real_seconds();
} else {
sma->sems[sops[0].sem_num].sem_otime =
- get_seconds();
+ ktime_get_real_seconds();
}
}
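A minimal sketch (not from the patch) of the helper swap in the hunk above: get_seconds() returned an unsigned long, which is 32 bits wide on 32-bit architectures and overflows in 2038, whereas ktime_get_real_seconds() returns a time64_t that matches the widened sem_otime field; demo_stamp() is a hypothetical illustration only:

#include <linux/timekeeping.h>

/* Hypothetical illustration: record a y2038-safe timestamp. */
static void demo_stamp(time64_t *otime)
{
	*otime = ktime_get_real_seconds();	/* 64-bit seconds since the epoch */
}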
@@ -1214,6 +1221,7 @@ static int semctl_stat(struct ipc_namespace *ns, int semid,
int cmd, struct semid64_ds *semid64)
{
struct sem_array *sma;
+ time64_t semotime;
int id = 0;
int err;
@@ -1257,8 +1265,13 @@ static int semctl_stat(struct ipc_namespace *ns, int semid,
}
kernel_to_ipc64_perm(&sma->sem_perm, &semid64->sem_perm);
- semid64->sem_otime = get_semotime(sma);
+ semotime = get_semotime(sma);
+ semid64->sem_otime = semotime;
semid64->sem_ctime = sma->sem_ctime;
+#ifndef CONFIG_64BIT
+ semid64->sem_otime_high = semotime >> 32;
+ semid64->sem_ctime_high = sma->sem_ctime >> 32;
+#endif
semid64->sem_nsems = sma->sem_nsems;
ipc_unlock_object(&sma->sem_perm);
@@ -1349,6 +1362,7 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
return -EIDRM;
}
+ semnum = array_index_nospec(semnum, sma->sem_nsems);
curr = &sma->sems[semnum];
ipc_assert_locked_object(&sma->sem_perm);
@@ -1502,6 +1516,8 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
err = -EIDRM;
goto out_unlock;
}
+
+ semnum = array_index_nospec(semnum, nsems);
curr = &sma->sems[semnum];
switch (cmd) {
@@ -1704,8 +1720,10 @@ static int copy_compat_semid_to_user(void __user *buf, struct semid64_ds *in,
struct compat_semid64_ds v;
memset(&v, 0, sizeof(v));
to_compat_ipc64_perm(&v.sem_perm, &in->sem_perm);
- v.sem_otime = in->sem_otime;
- v.sem_ctime = in->sem_ctime;
+ v.sem_otime = lower_32_bits(in->sem_otime);
+ v.sem_otime_high = upper_32_bits(in->sem_otime);
+ v.sem_ctime = lower_32_bits(in->sem_ctime);
+ v.sem_ctime_high = upper_32_bits(in->sem_ctime);
v.sem_nsems = in->sem_nsems;
return copy_to_user(buf, &v, sizeof(v));
} else {
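A minimal sketch (not part of the patch) of the split performed above, assuming compat_semid64_ds now carries separate sem_otime_high/sem_ctime_high fields for the upper bits; demo_split_time() is a hypothetical helper built on lower_32_bits()/upper_32_bits() from <linux/kernel.h>:

#include <linux/kernel.h>

/* Hypothetical helper: split a 64-bit timestamp for a 32-bit compat ABI. */
static void demo_split_time(time64_t t, u32 *lo, u32 *hi)
{
	*lo = lower_32_bits(t);	/* legacy 32-bit seconds field            */
	*hi = upper_32_bits(t);	/* upper bits so userspace can reassemble */
}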
@@ -1936,7 +1954,7 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
if (nsops > ns->sc_semopm)
return -E2BIG;
if (nsops > SEMOPM_FAST) {
- sops = kvmalloc(sizeof(*sops)*nsops, GFP_KERNEL);
+ sops = kvmalloc_array(nsops, sizeof(*sops), GFP_KERNEL);
if (sops == NULL)
return -ENOMEM;
}
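A minimal sketch (not from the patch) of the allocation change above: kvmalloc_array() verifies that nsops * sizeof(*sops) does not overflow before allocating, whereas the open-coded multiplication could silently wrap for a huge nsops; demo_alloc_sops() is a hypothetical helper, and the buffer is still released with kvfree():

#include <linux/mm.h>
#include <linux/sem.h>

/* Hypothetical helper: overflow-checked allocation of the sembuf array. */
static struct sembuf *demo_alloc_sops(unsigned int nsops)
{
	return kvmalloc_array(nsops, sizeof(struct sembuf), GFP_KERNEL);
}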
@@ -2072,7 +2090,8 @@ static long do_semtimedop(int semid, struct sembuf __user *tsops,
*/
if (nsops == 1) {
struct sem *curr;
- curr = &sma->sems[sops->sem_num];
+ int idx = array_index_nospec(sops->sem_num, sma->sem_nsems);
+ curr = &sma->sems[idx];
if (alter) {
if (sma->complex_count) {
@@ -2168,7 +2187,7 @@ out_free:
}
long ksys_semtimedop(int semid, struct sembuf __user *tsops,
- unsigned int nsops, const struct timespec __user *timeout)
+ unsigned int nsops, const struct __kernel_timespec __user *timeout)
{
if (timeout) {
struct timespec64 ts;
@@ -2180,12 +2199,12 @@ long ksys_semtimedop(int semid, struct sembuf __user *tsops,
}
SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
- unsigned int, nsops, const struct timespec __user *, timeout)
+ unsigned int, nsops, const struct __kernel_timespec __user *, timeout)
{
return ksys_semtimedop(semid, tsops, nsops, timeout);
}
-#ifdef CONFIG_COMPAT
+#ifdef CONFIG_COMPAT_32BIT_TIME
long compat_ksys_semtimedop(int semid, struct sembuf __user *tsems,
unsigned int nsops,
const struct compat_timespec __user *timeout)