idx int64 | func_before string | Vulnerability Classification string | vul int64 | func_after string | patch string | CWE ID string | lines_before string | lines_after string |
|---|---|---|---|---|---|---|---|---|
23,600 | static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
{
struct oz_serial_ctx *ctx;
spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
if (ctx)
atomic_inc(&ctx->ref_count);
spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
return ctx;
}
| DoS Overflow | 0 | static struct oz_serial_ctx *oz_cdev_claim_ctx(struct oz_pd *pd)
{
struct oz_serial_ctx *ctx;
spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
ctx = (struct oz_serial_ctx *)pd->app_ctx[OZ_APPID_SERIAL-1];
if (ctx)
atomic_inc(&ctx->ref_count);
spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
return ctx;
}
| @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd) | CWE-119 | null | null |
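Note: rows 23,600–23,611 all carry the same CWE-119 patch, which adds a single bounds check at the top of oz_cdev_write() so a user-controlled `count` can no longer overrun the fixed-size element buffer. Below is a minimal user-space sketch of that guard; the struct layouts are illustrative stand-ins, not the driver's real `oz_elt_info`/`oz_elt`/`oz_app_hdr` definitions.

```c
#include <stddef.h>
#include <stdio.h>

/* Illustrative stand-ins; the real driver's header layouts differ. */
struct elt_hdr { unsigned char type; unsigned char length; };
struct app_hdr { unsigned char app_id; unsigned char elt_seq_num; };
struct elt_info { unsigned char data[255]; };

/* Mirror of the inserted guard:
 *   if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
 *           return -EINVAL;
 * Without it, a large write() count overruns the fixed element buffer. */
static int check_write_len(size_t count)
{
	size_t max = sizeof(((struct elt_info *)0)->data)
			- sizeof(struct elt_hdr) - sizeof(struct app_hdr);

	return count > max ? -1 /* -EINVAL in the kernel */ : 0;
}

int main(void)
{
	printf("count=16  -> %s\n", check_write_len(16) ? "rejected" : "ok");
	printf("count=300 -> %s\n", check_write_len(300) ? "rejected" : "ok");
	return 0;
}
```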
23,601 | int oz_cdev_deregister(void)
{
cdev_del(&g_cdev.cdev);
unregister_chrdev_region(g_cdev.devnum, 1);
if (g_oz_class) {
device_destroy(g_oz_class, g_cdev.devnum);
class_destroy(g_oz_class);
}
return 0;
}
| DoS Overflow | 0 | int oz_cdev_deregister(void)
{
cdev_del(&g_cdev.cdev);
unregister_chrdev_region(g_cdev.devnum, 1);
if (g_oz_class) {
device_destroy(g_oz_class, g_cdev.devnum);
class_destroy(g_oz_class);
}
return 0;
}
| @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd) | CWE-119 | null | null |
23,602 | static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int rc = 0;
if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
return -ENOTTY;
if (_IOC_NR(cmd) > OZ_IOCTL_MAX)
return -ENOTTY;
if (_IOC_DIR(cmd) & _IOC_READ)
rc = !access_ok(VERIFY_WRITE, (void __user *)arg,
_IOC_SIZE(cmd));
else if (_IOC_DIR(cmd) & _IOC_WRITE)
rc = !access_ok(VERIFY_READ, (void __user *)arg,
_IOC_SIZE(cmd));
if (rc)
return -EFAULT;
switch (cmd) {
case OZ_IOCTL_GET_PD_LIST: {
struct oz_pd_list list;
oz_dbg(ON, "OZ_IOCTL_GET_PD_LIST\n");
memset(&list, 0, sizeof(list));
list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS);
if (copy_to_user((void __user *)arg, &list,
sizeof(list)))
return -EFAULT;
}
break;
case OZ_IOCTL_SET_ACTIVE_PD: {
u8 addr[ETH_ALEN];
oz_dbg(ON, "OZ_IOCTL_SET_ACTIVE_PD\n");
if (copy_from_user(addr, (void __user *)arg, ETH_ALEN))
return -EFAULT;
rc = oz_set_active_pd(addr);
}
break;
case OZ_IOCTL_GET_ACTIVE_PD: {
u8 addr[ETH_ALEN];
oz_dbg(ON, "OZ_IOCTL_GET_ACTIVE_PD\n");
spin_lock_bh(&g_cdev.lock);
memcpy(addr, g_cdev.active_addr, ETH_ALEN);
spin_unlock_bh(&g_cdev.lock);
if (copy_to_user((void __user *)arg, addr, ETH_ALEN))
return -EFAULT;
}
break;
case OZ_IOCTL_ADD_BINDING:
case OZ_IOCTL_REMOVE_BINDING: {
struct oz_binding_info b;
if (copy_from_user(&b, (void __user *)arg,
sizeof(struct oz_binding_info))) {
return -EFAULT;
}
/* Make sure name is null terminated. */
b.name[OZ_MAX_BINDING_LEN-1] = 0;
if (cmd == OZ_IOCTL_ADD_BINDING)
oz_binding_add(b.name);
else
oz_binding_remove(b.name);
}
break;
}
return rc;
}
| DoS Overflow | 0 | static long oz_cdev_ioctl(struct file *filp, unsigned int cmd,
unsigned long arg)
{
int rc = 0;
if (_IOC_TYPE(cmd) != OZ_IOCTL_MAGIC)
return -ENOTTY;
if (_IOC_NR(cmd) > OZ_IOCTL_MAX)
return -ENOTTY;
if (_IOC_DIR(cmd) & _IOC_READ)
rc = !access_ok(VERIFY_WRITE, (void __user *)arg,
_IOC_SIZE(cmd));
else if (_IOC_DIR(cmd) & _IOC_WRITE)
rc = !access_ok(VERIFY_READ, (void __user *)arg,
_IOC_SIZE(cmd));
if (rc)
return -EFAULT;
switch (cmd) {
case OZ_IOCTL_GET_PD_LIST: {
struct oz_pd_list list;
oz_dbg(ON, "OZ_IOCTL_GET_PD_LIST\n");
memset(&list, 0, sizeof(list));
list.count = oz_get_pd_list(list.addr, OZ_MAX_PDS);
if (copy_to_user((void __user *)arg, &list,
sizeof(list)))
return -EFAULT;
}
break;
case OZ_IOCTL_SET_ACTIVE_PD: {
u8 addr[ETH_ALEN];
oz_dbg(ON, "OZ_IOCTL_SET_ACTIVE_PD\n");
if (copy_from_user(addr, (void __user *)arg, ETH_ALEN))
return -EFAULT;
rc = oz_set_active_pd(addr);
}
break;
case OZ_IOCTL_GET_ACTIVE_PD: {
u8 addr[ETH_ALEN];
oz_dbg(ON, "OZ_IOCTL_GET_ACTIVE_PD\n");
spin_lock_bh(&g_cdev.lock);
memcpy(addr, g_cdev.active_addr, ETH_ALEN);
spin_unlock_bh(&g_cdev.lock);
if (copy_to_user((void __user *)arg, addr, ETH_ALEN))
return -EFAULT;
}
break;
case OZ_IOCTL_ADD_BINDING:
case OZ_IOCTL_REMOVE_BINDING: {
struct oz_binding_info b;
if (copy_from_user(&b, (void __user *)arg,
sizeof(struct oz_binding_info))) {
return -EFAULT;
}
/* Make sure name is null terminated. */
b.name[OZ_MAX_BINDING_LEN-1] = 0;
if (cmd == OZ_IOCTL_ADD_BINDING)
oz_binding_add(b.name);
else
oz_binding_remove(b.name);
}
break;
}
return rc;
}
| @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd) | CWE-119 | null | null |
23,603 | static int oz_cdev_open(struct inode *inode, struct file *filp)
{
struct oz_cdev *dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
oz_dbg(ON, "major = %d minor = %d\n", imajor(inode), iminor(inode));
filp->private_data = dev;
return 0;
}
| DoS Overflow | 0 | static int oz_cdev_open(struct inode *inode, struct file *filp)
{
struct oz_cdev *dev = container_of(inode->i_cdev, struct oz_cdev, cdev);
oz_dbg(ON, "major = %d minor = %d\n", imajor(inode), iminor(inode));
filp->private_data = dev;
return 0;
}
| @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd) | CWE-119 | null | null |
23,604 | static unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
{
unsigned int ret = 0;
struct oz_cdev *dev = filp->private_data;
oz_dbg(ON, "Poll called wait = %p\n", wait);
spin_lock_bh(&dev->lock);
if (dev->active_pd) {
struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd);
if (ctx) {
if (ctx->rd_in != ctx->rd_out)
ret |= POLLIN | POLLRDNORM;
oz_cdev_release_ctx(ctx);
}
}
spin_unlock_bh(&dev->lock);
if (wait)
poll_wait(filp, &dev->rdq, wait);
return ret;
}
| DoS Overflow | 0 | static unsigned int oz_cdev_poll(struct file *filp, poll_table *wait)
{
unsigned int ret = 0;
struct oz_cdev *dev = filp->private_data;
oz_dbg(ON, "Poll called wait = %p\n", wait);
spin_lock_bh(&dev->lock);
if (dev->active_pd) {
struct oz_serial_ctx *ctx = oz_cdev_claim_ctx(dev->active_pd);
if (ctx) {
if (ctx->rd_in != ctx->rd_out)
ret |= POLLIN | POLLRDNORM;
oz_cdev_release_ctx(ctx);
}
}
spin_unlock_bh(&dev->lock);
if (wait)
poll_wait(filp, &dev->rdq, wait);
return ret;
}
| @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd) | CWE-119 | null | null |
23,605 | static ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
loff_t *fpos)
{
int n;
int ix;
struct oz_pd *pd;
struct oz_serial_ctx *ctx;
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd)
oz_pd_get(pd);
spin_unlock_bh(&g_cdev.lock);
if (pd == NULL)
return -1;
ctx = oz_cdev_claim_ctx(pd);
if (ctx == NULL)
goto out2;
n = ctx->rd_in - ctx->rd_out;
if (n < 0)
n += OZ_RD_BUF_SZ;
if (count > n)
count = n;
ix = ctx->rd_out;
n = OZ_RD_BUF_SZ - ix;
if (n > count)
n = count;
if (copy_to_user(buf, &ctx->rd_buf[ix], n)) {
count = 0;
goto out1;
}
ix += n;
if (ix == OZ_RD_BUF_SZ)
ix = 0;
if (n < count) {
if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) {
count = 0;
goto out1;
}
ix = count-n;
}
ctx->rd_out = ix;
out1:
oz_cdev_release_ctx(ctx);
out2:
oz_pd_put(pd);
return count;
}
| DoS Overflow | 0 | static ssize_t oz_cdev_read(struct file *filp, char __user *buf, size_t count,
loff_t *fpos)
{
int n;
int ix;
struct oz_pd *pd;
struct oz_serial_ctx *ctx;
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd)
oz_pd_get(pd);
spin_unlock_bh(&g_cdev.lock);
if (pd == NULL)
return -1;
ctx = oz_cdev_claim_ctx(pd);
if (ctx == NULL)
goto out2;
n = ctx->rd_in - ctx->rd_out;
if (n < 0)
n += OZ_RD_BUF_SZ;
if (count > n)
count = n;
ix = ctx->rd_out;
n = OZ_RD_BUF_SZ - ix;
if (n > count)
n = count;
if (copy_to_user(buf, &ctx->rd_buf[ix], n)) {
count = 0;
goto out1;
}
ix += n;
if (ix == OZ_RD_BUF_SZ)
ix = 0;
if (n < count) {
if (copy_to_user(&buf[n], ctx->rd_buf, count-n)) {
count = 0;
goto out1;
}
ix = count-n;
}
ctx->rd_out = ix;
out1:
oz_cdev_release_ctx(ctx);
out2:
oz_pd_put(pd);
return count;
}
| @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd) | CWE-119 | null | null |
23,606 | int oz_cdev_register(void)
{
int err;
struct device *dev;
memset(&g_cdev, 0, sizeof(g_cdev));
err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan");
if (err < 0)
return err;
oz_dbg(ON, "Alloc dev number %d:%d\n",
MAJOR(g_cdev.devnum), MINOR(g_cdev.devnum));
cdev_init(&g_cdev.cdev, &oz_fops);
g_cdev.cdev.owner = THIS_MODULE;
g_cdev.cdev.ops = &oz_fops;
spin_lock_init(&g_cdev.lock);
init_waitqueue_head(&g_cdev.rdq);
err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1);
if (err < 0) {
oz_dbg(ON, "Failed to add cdev\n");
goto unregister;
}
g_oz_class = class_create(THIS_MODULE, "ozmo_wpan");
if (IS_ERR(g_oz_class)) {
oz_dbg(ON, "Failed to register ozmo_wpan class\n");
err = PTR_ERR(g_oz_class);
goto delete;
}
dev = device_create(g_oz_class, NULL, g_cdev.devnum, NULL, "ozwpan");
if (IS_ERR(dev)) {
oz_dbg(ON, "Failed to create sysfs entry for cdev\n");
err = PTR_ERR(dev);
goto delete;
}
return 0;
delete:
cdev_del(&g_cdev.cdev);
unregister:
unregister_chrdev_region(g_cdev.devnum, 1);
return err;
}
| DoS Overflow | 0 | int oz_cdev_register(void)
{
int err;
struct device *dev;
memset(&g_cdev, 0, sizeof(g_cdev));
err = alloc_chrdev_region(&g_cdev.devnum, 0, 1, "ozwpan");
if (err < 0)
return err;
oz_dbg(ON, "Alloc dev number %d:%d\n",
MAJOR(g_cdev.devnum), MINOR(g_cdev.devnum));
cdev_init(&g_cdev.cdev, &oz_fops);
g_cdev.cdev.owner = THIS_MODULE;
g_cdev.cdev.ops = &oz_fops;
spin_lock_init(&g_cdev.lock);
init_waitqueue_head(&g_cdev.rdq);
err = cdev_add(&g_cdev.cdev, g_cdev.devnum, 1);
if (err < 0) {
oz_dbg(ON, "Failed to add cdev\n");
goto unregister;
}
g_oz_class = class_create(THIS_MODULE, "ozmo_wpan");
if (IS_ERR(g_oz_class)) {
oz_dbg(ON, "Failed to register ozmo_wpan class\n");
err = PTR_ERR(g_oz_class);
goto delete;
}
dev = device_create(g_oz_class, NULL, g_cdev.devnum, NULL, "ozwpan");
if (IS_ERR(dev)) {
oz_dbg(ON, "Failed to create sysfs entry for cdev\n");
err = PTR_ERR(dev);
goto delete;
}
return 0;
delete:
cdev_del(&g_cdev.cdev);
unregister:
unregister_chrdev_region(g_cdev.devnum, 1);
return err;
}
| @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd) | CWE-119 | null | null |
23,607 | static int oz_cdev_release(struct inode *inode, struct file *filp)
{
return 0;
}
| DoS Overflow | 0 | static int oz_cdev_release(struct inode *inode, struct file *filp)
{
return 0;
}
| @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd) | CWE-119 | null | null |
23,608 | static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
{
if (atomic_dec_and_test(&ctx->ref_count)) {
oz_dbg(ON, "Dealloc serial context\n");
kfree(ctx);
}
}
| DoS Overflow | 0 | static void oz_cdev_release_ctx(struct oz_serial_ctx *ctx)
{
if (atomic_dec_and_test(&ctx->ref_count)) {
oz_dbg(ON, "Dealloc serial context\n");
kfree(ctx);
}
}
| @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd) | CWE-119 | null | null |
23,609 | int oz_cdev_start(struct oz_pd *pd, int resume)
{
struct oz_serial_ctx *ctx;
struct oz_serial_ctx *old_ctx;
if (resume) {
oz_dbg(ON, "Serial service resumed\n");
return 0;
}
ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC);
if (ctx == NULL)
return -ENOMEM;
atomic_set(&ctx->ref_count, 1);
ctx->tx_seq_num = 1;
spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
old_ctx = pd->app_ctx[OZ_APPID_SERIAL-1];
if (old_ctx) {
spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
kfree(ctx);
} else {
pd->app_ctx[OZ_APPID_SERIAL-1] = ctx;
spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
}
spin_lock(&g_cdev.lock);
if ((g_cdev.active_pd == NULL) &&
(memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
oz_pd_get(pd);
g_cdev.active_pd = pd;
oz_dbg(ON, "Active PD arrived\n");
}
spin_unlock(&g_cdev.lock);
oz_dbg(ON, "Serial service started\n");
return 0;
}
| DoS Overflow | 0 | int oz_cdev_start(struct oz_pd *pd, int resume)
{
struct oz_serial_ctx *ctx;
struct oz_serial_ctx *old_ctx;
if (resume) {
oz_dbg(ON, "Serial service resumed\n");
return 0;
}
ctx = kzalloc(sizeof(struct oz_serial_ctx), GFP_ATOMIC);
if (ctx == NULL)
return -ENOMEM;
atomic_set(&ctx->ref_count, 1);
ctx->tx_seq_num = 1;
spin_lock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
old_ctx = pd->app_ctx[OZ_APPID_SERIAL-1];
if (old_ctx) {
spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
kfree(ctx);
} else {
pd->app_ctx[OZ_APPID_SERIAL-1] = ctx;
spin_unlock_bh(&pd->app_lock[OZ_APPID_SERIAL-1]);
}
spin_lock(&g_cdev.lock);
if ((g_cdev.active_pd == NULL) &&
(memcmp(pd->mac_addr, g_cdev.active_addr, ETH_ALEN) == 0)) {
oz_pd_get(pd);
g_cdev.active_pd = pd;
oz_dbg(ON, "Active PD arrived\n");
}
spin_unlock(&g_cdev.lock);
oz_dbg(ON, "Serial service started\n");
return 0;
}
| @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd) | CWE-119 | null | null |
23,610 | void oz_cdev_term(void)
{
oz_app_enable(OZ_APPID_SERIAL, 0);
}
| DoS Overflow | 0 | void oz_cdev_term(void)
{
oz_app_enable(OZ_APPID_SERIAL, 0);
}
| @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd) | CWE-119 | null | null |
23,611 | static int oz_set_active_pd(const u8 *addr)
{
int rc = 0;
struct oz_pd *pd;
struct oz_pd *old_pd;
pd = oz_pd_find(addr);
if (pd) {
spin_lock_bh(&g_cdev.lock);
memcpy(g_cdev.active_addr, addr, ETH_ALEN);
old_pd = g_cdev.active_pd;
g_cdev.active_pd = pd;
spin_unlock_bh(&g_cdev.lock);
if (old_pd)
oz_pd_put(old_pd);
} else {
if (is_zero_ether_addr(addr)) {
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
g_cdev.active_pd = NULL;
memset(g_cdev.active_addr, 0,
sizeof(g_cdev.active_addr));
spin_unlock_bh(&g_cdev.lock);
if (pd)
oz_pd_put(pd);
} else {
rc = -1;
}
}
return rc;
}
| DoS Overflow | 0 | static int oz_set_active_pd(const u8 *addr)
{
int rc = 0;
struct oz_pd *pd;
struct oz_pd *old_pd;
pd = oz_pd_find(addr);
if (pd) {
spin_lock_bh(&g_cdev.lock);
memcpy(g_cdev.active_addr, addr, ETH_ALEN);
old_pd = g_cdev.active_pd;
g_cdev.active_pd = pd;
spin_unlock_bh(&g_cdev.lock);
if (old_pd)
oz_pd_put(old_pd);
} else {
if (is_zero_ether_addr(addr)) {
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
g_cdev.active_pd = NULL;
memset(g_cdev.active_addr, 0,
sizeof(g_cdev.active_addr));
spin_unlock_bh(&g_cdev.lock);
if (pd)
oz_pd_put(pd);
} else {
rc = -1;
}
}
return rc;
}
| @@ -155,6 +155,9 @@ static ssize_t oz_cdev_write(struct file *filp, const char __user *buf,
struct oz_app_hdr *app_hdr;
struct oz_serial_ctx *ctx;
+ if (count > sizeof(ei->data) - sizeof(*elt) - sizeof(*app_hdr))
+ return -EINVAL;
+
spin_lock_bh(&g_cdev.lock);
pd = g_cdev.active_pd;
if (pd) | CWE-119 | null | null |
23,612 | static int exitcode_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, exitcode_proc_show, NULL);
}
| DoS Overflow | 0 | static int exitcode_proc_open(struct inode *inode, struct file *file)
{
return single_open(file, exitcode_proc_show, NULL);
}
| @@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
+ size_t size;
int tmp;
- if (copy_from_user(buf, buffer, count))
+ size = min(count, sizeof(buf));
+ if (copy_from_user(buf, buffer, size))
return -EFAULT;
tmp = simple_strtol(buf, &end, 0); | CWE-119 | null | null |
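Note: rows 23,612–23,613 share a patch that clamps the copy length to the destination buffer before copying from user space. A hedged user-space sketch of the same pattern follows; the `- 1` and NUL termination here are extra safety assumptions so `strtol()` stays in bounds, not part of the kernel fix, which uses `size = min(count, sizeof(buf))` as-is.

```c
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

/* Sketch of the fix: never copy more bytes than the destination holds. */
static int parse_exitcode(const char *user_data, size_t count)
{
	char buf[sizeof("nnnnn\0")] = "";	/* 7 bytes, like the kernel's */
	size_t size = count < sizeof(buf) - 1 ? count : sizeof(buf) - 1;

	memcpy(buf, user_data, size);	/* stands in for copy_from_user() */
	return (int)strtol(buf, NULL, 0);
}

int main(void)
{
	/* An attacker-sized count no longer overruns the stack buffer. */
	printf("%d\n", parse_exitcode("12345 trailing junk", 19));
	return 0;
}
```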
23,613 | static int make_proc_exitcode(void)
{
struct proc_dir_entry *ent;
ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_fops);
if (ent == NULL) {
printk(KERN_WARNING "make_proc_exitcode : Failed to register "
"/proc/exitcode\n");
return 0;
}
return 0;
}
| DoS Overflow | 0 | static int make_proc_exitcode(void)
{
struct proc_dir_entry *ent;
ent = proc_create("exitcode", 0600, NULL, &exitcode_proc_fops);
if (ent == NULL) {
printk(KERN_WARNING "make_proc_exitcode : Failed to register "
"/proc/exitcode\n");
return 0;
}
return 0;
}
| @@ -40,9 +40,11 @@ static ssize_t exitcode_proc_write(struct file *file,
const char __user *buffer, size_t count, loff_t *pos)
{
char *end, buf[sizeof("nnnnn\0")];
+ size_t size;
int tmp;
- if (copy_from_user(buf, buffer, count))
+ size = min(count, sizeof(buf));
+ if (copy_from_user(buf, buffer, size))
return -EFAULT;
tmp = simple_strtol(buf, &end, 0); | CWE-119 | null | null |
23,614 | SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
struct ipc_namespace *ns;
struct ipc_ops msg_ops;
struct ipc_params msg_params;
ns = current->nsproxy->ipc_ns;
msg_ops.getnew = newque;
msg_ops.associate = msg_security;
msg_ops.more_checks = NULL;
msg_params.key = key;
msg_params.flg = msgflg;
return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
| DoS | 0 | SYSCALL_DEFINE2(msgget, key_t, key, int, msgflg)
{
struct ipc_namespace *ns;
struct ipc_ops msg_ops;
struct ipc_params msg_params;
ns = current->nsproxy->ipc_ns;
msg_ops.getnew = newque;
msg_ops.associate = msg_security;
msg_ops.more_checks = NULL;
msg_params.key = key;
msg_params.flg = msgflg;
return ipcget(ns, &msg_ids(ns), &msg_ops, &msg_params);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
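Note: rows 23,614–23,639 all carry the CWE-189 patch that makes do_msgsnd() check the return value of ipc_rcu_getref() and bail out with -EIDRM when the queue is already being torn down. The sketch below is a minimal user-space model of a get-reference primitive with those semantics; the names and implementation are illustrative, not the kernel's.

```c
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* Taking a reference must fail once the count has already dropped to
 * zero, so a sleeping sender cannot resurrect a queue that freeque()
 * is in the middle of destroying. */
struct obj { atomic_int refcount; };

static bool obj_getref(struct obj *o)	/* models ipc_rcu_getref() */
{
	int old = atomic_load(&o->refcount);

	while (old > 0) {
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;	/* reference taken */
	}
	return false;	/* object already dying: caller returns -EIDRM */
}

int main(void)
{
	struct obj live = { 1 }, dead = { 0 };

	printf("live: %s\n", obj_getref(&live) ? "ref taken" : "EIDRM");
	printf("dead: %s\n", obj_getref(&dead) ? "ref taken" : "EIDRM");
	return 0;
}
```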
23,615 | SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
struct msg_queue *msq;
int err, version;
struct ipc_namespace *ns;
if (msqid < 0 || cmd < 0)
return -EINVAL;
version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
switch (cmd) {
case IPC_INFO:
case MSG_INFO:
{
struct msginfo msginfo;
int max_id;
if (!buf)
return -EFAULT;
/*
* We must not return kernel stack data.
* Due to padding, it's not enough
* to set all member fields.
*/
err = security_msg_queue_msgctl(NULL, cmd);
if (err)
return err;
memset(&msginfo, 0, sizeof(msginfo));
msginfo.msgmni = ns->msg_ctlmni;
msginfo.msgmax = ns->msg_ctlmax;
msginfo.msgmnb = ns->msg_ctlmnb;
msginfo.msgssz = MSGSSZ;
msginfo.msgseg = MSGSEG;
down_read(&msg_ids(ns).rw_mutex);
if (cmd == MSG_INFO) {
msginfo.msgpool = msg_ids(ns).in_use;
msginfo.msgmap = atomic_read(&ns->msg_hdrs);
msginfo.msgtql = atomic_read(&ns->msg_bytes);
} else {
msginfo.msgmap = MSGMAP;
msginfo.msgpool = MSGPOOL;
msginfo.msgtql = MSGTQL;
}
max_id = ipc_get_maxid(&msg_ids(ns));
up_read(&msg_ids(ns).rw_mutex);
if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
return -EFAULT;
return (max_id < 0) ? 0 : max_id;
}
case MSG_STAT: /* msqid is an index rather than a msg queue id */
case IPC_STAT:
{
struct msqid64_ds tbuf;
int success_return;
if (!buf)
return -EFAULT;
if (cmd == MSG_STAT) {
msq = msg_lock(ns, msqid);
if (IS_ERR(msq))
return PTR_ERR(msq);
success_return = msq->q_perm.id;
} else {
msq = msg_lock_check(ns, msqid);
if (IS_ERR(msq))
return PTR_ERR(msq);
success_return = 0;
}
err = -EACCES;
if (ipcperms(ns, &msq->q_perm, S_IRUGO))
goto out_unlock;
err = security_msg_queue_msgctl(msq, cmd);
if (err)
goto out_unlock;
memset(&tbuf, 0, sizeof(tbuf));
kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
tbuf.msg_stime = msq->q_stime;
tbuf.msg_rtime = msq->q_rtime;
tbuf.msg_ctime = msq->q_ctime;
tbuf.msg_cbytes = msq->q_cbytes;
tbuf.msg_qnum = msq->q_qnum;
tbuf.msg_qbytes = msq->q_qbytes;
tbuf.msg_lspid = msq->q_lspid;
tbuf.msg_lrpid = msq->q_lrpid;
msg_unlock(msq);
if (copy_msqid_to_user(buf, &tbuf, version))
return -EFAULT;
return success_return;
}
case IPC_SET:
case IPC_RMID:
err = msgctl_down(ns, msqid, cmd, buf, version);
return err;
default:
return -EINVAL;
}
out_unlock:
msg_unlock(msq);
return err;
}
| DoS | 0 | SYSCALL_DEFINE3(msgctl, int, msqid, int, cmd, struct msqid_ds __user *, buf)
{
struct msg_queue *msq;
int err, version;
struct ipc_namespace *ns;
if (msqid < 0 || cmd < 0)
return -EINVAL;
version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
switch (cmd) {
case IPC_INFO:
case MSG_INFO:
{
struct msginfo msginfo;
int max_id;
if (!buf)
return -EFAULT;
/*
* We must not return kernel stack data.
* Due to padding, it's not enough
* to set all member fields.
*/
err = security_msg_queue_msgctl(NULL, cmd);
if (err)
return err;
memset(&msginfo, 0, sizeof(msginfo));
msginfo.msgmni = ns->msg_ctlmni;
msginfo.msgmax = ns->msg_ctlmax;
msginfo.msgmnb = ns->msg_ctlmnb;
msginfo.msgssz = MSGSSZ;
msginfo.msgseg = MSGSEG;
down_read(&msg_ids(ns).rw_mutex);
if (cmd == MSG_INFO) {
msginfo.msgpool = msg_ids(ns).in_use;
msginfo.msgmap = atomic_read(&ns->msg_hdrs);
msginfo.msgtql = atomic_read(&ns->msg_bytes);
} else {
msginfo.msgmap = MSGMAP;
msginfo.msgpool = MSGPOOL;
msginfo.msgtql = MSGTQL;
}
max_id = ipc_get_maxid(&msg_ids(ns));
up_read(&msg_ids(ns).rw_mutex);
if (copy_to_user(buf, &msginfo, sizeof(struct msginfo)))
return -EFAULT;
return (max_id < 0) ? 0 : max_id;
}
case MSG_STAT: /* msqid is an index rather than a msg queue id */
case IPC_STAT:
{
struct msqid64_ds tbuf;
int success_return;
if (!buf)
return -EFAULT;
if (cmd == MSG_STAT) {
msq = msg_lock(ns, msqid);
if (IS_ERR(msq))
return PTR_ERR(msq);
success_return = msq->q_perm.id;
} else {
msq = msg_lock_check(ns, msqid);
if (IS_ERR(msq))
return PTR_ERR(msq);
success_return = 0;
}
err = -EACCES;
if (ipcperms(ns, &msq->q_perm, S_IRUGO))
goto out_unlock;
err = security_msg_queue_msgctl(msq, cmd);
if (err)
goto out_unlock;
memset(&tbuf, 0, sizeof(tbuf));
kernel_to_ipc64_perm(&msq->q_perm, &tbuf.msg_perm);
tbuf.msg_stime = msq->q_stime;
tbuf.msg_rtime = msq->q_rtime;
tbuf.msg_ctime = msq->q_ctime;
tbuf.msg_cbytes = msq->q_cbytes;
tbuf.msg_qnum = msq->q_qnum;
tbuf.msg_qbytes = msq->q_qbytes;
tbuf.msg_lspid = msq->q_lspid;
tbuf.msg_lrpid = msq->q_lrpid;
msg_unlock(msq);
if (copy_msqid_to_user(buf, &tbuf, version))
return -EFAULT;
return success_return;
}
case IPC_SET:
case IPC_RMID:
err = msgctl_down(ns, msqid, cmd, buf, version);
return err;
default:
return -EINVAL;
}
out_unlock:
msg_unlock(msq);
return err;
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,616 | SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
long, msgtyp, int, msgflg)
{
return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}
| DoS | 0 | SYSCALL_DEFINE5(msgrcv, int, msqid, struct msgbuf __user *, msgp, size_t, msgsz,
long, msgtyp, int, msgflg)
{
return do_msgrcv(msqid, msgp, msgsz, msgtyp, msgflg, do_msg_fill);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,617 | static inline int convert_mode(long *msgtyp, int msgflg)
{
if (msgflg & MSG_COPY)
return SEARCH_NUMBER;
/*
* find message of correct type.
* msgtyp = 0 => get first.
* msgtyp > 0 => get first message of matching type.
* msgtyp < 0 => get the message with the lowest type that is <= abs(msgtyp).
*/
if (*msgtyp == 0)
return SEARCH_ANY;
if (*msgtyp < 0) {
*msgtyp = -*msgtyp;
return SEARCH_LESSEQUAL;
}
if (msgflg & MSG_EXCEPT)
return SEARCH_NOTEQUAL;
return SEARCH_EQUAL;
}
| DoS | 0 | static inline int convert_mode(long *msgtyp, int msgflg)
{
if (msgflg & MSG_COPY)
return SEARCH_NUMBER;
/*
* find message of correct type.
* msgtyp = 0 => get first.
* msgtyp > 0 => get first message of matching type.
* msgtyp < 0 => get the message with the lowest type that is <= abs(msgtyp).
*/
if (*msgtyp == 0)
return SEARCH_ANY;
if (*msgtyp < 0) {
*msgtyp = -*msgtyp;
return SEARCH_LESSEQUAL;
}
if (msgflg & MSG_EXCEPT)
return SEARCH_NOTEQUAL;
return SEARCH_EQUAL;
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,618 | copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
switch(version) {
case IPC_64:
if (copy_from_user(out, buf, sizeof(*out)))
return -EFAULT;
return 0;
case IPC_OLD:
{
struct msqid_ds tbuf_old;
if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
return -EFAULT;
out->msg_perm.uid = tbuf_old.msg_perm.uid;
out->msg_perm.gid = tbuf_old.msg_perm.gid;
out->msg_perm.mode = tbuf_old.msg_perm.mode;
if (tbuf_old.msg_qbytes == 0)
out->msg_qbytes = tbuf_old.msg_lqbytes;
else
out->msg_qbytes = tbuf_old.msg_qbytes;
return 0;
}
default:
return -EINVAL;
}
}
| DoS | 0 | copy_msqid_from_user(struct msqid64_ds *out, void __user *buf, int version)
{
switch(version) {
case IPC_64:
if (copy_from_user(out, buf, sizeof(*out)))
return -EFAULT;
return 0;
case IPC_OLD:
{
struct msqid_ds tbuf_old;
if (copy_from_user(&tbuf_old, buf, sizeof(tbuf_old)))
return -EFAULT;
out->msg_perm.uid = tbuf_old.msg_perm.uid;
out->msg_perm.gid = tbuf_old.msg_perm.gid;
out->msg_perm.mode = tbuf_old.msg_perm.mode;
if (tbuf_old.msg_qbytes == 0)
out->msg_qbytes = tbuf_old.msg_lqbytes;
else
out->msg_qbytes = tbuf_old.msg_qbytes;
return 0;
}
default:
return -EINVAL;
}
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,619 | copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
switch(version) {
case IPC_64:
return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD:
{
struct msqid_ds out;
memset(&out, 0, sizeof(out));
ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);
out.msg_stime = in->msg_stime;
out.msg_rtime = in->msg_rtime;
out.msg_ctime = in->msg_ctime;
if (in->msg_cbytes > USHRT_MAX)
out.msg_cbytes = USHRT_MAX;
else
out.msg_cbytes = in->msg_cbytes;
out.msg_lcbytes = in->msg_cbytes;
if (in->msg_qnum > USHRT_MAX)
out.msg_qnum = USHRT_MAX;
else
out.msg_qnum = in->msg_qnum;
if (in->msg_qbytes > USHRT_MAX)
out.msg_qbytes = USHRT_MAX;
else
out.msg_qbytes = in->msg_qbytes;
out.msg_lqbytes = in->msg_qbytes;
out.msg_lspid = in->msg_lspid;
out.msg_lrpid = in->msg_lrpid;
return copy_to_user(buf, &out, sizeof(out));
}
default:
return -EINVAL;
}
}
| DoS | 0 | copy_msqid_to_user(void __user *buf, struct msqid64_ds *in, int version)
{
switch(version) {
case IPC_64:
return copy_to_user(buf, in, sizeof(*in));
case IPC_OLD:
{
struct msqid_ds out;
memset(&out, 0, sizeof(out));
ipc64_perm_to_ipc_perm(&in->msg_perm, &out.msg_perm);
out.msg_stime = in->msg_stime;
out.msg_rtime = in->msg_rtime;
out.msg_ctime = in->msg_ctime;
if (in->msg_cbytes > USHRT_MAX)
out.msg_cbytes = USHRT_MAX;
else
out.msg_cbytes = in->msg_cbytes;
out.msg_lcbytes = in->msg_cbytes;
if (in->msg_qnum > USHRT_MAX)
out.msg_qnum = USHRT_MAX;
else
out.msg_qnum = in->msg_qnum;
if (in->msg_qbytes > USHRT_MAX)
out.msg_qbytes = USHRT_MAX;
else
out.msg_qbytes = in->msg_qbytes;
out.msg_lqbytes = in->msg_qbytes;
out.msg_lspid = in->msg_lspid;
out.msg_lrpid = in->msg_lrpid;
return copy_to_user(buf, &out, sizeof(out));
}
default:
return -EINVAL;
}
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,620 | static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
{
struct msgbuf __user *msgp = dest;
size_t msgsz;
if (put_user(msg->m_type, &msgp->mtype))
return -EFAULT;
msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
if (store_msg(msgp->mtext, msg, msgsz))
return -EFAULT;
return msgsz;
}
| DoS | 0 | static long do_msg_fill(void __user *dest, struct msg_msg *msg, size_t bufsz)
{
struct msgbuf __user *msgp = dest;
size_t msgsz;
if (put_user(msg->m_type, &msgp->mtype))
return -EFAULT;
msgsz = (bufsz > msg->m_ts) ? msg->m_ts : bufsz;
if (store_msg(msgp->mtext, msg, msgsz))
return -EFAULT;
return msgsz;
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,621 | long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
int msgflg,
long (*msg_handler)(void __user *, struct msg_msg *, size_t))
{
struct msg_queue *msq;
struct msg_msg *msg;
int mode;
struct ipc_namespace *ns;
struct msg_msg *copy = NULL;
ns = current->nsproxy->ipc_ns;
if (msqid < 0 || (long) bufsz < 0)
return -EINVAL;
if (msgflg & MSG_COPY) {
copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
if (IS_ERR(copy))
return PTR_ERR(copy);
}
mode = convert_mode(&msgtyp, msgflg);
msq = msg_lock_check(ns, msqid);
if (IS_ERR(msq)) {
free_copy(copy);
return PTR_ERR(msq);
}
for (;;) {
struct msg_receiver msr_d;
msg = ERR_PTR(-EACCES);
if (ipcperms(ns, &msq->q_perm, S_IRUGO))
goto out_unlock;
msg = find_msg(msq, &msgtyp, mode);
if (!IS_ERR(msg)) {
/*
* Found a suitable message.
* Unlink it from the queue.
*/
if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
msg = ERR_PTR(-E2BIG);
goto out_unlock;
}
/*
* If we are copying, then do not unlink message and do
* not update queue parameters.
*/
if (msgflg & MSG_COPY) {
msg = copy_msg(msg, copy);
goto out_unlock;
}
list_del(&msg->m_list);
msq->q_qnum--;
msq->q_rtime = get_seconds();
msq->q_lrpid = task_tgid_vnr(current);
msq->q_cbytes -= msg->m_ts;
atomic_sub(msg->m_ts, &ns->msg_bytes);
atomic_dec(&ns->msg_hdrs);
ss_wakeup(&msq->q_senders, 0);
msg_unlock(msq);
break;
}
/* No message waiting. Wait for a message */
if (msgflg & IPC_NOWAIT) {
msg = ERR_PTR(-ENOMSG);
goto out_unlock;
}
list_add_tail(&msr_d.r_list, &msq->q_receivers);
msr_d.r_tsk = current;
msr_d.r_msgtype = msgtyp;
msr_d.r_mode = mode;
if (msgflg & MSG_NOERROR)
msr_d.r_maxsize = INT_MAX;
else
msr_d.r_maxsize = bufsz;
msr_d.r_msg = ERR_PTR(-EAGAIN);
current->state = TASK_INTERRUPTIBLE;
msg_unlock(msq);
schedule();
/* Lockless receive, part 1:
* Disable preemption. We don't hold a reference to the queue
* and getting a reference would defeat the idea of a lockless
* operation, thus the code relies on rcu to guarantee the
* existence of msq:
* Prior to destruction, expunge_all(-EIDRM) changes r_msg.
* Thus if r_msg is -EAGAIN, the queue is not yet destroyed.
* rcu_read_lock() prevents preemption between reading r_msg
* and the spin_lock() inside ipc_lock_by_ptr().
*/
rcu_read_lock();
/* Lockless receive, part 2:
* Wait until pipelined_send or expunge_all are outside of
* wake_up_process(). There is a race with exit(), see
* ipc/mqueue.c for the details.
*/
msg = (struct msg_msg*)msr_d.r_msg;
while (msg == NULL) {
cpu_relax();
msg = (struct msg_msg *)msr_d.r_msg;
}
/* Lockless receive, part 3:
* If there is a message or an error then accept it without
* locking.
*/
if (msg != ERR_PTR(-EAGAIN)) {
rcu_read_unlock();
break;
}
/* Lockless receive, part 4:
* Acquire the queue spinlock.
*/
ipc_lock_by_ptr(&msq->q_perm);
rcu_read_unlock();
/* Lockless receive, part 5:
* Repeat test after acquiring the spinlock.
*/
msg = (struct msg_msg*)msr_d.r_msg;
if (msg != ERR_PTR(-EAGAIN))
goto out_unlock;
list_del(&msr_d.r_list);
if (signal_pending(current)) {
msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
msg_unlock(msq);
break;
}
}
if (IS_ERR(msg)) {
free_copy(copy);
return PTR_ERR(msg);
}
bufsz = msg_handler(buf, msg, bufsz);
free_msg(msg);
return bufsz;
}
| DoS | 0 | long do_msgrcv(int msqid, void __user *buf, size_t bufsz, long msgtyp,
int msgflg,
long (*msg_handler)(void __user *, struct msg_msg *, size_t))
{
struct msg_queue *msq;
struct msg_msg *msg;
int mode;
struct ipc_namespace *ns;
struct msg_msg *copy = NULL;
ns = current->nsproxy->ipc_ns;
if (msqid < 0 || (long) bufsz < 0)
return -EINVAL;
if (msgflg & MSG_COPY) {
copy = prepare_copy(buf, min_t(size_t, bufsz, ns->msg_ctlmax));
if (IS_ERR(copy))
return PTR_ERR(copy);
}
mode = convert_mode(&msgtyp, msgflg);
msq = msg_lock_check(ns, msqid);
if (IS_ERR(msq)) {
free_copy(copy);
return PTR_ERR(msq);
}
for (;;) {
struct msg_receiver msr_d;
msg = ERR_PTR(-EACCES);
if (ipcperms(ns, &msq->q_perm, S_IRUGO))
goto out_unlock;
msg = find_msg(msq, &msgtyp, mode);
if (!IS_ERR(msg)) {
/*
* Found a suitable message.
* Unlink it from the queue.
*/
if ((bufsz < msg->m_ts) && !(msgflg & MSG_NOERROR)) {
msg = ERR_PTR(-E2BIG);
goto out_unlock;
}
/*
* If we are copying, then do not unlink message and do
* not update queue parameters.
*/
if (msgflg & MSG_COPY) {
msg = copy_msg(msg, copy);
goto out_unlock;
}
list_del(&msg->m_list);
msq->q_qnum--;
msq->q_rtime = get_seconds();
msq->q_lrpid = task_tgid_vnr(current);
msq->q_cbytes -= msg->m_ts;
atomic_sub(msg->m_ts, &ns->msg_bytes);
atomic_dec(&ns->msg_hdrs);
ss_wakeup(&msq->q_senders, 0);
msg_unlock(msq);
break;
}
/* No message waiting. Wait for a message */
if (msgflg & IPC_NOWAIT) {
msg = ERR_PTR(-ENOMSG);
goto out_unlock;
}
list_add_tail(&msr_d.r_list, &msq->q_receivers);
msr_d.r_tsk = current;
msr_d.r_msgtype = msgtyp;
msr_d.r_mode = mode;
if (msgflg & MSG_NOERROR)
msr_d.r_maxsize = INT_MAX;
else
msr_d.r_maxsize = bufsz;
msr_d.r_msg = ERR_PTR(-EAGAIN);
current->state = TASK_INTERRUPTIBLE;
msg_unlock(msq);
schedule();
/* Lockless receive, part 1:
* Disable preemption. We don't hold a reference to the queue
* and getting a reference would defeat the idea of a lockless
* operation, thus the code relies on rcu to guarantee the
* existence of msq:
* Prior to destruction, expunge_all(-EIDRM) changes r_msg.
* Thus if r_msg is -EAGAIN, the queue is not yet destroyed.
* rcu_read_lock() prevents preemption between reading r_msg
* and the spin_lock() inside ipc_lock_by_ptr().
*/
rcu_read_lock();
/* Lockless receive, part 2:
* Wait until pipelined_send or expunge_all are outside of
* wake_up_process(). There is a race with exit(), see
* ipc/mqueue.c for the details.
*/
msg = (struct msg_msg*)msr_d.r_msg;
while (msg == NULL) {
cpu_relax();
msg = (struct msg_msg *)msr_d.r_msg;
}
/* Lockless receive, part 3:
* If there is a message or an error then accept it without
* locking.
*/
if (msg != ERR_PTR(-EAGAIN)) {
rcu_read_unlock();
break;
}
/* Lockless receive, part 4:
* Acquire the queue spinlock.
*/
ipc_lock_by_ptr(&msq->q_perm);
rcu_read_unlock();
/* Lockless receive, part 5:
* Repeat test after acquiring the spinlock.
*/
msg = (struct msg_msg*)msr_d.r_msg;
if (msg != ERR_PTR(-EAGAIN))
goto out_unlock;
list_del(&msr_d.r_list);
if (signal_pending(current)) {
msg = ERR_PTR(-ERESTARTNOHAND);
out_unlock:
msg_unlock(msq);
break;
}
}
if (IS_ERR(msg)) {
free_copy(copy);
return PTR_ERR(msg);
}
bufsz = msg_handler(buf, msg, bufsz);
free_msg(msg);
return bufsz;
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,622 | static void expunge_all(struct msg_queue *msq, int res)
{
struct list_head *tmp;
tmp = msq->q_receivers.next;
while (tmp != &msq->q_receivers) {
struct msg_receiver *msr;
msr = list_entry(tmp, struct msg_receiver, r_list);
tmp = tmp->next;
msr->r_msg = NULL;
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = ERR_PTR(res);
}
}
| DoS | 0 | static void expunge_all(struct msg_queue *msq, int res)
{
struct list_head *tmp;
tmp = msq->q_receivers.next;
while (tmp != &msq->q_receivers) {
struct msg_receiver *msr;
msr = list_entry(tmp, struct msg_receiver, r_list);
tmp = tmp->next;
msr->r_msg = NULL;
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = ERR_PTR(res);
}
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,623 | static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
struct msg_msg *msg;
long count = 0;
list_for_each_entry(msg, &msq->q_messages, m_list) {
if (testmsg(msg, *msgtyp, mode) &&
!security_msg_queue_msgrcv(msq, msg, current,
*msgtyp, mode)) {
if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
*msgtyp = msg->m_type - 1;
} else if (mode == SEARCH_NUMBER) {
if (*msgtyp == count)
return msg;
} else
return msg;
count++;
}
}
return ERR_PTR(-EAGAIN);
}
| DoS | 0 | static struct msg_msg *find_msg(struct msg_queue *msq, long *msgtyp, int mode)
{
struct msg_msg *msg;
long count = 0;
list_for_each_entry(msg, &msq->q_messages, m_list) {
if (testmsg(msg, *msgtyp, mode) &&
!security_msg_queue_msgrcv(msq, msg, current,
*msgtyp, mode)) {
if (mode == SEARCH_LESSEQUAL && msg->m_type != 1) {
*msgtyp = msg->m_type - 1;
} else if (mode == SEARCH_NUMBER) {
if (*msgtyp == count)
return msg;
} else
return msg;
count++;
}
}
return ERR_PTR(-EAGAIN);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,624 | static inline void free_copy(struct msg_msg *copy)
{
if (copy)
free_msg(copy);
}
| DoS | 0 | static inline void free_copy(struct msg_msg *copy)
{
if (copy)
free_msg(copy);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,625 | static inline void free_copy(struct msg_msg *copy)
{
}
| DoS | 0 | static inline void free_copy(struct msg_msg *copy)
{
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,626 | void msg_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &msg_ids(ns), freeque);
idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
| DoS | 0 | void msg_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &msg_ids(ns), freeque);
idr_destroy(&ns->ids[IPC_MSG_IDS].ipcs_idr);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,627 | void __init msg_init(void)
{
msg_init_ns(&init_ipc_ns);
printk(KERN_INFO "msgmni has been set to %d\n",
init_ipc_ns.msg_ctlmni);
ipc_init_proc_interface("sysvipc/msg",
" key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
IPC_MSG_IDS, sysvipc_msg_proc_show);
}
| DoS | 0 | void __init msg_init(void)
{
msg_init_ns(&init_ipc_ns);
printk(KERN_INFO "msgmni has been set to %d\n",
init_ipc_ns.msg_ctlmni);
ipc_init_proc_interface("sysvipc/msg",
" key msqid perms cbytes qnum lspid lrpid uid gid cuid cgid stime rtime ctime\n",
IPC_MSG_IDS, sysvipc_msg_proc_show);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,628 | void msg_init_ns(struct ipc_namespace *ns)
{
ns->msg_ctlmax = MSGMAX;
ns->msg_ctlmnb = MSGMNB;
recompute_msgmni(ns);
atomic_set(&ns->msg_bytes, 0);
atomic_set(&ns->msg_hdrs, 0);
ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}
| DoS | 0 | void msg_init_ns(struct ipc_namespace *ns)
{
ns->msg_ctlmax = MSGMAX;
ns->msg_ctlmnb = MSGMNB;
recompute_msgmni(ns);
atomic_set(&ns->msg_bytes, 0);
atomic_set(&ns->msg_hdrs, 0);
ipc_init_ids(&ns->ids[IPC_MSG_IDS]);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,629 | static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);
if (IS_ERR(ipcp))
return (struct msg_queue *)ipcp;
return container_of(ipcp, struct msg_queue, q_perm);
}
| DoS | 0 | static inline struct msg_queue *msg_lock(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_lock(&msg_ids(ns), id);
if (IS_ERR(ipcp))
return (struct msg_queue *)ipcp;
return container_of(ipcp, struct msg_queue, q_perm);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,630 | static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
ipc_rmid(&msg_ids(ns), &s->q_perm);
}
| DoS | 0 | static inline void msg_rmid(struct ipc_namespace *ns, struct msg_queue *s)
{
ipc_rmid(&msg_ids(ns), &s->q_perm);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,631 | static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
return security_msg_queue_associate(msq, msgflg);
}
| DoS | 0 | static inline int msg_security(struct kern_ipc_perm *ipcp, int msgflg)
{
struct msg_queue *msq = container_of(ipcp, struct msg_queue, q_perm);
return security_msg_queue_associate(msq, msgflg);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,632 | static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
struct msg_queue *msq;
int id, retval;
key_t key = params->key;
int msgflg = params->flg;
msq = ipc_rcu_alloc(sizeof(*msq));
if (!msq)
return -ENOMEM;
msq->q_perm.mode = msgflg & S_IRWXUGO;
msq->q_perm.key = key;
msq->q_perm.security = NULL;
retval = security_msg_queue_alloc(msq);
if (retval) {
ipc_rcu_putref(msq);
return retval;
}
/*
* ipc_addid() locks msq
*/
id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
if (id < 0) {
security_msg_queue_free(msq);
ipc_rcu_putref(msq);
return id;
}
msq->q_stime = msq->q_rtime = 0;
msq->q_ctime = get_seconds();
msq->q_cbytes = msq->q_qnum = 0;
msq->q_qbytes = ns->msg_ctlmnb;
msq->q_lspid = msq->q_lrpid = 0;
INIT_LIST_HEAD(&msq->q_messages);
INIT_LIST_HEAD(&msq->q_receivers);
INIT_LIST_HEAD(&msq->q_senders);
msg_unlock(msq);
return msq->q_perm.id;
}
| DoS | 0 | static int newque(struct ipc_namespace *ns, struct ipc_params *params)
{
struct msg_queue *msq;
int id, retval;
key_t key = params->key;
int msgflg = params->flg;
msq = ipc_rcu_alloc(sizeof(*msq));
if (!msq)
return -ENOMEM;
msq->q_perm.mode = msgflg & S_IRWXUGO;
msq->q_perm.key = key;
msq->q_perm.security = NULL;
retval = security_msg_queue_alloc(msq);
if (retval) {
ipc_rcu_putref(msq);
return retval;
}
/*
* ipc_addid() locks msq
*/
id = ipc_addid(&msg_ids(ns), &msq->q_perm, ns->msg_ctlmni);
if (id < 0) {
security_msg_queue_free(msq);
ipc_rcu_putref(msq);
return id;
}
msq->q_stime = msq->q_rtime = 0;
msq->q_ctime = get_seconds();
msq->q_cbytes = msq->q_qnum = 0;
msq->q_qbytes = ns->msg_ctlmnb;
msq->q_lspid = msq->q_lrpid = 0;
INIT_LIST_HEAD(&msq->q_messages);
INIT_LIST_HEAD(&msq->q_receivers);
INIT_LIST_HEAD(&msq->q_senders);
msg_unlock(msq);
return msq->q_perm.id;
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,633 | static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
struct list_head *tmp;
tmp = msq->q_receivers.next;
while (tmp != &msq->q_receivers) {
struct msg_receiver *msr;
msr = list_entry(tmp, struct msg_receiver, r_list);
tmp = tmp->next;
if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
!security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
msr->r_msgtype, msr->r_mode)) {
list_del(&msr->r_list);
if (msr->r_maxsize < msg->m_ts) {
msr->r_msg = NULL;
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = ERR_PTR(-E2BIG);
} else {
msr->r_msg = NULL;
msq->q_lrpid = task_pid_vnr(msr->r_tsk);
msq->q_rtime = get_seconds();
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = msg;
return 1;
}
}
}
return 0;
}
| DoS | 0 | static inline int pipelined_send(struct msg_queue *msq, struct msg_msg *msg)
{
struct list_head *tmp;
tmp = msq->q_receivers.next;
while (tmp != &msq->q_receivers) {
struct msg_receiver *msr;
msr = list_entry(tmp, struct msg_receiver, r_list);
tmp = tmp->next;
if (testmsg(msg, msr->r_msgtype, msr->r_mode) &&
!security_msg_queue_msgrcv(msq, msg, msr->r_tsk,
msr->r_msgtype, msr->r_mode)) {
list_del(&msr->r_list);
if (msr->r_maxsize < msg->m_ts) {
msr->r_msg = NULL;
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = ERR_PTR(-E2BIG);
} else {
msr->r_msg = NULL;
msq->q_lrpid = task_pid_vnr(msr->r_tsk);
msq->q_rtime = get_seconds();
wake_up_process(msr->r_tsk);
smp_mb();
msr->r_msg = msg;
return 1;
}
}
}
return 0;
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,634 | static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
struct msg_msg *copy;
/*
* Create dummy message to copy real message to.
*/
copy = load_msg(buf, bufsz);
if (!IS_ERR(copy))
copy->m_ts = bufsz;
return copy;
}
| DoS | 0 | static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
struct msg_msg *copy;
/*
* Create dummy message to copy real message to.
*/
copy = load_msg(buf, bufsz);
if (!IS_ERR(copy))
copy->m_ts = bufsz;
return copy;
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,635 | static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
return ERR_PTR(-ENOSYS);
}
| DoS | 0 | static inline struct msg_msg *prepare_copy(void __user *buf, size_t bufsz)
{
return ERR_PTR(-ENOSYS);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,636 | void recompute_msgmni(struct ipc_namespace *ns)
{
struct sysinfo i;
unsigned long allowed;
int nb_ns;
si_meminfo(&i);
allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
/ MSGMNB;
nb_ns = atomic_read(&nr_ipc_ns);
allowed /= nb_ns;
if (allowed < MSGMNI) {
ns->msg_ctlmni = MSGMNI;
return;
}
if (allowed > IPCMNI / nb_ns) {
ns->msg_ctlmni = IPCMNI / nb_ns;
return;
}
ns->msg_ctlmni = allowed;
}
| DoS | 0 | void recompute_msgmni(struct ipc_namespace *ns)
{
struct sysinfo i;
unsigned long allowed;
int nb_ns;
si_meminfo(&i);
allowed = (((i.totalram - i.totalhigh) / MSG_MEM_SCALE) * i.mem_unit)
/ MSGMNB;
nb_ns = atomic_read(&nr_ipc_ns);
allowed /= nb_ns;
if (allowed < MSGMNI) {
ns->msg_ctlmni = MSGMNI;
return;
}
if (allowed > IPCMNI / nb_ns) {
ns->msg_ctlmni = IPCMNI / nb_ns;
return;
}
ns->msg_ctlmni = allowed;
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,637 | static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
mss->tsk = current;
current->state = TASK_INTERRUPTIBLE;
list_add_tail(&mss->list, &msq->q_senders);
}
| DoS | 0 | static inline void ss_add(struct msg_queue *msq, struct msg_sender *mss)
{
mss->tsk = current;
current->state = TASK_INTERRUPTIBLE;
list_add_tail(&mss->list, &msq->q_senders);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,638 | static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
struct user_namespace *user_ns = seq_user_ns(s);
struct msg_queue *msq = it;
return seq_printf(s,
"%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
msq->q_perm.key,
msq->q_perm.id,
msq->q_perm.mode,
msq->q_cbytes,
msq->q_qnum,
msq->q_lspid,
msq->q_lrpid,
from_kuid_munged(user_ns, msq->q_perm.uid),
from_kgid_munged(user_ns, msq->q_perm.gid),
from_kuid_munged(user_ns, msq->q_perm.cuid),
from_kgid_munged(user_ns, msq->q_perm.cgid),
msq->q_stime,
msq->q_rtime,
msq->q_ctime);
}
| DoS | 0 | static int sysvipc_msg_proc_show(struct seq_file *s, void *it)
{
struct user_namespace *user_ns = seq_user_ns(s);
struct msg_queue *msq = it;
return seq_printf(s,
"%10d %10d %4o %10lu %10lu %5u %5u %5u %5u %5u %5u %10lu %10lu %10lu\n",
msq->q_perm.key,
msq->q_perm.id,
msq->q_perm.mode,
msq->q_cbytes,
msq->q_qnum,
msq->q_lspid,
msq->q_lrpid,
from_kuid_munged(user_ns, msq->q_perm.uid),
from_kgid_munged(user_ns, msq->q_perm.gid),
from_kuid_munged(user_ns, msq->q_perm.cuid),
from_kgid_munged(user_ns, msq->q_perm.cgid),
msq->q_stime,
msq->q_rtime,
msq->q_ctime);
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,639 | static int testmsg(struct msg_msg *msg, long type, int mode)
{
switch(mode)
{
case SEARCH_ANY:
case SEARCH_NUMBER:
return 1;
case SEARCH_LESSEQUAL:
if (msg->m_type <= type)
return 1;
break;
case SEARCH_EQUAL:
if (msg->m_type == type)
return 1;
break;
case SEARCH_NOTEQUAL:
if (msg->m_type != type)
return 1;
break;
}
return 0;
}
| DoS | 0 | static int testmsg(struct msg_msg *msg, long type, int mode)
{
switch(mode)
{
case SEARCH_ANY:
case SEARCH_NUMBER:
return 1;
case SEARCH_LESSEQUAL:
if (msg->m_type <= type)
return 1;
break;
case SEARCH_EQUAL:
if (msg->m_type == type)
return 1;
break;
case SEARCH_NOTEQUAL:
if (msg->m_type != type)
return 1;
break;
}
return 0;
}
| @@ -687,7 +687,12 @@ long do_msgsnd(int msqid, long mtype, void __user *mtext,
goto out_unlock_free;
}
ss_add(msq, &s);
- ipc_rcu_getref(msq);
+
+ if (!ipc_rcu_getref(msq)) {
+ err = -EIDRM;
+ goto out_unlock_free;
+ }
+
msg_unlock(msq);
schedule();
| CWE-189 | null | null |
23,640 | SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
struct ipc_namespace *ns;
struct ipc_ops sem_ops;
struct ipc_params sem_params;
ns = current->nsproxy->ipc_ns;
if (nsems < 0 || nsems > ns->sc_semmsl)
return -EINVAL;
sem_ops.getnew = newary;
sem_ops.associate = sem_security;
sem_ops.more_checks = sem_more_checks;
sem_params.key = key;
sem_params.flg = semflg;
sem_params.u.nsems = nsems;
return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
| DoS | 0 | SYSCALL_DEFINE3(semget, key_t, key, int, nsems, int, semflg)
{
struct ipc_namespace *ns;
struct ipc_ops sem_ops;
struct ipc_params sem_params;
ns = current->nsproxy->ipc_ns;
if (nsems < 0 || nsems > ns->sc_semmsl)
return -EINVAL;
sem_ops.getnew = newary;
sem_ops.associate = sem_security;
sem_ops.more_checks = sem_more_checks;
sem_params.key = key;
sem_params.flg = semflg;
sem_params.u.nsems = nsems;
return ipcget(ns, &sem_ids(ns), &sem_ops, &sem_params);
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
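Record 23,640's semget path only bounds-checks nsems against the namespace limit (ns->sc_semmsl) before handing off to ipcget. A small userspace sketch of that check follows; the 100000 value is an assumption chosen to exceed the usual SEMMSL default of 32000, so the first call should fail with EINVAL:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
	/* 100000 is an assumed value chosen to exceed sc_semmsl
	 * (SEMMSL, typically 32000), so semget reports EINVAL. */
	int id = semget(IPC_PRIVATE, 100000, IPC_CREAT | 0600);

	if (id < 0)
		printf("oversized nsems rejected: %s\n", strerror(errno));

	id = semget(IPC_PRIVATE, 4, IPC_CREAT | 0600);  /* within limits */
	if (id >= 0) {
		printf("created set %d with 4 semaphores\n", id);
		semctl(id, 0, IPC_RMID);
	}
	return 0;
}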
23,641 | SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
unsigned, nsops)
{
return sys_semtimedop(semid, tsops, nsops, NULL);
}
| DoS | 0 | SYSCALL_DEFINE3(semop, int, semid, struct sembuf __user *, tsops,
unsigned, nsops)
{
return sys_semtimedop(semid, tsops, nsops, NULL);
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
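The patch attached to this record (and to its neighbours, which all come from the same commit) replaces the single ipc lock with the two-level scheme described in the sem_lock comment: a single-sop caller takes only its per-semaphore spinlock, while a complex caller takes the array-wide lock and then drains every per-semaphore lock. The following is a simplified pthread model of that retry logic, not the kernel code: names such as sem_model are invented for illustration, complex_count bookkeeping is left out, and a trylock-and-release stands in for the kernel's spin_is_locked/spin_unlock_wait pair.

#include <pthread.h>

#define NSEMS 8

struct sem_slot {
	pthread_spinlock_t lock;      /* models sem->lock           */
	int semval;
};

struct sem_model {
	pthread_spinlock_t global;    /* models sma->sem_perm.lock  */
	int complex_count;            /* models sma->complex_count  */
	struct sem_slot sem[NSEMS];
};

/* Returns the slot index locked on the fast path, or -1 when the
 * array-wide lock was taken; mirrors sem_lock()'s retry structure. */
static int model_lock(struct sem_model *a, int num, int nsops)
{
	for (;;) {
		if (nsops == 1 && !a->complex_count) {
			pthread_spin_lock(&a->sem[num].lock);

			if (a->complex_count) {
				/* A complex op queued while we spun:
				 * fall back to the array-wide lock. */
				pthread_spin_unlock(&a->sem[num].lock);
			} else if (pthread_spin_trylock(&a->global) == 0) {
				/* Array lock was free: the fast path is
				 * safe, keep only our slot lock. */
				pthread_spin_unlock(&a->global);
				return num;
			} else {
				/* Array lock held: back off and retry
				 * (stands in for spin_unlock_wait()). */
				pthread_spin_unlock(&a->sem[num].lock);
				continue;
			}
		}
		/* Slow path: take the array lock, then wait for every
		 * per-semaphore holder to leave its critical section. */
		pthread_spin_lock(&a->global);
		for (int i = 0; i < NSEMS; i++) {
			pthread_spin_lock(&a->sem[i].lock);
			pthread_spin_unlock(&a->sem[i].lock);
		}
		return -1;
	}
}

static void model_unlock(struct sem_model *a, int locknum)
{
	if (locknum == -1)
		pthread_spin_unlock(&a->global);
	else
		pthread_spin_unlock(&a->sem[locknum].lock);
}

int main(void)
{
	struct sem_model a = { .complex_count = 0 };
	int i, ln;

	pthread_spin_init(&a.global, PTHREAD_PROCESS_PRIVATE);
	for (i = 0; i < NSEMS; i++)
		pthread_spin_init(&a.sem[i].lock, PTHREAD_PROCESS_PRIVATE);

	ln = model_lock(&a, 3, 1);    /* single sop: locks slot 3 only   */
	model_unlock(&a, ln);
	ln = model_lock(&a, 0, 2);    /* complex op: takes the array lock */
	model_unlock(&a, ln);
	return 0;
}

The design point the model captures is the handshake between the two paths: fast-path holders never enter their critical section while the global lock is held, and global holders never proceed until every per-semaphore holder has left.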
23,642 | SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
int version;
struct ipc_namespace *ns;
void __user *p = (void __user *)arg;
if (semid < 0)
return -EINVAL;
version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
switch (cmd) {
case IPC_INFO:
case SEM_INFO:
case IPC_STAT:
case SEM_STAT:
return semctl_nolock(ns, semid, cmd, version, p);
case GETALL:
case GETVAL:
case GETPID:
case GETNCNT:
case GETZCNT:
case SETALL:
return semctl_main(ns, semid, semnum, cmd, p);
case SETVAL:
return semctl_setval(ns, semid, semnum, arg);
case IPC_RMID:
case IPC_SET:
return semctl_down(ns, semid, cmd, version, p);
default:
return -EINVAL;
}
}
| DoS | 0 | SYSCALL_DEFINE4(semctl, int, semid, int, semnum, int, cmd, unsigned long, arg)
{
int version;
struct ipc_namespace *ns;
void __user *p = (void __user *)arg;
if (semid < 0)
return -EINVAL;
version = ipc_parse_version(&cmd);
ns = current->nsproxy->ipc_ns;
switch (cmd) {
case IPC_INFO:
case SEM_INFO:
case IPC_STAT:
case SEM_STAT:
return semctl_nolock(ns, semid, cmd, version, p);
case GETALL:
case GETVAL:
case GETPID:
case GETNCNT:
case GETZCNT:
case SETALL:
return semctl_main(ns, semid, semnum, cmd, p);
case SETVAL:
return semctl_setval(ns, semid, semnum, arg);
case IPC_RMID:
case IPC_SET:
return semctl_down(ns, semid, cmd, version, p);
default:
return -EINVAL;
}
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
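Record 23,642 routes SETVAL to semctl_setval, where the patch hoists the SEMVMX range check ahead of any locking, so an out-of-range value now fails with ERANGE before the set is even looked up. A userspace sketch of the resulting behaviour (union semun is defined by the caller, as semctl(2) requires):

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun {                 /* caller-defined, per semctl(2) */
	int val;
	struct semid_ds *buf;
	unsigned short *array;
};

int main(void)
{
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
	union semun arg;

	if (id < 0) {
		perror("semget");
		return 1;
	}
	/* SEMVMX is 32767; the patched kernel rejects larger values
	 * with ERANGE before taking any lock. */
	arg.val = 70000;
	if (semctl(id, 0, SETVAL, arg) < 0)
		printf("SETVAL beyond SEMVMX: %s\n", strerror(errno));

	arg.val = 1;
	if (semctl(id, 0, SETVAL, arg) == 0)
		printf("semaphore 0 is now %d\n", semctl(id, 0, GETVAL));

	semctl(id, 0, IPC_RMID);
	return 0;
}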
23,643 | static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
struct sem *curr;
struct sem_queue *h;
/* if the operation didn't modify the array, then no restart */
if (q->alter == 0)
return 0;
/* pending complex operations are too difficult to analyse */
if (sma->complex_count)
return 1;
/* we were a sleeping complex operation. Too difficult */
if (q->nsops > 1)
return 1;
curr = sma->sem_base + q->sops[0].sem_num;
/* No-one waits on this queue */
if (list_empty(&curr->sem_pending))
return 0;
/* the new semaphore value */
if (curr->semval) {
/* It is impossible that someone waits for the new value:
* - q is a previously sleeping simple operation that
* altered the array. It must be a decrement, because
* simple increments never sleep.
* - The value is not 0, thus wait-for-zero won't proceed.
* - If there are older (higher priority) decrements
* in the queue, then they have observed the original
* semval value and couldn't proceed. The operation
* decremented semval to its current value, so they still
* cannot proceed.
*/
BUG_ON(q->sops[0].sem_op >= 0);
return 0;
}
/*
* semval is 0. Check if there are wait-for-zero semops.
* They must be the first entries in the per-semaphore queue
*/
h = list_first_entry(&curr->sem_pending, struct sem_queue, list);
BUG_ON(h->nsops != 1);
BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);
/* Yes, there is a wait-for-zero semop. Restart */
if (h->sops[0].sem_op == 0)
return 1;
/* Again - no-one is waiting for the new value. */
return 0;
}
| DoS | 0 | static int check_restart(struct sem_array *sma, struct sem_queue *q)
{
struct sem *curr;
struct sem_queue *h;
/* if the operation didn't modify the array, then no restart */
if (q->alter == 0)
return 0;
/* pending complex operations are too difficult to analyse */
if (sma->complex_count)
return 1;
/* we were a sleeping complex operation. Too difficult */
if (q->nsops > 1)
return 1;
curr = sma->sem_base + q->sops[0].sem_num;
/* No-one waits on this queue */
if (list_empty(&curr->sem_pending))
return 0;
/* the new semaphore value */
if (curr->semval) {
/* It is impossible that someone waits for the new value:
* - q is a previously sleeping simple operation that
* altered the array. It must be a decrement, because
* simple increments never sleep.
* - The value is not 0, thus wait-for-zero won't proceed.
* - If there are older (higher priority) decrements
* in the queue, then they have observed the original
* semval value and couldn't proceed. The operation
* decremented semval to its current value, so they still
* cannot proceed.
*/
BUG_ON(q->sops[0].sem_op >= 0);
return 0;
}
/*
* semval is 0. Check if there are wait-for-zero semops.
* They must be the first entries in the per-semaphore queue
*/
h = list_first_entry(&curr->sem_pending, struct sem_queue, list);
BUG_ON(h->nsops != 1);
BUG_ON(h->sops[0].sem_num != q->sops[0].sem_num);
/* Yes, there is a wait-for-zero semop. Restart */
if (h->sops[0].sem_op == 0)
return 1;
/* Again - no-one is waiting for the new value. */
return 0;
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
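
The core of the patch carried by these records is the sem_lock()/sem_unlock() pair: a single-sop semtimedop() takes only the per-semaphore spinlock, while multi-sop ("complex") callers take the array-wide lock and drain every per-semaphore lock before entering. Below is a rough user-space analogue of that two-level scheme, assuming POSIX spinlocks rather than kernel primitives; struct array, array_lock() and array_unlock() are invented names, and since pthreads has no spin_is_locked() or spin_unlock_wait(), the global-holder check is approximated with pthread_spin_trylock() and the drain loop with lock/unlock pairs.

#include <pthread.h>

struct item {
        pthread_spinlock_t lock;        /* role of sem->lock in the patch */
        int val;
};

struct array {
        pthread_spinlock_t global;      /* role of sma->sem_perm.lock */
        int complex_count;              /* pending multi-item operations */
        int n;
        struct item *items;
};

/* Returns the index locked on the fast path, or -1 when the whole
 * array was locked, mirroring the patched sem_lock() convention. */
static int array_lock(struct array *a, int idx, int nops)
{
        int i;
again:
        if (nops == 1 && !a->complex_count) {
                pthread_spin_lock(&a->items[idx].lock);
                /* complex_count may have become non-zero while we spun. */
                if (a->complex_count) {
                        pthread_spin_unlock(&a->items[idx].lock);
                        goto lock_array;
                }
                /* A global holder means we must back off and retry. */
                if (pthread_spin_trylock(&a->global) != 0) {
                        pthread_spin_unlock(&a->items[idx].lock);
                        goto again;
                }
                pthread_spin_unlock(&a->global);        /* probe only */
                return idx;
        }
lock_array:
        pthread_spin_lock(&a->global);
        /* Drain in-flight single-item holders before entering. */
        for (i = 0; i < a->n; i++) {
                pthread_spin_lock(&a->items[i].lock);
                pthread_spin_unlock(&a->items[i].lock);
        }
        return -1;
}

static void array_unlock(struct array *a, int locknum)
{
        if (locknum == -1)
                pthread_spin_unlock(&a->global);
        else
                pthread_spin_unlock(&a->items[locknum].lock);
}

As in the patch, the returned value is the caller's locknum: the item index on the fast path, or -1 when the whole array was locked, and array_unlock() releases whichever lock that value names.
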
23,644 | static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct semid_ds out;

                memset(&out, 0, sizeof(out));
                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
                out.sem_otime = in->sem_otime;
                out.sem_ctime = in->sem_ctime;
                out.sem_nsems = in->sem_nsems;
                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}
| DoS | 0 | static unsigned long copy_semid_to_user(void __user *buf, struct semid64_ds *in, int version)
{
        switch (version) {
        case IPC_64:
                return copy_to_user(buf, in, sizeof(*in));
        case IPC_OLD:
        {
                struct semid_ds out;

                memset(&out, 0, sizeof(out));
                ipc64_perm_to_ipc_perm(&in->sem_perm, &out.sem_perm);
                out.sem_otime = in->sem_otime;
                out.sem_ctime = in->sem_ctime;
                out.sem_nsems = in->sem_nsems;
                return copy_to_user(buf, &out, sizeof(out));
        }
        default:
                return -EINVAL;
        }
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
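
copy_semid_to_user() in this record is the usual two-ABI copy-out: IPC_64 callers receive semid64_ds verbatim, while IPC_OLD callers get a down-converted semid_ds assembled in a zeroed stack buffer, and that memset() is what keeps uninitialized kernel stack from reaching user space. A hedged stand-alone sketch of the same shape follows; stat_v1/stat_v2 and stat_to_caller() are made-up names, and plain memcpy() stands in for copy_to_user().

#include <string.h>
#include <errno.h>

/* Hypothetical old/new ABI layouts, standing in for semid_ds vs semid64_ds. */
struct stat_v1 { unsigned short nsems; long otime; };
struct stat_v2 { unsigned long nsems; long long otime; long long ctime; };

enum { VER_OLD = 0, VER_64 = 1 };

/* Copy a v2 record out in whichever layout the caller negotiated;
 * memcpy() stands in for copy_to_user(). */
static int stat_to_caller(void *buf, const struct stat_v2 *in, int version)
{
        switch (version) {
        case VER_64:
                memcpy(buf, in, sizeof(*in));
                return 0;
        case VER_OLD: {
                struct stat_v1 out;

                memset(&out, 0, sizeof(out));           /* no stack leak */
                out.nsems = (unsigned short)in->nsems;  /* may truncate */
                out.otime = (long)in->otime;
                memcpy(buf, &out, sizeof(out));
                return 0;
        }
        default:
                return -EINVAL;
        }
}

The point worth keeping is that the old layout is built field by field in a fully zeroed temporary rather than by casting the new struct, which is exactly what the memset() in copy_semid_to_user() guarantees.
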
23,645 | int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
        struct sem_undo_list *undo_list;
        int error;

        if (clone_flags & CLONE_SYSVSEM) {
                error = get_undo_list(&undo_list);
                if (error)
                        return error;
                atomic_inc(&undo_list->refcnt);
                tsk->sysvsem.undo_list = undo_list;
        } else
                tsk->sysvsem.undo_list = NULL;
        return 0;
}
| DoS | 0 | int copy_semundo(unsigned long clone_flags, struct task_struct *tsk)
{
        struct sem_undo_list *undo_list;
        int error;

        if (clone_flags & CLONE_SYSVSEM) {
                error = get_undo_list(&undo_list);
                if (error)
                        return error;
                atomic_inc(&undo_list->refcnt);
                tsk->sysvsem.undo_list = undo_list;
        } else
                tsk->sysvsem.undo_list = NULL;
        return 0;
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
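
copy_semundo() in this record copies nothing when CLONE_SYSVSEM is set: parent and child end up sharing one reference-counted sem_undo_list, so their semadj bookkeeping is common. A minimal sketch of that get-or-create-then-share pattern, assuming C11 atomics; struct undo_list, struct task, CLONE_SHARE and copy_undo() are illustrative stand-ins for the kernel types, with get_undo_list()'s lazy allocation folded inline.

#include <stdatomic.h>
#include <stdlib.h>

struct undo_list {
        atomic_int refcnt;
        /* ... per-semaphore adjustment entries would live here ... */
};

struct task {
        struct undo_list *undo;
};

#define CLONE_SHARE 0x1UL

static int copy_undo(unsigned long flags, struct task *parent, struct task *child)
{
        if (flags & CLONE_SHARE) {
                if (!parent->undo) {            /* lazy create, as get_undo_list() does */
                        parent->undo = calloc(1, sizeof(*parent->undo));
                        if (!parent->undo)
                                return -1;      /* the kernel returns -ENOMEM here */
                        atomic_init(&parent->undo->refcnt, 1);
                }
                atomic_fetch_add(&parent->undo->refcnt, 1);
                child->undo = parent->undo;     /* one shared list */
        } else {
                child->undo = NULL;             /* allocated on first semop(SEM_UNDO) */
        }
        return 0;
}

Teardown is symmetric: exit_sem() drops the refcount, and only the last task to leave walks the list and applies the undo entries.
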
23,646 | static int count_semncnt (struct sem_array * sma, ushort semnum)
{
        /* Count the tasks sleeping until the value of semaphore semnum
         * increases (sem_op < 0 without IPC_NOWAIT); this is the number
         * reported by semctl(..., GETNCNT). */
        int semncnt;
        struct sem_queue *q;

        semncnt = 0;
        list_for_each_entry(q, &sma->sem_pending, list) {
                struct sembuf *sops = q->sops;
                int nsops = q->nsops;
                int i;

                for (i = 0; i < nsops; i++)
                        if (sops[i].sem_num == semnum
                            && (sops[i].sem_op < 0)
                            && !(sops[i].sem_flg & IPC_NOWAIT))
                                semncnt++;
        }
        return semncnt;
}
| DoS | 0 | static int count_semncnt (struct sem_array * sma, ushort semnum)
{
        /* Count the tasks sleeping until the value of semaphore semnum
         * increases (sem_op < 0 without IPC_NOWAIT); this is the number
         * reported by semctl(..., GETNCNT). */
        int semncnt;
        struct sem_queue *q;

        semncnt = 0;
        list_for_each_entry(q, &sma->sem_pending, list) {
                struct sembuf *sops = q->sops;
                int nsops = q->nsops;
                int i;

                for (i = 0; i < nsops; i++)
                        if (sops[i].sem_num == semnum
                            && (sops[i].sem_op < 0)
                            && !(sops[i].sem_flg & IPC_NOWAIT))
                                semncnt++;
        }
        return semncnt;
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
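
count_semncnt() in this record backs semctl(GETNCNT), walking sma->sem_pending to count sleepers that want semnum's value to rise, and the next record's count_semzcnt() backs GETZCNT, counting sleepers waiting for zero. A small user-space probe of those counters follows; it uses only standard SysV calls, and on a freshly created, uncontended set both counts print as 0.

#include <stdio.h>
#include <sys/types.h>
#include <sys/ipc.h>
#include <sys/sem.h>

int main(void)
{
        /* Private one-semaphore set, removed again before exiting. */
        int semid = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);
        if (semid < 0) {
                perror("semget");
                return 1;
        }

        int ncnt = semctl(semid, 0, GETNCNT);   /* waiters for value to rise */
        int zcnt = semctl(semid, 0, GETZCNT);   /* waiters for value == 0 */
        printf("ncnt=%d zcnt=%d\n", ncnt, zcnt);

        semctl(semid, 0, IPC_RMID);
        return 0;
}

Blocking a second process on a sem_op = -1 operation and re-running the GETNCNT query would show that count move to 1.
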
23,647 | static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
        /* Count the tasks sleeping until the value of semaphore semnum
         * reaches zero (sem_op == 0 without IPC_NOWAIT); this is the
         * number reported by semctl(..., GETZCNT). */
        int semzcnt;
        struct sem_queue *q;

        semzcnt = 0;
        list_for_each_entry(q, &sma->sem_pending, list) {
                struct sembuf *sops = q->sops;
                int nsops = q->nsops;
                int i;

                for (i = 0; i < nsops; i++)
                        if (sops[i].sem_num == semnum
                            && (sops[i].sem_op == 0)
                            && !(sops[i].sem_flg & IPC_NOWAIT))
                                semzcnt++;
        }
        return semzcnt;
}
| DoS | 0 | static int count_semzcnt (struct sem_array * sma, ushort semnum)
{
        /* Count the tasks sleeping until the value of semaphore semnum
         * reaches zero (sem_op == 0 without IPC_NOWAIT); this is the
         * number reported by semctl(..., GETZCNT). */
        int semzcnt;
        struct sem_queue *q;

        semzcnt = 0;
        list_for_each_entry(q, &sma->sem_pending, list) {
                struct sembuf *sops = q->sops;
                int nsops = q->nsops;
                int i;

                for (i = 0; i < nsops; i++)
                        if (sops[i].sem_num == semnum
                            && (sops[i].sem_op == 0)
                            && !(sops[i].sem_flg & IPC_NOWAIT))
                                semzcnt++;
        }
        return semzcnt;
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
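
The hunks above convert every caller from the old array-wide ipc lock to the new sem_lock()/sem_unlock() pair, where a locknum of -1 means "whole array locked" and any other value names the single semaphore that was locked; the pair itself appears in full in the patch recorded for row 23,648 below. As a rough illustration of that two-level protocol, and not of the kernel implementation itself, here is a hedged userspace sketch in C11/pthreads. Every name (slot_array, array_lock, global_held, ...) is invented for the example, global_held stands in for the kernel's spin_is_locked() check, and the memory-ordering fine print of the real patch is deliberately glossed over.

#include <pthread.h>
#include <sched.h>
#include <stdatomic.h>

#define NSLOTS 8

struct slot {
    int value;
    pthread_mutex_t lock;         /* analogue of sem->lock */
};

struct slot_array {
    pthread_mutex_t global;       /* analogue of sma->sem_perm.lock */
    atomic_int global_held;       /* stand-in for spin_is_locked() */
    atomic_int complex_count;     /* analogue of sma->complex_count */
    struct slot slots[NSLOTS];
};

/* Returns the slot index that was locked, or -1 when the whole array
 * was locked; pass the result back to array_unlock(). */
static int array_lock(struct slot_array *a, int slotnum, int nops)
{
again:
    if (nops == 1 && atomic_load(&a->complex_count) == 0) {
        pthread_mutex_lock(&a->slots[slotnum].lock);
        if (atomic_load(&a->complex_count) != 0) {
            /* A complex op arrived while we slept: fall back. */
            pthread_mutex_unlock(&a->slots[slotnum].lock);
            goto lock_array;
        }
        if (atomic_load(&a->global_held)) {
            /* An array-wide holder is active: back off, retry. */
            pthread_mutex_unlock(&a->slots[slotnum].lock);
            sched_yield();
            goto again;
        }
        return slotnum;
    }
lock_array:
    pthread_mutex_lock(&a->global);
    atomic_store(&a->global_held, 1);
    /* Drain each slot lock once so no fast-path holder is still
     * inside its critical section (mirrors spin_unlock_wait()). */
    for (int i = 0; i < NSLOTS; i++) {
        pthread_mutex_lock(&a->slots[i].lock);
        pthread_mutex_unlock(&a->slots[i].lock);
    }
    return -1;
}

static void array_unlock(struct slot_array *a, int locknum)
{
    if (locknum == -1) {
        atomic_store(&a->global_held, 0);
        pthread_mutex_unlock(&a->global);
    } else {
        pthread_mutex_unlock(&a->slots[locknum].lock);
    }
}

The drain loop in the slow path is the load-bearing piece: a fast-path holder always owns its slot mutex, so the array-wide taker cannot finish the drain until every such holder has left its critical section.
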
23,648 | static int get_queue_result(struct sem_queue *q)
{
int error;
error = q->status;
while (unlikely(error == IN_WAKEUP)) {
cpu_relax();
error = q->status;
}
return error;
}
| DoS | 0 | static int get_queue_result(struct sem_queue *q)
{
int error;
error = q->status;
while (unlikely(error == IN_WAKEUP)) {
cpu_relax();
error = q->status;
}
return error;
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
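
The func column of this row, get_queue_result(), is the sleeper's half of a publish-then-finalize handshake: the waker first stores the transient IN_WAKEUP marker into q->status, finishes unlinking the queue entry and waking the task, and only then stores the real result, so the sleeper spins briefly instead of taking a lock. Below is a hedged C11 re-creation of the idea; the sentinel value and field names are invented, and sched_yield() stands in for cpu_relax().

#include <sched.h>
#include <stdatomic.h>

#define IN_WAKEUP_SENTINEL (-1000)   /* hypothetical stand-in for IN_WAKEUP */

struct queue_entry {
    atomic_int status;               /* analogue of sem_queue->status */
};

/* Waker side: publish the transient marker, finish bookkeeping, then
 * store the real result so the sleeper can trust the entry is done. */
static void wake_entry(struct queue_entry *q, int result)
{
    atomic_store_explicit(&q->status, IN_WAKEUP_SENTINEL,
                          memory_order_release);
    /* ... unlink the entry and wake the sleeping thread here ... */
    atomic_store_explicit(&q->status, result, memory_order_release);
}

/* Sleeper side: the same busy-wait loop as get_queue_result(). */
static int read_entry_result(struct queue_entry *q)
{
    int error = atomic_load_explicit(&q->status, memory_order_acquire);
    while (error == IN_WAKEUP_SENTINEL) {
        sched_yield();               /* userspace stand-in for cpu_relax() */
        error = atomic_load_explicit(&q->status, memory_order_acquire);
    }
    return error;
}

In this sketch the acquire load pairs with the waker's release store, so once the sleeper reads anything other than the sentinel it can also rely on the waker's earlier writes being visible.
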
23,649 | static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
struct sem_undo_list *undo_list;
undo_list = current->sysvsem.undo_list;
if (!undo_list) {
undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
if (undo_list == NULL)
return -ENOMEM;
spin_lock_init(&undo_list->lock);
atomic_set(&undo_list->refcnt, 1);
INIT_LIST_HEAD(&undo_list->list_proc);
current->sysvsem.undo_list = undo_list;
}
*undo_listp = undo_list;
return 0;
}
| DoS | 0 | static inline int get_undo_list(struct sem_undo_list **undo_listp)
{
struct sem_undo_list *undo_list;
undo_list = current->sysvsem.undo_list;
if (!undo_list) {
undo_list = kzalloc(sizeof(*undo_list), GFP_KERNEL);
if (undo_list == NULL)
return -ENOMEM;
spin_lock_init(&undo_list->lock);
atomic_set(&undo_list->refcnt, 1);
INIT_LIST_HEAD(&undo_list->list_proc);
current->sysvsem.undo_list = undo_list;
}
*undo_listp = undo_list;
return 0;
}
| [patch identical to the patch shown for row 23,648 above] | CWE-189 | null | null |
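
get_undo_list() in this row allocates the per-task undo list lazily, on first use, and caches the pointer in current; no locking is needed because only the owning task ever creates its own list. A minimal userspace sketch of the same once-per-owner pattern, assuming invented struct task/undo_list types and calloc() as the kzalloc() analogue:

#include <errno.h>
#include <stdlib.h>

struct undo_list {
    int refcnt;                      /* other fields elided */
};

struct task {
    struct undo_list *undo_list;     /* NULL until first use */
};

static int get_or_create_undo_list(struct task *t, struct undo_list **out)
{
    struct undo_list *ul = t->undo_list;

    if (!ul) {
        ul = calloc(1, sizeof(*ul)); /* zeroed, like kzalloc() */
        if (!ul)
            return -ENOMEM;
        ul->refcnt = 1;
        t->undo_list = ul;           /* safe: only the owner writes this */
    }
    *out = ul;
    return 0;
}
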
23,650 | static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
struct sem_undo *un;
assert_spin_locked(&ulp->lock);
un = __lookup_undo(ulp, semid);
if (un) {
list_del_rcu(&un->list_proc);
list_add_rcu(&un->list_proc, &ulp->list_proc);
}
return un;
}
| DoS | 0 | static struct sem_undo *lookup_undo(struct sem_undo_list *ulp, int semid)
{
struct sem_undo *un;
assert_spin_locked(&ulp->lock);
un = __lookup_undo(ulp, semid);
if (un) {
list_del_rcu(&un->list_proc);
list_add_rcu(&un->list_proc, &ulp->list_proc);
}
return un;
}
| [patch identical to the patch shown for row 23,648 above] | CWE-189 | null | null |
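
lookup_undo() in this row adds a small self-organizing-list optimization: after a hit, the entry is moved to the head, so a task that keeps operating on the same semaphore set finds its undo entry in O(1) on the next call. The same move-to-front trick on a plain sentinel-based doubly linked list; the kernel does it with list_del_rcu()/list_add_rcu() under ulp->lock, and the types here are illustrative:

#include <stddef.h>

struct node {
    int semid;
    struct node *prev, *next;
};

struct list {
    struct node head;                /* sentinel; head.next is the MRU entry */
};

static void list_init(struct list *l)
{
    l->head.next = l->head.prev = &l->head;
}

static struct node *lookup_mru(struct list *l, int semid)
{
    for (struct node *n = l->head.next; n != &l->head; n = n->next) {
        if (n->semid != semid)
            continue;
        /* Unlink, then re-insert right after the sentinel. */
        n->prev->next = n->next;
        n->next->prev = n->prev;
        n->next = l->head.next;
        n->prev = &l->head;
        l->head.next->prev = n;
        l->head.next = n;
        return n;
    }
    return NULL;
}
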
23,651 | void sem_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &sem_ids(ns), freeary);
idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
| DoS | 0 | void sem_exit_ns(struct ipc_namespace *ns)
{
free_ipcs(ns, &sem_ids(ns), freeary);
idr_destroy(&ns->ids[IPC_SEM_IDS].ipcs_idr);
}
| [patch identical to the patch shown for row 23,648 above]
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
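Note: the patch field carried by these sem.c records is the fine-grained semtimedop locking rework: a single-semaphore operation takes only that semaphore's spinlock, while complex operations take the array-wide lock and drain the per-semaphore locks. Below is a minimal user-space sketch of that two-level scheme; every identifier (xspinlock, xsem_set, set_lock, ...) is an illustrative stand-in rather than a kernel API, and the memory-ordering details the kernel depends on are simplified to sequentially consistent C11 atomics.

/*
 * Minimal sketch of the two-level locking scheme in the attached patch.
 * All names here are hypothetical stand-ins, not kernel symbols, and
 * later kernels reworked this logic further.
 */
#include <stdatomic.h>
#include <stdbool.h>

typedef struct { atomic_bool locked; } xspinlock;

static void xspin_lock(xspinlock *l)
{
	bool expected = false;
	while (!atomic_compare_exchange_weak(&l->locked, &expected, true))
		expected = false;		/* lost the race: spin and retry */
}

static void xspin_unlock(xspinlock *l)
{
	atomic_store(&l->locked, false);
}

static bool xspin_is_locked(xspinlock *l)
{
	return atomic_load(&l->locked);
}

static void xspin_unlock_wait(xspinlock *l)
{
	while (atomic_load(&l->locked))
		;				/* wait for the current holder */
}

struct xsem {
	int semval;
	xspinlock lock;			/* per-semaphore lock, as in the patch */
};

struct xsem_set {
	xspinlock global_lock;		/* plays the role of sem_perm.lock */
	_Atomic int complex_count;	/* pending multi-sop operations */
	int nsems;
	struct xsem *base;
};

/*
 * Returns the index of the per-semaphore lock taken, or -1 when the
 * whole-set lock was taken -- the same convention the patched
 * sem_lock()/sem_unlock() pair uses.
 */
static int set_lock(struct xsem_set *s, int semnum, int nsops)
{
again:
	if (nsops == 1 && !s->complex_count) {
		struct xsem *sem = &s->base[semnum];

		xspin_lock(&sem->lock);
		/* A complex op may have queued while we spun: fall back. */
		if (s->complex_count) {
			xspin_unlock(&sem->lock);
			goto lock_array;
		}
		/* A global-lock holder is active: back off and retry. */
		if (xspin_is_locked(&s->global_lock)) {
			xspin_unlock(&sem->lock);
			xspin_unlock_wait(&s->global_lock);
			goto again;
		}
		return semnum;
	}
lock_array:
	xspin_lock(&s->global_lock);
	/* Drain every per-semaphore critical section before proceeding. */
	for (int i = 0; i < s->nsems; i++)
		xspin_unlock_wait(&s->base[i].lock);
	return -1;
}

static void set_unlock(struct xsem_set *s, int locknum)
{
	if (locknum == -1)
		xspin_unlock(&s->global_lock);
	else
		xspin_unlock(&s->base[locknum].lock);
}

The -1 return value doubles as the "global lock held" token, which is why the patched sem_unlock() takes a locknum argument everywhere the diff touches it.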
23,652 | void sem_init_ns(struct ipc_namespace *ns)
{
ns->sc_semmsl = SEMMSL;
ns->sc_semmns = SEMMNS;
ns->sc_semopm = SEMOPM;
ns->sc_semmni = SEMMNI;
ns->used_sems = 0;
ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
| DoS | 0 | void sem_init_ns(struct ipc_namespace *ns)
{
ns->sc_semmsl = SEMMSL;
ns->sc_semmns = SEMMNS;
ns->sc_semopm = SEMOPM;
ns->sc_semmni = SEMMNI;
ns->used_sems = 0;
ipc_init_ids(&ns->ids[IPC_SEM_IDS]);
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
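Several hunks in the same patch replace bare ipc_rcu_getref() calls with a checked form that can fail once the array's reference count has already dropped to zero, with the caller reporting -EIDRM. A small stand-alone sketch of that try-get idiom follows; obj and obj_tryget are hypothetical names, not kernel symbols.

/*
 * Checked reference-get: succeeds only while the object is still live.
 */
#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int refcount;	/* 0 means the object is already dying */
};

static bool obj_tryget(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old > 0) {
		/* On failure the CAS reloads 'old' and we re-test it. */
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;	/* reference successfully taken */
	}
	return false;			/* caller backs out with -EIDRM */
}

A caller that sees false drops its RCU read lock and returns -EIDRM, mirroring the semctl_main() and find_alloc_undo() hunks above.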
23,653 | static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct sem_array, sem_perm);
}
| DoS | 0 | static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object(&sem_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct sem_array, sem_perm);
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
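sem_obtain_object() in the record above recovers the enclosing sem_array from the kern_ipc_perm pointer the IDR lookup returns, via container_of(). A self-contained illustration of that macro; the struct names (perm/array) are made up for the example.

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct perm { int id; };
struct array { int nsems; struct perm perm; };

int main(void)
{
	struct array a = { .nsems = 4, .perm = { .id = 7 } };
	struct perm *p = &a.perm;	/* what the IDR lookup hands back */
	struct array *back = container_of(p, struct array, perm);

	printf("nsems=%d id=%d\n", back->nsems, back->perm.id);
	return 0;			/* prints: nsems=4 id=7 */
}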
23,654 | static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct sem_array, sem_perm);
}
| DoS | 0 | static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
struct kern_ipc_perm *ipcp = ipc_obtain_object_check(&sem_ids(ns), id);
if (IS_ERR(ipcp))
return ERR_CAST(ipcp);
return container_of(ipcp, struct sem_array, sem_perm);
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
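The patched sem_obtain_lock() re-checks ipcp->deleted after the lock is acquired, because ipc_rmid() may have removed the set while the caller was spinning. A compact pthread-based sketch of that lookup/lock/re-check pattern; ipc_obj and the NULL return mapping are illustrative, not kernel API.

#include <pthread.h>
#include <stdbool.h>
#include <stddef.h>

struct ipc_obj {
	pthread_mutex_t lock;
	bool deleted;		/* set by the removal path while locked */
};

/*
 * The caller is assumed to hold an RCU-like guarantee that keeps the
 * memory itself alive while we block on the lock; only the logical
 * object may have been deleted in the meantime.
 */
static struct ipc_obj *obtain_lock(struct ipc_obj *o)
{
	pthread_mutex_lock(&o->lock);
	if (!o->deleted)
		return o;			/* still valid, lock is held */
	pthread_mutex_unlock(&o->lock);		/* raced with removal */
	return NULL;				/* kernel returns ERR_PTR(-EINVAL) */
}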
23,655 | static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
| DoS | 0 | static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
{
ipc_rmid(&sem_ids(ns), &s->sem_perm);
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
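Besides adding the per-semaphore spinlock, the patch in this record makes ipc_rcu_getref() failable and teaches callers to bail out with -EIDRM (semctl_main(), find_alloc_undo()) or to warn (sem_getref_and_unlock()). The sketch below illustrates the "take a reference unless the count already reached zero" semantics those checks appear to assume, in the style of kref_get_unless_zero(); obj and get_unless_zero are illustrative names built on C11 atomics, not the kernel's primitives.

/*
 * An RCU reader can still find an object whose last reference is being
 * dropped; the conditional get refuses to resurrect it, so the caller
 * treats the object as already removed (-EIDRM in the patch above).
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

struct obj {
	atomic_int refcount;
};

/* Returns false when the refcount already hit zero. */
static bool get_unless_zero(struct obj *o)
{
	int old = atomic_load(&o->refcount);

	while (old != 0) {
		/* On failure, 'old' is reloaded and the test repeats. */
		if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
			return true;         /* reference taken */
	}
	return false;                        /* lost the race with the free path */
}

int main(void)
{
	struct obj live = { .refcount = 1 };
	struct obj dying = { .refcount = 0 };

	printf("live:  %d\n", get_unless_zero(&live));  /* prints 1 */
	printf("dying: %d\n", get_unless_zero(&dying)); /* prints 0 */
	return 0;
}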
23,656 | static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
struct sem_array *sma;
sma = container_of(ipcp, struct sem_array, sem_perm);
return security_sem_associate(sma, semflg);
}
| DoS | 0 | static inline int sem_security(struct kern_ipc_perm *ipcp, int semflg)
{
struct sem_array *sma;
sma = container_of(ipcp, struct sem_array, sem_perm);
return security_sem_associate(sma, semflg);
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
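The semctl_setval() rework in this record's patch hoists the SEMVMX range check ahead of the lookup, so an out-of-range value now fails with -ERANGE before the semaphore set is found, its permissions checked, or any lock taken. The probe below exercises that ordering from userspace through the standard SysV API; only the union semun definition is supplied locally, since glibc leaves it to the caller, and SEMVMX is assumed to be the usual Linux 32767.

/* Out-of-range SETVAL should fail with ERANGE; a sane value sticks. */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/sem.h>

union semun { int val; struct semid_ds *buf; unsigned short *array; };

int main(void)
{
	union semun arg;
	int id = semget(IPC_PRIVATE, 1, IPC_CREAT | 0600);

	if (id < 0) {
		perror("semget");
		return 1;
	}

	arg.val = 100000;                    /* well past SEMVMX (32767) */
	if (semctl(id, 0, SETVAL, arg) < 0)
		printf("SETVAL %d -> %s\n", arg.val, strerror(errno));

	arg.val = 1;
	if (semctl(id, 0, SETVAL, arg) == 0)
		printf("SETVAL 1 ok, GETVAL = %d\n", semctl(id, 0, GETVAL));

	semctl(id, 0, IPC_RMID);             /* remove the set */
	return 0;
}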
23,657 | static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
struct user_namespace *user_ns = seq_user_ns(s);
struct sem_array *sma = it;
return seq_printf(s,
"%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
sma->sem_perm.key,
sma->sem_perm.id,
sma->sem_perm.mode,
sma->sem_nsems,
from_kuid_munged(user_ns, sma->sem_perm.uid),
from_kgid_munged(user_ns, sma->sem_perm.gid),
from_kuid_munged(user_ns, sma->sem_perm.cuid),
from_kgid_munged(user_ns, sma->sem_perm.cgid),
sma->sem_otime,
sma->sem_ctime);
}
| DoS | 0 | static int sysvipc_sem_proc_show(struct seq_file *s, void *it)
{
struct user_namespace *user_ns = seq_user_ns(s);
struct sem_array *sma = it;
return seq_printf(s,
"%10d %10d %4o %10u %5u %5u %5u %5u %10lu %10lu\n",
sma->sem_perm.key,
sma->sem_perm.id,
sma->sem_perm.mode,
sma->sem_nsems,
from_kuid_munged(user_ns, sma->sem_perm.uid),
from_kgid_munged(user_ns, sma->sem_perm.gid),
from_kuid_munged(user_ns, sma->sem_perm.cuid),
from_kgid_munged(user_ns, sma->sem_perm.cgid),
sma->sem_otime,
sma->sem_ctime);
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
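sysvipc_sem_proc_show() in this record emits one line per semaphore set into /proc/sysvipc/sem, columns in the order of its seq_printf() format: key, id, mode in octal, nsems, the four uid/gid fields, then sem_otime and sem_ctime. Below is a small reader that parses those rows back, assuming the usual single column-header line that the seq_file interface emits first; the local variable names simply mirror the format specifiers.

/* Dump id, size and mode of every SysV semaphore set on the system. */
#include <stdio.h>

int main(void)
{
	int key, id;
	unsigned mode, nsems, uid, gid, cuid, cgid;
	unsigned long sem_otime, sem_ctime;
	char header[256];
	FILE *f = fopen("/proc/sysvipc/sem", "r");

	if (!f) {
		perror("fopen");
		return 1;
	}

	if (!fgets(header, sizeof(header), f)) {  /* skip the column header */
		fclose(f);
		return 1;
	}

	while (fscanf(f, "%d %d %o %u %u %u %u %u %lu %lu",
		      &key, &id, &mode, &nsems, &uid, &gid,
		      &cuid, &cgid, &sem_otime, &sem_ctime) == 10)
		printf("id=%d nsems=%u mode=%04o uid=%u\n",
		       id, nsems, mode, uid);

	fclose(f);
	return 0;
}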
23,658 | static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
int nsops, struct sem_undo *un, int pid)
{
int result, sem_op;
struct sembuf *sop;
struct sem * curr;
for (sop = sops; sop < sops + nsops; sop++) {
curr = sma->sem_base + sop->sem_num;
sem_op = sop->sem_op;
result = curr->semval;
if (!sem_op && result)
goto would_block;
result += sem_op;
if (result < 0)
goto would_block;
if (result > SEMVMX)
goto out_of_range;
if (sop->sem_flg & SEM_UNDO) {
int undo = un->semadj[sop->sem_num] - sem_op;
/*
* Exceeding the undo range is an error.
*/
if (undo < (-SEMAEM - 1) || undo > SEMAEM)
goto out_of_range;
}
curr->semval = result;
}
sop--;
while (sop >= sops) {
sma->sem_base[sop->sem_num].sempid = pid;
if (sop->sem_flg & SEM_UNDO)
un->semadj[sop->sem_num] -= sop->sem_op;
sop--;
}
return 0;
out_of_range:
result = -ERANGE;
goto undo;
would_block:
if (sop->sem_flg & IPC_NOWAIT)
result = -EAGAIN;
else
result = 1;
undo:
sop--;
while (sop >= sops) {
sma->sem_base[sop->sem_num].semval -= sop->sem_op;
sop--;
}
return result;
}
| DoS | 0 | static int try_atomic_semop (struct sem_array * sma, struct sembuf * sops,
int nsops, struct sem_undo *un, int pid)
{
int result, sem_op;
struct sembuf *sop;
struct sem * curr;
for (sop = sops; sop < sops + nsops; sop++) {
curr = sma->sem_base + sop->sem_num;
sem_op = sop->sem_op;
result = curr->semval;
if (!sem_op && result)
goto would_block;
result += sem_op;
if (result < 0)
goto would_block;
if (result > SEMVMX)
goto out_of_range;
if (sop->sem_flg & SEM_UNDO) {
int undo = un->semadj[sop->sem_num] - sem_op;
/*
* Exceeding the undo range is an error.
*/
if (undo < (-SEMAEM - 1) || undo > SEMAEM)
goto out_of_range;
}
curr->semval = result;
}
sop--;
while (sop >= sops) {
sma->sem_base[sop->sem_num].sempid = pid;
if (sop->sem_flg & SEM_UNDO)
un->semadj[sop->sem_num] -= sop->sem_op;
sop--;
}
return 0;
out_of_range:
result = -ERANGE;
goto undo;
would_block:
if (sop->sem_flg & IPC_NOWAIT)
result = -EAGAIN;
else
result = 1;
undo:
sop--;
while (sop >= sops) {
sma->sem_base[sop->sem_num].semval -= sop->sem_op;
sop--;
}
return result;
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
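The patch shown above (and repeated in the records that follow, since it touches several ipc/sem.c functions) replaces the single per-array IPC lock with a two-level scheme: a spinlock embedded in each struct sem for single-operation callers, and the array-wide sem_perm.lock for complex, multi-semaphore operations. The sketch below is a minimal user-space model of that protocol built on C11 atomics; struct toy_sem_array, toy_sem_lock and toy_sem_unlock are invented names for illustration, not kernel APIs, and real spinlocks, RCU and preemption control are deliberately left out.

#include <stdatomic.h>
#include <stdbool.h>

#define NSEMS 4

struct toy_sem {
	atomic_bool lock;          /* per-semaphore fast-path lock */
	int val;
};

struct toy_sem_array {
	atomic_bool global_lock;   /* models sma->sem_perm.lock */
	atomic_int complex_count;  /* pending multi-semaphore ops */
	struct toy_sem sem[NSEMS];
};

static void lock_acquire(atomic_bool *l)
{
	while (atomic_exchange_explicit(l, true, memory_order_acquire))
		;  /* spin */
}

static void lock_release(atomic_bool *l)
{
	atomic_store_explicit(l, false, memory_order_release);
}

/* Analogue of spin_unlock_wait(): spin until the lock is observed free. */
static void lock_wait_unlocked(atomic_bool *l)
{
	while (atomic_load_explicit(l, memory_order_acquire))
		;  /* spin */
}

/* Returns the index that was locked, or -1 when the whole array is held,
 * following the locknum convention the patch introduces. */
static int toy_sem_lock(struct toy_sem_array *sma, int semnum, int nsops)
{
again:
	if (nsops == 1 && atomic_load(&sma->complex_count) == 0) {
		lock_acquire(&sma->sem[semnum].lock);

		/* A complex op was queued while we spun: take the slow path. */
		if (atomic_load(&sma->complex_count) != 0) {
			lock_release(&sma->sem[semnum].lock);
			goto lock_array;
		}
		/* A global holder is active: back off, wait, retry. */
		if (atomic_load(&sma->global_lock)) {
			lock_release(&sma->sem[semnum].lock);
			lock_wait_unlocked(&sma->global_lock);
			goto again;
		}
		return semnum;
	}
lock_array:
	lock_acquire(&sma->global_lock);
	/* Drain every per-semaphore holder before entering the section. */
	for (int i = 0; i < NSEMS; i++)
		lock_wait_unlocked(&sma->sem[i].lock);
	return -1;
}

static void toy_sem_unlock(struct toy_sem_array *sma, int locknum)
{
	if (locknum == -1)
		lock_release(&sma->global_lock);
	else
		lock_release(&sma->sem[locknum].lock);
}

The key invariant matches the patch's own comment block: a fast-path caller that observes either complex_count != 0 or a held global lock always backs out, so the slow path only has to wait for each per-semaphore lock once.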
23,659 | static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
list_del(&q->list);
if (q->nsops > 1)
sma->complex_count--;
}
| DoS | 0 | static void unlink_queue(struct sem_array *sma, struct sem_queue *q)
{
list_del(&q->list);
if (q->nsops > 1)
sma->complex_count--;
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
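unlink_queue above is the bookkeeping half of the fast-path gate: only queued operations that touch more than one semaphore (q->nsops > 1) are counted in complex_count, which is exactly what sem_lock tests before taking a single semaphore's lock. Continuing the hedged toy model from the earlier note, callers might look like this; the names are invented, and in the kernel complex_count is only adjusted under the array lock when a complex operation is queued or unlinked.

/* Single-semaphore update: normally takes only sem[n].lock. */
static void toy_bump_one(struct toy_sem_array *sma, int n)
{
	int locknum = toy_sem_lock(sma, n, 1);
	sma->sem[n].val++;
	toy_sem_unlock(sma, locknum);
}

/* Multi-semaphore update: nsops > 1 forces the array-wide lock.
 * If it had to block, it would be queued and complex_count would be
 * incremented, to be decremented again by the unlink_queue analogue
 * when the queue entry is removed. */
static void toy_bump_all(struct toy_sem_array *sma)
{
	int locknum = toy_sem_lock(sma, 0, NSEMS);
	for (int i = 0; i < NSEMS; i++)
		sma->sem[i].val++;
	toy_sem_unlock(sma, locknum);
}

Passing the returned locknum back to unlock is why the patch changes sem_unlock's signature throughout: the unlocker must release exactly the lock the locker chose.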
23,660 | static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
struct sem_queue *q;
struct list_head *walk;
struct list_head *pending_list;
int semop_completed = 0;
if (semnum == -1)
pending_list = &sma->sem_pending;
else
pending_list = &sma->sem_base[semnum].sem_pending;
again:
walk = pending_list->next;
while (walk != pending_list) {
int error, restart;
q = container_of(walk, struct sem_queue, list);
walk = walk->next;
/* If we are scanning the single sop, per-semaphore list of
* one semaphore and that semaphore is 0, then it is not
* necessary to scan the "alter" entries: simple increments
* that affect only one entry succeed immediately and cannot
* be in the per semaphore pending queue, and decrements
* cannot be successful if the value is already 0.
*/
if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
q->alter)
break;
error = try_atomic_semop(sma, q->sops, q->nsops,
q->undo, q->pid);
/* Does q->sleeper still need to sleep? */
if (error > 0)
continue;
unlink_queue(sma, q);
if (error) {
restart = 0;
} else {
semop_completed = 1;
restart = check_restart(sma, q);
}
wake_up_sem_queue_prepare(pt, q, error);
if (restart)
goto again;
}
return semop_completed;
}
| DoS | 0 | static int update_queue(struct sem_array *sma, int semnum, struct list_head *pt)
{
struct sem_queue *q;
struct list_head *walk;
struct list_head *pending_list;
int semop_completed = 0;
if (semnum == -1)
pending_list = &sma->sem_pending;
else
pending_list = &sma->sem_base[semnum].sem_pending;
again:
walk = pending_list->next;
while (walk != pending_list) {
int error, restart;
q = container_of(walk, struct sem_queue, list);
walk = walk->next;
/* If we are scanning the single sop, per-semaphore list of
* one semaphore and that semaphore is 0, then it is not
* necessary to scan the "alter" entries: simple increments
* that affect only one entry succeed immediately and cannot
* be in the per semaphore pending queue, and decrements
* cannot be successful if the value is already 0.
*/
if (semnum != -1 && sma->sem_base[semnum].semval == 0 &&
q->alter)
break;
error = try_atomic_semop(sma, q->sops, q->nsops,
q->undo, q->pid);
/* Does q->sleeper still need to sleep? */
if (error > 0)
continue;
unlink_queue(sma, q);
if (error) {
restart = 0;
} else {
semop_completed = 1;
restart = check_restart(sma, q);
}
wake_up_sem_queue_prepare(pt, q, error);
if (restart)
goto again;
}
return semop_completed;
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
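update_queue above retries blocked operations whenever a semaphore changes, with two refinements worth spelling out: on a per-semaphore scan it can stop early once semval == 0 (no remaining "alter" entry can succeed), and a completed operation may restart the scan because earlier waiters might now be satisfiable. Below is a hedged single-semaphore model; toy_queue_entry and toy_update_queue are illustrative stand-ins, and the kernel's check_restart heuristics are collapsed into an unconditional restart on every completion.

#include <stdbool.h>

struct toy_queue_entry {
	struct toy_queue_entry *next;
	int delta;    /* the single-sop adjustment this waiter wants */
	bool alter;   /* true if delta != 0 */
};

/* Retry blocked single-sop waiters against *semval; unlink and count
 * the ones that complete.  Simplified: the real update_queue also
 * feeds a wake-up list and handles multi-semaphore operations. */
static int toy_update_queue(int *semval, struct toy_queue_entry **head)
{
	int completed = 0;
	struct toy_queue_entry **pp = head;

	while (*pp) {
		struct toy_queue_entry *q = *pp;

		/* The patch's early exit: at semval == 0, no queued
		 * "alter" entry can succeed -- increments never block
		 * (so they are not queued here), and decrements from 0
		 * must fail. */
		if (*semval == 0 && q->alter)
			break;

		if (*semval + q->delta < 0) {	/* would still block */
			pp = &q->next;
			continue;
		}

		*semval += q->delta;		/* operation completes */
		*pp = q->next;			/* unlink_queue analogue */
		completed++;
		pp = head;			/* conservative restart */
	}
	return completed;
}

The loop terminates because every restart is paid for by an unlinked entry: with n waiters there are at most n restarts, each scanning at most n entries.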
23,661 | static void wake_up_sem_queue_do(struct list_head *pt)
{
struct sem_queue *q, *t;
int did_something;
did_something = !list_empty(pt);
list_for_each_entry_safe(q, t, pt, list) {
wake_up_process(q->sleeper);
/* q can disappear immediately after writing q->status. */
smp_wmb();
q->status = q->pid;
}
if (did_something)
preempt_enable();
}
| DoS | 0 | static void wake_up_sem_queue_do(struct list_head *pt)
{
struct sem_queue *q, *t;
int did_something;
did_something = !list_empty(pt);
list_for_each_entry_safe(q, t, pt, list) {
wake_up_process(q->sleeper);
/* q can disappear immediately after writing q->status. */
smp_wmb();
q->status = q->pid;
}
if (did_something)
preempt_enable();
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
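wake_up_sem_queue_do above, paired with wake_up_sem_queue_prepare in the next record, is a two-phase handshake: prepare marks each waiter IN_WAKEUP while the semaphore lock is still held, and do, after the lock is dropped, wakes the task and publishes the final status as its very last store -- after which the waiter may free its sem_queue. Below is a hedged C11-atomics model of the same handshake; the toy_ names are invented, TOY_IN_WAKEUP plays the role of the kernel's IN_WAKEUP sentinel (real results are always <= 0), and the waiter side corresponds to the kernel helper get_queue_result.

#include <stdatomic.h>

#define TOY_IN_WAKEUP 1   /* sentinel: a wakeup is in progress */

struct toy_waiter {
	atomic_int status;   /* TOY_IN_WAKEUP while transitioning, else result */
};

/* Phase 1 -- runs with the semaphore lock held (wake_up_sem_queue_prepare):
 * mark the waiter so it cannot consume a result and free itself early. */
static void toy_wakeup_prepare(struct toy_waiter *w)
{
	atomic_store_explicit(&w->status, TOY_IN_WAKEUP, memory_order_relaxed);
}

/* Phase 2 -- runs after the lock is dropped (wake_up_sem_queue_do):
 * wake the task, then publish the result.  The release store is the
 * final touch of *w; the waiter may free it immediately afterwards. */
static void toy_wakeup_do(struct toy_waiter *w, int result)
{
	/* wake_up_process(w->sleeper) would go here in the kernel */
	atomic_store_explicit(&w->status, result, memory_order_release);
}

/* Waiter side, the get_queue_result analogue: spin through the brief
 * IN_WAKEUP window, then read the final status. */
static int toy_wait_result(struct toy_waiter *w)
{
	int s;

	while ((s = atomic_load_explicit(&w->status,
					 memory_order_acquire)) == TOY_IN_WAKEUP)
		;  /* cpu_relax() in the kernel */
	return s;
}

This is also why wake_up_sem_queue_prepare disables preemption when it queues the first waiter and wake_up_sem_queue_do re-enables it: the waiter busy-waits through the IN_WAKEUP window, so the waker must not be scheduled out between the two phases.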
23,662 | static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
{
if (list_empty(pt)) {
/*
* Hold preempt off so that we don't get preempted and have the
* wakee busy-wait until we're scheduled back on.
*/
preempt_disable();
}
q->status = IN_WAKEUP;
q->pid = error;
list_add_tail(&q->list, pt);
}
| DoS | 0 | static void wake_up_sem_queue_prepare(struct list_head *pt,
struct sem_queue *q, int error)
{
if (list_empty(pt)) {
/*
* Hold preempt off so that we don't get preempted and have the
* wakee busy-wait until we're scheduled back on.
*/
preempt_disable();
}
q->status = IN_WAKEUP;
q->pid = error;
list_add_tail(&q->list, pt);
}
| @@ -94,6 +94,7 @@
struct sem {
int semval; /* current value */
int sempid; /* pid of last operation */
+ spinlock_t lock; /* spinlock for fine-grained semtimedop */
struct list_head sem_pending; /* pending single-sop operations */
};
@@ -137,7 +138,6 @@ struct sem_undo_list {
#define sem_ids(ns) ((ns)->ids[IPC_SEM_IDS])
-#define sem_unlock(sma) ipc_unlock(&(sma)->sem_perm)
#define sem_checkid(sma, semid) ipc_checkid(&sma->sem_perm, semid)
static int newary(struct ipc_namespace *, struct ipc_params *);
@@ -189,11 +189,90 @@ void __init sem_init (void)
IPC_SEM_IDS, sysvipc_sem_proc_show);
}
+/*
+ * If the request contains only one semaphore operation, and there are
+ * no complex transactions pending, lock only the semaphore involved.
+ * Otherwise, lock the entire semaphore array, since we either have
+ * multiple semaphores in our own semops, or we need to look at
+ * semaphores from other pending complex operations.
+ *
+ * Carefully guard against sma->complex_count changing between zero
+ * and non-zero while we are spinning for the lock. The value of
+ * sma->complex_count cannot change while we are holding the lock,
+ * so sem_unlock should be fine.
+ *
+ * The global lock path checks that all the local locks have been released,
+ * checking each local lock once. This means that the local lock paths
+ * cannot start their critical sections while the global lock is held.
+ */
+static inline int sem_lock(struct sem_array *sma, struct sembuf *sops,
+ int nsops)
+{
+ int locknum;
+ again:
+ if (nsops == 1 && !sma->complex_count) {
+ struct sem *sem = sma->sem_base + sops->sem_num;
+
+ /* Lock just the semaphore we are interested in. */
+ spin_lock(&sem->lock);
+
+ /*
+ * If sma->complex_count was set while we were spinning,
+ * we may need to look at things we did not lock here.
+ */
+ if (unlikely(sma->complex_count)) {
+ spin_unlock(&sem->lock);
+ goto lock_array;
+ }
+
+ /*
+ * Another process is holding the global lock on the
+ * sem_array; we cannot enter our critical section,
+ * but have to wait for the global lock to be released.
+ */
+ if (unlikely(spin_is_locked(&sma->sem_perm.lock))) {
+ spin_unlock(&sem->lock);
+ spin_unlock_wait(&sma->sem_perm.lock);
+ goto again;
+ }
+
+ locknum = sops->sem_num;
+ } else {
+ int i;
+ /*
+ * Lock the semaphore array, and wait for all of the
+ * individual semaphore locks to go away. The code
+ * above ensures no new single-lock holders will enter
+ * their critical section while the array lock is held.
+ */
+ lock_array:
+ spin_lock(&sma->sem_perm.lock);
+ for (i = 0; i < sma->sem_nsems; i++) {
+ struct sem *sem = sma->sem_base + i;
+ spin_unlock_wait(&sem->lock);
+ }
+ locknum = -1;
+ }
+ return locknum;
+}
+
+static inline void sem_unlock(struct sem_array *sma, int locknum)
+{
+ if (locknum == -1) {
+ spin_unlock(&sma->sem_perm.lock);
+ } else {
+ struct sem *sem = sma->sem_base + locknum;
+ spin_unlock(&sem->lock);
+ }
+ rcu_read_unlock();
+}
+
/*
* sem_lock_(check_) routines are called in the paths where the rw_mutex
* is not held.
*/
-static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id)
+static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns,
+ int id, struct sembuf *sops, int nsops, int *locknum)
{
struct kern_ipc_perm *ipcp;
struct sem_array *sma;
@@ -205,15 +284,16 @@ static inline struct sem_array *sem_obtain_lock(struct ipc_namespace *ns, int id
goto err;
}
- spin_lock(&ipcp->lock);
+ sma = container_of(ipcp, struct sem_array, sem_perm);
+ *locknum = sem_lock(sma, sops, nsops);
/* ipc_rmid() may have already freed the ID while sem_lock
* was spinning: verify that the structure is still valid
*/
if (!ipcp->deleted)
return container_of(ipcp, struct sem_array, sem_perm);
- spin_unlock(&ipcp->lock);
+ sem_unlock(sma, *locknum);
sma = ERR_PTR(-EINVAL);
err:
rcu_read_unlock();
@@ -230,17 +310,6 @@ static inline struct sem_array *sem_obtain_object(struct ipc_namespace *ns, int
return container_of(ipcp, struct sem_array, sem_perm);
}
-static inline struct sem_array *sem_lock_check(struct ipc_namespace *ns,
- int id)
-{
- struct kern_ipc_perm *ipcp = ipc_lock_check(&sem_ids(ns), id);
-
- if (IS_ERR(ipcp))
- return ERR_CAST(ipcp);
-
- return container_of(ipcp, struct sem_array, sem_perm);
-}
-
static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns,
int id)
{
@@ -254,31 +323,31 @@ static inline struct sem_array *sem_obtain_object_check(struct ipc_namespace *ns
static inline void sem_lock_and_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
+ rcu_read_lock();
+ sem_lock(sma, NULL, -1);
ipc_rcu_putref(sma);
}
static inline void sem_getref_and_unlock(struct sem_array *sma)
{
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_putref(struct sem_array *sma)
{
- ipc_lock_by_ptr(&sma->sem_perm);
- ipc_rcu_putref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock_and_putref(sma);
+ sem_unlock(sma, -1);
}
/*
* Call inside the rcu read section.
*/
static inline void sem_getref(struct sem_array *sma)
{
- spin_lock(&(sma)->sem_perm.lock);
- ipc_rcu_getref(sma);
- ipc_unlock(&(sma)->sem_perm);
+ sem_lock(sma, NULL, -1);
+ WARN_ON_ONCE(!ipc_rcu_getref(sma));
+ sem_unlock(sma, -1);
}
static inline void sem_rmid(struct ipc_namespace *ns, struct sem_array *s)
@@ -371,15 +440,17 @@ static int newary(struct ipc_namespace *ns, struct ipc_params *params)
sma->sem_base = (struct sem *) &sma[1];
- for (i = 0; i < nsems; i++)
+ for (i = 0; i < nsems; i++) {
INIT_LIST_HEAD(&sma->sem_base[i].sem_pending);
+ spin_lock_init(&sma->sem_base[i].lock);
+ }
sma->complex_count = 0;
INIT_LIST_HEAD(&sma->sem_pending);
INIT_LIST_HEAD(&sma->list_id);
sma->sem_nsems = nsems;
sma->sem_ctime = get_seconds();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
return sma->sem_perm.id;
}
@@ -818,7 +889,7 @@ static void freeary(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp)
/* Remove the semaphore set from the IDR */
sem_rmid(ns, sma);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
ns->used_sems -= sma->sem_nsems;
@@ -947,7 +1018,6 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
struct sem_array *sma;
struct sem* curr;
int err;
- int nsems;
struct list_head tasks;
int val;
#if defined(CONFIG_64BIT) && defined(__BIG_ENDIAN)
@@ -958,31 +1028,39 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
val = arg;
#endif
- sma = sem_lock_check(ns, semid);
- if (IS_ERR(sma))
- return PTR_ERR(sma);
+ if (val > SEMVMX || val < 0)
+ return -ERANGE;
INIT_LIST_HEAD(&tasks);
- nsems = sma->sem_nsems;
- err = -EACCES;
- if (ipcperms(ns, &sma->sem_perm, S_IWUGO))
- goto out_unlock;
+ rcu_read_lock();
+ sma = sem_obtain_object_check(ns, semid);
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
+ return PTR_ERR(sma);
+ }
+
+ if (semnum < 0 || semnum >= sma->sem_nsems) {
+ rcu_read_unlock();
+ return -EINVAL;
+ }
+
+
+ if (ipcperms(ns, &sma->sem_perm, S_IWUGO)) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
err = security_sem_semctl(sma, SETVAL);
- if (err)
- goto out_unlock;
+ if (err) {
+ rcu_read_unlock();
+ return -EACCES;
+ }
- err = -EINVAL;
- if(semnum < 0 || semnum >= nsems)
- goto out_unlock;
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
- err = -ERANGE;
- if (val > SEMVMX || val < 0)
- goto out_unlock;
-
assert_spin_locked(&sma->sem_perm.lock);
list_for_each_entry(un, &sma->list_id, list_id)
un->semadj[semnum] = 0;
@@ -992,11 +1070,9 @@ static int semctl_setval(struct ipc_namespace *ns, int semid, int semnum,
sma->sem_ctime = get_seconds();
/* maybe some queued-up processes were waiting for this */
do_smart_update(sma, NULL, 0, 0, &tasks);
- err = 0;
-out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
- return err;
+ return 0;
}
static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
@@ -1051,16 +1127,16 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
- }
+ } else
+ sem_lock(sma, NULL, -1);
- spin_lock(&sma->sem_perm.lock);
for (i = 0; i < sma->sem_nsems; i++)
sem_io[i] = sma->sem_base[i].semval;
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = 0;
if(copy_to_user(array, sem_io, nsems*sizeof(ushort)))
err = -EFAULT;
@@ -1071,7 +1147,10 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
int i;
struct sem_undo *un;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ return -EIDRM;
+ }
rcu_read_unlock();
if(nsems > SEMMSL_FAST) {
@@ -1097,7 +1176,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
err = -EIDRM;
goto out_free;
}
@@ -1124,7 +1203,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
goto out_wakeup;
}
- spin_lock(&sma->sem_perm.lock);
+ sem_lock(sma, NULL, -1);
curr = &sma->sem_base[semnum];
switch (cmd) {
@@ -1143,7 +1222,7 @@ static int semctl_main(struct ipc_namespace *ns, int semid, int semnum,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1211,11 +1290,11 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
switch(cmd){
case IPC_RMID:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
freeary(ns, ipcp);
goto out_up;
case IPC_SET:
- ipc_lock_object(&sma->sem_perm);
+ sem_lock(sma, NULL, -1);
err = ipc_update_perm(&semid64.sem_perm, ipcp);
if (err)
goto out_unlock;
@@ -1228,7 +1307,7 @@ static int semctl_down(struct ipc_namespace *ns, int semid,
}
out_unlock:
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out_up:
up_write(&sem_ids(ns).rw_mutex);
return err;
@@ -1340,8 +1419,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
struct sem_array *sma;
struct sem_undo_list *ulp;
struct sem_undo *un, *new;
- int nsems;
- int error;
+ int nsems, error;
error = get_undo_list(&ulp);
if (error)
@@ -1363,7 +1441,11 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
}
nsems = sma->sem_nsems;
- ipc_rcu_getref(sma);
+ if (!ipc_rcu_getref(sma)) {
+ rcu_read_unlock();
+ un = ERR_PTR(-EIDRM);
+ goto out;
+ }
rcu_read_unlock();
/* step 2: allocate new undo structure */
@@ -1376,7 +1458,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
/* step 3: Acquire the lock on semaphore array */
sem_lock_and_putref(sma);
if (sma->sem_perm.deleted) {
- sem_unlock(sma);
+ sem_unlock(sma, -1);
kfree(new);
un = ERR_PTR(-EIDRM);
goto out;
@@ -1404,7 +1486,7 @@ static struct sem_undo *find_alloc_undo(struct ipc_namespace *ns, int semid)
success:
spin_unlock(&ulp->lock);
rcu_read_lock();
- sem_unlock(sma);
+ sem_unlock(sma, -1);
out:
return un;
}
@@ -1444,7 +1526,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
struct sembuf fast_sops[SEMOPM_FAST];
struct sembuf* sops = fast_sops, *sop;
struct sem_undo *un;
- int undos = 0, alter = 0, max;
+ int undos = 0, alter = 0, max, locknum;
struct sem_queue queue;
unsigned long jiffies_left = 0;
struct ipc_namespace *ns;
@@ -1488,22 +1570,23 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
alter = 1;
}
+ INIT_LIST_HEAD(&tasks);
+
if (undos) {
+ /* On success, find_alloc_undo takes the rcu_read_lock */
un = find_alloc_undo(ns, semid);
if (IS_ERR(un)) {
error = PTR_ERR(un);
goto out_free;
}
- } else
+ } else {
un = NULL;
+ rcu_read_lock();
+ }
- INIT_LIST_HEAD(&tasks);
-
- rcu_read_lock();
sma = sem_obtain_object_check(ns, semid);
if (IS_ERR(sma)) {
- if (un)
- rcu_read_unlock();
+ rcu_read_unlock();
error = PTR_ERR(sma);
goto out_free;
}
@@ -1534,23 +1617,9 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
* "un" itself is guaranteed by rcu.
*/
error = -EIDRM;
- ipc_lock_object(&sma->sem_perm);
- if (un) {
- if (un->semid == -1) {
- rcu_read_unlock();
- goto out_unlock_free;
- } else {
- /*
- * rcu lock can be released, "un" cannot disappear:
- * - sem_lock is acquired, thus IPC_RMID is
- * impossible.
- * - exit_sem is impossible, it always operates on
- * current (or a dead task).
- */
-
- rcu_read_unlock();
- }
- }
+ locknum = sem_lock(sma, sops, nsops);
+ if (un && un->semid == -1)
+ goto out_unlock_free;
error = try_atomic_semop (sma, sops, nsops, un, task_tgid_vnr(current));
if (error <= 0) {
@@ -1591,7 +1660,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
sleep_again:
current->state = TASK_INTERRUPTIBLE;
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
if (timeout)
jiffies_left = schedule_timeout(jiffies_left);
@@ -1613,7 +1682,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
goto out_free;
}
- sma = sem_obtain_lock(ns, semid);
+ sma = sem_obtain_lock(ns, semid, sops, nsops, &locknum);
/*
* Wait until it's guaranteed that no wakeup_sem_queue_do() is ongoing.
@@ -1652,7 +1721,7 @@ SYSCALL_DEFINE4(semtimedop, int, semid, struct sembuf __user *, tsops,
unlink_queue(sma, &queue);
out_unlock_free:
- sem_unlock(sma);
+ sem_unlock(sma, locknum);
out_wakeup:
wake_up_sem_queue_do(&tasks);
out_free:
@@ -1716,8 +1785,7 @@ void exit_sem(struct task_struct *tsk)
struct sem_array *sma;
struct sem_undo *un;
struct list_head tasks;
- int semid;
- int i;
+ int semid, i;
rcu_read_lock();
un = list_entry_rcu(ulp->list_proc.next,
@@ -1726,23 +1794,26 @@ void exit_sem(struct task_struct *tsk)
semid = -1;
else
semid = un->semid;
- rcu_read_unlock();
- if (semid == -1)
+ if (semid == -1) {
+ rcu_read_unlock();
break;
+ }
- sma = sem_lock_check(tsk->nsproxy->ipc_ns, un->semid);
-
+ sma = sem_obtain_object_check(tsk->nsproxy->ipc_ns, un->semid);
/* exit_sem raced with IPC_RMID, nothing to do */
- if (IS_ERR(sma))
+ if (IS_ERR(sma)) {
+ rcu_read_unlock();
continue;
+ }
+ sem_lock(sma, NULL, -1);
un = __lookup_undo(ulp, semid);
if (un == NULL) {
/* exit_sem raced with IPC_RMID+semget() that created
* exactly the same semid. Nothing to do.
*/
- sem_unlock(sma);
+ sem_unlock(sma, -1);
continue;
}
@@ -1782,7 +1853,7 @@ void exit_sem(struct task_struct *tsk)
/* maybe some queued-up processes were waiting for this */
INIT_LIST_HEAD(&tasks);
do_smart_update(sma, NULL, 0, 1, &tasks);
- sem_unlock(sma);
+ sem_unlock(sma, -1);
wake_up_sem_queue_do(&tasks);
kfree_rcu(un, rcu); | CWE-189 | null | null |
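The fine-grained locking patch above replaces the single per-array lock with a per-semaphore fast path (nsops == 1 and no complex operations pending) and an array-wide slow path that drains every per-semaphore lock. Below is a minimal user-space sketch of that handshake, assuming C11 atomics; tinylock, sem_set and set_lock are illustrative stand-ins rather than kernel types, complex_count upkeep is elided, and the memory-ordering guarantees that real spinlocks provide are glossed over here.

#include <stdatomic.h>
#include <stdbool.h>

struct tinylock { atomic_int held; };

static void tl_lock(struct tinylock *l)
{
    int expected = 0;
    while (!atomic_compare_exchange_weak(&l->held, &expected, 1))
        expected = 0;               /* lost the race (or spurious failure): retry */
}

static void tl_unlock(struct tinylock *l)
{
    atomic_store(&l->held, 0);
}

static bool tl_is_locked(struct tinylock *l)
{
    return atomic_load(&l->held) != 0;
}

static void tl_unlock_wait(struct tinylock *l)
{
    while (tl_is_locked(l))
        ;                           /* spin until the current holder is done */
}

#define NSEMS 4

struct sem_set {
    struct tinylock global;          /* array-wide lock (sem_perm.lock) */
    atomic_int complex_count;        /* pending complex ops (upkeep elided) */
    struct tinylock sem_lock[NSEMS]; /* one lock per semaphore */
};

/* Mirrors sem_lock()'s locknum convention: the semaphore index for
 * the fast path, -1 when the whole array had to be locked. */
static int set_lock(struct sem_set *s, int nsops, int semnum)
{
again:
    if (nsops == 1 && atomic_load(&s->complex_count) == 0) {
        tl_lock(&s->sem_lock[semnum]);
        if (atomic_load(&s->complex_count) != 0) {
            tl_unlock(&s->sem_lock[semnum]);
            goto lock_global;       /* raced with a complex op */
        }
        if (tl_is_locked(&s->global)) {
            tl_unlock(&s->sem_lock[semnum]);
            tl_unlock_wait(&s->global);
            goto again;             /* global holder finished; retry fast path */
        }
        return semnum;
    }
lock_global:
    tl_lock(&s->global);
    for (int i = 0; i < NSEMS; i++)
        tl_unlock_wait(&s->sem_lock[i]);  /* drain fast-path holders */
    return -1;
}

static void set_unlock(struct sem_set *s, int locknum)
{
    if (locknum == -1)
        tl_unlock(&s->global);
    else
        tl_unlock(&s->sem_lock[locknum]);
}

int main(void)
{
    struct sem_set s = {0};
    int ln = set_lock(&s, 1, 2);    /* single op: per-semaphore lock */
    set_unlock(&s, ln);
    ln = set_lock(&s, 3, 0);        /* multi-op: array-wide lock */
    set_unlock(&s, ln);
    return 0;
}

The design point survives the simplification: a fast-path holder that observes either complex_count != 0 or a held global lock backs out and retries, which is what allows the slow path's unlock_wait() loop to guarantee exclusion.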
23,663 | void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
{
out->key = in->key;
SET_UID(out->uid, in->uid);
SET_GID(out->gid, in->gid);
SET_UID(out->cuid, in->cuid);
SET_GID(out->cgid, in->cgid);
out->mode = in->mode;
out->seq = in->seq;
}
| DoS | 0 | void ipc64_perm_to_ipc_perm (struct ipc64_perm *in, struct ipc_perm *out)
{
out->key = in->key;
SET_UID(out->uid, in->uid);
SET_GID(out->gid, in->gid);
SET_UID(out->cuid, in->cuid);
SET_GID(out->cgid, in->cgid);
out->mode = in->mode;
out->seq = in->seq;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
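The patch carried by these records turns the plain int refcount into an atomic_t and has ipc_rcu_getref() refuse to take a reference once the count has already reached zero (atomic_inc_not_zero). Below is a hedged C11 stand-in for the same pattern; obj, obj_get and obj_put are hypothetical names, not kernel symbols.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdlib.h>

struct obj {
    atomic_int refcount;
    /* payload ... */
};

/* Take a reference only if the count is still non-zero; returns
 * false when the object is already on its way to being freed. */
static bool obj_get(struct obj *o)
{
    int old = atomic_load(&o->refcount);
    while (old != 0) {
        if (atomic_compare_exchange_weak(&o->refcount, &old, old + 1))
            return true;    /* reference taken */
        /* CAS failure reloaded 'old'; the loop re-tests for zero */
    }
    return false;           /* raced with the final put */
}

static void obj_put(struct obj *o)
{
    if (atomic_fetch_sub(&o->refcount, 1) == 1)
        free(o);            /* we dropped the last reference */
}

int main(void)
{
    struct obj *o = calloc(1, sizeof(*o));
    if (!o)
        return 1;
    atomic_store(&o->refcount, 1);  /* creator's reference */
    if (obj_get(o))                 /* like ipc_rcu_getref() succeeding */
        obj_put(o);
    obj_put(o);                     /* final put frees the object */
    return 0;
}

This is also why the diff adds WARN_ON_ONCE(!ipc_rcu_getref(sma)) and the new -EIDRM bail-outs in semctl_main() and find_alloc_undo(): callers can now detect that they raced with the final put instead of silently resurrecting a dying object.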
23,664 | int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
kuid_t euid;
kgid_t egid;
int id;
int next_id = ids->next_id;
if (size > IPCMNI)
size = IPCMNI;
if (ids->in_use >= size)
return -ENOSPC;
idr_preload(GFP_KERNEL);
spin_lock_init(&new->lock);
new->deleted = 0;
rcu_read_lock();
spin_lock(&new->lock);
id = idr_alloc(&ids->ipcs_idr, new,
(next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
GFP_NOWAIT);
idr_preload_end();
if (id < 0) {
spin_unlock(&new->lock);
rcu_read_unlock();
return id;
}
ids->in_use++;
current_euid_egid(&euid, &egid);
new->cuid = new->uid = euid;
new->gid = new->cgid = egid;
if (next_id < 0) {
new->seq = ids->seq++;
if (ids->seq > ids->seq_max)
ids->seq = 0;
} else {
new->seq = ipcid_to_seqx(next_id);
ids->next_id = -1;
}
new->id = ipc_buildid(id, new->seq);
return id;
}
| DoS | 0 | int ipc_addid(struct ipc_ids* ids, struct kern_ipc_perm* new, int size)
{
kuid_t euid;
kgid_t egid;
int id;
int next_id = ids->next_id;
if (size > IPCMNI)
size = IPCMNI;
if (ids->in_use >= size)
return -ENOSPC;
idr_preload(GFP_KERNEL);
spin_lock_init(&new->lock);
new->deleted = 0;
rcu_read_lock();
spin_lock(&new->lock);
id = idr_alloc(&ids->ipcs_idr, new,
(next_id < 0) ? 0 : ipcid_to_idx(next_id), 0,
GFP_NOWAIT);
idr_preload_end();
if (id < 0) {
spin_unlock(&new->lock);
rcu_read_unlock();
return id;
}
ids->in_use++;
current_euid_egid(&euid, &egid);
new->cuid = new->uid = euid;
new->gid = new->cgid = egid;
if (next_id < 0) {
new->seq = ids->seq++;
if (ids->seq > ids->seq_max)
ids->seq = 0;
} else {
new->seq = ipcid_to_seqx(next_id);
ids->next_id = -1;
}
new->id = ipc_buildid(id, new->seq);
return id;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
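ipc_addid() stamps each object with a per-slot sequence number and publishes new->id = ipc_buildid(id, new->seq). Assuming the classic layout where SEQ_MULTIPLIER equals IPCMNI (32768), the encoding and the stale-id detection it enables can be sketched as follows; the _demo names are illustrative, not kernel symbols.

#include <assert.h>

#define IPCMNI         32768
#define SEQ_MULTIPLIER IPCMNI

static int buildid_demo(int idx, int seq)  { return SEQ_MULTIPLIER * seq + idx; }
static int id_to_idx_demo(int id)          { return id % SEQ_MULTIPLIER; }
static int id_to_seqx_demo(int id)         { return id / SEQ_MULTIPLIER; }

int main(void)
{
    int id = buildid_demo(42, 7);          /* slot 42, 8th generation */
    assert(id_to_idx_demo(id) == 42);
    assert(id_to_seqx_demo(id) == 7);

    /* After IPC_RMID recycles slot 42 with seq 8, the stale id no
     * longer matches, which is what ipc_checkid() detects. */
    int recycled = buildid_demo(42, 8);
    assert(id_to_idx_demo(id) == id_to_idx_demo(recycled));
    assert(id_to_seqx_demo(id) != id_to_seqx_demo(recycled));
    return 0;
}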
23,665 | static int ipc_check_perms(struct ipc_namespace *ns,
struct kern_ipc_perm *ipcp,
struct ipc_ops *ops,
struct ipc_params *params)
{
int err;
if (ipcperms(ns, ipcp, params->flg))
err = -EACCES;
else {
err = ops->associate(ipcp, params->flg);
if (!err)
err = ipcp->id;
}
return err;
}
| DoS | 0 | static int ipc_check_perms(struct ipc_namespace *ns,
struct kern_ipc_perm *ipcp,
struct ipc_ops *ops,
struct ipc_params *params)
{
int err;
if (ipcperms(ns, ipcp, params->flg))
err = -EACCES;
else {
err = ops->associate(ipcp, params->flg);
if (!err)
err = ipcp->id;
}
return err;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,666 | static void ipc_do_vfree(struct work_struct *work)
{
vfree(container_of(work, struct ipc_rcu_sched, work));
}
| DoS | 0 | static void ipc_do_vfree(struct work_struct *work)
{
vfree(container_of(work, struct ipc_rcu_sched, work));
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,667 | static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
{
struct kern_ipc_perm *ipc;
int next_id;
int total;
for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
ipc = idr_find(&ids->ipcs_idr, next_id);
if (ipc == NULL)
continue;
if (ipc->key != key) {
total++;
continue;
}
ipc_lock_by_ptr(ipc);
return ipc;
}
return NULL;
}
| DoS | 0 | static struct kern_ipc_perm *ipc_findkey(struct ipc_ids *ids, key_t key)
{
struct kern_ipc_perm *ipc;
int next_id;
int total;
for (total = 0, next_id = 0; total < ids->in_use; next_id++) {
ipc = idr_find(&ids->ipcs_idr, next_id);
if (ipc == NULL)
continue;
if (ipc->key != key) {
total++;
continue;
}
ipc_lock_by_ptr(ipc);
return ipc;
}
return NULL;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
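ipc_findkey() probes slots in index order but bounds the walk by ids->in_use rather than by table size, so holes left by removed ids cost only a skipped iteration. A small runnable rendering of the same loop, with a plain array standing in for the idr:

#include <stdio.h>
#include <stddef.h>

struct perm { int key; };

int main(void)
{
    struct perm a = { .key = 100 }, b = { .key = 200 };
    struct perm *slots[8] = { NULL, &a, NULL, NULL, &b, NULL, NULL, NULL };
    int in_use = 2;                 /* number of live entries */
    int wanted = 200;

    for (int total = 0, idx = 0; total < in_use; idx++) {
        struct perm *p = slots[idx];
        if (p == NULL)
            continue;               /* hole left by a removed id */
        if (p->key != wanted) {
            total++;                /* live entry, wrong key */
            continue;
        }
        printf("key %d found at slot %d\n", wanted, idx);
        return 0;
    }
    printf("key %d not found\n", wanted);
    return 0;
}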
23,668 | static int __init ipc_init(void)
{
sem_init();
msg_init();
shm_init();
register_hotmemory_notifier(&ipc_memory_nb);
register_ipcns_notifier(&init_ipc_ns);
return 0;
}
| DoS | 0 | static int __init ipc_init(void)
{
sem_init();
msg_init();
shm_init();
register_hotmemory_notifier(&ipc_memory_nb);
register_ipcns_notifier(&init_ipc_ns);
return 0;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,669 | struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
{
struct kern_ipc_perm *out;
out = ipc_lock(ids, id);
if (IS_ERR(out))
return out;
if (ipc_checkid(out, id)) {
ipc_unlock(out);
return ERR_PTR(-EIDRM);
}
return out;
}
| DoS | 0 | struct kern_ipc_perm *ipc_lock_check(struct ipc_ids *ids, int id)
{
struct kern_ipc_perm *out;
out = ipc_lock(ids, id);
if (IS_ERR(out))
return out;
if (ipc_checkid(out, id)) {
ipc_unlock(out);
return ERR_PTR(-EIDRM);
}
return out;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
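ipc_lock_check() locks first and validates afterwards, for the same reason sem_obtain_lock() in the patch re-tests ipcp->deleted: the object can be removed between the unlocked lookup and the moment the lock is finally held. A minimal pthread sketch of that lookup-lock-revalidate shape follows; struct entry and lookup_locked are hypothetical, and the kernel performs the unlocked lookup under RCU rather than through a bare pointer.

#include <pthread.h>
#include <stddef.h>

struct entry {
    pthread_mutex_t lock;
    int id;             /* packed index+sequence, as in ipc_buildid() */
    int deleted;        /* set by the remover while holding ->lock */
};

/* Lock the slot, then confirm it still holds what we looked up. */
static struct entry *lookup_locked(struct entry *slot, int wanted_id)
{
    if (slot == NULL)
        return NULL;
    pthread_mutex_lock(&slot->lock);
    /* Between the (unlocked) lookup and this point, IPC_RMID may
     * have deleted the object or recycled the slot: re-validate. */
    if (slot->deleted || slot->id != wanted_id) {
        pthread_mutex_unlock(&slot->lock);
        return NULL;
    }
    return slot;        /* returned locked and validated */
}

int main(void)
{
    struct entry e = { .lock = PTHREAD_MUTEX_INITIALIZER, .id = 42 };
    struct entry *got = lookup_locked(&e, 42);
    if (got)
        pthread_mutex_unlock(&got->lock);
    return got ? 0 : 1;
}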
23,670 | static int ipc_memory_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);
switch (action) {
case MEM_ONLINE: /* memory successfully brought online */
case MEM_OFFLINE: /* or offline: it's time to recompute msgmni */
/*
* This is done by invoking the ipcns notifier chain with the
* IPC_MEMCHANGED event.
* In order not to keep the lock on the hotplug memory chain
* for too long, queue a work item that will, when woken up,
* activate the ipcns notification chain.
* No need to keep several ipc work items on the queue.
*/
if (!work_pending(&ipc_memory_wq))
schedule_work(&ipc_memory_wq);
break;
case MEM_GOING_ONLINE:
case MEM_GOING_OFFLINE:
case MEM_CANCEL_ONLINE:
case MEM_CANCEL_OFFLINE:
default:
break;
}
return NOTIFY_OK;
}
| DoS | 0 | static int ipc_memory_callback(struct notifier_block *self,
unsigned long action, void *arg)
{
static DECLARE_WORK(ipc_memory_wq, ipc_memory_notifier);
switch (action) {
case MEM_ONLINE: /* memory successfully brought online */
case MEM_OFFLINE: /* or offline: it's time to recompute msgmni */
/*
* This is done by invoking the ipcns notifier chain with the
* IPC_MEMCHANGED event.
* In order not to keep the lock on the hotplug memory chain
* for too long, queue a work item that will, when woken up,
* activate the ipcns notification chain.
* No need to keep several ipc work items on the queue.
*/
if (!work_pending(&ipc_memory_wq))
schedule_work(&ipc_memory_wq);
break;
case MEM_GOING_ONLINE:
case MEM_GOING_OFFLINE:
case MEM_CANCEL_ONLINE:
case MEM_CANCEL_OFFLINE:
default:
break;
}
return NOTIFY_OK;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
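ipc_memory_callback() tests work_pending() before calling schedule_work(), so a burst of memory-hotplug notifications collapses into a single queued recomputation. A runnable stand-in using a C11 atomic flag; in the kernel, schedule_work() is already idempotent, so the check there is an optimization rather than a correctness requirement.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool pending;     /* stand-in for work_pending() state */
static int queued_runs;         /* how many times work was queued */

static void schedule_work_demo(void)
{
    /* Only the first request since the last run queues the item;
     * later requests find pending already set and coalesce. */
    if (atomic_exchange(&pending, true))
        return;
    queued_runs++;              /* stand-in for schedule_work() */
}

static void work_fn_demo(void)
{
    atomic_store(&pending, false);  /* re-arm, then do the real work */
}

int main(void)
{
    for (int i = 0; i < 5; i++)
        schedule_work_demo();   /* five notifications in a burst ... */
    printf("queued %d run(s)\n", queued_runs);  /* ... one queued run */

    work_fn_demo();             /* the worker executed */
    schedule_work_demo();       /* a later notification queues again */
    printf("queued %d run(s)\n", queued_runs);
    return 0;
}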
23,671 | static void ipc_memory_notifier(struct work_struct *work)
{
ipcns_notify(IPCNS_MEMCHANGED);
}
| DoS | 0 | static void ipc_memory_notifier(struct work_struct *work)
{
ipcns_notify(IPCNS_MEMCHANGED);
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,672 | struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
{
struct kern_ipc_perm *out;
int lid = ipcid_to_idx(id);
out = idr_find(&ids->ipcs_idr, lid);
if (!out)
return ERR_PTR(-EINVAL);
return out;
}
| DoS | 0 | struct kern_ipc_perm *ipc_obtain_object(struct ipc_ids *ids, int id)
{
struct kern_ipc_perm *out;
int lid = ipcid_to_idx(id);
out = idr_find(&ids->ipcs_idr, lid);
if (!out)
return ERR_PTR(-EINVAL);
return out;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,673 | struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id)
{
struct kern_ipc_perm *out = ipc_obtain_object(ids, id);
if (IS_ERR(out))
goto out;
if (ipc_checkid(out, id))
return ERR_PTR(-EIDRM);
out:
return out;
}
| DoS | 0 | struct kern_ipc_perm *ipc_obtain_object_check(struct ipc_ids *ids, int id)
{
struct kern_ipc_perm *out = ipc_obtain_object(ids, id);
if (IS_ERR(out))
goto out;
if (ipc_checkid(out, id))
return ERR_PTR(-EIDRM);
out:
return out;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,674 | void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
int lid = ipcid_to_idx(ipcp->id);
idr_remove(&ids->ipcs_idr, lid);
ids->in_use--;
ipcp->deleted = 1;
return;
}
| DoS | 0 | void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
{
int lid = ipcid_to_idx(ipcp->id);
idr_remove(&ids->ipcs_idr, lid);
ids->in_use--;
ipcp->deleted = 1;
return;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,675 | static void ipc_schedule_free(struct rcu_head *head)
{
struct ipc_rcu_grace *grace;
struct ipc_rcu_sched *sched;
grace = container_of(head, struct ipc_rcu_grace, rcu);
sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
data[0]);
INIT_WORK(&sched->work, ipc_do_vfree);
schedule_work(&sched->work);
}
| DoS | 0 | static void ipc_schedule_free(struct rcu_head *head)
{
struct ipc_rcu_grace *grace;
struct ipc_rcu_sched *sched;
grace = container_of(head, struct ipc_rcu_grace, rcu);
sched = container_of(&(grace->data[0]), struct ipc_rcu_sched,
data[0]);
INIT_WORK(&sched->work, ipc_do_vfree);
schedule_work(&sched->work);
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,676 | int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
{
kuid_t uid = make_kuid(current_user_ns(), in->uid);
kgid_t gid = make_kgid(current_user_ns(), in->gid);
if (!uid_valid(uid) || !gid_valid(gid))
return -EINVAL;
out->uid = uid;
out->gid = gid;
out->mode = (out->mode & ~S_IRWXUGO)
| (in->mode & S_IRWXUGO);
return 0;
}
| DoS | 0 | int ipc_update_perm(struct ipc64_perm *in, struct kern_ipc_perm *out)
{
kuid_t uid = make_kuid(current_user_ns(), in->uid);
kgid_t gid = make_kgid(current_user_ns(), in->gid);
if (!uid_valid(uid) || !gid_valid(gid))
return -EINVAL;
out->uid = uid;
out->gid = gid;
out->mode = (out->mode & ~S_IRWXUGO)
| (in->mode & S_IRWXUGO);
return 0;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
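ipc_update_perm() merges modes as out->mode = (out->mode & ~S_IRWXUGO) | (in->mode & S_IRWXUGO): only the 0777 permission bits can be replaced, and every other bit on the object survives. A tiny runnable check of that merge; S_IRWXUGO is a kernel-only macro, so it is re-derived here from the userspace masks.

#include <assert.h>
#include <sys/stat.h>

/* The kernel's S_IRWXUGO is the 0777 permission mask. */
#define S_IRWXUGO (S_IRWXU | S_IRWXG | S_IRWXO)

int main(void)
{
    mode_t old_mode  = 02640;   /* setgid bit + rw-r----- */
    mode_t requested = 0777;    /* caller asks for rwxrwxrwx */

    mode_t merged = (old_mode & ~(mode_t)S_IRWXUGO)
                  | (requested & (mode_t)S_IRWXUGO);

    assert((merged & S_IRWXUGO) == requested);      /* perms replaced */
    assert((merged & ~(mode_t)S_IRWXUGO) == 02000); /* setgid preserved */
    return 0;
}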
23,677 | struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
struct ipc_ids *ids, int id, int cmd,
struct ipc64_perm *perm, int extra_perm)
{
struct kern_ipc_perm *ipcp;
ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm);
if (IS_ERR(ipcp))
goto out;
spin_lock(&ipcp->lock);
out:
return ipcp;
}
| DoS | 0 | struct kern_ipc_perm *ipcctl_pre_down(struct ipc_namespace *ns,
struct ipc_ids *ids, int id, int cmd,
struct ipc64_perm *perm, int extra_perm)
{
struct kern_ipc_perm *ipcp;
ipcp = ipcctl_pre_down_nolock(ns, ids, id, cmd, perm, extra_perm);
if (IS_ERR(ipcp))
goto out;
spin_lock(&ipcp->lock);
out:
return ipcp;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,678 | struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
struct ipc_ids *ids, int id, int cmd,
struct ipc64_perm *perm, int extra_perm)
{
kuid_t euid;
int err = -EPERM;
struct kern_ipc_perm *ipcp;
down_write(&ids->rw_mutex);
rcu_read_lock();
ipcp = ipc_obtain_object_check(ids, id);
if (IS_ERR(ipcp)) {
err = PTR_ERR(ipcp);
goto out_up;
}
audit_ipc_obj(ipcp);
if (cmd == IPC_SET)
audit_ipc_set_perm(extra_perm, perm->uid,
perm->gid, perm->mode);
euid = current_euid();
if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid) ||
ns_capable(ns->user_ns, CAP_SYS_ADMIN))
return ipcp;
out_up:
/*
* Unsuccessful lookup, unlock and return
* the corresponding error.
*/
rcu_read_unlock();
up_write(&ids->rw_mutex);
return ERR_PTR(err);
}
| DoS | 0 | struct kern_ipc_perm *ipcctl_pre_down_nolock(struct ipc_namespace *ns,
struct ipc_ids *ids, int id, int cmd,
struct ipc64_perm *perm, int extra_perm)
{
kuid_t euid;
int err = -EPERM;
struct kern_ipc_perm *ipcp;
down_write(&ids->rw_mutex);
rcu_read_lock();
ipcp = ipc_obtain_object_check(ids, id);
if (IS_ERR(ipcp)) {
err = PTR_ERR(ipcp);
goto out_up;
}
audit_ipc_obj(ipcp);
if (cmd == IPC_SET)
audit_ipc_set_perm(extra_perm, perm->uid,
perm->gid, perm->mode);
euid = current_euid();
if (uid_eq(euid, ipcp->cuid) || uid_eq(euid, ipcp->uid) ||
ns_capable(ns->user_ns, CAP_SYS_ADMIN))
return ipcp;
out_up:
/*
* Unsuccessful lookup, unlock and return
* the corresponding error.
*/
rcu_read_unlock();
up_write(&ids->rw_mutex);
return ERR_PTR(err);
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,679 | int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params)
{
if (params->key == IPC_PRIVATE)
return ipcget_new(ns, ids, ops, params);
else
return ipcget_public(ns, ids, ops, params);
}
| DoS | 0 | int ipcget(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params)
{
if (params->key == IPC_PRIVATE)
return ipcget_new(ns, ids, ops, params);
else
return ipcget_public(ns, ids, ops, params);
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,680 | static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params)
{
int err;
down_write(&ids->rw_mutex);
err = ops->getnew(ns, params);
up_write(&ids->rw_mutex);
return err;
}
| DoS | 0 | static int ipcget_new(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params)
{
int err;
down_write(&ids->rw_mutex);
err = ops->getnew(ns, params);
up_write(&ids->rw_mutex);
return err;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,681 | static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params)
{
struct kern_ipc_perm *ipcp;
int flg = params->flg;
int err;
/*
* Take the lock as a writer since we are potentially going to add
* a new entry + read locks are not "upgradable"
*/
down_write(&ids->rw_mutex);
ipcp = ipc_findkey(ids, params->key);
if (ipcp == NULL) {
/* key not used */
if (!(flg & IPC_CREAT))
err = -ENOENT;
else
err = ops->getnew(ns, params);
} else {
/* ipc object has been locked by ipc_findkey() */
if (flg & IPC_CREAT && flg & IPC_EXCL)
err = -EEXIST;
else {
err = 0;
if (ops->more_checks)
err = ops->more_checks(ipcp, params);
if (!err)
/*
* ipc_check_perms returns the IPC id on
* success
*/
err = ipc_check_perms(ns, ipcp, ops, params);
}
ipc_unlock(ipcp);
}
up_write(&ids->rw_mutex);
return err;
}
| DoS | 0 | static int ipcget_public(struct ipc_namespace *ns, struct ipc_ids *ids,
struct ipc_ops *ops, struct ipc_params *params)
{
struct kern_ipc_perm *ipcp;
int flg = params->flg;
int err;
/*
* Take the lock as a writer since we are potentially going to add
* a new entry + read locks are not "upgradable"
*/
down_write(&ids->rw_mutex);
ipcp = ipc_findkey(ids, params->key);
if (ipcp == NULL) {
/* key not used */
if (!(flg & IPC_CREAT))
err = -ENOENT;
else
err = ops->getnew(ns, params);
} else {
/* ipc object has been locked by ipc_findkey() */
if (flg & IPC_CREAT && flg & IPC_EXCL)
err = -EEXIST;
else {
err = 0;
if (ops->more_checks)
err = ops->more_checks(ipcp, params);
if (!err)
/*
* ipc_check_perms returns the IPC id on
* success
*/
err = ipc_check_perms(ns, ipcp, ops, params);
}
ipc_unlock(ipcp);
}
up_write(&ids->rw_mutex);
return err;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
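Record 23,681's ipcget_public() encodes the standard SysV create-or-lookup contract: a missing key without IPC_CREAT yields -ENOENT, while an existing key requested with IPC_CREAT | IPC_EXCL yields -EEXIST. A compact sketch of just that flag logic, with a boolean standing in for the ipc_findkey() result (the flag values match Linux's octal definitions, the helper name is hypothetical):

#include <errno.h>
#include <stdio.h>

#define IPC_CREAT 01000
#define IPC_EXCL  02000

/* Returns 0 when the request may proceed, a negative errno otherwise. */
static int ipcget_check(int exists, int flg)
{
    if (!exists)
        return (flg & IPC_CREAT) ? 0 : -ENOENT;
    if ((flg & IPC_CREAT) && (flg & IPC_EXCL))
        return -EEXIST;
    return 0;   /* existing object; permission checks would follow */
}

int main(void)
{
    printf("%d\n", ipcget_check(0, 0));                    /* -ENOENT */
    printf("%d\n", ipcget_check(0, IPC_CREAT));            /* 0 */
    printf("%d\n", ipcget_check(1, IPC_CREAT | IPC_EXCL)); /* -EEXIST */
    return 0;
}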
23,682 | int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
{
kuid_t euid = current_euid();
int requested_mode, granted_mode;
audit_ipc_obj(ipcp);
requested_mode = (flag >> 6) | (flag >> 3) | flag;
granted_mode = ipcp->mode;
if (uid_eq(euid, ipcp->cuid) ||
uid_eq(euid, ipcp->uid))
granted_mode >>= 6;
else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
granted_mode >>= 3;
/* is there some bit set in requested_mode but not in granted_mode? */
if ((requested_mode & ~granted_mode & 0007) &&
!ns_capable(ns->user_ns, CAP_IPC_OWNER))
return -1;
return security_ipc_permission(ipcp, flag);
}
| DoS | 0 | int ipcperms(struct ipc_namespace *ns, struct kern_ipc_perm *ipcp, short flag)
{
kuid_t euid = current_euid();
int requested_mode, granted_mode;
audit_ipc_obj(ipcp);
requested_mode = (flag >> 6) | (flag >> 3) | flag;
granted_mode = ipcp->mode;
if (uid_eq(euid, ipcp->cuid) ||
uid_eq(euid, ipcp->uid))
granted_mode >>= 6;
else if (in_group_p(ipcp->cgid) || in_group_p(ipcp->gid))
granted_mode >>= 3;
/* is there some bit set in requested_mode but not in granted_mode? */
if ((requested_mode & ~granted_mode & 0007) &&
!ns_capable(ns->user_ns, CAP_IPC_OWNER))
return -1;
return security_ipc_permission(ipcp, flag);
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
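The permission test in ipcperms() above is pure bit arithmetic: (flag >> 6) | (flag >> 3) | flag folds the requested owner/group/other bits into the low rwx triplet, and the object's mode is shifted by 6 or 3 so the triplet that applies to the caller lands in the same position. A userspace model of that arithmetic, with the uid/gid comparisons stubbed out as booleans:

#include <stdio.h>

static int perms_ok(short flag, short mode, int is_owner, int in_group)
{
    int requested_mode = (flag >> 6) | (flag >> 3) | flag;
    int granted_mode = mode;

    if (is_owner)
        granted_mode >>= 6;   /* use the owner rwx triplet */
    else if (in_group)
        granted_mode >>= 3;   /* use the group rwx triplet */

    /* any bit requested but not granted means access is denied */
    return !(requested_mode & ~granted_mode & 0007);
}

int main(void)
{
    /* mode 0640: owner rw-, group r--, other --- */
    printf("owner write: %d\n", perms_ok(0200, 0640, 1, 0)); /* 1 */
    printf("group write: %d\n", perms_ok(0020, 0640, 0, 1)); /* 0 */
    printf("other read:  %d\n", perms_ok(0004, 0640, 0, 0)); /* 0 */
    return 0;
}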
23,683 | void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
{
out->key = in->key;
out->uid = from_kuid_munged(current_user_ns(), in->uid);
out->gid = from_kgid_munged(current_user_ns(), in->gid);
out->cuid = from_kuid_munged(current_user_ns(), in->cuid);
out->cgid = from_kgid_munged(current_user_ns(), in->cgid);
out->mode = in->mode;
out->seq = in->seq;
}
| DoS | 0 | void kernel_to_ipc64_perm (struct kern_ipc_perm *in, struct ipc64_perm *out)
{
out->key = in->key;
out->uid = from_kuid_munged(current_user_ns(), in->uid);
out->gid = from_kgid_munged(current_user_ns(), in->gid);
out->cuid = from_kuid_munged(current_user_ns(), in->cuid);
out->cgid = from_kgid_munged(current_user_ns(), in->cgid);
out->mode = in->mode;
out->seq = in->seq;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,684 | static inline int rcu_use_vmalloc(int size)
{
/* Too big for a single page? */
if (HDRLEN_KMALLOC + size > PAGE_SIZE)
return 1;
return 0;
}
| DoS | 0 | static inline int rcu_use_vmalloc(int size)
{
/* Too big for a single page? */
if (HDRLEN_KMALLOC + size > PAGE_SIZE)
return 1;
return 0;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
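rcu_use_vmalloc() above picks the allocator by asking whether the object still fits in a single page once the rcu header is prepended, which is why the threshold is PAGE_SIZE minus the header rather than PAGE_SIZE itself. A small model of that decision; the header size and page size below are illustrative stand-ins for the kernel's HDRLEN_KMALLOC and PAGE_SIZE:

#include <stdio.h>

#define MODEL_PAGE_SIZE      4096
#define MODEL_HDRLEN_KMALLOC 16    /* assumed aligned header size */

static int use_vmalloc(int size)
{
    /* too big for a single page once the header is prepended? */
    return MODEL_HDRLEN_KMALLOC + size > MODEL_PAGE_SIZE;
}

int main(void)
{
    printf("size 4080 -> %s\n", use_vmalloc(4080) ? "vmalloc" : "kmalloc");
    printf("size 4081 -> %s\n", use_vmalloc(4081) ? "vmalloc" : "kmalloc");
    return 0;
}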
23,685 | static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
int ret;
struct seq_file *seq;
struct ipc_proc_iter *iter;
ret = -ENOMEM;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
goto out;
ret = seq_open(file, &sysvipc_proc_seqops);
if (ret)
goto out_kfree;
seq = file->private_data;
seq->private = iter;
iter->iface = PDE(inode)->data;
iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
out:
return ret;
out_kfree:
kfree(iter);
goto out;
}
| DoS | 0 | static int sysvipc_proc_open(struct inode *inode, struct file *file)
{
int ret;
struct seq_file *seq;
struct ipc_proc_iter *iter;
ret = -ENOMEM;
iter = kmalloc(sizeof(*iter), GFP_KERNEL);
if (!iter)
goto out;
ret = seq_open(file, &sysvipc_proc_seqops);
if (ret)
goto out_kfree;
seq = file->private_data;
seq->private = iter;
iter->iface = PDE(inode)->data;
iter->ns = get_ipc_ns(current->nsproxy->ipc_ns);
out:
return ret;
out_kfree:
kfree(iter);
goto out;
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,686 | static int sysvipc_proc_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct ipc_proc_iter *iter = seq->private;
put_ipc_ns(iter->ns);
return seq_release_private(inode, file);
}
| DoS | 0 | static int sysvipc_proc_release(struct inode *inode, struct file *file)
{
struct seq_file *seq = file->private_data;
struct ipc_proc_iter *iter = seq->private;
put_ipc_ns(iter->ns);
return seq_release_private(inode, file);
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,687 | static int sysvipc_proc_show(struct seq_file *s, void *it)
{
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
if (it == SEQ_START_TOKEN)
return seq_puts(s, iface->header);
return iface->show(s, it);
}
| DoS | 0 | static int sysvipc_proc_show(struct seq_file *s, void *it)
{
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
if (it == SEQ_START_TOKEN)
return seq_puts(s, iface->header);
return iface->show(s, it);
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,688 | static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
struct ipc_ids *ids;
ids = &iter->ns->ids[iface->ids];
/*
* Take the lock - this will be released by the corresponding
* call to stop().
*/
down_read(&ids->rw_mutex);
/* pos < 0 is invalid */
if (*pos < 0)
return NULL;
/* pos == 0 means header */
if (*pos == 0)
return SEQ_START_TOKEN;
/* Find the (pos-1)th ipc */
return sysvipc_find_ipc(ids, *pos - 1, pos);
}
| DoS | 0 | static void *sysvipc_proc_start(struct seq_file *s, loff_t *pos)
{
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
struct ipc_ids *ids;
ids = &iter->ns->ids[iface->ids];
/*
* Take the lock - this will be released by the corresponding
* call to stop().
*/
down_read(&ids->rw_mutex);
/* pos < 0 is invalid */
if (*pos < 0)
return NULL;
/* pos == 0 means header */
if (*pos == 0)
return SEQ_START_TOKEN;
/* Find the (pos-1)th ipc */
return sysvipc_find_ipc(ids, *pos - 1, pos);
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
23,689 | static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
struct kern_ipc_perm *ipc = it;
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
struct ipc_ids *ids;
/* If we had a locked structure, release it */
if (ipc && ipc != SEQ_START_TOKEN)
ipc_unlock(ipc);
ids = &iter->ns->ids[iface->ids];
/* Release the lock we took in start() */
up_read(&ids->rw_mutex);
}
| DoS | 0 | static void sysvipc_proc_stop(struct seq_file *s, void *it)
{
struct kern_ipc_perm *ipc = it;
struct ipc_proc_iter *iter = s->private;
struct ipc_proc_iface *iface = iter->iface;
struct ipc_ids *ids;
/* If we had a locked structure, release it */
if (ipc && ipc != SEQ_START_TOKEN)
ipc_unlock(ipc);
ids = &iter->ns->ids[iface->ids];
/* Release the lock we took in start() */
up_read(&ids->rw_mutex);
}
| @@ -439,9 +439,9 @@ void ipc_rmid(struct ipc_ids *ids, struct kern_ipc_perm *ipcp)
* NULL is returned if the allocation fails
*/
-void* ipc_alloc(int size)
+void *ipc_alloc(int size)
{
- void* out;
+ void *out;
if(size > PAGE_SIZE)
out = vmalloc(size);
else
@@ -478,7 +478,7 @@ void ipc_free(void* ptr, int size)
*/
struct ipc_rcu_hdr
{
- int refcount;
+ atomic_t refcount;
int is_vmalloc;
void *data[0];
};
@@ -516,39 +516,41 @@ static inline int rcu_use_vmalloc(int size)
* @size: size desired
*
* Allocate memory for the rcu header structure + the object.
- * Returns the pointer to the object.
- * NULL is returned if the allocation fails.
+ * Returns the pointer to the object or NULL upon failure.
*/
-
-void* ipc_rcu_alloc(int size)
+void *ipc_rcu_alloc(int size)
{
- void* out;
- /*
+ void *out;
+
+ /*
* We prepend the allocation with the rcu struct, and
- * workqueue if necessary (for vmalloc).
+ * workqueue if necessary (for vmalloc).
*/
if (rcu_use_vmalloc(size)) {
out = vmalloc(HDRLEN_VMALLOC + size);
- if (out) {
- out += HDRLEN_VMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_VMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 1;
} else {
out = kmalloc(HDRLEN_KMALLOC + size, GFP_KERNEL);
- if (out) {
- out += HDRLEN_KMALLOC;
- container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
- container_of(out, struct ipc_rcu_hdr, data)->refcount = 1;
- }
+ if (!out)
+ goto done;
+
+ out += HDRLEN_KMALLOC;
+ container_of(out, struct ipc_rcu_hdr, data)->is_vmalloc = 0;
}
+ /* set reference counter no matter what kind of allocation was done */
+ atomic_set(&container_of(out, struct ipc_rcu_hdr, data)->refcount, 1);
+done:
return out;
}
-void ipc_rcu_getref(void *ptr)
+int ipc_rcu_getref(void *ptr)
{
- container_of(ptr, struct ipc_rcu_hdr, data)->refcount++;
+ return atomic_inc_not_zero(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount);
}
static void ipc_do_vfree(struct work_struct *work)
@@ -578,7 +580,7 @@ static void ipc_schedule_free(struct rcu_head *head)
void ipc_rcu_putref(void *ptr)
{
- if (--container_of(ptr, struct ipc_rcu_hdr, data)->refcount > 0)
+ if (!atomic_dec_and_test(&container_of(ptr, struct ipc_rcu_hdr, data)->refcount))
return;
if (container_of(ptr, struct ipc_rcu_hdr, data)->is_vmalloc) { | CWE-189 | null | null |
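Records 23,685 through 23,689 share one locking discipline: sysvipc_proc_start() takes ids->rw_mutex for reading and the matching sysvipc_proc_stop() releases it, so the whole seq_file walk runs under the lock. A minimal userspace sketch of that start/next/stop contract, with a pthread rwlock standing in for the kernel's rw_mutex (all names here are illustrative):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t ids_lock = PTHREAD_RWLOCK_INITIALIZER;
static int ids[] = { 3, 1, 4, 1, 5 };
static const size_t nids = sizeof(ids) / sizeof(ids[0]);

static int *iter_start(size_t *pos)
{
    pthread_rwlock_rdlock(&ids_lock);   /* held until iter_stop() */
    return *pos < nids ? &ids[*pos] : NULL;
}

static int *iter_next(size_t *pos)
{
    ++*pos;
    return *pos < nids ? &ids[*pos] : NULL;
}

static void iter_stop(void)
{
    pthread_rwlock_unlock(&ids_lock);   /* matches the rdlock in start */
}

int main(void)
{
    size_t pos = 0;

    for (int *p = iter_start(&pos); p; p = iter_next(&pos))
        printf("%d\n", *p);
    iter_stop();
    return 0;
}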
23,690 | static int __ip_append_data(struct sock *sk,
struct flowi4 *fl4,
struct sk_buff_head *queue,
struct inet_cork *cork,
struct page_frag *pfrag,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
struct ip_options *opt = cork->opt;
int hh_len;
int exthdrlen;
int mtu;
int copy;
int err;
int offset = 0;
unsigned int maxfraglen, fragheaderlen;
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
skb = skb_peek_tail(queue);
exthdrlen = !skb ? rt->dst.header_len : 0;
mtu = cork->fragsize;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
if (cork->length + length > 0xFFFF - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
mtu-exthdrlen);
return -EMSGSIZE;
}
/*
* transhdrlen > 0 means that this is the first fragment and we wish
* it won't be fragmented in the future.
*/
if (transhdrlen &&
length + fragheaderlen <= mtu &&
rt->dst.dev->features & NETIF_F_V4_CSUM &&
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
cork->length += length;
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
maxfraglen, flags);
if (err)
goto error;
return 0;
}
/* So, what's going on in the loop below?
*
* We use calculated fragment length to generate chained skb,
* each of segments is IP fragment ready for sending to network after
* adding appropriate IP header.
*/
if (!skb)
goto alloc_new_skb;
while (length > 0) {
/* Check if the remaining data fits into current packet. */
copy = mtu - skb->len;
if (copy < length)
copy = maxfraglen - skb->len;
if (copy <= 0) {
char *data;
unsigned int datalen;
unsigned int fraglen;
unsigned int fraggap;
unsigned int alloclen;
struct sk_buff *skb_prev;
alloc_new_skb:
skb_prev = skb;
if (skb_prev)
fraggap = skb_prev->len - maxfraglen;
else
fraggap = 0;
/*
* If remaining data exceeds the mtu,
* we know we need more fragment(s).
*/
datalen = length + fraggap;
if (datalen > mtu - fragheaderlen)
datalen = maxfraglen - fragheaderlen;
fraglen = datalen + fragheaderlen;
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
else
alloclen = fraglen;
alloclen += exthdrlen;
/* The last fragment gets additional space at tail.
* Note, with MSG_MORE we overallocate on fragments,
* because we have no idea what fragment will be
* the last.
*/
if (datalen == length + fraggap)
alloclen += rt->dst.trailer_len;
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len + 15,
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (atomic_read(&sk->sk_wmem_alloc) <=
2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len + 15, 1,
sk->sk_allocation);
if (unlikely(skb == NULL))
err = -ENOBUFS;
else
/* only the initial fragment is
time stamped */
cork->tx_flags = 0;
}
if (skb == NULL)
goto error;
/*
* Fill in the control structures
*/
skb->ip_summed = csummode;
skb->csum = 0;
skb_reserve(skb, hh_len);
skb_shinfo(skb)->tx_flags = cork->tx_flags;
/*
* Find where to start putting bytes.
*/
data = skb_put(skb, fraglen + exthdrlen);
skb_set_network_header(skb, exthdrlen);
skb->transport_header = (skb->network_header +
fragheaderlen);
data += fragheaderlen + exthdrlen;
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(
skb_prev, maxfraglen,
data + transhdrlen, fraggap, 0);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
copy = datalen - transhdrlen - fraggap;
if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
}
offset += copy;
length -= datalen - fraggap;
transhdrlen = 0;
exthdrlen = 0;
csummode = CHECKSUM_NONE;
/*
* Put the packet on the pending queue.
*/
__skb_queue_tail(queue, skb);
continue;
}
if (copy > length)
copy = length;
if (!(rt->dst.dev->features&NETIF_F_SG)) {
unsigned int off;
off = skb->len;
if (getfrag(from, skb_put(skb, copy),
offset, copy, off, skb) < 0) {
__skb_trim(skb, off);
err = -EFAULT;
goto error;
}
} else {
int i = skb_shinfo(skb)->nr_frags;
err = -ENOMEM;
if (!sk_page_frag_refill(sk, pfrag))
goto error;
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
err = -EMSGSIZE;
if (i == MAX_SKB_FRAGS)
goto error;
__skb_fill_page_desc(skb, i, pfrag->page,
pfrag->offset, 0);
skb_shinfo(skb)->nr_frags = ++i;
get_page(pfrag->page);
}
copy = min_t(int, copy, pfrag->size - pfrag->offset);
if (getfrag(from,
page_address(pfrag->page) + pfrag->offset,
offset, copy, skb->len, skb) < 0)
goto error_efault;
pfrag->offset += copy;
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
atomic_add(copy, &sk->sk_wmem_alloc);
}
offset += copy;
length -= copy;
}
return 0;
error_efault:
err = -EFAULT;
error:
cork->length -= length;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
return err;
}
| DoS +Priv Mem. Corr. | 0 | static int __ip_append_data(struct sock *sk,
struct flowi4 *fl4,
struct sk_buff_head *queue,
struct inet_cork *cork,
struct page_frag *pfrag,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
unsigned int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
struct ip_options *opt = cork->opt;
int hh_len;
int exthdrlen;
int mtu;
int copy;
int err;
int offset = 0;
unsigned int maxfraglen, fragheaderlen;
int csummode = CHECKSUM_NONE;
struct rtable *rt = (struct rtable *)cork->dst;
skb = skb_peek_tail(queue);
exthdrlen = !skb ? rt->dst.header_len : 0;
mtu = cork->fragsize;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
if (cork->length + length > 0xFFFF - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
mtu-exthdrlen);
return -EMSGSIZE;
}
/*
* transhdrlen > 0 means that this is the first fragment and we wish
* it won't be fragmented in the future.
*/
if (transhdrlen &&
length + fragheaderlen <= mtu &&
rt->dst.dev->features & NETIF_F_V4_CSUM &&
!exthdrlen)
csummode = CHECKSUM_PARTIAL;
cork->length += length;
if (((length > mtu) || (skb && skb_is_gso(skb))) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO) && !rt->dst.header_len) {
err = ip_ufo_append_data(sk, queue, getfrag, from, length,
hh_len, fragheaderlen, transhdrlen,
maxfraglen, flags);
if (err)
goto error;
return 0;
}
/* So, what's going on in the loop below?
*
* We use calculated fragment length to generate chained skb,
* each of segments is IP fragment ready for sending to network after
* adding appropriate IP header.
*/
if (!skb)
goto alloc_new_skb;
while (length > 0) {
/* Check if the remaining data fits into current packet. */
copy = mtu - skb->len;
if (copy < length)
copy = maxfraglen - skb->len;
if (copy <= 0) {
char *data;
unsigned int datalen;
unsigned int fraglen;
unsigned int fraggap;
unsigned int alloclen;
struct sk_buff *skb_prev;
alloc_new_skb:
skb_prev = skb;
if (skb_prev)
fraggap = skb_prev->len - maxfraglen;
else
fraggap = 0;
/*
* If remaining data exceeds the mtu,
* we know we need more fragment(s).
*/
datalen = length + fraggap;
if (datalen > mtu - fragheaderlen)
datalen = maxfraglen - fragheaderlen;
fraglen = datalen + fragheaderlen;
if ((flags & MSG_MORE) &&
!(rt->dst.dev->features&NETIF_F_SG))
alloclen = mtu;
else
alloclen = fraglen;
alloclen += exthdrlen;
/* The last fragment gets additional space at tail.
* Note, with MSG_MORE we overallocate on fragments,
* because we have no idea what fragment will be
* the last.
*/
if (datalen == length + fraggap)
alloclen += rt->dst.trailer_len;
if (transhdrlen) {
skb = sock_alloc_send_skb(sk,
alloclen + hh_len + 15,
(flags & MSG_DONTWAIT), &err);
} else {
skb = NULL;
if (atomic_read(&sk->sk_wmem_alloc) <=
2 * sk->sk_sndbuf)
skb = sock_wmalloc(sk,
alloclen + hh_len + 15, 1,
sk->sk_allocation);
if (unlikely(skb == NULL))
err = -ENOBUFS;
else
/* only the initial fragment is
time stamped */
cork->tx_flags = 0;
}
if (skb == NULL)
goto error;
/*
* Fill in the control structures
*/
skb->ip_summed = csummode;
skb->csum = 0;
skb_reserve(skb, hh_len);
skb_shinfo(skb)->tx_flags = cork->tx_flags;
/*
* Find where to start putting bytes.
*/
data = skb_put(skb, fraglen + exthdrlen);
skb_set_network_header(skb, exthdrlen);
skb->transport_header = (skb->network_header +
fragheaderlen);
data += fragheaderlen + exthdrlen;
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(
skb_prev, maxfraglen,
data + transhdrlen, fraggap, 0);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
data += fraggap;
pskb_trim_unique(skb_prev, maxfraglen);
}
copy = datalen - transhdrlen - fraggap;
if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
err = -EFAULT;
kfree_skb(skb);
goto error;
}
offset += copy;
length -= datalen - fraggap;
transhdrlen = 0;
exthdrlen = 0;
csummode = CHECKSUM_NONE;
/*
* Put the packet on the pending queue.
*/
__skb_queue_tail(queue, skb);
continue;
}
if (copy > length)
copy = length;
if (!(rt->dst.dev->features&NETIF_F_SG)) {
unsigned int off;
off = skb->len;
if (getfrag(from, skb_put(skb, copy),
offset, copy, off, skb) < 0) {
__skb_trim(skb, off);
err = -EFAULT;
goto error;
}
} else {
int i = skb_shinfo(skb)->nr_frags;
err = -ENOMEM;
if (!sk_page_frag_refill(sk, pfrag))
goto error;
if (!skb_can_coalesce(skb, i, pfrag->page,
pfrag->offset)) {
err = -EMSGSIZE;
if (i == MAX_SKB_FRAGS)
goto error;
__skb_fill_page_desc(skb, i, pfrag->page,
pfrag->offset, 0);
skb_shinfo(skb)->nr_frags = ++i;
get_page(pfrag->page);
}
copy = min_t(int, copy, pfrag->size - pfrag->offset);
if (getfrag(from,
page_address(pfrag->page) + pfrag->offset,
offset, copy, skb->len, skb) < 0)
goto error_efault;
pfrag->offset += copy;
skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
skb->len += copy;
skb->data_len += copy;
skb->truesize += copy;
atomic_add(copy, &sk->sk_wmem_alloc);
}
offset += copy;
length -= copy;
}
return 0;
error_efault:
err = -EFAULT;
error:
cork->length -= length;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
return err;
}
| @@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
- skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- /* specify the length of each IP datagram fragment */
- skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
__skb_queue_tail(queue, skb);
+ } else if (skb_is_gso(skb)) {
+ goto append;
}
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ /* specify the length of each IP datagram fragment */
+ skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
} | CWE-264 | null | null |
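__ip_append_data() above sizes every non-final fragment with maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen: the IPv4 fragment offset field counts 8-byte units, so each fragment's payload must be rounded down to a multiple of 8. A quick check of that arithmetic for a couple of MTUs, assuming a 20-byte header with no IP options:

#include <stdio.h>

int main(void)
{
    unsigned int fragheaderlen = 20;   /* sizeof(struct iphdr), no options */
    unsigned int mtus[] = { 1500, 1006 };

    for (int i = 0; i < 2; i++) {
        unsigned int mtu = mtus[i];
        unsigned int maxfraglen =
            ((mtu - fragheaderlen) & ~7u) + fragheaderlen;

        printf("mtu=%u -> maxfraglen=%u (payload %u bytes, %%8 == %u)\n",
               mtu, maxfraglen, maxfraglen - fragheaderlen,
               (maxfraglen - fragheaderlen) % 8);
    }
    return 0;
}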
23,691 | struct sk_buff *__ip_make_skb(struct sock *sk,
struct flowi4 *fl4,
struct sk_buff_head *queue,
struct inet_cork *cork)
{
struct sk_buff *skb, *tmp_skb;
struct sk_buff **tail_skb;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct ip_options *opt = NULL;
struct rtable *rt = (struct rtable *)cork->dst;
struct iphdr *iph;
__be16 df = 0;
__u8 ttl;
if ((skb = __skb_dequeue(queue)) == NULL)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
/* move skb->data to ip header from ext header */
if (skb->data < skb_network_header(skb))
__skb_pull(skb, skb_network_offset(skb));
while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
__skb_pull(tmp_skb, skb_network_header_len(skb));
*tail_skb = tmp_skb;
tail_skb = &(tmp_skb->next);
skb->len += tmp_skb->len;
skb->data_len += tmp_skb->len;
skb->truesize += tmp_skb->truesize;
tmp_skb->destructor = NULL;
tmp_skb->sk = NULL;
}
/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
* the frame generated here to be fragmented. No matter how transforms
* change the size of the packet, it will come out.
*/
if (inet->pmtudisc < IP_PMTUDISC_DO)
skb->local_df = 1;
/* DF bit is set when we want to see DF on outgoing frames.
* If local_df is set too, we still allow to fragment this frame
* locally. */
if (inet->pmtudisc >= IP_PMTUDISC_DO ||
(skb->len <= dst_mtu(&rt->dst) &&
ip_dont_fragment(sk, &rt->dst)))
df = htons(IP_DF);
if (cork->flags & IPCORK_OPT)
opt = cork->opt;
if (rt->rt_type == RTN_MULTICAST)
ttl = inet->mc_ttl;
else
ttl = ip_select_ttl(inet, &rt->dst);
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 5;
iph->tos = inet->tos;
iph->frag_off = df;
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
ip_copy_addrs(iph, fl4);
ip_select_ident(skb, &rt->dst, sk);
if (opt) {
iph->ihl += opt->optlen>>2;
ip_options_build(skb, opt, cork->addr, rt, 0);
}
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
* on dst refcount
*/
cork->dst = NULL;
skb_dst_set(skb, &rt->dst);
if (iph->protocol == IPPROTO_ICMP)
icmp_out_count(net, ((struct icmphdr *)
skb_transport_header(skb))->type);
ip_cork_release(cork);
out:
return skb;
}
| DoS +Priv Mem. Corr. | 0 | struct sk_buff *__ip_make_skb(struct sock *sk,
struct flowi4 *fl4,
struct sk_buff_head *queue,
struct inet_cork *cork)
{
struct sk_buff *skb, *tmp_skb;
struct sk_buff **tail_skb;
struct inet_sock *inet = inet_sk(sk);
struct net *net = sock_net(sk);
struct ip_options *opt = NULL;
struct rtable *rt = (struct rtable *)cork->dst;
struct iphdr *iph;
__be16 df = 0;
__u8 ttl;
if ((skb = __skb_dequeue(queue)) == NULL)
goto out;
tail_skb = &(skb_shinfo(skb)->frag_list);
/* move skb->data to ip header from ext header */
if (skb->data < skb_network_header(skb))
__skb_pull(skb, skb_network_offset(skb));
while ((tmp_skb = __skb_dequeue(queue)) != NULL) {
__skb_pull(tmp_skb, skb_network_header_len(skb));
*tail_skb = tmp_skb;
tail_skb = &(tmp_skb->next);
skb->len += tmp_skb->len;
skb->data_len += tmp_skb->len;
skb->truesize += tmp_skb->truesize;
tmp_skb->destructor = NULL;
tmp_skb->sk = NULL;
}
/* Unless the user demanded real pmtu discovery (IP_PMTUDISC_DO), we allow
* the frame generated here to be fragmented. No matter how transforms
* change the size of the packet, it will come out.
*/
if (inet->pmtudisc < IP_PMTUDISC_DO)
skb->local_df = 1;
/* DF bit is set when we want to see DF on outgoing frames.
* If local_df is set too, we still allow to fragment this frame
* locally. */
if (inet->pmtudisc >= IP_PMTUDISC_DO ||
(skb->len <= dst_mtu(&rt->dst) &&
ip_dont_fragment(sk, &rt->dst)))
df = htons(IP_DF);
if (cork->flags & IPCORK_OPT)
opt = cork->opt;
if (rt->rt_type == RTN_MULTICAST)
ttl = inet->mc_ttl;
else
ttl = ip_select_ttl(inet, &rt->dst);
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 5;
iph->tos = inet->tos;
iph->frag_off = df;
iph->ttl = ttl;
iph->protocol = sk->sk_protocol;
ip_copy_addrs(iph, fl4);
ip_select_ident(skb, &rt->dst, sk);
if (opt) {
iph->ihl += opt->optlen>>2;
ip_options_build(skb, opt, cork->addr, rt, 0);
}
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
/*
* Steal rt from cork.dst to avoid a pair of atomic_inc/atomic_dec
* on dst refcount
*/
cork->dst = NULL;
skb_dst_set(skb, &rt->dst);
if (iph->protocol == IPPROTO_ICMP)
icmp_out_count(net, ((struct icmphdr *)
skb_transport_header(skb))->type);
ip_cork_release(cork);
out:
return skb;
}
| @@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
- skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- /* specify the length of each IP datagram fragment */
- skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
__skb_queue_tail(queue, skb);
+ } else if (skb_is_gso(skb)) {
+ goto append;
}
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ /* specify the length of each IP datagram fragment */
+ skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
} | CWE-264 | null | null |
23,692 | ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
int offset, size_t size, int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
struct rtable *rt;
struct ip_options *opt = NULL;
struct inet_cork *cork;
int hh_len;
int mtu;
int len;
int err;
unsigned int maxfraglen, fragheaderlen, fraggap;
if (inet->hdrincl)
return -EPERM;
if (flags&MSG_PROBE)
return 0;
if (skb_queue_empty(&sk->sk_write_queue))
return -EINVAL;
cork = &inet->cork.base;
rt = (struct rtable *)cork->dst;
if (cork->flags & IPCORK_OPT)
opt = cork->opt;
if (!(rt->dst.dev->features&NETIF_F_SG))
return -EOPNOTSUPP;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
mtu = cork->fragsize;
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
if (cork->length + size > 0xFFFF - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
return -EMSGSIZE;
}
if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
return -EINVAL;
cork->length += size;
if ((size + skb->len > mtu) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
}
while (size > 0) {
int i;
if (skb_is_gso(skb))
len = size;
else {
/* Check if the remaining data fits into current packet. */
len = mtu - skb->len;
if (len < size)
len = maxfraglen - skb->len;
}
if (len <= 0) {
struct sk_buff *skb_prev;
int alloclen;
skb_prev = skb;
fraggap = skb_prev->len - maxfraglen;
alloclen = fragheaderlen + hh_len + fraggap + 15;
skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
if (unlikely(!skb)) {
err = -ENOBUFS;
goto error;
}
/*
* Fill in the control structures
*/
skb->ip_summed = CHECKSUM_NONE;
skb->csum = 0;
skb_reserve(skb, hh_len);
/*
* Find where to start putting bytes.
*/
skb_put(skb, fragheaderlen + fraggap);
skb_reset_network_header(skb);
skb->transport_header = (skb->network_header +
fragheaderlen);
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(skb_prev,
maxfraglen,
skb_transport_header(skb),
fraggap, 0);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
pskb_trim_unique(skb_prev, maxfraglen);
}
/*
* Put the packet on the pending queue.
*/
__skb_queue_tail(&sk->sk_write_queue, skb);
continue;
}
i = skb_shinfo(skb)->nr_frags;
if (len > size)
len = size;
if (skb_can_coalesce(skb, i, page, offset)) {
skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
} else if (i < MAX_SKB_FRAGS) {
get_page(page);
skb_fill_page_desc(skb, i, page, offset, len);
} else {
err = -EMSGSIZE;
goto error;
}
if (skb->ip_summed == CHECKSUM_NONE) {
__wsum csum;
csum = csum_page(page, offset, len);
skb->csum = csum_block_add(skb->csum, csum, skb->len);
}
skb->len += len;
skb->data_len += len;
skb->truesize += len;
atomic_add(len, &sk->sk_wmem_alloc);
offset += len;
size -= len;
}
return 0;
error:
cork->length -= size;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
return err;
}
| DoS +Priv Mem. Corr. | 0 | ssize_t ip_append_page(struct sock *sk, struct flowi4 *fl4, struct page *page,
int offset, size_t size, int flags)
{
struct inet_sock *inet = inet_sk(sk);
struct sk_buff *skb;
struct rtable *rt;
struct ip_options *opt = NULL;
struct inet_cork *cork;
int hh_len;
int mtu;
int len;
int err;
unsigned int maxfraglen, fragheaderlen, fraggap;
if (inet->hdrincl)
return -EPERM;
if (flags&MSG_PROBE)
return 0;
if (skb_queue_empty(&sk->sk_write_queue))
return -EINVAL;
cork = &inet->cork.base;
rt = (struct rtable *)cork->dst;
if (cork->flags & IPCORK_OPT)
opt = cork->opt;
if (!(rt->dst.dev->features&NETIF_F_SG))
return -EOPNOTSUPP;
hh_len = LL_RESERVED_SPACE(rt->dst.dev);
mtu = cork->fragsize;
fragheaderlen = sizeof(struct iphdr) + (opt ? opt->optlen : 0);
maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen;
if (cork->length + size > 0xFFFF - fragheaderlen) {
ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport, mtu);
return -EMSGSIZE;
}
if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
return -EINVAL;
cork->length += size;
if ((size + skb->len > mtu) &&
(sk->sk_protocol == IPPROTO_UDP) &&
(rt->dst.dev->features & NETIF_F_UFO)) {
skb_shinfo(skb)->gso_size = mtu - fragheaderlen;
skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
}
while (size > 0) {
int i;
if (skb_is_gso(skb))
len = size;
else {
/* Check if the remaining data fits into current packet. */
len = mtu - skb->len;
if (len < size)
len = maxfraglen - skb->len;
}
if (len <= 0) {
struct sk_buff *skb_prev;
int alloclen;
skb_prev = skb;
fraggap = skb_prev->len - maxfraglen;
alloclen = fragheaderlen + hh_len + fraggap + 15;
skb = sock_wmalloc(sk, alloclen, 1, sk->sk_allocation);
if (unlikely(!skb)) {
err = -ENOBUFS;
goto error;
}
/*
* Fill in the control structures
*/
skb->ip_summed = CHECKSUM_NONE;
skb->csum = 0;
skb_reserve(skb, hh_len);
/*
* Find where to start putting bytes.
*/
skb_put(skb, fragheaderlen + fraggap);
skb_reset_network_header(skb);
skb->transport_header = (skb->network_header +
fragheaderlen);
if (fraggap) {
skb->csum = skb_copy_and_csum_bits(skb_prev,
maxfraglen,
skb_transport_header(skb),
fraggap, 0);
skb_prev->csum = csum_sub(skb_prev->csum,
skb->csum);
pskb_trim_unique(skb_prev, maxfraglen);
}
/*
* Put the packet on the pending queue.
*/
__skb_queue_tail(&sk->sk_write_queue, skb);
continue;
}
i = skb_shinfo(skb)->nr_frags;
if (len > size)
len = size;
if (skb_can_coalesce(skb, i, page, offset)) {
skb_frag_size_add(&skb_shinfo(skb)->frags[i-1], len);
} else if (i < MAX_SKB_FRAGS) {
get_page(page);
skb_fill_page_desc(skb, i, page, offset, len);
} else {
err = -EMSGSIZE;
goto error;
}
if (skb->ip_summed == CHECKSUM_NONE) {
__wsum csum;
csum = csum_page(page, offset, len);
skb->csum = csum_block_add(skb->csum, csum, skb->len);
}
skb->len += len;
skb->data_len += len;
skb->truesize += len;
atomic_add(len, &sk->sk_wmem_alloc);
offset += len;
size -= len;
}
return 0;
error:
cork->length -= size;
IP_INC_STATS(sock_net(sk), IPSTATS_MIB_OUTDISCARDS);
return err;
}
| @@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
- skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- /* specify the length of each IP datagram fragment */
- skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
__skb_queue_tail(queue, skb);
+ } else if (skb_is_gso(skb)) {
+ goto append;
}
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ /* specify the length of each IP datagram fragment */
+ skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
} | CWE-264 | null | null |
23,693 | int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
__be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = skb_rtable(skb);
struct iphdr *iph;
/* Build the IP header. */
skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 5;
iph->tos = inet->tos;
if (ip_dont_fragment(sk, &rt->dst))
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
iph->saddr = saddr;
iph->protocol = sk->sk_protocol;
ip_select_ident(skb, &rt->dst, sk);
if (opt && opt->opt.optlen) {
iph->ihl += opt->opt.optlen>>2;
ip_options_build(skb, &opt->opt, daddr, rt, 0);
}
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
/* Send it out. */
return ip_local_out(skb);
}
| DoS +Priv Mem. Corr. | 0 | int ip_build_and_send_pkt(struct sk_buff *skb, struct sock *sk,
__be32 saddr, __be32 daddr, struct ip_options_rcu *opt)
{
struct inet_sock *inet = inet_sk(sk);
struct rtable *rt = skb_rtable(skb);
struct iphdr *iph;
/* Build the IP header. */
skb_push(skb, sizeof(struct iphdr) + (opt ? opt->opt.optlen : 0));
skb_reset_network_header(skb);
iph = ip_hdr(skb);
iph->version = 4;
iph->ihl = 5;
iph->tos = inet->tos;
if (ip_dont_fragment(sk, &rt->dst))
iph->frag_off = htons(IP_DF);
else
iph->frag_off = 0;
iph->ttl = ip_select_ttl(inet, &rt->dst);
iph->daddr = (opt && opt->opt.srr ? opt->opt.faddr : daddr);
iph->saddr = saddr;
iph->protocol = sk->sk_protocol;
ip_select_ident(skb, &rt->dst, sk);
if (opt && opt->opt.optlen) {
iph->ihl += opt->opt.optlen>>2;
ip_options_build(skb, &opt->opt, daddr, rt, 0);
}
skb->priority = sk->sk_priority;
skb->mark = sk->sk_mark;
/* Send it out. */
return ip_local_out(skb);
}
| @@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
- skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- /* specify the length of each IP datagram fragment */
- skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
__skb_queue_tail(queue, skb);
+ } else if (skb_is_gso(skb)) {
+ goto append;
}
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ /* specify the length of each IP datagram fragment */
+ skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
} | CWE-264 | null | null |
23,694 | static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
memcpy(&iph->saddr, &fl4->saddr,
sizeof(fl4->saddr) + sizeof(fl4->daddr));
}
| DoS +Priv Mem. Corr. | 0 | static void ip_copy_addrs(struct iphdr *iph, const struct flowi4 *fl4)
{
BUILD_BUG_ON(offsetof(typeof(*fl4), daddr) !=
offsetof(typeof(*fl4), saddr) + sizeof(fl4->saddr));
memcpy(&iph->saddr, &fl4->saddr,
sizeof(fl4->saddr) + sizeof(fl4->daddr));
}
| @@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
- skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- /* specify the length of each IP datagram fragment */
- skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
__skb_queue_tail(queue, skb);
+ } else if (skb_is_gso(skb)) {
+ goto append;
}
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ /* specify the length of each IP datagram fragment */
+ skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
} | CWE-264 | null | null |
23,695 | static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
skb_dst_drop(to);
skb_dst_copy(to, from);
to->dev = from->dev;
to->mark = from->mark;
/* Copy the flags to each fragment. */
IPCB(to)->flags = IPCB(from)->flags;
#ifdef CONFIG_NET_SCHED
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
to->ipvs_property = from->ipvs_property;
#endif
skb_copy_secmark(to, from);
}
| DoS +Priv Mem. Corr. | 0 | static void ip_copy_metadata(struct sk_buff *to, struct sk_buff *from)
{
to->pkt_type = from->pkt_type;
to->priority = from->priority;
to->protocol = from->protocol;
skb_dst_drop(to);
skb_dst_copy(to, from);
to->dev = from->dev;
to->mark = from->mark;
/* Copy the flags to each fragment. */
IPCB(to)->flags = IPCB(from)->flags;
#ifdef CONFIG_NET_SCHED
to->tc_index = from->tc_index;
#endif
nf_copy(to, from);
#if IS_ENABLED(CONFIG_NETFILTER_XT_TARGET_TRACE)
to->nf_trace = from->nf_trace;
#endif
#if defined(CONFIG_IP_VS) || defined(CONFIG_IP_VS_MODULE)
to->ipvs_property = from->ipvs_property;
#endif
skb_copy_secmark(to, from);
}
| @@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
- skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- /* specify the length of each IP datagram fragment */
- skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
__skb_queue_tail(queue, skb);
+ } else if (skb_is_gso(skb)) {
+ goto append;
}
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ /* specify the length of each IP datagram fragment */
+ skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
} | CWE-264 | null | null |
23,696 | static inline int ip_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = (struct rtable *)dst;
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
u32 nexthop;
if (rt->rt_type == RTN_MULTICAST) {
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
} else if (rt->rt_type == RTN_BROADCAST)
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
/* Be paranoid, rather than too clever. */
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
struct sk_buff *skb2;
skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
if (skb2 == NULL) {
kfree_skb(skb);
return -ENOMEM;
}
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
consume_skb(skb);
skb = skb2;
}
rcu_read_lock_bh();
nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
if (!IS_ERR(neigh)) {
int res = dst_neigh_output(dst, neigh, skb);
rcu_read_unlock_bh();
return res;
}
rcu_read_unlock_bh();
net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
__func__);
kfree_skb(skb);
return -EINVAL;
}
| DoS +Priv Mem. Corr. | 0 | static inline int ip_finish_output2(struct sk_buff *skb)
{
struct dst_entry *dst = skb_dst(skb);
struct rtable *rt = (struct rtable *)dst;
struct net_device *dev = dst->dev;
unsigned int hh_len = LL_RESERVED_SPACE(dev);
struct neighbour *neigh;
u32 nexthop;
if (rt->rt_type == RTN_MULTICAST) {
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTMCAST, skb->len);
} else if (rt->rt_type == RTN_BROADCAST)
IP_UPD_PO_STATS(dev_net(dev), IPSTATS_MIB_OUTBCAST, skb->len);
/* Be paranoid, rather than too clever. */
if (unlikely(skb_headroom(skb) < hh_len && dev->header_ops)) {
struct sk_buff *skb2;
skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
if (skb2 == NULL) {
kfree_skb(skb);
return -ENOMEM;
}
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
consume_skb(skb);
skb = skb2;
}
rcu_read_lock_bh();
nexthop = (__force u32) rt_nexthop(rt, ip_hdr(skb)->daddr);
neigh = __ipv4_neigh_lookup_noref(dev, nexthop);
if (unlikely(!neigh))
neigh = __neigh_create(&arp_tbl, &nexthop, dev, false);
if (!IS_ERR(neigh)) {
int res = dst_neigh_output(dst, neigh, skb);
rcu_read_unlock_bh();
return res;
}
rcu_read_unlock_bh();
net_dbg_ratelimited("%s: No header cache and no neighbour!\n",
__func__);
kfree_skb(skb);
return -EINVAL;
}
| @@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
- skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- /* specify the length of each IP datagram fragment */
- skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
__skb_queue_tail(queue, skb);
+ } else if (skb_is_gso(skb)) {
+ goto append;
}
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ /* specify the length of each IP datagram fragment */
+ skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
} | CWE-264 | null | null |
23,697 | void ip_flush_pending_frames(struct sock *sk)
{
__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
| DoS +Priv Mem. Corr. | 0 | void ip_flush_pending_frames(struct sock *sk)
{
__ip_flush_pending_frames(sk, &sk->sk_write_queue, &inet_sk(sk)->cork.base);
}
| @@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
- skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- /* specify the length of each IP datagram fragment */
- skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
__skb_queue_tail(queue, skb);
+ } else if (skb_is_gso(skb)) {
+ goto append;
}
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ /* specify the length of each IP datagram fragment */
+ skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
} | CWE-264 | null | null |
23,698 | int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
struct iphdr *iph;
int ptr;
struct net_device *dev;
struct sk_buff *skb2;
unsigned int mtu, hlen, left, len, ll_rs;
int offset;
__be16 not_last_frag;
struct rtable *rt = skb_rtable(skb);
int err = 0;
dev = rt->dst.dev;
/*
* Point into the IP datagram header.
*/
iph = ip_hdr(skb);
if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
(IPCB(skb)->frag_max_size &&
IPCB(skb)->frag_max_size > dst_mtu(&rt->dst)))) {
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(ip_skb_dst_mtu(skb)));
kfree_skb(skb);
return -EMSGSIZE;
}
/*
* Setup starting values.
*/
hlen = iph->ihl * 4;
mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge)
mtu -= nf_bridge_mtu_reduction(skb);
#endif
IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
/* When frag_list is given, use it. First, check its validity:
* some transformers could create wrong frag_list or break existing
* one, it is not prohibited. In this case fall back to copying.
*
* LATER: this step can be merged to real generation of fragments,
* we can switch to copy when see the first bad fragment.
*/
if (skb_has_frag_list(skb)) {
struct sk_buff *frag, *frag2;
int first_len = skb_pagelen(skb);
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
ip_is_fragment(iph) ||
skb_cloned(skb))
goto slow_path;
skb_walk_frags(skb, frag) {
/* Correct geometry. */
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
}
skb->truesize -= frag->truesize;
}
/* Everything is OK. Generate! */
err = 0;
offset = 0;
frag = skb_shinfo(skb)->frag_list;
skb_frag_list_init(skb);
skb->data_len = first_len - skb_headlen(skb);
skb->len = first_len;
iph->tot_len = htons(first_len);
iph->frag_off = htons(IP_MF);
ip_send_check(iph);
for (;;) {
/* Prepare header of the next frame,
* before previous one went down. */
if (frag) {
frag->ip_summed = CHECKSUM_NONE;
skb_reset_transport_header(frag);
__skb_push(frag, hlen);
skb_reset_network_header(frag);
memcpy(skb_network_header(frag), iph, hlen);
iph = ip_hdr(frag);
iph->tot_len = htons(frag->len);
ip_copy_metadata(frag, skb);
if (offset == 0)
ip_options_fragment(frag);
offset += skb->len - hlen;
iph->frag_off = htons(offset>>3);
if (frag->next != NULL)
iph->frag_off |= htons(IP_MF);
/* Ready, complete checksum */
ip_send_check(iph);
}
err = output(skb);
if (!err)
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
if (err || !frag)
break;
skb = frag;
frag = skb->next;
skb->next = NULL;
}
if (err == 0) {
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
return 0;
}
while (frag) {
skb = frag->next;
kfree_skb(frag);
frag = skb;
}
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
slow_path_clean:
skb_walk_frags(skb, frag2) {
if (frag2 == frag)
break;
frag2->sk = NULL;
frag2->destructor = NULL;
skb->truesize += frag2->truesize;
}
}
slow_path:
/* for offloaded checksums cleanup checksum before fragmentation */
if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb))
goto fail;
iph = ip_hdr(skb);
left = skb->len - hlen; /* Space per frame */
ptr = hlen; /* Where to start from */
/* for bridged IP traffic encapsulated inside f.e. a vlan header,
* we need to make room for the encapsulating header
*/
ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
/*
* Fragment the datagram.
*/
offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
not_last_frag = iph->frag_off & htons(IP_MF);
/*
* Keep copying data until we run out.
*/
while (left > 0) {
len = left;
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > mtu)
len = mtu;
/* IF: we are not sending up to and including the packet end
then align the next start on an eight byte boundary */
if (len < left) {
len &= ~7;
}
/*
* Allocate buffer.
*/
if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
err = -ENOMEM;
goto fail;
}
/*
* Set up data on packet
*/
ip_copy_metadata(skb2, skb);
skb_reserve(skb2, ll_rs);
skb_put(skb2, len + hlen);
skb_reset_network_header(skb2);
skb2->transport_header = skb2->network_header + hlen;
/*
* Charge the memory for the fragment to any owner
* it might possess
*/
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
/*
* Copy the packet header into the new buffer.
*/
skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
/*
* Copy a block of the IP datagram.
*/
if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
BUG();
left -= len;
/*
* Fill in the new header fields.
*/
iph = ip_hdr(skb2);
iph->frag_off = htons((offset >> 3));
/* ANK: dirty, but effective trick. Upgrade options only if
* the segment to be fragmented was THE FIRST (otherwise,
* options are already fixed) and make it ONCE
* on the initial skb, so that all the following fragments
* will inherit fixed options.
*/
if (offset == 0)
ip_options_fragment(skb);
/*
* Added AC : If we are fragmenting a fragment that's not the
* last fragment then keep MF on each bit
*/
if (left > 0 || not_last_frag)
iph->frag_off |= htons(IP_MF);
ptr += len;
offset += len;
/*
* Put this fragment into the sending queue.
*/
iph->tot_len = htons(len + hlen);
ip_send_check(iph);
err = output(skb2);
if (err)
goto fail;
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
}
consume_skb(skb);
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
return err;
fail:
kfree_skb(skb);
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
}
| DoS +Priv Mem. Corr. | 0 | int ip_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
struct iphdr *iph;
int ptr;
struct net_device *dev;
struct sk_buff *skb2;
unsigned int mtu, hlen, left, len, ll_rs;
int offset;
__be16 not_last_frag;
struct rtable *rt = skb_rtable(skb);
int err = 0;
dev = rt->dst.dev;
/*
* Point into the IP datagram header.
*/
iph = ip_hdr(skb);
if (unlikely(((iph->frag_off & htons(IP_DF)) && !skb->local_df) ||
(IPCB(skb)->frag_max_size &&
IPCB(skb)->frag_max_size > dst_mtu(&rt->dst)))) {
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
icmp_send(skb, ICMP_DEST_UNREACH, ICMP_FRAG_NEEDED,
htonl(ip_skb_dst_mtu(skb)));
kfree_skb(skb);
return -EMSGSIZE;
}
/*
* Setup starting values.
*/
hlen = iph->ihl * 4;
mtu = dst_mtu(&rt->dst) - hlen; /* Size of data space */
#ifdef CONFIG_BRIDGE_NETFILTER
if (skb->nf_bridge)
mtu -= nf_bridge_mtu_reduction(skb);
#endif
IPCB(skb)->flags |= IPSKB_FRAG_COMPLETE;
/* When frag_list is given, use it. First, check its validity:
* some transformers could create wrong frag_list or break existing
* one, it is not prohibited. In this case fall back to copying.
*
* LATER: this step can be merged to real generation of fragments,
* we can switch to copy when see the first bad fragment.
*/
if (skb_has_frag_list(skb)) {
struct sk_buff *frag, *frag2;
int first_len = skb_pagelen(skb);
if (first_len - hlen > mtu ||
((first_len - hlen) & 7) ||
ip_is_fragment(iph) ||
skb_cloned(skb))
goto slow_path;
skb_walk_frags(skb, frag) {
/* Correct geometry. */
if (frag->len > mtu ||
((frag->len & 7) && frag->next) ||
skb_headroom(frag) < hlen)
goto slow_path_clean;
/* Partially cloned skb? */
if (skb_shared(frag))
goto slow_path_clean;
BUG_ON(frag->sk);
if (skb->sk) {
frag->sk = skb->sk;
frag->destructor = sock_wfree;
}
skb->truesize -= frag->truesize;
}
/* Everything is OK. Generate! */
err = 0;
offset = 0;
frag = skb_shinfo(skb)->frag_list;
skb_frag_list_init(skb);
skb->data_len = first_len - skb_headlen(skb);
skb->len = first_len;
iph->tot_len = htons(first_len);
iph->frag_off = htons(IP_MF);
ip_send_check(iph);
for (;;) {
/* Prepare header of the next frame,
* before previous one went down. */
if (frag) {
frag->ip_summed = CHECKSUM_NONE;
skb_reset_transport_header(frag);
__skb_push(frag, hlen);
skb_reset_network_header(frag);
memcpy(skb_network_header(frag), iph, hlen);
iph = ip_hdr(frag);
iph->tot_len = htons(frag->len);
ip_copy_metadata(frag, skb);
if (offset == 0)
ip_options_fragment(frag);
offset += skb->len - hlen;
iph->frag_off = htons(offset>>3);
if (frag->next != NULL)
iph->frag_off |= htons(IP_MF);
/* Ready, complete checksum */
ip_send_check(iph);
}
err = output(skb);
if (!err)
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
if (err || !frag)
break;
skb = frag;
frag = skb->next;
skb->next = NULL;
}
if (err == 0) {
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
return 0;
}
while (frag) {
skb = frag->next;
kfree_skb(frag);
frag = skb;
}
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
slow_path_clean:
skb_walk_frags(skb, frag2) {
if (frag2 == frag)
break;
frag2->sk = NULL;
frag2->destructor = NULL;
skb->truesize += frag2->truesize;
}
}
slow_path:
/* for offloaded checksums cleanup checksum before fragmentation */
if ((skb->ip_summed == CHECKSUM_PARTIAL) && skb_checksum_help(skb))
goto fail;
iph = ip_hdr(skb);
left = skb->len - hlen; /* Space per frame */
ptr = hlen; /* Where to start from */
/* for bridged IP traffic encapsulated inside f.e. a vlan header,
* we need to make room for the encapsulating header
*/
ll_rs = LL_RESERVED_SPACE_EXTRA(rt->dst.dev, nf_bridge_pad(skb));
/*
* Fragment the datagram.
*/
offset = (ntohs(iph->frag_off) & IP_OFFSET) << 3;
not_last_frag = iph->frag_off & htons(IP_MF);
/*
* Keep copying data until we run out.
*/
while (left > 0) {
len = left;
/* IF: it doesn't fit, use 'mtu' - the data space left */
if (len > mtu)
len = mtu;
/* IF: we are not sending up to and including the packet end
then align the next start on an eight byte boundary */
if (len < left) {
len &= ~7;
}
/*
* Allocate buffer.
*/
if ((skb2 = alloc_skb(len+hlen+ll_rs, GFP_ATOMIC)) == NULL) {
NETDEBUG(KERN_INFO "IP: frag: no memory for new fragment!\n");
err = -ENOMEM;
goto fail;
}
/*
* Set up data on packet
*/
ip_copy_metadata(skb2, skb);
skb_reserve(skb2, ll_rs);
skb_put(skb2, len + hlen);
skb_reset_network_header(skb2);
skb2->transport_header = skb2->network_header + hlen;
/*
* Charge the memory for the fragment to any owner
* it might possess
*/
if (skb->sk)
skb_set_owner_w(skb2, skb->sk);
/*
* Copy the packet header into the new buffer.
*/
skb_copy_from_linear_data(skb, skb_network_header(skb2), hlen);
/*
* Copy a block of the IP datagram.
*/
if (skb_copy_bits(skb, ptr, skb_transport_header(skb2), len))
BUG();
left -= len;
/*
* Fill in the new header fields.
*/
iph = ip_hdr(skb2);
iph->frag_off = htons((offset >> 3));
/* ANK: dirty, but effective trick. Upgrade options only if
* the segment to be fragmented was THE FIRST (otherwise,
* options are already fixed) and make it ONCE
* on the initial skb, so that all the following fragments
* will inherit fixed options.
*/
if (offset == 0)
ip_options_fragment(skb);
/*
* Added AC : If we are fragmenting a fragment that's not the
* last fragment then keep MF on each bit
*/
if (left > 0 || not_last_frag)
iph->frag_off |= htons(IP_MF);
ptr += len;
offset += len;
/*
* Put this fragment into the sending queue.
*/
iph->tot_len = htons(len + hlen);
ip_send_check(iph);
err = output(skb2);
if (err)
goto fail;
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGCREATES);
}
consume_skb(skb);
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGOKS);
return err;
fail:
kfree_skb(skb);
IP_INC_STATS(dev_net(dev), IPSTATS_MIB_FRAGFAILS);
return err;
}
| @@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
- skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- /* specify the length of each IP datagram fragment */
- skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
__skb_queue_tail(queue, skb);
+ } else if (skb_is_gso(skb)) {
+ goto append;
}
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ /* specify the length of each IP datagram fragment */
+ skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
} | CWE-264 | null | null |
23,699 | struct sk_buff *ip_make_skb(struct sock *sk,
struct flowi4 *fl4,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm_cookie *ipc, struct rtable **rtp,
unsigned int flags)
{
struct inet_cork cork;
struct sk_buff_head queue;
int err;
if (flags & MSG_PROBE)
return NULL;
__skb_queue_head_init(&queue);
cork.flags = 0;
cork.addr = 0;
cork.opt = NULL;
err = ip_setup_cork(sk, &cork, ipc, rtp);
if (err)
return ERR_PTR(err);
err = __ip_append_data(sk, fl4, &queue, &cork,
			       &current->task_frag, getfrag,
from, length, transhdrlen, flags);
if (err) {
__ip_flush_pending_frames(sk, &queue, &cork);
return ERR_PTR(err);
}
return __ip_make_skb(sk, fl4, &queue, &cork);
}
| DoS +Priv Mem. Corr. | 0 | struct sk_buff *ip_make_skb(struct sock *sk,
struct flowi4 *fl4,
int getfrag(void *from, char *to, int offset,
int len, int odd, struct sk_buff *skb),
void *from, int length, int transhdrlen,
struct ipcm_cookie *ipc, struct rtable **rtp,
unsigned int flags)
{
struct inet_cork cork;
struct sk_buff_head queue;
int err;
if (flags & MSG_PROBE)
return NULL;
__skb_queue_head_init(&queue);
cork.flags = 0;
cork.addr = 0;
cork.opt = NULL;
err = ip_setup_cork(sk, &cork, ipc, rtp);
if (err)
return ERR_PTR(err);
err = __ip_append_data(sk, fl4, &queue, &cork,
			       &current->task_frag, getfrag,
from, length, transhdrlen, flags);
if (err) {
__ip_flush_pending_frames(sk, &queue, &cork);
return ERR_PTR(err);
}
return __ip_make_skb(sk, fl4, &queue, &cork);
}
| @@ -772,15 +772,20 @@ static inline int ip_ufo_append_data(struct sock *sk,
/* initialize protocol header pointer */
skb->transport_header = skb->network_header + fragheaderlen;
- skb->ip_summed = CHECKSUM_PARTIAL;
skb->csum = 0;
- /* specify the length of each IP datagram fragment */
- skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
- skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
__skb_queue_tail(queue, skb);
+ } else if (skb_is_gso(skb)) {
+ goto append;
}
+ skb->ip_summed = CHECKSUM_PARTIAL;
+ /* specify the length of each IP datagram fragment */
+ skb_shinfo(skb)->gso_size = maxfraglen - fragheaderlen;
+ skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
+
+append:
return skb_append_datato_frags(sk, skb, getfrag, from,
(length - transhdrlen));
} | CWE-264 | null | null |