Schema (one record per idx, fields in this order):

idx: int64
func_before: string
Vulnerability Classification: string
vul: int64
func_after: string
patch: string
CWE ID: string
lines_before: string
lines_after: string
22,200
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted, u32 *adv_credits, int need_posted, int max_posted) { unsigned int avail, posted, got = 0, advertise; long oldval, newval; *adv_credits = 0; if (!ic->i_flowctl) return wanted; try_again: advertise = 0; oldval = newval = atomic_read(&ic->i_credits); posted = IB_GET_POST_CREDITS(oldval); avail = IB_GET_SEND_CREDITS(oldval); rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n", wanted, avail, posted); /* The last credit must be used to send a credit update. */ if (avail && !posted) avail--; if (avail < wanted) { struct rds_connection *conn = ic->i_cm_id->context; /* Oops, there aren't that many credits left! */ set_bit(RDS_LL_SEND_FULL, &conn->c_flags); got = avail; } else { /* Sometimes you get what you want, lalala. */ got = wanted; } newval -= IB_SET_SEND_CREDITS(got); /* * If need_posted is non-zero, then the caller wants * the posted regardless of whether any send credits are * available. */ if (posted && (got || need_posted)) { advertise = min_t(unsigned int, posted, max_posted); newval -= IB_SET_POST_CREDITS(advertise); } /* Finally bill everything */ if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval) goto try_again; *adv_credits = advertise; return got; }
DoS
0
int rds_ib_send_grab_credits(struct rds_ib_connection *ic, u32 wanted, u32 *adv_credits, int need_posted, int max_posted) { unsigned int avail, posted, got = 0, advertise; long oldval, newval; *adv_credits = 0; if (!ic->i_flowctl) return wanted; try_again: advertise = 0; oldval = newval = atomic_read(&ic->i_credits); posted = IB_GET_POST_CREDITS(oldval); avail = IB_GET_SEND_CREDITS(oldval); rdsdebug("rds_ib_send_grab_credits(%u): credits=%u posted=%u\n", wanted, avail, posted); /* The last credit must be used to send a credit update. */ if (avail && !posted) avail--; if (avail < wanted) { struct rds_connection *conn = ic->i_cm_id->context; /* Oops, there aren't that many credits left! */ set_bit(RDS_LL_SEND_FULL, &conn->c_flags); got = avail; } else { /* Sometimes you get what you want, lalala. */ got = wanted; } newval -= IB_SET_SEND_CREDITS(got); /* * If need_posted is non-zero, then the caller wants * the posted regardless of whether any send credits are * available. */ if (posted && (got || need_posted)) { advertise = min_t(unsigned int, posted, max_posted); newval -= IB_SET_POST_CREDITS(advertise); } /* Finally bill everything */ if (atomic_cmpxchg(&ic->i_credits, oldval, newval) != oldval) goto try_again; *adv_credits = advertise; return got; }
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, if (conn->c_loopback && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + scat = &rm->data.op_sg[sg]; + ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); + return ret; } /* FIXME we may overallocate here */
null
null
null
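The try_again loop in rds_ib_send_grab_credits above is the classic optimistic read/modify/cmpxchg pattern: snapshot the packed credit word, compute the new value, and retry if another CPU raced in between. Below is a minimal user-space sketch of that idiom using C11 atomics; the names, the plain long counter, and the fixed starting balance are illustrative stand-ins, not the kernel's packed send/post credit encoding or its atomic_cmpxchg API.

    /* Sketch only: optimistic CAS retry loop, as in rds_ib_send_grab_credits. */
    #include <stdatomic.h>
    #include <stdio.h>

    static atomic_long credits = 10;   /* hypothetical starting balance */

    /* Try to take up to "wanted" credits; returns how many were granted. */
    static long grab_credits(long wanted)
    {
        long oldval = atomic_load(&credits);
        long got, newval;

        do {
            got = oldval < wanted ? oldval : wanted;
            newval = oldval - got;
            /* on failure, oldval is reloaded and the loop recomputes */
        } while (!atomic_compare_exchange_weak(&credits, &oldval, newval));

        return got;
    }

    int main(void)
    {
        printf("granted %ld\n", grab_credits(4));  /* 4 */
        printf("granted %ld\n", grab_credits(9));  /* 6, only 6 were left */
        return 0;
    }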
22,201
static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic, struct rm_atomic_op *op, int wc_status) { /* unmap atomic recvbuf */ if (op->op_mapped) { ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE); op->op_mapped = 0; } rds_ib_send_complete(container_of(op, struct rds_message, atomic), wc_status, rds_atomic_send_complete); if (op->op_type == RDS_ATOMIC_TYPE_CSWP) rds_ib_stats_inc(s_ib_atomic_cswp); else rds_ib_stats_inc(s_ib_atomic_fadd); }
DoS
0
static void rds_ib_send_unmap_atomic(struct rds_ib_connection *ic, struct rm_atomic_op *op, int wc_status) { /* unmap atomic recvbuf */ if (op->op_mapped) { ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE); op->op_mapped = 0; } rds_ib_send_complete(container_of(op, struct rds_message, atomic), wc_status, rds_atomic_send_complete); if (op->op_type == RDS_ATOMIC_TYPE_CSWP) rds_ib_stats_inc(s_ib_atomic_cswp); else rds_ib_stats_inc(s_ib_atomic_fadd); }
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, if (conn->c_loopback && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + scat = &rm->data.op_sg[sg]; + ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); + return ret; } /* FIXME we may overallocate here */
null
null
null
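The patch attached to these RDS/IB rows (repeated verbatim on each record from this commit) clamps the loopback congestion-map return value. Reading the diff, the old code reported sizeof(struct rds_header) + RDS_CONG_MAP_BYTES regardless of how much of the current scatterlist element remained, which lets the send loop advance conn->c_xmit_data_off past scat->length; the min_t() clamp keeps the reported byte count inside the element. A toy sketch of that invariant, with hypothetical names, is below.

    /* Sketch only: why the xmit return value must be clamped. */
    #include <assert.h>
    #include <stdio.h>

    #define min_t(type, a, b) ((type)(a) < (type)(b) ? (type)(a) : (type)(b))

    struct sg { unsigned int length; };

    static int xmit_report(const struct sg *scat, unsigned int data_off,
                           int bytes_wanted)
    {
        /* never report more than the element holds past data_off */
        return min_t(int, bytes_wanted, scat->length - data_off);
    }

    int main(void)
    {
        struct sg scat = { .length = 4096 };
        unsigned int off = 0;

        off += xmit_report(&scat, off, 8192);   /* clamped to 4096 */
        assert(off <= scat.length);             /* fails without the clamp */
        printf("offset %u\n", off);
        return 0;
    }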
22,202
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic, struct rds_ib_send_work *send, int wc_status) { struct rds_message *rm = NULL; /* In the error case, wc.opcode sometimes contains garbage */ switch (send->s_wr.opcode) { case IB_WR_SEND: if (send->s_op) { rm = container_of(send->s_op, struct rds_message, data); rds_ib_send_unmap_data(ic, send->s_op, wc_status); } break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_READ: if (send->s_op) { rm = container_of(send->s_op, struct rds_message, rdma); rds_ib_send_unmap_rdma(ic, send->s_op, wc_status); } break; case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_ATOMIC_CMP_AND_SWP: if (send->s_op) { rm = container_of(send->s_op, struct rds_message, atomic); rds_ib_send_unmap_atomic(ic, send->s_op, wc_status); } break; default: if (printk_ratelimit()) printk(KERN_NOTICE "RDS/IB: %s: unexpected opcode 0x%x in WR!\n", __func__, send->s_wr.opcode); break; } send->s_wr.opcode = 0xdead; return rm; }
DoS
0
static struct rds_message *rds_ib_send_unmap_op(struct rds_ib_connection *ic, struct rds_ib_send_work *send, int wc_status) { struct rds_message *rm = NULL; /* In the error case, wc.opcode sometimes contains garbage */ switch (send->s_wr.opcode) { case IB_WR_SEND: if (send->s_op) { rm = container_of(send->s_op, struct rds_message, data); rds_ib_send_unmap_data(ic, send->s_op, wc_status); } break; case IB_WR_RDMA_WRITE: case IB_WR_RDMA_READ: if (send->s_op) { rm = container_of(send->s_op, struct rds_message, rdma); rds_ib_send_unmap_rdma(ic, send->s_op, wc_status); } break; case IB_WR_ATOMIC_FETCH_AND_ADD: case IB_WR_ATOMIC_CMP_AND_SWP: if (send->s_op) { rm = container_of(send->s_op, struct rds_message, atomic); rds_ib_send_unmap_atomic(ic, send->s_op, wc_status); } break; default: if (printk_ratelimit()) printk(KERN_NOTICE "RDS/IB: %s: unexpected opcode 0x%x in WR!\n", __func__, send->s_wr.opcode); break; } send->s_wr.opcode = 0xdead; return rm; }
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, if (conn->c_loopback && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + scat = &rm->data.op_sg[sg]; + ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); + return ret; } /* FIXME we may overallocate here */
null
null
null
22,203
static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic, struct rm_rdma_op *op, int wc_status) { if (op->op_mapped) { ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, op->op_nents, op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); op->op_mapped = 0; } /* If the user asked for a completion notification on this * message, we can implement three different semantics: * 1. Notify when we received the ACK on the RDS message * that was queued with the RDMA. This provides reliable * notification of RDMA status at the expense of a one-way * packet delay. * 2. Notify when the IB stack gives us the completion event for * the RDMA operation. * 3. Notify when the IB stack gives us the completion event for * the accompanying RDS messages. * Here, we implement approach #3. To implement approach #2, * we would need to take an event for the rdma WR. To implement #1, * don't call rds_rdma_send_complete at all, and fall back to the notify * handling in the ACK processing code. * * Note: There's no need to explicitly sync any RDMA buffers using * ib_dma_sync_sg_for_cpu - the completion for the RDMA * operation itself unmapped the RDMA buffers, which takes care * of synching. */ rds_ib_send_complete(container_of(op, struct rds_message, rdma), wc_status, rds_rdma_send_complete); if (op->op_write) rds_stats_add(s_send_rdma_bytes, op->op_bytes); else rds_stats_add(s_recv_rdma_bytes, op->op_bytes); }
DoS
0
static void rds_ib_send_unmap_rdma(struct rds_ib_connection *ic, struct rm_rdma_op *op, int wc_status) { if (op->op_mapped) { ib_dma_unmap_sg(ic->i_cm_id->device, op->op_sg, op->op_nents, op->op_write ? DMA_TO_DEVICE : DMA_FROM_DEVICE); op->op_mapped = 0; } /* If the user asked for a completion notification on this * message, we can implement three different semantics: * 1. Notify when we received the ACK on the RDS message * that was queued with the RDMA. This provides reliable * notification of RDMA status at the expense of a one-way * packet delay. * 2. Notify when the IB stack gives us the completion event for * the RDMA operation. * 3. Notify when the IB stack gives us the completion event for * the accompanying RDS messages. * Here, we implement approach #3. To implement approach #2, * we would need to take an event for the rdma WR. To implement #1, * don't call rds_rdma_send_complete at all, and fall back to the notify * handling in the ACK processing code. * * Note: There's no need to explicitly sync any RDMA buffers using * ib_dma_sync_sg_for_cpu - the completion for the RDMA * operation itself unmapped the RDMA buffers, which takes care * of synching. */ rds_ib_send_complete(container_of(op, struct rds_message, rdma), wc_status, rds_rdma_send_complete); if (op->op_write) rds_stats_add(s_send_rdma_bytes, op->op_bytes); else rds_stats_add(s_recv_rdma_bytes, op->op_bytes); }
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, if (conn->c_loopback && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + scat = &rm->data.op_sg[sg]; + ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); + return ret; } /* FIXME we may overallocate here */
null
null
null
22,204
static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic, struct rds_ib_send_work *send, bool notify) { /* * We want to delay signaling completions just enough to get * the batching benefits but not so much that we create dead time * on the wire. */ if (ic->i_unsignaled_wrs-- == 0 || notify) { ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs; send->s_wr.send_flags |= IB_SEND_SIGNALED; return 1; } return 0; }
DoS
0
static inline int rds_ib_set_wr_signal_state(struct rds_ib_connection *ic, struct rds_ib_send_work *send, bool notify) { /* * We want to delay signaling completions just enough to get * the batching benefits but not so much that we create dead time * on the wire. */ if (ic->i_unsignaled_wrs-- == 0 || notify) { ic->i_unsignaled_wrs = rds_ib_sysctl_max_unsig_wrs; send->s_wr.send_flags |= IB_SEND_SIGNALED; return 1; } return 0; }
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, if (conn->c_loopback && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + scat = &rm->data.op_sg[sg]; + ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); + return ret; } /* FIXME we may overallocate here */
null
null
null
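rds_ib_set_wr_signal_state above implements completion batching: only every Nth work request (or one the caller explicitly flags) sets IB_SEND_SIGNALED, so the HCA raises far fewer completion events. A standalone counter sketch of the same logic follows; MAX_UNSIGNALED stands in for the rds_ib_sysctl_max_unsig_wrs sysctl.

    /* Sketch only: signal one completion per batch of work requests. */
    #include <stdbool.h>
    #include <stdio.h>

    #define MAX_UNSIGNALED 16   /* stand-in for rds_ib_sysctl_max_unsig_wrs */

    static unsigned int unsignaled_wrs = MAX_UNSIGNALED;

    static bool set_signal_state(bool notify)
    {
        if (unsignaled_wrs-- == 0 || notify) {
            unsignaled_wrs = MAX_UNSIGNALED;
            return true;        /* this WR will generate a completion */
        }
        return false;
    }

    int main(void)
    {
        unsigned int signaled = 0;
        for (int i = 0; i < 100; i++)
            signaled += set_signal_state(false);
        printf("signaled %u of 100\n", signaled);  /* 5: calls 17, 34, ... */
        return 0;
    }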
22,205
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr) { if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) && waitqueue_active(&rds_ib_ring_empty_wait)) wake_up(&rds_ib_ring_empty_wait); BUG_ON(atomic_read(&ic->i_signaled_sends) < 0); }
DoS
0
static void rds_ib_sub_signaled(struct rds_ib_connection *ic, int nr) { if ((atomic_sub_return(nr, &ic->i_signaled_sends) == 0) && waitqueue_active(&rds_ib_ring_empty_wait)) wake_up(&rds_ib_ring_empty_wait); BUG_ON(atomic_read(&ic->i_signaled_sends) < 0); }
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, if (conn->c_loopback && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + scat = &rm->data.op_sg[sg]; + ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); + return ret; } /* FIXME we may overallocate here */
null
null
null
22,206
char *rds_ib_wc_status_str(enum ib_wc_status status) { return rds_str_array(rds_ib_wc_status_strings, ARRAY_SIZE(rds_ib_wc_status_strings), status); }
DoS
0
char *rds_ib_wc_status_str(enum ib_wc_status status) { return rds_str_array(rds_ib_wc_status_strings, ARRAY_SIZE(rds_ib_wc_status_strings), status); }
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, if (conn->c_loopback && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + scat = &rm->data.op_sg[sg]; + ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); + return ret; } /* FIXME we may overallocate here */
null
null
null
22,207
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op) { struct rds_ib_connection *ic = conn->c_transport_data; struct rds_ib_send_work *send = NULL; struct ib_send_wr *failed_wr; struct rds_ib_device *rds_ibdev; u32 pos; u32 work_alloc; int ret; int nr_sig = 0; rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos); if (work_alloc != 1) { rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_stats_inc(s_ib_tx_ring_full); ret = -ENOMEM; goto out; } /* address of send request in ring */ send = &ic->i_sends[pos]; send->s_queued = jiffies; if (op->op_type == RDS_ATOMIC_TYPE_CSWP) { send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP; send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare; send->s_wr.wr.atomic.swap = op->op_m_cswp.swap; send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask; send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask; } else { /* FADD */ send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD; send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add; send->s_wr.wr.atomic.swap = 0; send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask; send->s_wr.wr.atomic.swap_mask = 0; } nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify); send->s_wr.num_sge = 1; send->s_wr.next = NULL; send->s_wr.wr.atomic.remote_addr = op->op_remote_addr; send->s_wr.wr.atomic.rkey = op->op_rkey; send->s_op = op; rds_message_addref(container_of(send->s_op, struct rds_message, atomic)); /* map 8 byte retval buffer to the device */ ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE); rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret); if (ret != 1) { rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_stats_inc(s_ib_tx_sg_mapping_failure); ret = -ENOMEM; /* XXX ? */ goto out; } /* Convert our struct scatterlist to struct ib_sge */ send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg); send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg); send->s_sge[0].lkey = ic->i_mr->lkey; rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr, send->s_sge[0].addr, send->s_sge[0].length); if (nr_sig) atomic_add(nr_sig, &ic->i_signaled_sends); failed_wr = &send->s_wr; ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr); rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic, send, &send->s_wr, ret, failed_wr); BUG_ON(failed_wr != &send->s_wr); if (ret) { printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 " "returned %d\n", &conn->c_faddr, ret); rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_sub_signaled(ic, nr_sig); goto out; } if (unlikely(failed_wr != &send->s_wr)) { printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret); BUG_ON(failed_wr != &send->s_wr); } out: return ret; }
DoS
0
int rds_ib_xmit_atomic(struct rds_connection *conn, struct rm_atomic_op *op) { struct rds_ib_connection *ic = conn->c_transport_data; struct rds_ib_send_work *send = NULL; struct ib_send_wr *failed_wr; struct rds_ib_device *rds_ibdev; u32 pos; u32 work_alloc; int ret; int nr_sig = 0; rds_ibdev = ib_get_client_data(ic->i_cm_id->device, &rds_ib_client); work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, 1, &pos); if (work_alloc != 1) { rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_stats_inc(s_ib_tx_ring_full); ret = -ENOMEM; goto out; } /* address of send request in ring */ send = &ic->i_sends[pos]; send->s_queued = jiffies; if (op->op_type == RDS_ATOMIC_TYPE_CSWP) { send->s_wr.opcode = IB_WR_MASKED_ATOMIC_CMP_AND_SWP; send->s_wr.wr.atomic.compare_add = op->op_m_cswp.compare; send->s_wr.wr.atomic.swap = op->op_m_cswp.swap; send->s_wr.wr.atomic.compare_add_mask = op->op_m_cswp.compare_mask; send->s_wr.wr.atomic.swap_mask = op->op_m_cswp.swap_mask; } else { /* FADD */ send->s_wr.opcode = IB_WR_MASKED_ATOMIC_FETCH_AND_ADD; send->s_wr.wr.atomic.compare_add = op->op_m_fadd.add; send->s_wr.wr.atomic.swap = 0; send->s_wr.wr.atomic.compare_add_mask = op->op_m_fadd.nocarry_mask; send->s_wr.wr.atomic.swap_mask = 0; } nr_sig = rds_ib_set_wr_signal_state(ic, send, op->op_notify); send->s_wr.num_sge = 1; send->s_wr.next = NULL; send->s_wr.wr.atomic.remote_addr = op->op_remote_addr; send->s_wr.wr.atomic.rkey = op->op_rkey; send->s_op = op; rds_message_addref(container_of(send->s_op, struct rds_message, atomic)); /* map 8 byte retval buffer to the device */ ret = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, 1, DMA_FROM_DEVICE); rdsdebug("ic %p mapping atomic op %p. mapped %d pg\n", ic, op, ret); if (ret != 1) { rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_stats_inc(s_ib_tx_sg_mapping_failure); ret = -ENOMEM; /* XXX ? */ goto out; } /* Convert our struct scatterlist to struct ib_sge */ send->s_sge[0].addr = ib_sg_dma_address(ic->i_cm_id->device, op->op_sg); send->s_sge[0].length = ib_sg_dma_len(ic->i_cm_id->device, op->op_sg); send->s_sge[0].lkey = ic->i_mr->lkey; rdsdebug("rva %Lx rpa %Lx len %u\n", op->op_remote_addr, send->s_sge[0].addr, send->s_sge[0].length); if (nr_sig) atomic_add(nr_sig, &ic->i_signaled_sends); failed_wr = &send->s_wr; ret = ib_post_send(ic->i_cm_id->qp, &send->s_wr, &failed_wr); rdsdebug("ic %p send %p (wr %p) ret %d wr %p\n", ic, send, &send->s_wr, ret, failed_wr); BUG_ON(failed_wr != &send->s_wr); if (ret) { printk(KERN_WARNING "RDS/IB: atomic ib_post_send to %pI4 " "returned %d\n", &conn->c_faddr, ret); rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_sub_signaled(ic, nr_sig); goto out; } if (unlikely(failed_wr != &send->s_wr)) { printk(KERN_WARNING "RDS/IB: atomic ib_post_send() rc=%d, but failed_wqe updated!\n", ret); BUG_ON(failed_wr != &send->s_wr); } out: return ret; }
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, if (conn->c_loopback && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + scat = &rm->data.op_sg[sg]; + ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); + return ret; } /* FIXME we may overallocate here */
null
null
null
22,208
void rds_ib_xmit_complete(struct rds_connection *conn) { struct rds_ib_connection *ic = conn->c_transport_data; /* We may have a pending ACK or window update we were unable * to send previously (due to flow control). Try again. */ rds_ib_attempt_ack(ic); }
DoS
0
void rds_ib_xmit_complete(struct rds_connection *conn) { struct rds_ib_connection *ic = conn->c_transport_data; /* We may have a pending ACK or window update we were unable * to send previously (due to flow control). Try again. */ rds_ib_attempt_ack(ic); }
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, if (conn->c_loopback && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + scat = &rm->data.op_sg[sg]; + ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); + return ret; } /* FIXME we may overallocate here */
null
null
null
22,209
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) { struct rds_ib_connection *ic = conn->c_transport_data; struct rds_ib_send_work *send = NULL; struct rds_ib_send_work *first; struct rds_ib_send_work *prev; struct ib_send_wr *failed_wr; struct scatterlist *scat; unsigned long len; u64 remote_addr = op->op_remote_addr; u32 max_sge = ic->rds_ibdev->max_sge; u32 pos; u32 work_alloc; u32 i; u32 j; int sent; int ret; int num_sge; int nr_sig = 0; /* map the op the first time we see it */ if (!op->op_mapped) { op->op_count = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, op->op_nents, (op->op_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count); if (op->op_count == 0) { rds_ib_stats_inc(s_ib_tx_sg_mapping_failure); ret = -ENOMEM; /* XXX ? */ goto out; } op->op_mapped = 1; } /* * Instead of knowing how to return a partial rdma read/write we insist that there * be enough work requests to send the entire message. */ i = ceil(op->op_count, max_sge); work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); if (work_alloc != i) { rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_stats_inc(s_ib_tx_ring_full); ret = -ENOMEM; goto out; } send = &ic->i_sends[pos]; first = send; prev = NULL; scat = &op->op_sg[0]; sent = 0; num_sge = op->op_count; for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) { send->s_wr.send_flags = 0; send->s_queued = jiffies; send->s_op = NULL; nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify); send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ; send->s_wr.wr.rdma.remote_addr = remote_addr; send->s_wr.wr.rdma.rkey = op->op_rkey; if (num_sge > max_sge) { send->s_wr.num_sge = max_sge; num_sge -= max_sge; } else { send->s_wr.num_sge = num_sge; } send->s_wr.next = NULL; if (prev) prev->s_wr.next = &send->s_wr; for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) { len = ib_sg_dma_len(ic->i_cm_id->device, scat); send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat); send->s_sge[j].length = len; send->s_sge[j].lkey = ic->i_mr->lkey; sent += len; rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr); remote_addr += len; scat++; } rdsdebug("send %p wr %p num_sge %u next %p\n", send, &send->s_wr, send->s_wr.num_sge, send->s_wr.next); prev = send; if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) send = ic->i_sends; } /* give a reference to the last op */ if (scat == &op->op_sg[op->op_count]) { prev->s_op = op; rds_message_addref(container_of(op, struct rds_message, rdma)); } if (i < work_alloc) { rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i); work_alloc = i; } if (nr_sig) atomic_add(nr_sig, &ic->i_signaled_sends); failed_wr = &first->s_wr; ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, first, &first->s_wr, ret, failed_wr); BUG_ON(failed_wr != &first->s_wr); if (ret) { printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 " "returned %d\n", &conn->c_faddr, ret); rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_sub_signaled(ic, nr_sig); goto out; } if (unlikely(failed_wr != &first->s_wr)) { printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret); BUG_ON(failed_wr != &first->s_wr); } out: return ret; }
DoS
0
int rds_ib_xmit_rdma(struct rds_connection *conn, struct rm_rdma_op *op) { struct rds_ib_connection *ic = conn->c_transport_data; struct rds_ib_send_work *send = NULL; struct rds_ib_send_work *first; struct rds_ib_send_work *prev; struct ib_send_wr *failed_wr; struct scatterlist *scat; unsigned long len; u64 remote_addr = op->op_remote_addr; u32 max_sge = ic->rds_ibdev->max_sge; u32 pos; u32 work_alloc; u32 i; u32 j; int sent; int ret; int num_sge; int nr_sig = 0; /* map the op the first time we see it */ if (!op->op_mapped) { op->op_count = ib_dma_map_sg(ic->i_cm_id->device, op->op_sg, op->op_nents, (op->op_write) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); rdsdebug("ic %p mapping op %p: %d\n", ic, op, op->op_count); if (op->op_count == 0) { rds_ib_stats_inc(s_ib_tx_sg_mapping_failure); ret = -ENOMEM; /* XXX ? */ goto out; } op->op_mapped = 1; } /* * Instead of knowing how to return a partial rdma read/write we insist that there * be enough work requests to send the entire message. */ i = ceil(op->op_count, max_sge); work_alloc = rds_ib_ring_alloc(&ic->i_send_ring, i, &pos); if (work_alloc != i) { rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_stats_inc(s_ib_tx_ring_full); ret = -ENOMEM; goto out; } send = &ic->i_sends[pos]; first = send; prev = NULL; scat = &op->op_sg[0]; sent = 0; num_sge = op->op_count; for (i = 0; i < work_alloc && scat != &op->op_sg[op->op_count]; i++) { send->s_wr.send_flags = 0; send->s_queued = jiffies; send->s_op = NULL; nr_sig += rds_ib_set_wr_signal_state(ic, send, op->op_notify); send->s_wr.opcode = op->op_write ? IB_WR_RDMA_WRITE : IB_WR_RDMA_READ; send->s_wr.wr.rdma.remote_addr = remote_addr; send->s_wr.wr.rdma.rkey = op->op_rkey; if (num_sge > max_sge) { send->s_wr.num_sge = max_sge; num_sge -= max_sge; } else { send->s_wr.num_sge = num_sge; } send->s_wr.next = NULL; if (prev) prev->s_wr.next = &send->s_wr; for (j = 0; j < send->s_wr.num_sge && scat != &op->op_sg[op->op_count]; j++) { len = ib_sg_dma_len(ic->i_cm_id->device, scat); send->s_sge[j].addr = ib_sg_dma_address(ic->i_cm_id->device, scat); send->s_sge[j].length = len; send->s_sge[j].lkey = ic->i_mr->lkey; sent += len; rdsdebug("ic %p sent %d remote_addr %llu\n", ic, sent, remote_addr); remote_addr += len; scat++; } rdsdebug("send %p wr %p num_sge %u next %p\n", send, &send->s_wr, send->s_wr.num_sge, send->s_wr.next); prev = send; if (++send == &ic->i_sends[ic->i_send_ring.w_nr]) send = ic->i_sends; } /* give a reference to the last op */ if (scat == &op->op_sg[op->op_count]) { prev->s_op = op; rds_message_addref(container_of(op, struct rds_message, rdma)); } if (i < work_alloc) { rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc - i); work_alloc = i; } if (nr_sig) atomic_add(nr_sig, &ic->i_signaled_sends); failed_wr = &first->s_wr; ret = ib_post_send(ic->i_cm_id->qp, &first->s_wr, &failed_wr); rdsdebug("ic %p first %p (wr %p) ret %d wr %p\n", ic, first, &first->s_wr, ret, failed_wr); BUG_ON(failed_wr != &first->s_wr); if (ret) { printk(KERN_WARNING "RDS/IB: rdma ib_post_send to %pI4 " "returned %d\n", &conn->c_faddr, ret); rds_ib_ring_unalloc(&ic->i_send_ring, work_alloc); rds_ib_sub_signaled(ic, nr_sig); goto out; } if (unlikely(failed_wr != &first->s_wr)) { printk(KERN_WARNING "RDS/IB: ib_post_send() rc=%d, but failed_wqe updated!\n", ret); BUG_ON(failed_wr != &first->s_wr); } out: return ret; }
@@ -551,7 +551,10 @@ int rds_ib_xmit(struct rds_connection *conn, struct rds_message *rm, if (conn->c_loopback && rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + scat = &rm->data.op_sg[sg]; + ret = sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, scat->length - conn->c_xmit_data_off); + return ret; } /* FIXME we may overallocate here */
null
null
null
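rds_ib_xmit_rdma above allocates ceil(op_count / max_sge) ring slots so a multi-SGE RDMA never has to be sent partially, then hands out at most max_sge SGEs per work request. A quick sketch of that round-up-and-split arithmetic; div_round_up here is the usual integer ceiling, assumed equivalent to the ceil() helper the function calls.

    /* Sketch only: split op_count SGEs across ceil(op_count/max_sge) WRs. */
    #include <stdio.h>

    static unsigned int div_round_up(unsigned int n, unsigned int d)
    {
        return (n + d - 1) / d;
    }

    int main(void)
    {
        unsigned int op_count = 37, max_sge = 16;
        unsigned int wrs = div_round_up(op_count, max_sge);   /* 3 */
        unsigned int left = op_count;

        for (unsigned int i = 0; i < wrs; i++) {
            unsigned int num_sge = left > max_sge ? max_sge : left;
            printf("wr %u: %u sge\n", i, num_sge);  /* 16, 16, 5 */
            left -= num_sge;
        }
        return 0;
    }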
22,210
static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp) { struct rds_loop_connection *lc; unsigned long flags; lc = kzalloc(sizeof(struct rds_loop_connection), GFP_KERNEL); if (!lc) return -ENOMEM; INIT_LIST_HEAD(&lc->loop_node); lc->conn = conn; conn->c_transport_data = lc; spin_lock_irqsave(&loop_conns_lock, flags); list_add_tail(&lc->loop_node, &loop_conns); spin_unlock_irqrestore(&loop_conns_lock, flags); return 0; }
DoS
0
static int rds_loop_conn_alloc(struct rds_connection *conn, gfp_t gfp) { struct rds_loop_connection *lc; unsigned long flags; lc = kzalloc(sizeof(struct rds_loop_connection), GFP_KERNEL); if (!lc) return -ENOMEM; INIT_LIST_HEAD(&lc->loop_node); lc->conn = conn; conn->c_transport_data = lc; spin_lock_irqsave(&loop_conns_lock, flags); list_add_tail(&lc->loop_node, &loop_conns); spin_unlock_irqrestore(&loop_conns_lock, flags); return 0; }
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off) { + struct scatterlist *sgp = &rm->data.op_sg[sg]; + int ret = sizeof(struct rds_header) + + be32_to_cpu(rm->m_inc.i_hdr.h_len); + /* Do not send cong updates to loopback */ if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off); + goto out; } BUG_ON(hdr_off || sg || off); @@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, NULL); rds_inc_put(&rm->m_inc); - - return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); +out: + return ret; } /*
null
null
null
22,211
static void rds_loop_conn_free(void *arg) { struct rds_loop_connection *lc = arg; unsigned long flags; rdsdebug("lc %p\n", lc); spin_lock_irqsave(&loop_conns_lock, flags); list_del(&lc->loop_node); spin_unlock_irqrestore(&loop_conns_lock, flags); kfree(lc); }
DoS
0
static void rds_loop_conn_free(void *arg) { struct rds_loop_connection *lc = arg; unsigned long flags; rdsdebug("lc %p\n", lc); spin_lock_irqsave(&loop_conns_lock, flags); list_del(&lc->loop_node); spin_unlock_irqrestore(&loop_conns_lock, flags); kfree(lc); }
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off) { + struct scatterlist *sgp = &rm->data.op_sg[sg]; + int ret = sizeof(struct rds_header) + + be32_to_cpu(rm->m_inc.i_hdr.h_len); + /* Do not send cong updates to loopback */ if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off); + goto out; } BUG_ON(hdr_off || sg || off); @@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, NULL); rds_inc_put(&rm->m_inc); - - return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); +out: + return ret; } /*
null
null
null
22,212
static void rds_loop_conn_shutdown(struct rds_connection *conn) { }
DoS
0
static void rds_loop_conn_shutdown(struct rds_connection *conn) { }
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off) { + struct scatterlist *sgp = &rm->data.op_sg[sg]; + int ret = sizeof(struct rds_header) + + be32_to_cpu(rm->m_inc.i_hdr.h_len); + /* Do not send cong updates to loopback */ if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off); + goto out; } BUG_ON(hdr_off || sg || off); @@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, NULL); rds_inc_put(&rm->m_inc); - - return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); +out: + return ret; } /*
null
null
null
22,213
void rds_loop_exit(void) { struct rds_loop_connection *lc, *_lc; LIST_HEAD(tmp_list); /* avoid calling conn_destroy with irqs off */ spin_lock_irq(&loop_conns_lock); list_splice(&loop_conns, &tmp_list); INIT_LIST_HEAD(&loop_conns); spin_unlock_irq(&loop_conns_lock); list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) { WARN_ON(lc->conn->c_passive); rds_conn_destroy(lc->conn); } }
DoS
0
void rds_loop_exit(void) { struct rds_loop_connection *lc, *_lc; LIST_HEAD(tmp_list); /* avoid calling conn_destroy with irqs off */ spin_lock_irq(&loop_conns_lock); list_splice(&loop_conns, &tmp_list); INIT_LIST_HEAD(&loop_conns); spin_unlock_irq(&loop_conns_lock); list_for_each_entry_safe(lc, _lc, &tmp_list, loop_node) { WARN_ON(lc->conn->c_passive); rds_conn_destroy(lc->conn); } }
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off) { + struct scatterlist *sgp = &rm->data.op_sg[sg]; + int ret = sizeof(struct rds_header) + + be32_to_cpu(rm->m_inc.i_hdr.h_len); + /* Do not send cong updates to loopback */ if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off); + goto out; } BUG_ON(hdr_off || sg || off); @@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, NULL); rds_inc_put(&rm->m_inc); - - return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); +out: + return ret; } /*
null
null
null
22,214
static void rds_loop_inc_free(struct rds_incoming *inc) { struct rds_message *rm = container_of(inc, struct rds_message, m_inc); rds_message_put(rm); }
DoS
0
static void rds_loop_inc_free(struct rds_incoming *inc) { struct rds_message *rm = container_of(inc, struct rds_message, m_inc); rds_message_put(rm); }
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off) { + struct scatterlist *sgp = &rm->data.op_sg[sg]; + int ret = sizeof(struct rds_header) + + be32_to_cpu(rm->m_inc.i_hdr.h_len); + /* Do not send cong updates to loopback */ if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off); + goto out; } BUG_ON(hdr_off || sg || off); @@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, NULL); rds_inc_put(&rm->m_inc); - - return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); +out: + return ret; } /*
null
null
null
22,215
static int rds_loop_recv(struct rds_connection *conn) { return 0; }
DoS
0
static int rds_loop_recv(struct rds_connection *conn) { return 0; }
@@ -61,10 +61,15 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, unsigned int hdr_off, unsigned int sg, unsigned int off) { + struct scatterlist *sgp = &rm->data.op_sg[sg]; + int ret = sizeof(struct rds_header) + + be32_to_cpu(rm->m_inc.i_hdr.h_len); + /* Do not send cong updates to loopback */ if (rm->m_inc.i_hdr.h_flags & RDS_FLAG_CONG_BITMAP) { rds_cong_map_updated(conn->c_fcong, ~(u64) 0); - return sizeof(struct rds_header) + RDS_CONG_MAP_BYTES; + ret = min_t(int, ret, sgp->length - conn->c_xmit_data_off); + goto out; } BUG_ON(hdr_off || sg || off); @@ -80,8 +85,8 @@ static int rds_loop_xmit(struct rds_connection *conn, struct rds_message *rm, NULL); rds_inc_put(&rm->m_inc); - - return sizeof(struct rds_header) + be32_to_cpu(rm->m_inc.i_hdr.h_len); +out: + return ret; } /*
null
null
null
22,216
static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) { return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1); }
DoS Mem. Corr.
0
static inline int __br_ip4_hash(struct net_bridge_mdb_htable *mdb, __be32 ip) { return jhash_1word(mdb->secret, (__force u32)ip) & (mdb->max - 1); }
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
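From this record on, the attached patch is the bridge multicast fix (CWE-399): br_multicast_add_group now calls hlist_add_head only when hlist_unhashed confirms mp->mglist is not already linked, because re-adding a live hlist node corrupts the list. A simplified user-space model of the kernel's hlist (fields and helpers reimplemented here purely for illustration) shows why:

    /* Sketch only: double hlist_add_head() splices a node into a cycle. */
    #include <stdbool.h>
    #include <stddef.h>
    #include <stdio.h>

    struct hlist_node { struct hlist_node *next, **pprev; };
    struct hlist_head { struct hlist_node *first; };

    static bool hlist_unhashed(const struct hlist_node *n)
    {
        return !n->pprev;
    }

    static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
    {
        n->next = h->first;
        if (h->first)
            h->first->pprev = &n->next;
        h->first = n;
        n->pprev = &h->first;
    }

    int main(void)
    {
        struct hlist_head mglist = { NULL };
        struct hlist_node mp = { NULL, NULL };

        /* the patched pattern: add only if not already hashed */
        if (hlist_unhashed(&mp))
            hlist_add_head(&mp, &mglist);
        if (hlist_unhashed(&mp))            /* second report: skipped */
            hlist_add_head(&mp, &mglist);

        /* without the guard, the second add sets mp.next == &mp,
         * so any traversal of mglist loops forever */
        printf("single entry: %s\n",
               mglist.first == &mp && !mp.next ? "yes" : "no");
        return 0;
    }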
22,217
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, const struct in6_addr *ip) { return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1); }
DoS Mem. Corr.
0
static inline int __br_ip6_hash(struct net_bridge_mdb_htable *mdb, const struct in6_addr *ip) { return jhash2((__force u32 *)ip->s6_addr32, 4, mdb->secret) & (mdb->max - 1); }
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
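Both hash helpers in these two records mask a jhash result with mdb->max - 1, which only selects a valid bucket because max is kept a power of two elsewhere in this file. A two-function sketch of that masking, with an assumed table size:

    /* Sketch only: power-of-two bucket masking as in __br_ip4_hash. */
    #include <stdint.h>
    #include <stdio.h>

    static unsigned int bucket(uint32_t hash, unsigned int max)
    {
        return hash & (max - 1);    /* same as hash % max when max = 2^k */
    }

    int main(void)
    {
        printf("%u\n", bucket(0xdeadbeefu, 512));   /* low 9 bits: 239 */
        return 0;
    }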
22,218
static void __br_multicast_enable_port(struct net_bridge_port *port) { port->multicast_startup_queries_sent = 0; if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 || del_timer(&port->multicast_query_timer)) mod_timer(&port->multicast_query_timer, jiffies); }
DoS Mem. Corr.
0
static void __br_multicast_enable_port(struct net_bridge_port *port) { port->multicast_startup_queries_sent = 0; if (try_to_del_timer_sync(&port->multicast_query_timer) >= 0 || del_timer(&port->multicast_query_timer)) mod_timer(&port->multicast_query_timer, jiffies); }
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,219
static void __br_multicast_send_query(struct net_bridge *br, struct net_bridge_port *port, struct br_ip *ip) { struct sk_buff *skb; skb = br_multicast_alloc_query(br, ip); if (!skb) return; if (port) { __skb_push(skb, sizeof(struct ethhdr)); skb->dev = port->dev; NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, dev_queue_xmit); } else netif_rx(skb); }
DoS Mem. Corr.
0
static void __br_multicast_send_query(struct net_bridge *br, struct net_bridge_port *port, struct br_ip *ip) { struct sk_buff *skb; skb = br_multicast_alloc_query(br, ip); if (!skb) return; if (port) { __skb_push(skb, sizeof(struct ethhdr)); skb->dev = port->dev; NF_HOOK(NFPROTO_BRIDGE, NF_BR_LOCAL_OUT, skb, NULL, skb->dev, dev_queue_xmit); } else netif_rx(skb); }
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,220
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, __be32 group) { struct sk_buff *skb; struct igmphdr *ih; struct ethhdr *eth; struct iphdr *iph; skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) + sizeof(*ih) + 4); if (!skb) goto out; skb->protocol = htons(ETH_P_IP); skb_reset_mac_header(skb); eth = eth_hdr(skb); memcpy(eth->h_source, br->dev->dev_addr, 6); eth->h_dest[0] = 1; eth->h_dest[1] = 0; eth->h_dest[2] = 0x5e; eth->h_dest[3] = 0; eth->h_dest[4] = 0; eth->h_dest[5] = 1; eth->h_proto = htons(ETH_P_IP); skb_put(skb, sizeof(*eth)); skb_set_network_header(skb, skb->len); iph = ip_hdr(skb); iph->version = 4; iph->ihl = 6; iph->tos = 0xc0; iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4); iph->id = 0; iph->frag_off = htons(IP_DF); iph->ttl = 1; iph->protocol = IPPROTO_IGMP; iph->saddr = 0; iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); ((u8 *)&iph[1])[0] = IPOPT_RA; ((u8 *)&iph[1])[1] = 4; ((u8 *)&iph[1])[2] = 0; ((u8 *)&iph[1])[3] = 0; ip_send_check(iph); skb_put(skb, 24); skb_set_transport_header(skb, skb->len); ih = igmp_hdr(skb); ih->type = IGMP_HOST_MEMBERSHIP_QUERY; ih->code = (group ? br->multicast_last_member_interval : br->multicast_query_response_interval) / (HZ / IGMP_TIMER_SCALE); ih->group = group; ih->csum = 0; ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); skb_put(skb, sizeof(*ih)); __skb_pull(skb, sizeof(*eth)); out: return skb; }
DoS Mem. Corr.
0
static struct sk_buff *br_ip4_multicast_alloc_query(struct net_bridge *br, __be32 group) { struct sk_buff *skb; struct igmphdr *ih; struct ethhdr *eth; struct iphdr *iph; skb = netdev_alloc_skb_ip_align(br->dev, sizeof(*eth) + sizeof(*iph) + sizeof(*ih) + 4); if (!skb) goto out; skb->protocol = htons(ETH_P_IP); skb_reset_mac_header(skb); eth = eth_hdr(skb); memcpy(eth->h_source, br->dev->dev_addr, 6); eth->h_dest[0] = 1; eth->h_dest[1] = 0; eth->h_dest[2] = 0x5e; eth->h_dest[3] = 0; eth->h_dest[4] = 0; eth->h_dest[5] = 1; eth->h_proto = htons(ETH_P_IP); skb_put(skb, sizeof(*eth)); skb_set_network_header(skb, skb->len); iph = ip_hdr(skb); iph->version = 4; iph->ihl = 6; iph->tos = 0xc0; iph->tot_len = htons(sizeof(*iph) + sizeof(*ih) + 4); iph->id = 0; iph->frag_off = htons(IP_DF); iph->ttl = 1; iph->protocol = IPPROTO_IGMP; iph->saddr = 0; iph->daddr = htonl(INADDR_ALLHOSTS_GROUP); ((u8 *)&iph[1])[0] = IPOPT_RA; ((u8 *)&iph[1])[1] = 4; ((u8 *)&iph[1])[2] = 0; ((u8 *)&iph[1])[3] = 0; ip_send_check(iph); skb_put(skb, 24); skb_set_transport_header(skb, skb->len); ih = igmp_hdr(skb); ih->type = IGMP_HOST_MEMBERSHIP_QUERY; ih->code = (group ? br->multicast_last_member_interval : br->multicast_query_response_interval) / (HZ / IGMP_TIMER_SCALE); ih->group = group; ih->csum = 0; ih->csum = ip_compute_csum((void *)ih, sizeof(struct igmphdr)); skb_put(skb, sizeof(*ih)); __skb_pull(skb, sizeof(*eth)); out: return skb; }
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,221
static int br_ip4_multicast_igmp3_report(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { struct igmpv3_report *ih; struct igmpv3_grec *grec; int i; int len; int num; int type; int err = 0; __be32 group; if (!pskb_may_pull(skb, sizeof(*ih))) return -EINVAL; ih = igmpv3_report_hdr(skb); num = ntohs(ih->ngrec); len = sizeof(*ih); for (i = 0; i < num; i++) { len += sizeof(*grec); if (!pskb_may_pull(skb, len)) return -EINVAL; grec = (void *)(skb->data + len - sizeof(*grec)); group = grec->grec_mca; type = grec->grec_type; len += ntohs(grec->grec_nsrcs) * 4; if (!pskb_may_pull(skb, len)) return -EINVAL; /* We treat this as an IGMPv2 report for now. */ switch (type) { case IGMPV3_MODE_IS_INCLUDE: case IGMPV3_MODE_IS_EXCLUDE: case IGMPV3_CHANGE_TO_INCLUDE: case IGMPV3_CHANGE_TO_EXCLUDE: case IGMPV3_ALLOW_NEW_SOURCES: case IGMPV3_BLOCK_OLD_SOURCES: break; default: continue; } err = br_ip4_multicast_add_group(br, port, group); if (err) break; } return err; }
DoS Mem. Corr.
0
static int br_ip4_multicast_igmp3_report(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { struct igmpv3_report *ih; struct igmpv3_grec *grec; int i; int len; int num; int type; int err = 0; __be32 group; if (!pskb_may_pull(skb, sizeof(*ih))) return -EINVAL; ih = igmpv3_report_hdr(skb); num = ntohs(ih->ngrec); len = sizeof(*ih); for (i = 0; i < num; i++) { len += sizeof(*grec); if (!pskb_may_pull(skb, len)) return -EINVAL; grec = (void *)(skb->data + len - sizeof(*grec)); group = grec->grec_mca; type = grec->grec_type; len += ntohs(grec->grec_nsrcs) * 4; if (!pskb_may_pull(skb, len)) return -EINVAL; /* We treat this as an IGMPv2 report for now. */ switch (type) { case IGMPV3_MODE_IS_INCLUDE: case IGMPV3_MODE_IS_EXCLUDE: case IGMPV3_CHANGE_TO_INCLUDE: case IGMPV3_CHANGE_TO_EXCLUDE: case IGMPV3_ALLOW_NEW_SOURCES: case IGMPV3_BLOCK_OLD_SOURCES: break; default: continue; } err = br_ip4_multicast_add_group(br, port, group); if (err) break; } return err; }
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
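br_ip4_multicast_igmp3_report above validates a variable-length packet incrementally: it extends len by each group record (the fixed header plus ntohs(grec_nsrcs) * 4 bytes of sources) and re-checks the skb with pskb_may_pull before dereferencing anything. The same pattern over a flat buffer, with a made-up record layout and byte order ignored:

    /* Sketch only: incremental length validation of variable-size records. */
    #include <stdint.h>
    #include <stdio.h>
    #include <string.h>

    struct grec { uint16_t type; uint16_t nsrcs; uint32_t mca; };

    static int parse_records(const uint8_t *buf, size_t buflen, unsigned num)
    {
        size_t len = 0;

        for (unsigned i = 0; i < num; i++) {
            struct grec g;

            len += sizeof(g);
            if (len > buflen)               /* analogue of !pskb_may_pull() */
                return -1;
            memcpy(&g, buf + len - sizeof(g), sizeof(g));

            len += (size_t)g.nsrcs * 4;     /* variable-length source list
                                             * (real code converts with ntohs) */
            if (len > buflen)
                return -1;

            printf("record %u: type %u, %u sources\n", i, g.type, g.nsrcs);
        }
        return 0;
    }

    int main(void)
    {
        uint8_t buf[sizeof(struct grec)] = { 0 };   /* one empty record */
        return parse_records(buf, sizeof(buf), 1) == 0 ? 0 : 1;
    }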
22,222
static void br_ip4_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, __be32 group) { struct br_ip br_group; if (ipv4_is_local_multicast(group)) return; br_group.u.ip4 = group; br_group.proto = htons(ETH_P_IP); br_multicast_leave_group(br, port, &br_group); }
DoS Mem. Corr.
0
static void br_ip4_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, __be32 group) { struct br_ip br_group; if (ipv4_is_local_multicast(group)) return; br_group.u.ip4 = group; br_group.proto = htons(ETH_P_IP); br_multicast_leave_group(br, port, &br_group); }
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,223
static int br_ip4_multicast_query(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); struct igmphdr *ih = igmp_hdr(skb); struct net_bridge_mdb_entry *mp; struct igmpv3_query *ih3; struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; unsigned long max_delay; unsigned long now = jiffies; __be32 group; int err = 0; spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || (port && port->state == BR_STATE_DISABLED)) goto out; br_multicast_query_received(br, port, !!iph->saddr); group = ih->group; if (skb->len == sizeof(*ih)) { max_delay = ih->code * (HZ / IGMP_TIMER_SCALE); if (!max_delay) { max_delay = 10 * HZ; group = 0; } } else { if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) { err = -EINVAL; goto out; } ih3 = igmpv3_query_hdr(skb); if (ih3->nsrcs) goto out; max_delay = ih3->code ? IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; } if (!group) goto out; mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group); if (!mp) goto out; max_delay *= br->multicast_last_member_count; if (!hlist_unhashed(&mp->mglist) && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, now + max_delay) : try_to_del_timer_sync(&mp->timer) >= 0)) mod_timer(&mp->timer, now + max_delay); for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { if (timer_pending(&p->timer) ? time_after(p->timer.expires, now + max_delay) : try_to_del_timer_sync(&p->timer) >= 0) mod_timer(&mp->timer, now + max_delay); } out: spin_unlock(&br->multicast_lock); return err; }
DoS Mem. Corr.
0
static int br_ip4_multicast_query(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { struct iphdr *iph = ip_hdr(skb); struct igmphdr *ih = igmp_hdr(skb); struct net_bridge_mdb_entry *mp; struct igmpv3_query *ih3; struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; unsigned long max_delay; unsigned long now = jiffies; __be32 group; int err = 0; spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || (port && port->state == BR_STATE_DISABLED)) goto out; br_multicast_query_received(br, port, !!iph->saddr); group = ih->group; if (skb->len == sizeof(*ih)) { max_delay = ih->code * (HZ / IGMP_TIMER_SCALE); if (!max_delay) { max_delay = 10 * HZ; group = 0; } } else { if (!pskb_may_pull(skb, sizeof(struct igmpv3_query))) { err = -EINVAL; goto out; } ih3 = igmpv3_query_hdr(skb); if (ih3->nsrcs) goto out; max_delay = ih3->code ? IGMPV3_MRC(ih3->code) * (HZ / IGMP_TIMER_SCALE) : 1; } if (!group) goto out; mp = br_mdb_ip4_get(mlock_dereference(br->mdb, br), group); if (!mp) goto out; max_delay *= br->multicast_last_member_count; if (!hlist_unhashed(&mp->mglist) && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, now + max_delay) : try_to_del_timer_sync(&mp->timer) >= 0)) mod_timer(&mp->timer, now + max_delay); for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { if (timer_pending(&p->timer) ? time_after(p->timer.expires, now + max_delay) : try_to_del_timer_sync(&p->timer) >= 0) mod_timer(&mp->timer, now + max_delay); } out: spin_unlock(&br->multicast_lock); return err; }
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,224
static int br_ip6_multicast_add_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group) { struct br_ip br_group; if (ipv6_is_local_multicast(group)) return 0; ipv6_addr_copy(&br_group.u.ip6, group); br_group.proto = htons(ETH_P_IP); return br_multicast_add_group(br, port, &br_group); }
DoS Mem. Corr.
0
static int br_ip6_multicast_add_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group) { struct br_ip br_group; if (ipv6_is_local_multicast(group)) return 0; ipv6_addr_copy(&br_group.u.ip6, group); br_group.proto = htons(ETH_P_IP); return br_multicast_add_group(br, port, &br_group); }
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,225
static void br_ip6_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group) { struct br_ip br_group; if (ipv6_is_local_multicast(group)) return; ipv6_addr_copy(&br_group.u.ip6, group); br_group.proto = htons(ETH_P_IPV6); br_multicast_leave_group(br, port, &br_group); }
DoS Mem. Corr.
0
static void br_ip6_multicast_leave_group(struct net_bridge *br, struct net_bridge_port *port, const struct in6_addr *group) { struct br_ip br_group; if (ipv6_is_local_multicast(group)) return; ipv6_addr_copy(&br_group.u.ip6, group); br_group.proto = htons(ETH_P_IPV6); br_multicast_leave_group(br, port, &br_group); }
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,226
static int br_ip6_multicast_mld2_report(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { struct icmp6hdr *icmp6h; struct mld2_grec *grec; int i; int len; int num; int err = 0; if (!pskb_may_pull(skb, sizeof(*icmp6h))) return -EINVAL; icmp6h = icmp6_hdr(skb); num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); len = sizeof(*icmp6h); for (i = 0; i < num; i++) { __be16 *nsrcs, _nsrcs; nsrcs = skb_header_pointer(skb, len + offsetof(struct mld2_grec, grec_mca), sizeof(_nsrcs), &_nsrcs); if (!nsrcs) return -EINVAL; if (!pskb_may_pull(skb, len + sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs))) return -EINVAL; grec = (struct mld2_grec *)(skb->data + len); len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs); /* We treat these as MLDv1 reports for now. */ switch (grec->grec_type) { case MLD2_MODE_IS_INCLUDE: case MLD2_MODE_IS_EXCLUDE: case MLD2_CHANGE_TO_INCLUDE: case MLD2_CHANGE_TO_EXCLUDE: case MLD2_ALLOW_NEW_SOURCES: case MLD2_BLOCK_OLD_SOURCES: break; default: continue; } err = br_ip6_multicast_add_group(br, port, &grec->grec_mca); if (!err) break; } return err; }
DoS Mem. Corr.
0
static int br_ip6_multicast_mld2_report(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { struct icmp6hdr *icmp6h; struct mld2_grec *grec; int i; int len; int num; int err = 0; if (!pskb_may_pull(skb, sizeof(*icmp6h))) return -EINVAL; icmp6h = icmp6_hdr(skb); num = ntohs(icmp6h->icmp6_dataun.un_data16[1]); len = sizeof(*icmp6h); for (i = 0; i < num; i++) { __be16 *nsrcs, _nsrcs; nsrcs = skb_header_pointer(skb, len + offsetof(struct mld2_grec, grec_mca), sizeof(_nsrcs), &_nsrcs); if (!nsrcs) return -EINVAL; if (!pskb_may_pull(skb, len + sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs))) return -EINVAL; grec = (struct mld2_grec *)(skb->data + len); len += sizeof(*grec) + sizeof(struct in6_addr) * (*nsrcs); /* We treat these as MLDv1 reports for now. */ switch (grec->grec_type) { case MLD2_MODE_IS_INCLUDE: case MLD2_MODE_IS_EXCLUDE: case MLD2_CHANGE_TO_INCLUDE: case MLD2_CHANGE_TO_EXCLUDE: case MLD2_ALLOW_NEW_SOURCES: case MLD2_BLOCK_OLD_SOURCES: break; default: continue; } err = br_ip6_multicast_add_group(br, port, &grec->grec_mca); if (!err) break; } return err; }
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,227
static int br_ip6_multicast_query(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { struct ipv6hdr *ip6h = ipv6_hdr(skb); struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); struct net_bridge_mdb_entry *mp; struct mld2_query *mld2q; struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; unsigned long max_delay; unsigned long now = jiffies; struct in6_addr *group = NULL; int err = 0; spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || (port && port->state == BR_STATE_DISABLED)) goto out; br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr)); if (skb->len == sizeof(*mld)) { if (!pskb_may_pull(skb, sizeof(*mld))) { err = -EINVAL; goto out; } mld = (struct mld_msg *) icmp6_hdr(skb); max_delay = msecs_to_jiffies(htons(mld->mld_maxdelay)); if (max_delay) group = &mld->mld_mca; } else if (skb->len >= sizeof(*mld2q)) { if (!pskb_may_pull(skb, sizeof(*mld2q))) { err = -EINVAL; goto out; } mld2q = (struct mld2_query *)icmp6_hdr(skb); if (!mld2q->mld2q_nsrcs) group = &mld2q->mld2q_mca; max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1; } if (!group) goto out; mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group); if (!mp) goto out; max_delay *= br->multicast_last_member_count; if (!hlist_unhashed(&mp->mglist) && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, now + max_delay) : try_to_del_timer_sync(&mp->timer) >= 0)) mod_timer(&mp->timer, now + max_delay); for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { if (timer_pending(&p->timer) ? time_after(p->timer.expires, now + max_delay) : try_to_del_timer_sync(&p->timer) >= 0) mod_timer(&mp->timer, now + max_delay); } out: spin_unlock(&br->multicast_lock); return err; }
DoS Mem. Corr.
0
static int br_ip6_multicast_query(struct net_bridge *br, struct net_bridge_port *port, struct sk_buff *skb) { struct ipv6hdr *ip6h = ipv6_hdr(skb); struct mld_msg *mld = (struct mld_msg *) icmp6_hdr(skb); struct net_bridge_mdb_entry *mp; struct mld2_query *mld2q; struct net_bridge_port_group *p; struct net_bridge_port_group __rcu **pp; unsigned long max_delay; unsigned long now = jiffies; struct in6_addr *group = NULL; int err = 0; spin_lock(&br->multicast_lock); if (!netif_running(br->dev) || (port && port->state == BR_STATE_DISABLED)) goto out; br_multicast_query_received(br, port, !ipv6_addr_any(&ip6h->saddr)); if (skb->len == sizeof(*mld)) { if (!pskb_may_pull(skb, sizeof(*mld))) { err = -EINVAL; goto out; } mld = (struct mld_msg *) icmp6_hdr(skb); max_delay = msecs_to_jiffies(htons(mld->mld_maxdelay)); if (max_delay) group = &mld->mld_mca; } else if (skb->len >= sizeof(*mld2q)) { if (!pskb_may_pull(skb, sizeof(*mld2q))) { err = -EINVAL; goto out; } mld2q = (struct mld2_query *)icmp6_hdr(skb); if (!mld2q->mld2q_nsrcs) group = &mld2q->mld2q_mca; max_delay = mld2q->mld2q_mrc ? MLDV2_MRC(mld2q->mld2q_mrc) : 1; } if (!group) goto out; mp = br_mdb_ip6_get(mlock_dereference(br->mdb, br), group); if (!mp) goto out; max_delay *= br->multicast_last_member_count; if (!hlist_unhashed(&mp->mglist) && (timer_pending(&mp->timer) ? time_after(mp->timer.expires, now + max_delay) : try_to_del_timer_sync(&mp->timer) >= 0)) mod_timer(&mp->timer, now + max_delay); for (pp = &mp->ports; (p = mlock_dereference(*pp, br)) != NULL; pp = &p->next) { if (timer_pending(&p->timer) ? time_after(p->timer.expires, now + max_delay) : try_to_del_timer_sync(&p->timer) >= 0) mod_timer(&mp->timer, now + max_delay); } out: spin_unlock(&br->multicast_lock); return err; }
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
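The patch shared by the bridge records in this section (br_multicast_add_group) guards hlist_add_head() with hlist_unhashed(), so an mdb entry whose mglist node is already on br->mglist is not linked a second time. Below is a minimal user-space sketch of why the guard matters; the struct and helpers are simplified re-implementations for illustration, not the kernel's own definitions.

#include <stdio.h>

/* Simplified hlist, mirroring include/linux/list.h semantics. */
struct hlist_node { struct hlist_node *next, **pprev; };
struct hlist_head { struct hlist_node *first; };

static int hlist_unhashed(const struct hlist_node *n)
{
    return !n->pprev;
}

static void hlist_add_head(struct hlist_node *n, struct hlist_head *h)
{
    struct hlist_node *first = h->first;

    n->next = first;
    if (first)
        first->pprev = &n->next;
    h->first = n;
    n->pprev = &h->first;
}

int main(void)
{
    struct hlist_head head = { NULL };
    struct hlist_node a = { NULL, NULL };

    hlist_add_head(&a, &head);
    /*
     * Unconditionally adding 'a' again would set a.next = &a, making
     * the chain point at itself: any later traversal spins forever and
     * a later hlist_del() corrupts memory through the stale pprev.
     * The patched code avoids that with the same check used here:
     */
    if (hlist_unhashed(&a))
        hlist_add_head(&a, &head);

    printf("self-loop avoided: a.next=%p (NULL expected)\n",
           (void *)a.next);
    return 0;
}

A self-referencing list node that can be provoked by repeated membership reports is exactly the resource-corruption pattern these rows are tagged with (CWE-399).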
22,228
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
                       struct net_bridge_mdb_htable *old,
                       int elasticity)
{
    struct net_bridge_mdb_entry *mp;
    struct hlist_node *p;
    int maxlen;
    int len;
    int i;

    for (i = 0; i < old->max; i++)
        hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
            hlist_add_head(&mp->hlist[new->ver],
                           &new->mhash[br_ip_hash(new, &mp->addr)]);

    if (!elasticity)
        return 0;

    maxlen = 0;
    for (i = 0; i < new->max; i++) {
        len = 0;
        hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
            len++;
        if (len > maxlen)
            maxlen = len;
    }

    return maxlen > elasticity ? -EINVAL : 0;
}
DoS Mem. Corr.
0
static int br_mdb_copy(struct net_bridge_mdb_htable *new,
                       struct net_bridge_mdb_htable *old,
                       int elasticity)
{
    struct net_bridge_mdb_entry *mp;
    struct hlist_node *p;
    int maxlen;
    int len;
    int i;

    for (i = 0; i < old->max; i++)
        hlist_for_each_entry(mp, p, &old->mhash[i], hlist[old->ver])
            hlist_add_head(&mp->hlist[new->ver],
                           &new->mhash[br_ip_hash(new, &mp->addr)]);

    if (!elasticity)
        return 0;

    maxlen = 0;
    for (i = 0; i < new->max; i++) {
        len = 0;
        hlist_for_each_entry(mp, p, &new->mhash[i], hlist[new->ver])
            len++;
        if (len > maxlen)
            maxlen = len;
    }

    return maxlen > elasticity ? -EINVAL : 0;
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,229
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
                                        struct sk_buff *skb)
{
    struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
    struct br_ip ip;

    if (br->multicast_disabled)
        return NULL;

    if (BR_INPUT_SKB_CB(skb)->igmp)
        return NULL;

    ip.proto = skb->protocol;

    switch (skb->protocol) {
    case htons(ETH_P_IP):
        ip.u.ip4 = ip_hdr(skb)->daddr;
        break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    case htons(ETH_P_IPV6):
        ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
        break;
#endif
    default:
        return NULL;
    }

    return br_mdb_ip_get(mdb, &ip);
}
DoS Mem. Corr.
0
struct net_bridge_mdb_entry *br_mdb_get(struct net_bridge *br,
                                        struct sk_buff *skb)
{
    struct net_bridge_mdb_htable *mdb = rcu_dereference(br->mdb);
    struct br_ip ip;

    if (br->multicast_disabled)
        return NULL;

    if (BR_INPUT_SKB_CB(skb)->igmp)
        return NULL;

    ip.proto = skb->protocol;

    switch (skb->protocol) {
    case htons(ETH_P_IP):
        ip.u.ip4 = ip_hdr(skb)->daddr;
        break;
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    case htons(ETH_P_IPV6):
        ipv6_addr_copy(&ip.u.ip6, &ipv6_hdr(skb)->daddr);
        break;
#endif
    default:
        return NULL;
    }

    return br_mdb_ip_get(mdb, &ip);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,230
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
    struct net_bridge_mdb_htable *mdb, __be32 dst)
{
    struct br_ip br_dst;

    br_dst.u.ip4 = dst;
    br_dst.proto = htons(ETH_P_IP);

    return br_mdb_ip_get(mdb, &br_dst);
}
DoS Mem. Corr.
0
static struct net_bridge_mdb_entry *br_mdb_ip4_get(
    struct net_bridge_mdb_htable *mdb, __be32 dst)
{
    struct br_ip br_dst;

    br_dst.u.ip4 = dst;
    br_dst.proto = htons(ETH_P_IP);

    return br_mdb_ip_get(mdb, &br_dst);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,231
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
    struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
{
    struct br_ip br_dst;

    ipv6_addr_copy(&br_dst.u.ip6, dst);
    br_dst.proto = htons(ETH_P_IPV6);

    return br_mdb_ip_get(mdb, &br_dst);
}
DoS Mem. Corr.
0
static struct net_bridge_mdb_entry *br_mdb_ip6_get(
    struct net_bridge_mdb_htable *mdb, const struct in6_addr *dst)
{
    struct br_ip br_dst;

    ipv6_addr_copy(&br_dst.u.ip6, dst);
    br_dst.proto = htons(ETH_P_IPV6);

    return br_mdb_ip_get(mdb, &br_dst);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,232
static struct net_bridge_mdb_entry *br_mdb_ip_get(
    struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
{
    if (!mdb)
        return NULL;

    return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}
DoS Mem. Corr.
0
static struct net_bridge_mdb_entry *br_mdb_ip_get(
    struct net_bridge_mdb_htable *mdb, struct br_ip *dst)
{
    if (!mdb)
        return NULL;

    return __br_mdb_ip_get(mdb, dst, br_ip_hash(mdb, dst));
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,233
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
                         int elasticity)
{
    struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
    struct net_bridge_mdb_htable *mdb;
    int err;

    mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
    if (!mdb)
        return -ENOMEM;

    mdb->max = max;
    mdb->old = old;

    mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
    if (!mdb->mhash) {
        kfree(mdb);
        return -ENOMEM;
    }

    mdb->size = old ? old->size : 0;
    mdb->ver = old ? old->ver ^ 1 : 0;

    if (!old || elasticity)
        get_random_bytes(&mdb->secret, sizeof(mdb->secret));
    else
        mdb->secret = old->secret;

    if (!old)
        goto out;

    err = br_mdb_copy(mdb, old, elasticity);
    if (err) {
        kfree(mdb->mhash);
        kfree(mdb);
        return err;
    }

    call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
    rcu_assign_pointer(*mdbp, mdb);

    return 0;
}
DoS Mem. Corr.
0
static int br_mdb_rehash(struct net_bridge_mdb_htable __rcu **mdbp, int max,
                         int elasticity)
{
    struct net_bridge_mdb_htable *old = rcu_dereference_protected(*mdbp, 1);
    struct net_bridge_mdb_htable *mdb;
    int err;

    mdb = kmalloc(sizeof(*mdb), GFP_ATOMIC);
    if (!mdb)
        return -ENOMEM;

    mdb->max = max;
    mdb->old = old;

    mdb->mhash = kzalloc(max * sizeof(*mdb->mhash), GFP_ATOMIC);
    if (!mdb->mhash) {
        kfree(mdb);
        return -ENOMEM;
    }

    mdb->size = old ? old->size : 0;
    mdb->ver = old ? old->ver ^ 1 : 0;

    if (!old || elasticity)
        get_random_bytes(&mdb->secret, sizeof(mdb->secret));
    else
        mdb->secret = old->secret;

    if (!old)
        goto out;

    err = br_mdb_copy(mdb, old, elasticity);
    if (err) {
        kfree(mdb->mhash);
        kfree(mdb);
        return err;
    }

    call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
    rcu_assign_pointer(*mdbp, mdb);

    return 0;
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
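br_mdb_rehash() above grows the table while br_mdb_copy() re-inserts every entry and then verifies that no bucket chain exceeds the requested elasticity, failing with -EINVAL otherwise. The sketch below mirrors only that chain-length policy in user-space C (toy hash function, hypothetical item type); the kernel additionally keeps each entry linked in both the old and new tables through the two-slot hlist[mdb->ver] trick so RCU readers of the old table stay safe, which this sketch does not attempt.

#include <stdlib.h>

struct item { unsigned key; struct item *next; };

/* Toy hash; the kernel hashes the address with a random per-table secret. */
static unsigned hashfn(unsigned key, unsigned max) { return key & (max - 1); }

/*
 * Would rebuilding into 'max' buckets keep every chain within
 * 'elasticity'? Mirrors the check br_mdb_copy() performs after
 * re-inserting the entries.
 */
static int chains_ok(struct item **old, unsigned oldmax,
                     unsigned max, int elasticity)
{
    int *len = calloc(max, sizeof(*len));
    unsigned i;
    int ok = 1;

    if (!len)
        return 0;
    for (i = 0; i < oldmax; i++) {
        struct item *it;

        for (it = old[i]; it; it = it->next)
            if (++len[hashfn(it->key, max)] > elasticity)
                ok = 0;   /* this rehash would be rejected (-EINVAL) */
    }
    free(len);
    return ok;
}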
22,234
void br_multicast_add_port(struct net_bridge_port *port)
{
    port->multicast_router = 1;

    setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
                (unsigned long)port);
    setup_timer(&port->multicast_query_timer,
                br_multicast_port_query_expired, (unsigned long)port);
}
DoS Mem. Corr.
0
void br_multicast_add_port(struct net_bridge_port *port)
{
    port->multicast_router = 1;

    setup_timer(&port->multicast_router_timer, br_multicast_router_expired,
                (unsigned long)port);
    setup_timer(&port->multicast_query_timer,
                br_multicast_port_query_expired, (unsigned long)port);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,235
static void br_multicast_add_router(struct net_bridge *br,
                                    struct net_bridge_port *port)
{
    struct net_bridge_port *p;
    struct hlist_node *n, *slot = NULL;

    hlist_for_each_entry(p, n, &br->router_list, rlist) {
        if ((unsigned long) port >= (unsigned long) p)
            break;
        slot = n;
    }

    if (slot)
        hlist_add_after_rcu(slot, &port->rlist);
    else
        hlist_add_head_rcu(&port->rlist, &br->router_list);
}
DoS Mem. Corr.
0
static void br_multicast_add_router(struct net_bridge *br,
                                    struct net_bridge_port *port)
{
    struct net_bridge_port *p;
    struct hlist_node *n, *slot = NULL;

    hlist_for_each_entry(p, n, &br->router_list, rlist) {
        if ((unsigned long) port >= (unsigned long) p)
            break;
        slot = n;
    }

    if (slot)
        hlist_add_after_rcu(slot, &port->rlist);
    else
        hlist_add_head_rcu(&port->rlist, &br->router_list);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,236
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
                                                struct br_ip *addr)
{
    switch (addr->proto) {
    case htons(ETH_P_IP):
        return br_ip4_multicast_alloc_query(br, addr->u.ip4);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    case htons(ETH_P_IPV6):
        return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
#endif
    }
    return NULL;
}
DoS Mem. Corr.
0
static struct sk_buff *br_multicast_alloc_query(struct net_bridge *br,
                                                struct br_ip *addr)
{
    switch (addr->proto) {
    case htons(ETH_P_IP):
        return br_ip4_multicast_alloc_query(br, addr->u.ip4);
#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
    case htons(ETH_P_IPV6):
        return br_ip6_multicast_alloc_query(br, &addr->u.ip6);
#endif
    }
    return NULL;
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,237
void br_multicast_del_port(struct net_bridge_port *port)
{
    del_timer_sync(&port->multicast_router_timer);
}
DoS Mem. Corr.
0
void br_multicast_del_port(struct net_bridge_port *port)
{
    del_timer_sync(&port->multicast_router_timer);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,238
void br_multicast_disable_port(struct net_bridge_port *port)
{
    struct net_bridge *br = port->br;
    struct net_bridge_port_group *pg;
    struct hlist_node *p, *n;

    spin_lock(&br->multicast_lock);
    hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
        br_multicast_del_pg(br, pg);

    if (!hlist_unhashed(&port->rlist))
        hlist_del_init_rcu(&port->rlist);
    del_timer(&port->multicast_router_timer);
    del_timer(&port->multicast_query_timer);
    spin_unlock(&br->multicast_lock);
}
DoS Mem. Corr.
0
void br_multicast_disable_port(struct net_bridge_port *port)
{
    struct net_bridge *br = port->br;
    struct net_bridge_port_group *pg;
    struct hlist_node *p, *n;

    spin_lock(&br->multicast_lock);
    hlist_for_each_entry_safe(pg, p, n, &port->mglist, mglist)
        br_multicast_del_pg(br, pg);

    if (!hlist_unhashed(&port->rlist))
        hlist_del_init_rcu(&port->rlist);
    del_timer(&port->multicast_router_timer);
    del_timer(&port->multicast_query_timer);
    spin_unlock(&br->multicast_lock);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,239
void br_multicast_enable_port(struct net_bridge_port *port)
{
    struct net_bridge *br = port->br;

    spin_lock(&br->multicast_lock);
    if (br->multicast_disabled || !netif_running(br->dev))
        goto out;

    __br_multicast_enable_port(port);

out:
    spin_unlock(&br->multicast_lock);
}
DoS Mem. Corr.
0
void br_multicast_enable_port(struct net_bridge_port *port)
{
    struct net_bridge *br = port->br;

    spin_lock(&br->multicast_lock);
    if (br->multicast_disabled || !netif_running(br->dev))
        goto out;

    __br_multicast_enable_port(port);

out:
    spin_unlock(&br->multicast_lock);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,240
static void br_multicast_free_group(struct rcu_head *head)
{
    struct net_bridge_mdb_entry *mp =
        container_of(head, struct net_bridge_mdb_entry, rcu);

    kfree(mp);
}
DoS Mem. Corr.
0
static void br_multicast_free_group(struct rcu_head *head)
{
    struct net_bridge_mdb_entry *mp =
        container_of(head, struct net_bridge_mdb_entry, rcu);

    kfree(mp);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
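br_multicast_free_group() here and br_multicast_free_pg() in the next record are RCU callbacks: call_rcu_bh() hands them the embedded rcu_head after a grace period, and container_of() walks back to the enclosing object before kfree(). A user-space sketch of that pointer arithmetic follows; the macro matches the kernel's definition, but the surrounding types are made up for the example.

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
    ((type *)((char *)(ptr) - offsetof(type, member)))

struct rcu_head { void *next; void (*func)(struct rcu_head *); };

struct entry {
    int value;
    struct rcu_head rcu;    /* embedded, like net_bridge_mdb_entry */
};

static void free_cb(struct rcu_head *head)
{
    /* Walk back from the member to the enclosing struct. */
    struct entry *e = container_of(head, struct entry, rcu);

    printf("freeing entry with value %d\n", e->value);
}

int main(void)
{
    struct entry e = { 42, { NULL, NULL } };

    free_cb(&e.rcu);    /* the kernel invokes this after a grace period */
    return 0;
}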
22,241
static void br_multicast_free_pg(struct rcu_head *head)
{
    struct net_bridge_port_group *p =
        container_of(head, struct net_bridge_port_group, rcu);

    kfree(p);
}
DoS Mem. Corr.
0
static void br_multicast_free_pg(struct rcu_head *head)
{
    struct net_bridge_port_group *p =
        container_of(head, struct net_bridge_port_group, rcu);

    kfree(p);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,242
static struct net_bridge_mdb_entry *br_multicast_get_group(
    struct net_bridge *br, struct net_bridge_port *port,
    struct br_ip *group, int hash)
{
    struct net_bridge_mdb_htable *mdb;
    struct net_bridge_mdb_entry *mp;
    struct hlist_node *p;
    unsigned count = 0;
    unsigned max;
    int elasticity;
    int err;

    mdb = rcu_dereference_protected(br->mdb, 1);
    hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
        count++;
        if (unlikely(br_ip_equal(group, &mp->addr)))
            return mp;
    }

    elasticity = 0;
    max = mdb->max;

    if (unlikely(count > br->hash_elasticity && count)) {
        if (net_ratelimit())
            br_info(br, "Multicast hash table "
                    "chain limit reached: %s\n",
                    port ? port->dev->name : br->dev->name);

        elasticity = br->hash_elasticity;
    }

    if (mdb->size >= max) {
        max *= 2;
        if (unlikely(max >= br->hash_max)) {
            br_warn(br, "Multicast hash table maximum "
                    "reached, disabling snooping: %s, %d\n",
                    port ? port->dev->name : br->dev->name, max);
            err = -E2BIG;
disable:
            br->multicast_disabled = 1;
            goto err;
        }
    }

    if (max > mdb->max || elasticity) {
        if (mdb->old) {
            if (net_ratelimit())
                br_info(br, "Multicast hash table "
                        "on fire: %s\n",
                        port ? port->dev->name : br->dev->name);
            err = -EEXIST;
            goto err;
        }

        err = br_mdb_rehash(&br->mdb, max, elasticity);
        if (err) {
            br_warn(br, "Cannot rehash multicast "
                    "hash table, disabling snooping: %s, %d, %d\n",
                    port ? port->dev->name : br->dev->name,
                    mdb->size, err);
            goto disable;
        }

        err = -EAGAIN;
        goto err;
    }

    return NULL;

err:
    mp = ERR_PTR(err);
    return mp;
}
DoS Mem. Corr.
0
static struct net_bridge_mdb_entry *br_multicast_get_group(
    struct net_bridge *br, struct net_bridge_port *port,
    struct br_ip *group, int hash)
{
    struct net_bridge_mdb_htable *mdb;
    struct net_bridge_mdb_entry *mp;
    struct hlist_node *p;
    unsigned count = 0;
    unsigned max;
    int elasticity;
    int err;

    mdb = rcu_dereference_protected(br->mdb, 1);
    hlist_for_each_entry(mp, p, &mdb->mhash[hash], hlist[mdb->ver]) {
        count++;
        if (unlikely(br_ip_equal(group, &mp->addr)))
            return mp;
    }

    elasticity = 0;
    max = mdb->max;

    if (unlikely(count > br->hash_elasticity && count)) {
        if (net_ratelimit())
            br_info(br, "Multicast hash table "
                    "chain limit reached: %s\n",
                    port ? port->dev->name : br->dev->name);

        elasticity = br->hash_elasticity;
    }

    if (mdb->size >= max) {
        max *= 2;
        if (unlikely(max >= br->hash_max)) {
            br_warn(br, "Multicast hash table maximum "
                    "reached, disabling snooping: %s, %d\n",
                    port ? port->dev->name : br->dev->name, max);
            err = -E2BIG;
disable:
            br->multicast_disabled = 1;
            goto err;
        }
    }

    if (max > mdb->max || elasticity) {
        if (mdb->old) {
            if (net_ratelimit())
                br_info(br, "Multicast hash table "
                        "on fire: %s\n",
                        port ? port->dev->name : br->dev->name);
            err = -EEXIST;
            goto err;
        }

        err = br_mdb_rehash(&br->mdb, max, elasticity);
        if (err) {
            br_warn(br, "Cannot rehash multicast "
                    "hash table, disabling snooping: %s, %d, %d\n",
                    port ? port->dev->name : br->dev->name,
                    mdb->size, err);
            goto disable;
        }

        err = -EAGAIN;
        goto err;
    }

    return NULL;

err:
    mp = ERR_PTR(err);
    return mp;
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,243
static void br_multicast_group_expired(unsigned long data)
{
    struct net_bridge_mdb_entry *mp = (void *)data;
    struct net_bridge *br = mp->br;
    struct net_bridge_mdb_htable *mdb;

    spin_lock(&br->multicast_lock);
    if (!netif_running(br->dev) || timer_pending(&mp->timer))
        goto out;

    if (!hlist_unhashed(&mp->mglist))
        hlist_del_init(&mp->mglist);

    if (mp->ports)
        goto out;

    mdb = mlock_dereference(br->mdb, br);

    hlist_del_rcu(&mp->hlist[mdb->ver]);
    mdb->size--;

    del_timer(&mp->query_timer);
    call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
    spin_unlock(&br->multicast_lock);
}
DoS Mem. Corr.
0
static void br_multicast_group_expired(unsigned long data)
{
    struct net_bridge_mdb_entry *mp = (void *)data;
    struct net_bridge *br = mp->br;
    struct net_bridge_mdb_htable *mdb;

    spin_lock(&br->multicast_lock);
    if (!netif_running(br->dev) || timer_pending(&mp->timer))
        goto out;

    if (!hlist_unhashed(&mp->mglist))
        hlist_del_init(&mp->mglist);

    if (mp->ports)
        goto out;

    mdb = mlock_dereference(br->mdb, br);

    hlist_del_rcu(&mp->hlist[mdb->ver]);
    mdb->size--;

    del_timer(&mp->query_timer);
    call_rcu_bh(&mp->rcu, br_multicast_free_group);

out:
    spin_unlock(&br->multicast_lock);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,244
void br_multicast_init(struct net_bridge *br)
{
    br->hash_elasticity = 4;
    br->hash_max = 512;

    br->multicast_router = 1;
    br->multicast_last_member_count = 2;
    br->multicast_startup_query_count = 2;

    br->multicast_last_member_interval = HZ;
    br->multicast_query_response_interval = 10 * HZ;
    br->multicast_startup_query_interval = 125 * HZ / 4;
    br->multicast_query_interval = 125 * HZ;
    br->multicast_querier_interval = 255 * HZ;
    br->multicast_membership_interval = 260 * HZ;

    spin_lock_init(&br->multicast_lock);
    setup_timer(&br->multicast_router_timer,
                br_multicast_local_router_expired, 0);
    setup_timer(&br->multicast_querier_timer,
                br_multicast_local_router_expired, 0);
    setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
                (unsigned long)br);
}
DoS Mem. Corr.
0
void br_multicast_init(struct net_bridge *br)
{
    br->hash_elasticity = 4;
    br->hash_max = 512;

    br->multicast_router = 1;
    br->multicast_last_member_count = 2;
    br->multicast_startup_query_count = 2;

    br->multicast_last_member_interval = HZ;
    br->multicast_query_response_interval = 10 * HZ;
    br->multicast_startup_query_interval = 125 * HZ / 4;
    br->multicast_query_interval = 125 * HZ;
    br->multicast_querier_interval = 255 * HZ;
    br->multicast_membership_interval = 260 * HZ;

    spin_lock_init(&br->multicast_lock);
    setup_timer(&br->multicast_router_timer,
                br_multicast_local_router_expired, 0);
    setup_timer(&br->multicast_querier_timer,
                br_multicast_local_router_expired, 0);
    setup_timer(&br->multicast_query_timer, br_multicast_query_expired,
                (unsigned long)br);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,245
static int br_multicast_ipv4_rcv(struct net_bridge *br,
                                 struct net_bridge_port *port,
                                 struct sk_buff *skb)
{
    struct sk_buff *skb2 = skb;
    struct iphdr *iph;
    struct igmphdr *ih;
    unsigned len;
    unsigned offset;
    int err;

    /* We treat OOM as packet loss for now. */
    if (!pskb_may_pull(skb, sizeof(*iph)))
        return -EINVAL;

    iph = ip_hdr(skb);

    if (iph->ihl < 5 || iph->version != 4)
        return -EINVAL;

    if (!pskb_may_pull(skb, ip_hdrlen(skb)))
        return -EINVAL;

    iph = ip_hdr(skb);

    if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
        return -EINVAL;

    if (iph->protocol != IPPROTO_IGMP)
        return 0;

    len = ntohs(iph->tot_len);
    if (skb->len < len || len < ip_hdrlen(skb))
        return -EINVAL;

    if (skb->len > len) {
        skb2 = skb_clone(skb, GFP_ATOMIC);
        if (!skb2)
            return -ENOMEM;

        err = pskb_trim_rcsum(skb2, len);
        if (err)
            goto err_out;
    }

    len -= ip_hdrlen(skb2);
    offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
    __skb_pull(skb2, offset);
    skb_reset_transport_header(skb2);

    err = -EINVAL;
    if (!pskb_may_pull(skb2, sizeof(*ih)))
        goto out;

    switch (skb2->ip_summed) {
    case CHECKSUM_COMPLETE:
        if (!csum_fold(skb2->csum))
            break;
        /* fall through */
    case CHECKSUM_NONE:
        skb2->csum = 0;
        if (skb_checksum_complete(skb2))
            goto out;
    }

    err = 0;

    BR_INPUT_SKB_CB(skb)->igmp = 1;
    ih = igmp_hdr(skb2);

    switch (ih->type) {
    case IGMP_HOST_MEMBERSHIP_REPORT:
    case IGMPV2_HOST_MEMBERSHIP_REPORT:
        BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
        err = br_ip4_multicast_add_group(br, port, ih->group);
        break;
    case IGMPV3_HOST_MEMBERSHIP_REPORT:
        err = br_ip4_multicast_igmp3_report(br, port, skb2);
        break;
    case IGMP_HOST_MEMBERSHIP_QUERY:
        err = br_ip4_multicast_query(br, port, skb2);
        break;
    case IGMP_HOST_LEAVE_MESSAGE:
        br_ip4_multicast_leave_group(br, port, ih->group);
        break;
    }

out:
    __skb_push(skb2, offset);
err_out:
    if (skb2 != skb)
        kfree_skb(skb2);
    return err;
}
DoS Mem. Corr.
0
static int br_multicast_ipv4_rcv(struct net_bridge *br,
                                 struct net_bridge_port *port,
                                 struct sk_buff *skb)
{
    struct sk_buff *skb2 = skb;
    struct iphdr *iph;
    struct igmphdr *ih;
    unsigned len;
    unsigned offset;
    int err;

    /* We treat OOM as packet loss for now. */
    if (!pskb_may_pull(skb, sizeof(*iph)))
        return -EINVAL;

    iph = ip_hdr(skb);

    if (iph->ihl < 5 || iph->version != 4)
        return -EINVAL;

    if (!pskb_may_pull(skb, ip_hdrlen(skb)))
        return -EINVAL;

    iph = ip_hdr(skb);

    if (unlikely(ip_fast_csum((u8 *)iph, iph->ihl)))
        return -EINVAL;

    if (iph->protocol != IPPROTO_IGMP)
        return 0;

    len = ntohs(iph->tot_len);
    if (skb->len < len || len < ip_hdrlen(skb))
        return -EINVAL;

    if (skb->len > len) {
        skb2 = skb_clone(skb, GFP_ATOMIC);
        if (!skb2)
            return -ENOMEM;

        err = pskb_trim_rcsum(skb2, len);
        if (err)
            goto err_out;
    }

    len -= ip_hdrlen(skb2);
    offset = skb_network_offset(skb2) + ip_hdrlen(skb2);
    __skb_pull(skb2, offset);
    skb_reset_transport_header(skb2);

    err = -EINVAL;
    if (!pskb_may_pull(skb2, sizeof(*ih)))
        goto out;

    switch (skb2->ip_summed) {
    case CHECKSUM_COMPLETE:
        if (!csum_fold(skb2->csum))
            break;
        /* fall through */
    case CHECKSUM_NONE:
        skb2->csum = 0;
        if (skb_checksum_complete(skb2))
            goto out;
    }

    err = 0;

    BR_INPUT_SKB_CB(skb)->igmp = 1;
    ih = igmp_hdr(skb2);

    switch (ih->type) {
    case IGMP_HOST_MEMBERSHIP_REPORT:
    case IGMPV2_HOST_MEMBERSHIP_REPORT:
        BR_INPUT_SKB_CB(skb2)->mrouters_only = 1;
        err = br_ip4_multicast_add_group(br, port, ih->group);
        break;
    case IGMPV3_HOST_MEMBERSHIP_REPORT:
        err = br_ip4_multicast_igmp3_report(br, port, skb2);
        break;
    case IGMP_HOST_MEMBERSHIP_QUERY:
        err = br_ip4_multicast_query(br, port, skb2);
        break;
    case IGMP_HOST_LEAVE_MESSAGE:
        br_ip4_multicast_leave_group(br, port, ih->group);
        break;
    }

out:
    __skb_push(skb2, offset);
err_out:
    if (skb2 != skb)
        kfree_skb(skb2);
    return err;
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,246
static void br_multicast_leave_group(struct net_bridge *br,
                                     struct net_bridge_port *port,
                                     struct br_ip *group)
{
    struct net_bridge_mdb_htable *mdb;
    struct net_bridge_mdb_entry *mp;
    struct net_bridge_port_group *p;
    unsigned long now;
    unsigned long time;

    spin_lock(&br->multicast_lock);
    if (!netif_running(br->dev) ||
        (port && port->state == BR_STATE_DISABLED) ||
        timer_pending(&br->multicast_querier_timer))
        goto out;

    mdb = mlock_dereference(br->mdb, br);
    mp = br_mdb_ip_get(mdb, group);
    if (!mp)
        goto out;

    now = jiffies;
    time = now + br->multicast_last_member_count *
                 br->multicast_last_member_interval;

    if (!port) {
        if (!hlist_unhashed(&mp->mglist) &&
            (timer_pending(&mp->timer) ?
             time_after(mp->timer.expires, time) :
             try_to_del_timer_sync(&mp->timer) >= 0)) {
            mod_timer(&mp->timer, time);

            mp->queries_sent = 0;
            mod_timer(&mp->query_timer, now);
        }

        goto out;
    }

    for (p = mlock_dereference(mp->ports, br);
         p != NULL;
         p = mlock_dereference(p->next, br)) {
        if (p->port != port)
            continue;

        if (!hlist_unhashed(&p->mglist) &&
            (timer_pending(&p->timer) ?
             time_after(p->timer.expires, time) :
             try_to_del_timer_sync(&p->timer) >= 0)) {
            mod_timer(&p->timer, time);

            p->queries_sent = 0;
            mod_timer(&p->query_timer, now);
        }

        break;
    }

out:
    spin_unlock(&br->multicast_lock);
}
DoS Mem. Corr.
0
static void br_multicast_leave_group(struct net_bridge *br,
                                     struct net_bridge_port *port,
                                     struct br_ip *group)
{
    struct net_bridge_mdb_htable *mdb;
    struct net_bridge_mdb_entry *mp;
    struct net_bridge_port_group *p;
    unsigned long now;
    unsigned long time;

    spin_lock(&br->multicast_lock);
    if (!netif_running(br->dev) ||
        (port && port->state == BR_STATE_DISABLED) ||
        timer_pending(&br->multicast_querier_timer))
        goto out;

    mdb = mlock_dereference(br->mdb, br);
    mp = br_mdb_ip_get(mdb, group);
    if (!mp)
        goto out;

    now = jiffies;
    time = now + br->multicast_last_member_count *
                 br->multicast_last_member_interval;

    if (!port) {
        if (!hlist_unhashed(&mp->mglist) &&
            (timer_pending(&mp->timer) ?
             time_after(mp->timer.expires, time) :
             try_to_del_timer_sync(&mp->timer) >= 0)) {
            mod_timer(&mp->timer, time);

            mp->queries_sent = 0;
            mod_timer(&mp->query_timer, now);
        }

        goto out;
    }

    for (p = mlock_dereference(mp->ports, br);
         p != NULL;
         p = mlock_dereference(p->next, br)) {
        if (p->port != port)
            continue;

        if (!hlist_unhashed(&p->mglist) &&
            (timer_pending(&p->timer) ?
             time_after(p->timer.expires, time) :
             try_to_del_timer_sync(&p->timer) >= 0)) {
            mod_timer(&p->timer, time);

            p->queries_sent = 0;
            mod_timer(&p->query_timer, now);
        }

        break;
    }

out:
    spin_unlock(&br->multicast_lock);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,247
static void br_multicast_local_router_expired(unsigned long data)
{
}
DoS Mem. Corr.
0
static void br_multicast_local_router_expired(unsigned long data)
{
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,248
static void br_multicast_mark_router(struct net_bridge *br,
                                     struct net_bridge_port *port)
{
    unsigned long now = jiffies;

    if (!port) {
        if (br->multicast_router == 1)
            mod_timer(&br->multicast_router_timer,
                      now + br->multicast_querier_interval);
        return;
    }

    if (port->multicast_router != 1)
        return;

    if (!hlist_unhashed(&port->rlist))
        goto timer;

    br_multicast_add_router(br, port);

timer:
    mod_timer(&port->multicast_router_timer,
              now + br->multicast_querier_interval);
}
DoS Mem. Corr.
0
static void br_multicast_mark_router(struct net_bridge *br,
                                     struct net_bridge_port *port)
{
    unsigned long now = jiffies;

    if (!port) {
        if (br->multicast_router == 1)
            mod_timer(&br->multicast_router_timer,
                      now + br->multicast_querier_interval);
        return;
    }

    if (port->multicast_router != 1)
        return;

    if (!hlist_unhashed(&port->rlist))
        goto timer;

    br_multicast_add_router(br, port);

timer:
    mod_timer(&port->multicast_router_timer,
              now + br->multicast_querier_interval);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,249
static struct net_bridge_mdb_entry *br_multicast_new_group(
    struct net_bridge *br, struct net_bridge_port *port,
    struct br_ip *group)
{
    struct net_bridge_mdb_htable *mdb;
    struct net_bridge_mdb_entry *mp;
    int hash;
    int err;

    mdb = rcu_dereference_protected(br->mdb, 1);
    if (!mdb) {
        err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
        if (err)
            return ERR_PTR(err);
        goto rehash;
    }

    hash = br_ip_hash(mdb, group);
    mp = br_multicast_get_group(br, port, group, hash);
    switch (PTR_ERR(mp)) {
    case 0:
        break;

    case -EAGAIN:
rehash:
        mdb = rcu_dereference_protected(br->mdb, 1);
        hash = br_ip_hash(mdb, group);
        break;

    default:
        goto out;
    }

    mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
    if (unlikely(!mp))
        return ERR_PTR(-ENOMEM);

    mp->br = br;
    mp->addr = *group;
    setup_timer(&mp->timer, br_multicast_group_expired,
                (unsigned long)mp);
    setup_timer(&mp->query_timer, br_multicast_group_query_expired,
                (unsigned long)mp);

    hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
    mdb->size++;

out:
    return mp;
}
DoS Mem. Corr.
0
static struct net_bridge_mdb_entry *br_multicast_new_group(
    struct net_bridge *br, struct net_bridge_port *port,
    struct br_ip *group)
{
    struct net_bridge_mdb_htable *mdb;
    struct net_bridge_mdb_entry *mp;
    int hash;
    int err;

    mdb = rcu_dereference_protected(br->mdb, 1);
    if (!mdb) {
        err = br_mdb_rehash(&br->mdb, BR_HASH_SIZE, 0);
        if (err)
            return ERR_PTR(err);
        goto rehash;
    }

    hash = br_ip_hash(mdb, group);
    mp = br_multicast_get_group(br, port, group, hash);
    switch (PTR_ERR(mp)) {
    case 0:
        break;

    case -EAGAIN:
rehash:
        mdb = rcu_dereference_protected(br->mdb, 1);
        hash = br_ip_hash(mdb, group);
        break;

    default:
        goto out;
    }

    mp = kzalloc(sizeof(*mp), GFP_ATOMIC);
    if (unlikely(!mp))
        return ERR_PTR(-ENOMEM);

    mp->br = br;
    mp->addr = *group;
    setup_timer(&mp->timer, br_multicast_group_expired,
                (unsigned long)mp);
    setup_timer(&mp->query_timer, br_multicast_group_query_expired,
                (unsigned long)mp);

    hlist_add_head_rcu(&mp->hlist[mdb->ver], &mdb->mhash[hash]);
    mdb->size++;

out:
    return mp;
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
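br_multicast_new_group() and br_multicast_get_group() return errors through the pointer itself using the kernel's ERR_PTR()/PTR_ERR() convention, which is why the caller can switch directly on PTR_ERR(mp). A minimal sketch of that encoding follows; MAX_ERRNO is the kernel's actual bound, everything else is a simplified stand-in for include/linux/err.h.

#include <stdio.h>

#define MAX_ERRNO   4095
#define EAGAIN      11

/* Error codes live in the top 4095 values of the address space. */
static inline void *ERR_PTR(long error) { return (void *)error; }
static inline long PTR_ERR(const void *ptr) { return (long)ptr; }
static inline int IS_ERR(const void *ptr)
{
    return (unsigned long)ptr >= (unsigned long)-MAX_ERRNO;
}

int main(void)
{
    void *p = ERR_PTR(-EAGAIN);

    if (IS_ERR(p))
        printf("pointer carries errno %ld\n", -PTR_ERR(p));
    /*
     * PTR_ERR() on a valid pointer is meaningless; callers must check
     * IS_ERR() (or, as br_multicast_new_group() does, switch on known
     * error values) before dereferencing.
     */
    return 0;
}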
22,250
static void br_multicast_port_group_query_expired(unsigned long data)
{
    struct net_bridge_port_group *pg = (void *)data;
    struct net_bridge_port *port = pg->port;
    struct net_bridge *br = port->br;

    spin_lock(&br->multicast_lock);
    if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
        pg->queries_sent >= br->multicast_last_member_count)
        goto out;

    br_multicast_send_port_group_query(pg);

out:
    spin_unlock(&br->multicast_lock);
}
DoS Mem. Corr.
0
static void br_multicast_port_group_query_expired(unsigned long data)
{
    struct net_bridge_port_group *pg = (void *)data;
    struct net_bridge_port *port = pg->port;
    struct net_bridge *br = port->br;

    spin_lock(&br->multicast_lock);
    if (!netif_running(br->dev) || hlist_unhashed(&pg->mglist) ||
        pg->queries_sent >= br->multicast_last_member_count)
        goto out;

    br_multicast_send_port_group_query(pg);

out:
    spin_unlock(&br->multicast_lock);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,251
static void br_multicast_port_query_expired(unsigned long data)
{
    struct net_bridge_port *port = (void *)data;
    struct net_bridge *br = port->br;

    spin_lock(&br->multicast_lock);
    if (port->state == BR_STATE_DISABLED ||
        port->state == BR_STATE_BLOCKING)
        goto out;

    if (port->multicast_startup_queries_sent <
        br->multicast_startup_query_count)
        port->multicast_startup_queries_sent++;

    br_multicast_send_query(port->br, port,
                            port->multicast_startup_queries_sent);

out:
    spin_unlock(&br->multicast_lock);
}
DoS Mem. Corr.
0
static void br_multicast_port_query_expired(unsigned long data)
{
    struct net_bridge_port *port = (void *)data;
    struct net_bridge *br = port->br;

    spin_lock(&br->multicast_lock);
    if (port->state == BR_STATE_DISABLED ||
        port->state == BR_STATE_BLOCKING)
        goto out;

    if (port->multicast_startup_queries_sent <
        br->multicast_startup_query_count)
        port->multicast_startup_queries_sent++;

    br_multicast_send_query(port->br, port,
                            port->multicast_startup_queries_sent);

out:
    spin_unlock(&br->multicast_lock);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,252
static void br_multicast_query_expired(unsigned long data)
{
    struct net_bridge *br = (void *)data;

    spin_lock(&br->multicast_lock);
    if (br->multicast_startup_queries_sent <
        br->multicast_startup_query_count)
        br->multicast_startup_queries_sent++;

    br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);

    spin_unlock(&br->multicast_lock);
}
DoS Mem. Corr.
0
static void br_multicast_query_expired(unsigned long data)
{
    struct net_bridge *br = (void *)data;

    spin_lock(&br->multicast_lock);
    if (br->multicast_startup_queries_sent <
        br->multicast_startup_query_count)
        br->multicast_startup_queries_sent++;

    br_multicast_send_query(br, NULL, br->multicast_startup_queries_sent);

    spin_unlock(&br->multicast_lock);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,253
static void br_multicast_query_received(struct net_bridge *br,
                                        struct net_bridge_port *port,
                                        int saddr)
{
    if (saddr)
        mod_timer(&br->multicast_querier_timer,
                  jiffies + br->multicast_querier_interval);
    else if (timer_pending(&br->multicast_querier_timer))
        return;

    br_multicast_mark_router(br, port);
}
DoS Mem. Corr.
0
static void br_multicast_query_received(struct net_bridge *br,
                                        struct net_bridge_port *port,
                                        int saddr)
{
    if (saddr)
        mod_timer(&br->multicast_querier_timer,
                  jiffies + br->multicast_querier_interval);
    else if (timer_pending(&br->multicast_querier_timer))
        return;

    br_multicast_mark_router(br, port);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,254
static void br_multicast_router_expired(unsigned long data)
{
    struct net_bridge_port *port = (void *)data;
    struct net_bridge *br = port->br;

    spin_lock(&br->multicast_lock);
    if (port->multicast_router != 1 ||
        timer_pending(&port->multicast_router_timer) ||
        hlist_unhashed(&port->rlist))
        goto out;

    hlist_del_init_rcu(&port->rlist);

out:
    spin_unlock(&br->multicast_lock);
}
DoS Mem. Corr.
0
static void br_multicast_router_expired(unsigned long data)
{
    struct net_bridge_port *port = (void *)data;
    struct net_bridge *br = port->br;

    spin_lock(&br->multicast_lock);
    if (port->multicast_router != 1 ||
        timer_pending(&port->multicast_router_timer) ||
        hlist_unhashed(&port->rlist))
        goto out;

    hlist_del_init_rcu(&port->rlist);

out:
    spin_unlock(&br->multicast_lock);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,255
static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
{
    struct net_bridge *br = mp->br;
    struct sk_buff *skb;

    skb = br_multicast_alloc_query(br, &mp->addr);
    if (!skb)
        goto timer;

    netif_rx(skb);

timer:
    if (++mp->queries_sent < br->multicast_last_member_count)
        mod_timer(&mp->query_timer,
                  jiffies + br->multicast_last_member_interval);
}
DoS Mem. Corr.
0
static void br_multicast_send_group_query(struct net_bridge_mdb_entry *mp)
{
    struct net_bridge *br = mp->br;
    struct sk_buff *skb;

    skb = br_multicast_alloc_query(br, &mp->addr);
    if (!skb)
        goto timer;

    netif_rx(skb);

timer:
    if (++mp->queries_sent < br->multicast_last_member_count)
        mod_timer(&mp->query_timer,
                  jiffies + br->multicast_last_member_interval);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,256
static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
{
    struct net_bridge_port *port = pg->port;
    struct net_bridge *br = port->br;
    struct sk_buff *skb;

    skb = br_multicast_alloc_query(br, &pg->addr);
    if (!skb)
        goto timer;

    br_deliver(port, skb);

timer:
    if (++pg->queries_sent < br->multicast_last_member_count)
        mod_timer(&pg->query_timer,
                  jiffies + br->multicast_last_member_interval);
}
DoS Mem. Corr.
0
static void br_multicast_send_port_group_query(struct net_bridge_port_group *pg)
{
    struct net_bridge_port *port = pg->port;
    struct net_bridge *br = port->br;
    struct sk_buff *skb;

    skb = br_multicast_alloc_query(br, &pg->addr);
    if (!skb)
        goto timer;

    br_deliver(port, skb);

timer:
    if (++pg->queries_sent < br->multicast_last_member_count)
        mod_timer(&pg->query_timer,
                  jiffies + br->multicast_last_member_interval);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,257
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
    int err = -ENOENT;
    u32 old;
    struct net_bridge_mdb_htable *mdb;

    spin_lock(&br->multicast_lock);
    if (!netif_running(br->dev))
        goto unlock;

    err = -EINVAL;
    if (!is_power_of_2(val))
        goto unlock;

    mdb = mlock_dereference(br->mdb, br);
    if (mdb && val < mdb->size)
        goto unlock;

    err = 0;

    old = br->hash_max;
    br->hash_max = val;

    if (mdb) {
        if (mdb->old) {
            err = -EEXIST;
rollback:
            br->hash_max = old;
            goto unlock;
        }

        err = br_mdb_rehash(&br->mdb, br->hash_max,
                            br->hash_elasticity);
        if (err)
            goto rollback;
    }

unlock:
    spin_unlock(&br->multicast_lock);

    return err;
}
DoS Mem. Corr.
0
int br_multicast_set_hash_max(struct net_bridge *br, unsigned long val)
{
    int err = -ENOENT;
    u32 old;
    struct net_bridge_mdb_htable *mdb;

    spin_lock(&br->multicast_lock);
    if (!netif_running(br->dev))
        goto unlock;

    err = -EINVAL;
    if (!is_power_of_2(val))
        goto unlock;

    mdb = mlock_dereference(br->mdb, br);
    if (mdb && val < mdb->size)
        goto unlock;

    err = 0;

    old = br->hash_max;
    br->hash_max = val;

    if (mdb) {
        if (mdb->old) {
            err = -EEXIST;
rollback:
            br->hash_max = old;
            goto unlock;
        }

        err = br_mdb_rehash(&br->mdb, br->hash_max,
                            br->hash_elasticity);
        if (err)
            goto rollback;
    }

unlock:
    spin_unlock(&br->multicast_lock);

    return err;
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,258
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
    struct net_bridge *br = p->br;
    int err = -ENOENT;

    spin_lock(&br->multicast_lock);
    if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
        goto unlock;

    switch (val) {
    case 0:
    case 1:
    case 2:
        p->multicast_router = val;
        err = 0;

        if (val < 2 && !hlist_unhashed(&p->rlist))
            hlist_del_init_rcu(&p->rlist);

        if (val == 1)
            break;

        del_timer(&p->multicast_router_timer);

        if (val == 0)
            break;

        br_multicast_add_router(br, p);
        break;

    default:
        err = -EINVAL;
        break;
    }

unlock:
    spin_unlock(&br->multicast_lock);

    return err;
}
DoS Mem. Corr.
0
int br_multicast_set_port_router(struct net_bridge_port *p, unsigned long val)
{
    struct net_bridge *br = p->br;
    int err = -ENOENT;

    spin_lock(&br->multicast_lock);
    if (!netif_running(br->dev) || p->state == BR_STATE_DISABLED)
        goto unlock;

    switch (val) {
    case 0:
    case 1:
    case 2:
        p->multicast_router = val;
        err = 0;

        if (val < 2 && !hlist_unhashed(&p->rlist))
            hlist_del_init_rcu(&p->rlist);

        if (val == 1)
            break;

        del_timer(&p->multicast_router_timer);

        if (val == 0)
            break;

        br_multicast_add_router(br, p);
        break;

    default:
        err = -EINVAL;
        break;
    }

unlock:
    spin_unlock(&br->multicast_lock);

    return err;
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,259
void br_multicast_stop(struct net_bridge *br)
{
    struct net_bridge_mdb_htable *mdb;
    struct net_bridge_mdb_entry *mp;
    struct hlist_node *p, *n;
    u32 ver;
    int i;

    del_timer_sync(&br->multicast_router_timer);
    del_timer_sync(&br->multicast_querier_timer);
    del_timer_sync(&br->multicast_query_timer);

    spin_lock_bh(&br->multicast_lock);
    mdb = mlock_dereference(br->mdb, br);
    if (!mdb)
        goto out;

    br->mdb = NULL;

    ver = mdb->ver;
    for (i = 0; i < mdb->max; i++) {
        hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
                                  hlist[ver]) {
            del_timer(&mp->timer);
            del_timer(&mp->query_timer);
            call_rcu_bh(&mp->rcu, br_multicast_free_group);
        }
    }

    if (mdb->old) {
        spin_unlock_bh(&br->multicast_lock);
        rcu_barrier_bh();
        spin_lock_bh(&br->multicast_lock);
        WARN_ON(mdb->old);
    }

    mdb->old = mdb;
    call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
    spin_unlock_bh(&br->multicast_lock);
}
DoS Mem. Corr.
0
void br_multicast_stop(struct net_bridge *br)
{
    struct net_bridge_mdb_htable *mdb;
    struct net_bridge_mdb_entry *mp;
    struct hlist_node *p, *n;
    u32 ver;
    int i;

    del_timer_sync(&br->multicast_router_timer);
    del_timer_sync(&br->multicast_querier_timer);
    del_timer_sync(&br->multicast_query_timer);

    spin_lock_bh(&br->multicast_lock);
    mdb = mlock_dereference(br->mdb, br);
    if (!mdb)
        goto out;

    br->mdb = NULL;

    ver = mdb->ver;
    for (i = 0; i < mdb->max; i++) {
        hlist_for_each_entry_safe(mp, p, n, &mdb->mhash[i],
                                  hlist[ver]) {
            del_timer(&mp->timer);
            del_timer(&mp->query_timer);
            call_rcu_bh(&mp->rcu, br_multicast_free_group);
        }
    }

    if (mdb->old) {
        spin_unlock_bh(&br->multicast_lock);
        rcu_barrier_bh();
        spin_lock_bh(&br->multicast_lock);
        WARN_ON(mdb->old);
    }

    mdb->old = mdb;
    call_rcu_bh(&mdb->rcu, br_mdb_free);

out:
    spin_unlock_bh(&br->multicast_lock);
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,260
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
    struct net_bridge_port *port;
    int err = 0;
    struct net_bridge_mdb_htable *mdb;

    spin_lock(&br->multicast_lock);
    if (br->multicast_disabled == !val)
        goto unlock;

    br->multicast_disabled = !val;
    if (br->multicast_disabled)
        goto unlock;

    if (!netif_running(br->dev))
        goto unlock;

    mdb = mlock_dereference(br->mdb, br);
    if (mdb) {
        if (mdb->old) {
            err = -EEXIST;
rollback:
            br->multicast_disabled = !!val;
            goto unlock;
        }

        err = br_mdb_rehash(&br->mdb, mdb->max,
                            br->hash_elasticity);
        if (err)
            goto rollback;
    }

    br_multicast_open(br);
    list_for_each_entry(port, &br->port_list, list) {
        if (port->state == BR_STATE_DISABLED ||
            port->state == BR_STATE_BLOCKING)
            continue;

        __br_multicast_enable_port(port);
    }

unlock:
    spin_unlock(&br->multicast_lock);

    return err;
}
DoS Mem. Corr.
0
int br_multicast_toggle(struct net_bridge *br, unsigned long val)
{
    struct net_bridge_port *port;
    int err = 0;
    struct net_bridge_mdb_htable *mdb;

    spin_lock(&br->multicast_lock);
    if (br->multicast_disabled == !val)
        goto unlock;

    br->multicast_disabled = !val;
    if (br->multicast_disabled)
        goto unlock;

    if (!netif_running(br->dev))
        goto unlock;

    mdb = mlock_dereference(br->mdb, br);
    if (mdb) {
        if (mdb->old) {
            err = -EEXIST;
rollback:
            br->multicast_disabled = !!val;
            goto unlock;
        }

        err = br_mdb_rehash(&br->mdb, mdb->max,
                            br->hash_elasticity);
        if (err)
            goto rollback;
    }

    br_multicast_open(br);
    list_for_each_entry(port, &br->port_list, list) {
        if (port->state == BR_STATE_DISABLED ||
            port->state == BR_STATE_BLOCKING)
            continue;

        __br_multicast_enable_port(port);
    }

unlock:
    spin_unlock(&br->multicast_lock);

    return err;
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,261
static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
{
    if (ipv6_addr_is_multicast(addr) &&
        IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
        return 1;
    return 0;
}
DoS Mem. Corr.
0
static inline int ipv6_is_local_multicast(const struct in6_addr *addr)
{
    if (ipv6_addr_is_multicast(addr) &&
        IPV6_ADDR_MC_SCOPE(addr) <= IPV6_ADDR_SCOPE_LINKLOCAL)
        return 1;
    return 0;
}
@@ -719,7 +719,8 @@ static int br_multicast_add_group(struct net_bridge *br, goto err; if (!port) { - hlist_add_head(&mp->mglist, &br->mglist); + if (hlist_unhashed(&mp->mglist)) + hlist_add_head(&mp->mglist, &br->mglist); mod_timer(&mp->timer, now + br->multicast_membership_interval); goto out; }
CWE-399
null
null
22,262
static int __init default_policy_setup(char *str)
{
    ima_use_tcb = 1;
    return 1;
}
Bypass
0
static int __init default_policy_setup(char *str)
{
    ima_use_tcb = 1;
    return 1;
}
@@ -253,6 +253,8 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry, result = security_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal, args, &entry->lsm[lsm_rule].rule); + if (!entry->lsm[lsm_rule].rule) + return -EINVAL; return result; }
CWE-264
null
null
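The patch attached to these IMA records hardens ima_lsm_rule_init(): security_filter_rule_init() can apparently return 0 while leaving the rule pointer NULL (for instance when no LSM that implements audit-style filtering is active), and since ima_match_rules() skips NULL LSM rules, such a rule would match more broadly than the policy author intended. Below is a hedged user-space sketch of the guard; filter_rule_init() is a stub standing in for the real security hook, not its actual implementation.

#include <stdio.h>

#define EINVAL 22

/*
 * Stub for security_filter_rule_init(): with the LSM compiled out it
 * "succeeds" without producing a rule (an assumption based on the
 * patch's intent, not a copy of any LSM's code).
 */
static int filter_rule_init(void **rule)
{
    *rule = NULL;   /* no LSM: nothing to match against */
    return 0;
}

/*
 * Mirrors the patched ima_lsm_rule_init(): a rule that parsed "OK" but
 * is NULL would later be skipped by ima_match_rules(), silently
 * widening the policy rule's scope - so reject it up front.
 */
static int lsm_rule_init(void **rule)
{
    int result = filter_rule_init(rule);

    if (!*rule)
        return -EINVAL;
    return result;
}

int main(void)
{
    void *rule;

    printf("guarded init: %d (-EINVAL expected)\n", lsm_rule_init(&rule));
    return 0;
}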
22,263
void ima_delete_rules(void)
{
    struct ima_measure_rule_entry *entry, *tmp;

    mutex_lock(&ima_measure_mutex);
    list_for_each_entry_safe(entry, tmp, &measure_policy_rules, list) {
        list_del(&entry->list);
        kfree(entry);
    }
    mutex_unlock(&ima_measure_mutex);
}
Bypass
0
void ima_delete_rules(void)
{
    struct ima_measure_rule_entry *entry, *tmp;

    mutex_lock(&ima_measure_mutex);
    list_for_each_entry_safe(entry, tmp, &measure_policy_rules, list) {
        list_del(&entry->list);
        kfree(entry);
    }
    mutex_unlock(&ima_measure_mutex);
}
@@ -253,6 +253,8 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry, result = security_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal, args, &entry->lsm[lsm_rule].rule); + if (!entry->lsm[lsm_rule].rule) + return -EINVAL; return result; }
CWE-264
null
null
22,264
void __init ima_init_policy(void)
{
    int i, entries;

    /* if !ima_use_tcb set entries = 0 so we load NO default rules */
    if (ima_use_tcb)
        entries = ARRAY_SIZE(default_rules);
    else
        entries = 0;

    for (i = 0; i < entries; i++)
        list_add_tail(&default_rules[i].list, &measure_default_rules);
    ima_measure = &measure_default_rules;
}
Bypass
0
void __init ima_init_policy(void)
{
    int i, entries;

    /* if !ima_use_tcb set entries = 0 so we load NO default rules */
    if (ima_use_tcb)
        entries = ARRAY_SIZE(default_rules);
    else
        entries = 0;

    for (i = 0; i < entries; i++)
        list_add_tail(&default_rules[i].list, &measure_default_rules);
    ima_measure = &measure_default_rules;
}
@@ -253,6 +253,8 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry, result = security_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal, args, &entry->lsm[lsm_rule].rule); + if (!entry->lsm[lsm_rule].rule) + return -EINVAL; return result; }
CWE-264
null
null
22,265
int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask)
{
    struct ima_measure_rule_entry *entry;

    list_for_each_entry(entry, ima_measure, list) {
        bool rc;

        rc = ima_match_rules(entry, inode, func, mask);
        if (rc)
            return entry->action;
    }
    return 0;
}
Bypass
0
int ima_match_policy(struct inode *inode, enum ima_hooks func, int mask)
{
    struct ima_measure_rule_entry *entry;

    list_for_each_entry(entry, ima_measure, list) {
        bool rc;

        rc = ima_match_rules(entry, inode, func, mask);
        if (rc)
            return entry->action;
    }
    return 0;
}
@@ -253,6 +253,8 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry, result = security_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal, args, &entry->lsm[lsm_rule].rule); + if (!entry->lsm[lsm_rule].rule) + return -EINVAL; return result; }
CWE-264
null
null
22,266
static bool ima_match_rules(struct ima_measure_rule_entry *rule,
                            struct inode *inode, enum ima_hooks func,
                            int mask)
{
    struct task_struct *tsk = current;
    int i;

    if ((rule->flags & IMA_FUNC) && rule->func != func)
        return false;
    if ((rule->flags & IMA_MASK) && rule->mask != mask)
        return false;
    if ((rule->flags & IMA_FSMAGIC)
        && rule->fsmagic != inode->i_sb->s_magic)
        return false;
    if ((rule->flags & IMA_UID) && rule->uid != tsk->cred->uid)
        return false;
    for (i = 0; i < MAX_LSM_RULES; i++) {
        int rc = 0;
        u32 osid, sid;

        if (!rule->lsm[i].rule)
            continue;

        switch (i) {
        case LSM_OBJ_USER:
        case LSM_OBJ_ROLE:
        case LSM_OBJ_TYPE:
            security_inode_getsecid(inode, &osid);
            rc = security_filter_rule_match(osid,
                                            rule->lsm[i].type,
                                            Audit_equal,
                                            rule->lsm[i].rule,
                                            NULL);
            break;
        case LSM_SUBJ_USER:
        case LSM_SUBJ_ROLE:
        case LSM_SUBJ_TYPE:
            security_task_getsecid(tsk, &sid);
            rc = security_filter_rule_match(sid,
                                            rule->lsm[i].type,
                                            Audit_equal,
                                            rule->lsm[i].rule,
                                            NULL);
        default:
            break;
        }
        if (!rc)
            return false;
    }
    return true;
}
Bypass
0
static bool ima_match_rules(struct ima_measure_rule_entry *rule,
                            struct inode *inode, enum ima_hooks func,
                            int mask)
{
    struct task_struct *tsk = current;
    int i;

    if ((rule->flags & IMA_FUNC) && rule->func != func)
        return false;
    if ((rule->flags & IMA_MASK) && rule->mask != mask)
        return false;
    if ((rule->flags & IMA_FSMAGIC)
        && rule->fsmagic != inode->i_sb->s_magic)
        return false;
    if ((rule->flags & IMA_UID) && rule->uid != tsk->cred->uid)
        return false;
    for (i = 0; i < MAX_LSM_RULES; i++) {
        int rc = 0;
        u32 osid, sid;

        if (!rule->lsm[i].rule)
            continue;

        switch (i) {
        case LSM_OBJ_USER:
        case LSM_OBJ_ROLE:
        case LSM_OBJ_TYPE:
            security_inode_getsecid(inode, &osid);
            rc = security_filter_rule_match(osid,
                                            rule->lsm[i].type,
                                            Audit_equal,
                                            rule->lsm[i].rule,
                                            NULL);
            break;
        case LSM_SUBJ_USER:
        case LSM_SUBJ_ROLE:
        case LSM_SUBJ_TYPE:
            security_task_getsecid(tsk, &sid);
            rc = security_filter_rule_match(sid,
                                            rule->lsm[i].type,
                                            Audit_equal,
                                            rule->lsm[i].rule,
                                            NULL);
        default:
            break;
        }
        if (!rc)
            return false;
    }
    return true;
}
@@ -253,6 +253,8 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry, result = security_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal, args, &entry->lsm[lsm_rule].rule); + if (!entry->lsm[lsm_rule].rule) + return -EINVAL; return result; }
CWE-264
null
null
22,267
ssize_t ima_parse_add_rule(char *rule)
{
    const char *op = "update_policy";
    char *p;
    struct ima_measure_rule_entry *entry;
    ssize_t result, len;
    int audit_info = 0;

    /* Prevent installed policy from changing */
    if (ima_measure != &measure_default_rules) {
        integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
                            NULL, op, "already exists",
                            -EACCES, audit_info);
        return -EACCES;
    }

    entry = kzalloc(sizeof(*entry), GFP_KERNEL);
    if (!entry) {
        integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
                            NULL, op, "-ENOMEM", -ENOMEM, audit_info);
        return -ENOMEM;
    }

    INIT_LIST_HEAD(&entry->list);

    p = strsep(&rule, "\n");
    len = strlen(p) + 1;

    if (*p == '#') {
        kfree(entry);
        return len;
    }

    result = ima_parse_rule(p, entry);
    if (result) {
        kfree(entry);
        integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
                            NULL, op, "invalid policy", result,
                            audit_info);
        return result;
    }

    mutex_lock(&ima_measure_mutex);
    list_add_tail(&entry->list, &measure_policy_rules);
    mutex_unlock(&ima_measure_mutex);

    return len;
}
Bypass
0
ssize_t ima_parse_add_rule(char *rule)
{
    const char *op = "update_policy";
    char *p;
    struct ima_measure_rule_entry *entry;
    ssize_t result, len;
    int audit_info = 0;

    /* Prevent installed policy from changing */
    if (ima_measure != &measure_default_rules) {
        integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
                            NULL, op, "already exists",
                            -EACCES, audit_info);
        return -EACCES;
    }

    entry = kzalloc(sizeof(*entry), GFP_KERNEL);
    if (!entry) {
        integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
                            NULL, op, "-ENOMEM", -ENOMEM, audit_info);
        return -ENOMEM;
    }

    INIT_LIST_HEAD(&entry->list);

    p = strsep(&rule, "\n");
    len = strlen(p) + 1;

    if (*p == '#') {
        kfree(entry);
        return len;
    }

    result = ima_parse_rule(p, entry);
    if (result) {
        kfree(entry);
        integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
                            NULL, op, "invalid policy", result,
                            audit_info);
        return result;
    }

    mutex_lock(&ima_measure_mutex);
    list_add_tail(&entry->list, &measure_policy_rules);
    mutex_unlock(&ima_measure_mutex);

    return len;
}
@@ -253,6 +253,8 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry, result = security_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal, args, &entry->lsm[lsm_rule].rule); + if (!entry->lsm[lsm_rule].rule) + return -EINVAL; return result; }
CWE-264
null
null
22,268
static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
{
    struct audit_buffer *ab;
    char *p;
    int result = 0;

    ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE);

    entry->uid = -1;
    entry->action = UNKNOWN;
    while ((p = strsep(&rule, " \t")) != NULL) {
        substring_t args[MAX_OPT_ARGS];
        int token;
        unsigned long lnum;

        if (result < 0)
            break;
        if ((*p == '\0') || (*p == ' ') || (*p == '\t'))
            continue;
        token = match_token(p, policy_tokens, args);
        switch (token) {
        case Opt_measure:
            ima_log_string(ab, "action", "measure");
            if (entry->action != UNKNOWN)
                result = -EINVAL;
            entry->action = MEASURE;
            break;
        case Opt_dont_measure:
            ima_log_string(ab, "action", "dont_measure");
            if (entry->action != UNKNOWN)
                result = -EINVAL;
            entry->action = DONT_MEASURE;
            break;
        case Opt_func:
            ima_log_string(ab, "func", args[0].from);
            if (entry->func)
                result = -EINVAL;
            if (strcmp(args[0].from, "FILE_CHECK") == 0)
                entry->func = FILE_CHECK;
            /* PATH_CHECK is for backwards compat */
            else if (strcmp(args[0].from, "PATH_CHECK") == 0)
                entry->func = FILE_CHECK;
            else if (strcmp(args[0].from, "FILE_MMAP") == 0)
                entry->func = FILE_MMAP;
            else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
                entry->func = BPRM_CHECK;
            else
                result = -EINVAL;
            if (!result)
                entry->flags |= IMA_FUNC;
            break;
        case Opt_mask:
            ima_log_string(ab, "mask", args[0].from);
            if (entry->mask)
                result = -EINVAL;
            if ((strcmp(args[0].from, "MAY_EXEC")) == 0)
                entry->mask = MAY_EXEC;
            else if (strcmp(args[0].from, "MAY_WRITE") == 0)
                entry->mask = MAY_WRITE;
            else if (strcmp(args[0].from, "MAY_READ") == 0)
                entry->mask = MAY_READ;
            else if (strcmp(args[0].from, "MAY_APPEND") == 0)
                entry->mask = MAY_APPEND;
            else
                result = -EINVAL;
            if (!result)
                entry->flags |= IMA_MASK;
            break;
        case Opt_fsmagic:
            ima_log_string(ab, "fsmagic", args[0].from);
            if (entry->fsmagic) {
                result = -EINVAL;
                break;
            }
            result = strict_strtoul(args[0].from, 16,
                                    &entry->fsmagic);
            if (!result)
                entry->flags |= IMA_FSMAGIC;
            break;
        case Opt_uid:
            ima_log_string(ab, "uid", args[0].from);
            if (entry->uid != -1) {
                result = -EINVAL;
                break;
            }
            result = strict_strtoul(args[0].from, 10, &lnum);
            if (!result) {
                entry->uid = (uid_t) lnum;
                if (entry->uid != lnum)
                    result = -EINVAL;
                else
                    entry->flags |= IMA_UID;
            }
            break;
        case Opt_obj_user:
            ima_log_string(ab, "obj_user", args[0].from);
            result = ima_lsm_rule_init(entry, args[0].from,
                                       LSM_OBJ_USER,
                                       AUDIT_OBJ_USER);
            break;
        case Opt_obj_role:
            ima_log_string(ab, "obj_role", args[0].from);
            result = ima_lsm_rule_init(entry, args[0].from,
                                       LSM_OBJ_ROLE,
                                       AUDIT_OBJ_ROLE);
            break;
        case Opt_obj_type:
            ima_log_string(ab, "obj_type", args[0].from);
            result = ima_lsm_rule_init(entry, args[0].from,
                                       LSM_OBJ_TYPE,
                                       AUDIT_OBJ_TYPE);
            break;
        case Opt_subj_user:
            ima_log_string(ab, "subj_user", args[0].from);
            result = ima_lsm_rule_init(entry, args[0].from,
                                       LSM_SUBJ_USER,
                                       AUDIT_SUBJ_USER);
            break;
        case Opt_subj_role:
            ima_log_string(ab, "subj_role", args[0].from);
            result = ima_lsm_rule_init(entry, args[0].from,
                                       LSM_SUBJ_ROLE,
                                       AUDIT_SUBJ_ROLE);
            break;
        case Opt_subj_type:
            ima_log_string(ab, "subj_type", args[0].from);
            result = ima_lsm_rule_init(entry, args[0].from,
                                       LSM_SUBJ_TYPE,
                                       AUDIT_SUBJ_TYPE);
            break;
        case Opt_err:
            ima_log_string(ab, "UNKNOWN", p);
            result = -EINVAL;
            break;
        }
    }
    if (!result && (entry->action == UNKNOWN))
        result = -EINVAL;

    audit_log_format(ab, "res=%d", !!result);
    audit_log_end(ab);
    return result;
}
Bypass
0
static int ima_parse_rule(char *rule, struct ima_measure_rule_entry *entry)
{
	struct audit_buffer *ab;
	char *p;
	int result = 0;

	ab = audit_log_start(NULL, GFP_KERNEL, AUDIT_INTEGRITY_RULE);

	entry->uid = -1;
	entry->action = UNKNOWN;
	while ((p = strsep(&rule, " \t")) != NULL) {
		substring_t args[MAX_OPT_ARGS];
		int token;
		unsigned long lnum;

		if (result < 0)
			break;
		if ((*p == '\0') || (*p == ' ') || (*p == '\t'))
			continue;
		token = match_token(p, policy_tokens, args);
		switch (token) {
		case Opt_measure:
			ima_log_string(ab, "action", "measure");
			if (entry->action != UNKNOWN)
				result = -EINVAL;
			entry->action = MEASURE;
			break;
		case Opt_dont_measure:
			ima_log_string(ab, "action", "dont_measure");
			if (entry->action != UNKNOWN)
				result = -EINVAL;
			entry->action = DONT_MEASURE;
			break;
		case Opt_func:
			ima_log_string(ab, "func", args[0].from);
			if (entry->func)
				result = -EINVAL;
			if (strcmp(args[0].from, "FILE_CHECK") == 0)
				entry->func = FILE_CHECK;
			/* PATH_CHECK is for backwards compat */
			else if (strcmp(args[0].from, "PATH_CHECK") == 0)
				entry->func = FILE_CHECK;
			else if (strcmp(args[0].from, "FILE_MMAP") == 0)
				entry->func = FILE_MMAP;
			else if (strcmp(args[0].from, "BPRM_CHECK") == 0)
				entry->func = BPRM_CHECK;
			else
				result = -EINVAL;
			if (!result)
				entry->flags |= IMA_FUNC;
			break;
		case Opt_mask:
			ima_log_string(ab, "mask", args[0].from);
			if (entry->mask)
				result = -EINVAL;
			if ((strcmp(args[0].from, "MAY_EXEC")) == 0)
				entry->mask = MAY_EXEC;
			else if (strcmp(args[0].from, "MAY_WRITE") == 0)
				entry->mask = MAY_WRITE;
			else if (strcmp(args[0].from, "MAY_READ") == 0)
				entry->mask = MAY_READ;
			else if (strcmp(args[0].from, "MAY_APPEND") == 0)
				entry->mask = MAY_APPEND;
			else
				result = -EINVAL;
			if (!result)
				entry->flags |= IMA_MASK;
			break;
		case Opt_fsmagic:
			ima_log_string(ab, "fsmagic", args[0].from);
			if (entry->fsmagic) {
				result = -EINVAL;
				break;
			}
			result = strict_strtoul(args[0].from, 16,
						&entry->fsmagic);
			if (!result)
				entry->flags |= IMA_FSMAGIC;
			break;
		case Opt_uid:
			ima_log_string(ab, "uid", args[0].from);
			if (entry->uid != -1) {
				result = -EINVAL;
				break;
			}
			result = strict_strtoul(args[0].from, 10, &lnum);
			if (!result) {
				entry->uid = (uid_t) lnum;
				if (entry->uid != lnum)
					result = -EINVAL;
				else
					entry->flags |= IMA_UID;
			}
			break;
		case Opt_obj_user:
			ima_log_string(ab, "obj_user", args[0].from);
			result = ima_lsm_rule_init(entry, args[0].from,
						   LSM_OBJ_USER,
						   AUDIT_OBJ_USER);
			break;
		case Opt_obj_role:
			ima_log_string(ab, "obj_role", args[0].from);
			result = ima_lsm_rule_init(entry, args[0].from,
						   LSM_OBJ_ROLE,
						   AUDIT_OBJ_ROLE);
			break;
		case Opt_obj_type:
			ima_log_string(ab, "obj_type", args[0].from);
			result = ima_lsm_rule_init(entry, args[0].from,
						   LSM_OBJ_TYPE,
						   AUDIT_OBJ_TYPE);
			break;
		case Opt_subj_user:
			ima_log_string(ab, "subj_user", args[0].from);
			result = ima_lsm_rule_init(entry, args[0].from,
						   LSM_SUBJ_USER,
						   AUDIT_SUBJ_USER);
			break;
		case Opt_subj_role:
			ima_log_string(ab, "subj_role", args[0].from);
			result = ima_lsm_rule_init(entry, args[0].from,
						   LSM_SUBJ_ROLE,
						   AUDIT_SUBJ_ROLE);
			break;
		case Opt_subj_type:
			ima_log_string(ab, "subj_type", args[0].from);
			result = ima_lsm_rule_init(entry, args[0].from,
						   LSM_SUBJ_TYPE,
						   AUDIT_SUBJ_TYPE);
			break;
		case Opt_err:
			ima_log_string(ab, "UNKNOWN", p);
			result = -EINVAL;
			break;
		}
	}
	if (!result && (entry->action == UNKNOWN))
		result = -EINVAL;

	audit_log_format(ab, "res=%d", !!result);
	audit_log_end(ab);
	return result;
}
@@ -253,6 +253,8 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry, result = security_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal, args, &entry->lsm[lsm_rule].rule); + if (!entry->lsm[lsm_rule].rule) + return -EINVAL; return result; }
CWE-264
null
null
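[Editor's note] For reference, a representative policy line that the parser above accepts, built only from the tokens it handles (illustrative, not taken from a shipped policy):

measure func=FILE_CHECK mask=MAY_READ uid=0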
22,269
void ima_update_policy(void)
{
	const char *op = "policy_update";
	const char *cause = "already exists";
	int result = 1;
	int audit_info = 0;

	if (ima_measure == &measure_default_rules) {
		ima_measure = &measure_policy_rules;
		cause = "complete";
		result = 0;
	}
	integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
			    NULL, op, cause, result, audit_info);
}
Bypass
0
void ima_update_policy(void)
{
	const char *op = "policy_update";
	const char *cause = "already exists";
	int result = 1;
	int audit_info = 0;

	if (ima_measure == &measure_default_rules) {
		ima_measure = &measure_policy_rules;
		cause = "complete";
		result = 0;
	}
	integrity_audit_msg(AUDIT_INTEGRITY_STATUS, NULL,
			    NULL, op, cause, result, audit_info);
}
@@ -253,6 +253,8 @@ static int ima_lsm_rule_init(struct ima_measure_rule_entry *entry, result = security_filter_rule_init(entry->lsm[lsm_rule].type, Audit_equal, args, &entry->lsm[lsm_rule].rule); + if (!entry->lsm[lsm_rule].rule) + return -EINVAL; return result; }
CWE-264
null
null
22,270
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
DoS Overflow
0
static sector_t fuse_bmap(struct address_space *mapping, sector_t block)
{
	struct inode *inode = mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_bmap_in inarg;
	struct fuse_bmap_out outarg;
	int err;

	if (!inode->i_sb->s_bdev || fc->no_bmap)
		return 0;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return 0;

	memset(&inarg, 0, sizeof(inarg));
	inarg.block = block;
	inarg.blocksize = inode->i_sb->s_blocksize;
	req->in.h.opcode = FUSE_BMAP;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS)
		fc->no_bmap = 1;

	return err ? 0 : outarg.block;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
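[Editor's note] The fuse_verify_ioctl_iov() helper in the patch above bounds the sum of the iovec lengths before iov_length() is ever computed. A user-space sketch of the same arithmetic, assuming FUSE_MAX_PAGES_PER_REQ is 32 and 4 KiB pages (both values are configuration dependent):

#include <stddef.h>
#include <stdint.h>

#define PAGES_PER_REQ	32U
#define PAGE_SZ		4096U

/* Accept the iovec lengths only if their total provably fits in one
 * request (128 KiB here), so no later summation can overflow. */
static int verify_iov_lens(const size_t *lens, size_t count)
{
	uint32_t budget = PAGES_PER_REQ * PAGE_SZ;
	size_t n;

	for (n = 0; n < count; n++) {
		if (lens[n] > (size_t)budget)
			return -1;
		budget -= lens[n];	/* cannot underflow: lens[n] <= budget */
	}
	return 0;
}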
22,271
static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}
DoS Overflow
0
static int fuse_buffered_write(struct file *file, struct inode *inode,
			       loff_t pos, unsigned count, struct page *page)
{
	int err;
	size_t nres;
	struct fuse_conn *fc = get_fuse_conn(inode);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	struct fuse_req *req;

	if (is_bad_inode(inode))
		return -EIO;

	/*
	 * Make sure writepages on the same page are not mixed up with
	 * plain writes.
	 */
	fuse_wait_on_page_writeback(inode, page->index);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	req->in.argpages = 1;
	req->num_pages = 1;
	req->pages[0] = page;
	req->page_offset = offset;
	nres = fuse_send_write(req, file, pos, count, NULL);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err && !nres)
		err = -EIO;
	if (!err) {
		pos += nres;
		fuse_write_update_size(inode, pos);
		if (count == PAGE_CACHE_SIZE)
			SetPageUptodate(page);
	}
	fuse_invalidate_attr(inode);
	return err ? err : nres;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,272
static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src,
				 size_t transferred, unsigned count,
				 bool is_compat)
{
#ifdef CONFIG_COMPAT
	if (count * sizeof(struct compat_iovec) == transferred) {
		struct compat_iovec *ciov = src;
		unsigned i;

		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
		if (!is_compat)
			return -EINVAL;

		for (i = 0; i < count; i++) {
			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
			dst[i].iov_len = ciov[i].iov_len;
		}
		return 0;
	}
#endif

	if (count * sizeof(struct iovec) != transferred)
		return -EIO;

	memcpy(dst, src, transferred);
	return 0;
}
DoS Overflow
0
static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src,
				 size_t transferred, unsigned count,
				 bool is_compat)
{
#ifdef CONFIG_COMPAT
	if (count * sizeof(struct compat_iovec) == transferred) {
		struct compat_iovec *ciov = src;
		unsigned i;

		/*
		 * With this interface a 32bit server cannot support
		 * non-compat (i.e. ones coming from 64bit apps) ioctl
		 * requests
		 */
		if (!is_compat)
			return -EINVAL;

		for (i = 0; i < count; i++) {
			dst[i].iov_base = compat_ptr(ciov[i].iov_base);
			dst[i].iov_len = ciov[i].iov_len;
		}
		return 0;
	}
#endif

	if (count * sizeof(struct iovec) != transferred)
		return -EIO;

	memcpy(dst, src, transferred);
	return 0;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
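[Editor's note] The size test in fuse_copy_ioctl_iovec() above works because the two wire layouts have different per-entry sizes: on a 64-bit kernel an entry is 8 bytes in the compat layout and 16 bytes natively, so count * sizeof() can match at most one of them (these sizes are architecture-dependent assumptions). Sketched as plain C:

#include <stdint.h>

struct iovec32 {	/* compat layout, 8 bytes per entry */
	uint32_t iov_base;
	uint32_t iov_len;
};

struct iovec64 {	/* native 64-bit layout, 16 bytes per entry */
	uint64_t iov_base;
	uint64_t iov_len;
};

/* count * sizeof(struct iovec32) == transferred  -> compat path
 * count * sizeof(struct iovec64) == transferred  -> native path
 * anything else                                  -> -EIO */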
22,273
ssize_t fuse_direct_io(struct file *file, const char __user *buf,
		       size_t count, loff_t *ppos, int write)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, file, pos, nbytes, owner);

		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
DoS Overflow
0
ssize_t fuse_direct_io(struct file *file, const char __user *buf,
		       size_t count, loff_t *ppos, int write)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	size_t nmax = write ? fc->max_write : fc->max_read;
	loff_t pos = *ppos;
	ssize_t res = 0;
	struct fuse_req *req;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	while (count) {
		size_t nres;
		fl_owner_t owner = current->files;
		size_t nbytes = min(count, nmax);
		int err = fuse_get_user_pages(req, buf, &nbytes, write);
		if (err) {
			res = err;
			break;
		}

		if (write)
			nres = fuse_send_write(req, file, pos, nbytes, owner);
		else
			nres = fuse_send_read(req, file, pos, nbytes, owner);

		fuse_release_user_pages(req, !write);
		if (req->out.h.error) {
			if (!res)
				res = req->out.h.error;
			break;
		} else if (nres > nbytes) {
			res = -EIO;
			break;
		}
		count -= nres;
		res += nres;
		pos += nres;
		buf += nres;
		if (nres != nbytes)
			break;
		if (count) {
			fuse_put_request(fc, req);
			req = fuse_get_req(fc);
			if (IS_ERR(req))
				break;
		}
	}
	if (!IS_ERR(req))
		fuse_put_request(fc, req);
	if (res > 0)
		*ppos = pos;

	return res;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
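[Editor's note] fuse_direct_io() above splits a transfer into chunks of at most max_read/max_write bytes, allocates a fresh request per chunk, and stops early on a short transfer. A minimal user-space analogue of that loop shape, assuming a hypothetical transfer() primitive:

#include <stddef.h>
#include <sys/types.h>

extern ssize_t transfer(char *buf, size_t len);	/* hypothetical */

ssize_t chunked_io(char *buf, size_t total, size_t max_per_req)
{
	size_t done = 0;

	while (done < total) {
		size_t chunk = total - done < max_per_req ?
			       total - done : max_per_req;
		ssize_t got = transfer(buf + done, chunk);

		if (got < 0)
			return done ? (ssize_t)done : got;
		done += got;
		if ((size_t)got < chunk)	/* short transfer, like nres != nbytes */
			break;
	}
	return done;
}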
22,274
static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}
DoS Overflow
0
static int fuse_direct_mmap(struct file *file, struct vm_area_struct *vma)
{
	/* Can't provide the coherency needed for MAP_SHARED */
	if (vma->vm_flags & VM_MAYSHARE)
		return -ENODEV;

	invalidate_inode_pages2(file->f_mapping);

	return generic_file_mmap(file, vma);
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,275
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(file, buf, count, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}
DoS Overflow
0
static ssize_t fuse_direct_read(struct file *file, char __user *buf,
				size_t count, loff_t *ppos)
{
	ssize_t res;
	struct inode *inode = file->f_path.dentry->d_inode;

	if (is_bad_inode(inode))
		return -EIO;

	res = fuse_direct_io(file, buf, count, ppos, 0);

	fuse_invalidate_attr(inode);

	return res;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,276
static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}
DoS Overflow
0
static ssize_t fuse_file_aio_read(struct kiocb *iocb, const struct iovec *iov,
				  unsigned long nr_segs, loff_t pos)
{
	struct inode *inode = iocb->ki_filp->f_mapping->host;

	if (pos + iov_length(iov, nr_segs) > i_size_read(inode)) {
		int err;
		/*
		 * If trying to read past EOF, make sure the i_size
		 * attribute is up-to-date.
		 */
		err = fuse_update_attributes(inode, NULL, iocb->ki_filp, NULL);
		if (err)
			return err;
	}

	return generic_file_aio_read(iocb, iov, nr_segs, pos);
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,277
static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}
DoS Overflow
0
static ssize_t fuse_file_aio_write(struct kiocb *iocb, const struct iovec *iov,
				   unsigned long nr_segs, loff_t pos)
{
	struct file *file = iocb->ki_filp;
	struct address_space *mapping = file->f_mapping;
	size_t count = 0;
	ssize_t written = 0;
	struct inode *inode = mapping->host;
	ssize_t err;
	struct iov_iter i;

	WARN_ON(iocb->ki_pos != pos);

	err = generic_segment_checks(iov, &nr_segs, &count, VERIFY_READ);
	if (err)
		return err;

	mutex_lock(&inode->i_mutex);
	vfs_check_frozen(inode->i_sb, SB_FREEZE_WRITE);

	/* We can write back this queue in page reclaim */
	current->backing_dev_info = mapping->backing_dev_info;

	err = generic_write_checks(file, &pos, &count, S_ISBLK(inode->i_mode));
	if (err)
		goto out;

	if (count == 0)
		goto out;

	err = file_remove_suid(file);
	if (err)
		goto out;

	file_update_time(file);

	iov_iter_init(&i, iov, nr_segs, count, 0);
	written = fuse_perform_write(file, mapping, &i, pos);
	if (written >= 0)
		iocb->ki_pos = pos + written;

out:
	current->backing_dev_info = NULL;
	mutex_unlock(&inode->i_mutex);

	return written ? written : err;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,278
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc();
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}
DoS Overflow
0
struct fuse_file *fuse_file_alloc(struct fuse_conn *fc)
{
	struct fuse_file *ff;

	ff = kmalloc(sizeof(struct fuse_file), GFP_KERNEL);
	if (unlikely(!ff))
		return NULL;

	ff->fc = fc;
	ff->reserved_req = fuse_request_alloc();
	if (unlikely(!ff->reserved_req)) {
		kfree(ff);
		return NULL;
	}

	INIT_LIST_HEAD(&ff->write_entry);
	atomic_set(&ff->count, 0);
	RB_CLEAR_NODE(&ff->polled_node);
	init_waitqueue_head(&ff->poll_wait);

	spin_lock(&fc->lock);
	ff->kh = ++fc->khctr;
	spin_unlock(&fc->lock);

	return ff;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,279
static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}
DoS Overflow
0
static long fuse_file_compat_ioctl(struct file *file, unsigned int cmd,
				   unsigned long arg)
{
	return fuse_file_ioctl_common(file, cmd, arg, FUSE_IOCTL_COMPAT);
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,280
void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}
DoS Overflow
0
void fuse_file_free(struct fuse_file *ff)
{
	fuse_request_free(ff->reserved_req);
	kfree(ff);
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,281
struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}
DoS Overflow
0
struct fuse_file *fuse_file_get(struct fuse_file *ff)
{
	atomic_inc(&ff->count);
	return ff;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,282
static long fuse_file_ioctl_common(struct file *file, unsigned int cmd,
				   unsigned long arg, unsigned int flags)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}
DoS Overflow
0
static long fuse_file_ioctl_common(struct file *file, unsigned int cmd,
				   unsigned long arg, unsigned int flags)
{
	struct inode *inode = file->f_dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (!fuse_allow_task(fc, current))
		return -EACCES;

	if (is_bad_inode(inode))
		return -EIO;

	return fuse_do_ioctl(file, cmd, arg, flags);
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,283
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (retval)
			goto exit;
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
exit:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
DoS Overflow
0
static loff_t fuse_file_llseek(struct file *file, loff_t offset, int origin)
{
	loff_t retval;
	struct inode *inode = file->f_path.dentry->d_inode;

	mutex_lock(&inode->i_mutex);
	switch (origin) {
	case SEEK_END:
		retval = fuse_update_attributes(inode, NULL, file, NULL);
		if (retval)
			goto exit;
		offset += i_size_read(inode);
		break;
	case SEEK_CUR:
		offset += file->f_pos;
	}
	retval = -EINVAL;
	if (offset >= 0 && offset <= inode->i_sb->s_maxbytes) {
		if (offset != file->f_pos) {
			file->f_pos = offset;
			file->f_version = 0;
		}
		retval = offset;
	}
exit:
	mutex_unlock(&inode->i_mutex);
	return retval;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,284
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
DoS Overflow
0
static int fuse_file_mmap(struct file *file, struct vm_area_struct *vma)
{
	if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_MAYWRITE)) {
		struct inode *inode = file->f_dentry->d_inode;
		struct fuse_conn *fc = get_fuse_conn(inode);
		struct fuse_inode *fi = get_fuse_inode(inode);
		struct fuse_file *ff = file->private_data;
		/*
		 * file may be written through mmap, so chain it onto the
		 * inode's write_files list
		 */
		spin_lock(&fc->lock);
		if (list_empty(&ff->write_entry))
			list_add(&ff->write_entry, &fi->write_files);
		spin_unlock(&fc->lock);
	}
	file_accessed(file);
	vma->vm_ops = &fuse_file_vm_ops;
	return 0;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,285
unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return POLLERR;

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
DoS Overflow
0
unsigned fuse_file_poll(struct file *file, poll_table *wait)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = ff->fc;
	struct fuse_poll_in inarg = { .fh = ff->fh, .kh = ff->kh };
	struct fuse_poll_out outarg;
	struct fuse_req *req;
	int err;

	if (fc->no_poll)
		return DEFAULT_POLLMASK;

	poll_wait(file, &ff->poll_wait, wait);

	/*
	 * Ask for notification iff there's someone waiting for it.
	 * The client may ignore the flag and always notify.
	 */
	if (waitqueue_active(&ff->poll_wait)) {
		inarg.flags |= FUSE_POLL_SCHEDULE_NOTIFY;
		fuse_register_polled_file(fc, ff);
	}

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return POLLERR;

	req->in.h.opcode = FUSE_POLL;
	req->in.h.nodeid = ff->nodeid;
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);

	if (!err)
		return outarg.revents;
	if (err == -ENOSYS) {
		fc->no_poll = 1;
		return DEFAULT_POLLMASK;
	}
	return POLLERR;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,286
static void fuse_file_put(struct fuse_file *ff)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		req->end = fuse_release_end;
		fuse_request_send_background(ff->fc, req);
		kfree(ff);
	}
}
DoS Overflow
0
static void fuse_file_put(struct fuse_file *ff)
{
	if (atomic_dec_and_test(&ff->count)) {
		struct fuse_req *req = ff->reserved_req;

		req->end = fuse_release_end;
		fuse_request_send_background(ff->fc, req);
		kfree(ff);
	}
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
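[Editor's note] fuse_file_get() and fuse_file_put() above bracket a file's lifetime; the final put sends the release message using the request preallocated in fuse_file_alloc(), so releasing a file can never fail for lack of memory. A minimal sketch of that lifecycle, with illustrative names rather than the kernel's:

#include <stdatomic.h>
#include <stdlib.h>

struct file_ref {
	atomic_int count;
	void *reserved_req;	/* stands in for ff->reserved_req */
};

static void send_release(void *req)
{
	/* stub: queue the release message built from the reserved request */
}

static void file_ref_put(struct file_ref *ff)
{
	if (atomic_fetch_sub(&ff->count, 1) == 1) {	/* last reference */
		send_release(ff->reserved_req);
		free(ff);
	}
}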
22,287
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}
DoS Overflow
0
static ssize_t fuse_fill_write_pages(struct fuse_req *req,
				     struct address_space *mapping,
				     struct iov_iter *ii, loff_t pos)
{
	struct fuse_conn *fc = get_fuse_conn(mapping->host);
	unsigned offset = pos & (PAGE_CACHE_SIZE - 1);
	size_t count = 0;
	int err;

	req->in.argpages = 1;
	req->page_offset = offset;

	do {
		size_t tmp;
		struct page *page;
		pgoff_t index = pos >> PAGE_CACHE_SHIFT;
		size_t bytes = min_t(size_t, PAGE_CACHE_SIZE - offset,
				     iov_iter_count(ii));

		bytes = min_t(size_t, bytes, fc->max_write - count);

 again:
		err = -EFAULT;
		if (iov_iter_fault_in_readable(ii, bytes))
			break;

		err = -ENOMEM;
		page = grab_cache_page_write_begin(mapping, index, 0);
		if (!page)
			break;

		if (mapping_writably_mapped(mapping))
			flush_dcache_page(page);

		pagefault_disable();
		tmp = iov_iter_copy_from_user_atomic(page, ii, offset, bytes);
		pagefault_enable();
		flush_dcache_page(page);

		if (!tmp) {
			unlock_page(page);
			page_cache_release(page);
			bytes = min(bytes, iov_iter_single_seg_count(ii));
			goto again;
		}

		err = 0;
		req->pages[req->num_pages] = page;
		req->num_pages++;

		iov_iter_advance(ii, tmp);
		count += tmp;
		pos += tmp;
		offset += tmp;
		if (offset == PAGE_CACHE_SIZE)
			offset = 0;

		if (!fc->big_writes)
			break;
	} while (iov_iter_count(ii) && count < fc->max_write &&
		 req->num_pages < FUSE_MAX_PAGES_PER_REQ && offset == 0);

	return count > 0 ? count : err;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
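[Editor's note] The again: retry in fuse_fill_write_pages() above is the standard fault-in/atomic-copy idiom: the copy runs with page faults disabled while the page is locked, because faulting there could deadlock on the very page being written. The user segment is therefore pre-faulted first, and if the atomic copy still makes no progress the length is shrunk to a single segment and the step retried. Left as commentary rather than code, since any compilable analogue would only restate the loop above.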
22,288
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}
DoS Overflow
0
static struct rb_node **fuse_find_polled_node(struct fuse_conn *fc, u64 kh,
					      struct rb_node **parent_out)
{
	struct rb_node **link = &fc->polled_files.rb_node;
	struct rb_node *last = NULL;

	while (*link) {
		struct fuse_file *ff;

		last = *link;
		ff = rb_entry(last, struct fuse_file, polled_node);

		if (kh < ff->kh)
			link = &last->rb_left;
		else if (kh > ff->kh)
			link = &last->rb_right;
		else
			return link;
	}

	if (parent_out)
		*parent_out = last;
	return link;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
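[Editor's note] fuse_find_polled_node() above returns both the link slot and the parent so that one lookup serves both find and insert. A sketch of the expected caller, following the usual kernel rbtree insertion idiom (fuse_register_polled_file() is assumed to look roughly like this):

static void register_polled_file(struct fuse_conn *fc, struct fuse_file *ff)
{
	spin_lock(&fc->lock);
	if (RB_EMPTY_NODE(&ff->polled_node)) {
		struct rb_node **link, *parent;

		link = fuse_find_polled_node(fc, ff->kh, &parent);
		rb_link_node(&ff->polled_node, parent, link);
		rb_insert_color(&ff->polled_node, &fc->polled_files);
	}
	spin_unlock(&fc->lock);
}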
22,289
void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
	}
}
DoS Overflow
0
void fuse_finish_open(struct inode *inode, struct file *file)
{
	struct fuse_file *ff = file->private_data;
	struct fuse_conn *fc = get_fuse_conn(inode);

	if (ff->open_flags & FOPEN_DIRECT_IO)
		file->f_op = &fuse_direct_io_file_operations;
	if (!(ff->open_flags & FOPEN_KEEP_CACHE))
		invalidate_inode_pages2(inode->i_mapping);
	if (ff->open_flags & FOPEN_NONSEEKABLE)
		nonseekable_open(inode, file);
	if (fc->atomic_o_trunc && (file->f_flags & O_TRUNC)) {
		struct fuse_inode *fi = get_fuse_inode(inode);

		spin_lock(&fc->lock);
		fi->attr_version = ++fc->attr_version;
		i_size_write(inode, 0);
		spin_unlock(&fc->lock);
		fuse_invalidate_attr(inode);
	}
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,290
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}
DoS Overflow
0
static int fuse_flush(struct file *file, fl_owner_t id)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_flush_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if (fc->no_flush)
		return 0;

	req = fuse_get_req_nofail(fc, file);
	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.lock_owner = fuse_lock_owner_id(fc, id);
	req->in.h.opcode = FUSE_FLUSH;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	req->force = 1;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		fc->no_flush = 1;
		err = 0;
	}
	return err;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,291
static int fuse_fsync(struct file *file, int datasync)
{
	return fuse_fsync_common(file, datasync, 0);
}
DoS Overflow
0
static int fuse_fsync(struct file *file, int datasync)
{
	return fuse_fsync_common(file, datasync, 0);
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,292
int fuse_fsync_common(struct file *file, int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}
DoS Overflow
0
int fuse_fsync_common(struct file *file, int datasync, int isdir)
{
	struct inode *inode = file->f_mapping->host;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_req *req;
	struct fuse_fsync_in inarg;
	int err;

	if (is_bad_inode(inode))
		return -EIO;

	if ((!isdir && fc->no_fsync) || (isdir && fc->no_fsyncdir))
		return 0;

	/*
	 * Start writeback against all dirty pages of the inode, then
	 * wait for all outstanding writes, before sending the FSYNC
	 * request.
	 */
	err = write_inode_now(inode, 0);
	if (err)
		return err;

	fuse_sync_writes(inode);

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	memset(&inarg, 0, sizeof(inarg));
	inarg.fh = ff->fh;
	inarg.fsync_flags = datasync ? 1 : 0;
	req->in.h.opcode = isdir ? FUSE_FSYNCDIR : FUSE_FSYNC;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(inarg);
	req->in.args[0].value = &inarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (err == -ENOSYS) {
		if (isdir)
			fc->no_fsyncdir = 1;
		else
			fc->no_fsync = 1;
		err = 0;
	}
	return err;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,293
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}
DoS Overflow
0
static int fuse_getlk(struct file *file, struct file_lock *fl)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_req *req;
	struct fuse_lk_out outarg;
	int err;

	req = fuse_get_req(fc);
	if (IS_ERR(req))
		return PTR_ERR(req);

	fuse_lk_fill(req, file, fl, FUSE_GETLK, 0, 0);
	req->out.numargs = 1;
	req->out.args[0].size = sizeof(outarg);
	req->out.args[0].value = &outarg;
	fuse_request_send(fc, req);
	err = req->out.h.error;
	fuse_put_request(fc, req);
	if (!err)
		err = convert_fuse_file_lock(&outarg.lk, fl);

	return err;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,294
void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}
DoS Overflow
0
void fuse_init_file_inode(struct inode *inode)
{
	inode->i_fop = &fuse_file_operations;
	inode->i_data.a_ops = &fuse_file_aops;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,295
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes,
				bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr;

		kaddr = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left))
				return -EFAULT;

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(page);
	}

	return 0;
}
DoS Overflow
0
static int fuse_ioctl_copy_user(struct page **pages, struct iovec *iov,
				unsigned int nr_segs, size_t bytes,
				bool to_user)
{
	struct iov_iter ii;
	int page_idx = 0;

	if (!bytes)
		return 0;

	iov_iter_init(&ii, iov, nr_segs, bytes, 0);

	while (iov_iter_count(&ii)) {
		struct page *page = pages[page_idx++];
		size_t todo = min_t(size_t, PAGE_SIZE, iov_iter_count(&ii));
		void *kaddr;

		kaddr = kmap(page);

		while (todo) {
			char __user *uaddr = ii.iov->iov_base + ii.iov_offset;
			size_t iov_len = ii.iov->iov_len - ii.iov_offset;
			size_t copy = min(todo, iov_len);
			size_t left;

			if (!to_user)
				left = copy_from_user(kaddr, uaddr, copy);
			else
				left = copy_to_user(uaddr, kaddr, copy);

			if (unlikely(left))
				return -EFAULT;

			iov_iter_advance(&ii, copy);
			todo -= copy;
			kaddr += copy;
		}

		kunmap(page);
	}

	return 0;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src,
 	return 0;
 }
 
+/* Make sure iov_length() won't overflow */
+static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
+{
+	size_t n;
+	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
+
+	for (n = 0; n < count; n++) {
+		if (iov->iov_len > (size_t) max)
+			return -ENOMEM;
+		max -= iov->iov_len;
+	}
+	return 0;
+}
+
 /*
  * For ioctls, there is no generic way to determine how much memory
  * needs to be read and/or written.  Furthermore, ioctls are allowed
@@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
 		in_iov = page_address(iov_page);
 		out_iov = in_iov + in_iovs;
 
+		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
+		if (err)
+			goto out;
+
+		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
+		if (err)
+			goto out;
+
 		goto retry;
 	}
CWE-119
null
null
22,296
static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}
DoS Overflow
0
static void fuse_lk_fill(struct fuse_req *req, struct file *file,
			 const struct file_lock *fl, int opcode, pid_t pid,
			 int flock)
{
	struct inode *inode = file->f_path.dentry->d_inode;
	struct fuse_conn *fc = get_fuse_conn(inode);
	struct fuse_file *ff = file->private_data;
	struct fuse_lk_in *arg = &req->misc.lk_in;

	arg->fh = ff->fh;
	arg->owner = fuse_lock_owner_id(fc, fl->fl_owner);
	arg->lk.start = fl->fl_start;
	arg->lk.end = fl->fl_end;
	arg->lk.type = fl->fl_type;
	arg->lk.pid = pid;
	if (flock)
		arg->lk_flags |= FUSE_LK_FLOCK;
	req->in.h.opcode = opcode;
	req->in.h.nodeid = get_node_id(inode);
	req->in.numargs = 1;
	req->in.args[0].size = sizeof(*arg);
	req->in.args[0].value = arg;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src,
 	return 0;
 }
 
+/* Make sure iov_length() won't overflow */
+static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
+{
+	size_t n;
+	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
+
+	for (n = 0; n < count; n++) {
+		if (iov->iov_len > (size_t) max)
+			return -ENOMEM;
+		max -= iov->iov_len;
+	}
+	return 0;
+}
+
 /*
  * For ioctls, there is no generic way to determine how much memory
  * needs to be read and/or written.  Furthermore, ioctls are allowed
@@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
 		in_iov = page_address(iov_page);
 		out_iov = in_iov + in_iovs;
 
+		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
+		if (err)
+			goto out;
+
+		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
+		if (err)
+			goto out;
+
 		goto retry;
 	}
CWE-119
null
null
22,297
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
DoS Overflow
0
u64 fuse_lock_owner_id(struct fuse_conn *fc, fl_owner_t id)
{
	u32 *k = fc->scramble_key;
	u64 v = (unsigned long) id;
	u32 v0 = v;
	u32 v1 = v >> 32;
	u32 sum = 0;
	int i;

	for (i = 0; i < 32; i++) {
		v0 += ((v1 << 4 ^ v1 >> 5) + v1) ^ (sum + k[sum & 3]);
		sum += 0x9E3779B9;
		v1 += ((v0 << 4 ^ v0 >> 5) + v0) ^ (sum + k[sum>>11 & 3]);
	}

	return (u64) v0 + ((u64) v1 << 32);
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src,
 	return 0;
 }
 
+/* Make sure iov_length() won't overflow */
+static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count)
+{
+	size_t n;
+	u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT;
+
+	for (n = 0; n < count; n++) {
+		if (iov->iov_len > (size_t) max)
+			return -ENOMEM;
+		max -= iov->iov_len;
+	}
+	return 0;
+}
+
 /*
  * For ioctls, there is no generic way to determine how much memory
  * needs to be read and/or written.  Furthermore, ioctls are allowed
@@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg,
 		in_iov = page_address(iov_page);
 		out_iov = in_iov + in_iovs;
 
+		err = fuse_verify_ioctl_iov(in_iov, in_iovs);
+		if (err)
+			goto out;
+
+		err = fuse_verify_ioctl_iov(out_iov, out_iovs);
+		if (err)
+			goto out;
+
 		goto retry;
 	}
CWE-119
null
null
22,298
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}
DoS Overflow
0
int fuse_notify_poll_wakeup(struct fuse_conn *fc,
			    struct fuse_notify_poll_wakeup_out *outarg)
{
	u64 kh = outarg->kh;
	struct rb_node **link;

	spin_lock(&fc->lock);

	link = fuse_find_polled_node(fc, kh, NULL);
	if (*link) {
		struct fuse_file *ff;

		ff = rb_entry(*link, struct fuse_file, polled_node);
		wake_up_interruptible_sync(&ff->poll_wait);
	}

	spin_unlock(&fc->lock);
	return 0;
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null
22,299
static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}
DoS Overflow
0
static int fuse_open(struct inode *inode, struct file *file)
{
	return fuse_open_common(inode, file, false);
}
@@ -1666,6 +1666,20 @@ static int fuse_copy_ioctl_iovec(struct iovec *dst, void *src, return 0; } +/* Make sure iov_length() won't overflow */ +static int fuse_verify_ioctl_iov(struct iovec *iov, size_t count) +{ + size_t n; + u32 max = FUSE_MAX_PAGES_PER_REQ << PAGE_SHIFT; + + for (n = 0; n < count; n++) { + if (iov->iov_len > (size_t) max) + return -ENOMEM; + max -= iov->iov_len; + } + return 0; +} + /* * For ioctls, there is no generic way to determine how much memory * needs to be read and/or written. Furthermore, ioctls are allowed @@ -1858,6 +1872,14 @@ long fuse_do_ioctl(struct file *file, unsigned int cmd, unsigned long arg, in_iov = page_address(iov_page); out_iov = in_iov + in_iovs; + err = fuse_verify_ioctl_iov(in_iov, in_iovs); + if (err) + goto out; + + err = fuse_verify_ioctl_iov(out_iov, out_iovs); + if (err) + goto out; + goto retry; }
CWE-119
null
null