Columns: idx (int64), func_before (string), Vulnerability Classification (string), vul (int64), func_after (string), patch (string), CWE ID (string), lines_before (string), lines_after (string)
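Every row below carries the same patch hunk: it initializes msg->msg_namelen to 0 at the top of iucv_sock_recvmsg(), closing a CWE-200 kernel information leak (a recvmsg handler that neither fills the address buffer nor zeroes msg_namelen lets the socket layer copy stale kernel stack bytes back to userspace as a "peer address"). What follows is a minimal, self-contained C sketch of that pattern; it only models the kernel behavior in userspace, and every name in it is hypothetical rather than the actual af_iucv code.

/* Illustrative model (userspace, hypothetical names): the kernel keeps the
 * peer address for recvmsg() in an on-stack buffer and afterwards copies
 * msg_namelen bytes of it back to the caller. A protocol handler that
 * neither fills the buffer nor zeroes msg_namelen leaks stale stack data. */
#include <stdio.h>

struct fake_msghdr {
	void *msg_name;    /* caller-owned address buffer */
	int   msg_namelen; /* number of bytes copied back to "userspace" */
};

/* vulnerable pattern: payload is handled, msg_namelen is never touched */
static void proto_recvmsg_vulnerable(struct fake_msghdr *msg) { (void)msg; }

/* fixed pattern, mirroring the patch: zero msg_namelen up front */
static void proto_recvmsg_fixed(struct fake_msghdr *msg) { msg->msg_namelen = 0; }

static void kernel_side(void (*recvmsg)(struct fake_msghdr *))
{
	char address[16]; /* uninitialized stack buffer, like sockaddr_storage */
	struct fake_msghdr msg = { address, sizeof(address) };
	int i;

	recvmsg(&msg);
	/* models move_addr_to_user(): whatever msg_namelen says gets copied out */
	for (i = 0; i < msg.msg_namelen; i++)
		printf("%02x ", (unsigned char)address[i]);
	printf("<- %d byte(s) exposed\n", msg.msg_namelen);
}

int main(void)
{
	kernel_side(proto_recvmsg_vulnerable); /* prints 16 stale stack bytes */
	kernel_side(proto_recvmsg_fixed);      /* prints "0 byte(s) exposed" */
	return 0;
}

For context, later kernels moved this initialization into the common socket layer, so individual recvmsg handlers no longer need the per-protocol msg_namelen = 0 fix shown in the patch column here.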
24,500
static int __init afiucv_init(void) { int err; if (MACHINE_IS_VM) { cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); if (unlikely(err)) { WARN_ON(err); err = -EPROTONOSUPPORT; goto out; } pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv"); if (!pr_iucv) { printk(KERN_WARNING "iucv_if lookup failed\n"); memset(&iucv_userid, 0, sizeof(iucv_userid)); } } else { memset(&iucv_userid, 0, sizeof(iucv_userid)); pr_iucv = NULL; } err = proto_register(&iucv_proto, 0); if (err) goto out; err = sock_register(&iucv_sock_family_ops); if (err) goto out_proto; if (pr_iucv) { err = afiucv_iucv_init(); if (err) goto out_sock; } else register_netdevice_notifier(&afiucv_netdev_notifier); dev_add_pack(&iucv_packet_type); return 0; out_sock: sock_unregister(PF_IUCV); out_proto: proto_unregister(&iucv_proto); out: if (pr_iucv) symbol_put(iucv_if); return err; }
+Info
0
static int __init afiucv_init(void) { int err; if (MACHINE_IS_VM) { cpcmd("QUERY USERID", iucv_userid, sizeof(iucv_userid), &err); if (unlikely(err)) { WARN_ON(err); err = -EPROTONOSUPPORT; goto out; } pr_iucv = try_then_request_module(symbol_get(iucv_if), "iucv"); if (!pr_iucv) { printk(KERN_WARNING "iucv_if lookup failed\n"); memset(&iucv_userid, 0, sizeof(iucv_userid)); } } else { memset(&iucv_userid, 0, sizeof(iucv_userid)); pr_iucv = NULL; } err = proto_register(&iucv_proto, 0); if (err) goto out; err = sock_register(&iucv_sock_family_ops); if (err) goto out_proto; if (pr_iucv) { err = afiucv_iucv_init(); if (err) goto out_sock; } else register_netdevice_notifier(&afiucv_netdev_notifier); dev_add_pack(&iucv_packet_type); return 0; out_sock: sock_unregister(PF_IUCV); out_proto: proto_unregister(&iucv_proto); out: if (pr_iucv) symbol_put(iucv_if); return err; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,501
static int afiucv_iucv_init(void) { int err; err = pr_iucv->iucv_register(&af_iucv_handler, 0); if (err) goto out; /* establish dummy device */ af_iucv_driver.bus = pr_iucv->bus; err = driver_register(&af_iucv_driver); if (err) goto out_iucv; af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (!af_iucv_dev) { err = -ENOMEM; goto out_driver; } dev_set_name(af_iucv_dev, "af_iucv"); af_iucv_dev->bus = pr_iucv->bus; af_iucv_dev->parent = pr_iucv->root; af_iucv_dev->release = (void (*)(struct device *))kfree; af_iucv_dev->driver = &af_iucv_driver; err = device_register(af_iucv_dev); if (err) goto out_driver; return 0; out_driver: driver_unregister(&af_iucv_driver); out_iucv: pr_iucv->iucv_unregister(&af_iucv_handler, 0); out: return err; }
+Info
0
static int afiucv_iucv_init(void) { int err; err = pr_iucv->iucv_register(&af_iucv_handler, 0); if (err) goto out; /* establish dummy device */ af_iucv_driver.bus = pr_iucv->bus; err = driver_register(&af_iucv_driver); if (err) goto out_iucv; af_iucv_dev = kzalloc(sizeof(struct device), GFP_KERNEL); if (!af_iucv_dev) { err = -ENOMEM; goto out_driver; } dev_set_name(af_iucv_dev, "af_iucv"); af_iucv_dev->bus = pr_iucv->bus; af_iucv_dev->parent = pr_iucv->root; af_iucv_dev->release = (void (*)(struct device *))kfree; af_iucv_dev->driver = &af_iucv_driver; err = device_register(af_iucv_dev); if (err) goto out_driver; return 0; out_driver: driver_unregister(&af_iucv_driver); out_iucv: pr_iucv->iucv_unregister(&af_iucv_handler, 0); out: return err; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,502
static int afiucv_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = (struct net_device *)ptr; struct sock *sk; struct iucv_sock *iucv; switch (event) { case NETDEV_REBOOT: case NETDEV_GOING_DOWN: sk_for_each(sk, &iucv_sk_list.head) { iucv = iucv_sk(sk); if ((iucv->hs_dev == event_dev) && (sk->sk_state == IUCV_CONNECTED)) { if (event == NETDEV_GOING_DOWN) iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); } } break; case NETDEV_DOWN: case NETDEV_UNREGISTER: default: break; } return NOTIFY_DONE; }
+Info
0
static int afiucv_netdev_event(struct notifier_block *this, unsigned long event, void *ptr) { struct net_device *event_dev = (struct net_device *)ptr; struct sock *sk; struct iucv_sock *iucv; switch (event) { case NETDEV_REBOOT: case NETDEV_GOING_DOWN: sk_for_each(sk, &iucv_sk_list.head) { iucv = iucv_sk(sk); if ((iucv->hs_dev == event_dev) && (sk->sk_state == IUCV_CONNECTED)) { if (event == NETDEV_GOING_DOWN) iucv_send_ctrl(sk, AF_IUCV_FLAG_FIN); sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); } } break; case NETDEV_DOWN: case NETDEV_UNREGISTER: default: break; } return NOTIFY_DONE; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,503
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr) { struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); unsigned char user_data[16]; int err; high_nmcpy(user_data, sa->siucv_name); low_nmcpy(user_data, iucv->src_name); ASCEBC(user_data, sizeof(user_data)); /* Create path. */ iucv->path = iucv_path_alloc(iucv->msglimit, IUCV_IPRMDATA, GFP_KERNEL); if (!iucv->path) { err = -ENOMEM; goto done; } err = pr_iucv->path_connect(iucv->path, &af_iucv_handler, sa->siucv_user_id, NULL, user_data, sk); if (err) { iucv_path_free(iucv->path); iucv->path = NULL; switch (err) { case 0x0b: /* Target communicator is not logged on */ err = -ENETUNREACH; break; case 0x0d: /* Max connections for this guest exceeded */ case 0x0e: /* Max connections for target guest exceeded */ err = -EAGAIN; break; case 0x0f: /* Missing IUCV authorization */ err = -EACCES; break; default: err = -ECONNREFUSED; break; } } done: return err; }
+Info
0
static int afiucv_path_connect(struct socket *sock, struct sockaddr *addr) { struct sockaddr_iucv *sa = (struct sockaddr_iucv *) addr; struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); unsigned char user_data[16]; int err; high_nmcpy(user_data, sa->siucv_name); low_nmcpy(user_data, iucv->src_name); ASCEBC(user_data, sizeof(user_data)); /* Create path. */ iucv->path = iucv_path_alloc(iucv->msglimit, IUCV_IPRMDATA, GFP_KERNEL); if (!iucv->path) { err = -ENOMEM; goto done; } err = pr_iucv->path_connect(iucv->path, &af_iucv_handler, sa->siucv_user_id, NULL, user_data, sk); if (err) { iucv_path_free(iucv->path); iucv->path = NULL; switch (err) { case 0x0b: /* Target communicator is not logged on */ err = -ENETUNREACH; break; case 0x0d: /* Max connections for this guest exceeded */ case 0x0e: /* Max connections for target guest exceeded */ err = -EAGAIN; break; case 0x0f: /* Missing IUCV authorization */ err = -EACCES; break; default: err = -ECONNREFUSED; break; } } done: return err; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,504
static int afiucv_pm_freeze(struct device *dev) { struct iucv_sock *iucv; struct sock *sk; int err = 0; #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "afiucv_pm_freeze\n"); #endif read_lock(&iucv_sk_list.lock); sk_for_each(sk, &iucv_sk_list.head) { iucv = iucv_sk(sk); switch (sk->sk_state) { case IUCV_DISCONN: case IUCV_CLOSING: case IUCV_CONNECTED: iucv_sever_path(sk, 0); break; case IUCV_OPEN: case IUCV_BOUND: case IUCV_LISTEN: case IUCV_CLOSED: default: break; } skb_queue_purge(&iucv->send_skb_q); skb_queue_purge(&iucv->backlog_skb_q); } read_unlock(&iucv_sk_list.lock); return err; }
+Info
0
static int afiucv_pm_freeze(struct device *dev) { struct iucv_sock *iucv; struct sock *sk; int err = 0; #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "afiucv_pm_freeze\n"); #endif read_lock(&iucv_sk_list.lock); sk_for_each(sk, &iucv_sk_list.head) { iucv = iucv_sk(sk); switch (sk->sk_state) { case IUCV_DISCONN: case IUCV_CLOSING: case IUCV_CONNECTED: iucv_sever_path(sk, 0); break; case IUCV_OPEN: case IUCV_BOUND: case IUCV_LISTEN: case IUCV_CLOSED: default: break; } skb_queue_purge(&iucv->send_skb_q); skb_queue_purge(&iucv->backlog_skb_q); } read_unlock(&iucv_sk_list.lock); return err; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,505
static int afiucv_pm_prepare(struct device *dev) { #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "afiucv_pm_prepare\n"); #endif return 0; }
+Info
0
static int afiucv_pm_prepare(struct device *dev) { #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "afiucv_pm_prepare\n"); #endif return 0; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,506
static int afiucv_pm_restore_thaw(struct device *dev) { struct sock *sk; #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "afiucv_pm_restore_thaw\n"); #endif read_lock(&iucv_sk_list.lock); sk_for_each(sk, &iucv_sk_list.head) { switch (sk->sk_state) { case IUCV_CONNECTED: sk->sk_err = EPIPE; sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); break; case IUCV_DISCONN: case IUCV_CLOSING: case IUCV_LISTEN: case IUCV_BOUND: case IUCV_OPEN: default: break; } } read_unlock(&iucv_sk_list.lock); return 0; }
+Info
0
static int afiucv_pm_restore_thaw(struct device *dev) { struct sock *sk; #ifdef CONFIG_PM_DEBUG printk(KERN_WARNING "afiucv_pm_restore_thaw\n"); #endif read_lock(&iucv_sk_list.lock); sk_for_each(sk, &iucv_sk_list.head) { switch (sk->sk_state) { case IUCV_CONNECTED: sk->sk_err = EPIPE; sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); break; case IUCV_DISCONN: case IUCV_CLOSING: case IUCV_LISTEN: case IUCV_BOUND: case IUCV_OPEN: default: break; } } read_unlock(&iucv_sk_list.lock); return 0; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,507
static void afiucv_swap_src_dest(struct sk_buff *skb) { struct af_iucv_trans_hdr *trans_hdr = (struct af_iucv_trans_hdr *)skb->data; char tmpID[8]; char tmpName[8]; ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); memcpy(tmpID, trans_hdr->srcUserID, 8); memcpy(tmpName, trans_hdr->srcAppName, 8); memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8); memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8); memcpy(trans_hdr->destUserID, tmpID, 8); memcpy(trans_hdr->destAppName, tmpName, 8); skb_push(skb, ETH_HLEN); memset(skb->data, 0, ETH_HLEN); }
+Info
0
static void afiucv_swap_src_dest(struct sk_buff *skb) { struct af_iucv_trans_hdr *trans_hdr = (struct af_iucv_trans_hdr *)skb->data; char tmpID[8]; char tmpName[8]; ASCEBC(trans_hdr->destUserID, sizeof(trans_hdr->destUserID)); ASCEBC(trans_hdr->destAppName, sizeof(trans_hdr->destAppName)); ASCEBC(trans_hdr->srcUserID, sizeof(trans_hdr->srcUserID)); ASCEBC(trans_hdr->srcAppName, sizeof(trans_hdr->srcAppName)); memcpy(tmpID, trans_hdr->srcUserID, 8); memcpy(tmpName, trans_hdr->srcAppName, 8); memcpy(trans_hdr->srcUserID, trans_hdr->destUserID, 8); memcpy(trans_hdr->srcAppName, trans_hdr->destAppName, 8); memcpy(trans_hdr->destUserID, tmpID, 8); memcpy(trans_hdr->destAppName, tmpName, 8); skb_push(skb, ETH_HLEN); memset(skb->data, 0, ETH_HLEN); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,508
static inline void high_nmcpy(unsigned char *dst, char *src) { memcpy(dst, src, 8); }
+Info
0
static inline void high_nmcpy(unsigned char *dst, char *src) { memcpy(dst, src, 8); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,509
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock) { struct iucv_sock *isk, *n; struct sock *sk; list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { sk = (struct sock *) isk; lock_sock(sk); if (sk->sk_state == IUCV_CLOSED) { iucv_accept_unlink(sk); release_sock(sk); continue; } if (sk->sk_state == IUCV_CONNECTED || sk->sk_state == IUCV_DISCONN || !newsock) { iucv_accept_unlink(sk); if (newsock) sock_graft(sk, newsock); release_sock(sk); return sk; } release_sock(sk); } return NULL; }
+Info
0
struct sock *iucv_accept_dequeue(struct sock *parent, struct socket *newsock) { struct iucv_sock *isk, *n; struct sock *sk; list_for_each_entry_safe(isk, n, &iucv_sk(parent)->accept_q, accept_q) { sk = (struct sock *) isk; lock_sock(sk); if (sk->sk_state == IUCV_CLOSED) { iucv_accept_unlink(sk); release_sock(sk); continue; } if (sk->sk_state == IUCV_CONNECTED || sk->sk_state == IUCV_DISCONN || !newsock) { iucv_accept_unlink(sk); if (newsock) sock_graft(sk, newsock); release_sock(sk); return sk; } release_sock(sk); } return NULL; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,510
static inline int iucv_below_msglim(struct sock *sk) { struct iucv_sock *iucv = iucv_sk(sk); if (sk->sk_state != IUCV_CONNECTED) return 1; if (iucv->transport == AF_IUCV_TRANS_IUCV) return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim); else return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) && (atomic_read(&iucv->pendings) <= 0)); }
+Info
0
static inline int iucv_below_msglim(struct sock *sk) { struct iucv_sock *iucv = iucv_sk(sk); if (sk->sk_state != IUCV_CONNECTED) return 1; if (iucv->transport == AF_IUCV_TRANS_IUCV) return (skb_queue_len(&iucv->send_skb_q) < iucv->path->msglim); else return ((atomic_read(&iucv->msg_sent) < iucv->msglimit_peer) && (atomic_read(&iucv->pendings) <= 0)); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,511
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) { struct sock *sk = path->private; if (sk->sk_state == IUCV_CLOSED) return; bh_lock_sock(sk); iucv_sever_path(sk, 1); sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); bh_unlock_sock(sk); }
+Info
0
static void iucv_callback_connrej(struct iucv_path *path, u8 ipuser[16]) { struct sock *sk = path->private; if (sk->sk_state == IUCV_CLOSED) return; bh_lock_sock(sk); iucv_sever_path(sk, 1); sk->sk_state = IUCV_DISCONN; sk->sk_state_change(sk); bh_unlock_sock(sk); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,512
static int iucv_callback_connreq(struct iucv_path *path, u8 ipvmid[8], u8 ipuser[16]) { unsigned char user_data[16]; unsigned char nuser_data[16]; unsigned char src_name[8]; struct sock *sk, *nsk; struct iucv_sock *iucv, *niucv; int err; memcpy(src_name, ipuser, 8); EBCASC(src_name, 8); /* Find out if this path belongs to af_iucv. */ read_lock(&iucv_sk_list.lock); iucv = NULL; sk = NULL; sk_for_each(sk, &iucv_sk_list.head) if (sk->sk_state == IUCV_LISTEN && !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { /* * Found a listening socket with * src_name == ipuser[0-7]. */ iucv = iucv_sk(sk); break; } read_unlock(&iucv_sk_list.lock); if (!iucv) /* No socket found, not one of our paths. */ return -EINVAL; bh_lock_sock(sk); /* Check if parent socket is listening */ low_nmcpy(user_data, iucv->src_name); high_nmcpy(user_data, iucv->dst_name); ASCEBC(user_data, sizeof(user_data)); if (sk->sk_state != IUCV_LISTEN) { err = pr_iucv->path_sever(path, user_data); iucv_path_free(path); goto fail; } /* Check for backlog size */ if (sk_acceptq_is_full(sk)) { err = pr_iucv->path_sever(path, user_data); iucv_path_free(path); goto fail; } /* Create the new socket */ nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); if (!nsk) { err = pr_iucv->path_sever(path, user_data); iucv_path_free(path); goto fail; } niucv = iucv_sk(nsk); iucv_sock_init(nsk, sk); /* Set the new iucv_sock */ memcpy(niucv->dst_name, ipuser + 8, 8); EBCASC(niucv->dst_name, 8); memcpy(niucv->dst_user_id, ipvmid, 8); memcpy(niucv->src_name, iucv->src_name, 8); memcpy(niucv->src_user_id, iucv->src_user_id, 8); niucv->path = path; /* Call iucv_accept */ high_nmcpy(nuser_data, ipuser + 8); memcpy(nuser_data + 8, niucv->src_name, 8); ASCEBC(nuser_data + 8, 8); /* set message limit for path based on msglimit of accepting socket */ niucv->msglimit = iucv->msglimit; path->msglim = iucv->msglimit; err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk); if (err) { iucv_sever_path(nsk, 1); iucv_sock_kill(nsk); goto fail; } iucv_accept_enqueue(sk, nsk); /* Wake up accept */ nsk->sk_state = IUCV_CONNECTED; sk->sk_data_ready(sk, 1); err = 0; fail: bh_unlock_sock(sk); return 0; }
+Info
0
static int iucv_callback_connreq(struct iucv_path *path, u8 ipvmid[8], u8 ipuser[16]) { unsigned char user_data[16]; unsigned char nuser_data[16]; unsigned char src_name[8]; struct sock *sk, *nsk; struct iucv_sock *iucv, *niucv; int err; memcpy(src_name, ipuser, 8); EBCASC(src_name, 8); /* Find out if this path belongs to af_iucv. */ read_lock(&iucv_sk_list.lock); iucv = NULL; sk = NULL; sk_for_each(sk, &iucv_sk_list.head) if (sk->sk_state == IUCV_LISTEN && !memcmp(&iucv_sk(sk)->src_name, src_name, 8)) { /* * Found a listening socket with * src_name == ipuser[0-7]. */ iucv = iucv_sk(sk); break; } read_unlock(&iucv_sk_list.lock); if (!iucv) /* No socket found, not one of our paths. */ return -EINVAL; bh_lock_sock(sk); /* Check if parent socket is listening */ low_nmcpy(user_data, iucv->src_name); high_nmcpy(user_data, iucv->dst_name); ASCEBC(user_data, sizeof(user_data)); if (sk->sk_state != IUCV_LISTEN) { err = pr_iucv->path_sever(path, user_data); iucv_path_free(path); goto fail; } /* Check for backlog size */ if (sk_acceptq_is_full(sk)) { err = pr_iucv->path_sever(path, user_data); iucv_path_free(path); goto fail; } /* Create the new socket */ nsk = iucv_sock_alloc(NULL, sk->sk_type, GFP_ATOMIC); if (!nsk) { err = pr_iucv->path_sever(path, user_data); iucv_path_free(path); goto fail; } niucv = iucv_sk(nsk); iucv_sock_init(nsk, sk); /* Set the new iucv_sock */ memcpy(niucv->dst_name, ipuser + 8, 8); EBCASC(niucv->dst_name, 8); memcpy(niucv->dst_user_id, ipvmid, 8); memcpy(niucv->src_name, iucv->src_name, 8); memcpy(niucv->src_user_id, iucv->src_user_id, 8); niucv->path = path; /* Call iucv_accept */ high_nmcpy(nuser_data, ipuser + 8); memcpy(nuser_data + 8, niucv->src_name, 8); ASCEBC(nuser_data + 8, 8); /* set message limit for path based on msglimit of accepting socket */ niucv->msglimit = iucv->msglimit; path->msglim = iucv->msglimit; err = pr_iucv->path_accept(path, &af_iucv_handler, nuser_data, nsk); if (err) { iucv_sever_path(nsk, 1); iucv_sock_kill(nsk); goto fail; } iucv_accept_enqueue(sk, nsk); /* Wake up accept */ nsk->sk_state = IUCV_CONNECTED; sk->sk_data_ready(sk, 1); err = 0; fail: bh_unlock_sock(sk); return 0; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,513
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) { struct sock *sk = path->private; struct iucv_sock *iucv = iucv_sk(sk); struct sk_buff *skb; struct sock_msg_q *save_msg; int len; if (sk->sk_shutdown & RCV_SHUTDOWN) { pr_iucv->message_reject(path, msg); return; } spin_lock(&iucv->message_q.lock); if (!list_empty(&iucv->message_q.list) || !skb_queue_empty(&iucv->backlog_skb_q)) goto save_message; len = atomic_read(&sk->sk_rmem_alloc); len += SKB_TRUESIZE(iucv_msg_length(msg)); if (len > sk->sk_rcvbuf) goto save_message; skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA); if (!skb) goto save_message; iucv_process_message(sk, skb, path, msg); goto out_unlock; save_message: save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); if (!save_msg) goto out_unlock; save_msg->path = path; save_msg->msg = *msg; list_add_tail(&save_msg->list, &iucv->message_q.list); out_unlock: spin_unlock(&iucv->message_q.lock); }
+Info
0
static void iucv_callback_rx(struct iucv_path *path, struct iucv_message *msg) { struct sock *sk = path->private; struct iucv_sock *iucv = iucv_sk(sk); struct sk_buff *skb; struct sock_msg_q *save_msg; int len; if (sk->sk_shutdown & RCV_SHUTDOWN) { pr_iucv->message_reject(path, msg); return; } spin_lock(&iucv->message_q.lock); if (!list_empty(&iucv->message_q.list) || !skb_queue_empty(&iucv->backlog_skb_q)) goto save_message; len = atomic_read(&sk->sk_rmem_alloc); len += SKB_TRUESIZE(iucv_msg_length(msg)); if (len > sk->sk_rcvbuf) goto save_message; skb = alloc_skb(iucv_msg_length(msg), GFP_ATOMIC | GFP_DMA); if (!skb) goto save_message; iucv_process_message(sk, skb, path, msg); goto out_unlock; save_message: save_msg = kzalloc(sizeof(struct sock_msg_q), GFP_ATOMIC | GFP_DMA); if (!save_msg) goto out_unlock; save_msg->path = path; save_msg->msg = *msg; list_add_tail(&save_msg->list, &iucv->message_q.list); out_unlock: spin_unlock(&iucv->message_q.lock); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,514
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16]) { struct sock *sk = path->private; bh_lock_sock(sk); if (sk->sk_state != IUCV_CLOSED) { sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); } bh_unlock_sock(sk); }
+Info
0
static void iucv_callback_shutdown(struct iucv_path *path, u8 ipuser[16]) { struct sock *sk = path->private; bh_lock_sock(sk); if (sk->sk_state != IUCV_CLOSED) { sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); } bh_unlock_sock(sk); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,515
static void iucv_callback_txdone(struct iucv_path *path, struct iucv_message *msg) { struct sock *sk = path->private; struct sk_buff *this = NULL; struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; struct sk_buff *list_skb = list->next; unsigned long flags; bh_lock_sock(sk); if (!skb_queue_empty(list)) { spin_lock_irqsave(&list->lock, flags); while (list_skb != (struct sk_buff *)list) { if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { this = list_skb; break; } list_skb = list_skb->next; } if (this) __skb_unlink(this, list); spin_unlock_irqrestore(&list->lock, flags); if (this) { kfree_skb(this); /* wake up any process waiting for sending */ iucv_sock_wake_msglim(sk); } } if (sk->sk_state == IUCV_CLOSING) { if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { sk->sk_state = IUCV_CLOSED; sk->sk_state_change(sk); } } bh_unlock_sock(sk); }
+Info
0
static void iucv_callback_txdone(struct iucv_path *path, struct iucv_message *msg) { struct sock *sk = path->private; struct sk_buff *this = NULL; struct sk_buff_head *list = &iucv_sk(sk)->send_skb_q; struct sk_buff *list_skb = list->next; unsigned long flags; bh_lock_sock(sk); if (!skb_queue_empty(list)) { spin_lock_irqsave(&list->lock, flags); while (list_skb != (struct sk_buff *)list) { if (!memcmp(&msg->tag, CB_TAG(list_skb), CB_TAG_LEN)) { this = list_skb; break; } list_skb = list_skb->next; } if (this) __skb_unlink(this, list); spin_unlock_irqrestore(&list->lock, flags); if (this) { kfree_skb(this); /* wake up any process waiting for sending */ iucv_sock_wake_msglim(sk); } } if (sk->sk_state == IUCV_CLOSING) { if (skb_queue_empty(&iucv_sk(sk)->send_skb_q)) { sk->sk_state = IUCV_CLOSED; sk->sk_state_change(sk); } } bh_unlock_sock(sk); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,516
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) { int dataleft, size, copied = 0; struct sk_buff *nskb; dataleft = len; while (dataleft) { if (dataleft >= sk->sk_rcvbuf / 4) size = sk->sk_rcvbuf / 4; else size = dataleft; nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA); if (!nskb) return -ENOMEM; /* copy target class to control buffer of new skb */ memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); /* copy data fragment */ memcpy(nskb->data, skb->data + copied, size); copied += size; dataleft -= size; skb_reset_transport_header(nskb); skb_reset_network_header(nskb); nskb->len = size; skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb); } return 0; }
+Info
0
static int iucv_fragment_skb(struct sock *sk, struct sk_buff *skb, int len) { int dataleft, size, copied = 0; struct sk_buff *nskb; dataleft = len; while (dataleft) { if (dataleft >= sk->sk_rcvbuf / 4) size = sk->sk_rcvbuf / 4; else size = dataleft; nskb = alloc_skb(size, GFP_ATOMIC | GFP_DMA); if (!nskb) return -ENOMEM; /* copy target class to control buffer of new skb */ memcpy(CB_TRGCLS(nskb), CB_TRGCLS(skb), CB_TRGCLS_LEN); /* copy data fragment */ memcpy(nskb->data, skb->data + copied, size); copied += size; dataleft -= size; skb_reset_transport_header(nskb); skb_reset_network_header(nskb); nskb->len = size; skb_queue_tail(&iucv_sk(sk)->backlog_skb_q, nskb); } return 0; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,517
static void iucv_process_message(struct sock *sk, struct sk_buff *skb, struct iucv_path *path, struct iucv_message *msg) { int rc; unsigned int len; len = iucv_msg_length(msg); /* store msg target class in the second 4 bytes of skb ctrl buffer */ /* Note: the first 4 bytes are reserved for msg tag */ memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN); /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ if ((msg->flags & IUCV_IPRMDATA) && len > 7) { if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) { skb->data = NULL; skb->len = 0; } } else { rc = pr_iucv->message_receive(path, msg, msg->flags & IUCV_IPRMDATA, skb->data, len, NULL); if (rc) { kfree_skb(skb); return; } /* we need to fragment iucv messages for SOCK_STREAM only; * for SOCK_SEQPACKET, it is only relevant if we support * record segmentation using MSG_EOR (see also recvmsg()) */ if (sk->sk_type == SOCK_STREAM && skb->truesize >= sk->sk_rcvbuf / 4) { rc = iucv_fragment_skb(sk, skb, len); kfree_skb(skb); skb = NULL; if (rc) { pr_iucv->path_sever(path, NULL); return; } skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); } else { skb_reset_transport_header(skb); skb_reset_network_header(skb); skb->len = len; } } if (sock_queue_rcv_skb(sk, skb)) skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); }
+Info
0
static void iucv_process_message(struct sock *sk, struct sk_buff *skb, struct iucv_path *path, struct iucv_message *msg) { int rc; unsigned int len; len = iucv_msg_length(msg); /* store msg target class in the second 4 bytes of skb ctrl buffer */ /* Note: the first 4 bytes are reserved for msg tag */ memcpy(CB_TRGCLS(skb), &msg->class, CB_TRGCLS_LEN); /* check for special IPRM messages (e.g. iucv_sock_shutdown) */ if ((msg->flags & IUCV_IPRMDATA) && len > 7) { if (memcmp(msg->rmmsg, iprm_shutdown, 8) == 0) { skb->data = NULL; skb->len = 0; } } else { rc = pr_iucv->message_receive(path, msg, msg->flags & IUCV_IPRMDATA, skb->data, len, NULL); if (rc) { kfree_skb(skb); return; } /* we need to fragment iucv messages for SOCK_STREAM only; * for SOCK_SEQPACKET, it is only relevant if we support * record segmentation using MSG_EOR (see also recvmsg()) */ if (sk->sk_type == SOCK_STREAM && skb->truesize >= sk->sk_rcvbuf / 4) { rc = iucv_fragment_skb(sk, skb, len); kfree_skb(skb); skb = NULL; if (rc) { pr_iucv->path_sever(path, NULL); return; } skb = skb_dequeue(&iucv_sk(sk)->backlog_skb_q); } else { skb_reset_transport_header(skb); skb_reset_network_header(skb); skb->len = len; } } if (sock_queue_rcv_skb(sk, skb)) skb_queue_head(&iucv_sk(sk)->backlog_skb_q, skb); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,518
static void iucv_process_message_q(struct sock *sk) { struct iucv_sock *iucv = iucv_sk(sk); struct sk_buff *skb; struct sock_msg_q *p, *n; list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA); if (!skb) break; iucv_process_message(sk, skb, p->path, &p->msg); list_del(&p->list); kfree(p); if (!skb_queue_empty(&iucv->backlog_skb_q)) break; } }
+Info
0
static void iucv_process_message_q(struct sock *sk) { struct iucv_sock *iucv = iucv_sk(sk); struct sk_buff *skb; struct sock_msg_q *p, *n; list_for_each_entry_safe(p, n, &iucv->message_q.list, list) { skb = alloc_skb(iucv_msg_length(&p->msg), GFP_ATOMIC | GFP_DMA); if (!skb) break; iucv_process_message(sk, skb, p->path, &p->msg); list_del(&p->list); kfree(p); if (!skb_queue_empty(&iucv->backlog_skb_q)) break; } }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,519
static int iucv_send_ctrl(struct sock *sk, u8 flags) { int err = 0; int blen; struct sk_buff *skb; blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN; skb = sock_alloc_send_skb(sk, blen, 1, &err); if (skb) { skb_reserve(skb, blen); err = afiucv_hs_send(NULL, sk, skb, flags); } return err; }
+Info
0
static int iucv_send_ctrl(struct sock *sk, u8 flags) { int err = 0; int blen; struct sk_buff *skb; blen = sizeof(struct af_iucv_trans_hdr) + ETH_HLEN; skb = sock_alloc_send_skb(sk, blen, 1, &err); if (skb) { skb_reserve(skb, blen); err = afiucv_hs_send(NULL, sk, skb, flags); } return err; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,520
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg, struct sk_buff *skb) { u8 prmdata[8]; memcpy(prmdata, (void *) skb->data, skb->len); prmdata[7] = 0xff - (u8) skb->len; return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0, (void *) prmdata, 8); }
+Info
0
static int iucv_send_iprm(struct iucv_path *path, struct iucv_message *msg, struct sk_buff *skb) { u8 prmdata[8]; memcpy(prmdata, (void *) skb->data, skb->len); prmdata[7] = 0xff - (u8) skb->len; return pr_iucv->message_send(path, msg, IUCV_IPRMDATA, 0, (void *) prmdata, 8); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,521
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) { struct sock *sk; struct iucv_sock *iucv; sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto); if (!sk) return NULL; iucv = iucv_sk(sk); sock_init_data(sock, sk); INIT_LIST_HEAD(&iucv->accept_q); spin_lock_init(&iucv->accept_q_lock); skb_queue_head_init(&iucv->send_skb_q); INIT_LIST_HEAD(&iucv->message_q.list); spin_lock_init(&iucv->message_q.lock); skb_queue_head_init(&iucv->backlog_skb_q); iucv->send_tag = 0; atomic_set(&iucv->pendings, 0); iucv->flags = 0; iucv->msglimit = 0; atomic_set(&iucv->msg_sent, 0); atomic_set(&iucv->msg_recv, 0); iucv->path = NULL; iucv->sk_txnotify = afiucv_hs_callback_txnotify; memset(&iucv->src_user_id , 0, 32); if (pr_iucv) iucv->transport = AF_IUCV_TRANS_IUCV; else iucv->transport = AF_IUCV_TRANS_HIPER; sk->sk_destruct = iucv_sock_destruct; sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; sk->sk_allocation = GFP_DMA; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = proto; sk->sk_state = IUCV_OPEN; iucv_sock_link(&iucv_sk_list, sk); return sk; }
+Info
0
static struct sock *iucv_sock_alloc(struct socket *sock, int proto, gfp_t prio) { struct sock *sk; struct iucv_sock *iucv; sk = sk_alloc(&init_net, PF_IUCV, prio, &iucv_proto); if (!sk) return NULL; iucv = iucv_sk(sk); sock_init_data(sock, sk); INIT_LIST_HEAD(&iucv->accept_q); spin_lock_init(&iucv->accept_q_lock); skb_queue_head_init(&iucv->send_skb_q); INIT_LIST_HEAD(&iucv->message_q.list); spin_lock_init(&iucv->message_q.lock); skb_queue_head_init(&iucv->backlog_skb_q); iucv->send_tag = 0; atomic_set(&iucv->pendings, 0); iucv->flags = 0; iucv->msglimit = 0; atomic_set(&iucv->msg_sent, 0); atomic_set(&iucv->msg_recv, 0); iucv->path = NULL; iucv->sk_txnotify = afiucv_hs_callback_txnotify; memset(&iucv->src_user_id , 0, 32); if (pr_iucv) iucv->transport = AF_IUCV_TRANS_IUCV; else iucv->transport = AF_IUCV_TRANS_HIPER; sk->sk_destruct = iucv_sock_destruct; sk->sk_sndtimeo = IUCV_CONN_TIMEOUT; sk->sk_allocation = GFP_DMA; sock_reset_flag(sk, SOCK_ZAPPED); sk->sk_protocol = proto; sk->sk_state = IUCV_OPEN; iucv_sock_link(&iucv_sk_list, sk); return sk; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,522
static int iucv_sock_autobind(struct sock *sk) { struct iucv_sock *iucv = iucv_sk(sk); char name[12]; int err = 0; if (unlikely(!pr_iucv)) return -EPROTO; memcpy(iucv->src_user_id, iucv_userid, 8); write_lock_bh(&iucv_sk_list.lock); sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); while (__iucv_get_sock_by_name(name)) { sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); } write_unlock_bh(&iucv_sk_list.lock); memcpy(&iucv->src_name, name, 8); if (!iucv->msglimit) iucv->msglimit = IUCV_QUEUELEN_DEFAULT; return err; }
+Info
0
static int iucv_sock_autobind(struct sock *sk) { struct iucv_sock *iucv = iucv_sk(sk); char name[12]; int err = 0; if (unlikely(!pr_iucv)) return -EPROTO; memcpy(iucv->src_user_id, iucv_userid, 8); write_lock_bh(&iucv_sk_list.lock); sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); while (__iucv_get_sock_by_name(name)) { sprintf(name, "%08x", atomic_inc_return(&iucv_sk_list.autobind_name)); } write_unlock_bh(&iucv_sk_list.lock); memcpy(&iucv->src_name, name, 8); if (!iucv->msglimit) iucv->msglimit = IUCV_QUEUELEN_DEFAULT; return err; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,523
static void iucv_sock_cleanup_listen(struct sock *parent) { struct sock *sk; /* Close non-accepted connections */ while ((sk = iucv_accept_dequeue(parent, NULL))) { iucv_sock_close(sk); iucv_sock_kill(sk); } parent->sk_state = IUCV_CLOSED; }
+Info
0
static void iucv_sock_cleanup_listen(struct sock *parent) { struct sock *sk; /* Close non-accepted connections */ while ((sk = iucv_accept_dequeue(parent, NULL))) { iucv_sock_close(sk); iucv_sock_kill(sk); } parent->sk_state = IUCV_CLOSED; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,524
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; if (protocol && protocol != PF_IUCV) return -EPROTONOSUPPORT; sock->state = SS_UNCONNECTED; switch (sock->type) { case SOCK_STREAM: sock->ops = &iucv_sock_ops; break; case SOCK_SEQPACKET: /* currently, proto ops can handle both sk types */ sock->ops = &iucv_sock_ops; break; default: return -ESOCKTNOSUPPORT; } sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL); if (!sk) return -ENOMEM; iucv_sock_init(sk, NULL); return 0; }
+Info
0
static int iucv_sock_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; if (protocol && protocol != PF_IUCV) return -EPROTONOSUPPORT; sock->state = SS_UNCONNECTED; switch (sock->type) { case SOCK_STREAM: sock->ops = &iucv_sock_ops; break; case SOCK_SEQPACKET: /* currently, proto ops can handle both sk types */ sock->ops = &iucv_sock_ops; break; default: return -ESOCKTNOSUPPORT; } sk = iucv_sock_alloc(sock, protocol, GFP_KERNEL); if (!sk) return -ENOMEM; iucv_sock_init(sk, NULL); return 0; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,525
static void iucv_sock_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_error_queue); sk_mem_reclaim(sk); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Attempt to release alive iucv socket %p\n", sk); return; } WARN_ON(atomic_read(&sk->sk_rmem_alloc)); WARN_ON(atomic_read(&sk->sk_wmem_alloc)); WARN_ON(sk->sk_wmem_queued); WARN_ON(sk->sk_forward_alloc); }
+Info
0
static void iucv_sock_destruct(struct sock *sk) { skb_queue_purge(&sk->sk_receive_queue); skb_queue_purge(&sk->sk_error_queue); sk_mem_reclaim(sk); if (!sock_flag(sk, SOCK_DEAD)) { pr_err("Attempt to release alive iucv socket %p\n", sk); return; } WARN_ON(atomic_read(&sk->sk_rmem_alloc)); WARN_ON(atomic_read(&sk->sk_wmem_alloc)); WARN_ON(sk->sk_wmem_queued); WARN_ON(sk->sk_forward_alloc); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,526
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) { struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr; struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); addr->sa_family = AF_IUCV; *len = sizeof(struct sockaddr_iucv); if (peer) { memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8); memcpy(siucv->siucv_name, iucv->dst_name, 8); } else { memcpy(siucv->siucv_user_id, iucv->src_user_id, 8); memcpy(siucv->siucv_name, iucv->src_name, 8); } memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port)); memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr)); memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid)); return 0; }
+Info
0
static int iucv_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer) { struct sockaddr_iucv *siucv = (struct sockaddr_iucv *) addr; struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); addr->sa_family = AF_IUCV; *len = sizeof(struct sockaddr_iucv); if (peer) { memcpy(siucv->siucv_user_id, iucv->dst_user_id, 8); memcpy(siucv->siucv_name, iucv->dst_name, 8); } else { memcpy(siucv->siucv_user_id, iucv->src_user_id, 8); memcpy(siucv->siucv_name, iucv->src_name, 8); } memset(&siucv->siucv_port, 0, sizeof(siucv->siucv_port)); memset(&siucv->siucv_addr, 0, sizeof(siucv->siucv_addr)); memset(&siucv->siucv_nodeid, 0, sizeof(siucv->siucv_nodeid)); return 0; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,527
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); unsigned int val; int len; if (level != SOL_IUCV) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; len = min_t(unsigned int, len, sizeof(int)); switch (optname) { case SO_IPRMDATA_MSG: val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0; break; case SO_MSGLIMIT: lock_sock(sk); val = (iucv->path != NULL) ? iucv->path->msglim /* connected */ : iucv->msglimit; /* default */ release_sock(sk); break; case SO_MSGSIZE: if (sk->sk_state == IUCV_OPEN) return -EBADFD; val = (iucv->hs_dev) ? iucv->hs_dev->mtu - sizeof(struct af_iucv_trans_hdr) - ETH_HLEN : 0x7fffffff; break; default: return -ENOPROTOOPT; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; }
+Info
0
static int iucv_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); unsigned int val; int len; if (level != SOL_IUCV) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if (len < 0) return -EINVAL; len = min_t(unsigned int, len, sizeof(int)); switch (optname) { case SO_IPRMDATA_MSG: val = (iucv->flags & IUCV_IPRMDATA) ? 1 : 0; break; case SO_MSGLIMIT: lock_sock(sk); val = (iucv->path != NULL) ? iucv->path->msglim /* connected */ : iucv->msglimit; /* default */ release_sock(sk); break; case SO_MSGSIZE: if (sk->sk_state == IUCV_OPEN) return -EBADFD; val = (iucv->hs_dev) ? iucv->hs_dev->mtu - sizeof(struct af_iucv_trans_hdr) - ETH_HLEN : 0x7fffffff; break; default: return -ENOPROTOOPT; } if (put_user(len, optlen)) return -EFAULT; if (copy_to_user(optval, &val, len)) return -EFAULT; return 0; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,528
static void iucv_sock_init(struct sock *sk, struct sock *parent) { if (parent) sk->sk_type = parent->sk_type; }
+Info
0
static void iucv_sock_init(struct sock *sk, struct sock *parent) { if (parent) sk->sk_type = parent->sk_type; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,529
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) { write_lock_bh(&l->lock); sk_add_node(sk, &l->head); write_unlock_bh(&l->lock); }
+Info
0
void iucv_sock_link(struct iucv_sock_list *l, struct sock *sk) { write_lock_bh(&l->lock); sk_add_node(sk, &l->head); write_unlock_bh(&l->lock); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,530
static int iucv_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err; lock_sock(sk); err = -EINVAL; if (sk->sk_state != IUCV_BOUND) goto done; if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) goto done; sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = IUCV_LISTEN; err = 0; done: release_sock(sk); return err; }
+Info
0
static int iucv_sock_listen(struct socket *sock, int backlog) { struct sock *sk = sock->sk; int err; lock_sock(sk); err = -EINVAL; if (sk->sk_state != IUCV_BOUND) goto done; if (sock->type != SOCK_STREAM && sock->type != SOCK_SEQPACKET) goto done; sk->sk_max_ack_backlog = backlog; sk->sk_ack_backlog = 0; sk->sk_state = IUCV_LISTEN; err = 0; done: release_sock(sk); return err; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,531
unsigned int iucv_sock_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; unsigned int mask = 0; sock_poll_wait(file, sk_sleep(sk), wait); if (sk->sk_state == IUCV_LISTEN) return iucv_accept_poll(sk); if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) mask |= POLLERR; if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= POLLHUP; if (!skb_queue_empty(&sk->sk_receive_queue) || (sk->sk_shutdown & RCV_SHUTDOWN)) mask |= POLLIN | POLLRDNORM; if (sk->sk_state == IUCV_CLOSED) mask |= POLLHUP; if (sk->sk_state == IUCV_DISCONN) mask |= POLLIN; if (sock_writeable(sk) && iucv_below_msglim(sk)) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; else set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); return mask; }
+Info
0
unsigned int iucv_sock_poll(struct file *file, struct socket *sock, poll_table *wait) { struct sock *sk = sock->sk; unsigned int mask = 0; sock_poll_wait(file, sk_sleep(sk), wait); if (sk->sk_state == IUCV_LISTEN) return iucv_accept_poll(sk); if (sk->sk_err || !skb_queue_empty(&sk->sk_error_queue)) mask |= POLLERR; if (sk->sk_shutdown & RCV_SHUTDOWN) mask |= POLLRDHUP; if (sk->sk_shutdown == SHUTDOWN_MASK) mask |= POLLHUP; if (!skb_queue_empty(&sk->sk_receive_queue) || (sk->sk_shutdown & RCV_SHUTDOWN)) mask |= POLLIN | POLLRDNORM; if (sk->sk_state == IUCV_CLOSED) mask |= POLLHUP; if (sk->sk_state == IUCV_DISCONN) mask |= POLLIN; if (sock_writeable(sk) && iucv_below_msglim(sk)) mask |= POLLOUT | POLLWRNORM | POLLWRBAND; else set_bit(SOCK_ASYNC_NOSPACE, &sk->sk_socket->flags); return mask; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,532
static int iucv_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err = 0; if (!sk) return 0; iucv_sock_close(sk); sock_orphan(sk); iucv_sock_kill(sk); return err; }
+Info
0
static int iucv_sock_release(struct socket *sock) { struct sock *sk = sock->sk; int err = 0; if (!sk) return 0; iucv_sock_close(sk); sock_orphan(sk); iucv_sock_kill(sk); return err; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,533
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); struct sk_buff *skb; struct iucv_message txmsg; struct cmsghdr *cmsg; int cmsg_done; long timeo; char user_id[9]; char appl_id[9]; int err; int noblock = msg->msg_flags & MSG_DONTWAIT; err = sock_error(sk); if (err) return err; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; /* SOCK_SEQPACKET: we do not support segmented records */ if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR)) return -EOPNOTSUPP; lock_sock(sk); if (sk->sk_shutdown & SEND_SHUTDOWN) { err = -EPIPE; goto out; } /* Return if the socket is not in connected state */ if (sk->sk_state != IUCV_CONNECTED) { err = -ENOTCONN; goto out; } /* initialize defaults */ cmsg_done = 0; /* check for duplicate headers */ txmsg.class = 0; /* iterate over control messages */ for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { if (!CMSG_OK(msg, cmsg)) { err = -EINVAL; goto out; } if (cmsg->cmsg_level != SOL_IUCV) continue; if (cmsg->cmsg_type & cmsg_done) { err = -EINVAL; goto out; } cmsg_done |= cmsg->cmsg_type; switch (cmsg->cmsg_type) { case SCM_IUCV_TRGCLS: if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) { err = -EINVAL; goto out; } /* set iucv message target class */ memcpy(&txmsg.class, (void *) CMSG_DATA(cmsg), TRGCLS_SIZE); break; default: err = -EINVAL; goto out; break; } } /* allocate one skb for each iucv message: * this is fine for SOCK_SEQPACKET (unless we want to support * segmented records using the MSG_EOR flag), but * for SOCK_STREAM we might want to improve it in future */ if (iucv->transport == AF_IUCV_TRANS_HIPER) skb = sock_alloc_send_skb(sk, len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN, noblock, &err); else skb = sock_alloc_send_skb(sk, len, noblock, &err); if (!skb) { err = -ENOMEM; goto out; } if (iucv->transport == AF_IUCV_TRANS_HIPER) skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { err = -EFAULT; goto fail; } /* wait if outstanding messages for iucv path has reached */ timeo = sock_sndtimeo(sk, noblock); err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); if (err) goto fail; /* return -ECONNRESET if the socket is no longer connected */ if (sk->sk_state != IUCV_CONNECTED) { err = -ECONNRESET; goto fail; } /* increment and save iucv message tag for msg_completion cbk */ txmsg.tag = iucv->send_tag++; memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); if (iucv->transport == AF_IUCV_TRANS_HIPER) { atomic_inc(&iucv->msg_sent); err = afiucv_hs_send(&txmsg, sk, skb, 0); if (err) { atomic_dec(&iucv->msg_sent); goto fail; } goto release; } skb_queue_tail(&iucv->send_skb_q, skb); if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) && skb->len <= 7) { err = iucv_send_iprm(iucv->path, &txmsg, skb); /* on success: there is no message_complete callback * for an IPRMDATA msg; remove skb from send queue */ if (err == 0) { skb_unlink(skb, &iucv->send_skb_q); kfree_skb(skb); } /* this error should never happen since the * IUCV_IPRMDATA path flag is set... sever path */ if (err == 0x15) { pr_iucv->path_sever(iucv->path, NULL); skb_unlink(skb, &iucv->send_skb_q); err = -EPIPE; goto fail; } } else err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0, (void *) skb->data, skb->len); if (err) { if (err == 3) { user_id[8] = 0; memcpy(user_id, iucv->dst_user_id, 8); appl_id[8] = 0; memcpy(appl_id, iucv->dst_name, 8); pr_err("Application %s on z/VM guest %s" " exceeds message limit\n", appl_id, user_id); err = -EAGAIN; } else err = -EPIPE; skb_unlink(skb, &iucv->send_skb_q); goto fail; } release: release_sock(sk); return len; fail: kfree_skb(skb); out: release_sock(sk); return err; }
+Info
0
static int iucv_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); struct sk_buff *skb; struct iucv_message txmsg; struct cmsghdr *cmsg; int cmsg_done; long timeo; char user_id[9]; char appl_id[9]; int err; int noblock = msg->msg_flags & MSG_DONTWAIT; err = sock_error(sk); if (err) return err; if (msg->msg_flags & MSG_OOB) return -EOPNOTSUPP; /* SOCK_SEQPACKET: we do not support segmented records */ if (sk->sk_type == SOCK_SEQPACKET && !(msg->msg_flags & MSG_EOR)) return -EOPNOTSUPP; lock_sock(sk); if (sk->sk_shutdown & SEND_SHUTDOWN) { err = -EPIPE; goto out; } /* Return if the socket is not in connected state */ if (sk->sk_state != IUCV_CONNECTED) { err = -ENOTCONN; goto out; } /* initialize defaults */ cmsg_done = 0; /* check for duplicate headers */ txmsg.class = 0; /* iterate over control messages */ for (cmsg = CMSG_FIRSTHDR(msg); cmsg; cmsg = CMSG_NXTHDR(msg, cmsg)) { if (!CMSG_OK(msg, cmsg)) { err = -EINVAL; goto out; } if (cmsg->cmsg_level != SOL_IUCV) continue; if (cmsg->cmsg_type & cmsg_done) { err = -EINVAL; goto out; } cmsg_done |= cmsg->cmsg_type; switch (cmsg->cmsg_type) { case SCM_IUCV_TRGCLS: if (cmsg->cmsg_len != CMSG_LEN(TRGCLS_SIZE)) { err = -EINVAL; goto out; } /* set iucv message target class */ memcpy(&txmsg.class, (void *) CMSG_DATA(cmsg), TRGCLS_SIZE); break; default: err = -EINVAL; goto out; break; } } /* allocate one skb for each iucv message: * this is fine for SOCK_SEQPACKET (unless we want to support * segmented records using the MSG_EOR flag), but * for SOCK_STREAM we might want to improve it in future */ if (iucv->transport == AF_IUCV_TRANS_HIPER) skb = sock_alloc_send_skb(sk, len + sizeof(struct af_iucv_trans_hdr) + ETH_HLEN, noblock, &err); else skb = sock_alloc_send_skb(sk, len, noblock, &err); if (!skb) { err = -ENOMEM; goto out; } if (iucv->transport == AF_IUCV_TRANS_HIPER) skb_reserve(skb, sizeof(struct af_iucv_trans_hdr) + ETH_HLEN); if (memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len)) { err = -EFAULT; goto fail; } /* wait if outstanding messages for iucv path has reached */ timeo = sock_sndtimeo(sk, noblock); err = iucv_sock_wait(sk, iucv_below_msglim(sk), timeo); if (err) goto fail; /* return -ECONNRESET if the socket is no longer connected */ if (sk->sk_state != IUCV_CONNECTED) { err = -ECONNRESET; goto fail; } /* increment and save iucv message tag for msg_completion cbk */ txmsg.tag = iucv->send_tag++; memcpy(CB_TAG(skb), &txmsg.tag, CB_TAG_LEN); if (iucv->transport == AF_IUCV_TRANS_HIPER) { atomic_inc(&iucv->msg_sent); err = afiucv_hs_send(&txmsg, sk, skb, 0); if (err) { atomic_dec(&iucv->msg_sent); goto fail; } goto release; } skb_queue_tail(&iucv->send_skb_q, skb); if (((iucv->path->flags & IUCV_IPRMDATA) & iucv->flags) && skb->len <= 7) { err = iucv_send_iprm(iucv->path, &txmsg, skb); /* on success: there is no message_complete callback * for an IPRMDATA msg; remove skb from send queue */ if (err == 0) { skb_unlink(skb, &iucv->send_skb_q); kfree_skb(skb); } /* this error should never happen since the * IUCV_IPRMDATA path flag is set... sever path */ if (err == 0x15) { pr_iucv->path_sever(iucv->path, NULL); skb_unlink(skb, &iucv->send_skb_q); err = -EPIPE; goto fail; } } else err = pr_iucv->message_send(iucv->path, &txmsg, 0, 0, (void *) skb->data, skb->len); if (err) { if (err == 3) { user_id[8] = 0; memcpy(user_id, iucv->dst_user_id, 8); appl_id[8] = 0; memcpy(appl_id, iucv->dst_name, 8); pr_err("Application %s on z/VM guest %s" " exceeds message limit\n", appl_id, user_id); err = -EAGAIN; } else err = -EPIPE; skb_unlink(skb, &iucv->send_skb_q); goto fail; } release: release_sock(sk); return len; fail: kfree_skb(skb); out: release_sock(sk); return err; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
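Every CWE-200 record in this block carries the same one-line hunk: the protocol's recvmsg handler (iucv_sock_recvmsg for the AF_IUCV records, irda_recvmsg_dgram for the AF_IRDA ones) now zeroes msg->msg_namelen on entry. The socket core points msg->msg_name at an on-stack sockaddr_storage buffer; if a handler neither fills that buffer nor resets the length, the core copies up to the caller-supplied number of uninitialized stack bytes back to user space. A minimal kernel-side sketch of the pattern follows; the handler name is hypothetical and the body is reduced to the part the patch touches, not any of the driver code in these records.

#include <linux/aio.h>		/* struct kiocb (kernels of this era) */
#include <linux/net.h>		/* struct socket */
#include <linux/socket.h>	/* struct msghdr */

/* Hypothetical handler, sketching the fix these patches apply. */
static int example_recvmsg(struct kiocb *iocb, struct socket *sock,
			   struct msghdr *msg, size_t len, int flags)
{
	/* The CWE-200 fix: nothing is stored in msg_name here, so the
	 * reported name length must be 0, even on early error returns.
	 * Otherwise the socket core would copy a stale, caller-chosen
	 * number of uninitialized stack bytes out through msg_name. */
	msg->msg_namelen = 0;

	/* ... protocol receive path (dequeue skb, copy to iovec) ... */
	return 0;
}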
24,534
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); int val; int rc; if (level != SOL_IUCV) return -ENOPROTOOPT; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *) optval)) return -EFAULT; rc = 0; lock_sock(sk); switch (optname) { case SO_IPRMDATA_MSG: if (val) iucv->flags |= IUCV_IPRMDATA; else iucv->flags &= ~IUCV_IPRMDATA; break; case SO_MSGLIMIT: switch (sk->sk_state) { case IUCV_OPEN: case IUCV_BOUND: if (val < 1 || val > (u16)(~0)) rc = -EINVAL; else iucv->msglimit = val; break; default: rc = -EINVAL; break; } break; default: rc = -ENOPROTOOPT; break; } release_sock(sk); return rc; }
+Info
0
static int iucv_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct iucv_sock *iucv = iucv_sk(sk); int val; int rc; if (level != SOL_IUCV) return -ENOPROTOOPT; if (optlen < sizeof(int)) return -EINVAL; if (get_user(val, (int __user *) optval)) return -EFAULT; rc = 0; lock_sock(sk); switch (optname) { case SO_IPRMDATA_MSG: if (val) iucv->flags |= IUCV_IPRMDATA; else iucv->flags &= ~IUCV_IPRMDATA; break; case SO_MSGLIMIT: switch (sk->sk_state) { case IUCV_OPEN: case IUCV_BOUND: if (val < 1 || val > (u16)(~0)) rc = -EINVAL; else iucv->msglimit = val; break; default: rc = -EINVAL; break; } break; default: rc = -ENOPROTOOPT; break; } release_sock(sk); return rc; }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,535
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) { write_lock_bh(&l->lock); sk_del_node_init(sk); write_unlock_bh(&l->lock); }
+Info
0
void iucv_sock_unlink(struct iucv_sock_list *l, struct sock *sk) { write_lock_bh(&l->lock); sk_del_node_init(sk); write_unlock_bh(&l->lock); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,536
static void iucv_sock_wake_msglim(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_all(&wq->wait); sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); }
+Info
0
static void iucv_sock_wake_msglim(struct sock *sk) { struct socket_wq *wq; rcu_read_lock(); wq = rcu_dereference(sk->sk_wq); if (wq_has_sleeper(wq)) wake_up_interruptible_all(&wq->wait); sk_wake_async(sk, SOCK_WAKE_SPACE, POLL_OUT); rcu_read_unlock(); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,537
static inline void low_nmcpy(unsigned char *dst, char *src) { memcpy(&dst[8], src, 8); }
+Info
0
static inline void low_nmcpy(unsigned char *dst, char *src) { memcpy(&dst[8], src, 8); }
@@ -1328,6 +1328,8 @@ static int iucv_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct sk_buff *skb, *rskb, *cskb; int err = 0; + msg->msg_namelen = 0; + if ((sk->sk_state == IUCV_DISCONN) && skb_queue_empty(&iucv->backlog_skb_q) && skb_queue_empty(&sk->sk_receive_queue) &&
CWE-200
null
null
24,538
static int irda_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk; struct irda_sock *new, *self = irda_sk(sk); struct sock *newsk; struct sk_buff *skb; int err; IRDA_DEBUG(2, "%s()\n", __func__); err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); if (err) return err; err = -EINVAL; lock_sock(sk); if (sock->state != SS_UNCONNECTED) goto out; if ((sk = sock->sk) == NULL) goto out; err = -EOPNOTSUPP; if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && (sk->sk_type != SOCK_DGRAM)) goto out; err = -EINVAL; if (sk->sk_state != TCP_LISTEN) goto out; /* * The read queue this time is holding sockets ready to use * hooked into the SABM we saved */ /* * We can perform the accept only if there is incoming data * on the listening socket. * So, we will block the caller until we receive any data. * If the caller was waiting on select() or poll() before * calling us, the data is waiting for us ;-) * Jean II */ while (1) { skb = skb_dequeue(&sk->sk_receive_queue); if (skb) break; /* Non blocking operation */ err = -EWOULDBLOCK; if (flags & O_NONBLOCK) goto out; err = wait_event_interruptible(*(sk_sleep(sk)), skb_peek(&sk->sk_receive_queue)); if (err) goto out; } newsk = newsock->sk; err = -EIO; if (newsk == NULL) goto out; newsk->sk_state = TCP_ESTABLISHED; new = irda_sk(newsk); /* Now attach up the new socket */ new->tsap = irttp_dup(self->tsap, new); err = -EPERM; /* value does not seem to make sense. -arnd */ if (!new->tsap) { IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); kfree_skb(skb); goto out; } new->stsap_sel = new->tsap->stsap_sel; new->dtsap_sel = new->tsap->dtsap_sel; new->saddr = irttp_get_saddr(new->tsap); new->daddr = irttp_get_daddr(new->tsap); new->max_sdu_size_tx = self->max_sdu_size_tx; new->max_sdu_size_rx = self->max_sdu_size_rx; new->max_data_size = self->max_data_size; new->max_header_size = self->max_header_size; memcpy(&new->qos_tx, &self->qos_tx, sizeof(struct qos_info)); /* Clean up the original one to keep it in listen state */ irttp_listen(self->tsap); kfree_skb(skb); sk->sk_ack_backlog--; newsock->state = SS_CONNECTED; irda_connect_response(new); err = 0; out: release_sock(sk); return err; }
+Info
0
static int irda_accept(struct socket *sock, struct socket *newsock, int flags) { struct sock *sk = sock->sk; struct irda_sock *new, *self = irda_sk(sk); struct sock *newsk; struct sk_buff *skb; int err; IRDA_DEBUG(2, "%s()\n", __func__); err = irda_create(sock_net(sk), newsock, sk->sk_protocol, 0); if (err) return err; err = -EINVAL; lock_sock(sk); if (sock->state != SS_UNCONNECTED) goto out; if ((sk = sock->sk) == NULL) goto out; err = -EOPNOTSUPP; if ((sk->sk_type != SOCK_STREAM) && (sk->sk_type != SOCK_SEQPACKET) && (sk->sk_type != SOCK_DGRAM)) goto out; err = -EINVAL; if (sk->sk_state != TCP_LISTEN) goto out; /* * The read queue this time is holding sockets ready to use * hooked into the SABM we saved */ /* * We can perform the accept only if there is incoming data * on the listening socket. * So, we will block the caller until we receive any data. * If the caller was waiting on select() or poll() before * calling us, the data is waiting for us ;-) * Jean II */ while (1) { skb = skb_dequeue(&sk->sk_receive_queue); if (skb) break; /* Non blocking operation */ err = -EWOULDBLOCK; if (flags & O_NONBLOCK) goto out; err = wait_event_interruptible(*(sk_sleep(sk)), skb_peek(&sk->sk_receive_queue)); if (err) goto out; } newsk = newsock->sk; err = -EIO; if (newsk == NULL) goto out; newsk->sk_state = TCP_ESTABLISHED; new = irda_sk(newsk); /* Now attach up the new socket */ new->tsap = irttp_dup(self->tsap, new); err = -EPERM; /* value does not seem to make sense. -arnd */ if (!new->tsap) { IRDA_DEBUG(0, "%s(), dup failed!\n", __func__); kfree_skb(skb); goto out; } new->stsap_sel = new->tsap->stsap_sel; new->dtsap_sel = new->tsap->dtsap_sel; new->saddr = irttp_get_saddr(new->tsap); new->daddr = irttp_get_daddr(new->tsap); new->max_sdu_size_tx = self->max_sdu_size_tx; new->max_sdu_size_rx = self->max_sdu_size_rx; new->max_data_size = self->max_data_size; new->max_header_size = self->max_header_size; memcpy(&new->qos_tx, &self->qos_tx, sizeof(struct qos_info)); /* Clean up the original one to keep it in listen state */ irttp_listen(self->tsap); kfree_skb(skb); sk->sk_ack_backlog--; newsock->state = SS_CONNECTED; irda_connect_response(new); err = 0; out: release_sock(sk); return err; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,539
static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr; struct irda_sock *self = irda_sk(sk); int err; IRDA_DEBUG(2, "%s(%p)\n", __func__, self); if (addr_len != sizeof(struct sockaddr_irda)) return -EINVAL; lock_sock(sk); #ifdef CONFIG_IRDA_ULTRA /* Special care for Ultra sockets */ if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) { self->pid = addr->sir_lsap_sel; err = -EOPNOTSUPP; if (self->pid & 0x80) { IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); goto out; } err = irda_open_lsap(self, self->pid); if (err < 0) goto out; /* Pretend we are connected */ sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; err = 0; goto out; } #endif /* CONFIG_IRDA_ULTRA */ self->ias_obj = irias_new_object(addr->sir_name, jiffies); err = -ENOMEM; if (self->ias_obj == NULL) goto out; err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); if (err < 0) { irias_delete_object(self->ias_obj); self->ias_obj = NULL; goto out; } /* Register with LM-IAS */ irias_add_integer_attrib(self->ias_obj, "IrDA:TinyTP:LsapSel", self->stsap_sel, IAS_KERNEL_ATTR); irias_insert_object(self->ias_obj); err = 0; out: release_sock(sk); return err; }
+Info
0
static int irda_bind(struct socket *sock, struct sockaddr *uaddr, int addr_len) { struct sock *sk = sock->sk; struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr; struct irda_sock *self = irda_sk(sk); int err; IRDA_DEBUG(2, "%s(%p)\n", __func__, self); if (addr_len != sizeof(struct sockaddr_irda)) return -EINVAL; lock_sock(sk); #ifdef CONFIG_IRDA_ULTRA /* Special care for Ultra sockets */ if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) { self->pid = addr->sir_lsap_sel; err = -EOPNOTSUPP; if (self->pid & 0x80) { IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); goto out; } err = irda_open_lsap(self, self->pid); if (err < 0) goto out; /* Pretend we are connected */ sock->state = SS_CONNECTED; sk->sk_state = TCP_ESTABLISHED; err = 0; goto out; } #endif /* CONFIG_IRDA_ULTRA */ self->ias_obj = irias_new_object(addr->sir_name, jiffies); err = -ENOMEM; if (self->ias_obj == NULL) goto out; err = irda_open_tsap(self, addr->sir_lsap_sel, addr->sir_name); if (err < 0) { irias_delete_object(self->ias_obj); self->ias_obj = NULL; goto out; } /* Register with LM-IAS */ irias_add_integer_attrib(self->ias_obj, "IrDA:TinyTP:LsapSel", self->stsap_sel, IAS_KERNEL_ATTR); irias_insert_object(self->ias_obj); err = 0; out: release_sock(sk); return err; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,540
static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { /* * All IRDA's ioctl are standard ones. */ return -ENOIOCTLCMD; }
+Info
0
static int irda_compat_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { /* * All IRDA's ioctl are standard ones. */ return -ENOIOCTLCMD; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,541
static int irda_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr; struct irda_sock *self = irda_sk(sk); int err; IRDA_DEBUG(2, "%s(%p)\n", __func__, self); lock_sock(sk); /* Don't allow connect for Ultra sockets */ err = -ESOCKTNOSUPPORT; if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) goto out; if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { sock->state = SS_CONNECTED; err = 0; goto out; /* Connect completed during a ERESTARTSYS event */ } if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out; } err = -EISCONN; /* No reconnect on a seqpacket socket */ if (sk->sk_state == TCP_ESTABLISHED) goto out; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; err = -EINVAL; if (addr_len != sizeof(struct sockaddr_irda)) goto out; /* Check if user supplied any destination device address */ if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) { /* Try to find one suitable */ err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); if (err) { IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__); goto out; } } else { /* Use the one provided by the user */ self->daddr = addr->sir_addr; IRDA_DEBUG(1, "%s(), daddr = %08x\n", __func__, self->daddr); /* If we don't have a valid service name, we assume the * user want to connect on a specific LSAP. Prevent * the use of invalid LSAPs (IrLMP 1.1 p10). Jean II */ if((addr->sir_name[0] != '\0') || (addr->sir_lsap_sel >= 0x70)) { /* Query remote LM-IAS using service name */ err = irda_find_lsap_sel(self, addr->sir_name); if (err) { IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); goto out; } } else { /* Directly connect to the remote LSAP * specified by the sir_lsap field. * Please use with caution, in IrDA LSAPs are * dynamic and there is no "well-known" LSAP. */ self->dtsap_sel = addr->sir_lsap_sel; } } /* Check if we have opened a local TSAP */ if (!self->tsap) irda_open_tsap(self, LSAP_ANY, addr->sir_name); /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; /* Connect to remote device */ err = irttp_connect_request(self->tsap, self->dtsap_sel, self->saddr, self->daddr, NULL, self->max_sdu_size_rx, NULL); if (err) { IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); goto out; } /* Now the loop */ err = -EINPROGRESS; if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) goto out; err = -ERESTARTSYS; if (wait_event_interruptible(*(sk_sleep(sk)), (sk->sk_state != TCP_SYN_SENT))) goto out; if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; if (sk->sk_prot->disconnect(sk, flags)) sock->state = SS_DISCONNECTING; err = sock_error(sk); if (!err) err = -ECONNRESET; goto out; } sock->state = SS_CONNECTED; /* At this point, IrLMP has assigned our source address */ self->saddr = irttp_get_saddr(self->tsap); err = 0; out: release_sock(sk); return err; }
+Info
0
static int irda_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct sockaddr_irda *addr = (struct sockaddr_irda *) uaddr; struct irda_sock *self = irda_sk(sk); int err; IRDA_DEBUG(2, "%s(%p)\n", __func__, self); lock_sock(sk); /* Don't allow connect for Ultra sockets */ err = -ESOCKTNOSUPPORT; if ((sk->sk_type == SOCK_DGRAM) && (sk->sk_protocol == IRDAPROTO_ULTRA)) goto out; if (sk->sk_state == TCP_ESTABLISHED && sock->state == SS_CONNECTING) { sock->state = SS_CONNECTED; err = 0; goto out; /* Connect completed during a ERESTARTSYS event */ } if (sk->sk_state == TCP_CLOSE && sock->state == SS_CONNECTING) { sock->state = SS_UNCONNECTED; err = -ECONNREFUSED; goto out; } err = -EISCONN; /* No reconnect on a seqpacket socket */ if (sk->sk_state == TCP_ESTABLISHED) goto out; sk->sk_state = TCP_CLOSE; sock->state = SS_UNCONNECTED; err = -EINVAL; if (addr_len != sizeof(struct sockaddr_irda)) goto out; /* Check if user supplied any destination device address */ if ((!addr->sir_addr) || (addr->sir_addr == DEV_ADDR_ANY)) { /* Try to find one suitable */ err = irda_discover_daddr_and_lsap_sel(self, addr->sir_name); if (err) { IRDA_DEBUG(0, "%s(), auto-connect failed!\n", __func__); goto out; } } else { /* Use the one provided by the user */ self->daddr = addr->sir_addr; IRDA_DEBUG(1, "%s(), daddr = %08x\n", __func__, self->daddr); /* If we don't have a valid service name, we assume the * user want to connect on a specific LSAP. Prevent * the use of invalid LSAPs (IrLMP 1.1 p10). Jean II */ if((addr->sir_name[0] != '\0') || (addr->sir_lsap_sel >= 0x70)) { /* Query remote LM-IAS using service name */ err = irda_find_lsap_sel(self, addr->sir_name); if (err) { IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); goto out; } } else { /* Directly connect to the remote LSAP * specified by the sir_lsap field. * Please use with caution, in IrDA LSAPs are * dynamic and there is no "well-known" LSAP. */ self->dtsap_sel = addr->sir_lsap_sel; } } /* Check if we have opened a local TSAP */ if (!self->tsap) irda_open_tsap(self, LSAP_ANY, addr->sir_name); /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = TCP_SYN_SENT; /* Connect to remote device */ err = irttp_connect_request(self->tsap, self->dtsap_sel, self->saddr, self->daddr, NULL, self->max_sdu_size_rx, NULL); if (err) { IRDA_DEBUG(0, "%s(), connect failed!\n", __func__); goto out; } /* Now the loop */ err = -EINPROGRESS; if (sk->sk_state != TCP_ESTABLISHED && (flags & O_NONBLOCK)) goto out; err = -ERESTARTSYS; if (wait_event_interruptible(*(sk_sleep(sk)), (sk->sk_state != TCP_SYN_SENT))) goto out; if (sk->sk_state != TCP_ESTABLISHED) { sock->state = SS_UNCONNECTED; if (sk->sk_prot->disconnect(sk, flags)) sock->state = SS_DISCONNECTING; err = sock_error(sk); if (!err) err = -ECONNRESET; goto out; } sock->state = SS_CONNECTED; /* At this point, IrLMP has assigned our source address */ self->saddr = irttp_get_saddr(self->tsap); err = 0; out: release_sock(sk); return err; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
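The irda_connect record above encodes the two user-visible ways of addressing a peer: pass a concrete sir_addr obtained from discovery, or leave it zero (treated like DEV_ADDR_ANY) with a non-empty sir_name, in which case the kernel discovers the device and resolves the LSAP through the IAS attribute IrDA:TinyTP:LsapSel. A userspace sketch under those assumptions; the service name is made up for illustration.

#include <string.h>
#include <sys/socket.h>
#include <linux/types.h>
#include <linux/irda.h>

/* Connect an AF_IRDA stream socket by service name; with sir_addr
 * left at 0 the kernel auto-discovers the peer (see irda_connect). */
int irda_dial(const char *service)
{
	struct sockaddr_irda peer;
	int sk = socket(AF_IRDA, SOCK_STREAM, 0);

	if (sk < 0)
		return -1;
	memset(&peer, 0, sizeof(peer));
	peer.sir_family = AF_IRDA;
	peer.sir_addr = 0;	/* 0 or DEV_ADDR_ANY: auto-discover */
	strncpy(peer.sir_name, service, sizeof(peer.sir_name) - 1);

	if (connect(sk, (struct sockaddr *)&peer, sizeof(peer)) < 0)
		return -1;
	return sk;
}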
24,542
static void irda_connect_confirm(void *instance, void *sap, struct qos_info *qos, __u32 max_sdu_size, __u8 max_header_size, struct sk_buff *skb) { struct irda_sock *self; struct sock *sk; self = instance; IRDA_DEBUG(2, "%s(%p)\n", __func__, self); sk = instance; if (sk == NULL) { dev_kfree_skb(skb); return; } dev_kfree_skb(skb); /* How much header space do we need to reserve */ self->max_header_size = max_header_size; /* IrTTP max SDU size in transmit direction */ self->max_sdu_size_tx = max_sdu_size; /* Find out what the largest chunk of data that we can transmit is */ switch (sk->sk_type) { case SOCK_STREAM: if (max_sdu_size != 0) { IRDA_ERROR("%s: max_sdu_size must be 0\n", __func__); return; } self->max_data_size = irttp_get_max_seg_size(self->tsap); break; case SOCK_SEQPACKET: if (max_sdu_size == 0) { IRDA_ERROR("%s: max_sdu_size cannot be 0\n", __func__); return; } self->max_data_size = max_sdu_size; break; default: self->max_data_size = irttp_get_max_seg_size(self->tsap); } IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, self->max_data_size); memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); /* We are now connected! */ sk->sk_state = TCP_ESTABLISHED; sk->sk_state_change(sk); }
+Info
0
static void irda_connect_confirm(void *instance, void *sap, struct qos_info *qos, __u32 max_sdu_size, __u8 max_header_size, struct sk_buff *skb) { struct irda_sock *self; struct sock *sk; self = instance; IRDA_DEBUG(2, "%s(%p)\n", __func__, self); sk = instance; if (sk == NULL) { dev_kfree_skb(skb); return; } dev_kfree_skb(skb); /* How much header space do we need to reserve */ self->max_header_size = max_header_size; /* IrTTP max SDU size in transmit direction */ self->max_sdu_size_tx = max_sdu_size; /* Find out what the largest chunk of data that we can transmit is */ switch (sk->sk_type) { case SOCK_STREAM: if (max_sdu_size != 0) { IRDA_ERROR("%s: max_sdu_size must be 0\n", __func__); return; } self->max_data_size = irttp_get_max_seg_size(self->tsap); break; case SOCK_SEQPACKET: if (max_sdu_size == 0) { IRDA_ERROR("%s: max_sdu_size cannot be 0\n", __func__); return; } self->max_data_size = max_sdu_size; break; default: self->max_data_size = irttp_get_max_seg_size(self->tsap); } IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, self->max_data_size); memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); /* We are now connected! */ sk->sk_state = TCP_ESTABLISHED; sk->sk_state_change(sk); }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,543
static void irda_connect_indication(void *instance, void *sap, struct qos_info *qos, __u32 max_sdu_size, __u8 max_header_size, struct sk_buff *skb) { struct irda_sock *self; struct sock *sk; self = instance; IRDA_DEBUG(2, "%s(%p)\n", __func__, self); sk = instance; if (sk == NULL) { dev_kfree_skb(skb); return; } /* How much header space do we need to reserve */ self->max_header_size = max_header_size; /* IrTTP max SDU size in transmit direction */ self->max_sdu_size_tx = max_sdu_size; /* Find out what the largest chunk of data that we can transmit is */ switch (sk->sk_type) { case SOCK_STREAM: if (max_sdu_size != 0) { IRDA_ERROR("%s: max_sdu_size must be 0\n", __func__); kfree_skb(skb); return; } self->max_data_size = irttp_get_max_seg_size(self->tsap); break; case SOCK_SEQPACKET: if (max_sdu_size == 0) { IRDA_ERROR("%s: max_sdu_size cannot be 0\n", __func__); kfree_skb(skb); return; } self->max_data_size = max_sdu_size; break; default: self->max_data_size = irttp_get_max_seg_size(self->tsap); } IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, self->max_data_size); memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_state_change(sk); }
+Info
0
static void irda_connect_indication(void *instance, void *sap, struct qos_info *qos, __u32 max_sdu_size, __u8 max_header_size, struct sk_buff *skb) { struct irda_sock *self; struct sock *sk; self = instance; IRDA_DEBUG(2, "%s(%p)\n", __func__, self); sk = instance; if (sk == NULL) { dev_kfree_skb(skb); return; } /* How much header space do we need to reserve */ self->max_header_size = max_header_size; /* IrTTP max SDU size in transmit direction */ self->max_sdu_size_tx = max_sdu_size; /* Find out what the largest chunk of data that we can transmit is */ switch (sk->sk_type) { case SOCK_STREAM: if (max_sdu_size != 0) { IRDA_ERROR("%s: max_sdu_size must be 0\n", __func__); kfree_skb(skb); return; } self->max_data_size = irttp_get_max_seg_size(self->tsap); break; case SOCK_SEQPACKET: if (max_sdu_size == 0) { IRDA_ERROR("%s: max_sdu_size cannot be 0\n", __func__); kfree_skb(skb); return; } self->max_data_size = max_sdu_size; break; default: self->max_data_size = irttp_get_max_seg_size(self->tsap); } IRDA_DEBUG(2, "%s(), max_data_size=%d\n", __func__, self->max_data_size); memcpy(&self->qos_tx, qos, sizeof(struct qos_info)); skb_queue_tail(&sk->sk_receive_queue, skb); sk->sk_state_change(sk); }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,544
static void irda_connect_response(struct irda_sock *self) { struct sk_buff *skb; IRDA_DEBUG(2, "%s()\n", __func__); skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, GFP_ATOMIC); if (skb == NULL) { IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", __func__); return; } /* Reserve space for MUX_CONTROL and LAP header */ skb_reserve(skb, IRDA_MAX_HEADER); irttp_connect_response(self->tsap, self->max_sdu_size_rx, skb); }
+Info
0
static void irda_connect_response(struct irda_sock *self) { struct sk_buff *skb; IRDA_DEBUG(2, "%s()\n", __func__); skb = alloc_skb(TTP_MAX_HEADER + TTP_SAR_HEADER, GFP_ATOMIC); if (skb == NULL) { IRDA_DEBUG(0, "%s() Unable to allocate sk_buff!\n", __func__); return; } /* Reserve space for MUX_CONTROL and LAP header */ skb_reserve(skb, IRDA_MAX_HEADER); irttp_connect_response(self->tsap, self->max_sdu_size_rx, skb); }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,545
static int irda_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct irda_sock *self; IRDA_DEBUG(2, "%s()\n", __func__); if (net != &init_net) return -EAFNOSUPPORT; /* Check for valid socket type */ switch (sock->type) { case SOCK_STREAM: /* For TTP connections with SAR disabled */ case SOCK_SEQPACKET: /* For TTP connections with SAR enabled */ case SOCK_DGRAM: /* For TTP Unitdata or LMP Ultra transfers */ break; default: return -ESOCKTNOSUPPORT; } /* Allocate networking socket */ sk = sk_alloc(net, PF_IRDA, GFP_ATOMIC, &irda_proto); if (sk == NULL) return -ENOMEM; self = irda_sk(sk); IRDA_DEBUG(2, "%s() : self is %p\n", __func__, self); init_waitqueue_head(&self->query_wait); switch (sock->type) { case SOCK_STREAM: sock->ops = &irda_stream_ops; self->max_sdu_size_rx = TTP_SAR_DISABLE; break; case SOCK_SEQPACKET: sock->ops = &irda_seqpacket_ops; self->max_sdu_size_rx = TTP_SAR_UNBOUND; break; case SOCK_DGRAM: switch (protocol) { #ifdef CONFIG_IRDA_ULTRA case IRDAPROTO_ULTRA: sock->ops = &irda_ultra_ops; /* Initialise now, because we may send on unbound * sockets. Jean II */ self->max_data_size = ULTRA_MAX_DATA - LMP_PID_HEADER; self->max_header_size = IRDA_MAX_HEADER + LMP_PID_HEADER; break; #endif /* CONFIG_IRDA_ULTRA */ case IRDAPROTO_UNITDATA: sock->ops = &irda_dgram_ops; /* We let Unitdata conn. be like seqpack conn. */ self->max_sdu_size_rx = TTP_SAR_UNBOUND; break; default: sk_free(sk); return -ESOCKTNOSUPPORT; } break; default: sk_free(sk); return -ESOCKTNOSUPPORT; } /* Initialise networking socket struct */ sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */ sk->sk_family = PF_IRDA; sk->sk_protocol = protocol; /* Register as a client with IrLMP */ self->ckey = irlmp_register_client(0, NULL, NULL, NULL); self->mask.word = 0xffff; self->rx_flow = self->tx_flow = FLOW_START; self->nslots = DISCOVERY_DEFAULT_SLOTS; self->daddr = DEV_ADDR_ANY; /* Until we get connected */ self->saddr = 0x0; /* so IrLMP assign us any link */ return 0; }
+Info
0
static int irda_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk; struct irda_sock *self; IRDA_DEBUG(2, "%s()\n", __func__); if (net != &init_net) return -EAFNOSUPPORT; /* Check for valid socket type */ switch (sock->type) { case SOCK_STREAM: /* For TTP connections with SAR disabled */ case SOCK_SEQPACKET: /* For TTP connections with SAR enabled */ case SOCK_DGRAM: /* For TTP Unitdata or LMP Ultra transfers */ break; default: return -ESOCKTNOSUPPORT; } /* Allocate networking socket */ sk = sk_alloc(net, PF_IRDA, GFP_ATOMIC, &irda_proto); if (sk == NULL) return -ENOMEM; self = irda_sk(sk); IRDA_DEBUG(2, "%s() : self is %p\n", __func__, self); init_waitqueue_head(&self->query_wait); switch (sock->type) { case SOCK_STREAM: sock->ops = &irda_stream_ops; self->max_sdu_size_rx = TTP_SAR_DISABLE; break; case SOCK_SEQPACKET: sock->ops = &irda_seqpacket_ops; self->max_sdu_size_rx = TTP_SAR_UNBOUND; break; case SOCK_DGRAM: switch (protocol) { #ifdef CONFIG_IRDA_ULTRA case IRDAPROTO_ULTRA: sock->ops = &irda_ultra_ops; /* Initialise now, because we may send on unbound * sockets. Jean II */ self->max_data_size = ULTRA_MAX_DATA - LMP_PID_HEADER; self->max_header_size = IRDA_MAX_HEADER + LMP_PID_HEADER; break; #endif /* CONFIG_IRDA_ULTRA */ case IRDAPROTO_UNITDATA: sock->ops = &irda_dgram_ops; /* We let Unitdata conn. be like seqpack conn. */ self->max_sdu_size_rx = TTP_SAR_UNBOUND; break; default: sk_free(sk); return -ESOCKTNOSUPPORT; } break; default: sk_free(sk); return -ESOCKTNOSUPPORT; } /* Initialise networking socket struct */ sock_init_data(sock, sk); /* Note : set sk->sk_refcnt to 1 */ sk->sk_family = PF_IRDA; sk->sk_protocol = protocol; /* Register as a client with IrLMP */ self->ckey = irlmp_register_client(0, NULL, NULL, NULL); self->mask.word = 0xffff; self->rx_flow = self->tx_flow = FLOW_START; self->nslots = DISCOVERY_DEFAULT_SLOTS; self->daddr = DEV_ADDR_ANY; /* Until we get connected */ self->saddr = 0x0; /* so IrLMP assign us any link */ return 0; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,546
static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb) { struct irda_sock *self; struct sock *sk; int err; IRDA_DEBUG(3, "%s()\n", __func__); self = instance; sk = instance; err = sock_queue_rcv_skb(sk, skb); if (err) { IRDA_DEBUG(1, "%s(), error: no more mem!\n", __func__); self->rx_flow = FLOW_STOP; /* When we return error, TTP will need to requeue the skb */ return err; } return 0; }
+Info
0
static int irda_data_indication(void *instance, void *sap, struct sk_buff *skb) { struct irda_sock *self; struct sock *sk; int err; IRDA_DEBUG(3, "%s()\n", __func__); self = instance; sk = instance; err = sock_queue_rcv_skb(sk, skb); if (err) { IRDA_DEBUG(1, "%s(), error: no more mem!\n", __func__); self->rx_flow = FLOW_STOP; /* When we return error, TTP will need to requeue the skb */ return err; } return 0; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,547
static void irda_destroy_socket(struct irda_sock *self) { IRDA_DEBUG(2, "%s(%p)\n", __func__, self); /* Unregister with IrLMP */ irlmp_unregister_client(self->ckey); irlmp_unregister_service(self->skey); /* Unregister with LM-IAS */ if (self->ias_obj) { irias_delete_object(self->ias_obj); self->ias_obj = NULL; } if (self->iriap) { iriap_close(self->iriap); self->iriap = NULL; } if (self->tsap) { irttp_disconnect_request(self->tsap, NULL, P_NORMAL); irttp_close_tsap(self->tsap); self->tsap = NULL; } #ifdef CONFIG_IRDA_ULTRA if (self->lsap) { irlmp_close_lsap(self->lsap); self->lsap = NULL; } #endif /* CONFIG_IRDA_ULTRA */ }
+Info
0
static void irda_destroy_socket(struct irda_sock *self) { IRDA_DEBUG(2, "%s(%p)\n", __func__, self); /* Unregister with IrLMP */ irlmp_unregister_client(self->ckey); irlmp_unregister_service(self->skey); /* Unregister with LM-IAS */ if (self->ias_obj) { irias_delete_object(self->ias_obj); self->ias_obj = NULL; } if (self->iriap) { iriap_close(self->iriap); self->iriap = NULL; } if (self->tsap) { irttp_disconnect_request(self->tsap, NULL, P_NORMAL); irttp_close_tsap(self->tsap); self->tsap = NULL; } #ifdef CONFIG_IRDA_ULTRA if (self->lsap) { irlmp_close_lsap(self->lsap); self->lsap = NULL; } #endif /* CONFIG_IRDA_ULTRA */ }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,548
static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) { discinfo_t *discoveries; /* Copy of the discovery log */ int number; /* Number of nodes in the log */ int i; int err = -ENETUNREACH; __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */ __u8 dtsap_sel = 0x0; /* TSAP associated with it */ IRDA_DEBUG(2, "%s(), name=%s\n", __func__, name); /* Ask lmp for the current discovery log * Note : we have to use irlmp_get_discoveries(), as opposed * to play with the cachelog directly, because while we are * making our ias query, le log might change... */ discoveries = irlmp_get_discoveries(&number, self->mask.word, self->nslots); /* Check if the we got some results */ if (discoveries == NULL) return -ENETUNREACH; /* No nodes discovered */ /* * Now, check all discovered devices (if any), and connect * client only about the services that the client is * interested in... */ for(i = 0; i < number; i++) { /* Try the address in the log */ self->daddr = discoveries[i].daddr; self->saddr = 0x0; IRDA_DEBUG(1, "%s(), trying daddr = %08x\n", __func__, self->daddr); /* Query remote LM-IAS for this service */ err = irda_find_lsap_sel(self, name); switch (err) { case 0: /* We found the requested service */ if(daddr != DEV_ADDR_ANY) { IRDA_DEBUG(1, "%s(), discovered service ''%s'' in two different devices !!!\n", __func__, name); self->daddr = DEV_ADDR_ANY; kfree(discoveries); return -ENOTUNIQ; } /* First time we found that one, save it ! */ daddr = self->daddr; dtsap_sel = self->dtsap_sel; break; case -EADDRNOTAVAIL: /* Requested service simply doesn't exist on this node */ break; default: /* Something bad did happen :-( */ IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__); self->daddr = DEV_ADDR_ANY; kfree(discoveries); return -EHOSTUNREACH; break; } } /* Cleanup our copy of the discovery log */ kfree(discoveries); /* Check out what we found */ if(daddr == DEV_ADDR_ANY) { IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n", __func__, name); self->daddr = DEV_ADDR_ANY; return -EADDRNOTAVAIL; } /* Revert back to discovered device & service */ self->daddr = daddr; self->saddr = 0x0; self->dtsap_sel = dtsap_sel; IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at address %08x\n", __func__, name, self->daddr); return 0; }
+Info
0
static int irda_discover_daddr_and_lsap_sel(struct irda_sock *self, char *name) { discinfo_t *discoveries; /* Copy of the discovery log */ int number; /* Number of nodes in the log */ int i; int err = -ENETUNREACH; __u32 daddr = DEV_ADDR_ANY; /* Address we found the service on */ __u8 dtsap_sel = 0x0; /* TSAP associated with it */ IRDA_DEBUG(2, "%s(), name=%s\n", __func__, name); /* Ask lmp for the current discovery log * Note : we have to use irlmp_get_discoveries(), as opposed * to play with the cachelog directly, because while we are * making our ias query, le log might change... */ discoveries = irlmp_get_discoveries(&number, self->mask.word, self->nslots); /* Check if the we got some results */ if (discoveries == NULL) return -ENETUNREACH; /* No nodes discovered */ /* * Now, check all discovered devices (if any), and connect * client only about the services that the client is * interested in... */ for(i = 0; i < number; i++) { /* Try the address in the log */ self->daddr = discoveries[i].daddr; self->saddr = 0x0; IRDA_DEBUG(1, "%s(), trying daddr = %08x\n", __func__, self->daddr); /* Query remote LM-IAS for this service */ err = irda_find_lsap_sel(self, name); switch (err) { case 0: /* We found the requested service */ if(daddr != DEV_ADDR_ANY) { IRDA_DEBUG(1, "%s(), discovered service ''%s'' in two different devices !!!\n", __func__, name); self->daddr = DEV_ADDR_ANY; kfree(discoveries); return -ENOTUNIQ; } /* First time we found that one, save it ! */ daddr = self->daddr; dtsap_sel = self->dtsap_sel; break; case -EADDRNOTAVAIL: /* Requested service simply doesn't exist on this node */ break; default: /* Something bad did happen :-( */ IRDA_DEBUG(0, "%s(), unexpected IAS query failure\n", __func__); self->daddr = DEV_ADDR_ANY; kfree(discoveries); return -EHOSTUNREACH; break; } } /* Cleanup our copy of the discovery log */ kfree(discoveries); /* Check out what we found */ if(daddr == DEV_ADDR_ANY) { IRDA_DEBUG(1, "%s(), cannot discover service ''%s'' in any device !!!\n", __func__, name); self->daddr = DEV_ADDR_ANY; return -EADDRNOTAVAIL; } /* Revert back to discovered device & service */ self->daddr = daddr; self->saddr = 0x0; self->dtsap_sel = dtsap_sel; IRDA_DEBUG(1, "%s(), discovered requested service ''%s'' at address %08x\n", __func__, name, self->daddr); return 0; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,549
static void irda_discovery_timeout(u_long priv) { struct irda_sock *self; IRDA_DEBUG(2, "%s()\n", __func__); self = (struct irda_sock *) priv; BUG_ON(self == NULL); /* Nothing for the caller */ self->cachelog = NULL; self->cachedaddr = 0; self->errno = -ETIME; /* Wake up process if its still waiting... */ wake_up_interruptible(&self->query_wait); }
+Info
0
static void irda_discovery_timeout(u_long priv) { struct irda_sock *self; IRDA_DEBUG(2, "%s()\n", __func__); self = (struct irda_sock *) priv; BUG_ON(self == NULL); /* Nothing for the caller */ self->cachelog = NULL; self->cachedaddr = 0; self->errno = -ETIME; /* Wake up process if its still waiting... */ wake_up_interruptible(&self->query_wait); }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,550
static int irda_extract_ias_value(struct irda_ias_set *ias_opt, struct ias_value *ias_value) { /* Look at the type */ switch (ias_value->type) { case IAS_INTEGER: /* Copy the integer */ ias_opt->attribute.irda_attrib_int = ias_value->t.integer; break; case IAS_OCT_SEQ: /* Set length */ ias_opt->attribute.irda_attrib_octet_seq.len = ias_value->len; /* Copy over */ memcpy(ias_opt->attribute.irda_attrib_octet_seq.octet_seq, ias_value->t.oct_seq, ias_value->len); break; case IAS_STRING: /* Set length */ ias_opt->attribute.irda_attrib_string.len = ias_value->len; ias_opt->attribute.irda_attrib_string.charset = ias_value->charset; /* Copy over */ memcpy(ias_opt->attribute.irda_attrib_string.string, ias_value->t.string, ias_value->len); /* NULL terminate the string (avoid troubles) */ ias_opt->attribute.irda_attrib_string.string[ias_value->len] = '\0'; break; case IAS_MISSING: default : return -EINVAL; } /* Copy type over */ ias_opt->irda_attrib_type = ias_value->type; return 0; }
+Info
0
static int irda_extract_ias_value(struct irda_ias_set *ias_opt, struct ias_value *ias_value) { /* Look at the type */ switch (ias_value->type) { case IAS_INTEGER: /* Copy the integer */ ias_opt->attribute.irda_attrib_int = ias_value->t.integer; break; case IAS_OCT_SEQ: /* Set length */ ias_opt->attribute.irda_attrib_octet_seq.len = ias_value->len; /* Copy over */ memcpy(ias_opt->attribute.irda_attrib_octet_seq.octet_seq, ias_value->t.oct_seq, ias_value->len); break; case IAS_STRING: /* Set length */ ias_opt->attribute.irda_attrib_string.len = ias_value->len; ias_opt->attribute.irda_attrib_string.charset = ias_value->charset; /* Copy over */ memcpy(ias_opt->attribute.irda_attrib_string.string, ias_value->t.string, ias_value->len); /* NULL terminate the string (avoid troubles) */ ias_opt->attribute.irda_attrib_string.string[ias_value->len] = '\0'; break; case IAS_MISSING: default : return -EINVAL; } /* Copy type over */ ias_opt->irda_attrib_type = ias_value->type; return 0; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,551
static int irda_find_lsap_sel(struct irda_sock *self, char *name) { IRDA_DEBUG(2, "%s(%p, %s)\n", __func__, self, name); if (self->iriap) { IRDA_WARNING("%s(): busy with a previous query\n", __func__); return -EBUSY; } self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, irda_getvalue_confirm); if(self->iriap == NULL) return -ENOMEM; /* Treat unexpected wakeup as disconnect */ self->errno = -EHOSTUNREACH; /* Query remote LM-IAS */ iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr, name, "IrDA:TinyTP:LsapSel"); /* Wait for answer, if not yet finished (or failed) */ if (wait_event_interruptible(self->query_wait, (self->iriap==NULL))) /* Treat signals as disconnect */ return -EHOSTUNREACH; /* Check what happened */ if (self->errno) { /* Requested object/attribute doesn't exist */ if((self->errno == IAS_CLASS_UNKNOWN) || (self->errno == IAS_ATTRIB_UNKNOWN)) return -EADDRNOTAVAIL; else return -EHOSTUNREACH; } /* Get the remote TSAP selector */ switch (self->ias_result->type) { case IAS_INTEGER: IRDA_DEBUG(4, "%s() int=%d\n", __func__, self->ias_result->t.integer); if (self->ias_result->t.integer != -1) self->dtsap_sel = self->ias_result->t.integer; else self->dtsap_sel = 0; break; default: self->dtsap_sel = 0; IRDA_DEBUG(0, "%s(), bad type!\n", __func__); break; } if (self->ias_result) irias_delete_value(self->ias_result); if (self->dtsap_sel) return 0; return -EADDRNOTAVAIL; }
+Info
0
static int irda_find_lsap_sel(struct irda_sock *self, char *name) { IRDA_DEBUG(2, "%s(%p, %s)\n", __func__, self, name); if (self->iriap) { IRDA_WARNING("%s(): busy with a previous query\n", __func__); return -EBUSY; } self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, irda_getvalue_confirm); if(self->iriap == NULL) return -ENOMEM; /* Treat unexpected wakeup as disconnect */ self->errno = -EHOSTUNREACH; /* Query remote LM-IAS */ iriap_getvaluebyclass_request(self->iriap, self->saddr, self->daddr, name, "IrDA:TinyTP:LsapSel"); /* Wait for answer, if not yet finished (or failed) */ if (wait_event_interruptible(self->query_wait, (self->iriap==NULL))) /* Treat signals as disconnect */ return -EHOSTUNREACH; /* Check what happened */ if (self->errno) { /* Requested object/attribute doesn't exist */ if((self->errno == IAS_CLASS_UNKNOWN) || (self->errno == IAS_ATTRIB_UNKNOWN)) return -EADDRNOTAVAIL; else return -EHOSTUNREACH; } /* Get the remote TSAP selector */ switch (self->ias_result->type) { case IAS_INTEGER: IRDA_DEBUG(4, "%s() int=%d\n", __func__, self->ias_result->t.integer); if (self->ias_result->t.integer != -1) self->dtsap_sel = self->ias_result->t.integer; else self->dtsap_sel = 0; break; default: self->dtsap_sel = 0; IRDA_DEBUG(0, "%s(), bad type!\n", __func__); break; } if (self->ias_result) irias_delete_value(self->ias_result); if (self->dtsap_sel) return 0; return -EADDRNOTAVAIL; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,552
static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) { struct irda_sock *self; struct sock *sk; IRDA_DEBUG(2, "%s()\n", __func__); self = instance; sk = instance; BUG_ON(sk == NULL); switch (flow) { case FLOW_STOP: IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n", __func__); self->tx_flow = flow; break; case FLOW_START: self->tx_flow = flow; IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", __func__); wake_up_interruptible(sk_sleep(sk)); break; default: IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); /* Unknown flow command, better stop */ self->tx_flow = flow; break; } }
+Info
0
static void irda_flow_indication(void *instance, void *sap, LOCAL_FLOW flow) { struct irda_sock *self; struct sock *sk; IRDA_DEBUG(2, "%s()\n", __func__); self = instance; sk = instance; BUG_ON(sk == NULL); switch (flow) { case FLOW_STOP: IRDA_DEBUG(1, "%s(), IrTTP wants us to slow down\n", __func__); self->tx_flow = flow; break; case FLOW_START: self->tx_flow = flow; IRDA_DEBUG(1, "%s(), IrTTP wants us to start again\n", __func__); wake_up_interruptible(sk_sleep(sk)); break; default: IRDA_DEBUG(0, "%s(), Unknown flow command!\n", __func__); /* Unknown flow command, better stop */ self->tx_flow = flow; break; } }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,553
static int irda_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct sockaddr_irda saddr; struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); memset(&saddr, 0, sizeof(saddr)); if (peer) { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; saddr.sir_family = AF_IRDA; saddr.sir_lsap_sel = self->dtsap_sel; saddr.sir_addr = self->daddr; } else { saddr.sir_family = AF_IRDA; saddr.sir_lsap_sel = self->stsap_sel; saddr.sir_addr = self->saddr; } IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel); IRDA_DEBUG(1, "%s(), addr = %08x\n", __func__, saddr.sir_addr); /* uaddr_len come to us uninitialised */ *uaddr_len = sizeof (struct sockaddr_irda); memcpy(uaddr, &saddr, *uaddr_len); return 0; }
+Info
0
static int irda_getname(struct socket *sock, struct sockaddr *uaddr, int *uaddr_len, int peer) { struct sockaddr_irda saddr; struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); memset(&saddr, 0, sizeof(saddr)); if (peer) { if (sk->sk_state != TCP_ESTABLISHED) return -ENOTCONN; saddr.sir_family = AF_IRDA; saddr.sir_lsap_sel = self->dtsap_sel; saddr.sir_addr = self->daddr; } else { saddr.sir_family = AF_IRDA; saddr.sir_lsap_sel = self->stsap_sel; saddr.sir_addr = self->saddr; } IRDA_DEBUG(1, "%s(), tsap_sel = %#x\n", __func__, saddr.sir_lsap_sel); IRDA_DEBUG(1, "%s(), addr = %08x\n", __func__, saddr.sir_addr); /* uaddr_len come to us uninitialised */ *uaddr_len = sizeof (struct sockaddr_irda); memcpy(uaddr, &saddr, *uaddr_len); return 0; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
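irda_getname, just above, is the one handler in this batch that already defends against the leak class these records document: it memsets the whole sockaddr_irda before filling it, so struct padding never reaches user space, and it writes *uaddr_len itself because that value arrives uninitialised. The consuming side, sketched in userspace with the standard sockets API and the sockaddr_irda layout from linux/irda.h:

#include <sys/socket.h>
#include <linux/irda.h>

/* Read back the peer address that irda_getname fills when peer != 0. */
int peer_lsap(int sk)
{
	struct sockaddr_irda peer;
	socklen_t len = sizeof(peer);

	if (getpeername(sk, (struct sockaddr *)&peer, &len) < 0)
		return -1;	/* e.g. ENOTCONN before TCP_ESTABLISHED */
	return peer.sir_lsap_sel;	/* remote TSAP selector */
}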
24,554
static int irda_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); struct irda_device_list list; struct irda_device_info *discoveries; struct irda_ias_set * ias_opt; /* IAS get/query params */ struct ias_object * ias_obj; /* Object in IAS */ struct ias_attrib * ias_attr; /* Attribute in IAS object */ int daddr = DEV_ADDR_ANY; /* Dest address for IAS queries */ int val = 0; int len = 0; int err = 0; int offset, total; IRDA_DEBUG(2, "%s(%p)\n", __func__, self); if (level != SOL_IRLMP) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if(len < 0) return -EINVAL; lock_sock(sk); switch (optname) { case IRLMP_ENUMDEVICES: /* Offset to first device entry */ offset = sizeof(struct irda_device_list) - sizeof(struct irda_device_info); if (len < offset) { err = -EINVAL; goto out; } /* Ask lmp for the current discovery log */ discoveries = irlmp_get_discoveries(&list.len, self->mask.word, self->nslots); /* Check if the we got some results */ if (discoveries == NULL) { err = -EAGAIN; goto out; /* Didn't find any devices */ } /* Write total list length back to client */ if (copy_to_user(optval, &list, offset)) err = -EFAULT; /* Copy the list itself - watch for overflow */ if (list.len > 2048) { err = -EINVAL; goto bed; } total = offset + (list.len * sizeof(struct irda_device_info)); if (total > len) total = len; if (copy_to_user(optval+offset, discoveries, total - offset)) err = -EFAULT; /* Write total number of bytes used back to client */ if (put_user(total, optlen)) err = -EFAULT; bed: /* Free up our buffer */ kfree(discoveries); break; case IRLMP_MAX_SDU_SIZE: val = self->max_data_size; len = sizeof(int); if (put_user(len, optlen)) { err = -EFAULT; goto out; } if (copy_to_user(optval, &val, len)) { err = -EFAULT; goto out; } break; case IRLMP_IAS_GET: /* The user want an object from our local IAS database. * We just need to query the IAS and return the value * that we found */ /* Check that the user has allocated the right space for us */ if (len != sizeof(struct irda_ias_set)) { err = -EINVAL; goto out; } ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); if (ias_opt == NULL) { err = -ENOMEM; goto out; } /* Copy query to the driver. */ if (copy_from_user(ias_opt, optval, len)) { kfree(ias_opt); err = -EFAULT; goto out; } /* Find the object we target. * If the user gives us an empty string, we use the object * associated with this socket. This will workaround * duplicated class name - Jean II */ if(ias_opt->irda_class_name[0] == '\0') ias_obj = self->ias_obj; else ias_obj = irias_find_object(ias_opt->irda_class_name); if(ias_obj == (struct ias_object *) NULL) { kfree(ias_opt); err = -EINVAL; goto out; } /* Find the attribute (in the object) we target */ ias_attr = irias_find_attrib(ias_obj, ias_opt->irda_attrib_name); if(ias_attr == (struct ias_attrib *) NULL) { kfree(ias_opt); err = -EINVAL; goto out; } /* Translate from internal to user structure */ err = irda_extract_ias_value(ias_opt, ias_attr->value); if(err) { kfree(ias_opt); goto out; } /* Copy reply to the user */ if (copy_to_user(optval, ias_opt, sizeof(struct irda_ias_set))) { kfree(ias_opt); err = -EFAULT; goto out; } /* Note : don't need to put optlen, we checked it */ kfree(ias_opt); break; case IRLMP_IAS_QUERY: /* The user want an object from a remote IAS database. * We need to use IAP to query the remote database and * then wait for the answer to come back. */ /* Check that the user has allocated the right space for us */ if (len != sizeof(struct irda_ias_set)) { err = -EINVAL; goto out; } ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); if (ias_opt == NULL) { err = -ENOMEM; goto out; } /* Copy query to the driver. */ if (copy_from_user(ias_opt, optval, len)) { kfree(ias_opt); err = -EFAULT; goto out; } /* At this point, there are two cases... * 1) the socket is connected - that's the easy case, we * just query the device we are connected to... * 2) the socket is not connected - the user doesn't want * to connect and/or may not have a valid service name * (so can't create a fake connection). In this case, * we assume that the user pass us a valid destination * address in the requesting structure... */ if(self->daddr != DEV_ADDR_ANY) { /* We are connected - reuse known daddr */ daddr = self->daddr; } else { /* We are not connected, we must specify a valid * destination address */ daddr = ias_opt->daddr; if((!daddr) || (daddr == DEV_ADDR_ANY)) { kfree(ias_opt); err = -EINVAL; goto out; } } /* Check that we can proceed with IAP */ if (self->iriap) { IRDA_WARNING("%s: busy with a previous query\n", __func__); kfree(ias_opt); err = -EBUSY; goto out; } self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, irda_getvalue_confirm); if (self->iriap == NULL) { kfree(ias_opt); err = -ENOMEM; goto out; } /* Treat unexpected wakeup as disconnect */ self->errno = -EHOSTUNREACH; /* Query remote LM-IAS */ iriap_getvaluebyclass_request(self->iriap, self->saddr, daddr, ias_opt->irda_class_name, ias_opt->irda_attrib_name); /* Wait for answer, if not yet finished (or failed) */ if (wait_event_interruptible(self->query_wait, (self->iriap == NULL))) { /* pending request uses copy of ias_opt-content * we can free it regardless! */ kfree(ias_opt); /* Treat signals as disconnect */ err = -EHOSTUNREACH; goto out; } /* Check what happened */ if (self->errno) { kfree(ias_opt); /* Requested object/attribute doesn't exist */ if((self->errno == IAS_CLASS_UNKNOWN) || (self->errno == IAS_ATTRIB_UNKNOWN)) err = -EADDRNOTAVAIL; else err = -EHOSTUNREACH; goto out; } /* Translate from internal to user structure */ err = irda_extract_ias_value(ias_opt, self->ias_result); if (self->ias_result) irias_delete_value(self->ias_result); if (err) { kfree(ias_opt); goto out; } /* Copy reply to the user */ if (copy_to_user(optval, ias_opt, sizeof(struct irda_ias_set))) { kfree(ias_opt); err = -EFAULT; goto out; } /* Note : don't need to put optlen, we checked it */ kfree(ias_opt); break; case IRLMP_WAITDEVICE: /* This function is just another way of seeing life ;-) * IRLMP_ENUMDEVICES assumes that you have a static network, * and that you just want to pick one of the devices present. * On the other hand, in here we assume that no device is * present and that at some point in the future a device will * come into range. When this device arrive, we just wake * up the caller, so that he has time to connect to it before * the device goes away... * Note : once the node has been discovered for more than a * few second, it won't trigger this function, unless it * goes away and come back changes its hint bits (so we * might call it IRLMP_WAITNEWDEVICE). */ /* Check that the user is passing us an int */ if (len != sizeof(int)) { err = -EINVAL; goto out; } /* Get timeout in ms (max time we block the caller) */ if (get_user(val, (int __user *)optval)) { err = -EFAULT; goto out; } /* Tell IrLMP we want to be notified */ irlmp_update_client(self->ckey, self->mask.word, irda_selective_discovery_indication, NULL, (void *) self); /* Do some discovery (and also return cached results) */ irlmp_discovery_request(self->nslots); /* Wait until a node is discovered */ if (!self->cachedaddr) { IRDA_DEBUG(1, "%s(), nothing discovered yet, going to sleep...\n", __func__); /* Set watchdog timer to expire in <val> ms. */ self->errno = 0; setup_timer(&self->watchdog, irda_discovery_timeout, (unsigned long)self); mod_timer(&self->watchdog, jiffies + msecs_to_jiffies(val)); /* Wait for IR-LMP to call us back */ __wait_event_interruptible(self->query_wait, (self->cachedaddr != 0 || self->errno == -ETIME), err); /* If watchdog is still activated, kill it! */ del_timer(&(self->watchdog)); IRDA_DEBUG(1, "%s(), ...waking up !\n", __func__); if (err != 0) goto out; } else IRDA_DEBUG(1, "%s(), found immediately !\n", __func__); /* Tell IrLMP that we have been notified */ irlmp_update_client(self->ckey, self->mask.word, NULL, NULL, NULL); /* Check if the we got some results */ if (!self->cachedaddr) { err = -EAGAIN; /* Didn't find any devices */ goto out; } daddr = self->cachedaddr; /* Cleanup */ self->cachedaddr = 0; /* We return the daddr of the device that trigger the * wakeup. As irlmp pass us only the new devices, we * are sure that it's not an old device. * If the user want more details, he should query * the whole discovery log and pick one device... */ if (put_user(daddr, (int __user *)optval)) { err = -EFAULT; goto out; } break; default: err = -ENOPROTOOPT; } out: release_sock(sk); return err; }
+Info
0
static int irda_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen) { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); struct irda_device_list list; struct irda_device_info *discoveries; struct irda_ias_set * ias_opt; /* IAS get/query params */ struct ias_object * ias_obj; /* Object in IAS */ struct ias_attrib * ias_attr; /* Attribute in IAS object */ int daddr = DEV_ADDR_ANY; /* Dest address for IAS queries */ int val = 0; int len = 0; int err = 0; int offset, total; IRDA_DEBUG(2, "%s(%p)\n", __func__, self); if (level != SOL_IRLMP) return -ENOPROTOOPT; if (get_user(len, optlen)) return -EFAULT; if(len < 0) return -EINVAL; lock_sock(sk); switch (optname) { case IRLMP_ENUMDEVICES: /* Offset to first device entry */ offset = sizeof(struct irda_device_list) - sizeof(struct irda_device_info); if (len < offset) { err = -EINVAL; goto out; } /* Ask lmp for the current discovery log */ discoveries = irlmp_get_discoveries(&list.len, self->mask.word, self->nslots); /* Check if the we got some results */ if (discoveries == NULL) { err = -EAGAIN; goto out; /* Didn't find any devices */ } /* Write total list length back to client */ if (copy_to_user(optval, &list, offset)) err = -EFAULT; /* Copy the list itself - watch for overflow */ if (list.len > 2048) { err = -EINVAL; goto bed; } total = offset + (list.len * sizeof(struct irda_device_info)); if (total > len) total = len; if (copy_to_user(optval+offset, discoveries, total - offset)) err = -EFAULT; /* Write total number of bytes used back to client */ if (put_user(total, optlen)) err = -EFAULT; bed: /* Free up our buffer */ kfree(discoveries); break; case IRLMP_MAX_SDU_SIZE: val = self->max_data_size; len = sizeof(int); if (put_user(len, optlen)) { err = -EFAULT; goto out; } if (copy_to_user(optval, &val, len)) { err = -EFAULT; goto out; } break; case IRLMP_IAS_GET: /* The user want an object from our local IAS database. * We just need to query the IAS and return the value * that we found */ /* Check that the user has allocated the right space for us */ if (len != sizeof(struct irda_ias_set)) { err = -EINVAL; goto out; } ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); if (ias_opt == NULL) { err = -ENOMEM; goto out; } /* Copy query to the driver. */ if (copy_from_user(ias_opt, optval, len)) { kfree(ias_opt); err = -EFAULT; goto out; } /* Find the object we target. * If the user gives us an empty string, we use the object * associated with this socket. This will workaround * duplicated class name - Jean II */ if(ias_opt->irda_class_name[0] == '\0') ias_obj = self->ias_obj; else ias_obj = irias_find_object(ias_opt->irda_class_name); if(ias_obj == (struct ias_object *) NULL) { kfree(ias_opt); err = -EINVAL; goto out; } /* Find the attribute (in the object) we target */ ias_attr = irias_find_attrib(ias_obj, ias_opt->irda_attrib_name); if(ias_attr == (struct ias_attrib *) NULL) { kfree(ias_opt); err = -EINVAL; goto out; } /* Translate from internal to user structure */ err = irda_extract_ias_value(ias_opt, ias_attr->value); if(err) { kfree(ias_opt); goto out; } /* Copy reply to the user */ if (copy_to_user(optval, ias_opt, sizeof(struct irda_ias_set))) { kfree(ias_opt); err = -EFAULT; goto out; } /* Note : don't need to put optlen, we checked it */ kfree(ias_opt); break; case IRLMP_IAS_QUERY: /* The user want an object from a remote IAS database. * We need to use IAP to query the remote database and * then wait for the answer to come back. */ /* Check that the user has allocated the right space for us */ if (len != sizeof(struct irda_ias_set)) { err = -EINVAL; goto out; } ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); if (ias_opt == NULL) { err = -ENOMEM; goto out; } /* Copy query to the driver. */ if (copy_from_user(ias_opt, optval, len)) { kfree(ias_opt); err = -EFAULT; goto out; } /* At this point, there are two cases... * 1) the socket is connected - that's the easy case, we * just query the device we are connected to... * 2) the socket is not connected - the user doesn't want * to connect and/or may not have a valid service name * (so can't create a fake connection). In this case, * we assume that the user pass us a valid destination * address in the requesting structure... */ if(self->daddr != DEV_ADDR_ANY) { /* We are connected - reuse known daddr */ daddr = self->daddr; } else { /* We are not connected, we must specify a valid * destination address */ daddr = ias_opt->daddr; if((!daddr) || (daddr == DEV_ADDR_ANY)) { kfree(ias_opt); err = -EINVAL; goto out; } } /* Check that we can proceed with IAP */ if (self->iriap) { IRDA_WARNING("%s: busy with a previous query\n", __func__); kfree(ias_opt); err = -EBUSY; goto out; } self->iriap = iriap_open(LSAP_ANY, IAS_CLIENT, self, irda_getvalue_confirm); if (self->iriap == NULL) { kfree(ias_opt); err = -ENOMEM; goto out; } /* Treat unexpected wakeup as disconnect */ self->errno = -EHOSTUNREACH; /* Query remote LM-IAS */ iriap_getvaluebyclass_request(self->iriap, self->saddr, daddr, ias_opt->irda_class_name, ias_opt->irda_attrib_name); /* Wait for answer, if not yet finished (or failed) */ if (wait_event_interruptible(self->query_wait, (self->iriap == NULL))) { /* pending request uses copy of ias_opt-content * we can free it regardless! */ kfree(ias_opt); /* Treat signals as disconnect */ err = -EHOSTUNREACH; goto out; } /* Check what happened */ if (self->errno) { kfree(ias_opt); /* Requested object/attribute doesn't exist */ if((self->errno == IAS_CLASS_UNKNOWN) || (self->errno == IAS_ATTRIB_UNKNOWN)) err = -EADDRNOTAVAIL; else err = -EHOSTUNREACH; goto out; } /* Translate from internal to user structure */ err = irda_extract_ias_value(ias_opt, self->ias_result); if (self->ias_result) irias_delete_value(self->ias_result); if (err) { kfree(ias_opt); goto out; } /* Copy reply to the user */ if (copy_to_user(optval, ias_opt, sizeof(struct irda_ias_set))) { kfree(ias_opt); err = -EFAULT; goto out; } /* Note : don't need to put optlen, we checked it */ kfree(ias_opt); break; case IRLMP_WAITDEVICE: /* This function is just another way of seeing life ;-) * IRLMP_ENUMDEVICES assumes that you have a static network, * and that you just want to pick one of the devices present. * On the other hand, in here we assume that no device is * present and that at some point in the future a device will * come into range. When this device arrive, we just wake * up the caller, so that he has time to connect to it before * the device goes away... * Note : once the node has been discovered for more than a * few second, it won't trigger this function, unless it * goes away and come back changes its hint bits (so we * might call it IRLMP_WAITNEWDEVICE). */ /* Check that the user is passing us an int */ if (len != sizeof(int)) { err = -EINVAL; goto out; } /* Get timeout in ms (max time we block the caller) */ if (get_user(val, (int __user *)optval)) { err = -EFAULT; goto out; } /* Tell IrLMP we want to be notified */ irlmp_update_client(self->ckey, self->mask.word, irda_selective_discovery_indication, NULL, (void *) self); /* Do some discovery (and also return cached results) */ irlmp_discovery_request(self->nslots); /* Wait until a node is discovered */ if (!self->cachedaddr) { IRDA_DEBUG(1, "%s(), nothing discovered yet, going to sleep...\n", __func__); /* Set watchdog timer to expire in <val> ms. */ self->errno = 0; setup_timer(&self->watchdog, irda_discovery_timeout, (unsigned long)self); mod_timer(&self->watchdog, jiffies + msecs_to_jiffies(val)); /* Wait for IR-LMP to call us back */ __wait_event_interruptible(self->query_wait, (self->cachedaddr != 0 || self->errno == -ETIME), err); /* If watchdog is still activated, kill it! */ del_timer(&(self->watchdog)); IRDA_DEBUG(1, "%s(), ...waking up !\n", __func__); if (err != 0) goto out; } else IRDA_DEBUG(1, "%s(), found immediately !\n", __func__); /* Tell IrLMP that we have been notified */ irlmp_update_client(self->ckey, self->mask.word, NULL, NULL, NULL); /* Check if the we got some results */ if (!self->cachedaddr) { err = -EAGAIN; /* Didn't find any devices */ goto out; } daddr = self->cachedaddr; /* Cleanup */ self->cachedaddr = 0; /* We return the daddr of the device that trigger the * wakeup. As irlmp pass us only the new devices, we * are sure that it's not an old device. * If the user want more details, he should query * the whole discovery log and pick one device... */ if (put_user(daddr, (int __user *)optval)) { err = -EFAULT; goto out; } break; default: err = -ENOPROTOOPT; } out: release_sock(sk); return err; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
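The patch for this record zeroes msg->msg_namelen at the top of irda_recvmsg_dgram. The reason is the recvmsg() contract: the kernel copies msg_namelen back to userspace even when it never writes a source address into msg_name, so a stale length lets the caller read uninitialized kernel stack bytes (the CWE-200 leak). A minimal userspace sketch of that contract, using AF_INET/UDP as a stand-in transport since IrDA hardware is rarely at hand:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	socklen_t alen = sizeof(addr);
	char payload[16], namebuf[128];
	struct msghdr msg;
	struct iovec iov = { .iov_base = payload, .iov_len = sizeof(payload) };

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	getsockname(fd, (struct sockaddr *)&addr, &alen);
	sendto(fd, "ping", 4, 0, (struct sockaddr *)&addr, sizeof(addr));

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = namebuf;		/* deliberately uninitialized buffer */
	msg.msg_namelen = sizeof(namebuf);
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	recvmsg(fd, &msg, 0);

	/* The kernel must set msg_namelen to the bytes it actually wrote;
	 * a fixed kernel reports sizeof(struct sockaddr_in) here, never
	 * the stale 128 we passed in. */
	printf("msg_namelen = %u\n", (unsigned)msg.msg_namelen);
	close(fd);
	return 0;
}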
24,555
static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; int err; IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd); err = -EINVAL; switch (cmd) { case TIOCOUTQ: { long amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; err = put_user(amount, (unsigned int __user *)arg); break; } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; err = put_user(amount, (unsigned int __user *)arg); break; } case SIOCGSTAMP: if (sk != NULL) err = sock_get_timestamp(sk, (struct timeval __user *)arg); break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: break; default: IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__); err = -ENOIOCTLCMD; } return err; }
+Info
0
static int irda_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg) { struct sock *sk = sock->sk; int err; IRDA_DEBUG(4, "%s(), cmd=%#x\n", __func__, cmd); err = -EINVAL; switch (cmd) { case TIOCOUTQ: { long amount; amount = sk->sk_sndbuf - sk_wmem_alloc_get(sk); if (amount < 0) amount = 0; err = put_user(amount, (unsigned int __user *)arg); break; } case TIOCINQ: { struct sk_buff *skb; long amount = 0L; /* These two are safe on a single CPU system as only user tasks fiddle here */ if ((skb = skb_peek(&sk->sk_receive_queue)) != NULL) amount = skb->len; err = put_user(amount, (unsigned int __user *)arg); break; } case SIOCGSTAMP: if (sk != NULL) err = sock_get_timestamp(sk, (struct timeval __user *)arg); break; case SIOCGIFADDR: case SIOCSIFADDR: case SIOCGIFDSTADDR: case SIOCSIFDSTADDR: case SIOCGIFBRDADDR: case SIOCSIFBRDADDR: case SIOCGIFNETMASK: case SIOCSIFNETMASK: case SIOCGIFMETRIC: case SIOCSIFMETRIC: break; default: IRDA_DEBUG(1, "%s(), doing device ioctl!\n", __func__); err = -ENOIOCTLCMD; } return err; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
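The ioctl handler above gives TIOCOUTQ and TIOCINQ family-specific meanings: for IrDA, TIOCOUTQ reports the free space left in the send buffer (sk_sndbuf minus sk_wmem_alloc, clamped at zero), and TIOCINQ reports the length of the frame at the head of the receive queue rather than the total bytes queued. The calling convention is the same on any socket; a quick UDP sketch (where, by contrast, TIOCOUTQ reports bytes still queued and TIOCINQ the size of the next datagram):

#include <stdio.h>
#include <sys/ioctl.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_addr.s_addr = htonl(INADDR_LOOPBACK) };
	socklen_t alen = sizeof(addr);
	int pending = 0, queued = 0;

	bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	getsockname(fd, (struct sockaddr *)&addr, &alen);
	sendto(fd, "abcd", 4, 0, (struct sockaddr *)&addr, sizeof(addr));
	sendto(fd, "efghij", 6, 0, (struct sockaddr *)&addr, sizeof(addr));

	ioctl(fd, TIOCINQ, &pending);	/* size of the *next* datagram: 4 */
	ioctl(fd, TIOCOUTQ, &queued);	/* unsent bytes still owned by the socket */
	printf("TIOCINQ=%d TIOCOUTQ=%d\n", pending, queued);
	close(fd);
	return 0;
}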
24,556
static int irda_open_lsap(struct irda_sock *self, int pid)
{
	notify_t notify;

	if (self->lsap) {
		IRDA_WARNING("%s(), busy!\n", __func__);
		return -EBUSY;
	}

	/* Initialize callbacks to be used by the IrDA stack */
	irda_notify_init(&notify);
	notify.udata_indication = irda_data_indication;
	notify.instance = self;
	strncpy(notify.name, "Ultra", NOTIFY_MAX_NAME);

	self->lsap = irlmp_open_lsap(LSAP_CONNLESS, &notify, pid);
	if (self->lsap == NULL) {
		IRDA_DEBUG(0, "%s(), Unable to allocate LSAP!\n", __func__);
		return -ENOMEM;
	}

	return 0;
}
+Info
0
static int irda_open_lsap(struct irda_sock *self, int pid)
{
	notify_t notify;

	if (self->lsap) {
		IRDA_WARNING("%s(), busy!\n", __func__);
		return -EBUSY;
	}

	/* Initialize callbacks to be used by the IrDA stack */
	irda_notify_init(&notify);
	notify.udata_indication = irda_data_indication;
	notify.instance = self;
	strncpy(notify.name, "Ultra", NOTIFY_MAX_NAME);

	self->lsap = irlmp_open_lsap(LSAP_CONNLESS, &notify, pid);
	if (self->lsap == NULL) {
		IRDA_DEBUG(0, "%s(), Unable to allocate LSAP!\n", __func__);
		return -ENOMEM;
	}

	return 0;
}
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,557
static int irda_release(struct socket *sock) { struct sock *sk = sock->sk; IRDA_DEBUG(2, "%s()\n", __func__); if (sk == NULL) return 0; lock_sock(sk); sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); /* Destroy IrDA socket */ irda_destroy_socket(irda_sk(sk)); sock_orphan(sk); sock->sk = NULL; release_sock(sk); /* Purge queues (see sock_init_data()) */ skb_queue_purge(&sk->sk_receive_queue); /* Destroy networking socket if we are the last reference on it, * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */ sock_put(sk); /* Notes on socket locking and deallocation... - Jean II * In theory we should put pairs of sock_hold() / sock_put() to * prevent the socket to be destroyed whenever there is an * outstanding request or outstanding incoming packet or event. * * 1) This may include IAS request, both in connect and getsockopt. * Unfortunately, the situation is a bit more messy than it looks, * because we close iriap and kfree(self) above. * * 2) This may include selective discovery in getsockopt. * Same stuff as above, irlmp registration and self are gone. * * Probably 1 and 2 may not matter, because it's all triggered * by a process and the socket layer already prevent the * socket to go away while a process is holding it, through * sockfd_put() and fput()... * * 3) This may include deferred TSAP closure. In particular, * we may receive a late irda_disconnect_indication() * Fortunately, (tsap_cb *)->close_pend should protect us * from that. * * I did some testing on SMP, and it looks solid. And the socket * memory leak is now gone... - Jean II */ return 0; }
+Info
0
static int irda_release(struct socket *sock) { struct sock *sk = sock->sk; IRDA_DEBUG(2, "%s()\n", __func__); if (sk == NULL) return 0; lock_sock(sk); sk->sk_state = TCP_CLOSE; sk->sk_shutdown |= SEND_SHUTDOWN; sk->sk_state_change(sk); /* Destroy IrDA socket */ irda_destroy_socket(irda_sk(sk)); sock_orphan(sk); sock->sk = NULL; release_sock(sk); /* Purge queues (see sock_init_data()) */ skb_queue_purge(&sk->sk_receive_queue); /* Destroy networking socket if we are the last reference on it, * i.e. if(sk->sk_refcnt == 0) -> sk_free(sk) */ sock_put(sk); /* Notes on socket locking and deallocation... - Jean II * In theory we should put pairs of sock_hold() / sock_put() to * prevent the socket to be destroyed whenever there is an * outstanding request or outstanding incoming packet or event. * * 1) This may include IAS request, both in connect and getsockopt. * Unfortunately, the situation is a bit more messy than it looks, * because we close iriap and kfree(self) above. * * 2) This may include selective discovery in getsockopt. * Same stuff as above, irlmp registration and self are gone. * * Probably 1 and 2 may not matter, because it's all triggered * by a process and the socket layer already prevent the * socket to go away while a process is holding it, through * sockfd_put() and fput()... * * 3) This may include deferred TSAP closure. In particular, * we may receive a late irda_disconnect_indication() * Fortunately, (tsap_cb *)->close_pend should protect us * from that. * * I did some testing on SMP, and it looks solid. And the socket * memory leak is now gone... - Jean II */ return 0; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,558
static void irda_selective_discovery_indication(discinfo_t *discovery,
						DISCOVERY_MODE mode,
						void *priv)
{
	struct irda_sock *self;

	IRDA_DEBUG(2, "%s()\n", __func__);

	self = priv;
	if (!self) {
		IRDA_WARNING("%s: lost myself!\n", __func__);
		return;
	}

	/* Pass parameter to the caller */
	self->cachedaddr = discovery->daddr;

	/* Wake up process if it's waiting for the device to be discovered */
	wake_up_interruptible(&self->query_wait);
}
+Info
0
static void irda_selective_discovery_indication(discinfo_t *discovery,
						DISCOVERY_MODE mode,
						void *priv)
{
	struct irda_sock *self;

	IRDA_DEBUG(2, "%s()\n", __func__);

	self = priv;
	if (!self) {
		IRDA_WARNING("%s: lost myself!\n", __func__);
		return;
	}

	/* Pass parameter to the caller */
	self->cachedaddr = discovery->daddr;

	/* Wake up process if it's waiting for the device to be discovered */
	wake_up_interruptible(&self->query_wait);
}
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
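This callback is the notifier half of the IRLMP_WAITDEVICE machinery shown earlier: it publishes the discovered address in self->cachedaddr and wakes whoever sleeps on self->query_wait, while a watchdog timer wakes the sleeper with -ETIME if nothing shows up. The same waiter/notifier shape, sketched with a pthread condition variable and a deadline (compile with -pthread; the names cachedaddr and query_wait are borrowed from the kernel code, not a real userspace API):

#include <pthread.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t query_wait = PTHREAD_COND_INITIALIZER;
static unsigned int cachedaddr;		/* 0 means "nothing discovered yet" */

static void *discovery_thread(void *arg)
{
	sleep(1);			/* pretend a peer shows up */
	pthread_mutex_lock(&lock);
	cachedaddr = 0xdeadbeef;	/* discovery->daddr in the kernel code */
	pthread_cond_signal(&query_wait);
	pthread_mutex_unlock(&lock);
	return NULL;
}

int main(void)
{
	pthread_t tid;
	struct timespec deadline;
	int err = 0;

	pthread_create(&tid, NULL, discovery_thread, NULL);

	clock_gettime(CLOCK_REALTIME, &deadline);
	deadline.tv_sec += 5;		/* the <val> ms watchdog, simplified */

	pthread_mutex_lock(&lock);
	while (cachedaddr == 0 && err == 0)	/* loop handles spurious wakeups */
		err = pthread_cond_timedwait(&query_wait, &lock, &deadline);
	pthread_mutex_unlock(&lock);

	if (cachedaddr)
		printf("woken by discovery: daddr=0x%x\n", cachedaddr);
	else
		printf("timed out (the kernel would return -ETIME)\n");
	pthread_join(tid, NULL);
	return 0;
}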
24,559
static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct irda_sock *self; struct sk_buff *skb; int err = -EPIPE; IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | MSG_NOSIGNAL)) { return -EINVAL; } lock_sock(sk); if (sk->sk_shutdown & SEND_SHUTDOWN) goto out_err; if (sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } self = irda_sk(sk); /* Check if IrTTP is wants us to slow down */ if (wait_event_interruptible(*(sk_sleep(sk)), (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { err = -ERESTARTSYS; goto out; } /* Check if we are still connected */ if (sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } /* Check that we don't send out too big frames */ if (len > self->max_data_size) { IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n", __func__, len, self->max_data_size); len = self->max_data_size; } skb = sock_alloc_send_skb(sk, len + self->max_header_size + 16, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) goto out_err; skb_reserve(skb, self->max_header_size + 16); skb_reset_transport_header(skb); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { kfree_skb(skb); goto out_err; } /* * Just send the message to TinyTP, and let it deal with possible * errors. No need to duplicate all that here */ err = irttp_data_request(self->tsap, skb); if (err) { IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); goto out_err; } release_sock(sk); /* Tell client how much data we actually sent */ return len; out_err: err = sk_stream_error(sk, msg->msg_flags, err); out: release_sock(sk); return err; }
+Info
0
static int irda_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct irda_sock *self; struct sk_buff *skb; int err = -EPIPE; IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); /* Note : socket.c set MSG_EOR on SEQPACKET sockets */ if (msg->msg_flags & ~(MSG_DONTWAIT | MSG_EOR | MSG_CMSG_COMPAT | MSG_NOSIGNAL)) { return -EINVAL; } lock_sock(sk); if (sk->sk_shutdown & SEND_SHUTDOWN) goto out_err; if (sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } self = irda_sk(sk); /* Check if IrTTP is wants us to slow down */ if (wait_event_interruptible(*(sk_sleep(sk)), (self->tx_flow != FLOW_STOP || sk->sk_state != TCP_ESTABLISHED))) { err = -ERESTARTSYS; goto out; } /* Check if we are still connected */ if (sk->sk_state != TCP_ESTABLISHED) { err = -ENOTCONN; goto out; } /* Check that we don't send out too big frames */ if (len > self->max_data_size) { IRDA_DEBUG(2, "%s(), Chopping frame from %zd to %d bytes!\n", __func__, len, self->max_data_size); len = self->max_data_size; } skb = sock_alloc_send_skb(sk, len + self->max_header_size + 16, msg->msg_flags & MSG_DONTWAIT, &err); if (!skb) goto out_err; skb_reserve(skb, self->max_header_size + 16); skb_reset_transport_header(skb); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { kfree_skb(skb); goto out_err; } /* * Just send the message to TinyTP, and let it deal with possible * errors. No need to duplicate all that here */ err = irttp_data_request(self->tsap, skb); if (err) { IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); goto out_err; } release_sock(sk); /* Tell client how much data we actually sent */ return len; out_err: err = sk_stream_error(sk, msg->msg_flags, err); out: release_sock(sk); return err; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
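irda_sendmsg reserves header room in the skb, linearizes the caller's iovec with memcpy_fromiovec(), and silently chops anything beyond max_data_size instead of fragmenting. The userspace side of that copy is plain scatter/gather sendmsg(); a sketch over UDP, which walks msg_iov exactly the way the kernel does:

#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <sys/uio.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
				    .sin_port = htons(9) };	/* discard port */
	struct iovec iov[2] = {
		{ .iov_base = "hello ", .iov_len = 6 },
		{ .iov_base = "world",  .iov_len = 5 },
	};
	struct msghdr msg;

	memset(&msg, 0, sizeof(msg));
	msg.msg_name = &addr;
	msg.msg_namelen = sizeof(addr);
	msg.msg_iov = iov;
	msg.msg_iovlen = 2;

	/* The kernel walks msg_iov just as memcpy_fromiovec() does,
	 * linearizing both chunks into one 11-byte frame. */
	ssize_t n = sendmsg(fd, &msg, 0);
	printf("sent %zd bytes\n", n);
	close(fd);
	return 0;
}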
24,560
static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct irda_sock *self; struct sk_buff *skb; int err; IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) return -EINVAL; lock_sock(sk); if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); err = -EPIPE; goto out; } err = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; self = irda_sk(sk); /* * Check that we don't send out too big frames. This is an unreliable * service, so we have no fragmentation and no coalescence */ if (len > self->max_data_size) { IRDA_DEBUG(0, "%s(), Warning to much data! " "Chopping frame from %zd to %d bytes!\n", __func__, len, self->max_data_size); len = self->max_data_size; } skb = sock_alloc_send_skb(sk, len + self->max_header_size, msg->msg_flags & MSG_DONTWAIT, &err); err = -ENOBUFS; if (!skb) goto out; skb_reserve(skb, self->max_header_size); skb_reset_transport_header(skb); IRDA_DEBUG(4, "%s(), appending user data\n", __func__); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { kfree_skb(skb); goto out; } /* * Just send the message to TinyTP, and let it deal with possible * errors. No need to duplicate all that here */ err = irttp_udata_request(self->tsap, skb); if (err) { IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); goto out; } release_sock(sk); return len; out: release_sock(sk); return err; }
+Info
0
static int irda_sendmsg_dgram(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct irda_sock *self; struct sk_buff *skb; int err; IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) return -EINVAL; lock_sock(sk); if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); err = -EPIPE; goto out; } err = -ENOTCONN; if (sk->sk_state != TCP_ESTABLISHED) goto out; self = irda_sk(sk); /* * Check that we don't send out too big frames. This is an unreliable * service, so we have no fragmentation and no coalescence */ if (len > self->max_data_size) { IRDA_DEBUG(0, "%s(), Warning to much data! " "Chopping frame from %zd to %d bytes!\n", __func__, len, self->max_data_size); len = self->max_data_size; } skb = sock_alloc_send_skb(sk, len + self->max_header_size, msg->msg_flags & MSG_DONTWAIT, &err); err = -ENOBUFS; if (!skb) goto out; skb_reserve(skb, self->max_header_size); skb_reset_transport_header(skb); IRDA_DEBUG(4, "%s(), appending user data\n", __func__); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { kfree_skb(skb); goto out; } /* * Just send the message to TinyTP, and let it deal with possible * errors. No need to duplicate all that here */ err = irttp_udata_request(self->tsap, skb); if (err) { IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); goto out; } release_sock(sk); return len; out: release_sock(sk); return err; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,561
static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct irda_sock *self; __u8 pid = 0; int bound = 0; struct sk_buff *skb; int err; IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); err = -EINVAL; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) return -EINVAL; lock_sock(sk); err = -EPIPE; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); goto out; } self = irda_sk(sk); /* Check if an address was specified with sendto. Jean II */ if (msg->msg_name) { struct sockaddr_irda *addr = (struct sockaddr_irda *) msg->msg_name; err = -EINVAL; /* Check address, extract pid. Jean II */ if (msg->msg_namelen < sizeof(*addr)) goto out; if (addr->sir_family != AF_IRDA) goto out; pid = addr->sir_lsap_sel; if (pid & 0x80) { IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); err = -EOPNOTSUPP; goto out; } } else { /* Check that the socket is properly bound to an Ultra * port. Jean II */ if ((self->lsap == NULL) || (sk->sk_state != TCP_ESTABLISHED)) { IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n", __func__); err = -ENOTCONN; goto out; } /* Use PID from socket */ bound = 1; } /* * Check that we don't send out too big frames. This is an unreliable * service, so we have no fragmentation and no coalescence */ if (len > self->max_data_size) { IRDA_DEBUG(0, "%s(), Warning to much data! " "Chopping frame from %zd to %d bytes!\n", __func__, len, self->max_data_size); len = self->max_data_size; } skb = sock_alloc_send_skb(sk, len + self->max_header_size, msg->msg_flags & MSG_DONTWAIT, &err); err = -ENOBUFS; if (!skb) goto out; skb_reserve(skb, self->max_header_size); skb_reset_transport_header(skb); IRDA_DEBUG(4, "%s(), appending user data\n", __func__); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { kfree_skb(skb); goto out; } err = irlmp_connless_data_request((bound ? self->lsap : NULL), skb, pid); if (err) IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); out: release_sock(sk); return err ? : len; }
+Info
0
static int irda_sendmsg_ultra(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len) { struct sock *sk = sock->sk; struct irda_sock *self; __u8 pid = 0; int bound = 0; struct sk_buff *skb; int err; IRDA_DEBUG(4, "%s(), len=%zd\n", __func__, len); err = -EINVAL; if (msg->msg_flags & ~(MSG_DONTWAIT|MSG_CMSG_COMPAT)) return -EINVAL; lock_sock(sk); err = -EPIPE; if (sk->sk_shutdown & SEND_SHUTDOWN) { send_sig(SIGPIPE, current, 0); goto out; } self = irda_sk(sk); /* Check if an address was specified with sendto. Jean II */ if (msg->msg_name) { struct sockaddr_irda *addr = (struct sockaddr_irda *) msg->msg_name; err = -EINVAL; /* Check address, extract pid. Jean II */ if (msg->msg_namelen < sizeof(*addr)) goto out; if (addr->sir_family != AF_IRDA) goto out; pid = addr->sir_lsap_sel; if (pid & 0x80) { IRDA_DEBUG(0, "%s(), extension in PID not supp!\n", __func__); err = -EOPNOTSUPP; goto out; } } else { /* Check that the socket is properly bound to an Ultra * port. Jean II */ if ((self->lsap == NULL) || (sk->sk_state != TCP_ESTABLISHED)) { IRDA_DEBUG(0, "%s(), socket not bound to Ultra PID.\n", __func__); err = -ENOTCONN; goto out; } /* Use PID from socket */ bound = 1; } /* * Check that we don't send out too big frames. This is an unreliable * service, so we have no fragmentation and no coalescence */ if (len > self->max_data_size) { IRDA_DEBUG(0, "%s(), Warning to much data! " "Chopping frame from %zd to %d bytes!\n", __func__, len, self->max_data_size); len = self->max_data_size; } skb = sock_alloc_send_skb(sk, len + self->max_header_size, msg->msg_flags & MSG_DONTWAIT, &err); err = -ENOBUFS; if (!skb) goto out; skb_reserve(skb, self->max_header_size); skb_reset_transport_header(skb); IRDA_DEBUG(4, "%s(), appending user data\n", __func__); skb_put(skb, len); err = memcpy_fromiovec(skb_transport_header(skb), msg->msg_iov, len); if (err) { kfree_skb(skb); goto out; } err = irlmp_connless_data_request((bound ? self->lsap : NULL), skb, pid); if (err) IRDA_DEBUG(0, "%s(), err=%d\n", __func__, err); out: release_sock(sk); return err ? : len; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
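Before trusting a sendto() address, irda_sendmsg_ultra performs three checks: msg_namelen must cover a full sockaddr_irda, sir_family must be AF_IRDA, and the LSAP selector must not have the extension bit (0x80) set. The same defensive shape as a standalone helper; the struct here is a hypothetical stand-in trimmed to the fields the checks touch, and 23 is assumed to be AF_IRDA's numeric value on Linux:

#include <stdio.h>
#include <stddef.h>

#define AF_IRDA_FAMILY	23		/* assumed AF_IRDA value */

struct sir_addr_min {			/* hypothetical stand-in for sockaddr_irda */
	unsigned short	sir_family;
	unsigned char	sir_lsap_sel;
};

static int check_ultra_dest(const void *name, size_t namelen,
			    unsigned char *pid_out)
{
	const struct sir_addr_min *addr = name;

	if (namelen < sizeof(*addr))
		return -1;		/* kernel: -EINVAL */
	if (addr->sir_family != AF_IRDA_FAMILY)
		return -1;		/* kernel: -EINVAL */
	if (addr->sir_lsap_sel & 0x80)
		return -2;		/* kernel: -EOPNOTSUPP */
	*pid_out = addr->sir_lsap_sel;
	return 0;
}

int main(void)
{
	struct sir_addr_min good = { AF_IRDA_FAMILY, 0x42 };
	struct sir_addr_min bad  = { AF_IRDA_FAMILY, 0x80 | 0x01 };
	unsigned char pid;

	printf("good: %d\n", check_ultra_dest(&good, sizeof(good), &pid));
	printf("extension bit: %d\n", check_ultra_dest(&bad, sizeof(bad), &pid));
	printf("short namelen: %d\n", check_ultra_dest(&good, 1, &pid));
	return 0;
}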
24,562
static int irda_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); struct irda_ias_set *ias_opt; struct ias_object *ias_obj; struct ias_attrib * ias_attr; /* Attribute in IAS object */ int opt, free_ias = 0, err = 0; IRDA_DEBUG(2, "%s(%p)\n", __func__, self); if (level != SOL_IRLMP) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case IRLMP_IAS_SET: /* The user want to add an attribute to an existing IAS object * (in the IAS database) or to create a new object with this * attribute. * We first query IAS to know if the object exist, and then * create the right attribute... */ if (optlen != sizeof(struct irda_ias_set)) { err = -EINVAL; goto out; } ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); if (ias_opt == NULL) { err = -ENOMEM; goto out; } /* Copy query to the driver. */ if (copy_from_user(ias_opt, optval, optlen)) { kfree(ias_opt); err = -EFAULT; goto out; } /* Find the object we target. * If the user gives us an empty string, we use the object * associated with this socket. This will workaround * duplicated class name - Jean II */ if(ias_opt->irda_class_name[0] == '\0') { if(self->ias_obj == NULL) { kfree(ias_opt); err = -EINVAL; goto out; } ias_obj = self->ias_obj; } else ias_obj = irias_find_object(ias_opt->irda_class_name); /* Only ROOT can mess with the global IAS database. * Users can only add attributes to the object associated * with the socket they own - Jean II */ if((!capable(CAP_NET_ADMIN)) && ((ias_obj == NULL) || (ias_obj != self->ias_obj))) { kfree(ias_opt); err = -EPERM; goto out; } /* If the object doesn't exist, create it */ if(ias_obj == (struct ias_object *) NULL) { /* Create a new object */ ias_obj = irias_new_object(ias_opt->irda_class_name, jiffies); if (ias_obj == NULL) { kfree(ias_opt); err = -ENOMEM; goto out; } free_ias = 1; } /* Do we have the attribute already ? */ if(irias_find_attrib(ias_obj, ias_opt->irda_attrib_name)) { kfree(ias_opt); if (free_ias) { kfree(ias_obj->name); kfree(ias_obj); } err = -EINVAL; goto out; } /* Look at the type */ switch(ias_opt->irda_attrib_type) { case IAS_INTEGER: /* Add an integer attribute */ irias_add_integer_attrib( ias_obj, ias_opt->irda_attrib_name, ias_opt->attribute.irda_attrib_int, IAS_USER_ATTR); break; case IAS_OCT_SEQ: /* Check length */ if(ias_opt->attribute.irda_attrib_octet_seq.len > IAS_MAX_OCTET_STRING) { kfree(ias_opt); if (free_ias) { kfree(ias_obj->name); kfree(ias_obj); } err = -EINVAL; goto out; } /* Add an octet sequence attribute */ irias_add_octseq_attrib( ias_obj, ias_opt->irda_attrib_name, ias_opt->attribute.irda_attrib_octet_seq.octet_seq, ias_opt->attribute.irda_attrib_octet_seq.len, IAS_USER_ATTR); break; case IAS_STRING: /* Should check charset & co */ /* Check length */ /* The length is encoded in a __u8, and * IAS_MAX_STRING == 256, so there is no way * userspace can pass us a string too large. * Jean II */ /* NULL terminate the string (avoid troubles) */ ias_opt->attribute.irda_attrib_string.string[ias_opt->attribute.irda_attrib_string.len] = '\0'; /* Add a string attribute */ irias_add_string_attrib( ias_obj, ias_opt->irda_attrib_name, ias_opt->attribute.irda_attrib_string.string, IAS_USER_ATTR); break; default : kfree(ias_opt); if (free_ias) { kfree(ias_obj->name); kfree(ias_obj); } err = -EINVAL; goto out; } irias_insert_object(ias_obj); kfree(ias_opt); break; case IRLMP_IAS_DEL: /* The user want to delete an object from our local IAS * database. 
We just need to query the IAS, check is the * object is not owned by the kernel and delete it. */ if (optlen != sizeof(struct irda_ias_set)) { err = -EINVAL; goto out; } ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); if (ias_opt == NULL) { err = -ENOMEM; goto out; } /* Copy query to the driver. */ if (copy_from_user(ias_opt, optval, optlen)) { kfree(ias_opt); err = -EFAULT; goto out; } /* Find the object we target. * If the user gives us an empty string, we use the object * associated with this socket. This will workaround * duplicated class name - Jean II */ if(ias_opt->irda_class_name[0] == '\0') ias_obj = self->ias_obj; else ias_obj = irias_find_object(ias_opt->irda_class_name); if(ias_obj == (struct ias_object *) NULL) { kfree(ias_opt); err = -EINVAL; goto out; } /* Only ROOT can mess with the global IAS database. * Users can only del attributes from the object associated * with the socket they own - Jean II */ if((!capable(CAP_NET_ADMIN)) && ((ias_obj == NULL) || (ias_obj != self->ias_obj))) { kfree(ias_opt); err = -EPERM; goto out; } /* Find the attribute (in the object) we target */ ias_attr = irias_find_attrib(ias_obj, ias_opt->irda_attrib_name); if(ias_attr == (struct ias_attrib *) NULL) { kfree(ias_opt); err = -EINVAL; goto out; } /* Check is the user space own the object */ if(ias_attr->value->owner != IAS_USER_ATTR) { IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", __func__); kfree(ias_opt); err = -EPERM; goto out; } /* Remove the attribute (and maybe the object) */ irias_delete_attrib(ias_obj, ias_attr, 1); kfree(ias_opt); break; case IRLMP_MAX_SDU_SIZE: if (optlen < sizeof(int)) { err = -EINVAL; goto out; } if (get_user(opt, (int __user *)optval)) { err = -EFAULT; goto out; } /* Only possible for a seqpacket service (TTP with SAR) */ if (sk->sk_type != SOCK_SEQPACKET) { IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n", __func__, opt); self->max_sdu_size_rx = opt; } else { IRDA_WARNING("%s: not allowed to set MAXSDUSIZE for this socket type!\n", __func__); err = -ENOPROTOOPT; goto out; } break; case IRLMP_HINTS_SET: if (optlen < sizeof(int)) { err = -EINVAL; goto out; } /* The input is really a (__u8 hints[2]), easier as an int */ if (get_user(opt, (int __user *)optval)) { err = -EFAULT; goto out; } /* Unregister any old registration */ if (self->skey) irlmp_unregister_service(self->skey); self->skey = irlmp_register_service((__u16) opt); break; case IRLMP_HINT_MASK_SET: /* As opposed to the previous case which set the hint bits * that we advertise, this one set the filter we use when * making a discovery (nodes which don't match any hint * bit in the mask are not reported). */ if (optlen < sizeof(int)) { err = -EINVAL; goto out; } /* The input is really a (__u8 hints[2]), easier as an int */ if (get_user(opt, (int __user *)optval)) { err = -EFAULT; goto out; } /* Set the new hint mask */ self->mask.word = (__u16) opt; /* Mask out extension bits */ self->mask.word &= 0x7f7f; /* Check if no bits */ if(!self->mask.word) self->mask.word = 0xFFFF; break; default: err = -ENOPROTOOPT; break; } out: release_sock(sk); return err; }
+Info
0
static int irda_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen) { struct sock *sk = sock->sk; struct irda_sock *self = irda_sk(sk); struct irda_ias_set *ias_opt; struct ias_object *ias_obj; struct ias_attrib * ias_attr; /* Attribute in IAS object */ int opt, free_ias = 0, err = 0; IRDA_DEBUG(2, "%s(%p)\n", __func__, self); if (level != SOL_IRLMP) return -ENOPROTOOPT; lock_sock(sk); switch (optname) { case IRLMP_IAS_SET: /* The user want to add an attribute to an existing IAS object * (in the IAS database) or to create a new object with this * attribute. * We first query IAS to know if the object exist, and then * create the right attribute... */ if (optlen != sizeof(struct irda_ias_set)) { err = -EINVAL; goto out; } ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); if (ias_opt == NULL) { err = -ENOMEM; goto out; } /* Copy query to the driver. */ if (copy_from_user(ias_opt, optval, optlen)) { kfree(ias_opt); err = -EFAULT; goto out; } /* Find the object we target. * If the user gives us an empty string, we use the object * associated with this socket. This will workaround * duplicated class name - Jean II */ if(ias_opt->irda_class_name[0] == '\0') { if(self->ias_obj == NULL) { kfree(ias_opt); err = -EINVAL; goto out; } ias_obj = self->ias_obj; } else ias_obj = irias_find_object(ias_opt->irda_class_name); /* Only ROOT can mess with the global IAS database. * Users can only add attributes to the object associated * with the socket they own - Jean II */ if((!capable(CAP_NET_ADMIN)) && ((ias_obj == NULL) || (ias_obj != self->ias_obj))) { kfree(ias_opt); err = -EPERM; goto out; } /* If the object doesn't exist, create it */ if(ias_obj == (struct ias_object *) NULL) { /* Create a new object */ ias_obj = irias_new_object(ias_opt->irda_class_name, jiffies); if (ias_obj == NULL) { kfree(ias_opt); err = -ENOMEM; goto out; } free_ias = 1; } /* Do we have the attribute already ? */ if(irias_find_attrib(ias_obj, ias_opt->irda_attrib_name)) { kfree(ias_opt); if (free_ias) { kfree(ias_obj->name); kfree(ias_obj); } err = -EINVAL; goto out; } /* Look at the type */ switch(ias_opt->irda_attrib_type) { case IAS_INTEGER: /* Add an integer attribute */ irias_add_integer_attrib( ias_obj, ias_opt->irda_attrib_name, ias_opt->attribute.irda_attrib_int, IAS_USER_ATTR); break; case IAS_OCT_SEQ: /* Check length */ if(ias_opt->attribute.irda_attrib_octet_seq.len > IAS_MAX_OCTET_STRING) { kfree(ias_opt); if (free_ias) { kfree(ias_obj->name); kfree(ias_obj); } err = -EINVAL; goto out; } /* Add an octet sequence attribute */ irias_add_octseq_attrib( ias_obj, ias_opt->irda_attrib_name, ias_opt->attribute.irda_attrib_octet_seq.octet_seq, ias_opt->attribute.irda_attrib_octet_seq.len, IAS_USER_ATTR); break; case IAS_STRING: /* Should check charset & co */ /* Check length */ /* The length is encoded in a __u8, and * IAS_MAX_STRING == 256, so there is no way * userspace can pass us a string too large. * Jean II */ /* NULL terminate the string (avoid troubles) */ ias_opt->attribute.irda_attrib_string.string[ias_opt->attribute.irda_attrib_string.len] = '\0'; /* Add a string attribute */ irias_add_string_attrib( ias_obj, ias_opt->irda_attrib_name, ias_opt->attribute.irda_attrib_string.string, IAS_USER_ATTR); break; default : kfree(ias_opt); if (free_ias) { kfree(ias_obj->name); kfree(ias_obj); } err = -EINVAL; goto out; } irias_insert_object(ias_obj); kfree(ias_opt); break; case IRLMP_IAS_DEL: /* The user want to delete an object from our local IAS * database. 
We just need to query the IAS, check is the * object is not owned by the kernel and delete it. */ if (optlen != sizeof(struct irda_ias_set)) { err = -EINVAL; goto out; } ias_opt = kmalloc(sizeof(struct irda_ias_set), GFP_ATOMIC); if (ias_opt == NULL) { err = -ENOMEM; goto out; } /* Copy query to the driver. */ if (copy_from_user(ias_opt, optval, optlen)) { kfree(ias_opt); err = -EFAULT; goto out; } /* Find the object we target. * If the user gives us an empty string, we use the object * associated with this socket. This will workaround * duplicated class name - Jean II */ if(ias_opt->irda_class_name[0] == '\0') ias_obj = self->ias_obj; else ias_obj = irias_find_object(ias_opt->irda_class_name); if(ias_obj == (struct ias_object *) NULL) { kfree(ias_opt); err = -EINVAL; goto out; } /* Only ROOT can mess with the global IAS database. * Users can only del attributes from the object associated * with the socket they own - Jean II */ if((!capable(CAP_NET_ADMIN)) && ((ias_obj == NULL) || (ias_obj != self->ias_obj))) { kfree(ias_opt); err = -EPERM; goto out; } /* Find the attribute (in the object) we target */ ias_attr = irias_find_attrib(ias_obj, ias_opt->irda_attrib_name); if(ias_attr == (struct ias_attrib *) NULL) { kfree(ias_opt); err = -EINVAL; goto out; } /* Check is the user space own the object */ if(ias_attr->value->owner != IAS_USER_ATTR) { IRDA_DEBUG(1, "%s(), attempting to delete a kernel attribute\n", __func__); kfree(ias_opt); err = -EPERM; goto out; } /* Remove the attribute (and maybe the object) */ irias_delete_attrib(ias_obj, ias_attr, 1); kfree(ias_opt); break; case IRLMP_MAX_SDU_SIZE: if (optlen < sizeof(int)) { err = -EINVAL; goto out; } if (get_user(opt, (int __user *)optval)) { err = -EFAULT; goto out; } /* Only possible for a seqpacket service (TTP with SAR) */ if (sk->sk_type != SOCK_SEQPACKET) { IRDA_DEBUG(2, "%s(), setting max_sdu_size = %d\n", __func__, opt); self->max_sdu_size_rx = opt; } else { IRDA_WARNING("%s: not allowed to set MAXSDUSIZE for this socket type!\n", __func__); err = -ENOPROTOOPT; goto out; } break; case IRLMP_HINTS_SET: if (optlen < sizeof(int)) { err = -EINVAL; goto out; } /* The input is really a (__u8 hints[2]), easier as an int */ if (get_user(opt, (int __user *)optval)) { err = -EFAULT; goto out; } /* Unregister any old registration */ if (self->skey) irlmp_unregister_service(self->skey); self->skey = irlmp_register_service((__u16) opt); break; case IRLMP_HINT_MASK_SET: /* As opposed to the previous case which set the hint bits * that we advertise, this one set the filter we use when * making a discovery (nodes which don't match any hint * bit in the mask are not reported). */ if (optlen < sizeof(int)) { err = -EINVAL; goto out; } /* The input is really a (__u8 hints[2]), easier as an int */ if (get_user(opt, (int __user *)optval)) { err = -EFAULT; goto out; } /* Set the new hint mask */ self->mask.word = (__u16) opt; /* Mask out extension bits */ self->mask.word &= 0x7f7f; /* Check if no bits */ if(!self->mask.word) self->mask.word = 0xFFFF; break; default: err = -ENOPROTOOPT; break; } out: release_sock(sk); return err; }
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
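IRLMP_MAX_SDU_SIZE, IRLMP_HINTS_SET and IRLMP_HINT_MASK_SET all follow the classic "optval is an int" setsockopt contract: reject optlen < sizeof(int), then get_user() the value. The caller's side is identical for any integer option; SO_RCVBUF shown as a generic stand-in:

#include <stdio.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_DGRAM, 0);
	int val = 1 << 16;
	socklen_t len = sizeof(val);

	/* optlen must cover a full int or the kernel returns -EINVAL,
	 * exactly the check irda_setsockopt performs. */
	if (setsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, sizeof(val)) < 0)
		perror("setsockopt");

	if (getsockopt(fd, SOL_SOCKET, SO_RCVBUF, &val, &len) == 0)
		printf("SO_RCVBUF now %d (the kernel may double it)\n", val);
	close(fd);
	return 0;
}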
24,563
static int irda_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct irda_sock *self = irda_sk(sk);

	IRDA_DEBUG(1, "%s(%p)\n", __func__, self);

	lock_sock(sk);

	sk->sk_state       = TCP_CLOSE;
	sk->sk_shutdown   |= SEND_SHUTDOWN;
	sk->sk_state_change(sk);

	if (self->iriap) {
		iriap_close(self->iriap);
		self->iriap = NULL;
	}

	if (self->tsap) {
		irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
		irttp_close_tsap(self->tsap);
		self->tsap = NULL;
	}

	/* A bit of cleanup so the socket looks as good as new... */
	self->rx_flow = self->tx_flow = FLOW_START;	/* needed ??? */
	self->daddr = DEV_ADDR_ANY;	/* Until we get re-connected */
	self->saddr = 0x0;		/* so IrLMP assigns us any link */

	release_sock(sk);

	return 0;
}
+Info
0
static int irda_shutdown(struct socket *sock, int how)
{
	struct sock *sk = sock->sk;
	struct irda_sock *self = irda_sk(sk);

	IRDA_DEBUG(1, "%s(%p)\n", __func__, self);

	lock_sock(sk);

	sk->sk_state       = TCP_CLOSE;
	sk->sk_shutdown   |= SEND_SHUTDOWN;
	sk->sk_state_change(sk);

	if (self->iriap) {
		iriap_close(self->iriap);
		self->iriap = NULL;
	}

	if (self->tsap) {
		irttp_disconnect_request(self->tsap, NULL, P_NORMAL);
		irttp_close_tsap(self->tsap);
		self->tsap = NULL;
	}

	/* A bit of cleanup so the socket looks as good as new... */
	self->rx_flow = self->tx_flow = FLOW_START;	/* needed ??? */
	self->daddr = DEV_ADDR_ANY;	/* Until we get re-connected */
	self->saddr = 0x0;		/* so IrLMP assigns us any link */

	release_sock(sk);

	return 0;
}
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
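Note that irda_shutdown never looks at its how argument: whatever the caller passes, it sets SEND_SHUTDOWN, tears down the TSAP, and resets the addresses so the socket can be reconnected. Callers still pass the standard constants; a minimal sketch:

#include <stdio.h>
#include <sys/socket.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);

	/* SHUT_WR: no more sends, reads still allowed -- effectively the
	 * only half irda_shutdown implements regardless of the argument. */
	if (shutdown(fd, SHUT_WR) < 0)
		perror("shutdown");	/* ENOTCONN here: never connected */
	close(fd);
	return 0;
}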
24,564
void irsock_cleanup(void)
{
	sock_unregister(PF_IRDA);
	proto_unregister(&irda_proto);
}
+Info
0
void irsock_cleanup(void)
{
	sock_unregister(PF_IRDA);
	proto_unregister(&irda_proto);
}
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,565
int __init irsock_init(void)
{
	int rc = proto_register(&irda_proto, 0);

	if (rc == 0)
		rc = sock_register(&irda_family_ops);

	return rc;
}
+Info
0
int __init irsock_init(void)
{
	int rc = proto_register(&irda_proto, 0);

	if (rc == 0)
		rc = sock_register(&irda_family_ops);

	return rc;
}
@@ -1386,6 +1386,8 @@ static int irda_recvmsg_dgram(struct kiocb *iocb, struct socket *sock, IRDA_DEBUG(4, "%s()\n", __func__); + msg->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags & ~MSG_DONTWAIT, flags & MSG_DONTWAIT, &err); if (!skb)
CWE-200
null
null
24,566
static void caif_check_flow_release(struct sock *sk)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	if (rx_flow_is_on(cf_sk))
		return;

	if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
		set_rx_flow_on(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
	}
}
+Info
0
static void caif_check_flow_release(struct sock *sk)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);

	if (rx_flow_is_on(cf_sk))
		return;

	if (atomic_read(&sk->sk_rmem_alloc) <= sk_rcvbuf_lowwater(cf_sk)) {
		set_rx_flow_on(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_ON_REQ);
	}
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, if (m->msg_flags&MSG_OOB) goto read_error; + m->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error;
CWE-200
null
null
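caif_check_flow_release is the low-watermark half of CAIF's receive flow control: once the reader drains sk_rmem_alloc below sk_rcvbuf_lowwater(), a FLOW_ON is sent back to the modem (caif_queue_rcv_skb, a few records below, sends FLOW_OFF at the high watermark). The hysteresis in isolation, as a self-contained simulation; the 1/4 low watermark is an assumption for the sketch, not CAIF's actual constant:

#include <stdio.h>

#define RCVBUF		4096
#define LOWWATER	(RCVBUF / 4)	/* assumed stand-in for sk_rcvbuf_lowwater() */

static int rx_flow_on = 1;

static void rx_bytes(int *rmem, int n)		/* a packet was queued */
{
	*rmem += n;
	if (*rmem >= RCVBUF && rx_flow_on) {
		rx_flow_on = 0;
		printf("rmem=%d -> FLOW_OFF_REQ\n", *rmem);
	}
}

static void drain_bytes(int *rmem, int n)	/* the reader consumed data */
{
	*rmem -= n;
	if (!rx_flow_on && *rmem <= LOWWATER) {
		rx_flow_on = 1;
		printf("rmem=%d -> FLOW_ON_REQ\n", *rmem);
	}
}

int main(void)
{
	int rmem = 0, i;

	for (i = 0; i < 5; i++)
		rx_bytes(&rmem, 1024);		/* fills past the high watermark */
	for (i = 0; i < 5; i++)
		drain_bytes(&rmem, 1024);	/* recovers past the low watermark */
	return 0;
}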
24,567
static int caif_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); long timeo; int err; int ifindex, headroom, tailroom; unsigned int mtu; struct net_device *dev; lock_sock(sk); err = -EAFNOSUPPORT; if (uaddr->sa_family != AF_CAIF) goto out; switch (sock->state) { case SS_UNCONNECTED: /* Normal case, a fresh connect */ caif_assert(sk->sk_state == CAIF_DISCONNECTED); break; case SS_CONNECTING: switch (sk->sk_state) { case CAIF_CONNECTED: sock->state = SS_CONNECTED; err = -EISCONN; goto out; case CAIF_DISCONNECTED: /* Reconnect allowed */ break; case CAIF_CONNECTING: err = -EALREADY; if (flags & O_NONBLOCK) goto out; goto wait_connect; } break; case SS_CONNECTED: caif_assert(sk->sk_state == CAIF_CONNECTED || sk->sk_state == CAIF_DISCONNECTED); if (sk->sk_shutdown & SHUTDOWN_MASK) { /* Allow re-connect after SHUTDOWN_IND */ caif_disconnect_client(sock_net(sk), &cf_sk->layer); caif_free_client(&cf_sk->layer); break; } /* No reconnect on a seqpacket socket */ err = -EISCONN; goto out; case SS_DISCONNECTING: case SS_FREE: caif_assert(1); /*Should never happen */ break; } sk->sk_state = CAIF_DISCONNECTED; sock->state = SS_UNCONNECTED; sk_stream_kill_queues(&cf_sk->sk); err = -EINVAL; if (addr_len != sizeof(struct sockaddr_caif)) goto out; memcpy(&cf_sk->conn_req.sockaddr, uaddr, sizeof(struct sockaddr_caif)); /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = CAIF_CONNECTING; /* Check priority value comming from socket */ /* if priority value is out of range it will be ajusted */ if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX) cf_sk->conn_req.priority = CAIF_PRIO_MAX; else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN) cf_sk->conn_req.priority = CAIF_PRIO_MIN; else cf_sk->conn_req.priority = cf_sk->sk.sk_priority; /*ifindex = id of the interface.*/ cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; cf_sk->layer.receive = caif_sktrecv_cb; err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, &cf_sk->layer, &ifindex, &headroom, &tailroom); if (err < 0) { cf_sk->sk.sk_socket->state = SS_UNCONNECTED; cf_sk->sk.sk_state = CAIF_DISCONNECTED; goto out; } err = -ENODEV; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), ifindex); if (!dev) { rcu_read_unlock(); goto out; } cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom); mtu = dev->mtu; rcu_read_unlock(); cf_sk->tailroom = tailroom; cf_sk->maxframe = mtu - (headroom + tailroom); if (cf_sk->maxframe < 1) { pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu); err = -ENODEV; goto out; } err = -EINPROGRESS; wait_connect: if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK)) goto out; timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); release_sock(sk); err = -ERESTARTSYS; timeo = wait_event_interruptible_timeout(*sk_sleep(sk), sk->sk_state != CAIF_CONNECTING, timeo); lock_sock(sk); if (timeo < 0) goto out; /* -ERESTARTSYS */ err = -ETIMEDOUT; if (timeo == 0 && sk->sk_state != CAIF_CONNECTED) goto out; if (sk->sk_state != CAIF_CONNECTED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); if (!err) err = -ECONNREFUSED; goto out; } sock->state = SS_CONNECTED; err = 0; out: release_sock(sk); return err; }
+Info
0
static int caif_connect(struct socket *sock, struct sockaddr *uaddr, int addr_len, int flags) { struct sock *sk = sock->sk; struct caifsock *cf_sk = container_of(sk, struct caifsock, sk); long timeo; int err; int ifindex, headroom, tailroom; unsigned int mtu; struct net_device *dev; lock_sock(sk); err = -EAFNOSUPPORT; if (uaddr->sa_family != AF_CAIF) goto out; switch (sock->state) { case SS_UNCONNECTED: /* Normal case, a fresh connect */ caif_assert(sk->sk_state == CAIF_DISCONNECTED); break; case SS_CONNECTING: switch (sk->sk_state) { case CAIF_CONNECTED: sock->state = SS_CONNECTED; err = -EISCONN; goto out; case CAIF_DISCONNECTED: /* Reconnect allowed */ break; case CAIF_CONNECTING: err = -EALREADY; if (flags & O_NONBLOCK) goto out; goto wait_connect; } break; case SS_CONNECTED: caif_assert(sk->sk_state == CAIF_CONNECTED || sk->sk_state == CAIF_DISCONNECTED); if (sk->sk_shutdown & SHUTDOWN_MASK) { /* Allow re-connect after SHUTDOWN_IND */ caif_disconnect_client(sock_net(sk), &cf_sk->layer); caif_free_client(&cf_sk->layer); break; } /* No reconnect on a seqpacket socket */ err = -EISCONN; goto out; case SS_DISCONNECTING: case SS_FREE: caif_assert(1); /*Should never happen */ break; } sk->sk_state = CAIF_DISCONNECTED; sock->state = SS_UNCONNECTED; sk_stream_kill_queues(&cf_sk->sk); err = -EINVAL; if (addr_len != sizeof(struct sockaddr_caif)) goto out; memcpy(&cf_sk->conn_req.sockaddr, uaddr, sizeof(struct sockaddr_caif)); /* Move to connecting socket, start sending Connect Requests */ sock->state = SS_CONNECTING; sk->sk_state = CAIF_CONNECTING; /* Check priority value comming from socket */ /* if priority value is out of range it will be ajusted */ if (cf_sk->sk.sk_priority > CAIF_PRIO_MAX) cf_sk->conn_req.priority = CAIF_PRIO_MAX; else if (cf_sk->sk.sk_priority < CAIF_PRIO_MIN) cf_sk->conn_req.priority = CAIF_PRIO_MIN; else cf_sk->conn_req.priority = cf_sk->sk.sk_priority; /*ifindex = id of the interface.*/ cf_sk->conn_req.ifindex = cf_sk->sk.sk_bound_dev_if; cf_sk->layer.receive = caif_sktrecv_cb; err = caif_connect_client(sock_net(sk), &cf_sk->conn_req, &cf_sk->layer, &ifindex, &headroom, &tailroom); if (err < 0) { cf_sk->sk.sk_socket->state = SS_UNCONNECTED; cf_sk->sk.sk_state = CAIF_DISCONNECTED; goto out; } err = -ENODEV; rcu_read_lock(); dev = dev_get_by_index_rcu(sock_net(sk), ifindex); if (!dev) { rcu_read_unlock(); goto out; } cf_sk->headroom = LL_RESERVED_SPACE_EXTRA(dev, headroom); mtu = dev->mtu; rcu_read_unlock(); cf_sk->tailroom = tailroom; cf_sk->maxframe = mtu - (headroom + tailroom); if (cf_sk->maxframe < 1) { pr_warn("CAIF Interface MTU too small (%d)\n", dev->mtu); err = -ENODEV; goto out; } err = -EINPROGRESS; wait_connect: if (sk->sk_state != CAIF_CONNECTED && (flags & O_NONBLOCK)) goto out; timeo = sock_sndtimeo(sk, flags & O_NONBLOCK); release_sock(sk); err = -ERESTARTSYS; timeo = wait_event_interruptible_timeout(*sk_sleep(sk), sk->sk_state != CAIF_CONNECTING, timeo); lock_sock(sk); if (timeo < 0) goto out; /* -ERESTARTSYS */ err = -ETIMEDOUT; if (timeo == 0 && sk->sk_state != CAIF_CONNECTED) goto out; if (sk->sk_state != CAIF_CONNECTED) { sock->state = SS_UNCONNECTED; err = sock_error(sk); if (!err) err = -ECONNREFUSED; goto out; } sock->state = SS_CONNECTED; err = 0; out: release_sock(sk); return err; }
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, if (m->msg_flags&MSG_OOB) goto read_error; + m->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error;
CWE-200
null
null
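The wait_connect block is the kernel half of the classic non-blocking connect dance: return -EINPROGRESS under O_NONBLOCK, otherwise sleep until sk_state leaves CAIF_CONNECTING or the send timeout expires. The matching userspace pattern, which works the same against any connection-oriented family:

#include <errno.h>
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <unistd.h>

int main(void)
{
	int fd = socket(AF_INET, SOCK_STREAM, 0);
	struct sockaddr_in addr = { .sin_family = AF_INET,
				    .sin_addr.s_addr = htonl(INADDR_LOOPBACK),
				    .sin_port = htons(80) };
	struct pollfd pfd = { .fd = fd, .events = POLLOUT };
	int err = 0;
	socklen_t elen = sizeof(err);

	fcntl(fd, F_SETFL, O_NONBLOCK);

	if (connect(fd, (struct sockaddr *)&addr, sizeof(addr)) < 0 &&
	    errno != EINPROGRESS) {
		perror("connect");
		return 1;
	}

	/* poll() plays the role of the kernel's wait_connect label:
	 * sleep until the socket leaves the "connecting" state. */
	if (poll(&pfd, 1, 5000) == 1) {
		getsockopt(fd, SOL_SOCKET, SO_ERROR, &err, &elen);
		printf("connect result: %s\n", err ? strerror(err) : "connected");
	} else {
		printf("timed out (the kernel path returns -ETIMEDOUT)\n");
	}
	close(fd);
	return 0;
}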
24,568
static int caif_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk = NULL; struct caifsock *cf_sk = NULL; static struct proto prot = {.name = "PF_CAIF", .owner = THIS_MODULE, .obj_size = sizeof(struct caifsock), }; if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN)) return -EPERM; /* * The sock->type specifies the socket type to use. * The CAIF socket is a packet stream in the sense * that it is packet based. CAIF trusts the reliability * of the link, no resending is implemented. */ if (sock->type == SOCK_SEQPACKET) sock->ops = &caif_seqpacket_ops; else if (sock->type == SOCK_STREAM) sock->ops = &caif_stream_ops; else return -ESOCKTNOSUPPORT; if (protocol < 0 || protocol >= CAIFPROTO_MAX) return -EPROTONOSUPPORT; /* * Set the socket state to unconnected. The socket state * is really not used at all in the net/core or socket.c but the * initialization makes sure that sock->state is not uninitialized. */ sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot); if (!sk) return -ENOMEM; cf_sk = container_of(sk, struct caifsock, sk); /* Store the protocol */ sk->sk_protocol = (unsigned char) protocol; /* Initialize default priority for well-known cases */ switch (protocol) { case CAIFPROTO_AT: sk->sk_priority = TC_PRIO_CONTROL; break; case CAIFPROTO_RFM: sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; break; default: sk->sk_priority = TC_PRIO_BESTEFFORT; } /* * Lock in order to try to stop someone from opening the socket * too early. */ lock_sock(&(cf_sk->sk)); /* Initialize the nozero default sock structure data. */ sock_init_data(sock, sk); sk->sk_destruct = caif_sock_destructor; mutex_init(&cf_sk->readlock); /* single task reading lock */ cf_sk->layer.ctrlcmd = caif_ctrl_cb; cf_sk->sk.sk_socket->state = SS_UNCONNECTED; cf_sk->sk.sk_state = CAIF_DISCONNECTED; set_tx_flow_off(cf_sk); set_rx_flow_on(cf_sk); /* Set default options on configuration */ cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; cf_sk->conn_req.protocol = protocol; release_sock(&cf_sk->sk); return 0; }
+Info
0
static int caif_create(struct net *net, struct socket *sock, int protocol, int kern) { struct sock *sk = NULL; struct caifsock *cf_sk = NULL; static struct proto prot = {.name = "PF_CAIF", .owner = THIS_MODULE, .obj_size = sizeof(struct caifsock), }; if (!capable(CAP_SYS_ADMIN) && !capable(CAP_NET_ADMIN)) return -EPERM; /* * The sock->type specifies the socket type to use. * The CAIF socket is a packet stream in the sense * that it is packet based. CAIF trusts the reliability * of the link, no resending is implemented. */ if (sock->type == SOCK_SEQPACKET) sock->ops = &caif_seqpacket_ops; else if (sock->type == SOCK_STREAM) sock->ops = &caif_stream_ops; else return -ESOCKTNOSUPPORT; if (protocol < 0 || protocol >= CAIFPROTO_MAX) return -EPROTONOSUPPORT; /* * Set the socket state to unconnected. The socket state * is really not used at all in the net/core or socket.c but the * initialization makes sure that sock->state is not uninitialized. */ sk = sk_alloc(net, PF_CAIF, GFP_KERNEL, &prot); if (!sk) return -ENOMEM; cf_sk = container_of(sk, struct caifsock, sk); /* Store the protocol */ sk->sk_protocol = (unsigned char) protocol; /* Initialize default priority for well-known cases */ switch (protocol) { case CAIFPROTO_AT: sk->sk_priority = TC_PRIO_CONTROL; break; case CAIFPROTO_RFM: sk->sk_priority = TC_PRIO_INTERACTIVE_BULK; break; default: sk->sk_priority = TC_PRIO_BESTEFFORT; } /* * Lock in order to try to stop someone from opening the socket * too early. */ lock_sock(&(cf_sk->sk)); /* Initialize the nozero default sock structure data. */ sock_init_data(sock, sk); sk->sk_destruct = caif_sock_destructor; mutex_init(&cf_sk->readlock); /* single task reading lock */ cf_sk->layer.ctrlcmd = caif_ctrl_cb; cf_sk->sk.sk_socket->state = SS_UNCONNECTED; cf_sk->sk.sk_state = CAIF_DISCONNECTED; set_tx_flow_off(cf_sk); set_rx_flow_on(cf_sk); /* Set default options on configuration */ cf_sk->conn_req.link_selector = CAIF_LINK_LOW_LATENCY; cf_sk->conn_req.protocol = protocol; release_sock(&cf_sk->sk); return 0; }
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, if (m->msg_flags&MSG_OOB) goto read_error; + m->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error;
CWE-200
null
null
24,569
static void caif_ctrl_cb(struct cflayer *layr,
			 enum caif_ctrlcmd flow,
			 int phyid)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);

	switch (flow) {
	case CAIF_CTRLCMD_FLOW_ON_IND:
		/* OK from modem to start sending again */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* Modem asks us to shut up */
		set_tx_flow_off(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_RSP:
		/* We're now connected */
		caif_client_register_refcnt(&cf_sk->layer,
					    cfsk_hold, cfsk_put);
		cf_sk->sk.sk_state = CAIF_CONNECTED;
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_shutdown = 0;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_DEINIT_RSP:
		/* We're now disconnected */
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* Connect request failed */
		cf_sk->sk.sk_err = ECONNREFUSED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		/*
		 * Socket "standards" seem to require POLLOUT to
		 * be set at connect failure.
		 */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		/* Modem has closed this connection, or device is down. */
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		cf_sk->sk.sk_err = ECONNRESET;
		set_rx_flow_on(cf_sk);
		cf_sk->sk.sk_error_report(&cf_sk->sk);
		break;

	default:
		pr_debug("Unexpected flow command %d\n", flow);
	}
}
+Info
0
static void caif_ctrl_cb(struct cflayer *layr,
			 enum caif_ctrlcmd flow,
			 int phyid)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);

	switch (flow) {
	case CAIF_CTRLCMD_FLOW_ON_IND:
		/* OK from modem to start sending again */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_FLOW_OFF_IND:
		/* Modem asks us to shut up */
		set_tx_flow_off(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_RSP:
		/* We're now connected */
		caif_client_register_refcnt(&cf_sk->layer,
					    cfsk_hold, cfsk_put);
		cf_sk->sk.sk_state = CAIF_CONNECTED;
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_shutdown = 0;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_DEINIT_RSP:
		/* We're now disconnected */
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_INIT_FAIL_RSP:
		/* Connect request failed */
		cf_sk->sk.sk_err = ECONNREFUSED;
		cf_sk->sk.sk_state = CAIF_DISCONNECTED;
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		/*
		 * Socket "standards" seem to require POLLOUT to
		 * be set at connect failure.
		 */
		set_tx_flow_on(cf_sk);
		cf_sk->sk.sk_state_change(&cf_sk->sk);
		break;

	case CAIF_CTRLCMD_REMOTE_SHUTDOWN_IND:
		/* Modem has closed this connection, or device is down. */
		cf_sk->sk.sk_shutdown = SHUTDOWN_MASK;
		cf_sk->sk.sk_err = ECONNRESET;
		set_rx_flow_on(cf_sk);
		cf_sk->sk.sk_error_report(&cf_sk->sk);
		break;

	default:
		pr_debug("Unexpected flow command %d\n", flow);
	}
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, if (m->msg_flags&MSG_OOB) goto read_error; + m->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error;
CWE-200
null
null
24,570
static void caif_flow_ctrl(struct sock *sk, int mode)
{
	struct caifsock *cf_sk;

	cf_sk = container_of(sk, struct caifsock, sk);
	if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd)
		cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode);
}
+Info
0
static void caif_flow_ctrl(struct sock *sk, int mode) { struct caifsock *cf_sk; cf_sk = container_of(sk, struct caifsock, sk); if (cf_sk->layer.dn && cf_sk->layer.dn->modemcmd) cf_sk->layer.dn->modemcmd(cf_sk->layer.dn, mode); }
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock, if (m->msg_flags&MSG_OOB) goto read_error; + m->msg_namelen = 0; + skb = skb_recv_datagram(sk, flags, 0 , &ret); if (!skb) goto read_error;
CWE-200
null
null
24,571
static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
		(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
		net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
				    atomic_read(&cf_sk->sk.sk_rmem_alloc),
				    sk_rcvbuf_lowwater(cf_sk));
		set_rx_flow_off(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
	err = sk_filter(sk, skb);
	if (err)
		return err;
	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
		set_rx_flow_off(cf_sk);
		net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;
	spin_lock_irqsave(&list->lock, flags);
	if (!sock_flag(sk, SOCK_DEAD))
		__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	else
		kfree_skb(skb);
	return 0;
}
+Info
0
static int caif_queue_rcv_skb(struct sock *sk, struct sk_buff *skb)
{
	int err;
	int skb_len;
	unsigned long flags;
	struct sk_buff_head *list = &sk->sk_receive_queue;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	if (atomic_read(&sk->sk_rmem_alloc) + skb->truesize >=
		(unsigned int)sk->sk_rcvbuf && rx_flow_is_on(cf_sk)) {
		net_dbg_ratelimited("sending flow OFF (queue len = %d %d)\n",
				    atomic_read(&cf_sk->sk.sk_rmem_alloc),
				    sk_rcvbuf_lowwater(cf_sk));
		set_rx_flow_off(cf_sk);
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
	err = sk_filter(sk, skb);
	if (err)
		return err;
	if (!sk_rmem_schedule(sk, skb, skb->truesize) && rx_flow_is_on(cf_sk)) {
		set_rx_flow_off(cf_sk);
		net_dbg_ratelimited("sending flow OFF due to rmem_schedule\n");
		caif_flow_ctrl(sk, CAIF_MODEMCMD_FLOW_OFF_REQ);
	}
	skb->dev = NULL;
	skb_set_owner_r(skb, sk);
	/* Cache the SKB length before we tack it onto the receive
	 * queue. Once it is added it no longer belongs to us and
	 * may be freed by other threads of control pulling packets
	 * from the queue.
	 */
	skb_len = skb->len;
	spin_lock_irqsave(&list->lock, flags);
	if (!sock_flag(sk, SOCK_DEAD))
		__skb_queue_tail(list, skb);
	spin_unlock_irqrestore(&list->lock, flags);
	if (!sock_flag(sk, SOCK_DEAD))
		sk->sk_data_ready(sk, skb_len);
	else
		kfree_skb(skb);
	return 0;
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,572
static void caif_read_lock(struct sock *sk)
{
	struct caifsock *cf_sk;
	cf_sk = container_of(sk, struct caifsock, sk);
	mutex_lock(&cf_sk->readlock);
}
+Info
0
static void caif_read_lock(struct sock *sk)
{
	struct caifsock *cf_sk;
	cf_sk = container_of(sk, struct caifsock, sk);
	mutex_lock(&cf_sk->readlock);
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,573
static void caif_read_unlock(struct sock *sk)
{
	struct caifsock *cf_sk;
	cf_sk = container_of(sk, struct caifsock, sk);
	mutex_unlock(&cf_sk->readlock);
}
+Info
0
static void caif_read_unlock(struct sock *sk)
{
	struct caifsock *cf_sk;
	cf_sk = container_of(sk, struct caifsock, sk);
	mutex_unlock(&cf_sk->readlock);
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,574
static int caif_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	if (!sk)
		return 0;
	set_tx_flow_off(cf_sk);
	/*
	 * Ensure that packets are not queued after this point in time.
	 * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
	 * this ensures no packets when sock is dead.
	 */
	spin_lock_bh(&sk->sk_receive_queue.lock);
	sock_set_flag(sk, SOCK_DEAD);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	sock->sk = NULL;
	WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
	if (cf_sk->debugfs_socket_dir != NULL)
		debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
	lock_sock(&(cf_sk->sk));
	sk->sk_state = CAIF_DISCONNECTED;
	sk->sk_shutdown = SHUTDOWN_MASK;
	caif_disconnect_client(sock_net(sk), &cf_sk->layer);
	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
	wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
	sock_orphan(sk);
	sk_stream_kill_queues(&cf_sk->sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
+Info
0
static int caif_release(struct socket *sock)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	if (!sk)
		return 0;
	set_tx_flow_off(cf_sk);
	/*
	 * Ensure that packets are not queued after this point in time.
	 * caif_queue_rcv_skb checks SOCK_DEAD holding the queue lock,
	 * this ensures no packets when sock is dead.
	 */
	spin_lock_bh(&sk->sk_receive_queue.lock);
	sock_set_flag(sk, SOCK_DEAD);
	spin_unlock_bh(&sk->sk_receive_queue.lock);
	sock->sk = NULL;
	WARN_ON(IS_ERR(cf_sk->debugfs_socket_dir));
	if (cf_sk->debugfs_socket_dir != NULL)
		debugfs_remove_recursive(cf_sk->debugfs_socket_dir);
	lock_sock(&(cf_sk->sk));
	sk->sk_state = CAIF_DISCONNECTED;
	sk->sk_shutdown = SHUTDOWN_MASK;
	caif_disconnect_client(sock_net(sk), &cf_sk->layer);
	cf_sk->sk.sk_socket->state = SS_DISCONNECTING;
	wake_up_interruptible_poll(sk_sleep(sk), POLLERR|POLLHUP);
	sock_orphan(sk);
	sk_stream_kill_queues(&cf_sk->sk);
	release_sock(sk);
	sock_put(sk);
	return 0;
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,575
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int buffer_size;
	int ret = 0;
	struct sk_buff *skb = NULL;
	int noblock;
	long timeo;
	caif_assert(cf_sk);
	ret = sock_error(sk);
	if (ret)
		goto err;
	ret = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto err;
	ret = -EOPNOTSUPP;
	if (msg->msg_namelen)
		goto err;
	ret = -EINVAL;
	if (unlikely(msg->msg_iov->iov_base == NULL))
		goto err;
	noblock = msg->msg_flags & MSG_DONTWAIT;
	timeo = sock_sndtimeo(sk, noblock);
	timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
				      1, timeo, &ret);
	if (ret)
		goto err;
	ret = -EPIPE;
	if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
		sock_flag(sk, SOCK_DEAD) ||
		(sk->sk_shutdown & RCV_SHUTDOWN))
		goto err;
	/* Error if trying to write more than maximum frame size. */
	ret = -EMSGSIZE;
	if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
		goto err;
	buffer_size = len + cf_sk->headroom + cf_sk->tailroom;
	ret = -ENOMEM;
	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
	if (!skb || skb_tailroom(skb) < buffer_size)
		goto err;
	skb_reserve(skb, cf_sk->headroom);
	ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (ret)
		goto err;
	ret = transmit_skb(skb, cf_sk, noblock, timeo);
	if (ret < 0)
		/* skb is already freed */
		return ret;
	return len;
err:
	kfree_skb(skb);
	return ret;
}
+Info
0
static int caif_seqpkt_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int buffer_size;
	int ret = 0;
	struct sk_buff *skb = NULL;
	int noblock;
	long timeo;
	caif_assert(cf_sk);
	ret = sock_error(sk);
	if (ret)
		goto err;
	ret = -EOPNOTSUPP;
	if (msg->msg_flags&MSG_OOB)
		goto err;
	ret = -EOPNOTSUPP;
	if (msg->msg_namelen)
		goto err;
	ret = -EINVAL;
	if (unlikely(msg->msg_iov->iov_base == NULL))
		goto err;
	noblock = msg->msg_flags & MSG_DONTWAIT;
	timeo = sock_sndtimeo(sk, noblock);
	timeo = caif_wait_for_flow_on(container_of(sk, struct caifsock, sk),
				      1, timeo, &ret);
	if (ret)
		goto err;
	ret = -EPIPE;
	if (cf_sk->sk.sk_state != CAIF_CONNECTED ||
		sock_flag(sk, SOCK_DEAD) ||
		(sk->sk_shutdown & RCV_SHUTDOWN))
		goto err;
	/* Error if trying to write more than maximum frame size. */
	ret = -EMSGSIZE;
	if (len > cf_sk->maxframe && cf_sk->sk.sk_protocol != CAIFPROTO_RFM)
		goto err;
	buffer_size = len + cf_sk->headroom + cf_sk->tailroom;
	ret = -ENOMEM;
	skb = sock_alloc_send_skb(sk, buffer_size, noblock, &ret);
	if (!skb || skb_tailroom(skb) < buffer_size)
		goto err;
	skb_reserve(skb, cf_sk->headroom);
	ret = memcpy_fromiovec(skb_put(skb, len), msg->msg_iov, len);
	if (ret)
		goto err;
	ret = transmit_skb(skb, cf_sk, noblock, timeo);
	if (ret < 0)
		/* skb is already freed */
		return ret;
	return len;
err:
	kfree_skb(skb);
	return ret;
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,576
static int __init caif_sktinit_module(void)
{
	int err = sock_register(&caif_family_ops);
	if (!err)
		return err;
	return 0;
}
+Info
0
static int __init caif_sktinit_module(void)
{
	int err = sock_register(&caif_family_ops);
	if (!err)
		return err;
	return 0;
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,577
static void caif_sock_destructor(struct sock *sk)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	caif_assert(!atomic_read(&sk->sk_wmem_alloc));
	caif_assert(sk_unhashed(sk));
	caif_assert(!sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_debug("Attempt to release alive CAIF socket: %p\n", sk);
		return;
	}
	sk_stream_kill_queues(&cf_sk->sk);
	caif_free_client(&cf_sk->layer);
}
+Info
0
static void caif_sock_destructor(struct sock *sk)
{
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	caif_assert(!atomic_read(&sk->sk_wmem_alloc));
	caif_assert(sk_unhashed(sk));
	caif_assert(!sk->sk_socket);
	if (!sock_flag(sk, SOCK_DEAD)) {
		pr_debug("Attempt to release alive CAIF socket: %p\n", sk);
		return;
	}
	sk_stream_kill_queues(&cf_sk->sk);
	caif_free_client(&cf_sk->layer);
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,578
static long caif_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);
	lock_sock(sk);
	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
			sk->sk_err ||
			sk->sk_state != CAIF_CONNECTED ||
			sock_flag(sk, SOCK_DEAD) ||
			(sk->sk_shutdown & RCV_SHUTDOWN) ||
			signal_pending(current) ||
			!timeo)
			break;
		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}
	finish_wait(sk_sleep(sk), &wait);
	release_sock(sk);
	return timeo;
}
+Info
0
static long caif_stream_data_wait(struct sock *sk, long timeo)
{
	DEFINE_WAIT(wait);
	lock_sock(sk);
	for (;;) {
		prepare_to_wait(sk_sleep(sk), &wait, TASK_INTERRUPTIBLE);
		if (!skb_queue_empty(&sk->sk_receive_queue) ||
			sk->sk_err ||
			sk->sk_state != CAIF_CONNECTED ||
			sock_flag(sk, SOCK_DEAD) ||
			(sk->sk_shutdown & RCV_SHUTDOWN) ||
			signal_pending(current) ||
			!timeo)
			break;
		set_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
		release_sock(sk);
		timeo = schedule_timeout(timeo);
		lock_sock(sk);
		clear_bit(SOCK_ASYNC_WAITDATA, &sk->sk_socket->flags);
	}
	finish_wait(sk_sleep(sk), &wait);
	release_sock(sk);
	return timeo;
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,579
static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	long timeo;
	err = -EOPNOTSUPP;
	if (unlikely(msg->msg_flags&MSG_OOB))
		goto out_err;
	if (unlikely(msg->msg_namelen))
		goto out_err;
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);
	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
		goto pipe_err;
	while (sent < len) {
		size = len-sent;
		if (size > cf_sk->maxframe)
			size = cf_sk->maxframe;
		/* If size is more than half of sndbuf, chop up message */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;
		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;
		skb = sock_alloc_send_skb(sk,
					size + cf_sk->headroom +
					cf_sk->tailroom,
					msg->msg_flags&MSG_DONTWAIT, &err);
		if (skb == NULL)
			goto out_err;
		skb_reserve(skb, cf_sk->headroom);
		/*
		 * If you pass two values to the sock_alloc_send_skb
		 * it tries to grab the large buffer with GFP_NOFS
		 * (which can fail easily), and if it fails grab the
		 * fallback size buffer which is under a page and will
		 * succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));
		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}
		err = transmit_skb(skb, cf_sk,
				msg->msg_flags&MSG_DONTWAIT, timeo);
		if (err < 0)
			/* skb is already freed */
			goto pipe_err;
		sent += size;
	}
	return sent;
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	return sent ? : err;
}
+Info
0
static int caif_stream_sendmsg(struct kiocb *kiocb, struct socket *sock,
			       struct msghdr *msg, size_t len)
{
	struct sock *sk = sock->sk;
	struct caifsock *cf_sk = container_of(sk, struct caifsock, sk);
	int err, size;
	struct sk_buff *skb;
	int sent = 0;
	long timeo;
	err = -EOPNOTSUPP;
	if (unlikely(msg->msg_flags&MSG_OOB))
		goto out_err;
	if (unlikely(msg->msg_namelen))
		goto out_err;
	timeo = sock_sndtimeo(sk, msg->msg_flags & MSG_DONTWAIT);
	timeo = caif_wait_for_flow_on(cf_sk, 1, timeo, &err);
	if (unlikely(sk->sk_shutdown & SEND_SHUTDOWN))
		goto pipe_err;
	while (sent < len) {
		size = len-sent;
		if (size > cf_sk->maxframe)
			size = cf_sk->maxframe;
		/* If size is more than half of sndbuf, chop up message */
		if (size > ((sk->sk_sndbuf >> 1) - 64))
			size = (sk->sk_sndbuf >> 1) - 64;
		if (size > SKB_MAX_ALLOC)
			size = SKB_MAX_ALLOC;
		skb = sock_alloc_send_skb(sk,
					size + cf_sk->headroom +
					cf_sk->tailroom,
					msg->msg_flags&MSG_DONTWAIT, &err);
		if (skb == NULL)
			goto out_err;
		skb_reserve(skb, cf_sk->headroom);
		/*
		 * If you pass two values to the sock_alloc_send_skb
		 * it tries to grab the large buffer with GFP_NOFS
		 * (which can fail easily), and if it fails grab the
		 * fallback size buffer which is under a page and will
		 * succeed. [Alan]
		 */
		size = min_t(int, size, skb_tailroom(skb));
		err = memcpy_fromiovec(skb_put(skb, size), msg->msg_iov, size);
		if (err) {
			kfree_skb(skb);
			goto out_err;
		}
		err = transmit_skb(skb, cf_sk,
				msg->msg_flags&MSG_DONTWAIT, timeo);
		if (err < 0)
			/* skb is already freed */
			goto pipe_err;
		sent += size;
	}
	return sent;
pipe_err:
	if (sent == 0 && !(msg->msg_flags&MSG_NOSIGNAL))
		send_sig(SIGPIPE, current, 0);
	err = -EPIPE;
out_err:
	return sent ? : err;
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,580
static void cfsk_hold(struct cflayer *layr)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
	sock_hold(&cf_sk->sk);
}
+Info
0
static void cfsk_hold(struct cflayer *layr)
{
	struct caifsock *cf_sk = container_of(layr, struct caifsock, layer);
	sock_hold(&cf_sk->sk);
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,581
static int rx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
+Info
0
static int rx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,582
static void set_rx_flow_off(struct caifsock *cf_sk)
{
	clear_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
+Info
0
static void set_rx_flow_off(struct caifsock *cf_sk)
{
	clear_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,583
static void set_rx_flow_on(struct caifsock *cf_sk)
{
	set_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
+Info
0
static void set_rx_flow_on(struct caifsock *cf_sk)
{
	set_bit(RX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,584
static void set_tx_flow_off(struct caifsock *cf_sk)
{
	clear_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
+Info
0
static void set_tx_flow_off(struct caifsock *cf_sk)
{
	clear_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,585
static void set_tx_flow_on(struct caifsock *cf_sk)
{
	set_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
+Info
0
static void set_tx_flow_on(struct caifsock *cf_sk)
{
	set_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,586
static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
{
	/* A quarter of full buffer is used a low water mark */
	return cf_sk->sk.sk_rcvbuf / 4;
}
+Info
0
static int sk_rcvbuf_lowwater(struct caifsock *cf_sk)
{
	/* A quarter of full buffer is used a low water mark */
	return cf_sk->sk.sk_rcvbuf / 4;
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,587
static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
			int noblock, long timeo)
{
	struct cfpkt *pkt;
	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
	memset(skb->cb, 0, sizeof(struct caif_payload_info));
	cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);
	if (cf_sk->layer.dn == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}
	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
}
+Info
0
static int transmit_skb(struct sk_buff *skb, struct caifsock *cf_sk,
			int noblock, long timeo)
{
	struct cfpkt *pkt;
	pkt = cfpkt_fromnative(CAIF_DIR_OUT, skb);
	memset(skb->cb, 0, sizeof(struct caif_payload_info));
	cfpkt_set_prio(pkt, cf_sk->sk.sk_priority);
	if (cf_sk->layer.dn == NULL) {
		kfree_skb(skb);
		return -EINVAL;
	}
	return cf_sk->layer.dn->transmit(cf_sk->layer.dn, pkt);
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,588
static int tx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
+Info
0
static int tx_flow_is_on(struct caifsock *cf_sk)
{
	return test_bit(TX_FLOW_ON_BIT, (void *) &cf_sk->flow_state);
}
@@ -286,6 +286,8 @@ static int caif_seqpkt_recvmsg(struct kiocb *iocb, struct socket *sock,
 	if (m->msg_flags&MSG_OOB)
 		goto read_error;
 
+	m->msg_namelen = 0;
+
 	skb = skb_recv_datagram(sk, flags, 0 , &ret);
 	if (!skb)
 		goto read_error;
CWE-200
null
null
24,589
static void __sco_sock_close(struct sock *sk)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
	switch (sk->sk_state) {
	case BT_LISTEN:
		sco_sock_cleanup_listen(sk);
		break;
	case BT_CONNECTED:
	case BT_CONFIG:
		if (sco_pi(sk)->conn->hcon) {
			sk->sk_state = BT_DISCONN;
			sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
			hci_conn_put(sco_pi(sk)->conn->hcon);
			sco_pi(sk)->conn->hcon = NULL;
		} else
			sco_chan_del(sk, ECONNRESET);
		break;
	case BT_CONNECT2:
	case BT_CONNECT:
	case BT_DISCONN:
		sco_chan_del(sk, ECONNRESET);
		break;
	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
+Info
0
static void __sco_sock_close(struct sock *sk)
{
	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
	switch (sk->sk_state) {
	case BT_LISTEN:
		sco_sock_cleanup_listen(sk);
		break;
	case BT_CONNECTED:
	case BT_CONFIG:
		if (sco_pi(sk)->conn->hcon) {
			sk->sk_state = BT_DISCONN;
			sco_sock_set_timer(sk, SCO_DISCONN_TIMEOUT);
			hci_conn_put(sco_pi(sk)->conn->hcon);
			sco_pi(sk)->conn->hcon = NULL;
		} else
			sco_chan_del(sk, ECONNRESET);
		break;
	case BT_CONNECT2:
	case BT_CONNECT:
	case BT_DISCONN:
		sco_chan_del(sk, ECONNRESET);
		break;
	default:
		sock_set_flag(sk, SOCK_ZAPPED);
		break;
	}
}
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
 		hci_conn_accept(pi->conn->hcon, 0);
 		sk->sk_state = BT_CONFIG;
+		msg->msg_namelen = 0;
 
 		release_sock(sk);
 		return 0;
CWE-200
null
null
24,590
static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
			struct sock *parent)
{
	int err = 0;
	sco_conn_lock(conn);
	if (conn->sk)
		err = -EBUSY;
	else
		__sco_chan_add(conn, sk, parent);
	sco_conn_unlock(conn);
	return err;
}
+Info
0
static int sco_chan_add(struct sco_conn *conn, struct sock *sk,
			struct sock *parent)
{
	int err = 0;
	sco_conn_lock(conn);
	if (conn->sk)
		err = -EBUSY;
	else
		__sco_chan_add(conn, sk, parent);
	sco_conn_unlock(conn);
	return err;
}
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
 		hci_conn_accept(pi->conn->hcon, 0);
 		sk->sk_state = BT_CONFIG;
+		msg->msg_namelen = 0;
 
 		release_sock(sk);
 		return 0;
CWE-200
null
null
24,591
static void sco_chan_del(struct sock *sk, int err)
{
	struct sco_conn *conn;
	conn = sco_pi(sk)->conn;
	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
	if (conn) {
		sco_conn_lock(conn);
		conn->sk = NULL;
		sco_pi(sk)->conn = NULL;
		sco_conn_unlock(conn);
		if (conn->hcon)
			hci_conn_put(conn->hcon);
	}
	sk->sk_state = BT_CLOSED;
	sk->sk_err = err;
	sk->sk_state_change(sk);
	sock_set_flag(sk, SOCK_ZAPPED);
}
+Info
0
static void sco_chan_del(struct sock *sk, int err)
{
	struct sco_conn *conn;
	conn = sco_pi(sk)->conn;
	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
	if (conn) {
		sco_conn_lock(conn);
		conn->sk = NULL;
		sco_pi(sk)->conn = NULL;
		sco_conn_unlock(conn);
		if (conn->hcon)
			hci_conn_put(conn->hcon);
	}
	sk->sk_state = BT_CLOSED;
	sk->sk_err = err;
	sk->sk_state_change(sk);
	sock_set_flag(sk, SOCK_ZAPPED);
}
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
 		hci_conn_accept(pi->conn->hcon, 0);
 		sk->sk_state = BT_CONFIG;
+		msg->msg_namelen = 0;
 
 		release_sock(sk);
 		return 0;
CWE-200
null
null
24,592
static struct sock *sco_chan_get(struct sco_conn *conn)
{
	struct sock *sk = NULL;
	sco_conn_lock(conn);
	sk = conn->sk;
	sco_conn_unlock(conn);
	return sk;
}
+Info
0
static struct sock *sco_chan_get(struct sco_conn *conn)
{
	struct sock *sk = NULL;
	sco_conn_lock(conn);
	sk = conn->sk;
	sco_conn_unlock(conn);
	return sk;
}
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
 		hci_conn_accept(pi->conn->hcon, 0);
 		sk->sk_state = BT_CONFIG;
+		msg->msg_namelen = 0;
 
 		release_sock(sk);
 		return 0;
CWE-200
null
null
24,593
static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
	struct hci_dev *hdev = hcon->hdev;
	struct sco_conn *conn = hcon->sco_data;
	if (conn)
		return conn;
	conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;
	spin_lock_init(&conn->lock);
	hcon->sco_data = conn;
	conn->hcon = hcon;
	conn->src = &hdev->bdaddr;
	conn->dst = &hcon->dst;
	if (hdev->sco_mtu > 0)
		conn->mtu = hdev->sco_mtu;
	else
		conn->mtu = 60;
	BT_DBG("hcon %p conn %p", hcon, conn);
	return conn;
}
+Info
0
static struct sco_conn *sco_conn_add(struct hci_conn *hcon)
{
	struct hci_dev *hdev = hcon->hdev;
	struct sco_conn *conn = hcon->sco_data;
	if (conn)
		return conn;
	conn = kzalloc(sizeof(struct sco_conn), GFP_ATOMIC);
	if (!conn)
		return NULL;
	spin_lock_init(&conn->lock);
	hcon->sco_data = conn;
	conn->hcon = hcon;
	conn->src = &hdev->bdaddr;
	conn->dst = &hcon->dst;
	if (hdev->sco_mtu > 0)
		conn->mtu = hdev->sco_mtu;
	else
		conn->mtu = 60;
	BT_DBG("hcon %p conn %p", hcon, conn);
	return conn;
}
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
 		hci_conn_accept(pi->conn->hcon, 0);
 		sk->sk_state = BT_CONFIG;
+		msg->msg_namelen = 0;
 
 		release_sock(sk);
 		return 0;
CWE-200
null
null
24,594
static void sco_conn_ready(struct sco_conn *conn)
{
	struct sock *parent;
	struct sock *sk = conn->sk;
	BT_DBG("conn %p", conn);
	if (sk) {
		sco_sock_clear_timer(sk);
		bh_lock_sock(sk);
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
		bh_unlock_sock(sk);
	} else {
		sco_conn_lock(conn);
		parent = sco_get_sock_listen(conn->src);
		if (!parent) {
			sco_conn_unlock(conn);
			return;
		}
		bh_lock_sock(parent);
		sk = sco_sock_alloc(sock_net(parent), NULL,
				    BTPROTO_SCO, GFP_ATOMIC);
		if (!sk) {
			bh_unlock_sock(parent);
			sco_conn_unlock(conn);
			return;
		}
		sco_sock_init(sk, parent);
		bacpy(&bt_sk(sk)->src, conn->src);
		bacpy(&bt_sk(sk)->dst, conn->dst);
		hci_conn_hold(conn->hcon);
		__sco_chan_add(conn, sk, parent);
		if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
			sk->sk_state = BT_CONNECT2;
		else
			sk->sk_state = BT_CONNECTED;
		/* Wake up parent */
		parent->sk_data_ready(parent, 1);
		bh_unlock_sock(parent);
		sco_conn_unlock(conn);
	}
}
+Info
0
static void sco_conn_ready(struct sco_conn *conn)
{
	struct sock *parent;
	struct sock *sk = conn->sk;
	BT_DBG("conn %p", conn);
	if (sk) {
		sco_sock_clear_timer(sk);
		bh_lock_sock(sk);
		sk->sk_state = BT_CONNECTED;
		sk->sk_state_change(sk);
		bh_unlock_sock(sk);
	} else {
		sco_conn_lock(conn);
		parent = sco_get_sock_listen(conn->src);
		if (!parent) {
			sco_conn_unlock(conn);
			return;
		}
		bh_lock_sock(parent);
		sk = sco_sock_alloc(sock_net(parent), NULL,
				    BTPROTO_SCO, GFP_ATOMIC);
		if (!sk) {
			bh_unlock_sock(parent);
			sco_conn_unlock(conn);
			return;
		}
		sco_sock_init(sk, parent);
		bacpy(&bt_sk(sk)->src, conn->src);
		bacpy(&bt_sk(sk)->dst, conn->dst);
		hci_conn_hold(conn->hcon);
		__sco_chan_add(conn, sk, parent);
		if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(parent)->flags))
			sk->sk_state = BT_CONNECT2;
		else
			sk->sk_state = BT_CONNECTED;
		/* Wake up parent */
		parent->sk_data_ready(parent, 1);
		bh_unlock_sock(parent);
		sco_conn_unlock(conn);
	}
}
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
 		hci_conn_accept(pi->conn->hcon, 0);
 		sk->sk_state = BT_CONFIG;
+		msg->msg_namelen = 0;
 
 		release_sock(sk);
 		return 0;
CWE-200
null
null
24,595
static int sco_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct sco_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err, type;
	BT_DBG("%pMR -> %pMR", src, dst);
	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;
	hci_dev_lock(hdev);
	if (lmp_esco_capable(hdev) && !disable_esco)
		type = ESCO_LINK;
	else
		type = SCO_LINK;
	hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
			   HCI_AT_NO_BONDING);
	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}
	conn = sco_conn_add(hcon);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}
	/* Update source addr of the socket */
	bacpy(src, conn->src);
	err = sco_chan_add(conn, sk, NULL);
	if (err)
		goto done;
	if (hcon->state == BT_CONNECTED) {
		sco_sock_clear_timer(sk);
		sk->sk_state = BT_CONNECTED;
	} else {
		sk->sk_state = BT_CONNECT;
		sco_sock_set_timer(sk, sk->sk_sndtimeo);
	}
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
+Info
0
static int sco_connect(struct sock *sk)
{
	bdaddr_t *src = &bt_sk(sk)->src;
	bdaddr_t *dst = &bt_sk(sk)->dst;
	struct sco_conn *conn;
	struct hci_conn *hcon;
	struct hci_dev *hdev;
	int err, type;
	BT_DBG("%pMR -> %pMR", src, dst);
	hdev = hci_get_route(dst, src);
	if (!hdev)
		return -EHOSTUNREACH;
	hci_dev_lock(hdev);
	if (lmp_esco_capable(hdev) && !disable_esco)
		type = ESCO_LINK;
	else
		type = SCO_LINK;
	hcon = hci_connect(hdev, type, dst, BDADDR_BREDR, BT_SECURITY_LOW,
			   HCI_AT_NO_BONDING);
	if (IS_ERR(hcon)) {
		err = PTR_ERR(hcon);
		goto done;
	}
	conn = sco_conn_add(hcon);
	if (!conn) {
		hci_conn_put(hcon);
		err = -ENOMEM;
		goto done;
	}
	/* Update source addr of the socket */
	bacpy(src, conn->src);
	err = sco_chan_add(conn, sk, NULL);
	if (err)
		goto done;
	if (hcon->state == BT_CONNECTED) {
		sco_sock_clear_timer(sk);
		sk->sk_state = BT_CONNECTED;
	} else {
		sk->sk_state = BT_CONNECT;
		sco_sock_set_timer(sk, sk->sk_sndtimeo);
	}
done:
	hci_dev_unlock(hdev);
	hci_dev_put(hdev);
	return err;
}
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
 		hci_conn_accept(pi->conn->hcon, 0);
 		sk->sk_state = BT_CONFIG;
+		msg->msg_namelen = 0;
 
 		release_sock(sk);
 		return 0;
CWE-200
null
null
24,596
void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
	if (!status) {
		struct sco_conn *conn;
		conn = sco_conn_add(hcon);
		if (conn)
			sco_conn_ready(conn);
	} else
		sco_conn_del(hcon, bt_to_errno(status));
}
+Info
0
void sco_connect_cfm(struct hci_conn *hcon, __u8 status)
{
	BT_DBG("hcon %p bdaddr %pMR status %d", hcon, &hcon->dst, status);
	if (!status) {
		struct sco_conn *conn;
		conn = sco_conn_add(hcon);
		if (conn)
			sco_conn_ready(conn);
	} else
		sco_conn_del(hcon, bt_to_errno(status));
}
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
 		hci_conn_accept(pi->conn->hcon, 0);
 		sk->sk_state = BT_CONFIG;
+		msg->msg_namelen = 0;
 
 		release_sock(sk);
 		return 0;
CWE-200
null
null
24,597
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
{
	struct sock *sk;
	int lm = 0;
	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
	/* Find listening sockets */
	read_lock(&sco_sk_list.lock);
	sk_for_each(sk, &sco_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;
		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
		    !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm |= HCI_LM_ACCEPT;
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				*flags |= HCI_PROTO_DEFER;
			break;
		}
	}
	read_unlock(&sco_sk_list.lock);
	return lm;
}
+Info
0
int sco_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, __u8 *flags)
{
	struct sock *sk;
	int lm = 0;
	BT_DBG("hdev %s, bdaddr %pMR", hdev->name, bdaddr);
	/* Find listening sockets */
	read_lock(&sco_sk_list.lock);
	sk_for_each(sk, &sco_sk_list.head) {
		if (sk->sk_state != BT_LISTEN)
			continue;
		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr) ||
		    !bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
			lm |= HCI_LM_ACCEPT;
			if (test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags))
				*flags |= HCI_PROTO_DEFER;
			break;
		}
	}
	read_unlock(&sco_sk_list.lock);
	return lm;
}
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
 		hci_conn_accept(pi->conn->hcon, 0);
 		sk->sk_state = BT_CONFIG;
+		msg->msg_namelen = 0;
 
 		release_sock(sk);
 		return 0;
CWE-200
null
null
24,598
static int sco_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	read_lock(&sco_sk_list.lock);
	sk_for_each(sk, &sco_sk_list.head) {
		seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src,
			   &bt_sk(sk)->dst, sk->sk_state);
	}
	read_unlock(&sco_sk_list.lock);
	return 0;
}
+Info
0
static int sco_debugfs_show(struct seq_file *f, void *p)
{
	struct sock *sk;
	read_lock(&sco_sk_list.lock);
	sk_for_each(sk, &sco_sk_list.head) {
		seq_printf(f, "%pMR %pMR %d\n", &bt_sk(sk)->src,
			   &bt_sk(sk)->dst, sk->sk_state);
	}
	read_unlock(&sco_sk_list.lock);
	return 0;
}
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
 		hci_conn_accept(pi->conn->hcon, 0);
 		sk->sk_state = BT_CONFIG;
+		msg->msg_namelen = 0;
 
 		release_sock(sk);
 		return 0;
CWE-200
null
null
24,599
void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);
	sco_conn_del(hcon, bt_to_errno(reason));
}
+Info
0
void sco_disconn_cfm(struct hci_conn *hcon, __u8 reason)
{
	BT_DBG("hcon %p reason %d", hcon, reason);
	sco_conn_del(hcon, bt_to_errno(reason));
}
@@ -665,6 +665,7 @@ static int sco_sock_recvmsg(struct kiocb *iocb, struct socket *sock,
 	    test_bit(BT_SK_DEFER_SETUP, &bt_sk(sk)->flags)) {
 		hci_conn_accept(pi->conn->hcon, 0);
 		sk->sk_state = BT_CONFIG;
+		msg->msg_namelen = 0;
 
 		release_sock(sk);
 		return 0;
CWE-200
null
null