idx: int64
func_before: string
Vulnerability Classification: string
vul: int64
func_after: string
patch: string
CWE ID: string
lines_before: string
lines_after: string
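Each record below fills these nine fields in order. As a minimal, hypothetical sketch (the file name, CSV storage, and pandas-based loading are assumptions made here, not part of this dump), records with this schema could be loaded and filtered like so:

import pandas as pd

# Assumption: the records are exported to a CSV file with exactly the
# columns listed above; the file name below is hypothetical.
df = pd.read_csv("vulnerability_functions.csv")

# Keep the non-vulnerable samples (vul == 0) whose patch touches
# IFF_TX_SKB_SHARING, the flag cleared in the diffs shown below.
mask = (df["vul"] == 0) & df["patch"].str.contains("IFF_TX_SKB_SHARING", na=False)
print(df.loc[mask, ["idx", "CWE ID", "Vulnerability Classification"]].head())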
19,400
static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) { struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = -EINVAL; if (ops->ndo_fcoe_get_wwn) rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type); return rc; }
DoS
0
static int vlan_dev_fcoe_get_wwn(struct net_device *dev, u64 *wwn, int type) { struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int rc = -EINVAL; if (ops->ndo_fcoe_get_wwn) rc = ops->ndo_fcoe_get_wwn(real_dev, wwn, type); return rc; }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,401
static u32 vlan_dev_fix_features(struct net_device *dev, u32 features) { struct net_device *real_dev = vlan_dev_info(dev)->real_dev; u32 old_features = features; features &= real_dev->features; features &= real_dev->vlan_features; features |= old_features & NETIF_F_SOFT_FEATURES; if (dev_ethtool_get_rx_csum(real_dev)) features |= NETIF_F_RXCSUM; features |= NETIF_F_LLTX; return features; }
DoS
0
static u32 vlan_dev_fix_features(struct net_device *dev, u32 features) { struct net_device *real_dev = vlan_dev_info(dev)->real_dev; u32 old_features = features; features &= real_dev->features; features &= real_dev->vlan_features; features |= old_features & NETIF_F_SOFT_FEATURES; if (dev_ethtool_get_rx_csum(real_dev)) features |= NETIF_F_RXCSUM; features |= NETIF_F_LLTX; return features; }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,402
void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) { strncpy(result, vlan_dev_info(dev)->real_dev->name, 23); }
DoS
0
void vlan_dev_get_realdev_name(const struct net_device *dev, char *result) { strncpy(result, vlan_dev_info(dev)->real_dev->name, 23); }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,403
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { if (vlan_dev_info(dev)->vlan_pcpu_stats) { struct vlan_pcpu_stats *p; u32 rx_errors = 0, tx_dropped = 0; int i; for_each_possible_cpu(i) { u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes; unsigned int start; p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i); do { start = u64_stats_fetch_begin_bh(&p->syncp); rxpackets = p->rx_packets; rxbytes = p->rx_bytes; rxmulticast = p->rx_multicast; txpackets = p->tx_packets; txbytes = p->tx_bytes; } while (u64_stats_fetch_retry_bh(&p->syncp, start)); stats->rx_packets += rxpackets; stats->rx_bytes += rxbytes; stats->multicast += rxmulticast; stats->tx_packets += txpackets; stats->tx_bytes += txbytes; /* rx_errors & tx_dropped are u32 */ rx_errors += p->rx_errors; tx_dropped += p->tx_dropped; } stats->rx_errors = rx_errors; stats->tx_dropped = tx_dropped; } return stats; }
DoS
0
static struct rtnl_link_stats64 *vlan_dev_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats) { if (vlan_dev_info(dev)->vlan_pcpu_stats) { struct vlan_pcpu_stats *p; u32 rx_errors = 0, tx_dropped = 0; int i; for_each_possible_cpu(i) { u64 rxpackets, rxbytes, rxmulticast, txpackets, txbytes; unsigned int start; p = per_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats, i); do { start = u64_stats_fetch_begin_bh(&p->syncp); rxpackets = p->rx_packets; rxbytes = p->rx_bytes; rxmulticast = p->rx_multicast; txpackets = p->tx_packets; txbytes = p->tx_bytes; } while (u64_stats_fetch_retry_bh(&p->syncp, start)); stats->rx_packets += rxpackets; stats->rx_bytes += rxbytes; stats->multicast += rxmulticast; stats->tx_packets += txpackets; stats->tx_bytes += txbytes; /* rx_errors & tx_dropped are u32 */ rx_errors += p->rx_errors; tx_dropped += p->tx_dropped; } stats->rx_errors = rx_errors; stats->tx_dropped = tx_dropped; } return stats; }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,404
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); unsigned int len; int ret; /* Handle non-VLAN frames if they are sent to us, for example by DHCP. * * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... */ if (veth->h_vlan_proto != htons(ETH_P_8021Q) || vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) { u16 vlan_tci; vlan_tci = vlan_dev_info(dev)->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); skb = __vlan_hwaccel_put_tag(skb, vlan_tci); } skb_set_dev(skb, vlan_dev_info(dev)->real_dev); len = skb->len; ret = dev_queue_xmit(skb); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct vlan_pcpu_stats *stats; stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats); u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += len; u64_stats_update_end(&stats->syncp); } else { this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped); } return ret; }
DoS
0
static netdev_tx_t vlan_dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) { struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); unsigned int len; int ret; /* Handle non-VLAN frames if they are sent to us, for example by DHCP. * * NOTE: THIS ASSUMES DIX ETHERNET, SPECIFICALLY NOT SUPPORTING * OTHER THINGS LIKE FDDI/TokenRing/802.3 SNAPs... */ if (veth->h_vlan_proto != htons(ETH_P_8021Q) || vlan_dev_info(dev)->flags & VLAN_FLAG_REORDER_HDR) { u16 vlan_tci; vlan_tci = vlan_dev_info(dev)->vlan_id; vlan_tci |= vlan_dev_get_egress_qos_mask(dev, skb); skb = __vlan_hwaccel_put_tag(skb, vlan_tci); } skb_set_dev(skb, vlan_dev_info(dev)->real_dev); len = skb->len; ret = dev_queue_xmit(skb); if (likely(ret == NET_XMIT_SUCCESS || ret == NET_XMIT_CN)) { struct vlan_pcpu_stats *stats; stats = this_cpu_ptr(vlan_dev_info(dev)->vlan_pcpu_stats); u64_stats_update_begin(&stats->syncp); stats->tx_packets++; stats->tx_bytes += len; u64_stats_update_end(&stats->syncp); } else { this_cpu_inc(vlan_dev_info(dev)->vlan_pcpu_stats->tx_dropped); } return ret; }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,405
static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; struct ifreq ifrr; int err = -EOPNOTSUPP; strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ); ifrr.ifr_ifru = ifr->ifr_ifru; switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: if (netif_device_present(real_dev) && ops->ndo_do_ioctl) err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd); break; } if (!err) ifr->ifr_ifru = ifrr.ifr_ifru; return err; }
DoS
0
static int vlan_dev_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd) { struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; struct ifreq ifrr; int err = -EOPNOTSUPP; strncpy(ifrr.ifr_name, real_dev->name, IFNAMSIZ); ifrr.ifr_ifru = ifr->ifr_ifru; switch (cmd) { case SIOCGMIIPHY: case SIOCGMIIREG: case SIOCSMIIREG: if (netif_device_present(real_dev) && ops->ndo_do_ioctl) err = ops->ndo_do_ioctl(real_dev, &ifrr, cmd); break; } if (!err) ifr->ifr_ifru = ifrr.ifr_ifru; return err; }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,406
static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa) { struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int err = 0; if (netif_device_present(real_dev) && ops->ndo_neigh_setup) err = ops->ndo_neigh_setup(real_dev, pa); return err; }
DoS
0
static int vlan_dev_neigh_setup(struct net_device *dev, struct neigh_parms *pa) { struct net_device *real_dev = vlan_dev_info(dev)->real_dev; const struct net_device_ops *ops = real_dev->netdev_ops; int err = 0; if (netif_device_present(real_dev) && ops->ndo_neigh_setup) err = ops->ndo_neigh_setup(real_dev, pa); return err; }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,407
static int vlan_dev_rebuild_header(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); switch (veth->h_vlan_encapsulated_proto) { #ifdef CONFIG_INET case htons(ETH_P_IP): /* TODO: Confirm this will work with VLAN headers... */ return arp_find(veth->h_dest, skb); #endif default: pr_debug("%s: unable to resolve type %X addresses\n", dev->name, ntohs(veth->h_vlan_encapsulated_proto)); memcpy(veth->h_source, dev->dev_addr, ETH_ALEN); break; } return 0; }
DoS
0
static int vlan_dev_rebuild_header(struct sk_buff *skb) { struct net_device *dev = skb->dev; struct vlan_ethhdr *veth = (struct vlan_ethhdr *)(skb->data); switch (veth->h_vlan_encapsulated_proto) { #ifdef CONFIG_INET case htons(ETH_P_IP): /* TODO: Confirm this will work with VLAN headers... */ return arp_find(veth->h_dest, skb); #endif default: pr_debug("%s: unable to resolve type %X addresses\n", dev->name, ntohs(veth->h_vlan_encapsulated_proto)); memcpy(veth->h_source, dev->dev_addr, ETH_ALEN); break; } return 0; }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,408
void vlan_dev_set_ingress_priority(const struct net_device *dev, u32 skb_prio, u16 vlan_prio) { struct vlan_dev_info *vlan = vlan_dev_info(dev); if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio) vlan->nr_ingress_mappings--; else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio) vlan->nr_ingress_mappings++; vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio; }
DoS
0
void vlan_dev_set_ingress_priority(const struct net_device *dev, u32 skb_prio, u16 vlan_prio) { struct vlan_dev_info *vlan = vlan_dev_info(dev); if (vlan->ingress_priority_map[vlan_prio & 0x7] && !skb_prio) vlan->nr_ingress_mappings--; else if (!vlan->ingress_priority_map[vlan_prio & 0x7] && skb_prio) vlan->nr_ingress_mappings++; vlan->ingress_priority_map[vlan_prio & 0x7] = skb_prio; }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,409
static void vlan_dev_set_lockdep_one(struct net_device *dev, struct netdev_queue *txq, void *_subclass) { lockdep_set_class_and_subclass(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key, *(int *)_subclass); }
DoS
0
static void vlan_dev_set_lockdep_one(struct net_device *dev, struct netdev_queue *txq, void *_subclass) { lockdep_set_class_and_subclass(&txq->_xmit_lock, &vlan_netdev_xmit_lock_key, *(int *)_subclass); }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,410
static int vlan_dev_stop(struct net_device *dev) { struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev = vlan->real_dev; dev_mc_unsync(real_dev, dev); dev_uc_unsync(real_dev, dev); if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(real_dev, -1); if (dev->flags & IFF_PROMISC) dev_set_promiscuity(real_dev, -1); if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); netif_carrier_off(dev); return 0; }
DoS
0
static int vlan_dev_stop(struct net_device *dev) { struct vlan_dev_info *vlan = vlan_dev_info(dev); struct net_device *real_dev = vlan->real_dev; dev_mc_unsync(real_dev, dev); dev_uc_unsync(real_dev, dev); if (dev->flags & IFF_ALLMULTI) dev_set_allmulti(real_dev, -1); if (dev->flags & IFF_PROMISC) dev_set_promiscuity(real_dev, -1); if (compare_ether_addr(dev->dev_addr, real_dev->dev_addr)) dev_uc_del(real_dev, dev->dev_addr); netif_carrier_off(dev); return 0; }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,411
static void vlan_dev_uninit(struct net_device *dev) { struct vlan_priority_tci_mapping *pm; struct vlan_dev_info *vlan = vlan_dev_info(dev); int i; free_percpu(vlan->vlan_pcpu_stats); vlan->vlan_pcpu_stats = NULL; for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { while ((pm = vlan->egress_priority_map[i]) != NULL) { vlan->egress_priority_map[i] = pm->next; kfree(pm); } } }
DoS
0
static void vlan_dev_uninit(struct net_device *dev) { struct vlan_priority_tci_mapping *pm; struct vlan_dev_info *vlan = vlan_dev_info(dev); int i; free_percpu(vlan->vlan_pcpu_stats); vlan->vlan_pcpu_stats = NULL; for (i = 0; i < ARRAY_SIZE(vlan->egress_priority_map); i++) { while ((pm = vlan->egress_priority_map[i]) != NULL) { vlan->egress_priority_map[i] = pm->next; kfree(pm); } } }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,412
static void vlan_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, vlan_fullname); strcpy(info->version, vlan_version); strcpy(info->fw_version, "N/A"); }
DoS
0
static void vlan_ethtool_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info) { strcpy(info->driver, vlan_fullname); strcpy(info->version, vlan_version); strcpy(info->fw_version, "N/A"); }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,413
static int vlan_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { const struct vlan_dev_info *vlan = vlan_dev_info(dev); return dev_ethtool_get_settings(vlan->real_dev, cmd); }
DoS
0
static int vlan_ethtool_get_settings(struct net_device *dev, struct ethtool_cmd *cmd) { const struct vlan_dev_info *vlan = vlan_dev_info(dev); return dev_ethtool_get_settings(vlan->real_dev, cmd); }
@@ -695,7 +695,7 @@ void vlan_setup(struct net_device *dev) ether_setup(dev); dev->priv_flags |= IFF_802_1Q_VLAN; - dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; + dev->priv_flags &= ~(IFF_XMIT_DST_RELEASE | IFF_TX_SKB_SHARING); dev->tx_queue_len = 0; dev->netdev_ops = &vlan_netdev_ops;
CWE-264
null
null
19,414
static int bnep_net_close(struct net_device *dev) { netif_stop_queue(dev); return 0; }
DoS
0
static int bnep_net_close(struct net_device *dev) { netif_stop_queue(dev); return 0; }
@@ -231,6 +231,7 @@ void bnep_net_setup(struct net_device *dev) dev->addr_len = ETH_ALEN; ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &bnep_netdev_ops; dev->watchdog_timeo = HZ * 2;
CWE-264
null
null
19,415
static inline u16 bnep_net_eth_proto(struct sk_buff *skb) { struct ethhdr *eh = (void *) skb->data; u16 proto = ntohs(eh->h_proto); if (proto >= 1536) return proto; if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF)) return ETH_P_802_3; return ETH_P_802_2; }
DoS
0
static inline u16 bnep_net_eth_proto(struct sk_buff *skb) { struct ethhdr *eh = (void *) skb->data; u16 proto = ntohs(eh->h_proto); if (proto >= 1536) return proto; if (get_unaligned((__be16 *) skb->data) == htons(0xFFFF)) return ETH_P_802_3; return ETH_P_802_2; }
@@ -231,6 +231,7 @@ void bnep_net_setup(struct net_device *dev) dev->addr_len = ETH_ALEN; ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &bnep_netdev_ops; dev->watchdog_timeo = HZ * 2;
CWE-264
null
null
19,416
static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) { struct ethhdr *eh = (void *) skb->data; if ((eh->h_dest[0] & 1) && !test_bit(bnep_mc_hash(eh->h_dest), (ulong *) &s->mc_filter)) return 1; return 0; }
DoS
0
static inline int bnep_net_mc_filter(struct sk_buff *skb, struct bnep_session *s) { struct ethhdr *eh = (void *) skb->data; if ((eh->h_dest[0] & 1) && !test_bit(bnep_mc_hash(eh->h_dest), (ulong *) &s->mc_filter)) return 1; return 0; }
@@ -231,6 +231,7 @@ void bnep_net_setup(struct net_device *dev) dev->addr_len = ETH_ALEN; ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &bnep_netdev_ops; dev->watchdog_timeo = HZ * 2;
CWE-264
null
null
19,417
static int bnep_net_open(struct net_device *dev) { netif_start_queue(dev); return 0; }
DoS
0
static int bnep_net_open(struct net_device *dev) { netif_start_queue(dev); return 0; }
@@ -231,6 +231,7 @@ void bnep_net_setup(struct net_device *dev) dev->addr_len = ETH_ALEN; ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &bnep_netdev_ops; dev->watchdog_timeo = HZ * 2;
CWE-264
null
null
19,418
static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) { u16 proto = bnep_net_eth_proto(skb); struct bnep_proto_filter *f = s->proto_filter; int i; for (i = 0; i < BNEP_MAX_PROTO_FILTERS && f[i].end; i++) { if (proto >= f[i].start && proto <= f[i].end) return 0; } BT_DBG("BNEP: filtered skb %p, proto 0x%.4x", skb, proto); return 1; }
DoS
0
static inline int bnep_net_proto_filter(struct sk_buff *skb, struct bnep_session *s) { u16 proto = bnep_net_eth_proto(skb); struct bnep_proto_filter *f = s->proto_filter; int i; for (i = 0; i < BNEP_MAX_PROTO_FILTERS && f[i].end; i++) { if (proto >= f[i].start && proto <= f[i].end) return 0; } BT_DBG("BNEP: filtered skb %p, proto 0x%.4x", skb, proto); return 1; }
@@ -231,6 +231,7 @@ void bnep_net_setup(struct net_device *dev) dev->addr_len = ETH_ALEN; ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &bnep_netdev_ops; dev->watchdog_timeo = HZ * 2;
CWE-264
null
null
19,419
static int bnep_net_set_mac_addr(struct net_device *dev, void *arg) { BT_DBG("%s", dev->name); return 0; }
DoS
0
static int bnep_net_set_mac_addr(struct net_device *dev, void *arg) { BT_DBG("%s", dev->name); return 0; }
@@ -231,6 +231,7 @@ void bnep_net_setup(struct net_device *dev) dev->addr_len = ETH_ALEN; ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &bnep_netdev_ops; dev->watchdog_timeo = HZ * 2;
CWE-264
null
null
19,420
static void bnep_net_set_mc_list(struct net_device *dev) { #ifdef CONFIG_BT_BNEP_MC_FILTER struct bnep_session *s = netdev_priv(dev); struct sock *sk = s->sock->sk; struct bnep_set_filter_req *r; struct sk_buff *skb; int size; BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev)); size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2; skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { BT_ERR("%s Multicast list allocation failed", dev->name); return; } r = (void *) skb->data; __skb_put(skb, sizeof(*r)); r->type = BNEP_CONTROL; r->ctrl = BNEP_FILTER_MULTI_ADDR_SET; if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { u8 start[ETH_ALEN] = { 0x01 }; /* Request all addresses */ memcpy(__skb_put(skb, ETH_ALEN), start, ETH_ALEN); memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); r->len = htons(ETH_ALEN * 2); } else { struct netdev_hw_addr *ha; int i, len = skb->len; if (dev->flags & IFF_BROADCAST) { memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); } /* FIXME: We should group addresses here. */ i = 0; netdev_for_each_mc_addr(ha, dev) { if (i == BNEP_MAX_MULTICAST_FILTERS) break; memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); i++; } r->len = htons(skb->len - len); } skb_queue_tail(&sk->sk_write_queue, skb); wake_up_interruptible(sk_sleep(sk)); #endif }
DoS
0
static void bnep_net_set_mc_list(struct net_device *dev) { #ifdef CONFIG_BT_BNEP_MC_FILTER struct bnep_session *s = netdev_priv(dev); struct sock *sk = s->sock->sk; struct bnep_set_filter_req *r; struct sk_buff *skb; int size; BT_DBG("%s mc_count %d", dev->name, netdev_mc_count(dev)); size = sizeof(*r) + (BNEP_MAX_MULTICAST_FILTERS + 1) * ETH_ALEN * 2; skb = alloc_skb(size, GFP_ATOMIC); if (!skb) { BT_ERR("%s Multicast list allocation failed", dev->name); return; } r = (void *) skb->data; __skb_put(skb, sizeof(*r)); r->type = BNEP_CONTROL; r->ctrl = BNEP_FILTER_MULTI_ADDR_SET; if (dev->flags & (IFF_PROMISC | IFF_ALLMULTI)) { u8 start[ETH_ALEN] = { 0x01 }; /* Request all addresses */ memcpy(__skb_put(skb, ETH_ALEN), start, ETH_ALEN); memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); r->len = htons(ETH_ALEN * 2); } else { struct netdev_hw_addr *ha; int i, len = skb->len; if (dev->flags & IFF_BROADCAST) { memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); memcpy(__skb_put(skb, ETH_ALEN), dev->broadcast, ETH_ALEN); } /* FIXME: We should group addresses here. */ i = 0; netdev_for_each_mc_addr(ha, dev) { if (i == BNEP_MAX_MULTICAST_FILTERS) break; memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); memcpy(__skb_put(skb, ETH_ALEN), ha->addr, ETH_ALEN); i++; } r->len = htons(skb->len - len); } skb_queue_tail(&sk->sk_write_queue, skb); wake_up_interruptible(sk_sleep(sk)); #endif }
@@ -231,6 +231,7 @@ void bnep_net_setup(struct net_device *dev) dev->addr_len = ETH_ALEN; ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &bnep_netdev_ops; dev->watchdog_timeo = HZ * 2;
CWE-264
null
null
19,421
static void bnep_net_timeout(struct net_device *dev) { BT_DBG("net_timeout"); netif_wake_queue(dev); }
DoS
0
static void bnep_net_timeout(struct net_device *dev) { BT_DBG("net_timeout"); netif_wake_queue(dev); }
@@ -231,6 +231,7 @@ void bnep_net_setup(struct net_device *dev) dev->addr_len = ETH_ALEN; ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &bnep_netdev_ops; dev->watchdog_timeo = HZ * 2;
CWE-264
null
null
19,422
static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) { struct net_device *dev; char name[IFNAMSIZ]; struct l2tp_tunnel *tunnel; struct l2tp_session *session; struct l2tp_eth *priv; struct l2tp_eth_sess *spriv; int rc; struct l2tp_eth_net *pn; tunnel = l2tp_tunnel_find(net, tunnel_id); if (!tunnel) { rc = -ENODEV; goto out; } session = l2tp_session_find(net, tunnel, session_id); if (session) { rc = -EEXIST; goto out; } if (cfg->ifname) { dev = dev_get_by_name(net, cfg->ifname); if (dev) { dev_put(dev); rc = -EEXIST; goto out; } strlcpy(name, cfg->ifname, IFNAMSIZ); } else strcpy(name, L2TP_ETH_DEV_NAME); session = l2tp_session_create(sizeof(*spriv), tunnel, session_id, peer_session_id, cfg); if (!session) { rc = -ENOMEM; goto out; } dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup); if (!dev) { rc = -ENOMEM; goto out_del_session; } dev_net_set(dev, net); if (session->mtu == 0) session->mtu = dev->mtu - session->hdr_len; dev->mtu = session->mtu; dev->needed_headroom += session->hdr_len; priv = netdev_priv(dev); priv->dev = dev; priv->session = session; INIT_LIST_HEAD(&priv->list); priv->tunnel_sock = tunnel->sock; session->recv_skb = l2tp_eth_dev_recv; session->session_close = l2tp_eth_delete; #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) session->show = l2tp_eth_show; #endif spriv = l2tp_session_priv(session); spriv->dev = dev; rc = register_netdev(dev); if (rc < 0) goto out_del_dev; /* Must be done after register_netdev() */ strlcpy(session->ifname, dev->name, IFNAMSIZ); dev_hold(dev); pn = l2tp_eth_pernet(dev_net(dev)); spin_lock(&pn->l2tp_eth_lock); list_add(&priv->list, &pn->l2tp_eth_dev_list); spin_unlock(&pn->l2tp_eth_lock); return 0; out_del_dev: free_netdev(dev); out_del_session: l2tp_session_delete(session); out: return rc; }
DoS
0
static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 peer_session_id, struct l2tp_session_cfg *cfg) { struct net_device *dev; char name[IFNAMSIZ]; struct l2tp_tunnel *tunnel; struct l2tp_session *session; struct l2tp_eth *priv; struct l2tp_eth_sess *spriv; int rc; struct l2tp_eth_net *pn; tunnel = l2tp_tunnel_find(net, tunnel_id); if (!tunnel) { rc = -ENODEV; goto out; } session = l2tp_session_find(net, tunnel, session_id); if (session) { rc = -EEXIST; goto out; } if (cfg->ifname) { dev = dev_get_by_name(net, cfg->ifname); if (dev) { dev_put(dev); rc = -EEXIST; goto out; } strlcpy(name, cfg->ifname, IFNAMSIZ); } else strcpy(name, L2TP_ETH_DEV_NAME); session = l2tp_session_create(sizeof(*spriv), tunnel, session_id, peer_session_id, cfg); if (!session) { rc = -ENOMEM; goto out; } dev = alloc_netdev(sizeof(*priv), name, l2tp_eth_dev_setup); if (!dev) { rc = -ENOMEM; goto out_del_session; } dev_net_set(dev, net); if (session->mtu == 0) session->mtu = dev->mtu - session->hdr_len; dev->mtu = session->mtu; dev->needed_headroom += session->hdr_len; priv = netdev_priv(dev); priv->dev = dev; priv->session = session; INIT_LIST_HEAD(&priv->list); priv->tunnel_sock = tunnel->sock; session->recv_skb = l2tp_eth_dev_recv; session->session_close = l2tp_eth_delete; #if defined(CONFIG_L2TP_DEBUGFS) || defined(CONFIG_L2TP_DEBUGFS_MODULE) session->show = l2tp_eth_show; #endif spriv = l2tp_session_priv(session); spriv->dev = dev; rc = register_netdev(dev); if (rc < 0) goto out_del_dev; /* Must be done after register_netdev() */ strlcpy(session->ifname, dev->name, IFNAMSIZ); dev_hold(dev); pn = l2tp_eth_pernet(dev_net(dev)); spin_lock(&pn->l2tp_eth_lock); list_add(&priv->list, &pn->l2tp_eth_dev_list); spin_unlock(&pn->l2tp_eth_lock); return 0; out_del_dev: free_netdev(dev); out_del_session: l2tp_session_delete(session); out: return rc; }
@@ -103,7 +103,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = { static void l2tp_eth_dev_setup(struct net_device *dev) { ether_setup(dev); - + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &l2tp_eth_netdev_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,423
static int l2tp_eth_dev_init(struct net_device *dev) { struct l2tp_eth *priv = netdev_priv(dev); priv->dev = dev; random_ether_addr(dev->dev_addr); memset(&dev->broadcast[0], 0xff, 6); return 0; }
DoS
0
static int l2tp_eth_dev_init(struct net_device *dev) { struct l2tp_eth *priv = netdev_priv(dev); priv->dev = dev; random_ether_addr(dev->dev_addr); memset(&dev->broadcast[0], 0xff, 6); return 0; }
@@ -103,7 +103,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = { static void l2tp_eth_dev_setup(struct net_device *dev) { ether_setup(dev); - + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &l2tp_eth_netdev_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,424
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) { struct l2tp_eth_sess *spriv = l2tp_session_priv(session); struct net_device *dev = spriv->dev; if (session->debug & L2TP_MSG_DATA) { unsigned int length; int offset; u8 *ptr = skb->data; length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) goto error; printk(KERN_DEBUG "%s: eth recv: ", session->name); offset = 0; do { printk(" %02X", ptr[offset]); } while (++offset < length); printk("\n"); } if (!pskb_may_pull(skb, sizeof(ETH_HLEN))) goto error; secpath_reset(skb); /* checksums verified by L2TP */ skb->ip_summed = CHECKSUM_NONE; skb_dst_drop(skb); nf_reset(skb); if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) { dev->stats.rx_packets++; dev->stats.rx_bytes += data_len; } else dev->stats.rx_errors++; return; error: dev->stats.rx_errors++; kfree_skb(skb); }
DoS
0
static void l2tp_eth_dev_recv(struct l2tp_session *session, struct sk_buff *skb, int data_len) { struct l2tp_eth_sess *spriv = l2tp_session_priv(session); struct net_device *dev = spriv->dev; if (session->debug & L2TP_MSG_DATA) { unsigned int length; int offset; u8 *ptr = skb->data; length = min(32u, skb->len); if (!pskb_may_pull(skb, length)) goto error; printk(KERN_DEBUG "%s: eth recv: ", session->name); offset = 0; do { printk(" %02X", ptr[offset]); } while (++offset < length); printk("\n"); } if (!pskb_may_pull(skb, sizeof(ETH_HLEN))) goto error; secpath_reset(skb); /* checksums verified by L2TP */ skb->ip_summed = CHECKSUM_NONE; skb_dst_drop(skb); nf_reset(skb); if (dev_forward_skb(dev, skb) == NET_RX_SUCCESS) { dev->stats.rx_packets++; dev->stats.rx_bytes += data_len; } else dev->stats.rx_errors++; return; error: dev->stats.rx_errors++; kfree_skb(skb); }
@@ -103,7 +103,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = { static void l2tp_eth_dev_setup(struct net_device *dev) { ether_setup(dev); - + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &l2tp_eth_netdev_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,425
static void l2tp_eth_dev_uninit(struct net_device *dev) { struct l2tp_eth *priv = netdev_priv(dev); struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev)); spin_lock(&pn->l2tp_eth_lock); list_del_init(&priv->list); spin_unlock(&pn->l2tp_eth_lock); dev_put(dev); }
DoS
0
static void l2tp_eth_dev_uninit(struct net_device *dev) { struct l2tp_eth *priv = netdev_priv(dev); struct l2tp_eth_net *pn = l2tp_eth_pernet(dev_net(dev)); spin_lock(&pn->l2tp_eth_lock); list_del_init(&priv->list); spin_unlock(&pn->l2tp_eth_lock); dev_put(dev); }
@@ -103,7 +103,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = { static void l2tp_eth_dev_setup(struct net_device *dev) { ether_setup(dev); - + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &l2tp_eth_netdev_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,426
static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev) { struct l2tp_eth *priv = netdev_priv(dev); struct l2tp_session *session = priv->session; l2tp_xmit_skb(session, skb, session->hdr_len); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; return 0; }
DoS
0
static int l2tp_eth_dev_xmit(struct sk_buff *skb, struct net_device *dev) { struct l2tp_eth *priv = netdev_priv(dev); struct l2tp_session *session = priv->session; l2tp_xmit_skb(session, skb, session->hdr_len); dev->stats.tx_bytes += skb->len; dev->stats.tx_packets++; return 0; }
@@ -103,7 +103,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = { static void l2tp_eth_dev_setup(struct net_device *dev) { ether_setup(dev); - + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &l2tp_eth_netdev_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,427
static void __exit l2tp_eth_exit(void) { unregister_pernet_device(&l2tp_eth_net_ops); l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); }
DoS
0
static void __exit l2tp_eth_exit(void) { unregister_pernet_device(&l2tp_eth_net_ops); l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); }
@@ -103,7 +103,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = { static void l2tp_eth_dev_setup(struct net_device *dev) { ether_setup(dev); - + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &l2tp_eth_netdev_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,428
static int __init l2tp_eth_init(void) { int err = 0; err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); if (err) goto out; err = register_pernet_device(&l2tp_eth_net_ops); if (err) goto out_unreg; printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n"); return 0; out_unreg: l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); out: return err; }
DoS
0
static int __init l2tp_eth_init(void) { int err = 0; err = l2tp_nl_register_ops(L2TP_PWTYPE_ETH, &l2tp_eth_nl_cmd_ops); if (err) goto out; err = register_pernet_device(&l2tp_eth_net_ops); if (err) goto out_unreg; printk(KERN_INFO "L2TP ethernet pseudowire support (L2TPv3)\n"); return 0; out_unreg: l2tp_nl_unregister_ops(L2TP_PWTYPE_ETH); out: return err; }
@@ -103,7 +103,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = { static void l2tp_eth_dev_setup(struct net_device *dev) { ether_setup(dev); - + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &l2tp_eth_netdev_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,429
static void l2tp_eth_show(struct seq_file *m, void *arg) { struct l2tp_session *session = arg; struct l2tp_eth_sess *spriv = l2tp_session_priv(session); struct net_device *dev = spriv->dev; seq_printf(m, " interface %s\n", dev->name); }
DoS
0
static void l2tp_eth_show(struct seq_file *m, void *arg) { struct l2tp_session *session = arg; struct l2tp_eth_sess *spriv = l2tp_session_priv(session); struct net_device *dev = spriv->dev; seq_printf(m, " interface %s\n", dev->name); }
@@ -103,7 +103,7 @@ static struct net_device_ops l2tp_eth_netdev_ops = { static void l2tp_eth_dev_setup(struct net_device *dev) { ether_setup(dev); - + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &l2tp_eth_netdev_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,430
u32 __ieee80211_recalc_idle(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; int count = 0; bool working = false, scanning = false, hw_roc = false; struct ieee80211_work *wk; unsigned int led_trig_start = 0, led_trig_stop = 0; #ifdef CONFIG_PROVE_LOCKING WARN_ON(debug_locks && !lockdep_rtnl_is_held() && !lockdep_is_held(&local->iflist_mtx)); #endif lockdep_assert_held(&local->mtx); list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) { sdata->vif.bss_conf.idle = true; continue; } sdata->old_idle = sdata->vif.bss_conf.idle; /* do not count disabled managed interfaces */ if (sdata->vif.type == NL80211_IFTYPE_STATION && !sdata->u.mgd.associated) { sdata->vif.bss_conf.idle = true; continue; } /* do not count unused IBSS interfaces */ if (sdata->vif.type == NL80211_IFTYPE_ADHOC && !sdata->u.ibss.ssid_len) { sdata->vif.bss_conf.idle = true; continue; } /* count everything else */ count++; } list_for_each_entry(wk, &local->work_list, list) { working = true; wk->sdata->vif.bss_conf.idle = false; } if (local->scan_sdata) { scanning = true; local->scan_sdata->vif.bss_conf.idle = false; } if (local->hw_roc_channel) hw_roc = true; list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->old_idle == sdata->vif.bss_conf.idle) continue; if (!ieee80211_sdata_running(sdata)) continue; ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE); } if (working || scanning || hw_roc) led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK; else led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK; if (count) led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; else led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); if (hw_roc) return ieee80211_idle_off(local, "hw remain-on-channel"); if (working) return ieee80211_idle_off(local, "working"); if (scanning) return ieee80211_idle_off(local, "scanning"); if (!count) return ieee80211_idle_on(local); else return ieee80211_idle_off(local, "in use"); return 0; }
DoS
0
u32 __ieee80211_recalc_idle(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata; int count = 0; bool working = false, scanning = false, hw_roc = false; struct ieee80211_work *wk; unsigned int led_trig_start = 0, led_trig_stop = 0; #ifdef CONFIG_PROVE_LOCKING WARN_ON(debug_locks && !lockdep_rtnl_is_held() && !lockdep_is_held(&local->iflist_mtx)); #endif lockdep_assert_held(&local->mtx); list_for_each_entry(sdata, &local->interfaces, list) { if (!ieee80211_sdata_running(sdata)) { sdata->vif.bss_conf.idle = true; continue; } sdata->old_idle = sdata->vif.bss_conf.idle; /* do not count disabled managed interfaces */ if (sdata->vif.type == NL80211_IFTYPE_STATION && !sdata->u.mgd.associated) { sdata->vif.bss_conf.idle = true; continue; } /* do not count unused IBSS interfaces */ if (sdata->vif.type == NL80211_IFTYPE_ADHOC && !sdata->u.ibss.ssid_len) { sdata->vif.bss_conf.idle = true; continue; } /* count everything else */ count++; } list_for_each_entry(wk, &local->work_list, list) { working = true; wk->sdata->vif.bss_conf.idle = false; } if (local->scan_sdata) { scanning = true; local->scan_sdata->vif.bss_conf.idle = false; } if (local->hw_roc_channel) hw_roc = true; list_for_each_entry(sdata, &local->interfaces, list) { if (sdata->old_idle == sdata->vif.bss_conf.idle) continue; if (!ieee80211_sdata_running(sdata)) continue; ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_IDLE); } if (working || scanning || hw_roc) led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_WORK; else led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_WORK; if (count) led_trig_start |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; else led_trig_stop |= IEEE80211_TPT_LEDTRIG_FL_CONNECTED; ieee80211_mod_tpt_led_trig(local, led_trig_start, led_trig_stop); if (hw_roc) return ieee80211_idle_off(local, "hw remain-on-channel"); if (working) return ieee80211_idle_off(local, "working"); if (scanning) return ieee80211_idle_off(local, "scanning"); if (!count) return ieee80211_idle_on(local); else return ieee80211_idle_off(local, "in use"); return 0; }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,431
static inline int identical_mac_addr_allowed(int type1, int type2) { return type1 == NL80211_IFTYPE_MONITOR || type2 == NL80211_IFTYPE_MONITOR || (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) || (type1 == NL80211_IFTYPE_WDS && (type2 == NL80211_IFTYPE_WDS || type2 == NL80211_IFTYPE_AP)) || (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) || (type1 == NL80211_IFTYPE_AP_VLAN && (type2 == NL80211_IFTYPE_AP || type2 == NL80211_IFTYPE_AP_VLAN)); }
DoS
0
static inline int identical_mac_addr_allowed(int type1, int type2) { return type1 == NL80211_IFTYPE_MONITOR || type2 == NL80211_IFTYPE_MONITOR || (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_WDS) || (type1 == NL80211_IFTYPE_WDS && (type2 == NL80211_IFTYPE_WDS || type2 == NL80211_IFTYPE_AP)) || (type1 == NL80211_IFTYPE_AP && type2 == NL80211_IFTYPE_AP_VLAN) || (type1 == NL80211_IFTYPE_AP_VLAN && (type2 == NL80211_IFTYPE_AP || type2 == NL80211_IFTYPE_AP_VLAN)); }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,432
static int ieee80211_change_mac(struct net_device *dev, void *addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sockaddr *sa = addr; int ret; if (ieee80211_sdata_running(sdata)) return -EBUSY; ret = eth_mac_addr(dev, sa); if (ret == 0) memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN); return ret; }
DoS
0
static int ieee80211_change_mac(struct net_device *dev, void *addr) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct sockaddr *sa = addr; int ret; if (ieee80211_sdata_running(sdata)) return -EBUSY; ret = eth_mac_addr(dev, sa); if (ret == 0) memcpy(sdata->vif.addr, sa->sa_data, ETH_ALEN); return ret; }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,433
static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) { int meshhdrlen; struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0; /* FIX: what would be proper limits for MTU? * This interface uses 802.3 frames. */ if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) { return -EINVAL; } #ifdef CONFIG_MAC80211_VERBOSE_DEBUG printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu); #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ dev->mtu = new_mtu; return 0; }
DoS
0
static int ieee80211_change_mtu(struct net_device *dev, int new_mtu) { int meshhdrlen; struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); meshhdrlen = (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ? 5 : 0; /* FIX: what would be proper limits for MTU? * This interface uses 802.3 frames. */ if (new_mtu < 256 || new_mtu > IEEE80211_MAX_DATA_LEN - 24 - 6 - meshhdrlen) { return -EINVAL; } #ifdef CONFIG_MAC80211_VERBOSE_DEBUG printk(KERN_DEBUG "%s: setting MTU %d\n", dev->name, new_mtu); #endif /* CONFIG_MAC80211_VERBOSE_DEBUG */ dev->mtu = new_mtu; return 0; }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,434
static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_down) { struct ieee80211_local *local = sdata->local; unsigned long flags; struct sk_buff *skb, *tmp; u32 hw_reconf_flags = 0; int i; enum nl80211_channel_type orig_ct; clear_bit(SDATA_STATE_RUNNING, &sdata->state); if (local->scan_sdata == sdata) ieee80211_scan_cancel(local); /* * Stop TX on this interface first. */ netif_tx_stop_all_queues(sdata->dev); /* * Purge work for this interface. */ ieee80211_work_purge(sdata); /* * Remove all stations associated with this interface. * * This must be done before calling ops->remove_interface() * because otherwise we can later invoke ops->sta_notify() * whenever the STAs are removed, and that invalidates driver * assumptions about always getting a vif pointer that is valid * (because if we remove a STA after ops->remove_interface() * the driver will have removed the vif info already!) * * This is relevant only in AP, WDS and mesh modes, since in * all other modes we've already removed all stations when * disconnecting etc. */ sta_info_flush(local, sdata); /* * Don't count this interface for promisc/allmulti while it * is down. dev_mc_unsync() will invoke set_multicast_list * on the master interface which will sync these down to the * hardware as filter flags. */ if (sdata->flags & IEEE80211_SDATA_ALLMULTI) atomic_dec(&local->iff_allmultis); if (sdata->flags & IEEE80211_SDATA_PROMISC) atomic_dec(&local->iff_promiscs); if (sdata->vif.type == NL80211_IFTYPE_AP) { local->fif_pspoll--; local->fif_probe_req--; } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { local->fif_probe_req--; } netif_addr_lock_bh(sdata->dev); spin_lock_bh(&local->filter_lock); __hw_addr_unsync(&local->mc_list, &sdata->dev->mc, sdata->dev->addr_len); spin_unlock_bh(&local->filter_lock); netif_addr_unlock_bh(sdata->dev); ieee80211_configure_filter(local); del_timer_sync(&local->dynamic_ps_timer); cancel_work_sync(&local->dynamic_ps_enable_work); /* APs need special treatment */ if (sdata->vif.type == NL80211_IFTYPE_AP) { struct ieee80211_sub_if_data *vlan, *tmpsdata; struct beacon_data *old_beacon = rtnl_dereference(sdata->u.ap.beacon); /* sdata_running will return false, so this will disable */ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); /* remove beacon */ rcu_assign_pointer(sdata->u.ap.beacon, NULL); synchronize_rcu(); kfree(old_beacon); /* free all potentially still buffered bcast frames */ while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) { local->total_ps_buffered--; dev_kfree_skb(skb); } /* down all dependent devices, that is VLANs */ list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, u.vlan.list) dev_close(vlan->dev); WARN_ON(!list_empty(&sdata->u.ap.vlans)); } if (going_down) local->open_count--; switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: list_del(&sdata->u.vlan.list); /* no need to tell driver */ break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { local->cooked_mntrs--; break; } local->monitors--; if (local->monitors == 0) { local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR; hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; } ieee80211_adjust_monitor_flags(sdata, -1); ieee80211_configure_filter(local); break; default: flush_work(&sdata->work); /* * When we get here, the interface is marked down. * Call synchronize_rcu() to wait for the RX path * should it be using the interface and enqueuing * frames at this very time on another CPU. */ synchronize_rcu(); skb_queue_purge(&sdata->skb_queue); /* * Disable beaconing here for mesh only, AP and IBSS * are already taken care of. */ if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); /* * Free all remaining keys, there shouldn't be any, * except maybe group keys in AP more or WDS? */ ieee80211_free_keys(sdata); if (going_down) drv_remove_interface(local, &sdata->vif); } sdata->bss = NULL; mutex_lock(&local->mtx); hw_reconf_flags |= __ieee80211_recalc_idle(local); mutex_unlock(&local->mtx); ieee80211_recalc_ps(local, -1); if (local->open_count == 0) { if (local->ops->napi_poll) napi_disable(&local->napi); ieee80211_clear_tx_pending(local); ieee80211_stop_device(local); /* no reconfiguring after stop! */ hw_reconf_flags = 0; } /* Re-calculate channel-type, in case there are multiple vifs * on different channel types. */ orig_ct = local->_oper_channel_type; ieee80211_set_channel_type(local, NULL, NL80211_CHAN_NO_HT); /* do after stop to avoid reconfiguring when we stop anyway */ if (hw_reconf_flags || (orig_ct != local->_oper_channel_type)) ieee80211_hw_config(local, hw_reconf_flags); spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { skb_queue_walk_safe(&local->pending[i], skb, tmp) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (info->control.vif == &sdata->vif) { __skb_unlink(skb, &local->pending[i]); dev_kfree_skb_irq(skb); } } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); }
DoS
0
static void ieee80211_do_stop(struct ieee80211_sub_if_data *sdata, bool going_down) { struct ieee80211_local *local = sdata->local; unsigned long flags; struct sk_buff *skb, *tmp; u32 hw_reconf_flags = 0; int i; enum nl80211_channel_type orig_ct; clear_bit(SDATA_STATE_RUNNING, &sdata->state); if (local->scan_sdata == sdata) ieee80211_scan_cancel(local); /* * Stop TX on this interface first. */ netif_tx_stop_all_queues(sdata->dev); /* * Purge work for this interface. */ ieee80211_work_purge(sdata); /* * Remove all stations associated with this interface. * * This must be done before calling ops->remove_interface() * because otherwise we can later invoke ops->sta_notify() * whenever the STAs are removed, and that invalidates driver * assumptions about always getting a vif pointer that is valid * (because if we remove a STA after ops->remove_interface() * the driver will have removed the vif info already!) * * This is relevant only in AP, WDS and mesh modes, since in * all other modes we've already removed all stations when * disconnecting etc. */ sta_info_flush(local, sdata); /* * Don't count this interface for promisc/allmulti while it * is down. dev_mc_unsync() will invoke set_multicast_list * on the master interface which will sync these down to the * hardware as filter flags. */ if (sdata->flags & IEEE80211_SDATA_ALLMULTI) atomic_dec(&local->iff_allmultis); if (sdata->flags & IEEE80211_SDATA_PROMISC) atomic_dec(&local->iff_promiscs); if (sdata->vif.type == NL80211_IFTYPE_AP) { local->fif_pspoll--; local->fif_probe_req--; } else if (sdata->vif.type == NL80211_IFTYPE_ADHOC) { local->fif_probe_req--; } netif_addr_lock_bh(sdata->dev); spin_lock_bh(&local->filter_lock); __hw_addr_unsync(&local->mc_list, &sdata->dev->mc, sdata->dev->addr_len); spin_unlock_bh(&local->filter_lock); netif_addr_unlock_bh(sdata->dev); ieee80211_configure_filter(local); del_timer_sync(&local->dynamic_ps_timer); cancel_work_sync(&local->dynamic_ps_enable_work); /* APs need special treatment */ if (sdata->vif.type == NL80211_IFTYPE_AP) { struct ieee80211_sub_if_data *vlan, *tmpsdata; struct beacon_data *old_beacon = rtnl_dereference(sdata->u.ap.beacon); /* sdata_running will return false, so this will disable */ ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); /* remove beacon */ rcu_assign_pointer(sdata->u.ap.beacon, NULL); synchronize_rcu(); kfree(old_beacon); /* free all potentially still buffered bcast frames */ while ((skb = skb_dequeue(&sdata->u.ap.ps_bc_buf))) { local->total_ps_buffered--; dev_kfree_skb(skb); } /* down all dependent devices, that is VLANs */ list_for_each_entry_safe(vlan, tmpsdata, &sdata->u.ap.vlans, u.vlan.list) dev_close(vlan->dev); WARN_ON(!list_empty(&sdata->u.ap.vlans)); } if (going_down) local->open_count--; switch (sdata->vif.type) { case NL80211_IFTYPE_AP_VLAN: list_del(&sdata->u.vlan.list); /* no need to tell driver */ break; case NL80211_IFTYPE_MONITOR: if (sdata->u.mntr_flags & MONITOR_FLAG_COOK_FRAMES) { local->cooked_mntrs--; break; } local->monitors--; if (local->monitors == 0) { local->hw.conf.flags &= ~IEEE80211_CONF_MONITOR; hw_reconf_flags |= IEEE80211_CONF_CHANGE_MONITOR; } ieee80211_adjust_monitor_flags(sdata, -1); ieee80211_configure_filter(local); break; default: flush_work(&sdata->work); /* * When we get here, the interface is marked down. * Call synchronize_rcu() to wait for the RX path * should it be using the interface and enqueuing * frames at this very time on another CPU. */ synchronize_rcu(); skb_queue_purge(&sdata->skb_queue); /* * Disable beaconing here for mesh only, AP and IBSS * are already taken care of. */ if (sdata->vif.type == NL80211_IFTYPE_MESH_POINT) ieee80211_bss_info_change_notify(sdata, BSS_CHANGED_BEACON_ENABLED); /* * Free all remaining keys, there shouldn't be any, * except maybe group keys in AP more or WDS? */ ieee80211_free_keys(sdata); if (going_down) drv_remove_interface(local, &sdata->vif); } sdata->bss = NULL; mutex_lock(&local->mtx); hw_reconf_flags |= __ieee80211_recalc_idle(local); mutex_unlock(&local->mtx); ieee80211_recalc_ps(local, -1); if (local->open_count == 0) { if (local->ops->napi_poll) napi_disable(&local->napi); ieee80211_clear_tx_pending(local); ieee80211_stop_device(local); /* no reconfiguring after stop! */ hw_reconf_flags = 0; } /* Re-calculate channel-type, in case there are multiple vifs * on different channel types. */ orig_ct = local->_oper_channel_type; ieee80211_set_channel_type(local, NULL, NL80211_CHAN_NO_HT); /* do after stop to avoid reconfiguring when we stop anyway */ if (hw_reconf_flags || (orig_ct != local->_oper_channel_type)) ieee80211_hw_config(local, hw_reconf_flags); spin_lock_irqsave(&local->queue_stop_reason_lock, flags); for (i = 0; i < IEEE80211_MAX_QUEUES; i++) { skb_queue_walk_safe(&local->pending[i], skb, tmp) { struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); if (info->control.vif == &sdata->vif) { __skb_unlink(skb, &local->pending[i]); dev_kfree_skb_irq(skb); } } } spin_unlock_irqrestore(&local->queue_stop_reason_lock, flags); }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,435
static u32 ieee80211_idle_off(struct ieee80211_local *local, const char *reason) { if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) return 0; #ifdef CONFIG_MAC80211_VERBOSE_DEBUG wiphy_debug(local->hw.wiphy, "device no longer idle - %s\n", reason); #endif local->hw.conf.flags &= ~IEEE80211_CONF_IDLE; return IEEE80211_CONF_CHANGE_IDLE; }
DoS
0
static u32 ieee80211_idle_off(struct ieee80211_local *local, const char *reason) { if (!(local->hw.conf.flags & IEEE80211_CONF_IDLE)) return 0; #ifdef CONFIG_MAC80211_VERBOSE_DEBUG wiphy_debug(local->hw.wiphy, "device no longer idle - %s\n", reason); #endif local->hw.conf.flags &= ~IEEE80211_CONF_IDLE; return IEEE80211_CONF_CHANGE_IDLE; }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,436
int ieee80211_if_add(struct ieee80211_local *local, const char *name, struct net_device **new_dev, enum nl80211_iftype type, struct vif_params *params) { struct net_device *ndev; struct ieee80211_sub_if_data *sdata = NULL; int ret, i; ASSERT_RTNL(); ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size, name, ieee80211_if_setup, local->hw.queues, 1); if (!ndev) return -ENOMEM; dev_net_set(ndev, wiphy_net(local->hw.wiphy)); ndev->needed_headroom = local->tx_headroom + 4*6 /* four MAC addresses */ + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */ + 6 /* mesh */ + 8 /* rfc1042/bridge tunnel */ - ETH_HLEN /* ethernet hard_header_len */ + IEEE80211_ENCRYPT_HEADROOM; ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; ret = dev_alloc_name(ndev, ndev->name); if (ret < 0) goto fail; ieee80211_assign_perm_addr(local, ndev, type); memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ sdata = netdev_priv(ndev); ndev->ieee80211_ptr = &sdata->wdev; memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN); memcpy(sdata->name, ndev->name, IFNAMSIZ); /* initialise type-independent data */ sdata->wdev.wiphy = local->hw.wiphy; sdata->local = local; sdata->dev = ndev; #ifdef CONFIG_INET sdata->arp_filter_state = true; #endif for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) skb_queue_head_init(&sdata->fragments[i].skb_list); INIT_LIST_HEAD(&sdata->key_list); for (i = 0; i < IEEE80211_NUM_BANDS; i++) { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[i]; sdata->rc_rateidx_mask[i] = sband ? (1 << sband->n_bitrates) - 1 : 0; } /* setup type-dependent data */ ieee80211_setup_sdata(sdata, type); if (params) { ndev->ieee80211_ptr->use_4addr = params->use_4addr; if (type == NL80211_IFTYPE_STATION) sdata->u.mgd.use_4addr = params->use_4addr; } ret = register_netdevice(ndev); if (ret) goto fail; mutex_lock(&local->iflist_mtx); list_add_tail_rcu(&sdata->list, &local->interfaces); mutex_unlock(&local->iflist_mtx); if (new_dev) *new_dev = ndev; return 0; fail: free_netdev(ndev); return ret; }
DoS
0
int ieee80211_if_add(struct ieee80211_local *local, const char *name, struct net_device **new_dev, enum nl80211_iftype type, struct vif_params *params) { struct net_device *ndev; struct ieee80211_sub_if_data *sdata = NULL; int ret, i; ASSERT_RTNL(); ndev = alloc_netdev_mqs(sizeof(*sdata) + local->hw.vif_data_size, name, ieee80211_if_setup, local->hw.queues, 1); if (!ndev) return -ENOMEM; dev_net_set(ndev, wiphy_net(local->hw.wiphy)); ndev->needed_headroom = local->tx_headroom + 4*6 /* four MAC addresses */ + 2 + 2 + 2 + 2 /* ctl, dur, seq, qos */ + 6 /* mesh */ + 8 /* rfc1042/bridge tunnel */ - ETH_HLEN /* ethernet hard_header_len */ + IEEE80211_ENCRYPT_HEADROOM; ndev->needed_tailroom = IEEE80211_ENCRYPT_TAILROOM; ret = dev_alloc_name(ndev, ndev->name); if (ret < 0) goto fail; ieee80211_assign_perm_addr(local, ndev, type); memcpy(ndev->dev_addr, ndev->perm_addr, ETH_ALEN); SET_NETDEV_DEV(ndev, wiphy_dev(local->hw.wiphy)); /* don't use IEEE80211_DEV_TO_SUB_IF because it checks too much */ sdata = netdev_priv(ndev); ndev->ieee80211_ptr = &sdata->wdev; memcpy(sdata->vif.addr, ndev->dev_addr, ETH_ALEN); memcpy(sdata->name, ndev->name, IFNAMSIZ); /* initialise type-independent data */ sdata->wdev.wiphy = local->hw.wiphy; sdata->local = local; sdata->dev = ndev; #ifdef CONFIG_INET sdata->arp_filter_state = true; #endif for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) skb_queue_head_init(&sdata->fragments[i].skb_list); INIT_LIST_HEAD(&sdata->key_list); for (i = 0; i < IEEE80211_NUM_BANDS; i++) { struct ieee80211_supported_band *sband; sband = local->hw.wiphy->bands[i]; sdata->rc_rateidx_mask[i] = sband ? (1 << sband->n_bitrates) - 1 : 0; } /* setup type-dependent data */ ieee80211_setup_sdata(sdata, type); if (params) { ndev->ieee80211_ptr->use_4addr = params->use_4addr; if (type == NL80211_IFTYPE_STATION) sdata->u.mgd.use_4addr = params->use_4addr; } ret = register_netdevice(ndev); if (ret) goto fail; mutex_lock(&local->iflist_mtx); list_add_tail_rcu(&sdata->list, &local->interfaces); mutex_unlock(&local->iflist_mtx); if (new_dev) *new_dev = ndev; return 0; fail: free_netdev(ndev); return ret; }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,437
int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { int ret; ASSERT_RTNL(); if (type == ieee80211_vif_type_p2p(&sdata->vif)) return 0; /* Setting ad-hoc mode on non-IBSS channel is not supported. */ if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS && type == NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; if (ieee80211_sdata_running(sdata)) { ret = ieee80211_runtime_change_iftype(sdata, type); if (ret) return ret; } else { /* Purge and reset type-dependent state. */ ieee80211_teardown_sdata(sdata->dev); ieee80211_setup_sdata(sdata, type); } /* reset some values that shouldn't be kept across type changes */ sdata->vif.bss_conf.basic_rates = ieee80211_mandatory_rates(sdata->local, sdata->local->hw.conf.channel->band); sdata->drop_unencrypted = 0; if (type == NL80211_IFTYPE_STATION) sdata->u.mgd.use_4addr = false; return 0; }
DoS
0
int ieee80211_if_change_type(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { int ret; ASSERT_RTNL(); if (type == ieee80211_vif_type_p2p(&sdata->vif)) return 0; /* Setting ad-hoc mode on non-IBSS channel is not supported. */ if (sdata->local->oper_channel->flags & IEEE80211_CHAN_NO_IBSS && type == NL80211_IFTYPE_ADHOC) return -EOPNOTSUPP; if (ieee80211_sdata_running(sdata)) { ret = ieee80211_runtime_change_iftype(sdata, type); if (ret) return ret; } else { /* Purge and reset type-dependent state. */ ieee80211_teardown_sdata(sdata->dev); ieee80211_setup_sdata(sdata, type); } /* reset some values that shouldn't be kept across type changes */ sdata->vif.bss_conf.basic_rates = ieee80211_mandatory_rates(sdata->local, sdata->local->hw.conf.channel->band); sdata->drop_unencrypted = 0; if (type == NL80211_IFTYPE_STATION) sdata->u.mgd.use_4addr = false; return 0; }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,438
void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) { ASSERT_RTNL(); mutex_lock(&sdata->local->iflist_mtx); list_del_rcu(&sdata->list); mutex_unlock(&sdata->local->iflist_mtx); synchronize_rcu(); unregister_netdevice(sdata->dev); }
DoS
0
void ieee80211_if_remove(struct ieee80211_sub_if_data *sdata) { ASSERT_RTNL(); mutex_lock(&sdata->local->iflist_mtx); list_del_rcu(&sdata->list); mutex_unlock(&sdata->local->iflist_mtx); synchronize_rcu(); unregister_netdevice(sdata->dev); }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,439
void ieee80211_iface_exit(void) { unregister_netdevice_notifier(&mac80211_netdev_notifier); }
DoS
0
void ieee80211_iface_exit(void) { unregister_netdevice_notifier(&mac80211_netdev_notifier); }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,440
int ieee80211_iface_init(void) { return register_netdevice_notifier(&mac80211_netdev_notifier); }
DoS
0
int ieee80211_iface_init(void) { return register_netdevice_notifier(&mac80211_netdev_notifier); }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,441
static void ieee80211_iface_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, work); struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct sta_info *sta; struct ieee80211_ra_tid *ra_tid; if (!ieee80211_sdata_running(sdata)) return; if (local->scanning) return; /* * ieee80211_queue_work() should have picked up most cases, * here we'll pick the rest. */ if (WARN(local->suspended, "interface work scheduled while going to suspend\n")) return; /* first process frames */ while ((skb = skb_dequeue(&sdata->skb_queue))) { struct ieee80211_mgmt *mgmt = (void *)skb->data; if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) { ra_tid = (void *)&skb->cb; ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra, ra_tid->tid); } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) { ra_tid = (void *)&skb->cb; ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra, ra_tid->tid); } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_BACK) { int len = skb->len; mutex_lock(&local->sta_mtx); sta = sta_info_get_bss(sdata, mgmt->sa); if (sta) { switch (mgmt->u.action.u.addba_req.action_code) { case WLAN_ACTION_ADDBA_REQ: ieee80211_process_addba_request( local, sta, mgmt, len); break; case WLAN_ACTION_ADDBA_RESP: ieee80211_process_addba_resp(local, sta, mgmt, len); break; case WLAN_ACTION_DELBA: ieee80211_process_delba(sdata, sta, mgmt, len); break; default: WARN_ON(1); break; } } mutex_unlock(&local->sta_mtx); } else if (ieee80211_is_data_qos(mgmt->frame_control)) { struct ieee80211_hdr *hdr = (void *)mgmt; /* * So the frame isn't mgmt, but frame_control * is at the right place anyway, of course, so * the if statement is correct. * * Warn if we have other data frame types here, * they must not get here. */ WARN_ON(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)); WARN_ON(!(hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG))); /* * This was a fragment of a frame, received while * a block-ack session was active. That cannot be * right, so terminate the session. */ mutex_lock(&local->sta_mtx); sta = sta_info_get_bss(sdata, mgmt->sa); if (sta) { u16 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; __ieee80211_stop_rx_ba_session( sta, tid, WLAN_BACK_RECIPIENT, WLAN_REASON_QSTA_REQUIRE_SETUP, true); } mutex_unlock(&local->sta_mtx); } else switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_sta_rx_queued_mgmt(sdata, skb); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_rx_queued_mgmt(sdata, skb); break; case NL80211_IFTYPE_MESH_POINT: if (!ieee80211_vif_is_mesh(&sdata->vif)) break; ieee80211_mesh_rx_queued_mgmt(sdata, skb); break; default: WARN(1, "frame for unexpected interface type"); break; } kfree_skb(skb); } /* then other type-dependent work */ switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_sta_work(sdata); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_work(sdata); break; case NL80211_IFTYPE_MESH_POINT: if (!ieee80211_vif_is_mesh(&sdata->vif)) break; ieee80211_mesh_work(sdata); break; default: break; } }
DoS
0
static void ieee80211_iface_work(struct work_struct *work) { struct ieee80211_sub_if_data *sdata = container_of(work, struct ieee80211_sub_if_data, work); struct ieee80211_local *local = sdata->local; struct sk_buff *skb; struct sta_info *sta; struct ieee80211_ra_tid *ra_tid; if (!ieee80211_sdata_running(sdata)) return; if (local->scanning) return; /* * ieee80211_queue_work() should have picked up most cases, * here we'll pick the rest. */ if (WARN(local->suspended, "interface work scheduled while going to suspend\n")) return; /* first process frames */ while ((skb = skb_dequeue(&sdata->skb_queue))) { struct ieee80211_mgmt *mgmt = (void *)skb->data; if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_START) { ra_tid = (void *)&skb->cb; ieee80211_start_tx_ba_cb(&sdata->vif, ra_tid->ra, ra_tid->tid); } else if (skb->pkt_type == IEEE80211_SDATA_QUEUE_AGG_STOP) { ra_tid = (void *)&skb->cb; ieee80211_stop_tx_ba_cb(&sdata->vif, ra_tid->ra, ra_tid->tid); } else if (ieee80211_is_action(mgmt->frame_control) && mgmt->u.action.category == WLAN_CATEGORY_BACK) { int len = skb->len; mutex_lock(&local->sta_mtx); sta = sta_info_get_bss(sdata, mgmt->sa); if (sta) { switch (mgmt->u.action.u.addba_req.action_code) { case WLAN_ACTION_ADDBA_REQ: ieee80211_process_addba_request( local, sta, mgmt, len); break; case WLAN_ACTION_ADDBA_RESP: ieee80211_process_addba_resp(local, sta, mgmt, len); break; case WLAN_ACTION_DELBA: ieee80211_process_delba(sdata, sta, mgmt, len); break; default: WARN_ON(1); break; } } mutex_unlock(&local->sta_mtx); } else if (ieee80211_is_data_qos(mgmt->frame_control)) { struct ieee80211_hdr *hdr = (void *)mgmt; /* * So the frame isn't mgmt, but frame_control * is at the right place anyway, of course, so * the if statement is correct. * * Warn if we have other data frame types here, * they must not get here. */ WARN_ON(hdr->frame_control & cpu_to_le16(IEEE80211_STYPE_NULLFUNC)); WARN_ON(!(hdr->seq_ctrl & cpu_to_le16(IEEE80211_SCTL_FRAG))); /* * This was a fragment of a frame, received while * a block-ack session was active. That cannot be * right, so terminate the session. */ mutex_lock(&local->sta_mtx); sta = sta_info_get_bss(sdata, mgmt->sa); if (sta) { u16 tid = *ieee80211_get_qos_ctl(hdr) & IEEE80211_QOS_CTL_TID_MASK; __ieee80211_stop_rx_ba_session( sta, tid, WLAN_BACK_RECIPIENT, WLAN_REASON_QSTA_REQUIRE_SETUP, true); } mutex_unlock(&local->sta_mtx); } else switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_sta_rx_queued_mgmt(sdata, skb); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_rx_queued_mgmt(sdata, skb); break; case NL80211_IFTYPE_MESH_POINT: if (!ieee80211_vif_is_mesh(&sdata->vif)) break; ieee80211_mesh_rx_queued_mgmt(sdata, skb); break; default: WARN(1, "frame for unexpected interface type"); break; } kfree_skb(skb); } /* then other type-dependent work */ switch (sdata->vif.type) { case NL80211_IFTYPE_STATION: ieee80211_sta_work(sdata); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_work(sdata); break; case NL80211_IFTYPE_MESH_POINT: if (!ieee80211_vif_is_mesh(&sdata->vif)) break; ieee80211_mesh_work(sdata); break; default: break; } }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,442
static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct sk_buff *skb) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct ieee80211_hdr *hdr; struct ieee80211_radiotap_header *rtap = (void *)skb->data; u8 *p; if (local->hw.queues < 4) return 0; if (skb->len < 4 || skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */) return 0; /* doesn't matter, frame will be dropped */ hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); if (!ieee80211_is_data(hdr->frame_control)) { skb->priority = 7; return ieee802_1d_to_ac[skb->priority]; } if (!ieee80211_is_data_qos(hdr->frame_control)) { skb->priority = 0; return ieee802_1d_to_ac[skb->priority]; } p = ieee80211_get_qos_ctl(hdr); skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; return ieee80211_downgrade_queue(local, skb); }
DoS
0
static u16 ieee80211_monitor_select_queue(struct net_device *dev, struct sk_buff *skb) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; struct ieee80211_hdr *hdr; struct ieee80211_radiotap_header *rtap = (void *)skb->data; u8 *p; if (local->hw.queues < 4) return 0; if (skb->len < 4 || skb->len < le16_to_cpu(rtap->it_len) + 2 /* frame control */) return 0; /* doesn't matter, frame will be dropped */ hdr = (void *)((u8 *)skb->data + le16_to_cpu(rtap->it_len)); if (!ieee80211_is_data(hdr->frame_control)) { skb->priority = 7; return ieee802_1d_to_ac[skb->priority]; } if (!ieee80211_is_data_qos(hdr->frame_control)) { skb->priority = 0; return ieee802_1d_to_ac[skb->priority]; } p = ieee80211_get_qos_ctl(hdr); skb->priority = *p & IEEE80211_QOS_CTL_TAG1D_MASK; return ieee80211_downgrade_queue(local, skb); }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,443
static u16 ieee80211_netdev_select_queue(struct net_device *dev, struct sk_buff *skb) { return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); }
DoS
0
static u16 ieee80211_netdev_select_queue(struct net_device *dev, struct sk_buff *skb) { return ieee80211_select_queue(IEEE80211_DEV_TO_SUB_IF(dev), skb); }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,444
void ieee80211_recalc_idle(struct ieee80211_local *local) { u32 chg; mutex_lock(&local->iflist_mtx); chg = __ieee80211_recalc_idle(local); mutex_unlock(&local->iflist_mtx); if (chg) ieee80211_hw_config(local, chg); }
DoS
0
void ieee80211_recalc_idle(struct ieee80211_local *local) { u32 chg; mutex_lock(&local->iflist_mtx); chg = __ieee80211_recalc_idle(local); mutex_unlock(&local->iflist_mtx); if (chg) ieee80211_hw_config(local, chg); }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,445
void ieee80211_remove_interfaces(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *tmp; LIST_HEAD(unreg_list); ASSERT_RTNL(); mutex_lock(&local->iflist_mtx); list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { list_del(&sdata->list); unregister_netdevice_queue(sdata->dev, &unreg_list); } mutex_unlock(&local->iflist_mtx); unregister_netdevice_many(&unreg_list); list_del(&unreg_list); }
DoS
0
void ieee80211_remove_interfaces(struct ieee80211_local *local) { struct ieee80211_sub_if_data *sdata, *tmp; LIST_HEAD(unreg_list); ASSERT_RTNL(); mutex_lock(&local->iflist_mtx); list_for_each_entry_safe(sdata, tmp, &local->interfaces, list) { list_del(&sdata->list); unregister_netdevice_queue(sdata->dev, &unreg_list); } mutex_unlock(&local->iflist_mtx); unregister_netdevice_many(&unreg_list); list_del(&unreg_list); }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,446
static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { struct ieee80211_local *local = sdata->local; int ret, err; enum nl80211_iftype internal_type = type; bool p2p = false; ASSERT_RTNL(); if (!local->ops->change_interface) return -EBUSY; switch (sdata->vif.type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: /* * Could maybe also all others here? * Just not sure how that interacts * with the RX/config path e.g. for * mesh. */ break; default: return -EBUSY; } switch (type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: /* * Could probably support everything * but WDS here (WDS do_open can fail * under memory pressure, which this * code isn't prepared to handle). */ break; case NL80211_IFTYPE_P2P_CLIENT: p2p = true; internal_type = NL80211_IFTYPE_STATION; break; case NL80211_IFTYPE_P2P_GO: p2p = true; internal_type = NL80211_IFTYPE_AP; break; default: return -EBUSY; } ret = ieee80211_check_concurrent_iface(sdata, internal_type); if (ret) return ret; ieee80211_do_stop(sdata, false); ieee80211_teardown_sdata(sdata->dev); ret = drv_change_interface(local, sdata, internal_type, p2p); if (ret) type = sdata->vif.type; ieee80211_setup_sdata(sdata, type); err = ieee80211_do_open(sdata->dev, false); WARN(err, "type change: do_open returned %d", err); return ret; }
DoS
0
static int ieee80211_runtime_change_iftype(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { struct ieee80211_local *local = sdata->local; int ret, err; enum nl80211_iftype internal_type = type; bool p2p = false; ASSERT_RTNL(); if (!local->ops->change_interface) return -EBUSY; switch (sdata->vif.type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: /* * Could maybe also all others here? * Just not sure how that interacts * with the RX/config path e.g. for * mesh. */ break; default: return -EBUSY; } switch (type) { case NL80211_IFTYPE_AP: case NL80211_IFTYPE_STATION: case NL80211_IFTYPE_ADHOC: /* * Could probably support everything * but WDS here (WDS do_open can fail * under memory pressure, which this * code isn't prepared to handle). */ break; case NL80211_IFTYPE_P2P_CLIENT: p2p = true; internal_type = NL80211_IFTYPE_STATION; break; case NL80211_IFTYPE_P2P_GO: p2p = true; internal_type = NL80211_IFTYPE_AP; break; default: return -EBUSY; } ret = ieee80211_check_concurrent_iface(sdata, internal_type); if (ret) return ret; ieee80211_do_stop(sdata, false); ieee80211_teardown_sdata(sdata->dev); ret = drv_change_interface(local, sdata, internal_type, p2p); if (ret) type = sdata->vif.type; ieee80211_setup_sdata(sdata, type); err = ieee80211_do_open(sdata->dev, false); WARN(err, "type change: do_open returned %d", err); return ret; }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,447
static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { /* clear type-dependent union */ memset(&sdata->u, 0, sizeof(sdata->u)); /* and set some type-dependent values */ sdata->vif.type = type; sdata->vif.p2p = false; sdata->dev->netdev_ops = &ieee80211_dataif_ops; sdata->wdev.iftype = type; sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE); sdata->control_port_no_encrypt = false; /* only monitor differs */ sdata->dev->type = ARPHRD_ETHER; skb_queue_head_init(&sdata->skb_queue); INIT_WORK(&sdata->work, ieee80211_iface_work); switch (type) { case NL80211_IFTYPE_P2P_GO: type = NL80211_IFTYPE_AP; sdata->vif.type = type; sdata->vif.p2p = true; /* fall through */ case NL80211_IFTYPE_AP: skb_queue_head_init(&sdata->u.ap.ps_bc_buf); INIT_LIST_HEAD(&sdata->u.ap.vlans); break; case NL80211_IFTYPE_P2P_CLIENT: type = NL80211_IFTYPE_STATION; sdata->vif.type = type; sdata->vif.p2p = true; /* fall through */ case NL80211_IFTYPE_STATION: ieee80211_sta_setup_sdata(sdata); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_setup_sdata(sdata); break; case NL80211_IFTYPE_MESH_POINT: if (ieee80211_vif_is_mesh(&sdata->vif)) ieee80211_mesh_init_sdata(sdata); break; case NL80211_IFTYPE_MONITOR: sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; sdata->dev->netdev_ops = &ieee80211_monitorif_ops; sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | MONITOR_FLAG_OTHER_BSS; break; case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_AP_VLAN: break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: BUG(); break; } ieee80211_debugfs_add_netdev(sdata); }
DoS
0
static void ieee80211_setup_sdata(struct ieee80211_sub_if_data *sdata, enum nl80211_iftype type) { /* clear type-dependent union */ memset(&sdata->u, 0, sizeof(sdata->u)); /* and set some type-dependent values */ sdata->vif.type = type; sdata->vif.p2p = false; sdata->dev->netdev_ops = &ieee80211_dataif_ops; sdata->wdev.iftype = type; sdata->control_port_protocol = cpu_to_be16(ETH_P_PAE); sdata->control_port_no_encrypt = false; /* only monitor differs */ sdata->dev->type = ARPHRD_ETHER; skb_queue_head_init(&sdata->skb_queue); INIT_WORK(&sdata->work, ieee80211_iface_work); switch (type) { case NL80211_IFTYPE_P2P_GO: type = NL80211_IFTYPE_AP; sdata->vif.type = type; sdata->vif.p2p = true; /* fall through */ case NL80211_IFTYPE_AP: skb_queue_head_init(&sdata->u.ap.ps_bc_buf); INIT_LIST_HEAD(&sdata->u.ap.vlans); break; case NL80211_IFTYPE_P2P_CLIENT: type = NL80211_IFTYPE_STATION; sdata->vif.type = type; sdata->vif.p2p = true; /* fall through */ case NL80211_IFTYPE_STATION: ieee80211_sta_setup_sdata(sdata); break; case NL80211_IFTYPE_ADHOC: ieee80211_ibss_setup_sdata(sdata); break; case NL80211_IFTYPE_MESH_POINT: if (ieee80211_vif_is_mesh(&sdata->vif)) ieee80211_mesh_init_sdata(sdata); break; case NL80211_IFTYPE_MONITOR: sdata->dev->type = ARPHRD_IEEE80211_RADIOTAP; sdata->dev->netdev_ops = &ieee80211_monitorif_ops; sdata->u.mntr_flags = MONITOR_FLAG_CONTROL | MONITOR_FLAG_OTHER_BSS; break; case NL80211_IFTYPE_WDS: case NL80211_IFTYPE_AP_VLAN: break; case NL80211_IFTYPE_UNSPECIFIED: case NUM_NL80211_IFTYPES: BUG(); break; } ieee80211_debugfs_add_netdev(sdata); }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,448
static int ieee80211_stop(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); ieee80211_do_stop(sdata, true); return 0; }
DoS
0
static int ieee80211_stop(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); ieee80211_do_stop(sdata, true); return 0; }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,449
static void ieee80211_teardown_sdata(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; int flushed; int i; /* free extra data */ ieee80211_free_keys(sdata); ieee80211_debugfs_remove_netdev(sdata); for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) __skb_queue_purge(&sdata->fragments[i].skb_list); sdata->fragment_next = 0; if (ieee80211_vif_is_mesh(&sdata->vif)) mesh_rmc_free(sdata); flushed = sta_info_flush(local, sdata); WARN_ON(flushed); }
DoS
0
static void ieee80211_teardown_sdata(struct net_device *dev) { struct ieee80211_sub_if_data *sdata = IEEE80211_DEV_TO_SUB_IF(dev); struct ieee80211_local *local = sdata->local; int flushed; int i; /* free extra data */ ieee80211_free_keys(sdata); ieee80211_debugfs_remove_netdev(sdata); for (i = 0; i < IEEE80211_FRAGMENT_MAX; i++) __skb_queue_purge(&sdata->fragments[i].skb_list); sdata->fragment_next = 0; if (ieee80211_vif_is_mesh(&sdata->vif)) mesh_rmc_free(sdata); flushed = sta_info_flush(local, sdata); WARN_ON(flushed); }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,450
static int netdev_notify(struct notifier_block *nb, unsigned long state, void *ndev) { struct net_device *dev = ndev; struct ieee80211_sub_if_data *sdata; if (state != NETDEV_CHANGENAME) return 0; if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) return 0; if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) return 0; sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(sdata->name, dev->name, IFNAMSIZ); ieee80211_debugfs_rename_netdev(sdata); return 0; }
DoS
0
static int netdev_notify(struct notifier_block *nb, unsigned long state, void *ndev) { struct net_device *dev = ndev; struct ieee80211_sub_if_data *sdata; if (state != NETDEV_CHANGENAME) return 0; if (!dev->ieee80211_ptr || !dev->ieee80211_ptr->wiphy) return 0; if (dev->ieee80211_ptr->wiphy->privid != mac80211_wiphy_privid) return 0; sdata = IEEE80211_DEV_TO_SUB_IF(dev); memcpy(sdata->name, dev->name, IFNAMSIZ); ieee80211_debugfs_rename_netdev(sdata); return 0; }
@@ -698,6 +698,7 @@ static const struct net_device_ops ieee80211_monitorif_ops = { static void ieee80211_if_setup(struct net_device *dev) { ether_setup(dev); + dev->priv_flags &= ~IFF_TX_SKB_SHARING; dev->netdev_ops = &ieee80211_dataif_ops; dev->destructor = free_netdev; }
CWE-264
null
null
19,451
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, int order, const nodemask_t *nodemask) { if (likely(!sysctl_panic_on_oom)) return; if (sysctl_panic_on_oom != 2) { /* * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel * does not panic for cpuset, mempolicy, or memcg allocation * failures. */ if (constraint != CONSTRAINT_NONE) return; } read_lock(&tasklist_lock); dump_header(NULL, gfp_mask, order, NULL, nodemask); read_unlock(&tasklist_lock); panic("Out of memory: %s panic_on_oom is enabled\n", sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); }
DoS Overflow
0
static void check_panic_on_oom(enum oom_constraint constraint, gfp_t gfp_mask, int order, const nodemask_t *nodemask) { if (likely(!sysctl_panic_on_oom)) return; if (sysctl_panic_on_oom != 2) { /* * panic_on_oom == 1 only affects CONSTRAINT_NONE, the kernel * does not panic for cpuset, mempolicy, or memcg allocation * failures. */ if (constraint != CONSTRAINT_NONE) return; } read_lock(&tasklist_lock); dump_header(NULL, gfp_mask, order, NULL, nodemask); read_unlock(&tasklist_lock); panic("Out of memory: %s panic_on_oom is enabled\n", sysctl_panic_on_oom == 2 ? "compulsory" : "system-wide"); }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,452
static void clear_system_oom(void) { struct zone *zone; spin_lock(&zone_scan_lock); for_each_populated_zone(zone) zone_clear_flag(zone, ZONE_OOM_LOCKED); spin_unlock(&zone_scan_lock); }
DoS Overflow
0
static void clear_system_oom(void) { struct zone *zone; spin_lock(&zone_scan_lock); for_each_populated_zone(zone) zone_clear_flag(zone, ZONE_OOM_LOCKED); spin_unlock(&zone_scan_lock); }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,453
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) { struct zoneref *z; struct zone *zone; spin_lock(&zone_scan_lock); for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { zone_clear_flag(zone, ZONE_OOM_LOCKED); } spin_unlock(&zone_scan_lock); }
DoS Overflow
0
void clear_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) { struct zoneref *z; struct zone *zone; spin_lock(&zone_scan_lock); for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { zone_clear_flag(zone, ZONE_OOM_LOCKED); } spin_unlock(&zone_scan_lock); }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,454
static enum oom_constraint constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask, nodemask_t *nodemask, unsigned long *totalpages) { *totalpages = totalram_pages + total_swap_pages; return CONSTRAINT_NONE; }
DoS Overflow
0
static enum oom_constraint constrained_alloc(struct zonelist *zonelist, gfp_t gfp_mask, nodemask_t *nodemask, unsigned long *totalpages) { *totalpages = totalram_pages + total_swap_pages; return CONSTRAINT_NONE; }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,455
static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, struct mem_cgroup *mem, const nodemask_t *nodemask) { task_lock(current); pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, " "oom_adj=%d, oom_score_adj=%d\n", current->comm, gfp_mask, order, current->signal->oom_adj, current->signal->oom_score_adj); cpuset_print_task_mems_allowed(current); task_unlock(current); dump_stack(); mem_cgroup_print_oom_info(mem, p); show_mem(SHOW_MEM_FILTER_NODES); if (sysctl_oom_dump_tasks) dump_tasks(mem, nodemask); }
DoS Overflow
0
static void dump_header(struct task_struct *p, gfp_t gfp_mask, int order, struct mem_cgroup *mem, const nodemask_t *nodemask) { task_lock(current); pr_warning("%s invoked oom-killer: gfp_mask=0x%x, order=%d, " "oom_adj=%d, oom_score_adj=%d\n", current->comm, gfp_mask, order, current->signal->oom_adj, current->signal->oom_score_adj); cpuset_print_task_mems_allowed(current); task_unlock(current); dump_stack(); mem_cgroup_print_oom_info(mem, p); show_mem(SHOW_MEM_FILTER_NODES); if (sysctl_oom_dump_tasks) dump_tasks(mem, nodemask); }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,456
static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask) { struct task_struct *p; struct task_struct *task; pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n"); for_each_process(p) { if (oom_unkillable_task(p, mem, nodemask)) continue; task = find_lock_task_mm(p); if (!task) { /* * This is a kthread or all of p's threads have already * detached their mm's. There's no need to report * them; they can't be oom killed anyway. */ continue; } pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n", task->pid, task_uid(task), task->tgid, task->mm->total_vm, get_mm_rss(task->mm), task_cpu(task), task->signal->oom_adj, task->signal->oom_score_adj, task->comm); task_unlock(task); } }
DoS Overflow
0
static void dump_tasks(const struct mem_cgroup *mem, const nodemask_t *nodemask) { struct task_struct *p; struct task_struct *task; pr_info("[ pid ] uid tgid total_vm rss cpu oom_adj oom_score_adj name\n"); for_each_process(p) { if (oom_unkillable_task(p, mem, nodemask)) continue; task = find_lock_task_mm(p); if (!task) { /* * This is a kthread or all of p's threads have already * detached their mm's. There's no need to report * them; they can't be oom killed anyway. */ continue; } pr_info("[%5d] %5d %5d %8lu %8lu %3u %3d %5d %s\n", task->pid, task_uid(task), task->tgid, task->mm->total_vm, get_mm_rss(task->mm), task_cpu(task), task->signal->oom_adj, task->signal->oom_score_adj, task->comm); task_unlock(task); } }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,457
struct task_struct *find_lock_task_mm(struct task_struct *p) { struct task_struct *t = p; do { task_lock(t); if (likely(t->mm)) return t; task_unlock(t); } while_each_thread(p, t); return NULL; }
DoS Overflow
0
struct task_struct *find_lock_task_mm(struct task_struct *p) { struct task_struct *t = p; do { task_lock(t); if (likely(t->mm)) return t; task_unlock(t); } while_each_thread(p, t); return NULL; }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,458
static bool has_intersects_mems_allowed(struct task_struct *tsk, const nodemask_t *mask) { struct task_struct *start = tsk; do { if (mask) { /* * If this is a mempolicy constrained oom, tsk's * cpuset is irrelevant. Only return true if its * mempolicy intersects current, otherwise it may be * needlessly killed. */ if (mempolicy_nodemask_intersects(tsk, mask)) return true; } else { /* * This is not a mempolicy constrained oom, so only * check the mems of tsk's cpuset. */ if (cpuset_mems_allowed_intersects(current, tsk)) return true; } } while_each_thread(start, tsk); return false; }
DoS Overflow
0
static bool has_intersects_mems_allowed(struct task_struct *tsk, const nodemask_t *mask) { struct task_struct *start = tsk; do { if (mask) { /* * If this is a mempolicy constrained oom, tsk's * cpuset is irrelevant. Only return true if its * mempolicy intersects current, otherwise it may be * needlessly killed. */ if (mempolicy_nodemask_intersects(tsk, mask)) return true; } else { /* * This is not a mempolicy constrained oom, so only * check the mems of tsk's cpuset. */ if (cpuset_mems_allowed_intersects(current, tsk)) return true; } } while_each_thread(start, tsk); return false; }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,459
static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) { struct task_struct *q; struct mm_struct *mm; p = find_lock_task_mm(p); if (!p) return 1; /* mm cannot be safely dereferenced after task_unlock(p) */ mm = p->mm; pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n", task_pid_nr(p), p->comm, K(p->mm->total_vm), K(get_mm_counter(p->mm, MM_ANONPAGES)), K(get_mm_counter(p->mm, MM_FILEPAGES))); task_unlock(p); /* * Kill all processes sharing p->mm in other thread groups, if any. * They don't get access to memory reserves or a higher scheduler * priority, though, to avoid depletion of all memory or task * starvation. This prevents mm->mmap_sem livelock when an oom killed * task cannot exit because it requires the semaphore and its contended * by another thread trying to allocate memory itself. That thread will * now get access to memory reserves since it has a pending fatal * signal. */ for_each_process(q) if (q->mm == mm && !same_thread_group(q, p)) { task_lock(q); /* Protect ->comm from prctl() */ pr_err("Kill process %d (%s) sharing same memory\n", task_pid_nr(q), q->comm); task_unlock(q); force_sig(SIGKILL, q); } set_tsk_thread_flag(p, TIF_MEMDIE); force_sig(SIGKILL, p); return 0; }
DoS Overflow
0
static int oom_kill_task(struct task_struct *p, struct mem_cgroup *mem) { struct task_struct *q; struct mm_struct *mm; p = find_lock_task_mm(p); if (!p) return 1; /* mm cannot be safely dereferenced after task_unlock(p) */ mm = p->mm; pr_err("Killed process %d (%s) total-vm:%lukB, anon-rss:%lukB, file-rss:%lukB\n", task_pid_nr(p), p->comm, K(p->mm->total_vm), K(get_mm_counter(p->mm, MM_ANONPAGES)), K(get_mm_counter(p->mm, MM_FILEPAGES))); task_unlock(p); /* * Kill all processes sharing p->mm in other thread groups, if any. * They don't get access to memory reserves or a higher scheduler * priority, though, to avoid depletion of all memory or task * starvation. This prevents mm->mmap_sem livelock when an oom killed * task cannot exit because it requires the semaphore and its contended * by another thread trying to allocate memory itself. That thread will * now get access to memory reserves since it has a pending fatal * signal. */ for_each_process(q) if (q->mm == mm && !same_thread_group(q, p)) { task_lock(q); /* Protect ->comm from prctl() */ pr_err("Kill process %d (%s) sharing same memory\n", task_pid_nr(q), q->comm); task_unlock(q); force_sig(SIGKILL, q); } set_tsk_thread_flag(p, TIF_MEMDIE); force_sig(SIGKILL, p); return 0; }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,460
static bool oom_unkillable_task(struct task_struct *p, const struct mem_cgroup *mem, const nodemask_t *nodemask) { if (is_global_init(p)) return true; if (p->flags & PF_KTHREAD) return true; /* When mem_cgroup_out_of_memory() and p is not member of the group */ if (mem && !task_in_mem_cgroup(p, mem)) return true; /* p may not have freeable memory in nodemask */ if (!has_intersects_mems_allowed(p, nodemask)) return true; return false; }
DoS Overflow
0
static bool oom_unkillable_task(struct task_struct *p, const struct mem_cgroup *mem, const nodemask_t *nodemask) { if (is_global_init(p)) return true; if (p->flags & PF_KTHREAD) return true; /* When mem_cgroup_out_of_memory() and p is not member of the group */ if (mem && !task_in_mem_cgroup(p, mem)) return true; /* p may not have freeable memory in nodemask */ if (!has_intersects_mems_allowed(p, nodemask)) return true; return false; }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,461
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order, nodemask_t *nodemask) { const nodemask_t *mpol_mask; struct task_struct *p; unsigned long totalpages; unsigned long freed = 0; unsigned int points; enum oom_constraint constraint = CONSTRAINT_NONE; int killed = 0; blocking_notifier_call_chain(&oom_notify_list, 0, &freed); if (freed > 0) /* Got some memory back in the last second. */ return; /* * If current has a pending SIGKILL, then automatically select it. The * goal is to allow it to allocate so that it may quickly exit and free * its memory. */ if (fatal_signal_pending(current)) { set_thread_flag(TIF_MEMDIE); return; } /* * Check if there were limitations on the allocation (only relevant for * NUMA) that may require different handling. */ constraint = constrained_alloc(zonelist, gfp_mask, nodemask, &totalpages); mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL; check_panic_on_oom(constraint, gfp_mask, order, mpol_mask); read_lock(&tasklist_lock); if (sysctl_oom_kill_allocating_task && !oom_unkillable_task(current, NULL, nodemask) && current->mm && !atomic_read(&current->mm->oom_disable_count)) { /* * oom_kill_process() needs tasklist_lock held. If it returns * non-zero, current could not be killed so we must fallback to * the tasklist scan. */ if (!oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL, nodemask, "Out of memory (oom_kill_allocating_task)")) goto out; } retry: p = select_bad_process(&points, totalpages, NULL, mpol_mask); if (PTR_ERR(p) == -1UL) goto out; /* Found nothing?!?! Either we hang forever, or we panic. */ if (!p) { dump_header(NULL, gfp_mask, order, NULL, mpol_mask); read_unlock(&tasklist_lock); panic("Out of memory and no killable processes...\n"); } if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL, nodemask, "Out of memory")) goto retry; killed = 1; out: read_unlock(&tasklist_lock); /* * Give "p" a good chance of killing itself before we * retry to allocate memory unless "p" is current */ if (killed && !test_thread_flag(TIF_MEMDIE)) schedule_timeout_uninterruptible(1); }
DoS Overflow
0
void out_of_memory(struct zonelist *zonelist, gfp_t gfp_mask, int order, nodemask_t *nodemask) { const nodemask_t *mpol_mask; struct task_struct *p; unsigned long totalpages; unsigned long freed = 0; unsigned int points; enum oom_constraint constraint = CONSTRAINT_NONE; int killed = 0; blocking_notifier_call_chain(&oom_notify_list, 0, &freed); if (freed > 0) /* Got some memory back in the last second. */ return; /* * If current has a pending SIGKILL, then automatically select it. The * goal is to allow it to allocate so that it may quickly exit and free * its memory. */ if (fatal_signal_pending(current)) { set_thread_flag(TIF_MEMDIE); return; } /* * Check if there were limitations on the allocation (only relevant for * NUMA) that may require different handling. */ constraint = constrained_alloc(zonelist, gfp_mask, nodemask, &totalpages); mpol_mask = (constraint == CONSTRAINT_MEMORY_POLICY) ? nodemask : NULL; check_panic_on_oom(constraint, gfp_mask, order, mpol_mask); read_lock(&tasklist_lock); if (sysctl_oom_kill_allocating_task && !oom_unkillable_task(current, NULL, nodemask) && current->mm && !atomic_read(&current->mm->oom_disable_count)) { /* * oom_kill_process() needs tasklist_lock held. If it returns * non-zero, current could not be killed so we must fallback to * the tasklist scan. */ if (!oom_kill_process(current, gfp_mask, order, 0, totalpages, NULL, nodemask, "Out of memory (oom_kill_allocating_task)")) goto out; } retry: p = select_bad_process(&points, totalpages, NULL, mpol_mask); if (PTR_ERR(p) == -1UL) goto out; /* Found nothing?!?! Either we hang forever, or we panic. */ if (!p) { dump_header(NULL, gfp_mask, order, NULL, mpol_mask); read_unlock(&tasklist_lock); panic("Out of memory and no killable processes...\n"); } if (oom_kill_process(p, gfp_mask, order, points, totalpages, NULL, nodemask, "Out of memory")) goto retry; killed = 1; out: read_unlock(&tasklist_lock); /* * Give "p" a good chance of killing itself before we * retry to allocate memory unless "p" is current */ if (killed && !test_thread_flag(TIF_MEMDIE)) schedule_timeout_uninterruptible(1); }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,462
void pagefault_out_of_memory(void) { if (try_set_system_oom()) { out_of_memory(NULL, 0, 0, NULL); clear_system_oom(); } if (!test_thread_flag(TIF_MEMDIE)) schedule_timeout_uninterruptible(1); }
DoS Overflow
0
void pagefault_out_of_memory(void) { if (try_set_system_oom()) { out_of_memory(NULL, 0, 0, NULL); clear_system_oom(); } if (!test_thread_flag(TIF_MEMDIE)) schedule_timeout_uninterruptible(1); }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,463
static struct task_struct *select_bad_process(unsigned int *ppoints, unsigned long totalpages, struct mem_cgroup *mem, const nodemask_t *nodemask) { struct task_struct *g, *p; struct task_struct *chosen = NULL; *ppoints = 0; do_each_thread(g, p) { unsigned int points; if (p->exit_state) continue; if (oom_unkillable_task(p, mem, nodemask)) continue; /* * This task already has access to memory reserves and is * being killed. Don't allow any other task access to the * memory reserve. * * Note: this may have a chance of deadlock if it gets * blocked waiting for another task which itself is waiting * for memory. Is there a better alternative? */ if (test_tsk_thread_flag(p, TIF_MEMDIE)) return ERR_PTR(-1UL); if (!p->mm) continue; if (p->flags & PF_EXITING) { /* * If p is the current task and is in the process of * releasing memory, we allow the "kill" to set * TIF_MEMDIE, which will allow it to gain access to * memory reserves. Otherwise, it may stall forever. * * The loop isn't broken here, however, in case other * threads are found to have already been oom killed. */ if (p == current) { chosen = p; *ppoints = 1000; } else { /* * If this task is not being ptraced on exit, * then wait for it to finish before killing * some other task unnecessarily. */ if (!(p->group_leader->ptrace & PT_TRACE_EXIT)) return ERR_PTR(-1UL); } } points = oom_badness(p, mem, nodemask, totalpages); if (points > *ppoints) { chosen = p; *ppoints = points; } } while_each_thread(g, p); return chosen; }
DoS Overflow
0
static struct task_struct *select_bad_process(unsigned int *ppoints, unsigned long totalpages, struct mem_cgroup *mem, const nodemask_t *nodemask) { struct task_struct *g, *p; struct task_struct *chosen = NULL; *ppoints = 0; do_each_thread(g, p) { unsigned int points; if (p->exit_state) continue; if (oom_unkillable_task(p, mem, nodemask)) continue; /* * This task already has access to memory reserves and is * being killed. Don't allow any other task access to the * memory reserve. * * Note: this may have a chance of deadlock if it gets * blocked waiting for another task which itself is waiting * for memory. Is there a better alternative? */ if (test_tsk_thread_flag(p, TIF_MEMDIE)) return ERR_PTR(-1UL); if (!p->mm) continue; if (p->flags & PF_EXITING) { /* * If p is the current task and is in the process of * releasing memory, we allow the "kill" to set * TIF_MEMDIE, which will allow it to gain access to * memory reserves. Otherwise, it may stall forever. * * The loop isn't broken here, however, in case other * threads are found to have already been oom killed. */ if (p == current) { chosen = p; *ppoints = 1000; } else { /* * If this task is not being ptraced on exit, * then wait for it to finish before killing * some other task unnecessarily. */ if (!(p->group_leader->ptrace & PT_TRACE_EXIT)) return ERR_PTR(-1UL); } } points = oom_badness(p, mem, nodemask, totalpages); if (points > *ppoints) { chosen = p; *ppoints = points; } } while_each_thread(g, p); return chosen; }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,464
static int try_set_system_oom(void) { struct zone *zone; int ret = 1; spin_lock(&zone_scan_lock); for_each_populated_zone(zone) if (zone_is_oom_locked(zone)) { ret = 0; goto out; } for_each_populated_zone(zone) zone_set_flag(zone, ZONE_OOM_LOCKED); out: spin_unlock(&zone_scan_lock); return ret; }
DoS Overflow
0
static int try_set_system_oom(void) { struct zone *zone; int ret = 1; spin_lock(&zone_scan_lock); for_each_populated_zone(zone) if (zone_is_oom_locked(zone)) { ret = 0; goto out; } for_each_populated_zone(zone) zone_set_flag(zone, ZONE_OOM_LOCKED); out: spin_unlock(&zone_scan_lock); return ret; }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,465
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) { struct zoneref *z; struct zone *zone; int ret = 1; spin_lock(&zone_scan_lock); for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { if (zone_is_oom_locked(zone)) { ret = 0; goto out; } } for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { /* * Lock each zone in the zonelist under zone_scan_lock so a * parallel invocation of try_set_zonelist_oom() doesn't succeed * when it shouldn't. */ zone_set_flag(zone, ZONE_OOM_LOCKED); } out: spin_unlock(&zone_scan_lock); return ret; }
DoS Overflow
0
int try_set_zonelist_oom(struct zonelist *zonelist, gfp_t gfp_mask) { struct zoneref *z; struct zone *zone; int ret = 1; spin_lock(&zone_scan_lock); for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { if (zone_is_oom_locked(zone)) { ret = 0; goto out; } } for_each_zone_zonelist(zone, z, zonelist, gfp_zone(gfp_mask)) { /* * Lock each zone in the zonelist under zone_scan_lock so a * parallel invocation of try_set_zonelist_oom() doesn't succeed * when it shouldn't. */ zone_set_flag(zone, ZONE_OOM_LOCKED); } out: spin_unlock(&zone_scan_lock); return ret; }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,466
int unregister_oom_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&oom_notify_list, nb); }
DoS Overflow
0
int unregister_oom_notifier(struct notifier_block *nb) { return blocking_notifier_chain_unregister(&oom_notify_list, nb); }
@@ -162,7 +162,7 @@ static bool oom_unkillable_task(struct task_struct *p, unsigned int oom_badness(struct task_struct *p, struct mem_cgroup *mem, const nodemask_t *nodemask, unsigned long totalpages) { - int points; + long points; if (oom_unkillable_task(p, mem, nodemask)) return 0;
CWE-189
null
null
19,467
__blist_add_buffer(struct journal_head **list, struct journal_head *jh) { if (!*list) { jh->b_tnext = jh->b_tprev = jh; *list = jh; } else { /* Insert at the tail of the list to preserve order */ struct journal_head *first = *list, *last = first->b_tprev; jh->b_tprev = last; jh->b_tnext = first; last->b_tnext = first->b_tprev = jh; } }
DoS Overflow
0
__blist_add_buffer(struct journal_head **list, struct journal_head *jh) { if (!*list) { jh->b_tnext = jh->b_tprev = jh; *list = jh; } else { /* Insert at the tail of the list to preserve order */ struct journal_head *first = *list, *last = first->b_tprev; jh->b_tprev = last; jh->b_tnext = first; last->b_tnext = first->b_tprev = jh; } }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,468
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) { int may_free = 1; struct buffer_head *bh = jh2bh(jh); if (jh->b_cp_transaction) { JBUFFER_TRACE(jh, "on running+cp transaction"); __jbd2_journal_temp_unlink_buffer(jh); /* * We don't want to write the buffer anymore, clear the * bit so that we don't confuse checks in * __journal_file_buffer */ clear_buffer_dirty(bh); __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); may_free = 0; } else { JBUFFER_TRACE(jh, "on running transaction"); __jbd2_journal_unfile_buffer(jh); } return may_free; }
DoS Overflow
0
static int __dispose_buffer(struct journal_head *jh, transaction_t *transaction) { int may_free = 1; struct buffer_head *bh = jh2bh(jh); if (jh->b_cp_transaction) { JBUFFER_TRACE(jh, "on running+cp transaction"); __jbd2_journal_temp_unlink_buffer(jh); /* * We don't want to write the buffer anymore, clear the * bit so that we don't confuse checks in * __journal_file_buffer */ clear_buffer_dirty(bh); __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); may_free = 0; } else { JBUFFER_TRACE(jh, "on running transaction"); __jbd2_journal_unfile_buffer(jh); } return may_free; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,469
void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) { struct journal_head **list = NULL; transaction_t *transaction; struct buffer_head *bh = jh2bh(jh); J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); transaction = jh->b_transaction; if (transaction) assert_spin_locked(&transaction->t_journal->j_list_lock); J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); if (jh->b_jlist != BJ_None) J_ASSERT_JH(jh, transaction != NULL); switch (jh->b_jlist) { case BJ_None: return; case BJ_Metadata: transaction->t_nr_buffers--; J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0); list = &transaction->t_buffers; break; case BJ_Forget: list = &transaction->t_forget; break; case BJ_IO: list = &transaction->t_iobuf_list; break; case BJ_Shadow: list = &transaction->t_shadow_list; break; case BJ_LogCtl: list = &transaction->t_log_list; break; case BJ_Reserved: list = &transaction->t_reserved_list; break; } __blist_del_buffer(list, jh); jh->b_jlist = BJ_None; if (test_clear_buffer_jbddirty(bh)) mark_buffer_dirty(bh); /* Expose it to the VM */ }
DoS Overflow
0
void __jbd2_journal_temp_unlink_buffer(struct journal_head *jh) { struct journal_head **list = NULL; transaction_t *transaction; struct buffer_head *bh = jh2bh(jh); J_ASSERT_JH(jh, jbd_is_locked_bh_state(bh)); transaction = jh->b_transaction; if (transaction) assert_spin_locked(&transaction->t_journal->j_list_lock); J_ASSERT_JH(jh, jh->b_jlist < BJ_Types); if (jh->b_jlist != BJ_None) J_ASSERT_JH(jh, transaction != NULL); switch (jh->b_jlist) { case BJ_None: return; case BJ_Metadata: transaction->t_nr_buffers--; J_ASSERT_JH(jh, transaction->t_nr_buffers >= 0); list = &transaction->t_buffers; break; case BJ_Forget: list = &transaction->t_forget; break; case BJ_IO: list = &transaction->t_iobuf_list; break; case BJ_Shadow: list = &transaction->t_shadow_list; break; case BJ_LogCtl: list = &transaction->t_log_list; break; case BJ_Reserved: list = &transaction->t_reserved_list; break; } __blist_del_buffer(list, jh); jh->b_jlist = BJ_None; if (test_clear_buffer_jbddirty(bh)) mark_buffer_dirty(bh); /* Expose it to the VM */ }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,470
static void __jbd2_journal_unfile_buffer(struct journal_head *jh) { __jbd2_journal_temp_unlink_buffer(jh); jh->b_transaction = NULL; jbd2_journal_put_journal_head(jh); }
DoS Overflow
0
static void __jbd2_journal_unfile_buffer(struct journal_head *jh) { __jbd2_journal_temp_unlink_buffer(jh); jh->b_transaction = NULL; jbd2_journal_put_journal_head(jh); }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,471
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) { struct journal_head *jh; jh = bh2jh(bh); if (buffer_locked(bh) || buffer_dirty(bh)) goto out; if (jh->b_next_transaction != NULL) goto out; spin_lock(&journal->j_list_lock); if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) { /* written-back checkpointed metadata buffer */ if (jh->b_jlist == BJ_None) { JBUFFER_TRACE(jh, "remove from checkpoint list"); __jbd2_journal_remove_checkpoint(jh); } } spin_unlock(&journal->j_list_lock); out: return; }
DoS Overflow
0
__journal_try_to_free_buffer(journal_t *journal, struct buffer_head *bh) { struct journal_head *jh; jh = bh2jh(bh); if (buffer_locked(bh) || buffer_dirty(bh)) goto out; if (jh->b_next_transaction != NULL) goto out; spin_lock(&journal->j_list_lock); if (jh->b_cp_transaction != NULL && jh->b_transaction == NULL) { /* written-back checkpointed metadata buffer */ if (jh->b_jlist == BJ_None) { JBUFFER_TRACE(jh, "remove from checkpoint list"); __jbd2_journal_remove_checkpoint(jh); } } spin_unlock(&journal->j_list_lock); out: return; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,472
do_get_write_access(handle_t *handle, struct journal_head *jh, int force_copy) { struct buffer_head *bh; transaction_t *transaction; journal_t *journal; int error; char *frozen_buffer = NULL; int need_copy = 0; if (is_handle_aborted(handle)) return -EROFS; transaction = handle->h_transaction; journal = transaction->t_journal; jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy); JBUFFER_TRACE(jh, "entry"); repeat: bh = jh2bh(jh); /* @@@ Need to check for errors here at some point. */ lock_buffer(bh); jbd_lock_bh_state(bh); /* We now hold the buffer lock so it is safe to query the buffer * state. Is the buffer dirty? * * If so, there are two possibilities. The buffer may be * non-journaled, and undergoing a quite legitimate writeback. * Otherwise, it is journaled, and we don't expect dirty buffers * in that state (the buffers should be marked JBD_Dirty * instead.) So either the IO is being done under our own * control and this is a bug, or it's a third party IO such as * dump(8) (which may leave the buffer scheduled for read --- * ie. locked but not dirty) or tune2fs (which may actually have * the buffer dirtied, ugh.) */ if (buffer_dirty(bh)) { /* * First question: is this buffer already part of the current * transaction or the existing committing transaction? */ if (jh->b_transaction) { J_ASSERT_JH(jh, jh->b_transaction == transaction || jh->b_transaction == journal->j_committing_transaction); if (jh->b_next_transaction) J_ASSERT_JH(jh, jh->b_next_transaction == transaction); warn_dirty_buffer(bh); } /* * In any case we need to clean the dirty flag and we must * do it under the buffer lock to be sure we don't race * with running write-out. */ JBUFFER_TRACE(jh, "Journalling dirty buffer"); clear_buffer_dirty(bh); set_buffer_jbddirty(bh); } unlock_buffer(bh); error = -EROFS; if (is_handle_aborted(handle)) { jbd_unlock_bh_state(bh); goto out; } error = 0; /* * The buffer is already part of this transaction if b_transaction or * b_next_transaction points to it */ if (jh->b_transaction == transaction || jh->b_next_transaction == transaction) goto done; /* * this is the first time this transaction is touching this buffer, * reset the modified flag */ jh->b_modified = 0; /* * If there is already a copy-out version of this buffer, then we don't * need to make another one */ if (jh->b_frozen_data) { JBUFFER_TRACE(jh, "has frozen data"); J_ASSERT_JH(jh, jh->b_next_transaction == NULL); jh->b_next_transaction = transaction; goto done; } /* Is there data here we need to preserve? */ if (jh->b_transaction && jh->b_transaction != transaction) { JBUFFER_TRACE(jh, "owned by older transaction"); J_ASSERT_JH(jh, jh->b_next_transaction == NULL); J_ASSERT_JH(jh, jh->b_transaction == journal->j_committing_transaction); /* There is one case we have to be very careful about. * If the committing transaction is currently writing * this buffer out to disk and has NOT made a copy-out, * then we cannot modify the buffer contents at all * right now. The essence of copy-out is that it is the * extra copy, not the primary copy, which gets * journaled. If the primary copy is already going to * disk then we cannot do copy-out here. */ if (jh->b_jlist == BJ_Shadow) { DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow); wait_queue_head_t *wqh; wqh = bit_waitqueue(&bh->b_state, BH_Unshadow); JBUFFER_TRACE(jh, "on shadow: sleep"); jbd_unlock_bh_state(bh); /* commit wakes up all shadow buffers after IO */ for ( ; ; ) { prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE); if (jh->b_jlist != BJ_Shadow) break; schedule(); } finish_wait(wqh, &wait.wait); goto repeat; } /* Only do the copy if the currently-owning transaction * still needs it. If it is on the Forget list, the * committing transaction is past that stage. The * buffer had better remain locked during the kmalloc, * but that should be true --- we hold the journal lock * still and the buffer is already on the BUF_JOURNAL * list so won't be flushed. * * Subtle point, though: if this is a get_undo_access, * then we will be relying on the frozen_data to contain * the new value of the committed_data record after the * transaction, so we HAVE to force the frozen_data copy * in that case. */ if (jh->b_jlist != BJ_Forget || force_copy) { JBUFFER_TRACE(jh, "generate frozen data"); if (!frozen_buffer) { JBUFFER_TRACE(jh, "allocate memory for buffer"); jbd_unlock_bh_state(bh); frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); if (!frozen_buffer) { printk(KERN_EMERG "%s: OOM for frozen_buffer\n", __func__); JBUFFER_TRACE(jh, "oom!"); error = -ENOMEM; jbd_lock_bh_state(bh); goto done; } goto repeat; } jh->b_frozen_data = frozen_buffer; frozen_buffer = NULL; need_copy = 1; } jh->b_next_transaction = transaction; } /* * Finally, if the buffer is not journaled right now, we need to make * sure it doesn't get written to disk before the caller actually * commits the new data */ if (!jh->b_transaction) { JBUFFER_TRACE(jh, "no transaction"); J_ASSERT_JH(jh, !jh->b_next_transaction); JBUFFER_TRACE(jh, "file as BJ_Reserved"); spin_lock(&journal->j_list_lock); __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); spin_unlock(&journal->j_list_lock); } done: if (need_copy) { struct page *page; int offset; char *source; J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)), "Possible IO failure.\n"); page = jh2bh(jh)->b_page; offset = offset_in_page(jh2bh(jh)->b_data); source = kmap_atomic(page, KM_USER0); /* Fire data frozen trigger just before we copy the data */ jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers); memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size); kunmap_atomic(source, KM_USER0); /* * Now that the frozen data is saved off, we need to store * any matching triggers. */ jh->b_frozen_triggers = jh->b_triggers; } jbd_unlock_bh_state(bh); /* * If we are about to journal a buffer, then any revoke pending on it is * no longer valid */ jbd2_journal_cancel_revoke(handle, jh); out: if (unlikely(frozen_buffer)) /* It's usually NULL */ jbd2_free(frozen_buffer, bh->b_size); JBUFFER_TRACE(jh, "exit"); return error; }
DoS Overflow
0
do_get_write_access(handle_t *handle, struct journal_head *jh, int force_copy) { struct buffer_head *bh; transaction_t *transaction; journal_t *journal; int error; char *frozen_buffer = NULL; int need_copy = 0; if (is_handle_aborted(handle)) return -EROFS; transaction = handle->h_transaction; journal = transaction->t_journal; jbd_debug(5, "journal_head %p, force_copy %d\n", jh, force_copy); JBUFFER_TRACE(jh, "entry"); repeat: bh = jh2bh(jh); /* @@@ Need to check for errors here at some point. */ lock_buffer(bh); jbd_lock_bh_state(bh); /* We now hold the buffer lock so it is safe to query the buffer * state. Is the buffer dirty? * * If so, there are two possibilities. The buffer may be * non-journaled, and undergoing a quite legitimate writeback. * Otherwise, it is journaled, and we don't expect dirty buffers * in that state (the buffers should be marked JBD_Dirty * instead.) So either the IO is being done under our own * control and this is a bug, or it's a third party IO such as * dump(8) (which may leave the buffer scheduled for read --- * ie. locked but not dirty) or tune2fs (which may actually have * the buffer dirtied, ugh.) */ if (buffer_dirty(bh)) { /* * First question: is this buffer already part of the current * transaction or the existing committing transaction? */ if (jh->b_transaction) { J_ASSERT_JH(jh, jh->b_transaction == transaction || jh->b_transaction == journal->j_committing_transaction); if (jh->b_next_transaction) J_ASSERT_JH(jh, jh->b_next_transaction == transaction); warn_dirty_buffer(bh); } /* * In any case we need to clean the dirty flag and we must * do it under the buffer lock to be sure we don't race * with running write-out. */ JBUFFER_TRACE(jh, "Journalling dirty buffer"); clear_buffer_dirty(bh); set_buffer_jbddirty(bh); } unlock_buffer(bh); error = -EROFS; if (is_handle_aborted(handle)) { jbd_unlock_bh_state(bh); goto out; } error = 0; /* * The buffer is already part of this transaction if b_transaction or * b_next_transaction points to it */ if (jh->b_transaction == transaction || jh->b_next_transaction == transaction) goto done; /* * this is the first time this transaction is touching this buffer, * reset the modified flag */ jh->b_modified = 0; /* * If there is already a copy-out version of this buffer, then we don't * need to make another one */ if (jh->b_frozen_data) { JBUFFER_TRACE(jh, "has frozen data"); J_ASSERT_JH(jh, jh->b_next_transaction == NULL); jh->b_next_transaction = transaction; goto done; } /* Is there data here we need to preserve? */ if (jh->b_transaction && jh->b_transaction != transaction) { JBUFFER_TRACE(jh, "owned by older transaction"); J_ASSERT_JH(jh, jh->b_next_transaction == NULL); J_ASSERT_JH(jh, jh->b_transaction == journal->j_committing_transaction); /* There is one case we have to be very careful about. * If the committing transaction is currently writing * this buffer out to disk and has NOT made a copy-out, * then we cannot modify the buffer contents at all * right now. The essence of copy-out is that it is the * extra copy, not the primary copy, which gets * journaled. If the primary copy is already going to * disk then we cannot do copy-out here. 
*/ if (jh->b_jlist == BJ_Shadow) { DEFINE_WAIT_BIT(wait, &bh->b_state, BH_Unshadow); wait_queue_head_t *wqh; wqh = bit_waitqueue(&bh->b_state, BH_Unshadow); JBUFFER_TRACE(jh, "on shadow: sleep"); jbd_unlock_bh_state(bh); /* commit wakes up all shadow buffers after IO */ for ( ; ; ) { prepare_to_wait(wqh, &wait.wait, TASK_UNINTERRUPTIBLE); if (jh->b_jlist != BJ_Shadow) break; schedule(); } finish_wait(wqh, &wait.wait); goto repeat; } /* Only do the copy if the currently-owning transaction * still needs it. If it is on the Forget list, the * committing transaction is past that stage. The * buffer had better remain locked during the kmalloc, * but that should be true --- we hold the journal lock * still and the buffer is already on the BUF_JOURNAL * list so won't be flushed. * * Subtle point, though: if this is a get_undo_access, * then we will be relying on the frozen_data to contain * the new value of the committed_data record after the * transaction, so we HAVE to force the frozen_data copy * in that case. */ if (jh->b_jlist != BJ_Forget || force_copy) { JBUFFER_TRACE(jh, "generate frozen data"); if (!frozen_buffer) { JBUFFER_TRACE(jh, "allocate memory for buffer"); jbd_unlock_bh_state(bh); frozen_buffer = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); if (!frozen_buffer) { printk(KERN_EMERG "%s: OOM for frozen_buffer\n", __func__); JBUFFER_TRACE(jh, "oom!"); error = -ENOMEM; jbd_lock_bh_state(bh); goto done; } goto repeat; } jh->b_frozen_data = frozen_buffer; frozen_buffer = NULL; need_copy = 1; } jh->b_next_transaction = transaction; } /* * Finally, if the buffer is not journaled right now, we need to make * sure it doesn't get written to disk before the caller actually * commits the new data */ if (!jh->b_transaction) { JBUFFER_TRACE(jh, "no transaction"); J_ASSERT_JH(jh, !jh->b_next_transaction); JBUFFER_TRACE(jh, "file as BJ_Reserved"); spin_lock(&journal->j_list_lock); __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); spin_unlock(&journal->j_list_lock); } done: if (need_copy) { struct page *page; int offset; char *source; J_EXPECT_JH(jh, buffer_uptodate(jh2bh(jh)), "Possible IO failure.\n"); page = jh2bh(jh)->b_page; offset = offset_in_page(jh2bh(jh)->b_data); source = kmap_atomic(page, KM_USER0); /* Fire data frozen trigger just before we copy the data */ jbd2_buffer_frozen_trigger(jh, source + offset, jh->b_triggers); memcpy(jh->b_frozen_data, source+offset, jh2bh(jh)->b_size); kunmap_atomic(source, KM_USER0); /* * Now that the frozen data is saved off, we need to store * any matching triggers. */ jh->b_frozen_triggers = jh->b_triggers; } jbd_unlock_bh_state(bh); /* * If we are about to journal a buffer, then any revoke pending on it is * no longer valid */ jbd2_journal_cancel_revoke(handle, jh); out: if (unlikely(frozen_buffer)) /* It's usually NULL */ jbd2_free(frozen_buffer, bh->b_size); JBUFFER_TRACE(jh, "exit"); return error; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
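The do_get_write_access() record above centres on the copy-out rule: when a committing transaction still owns a buffer, the old contents are frozen into a separate copy and it is that copy which gets journalled, while the running transaction modifies the live buffer. A rough userspace sketch of that idea follows; the types and freeze_buffer() helper are invented for the sketch and are not the jbd2 structures.

#include <stdlib.h>
#include <string.h>
#include <stdio.h>

struct buf_model {
    size_t size;
    unsigned char *data;    /* live contents, free to change */
    unsigned char *frozen;  /* snapshot kept for the committing transaction */
};

static int freeze_buffer(struct buf_model *b)
{
    if (b->frozen)          /* already has a copy-out version */
        return 0;
    b->frozen = malloc(b->size);
    if (!b->frozen)
        return -1;          /* the real code returns -ENOMEM here */
    memcpy(b->frozen, b->data, b->size);
    return 0;
}

int main(void)
{
    unsigned char live[4] = "old";
    struct buf_model b = { .size = sizeof(live), .data = live, .frozen = NULL };

    if (freeze_buffer(&b) == 0) {
        memcpy(live, "new", 4);  /* running transaction modifies the buffer */
        printf("journal writes: %s, buffer now holds: %s\n",
               (char *)b.frozen, (char *)b.data);
        free(b.frozen);
    }
    return 0;
}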
19,473
int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; tid_t tid; int need_to_start, ret; /* If we've had an abort of any type, don't even think about * actually doing the restart! */ if (is_handle_aborted(handle)) return 0; /* * First unlink the handle from its current transaction, and start the * commit on that. */ J_ASSERT(atomic_read(&transaction->t_updates) > 0); J_ASSERT(journal_current_handle() == handle); read_lock(&journal->j_state_lock); spin_lock(&transaction->t_handle_lock); atomic_sub(handle->h_buffer_credits, &transaction->t_outstanding_credits); if (atomic_dec_and_test(&transaction->t_updates)) wake_up(&journal->j_wait_updates); spin_unlock(&transaction->t_handle_lock); jbd_debug(2, "restarting handle %p\n", handle); tid = transaction->t_tid; need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); if (need_to_start) jbd2_log_start_commit(journal, tid); lock_map_release(&handle->h_lockdep_map); handle->h_buffer_credits = nblocks; ret = start_this_handle(journal, handle, gfp_mask); return ret; }
DoS Overflow
0
int jbd2__journal_restart(handle_t *handle, int nblocks, gfp_t gfp_mask) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; tid_t tid; int need_to_start, ret; /* If we've had an abort of any type, don't even think about * actually doing the restart! */ if (is_handle_aborted(handle)) return 0; /* * First unlink the handle from its current transaction, and start the * commit on that. */ J_ASSERT(atomic_read(&transaction->t_updates) > 0); J_ASSERT(journal_current_handle() == handle); read_lock(&journal->j_state_lock); spin_lock(&transaction->t_handle_lock); atomic_sub(handle->h_buffer_credits, &transaction->t_outstanding_credits); if (atomic_dec_and_test(&transaction->t_updates)) wake_up(&journal->j_wait_updates); spin_unlock(&transaction->t_handle_lock); jbd_debug(2, "restarting handle %p\n", handle); tid = transaction->t_tid; need_to_start = !tid_geq(journal->j_commit_request, tid); read_unlock(&journal->j_state_lock); if (need_to_start) jbd2_log_start_commit(journal, tid); lock_map_release(&handle->h_lockdep_map); handle->h_buffer_credits = nblocks; ret = start_this_handle(journal, handle, gfp_mask); return ret; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,474
handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask) { handle_t *handle = journal_current_handle(); int err; if (!journal) return ERR_PTR(-EROFS); if (handle) { J_ASSERT(handle->h_transaction->t_journal == journal); handle->h_ref++; return handle; } handle = new_handle(nblocks); if (!handle) return ERR_PTR(-ENOMEM); current->journal_info = handle; err = start_this_handle(journal, handle, gfp_mask); if (err < 0) { jbd2_free_handle(handle); current->journal_info = NULL; handle = ERR_PTR(err); } return handle; }
DoS Overflow
0
handle_t *jbd2__journal_start(journal_t *journal, int nblocks, gfp_t gfp_mask) { handle_t *handle = journal_current_handle(); int err; if (!journal) return ERR_PTR(-EROFS); if (handle) { J_ASSERT(handle->h_transaction->t_journal == journal); handle->h_ref++; return handle; } handle = new_handle(nblocks); if (!handle) return ERR_PTR(-ENOMEM); current->journal_info = handle; err = start_this_handle(journal, handle, gfp_mask); if (err < 0) { jbd2_free_handle(handle); current->journal_info = NULL; handle = ERR_PTR(err); } return handle; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,475
void jbd2_buffer_abort_trigger(struct journal_head *jh, struct jbd2_buffer_trigger_type *triggers) { if (!triggers || !triggers->t_abort) return; triggers->t_abort(triggers, jh2bh(jh)); }
DoS Overflow
0
void jbd2_buffer_abort_trigger(struct journal_head *jh, struct jbd2_buffer_trigger_type *triggers) { if (!triggers || !triggers->t_abort) return; triggers->t_abort(triggers, jh2bh(jh)); }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,476
jbd2_get_transaction(journal_t *journal, transaction_t *transaction) { transaction->t_journal = journal; transaction->t_state = T_RUNNING; transaction->t_start_time = ktime_get(); transaction->t_tid = journal->j_transaction_sequence++; transaction->t_expires = jiffies + journal->j_commit_interval; spin_lock_init(&transaction->t_handle_lock); atomic_set(&transaction->t_updates, 0); atomic_set(&transaction->t_outstanding_credits, 0); atomic_set(&transaction->t_handle_count, 0); INIT_LIST_HEAD(&transaction->t_inode_list); INIT_LIST_HEAD(&transaction->t_private_list); /* Set up the commit timer for the new transaction. */ journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires); add_timer(&journal->j_commit_timer); J_ASSERT(journal->j_running_transaction == NULL); journal->j_running_transaction = transaction; transaction->t_max_wait = 0; transaction->t_start = jiffies; return transaction; }
DoS Overflow
0
jbd2_get_transaction(journal_t *journal, transaction_t *transaction) { transaction->t_journal = journal; transaction->t_state = T_RUNNING; transaction->t_start_time = ktime_get(); transaction->t_tid = journal->j_transaction_sequence++; transaction->t_expires = jiffies + journal->j_commit_interval; spin_lock_init(&transaction->t_handle_lock); atomic_set(&transaction->t_updates, 0); atomic_set(&transaction->t_outstanding_credits, 0); atomic_set(&transaction->t_handle_count, 0); INIT_LIST_HEAD(&transaction->t_inode_list); INIT_LIST_HEAD(&transaction->t_private_list); /* Set up the commit timer for the new transaction. */ journal->j_commit_timer.expires = round_jiffies_up(transaction->t_expires); add_timer(&journal->j_commit_timer); J_ASSERT(journal->j_running_transaction == NULL); journal->j_running_transaction = transaction; transaction->t_max_wait = 0; transaction->t_start = jiffies; return transaction; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,477
int jbd2_journal_extend(handle_t *handle, int nblocks) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; int result; int wanted; result = -EIO; if (is_handle_aborted(handle)) goto out; result = 1; read_lock(&journal->j_state_lock); /* Don't extend a locked-down transaction! */ if (handle->h_transaction->t_state != T_RUNNING) { jbd_debug(3, "denied handle %p %d blocks: " "transaction not running\n", handle, nblocks); goto error_out; } spin_lock(&transaction->t_handle_lock); wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks; if (wanted > journal->j_max_transaction_buffers) { jbd_debug(3, "denied handle %p %d blocks: " "transaction too large\n", handle, nblocks); goto unlock; } if (wanted > __jbd2_log_space_left(journal)) { jbd_debug(3, "denied handle %p %d blocks: " "insufficient log space\n", handle, nblocks); goto unlock; } handle->h_buffer_credits += nblocks; atomic_add(nblocks, &transaction->t_outstanding_credits); result = 0; jbd_debug(3, "extended handle %p by %d\n", handle, nblocks); unlock: spin_unlock(&transaction->t_handle_lock); error_out: read_unlock(&journal->j_state_lock); out: return result; }
DoS Overflow
0
int jbd2_journal_extend(handle_t *handle, int nblocks) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; int result; int wanted; result = -EIO; if (is_handle_aborted(handle)) goto out; result = 1; read_lock(&journal->j_state_lock); /* Don't extend a locked-down transaction! */ if (handle->h_transaction->t_state != T_RUNNING) { jbd_debug(3, "denied handle %p %d blocks: " "transaction not running\n", handle, nblocks); goto error_out; } spin_lock(&transaction->t_handle_lock); wanted = atomic_read(&transaction->t_outstanding_credits) + nblocks; if (wanted > journal->j_max_transaction_buffers) { jbd_debug(3, "denied handle %p %d blocks: " "transaction too large\n", handle, nblocks); goto unlock; } if (wanted > __jbd2_log_space_left(journal)) { jbd_debug(3, "denied handle %p %d blocks: " "insufficient log space\n", handle, nblocks); goto unlock; } handle->h_buffer_credits += nblocks; atomic_add(nblocks, &transaction->t_outstanding_credits); result = 0; jbd_debug(3, "extended handle %p by %d\n", handle, nblocks); unlock: spin_unlock(&transaction->t_handle_lock); error_out: read_unlock(&journal->j_state_lock); out: return result; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
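jbd2_journal_extend() above grants extra credits only while the transaction still fits within both the maximum transaction size and the remaining log space. A small standalone model of that accounting, with simplified field names (the struct below is not the kernel's transaction_t):

#include <stdio.h>

struct txn_model {
    int outstanding_credits;
    int max_transaction_buffers;
    int log_space_left;
};

/* Returns 0 on success (credits granted), 1 if the extension is denied,
 * mirroring the 0/1 results of the real function. */
static int extend_credits(struct txn_model *t, int *handle_credits, int nblocks)
{
    int wanted = t->outstanding_credits + nblocks;

    if (wanted > t->max_transaction_buffers)
        return 1;           /* transaction too large */
    if (wanted > t->log_space_left)
        return 1;           /* insufficient log space */

    t->outstanding_credits = wanted;
    *handle_credits += nblocks;
    return 0;
}

int main(void)
{
    struct txn_model t = { 90, 128, 100 };
    int credits = 10;

    printf("extend by 8:  %s\n", extend_credits(&t, &credits, 8)  ? "denied" : "ok");
    printf("extend by 50: %s\n", extend_credits(&t, &credits, 50) ? "denied" : "ok");
    return 0;
}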
19,478
void jbd2_journal_file_buffer(struct journal_head *jh, transaction_t *transaction, int jlist) { jbd_lock_bh_state(jh2bh(jh)); spin_lock(&transaction->t_journal->j_list_lock); __jbd2_journal_file_buffer(jh, transaction, jlist); spin_unlock(&transaction->t_journal->j_list_lock); jbd_unlock_bh_state(jh2bh(jh)); }
DoS Overflow
0
void jbd2_journal_file_buffer(struct journal_head *jh, transaction_t *transaction, int jlist) { jbd_lock_bh_state(jh2bh(jh)); spin_lock(&transaction->t_journal->j_list_lock); __jbd2_journal_file_buffer(jh, transaction, jlist); spin_unlock(&transaction->t_journal->j_list_lock); jbd_unlock_bh_state(jh2bh(jh)); }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,479
int jbd2_journal_force_commit(journal_t *journal) { handle_t *handle; int ret; handle = jbd2_journal_start(journal, 1); if (IS_ERR(handle)) { ret = PTR_ERR(handle); } else { handle->h_sync = 1; ret = jbd2_journal_stop(handle); } return ret; }
DoS Overflow
0
int jbd2_journal_force_commit(journal_t *journal) { handle_t *handle; int ret; handle = jbd2_journal_start(journal, 1); if (IS_ERR(handle)) { ret = PTR_ERR(handle); } else { handle->h_sync = 1; ret = jbd2_journal_stop(handle); } return ret; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,480
int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; struct journal_head *jh; int drop_reserve = 0; int err = 0; int was_modified = 0; BUFFER_TRACE(bh, "entry"); jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); if (!buffer_jbd(bh)) goto not_jbd; jh = bh2jh(bh); /* Critical error: attempting to delete a bitmap buffer, maybe? * Don't do any jbd operations, and return an error. */ if (!J_EXPECT_JH(jh, !jh->b_committed_data, "inconsistent data on disk")) { err = -EIO; goto not_jbd; } /* keep track of wether or not this transaction modified us */ was_modified = jh->b_modified; /* * The buffer's going from the transaction, we must drop * all references -bzzz */ jh->b_modified = 0; if (jh->b_transaction == handle->h_transaction) { J_ASSERT_JH(jh, !jh->b_frozen_data); /* If we are forgetting a buffer which is already part * of this transaction, then we can just drop it from * the transaction immediately. */ clear_buffer_dirty(bh); clear_buffer_jbddirty(bh); JBUFFER_TRACE(jh, "belongs to current transaction: unfile"); /* * we only want to drop a reference if this transaction * modified the buffer */ if (was_modified) drop_reserve = 1; /* * We are no longer going to journal this buffer. * However, the commit of this transaction is still * important to the buffer: the delete that we are now * processing might obsolete an old log entry, so by * committing, we can satisfy the buffer's checkpoint. * * So, if we have a checkpoint on the buffer, we should * now refile the buffer on our BJ_Forget list so that * we know to remove the checkpoint after we commit. */ if (jh->b_cp_transaction) { __jbd2_journal_temp_unlink_buffer(jh); __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); } else { __jbd2_journal_unfile_buffer(jh); if (!buffer_jbd(bh)) { spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); __bforget(bh); goto drop; } } } else if (jh->b_transaction) { J_ASSERT_JH(jh, (jh->b_transaction == journal->j_committing_transaction)); /* However, if the buffer is still owned by a prior * (committing) transaction, we can't drop it yet... */ JBUFFER_TRACE(jh, "belongs to older transaction"); /* ... but we CAN drop it from the new transaction if we * have also modified it since the original commit. */ if (jh->b_next_transaction) { J_ASSERT(jh->b_next_transaction == transaction); jh->b_next_transaction = NULL; /* * only drop a reference if this transaction modified * the buffer */ if (was_modified) drop_reserve = 1; } } not_jbd: spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); __brelse(bh); drop: if (drop_reserve) { /* no need to reserve log space for this block -bzzz */ handle->h_buffer_credits++; } return err; }
DoS Overflow
0
int jbd2_journal_forget (handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; struct journal_head *jh; int drop_reserve = 0; int err = 0; int was_modified = 0; BUFFER_TRACE(bh, "entry"); jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); if (!buffer_jbd(bh)) goto not_jbd; jh = bh2jh(bh); /* Critical error: attempting to delete a bitmap buffer, maybe? * Don't do any jbd operations, and return an error. */ if (!J_EXPECT_JH(jh, !jh->b_committed_data, "inconsistent data on disk")) { err = -EIO; goto not_jbd; } /* keep track of wether or not this transaction modified us */ was_modified = jh->b_modified; /* * The buffer's going from the transaction, we must drop * all references -bzzz */ jh->b_modified = 0; if (jh->b_transaction == handle->h_transaction) { J_ASSERT_JH(jh, !jh->b_frozen_data); /* If we are forgetting a buffer which is already part * of this transaction, then we can just drop it from * the transaction immediately. */ clear_buffer_dirty(bh); clear_buffer_jbddirty(bh); JBUFFER_TRACE(jh, "belongs to current transaction: unfile"); /* * we only want to drop a reference if this transaction * modified the buffer */ if (was_modified) drop_reserve = 1; /* * We are no longer going to journal this buffer. * However, the commit of this transaction is still * important to the buffer: the delete that we are now * processing might obsolete an old log entry, so by * committing, we can satisfy the buffer's checkpoint. * * So, if we have a checkpoint on the buffer, we should * now refile the buffer on our BJ_Forget list so that * we know to remove the checkpoint after we commit. */ if (jh->b_cp_transaction) { __jbd2_journal_temp_unlink_buffer(jh); __jbd2_journal_file_buffer(jh, transaction, BJ_Forget); } else { __jbd2_journal_unfile_buffer(jh); if (!buffer_jbd(bh)) { spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); __bforget(bh); goto drop; } } } else if (jh->b_transaction) { J_ASSERT_JH(jh, (jh->b_transaction == journal->j_committing_transaction)); /* However, if the buffer is still owned by a prior * (committing) transaction, we can't drop it yet... */ JBUFFER_TRACE(jh, "belongs to older transaction"); /* ... but we CAN drop it from the new transaction if we * have also modified it since the original commit. */ if (jh->b_next_transaction) { J_ASSERT(jh->b_next_transaction == transaction); jh->b_next_transaction = NULL; /* * only drop a reference if this transaction modified * the buffer */ if (was_modified) drop_reserve = 1; } } not_jbd: spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); __brelse(bh); drop: if (drop_reserve) { /* no need to reserve log space for this block -bzzz */ handle->h_buffer_credits++; } return err; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,481
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; struct journal_head *jh = jbd2_journal_add_journal_head(bh); int err; jbd_debug(5, "journal_head %p\n", jh); err = -EROFS; if (is_handle_aborted(handle)) goto out; err = 0; JBUFFER_TRACE(jh, "entry"); /* * The buffer may already belong to this transaction due to pre-zeroing * in the filesystem's new_block code. It may also be on the previous, * committing transaction's lists, but it HAS to be in Forget state in * that case: the transaction must have deleted the buffer for it to be * reused here. */ jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); J_ASSERT_JH(jh, (jh->b_transaction == transaction || jh->b_transaction == NULL || (jh->b_transaction == journal->j_committing_transaction && jh->b_jlist == BJ_Forget))); J_ASSERT_JH(jh, jh->b_next_transaction == NULL); J_ASSERT_JH(jh, buffer_locked(jh2bh(jh))); if (jh->b_transaction == NULL) { /* * Previous jbd2_journal_forget() could have left the buffer * with jbddirty bit set because it was being committed. When * the commit finished, we've filed the buffer for * checkpointing and marked it dirty. Now we are reallocating * the buffer so the transaction freeing it must have * committed and so it's safe to clear the dirty bit. */ clear_buffer_dirty(jh2bh(jh)); /* first access by this transaction */ jh->b_modified = 0; JBUFFER_TRACE(jh, "file as BJ_Reserved"); __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); } else if (jh->b_transaction == journal->j_committing_transaction) { /* first access by this transaction */ jh->b_modified = 0; JBUFFER_TRACE(jh, "set next transaction"); jh->b_next_transaction = transaction; } spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); /* * akpm: I added this. ext3_alloc_branch can pick up new indirect * blocks which contain freed but then revoked metadata. We need * to cancel the revoke in case we end up freeing it yet again * and the reallocating as data - this would cause a second revoke, * which hits an assertion error. */ JBUFFER_TRACE(jh, "cancelling revoke"); jbd2_journal_cancel_revoke(handle, jh); out: jbd2_journal_put_journal_head(jh); return err; }
DoS Overflow
0
int jbd2_journal_get_create_access(handle_t *handle, struct buffer_head *bh) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; struct journal_head *jh = jbd2_journal_add_journal_head(bh); int err; jbd_debug(5, "journal_head %p\n", jh); err = -EROFS; if (is_handle_aborted(handle)) goto out; err = 0; JBUFFER_TRACE(jh, "entry"); /* * The buffer may already belong to this transaction due to pre-zeroing * in the filesystem's new_block code. It may also be on the previous, * committing transaction's lists, but it HAS to be in Forget state in * that case: the transaction must have deleted the buffer for it to be * reused here. */ jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); J_ASSERT_JH(jh, (jh->b_transaction == transaction || jh->b_transaction == NULL || (jh->b_transaction == journal->j_committing_transaction && jh->b_jlist == BJ_Forget))); J_ASSERT_JH(jh, jh->b_next_transaction == NULL); J_ASSERT_JH(jh, buffer_locked(jh2bh(jh))); if (jh->b_transaction == NULL) { /* * Previous jbd2_journal_forget() could have left the buffer * with jbddirty bit set because it was being committed. When * the commit finished, we've filed the buffer for * checkpointing and marked it dirty. Now we are reallocating * the buffer so the transaction freeing it must have * committed and so it's safe to clear the dirty bit. */ clear_buffer_dirty(jh2bh(jh)); /* first access by this transaction */ jh->b_modified = 0; JBUFFER_TRACE(jh, "file as BJ_Reserved"); __jbd2_journal_file_buffer(jh, transaction, BJ_Reserved); } else if (jh->b_transaction == journal->j_committing_transaction) { /* first access by this transaction */ jh->b_modified = 0; JBUFFER_TRACE(jh, "set next transaction"); jh->b_next_transaction = transaction; } spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); /* * akpm: I added this. ext3_alloc_branch can pick up new indirect * blocks which contain freed but then revoked metadata. We need * to cancel the revoke in case we end up freeing it yet again * and the reallocating as data - this would cause a second revoke, * which hits an assertion error. */ JBUFFER_TRACE(jh, "cancelling revoke"); jbd2_journal_cancel_revoke(handle, jh); out: jbd2_journal_put_journal_head(jh); return err; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,482
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh) { int err; struct journal_head *jh = jbd2_journal_add_journal_head(bh); char *committed_data = NULL; JBUFFER_TRACE(jh, "entry"); /* * Do this first --- it can drop the journal lock, so we want to * make sure that obtaining the committed_data is done * atomically wrt. completion of any outstanding commits. */ err = do_get_write_access(handle, jh, 1); if (err) goto out; repeat: if (!jh->b_committed_data) { committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); if (!committed_data) { printk(KERN_EMERG "%s: No memory for committed data\n", __func__); err = -ENOMEM; goto out; } } jbd_lock_bh_state(bh); if (!jh->b_committed_data) { /* Copy out the current buffer contents into the * preserved, committed copy. */ JBUFFER_TRACE(jh, "generate b_committed data"); if (!committed_data) { jbd_unlock_bh_state(bh); goto repeat; } jh->b_committed_data = committed_data; committed_data = NULL; memcpy(jh->b_committed_data, bh->b_data, bh->b_size); } jbd_unlock_bh_state(bh); out: jbd2_journal_put_journal_head(jh); if (unlikely(committed_data)) jbd2_free(committed_data, bh->b_size); return err; }
DoS Overflow
0
int jbd2_journal_get_undo_access(handle_t *handle, struct buffer_head *bh) { int err; struct journal_head *jh = jbd2_journal_add_journal_head(bh); char *committed_data = NULL; JBUFFER_TRACE(jh, "entry"); /* * Do this first --- it can drop the journal lock, so we want to * make sure that obtaining the committed_data is done * atomically wrt. completion of any outstanding commits. */ err = do_get_write_access(handle, jh, 1); if (err) goto out; repeat: if (!jh->b_committed_data) { committed_data = jbd2_alloc(jh2bh(jh)->b_size, GFP_NOFS); if (!committed_data) { printk(KERN_EMERG "%s: No memory for committed data\n", __func__); err = -ENOMEM; goto out; } } jbd_lock_bh_state(bh); if (!jh->b_committed_data) { /* Copy out the current buffer contents into the * preserved, committed copy. */ JBUFFER_TRACE(jh, "generate b_committed data"); if (!committed_data) { jbd_unlock_bh_state(bh); goto repeat; } jh->b_committed_data = committed_data; committed_data = NULL; memcpy(jh->b_committed_data, bh->b_data, bh->b_size); } jbd_unlock_bh_state(bh); out: jbd2_journal_put_journal_head(jh); if (unlikely(committed_data)) jbd2_free(committed_data, bh->b_size); return err; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,483
int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh) { struct journal_head *jh = jbd2_journal_add_journal_head(bh); int rc; /* We do not want to get caught playing with fields which the * log thread also manipulates. Make sure that the buffer * completes any outstanding IO before proceeding. */ rc = do_get_write_access(handle, jh, 0); jbd2_journal_put_journal_head(jh); return rc; }
DoS Overflow
0
int jbd2_journal_get_write_access(handle_t *handle, struct buffer_head *bh) { struct journal_head *jh = jbd2_journal_add_journal_head(bh); int rc; /* We do not want to get caught playing with fields which the * log thread also manipulates. Make sure that the buffer * completes any outstanding IO before proceeding. */ rc = do_get_write_access(handle, jh, 0); jbd2_journal_put_journal_head(jh); return rc; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,484
void jbd2_journal_invalidatepage(journal_t *journal, struct page *page, unsigned long offset) { struct buffer_head *head, *bh, *next; unsigned int curr_off = 0; int may_free = 1; if (!PageLocked(page)) BUG(); if (!page_has_buffers(page)) return; /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be * cautious in our locking. */ head = bh = page_buffers(page); do { unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; if (offset <= curr_off) { /* This block is wholly outside the truncation point */ lock_buffer(bh); may_free &= journal_unmap_buffer(journal, bh); unlock_buffer(bh); } curr_off = next_off; bh = next; } while (bh != head); if (!offset) { if (may_free && try_to_free_buffers(page)) J_ASSERT(!page_has_buffers(page)); } }
DoS Overflow
0
void jbd2_journal_invalidatepage(journal_t *journal, struct page *page, unsigned long offset) { struct buffer_head *head, *bh, *next; unsigned int curr_off = 0; int may_free = 1; if (!PageLocked(page)) BUG(); if (!page_has_buffers(page)) return; /* We will potentially be playing with lists other than just the * data lists (especially for journaled data mode), so be * cautious in our locking. */ head = bh = page_buffers(page); do { unsigned int next_off = curr_off + bh->b_size; next = bh->b_this_page; if (offset <= curr_off) { /* This block is wholly outside the truncation point */ lock_buffer(bh); may_free &= journal_unmap_buffer(journal, bh); unlock_buffer(bh); } curr_off = next_off; bh = next; } while (bh != head); if (!offset) { if (may_free && try_to_free_buffers(page)) J_ASSERT(!page_has_buffers(page)); } }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
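jbd2_journal_invalidatepage() above walks the page's buffers in order and only unmaps those whose start offset lies at or after the truncation point (offset <= curr_off). A standalone sketch of that offset walk, with made-up buffer and page sizes:

#include <stdio.h>

int main(void)
{
    const unsigned int bh_size = 1024;  /* each buffer head covers 1K */
    const unsigned int nr_bufs = 4;     /* one 4K page */
    unsigned long offset = 2048;        /* truncation point within the page */
    unsigned int curr_off = 0;

    for (unsigned int i = 0; i < nr_bufs; i++) {
        unsigned int next_off = curr_off + bh_size;

        if (offset <= curr_off)
            printf("buffer %u at %u: wholly past the truncation point, unmap\n",
                   i, curr_off);
        else
            printf("buffer %u at %u: keep\n", i, curr_off);

        curr_off = next_off;
    }
    return 0;
}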
19,485
void jbd2_journal_lock_updates(journal_t *journal) { DEFINE_WAIT(wait); write_lock(&journal->j_state_lock); ++journal->j_barrier_count; /* Wait until there are no running updates */ while (1) { transaction_t *transaction = journal->j_running_transaction; if (!transaction) break; spin_lock(&transaction->t_handle_lock); prepare_to_wait(&journal->j_wait_updates, &wait, TASK_UNINTERRUPTIBLE); if (!atomic_read(&transaction->t_updates)) { spin_unlock(&transaction->t_handle_lock); finish_wait(&journal->j_wait_updates, &wait); break; } spin_unlock(&transaction->t_handle_lock); write_unlock(&journal->j_state_lock); schedule(); finish_wait(&journal->j_wait_updates, &wait); write_lock(&journal->j_state_lock); } write_unlock(&journal->j_state_lock); /* * We have now established a barrier against other normal updates, but * we also need to barrier against other jbd2_journal_lock_updates() calls * to make sure that we serialise special journal-locked operations * too. */ mutex_lock(&journal->j_barrier); }
DoS Overflow
0
void jbd2_journal_lock_updates(journal_t *journal) { DEFINE_WAIT(wait); write_lock(&journal->j_state_lock); ++journal->j_barrier_count; /* Wait until there are no running updates */ while (1) { transaction_t *transaction = journal->j_running_transaction; if (!transaction) break; spin_lock(&transaction->t_handle_lock); prepare_to_wait(&journal->j_wait_updates, &wait, TASK_UNINTERRUPTIBLE); if (!atomic_read(&transaction->t_updates)) { spin_unlock(&transaction->t_handle_lock); finish_wait(&journal->j_wait_updates, &wait); break; } spin_unlock(&transaction->t_handle_lock); write_unlock(&journal->j_state_lock); schedule(); finish_wait(&journal->j_wait_updates, &wait); write_lock(&journal->j_state_lock); } write_unlock(&journal->j_state_lock); /* * We have now established a barrier against other normal updates, but * we also need to barrier against other jbd2_journal_lock_updates() calls * to make sure that we serialise special journal-locked operations * too. */ mutex_lock(&journal->j_barrier); }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
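jbd2_journal_lock_updates() above raises a barrier count and then waits until every running update has dropped its reference. The following is only a userspace analogy of that wait pattern using pthreads, not the kernel's wait queues or locking; names and the two-update scenario are invented.

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t no_updates = PTHREAD_COND_INITIALIZER;
static int barrier_count;
static int updates = 2;     /* pretend two handles are still running */

static void *finish_update(void *arg)
{
    (void)arg;
    pthread_mutex_lock(&lock);
    if (--updates == 0)
        pthread_cond_signal(&no_updates);
    pthread_mutex_unlock(&lock);
    return NULL;
}

static void lock_updates(void)
{
    pthread_mutex_lock(&lock);
    barrier_count++;
    while (updates > 0)     /* wait for running updates to drain */
        pthread_cond_wait(&no_updates, &lock);
    pthread_mutex_unlock(&lock);
}

int main(void)
{
    pthread_t t1, t2;

    pthread_create(&t1, NULL, finish_update, NULL);
    pthread_create(&t2, NULL, finish_update, NULL);
    lock_updates();
    printf("barrier raised (count=%d), no updates left\n", barrier_count);
    pthread_join(t1, NULL);
    pthread_join(t2, NULL);
    return 0;
}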
19,486
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh) { struct buffer_head *bh = jh2bh(jh); /* Get reference so that buffer cannot be freed before we unlock it */ get_bh(bh); jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); __jbd2_journal_refile_buffer(jh); jbd_unlock_bh_state(bh); spin_unlock(&journal->j_list_lock); __brelse(bh); }
DoS Overflow
0
void jbd2_journal_refile_buffer(journal_t *journal, struct journal_head *jh) { struct buffer_head *bh = jh2bh(jh); /* Get reference so that buffer cannot be freed before we unlock it */ get_bh(bh); jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); __jbd2_journal_refile_buffer(jh); jbd_unlock_bh_state(bh); spin_unlock(&journal->j_list_lock); __brelse(bh); }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,487
jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh) { BUFFER_TRACE(bh, "entry"); }
DoS Overflow
0
jbd2_journal_release_buffer(handle_t *handle, struct buffer_head *bh) { BUFFER_TRACE(bh, "entry"); }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,488
int jbd2_journal_restart(handle_t *handle, int nblocks) { return jbd2__journal_restart(handle, nblocks, GFP_NOFS); }
DoS Overflow
0
int jbd2_journal_restart(handle_t *handle, int nblocks) { return jbd2__journal_restart(handle, nblocks, GFP_NOFS); }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,489
handle_t *jbd2_journal_start(journal_t *journal, int nblocks) { return jbd2__journal_start(journal, nblocks, GFP_NOFS); }
DoS Overflow
0
handle_t *jbd2_journal_start(journal_t *journal, int nblocks) { return jbd2__journal_start(journal, nblocks, GFP_NOFS); }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,490
int jbd2_journal_stop(handle_t *handle) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; int err, wait_for_commit = 0; tid_t tid; pid_t pid; J_ASSERT(journal_current_handle() == handle); if (is_handle_aborted(handle)) err = -EIO; else { J_ASSERT(atomic_read(&transaction->t_updates) > 0); err = 0; } if (--handle->h_ref > 0) { jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1, handle->h_ref); return err; } jbd_debug(4, "Handle %p going down\n", handle); /* * Implement synchronous transaction batching. If the handle * was synchronous, don't force a commit immediately. Let's * yield and let another thread piggyback onto this * transaction. Keep doing that while new threads continue to * arrive. It doesn't cost much - we're about to run a commit * and sleep on IO anyway. Speeds up many-threaded, many-dir * operations by 30x or more... * * We try and optimize the sleep time against what the * underlying disk can do, instead of having a static sleep * time. This is useful for the case where our storage is so * fast that it is more optimal to go ahead and force a flush * and wait for the transaction to be committed than it is to * wait for an arbitrary amount of time for new writers to * join the transaction. We achieve this by measuring how * long it takes to commit a transaction, and compare it with * how long this transaction has been running, and if run time * < commit time then we sleep for the delta and commit. This * greatly helps super fast disks that would see slowdowns as * more threads started doing fsyncs. * * But don't do this if this process was the most recent one * to perform a synchronous write. We do this to detect the * case where a single process is doing a stream of sync * writes. No point in waiting for joiners in that case. */ pid = current->pid; if (handle->h_sync && journal->j_last_sync_writer != pid) { u64 commit_time, trans_time; journal->j_last_sync_writer = pid; read_lock(&journal->j_state_lock); commit_time = journal->j_average_commit_time; read_unlock(&journal->j_state_lock); trans_time = ktime_to_ns(ktime_sub(ktime_get(), transaction->t_start_time)); commit_time = max_t(u64, commit_time, 1000*journal->j_min_batch_time); commit_time = min_t(u64, commit_time, 1000*journal->j_max_batch_time); if (trans_time < commit_time) { ktime_t expires = ktime_add_ns(ktime_get(), commit_time); set_current_state(TASK_UNINTERRUPTIBLE); schedule_hrtimeout(&expires, HRTIMER_MODE_ABS); } } if (handle->h_sync) transaction->t_synchronous_commit = 1; current->journal_info = NULL; atomic_sub(handle->h_buffer_credits, &transaction->t_outstanding_credits); /* * If the handle is marked SYNC, we need to set another commit * going! We also want to force a commit if the current * transaction is occupying too much of the log, or if the * transaction is too old now. */ if (handle->h_sync || (atomic_read(&transaction->t_outstanding_credits) > journal->j_max_transaction_buffers) || time_after_eq(jiffies, transaction->t_expires)) { /* Do this even for aborted journals: an abort still * completes the commit thread, it just doesn't write * anything to disk. */ jbd_debug(2, "transaction too old, requesting commit for " "handle %p\n", handle); /* This is non-blocking */ jbd2_log_start_commit(journal, transaction->t_tid); /* * Special case: JBD2_SYNC synchronous updates require us * to wait for the commit to complete. 
*/ if (handle->h_sync && !(current->flags & PF_MEMALLOC)) wait_for_commit = 1; } /* * Once we drop t_updates, if it goes to zero the transaction * could start committing on us and eventually disappear. So * once we do this, we must not dereference transaction * pointer again. */ tid = transaction->t_tid; if (atomic_dec_and_test(&transaction->t_updates)) { wake_up(&journal->j_wait_updates); if (journal->j_barrier_count) wake_up(&journal->j_wait_transaction_locked); } if (wait_for_commit) err = jbd2_log_wait_commit(journal, tid); lock_map_release(&handle->h_lockdep_map); jbd2_free_handle(handle); return err; }
DoS Overflow
0
int jbd2_journal_stop(handle_t *handle) { transaction_t *transaction = handle->h_transaction; journal_t *journal = transaction->t_journal; int err, wait_for_commit = 0; tid_t tid; pid_t pid; J_ASSERT(journal_current_handle() == handle); if (is_handle_aborted(handle)) err = -EIO; else { J_ASSERT(atomic_read(&transaction->t_updates) > 0); err = 0; } if (--handle->h_ref > 0) { jbd_debug(4, "h_ref %d -> %d\n", handle->h_ref + 1, handle->h_ref); return err; } jbd_debug(4, "Handle %p going down\n", handle); /* * Implement synchronous transaction batching. If the handle * was synchronous, don't force a commit immediately. Let's * yield and let another thread piggyback onto this * transaction. Keep doing that while new threads continue to * arrive. It doesn't cost much - we're about to run a commit * and sleep on IO anyway. Speeds up many-threaded, many-dir * operations by 30x or more... * * We try and optimize the sleep time against what the * underlying disk can do, instead of having a static sleep * time. This is useful for the case where our storage is so * fast that it is more optimal to go ahead and force a flush * and wait for the transaction to be committed than it is to * wait for an arbitrary amount of time for new writers to * join the transaction. We achieve this by measuring how * long it takes to commit a transaction, and compare it with * how long this transaction has been running, and if run time * < commit time then we sleep for the delta and commit. This * greatly helps super fast disks that would see slowdowns as * more threads started doing fsyncs. * * But don't do this if this process was the most recent one * to perform a synchronous write. We do this to detect the * case where a single process is doing a stream of sync * writes. No point in waiting for joiners in that case. */ pid = current->pid; if (handle->h_sync && journal->j_last_sync_writer != pid) { u64 commit_time, trans_time; journal->j_last_sync_writer = pid; read_lock(&journal->j_state_lock); commit_time = journal->j_average_commit_time; read_unlock(&journal->j_state_lock); trans_time = ktime_to_ns(ktime_sub(ktime_get(), transaction->t_start_time)); commit_time = max_t(u64, commit_time, 1000*journal->j_min_batch_time); commit_time = min_t(u64, commit_time, 1000*journal->j_max_batch_time); if (trans_time < commit_time) { ktime_t expires = ktime_add_ns(ktime_get(), commit_time); set_current_state(TASK_UNINTERRUPTIBLE); schedule_hrtimeout(&expires, HRTIMER_MODE_ABS); } } if (handle->h_sync) transaction->t_synchronous_commit = 1; current->journal_info = NULL; atomic_sub(handle->h_buffer_credits, &transaction->t_outstanding_credits); /* * If the handle is marked SYNC, we need to set another commit * going! We also want to force a commit if the current * transaction is occupying too much of the log, or if the * transaction is too old now. */ if (handle->h_sync || (atomic_read(&transaction->t_outstanding_credits) > journal->j_max_transaction_buffers) || time_after_eq(jiffies, transaction->t_expires)) { /* Do this even for aborted journals: an abort still * completes the commit thread, it just doesn't write * anything to disk. */ jbd_debug(2, "transaction too old, requesting commit for " "handle %p\n", handle); /* This is non-blocking */ jbd2_log_start_commit(journal, transaction->t_tid); /* * Special case: JBD2_SYNC synchronous updates require us * to wait for the commit to complete. 
*/ if (handle->h_sync && !(current->flags & PF_MEMALLOC)) wait_for_commit = 1; } /* * Once we drop t_updates, if it goes to zero the transaction * could start committing on us and eventually disappear. So * once we do this, we must not dereference transaction * pointer again. */ tid = transaction->t_tid; if (atomic_dec_and_test(&transaction->t_updates)) { wake_up(&journal->j_wait_updates); if (journal->j_barrier_count) wake_up(&journal->j_wait_transaction_locked); } if (wait_for_commit) err = jbd2_log_wait_commit(journal, tid); lock_map_release(&handle->h_lockdep_map); jbd2_free_handle(handle); return err; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
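jbd2_journal_stop() above batches synchronous handles: the expected commit time is clamped between the min and max batch times, and the handle only sleeps (so other writers can join the transaction) if it has been running for less than that. A back-of-the-envelope model of that decision with invented numbers; the helper names below are not kernel API.

#include <stdio.h>

static unsigned long long clamp_u64(unsigned long long v,
                                    unsigned long long lo,
                                    unsigned long long hi)
{
    if (v < lo)
        return lo;
    if (v > hi)
        return hi;
    return v;
}

/* All times in nanoseconds, mirroring the 1000 * usec conversions above. */
static int should_batch(unsigned long long avg_commit_ns,
                        unsigned long long trans_ns,
                        unsigned long long min_batch_us,
                        unsigned long long max_batch_us)
{
    unsigned long long commit_ns = clamp_u64(avg_commit_ns,
                                             1000 * min_batch_us,
                                             1000 * max_batch_us);
    return trans_ns < commit_ns;  /* sleep for the delta, then commit */
}

int main(void)
{
    /* 2ms average commit, handle ran for 0.5ms, batch window 0..15ms */
    printf("short-lived handle batches: %d\n",
           should_batch(2000000ULL, 500000ULL, 0, 15000));
    /* handle already ran longer than a commit takes: no extra sleep */
    printf("long-lived handle batches:  %d\n",
           should_batch(2000000ULL, 3000000ULL, 0, 15000));
    return 0;
}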
19,491
int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page, gfp_t gfp_mask) { struct buffer_head *head; struct buffer_head *bh; int ret = 0; J_ASSERT(PageLocked(page)); head = page_buffers(page); bh = head; do { struct journal_head *jh; /* * We take our own ref against the journal_head here to avoid * having to add tons of locking around each instance of * jbd2_journal_put_journal_head(). */ jh = jbd2_journal_grab_journal_head(bh); if (!jh) continue; jbd_lock_bh_state(bh); __journal_try_to_free_buffer(journal, bh); jbd2_journal_put_journal_head(jh); jbd_unlock_bh_state(bh); if (buffer_jbd(bh)) goto busy; } while ((bh = bh->b_this_page) != head); ret = try_to_free_buffers(page); busy: return ret; }
DoS Overflow
0
int jbd2_journal_try_to_free_buffers(journal_t *journal, struct page *page, gfp_t gfp_mask) { struct buffer_head *head; struct buffer_head *bh; int ret = 0; J_ASSERT(PageLocked(page)); head = page_buffers(page); bh = head; do { struct journal_head *jh; /* * We take our own ref against the journal_head here to avoid * having to add tons of locking around each instance of * jbd2_journal_put_journal_head(). */ jh = jbd2_journal_grab_journal_head(bh); if (!jh) continue; jbd_lock_bh_state(bh); __journal_try_to_free_buffer(journal, bh); jbd2_journal_put_journal_head(jh); jbd_unlock_bh_state(bh); if (buffer_jbd(bh)) goto busy; } while ((bh = bh->b_this_page) != head); ret = try_to_free_buffers(page); busy: return ret; }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,492
void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh) { struct buffer_head *bh = jh2bh(jh); /* Get reference so that buffer cannot be freed before we unlock it */ get_bh(bh); jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); __jbd2_journal_unfile_buffer(jh); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); __brelse(bh); }
DoS Overflow
0
void jbd2_journal_unfile_buffer(journal_t *journal, struct journal_head *jh) { struct buffer_head *bh = jh2bh(jh); /* Get reference so that buffer cannot be freed before we unlock it */ get_bh(bh); jbd_lock_bh_state(bh); spin_lock(&journal->j_list_lock); __jbd2_journal_unfile_buffer(jh); spin_unlock(&journal->j_list_lock); jbd_unlock_bh_state(bh); __brelse(bh); }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,493
void jbd2_journal_unlock_updates (journal_t *journal) { J_ASSERT(journal->j_barrier_count != 0); mutex_unlock(&journal->j_barrier); write_lock(&journal->j_state_lock); --journal->j_barrier_count; write_unlock(&journal->j_state_lock); wake_up(&journal->j_wait_transaction_locked); }
DoS Overflow
0
void jbd2_journal_unlock_updates (journal_t *journal) { J_ASSERT(journal->j_barrier_count != 0); mutex_unlock(&journal->j_barrier); write_lock(&journal->j_state_lock); --journal->j_barrier_count; write_unlock(&journal->j_state_lock); wake_up(&journal->j_wait_transaction_locked); }
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh) clear_buffer_mapped(bh); clear_buffer_req(bh); clear_buffer_new(bh); + clear_buffer_delay(bh); + clear_buffer_unwritten(bh); bh->b_bdev = NULL; return may_free; }
CWE-119
null
null
19,494
static int start_this_handle(journal_t *journal, handle_t *handle, gfp_t gfp_mask)
{
	transaction_t *transaction, *new_transaction = NULL;
	tid_t tid;
	int needed, need_to_start;
	int nblocks = handle->h_buffer_credits;
	unsigned long ts = jiffies;

	if (nblocks > journal->j_max_transaction_buffers) {
		printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n", current->comm, nblocks, journal->j_max_transaction_buffers);
		return -ENOSPC;
	}

alloc_transaction:
	if (!journal->j_running_transaction) {
		new_transaction = kzalloc(sizeof(*new_transaction), gfp_mask);
		if (!new_transaction) {
			/*
			 * If __GFP_FS is not present, then we may be
			 * being called from inside the fs writeback
			 * layer, so we MUST NOT fail. Since
			 * __GFP_NOFAIL is going away, we will arrange
			 * to retry the allocation ourselves.
			 */
			if ((gfp_mask & __GFP_FS) == 0) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto alloc_transaction;
			}
			return -ENOMEM;
		}
	}

	jbd_debug(3, "New handle %p going live.\n", handle);

	/*
	 * We need to hold j_state_lock until t_updates has been incremented,
	 * for proper journal barrier handling
	 */
repeat:
	read_lock(&journal->j_state_lock);
	BUG_ON(journal->j_flags & JBD2_UNMOUNT);
	if (is_journal_aborted(journal) || (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
		read_unlock(&journal->j_state_lock);
		kfree(new_transaction);
		return -EROFS;
	}

	/* Wait on the journal's transaction barrier if necessary */
	if (journal->j_barrier_count) {
		read_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked, journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		read_unlock(&journal->j_state_lock);
		if (!new_transaction)
			goto alloc_transaction;
		write_lock(&journal->j_state_lock);
		if (!journal->j_running_transaction) {
			jbd2_get_transaction(journal, new_transaction);
			new_transaction = NULL;
		}
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	transaction = journal->j_running_transaction;

	/*
	 * If the current transaction is locked down for commit, wait for the
	 * lock to be released.
	 */
	if (transaction->t_state == T_LOCKED) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE);
		read_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * If there is not enough space left in the log to write all potential
	 * buffers requested by this operation, we need to stall pending a log
	 * checkpoint to free some more log space.
	 */
	needed = atomic_add_return(nblocks, &transaction->t_outstanding_credits);

	if (needed > journal->j_max_transaction_buffers) {
		/*
		 * If the current transaction is already too large, then start
		 * to commit it: we can then go back and attach this handle to
		 * a new transaction.
		 */
		DEFINE_WAIT(wait);

		jbd_debug(2, "Handle %p starting new commit...\n", handle);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		prepare_to_wait(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE);
		tid = transaction->t_tid;
		need_to_start = !tid_geq(journal->j_commit_request, tid);
		read_unlock(&journal->j_state_lock);
		if (need_to_start)
			jbd2_log_start_commit(journal, tid);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint. This is *critical* for
	 * correctness: a checkpoint of a buffer which is also
	 * associated with a committing transaction creates a deadlock,
	 * so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction.
	 *
	 * The worst part is, any transaction currently committing can
	 * reduce the free space arbitrarily. Be careful to account for
	 * those buffers when checkpointing.
	 */

	/*
	 * @@@ AKPM: This seems rather over-defensive. We're giving commit
	 * a _lot_ of headroom: 1/4 of the journal plus the size of
	 * the committing transaction. Really, we only need to give it
	 * committing_transaction->t_outstanding_credits plus "enough" for
	 * the log control blocks.
	 * Also, this test is inconsistent with the matching one in
	 * jbd2_journal_extend().
	 */
	if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		write_lock(&journal->j_state_lock);
		if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
			__jbd2_log_wait_for_space(journal);
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction. */
	update_t_max_wait(transaction, ts);
	handle->h_transaction = transaction;
	atomic_inc(&transaction->t_updates);
	atomic_inc(&transaction->t_handle_count);
	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n", handle, nblocks, atomic_read(&transaction->t_outstanding_credits), __jbd2_log_space_left(journal));
	read_unlock(&journal->j_state_lock);

	lock_map_acquire(&handle->h_lockdep_map);
	kfree(new_transaction);
	return 0;
}
DoS Overflow
0
static int start_this_handle(journal_t *journal, handle_t *handle, gfp_t gfp_mask)
{
	transaction_t *transaction, *new_transaction = NULL;
	tid_t tid;
	int needed, need_to_start;
	int nblocks = handle->h_buffer_credits;
	unsigned long ts = jiffies;

	if (nblocks > journal->j_max_transaction_buffers) {
		printk(KERN_ERR "JBD2: %s wants too many credits (%d > %d)\n", current->comm, nblocks, journal->j_max_transaction_buffers);
		return -ENOSPC;
	}

alloc_transaction:
	if (!journal->j_running_transaction) {
		new_transaction = kzalloc(sizeof(*new_transaction), gfp_mask);
		if (!new_transaction) {
			/*
			 * If __GFP_FS is not present, then we may be
			 * being called from inside the fs writeback
			 * layer, so we MUST NOT fail. Since
			 * __GFP_NOFAIL is going away, we will arrange
			 * to retry the allocation ourselves.
			 */
			if ((gfp_mask & __GFP_FS) == 0) {
				congestion_wait(BLK_RW_ASYNC, HZ/50);
				goto alloc_transaction;
			}
			return -ENOMEM;
		}
	}

	jbd_debug(3, "New handle %p going live.\n", handle);

	/*
	 * We need to hold j_state_lock until t_updates has been incremented,
	 * for proper journal barrier handling
	 */
repeat:
	read_lock(&journal->j_state_lock);
	BUG_ON(journal->j_flags & JBD2_UNMOUNT);
	if (is_journal_aborted(journal) || (journal->j_errno != 0 && !(journal->j_flags & JBD2_ACK_ERR))) {
		read_unlock(&journal->j_state_lock);
		kfree(new_transaction);
		return -EROFS;
	}

	/* Wait on the journal's transaction barrier if necessary */
	if (journal->j_barrier_count) {
		read_unlock(&journal->j_state_lock);
		wait_event(journal->j_wait_transaction_locked, journal->j_barrier_count == 0);
		goto repeat;
	}

	if (!journal->j_running_transaction) {
		read_unlock(&journal->j_state_lock);
		if (!new_transaction)
			goto alloc_transaction;
		write_lock(&journal->j_state_lock);
		if (!journal->j_running_transaction) {
			jbd2_get_transaction(journal, new_transaction);
			new_transaction = NULL;
		}
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	transaction = journal->j_running_transaction;

	/*
	 * If the current transaction is locked down for commit, wait for the
	 * lock to be released.
	 */
	if (transaction->t_state == T_LOCKED) {
		DEFINE_WAIT(wait);

		prepare_to_wait(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE);
		read_unlock(&journal->j_state_lock);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * If there is not enough space left in the log to write all potential
	 * buffers requested by this operation, we need to stall pending a log
	 * checkpoint to free some more log space.
	 */
	needed = atomic_add_return(nblocks, &transaction->t_outstanding_credits);

	if (needed > journal->j_max_transaction_buffers) {
		/*
		 * If the current transaction is already too large, then start
		 * to commit it: we can then go back and attach this handle to
		 * a new transaction.
		 */
		DEFINE_WAIT(wait);

		jbd_debug(2, "Handle %p starting new commit...\n", handle);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		prepare_to_wait(&journal->j_wait_transaction_locked, &wait, TASK_UNINTERRUPTIBLE);
		tid = transaction->t_tid;
		need_to_start = !tid_geq(journal->j_commit_request, tid);
		read_unlock(&journal->j_state_lock);
		if (need_to_start)
			jbd2_log_start_commit(journal, tid);
		schedule();
		finish_wait(&journal->j_wait_transaction_locked, &wait);
		goto repeat;
	}

	/*
	 * The commit code assumes that it can get enough log space
	 * without forcing a checkpoint. This is *critical* for
	 * correctness: a checkpoint of a buffer which is also
	 * associated with a committing transaction creates a deadlock,
	 * so commit simply cannot force through checkpoints.
	 *
	 * We must therefore ensure the necessary space in the journal
	 * *before* starting to dirty potentially checkpointed buffers
	 * in the new transaction.
	 *
	 * The worst part is, any transaction currently committing can
	 * reduce the free space arbitrarily. Be careful to account for
	 * those buffers when checkpointing.
	 */

	/*
	 * @@@ AKPM: This seems rather over-defensive. We're giving commit
	 * a _lot_ of headroom: 1/4 of the journal plus the size of
	 * the committing transaction. Really, we only need to give it
	 * committing_transaction->t_outstanding_credits plus "enough" for
	 * the log control blocks.
	 * Also, this test is inconsistent with the matching one in
	 * jbd2_journal_extend().
	 */
	if (__jbd2_log_space_left(journal) < jbd_space_needed(journal)) {
		jbd_debug(2, "Handle %p waiting for checkpoint...\n", handle);
		atomic_sub(nblocks, &transaction->t_outstanding_credits);
		read_unlock(&journal->j_state_lock);
		write_lock(&journal->j_state_lock);
		if (__jbd2_log_space_left(journal) < jbd_space_needed(journal))
			__jbd2_log_wait_for_space(journal);
		write_unlock(&journal->j_state_lock);
		goto repeat;
	}

	/* OK, account for the buffers that this operation expects to
	 * use and add the handle to the running transaction. */
	update_t_max_wait(transaction, ts);
	handle->h_transaction = transaction;
	atomic_inc(&transaction->t_updates);
	atomic_inc(&transaction->t_handle_count);
	jbd_debug(4, "Handle %p given %d credits (total %d, free %d)\n", handle, nblocks, atomic_read(&transaction->t_outstanding_credits), __jbd2_log_space_left(journal));
	read_unlock(&journal->j_state_lock);

	lock_map_acquire(&handle->h_lockdep_map);
	kfree(new_transaction);
	return 0;
}
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
 	clear_buffer_mapped(bh);
 	clear_buffer_req(bh);
 	clear_buffer_new(bh);
+	clear_buffer_delay(bh);
+	clear_buffer_unwritten(bh);
 	bh->b_bdev = NULL;
 	return may_free;
 }
CWE-119
null
null
19,495
static inline void update_t_max_wait(transaction_t *transaction, unsigned long ts)
{
#ifdef CONFIG_JBD2_DEBUG
	if (jbd2_journal_enable_debug &&
	    time_after(transaction->t_start, ts)) {
		ts = jbd2_time_diff(ts, transaction->t_start);
		spin_lock(&transaction->t_handle_lock);
		if (ts > transaction->t_max_wait)
			transaction->t_max_wait = ts;
		spin_unlock(&transaction->t_handle_lock);
	}
#endif
}
DoS Overflow
0
static inline void update_t_max_wait(transaction_t *transaction, unsigned long ts)
{
#ifdef CONFIG_JBD2_DEBUG
	if (jbd2_journal_enable_debug &&
	    time_after(transaction->t_start, ts)) {
		ts = jbd2_time_diff(ts, transaction->t_start);
		spin_lock(&transaction->t_handle_lock);
		if (ts > transaction->t_max_wait)
			transaction->t_max_wait = ts;
		spin_unlock(&transaction->t_handle_lock);
	}
#endif
}
@@ -1949,6 +1949,8 @@ static int journal_unmap_buffer(journal_t *journal, struct buffer_head *bh)
 	clear_buffer_mapped(bh);
 	clear_buffer_req(bh);
 	clear_buffer_new(bh);
+	clear_buffer_delay(bh);
+	clear_buffer_unwritten(bh);
 	bh->b_bdev = NULL;
 	return may_free;
 }
CWE-119
null
null
19,496
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
	u8 *dst = dctx->buffer;

	if (dctx->bytes) {
		u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

		while (dctx->bytes--)
			*tmp++ ^= 0;

		gf128mul_4k_lle((be128 *)dst, ctx->gf128);
	}

	dctx->bytes = 0;
}
DoS
0
static void ghash_flush(struct ghash_ctx *ctx, struct ghash_desc_ctx *dctx)
{
	u8 *dst = dctx->buffer;

	if (dctx->bytes) {
		u8 *tmp = dst + (GHASH_BLOCK_SIZE - dctx->bytes);

		while (dctx->bytes--)
			*tmp++ ^= 0;

		gf128mul_4k_lle((be128 *)dst, ctx->gf128);
	}

	dctx->bytes = 0;
}
@@ -67,6 +67,9 @@ static int ghash_update(struct shash_desc *desc,
 	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	u8 *dst = dctx->buffer;
 
+	if (!ctx->gf128)
+		return -ENOKEY;
+
 	if (dctx->bytes) {
 		int n = min(srclen, dctx->bytes);
 		u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -119,6 +122,9 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
 	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	u8 *buf = dctx->buffer;
 
+	if (!ctx->gf128)
+		return -ENOKEY;
+
 	ghash_flush(ctx, dctx);
 	memcpy(dst, buf, GHASH_BLOCK_SIZE);
null
null
null
19,497
static int ghash_init(struct shash_desc *desc)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	memset(dctx, 0, sizeof(*dctx));

	return 0;
}
DoS
0
static int ghash_init(struct shash_desc *desc)
{
	struct ghash_desc_ctx *dctx = shash_desc_ctx(desc);

	memset(dctx, 0, sizeof(*dctx));

	return 0;
}
@@ -67,6 +67,9 @@ static int ghash_update(struct shash_desc *desc,
 	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	u8 *dst = dctx->buffer;
 
+	if (!ctx->gf128)
+		return -ENOKEY;
+
 	if (dctx->bytes) {
 		int n = min(srclen, dctx->bytes);
 		u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -119,6 +122,9 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
 	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	u8 *buf = dctx->buffer;
 
+	if (!ctx->gf128)
+		return -ENOKEY;
+
 	ghash_flush(ctx, dctx);
 	memcpy(dst, buf, GHASH_BLOCK_SIZE);
null
null
null
19,498
static void __exit ghash_mod_exit(void)
{
	crypto_unregister_shash(&ghash_alg);
}
DoS
0
static void __exit ghash_mod_exit(void)
{
	crypto_unregister_shash(&ghash_alg);
}
@@ -67,6 +67,9 @@ static int ghash_update(struct shash_desc *desc,
 	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	u8 *dst = dctx->buffer;
 
+	if (!ctx->gf128)
+		return -ENOKEY;
+
 	if (dctx->bytes) {
 		int n = min(srclen, dctx->bytes);
 		u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -119,6 +122,9 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
 	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	u8 *buf = dctx->buffer;
 
+	if (!ctx->gf128)
+		return -ENOKEY;
+
 	ghash_flush(ctx, dctx);
 	memcpy(dst, buf, GHASH_BLOCK_SIZE);
null
null
null
19,499
static int __init ghash_mod_init(void)
{
	return crypto_register_shash(&ghash_alg);
}
DoS
0
static int __init ghash_mod_init(void)
{
	return crypto_register_shash(&ghash_alg);
}
@@ -67,6 +67,9 @@ static int ghash_update(struct shash_desc *desc,
 	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	u8 *dst = dctx->buffer;
 
+	if (!ctx->gf128)
+		return -ENOKEY;
+
 	if (dctx->bytes) {
 		int n = min(srclen, dctx->bytes);
 		u8 *pos = dst + (GHASH_BLOCK_SIZE - dctx->bytes);
@@ -119,6 +122,9 @@ static int ghash_final(struct shash_desc *desc, u8 *dst)
 	struct ghash_ctx *ctx = crypto_shash_ctx(desc->tfm);
 	u8 *buf = dctx->buffer;
 
+	if (!ctx->gf128)
+		return -ENOKEY;
+
 	ghash_flush(ctx, dctx);
 	memcpy(dst, buf, GHASH_BLOCK_SIZE);
null
null
null