| idx (int64) | func_before (string) | Vulnerability Classification (string) | vul (int64) | func_after (string) | patch (string) | CWE ID (string) | lines_before (string) | lines_after (string) |
|---|---|---|---|---|---|---|---|---|
4,700
|
static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
{
int dir;
size_t len = 0, pktlen = 0;
const char *str = NULL;
int pid;
int ret;
int i;
USBDevice *dev;
USBEndpoint *ep;
struct ohci_td td;
uint32_t addr;
int flag_r;
int completion;
addr = ed->head & OHCI_DPTR_MASK;
/* See if this TD has already been submitted to the device. */
completion = (addr == ohci->async_td);
if (completion && !ohci->async_complete) {
trace_usb_ohci_td_skip_async();
return 1;
}
if (ohci_read_td(ohci, addr, &td)) {
trace_usb_ohci_td_read_error(addr);
ohci_die(ohci);
return 0;
}
dir = OHCI_BM(ed->flags, ED_D);
switch (dir) {
case OHCI_TD_DIR_OUT:
case OHCI_TD_DIR_IN:
/* Same value. */
break;
default:
dir = OHCI_BM(td.flags, TD_DP);
break;
}
switch (dir) {
case OHCI_TD_DIR_IN:
str = "in";
pid = USB_TOKEN_IN;
break;
case OHCI_TD_DIR_OUT:
str = "out";
pid = USB_TOKEN_OUT;
break;
case OHCI_TD_DIR_SETUP:
str = "setup";
pid = USB_TOKEN_SETUP;
break;
default:
trace_usb_ohci_td_bad_direction(dir);
return 1;
}
if (td.cbp && td.be) {
if ((td.cbp & 0xfffff000) != (td.be & 0xfffff000)) {
len = (td.be & 0xfff) + 0x1001 - (td.cbp & 0xfff);
} else {
len = (td.be - td.cbp) + 1;
}
pktlen = len;
if (len && dir != OHCI_TD_DIR_IN) {
/* The endpoint may not allow us to transfer it all now */
pktlen = (ed->flags & OHCI_ED_MPS_MASK) >> OHCI_ED_MPS_SHIFT;
if (pktlen > len) {
pktlen = len;
}
if (!completion) {
if (ohci_copy_td(ohci, &td, ohci->usb_buf, pktlen,
DMA_DIRECTION_TO_DEVICE)) {
ohci_die(ohci);
}
}
}
}
flag_r = (td.flags & OHCI_TD_R) != 0;
trace_usb_ohci_td_pkt_hdr(addr, (int64_t)pktlen, (int64_t)len, str,
flag_r, td.cbp, td.be);
ohci_td_pkt("OUT", ohci->usb_buf, pktlen);
if (completion) {
ohci->async_td = 0;
ohci->async_complete = false;
} else {
if (ohci->async_td) {
/* ??? The hardware should allow one active packet per
endpoint. We only allow one active packet per controller.
This should be sufficient as long as devices respond in a
timely manner.
*/
trace_usb_ohci_td_too_many_pending();
return 1;
}
dev = ohci_find_device(ohci, OHCI_BM(ed->flags, ED_FA));
ep = usb_ep_get(dev, pid, OHCI_BM(ed->flags, ED_EN));
usb_packet_setup(&ohci->usb_packet, pid, ep, 0, addr, !flag_r,
OHCI_BM(td.flags, TD_DI) == 0);
usb_packet_addbuf(&ohci->usb_packet, ohci->usb_buf, pktlen);
usb_handle_packet(dev, &ohci->usb_packet);
trace_usb_ohci_td_packet_status(ohci->usb_packet.status);
if (ohci->usb_packet.status == USB_RET_ASYNC) {
usb_device_flush_ep_queue(dev, ep);
ohci->async_td = addr;
return 1;
}
}
if (ohci->usb_packet.status == USB_RET_SUCCESS) {
ret = ohci->usb_packet.actual_length;
} else {
ret = ohci->usb_packet.status;
}
if (ret >= 0) {
if (dir == OHCI_TD_DIR_IN) {
if (ohci_copy_td(ohci, &td, ohci->usb_buf, ret,
DMA_DIRECTION_FROM_DEVICE)) {
ohci_die(ohci);
}
ohci_td_pkt("IN", ohci->usb_buf, pktlen);
} else {
ret = pktlen;
}
}
/* Writeback */
if (ret == pktlen || (dir == OHCI_TD_DIR_IN && ret >= 0 && flag_r)) {
/* Transmission succeeded. */
if (ret == len) {
td.cbp = 0;
} else {
if ((td.cbp & 0xfff) + ret > 0xfff) {
td.cbp = (td.be & ~0xfff) + ((td.cbp + ret) & 0xfff);
} else {
td.cbp += ret;
}
}
td.flags |= OHCI_TD_T1;
td.flags ^= OHCI_TD_T0;
OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_NOERROR);
OHCI_SET_BM(td.flags, TD_EC, 0);
if ((dir != OHCI_TD_DIR_IN) && (ret != len)) {
/* Partial packet transfer: TD not ready to retire yet */
goto exit_no_retire;
}
/* Setting ED_C is part of the TD retirement process */
ed->head &= ~OHCI_ED_C;
if (td.flags & OHCI_TD_T0)
ed->head |= OHCI_ED_C;
} else {
if (ret >= 0) {
trace_usb_ohci_td_underrun();
OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_DATAUNDERRUN);
} else {
switch (ret) {
case USB_RET_IOERROR:
case USB_RET_NODEV:
trace_usb_ohci_td_dev_error();
OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_DEVICENOTRESPONDING);
break;
case USB_RET_NAK:
trace_usb_ohci_td_nak();
return 1;
case USB_RET_STALL:
trace_usb_ohci_td_stall();
OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_STALL);
break;
case USB_RET_BABBLE:
trace_usb_ohci_td_babble();
OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_DATAOVERRUN);
break;
default:
trace_usb_ohci_td_bad_device_response(ret);
OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_UNDEXPETEDPID);
OHCI_SET_BM(td.flags, TD_EC, 3);
break;
}
}
ed->head |= OHCI_ED_H;
}
/* Retire this TD */
ed->head &= ~OHCI_DPTR_MASK;
ed->head |= td.next & OHCI_DPTR_MASK;
td.next = ohci->done;
ohci->done = addr;
i = OHCI_BM(td.flags, TD_DI);
if (i < ohci->done_count)
ohci->done_count = i;
exit_no_retire:
if (ohci_put_td(ohci, addr, &td)) {
ohci_die(ohci);
return 1;
}
return OHCI_BM(td.flags, TD_CC) != OHCI_CC_NOERROR;
}
|
DoS
| 0
|
static int ohci_service_td(OHCIState *ohci, struct ohci_ed *ed)
{
int dir;
size_t len = 0, pktlen = 0;
const char *str = NULL;
int pid;
int ret;
int i;
USBDevice *dev;
USBEndpoint *ep;
struct ohci_td td;
uint32_t addr;
int flag_r;
int completion;
addr = ed->head & OHCI_DPTR_MASK;
/* See if this TD has already been submitted to the device. */
completion = (addr == ohci->async_td);
if (completion && !ohci->async_complete) {
trace_usb_ohci_td_skip_async();
return 1;
}
if (ohci_read_td(ohci, addr, &td)) {
trace_usb_ohci_td_read_error(addr);
ohci_die(ohci);
return 0;
}
dir = OHCI_BM(ed->flags, ED_D);
switch (dir) {
case OHCI_TD_DIR_OUT:
case OHCI_TD_DIR_IN:
/* Same value. */
break;
default:
dir = OHCI_BM(td.flags, TD_DP);
break;
}
switch (dir) {
case OHCI_TD_DIR_IN:
str = "in";
pid = USB_TOKEN_IN;
break;
case OHCI_TD_DIR_OUT:
str = "out";
pid = USB_TOKEN_OUT;
break;
case OHCI_TD_DIR_SETUP:
str = "setup";
pid = USB_TOKEN_SETUP;
break;
default:
trace_usb_ohci_td_bad_direction(dir);
return 1;
}
if (td.cbp && td.be) {
if ((td.cbp & 0xfffff000) != (td.be & 0xfffff000)) {
len = (td.be & 0xfff) + 0x1001 - (td.cbp & 0xfff);
} else {
len = (td.be - td.cbp) + 1;
}
pktlen = len;
if (len && dir != OHCI_TD_DIR_IN) {
/* The endpoint may not allow us to transfer it all now */
pktlen = (ed->flags & OHCI_ED_MPS_MASK) >> OHCI_ED_MPS_SHIFT;
if (pktlen > len) {
pktlen = len;
}
if (!completion) {
if (ohci_copy_td(ohci, &td, ohci->usb_buf, pktlen,
DMA_DIRECTION_TO_DEVICE)) {
ohci_die(ohci);
}
}
}
}
flag_r = (td.flags & OHCI_TD_R) != 0;
trace_usb_ohci_td_pkt_hdr(addr, (int64_t)pktlen, (int64_t)len, str,
flag_r, td.cbp, td.be);
ohci_td_pkt("OUT", ohci->usb_buf, pktlen);
if (completion) {
ohci->async_td = 0;
ohci->async_complete = false;
} else {
if (ohci->async_td) {
/* ??? The hardware should allow one active packet per
endpoint. We only allow one active packet per controller.
This should be sufficient as long as devices respond in a
timely manner.
*/
trace_usb_ohci_td_too_many_pending();
return 1;
}
dev = ohci_find_device(ohci, OHCI_BM(ed->flags, ED_FA));
ep = usb_ep_get(dev, pid, OHCI_BM(ed->flags, ED_EN));
usb_packet_setup(&ohci->usb_packet, pid, ep, 0, addr, !flag_r,
OHCI_BM(td.flags, TD_DI) == 0);
usb_packet_addbuf(&ohci->usb_packet, ohci->usb_buf, pktlen);
usb_handle_packet(dev, &ohci->usb_packet);
trace_usb_ohci_td_packet_status(ohci->usb_packet.status);
if (ohci->usb_packet.status == USB_RET_ASYNC) {
usb_device_flush_ep_queue(dev, ep);
ohci->async_td = addr;
return 1;
}
}
if (ohci->usb_packet.status == USB_RET_SUCCESS) {
ret = ohci->usb_packet.actual_length;
} else {
ret = ohci->usb_packet.status;
}
if (ret >= 0) {
if (dir == OHCI_TD_DIR_IN) {
if (ohci_copy_td(ohci, &td, ohci->usb_buf, ret,
DMA_DIRECTION_FROM_DEVICE)) {
ohci_die(ohci);
}
ohci_td_pkt("IN", ohci->usb_buf, pktlen);
} else {
ret = pktlen;
}
}
/* Writeback */
if (ret == pktlen || (dir == OHCI_TD_DIR_IN && ret >= 0 && flag_r)) {
/* Transmission succeeded. */
if (ret == len) {
td.cbp = 0;
} else {
if ((td.cbp & 0xfff) + ret > 0xfff) {
td.cbp = (td.be & ~0xfff) + ((td.cbp + ret) & 0xfff);
} else {
td.cbp += ret;
}
}
td.flags |= OHCI_TD_T1;
td.flags ^= OHCI_TD_T0;
OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_NOERROR);
OHCI_SET_BM(td.flags, TD_EC, 0);
if ((dir != OHCI_TD_DIR_IN) && (ret != len)) {
/* Partial packet transfer: TD not ready to retire yet */
goto exit_no_retire;
}
/* Setting ED_C is part of the TD retirement process */
ed->head &= ~OHCI_ED_C;
if (td.flags & OHCI_TD_T0)
ed->head |= OHCI_ED_C;
} else {
if (ret >= 0) {
trace_usb_ohci_td_underrun();
OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_DATAUNDERRUN);
} else {
switch (ret) {
case USB_RET_IOERROR:
case USB_RET_NODEV:
trace_usb_ohci_td_dev_error();
OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_DEVICENOTRESPONDING);
break;
case USB_RET_NAK:
trace_usb_ohci_td_nak();
return 1;
case USB_RET_STALL:
trace_usb_ohci_td_stall();
OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_STALL);
break;
case USB_RET_BABBLE:
trace_usb_ohci_td_babble();
OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_DATAOVERRUN);
break;
default:
trace_usb_ohci_td_bad_device_response(ret);
OHCI_SET_BM(td.flags, TD_CC, OHCI_CC_UNDEXPETEDPID);
OHCI_SET_BM(td.flags, TD_EC, 3);
break;
}
}
ed->head |= OHCI_ED_H;
}
/* Retire this TD */
ed->head &= ~OHCI_DPTR_MASK;
ed->head |= td.next & OHCI_DPTR_MASK;
td.next = ohci->done;
ohci->done = addr;
i = OHCI_BM(td.flags, TD_DI);
if (i < ohci->done_count)
ohci->done_count = i;
exit_no_retire:
if (ohci_put_td(ohci, addr, &td)) {
ohci_die(ohci);
return 1;
}
return OHCI_BM(td.flags, TD_CC) != OHCI_CC_NOERROR;
}
|
@@ -725,7 +725,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
|
CWE-835
| null | null |
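The patch column for this group of OHCI rows is the same one-line change in ohci_service_iso_td(): on a failed isochronous TD read the function now returns 1 instead of 0, so its caller stops walking the endpoint list instead of retrying the same unreadable descriptor forever (the CWE-835 condition these rows are labelled with). Below is a minimal, self-contained model of that pattern; the names and the toy loop are illustrative assumptions, not the QEMU code.

```c
/* Standalone model (not QEMU code) of the CWE-835 pattern fixed by the patch:
 * a service routine that returns 0 ("keep going") even when it made no
 * progress traps its caller in an endless loop; returning 1 ("stop") on the
 * error path breaks the cycle. */
#include <stdio.h>

static int service_td(int *head, int read_ok, int fixed)
{
    if (!read_ok) {
        return fixed ? 1 : 0;   /* pre-patch behaviour: 0, caller keeps spinning */
    }
    (*head)++;                  /* normal case: consume one descriptor */
    return *head >= 4;          /* non-zero once the (toy) list is exhausted */
}

int main(void)
{
    int head = 0, iterations = 0;
    /* With fixed=1 the loop ends immediately on the simulated read error;
     * with fixed=0 it would never terminate (capped here for demonstration). */
    while (!service_td(&head, /*read_ok=*/0, /*fixed=*/1)) {
        if (++iterations > 1000) { puts("stuck: infinite loop"); break; }
    }
    printf("stopped after %d iterations\n", iterations);
    return 0;
}
```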
4,701
|
static void ohci_set_ctl(OHCIState *ohci, uint32_t val)
{
uint32_t old_state;
uint32_t new_state;
old_state = ohci->ctl & OHCI_CTL_HCFS;
ohci->ctl = val;
new_state = ohci->ctl & OHCI_CTL_HCFS;
/* no state change */
if (old_state == new_state)
return;
trace_usb_ohci_set_ctl(ohci->name, new_state);
switch (new_state) {
case OHCI_USB_OPERATIONAL:
ohci_bus_start(ohci);
break;
case OHCI_USB_SUSPEND:
ohci_bus_stop(ohci);
/* clear pending SF otherwise linux driver loops in ohci_irq() */
ohci->intr_status &= ~OHCI_INTR_SF;
ohci_intr_update(ohci);
break;
case OHCI_USB_RESUME:
trace_usb_ohci_resume(ohci->name);
break;
case OHCI_USB_RESET:
ohci_roothub_reset(ohci);
break;
}
}
|
DoS
| 0
|
static void ohci_set_ctl(OHCIState *ohci, uint32_t val)
{
uint32_t old_state;
uint32_t new_state;
old_state = ohci->ctl & OHCI_CTL_HCFS;
ohci->ctl = val;
new_state = ohci->ctl & OHCI_CTL_HCFS;
/* no state change */
if (old_state == new_state)
return;
trace_usb_ohci_set_ctl(ohci->name, new_state);
switch (new_state) {
case OHCI_USB_OPERATIONAL:
ohci_bus_start(ohci);
break;
case OHCI_USB_SUSPEND:
ohci_bus_stop(ohci);
/* clear pending SF otherwise linux driver loops in ohci_irq() */
ohci->intr_status &= ~OHCI_INTR_SF;
ohci_intr_update(ohci);
break;
case OHCI_USB_RESUME:
trace_usb_ohci_resume(ohci->name);
break;
case OHCI_USB_RESET:
ohci_roothub_reset(ohci);
break;
}
}
|
@@ -725,7 +725,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
|
CWE-835
| null | null |
4,702
|
static void ohci_set_hub_status(OHCIState *ohci, uint32_t val)
{
uint32_t old_state;
old_state = ohci->rhstatus;
/* write 1 to clear OCIC */
if (val & OHCI_RHS_OCIC)
ohci->rhstatus &= ~OHCI_RHS_OCIC;
if (val & OHCI_RHS_LPS) {
int i;
for (i = 0; i < ohci->num_ports; i++)
ohci_port_power(ohci, i, 0);
trace_usb_ohci_hub_power_down();
}
if (val & OHCI_RHS_LPSC) {
int i;
for (i = 0; i < ohci->num_ports; i++)
ohci_port_power(ohci, i, 1);
trace_usb_ohci_hub_power_up();
}
if (val & OHCI_RHS_DRWE)
ohci->rhstatus |= OHCI_RHS_DRWE;
if (val & OHCI_RHS_CRWE)
ohci->rhstatus &= ~OHCI_RHS_DRWE;
if (old_state != ohci->rhstatus)
ohci_set_interrupt(ohci, OHCI_INTR_RHSC);
}
|
DoS
| 0
|
static void ohci_set_hub_status(OHCIState *ohci, uint32_t val)
{
uint32_t old_state;
old_state = ohci->rhstatus;
/* write 1 to clear OCIC */
if (val & OHCI_RHS_OCIC)
ohci->rhstatus &= ~OHCI_RHS_OCIC;
if (val & OHCI_RHS_LPS) {
int i;
for (i = 0; i < ohci->num_ports; i++)
ohci_port_power(ohci, i, 0);
trace_usb_ohci_hub_power_down();
}
if (val & OHCI_RHS_LPSC) {
int i;
for (i = 0; i < ohci->num_ports; i++)
ohci_port_power(ohci, i, 1);
trace_usb_ohci_hub_power_up();
}
if (val & OHCI_RHS_DRWE)
ohci->rhstatus |= OHCI_RHS_DRWE;
if (val & OHCI_RHS_CRWE)
ohci->rhstatus &= ~OHCI_RHS_DRWE;
if (old_state != ohci->rhstatus)
ohci_set_interrupt(ohci, OHCI_INTR_RHSC);
}
|
@@ -725,7 +725,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
|
CWE-835
| null | null |
4,703
|
static inline void ohci_set_interrupt(OHCIState *ohci, uint32_t intr)
{
ohci->intr_status |= intr;
ohci_intr_update(ohci);
}
|
DoS
| 0
|
static inline void ohci_set_interrupt(OHCIState *ohci, uint32_t intr)
{
ohci->intr_status |= intr;
ohci_intr_update(ohci);
}
|
@@ -725,7 +725,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
|
CWE-835
| null | null |
4,704
|
static void ohci_stop_endpoints(OHCIState *ohci)
{
USBDevice *dev;
int i, j;
for (i = 0; i < ohci->num_ports; i++) {
dev = ohci->rhport[i].port.dev;
if (dev && dev->attached) {
usb_device_ep_stopped(dev, &dev->ep_ctl);
for (j = 0; j < USB_MAX_ENDPOINTS; j++) {
usb_device_ep_stopped(dev, &dev->ep_in[j]);
usb_device_ep_stopped(dev, &dev->ep_out[j]);
}
}
}
}
|
DoS
| 0
|
static void ohci_stop_endpoints(OHCIState *ohci)
{
USBDevice *dev;
int i, j;
for (i = 0; i < ohci->num_ports; i++) {
dev = ohci->rhport[i].port.dev;
if (dev && dev->attached) {
usb_device_ep_stopped(dev, &dev->ep_ctl);
for (j = 0; j < USB_MAX_ENDPOINTS; j++) {
usb_device_ep_stopped(dev, &dev->ep_in[j]);
usb_device_ep_stopped(dev, &dev->ep_out[j]);
}
}
}
}
|
@@ -725,7 +725,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
|
CWE-835
| null | null |
4,705
|
static void ohci_sysbus_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = ohci_realize_pxa;
set_bit(DEVICE_CATEGORY_USB, dc->categories);
dc->desc = "OHCI USB Controller";
dc->props = ohci_sysbus_properties;
dc->reset = usb_ohci_reset_sysbus;
}
|
DoS
| 0
|
static void ohci_sysbus_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = ohci_realize_pxa;
set_bit(DEVICE_CATEGORY_USB, dc->categories);
dc->desc = "OHCI USB Controller";
dc->props = ohci_sysbus_properties;
dc->reset = usb_ohci_reset_sysbus;
}
|
@@ -725,7 +725,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
|
CWE-835
| null | null |
4,706
|
static void ohci_td_pkt(const char *msg, const uint8_t *buf, size_t len)
{
bool print16 = !!trace_event_get_state(TRACE_USB_OHCI_TD_PKT_SHORT);
bool printall = !!trace_event_get_state(TRACE_USB_OHCI_TD_PKT_FULL);
const int width = 16;
int i;
char tmp[3 * width + 1];
char *p = tmp;
if (!printall && !print16) {
return;
}
for (i = 0; ; i++) {
if (i && (!(i % width) || (i == len))) {
if (!printall) {
trace_usb_ohci_td_pkt_short(msg, tmp);
break;
}
trace_usb_ohci_td_pkt_full(msg, tmp);
p = tmp;
*p = 0;
}
if (i == len) {
break;
}
p += sprintf(p, " %.2x", buf[i]);
}
}
|
DoS
| 0
|
static void ohci_td_pkt(const char *msg, const uint8_t *buf, size_t len)
{
bool print16 = !!trace_event_get_state(TRACE_USB_OHCI_TD_PKT_SHORT);
bool printall = !!trace_event_get_state(TRACE_USB_OHCI_TD_PKT_FULL);
const int width = 16;
int i;
char tmp[3 * width + 1];
char *p = tmp;
if (!printall && !print16) {
return;
}
for (i = 0; ; i++) {
if (i && (!(i % width) || (i == len))) {
if (!printall) {
trace_usb_ohci_td_pkt_short(msg, tmp);
break;
}
trace_usb_ohci_td_pkt_full(msg, tmp);
p = tmp;
*p = 0;
}
if (i == len) {
break;
}
p += sprintf(p, " %.2x", buf[i]);
}
}
|
@@ -725,7 +725,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
|
CWE-835
| null | null |
4,707
|
static inline int put_words(OHCIState *ohci,
dma_addr_t addr, uint16_t *buf, int num)
{
int i;
addr += ohci->localmem_base;
for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
uint16_t tmp = cpu_to_le16(*buf);
if (dma_memory_write(ohci->as, addr, &tmp, sizeof(tmp))) {
return -1;
}
}
return 0;
}
|
DoS
| 0
|
static inline int put_words(OHCIState *ohci,
dma_addr_t addr, uint16_t *buf, int num)
{
int i;
addr += ohci->localmem_base;
for (i = 0; i < num; i++, buf++, addr += sizeof(*buf)) {
uint16_t tmp = cpu_to_le16(*buf);
if (dma_memory_write(ohci->as, addr, &tmp, sizeof(tmp))) {
return -1;
}
}
return 0;
}
|
@@ -725,7 +725,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
|
CWE-835
| null | null |
4,708
|
static void usb_ohci_exit(PCIDevice *dev)
{
OHCIPCIState *ohci = PCI_OHCI(dev);
OHCIState *s = &ohci->state;
trace_usb_ohci_exit(s->name);
ohci_bus_stop(s);
if (s->async_td) {
usb_cancel_packet(&s->usb_packet);
s->async_td = 0;
}
ohci_stop_endpoints(s);
if (!ohci->masterbus) {
usb_bus_release(&s->bus);
}
timer_del(s->eof_timer);
timer_free(s->eof_timer);
}
|
DoS
| 0
|
static void usb_ohci_exit(PCIDevice *dev)
{
OHCIPCIState *ohci = PCI_OHCI(dev);
OHCIState *s = &ohci->state;
trace_usb_ohci_exit(s->name);
ohci_bus_stop(s);
if (s->async_td) {
usb_cancel_packet(&s->usb_packet);
s->async_td = 0;
}
ohci_stop_endpoints(s);
if (!ohci->masterbus) {
usb_bus_release(&s->bus);
}
timer_del(s->eof_timer);
timer_free(s->eof_timer);
}
|
@@ -725,7 +725,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
|
CWE-835
| null | null |
4,709
|
static void usb_ohci_realize_pci(PCIDevice *dev, Error **errp)
{
Error *err = NULL;
OHCIPCIState *ohci = PCI_OHCI(dev);
dev->config[PCI_CLASS_PROG] = 0x10; /* OHCI */
dev->config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */
usb_ohci_init(&ohci->state, DEVICE(dev), ohci->num_ports, 0,
ohci->masterbus, ohci->firstport,
pci_get_address_space(dev), &err);
if (err) {
error_propagate(errp, err);
return;
}
ohci->state.irq = pci_allocate_irq(dev);
pci_register_bar(dev, 0, 0, &ohci->state.mem);
}
|
DoS
| 0
|
static void usb_ohci_realize_pci(PCIDevice *dev, Error **errp)
{
Error *err = NULL;
OHCIPCIState *ohci = PCI_OHCI(dev);
dev->config[PCI_CLASS_PROG] = 0x10; /* OHCI */
dev->config[PCI_INTERRUPT_PIN] = 0x01; /* interrupt pin A */
usb_ohci_init(&ohci->state, DEVICE(dev), ohci->num_ports, 0,
ohci->masterbus, ohci->firstport,
pci_get_address_space(dev), &err);
if (err) {
error_propagate(errp, err);
return;
}
ohci->state.irq = pci_allocate_irq(dev);
pci_register_bar(dev, 0, 0, &ohci->state.mem);
}
|
@@ -725,7 +725,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
|
CWE-835
| null | null |
4,710
|
static void usb_ohci_reset_pci(DeviceState *d)
{
PCIDevice *dev = PCI_DEVICE(d);
OHCIPCIState *ohci = PCI_OHCI(dev);
OHCIState *s = &ohci->state;
ohci_hard_reset(s);
}
|
DoS
| 0
|
static void usb_ohci_reset_pci(DeviceState *d)
{
PCIDevice *dev = PCI_DEVICE(d);
OHCIPCIState *ohci = PCI_OHCI(dev);
OHCIState *s = &ohci->state;
ohci_hard_reset(s);
}
|
@@ -725,7 +725,7 @@ static int ohci_service_iso_td(OHCIState *ohci, struct ohci_ed *ed,
if (ohci_read_iso_td(ohci, addr, &iso_td)) {
trace_usb_ohci_iso_td_read_failed(addr);
ohci_die(ohci);
- return 0;
+ return 1;
}
starting_frame = OHCI_BM(iso_td.flags, TD_SF);
|
CWE-835
| null | null |
4,711
|
e1000e_autoneg_pause(E1000ECore *core)
{
timer_del(core->autoneg_timer);
}
|
DoS
| 0
|
e1000e_autoneg_pause(E1000ECore *core)
{
timer_del(core->autoneg_timer);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
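The patch column for the e1000e rows adds an out-of-range check to e1000e_ring_empty() and an early return in e1000e_write_packet_to_guest(): a guest-programmed tail index that lies beyond the descriptor ring length is now treated as an empty ring, so the RX write loop terminates instead of spinning. A minimal standalone model of that guard follows; the descriptor size and names are assumptions for illustration, not the device's real register layout.

```c
/* Standalone model (not QEMU code) of the ring-empty guard added by the
 * patch: a guest-controlled tail index outside the ring must count as
 * "no descriptors available", otherwise head never catches up with tail
 * and the RX write loop never terminates. */
#include <stdbool.h>
#include <stdio.h>

#define DESC_LEN 16u            /* assumed bytes per descriptor */

static bool ring_empty(unsigned head, unsigned tail, unsigned len_bytes)
{
    unsigned num_desc = len_bytes / DESC_LEN;
    return head == tail || tail >= num_desc;   /* out-of-range tail == empty */
}

int main(void)
{
    /* 8-descriptor ring; the guest programmed a bogus tail of 100. */
    unsigned head = 0, tail = 100, len = 8 * DESC_LEN;
    printf("empty: %s\n", ring_empty(head, tail, len) ? "yes" : "no");
    return 0;
}
```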
4,712
|
e1000e_autoneg_resume(E1000ECore *core)
{
if (e1000e_have_autoneg(core) &&
!(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
qemu_get_queue(core->owner_nic)->link_down = false;
timer_mod(core->autoneg_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
}
}
|
DoS
| 0
|
e1000e_autoneg_resume(E1000ECore *core)
{
if (e1000e_have_autoneg(core) &&
!(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
qemu_get_queue(core->owner_nic)->link_down = false;
timer_mod(core->autoneg_timer,
qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) + 500);
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,713
|
e1000e_autoneg_timer(void *opaque)
{
E1000ECore *core = opaque;
if (!qemu_get_queue(core->owner_nic)->link_down) {
e1000x_update_regs_on_autoneg_done(core->mac, core->phy[0]);
e1000e_start_recv(core);
e1000e_update_flowctl_status(core);
/* signal link status change to the guest */
e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
}
}
|
DoS
| 0
|
e1000e_autoneg_timer(void *opaque)
{
E1000ECore *core = opaque;
if (!qemu_get_queue(core->owner_nic)->link_down) {
e1000x_update_regs_on_autoneg_done(core->mac, core->phy[0]);
e1000e_start_recv(core);
e1000e_update_flowctl_status(core);
/* signal link status change to the guest */
e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,714
|
e1000e_build_rx_metadata(E1000ECore *core,
struct NetRxPkt *pkt,
bool is_eop,
const E1000E_RSSInfo *rss_info,
uint32_t *rss, uint32_t *mrq,
uint32_t *status_flags,
uint16_t *ip_id,
uint16_t *vlan_tag)
{
struct virtio_net_hdr *vhdr;
bool isip4, isip6, istcp, isudp;
uint32_t pkt_type;
*status_flags = E1000_RXD_STAT_DD;
/* No additional metadata needed for non-EOP descriptors */
if (!is_eop) {
goto func_exit;
}
*status_flags |= E1000_RXD_STAT_EOP;
net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
trace_e1000e_rx_metadata_protocols(isip4, isip6, isudp, istcp);
/* VLAN state */
if (net_rx_pkt_is_vlan_stripped(pkt)) {
*status_flags |= E1000_RXD_STAT_VP;
*vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt));
trace_e1000e_rx_metadata_vlan(*vlan_tag);
}
/* Packet parsing results */
if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) {
if (rss_info->enabled) {
*rss = cpu_to_le32(rss_info->hash);
*mrq = cpu_to_le32(rss_info->type | (rss_info->queue << 8));
trace_e1000e_rx_metadata_rss(*rss, *mrq);
}
} else if (isip4) {
*status_flags |= E1000_RXD_STAT_IPIDV;
*ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt));
trace_e1000e_rx_metadata_ip_id(*ip_id);
}
if (istcp && e1000e_is_tcp_ack(core, pkt)) {
*status_flags |= E1000_RXD_STAT_ACK;
trace_e1000e_rx_metadata_ack();
}
if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
trace_e1000e_rx_metadata_ipv6_filtering_disabled();
pkt_type = E1000_RXD_PKT_MAC;
} else if (istcp || isudp) {
pkt_type = isip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP;
} else if (isip4 || isip6) {
pkt_type = isip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6;
} else {
pkt_type = E1000_RXD_PKT_MAC;
}
*status_flags |= E1000_RXD_PKT_TYPE(pkt_type);
trace_e1000e_rx_metadata_pkt_type(pkt_type);
/* RX CSO information */
if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
trace_e1000e_rx_metadata_ipv6_sum_disabled();
goto func_exit;
}
if (!net_rx_pkt_has_virt_hdr(pkt)) {
trace_e1000e_rx_metadata_no_virthdr();
e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp);
goto func_exit;
}
vhdr = net_rx_pkt_get_vhdr(pkt);
if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
!(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
trace_e1000e_rx_metadata_virthdr_no_csum_info();
e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp);
goto func_exit;
}
if (e1000e_rx_l3_cso_enabled(core)) {
*status_flags |= isip4 ? E1000_RXD_STAT_IPCS : 0;
} else {
trace_e1000e_rx_metadata_l3_cso_disabled();
}
if (e1000e_rx_l4_cso_enabled(core)) {
if (istcp) {
*status_flags |= E1000_RXD_STAT_TCPCS;
} else if (isudp) {
*status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS;
}
} else {
trace_e1000e_rx_metadata_l4_cso_disabled();
}
trace_e1000e_rx_metadata_status_flags(*status_flags);
func_exit:
*status_flags = cpu_to_le32(*status_flags);
}
|
DoS
| 0
|
e1000e_build_rx_metadata(E1000ECore *core,
struct NetRxPkt *pkt,
bool is_eop,
const E1000E_RSSInfo *rss_info,
uint32_t *rss, uint32_t *mrq,
uint32_t *status_flags,
uint16_t *ip_id,
uint16_t *vlan_tag)
{
struct virtio_net_hdr *vhdr;
bool isip4, isip6, istcp, isudp;
uint32_t pkt_type;
*status_flags = E1000_RXD_STAT_DD;
/* No additional metadata needed for non-EOP descriptors */
if (!is_eop) {
goto func_exit;
}
*status_flags |= E1000_RXD_STAT_EOP;
net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
trace_e1000e_rx_metadata_protocols(isip4, isip6, isudp, istcp);
/* VLAN state */
if (net_rx_pkt_is_vlan_stripped(pkt)) {
*status_flags |= E1000_RXD_STAT_VP;
*vlan_tag = cpu_to_le16(net_rx_pkt_get_vlan_tag(pkt));
trace_e1000e_rx_metadata_vlan(*vlan_tag);
}
/* Packet parsing results */
if ((core->mac[RXCSUM] & E1000_RXCSUM_PCSD) != 0) {
if (rss_info->enabled) {
*rss = cpu_to_le32(rss_info->hash);
*mrq = cpu_to_le32(rss_info->type | (rss_info->queue << 8));
trace_e1000e_rx_metadata_rss(*rss, *mrq);
}
} else if (isip4) {
*status_flags |= E1000_RXD_STAT_IPIDV;
*ip_id = cpu_to_le16(net_rx_pkt_get_ip_id(pkt));
trace_e1000e_rx_metadata_ip_id(*ip_id);
}
if (istcp && e1000e_is_tcp_ack(core, pkt)) {
*status_flags |= E1000_RXD_STAT_ACK;
trace_e1000e_rx_metadata_ack();
}
if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_DIS)) {
trace_e1000e_rx_metadata_ipv6_filtering_disabled();
pkt_type = E1000_RXD_PKT_MAC;
} else if (istcp || isudp) {
pkt_type = isip4 ? E1000_RXD_PKT_IP4_XDP : E1000_RXD_PKT_IP6_XDP;
} else if (isip4 || isip6) {
pkt_type = isip4 ? E1000_RXD_PKT_IP4 : E1000_RXD_PKT_IP6;
} else {
pkt_type = E1000_RXD_PKT_MAC;
}
*status_flags |= E1000_RXD_PKT_TYPE(pkt_type);
trace_e1000e_rx_metadata_pkt_type(pkt_type);
/* RX CSO information */
if (isip6 && (core->mac[RFCTL] & E1000_RFCTL_IPV6_XSUM_DIS)) {
trace_e1000e_rx_metadata_ipv6_sum_disabled();
goto func_exit;
}
if (!net_rx_pkt_has_virt_hdr(pkt)) {
trace_e1000e_rx_metadata_no_virthdr();
e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp);
goto func_exit;
}
vhdr = net_rx_pkt_get_vhdr(pkt);
if (!(vhdr->flags & VIRTIO_NET_HDR_F_DATA_VALID) &&
!(vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM)) {
trace_e1000e_rx_metadata_virthdr_no_csum_info();
e1000e_verify_csum_in_sw(core, pkt, status_flags, istcp, isudp);
goto func_exit;
}
if (e1000e_rx_l3_cso_enabled(core)) {
*status_flags |= isip4 ? E1000_RXD_STAT_IPCS : 0;
} else {
trace_e1000e_rx_metadata_l3_cso_disabled();
}
if (e1000e_rx_l4_cso_enabled(core)) {
if (istcp) {
*status_flags |= E1000_RXD_STAT_TCPCS;
} else if (isudp) {
*status_flags |= E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS;
}
} else {
trace_e1000e_rx_metadata_l4_cso_disabled();
}
trace_e1000e_rx_metadata_status_flags(*status_flags);
func_exit:
*status_flags = cpu_to_le32(*status_flags);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,715
|
e1000e_calc_per_desc_buf_size(E1000ECore *core)
{
int i;
core->rx_desc_buf_size = 0;
for (i = 0; i < ARRAY_SIZE(core->rxbuf_sizes); i++) {
core->rx_desc_buf_size += core->rxbuf_sizes[i];
}
}
|
DoS
| 0
|
e1000e_calc_per_desc_buf_size(E1000ECore *core)
{
int i;
core->rx_desc_buf_size = 0;
for (i = 0; i < ARRAY_SIZE(core->rxbuf_sizes); i++) {
core->rx_desc_buf_size += core->rxbuf_sizes[i];
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,716
|
e1000e_can_receive(E1000ECore *core)
{
int i;
if (!e1000x_rx_ready(core->owner, core->mac)) {
return false;
}
for (i = 0; i < E1000E_NUM_QUEUES; i++) {
E1000E_RxRing rxr;
e1000e_rx_ring_init(core, &rxr, i);
if (e1000e_ring_enabled(core, rxr.i) &&
e1000e_has_rxbufs(core, rxr.i, 1)) {
trace_e1000e_rx_can_recv();
return true;
}
}
trace_e1000e_rx_can_recv_rings_full();
return false;
}
|
DoS
| 0
|
e1000e_can_receive(E1000ECore *core)
{
int i;
if (!e1000x_rx_ready(core->owner, core->mac)) {
return false;
}
for (i = 0; i < E1000E_NUM_QUEUES; i++) {
E1000E_RxRing rxr;
e1000e_rx_ring_init(core, &rxr, i);
if (e1000e_ring_enabled(core, rxr.i) &&
e1000e_has_rxbufs(core, rxr.i, 1)) {
trace_e1000e_rx_can_recv();
return true;
}
}
trace_e1000e_rx_can_recv_rings_full();
return false;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,717
|
e1000e_clear_ims_bits(E1000ECore *core, uint32_t bits)
{
trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits);
core->mac[IMS] &= ~bits;
}
|
DoS
| 0
|
e1000e_clear_ims_bits(E1000ECore *core, uint32_t bits)
{
trace_e1000e_irq_clear_ims(bits, core->mac[IMS], core->mac[IMS] & ~bits);
core->mac[IMS] &= ~bits;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,718
|
e1000e_core_pci_realize(E1000ECore *core,
const uint16_t *eeprom_templ,
uint32_t eeprom_size,
const uint8_t *macaddr)
{
int i;
core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
e1000e_autoneg_timer, core);
e1000e_intrmgr_pci_realize(core);
core->vmstate =
qemu_add_vm_change_state_handler(e1000e_vm_state_change, core);
for (i = 0; i < E1000E_NUM_QUEUES; i++) {
net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner,
E1000E_MAX_TX_FRAGS, core->has_vnet);
}
net_rx_pkt_init(&core->rx_pkt, core->has_vnet);
e1000x_core_prepare_eeprom(core->eeprom,
eeprom_templ,
eeprom_size,
PCI_DEVICE_GET_CLASS(core->owner)->device_id,
macaddr);
e1000e_update_rx_offloads(core);
}
|
DoS
| 0
|
e1000e_core_pci_realize(E1000ECore *core,
const uint16_t *eeprom_templ,
uint32_t eeprom_size,
const uint8_t *macaddr)
{
int i;
core->autoneg_timer = timer_new_ms(QEMU_CLOCK_VIRTUAL,
e1000e_autoneg_timer, core);
e1000e_intrmgr_pci_realize(core);
core->vmstate =
qemu_add_vm_change_state_handler(e1000e_vm_state_change, core);
for (i = 0; i < E1000E_NUM_QUEUES; i++) {
net_tx_pkt_init(&core->tx[i].tx_pkt, core->owner,
E1000E_MAX_TX_FRAGS, core->has_vnet);
}
net_rx_pkt_init(&core->rx_pkt, core->has_vnet);
e1000x_core_prepare_eeprom(core->eeprom,
eeprom_templ,
eeprom_size,
PCI_DEVICE_GET_CLASS(core->owner)->device_id,
macaddr);
e1000e_update_rx_offloads(core);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,719
|
e1000e_core_pci_uninit(E1000ECore *core)
{
int i;
timer_del(core->autoneg_timer);
timer_free(core->autoneg_timer);
e1000e_intrmgr_pci_unint(core);
qemu_del_vm_change_state_handler(core->vmstate);
for (i = 0; i < E1000E_NUM_QUEUES; i++) {
net_tx_pkt_reset(core->tx[i].tx_pkt);
net_tx_pkt_uninit(core->tx[i].tx_pkt);
}
net_rx_pkt_uninit(core->rx_pkt);
}
|
DoS
| 0
|
e1000e_core_pci_uninit(E1000ECore *core)
{
int i;
timer_del(core->autoneg_timer);
timer_free(core->autoneg_timer);
e1000e_intrmgr_pci_unint(core);
qemu_del_vm_change_state_handler(core->vmstate);
for (i = 0; i < E1000E_NUM_QUEUES; i++) {
net_tx_pkt_reset(core->tx[i].tx_pkt);
net_tx_pkt_uninit(core->tx[i].tx_pkt);
}
net_rx_pkt_uninit(core->rx_pkt);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,720
|
e1000e_core_post_load(E1000ECore *core)
{
NetClientState *nc = qemu_get_queue(core->owner_nic);
/* nc.link_down can't be migrated, so infer link_down according
* to link status bit in core.mac[STATUS].
*/
nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;
return 0;
}
|
DoS
| 0
|
e1000e_core_post_load(E1000ECore *core)
{
NetClientState *nc = qemu_get_queue(core->owner_nic);
/* nc.link_down can't be migrated, so infer link_down according
* to link status bit in core.mac[STATUS].
*/
nc->link_down = (core->mac[STATUS] & E1000_STATUS_LU) == 0;
return 0;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,721
|
void e1000e_core_pre_save(E1000ECore *core)
{
int i;
NetClientState *nc = qemu_get_queue(core->owner_nic);
/*
* If link is down and auto-negotiation is supported and ongoing,
* complete auto-negotiation immediately. This allows us to look
* at MII_SR_AUTONEG_COMPLETE to infer link status on load.
*/
if (nc->link_down && e1000e_have_autoneg(core)) {
core->phy[0][PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
e1000e_update_flowctl_status(core);
}
for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
core->tx[i].skip_cp = true;
}
}
}
|
DoS
| 0
|
void e1000e_core_pre_save(E1000ECore *core)
{
int i;
NetClientState *nc = qemu_get_queue(core->owner_nic);
/*
* If link is down and auto-negotiation is supported and ongoing,
* complete auto-negotiation immediately. This allows us to look
* at MII_SR_AUTONEG_COMPLETE to infer link status on load.
*/
if (nc->link_down && e1000e_have_autoneg(core)) {
core->phy[0][PHY_STATUS] |= MII_SR_AUTONEG_COMPLETE;
e1000e_update_flowctl_status(core);
}
for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
if (net_tx_pkt_has_fragments(core->tx[i].tx_pkt)) {
core->tx[i].skip_cp = true;
}
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,722
|
e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size)
{
uint64_t val;
uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr);
if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) {
if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
trace_e1000e_wrn_regs_read_trivial(index << 2);
}
val = e1000e_macreg_readops[index](core, index);
trace_e1000e_core_read(index << 2, size, val);
return val;
} else {
trace_e1000e_wrn_regs_read_unknown(index << 2, size);
}
return 0;
}
|
DoS
| 0
|
e1000e_core_read(E1000ECore *core, hwaddr addr, unsigned size)
{
uint64_t val;
uint16_t index = e1000e_get_reg_index_with_offset(mac_reg_access, addr);
if (index < E1000E_NREADOPS && e1000e_macreg_readops[index]) {
if (mac_reg_access[index] & MAC_ACCESS_PARTIAL) {
trace_e1000e_wrn_regs_read_trivial(index << 2);
}
val = e1000e_macreg_readops[index](core, index);
trace_e1000e_core_read(index << 2, size, val);
return val;
} else {
trace_e1000e_wrn_regs_read_unknown(index << 2, size);
}
return 0;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,723
|
e1000e_core_reset(E1000ECore *core)
{
int i;
timer_del(core->autoneg_timer);
e1000e_intrmgr_reset(core);
memset(core->phy, 0, sizeof core->phy);
memmove(core->phy, e1000e_phy_reg_init, sizeof e1000e_phy_reg_init);
memset(core->mac, 0, sizeof core->mac);
memmove(core->mac, e1000e_mac_reg_init, sizeof e1000e_mac_reg_init);
core->rxbuf_min_shift = 1 + E1000_RING_DESC_LEN_SHIFT;
if (qemu_get_queue(core->owner_nic)->link_down) {
e1000e_link_down(core);
}
e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);
for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
net_tx_pkt_reset(core->tx[i].tx_pkt);
memset(&core->tx[i].props, 0, sizeof(core->tx[i].props));
core->tx[i].skip_cp = false;
}
}
|
DoS
| 0
|
e1000e_core_reset(E1000ECore *core)
{
int i;
timer_del(core->autoneg_timer);
e1000e_intrmgr_reset(core);
memset(core->phy, 0, sizeof core->phy);
memmove(core->phy, e1000e_phy_reg_init, sizeof e1000e_phy_reg_init);
memset(core->mac, 0, sizeof core->mac);
memmove(core->mac, e1000e_mac_reg_init, sizeof e1000e_mac_reg_init);
core->rxbuf_min_shift = 1 + E1000_RING_DESC_LEN_SHIFT;
if (qemu_get_queue(core->owner_nic)->link_down) {
e1000e_link_down(core);
}
e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);
for (i = 0; i < ARRAY_SIZE(core->tx); i++) {
net_tx_pkt_reset(core->tx[i].tx_pkt);
memset(&core->tx[i].props, 0, sizeof(core->tx[i].props));
core->tx[i].skip_cp = false;
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,724
|
e1000e_core_set_link_status(E1000ECore *core)
{
NetClientState *nc = qemu_get_queue(core->owner_nic);
uint32_t old_status = core->mac[STATUS];
trace_e1000e_link_status_changed(nc->link_down ? false : true);
if (nc->link_down) {
e1000x_update_regs_on_link_down(core->mac, core->phy[0]);
} else {
if (e1000e_have_autoneg(core) &&
!(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
e1000x_restart_autoneg(core->mac, core->phy[0],
core->autoneg_timer);
} else {
e1000x_update_regs_on_link_up(core->mac, core->phy[0]);
e1000e_start_recv(core);
}
}
if (core->mac[STATUS] != old_status) {
e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
}
}
|
DoS
| 0
|
e1000e_core_set_link_status(E1000ECore *core)
{
NetClientState *nc = qemu_get_queue(core->owner_nic);
uint32_t old_status = core->mac[STATUS];
trace_e1000e_link_status_changed(nc->link_down ? false : true);
if (nc->link_down) {
e1000x_update_regs_on_link_down(core->mac, core->phy[0]);
} else {
if (e1000e_have_autoneg(core) &&
!(core->phy[0][PHY_STATUS] & MII_SR_AUTONEG_COMPLETE)) {
e1000x_restart_autoneg(core->mac, core->phy[0],
core->autoneg_timer);
} else {
e1000x_update_regs_on_link_up(core->mac, core->phy[0]);
e1000e_start_recv(core);
}
}
if (core->mac[STATUS] != old_status) {
e1000e_set_interrupt_cause(core, E1000_ICR_LSC);
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,725
|
e1000e_do_ps(E1000ECore *core, struct NetRxPkt *pkt, size_t *hdr_len)
{
bool isip4, isip6, isudp, istcp;
bool fragment;
if (!e1000e_rx_use_ps_descriptor(core)) {
return false;
}
net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
if (isip4) {
fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
} else if (isip6) {
fragment = net_rx_pkt_get_ip6_info(pkt)->fragment;
} else {
return false;
}
if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) {
return false;
}
if (!fragment && (isudp || istcp)) {
*hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt);
} else {
*hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt);
}
if ((*hdr_len > core->rxbuf_sizes[0]) ||
(*hdr_len > net_rx_pkt_get_total_len(pkt))) {
return false;
}
return true;
}
|
DoS
| 0
|
e1000e_do_ps(E1000ECore *core, struct NetRxPkt *pkt, size_t *hdr_len)
{
bool isip4, isip6, isudp, istcp;
bool fragment;
if (!e1000e_rx_use_ps_descriptor(core)) {
return false;
}
net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
if (isip4) {
fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
} else if (isip6) {
fragment = net_rx_pkt_get_ip6_info(pkt)->fragment;
} else {
return false;
}
if (fragment && (core->mac[RFCTL] & E1000_RFCTL_IPFRSP_DIS)) {
return false;
}
if (!fragment && (isudp || istcp)) {
*hdr_len = net_rx_pkt_get_l5_hdr_offset(pkt);
} else {
*hdr_len = net_rx_pkt_get_l4_hdr_offset(pkt);
}
if ((*hdr_len > core->rxbuf_sizes[0]) ||
(*hdr_len > net_rx_pkt_get_total_len(pkt))) {
return false;
}
return true;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,726
|
e1000e_eitr_should_postpone(E1000ECore *core, int idx)
{
return e1000e_postpone_interrupt(&core->eitr_intr_pending[idx],
&core->eitr[idx]);
}
|
DoS
| 0
|
e1000e_eitr_should_postpone(E1000ECore *core, int idx)
{
return e1000e_postpone_interrupt(&core->eitr_intr_pending[idx],
&core->eitr[idx]);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,727
|
e1000e_get_ctrl(E1000ECore *core, int index)
{
uint32_t val = core->mac[CTRL];
trace_e1000e_link_read_params(
!!(val & E1000_CTRL_ASDE),
(val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
!!(val & E1000_CTRL_FRCSPD),
!!(val & E1000_CTRL_FRCDPX),
!!(val & E1000_CTRL_RFCE),
!!(val & E1000_CTRL_TFCE));
return val;
}
|
DoS
| 0
|
e1000e_get_ctrl(E1000ECore *core, int index)
{
uint32_t val = core->mac[CTRL];
trace_e1000e_link_read_params(
!!(val & E1000_CTRL_ASDE),
(val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
!!(val & E1000_CTRL_FRCSPD),
!!(val & E1000_CTRL_FRCDPX),
!!(val & E1000_CTRL_RFCE),
!!(val & E1000_CTRL_TFCE));
return val;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,728
|
e1000e_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr)
{
uint16_t index = (addr & 0x1ffff) >> 2;
return index + (mac_reg_access[index] & 0xfffe);
}
|
DoS
| 0
|
e1000e_get_reg_index_with_offset(const uint16_t *mac_reg_access, hwaddr addr)
{
uint16_t index = (addr & 0x1ffff) >> 2;
return index + (mac_reg_access[index] & 0xfffe);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,729
|
e1000e_get_status(E1000ECore *core, int index)
{
uint32_t res = core->mac[STATUS];
if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) {
res |= E1000_STATUS_GIO_MASTER_ENABLE;
}
if (core->mac[CTRL] & E1000_CTRL_FRCDPX) {
res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0;
} else {
res |= E1000_STATUS_FD;
}
if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) ||
(core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) {
switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) {
case E1000_CTRL_SPD_10:
res |= E1000_STATUS_SPEED_10;
break;
case E1000_CTRL_SPD_100:
res |= E1000_STATUS_SPEED_100;
break;
case E1000_CTRL_SPD_1000:
default:
res |= E1000_STATUS_SPEED_1000;
break;
}
} else {
res |= E1000_STATUS_SPEED_1000;
}
trace_e1000e_link_status(
!!(res & E1000_STATUS_LU),
!!(res & E1000_STATUS_FD),
(res & E1000_STATUS_SPEED_MASK) >> E1000_STATUS_SPEED_SHIFT,
(res & E1000_STATUS_ASDV) >> E1000_STATUS_ASDV_SHIFT);
return res;
}
|
DoS
| 0
|
e1000e_get_status(E1000ECore *core, int index)
{
uint32_t res = core->mac[STATUS];
if (!(core->mac[CTRL] & E1000_CTRL_GIO_MASTER_DISABLE)) {
res |= E1000_STATUS_GIO_MASTER_ENABLE;
}
if (core->mac[CTRL] & E1000_CTRL_FRCDPX) {
res |= (core->mac[CTRL] & E1000_CTRL_FD) ? E1000_STATUS_FD : 0;
} else {
res |= E1000_STATUS_FD;
}
if ((core->mac[CTRL] & E1000_CTRL_FRCSPD) ||
(core->mac[CTRL_EXT] & E1000_CTRL_EXT_SPD_BYPS)) {
switch (core->mac[CTRL] & E1000_CTRL_SPD_SEL) {
case E1000_CTRL_SPD_10:
res |= E1000_STATUS_SPEED_10;
break;
case E1000_CTRL_SPD_100:
res |= E1000_STATUS_SPEED_100;
break;
case E1000_CTRL_SPD_1000:
default:
res |= E1000_STATUS_SPEED_1000;
break;
}
} else {
res |= E1000_STATUS_SPEED_1000;
}
trace_e1000e_link_status(
!!(res & E1000_STATUS_LU),
!!(res & E1000_STATUS_FD),
(res & E1000_STATUS_SPEED_MASK) >> E1000_STATUS_SPEED_SHIFT,
(res & E1000_STATUS_ASDV) >> E1000_STATUS_ASDV_SHIFT);
return res;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,730
|
e1000e_get_tarc(E1000ECore *core, int index)
{
return core->mac[index] & ((BIT(11) - 1) |
BIT(27) |
BIT(28) |
BIT(29) |
BIT(30));
}
|
DoS
| 0
|
e1000e_get_tarc(E1000ECore *core, int index)
{
return core->mac[index] & ((BIT(11) - 1) |
BIT(27) |
BIT(28) |
BIT(29) |
BIT(30));
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,731
|
e1000e_has_rxbufs(E1000ECore *core, const E1000E_RingInfo *r,
size_t total_size)
{
uint32_t bufs = e1000e_ring_free_descr_num(core, r);
trace_e1000e_rx_has_buffers(r->idx, bufs, total_size,
core->rx_desc_buf_size);
return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) *
core->rx_desc_buf_size;
}
|
DoS
| 0
|
e1000e_has_rxbufs(E1000ECore *core, const E1000E_RingInfo *r,
size_t total_size)
{
uint32_t bufs = e1000e_ring_free_descr_num(core, r);
trace_e1000e_rx_has_buffers(r->idx, bufs, total_size,
core->rx_desc_buf_size);
return total_size <= bufs / (core->rx_desc_len / E1000_MIN_RX_DESC_LEN) *
core->rx_desc_buf_size;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,732
|
e1000e_have_autoneg(E1000ECore *core)
{
return core->phy[0][PHY_CTRL] & MII_CR_AUTO_NEG_EN;
}
|
DoS
| 0
|
e1000e_have_autoneg(E1000ECore *core)
{
return core->phy[0][PHY_CTRL] & MII_CR_AUTO_NEG_EN;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,733
|
e1000e_intmgr_collect_delayed_causes(E1000ECore *core)
{
uint32_t res;
if (msix_enabled(core->owner)) {
assert(core->delayed_causes == 0);
return 0;
}
res = core->delayed_causes;
core->delayed_causes = 0;
e1000e_intrmgr_stop_delay_timers(core);
return res;
}
|
DoS
| 0
|
e1000e_intmgr_collect_delayed_causes(E1000ECore *core)
{
uint32_t res;
if (msix_enabled(core->owner)) {
assert(core->delayed_causes == 0);
return 0;
}
res = core->delayed_causes;
core->delayed_causes = 0;
e1000e_intrmgr_stop_delay_timers(core);
return res;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,734
|
e1000e_intmgr_timer_resume(E1000IntrDelayTimer *timer)
{
if (timer->running) {
e1000e_intrmgr_rearm_timer(timer);
}
}
|
DoS
| 0
|
e1000e_intmgr_timer_resume(E1000IntrDelayTimer *timer)
{
if (timer->running) {
e1000e_intrmgr_rearm_timer(timer);
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,735
|
e1000e_intrmgr_delay_rx_causes(E1000ECore *core, uint32_t *causes)
{
uint32_t delayable_causes;
uint32_t rdtr = core->mac[RDTR];
uint32_t radv = core->mac[RADV];
uint32_t raid = core->mac[RAID];
if (msix_enabled(core->owner)) {
return false;
}
delayable_causes = E1000_ICR_RXQ0 |
E1000_ICR_RXQ1 |
E1000_ICR_RXT0;
if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS)) {
delayable_causes |= E1000_ICR_ACK;
}
/* Clean up all causes that may be delayed */
core->delayed_causes |= *causes & delayable_causes;
*causes &= ~delayable_causes;
/* Check if delayed RX interrupts disabled by client
or if there are causes that cannot be delayed */
if ((rdtr == 0) || (*causes != 0)) {
return false;
}
/* Check if delayed RX ACK interrupts disabled by client
and there is an ACK packet received */
if ((raid == 0) && (core->delayed_causes & E1000_ICR_ACK)) {
return false;
}
/* All causes delayed */
e1000e_intrmgr_rearm_timer(&core->rdtr);
if (!core->radv.running && (radv != 0)) {
e1000e_intrmgr_rearm_timer(&core->radv);
}
if (!core->raid.running && (core->delayed_causes & E1000_ICR_ACK)) {
e1000e_intrmgr_rearm_timer(&core->raid);
}
return true;
}
|
DoS
| 0
|
e1000e_intrmgr_delay_rx_causes(E1000ECore *core, uint32_t *causes)
{
uint32_t delayable_causes;
uint32_t rdtr = core->mac[RDTR];
uint32_t radv = core->mac[RADV];
uint32_t raid = core->mac[RAID];
if (msix_enabled(core->owner)) {
return false;
}
delayable_causes = E1000_ICR_RXQ0 |
E1000_ICR_RXQ1 |
E1000_ICR_RXT0;
if (!(core->mac[RFCTL] & E1000_RFCTL_ACK_DIS)) {
delayable_causes |= E1000_ICR_ACK;
}
/* Clean up all causes that may be delayed */
core->delayed_causes |= *causes & delayable_causes;
*causes &= ~delayable_causes;
/* Check if delayed RX interrupts disabled by client
or if there are causes that cannot be delayed */
if ((rdtr == 0) || (*causes != 0)) {
return false;
}
/* Check if delayed RX ACK interrupts disabled by client
and there is an ACK packet received */
if ((raid == 0) && (core->delayed_causes & E1000_ICR_ACK)) {
return false;
}
/* All causes delayed */
e1000e_intrmgr_rearm_timer(&core->rdtr);
if (!core->radv.running && (radv != 0)) {
e1000e_intrmgr_rearm_timer(&core->radv);
}
if (!core->raid.running && (core->delayed_causes & E1000_ICR_ACK)) {
e1000e_intrmgr_rearm_timer(&core->raid);
}
return true;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,736
|
e1000e_intrmgr_delay_tx_causes(E1000ECore *core, uint32_t *causes)
{
static const uint32_t delayable_causes = E1000_ICR_TXQ0 |
E1000_ICR_TXQ1 |
E1000_ICR_TXQE |
E1000_ICR_TXDW;
if (msix_enabled(core->owner)) {
return false;
}
/* Clean up all causes that may be delayed */
core->delayed_causes |= *causes & delayable_causes;
*causes &= ~delayable_causes;
/* If there are causes that cannot be delayed */
if (*causes != 0) {
return false;
}
/* All causes delayed */
e1000e_intrmgr_rearm_timer(&core->tidv);
if (!core->tadv.running && (core->mac[TADV] != 0)) {
e1000e_intrmgr_rearm_timer(&core->tadv);
}
return true;
}
|
DoS
| 0
|
e1000e_intrmgr_delay_tx_causes(E1000ECore *core, uint32_t *causes)
{
static const uint32_t delayable_causes = E1000_ICR_TXQ0 |
E1000_ICR_TXQ1 |
E1000_ICR_TXQE |
E1000_ICR_TXDW;
if (msix_enabled(core->owner)) {
return false;
}
/* Clean up all causes that may be delayed */
core->delayed_causes |= *causes & delayable_causes;
*causes &= ~delayable_causes;
/* If there are causes that cannot be delayed */
if (*causes != 0) {
return false;
}
/* All causes delayed */
e1000e_intrmgr_rearm_timer(&core->tidv);
if (!core->tadv.running && (core->mac[TADV] != 0)) {
e1000e_intrmgr_rearm_timer(&core->tadv);
}
return true;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,737
|
e1000e_intrmgr_fire_all_timers(E1000ECore *core)
{
int i;
uint32_t val = e1000e_intmgr_collect_delayed_causes(core);
trace_e1000e_irq_adding_delayed_causes(val, core->mac[ICR]);
core->mac[ICR] |= val;
if (core->itr.running) {
timer_del(core->itr.timer);
e1000e_intrmgr_on_throttling_timer(&core->itr);
}
for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
if (core->eitr[i].running) {
timer_del(core->eitr[i].timer);
e1000e_intrmgr_on_msix_throttling_timer(&core->eitr[i]);
}
}
}
|
DoS
| 0
|
e1000e_intrmgr_fire_all_timers(E1000ECore *core)
{
int i;
uint32_t val = e1000e_intmgr_collect_delayed_causes(core);
trace_e1000e_irq_adding_delayed_causes(val, core->mac[ICR]);
core->mac[ICR] |= val;
if (core->itr.running) {
timer_del(core->itr.timer);
e1000e_intrmgr_on_throttling_timer(&core->itr);
}
for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
if (core->eitr[i].running) {
timer_del(core->eitr[i].timer);
e1000e_intrmgr_on_msix_throttling_timer(&core->eitr[i]);
}
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,738
|
e1000e_intrmgr_fire_delayed_interrupts(E1000ECore *core)
{
trace_e1000e_irq_fire_delayed_interrupts();
e1000e_set_interrupt_cause(core, 0);
}
|
DoS
| 0
|
e1000e_intrmgr_fire_delayed_interrupts(E1000ECore *core)
{
trace_e1000e_irq_fire_delayed_interrupts();
e1000e_set_interrupt_cause(core, 0);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,739
|
e1000e_intrmgr_initialize_all_timers(E1000ECore *core, bool create)
{
int i;
core->radv.delay_reg = RADV;
core->rdtr.delay_reg = RDTR;
core->raid.delay_reg = RAID;
core->tadv.delay_reg = TADV;
core->tidv.delay_reg = TIDV;
core->radv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
core->rdtr.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
core->raid.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
core->tadv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
core->tidv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
core->radv.core = core;
core->rdtr.core = core;
core->raid.core = core;
core->tadv.core = core;
core->tidv.core = core;
core->itr.core = core;
core->itr.delay_reg = ITR;
core->itr.delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;
for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
core->eitr[i].core = core;
core->eitr[i].delay_reg = EITR + i;
core->eitr[i].delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;
}
if (!create) {
return;
}
core->radv.timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->radv);
core->rdtr.timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->rdtr);
core->raid.timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->raid);
core->tadv.timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tadv);
core->tidv.timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tidv);
core->itr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
e1000e_intrmgr_on_throttling_timer,
&core->itr);
for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
core->eitr[i].timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL,
e1000e_intrmgr_on_msix_throttling_timer,
&core->eitr[i]);
}
}
|
DoS
| 0
|
e1000e_intrmgr_initialize_all_timers(E1000ECore *core, bool create)
{
int i;
core->radv.delay_reg = RADV;
core->rdtr.delay_reg = RDTR;
core->raid.delay_reg = RAID;
core->tadv.delay_reg = TADV;
core->tidv.delay_reg = TIDV;
core->radv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
core->rdtr.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
core->raid.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
core->tadv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
core->tidv.delay_resolution_ns = E1000_INTR_DELAY_NS_RES;
core->radv.core = core;
core->rdtr.core = core;
core->raid.core = core;
core->tadv.core = core;
core->tidv.core = core;
core->itr.core = core;
core->itr.delay_reg = ITR;
core->itr.delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;
for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
core->eitr[i].core = core;
core->eitr[i].delay_reg = EITR + i;
core->eitr[i].delay_resolution_ns = E1000_INTR_THROTTLING_NS_RES;
}
if (!create) {
return;
}
core->radv.timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->radv);
core->rdtr.timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->rdtr);
core->raid.timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->raid);
core->tadv.timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tadv);
core->tidv.timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL, e1000e_intrmgr_on_timer, &core->tidv);
core->itr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL,
e1000e_intrmgr_on_throttling_timer,
&core->itr);
for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
core->eitr[i].timer =
timer_new_ns(QEMU_CLOCK_VIRTUAL,
e1000e_intrmgr_on_msix_throttling_timer,
&core->eitr[i]);
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,740
|
e1000e_intrmgr_on_msix_throttling_timer(void *opaque)
{
E1000IntrDelayTimer *timer = opaque;
int idx = timer - &timer->core->eitr[0];
assert(msix_enabled(timer->core->owner));
timer->running = false;
if (!timer->core->eitr_intr_pending[idx]) {
trace_e1000e_irq_throttling_no_pending_vec(idx);
return;
}
trace_e1000e_irq_msix_notify_postponed_vec(idx);
msix_notify(timer->core->owner, idx);
}
|
DoS
| 0
|
e1000e_intrmgr_on_msix_throttling_timer(void *opaque)
{
E1000IntrDelayTimer *timer = opaque;
int idx = timer - &timer->core->eitr[0];
assert(msix_enabled(timer->core->owner));
timer->running = false;
if (!timer->core->eitr_intr_pending[idx]) {
trace_e1000e_irq_throttling_no_pending_vec(idx);
return;
}
trace_e1000e_irq_msix_notify_postponed_vec(idx);
msix_notify(timer->core->owner, idx);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,741
|
e1000e_intrmgr_on_throttling_timer(void *opaque)
{
E1000IntrDelayTimer *timer = opaque;
assert(!msix_enabled(timer->core->owner));
timer->running = false;
if (!timer->core->itr_intr_pending) {
trace_e1000e_irq_throttling_no_pending_interrupts();
return;
}
if (msi_enabled(timer->core->owner)) {
trace_e1000e_irq_msi_notify_postponed();
e1000e_set_interrupt_cause(timer->core, 0);
} else {
trace_e1000e_irq_legacy_notify_postponed();
e1000e_set_interrupt_cause(timer->core, 0);
}
}
|
DoS
| 0
|
e1000e_intrmgr_on_throttling_timer(void *opaque)
{
E1000IntrDelayTimer *timer = opaque;
assert(!msix_enabled(timer->core->owner));
timer->running = false;
if (!timer->core->itr_intr_pending) {
trace_e1000e_irq_throttling_no_pending_interrupts();
return;
}
if (msi_enabled(timer->core->owner)) {
trace_e1000e_irq_msi_notify_postponed();
e1000e_set_interrupt_cause(timer->core, 0);
} else {
trace_e1000e_irq_legacy_notify_postponed();
e1000e_set_interrupt_cause(timer->core, 0);
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,742
|
e1000e_intrmgr_on_timer(void *opaque)
{
E1000IntrDelayTimer *timer = opaque;
trace_e1000e_irq_throttling_timer(timer->delay_reg << 2);
timer->running = false;
e1000e_intrmgr_fire_delayed_interrupts(timer->core);
}
|
DoS
| 0
|
e1000e_intrmgr_on_timer(void *opaque)
{
E1000IntrDelayTimer *timer = opaque;
trace_e1000e_irq_throttling_timer(timer->delay_reg << 2);
timer->running = false;
e1000e_intrmgr_fire_delayed_interrupts(timer->core);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,743
|
e1000e_intrmgr_pause(E1000ECore *core)
{
int i;
e1000e_intmgr_timer_pause(&core->radv);
e1000e_intmgr_timer_pause(&core->rdtr);
e1000e_intmgr_timer_pause(&core->raid);
e1000e_intmgr_timer_pause(&core->tidv);
e1000e_intmgr_timer_pause(&core->tadv);
e1000e_intmgr_timer_pause(&core->itr);
for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
e1000e_intmgr_timer_pause(&core->eitr[i]);
}
}
|
DoS
| 0
|
e1000e_intrmgr_pause(E1000ECore *core)
{
int i;
e1000e_intmgr_timer_pause(&core->radv);
e1000e_intmgr_timer_pause(&core->rdtr);
e1000e_intmgr_timer_pause(&core->raid);
e1000e_intmgr_timer_pause(&core->tidv);
e1000e_intmgr_timer_pause(&core->tadv);
e1000e_intmgr_timer_pause(&core->itr);
for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
e1000e_intmgr_timer_pause(&core->eitr[i]);
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,744
|
e1000e_intrmgr_pci_unint(E1000ECore *core)
{
int i;
timer_del(core->radv.timer);
timer_free(core->radv.timer);
timer_del(core->rdtr.timer);
timer_free(core->rdtr.timer);
timer_del(core->raid.timer);
timer_free(core->raid.timer);
timer_del(core->tadv.timer);
timer_free(core->tadv.timer);
timer_del(core->tidv.timer);
timer_free(core->tidv.timer);
timer_del(core->itr.timer);
timer_free(core->itr.timer);
for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
timer_del(core->eitr[i].timer);
timer_free(core->eitr[i].timer);
}
}
|
DoS
| 0
|
e1000e_intrmgr_pci_unint(E1000ECore *core)
{
int i;
timer_del(core->radv.timer);
timer_free(core->radv.timer);
timer_del(core->rdtr.timer);
timer_free(core->rdtr.timer);
timer_del(core->raid.timer);
timer_free(core->raid.timer);
timer_del(core->tadv.timer);
timer_free(core->tadv.timer);
timer_del(core->tidv.timer);
timer_free(core->tidv.timer);
timer_del(core->itr.timer);
timer_free(core->itr.timer);
for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
timer_del(core->eitr[i].timer);
timer_free(core->eitr[i].timer);
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,745
|
e1000e_intrmgr_rearm_timer(E1000IntrDelayTimer *timer)
{
int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] *
timer->delay_resolution_ns;
trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns);
timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);
timer->running = true;
}
|
DoS
| 0
|
e1000e_intrmgr_rearm_timer(E1000IntrDelayTimer *timer)
{
int64_t delay_ns = (int64_t) timer->core->mac[timer->delay_reg] *
timer->delay_resolution_ns;
trace_e1000e_irq_rearm_timer(timer->delay_reg << 2, delay_ns);
timer_mod(timer->timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) + delay_ns);
timer->running = true;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,746
|
e1000e_intrmgr_resume(E1000ECore *core)
{
int i;
e1000e_intmgr_timer_resume(&core->radv);
e1000e_intmgr_timer_resume(&core->rdtr);
e1000e_intmgr_timer_resume(&core->raid);
e1000e_intmgr_timer_resume(&core->tidv);
e1000e_intmgr_timer_resume(&core->tadv);
e1000e_intmgr_timer_resume(&core->itr);
for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
e1000e_intmgr_timer_resume(&core->eitr[i]);
}
}
|
DoS
| 0
|
e1000e_intrmgr_resume(E1000ECore *core)
{
int i;
e1000e_intmgr_timer_resume(&core->radv);
e1000e_intmgr_timer_resume(&core->rdtr);
e1000e_intmgr_timer_resume(&core->raid);
e1000e_intmgr_timer_resume(&core->tidv);
e1000e_intmgr_timer_resume(&core->tadv);
e1000e_intmgr_timer_resume(&core->itr);
for (i = 0; i < E1000E_MSIX_VEC_NUM; i++) {
e1000e_intmgr_timer_resume(&core->eitr[i]);
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,747
|
e1000e_intrmgr_stop_delay_timers(E1000ECore *core)
{
e1000e_intrmgr_stop_timer(&core->radv);
e1000e_intrmgr_stop_timer(&core->rdtr);
e1000e_intrmgr_stop_timer(&core->raid);
e1000e_intrmgr_stop_timer(&core->tidv);
e1000e_intrmgr_stop_timer(&core->tadv);
}
|
DoS
| 0
|
e1000e_intrmgr_stop_delay_timers(E1000ECore *core)
{
e1000e_intrmgr_stop_timer(&core->radv);
e1000e_intrmgr_stop_timer(&core->rdtr);
e1000e_intrmgr_stop_timer(&core->raid);
e1000e_intrmgr_stop_timer(&core->tidv);
e1000e_intrmgr_stop_timer(&core->tadv);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,748
|
e1000e_intrmgr_stop_timer(E1000IntrDelayTimer *timer)
{
if (timer->running) {
timer_del(timer->timer);
timer->running = false;
}
}
|
DoS
| 0
|
e1000e_intrmgr_stop_timer(E1000IntrDelayTimer *timer)
{
if (timer->running) {
timer_del(timer->timer);
timer->running = false;
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,749
|
e1000e_is_tcp_ack(E1000ECore *core, struct NetRxPkt *rx_pkt)
{
if (!net_rx_pkt_is_tcp_ack(rx_pkt)) {
return false;
}
if (core->mac[RFCTL] & E1000_RFCTL_ACK_DATA_DIS) {
return !net_rx_pkt_has_tcp_data(rx_pkt);
}
return true;
}
|
DoS
| 0
|
e1000e_is_tcp_ack(E1000ECore *core, struct NetRxPkt *rx_pkt)
{
if (!net_rx_pkt_is_tcp_ack(rx_pkt)) {
return false;
}
if (core->mac[RFCTL] & E1000_RFCTL_ACK_DATA_DIS) {
return !net_rx_pkt_has_tcp_data(rx_pkt);
}
return true;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,750
|
e1000e_itr_should_postpone(E1000ECore *core)
{
return e1000e_postpone_interrupt(&core->itr_intr_pending, &core->itr);
}
|
DoS
| 0
|
e1000e_itr_should_postpone(E1000ECore *core)
{
return e1000e_postpone_interrupt(&core->itr_intr_pending, &core->itr);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,751
|
e1000e_link_down(E1000ECore *core)
{
e1000x_update_regs_on_link_down(core->mac, core->phy[0]);
e1000e_update_flowctl_status(core);
}
|
DoS
| 0
|
e1000e_link_down(E1000ECore *core)
{
e1000x_update_regs_on_link_down(core->mac, core->phy[0]);
e1000e_update_flowctl_status(core);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,752
|
e1000e_lower_legacy_irq(E1000ECore *core)
{
trace_e1000e_irq_legacy_notify(false);
pci_set_irq(core->owner, 0);
}
|
DoS
| 0
|
e1000e_lower_legacy_irq(E1000ECore *core)
{
trace_e1000e_irq_legacy_notify(false);
pci_set_irq(core->owner, 0);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,753
|
e1000e_mac_eitr_read(E1000ECore *core, int index)
{
return core->eitr_guest_value[index - EITR];
}
|
DoS
| 0
|
e1000e_mac_eitr_read(E1000ECore *core, int index)
{
return core->eitr_guest_value[index - EITR];
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,754
|
e1000e_mac_ics_read(E1000ECore *core, int index)
{
trace_e1000e_irq_read_ics(core->mac[ICS]);
return core->mac[ICS];
}
|
DoS
| 0
|
e1000e_mac_ics_read(E1000ECore *core, int index)
{
trace_e1000e_irq_read_ics(core->mac[ICS]);
return core->mac[ICS];
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,755
|
e1000e_mac_itr_read(E1000ECore *core, int index)
{
return core->itr_guest_value;
}
|
DoS
| 0
|
e1000e_mac_itr_read(E1000ECore *core, int index)
{
return core->itr_guest_value;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,756
|
e1000e_mac_read_clr8(E1000ECore *core, int index)
{
uint32_t ret = core->mac[index];
core->mac[index] = 0;
core->mac[index - 1] = 0;
return ret;
}
|
DoS
| 0
|
e1000e_mac_read_clr8(E1000ECore *core, int index)
{
uint32_t ret = core->mac[index];
core->mac[index] = 0;
core->mac[index - 1] = 0;
return ret;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,757
|
e1000e_mac_readreg(E1000ECore *core, int index)
{
return core->mac[index];
}
|
DoS
| 0
|
e1000e_mac_readreg(E1000ECore *core, int index)
{
return core->mac[index];
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,758
|
e1000e_mac_setmacaddr(E1000ECore *core, int index, uint32_t val)
{
uint32_t macaddr[2];
core->mac[index] = val;
macaddr[0] = cpu_to_le32(core->mac[RA]);
macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
(uint8_t *) macaddr);
trace_e1000e_mac_set_sw(MAC_ARG(macaddr));
}
|
DoS
| 0
|
e1000e_mac_setmacaddr(E1000ECore *core, int index, uint32_t val)
{
uint32_t macaddr[2];
core->mac[index] = val;
macaddr[0] = cpu_to_le32(core->mac[RA]);
macaddr[1] = cpu_to_le32(core->mac[RA + 1]);
qemu_format_nic_info_str(qemu_get_queue(core->owner_nic),
(uint8_t *) macaddr);
trace_e1000e_mac_set_sw(MAC_ARG(macaddr));
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,759
|
e1000e_mac_swsm_read(E1000ECore *core, int index)
{
uint32_t val = core->mac[SWSM];
core->mac[SWSM] = val | 1;
return val;
}
|
DoS
| 0
|
e1000e_mac_swsm_read(E1000ECore *core, int index)
{
uint32_t val = core->mac[SWSM];
core->mac[SWSM] = val | 1;
return val;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,760
|
e1000e_mac_writereg(E1000ECore *core, int index, uint32_t val)
{
core->mac[index] = val;
}
|
DoS
| 0
|
e1000e_mac_writereg(E1000ECore *core, int index, uint32_t val)
{
core->mac[index] = val;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,761
|
e1000e_mq_queue_idx(int base_reg_idx, int reg_idx)
{
return (reg_idx - base_reg_idx) / (0x100 >> 2);
}
|
DoS
| 0
|
e1000e_mq_queue_idx(int base_reg_idx, int reg_idx)
{
return (reg_idx - base_reg_idx) / (0x100 >> 2);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,762
|
e1000e_msix_clear(E1000ECore *core, uint32_t causes)
{
if (causes & E1000_ICR_RXQ0) {
e1000e_msix_clear_one(core, E1000_ICR_RXQ0,
E1000_IVAR_RXQ0(core->mac[IVAR]));
}
if (causes & E1000_ICR_RXQ1) {
e1000e_msix_clear_one(core, E1000_ICR_RXQ1,
E1000_IVAR_RXQ1(core->mac[IVAR]));
}
if (causes & E1000_ICR_TXQ0) {
e1000e_msix_clear_one(core, E1000_ICR_TXQ0,
E1000_IVAR_TXQ0(core->mac[IVAR]));
}
if (causes & E1000_ICR_TXQ1) {
e1000e_msix_clear_one(core, E1000_ICR_TXQ1,
E1000_IVAR_TXQ1(core->mac[IVAR]));
}
if (causes & E1000_ICR_OTHER) {
e1000e_msix_clear_one(core, E1000_ICR_OTHER,
E1000_IVAR_OTHER(core->mac[IVAR]));
}
}
|
DoS
| 0
|
e1000e_msix_clear(E1000ECore *core, uint32_t causes)
{
if (causes & E1000_ICR_RXQ0) {
e1000e_msix_clear_one(core, E1000_ICR_RXQ0,
E1000_IVAR_RXQ0(core->mac[IVAR]));
}
if (causes & E1000_ICR_RXQ1) {
e1000e_msix_clear_one(core, E1000_ICR_RXQ1,
E1000_IVAR_RXQ1(core->mac[IVAR]));
}
if (causes & E1000_ICR_TXQ0) {
e1000e_msix_clear_one(core, E1000_ICR_TXQ0,
E1000_IVAR_TXQ0(core->mac[IVAR]));
}
if (causes & E1000_ICR_TXQ1) {
e1000e_msix_clear_one(core, E1000_ICR_TXQ1,
E1000_IVAR_TXQ1(core->mac[IVAR]));
}
if (causes & E1000_ICR_OTHER) {
e1000e_msix_clear_one(core, E1000_ICR_OTHER,
E1000_IVAR_OTHER(core->mac[IVAR]));
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,763
|
e1000e_msix_clear_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg)
{
if (E1000_IVAR_ENTRY_VALID(int_cfg)) {
uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg);
if (vec < E1000E_MSIX_VEC_NUM) {
trace_e1000e_irq_msix_pending_clearing(cause, int_cfg, vec);
msix_clr_pending(core->owner, vec);
} else {
trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg);
}
} else {
trace_e1000e_wrn_msix_invalid(cause, int_cfg);
}
}
|
DoS
| 0
|
e1000e_msix_clear_one(E1000ECore *core, uint32_t cause, uint32_t int_cfg)
{
if (E1000_IVAR_ENTRY_VALID(int_cfg)) {
uint32_t vec = E1000_IVAR_ENTRY_VEC(int_cfg);
if (vec < E1000E_MSIX_VEC_NUM) {
trace_e1000e_irq_msix_pending_clearing(cause, int_cfg, vec);
msix_clr_pending(core->owner, vec);
} else {
trace_e1000e_wrn_msix_vec_wrong(cause, int_cfg);
}
} else {
trace_e1000e_wrn_msix_invalid(cause, int_cfg);
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,764
|
e1000e_parse_rxbufsize(E1000ECore *core)
{
uint32_t rctl = core->mac[RCTL];
memset(core->rxbuf_sizes, 0, sizeof(core->rxbuf_sizes));
if (rctl & E1000_RCTL_DTYP_MASK) {
uint32_t bsize;
bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE0_MASK;
core->rxbuf_sizes[0] = (bsize >> E1000_PSRCTL_BSIZE0_SHIFT) * 128;
bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE1_MASK;
core->rxbuf_sizes[1] = (bsize >> E1000_PSRCTL_BSIZE1_SHIFT) * 1024;
bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE2_MASK;
core->rxbuf_sizes[2] = (bsize >> E1000_PSRCTL_BSIZE2_SHIFT) * 1024;
bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE3_MASK;
core->rxbuf_sizes[3] = (bsize >> E1000_PSRCTL_BSIZE3_SHIFT) * 1024;
} else if (rctl & E1000_RCTL_FLXBUF_MASK) {
int flxbuf = rctl & E1000_RCTL_FLXBUF_MASK;
core->rxbuf_sizes[0] = (flxbuf >> E1000_RCTL_FLXBUF_SHIFT) * 1024;
} else {
core->rxbuf_sizes[0] = e1000x_rxbufsize(rctl);
}
trace_e1000e_rx_desc_buff_sizes(core->rxbuf_sizes[0], core->rxbuf_sizes[1],
core->rxbuf_sizes[2], core->rxbuf_sizes[3]);
e1000e_calc_per_desc_buf_size(core);
}
|
DoS
| 0
|
e1000e_parse_rxbufsize(E1000ECore *core)
{
uint32_t rctl = core->mac[RCTL];
memset(core->rxbuf_sizes, 0, sizeof(core->rxbuf_sizes));
if (rctl & E1000_RCTL_DTYP_MASK) {
uint32_t bsize;
bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE0_MASK;
core->rxbuf_sizes[0] = (bsize >> E1000_PSRCTL_BSIZE0_SHIFT) * 128;
bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE1_MASK;
core->rxbuf_sizes[1] = (bsize >> E1000_PSRCTL_BSIZE1_SHIFT) * 1024;
bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE2_MASK;
core->rxbuf_sizes[2] = (bsize >> E1000_PSRCTL_BSIZE2_SHIFT) * 1024;
bsize = core->mac[PSRCTL] & E1000_PSRCTL_BSIZE3_MASK;
core->rxbuf_sizes[3] = (bsize >> E1000_PSRCTL_BSIZE3_SHIFT) * 1024;
} else if (rctl & E1000_RCTL_FLXBUF_MASK) {
int flxbuf = rctl & E1000_RCTL_FLXBUF_MASK;
core->rxbuf_sizes[0] = (flxbuf >> E1000_RCTL_FLXBUF_SHIFT) * 1024;
} else {
core->rxbuf_sizes[0] = e1000x_rxbufsize(rctl);
}
trace_e1000e_rx_desc_buff_sizes(core->rxbuf_sizes[0], core->rxbuf_sizes[1],
core->rxbuf_sizes[2], core->rxbuf_sizes[3]);
e1000e_calc_per_desc_buf_size(core);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,765
|
e1000e_phy_reg_check_cap(E1000ECore *core, uint32_t addr,
char cap, uint8_t *page)
{
*page =
(e1000e_phy_regcap[0][addr] & PHY_ANYPAGE) ? 0
: core->phy[0][PHY_PAGE];
if (*page >= E1000E_PHY_PAGES) {
return false;
}
return e1000e_phy_regcap[*page][addr] & cap;
}
|
DoS
| 0
|
e1000e_phy_reg_check_cap(E1000ECore *core, uint32_t addr,
char cap, uint8_t *page)
{
*page =
(e1000e_phy_regcap[0][addr] & PHY_ANYPAGE) ? 0
: core->phy[0][PHY_PAGE];
if (*page >= E1000E_PHY_PAGES) {
return false;
}
return e1000e_phy_regcap[*page][addr] & cap;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,766
|
e1000e_phy_reg_write(E1000ECore *core, uint8_t page,
uint32_t addr, uint16_t data)
{
assert(page < E1000E_PHY_PAGES);
assert(addr < E1000E_PHY_PAGE_SIZE);
if (e1000e_phyreg_writeops[page][addr]) {
e1000e_phyreg_writeops[page][addr](core, addr, data);
} else {
core->phy[page][addr] = data;
}
}
|
DoS
| 0
|
e1000e_phy_reg_write(E1000ECore *core, uint8_t page,
uint32_t addr, uint16_t data)
{
assert(page < E1000E_PHY_PAGES);
assert(addr < E1000E_PHY_PAGE_SIZE);
if (e1000e_phyreg_writeops[page][addr]) {
e1000e_phyreg_writeops[page][addr](core, addr, data);
} else {
core->phy[page][addr] = data;
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,767
|
e1000e_postpone_interrupt(bool *interrupt_pending,
E1000IntrDelayTimer *timer)
{
if (timer->running) {
trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2);
*interrupt_pending = true;
return true;
}
if (timer->core->mac[timer->delay_reg] != 0) {
e1000e_intrmgr_rearm_timer(timer);
}
return false;
}
|
DoS
| 0
|
e1000e_postpone_interrupt(bool *interrupt_pending,
E1000IntrDelayTimer *timer)
{
if (timer->running) {
trace_e1000e_irq_postponed_by_xitr(timer->delay_reg << 2);
*interrupt_pending = true;
return true;
}
if (timer->core->mac[timer->delay_reg] != 0) {
e1000e_intrmgr_rearm_timer(timer);
}
return false;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,768
|
e1000e_process_ts_option(E1000ECore *core, struct e1000_tx_desc *dp)
{
if (le32_to_cpu(dp->upper.data) & E1000_TXD_EXTCMD_TSTAMP) {
trace_e1000e_wrn_no_ts_support();
}
}
|
DoS
| 0
|
e1000e_process_ts_option(E1000ECore *core, struct e1000_tx_desc *dp)
{
if (le32_to_cpu(dp->upper.data) & E1000_TXD_EXTCMD_TSTAMP) {
trace_e1000e_wrn_no_ts_support();
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,769
|
e1000e_process_tx_desc(E1000ECore *core,
struct e1000e_tx *tx,
struct e1000_tx_desc *dp,
int queue_index)
{
uint32_t txd_lower = le32_to_cpu(dp->lower.data);
uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
unsigned int split_size = txd_lower & 0xffff;
uint64_t addr;
struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
bool eop = txd_lower & E1000_TXD_CMD_EOP;
if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */
e1000x_read_tx_ctx_descr(xp, &tx->props);
e1000e_process_snap_option(core, le32_to_cpu(xp->cmd_and_length));
return;
} else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
/* data descriptor */
tx->props.sum_needed = le32_to_cpu(dp->upper.data) >> 8;
tx->props.cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
e1000e_process_ts_option(core, dp);
} else {
/* legacy descriptor */
e1000e_process_ts_option(core, dp);
tx->props.cptse = 0;
}
addr = le64_to_cpu(dp->buffer_addr);
if (!tx->skip_cp) {
if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, addr, split_size)) {
tx->skip_cp = true;
}
}
if (eop) {
if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
if (e1000x_vlan_enabled(core->mac) &&
e1000x_is_vlan_txd(txd_lower)) {
net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt,
le16_to_cpu(dp->upper.fields.special), core->vet);
}
if (e1000e_tx_pkt_send(core, tx, queue_index)) {
e1000e_on_tx_done_update_stats(core, tx->tx_pkt);
}
}
tx->skip_cp = false;
net_tx_pkt_reset(tx->tx_pkt);
tx->props.sum_needed = 0;
tx->props.cptse = 0;
}
}
|
DoS
| 0
|
e1000e_process_tx_desc(E1000ECore *core,
struct e1000e_tx *tx,
struct e1000_tx_desc *dp,
int queue_index)
{
uint32_t txd_lower = le32_to_cpu(dp->lower.data);
uint32_t dtype = txd_lower & (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D);
unsigned int split_size = txd_lower & 0xffff;
uint64_t addr;
struct e1000_context_desc *xp = (struct e1000_context_desc *)dp;
bool eop = txd_lower & E1000_TXD_CMD_EOP;
if (dtype == E1000_TXD_CMD_DEXT) { /* context descriptor */
e1000x_read_tx_ctx_descr(xp, &tx->props);
e1000e_process_snap_option(core, le32_to_cpu(xp->cmd_and_length));
return;
} else if (dtype == (E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D)) {
/* data descriptor */
tx->props.sum_needed = le32_to_cpu(dp->upper.data) >> 8;
tx->props.cptse = (txd_lower & E1000_TXD_CMD_TSE) ? 1 : 0;
e1000e_process_ts_option(core, dp);
} else {
/* legacy descriptor */
e1000e_process_ts_option(core, dp);
tx->props.cptse = 0;
}
addr = le64_to_cpu(dp->buffer_addr);
if (!tx->skip_cp) {
if (!net_tx_pkt_add_raw_fragment(tx->tx_pkt, addr, split_size)) {
tx->skip_cp = true;
}
}
if (eop) {
if (!tx->skip_cp && net_tx_pkt_parse(tx->tx_pkt)) {
if (e1000x_vlan_enabled(core->mac) &&
e1000x_is_vlan_txd(txd_lower)) {
net_tx_pkt_setup_vlan_header_ex(tx->tx_pkt,
le16_to_cpu(dp->upper.fields.special), core->vet);
}
if (e1000e_tx_pkt_send(core, tx, queue_index)) {
e1000e_on_tx_done_update_stats(core, tx->tx_pkt);
}
}
tx->skip_cp = false;
net_tx_pkt_reset(tx->tx_pkt);
tx->props.sum_needed = 0;
tx->props.cptse = 0;
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,770
|
e1000e_raise_legacy_irq(E1000ECore *core)
{
trace_e1000e_irq_legacy_notify(true);
e1000x_inc_reg_if_not_full(core->mac, IAC);
pci_set_irq(core->owner, 1);
}
|
DoS
| 0
|
e1000e_raise_legacy_irq(E1000ECore *core)
{
trace_e1000e_irq_legacy_notify(true);
e1000x_inc_reg_if_not_full(core->mac, IAC);
pci_set_irq(core->owner, 1);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,771
|
e1000e_read_lgcy_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr)
{
struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
*buff_addr = le64_to_cpu(d->buffer_addr);
}
|
DoS
| 0
|
e1000e_read_lgcy_rx_descr(E1000ECore *core, uint8_t *desc, hwaddr *buff_addr)
{
struct e1000_rx_desc *d = (struct e1000_rx_desc *) desc;
*buff_addr = le64_to_cpu(d->buffer_addr);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,772
|
e1000e_read_ps_rx_descr(E1000ECore *core, uint8_t *desc,
hwaddr (*buff_addr)[MAX_PS_BUFFERS])
{
int i;
union e1000_rx_desc_packet_split *d =
(union e1000_rx_desc_packet_split *) desc;
for (i = 0; i < MAX_PS_BUFFERS; i++) {
(*buff_addr)[i] = le64_to_cpu(d->read.buffer_addr[i]);
}
trace_e1000e_rx_desc_ps_read((*buff_addr)[0], (*buff_addr)[1],
(*buff_addr)[2], (*buff_addr)[3]);
}
|
DoS
| 0
|
e1000e_read_ps_rx_descr(E1000ECore *core, uint8_t *desc,
hwaddr (*buff_addr)[MAX_PS_BUFFERS])
{
int i;
union e1000_rx_desc_packet_split *d =
(union e1000_rx_desc_packet_split *) desc;
for (i = 0; i < MAX_PS_BUFFERS; i++) {
(*buff_addr)[i] = le64_to_cpu(d->read.buffer_addr[i]);
}
trace_e1000e_rx_desc_ps_read((*buff_addr)[0], (*buff_addr)[1],
(*buff_addr)[2], (*buff_addr)[3]);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,773
|
e1000e_read_rx_descr(E1000ECore *core, uint8_t *desc,
hwaddr (*buff_addr)[MAX_PS_BUFFERS])
{
if (e1000e_rx_use_legacy_descriptor(core)) {
e1000e_read_lgcy_rx_descr(core, desc, &(*buff_addr)[0]);
(*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0;
} else {
if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
e1000e_read_ps_rx_descr(core, desc, buff_addr);
} else {
e1000e_read_ext_rx_descr(core, desc, &(*buff_addr)[0]);
(*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0;
}
}
}
|
DoS
| 0
|
e1000e_read_rx_descr(E1000ECore *core, uint8_t *desc,
hwaddr (*buff_addr)[MAX_PS_BUFFERS])
{
if (e1000e_rx_use_legacy_descriptor(core)) {
e1000e_read_lgcy_rx_descr(core, desc, &(*buff_addr)[0]);
(*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0;
} else {
if (core->mac[RCTL] & E1000_RCTL_DTYP_PS) {
e1000e_read_ps_rx_descr(core, desc, buff_addr);
} else {
e1000e_read_ext_rx_descr(core, desc, &(*buff_addr)[0]);
(*buff_addr)[1] = (*buff_addr)[2] = (*buff_addr)[3] = 0;
}
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,774
|
e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size)
{
const struct iovec iov = {
.iov_base = (uint8_t *)buf,
.iov_len = size
};
return e1000e_receive_iov(core, &iov, 1);
}
|
DoS
| 0
|
e1000e_receive(E1000ECore *core, const uint8_t *buf, size_t size)
{
const struct iovec iov = {
.iov_base = (uint8_t *)buf,
.iov_len = size
};
return e1000e_receive_iov(core, &iov, 1);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,775
|
e1000e_receive_filter(E1000ECore *core, const uint8_t *buf, int size)
{
uint32_t rctl = core->mac[RCTL];
if (e1000x_is_vlan_packet(buf, core->vet) &&
e1000x_vlan_rx_filter_enabled(core->mac)) {
uint16_t vid = lduw_be_p(buf + 14);
uint32_t vfta = ldl_le_p((uint32_t *)(core->mac + VFTA) +
((vid >> 5) & 0x7f));
if ((vfta & (1 << (vid & 0x1f))) == 0) {
trace_e1000e_rx_flt_vlan_mismatch(vid);
return false;
} else {
trace_e1000e_rx_flt_vlan_match(vid);
}
}
switch (net_rx_pkt_get_packet_type(core->rx_pkt)) {
case ETH_PKT_UCAST:
if (rctl & E1000_RCTL_UPE) {
return true; /* promiscuous ucast */
}
break;
case ETH_PKT_BCAST:
if (rctl & E1000_RCTL_BAM) {
return true; /* broadcast enabled */
}
break;
case ETH_PKT_MCAST:
if (rctl & E1000_RCTL_MPE) {
return true; /* promiscuous mcast */
}
break;
default:
g_assert_not_reached();
}
return e1000x_rx_group_filter(core->mac, buf);
}
|
DoS
| 0
|
e1000e_receive_filter(E1000ECore *core, const uint8_t *buf, int size)
{
uint32_t rctl = core->mac[RCTL];
if (e1000x_is_vlan_packet(buf, core->vet) &&
e1000x_vlan_rx_filter_enabled(core->mac)) {
uint16_t vid = lduw_be_p(buf + 14);
uint32_t vfta = ldl_le_p((uint32_t *)(core->mac + VFTA) +
((vid >> 5) & 0x7f));
if ((vfta & (1 << (vid & 0x1f))) == 0) {
trace_e1000e_rx_flt_vlan_mismatch(vid);
return false;
} else {
trace_e1000e_rx_flt_vlan_match(vid);
}
}
switch (net_rx_pkt_get_packet_type(core->rx_pkt)) {
case ETH_PKT_UCAST:
if (rctl & E1000_RCTL_UPE) {
return true; /* promiscuous ucast */
}
break;
case ETH_PKT_BCAST:
if (rctl & E1000_RCTL_BAM) {
return true; /* broadcast enabled */
}
break;
case ETH_PKT_MCAST:
if (rctl & E1000_RCTL_MPE) {
return true; /* promiscuous mcast */
}
break;
default:
g_assert_not_reached();
}
return e1000x_rx_group_filter(core->mac, buf);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,776
|
e1000e_ring_advance(E1000ECore *core, const E1000E_RingInfo *r, uint32_t count)
{
core->mac[r->dh] += count;
if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) {
core->mac[r->dh] = 0;
}
}
|
DoS
| 0
|
e1000e_ring_advance(E1000ECore *core, const E1000E_RingInfo *r, uint32_t count)
{
core->mac[r->dh] += count;
if (core->mac[r->dh] * E1000_RING_DESC_LEN >= core->mac[r->dlen]) {
core->mac[r->dh] = 0;
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,777
|
e1000e_ring_free_descr_num(E1000ECore *core, const E1000E_RingInfo *r)
{
trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen],
core->mac[r->dh], core->mac[r->dt]);
if (core->mac[r->dh] <= core->mac[r->dt]) {
return core->mac[r->dt] - core->mac[r->dh];
}
if (core->mac[r->dh] > core->mac[r->dt]) {
return core->mac[r->dlen] / E1000_RING_DESC_LEN +
core->mac[r->dt] - core->mac[r->dh];
}
g_assert_not_reached();
return 0;
}
|
DoS
| 0
|
e1000e_ring_free_descr_num(E1000ECore *core, const E1000E_RingInfo *r)
{
trace_e1000e_ring_free_space(r->idx, core->mac[r->dlen],
core->mac[r->dh], core->mac[r->dt]);
if (core->mac[r->dh] <= core->mac[r->dt]) {
return core->mac[r->dt] - core->mac[r->dh];
}
if (core->mac[r->dh] > core->mac[r->dt]) {
return core->mac[r->dlen] / E1000_RING_DESC_LEN +
core->mac[r->dt] - core->mac[r->dh];
}
g_assert_not_reached();
return 0;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,778
|
e1000e_ring_head_descr(E1000ECore *core, const E1000E_RingInfo *r)
{
return e1000e_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh];
}
|
DoS
| 0
|
e1000e_ring_head_descr(E1000ECore *core, const E1000E_RingInfo *r)
{
return e1000e_ring_base(core, r) + E1000_RING_DESC_LEN * core->mac[r->dh];
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,779
|
e1000e_ring_len(E1000ECore *core, const E1000E_RingInfo *r)
{
return core->mac[r->dlen];
}
|
DoS
| 0
|
e1000e_ring_len(E1000ECore *core, const E1000E_RingInfo *r)
{
return core->mac[r->dlen];
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,780
|
e1000e_rss_calc_hash(E1000ECore *core,
struct NetRxPkt *pkt,
E1000E_RSSInfo *info)
{
NetRxPktRssType type;
assert(e1000e_rss_enabled(core));
switch (info->type) {
case E1000_MRQ_RSS_TYPE_IPV4:
type = NetPktRssIpV4;
break;
case E1000_MRQ_RSS_TYPE_IPV4TCP:
type = NetPktRssIpV4Tcp;
break;
case E1000_MRQ_RSS_TYPE_IPV6TCP:
type = NetPktRssIpV6Tcp;
break;
case E1000_MRQ_RSS_TYPE_IPV6:
type = NetPktRssIpV6;
break;
case E1000_MRQ_RSS_TYPE_IPV6EX:
type = NetPktRssIpV6Ex;
break;
default:
assert(false);
return 0;
}
return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
}
|
DoS
| 0
|
e1000e_rss_calc_hash(E1000ECore *core,
struct NetRxPkt *pkt,
E1000E_RSSInfo *info)
{
NetRxPktRssType type;
assert(e1000e_rss_enabled(core));
switch (info->type) {
case E1000_MRQ_RSS_TYPE_IPV4:
type = NetPktRssIpV4;
break;
case E1000_MRQ_RSS_TYPE_IPV4TCP:
type = NetPktRssIpV4Tcp;
break;
case E1000_MRQ_RSS_TYPE_IPV6TCP:
type = NetPktRssIpV6Tcp;
break;
case E1000_MRQ_RSS_TYPE_IPV6:
type = NetPktRssIpV6;
break;
case E1000_MRQ_RSS_TYPE_IPV6EX:
type = NetPktRssIpV6Ex;
break;
default:
assert(false);
return 0;
}
return net_rx_pkt_calc_rss_hash(pkt, type, (uint8_t *) &core->mac[RSSRK]);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,781
|
e1000e_rss_enabled(E1000ECore *core)
{
return E1000_MRQC_ENABLED(core->mac[MRQC]) &&
!e1000e_rx_csum_enabled(core) &&
!e1000e_rx_use_legacy_descriptor(core);
}
|
DoS
| 0
|
e1000e_rss_enabled(E1000ECore *core)
{
return E1000_MRQC_ENABLED(core->mac[MRQC]) &&
!e1000e_rx_csum_enabled(core) &&
!e1000e_rx_use_legacy_descriptor(core);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,782
|
e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt)
{
bool isip4, isip6, isudp, istcp;
assert(e1000e_rss_enabled(core));
net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
if (isip4) {
bool fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
trace_e1000e_rx_rss_ip4(fragment, istcp, core->mac[MRQC],
E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
E1000_MRQC_EN_IPV4(core->mac[MRQC]));
if (!fragment && istcp && E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV4TCP;
}
if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV4;
}
} else if (isip6) {
eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);
bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;
/*
* Following two traces must not be combined because resulting
* event will have 11 arguments totally and some trace backends
* (at least "ust") have limitation of maximum 10 arguments per
* event. Events with more arguments fail to compile for
* backends like these.
*/
trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, istcp,
ip6info->has_ext_hdrs,
ip6info->rss_ex_dst_valid,
ip6info->rss_ex_src_valid,
core->mac[MRQC],
E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]),
E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
E1000_MRQC_EN_IPV6(core->mac[MRQC]));
if ((!ex_dis || !ip6info->has_ext_hdrs) &&
(!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
ip6info->rss_ex_src_valid))) {
if (istcp && !ip6info->fragment &&
E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV6TCP;
}
if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV6EX;
}
}
if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV6;
}
}
return E1000_MRQ_RSS_TYPE_NONE;
}
|
DoS
| 0
|
e1000e_rss_get_hash_type(E1000ECore *core, struct NetRxPkt *pkt)
{
bool isip4, isip6, isudp, istcp;
assert(e1000e_rss_enabled(core));
net_rx_pkt_get_protocols(pkt, &isip4, &isip6, &isudp, &istcp);
if (isip4) {
bool fragment = net_rx_pkt_get_ip4_info(pkt)->fragment;
trace_e1000e_rx_rss_ip4(fragment, istcp, core->mac[MRQC],
E1000_MRQC_EN_TCPIPV4(core->mac[MRQC]),
E1000_MRQC_EN_IPV4(core->mac[MRQC]));
if (!fragment && istcp && E1000_MRQC_EN_TCPIPV4(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV4TCP;
}
if (E1000_MRQC_EN_IPV4(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV4;
}
} else if (isip6) {
eth_ip6_hdr_info *ip6info = net_rx_pkt_get_ip6_info(pkt);
bool ex_dis = core->mac[RFCTL] & E1000_RFCTL_IPV6_EX_DIS;
bool new_ex_dis = core->mac[RFCTL] & E1000_RFCTL_NEW_IPV6_EXT_DIS;
/*
* Following two traces must not be combined because resulting
* event will have 11 arguments totally and some trace backends
* (at least "ust") have limitation of maximum 10 arguments per
* event. Events with more arguments fail to compile for
* backends like these.
*/
trace_e1000e_rx_rss_ip6_rfctl(core->mac[RFCTL]);
trace_e1000e_rx_rss_ip6(ex_dis, new_ex_dis, istcp,
ip6info->has_ext_hdrs,
ip6info->rss_ex_dst_valid,
ip6info->rss_ex_src_valid,
core->mac[MRQC],
E1000_MRQC_EN_TCPIPV6(core->mac[MRQC]),
E1000_MRQC_EN_IPV6EX(core->mac[MRQC]),
E1000_MRQC_EN_IPV6(core->mac[MRQC]));
if ((!ex_dis || !ip6info->has_ext_hdrs) &&
(!new_ex_dis || !(ip6info->rss_ex_dst_valid ||
ip6info->rss_ex_src_valid))) {
if (istcp && !ip6info->fragment &&
E1000_MRQC_EN_TCPIPV6(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV6TCP;
}
if (E1000_MRQC_EN_IPV6EX(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV6EX;
}
}
if (E1000_MRQC_EN_IPV6(core->mac[MRQC])) {
return E1000_MRQ_RSS_TYPE_IPV6;
}
}
return E1000_MRQ_RSS_TYPE_NONE;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,783
|
e1000e_rss_parse_packet(E1000ECore *core,
struct NetRxPkt *pkt,
E1000E_RSSInfo *info)
{
trace_e1000e_rx_rss_started();
if (!e1000e_rss_enabled(core)) {
info->enabled = false;
info->hash = 0;
info->queue = 0;
info->type = 0;
trace_e1000e_rx_rss_disabled();
return;
}
info->enabled = true;
info->type = e1000e_rss_get_hash_type(core, pkt);
trace_e1000e_rx_rss_type(info->type);
if (info->type == E1000_MRQ_RSS_TYPE_NONE) {
info->hash = 0;
info->queue = 0;
return;
}
info->hash = e1000e_rss_calc_hash(core, pkt, info);
info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
}
|
DoS
| 0
|
e1000e_rss_parse_packet(E1000ECore *core,
struct NetRxPkt *pkt,
E1000E_RSSInfo *info)
{
trace_e1000e_rx_rss_started();
if (!e1000e_rss_enabled(core)) {
info->enabled = false;
info->hash = 0;
info->queue = 0;
info->type = 0;
trace_e1000e_rx_rss_disabled();
return;
}
info->enabled = true;
info->type = e1000e_rss_get_hash_type(core, pkt);
trace_e1000e_rx_rss_type(info->type);
if (info->type == E1000_MRQ_RSS_TYPE_NONE) {
info->hash = 0;
info->queue = 0;
return;
}
info->hash = e1000e_rss_calc_hash(core, pkt, info);
info->queue = E1000_RSS_QUEUE(&core->mac[RETA], info->hash);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,784
|
e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt)
{
if (net_rx_pkt_has_virt_hdr(pkt)) {
struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt);
if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
net_rx_pkt_fix_l4_csum(pkt);
}
}
}
|
DoS
| 0
|
e1000e_rx_fix_l4_csum(E1000ECore *core, struct NetRxPkt *pkt)
{
if (net_rx_pkt_has_virt_hdr(pkt)) {
struct virtio_net_hdr *vhdr = net_rx_pkt_get_vhdr(pkt);
if (vhdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
net_rx_pkt_fix_l4_csum(pkt);
}
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,785
|
e1000e_rx_l3_cso_enabled(E1000ECore *core)
{
return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD);
}
|
DoS
| 0
|
e1000e_rx_l3_cso_enabled(E1000ECore *core)
{
return !!(core->mac[RXCSUM] & E1000_RXCSUM_IPOFLD);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,786
|
e1000e_rx_l4_cso_enabled(E1000ECore *core)
{
return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD);
}
|
DoS
| 0
|
e1000e_rx_l4_cso_enabled(E1000ECore *core)
{
return !!(core->mac[RXCSUM] & E1000_RXCSUM_TUOFLD);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,787
|
e1000e_rx_use_legacy_descriptor(E1000ECore *core)
{
return (core->mac[RFCTL] & E1000_RFCTL_EXTEN) ? false : true;
}
|
DoS
| 0
|
e1000e_rx_use_legacy_descriptor(E1000ECore *core)
{
return (core->mac[RFCTL] & E1000_RFCTL_EXTEN) ? false : true;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,788
|
e1000e_rx_use_ps_descriptor(E1000ECore *core)
{
return !e1000e_rx_use_legacy_descriptor(core) &&
(core->mac[RCTL] & E1000_RCTL_DTYP_PS);
}
|
DoS
| 0
|
e1000e_rx_use_ps_descriptor(E1000ECore *core)
{
return !e1000e_rx_use_legacy_descriptor(core) &&
(core->mac[RCTL] & E1000_RCTL_DTYP_PS);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,789
|
e1000e_rx_wb_interrupt_cause(E1000ECore *core, int queue_idx,
bool min_threshold_hit)
{
if (!msix_enabled(core->owner)) {
return E1000_ICS_RXT0 | (min_threshold_hit ? E1000_ICS_RXDMT0 : 0);
}
return (queue_idx == 0) ? E1000_ICR_RXQ0 : E1000_ICR_RXQ1;
}
|
DoS
| 0
|
e1000e_rx_wb_interrupt_cause(E1000ECore *core, int queue_idx,
bool min_threshold_hit)
{
if (!msix_enabled(core->owner)) {
return E1000_ICS_RXT0 | (min_threshold_hit ? E1000_ICS_RXDMT0 : 0);
}
return (queue_idx == 0) ? E1000_ICR_RXQ0 : E1000_ICR_RXQ1;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,790
|
e1000e_send_msi(E1000ECore *core, bool msix)
{
uint32_t causes = core->mac[ICR] & core->mac[IMS] & ~E1000_ICR_ASSERTED;
if (msix) {
e1000e_msix_notify(core, causes);
} else {
if (!e1000e_itr_should_postpone(core)) {
trace_e1000e_irq_msi_notify(causes);
msi_notify(core->owner, 0);
}
}
}
|
DoS
| 0
|
e1000e_send_msi(E1000ECore *core, bool msix)
{
uint32_t causes = core->mac[ICR] & core->mac[IMS] & ~E1000_ICR_ASSERTED;
if (msix) {
e1000e_msix_notify(core, causes);
} else {
if (!e1000e_itr_should_postpone(core)) {
trace_e1000e_irq_msi_notify(causes);
msi_notify(core->owner, 0);
}
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,791
|
e1000e_set_12bit(E1000ECore *core, int index, uint32_t val)
{
core->mac[index] = val & 0xfff;
}
|
DoS
| 0
|
e1000e_set_12bit(E1000ECore *core, int index, uint32_t val)
{
core->mac[index] = val & 0xfff;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,792
|
e1000e_set_ctrl(E1000ECore *core, int index, uint32_t val)
{
trace_e1000e_core_ctrl_write(index, val);
/* RST is self clearing */
core->mac[CTRL] = val & ~E1000_CTRL_RST;
core->mac[CTRL_DUP] = core->mac[CTRL];
trace_e1000e_link_set_params(
!!(val & E1000_CTRL_ASDE),
(val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
!!(val & E1000_CTRL_FRCSPD),
!!(val & E1000_CTRL_FRCDPX),
!!(val & E1000_CTRL_RFCE),
!!(val & E1000_CTRL_TFCE));
if (val & E1000_CTRL_RST) {
trace_e1000e_core_ctrl_sw_reset();
e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);
}
if (val & E1000_CTRL_PHY_RST) {
trace_e1000e_core_ctrl_phy_reset();
core->mac[STATUS] |= E1000_STATUS_PHYRA;
}
}
|
DoS
| 0
|
e1000e_set_ctrl(E1000ECore *core, int index, uint32_t val)
{
trace_e1000e_core_ctrl_write(index, val);
/* RST is self clearing */
core->mac[CTRL] = val & ~E1000_CTRL_RST;
core->mac[CTRL_DUP] = core->mac[CTRL];
trace_e1000e_link_set_params(
!!(val & E1000_CTRL_ASDE),
(val & E1000_CTRL_SPD_SEL) >> E1000_CTRL_SPD_SHIFT,
!!(val & E1000_CTRL_FRCSPD),
!!(val & E1000_CTRL_FRCDPX),
!!(val & E1000_CTRL_RFCE),
!!(val & E1000_CTRL_TFCE));
if (val & E1000_CTRL_RST) {
trace_e1000e_core_ctrl_sw_reset();
e1000x_reset_mac_addr(core->owner_nic, core->mac, core->permanent_mac);
}
if (val & E1000_CTRL_PHY_RST) {
trace_e1000e_core_ctrl_phy_reset();
core->mac[STATUS] |= E1000_STATUS_PHYRA;
}
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,793
|
e1000e_set_ctrlext(E1000ECore *core, int index, uint32_t val)
{
trace_e1000e_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK),
!!(val & E1000_CTRL_EXT_SPD_BYPS));
/* Zero self-clearing bits */
val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST);
core->mac[CTRL_EXT] = val;
}
|
DoS
| 0
|
e1000e_set_ctrlext(E1000ECore *core, int index, uint32_t val)
{
trace_e1000e_link_set_ext_params(!!(val & E1000_CTRL_EXT_ASDCHK),
!!(val & E1000_CTRL_EXT_SPD_BYPS));
/* Zero self-clearing bits */
val &= ~(E1000_CTRL_EXT_ASDCHK | E1000_CTRL_EXT_EE_RST);
core->mac[CTRL_EXT] = val;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,794
|
e1000e_set_dbal(E1000ECore *core, int index, uint32_t val)
{
core->mac[index] = val & E1000_XDBAL_MASK;
}
|
DoS
| 0
|
e1000e_set_dbal(E1000ECore *core, int index, uint32_t val)
{
core->mac[index] = val & E1000_XDBAL_MASK;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,795
|
e1000e_set_dlen(E1000ECore *core, int index, uint32_t val)
{
core->mac[index] = val & E1000_XDLEN_MASK;
}
|
DoS
| 0
|
e1000e_set_dlen(E1000ECore *core, int index, uint32_t val)
{
core->mac[index] = val & E1000_XDLEN_MASK;
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,796
|
e1000e_set_eecd(E1000ECore *core, int index, uint32_t val)
{
static const uint32_t ro_bits = E1000_EECD_PRES |
E1000_EECD_AUTO_RD |
E1000_EECD_SIZE_EX_MASK;
core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits);
}
|
DoS
| 0
|
e1000e_set_eecd(E1000ECore *core, int index, uint32_t val)
{
static const uint32_t ro_bits = E1000_EECD_PRES |
E1000_EECD_AUTO_RD |
E1000_EECD_SIZE_EX_MASK;
core->mac[EECD] = (core->mac[EECD] & ro_bits) | (val & ~ro_bits);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,797
|
e1000e_set_eerd(E1000ECore *core, int index, uint32_t val)
{
uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
uint32_t flags = 0;
uint32_t data = 0;
if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) {
data = core->eeprom[addr];
flags = E1000_EERW_DONE;
}
core->mac[EERD] = flags |
(addr << E1000_EERW_ADDR_SHIFT) |
(data << E1000_EERW_DATA_SHIFT);
}
|
DoS
| 0
|
e1000e_set_eerd(E1000ECore *core, int index, uint32_t val)
{
uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
uint32_t flags = 0;
uint32_t data = 0;
if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) {
data = core->eeprom[addr];
flags = E1000_EERW_DONE;
}
core->mac[EERD] = flags |
(addr << E1000_EERW_ADDR_SHIFT) |
(data << E1000_EERW_DATA_SHIFT);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,798
|
e1000e_set_eewr(E1000ECore *core, int index, uint32_t val)
{
uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
uint32_t data = (val >> E1000_EERW_DATA_SHIFT) & E1000_EERW_DATA_MASK;
uint32_t flags = 0;
if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) {
core->eeprom[addr] = data;
flags = E1000_EERW_DONE;
}
core->mac[EERD] = flags |
(addr << E1000_EERW_ADDR_SHIFT) |
(data << E1000_EERW_DATA_SHIFT);
}
|
DoS
| 0
|
e1000e_set_eewr(E1000ECore *core, int index, uint32_t val)
{
uint32_t addr = (val >> E1000_EERW_ADDR_SHIFT) & E1000_EERW_ADDR_MASK;
uint32_t data = (val >> E1000_EERW_DATA_SHIFT) & E1000_EERW_DATA_MASK;
uint32_t flags = 0;
if ((addr < E1000E_EEPROM_SIZE) && (val & E1000_EERW_START)) {
core->eeprom[addr] = data;
flags = E1000_EERW_DONE;
}
core->mac[EERD] = flags |
(addr << E1000_EERW_ADDR_SHIFT) |
(data << E1000_EERW_DATA_SHIFT);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |
4,799
|
e1000e_set_eitr(E1000ECore *core, int index, uint32_t val)
{
uint32_t interval = val & 0xffff;
uint32_t eitr_num = index - EITR;
trace_e1000e_irq_eitr_set(eitr_num, val);
core->eitr_guest_value[eitr_num] = interval;
core->mac[index] = MAX(interval, E1000E_MIN_XITR);
}
|
DoS
| 0
|
e1000e_set_eitr(E1000ECore *core, int index, uint32_t val)
{
uint32_t interval = val & 0xffff;
uint32_t eitr_num = index - EITR;
trace_e1000e_irq_eitr_set(eitr_num, val);
core->eitr_guest_value[eitr_num] = interval;
core->mac[index] = MAX(interval, E1000E_MIN_XITR);
}
|
@@ -806,7 +806,8 @@ typedef struct E1000E_RingInfo_st {
static inline bool
e1000e_ring_empty(E1000ECore *core, const E1000E_RingInfo *r)
{
- return core->mac[r->dh] == core->mac[r->dt];
+ return core->mac[r->dh] == core->mac[r->dt] ||
+ core->mac[r->dt] >= core->mac[r->dlen] / E1000_RING_DESC_LEN;
}
static inline uint64_t
@@ -1522,6 +1523,10 @@ e1000e_write_packet_to_guest(E1000ECore *core, struct NetRxPkt *pkt,
desc_size = core->rx_desc_buf_size;
}
+ if (e1000e_ring_empty(core, rxi)) {
+ return;
+ }
+
base = e1000e_ring_head_descr(core, rxi);
pci_dma_read(d, base, &desc, core->rx_desc_len);
|
CWE-835
| null | null |