idx
int64 | func_before
string | Vulnerability Classification
string | vul
int64 | func_after
string | patch
string | CWE ID
string | lines_before
string | lines_after
string |
|---|---|---|---|---|---|---|---|---|
7,000
|
/* Broadcast a DHCPREQUEST selecting the offer from 'server' for the
 * address 'requested'. Per RFC 2131 4.3.2, a DHCPREQUEST that carries
 * a 'server identifier' option answers a DHCPOFFER; the client-id and
 * parameter request list must stay consistent across all messages.
 * Returns the result of the raw broadcast send.
 */
static NOINLINE int send_select(uint32_t xid, uint32_t server, uint32_t requested)
{
	struct dhcp_packet pkt;
	struct in_addr req_addr;

	/* init_packet() fills op, htype, hlen, cookie, chaddr, a random
	 * xid (overwritten just below), the client-id option (unless -C)
	 * and the message type option.
	 */
	init_packet(&pkt, DHCPREQUEST);
	pkt.xid = xid;

	udhcp_add_simple_option(&pkt, DHCP_REQUESTED_IP, requested);
	udhcp_add_simple_option(&pkt, DHCP_SERVER_ID, server);

	/* maxsize, optionally hostname/fqdn/vendorclass, the "param req"
	 * list per -O, and any options given with -x:
	 */
	add_client_options(&pkt);

	req_addr.s_addr = requested;
	bb_error_msg("sending select for %s", inet_ntoa(req_addr));
	return raw_bcast_from_client_config_ifindex(&pkt, INADDR_ANY);
}
|
+Info
| 0
|
/* Broadcast a DHCPREQUEST selecting the offer from 'server' for the
 * address 'requested'. Per RFC 2131 4.3.2, a DHCPREQUEST that carries
 * a 'server identifier' option answers a DHCPOFFER; the client-id and
 * parameter request list must stay consistent across all messages.
 * Returns the result of the raw broadcast send.
 */
static NOINLINE int send_select(uint32_t xid, uint32_t server, uint32_t requested)
{
	struct dhcp_packet pkt;
	struct in_addr req_addr;

	/* init_packet() fills op, htype, hlen, cookie, chaddr, a random
	 * xid (overwritten just below), the client-id option (unless -C)
	 * and the message type option.
	 */
	init_packet(&pkt, DHCPREQUEST);
	pkt.xid = xid;

	udhcp_add_simple_option(&pkt, DHCP_REQUESTED_IP, requested);
	udhcp_add_simple_option(&pkt, DHCP_SERVER_ID, server);

	/* maxsize, optionally hostname/fqdn/vendorclass, the "param req"
	 * list per -O, and any options given with -x:
	 */
	add_client_options(&pkt);

	req_addr.s_addr = requested;
	bb_error_msg("sending select for %s", inet_ntoa(req_addr));
	return raw_bcast_from_client_config_ifindex(&pkt, INADDR_ANY);
}
|
@@ -531,7 +531,7 @@ static char **fill_envp(struct dhcp_packet *packet)
temp = udhcp_get_option(packet, code);
*curr = xmalloc_optname_optval(temp, &dhcp_optflags[i], opt_name);
putenv(*curr++);
- if (code == DHCP_SUBNET) {
+ if (code == DHCP_SUBNET && temp[-OPT_DATA + OPT_LEN] == 4) {
/* Subnet option: make things like "$ip/$mask" possible */
uint32_t subnet;
move_from_unaligned32(subnet, temp);
|
CWE-125
| null | null |
7,001
|
/* Write "<pre>A.B.C.D" (prefix + dotted-quad) into dest.
 * Caller must guarantee dest is large enough (max: strlen(pre) + 15 + NUL).
 * Returns the number of characters written, excluding the NUL.
 */
static int sprint_nip(char *dest, const char *pre, const uint8_t *ip)
{
	int len = sprintf(dest, "%s", pre);
	len += sprintf(dest + len, "%u.%u.%u.%u", ip[0], ip[1], ip[2], ip[3]);
	return len;
}
|
+Info
| 0
|
/* Write "<pre>A.B.C.D" (prefix + dotted-quad) into dest.
 * Caller must guarantee dest is large enough (max: strlen(pre) + 15 + NUL).
 * Returns the number of characters written, excluding the NUL.
 */
static int sprint_nip(char *dest, const char *pre, const uint8_t *ip)
{
	int len = sprintf(dest, "%s", pre);
	len += sprintf(dest + len, "%u.%u.%u.%u", ip[0], ip[1], ip[2], ip[3]);
	return len;
}
|
@@ -531,7 +531,7 @@ static char **fill_envp(struct dhcp_packet *packet)
temp = udhcp_get_option(packet, code);
*curr = xmalloc_optname_optval(temp, &dhcp_optflags[i], opt_name);
putenv(*curr++);
- if (code == DHCP_SUBNET) {
+ if (code == DHCP_SUBNET && temp[-OPT_DATA + OPT_LEN] == 4) {
/* Subnet option: make things like "$ip/$mask" possible */
uint32_t subnet;
move_from_unaligned32(subnet, temp);
|
CWE-125
| null | null |
7,002
|
/* Open a PF_PACKET/SOCK_DGRAM socket bound to interface 'ifindex',
 * receiving only IPv4-ethertype frames. Used while the interface has
 * no configured IP address, so DHCP replies must be read raw.
 * NOTE(review): xsocket()/xbind() are busybox x* wrappers, presumed to
 * exit on failure (hence no error paths here) — confirm against libbb.
 * Returns the open socket fd.
 */
static int udhcp_raw_socket(int ifindex)
{
	int fd;
	struct sockaddr_ll sock;

	log2("opening raw socket on ifindex %d", ifindex);

	fd = xsocket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
	/* ^^^^^
	 * SOCK_DGRAM: remove link-layer headers on input (SOCK_RAW keeps them)
	 * ETH_P_IP: want to receive only packets with IPv4 eth type
	 */
	log2("got raw socket fd");

	memset(&sock, 0, sizeof(sock)); /* let's be deterministic */
	sock.sll_family = AF_PACKET;
	sock.sll_protocol = htons(ETH_P_IP);
	sock.sll_ifindex = ifindex;
	/*sock.sll_hatype = ARPHRD_???;*/
	/*sock.sll_pkttype = PACKET_???;*/
	/*sock.sll_halen = ???;*/
	/*sock.sll_addr[8] = ???;*/
	xbind(fd, (struct sockaddr *) &sock, sizeof(sock));

#if 0 /* Several users reported breakage when BPF filter is used */
	if (CLIENT_PORT == 68) {
		/* Use only if standard port is in use */
		/*
		 * I've selected not to see LL header, so BPF doesn't see it, too.
		 * The filter may also pass non-IP and non-ARP packets, but we do
		 * a more complete check when receiving the message in userspace.
		 *
		 * and filter shamelessly stolen from:
		 *
		 * http://www.flamewarmaster.de/software/dhcpclient/
		 *
		 * There are a few other interesting ideas on that page (look under
		 * "Motivation"). Use of netlink events is most interesting. Think
		 * of various network servers listening for events and reconfiguring.
		 * That would obsolete sending HUP signals and/or make use of restarts.
		 *
		 * Copyright: 2006, 2007 Stefan Rompf <sux@loplof.de>.
		 * License: GPL v2.
		 */
		static const struct sock_filter filter_instr[] = {
			/* load 9th byte (protocol) */
			BPF_STMT(BPF_LD|BPF_B|BPF_ABS, 9),
			/* jump to L1 if it is IPPROTO_UDP, else to L4 */
			BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, IPPROTO_UDP, 0, 6),
			/* L1: load halfword from offset 6 (flags and frag offset) */
			BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 6),
			/* jump to L4 if any bits in frag offset field are set, else to L2 */
			BPF_JUMP(BPF_JMP|BPF_JSET|BPF_K, 0x1fff, 4, 0),
			/* L2: skip IP header (load index reg with header len) */
			BPF_STMT(BPF_LDX|BPF_B|BPF_MSH, 0),
			/* load udp destination port from halfword[header_len + 2] */
			BPF_STMT(BPF_LD|BPF_H|BPF_IND, 2),
			/* jump to L3 if udp dport is CLIENT_PORT, else to L4 */
			BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 68, 0, 1),
			/* L3: accept packet ("accept 0x7fffffff bytes") */
			/* Accepting 0xffffffff works too but kernel 2.6.19 is buggy */
			BPF_STMT(BPF_RET|BPF_K, 0x7fffffff),
			/* L4: discard packet ("accept zero bytes") */
			BPF_STMT(BPF_RET|BPF_K, 0),
		};
		static const struct sock_fprog filter_prog = {
			.len = sizeof(filter_instr) / sizeof(filter_instr[0]),
			/* casting const away: */
			.filter = (struct sock_filter *) filter_instr,
		};
		/* Ignoring error (kernel may lack support for this) */
		if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &filter_prog,
				sizeof(filter_prog)) >= 0)
			log1("attached filter to raw socket fd"); // log?
	}
#endif

	/* Ask kernel to pass struct tpacket_auxdata with each packet;
	 * udhcp_recv_raw_packet() uses it to detect TP_STATUS_CSUMNOTREADY
	 * (VMs that skip UDP checksumming on local traffic).
	 */
	if (setsockopt_1(fd, SOL_PACKET, PACKET_AUXDATA) != 0) {
		if (errno != ENOPROTOOPT)
			log1("can't set PACKET_AUXDATA on raw socket");
	}

	log1("created raw socket");

	return fd;
}
|
+Info
| 0
|
/* Open a PF_PACKET/SOCK_DGRAM socket bound to interface 'ifindex',
 * receiving only IPv4-ethertype frames. Used while the interface has
 * no configured IP address, so DHCP replies must be read raw.
 * NOTE(review): xsocket()/xbind() are busybox x* wrappers, presumed to
 * exit on failure (hence no error paths here) — confirm against libbb.
 * Returns the open socket fd.
 */
static int udhcp_raw_socket(int ifindex)
{
	int fd;
	struct sockaddr_ll sock;

	log2("opening raw socket on ifindex %d", ifindex);

	fd = xsocket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_IP));
	/* ^^^^^
	 * SOCK_DGRAM: remove link-layer headers on input (SOCK_RAW keeps them)
	 * ETH_P_IP: want to receive only packets with IPv4 eth type
	 */
	log2("got raw socket fd");

	memset(&sock, 0, sizeof(sock)); /* let's be deterministic */
	sock.sll_family = AF_PACKET;
	sock.sll_protocol = htons(ETH_P_IP);
	sock.sll_ifindex = ifindex;
	/*sock.sll_hatype = ARPHRD_???;*/
	/*sock.sll_pkttype = PACKET_???;*/
	/*sock.sll_halen = ???;*/
	/*sock.sll_addr[8] = ???;*/
	xbind(fd, (struct sockaddr *) &sock, sizeof(sock));

#if 0 /* Several users reported breakage when BPF filter is used */
	if (CLIENT_PORT == 68) {
		/* Use only if standard port is in use */
		/*
		 * I've selected not to see LL header, so BPF doesn't see it, too.
		 * The filter may also pass non-IP and non-ARP packets, but we do
		 * a more complete check when receiving the message in userspace.
		 *
		 * and filter shamelessly stolen from:
		 *
		 * http://www.flamewarmaster.de/software/dhcpclient/
		 *
		 * There are a few other interesting ideas on that page (look under
		 * "Motivation"). Use of netlink events is most interesting. Think
		 * of various network servers listening for events and reconfiguring.
		 * That would obsolete sending HUP signals and/or make use of restarts.
		 *
		 * Copyright: 2006, 2007 Stefan Rompf <sux@loplof.de>.
		 * License: GPL v2.
		 */
		static const struct sock_filter filter_instr[] = {
			/* load 9th byte (protocol) */
			BPF_STMT(BPF_LD|BPF_B|BPF_ABS, 9),
			/* jump to L1 if it is IPPROTO_UDP, else to L4 */
			BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, IPPROTO_UDP, 0, 6),
			/* L1: load halfword from offset 6 (flags and frag offset) */
			BPF_STMT(BPF_LD|BPF_H|BPF_ABS, 6),
			/* jump to L4 if any bits in frag offset field are set, else to L2 */
			BPF_JUMP(BPF_JMP|BPF_JSET|BPF_K, 0x1fff, 4, 0),
			/* L2: skip IP header (load index reg with header len) */
			BPF_STMT(BPF_LDX|BPF_B|BPF_MSH, 0),
			/* load udp destination port from halfword[header_len + 2] */
			BPF_STMT(BPF_LD|BPF_H|BPF_IND, 2),
			/* jump to L3 if udp dport is CLIENT_PORT, else to L4 */
			BPF_JUMP(BPF_JMP|BPF_JEQ|BPF_K, 68, 0, 1),
			/* L3: accept packet ("accept 0x7fffffff bytes") */
			/* Accepting 0xffffffff works too but kernel 2.6.19 is buggy */
			BPF_STMT(BPF_RET|BPF_K, 0x7fffffff),
			/* L4: discard packet ("accept zero bytes") */
			BPF_STMT(BPF_RET|BPF_K, 0),
		};
		static const struct sock_fprog filter_prog = {
			.len = sizeof(filter_instr) / sizeof(filter_instr[0]),
			/* casting const away: */
			.filter = (struct sock_filter *) filter_instr,
		};
		/* Ignoring error (kernel may lack support for this) */
		if (setsockopt(fd, SOL_SOCKET, SO_ATTACH_FILTER, &filter_prog,
				sizeof(filter_prog)) >= 0)
			log1("attached filter to raw socket fd"); // log?
	}
#endif

	/* Ask kernel to pass struct tpacket_auxdata with each packet;
	 * udhcp_recv_raw_packet() uses it to detect TP_STATUS_CSUMNOTREADY
	 * (VMs that skip UDP checksumming on local traffic).
	 */
	if (setsockopt_1(fd, SOL_PACKET, PACKET_AUXDATA) != 0) {
		if (errno != ENOPROTOOPT)
			log1("can't set PACKET_AUXDATA on raw socket");
	}

	log1("created raw socket");

	return fd;
}
|
@@ -531,7 +531,7 @@ static char **fill_envp(struct dhcp_packet *packet)
temp = udhcp_get_option(packet, code);
*curr = xmalloc_optname_optval(temp, &dhcp_optflags[i], opt_name);
putenv(*curr++);
- if (code == DHCP_SUBNET) {
+ if (code == DHCP_SUBNET && temp[-OPT_DATA + OPT_LEN] == 4) {
/* Subnet option: make things like "$ip/$mask" possible */
uint32_t subnet;
move_from_unaligned32(subnet, temp);
|
CWE-125
| null | null |
7,003
|
/* Read one raw frame from fd, validate it as an IPv4/UDP datagram for
 * CLIENT_PORT, verify the IP checksum and (unless the kernel flagged
 * TP_STATUS_CSUMNOTREADY via PACKET_AUXDATA) the UDP checksum, then
 * copy only the DHCP payload into *dhcp_pkt.
 * Returns: payload length on success,
 *          -1 on read error (caller should pause before retrying),
 *          -2 if the packet is malformed, bogus, or not for us.
 */
static NOINLINE int udhcp_recv_raw_packet(struct dhcp_packet *dhcp_pkt, int fd)
{
	int bytes;
	struct ip_udp_dhcp_packet packet;
	uint16_t check;
	unsigned char cmsgbuf[CMSG_LEN(sizeof(struct tpacket_auxdata))];
	struct iovec iov;
	struct msghdr msg;
	struct cmsghdr *cmsg;

	/* used to use just safe_read(fd, &packet, sizeof(packet))
	 * but we need to check for TP_STATUS_CSUMNOTREADY :(
	 */
	iov.iov_base = &packet;
	iov.iov_len = sizeof(packet);
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf;
	msg.msg_controllen = sizeof(cmsgbuf);
	for (;;) {
		bytes = recvmsg(fd, &msg, 0);
		if (bytes < 0) {
			if (errno == EINTR)
				continue;
			log1("packet read error, ignoring");
			/* NB: possible down interface, etc. Caller should pause. */
			return bytes; /* returns -1 */
		}
		break;
	}

	if (bytes < (int) (sizeof(packet.ip) + sizeof(packet.udp))) {
		log1("packet is too short, ignoring");
		return -2;
	}
	if (bytes < ntohs(packet.ip.tot_len)) {
		/* packet is bigger than sizeof(packet), we did partial read */
		log1("oversized packet, ignoring");
		return -2;
	}
	/* ignore any extra garbage bytes */
	bytes = ntohs(packet.ip.tot_len);
	/* BUGFIX: tot_len comes from the wire and so far was only checked
	 * against the number of bytes actually read. A crafted
	 * tot_len < sizeof(ip)+sizeof(udp) (with udp.len forged to match)
	 * would make "bytes -= sizeof(...)" below go negative, and the
	 * final memcpy size would wrap to a huge size_t. Re-check: */
	if (bytes < (int) (sizeof(packet.ip) + sizeof(packet.udp))) {
		log1("packet is too short, ignoring");
		return -2;
	}

	/* make sure its the right packet for us, and that it passes sanity checks */
	if (packet.ip.protocol != IPPROTO_UDP
	 || packet.ip.version != IPVERSION
	 || packet.ip.ihl != (sizeof(packet.ip) >> 2)
	 || packet.udp.dest != htons(CLIENT_PORT)
	/* || bytes > (int) sizeof(packet) - can't happen */
	 || ntohs(packet.udp.len) != (uint16_t)(bytes - sizeof(packet.ip))
	) {
		log1("unrelated/bogus packet, ignoring");
		return -2;
	}

	/* verify IP checksum */
	check = packet.ip.check;
	packet.ip.check = 0;
	if (check != inet_cksum((uint16_t *)&packet.ip, sizeof(packet.ip))) {
		log1("bad IP header checksum, ignoring");
		return -2;
	}

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET
		 && cmsg->cmsg_type == PACKET_AUXDATA
		) {
			/* some VMs don't checksum UDP and TCP data
			 * they send to the same physical machine,
			 * here we detect this case:
			 */
			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
			if (aux->tp_status & TP_STATUS_CSUMNOTREADY)
				goto skip_udp_sum_check;
		}
	}

	/* verify UDP checksum. IP header has to be modified for this */
	memset(&packet.ip, 0, offsetof(struct iphdr, protocol));
	/* ip.xx fields which are not memset: protocol, check, saddr, daddr */
	packet.ip.tot_len = packet.udp.len; /* yes, this is needed */
	check = packet.udp.check;
	packet.udp.check = 0;
	if (check && check != inet_cksum((uint16_t *)&packet, bytes)) {
		log1("packet with bad UDP checksum received, ignoring");
		return -2;
	}
 skip_udp_sum_check:

	if (packet.data.cookie != htonl(DHCP_MAGIC)) {
		bb_error_msg("packet with bad magic, ignoring");
		return -2;
	}

	log1("received %s", "a packet");
	udhcp_dump_packet(&packet.data);

	/* Non-negative after the header-size re-check above, and cannot
	 * exceed sizeof(packet.data) (see "can't happen" note above). */
	bytes -= sizeof(packet.ip) + sizeof(packet.udp);
	memcpy(dhcp_pkt, &packet.data, bytes);
	return bytes;
}
|
+Info
| 0
|
/* Read one raw frame from fd, validate it as an IPv4/UDP datagram for
 * CLIENT_PORT, verify the IP checksum and (unless the kernel flagged
 * TP_STATUS_CSUMNOTREADY via PACKET_AUXDATA) the UDP checksum, then
 * copy only the DHCP payload into *dhcp_pkt.
 * Returns: payload length on success,
 *          -1 on read error (caller should pause before retrying),
 *          -2 if the packet is malformed, bogus, or not for us.
 */
static NOINLINE int udhcp_recv_raw_packet(struct dhcp_packet *dhcp_pkt, int fd)
{
	int bytes;
	struct ip_udp_dhcp_packet packet;
	uint16_t check;
	unsigned char cmsgbuf[CMSG_LEN(sizeof(struct tpacket_auxdata))];
	struct iovec iov;
	struct msghdr msg;
	struct cmsghdr *cmsg;

	/* used to use just safe_read(fd, &packet, sizeof(packet))
	 * but we need to check for TP_STATUS_CSUMNOTREADY :(
	 */
	iov.iov_base = &packet;
	iov.iov_len = sizeof(packet);
	memset(&msg, 0, sizeof(msg));
	msg.msg_iov = &iov;
	msg.msg_iovlen = 1;
	msg.msg_control = cmsgbuf;
	msg.msg_controllen = sizeof(cmsgbuf);
	for (;;) {
		bytes = recvmsg(fd, &msg, 0);
		if (bytes < 0) {
			if (errno == EINTR)
				continue;
			log1("packet read error, ignoring");
			/* NB: possible down interface, etc. Caller should pause. */
			return bytes; /* returns -1 */
		}
		break;
	}

	if (bytes < (int) (sizeof(packet.ip) + sizeof(packet.udp))) {
		log1("packet is too short, ignoring");
		return -2;
	}
	if (bytes < ntohs(packet.ip.tot_len)) {
		/* packet is bigger than sizeof(packet), we did partial read */
		log1("oversized packet, ignoring");
		return -2;
	}
	/* ignore any extra garbage bytes */
	bytes = ntohs(packet.ip.tot_len);
	/* BUGFIX: tot_len comes from the wire and so far was only checked
	 * against the number of bytes actually read. A crafted
	 * tot_len < sizeof(ip)+sizeof(udp) (with udp.len forged to match)
	 * would make "bytes -= sizeof(...)" below go negative, and the
	 * final memcpy size would wrap to a huge size_t. Re-check: */
	if (bytes < (int) (sizeof(packet.ip) + sizeof(packet.udp))) {
		log1("packet is too short, ignoring");
		return -2;
	}

	/* make sure its the right packet for us, and that it passes sanity checks */
	if (packet.ip.protocol != IPPROTO_UDP
	 || packet.ip.version != IPVERSION
	 || packet.ip.ihl != (sizeof(packet.ip) >> 2)
	 || packet.udp.dest != htons(CLIENT_PORT)
	/* || bytes > (int) sizeof(packet) - can't happen */
	 || ntohs(packet.udp.len) != (uint16_t)(bytes - sizeof(packet.ip))
	) {
		log1("unrelated/bogus packet, ignoring");
		return -2;
	}

	/* verify IP checksum */
	check = packet.ip.check;
	packet.ip.check = 0;
	if (check != inet_cksum((uint16_t *)&packet.ip, sizeof(packet.ip))) {
		log1("bad IP header checksum, ignoring");
		return -2;
	}

	for (cmsg = CMSG_FIRSTHDR(&msg); cmsg; cmsg = CMSG_NXTHDR(&msg, cmsg)) {
		if (cmsg->cmsg_level == SOL_PACKET
		 && cmsg->cmsg_type == PACKET_AUXDATA
		) {
			/* some VMs don't checksum UDP and TCP data
			 * they send to the same physical machine,
			 * here we detect this case:
			 */
			struct tpacket_auxdata *aux = (void *)CMSG_DATA(cmsg);
			if (aux->tp_status & TP_STATUS_CSUMNOTREADY)
				goto skip_udp_sum_check;
		}
	}

	/* verify UDP checksum. IP header has to be modified for this */
	memset(&packet.ip, 0, offsetof(struct iphdr, protocol));
	/* ip.xx fields which are not memset: protocol, check, saddr, daddr */
	packet.ip.tot_len = packet.udp.len; /* yes, this is needed */
	check = packet.udp.check;
	packet.udp.check = 0;
	if (check && check != inet_cksum((uint16_t *)&packet, bytes)) {
		log1("packet with bad UDP checksum received, ignoring");
		return -2;
	}
 skip_udp_sum_check:

	if (packet.data.cookie != htonl(DHCP_MAGIC)) {
		bb_error_msg("packet with bad magic, ignoring");
		return -2;
	}

	log1("received %s", "a packet");
	udhcp_dump_packet(&packet.data);

	/* Non-negative after the header-size re-check above, and cannot
	 * exceed sizeof(packet.data) (see "can't happen" note above). */
	bytes -= sizeof(packet.ip) + sizeof(packet.udp);
	memcpy(dhcp_pkt, &packet.data, bytes);
	return bytes;
}
|
@@ -531,7 +531,7 @@ static char **fill_envp(struct dhcp_packet *packet)
temp = udhcp_get_option(packet, code);
*curr = xmalloc_optname_optval(temp, &dhcp_optflags[i], opt_name);
putenv(*curr++);
- if (code == DHCP_SUBNET) {
+ if (code == DHCP_SUBNET && temp[-OPT_DATA + OPT_LEN] == 4) {
/* Subnet option: make things like "$ip/$mask" possible */
uint32_t subnet;
move_from_unaligned32(subnet, temp);
|
CWE-125
| null | null |
7,004
|
/* udhcpc applet entry point.
 * Parses command-line options, then runs the DHCP client state machine
 * (INIT_SELECTING -> REQUESTING -> BOUND -> RENEWING -> REBINDING) in a
 * poll() loop over the signal pipe and the listen socket.
 * Returns 0 on normal exit (SIGTERM / -q after lease), 1 if the
 * interface cannot be read or -n is set and no lease was obtained.
 */
int udhcpc_main(int argc UNUSED_PARAM, char **argv)
{
	uint8_t *message;
	const char *str_V, *str_h, *str_F, *str_r;
	IF_FEATURE_UDHCPC_ARPING(const char *str_a = "2000";)
	IF_FEATURE_UDHCP_PORT(char *str_P;)
	void *clientid_mac_ptr;
	llist_t *list_O = NULL;
	llist_t *list_x = NULL;
	int tryagain_timeout = 20;
	int discover_timeout = 3;
	int discover_retries = 3;
	uint32_t server_addr = server_addr; /* for compiler */
	uint32_t requested_ip = 0;
	uint32_t xid = xid; /* for compiler */
	int packet_num;
	int timeout; /* must be signed */
	unsigned already_waited_sec;
	unsigned opt;
	IF_FEATURE_UDHCPC_ARPING(unsigned arpping_ms;)
	int retval;

	setup_common_bufsiz();

	/* Default options */
	IF_FEATURE_UDHCP_PORT(SERVER_PORT = 67;)
	IF_FEATURE_UDHCP_PORT(CLIENT_PORT = 68;)
	client_config.interface = "eth0";
	client_config.script = CONFIG_UDHCPC_DEFAULT_SCRIPT;
	str_V = "udhcp "BB_VER;

	/* Parse command line */
	opt = getopt32long(argv, "^"
		/* O,x: list; -T,-t,-A take numeric param */
		"CV:H:h:F:i:np:qRr:s:T:+t:+SA:+O:*ox:*fB"
		USE_FOR_MMU("b")
		IF_FEATURE_UDHCPC_ARPING("a::")
		IF_FEATURE_UDHCP_PORT("P:")
		"v"
		"\0" IF_UDHCP_VERBOSE("vv") /* -v is a counter */
		, udhcpc_longopts
		, &str_V, &str_h, &str_h, &str_F
		, &client_config.interface, &client_config.pidfile /* i,p */
		, &str_r /* r */
		, &client_config.script /* s */
		, &discover_timeout, &discover_retries, &tryagain_timeout /* T,t,A */
		, &list_O
		, &list_x
		IF_FEATURE_UDHCPC_ARPING(, &str_a)
		IF_FEATURE_UDHCP_PORT(, &str_P)
		IF_UDHCP_VERBOSE(, &dhcp_verbose)
	);
	if (opt & (OPT_h|OPT_H)) {
		bb_error_msg("option -h NAME is deprecated, use -x hostname:NAME");
		client_config.hostname = alloc_dhcp_option(DHCP_HOST_NAME, str_h, 0);
	}
	if (opt & OPT_F) {
		/* FQDN option format: [0x51][len][flags][0][0]<fqdn> */
		client_config.fqdn = alloc_dhcp_option(DHCP_FQDN, str_F, 3);
		/* Flag bits: 0000NEOS
		 * S: 1 = Client requests server to update A RR in DNS as well as PTR
		 * O: 1 = Server indicates to client that DNS has been updated regardless
		 * E: 1 = Name is in DNS format, i.e. <4>host<6>domain<3>com<0>,
		 *    not "host.domain.com". Format 0 is obsolete.
		 * N: 1 = Client requests server to not update DNS (S must be 0 then)
		 * Two [0] bytes which follow are deprecated and must be 0.
		 */
		client_config.fqdn[OPT_DATA + 0] = 0x1;
		/*client_config.fqdn[OPT_DATA + 1] = 0; - xzalloc did it */
		/*client_config.fqdn[OPT_DATA + 2] = 0; */
	}
	if (opt & OPT_r)
		requested_ip = inet_addr(str_r);
#if ENABLE_FEATURE_UDHCP_PORT
	if (opt & OPT_P) {
		CLIENT_PORT = xatou16(str_P);
		SERVER_PORT = CLIENT_PORT - 1;
	}
#endif
	IF_FEATURE_UDHCPC_ARPING(arpping_ms = xatou(str_a);)
	/* -O: build the bitmask of options to request (by number or name) */
	while (list_O) {
		char *optstr = llist_pop(&list_O);
		unsigned n = bb_strtou(optstr, NULL, 0);
		if (errno || n > 254) {
			n = udhcp_option_idx(optstr, dhcp_option_strings);
			n = dhcp_optflags[n].code;
		}
		client_config.opt_mask[n >> 3] |= 1 << (n & 7);
	}
	if (!(opt & OPT_o)) {
		unsigned i, n;
		for (i = 0; (n = dhcp_optflags[i].code) != 0; i++) {
			if (dhcp_optflags[i].flags & OPTION_REQ) {
				client_config.opt_mask[n >> 3] |= 1 << (n & 7);
			}
		}
	}
	/* -x: options to include verbatim in outgoing packets */
	while (list_x) {
		char *optstr = xstrdup(llist_pop(&list_x));
		udhcp_str2optset(optstr, &client_config.options,
				dhcp_optflags, dhcp_option_strings,
				/*dhcpv6:*/ 0
		);
		free(optstr);
	}

	if (udhcp_read_interface(client_config.interface,
			&client_config.ifindex,
			NULL,
			client_config.client_mac)
	) {
		return 1;
	}

	clientid_mac_ptr = NULL;
	if (!(opt & OPT_C) && !udhcp_find_option(client_config.options, DHCP_CLIENT_ID)) {
		/* not suppressed and not set, set the default client ID */
		client_config.clientid = alloc_dhcp_option(DHCP_CLIENT_ID, "", 7);
		client_config.clientid[OPT_DATA] = 1; /* type: ethernet */
		clientid_mac_ptr = client_config.clientid + OPT_DATA+1;
		memcpy(clientid_mac_ptr, client_config.client_mac, 6);
	}
	if (str_V[0] != '\0') {
		client_config.vendorclass = alloc_dhcp_option(DHCP_VENDOR, str_V, 0);
	}
#if !BB_MMU
	/* on NOMMU reexec (i.e., background) early */
	if (!(opt & OPT_f)) {
		bb_daemonize_or_rexec(0 /* flags */, argv);
		logmode = LOGMODE_NONE;
	}
#endif
	if (opt & OPT_S) {
		openlog(applet_name, LOG_PID, LOG_DAEMON);
		logmode |= LOGMODE_SYSLOG;
	}

	/* Make sure fd 0,1,2 are open */
	bb_sanitize_stdio();
	/* Create pidfile */
	write_pidfile(client_config.pidfile);
	/* Goes to stdout (unless NOMMU) and possibly syslog */
	bb_error_msg("started, v"BB_VER);
	/* Set up the signal pipe */
	udhcp_sp_setup();
	/* We want random_xid to be random... */
	srand(monotonic_us());

	state = INIT_SELECTING;
	udhcp_run_script(NULL, "deconfig");
	change_listen_mode(LISTEN_RAW);
	packet_num = 0;
	timeout = 0;
	already_waited_sec = 0;

	/* Main event loop. select() waits on signal pipe and possibly
	 * on sockfd.
	 * "continue" statements in code below jump to the top of the loop.
	 */
	for (;;) {
		int tv;
		struct pollfd pfds[2];
		struct dhcp_packet packet;
		/* silence "uninitialized!" warning */
		unsigned timestamp_before_wait = timestamp_before_wait;

		/* Was opening raw or udp socket here
		 * if (listen_mode != LISTEN_NONE && sockfd < 0),
		 * but on fast network renew responses return faster
		 * than we open sockets. Thus this code is moved
		 * to change_listen_mode(). Thus we open listen socket
		 * BEFORE we send renew request (see "case BOUND:"). */

		udhcp_sp_fd_set(pfds, sockfd);

		tv = timeout - already_waited_sec;
		retval = 0;
		/* If we already timed out, fall through with retval = 0, else... */
		if (tv > 0) {
			log1("waiting %u seconds", tv);
			timestamp_before_wait = (unsigned)monotonic_sec();
			retval = poll(pfds, 2, tv < INT_MAX/1000 ? tv * 1000 : INT_MAX);
			if (retval < 0) {
				/* EINTR? A signal was caught, don't panic */
				if (errno == EINTR) {
					already_waited_sec += (unsigned)monotonic_sec() - timestamp_before_wait;
					continue;
				}
				/* Else: an error occurred, panic! */
				bb_perror_msg_and_die("poll");
			}
		}

		/* If timeout dropped to zero, time to become active:
		 * resend discover/renew/whatever
		 */
		if (retval == 0) {
			/* When running on a bridge, the ifindex may have changed
			 * (e.g. if member interfaces were added/removed
			 * or if the status of the bridge changed).
			 * Refresh ifindex and client_mac:
			 */
			if (udhcp_read_interface(client_config.interface,
					&client_config.ifindex,
					NULL,
					client_config.client_mac)
			) {
				goto ret0; /* iface is gone? */
			}
			if (clientid_mac_ptr)
				memcpy(clientid_mac_ptr, client_config.client_mac, 6);

			/* We will restart the wait in any case */
			already_waited_sec = 0;

			/* Timeout expired: act according to the current state */
			switch (state) {
			case INIT_SELECTING:
				if (!discover_retries || packet_num < discover_retries) {
					if (packet_num == 0)
						xid = random_xid();
					/* broadcast */
					send_discover(xid, requested_ip);
					timeout = discover_timeout;
					packet_num++;
					continue;
				}
 leasefail:
				udhcp_run_script(NULL, "leasefail");
#if BB_MMU /* -b is not supported on NOMMU */
				if (opt & OPT_b) { /* background if no lease */
					bb_error_msg("no lease, forking to background");
					client_background();
					/* do not background again! */
					opt = ((opt & ~OPT_b) | OPT_f);
				} else
#endif
				if (opt & OPT_n) { /* abort if no lease */
					bb_error_msg("no lease, failing");
					retval = 1;
					goto ret;
				}
				/* wait before trying again */
				timeout = tryagain_timeout;
				packet_num = 0;
				continue;
			case REQUESTING:
				if (packet_num < 3) {
					/* send broadcast select packet */
					send_select(xid, server_addr, requested_ip);
					timeout = discover_timeout;
					packet_num++;
					continue;
				}
				/* Timed out, go back to init state.
				 * "discover...select...discover..." loops
				 * were seen in the wild. Treat them similarly
				 * to "no response to discover" case */
				change_listen_mode(LISTEN_RAW);
				state = INIT_SELECTING;
				goto leasefail;
			case BOUND:
				/* 1/2 lease passed, enter renewing state */
				state = RENEWING;
				client_config.first_secs = 0; /* make secs field count from 0 */
				change_listen_mode(LISTEN_KERNEL);
				log1("entering renew state");
				/* fall right through */
			case RENEW_REQUESTED: /* manual (SIGUSR1) renew */
			case_RENEW_REQUESTED:
			case RENEWING:
				if (timeout >= 60) {
					/* send a unicast renew request */
					/* Sometimes observed to fail (EADDRNOTAVAIL) to bind
					 * a new UDP socket for sending inside send_renew.
					 * I hazard to guess existing listening socket
					 * is somehow conflicting with it, but why is it
					 * not deterministic then?! Strange.
					 * Anyway, it does recover by eventually failing through
					 * into INIT_SELECTING state.
					 */
					if (send_renew(xid, server_addr, requested_ip) >= 0) {
						timeout >>= 1;
						continue;
					}
					/* else: error sending.
					 * example: ENETUNREACH seen with server
					 * which gave us bogus server ID 1.1.1.1
					 * which wasn't reachable (and probably did not exist).
					 */
				}
				/* Timed out or error, enter rebinding state */
				log1("entering rebinding state");
				state = REBINDING;
				/* fall right through */
			case REBINDING:
				/* Switch to bcast receive */
				change_listen_mode(LISTEN_RAW);
				/* Lease is *really* about to run out,
				 * try to find DHCP server using broadcast */
				if (timeout > 0) {
					/* send a broadcast renew request */
					send_renew(xid, 0 /*INADDR_ANY*/, requested_ip);
					timeout >>= 1;
					continue;
				}
				/* Timed out, enter init state */
				bb_error_msg("lease lost, entering init state");
				udhcp_run_script(NULL, "deconfig");
				state = INIT_SELECTING;
				client_config.first_secs = 0; /* make secs field count from 0 */
				/*timeout = 0; - already is */
				packet_num = 0;
				continue;
			/* case RELEASED: */
			}
			/* yah, I know, *you* say it would never happen */
			timeout = INT_MAX;
			continue; /* back to main loop */
		} /* if poll timed out */

		/* poll() didn't timeout, something happened */

		/* Is it a signal? */
		switch (udhcp_sp_read()) {
		case SIGUSR1:
			client_config.first_secs = 0; /* make secs field count from 0 */
			already_waited_sec = 0;
			perform_renew();
			if (state == RENEW_REQUESTED) {
				/* We might be either on the same network
				 * (in which case renew might work),
				 * or we might be on a completely different one
				 * (in which case renew won't ever succeed).
				 * For the second case, must make sure timeout
				 * is not too big, or else we can send
				 * futile renew requests for hours.
				 */
				if (timeout > 60)
					timeout = 60;
				goto case_RENEW_REQUESTED;
			}
			/* Start things over */
			packet_num = 0;
			/* Kill any timeouts, user wants this to hurry along */
			timeout = 0;
			continue;
		case SIGUSR2:
			perform_release(server_addr, requested_ip);
			timeout = INT_MAX;
			continue;
		case SIGTERM:
			bb_error_msg("received %s", "SIGTERM");
			goto ret0;
		}

		/* Is it a packet? */
		if (!pfds[1].revents)
			continue; /* no */

		{
			int len;

			/* A packet is ready, read it */
			if (listen_mode == LISTEN_KERNEL)
				len = udhcp_recv_kernel_packet(&packet, sockfd);
			else
				len = udhcp_recv_raw_packet(&packet, sockfd);
			if (len == -1) {
				/* Error is severe, reopen socket */
				bb_error_msg("read error: "STRERROR_FMT", reopening socket" STRERROR_ERRNO);
				sleep(discover_timeout); /* 3 seconds by default */
				change_listen_mode(listen_mode); /* just close and reopen */
			}
			/* If this packet will turn out to be unrelated/bogus,
			 * we will go back and wait for next one.
			 * Be sure timeout is properly decreased. */
			already_waited_sec += (unsigned)monotonic_sec() - timestamp_before_wait;
			if (len < 0)
				continue;
		}

		if (packet.xid != xid) {
			log1("xid %x (our is %x), ignoring packet",
				(unsigned)packet.xid, (unsigned)xid);
			continue;
		}

		/* Ignore packets that aren't for us */
		if (packet.hlen != 6
		 || memcmp(packet.chaddr, client_config.client_mac, 6) != 0
		) {
			log1("chaddr does not match, ignoring packet"); // log2?
			continue;
		}

		message = udhcp_get_option(&packet, DHCP_MESSAGE_TYPE);
		if (message == NULL) {
			bb_error_msg("no message type option, ignoring packet");
			continue;
		}

		/* Received a matching packet: advance the state machine */
		switch (state) {
		case INIT_SELECTING:
			/* Must be a DHCPOFFER */
			if (*message == DHCPOFFER) {
				uint8_t *temp;

				/* What exactly is server's IP? There are several values.
				 * Example DHCP offer captured with tcpdump:
				 *
				 * 10.34.25.254:67 > 10.34.25.202:68 // IP header's src
				 * BOOTP fields:
				 * Your-IP 10.34.25.202
				 * Server-IP 10.34.32.125 // "next server" IP
				 * Gateway-IP 10.34.25.254 // relay's address (if DHCP relays are in use)
				 * DHCP options:
				 * DHCP-Message Option 53, length 1: Offer
				 * Server-ID Option 54, length 4: 10.34.255.7 // "server ID"
				 * Default-Gateway Option 3, length 4: 10.34.25.254 // router
				 *
				 * We think that real server IP (one to use in renew/release)
				 * is one in Server-ID option. But I am not 100% sure.
				 * IP header's src and Gateway-IP (same in this example)
				 * might work too.
				 * "Next server" and router are definitely wrong ones to use, though...
				 */
				/* We used to ignore packets without DHCP_SERVER_ID.
				 * I've got user reports from people who run "address-less" servers.
				 * They either supply DHCP_SERVER_ID of 0.0.0.0 or don't supply it at all.
				 * They say ISC DHCP client supports this case.
				 */
				server_addr = 0;
				temp = udhcp_get_option32(&packet, DHCP_SERVER_ID);
				if (!temp) {
					bb_error_msg("no server ID, using 0.0.0.0");
				} else {
					/* it IS unaligned sometimes, don't "optimize" */
					move_from_unaligned32(server_addr, temp);
				}
				/*xid = packet.xid; - already is */
				requested_ip = packet.yiaddr;

				/* enter requesting state */
				state = REQUESTING;
				timeout = 0;
				packet_num = 0;
				already_waited_sec = 0;
			}
			continue;
		case REQUESTING:
		case RENEWING:
		case RENEW_REQUESTED:
		case REBINDING:
			if (*message == DHCPACK) {
				unsigned start;
				uint32_t lease_seconds;
				struct in_addr temp_addr;
				uint8_t *temp;

				temp = udhcp_get_option32(&packet, DHCP_LEASE_TIME);
				if (!temp) {
					bb_error_msg("no lease time with ACK, using 1 hour lease");
					lease_seconds = 60 * 60;
				} else {
					/* it IS unaligned sometimes, don't "optimize" */
					move_from_unaligned32(lease_seconds, temp);
					lease_seconds = ntohl(lease_seconds);
					/* paranoia: must not be too small and not prone to overflows */
					/* timeout > 60 - ensures at least one unicast renew attempt */
					if (lease_seconds < 2 * 61)
						lease_seconds = 2 * 61;
				}
#if ENABLE_FEATURE_UDHCPC_ARPING
				if (opt & OPT_a) {
					/* RFC 2131 3.1 paragraph 5:
					 * "The client receives the DHCPACK message with configuration
					 * parameters. The client SHOULD perform a final check on the
					 * parameters (e.g., ARP for allocated network address), and notes
					 * the duration of the lease specified in the DHCPACK message. At this
					 * point, the client is configured. If the client detects that the
					 * address is already in use (e.g., through the use of ARP),
					 * the client MUST send a DHCPDECLINE message to the server and restarts
					 * the configuration process..." */
					if (!arpping(packet.yiaddr,
							NULL,
							(uint32_t) 0,
							client_config.client_mac,
							client_config.interface,
							arpping_ms)
					) {
						bb_error_msg("offered address is in use "
							"(got ARP reply), declining");
						send_decline(/*xid,*/ server_addr, packet.yiaddr);

						if (state != REQUESTING)
							udhcp_run_script(NULL, "deconfig");
						change_listen_mode(LISTEN_RAW);
						state = INIT_SELECTING;
						client_config.first_secs = 0; /* make secs field count from 0 */
						requested_ip = 0;
						timeout = tryagain_timeout;
						packet_num = 0;
						already_waited_sec = 0;
						continue; /* back to main loop */
					}
				}
#endif
				/* enter bound state */
				temp_addr.s_addr = packet.yiaddr;
				bb_error_msg("lease of %s obtained, lease time %u",
					inet_ntoa(temp_addr), (unsigned)lease_seconds);
				requested_ip = packet.yiaddr;

				start = monotonic_sec();
				udhcp_run_script(&packet, state == REQUESTING ? "bound" : "renew");
				already_waited_sec = (unsigned)monotonic_sec() - start;
				timeout = lease_seconds / 2;
				if ((unsigned)timeout < already_waited_sec) {
					/* Something went wrong. Back to discover state */
					timeout = already_waited_sec = 0;
				}

				state = BOUND;
				change_listen_mode(LISTEN_NONE);
				if (opt & OPT_q) { /* quit after lease */
					goto ret0;
				}
				/* future renew failures should not exit (JM) */
				opt &= ~OPT_n;
#if BB_MMU /* NOMMU case backgrounded earlier */
				if (!(opt & OPT_f)) {
					client_background();
					/* do not background again! */
					opt = ((opt & ~OPT_b) | OPT_f);
				}
#endif
				/* make future renew packets use different xid */
				/* xid = random_xid(); ...but why bother? */

				continue; /* back to main loop */
			}
			if (*message == DHCPNAK) {
				/* If network has more than one DHCP server,
				 * "wrong" server can reply first, with a NAK.
				 * Do not interpret it as a NAK from "our" server.
				 */
				if (server_addr != 0) {
					uint32_t svid;
					uint8_t *temp;

					temp = udhcp_get_option32(&packet, DHCP_SERVER_ID);
					if (!temp) {
 non_matching_svid:
						log1("received DHCP NAK with wrong"
							" server ID, ignoring packet");
						continue;
					}
					move_from_unaligned32(svid, temp);
					if (svid != server_addr)
						goto non_matching_svid;
				}
				/* return to init state */
				bb_error_msg("received %s", "DHCP NAK");
				udhcp_run_script(&packet, "nak");
				if (state != REQUESTING)
					udhcp_run_script(NULL, "deconfig");
				change_listen_mode(LISTEN_RAW);
				sleep(3); /* avoid excessive network traffic */
				state = INIT_SELECTING;
				client_config.first_secs = 0; /* make secs field count from 0 */
				requested_ip = 0;
				timeout = 0;
				packet_num = 0;
				already_waited_sec = 0;
			}
			continue;
		/* case BOUND: - ignore all packets */
		/* case RELEASED: - ignore all packets */
		}
		/* back to main loop */
	} /* for (;;) - main loop ends */

 ret0:
	if (opt & OPT_R) /* release on quit */
		perform_release(server_addr, requested_ip);
	retval = 0;
 ret:
	/*if (client_config.pidfile) - remove_pidfile has its own check */
	remove_pidfile(client_config.pidfile);
	return retval;
}
|
+Info
| 0
|
/* DHCP client applet entry point.
 * Parses the command line, then runs a poll()-driven state machine:
 * INIT_SELECTING (discover) -> REQUESTING (select) -> BOUND ->
 * RENEWING -> REBINDING -> back to INIT_SELECTING on lease loss.
 * The user script is invoked on state transitions ("deconfig", "bound",
 * "renew", "nak", "leasefail").
 * Returns 0 on normal termination, 1 on failure (e.g. -n with no lease,
 * or the interface disappearing).
 */
int udhcpc_main(int argc UNUSED_PARAM, char **argv)
{
	uint8_t *message;
	const char *str_V, *str_h, *str_F, *str_r;
	IF_FEATURE_UDHCPC_ARPING(const char *str_a = "2000";)
	IF_FEATURE_UDHCP_PORT(char *str_P;)
	void *clientid_mac_ptr;
	llist_t *list_O = NULL;
	llist_t *list_x = NULL;
	int tryagain_timeout = 20;
	int discover_timeout = 3;
	int discover_retries = 3;
	uint32_t server_addr = server_addr; /* for compiler */
	uint32_t requested_ip = 0;
	uint32_t xid = xid; /* for compiler */
	int packet_num;
	int timeout; /* must be signed */
	unsigned already_waited_sec;
	unsigned opt;
	IF_FEATURE_UDHCPC_ARPING(unsigned arpping_ms;)
	int retval;
	setup_common_bufsiz();
	/* Default options */
	IF_FEATURE_UDHCP_PORT(SERVER_PORT = 67;)
	IF_FEATURE_UDHCP_PORT(CLIENT_PORT = 68;)
	client_config.interface = "eth0";
	client_config.script = CONFIG_UDHCPC_DEFAULT_SCRIPT;
	str_V = "udhcp "BB_VER;
	/* Parse command line */
	opt = getopt32long(argv, "^"
		/* O,x: list; -T,-t,-A take numeric param */
		"CV:H:h:F:i:np:qRr:s:T:+t:+SA:+O:*ox:*fB"
		USE_FOR_MMU("b")
		IF_FEATURE_UDHCPC_ARPING("a::")
		IF_FEATURE_UDHCP_PORT("P:")
		"v"
		"\0" IF_UDHCP_VERBOSE("vv") /* -v is a counter */
		, udhcpc_longopts
		, &str_V, &str_h, &str_h, &str_F
		, &client_config.interface, &client_config.pidfile /* i,p */
		, &str_r /* r */
		, &client_config.script /* s */
		, &discover_timeout, &discover_retries, &tryagain_timeout /* T,t,A */
		, &list_O
		, &list_x
		IF_FEATURE_UDHCPC_ARPING(, &str_a)
		IF_FEATURE_UDHCP_PORT(, &str_P)
		IF_UDHCP_VERBOSE(, &dhcp_verbose)
	);
	if (opt & (OPT_h|OPT_H)) {
		bb_error_msg("option -h NAME is deprecated, use -x hostname:NAME");
		client_config.hostname = alloc_dhcp_option(DHCP_HOST_NAME, str_h, 0);
	}
	if (opt & OPT_F) {
		/* FQDN option format: [0x51][len][flags][0][0]<fqdn> */
		client_config.fqdn = alloc_dhcp_option(DHCP_FQDN, str_F, 3);
		/* Flag bits: 0000NEOS
		 * S: 1 = Client requests server to update A RR in DNS as well as PTR
		 * O: 1 = Server indicates to client that DNS has been updated regardless
		 * E: 1 = Name is in DNS format, i.e. <4>host<6>domain<3>com<0>,
		 *        not "host.domain.com". Format 0 is obsolete.
		 * N: 1 = Client requests server to not update DNS (S must be 0 then)
		 * Two [0] bytes which follow are deprecated and must be 0.
		 */
		client_config.fqdn[OPT_DATA + 0] = 0x1;
		/*client_config.fqdn[OPT_DATA + 1] = 0; - xzalloc did it */
		/*client_config.fqdn[OPT_DATA + 2] = 0; */
	}
	if (opt & OPT_r)
		requested_ip = inet_addr(str_r);
#if ENABLE_FEATURE_UDHCP_PORT
	if (opt & OPT_P) {
		CLIENT_PORT = xatou16(str_P);
		SERVER_PORT = CLIENT_PORT - 1;
	}
#endif
	IF_FEATURE_UDHCPC_ARPING(arpping_ms = xatou(str_a);)
	/* -O OPT: mark additional options to request from the server.
	 * Accepts either a numeric option code or a symbolic name. */
	while (list_O) {
		char *optstr = llist_pop(&list_O);
		unsigned n = bb_strtou(optstr, NULL, 0);
		if (errno || n > 254) {
			n = udhcp_option_idx(optstr, dhcp_option_strings);
			n = dhcp_optflags[n].code;
		}
		client_config.opt_mask[n >> 3] |= 1 << (n & 7);
	}
	if (!(opt & OPT_o)) {
		/* Without -o, request every option flagged OPTION_REQ by default */
		unsigned i, n;
		for (i = 0; (n = dhcp_optflags[i].code) != 0; i++) {
			if (dhcp_optflags[i].flags & OPTION_REQ) {
				client_config.opt_mask[n >> 3] |= 1 << (n & 7);
			}
		}
	}
	/* -x OPT:VAL: options to include in the packets we send */
	while (list_x) {
		char *optstr = xstrdup(llist_pop(&list_x));
		udhcp_str2optset(optstr, &client_config.options,
				dhcp_optflags, dhcp_option_strings,
				/*dhcpv6:*/ 0
		);
		free(optstr);
	}
	if (udhcp_read_interface(client_config.interface,
			&client_config.ifindex,
			NULL,
			client_config.client_mac)
	) {
		return 1;
	}
	clientid_mac_ptr = NULL;
	if (!(opt & OPT_C) && !udhcp_find_option(client_config.options, DHCP_CLIENT_ID)) {
		/* not suppressed and not set, set the default client ID */
		client_config.clientid = alloc_dhcp_option(DHCP_CLIENT_ID, "", 7);
		client_config.clientid[OPT_DATA] = 1; /* type: ethernet */
		clientid_mac_ptr = client_config.clientid + OPT_DATA+1;
		memcpy(clientid_mac_ptr, client_config.client_mac, 6);
	}
	if (str_V[0] != '\0') {
		client_config.vendorclass = alloc_dhcp_option(DHCP_VENDOR, str_V, 0);
	}
#if !BB_MMU
	/* on NOMMU reexec (i.e., background) early */
	if (!(opt & OPT_f)) {
		bb_daemonize_or_rexec(0 /* flags */, argv);
		logmode = LOGMODE_NONE;
	}
#endif
	if (opt & OPT_S) {
		openlog(applet_name, LOG_PID, LOG_DAEMON);
		logmode |= LOGMODE_SYSLOG;
	}
	/* Make sure fd 0,1,2 are open */
	bb_sanitize_stdio();
	/* Create pidfile */
	write_pidfile(client_config.pidfile);
	/* Goes to stdout (unless NOMMU) and possibly syslog */
	bb_error_msg("started, v"BB_VER);
	/* Set up the signal pipe */
	udhcp_sp_setup();
	/* We want random_xid to be random... */
	srand(monotonic_us());
	state = INIT_SELECTING;
	udhcp_run_script(NULL, "deconfig");
	change_listen_mode(LISTEN_RAW);
	packet_num = 0;
	timeout = 0;
	already_waited_sec = 0;
	/* Main event loop. select() waits on signal pipe and possibly
	 * on sockfd.
	 * "continue" statements in code below jump to the top of the loop.
	 */
	for (;;) {
		int tv;
		struct pollfd pfds[2];
		struct dhcp_packet packet;
		/* silence "uninitialized!" warning */
		unsigned timestamp_before_wait = timestamp_before_wait;
		/* Was opening raw or udp socket here
		 * if (listen_mode != LISTEN_NONE && sockfd < 0),
		 * but on fast network renew responses return faster
		 * than we open sockets. Thus this code is moved
		 * to change_listen_mode(). Thus we open listen socket
		 * BEFORE we send renew request (see "case BOUND:"). */
		udhcp_sp_fd_set(pfds, sockfd);
		tv = timeout - already_waited_sec;
		retval = 0;
		/* If we already timed out, fall through with retval = 0, else... */
		if (tv > 0) {
			log1("waiting %u seconds", tv);
			timestamp_before_wait = (unsigned)monotonic_sec();
			retval = poll(pfds, 2, tv < INT_MAX/1000 ? tv * 1000 : INT_MAX);
			if (retval < 0) {
				/* EINTR? A signal was caught, don't panic */
				if (errno == EINTR) {
					already_waited_sec += (unsigned)monotonic_sec() - timestamp_before_wait;
					continue;
				}
				/* Else: an error occurred, panic! */
				bb_perror_msg_and_die("poll");
			}
		}
		/* If timeout dropped to zero, time to become active:
		 * resend discover/renew/whatever
		 */
		if (retval == 0) {
			/* When running on a bridge, the ifindex may have changed
			 * (e.g. if member interfaces were added/removed
			 * or if the status of the bridge changed).
			 * Refresh ifindex and client_mac:
			 */
			if (udhcp_read_interface(client_config.interface,
					&client_config.ifindex,
					NULL,
					client_config.client_mac)
			) {
				goto ret0; /* iface is gone? */
			}
			if (clientid_mac_ptr)
				memcpy(clientid_mac_ptr, client_config.client_mac, 6);
			/* We will restart the wait in any case */
			already_waited_sec = 0;
			switch (state) {
			case INIT_SELECTING:
				if (!discover_retries || packet_num < discover_retries) {
					if (packet_num == 0)
						xid = random_xid();
					/* broadcast */
					send_discover(xid, requested_ip);
					timeout = discover_timeout;
					packet_num++;
					continue;
				}
 leasefail:
				udhcp_run_script(NULL, "leasefail");
#if BB_MMU /* -b is not supported on NOMMU */
				if (opt & OPT_b) { /* background if no lease */
					bb_error_msg("no lease, forking to background");
					client_background();
					/* do not background again! */
					opt = ((opt & ~OPT_b) | OPT_f);
				} else
#endif
				if (opt & OPT_n) { /* abort if no lease */
					bb_error_msg("no lease, failing");
					retval = 1;
					goto ret;
				}
				/* wait before trying again */
				timeout = tryagain_timeout;
				packet_num = 0;
				continue;
			case REQUESTING:
				if (packet_num < 3) {
					/* send broadcast select packet */
					send_select(xid, server_addr, requested_ip);
					timeout = discover_timeout;
					packet_num++;
					continue;
				}
				/* Timed out, go back to init state.
				 * "discover...select...discover..." loops
				 * were seen in the wild. Treat them similarly
				 * to "no response to discover" case */
				change_listen_mode(LISTEN_RAW);
				state = INIT_SELECTING;
				goto leasefail;
			case BOUND:
				/* 1/2 lease passed, enter renewing state */
				state = RENEWING;
				client_config.first_secs = 0; /* make secs field count from 0 */
				change_listen_mode(LISTEN_KERNEL);
				log1("entering renew state");
				/* fall right through */
			case RENEW_REQUESTED: /* manual (SIGUSR1) renew */
			case_RENEW_REQUESTED:
			case RENEWING:
				if (timeout >= 60) {
					/* send an unicast renew request */
					/* Sometimes observed to fail (EADDRNOTAVAIL) to bind
					 * a new UDP socket for sending inside send_renew.
					 * I hazard to guess existing listening socket
					 * is somehow conflicting with it, but why is it
					 * not deterministic then?! Strange.
					 * Anyway, it does recover by eventually failing through
					 * into INIT_SELECTING state.
					 */
					if (send_renew(xid, server_addr, requested_ip) >= 0) {
						timeout >>= 1;
						continue;
					}
					/* else: error sending.
					 * example: ENETUNREACH seen with server
					 * which gave us bogus server ID 1.1.1.1
					 * which wasn't reachable (and probably did not exist).
					 */
				}
				/* Timed out or error, enter rebinding state */
				log1("entering rebinding state");
				state = REBINDING;
				/* fall right through */
			case REBINDING:
				/* Switch to bcast receive */
				change_listen_mode(LISTEN_RAW);
				/* Lease is *really* about to run out,
				 * try to find DHCP server using broadcast */
				if (timeout > 0) {
					/* send a broadcast renew request */
					send_renew(xid, 0 /*INADDR_ANY*/, requested_ip);
					timeout >>= 1;
					continue;
				}
				/* Timed out, enter init state */
				bb_error_msg("lease lost, entering init state");
				udhcp_run_script(NULL, "deconfig");
				state = INIT_SELECTING;
				client_config.first_secs = 0; /* make secs field count from 0 */
				/*timeout = 0; - already is */
				packet_num = 0;
				continue;
			/* case RELEASED: */
			}
			/* yah, I know, *you* say it would never happen */
			timeout = INT_MAX;
			continue; /* back to main loop */
		} /* if poll timed out */
		/* poll() didn't timeout, something happened */
		/* Is it a signal? */
		switch (udhcp_sp_read()) {
		case SIGUSR1:
			client_config.first_secs = 0; /* make secs field count from 0 */
			already_waited_sec = 0;
			perform_renew();
			if (state == RENEW_REQUESTED) {
				/* We might be either on the same network
				 * (in which case renew might work),
				 * or we might be on a completely different one
				 * (in which case renew won't ever succeed).
				 * For the second case, must make sure timeout
				 * is not too big, or else we can send
				 * futile renew requests for hours.
				 */
				if (timeout > 60)
					timeout = 60;
				goto case_RENEW_REQUESTED;
			}
			/* Start things over */
			packet_num = 0;
			/* Kill any timeouts, user wants this to hurry along */
			timeout = 0;
			continue;
		case SIGUSR2:
			perform_release(server_addr, requested_ip);
			timeout = INT_MAX;
			continue;
		case SIGTERM:
			bb_error_msg("received %s", "SIGTERM");
			goto ret0;
		}
		/* Is it a packet? */
		if (!pfds[1].revents)
			continue; /* no */
		{
			int len;
			/* A packet is ready, read it */
			if (listen_mode == LISTEN_KERNEL)
				len = udhcp_recv_kernel_packet(&packet, sockfd);
			else
				len = udhcp_recv_raw_packet(&packet, sockfd);
			if (len == -1) {
				/* Error is severe, reopen socket */
				bb_error_msg("read error: "STRERROR_FMT", reopening socket" STRERROR_ERRNO);
				sleep(discover_timeout); /* 3 seconds by default */
				change_listen_mode(listen_mode); /* just close and reopen */
			}
			/* If this packet will turn out to be unrelated/bogus,
			 * we will go back and wait for next one.
			 * Be sure timeout is properly decreased. */
			already_waited_sec += (unsigned)monotonic_sec() - timestamp_before_wait;
			if (len < 0)
				continue;
		}
		/* Transaction ID must match the one we sent */
		if (packet.xid != xid) {
			log1("xid %x (our is %x), ignoring packet",
				(unsigned)packet.xid, (unsigned)xid);
			continue;
		}
		/* Ignore packets that aren't for us */
		if (packet.hlen != 6
		 || memcmp(packet.chaddr, client_config.client_mac, 6) != 0
		) {
			log1("chaddr does not match, ignoring packet"); // log2?
			continue;
		}
		message = udhcp_get_option(&packet, DHCP_MESSAGE_TYPE);
		if (message == NULL) {
			bb_error_msg("no message type option, ignoring packet");
			continue;
		}
		switch (state) {
		case INIT_SELECTING:
			/* Must be a DHCPOFFER */
			if (*message == DHCPOFFER) {
				uint8_t *temp;
				/* What exactly is server's IP? There are several values.
				 * Example DHCP offer captured with tchdump:
				 *
				 * 10.34.25.254:67 > 10.34.25.202:68 // IP header's src
				 * BOOTP fields:
				 * Your-IP 10.34.25.202
				 * Server-IP 10.34.32.125 // "next server" IP
				 * Gateway-IP 10.34.25.254 // relay's address (if DHCP relays are in use)
				 * DHCP options:
				 * DHCP-Message Option 53, length 1: Offer
				 * Server-ID Option 54, length 4: 10.34.255.7 // "server ID"
				 * Default-Gateway Option 3, length 4: 10.34.25.254 // router
				 *
				 * We think that real server IP (one to use in renew/release)
				 * is one in Server-ID option. But I am not 100% sure.
				 * IP header's src and Gateway-IP (same in this example)
				 * might work too.
				 * "Next server" and router are definitely wrong ones to use, though...
				 */
				/* We used to ignore pcakets without DHCP_SERVER_ID.
				 * I've got user reports from people who run "address-less" servers.
				 * They either supply DHCP_SERVER_ID of 0.0.0.0 or don't supply it at all.
				 * They say ISC DHCP client supports this case.
				 */
				server_addr = 0;
				temp = udhcp_get_option32(&packet, DHCP_SERVER_ID);
				if (!temp) {
					bb_error_msg("no server ID, using 0.0.0.0");
				} else {
					/* it IS unaligned sometimes, don't "optimize" */
					move_from_unaligned32(server_addr, temp);
				}
				/*xid = packet.xid; - already is */
				requested_ip = packet.yiaddr;
				/* enter requesting state */
				state = REQUESTING;
				timeout = 0;
				packet_num = 0;
				already_waited_sec = 0;
			}
			continue;
		case REQUESTING:
		case RENEWING:
		case RENEW_REQUESTED:
		case REBINDING:
			if (*message == DHCPACK) {
				unsigned start;
				uint32_t lease_seconds;
				struct in_addr temp_addr;
				uint8_t *temp;
				temp = udhcp_get_option32(&packet, DHCP_LEASE_TIME);
				if (!temp) {
					bb_error_msg("no lease time with ACK, using 1 hour lease");
					lease_seconds = 60 * 60;
				} else {
					/* it IS unaligned sometimes, don't "optimize" */
					move_from_unaligned32(lease_seconds, temp);
					lease_seconds = ntohl(lease_seconds);
					/* paranoia: must not be too small and not prone to overflows */
					/* timeout > 60 - ensures at least one unicast renew attempt */
					if (lease_seconds < 2 * 61)
						lease_seconds = 2 * 61;
				}
#if ENABLE_FEATURE_UDHCPC_ARPING
				if (opt & OPT_a) {
					/* RFC 2131 3.1 paragraph 5:
					 * "The client receives the DHCPACK message with configuration
					 * parameters. The client SHOULD perform a final check on the
					 * parameters (e.g., ARP for allocated network address), and notes
					 * the duration of the lease specified in the DHCPACK message. At this
					 * point, the client is configured. If the client detects that the
					 * address is already in use (e.g., through the use of ARP),
					 * the client MUST send a DHCPDECLINE message to the server and restarts
					 * the configuration process..." */
					if (!arpping(packet.yiaddr,
							NULL,
							(uint32_t) 0,
							client_config.client_mac,
							client_config.interface,
							arpping_ms)
					) {
						bb_error_msg("offered address is in use "
							"(got ARP reply), declining");
						send_decline(/*xid,*/ server_addr, packet.yiaddr);
						if (state != REQUESTING)
							udhcp_run_script(NULL, "deconfig");
						change_listen_mode(LISTEN_RAW);
						state = INIT_SELECTING;
						client_config.first_secs = 0; /* make secs field count from 0 */
						requested_ip = 0;
						timeout = tryagain_timeout;
						packet_num = 0;
						already_waited_sec = 0;
						continue; /* back to main loop */
					}
				}
#endif
				/* enter bound state */
				temp_addr.s_addr = packet.yiaddr;
				bb_error_msg("lease of %s obtained, lease time %u",
					inet_ntoa(temp_addr), (unsigned)lease_seconds);
				requested_ip = packet.yiaddr;
				/* Account for the time the script takes to run,
				 * so the renew timer stays accurate */
				start = monotonic_sec();
				udhcp_run_script(&packet, state == REQUESTING ? "bound" : "renew");
				already_waited_sec = (unsigned)monotonic_sec() - start;
				timeout = lease_seconds / 2;
				if ((unsigned)timeout < already_waited_sec) {
					/* Something went wrong. Back to discover state */
					timeout = already_waited_sec = 0;
				}
				state = BOUND;
				change_listen_mode(LISTEN_NONE);
				if (opt & OPT_q) { /* quit after lease */
					goto ret0;
				}
				/* future renew failures should not exit (JM) */
				opt &= ~OPT_n;
#if BB_MMU /* NOMMU case backgrounded earlier */
				if (!(opt & OPT_f)) {
					client_background();
					/* do not background again! */
					opt = ((opt & ~OPT_b) | OPT_f);
				}
#endif
				/* make future renew packets use different xid */
				/* xid = random_xid(); ...but why bother? */
				continue; /* back to main loop */
			}
			if (*message == DHCPNAK) {
				/* If network has more than one DHCP server,
				 * "wrong" server can reply first, with a NAK.
				 * Do not interpret it as a NAK from "our" server.
				 */
				if (server_addr != 0) {
					uint32_t svid;
					uint8_t *temp;
					temp = udhcp_get_option32(&packet, DHCP_SERVER_ID);
					if (!temp) {
 non_matching_svid:
						log1("received DHCP NAK with wrong"
							" server ID, ignoring packet");
						continue;
					}
					move_from_unaligned32(svid, temp);
					if (svid != server_addr)
						goto non_matching_svid;
				}
				/* return to init state */
				bb_error_msg("received %s", "DHCP NAK");
				udhcp_run_script(&packet, "nak");
				if (state != REQUESTING)
					udhcp_run_script(NULL, "deconfig");
				change_listen_mode(LISTEN_RAW);
				sleep(3); /* avoid excessive network traffic */
				state = INIT_SELECTING;
				client_config.first_secs = 0; /* make secs field count from 0 */
				requested_ip = 0;
				timeout = 0;
				packet_num = 0;
				already_waited_sec = 0;
			}
			continue;
		/* case BOUND: - ignore all packets */
		/* case RELEASED: - ignore all packets */
		}
		/* back to main loop */
	} /* for (;;) - main loop ends */
 ret0:
	if (opt & OPT_R) /* release on quit */
		perform_release(server_addr, requested_ip);
	retval = 0;
 ret:
	/*if (client_config.pidfile) - remove_pidfile has its own check */
	remove_pidfile(client_config.pidfile);
	return retval;
}
|
@@ -531,7 +531,7 @@ static char **fill_envp(struct dhcp_packet *packet)
temp = udhcp_get_option(packet, code);
*curr = xmalloc_optname_optval(temp, &dhcp_optflags[i], opt_name);
putenv(*curr++);
- if (code == DHCP_SUBNET) {
+ if (code == DHCP_SUBNET && temp[-OPT_DATA + OPT_LEN] == 4) {
/* Subnet option: make things like "$ip/$mask" possible */
uint32_t subnet;
move_from_unaligned32(subnet, temp);
|
CWE-125
| null | null |
7,005
|
/* Validate one label of a hostname/domain name.
 * Letters, digits, '-' and '_' are accepted (DNS allows only '-',
 * but we are more permissive); a label must not start with '-'.
 * Returns a pointer to the terminating '\0' or '.' on success,
 * NULL if an invalid character is encountered.
 */
static const char *valid_domain_label(const char *label)
{
	unsigned char c;

	/* leading hyphen is never allowed */
	if (*label == '-')
		return NULL;

	while (1) {
		c = *label;
		/* letters: case-insensitive check via forcing the 0x20 bit */
		if ((c | 0x20) >= 'a' && (c | 0x20) <= 'z') {
			label++;
			continue;
		}
		/* digits */
		if (c >= '0' && c <= '9') {
			label++;
			continue;
		}
		/* end of this label: success */
		if (c == '\0' || c == '.')
			return label;
		/* DNS allows only '-', but we are more permissive */
		if (c != '-' && c != '_')
			return NULL;
		label++;
	}
}
|
+Info
| 0
|
/* Validate one label of a hostname/domain name.
 * Accepts letters, digits, '-' and '_' (DNS strictly allows only '-',
 * but we are more permissive); the label must not start with '-'.
 * Returns a pointer to the terminating '\0' or '.' on success,
 * or NULL if an invalid character is found.
 */
static const char *valid_domain_label(const char *label)
{
	unsigned char ch;
	if (label[0] == '-')
		return NULL;
	for (;;) {
		ch = *label;
		/* (ch|0x20) folds ASCII letters to lowercase for one range check */
		if ((ch|0x20) < 'a' || (ch|0x20) > 'z') {
			if (ch < '0' || ch > '9') {
				if (ch == '\0' || ch == '.')
					return label;
				/* DNS allows only '-', but we are more permissive */
				if (ch != '-' && ch != '_')
					return NULL;
			}
		}
		label++;
	}
}
|
@@ -531,7 +531,7 @@ static char **fill_envp(struct dhcp_packet *packet)
temp = udhcp_get_option(packet, code);
*curr = xmalloc_optname_optval(temp, &dhcp_optflags[i], opt_name);
putenv(*curr++);
- if (code == DHCP_SUBNET) {
+ if (code == DHCP_SUBNET && temp[-OPT_DATA + OPT_LEN] == 4) {
/* Subnet option: make things like "$ip/$mask" possible */
uint32_t subnet;
move_from_unaligned32(subnet, temp);
|
CWE-125
| null | null |
7,006
|
/* Build a malloc'ed environment string "optname=value" from a raw
 * DHCP option.  'option' points at the option payload (OPT_DATA);
 * the payload length byte is fetched from option[-OPT_DATA + OPT_LEN].
 * List-typed options are emitted as space-separated elements.
 * Caller owns the returned string (putenv's or frees it).
 * NOTE(review): 'len' comes straight from the packet; payload bounds
 * are presumably validated by the option parser before this is called
 * — confirm against udhcp_get_option.
 */
static NOINLINE char *xmalloc_optname_optval(uint8_t *option, const struct dhcp_optflag *optflag, const char *opt_name)
{
	unsigned upper_length;
	int len, type, optlen;
	char *dest, *ret;
	/* option points to OPT_DATA, need to go back to get OPT_LEN */
	len = option[-OPT_DATA + OPT_LEN];
	type = optflag->flags & OPTION_TYPE_MASK;
	optlen = dhcp_option_lengths[type];
	/* worst-case output size: per-element string length * element count */
	upper_length = len_of_option_as_string[type]
		* ((unsigned)(len + optlen) / (unsigned)optlen);
	dest = ret = xmalloc(upper_length + strlen(opt_name) + 2);
	dest += sprintf(ret, "%s=", opt_name);
	/* format one element per iteration; list options loop */
	while (len >= optlen) {
		switch (type) {
		case OPTION_IP:
		case OPTION_IP_PAIR:
			dest += sprint_nip(dest, "", option);
			if (type == OPTION_IP)
				break;
			dest += sprint_nip(dest, "/", option + 4);
			break;
		case OPTION_U8:
			dest += sprintf(dest, "%u", *option);
			break;
		case OPTION_U16: {
			uint16_t val_u16;
			move_from_unaligned16(val_u16, option);
			dest += sprintf(dest, "%u", ntohs(val_u16));
			break;
		}
		case OPTION_S32:
		case OPTION_U32: {
			uint32_t val_u32;
			move_from_unaligned32(val_u32, option);
			dest += sprintf(dest, type == OPTION_U32 ? "%lu" : "%ld", (unsigned long) ntohl(val_u32));
			break;
		}
		/* Note: options which use 'return' instead of 'break'
		 * (for example, OPTION_STRING) skip the code which handles
		 * the case of list of options.
		 */
		case OPTION_STRING:
		case OPTION_STRING_HOST:
			memcpy(dest, option, len);
			dest[len] = '\0';
			if (type == OPTION_STRING_HOST && !good_hostname(dest))
				safe_strncpy(dest, "bad", len);
			return ret;
		case OPTION_STATIC_ROUTES: {
			/* Option binary format:
			 * mask [one byte, 0..32]
			 * ip [big endian, 0..4 bytes depending on mask]
			 * router [big endian, 4 bytes]
			 * may be repeated
			 *
			 * We convert it to a string "IP/MASK ROUTER IP2/MASK2 ROUTER2"
			 */
			const char *pfx = "";
			while (len >= 1 + 4) { /* mask + 0-byte ip + router */
				uint32_t nip;
				uint8_t *p;
				unsigned mask;
				int bytes;
				mask = *option++;
				if (mask > 32)
					break;
				len--;
				nip = 0;
				p = (void*) &nip;
				bytes = (mask + 7) / 8; /* 0 -> 0, 1..8 -> 1, 9..16 -> 2 etc */
				while (--bytes >= 0) {
					*p++ = *option++;
					len--;
				}
				if (len < 4)
					break;
				/* print ip/mask */
				dest += sprint_nip(dest, pfx, (void*) &nip);
				pfx = " ";
				dest += sprintf(dest, "/%u ", mask);
				/* print router */
				dest += sprint_nip(dest, "", option);
				option += 4;
				len -= 4;
			}
			return ret;
		}
		case OPTION_6RD:
			/* Option binary format (see RFC 5969):
			 *  0                   1                   2                   3
			 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
			 * |  OPTION_6RD   | option-length |  IPv4MaskLen  |  6rdPrefixLen |
			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
			 * |                           6rdPrefix                           |
			 * ...                        (16 octets)                        ...
			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
			 * ...                   6rdBRIPv4Address(es)                    ...
			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
			 * We convert it to a string
			 * "IPv4MaskLen 6rdPrefixLen 6rdPrefix 6rdBRIPv4Address..."
			 *
			 * Sanity check: ensure that our length is at least 22 bytes, that
			 * IPv4MaskLen <= 32,
			 * 6rdPrefixLen <= 128,
			 * 6rdPrefixLen + (32 - IPv4MaskLen) <= 128
			 * (2nd condition need no check - it follows from 1st and 3rd).
			 * Else, return envvar with empty value ("optname=")
			 */
			if (len >= (1 + 1 + 16 + 4)
			 && option[0] <= 32
			 && (option[1] + 32 - option[0]) <= 128
			) {
				/* IPv4MaskLen */
				dest += sprintf(dest, "%u ", *option++);
				/* 6rdPrefixLen */
				dest += sprintf(dest, "%u ", *option++);
				/* 6rdPrefix */
				dest += sprint_nip6(dest, /* "", */ option);
				option += 16;
				len -= 1 + 1 + 16 + 4;
				/* "+ 4" above corresponds to the length of IPv4 addr
				 * we consume in the loop below */
				while (1) {
					/* 6rdBRIPv4Address(es) */
					dest += sprint_nip(dest, " ", option);
					option += 4;
					len -= 4; /* do we have yet another 4+ bytes? */
					if (len < 0)
						break; /* no */
				}
			}
			return ret;
#if ENABLE_FEATURE_UDHCP_RFC3397
		case OPTION_DNS_STRING:
			/* unpack option into dest; use ret for prefix (i.e., "optname=") */
			dest = dname_dec(option, len, ret);
			if (dest) {
				free(ret);
				return dest;
			}
			/* error. return "optname=" string */
			return ret;
		case OPTION_SIP_SERVERS:
			/* Option binary format:
			 * type: byte
			 * type=0: domain names, dns-compressed
			 * type=1: IP addrs
			 */
			option++;
			len--;
			if (option[-1] == 0) {
				dest = dname_dec(option, len, ret);
				if (dest) {
					free(ret);
					return dest;
				}
			} else
			if (option[-1] == 1) {
				const char *pfx = "";
				while (1) {
					len -= 4;
					if (len < 0)
						break;
					dest += sprint_nip(dest, pfx, option);
					pfx = " ";
					option += 4;
				}
			}
			return ret;
#endif
		} /* switch */
		/* If we are here, try to format any remaining data
		 * in the option as another, similarly-formatted option
		 */
		option += optlen;
		len -= optlen;
		if (len < optlen /* || !(optflag->flags & OPTION_LIST) */)
			break;
		*dest++ = ' ';
		*dest = '\0';
	} /* while */
	return ret;
}
|
+Info
| 0
|
/* Build a malloc'ed environment string "optname=value" from a raw
 * DHCP option.  'option' points at the option payload (OPT_DATA);
 * the payload length byte is fetched from option[-OPT_DATA + OPT_LEN].
 * List-typed options are emitted as space-separated elements.
 * Caller owns the returned string (putenv's or frees it).
 * NOTE(review): 'len' comes straight from the packet; payload bounds
 * are presumably validated by the option parser before this is called
 * — confirm against udhcp_get_option.
 */
static NOINLINE char *xmalloc_optname_optval(uint8_t *option, const struct dhcp_optflag *optflag, const char *opt_name)
{
	unsigned upper_length;
	int len, type, optlen;
	char *dest, *ret;
	/* option points to OPT_DATA, need to go back to get OPT_LEN */
	len = option[-OPT_DATA + OPT_LEN];
	type = optflag->flags & OPTION_TYPE_MASK;
	optlen = dhcp_option_lengths[type];
	/* worst-case output size: per-element string length * element count */
	upper_length = len_of_option_as_string[type]
		* ((unsigned)(len + optlen) / (unsigned)optlen);
	dest = ret = xmalloc(upper_length + strlen(opt_name) + 2);
	dest += sprintf(ret, "%s=", opt_name);
	/* format one element per iteration; list options loop */
	while (len >= optlen) {
		switch (type) {
		case OPTION_IP:
		case OPTION_IP_PAIR:
			dest += sprint_nip(dest, "", option);
			if (type == OPTION_IP)
				break;
			dest += sprint_nip(dest, "/", option + 4);
			break;
		case OPTION_U8:
			dest += sprintf(dest, "%u", *option);
			break;
		case OPTION_U16: {
			uint16_t val_u16;
			move_from_unaligned16(val_u16, option);
			dest += sprintf(dest, "%u", ntohs(val_u16));
			break;
		}
		case OPTION_S32:
		case OPTION_U32: {
			uint32_t val_u32;
			move_from_unaligned32(val_u32, option);
			dest += sprintf(dest, type == OPTION_U32 ? "%lu" : "%ld", (unsigned long) ntohl(val_u32));
			break;
		}
		/* Note: options which use 'return' instead of 'break'
		 * (for example, OPTION_STRING) skip the code which handles
		 * the case of list of options.
		 */
		case OPTION_STRING:
		case OPTION_STRING_HOST:
			memcpy(dest, option, len);
			dest[len] = '\0';
			if (type == OPTION_STRING_HOST && !good_hostname(dest))
				safe_strncpy(dest, "bad", len);
			return ret;
		case OPTION_STATIC_ROUTES: {
			/* Option binary format:
			 * mask [one byte, 0..32]
			 * ip [big endian, 0..4 bytes depending on mask]
			 * router [big endian, 4 bytes]
			 * may be repeated
			 *
			 * We convert it to a string "IP/MASK ROUTER IP2/MASK2 ROUTER2"
			 */
			const char *pfx = "";
			while (len >= 1 + 4) { /* mask + 0-byte ip + router */
				uint32_t nip;
				uint8_t *p;
				unsigned mask;
				int bytes;
				mask = *option++;
				if (mask > 32)
					break;
				len--;
				nip = 0;
				p = (void*) &nip;
				bytes = (mask + 7) / 8; /* 0 -> 0, 1..8 -> 1, 9..16 -> 2 etc */
				while (--bytes >= 0) {
					*p++ = *option++;
					len--;
				}
				if (len < 4)
					break;
				/* print ip/mask */
				dest += sprint_nip(dest, pfx, (void*) &nip);
				pfx = " ";
				dest += sprintf(dest, "/%u ", mask);
				/* print router */
				dest += sprint_nip(dest, "", option);
				option += 4;
				len -= 4;
			}
			return ret;
		}
		case OPTION_6RD:
			/* Option binary format (see RFC 5969):
			 *  0                   1                   2                   3
			 *  0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1 2 3 4 5 6 7 8 9 0 1
			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
			 * |  OPTION_6RD   | option-length |  IPv4MaskLen  |  6rdPrefixLen |
			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
			 * |                           6rdPrefix                           |
			 * ...                        (16 octets)                        ...
			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
			 * ...                   6rdBRIPv4Address(es)                    ...
			 * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
			 * We convert it to a string
			 * "IPv4MaskLen 6rdPrefixLen 6rdPrefix 6rdBRIPv4Address..."
			 *
			 * Sanity check: ensure that our length is at least 22 bytes, that
			 * IPv4MaskLen <= 32,
			 * 6rdPrefixLen <= 128,
			 * 6rdPrefixLen + (32 - IPv4MaskLen) <= 128
			 * (2nd condition need no check - it follows from 1st and 3rd).
			 * Else, return envvar with empty value ("optname=")
			 */
			if (len >= (1 + 1 + 16 + 4)
			 && option[0] <= 32
			 && (option[1] + 32 - option[0]) <= 128
			) {
				/* IPv4MaskLen */
				dest += sprintf(dest, "%u ", *option++);
				/* 6rdPrefixLen */
				dest += sprintf(dest, "%u ", *option++);
				/* 6rdPrefix */
				dest += sprint_nip6(dest, /* "", */ option);
				option += 16;
				len -= 1 + 1 + 16 + 4;
				/* "+ 4" above corresponds to the length of IPv4 addr
				 * we consume in the loop below */
				while (1) {
					/* 6rdBRIPv4Address(es) */
					dest += sprint_nip(dest, " ", option);
					option += 4;
					len -= 4; /* do we have yet another 4+ bytes? */
					if (len < 0)
						break; /* no */
				}
			}
			return ret;
#if ENABLE_FEATURE_UDHCP_RFC3397
		case OPTION_DNS_STRING:
			/* unpack option into dest; use ret for prefix (i.e., "optname=") */
			dest = dname_dec(option, len, ret);
			if (dest) {
				free(ret);
				return dest;
			}
			/* error. return "optname=" string */
			return ret;
		case OPTION_SIP_SERVERS:
			/* Option binary format:
			 * type: byte
			 * type=0: domain names, dns-compressed
			 * type=1: IP addrs
			 */
			option++;
			len--;
			if (option[-1] == 0) {
				dest = dname_dec(option, len, ret);
				if (dest) {
					free(ret);
					return dest;
				}
			} else
			if (option[-1] == 1) {
				const char *pfx = "";
				while (1) {
					len -= 4;
					if (len < 0)
						break;
					dest += sprint_nip(dest, pfx, option);
					pfx = " ";
					option += 4;
				}
			}
			return ret;
#endif
		} /* switch */
		/* If we are here, try to format any remaining data
		 * in the option as another, similarly-formatted option
		 */
		option += optlen;
		len -= optlen;
		if (len < optlen /* || !(optflag->flags & OPTION_LIST) */)
			break;
		*dest++ = ' ';
		*dest = '\0';
	} /* while */
	return ret;
}
|
@@ -531,7 +531,7 @@ static char **fill_envp(struct dhcp_packet *packet)
temp = udhcp_get_option(packet, code);
*curr = xmalloc_optname_optval(temp, &dhcp_optflags[i], opt_name);
putenv(*curr++);
- if (code == DHCP_SUBNET) {
+ if (code == DHCP_SUBNET && temp[-OPT_DATA + OPT_LEN] == 4) {
/* Subnet option: make things like "$ip/$mask" possible */
uint32_t subnet;
move_from_unaligned32(subnet, temp);
|
CWE-125
| null | null |
7,007
|
/* Record an error on a virgl rendering context and log it to stderr.
 * Sets ctx->in_error so subsequent processing can detect the failed
 * state, and remembers the error code in ctx->last_error; 'fname' is
 * the reporting call site's name and 'value' an error-specific detail.
 * NOTE(review): vrend_ctx_error_strings is indexed by 'error' with no
 * range check — assumes callers always pass a valid enum value.
 */
static void __report_context_error(const char *fname, struct vrend_context *ctx, enum virgl_ctx_errors error, uint32_t value)
{
	ctx->in_error = true;
	ctx->last_error = error;
	fprintf(stderr,"%s: context error reported %d \"%s\" %s %d\n", fname, ctx->ctx_id, ctx->debug_name, vrend_ctx_error_strings[error], value);
}
|
DoS
| 0
|
/* Record an error on a virgl rendering context and log it to stderr.
 * Sets ctx->in_error so subsequent processing can detect the failed
 * state, and remembers the error code in ctx->last_error; 'fname' is
 * the reporting call site's name and 'value' an error-specific detail.
 * NOTE(review): vrend_ctx_error_strings is indexed by 'error' with no
 * range check — assumes callers always pass a valid enum value.
 */
static void __report_context_error(const char *fname, struct vrend_context *ctx, enum virgl_ctx_errors error, uint32_t value)
{
	ctx->in_error = true;
	ctx->last_error = error;
	fprintf(stderr,"%s: context error reported %d \"%s\" %s %d\n", fname, ctx->ctx_id, ctx->debug_name, vrend_ctx_error_strings[error], value);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,008
|
static struct vrend_linked_shader_program *add_shader_program(struct vrend_context *ctx,
struct vrend_shader *vs,
struct vrend_shader *fs,
struct vrend_shader *gs)
{
struct vrend_linked_shader_program *sprog = CALLOC_STRUCT(vrend_linked_shader_program);
char name[16];
int i;
GLuint prog_id;
GLint lret;
int id;
int last_shader;
if (!sprog)
return NULL;
/* need to rewrite VS code to add interpolation params */
if ((gs && gs->compiled_fs_id != fs->id) ||
(!gs && vs->compiled_fs_id != fs->id)) {
bool ret;
if (gs)
vrend_patch_vertex_shader_interpolants(gs->glsl_prog,
&gs->sel->sinfo,
&fs->sel->sinfo, true, fs->key.flatshade);
else
vrend_patch_vertex_shader_interpolants(vs->glsl_prog,
&vs->sel->sinfo,
&fs->sel->sinfo, false, fs->key.flatshade);
ret = vrend_compile_shader(ctx, gs ? gs : vs);
if (ret == false) {
glDeleteShader(gs ? gs->id : vs->id);
free(sprog);
return NULL;
}
if (gs)
gs->compiled_fs_id = fs->id;
else
vs->compiled_fs_id = fs->id;
}
prog_id = glCreateProgram();
glAttachShader(prog_id, vs->id);
if (gs) {
if (gs->id > 0)
glAttachShader(prog_id, gs->id);
set_stream_out_varyings(prog_id, &gs->sel->sinfo);
}
else
set_stream_out_varyings(prog_id, &vs->sel->sinfo);
glAttachShader(prog_id, fs->id);
if (fs->sel->sinfo.num_outputs > 1) {
if (util_blend_state_is_dual(&ctx->sub->blend_state, 0)) {
glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
glBindFragDataLocationIndexed(prog_id, 0, 1, "fsout_c1");
sprog->dual_src_linked = true;
} else {
glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
glBindFragDataLocationIndexed(prog_id, 1, 0, "fsout_c1");
sprog->dual_src_linked = false;
}
} else
sprog->dual_src_linked = false;
if (vrend_state.have_vertex_attrib_binding) {
uint32_t mask = vs->sel->sinfo.attrib_input_mask;
while (mask) {
i = u_bit_scan(&mask);
snprintf(name, 10, "in_%d", i);
glBindAttribLocation(prog_id, i, name);
}
}
glLinkProgram(prog_id);
glGetProgramiv(prog_id, GL_LINK_STATUS, &lret);
if (lret == GL_FALSE) {
char infolog[65536];
int len;
glGetProgramInfoLog(prog_id, 65536, &len, infolog);
fprintf(stderr,"got error linking\n%s\n", infolog);
/* dump shaders */
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
fprintf(stderr,"vert shader: %d GLSL\n%s\n", vs->id, vs->glsl_prog);
if (gs)
fprintf(stderr,"geom shader: %d GLSL\n%s\n", gs->id, gs->glsl_prog);
fprintf(stderr,"frag shader: %d GLSL\n%s\n", fs->id, fs->glsl_prog);
glDeleteProgram(prog_id);
free(sprog);
return NULL;
}
sprog->ss[PIPE_SHADER_VERTEX] = vs;
sprog->ss[PIPE_SHADER_FRAGMENT] = fs;
sprog->ss[PIPE_SHADER_GEOMETRY] = gs;
list_add(&sprog->sl[PIPE_SHADER_VERTEX], &vs->programs);
list_add(&sprog->sl[PIPE_SHADER_FRAGMENT], &fs->programs);
if (gs)
list_add(&sprog->sl[PIPE_SHADER_GEOMETRY], &gs->programs);
last_shader = gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT;
sprog->id = prog_id;
list_addtail(&sprog->head, &ctx->sub->programs);
if (fs->key.pstipple_tex)
sprog->fs_stipple_loc = glGetUniformLocation(prog_id, "pstipple_sampler");
else
sprog->fs_stipple_loc = -1;
sprog->vs_ws_adjust_loc = glGetUniformLocation(prog_id, "winsys_adjust");
for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
if (sprog->ss[id]->sel->sinfo.samplers_used_mask) {
uint32_t mask = sprog->ss[id]->sel->sinfo.samplers_used_mask;
int nsamp = util_bitcount(sprog->ss[id]->sel->sinfo.samplers_used_mask);
int index;
sprog->shadow_samp_mask[id] = sprog->ss[id]->sel->sinfo.shadow_samp_mask;
if (sprog->ss[id]->sel->sinfo.shadow_samp_mask) {
sprog->shadow_samp_mask_locs[id] = calloc(nsamp, sizeof(uint32_t));
sprog->shadow_samp_add_locs[id] = calloc(nsamp, sizeof(uint32_t));
} else {
sprog->shadow_samp_mask_locs[id] = sprog->shadow_samp_add_locs[id] = NULL;
}
sprog->samp_locs[id] = calloc(nsamp, sizeof(uint32_t));
if (sprog->samp_locs[id]) {
const char *prefix = pipe_shader_to_prefix(id);
index = 0;
while(mask) {
i = u_bit_scan(&mask);
snprintf(name, 10, "%ssamp%d", prefix, i);
sprog->samp_locs[id][index] = glGetUniformLocation(prog_id, name);
if (sprog->ss[id]->sel->sinfo.shadow_samp_mask & (1 << i)) {
snprintf(name, 14, "%sshadmask%d", prefix, i);
sprog->shadow_samp_mask_locs[id][index] = glGetUniformLocation(prog_id, name);
snprintf(name, 14, "%sshadadd%d", prefix, i);
sprog->shadow_samp_add_locs[id][index] = glGetUniformLocation(prog_id, name);
}
index++;
}
}
} else {
sprog->samp_locs[id] = NULL;
sprog->shadow_samp_mask_locs[id] = NULL;
sprog->shadow_samp_add_locs[id] = NULL;
sprog->shadow_samp_mask[id] = 0;
}
sprog->samplers_used_mask[id] = sprog->ss[id]->sel->sinfo.samplers_used_mask;
}
for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
if (sprog->ss[id]->sel->sinfo.num_consts) {
sprog->const_locs[id] = calloc(sprog->ss[id]->sel->sinfo.num_consts, sizeof(uint32_t));
if (sprog->const_locs[id]) {
const char *prefix = pipe_shader_to_prefix(id);
for (i = 0; i < sprog->ss[id]->sel->sinfo.num_consts; i++) {
snprintf(name, 16, "%sconst0[%d]", prefix, i);
sprog->const_locs[id][i] = glGetUniformLocation(prog_id, name);
}
}
} else
sprog->const_locs[id] = NULL;
}
if (!vrend_state.have_vertex_attrib_binding) {
if (vs->sel->sinfo.num_inputs) {
sprog->attrib_locs = calloc(vs->sel->sinfo.num_inputs, sizeof(uint32_t));
if (sprog->attrib_locs) {
for (i = 0; i < vs->sel->sinfo.num_inputs; i++) {
snprintf(name, 10, "in_%d", i);
sprog->attrib_locs[i] = glGetAttribLocation(prog_id, name);
}
}
} else
sprog->attrib_locs = NULL;
}
for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
if (sprog->ss[id]->sel->sinfo.num_ubos) {
const char *prefix = pipe_shader_to_prefix(id);
sprog->ubo_locs[id] = calloc(sprog->ss[id]->sel->sinfo.num_ubos, sizeof(uint32_t));
for (i = 0; i < sprog->ss[id]->sel->sinfo.num_ubos; i++) {
snprintf(name, 16, "%subo%d", prefix, i + 1);
sprog->ubo_locs[id][i] = glGetUniformBlockIndex(prog_id, name);
}
} else
sprog->ubo_locs[id] = NULL;
}
if (vs->sel->sinfo.num_ucp) {
for (i = 0; i < vs->sel->sinfo.num_ucp; i++) {
snprintf(name, 10, "clipp[%d]", i);
sprog->clip_locs[i] = glGetUniformLocation(prog_id, name);
}
}
return sprog;
}
|
DoS
| 0
|
static struct vrend_linked_shader_program *add_shader_program(struct vrend_context *ctx,
struct vrend_shader *vs,
struct vrend_shader *fs,
struct vrend_shader *gs)
{
struct vrend_linked_shader_program *sprog = CALLOC_STRUCT(vrend_linked_shader_program);
char name[16];
int i;
GLuint prog_id;
GLint lret;
int id;
int last_shader;
if (!sprog)
return NULL;
/* need to rewrite VS code to add interpolation params */
if ((gs && gs->compiled_fs_id != fs->id) ||
(!gs && vs->compiled_fs_id != fs->id)) {
bool ret;
if (gs)
vrend_patch_vertex_shader_interpolants(gs->glsl_prog,
&gs->sel->sinfo,
&fs->sel->sinfo, true, fs->key.flatshade);
else
vrend_patch_vertex_shader_interpolants(vs->glsl_prog,
&vs->sel->sinfo,
&fs->sel->sinfo, false, fs->key.flatshade);
ret = vrend_compile_shader(ctx, gs ? gs : vs);
if (ret == false) {
glDeleteShader(gs ? gs->id : vs->id);
free(sprog);
return NULL;
}
if (gs)
gs->compiled_fs_id = fs->id;
else
vs->compiled_fs_id = fs->id;
}
prog_id = glCreateProgram();
glAttachShader(prog_id, vs->id);
if (gs) {
if (gs->id > 0)
glAttachShader(prog_id, gs->id);
set_stream_out_varyings(prog_id, &gs->sel->sinfo);
}
else
set_stream_out_varyings(prog_id, &vs->sel->sinfo);
glAttachShader(prog_id, fs->id);
if (fs->sel->sinfo.num_outputs > 1) {
if (util_blend_state_is_dual(&ctx->sub->blend_state, 0)) {
glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
glBindFragDataLocationIndexed(prog_id, 0, 1, "fsout_c1");
sprog->dual_src_linked = true;
} else {
glBindFragDataLocationIndexed(prog_id, 0, 0, "fsout_c0");
glBindFragDataLocationIndexed(prog_id, 1, 0, "fsout_c1");
sprog->dual_src_linked = false;
}
} else
sprog->dual_src_linked = false;
if (vrend_state.have_vertex_attrib_binding) {
uint32_t mask = vs->sel->sinfo.attrib_input_mask;
while (mask) {
i = u_bit_scan(&mask);
snprintf(name, 10, "in_%d", i);
glBindAttribLocation(prog_id, i, name);
}
}
glLinkProgram(prog_id);
glGetProgramiv(prog_id, GL_LINK_STATUS, &lret);
if (lret == GL_FALSE) {
char infolog[65536];
int len;
glGetProgramInfoLog(prog_id, 65536, &len, infolog);
fprintf(stderr,"got error linking\n%s\n", infolog);
/* dump shaders */
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
fprintf(stderr,"vert shader: %d GLSL\n%s\n", vs->id, vs->glsl_prog);
if (gs)
fprintf(stderr,"geom shader: %d GLSL\n%s\n", gs->id, gs->glsl_prog);
fprintf(stderr,"frag shader: %d GLSL\n%s\n", fs->id, fs->glsl_prog);
glDeleteProgram(prog_id);
free(sprog);
return NULL;
}
sprog->ss[PIPE_SHADER_VERTEX] = vs;
sprog->ss[PIPE_SHADER_FRAGMENT] = fs;
sprog->ss[PIPE_SHADER_GEOMETRY] = gs;
list_add(&sprog->sl[PIPE_SHADER_VERTEX], &vs->programs);
list_add(&sprog->sl[PIPE_SHADER_FRAGMENT], &fs->programs);
if (gs)
list_add(&sprog->sl[PIPE_SHADER_GEOMETRY], &gs->programs);
last_shader = gs ? PIPE_SHADER_GEOMETRY : PIPE_SHADER_FRAGMENT;
sprog->id = prog_id;
list_addtail(&sprog->head, &ctx->sub->programs);
if (fs->key.pstipple_tex)
sprog->fs_stipple_loc = glGetUniformLocation(prog_id, "pstipple_sampler");
else
sprog->fs_stipple_loc = -1;
sprog->vs_ws_adjust_loc = glGetUniformLocation(prog_id, "winsys_adjust");
for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
if (sprog->ss[id]->sel->sinfo.samplers_used_mask) {
uint32_t mask = sprog->ss[id]->sel->sinfo.samplers_used_mask;
int nsamp = util_bitcount(sprog->ss[id]->sel->sinfo.samplers_used_mask);
int index;
sprog->shadow_samp_mask[id] = sprog->ss[id]->sel->sinfo.shadow_samp_mask;
if (sprog->ss[id]->sel->sinfo.shadow_samp_mask) {
sprog->shadow_samp_mask_locs[id] = calloc(nsamp, sizeof(uint32_t));
sprog->shadow_samp_add_locs[id] = calloc(nsamp, sizeof(uint32_t));
} else {
sprog->shadow_samp_mask_locs[id] = sprog->shadow_samp_add_locs[id] = NULL;
}
sprog->samp_locs[id] = calloc(nsamp, sizeof(uint32_t));
if (sprog->samp_locs[id]) {
const char *prefix = pipe_shader_to_prefix(id);
index = 0;
while(mask) {
i = u_bit_scan(&mask);
snprintf(name, 10, "%ssamp%d", prefix, i);
sprog->samp_locs[id][index] = glGetUniformLocation(prog_id, name);
if (sprog->ss[id]->sel->sinfo.shadow_samp_mask & (1 << i)) {
snprintf(name, 14, "%sshadmask%d", prefix, i);
sprog->shadow_samp_mask_locs[id][index] = glGetUniformLocation(prog_id, name);
snprintf(name, 14, "%sshadadd%d", prefix, i);
sprog->shadow_samp_add_locs[id][index] = glGetUniformLocation(prog_id, name);
}
index++;
}
}
} else {
sprog->samp_locs[id] = NULL;
sprog->shadow_samp_mask_locs[id] = NULL;
sprog->shadow_samp_add_locs[id] = NULL;
sprog->shadow_samp_mask[id] = 0;
}
sprog->samplers_used_mask[id] = sprog->ss[id]->sel->sinfo.samplers_used_mask;
}
for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
if (sprog->ss[id]->sel->sinfo.num_consts) {
sprog->const_locs[id] = calloc(sprog->ss[id]->sel->sinfo.num_consts, sizeof(uint32_t));
if (sprog->const_locs[id]) {
const char *prefix = pipe_shader_to_prefix(id);
for (i = 0; i < sprog->ss[id]->sel->sinfo.num_consts; i++) {
snprintf(name, 16, "%sconst0[%d]", prefix, i);
sprog->const_locs[id][i] = glGetUniformLocation(prog_id, name);
}
}
} else
sprog->const_locs[id] = NULL;
}
if (!vrend_state.have_vertex_attrib_binding) {
if (vs->sel->sinfo.num_inputs) {
sprog->attrib_locs = calloc(vs->sel->sinfo.num_inputs, sizeof(uint32_t));
if (sprog->attrib_locs) {
for (i = 0; i < vs->sel->sinfo.num_inputs; i++) {
snprintf(name, 10, "in_%d", i);
sprog->attrib_locs[i] = glGetAttribLocation(prog_id, name);
}
}
} else
sprog->attrib_locs = NULL;
}
for (id = PIPE_SHADER_VERTEX; id <= last_shader; id++) {
if (sprog->ss[id]->sel->sinfo.num_ubos) {
const char *prefix = pipe_shader_to_prefix(id);
sprog->ubo_locs[id] = calloc(sprog->ss[id]->sel->sinfo.num_ubos, sizeof(uint32_t));
for (i = 0; i < sprog->ss[id]->sel->sinfo.num_ubos; i++) {
snprintf(name, 16, "%subo%d", prefix, i + 1);
sprog->ubo_locs[id][i] = glGetUniformBlockIndex(prog_id, name);
}
} else
sprog->ubo_locs[id] = NULL;
}
if (vs->sel->sinfo.num_ucp) {
for (i = 0; i < vs->sel->sinfo.num_ucp; i++) {
snprintf(name, 10, "clipp[%d]", i);
sprog->clip_locs[i] = glGetUniformLocation(prog_id, name);
}
}
return sprog;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,009
|
static bool check_iov_bounds(struct vrend_resource *res,
const struct vrend_transfer_info *info,
struct iovec *iov, int num_iovs)
{
GLuint send_size;
GLuint iovsize = vrend_get_iovec_size(iov, num_iovs);
GLuint valid_stride, valid_layer_stride;
/* validate the send size */
valid_stride = util_format_get_stride(res->base.format, info->box->width);
if (info->stride) {
/* only validate passed in stride for boxes with height */
if (info->box->height > 1) {
if (info->stride < valid_stride)
return false;
valid_stride = info->stride;
}
}
valid_layer_stride = util_format_get_2d_size(res->base.format, valid_stride,
info->box->height);
/* layer stride only makes sense for 3d,cube and arrays */
if (info->layer_stride) {
if ((res->base.target != PIPE_TEXTURE_3D &&
res->base.target != PIPE_TEXTURE_CUBE &&
res->base.target != PIPE_TEXTURE_1D_ARRAY &&
res->base.target != PIPE_TEXTURE_2D_ARRAY &&
res->base.target != PIPE_TEXTURE_CUBE_ARRAY))
return false;
/* only validate passed in layer_stride for boxes with depth */
if (info->box->depth > 1) {
if (info->layer_stride < valid_layer_stride)
return false;
valid_layer_stride = info->layer_stride;
}
}
send_size = valid_layer_stride * info->box->depth;
if (iovsize < info->offset)
return false;
if (iovsize < send_size)
return false;
if (iovsize < info->offset + send_size)
return false;
return true;
}
|
DoS
| 0
|
static bool check_iov_bounds(struct vrend_resource *res,
const struct vrend_transfer_info *info,
struct iovec *iov, int num_iovs)
{
GLuint send_size;
GLuint iovsize = vrend_get_iovec_size(iov, num_iovs);
GLuint valid_stride, valid_layer_stride;
/* validate the send size */
valid_stride = util_format_get_stride(res->base.format, info->box->width);
if (info->stride) {
/* only validate passed in stride for boxes with height */
if (info->box->height > 1) {
if (info->stride < valid_stride)
return false;
valid_stride = info->stride;
}
}
valid_layer_stride = util_format_get_2d_size(res->base.format, valid_stride,
info->box->height);
/* layer stride only makes sense for 3d,cube and arrays */
if (info->layer_stride) {
if ((res->base.target != PIPE_TEXTURE_3D &&
res->base.target != PIPE_TEXTURE_CUBE &&
res->base.target != PIPE_TEXTURE_1D_ARRAY &&
res->base.target != PIPE_TEXTURE_2D_ARRAY &&
res->base.target != PIPE_TEXTURE_CUBE_ARRAY))
return false;
/* only validate passed in layer_stride for boxes with depth */
if (info->box->depth > 1) {
if (info->layer_stride < valid_layer_stride)
return false;
valid_layer_stride = info->layer_stride;
}
}
send_size = valid_layer_stride * info->box->depth;
if (iovsize < info->offset)
return false;
if (iovsize < send_size)
return false;
if (iovsize < info->offset + send_size)
return false;
return true;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,010
|
static int check_resource_valid(struct vrend_renderer_resource_create_args *args)
{
/* do not accept handle 0 */
if (args->handle == 0)
return -1;
/* limit the target */
if (args->target >= PIPE_MAX_TEXTURE_TYPES)
return -1;
if (args->format >= VIRGL_FORMAT_MAX)
return -1;
/* only texture 2d and 2d array can have multiple samples */
if (args->nr_samples > 1) {
if (args->target != PIPE_TEXTURE_2D && args->target != PIPE_TEXTURE_2D_ARRAY)
return -1;
/* multisample can't have miplevels */
if (args->last_level > 0)
return -1;
}
if (args->last_level > 0) {
/* buffer and rect textures can't have mipmaps */
if (args->target == PIPE_BUFFER || args->target == PIPE_TEXTURE_RECT)
return -1;
if (args->last_level > (floor(log2(MAX2(args->width, args->height))) + 1))
return -1;
}
if (args->flags != 0 && args->flags != VIRGL_RESOURCE_Y_0_TOP)
return -1;
if (args->flags & VIRGL_RESOURCE_Y_0_TOP)
if (args->target != PIPE_TEXTURE_2D && args->target != PIPE_TEXTURE_RECT)
return -1;
/* array size for array textures only */
if (args->target == PIPE_TEXTURE_CUBE) {
if (args->array_size != 6)
return -1;
} else if (args->target == PIPE_TEXTURE_CUBE_ARRAY) {
if (args->array_size % 6)
return -1;
} else if (args->array_size > 1) {
if (args->target != PIPE_TEXTURE_2D_ARRAY &&
args->target != PIPE_TEXTURE_1D_ARRAY)
return -1;
}
if (args->bind == 0 ||
args->bind == VREND_RES_BIND_CUSTOM ||
args->bind == VREND_RES_BIND_INDEX_BUFFER ||
args->bind == VREND_RES_BIND_STREAM_OUTPUT ||
args->bind == VREND_RES_BIND_VERTEX_BUFFER ||
args->bind == VREND_RES_BIND_CONSTANT_BUFFER) {
if (args->target != PIPE_BUFFER)
return -1;
if (args->height != 1 || args->depth != 1)
return -1;
} else {
if (!((args->bind & VREND_RES_BIND_SAMPLER_VIEW) ||
(args->bind & VREND_RES_BIND_DEPTH_STENCIL) ||
(args->bind & VREND_RES_BIND_RENDER_TARGET) ||
(args->bind & VREND_RES_BIND_CURSOR)))
return -1;
if (args->target == PIPE_TEXTURE_2D ||
args->target == PIPE_TEXTURE_RECT ||
args->target == PIPE_TEXTURE_CUBE ||
args->target == PIPE_TEXTURE_2D_ARRAY ||
args->target == PIPE_TEXTURE_CUBE_ARRAY) {
if (args->depth != 1)
return -1;
}
if (args->target == PIPE_TEXTURE_1D ||
args->target == PIPE_TEXTURE_1D_ARRAY) {
if (args->height != 1 || args->depth != 1)
return -1;
}
}
return 0;
}
|
DoS
| 0
|
static int check_resource_valid(struct vrend_renderer_resource_create_args *args)
{
/* do not accept handle 0 */
if (args->handle == 0)
return -1;
/* limit the target */
if (args->target >= PIPE_MAX_TEXTURE_TYPES)
return -1;
if (args->format >= VIRGL_FORMAT_MAX)
return -1;
/* only texture 2d and 2d array can have multiple samples */
if (args->nr_samples > 1) {
if (args->target != PIPE_TEXTURE_2D && args->target != PIPE_TEXTURE_2D_ARRAY)
return -1;
/* multisample can't have miplevels */
if (args->last_level > 0)
return -1;
}
if (args->last_level > 0) {
/* buffer and rect textures can't have mipmaps */
if (args->target == PIPE_BUFFER || args->target == PIPE_TEXTURE_RECT)
return -1;
if (args->last_level > (floor(log2(MAX2(args->width, args->height))) + 1))
return -1;
}
if (args->flags != 0 && args->flags != VIRGL_RESOURCE_Y_0_TOP)
return -1;
if (args->flags & VIRGL_RESOURCE_Y_0_TOP)
if (args->target != PIPE_TEXTURE_2D && args->target != PIPE_TEXTURE_RECT)
return -1;
/* array size for array textures only */
if (args->target == PIPE_TEXTURE_CUBE) {
if (args->array_size != 6)
return -1;
} else if (args->target == PIPE_TEXTURE_CUBE_ARRAY) {
if (args->array_size % 6)
return -1;
} else if (args->array_size > 1) {
if (args->target != PIPE_TEXTURE_2D_ARRAY &&
args->target != PIPE_TEXTURE_1D_ARRAY)
return -1;
}
if (args->bind == 0 ||
args->bind == VREND_RES_BIND_CUSTOM ||
args->bind == VREND_RES_BIND_INDEX_BUFFER ||
args->bind == VREND_RES_BIND_STREAM_OUTPUT ||
args->bind == VREND_RES_BIND_VERTEX_BUFFER ||
args->bind == VREND_RES_BIND_CONSTANT_BUFFER) {
if (args->target != PIPE_BUFFER)
return -1;
if (args->height != 1 || args->depth != 1)
return -1;
} else {
if (!((args->bind & VREND_RES_BIND_SAMPLER_VIEW) ||
(args->bind & VREND_RES_BIND_DEPTH_STENCIL) ||
(args->bind & VREND_RES_BIND_RENDER_TARGET) ||
(args->bind & VREND_RES_BIND_CURSOR)))
return -1;
if (args->target == PIPE_TEXTURE_2D ||
args->target == PIPE_TEXTURE_RECT ||
args->target == PIPE_TEXTURE_CUBE ||
args->target == PIPE_TEXTURE_2D_ARRAY ||
args->target == PIPE_TEXTURE_CUBE_ARRAY) {
if (args->depth != 1)
return -1;
}
if (args->target == PIPE_TEXTURE_1D ||
args->target == PIPE_TEXTURE_1D_ARRAY) {
if (args->height != 1 || args->depth != 1)
return -1;
}
}
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,011
|
static inline int conv_dst_blend(int blend_factor)
{
if (blend_factor == PIPE_BLENDFACTOR_DST_ALPHA)
return PIPE_BLENDFACTOR_ONE;
if (blend_factor == PIPE_BLENDFACTOR_INV_DST_ALPHA)
return PIPE_BLENDFACTOR_ZERO;
return blend_factor;
}
|
DoS
| 0
|
static inline int conv_dst_blend(int blend_factor)
{
if (blend_factor == PIPE_BLENDFACTOR_DST_ALPHA)
return PIPE_BLENDFACTOR_ONE;
if (blend_factor == PIPE_BLENDFACTOR_INV_DST_ALPHA)
return PIPE_BLENDFACTOR_ZERO;
return blend_factor;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,012
|
static inline int conv_shader_type(int type)
{
switch (type) {
case PIPE_SHADER_VERTEX: return GL_VERTEX_SHADER;
case PIPE_SHADER_FRAGMENT: return GL_FRAGMENT_SHADER;
case PIPE_SHADER_GEOMETRY: return GL_GEOMETRY_SHADER;
default:
return 0;
};
}
|
DoS
| 0
|
static inline int conv_shader_type(int type)
{
switch (type) {
case PIPE_SHADER_VERTEX: return GL_VERTEX_SHADER;
case PIPE_SHADER_FRAGMENT: return GL_FRAGMENT_SHADER;
case PIPE_SHADER_GEOMETRY: return GL_GEOMETRY_SHADER;
default:
return 0;
};
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,013
|
static inline GLenum convert_mag_filter(unsigned int filter)
{
if (filter == PIPE_TEX_FILTER_NEAREST)
return GL_NEAREST;
return GL_LINEAR;
}
|
DoS
| 0
|
static inline GLenum convert_mag_filter(unsigned int filter)
{
if (filter == PIPE_TEX_FILTER_NEAREST)
return GL_NEAREST;
return GL_LINEAR;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,014
|
static inline GLenum convert_min_filter(unsigned int filter, unsigned int mip_filter)
{
if (mip_filter == PIPE_TEX_MIPFILTER_NONE)
return convert_mag_filter(filter);
else if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
if (filter == PIPE_TEX_FILTER_NEAREST)
return GL_NEAREST_MIPMAP_LINEAR;
else
return GL_LINEAR_MIPMAP_LINEAR;
} else if (mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
if (filter == PIPE_TEX_FILTER_NEAREST)
return GL_NEAREST_MIPMAP_NEAREST;
else
return GL_LINEAR_MIPMAP_NEAREST;
}
assert(0);
return 0;
}
|
DoS
| 0
|
static inline GLenum convert_min_filter(unsigned int filter, unsigned int mip_filter)
{
if (mip_filter == PIPE_TEX_MIPFILTER_NONE)
return convert_mag_filter(filter);
else if (mip_filter == PIPE_TEX_MIPFILTER_LINEAR) {
if (filter == PIPE_TEX_FILTER_NEAREST)
return GL_NEAREST_MIPMAP_LINEAR;
else
return GL_LINEAR_MIPMAP_LINEAR;
} else if (mip_filter == PIPE_TEX_MIPFILTER_NEAREST) {
if (filter == PIPE_TEX_FILTER_NEAREST)
return GL_NEAREST_MIPMAP_NEAREST;
else
return GL_LINEAR_MIPMAP_NEAREST;
}
assert(0);
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,015
|
static GLuint convert_wrap(int wrap)
{
switch(wrap){
case PIPE_TEX_WRAP_REPEAT: return GL_REPEAT;
case PIPE_TEX_WRAP_CLAMP: if (vrend_state.use_core_profile == false) return GL_CLAMP; else return GL_CLAMP_TO_EDGE;
case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return GL_CLAMP_TO_EDGE;
case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return GL_CLAMP_TO_BORDER;
case PIPE_TEX_WRAP_MIRROR_REPEAT: return GL_MIRRORED_REPEAT;
case PIPE_TEX_WRAP_MIRROR_CLAMP: return GL_MIRROR_CLAMP_EXT;
case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return GL_MIRROR_CLAMP_TO_EDGE_EXT;
case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return GL_MIRROR_CLAMP_TO_BORDER_EXT;
default:
assert(0);
return -1;
}
}
|
DoS
| 0
|
static GLuint convert_wrap(int wrap)
{
switch(wrap){
case PIPE_TEX_WRAP_REPEAT: return GL_REPEAT;
case PIPE_TEX_WRAP_CLAMP: if (vrend_state.use_core_profile == false) return GL_CLAMP; else return GL_CLAMP_TO_EDGE;
case PIPE_TEX_WRAP_CLAMP_TO_EDGE: return GL_CLAMP_TO_EDGE;
case PIPE_TEX_WRAP_CLAMP_TO_BORDER: return GL_CLAMP_TO_BORDER;
case PIPE_TEX_WRAP_MIRROR_REPEAT: return GL_MIRRORED_REPEAT;
case PIPE_TEX_WRAP_MIRROR_CLAMP: return GL_MIRROR_CLAMP_EXT;
case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_EDGE: return GL_MIRROR_CLAMP_TO_EDGE_EXT;
case PIPE_TEX_WRAP_MIRROR_CLAMP_TO_BORDER: return GL_MIRROR_CLAMP_TO_BORDER_EXT;
default:
assert(0);
return -1;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,016
|
static void flush_eventfd(int fd)
{
ssize_t len;
uint64_t value;
do {
len = read(fd, &value, sizeof(value));
} while ((len == -1 && errno == EINTR) || len == sizeof(value));
}
|
DoS
| 0
|
static void flush_eventfd(int fd)
{
ssize_t len;
uint64_t value;
do {
len = read(fd, &value, sizeof(value));
} while ((len == -1 && errno == EINTR) || len == sizeof(value));
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,017
|
static void free_fence_locked(struct vrend_fence *fence)
{
list_del(&fence->fences);
glDeleteSync(fence->syncobj);
free(fence);
}
|
DoS
| 0
|
static void free_fence_locked(struct vrend_fence *fence)
{
list_del(&fence->fences);
glDeleteSync(fence->syncobj);
free(fence);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,018
|
static GLenum get_gs_xfb_mode(GLenum mode)
{
switch (mode) {
case GL_POINTS:
return GL_POINTS;
case GL_LINE_STRIP:
return GL_LINES;
case GL_TRIANGLE_STRIP:
return GL_TRIANGLES;
default:
fprintf(stderr, "illegal gs transform feedback mode %d\n", mode);
return GL_POINTS;
}
}
|
DoS
| 0
|
static GLenum get_gs_xfb_mode(GLenum mode)
{
switch (mode) {
case GL_POINTS:
return GL_POINTS;
case GL_LINE_STRIP:
return GL_LINES;
case GL_TRIANGLE_STRIP:
return GL_TRIANGLES;
default:
fprintf(stderr, "illegal gs transform feedback mode %d\n", mode);
return GL_POINTS;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,019
|
static GLenum get_xfb_mode(GLenum mode)
{
switch (mode) {
case GL_POINTS:
return GL_POINTS;
case GL_TRIANGLES:
case GL_TRIANGLE_STRIP:
case GL_TRIANGLE_FAN:
case GL_QUADS:
case GL_QUAD_STRIP:
case GL_POLYGON:
return GL_TRIANGLES;
case GL_LINES:
case GL_LINE_LOOP:
case GL_LINE_STRIP:
return GL_LINES;
default:
fprintf(stderr, "failed to translate TFB %d\n", mode);
return GL_POINTS;
}
}
|
DoS
| 0
|
static GLenum get_xfb_mode(GLenum mode)
{
switch (mode) {
case GL_POINTS:
return GL_POINTS;
case GL_TRIANGLES:
case GL_TRIANGLE_STRIP:
case GL_TRIANGLE_FAN:
case GL_QUADS:
case GL_QUAD_STRIP:
case GL_POLYGON:
return GL_TRIANGLES;
case GL_LINES:
case GL_LINE_LOOP:
case GL_LINE_STRIP:
return GL_LINES;
default:
fprintf(stderr, "failed to translate TFB %d\n", mode);
return GL_POINTS;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,020
|
static void iov_buffer_upload(void *cookie, uint32_t doff, void *src, int len)
{
struct virgl_sub_upload_data *d = cookie;
glBufferSubData(d->target, d->box->x + doff, len, src);
}
|
DoS
| 0
|
static void iov_buffer_upload(void *cookie, uint32_t doff, void *src, int len)
{
struct virgl_sub_upload_data *d = cookie;
glBufferSubData(d->target, d->box->x + doff, len, src);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,021
|
static inline bool is_dst_blend(int blend_factor)
{
return (blend_factor == PIPE_BLENDFACTOR_DST_ALPHA ||
blend_factor == PIPE_BLENDFACTOR_INV_DST_ALPHA);
}
|
DoS
| 0
|
static inline bool is_dst_blend(int blend_factor)
{
return (blend_factor == PIPE_BLENDFACTOR_DST_ALPHA ||
blend_factor == PIPE_BLENDFACTOR_INV_DST_ALPHA);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,022
|
static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_context *ctx,
GLuint vs_id, GLuint fs_id, GLuint gs_id, bool dual_src)
{
struct vrend_linked_shader_program *ent;
LIST_FOR_EACH_ENTRY(ent, &ctx->sub->programs, head) {
if (ent->dual_src_linked != dual_src)
continue;
if (ent->ss[PIPE_SHADER_VERTEX]->id == vs_id && ent->ss[PIPE_SHADER_FRAGMENT]->id == fs_id) {
if (!ent->ss[PIPE_SHADER_GEOMETRY] && gs_id == 0)
return ent;
if (ent->ss[PIPE_SHADER_GEOMETRY] && ent->ss[PIPE_SHADER_GEOMETRY]->id == gs_id)
return ent;
}
}
return NULL;
}
|
DoS
| 0
|
static struct vrend_linked_shader_program *lookup_shader_program(struct vrend_context *ctx,
GLuint vs_id, GLuint fs_id, GLuint gs_id, bool dual_src)
{
struct vrend_linked_shader_program *ent;
LIST_FOR_EACH_ENTRY(ent, &ctx->sub->programs, head) {
if (ent->dual_src_linked != dual_src)
continue;
if (ent->ss[PIPE_SHADER_VERTEX]->id == vs_id && ent->ss[PIPE_SHADER_FRAGMENT]->id == fs_id) {
if (!ent->ss[PIPE_SHADER_GEOMETRY] && gs_id == 0)
return ent;
if (ent->ss[PIPE_SHADER_GEOMETRY] && ent->ss[PIPE_SHADER_GEOMETRY]->id == gs_id)
return ent;
}
}
return NULL;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,023
|
static inline const char *pipe_shader_to_prefix(int shader_type)
{
switch (shader_type) {
case PIPE_SHADER_VERTEX: return "vs";
case PIPE_SHADER_FRAGMENT: return "fs";
case PIPE_SHADER_GEOMETRY: return "gs";
default:
return NULL;
};
}
|
DoS
| 0
|
static inline const char *pipe_shader_to_prefix(int shader_type)
{
switch (shader_type) {
case PIPE_SHADER_VERTEX: return "vs";
case PIPE_SHADER_FRAGMENT: return "fs";
case PIPE_SHADER_GEOMETRY: return "gs";
default:
return NULL;
};
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,024
|
static void read_transfer_data(struct pipe_resource *res,
struct iovec *iov,
unsigned int num_iovs,
char *data,
uint32_t src_stride,
struct pipe_box *box,
uint64_t offset, bool invert)
{
int blsize = util_format_get_blocksize(res->format);
uint32_t size = vrend_get_iovec_size(iov, num_iovs);
uint32_t send_size = util_format_get_nblocks(res->format, box->width,
box->height) * blsize * box->depth;
uint32_t bwx = util_format_get_nblocksx(res->format, box->width) * blsize;
uint32_t bh = util_format_get_nblocksy(res->format, box->height);
int h;
uint32_t myoffset = offset;
if ((send_size == size || bh == 1) && !invert)
vrend_read_from_iovec(iov, num_iovs, offset, data, send_size);
else {
if (invert) {
for (h = bh - 1; h >= 0; h--) {
void *ptr = data + (h * bwx);
vrend_read_from_iovec(iov, num_iovs, myoffset, ptr, bwx);
myoffset += src_stride;
}
} else {
for (h = 0; h < bh; h++) {
void *ptr = data + (h * bwx);
vrend_read_from_iovec(iov, num_iovs, myoffset, ptr, bwx);
myoffset += src_stride;
}
}
}
}
|
DoS
| 0
|
static void read_transfer_data(struct pipe_resource *res,
struct iovec *iov,
unsigned int num_iovs,
char *data,
uint32_t src_stride,
struct pipe_box *box,
uint64_t offset, bool invert)
{
int blsize = util_format_get_blocksize(res->format);
uint32_t size = vrend_get_iovec_size(iov, num_iovs);
uint32_t send_size = util_format_get_nblocks(res->format, box->width,
box->height) * blsize * box->depth;
uint32_t bwx = util_format_get_nblocksx(res->format, box->width) * blsize;
uint32_t bh = util_format_get_nblocksy(res->format, box->height);
int h;
uint32_t myoffset = offset;
if ((send_size == size || bh == 1) && !invert)
vrend_read_from_iovec(iov, num_iovs, offset, data, send_size);
else {
if (invert) {
for (h = bh - 1; h >= 0; h--) {
void *ptr = data + (h * bwx);
vrend_read_from_iovec(iov, num_iovs, myoffset, ptr, bwx);
myoffset += src_stride;
}
} else {
for (h = 0; h < bh; h++) {
void *ptr = data + (h * bwx);
vrend_read_from_iovec(iov, num_iovs, myoffset, ptr, bwx);
myoffset += src_stride;
}
}
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,025
|
static void set_stream_out_varyings(int prog_id, struct vrend_shader_info *sinfo)
{
struct pipe_stream_output_info *so = &sinfo->so_info;
char *varyings[PIPE_MAX_SHADER_OUTPUTS*2];
int i, j;
int n_outputs = 0;
int last_buffer = 0;
char *start_skip;
int buf_offset = 0;
int skip;
if (!so->num_outputs)
return;
if (vrend_dump_shaders)
dump_stream_out(so);
for (i = 0; i < so->num_outputs; i++) {
if (last_buffer != so->output[i].output_buffer) {
skip = so->stride[last_buffer] - buf_offset;
while (skip) {
start_skip = get_skip_str(&skip);
if (start_skip)
varyings[n_outputs++] = start_skip;
}
for (j = last_buffer; j < so->output[i].output_buffer; j++)
varyings[n_outputs++] = strdup("gl_NextBuffer");
last_buffer = so->output[i].output_buffer;
buf_offset = 0;
}
skip = so->output[i].dst_offset - buf_offset;
while (skip) {
start_skip = get_skip_str(&skip);
if (start_skip)
varyings[n_outputs++] = start_skip;
}
buf_offset = so->output[i].dst_offset;
buf_offset += so->output[i].num_components;
if (sinfo->so_names[i])
varyings[n_outputs++] = strdup(sinfo->so_names[i]);
}
skip = so->stride[last_buffer] - buf_offset;
while (skip) {
start_skip = get_skip_str(&skip);
if (start_skip)
varyings[n_outputs++] = start_skip;
}
glTransformFeedbackVaryings(prog_id, n_outputs,
(const GLchar **)varyings, GL_INTERLEAVED_ATTRIBS_EXT);
for (i = 0; i < n_outputs; i++)
if (varyings[i])
free(varyings[i]);
}
|
DoS
| 0
|
static void set_stream_out_varyings(int prog_id, struct vrend_shader_info *sinfo)
{
struct pipe_stream_output_info *so = &sinfo->so_info;
char *varyings[PIPE_MAX_SHADER_OUTPUTS*2];
int i, j;
int n_outputs = 0;
int last_buffer = 0;
char *start_skip;
int buf_offset = 0;
int skip;
if (!so->num_outputs)
return;
if (vrend_dump_shaders)
dump_stream_out(so);
for (i = 0; i < so->num_outputs; i++) {
if (last_buffer != so->output[i].output_buffer) {
skip = so->stride[last_buffer] - buf_offset;
while (skip) {
start_skip = get_skip_str(&skip);
if (start_skip)
varyings[n_outputs++] = start_skip;
}
for (j = last_buffer; j < so->output[i].output_buffer; j++)
varyings[n_outputs++] = strdup("gl_NextBuffer");
last_buffer = so->output[i].output_buffer;
buf_offset = 0;
}
skip = so->output[i].dst_offset - buf_offset;
while (skip) {
start_skip = get_skip_str(&skip);
if (start_skip)
varyings[n_outputs++] = start_skip;
}
buf_offset = so->output[i].dst_offset;
buf_offset += so->output[i].num_components;
if (sinfo->so_names[i])
varyings[n_outputs++] = strdup(sinfo->so_names[i]);
}
skip = so->stride[last_buffer] - buf_offset;
while (skip) {
start_skip = get_skip_str(&skip);
if (start_skip)
varyings[n_outputs++] = start_skip;
}
glTransformFeedbackVaryings(prog_id, n_outputs,
(const GLchar **)varyings, GL_INTERLEAVED_ATTRIBS_EXT);
for (i = 0; i < n_outputs; i++)
if (varyings[i])
free(varyings[i]);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,026
|
static inline bool should_invert_viewport(struct vrend_context *ctx)
{
/* if we have a negative viewport then gallium wanted to invert it,
however since we are rendering to GL FBOs we need to invert it
again unless we are rendering upside down already
- confused?
so if gallium asks for a negative viewport */
return !(ctx->sub->viewport_is_negative ^ ctx->sub->inverted_fbo_content);
}
|
DoS
| 0
|
static inline bool should_invert_viewport(struct vrend_context *ctx)
{
/* if we have a negative viewport then gallium wanted to invert it,
however since we are rendering to GL FBOs we need to invert it
again unless we are rendering upside down already
- confused?
so if gallium asks for a negative viewport */
return !(ctx->sub->viewport_is_negative ^ ctx->sub->inverted_fbo_content);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,027
|
static int thread_sync(void *arg)
{
virgl_gl_context gl_context = vrend_state.sync_context;
struct vrend_fence *fence, *stor;
pipe_mutex_lock(vrend_state.fence_mutex);
vrend_clicbs->make_current(0, gl_context);
while (!vrend_state.stop_sync_thread) {
if (LIST_IS_EMPTY(&vrend_state.fence_wait_list) &&
pipe_condvar_wait(vrend_state.fence_cond, vrend_state.fence_mutex) != 0) {
fprintf(stderr, "error while waiting on condition\n");
break;
}
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_wait_list, fences) {
if (vrend_state.stop_sync_thread)
break;
list_del(&fence->fences);
pipe_mutex_unlock(vrend_state.fence_mutex);
wait_sync(fence);
pipe_mutex_lock(vrend_state.fence_mutex);
}
}
vrend_clicbs->make_current(0, 0);
vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
pipe_mutex_unlock(vrend_state.fence_mutex);
return 0;
}
|
DoS
| 0
|
static int thread_sync(void *arg)
{
virgl_gl_context gl_context = vrend_state.sync_context;
struct vrend_fence *fence, *stor;
pipe_mutex_lock(vrend_state.fence_mutex);
vrend_clicbs->make_current(0, gl_context);
while (!vrend_state.stop_sync_thread) {
if (LIST_IS_EMPTY(&vrend_state.fence_wait_list) &&
pipe_condvar_wait(vrend_state.fence_cond, vrend_state.fence_mutex) != 0) {
fprintf(stderr, "error while waiting on condition\n");
break;
}
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_wait_list, fences) {
if (vrend_state.stop_sync_thread)
break;
list_del(&fence->fences);
pipe_mutex_unlock(vrend_state.fence_mutex);
wait_sync(fence);
pipe_mutex_lock(vrend_state.fence_mutex);
}
}
vrend_clicbs->make_current(0, 0);
vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
pipe_mutex_unlock(vrend_state.fence_mutex);
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,028
|
static inline GLenum to_gl_swizzle(int swizzle)
{
switch (swizzle) {
case PIPE_SWIZZLE_RED: return GL_RED;
case PIPE_SWIZZLE_GREEN: return GL_GREEN;
case PIPE_SWIZZLE_BLUE: return GL_BLUE;
case PIPE_SWIZZLE_ALPHA: return GL_ALPHA;
case PIPE_SWIZZLE_ZERO: return GL_ZERO;
case PIPE_SWIZZLE_ONE: return GL_ONE;
default:
assert(0);
return 0;
}
}
|
DoS
| 0
|
static inline GLenum to_gl_swizzle(int swizzle)
{
switch (swizzle) {
case PIPE_SWIZZLE_RED: return GL_RED;
case PIPE_SWIZZLE_GREEN: return GL_GREEN;
case PIPE_SWIZZLE_BLUE: return GL_BLUE;
case PIPE_SWIZZLE_ALPHA: return GL_ALPHA;
case PIPE_SWIZZLE_ZERO: return GL_ZERO;
case PIPE_SWIZZLE_ONE: return GL_ONE;
default:
assert(0);
return 0;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,029
|
static GLenum translate_blend_factor(uint32_t pipe_factor)
{
switch (pipe_factor) {
case PIPE_BLENDFACTOR_ONE: return GL_ONE;
case PIPE_BLENDFACTOR_SRC_COLOR: return GL_SRC_COLOR;
case PIPE_BLENDFACTOR_SRC_ALPHA: return GL_SRC_ALPHA;
case PIPE_BLENDFACTOR_DST_COLOR: return GL_DST_COLOR;
case PIPE_BLENDFACTOR_DST_ALPHA: return GL_DST_ALPHA;
case PIPE_BLENDFACTOR_CONST_COLOR: return GL_CONSTANT_COLOR;
case PIPE_BLENDFACTOR_CONST_ALPHA: return GL_CONSTANT_ALPHA;
case PIPE_BLENDFACTOR_SRC1_COLOR: return GL_SRC1_COLOR;
case PIPE_BLENDFACTOR_SRC1_ALPHA: return GL_SRC1_ALPHA;
case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE: return GL_SRC_ALPHA_SATURATE;
case PIPE_BLENDFACTOR_ZERO: return GL_ZERO;
case PIPE_BLENDFACTOR_INV_SRC_COLOR: return GL_ONE_MINUS_SRC_COLOR;
case PIPE_BLENDFACTOR_INV_SRC_ALPHA: return GL_ONE_MINUS_SRC_ALPHA;
case PIPE_BLENDFACTOR_INV_DST_COLOR: return GL_ONE_MINUS_DST_COLOR;
case PIPE_BLENDFACTOR_INV_DST_ALPHA: return GL_ONE_MINUS_DST_ALPHA;
case PIPE_BLENDFACTOR_INV_CONST_COLOR: return GL_ONE_MINUS_CONSTANT_COLOR;
case PIPE_BLENDFACTOR_INV_CONST_ALPHA: return GL_ONE_MINUS_CONSTANT_ALPHA;
case PIPE_BLENDFACTOR_INV_SRC1_COLOR: return GL_ONE_MINUS_SRC1_COLOR;
case PIPE_BLENDFACTOR_INV_SRC1_ALPHA: return GL_ONE_MINUS_SRC1_ALPHA;
default:
assert("invalid blend token()" == NULL);
return 0;
}
}
|
DoS
| 0
|
static GLenum translate_blend_factor(uint32_t pipe_factor)
{
switch (pipe_factor) {
case PIPE_BLENDFACTOR_ONE: return GL_ONE;
case PIPE_BLENDFACTOR_SRC_COLOR: return GL_SRC_COLOR;
case PIPE_BLENDFACTOR_SRC_ALPHA: return GL_SRC_ALPHA;
case PIPE_BLENDFACTOR_DST_COLOR: return GL_DST_COLOR;
case PIPE_BLENDFACTOR_DST_ALPHA: return GL_DST_ALPHA;
case PIPE_BLENDFACTOR_CONST_COLOR: return GL_CONSTANT_COLOR;
case PIPE_BLENDFACTOR_CONST_ALPHA: return GL_CONSTANT_ALPHA;
case PIPE_BLENDFACTOR_SRC1_COLOR: return GL_SRC1_COLOR;
case PIPE_BLENDFACTOR_SRC1_ALPHA: return GL_SRC1_ALPHA;
case PIPE_BLENDFACTOR_SRC_ALPHA_SATURATE: return GL_SRC_ALPHA_SATURATE;
case PIPE_BLENDFACTOR_ZERO: return GL_ZERO;
case PIPE_BLENDFACTOR_INV_SRC_COLOR: return GL_ONE_MINUS_SRC_COLOR;
case PIPE_BLENDFACTOR_INV_SRC_ALPHA: return GL_ONE_MINUS_SRC_ALPHA;
case PIPE_BLENDFACTOR_INV_DST_COLOR: return GL_ONE_MINUS_DST_COLOR;
case PIPE_BLENDFACTOR_INV_DST_ALPHA: return GL_ONE_MINUS_DST_ALPHA;
case PIPE_BLENDFACTOR_INV_CONST_COLOR: return GL_ONE_MINUS_CONSTANT_COLOR;
case PIPE_BLENDFACTOR_INV_CONST_ALPHA: return GL_ONE_MINUS_CONSTANT_ALPHA;
case PIPE_BLENDFACTOR_INV_SRC1_COLOR: return GL_ONE_MINUS_SRC1_COLOR;
case PIPE_BLENDFACTOR_INV_SRC1_ALPHA: return GL_ONE_MINUS_SRC1_ALPHA;
default:
assert("invalid blend token()" == NULL);
return 0;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,030
|
static GLenum translate_blend_func(uint32_t pipe_blend)
{
switch(pipe_blend){
case PIPE_BLEND_ADD: return GL_FUNC_ADD;
case PIPE_BLEND_SUBTRACT: return GL_FUNC_SUBTRACT;
case PIPE_BLEND_REVERSE_SUBTRACT: return GL_FUNC_REVERSE_SUBTRACT;
case PIPE_BLEND_MIN: return GL_MIN;
case PIPE_BLEND_MAX: return GL_MAX;
default:
assert("invalid blend token()" == NULL);
return 0;
}
}
|
DoS
| 0
|
static GLenum translate_blend_func(uint32_t pipe_blend)
{
switch(pipe_blend){
case PIPE_BLEND_ADD: return GL_FUNC_ADD;
case PIPE_BLEND_SUBTRACT: return GL_FUNC_SUBTRACT;
case PIPE_BLEND_REVERSE_SUBTRACT: return GL_FUNC_REVERSE_SUBTRACT;
case PIPE_BLEND_MIN: return GL_MIN;
case PIPE_BLEND_MAX: return GL_MAX;
default:
assert("invalid blend token()" == NULL);
return 0;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,031
|
translate_logicop(GLuint pipe_logicop)
{
switch (pipe_logicop) {
#define CASE(x) case PIPE_LOGICOP_##x: return GL_##x
CASE(CLEAR);
CASE(NOR);
CASE(AND_INVERTED);
CASE(COPY_INVERTED);
CASE(AND_REVERSE);
CASE(INVERT);
CASE(XOR);
CASE(NAND);
CASE(AND);
CASE(EQUIV);
CASE(NOOP);
CASE(OR_INVERTED);
CASE(COPY);
CASE(OR_REVERSE);
CASE(OR);
CASE(SET);
default:
assert("invalid logicop token()" == NULL);
return 0;
}
#undef CASE
}
|
DoS
| 0
|
translate_logicop(GLuint pipe_logicop)
{
switch (pipe_logicop) {
#define CASE(x) case PIPE_LOGICOP_##x: return GL_##x
CASE(CLEAR);
CASE(NOR);
CASE(AND_INVERTED);
CASE(COPY_INVERTED);
CASE(AND_REVERSE);
CASE(INVERT);
CASE(XOR);
CASE(NAND);
CASE(AND);
CASE(EQUIV);
CASE(NOOP);
CASE(OR_INVERTED);
CASE(COPY);
CASE(OR_REVERSE);
CASE(OR);
CASE(SET);
default:
assert("invalid logicop token()" == NULL);
return 0;
}
#undef CASE
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,032
|
translate_stencil_op(GLuint op)
{
switch (op) {
#define CASE(x) case PIPE_STENCIL_OP_##x: return GL_##x
CASE(KEEP);
CASE(ZERO);
CASE(REPLACE);
CASE(INCR);
CASE(DECR);
CASE(INCR_WRAP);
CASE(DECR_WRAP);
CASE(INVERT);
default:
assert("invalid stencilop token()" == NULL);
return 0;
}
#undef CASE
}
|
DoS
| 0
|
translate_stencil_op(GLuint op)
{
switch (op) {
#define CASE(x) case PIPE_STENCIL_OP_##x: return GL_##x
CASE(KEEP);
CASE(ZERO);
CASE(REPLACE);
CASE(INCR);
CASE(DECR);
CASE(INCR_WRAP);
CASE(DECR_WRAP);
CASE(INVERT);
default:
assert("invalid stencilop token()" == NULL);
return 0;
}
#undef CASE
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,033
|
static void vrend_alpha_test_enable(struct vrend_context *ctx, bool alpha_test_enable)
{
if (vrend_state.use_core_profile) {
/* handled in shaders */
return;
}
if (ctx->sub->alpha_test_enabled != alpha_test_enable) {
ctx->sub->alpha_test_enabled = alpha_test_enable;
if (alpha_test_enable)
glEnable(GL_ALPHA_TEST);
else
glDisable(GL_ALPHA_TEST);
}
}
|
DoS
| 0
|
static void vrend_alpha_test_enable(struct vrend_context *ctx, bool alpha_test_enable)
{
if (vrend_state.use_core_profile) {
/* handled in shaders */
return;
}
if (ctx->sub->alpha_test_enabled != alpha_test_enable) {
ctx->sub->alpha_test_enabled = alpha_test_enable;
if (alpha_test_enable)
glEnable(GL_ALPHA_TEST);
else
glDisable(GL_ALPHA_TEST);
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,034
|
static void vrend_apply_sampler_state(struct vrend_context *ctx,
struct vrend_resource *res,
uint32_t shader_type,
int id,
int sampler_id,
uint32_t srgb_decode)
{
struct vrend_texture *tex = (struct vrend_texture *)res;
struct vrend_sampler_state *vstate = ctx->sub->sampler_state[shader_type][id];
struct pipe_sampler_state *state = &vstate->base;
bool set_all = false;
GLenum target = tex->base.target;
if (!state) {
fprintf(stderr, "cannot find sampler state for %d %d\n", shader_type, id);
return;
}
if (res->base.nr_samples > 1) {
tex->state = *state;
return;
}
if (target == GL_TEXTURE_BUFFER) {
tex->state = *state;
return;
}
if (vrend_state.have_samplers) {
glBindSampler(sampler_id, vstate->id);
glSamplerParameteri(vstate->id, GL_TEXTURE_SRGB_DECODE_EXT,
srgb_decode);
return;
}
if (tex->state.max_lod == -1)
set_all = true;
if (tex->state.wrap_s != state->wrap_s || set_all)
glTexParameteri(target, GL_TEXTURE_WRAP_S, convert_wrap(state->wrap_s));
if (tex->state.wrap_t != state->wrap_t || set_all)
glTexParameteri(target, GL_TEXTURE_WRAP_T, convert_wrap(state->wrap_t));
if (tex->state.wrap_r != state->wrap_r || set_all)
glTexParameteri(target, GL_TEXTURE_WRAP_R, convert_wrap(state->wrap_r));
if (tex->state.min_img_filter != state->min_img_filter ||
tex->state.min_mip_filter != state->min_mip_filter || set_all)
glTexParameterf(target, GL_TEXTURE_MIN_FILTER, convert_min_filter(state->min_img_filter, state->min_mip_filter));
if (tex->state.mag_img_filter != state->mag_img_filter || set_all)
glTexParameterf(target, GL_TEXTURE_MAG_FILTER, convert_mag_filter(state->mag_img_filter));
if (res->target != GL_TEXTURE_RECTANGLE) {
if (tex->state.min_lod != state->min_lod || set_all)
glTexParameterf(target, GL_TEXTURE_MIN_LOD, state->min_lod);
if (tex->state.max_lod != state->max_lod || set_all)
glTexParameterf(target, GL_TEXTURE_MAX_LOD, state->max_lod);
if (tex->state.lod_bias != state->lod_bias || set_all)
glTexParameterf(target, GL_TEXTURE_LOD_BIAS, state->lod_bias);
}
if (tex->state.compare_mode != state->compare_mode || set_all)
glTexParameteri(target, GL_TEXTURE_COMPARE_MODE, state->compare_mode ? GL_COMPARE_R_TO_TEXTURE : GL_NONE);
if (tex->state.compare_func != state->compare_func || set_all)
glTexParameteri(target, GL_TEXTURE_COMPARE_FUNC, GL_NEVER + state->compare_func);
if (memcmp(&tex->state.border_color, &state->border_color, 16) || set_all)
glTexParameterIuiv(target, GL_TEXTURE_BORDER_COLOR, state->border_color.ui);
tex->state = *state;
}
|
DoS
| 0
|
static void vrend_apply_sampler_state(struct vrend_context *ctx,
struct vrend_resource *res,
uint32_t shader_type,
int id,
int sampler_id,
uint32_t srgb_decode)
{
struct vrend_texture *tex = (struct vrend_texture *)res;
struct vrend_sampler_state *vstate = ctx->sub->sampler_state[shader_type][id];
struct pipe_sampler_state *state = &vstate->base;
bool set_all = false;
GLenum target = tex->base.target;
if (!state) {
fprintf(stderr, "cannot find sampler state for %d %d\n", shader_type, id);
return;
}
if (res->base.nr_samples > 1) {
tex->state = *state;
return;
}
if (target == GL_TEXTURE_BUFFER) {
tex->state = *state;
return;
}
if (vrend_state.have_samplers) {
glBindSampler(sampler_id, vstate->id);
glSamplerParameteri(vstate->id, GL_TEXTURE_SRGB_DECODE_EXT,
srgb_decode);
return;
}
if (tex->state.max_lod == -1)
set_all = true;
if (tex->state.wrap_s != state->wrap_s || set_all)
glTexParameteri(target, GL_TEXTURE_WRAP_S, convert_wrap(state->wrap_s));
if (tex->state.wrap_t != state->wrap_t || set_all)
glTexParameteri(target, GL_TEXTURE_WRAP_T, convert_wrap(state->wrap_t));
if (tex->state.wrap_r != state->wrap_r || set_all)
glTexParameteri(target, GL_TEXTURE_WRAP_R, convert_wrap(state->wrap_r));
if (tex->state.min_img_filter != state->min_img_filter ||
tex->state.min_mip_filter != state->min_mip_filter || set_all)
glTexParameterf(target, GL_TEXTURE_MIN_FILTER, convert_min_filter(state->min_img_filter, state->min_mip_filter));
if (tex->state.mag_img_filter != state->mag_img_filter || set_all)
glTexParameterf(target, GL_TEXTURE_MAG_FILTER, convert_mag_filter(state->mag_img_filter));
if (res->target != GL_TEXTURE_RECTANGLE) {
if (tex->state.min_lod != state->min_lod || set_all)
glTexParameterf(target, GL_TEXTURE_MIN_LOD, state->min_lod);
if (tex->state.max_lod != state->max_lod || set_all)
glTexParameterf(target, GL_TEXTURE_MAX_LOD, state->max_lod);
if (tex->state.lod_bias != state->lod_bias || set_all)
glTexParameterf(target, GL_TEXTURE_LOD_BIAS, state->lod_bias);
}
if (tex->state.compare_mode != state->compare_mode || set_all)
glTexParameteri(target, GL_TEXTURE_COMPARE_MODE, state->compare_mode ? GL_COMPARE_R_TO_TEXTURE : GL_NONE);
if (tex->state.compare_func != state->compare_func || set_all)
glTexParameteri(target, GL_TEXTURE_COMPARE_FUNC, GL_NEVER + state->compare_func);
if (memcmp(&tex->state.border_color, &state->border_color, 16) || set_all)
glTexParameterIuiv(target, GL_TEXTURE_BORDER_COLOR, state->border_color.ui);
tex->state = *state;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,035
|
void vrend_begin_query(struct vrend_context *ctx, uint32_t handle)
{
struct vrend_query *q;
q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY);
if (!q)
return;
if (q->gltype == GL_TIMESTAMP)
return;
glBeginQuery(q->gltype, q->id);
}
|
DoS
| 0
|
void vrend_begin_query(struct vrend_context *ctx, uint32_t handle)
{
struct vrend_query *q;
q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY);
if (!q)
return;
if (q->gltype == GL_TIMESTAMP)
return;
glBeginQuery(q->gltype, q->id);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,036
|
void vrend_bind_sampler_states(struct vrend_context *ctx,
uint32_t shader_type,
uint32_t start_slot,
uint32_t num_states,
uint32_t *handles)
{
int i;
struct vrend_sampler_state *state;
if (shader_type >= PIPE_SHADER_TYPES) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, shader_type);
return;
}
if (num_states > PIPE_MAX_SAMPLERS ||
start_slot > (PIPE_MAX_SAMPLERS - num_states)) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, num_states);
return;
}
ctx->sub->num_sampler_states[shader_type] = num_states;
for (i = 0; i < num_states; i++) {
if (handles[i] == 0)
state = NULL;
else
state = vrend_object_lookup(ctx->sub->object_hash, handles[i], VIRGL_OBJECT_SAMPLER_STATE);
ctx->sub->sampler_state[shader_type][i + start_slot] = state;
}
ctx->sub->sampler_state_dirty = true;
}
|
DoS
| 0
|
void vrend_bind_sampler_states(struct vrend_context *ctx,
uint32_t shader_type,
uint32_t start_slot,
uint32_t num_states,
uint32_t *handles)
{
int i;
struct vrend_sampler_state *state;
if (shader_type >= PIPE_SHADER_TYPES) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, shader_type);
return;
}
if (num_states > PIPE_MAX_SAMPLERS ||
start_slot > (PIPE_MAX_SAMPLERS - num_states)) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, num_states);
return;
}
ctx->sub->num_sampler_states[shader_type] = num_states;
for (i = 0; i < num_states; i++) {
if (handles[i] == 0)
state = NULL;
else
state = vrend_object_lookup(ctx->sub->object_hash, handles[i], VIRGL_OBJECT_SAMPLER_STATE);
ctx->sub->sampler_state[shader_type][i + start_slot] = state;
}
ctx->sub->sampler_state_dirty = true;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,037
|
void vrend_bind_shader(struct vrend_context *ctx,
uint32_t handle, uint32_t type)
{
struct vrend_shader_selector *sel;
if (type > PIPE_SHADER_GEOMETRY)
return;
if (handle == 0) {
ctx->sub->shader_dirty = true;
vrend_shader_state_reference(&ctx->sub->shaders[type], NULL);
return;
}
sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
if (!sel)
return;
if (sel->type != type)
return;
if (ctx->sub->shaders[sel->type] != sel)
ctx->sub->shader_dirty = true;
vrend_shader_state_reference(&ctx->sub->shaders[sel->type], sel);
}
|
DoS
| 0
|
void vrend_bind_shader(struct vrend_context *ctx,
uint32_t handle, uint32_t type)
{
struct vrend_shader_selector *sel;
if (type > PIPE_SHADER_GEOMETRY)
return;
if (handle == 0) {
ctx->sub->shader_dirty = true;
vrend_shader_state_reference(&ctx->sub->shaders[type], NULL);
return;
}
sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
if (!sel)
return;
if (sel->type != type)
return;
if (ctx->sub->shaders[sel->type] != sel)
ctx->sub->shader_dirty = true;
vrend_shader_state_reference(&ctx->sub->shaders[sel->type], sel);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,038
|
void vrend_bind_vertex_elements_state(struct vrend_context *ctx,
uint32_t handle)
{
struct vrend_vertex_element_array *v;
if (!handle) {
ctx->sub->ve = NULL;
return;
}
v = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
if (!v) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
return;
}
if (ctx->sub->ve != v)
ctx->sub->vbo_dirty = true;
ctx->sub->ve = v;
}
|
DoS
| 0
|
void vrend_bind_vertex_elements_state(struct vrend_context *ctx,
uint32_t handle)
{
struct vrend_vertex_element_array *v;
if (!handle) {
ctx->sub->ve = NULL;
return;
}
v = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_VERTEX_ELEMENTS);
if (!v) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
return;
}
if (ctx->sub->ve != v)
ctx->sub->vbo_dirty = true;
ctx->sub->ve = v;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,039
|
static void vrend_blend_enable(struct vrend_context *ctx, bool blend_enable)
{
if (ctx->sub->blend_enabled != blend_enable) {
ctx->sub->blend_enabled = blend_enable;
if (blend_enable)
glEnable(GL_BLEND);
else
glDisable(GL_BLEND);
}
}
|
DoS
| 0
|
static void vrend_blend_enable(struct vrend_context *ctx, bool blend_enable)
{
if (ctx->sub->blend_enabled != blend_enable) {
ctx->sub->blend_enabled = blend_enable;
if (blend_enable)
glEnable(GL_BLEND);
else
glDisable(GL_BLEND);
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,040
|
static bool vrend_check_query(struct vrend_query *query)
{
uint64_t result;
struct virgl_host_query_state *state;
bool ret;
ret = vrend_get_one_query_result(query->id, false, &result);
if (ret == false)
return false;
state = (struct virgl_host_query_state *)query->res->ptr;
state->result = result;
state->query_state = VIRGL_QUERY_STATE_DONE;
return true;
}
|
DoS
| 0
|
static bool vrend_check_query(struct vrend_query *query)
{
uint64_t result;
struct virgl_host_query_state *state;
bool ret;
ret = vrend_get_one_query_result(query->id, false, &result);
if (ret == false)
return false;
state = (struct virgl_host_query_state *)query->res->ptr;
state->result = result;
state->query_state = VIRGL_QUERY_STATE_DONE;
return true;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,041
|
static bool vrend_compile_shader(struct vrend_context *ctx,
struct vrend_shader *shader)
{
GLint param;
glShaderSource(shader->id, 1, (const char **)&shader->glsl_prog, NULL);
glCompileShader(shader->id);
glGetShaderiv(shader->id, GL_COMPILE_STATUS, ¶m);
if (param == GL_FALSE) {
char infolog[65536];
int len;
glGetShaderInfoLog(shader->id, 65536, &len, infolog);
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
fprintf(stderr,"shader failed to compile\n%s\n", infolog);
fprintf(stderr,"GLSL:\n%s\n", shader->glsl_prog);
return false;
}
return true;
}
|
DoS
| 0
|
static bool vrend_compile_shader(struct vrend_context *ctx,
struct vrend_shader *shader)
{
GLint param;
glShaderSource(shader->id, 1, (const char **)&shader->glsl_prog, NULL);
glCompileShader(shader->id);
glGetShaderiv(shader->id, GL_COMPILE_STATUS, ¶m);
if (param == GL_FALSE) {
char infolog[65536];
int len;
glGetShaderInfoLog(shader->id, 65536, &len, infolog);
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SHADER, 0);
fprintf(stderr,"shader failed to compile\n%s\n", infolog);
fprintf(stderr,"GLSL:\n%s\n", shader->glsl_prog);
return false;
}
return true;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,042
|
struct vrend_context *vrend_create_context(int id, uint32_t nlen, const char *debug_name)
{
struct vrend_context *grctx = CALLOC_STRUCT(vrend_context);
if (!grctx)
return NULL;
if (nlen && debug_name) {
strncpy(grctx->debug_name, debug_name, 64);
}
grctx->ctx_id = id;
list_inithead(&grctx->sub_ctxs);
list_inithead(&grctx->active_nontimer_query_list);
grctx->res_hash = vrend_object_init_ctx_table();
grctx->shader_cfg.use_core_profile = vrend_state.use_core_profile;
grctx->shader_cfg.use_explicit_locations = vrend_state.use_explicit_locations;
vrend_renderer_create_sub_ctx(grctx, 0);
vrend_renderer_set_sub_ctx(grctx, 0);
vrender_get_glsl_version(&grctx->shader_cfg.glsl_version);
list_addtail(&grctx->ctx_entry, &vrend_state.active_ctx_list);
return grctx;
}
|
DoS
| 0
|
struct vrend_context *vrend_create_context(int id, uint32_t nlen, const char *debug_name)
{
struct vrend_context *grctx = CALLOC_STRUCT(vrend_context);
if (!grctx)
return NULL;
if (nlen && debug_name) {
strncpy(grctx->debug_name, debug_name, 64);
}
grctx->ctx_id = id;
list_inithead(&grctx->sub_ctxs);
list_inithead(&grctx->active_nontimer_query_list);
grctx->res_hash = vrend_object_init_ctx_table();
grctx->shader_cfg.use_core_profile = vrend_state.use_core_profile;
grctx->shader_cfg.use_explicit_locations = vrend_state.use_explicit_locations;
vrend_renderer_create_sub_ctx(grctx, 0);
vrend_renderer_set_sub_ctx(grctx, 0);
vrender_get_glsl_version(&grctx->shader_cfg.glsl_version);
list_addtail(&grctx->ctx_entry, &vrend_state.active_ctx_list);
return grctx;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,043
|
int vrend_create_query(struct vrend_context *ctx, uint32_t handle,
uint32_t query_type, uint32_t query_index,
uint32_t res_handle, uint32_t offset)
{
struct vrend_query *q;
struct vrend_resource *res;
uint32_t ret_handle;
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return EINVAL;
}
q = CALLOC_STRUCT(vrend_query);
if (!q)
return ENOMEM;
list_inithead(&q->waiting_queries);
q->type = query_type;
q->index = query_index;
q->ctx_id = ctx->ctx_id;
vrend_resource_reference(&q->res, res);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
q->gltype = GL_SAMPLES_PASSED_ARB;
break;
case PIPE_QUERY_OCCLUSION_PREDICATE:
q->gltype = GL_ANY_SAMPLES_PASSED;
break;
case PIPE_QUERY_TIMESTAMP:
q->gltype = GL_TIMESTAMP;
break;
case PIPE_QUERY_TIME_ELAPSED:
q->gltype = GL_TIME_ELAPSED;
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
q->gltype = GL_PRIMITIVES_GENERATED;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
q->gltype = GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN;
break;
default:
fprintf(stderr,"unknown query object received %d\n", q->type);
break;
}
glGenQueries(1, &q->id);
ret_handle = vrend_renderer_object_insert(ctx, q, sizeof(struct vrend_query), handle,
VIRGL_OBJECT_QUERY);
if (!ret_handle) {
FREE(q);
return ENOMEM;
}
return 0;
}
|
DoS
| 0
|
int vrend_create_query(struct vrend_context *ctx, uint32_t handle,
uint32_t query_type, uint32_t query_index,
uint32_t res_handle, uint32_t offset)
{
struct vrend_query *q;
struct vrend_resource *res;
uint32_t ret_handle;
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return EINVAL;
}
q = CALLOC_STRUCT(vrend_query);
if (!q)
return ENOMEM;
list_inithead(&q->waiting_queries);
q->type = query_type;
q->index = query_index;
q->ctx_id = ctx->ctx_id;
vrend_resource_reference(&q->res, res);
switch (q->type) {
case PIPE_QUERY_OCCLUSION_COUNTER:
q->gltype = GL_SAMPLES_PASSED_ARB;
break;
case PIPE_QUERY_OCCLUSION_PREDICATE:
q->gltype = GL_ANY_SAMPLES_PASSED;
break;
case PIPE_QUERY_TIMESTAMP:
q->gltype = GL_TIMESTAMP;
break;
case PIPE_QUERY_TIME_ELAPSED:
q->gltype = GL_TIME_ELAPSED;
break;
case PIPE_QUERY_PRIMITIVES_GENERATED:
q->gltype = GL_PRIMITIVES_GENERATED;
break;
case PIPE_QUERY_PRIMITIVES_EMITTED:
q->gltype = GL_TRANSFORM_FEEDBACK_PRIMITIVES_WRITTEN;
break;
default:
fprintf(stderr,"unknown query object received %d\n", q->type);
break;
}
glGenQueries(1, &q->id);
ret_handle = vrend_renderer_object_insert(ctx, q, sizeof(struct vrend_query), handle,
VIRGL_OBJECT_QUERY);
if (!ret_handle) {
FREE(q);
return ENOMEM;
}
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,044
|
int vrend_create_sampler_view(struct vrend_context *ctx,
uint32_t handle,
uint32_t res_handle, uint32_t format,
uint32_t val0, uint32_t val1, uint32_t swizzle_packed)
{
struct vrend_sampler_view *view;
struct vrend_resource *res;
int ret_handle;
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return EINVAL;
}
view = CALLOC_STRUCT(vrend_sampler_view);
if (!view)
return ENOMEM;
pipe_reference_init(&view->reference, 1);
view->format = format;
view->val0 = val0;
view->val1 = val1;
view->swizzle_r = swizzle_packed & 0x7;
view->swizzle_g = (swizzle_packed >> 3) & 0x7;
view->swizzle_b = (swizzle_packed >> 6) & 0x7;
view->swizzle_a = (swizzle_packed >> 9) & 0x7;
view->cur_base = -1;
view->cur_max = 10000;
vrend_resource_reference(&view->texture, res);
view->srgb_decode = GL_DECODE_EXT;
if (view->format != view->texture->base.format) {
if (util_format_is_srgb(view->texture->base.format) &&
!util_format_is_srgb(view->format))
view->srgb_decode = GL_SKIP_DECODE_EXT;
}
view->gl_swizzle_a = to_gl_swizzle(view->swizzle_a);
view->gl_swizzle_r = to_gl_swizzle(view->swizzle_r);
view->gl_swizzle_g = to_gl_swizzle(view->swizzle_g);
view->gl_swizzle_b = to_gl_swizzle(view->swizzle_b);
if (!(util_format_has_alpha(format) || util_format_is_depth_or_stencil(format))) {
if (view->gl_swizzle_a == GL_ALPHA)
view->gl_swizzle_a = GL_ONE;
if (view->gl_swizzle_r == GL_ALPHA)
view->gl_swizzle_r = GL_ONE;
if (view->gl_swizzle_g == GL_ALPHA)
view->gl_swizzle_g = GL_ONE;
if (view->gl_swizzle_b == GL_ALPHA)
view->gl_swizzle_b = GL_ONE;
}
if (tex_conv_table[format].flags & VREND_BIND_NEED_SWIZZLE) {
view->gl_swizzle_r = to_gl_swizzle(tex_conv_table[format].swizzle[0]);
view->gl_swizzle_g = to_gl_swizzle(tex_conv_table[format].swizzle[1]);
view->gl_swizzle_b = to_gl_swizzle(tex_conv_table[format].swizzle[2]);
view->gl_swizzle_a = to_gl_swizzle(tex_conv_table[format].swizzle[3]);
}
ret_handle = vrend_renderer_object_insert(ctx, view, sizeof(*view), handle, VIRGL_OBJECT_SAMPLER_VIEW);
if (ret_handle == 0) {
FREE(view);
return ENOMEM;
}
return 0;
}
|
DoS
| 0
|
int vrend_create_sampler_view(struct vrend_context *ctx,
uint32_t handle,
uint32_t res_handle, uint32_t format,
uint32_t val0, uint32_t val1, uint32_t swizzle_packed)
{
struct vrend_sampler_view *view;
struct vrend_resource *res;
int ret_handle;
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return EINVAL;
}
view = CALLOC_STRUCT(vrend_sampler_view);
if (!view)
return ENOMEM;
pipe_reference_init(&view->reference, 1);
view->format = format;
view->val0 = val0;
view->val1 = val1;
view->swizzle_r = swizzle_packed & 0x7;
view->swizzle_g = (swizzle_packed >> 3) & 0x7;
view->swizzle_b = (swizzle_packed >> 6) & 0x7;
view->swizzle_a = (swizzle_packed >> 9) & 0x7;
view->cur_base = -1;
view->cur_max = 10000;
vrend_resource_reference(&view->texture, res);
view->srgb_decode = GL_DECODE_EXT;
if (view->format != view->texture->base.format) {
if (util_format_is_srgb(view->texture->base.format) &&
!util_format_is_srgb(view->format))
view->srgb_decode = GL_SKIP_DECODE_EXT;
}
view->gl_swizzle_a = to_gl_swizzle(view->swizzle_a);
view->gl_swizzle_r = to_gl_swizzle(view->swizzle_r);
view->gl_swizzle_g = to_gl_swizzle(view->swizzle_g);
view->gl_swizzle_b = to_gl_swizzle(view->swizzle_b);
if (!(util_format_has_alpha(format) || util_format_is_depth_or_stencil(format))) {
if (view->gl_swizzle_a == GL_ALPHA)
view->gl_swizzle_a = GL_ONE;
if (view->gl_swizzle_r == GL_ALPHA)
view->gl_swizzle_r = GL_ONE;
if (view->gl_swizzle_g == GL_ALPHA)
view->gl_swizzle_g = GL_ONE;
if (view->gl_swizzle_b == GL_ALPHA)
view->gl_swizzle_b = GL_ONE;
}
if (tex_conv_table[format].flags & VREND_BIND_NEED_SWIZZLE) {
view->gl_swizzle_r = to_gl_swizzle(tex_conv_table[format].swizzle[0]);
view->gl_swizzle_g = to_gl_swizzle(tex_conv_table[format].swizzle[1]);
view->gl_swizzle_b = to_gl_swizzle(tex_conv_table[format].swizzle[2]);
view->gl_swizzle_a = to_gl_swizzle(tex_conv_table[format].swizzle[3]);
}
ret_handle = vrend_renderer_object_insert(ctx, view, sizeof(*view), handle, VIRGL_OBJECT_SAMPLER_VIEW);
if (ret_handle == 0) {
FREE(view);
return ENOMEM;
}
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,045
|
static void *vrend_create_shader_state(struct vrend_context *ctx,
const struct pipe_stream_output_info *so_info,
unsigned pipe_shader_type)
{
struct vrend_shader_selector *sel = CALLOC_STRUCT(vrend_shader_selector);
if (!sel)
return NULL;
sel->type = pipe_shader_type;
sel->sinfo.so_info = *so_info;
pipe_reference_init(&sel->reference, 1);
return sel;
}
|
DoS
| 0
|
static void *vrend_create_shader_state(struct vrend_context *ctx,
const struct pipe_stream_output_info *so_info,
unsigned pipe_shader_type)
{
struct vrend_shader_selector *sel = CALLOC_STRUCT(vrend_shader_selector);
if (!sel)
return NULL;
sel->type = pipe_shader_type;
sel->sinfo.so_info = *so_info;
pipe_reference_init(&sel->reference, 1);
return sel;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,046
|
int vrend_create_so_target(struct vrend_context *ctx,
uint32_t handle,
uint32_t res_handle,
uint32_t buffer_offset,
uint32_t buffer_size)
{
struct vrend_so_target *target;
struct vrend_resource *res;
int ret_handle;
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return EINVAL;
}
target = CALLOC_STRUCT(vrend_so_target);
if (!target)
return ENOMEM;
pipe_reference_init(&target->reference, 1);
target->res_handle = res_handle;
target->buffer_offset = buffer_offset;
target->buffer_size = buffer_size;
target->sub_ctx = ctx->sub;
vrend_resource_reference(&target->buffer, res);
ret_handle = vrend_renderer_object_insert(ctx, target, sizeof(*target), handle,
VIRGL_OBJECT_STREAMOUT_TARGET);
if (ret_handle == 0) {
FREE(target);
return ENOMEM;
}
return 0;
}
|
DoS
| 0
|
int vrend_create_so_target(struct vrend_context *ctx,
uint32_t handle,
uint32_t res_handle,
uint32_t buffer_offset,
uint32_t buffer_size)
{
struct vrend_so_target *target;
struct vrend_resource *res;
int ret_handle;
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return EINVAL;
}
target = CALLOC_STRUCT(vrend_so_target);
if (!target)
return ENOMEM;
pipe_reference_init(&target->reference, 1);
target->res_handle = res_handle;
target->buffer_offset = buffer_offset;
target->buffer_size = buffer_size;
target->sub_ctx = ctx->sub;
vrend_resource_reference(&target->buffer, res);
ret_handle = vrend_renderer_object_insert(ctx, target, sizeof(*target), handle,
VIRGL_OBJECT_STREAMOUT_TARGET);
if (ret_handle == 0) {
FREE(target);
return ENOMEM;
}
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,047
|
int vrend_create_surface(struct vrend_context *ctx,
uint32_t handle,
uint32_t res_handle, uint32_t format,
uint32_t val0, uint32_t val1)
{
struct vrend_surface *surf;
struct vrend_resource *res;
uint32_t ret_handle;
if (format >= PIPE_FORMAT_COUNT) {
return EINVAL;
}
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return EINVAL;
}
surf = CALLOC_STRUCT(vrend_surface);
if (!surf)
return ENOMEM;
surf->res_handle = res_handle;
surf->format = format;
surf->val0 = val0;
surf->val1 = val1;
pipe_reference_init(&surf->reference, 1);
vrend_resource_reference(&surf->texture, res);
ret_handle = vrend_renderer_object_insert(ctx, surf, sizeof(*surf), handle, VIRGL_OBJECT_SURFACE);
if (ret_handle == 0) {
FREE(surf);
return ENOMEM;
}
return 0;
}
|
DoS
| 0
|
int vrend_create_surface(struct vrend_context *ctx,
uint32_t handle,
uint32_t res_handle, uint32_t format,
uint32_t val0, uint32_t val1)
{
struct vrend_surface *surf;
struct vrend_resource *res;
uint32_t ret_handle;
if (format >= PIPE_FORMAT_COUNT) {
return EINVAL;
}
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return EINVAL;
}
surf = CALLOC_STRUCT(vrend_surface);
if (!surf)
return ENOMEM;
surf->res_handle = res_handle;
surf->format = format;
surf->val0 = val0;
surf->val1 = val1;
pipe_reference_init(&surf->reference, 1);
vrend_resource_reference(&surf->texture, res);
ret_handle = vrend_renderer_object_insert(ctx, surf, sizeof(*surf), handle, VIRGL_OBJECT_SURFACE);
if (ret_handle == 0) {
FREE(surf);
return ENOMEM;
}
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,048
|
static void vrend_depth_test_enable(struct vrend_context *ctx, bool depth_test_enable)
{
if (ctx->sub->depth_test_enabled != depth_test_enable) {
ctx->sub->depth_test_enabled = depth_test_enable;
if (depth_test_enable)
glEnable(GL_DEPTH_TEST);
else
glDisable(GL_DEPTH_TEST);
}
}
|
DoS
| 0
|
static void vrend_depth_test_enable(struct vrend_context *ctx, bool depth_test_enable)
{
if (ctx->sub->depth_test_enabled != depth_test_enable) {
ctx->sub->depth_test_enabled = depth_test_enable;
if (depth_test_enable)
glEnable(GL_DEPTH_TEST);
else
glDisable(GL_DEPTH_TEST);
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,049
|
static void vrend_destroy_program(struct vrend_linked_shader_program *ent)
{
int i;
glDeleteProgram(ent->id);
list_del(&ent->head);
for (i = PIPE_SHADER_VERTEX; i <= PIPE_SHADER_GEOMETRY; i++) {
if (ent->ss[i])
list_del(&ent->sl[i]);
free(ent->shadow_samp_mask_locs[i]);
free(ent->shadow_samp_add_locs[i]);
free(ent->samp_locs[i]);
free(ent->const_locs[i]);
free(ent->ubo_locs[i]);
}
free(ent->attrib_locs);
free(ent);
}
|
DoS
| 0
|
static void vrend_destroy_program(struct vrend_linked_shader_program *ent)
{
int i;
glDeleteProgram(ent->id);
list_del(&ent->head);
for (i = PIPE_SHADER_VERTEX; i <= PIPE_SHADER_GEOMETRY; i++) {
if (ent->ss[i])
list_del(&ent->sl[i]);
free(ent->shadow_samp_mask_locs[i]);
free(ent->shadow_samp_add_locs[i]);
free(ent->samp_locs[i]);
free(ent->const_locs[i]);
free(ent->ubo_locs[i]);
}
free(ent->attrib_locs);
free(ent);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,050
|
static void vrend_destroy_query(struct vrend_query *query)
{
vrend_resource_reference(&query->res, NULL);
list_del(&query->waiting_queries);
glDeleteQueries(1, &query->id);
free(query);
}
|
DoS
| 0
|
static void vrend_destroy_query(struct vrend_query *query)
{
vrend_resource_reference(&query->res, NULL);
list_del(&query->waiting_queries);
glDeleteQueries(1, &query->id);
free(query);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,051
|
static void vrend_destroy_resource_object(void *obj_ptr)
{
struct vrend_resource *res = obj_ptr;
if (pipe_reference(&res->base.reference, NULL))
vrend_renderer_resource_destroy(res, false);
}
|
DoS
| 0
|
static void vrend_destroy_resource_object(void *obj_ptr)
{
struct vrend_resource *res = obj_ptr;
if (pipe_reference(&res->base.reference, NULL))
vrend_renderer_resource_destroy(res, false);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,052
|
static void vrend_destroy_sampler_state_object(void *obj_ptr)
{
struct vrend_sampler_state *state = obj_ptr;
glDeleteSamplers(1, &state->id);
FREE(state);
}
|
DoS
| 0
|
static void vrend_destroy_sampler_state_object(void *obj_ptr)
{
struct vrend_sampler_state *state = obj_ptr;
glDeleteSamplers(1, &state->id);
FREE(state);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,053
|
static void vrend_destroy_sampler_view(struct vrend_sampler_view *samp)
{
vrend_resource_reference(&samp->texture, NULL);
free(samp);
}
|
DoS
| 0
|
static void vrend_destroy_sampler_view(struct vrend_sampler_view *samp)
{
vrend_resource_reference(&samp->texture, NULL);
free(samp);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,054
|
static void vrend_destroy_sampler_view_object(void *obj_ptr)
{
struct vrend_sampler_view *samp = obj_ptr;
vrend_sampler_view_reference(&samp, NULL);
}
|
DoS
| 0
|
static void vrend_destroy_sampler_view_object(void *obj_ptr)
{
struct vrend_sampler_view *samp = obj_ptr;
vrend_sampler_view_reference(&samp, NULL);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,055
|
static void vrend_destroy_shader_object(void *obj_ptr)
{
struct vrend_shader_selector *state = obj_ptr;
vrend_shader_state_reference(&state, NULL);
}
|
DoS
| 0
|
static void vrend_destroy_shader_object(void *obj_ptr)
{
struct vrend_shader_selector *state = obj_ptr;
vrend_shader_state_reference(&state, NULL);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,056
|
static void vrend_destroy_shader_selector(struct vrend_shader_selector *sel)
{
struct vrend_shader *p = sel->current, *c;
int i;
while (p) {
c = p->next_variant;
vrend_shader_destroy(p);
p = c;
}
if (sel->sinfo.so_names)
for (i = 0; i < sel->sinfo.so_info.num_outputs; i++)
free(sel->sinfo.so_names[i]);
free(sel->tmp_buf);
free(sel->sinfo.so_names);
free(sel->sinfo.interpinfo);
free(sel->tokens);
free(sel);
}
|
DoS
| 0
|
static void vrend_destroy_shader_selector(struct vrend_shader_selector *sel)
{
struct vrend_shader *p = sel->current, *c;
int i;
while (p) {
c = p->next_variant;
vrend_shader_destroy(p);
p = c;
}
if (sel->sinfo.so_names)
for (i = 0; i < sel->sinfo.so_info.num_outputs; i++)
free(sel->sinfo.so_names[i]);
free(sel->tmp_buf);
free(sel->sinfo.so_names);
free(sel->sinfo.interpinfo);
free(sel->tokens);
free(sel);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,057
|
static void vrend_destroy_so_target(struct vrend_so_target *target)
{
vrend_resource_reference(&target->buffer, NULL);
free(target);
}
|
DoS
| 0
|
static void vrend_destroy_so_target(struct vrend_so_target *target)
{
vrend_resource_reference(&target->buffer, NULL);
free(target);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,058
|
static void vrend_destroy_so_target_object(void *obj_ptr)
{
struct vrend_so_target *target = obj_ptr;
struct vrend_sub_context *sub_ctx = target->sub_ctx;
struct vrend_streamout_object *obj, *tmp;
bool found;
int i;
LIST_FOR_EACH_ENTRY_SAFE(obj, tmp, &sub_ctx->streamout_list, head) {
found = false;
for (i = 0; i < obj->num_targets; i++) {
if (obj->so_targets[i] == target) {
found = true;
break;
}
}
if (found) {
if (obj == sub_ctx->current_so)
sub_ctx->current_so = NULL;
if (obj->xfb_state == XFB_STATE_PAUSED) {
if (vrend_state.have_tf2)
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id);
glEndTransformFeedback();
if (sub_ctx->current_so && vrend_state.have_tf2)
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, sub_ctx->current_so->id);
}
vrend_destroy_streamout_object(obj);
}
}
vrend_so_target_reference(&target, NULL);
}
|
DoS
| 0
|
static void vrend_destroy_so_target_object(void *obj_ptr)
{
struct vrend_so_target *target = obj_ptr;
struct vrend_sub_context *sub_ctx = target->sub_ctx;
struct vrend_streamout_object *obj, *tmp;
bool found;
int i;
LIST_FOR_EACH_ENTRY_SAFE(obj, tmp, &sub_ctx->streamout_list, head) {
found = false;
for (i = 0; i < obj->num_targets; i++) {
if (obj->so_targets[i] == target) {
found = true;
break;
}
}
if (found) {
if (obj == sub_ctx->current_so)
sub_ctx->current_so = NULL;
if (obj->xfb_state == XFB_STATE_PAUSED) {
if (vrend_state.have_tf2)
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id);
glEndTransformFeedback();
if (sub_ctx->current_so && vrend_state.have_tf2)
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, sub_ctx->current_so->id);
}
vrend_destroy_streamout_object(obj);
}
}
vrend_so_target_reference(&target, NULL);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,059
|
static void vrend_destroy_streamout_object(struct vrend_streamout_object *obj)
{
int i;
list_del(&obj->head);
for (i = 0; i < obj->num_targets; i++)
vrend_so_target_reference(&obj->so_targets[i], NULL);
if (vrend_state.have_tf2)
glDeleteTransformFeedbacks(1, &obj->id);
FREE(obj);
}
|
DoS
| 0
|
static void vrend_destroy_streamout_object(struct vrend_streamout_object *obj)
{
int i;
list_del(&obj->head);
for (i = 0; i < obj->num_targets; i++)
vrend_so_target_reference(&obj->so_targets[i], NULL);
if (vrend_state.have_tf2)
glDeleteTransformFeedbacks(1, &obj->id);
FREE(obj);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,060
|
static void vrend_destroy_sub_context(struct vrend_sub_context *sub)
{
int i, j;
struct vrend_streamout_object *obj, *tmp;
if (sub->fb_id)
glDeleteFramebuffers(1, &sub->fb_id);
if (sub->blit_fb_ids[0])
glDeleteFramebuffers(2, sub->blit_fb_ids);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
if (!vrend_state.have_vertex_attrib_binding) {
while (sub->enabled_attribs_bitmask) {
i = u_bit_scan(&sub->enabled_attribs_bitmask);
glDisableVertexAttribArray(i);
}
glDeleteVertexArrays(1, &sub->vaoid);
}
glBindVertexArray(0);
if (sub->current_so)
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0);
LIST_FOR_EACH_ENTRY_SAFE(obj, tmp, &sub->streamout_list, head) {
vrend_destroy_streamout_object(obj);
}
vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_VERTEX], NULL);
vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_FRAGMENT], NULL);
vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_GEOMETRY], NULL);
vrend_free_programs(sub);
for (i = 0; i < PIPE_SHADER_TYPES; i++) {
free(sub->consts[i].consts);
sub->consts[i].consts = NULL;
for (j = 0; j < PIPE_MAX_SHADER_SAMPLER_VIEWS; j++) {
vrend_sampler_view_reference(&sub->views[i].views[j], NULL);
}
}
if (sub->zsurf)
vrend_surface_reference(&sub->zsurf, NULL);
for (i = 0; i < sub->nr_cbufs; i++) {
if (!sub->surf[i])
continue;
vrend_surface_reference(&sub->surf[i], NULL);
}
vrend_resource_reference((struct vrend_resource **)&sub->ib.buffer, NULL);
vrend_object_fini_ctx_table(sub->object_hash);
vrend_clicbs->destroy_gl_context(sub->gl_context);
list_del(&sub->head);
FREE(sub);
}
|
DoS
| 0
|
static void vrend_destroy_sub_context(struct vrend_sub_context *sub)
{
int i, j;
struct vrend_streamout_object *obj, *tmp;
if (sub->fb_id)
glDeleteFramebuffers(1, &sub->fb_id);
if (sub->blit_fb_ids[0])
glDeleteFramebuffers(2, sub->blit_fb_ids);
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0);
if (!vrend_state.have_vertex_attrib_binding) {
while (sub->enabled_attribs_bitmask) {
i = u_bit_scan(&sub->enabled_attribs_bitmask);
glDisableVertexAttribArray(i);
}
glDeleteVertexArrays(1, &sub->vaoid);
}
glBindVertexArray(0);
if (sub->current_so)
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0);
LIST_FOR_EACH_ENTRY_SAFE(obj, tmp, &sub->streamout_list, head) {
vrend_destroy_streamout_object(obj);
}
vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_VERTEX], NULL);
vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_FRAGMENT], NULL);
vrend_shader_state_reference(&sub->shaders[PIPE_SHADER_GEOMETRY], NULL);
vrend_free_programs(sub);
for (i = 0; i < PIPE_SHADER_TYPES; i++) {
free(sub->consts[i].consts);
sub->consts[i].consts = NULL;
for (j = 0; j < PIPE_MAX_SHADER_SAMPLER_VIEWS; j++) {
vrend_sampler_view_reference(&sub->views[i].views[j], NULL);
}
}
if (sub->zsurf)
vrend_surface_reference(&sub->zsurf, NULL);
for (i = 0; i < sub->nr_cbufs; i++) {
if (!sub->surf[i])
continue;
vrend_surface_reference(&sub->surf[i], NULL);
}
vrend_resource_reference((struct vrend_resource **)&sub->ib.buffer, NULL);
vrend_object_fini_ctx_table(sub->object_hash);
vrend_clicbs->destroy_gl_context(sub->gl_context);
list_del(&sub->head);
FREE(sub);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,061
|
static void vrend_destroy_surface(struct vrend_surface *surf)
{
vrend_resource_reference(&surf->texture, NULL);
free(surf);
}
|
DoS
| 0
|
static void vrend_destroy_surface(struct vrend_surface *surf)
{
vrend_resource_reference(&surf->texture, NULL);
free(surf);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,062
|
static void vrend_destroy_surface_object(void *obj_ptr)
{
struct vrend_surface *surface = obj_ptr;
vrend_surface_reference(&surface, NULL);
}
|
DoS
| 0
|
static void vrend_destroy_surface_object(void *obj_ptr)
{
struct vrend_surface *surface = obj_ptr;
vrend_surface_reference(&surface, NULL);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,063
|
static void vrend_destroy_vertex_elements_object(void *obj_ptr)
{
struct vrend_vertex_element_array *v = obj_ptr;
if (vrend_state.have_vertex_attrib_binding) {
glDeleteVertexArrays(1, &v->id);
}
FREE(v);
}
|
DoS
| 0
|
static void vrend_destroy_vertex_elements_object(void *obj_ptr)
{
struct vrend_vertex_element_array *v = obj_ptr;
if (vrend_state.have_vertex_attrib_binding) {
glDeleteVertexArrays(1, &v->id);
}
FREE(v);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,064
|
static void vrend_draw_bind_samplers(struct vrend_context *ctx)
{
int sampler_id;
int i;
int shader_type;
sampler_id = 0;
for (shader_type = PIPE_SHADER_VERTEX; shader_type <= ctx->sub->last_shader_idx; shader_type++) {
int index = 0;
for (i = 0; i < ctx->sub->views[shader_type].num_views; i++) {
struct vrend_resource *texture = NULL;
if (ctx->sub->views[shader_type].views[i]) {
texture = ctx->sub->views[shader_type].views[i]->texture;
}
if (!(ctx->sub->prog->samplers_used_mask[shader_type] & (1 << i)))
continue;
if (ctx->sub->prog->samp_locs[shader_type])
glUniform1i(ctx->sub->prog->samp_locs[shader_type][index], sampler_id);
if (ctx->sub->prog->shadow_samp_mask[shader_type] & (1 << i)) {
struct vrend_sampler_view *tview = ctx->sub->views[shader_type].views[i];
glUniform4f(ctx->sub->prog->shadow_samp_mask_locs[shader_type][index],
tview->gl_swizzle_r == GL_ZERO ? 0.0 : 1.0,
tview->gl_swizzle_g == GL_ZERO ? 0.0 : 1.0,
tview->gl_swizzle_b == GL_ZERO ? 0.0 : 1.0,
tview->gl_swizzle_a == GL_ZERO ? 0.0 : 1.0);
glUniform4f(ctx->sub->prog->shadow_samp_add_locs[shader_type][index],
tview->gl_swizzle_r == GL_ONE ? 1.0 : 0.0,
tview->gl_swizzle_g == GL_ONE ? 1.0 : 0.0,
tview->gl_swizzle_b == GL_ONE ? 1.0 : 0.0,
tview->gl_swizzle_a == GL_ONE ? 1.0 : 0.0);
}
glActiveTexture(GL_TEXTURE0 + sampler_id);
if (texture) {
int id;
if (texture->target == GL_TEXTURE_BUFFER)
id = texture->tbo_tex_id;
else
id = texture->id;
glBindTexture(texture->target, id);
if (ctx->sub->views[shader_type].old_ids[i] != id || ctx->sub->sampler_state_dirty) {
vrend_apply_sampler_state(ctx, texture, shader_type, i, sampler_id, ctx->sub->views[shader_type].views[i]->srgb_decode);
ctx->sub->views[shader_type].old_ids[i] = id;
}
if (ctx->sub->rs_state.point_quad_rasterization) {
if (vrend_state.use_core_profile == false) {
if (ctx->sub->rs_state.sprite_coord_enable & (1 << i))
glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE);
else
glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_FALSE);
}
}
sampler_id++;
}
index++;
}
}
if (vrend_state.use_core_profile && ctx->sub->prog->fs_stipple_loc != -1) {
glActiveTexture(GL_TEXTURE0 + sampler_id);
glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
glUniform1i(ctx->sub->prog->fs_stipple_loc, sampler_id);
}
ctx->sub->sampler_state_dirty = false;
}
|
DoS
| 0
|
static void vrend_draw_bind_samplers(struct vrend_context *ctx)
{
int sampler_id;
int i;
int shader_type;
sampler_id = 0;
for (shader_type = PIPE_SHADER_VERTEX; shader_type <= ctx->sub->last_shader_idx; shader_type++) {
int index = 0;
for (i = 0; i < ctx->sub->views[shader_type].num_views; i++) {
struct vrend_resource *texture = NULL;
if (ctx->sub->views[shader_type].views[i]) {
texture = ctx->sub->views[shader_type].views[i]->texture;
}
if (!(ctx->sub->prog->samplers_used_mask[shader_type] & (1 << i)))
continue;
if (ctx->sub->prog->samp_locs[shader_type])
glUniform1i(ctx->sub->prog->samp_locs[shader_type][index], sampler_id);
if (ctx->sub->prog->shadow_samp_mask[shader_type] & (1 << i)) {
struct vrend_sampler_view *tview = ctx->sub->views[shader_type].views[i];
glUniform4f(ctx->sub->prog->shadow_samp_mask_locs[shader_type][index],
tview->gl_swizzle_r == GL_ZERO ? 0.0 : 1.0,
tview->gl_swizzle_g == GL_ZERO ? 0.0 : 1.0,
tview->gl_swizzle_b == GL_ZERO ? 0.0 : 1.0,
tview->gl_swizzle_a == GL_ZERO ? 0.0 : 1.0);
glUniform4f(ctx->sub->prog->shadow_samp_add_locs[shader_type][index],
tview->gl_swizzle_r == GL_ONE ? 1.0 : 0.0,
tview->gl_swizzle_g == GL_ONE ? 1.0 : 0.0,
tview->gl_swizzle_b == GL_ONE ? 1.0 : 0.0,
tview->gl_swizzle_a == GL_ONE ? 1.0 : 0.0);
}
glActiveTexture(GL_TEXTURE0 + sampler_id);
if (texture) {
int id;
if (texture->target == GL_TEXTURE_BUFFER)
id = texture->tbo_tex_id;
else
id = texture->id;
glBindTexture(texture->target, id);
if (ctx->sub->views[shader_type].old_ids[i] != id || ctx->sub->sampler_state_dirty) {
vrend_apply_sampler_state(ctx, texture, shader_type, i, sampler_id, ctx->sub->views[shader_type].views[i]->srgb_decode);
ctx->sub->views[shader_type].old_ids[i] = id;
}
if (ctx->sub->rs_state.point_quad_rasterization) {
if (vrend_state.use_core_profile == false) {
if (ctx->sub->rs_state.sprite_coord_enable & (1 << i))
glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_TRUE);
else
glTexEnvi(GL_POINT_SPRITE_ARB, GL_COORD_REPLACE_ARB, GL_FALSE);
}
}
sampler_id++;
}
index++;
}
}
if (vrend_state.use_core_profile && ctx->sub->prog->fs_stipple_loc != -1) {
glActiveTexture(GL_TEXTURE0 + sampler_id);
glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
glUniform1i(ctx->sub->prog->fs_stipple_loc, sampler_id);
}
ctx->sub->sampler_state_dirty = false;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,065
|
static void vrend_draw_bind_ubo(struct vrend_context *ctx)
{
int i;
int ubo_id;
int shader_type;
ubo_id = 0;
for (shader_type = PIPE_SHADER_VERTEX; shader_type <= ctx->sub->last_shader_idx; shader_type++) {
uint32_t mask;
int shader_ubo_idx = 0;
struct pipe_constant_buffer *cb;
struct vrend_resource *res;
if (!ctx->sub->const_bufs_used_mask[shader_type])
continue;
if (!ctx->sub->prog->ubo_locs[shader_type])
continue;
mask = ctx->sub->const_bufs_used_mask[shader_type];
while (mask) {
i = u_bit_scan(&mask);
cb = &ctx->sub->cbs[shader_type][i];
res = (struct vrend_resource *)cb->buffer;
glBindBufferRange(GL_UNIFORM_BUFFER, ubo_id, res->id,
cb->buffer_offset, cb->buffer_size);
glUniformBlockBinding(ctx->sub->prog->id, ctx->sub->prog->ubo_locs[shader_type][shader_ubo_idx], ubo_id);
shader_ubo_idx++;
ubo_id++;
}
}
}
|
DoS
| 0
|
static void vrend_draw_bind_ubo(struct vrend_context *ctx)
{
int i;
int ubo_id;
int shader_type;
ubo_id = 0;
for (shader_type = PIPE_SHADER_VERTEX; shader_type <= ctx->sub->last_shader_idx; shader_type++) {
uint32_t mask;
int shader_ubo_idx = 0;
struct pipe_constant_buffer *cb;
struct vrend_resource *res;
if (!ctx->sub->const_bufs_used_mask[shader_type])
continue;
if (!ctx->sub->prog->ubo_locs[shader_type])
continue;
mask = ctx->sub->const_bufs_used_mask[shader_type];
while (mask) {
i = u_bit_scan(&mask);
cb = &ctx->sub->cbs[shader_type][i];
res = (struct vrend_resource *)cb->buffer;
glBindBufferRange(GL_UNIFORM_BUFFER, ubo_id, res->id,
cb->buffer_offset, cb->buffer_size);
glUniformBlockBinding(ctx->sub->prog->id, ctx->sub->prog->ubo_locs[shader_type][shader_ubo_idx], ubo_id);
shader_ubo_idx++;
ubo_id++;
}
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,066
|
static void vrend_draw_bind_vertex_binding(struct vrend_context *ctx,
struct vrend_vertex_element_array *va)
{
int i;
glBindVertexArray(va->id);
if (ctx->sub->vbo_dirty) {
for (i = 0; i < ctx->sub->num_vbos; i++) {
struct vrend_resource *res = (struct vrend_resource *)ctx->sub->vbo[i].buffer;
if (!res)
glBindVertexBuffer(i, 0, 0, 0);
else
glBindVertexBuffer(i,
res->id,
ctx->sub->vbo[i].buffer_offset,
ctx->sub->vbo[i].stride);
}
for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++) {
glBindVertexBuffer(i, 0, 0, 0);
}
ctx->sub->vbo_dirty = false;
}
}
|
DoS
| 0
|
static void vrend_draw_bind_vertex_binding(struct vrend_context *ctx,
struct vrend_vertex_element_array *va)
{
int i;
glBindVertexArray(va->id);
if (ctx->sub->vbo_dirty) {
for (i = 0; i < ctx->sub->num_vbos; i++) {
struct vrend_resource *res = (struct vrend_resource *)ctx->sub->vbo[i].buffer;
if (!res)
glBindVertexBuffer(i, 0, 0, 0);
else
glBindVertexBuffer(i,
res->id,
ctx->sub->vbo[i].buffer_offset,
ctx->sub->vbo[i].stride);
}
for (i = ctx->sub->num_vbos; i < ctx->sub->old_num_vbos; i++) {
glBindVertexBuffer(i, 0, 0, 0);
}
ctx->sub->vbo_dirty = false;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,067
|
static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
struct vrend_vertex_element_array *va)
{
uint32_t num_enable;
uint32_t enable_bitmask;
uint32_t disable_bitmask;
int i;
num_enable = va->count;
enable_bitmask = 0;
disable_bitmask = ~((1ull << num_enable) - 1);
for (i = 0; i < va->count; i++) {
struct vrend_vertex_element *ve = &va->elements[i];
int vbo_index = ve->base.vertex_buffer_index;
struct vrend_resource *res;
GLint loc;
if (i >= ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs) {
/* XYZZY: debug this? */
num_enable = ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs;
break;
}
res = (struct vrend_resource *)ctx->sub->vbo[vbo_index].buffer;
if (!res) {
fprintf(stderr,"cannot find vbo buf %d %d %d\n", i, va->count, ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs);
continue;
}
if (vrend_state.use_explicit_locations || vrend_state.have_vertex_attrib_binding) {
loc = i;
} else {
if (ctx->sub->prog->attrib_locs) {
loc = ctx->sub->prog->attrib_locs[i];
} else loc = -1;
if (loc == -1) {
fprintf(stderr,"%s: cannot find loc %d %d %d\n", ctx->debug_name, i, va->count, ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs);
num_enable--;
if (i == 0) {
fprintf(stderr,"%s: shader probably didn't compile - skipping rendering\n", ctx->debug_name);
return;
}
continue;
}
}
if (ve->type == GL_FALSE) {
fprintf(stderr,"failed to translate vertex type - skipping render\n");
return;
}
glBindBuffer(GL_ARRAY_BUFFER, res->id);
if (ctx->sub->vbo[vbo_index].stride == 0) {
void *data;
/* for 0 stride we are kinda screwed */
data = glMapBufferRange(GL_ARRAY_BUFFER, ctx->sub->vbo[vbo_index].buffer_offset, ve->nr_chan * sizeof(GLfloat), GL_MAP_READ_BIT);
switch (ve->nr_chan) {
case 1:
glVertexAttrib1fv(loc, data);
break;
case 2:
glVertexAttrib2fv(loc, data);
break;
case 3:
glVertexAttrib3fv(loc, data);
break;
case 4:
default:
glVertexAttrib4fv(loc, data);
break;
}
glUnmapBuffer(GL_ARRAY_BUFFER);
disable_bitmask |= (1 << loc);
} else {
enable_bitmask |= (1 << loc);
if (util_format_is_pure_integer(ve->base.src_format)) {
glVertexAttribIPointer(loc, ve->nr_chan, ve->type, ctx->sub->vbo[vbo_index].stride, (void *)(unsigned long)(ve->base.src_offset + ctx->sub->vbo[vbo_index].buffer_offset));
} else {
glVertexAttribPointer(loc, ve->nr_chan, ve->type, ve->norm, ctx->sub->vbo[vbo_index].stride, (void *)(unsigned long)(ve->base.src_offset + ctx->sub->vbo[vbo_index].buffer_offset));
}
glVertexAttribDivisorARB(loc, ve->base.instance_divisor);
}
}
if (ctx->sub->enabled_attribs_bitmask != enable_bitmask) {
uint32_t mask = ctx->sub->enabled_attribs_bitmask & disable_bitmask;
while (mask) {
i = u_bit_scan(&mask);
glDisableVertexAttribArray(i);
}
ctx->sub->enabled_attribs_bitmask &= ~disable_bitmask;
mask = ctx->sub->enabled_attribs_bitmask ^ enable_bitmask;
while (mask) {
i = u_bit_scan(&mask);
glEnableVertexAttribArray(i);
}
ctx->sub->enabled_attribs_bitmask = enable_bitmask;
}
}
|
DoS
| 0
|
static void vrend_draw_bind_vertex_legacy(struct vrend_context *ctx,
struct vrend_vertex_element_array *va)
{
uint32_t num_enable;
uint32_t enable_bitmask;
uint32_t disable_bitmask;
int i;
num_enable = va->count;
enable_bitmask = 0;
disable_bitmask = ~((1ull << num_enable) - 1);
for (i = 0; i < va->count; i++) {
struct vrend_vertex_element *ve = &va->elements[i];
int vbo_index = ve->base.vertex_buffer_index;
struct vrend_resource *res;
GLint loc;
if (i >= ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs) {
/* XYZZY: debug this? */
num_enable = ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs;
break;
}
res = (struct vrend_resource *)ctx->sub->vbo[vbo_index].buffer;
if (!res) {
fprintf(stderr,"cannot find vbo buf %d %d %d\n", i, va->count, ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs);
continue;
}
if (vrend_state.use_explicit_locations || vrend_state.have_vertex_attrib_binding) {
loc = i;
} else {
if (ctx->sub->prog->attrib_locs) {
loc = ctx->sub->prog->attrib_locs[i];
} else loc = -1;
if (loc == -1) {
fprintf(stderr,"%s: cannot find loc %d %d %d\n", ctx->debug_name, i, va->count, ctx->sub->prog->ss[PIPE_SHADER_VERTEX]->sel->sinfo.num_inputs);
num_enable--;
if (i == 0) {
fprintf(stderr,"%s: shader probably didn't compile - skipping rendering\n", ctx->debug_name);
return;
}
continue;
}
}
if (ve->type == GL_FALSE) {
fprintf(stderr,"failed to translate vertex type - skipping render\n");
return;
}
glBindBuffer(GL_ARRAY_BUFFER, res->id);
if (ctx->sub->vbo[vbo_index].stride == 0) {
void *data;
/* for 0 stride we are kinda screwed */
data = glMapBufferRange(GL_ARRAY_BUFFER, ctx->sub->vbo[vbo_index].buffer_offset, ve->nr_chan * sizeof(GLfloat), GL_MAP_READ_BIT);
switch (ve->nr_chan) {
case 1:
glVertexAttrib1fv(loc, data);
break;
case 2:
glVertexAttrib2fv(loc, data);
break;
case 3:
glVertexAttrib3fv(loc, data);
break;
case 4:
default:
glVertexAttrib4fv(loc, data);
break;
}
glUnmapBuffer(GL_ARRAY_BUFFER);
disable_bitmask |= (1 << loc);
} else {
enable_bitmask |= (1 << loc);
if (util_format_is_pure_integer(ve->base.src_format)) {
glVertexAttribIPointer(loc, ve->nr_chan, ve->type, ctx->sub->vbo[vbo_index].stride, (void *)(unsigned long)(ve->base.src_offset + ctx->sub->vbo[vbo_index].buffer_offset));
} else {
glVertexAttribPointer(loc, ve->nr_chan, ve->type, ve->norm, ctx->sub->vbo[vbo_index].stride, (void *)(unsigned long)(ve->base.src_offset + ctx->sub->vbo[vbo_index].buffer_offset));
}
glVertexAttribDivisorARB(loc, ve->base.instance_divisor);
}
}
if (ctx->sub->enabled_attribs_bitmask != enable_bitmask) {
uint32_t mask = ctx->sub->enabled_attribs_bitmask & disable_bitmask;
while (mask) {
i = u_bit_scan(&mask);
glDisableVertexAttribArray(i);
}
ctx->sub->enabled_attribs_bitmask &= ~disable_bitmask;
mask = ctx->sub->enabled_attribs_bitmask ^ enable_bitmask;
while (mask) {
i = u_bit_scan(&mask);
glEnableVertexAttribArray(i);
}
ctx->sub->enabled_attribs_bitmask = enable_bitmask;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,068
|
void vrend_end_query(struct vrend_context *ctx, uint32_t handle)
{
struct vrend_query *q;
q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY);
if (!q)
return;
if (vrend_is_timer_query(q->gltype)) {
if (q->gltype == GL_TIMESTAMP)
glQueryCounter(q->id, q->gltype);
/* remove from active query list for this context */
else
glEndQuery(q->gltype);
return;
}
glEndQuery(q->gltype);
}
|
DoS
| 0
|
void vrend_end_query(struct vrend_context *ctx, uint32_t handle)
{
struct vrend_query *q;
q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY);
if (!q)
return;
if (vrend_is_timer_query(q->gltype)) {
if (q->gltype == GL_TIMESTAMP)
glQueryCounter(q->id, q->gltype);
/* remove from active query list for this context */
else
glEndQuery(q->gltype);
return;
}
glEndQuery(q->gltype);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,069
|
void vrend_fb_bind_texture(struct vrend_resource *res,
int idx,
uint32_t level, uint32_t layer)
{
const struct util_format_description *desc = util_format_description(res->base.format);
GLenum attachment = GL_COLOR_ATTACHMENT0_EXT + idx;
if (vrend_format_is_ds(res->base.format)) { {
if (util_format_has_stencil(desc)) {
if (util_format_has_depth(desc))
attachment = GL_DEPTH_STENCIL_ATTACHMENT;
else
attachment = GL_STENCIL_ATTACHMENT;
} else
attachment = GL_DEPTH_ATTACHMENT;
}
}
switch (res->target) {
case GL_TEXTURE_1D_ARRAY:
case GL_TEXTURE_2D_ARRAY:
case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
case GL_TEXTURE_CUBE_MAP_ARRAY:
if (layer == 0xffffffff)
glFramebufferTexture(GL_FRAMEBUFFER_EXT, attachment,
res->id, level);
else
glFramebufferTextureLayer(GL_FRAMEBUFFER_EXT, attachment,
res->id, level, layer);
break;
case GL_TEXTURE_3D:
if (layer == 0xffffffff)
glFramebufferTexture(GL_FRAMEBUFFER_EXT, attachment,
res->id, level);
else
glFramebufferTexture3DEXT(GL_FRAMEBUFFER_EXT, attachment,
res->target, res->id, level, layer);
break;
case GL_TEXTURE_CUBE_MAP:
if (layer == 0xffffffff)
glFramebufferTexture(GL_FRAMEBUFFER_EXT, attachment,
res->id, level);
else
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, attachment,
GL_TEXTURE_CUBE_MAP_POSITIVE_X + layer, res->id, level);
break;
case GL_TEXTURE_1D:
glFramebufferTexture1DEXT(GL_FRAMEBUFFER_EXT, attachment,
res->target, res->id, level);
break;
case GL_TEXTURE_2D:
default:
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, attachment,
res->target, res->id, level);
break;
}
if (attachment == GL_DEPTH_ATTACHMENT) {
switch (res->target) {
case GL_TEXTURE_1D:
glFramebufferTexture1DEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT,
GL_TEXTURE_1D, 0, 0);
break;
case GL_TEXTURE_2D:
default:
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
break;
}
}
}
|
DoS
| 0
|
void vrend_fb_bind_texture(struct vrend_resource *res,
int idx,
uint32_t level, uint32_t layer)
{
const struct util_format_description *desc = util_format_description(res->base.format);
GLenum attachment = GL_COLOR_ATTACHMENT0_EXT + idx;
if (vrend_format_is_ds(res->base.format)) { {
if (util_format_has_stencil(desc)) {
if (util_format_has_depth(desc))
attachment = GL_DEPTH_STENCIL_ATTACHMENT;
else
attachment = GL_STENCIL_ATTACHMENT;
} else
attachment = GL_DEPTH_ATTACHMENT;
}
}
switch (res->target) {
case GL_TEXTURE_1D_ARRAY:
case GL_TEXTURE_2D_ARRAY:
case GL_TEXTURE_2D_MULTISAMPLE_ARRAY:
case GL_TEXTURE_CUBE_MAP_ARRAY:
if (layer == 0xffffffff)
glFramebufferTexture(GL_FRAMEBUFFER_EXT, attachment,
res->id, level);
else
glFramebufferTextureLayer(GL_FRAMEBUFFER_EXT, attachment,
res->id, level, layer);
break;
case GL_TEXTURE_3D:
if (layer == 0xffffffff)
glFramebufferTexture(GL_FRAMEBUFFER_EXT, attachment,
res->id, level);
else
glFramebufferTexture3DEXT(GL_FRAMEBUFFER_EXT, attachment,
res->target, res->id, level, layer);
break;
case GL_TEXTURE_CUBE_MAP:
if (layer == 0xffffffff)
glFramebufferTexture(GL_FRAMEBUFFER_EXT, attachment,
res->id, level);
else
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, attachment,
GL_TEXTURE_CUBE_MAP_POSITIVE_X + layer, res->id, level);
break;
case GL_TEXTURE_1D:
glFramebufferTexture1DEXT(GL_FRAMEBUFFER_EXT, attachment,
res->target, res->id, level);
break;
case GL_TEXTURE_2D:
default:
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, attachment,
res->target, res->id, level);
break;
}
if (attachment == GL_DEPTH_ATTACHMENT) {
switch (res->target) {
case GL_TEXTURE_1D:
glFramebufferTexture1DEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT,
GL_TEXTURE_1D, 0, 0);
break;
case GL_TEXTURE_2D:
default:
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
break;
}
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,070
|
static inline void vrend_fill_shader_key(struct vrend_context *ctx,
struct vrend_shader_key *key)
{
if (vrend_state.use_core_profile == true) {
int i;
bool add_alpha_test = true;
key->cbufs_are_a8_bitmask = 0;
for (i = 0; i < ctx->sub->nr_cbufs; i++) {
if (!ctx->sub->surf[i])
continue;
if (vrend_format_is_emulated_alpha(ctx->sub->surf[i]->format))
key->cbufs_are_a8_bitmask |= (1 << i);
if (util_format_is_pure_integer(ctx->sub->surf[i]->format))
add_alpha_test = false;
}
if (add_alpha_test) {
key->add_alpha_test = ctx->sub->dsa_state.alpha.enabled;
key->alpha_test = ctx->sub->dsa_state.alpha.func;
key->alpha_ref_val = ctx->sub->dsa_state.alpha.ref_value;
}
key->pstipple_tex = ctx->sub->rs_state.poly_stipple_enable;
key->color_two_side = ctx->sub->rs_state.light_twoside;
key->clip_plane_enable = ctx->sub->rs_state.clip_plane_enable;
key->flatshade = ctx->sub->rs_state.flatshade ? true : false;
} else {
key->add_alpha_test = 0;
key->pstipple_tex = 0;
}
key->invert_fs_origin = !ctx->sub->inverted_fbo_content;
key->coord_replace = ctx->sub->rs_state.point_quad_rasterization ? ctx->sub->rs_state.sprite_coord_enable : 0;
if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
key->gs_present = true;
}
|
DoS
| 0
|
static inline void vrend_fill_shader_key(struct vrend_context *ctx,
struct vrend_shader_key *key)
{
if (vrend_state.use_core_profile == true) {
int i;
bool add_alpha_test = true;
key->cbufs_are_a8_bitmask = 0;
for (i = 0; i < ctx->sub->nr_cbufs; i++) {
if (!ctx->sub->surf[i])
continue;
if (vrend_format_is_emulated_alpha(ctx->sub->surf[i]->format))
key->cbufs_are_a8_bitmask |= (1 << i);
if (util_format_is_pure_integer(ctx->sub->surf[i]->format))
add_alpha_test = false;
}
if (add_alpha_test) {
key->add_alpha_test = ctx->sub->dsa_state.alpha.enabled;
key->alpha_test = ctx->sub->dsa_state.alpha.func;
key->alpha_ref_val = ctx->sub->dsa_state.alpha.ref_value;
}
key->pstipple_tex = ctx->sub->rs_state.poly_stipple_enable;
key->color_two_side = ctx->sub->rs_state.light_twoside;
key->clip_plane_enable = ctx->sub->rs_state.clip_plane_enable;
key->flatshade = ctx->sub->rs_state.flatshade ? true : false;
} else {
key->add_alpha_test = 0;
key->pstipple_tex = 0;
}
key->invert_fs_origin = !ctx->sub->inverted_fbo_content;
key->coord_replace = ctx->sub->rs_state.point_quad_rasterization ? ctx->sub->rs_state.sprite_coord_enable : 0;
if (ctx->sub->shaders[PIPE_SHADER_GEOMETRY])
key->gs_present = true;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,071
|
static void vrend_finish_context_switch(struct vrend_context *ctx)
{
if (ctx->ctx_switch_pending == false)
return;
ctx->ctx_switch_pending = false;
if (vrend_state.current_hw_ctx == ctx)
return;
vrend_state.current_hw_ctx = ctx;
vrend_clicbs->make_current(0, ctx->sub->gl_context);
}
|
DoS
| 0
|
static void vrend_finish_context_switch(struct vrend_context *ctx)
{
if (ctx->ctx_switch_pending == false)
return;
ctx->ctx_switch_pending = false;
if (vrend_state.current_hw_ctx == ctx)
return;
vrend_state.current_hw_ctx = ctx;
vrend_clicbs->make_current(0, ctx->sub->gl_context);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,072
|
static int vrend_finish_shader(struct vrend_context *ctx,
struct vrend_shader_selector *sel,
const struct tgsi_token *tokens)
{
int r;
sel->tokens = tgsi_dup_tokens(tokens);
r = vrend_shader_select(ctx, sel, NULL);
if (r) {
return EINVAL;
}
return 0;
}
|
DoS
| 0
|
static int vrend_finish_shader(struct vrend_context *ctx,
struct vrend_shader_selector *sel,
const struct tgsi_token *tokens)
{
int r;
sel->tokens = tgsi_dup_tokens(tokens);
r = vrend_shader_select(ctx, sel, NULL);
if (r) {
return EINVAL;
}
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,073
|
static inline bool vrend_format_can_render(enum virgl_formats format)
{
return tex_conv_table[format].bindings & VREND_BIND_RENDER;
}
|
DoS
| 0
|
static inline bool vrend_format_can_render(enum virgl_formats format)
{
return tex_conv_table[format].bindings & VREND_BIND_RENDER;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,074
|
static inline bool vrend_format_can_sample(enum virgl_formats format)
{
return tex_conv_table[format].bindings & VREND_BIND_SAMPLER;
}
|
DoS
| 0
|
static inline bool vrend_format_can_sample(enum virgl_formats format)
{
return tex_conv_table[format].bindings & VREND_BIND_SAMPLER;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,075
|
static inline bool vrend_format_is_ds(enum virgl_formats format)
{
return tex_conv_table[format].bindings & VREND_BIND_DEPTHSTENCIL;
}
|
DoS
| 0
|
static inline bool vrend_format_is_ds(enum virgl_formats format)
{
return tex_conv_table[format].bindings & VREND_BIND_DEPTHSTENCIL;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,076
|
bool vrend_format_is_emulated_alpha(enum virgl_formats format)
{
if (!vrend_state.use_core_profile)
return false;
return (format == VIRGL_FORMAT_A8_UNORM ||
format == VIRGL_FORMAT_A16_UNORM);
}
|
DoS
| 0
|
bool vrend_format_is_emulated_alpha(enum virgl_formats format)
{
if (!vrend_state.use_core_profile)
return false;
return (format == VIRGL_FORMAT_A8_UNORM ||
format == VIRGL_FORMAT_A16_UNORM);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,077
|
static void vrend_free_programs(struct vrend_sub_context *sub)
{
struct vrend_linked_shader_program *ent, *tmp;
if (LIST_IS_EMPTY(&sub->programs))
return;
LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->programs, head) {
vrend_destroy_program(ent);
}
}
|
DoS
| 0
|
static void vrend_free_programs(struct vrend_sub_context *sub)
{
struct vrend_linked_shader_program *ent, *tmp;
if (LIST_IS_EMPTY(&sub->programs))
return;
LIST_FOR_EACH_ENTRY_SAFE(ent, tmp, &sub->programs, head) {
vrend_destroy_program(ent);
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,078
|
static void vrend_free_sync_thread(void)
{
if (!vrend_state.sync_thread)
return;
pipe_mutex_lock(vrend_state.fence_mutex);
vrend_state.stop_sync_thread = true;
pipe_mutex_unlock(vrend_state.fence_mutex);
pipe_condvar_signal(vrend_state.fence_cond);
pipe_thread_wait(vrend_state.sync_thread);
vrend_state.sync_thread = 0;
pipe_condvar_destroy(vrend_state.fence_cond);
pipe_mutex_destroy(vrend_state.fence_mutex);
}
|
DoS
| 0
|
static void vrend_free_sync_thread(void)
{
if (!vrend_state.sync_thread)
return;
pipe_mutex_lock(vrend_state.fence_mutex);
vrend_state.stop_sync_thread = true;
pipe_mutex_unlock(vrend_state.fence_mutex);
pipe_condvar_signal(vrend_state.fence_cond);
pipe_thread_wait(vrend_state.sync_thread);
vrend_state.sync_thread = 0;
pipe_condvar_destroy(vrend_state.fence_cond);
pipe_mutex_destroy(vrend_state.fence_mutex);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,079
|
static bool vrend_get_one_query_result(GLuint query_id, bool use_64, uint64_t *result)
{
GLint ready;
GLuint passed;
GLuint64 pass64;
glGetQueryObjectiv(query_id, GL_QUERY_RESULT_AVAILABLE_ARB, &ready);
if (!ready)
return false;
if (use_64) {
glGetQueryObjectui64v(query_id, GL_QUERY_RESULT_ARB, &pass64);
*result = pass64;
} else {
glGetQueryObjectuiv(query_id, GL_QUERY_RESULT_ARB, &passed);
*result = passed;
}
return true;
}
|
DoS
| 0
|
static bool vrend_get_one_query_result(GLuint query_id, bool use_64, uint64_t *result)
{
GLint ready;
GLuint passed;
GLuint64 pass64;
glGetQueryObjectiv(query_id, GL_QUERY_RESULT_AVAILABLE_ARB, &ready);
if (!ready)
return false;
if (use_64) {
glGetQueryObjectui64v(query_id, GL_QUERY_RESULT_ARB, &pass64);
*result = pass64;
} else {
glGetQueryObjectuiv(query_id, GL_QUERY_RESULT_ARB, &passed);
*result = passed;
}
return true;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,080
|
void vrend_get_query_result(struct vrend_context *ctx, uint32_t handle,
uint32_t wait)
{
struct vrend_query *q;
bool ret;
q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY);
if (!q)
return;
ret = vrend_check_query(q);
if (ret == false)
list_addtail(&q->waiting_queries, &vrend_state.waiting_query_list);
}
|
DoS
| 0
|
void vrend_get_query_result(struct vrend_context *ctx, uint32_t handle,
uint32_t wait)
{
struct vrend_query *q;
bool ret;
q = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_QUERY);
if (!q)
return;
ret = vrend_check_query(q);
if (ret == false)
list_addtail(&q->waiting_queries, &vrend_state.waiting_query_list);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,081
|
static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_state *state)
{
if (state->logicop_enable != ctx->sub->hw_blend_state.logicop_enable) {
ctx->sub->hw_blend_state.logicop_enable = state->logicop_enable;
if (state->logicop_enable) {
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(translate_logicop(state->logicop_func));
} else
glDisable(GL_COLOR_LOGIC_OP);
}
if (state->independent_blend_enable) {
/* ARB_draw_buffers_blend is required for this */
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
if (state->rt[i].blend_enable) {
glBlendFuncSeparateiARB(i, translate_blend_factor(state->rt[i].rgb_src_factor),
translate_blend_factor(state->rt[i].rgb_dst_factor),
translate_blend_factor(state->rt[i].alpha_src_factor),
translate_blend_factor(state->rt[i].alpha_dst_factor));
glBlendEquationSeparateiARB(i, translate_blend_func(state->rt[i].rgb_func),
translate_blend_func(state->rt[i].alpha_func));
glEnableIndexedEXT(GL_BLEND, i);
} else
glDisableIndexedEXT(GL_BLEND, i);
if (state->rt[i].colormask != ctx->sub->hw_blend_state.rt[i].colormask) {
ctx->sub->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
glColorMaskIndexedEXT(i, state->rt[i].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
state->rt[i].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
state->rt[i].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
state->rt[i].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
}
} else {
if (state->rt[0].blend_enable) {
glBlendFuncSeparate(translate_blend_factor(state->rt[0].rgb_src_factor),
translate_blend_factor(state->rt[0].rgb_dst_factor),
translate_blend_factor(state->rt[0].alpha_src_factor),
translate_blend_factor(state->rt[0].alpha_dst_factor));
glBlendEquationSeparate(translate_blend_func(state->rt[0].rgb_func),
translate_blend_func(state->rt[0].alpha_func));
vrend_blend_enable(ctx, true);
}
else
vrend_blend_enable(ctx, false);
if (state->rt[0].colormask != ctx->sub->hw_blend_state.rt[0].colormask) {
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
ctx->sub->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
glColorMask(state->rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
}
if (vrend_state.have_multisample) {
if (state->alpha_to_coverage)
glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE);
else
glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE);
if (state->alpha_to_one)
glEnable(GL_SAMPLE_ALPHA_TO_ONE);
else
glDisable(GL_SAMPLE_ALPHA_TO_ONE);
}
}
|
DoS
| 0
|
static void vrend_hw_emit_blend(struct vrend_context *ctx, struct pipe_blend_state *state)
{
if (state->logicop_enable != ctx->sub->hw_blend_state.logicop_enable) {
ctx->sub->hw_blend_state.logicop_enable = state->logicop_enable;
if (state->logicop_enable) {
glEnable(GL_COLOR_LOGIC_OP);
glLogicOp(translate_logicop(state->logicop_func));
} else
glDisable(GL_COLOR_LOGIC_OP);
}
if (state->independent_blend_enable) {
/* ARB_draw_buffers_blend is required for this */
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++) {
if (state->rt[i].blend_enable) {
glBlendFuncSeparateiARB(i, translate_blend_factor(state->rt[i].rgb_src_factor),
translate_blend_factor(state->rt[i].rgb_dst_factor),
translate_blend_factor(state->rt[i].alpha_src_factor),
translate_blend_factor(state->rt[i].alpha_dst_factor));
glBlendEquationSeparateiARB(i, translate_blend_func(state->rt[i].rgb_func),
translate_blend_func(state->rt[i].alpha_func));
glEnableIndexedEXT(GL_BLEND, i);
} else
glDisableIndexedEXT(GL_BLEND, i);
if (state->rt[i].colormask != ctx->sub->hw_blend_state.rt[i].colormask) {
ctx->sub->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
glColorMaskIndexedEXT(i, state->rt[i].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
state->rt[i].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
state->rt[i].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
state->rt[i].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
}
} else {
if (state->rt[0].blend_enable) {
glBlendFuncSeparate(translate_blend_factor(state->rt[0].rgb_src_factor),
translate_blend_factor(state->rt[0].rgb_dst_factor),
translate_blend_factor(state->rt[0].alpha_src_factor),
translate_blend_factor(state->rt[0].alpha_dst_factor));
glBlendEquationSeparate(translate_blend_func(state->rt[0].rgb_func),
translate_blend_func(state->rt[0].alpha_func));
vrend_blend_enable(ctx, true);
}
else
vrend_blend_enable(ctx, false);
if (state->rt[0].colormask != ctx->sub->hw_blend_state.rt[0].colormask) {
int i;
for (i = 0; i < PIPE_MAX_COLOR_BUFS; i++)
ctx->sub->hw_blend_state.rt[i].colormask = state->rt[i].colormask;
glColorMask(state->rt[0].colormask & PIPE_MASK_R ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_G ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_B ? GL_TRUE : GL_FALSE,
state->rt[0].colormask & PIPE_MASK_A ? GL_TRUE : GL_FALSE);
}
}
if (vrend_state.have_multisample) {
if (state->alpha_to_coverage)
glEnable(GL_SAMPLE_ALPHA_TO_COVERAGE);
else
glDisable(GL_SAMPLE_ALPHA_TO_COVERAGE);
if (state->alpha_to_one)
glEnable(GL_SAMPLE_ALPHA_TO_ONE);
else
glDisable(GL_SAMPLE_ALPHA_TO_ONE);
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,082
|
static void vrend_hw_emit_dsa(struct vrend_context *ctx)
{
struct pipe_depth_stencil_alpha_state *state = &ctx->sub->dsa_state;
if (state->depth.enabled) {
vrend_depth_test_enable(ctx, true);
glDepthFunc(GL_NEVER + state->depth.func);
if (state->depth.writemask)
glDepthMask(GL_TRUE);
else
glDepthMask(GL_FALSE);
} else
vrend_depth_test_enable(ctx, false);
if (state->alpha.enabled) {
vrend_alpha_test_enable(ctx, true);
if (!vrend_state.use_core_profile)
glAlphaFunc(GL_NEVER + state->alpha.func, state->alpha.ref_value);
} else
vrend_alpha_test_enable(ctx, false);
}
|
DoS
| 0
|
static void vrend_hw_emit_dsa(struct vrend_context *ctx)
{
struct pipe_depth_stencil_alpha_state *state = &ctx->sub->dsa_state;
if (state->depth.enabled) {
vrend_depth_test_enable(ctx, true);
glDepthFunc(GL_NEVER + state->depth.func);
if (state->depth.writemask)
glDepthMask(GL_TRUE);
else
glDepthMask(GL_FALSE);
} else
vrend_depth_test_enable(ctx, false);
if (state->alpha.enabled) {
vrend_alpha_test_enable(ctx, true);
if (!vrend_state.use_core_profile)
glAlphaFunc(GL_NEVER + state->alpha.func, state->alpha.ref_value);
} else
vrend_alpha_test_enable(ctx, false);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,083
|
static void vrend_hw_emit_framebuffer_state(struct vrend_context *ctx)
{
static const GLenum buffers[8] = {
GL_COLOR_ATTACHMENT0_EXT,
GL_COLOR_ATTACHMENT1_EXT,
GL_COLOR_ATTACHMENT2_EXT,
GL_COLOR_ATTACHMENT3_EXT,
GL_COLOR_ATTACHMENT4_EXT,
GL_COLOR_ATTACHMENT5_EXT,
GL_COLOR_ATTACHMENT6_EXT,
GL_COLOR_ATTACHMENT7_EXT,
};
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, ctx->sub->fb_id);
if (ctx->sub->nr_cbufs == 0) {
glReadBuffer(GL_NONE);
glDisable(GL_FRAMEBUFFER_SRGB_EXT);
} else {
struct vrend_surface *surf = NULL;
int i;
for (i = 0; i < ctx->sub->nr_cbufs; i++) {
if (ctx->sub->surf[i]) {
surf = ctx->sub->surf[i];
}
}
if (util_format_is_srgb(surf->format)) {
glEnable(GL_FRAMEBUFFER_SRGB_EXT);
} else {
glDisable(GL_FRAMEBUFFER_SRGB_EXT);
}
}
glDrawBuffers(ctx->sub->nr_cbufs, buffers);
}
|
DoS
| 0
|
static void vrend_hw_emit_framebuffer_state(struct vrend_context *ctx)
{
static const GLenum buffers[8] = {
GL_COLOR_ATTACHMENT0_EXT,
GL_COLOR_ATTACHMENT1_EXT,
GL_COLOR_ATTACHMENT2_EXT,
GL_COLOR_ATTACHMENT3_EXT,
GL_COLOR_ATTACHMENT4_EXT,
GL_COLOR_ATTACHMENT5_EXT,
GL_COLOR_ATTACHMENT6_EXT,
GL_COLOR_ATTACHMENT7_EXT,
};
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, ctx->sub->fb_id);
if (ctx->sub->nr_cbufs == 0) {
glReadBuffer(GL_NONE);
glDisable(GL_FRAMEBUFFER_SRGB_EXT);
} else {
struct vrend_surface *surf = NULL;
int i;
for (i = 0; i < ctx->sub->nr_cbufs; i++) {
if (ctx->sub->surf[i]) {
surf = ctx->sub->surf[i];
}
}
if (util_format_is_srgb(surf->format)) {
glEnable(GL_FRAMEBUFFER_SRGB_EXT);
} else {
glDisable(GL_FRAMEBUFFER_SRGB_EXT);
}
}
glDrawBuffers(ctx->sub->nr_cbufs, buffers);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,084
|
static void vrend_hw_emit_rs(struct vrend_context *ctx)
{
struct pipe_rasterizer_state *state = &ctx->sub->rs_state;
int i;
if (state->depth_clip) {
glDisable(GL_DEPTH_CLAMP);
} else {
glEnable(GL_DEPTH_CLAMP);
}
if (state->point_size_per_vertex) {
glEnable(GL_PROGRAM_POINT_SIZE);
} else {
glDisable(GL_PROGRAM_POINT_SIZE);
if (state->point_size)
glPointSize(state->point_size);
}
if (state->rasterizer_discard != ctx->sub->hw_rs_state.rasterizer_discard) {
ctx->sub->hw_rs_state.rasterizer_discard = state->rasterizer_discard;
if (state->rasterizer_discard)
glEnable(GL_RASTERIZER_DISCARD);
else
glDisable(GL_RASTERIZER_DISCARD);
}
if (vrend_state.use_core_profile == false) {
glPolygonMode(GL_FRONT, translate_fill(state->fill_front));
glPolygonMode(GL_BACK, translate_fill(state->fill_back));
} else if (state->fill_front == state->fill_back) {
glPolygonMode(GL_FRONT_AND_BACK, translate_fill(state->fill_front));
} else
report_core_warn(ctx, CORE_PROFILE_WARN_POLYGON_MODE, 0);
if (state->offset_tri)
glEnable(GL_POLYGON_OFFSET_FILL);
else
glDisable(GL_POLYGON_OFFSET_FILL);
if (state->offset_line)
glEnable(GL_POLYGON_OFFSET_LINE);
else
glDisable(GL_POLYGON_OFFSET_LINE);
if (state->offset_point)
glEnable(GL_POLYGON_OFFSET_POINT);
else
glDisable(GL_POLYGON_OFFSET_POINT);
if (state->flatshade != ctx->sub->hw_rs_state.flatshade) {
ctx->sub->hw_rs_state.flatshade = state->flatshade;
if (vrend_state.use_core_profile == false) {
if (state->flatshade) {
glShadeModel(GL_FLAT);
} else {
glShadeModel(GL_SMOOTH);
}
}
}
if (state->flatshade_first != ctx->sub->hw_rs_state.flatshade_first) {
ctx->sub->hw_rs_state.flatshade_first = state->flatshade_first;
if (state->flatshade_first)
glProvokingVertexEXT(GL_FIRST_VERTEX_CONVENTION_EXT);
else
glProvokingVertexEXT(GL_LAST_VERTEX_CONVENTION_EXT);
}
glPolygonOffset(state->offset_scale, state->offset_units);
if (vrend_state.use_core_profile == false) {
if (state->poly_stipple_enable)
glEnable(GL_POLYGON_STIPPLE);
else
glDisable(GL_POLYGON_STIPPLE);
} else if (state->poly_stipple_enable) {
if (!ctx->pstip_inited)
vrend_init_pstipple_texture(ctx);
}
if (state->point_quad_rasterization) {
if (vrend_state.use_core_profile == false)
glEnable(GL_POINT_SPRITE);
glPointParameteri(GL_POINT_SPRITE_COORD_ORIGIN, state->sprite_coord_mode ? GL_UPPER_LEFT : GL_LOWER_LEFT);
} else {
if (vrend_state.use_core_profile == false)
glDisable(GL_POINT_SPRITE);
}
if (state->cull_face != PIPE_FACE_NONE) {
switch (state->cull_face) {
case PIPE_FACE_FRONT:
glCullFace(GL_FRONT);
break;
case PIPE_FACE_BACK:
glCullFace(GL_BACK);
break;
case PIPE_FACE_FRONT_AND_BACK:
glCullFace(GL_FRONT_AND_BACK);
break;
default:
fprintf(stderr, "unhandled cull-face: %x\n", state->cull_face);
}
glEnable(GL_CULL_FACE);
} else
glDisable(GL_CULL_FACE);
/* two sided lighting handled in shader for core profile */
if (vrend_state.use_core_profile == false) {
if (state->light_twoside)
glEnable(GL_VERTEX_PROGRAM_TWO_SIDE);
else
glDisable(GL_VERTEX_PROGRAM_TWO_SIDE);
}
if (state->clip_plane_enable != ctx->sub->hw_rs_state.clip_plane_enable) {
ctx->sub->hw_rs_state.clip_plane_enable = state->clip_plane_enable;
for (i = 0; i < 8; i++) {
if (state->clip_plane_enable & (1 << i))
glEnable(GL_CLIP_PLANE0 + i);
else
glDisable(GL_CLIP_PLANE0 + i);
}
}
if (vrend_state.use_core_profile == false) {
glLineStipple(state->line_stipple_factor, state->line_stipple_pattern);
if (state->line_stipple_enable)
glEnable(GL_LINE_STIPPLE);
else
glDisable(GL_LINE_STIPPLE);
} else if (state->line_stipple_enable)
report_core_warn(ctx, CORE_PROFILE_WARN_STIPPLE, 0);
if (state->line_smooth)
glEnable(GL_LINE_SMOOTH);
else
glDisable(GL_LINE_SMOOTH);
if (state->poly_smooth)
glEnable(GL_POLYGON_SMOOTH);
else
glDisable(GL_POLYGON_SMOOTH);
if (vrend_state.use_core_profile == false) {
if (state->clamp_vertex_color)
glClampColor(GL_CLAMP_VERTEX_COLOR_ARB, GL_TRUE);
else
glClampColor(GL_CLAMP_VERTEX_COLOR_ARB, GL_FALSE);
if (state->clamp_fragment_color)
glClampColor(GL_CLAMP_FRAGMENT_COLOR_ARB, GL_TRUE);
else
glClampColor(GL_CLAMP_FRAGMENT_COLOR_ARB, GL_FALSE);
} else {
if (state->clamp_vertex_color || state->clamp_fragment_color)
report_core_warn(ctx, CORE_PROFILE_WARN_CLAMP, 0);
}
if (vrend_state.have_multisample) {
if (state->multisample) {
glEnable(GL_MULTISAMPLE);
glEnable(GL_SAMPLE_MASK);
} else {
glDisable(GL_MULTISAMPLE);
glDisable(GL_SAMPLE_MASK);
}
}
}
|
DoS
| 0
|
static void vrend_hw_emit_rs(struct vrend_context *ctx)
{
struct pipe_rasterizer_state *state = &ctx->sub->rs_state;
int i;
if (state->depth_clip) {
glDisable(GL_DEPTH_CLAMP);
} else {
glEnable(GL_DEPTH_CLAMP);
}
if (state->point_size_per_vertex) {
glEnable(GL_PROGRAM_POINT_SIZE);
} else {
glDisable(GL_PROGRAM_POINT_SIZE);
if (state->point_size)
glPointSize(state->point_size);
}
if (state->rasterizer_discard != ctx->sub->hw_rs_state.rasterizer_discard) {
ctx->sub->hw_rs_state.rasterizer_discard = state->rasterizer_discard;
if (state->rasterizer_discard)
glEnable(GL_RASTERIZER_DISCARD);
else
glDisable(GL_RASTERIZER_DISCARD);
}
if (vrend_state.use_core_profile == false) {
glPolygonMode(GL_FRONT, translate_fill(state->fill_front));
glPolygonMode(GL_BACK, translate_fill(state->fill_back));
} else if (state->fill_front == state->fill_back) {
glPolygonMode(GL_FRONT_AND_BACK, translate_fill(state->fill_front));
} else
report_core_warn(ctx, CORE_PROFILE_WARN_POLYGON_MODE, 0);
if (state->offset_tri)
glEnable(GL_POLYGON_OFFSET_FILL);
else
glDisable(GL_POLYGON_OFFSET_FILL);
if (state->offset_line)
glEnable(GL_POLYGON_OFFSET_LINE);
else
glDisable(GL_POLYGON_OFFSET_LINE);
if (state->offset_point)
glEnable(GL_POLYGON_OFFSET_POINT);
else
glDisable(GL_POLYGON_OFFSET_POINT);
if (state->flatshade != ctx->sub->hw_rs_state.flatshade) {
ctx->sub->hw_rs_state.flatshade = state->flatshade;
if (vrend_state.use_core_profile == false) {
if (state->flatshade) {
glShadeModel(GL_FLAT);
} else {
glShadeModel(GL_SMOOTH);
}
}
}
if (state->flatshade_first != ctx->sub->hw_rs_state.flatshade_first) {
ctx->sub->hw_rs_state.flatshade_first = state->flatshade_first;
if (state->flatshade_first)
glProvokingVertexEXT(GL_FIRST_VERTEX_CONVENTION_EXT);
else
glProvokingVertexEXT(GL_LAST_VERTEX_CONVENTION_EXT);
}
glPolygonOffset(state->offset_scale, state->offset_units);
if (vrend_state.use_core_profile == false) {
if (state->poly_stipple_enable)
glEnable(GL_POLYGON_STIPPLE);
else
glDisable(GL_POLYGON_STIPPLE);
} else if (state->poly_stipple_enable) {
if (!ctx->pstip_inited)
vrend_init_pstipple_texture(ctx);
}
if (state->point_quad_rasterization) {
if (vrend_state.use_core_profile == false)
glEnable(GL_POINT_SPRITE);
glPointParameteri(GL_POINT_SPRITE_COORD_ORIGIN, state->sprite_coord_mode ? GL_UPPER_LEFT : GL_LOWER_LEFT);
} else {
if (vrend_state.use_core_profile == false)
glDisable(GL_POINT_SPRITE);
}
if (state->cull_face != PIPE_FACE_NONE) {
switch (state->cull_face) {
case PIPE_FACE_FRONT:
glCullFace(GL_FRONT);
break;
case PIPE_FACE_BACK:
glCullFace(GL_BACK);
break;
case PIPE_FACE_FRONT_AND_BACK:
glCullFace(GL_FRONT_AND_BACK);
break;
default:
fprintf(stderr, "unhandled cull-face: %x\n", state->cull_face);
}
glEnable(GL_CULL_FACE);
} else
glDisable(GL_CULL_FACE);
/* two sided lighting handled in shader for core profile */
if (vrend_state.use_core_profile == false) {
if (state->light_twoside)
glEnable(GL_VERTEX_PROGRAM_TWO_SIDE);
else
glDisable(GL_VERTEX_PROGRAM_TWO_SIDE);
}
if (state->clip_plane_enable != ctx->sub->hw_rs_state.clip_plane_enable) {
ctx->sub->hw_rs_state.clip_plane_enable = state->clip_plane_enable;
for (i = 0; i < 8; i++) {
if (state->clip_plane_enable & (1 << i))
glEnable(GL_CLIP_PLANE0 + i);
else
glDisable(GL_CLIP_PLANE0 + i);
}
}
if (vrend_state.use_core_profile == false) {
glLineStipple(state->line_stipple_factor, state->line_stipple_pattern);
if (state->line_stipple_enable)
glEnable(GL_LINE_STIPPLE);
else
glDisable(GL_LINE_STIPPLE);
} else if (state->line_stipple_enable)
report_core_warn(ctx, CORE_PROFILE_WARN_STIPPLE, 0);
if (state->line_smooth)
glEnable(GL_LINE_SMOOTH);
else
glDisable(GL_LINE_SMOOTH);
if (state->poly_smooth)
glEnable(GL_POLYGON_SMOOTH);
else
glDisable(GL_POLYGON_SMOOTH);
if (vrend_state.use_core_profile == false) {
if (state->clamp_vertex_color)
glClampColor(GL_CLAMP_VERTEX_COLOR_ARB, GL_TRUE);
else
glClampColor(GL_CLAMP_VERTEX_COLOR_ARB, GL_FALSE);
if (state->clamp_fragment_color)
glClampColor(GL_CLAMP_FRAGMENT_COLOR_ARB, GL_TRUE);
else
glClampColor(GL_CLAMP_FRAGMENT_COLOR_ARB, GL_FALSE);
} else {
if (state->clamp_vertex_color || state->clamp_fragment_color)
report_core_warn(ctx, CORE_PROFILE_WARN_CLAMP, 0);
}
if (vrend_state.have_multisample) {
if (state->multisample) {
glEnable(GL_MULTISAMPLE);
glEnable(GL_SAMPLE_MASK);
} else {
glDisable(GL_MULTISAMPLE);
glDisable(GL_SAMPLE_MASK);
}
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,085
|
bool vrend_hw_switch_context(struct vrend_context *ctx, bool now)
{
if (ctx == vrend_state.current_ctx && ctx->ctx_switch_pending == false)
return true;
if (ctx->ctx_id != 0 && ctx->in_error) {
return false;
}
ctx->ctx_switch_pending = true;
if (now == true) {
vrend_finish_context_switch(ctx);
}
vrend_state.current_ctx = ctx;
return true;
}
|
DoS
| 0
|
bool vrend_hw_switch_context(struct vrend_context *ctx, bool now)
{
if (ctx == vrend_state.current_ctx && ctx->ctx_switch_pending == false)
return true;
if (ctx->ctx_id != 0 && ctx->in_error) {
return false;
}
ctx->ctx_switch_pending = true;
if (now == true) {
vrend_finish_context_switch(ctx);
}
vrend_state.current_ctx = ctx;
return true;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,086
|
static void vrend_init_pstipple_texture(struct vrend_context *ctx)
{
glGenTextures(1, &ctx->pstipple_tex_id);
glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, 32, 32, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
ctx->pstip_inited = true;
}
|
DoS
| 0
|
static void vrend_init_pstipple_texture(struct vrend_context *ctx)
{
glGenTextures(1, &ctx->pstipple_tex_id);
glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
glTexImage2D(GL_TEXTURE_2D, 0, GL_R8, 32, 32, 0, GL_RED, GL_UNSIGNED_BYTE, NULL);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_REPEAT);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
glTexParameteri(GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
ctx->pstip_inited = true;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,087
|
vrend_insert_format(struct vrend_format_table *entry, uint32_t bindings)
{
tex_conv_table[entry->format] = *entry;
tex_conv_table[entry->format].bindings = bindings;
}
|
DoS
| 0
|
vrend_insert_format(struct vrend_format_table *entry, uint32_t bindings)
{
tex_conv_table[entry->format] = *entry;
tex_conv_table[entry->format].bindings = bindings;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,088
|
bool vrend_is_ds_format(enum virgl_formats format)
{
return vrend_format_is_ds(format);
}
|
DoS
| 0
|
bool vrend_is_ds_format(enum virgl_formats format)
{
return vrend_format_is_ds(format);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,089
|
static bool vrend_is_timer_query(GLenum gltype)
{
return gltype == GL_TIMESTAMP ||
gltype == GL_TIME_ELAPSED;
}
|
DoS
| 0
|
static bool vrend_is_timer_query(GLenum gltype)
{
return gltype == GL_TIMESTAMP ||
gltype == GL_TIME_ELAPSED;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,090
|
void vrend_object_bind_blend(struct vrend_context *ctx,
uint32_t handle)
{
struct pipe_blend_state *state;
if (handle == 0) {
memset(&ctx->sub->blend_state, 0, sizeof(ctx->sub->blend_state));
vrend_blend_enable(ctx, false);
return;
}
state = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_BLEND);
if (!state) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
return;
}
ctx->sub->blend_state = *state;
vrend_hw_emit_blend(ctx, &ctx->sub->blend_state);
}
|
DoS
| 0
|
void vrend_object_bind_blend(struct vrend_context *ctx,
uint32_t handle)
{
struct pipe_blend_state *state;
if (handle == 0) {
memset(&ctx->sub->blend_state, 0, sizeof(ctx->sub->blend_state));
vrend_blend_enable(ctx, false);
return;
}
state = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_BLEND);
if (!state) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
return;
}
ctx->sub->blend_state = *state;
vrend_hw_emit_blend(ctx, &ctx->sub->blend_state);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,091
|
void vrend_object_bind_dsa(struct vrend_context *ctx,
uint32_t handle)
{
struct pipe_depth_stencil_alpha_state *state;
if (handle == 0) {
memset(&ctx->sub->dsa_state, 0, sizeof(ctx->sub->dsa_state));
ctx->sub->dsa = NULL;
ctx->sub->stencil_state_dirty = true;
ctx->sub->shader_dirty = true;
vrend_hw_emit_dsa(ctx);
return;
}
state = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_DSA);
if (!state) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
return;
}
if (ctx->sub->dsa != state) {
ctx->sub->stencil_state_dirty = true;
ctx->sub->shader_dirty = true;
}
ctx->sub->dsa_state = *state;
ctx->sub->dsa = state;
vrend_hw_emit_dsa(ctx);
}
|
DoS
| 0
|
void vrend_object_bind_dsa(struct vrend_context *ctx,
uint32_t handle)
{
struct pipe_depth_stencil_alpha_state *state;
if (handle == 0) {
memset(&ctx->sub->dsa_state, 0, sizeof(ctx->sub->dsa_state));
ctx->sub->dsa = NULL;
ctx->sub->stencil_state_dirty = true;
ctx->sub->shader_dirty = true;
vrend_hw_emit_dsa(ctx);
return;
}
state = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_DSA);
if (!state) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
return;
}
if (ctx->sub->dsa != state) {
ctx->sub->stencil_state_dirty = true;
ctx->sub->shader_dirty = true;
}
ctx->sub->dsa_state = *state;
ctx->sub->dsa = state;
vrend_hw_emit_dsa(ctx);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,092
|
void vrend_object_bind_rasterizer(struct vrend_context *ctx,
uint32_t handle)
{
struct pipe_rasterizer_state *state;
if (handle == 0) {
memset(&ctx->sub->rs_state, 0, sizeof(ctx->sub->rs_state));
return;
}
state = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_RASTERIZER);
if (!state) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
return;
}
ctx->sub->rs_state = *state;
ctx->sub->scissor_state_dirty = (1 << 0);
ctx->sub->shader_dirty = true;
vrend_hw_emit_rs(ctx);
}
|
DoS
| 0
|
void vrend_object_bind_rasterizer(struct vrend_context *ctx,
uint32_t handle)
{
struct pipe_rasterizer_state *state;
if (handle == 0) {
memset(&ctx->sub->rs_state, 0, sizeof(ctx->sub->rs_state));
return;
}
state = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_RASTERIZER);
if (!state) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handle);
return;
}
ctx->sub->rs_state = *state;
ctx->sub->scissor_state_dirty = (1 << 0);
ctx->sub->shader_dirty = true;
vrend_hw_emit_rs(ctx);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,093
|
static void vrend_patch_blend_state(struct vrend_context *ctx)
{
struct pipe_blend_state new_state = ctx->sub->blend_state;
struct pipe_blend_state *state = &ctx->sub->blend_state;
bool dest_alpha_only = false, dest_has_no_alpha = false;
struct pipe_blend_color blend_color = ctx->sub->blend_color;
int i;
if (ctx->sub->nr_cbufs == 0)
return;
for (i = 0; i < ctx->sub->nr_cbufs; i++) {
if (!ctx->sub->surf[i])
continue;
if (vrend_format_is_emulated_alpha(ctx->sub->surf[i]->format)) {
dest_alpha_only = true;
}
if (!util_format_has_alpha(ctx->sub->surf[i]->format)) {
dest_has_no_alpha = true;
}
}
if (dest_alpha_only) {
for (i = 0; i < (state->independent_blend_enable ? PIPE_MAX_COLOR_BUFS : 1); i++) {
if (state->rt[i].blend_enable) {
new_state.rt[i].rgb_src_factor = conv_a8_blend(state->rt[i].alpha_src_factor);
new_state.rt[i].rgb_dst_factor = conv_a8_blend(state->rt[i].alpha_dst_factor);
new_state.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ZERO;
new_state.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
}
new_state.rt[i].colormask = 0;
if (state->rt[i].colormask & PIPE_MASK_A)
new_state.rt[i].colormask |= PIPE_MASK_R;
}
blend_color.color[0] = blend_color.color[3];
blend_color.color[1] = 0.0f;
blend_color.color[2] = 0.0f;
blend_color.color[3] = 0.0f;
} else if (dest_has_no_alpha) {
for (i = 0; i < (state->independent_blend_enable ? PIPE_MAX_COLOR_BUFS : 1); i++) {
if (!(is_dst_blend(state->rt[i].rgb_src_factor) ||
is_dst_blend(state->rt[i].rgb_dst_factor) ||
is_dst_blend(state->rt[i].alpha_src_factor) ||
is_dst_blend(state->rt[i].alpha_dst_factor)))
continue;
new_state.rt[i].rgb_src_factor = conv_dst_blend(state->rt[i].rgb_src_factor);
new_state.rt[i].rgb_dst_factor = conv_dst_blend(state->rt[i].rgb_dst_factor);
new_state.rt[i].alpha_src_factor = conv_dst_blend(state->rt[i].alpha_src_factor);
new_state.rt[i].alpha_dst_factor = conv_dst_blend(state->rt[i].alpha_dst_factor);
}
}
vrend_hw_emit_blend(ctx, &new_state);
glBlendColor(blend_color.color[0],
blend_color.color[1],
blend_color.color[2],
blend_color.color[3]);
}
|
DoS
| 0
|
static void vrend_patch_blend_state(struct vrend_context *ctx)
{
struct pipe_blend_state new_state = ctx->sub->blend_state;
struct pipe_blend_state *state = &ctx->sub->blend_state;
bool dest_alpha_only = false, dest_has_no_alpha = false;
struct pipe_blend_color blend_color = ctx->sub->blend_color;
int i;
if (ctx->sub->nr_cbufs == 0)
return;
for (i = 0; i < ctx->sub->nr_cbufs; i++) {
if (!ctx->sub->surf[i])
continue;
if (vrend_format_is_emulated_alpha(ctx->sub->surf[i]->format)) {
dest_alpha_only = true;
}
if (!util_format_has_alpha(ctx->sub->surf[i]->format)) {
dest_has_no_alpha = true;
}
}
if (dest_alpha_only) {
for (i = 0; i < (state->independent_blend_enable ? PIPE_MAX_COLOR_BUFS : 1); i++) {
if (state->rt[i].blend_enable) {
new_state.rt[i].rgb_src_factor = conv_a8_blend(state->rt[i].alpha_src_factor);
new_state.rt[i].rgb_dst_factor = conv_a8_blend(state->rt[i].alpha_dst_factor);
new_state.rt[i].alpha_src_factor = PIPE_BLENDFACTOR_ZERO;
new_state.rt[i].alpha_dst_factor = PIPE_BLENDFACTOR_ZERO;
}
new_state.rt[i].colormask = 0;
if (state->rt[i].colormask & PIPE_MASK_A)
new_state.rt[i].colormask |= PIPE_MASK_R;
}
blend_color.color[0] = blend_color.color[3];
blend_color.color[1] = 0.0f;
blend_color.color[2] = 0.0f;
blend_color.color[3] = 0.0f;
} else if (dest_has_no_alpha) {
for (i = 0; i < (state->independent_blend_enable ? PIPE_MAX_COLOR_BUFS : 1); i++) {
if (!(is_dst_blend(state->rt[i].rgb_src_factor) ||
is_dst_blend(state->rt[i].rgb_dst_factor) ||
is_dst_blend(state->rt[i].alpha_src_factor) ||
is_dst_blend(state->rt[i].alpha_dst_factor)))
continue;
new_state.rt[i].rgb_src_factor = conv_dst_blend(state->rt[i].rgb_src_factor);
new_state.rt[i].rgb_dst_factor = conv_dst_blend(state->rt[i].rgb_dst_factor);
new_state.rt[i].alpha_src_factor = conv_dst_blend(state->rt[i].alpha_src_factor);
new_state.rt[i].alpha_dst_factor = conv_dst_blend(state->rt[i].alpha_dst_factor);
}
}
vrend_hw_emit_blend(ctx, &new_state);
glBlendColor(blend_color.color[0],
blend_color.color[1],
blend_color.color[2],
blend_color.color[3]);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,094
|
static void vrend_pause_render_condition(struct vrend_context *ctx, bool pause)
{
if (pause) {
if (ctx->sub->cond_render_q_id)
glEndConditionalRenderNV();
} else {
if (ctx->sub->cond_render_q_id)
glBeginConditionalRender(ctx->sub->cond_render_q_id,
ctx->sub->cond_render_gl_mode);
}
}
|
DoS
| 0
|
static void vrend_pause_render_condition(struct vrend_context *ctx, bool pause)
{
if (pause) {
if (ctx->sub->cond_render_q_id)
glEndConditionalRenderNV();
} else {
if (ctx->sub->cond_render_q_id)
glBeginConditionalRender(ctx->sub->cond_render_q_id,
ctx->sub->cond_render_gl_mode);
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,095
|
void vrend_renderer_attach_res_ctx(int ctx_id, int resource_id)
{
struct vrend_context *ctx = vrend_lookup_renderer_ctx(ctx_id);
struct vrend_resource *res;
if (!ctx)
return;
res = vrend_resource_lookup(resource_id, 0);
if (!res)
return;
vrend_object_insert_nofree(ctx->res_hash, res, sizeof(*res), resource_id, 1, false);
}
|
DoS
| 0
|
void vrend_renderer_attach_res_ctx(int ctx_id, int resource_id)
{
struct vrend_context *ctx = vrend_lookup_renderer_ctx(ctx_id);
struct vrend_resource *res;
if (!ctx)
return;
res = vrend_resource_lookup(resource_id, 0);
if (!res)
return;
vrend_object_insert_nofree(ctx->res_hash, res, sizeof(*res), resource_id, 1, false);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,096
|
void vrend_renderer_blit(struct vrend_context *ctx,
uint32_t dst_handle, uint32_t src_handle,
const struct pipe_blit_info *info)
{
struct vrend_resource *src_res, *dst_res;
src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle);
dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle);
if (!src_res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, src_handle);
return;
}
if (!dst_res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle);
return;
}
if (ctx->in_error)
return;
if (info->render_condition_enable == false)
vrend_pause_render_condition(ctx, true);
vrend_renderer_blit_int(ctx, src_res, dst_res, info);
if (info->render_condition_enable == false)
vrend_pause_render_condition(ctx, false);
}
|
DoS
| 0
|
void vrend_renderer_blit(struct vrend_context *ctx,
uint32_t dst_handle, uint32_t src_handle,
const struct pipe_blit_info *info)
{
struct vrend_resource *src_res, *dst_res;
src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle);
dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle);
if (!src_res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, src_handle);
return;
}
if (!dst_res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle);
return;
}
if (ctx->in_error)
return;
if (info->render_condition_enable == false)
vrend_pause_render_condition(ctx, true);
vrend_renderer_blit_int(ctx, src_res, dst_res, info);
if (info->render_condition_enable == false)
vrend_pause_render_condition(ctx, false);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,097
|
static void vrend_renderer_blit_int(struct vrend_context *ctx,
struct vrend_resource *src_res,
struct vrend_resource *dst_res,
const struct pipe_blit_info *info)
{
GLbitfield glmask = 0;
int src_y1, src_y2, dst_y1, dst_y2;
GLenum filter;
int n_layers = 1, i;
bool use_gl = false;
filter = convert_mag_filter(info->filter);
/* if we can't make FBO's use the fallback path */
if (!vrend_format_can_render(src_res->base.format) &&
!vrend_format_is_ds(src_res->base.format))
use_gl = true;
if (!vrend_format_can_render(dst_res->base.format) &&
!vrend_format_is_ds(dst_res->base.format))
use_gl = true;
/* different depth formats */
if (vrend_format_is_ds(src_res->base.format) &&
vrend_format_is_ds(dst_res->base.format)) {
if (src_res->base.format != dst_res->base.format) {
if (!(src_res->base.format == PIPE_FORMAT_S8_UINT_Z24_UNORM &&
(dst_res->base.format == PIPE_FORMAT_Z24X8_UNORM))) {
use_gl = true;
}
}
}
/* glBlitFramebuffer - can support depth stencil with NEAREST
which we use for mipmaps */
if ((info->mask & (PIPE_MASK_Z | PIPE_MASK_S)) && info->filter == PIPE_TEX_FILTER_LINEAR)
use_gl = true;
/* for scaled MS blits we either need extensions or hand roll */
if (src_res->base.nr_samples > 1 &&
src_res->base.nr_samples != dst_res->base.nr_samples &&
(info->src.box.width != info->dst.box.width ||
info->src.box.height != info->dst.box.height)) {
if (vrend_state.have_ms_scaled_blit)
filter = GL_SCALED_RESOLVE_NICEST_EXT;
else
use_gl = true;
}
/* for 3D mipmapped blits - hand roll time */
if (info->src.box.depth != info->dst.box.depth)
use_gl = true;
if (vrend_format_is_emulated_alpha(info->dst.format) ||
vrend_format_is_emulated_alpha(info->src.format))
use_gl = true;
if (use_gl) {
vrend_renderer_blit_gl(ctx, src_res, dst_res, info);
vrend_clicbs->make_current(0, ctx->sub->gl_context);
return;
}
if (info->mask & PIPE_MASK_Z)
glmask |= GL_DEPTH_BUFFER_BIT;
if (info->mask & PIPE_MASK_S)
glmask |= GL_STENCIL_BUFFER_BIT;
if (info->mask & PIPE_MASK_RGBA)
glmask |= GL_COLOR_BUFFER_BIT;
if (!dst_res->y_0_top) {
dst_y1 = info->dst.box.y + info->dst.box.height;
dst_y2 = info->dst.box.y;
} else {
dst_y1 = dst_res->base.height0 - info->dst.box.y - info->dst.box.height;
dst_y2 = dst_res->base.height0 - info->dst.box.y;
}
if (!src_res->y_0_top) {
src_y1 = info->src.box.y + info->src.box.height;
src_y2 = info->src.box.y;
} else {
src_y1 = src_res->base.height0 - info->src.box.y - info->src.box.height;
src_y2 = src_res->base.height0 - info->src.box.y;
}
if (info->scissor_enable) {
glScissor(info->scissor.minx, info->scissor.miny, info->scissor.maxx - info->scissor.minx, info->scissor.maxy - info->scissor.miny);
ctx->sub->scissor_state_dirty = (1 << 0);
glEnable(GL_SCISSOR_TEST);
} else
glDisable(GL_SCISSOR_TEST);
glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[0]);
if (info->mask & PIPE_MASK_RGBA)
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
else
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, 0, 0);
glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[1]);
if (info->mask & PIPE_MASK_RGBA)
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
else if (info->mask & (PIPE_MASK_Z | PIPE_MASK_S))
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, 0, 0);
if (info->src.box.depth == info->dst.box.depth)
n_layers = info->dst.box.depth;
for (i = 0; i < n_layers; i++) {
glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[0]);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
vrend_fb_bind_texture(src_res, 0, info->src.level, info->src.box.z + i);
glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[1]);
vrend_fb_bind_texture(dst_res, 0, info->dst.level, info->dst.box.z + i);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]);
glBindFramebuffer(GL_READ_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]);
glBlitFramebuffer(info->src.box.x,
src_y1,
info->src.box.x + info->src.box.width,
src_y2,
info->dst.box.x,
dst_y1,
info->dst.box.x + info->dst.box.width,
dst_y2,
glmask, filter);
}
}
|
DoS
| 0
|
static void vrend_renderer_blit_int(struct vrend_context *ctx,
struct vrend_resource *src_res,
struct vrend_resource *dst_res,
const struct pipe_blit_info *info)
{
GLbitfield glmask = 0;
int src_y1, src_y2, dst_y1, dst_y2;
GLenum filter;
int n_layers = 1, i;
bool use_gl = false;
filter = convert_mag_filter(info->filter);
/* if we can't make FBO's use the fallback path */
if (!vrend_format_can_render(src_res->base.format) &&
!vrend_format_is_ds(src_res->base.format))
use_gl = true;
if (!vrend_format_can_render(dst_res->base.format) &&
!vrend_format_is_ds(dst_res->base.format))
use_gl = true;
/* different depth formats */
if (vrend_format_is_ds(src_res->base.format) &&
vrend_format_is_ds(dst_res->base.format)) {
if (src_res->base.format != dst_res->base.format) {
if (!(src_res->base.format == PIPE_FORMAT_S8_UINT_Z24_UNORM &&
(dst_res->base.format == PIPE_FORMAT_Z24X8_UNORM))) {
use_gl = true;
}
}
}
/* glBlitFramebuffer - can support depth stencil with NEAREST
which we use for mipmaps */
if ((info->mask & (PIPE_MASK_Z | PIPE_MASK_S)) && info->filter == PIPE_TEX_FILTER_LINEAR)
use_gl = true;
/* for scaled MS blits we either need extensions or hand roll */
if (src_res->base.nr_samples > 1 &&
src_res->base.nr_samples != dst_res->base.nr_samples &&
(info->src.box.width != info->dst.box.width ||
info->src.box.height != info->dst.box.height)) {
if (vrend_state.have_ms_scaled_blit)
filter = GL_SCALED_RESOLVE_NICEST_EXT;
else
use_gl = true;
}
/* for 3D mipmapped blits - hand roll time */
if (info->src.box.depth != info->dst.box.depth)
use_gl = true;
if (vrend_format_is_emulated_alpha(info->dst.format) ||
vrend_format_is_emulated_alpha(info->src.format))
use_gl = true;
if (use_gl) {
vrend_renderer_blit_gl(ctx, src_res, dst_res, info);
vrend_clicbs->make_current(0, ctx->sub->gl_context);
return;
}
if (info->mask & PIPE_MASK_Z)
glmask |= GL_DEPTH_BUFFER_BIT;
if (info->mask & PIPE_MASK_S)
glmask |= GL_STENCIL_BUFFER_BIT;
if (info->mask & PIPE_MASK_RGBA)
glmask |= GL_COLOR_BUFFER_BIT;
if (!dst_res->y_0_top) {
dst_y1 = info->dst.box.y + info->dst.box.height;
dst_y2 = info->dst.box.y;
} else {
dst_y1 = dst_res->base.height0 - info->dst.box.y - info->dst.box.height;
dst_y2 = dst_res->base.height0 - info->dst.box.y;
}
if (!src_res->y_0_top) {
src_y1 = info->src.box.y + info->src.box.height;
src_y2 = info->src.box.y;
} else {
src_y1 = src_res->base.height0 - info->src.box.y - info->src.box.height;
src_y2 = src_res->base.height0 - info->src.box.y;
}
if (info->scissor_enable) {
glScissor(info->scissor.minx, info->scissor.miny, info->scissor.maxx - info->scissor.minx, info->scissor.maxy - info->scissor.miny);
ctx->sub->scissor_state_dirty = (1 << 0);
glEnable(GL_SCISSOR_TEST);
} else
glDisable(GL_SCISSOR_TEST);
glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[0]);
if (info->mask & PIPE_MASK_RGBA)
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
else
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, 0, 0);
glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[1]);
if (info->mask & PIPE_MASK_RGBA)
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
else if (info->mask & (PIPE_MASK_Z | PIPE_MASK_S))
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_COLOR_ATTACHMENT0,
GL_TEXTURE_2D, 0, 0);
if (info->src.box.depth == info->dst.box.depth)
n_layers = info->dst.box.depth;
for (i = 0; i < n_layers; i++) {
glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[0]);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
vrend_fb_bind_texture(src_res, 0, info->src.level, info->src.box.z + i);
glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[1]);
vrend_fb_bind_texture(dst_res, 0, info->dst.level, info->dst.box.z + i);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]);
glBindFramebuffer(GL_READ_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]);
glBlitFramebuffer(info->src.box.x,
src_y1,
info->src.box.x + info->src.box.width,
src_y2,
info->dst.box.x,
dst_y1,
info->dst.box.x + info->dst.box.width,
dst_y2,
glmask, filter);
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,098
|
void vrend_renderer_check_queries(void)
{
struct vrend_query *query, *stor;
if (!vrend_state.inited)
return;
LIST_FOR_EACH_ENTRY_SAFE(query, stor, &vrend_state.waiting_query_list, waiting_queries) {
vrend_hw_switch_context(vrend_lookup_renderer_ctx(query->ctx_id), true);
if (vrend_check_query(query))
list_delinit(&query->waiting_queries);
}
}
|
DoS
| 0
|
void vrend_renderer_check_queries(void)
{
struct vrend_query *query, *stor;
if (!vrend_state.inited)
return;
LIST_FOR_EACH_ENTRY_SAFE(query, stor, &vrend_state.waiting_query_list, waiting_queries) {
vrend_hw_switch_context(vrend_lookup_renderer_ctx(query->ctx_id), true);
if (vrend_check_query(query))
list_delinit(&query->waiting_queries);
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,099
|
void vrend_renderer_create_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
{
struct vrend_sub_context *sub;
struct virgl_gl_ctx_param ctx_params;
LIST_FOR_EACH_ENTRY(sub, &ctx->sub_ctxs, head) {
if (sub->sub_ctx_id == sub_ctx_id) {
return;
}
}
sub = CALLOC_STRUCT(vrend_sub_context);
if (!sub)
return;
ctx_params.shared = (ctx->ctx_id == 0 && sub_ctx_id == 0) ? false : true;
ctx_params.major_ver = vrend_state.gl_major_ver;
ctx_params.minor_ver = vrend_state.gl_minor_ver;
sub->gl_context = vrend_clicbs->create_gl_context(0, &ctx_params);
vrend_clicbs->make_current(0, sub->gl_context);
sub->sub_ctx_id = sub_ctx_id;
if (!vrend_state.have_vertex_attrib_binding) {
glGenVertexArrays(1, &sub->vaoid);
glBindVertexArray(sub->vaoid);
}
glGenFramebuffers(1, &sub->fb_id);
glGenFramebuffers(2, sub->blit_fb_ids);
list_inithead(&sub->programs);
list_inithead(&sub->streamout_list);
sub->object_hash = vrend_object_init_ctx_table();
ctx->sub = sub;
list_add(&sub->head, &ctx->sub_ctxs);
if (sub_ctx_id == 0)
ctx->sub0 = sub;
}
|
DoS
| 0
|
void vrend_renderer_create_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
{
struct vrend_sub_context *sub;
struct virgl_gl_ctx_param ctx_params;
LIST_FOR_EACH_ENTRY(sub, &ctx->sub_ctxs, head) {
if (sub->sub_ctx_id == sub_ctx_id) {
return;
}
}
sub = CALLOC_STRUCT(vrend_sub_context);
if (!sub)
return;
ctx_params.shared = (ctx->ctx_id == 0 && sub_ctx_id == 0) ? false : true;
ctx_params.major_ver = vrend_state.gl_major_ver;
ctx_params.minor_ver = vrend_state.gl_minor_ver;
sub->gl_context = vrend_clicbs->create_gl_context(0, &ctx_params);
vrend_clicbs->make_current(0, sub->gl_context);
sub->sub_ctx_id = sub_ctx_id;
if (!vrend_state.have_vertex_attrib_binding) {
glGenVertexArrays(1, &sub->vaoid);
glBindVertexArray(sub->vaoid);
}
glGenFramebuffers(1, &sub->fb_id);
glGenFramebuffers(2, sub->blit_fb_ids);
list_inithead(&sub->programs);
list_inithead(&sub->streamout_list);
sub->object_hash = vrend_object_init_ctx_table();
ctx->sub = sub;
list_add(&sub->head, &ctx->sub_ctxs);
if (sub_ctx_id == 0)
ctx->sub0 = sub;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.