| idx (int64) | func_before (string) | Vulnerability Classification (string) | vul (int64) | func_after (string) | patch (string) | CWE ID (string) | lines_before (string) | lines_after (string) |
|---|---|---|---|---|---|---|---|---|
7,100
|
static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle)
{
struct vrend_resource *res = vrend_object_lookup(ctx->res_hash, res_handle, 1);
return res;
}
|
DoS
| 0
|
static struct vrend_resource *vrend_renderer_ctx_res_lookup(struct vrend_context *ctx, int res_handle)
{
struct vrend_resource *res = vrend_object_lookup(ctx->res_hash, res_handle, 1);
return res;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
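Note: every row shown here carries the same `patch` and `CWE ID`. The diff is the upstream fix to `vrend_create_vertex_elements_state`, which moves the `CALLOC_STRUCT` allocation below the `num_elements > PIPE_MAX_ATTRIBS` bounds check so the early `EINVAL` return can no longer leak the freshly allocated array (CWE-772, Missing Release of Memory after Effective Lifetime). Consistent with this, `vul` is 0 and `func_before` equals `func_after` in each row: these are unchanged context functions from the patched file, not the vulnerable function itself. Below is a minimal standalone sketch of the leak pattern and its fix, not taken from the dataset; `MAX_ELEMENTS` and `struct element_array` are hypothetical stand-ins for `PIPE_MAX_ATTRIBS` and `vrend_vertex_element_array`.

```c
/* Sketch of the CWE-772 pattern the patch removes: allocate-then-validate
 * leaks the allocation on the early EINVAL return; validate-then-allocate
 * keeps the error path allocation-free. */
#include <errno.h>
#include <stdlib.h>

#define MAX_ELEMENTS 32              /* hypothetical stand-in for PIPE_MAX_ATTRIBS */

struct element_array { unsigned count; };

/* Pre-patch shape: 'v' is never freed when the size check fails. */
static int create_leaky(unsigned num_elements)
{
   struct element_array *v = calloc(1, sizeof(*v));
   if (!v)
      return ENOMEM;
   if (num_elements > MAX_ELEMENTS)
      return EINVAL;                 /* leak: v is still live here */
   v->count = num_elements;
   free(v);                          /* placeholder for handing v off to a table */
   return 0;
}

/* Post-patch shape, mirroring the diff: validate first, then allocate. */
static int create_fixed(unsigned num_elements)
{
   struct element_array *v;
   if (num_elements > MAX_ELEMENTS)
      return EINVAL;                 /* nothing allocated yet, nothing to leak */
   v = calloc(1, sizeof(*v));
   if (!v)
      return ENOMEM;
   v->count = num_elements;
   free(v);
   return 0;
}

int main(void)
{
   /* Both reject an oversized request; only the fixed one does so leak-free. */
   return (create_leaky(64) == EINVAL && create_fixed(64) == EINVAL) ? 0 : 1;
}
```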
7,101
|
void vrend_renderer_destroy_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
{
struct vrend_sub_context *sub, *tofree = NULL;
/* never destroy sub context id 0 */
if (sub_ctx_id == 0)
return;
LIST_FOR_EACH_ENTRY(sub, &ctx->sub_ctxs, head) {
if (sub->sub_ctx_id == sub_ctx_id) {
tofree = sub;
}
}
if (tofree) {
if (ctx->sub == tofree) {
ctx->sub = ctx->sub0;
vrend_clicbs->make_current(0, ctx->sub->gl_context);
}
vrend_destroy_sub_context(tofree);
}
}
|
DoS
| 0
|
void vrend_renderer_destroy_sub_ctx(struct vrend_context *ctx, int sub_ctx_id)
{
struct vrend_sub_context *sub, *tofree = NULL;
/* never destroy sub context id 0 */
if (sub_ctx_id == 0)
return;
LIST_FOR_EACH_ENTRY(sub, &ctx->sub_ctxs, head) {
if (sub->sub_ctx_id == sub_ctx_id) {
tofree = sub;
}
}
if (tofree) {
if (ctx->sub == tofree) {
ctx->sub = ctx->sub0;
vrend_clicbs->make_current(0, ctx->sub->gl_context);
}
vrend_destroy_sub_context(tofree);
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,102
|
void vrend_renderer_detach_res_ctx(int ctx_id, int res_handle)
{
struct vrend_context *ctx = vrend_lookup_renderer_ctx(ctx_id);
if (!ctx)
return;
vrend_renderer_detach_res_ctx_p(ctx, res_handle);
}
|
DoS
| 0
|
void vrend_renderer_detach_res_ctx(int ctx_id, int res_handle)
{
struct vrend_context *ctx = vrend_lookup_renderer_ctx(ctx_id);
if (!ctx)
return;
vrend_renderer_detach_res_ctx_p(ctx, res_handle);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,103
|
static void vrend_renderer_detach_res_ctx_p(struct vrend_context *ctx, int res_handle)
{
struct vrend_resource *res;
res = vrend_object_lookup(ctx->res_hash, res_handle, 1);
if (!res)
return;
vrend_object_remove(ctx->res_hash, res_handle, 1);
}
|
DoS
| 0
|
static void vrend_renderer_detach_res_ctx_p(struct vrend_context *ctx, int res_handle)
{
struct vrend_resource *res;
res = vrend_object_lookup(ctx->res_hash, res_handle, 1);
if (!res)
return;
vrend_object_remove(ctx->res_hash, res_handle, 1);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,104
|
void vrend_renderer_fill_caps(uint32_t set, uint32_t version,
union virgl_caps *caps)
{
int i;
GLint max;
int gl_ver = epoxy_gl_version();
if (!caps)
return;
memset(caps, 0, sizeof(*caps));
if (set != 1 && set != 0) {
caps->max_version = 0;
return;
}
caps->max_version = 1;
caps->v1.bset.occlusion_query = 1;
if (gl_ver >= 30) {
caps->v1.bset.indep_blend_enable = 1;
caps->v1.bset.conditional_render = 1;
} else {
if (epoxy_has_gl_extension("GL_EXT_draw_buffers2"))
caps->v1.bset.indep_blend_enable = 1;
if (epoxy_has_gl_extension("GL_NV_conditional_render"))
caps->v1.bset.conditional_render = 1;
}
if (vrend_state.use_core_profile) {
caps->v1.bset.poly_stipple = 0;
caps->v1.bset.color_clamping = 0;
} else {
caps->v1.bset.poly_stipple = 1;
caps->v1.bset.color_clamping = 1;
}
if (gl_ver >= 31) {
caps->v1.bset.instanceid = 1;
glGetIntegerv(GL_MAX_VERTEX_UNIFORM_BLOCKS, &max);
vrend_state.max_uniform_blocks = max;
caps->v1.max_uniform_blocks = max + 1;
} else {
if (epoxy_has_gl_extension("GL_ARB_draw_instanced"))
caps->v1.bset.instanceid = 1;
}
if (vrend_state.have_nv_prim_restart || vrend_state.have_gl_prim_restart)
caps->v1.bset.primitive_restart = 1;
if (gl_ver >= 32) {
caps->v1.bset.fragment_coord_conventions = 1;
caps->v1.bset.depth_clip_disable = 1;
caps->v1.bset.seamless_cube_map = 1;
} else {
if (epoxy_has_gl_extension("GL_ARB_fragment_coord_conventions"))
caps->v1.bset.fragment_coord_conventions = 1;
if (epoxy_has_gl_extension("GL_ARB_seamless_cube_map"))
caps->v1.bset.seamless_cube_map = 1;
}
if (epoxy_has_gl_extension("GL_AMD_seamless_cube_map_per_texture"))
caps->v1.bset.seamless_cube_map_per_texture = 1;
if (epoxy_has_gl_extension("GL_ARB_texture_multisample")) {
/* disable multisample until developed */
caps->v1.bset.texture_multisample = 1;
}
if (gl_ver >= 40) {
caps->v1.bset.indep_blend_func = 1;
caps->v1.bset.cube_map_array = 1;
caps->v1.bset.texture_query_lod = 1;
} else {
if (epoxy_has_gl_extension("GL_ARB_draw_buffers_blend"))
caps->v1.bset.indep_blend_func = 1;
if (epoxy_has_gl_extension("GL_ARB_texture_cube_map_array"))
caps->v1.bset.cube_map_array = 1;
if (epoxy_has_gl_extension("GL_ARB_texture_query_lod"))
caps->v1.bset.texture_query_lod = 1;
}
if (gl_ver >= 42) {
caps->v1.bset.start_instance = 1;
} else {
if (epoxy_has_gl_extension("GL_ARB_base_instance"))
caps->v1.bset.start_instance = 1;
}
if (epoxy_has_gl_extension("GL_ARB_shader_stencil_export"))
caps->v1.bset.shader_stencil_export = 1;
/* we only support up to GLSL 1.40 features now */
caps->v1.glsl_level = 130;
if (vrend_state.use_core_profile) {
if (gl_ver == 31)
caps->v1.glsl_level = 140;
else if (gl_ver == 32)
caps->v1.glsl_level = 150;
else if (gl_ver >= 33)
caps->v1.glsl_level = 330;
}
if (epoxy_has_gl_extension("GL_EXT_texture_mirror_clamp"))
caps->v1.bset.mirror_clamp = true;
if (epoxy_has_gl_extension("GL_EXT_texture_array")) {
glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &max);
caps->v1.max_texture_array_layers = max;
}
/* we need tf3 so we can do gallium skip buffers */
if (epoxy_has_gl_extension("GL_ARB_transform_feedback2")) {
caps->v1.bset.streamout_pause_resume = 1;
}
if (epoxy_has_gl_extension("GL_ARB_transform_feedback3")) {
glGetIntegerv(GL_MAX_TRANSFORM_FEEDBACK_BUFFERS, &max);
caps->v1.max_streamout_buffers = max;
} else if (epoxy_has_gl_extension("GL_EXT_transform_feedback"))
caps->v1.max_streamout_buffers = 4;
if (epoxy_has_gl_extension("GL_ARB_blend_func_extended")) {
glGetIntegerv(GL_MAX_DUAL_SOURCE_DRAW_BUFFERS, &max);
caps->v1.max_dual_source_render_targets = max;
} else
caps->v1.max_dual_source_render_targets = 0;
glGetIntegerv(GL_MAX_DRAW_BUFFERS, &max);
caps->v1.max_render_targets = max;
glGetIntegerv(GL_MAX_SAMPLES, &max);
caps->v1.max_samples = max;
if (epoxy_has_gl_extension("GL_ARB_texture_buffer_object")) {
glGetIntegerv(GL_MAX_TEXTURE_BUFFER_SIZE, &max);
caps->v1.max_tbo_size = max;
}
if (epoxy_has_gl_extension("GL_ARB_texture_gather")) {
glGetIntegerv(GL_MAX_PROGRAM_TEXTURE_GATHER_COMPONENTS_ARB, &max);
caps->v1.max_texture_gather_components = max;
}
if (epoxy_has_gl_extension("GL_ARB_viewport_array")) {
glGetIntegerv(GL_MAX_VIEWPORTS, &max);
caps->v1.max_viewports = max;
} else
caps->v1.max_viewports = 1;
caps->v1.prim_mask = (1 << PIPE_PRIM_POINTS) | (1 << PIPE_PRIM_LINES) | (1 << PIPE_PRIM_LINE_STRIP) | (1 << PIPE_PRIM_LINE_LOOP) | (1 << PIPE_PRIM_TRIANGLES) | (1 << PIPE_PRIM_TRIANGLE_STRIP) | (1 << PIPE_PRIM_TRIANGLE_FAN);
if (vrend_state.use_core_profile == false) {
caps->v1.prim_mask |= (1 << PIPE_PRIM_QUADS) | (1 << PIPE_PRIM_QUAD_STRIP) | (1 << PIPE_PRIM_POLYGON);
}
if (caps->v1.glsl_level >= 150)
caps->v1.prim_mask |= (1 << PIPE_PRIM_LINES_ADJACENCY) |
(1 << PIPE_PRIM_LINE_STRIP_ADJACENCY) |
(1 << PIPE_PRIM_TRIANGLES_ADJACENCY) |
(1 << PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY);
if (epoxy_has_gl_extension("GL_ARB_vertex_type_10f_11f_11f_rev")) {
int val = VIRGL_FORMAT_R11G11B10_FLOAT;
uint32_t offset = val / 32;
uint32_t index = val % 32;
caps->v1.vertexbuffer.bitmask[offset] |= (1 << index);
}
for (i = 0; i < VIRGL_FORMAT_MAX; i++) {
uint32_t offset = i / 32;
uint32_t index = i % 32;
if (tex_conv_table[i].internalformat != 0) {
if (vrend_format_can_sample(i)) {
caps->v1.sampler.bitmask[offset] |= (1 << index);
if (vrend_format_can_render(i))
caps->v1.render.bitmask[offset] |= (1 << index);
}
}
}
}
|
DoS
| 0
|
void vrend_renderer_fill_caps(uint32_t set, uint32_t version,
union virgl_caps *caps)
{
int i;
GLint max;
int gl_ver = epoxy_gl_version();
if (!caps)
return;
memset(caps, 0, sizeof(*caps));
if (set != 1 && set != 0) {
caps->max_version = 0;
return;
}
caps->max_version = 1;
caps->v1.bset.occlusion_query = 1;
if (gl_ver >= 30) {
caps->v1.bset.indep_blend_enable = 1;
caps->v1.bset.conditional_render = 1;
} else {
if (epoxy_has_gl_extension("GL_EXT_draw_buffers2"))
caps->v1.bset.indep_blend_enable = 1;
if (epoxy_has_gl_extension("GL_NV_conditional_render"))
caps->v1.bset.conditional_render = 1;
}
if (vrend_state.use_core_profile) {
caps->v1.bset.poly_stipple = 0;
caps->v1.bset.color_clamping = 0;
} else {
caps->v1.bset.poly_stipple = 1;
caps->v1.bset.color_clamping = 1;
}
if (gl_ver >= 31) {
caps->v1.bset.instanceid = 1;
glGetIntegerv(GL_MAX_VERTEX_UNIFORM_BLOCKS, &max);
vrend_state.max_uniform_blocks = max;
caps->v1.max_uniform_blocks = max + 1;
} else {
if (epoxy_has_gl_extension("GL_ARB_draw_instanced"))
caps->v1.bset.instanceid = 1;
}
if (vrend_state.have_nv_prim_restart || vrend_state.have_gl_prim_restart)
caps->v1.bset.primitive_restart = 1;
if (gl_ver >= 32) {
caps->v1.bset.fragment_coord_conventions = 1;
caps->v1.bset.depth_clip_disable = 1;
caps->v1.bset.seamless_cube_map = 1;
} else {
if (epoxy_has_gl_extension("GL_ARB_fragment_coord_conventions"))
caps->v1.bset.fragment_coord_conventions = 1;
if (epoxy_has_gl_extension("GL_ARB_seamless_cube_map"))
caps->v1.bset.seamless_cube_map = 1;
}
if (epoxy_has_gl_extension("GL_AMD_seamless_cube_map_per_texture"))
caps->v1.bset.seamless_cube_map_per_texture = 1;
if (epoxy_has_gl_extension("GL_ARB_texture_multisample")) {
/* disable multisample until developed */
caps->v1.bset.texture_multisample = 1;
}
if (gl_ver >= 40) {
caps->v1.bset.indep_blend_func = 1;
caps->v1.bset.cube_map_array = 1;
caps->v1.bset.texture_query_lod = 1;
} else {
if (epoxy_has_gl_extension("GL_ARB_draw_buffers_blend"))
caps->v1.bset.indep_blend_func = 1;
if (epoxy_has_gl_extension("GL_ARB_texture_cube_map_array"))
caps->v1.bset.cube_map_array = 1;
if (epoxy_has_gl_extension("GL_ARB_texture_query_lod"))
caps->v1.bset.texture_query_lod = 1;
}
if (gl_ver >= 42) {
caps->v1.bset.start_instance = 1;
} else {
if (epoxy_has_gl_extension("GL_ARB_base_instance"))
caps->v1.bset.start_instance = 1;
}
if (epoxy_has_gl_extension("GL_ARB_shader_stencil_export"))
caps->v1.bset.shader_stencil_export = 1;
/* we only support up to GLSL 1.40 features now */
caps->v1.glsl_level = 130;
if (vrend_state.use_core_profile) {
if (gl_ver == 31)
caps->v1.glsl_level = 140;
else if (gl_ver == 32)
caps->v1.glsl_level = 150;
else if (gl_ver >= 33)
caps->v1.glsl_level = 330;
}
if (epoxy_has_gl_extension("GL_EXT_texture_mirror_clamp"))
caps->v1.bset.mirror_clamp = true;
if (epoxy_has_gl_extension("GL_EXT_texture_array")) {
glGetIntegerv(GL_MAX_ARRAY_TEXTURE_LAYERS, &max);
caps->v1.max_texture_array_layers = max;
}
/* we need tf3 so we can do gallium skip buffers */
if (epoxy_has_gl_extension("GL_ARB_transform_feedback2")) {
caps->v1.bset.streamout_pause_resume = 1;
}
if (epoxy_has_gl_extension("GL_ARB_transform_feedback3")) {
glGetIntegerv(GL_MAX_TRANSFORM_FEEDBACK_BUFFERS, &max);
caps->v1.max_streamout_buffers = max;
} else if (epoxy_has_gl_extension("GL_EXT_transform_feedback"))
caps->v1.max_streamout_buffers = 4;
if (epoxy_has_gl_extension("GL_ARB_blend_func_extended")) {
glGetIntegerv(GL_MAX_DUAL_SOURCE_DRAW_BUFFERS, &max);
caps->v1.max_dual_source_render_targets = max;
} else
caps->v1.max_dual_source_render_targets = 0;
glGetIntegerv(GL_MAX_DRAW_BUFFERS, &max);
caps->v1.max_render_targets = max;
glGetIntegerv(GL_MAX_SAMPLES, &max);
caps->v1.max_samples = max;
if (epoxy_has_gl_extension("GL_ARB_texture_buffer_object")) {
glGetIntegerv(GL_MAX_TEXTURE_BUFFER_SIZE, &max);
caps->v1.max_tbo_size = max;
}
if (epoxy_has_gl_extension("GL_ARB_texture_gather")) {
glGetIntegerv(GL_MAX_PROGRAM_TEXTURE_GATHER_COMPONENTS_ARB, &max);
caps->v1.max_texture_gather_components = max;
}
if (epoxy_has_gl_extension("GL_ARB_viewport_array")) {
glGetIntegerv(GL_MAX_VIEWPORTS, &max);
caps->v1.max_viewports = max;
} else
caps->v1.max_viewports = 1;
caps->v1.prim_mask = (1 << PIPE_PRIM_POINTS) | (1 << PIPE_PRIM_LINES) | (1 << PIPE_PRIM_LINE_STRIP) | (1 << PIPE_PRIM_LINE_LOOP) | (1 << PIPE_PRIM_TRIANGLES) | (1 << PIPE_PRIM_TRIANGLE_STRIP) | (1 << PIPE_PRIM_TRIANGLE_FAN);
if (vrend_state.use_core_profile == false) {
caps->v1.prim_mask |= (1 << PIPE_PRIM_QUADS) | (1 << PIPE_PRIM_QUAD_STRIP) | (1 << PIPE_PRIM_POLYGON);
}
if (caps->v1.glsl_level >= 150)
caps->v1.prim_mask |= (1 << PIPE_PRIM_LINES_ADJACENCY) |
(1 << PIPE_PRIM_LINE_STRIP_ADJACENCY) |
(1 << PIPE_PRIM_TRIANGLES_ADJACENCY) |
(1 << PIPE_PRIM_TRIANGLE_STRIP_ADJACENCY);
if (epoxy_has_gl_extension("GL_ARB_vertex_type_10f_11f_11f_rev")) {
int val = VIRGL_FORMAT_R11G11B10_FLOAT;
uint32_t offset = val / 32;
uint32_t index = val % 32;
caps->v1.vertexbuffer.bitmask[offset] |= (1 << index);
}
for (i = 0; i < VIRGL_FORMAT_MAX; i++) {
uint32_t offset = i / 32;
uint32_t index = i % 32;
if (tex_conv_table[i].internalformat != 0) {
if (vrend_format_can_sample(i)) {
caps->v1.sampler.bitmask[offset] |= (1 << index);
if (vrend_format_can_render(i))
caps->v1.render.bitmask[offset] |= (1 << index);
}
}
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,105
|
void vrend_renderer_fini(void)
{
if (!vrend_state.inited)
return;
vrend_free_sync_thread();
if (vrend_state.eventfd != -1) {
close(vrend_state.eventfd);
vrend_state.eventfd = -1;
}
vrend_decode_reset(false);
vrend_object_fini_resource_table();
vrend_decode_reset(true);
vrend_state.inited = false;
}
|
DoS
| 0
|
void vrend_renderer_fini(void)
{
if (!vrend_state.inited)
return;
vrend_free_sync_thread();
if (vrend_state.eventfd != -1) {
close(vrend_state.eventfd);
vrend_state.eventfd = -1;
}
vrend_decode_reset(false);
vrend_object_fini_resource_table();
vrend_decode_reset(true);
vrend_state.inited = false;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,106
|
void vrend_renderer_force_ctx_0(void)
{
struct vrend_context *ctx0 = vrend_lookup_renderer_ctx(0);
vrend_state.current_ctx = NULL;
vrend_state.current_hw_ctx = NULL;
vrend_hw_switch_context(ctx0, true);
vrend_clicbs->make_current(0, ctx0->sub->gl_context);
}
|
DoS
| 0
|
void vrend_renderer_force_ctx_0(void)
{
struct vrend_context *ctx0 = vrend_lookup_renderer_ctx(0);
vrend_state.current_ctx = NULL;
vrend_state.current_hw_ctx = NULL;
vrend_hw_switch_context(ctx0, true);
vrend_clicbs->make_current(0, ctx0->sub->gl_context);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,107
|
void vrend_renderer_get_cap_set(uint32_t cap_set, uint32_t *max_ver,
uint32_t *max_size)
{
if (cap_set != VREND_CAP_SET) {
*max_ver = 0;
*max_size = 0;
return;
}
*max_ver = 1;
*max_size = sizeof(union virgl_caps);
}
|
DoS
| 0
|
void vrend_renderer_get_cap_set(uint32_t cap_set, uint32_t *max_ver,
uint32_t *max_size)
{
if (cap_set != VREND_CAP_SET) {
*max_ver = 0;
*max_size = 0;
return;
}
*max_ver = 1;
*max_size = sizeof(union virgl_caps);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,108
|
int vrend_renderer_get_poll_fd(void)
{
if (!vrend_state.inited)
return -1;
return vrend_state.eventfd;
}
|
DoS
| 0
|
int vrend_renderer_get_poll_fd(void)
{
if (!vrend_state.inited)
return -1;
return vrend_state.eventfd;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,109
|
void vrend_renderer_get_rect(int res_handle, struct iovec *iov, unsigned int num_iovs,
uint32_t offset, int x, int y, int width, int height)
{
struct vrend_resource *res = vrend_resource_lookup(res_handle, 0);
struct vrend_transfer_info transfer_info;
struct pipe_box box;
int elsize;
memset(&transfer_info, 0, sizeof(transfer_info));
elsize = util_format_get_blocksize(res->base.format);
box.x = x;
box.y = y;
box.z = 0;
box.width = width;
box.height = height;
box.depth = 1;
transfer_info.box = &box;
transfer_info.stride = util_format_get_nblocksx(res->base.format, res->base.width0) * elsize;
transfer_info.offset = offset;
transfer_info.handle = res->handle;
transfer_info.iovec = iov;
transfer_info.iovec_cnt = num_iovs;
vrend_renderer_transfer_iov(&transfer_info, VREND_TRANSFER_READ);
}
|
DoS
| 0
|
void vrend_renderer_get_rect(int res_handle, struct iovec *iov, unsigned int num_iovs,
uint32_t offset, int x, int y, int width, int height)
{
struct vrend_resource *res = vrend_resource_lookup(res_handle, 0);
struct vrend_transfer_info transfer_info;
struct pipe_box box;
int elsize;
memset(&transfer_info, 0, sizeof(transfer_info));
elsize = util_format_get_blocksize(res->base.format);
box.x = x;
box.y = y;
box.z = 0;
box.width = width;
box.height = height;
box.depth = 1;
transfer_info.box = &box;
transfer_info.stride = util_format_get_nblocksx(res->base.format, res->base.width0) * elsize;
transfer_info.offset = offset;
transfer_info.handle = res->handle;
transfer_info.iovec = iov;
transfer_info.iovec_cnt = num_iovs;
vrend_renderer_transfer_iov(&transfer_info, VREND_TRANSFER_READ);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,110
|
GLint64 vrend_renderer_get_timestamp(void)
{
GLint64 v;
glGetInteger64v(GL_TIMESTAMP, &v);
return v;
}
|
DoS
| 0
|
GLint64 vrend_renderer_get_timestamp(void)
{
GLint64 v;
glGetInteger64v(GL_TIMESTAMP, &v);
return v;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,111
|
int vrend_renderer_init(struct vrend_if_cbs *cbs, uint32_t flags)
{
int gl_ver;
virgl_gl_context gl_context;
struct virgl_gl_ctx_param ctx_params;
if (!vrend_state.inited) {
vrend_state.inited = true;
vrend_object_init_resource_table();
vrend_clicbs = cbs;
}
ctx_params.shared = false;
ctx_params.major_ver = VREND_GL_VER_MAJOR;
ctx_params.minor_ver = VREND_GL_VER_MINOR;
gl_context = vrend_clicbs->create_gl_context(0, &ctx_params);
vrend_clicbs->make_current(0, gl_context);
gl_ver = epoxy_gl_version();
vrend_state.gl_major_ver = gl_ver / 10;
vrend_state.gl_minor_ver = gl_ver % 10;
if (gl_ver > 30 && !epoxy_has_gl_extension("GL_ARB_compatibility")) {
fprintf(stderr, "gl_version %d - core profile enabled\n", gl_ver);
vrend_state.use_core_profile = 1;
} else {
fprintf(stderr, "gl_version %d - compat profile\n", gl_ver);
}
if (epoxy_has_gl_extension("GL_ARB_robustness"))
vrend_state.have_robustness = true;
else
fprintf(stderr,"WARNING: running without ARB robustness in place may crash\n");
if (epoxy_has_gl_extension("GL_MESA_pack_invert"))
vrend_state.have_mesa_invert = true;
if (gl_ver >= 43 || epoxy_has_gl_extension("GL_ARB_vertex_attrib_binding"))
vrend_state.have_vertex_attrib_binding = true;
if (gl_ver >= 33 || epoxy_has_gl_extension("GL_ARB_sampler_objects"))
vrend_state.have_samplers = true;
if (gl_ver >= 33 || epoxy_has_gl_extension("GL_ARB_shader_bit_encoding"))
vrend_state.have_bit_encoding = true;
if (gl_ver >= 31)
vrend_state.have_gl_prim_restart = true;
else if (epoxy_has_gl_extension("GL_NV_primitive_restart"))
vrend_state.have_nv_prim_restart = true;
if (gl_ver >= 40 || epoxy_has_gl_extension("GL_ARB_transform_feedback2"))
vrend_state.have_tf2 = true;
if (epoxy_has_gl_extension("GL_EXT_framebuffer_multisample") && epoxy_has_gl_extension("GL_ARB_texture_multisample")) {
vrend_state.have_multisample = true;
if (epoxy_has_gl_extension("GL_EXT_framebuffer_multisample_blit_scaled"))
vrend_state.have_ms_scaled_blit = true;
}
/* callbacks for when we are cleaning up the object table */
vrend_resource_set_destroy_callback(vrend_destroy_resource_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_QUERY, vrend_destroy_query_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_SURFACE, vrend_destroy_surface_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_SHADER, vrend_destroy_shader_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_SAMPLER_VIEW, vrend_destroy_sampler_view_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_STREAMOUT_TARGET, vrend_destroy_so_target_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_SAMPLER_STATE, vrend_destroy_sampler_state_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_VERTEX_ELEMENTS, vrend_destroy_vertex_elements_object);
vrend_build_format_list();
vrend_clicbs->destroy_gl_context(gl_context);
list_inithead(&vrend_state.fence_list);
list_inithead(&vrend_state.fence_wait_list);
list_inithead(&vrend_state.waiting_query_list);
list_inithead(&vrend_state.active_ctx_list);
/* create 0 context */
vrend_renderer_context_create_internal(0, 0, NULL);
vrend_state.eventfd = -1;
if (flags & VREND_USE_THREAD_SYNC) {
vrend_renderer_use_threaded_sync();
}
return 0;
}
|
DoS
| 0
|
int vrend_renderer_init(struct vrend_if_cbs *cbs, uint32_t flags)
{
int gl_ver;
virgl_gl_context gl_context;
struct virgl_gl_ctx_param ctx_params;
if (!vrend_state.inited) {
vrend_state.inited = true;
vrend_object_init_resource_table();
vrend_clicbs = cbs;
}
ctx_params.shared = false;
ctx_params.major_ver = VREND_GL_VER_MAJOR;
ctx_params.minor_ver = VREND_GL_VER_MINOR;
gl_context = vrend_clicbs->create_gl_context(0, &ctx_params);
vrend_clicbs->make_current(0, gl_context);
gl_ver = epoxy_gl_version();
vrend_state.gl_major_ver = gl_ver / 10;
vrend_state.gl_minor_ver = gl_ver % 10;
if (gl_ver > 30 && !epoxy_has_gl_extension("GL_ARB_compatibility")) {
fprintf(stderr, "gl_version %d - core profile enabled\n", gl_ver);
vrend_state.use_core_profile = 1;
} else {
fprintf(stderr, "gl_version %d - compat profile\n", gl_ver);
}
if (epoxy_has_gl_extension("GL_ARB_robustness"))
vrend_state.have_robustness = true;
else
fprintf(stderr,"WARNING: running without ARB robustness in place may crash\n");
if (epoxy_has_gl_extension("GL_MESA_pack_invert"))
vrend_state.have_mesa_invert = true;
if (gl_ver >= 43 || epoxy_has_gl_extension("GL_ARB_vertex_attrib_binding"))
vrend_state.have_vertex_attrib_binding = true;
if (gl_ver >= 33 || epoxy_has_gl_extension("GL_ARB_sampler_objects"))
vrend_state.have_samplers = true;
if (gl_ver >= 33 || epoxy_has_gl_extension("GL_ARB_shader_bit_encoding"))
vrend_state.have_bit_encoding = true;
if (gl_ver >= 31)
vrend_state.have_gl_prim_restart = true;
else if (epoxy_has_gl_extension("GL_NV_primitive_restart"))
vrend_state.have_nv_prim_restart = true;
if (gl_ver >= 40 || epoxy_has_gl_extension("GL_ARB_transform_feedback2"))
vrend_state.have_tf2 = true;
if (epoxy_has_gl_extension("GL_EXT_framebuffer_multisample") && epoxy_has_gl_extension("GL_ARB_texture_multisample")) {
vrend_state.have_multisample = true;
if (epoxy_has_gl_extension("GL_EXT_framebuffer_multisample_blit_scaled"))
vrend_state.have_ms_scaled_blit = true;
}
/* callbacks for when we are cleaning up the object table */
vrend_resource_set_destroy_callback(vrend_destroy_resource_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_QUERY, vrend_destroy_query_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_SURFACE, vrend_destroy_surface_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_SHADER, vrend_destroy_shader_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_SAMPLER_VIEW, vrend_destroy_sampler_view_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_STREAMOUT_TARGET, vrend_destroy_so_target_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_SAMPLER_STATE, vrend_destroy_sampler_state_object);
vrend_object_set_destroy_callback(VIRGL_OBJECT_VERTEX_ELEMENTS, vrend_destroy_vertex_elements_object);
vrend_build_format_list();
vrend_clicbs->destroy_gl_context(gl_context);
list_inithead(&vrend_state.fence_list);
list_inithead(&vrend_state.fence_wait_list);
list_inithead(&vrend_state.waiting_query_list);
list_inithead(&vrend_state.active_ctx_list);
/* create 0 context */
vrend_renderer_context_create_internal(0, 0, NULL);
vrend_state.eventfd = -1;
if (flags & VREND_USE_THREAD_SYNC) {
vrend_renderer_use_threaded_sync();
}
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,112
|
void vrend_renderer_object_destroy(struct vrend_context *ctx, uint32_t handle)
{
vrend_object_remove(ctx->sub->object_hash, handle, 0);
}
|
DoS
| 0
|
void vrend_renderer_object_destroy(struct vrend_context *ctx, uint32_t handle)
{
vrend_object_remove(ctx->sub->object_hash, handle, 0);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,113
|
uint32_t vrend_renderer_object_insert(struct vrend_context *ctx, void *data,
uint32_t size, uint32_t handle, enum virgl_object_type type)
{
return vrend_object_insert(ctx->sub->object_hash, data, size, handle, type);
}
|
DoS
| 0
|
uint32_t vrend_renderer_object_insert(struct vrend_context *ctx, void *data,
uint32_t size, uint32_t handle, enum virgl_object_type type)
{
return vrend_object_insert(ctx->sub->object_hash, data, size, handle, type);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,114
|
void vrend_renderer_reset(void)
{
if (vrend_state.sync_thread) {
vrend_free_sync_thread();
vrend_state.stop_sync_thread = false;
}
vrend_reset_fences();
vrend_decode_reset(false);
vrend_object_fini_resource_table();
vrend_decode_reset(true);
vrend_object_init_resource_table();
vrend_renderer_context_create_internal(0, 0, NULL);
}
|
DoS
| 0
|
void vrend_renderer_reset(void)
{
if (vrend_state.sync_thread) {
vrend_free_sync_thread();
vrend_state.stop_sync_thread = false;
}
vrend_reset_fences();
vrend_decode_reset(false);
vrend_object_fini_resource_table();
vrend_decode_reset(true);
vrend_object_init_resource_table();
vrend_renderer_context_create_internal(0, 0, NULL);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,115
|
int vrend_renderer_resource_attach_iov(int res_handle, struct iovec *iov,
int num_iovs)
{
struct vrend_resource *res;
res = vrend_resource_lookup(res_handle, 0);
if (!res)
return EINVAL;
if (res->iov)
return 0;
/* work out size and max resource size */
res->iov = iov;
res->num_iovs = num_iovs;
return 0;
}
|
DoS
| 0
|
int vrend_renderer_resource_attach_iov(int res_handle, struct iovec *iov,
int num_iovs)
{
struct vrend_resource *res;
res = vrend_resource_lookup(res_handle, 0);
if (!res)
return EINVAL;
if (res->iov)
return 0;
/* work out size and max resource size */
res->iov = iov;
res->num_iovs = num_iovs;
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,116
|
void vrend_renderer_resource_copy_region(struct vrend_context *ctx,
uint32_t dst_handle, uint32_t dst_level,
uint32_t dstx, uint32_t dsty, uint32_t dstz,
uint32_t src_handle, uint32_t src_level,
const struct pipe_box *src_box)
{
struct vrend_resource *src_res, *dst_res;
GLbitfield glmask = 0;
GLint sy1, sy2, dy1, dy2;
if (ctx->in_error)
return;
src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle);
dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle);
if (!src_res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, src_handle);
return;
}
if (!dst_res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle);
return;
}
if (src_res->base.target == PIPE_BUFFER && dst_res->base.target == PIPE_BUFFER) {
/* do a buffer copy */
vrend_resource_buffer_copy(ctx, src_res, dst_res, dstx,
src_box->x, src_box->width);
return;
}
if (!vrend_format_can_render(src_res->base.format) ||
!vrend_format_can_render(dst_res->base.format)) {
vrend_resource_copy_fallback(ctx, src_res, dst_res, dst_level, dstx,
dsty, dstz, src_level, src_box);
return;
}
glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[0]);
/* clean out fb ids */
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
vrend_fb_bind_texture(src_res, 0, src_level, src_box->z);
glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[1]);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
vrend_fb_bind_texture(dst_res, 0, dst_level, dstz);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]);
glBindFramebuffer(GL_READ_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]);
glmask = GL_COLOR_BUFFER_BIT;
glDisable(GL_SCISSOR_TEST);
if (!src_res->y_0_top) {
sy1 = src_box->y;
sy2 = src_box->y + src_box->height;
} else {
sy1 = src_res->base.height0 - src_box->y - src_box->height;
sy2 = src_res->base.height0 - src_box->y;
}
if (!dst_res->y_0_top) {
dy1 = dsty;
dy2 = dsty + src_box->height;
} else {
dy1 = dst_res->base.height0 - dsty - src_box->height;
dy2 = dst_res->base.height0 - dsty;
}
glBlitFramebuffer(src_box->x, sy1,
src_box->x + src_box->width,
sy2,
dstx, dy1,
dstx + src_box->width,
dy2,
glmask, GL_NEAREST);
}
|
DoS
| 0
|
void vrend_renderer_resource_copy_region(struct vrend_context *ctx,
uint32_t dst_handle, uint32_t dst_level,
uint32_t dstx, uint32_t dsty, uint32_t dstz,
uint32_t src_handle, uint32_t src_level,
const struct pipe_box *src_box)
{
struct vrend_resource *src_res, *dst_res;
GLbitfield glmask = 0;
GLint sy1, sy2, dy1, dy2;
if (ctx->in_error)
return;
src_res = vrend_renderer_ctx_res_lookup(ctx, src_handle);
dst_res = vrend_renderer_ctx_res_lookup(ctx, dst_handle);
if (!src_res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, src_handle);
return;
}
if (!dst_res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, dst_handle);
return;
}
if (src_res->base.target == PIPE_BUFFER && dst_res->base.target == PIPE_BUFFER) {
/* do a buffer copy */
vrend_resource_buffer_copy(ctx, src_res, dst_res, dstx,
src_box->x, src_box->width);
return;
}
if (!vrend_format_can_render(src_res->base.format) ||
!vrend_format_can_render(dst_res->base.format)) {
vrend_resource_copy_fallback(ctx, src_res, dst_res, dst_level, dstx,
dsty, dstz, src_level, src_box);
return;
}
glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[0]);
/* clean out fb ids */
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
vrend_fb_bind_texture(src_res, 0, src_level, src_box->z);
glBindFramebuffer(GL_FRAMEBUFFER_EXT, ctx->sub->blit_fb_ids[1]);
glFramebufferTexture2DEXT(GL_FRAMEBUFFER_EXT, GL_DEPTH_STENCIL_ATTACHMENT,
GL_TEXTURE_2D, 0, 0);
vrend_fb_bind_texture(dst_res, 0, dst_level, dstz);
glBindFramebuffer(GL_DRAW_FRAMEBUFFER, ctx->sub->blit_fb_ids[1]);
glBindFramebuffer(GL_READ_FRAMEBUFFER, ctx->sub->blit_fb_ids[0]);
glmask = GL_COLOR_BUFFER_BIT;
glDisable(GL_SCISSOR_TEST);
if (!src_res->y_0_top) {
sy1 = src_box->y;
sy2 = src_box->y + src_box->height;
} else {
sy1 = src_res->base.height0 - src_box->y - src_box->height;
sy2 = src_res->base.height0 - src_box->y;
}
if (!dst_res->y_0_top) {
dy1 = dsty;
dy2 = dsty + src_box->height;
} else {
dy1 = dst_res->base.height0 - dsty - src_box->height;
dy2 = dst_res->base.height0 - dsty;
}
glBlitFramebuffer(src_box->x, sy1,
src_box->x + src_box->width,
sy2,
dstx, dy1,
dstx + src_box->width,
dy2,
glmask, GL_NEAREST);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,117
|
int vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args, struct iovec *iov, uint32_t num_iovs)
{
struct vrend_resource *gr;
int level;
int ret;
ret = check_resource_valid(args);
if (ret)
return EINVAL;
gr = (struct vrend_resource *)CALLOC_STRUCT(vrend_texture);
if (!gr)
return ENOMEM;
gr->handle = args->handle;
gr->iov = iov;
gr->num_iovs = num_iovs;
gr->base.width0 = args->width;
gr->base.height0 = args->height;
gr->base.depth0 = args->depth;
gr->base.format = args->format;
gr->base.target = args->target;
gr->base.last_level = args->last_level;
gr->base.nr_samples = args->nr_samples;
gr->base.array_size = args->array_size;
if (args->flags & VIRGL_RESOURCE_Y_0_TOP)
gr->y_0_top = true;
pipe_reference_init(&gr->base.reference, 1);
if (args->bind == VREND_RES_BIND_CUSTOM) {
/* custom should only be for buffers */
gr->ptr = malloc(args->width);
if (!gr->ptr) {
FREE(gr);
return ENOMEM;
}
} else if (args->bind == VREND_RES_BIND_INDEX_BUFFER) {
gr->target = GL_ELEMENT_ARRAY_BUFFER_ARB;
glGenBuffersARB(1, &gr->id);
glBindBufferARB(gr->target, gr->id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
} else if (args->bind == VREND_RES_BIND_STREAM_OUTPUT) {
gr->target = GL_TRANSFORM_FEEDBACK_BUFFER;
glGenBuffersARB(1, &gr->id);
glBindBuffer(gr->target, gr->id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
} else if (args->bind == VREND_RES_BIND_VERTEX_BUFFER) {
gr->target = GL_ARRAY_BUFFER_ARB;
glGenBuffersARB(1, &gr->id);
glBindBufferARB(gr->target, gr->id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
} else if (args->bind == VREND_RES_BIND_CONSTANT_BUFFER) {
gr->target = GL_UNIFORM_BUFFER;
glGenBuffersARB(1, &gr->id);
glBindBufferARB(gr->target, gr->id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
} else if (args->target == PIPE_BUFFER && args->bind == 0) {
gr->target = GL_ARRAY_BUFFER_ARB;
glGenBuffersARB(1, &gr->id);
glBindBufferARB(gr->target, gr->id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
} else if (args->target == PIPE_BUFFER && (args->bind & VREND_RES_BIND_SAMPLER_VIEW)) {
GLenum internalformat;
/* need to check GL version here */
if (epoxy_has_gl_extension("GL_ARB_texture_buffer_object")) {
gr->target = GL_TEXTURE_BUFFER;
glGenBuffersARB(1, &gr->id);
glBindBufferARB(gr->target, gr->id);
glGenTextures(1, &gr->tbo_tex_id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
glBindTexture(gr->target, gr->tbo_tex_id);
internalformat = tex_conv_table[args->format].internalformat;
glTexBuffer(gr->target, internalformat, gr->id);
} else {
gr->target = GL_PIXEL_PACK_BUFFER_ARB;
glGenBuffersARB(1, &gr->id);
glBindBufferARB(gr->target, gr->id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
}
} else {
struct vrend_texture *gt = (struct vrend_texture *)gr;
GLenum internalformat, glformat, gltype;
gr->target = tgsitargettogltarget(args->target, args->nr_samples);
glGenTextures(1, &gr->id);
glBindTexture(gr->target, gr->id);
internalformat = tex_conv_table[args->format].internalformat;
glformat = tex_conv_table[args->format].glformat;
gltype = tex_conv_table[args->format].gltype;
if (internalformat == 0) {
fprintf(stderr,"unknown format is %d\n", args->format);
FREE(gr);
return EINVAL;
}
if (args->nr_samples > 1) {
if (gr->target == GL_TEXTURE_2D_MULTISAMPLE) {
glTexImage2DMultisample(gr->target, args->nr_samples,
internalformat, args->width, args->height,
GL_TRUE);
} else {
glTexImage3DMultisample(gr->target, args->nr_samples,
internalformat, args->width, args->height, args->array_size,
GL_TRUE);
}
} else if (gr->target == GL_TEXTURE_CUBE_MAP) {
int i;
for (i = 0; i < 6; i++) {
GLenum ctarget = GL_TEXTURE_CUBE_MAP_POSITIVE_X + i;
for (level = 0; level <= args->last_level; level++) {
unsigned mwidth = u_minify(args->width, level);
unsigned mheight = u_minify(args->height, level);
glTexImage2D(ctarget, level, internalformat, mwidth, mheight, 0, glformat,
gltype, NULL);
}
}
} else if (gr->target == GL_TEXTURE_3D ||
gr->target == GL_TEXTURE_2D_ARRAY ||
gr->target == GL_TEXTURE_CUBE_MAP_ARRAY) {
for (level = 0; level <= args->last_level; level++) {
unsigned depth_param = (gr->target == GL_TEXTURE_2D_ARRAY || gr->target == GL_TEXTURE_CUBE_MAP_ARRAY) ? args->array_size : u_minify(args->depth, level);
unsigned mwidth = u_minify(args->width, level);
unsigned mheight = u_minify(args->height, level);
glTexImage3D(gr->target, level, internalformat, mwidth, mheight, depth_param, 0,
glformat,
gltype, NULL);
}
} else if (gr->target == GL_TEXTURE_1D) {
for (level = 0; level <= args->last_level; level++) {
unsigned mwidth = u_minify(args->width, level);
glTexImage1D(gr->target, level, internalformat, mwidth, 0,
glformat,
gltype, NULL);
}
} else {
for (level = 0; level <= args->last_level; level++) {
unsigned mwidth = u_minify(args->width, level);
unsigned mheight = u_minify(args->height, level);
glTexImage2D(gr->target, level, internalformat, mwidth, gr->target == GL_TEXTURE_1D_ARRAY ? args->array_size : mheight, 0, glformat,
gltype, NULL);
}
}
gt->state.max_lod = -1;
gt->cur_swizzle_r = gt->cur_swizzle_g = gt->cur_swizzle_b = gt->cur_swizzle_a = -1;
}
ret = vrend_resource_insert(gr, args->handle);
if (ret == 0) {
vrend_renderer_resource_destroy(gr, true);
return ENOMEM;
}
return 0;
}
|
DoS
| 0
|
int vrend_renderer_resource_create(struct vrend_renderer_resource_create_args *args, struct iovec *iov, uint32_t num_iovs)
{
struct vrend_resource *gr;
int level;
int ret;
ret = check_resource_valid(args);
if (ret)
return EINVAL;
gr = (struct vrend_resource *)CALLOC_STRUCT(vrend_texture);
if (!gr)
return ENOMEM;
gr->handle = args->handle;
gr->iov = iov;
gr->num_iovs = num_iovs;
gr->base.width0 = args->width;
gr->base.height0 = args->height;
gr->base.depth0 = args->depth;
gr->base.format = args->format;
gr->base.target = args->target;
gr->base.last_level = args->last_level;
gr->base.nr_samples = args->nr_samples;
gr->base.array_size = args->array_size;
if (args->flags & VIRGL_RESOURCE_Y_0_TOP)
gr->y_0_top = true;
pipe_reference_init(&gr->base.reference, 1);
if (args->bind == VREND_RES_BIND_CUSTOM) {
/* custom should only be for buffers */
gr->ptr = malloc(args->width);
if (!gr->ptr) {
FREE(gr);
return ENOMEM;
}
} else if (args->bind == VREND_RES_BIND_INDEX_BUFFER) {
gr->target = GL_ELEMENT_ARRAY_BUFFER_ARB;
glGenBuffersARB(1, &gr->id);
glBindBufferARB(gr->target, gr->id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
} else if (args->bind == VREND_RES_BIND_STREAM_OUTPUT) {
gr->target = GL_TRANSFORM_FEEDBACK_BUFFER;
glGenBuffersARB(1, &gr->id);
glBindBuffer(gr->target, gr->id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
} else if (args->bind == VREND_RES_BIND_VERTEX_BUFFER) {
gr->target = GL_ARRAY_BUFFER_ARB;
glGenBuffersARB(1, &gr->id);
glBindBufferARB(gr->target, gr->id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
} else if (args->bind == VREND_RES_BIND_CONSTANT_BUFFER) {
gr->target = GL_UNIFORM_BUFFER;
glGenBuffersARB(1, &gr->id);
glBindBufferARB(gr->target, gr->id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
} else if (args->target == PIPE_BUFFER && args->bind == 0) {
gr->target = GL_ARRAY_BUFFER_ARB;
glGenBuffersARB(1, &gr->id);
glBindBufferARB(gr->target, gr->id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
} else if (args->target == PIPE_BUFFER && (args->bind & VREND_RES_BIND_SAMPLER_VIEW)) {
GLenum internalformat;
/* need to check GL version here */
if (epoxy_has_gl_extension("GL_ARB_texture_buffer_object")) {
gr->target = GL_TEXTURE_BUFFER;
glGenBuffersARB(1, &gr->id);
glBindBufferARB(gr->target, gr->id);
glGenTextures(1, &gr->tbo_tex_id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
glBindTexture(gr->target, gr->tbo_tex_id);
internalformat = tex_conv_table[args->format].internalformat;
glTexBuffer(gr->target, internalformat, gr->id);
} else {
gr->target = GL_PIXEL_PACK_BUFFER_ARB;
glGenBuffersARB(1, &gr->id);
glBindBufferARB(gr->target, gr->id);
glBufferData(gr->target, args->width, NULL, GL_STREAM_DRAW);
}
} else {
struct vrend_texture *gt = (struct vrend_texture *)gr;
GLenum internalformat, glformat, gltype;
gr->target = tgsitargettogltarget(args->target, args->nr_samples);
glGenTextures(1, &gr->id);
glBindTexture(gr->target, gr->id);
internalformat = tex_conv_table[args->format].internalformat;
glformat = tex_conv_table[args->format].glformat;
gltype = tex_conv_table[args->format].gltype;
if (internalformat == 0) {
fprintf(stderr,"unknown format is %d\n", args->format);
FREE(gr);
return EINVAL;
}
if (args->nr_samples > 1) {
if (gr->target == GL_TEXTURE_2D_MULTISAMPLE) {
glTexImage2DMultisample(gr->target, args->nr_samples,
internalformat, args->width, args->height,
GL_TRUE);
} else {
glTexImage3DMultisample(gr->target, args->nr_samples,
internalformat, args->width, args->height, args->array_size,
GL_TRUE);
}
} else if (gr->target == GL_TEXTURE_CUBE_MAP) {
int i;
for (i = 0; i < 6; i++) {
GLenum ctarget = GL_TEXTURE_CUBE_MAP_POSITIVE_X + i;
for (level = 0; level <= args->last_level; level++) {
unsigned mwidth = u_minify(args->width, level);
unsigned mheight = u_minify(args->height, level);
glTexImage2D(ctarget, level, internalformat, mwidth, mheight, 0, glformat,
gltype, NULL);
}
}
} else if (gr->target == GL_TEXTURE_3D ||
gr->target == GL_TEXTURE_2D_ARRAY ||
gr->target == GL_TEXTURE_CUBE_MAP_ARRAY) {
for (level = 0; level <= args->last_level; level++) {
unsigned depth_param = (gr->target == GL_TEXTURE_2D_ARRAY || gr->target == GL_TEXTURE_CUBE_MAP_ARRAY) ? args->array_size : u_minify(args->depth, level);
unsigned mwidth = u_minify(args->width, level);
unsigned mheight = u_minify(args->height, level);
glTexImage3D(gr->target, level, internalformat, mwidth, mheight, depth_param, 0,
glformat,
gltype, NULL);
}
} else if (gr->target == GL_TEXTURE_1D) {
for (level = 0; level <= args->last_level; level++) {
unsigned mwidth = u_minify(args->width, level);
glTexImage1D(gr->target, level, internalformat, mwidth, 0,
glformat,
gltype, NULL);
}
} else {
for (level = 0; level <= args->last_level; level++) {
unsigned mwidth = u_minify(args->width, level);
unsigned mheight = u_minify(args->height, level);
glTexImage2D(gr->target, level, internalformat, mwidth, gr->target == GL_TEXTURE_1D_ARRAY ? args->array_size : mheight, 0, glformat,
gltype, NULL);
}
}
gt->state.max_lod = -1;
gt->cur_swizzle_r = gt->cur_swizzle_g = gt->cur_swizzle_b = gt->cur_swizzle_a = -1;
}
ret = vrend_resource_insert(gr, args->handle);
if (ret == 0) {
vrend_renderer_resource_destroy(gr, true);
return ENOMEM;
}
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,118
|
void vrend_renderer_resource_destroy(struct vrend_resource *res, bool remove)
{
if (res->readback_fb_id)
glDeleteFramebuffers(1, &res->readback_fb_id);
if (res->ptr)
free(res->ptr);
if (res->id) {
if (res->target == GL_ELEMENT_ARRAY_BUFFER_ARB ||
res->target == GL_ARRAY_BUFFER_ARB ||
res->target == GL_UNIFORM_BUFFER||
res->target == GL_TEXTURE_BUFFER||
res->target == GL_TRANSFORM_FEEDBACK_BUFFER) {
glDeleteBuffers(1, &res->id);
if (res->target == GL_TEXTURE_BUFFER)
glDeleteTextures(1, &res->tbo_tex_id);
} else
glDeleteTextures(1, &res->id);
}
if (res->handle && remove)
vrend_resource_remove(res->handle);
free(res);
}
|
DoS
| 0
|
void vrend_renderer_resource_destroy(struct vrend_resource *res, bool remove)
{
if (res->readback_fb_id)
glDeleteFramebuffers(1, &res->readback_fb_id);
if (res->ptr)
free(res->ptr);
if (res->id) {
if (res->target == GL_ELEMENT_ARRAY_BUFFER_ARB ||
res->target == GL_ARRAY_BUFFER_ARB ||
res->target == GL_UNIFORM_BUFFER||
res->target == GL_TEXTURE_BUFFER||
res->target == GL_TRANSFORM_FEEDBACK_BUFFER) {
glDeleteBuffers(1, &res->id);
if (res->target == GL_TEXTURE_BUFFER)
glDeleteTextures(1, &res->tbo_tex_id);
} else
glDeleteTextures(1, &res->id);
}
if (res->handle && remove)
vrend_resource_remove(res->handle);
free(res);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,119
|
void vrend_renderer_resource_detach_iov(int res_handle,
struct iovec **iov_p,
int *num_iovs_p)
{
struct vrend_resource *res;
res = vrend_resource_lookup(res_handle, 0);
if (!res) {
return;
}
if (iov_p)
*iov_p = res->iov;
if (num_iovs_p)
*num_iovs_p = res->num_iovs;
res->iov = NULL;
res->num_iovs = 0;
}
|
DoS
| 0
|
void vrend_renderer_resource_detach_iov(int res_handle,
struct iovec **iov_p,
int *num_iovs_p)
{
struct vrend_resource *res;
res = vrend_resource_lookup(res_handle, 0);
if (!res) {
return;
}
if (iov_p)
*iov_p = res->iov;
if (num_iovs_p)
*num_iovs_p = res->num_iovs;
res->iov = NULL;
res->num_iovs = 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,120
|
int vrend_renderer_resource_get_info(int res_handle,
struct vrend_renderer_resource_info *info)
{
struct vrend_resource *res;
int elsize;
if (!info)
return EINVAL;
res = vrend_resource_lookup(res_handle, 0);
if (!res)
return EINVAL;
elsize = util_format_get_blocksize(res->base.format);
info->handle = res_handle;
info->tex_id = res->id;
info->width = res->base.width0;
info->height = res->base.height0;
info->depth = res->base.depth0;
info->format = res->base.format;
info->flags = res->y_0_top ? VIRGL_RESOURCE_Y_0_TOP : 0;
info->stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, 0)) * elsize;
return 0;
}
|
DoS
| 0
|
int vrend_renderer_resource_get_info(int res_handle,
struct vrend_renderer_resource_info *info)
{
struct vrend_resource *res;
int elsize;
if (!info)
return EINVAL;
res = vrend_resource_lookup(res_handle, 0);
if (!res)
return EINVAL;
elsize = util_format_get_blocksize(res->base.format);
info->handle = res_handle;
info->tex_id = res->id;
info->width = res->base.width0;
info->height = res->base.height0;
info->depth = res->base.depth0;
info->format = res->base.format;
info->flags = res->y_0_top ? VIRGL_RESOURCE_Y_0_TOP : 0;
info->stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, 0)) * elsize;
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,121
|
int vrend_renderer_transfer_iov(const struct vrend_transfer_info *info,
int transfer_mode)
{
struct vrend_resource *res;
struct vrend_context *ctx;
struct iovec *iov;
int num_iovs;
if (!info->box)
return EINVAL;
ctx = vrend_lookup_renderer_ctx(info->ctx_id);
if (!ctx)
return EINVAL;
if (info->ctx_id == 0)
res = vrend_resource_lookup(info->handle, 0);
else
res = vrend_renderer_ctx_res_lookup(ctx, info->handle);
if (!res) {
if (info->ctx_id)
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, info->handle);
return EINVAL;
}
iov = info->iovec;
num_iovs = info->iovec_cnt;
if (res->iov && (!iov || num_iovs == 0)) {
iov = res->iov;
num_iovs = res->num_iovs;
}
if (!iov) {
if (info->ctx_id)
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, info->handle);
return EINVAL;
}
if (!check_transfer_bounds(res, info))
return EINVAL;
if (!check_iov_bounds(res, info, iov, num_iovs))
return EINVAL;
vrend_hw_switch_context(vrend_lookup_renderer_ctx(0), true);
if (transfer_mode == VREND_TRANSFER_WRITE)
return vrend_renderer_transfer_write_iov(ctx, res, iov, num_iovs,
info);
else
return vrend_renderer_transfer_send_iov(ctx, res, iov, num_iovs,
info);
}
|
DoS
| 0
|
int vrend_renderer_transfer_iov(const struct vrend_transfer_info *info,
int transfer_mode)
{
struct vrend_resource *res;
struct vrend_context *ctx;
struct iovec *iov;
int num_iovs;
if (!info->box)
return EINVAL;
ctx = vrend_lookup_renderer_ctx(info->ctx_id);
if (!ctx)
return EINVAL;
if (info->ctx_id == 0)
res = vrend_resource_lookup(info->handle, 0);
else
res = vrend_renderer_ctx_res_lookup(ctx, info->handle);
if (!res) {
if (info->ctx_id)
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, info->handle);
return EINVAL;
}
iov = info->iovec;
num_iovs = info->iovec_cnt;
if (res->iov && (!iov || num_iovs == 0)) {
iov = res->iov;
num_iovs = res->num_iovs;
}
if (!iov) {
if (info->ctx_id)
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, info->handle);
return EINVAL;
}
if (!check_transfer_bounds(res, info))
return EINVAL;
if (!check_iov_bounds(res, info, iov, num_iovs))
return EINVAL;
vrend_hw_switch_context(vrend_lookup_renderer_ctx(0), true);
if (transfer_mode == VREND_TRANSFER_WRITE)
return vrend_renderer_transfer_write_iov(ctx, res, iov, num_iovs,
info);
else
return vrend_renderer_transfer_send_iov(ctx, res, iov, num_iovs,
info);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,122
|
static int vrend_renderer_transfer_send_iov(struct vrend_context *ctx,
struct vrend_resource *res,
struct iovec *iov, int num_iovs,
const struct vrend_transfer_info *info)
{
if (res->target == 0 && res->ptr) {
uint32_t send_size = info->box->width * util_format_get_blocksize(res->base.format);
vrend_write_to_iovec(iov, num_iovs, info->offset, res->ptr + info->box->x, send_size);
return 0;
}
if (res->target == GL_ELEMENT_ARRAY_BUFFER_ARB ||
res->target == GL_ARRAY_BUFFER_ARB ||
res->target == GL_TRANSFORM_FEEDBACK_BUFFER ||
res->target == GL_TEXTURE_BUFFER ||
res->target == GL_UNIFORM_BUFFER) {
uint32_t send_size = info->box->width * util_format_get_blocksize(res->base.format);
void *data;
glBindBufferARB(res->target, res->id);
data = glMapBufferRange(res->target, info->box->x, info->box->width, GL_MAP_READ_BIT);
if (!data)
fprintf(stderr,"unable to open buffer for reading %d\n", res->target);
else
vrend_write_to_iovec(iov, num_iovs, info->offset, data, send_size);
glUnmapBuffer(res->target);
} else {
bool can_readpixels = true;
can_readpixels = vrend_format_can_render(res->base.format) || vrend_format_is_ds(res->base.format);
if (can_readpixels) {
return vrend_transfer_send_readpixels(ctx, res,
iov, num_iovs, info);
}
return vrend_transfer_send_getteximage(ctx, res,
iov, num_iovs, info);
}
return 0;
}
|
DoS
| 0
|
static int vrend_renderer_transfer_send_iov(struct vrend_context *ctx,
struct vrend_resource *res,
struct iovec *iov, int num_iovs,
const struct vrend_transfer_info *info)
{
if (res->target == 0 && res->ptr) {
uint32_t send_size = info->box->width * util_format_get_blocksize(res->base.format);
vrend_write_to_iovec(iov, num_iovs, info->offset, res->ptr + info->box->x, send_size);
return 0;
}
if (res->target == GL_ELEMENT_ARRAY_BUFFER_ARB ||
res->target == GL_ARRAY_BUFFER_ARB ||
res->target == GL_TRANSFORM_FEEDBACK_BUFFER ||
res->target == GL_TEXTURE_BUFFER ||
res->target == GL_UNIFORM_BUFFER) {
uint32_t send_size = info->box->width * util_format_get_blocksize(res->base.format);
void *data;
glBindBufferARB(res->target, res->id);
data = glMapBufferRange(res->target, info->box->x, info->box->width, GL_MAP_READ_BIT);
if (!data)
fprintf(stderr,"unable to open buffer for reading %d\n", res->target);
else
vrend_write_to_iovec(iov, num_iovs, info->offset, data, send_size);
glUnmapBuffer(res->target);
} else {
bool can_readpixels = true;
can_readpixels = vrend_format_can_render(res->base.format) || vrend_format_is_ds(res->base.format);
if (can_readpixels) {
return vrend_transfer_send_readpixels(ctx, res,
iov, num_iovs, info);
}
return vrend_transfer_send_getteximage(ctx, res,
iov, num_iovs, info);
}
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
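Editor's note: both transfer directions above funnel guest memory through vrend_write_to_iovec / vrend_read_from_iovec. A minimal sketch of the assumed scatter-copy semantics, for orientation only (the real helpers live elsewhere in vrend and also support callback-driven reads):

#include <string.h>
#include <sys/uio.h>

/* Copy len bytes of src into an iovec array, skipping offset bytes
 * first; returns the number of bytes actually written. */
static size_t write_to_iovec(struct iovec *iov, int num_iovs, size_t offset,
                             const char *src, size_t len)
{
   size_t written = 0;
   for (int i = 0; i < num_iovs && written < len; i++) {
      size_t seg = iov[i].iov_len;
      if (offset >= seg) { /* this whole segment lies before the offset */
         offset -= seg;
         continue;
      }
      size_t avail = seg - offset;
      size_t n = avail < len - written ? avail : len - written;
      memcpy((char *)iov[i].iov_base + offset, src + written, n);
      written += n;
      offset = 0; /* offset is fully consumed after the first hit */
   }
   return written;
}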
7,123
|
static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
struct vrend_resource *res,
struct iovec *iov, int num_iovs,
const struct vrend_transfer_info *info)
{
void *data;
if (res->target == 0 && res->ptr) {
vrend_read_from_iovec(iov, num_iovs, info->offset, res->ptr + info->box->x, info->box->width);
return 0;
}
if (res->target == GL_TRANSFORM_FEEDBACK_BUFFER ||
res->target == GL_ELEMENT_ARRAY_BUFFER_ARB ||
res->target == GL_ARRAY_BUFFER_ARB ||
res->target == GL_TEXTURE_BUFFER ||
res->target == GL_UNIFORM_BUFFER) {
struct virgl_sub_upload_data d;
d.box = info->box;
d.target = res->target;
glBindBufferARB(res->target, res->id);
if (use_sub_data == 1) {
vrend_read_from_iovec_cb(iov, num_iovs, info->offset, info->box->width, &iov_buffer_upload, &d);
} else {
data = glMapBufferRange(res->target, info->box->x, info->box->width, GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_WRITE_BIT);
if (data == NULL) {
fprintf(stderr,"map failed for element buffer\n");
vrend_read_from_iovec_cb(iov, num_iovs, info->offset, info->box->width, &iov_buffer_upload, &d);
} else {
vrend_read_from_iovec(iov, num_iovs, info->offset, data, info->box->width);
glUnmapBuffer(res->target);
}
}
} else {
GLenum glformat;
GLenum gltype;
int need_temp = 0;
int elsize = util_format_get_blocksize(res->base.format);
int x = 0, y = 0;
bool compressed;
bool invert = false;
float depth_scale;
GLuint send_size = 0;
uint32_t stride = info->stride;
vrend_use_program(ctx, 0);
if (!stride)
stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, info->level)) * elsize;
compressed = util_format_is_compressed(res->base.format);
if (num_iovs > 1 || compressed) {
need_temp = true;
}
if (vrend_state.use_core_profile == true && (res->y_0_top || (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM))) {
need_temp = true;
if (res->y_0_top)
invert = true;
}
if (need_temp) {
send_size = util_format_get_nblocks(res->base.format, info->box->width,
info->box->height) * elsize * info->box->depth;
data = malloc(send_size);
if (!data)
return ENOMEM;
read_transfer_data(&res->base, iov, num_iovs, data, stride,
info->box, info->offset, invert);
} else {
data = (char*)iov[0].iov_base + info->offset;
}
if (stride && !need_temp) {
glPixelStorei(GL_UNPACK_ROW_LENGTH, stride / elsize);
} else
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
switch (elsize) {
case 1:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
break;
case 2:
case 6:
glPixelStorei(GL_UNPACK_ALIGNMENT, 2);
break;
case 4:
default:
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
break;
case 8:
glPixelStorei(GL_UNPACK_ALIGNMENT, 8);
break;
}
glformat = tex_conv_table[res->base.format].glformat;
gltype = tex_conv_table[res->base.format].gltype;
if ((!vrend_state.use_core_profile) && (res->y_0_top)) {
if (res->readback_fb_id == 0 || res->readback_fb_level != info->level) {
GLuint fb_id;
if (res->readback_fb_id)
glDeleteFramebuffers(1, &res->readback_fb_id);
glGenFramebuffers(1, &fb_id);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fb_id);
vrend_fb_bind_texture(res, 0, info->level, 0);
res->readback_fb_id = fb_id;
res->readback_fb_level = info->level;
} else {
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, res->readback_fb_id);
}
glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT);
vrend_blend_enable(ctx, false);
vrend_depth_test_enable(ctx, false);
vrend_alpha_test_enable(ctx, false);
vrend_stencil_test_enable(ctx, false);
glPixelZoom(1.0f, res->y_0_top ? -1.0f : 1.0f);
glWindowPos2i(info->box->x, res->y_0_top ? res->base.height0 - info->box->y : info->box->y);
glDrawPixels(info->box->width, info->box->height, glformat, gltype,
data);
} else {
uint32_t comp_size;
glBindTexture(res->target, res->id);
if (compressed) {
glformat = tex_conv_table[res->base.format].internalformat;
comp_size = util_format_get_nblocks(res->base.format, info->box->width,
info->box->height) * util_format_get_blocksize(res->base.format);
}
if (glformat == 0) {
glformat = GL_BGRA;
gltype = GL_UNSIGNED_BYTE;
}
x = info->box->x;
y = invert ? res->base.height0 - info->box->y - info->box->height : info->box->y;
if (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM) {
/* we get values from the guest as 24-bit scaled integers
but we give them to the host GL and it interprets them
as 32-bit scaled integers, so we need to scale them here */
depth_scale = 256.0;
if (!vrend_state.use_core_profile)
glPixelTransferf(GL_DEPTH_SCALE, depth_scale);
else
vrend_scale_depth(data, send_size, depth_scale);
}
if (res->target == GL_TEXTURE_CUBE_MAP) {
GLenum ctarget = GL_TEXTURE_CUBE_MAP_POSITIVE_X + info->box->z;
if (compressed) {
glCompressedTexSubImage2D(ctarget, info->level, x, y,
info->box->width, info->box->height,
glformat, comp_size, data);
} else {
glTexSubImage2D(ctarget, info->level, x, y, info->box->width, info->box->height,
glformat, gltype, data);
}
} else if (res->target == GL_TEXTURE_3D || res->target == GL_TEXTURE_2D_ARRAY || res->target == GL_TEXTURE_CUBE_MAP_ARRAY) {
if (compressed) {
glCompressedTexSubImage3D(res->target, info->level, x, y, info->box->z,
info->box->width, info->box->height, info->box->depth,
glformat, comp_size, data);
} else {
glTexSubImage3D(res->target, info->level, x, y, info->box->z,
info->box->width, info->box->height, info->box->depth,
glformat, gltype, data);
}
} else if (res->target == GL_TEXTURE_1D) {
if (compressed) {
glCompressedTexSubImage1D(res->target, info->level, info->box->x,
info->box->width,
glformat, comp_size, data);
} else {
glTexSubImage1D(res->target, info->level, info->box->x, info->box->width,
glformat, gltype, data);
}
} else {
if (compressed) {
glCompressedTexSubImage2D(res->target, info->level, x, res->target == GL_TEXTURE_1D_ARRAY ? info->box->z : y,
info->box->width, info->box->height,
glformat, comp_size, data);
} else {
glTexSubImage2D(res->target, info->level, x, res->target == GL_TEXTURE_1D_ARRAY ? info->box->z : y,
info->box->width, info->box->height,
glformat, gltype, data);
}
}
if (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM) {
if (!vrend_state.use_core_profile)
glPixelTransferf(GL_DEPTH_SCALE, 1.0);
}
}
if (stride && !need_temp)
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
if (need_temp)
free(data);
}
return 0;
}
|
DoS
| 0
|
static int vrend_renderer_transfer_write_iov(struct vrend_context *ctx,
struct vrend_resource *res,
struct iovec *iov, int num_iovs,
const struct vrend_transfer_info *info)
{
void *data;
if (res->target == 0 && res->ptr) {
vrend_read_from_iovec(iov, num_iovs, info->offset, res->ptr + info->box->x, info->box->width);
return 0;
}
if (res->target == GL_TRANSFORM_FEEDBACK_BUFFER ||
res->target == GL_ELEMENT_ARRAY_BUFFER_ARB ||
res->target == GL_ARRAY_BUFFER_ARB ||
res->target == GL_TEXTURE_BUFFER ||
res->target == GL_UNIFORM_BUFFER) {
struct virgl_sub_upload_data d;
d.box = info->box;
d.target = res->target;
glBindBufferARB(res->target, res->id);
if (use_sub_data == 1) {
vrend_read_from_iovec_cb(iov, num_iovs, info->offset, info->box->width, &iov_buffer_upload, &d);
} else {
data = glMapBufferRange(res->target, info->box->x, info->box->width, GL_MAP_INVALIDATE_RANGE_BIT | GL_MAP_UNSYNCHRONIZED_BIT | GL_MAP_WRITE_BIT);
if (data == NULL) {
fprintf(stderr,"map failed for element buffer\n");
vrend_read_from_iovec_cb(iov, num_iovs, info->offset, info->box->width, &iov_buffer_upload, &d);
} else {
vrend_read_from_iovec(iov, num_iovs, info->offset, data, info->box->width);
glUnmapBuffer(res->target);
}
}
} else {
GLenum glformat;
GLenum gltype;
int need_temp = 0;
int elsize = util_format_get_blocksize(res->base.format);
int x = 0, y = 0;
bool compressed;
bool invert = false;
float depth_scale;
GLuint send_size = 0;
uint32_t stride = info->stride;
vrend_use_program(ctx, 0);
if (!stride)
stride = util_format_get_nblocksx(res->base.format, u_minify(res->base.width0, info->level)) * elsize;
compressed = util_format_is_compressed(res->base.format);
if (num_iovs > 1 || compressed) {
need_temp = true;
}
if (vrend_state.use_core_profile == true && (res->y_0_top || (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM))) {
need_temp = true;
if (res->y_0_top)
invert = true;
}
if (need_temp) {
send_size = util_format_get_nblocks(res->base.format, info->box->width,
info->box->height) * elsize * info->box->depth;
data = malloc(send_size);
if (!data)
return ENOMEM;
read_transfer_data(&res->base, iov, num_iovs, data, stride,
info->box, info->offset, invert);
} else {
data = (char*)iov[0].iov_base + info->offset;
}
if (stride && !need_temp) {
glPixelStorei(GL_UNPACK_ROW_LENGTH, stride / elsize);
} else
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
switch (elsize) {
case 1:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
break;
case 2:
case 6:
glPixelStorei(GL_UNPACK_ALIGNMENT, 2);
break;
case 4:
default:
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
break;
case 8:
glPixelStorei(GL_UNPACK_ALIGNMENT, 8);
break;
}
glformat = tex_conv_table[res->base.format].glformat;
gltype = tex_conv_table[res->base.format].gltype;
if ((!vrend_state.use_core_profile) && (res->y_0_top)) {
if (res->readback_fb_id == 0 || res->readback_fb_level != info->level) {
GLuint fb_id;
if (res->readback_fb_id)
glDeleteFramebuffers(1, &res->readback_fb_id);
glGenFramebuffers(1, &fb_id);
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, fb_id);
vrend_fb_bind_texture(res, 0, info->level, 0);
res->readback_fb_id = fb_id;
res->readback_fb_level = info->level;
} else {
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, res->readback_fb_id);
}
glDrawBuffer(GL_COLOR_ATTACHMENT0_EXT);
vrend_blend_enable(ctx, false);
vrend_depth_test_enable(ctx, false);
vrend_alpha_test_enable(ctx, false);
vrend_stencil_test_enable(ctx, false);
glPixelZoom(1.0f, res->y_0_top ? -1.0f : 1.0f);
glWindowPos2i(info->box->x, res->y_0_top ? res->base.height0 - info->box->y : info->box->y);
glDrawPixels(info->box->width, info->box->height, glformat, gltype,
data);
} else {
uint32_t comp_size;
glBindTexture(res->target, res->id);
if (compressed) {
glformat = tex_conv_table[res->base.format].internalformat;
comp_size = util_format_get_nblocks(res->base.format, info->box->width,
info->box->height) * util_format_get_blocksize(res->base.format);
}
if (glformat == 0) {
glformat = GL_BGRA;
gltype = GL_UNSIGNED_BYTE;
}
x = info->box->x;
y = invert ? res->base.height0 - info->box->y - info->box->height : info->box->y;
if (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM) {
/* we get values from the guest as 24-bit scaled integers
but we give them to the host GL and it interprets them
as 32-bit scaled integers, so we need to scale them here */
depth_scale = 256.0;
if (!vrend_state.use_core_profile)
glPixelTransferf(GL_DEPTH_SCALE, depth_scale);
else
vrend_scale_depth(data, send_size, depth_scale);
}
if (res->target == GL_TEXTURE_CUBE_MAP) {
GLenum ctarget = GL_TEXTURE_CUBE_MAP_POSITIVE_X + info->box->z;
if (compressed) {
glCompressedTexSubImage2D(ctarget, info->level, x, y,
info->box->width, info->box->height,
glformat, comp_size, data);
} else {
glTexSubImage2D(ctarget, info->level, x, y, info->box->width, info->box->height,
glformat, gltype, data);
}
} else if (res->target == GL_TEXTURE_3D || res->target == GL_TEXTURE_2D_ARRAY || res->target == GL_TEXTURE_CUBE_MAP_ARRAY) {
if (compressed) {
glCompressedTexSubImage3D(res->target, info->level, x, y, info->box->z,
info->box->width, info->box->height, info->box->depth,
glformat, comp_size, data);
} else {
glTexSubImage3D(res->target, info->level, x, y, info->box->z,
info->box->width, info->box->height, info->box->depth,
glformat, gltype, data);
}
} else if (res->target == GL_TEXTURE_1D) {
if (compressed) {
glCompressedTexSubImage1D(res->target, info->level, info->box->x,
info->box->width,
glformat, comp_size, data);
} else {
glTexSubImage1D(res->target, info->level, info->box->x, info->box->width,
glformat, gltype, data);
}
} else {
if (compressed) {
glCompressedTexSubImage2D(res->target, info->level, x, res->target == GL_TEXTURE_1D_ARRAY ? info->box->z : y,
info->box->width, info->box->height,
glformat, comp_size, data);
} else {
glTexSubImage2D(res->target, info->level, x, res->target == GL_TEXTURE_1D_ARRAY ? info->box->z : y,
info->box->width, info->box->height,
glformat, gltype, data);
}
}
if (res->base.format == (enum pipe_format)VIRGL_FORMAT_Z24X8_UNORM) {
if (!vrend_state.use_core_profile)
glPixelTransferf(GL_DEPTH_SCALE, 1.0);
}
}
if (stride && !need_temp)
glPixelStorei(GL_UNPACK_ROW_LENGTH, 0);
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
if (need_temp)
free(data);
}
return 0;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,124
|
static void vrend_renderer_use_threaded_sync(void)
{
struct virgl_gl_ctx_param ctx_params;
if (getenv("VIRGL_DISABLE_MT"))
return;
ctx_params.shared = true;
ctx_params.major_ver = vrend_state.gl_major_ver;
ctx_params.minor_ver = vrend_state.gl_minor_ver;
vrend_state.stop_sync_thread = false;
vrend_state.sync_context = vrend_clicbs->create_gl_context(0, &ctx_params);
if (vrend_state.sync_context == NULL) {
fprintf(stderr, "failed to create sync opengl context\n");
return;
}
vrend_state.eventfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
if (vrend_state.eventfd == -1) {
fprintf(stderr, "Failed to create eventfd\n");
vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
return;
}
pipe_condvar_init(vrend_state.fence_cond);
pipe_mutex_init(vrend_state.fence_mutex);
vrend_state.sync_thread = pipe_thread_create(thread_sync, NULL);
if (!vrend_state.sync_thread) {
close(vrend_state.eventfd);
vrend_state.eventfd = -1;
vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
pipe_condvar_destroy(vrend_state.fence_cond);
pipe_mutex_destroy(vrend_state.fence_mutex);
}
}
|
DoS
| 0
|
static void vrend_renderer_use_threaded_sync(void)
{
struct virgl_gl_ctx_param ctx_params;
if (getenv("VIRGL_DISABLE_MT"))
return;
ctx_params.shared = true;
ctx_params.major_ver = vrend_state.gl_major_ver;
ctx_params.minor_ver = vrend_state.gl_minor_ver;
vrend_state.stop_sync_thread = false;
vrend_state.sync_context = vrend_clicbs->create_gl_context(0, &ctx_params);
if (vrend_state.sync_context == NULL) {
fprintf(stderr, "failed to create sync opengl context\n");
return;
}
vrend_state.eventfd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
if (vrend_state.eventfd == -1) {
fprintf(stderr, "Failed to create eventfd\n");
vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
return;
}
pipe_condvar_init(vrend_state.fence_cond);
pipe_mutex_init(vrend_state.fence_mutex);
vrend_state.sync_thread = pipe_thread_create(thread_sync, NULL);
if (!vrend_state.sync_thread) {
close(vrend_state.eventfd);
vrend_state.eventfd = -1;
vrend_clicbs->destroy_gl_context(vrend_state.sync_context);
pipe_condvar_destroy(vrend_state.fence_cond);
pipe_mutex_destroy(vrend_state.fence_mutex);
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
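Editor's note: vrend_renderer_use_threaded_sync unwinds in reverse acquisition order on each failure path (the GL context is destroyed when eventfd creation fails, the eventfd is closed when thread creation fails). A compact Linux-only sketch of that idiom, using the same eventfd call; the thread step is faked here:

#include <stdio.h>
#include <sys/eventfd.h>
#include <unistd.h>

int setup_sync(void)
{
   int efd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
   if (efd == -1) {
      perror("eventfd"); /* nothing acquired yet, nothing to unwind */
      return -1;
   }
   int thread_ok = 0; /* pretend pipe_thread_create failed */
   if (!thread_ok) {
      close(efd); /* unwind the eventfd acquired above */
      return -1;
   }
   return efd;
}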
7,125
|
static void vrend_renderer_use_threaded_sync(void)
{
}
|
DoS
| 0
|
static void vrend_renderer_use_threaded_sync(void)
{
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,126
|
void vrend_report_buffer_error(struct vrend_context *ctx, int cmd)
{
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, cmd);
}
|
DoS
| 0
|
void vrend_report_buffer_error(struct vrend_context *ctx, int cmd)
{
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_CMD_BUFFER, cmd);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,127
|
static void vrend_reset_fences(void)
{
struct vrend_fence *fence, *stor;
if (vrend_state.sync_thread)
pipe_mutex_lock(vrend_state.fence_mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
free_fence_locked(fence);
}
if (vrend_state.sync_thread)
pipe_mutex_unlock(vrend_state.fence_mutex);
}
|
DoS
| 0
|
static void vrend_reset_fences(void)
{
struct vrend_fence *fence, *stor;
if (vrend_state.sync_thread)
pipe_mutex_lock(vrend_state.fence_mutex);
LIST_FOR_EACH_ENTRY_SAFE(fence, stor, &vrend_state.fence_list, fences) {
free_fence_locked(fence);
}
if (vrend_state.sync_thread)
pipe_mutex_unlock(vrend_state.fence_mutex);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,128
|
static void vrend_resource_copy_fallback(struct vrend_context *ctx,
struct vrend_resource *src_res,
struct vrend_resource *dst_res,
uint32_t dst_level,
uint32_t dstx, uint32_t dsty,
uint32_t dstz, uint32_t src_level,
const struct pipe_box *src_box)
{
char *tptr;
uint32_t transfer_size;
GLenum glformat, gltype;
int elsize = util_format_get_blocksize(dst_res->base.format);
int compressed = util_format_is_compressed(dst_res->base.format);
int cube_slice = 1;
uint32_t slice_size, slice_offset;
int i;
if (src_res->target == GL_TEXTURE_CUBE_MAP)
cube_slice = 6;
if (src_res->base.format != dst_res->base.format) {
fprintf(stderr, "copy fallback failed due to mismatched formats %d %d\n", src_res->base.format, dst_res->base.format);
return;
}
/* this is ugly; we need to do a full GetTexImage */
slice_size = util_format_get_nblocks(src_res->base.format, u_minify(src_res->base.width0, src_level), u_minify(src_res->base.height0, src_level)) *
u_minify(src_res->base.depth0, src_level) * util_format_get_blocksize(src_res->base.format);
transfer_size = slice_size * src_res->base.array_size;
tptr = malloc(transfer_size);
if (!tptr)
return;
glformat = tex_conv_table[src_res->base.format].glformat;
gltype = tex_conv_table[src_res->base.format].gltype;
if (compressed)
glformat = tex_conv_table[src_res->base.format].internalformat;
switch (elsize) {
case 1:
glPixelStorei(GL_PACK_ALIGNMENT, 1);
break;
case 2:
glPixelStorei(GL_PACK_ALIGNMENT, 2);
break;
case 4:
default:
glPixelStorei(GL_PACK_ALIGNMENT, 4);
break;
case 8:
glPixelStorei(GL_PACK_ALIGNMENT, 8);
break;
}
glBindTexture(src_res->target, src_res->id);
slice_offset = 0;
for (i = 0; i < cube_slice; i++) {
GLenum ctarget = src_res->target == GL_TEXTURE_CUBE_MAP ? GL_TEXTURE_CUBE_MAP_POSITIVE_X + i : src_res->target;
if (compressed) {
if (vrend_state.have_robustness)
glGetnCompressedTexImageARB(ctarget, src_level, transfer_size, tptr + slice_offset);
else
glGetCompressedTexImage(ctarget, src_level, tptr + slice_offset);
} else {
if (vrend_state.have_robustness)
glGetnTexImageARB(ctarget, src_level, glformat, gltype, transfer_size, tptr + slice_offset);
else
glGetTexImage(ctarget, src_level, glformat, gltype, tptr + slice_offset);
}
slice_offset += slice_size;
}
glPixelStorei(GL_PACK_ALIGNMENT, 4);
switch (elsize) {
case 1:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
break;
case 2:
glPixelStorei(GL_UNPACK_ALIGNMENT, 2);
break;
case 4:
default:
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
break;
case 8:
glPixelStorei(GL_UNPACK_ALIGNMENT, 8);
break;
}
glBindTexture(dst_res->target, dst_res->id);
slice_offset = 0;
for (i = 0; i < cube_slice; i++) {
GLenum ctarget = dst_res->target == GL_TEXTURE_CUBE_MAP ? GL_TEXTURE_CUBE_MAP_POSITIVE_X + i : dst_res->target;
if (compressed) {
if (ctarget == GL_TEXTURE_1D) {
glCompressedTexSubImage1D(ctarget, dst_level, dstx,
src_box->width,
glformat, transfer_size, tptr + slice_offset);
} else {
glCompressedTexSubImage2D(ctarget, dst_level, dstx, dsty,
src_box->width, src_box->height,
glformat, transfer_size, tptr + slice_offset);
}
} else {
if (ctarget == GL_TEXTURE_1D) {
glTexSubImage1D(ctarget, dst_level, dstx, src_box->width, glformat, gltype, tptr + slice_offset);
} else if (ctarget == GL_TEXTURE_3D ||
ctarget == GL_TEXTURE_2D_ARRAY ||
ctarget == GL_TEXTURE_CUBE_MAP_ARRAY) {
glTexSubImage3D(ctarget, dst_level, dstx, dsty, 0, src_box->width, src_box->height, src_box->depth, glformat, gltype, tptr + slice_offset);
} else {
glTexSubImage2D(ctarget, dst_level, dstx, dsty, src_box->width, src_box->height, glformat, gltype, tptr + slice_offset);
}
}
slice_offset += slice_size;
}
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
free(tptr);
}
|
DoS
| 0
|
static void vrend_resource_copy_fallback(struct vrend_context *ctx,
struct vrend_resource *src_res,
struct vrend_resource *dst_res,
uint32_t dst_level,
uint32_t dstx, uint32_t dsty,
uint32_t dstz, uint32_t src_level,
const struct pipe_box *src_box)
{
char *tptr;
uint32_t transfer_size;
GLenum glformat, gltype;
int elsize = util_format_get_blocksize(dst_res->base.format);
int compressed = util_format_is_compressed(dst_res->base.format);
int cube_slice = 1;
uint32_t slice_size, slice_offset;
int i;
if (src_res->target == GL_TEXTURE_CUBE_MAP)
cube_slice = 6;
if (src_res->base.format != dst_res->base.format) {
fprintf(stderr, "copy fallback failed due to mismatched formats %d %d\n", src_res->base.format, dst_res->base.format);
return;
}
/* this is ugly; we need to do a full GetTexImage */
slice_size = util_format_get_nblocks(src_res->base.format, u_minify(src_res->base.width0, src_level), u_minify(src_res->base.height0, src_level)) *
u_minify(src_res->base.depth0, src_level) * util_format_get_blocksize(src_res->base.format);
transfer_size = slice_size * src_res->base.array_size;
tptr = malloc(transfer_size);
if (!tptr)
return;
glformat = tex_conv_table[src_res->base.format].glformat;
gltype = tex_conv_table[src_res->base.format].gltype;
if (compressed)
glformat = tex_conv_table[src_res->base.format].internalformat;
switch (elsize) {
case 1:
glPixelStorei(GL_PACK_ALIGNMENT, 1);
break;
case 2:
glPixelStorei(GL_PACK_ALIGNMENT, 2);
break;
case 4:
default:
glPixelStorei(GL_PACK_ALIGNMENT, 4);
break;
case 8:
glPixelStorei(GL_PACK_ALIGNMENT, 8);
break;
}
glBindTexture(src_res->target, src_res->id);
slice_offset = 0;
for (i = 0; i < cube_slice; i++) {
GLenum ctarget = src_res->target == GL_TEXTURE_CUBE_MAP ? GL_TEXTURE_CUBE_MAP_POSITIVE_X + i : src_res->target;
if (compressed) {
if (vrend_state.have_robustness)
glGetnCompressedTexImageARB(ctarget, src_level, transfer_size, tptr + slice_offset);
else
glGetCompressedTexImage(ctarget, src_level, tptr + slice_offset);
} else {
if (vrend_state.have_robustness)
glGetnTexImageARB(ctarget, src_level, glformat, gltype, transfer_size, tptr + slice_offset);
else
glGetTexImage(ctarget, src_level, glformat, gltype, tptr + slice_offset);
}
slice_offset += slice_size;
}
glPixelStorei(GL_PACK_ALIGNMENT, 4);
switch (elsize) {
case 1:
glPixelStorei(GL_UNPACK_ALIGNMENT, 1);
break;
case 2:
glPixelStorei(GL_UNPACK_ALIGNMENT, 2);
break;
case 4:
default:
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
break;
case 8:
glPixelStorei(GL_UNPACK_ALIGNMENT, 8);
break;
}
glBindTexture(dst_res->target, dst_res->id);
slice_offset = 0;
for (i = 0; i < cube_slice; i++) {
GLenum ctarget = dst_res->target == GL_TEXTURE_CUBE_MAP ? GL_TEXTURE_CUBE_MAP_POSITIVE_X + i : dst_res->target;
if (compressed) {
if (ctarget == GL_TEXTURE_1D) {
glCompressedTexSubImage1D(ctarget, dst_level, dstx,
src_box->width,
glformat, transfer_size, tptr + slice_offset);
} else {
glCompressedTexSubImage2D(ctarget, dst_level, dstx, dsty,
src_box->width, src_box->height,
glformat, transfer_size, tptr + slice_offset);
}
} else {
if (ctarget == GL_TEXTURE_1D) {
glTexSubImage1D(ctarget, dst_level, dstx, src_box->width, glformat, gltype, tptr + slice_offset);
} else if (ctarget == GL_TEXTURE_3D ||
ctarget == GL_TEXTURE_2D_ARRAY ||
ctarget == GL_TEXTURE_CUBE_MAP_ARRAY) {
glTexSubImage3D(ctarget, dst_level, dstx, dsty, 0, src_box->width, src_box->height, src_box->depth, glformat, gltype, tptr + slice_offset);
} else {
glTexSubImage2D(ctarget, dst_level, dstx, dsty, src_box->width, src_box->height, glformat, gltype, tptr + slice_offset);
}
}
slice_offset += slice_size;
}
glPixelStorei(GL_UNPACK_ALIGNMENT, 4);
free(tptr);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,129
|
static void vrend_sampler_view_reference(struct vrend_sampler_view **ptr, struct vrend_sampler_view *view)
{
struct vrend_sampler_view *old_view = *ptr;
if (pipe_reference(&(*ptr)->reference, &view->reference))
vrend_destroy_sampler_view(old_view);
*ptr = view;
}
|
DoS
| 0
|
static void vrend_sampler_view_reference(struct vrend_sampler_view **ptr, struct vrend_sampler_view *view)
{
struct vrend_sampler_view *old_view = *ptr;
if (pipe_reference(&(*ptr)->reference, &view->reference))
vrend_destroy_sampler_view(old_view);
*ptr = view;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
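Editor's note: the helper above leans on gallium's pipe_reference idiom. A simplified sketch of the assumed semantics (the real helper is atomic and also tolerates old == new):

#include <stdbool.h>

struct refcnt { int count; };

/* Bump the new object's count, drop the old one's, and tell the
 * caller when the old object hit zero and must be destroyed. */
static bool ref_transfer(struct refcnt *oldref, struct refcnt *newref)
{
   if (newref)
      newref->count++;
   if (oldref && --oldref->count == 0)
      return true; /* caller destroys the old object */
   return false;
}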
7,130
|
static void vrend_scale_depth(void *ptr, int size, float scale_val)
{
GLuint *ival = ptr;
const GLfloat myscale = 1.0f / 0xffffff;
int i;
for (i = 0; i < size / 4; i++) {
GLuint value = ival[i];
GLfloat d = ((float)(value >> 8) * myscale) * scale_val;
d = CLAMP(d, 0.0F, 1.0F);
ival[i] = (int)(d / myscale) << 8;
}
}
|
DoS
| 0
|
static void vrend_scale_depth(void *ptr, int size, float scale_val)
{
GLuint *ival = ptr;
const GLfloat myscale = 1.0f / 0xffffff;
int i;
for (i = 0; i < size / 4; i++) {
GLuint value = ival[i];
GLfloat d = ((float)(value >> 8) * myscale) * scale_val;
d = CLAMP(d, 0.0F, 1.0F);
ival[i] = (int)(d / myscale) << 8;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
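Editor's note: vrend_scale_depth treats each 32-bit word as a Z24 value stored in the top 24 bits. A runnable one-value walkthrough of the same arithmetic, with depth_scale fixed at the 256.0 used by the transfer path:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
   const float myscale = 1.0f / 0xffffff; /* 24-bit unit scale */
   const float depth_scale = 256.0f;
   uint32_t value = 0x001234u << 8; /* sample Z24 word, low byte unused */
   float d = ((float)(value >> 8) * myscale) * depth_scale;
   if (d > 1.0f) /* CLAMP(d, 0.0F, 1.0F) */
      d = 1.0f;
   if (d < 0.0f)
      d = 0.0f;
   uint32_t out = (uint32_t)(d / myscale) << 8;
   printf("in=0x%08x out=0x%08x d=%f\n", value, out, d);
   return 0;
}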
7,131
|
void vrend_set_blend_color(struct vrend_context *ctx,
struct pipe_blend_color *color)
{
ctx->sub->blend_color = *color;
glBlendColor(color->color[0], color->color[1], color->color[2],
color->color[3]);
}
|
DoS
| 0
|
void vrend_set_blend_color(struct vrend_context *ctx,
struct pipe_blend_color *color)
{
ctx->sub->blend_color = *color;
glBlendColor(color->color[0], color->color[1], color->color[2],
color->color[3]);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,132
|
void vrend_set_clip_state(struct vrend_context *ctx, struct pipe_clip_state *ucp)
{
if (vrend_state.use_core_profile) {
ctx->sub->ucp_state = *ucp;
} else {
int i, j;
GLdouble val[4];
for (i = 0; i < 8; i++) {
for (j = 0; j < 4; j++)
val[j] = ucp->ucp[i][j];
glClipPlane(GL_CLIP_PLANE0 + i, val);
}
}
}
|
DoS
| 0
|
void vrend_set_clip_state(struct vrend_context *ctx, struct pipe_clip_state *ucp)
{
if (vrend_state.use_core_profile) {
ctx->sub->ucp_state = *ucp;
} else {
int i, j;
GLdouble val[4];
for (i = 0; i < 8; i++) {
for (j = 0; j < 4; j++)
val[j] = ucp->ucp[i][j];
glClipPlane(GL_CLIP_PLANE0 + i, val);
}
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,133
|
void vrend_set_framebuffer_state(struct vrend_context *ctx,
uint32_t nr_cbufs, uint32_t surf_handle[PIPE_MAX_COLOR_BUFS],
uint32_t zsurf_handle)
{
struct vrend_surface *surf, *zsurf;
int i;
int old_num;
GLenum status;
GLint new_height = -1;
bool new_ibf = false;
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, ctx->sub->fb_id);
if (zsurf_handle) {
zsurf = vrend_object_lookup(ctx->sub->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
if (!zsurf) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, zsurf_handle);
return;
}
} else
zsurf = NULL;
if (ctx->sub->zsurf != zsurf) {
vrend_surface_reference(&ctx->sub->zsurf, zsurf);
vrend_hw_set_zsurf_texture(ctx);
}
old_num = ctx->sub->nr_cbufs;
ctx->sub->nr_cbufs = nr_cbufs;
ctx->sub->old_nr_cbufs = old_num;
for (i = 0; i < nr_cbufs; i++) {
if (surf_handle[i] != 0) {
surf = vrend_object_lookup(ctx->sub->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
if (!surf) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, surf_handle[i]);
return;
}
} else
surf = NULL;
if (ctx->sub->surf[i] != surf) {
vrend_surface_reference(&ctx->sub->surf[i], surf);
vrend_hw_set_color_surface(ctx, i);
}
}
if (old_num > ctx->sub->nr_cbufs) {
for (i = ctx->sub->nr_cbufs; i < old_num; i++) {
vrend_surface_reference(&ctx->sub->surf[i], NULL);
vrend_hw_set_color_surface(ctx, i);
}
}
/* find a buffer to set fb_height from */
if (ctx->sub->nr_cbufs == 0 && !ctx->sub->zsurf) {
new_height = 0;
new_ibf = false;
} else if (ctx->sub->nr_cbufs == 0) {
new_height = u_minify(ctx->sub->zsurf->texture->base.height0, ctx->sub->zsurf->val0);
new_ibf = ctx->sub->zsurf->texture->y_0_top ? true : false;
}
else {
surf = NULL;
for (i = 0; i < ctx->sub->nr_cbufs; i++) {
if (ctx->sub->surf[i]) {
surf = ctx->sub->surf[i];
break;
}
}
if (surf == NULL) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, i);
return;
}
new_height = u_minify(surf->texture->base.height0, surf->val0);
new_ibf = surf->texture->y_0_top ? true : false;
}
if (new_height != -1) {
if (ctx->sub->fb_height != new_height || ctx->sub->inverted_fbo_content != new_ibf) {
ctx->sub->fb_height = new_height;
ctx->sub->inverted_fbo_content = new_ibf;
ctx->sub->scissor_state_dirty = (1 << 0);
ctx->sub->viewport_state_dirty = (1 << 0);
}
}
vrend_hw_emit_framebuffer_state(ctx);
if (ctx->sub->nr_cbufs > 0 || ctx->sub->zsurf) {
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
fprintf(stderr,"failed to complete framebuffer 0x%x %s\n", status, ctx->debug_name);
}
ctx->sub->shader_dirty = true;
}
|
DoS
| 0
|
void vrend_set_framebuffer_state(struct vrend_context *ctx,
uint32_t nr_cbufs, uint32_t surf_handle[PIPE_MAX_COLOR_BUFS],
uint32_t zsurf_handle)
{
struct vrend_surface *surf, *zsurf;
int i;
int old_num;
GLenum status;
GLint new_height = -1;
bool new_ibf = false;
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, ctx->sub->fb_id);
if (zsurf_handle) {
zsurf = vrend_object_lookup(ctx->sub->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
if (!zsurf) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, zsurf_handle);
return;
}
} else
zsurf = NULL;
if (ctx->sub->zsurf != zsurf) {
vrend_surface_reference(&ctx->sub->zsurf, zsurf);
vrend_hw_set_zsurf_texture(ctx);
}
old_num = ctx->sub->nr_cbufs;
ctx->sub->nr_cbufs = nr_cbufs;
ctx->sub->old_nr_cbufs = old_num;
for (i = 0; i < nr_cbufs; i++) {
if (surf_handle[i] != 0) {
surf = vrend_object_lookup(ctx->sub->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
if (!surf) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, surf_handle[i]);
return;
}
} else
surf = NULL;
if (ctx->sub->surf[i] != surf) {
vrend_surface_reference(&ctx->sub->surf[i], surf);
vrend_hw_set_color_surface(ctx, i);
}
}
if (old_num > ctx->sub->nr_cbufs) {
for (i = ctx->sub->nr_cbufs; i < old_num; i++) {
vrend_surface_reference(&ctx->sub->surf[i], NULL);
vrend_hw_set_color_surface(ctx, i);
}
}
/* find a buffer to set fb_height from */
if (ctx->sub->nr_cbufs == 0 && !ctx->sub->zsurf) {
new_height = 0;
new_ibf = false;
} else if (ctx->sub->nr_cbufs == 0) {
new_height = u_minify(ctx->sub->zsurf->texture->base.height0, ctx->sub->zsurf->val0);
new_ibf = ctx->sub->zsurf->texture->y_0_top ? true : false;
}
else {
surf = NULL;
for (i = 0; i < ctx->sub->nr_cbufs; i++) {
if (ctx->sub->surf[i]) {
surf = ctx->sub->surf[i];
break;
}
}
if (surf == NULL) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, i);
return;
}
new_height = u_minify(surf->texture->base.height0, surf->val0);
new_ibf = surf->texture->y_0_top ? true : false;
}
if (new_height != -1) {
if (ctx->sub->fb_height != new_height || ctx->sub->inverted_fbo_content != new_ibf) {
ctx->sub->fb_height = new_height;
ctx->sub->inverted_fbo_content = new_ibf;
ctx->sub->scissor_state_dirty = (1 << 0);
ctx->sub->viewport_state_dirty = (1 << 0);
}
}
vrend_hw_emit_framebuffer_state(ctx);
if (ctx->sub->nr_cbufs > 0 || ctx->sub->zsurf) {
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
fprintf(stderr,"failed to complete framebuffer 0x%x %s\n", status, ctx->debug_name);
}
ctx->sub->shader_dirty = true;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,134
|
void vrend_set_index_buffer(struct vrend_context *ctx,
uint32_t res_handle,
uint32_t index_size,
uint32_t offset)
{
struct vrend_resource *res;
ctx->sub->ib.index_size = index_size;
ctx->sub->ib.offset = offset;
if (res_handle) {
if (ctx->sub->index_buffer_res_id != res_handle) {
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, NULL);
ctx->sub->index_buffer_res_id = 0;
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return;
}
vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, res);
ctx->sub->index_buffer_res_id = res_handle;
}
} else {
vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, NULL);
ctx->sub->index_buffer_res_id = 0;
}
}
|
DoS
| 0
|
void vrend_set_index_buffer(struct vrend_context *ctx,
uint32_t res_handle,
uint32_t index_size,
uint32_t offset)
{
struct vrend_resource *res;
ctx->sub->ib.index_size = index_size;
ctx->sub->ib.offset = offset;
if (res_handle) {
if (ctx->sub->index_buffer_res_id != res_handle) {
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, NULL);
ctx->sub->index_buffer_res_id = 0;
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return;
}
vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, res);
ctx->sub->index_buffer_res_id = res_handle;
}
} else {
vrend_resource_reference((struct vrend_resource **)&ctx->sub->ib.buffer, NULL);
ctx->sub->index_buffer_res_id = 0;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,135
|
void vrend_set_num_sampler_views(struct vrend_context *ctx,
uint32_t shader_type,
uint32_t start_slot,
int num_sampler_views)
{
if (start_slot + num_sampler_views < ctx->sub->views[shader_type].num_views) {
int i;
for (i = start_slot + num_sampler_views; i < ctx->sub->views[shader_type].num_views; i++)
vrend_sampler_view_reference(&ctx->sub->views[shader_type].views[i], NULL);
}
ctx->sub->views[shader_type].num_views = start_slot + num_sampler_views;
}
|
DoS
| 0
|
void vrend_set_num_sampler_views(struct vrend_context *ctx,
uint32_t shader_type,
uint32_t start_slot,
int num_sampler_views)
{
if (start_slot + num_sampler_views < ctx->sub->views[shader_type].num_views) {
int i;
for (i = start_slot + num_sampler_views; i < ctx->sub->views[shader_type].num_views; i++)
vrend_sampler_view_reference(&ctx->sub->views[shader_type].views[i], NULL);
}
ctx->sub->views[shader_type].num_views = start_slot + num_sampler_views;
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,136
|
void vrend_set_num_vbo(struct vrend_context *ctx,
int num_vbo)
{
int old_num = ctx->sub->num_vbos;
int i;
ctx->sub->num_vbos = num_vbo;
ctx->sub->old_num_vbos = old_num;
if (old_num != num_vbo)
ctx->sub->vbo_dirty = true;
for (i = num_vbo; i < old_num; i++) {
vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[i].buffer, NULL);
ctx->sub->vbo_res_ids[i] = 0;
}
}
|
DoS
| 0
|
void vrend_set_num_vbo(struct vrend_context *ctx,
int num_vbo)
{
int old_num = ctx->sub->num_vbos;
int i;
ctx->sub->num_vbos = num_vbo;
ctx->sub->old_num_vbos = old_num;
if (old_num != num_vbo)
ctx->sub->vbo_dirty = true;
for (i = num_vbo; i < old_num; i++) {
vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[i].buffer, NULL);
ctx->sub->vbo_res_ids[i] = 0;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,137
|
void vrend_set_polygon_stipple(struct vrend_context *ctx,
struct pipe_poly_stipple *ps)
{
if (vrend_state.use_core_profile) {
static const unsigned bit31 = 1 << 31;
GLubyte *stip = calloc(1, 1024);
int i, j;
if (!ctx->pstip_inited)
vrend_init_pstipple_texture(ctx);
if (!stip)
return;
for (i = 0; i < 32; i++) {
for (j = 0; j < 32; j++) {
if (ps->stipple[i] & (bit31 >> j))
stip[i * 32 + j] = 0;
else
stip[i * 32 + j] = 255;
}
}
glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 32, 32,
GL_RED, GL_UNSIGNED_BYTE, stip);
free(stip);
return;
}
glPolygonStipple((const GLubyte *)ps->stipple);
}
|
DoS
| 0
|
void vrend_set_polygon_stipple(struct vrend_context *ctx,
struct pipe_poly_stipple *ps)
{
if (vrend_state.use_core_profile) {
static const unsigned bit31 = 1 << 31;
GLubyte *stip = calloc(1, 1024);
int i, j;
if (!ctx->pstip_inited)
vrend_init_pstipple_texture(ctx);
if (!stip)
return;
for (i = 0; i < 32; i++) {
for (j = 0; j < 32; j++) {
if (ps->stipple[i] & (bit31 >> j))
stip[i * 32 + j] = 0;
else
stip[i * 32 + j] = 255;
}
}
glBindTexture(GL_TEXTURE_2D, ctx->pstipple_tex_id);
glTexSubImage2D(GL_TEXTURE_2D, 0, 0, 0, 32, 32,
GL_RED, GL_UNSIGNED_BYTE, stip);
free(stip);
return;
}
glPolygonStipple((const GLubyte *)ps->stipple);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
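Editor's note: the core-profile path above expands each 32-bit stipple row into 32 texture bytes, set bit -> 0 (fragment masked), clear bit -> 255. A standalone rendering of one row (the pattern value is arbitrary):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
   const uint32_t bit31 = 1u << 31;
   uint32_t row = 0xF0F0F0F0u; /* hypothetical stipple row */
   uint8_t texels[32];
   int j;
   for (j = 0; j < 32; j++)
      texels[j] = (row & (bit31 >> j)) ? 0 : 255; /* bit 31 = leftmost texel */
   for (j = 0; j < 32; j++)
      putchar(texels[j] ? '.' : '#'); /* '#' marks masked texels */
   putchar('\n');
   return 0;
}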
7,138
|
void vrend_set_sample_mask(struct vrend_context *ctx, unsigned sample_mask)
{
glSampleMaski(0, sample_mask);
}
|
DoS
| 0
|
void vrend_set_sample_mask(struct vrend_context *ctx, unsigned sample_mask)
{
glSampleMaski(0, sample_mask);
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,139
|
void vrend_set_scissor_state(struct vrend_context *ctx,
uint32_t start_slot,
uint32_t num_scissor,
struct pipe_scissor_state *ss)
{
int i, idx;
if (start_slot > PIPE_MAX_VIEWPORTS ||
num_scissor > (PIPE_MAX_VIEWPORTS - start_slot)) {
vrend_report_buffer_error(ctx, 0);
return;
}
for (i = 0; i < num_scissor; i++) {
idx = start_slot + i;
ctx->sub->ss[idx] = ss[i];
ctx->sub->scissor_state_dirty |= (1 << idx);
}
}
|
DoS
| 0
|
void vrend_set_scissor_state(struct vrend_context *ctx,
uint32_t start_slot,
uint32_t num_scissor,
struct pipe_scissor_state *ss)
{
int i, idx;
if (start_slot > PIPE_MAX_VIEWPORTS ||
num_scissor > (PIPE_MAX_VIEWPORTS - start_slot)) {
vrend_report_buffer_error(ctx, 0);
return;
}
for (i = 0; i < num_scissor; i++) {
idx = start_slot + i;
ctx->sub->ss[idx] = ss[i];
ctx->sub->scissor_state_dirty |= (1 << idx);
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
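Editor's note: the guard above writes the range check as num_scissor > (PIPE_MAX_VIEWPORTS - start_slot) rather than start_slot + num_scissor > PIPE_MAX_VIEWPORTS; the subtraction form cannot wrap for huge counts. The same shape in isolation (MAX_SLOTS is a stand-in; like the original, a start equal to the maximum is tolerated when the count is zero):

#include <stdbool.h>
#include <stdint.h>

#define MAX_SLOTS 16u /* stand-in for PIPE_MAX_VIEWPORTS */

static bool range_ok(uint32_t start, uint32_t count)
{
   /* start + count could wrap in 32 bits; MAX_SLOTS - start cannot
    * once start itself has been checked. */
   return start <= MAX_SLOTS && count <= MAX_SLOTS - start;
}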
7,140
|
void vrend_set_single_vbo(struct vrend_context *ctx,
int index,
uint32_t stride,
uint32_t buffer_offset,
uint32_t res_handle)
{
struct vrend_resource *res;
if (ctx->sub->vbo[index].stride != stride ||
ctx->sub->vbo[index].buffer_offset != buffer_offset ||
ctx->sub->vbo_res_ids[index] != res_handle)
ctx->sub->vbo_dirty = true;
ctx->sub->vbo[index].stride = stride;
ctx->sub->vbo[index].buffer_offset = buffer_offset;
if (res_handle == 0) {
vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[index].buffer, NULL);
ctx->sub->vbo_res_ids[index] = 0;
} else if (ctx->sub->vbo_res_ids[index] != res_handle) {
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
ctx->sub->vbo_res_ids[index] = 0;
return;
}
vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[index].buffer, res);
ctx->sub->vbo_res_ids[index] = res_handle;
}
}
|
DoS
| 0
|
void vrend_set_single_vbo(struct vrend_context *ctx,
int index,
uint32_t stride,
uint32_t buffer_offset,
uint32_t res_handle)
{
struct vrend_resource *res;
if (ctx->sub->vbo[index].stride != stride ||
ctx->sub->vbo[index].buffer_offset != buffer_offset ||
ctx->sub->vbo_res_ids[index] != res_handle)
ctx->sub->vbo_dirty = true;
ctx->sub->vbo[index].stride = stride;
ctx->sub->vbo[index].buffer_offset = buffer_offset;
if (res_handle == 0) {
vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[index].buffer, NULL);
ctx->sub->vbo_res_ids[index] = 0;
} else if (ctx->sub->vbo_res_ids[index] != res_handle) {
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
ctx->sub->vbo_res_ids[index] = 0;
return;
}
vrend_resource_reference((struct vrend_resource **)&ctx->sub->vbo[index].buffer, res);
ctx->sub->vbo_res_ids[index] = res_handle;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,141
|
void vrend_set_stencil_ref(struct vrend_context *ctx,
struct pipe_stencil_ref *ref)
{
if (ctx->sub->stencil_refs[0] != ref->ref_value[0] ||
ctx->sub->stencil_refs[1] != ref->ref_value[1]) {
ctx->sub->stencil_refs[0] = ref->ref_value[0];
ctx->sub->stencil_refs[1] = ref->ref_value[1];
ctx->sub->stencil_state_dirty = true;
}
}
|
DoS
| 0
|
void vrend_set_stencil_ref(struct vrend_context *ctx,
struct pipe_stencil_ref *ref)
{
if (ctx->sub->stencil_refs[0] != ref->ref_value[0] ||
ctx->sub->stencil_refs[1] != ref->ref_value[1]) {
ctx->sub->stencil_refs[0] = ref->ref_value[0];
ctx->sub->stencil_refs[1] = ref->ref_value[1];
ctx->sub->stencil_state_dirty = true;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,142
|
void vrend_set_streamout_targets(struct vrend_context *ctx,
uint32_t append_bitmask,
uint32_t num_targets,
uint32_t *handles)
{
struct vrend_so_target *target;
int i;
if (num_targets) {
bool found = false;
struct vrend_streamout_object *obj;
LIST_FOR_EACH_ENTRY(obj, &ctx->sub->streamout_list, head) {
if (obj->num_targets == num_targets) {
if (!memcmp(handles, obj->handles, num_targets * 4)) {
found = true;
break;
}
}
}
if (found) {
ctx->sub->current_so = obj;
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id);
return;
}
obj = CALLOC_STRUCT(vrend_streamout_object);
if (vrend_state.have_tf2) {
glGenTransformFeedbacks(1, &obj->id);
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id);
}
obj->num_targets = num_targets;
for (i = 0; i < num_targets; i++) {
obj->handles[i] = handles[i];
target = vrend_object_lookup(ctx->sub->object_hash, handles[i], VIRGL_OBJECT_STREAMOUT_TARGET);
if (!target) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handles[i]);
free(obj);
return;
}
vrend_so_target_reference(&obj->so_targets[i], target);
}
vrend_hw_emit_streamout_targets(ctx, obj);
list_addtail(&obj->head, &ctx->sub->streamout_list);
ctx->sub->current_so = obj;
obj->xfb_state = XFB_STATE_STARTED_NEED_BEGIN;
} else {
if (vrend_state.have_tf2)
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0);
ctx->sub->current_so = NULL;
}
}
|
DoS
| 0
|
void vrend_set_streamout_targets(struct vrend_context *ctx,
uint32_t append_bitmask,
uint32_t num_targets,
uint32_t *handles)
{
struct vrend_so_target *target;
int i;
if (num_targets) {
bool found = false;
struct vrend_streamout_object *obj;
LIST_FOR_EACH_ENTRY(obj, &ctx->sub->streamout_list, head) {
if (obj->num_targets == num_targets) {
if (!memcmp(handles, obj->handles, num_targets * 4)) {
found = true;
break;
}
}
}
if (found) {
ctx->sub->current_so = obj;
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id);
return;
}
obj = CALLOC_STRUCT(vrend_streamout_object);
if (vrend_state.have_tf2) {
glGenTransformFeedbacks(1, &obj->id);
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, obj->id);
}
obj->num_targets = num_targets;
for (i = 0; i < num_targets; i++) {
obj->handles[i] = handles[i];
target = vrend_object_lookup(ctx->sub->object_hash, handles[i], VIRGL_OBJECT_STREAMOUT_TARGET);
if (!target) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_HANDLE, handles[i]);
free(obj);
return;
}
vrend_so_target_reference(&obj->so_targets[i], target);
}
vrend_hw_emit_streamout_targets(ctx, obj);
list_addtail(&obj->head, &ctx->sub->streamout_list);
ctx->sub->current_so = obj;
obj->xfb_state = XFB_STATE_STARTED_NEED_BEGIN;
} else {
if (vrend_state.have_tf2)
glBindTransformFeedback(GL_TRANSFORM_FEEDBACK, 0);
ctx->sub->current_so = NULL;
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
7,143
|
void vrend_set_uniform_buffer(struct vrend_context *ctx,
uint32_t shader,
uint32_t index,
uint32_t offset,
uint32_t length,
uint32_t res_handle)
{
struct vrend_resource *res;
if (res_handle) {
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return;
}
vrend_resource_reference((struct vrend_resource **)&ctx->sub->cbs[shader][index].buffer, res);
ctx->sub->cbs[shader][index].buffer_offset = offset;
ctx->sub->cbs[shader][index].buffer_size = length;
ctx->sub->const_bufs_used_mask[shader] |= (1 << index);
} else {
vrend_resource_reference((struct vrend_resource **)&ctx->sub->cbs[shader][index].buffer, NULL);
ctx->sub->cbs[shader][index].buffer_offset = 0;
ctx->sub->cbs[shader][index].buffer_size = 0;
ctx->sub->const_bufs_used_mask[shader] &= ~(1 << index);
}
}
|
DoS
| 0
|
void vrend_set_uniform_buffer(struct vrend_context *ctx,
uint32_t shader,
uint32_t index,
uint32_t offset,
uint32_t length,
uint32_t res_handle)
{
struct vrend_resource *res;
if (res_handle) {
res = vrend_renderer_ctx_res_lookup(ctx, res_handle);
if (!res) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_RESOURCE, res_handle);
return;
}
vrend_resource_reference((struct vrend_resource **)&ctx->sub->cbs[shader][index].buffer, res);
ctx->sub->cbs[shader][index].buffer_offset = offset;
ctx->sub->cbs[shader][index].buffer_size = length;
ctx->sub->const_bufs_used_mask[shader] |= (1 << index);
} else {
vrend_resource_reference((struct vrend_resource **)&ctx->sub->cbs[shader][index].buffer, NULL);
ctx->sub->cbs[shader][index].buffer_offset = 0;
ctx->sub->cbs[shader][index].buffer_size = 0;
ctx->sub->const_bufs_used_mask[shader] &= ~(1 << index);
}
}
|
@@ -1648,18 +1648,19 @@ int vrend_create_vertex_elements_state(struct vrend_context *ctx,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
- struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
+ struct vrend_vertex_element_array *v;
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
- if (!v)
- return ENOMEM;
-
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
+ v = CALLOC_STRUCT(vrend_vertex_element_array);
+ if (!v)
+ return ENOMEM;
+
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
|
CWE-772
| null | null |
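Note on vrend_set_uniform_buffer above: const_bufs_used_mask keeps one bit per binding index, set on bind and cleared on unbind, so later code can walk only the live slots. A hedged standalone version of the idiom (index is assumed to be below 32, as the uint32_t mask requires):

#include <stdbool.h>
#include <stdint.h>

static void mark_slot(uint32_t *used_mask, unsigned index, bool bound)
{
    if (bound)
        *used_mask |= 1u << index;    /* slot now holds a buffer */
    else
        *used_mask &= ~(1u << index); /* slot is empty again */
}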
7,144
|
int vrend_create_vertex_elements_state(struct vrend_context *ctx,
uint32_t handle,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
if (!v)
return ENOMEM;
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
desc = util_format_description(elements[i].src_format);
if (!desc) {
FREE(v);
return EINVAL;
}
type = GL_FALSE;
if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT) {
if (desc->channel[0].size == 32)
type = GL_FLOAT;
else if (desc->channel[0].size == 64)
type = GL_DOUBLE;
else if (desc->channel[0].size == 16)
type = GL_HALF_FLOAT;
} else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
desc->channel[0].size == 8)
type = GL_UNSIGNED_BYTE;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
desc->channel[0].size == 8)
type = GL_BYTE;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
desc->channel[0].size == 16)
type = GL_UNSIGNED_SHORT;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
desc->channel[0].size == 16)
type = GL_SHORT;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
desc->channel[0].size == 32)
type = GL_UNSIGNED_INT;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
desc->channel[0].size == 32)
type = GL_INT;
else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SSCALED ||
elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SNORM ||
elements[i].src_format == PIPE_FORMAT_B10G10R10A2_SNORM)
type = GL_INT_2_10_10_10_REV;
else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_USCALED ||
elements[i].src_format == PIPE_FORMAT_R10G10B10A2_UNORM ||
elements[i].src_format == PIPE_FORMAT_B10G10R10A2_UNORM)
type = GL_UNSIGNED_INT_2_10_10_10_REV;
else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
type = GL_UNSIGNED_INT_10F_11F_11F_REV;
if (type == GL_FALSE) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_VERTEX_FORMAT, elements[i].src_format);
FREE(v);
return EINVAL;
}
v->elements[i].type = type;
if (desc->channel[0].normalized)
v->elements[i].norm = GL_TRUE;
if (desc->nr_channels == 4 && desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_Z)
v->elements[i].nr_chan = GL_BGRA;
else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
v->elements[i].nr_chan = 3;
else
v->elements[i].nr_chan = desc->nr_channels;
}
if (vrend_state.have_vertex_attrib_binding) {
glGenVertexArrays(1, &v->id);
glBindVertexArray(v->id);
for (i = 0; i < num_elements; i++) {
struct vrend_vertex_element *ve = &v->elements[i];
if (util_format_is_pure_integer(ve->base.src_format))
glVertexAttribIFormat(i, ve->nr_chan, ve->type, ve->base.src_offset);
else
glVertexAttribFormat(i, ve->nr_chan, ve->type, ve->norm, ve->base.src_offset);
glVertexAttribBinding(i, ve->base.vertex_buffer_index);
glVertexBindingDivisor(i, ve->base.instance_divisor);
glEnableVertexAttribArray(i);
}
}
ret_handle = vrend_renderer_object_insert(ctx, v, sizeof(struct vrend_vertex_element), handle,
VIRGL_OBJECT_VERTEX_ELEMENTS);
if (!ret_handle) {
FREE(v);
return ENOMEM;
}
return 0;
}
|
DoS Overflow
| 0
|
int vrend_create_vertex_elements_state(struct vrend_context *ctx,
uint32_t handle,
unsigned num_elements,
const struct pipe_vertex_element *elements)
{
struct vrend_vertex_element_array *v = CALLOC_STRUCT(vrend_vertex_element_array);
const struct util_format_description *desc;
GLenum type;
int i;
uint32_t ret_handle;
if (!v)
return ENOMEM;
if (num_elements > PIPE_MAX_ATTRIBS)
return EINVAL;
v->count = num_elements;
for (i = 0; i < num_elements; i++) {
memcpy(&v->elements[i].base, &elements[i], sizeof(struct pipe_vertex_element));
desc = util_format_description(elements[i].src_format);
if (!desc) {
FREE(v);
return EINVAL;
}
type = GL_FALSE;
if (desc->channel[0].type == UTIL_FORMAT_TYPE_FLOAT) {
if (desc->channel[0].size == 32)
type = GL_FLOAT;
else if (desc->channel[0].size == 64)
type = GL_DOUBLE;
else if (desc->channel[0].size == 16)
type = GL_HALF_FLOAT;
} else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
desc->channel[0].size == 8)
type = GL_UNSIGNED_BYTE;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
desc->channel[0].size == 8)
type = GL_BYTE;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
desc->channel[0].size == 16)
type = GL_UNSIGNED_SHORT;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
desc->channel[0].size == 16)
type = GL_SHORT;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_UNSIGNED &&
desc->channel[0].size == 32)
type = GL_UNSIGNED_INT;
else if (desc->channel[0].type == UTIL_FORMAT_TYPE_SIGNED &&
desc->channel[0].size == 32)
type = GL_INT;
else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SSCALED ||
elements[i].src_format == PIPE_FORMAT_R10G10B10A2_SNORM ||
elements[i].src_format == PIPE_FORMAT_B10G10R10A2_SNORM)
type = GL_INT_2_10_10_10_REV;
else if (elements[i].src_format == PIPE_FORMAT_R10G10B10A2_USCALED ||
elements[i].src_format == PIPE_FORMAT_R10G10B10A2_UNORM ||
elements[i].src_format == PIPE_FORMAT_B10G10R10A2_UNORM)
type = GL_UNSIGNED_INT_2_10_10_10_REV;
else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
type = GL_UNSIGNED_INT_10F_11F_11F_REV;
if (type == GL_FALSE) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_VERTEX_FORMAT, elements[i].src_format);
FREE(v);
return EINVAL;
}
v->elements[i].type = type;
if (desc->channel[0].normalized)
v->elements[i].norm = GL_TRUE;
if (desc->nr_channels == 4 && desc->swizzle[0] == UTIL_FORMAT_SWIZZLE_Z)
v->elements[i].nr_chan = GL_BGRA;
else if (elements[i].src_format == PIPE_FORMAT_R11G11B10_FLOAT)
v->elements[i].nr_chan = 3;
else
v->elements[i].nr_chan = desc->nr_channels;
}
if (vrend_state.have_vertex_attrib_binding) {
glGenVertexArrays(1, &v->id);
glBindVertexArray(v->id);
for (i = 0; i < num_elements; i++) {
struct vrend_vertex_element *ve = &v->elements[i];
if (util_format_is_pure_integer(ve->base.src_format))
glVertexAttribIFormat(i, ve->nr_chan, ve->type, ve->base.src_offset);
else
glVertexAttribFormat(i, ve->nr_chan, ve->type, ve->norm, ve->base.src_offset);
glVertexAttribBinding(i, ve->base.vertex_buffer_index);
glVertexBindingDivisor(i, ve->base.instance_divisor);
glEnableVertexAttribArray(i);
}
}
ret_handle = vrend_renderer_object_insert(ctx, v, sizeof(struct vrend_vertex_element), handle,
VIRGL_OBJECT_VERTEX_ELEMENTS);
if (!ret_handle) {
FREE(v);
return ENOMEM;
}
return 0;
}
|
@@ -2215,6 +2215,15 @@ int vrend_create_shader(struct vrend_context *ctx,
ret = EINVAL;
goto error;
}
+
+ /*make sure no overflow */
+ if (pkt_length * 4 < pkt_length ||
+ pkt_length * 4 + sel->buf_offset < pkt_length * 4 ||
+ pkt_length * 4 + sel->buf_offset < sel->buf_offset) {
+ ret = EINVAL;
+ goto error;
+ }
+
if ((pkt_length * 4 + sel->buf_offset) > sel->buf_len) {
fprintf(stderr, "Got too large shader continuation %d vs %d\n",
pkt_length * 4 + sel->buf_offset, sel->buf_len);
|
CWE-190
| null | null |
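Note on the CWE-190 patch attached to this record: pkt_length and sel->buf_offset are 32-bit values taken from the guest, so pkt_length * 4 + sel->buf_offset can wrap and slip past the > sel->buf_len test, after which the memcpy into sel->tmp_buf overflows the heap buffer. The added checks reject any combination whose intermediate results wrap. An equivalent overflow-safe predicate, with names mirroring the diff but written as illustrative C rather than the virglrenderer source:

#include <stdbool.h>
#include <stdint.h>

static bool continuation_fits(uint32_t pkt_length, uint32_t buf_offset,
                              uint32_t buf_len)
{
    uint32_t bytes;

    if (pkt_length > UINT32_MAX / 4)     /* pkt_length * 4 would wrap */
        return false;
    bytes = pkt_length * 4;
    if (bytes > UINT32_MAX - buf_offset) /* the sum would wrap */
        return false;
    return bytes + buf_offset <= buf_len;
}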
7,145
|
void vrend_set_framebuffer_state(struct vrend_context *ctx,
uint32_t nr_cbufs, uint32_t surf_handle[8],
uint32_t zsurf_handle)
{
struct vrend_surface *surf, *zsurf;
int i;
int old_num;
GLenum status;
GLint new_height = -1;
bool new_ibf = false;
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, ctx->sub->fb_id);
if (zsurf_handle) {
zsurf = vrend_object_lookup(ctx->sub->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
if (!zsurf) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, zsurf_handle);
return;
}
} else
zsurf = NULL;
if (ctx->sub->zsurf != zsurf) {
vrend_surface_reference(&ctx->sub->zsurf, zsurf);
vrend_hw_set_zsurf_texture(ctx);
}
old_num = ctx->sub->nr_cbufs;
ctx->sub->nr_cbufs = nr_cbufs;
ctx->sub->old_nr_cbufs = old_num;
for (i = 0; i < nr_cbufs; i++) {
if (surf_handle[i] != 0) {
surf = vrend_object_lookup(ctx->sub->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
if (!surf) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, surf_handle[i]);
return;
}
} else
surf = NULL;
if (ctx->sub->surf[i] != surf) {
vrend_surface_reference(&ctx->sub->surf[i], surf);
vrend_hw_set_color_surface(ctx, i);
}
}
if (old_num > ctx->sub->nr_cbufs) {
for (i = ctx->sub->nr_cbufs; i < old_num; i++) {
vrend_surface_reference(&ctx->sub->surf[i], NULL);
vrend_hw_set_color_surface(ctx, i);
}
}
/* find a buffer to set fb_height from */
if (ctx->sub->nr_cbufs == 0 && !ctx->sub->zsurf) {
new_height = 0;
new_ibf = false;
} else if (ctx->sub->nr_cbufs == 0) {
new_height = u_minify(ctx->sub->zsurf->texture->base.height0, ctx->sub->zsurf->val0);
new_ibf = ctx->sub->zsurf->texture->y_0_top ? true : false;
}
else {
surf = NULL;
for (i = 0; i < ctx->sub->nr_cbufs; i++) {
if (ctx->sub->surf[i]) {
surf = ctx->sub->surf[i];
break;
}
}
if (surf == NULL) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, i);
return;
}
new_height = u_minify(surf->texture->base.height0, surf->val0);
new_ibf = surf->texture->y_0_top ? true : false;
}
if (new_height != -1) {
if (ctx->sub->fb_height != new_height || ctx->sub->inverted_fbo_content != new_ibf) {
ctx->sub->fb_height = new_height;
ctx->sub->inverted_fbo_content = new_ibf;
ctx->sub->scissor_state_dirty = (1 << 0);
ctx->sub->viewport_state_dirty = (1 << 0);
}
}
vrend_hw_emit_framebuffer_state(ctx);
if (ctx->sub->nr_cbufs > 0 || ctx->sub->zsurf) {
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
fprintf(stderr,"failed to complete framebuffer 0x%x %s\n", status, ctx->debug_name);
}
ctx->sub->shader_dirty = true;
}
|
DoS Overflow
| 0
|
void vrend_set_framebuffer_state(struct vrend_context *ctx,
uint32_t nr_cbufs, uint32_t surf_handle[8],
uint32_t zsurf_handle)
{
struct vrend_surface *surf, *zsurf;
int i;
int old_num;
GLenum status;
GLint new_height = -1;
bool new_ibf = false;
glBindFramebufferEXT(GL_FRAMEBUFFER_EXT, ctx->sub->fb_id);
if (zsurf_handle) {
zsurf = vrend_object_lookup(ctx->sub->object_hash, zsurf_handle, VIRGL_OBJECT_SURFACE);
if (!zsurf) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, zsurf_handle);
return;
}
} else
zsurf = NULL;
if (ctx->sub->zsurf != zsurf) {
vrend_surface_reference(&ctx->sub->zsurf, zsurf);
vrend_hw_set_zsurf_texture(ctx);
}
old_num = ctx->sub->nr_cbufs;
ctx->sub->nr_cbufs = nr_cbufs;
ctx->sub->old_nr_cbufs = old_num;
for (i = 0; i < nr_cbufs; i++) {
if (surf_handle[i] != 0) {
surf = vrend_object_lookup(ctx->sub->object_hash, surf_handle[i], VIRGL_OBJECT_SURFACE);
if (!surf) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, surf_handle[i]);
return;
}
} else
surf = NULL;
if (ctx->sub->surf[i] != surf) {
vrend_surface_reference(&ctx->sub->surf[i], surf);
vrend_hw_set_color_surface(ctx, i);
}
}
if (old_num > ctx->sub->nr_cbufs) {
for (i = ctx->sub->nr_cbufs; i < old_num; i++) {
vrend_surface_reference(&ctx->sub->surf[i], NULL);
vrend_hw_set_color_surface(ctx, i);
}
}
/* find a buffer to set fb_height from */
if (ctx->sub->nr_cbufs == 0 && !ctx->sub->zsurf) {
new_height = 0;
new_ibf = false;
} else if (ctx->sub->nr_cbufs == 0) {
new_height = u_minify(ctx->sub->zsurf->texture->base.height0, ctx->sub->zsurf->val0);
new_ibf = ctx->sub->zsurf->texture->y_0_top ? true : false;
}
else {
surf = NULL;
for (i = 0; i < ctx->sub->nr_cbufs; i++) {
if (ctx->sub->surf[i]) {
surf = ctx->sub->surf[i];
break;
}
}
if (surf == NULL) {
report_context_error(ctx, VIRGL_ERROR_CTX_ILLEGAL_SURFACE, i);
return;
}
new_height = u_minify(surf->texture->base.height0, surf->val0);
new_ibf = surf->texture->y_0_top ? true : false;
}
if (new_height != -1) {
if (ctx->sub->fb_height != new_height || ctx->sub->inverted_fbo_content != new_ibf) {
ctx->sub->fb_height = new_height;
ctx->sub->inverted_fbo_content = new_ibf;
ctx->sub->scissor_state_dirty = (1 << 0);
ctx->sub->viewport_state_dirty = (1 << 0);
}
}
vrend_hw_emit_framebuffer_state(ctx);
if (ctx->sub->nr_cbufs > 0 || ctx->sub->zsurf) {
status = glCheckFramebufferStatus(GL_FRAMEBUFFER);
if (status != GL_FRAMEBUFFER_COMPLETE)
fprintf(stderr,"failed to complete framebuffer 0x%x %s\n", status, ctx->debug_name);
}
ctx->sub->shader_dirty = true;
}
|
@@ -2215,6 +2215,15 @@ int vrend_create_shader(struct vrend_context *ctx,
ret = EINVAL;
goto error;
}
+
+ /*make sure no overflow */
+ if (pkt_length * 4 < pkt_length ||
+ pkt_length * 4 + sel->buf_offset < pkt_length * 4 ||
+ pkt_length * 4 + sel->buf_offset < sel->buf_offset) {
+ ret = EINVAL;
+ goto error;
+ }
+
if ((pkt_length * 4 + sel->buf_offset) > sel->buf_len) {
fprintf(stderr, "Got too large shader continuation %d vs %d\n",
pkt_length * 4 + sel->buf_offset, sel->buf_len);
|
CWE-190
| null | null |
7,146
|
int vrend_create_shader(struct vrend_context *ctx,
uint32_t handle,
const struct pipe_stream_output_info *so_info,
const char *shd_text, uint32_t offlen, uint32_t num_tokens,
uint32_t type, uint32_t pkt_length)
{
struct vrend_shader_selector *sel = NULL;
int ret_handle;
bool new_shader = true, long_shader = false;
bool finished = false;
int ret;
if (type > PIPE_SHADER_GEOMETRY)
return EINVAL;
if (offlen & VIRGL_OBJ_SHADER_OFFSET_CONT)
new_shader = false;
else if (((offlen + 3) / 4) > pkt_length)
long_shader = true;
/* if we have an in progress one - don't allow a new shader
of that type or a different handle. */
if (ctx->sub->long_shader_in_progress_handle[type]) {
if (new_shader == true)
return EINVAL;
if (handle != ctx->sub->long_shader_in_progress_handle[type])
return EINVAL;
}
if (new_shader) {
sel = vrend_create_shader_state(ctx, so_info, type);
if (sel == NULL)
return ENOMEM;
if (long_shader) {
sel->buf_len = ((offlen + 3) / 4) * 4; /* round up buffer size */
sel->tmp_buf = malloc(sel->buf_len);
if (!sel->tmp_buf) {
ret = ENOMEM;
goto error;
}
memcpy(sel->tmp_buf, shd_text, pkt_length * 4);
sel->buf_offset = pkt_length * 4;
ctx->sub->long_shader_in_progress_handle[type] = handle;
} else
finished = true;
} else {
sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
if (!sel) {
fprintf(stderr, "got continuation without original shader %d\n", handle);
ret = EINVAL;
goto error;
}
offlen &= ~VIRGL_OBJ_SHADER_OFFSET_CONT;
if (offlen != sel->buf_offset) {
fprintf(stderr, "Got mismatched shader continuation %d vs %d\n",
offlen, sel->buf_offset);
ret = EINVAL;
goto error;
}
if ((pkt_length * 4 + sel->buf_offset) > sel->buf_len) {
fprintf(stderr, "Got too large shader continuation %d vs %d\n",
pkt_length * 4 + sel->buf_offset, sel->buf_len);
ret = EINVAL;
goto error;
}
memcpy(sel->tmp_buf + sel->buf_offset, shd_text, pkt_length * 4);
sel->buf_offset += pkt_length * 4;
if (sel->buf_offset >= sel->buf_len) {
finished = true;
shd_text = sel->tmp_buf;
}
}
if (finished) {
struct tgsi_token *tokens;
tokens = calloc(num_tokens + 10, sizeof(struct tgsi_token));
if (!tokens) {
ret = ENOMEM;
goto error;
}
if (vrend_dump_shaders)
fprintf(stderr,"shader\n%s\n", shd_text);
if (!tgsi_text_translate((const char *)shd_text, tokens, num_tokens + 10)) {
free(tokens);
ret = EINVAL;
goto error;
}
if (vrend_finish_shader(ctx, sel, tokens)) {
free(tokens);
ret = EINVAL;
goto error;
} else {
free(sel->tmp_buf);
sel->tmp_buf = NULL;
}
free(tokens);
ctx->sub->long_shader_in_progress_handle[type] = 0;
}
if (new_shader) {
ret_handle = vrend_renderer_object_insert(ctx, sel, sizeof(*sel), handle, VIRGL_OBJECT_SHADER);
if (ret_handle == 0) {
ret = ENOMEM;
goto error;
}
}
return 0;
error:
if (new_shader)
vrend_destroy_shader_selector(sel);
else
vrend_renderer_object_destroy(ctx, handle);
return ret;
}
|
DoS
| 0
|
int vrend_create_shader(struct vrend_context *ctx,
uint32_t handle,
const struct pipe_stream_output_info *so_info,
const char *shd_text, uint32_t offlen, uint32_t num_tokens,
uint32_t type, uint32_t pkt_length)
{
struct vrend_shader_selector *sel = NULL;
int ret_handle;
bool new_shader = true, long_shader = false;
bool finished = false;
int ret;
if (type > PIPE_SHADER_GEOMETRY)
return EINVAL;
if (offlen & VIRGL_OBJ_SHADER_OFFSET_CONT)
new_shader = false;
else if (((offlen + 3) / 4) > pkt_length)
long_shader = true;
/* if we have an in progress one - don't allow a new shader
of that type or a different handle. */
if (ctx->sub->long_shader_in_progress_handle[type]) {
if (new_shader == true)
return EINVAL;
if (handle != ctx->sub->long_shader_in_progress_handle[type])
return EINVAL;
}
if (new_shader) {
sel = vrend_create_shader_state(ctx, so_info, type);
if (sel == NULL)
return ENOMEM;
if (long_shader) {
sel->buf_len = ((offlen + 3) / 4) * 4; /* round up buffer size */
sel->tmp_buf = malloc(sel->buf_len);
if (!sel->tmp_buf) {
ret = ENOMEM;
goto error;
}
memcpy(sel->tmp_buf, shd_text, pkt_length * 4);
sel->buf_offset = pkt_length * 4;
ctx->sub->long_shader_in_progress_handle[type] = handle;
} else
finished = true;
} else {
sel = vrend_object_lookup(ctx->sub->object_hash, handle, VIRGL_OBJECT_SHADER);
if (!sel) {
fprintf(stderr, "got continuation without original shader %d\n", handle);
ret = EINVAL;
goto error;
}
offlen &= ~VIRGL_OBJ_SHADER_OFFSET_CONT;
if (offlen != sel->buf_offset) {
fprintf(stderr, "Got mismatched shader continuation %d vs %d\n",
offlen, sel->buf_offset);
ret = EINVAL;
goto error;
}
if ((pkt_length * 4 + sel->buf_offset) > sel->buf_len) {
fprintf(stderr, "Got too large shader continuation %d vs %d\n",
pkt_length * 4 + sel->buf_offset, sel->buf_len);
ret = EINVAL;
goto error;
}
memcpy(sel->tmp_buf + sel->buf_offset, shd_text, pkt_length * 4);
sel->buf_offset += pkt_length * 4;
if (sel->buf_offset >= sel->buf_len) {
finished = true;
shd_text = sel->tmp_buf;
}
}
if (finished) {
struct tgsi_token *tokens;
tokens = calloc(num_tokens + 10, sizeof(struct tgsi_token));
if (!tokens) {
ret = ENOMEM;
goto error;
}
if (vrend_dump_shaders)
fprintf(stderr,"shader\n%s\n", shd_text);
if (!tgsi_text_translate((const char *)shd_text, tokens, num_tokens + 10)) {
free(tokens);
ret = EINVAL;
goto error;
}
if (vrend_finish_shader(ctx, sel, tokens)) {
free(tokens);
ret = EINVAL;
goto error;
} else {
free(sel->tmp_buf);
sel->tmp_buf = NULL;
}
free(tokens);
ctx->sub->long_shader_in_progress_handle[type] = 0;
}
if (new_shader) {
ret_handle = vrend_renderer_object_insert(ctx, sel, sizeof(*sel), handle, VIRGL_OBJECT_SHADER);
if (ret_handle == 0) {
ret = ENOMEM;
goto error;
}
}
return 0;
error:
if (new_shader)
vrend_destroy_shader_selector(sel);
else
vrend_renderer_object_destroy(ctx, handle);
return ret;
}
|
@@ -892,6 +892,7 @@ static struct vrend_linked_shader_program *add_shader_program(struct vrend_conte
fprintf(stderr,"geom shader: %d GLSL\n%s\n", gs->id, gs->glsl_prog);
fprintf(stderr,"frag shader: %d GLSL\n%s\n", fs->id, fs->glsl_prog);
glDeleteProgram(prog_id);
+ free(sprog);
return NULL;
}
|
CWE-772
| null | null |
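Note on the CWE-772 patch attached to this record: add_shader_program allocates sprog, and the link-failure path deleted the GL program but returned NULL without releasing the allocation; the one-line fix frees it. A minimal sketch of the goto-cleanup shape that makes such paths hard to miss (names are illustrative, not the virglrenderer API):

#include <stdlib.h>

struct prog { int id; };

struct prog *make_prog(int (*link_fn)(int))
{
    struct prog *p = calloc(1, sizeof(*p));
    if (!p)
        return NULL;

    p->id = 1;                /* stand-in for glCreateProgram() */
    if (link_fn(p->id) != 0)
        goto err;             /* every failure path funnels here */

    return p;

err:
    free(p);                  /* release what this function allocated */
    return NULL;
}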
7,147
|
void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
bool csum_enable, uint32_t gso_size)
{
struct tcp_hdr l4hdr;
assert(pkt);
/* csum has to be enabled if tso is. */
assert(csum_enable || !tso_enable);
pkt->virt_hdr.gso_type = net_tx_pkt_get_gso_type(pkt, tso_enable);
switch (pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_NONE:
pkt->virt_hdr.hdr_len = 0;
pkt->virt_hdr.gso_size = 0;
break;
case VIRTIO_NET_HDR_GSO_UDP:
pkt->virt_hdr.gso_size = gso_size;
pkt->virt_hdr.hdr_len = pkt->hdr_len + sizeof(struct udp_header);
break;
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_TCPV6:
iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
0, &l4hdr, sizeof(l4hdr));
pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t);
pkt->virt_hdr.gso_size = gso_size;
break;
default:
g_assert_not_reached();
}
if (csum_enable) {
switch (pkt->l4proto) {
case IP_PROTO_TCP:
pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
pkt->virt_hdr.csum_start = pkt->hdr_len;
pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum);
break;
case IP_PROTO_UDP:
pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
pkt->virt_hdr.csum_start = pkt->hdr_len;
pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum);
break;
default:
break;
}
}
}
|
DoS Overflow
| 0
|
void net_tx_pkt_build_vheader(struct NetTxPkt *pkt, bool tso_enable,
bool csum_enable, uint32_t gso_size)
{
struct tcp_hdr l4hdr;
assert(pkt);
/* csum has to be enabled if tso is. */
assert(csum_enable || !tso_enable);
pkt->virt_hdr.gso_type = net_tx_pkt_get_gso_type(pkt, tso_enable);
switch (pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
case VIRTIO_NET_HDR_GSO_NONE:
pkt->virt_hdr.hdr_len = 0;
pkt->virt_hdr.gso_size = 0;
break;
case VIRTIO_NET_HDR_GSO_UDP:
pkt->virt_hdr.gso_size = gso_size;
pkt->virt_hdr.hdr_len = pkt->hdr_len + sizeof(struct udp_header);
break;
case VIRTIO_NET_HDR_GSO_TCPV4:
case VIRTIO_NET_HDR_GSO_TCPV6:
iov_to_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
0, &l4hdr, sizeof(l4hdr));
pkt->virt_hdr.hdr_len = pkt->hdr_len + l4hdr.th_off * sizeof(uint32_t);
pkt->virt_hdr.gso_size = gso_size;
break;
default:
g_assert_not_reached();
}
if (csum_enable) {
switch (pkt->l4proto) {
case IP_PROTO_TCP:
pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
pkt->virt_hdr.csum_start = pkt->hdr_len;
pkt->virt_hdr.csum_offset = offsetof(struct tcp_hdr, th_sum);
break;
case IP_PROTO_UDP:
pkt->virt_hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
pkt->virt_hdr.csum_start = pkt->hdr_len;
pkt->virt_hdr.csum_offset = offsetof(struct udp_hdr, uh_sum);
break;
default:
break;
}
}
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
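Note on the CWE-190 patch attached to the net_tx_pkt_* records: g_malloc((sizeof *p->vec) * (max_frags + NET_TX_PKT_PL_START_FRAG)) performs an unchecked multiplication, so a large max_frags can wrap the size and yield an undersized buffer; g_new(struct iovec, n) routes through GLib's overflow-checked array allocation instead. A hand-written equivalent of that guard, shown for illustration only since g_new performs the check internally:

#include <stdint.h>
#include <stdlib.h>

void *alloc_array(size_t nmemb, size_t size)
{
    if (size != 0 && nmemb > SIZE_MAX / size)
        return NULL;              /* nmemb * size would overflow */
    return malloc(nmemb * size);
}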
7,148
|
static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt)
{
pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +
pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;
}
|
DoS Overflow
| 0
|
static void net_tx_pkt_calculate_hdr_len(struct NetTxPkt *pkt)
{
pkt->hdr_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +
pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,149
|
static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
NetClientState *nc)
{
struct iovec fragment[NET_MAX_FRAG_SG_LIST];
size_t fragment_len = 0;
bool more_frags = false;
/* some pointers for shorter code */
void *l2_iov_base, *l3_iov_base;
size_t l2_iov_len, l3_iov_len;
int src_idx = NET_TX_PKT_PL_START_FRAG, dst_idx;
size_t src_offset = 0;
size_t fragment_offset = 0;
l2_iov_base = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base;
l2_iov_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len;
l3_iov_base = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
l3_iov_len = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;
/* Copy headers */
fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_base = l2_iov_base;
fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_len = l2_iov_len;
fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_base = l3_iov_base;
fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_len = l3_iov_len;
/* Put as much data as possible and send */
do {
fragment_len = net_tx_pkt_fetch_fragment(pkt, &src_idx, &src_offset,
fragment, &dst_idx);
more_frags = (fragment_offset + fragment_len < pkt->payload_len);
eth_setup_ip4_fragmentation(l2_iov_base, l2_iov_len, l3_iov_base,
l3_iov_len, fragment_len, fragment_offset, more_frags);
eth_fix_ip4_checksum(l3_iov_base, l3_iov_len);
net_tx_pkt_sendv(pkt, nc, fragment, dst_idx);
fragment_offset += fragment_len;
} while (fragment_len && more_frags);
return true;
}
|
DoS Overflow
| 0
|
static bool net_tx_pkt_do_sw_fragmentation(struct NetTxPkt *pkt,
NetClientState *nc)
{
struct iovec fragment[NET_MAX_FRAG_SG_LIST];
size_t fragment_len = 0;
bool more_frags = false;
/* some pointers for shorter code */
void *l2_iov_base, *l3_iov_base;
size_t l2_iov_len, l3_iov_len;
int src_idx = NET_TX_PKT_PL_START_FRAG, dst_idx;
size_t src_offset = 0;
size_t fragment_offset = 0;
l2_iov_base = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base;
l2_iov_len = pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len;
l3_iov_base = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
l3_iov_len = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len;
/* Copy headers */
fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_base = l2_iov_base;
fragment[NET_TX_PKT_FRAGMENT_L2_HDR_POS].iov_len = l2_iov_len;
fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_base = l3_iov_base;
fragment[NET_TX_PKT_FRAGMENT_L3_HDR_POS].iov_len = l3_iov_len;
/* Put as much data as possible and send */
do {
fragment_len = net_tx_pkt_fetch_fragment(pkt, &src_idx, &src_offset,
fragment, &dst_idx);
more_frags = (fragment_offset + fragment_len < pkt->payload_len);
eth_setup_ip4_fragmentation(l2_iov_base, l2_iov_len, l3_iov_base,
l3_iov_len, fragment_len, fragment_offset, more_frags);
eth_fix_ip4_checksum(l3_iov_base, l3_iov_len);
net_tx_pkt_sendv(pkt, nc, fragment, dst_idx);
fragment_offset += fragment_len;
} while (fragment_len && more_frags);
return true;
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
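Note on net_tx_pkt_do_sw_fragmentation above: each loop iteration hands eth_setup_ip4_fragmentation the running fragment_offset and a more_frags flag. That helper's body is not shown in this record, but the RFC 791 encoding it would produce packs the offset in 8-byte units together with the MF bit; a hedged sketch of that field computation:

#include <stddef.h>
#include <stdint.h>

#define IP_MF      0x2000u        /* "more fragments" flag */
#define IP_OFFMASK 0x1fffu

static uint16_t ip4_frag_field(size_t frag_offset_bytes, int more_frags)
{
    uint16_t f = (uint16_t)(frag_offset_bytes / 8) & IP_OFFMASK;
    if (more_frags)
        f |= IP_MF;
    return f;                     /* host order; swap before writing out */
}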
7,150
|
void net_tx_pkt_dump(struct NetTxPkt *pkt)
{
#ifdef NET_TX_PKT_DEBUG
assert(pkt);
printf("TX PKT: hdr_len: %d, pkt_type: 0x%X, l2hdr_len: %lu, "
"l3hdr_len: %lu, payload_len: %u\n", pkt->hdr_len, pkt->packet_type,
pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len,
pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len, pkt->payload_len);
#endif
}
|
DoS Overflow
| 0
|
void net_tx_pkt_dump(struct NetTxPkt *pkt)
{
#ifdef NET_TX_PKT_DEBUG
assert(pkt);
printf("TX PKT: hdr_len: %d, pkt_type: 0x%X, l2hdr_len: %lu, "
"l3hdr_len: %lu, payload_len: %u\n", pkt->hdr_len, pkt->packet_type,
pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len,
pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len, pkt->payload_len);
#endif
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,151
|
static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt,
int *src_idx, size_t *src_offset, struct iovec *dst, int *dst_idx)
{
size_t fetched = 0;
struct iovec *src = pkt->vec;
*dst_idx = NET_TX_PKT_FRAGMENT_HEADER_NUM;
while (fetched < IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size)) {
/* no more place in fragment iov */
if (*dst_idx == NET_MAX_FRAG_SG_LIST) {
break;
}
/* no more data in iovec */
if (*src_idx == (pkt->payload_frags + NET_TX_PKT_PL_START_FRAG)) {
break;
}
dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset;
dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset,
IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size) - fetched);
*src_offset += dst[*dst_idx].iov_len;
fetched += dst[*dst_idx].iov_len;
if (*src_offset == src[*src_idx].iov_len) {
*src_offset = 0;
(*src_idx)++;
}
(*dst_idx)++;
}
return fetched;
}
|
DoS Overflow
| 0
|
static size_t net_tx_pkt_fetch_fragment(struct NetTxPkt *pkt,
int *src_idx, size_t *src_offset, struct iovec *dst, int *dst_idx)
{
size_t fetched = 0;
struct iovec *src = pkt->vec;
*dst_idx = NET_TX_PKT_FRAGMENT_HEADER_NUM;
while (fetched < IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size)) {
/* no more place in fragment iov */
if (*dst_idx == NET_MAX_FRAG_SG_LIST) {
break;
}
/* no more data in iovec */
if (*src_idx == (pkt->payload_frags + NET_TX_PKT_PL_START_FRAG)) {
break;
}
dst[*dst_idx].iov_base = src[*src_idx].iov_base + *src_offset;
dst[*dst_idx].iov_len = MIN(src[*src_idx].iov_len - *src_offset,
IP_FRAG_ALIGN_SIZE(pkt->virt_hdr.gso_size) - fetched);
*src_offset += dst[*dst_idx].iov_len;
fetched += dst[*dst_idx].iov_len;
if (*src_offset == src[*src_idx].iov_len) {
*src_offset = 0;
(*src_idx)++;
}
(*dst_idx)++;
}
return fetched;
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,152
|
eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt)
{
assert(pkt);
return pkt->packet_type;
}
|
DoS Overflow
| 0
|
eth_pkt_types_e net_tx_pkt_get_packet_type(struct NetTxPkt *pkt)
{
assert(pkt);
return pkt->packet_type;
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,153
|
size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt)
{
assert(pkt);
return pkt->hdr_len + pkt->payload_len;
}
|
DoS Overflow
| 0
|
size_t net_tx_pkt_get_total_len(struct NetTxPkt *pkt)
{
assert(pkt);
return pkt->hdr_len + pkt->payload_len;
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,154
|
struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt)
{
assert(pkt);
return &pkt->virt_hdr;
}
|
DoS Overflow
| 0
|
struct virtio_net_hdr *net_tx_pkt_get_vhdr(struct NetTxPkt *pkt)
{
assert(pkt);
return &pkt->virt_hdr;
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,155
|
bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt)
{
return pkt->raw_frags > 0;
}
|
DoS Overflow
| 0
|
bool net_tx_pkt_has_fragments(struct NetTxPkt *pkt)
{
return pkt->raw_frags > 0;
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,156
|
bool net_tx_pkt_parse(struct NetTxPkt *pkt)
{
if (net_tx_pkt_parse_headers(pkt)) {
net_tx_pkt_rebuild_payload(pkt);
return true;
} else {
return false;
}
}
|
DoS Overflow
| 0
|
bool net_tx_pkt_parse(struct NetTxPkt *pkt)
{
if (net_tx_pkt_parse_headers(pkt)) {
net_tx_pkt_rebuild_payload(pkt);
return true;
} else {
return false;
}
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,157
|
static bool net_tx_pkt_parse_headers(struct NetTxPkt *pkt)
{
struct iovec *l2_hdr, *l3_hdr;
size_t bytes_read;
size_t full_ip6hdr_len;
uint16_t l3_proto;
assert(pkt);
l2_hdr = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
l3_hdr = &pkt->vec[NET_TX_PKT_L3HDR_FRAG];
bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base,
ETH_MAX_L2_HDR_LEN);
if (bytes_read < sizeof(struct eth_header)) {
l2_hdr->iov_len = 0;
return false;
}
l2_hdr->iov_len = sizeof(struct eth_header);
switch (be16_to_cpu(PKT_GET_ETH_HDR(l2_hdr->iov_base)->h_proto)) {
case ETH_P_VLAN:
l2_hdr->iov_len += sizeof(struct vlan_header);
break;
case ETH_P_DVLAN:
l2_hdr->iov_len += 2 * sizeof(struct vlan_header);
break;
}
if (bytes_read < l2_hdr->iov_len) {
l2_hdr->iov_len = 0;
l3_hdr->iov_len = 0;
pkt->packet_type = ETH_PKT_UCAST;
return false;
} else {
l2_hdr->iov_len = ETH_MAX_L2_HDR_LEN;
l2_hdr->iov_len = eth_get_l2_hdr_length(l2_hdr->iov_base);
pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base);
}
l3_proto = eth_get_l3_proto(l2_hdr, 1, l2_hdr->iov_len);
switch (l3_proto) {
case ETH_P_IP:
bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
l3_hdr->iov_base, sizeof(struct ip_header));
if (bytes_read < sizeof(struct ip_header)) {
l3_hdr->iov_len = 0;
return false;
}
l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base);
if (l3_hdr->iov_len < sizeof(struct ip_header)) {
l3_hdr->iov_len = 0;
return false;
}
pkt->l4proto = ((struct ip_header *) l3_hdr->iov_base)->ip_p;
if (IP_HDR_GET_LEN(l3_hdr->iov_base) != sizeof(struct ip_header)) {
/* copy optional IPv4 header data if any*/
bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags,
l2_hdr->iov_len + sizeof(struct ip_header),
l3_hdr->iov_base + sizeof(struct ip_header),
l3_hdr->iov_len - sizeof(struct ip_header));
if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) {
l3_hdr->iov_len = 0;
return false;
}
}
break;
case ETH_P_IPV6:
{
eth_ip6_hdr_info hdrinfo;
if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
&hdrinfo)) {
l3_hdr->iov_len = 0;
return false;
}
pkt->l4proto = hdrinfo.l4proto;
full_ip6hdr_len = hdrinfo.full_hdr_len;
if (full_ip6hdr_len > ETH_MAX_IP_DGRAM_LEN) {
l3_hdr->iov_len = 0;
return false;
}
bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
l3_hdr->iov_base, full_ip6hdr_len);
if (bytes_read < full_ip6hdr_len) {
l3_hdr->iov_len = 0;
return false;
} else {
l3_hdr->iov_len = full_ip6hdr_len;
}
break;
}
default:
l3_hdr->iov_len = 0;
break;
}
net_tx_pkt_calculate_hdr_len(pkt);
return true;
}
|
DoS Overflow
| 0
|
static bool net_tx_pkt_parse_headers(struct NetTxPkt *pkt)
{
struct iovec *l2_hdr, *l3_hdr;
size_t bytes_read;
size_t full_ip6hdr_len;
uint16_t l3_proto;
assert(pkt);
l2_hdr = &pkt->vec[NET_TX_PKT_L2HDR_FRAG];
l3_hdr = &pkt->vec[NET_TX_PKT_L3HDR_FRAG];
bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, 0, l2_hdr->iov_base,
ETH_MAX_L2_HDR_LEN);
if (bytes_read < sizeof(struct eth_header)) {
l2_hdr->iov_len = 0;
return false;
}
l2_hdr->iov_len = sizeof(struct eth_header);
switch (be16_to_cpu(PKT_GET_ETH_HDR(l2_hdr->iov_base)->h_proto)) {
case ETH_P_VLAN:
l2_hdr->iov_len += sizeof(struct vlan_header);
break;
case ETH_P_DVLAN:
l2_hdr->iov_len += 2 * sizeof(struct vlan_header);
break;
}
if (bytes_read < l2_hdr->iov_len) {
l2_hdr->iov_len = 0;
l3_hdr->iov_len = 0;
pkt->packet_type = ETH_PKT_UCAST;
return false;
} else {
l2_hdr->iov_len = ETH_MAX_L2_HDR_LEN;
l2_hdr->iov_len = eth_get_l2_hdr_length(l2_hdr->iov_base);
pkt->packet_type = get_eth_packet_type(l2_hdr->iov_base);
}
l3_proto = eth_get_l3_proto(l2_hdr, 1, l2_hdr->iov_len);
switch (l3_proto) {
case ETH_P_IP:
bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
l3_hdr->iov_base, sizeof(struct ip_header));
if (bytes_read < sizeof(struct ip_header)) {
l3_hdr->iov_len = 0;
return false;
}
l3_hdr->iov_len = IP_HDR_GET_LEN(l3_hdr->iov_base);
if (l3_hdr->iov_len < sizeof(struct ip_header)) {
l3_hdr->iov_len = 0;
return false;
}
pkt->l4proto = ((struct ip_header *) l3_hdr->iov_base)->ip_p;
if (IP_HDR_GET_LEN(l3_hdr->iov_base) != sizeof(struct ip_header)) {
/* copy optional IPv4 header data if any*/
bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags,
l2_hdr->iov_len + sizeof(struct ip_header),
l3_hdr->iov_base + sizeof(struct ip_header),
l3_hdr->iov_len - sizeof(struct ip_header));
if (bytes_read < l3_hdr->iov_len - sizeof(struct ip_header)) {
l3_hdr->iov_len = 0;
return false;
}
}
break;
case ETH_P_IPV6:
{
eth_ip6_hdr_info hdrinfo;
if (!eth_parse_ipv6_hdr(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
&hdrinfo)) {
l3_hdr->iov_len = 0;
return false;
}
pkt->l4proto = hdrinfo.l4proto;
full_ip6hdr_len = hdrinfo.full_hdr_len;
if (full_ip6hdr_len > ETH_MAX_IP_DGRAM_LEN) {
l3_hdr->iov_len = 0;
return false;
}
bytes_read = iov_to_buf(pkt->raw, pkt->raw_frags, l2_hdr->iov_len,
l3_hdr->iov_base, full_ip6hdr_len);
if (bytes_read < full_ip6hdr_len) {
l3_hdr->iov_len = 0;
return false;
} else {
l3_hdr->iov_len = full_ip6hdr_len;
}
break;
}
default:
l3_hdr->iov_len = 0;
break;
}
net_tx_pkt_calculate_hdr_len(pkt);
return true;
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
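Note on net_tx_pkt_parse_headers above: the IPv4 branch derives the real header length via IP_HDR_GET_LEN and rejects values below sizeof(struct ip_header). The macro's definition is not shown in this record, but the conventional computation reads the IHL nibble and scales it to bytes; an illustrative version:

#include <stdint.h>

/* IPv4 byte 0 is version (high nibble) / IHL (low nibble);
 * IHL counts 32-bit words, so valid headers span 20..60 bytes */
static unsigned ip4_hdr_len(const uint8_t *ip)
{
    return (unsigned)(ip[0] & 0x0f) * 4;
}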
7,158
|
static void net_tx_pkt_rebuild_payload(struct NetTxPkt *pkt)
{
pkt->payload_len = iov_size(pkt->raw, pkt->raw_frags) - pkt->hdr_len;
pkt->payload_frags = iov_copy(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
pkt->max_payload_frags,
pkt->raw, pkt->raw_frags,
pkt->hdr_len, pkt->payload_len);
}
|
DoS Overflow
| 0
|
static void net_tx_pkt_rebuild_payload(struct NetTxPkt *pkt)
{
pkt->payload_len = iov_size(pkt->raw, pkt->raw_frags) - pkt->hdr_len;
pkt->payload_frags = iov_copy(&pkt->vec[NET_TX_PKT_PL_START_FRAG],
pkt->max_payload_frags,
pkt->raw, pkt->raw_frags,
pkt->hdr_len, pkt->payload_len);
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,159
|
void net_tx_pkt_reset(struct NetTxPkt *pkt)
{
int i;
/* no assert, as reset can be called before tx_pkt_init */
if (!pkt) {
return;
}
memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));
assert(pkt->vec);
pkt->payload_len = 0;
pkt->payload_frags = 0;
assert(pkt->raw);
for (i = 0; i < pkt->raw_frags; i++) {
assert(pkt->raw[i].iov_base);
pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base, pkt->raw[i].iov_len,
DMA_DIRECTION_TO_DEVICE, 0);
}
pkt->raw_frags = 0;
pkt->hdr_len = 0;
pkt->l4proto = 0;
}
|
DoS Overflow
| 0
|
void net_tx_pkt_reset(struct NetTxPkt *pkt)
{
int i;
/* no assert, as reset can be called before tx_pkt_init */
if (!pkt) {
return;
}
memset(&pkt->virt_hdr, 0, sizeof(pkt->virt_hdr));
assert(pkt->vec);
pkt->payload_len = 0;
pkt->payload_frags = 0;
assert(pkt->raw);
for (i = 0; i < pkt->raw_frags; i++) {
assert(pkt->raw[i].iov_base);
pci_dma_unmap(pkt->pci_dev, pkt->raw[i].iov_base, pkt->raw[i].iov_len,
DMA_DIRECTION_TO_DEVICE, 0);
}
pkt->raw_frags = 0;
pkt->hdr_len = 0;
pkt->l4proto = 0;
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,160
|
bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
{
assert(pkt);
if (!pkt->has_virt_hdr &&
pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
net_tx_pkt_do_sw_csum(pkt);
}
/*
* Since underlying infrastructure does not support IP datagrams longer
* than 64K we should drop such packets and don't even try to send
*/
if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) {
if (pkt->payload_len >
ETH_MAX_IP_DGRAM_LEN -
pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) {
return false;
}
}
if (pkt->has_virt_hdr ||
pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
net_tx_pkt_sendv(pkt, nc, pkt->vec,
pkt->payload_frags + NET_TX_PKT_PL_START_FRAG);
return true;
}
return net_tx_pkt_do_sw_fragmentation(pkt, nc);
}
|
DoS Overflow
| 0
|
bool net_tx_pkt_send(struct NetTxPkt *pkt, NetClientState *nc)
{
assert(pkt);
if (!pkt->has_virt_hdr &&
pkt->virt_hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
net_tx_pkt_do_sw_csum(pkt);
}
/*
* Since underlying infrastructure does not support IP datagrams longer
* than 64K we should drop such packets and don't even try to send
*/
if (VIRTIO_NET_HDR_GSO_NONE != pkt->virt_hdr.gso_type) {
if (pkt->payload_len >
ETH_MAX_IP_DGRAM_LEN -
pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len) {
return false;
}
}
if (pkt->has_virt_hdr ||
pkt->virt_hdr.gso_type == VIRTIO_NET_HDR_GSO_NONE) {
net_tx_pkt_sendv(pkt, nc, pkt->vec,
pkt->payload_frags + NET_TX_PKT_PL_START_FRAG);
return true;
}
return net_tx_pkt_do_sw_fragmentation(pkt, nc);
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,161
|
bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc)
{
bool res;
pkt->is_loopback = true;
res = net_tx_pkt_send(pkt, nc);
pkt->is_loopback = false;
return res;
}
|
DoS Overflow
| 0
|
bool net_tx_pkt_send_loopback(struct NetTxPkt *pkt, NetClientState *nc)
{
bool res;
pkt->is_loopback = true;
res = net_tx_pkt_send(pkt, nc);
pkt->is_loopback = false;
return res;
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,162
|
static inline void net_tx_pkt_sendv(struct NetTxPkt *pkt,
NetClientState *nc, const struct iovec *iov, int iov_cnt)
{
if (pkt->is_loopback) {
nc->info->receive_iov(nc, iov, iov_cnt);
} else {
qemu_sendv_packet(nc, iov, iov_cnt);
}
}
|
DoS Overflow
| 0
|
static inline void net_tx_pkt_sendv(struct NetTxPkt *pkt,
NetClientState *nc, const struct iovec *iov, int iov_cnt)
{
if (pkt->is_loopback) {
nc->info->receive_iov(nc, iov, iov_cnt);
} else {
qemu_sendv_packet(nc, iov, iov_cnt);
}
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,163
|
void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
uint16_t vlan, uint16_t vlan_ethtype)
{
bool is_new;
assert(pkt);
eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base,
vlan, vlan_ethtype, &is_new);
/* update l2hdrlen */
if (is_new) {
pkt->hdr_len += sizeof(struct vlan_header);
pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +=
sizeof(struct vlan_header);
}
}
|
DoS Overflow
| 0
|
void net_tx_pkt_setup_vlan_header_ex(struct NetTxPkt *pkt,
uint16_t vlan, uint16_t vlan_ethtype)
{
bool is_new;
assert(pkt);
eth_setup_vlan_headers_ex(pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_base,
vlan, vlan_ethtype, &is_new);
/* update l2hdrlen */
if (is_new) {
pkt->hdr_len += sizeof(struct vlan_header);
pkt->vec[NET_TX_PKT_L2HDR_FRAG].iov_len +=
sizeof(struct vlan_header);
}
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,164
|
void net_tx_pkt_uninit(struct NetTxPkt *pkt)
{
if (pkt) {
g_free(pkt->vec);
g_free(pkt->raw);
g_free(pkt);
}
}
|
DoS Overflow
| 0
|
void net_tx_pkt_uninit(struct NetTxPkt *pkt)
{
if (pkt) {
g_free(pkt->vec);
g_free(pkt->raw);
g_free(pkt);
}
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
7,165
|
void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt)
{
uint16_t csum;
uint32_t cntr, cso;
assert(pkt);
uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
void *ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
if (pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len >
ETH_MAX_IP_DGRAM_LEN) {
return;
}
if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
gso_type == VIRTIO_NET_HDR_GSO_UDP) {
/* Calculate IP header checksum */
net_tx_pkt_update_ip_hdr_checksum(pkt);
/* Calculate IP pseudo header checksum */
cntr = eth_calc_ip4_pseudo_hdr_csum(ip_hdr, pkt->payload_len, &cso);
csum = cpu_to_be16(~net_checksum_finish(cntr));
} else if (gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
/* Calculate IP pseudo header checksum */
cntr = eth_calc_ip6_pseudo_hdr_csum(ip_hdr, pkt->payload_len,
IP_PROTO_TCP, &cso);
csum = cpu_to_be16(~net_checksum_finish(cntr));
} else {
return;
}
iov_from_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
pkt->virt_hdr.csum_offset, &csum, sizeof(csum));
}
|
DoS Overflow
| 0
|
void net_tx_pkt_update_ip_checksums(struct NetTxPkt *pkt)
{
uint16_t csum;
uint32_t cntr, cso;
assert(pkt);
uint8_t gso_type = pkt->virt_hdr.gso_type & ~VIRTIO_NET_HDR_GSO_ECN;
void *ip_hdr = pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_base;
if (pkt->payload_len + pkt->vec[NET_TX_PKT_L3HDR_FRAG].iov_len >
ETH_MAX_IP_DGRAM_LEN) {
return;
}
if (gso_type == VIRTIO_NET_HDR_GSO_TCPV4 ||
gso_type == VIRTIO_NET_HDR_GSO_UDP) {
/* Calculate IP header checksum */
net_tx_pkt_update_ip_hdr_checksum(pkt);
/* Calculate IP pseudo header checksum */
cntr = eth_calc_ip4_pseudo_hdr_csum(ip_hdr, pkt->payload_len, &cso);
csum = cpu_to_be16(~net_checksum_finish(cntr));
} else if (gso_type == VIRTIO_NET_HDR_GSO_TCPV6) {
/* Calculate IP pseudo header checksum */
cntr = eth_calc_ip6_pseudo_hdr_csum(ip_hdr, pkt->payload_len,
IP_PROTO_TCP, &cso);
csum = cpu_to_be16(~net_checksum_finish(cntr));
} else {
return;
}
iov_from_buf(&pkt->vec[NET_TX_PKT_PL_START_FRAG], pkt->payload_frags,
pkt->virt_hdr.csum_offset, &csum, sizeof(csum));
}
|
@@ -65,10 +65,9 @@ void net_tx_pkt_init(struct NetTxPkt **pkt, PCIDevice *pci_dev,
p->pci_dev = pci_dev;
- p->vec = g_malloc((sizeof *p->vec) *
- (max_frags + NET_TX_PKT_PL_START_FRAG));
+ p->vec = g_new(struct iovec, max_frags + NET_TX_PKT_PL_START_FRAG);
- p->raw = g_malloc((sizeof *p->raw) * max_frags);
+ p->raw = g_new(struct iovec, max_frags);
p->max_payload_frags = max_frags;
p->max_raw_frags = max_frags;
|
CWE-190
| null | null |
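
net_tx_pkt_update_ip_checksums() above seeds the L4 checksum field with the ones'-complement sum over the IP pseudo header, leaving the payload portion of the sum to be finished elsewhere. A self-contained sketch of the IPv4 pseudo-header fold that eth_calc_ip4_pseudo_hdr_csum() performs conceptually; this is a from-scratch illustration of the RFC 793 layout, not the QEMU implementation:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Accumulate 16-bit big-endian words into a 32-bit sum. */
static uint32_t csum_add(uint32_t acc, const uint8_t *p, size_t len)
{
    while (len > 1) {
        acc += (uint32_t)p[0] << 8 | p[1];
        p += 2;
        len -= 2;
    }
    if (len) {
        acc += (uint32_t)p[0] << 8;   /* odd trailing byte */
    }
    return acc;
}

/* IPv4 pseudo header per RFC 793: source address, destination address,
 * a zero byte, the L4 protocol number, and the L4 segment length. */
static uint16_t ip4_pseudo_hdr_csum(const uint8_t saddr[4],
                                    const uint8_t daddr[4],
                                    uint8_t proto, uint16_t l4_len)
{
    uint8_t pseudo[12];
    uint32_t acc;

    memcpy(&pseudo[0], saddr, 4);
    memcpy(&pseudo[4], daddr, 4);
    pseudo[8]  = 0;
    pseudo[9]  = proto;
    pseudo[10] = l4_len >> 8;
    pseudo[11] = l4_len & 0xff;

    acc = csum_add(0, pseudo, sizeof(pseudo));
    while (acc >> 16) {               /* fold carries into the low word */
        acc = (acc & 0xffff) + (acc >> 16);
    }
    return (uint16_t)~acc;            /* ones'-complement of the sum */
}

int main(void)
{
    const uint8_t src[4] = { 192, 168, 0, 1 };
    const uint8_t dst[4] = { 192, 168, 0, 2 };
    printf("pseudo-header csum: 0x%04x\n",
           ip4_pseudo_hdr_csum(src, dst, 6 /* TCP */, 1460));
    return 0;
}
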
7,166
|
static bool _vmxnet3_assert_interrupt_line(VMXNET3State *s, uint32_t int_idx)
{
PCIDevice *d = PCI_DEVICE(s);
if (s->msix_used && msix_enabled(d)) {
VMW_IRPRN("Sending MSI-X notification for vector %u", int_idx);
msix_notify(d, int_idx);
return false;
}
if (msi_enabled(d)) {
VMW_IRPRN("Sending MSI notification for vector %u", int_idx);
msi_notify(d, int_idx);
return false;
}
VMW_IRPRN("Asserting line for interrupt %u", int_idx);
pci_irq_assert(d);
return true;
}
|
+Info
| 0
|
static bool _vmxnet3_assert_interrupt_line(VMXNET3State *s, uint32_t int_idx)
{
PCIDevice *d = PCI_DEVICE(s);
if (s->msix_used && msix_enabled(d)) {
VMW_IRPRN("Sending MSI-X notification for vector %u", int_idx);
msix_notify(d, int_idx);
return false;
}
if (msi_enabled(d)) {
VMW_IRPRN("Sending MSI notification for vector %u", int_idx);
msi_notify(d, int_idx);
return false;
}
VMW_IRPRN("Asserting line for interrupt %u", int_idx);
pci_irq_assert(d);
return true;
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
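
The patch attached to these vmxnet3 records is a one-line memset() of the on-stack Tx completion descriptor before its fields are assigned. Without it, bit-field remainders and struct padding keep stale stack bytes, and pci_dma_write() copies those bytes into guest-visible memory, which is the CWE-200 information exposure being fixed. A minimal standalone model of the leak and the fix (the struct layout and names here are illustrative):

#include <stdint.h>
#include <string.h>

/* Completion-descriptor-like struct: bit-fields plus possible padding
 * mean that assigning the named fields does NOT initialize every byte. */
struct comp_descr {
    uint32_t idx;
    uint32_t gen : 1;
    uint32_t rsvd : 31;
};

/* Stand-in for pci_dma_write(): copies the raw bytes of the
 * descriptor into guest-visible memory, padding included. */
static void dma_write(uint8_t *guest_mem, const void *src, size_t len)
{
    memcpy(guest_mem, src, len);
}

static void complete_leaky(uint8_t *guest_mem, uint32_t idx, int gen)
{
    struct comp_descr d;            /* uninitialized stack bytes */
    d.idx = idx;
    d.gen = gen;
    /* d.rsvd and any padding still hold stale stack data:
     * those bytes leak to the guest via dma_write(). */
    dma_write(guest_mem, &d, sizeof(d));
}

static void complete_fixed(uint8_t *guest_mem, uint32_t idx, int gen)
{
    struct comp_descr d;
    memset(&d, 0, sizeof(d));       /* the one-line fix in the patch */
    d.idx = idx;
    d.gen = gen;
    dma_write(guest_mem, &d, sizeof(d));
}

int main(void)
{
    uint8_t guest[sizeof(struct comp_descr)];
    complete_leaky(guest, 1, 1);
    complete_fixed(guest, 1, 1);
    return 0;
}
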
7,167
|
static void _vmxnet3_deassert_interrupt_line(VMXNET3State *s, int lidx)
{
PCIDevice *d = PCI_DEVICE(s);
/*
* This function should never be called for MSI(X) interrupts
     * because deassertion is never required for message interrupts
*/
assert(!s->msix_used || !msix_enabled(d));
/*
* This function should never be called for MSI(X) interrupts
     * because deassertion is never required for message interrupts
*/
assert(!msi_enabled(d));
VMW_IRPRN("Deasserting line for interrupt %u", lidx);
pci_irq_deassert(d);
}
|
+Info
| 0
|
static void _vmxnet3_deassert_interrupt_line(VMXNET3State *s, int lidx)
{
PCIDevice *d = PCI_DEVICE(s);
/*
* This function should never be called for MSI(X) interrupts
     * because deassertion is never required for message interrupts
*/
assert(!s->msix_used || !msix_enabled(d));
/*
* This function should never be called for MSI(X) interrupts
     * because deassertion is never required for message interrupts
*/
assert(!msi_enabled(d));
VMW_IRPRN("Deasserting line for interrupt %u", lidx);
pci_irq_deassert(d);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,168
|
static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val)
{
PCIDevice *d = PCI_DEVICE(s);
uint32_t events;
VMW_CBPRN("Clearing events: 0x%x", val);
events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) & ~val;
VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events);
}
|
+Info
| 0
|
static void vmxnet3_ack_events(VMXNET3State *s, uint32_t val)
{
PCIDevice *d = PCI_DEVICE(s);
uint32_t events;
VMW_CBPRN("Clearing events: 0x%x", val);
events = VMXNET3_READ_DRV_SHARED32(d, s->drv_shmem, ecr) & ~val;
VMXNET3_WRITE_DRV_SHARED32(d, s->drv_shmem, ecr, events);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,169
|
vmxnet3_can_receive(NetClientState *nc)
{
VMXNET3State *s = qemu_get_nic_opaque(nc);
return s->device_active &&
VMXNET_FLAG_IS_SET(s->link_status_and_speed, VMXNET3_LINK_STATUS_UP);
}
|
+Info
| 0
|
vmxnet3_can_receive(NetClientState *nc)
{
VMXNET3State *s = qemu_get_nic_opaque(nc);
return s->device_active &&
VMXNET_FLAG_IS_SET(s->link_status_and_speed, VMXNET3_LINK_STATUS_UP);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,170
|
static void vmxnet3_class_init(ObjectClass *class, void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
VMXNET3Class *vc = VMXNET3_DEVICE_CLASS(class);
c->realize = vmxnet3_pci_realize;
c->exit = vmxnet3_pci_uninit;
c->vendor_id = PCI_VENDOR_ID_VMWARE;
c->device_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
c->revision = PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION;
c->romfile = "efi-vmxnet3.rom";
c->class_id = PCI_CLASS_NETWORK_ETHERNET;
c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE;
c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
vc->parent_dc_realize = dc->realize;
dc->realize = vmxnet3_realize;
dc->desc = "VMWare Paravirtualized Ethernet v3";
dc->reset = vmxnet3_qdev_reset;
dc->vmsd = &vmstate_vmxnet3;
dc->props = vmxnet3_properties;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
|
+Info
| 0
|
static void vmxnet3_class_init(ObjectClass *class, void *data)
{
DeviceClass *dc = DEVICE_CLASS(class);
PCIDeviceClass *c = PCI_DEVICE_CLASS(class);
VMXNET3Class *vc = VMXNET3_DEVICE_CLASS(class);
c->realize = vmxnet3_pci_realize;
c->exit = vmxnet3_pci_uninit;
c->vendor_id = PCI_VENDOR_ID_VMWARE;
c->device_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
c->revision = PCI_DEVICE_ID_VMWARE_VMXNET3_REVISION;
c->romfile = "efi-vmxnet3.rom";
c->class_id = PCI_CLASS_NETWORK_ETHERNET;
c->subsystem_vendor_id = PCI_VENDOR_ID_VMWARE;
c->subsystem_id = PCI_DEVICE_ID_VMWARE_VMXNET3;
vc->parent_dc_realize = dc->realize;
dc->realize = vmxnet3_realize;
dc->desc = "VMWare Paravirtualized Ethernet v3";
dc->reset = vmxnet3_qdev_reset;
dc->vmsd = &vmstate_vmxnet3;
dc->props = vmxnet3_properties;
set_bit(DEVICE_CATEGORY_NETWORK, dc->categories);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,171
|
vmxnet3_cleanup_msi(VMXNET3State *s)
{
PCIDevice *d = PCI_DEVICE(s);
msi_uninit(d);
}
|
+Info
| 0
|
vmxnet3_cleanup_msi(VMXNET3State *s)
{
PCIDevice *d = PCI_DEVICE(s);
msi_uninit(d);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,172
|
vmxnet3_cleanup_msix(VMXNET3State *s)
{
PCIDevice *d = PCI_DEVICE(s);
if (s->msix_used) {
vmxnet3_unuse_msix_vectors(s, VMXNET3_MAX_INTRS);
msix_uninit(d, &s->msix_bar, &s->msix_bar);
}
}
|
+Info
| 0
|
vmxnet3_cleanup_msix(VMXNET3State *s)
{
PCIDevice *d = PCI_DEVICE(s);
if (s->msix_used) {
vmxnet3_unuse_msix_vectors(s, VMXNET3_MAX_INTRS);
msix_uninit(d, &s->msix_bar, &s->msix_bar);
}
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,173
|
static void vmxnet3_deactivate_device(VMXNET3State *s)
{
if (s->device_active) {
VMW_CBPRN("Deactivating vmxnet3...");
net_tx_pkt_reset(s->tx_pkt);
net_tx_pkt_uninit(s->tx_pkt);
net_rx_pkt_uninit(s->rx_pkt);
s->device_active = false;
}
}
|
+Info
| 0
|
static void vmxnet3_deactivate_device(VMXNET3State *s)
{
if (s->device_active) {
VMW_CBPRN("Deactivating vmxnet3...");
net_tx_pkt_reset(s->tx_pkt);
net_tx_pkt_uninit(s->tx_pkt);
net_rx_pkt_uninit(s->rx_pkt);
s->device_active = false;
}
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,174
|
vmxnet3_dump_tx_descr(struct Vmxnet3_TxDesc *descr)
{
VMW_PKPRN("TX DESCR: "
"addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, "
"dtype: %d, ext1: %d, msscof: %d, hlen: %d, om: %d, "
"eop: %d, cq: %d, ext2: %d, ti: %d, tci: %d",
le64_to_cpu(descr->addr), descr->len, descr->gen, descr->rsvd,
descr->dtype, descr->ext1, descr->msscof, descr->hlen, descr->om,
descr->eop, descr->cq, descr->ext2, descr->ti, descr->tci);
}
|
+Info
| 0
|
vmxnet3_dump_tx_descr(struct Vmxnet3_TxDesc *descr)
{
VMW_PKPRN("TX DESCR: "
"addr %" PRIx64 ", len: %d, gen: %d, rsvd: %d, "
"dtype: %d, ext1: %d, msscof: %d, hlen: %d, om: %d, "
"eop: %d, cq: %d, ext2: %d, ti: %d, tci: %d",
le64_to_cpu(descr->addr), descr->len, descr->gen, descr->rsvd,
descr->dtype, descr->ext1, descr->msscof, descr->hlen, descr->om,
descr->eop, descr->cq, descr->ext2, descr->ti, descr->tci);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,175
|
vmxnet3_dump_virt_hdr(struct virtio_net_hdr *vhdr)
{
VMW_PKPRN("VHDR: flags 0x%x, gso_type: 0x%x, hdr_len: %d, gso_size: %d, "
"csum_start: %d, csum_offset: %d",
vhdr->flags, vhdr->gso_type, vhdr->hdr_len, vhdr->gso_size,
vhdr->csum_start, vhdr->csum_offset);
}
|
+Info
| 0
|
vmxnet3_dump_virt_hdr(struct virtio_net_hdr *vhdr)
{
VMW_PKPRN("VHDR: flags 0x%x, gso_type: 0x%x, hdr_len: %d, gso_size: %d, "
"csum_start: %d, csum_offset: %d",
vhdr->flags, vhdr->gso_type, vhdr->hdr_len, vhdr->gso_size,
vhdr->csum_start, vhdr->csum_offset);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,176
|
static void vmxnet3_fill_stats(VMXNET3State *s)
{
int i;
PCIDevice *d = PCI_DEVICE(s);
if (!s->device_active)
return;
for (i = 0; i < s->txq_num; i++) {
pci_dma_write(d,
s->txq_descr[i].tx_stats_pa,
&s->txq_descr[i].txq_stats,
sizeof(s->txq_descr[i].txq_stats));
}
for (i = 0; i < s->rxq_num; i++) {
pci_dma_write(d,
s->rxq_descr[i].rx_stats_pa,
&s->rxq_descr[i].rxq_stats,
sizeof(s->rxq_descr[i].rxq_stats));
}
}
|
+Info
| 0
|
static void vmxnet3_fill_stats(VMXNET3State *s)
{
int i;
PCIDevice *d = PCI_DEVICE(s);
if (!s->device_active)
return;
for (i = 0; i < s->txq_num; i++) {
pci_dma_write(d,
s->txq_descr[i].tx_stats_pa,
&s->txq_descr[i].txq_stats,
sizeof(s->txq_descr[i].txq_stats));
}
for (i = 0; i < s->rxq_num; i++) {
pci_dma_write(d,
s->rxq_descr[i].rx_stats_pa,
&s->rxq_descr[i].rxq_stats,
sizeof(s->rxq_descr[i].rxq_stats));
}
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,177
|
static uint64_t vmxnet3_get_command_status(VMXNET3State *s)
{
uint64_t ret;
switch (s->last_command) {
case VMXNET3_CMD_ACTIVATE_DEV:
ret = (s->device_active) ? 0 : 1;
VMW_CFPRN("Device active: %" PRIx64, ret);
break;
case VMXNET3_CMD_RESET_DEV:
case VMXNET3_CMD_QUIESCE_DEV:
case VMXNET3_CMD_GET_QUEUE_STATUS:
case VMXNET3_CMD_GET_DEV_EXTRA_INFO:
ret = 0;
break;
case VMXNET3_CMD_GET_LINK:
ret = s->link_status_and_speed;
VMW_CFPRN("Link and speed: %" PRIx64, ret);
break;
case VMXNET3_CMD_GET_PERM_MAC_LO:
ret = vmxnet3_get_mac_low(&s->perm_mac);
break;
case VMXNET3_CMD_GET_PERM_MAC_HI:
ret = vmxnet3_get_mac_high(&s->perm_mac);
break;
case VMXNET3_CMD_GET_CONF_INTR:
ret = vmxnet3_get_interrupt_config(s);
break;
case VMXNET3_CMD_GET_ADAPTIVE_RING_INFO:
ret = VMXNET3_DISABLE_ADAPTIVE_RING;
break;
case VMXNET3_CMD_GET_DID_LO:
ret = PCI_DEVICE_ID_VMWARE_VMXNET3;
break;
case VMXNET3_CMD_GET_DID_HI:
ret = VMXNET3_DEVICE_REVISION;
break;
default:
VMW_WRPRN("Received request for unknown command: %x", s->last_command);
ret = 0;
break;
}
return ret;
}
|
+Info
| 0
|
static uint64_t vmxnet3_get_command_status(VMXNET3State *s)
{
uint64_t ret;
switch (s->last_command) {
case VMXNET3_CMD_ACTIVATE_DEV:
ret = (s->device_active) ? 0 : 1;
VMW_CFPRN("Device active: %" PRIx64, ret);
break;
case VMXNET3_CMD_RESET_DEV:
case VMXNET3_CMD_QUIESCE_DEV:
case VMXNET3_CMD_GET_QUEUE_STATUS:
case VMXNET3_CMD_GET_DEV_EXTRA_INFO:
ret = 0;
break;
case VMXNET3_CMD_GET_LINK:
ret = s->link_status_and_speed;
VMW_CFPRN("Link and speed: %" PRIx64, ret);
break;
case VMXNET3_CMD_GET_PERM_MAC_LO:
ret = vmxnet3_get_mac_low(&s->perm_mac);
break;
case VMXNET3_CMD_GET_PERM_MAC_HI:
ret = vmxnet3_get_mac_high(&s->perm_mac);
break;
case VMXNET3_CMD_GET_CONF_INTR:
ret = vmxnet3_get_interrupt_config(s);
break;
case VMXNET3_CMD_GET_ADAPTIVE_RING_INFO:
ret = VMXNET3_DISABLE_ADAPTIVE_RING;
break;
case VMXNET3_CMD_GET_DID_LO:
ret = PCI_DEVICE_ID_VMWARE_VMXNET3;
break;
case VMXNET3_CMD_GET_DID_HI:
ret = VMXNET3_DEVICE_REVISION;
break;
default:
VMW_WRPRN("Received request for unknown command: %x", s->last_command);
ret = 0;
break;
}
return ret;
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,178
|
static int vmxnet3_get_int_state(QEMUFile *f, void *pv, size_t size)
{
Vmxnet3IntState *r = pv;
r->is_masked = qemu_get_byte(f);
r->is_pending = qemu_get_byte(f);
r->is_asserted = qemu_get_byte(f);
return 0;
}
|
+Info
| 0
|
static int vmxnet3_get_int_state(QEMUFile *f, void *pv, size_t size)
{
Vmxnet3IntState *r = pv;
r->is_masked = qemu_get_byte(f);
r->is_pending = qemu_get_byte(f);
r->is_asserted = qemu_get_byte(f);
return 0;
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,179
|
static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s)
{
uint32_t interrupt_mode = VMXNET3_IT_AUTO | (VMXNET3_IMM_AUTO << 2);
VMW_CFPRN("Interrupt config is 0x%X", interrupt_mode);
return interrupt_mode;
}
|
+Info
| 0
|
static uint32_t vmxnet3_get_interrupt_config(VMXNET3State *s)
{
uint32_t interrupt_mode = VMXNET3_IT_AUTO | (VMXNET3_IMM_AUTO << 2);
VMW_CFPRN("Interrupt config is 0x%X", interrupt_mode);
return interrupt_mode;
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,180
|
static uint64_t vmxnet3_get_mac_high(MACAddr *addr)
{
return VMXNET3_MAKE_BYTE(0, addr->a[4]) |
VMXNET3_MAKE_BYTE(1, addr->a[5]);
}
|
+Info
| 0
|
static uint64_t vmxnet3_get_mac_high(MACAddr *addr)
{
return VMXNET3_MAKE_BYTE(0, addr->a[4]) |
VMXNET3_MAKE_BYTE(1, addr->a[5]);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,181
|
static uint64_t vmxnet3_get_mac_low(MACAddr *addr)
{
return VMXNET3_MAKE_BYTE(0, addr->a[0]) |
VMXNET3_MAKE_BYTE(1, addr->a[1]) |
VMXNET3_MAKE_BYTE(2, addr->a[2]) |
VMXNET3_MAKE_BYTE(3, addr->a[3]);
}
|
+Info
| 0
|
static uint64_t vmxnet3_get_mac_low(MACAddr *addr)
{
return VMXNET3_MAKE_BYTE(0, addr->a[0]) |
VMXNET3_MAKE_BYTE(1, addr->a[1]) |
VMXNET3_MAKE_BYTE(2, addr->a[2]) |
VMXNET3_MAKE_BYTE(3, addr->a[3]);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
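
vmxnet3_get_mac_low() and vmxnet3_get_mac_high() above split the six-byte MAC across two 32-bit registers: bytes 0-3 in MACL and bytes 4-5 in MACH. Assuming VMXNET3_MAKE_BYTE(n, v) shifts v to byte position n (the macro body is not shown in these records), the packing and the driver-side unpacking look like this sketch:

#include <stdint.h>
#include <stdio.h>

/* Assumed shape of the device-side macro: put one byte at position n. */
#define MAKE_BYTE(n, v) (((uint32_t)((v) & 0xff)) << ((n) * 8))

int main(void)
{
    const uint8_t mac[6] = { 0x00, 0x0c, 0x29, 0xab, 0xcd, 0xef };

    /* Device side: pack into the two register values. */
    uint32_t macl = MAKE_BYTE(0, mac[0]) | MAKE_BYTE(1, mac[1]) |
                    MAKE_BYTE(2, mac[2]) | MAKE_BYTE(3, mac[3]);
    uint32_t mach = MAKE_BYTE(0, mac[4]) | MAKE_BYTE(1, mac[5]);

    /* Driver side: unpack byte by byte. */
    uint8_t out[6];
    for (int i = 0; i < 4; i++) {
        out[i] = (macl >> (i * 8)) & 0xff;
    }
    out[4] = mach & 0xff;
    out[5] = (mach >> 8) & 0xff;

    printf("%02x:%02x:%02x:%02x:%02x:%02x\n",
           out[0], out[1], out[2], out[3], out[4], out[5]);
    return 0;
}
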
7,182
|
vmxnet3_get_next_body_rx_descr(VMXNET3State *s,
struct Vmxnet3_RxDesc *d,
uint32_t *didx,
uint32_t *ridx)
{
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx);
/* Try to find corresponding descriptor in head/body ring */
if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING)) {
/* Only read after generation field verification */
smp_rmb();
/* Re-read to be sure we got the latest version */
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx);
if (d->btype == VMXNET3_RXD_BTYPE_BODY) {
vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING);
*ridx = RX_HEAD_BODY_RING;
return true;
}
}
/*
     * If there are no free descriptors on the head/body ring, or the next
     * free descriptor is a head descriptor, switch to the body-only ring
*/
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx);
/* If no more free descriptors - return */
if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_BODY_ONLY_RING)) {
/* Only read after generation field verification */
smp_rmb();
/* Re-read to be sure we got the latest version */
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx);
assert(d->btype == VMXNET3_RXD_BTYPE_BODY);
*ridx = RX_BODY_ONLY_RING;
vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_BODY_ONLY_RING);
return true;
}
return false;
}
|
+Info
| 0
|
vmxnet3_get_next_body_rx_descr(VMXNET3State *s,
struct Vmxnet3_RxDesc *d,
uint32_t *didx,
uint32_t *ridx)
{
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx);
/* Try to find corresponding descriptor in head/body ring */
if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING)) {
/* Only read after generation field verification */
smp_rmb();
/* Re-read to be sure we got the latest version */
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING, d, didx);
if (d->btype == VMXNET3_RXD_BTYPE_BODY) {
vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING);
*ridx = RX_HEAD_BODY_RING;
return true;
}
}
/*
     * If there are no free descriptors on the head/body ring, or the next
     * free descriptor is a head descriptor, switch to the body-only ring
*/
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx);
/* If no more free descriptors - return */
if (d->gen == vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_BODY_ONLY_RING)) {
/* Only read after generation field verification */
smp_rmb();
/* Re-read to be sure we got the latest version */
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_BODY_ONLY_RING, d, didx);
assert(d->btype == VMXNET3_RXD_BTYPE_BODY);
*ridx = RX_BODY_ONLY_RING;
vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_BODY_ONLY_RING);
return true;
}
return false;
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,183
|
vmxnet3_get_next_head_rx_descr(VMXNET3State *s,
struct Vmxnet3_RxDesc *descr_buf,
uint32_t *descr_idx,
uint32_t *ridx)
{
for (;;) {
uint32_t ring_gen;
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING,
descr_buf, descr_idx);
/* If no more free descriptors - return */
ring_gen = vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING);
if (descr_buf->gen != ring_gen) {
return false;
}
/* Only read after generation field verification */
smp_rmb();
/* Re-read to be sure we got the latest version */
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING,
descr_buf, descr_idx);
/* Mark current descriptor as used/skipped */
vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING);
/* If this is what we are looking for - return */
if (descr_buf->btype == VMXNET3_RXD_BTYPE_HEAD) {
*ridx = RX_HEAD_BODY_RING;
return true;
}
}
}
|
+Info
| 0
|
vmxnet3_get_next_head_rx_descr(VMXNET3State *s,
struct Vmxnet3_RxDesc *descr_buf,
uint32_t *descr_idx,
uint32_t *ridx)
{
for (;;) {
uint32_t ring_gen;
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING,
descr_buf, descr_idx);
/* If no more free descriptors - return */
ring_gen = vmxnet3_get_rx_ring_gen(s, RXQ_IDX, RX_HEAD_BODY_RING);
if (descr_buf->gen != ring_gen) {
return false;
}
/* Only read after generation field verification */
smp_rmb();
/* Re-read to be sure we got the latest version */
vmxnet3_read_next_rx_descr(s, RXQ_IDX, RX_HEAD_BODY_RING,
descr_buf, descr_idx);
/* Mark current descriptor as used/skipped */
vmxnet3_inc_rx_consumption_counter(s, RXQ_IDX, RX_HEAD_BODY_RING);
/* If this is what we are looking for - return */
if (descr_buf->btype == VMXNET3_RXD_BTYPE_HEAD) {
*ridx = RX_HEAD_BODY_RING;
return true;
}
}
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
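
Both descriptor-fetch paths in the two records above use the same lock-free handshake: read the descriptor, compare its generation bit against the ring's expected generation, and only on a match issue smp_rmb() and re-read, so the descriptor body observed is at least as fresh as the generation field that validated it. A compressed model of that discipline, with a C11 acquire fence standing in for smp_rmb() (the ring layout and helper names are illustrative, not the QEMU ones):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

struct descr {
    uint64_t addr;
    uint32_t len;
    uint8_t  gen;       /* flipped by the producer when the slot is filled */
};

struct ring {
    struct descr *base;
    uint32_t size;
    uint32_t next;      /* consumer index */
    uint8_t  gen;       /* generation the consumer expects */
};

/* Returns true and fills *out if a fresh descriptor is available. */
static bool pop_descr(struct ring *r, struct descr *out)
{
    struct descr *slot = &r->base[r->next];

    *out = *slot;
    if (out->gen != r->gen) {
        return false;                     /* producer not done yet */
    }

    /* Generation matched: order the re-read after the gen check,
     * the same role smp_rmb() plays in the device code above. */
    atomic_thread_fence(memory_order_acquire);
    *out = *slot;                         /* re-read the validated slot */

    if (++r->next == r->size) {           /* wrap and flip generation */
        r->next = 0;
        r->gen ^= 1;
    }
    return true;
}
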
7,184
|
vmxnet3_get_next_rx_descr(VMXNET3State *s, bool is_head,
struct Vmxnet3_RxDesc *descr_buf,
uint32_t *descr_idx,
uint32_t *ridx)
{
if (is_head || !s->rx_packets_compound) {
return vmxnet3_get_next_head_rx_descr(s, descr_buf, descr_idx, ridx);
} else {
return vmxnet3_get_next_body_rx_descr(s, descr_buf, descr_idx, ridx);
}
}
|
+Info
| 0
|
vmxnet3_get_next_rx_descr(VMXNET3State *s, bool is_head,
struct Vmxnet3_RxDesc *descr_buf,
uint32_t *descr_idx,
uint32_t *ridx)
{
if (is_head || !s->rx_packets_compound) {
return vmxnet3_get_next_head_rx_descr(s, descr_buf, descr_idx, ridx);
} else {
return vmxnet3_get_next_body_rx_descr(s, descr_buf, descr_idx, ridx);
}
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,185
|
static void vmxnet3_get_rx_stats_from_file(QEMUFile *f,
struct UPT1_RxStats *rx_stat)
{
rx_stat->LROPktsRxOK = qemu_get_be64(f);
rx_stat->LROBytesRxOK = qemu_get_be64(f);
rx_stat->ucastPktsRxOK = qemu_get_be64(f);
rx_stat->ucastBytesRxOK = qemu_get_be64(f);
rx_stat->mcastPktsRxOK = qemu_get_be64(f);
rx_stat->mcastBytesRxOK = qemu_get_be64(f);
rx_stat->bcastPktsRxOK = qemu_get_be64(f);
rx_stat->bcastBytesRxOK = qemu_get_be64(f);
rx_stat->pktsRxOutOfBuf = qemu_get_be64(f);
rx_stat->pktsRxError = qemu_get_be64(f);
}
|
+Info
| 0
|
static void vmxnet3_get_rx_stats_from_file(QEMUFile *f,
struct UPT1_RxStats *rx_stat)
{
rx_stat->LROPktsRxOK = qemu_get_be64(f);
rx_stat->LROBytesRxOK = qemu_get_be64(f);
rx_stat->ucastPktsRxOK = qemu_get_be64(f);
rx_stat->ucastBytesRxOK = qemu_get_be64(f);
rx_stat->mcastPktsRxOK = qemu_get_be64(f);
rx_stat->mcastBytesRxOK = qemu_get_be64(f);
rx_stat->bcastPktsRxOK = qemu_get_be64(f);
rx_stat->bcastBytesRxOK = qemu_get_be64(f);
rx_stat->pktsRxOutOfBuf = qemu_get_be64(f);
rx_stat->pktsRxError = qemu_get_be64(f);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,186
|
static int vmxnet3_get_rxq_descr(QEMUFile *f, void *pv, size_t size)
{
Vmxnet3RxqDescr *r = pv;
int i;
for (i = 0; i < VMXNET3_RX_RINGS_PER_QUEUE; i++) {
vmxnet3_get_ring_from_file(f, &r->rx_ring[i]);
}
vmxnet3_get_ring_from_file(f, &r->comp_ring);
r->intr_idx = qemu_get_byte(f);
r->rx_stats_pa = qemu_get_be64(f);
vmxnet3_get_rx_stats_from_file(f, &r->rxq_stats);
return 0;
}
|
+Info
| 0
|
static int vmxnet3_get_rxq_descr(QEMUFile *f, void *pv, size_t size)
{
Vmxnet3RxqDescr *r = pv;
int i;
for (i = 0; i < VMXNET3_RX_RINGS_PER_QUEUE; i++) {
vmxnet3_get_ring_from_file(f, &r->rx_ring[i]);
}
vmxnet3_get_ring_from_file(f, &r->comp_ring);
r->intr_idx = qemu_get_byte(f);
r->rx_stats_pa = qemu_get_be64(f);
vmxnet3_get_rx_stats_from_file(f, &r->rxq_stats);
return 0;
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,187
|
static void vmxnet3_get_tx_stats_from_file(QEMUFile *f,
struct UPT1_TxStats *tx_stat)
{
tx_stat->TSOPktsTxOK = qemu_get_be64(f);
tx_stat->TSOBytesTxOK = qemu_get_be64(f);
tx_stat->ucastPktsTxOK = qemu_get_be64(f);
tx_stat->ucastBytesTxOK = qemu_get_be64(f);
tx_stat->mcastPktsTxOK = qemu_get_be64(f);
tx_stat->mcastBytesTxOK = qemu_get_be64(f);
tx_stat->bcastPktsTxOK = qemu_get_be64(f);
tx_stat->bcastBytesTxOK = qemu_get_be64(f);
tx_stat->pktsTxError = qemu_get_be64(f);
tx_stat->pktsTxDiscard = qemu_get_be64(f);
}
|
+Info
| 0
|
static void vmxnet3_get_tx_stats_from_file(QEMUFile *f,
struct UPT1_TxStats *tx_stat)
{
tx_stat->TSOPktsTxOK = qemu_get_be64(f);
tx_stat->TSOBytesTxOK = qemu_get_be64(f);
tx_stat->ucastPktsTxOK = qemu_get_be64(f);
tx_stat->ucastBytesTxOK = qemu_get_be64(f);
tx_stat->mcastPktsTxOK = qemu_get_be64(f);
tx_stat->mcastBytesTxOK = qemu_get_be64(f);
tx_stat->bcastPktsTxOK = qemu_get_be64(f);
tx_stat->bcastBytesTxOK = qemu_get_be64(f);
tx_stat->pktsTxError = qemu_get_be64(f);
tx_stat->pktsTxDiscard = qemu_get_be64(f);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,188
|
static int vmxnet3_get_txq_descr(QEMUFile *f, void *pv, size_t size)
{
Vmxnet3TxqDescr *r = pv;
vmxnet3_get_ring_from_file(f, &r->tx_ring);
vmxnet3_get_ring_from_file(f, &r->comp_ring);
r->intr_idx = qemu_get_byte(f);
r->tx_stats_pa = qemu_get_be64(f);
vmxnet3_get_tx_stats_from_file(f, &r->txq_stats);
return 0;
}
|
+Info
| 0
|
static int vmxnet3_get_txq_descr(QEMUFile *f, void *pv, size_t size)
{
Vmxnet3TxqDescr *r = pv;
vmxnet3_get_ring_from_file(f, &r->tx_ring);
vmxnet3_get_ring_from_file(f, &r->comp_ring);
r->intr_idx = qemu_get_byte(f);
r->tx_stats_pa = qemu_get_be64(f);
vmxnet3_get_tx_stats_from_file(f, &r->txq_stats);
return 0;
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,189
|
static void vmxnet3_handle_command(VMXNET3State *s, uint64_t cmd)
{
s->last_command = cmd;
switch (cmd) {
case VMXNET3_CMD_GET_PERM_MAC_HI:
VMW_CBPRN("Set: Get upper part of permanent MAC");
break;
case VMXNET3_CMD_GET_PERM_MAC_LO:
VMW_CBPRN("Set: Get lower part of permanent MAC");
break;
case VMXNET3_CMD_GET_STATS:
VMW_CBPRN("Set: Get device statistics");
vmxnet3_fill_stats(s);
break;
case VMXNET3_CMD_ACTIVATE_DEV:
VMW_CBPRN("Set: Activating vmxnet3 device");
vmxnet3_activate_device(s);
break;
case VMXNET3_CMD_UPDATE_RX_MODE:
VMW_CBPRN("Set: Update rx mode");
vmxnet3_update_rx_mode(s);
break;
case VMXNET3_CMD_UPDATE_VLAN_FILTERS:
VMW_CBPRN("Set: Update VLAN filters");
vmxnet3_update_vlan_filters(s);
break;
case VMXNET3_CMD_UPDATE_MAC_FILTERS:
VMW_CBPRN("Set: Update MAC filters");
vmxnet3_update_mcast_filters(s);
break;
case VMXNET3_CMD_UPDATE_FEATURE:
VMW_CBPRN("Set: Update features");
vmxnet3_update_features(s);
break;
case VMXNET3_CMD_UPDATE_PMCFG:
VMW_CBPRN("Set: Update power management config");
vmxnet3_update_pm_state(s);
break;
case VMXNET3_CMD_GET_LINK:
VMW_CBPRN("Set: Get link");
break;
case VMXNET3_CMD_RESET_DEV:
VMW_CBPRN("Set: Reset device");
vmxnet3_reset(s);
break;
case VMXNET3_CMD_QUIESCE_DEV:
VMW_CBPRN("Set: VMXNET3_CMD_QUIESCE_DEV - deactivate the device");
vmxnet3_deactivate_device(s);
break;
case VMXNET3_CMD_GET_CONF_INTR:
VMW_CBPRN("Set: VMXNET3_CMD_GET_CONF_INTR - interrupt configuration");
break;
case VMXNET3_CMD_GET_ADAPTIVE_RING_INFO:
VMW_CBPRN("Set: VMXNET3_CMD_GET_ADAPTIVE_RING_INFO - "
"adaptive ring info flags");
break;
case VMXNET3_CMD_GET_DID_LO:
VMW_CBPRN("Set: Get lower part of device ID");
break;
case VMXNET3_CMD_GET_DID_HI:
VMW_CBPRN("Set: Get upper part of device ID");
break;
case VMXNET3_CMD_GET_DEV_EXTRA_INFO:
VMW_CBPRN("Set: Get device extra info");
break;
default:
VMW_CBPRN("Received unknown command: %" PRIx64, cmd);
break;
}
}
|
+Info
| 0
|
static void vmxnet3_handle_command(VMXNET3State *s, uint64_t cmd)
{
s->last_command = cmd;
switch (cmd) {
case VMXNET3_CMD_GET_PERM_MAC_HI:
VMW_CBPRN("Set: Get upper part of permanent MAC");
break;
case VMXNET3_CMD_GET_PERM_MAC_LO:
VMW_CBPRN("Set: Get lower part of permanent MAC");
break;
case VMXNET3_CMD_GET_STATS:
VMW_CBPRN("Set: Get device statistics");
vmxnet3_fill_stats(s);
break;
case VMXNET3_CMD_ACTIVATE_DEV:
VMW_CBPRN("Set: Activating vmxnet3 device");
vmxnet3_activate_device(s);
break;
case VMXNET3_CMD_UPDATE_RX_MODE:
VMW_CBPRN("Set: Update rx mode");
vmxnet3_update_rx_mode(s);
break;
case VMXNET3_CMD_UPDATE_VLAN_FILTERS:
VMW_CBPRN("Set: Update VLAN filters");
vmxnet3_update_vlan_filters(s);
break;
case VMXNET3_CMD_UPDATE_MAC_FILTERS:
VMW_CBPRN("Set: Update MAC filters");
vmxnet3_update_mcast_filters(s);
break;
case VMXNET3_CMD_UPDATE_FEATURE:
VMW_CBPRN("Set: Update features");
vmxnet3_update_features(s);
break;
case VMXNET3_CMD_UPDATE_PMCFG:
VMW_CBPRN("Set: Update power management config");
vmxnet3_update_pm_state(s);
break;
case VMXNET3_CMD_GET_LINK:
VMW_CBPRN("Set: Get link");
break;
case VMXNET3_CMD_RESET_DEV:
VMW_CBPRN("Set: Reset device");
vmxnet3_reset(s);
break;
case VMXNET3_CMD_QUIESCE_DEV:
VMW_CBPRN("Set: VMXNET3_CMD_QUIESCE_DEV - deactivate the device");
vmxnet3_deactivate_device(s);
break;
case VMXNET3_CMD_GET_CONF_INTR:
VMW_CBPRN("Set: VMXNET3_CMD_GET_CONF_INTR - interrupt configuration");
break;
case VMXNET3_CMD_GET_ADAPTIVE_RING_INFO:
VMW_CBPRN("Set: VMXNET3_CMD_GET_ADAPTIVE_RING_INFO - "
"adaptive ring info flags");
break;
case VMXNET3_CMD_GET_DID_LO:
VMW_CBPRN("Set: Get lower part of device ID");
break;
case VMXNET3_CMD_GET_DID_HI:
VMW_CBPRN("Set: Get upper part of device ID");
break;
case VMXNET3_CMD_GET_DEV_EXTRA_INFO:
VMW_CBPRN("Set: Get device extra info");
break;
default:
VMW_CBPRN("Received unknown command: %" PRIx64, cmd);
break;
}
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,190
|
vmxnet3_inc_rx_consumption_counter(VMXNET3State *s, int qidx, int ridx)
{
vmxnet3_ring_inc(&s->rxq_descr[qidx].rx_ring[ridx]);
}
|
+Info
| 0
|
vmxnet3_inc_rx_consumption_counter(VMXNET3State *s, int qidx, int ridx)
{
vmxnet3_ring_inc(&s->rxq_descr[qidx].rx_ring[ridx]);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,191
|
vmxnet3_inc_tx_completion_counter(VMXNET3State *s, int qidx)
{
vmxnet3_ring_inc(&s->txq_descr[qidx].comp_ring);
}
|
+Info
| 0
|
vmxnet3_inc_tx_completion_counter(VMXNET3State *s, int qidx)
{
vmxnet3_ring_inc(&s->txq_descr[qidx].comp_ring);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,192
|
vmxnet3_indicate_packet(VMXNET3State *s)
{
struct Vmxnet3_RxDesc rxd;
PCIDevice *d = PCI_DEVICE(s);
bool is_head = true;
uint32_t rxd_idx;
uint32_t rx_ridx = 0;
struct Vmxnet3_RxCompDesc rxcd;
uint32_t new_rxcd_gen = VMXNET3_INIT_GEN;
hwaddr new_rxcd_pa = 0;
hwaddr ready_rxcd_pa = 0;
struct iovec *data = net_rx_pkt_get_iovec(s->rx_pkt);
size_t bytes_copied = 0;
size_t bytes_left = net_rx_pkt_get_total_len(s->rx_pkt);
uint16_t num_frags = 0;
size_t chunk_size;
net_rx_pkt_dump(s->rx_pkt);
while (bytes_left > 0) {
/* cannot add more frags to packet */
if (num_frags == s->max_rx_frags) {
break;
}
new_rxcd_pa = vmxnet3_pop_rxc_descr(s, RXQ_IDX, &new_rxcd_gen);
if (!new_rxcd_pa) {
break;
}
if (!vmxnet3_get_next_rx_descr(s, is_head, &rxd, &rxd_idx, &rx_ridx)) {
break;
}
chunk_size = MIN(bytes_left, rxd.len);
vmxnet3_pci_dma_writev(d, data, bytes_copied,
le64_to_cpu(rxd.addr), chunk_size);
bytes_copied += chunk_size;
bytes_left -= chunk_size;
vmxnet3_dump_rx_descr(&rxd);
if (ready_rxcd_pa != 0) {
pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
}
memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc));
rxcd.rxdIdx = rxd_idx;
rxcd.len = chunk_size;
rxcd.sop = is_head;
rxcd.gen = new_rxcd_gen;
rxcd.rqID = RXQ_IDX + rx_ridx * s->rxq_num;
if (bytes_left == 0) {
vmxnet3_rx_update_descr(s->rx_pkt, &rxcd);
}
VMW_RIPRN("RX Completion descriptor: rxRing: %lu rxIdx %lu len %lu "
"sop %d csum_correct %lu",
(unsigned long) rx_ridx,
(unsigned long) rxcd.rxdIdx,
(unsigned long) rxcd.len,
(int) rxcd.sop,
(unsigned long) rxcd.tuc);
is_head = false;
ready_rxcd_pa = new_rxcd_pa;
new_rxcd_pa = 0;
num_frags++;
}
if (ready_rxcd_pa != 0) {
rxcd.eop = 1;
rxcd.err = (bytes_left != 0);
pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
/* Flush RX descriptor changes */
smp_wmb();
}
if (new_rxcd_pa != 0) {
vmxnet3_revert_rxc_descr(s, RXQ_IDX);
}
vmxnet3_trigger_interrupt(s, s->rxq_descr[RXQ_IDX].intr_idx);
if (bytes_left == 0) {
vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_OK);
return true;
} else if (num_frags == s->max_rx_frags) {
vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_ERROR);
return false;
} else {
vmxnet3_on_rx_done_update_stats(s, RXQ_IDX,
VMXNET3_PKT_STATUS_OUT_OF_BUF);
return false;
}
}
|
+Info
| 0
|
vmxnet3_indicate_packet(VMXNET3State *s)
{
struct Vmxnet3_RxDesc rxd;
PCIDevice *d = PCI_DEVICE(s);
bool is_head = true;
uint32_t rxd_idx;
uint32_t rx_ridx = 0;
struct Vmxnet3_RxCompDesc rxcd;
uint32_t new_rxcd_gen = VMXNET3_INIT_GEN;
hwaddr new_rxcd_pa = 0;
hwaddr ready_rxcd_pa = 0;
struct iovec *data = net_rx_pkt_get_iovec(s->rx_pkt);
size_t bytes_copied = 0;
size_t bytes_left = net_rx_pkt_get_total_len(s->rx_pkt);
uint16_t num_frags = 0;
size_t chunk_size;
net_rx_pkt_dump(s->rx_pkt);
while (bytes_left > 0) {
/* cannot add more frags to packet */
if (num_frags == s->max_rx_frags) {
break;
}
new_rxcd_pa = vmxnet3_pop_rxc_descr(s, RXQ_IDX, &new_rxcd_gen);
if (!new_rxcd_pa) {
break;
}
if (!vmxnet3_get_next_rx_descr(s, is_head, &rxd, &rxd_idx, &rx_ridx)) {
break;
}
chunk_size = MIN(bytes_left, rxd.len);
vmxnet3_pci_dma_writev(d, data, bytes_copied,
le64_to_cpu(rxd.addr), chunk_size);
bytes_copied += chunk_size;
bytes_left -= chunk_size;
vmxnet3_dump_rx_descr(&rxd);
if (ready_rxcd_pa != 0) {
pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
}
memset(&rxcd, 0, sizeof(struct Vmxnet3_RxCompDesc));
rxcd.rxdIdx = rxd_idx;
rxcd.len = chunk_size;
rxcd.sop = is_head;
rxcd.gen = new_rxcd_gen;
rxcd.rqID = RXQ_IDX + rx_ridx * s->rxq_num;
if (bytes_left == 0) {
vmxnet3_rx_update_descr(s->rx_pkt, &rxcd);
}
VMW_RIPRN("RX Completion descriptor: rxRing: %lu rxIdx %lu len %lu "
"sop %d csum_correct %lu",
(unsigned long) rx_ridx,
(unsigned long) rxcd.rxdIdx,
(unsigned long) rxcd.len,
(int) rxcd.sop,
(unsigned long) rxcd.tuc);
is_head = false;
ready_rxcd_pa = new_rxcd_pa;
new_rxcd_pa = 0;
num_frags++;
}
if (ready_rxcd_pa != 0) {
rxcd.eop = 1;
rxcd.err = (bytes_left != 0);
pci_dma_write(d, ready_rxcd_pa, &rxcd, sizeof(rxcd));
/* Flush RX descriptor changes */
smp_wmb();
}
if (new_rxcd_pa != 0) {
vmxnet3_revert_rxc_descr(s, RXQ_IDX);
}
vmxnet3_trigger_interrupt(s, s->rxq_descr[RXQ_IDX].intr_idx);
if (bytes_left == 0) {
vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_OK);
return true;
} else if (num_frags == s->max_rx_frags) {
vmxnet3_on_rx_done_update_stats(s, RXQ_IDX, VMXNET3_PKT_STATUS_ERROR);
return false;
} else {
vmxnet3_on_rx_done_update_stats(s, RXQ_IDX,
VMXNET3_PKT_STATUS_OUT_OF_BUF);
return false;
}
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
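
vmxnet3_indicate_packet() above scatters one received packet across as many RX buffers as it can pop, marking the first completion sop, the last eop, and setting err when descriptors run out before the payload does. The bookkeeping reduces to this small model (buffers, DMA, and the completion-ring write-back are elided; the names are illustrative):

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define MIN(a, b) ((a) < (b) ? (a) : (b))

struct completion {
    size_t len;
    bool sop;   /* start of packet */
    bool eop;   /* end of packet */
    bool err;
};

/* Scatter 'total' payload bytes across buffers of 'buf_len' each,
 * up to 'max_frags' fragments, mirroring the sop/eop bookkeeping of
 * the indication loop above. */
static size_t scatter(size_t total, size_t buf_len, size_t max_frags,
                      struct completion *out)
{
    size_t left = total, frags = 0;
    bool is_head = true;

    while (left > 0 && frags < max_frags) {
        size_t chunk = MIN(left, buf_len);
        out[frags].len = chunk;
        out[frags].sop = is_head;
        out[frags].eop = false;
        out[frags].err = false;
        left -= chunk;
        is_head = false;
        frags++;
    }
    if (frags > 0) {
        out[frags - 1].eop = true;
        out[frags - 1].err = (left != 0);   /* ran out of descriptors */
    }
    return frags;
}

int main(void)
{
    struct completion c[4];
    size_t n = scatter(5000, 2048, 4, c);   /* 2048 + 2048 + 904 */
    for (size_t i = 0; i < n; i++) {
        printf("frag %zu: len=%zu sop=%d eop=%d err=%d\n",
               i, c[i].len, c[i].sop, c[i].eop, c[i].err);
    }
    return 0;
}
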
7,193
|
static void vmxnet3_instance_init(Object *obj)
{
VMXNET3State *s = VMXNET3(obj);
device_add_bootindex_property(obj, &s->conf.bootindex,
"bootindex", "/ethernet-phy@0",
DEVICE(obj), NULL);
}
|
+Info
| 0
|
static void vmxnet3_instance_init(Object *obj)
{
VMXNET3State *s = VMXNET3(obj);
device_add_bootindex_property(obj, &s->conf.bootindex,
"bootindex", "/ethernet-phy@0",
DEVICE(obj), NULL);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,194
|
static bool vmxnet3_interrupt_asserted(VMXNET3State *s, int lidx)
{
return s->interrupt_states[lidx].is_asserted;
}
|
+Info
| 0
|
static bool vmxnet3_interrupt_asserted(VMXNET3State *s, int lidx)
{
return s->interrupt_states[lidx].is_asserted;
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,195
|
vmxnet3_io_bar0_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
VMXNET3State *s = opaque;
if (!s->device_active) {
return;
}
if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_TXPROD,
VMXNET3_DEVICE_MAX_TX_QUEUES, VMXNET3_REG_ALIGN)) {
int tx_queue_idx =
VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_TXPROD,
VMXNET3_REG_ALIGN);
assert(tx_queue_idx <= s->txq_num);
vmxnet3_process_tx_queue(s, tx_queue_idx);
return;
}
if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR,
VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) {
int l = VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_IMR,
VMXNET3_REG_ALIGN);
VMW_CBPRN("Interrupt mask for line %d written: 0x%" PRIx64, l, val);
vmxnet3_on_interrupt_mask_changed(s, l, val);
return;
}
if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD,
VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN) ||
VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD2,
VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN)) {
return;
}
VMW_WRPRN("BAR0 unknown write [%" PRIx64 "] = %" PRIx64 ", size %d",
(uint64_t) addr, val, size);
}
|
+Info
| 0
|
vmxnet3_io_bar0_write(void *opaque, hwaddr addr,
uint64_t val, unsigned size)
{
VMXNET3State *s = opaque;
if (!s->device_active) {
return;
}
if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_TXPROD,
VMXNET3_DEVICE_MAX_TX_QUEUES, VMXNET3_REG_ALIGN)) {
int tx_queue_idx =
VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_TXPROD,
VMXNET3_REG_ALIGN);
assert(tx_queue_idx <= s->txq_num);
vmxnet3_process_tx_queue(s, tx_queue_idx);
return;
}
if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_IMR,
VMXNET3_MAX_INTRS, VMXNET3_REG_ALIGN)) {
int l = VMW_MULTIREG_IDX_BY_ADDR(addr, VMXNET3_REG_IMR,
VMXNET3_REG_ALIGN);
VMW_CBPRN("Interrupt mask for line %d written: 0x%" PRIx64, l, val);
vmxnet3_on_interrupt_mask_changed(s, l, val);
return;
}
if (VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD,
VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN) ||
VMW_IS_MULTIREG_ADDR(addr, VMXNET3_REG_RXPROD2,
VMXNET3_DEVICE_MAX_RX_QUEUES, VMXNET3_REG_ALIGN)) {
return;
}
VMW_WRPRN("BAR0 unknown write [%" PRIx64 "] = %" PRIx64 ", size %d",
(uint64_t) addr, val, size);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
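
vmxnet3_io_bar0_write() above decodes banked registers with VMW_IS_MULTIREG_ADDR / VMW_MULTIREG_IDX_BY_ADDR: a run of per-queue registers starting at a base address, one every VMXNET3_REG_ALIGN bytes. A plausible shape for those helpers, reconstructed from how they are called here rather than from the vmware_utils header:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* True if 'addr' falls on one of 'count' registers spaced 'align'
 * bytes apart starting at 'base'. */
static bool in_multireg(uint64_t addr, uint64_t base, int count, int align)
{
    return addr >= base &&
           addr < base + (uint64_t)count * align &&
           (addr - base) % align == 0;
}

/* Index of the register within the bank. */
static int multireg_idx(uint64_t addr, uint64_t base, int align)
{
    return (int)((addr - base) / align);
}

int main(void)
{
    const uint64_t TXPROD = 0x600;   /* illustrative base, not the real one */
    const int ALIGN = 8, QUEUES = 8;

    uint64_t addr = TXPROD + 3 * ALIGN;
    if (in_multireg(addr, TXPROD, QUEUES, ALIGN)) {
        printf("TXPROD write for queue %d\n",
               multireg_idx(addr, TXPROD, ALIGN));
    }
    return 0;
}
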
7,196
|
vmxnet3_io_bar1_read(void *opaque, hwaddr addr, unsigned size)
{
VMXNET3State *s = opaque;
uint64_t ret = 0;
switch (addr) {
/* Vmxnet3 Revision Report Selection */
case VMXNET3_REG_VRRS:
VMW_CBPRN("Read BAR1 [VMXNET3_REG_VRRS], size %d", size);
ret = VMXNET3_DEVICE_REVISION;
break;
/* UPT Version Report Selection */
case VMXNET3_REG_UVRS:
VMW_CBPRN("Read BAR1 [VMXNET3_REG_UVRS], size %d", size);
ret = VMXNET3_UPT_REVISION;
break;
/* Command */
case VMXNET3_REG_CMD:
VMW_CBPRN("Read BAR1 [VMXNET3_REG_CMD], size %d", size);
ret = vmxnet3_get_command_status(s);
break;
/* MAC Address Low */
case VMXNET3_REG_MACL:
VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACL], size %d", size);
ret = vmxnet3_get_mac_low(&s->conf.macaddr);
break;
/* MAC Address High */
case VMXNET3_REG_MACH:
VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACH], size %d", size);
ret = vmxnet3_get_mac_high(&s->conf.macaddr);
break;
/*
* Interrupt Cause Register
* Used for legacy interrupts only so interrupt index always 0
*/
case VMXNET3_REG_ICR:
VMW_CBPRN("Read BAR1 [VMXNET3_REG_ICR], size %d", size);
if (vmxnet3_interrupt_asserted(s, 0)) {
vmxnet3_clear_interrupt(s, 0);
ret = true;
} else {
ret = false;
}
break;
default:
VMW_CBPRN("Unknow read BAR1[%" PRIx64 "], %d bytes", addr, size);
break;
}
return ret;
}
|
+Info
| 0
|
vmxnet3_io_bar1_read(void *opaque, hwaddr addr, unsigned size)
{
VMXNET3State *s = opaque;
uint64_t ret = 0;
switch (addr) {
/* Vmxnet3 Revision Report Selection */
case VMXNET3_REG_VRRS:
VMW_CBPRN("Read BAR1 [VMXNET3_REG_VRRS], size %d", size);
ret = VMXNET3_DEVICE_REVISION;
break;
/* UPT Version Report Selection */
case VMXNET3_REG_UVRS:
VMW_CBPRN("Read BAR1 [VMXNET3_REG_UVRS], size %d", size);
ret = VMXNET3_UPT_REVISION;
break;
/* Command */
case VMXNET3_REG_CMD:
VMW_CBPRN("Read BAR1 [VMXNET3_REG_CMD], size %d", size);
ret = vmxnet3_get_command_status(s);
break;
/* MAC Address Low */
case VMXNET3_REG_MACL:
VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACL], size %d", size);
ret = vmxnet3_get_mac_low(&s->conf.macaddr);
break;
/* MAC Address High */
case VMXNET3_REG_MACH:
VMW_CBPRN("Read BAR1 [VMXNET3_REG_MACH], size %d", size);
ret = vmxnet3_get_mac_high(&s->conf.macaddr);
break;
/*
* Interrupt Cause Register
* Used for legacy interrupts only so interrupt index always 0
*/
case VMXNET3_REG_ICR:
VMW_CBPRN("Read BAR1 [VMXNET3_REG_ICR], size %d", size);
if (vmxnet3_interrupt_asserted(s, 0)) {
vmxnet3_clear_interrupt(s, 0);
ret = true;
} else {
ret = false;
}
break;
default:
VMW_CBPRN("Unknow read BAR1[%" PRIx64 "], %d bytes", addr, size);
break;
}
return ret;
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,197
|
vmxnet3_io_bar1_write(void *opaque,
hwaddr addr,
uint64_t val,
unsigned size)
{
VMXNET3State *s = opaque;
switch (addr) {
/* Vmxnet3 Revision Report Selection */
case VMXNET3_REG_VRRS:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_VRRS] = %" PRIx64 ", size %d",
val, size);
break;
/* UPT Version Report Selection */
case VMXNET3_REG_UVRS:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_UVRS] = %" PRIx64 ", size %d",
val, size);
break;
/* Driver Shared Address Low */
case VMXNET3_REG_DSAL:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAL] = %" PRIx64 ", size %d",
val, size);
/*
         * The guest driver first writes the low part of the shared
         * memory address. We save it in a temp variable and set the
         * shared address only after we get the high part
*/
if (val == 0) {
vmxnet3_deactivate_device(s);
}
s->temp_shared_guest_driver_memory = val;
s->drv_shmem = 0;
break;
/* Driver Shared Address High */
case VMXNET3_REG_DSAH:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAH] = %" PRIx64 ", size %d",
val, size);
/*
* Set the shared memory between guest driver and device.
         * We should already have the low address part.
*/
s->drv_shmem = s->temp_shared_guest_driver_memory | (val << 32);
break;
/* Command */
case VMXNET3_REG_CMD:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_CMD] = %" PRIx64 ", size %d",
val, size);
vmxnet3_handle_command(s, val);
break;
/* MAC Address Low */
case VMXNET3_REG_MACL:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACL] = %" PRIx64 ", size %d",
val, size);
s->temp_mac = val;
break;
/* MAC Address High */
case VMXNET3_REG_MACH:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACH] = %" PRIx64 ", size %d",
val, size);
vmxnet3_set_variable_mac(s, val, s->temp_mac);
break;
/* Interrupt Cause Register */
case VMXNET3_REG_ICR:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_ICR] = %" PRIx64 ", size %d",
val, size);
g_assert_not_reached();
break;
/* Event Cause Register */
case VMXNET3_REG_ECR:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_ECR] = %" PRIx64 ", size %d",
val, size);
vmxnet3_ack_events(s, val);
break;
default:
VMW_CBPRN("Unknown Write to BAR1 [%" PRIx64 "] = %" PRIx64 ", size %d",
addr, val, size);
break;
}
}
|
+Info
| 0
|
vmxnet3_io_bar1_write(void *opaque,
hwaddr addr,
uint64_t val,
unsigned size)
{
VMXNET3State *s = opaque;
switch (addr) {
/* Vmxnet3 Revision Report Selection */
case VMXNET3_REG_VRRS:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_VRRS] = %" PRIx64 ", size %d",
val, size);
break;
/* UPT Version Report Selection */
case VMXNET3_REG_UVRS:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_UVRS] = %" PRIx64 ", size %d",
val, size);
break;
/* Driver Shared Address Low */
case VMXNET3_REG_DSAL:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAL] = %" PRIx64 ", size %d",
val, size);
/*
         * The guest driver first writes the low part of the shared
         * memory address. We save it in a temp variable and set the
         * shared address only after we get the high part
*/
if (val == 0) {
vmxnet3_deactivate_device(s);
}
s->temp_shared_guest_driver_memory = val;
s->drv_shmem = 0;
break;
/* Driver Shared Address High */
case VMXNET3_REG_DSAH:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_DSAH] = %" PRIx64 ", size %d",
val, size);
/*
* Set the shared memory between guest driver and device.
         * We should already have the low address part.
*/
s->drv_shmem = s->temp_shared_guest_driver_memory | (val << 32);
break;
/* Command */
case VMXNET3_REG_CMD:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_CMD] = %" PRIx64 ", size %d",
val, size);
vmxnet3_handle_command(s, val);
break;
/* MAC Address Low */
case VMXNET3_REG_MACL:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACL] = %" PRIx64 ", size %d",
val, size);
s->temp_mac = val;
break;
/* MAC Address High */
case VMXNET3_REG_MACH:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_MACH] = %" PRIx64 ", size %d",
val, size);
vmxnet3_set_variable_mac(s, val, s->temp_mac);
break;
/* Interrupt Cause Register */
case VMXNET3_REG_ICR:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_ICR] = %" PRIx64 ", size %d",
val, size);
g_assert_not_reached();
break;
/* Event Cause Register */
case VMXNET3_REG_ECR:
VMW_CBPRN("Write BAR1 [VMXNET3_REG_ECR] = %" PRIx64 ", size %d",
val, size);
vmxnet3_ack_events(s, val);
break;
default:
VMW_CBPRN("Unknown Write to BAR1 [%" PRIx64 "] = %" PRIx64 ", size %d",
addr, val, size);
break;
}
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
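
The DSAL/DSAH pair in vmxnet3_io_bar1_write() above is the classic two-register latch for passing a 64-bit guest-physical address through 32-bit BAR writes: the low half is staged in a temporary, and the address only becomes live when the high half arrives. A minimal model of that state machine (the names are illustrative):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct dev {
    uint64_t temp_lo;   /* staged low 32 bits */
    uint64_t shmem;     /* live shared-memory address */
    bool active;
};

static void write_dsal(struct dev *d, uint32_t val)
{
    if (val == 0) {
        d->active = false;     /* a zero low half deactivates, as above */
    }
    d->temp_lo = val;
    d->shmem = 0;              /* incomplete until the high half arrives */
}

static void write_dsah(struct dev *d, uint32_t val)
{
    d->shmem = d->temp_lo | ((uint64_t)val << 32);
}

int main(void)
{
    struct dev d = { 0, 0, true };
    write_dsal(&d, 0x89abc000u);
    write_dsah(&d, 0x00000001u);
    printf("shared memory at 0x%016llx\n",
           (unsigned long long)d.shmem);   /* 0x0000000189abc000 */
    return 0;
}
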
7,198
|
vmxnet3_is_allowed_mcast_group(VMXNET3State *s, const uint8_t *group_mac)
{
int i;
for (i = 0; i < s->mcast_list_len; i++) {
if (!memcmp(group_mac, s->mcast_list[i].a, sizeof(s->mcast_list[i]))) {
return true;
}
}
return false;
}
|
+Info
| 0
|
vmxnet3_is_allowed_mcast_group(VMXNET3State *s, const uint8_t *group_mac)
{
int i;
for (i = 0; i < s->mcast_list_len; i++) {
if (!memcmp(group_mac, s->mcast_list[i].a, sizeof(s->mcast_list[i]))) {
return true;
}
}
return false;
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |
7,199
|
vmxnet3_is_registered_vlan(VMXNET3State *s, const void *data)
{
uint16_t vlan_tag = eth_get_pkt_tci(data) & VLAN_VID_MASK;
if (IS_SPECIAL_VLAN_ID(vlan_tag)) {
return true;
}
return VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, vlan_tag);
}
|
+Info
| 0
|
vmxnet3_is_registered_vlan(VMXNET3State *s, const void *data)
{
uint16_t vlan_tag = eth_get_pkt_tci(data) & VLAN_VID_MASK;
if (IS_SPECIAL_VLAN_ID(vlan_tag)) {
return true;
}
return VMXNET3_VFTABLE_ENTRY_IS_SET(s->vlan_table, vlan_tag);
}
|
@@ -531,6 +531,7 @@ static void vmxnet3_complete_packet(VMXNET3State *s, int qidx, uint32_t tx_ridx)
VMXNET3_RING_DUMP(VMW_RIPRN, "TXC", qidx, &s->txq_descr[qidx].comp_ring);
+ memset(&txcq_descr, 0, sizeof(txcq_descr));
txcq_descr.txdIdx = tx_ridx;
txcq_descr.gen = vmxnet3_ring_curr_gen(&s->txq_descr[qidx].comp_ring);
|
CWE-200
| null | null |