id stringlengths 22 26 | content stringlengths 72 142k |
|---|---|
devign_test_set_data_9282 | static int request_frame(AVFilterLink *link)
{
AVFilterContext *ctx = link->src;
IDETContext *idet = ctx->priv;
do {
int ret;
if (idet->eof)
return AVERROR_EOF;
ret = ff_request_frame(link->src->inputs[0]);
if (ret == AVERROR_EOF && idet->cur) {
AVFrame *next = av_frame_clone(idet->next);
if (!next)
return AVERROR(ENOMEM);
filter_frame(link->src->inputs[0], next);
idet->eof = 1;
} else if (ret < 0) {
return ret;
}
} while (!idet->cur);
return 0;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9283 | static int dpcm_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
uint8_t *buf, int buf_size)
{
DPCMContext *s = avctx->priv_data;
int in, out = 0;
int predictor[2];
int channel_number = 0;
short *output_samples = data;
int shift[2];
unsigned char byte;
short diff;
if (!buf_size)
return 0;
switch(avctx->codec->id) {
case CODEC_ID_ROQ_DPCM:
if (s->channels == 1)
predictor[0] = AV_RL16(&buf[6]);
else {
predictor[0] = buf[7] << 8;
predictor[1] = buf[6] << 8;
}
SE_16BIT(predictor[0]);
SE_16BIT(predictor[1]);
/* decode the samples */
for (in = 8, out = 0; in < buf_size; in++, out++) {
predictor[channel_number] += s->roq_square_array[buf[in]];
predictor[channel_number] = av_clip_int16(predictor[channel_number]);
output_samples[out] = predictor[channel_number];
/* toggle channel */
channel_number ^= s->channels - 1;
}
break;
case CODEC_ID_INTERPLAY_DPCM:
in = 6; /* skip over the stream mask and stream length */
predictor[0] = AV_RL16(&buf[in]);
in += 2;
SE_16BIT(predictor[0])
output_samples[out++] = predictor[0];
if (s->channels == 2) {
predictor[1] = AV_RL16(&buf[in]);
in += 2;
SE_16BIT(predictor[1])
output_samples[out++] = predictor[1];
}
while (in < buf_size) {
predictor[channel_number] += interplay_delta_table[buf[in++]];
predictor[channel_number] = av_clip_int16(predictor[channel_number]);
output_samples[out++] = predictor[channel_number];
/* toggle channel */
channel_number ^= s->channels - 1;
}
break;
case CODEC_ID_XAN_DPCM:
in = 0;
shift[0] = shift[1] = 4;
predictor[0] = AV_RL16(&buf[in]);
in += 2;
SE_16BIT(predictor[0]);
if (s->channels == 2) {
predictor[1] = AV_RL16(&buf[in]);
in += 2;
SE_16BIT(predictor[1]);
}
while (in < buf_size) {
byte = buf[in++];
diff = (byte & 0xFC) << 8;
if ((byte & 0x03) == 3)
shift[channel_number]++;
else
shift[channel_number] -= (2 * (byte & 3));
/* saturate the shifter to a lower limit of 0 */
if (shift[channel_number] < 0)
shift[channel_number] = 0;
diff >>= shift[channel_number];
predictor[channel_number] += diff;
predictor[channel_number] = av_clip_int16(predictor[channel_number]);
output_samples[out++] = predictor[channel_number];
/* toggle channel */
channel_number ^= s->channels - 1;
}
break;
case CODEC_ID_SOL_DPCM:
in = 0;
if (avctx->codec_tag != 3) {
if(*data_size/4 < buf_size)
while (in < buf_size) {
int n1, n2;
n1 = (buf[in] >> 4) & 0xF;
n2 = buf[in++] & 0xF;
s->sample[0] += s->sol_table[n1];
if (s->sample[0] < 0) s->sample[0] = 0;
if (s->sample[0] > 255) s->sample[0] = 255;
output_samples[out++] = (s->sample[0] - 128) << 8;
s->sample[s->channels - 1] += s->sol_table[n2];
if (s->sample[s->channels - 1] < 0) s->sample[s->channels - 1] = 0;
if (s->sample[s->channels - 1] > 255) s->sample[s->channels - 1] = 255;
output_samples[out++] = (s->sample[s->channels - 1] - 128) << 8;
}
} else {
while (in < buf_size) {
int n;
n = buf[in++];
if (n & 0x80) s->sample[channel_number] -= s->sol_table[n & 0x7F];
else s->sample[channel_number] += s->sol_table[n & 0x7F];
s->sample[channel_number] = av_clip_int16(s->sample[channel_number]);
output_samples[out++] = s->sample[channel_number];
/* toggle channel */
channel_number ^= s->channels - 1;
}
}
break;
}
*data_size = out * sizeof(short);
return buf_size;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9301 | int ff_rm_read_mdpr_codecdata(AVFormatContext *s, AVIOContext *pb,
AVStream *st, RMStream *rst,
unsigned int codec_data_size, const uint8_t *mime)
{
unsigned int v;
int size;
int64_t codec_pos;
int ret;
if (codec_data_size > INT_MAX)
return AVERROR_INVALIDDATA;
avpriv_set_pts_info(st, 64, 1, 1000);
codec_pos = avio_tell(pb);
v = avio_rb32(pb);
if (v == MKBETAG('M', 'L', 'T', 'I')) {
int number_of_streams = avio_rb16(pb);
int number_of_mdpr;
int i;
for (i = 0; i<number_of_streams; i++)
avio_rb16(pb);
number_of_mdpr = avio_rb16(pb);
if (number_of_mdpr != 1) {
avpriv_request_sample(s, "MLTI with multiple MDPR");
}
avio_rb32(pb);
v = avio_rb32(pb);
}
if (v == MKTAG(0xfd, 'a', 'r', '.')) {
/* ra type header */
if (rm_read_audio_stream_info(s, pb, st, rst, 0))
return -1;
} else if (v == MKBETAG('L', 'S', 'D', ':')) {
avio_seek(pb, -4, SEEK_CUR);
if ((ret = rm_read_extradata(pb, st->codec, codec_data_size)) < 0)
return ret;
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
st->codec->codec_tag = AV_RL32(st->codec->extradata);
st->codec->codec_id = ff_codec_get_id(ff_rm_codec_tags,
st->codec->codec_tag);
} else if(mime && !strcmp(mime, "logical-fileinfo")){
int stream_count, rule_count, property_count, i;
ff_free_stream(s, st);
if (avio_rb16(pb) != 0) {
av_log(s, AV_LOG_WARNING, "Unsupported version\n");
goto skip;
}
stream_count = avio_rb16(pb);
avio_skip(pb, 6*stream_count);
rule_count = avio_rb16(pb);
avio_skip(pb, 2*rule_count);
property_count = avio_rb16(pb);
for(i=0; i<property_count; i++){
uint8_t name[128], val[128];
avio_rb32(pb);
if (avio_rb16(pb) != 0) {
av_log(s, AV_LOG_WARNING, "Unsupported Name value property version\n");
goto skip; //FIXME skip just this one
}
get_str8(pb, name, sizeof(name));
switch(avio_rb32(pb)) {
case 2: get_strl(pb, val, sizeof(val), avio_rb16(pb));
av_dict_set(&s->metadata, name, val, 0);
break;
default: avio_skip(pb, avio_rb16(pb));
}
}
} else {
int fps;
if (avio_rl32(pb) != MKTAG('V', 'I', 'D', 'O')) {
fail1:
av_log(s, AV_LOG_WARNING, "Unsupported stream type %08x\n", v);
goto skip;
}
st->codec->codec_tag = avio_rl32(pb);
st->codec->codec_id = ff_codec_get_id(ff_rm_codec_tags,
st->codec->codec_tag);
av_dlog(s, "%X %X\n", st->codec->codec_tag, MKTAG('R', 'V', '2', '0'));
if (st->codec->codec_id == AV_CODEC_ID_NONE)
goto fail1;
st->codec->width = avio_rb16(pb);
st->codec->height = avio_rb16(pb);
avio_skip(pb, 2); // looks like bits per sample
avio_skip(pb, 4); // always zero?
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->need_parsing = AVSTREAM_PARSE_TIMESTAMPS;
fps = avio_rb32(pb);
if ((ret = rm_read_extradata(pb, st->codec, codec_data_size - (avio_tell(pb) - codec_pos))) < 0)
return ret;
if (fps > 0) {
av_reduce(&st->avg_frame_rate.den, &st->avg_frame_rate.num,
0x10000, fps, (1 << 30) - 1);
#if FF_API_R_FRAME_RATE
st->r_frame_rate = st->avg_frame_rate;
#endif
} else if (s->error_recognition & AV_EF_EXPLODE) {
av_log(s, AV_LOG_ERROR, "Invalid framerate\n");
return AVERROR_INVALIDDATA;
}
}
skip:
/* skip codec info */
size = avio_tell(pb) - codec_pos;
avio_skip(pb, codec_data_size - size);
return 0;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9330 | int sd_do_command(SDState *sd, SDRequest *req,
uint8_t *response) {
uint32_t last_status = sd->card_status;
sd_rsp_type_t rtype;
int rsplen;
if (!sd->bdrv || !bdrv_is_inserted(sd->bdrv) || !sd->enable) {
return 0;
}
if (sd_req_crc_validate(req)) {
sd->card_status &= ~COM_CRC_ERROR;
return 0;
}
sd->card_status &= ~CARD_STATUS_B;
sd_set_status(sd);
if (last_status & CARD_IS_LOCKED) {
if (!cmd_valid_while_locked(sd, req)) {
sd->card_status |= ILLEGAL_COMMAND;
fprintf(stderr, "SD: Card is locked\n");
return 0;
}
}
if (last_status & APP_CMD) {
rtype = sd_app_command(sd, *req);
sd->card_status &= ~APP_CMD;
} else
rtype = sd_normal_command(sd, *req);
sd->current_cmd = req->cmd;
switch (rtype) {
case sd_r1:
case sd_r1b:
sd_response_r1_make(sd, response, last_status);
rsplen = 4;
break;
case sd_r2_i:
memcpy(response, sd->cid, sizeof(sd->cid));
rsplen = 16;
break;
case sd_r2_s:
memcpy(response, sd->csd, sizeof(sd->csd));
rsplen = 16;
break;
case sd_r3:
sd_response_r3_make(sd, response);
rsplen = 4;
break;
case sd_r6:
sd_response_r6_make(sd, response);
rsplen = 4;
break;
case sd_r7:
sd_response_r7_make(sd, response);
rsplen = 4;
break;
case sd_r0:
default:
rsplen = 0;
break;
}
if (sd->card_status & ILLEGAL_COMMAND)
rsplen = 0;
#ifdef DEBUG_SD
if (rsplen) {
int i;
DPRINTF("Response:");
for (i = 0; i < rsplen; i++)
printf(" %02x", response[i]);
printf(" state %d\n", sd->state);
} else {
DPRINTF("No response %d\n", sd->state);
}
#endif
return rsplen;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9343 | static int dpcm_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
DPCMContext *s = avctx->priv_data;
int in, out = 0;
int predictor[2];
int ch = 0;
int stereo = s->channels - 1;
short *output_samples = data;
int shift[2];
unsigned char byte;
short diff;
if (!buf_size)
return 0;
// almost every DPCM variant expands one byte of data into two
if(*data_size/2 < buf_size)
return -1;
switch(avctx->codec->id) {
case CODEC_ID_ROQ_DPCM:
if (stereo) {
predictor[0] = buf[7] << 8;
predictor[1] = buf[6] << 8;
} else {
predictor[0] = AV_RL16(&buf[6]);
}
SE_16BIT(predictor[0]);
SE_16BIT(predictor[1]);
/* decode the samples */
for (in = 8, out = 0; in < buf_size; in++, out++) {
predictor[ch] += s->roq_square_array[buf[in]];
predictor[ch] = av_clip_int16(predictor[ch]);
output_samples[out] = predictor[ch];
/* toggle channel */
ch ^= stereo;
}
break;
case CODEC_ID_INTERPLAY_DPCM:
in = 6; /* skip over the stream mask and stream length */
predictor[0] = AV_RL16(&buf[in]);
in += 2;
SE_16BIT(predictor[0])
output_samples[out++] = predictor[0];
if (stereo) {
predictor[1] = AV_RL16(&buf[in]);
in += 2;
SE_16BIT(predictor[1])
output_samples[out++] = predictor[1];
}
while (in < buf_size) {
predictor[ch] += interplay_delta_table[buf[in++]];
predictor[ch] = av_clip_int16(predictor[ch]);
output_samples[out++] = predictor[ch];
/* toggle channel */
ch ^= stereo;
}
break;
case CODEC_ID_XAN_DPCM:
in = 0;
shift[0] = shift[1] = 4;
predictor[0] = AV_RL16(&buf[in]);
in += 2;
SE_16BIT(predictor[0]);
if (stereo) {
predictor[1] = AV_RL16(&buf[in]);
in += 2;
SE_16BIT(predictor[1]);
}
while (in < buf_size) {
byte = buf[in++];
diff = (byte & 0xFC) << 8;
if ((byte & 0x03) == 3)
shift[ch]++;
else
shift[ch] -= (2 * (byte & 3));
/* saturate the shifter to a lower limit of 0 */
if (shift[ch] < 0)
shift[ch] = 0;
diff >>= shift[ch];
predictor[ch] += diff;
predictor[ch] = av_clip_int16(predictor[ch]);
output_samples[out++] = predictor[ch];
/* toggle channel */
ch ^= stereo;
}
break;
case CODEC_ID_SOL_DPCM:
in = 0;
if (avctx->codec_tag != 3) {
if(*data_size/4 < buf_size)
return -1;
while (in < buf_size) {
int n1, n2;
n1 = (buf[in] >> 4) & 0xF;
n2 = buf[in++] & 0xF;
s->sample[0] += s->sol_table[n1];
if (s->sample[0] < 0) s->sample[0] = 0;
if (s->sample[0] > 255) s->sample[0] = 255;
output_samples[out++] = (s->sample[0] - 128) << 8;
s->sample[stereo] += s->sol_table[n2];
if (s->sample[stereo] < 0) s->sample[stereo] = 0;
if (s->sample[stereo] > 255) s->sample[stereo] = 255;
output_samples[out++] = (s->sample[stereo] - 128) << 8;
}
} else {
while (in < buf_size) {
int n;
n = buf[in++];
if (n & 0x80) s->sample[ch] -= s->sol_table[n & 0x7F];
else s->sample[ch] += s->sol_table[n & 0x7F];
s->sample[ch] = av_clip_int16(s->sample[ch]);
output_samples[out++] = s->sample[ch];
/* toggle channel */
ch ^= stereo;
}
}
break;
}
*data_size = out * sizeof(short);
return buf_size;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9347 | static void picmemset(PicContext *s, AVFrame *frame, unsigned value, int run,
int *x, int *y, int *plane, int bits_per_plane)
{
uint8_t *d;
int shift = *plane * bits_per_plane;
unsigned mask = ((1 << bits_per_plane) - 1) << shift;
value <<= shift;
while (run > 0) {
int j;
for (j = 8-bits_per_plane; j >= 0; j -= bits_per_plane) {
d = frame->data[0] + *y * frame->linesize[0];
d[*x] |= (value >> j) & mask;
*x += 1;
if (*x == s->width) {
*y -= 1;
*x = 0;
if (*y < 0) {
*y = s->height - 1;
*plane += 1;
if (*plane >= s->nb_planes)
return;
value <<= bits_per_plane;
mask <<= bits_per_plane;
}
}
}
run--;
}
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9349 | mp_image_t* vf_get_image(vf_instance_t* vf, unsigned int outfmt, int mp_imgtype, int mp_imgflag, int w, int h){
MPContext *m= (MPContext*)(((uint8_t*)vf) - offsetof(MPContext, next_vf));
mp_image_t* mpi=NULL;
int w2;
int number = mp_imgtype >> 16;
av_assert0(vf->next == NULL); // all existing filters call this just on next
//vf_dint needs these as it calls vf_get_image() before configuring the output
if(vf->w==0 && w>0) vf->w=w;
if(vf->h==0 && h>0) vf->h=h;
av_assert0(w == -1 || w >= vf->w);
av_assert0(h == -1 || h >= vf->h);
av_assert0(vf->w > 0);
av_assert0(vf->h > 0);
av_log(m->avfctx, AV_LOG_DEBUG, "get_image: %d:%d, vf: %d:%d\n", w,h,vf->w,vf->h);
if (w == -1) w = vf->w;
if (h == -1) h = vf->h;
w2=(mp_imgflag&MP_IMGFLAG_ACCEPT_ALIGNED_STRIDE)?((w+15)&(~15)):w;
// Note: we should call libvo first to check if it supports direct rendering
// and if not, then fallback to software buffers:
switch(mp_imgtype & 0xff){
case MP_IMGTYPE_EXPORT:
if(!vf->imgctx.export_images[0]) vf->imgctx.export_images[0]=new_mp_image(w2,h);
mpi=vf->imgctx.export_images[0];
break;
case MP_IMGTYPE_STATIC:
if(!vf->imgctx.static_images[0]) vf->imgctx.static_images[0]=new_mp_image(w2,h);
mpi=vf->imgctx.static_images[0];
break;
case MP_IMGTYPE_TEMP:
if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=new_mp_image(w2,h);
mpi=vf->imgctx.temp_images[0];
break;
case MP_IMGTYPE_IPB:
if(!(mp_imgflag&MP_IMGFLAG_READABLE)){ // B frame:
if(!vf->imgctx.temp_images[0]) vf->imgctx.temp_images[0]=new_mp_image(w2,h);
mpi=vf->imgctx.temp_images[0];
break;
}
case MP_IMGTYPE_IP:
if(!vf->imgctx.static_images[vf->imgctx.static_idx]) vf->imgctx.static_images[vf->imgctx.static_idx]=new_mp_image(w2,h);
mpi=vf->imgctx.static_images[vf->imgctx.static_idx];
vf->imgctx.static_idx^=1;
break;
case MP_IMGTYPE_NUMBERED:
if (number == -1) {
int i;
for (i = 0; i < NUM_NUMBERED_MPI; i++)
if (!vf->imgctx.numbered_images[i] || !vf->imgctx.numbered_images[i]->usage_count)
break;
number = i;
}
if (number < 0 || number >= NUM_NUMBERED_MPI) return NULL;
if (!vf->imgctx.numbered_images[number]) vf->imgctx.numbered_images[number] = new_mp_image(w2,h);
mpi = vf->imgctx.numbered_images[number];
mpi->number = number;
break;
}
if(mpi){
mpi->type=mp_imgtype;
mpi->w=vf->w; mpi->h=vf->h;
// keep buffer allocation status & color flags only:
// mpi->flags&=~(MP_IMGFLAG_PRESERVE|MP_IMGFLAG_READABLE|MP_IMGFLAG_DIRECT);
mpi->flags&=MP_IMGFLAG_ALLOCATED|MP_IMGFLAG_TYPE_DISPLAYED|MP_IMGFLAGMASK_COLORS;
// accept restrictions, draw_slice and palette flags only:
mpi->flags|=mp_imgflag&(MP_IMGFLAGMASK_RESTRICTIONS|MP_IMGFLAG_DRAW_CALLBACK|MP_IMGFLAG_RGB_PALETTE);
if(!vf->draw_slice) mpi->flags&=~MP_IMGFLAG_DRAW_CALLBACK;
if(mpi->width!=w2 || mpi->height!=h){
// printf("vf.c: MPI parameters changed! %dx%d -> %dx%d \n", mpi->width,mpi->height,w2,h);
if(mpi->flags&MP_IMGFLAG_ALLOCATED){
if(mpi->width<w2 || mpi->height<h){
// need to re-allocate buffer memory:
av_free(mpi->planes[0]);
mpi->flags&=~MP_IMGFLAG_ALLOCATED;
mp_msg(MSGT_VFILTER,MSGL_V,"vf.c: have to REALLOCATE buffer memory :(\n");
}
// } else {
} {
mpi->width=w2; mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
mpi->height=h; mpi->chroma_height=(h + (1<<mpi->chroma_y_shift) - 1)>>mpi->chroma_y_shift;
}
}
if(!mpi->bpp) mp_image_setfmt(mpi,outfmt);
if(!(mpi->flags&MP_IMGFLAG_ALLOCATED) && mpi->type>MP_IMGTYPE_EXPORT){
av_assert0(!vf->get_image);
// check libvo first!
if(vf->get_image) vf->get_image(vf,mpi);
if(!(mpi->flags&MP_IMGFLAG_DIRECT)){
// non-direct and not yet allocated image. allocate it!
if (!mpi->bpp) { // no way we can allocate this
mp_msg(MSGT_DECVIDEO, MSGL_FATAL,
"vf_get_image: Tried to allocate a format that can not be allocated!\n");
return NULL;
}
// check if codec prefer aligned stride:
if(mp_imgflag&MP_IMGFLAG_PREFER_ALIGNED_STRIDE){
int align=(mpi->flags&MP_IMGFLAG_PLANAR &&
mpi->flags&MP_IMGFLAG_YUV) ?
(8<<mpi->chroma_x_shift)-1 : 15; // -- maybe FIXME
w2=((w+align)&(~align));
if(mpi->width!=w2){
#if 0
// we have to change width... check if we CAN co it:
int flags=vf->query_format(vf,outfmt); // should not fail
if(!(flags&3)) mp_msg(MSGT_DECVIDEO,MSGL_WARN,"??? vf_get_image{vf->query_format(outfmt)} failed!\n");
// printf("query -> 0x%X \n",flags);
if(flags&VFCAP_ACCEPT_STRIDE){
#endif
mpi->width=w2;
mpi->chroma_width=(w2 + (1<<mpi->chroma_x_shift) - 1)>>mpi->chroma_x_shift;
// }
}
}
mp_image_alloc_planes(mpi);
// printf("clearing img!\n");
vf_mpi_clear(mpi,0,0,mpi->width,mpi->height);
}
}
av_assert0(!vf->start_slice);
if(mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)
if(vf->start_slice) vf->start_slice(vf,mpi);
if(!(mpi->flags&MP_IMGFLAG_TYPE_DISPLAYED)){
mp_msg(MSGT_DECVIDEO,MSGL_V,"*** [%s] %s%s mp_image_t, %dx%dx%dbpp %s %s, %d bytes\n",
"NULL"/*vf->info->name*/,
(mpi->type==MP_IMGTYPE_EXPORT)?"Exporting":
((mpi->flags&MP_IMGFLAG_DIRECT)?"Direct Rendering":"Allocating"),
(mpi->flags&MP_IMGFLAG_DRAW_CALLBACK)?" (slices)":"",
mpi->width,mpi->height,mpi->bpp,
(mpi->flags&MP_IMGFLAG_YUV)?"YUV":((mpi->flags&MP_IMGFLAG_SWAPPED)?"BGR":"RGB"),
(mpi->flags&MP_IMGFLAG_PLANAR)?"planar":"packed",
mpi->bpp*mpi->width*mpi->height/8);
mp_msg(MSGT_DECVIDEO,MSGL_DBG2,"(imgfmt: %x, planes: %p,%p,%p strides: %d,%d,%d, chroma: %dx%d, shift: h:%d,v:%d)\n",
mpi->imgfmt, mpi->planes[0], mpi->planes[1], mpi->planes[2],
mpi->stride[0], mpi->stride[1], mpi->stride[2],
mpi->chroma_width, mpi->chroma_height, mpi->chroma_x_shift, mpi->chroma_y_shift);
mpi->flags|=MP_IMGFLAG_TYPE_DISPLAYED;
}
mpi->qscale = NULL;
}
mpi->usage_count++;
// printf("\rVF_MPI: %p %p %p %d %d %d \n",
// mpi->planes[0],mpi->planes[1],mpi->planes[2],
// mpi->stride[0],mpi->stride[1],mpi->stride[2]);
return mpi;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9351 | static int parse_MP4SLDescrTag(MP4DescrParseContext *d, int64_t off, int len)
{
Mp4Descr *descr = d->active_descr;
int predefined;
if (!descr)
return -1;
predefined = avio_r8(&d->pb);
if (!predefined) {
int lengths;
int flags = avio_r8(&d->pb);
descr->sl.use_au_start = !!(flags & 0x80);
descr->sl.use_au_end = !!(flags & 0x40);
descr->sl.use_rand_acc_pt = !!(flags & 0x20);
descr->sl.use_padding = !!(flags & 0x08);
descr->sl.use_timestamps = !!(flags & 0x04);
descr->sl.use_idle = !!(flags & 0x02);
descr->sl.timestamp_res = avio_rb32(&d->pb);
avio_rb32(&d->pb);
descr->sl.timestamp_len = avio_r8(&d->pb);
descr->sl.ocr_len = avio_r8(&d->pb);
descr->sl.au_len = avio_r8(&d->pb);
descr->sl.inst_bitrate_len = avio_r8(&d->pb);
lengths = avio_rb16(&d->pb);
descr->sl.degr_prior_len = lengths >> 12;
descr->sl.au_seq_num_len = (lengths >> 7) & 0x1f;
descr->sl.packet_seq_num_len = (lengths >> 2) & 0x1f;
} else {
avpriv_report_missing_feature(d->s, "Predefined SLConfigDescriptor");
return 0;
The vulnerability label is: Vulnerable |
devign_test_set_data_9352 | target_ulong helper_mul_suov(CPUTriCoreState *env, target_ulong r1,
target_ulong r2)
{
int64_t t1 = extract64(r1, 0, 32);
int64_t t2 = extract64(r2, 0, 32);
int64_t result = t1 * t2;
return suov32(env, result);
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9366 | static int parse_chunks(AVFormatContext *s, int mode, int64_t seekts, int *len_ptr)
{
WtvContext *wtv = s->priv_data;
AVIOContext *pb = wtv->pb;
while (!url_feof(pb)) {
ff_asf_guid g;
int len, sid, consumed;
ff_get_guid(pb, &g);
len = avio_rl32(pb);
if (len < 32)
break;
sid = avio_rl32(pb) & 0x7FFF;
avio_skip(pb, 8);
consumed = 32;
if (!ff_guidcmp(g, ff_SBE2_STREAM_DESC_EVENT)) {
if (ff_find_stream_index(s, sid) < 0) {
ff_asf_guid mediatype, subtype, formattype;
int size;
avio_skip(pb, 28);
ff_get_guid(pb, &mediatype);
ff_get_guid(pb, &subtype);
avio_skip(pb, 12);
ff_get_guid(pb, &formattype);
size = avio_rl32(pb);
parse_media_type(s, 0, sid, mediatype, subtype, formattype, size);
consumed += 92 + size;
}
} else if (!ff_guidcmp(g, ff_stream2_guid)) {
int stream_index = ff_find_stream_index(s, sid);
if (stream_index >= 0 && !((WtvStream*)s->streams[stream_index]->priv_data)->seen_data) {
ff_asf_guid mediatype, subtype, formattype;
int size;
avio_skip(pb, 12);
ff_get_guid(pb, &mediatype);
ff_get_guid(pb, &subtype);
avio_skip(pb, 12);
ff_get_guid(pb, &formattype);
size = avio_rl32(pb);
parse_media_type(s, s->streams[stream_index], sid, mediatype, subtype, formattype, size);
consumed += 76 + size;
}
} else if (!ff_guidcmp(g, EVENTID_AudioDescriptorSpanningEvent) ||
!ff_guidcmp(g, EVENTID_CtxADescriptorSpanningEvent) ||
!ff_guidcmp(g, EVENTID_CSDescriptorSpanningEvent) ||
!ff_guidcmp(g, EVENTID_StreamIDSpanningEvent) ||
!ff_guidcmp(g, EVENTID_SubtitleSpanningEvent) ||
!ff_guidcmp(g, EVENTID_TeletextSpanningEvent)) {
int stream_index = ff_find_stream_index(s, sid);
if (stream_index >= 0) {
AVStream *st = s->streams[stream_index];
uint8_t buf[258];
const uint8_t *pbuf = buf;
int buf_size;
avio_skip(pb, 8);
consumed += 8;
if (!ff_guidcmp(g, EVENTID_CtxADescriptorSpanningEvent) ||
!ff_guidcmp(g, EVENTID_CSDescriptorSpanningEvent)) {
avio_skip(pb, 6);
consumed += 6;
}
buf_size = FFMIN(len - consumed, sizeof(buf));
avio_read(pb, buf, buf_size);
consumed += buf_size;
ff_parse_mpeg2_descriptor(s, st, 0, &pbuf, buf + buf_size, NULL, 0, 0, NULL);
}
} else if (!ff_guidcmp(g, EVENTID_AudioTypeSpanningEvent)) {
int stream_index = ff_find_stream_index(s, sid);
if (stream_index >= 0) {
AVStream *st = s->streams[stream_index];
int audio_type;
avio_skip(pb, 8);
audio_type = avio_r8(pb);
if (audio_type == 2)
st->disposition |= AV_DISPOSITION_HEARING_IMPAIRED;
else if (audio_type == 3)
st->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED;
consumed += 9;
}
} else if (!ff_guidcmp(g, EVENTID_DVBScramblingControlSpanningEvent)) {
int stream_index = ff_find_stream_index(s, sid);
if (stream_index >= 0) {
avio_skip(pb, 12);
if (avio_rl32(pb))
av_log(s, AV_LOG_WARNING, "DVB scrambled stream detected (st:%d), decoding will likely fail\n", stream_index);
consumed += 16;
}
} else if (!ff_guidcmp(g, EVENTID_LanguageSpanningEvent)) {
int stream_index = ff_find_stream_index(s, sid);
if (stream_index >= 0) {
AVStream *st = s->streams[stream_index];
uint8_t language[4];
avio_skip(pb, 12);
avio_read(pb, language, 3);
if (language[0]) {
language[3] = 0;
av_dict_set(&st->metadata, "language", language, 0);
if (!strcmp(language, "nar") || !strcmp(language, "NAR"))
st->disposition |= AV_DISPOSITION_VISUAL_IMPAIRED;
}
consumed += 15;
}
} else if (!ff_guidcmp(g, ff_timestamp_guid)) {
int stream_index = ff_find_stream_index(s, sid);
if (stream_index >= 0) {
avio_skip(pb, 8);
wtv->pts = avio_rl64(pb);
consumed += 16;
if (wtv->pts == -1)
wtv->pts = AV_NOPTS_VALUE;
else {
wtv->last_valid_pts = wtv->pts;
if (wtv->epoch == AV_NOPTS_VALUE || wtv->pts < wtv->epoch)
wtv->epoch = wtv->pts;
if (mode == SEEK_TO_PTS && wtv->pts >= seekts) {
avio_skip(pb, WTV_PAD8(len) - consumed);
return 0;
}
}
}
} else if (!ff_guidcmp(g, ff_data_guid)) {
int stream_index = ff_find_stream_index(s, sid);
if (mode == SEEK_TO_DATA && stream_index >= 0 && len > 32 && s->streams[stream_index]->priv_data) {
WtvStream *wst = s->streams[stream_index]->priv_data;
wst->seen_data = 1;
if (len_ptr) {
*len_ptr = len;
}
return stream_index;
}
} else if (!ff_guidcmp(g, /* DSATTRIB_WMDRMProtectionInfo */ (const ff_asf_guid){0x83,0x95,0x74,0x40,0x9D,0x6B,0xEC,0x4E,0xB4,0x3C,0x67,0xA1,0x80,0x1E,0x1A,0x9B})) {
int stream_index = ff_find_stream_index(s, sid);
if (stream_index >= 0)
av_log(s, AV_LOG_WARNING, "encrypted stream detected (st:%d), decoding will likely fail\n", stream_index);
} else if (
!ff_guidcmp(g, /* DSATTRIB_CAPTURE_STREAMTIME */ (const ff_asf_guid){0x14,0x56,0x1A,0x0C,0xCD,0x30,0x40,0x4F,0xBC,0xBF,0xD0,0x3E,0x52,0x30,0x62,0x07}) ||
!ff_guidcmp(g, /* DSATTRIB_PBDATAG_ATTRIBUTE */ (const ff_asf_guid){0x79,0x66,0xB5,0xE0,0xB9,0x12,0xCC,0x43,0xB7,0xDF,0x57,0x8C,0xAA,0x5A,0x7B,0x63}) ||
!ff_guidcmp(g, /* DSATTRIB_PicSampleSeq */ (const ff_asf_guid){0x02,0xAE,0x5B,0x2F,0x8F,0x7B,0x60,0x4F,0x82,0xD6,0xE4,0xEA,0x2F,0x1F,0x4C,0x99}) ||
!ff_guidcmp(g, /* DSATTRIB_TRANSPORT_PROPERTIES */ ff_DSATTRIB_TRANSPORT_PROPERTIES) ||
!ff_guidcmp(g, /* dvr_ms_vid_frame_rep_data */ (const ff_asf_guid){0xCC,0x32,0x64,0xDD,0x29,0xE2,0xDB,0x40,0x80,0xF6,0xD2,0x63,0x28,0xD2,0x76,0x1F}) ||
!ff_guidcmp(g, /* EVENTID_ChannelChangeSpanningEvent */ (const ff_asf_guid){0xE5,0xC5,0x67,0x90,0x5C,0x4C,0x05,0x42,0x86,0xC8,0x7A,0xFE,0x20,0xFE,0x1E,0xFA}) ||
!ff_guidcmp(g, /* EVENTID_ChannelInfoSpanningEvent */ (const ff_asf_guid){0x80,0x6D,0xF3,0x41,0x32,0x41,0xC2,0x4C,0xB1,0x21,0x01,0xA4,0x32,0x19,0xD8,0x1B}) ||
!ff_guidcmp(g, /* EVENTID_ChannelTypeSpanningEvent */ (const ff_asf_guid){0x51,0x1D,0xAB,0x72,0xD2,0x87,0x9B,0x48,0xBA,0x11,0x0E,0x08,0xDC,0x21,0x02,0x43}) ||
!ff_guidcmp(g, /* EVENTID_PIDListSpanningEvent */ (const ff_asf_guid){0x65,0x8F,0xFC,0x47,0xBB,0xE2,0x34,0x46,0x9C,0xEF,0xFD,0xBF,0xE6,0x26,0x1D,0x5C}) ||
!ff_guidcmp(g, /* EVENTID_SignalAndServiceStatusSpanningEvent */ (const ff_asf_guid){0xCB,0xC5,0x68,0x80,0x04,0x3C,0x2B,0x49,0xB4,0x7D,0x03,0x08,0x82,0x0D,0xCE,0x51}) ||
!ff_guidcmp(g, /* EVENTID_StreamTypeSpanningEvent */ (const ff_asf_guid){0xBC,0x2E,0xAF,0x82,0xA6,0x30,0x64,0x42,0xA8,0x0B,0xAD,0x2E,0x13,0x72,0xAC,0x60}) ||
!ff_guidcmp(g, (const ff_asf_guid){0x1E,0xBE,0xC3,0xC5,0x43,0x92,0xDC,0x11,0x85,0xE5,0x00,0x12,0x3F,0x6F,0x73,0xB9}) ||
!ff_guidcmp(g, (const ff_asf_guid){0x3B,0x86,0xA2,0xB1,0xEB,0x1E,0xC3,0x44,0x8C,0x88,0x1C,0xA3,0xFF,0xE3,0xE7,0x6A}) ||
!ff_guidcmp(g, (const ff_asf_guid){0x4E,0x7F,0x4C,0x5B,0xC4,0xD0,0x38,0x4B,0xA8,0x3E,0x21,0x7F,0x7B,0xBF,0x52,0xE7}) ||
!ff_guidcmp(g, (const ff_asf_guid){0x63,0x36,0xEB,0xFE,0xA1,0x7E,0xD9,0x11,0x83,0x08,0x00,0x07,0xE9,0x5E,0xAD,0x8D}) ||
!ff_guidcmp(g, (const ff_asf_guid){0x70,0xE9,0xF1,0xF8,0x89,0xA4,0x4C,0x4D,0x83,0x73,0xB8,0x12,0xE0,0xD5,0xF8,0x1E}) ||
!ff_guidcmp(g, ff_index_guid) ||
!ff_guidcmp(g, ff_sync_guid) ||
!ff_guidcmp(g, ff_stream1_guid) ||
!ff_guidcmp(g, (const ff_asf_guid){0xF7,0x10,0x02,0xB9,0xEE,0x7C,0xED,0x4E,0xBD,0x7F,0x05,0x40,0x35,0x86,0x18,0xA1})) {
//ignore known guids
} else
av_log(s, AV_LOG_WARNING, "unsupported chunk:"FF_PRI_GUID"\n", FF_ARG_GUID(g));
avio_skip(pb, WTV_PAD8(len) - consumed);
}
return AVERROR_EOF;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9399 | static void virtio_rng_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
dc->props = virtio_rng_properties;
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
vdc->realize = virtio_rng_device_realize;
vdc->unrealize = virtio_rng_device_unrealize;
vdc->get_features = get_features;
vdc->load = virtio_rng_load_device;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9418 | int kvm_init(int smp_cpus)
{
KVMState *s;
int ret;
int i;
if (smp_cpus > 1)
return -EINVAL;
s = qemu_mallocz(sizeof(KVMState));
if (s == NULL)
return -ENOMEM;
for (i = 0; i < ARRAY_SIZE(s->slots); i++)
s->slots[i].slot = i;
s->vmfd = -1;
s->fd = open("/dev/kvm", O_RDWR);
if (s->fd == -1) {
fprintf(stderr, "Could not access KVM kernel module: %m\n");
ret = -errno;
goto err;
}
ret = kvm_ioctl(s, KVM_GET_API_VERSION, 0);
if (ret < KVM_API_VERSION) {
ret = -EINVAL;
fprintf(stderr, "kvm version too old\n");
goto err;
}
if (ret > KVM_API_VERSION) {
ret = -EINVAL;
fprintf(stderr, "kvm version not supported\n");
goto err;
}
s->vmfd = kvm_ioctl(s, KVM_CREATE_VM, 0);
if (s->vmfd < 0)
goto err;
/* initially, KVM allocated its own memory and we had to jump through
* hooks to make phys_ram_base point to this. Modern versions of KVM
* just use a user allocated buffer so we can use phys_ram_base
* unmodified. Make sure we have a sufficiently modern version of KVM.
*/
ret = kvm_ioctl(s, KVM_CHECK_EXTENSION, KVM_CAP_USER_MEMORY);
if (ret <= 0) {
if (ret == 0)
ret = -EINVAL;
fprintf(stderr, "kvm does not support KVM_CAP_USER_MEMORY\n");
goto err;
}
/* There was a nasty bug in < kvm-80 that prevents memory slots from being
* destroyed properly. Since we rely on this capability, refuse to work
* with any kernel without this capability. */
ret = kvm_ioctl(s, KVM_CHECK_EXTENSION,
KVM_CAP_DESTROY_MEMORY_REGION_WORKS);
if (ret <= 0) {
if (ret == 0)
ret = -EINVAL;
fprintf(stderr,
"KVM kernel module broken (DESTROY_MEMORY_REGION)\n"
"Please upgrade to at least kvm-81.\n");
goto err;
}
ret = kvm_arch_init(s, smp_cpus);
if (ret < 0)
goto err;
kvm_state = s;
return 0;
err:
if (s) {
if (s->vmfd != -1)
close(s->vmfd);
if (s->fd != -1)
close(s->fd);
}
qemu_free(s);
return ret;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9434 | static int64_t nfs_client_open(NFSClient *client, const char *filename,
int flags, Error **errp)
{
int ret = -EINVAL, i;
struct stat st;
URI *uri;
QueryParams *qp = NULL;
char *file = NULL, *strp = NULL;
uri = uri_parse(filename);
if (!uri) {
error_setg(errp, "Invalid URL specified");
goto fail;
}
if (!uri->server) {
error_setg(errp, "Invalid URL specified");
goto fail;
}
strp = strrchr(uri->path, '/');
if (strp == NULL) {
error_setg(errp, "Invalid URL specified");
goto fail;
}
file = g_strdup(strp);
*strp = 0;
client->context = nfs_init_context();
if (client->context == NULL) {
error_setg(errp, "Failed to init NFS context");
goto fail;
}
qp = query_params_parse(uri->query);
for (i = 0; i < qp->n; i++) {
if (!qp->p[i].value) {
error_setg(errp, "Value for NFS parameter expected: %s",
qp->p[i].name);
goto fail;
}
if (!strncmp(qp->p[i].name, "uid", 3)) {
nfs_set_uid(client->context, atoi(qp->p[i].value));
} else if (!strncmp(qp->p[i].name, "gid", 3)) {
nfs_set_gid(client->context, atoi(qp->p[i].value));
} else if (!strncmp(qp->p[i].name, "tcp-syncnt", 10)) {
nfs_set_tcp_syncnt(client->context, atoi(qp->p[i].value));
} else {
error_setg(errp, "Unknown NFS parameter name: %s",
qp->p[i].name);
goto fail;
}
}
ret = nfs_mount(client->context, uri->server, uri->path);
if (ret < 0) {
error_setg(errp, "Failed to mount nfs share: %s",
nfs_get_error(client->context));
goto fail;
}
if (flags & O_CREAT) {
ret = nfs_creat(client->context, file, 0600, &client->fh);
if (ret < 0) {
error_setg(errp, "Failed to create file: %s",
nfs_get_error(client->context));
goto fail;
}
} else {
ret = nfs_open(client->context, file, flags, &client->fh);
if (ret < 0) {
error_setg(errp, "Failed to open file : %s",
nfs_get_error(client->context));
goto fail;
}
}
ret = nfs_fstat(client->context, client->fh, &st);
if (ret < 0) {
error_setg(errp, "Failed to fstat file: %s",
nfs_get_error(client->context));
goto fail;
}
ret = DIV_ROUND_UP(st.st_size, BDRV_SECTOR_SIZE);
client->has_zero_init = S_ISREG(st.st_mode);
goto out;
fail:
nfs_client_close(client);
out:
if (qp) {
query_params_free(qp);
}
uri_free(uri);
g_free(file);
return ret;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9435 | static int nut_read_header(AVFormatContext * avf, AVFormatParameters * ap) {
NUTContext * priv = avf->priv_data;
AVIOContext * bc = avf->pb;
nut_demuxer_opts_tt dopts = {
.input = {
.priv = bc,
.seek = av_seek,
.read = av_read,
.eof = NULL,
.file_pos = 0,
},
.alloc = { av_malloc, av_realloc, av_free },
.read_index = 1,
.cache_syncpoints = 1,
};
nut_context_tt * nut = priv->nut = nut_demuxer_init(&dopts);
nut_stream_header_tt * s;
int ret, i;
if ((ret = nut_read_headers(nut, &s, NULL))) {
av_log(avf, AV_LOG_ERROR, " NUT error: %s\n", nut_error(ret));
return -1;
priv->s = s;
for (i = 0; s[i].type != -1 && i < 2; i++) {
AVStream * st = avformat_new_stream(avf, NULL);
int j;
for (j = 0; j < s[i].fourcc_len && j < 8; j++) st->codec->codec_tag |= s[i].fourcc[j]<<(j*8);
st->codec->has_b_frames = s[i].decode_delay;
st->codec->extradata_size = s[i].codec_specific_len;
if (st->codec->extradata_size) {
st->codec->extradata = av_mallocz(st->codec->extradata_size);
memcpy(st->codec->extradata, s[i].codec_specific, st->codec->extradata_size);
avpriv_set_pts_info(avf->streams[i], 60, s[i].time_base.num, s[i].time_base.den);
st->start_time = 0;
st->duration = s[i].max_pts;
st->codec->codec_id = ff_codec_get_id(nut_tags, st->codec->codec_tag);
switch(s[i].type) {
case NUT_AUDIO_CLASS:
st->codec->codec_type = AVMEDIA_TYPE_AUDIO;
if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = ff_codec_get_id(ff_codec_wav_tags, st->codec->codec_tag);
st->codec->channels = s[i].channel_count;
st->codec->sample_rate = s[i].samplerate_num / s[i].samplerate_denom;
break;
case NUT_VIDEO_CLASS:
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
if (st->codec->codec_id == CODEC_ID_NONE) st->codec->codec_id = ff_codec_get_id(ff_codec_bmp_tags, st->codec->codec_tag);
st->codec->width = s[i].width;
st->codec->height = s[i].height;
st->sample_aspect_ratio.num = s[i].sample_width;
st->sample_aspect_ratio.den = s[i].sample_height;
break;
if (st->codec->codec_id == CODEC_ID_NONE) av_log(avf, AV_LOG_ERROR, "Unknown codec?!\n");
return 0;
The vulnerability label is: Vulnerable |
devign_test_set_data_9437 | static void ppc_cpu_realizefn(DeviceState *dev, Error **errp)
{
CPUState *cs = CPU(dev);
PowerPCCPU *cpu = POWERPC_CPU(dev);
PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
Error *local_err = NULL;
#if !defined(CONFIG_USER_ONLY)
int max_smt = kvm_enabled() ? kvmppc_smt_threads() : 1;
#endif
#if !defined(CONFIG_USER_ONLY)
if (smp_threads > max_smt) {
error_setg(errp, "Cannot support more than %d threads on PPC with %s",
max_smt, kvm_enabled() ? "KVM" : "TCG");
if (!is_power_of_2(smp_threads)) {
error_setg(errp, "Cannot support %d threads on PPC with %s, "
"threads count must be a power of 2.",
smp_threads, kvm_enabled() ? "KVM" : "TCG");
#endif
cpu_exec_init(cs, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
#if !defined(CONFIG_USER_ONLY)
cpu->cpu_dt_id = (cs->cpu_index / smp_threads) * max_smt
+ (cs->cpu_index % smp_threads);
#endif
if (tcg_enabled()) {
if (ppc_fixup_cpu(cpu) != 0) {
error_setg(errp, "Unable to emulate selected CPU with TCG");
#if defined(TARGET_PPCEMB)
if (!ppc_cpu_is_valid(pcc)) {
error_setg(errp, "CPU does not possess a BookE or 4xx MMU. "
"Please use qemu-system-ppc or qemu-system-ppc64 instead "
"or choose another CPU model.");
#endif
create_ppc_opcodes(cpu, &local_err);
if (local_err != NULL) {
error_propagate(errp, local_err);
init_ppc_proc(cpu);
if (pcc->insns_flags & PPC_FLOAT) {
gdb_register_coprocessor(cs, gdb_get_float_reg, gdb_set_float_reg,
33, "power-fpu.xml", 0);
if (pcc->insns_flags & PPC_ALTIVEC) {
gdb_register_coprocessor(cs, gdb_get_avr_reg, gdb_set_avr_reg,
34, "power-altivec.xml", 0);
if (pcc->insns_flags & PPC_SPE) {
gdb_register_coprocessor(cs, gdb_get_spe_reg, gdb_set_spe_reg,
34, "power-spe.xml", 0);
if (pcc->insns_flags2 & PPC2_VSX) {
gdb_register_coprocessor(cs, gdb_get_vsx_reg, gdb_set_vsx_reg,
32, "power-vsx.xml", 0);
qemu_init_vcpu(cs);
pcc->parent_realize(dev, errp);
#if defined(PPC_DUMP_CPU)
{
CPUPPCState *env = &cpu->env;
const char *mmu_model, *excp_model, *bus_model;
switch (env->mmu_model) {
case POWERPC_MMU_32B:
mmu_model = "PowerPC 32";
break;
case POWERPC_MMU_SOFT_6xx:
mmu_model = "PowerPC 6xx/7xx with software driven TLBs";
break;
case POWERPC_MMU_SOFT_74xx:
mmu_model = "PowerPC 74xx with software driven TLBs";
break;
case POWERPC_MMU_SOFT_4xx:
mmu_model = "PowerPC 4xx with software driven TLBs";
break;
case POWERPC_MMU_SOFT_4xx_Z:
mmu_model = "PowerPC 4xx with software driven TLBs "
"and zones protections";
break;
case POWERPC_MMU_REAL:
mmu_model = "PowerPC real mode only";
break;
case POWERPC_MMU_MPC8xx:
mmu_model = "PowerPC MPC8xx";
break;
case POWERPC_MMU_BOOKE:
mmu_model = "PowerPC BookE";
break;
case POWERPC_MMU_BOOKE206:
mmu_model = "PowerPC BookE 2.06";
break;
case POWERPC_MMU_601:
mmu_model = "PowerPC 601";
break;
#if defined (TARGET_PPC64)
case POWERPC_MMU_64B:
mmu_model = "PowerPC 64";
break;
#endif
default:
mmu_model = "Unknown or invalid";
break;
switch (env->excp_model) {
case POWERPC_EXCP_STD:
excp_model = "PowerPC";
break;
case POWERPC_EXCP_40x:
excp_model = "PowerPC 40x";
break;
case POWERPC_EXCP_601:
excp_model = "PowerPC 601";
break;
case POWERPC_EXCP_602:
excp_model = "PowerPC 602";
break;
case POWERPC_EXCP_603:
excp_model = "PowerPC 603";
break;
case POWERPC_EXCP_603E:
excp_model = "PowerPC 603e";
break;
case POWERPC_EXCP_604:
excp_model = "PowerPC 604";
break;
case POWERPC_EXCP_7x0:
excp_model = "PowerPC 740/750";
break;
case POWERPC_EXCP_7x5:
excp_model = "PowerPC 745/755";
break;
case POWERPC_EXCP_74xx:
excp_model = "PowerPC 74xx";
break;
case POWERPC_EXCP_BOOKE:
excp_model = "PowerPC BookE";
break;
#if defined (TARGET_PPC64)
case POWERPC_EXCP_970:
excp_model = "PowerPC 970";
break;
#endif
default:
excp_model = "Unknown or invalid";
break;
switch (env->bus_model) {
case PPC_FLAGS_INPUT_6xx:
bus_model = "PowerPC 6xx";
break;
case PPC_FLAGS_INPUT_BookE:
bus_model = "PowerPC BookE";
break;
case PPC_FLAGS_INPUT_405:
bus_model = "PowerPC 405";
break;
case PPC_FLAGS_INPUT_401:
bus_model = "PowerPC 401/403";
break;
case PPC_FLAGS_INPUT_RCPU:
bus_model = "RCPU / MPC8xx";
break;
#if defined (TARGET_PPC64)
case PPC_FLAGS_INPUT_970:
bus_model = "PowerPC 970";
break;
#endif
default:
bus_model = "Unknown or invalid";
break;
printf("PowerPC %-12s : PVR %08x MSR %016" PRIx64 "\n"
" MMU model : %s\n",
object_class_get_name(OBJECT_CLASS(pcc)),
pcc->pvr, pcc->msr_mask, mmu_model);
#if !defined(CONFIG_USER_ONLY)
if (env->tlb.tlb6) {
printf(" %d %s TLB in %d ways\n",
env->nb_tlb, env->id_tlbs ? "splitted" : "merged",
env->nb_ways);
#endif
printf(" Exceptions model : %s\n"
" Bus model : %s\n",
excp_model, bus_model);
printf(" MSR features :\n");
if (env->flags & POWERPC_FLAG_SPE)
printf(" signal processing engine enable"
"\n");
else if (env->flags & POWERPC_FLAG_VRE)
printf(" vector processor enable\n");
if (env->flags & POWERPC_FLAG_TGPR)
printf(" temporary GPRs\n");
else if (env->flags & POWERPC_FLAG_CE)
printf(" critical input enable\n");
if (env->flags & POWERPC_FLAG_SE)
printf(" single-step trace mode\n");
else if (env->flags & POWERPC_FLAG_DWE)
printf(" debug wait enable\n");
else if (env->flags & POWERPC_FLAG_UBLE)
printf(" user BTB lock enable\n");
if (env->flags & POWERPC_FLAG_BE)
printf(" branch-step trace mode\n");
else if (env->flags & POWERPC_FLAG_DE)
printf(" debug interrupt enable\n");
if (env->flags & POWERPC_FLAG_PX)
printf(" inclusive protection\n");
else if (env->flags & POWERPC_FLAG_PMM)
printf(" performance monitor mark\n");
if (env->flags == POWERPC_FLAG_NONE)
printf(" none\n");
printf(" Time-base/decrementer clock source: %s\n",
env->flags & POWERPC_FLAG_RTC_CLK ? "RTC clock" : "bus clock");
dump_ppc_insns(env);
dump_ppc_sprs(env);
fflush(stdout);
#endif
The vulnerability label is: Vulnerable |
devign_test_set_data_9443 | static void ram_init(target_phys_addr_t addr, ram_addr_t RAM_size)
{
DeviceState *dev;
SysBusDevice *s;
RamDevice *d;
/* allocate RAM */
dev = qdev_create(NULL, "memory");
s = sysbus_from_qdev(dev);
d = FROM_SYSBUS(RamDevice, s);
d->size = RAM_size;
qdev_init(dev);
sysbus_mmio_map(s, 0, addr);
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9445 | QemuOpts *qemu_chr_parse_compat(const char *label, const char *filename)
{
char host[65], port[33], width[8], height[8];
int pos;
const char *p;
QemuOpts *opts;
Error *local_err = NULL;
opts = qemu_opts_create(qemu_find_opts("chardev"), label, 1, &local_err);
if (error_is_set(&local_err)) {
qerror_report_err(local_err);
error_free(local_err);
return NULL;
}
if (strstart(filename, "mon:", &p)) {
filename = p;
qemu_opt_set(opts, "mux", "on");
if (strcmp(filename, "stdio") == 0) {
/* Monitor is muxed to stdio: do not exit on Ctrl+C by default
* but pass it to the guest. Handle this only for compat syntax,
* for -chardev syntax we have special option for this.
* This is what -nographic did, redirecting+muxing serial+monitor
* to stdio causing Ctrl+C to be passed to guest. */
qemu_opt_set(opts, "signal", "off");
}
}
if (strcmp(filename, "null") == 0 ||
strcmp(filename, "pty") == 0 ||
strcmp(filename, "msmouse") == 0 ||
strcmp(filename, "braille") == 0 ||
strcmp(filename, "stdio") == 0) {
qemu_opt_set(opts, "backend", filename);
return opts;
}
if (strstart(filename, "vc", &p)) {
qemu_opt_set(opts, "backend", "vc");
if (*p == ':') {
if (sscanf(p+1, "%8[0-9]x%8[0-9]", width, height) == 2) {
/* pixels */
qemu_opt_set(opts, "width", width);
qemu_opt_set(opts, "height", height);
} else if (sscanf(p+1, "%8[0-9]Cx%8[0-9]C", width, height) == 2) {
/* chars */
qemu_opt_set(opts, "cols", width);
qemu_opt_set(opts, "rows", height);
} else {
goto fail;
}
}
return opts;
}
if (strcmp(filename, "con:") == 0) {
qemu_opt_set(opts, "backend", "console");
return opts;
}
if (strstart(filename, "COM", NULL)) {
qemu_opt_set(opts, "backend", "serial");
qemu_opt_set(opts, "path", filename);
return opts;
}
if (strstart(filename, "file:", &p)) {
qemu_opt_set(opts, "backend", "file");
qemu_opt_set(opts, "path", p);
return opts;
}
if (strstart(filename, "pipe:", &p)) {
qemu_opt_set(opts, "backend", "pipe");
qemu_opt_set(opts, "path", p);
return opts;
}
if (strstart(filename, "tcp:", &p) ||
strstart(filename, "telnet:", &p)) {
if (sscanf(p, "%64[^:]:%32[^,]%n", host, port, &pos) < 2) {
host[0] = 0;
if (sscanf(p, ":%32[^,]%n", port, &pos) < 1)
goto fail;
}
qemu_opt_set(opts, "backend", "socket");
qemu_opt_set(opts, "host", host);
qemu_opt_set(opts, "port", port);
if (p[pos] == ',') {
if (qemu_opts_do_parse(opts, p+pos+1, NULL) != 0)
goto fail;
}
if (strstart(filename, "telnet:", &p))
qemu_opt_set(opts, "telnet", "on");
return opts;
}
if (strstart(filename, "udp:", &p)) {
qemu_opt_set(opts, "backend", "udp");
if (sscanf(p, "%64[^:]:%32[^@,]%n", host, port, &pos) < 2) {
host[0] = 0;
if (sscanf(p, ":%32[^@,]%n", port, &pos) < 1) {
goto fail;
}
}
qemu_opt_set(opts, "host", host);
qemu_opt_set(opts, "port", port);
if (p[pos] == '@') {
p += pos + 1;
if (sscanf(p, "%64[^:]:%32[^,]%n", host, port, &pos) < 2) {
host[0] = 0;
if (sscanf(p, ":%32[^,]%n", port, &pos) < 1) {
goto fail;
}
}
qemu_opt_set(opts, "localaddr", host);
qemu_opt_set(opts, "localport", port);
}
return opts;
}
if (strstart(filename, "unix:", &p)) {
qemu_opt_set(opts, "backend", "socket");
if (qemu_opts_do_parse(opts, p, "path") != 0)
goto fail;
return opts;
}
if (strstart(filename, "/dev/parport", NULL) ||
strstart(filename, "/dev/ppi", NULL)) {
qemu_opt_set(opts, "backend", "parport");
qemu_opt_set(opts, "path", filename);
return opts;
}
if (strstart(filename, "/dev/", NULL)) {
qemu_opt_set(opts, "backend", "tty");
qemu_opt_set(opts, "path", filename);
return opts;
}
fail:
qemu_opts_del(opts);
return NULL;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9457 | POWERPC_FAMILY(POWER8)(ObjectClass *oc, void *data)
{
DeviceClass *dc = DEVICE_CLASS(oc);
PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
dc->fw_name = "PowerPC,POWER8";
dc->desc = "POWER8";
pcc->pvr = CPU_POWERPC_POWER8_BASE;
pcc->pvr_mask = CPU_POWERPC_POWER8_MASK;
pcc->init_proc = init_proc_POWER7;
pcc->check_pow = check_pow_nocheck;
pcc->insns_flags = PPC_INSNS_BASE | PPC_STRING | PPC_MFTB |
PPC_FLOAT | PPC_FLOAT_FSEL | PPC_FLOAT_FRES |
PPC_FLOAT_FSQRT | PPC_FLOAT_FRSQRTE |
PPC_FLOAT_STFIWX |
PPC_CACHE | PPC_CACHE_ICBI | PPC_CACHE_DCBZ |
PPC_MEM_SYNC | PPC_MEM_EIEIO |
PPC_MEM_TLBIE | PPC_MEM_TLBSYNC |
PPC_64B | PPC_ALTIVEC |
PPC_SEGMENT_64B | PPC_SLBI |
PPC_POPCNTB | PPC_POPCNTWD;
pcc->insns_flags2 = PPC2_VSX | PPC2_VSX207 | PPC2_DFP | PPC2_DBRX |
PPC2_PERM_ISA206 | PPC2_DIVE_ISA206 |
PPC2_ATOMIC_ISA206 | PPC2_FP_CVT_ISA206;
pcc->msr_mask = 0x800000000284FF36ULL;
pcc->mmu_model = POWERPC_MMU_2_06;
#if defined(CONFIG_SOFTMMU)
pcc->handle_mmu_fault = ppc_hash64_handle_mmu_fault;
#endif
pcc->excp_model = POWERPC_EXCP_POWER7;
pcc->bus_model = PPC_FLAGS_INPUT_POWER7;
pcc->bfd_mach = bfd_mach_ppc64;
pcc->flags = POWERPC_FLAG_VRE | POWERPC_FLAG_SE |
POWERPC_FLAG_BE | POWERPC_FLAG_PMM |
POWERPC_FLAG_BUS_CLK | POWERPC_FLAG_CFAR |
POWERPC_FLAG_VSX;
pcc->l1_dcache_size = 0x8000;
pcc->l1_icache_size = 0x8000;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9482 | static int parse_object_segment(AVCodecContext *avctx,
const uint8_t *buf, int buf_size)
{
PGSSubContext *ctx = avctx->priv_data;
PGSSubObject *object;
uint8_t sequence_desc;
unsigned int rle_bitmap_len, width, height;
int id;
if (buf_size <= 4)
return AVERROR_INVALIDDATA;
buf_size -= 4;
id = bytestream_get_be16(&buf);
object = find_object(id, &ctx->objects);
if (!object) {
if (ctx->objects.count >= MAX_EPOCH_OBJECTS) {
av_log(avctx, AV_LOG_ERROR, "Too many objects in epoch\n");
return AVERROR_INVALIDDATA;
}
object = &ctx->objects.object[ctx->objects.count++];
object->id = id;
}
/* skip object version number */
buf += 1;
/* Read the Sequence Description to determine if start of RLE data or appended to previous RLE */
sequence_desc = bytestream_get_byte(&buf);
if (!(sequence_desc & 0x80)) {
/* Additional RLE data */
if (buf_size > object->rle_remaining_len)
return AVERROR_INVALIDDATA;
memcpy(object->rle + object->rle_data_len, buf, buf_size);
object->rle_data_len += buf_size;
object->rle_remaining_len -= buf_size;
return 0;
}
if (buf_size <= 7)
return AVERROR_INVALIDDATA;
buf_size -= 7;
/* Decode rle bitmap length, stored size includes width/height data */
rle_bitmap_len = bytestream_get_be24(&buf) - 2*2;
if (buf_size > rle_bitmap_len) {
av_log(avctx, AV_LOG_ERROR,
"Buffer dimension %d larger than the expected RLE data %d\n",
buf_size, rle_bitmap_len);
return AVERROR_INVALIDDATA;
}
/* Get bitmap dimensions from data */
width = bytestream_get_be16(&buf);
height = bytestream_get_be16(&buf);
/* Make sure the bitmap is not too large */
if (avctx->width < width || avctx->height < height || !width || !height) {
av_log(avctx, AV_LOG_ERROR, "Bitmap dimensions (%dx%d) invalid.\n", width, height);
return AVERROR_INVALIDDATA;
}
object->w = width;
object->h = height;
av_fast_padded_malloc(&object->rle, &object->rle_buffer_size, rle_bitmap_len);
if (!object->rle)
return AVERROR(ENOMEM);
memcpy(object->rle, buf, buf_size);
object->rle_data_len = buf_size;
object->rle_remaining_len = rle_bitmap_len - buf_size;
return 0;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9508 | static int qemu_rdma_register_and_get_keys(RDMAContext *rdma,
RDMALocalBlock *block, uintptr_t host_addr,
uint32_t *lkey, uint32_t *rkey, int chunk,
uint8_t *chunk_start, uint8_t *chunk_end)
{
if (block->mr) {
if (lkey) {
*lkey = block->mr->lkey;
}
if (rkey) {
*rkey = block->mr->rkey;
}
return 0;
}
/* allocate memory to store chunk MRs */
if (!block->pmr) {
block->pmr = g_malloc0(block->nb_chunks * sizeof(struct ibv_mr *));
}
/*
* If 'rkey', then we're the destination, so grant access to the source.
*
* If 'lkey', then we're the source VM, so grant access only to ourselves.
*/
if (!block->pmr[chunk]) {
uint64_t len = chunk_end - chunk_start;
trace_qemu_rdma_register_and_get_keys(len, chunk_start);
block->pmr[chunk] = ibv_reg_mr(rdma->pd,
chunk_start, len,
(rkey ? (IBV_ACCESS_LOCAL_WRITE |
IBV_ACCESS_REMOTE_WRITE) : 0));
if (!block->pmr[chunk]) {
perror("Failed to register chunk!");
fprintf(stderr, "Chunk details: block: %d chunk index %d"
" start %" PRIuPTR " end %" PRIuPTR
" host %" PRIuPTR
" local %" PRIuPTR " registrations: %d\n",
block->index, chunk, (uintptr_t)chunk_start,
(uintptr_t)chunk_end, host_addr,
(uintptr_t)block->local_host_addr,
rdma->total_registrations);
return -1;
}
rdma->total_registrations++;
}
if (lkey) {
*lkey = block->pmr[chunk]->lkey;
}
if (rkey) {
*rkey = block->pmr[chunk]->rkey;
}
return 0;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9509 | static void vga_draw_graphic(VGAState *s, int full_update)
{
int y1, y, update, page_min, page_max, linesize, y_start, double_scan, mask, depth;
int width, height, shift_control, line_offset, page0, page1, bwidth, bits;
int disp_width, multi_scan, multi_run;
uint8_t *d;
uint32_t v, addr1, addr;
vga_draw_line_func *vga_draw_line;
full_update |= update_basic_params(s);
if (!full_update)
vga_sync_dirty_bitmap(s);
s->get_resolution(s, &width, &height);
disp_width = width;
shift_control = (s->gr[0x05] >> 5) & 3;
double_scan = (s->cr[0x09] >> 7);
if (shift_control != 1) {
multi_scan = (((s->cr[0x09] & 0x1f) + 1) << double_scan) - 1;
} else {
/* in CGA modes, multi_scan is ignored */
/* XXX: is it correct ? */
multi_scan = double_scan;
}
multi_run = multi_scan;
if (shift_control != s->shift_control ||
double_scan != s->double_scan) {
full_update = 1;
s->shift_control = shift_control;
s->double_scan = double_scan;
}
if (shift_control == 0) {
full_update |= update_palette16(s);
if (s->sr[0x01] & 8) {
v = VGA_DRAW_LINE4D2;
disp_width <<= 1;
} else {
v = VGA_DRAW_LINE4;
}
bits = 4;
} else if (shift_control == 1) {
full_update |= update_palette16(s);
if (s->sr[0x01] & 8) {
v = VGA_DRAW_LINE2D2;
disp_width <<= 1;
} else {
v = VGA_DRAW_LINE2;
}
bits = 4;
} else {
switch(s->get_bpp(s)) {
default:
case 0:
full_update |= update_palette256(s);
v = VGA_DRAW_LINE8D2;
bits = 4;
break;
case 8:
full_update |= update_palette256(s);
v = VGA_DRAW_LINE8;
bits = 8;
break;
case 15:
v = VGA_DRAW_LINE15;
bits = 16;
break;
case 16:
v = VGA_DRAW_LINE16;
bits = 16;
break;
case 24:
v = VGA_DRAW_LINE24;
bits = 24;
break;
case 32:
v = VGA_DRAW_LINE32;
bits = 32;
break;
}
}
vga_draw_line = vga_draw_line_table[v * NB_DEPTHS + get_depth_index(s->ds)];
depth = s->get_bpp(s);
if (s->line_offset != s->last_line_offset ||
disp_width != s->last_width ||
height != s->last_height ||
s->last_depth != depth) {
if (depth == 16 || depth == 32) {
if (is_graphic_console()) {
qemu_free_displaysurface(s->ds->surface);
s->ds->surface = qemu_create_displaysurface_from(disp_width, height, depth,
s->line_offset,
s->vram_ptr + (s->start_addr * 4));
dpy_resize(s->ds);
} else {
qemu_console_resize(s->ds, disp_width, height);
}
} else {
qemu_console_resize(s->ds, disp_width, height);
}
s->last_scr_width = disp_width;
s->last_scr_height = height;
s->last_width = disp_width;
s->last_height = height;
s->last_line_offset = s->line_offset;
s->last_depth = depth;
full_update = 1;
} else if (is_graphic_console() && is_buffer_shared(s->ds->surface) &&
(full_update || s->ds->surface->data != s->vram_ptr + (s->start_addr * 4))) {
s->ds->surface->data = s->vram_ptr + (s->start_addr * 4);
dpy_setdata(s->ds);
}
s->rgb_to_pixel =
rgb_to_pixel_dup_table[get_depth_index(s->ds)];
if (!is_buffer_shared(s->ds->surface) && s->cursor_invalidate)
s->cursor_invalidate(s);
line_offset = s->line_offset;
#if 0
printf("w=%d h=%d v=%d line_offset=%d cr[0x09]=0x%02x cr[0x17]=0x%02x linecmp=%d sr[0x01]=0x%02x\n",
width, height, v, line_offset, s->cr[9], s->cr[0x17], s->line_compare, s->sr[0x01]);
#endif
addr1 = (s->start_addr * 4);
bwidth = (width * bits + 7) / 8;
y_start = -1;
page_min = 0x7fffffff;
page_max = -1;
d = ds_get_data(s->ds);
linesize = ds_get_linesize(s->ds);
y1 = 0;
for(y = 0; y < height; y++) {
addr = addr1;
if (!(s->cr[0x17] & 1)) {
int shift;
/* CGA compatibility handling */
shift = 14 + ((s->cr[0x17] >> 6) & 1);
addr = (addr & ~(1 << shift)) | ((y1 & 1) << shift);
}
if (!(s->cr[0x17] & 2)) {
addr = (addr & ~0x8000) | ((y1 & 2) << 14);
}
page0 = s->vram_offset + (addr & TARGET_PAGE_MASK);
page1 = s->vram_offset + ((addr + bwidth - 1) & TARGET_PAGE_MASK);
update = full_update |
cpu_physical_memory_get_dirty(page0, VGA_DIRTY_FLAG) |
cpu_physical_memory_get_dirty(page1, VGA_DIRTY_FLAG);
if ((page1 - page0) > TARGET_PAGE_SIZE) {
/* if wide line, can use another page */
update |= cpu_physical_memory_get_dirty(page0 + TARGET_PAGE_SIZE,
VGA_DIRTY_FLAG);
}
/* explicit invalidation for the hardware cursor */
update |= (s->invalidated_y_table[y >> 5] >> (y & 0x1f)) & 1;
if (update) {
if (y_start < 0)
y_start = y;
if (page0 < page_min)
page_min = page0;
if (page1 > page_max)
page_max = page1;
if (!(is_buffer_shared(s->ds->surface))) {
vga_draw_line(s, d, s->vram_ptr + addr, width);
if (s->cursor_draw_line)
s->cursor_draw_line(s, d, y);
}
} else {
if (y_start >= 0) {
/* flush to display */
dpy_update(s->ds, 0, y_start,
disp_width, y - y_start);
y_start = -1;
}
}
if (!multi_run) {
mask = (s->cr[0x17] & 3) ^ 3;
if ((y1 & mask) == mask)
addr1 += line_offset;
y1++;
multi_run = multi_scan;
} else {
multi_run--;
}
/* line compare acts on the displayed lines */
if (y == s->line_compare)
addr1 = 0;
d += linesize;
}
if (y_start >= 0) {
/* flush to display */
dpy_update(s->ds, 0, y_start,
disp_width, y - y_start);
}
/* reset modified pages */
if (page_max != -1) {
cpu_physical_memory_reset_dirty(page_min, page_max + TARGET_PAGE_SIZE,
VGA_DIRTY_FLAG);
}
memset(s->invalidated_y_table, 0, ((height + 31) >> 5) * 4);
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9510 | static int qemu_signalfd_compat(const sigset_t *mask)
{
pthread_attr_t attr;
pthread_t tid;
struct sigfd_compat_info *info;
int fds[2];
info = malloc(sizeof(*info));
if (info == NULL) {
errno = ENOMEM;
return -1;
}
if (pipe(fds) == -1) {
free(info);
return -1;
}
memcpy(&info->mask, mask, sizeof(*mask));
info->fd = fds[1];
pthread_attr_init(&attr);
pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED);
pthread_create(&tid, &attr, sigwait_compat, info);
pthread_attr_destroy(&attr);
return fds[0];
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9516 | static int protocol_client_auth_sasl_start_len(VncState *vs, uint8_t *data, size_t len)
{
uint32_t startlen = read_u32(data, 0);
VNC_DEBUG("Got client start len %d\n", startlen);
if (startlen > SASL_DATA_MAX_LEN) {
VNC_DEBUG("Too much SASL data %d\n", startlen);
vnc_client_error(vs);
return -1;
}
if (startlen == 0)
return protocol_client_auth_sasl_start(vs, NULL, 0);
vnc_read_when(vs, protocol_client_auth_sasl_start, startlen);
return 0;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9528 | static inline void mv_pred_direct(AVSContext *h, cavs_vector *pmv_fw,
cavs_vector *col_mv)
{
cavs_vector *pmv_bw = pmv_fw + MV_BWD_OFFS;
int den = h->direct_den[col_mv->ref];
int m = FF_SIGNBIT(col_mv->x);
pmv_fw->dist = h->dist[1];
pmv_bw->dist = h->dist[0];
pmv_fw->ref = 1;
pmv_bw->ref = 0;
/* scale the co-located motion vector according to its temporal span */
pmv_fw->x = (((den + (den * col_mv->x * pmv_fw->dist ^ m) - m - 1) >> 14) ^ m) - m;
pmv_bw->x = m - (((den + (den * col_mv->x * pmv_bw->dist ^ m) - m - 1) >> 14) ^ m);
m = FF_SIGNBIT(col_mv->y);
pmv_fw->y = (((den + (den * col_mv->y * pmv_fw->dist ^ m) - m - 1) >> 14) ^ m) - m;
pmv_bw->y = m - (((den + (den * col_mv->y * pmv_bw->dist ^ m) - m - 1) >> 14) ^ m);
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9533 | static inline void h264_deblock_q1(register vector unsigned char p0,
register vector unsigned char p1,
register vector unsigned char p2,
register vector unsigned char q0,
register vector unsigned char tc0) {
register vector unsigned char average = vec_avg(p0, q0);
register vector unsigned char temp;
register vector unsigned char uncliped;
register vector unsigned char ones;
register vector unsigned char max;
register vector unsigned char min;
temp = vec_xor(average, p2);
average = vec_avg(average, p2); /*avg(p2, avg(p0, q0)) */
ones = vec_splat_u8(1);
temp = vec_and(temp, ones); /*(p2^avg(p0, q0)) & 1 */
uncliped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
max = vec_adds(p1, tc0);
min = vec_subs(p1, tc0);
p1 = vec_max(min, uncliped);
p1 = vec_min(max, p1);
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9546 | static inline void gen_branch_a(DisasContext *dc, target_ulong pc1,
target_ulong pc2, TCGv r_cond)
{
int l1;
l1 = gen_new_label();
tcg_gen_brcondi_tl(TCG_COND_EQ, r_cond, 0, l1);
gen_goto_tb(dc, 0, pc2, pc1);
gen_set_label(l1);
gen_goto_tb(dc, 1, pc2 + 4, pc2 + 8);
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9548 | uint64_t qcow2_alloc_cluster_offset(BlockDriverState *bs,
uint64_t offset,
int n_start, int n_end,
int *num, QCowL2Meta *m)
{
BDRVQcowState *s = bs->opaque;
int l2_index, ret;
uint64_t l2_offset, *l2_table, cluster_offset;
int nb_clusters, i = 0;
QCowL2Meta *old_alloc;
ret = get_cluster_table(bs, offset, &l2_table, &l2_offset, &l2_index);
if (ret == 0)
return 0;
nb_clusters = size_to_clusters(s, n_end << 9);
nb_clusters = MIN(nb_clusters, s->l2_size - l2_index);
cluster_offset = be64_to_cpu(l2_table[l2_index]);
/* We keep all QCOW_OFLAG_COPIED clusters */
if (cluster_offset & QCOW_OFLAG_COPIED) {
nb_clusters = count_contiguous_clusters(nb_clusters, s->cluster_size,
&l2_table[l2_index], 0, 0);
cluster_offset &= ~QCOW_OFLAG_COPIED;
m->nb_clusters = 0;
goto out;
}
/* for the moment, multiple compressed clusters are not managed */
if (cluster_offset & QCOW_OFLAG_COMPRESSED)
nb_clusters = 1;
/* how many available clusters ? */
while (i < nb_clusters) {
i += count_contiguous_clusters(nb_clusters - i, s->cluster_size,
&l2_table[l2_index], i, 0);
if(be64_to_cpu(l2_table[l2_index + i]))
break;
i += count_contiguous_free_clusters(nb_clusters - i,
&l2_table[l2_index + i]);
cluster_offset = be64_to_cpu(l2_table[l2_index + i]);
if ((cluster_offset & QCOW_OFLAG_COPIED) ||
(cluster_offset & QCOW_OFLAG_COMPRESSED))
break;
}
nb_clusters = i;
/*
* Check if there already is an AIO write request in flight which allocates
* the same cluster. In this case we need to wait until the previous
* request has completed and updated the L2 table accordingly.
*/
LIST_FOREACH(old_alloc, &s->cluster_allocs, next_in_flight) {
uint64_t end_offset = offset + nb_clusters * s->cluster_size;
uint64_t old_offset = old_alloc->offset;
uint64_t old_end_offset = old_alloc->offset +
old_alloc->nb_clusters * s->cluster_size;
if (end_offset < old_offset || offset > old_end_offset) {
/* No intersection */
} else {
if (offset < old_offset) {
/* Stop at the start of a running allocation */
nb_clusters = (old_offset - offset) >> s->cluster_bits;
} else {
nb_clusters = 0;
}
if (nb_clusters == 0) {
/* Set dependency and wait for a callback */
m->depends_on = old_alloc;
m->nb_clusters = 0;
*num = 0;
return 0;
}
}
}
if (!nb_clusters) {
abort();
}
LIST_INSERT_HEAD(&s->cluster_allocs, m, next_in_flight);
/* allocate a new cluster */
cluster_offset = qcow2_alloc_clusters(bs, nb_clusters * s->cluster_size);
/* save info needed for meta data update */
m->offset = offset;
m->n_start = n_start;
m->nb_clusters = nb_clusters;
out:
m->nb_available = MIN(nb_clusters << (s->cluster_bits - 9), n_end);
*num = m->nb_available - n_start;
return cluster_offset;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9574 | static int tx_consume(Rocker *r, DescInfo *info)
{
PCIDevice *dev = PCI_DEVICE(r);
char *buf = desc_get_buf(info, true);
RockerTlv *tlv_frag;
RockerTlv *tlvs[ROCKER_TLV_TX_MAX + 1];
struct iovec iov[ROCKER_TX_FRAGS_MAX] = { { 0, }, };
uint32_t pport;
uint32_t port;
uint16_t tx_offload = ROCKER_TX_OFFLOAD_NONE;
uint16_t tx_l3_csum_off = 0;
uint16_t tx_tso_mss = 0;
uint16_t tx_tso_hdr_len = 0;
int iovcnt = 0;
int err = ROCKER_OK;
int rem;
int i;
if (!buf) {
return -ROCKER_ENXIO;
}
rocker_tlv_parse(tlvs, ROCKER_TLV_TX_MAX, buf, desc_tlv_size(info));
if (!tlvs[ROCKER_TLV_TX_FRAGS]) {
return -ROCKER_EINVAL;
}
pport = rocker_get_pport_by_tx_ring(r, desc_get_ring(info));
if (!fp_port_from_pport(pport, &port)) {
return -ROCKER_EINVAL;
}
if (tlvs[ROCKER_TLV_TX_OFFLOAD]) {
tx_offload = rocker_tlv_get_u8(tlvs[ROCKER_TLV_TX_OFFLOAD]);
}
switch (tx_offload) {
case ROCKER_TX_OFFLOAD_L3_CSUM:
if (!tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) {
return -ROCKER_EINVAL;
}
break;
case ROCKER_TX_OFFLOAD_TSO:
if (!tlvs[ROCKER_TLV_TX_TSO_MSS] ||
!tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) {
return -ROCKER_EINVAL;
}
break;
}
if (tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]) {
tx_l3_csum_off = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_L3_CSUM_OFF]);
}
if (tlvs[ROCKER_TLV_TX_TSO_MSS]) {
tx_tso_mss = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_MSS]);
}
if (tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]) {
tx_tso_hdr_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_TSO_HDR_LEN]);
}
rocker_tlv_for_each_nested(tlv_frag, tlvs[ROCKER_TLV_TX_FRAGS], rem) {
hwaddr frag_addr;
uint16_t frag_len;
if (rocker_tlv_type(tlv_frag) != ROCKER_TLV_TX_FRAG) {
err = -ROCKER_EINVAL;
goto err_bad_attr;
}
rocker_tlv_parse_nested(tlvs, ROCKER_TLV_TX_FRAG_ATTR_MAX, tlv_frag);
if (!tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR] ||
!tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]) {
err = -ROCKER_EINVAL;
goto err_bad_attr;
}
frag_addr = rocker_tlv_get_le64(tlvs[ROCKER_TLV_TX_FRAG_ATTR_ADDR]);
frag_len = rocker_tlv_get_le16(tlvs[ROCKER_TLV_TX_FRAG_ATTR_LEN]);
iov[iovcnt].iov_len = frag_len;
iov[iovcnt].iov_base = g_malloc(frag_len);
if (!iov[iovcnt].iov_base) {
err = -ROCKER_ENOMEM;
goto err_no_mem;
}
if (pci_dma_read(dev, frag_addr, iov[iovcnt].iov_base,
iov[iovcnt].iov_len)) {
err = -ROCKER_ENXIO;
goto err_bad_io;
}
if (++iovcnt > ROCKER_TX_FRAGS_MAX) {
goto err_too_many_frags;
}
}
if (iovcnt) {
/* XXX perform Tx offloads */
/* XXX silence compiler for now */
tx_l3_csum_off += tx_tso_mss = tx_tso_hdr_len = 0;
}
err = fp_port_eg(r->fp_port[port], iov, iovcnt);
err_too_many_frags:
err_bad_io:
err_no_mem:
err_bad_attr:
for (i = 0; i < ROCKER_TX_FRAGS_MAX; i++) {
if (iov[i].iov_base) {
g_free(iov[i].iov_base);
}
}
return err;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9576 | AVFilterFormats *avfilter_all_colorspaces(void)
{
return avfilter_make_format_list(35,
PIX_FMT_YUV444P, PIX_FMT_YUV422P, PIX_FMT_YUV420P,
PIX_FMT_YUV411P, PIX_FMT_YUV410P,
PIX_FMT_YUYV422, PIX_FMT_UYVY422, PIX_FMT_UYYVYY411,
PIX_FMT_YUVJ444P, PIX_FMT_YUVJ422P, PIX_FMT_YUVJ420P,
PIX_FMT_YUV440P, PIX_FMT_YUVJ440P,
PIX_FMT_RGB32, PIX_FMT_BGR32,
PIX_FMT_RGB32_1, PIX_FMT_BGR32_1,
PIX_FMT_RGB24, PIX_FMT_BGR24,
PIX_FMT_RGB565, PIX_FMT_BGR565,
PIX_FMT_RGB555, PIX_FMT_BGR555,
PIX_FMT_RGB8, PIX_FMT_BGR8,
PIX_FMT_RGB4_BYTE,PIX_FMT_BGR4_BYTE,
PIX_FMT_GRAY16BE, PIX_FMT_GRAY16LE,
PIX_FMT_GRAY8, PIX_FMT_PAL8,
PIX_FMT_MONOWHITE,PIX_FMT_MONOBLACK
PIX_FMT_NV12, PIX_FMT_NV21);
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9584 | MemoryRegion *escc_init(target_phys_addr_t base, qemu_irq irqA, qemu_irq irqB,
CharDriverState *chrA, CharDriverState *chrB,
int clock, int it_shift)
{
DeviceState *dev;
SysBusDevice *s;
SerialState *d;
dev = qdev_create(NULL, "escc");
qdev_prop_set_uint32(dev, "disabled", 0);
qdev_prop_set_uint32(dev, "frequency", clock);
qdev_prop_set_uint32(dev, "it_shift", it_shift);
qdev_prop_set_chr(dev, "chrB", chrB);
qdev_prop_set_chr(dev, "chrA", chrA);
qdev_prop_set_uint32(dev, "chnBtype", ser);
qdev_prop_set_uint32(dev, "chnAtype", ser);
qdev_init_nofail(dev);
s = sysbus_from_qdev(dev);
sysbus_connect_irq(s, 0, irqB);
sysbus_connect_irq(s, 1, irqA);
if (base) {
sysbus_mmio_map(s, 0, base);
}
d = FROM_SYSBUS(SerialState, s);
return &d->mmio;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9598 | static void free_geotags(TiffContext *const s)
{
int i;
for (i = 0; i < s->geotag_count; i++) {
if (s->geotags[i].val)
av_freep(&s->geotags[i].val);
}
av_freep(&s->geotags);
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9607 | void ff_bink_idct_c(DCTELEM *block)
{
int i;
DCTELEM temp[64];
for (i = 0; i < 8; i++)
bink_idct_col(&temp[i], &block[i]);
for (i = 0; i < 8; i++) {
IDCT_ROW( (&block[8*i]), (&temp[8*i]) );
}
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9616 | PCIBus *pci_pmac_init(qemu_irq *pic)
{
DeviceState *dev;
SysBusDevice *s;
UNINState *d;
/* Use values found on a real PowerMac */
/* Uninorth main bus */
dev = qdev_create(NULL, "Uni-north main");
qdev_init_nofail(dev);
s = sysbus_from_qdev(dev);
d = FROM_SYSBUS(UNINState, s);
d->host_state.bus = pci_register_bus(&d->busdev.qdev, "pci",
pci_unin_set_irq, pci_unin_map_irq,
pic, 11 << 3, 4);
pci_create_simple(d->host_state.bus, 11 << 3, "Uni-north main");
sysbus_mmio_map(s, 0, 0xf2800000);
sysbus_mmio_map(s, 1, 0xf2c00000);
/* DEC 21154 bridge */
#if 0
/* XXX: not activated as PPC BIOS doesn't handle multiple buses properly */
pci_create_simple(d->host_state.bus, 12 << 3, "DEC 21154");
#endif
/* Uninorth AGP bus */
pci_create_simple(d->host_state.bus, 13 << 3, "Uni-north AGP");
/* Uninorth internal bus */
#if 0
/* XXX: not needed for now */
pci_create_simple(d->host_state.bus, 14 << 3, "Uni-north internal");
#endif
return d->host_state.bus;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9619 | static void json_message_process_token(JSONLexer *lexer, GString *input,
JSONTokenType type, int x, int y)
{
JSONMessageParser *parser = container_of(lexer, JSONMessageParser, lexer);
QDict *dict;
switch (type) {
case JSON_LCURLY:
parser->brace_count++;
break;
case JSON_RCURLY:
parser->brace_count--;
break;
case JSON_LSQUARE:
parser->bracket_count++;
break;
case JSON_RSQUARE:
parser->bracket_count--;
break;
default:
break;
}
dict = qdict_new();
qdict_put(dict, "type", qint_from_int(type));
qdict_put(dict, "token", qstring_from_str(input->str));
qdict_put(dict, "x", qint_from_int(x));
qdict_put(dict, "y", qint_from_int(y));
parser->token_size += input->len;
g_queue_push_tail(parser->tokens, dict);
if (type == JSON_ERROR) {
goto out_emit_bad;
} else if (parser->brace_count < 0 ||
parser->bracket_count < 0 ||
(parser->brace_count == 0 &&
parser->bracket_count == 0)) {
goto out_emit;
} else if (parser->token_size > MAX_TOKEN_SIZE ||
parser->bracket_count + parser->brace_count > MAX_NESTING) {
/* Security consideration, we limit total memory allocated per object
* and the maximum recursion depth that a message can force.
*/
goto out_emit_bad;
}
return;
out_emit_bad:
/*
* Clear out token list and tell the parser to emit an error
* indication by passing it a NULL list
*/
json_message_free_tokens(parser);
out_emit:
/* send current list of tokens to parser and reset tokenizer */
parser->brace_count = 0;
parser->bracket_count = 0;
/* parser->emit takes ownership of parser->tokens. */
parser->emit(parser, parser->tokens);
parser->tokens = g_queue_new();
parser->token_size = 0;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9629 | static inline void ppc4xx_tlb_invalidate_virt(CPUPPCState *env,
target_ulong eaddr, uint32_t pid)
{
#if !defined(FLUSH_ALL_TLBS)
CPUState *cs = CPU(ppc_env_get_cpu(env));
ppcemb_tlb_t *tlb;
hwaddr raddr;
target_ulong page, end;
int i;
for (i = 0; i < env->nb_tlb; i++) {
tlb = &env->tlb.tlbe[i];
if (ppcemb_tlb_check(env, tlb, &raddr, eaddr, pid, 0, i) == 0) {
end = tlb->EPN + tlb->size;
for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE) {
tlb_flush_page(cs, page);
}
tlb->prot &= ~PAGE_VALID;
break;
}
}
#else
ppc4xx_tlb_invalidate_all(env);
#endif
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9630 | void pci_bridge_initfn(PCIDevice *dev, const char *typename)
{
PCIBus *parent = dev->bus;
PCIBridge *br = PCI_BRIDGE(dev);
PCIBus *sec_bus = &br->sec_bus;
pci_word_test_and_set_mask(dev->config + PCI_STATUS,
PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK);
/*
* TODO: We implement VGA Enable in the Bridge Control Register
* therefore per the PCI to PCI bridge spec we must also implement
* VGA Palette Snooping. When done, set this bit writable:
*
* pci_word_test_and_set_mask(dev->wmask + PCI_COMMAND,
* PCI_COMMAND_VGA_PALETTE);
*/
pci_config_set_class(dev->config, PCI_CLASS_BRIDGE_PCI);
dev->config[PCI_HEADER_TYPE] =
(dev->config[PCI_HEADER_TYPE] & PCI_HEADER_TYPE_MULTI_FUNCTION) |
PCI_HEADER_TYPE_BRIDGE;
pci_set_word(dev->config + PCI_SEC_STATUS,
PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK);
/*
* If we don't specify the name, the bus will be addressed as <id>.0, where
* id is the device id.
* Since PCI Bridge devices have a single bus each, we don't need the index:
* let users address the bus using the device name.
*/
if (!br->bus_name && dev->qdev.id && *dev->qdev.id) {
br->bus_name = dev->qdev.id;
}
qbus_create_inplace(sec_bus, sizeof(br->sec_bus), typename, DEVICE(dev),
br->bus_name);
sec_bus->parent_dev = dev;
sec_bus->map_irq = br->map_irq ? br->map_irq : pci_swizzle_map_irq_fn;
sec_bus->address_space_mem = &br->address_space_mem;
memory_region_init(&br->address_space_mem, OBJECT(br), "pci_bridge_pci", UINT64_MAX);
sec_bus->address_space_io = &br->address_space_io;
memory_region_init(&br->address_space_io, OBJECT(br), "pci_bridge_io", 65536);
br->windows = pci_bridge_region_init(br);
QLIST_INIT(&sec_bus->child);
QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling);
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9639 | void qemu_pixman_linebuf_fill(pixman_image_t *linebuf, pixman_image_t *fb,
int width, int y)
{
pixman_image_composite(PIXMAN_OP_SRC, fb, NULL, linebuf,
0, y, 0, 0, 0, 0, width, 1);
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9643 | static inline abi_long target_to_host_cmsg(struct msghdr *msgh,
struct target_msghdr *target_msgh)
{
struct cmsghdr *cmsg = CMSG_FIRSTHDR(msgh);
abi_long msg_controllen;
abi_ulong target_cmsg_addr;
struct target_cmsghdr *target_cmsg;
socklen_t space = 0;
msg_controllen = tswapal(target_msgh->msg_controllen);
if (msg_controllen < sizeof (struct target_cmsghdr))
goto the_end;
target_cmsg_addr = tswapal(target_msgh->msg_control);
target_cmsg = lock_user(VERIFY_READ, target_cmsg_addr, msg_controllen, 1);
if (!target_cmsg)
return -TARGET_EFAULT;
while (cmsg && target_cmsg) {
void *data = CMSG_DATA(cmsg);
void *target_data = TARGET_CMSG_DATA(target_cmsg);
int len = tswapal(target_cmsg->cmsg_len)
- TARGET_CMSG_ALIGN(sizeof (struct target_cmsghdr));
space += CMSG_SPACE(len);
if (space > msgh->msg_controllen) {
space -= CMSG_SPACE(len);
gemu_log("Host cmsg overflow\n");
break;
}
if (tswap32(target_cmsg->cmsg_level) == TARGET_SOL_SOCKET) {
cmsg->cmsg_level = SOL_SOCKET;
} else {
cmsg->cmsg_level = tswap32(target_cmsg->cmsg_level);
}
cmsg->cmsg_type = tswap32(target_cmsg->cmsg_type);
cmsg->cmsg_len = CMSG_LEN(len);
if (cmsg->cmsg_level != SOL_SOCKET || cmsg->cmsg_type != SCM_RIGHTS) {
gemu_log("Unsupported ancillary data: %d/%d\n", cmsg->cmsg_level, cmsg->cmsg_type);
memcpy(data, target_data, len);
} else {
int *fd = (int *)data;
int *target_fd = (int *)target_data;
int i, numfds = len / sizeof(int);
for (i = 0; i < numfds; i++)
fd[i] = tswap32(target_fd[i]);
}
cmsg = CMSG_NXTHDR(msgh, cmsg);
target_cmsg = TARGET_CMSG_NXTHDR(target_msgh, target_cmsg);
}
unlock_user(target_cmsg, target_cmsg_addr, 0);
the_end:
msgh->msg_controllen = space;
return 0;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9647 | static bool ga_open_pidfile(const char *pidfile)
{
int pidfd;
char pidstr[32];
pidfd = open(pidfile, O_CREAT|O_WRONLY, S_IRUSR|S_IWUSR);
if (pidfd == -1 || lockf(pidfd, F_TLOCK, 0)) {
g_critical("Cannot lock pid file, %s", strerror(errno));
if (pidfd != -1) {
close(pidfd);
}
return false;
}
if (ftruncate(pidfd, 0) || lseek(pidfd, 0, SEEK_SET)) {
g_critical("Failed to truncate pid file");
goto fail;
}
sprintf(pidstr, "%d", getpid());
if (write(pidfd, pidstr, strlen(pidstr)) != strlen(pidstr)) {
g_critical("Failed to write pid file");
goto fail;
}
return true;
fail:
unlink(pidfile);
return false;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9650 | static void handle_pending_signal(CPUArchState *cpu_env, int sig)
{
CPUState *cpu = ENV_GET_CPU(cpu_env);
abi_ulong handler;
sigset_t set, old_set;
target_sigset_t target_old_set;
struct target_sigaction *sa;
struct sigqueue *q;
TaskState *ts = cpu->opaque;
struct emulated_sigtable *k = &ts->sigtab[sig - 1];
trace_user_handle_signal(cpu_env, sig);
/* dequeue signal */
q = k->first;
k->first = q->next;
if (!k->first)
k->pending = 0;
sig = gdb_handlesig(cpu, sig);
if (!sig) {
sa = NULL;
handler = TARGET_SIG_IGN;
} else {
sa = &sigact_table[sig - 1];
handler = sa->_sa_handler;
}
if (ts->sigsegv_blocked && sig == TARGET_SIGSEGV) {
/* Guest has blocked SIGSEGV but we got one anyway. Assume this
* is a forced SIGSEGV (ie one the kernel handles via force_sig_info
* because it got a real MMU fault), and treat as if default handler.
*/
handler = TARGET_SIG_DFL;
}
if (handler == TARGET_SIG_DFL) {
/* default handler : ignore some signal. The other are job control or fatal */
if (sig == TARGET_SIGTSTP || sig == TARGET_SIGTTIN || sig == TARGET_SIGTTOU) {
kill(getpid(),SIGSTOP);
} else if (sig != TARGET_SIGCHLD &&
sig != TARGET_SIGURG &&
sig != TARGET_SIGWINCH &&
sig != TARGET_SIGCONT) {
force_sig(sig);
}
} else if (handler == TARGET_SIG_IGN) {
/* ignore sig */
} else if (handler == TARGET_SIG_ERR) {
force_sig(sig);
} else {
/* compute the blocked signals during the handler execution */
target_to_host_sigset(&set, &sa->sa_mask);
/* SA_NODEFER indicates that the current signal should not be
blocked during the handler */
if (!(sa->sa_flags & TARGET_SA_NODEFER))
sigaddset(&set, target_to_host_signal(sig));
/* block signals in the handler using Linux */
do_sigprocmask(SIG_BLOCK, &set, &old_set);
/* save the previous blocked signal state to restore it at the
end of the signal execution (see do_sigreturn) */
host_to_target_sigset_internal(&target_old_set, &old_set);
/* if the CPU is in VM86 mode, we restore the 32 bit values */
#if defined(TARGET_I386) && !defined(TARGET_X86_64)
{
CPUX86State *env = cpu_env;
if (env->eflags & VM_MASK)
save_v86_state(env);
}
#endif
/* prepare the stack frame of the virtual CPU */
#if defined(TARGET_ABI_MIPSN32) || defined(TARGET_ABI_MIPSN64) \
|| defined(TARGET_OPENRISC) || defined(TARGET_TILEGX)
/* These targets do not have traditional signals. */
setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
#else
if (sa->sa_flags & TARGET_SA_SIGINFO)
setup_rt_frame(sig, sa, &q->info, &target_old_set, cpu_env);
else
setup_frame(sig, sa, &target_old_set, cpu_env);
#endif
if (sa->sa_flags & TARGET_SA_RESETHAND) {
sa->_sa_handler = TARGET_SIG_DFL;
}
}
if (q != &k->info)
free_sigqueue(cpu_env, q);
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9651 | static void pcie_pci_bridge_write_config(PCIDevice *d,
uint32_t address, uint32_t val, int len)
{
pci_bridge_write_config(d, address, val, len);
msi_write_config(d, address, val, len);
shpc_cap_write_config(d, address, val, len);
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9670 | static ram_addr_t find_ram_offset(ram_addr_t size)
{
RAMBlock *block, *next_block;
ram_addr_t offset = RAM_ADDR_MAX, mingap = RAM_ADDR_MAX;
assert(size != 0); /* it would hand out same offset multiple times */
if (QTAILQ_EMPTY(&ram_list.blocks))
return 0;
QTAILQ_FOREACH(block, &ram_list.blocks, next) {
ram_addr_t end, next = RAM_ADDR_MAX;
end = block->offset + block->length;
QTAILQ_FOREACH(next_block, &ram_list.blocks, next) {
if (next_block->offset >= end) {
next = MIN(next, next_block->offset);
}
}
if (next - end >= size && next - end < mingap) {
offset = end;
mingap = next - end;
}
}
if (offset == RAM_ADDR_MAX) {
fprintf(stderr, "Failed to find gap of requested size: %" PRIu64 "\n",
(uint64_t)size);
abort();
}
return offset;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9683 | CharDriverState *qemu_chr_alloc(void)
{
CharDriverState *chr = g_malloc0(sizeof(CharDriverState));
qemu_mutex_init(&chr->chr_write_lock);
return chr;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9706 | static int scsi_disk_emulate_command(SCSIDiskReq *r, uint8_t *outbuf)
{
SCSIRequest *req = &r->req;
SCSIDiskState *s = DO_UPCAST(SCSIDiskState, qdev, req->dev);
uint64_t nb_sectors;
int buflen = 0;
int ret;
switch (req->cmd.buf[0]) {
case TEST_UNIT_READY:
if (!bdrv_is_inserted(s->bs))
goto not_ready;
break;
case REQUEST_SENSE:
if (req->cmd.xfer < 4)
goto illegal_request;
buflen = scsi_device_get_sense(&s->qdev, outbuf, req->cmd.xfer,
(req->cmd.buf[1] & 1) == 0);
break;
case INQUIRY:
buflen = scsi_disk_emulate_inquiry(req, outbuf);
if (buflen < 0)
goto illegal_request;
break;
case MODE_SENSE:
case MODE_SENSE_10:
buflen = scsi_disk_emulate_mode_sense(req, outbuf);
if (buflen < 0)
goto illegal_request;
break;
case READ_TOC:
buflen = scsi_disk_emulate_read_toc(req, outbuf);
if (buflen < 0)
goto illegal_request;
break;
case RESERVE:
if (req->cmd.buf[1] & 1)
goto illegal_request;
break;
case RESERVE_10:
if (req->cmd.buf[1] & 3)
goto illegal_request;
break;
case RELEASE:
if (req->cmd.buf[1] & 1)
goto illegal_request;
break;
case RELEASE_10:
if (req->cmd.buf[1] & 3)
goto illegal_request;
break;
case START_STOP:
if (s->qdev.type == TYPE_ROM && (req->cmd.buf[4] & 2)) {
/* load/eject medium */
bdrv_eject(s->bs, !(req->cmd.buf[4] & 1));
}
break;
case ALLOW_MEDIUM_REMOVAL:
bdrv_set_locked(s->bs, req->cmd.buf[4] & 1);
break;
case READ_CAPACITY_10:
/* The normal LEN field for this command is zero. */
memset(outbuf, 0, 8);
bdrv_get_geometry(s->bs, &nb_sectors);
if (!nb_sectors)
goto not_ready;
nb_sectors /= s->cluster_size;
/* Returned value is the address of the last sector. */
nb_sectors--;
/* Remember the new size for read/write sanity checking. */
s->max_lba = nb_sectors;
/* Clip to 2TB, instead of returning capacity modulo 2TB. */
if (nb_sectors > UINT32_MAX)
nb_sectors = UINT32_MAX;
outbuf[0] = (nb_sectors >> 24) & 0xff;
outbuf[1] = (nb_sectors >> 16) & 0xff;
outbuf[2] = (nb_sectors >> 8) & 0xff;
outbuf[3] = nb_sectors & 0xff;
outbuf[4] = 0;
outbuf[5] = 0;
outbuf[6] = s->cluster_size * 2;
outbuf[7] = 0;
buflen = 8;
break;
case SYNCHRONIZE_CACHE:
ret = bdrv_flush(s->bs);
if (ret < 0) {
if (scsi_handle_rw_error(r, -ret, SCSI_REQ_STATUS_RETRY_FLUSH)) {
return -1;
}
}
break;
case GET_CONFIGURATION:
memset(outbuf, 0, 8);
/* ??? This should probably return much more information. For now
just return the basic header indicating the CD-ROM profile. */
outbuf[7] = 8; // CD-ROM
buflen = 8;
break;
case SERVICE_ACTION_IN:
/* Service Action In subcommands. */
if ((req->cmd.buf[1] & 31) == 0x10) {
DPRINTF("SAI READ CAPACITY(16)\n");
memset(outbuf, 0, req->cmd.xfer);
bdrv_get_geometry(s->bs, &nb_sectors);
if (!nb_sectors)
goto not_ready;
nb_sectors /= s->cluster_size;
/* Returned value is the address of the last sector. */
nb_sectors--;
/* Remember the new size for read/write sanity checking. */
s->max_lba = nb_sectors;
outbuf[0] = (nb_sectors >> 56) & 0xff;
outbuf[1] = (nb_sectors >> 48) & 0xff;
outbuf[2] = (nb_sectors >> 40) & 0xff;
outbuf[3] = (nb_sectors >> 32) & 0xff;
outbuf[4] = (nb_sectors >> 24) & 0xff;
outbuf[5] = (nb_sectors >> 16) & 0xff;
outbuf[6] = (nb_sectors >> 8) & 0xff;
outbuf[7] = nb_sectors & 0xff;
outbuf[8] = 0;
outbuf[9] = 0;
outbuf[10] = s->cluster_size * 2;
outbuf[11] = 0;
outbuf[12] = 0;
outbuf[13] = get_physical_block_exp(&s->qdev.conf);
/* set TPE bit if the format supports discard */
if (s->qdev.conf.discard_granularity) {
outbuf[14] = 0x80;
}
/* Protection, exponent and lowest lba field left blank. */
buflen = req->cmd.xfer;
break;
}
DPRINTF("Unsupported Service Action In\n");
goto illegal_request;
case REPORT_LUNS:
if (req->cmd.xfer < 16)
goto illegal_request;
memset(outbuf, 0, 16);
outbuf[3] = 8;
buflen = 16;
break;
case VERIFY_10:
break;
default:
scsi_check_condition(r, SENSE_CODE(INVALID_OPCODE));
return -1;
}
return buflen;
not_ready:
if (!bdrv_is_inserted(s->bs)) {
scsi_check_condition(r, SENSE_CODE(NO_MEDIUM));
} else {
scsi_check_condition(r, SENSE_CODE(LUN_NOT_READY));
}
return -1;
illegal_request:
scsi_check_condition(r, SENSE_CODE(INVALID_FIELD));
return -1;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9707 | static void flush_queued_work(CPUState *cpu)
{
struct qemu_work_item *wi;
if (cpu->queued_work_first == NULL) {
return;
}
while ((wi = cpu->queued_work_first)) {
cpu->queued_work_first = wi->next;
wi->func(wi->data);
wi->done = true;
if (wi->free) {
g_free(wi);
}
}
cpu->queued_work_last = NULL;
qemu_cond_broadcast(&qemu_work_cond);
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9709 | static int validate_guest_space(unsigned long guest_base,
unsigned long guest_size)
{
unsigned long real_start, test_page_addr;
/* We need to check that we can force a fault on access to the
* commpage at 0xffff0fxx
*/
test_page_addr = guest_base + (0xffff0f00 & qemu_host_page_mask);
/* If the commpage lies within the already allocated guest space,
* then there is no way we can allocate it.
*/
if (test_page_addr >= guest_base
&& test_page_addr <= (guest_base + guest_size)) {
return -1;
}
/* Note it needs to be writeable to let us initialise it */
real_start = (unsigned long)
mmap((void *)test_page_addr, qemu_host_page_size,
PROT_READ | PROT_WRITE,
MAP_ANONYMOUS | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
/* If we can't map it then try another address */
if (real_start == -1ul) {
return 0;
}
if (real_start != test_page_addr) {
/* OS didn't put the page where we asked - unmap and reject */
munmap((void *)real_start, qemu_host_page_size);
return 0;
}
/* Leave the page mapped
* Populate it (mmap should have left it all 0'd)
*/
/* Kernel helper versions */
__put_user(5, (uint32_t *)g2h(0xffff0ffcul));
/* Now it's populated make it RO */
if (mprotect((void *)test_page_addr, qemu_host_page_size, PROT_READ)) {
perror("Protecting guest commpage");
exit(-1);
}
return 1; /* All good */
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9721 | long do_sigreturn(CPUCRISState *env)
{
struct target_signal_frame *frame;
abi_ulong frame_addr;
target_sigset_t target_set;
sigset_t set;
int i;
frame_addr = env->regs[R_SP];
/* Make sure the guest isn't playing games. */
if (!lock_user_struct(VERIFY_WRITE, frame, frame_addr, 1))
goto badframe;
/* Restore blocked signals */
if (__get_user(target_set.sig[0], &frame->sc.oldmask))
goto badframe;
for(i = 1; i < TARGET_NSIG_WORDS; i++) {
if (__get_user(target_set.sig[i], &frame->extramask[i - 1]))
goto badframe;
}
target_to_host_sigset_internal(&set, &target_set);
do_sigprocmask(SIG_SETMASK, &set, NULL);
restore_sigcontext(&frame->sc, env);
unlock_user_struct(frame, frame_addr, 0);
return env->regs[10];
badframe:
unlock_user_struct(frame, frame_addr, 0);
force_sig(TARGET_SIGSEGV);
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9722 | static void usbredir_interrupt_packet(void *priv, uint32_t id,
struct usb_redir_interrupt_packet_header *interrupt_packet,
uint8_t *data, int data_len)
{
USBRedirDevice *dev = priv;
uint8_t ep = interrupt_packet->endpoint;
DPRINTF("interrupt-in status %d ep %02X len %d id %u\n",
interrupt_packet->status, ep, data_len, id);
if (dev->endpoint[EP2I(ep)].type != USB_ENDPOINT_XFER_INT) {
ERROR("received int packet for non interrupt endpoint %02X\n", ep);
free(data);
return;
}
if (ep & USB_DIR_IN) {
if (dev->endpoint[EP2I(ep)].interrupt_started == 0) {
DPRINTF("received int packet while not started ep %02X\n", ep);
free(data);
return;
}
/* bufp_alloc also adds the packet to the ep queue */
bufp_alloc(dev, data, data_len, interrupt_packet->status, ep);
} else {
int len = interrupt_packet->length;
AsyncURB *aurb = async_find(dev, id);
if (!aurb) {
return;
}
if (aurb->interrupt_packet.endpoint != interrupt_packet->endpoint) {
ERROR("return int packet mismatch, please report this!\n");
len = USB_RET_NAK;
}
if (aurb->packet) {
aurb->packet->len = usbredir_handle_status(dev,
interrupt_packet->status, len);
usb_packet_complete(&dev->dev, aurb->packet);
}
async_free(dev, aurb);
}
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9727 | void mpeg1_encode_mb(MpegEncContext *s,
DCTELEM block[6][64],
int motion_x, int motion_y)
{
int i, cbp;
const int mb_x = s->mb_x;
const int mb_y = s->mb_y;
const int first_mb= mb_x == s->resync_mb_x && mb_y == s->resync_mb_y;
/* compute cbp */
cbp = 0;
for(i=0;i<6;i++) {
if (s->block_last_index[i] >= 0)
cbp |= 1 << (5 - i);
}
if (cbp == 0 && !first_mb && (mb_x != s->mb_width - 1 || (mb_y != s->mb_height - 1 && s->codec_id == CODEC_ID_MPEG1VIDEO)) &&
((s->pict_type == P_TYPE && s->mv_type == MV_TYPE_16X16 && (motion_x | motion_y) == 0) ||
(s->pict_type == B_TYPE && s->mv_dir == s->last_mv_dir && (((s->mv_dir & MV_DIR_FORWARD) ? ((s->mv[0][0][0] - s->last_mv[0][0][0])|(s->mv[0][0][1] - s->last_mv[0][0][1])) : 0) |
((s->mv_dir & MV_DIR_BACKWARD) ? ((s->mv[1][0][0] - s->last_mv[1][0][0])|(s->mv[1][0][1] - s->last_mv[1][0][1])) : 0)) == 0))) {
s->mb_skip_run++;
s->qscale -= s->dquant;
s->skip_count++;
s->misc_bits++;
s->last_bits++;
if(s->pict_type == P_TYPE){
s->last_mv[0][1][0]= s->last_mv[0][0][0]=
s->last_mv[0][1][1]= s->last_mv[0][0][1]= 0;
}
} else {
if(first_mb){
assert(s->mb_skip_run == 0);
encode_mb_skip_run(s, s->mb_x);
}else{
encode_mb_skip_run(s, s->mb_skip_run);
}
if (s->pict_type == I_TYPE) {
if(s->dquant && cbp){
put_mb_modes(s, 2, 1, 0, 0); /* macroblock_type : macroblock_quant = 1 */
put_bits(&s->pb, 5, s->qscale);
}else{
put_mb_modes(s, 1, 1, 0, 0); /* macroblock_type : macroblock_quant = 0 */
s->qscale -= s->dquant;
}
s->misc_bits+= get_bits_diff(s);
s->i_count++;
} else if (s->mb_intra) {
if(s->dquant && cbp){
put_mb_modes(s, 6, 0x01, 0, 0);
put_bits(&s->pb, 5, s->qscale);
}else{
put_mb_modes(s, 5, 0x03, 0, 0);
s->qscale -= s->dquant;
}
s->misc_bits+= get_bits_diff(s);
s->i_count++;
memset(s->last_mv, 0, sizeof(s->last_mv));
} else if (s->pict_type == P_TYPE) {
if(s->mv_type == MV_TYPE_16X16){
if (cbp != 0) {
if ((motion_x|motion_y) == 0) {
if(s->dquant){
put_mb_modes(s, 5, 1, 0, 0); /* macroblock_pattern & quant */
put_bits(&s->pb, 5, s->qscale);
}else{
put_mb_modes(s, 2, 1, 0, 0); /* macroblock_pattern only */
}
s->misc_bits+= get_bits_diff(s);
} else {
if(s->dquant){
put_mb_modes(s, 5, 2, 1, 0); /* motion + cbp */
put_bits(&s->pb, 5, s->qscale);
}else{
put_mb_modes(s, 1, 1, 1, 0); /* motion + cbp */
}
s->misc_bits+= get_bits_diff(s);
mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added
mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added
s->mv_bits+= get_bits_diff(s);
}
} else {
put_bits(&s->pb, 3, 1); /* motion only */
if (!s->frame_pred_frame_dct)
put_bits(&s->pb, 2, 2); /* motion_type: frame */
s->misc_bits+= get_bits_diff(s);
mpeg1_encode_motion(s, motion_x - s->last_mv[0][0][0], s->f_code); // RAL: f_code parameter added
mpeg1_encode_motion(s, motion_y - s->last_mv[0][0][1], s->f_code); // RAL: f_code parameter added
s->qscale -= s->dquant;
s->mv_bits+= get_bits_diff(s);
}
s->last_mv[0][1][0]= s->last_mv[0][0][0]= motion_x;
s->last_mv[0][1][1]= s->last_mv[0][0][1]= motion_y;
}else{
assert(!s->frame_pred_frame_dct && s->mv_type == MV_TYPE_FIELD);
if (cbp) {
if(s->dquant){
put_mb_modes(s, 5, 2, 1, 1); /* motion + cbp */
put_bits(&s->pb, 5, s->qscale);
}else{
put_mb_modes(s, 1, 1, 1, 1); /* motion + cbp */
}
} else {
put_bits(&s->pb, 3, 1); /* motion only */
put_bits(&s->pb, 2, 1); /* motion_type: field */
s->qscale -= s->dquant;
}
s->misc_bits+= get_bits_diff(s);
for(i=0; i<2; i++){
put_bits(&s->pb, 1, s->field_select[0][i]);
mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code);
mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code);
s->last_mv[0][i][0]= s->mv[0][i][0];
s->last_mv[0][i][1]= 2*s->mv[0][i][1];
}
s->mv_bits+= get_bits_diff(s);
}
if(cbp)
put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]);
s->f_count++;
} else{
static const int mb_type_len[4]={0,3,4,2}; //bak,for,bi
if(s->mv_type == MV_TYPE_16X16){
if (cbp){ // With coded bloc pattern
if (s->dquant) {
if(s->mv_dir == MV_DIR_FORWARD)
put_mb_modes(s, 6, 3, 1, 0);
else
put_mb_modes(s, mb_type_len[s->mv_dir]+3, 2, 1, 0);
put_bits(&s->pb, 5, s->qscale);
} else {
put_mb_modes(s, mb_type_len[s->mv_dir], 3, 1, 0);
}
}else{ // No coded bloc pattern
put_bits(&s->pb, mb_type_len[s->mv_dir], 2);
if (!s->frame_pred_frame_dct)
put_bits(&s->pb, 2, 2); /* motion_type: frame */
s->qscale -= s->dquant;
}
s->misc_bits += get_bits_diff(s);
if (s->mv_dir&MV_DIR_FORWARD){
mpeg1_encode_motion(s, s->mv[0][0][0] - s->last_mv[0][0][0], s->f_code);
mpeg1_encode_motion(s, s->mv[0][0][1] - s->last_mv[0][0][1], s->f_code);
s->last_mv[0][0][0]=s->last_mv[0][1][0]= s->mv[0][0][0];
s->last_mv[0][0][1]=s->last_mv[0][1][1]= s->mv[0][0][1];
s->f_count++;
}
if (s->mv_dir&MV_DIR_BACKWARD){
mpeg1_encode_motion(s, s->mv[1][0][0] - s->last_mv[1][0][0], s->b_code);
mpeg1_encode_motion(s, s->mv[1][0][1] - s->last_mv[1][0][1], s->b_code);
s->last_mv[1][0][0]=s->last_mv[1][1][0]= s->mv[1][0][0];
s->last_mv[1][0][1]=s->last_mv[1][1][1]= s->mv[1][0][1];
s->b_count++;
}
}else{
assert(s->mv_type == MV_TYPE_FIELD);
assert(!s->frame_pred_frame_dct);
if (cbp){ // With coded bloc pattern
if (s->dquant) {
if(s->mv_dir == MV_DIR_FORWARD)
put_mb_modes(s, 6, 3, 1, 1);
else
put_mb_modes(s, mb_type_len[s->mv_dir]+3, 2, 1, 1);
put_bits(&s->pb, 5, s->qscale);
} else {
put_mb_modes(s, mb_type_len[s->mv_dir], 3, 1, 1);
}
}else{ // No coded bloc pattern
put_bits(&s->pb, mb_type_len[s->mv_dir], 2);
put_bits(&s->pb, 2, 1); /* motion_type: field */
s->qscale -= s->dquant;
}
s->misc_bits += get_bits_diff(s);
if (s->mv_dir&MV_DIR_FORWARD){
for(i=0; i<2; i++){
put_bits(&s->pb, 1, s->field_select[0][i]);
mpeg1_encode_motion(s, s->mv[0][i][0] - s->last_mv[0][i][0] , s->f_code);
mpeg1_encode_motion(s, s->mv[0][i][1] - (s->last_mv[0][i][1]>>1), s->f_code);
s->last_mv[0][i][0]= s->mv[0][i][0];
s->last_mv[0][i][1]= 2*s->mv[0][i][1];
}
s->f_count++;
}
if (s->mv_dir&MV_DIR_BACKWARD){
for(i=0; i<2; i++){
put_bits(&s->pb, 1, s->field_select[1][i]);
mpeg1_encode_motion(s, s->mv[1][i][0] - s->last_mv[1][i][0] , s->b_code);
mpeg1_encode_motion(s, s->mv[1][i][1] - (s->last_mv[1][i][1]>>1), s->b_code);
s->last_mv[1][i][0]= s->mv[1][i][0];
s->last_mv[1][i][1]= 2*s->mv[1][i][1];
}
s->b_count++;
}
}
s->mv_bits += get_bits_diff(s);
if(cbp)
put_bits(&s->pb, mbPatTable[cbp - 1][1], mbPatTable[cbp - 1][0]);
}
for(i=0;i<6;i++) {
if (cbp & (1 << (5 - i))) {
mpeg1_encode_block(s, block[i], i);
}
}
s->mb_skip_run = 0;
if(s->mb_intra)
s->i_tex_bits+= get_bits_diff(s);
else
s->p_tex_bits+= get_bits_diff(s);
}
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9744 | static void virtio_ccw_device_realize(VirtioCcwDevice *dev, Error **errp)
{
unsigned int cssid = 0;
unsigned int ssid = 0;
unsigned int schid;
unsigned int devno;
bool have_devno = false;
bool found = false;
SubchDev *sch;
int num;
DeviceState *parent = DEVICE(dev);
Error *err = NULL;
VirtIOCCWDeviceClass *k = VIRTIO_CCW_DEVICE_GET_CLASS(dev);
VirtIODevice *vdev;
sch = g_malloc0(sizeof(SubchDev));
sch->driver_data = dev;
dev->sch = sch;
dev->indicators = NULL;
/* Initialize subchannel structure. */
sch->channel_prog = 0x0;
sch->last_cmd_valid = false;
sch->thinint_active = false;
/*
* Use a device number if provided. Otherwise, fall back to subchannel
* number.
*/
if (dev->bus_id) {
num = sscanf(dev->bus_id, "%x.%x.%04x", &cssid, &ssid, &devno);
if (num == 3) {
if ((cssid > MAX_CSSID) || (ssid > MAX_SSID)) {
error_setg(errp, "Invalid cssid or ssid: cssid %x, ssid %x",
cssid, ssid);
goto out_err;
}
/* Enforce use of virtual cssid. */
if (cssid != VIRTUAL_CSSID) {
error_setg(errp, "cssid %x not valid for virtio devices",
cssid);
goto out_err;
}
if (css_devno_used(cssid, ssid, devno)) {
error_setg(errp, "Device %x.%x.%04x already exists",
cssid, ssid, devno);
goto out_err;
}
sch->cssid = cssid;
sch->ssid = ssid;
sch->devno = devno;
have_devno = true;
} else {
error_setg(errp, "Malformed devno parameter '%s'", dev->bus_id);
goto out_err;
}
}
/* Find the next free id. */
if (have_devno) {
for (schid = 0; schid <= MAX_SCHID; schid++) {
if (!css_find_subch(1, cssid, ssid, schid)) {
sch->schid = schid;
css_subch_assign(cssid, ssid, schid, devno, sch);
found = true;
break;
}
}
if (!found) {
error_setg(errp, "No free subchannel found for %x.%x.%04x",
cssid, ssid, devno);
goto out_err;
}
trace_virtio_ccw_new_device(cssid, ssid, schid, devno,
"user-configured");
} else {
cssid = VIRTUAL_CSSID;
for (ssid = 0; ssid <= MAX_SSID; ssid++) {
for (schid = 0; schid <= MAX_SCHID; schid++) {
if (!css_find_subch(1, cssid, ssid, schid)) {
sch->cssid = cssid;
sch->ssid = ssid;
sch->schid = schid;
devno = schid;
/*
* If the devno is already taken, look further in this
* subchannel set.
*/
while (css_devno_used(cssid, ssid, devno)) {
if (devno == MAX_SCHID) {
devno = 0;
} else if (devno == schid - 1) {
error_setg(errp, "No free devno found");
goto out_err;
} else {
devno++;
}
}
sch->devno = devno;
css_subch_assign(cssid, ssid, schid, devno, sch);
found = true;
break;
}
}
if (found) {
break;
}
}
if (!found) {
error_setg(errp, "Virtual channel subsystem is full!");
goto out_err;
}
trace_virtio_ccw_new_device(cssid, ssid, schid, devno,
"auto-configured");
}
/* Build initial schib. */
css_sch_build_virtual_schib(sch, 0, VIRTIO_CCW_CHPID_TYPE);
sch->ccw_cb = virtio_ccw_cb;
/* Build senseid data. */
memset(&sch->id, 0, sizeof(SenseId));
sch->id.reserved = 0xff;
sch->id.cu_type = VIRTIO_CCW_CU_TYPE;
if (k->realize) {
k->realize(dev, &err);
}
if (err) {
error_propagate(errp, err);
css_subch_assign(cssid, ssid, schid, devno, NULL);
goto out_err;
}
/* device_id is only set after vdev has been realized */
vdev = virtio_ccw_get_vdev(sch);
sch->id.cu_model = vdev->device_id;
/* Only the first 32 feature bits are used. */
dev->host_features[0] = virtio_bus_get_vdev_features(&dev->bus,
dev->host_features[0]);
virtio_add_feature(&dev->host_features[0], VIRTIO_F_NOTIFY_ON_EMPTY);
virtio_add_feature(&dev->host_features[0], VIRTIO_F_BAD_FEATURE);
css_generate_sch_crws(sch->cssid, sch->ssid, sch->schid,
parent->hotplugged, 1);
return;
out_err:
dev->sch = NULL;
g_free(sch);
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9754 | void op_cp1_registers(void)
{
if (!(env->CP0_Status & (1 << CP0St_FR)) && (PARAM1 & 1)) {
CALL_FROM_TB1(do_raise_exception, EXCP_RI);
}
RETURN();
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9761 | static inline void RENAME(rgb24tobgr24)(const uint8_t *src, uint8_t *dst, long src_size)
{
unsigned i;
#ifdef HAVE_MMX
long mmx_size= 23 - src_size;
asm volatile (
"test %%"REG_a", %%"REG_a" \n\t"
"jns 2f \n\t"
"movq "MANGLE(mask24r)", %%mm5 \n\t"
"movq "MANGLE(mask24g)", %%mm6 \n\t"
"movq "MANGLE(mask24b)", %%mm7 \n\t"
ASMALIGN(4)
"1: \n\t"
PREFETCH" 32(%1, %%"REG_a") \n\t"
"movq (%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
"movq (%1, %%"REG_a"), %%mm1 \n\t" // BGR BGR BG
"movq 2(%1, %%"REG_a"), %%mm2 \n\t" // R BGR BGR B
"psllq $16, %%mm0 \n\t" // 00 BGR BGR
"pand %%mm5, %%mm0 \n\t"
"pand %%mm6, %%mm1 \n\t"
"pand %%mm7, %%mm2 \n\t"
"por %%mm0, %%mm1 \n\t"
"por %%mm2, %%mm1 \n\t"
"movq 6(%1, %%"REG_a"), %%mm0 \n\t" // BGR BGR BG
MOVNTQ" %%mm1, (%2, %%"REG_a")\n\t" // RGB RGB RG
"movq 8(%1, %%"REG_a"), %%mm1 \n\t" // R BGR BGR B
"movq 10(%1, %%"REG_a"), %%mm2 \n\t" // GR BGR BGR
"pand %%mm7, %%mm0 \n\t"
"pand %%mm5, %%mm1 \n\t"
"pand %%mm6, %%mm2 \n\t"
"por %%mm0, %%mm1 \n\t"
"por %%mm2, %%mm1 \n\t"
"movq 14(%1, %%"REG_a"), %%mm0 \n\t" // R BGR BGR B
MOVNTQ" %%mm1, 8(%2, %%"REG_a")\n\t" // B RGB RGB R
"movq 16(%1, %%"REG_a"), %%mm1 \n\t" // GR BGR BGR
"movq 18(%1, %%"REG_a"), %%mm2 \n\t" // BGR BGR BG
"pand %%mm6, %%mm0 \n\t"
"pand %%mm7, %%mm1 \n\t"
"pand %%mm5, %%mm2 \n\t"
"por %%mm0, %%mm1 \n\t"
"por %%mm2, %%mm1 \n\t"
MOVNTQ" %%mm1, 16(%2, %%"REG_a")\n\t"
"add $24, %%"REG_a" \n\t"
" js 1b \n\t"
"2: \n\t"
: "+a" (mmx_size)
: "r" (src-mmx_size), "r"(dst-mmx_size)
);
__asm __volatile(SFENCE:::"memory");
__asm __volatile(EMMS:::"memory");
if(mmx_size==23) return; //finihsed, was multiple of 8
src+= src_size;
dst+= src_size;
src_size= 23-mmx_size;
src-= src_size;
dst-= src_size;
#endif
for(i=0; i<src_size; i+=3)
{
register uint8_t x;
x = src[i + 2];
dst[i + 1] = src[i + 1];
dst[i + 2] = src[i + 0];
dst[i + 0] = x;
}
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9772 | static int vda_h264_end_frame(AVCodecContext *avctx)
{
H264Context *h = avctx->priv_data;
struct vda_context *vda_ctx = avctx->hwaccel_context;
AVFrame *frame = &h->cur_pic_ptr->f;
struct vda_buffer *context;
AVBufferRef *buffer;
int status;
if (!vda_ctx->decoder || !vda_ctx->priv_bitstream)
status = vda_sync_decode(vda_ctx);
frame->data[3] = (void*)vda_ctx->cv_buffer;
if (status)
av_log(avctx, AV_LOG_ERROR, "Failed to decode frame (%d)\n", status);
The vulnerability label is: Vulnerable |
devign_test_set_data_9781 | static int decode_frame(AVCodecContext *avctx, void *data,
int *got_frame, AVPacket *avpkt)
{
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
H264Context *h = avctx->priv_data;
AVFrame *pict = data;
int buf_index = 0;
Picture *out;
int i, out_idx;
int ret;
h->flags = avctx->flags;
/* end of stream, output what is still in the buffers */
if (buf_size == 0) {
out:
h->cur_pic_ptr = NULL;
h->first_field = 0;
// FIXME factorize this with the output code below
out = h->delayed_pic[0];
out_idx = 0;
for (i = 1;
h->delayed_pic[i] &&
!h->delayed_pic[i]->f.key_frame &&
!h->delayed_pic[i]->mmco_reset;
i++)
if (h->delayed_pic[i]->poc < out->poc) {
out = h->delayed_pic[i];
out_idx = i;
}
for (i = out_idx; h->delayed_pic[i]; i++)
h->delayed_pic[i] = h->delayed_pic[i + 1];
if (out) {
out->reference &= ~DELAYED_PIC_REF;
ret = output_frame(h, pict, &out->f);
if (ret < 0)
return ret;
*got_frame = 1;
}
return buf_index;
}
if(h->is_avc && buf_size >= 9 && buf[0]==1 && buf[2]==0 && (buf[4]&0xFC)==0xFC && (buf[5]&0x1F) && buf[8]==0x67){
int cnt= buf[5]&0x1f;
const uint8_t *p= buf+6;
while(cnt--){
int nalsize= AV_RB16(p) + 2;
if(nalsize > buf_size - (p-buf) || p[2]!=0x67)
goto not_extra;
p += nalsize;
}
cnt = *(p++);
if(!cnt)
goto not_extra;
while(cnt--){
int nalsize= AV_RB16(p) + 2;
if(nalsize > buf_size - (p-buf) || p[2]!=0x68)
goto not_extra;
p += nalsize;
}
return ff_h264_decode_extradata(h, buf, buf_size);
}
not_extra:
buf_index = decode_nal_units(h, buf, buf_size, 0);
if (buf_index < 0)
return -1;
if (!h->cur_pic_ptr && h->nal_unit_type == NAL_END_SEQUENCE) {
av_assert0(buf_index <= buf_size);
goto out;
}
if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) && !h->cur_pic_ptr) {
if (avctx->skip_frame >= AVDISCARD_NONREF ||
buf_size >= 4 && !memcmp("Q264", buf, 4))
return buf_size;
av_log(avctx, AV_LOG_ERROR, "no frame!\n");
return -1;
}
if (!(avctx->flags2 & CODEC_FLAG2_CHUNKS) ||
(h->mb_y >= h->mb_height && h->mb_height)) {
if (avctx->flags2 & CODEC_FLAG2_CHUNKS)
decode_postinit(h, 1);
field_end(h, 0);
/* Wait for second field. */
*got_frame = 0;
if (h->next_output_pic && (h->next_output_pic->sync || h->sync>1)) {
ret = output_frame(h, pict, &h->next_output_pic->f);
if (ret < 0)
return ret;
*got_frame = 1;
if (CONFIG_MPEGVIDEO) {
ff_print_debug_info2(h->avctx, h->next_output_pic, pict, h->er.mbskip_table,
&h->low_delay,
h->mb_width, h->mb_height, h->mb_stride, 1);
}
}
}
assert(pict->data[0] || !*got_frame);
return get_consumed_bytes(buf_index, buf_size);
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9787 | GIOStatus ga_channel_write_all(GAChannel *c, const char *buf, size_t size)
{
GIOStatus status = G_IO_STATUS_NORMAL;
size_t count;
while (size) {
status = ga_channel_write(c, buf, size, &count);
if (status == G_IO_STATUS_NORMAL) {
size -= count;
buf += count;
} else if (status != G_IO_STATUS_AGAIN) {
break;
}
}
return status;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9789 | static int channelmap_query_formats(AVFilterContext *ctx)
{
ChannelMapContext *s = ctx->priv;
ff_set_common_formats(ctx, ff_planar_sample_fmts());
ff_set_common_samplerates(ctx, ff_all_samplerates());
ff_channel_layouts_ref(ff_all_channel_layouts(), &ctx->inputs[0]->out_channel_layouts);
ff_channel_layouts_ref(s->channel_layouts, &ctx->outputs[0]->in_channel_layouts);
return 0;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9805 | static void flush_queued_work(CPUState *env)
{
struct qemu_work_item *wi;
if (!env->queued_work_first)
return;
while ((wi = env->queued_work_first)) {
env->queued_work_first = wi->next;
wi->func(wi->data);
wi->done = true;
}
env->queued_work_last = NULL;
qemu_cond_broadcast(&qemu_work_cond);
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9817 | static int coroutine_fn add_aio_request(BDRVSheepdogState *s, AIOReq *aio_req,
struct iovec *iov, int niov, bool create,
enum AIOCBState aiocb_type)
{
int nr_copies = s->inode.nr_copies;
SheepdogObjReq hdr;
unsigned int wlen;
int ret;
uint64_t oid = aio_req->oid;
unsigned int datalen = aio_req->data_len;
uint64_t offset = aio_req->offset;
uint8_t flags = aio_req->flags;
uint64_t old_oid = aio_req->base_oid;
if (!nr_copies) {
error_report("bug");
}
memset(&hdr, 0, sizeof(hdr));
if (aiocb_type == AIOCB_READ_UDATA) {
wlen = 0;
hdr.opcode = SD_OP_READ_OBJ;
hdr.flags = flags;
} else if (create) {
wlen = datalen;
hdr.opcode = SD_OP_CREATE_AND_WRITE_OBJ;
hdr.flags = SD_FLAG_CMD_WRITE | flags;
} else {
wlen = datalen;
hdr.opcode = SD_OP_WRITE_OBJ;
hdr.flags = SD_FLAG_CMD_WRITE | flags;
}
if (s->cache_flags) {
hdr.flags |= s->cache_flags;
}
hdr.oid = oid;
hdr.cow_oid = old_oid;
hdr.copies = s->inode.nr_copies;
hdr.data_length = datalen;
hdr.offset = offset;
hdr.id = aio_req->id;
qemu_co_mutex_lock(&s->lock);
s->co_send = qemu_coroutine_self();
qemu_aio_set_fd_handler(s->fd, co_read_response, co_write_request,
aio_flush_request, s);
socket_set_cork(s->fd, 1);
/* send a header */
ret = qemu_co_send(s->fd, &hdr, sizeof(hdr));
if (ret < 0) {
qemu_co_mutex_unlock(&s->lock);
error_report("failed to send a req, %s", strerror(errno));
return -errno;
}
if (wlen) {
ret = qemu_co_sendv(s->fd, iov, niov, aio_req->iov_offset, wlen);
if (ret < 0) {
qemu_co_mutex_unlock(&s->lock);
error_report("failed to send a data, %s", strerror(errno));
return -errno;
}
}
socket_set_cork(s->fd, 0);
qemu_aio_set_fd_handler(s->fd, co_read_response, NULL,
aio_flush_request, s);
qemu_co_mutex_unlock(&s->lock);
return 0;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9842 | static int img_write_packet(AVFormatContext *s, int stream_index,
UINT8 *buf, int size)
{
VideoData *img = s->priv_data;
AVStream *st = s->streams[stream_index];
ByteIOContext pb1, *pb;
AVPicture picture;
int width, height, ret, size1;
char filename[1024];
width = st->codec.width;
height = st->codec.height;
switch(st->codec.pix_fmt) {
case PIX_FMT_YUV420P:
size1 = (width * height * 3) / 2;
if (size != size1)
return -EIO;
picture.data[0] = buf;
picture.data[1] = picture.data[0] + width * height;
picture.data[2] = picture.data[1] + (width * height) / 4;
picture.linesize[0] = width;
picture.linesize[1] = width >> 1;
picture.linesize[2] = width >> 1;
break;
case PIX_FMT_RGB24:
size1 = (width * height * 3);
if (size != size1)
return -EIO;
picture.data[0] = buf;
picture.linesize[0] = width * 3;
break;
default:
return -EIO;
}
if (get_frame_filename(filename, sizeof(filename),
img->path, img->img_number) < 0)
return -EIO;
if (!img->is_pipe) {
pb = &pb1;
if (url_fopen(pb, filename, URL_WRONLY) < 0)
return -EIO;
} else {
pb = &s->pb;
}
switch(img->img_fmt) {
case IMGFMT_PGMYUV:
ret = pgm_save(&picture, width, height, pb, 1);
break;
case IMGFMT_PGM:
ret = pgm_save(&picture, width, height, pb, 0);
break;
case IMGFMT_YUV:
ret = yuv_save(&picture, width, height, filename);
break;
case IMGFMT_PPM:
ret = ppm_save(&picture, width, height, pb);
break;
}
if (!img->is_pipe) {
url_fclose(pb);
}
img->img_number++;
return 0;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9845 | int pci_drive_hot_add(Monitor *mon, const QDict *qdict, DriveInfo *dinfo)
{
/* On non-x86 we don't do PCI hotplug */
monitor_printf(mon, "Can't hot-add drive to type %d\n", dinfo->type);
return -1;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9846 | static inline int yv12toyuy2_unscaled_altivec(SwsContext *c, uint8_t* src[], int srcStride[], int srcSliceY,
int srcSliceH, uint8_t* dstParam[], int dstStride_a[]) {
uint8_t *dst=dstParam[0] + dstStride_a[0]*srcSliceY;
// yv12toyuy2( src[0],src[1],src[2],dst,c->srcW,srcSliceH,srcStride[0],srcStride[1],dstStride[0] );
uint8_t *ysrc = src[0];
uint8_t *usrc = src[1];
uint8_t *vsrc = src[2];
const int width = c->srcW;
const int height = srcSliceH;
const int lumStride = srcStride[0];
const int chromStride = srcStride[1];
const int dstStride = dstStride_a[0];
const vector unsigned char yperm = vec_lvsl(0, ysrc);
const int vertLumPerChroma = 2;
register unsigned int y;
if(width&15){
yv12toyuy2( ysrc, usrc, vsrc, dst,c->srcW,srcSliceH, lumStride, chromStride, dstStride);
return srcSliceH;
}
/* this code assume:
1) dst is 16 bytes-aligned
2) dstStride is a multiple of 16
3) width is a multiple of 16
4) lum&chrom stride are multiple of 8
*/
for(y=0; y<height; y++)
{
int i;
for (i = 0; i < width - 31; i+= 32) {
const unsigned int j = i >> 1;
vector unsigned char v_yA = vec_ld(i, ysrc);
vector unsigned char v_yB = vec_ld(i + 16, ysrc);
vector unsigned char v_yC = vec_ld(i + 32, ysrc);
vector unsigned char v_y1 = vec_perm(v_yA, v_yB, yperm);
vector unsigned char v_y2 = vec_perm(v_yB, v_yC, yperm);
vector unsigned char v_uA = vec_ld(j, usrc);
vector unsigned char v_uB = vec_ld(j + 16, usrc);
vector unsigned char v_u = vec_perm(v_uA, v_uB, vec_lvsl(j, usrc));
vector unsigned char v_vA = vec_ld(j, vsrc);
vector unsigned char v_vB = vec_ld(j + 16, vsrc);
vector unsigned char v_v = vec_perm(v_vA, v_vB, vec_lvsl(j, vsrc));
vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
vector unsigned char v_uv_b = vec_mergel(v_u, v_v);
vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
vector unsigned char v_yuy2_2 = vec_mergeh(v_y2, v_uv_b);
vector unsigned char v_yuy2_3 = vec_mergel(v_y2, v_uv_b);
vec_st(v_yuy2_0, (i << 1), dst);
vec_st(v_yuy2_1, (i << 1) + 16, dst);
vec_st(v_yuy2_2, (i << 1) + 32, dst);
vec_st(v_yuy2_3, (i << 1) + 48, dst);
}
if (i < width) {
const unsigned int j = i >> 1;
vector unsigned char v_y1 = vec_ld(i, ysrc);
vector unsigned char v_u = vec_ld(j, usrc);
vector unsigned char v_v = vec_ld(j, vsrc);
vector unsigned char v_uv_a = vec_mergeh(v_u, v_v);
vector unsigned char v_yuy2_0 = vec_mergeh(v_y1, v_uv_a);
vector unsigned char v_yuy2_1 = vec_mergel(v_y1, v_uv_a);
vec_st(v_yuy2_0, (i << 1), dst);
vec_st(v_yuy2_1, (i << 1) + 16, dst);
}
if((y&(vertLumPerChroma-1))==(vertLumPerChroma-1) )
{
usrc += chromStride;
vsrc += chromStride;
}
ysrc += lumStride;
dst += dstStride;
}
return srcSliceH;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9864 | void cpu_loop(CPUAlphaState *env)
{
CPUState *cs = CPU(alpha_env_get_cpu(env));
int trapnr;
target_siginfo_t info;
abi_long sysret;
while (1) {
cpu_exec_start(cs);
trapnr = cpu_alpha_exec(cs);
cpu_exec_end(cs);
/* All of the traps imply a transition through PALcode, which
implies an REI instruction has been executed. Which means
that the intr_flag should be cleared. */
env->intr_flag = 0;
switch (trapnr) {
case EXCP_RESET:
fprintf(stderr, "Reset requested. Exit\n");
exit(EXIT_FAILURE);
break;
case EXCP_MCHK:
fprintf(stderr, "Machine check exception. Exit\n");
exit(EXIT_FAILURE);
break;
case EXCP_SMP_INTERRUPT:
case EXCP_CLK_INTERRUPT:
case EXCP_DEV_INTERRUPT:
fprintf(stderr, "External interrupt. Exit\n");
exit(EXIT_FAILURE);
break;
case EXCP_MMFAULT:
env->lock_addr = -1;
info.si_signo = TARGET_SIGSEGV;
info.si_errno = 0;
info.si_code = (page_get_flags(env->trap_arg0) & PAGE_VALID
? TARGET_SEGV_ACCERR : TARGET_SEGV_MAPERR);
info._sifields._sigfault._addr = env->trap_arg0;
queue_signal(env, info.si_signo, &info);
break;
case EXCP_UNALIGN:
env->lock_addr = -1;
info.si_signo = TARGET_SIGBUS;
info.si_errno = 0;
info.si_code = TARGET_BUS_ADRALN;
info._sifields._sigfault._addr = env->trap_arg0;
queue_signal(env, info.si_signo, &info);
break;
case EXCP_OPCDEC:
do_sigill:
env->lock_addr = -1;
info.si_signo = TARGET_SIGILL;
info.si_errno = 0;
info.si_code = TARGET_ILL_ILLOPC;
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
break;
case EXCP_ARITH:
env->lock_addr = -1;
info.si_signo = TARGET_SIGFPE;
info.si_errno = 0;
info.si_code = TARGET_FPE_FLTINV;
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
break;
case EXCP_FEN:
/* No-op. Linux simply re-enables the FPU. */
break;
case EXCP_CALL_PAL:
env->lock_addr = -1;
switch (env->error_code) {
case 0x80:
/* BPT */
info.si_signo = TARGET_SIGTRAP;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
break;
case 0x81:
/* BUGCHK */
info.si_signo = TARGET_SIGTRAP;
info.si_errno = 0;
info.si_code = 0;
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
break;
case 0x83:
/* CALLSYS */
trapnr = env->ir[IR_V0];
sysret = do_syscall(env, trapnr,
env->ir[IR_A0], env->ir[IR_A1],
env->ir[IR_A2], env->ir[IR_A3],
env->ir[IR_A4], env->ir[IR_A5],
0, 0);
if (trapnr == TARGET_NR_sigreturn
|| trapnr == TARGET_NR_rt_sigreturn) {
break;
}
/* Syscall writes 0 to V0 to bypass error check, similar
to how this is handled internal to Linux kernel.
(Ab)use trapnr temporarily as boolean indicating error. */
trapnr = (env->ir[IR_V0] != 0 && sysret < 0);
env->ir[IR_V0] = (trapnr ? -sysret : sysret);
env->ir[IR_A3] = trapnr;
break;
case 0x86:
/* IMB */
/* ??? We can probably elide the code using page_unprotect
that is checking for self-modifying code. Instead we
could simply call tb_flush here. Until we work out the
changes required to turn off the extra write protection,
this can be a no-op. */
break;
case 0x9E:
/* RDUNIQUE */
/* Handled in the translator for usermode. */
abort();
case 0x9F:
/* WRUNIQUE */
/* Handled in the translator for usermode. */
abort();
case 0xAA:
/* GENTRAP */
info.si_signo = TARGET_SIGFPE;
switch (env->ir[IR_A0]) {
case TARGET_GEN_INTOVF:
info.si_code = TARGET_FPE_INTOVF;
break;
case TARGET_GEN_INTDIV:
info.si_code = TARGET_FPE_INTDIV;
break;
case TARGET_GEN_FLTOVF:
info.si_code = TARGET_FPE_FLTOVF;
break;
case TARGET_GEN_FLTUND:
info.si_code = TARGET_FPE_FLTUND;
break;
case TARGET_GEN_FLTINV:
info.si_code = TARGET_FPE_FLTINV;
break;
case TARGET_GEN_FLTINE:
info.si_code = TARGET_FPE_FLTRES;
break;
case TARGET_GEN_ROPRAND:
info.si_code = 0;
break;
default:
info.si_signo = TARGET_SIGTRAP;
info.si_code = 0;
break;
}
info.si_errno = 0;
info._sifields._sigfault._addr = env->pc;
queue_signal(env, info.si_signo, &info);
break;
default:
goto do_sigill;
}
break;
case EXCP_DEBUG:
info.si_signo = gdb_handlesig(cs, TARGET_SIGTRAP);
if (info.si_signo) {
env->lock_addr = -1;
info.si_errno = 0;
info.si_code = TARGET_TRAP_BRKPT;
queue_signal(env, info.si_signo, &info);
}
break;
case EXCP_STL_C:
case EXCP_STQ_C:
do_store_exclusive(env, env->error_code, trapnr - EXCP_STL_C);
break;
case EXCP_INTERRUPT:
/* Just indicate that signals should be handled asap. */
break;
default:
printf ("Unhandled trap: 0x%x\n", trapnr);
cpu_dump_state(cs, stderr, fprintf, 0);
exit(EXIT_FAILURE);
}
process_pending_signals (env);
}
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9870 | static S390PCIBusDevice *s390_pci_find_dev_by_target(const char *target)
{
int i;
S390PCIBusDevice *pbdev;
S390pciState *s = s390_get_phb();
if (!target) {
return NULL;
}
for (i = 0; i < PCI_SLOT_MAX; i++) {
pbdev = s->pbdev[i];
if (!pbdev) {
continue;
}
if (!strcmp(pbdev->target, target)) {
return pbdev;
}
}
return NULL;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9896 | static void cin_decode_lzss(const unsigned char *src, int src_size, unsigned char *dst, int dst_size)
{
uint16_t cmd;
int i, sz, offset, code;
unsigned char *dst_end = dst + dst_size;
const unsigned char *src_end = src + src_size;
while (src < src_end && dst < dst_end) {
code = *src++;
for (i = 0; i < 8 && src < src_end && dst < dst_end; ++i) {
if (code & (1 << i)) {
*dst++ = *src++;
} else {
cmd = AV_RL16(src); src += 2;
offset = cmd >> 4;
sz = (cmd & 0xF) + 2;
/* don't use memcpy/memmove here as the decoding routine (ab)uses */
/* buffer overlappings to repeat bytes in the destination */
sz = FFMIN(sz, dst_end - dst);
while (sz--) {
*dst = *(dst - offset - 1);
++dst;
}
}
}
}
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9913 | ff_rdt_parse_open(AVFormatContext *ic, int first_stream_of_set_idx,
void *priv_data, RTPDynamicProtocolHandler *handler)
{
RDTDemuxContext *s = av_mallocz(sizeof(RDTDemuxContext));
if (!s)
return NULL;
s->ic = ic;
s->streams = &ic->streams[first_stream_of_set_idx];
do {
s->n_streams++;
} while (first_stream_of_set_idx + s->n_streams < ic->nb_streams &&
s->streams[s->n_streams]->priv_data == s->streams[0]->priv_data);
s->prev_set_id = -1;
s->prev_stream_id = -1;
s->prev_timestamp = -1;
s->parse_packet = handler->parse_packet;
s->dynamic_protocol_context = priv_data;
return s;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9922 | static int qcow2_co_writev(BlockDriverState *bs,
int64_t sector_num,
int remaining_sectors,
QEMUIOVector *qiov)
{
BDRVQcowState *s = bs->opaque;
int index_in_cluster;
int n_end;
int ret;
int cur_nr_sectors; /* number of sectors in current iteration */
QCowL2Meta l2meta;
uint64_t cluster_offset;
QEMUIOVector hd_qiov;
uint64_t bytes_done = 0;
uint8_t *cluster_data = NULL;
l2meta.nb_clusters = 0;
qemu_co_queue_init(&l2meta.dependent_requests);
qemu_iovec_init(&hd_qiov, qiov->niov);
s->cluster_cache_offset = -1; /* disable compressed cache */
qemu_co_mutex_lock(&s->lock);
while (remaining_sectors != 0) {
index_in_cluster = sector_num & (s->cluster_sectors - 1);
n_end = index_in_cluster + remaining_sectors;
if (s->crypt_method &&
n_end > QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors) {
n_end = QCOW_MAX_CRYPT_CLUSTERS * s->cluster_sectors;
}
ret = qcow2_alloc_cluster_offset(bs, sector_num << 9,
index_in_cluster, n_end, &cur_nr_sectors, &l2meta);
if (ret < 0) {
goto fail;
}
cluster_offset = l2meta.cluster_offset;
assert((cluster_offset & 511) == 0);
qemu_iovec_reset(&hd_qiov);
qemu_iovec_copy(&hd_qiov, qiov, bytes_done,
cur_nr_sectors * 512);
if (s->crypt_method) {
if (!cluster_data) {
cluster_data = g_malloc0(QCOW_MAX_CRYPT_CLUSTERS *
s->cluster_size);
}
assert(hd_qiov.size <=
QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
qemu_iovec_to_buffer(&hd_qiov, cluster_data);
qcow2_encrypt_sectors(s, sector_num, cluster_data,
cluster_data, cur_nr_sectors, 1, &s->aes_encrypt_key);
qemu_iovec_reset(&hd_qiov);
qemu_iovec_add(&hd_qiov, cluster_data,
cur_nr_sectors * 512);
}
BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
qemu_co_mutex_unlock(&s->lock);
ret = bdrv_co_writev(bs->file,
(cluster_offset >> 9) + index_in_cluster,
cur_nr_sectors, &hd_qiov);
qemu_co_mutex_lock(&s->lock);
if (ret < 0) {
goto fail;
}
ret = qcow2_alloc_cluster_link_l2(bs, &l2meta);
run_dependent_requests(s, &l2meta);
if (ret < 0) {
goto fail;
}
remaining_sectors -= cur_nr_sectors;
sector_num += cur_nr_sectors;
bytes_done += cur_nr_sectors * 512;
}
ret = 0;
fail:
qemu_co_mutex_unlock(&s->lock);
qemu_iovec_destroy(&hd_qiov);
return ret;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9929 | int vhost_dev_init(struct vhost_dev *hdev, void *opaque,
VhostBackendType backend_type, bool force)
{
uint64_t features;
int i, r;
if (vhost_set_backend_type(hdev, backend_type) < 0) {
close((uintptr_t)opaque);
return -1;
}
if (hdev->vhost_ops->vhost_backend_init(hdev, opaque) < 0) {
close((uintptr_t)opaque);
return -errno;
}
r = hdev->vhost_ops->vhost_call(hdev, VHOST_SET_OWNER, NULL);
if (r < 0) {
goto fail;
}
r = hdev->vhost_ops->vhost_call(hdev, VHOST_GET_FEATURES, &features);
if (r < 0) {
goto fail;
}
for (i = 0; i < hdev->nvqs; ++i) {
r = vhost_virtqueue_init(hdev, hdev->vqs + i, i);
if (r < 0) {
goto fail_vq;
}
}
hdev->features = features;
hdev->memory_listener = (MemoryListener) {
.begin = vhost_begin,
.commit = vhost_commit,
.region_add = vhost_region_add,
.region_del = vhost_region_del,
.region_nop = vhost_region_nop,
.log_start = vhost_log_start,
.log_stop = vhost_log_stop,
.log_sync = vhost_log_sync,
.log_global_start = vhost_log_global_start,
.log_global_stop = vhost_log_global_stop,
.eventfd_add = vhost_eventfd_add,
.eventfd_del = vhost_eventfd_del,
.priority = 10
};
hdev->migration_blocker = NULL;
if (!(hdev->features & (0x1ULL << VHOST_F_LOG_ALL))) {
error_setg(&hdev->migration_blocker,
"Migration disabled: vhost lacks VHOST_F_LOG_ALL feature.");
migrate_add_blocker(hdev->migration_blocker);
}
hdev->mem = g_malloc0(offsetof(struct vhost_memory, regions));
hdev->n_mem_sections = 0;
hdev->mem_sections = NULL;
hdev->log = NULL;
hdev->log_size = 0;
hdev->log_enabled = false;
hdev->started = false;
hdev->memory_changed = false;
memory_listener_register(&hdev->memory_listener, &address_space_memory);
hdev->force = force;
return 0;
fail_vq:
while (--i >= 0) {
vhost_virtqueue_cleanup(hdev->vqs + i);
}
fail:
r = -errno;
hdev->vhost_ops->vhost_backend_cleanup(hdev);
return r;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9934 | int ff_set_systematic_pal2(uint32_t pal[256], enum AVPixelFormat pix_fmt)
{
int i;
for (i = 0; i < 256; i++) {
int r, g, b;
switch (pix_fmt) {
case AV_PIX_FMT_RGB8:
r = (i>>5 )*36;
g = ((i>>2)&7)*36;
b = (i&3 )*85;
break;
case AV_PIX_FMT_BGR8:
b = (i>>6 )*85;
g = ((i>>3)&7)*36;
r = (i&7 )*36;
break;
case AV_PIX_FMT_RGB4_BYTE:
r = (i>>3 )*255;
g = ((i>>1)&3)*85;
b = (i&1 )*255;
break;
case AV_PIX_FMT_BGR4_BYTE:
b = (i>>3 )*255;
g = ((i>>1)&3)*85;
r = (i&1 )*255;
break;
case AV_PIX_FMT_GRAY8:
r = b = g = i;
break;
default:
return AVERROR(EINVAL);
}
pal[i] = b + (g<<8) + (r<<16) + (0xFF<<24);
}
return 0;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_9943 | void qmp_transaction(TransactionActionList *dev_list, Error **errp)
{
TransactionActionList *dev_entry = dev_list;
BlkTransactionState *state, *next;
Error *local_err = NULL;
QSIMPLEQ_HEAD(snap_bdrv_states, BlkTransactionState) snap_bdrv_states;
QSIMPLEQ_INIT(&snap_bdrv_states);
/* drain all i/o before any operations */
bdrv_drain_all();
/* We don't do anything in this loop that commits us to the operations */
while (NULL != dev_entry) {
TransactionAction *dev_info = NULL;
const BdrvActionOps *ops;
dev_info = dev_entry->value;
dev_entry = dev_entry->next;
assert(dev_info->kind < ARRAY_SIZE(actions));
ops = &actions[dev_info->kind];
assert(ops->instance_size > 0);
state = g_malloc0(ops->instance_size);
state->ops = ops;
state->action = dev_info;
QSIMPLEQ_INSERT_TAIL(&snap_bdrv_states, state, entry);
state->ops->prepare(state, &local_err);
if (local_err) {
error_propagate(errp, local_err);
goto delete_and_fail;
}
}
QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) {
if (state->ops->commit) {
state->ops->commit(state);
}
}
/* success */
goto exit;
delete_and_fail:
/* failure, and it is all-or-none; roll back all operations */
QSIMPLEQ_FOREACH(state, &snap_bdrv_states, entry) {
if (state->ops->abort) {
state->ops->abort(state);
}
}
exit:
QSIMPLEQ_FOREACH_SAFE(state, &snap_bdrv_states, entry, next) {
if (state->ops->clean) {
state->ops->clean(state);
}
g_free(state);
}
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9956 | static int intel_hda_init(PCIDevice *pci)
{
IntelHDAState *d = DO_UPCAST(IntelHDAState, pci, pci);
uint8_t *conf = d->pci.config;
d->name = d->pci.qdev.info->name;
pci_config_set_vendor_id(conf, PCI_VENDOR_ID_INTEL);
pci_config_set_device_id(conf, 0x2668);
pci_config_set_revision(conf, 1);
pci_config_set_class(conf, PCI_CLASS_MULTIMEDIA_HD_AUDIO);
pci_config_set_interrupt_pin(conf, 1);
/* HDCTL off 0x40 bit 0 selects signaling mode (1-HDA, 0 - Ac97) 18.1.19 */
conf[0x40] = 0x01;
d->mmio_addr = cpu_register_io_memory(intel_hda_mmio_read,
intel_hda_mmio_write, d,
DEVICE_NATIVE_ENDIAN);
pci_register_bar_simple(&d->pci, 0, 0x4000, 0, d->mmio_addr);
if (d->msi) {
msi_init(&d->pci, 0x50, 1, true, false);
}
hda_codec_bus_init(&d->pci.qdev, &d->codecs,
intel_hda_response, intel_hda_xfer);
return 0;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9961 | static BlockDriverAIOCB *raw_aio_writev(BlockDriverState *bs,
int64_t sector_num, QEMUIOVector *qiov, int nb_sectors,
BlockDriverCompletionFunc *cb, void *opaque)
{
RawAIOCB *acb;
acb = raw_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque);
if (!acb)
return NULL;
if (qemu_paio_write(&acb->aiocb) < 0) {
raw_aio_remove(acb);
return NULL;
}
return &acb->common;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_9977 | static inline void pred_direct_motion(H264Context * const h, int *mb_type){
MpegEncContext * const s = &h->s;
const int mb_xy = s->mb_x + s->mb_y*s->mb_stride;
const int b8_xy = 2*s->mb_x + 2*s->mb_y*h->b8_stride;
const int b4_xy = 4*s->mb_x + 4*s->mb_y*h->b_stride;
const int mb_type_col = h->ref_list[1][0].mb_type[mb_xy];
const int16_t (*l1mv0)[2] = (const int16_t (*)[2]) &h->ref_list[1][0].motion_val[0][b4_xy];
const int8_t *l1ref0 = &h->ref_list[1][0].ref_index[0][b8_xy];
const int is_b8x8 = IS_8X8(*mb_type);
int sub_mb_type;
int i8, i4;
if(IS_8X8(mb_type_col) && !h->sps.direct_8x8_inference_flag){
/* FIXME save sub mb types from previous frames (or derive from MVs)
* so we know exactly what block size to use */
sub_mb_type = MB_TYPE_8x8|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_4x4 */
*mb_type = MB_TYPE_8x8;
}else if(!is_b8x8 && (IS_16X16(mb_type_col) || IS_INTRA(mb_type_col))){
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
*mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_16x16 */
}else{
sub_mb_type = MB_TYPE_16x16|MB_TYPE_P0L0|MB_TYPE_P0L1|MB_TYPE_DIRECT2; /* B_SUB_8x8 */
*mb_type = MB_TYPE_8x8;
}
if(!is_b8x8)
*mb_type |= MB_TYPE_DIRECT2;
tprintf("mb_type = %08x, sub_mb_type = %08x, is_b8x8 = %d, mb_type_col = %08x\n", *mb_type, sub_mb_type, is_b8x8, mb_type_col);
if(h->direct_spatial_mv_pred){
int ref[2];
int mv[2][2];
int list;
/* ref = min(neighbors) */
for(list=0; list<2; list++){
int refa = h->ref_cache[list][scan8[0] - 1];
int refb = h->ref_cache[list][scan8[0] - 8];
int refc = h->ref_cache[list][scan8[0] - 8 + 4];
if(refc == -2)
refc = h->ref_cache[list][scan8[0] - 8 - 1];
ref[list] = refa;
if(ref[list] < 0 || (refb < ref[list] && refb >= 0))
ref[list] = refb;
if(ref[list] < 0 || (refc < ref[list] && refc >= 0))
ref[list] = refc;
if(ref[list] < 0)
ref[list] = -1;
}
if(ref[0] < 0 && ref[1] < 0){
ref[0] = ref[1] = 0;
mv[0][0] = mv[0][1] =
mv[1][0] = mv[1][1] = 0;
}else{
for(list=0; list<2; list++){
if(ref[list] >= 0)
pred_motion(h, 0, 4, list, ref[list], &mv[list][0], &mv[list][1]);
else
mv[list][0] = mv[list][1] = 0;
}
}
if(ref[1] < 0){
*mb_type &= ~MB_TYPE_P0L1;
sub_mb_type &= ~MB_TYPE_P0L1;
}else if(ref[0] < 0){
*mb_type &= ~MB_TYPE_P0L0;
sub_mb_type &= ~MB_TYPE_P0L0;
}
if(IS_16X16(*mb_type)){
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref[0], 1);
fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, ref[1], 1);
if(!IS_INTRA(mb_type_col) && l1ref0[0] == 0 &&
ABS(l1mv0[0][0]) <= 1 && ABS(l1mv0[0][1]) <= 1){
if(ref[0] > 0)
fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4);
else
fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
if(ref[1] > 0)
fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv[1][0],mv[1][1]), 4);
else
fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
}else{
fill_rectangle(&h->mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv[0][0],mv[0][1]), 4);
fill_rectangle(&h->mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv[1][0],mv[1][1]), 4);
}
}else{
for(i8=0; i8<4; i8++){
const int x8 = i8&1;
const int y8 = i8>>1;
if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
continue;
h->sub_mb_type[i8] = sub_mb_type;
fill_rectangle(&h->mv_cache[0][scan8[i8*4]], 2, 2, 8, pack16to32(mv[0][0],mv[0][1]), 4);
fill_rectangle(&h->mv_cache[1][scan8[i8*4]], 2, 2, 8, pack16to32(mv[1][0],mv[1][1]), 4);
fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref[0], 1);
fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, ref[1], 1);
/* col_zero_flag */
if(!IS_INTRA(mb_type_col) && l1ref0[x8 + y8*h->b8_stride] == 0){
for(i4=0; i4<4; i4++){
const int16_t *mv_col = l1mv0[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride];
if(ABS(mv_col[0]) <= 1 && ABS(mv_col[1]) <= 1){
if(ref[0] == 0)
*(uint32_t*)h->mv_cache[0][scan8[i8*4+i4]] = 0;
if(ref[1] == 0)
*(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] = 0;
}
}
}
}
}
}else{ /* direct temporal mv pred */
/* FIXME assumes that L1ref0 used the same ref lists as current frame */
if(IS_16X16(*mb_type)){
fill_rectangle(&h->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
if(IS_INTRA(mb_type_col)){
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, 0, 1);
fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, 0, 4);
fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, 0, 4);
}else{
const int ref0 = l1ref0[0];
const int dist_scale_factor = h->dist_scale_factor[ref0];
const int16_t *mv_col = l1mv0[0];
int mv_l0[2];
mv_l0[0] = (dist_scale_factor * mv_col[0] + 128) >> 8;
mv_l0[1] = (dist_scale_factor * mv_col[1] + 128) >> 8;
fill_rectangle(&h->ref_cache[0][scan8[0]], 4, 4, 8, ref0, 1);
fill_rectangle(&h-> mv_cache[0][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0],mv_l0[1]), 4);
fill_rectangle(&h-> mv_cache[1][scan8[0]], 4, 4, 8, pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]), 4);
}
}else{
for(i8=0; i8<4; i8++){
const int x8 = i8&1;
const int y8 = i8>>1;
int ref0, dist_scale_factor;
if(is_b8x8 && !IS_DIRECT(h->sub_mb_type[i8]))
continue;
h->sub_mb_type[i8] = sub_mb_type;
if(IS_INTRA(mb_type_col)){
fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, 0, 1);
fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
fill_rectangle(&h-> mv_cache[0][scan8[i8*4]], 2, 2, 8, 0, 4);
fill_rectangle(&h-> mv_cache[1][scan8[i8*4]], 2, 2, 8, 0, 4);
continue;
}
ref0 = l1ref0[x8 + y8*h->b8_stride];
dist_scale_factor = h->dist_scale_factor[ref0];
fill_rectangle(&h->ref_cache[0][scan8[i8*4]], 2, 2, 8, ref0, 1);
fill_rectangle(&h->ref_cache[1][scan8[i8*4]], 2, 2, 8, 0, 1);
for(i4=0; i4<4; i4++){
const int16_t *mv_col = l1mv0[x8*2 + (i4&1) + (y8*2 + (i4>>1))*h->b_stride];
int16_t *mv_l0 = h->mv_cache[0][scan8[i8*4+i4]];
mv_l0[0] = (dist_scale_factor * mv_col[0] + 128) >> 8;
mv_l0[1] = (dist_scale_factor * mv_col[1] + 128) >> 8;
*(uint32_t*)h->mv_cache[1][scan8[i8*4+i4]] =
pack16to32(mv_l0[0]-mv_col[0],mv_l0[1]-mv_col[1]);
}
}
}
}
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10000 | static inline uint32_t insn_get(CPUX86State *env, DisasContext *s, TCGMemOp ot)
{
uint32_t ret;
switch (ot) {
case MO_8:
ret = cpu_ldub_code(env, s->pc);
s->pc++;
break;
case MO_16:
ret = cpu_lduw_code(env, s->pc);
s->pc += 2;
break;
case MO_32:
#ifdef TARGET_X86_64
case MO_64:
#endif
ret = cpu_ldl_code(env, s->pc);
s->pc += 4;
break;
default:
tcg_abort();
}
return ret;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10026 | static void bdrv_throttle_write_timer_cb(void *opaque)
{
BlockDriverState *bs = opaque;
qemu_co_enter_next(&bs->throttled_reqs[1]);
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10049 | static int find_pte32(CPUPPCState *env, mmu_ctx_t *ctx, int h,
int rw, int type, int target_page_bits)
{
hwaddr pteg_off;
target_ulong pte0, pte1;
int i, good = -1;
int ret, r;
ret = -1; /* No entry found */
pteg_off = get_pteg_offset(env, ctx->hash[h], HASH_PTE_SIZE_32);
for (i = 0; i < 8; i++) {
if (env->external_htab) {
pte0 = ldl_p(env->external_htab + pteg_off + (i * 8));
pte1 = ldl_p(env->external_htab + pteg_off + (i * 8) + 4);
} else {
pte0 = ldl_phys(env->htab_base + pteg_off + (i * 8));
pte1 = ldl_phys(env->htab_base + pteg_off + (i * 8) + 4);
}
r = pte_check_hash32(ctx, pte0, pte1, h, rw, type);
LOG_MMU("Load pte from %08" HWADDR_PRIx " => " TARGET_FMT_lx " "
TARGET_FMT_lx " %d %d %d " TARGET_FMT_lx "\n",
pteg_off + (i * 8), pte0, pte1, (int)(pte0 >> 31), h,
(int)((pte0 >> 6) & 1), ctx->ptem);
switch (r) {
case -3:
/* PTE inconsistency */
return -1;
case -2:
/* Access violation */
ret = -2;
good = i;
break;
case -1:
default:
/* No PTE match */
break;
case 0:
/* access granted */
/* XXX: we should go on looping to check all PTEs consistency
* but if we can speed-up the whole thing as the
* result would be undefined if PTEs are not consistent.
*/
ret = 0;
good = i;
goto done;
}
}
if (good != -1) {
done:
LOG_MMU("found PTE at addr %08" HWADDR_PRIx " prot=%01x ret=%d\n",
ctx->raddr, ctx->prot, ret);
/* Update page flags */
pte1 = ctx->raddr;
if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
if (env->external_htab) {
stl_p(env->external_htab + pteg_off + (good * 8) + 4,
pte1);
} else {
stl_phys_notdirty(env->htab_base + pteg_off +
(good * 8) + 4, pte1);
}
}
}
/* We have a TLB that saves 4K pages, so let's
* split a huge page to 4k chunks */
if (target_page_bits != TARGET_PAGE_BITS) {
ctx->raddr |= (ctx->eaddr & ((1 << target_page_bits) - 1))
& TARGET_PAGE_MASK;
}
return ret;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10055 | static int kvm_put_xcrs(CPUState *env)
{
/* Push the vCPU's XCR0 extended control register into KVM via
 * KVM_SET_XCRS.  No-op (returns 0) when the host lacks XCRS support
 * or the capability was compiled out. */
#ifdef KVM_CAP_XCRS
struct kvm_xcrs xcrs;
if (!kvm_has_xcrs())
return 0;
xcrs.nr_xcrs = 1;
xcrs.flags = 0;
xcrs.xcrs[0].xcr = 0; /* index 0 == XCR0 */
xcrs.xcrs[0].value = env->xcr0;
return kvm_vcpu_ioctl(env, KVM_SET_XCRS, &xcrs);
#else
return 0;
#endif
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10064 | static ExitStatus gen_bcond_internal(DisasContext *ctx, TCGCond cond,
TCGv cmp, int32_t disp)
{
/* Emit a conditional branch to PC + (disp << 2).  Uses a two-way
 * goto_tb chain when the destination can be directly linked,
 * otherwise selects the next PC with a movcond and exits. */
uint64_t dest = ctx->pc + (disp << 2);
int lab_true = gen_new_label();
if (use_goto_tb(ctx, dest)) {
tcg_gen_brcondi_i64(cond, cmp, 0, lab_true);
/* Fall-through path: branch not taken. */
tcg_gen_goto_tb(0);
tcg_gen_movi_i64(cpu_pc, ctx->pc);
tcg_gen_exit_tb((uintptr_t)ctx->tb);
gen_set_label(lab_true);
/* Taken path. */
tcg_gen_goto_tb(1);
tcg_gen_movi_i64(cpu_pc, dest);
tcg_gen_exit_tb((uintptr_t)ctx->tb + 1);
return EXIT_GOTO_TB;
} else {
TCGv_i64 z = tcg_const_i64(0);
TCGv_i64 d = tcg_const_i64(dest);
TCGv_i64 p = tcg_const_i64(ctx->pc);
/* cpu_pc = (cmp cond 0) ? dest : ctx->pc */
tcg_gen_movcond_i64(cond, cpu_pc, cmp, z, d, p);
tcg_temp_free_i64(z);
tcg_temp_free_i64(d);
tcg_temp_free_i64(p);
return EXIT_PC_UPDATED;
}
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10081 | static uint32_t gt64120_read_config(PCIDevice *d, uint32_t address, int len)
{
/* PCI config-space read for the GT-64120 north bridge; byte-swaps the
 * result on big-endian targets since the device registers are
 * little-endian. */
uint32_t val = pci_default_read_config(d, address, len);
#ifdef TARGET_WORDS_BIGENDIAN
val = bswap32(val);
#endif
return val;
}
devign_test_set_data_10104 | static void rtas_get_time_of_day(PowerPCCPU *cpu, sPAPREnvironment *spapr,
uint32_t token, uint32_t nargs,
target_ulong args,
uint32_t nret, target_ulong rets)
{
/* sPAPR RTAS "get-time-of-day" call: returns the guest RTC
 * (host time adjusted by spapr->rtc_offset) as 8 return values.
 * NOTE(review): nargs is not validated (only nret is) — the dataset
 * labels this record vulnerable; confirm against the upstream fix. */
struct tm tm;
if (nret != 8) {
rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
return;
}
qemu_get_timedate(&tm, spapr->rtc_offset);
rtas_st(rets, 0, RTAS_OUT_SUCCESS);
rtas_st(rets, 1, tm.tm_year + 1900);
rtas_st(rets, 2, tm.tm_mon + 1); /* struct tm months are 0-based */
rtas_st(rets, 3, tm.tm_mday);
rtas_st(rets, 4, tm.tm_hour);
rtas_st(rets, 5, tm.tm_min);
rtas_st(rets, 6, tm.tm_sec);
rtas_st(rets, 7, 0); /* we don't do nanoseconds */
}
The vulnerability label is: Vulnerable |
devign_test_set_data_10128 | void qmp_block_job_set_speed(const char *device, int64_t speed, Error **errp)
{
/* QMP handler: set the rate limit of the block job running on
 * @device; reports BLOCK_JOB_NOT_ACTIVE if no job is found. */
BlockJob *job = find_block_job(device);
if (!job) {
error_set(errp, QERR_BLOCK_JOB_NOT_ACTIVE, device);
return;
}
block_job_set_speed(job, speed, errp);
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10161 | static int create_stream(AVFormatContext *s)
{
/* xcbgrab: create the single rawvideo stream, clamp the requested
 * capture size to the root window geometry and derive the pixel
 * format from the X server's pixmap formats. */
XCBGrabContext *c = s->priv_data;
AVStream *st = avformat_new_stream(s, NULL);
xcb_get_geometry_cookie_t gc;
xcb_get_geometry_reply_t *geo;
int ret;
if (!st)
return AVERROR(ENOMEM);
ret = av_parse_video_size(&c->width, &c->height, c->video_size);
if (ret < 0)
return ret;
ret = av_parse_video_rate(&st->avg_frame_rate, c->framerate);
if (ret < 0)
return ret;
avpriv_set_pts_info(st, 64, 1, 1000000); /* microsecond timestamps */
gc = xcb_get_geometry(c->conn, c->screen->root);
/* NOTE(review): geo is dereferenced without a NULL check; the reply
 * can be NULL if the connection breaks — confirm against callers. */
geo = xcb_get_geometry_reply(c->conn, gc, NULL);
c->width = FFMIN(geo->width, c->width);
c->height = FFMIN(geo->height, c->height);
c->time_base = (AVRational){ st->avg_frame_rate.den,
st->avg_frame_rate.num };
c->time_frame = av_gettime();
st->codec->codec_type = AVMEDIA_TYPE_VIDEO;
st->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
st->codec->width = c->width;
st->codec->height = c->height;
st->codec->time_base = c->time_base;
ret = pixfmt_from_pixmap_format(s, geo->depth, &st->codec->pix_fmt);
free(geo); /* xcb replies are malloc'd, not av_malloc'd */
return ret;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10175 | static int vorbis_parse_setup_hdr_codebooks(vorbis_context *vc)
{
/* Parse the codebook section of a Vorbis setup header: read code
 * lengths (ordered or sparse/unordered), optionally build VQ
 * codevectors (lookup type 1), and generate a VLC table per codebook.
 * Returns 0 or a negative AVERROR code. */
unsigned cb;
uint8_t *tmp_vlc_bits;
uint32_t *tmp_vlc_codes;
GetBitContext *gb = &vc->gb;
uint16_t *codebook_multiplicands;
int ret = 0;
vc->codebook_count = get_bits(gb, 8) + 1;
av_dlog(NULL, " Codebooks: %d \n", vc->codebook_count);
/* NOTE(review): allocation results are not checked before use —
 * TODO confirm whether callers guarantee small allocations succeed. */
vc->codebooks = av_mallocz(vc->codebook_count * sizeof(*vc->codebooks));
tmp_vlc_bits = av_mallocz(V_MAX_VLCS * sizeof(*tmp_vlc_bits));
tmp_vlc_codes = av_mallocz(V_MAX_VLCS * sizeof(*tmp_vlc_codes));
codebook_multiplicands = av_malloc(V_MAX_VLCS * sizeof(*codebook_multiplicands));
for (cb = 0; cb < vc->codebook_count; ++cb) {
vorbis_codebook *codebook_setup = &vc->codebooks[cb];
unsigned ordered, t, entries, used_entries = 0;
av_dlog(NULL, " %u. Codebook\n", cb);
/* Each codebook starts with the 24-bit sync pattern "BCV". */
if (get_bits(gb, 24) != 0x564342) {
av_log(vc->avctx, AV_LOG_ERROR,
" %u. Codebook setup data corrupt.\n", cb);
ret = AVERROR_INVALIDDATA;
goto error;
}
codebook_setup->dimensions=get_bits(gb, 16);
if (codebook_setup->dimensions > 16 || codebook_setup->dimensions == 0) {
av_log(vc->avctx, AV_LOG_ERROR,
" %u. Codebook's dimension is invalid (%d).\n",
cb, codebook_setup->dimensions);
ret = AVERROR_INVALIDDATA;
goto error;
}
entries = get_bits(gb, 24);
if (entries > V_MAX_VLCS) {
av_log(vc->avctx, AV_LOG_ERROR,
" %u. Codebook has too many entries (%u).\n",
cb, entries);
ret = AVERROR_INVALIDDATA;
goto error;
}
ordered = get_bits1(gb);
av_dlog(NULL, " codebook_dimensions %d, codebook_entries %u\n",
codebook_setup->dimensions, entries);
if (!ordered) {
/* Unordered: one 5-bit length per entry; in sparse mode a flag
 * bit marks which entries are actually used. */
unsigned ce, flag;
unsigned sparse = get_bits1(gb);
av_dlog(NULL, " not ordered \n");
if (sparse) {
av_dlog(NULL, " sparse \n");
used_entries = 0;
for (ce = 0; ce < entries; ++ce) {
flag = get_bits1(gb);
if (flag) {
tmp_vlc_bits[ce] = get_bits(gb, 5) + 1;
++used_entries;
} else
tmp_vlc_bits[ce] = 0;
}
} else {
av_dlog(NULL, " not sparse \n");
used_entries = entries;
for (ce = 0; ce < entries; ++ce)
tmp_vlc_bits[ce] = get_bits(gb, 5) + 1;
}
} else {
/* Ordered: runs of entries share increasing code lengths. */
unsigned current_entry = 0;
unsigned current_length = get_bits(gb, 5) + 1;
av_dlog(NULL, " ordered, current length: %u\n", current_length); //FIXME
used_entries = entries;
for (; current_entry < used_entries && current_length <= 32; ++current_length) {
unsigned i, number;
av_dlog(NULL, " number bits: %u ", ilog(entries - current_entry));
number = get_bits(gb, ilog(entries - current_entry));
av_dlog(NULL, " number: %u\n", number);
for (i = current_entry; i < number+current_entry; ++i)
if (i < used_entries)
tmp_vlc_bits[i] = current_length;
current_entry+=number;
}
if (current_entry>used_entries) {
av_log(vc->avctx, AV_LOG_ERROR, " More codelengths than codes in codebook. \n");
ret = AVERROR_INVALIDDATA;
goto error;
}
}
codebook_setup->lookup_type = get_bits(gb, 4);
av_dlog(NULL, " lookup type: %d : %s \n", codebook_setup->lookup_type,
codebook_setup->lookup_type ? "vq" : "no lookup");
// If the codebook is used for (inverse) VQ, calculate codevectors.
if (codebook_setup->lookup_type == 1) {
unsigned i, j, k;
unsigned codebook_lookup_values = ff_vorbis_nth_root(entries, codebook_setup->dimensions);
float codebook_minimum_value = vorbisfloat2float(get_bits_long(gb, 32));
float codebook_delta_value = vorbisfloat2float(get_bits_long(gb, 32));
unsigned codebook_value_bits = get_bits(gb, 4) + 1;
unsigned codebook_sequence_p = get_bits1(gb);
av_dlog(NULL, " We expect %d numbers for building the codevectors. \n",
codebook_lookup_values);
av_dlog(NULL, " delta %f minmum %f \n",
codebook_delta_value, codebook_minimum_value);
for (i = 0; i < codebook_lookup_values; ++i) {
codebook_multiplicands[i] = get_bits(gb, codebook_value_bits);
av_dlog(NULL, " multiplicands*delta+minmum : %e \n",
(float)codebook_multiplicands[i] * codebook_delta_value + codebook_minimum_value);
av_dlog(NULL, " multiplicand %u\n", codebook_multiplicands[i]);
}
// Weed out unused vlcs and build codevector vector
codebook_setup->codevectors = used_entries ? av_mallocz(used_entries *
codebook_setup->dimensions *
sizeof(*codebook_setup->codevectors))
: NULL;
for (j = 0, i = 0; i < entries; ++i) {
unsigned dim = codebook_setup->dimensions;
if (tmp_vlc_bits[i]) {
float last = 0.0;
unsigned lookup_offset = i;
av_dlog(vc->avctx, "Lookup offset %u ,", i);
for (k = 0; k < dim; ++k) {
/* Decompose the entry index in base codebook_lookup_values
 * to pick one multiplicand per dimension. */
unsigned multiplicand_offset = lookup_offset % codebook_lookup_values;
codebook_setup->codevectors[j * dim + k] = codebook_multiplicands[multiplicand_offset] * codebook_delta_value + codebook_minimum_value + last;
if (codebook_sequence_p)
last = codebook_setup->codevectors[j * dim + k];
lookup_offset/=codebook_lookup_values;
}
tmp_vlc_bits[j] = tmp_vlc_bits[i];
av_dlog(vc->avctx, "real lookup offset %u, vector: ", j);
for (k = 0; k < dim; ++k)
av_dlog(vc->avctx, " %f ",
codebook_setup->codevectors[j * dim + k]);
av_dlog(vc->avctx, "\n");
++j;
}
}
if (j != used_entries) {
av_log(vc->avctx, AV_LOG_ERROR, "Bug in codevector vector building code. \n");
ret = AVERROR_INVALIDDATA;
goto error;
}
entries = used_entries;
} else if (codebook_setup->lookup_type >= 2) {
av_log(vc->avctx, AV_LOG_ERROR, "Codebook lookup type not supported. \n");
ret = AVERROR_INVALIDDATA;
goto error;
}
// Initialize VLC table
if (ff_vorbis_len2vlc(tmp_vlc_bits, tmp_vlc_codes, entries)) {
av_log(vc->avctx, AV_LOG_ERROR, " Invalid code lengths while generating vlcs. \n");
ret = AVERROR_INVALIDDATA;
goto error;
}
codebook_setup->maxdepth = 0;
for (t = 0; t < entries; ++t)
if (tmp_vlc_bits[t] >= codebook_setup->maxdepth)
codebook_setup->maxdepth = tmp_vlc_bits[t];
if (codebook_setup->maxdepth > 3 * V_NB_BITS)
codebook_setup->nb_bits = V_NB_BITS2;
else
codebook_setup->nb_bits = V_NB_BITS;
/* maxdepth becomes the number of VLC lookups needed for the longest code. */
codebook_setup->maxdepth = (codebook_setup->maxdepth+codebook_setup->nb_bits - 1) / codebook_setup->nb_bits;
if ((ret = init_vlc(&codebook_setup->vlc, codebook_setup->nb_bits,
entries, tmp_vlc_bits, sizeof(*tmp_vlc_bits),
sizeof(*tmp_vlc_bits), tmp_vlc_codes,
sizeof(*tmp_vlc_codes), sizeof(*tmp_vlc_codes),
INIT_VLC_LE))) {
av_log(vc->avctx, AV_LOG_ERROR, " Error generating vlc tables. \n");
goto error;
}
}
av_free(tmp_vlc_bits);
av_free(tmp_vlc_codes);
av_free(codebook_multiplicands);
return 0;
// Error:
error:
av_free(tmp_vlc_bits);
av_free(tmp_vlc_codes);
av_free(codebook_multiplicands);
return ret;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10180 | static int htab_save_later_pass(QEMUFile *f, sPAPREnvironment *spapr,
int64_t max_ns)
{
bool final = max_ns < 0;
int htabslots = HTAB_SIZE(spapr) / HASH_PTE_SIZE_64;
int examined = 0, sent = 0;
int index = spapr->htab_save_index;
int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
assert(!spapr->htab_first_pass);
do {
int chunkstart, invalidstart;
/* Consume non-dirty HPTEs */
while ((index < htabslots)
&& !HPTE_DIRTY(HPTE(spapr->htab, index))) {
index++;
examined++;
}
chunkstart = index;
/* Consume valid dirty HPTEs */
while ((index < htabslots)
&& HPTE_DIRTY(HPTE(spapr->htab, index))
&& HPTE_VALID(HPTE(spapr->htab, index))) {
CLEAN_HPTE(HPTE(spapr->htab, index));
index++;
examined++;
}
invalidstart = index;
/* Consume invalid dirty HPTEs */
while ((index < htabslots)
&& HPTE_DIRTY(HPTE(spapr->htab, index))
&& !HPTE_VALID(HPTE(spapr->htab, index))) {
CLEAN_HPTE(HPTE(spapr->htab, index));
index++;
examined++;
}
if (index > chunkstart) {
int n_valid = invalidstart - chunkstart;
int n_invalid = index - invalidstart;
qemu_put_be32(f, chunkstart);
qemu_put_be16(f, n_valid);
qemu_put_be16(f, n_invalid);
qemu_put_buffer(f, HPTE(spapr->htab, chunkstart),
HASH_PTE_SIZE_64 * n_valid);
sent += index - chunkstart;
if (!final && (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) > max_ns) {
break;
}
}
if (examined >= htabslots) {
break;
}
if (index >= htabslots) {
assert(index == htabslots);
index = 0;
}
} while ((examined < htabslots) && (!qemu_file_rate_limit(f) || final));
if (index >= htabslots) {
assert(index == htabslots);
index = 0;
}
spapr->htab_save_index = index;
return (examined >= htabslots) && (sent == 0) ? 1 : 0;
}
The vulnerability label is: Vulnerable |
devign_test_set_data_10190 | static abi_long do_socket(int domain, int type, int protocol)
{
/* linux-user emulation of socket(2): translate the target's socket
 * type/flags to host values, create the socket and fix up the
 * returned fd's flags.  PF_NETLINK is rejected. */
int target_type = type;
int ret;
ret = target_to_host_sock_type(&type);
if (ret) {
return ret;
}
if (domain == PF_NETLINK)
return -TARGET_EAFNOSUPPORT;
/* Packet sockets carry the protocol in network byte order. */
if (domain == AF_PACKET ||
(domain == AF_INET && type == SOCK_PACKET)) {
protocol = tswap16(protocol);
}
ret = get_errno(socket(domain, type, protocol));
if (ret >= 0) {
ret = sock_flags_fixup(ret, target_type);
if (type == SOCK_PACKET) {
/* Manage an obsolete case :
 * if socket type is SOCK_PACKET, bind by name
 */
fd_trans_register(ret, &target_packet_trans);
}
}
return ret;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10192 | int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
{
/* Fill buf with the KVM-provided hypercall instruction sequence, or a
 * 4-instruction "always fail" stub when KVM_CAP_PPC_GET_PVINFO is
 * unavailable.  Always returns 0. */
PowerPCCPU *cpu = ppc_env_get_cpu(env);
CPUState *cs = CPU(cpu);
uint32_t *hc = (uint32_t*)buf;
struct kvm_ppc_pvinfo pvinfo;
if (kvm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
!kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, &pvinfo)) {
memcpy(buf, pvinfo.hcall, buf_len);
return 0;
}
/*
 * Fallback to always fail hypercalls:
 *
 * li r3, -1
 * nop
 * nop
 * nop
 */
hc[0] = 0x3860ffff;
hc[1] = 0x60000000;
hc[2] = 0x60000000;
hc[3] = 0x60000000;
return 0;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10194 | void process_incoming_migration(QEMUFile *f)
{
/* Destination side of migration: load the incoming VM state, announce
 * the guest's presence on the network, then either resume the VM or
 * leave it paused depending on -S (autostart).  Exits the process on
 * load failure. */
if (qemu_loadvm_state(f) < 0) {
fprintf(stderr, "load of migration failed\n");
exit(0);
}
qemu_announce_self();
DPRINTF("successfully loaded vm state\n");
incoming_expected = false;
if (autostart) {
vm_start();
} else {
runstate_set(RSTATE_PRE_LAUNCH);
}
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10213 | static int rv40_decode_mb_info(RV34DecContext *r)
{
/* Decode the macroblock type for RV40: handle the skip-run counter,
 * then pick a context VLC from the most common type among the
 * available neighbour MBs (left, top, top-right, top-left). */
MpegEncContext *s = &r->s;
GetBitContext *gb = &s->gb;
int q, i;
int prev_type = 0;
int mb_pos = s->mb_x + s->mb_y * s->mb_stride;
int blocks[RV34_MB_TYPES] = {0};
int count = 0;
if(!r->s.mb_skip_run)
r->s.mb_skip_run = svq3_get_ue_golomb(gb) + 1;
if(--r->s.mb_skip_run)
return RV34_MB_SKIP;
/* Tally neighbour MB types; avail_cache flags gate availability. */
if(r->avail_cache[6-1])
blocks[r->mb_type[mb_pos - 1]]++;
if(r->avail_cache[6-4]){
blocks[r->mb_type[mb_pos - s->mb_stride]]++;
if(r->avail_cache[6-2])
blocks[r->mb_type[mb_pos - s->mb_stride + 1]]++;
if(r->avail_cache[6-5])
blocks[r->mb_type[mb_pos - s->mb_stride - 1]]++;
}
for(i = 0; i < RV34_MB_TYPES; i++){
if(blocks[i] > count){
count = blocks[i];
prev_type = i;
}
}
if(s->pict_type == AV_PICTURE_TYPE_P){
prev_type = block_num_to_ptype_vlc_num[prev_type];
q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
if(q < PBTYPE_ESCAPE)
return q;
/* Escape code: a dquant follows, which is not supported here. */
q = get_vlc2(gb, ptype_vlc[prev_type].table, PTYPE_VLC_BITS, 1);
av_log(s->avctx, AV_LOG_ERROR, "Dquant for P-frame\n");
}else{
prev_type = block_num_to_btype_vlc_num[prev_type];
q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
if(q < PBTYPE_ESCAPE)
return q;
q = get_vlc2(gb, btype_vlc[prev_type].table, BTYPE_VLC_BITS, 1);
av_log(s->avctx, AV_LOG_ERROR, "Dquant for B-frame\n");
}
return 0;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10233 | static inline void RENAME(yvu9_to_yuy2)(const uint8_t *src1, const uint8_t *src2, const uint8_t *src3,
uint8_t *dst,
long width, long height,
long srcStride1, long srcStride2,
long srcStride3, long dstStride)
{
/* Convert planar YVU9 (chroma subsampled 4x4; src1=Y, src2=U, src3=V)
 * to packed YUY2.  Uses an MMX fast path for 8 output pairs at a time
 * when COMPILE_TEMPLATE_MMX is set, with a scalar tail loop. */
x86_reg x;
long y,w,h;
w=width/2; h=height;
for (y=0;y<h;y++) {
/* Chroma rows advance once every 4 luma rows (y>>2). */
const uint8_t* yp=src1+srcStride1*y;
const uint8_t* up=src2+srcStride2*(y>>2);
const uint8_t* vp=src3+srcStride3*(y>>2);
uint8_t* d=dst+dstStride*y;
x=0;
#if COMPILE_TEMPLATE_MMX
for (;x<w-7;x+=8) {
__asm__ volatile(
PREFETCH" 32(%1, %0) \n\t"
PREFETCH" 32(%2, %0) \n\t"
PREFETCH" 32(%3, %0) \n\t"
"movq (%1, %0, 4), %%mm0 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
"movq (%2, %0), %%mm1 \n\t" /* U0U1U2U3U4U5U6U7 */
"movq (%3, %0), %%mm2 \n\t" /* V0V1V2V3V4V5V6V7 */
"movq %%mm0, %%mm3 \n\t" /* Y0Y1Y2Y3Y4Y5Y6Y7 */
"movq %%mm1, %%mm4 \n\t" /* U0U1U2U3U4U5U6U7 */
"movq %%mm2, %%mm5 \n\t" /* V0V1V2V3V4V5V6V7 */
"punpcklbw %%mm1, %%mm1 \n\t" /* U0U0 U1U1 U2U2 U3U3 */
"punpcklbw %%mm2, %%mm2 \n\t" /* V0V0 V1V1 V2V2 V3V3 */
"punpckhbw %%mm4, %%mm4 \n\t" /* U4U4 U5U5 U6U6 U7U7 */
"punpckhbw %%mm5, %%mm5 \n\t" /* V4V4 V5V5 V6V6 V7V7 */
"movq %%mm1, %%mm6 \n\t"
"punpcklbw %%mm2, %%mm1 \n\t" /* U0V0 U0V0 U1V1 U1V1*/
"punpcklbw %%mm1, %%mm0 \n\t" /* Y0U0 Y1V0 Y2U0 Y3V0*/
"punpckhbw %%mm1, %%mm3 \n\t" /* Y4U1 Y5V1 Y6U1 Y7V1*/
MOVNTQ" %%mm0, (%4, %0, 8) \n\t"
MOVNTQ" %%mm3, 8(%4, %0, 8) \n\t"
"punpckhbw %%mm2, %%mm6 \n\t" /* U2V2 U2V2 U3V3 U3V3*/
"movq 8(%1, %0, 4), %%mm0 \n\t"
"movq %%mm0, %%mm3 \n\t"
"punpcklbw %%mm6, %%mm0 \n\t" /* Y U2 Y V2 Y U2 Y V2*/
"punpckhbw %%mm6, %%mm3 \n\t" /* Y U3 Y V3 Y U3 Y V3*/
MOVNTQ" %%mm0, 16(%4, %0, 8) \n\t"
MOVNTQ" %%mm3, 24(%4, %0, 8) \n\t"
"movq %%mm4, %%mm6 \n\t"
"movq 16(%1, %0, 4), %%mm0 \n\t"
"movq %%mm0, %%mm3 \n\t"
"punpcklbw %%mm5, %%mm4 \n\t"
"punpcklbw %%mm4, %%mm0 \n\t" /* Y U4 Y V4 Y U4 Y V4*/
"punpckhbw %%mm4, %%mm3 \n\t" /* Y U5 Y V5 Y U5 Y V5*/
MOVNTQ" %%mm0, 32(%4, %0, 8) \n\t"
MOVNTQ" %%mm3, 40(%4, %0, 8) \n\t"
"punpckhbw %%mm5, %%mm6 \n\t"
"movq 24(%1, %0, 4), %%mm0 \n\t"
"movq %%mm0, %%mm3 \n\t"
"punpcklbw %%mm6, %%mm0 \n\t" /* Y U6 Y V6 Y U6 Y V6*/
"punpckhbw %%mm6, %%mm3 \n\t" /* Y U7 Y V7 Y U7 Y V7*/
MOVNTQ" %%mm0, 48(%4, %0, 8) \n\t"
MOVNTQ" %%mm3, 56(%4, %0, 8) \n\t"
: "+r" (x)
: "r"(yp), "r" (up), "r"(vp), "r"(d)
:"memory");
}
#endif
/* Scalar tail: two Y samples share one U and one V per output quad. */
for (; x<w; x++) {
const long x2 = x<<2;
d[8*x+0] = yp[x2];
d[8*x+1] = up[x];
d[8*x+2] = yp[x2+1];
d[8*x+3] = vp[x];
d[8*x+4] = yp[x2+2];
d[8*x+5] = up[x];
d[8*x+6] = yp[x2+3];
d[8*x+7] = vp[x];
}
}
#if COMPILE_TEMPLATE_MMX
/* Leave MMX state and flush non-temporal stores. */
__asm__(
EMMS" \n\t"
SFENCE" \n\t"
::: "memory"
);
#endif
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10244 | static int pcm_decode_frame(AVCodecContext *avctx,
void *data, int *data_size,
AVPacket *avpkt)
{
/* Decode one packet of raw PCM in any of the supported layouts
 * (endianness, signedness, bit depth, A-law/mu-law, DVD, LXF, ...)
 * into native interleaved samples.  Returns bytes consumed or -1. */
const uint8_t *buf = avpkt->data;
int buf_size = avpkt->size;
PCMDecode *s = avctx->priv_data;
int sample_size, c, n, i;
uint8_t *samples;
const uint8_t *src, *src8, *src2[MAX_CHANNELS];
int32_t *dst_int32_t;
samples = data;
src = buf;
if (avctx->sample_fmt!=avctx->codec->sample_fmts[0]) {
av_log(avctx, AV_LOG_ERROR, "invalid sample_fmt\n");
return -1;
}
if(avctx->channels <= 0 || avctx->channels > MAX_CHANNELS){
av_log(avctx, AV_LOG_ERROR, "PCM channels out of bounds\n");
return -1;
}
sample_size = av_get_bits_per_sample(avctx->codec_id)/8;
/* av_get_bits_per_sample returns 0 for CODEC_ID_PCM_DVD */
if (CODEC_ID_PCM_DVD == avctx->codec_id)
/* 2 samples are interleaved per block in PCM_DVD */
sample_size = avctx->bits_per_coded_sample * 2 / 8;
else if (avctx->codec_id == CODEC_ID_PCM_LXF)
/* we process 40-bit blocks per channel for LXF */
sample_size = 5;
if (sample_size == 0) {
av_log(avctx, AV_LOG_ERROR, "Invalid sample_size\n");
return AVERROR(EINVAL);
}
n = avctx->channels * sample_size;
/* Truncate trailing partial sample groups rather than failing. */
if(n && buf_size % n){
if (buf_size < n) {
av_log(avctx, AV_LOG_ERROR, "invalid PCM packet\n");
return -1;
}else
buf_size -= buf_size % n;
}
buf_size= FFMIN(buf_size, *data_size/2);
n = buf_size/sample_size;
switch(avctx->codec->id) {
case CODEC_ID_PCM_U32LE:
DECODE(32, le32, src, samples, n, 0, 0x80000000)
break;
case CODEC_ID_PCM_U32BE:
DECODE(32, be32, src, samples, n, 0, 0x80000000)
break;
case CODEC_ID_PCM_S24LE:
DECODE(32, le24, src, samples, n, 8, 0)
break;
case CODEC_ID_PCM_S24BE:
DECODE(32, be24, src, samples, n, 8, 0)
break;
case CODEC_ID_PCM_U24LE:
DECODE(32, le24, src, samples, n, 8, 0x800000)
break;
case CODEC_ID_PCM_U24BE:
DECODE(32, be24, src, samples, n, 8, 0x800000)
break;
case CODEC_ID_PCM_S24DAUD:
/* D-Cinema audio: 24-bit big-endian with 4 sync-flag bits and
 * bit-reversed payload. */
for(;n>0;n--) {
uint32_t v = bytestream_get_be24(&src);
v >>= 4; // sync flags are here
AV_WN16A(samples, av_reverse[(v >> 8) & 0xff] +
(av_reverse[v & 0xff] << 8));
samples += 2;
}
break;
case CODEC_ID_PCM_S16LE_PLANAR:
/* Planar input: interleave one sample per channel per iteration. */
n /= avctx->channels;
for(c=0;c<avctx->channels;c++)
src2[c] = &src[c*n*2];
for(;n>0;n--)
for(c=0;c<avctx->channels;c++) {
AV_WN16A(samples, bytestream_get_le16(&src2[c]));
samples += 2;
}
src = src2[avctx->channels-1];
break;
case CODEC_ID_PCM_U16LE:
DECODE(16, le16, src, samples, n, 0, 0x8000)
break;
case CODEC_ID_PCM_U16BE:
DECODE(16, be16, src, samples, n, 0, 0x8000)
break;
case CODEC_ID_PCM_S8:
for(;n>0;n--) {
*samples++ = *src++ + 128;
}
break;
#if HAVE_BIGENDIAN
case CODEC_ID_PCM_F64LE:
DECODE(64, le64, src, samples, n, 0, 0)
break;
case CODEC_ID_PCM_S32LE:
case CODEC_ID_PCM_F32LE:
DECODE(32, le32, src, samples, n, 0, 0)
break;
case CODEC_ID_PCM_S16LE:
DECODE(16, le16, src, samples, n, 0, 0)
break;
case CODEC_ID_PCM_F64BE:
case CODEC_ID_PCM_F32BE:
case CODEC_ID_PCM_S32BE:
case CODEC_ID_PCM_S16BE:
#else
case CODEC_ID_PCM_F64BE:
DECODE(64, be64, src, samples, n, 0, 0)
break;
case CODEC_ID_PCM_F32BE:
case CODEC_ID_PCM_S32BE:
DECODE(32, be32, src, samples, n, 0, 0)
break;
case CODEC_ID_PCM_S16BE:
DECODE(16, be16, src, samples, n, 0, 0)
break;
case CODEC_ID_PCM_F64LE:
case CODEC_ID_PCM_F32LE:
case CODEC_ID_PCM_S32LE:
case CODEC_ID_PCM_S16LE:
#endif /* HAVE_BIGENDIAN */
case CODEC_ID_PCM_U8:
/* Native-endian formats: straight copy. */
memcpy(samples, src, n*sample_size);
src += n*sample_size;
samples += n * sample_size;
break;
case CODEC_ID_PCM_ZORK:
for(;n>0;n--) {
int x= *src++;
if(x&128) x-= 128;
else x = -x;
AV_WN16A(samples, x << 8);
samples += 2;
}
break;
case CODEC_ID_PCM_ALAW:
case CODEC_ID_PCM_MULAW:
/* Table lookup expansion prepared at init time in s->table. */
for(;n>0;n--) {
AV_WN16A(samples, s->table[*src++]);
samples += 2;
}
break;
case CODEC_ID_PCM_DVD:
dst_int32_t = data;
n /= avctx->channels;
switch (avctx->bits_per_coded_sample) {
case 20:
/* 20-bit: 16-bit MSBs per sample followed by packed 4-bit LSBs. */
while (n--) {
c = avctx->channels;
src8 = src + 4*c;
while (c--) {
*dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8 &0xf0) << 8);
*dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++ &0x0f) << 12);
}
src = src8;
}
break;
case 24:
while (n--) {
c = avctx->channels;
src8 = src + 4*c;
while (c--) {
*dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++) << 8);
*dst_int32_t++ = (bytestream_get_be16(&src) << 16) + ((*src8++) << 8);
}
src = src8;
}
break;
default:
av_log(avctx, AV_LOG_ERROR, "PCM DVD unsupported sample depth\n");
return -1;
}
samples = (uint8_t *) dst_int32_t;
break;
case CODEC_ID_PCM_LXF:
dst_int32_t = data;
n /= avctx->channels;
//unpack and de-planerize
for (i = 0; i < n; i++) {
for (c = 0, src8 = src + i*5; c < avctx->channels; c++, src8 += n*5) {
//extract low 20 bits and expand to 32 bits
*dst_int32_t++ = (src8[2] << 28) | (src8[1] << 20) | (src8[0] << 12) |
((src8[2] & 0xF) << 8) | src8[1];
}
for (c = 0, src8 = src + i*5; c < avctx->channels; c++, src8 += n*5) {
//extract high 20 bits and expand to 32 bits
*dst_int32_t++ = (src8[4] << 24) | (src8[3] << 16) |
((src8[2] & 0xF0) << 8) | (src8[4] << 4) | (src8[3] >> 4);
}
}
src += n * avctx->channels * 5;
samples = (uint8_t *) dst_int32_t;
break;
default:
return -1;
}
*data_size = samples - (uint8_t *)data;
return src - buf;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10253 | static int parse_presentation_segment(AVCodecContext *avctx,
const uint8_t *buf, int buf_size,
int64_t pts)
{
/* Parse a PGS presentation composition segment: video dimensions,
 * composition id/flags and the subtitle placement, storing the result
 * in ctx->presentation.  Returns 0 or a negative error code. */
PGSSubContext *ctx = avctx->priv_data;
int x, y, ret;
int w = bytestream_get_be16(&buf);
int h = bytestream_get_be16(&buf);
ctx->presentation.pts = pts;
av_dlog(avctx, "Video Dimensions %dx%d\n",
w, h);
ret = ff_set_dimensions(avctx, w, h);
if (ret < 0)
return ret;
/* Skip 1 bytes of unknown, frame rate? */
buf++;
ctx->presentation.id_number = bytestream_get_be16(&buf);
/*
 * Skip 3 bytes of unknown:
 * state
 * palette_update_flag (0x80),
 * palette_id_to_use,
 */
buf += 3;
ctx->presentation.object_number = bytestream_get_byte(&buf);
ctx->presentation.composition_flag = 0;
/* No objects: nothing further to parse in this segment. */
if (!ctx->presentation.object_number)
return 0;
/*
 * Skip 3 bytes of unknown:
 * object_id_ref (2 bytes),
 * window_id_ref,
 */
buf += 3;
ctx->presentation.composition_flag = bytestream_get_byte(&buf);
x = bytestream_get_be16(&buf);
y = bytestream_get_be16(&buf);
/* TODO If cropping, cropping_x, cropping_y, cropping_width, cropping_height (all 2 bytes).*/
av_dlog(avctx, "Subtitle Placement x=%d, y=%d\n", x, y);
/* Clamp out-of-bounds placement to the origin rather than failing. */
if (x > avctx->width || y > avctx->height) {
av_log(avctx, AV_LOG_ERROR, "Subtitle out of video bounds. x = %d, y = %d, video width = %d, video height = %d.\n",
x, y, avctx->width, avctx->height);
x = 0; y = 0;
}
/* Fill in dimensions */
ctx->presentation.x = x;
ctx->presentation.y = y;
return 0;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10266 | void qemu_coroutine_delete(Coroutine *co_)
{
/* Release a finished coroutine: recycle it into the per-thread free
 * pool if there is room, otherwise free its stack and descriptor. */
CoroutineThreadState *s = coroutine_get_thread_state();
CoroutineUContext *co = DO_UPCAST(CoroutineUContext, base, co_);
if (s->pool_size < POOL_MAX_SIZE) {
QLIST_INSERT_HEAD(&s->pool, &co->base, pool_next);
co->base.caller = NULL;
s->pool_size++;
return;
}
g_free(co->stack);
g_free(co);
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10268 | int float64_eq_signaling( float64 a, float64 b STATUS_PARAM )
{
/* IEEE-754 "signaling" equality for float64: raises the invalid
 * exception for any NaN operand, then compares; +0 and -0 compare
 * equal via the (a|b)<<1 == 0 test. */
if ( ( ( extractFloat64Exp( a ) == 0x7FF ) && extractFloat64Frac( a ) )
|| ( ( extractFloat64Exp( b ) == 0x7FF ) && extractFloat64Frac( b ) )
) {
float_raise( float_flag_invalid STATUS_VAR);
return 0;
}
return ( a == b ) || ( (bits64) ( ( a | b )<<1 ) == 0 );
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10269 | static int tpm_passthrough_unix_tx_bufs(int tpm_fd,
const uint8_t *in, uint32_t in_len,
uint8_t *out, uint32_t out_len)
{
/* One TPM passthrough transaction: write the command, read and
 * sanity-check the response (header size and embedded length field).
 * On any failure a fatal-error response is synthesized into out. */
int ret;
ret = tpm_passthrough_unix_write(tpm_fd, in, in_len);
if (ret != in_len) {
error_report("tpm_passthrough: error while transmitting data "
"to TPM: %s (%i)\n",
strerror(errno), errno);
goto err_exit;
}
ret = tpm_passthrough_unix_read(tpm_fd, out, out_len);
if (ret < 0) {
error_report("tpm_passthrough: error while reading data from "
"TPM: %s (%i)\n",
strerror(errno), errno);
} else if (ret < sizeof(struct tpm_resp_hdr) ||
tpm_passthrough_get_size_from_buffer(out) != ret) {
/* Response shorter than a header, or its length field disagrees
 * with the number of bytes actually read. */
ret = -1;
error_report("tpm_passthrough: received invalid response "
"packet from TPM\n");
}
err_exit:
if (ret < 0) {
tpm_write_fatal_error_response(out, out_len);
}
return ret;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10278 | int cpu_memory_rw_debug(CPUArchState *env, target_ulong addr,
uint8_t *buf, int len, int is_write)
{
/* Debugger (gdbstub) access to guest virtual memory: walk page by
 * page, translating each virtual page to a physical address.
 * Returns 0 on success, -1 if any page is unmapped. */
int l;
target_phys_addr_t phys_addr;
target_ulong page;
while (len > 0) {
page = addr & TARGET_PAGE_MASK;
phys_addr = cpu_get_phys_page_debug(env, page);
/* if no physical page mapped, return an error */
if (phys_addr == -1)
return -1;
/* Clamp the chunk to the end of the current page. */
l = (page + TARGET_PAGE_SIZE) - addr;
if (l > len)
l = len;
phys_addr += (addr & ~TARGET_PAGE_MASK);
if (is_write)
/* ROM-aware write so breakpoints can be planted in ROM. */
cpu_physical_memory_write_rom(phys_addr, buf, l);
else
cpu_physical_memory_rw(phys_addr, buf, l, is_write);
len -= l;
buf += l;
addr += l;
}
return 0;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10282 | struct omap_uwire_s *omap_uwire_init(MemoryRegion *system_memory,
target_phys_addr_t base,
qemu_irq *irq, qemu_irq dma, omap_clk clk)
{
/* Allocate and wire up the OMAP uWire controller: TX/RX interrupts,
 * the TX DMA request line, and a 0x800-byte MMIO region at @base.
 * Returns the new device state (owned by the caller/machine). */
struct omap_uwire_s *s = (struct omap_uwire_s *)
g_malloc0(sizeof(struct omap_uwire_s));
s->txirq = irq[0];
s->rxirq = irq[1];
s->txdrq = dma;
omap_uwire_reset(s);
memory_region_init_io(&s->iomem, &omap_uwire_ops, s, "omap-uwire", 0x800);
memory_region_add_subregion(system_memory, base, &s->iomem);
return s;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10291 | static int send_sub_rect(VncState *vs, int x, int y, int w, int h)
{
/* Encode and send one tight-encoding sub-rectangle: count its colors,
 * then choose JPEG (when compiled in and allowed by the adaptive
 * update-frequency heuristics) or a palette/raw encoding. */
VncPalette *palette = &color_count_palette;
uint32_t bg = 0, fg = 0;
int colors;
int ret = 0;
#ifdef CONFIG_VNC_JPEG
bool force_jpeg = false;
bool allow_jpeg = true;
#endif
vnc_framebuffer_update(vs, x, y, w, h, vs->tight.type);
vnc_tight_start(vs);
vnc_raw_send_framebuffer_update(vs, x, y, w, h);
vnc_tight_stop(vs);
#ifdef CONFIG_VNC_JPEG
if (!vs->vd->non_adaptive && vs->tight.quality != (uint8_t)-1) {
/* Adaptive mode: rarely-changing rects avoid lossy JPEG; rapidly
 * changing rects force it and are recorded as lossy. */
double freq = vnc_update_freq(vs, x, y, w, h);
if (freq < tight_jpeg_conf[vs->tight.quality].jpeg_freq_min) {
allow_jpeg = false;
}
if (freq >= tight_jpeg_conf[vs->tight.quality].jpeg_freq_threshold) {
force_jpeg = true;
vnc_sent_lossy_rect(vs, x, y, w, h);
}
}
#endif
colors = tight_fill_palette(vs, x, y, w * h, &bg, &fg, palette);
#ifdef CONFIG_VNC_JPEG
if (allow_jpeg && vs->tight.quality != (uint8_t)-1) {
ret = send_sub_rect_jpeg(vs, x, y, w, h, bg, fg, colors, palette,
force_jpeg);
} else {
ret = send_sub_rect_nojpeg(vs, x, y, w, h, bg, fg, colors, palette);
}
#else
ret = send_sub_rect_nojpeg(vs, x, y, w, h, bg, fg, colors, palette);
#endif
return ret;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10295 | static ExitStatus translate_one(DisasContext *ctx, uint32_t insn)
{
uint32_t palcode;
int32_t disp21, disp16;
#ifndef CONFIG_USER_ONLY
int32_t disp12;
#endif
uint16_t fn11;
uint8_t opc, ra, rb, rc, fpfn, fn7, islit, real_islit;
uint8_t lit;
ExitStatus ret;
/* Decode all instruction fields */
opc = insn >> 26;
ra = (insn >> 21) & 0x1F;
rb = (insn >> 16) & 0x1F;
rc = insn & 0x1F;
real_islit = islit = (insn >> 12) & 1;
if (rb == 31 && !islit) {
islit = 1;
lit = 0;
} else
lit = (insn >> 13) & 0xFF;
palcode = insn & 0x03FFFFFF;
disp21 = ((int32_t)((insn & 0x001FFFFF) << 11)) >> 11;
disp16 = (int16_t)(insn & 0x0000FFFF);
#ifndef CONFIG_USER_ONLY
disp12 = (int32_t)((insn & 0x00000FFF) << 20) >> 20;
#endif
fn11 = (insn >> 5) & 0x000007FF;
fpfn = fn11 & 0x3F;
fn7 = (insn >> 5) & 0x0000007F;
LOG_DISAS("opc %02x ra %2d rb %2d rc %2d disp16 %6d\n",
opc, ra, rb, rc, disp16);
ret = NO_EXIT;
switch (opc) {
case 0x00:
/* CALL_PAL */
ret = gen_call_pal(ctx, palcode);
break;
case 0x01:
/* OPC01 */
goto invalid_opc;
case 0x02:
/* OPC02 */
goto invalid_opc;
case 0x03:
/* OPC03 */
goto invalid_opc;
case 0x04:
/* OPC04 */
goto invalid_opc;
case 0x05:
/* OPC05 */
goto invalid_opc;
case 0x06:
/* OPC06 */
goto invalid_opc;
case 0x07:
/* OPC07 */
goto invalid_opc;
case 0x08:
/* LDA */
if (likely(ra != 31)) {
if (rb != 31) {
tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16);
} else {
tcg_gen_movi_i64(cpu_ir[ra], disp16);
}
}
break;
case 0x09:
/* LDAH */
if (likely(ra != 31)) {
if (rb != 31) {
tcg_gen_addi_i64(cpu_ir[ra], cpu_ir[rb], disp16 << 16);
} else {
tcg_gen_movi_i64(cpu_ir[ra], disp16 << 16);
}
}
break;
case 0x0A:
/* LDBU */
if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
gen_load_mem(ctx, &tcg_gen_qemu_ld8u, ra, rb, disp16, 0, 0);
break;
}
goto invalid_opc;
case 0x0B:
/* LDQ_U */
gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 1);
break;
case 0x0C:
/* LDWU */
if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
gen_load_mem(ctx, &tcg_gen_qemu_ld16u, ra, rb, disp16, 0, 0);
break;
}
goto invalid_opc;
case 0x0D:
/* STW */
gen_store_mem(ctx, &tcg_gen_qemu_st16, ra, rb, disp16, 0, 0);
break;
case 0x0E:
/* STB */
gen_store_mem(ctx, &tcg_gen_qemu_st8, ra, rb, disp16, 0, 0);
break;
case 0x0F:
/* STQ_U */
gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 1);
break;
case 0x10:
switch (fn7) {
case 0x00:
/* ADDL */
if (likely(rc != 31)) {
if (ra != 31) {
if (islit) {
tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
} else {
tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
}
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], lit);
} else {
tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x02:
/* S4ADDL */
if (likely(rc != 31)) {
if (ra != 31) {
TCGv tmp = tcg_temp_new();
tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
if (islit) {
tcg_gen_addi_i64(tmp, tmp, lit);
} else {
tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
}
tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
tcg_temp_free(tmp);
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], lit);
} else {
tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x09:
/* SUBL */
if (likely(rc != 31)) {
if (ra != 31) {
if (islit) {
tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
} else {
tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], -lit);
} else {
tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
}
}
}
break;
case 0x0B:
/* S4SUBL */
if (likely(rc != 31)) {
if (ra != 31) {
TCGv tmp = tcg_temp_new();
tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
if (islit) {
tcg_gen_subi_i64(tmp, tmp, lit);
} else {
tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
}
tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
tcg_temp_free(tmp);
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], -lit);
} else {
tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
}
}
}
break;
case 0x0F:
/* CMPBGE */
gen_cmpbge(ra, rb, rc, islit, lit);
break;
case 0x12:
/* S8ADDL */
if (likely(rc != 31)) {
if (ra != 31) {
TCGv tmp = tcg_temp_new();
tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
if (islit) {
tcg_gen_addi_i64(tmp, tmp, lit);
} else {
tcg_gen_add_i64(tmp, tmp, cpu_ir[rb]);
}
tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
tcg_temp_free(tmp);
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], lit);
} else {
tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x1B:
/* S8SUBL */
if (likely(rc != 31)) {
if (ra != 31) {
TCGv tmp = tcg_temp_new();
tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
if (islit) {
tcg_gen_subi_i64(tmp, tmp, lit);
} else {
tcg_gen_sub_i64(tmp, tmp, cpu_ir[rb]);
}
tcg_gen_ext32s_i64(cpu_ir[rc], tmp);
tcg_temp_free(tmp);
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], -lit);
} else {
tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
}
}
}
break;
case 0x1D:
/* CMPULT */
gen_cmp(TCG_COND_LTU, ra, rb, rc, islit, lit);
break;
case 0x20:
/* ADDQ */
if (likely(rc != 31)) {
if (ra != 31) {
if (islit) {
tcg_gen_addi_i64(cpu_ir[rc], cpu_ir[ra], lit);
} else {
tcg_gen_add_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], lit);
} else {
tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x22:
/* S4ADDQ */
if (likely(rc != 31)) {
if (ra != 31) {
TCGv tmp = tcg_temp_new();
tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
if (islit) {
tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
} else {
tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
}
tcg_temp_free(tmp);
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], lit);
} else {
tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x29:
/* SUBQ */
if (likely(rc != 31)) {
if (ra != 31) {
if (islit) {
tcg_gen_subi_i64(cpu_ir[rc], cpu_ir[ra], lit);
} else {
tcg_gen_sub_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], -lit);
} else {
tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x2B:
/* S4SUBQ */
if (likely(rc != 31)) {
if (ra != 31) {
TCGv tmp = tcg_temp_new();
tcg_gen_shli_i64(tmp, cpu_ir[ra], 2);
if (islit) {
tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
} else {
tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
}
tcg_temp_free(tmp);
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], -lit);
} else {
tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x2D:
/* CMPEQ */
gen_cmp(TCG_COND_EQ, ra, rb, rc, islit, lit);
break;
case 0x32:
/* S8ADDQ */
if (likely(rc != 31)) {
if (ra != 31) {
TCGv tmp = tcg_temp_new();
tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
if (islit) {
tcg_gen_addi_i64(cpu_ir[rc], tmp, lit);
} else {
tcg_gen_add_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
}
tcg_temp_free(tmp);
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], lit);
} else {
tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x3B:
/* S8SUBQ */
if (likely(rc != 31)) {
if (ra != 31) {
TCGv tmp = tcg_temp_new();
tcg_gen_shli_i64(tmp, cpu_ir[ra], 3);
if (islit) {
tcg_gen_subi_i64(cpu_ir[rc], tmp, lit);
} else {
tcg_gen_sub_i64(cpu_ir[rc], tmp, cpu_ir[rb]);
}
tcg_temp_free(tmp);
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], -lit);
} else {
tcg_gen_neg_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x3D:
/* CMPULE */
gen_cmp(TCG_COND_LEU, ra, rb, rc, islit, lit);
break;
case 0x40:
/* ADDL/V */
gen_addlv(ra, rb, rc, islit, lit);
break;
case 0x49:
/* SUBL/V */
gen_sublv(ra, rb, rc, islit, lit);
break;
case 0x4D:
/* CMPLT */
gen_cmp(TCG_COND_LT, ra, rb, rc, islit, lit);
break;
case 0x60:
/* ADDQ/V */
gen_addqv(ra, rb, rc, islit, lit);
break;
case 0x69:
/* SUBQ/V */
gen_subqv(ra, rb, rc, islit, lit);
break;
case 0x6D:
/* CMPLE */
gen_cmp(TCG_COND_LE, ra, rb, rc, islit, lit);
break;
default:
goto invalid_opc;
}
break;
case 0x11:
switch (fn7) {
case 0x00:
/* AND */
if (likely(rc != 31)) {
if (ra == 31) {
tcg_gen_movi_i64(cpu_ir[rc], 0);
} else if (islit) {
tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], lit);
} else {
tcg_gen_and_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
}
break;
case 0x08:
/* BIC */
if (likely(rc != 31)) {
if (ra != 31) {
if (islit) {
tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
} else {
tcg_gen_andc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
} else
tcg_gen_movi_i64(cpu_ir[rc], 0);
}
break;
case 0x14:
/* CMOVLBS */
gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 1);
break;
case 0x16:
/* CMOVLBC */
gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 1);
break;
case 0x20:
/* BIS */
if (likely(rc != 31)) {
if (ra != 31) {
if (islit) {
tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], lit);
} else {
tcg_gen_or_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], lit);
} else {
tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x24:
/* CMOVEQ */
gen_cmov(TCG_COND_EQ, ra, rb, rc, islit, lit, 0);
break;
case 0x26:
/* CMOVNE */
gen_cmov(TCG_COND_NE, ra, rb, rc, islit, lit, 0);
break;
case 0x28:
/* ORNOT */
if (likely(rc != 31)) {
if (ra != 31) {
if (islit) {
tcg_gen_ori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
} else {
tcg_gen_orc_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], ~lit);
} else {
tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x40:
/* XOR */
if (likely(rc != 31)) {
if (ra != 31) {
if (islit) {
tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], lit);
} else {
tcg_gen_xor_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], lit);
} else {
tcg_gen_mov_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x44:
/* CMOVLT */
gen_cmov(TCG_COND_LT, ra, rb, rc, islit, lit, 0);
break;
case 0x46:
/* CMOVGE */
gen_cmov(TCG_COND_GE, ra, rb, rc, islit, lit, 0);
break;
case 0x48:
/* EQV */
if (likely(rc != 31)) {
if (ra != 31) {
if (islit) {
tcg_gen_xori_i64(cpu_ir[rc], cpu_ir[ra], ~lit);
} else {
tcg_gen_eqv_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
} else {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], ~lit);
} else {
tcg_gen_not_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
}
break;
case 0x61:
/* AMASK */
if (likely(rc != 31)) {
uint64_t amask = ctx->tb->flags >> TB_FLAGS_AMASK_SHIFT;
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], lit & ~amask);
} else {
tcg_gen_andi_i64(cpu_ir[rc], cpu_ir[rb], ~amask);
}
}
break;
case 0x64:
/* CMOVLE */
gen_cmov(TCG_COND_LE, ra, rb, rc, islit, lit, 0);
break;
case 0x66:
/* CMOVGT */
gen_cmov(TCG_COND_GT, ra, rb, rc, islit, lit, 0);
break;
case 0x6C:
/* IMPLVER */
if (rc != 31) {
tcg_gen_movi_i64(cpu_ir[rc], ctx->implver);
}
break;
default:
goto invalid_opc;
}
break;
case 0x12:
switch (fn7) {
case 0x02:
/* MSKBL */
gen_msk_l(ra, rb, rc, islit, lit, 0x01);
break;
case 0x06:
/* EXTBL */
gen_ext_l(ra, rb, rc, islit, lit, 0x01);
break;
case 0x0B:
/* INSBL */
gen_ins_l(ra, rb, rc, islit, lit, 0x01);
break;
case 0x12:
/* MSKWL */
gen_msk_l(ra, rb, rc, islit, lit, 0x03);
break;
case 0x16:
/* EXTWL */
gen_ext_l(ra, rb, rc, islit, lit, 0x03);
break;
case 0x1B:
/* INSWL */
gen_ins_l(ra, rb, rc, islit, lit, 0x03);
break;
case 0x22:
/* MSKLL */
gen_msk_l(ra, rb, rc, islit, lit, 0x0f);
break;
case 0x26:
/* EXTLL */
gen_ext_l(ra, rb, rc, islit, lit, 0x0f);
break;
case 0x2B:
/* INSLL */
gen_ins_l(ra, rb, rc, islit, lit, 0x0f);
break;
case 0x30:
/* ZAP */
gen_zap(ra, rb, rc, islit, lit);
break;
case 0x31:
/* ZAPNOT */
gen_zapnot(ra, rb, rc, islit, lit);
break;
case 0x32:
/* MSKQL */
gen_msk_l(ra, rb, rc, islit, lit, 0xff);
break;
case 0x34:
/* SRL */
if (likely(rc != 31)) {
if (ra != 31) {
if (islit) {
tcg_gen_shri_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
} else {
TCGv shift = tcg_temp_new();
tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
tcg_gen_shr_i64(cpu_ir[rc], cpu_ir[ra], shift);
tcg_temp_free(shift);
}
} else
tcg_gen_movi_i64(cpu_ir[rc], 0);
}
break;
case 0x36:
/* EXTQL */
gen_ext_l(ra, rb, rc, islit, lit, 0xff);
break;
case 0x39:
/* SLL */
if (likely(rc != 31)) {
if (ra != 31) {
if (islit) {
tcg_gen_shli_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
} else {
TCGv shift = tcg_temp_new();
tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
tcg_gen_shl_i64(cpu_ir[rc], cpu_ir[ra], shift);
tcg_temp_free(shift);
}
} else
tcg_gen_movi_i64(cpu_ir[rc], 0);
}
break;
case 0x3B:
/* INSQL */
gen_ins_l(ra, rb, rc, islit, lit, 0xff);
break;
case 0x3C:
/* SRA */
if (likely(rc != 31)) {
if (ra != 31) {
if (islit) {
tcg_gen_sari_i64(cpu_ir[rc], cpu_ir[ra], lit & 0x3f);
} else {
TCGv shift = tcg_temp_new();
tcg_gen_andi_i64(shift, cpu_ir[rb], 0x3f);
tcg_gen_sar_i64(cpu_ir[rc], cpu_ir[ra], shift);
tcg_temp_free(shift);
}
} else
tcg_gen_movi_i64(cpu_ir[rc], 0);
}
break;
case 0x52:
/* MSKWH */
gen_msk_h(ra, rb, rc, islit, lit, 0x03);
break;
case 0x57:
/* INSWH */
gen_ins_h(ra, rb, rc, islit, lit, 0x03);
break;
case 0x5A:
/* EXTWH */
gen_ext_h(ra, rb, rc, islit, lit, 0x03);
break;
case 0x62:
/* MSKLH */
gen_msk_h(ra, rb, rc, islit, lit, 0x0f);
break;
case 0x67:
/* INSLH */
gen_ins_h(ra, rb, rc, islit, lit, 0x0f);
break;
case 0x6A:
/* EXTLH */
gen_ext_h(ra, rb, rc, islit, lit, 0x0f);
break;
case 0x72:
/* MSKQH */
gen_msk_h(ra, rb, rc, islit, lit, 0xff);
break;
case 0x77:
/* INSQH */
gen_ins_h(ra, rb, rc, islit, lit, 0xff);
break;
case 0x7A:
/* EXTQH */
gen_ext_h(ra, rb, rc, islit, lit, 0xff);
break;
default:
goto invalid_opc;
}
break;
case 0x13:
switch (fn7) {
case 0x00:
/* MULL */
if (likely(rc != 31)) {
if (ra == 31) {
tcg_gen_movi_i64(cpu_ir[rc], 0);
} else {
if (islit) {
tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
} else {
tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
tcg_gen_ext32s_i64(cpu_ir[rc], cpu_ir[rc]);
}
}
break;
case 0x20:
/* MULQ */
if (likely(rc != 31)) {
if (ra == 31) {
tcg_gen_movi_i64(cpu_ir[rc], 0);
} else if (islit) {
tcg_gen_muli_i64(cpu_ir[rc], cpu_ir[ra], lit);
} else {
tcg_gen_mul_i64(cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
}
break;
case 0x30:
/* UMULH */
{
TCGv low;
if (unlikely(rc == 31)){
break;
}
if (ra == 31) {
tcg_gen_movi_i64(cpu_ir[rc], 0);
break;
}
low = tcg_temp_new();
if (islit) {
tcg_gen_movi_tl(low, lit);
tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], low);
} else {
tcg_gen_mulu2_i64(low, cpu_ir[rc], cpu_ir[ra], cpu_ir[rb]);
}
tcg_temp_free(low);
}
break;
case 0x40:
/* MULL/V */
gen_mullv(ra, rb, rc, islit, lit);
break;
case 0x60:
/* MULQ/V */
gen_mulqv(ra, rb, rc, islit, lit);
break;
default:
goto invalid_opc;
}
break;
case 0x14:
switch (fpfn) { /* fn11 & 0x3F */
case 0x04:
/* ITOFS */
if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
goto invalid_opc;
}
if (likely(rc != 31)) {
if (ra != 31) {
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
gen_helper_memory_to_s(cpu_fir[rc], tmp);
tcg_temp_free_i32(tmp);
} else
tcg_gen_movi_i64(cpu_fir[rc], 0);
}
break;
case 0x0A:
/* SQRTF */
if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
gen_fsqrtf(rb, rc);
break;
}
goto invalid_opc;
case 0x0B:
/* SQRTS */
if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
gen_fsqrts(ctx, rb, rc, fn11);
break;
}
goto invalid_opc;
case 0x14:
/* ITOFF */
if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
goto invalid_opc;
}
if (likely(rc != 31)) {
if (ra != 31) {
TCGv_i32 tmp = tcg_temp_new_i32();
tcg_gen_trunc_i64_i32(tmp, cpu_ir[ra]);
gen_helper_memory_to_f(cpu_fir[rc], tmp);
tcg_temp_free_i32(tmp);
} else
tcg_gen_movi_i64(cpu_fir[rc], 0);
}
break;
case 0x24:
/* ITOFT */
if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
goto invalid_opc;
}
if (likely(rc != 31)) {
if (ra != 31) {
tcg_gen_mov_i64(cpu_fir[rc], cpu_ir[ra]);
} else {
tcg_gen_movi_i64(cpu_fir[rc], 0);
}
}
break;
case 0x2A:
/* SQRTG */
if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
gen_fsqrtg(rb, rc);
break;
}
goto invalid_opc;
case 0x02B:
/* SQRTT */
if (ctx->tb->flags & TB_FLAGS_AMASK_FIX) {
gen_fsqrtt(ctx, rb, rc, fn11);
break;
}
goto invalid_opc;
default:
goto invalid_opc;
}
break;
case 0x15:
/* VAX floating point */
/* XXX: rounding mode and trap are ignored (!) */
switch (fpfn) { /* fn11 & 0x3F */
case 0x00:
/* ADDF */
gen_faddf(ra, rb, rc);
break;
case 0x01:
/* SUBF */
gen_fsubf(ra, rb, rc);
break;
case 0x02:
/* MULF */
gen_fmulf(ra, rb, rc);
break;
case 0x03:
/* DIVF */
gen_fdivf(ra, rb, rc);
break;
case 0x1E:
/* CVTDG */
#if 0 // TODO
gen_fcvtdg(rb, rc);
#else
goto invalid_opc;
#endif
break;
case 0x20:
/* ADDG */
gen_faddg(ra, rb, rc);
break;
case 0x21:
/* SUBG */
gen_fsubg(ra, rb, rc);
break;
case 0x22:
/* MULG */
gen_fmulg(ra, rb, rc);
break;
case 0x23:
/* DIVG */
gen_fdivg(ra, rb, rc);
break;
case 0x25:
/* CMPGEQ */
gen_fcmpgeq(ra, rb, rc);
break;
case 0x26:
/* CMPGLT */
gen_fcmpglt(ra, rb, rc);
break;
case 0x27:
/* CMPGLE */
gen_fcmpgle(ra, rb, rc);
break;
case 0x2C:
/* CVTGF */
gen_fcvtgf(rb, rc);
break;
case 0x2D:
/* CVTGD */
#if 0 // TODO
gen_fcvtgd(rb, rc);
#else
goto invalid_opc;
#endif
break;
case 0x2F:
/* CVTGQ */
gen_fcvtgq(rb, rc);
break;
case 0x3C:
/* CVTQF */
gen_fcvtqf(rb, rc);
break;
case 0x3E:
/* CVTQG */
gen_fcvtqg(rb, rc);
break;
default:
goto invalid_opc;
}
break;
case 0x16:
/* IEEE floating-point */
switch (fpfn) { /* fn11 & 0x3F */
case 0x00:
/* ADDS */
gen_fadds(ctx, ra, rb, rc, fn11);
break;
case 0x01:
/* SUBS */
gen_fsubs(ctx, ra, rb, rc, fn11);
break;
case 0x02:
/* MULS */
gen_fmuls(ctx, ra, rb, rc, fn11);
break;
case 0x03:
/* DIVS */
gen_fdivs(ctx, ra, rb, rc, fn11);
break;
case 0x20:
/* ADDT */
gen_faddt(ctx, ra, rb, rc, fn11);
break;
case 0x21:
/* SUBT */
gen_fsubt(ctx, ra, rb, rc, fn11);
break;
case 0x22:
/* MULT */
gen_fmult(ctx, ra, rb, rc, fn11);
break;
case 0x23:
/* DIVT */
gen_fdivt(ctx, ra, rb, rc, fn11);
break;
case 0x24:
/* CMPTUN */
gen_fcmptun(ctx, ra, rb, rc, fn11);
break;
case 0x25:
/* CMPTEQ */
gen_fcmpteq(ctx, ra, rb, rc, fn11);
break;
case 0x26:
/* CMPTLT */
gen_fcmptlt(ctx, ra, rb, rc, fn11);
break;
case 0x27:
/* CMPTLE */
gen_fcmptle(ctx, ra, rb, rc, fn11);
break;
case 0x2C:
if (fn11 == 0x2AC || fn11 == 0x6AC) {
/* CVTST */
gen_fcvtst(ctx, rb, rc, fn11);
} else {
/* CVTTS */
gen_fcvtts(ctx, rb, rc, fn11);
}
break;
case 0x2F:
/* CVTTQ */
gen_fcvttq(ctx, rb, rc, fn11);
break;
case 0x3C:
/* CVTQS */
gen_fcvtqs(ctx, rb, rc, fn11);
break;
case 0x3E:
/* CVTQT */
gen_fcvtqt(ctx, rb, rc, fn11);
break;
default:
goto invalid_opc;
}
break;
case 0x17:
switch (fn11) {
case 0x010:
/* CVTLQ */
gen_fcvtlq(rb, rc);
break;
case 0x020:
if (likely(rc != 31)) {
if (ra == rb) {
/* FMOV */
if (ra == 31) {
tcg_gen_movi_i64(cpu_fir[rc], 0);
} else {
tcg_gen_mov_i64(cpu_fir[rc], cpu_fir[ra]);
}
} else {
/* CPYS */
gen_fcpys(ra, rb, rc);
}
}
break;
case 0x021:
/* CPYSN */
gen_fcpysn(ra, rb, rc);
break;
case 0x022:
/* CPYSE */
gen_fcpyse(ra, rb, rc);
break;
case 0x024:
/* MT_FPCR */
if (likely(ra != 31)) {
gen_helper_store_fpcr(cpu_env, cpu_fir[ra]);
} else {
TCGv tmp = tcg_const_i64(0);
gen_helper_store_fpcr(cpu_env, tmp);
tcg_temp_free(tmp);
}
break;
case 0x025:
/* MF_FPCR */
if (likely(ra != 31)) {
gen_helper_load_fpcr(cpu_fir[ra], cpu_env);
}
break;
case 0x02A:
/* FCMOVEQ */
gen_fcmov(TCG_COND_EQ, ra, rb, rc);
break;
case 0x02B:
/* FCMOVNE */
gen_fcmov(TCG_COND_NE, ra, rb, rc);
break;
case 0x02C:
/* FCMOVLT */
gen_fcmov(TCG_COND_LT, ra, rb, rc);
break;
case 0x02D:
/* FCMOVGE */
gen_fcmov(TCG_COND_GE, ra, rb, rc);
break;
case 0x02E:
/* FCMOVLE */
gen_fcmov(TCG_COND_LE, ra, rb, rc);
break;
case 0x02F:
/* FCMOVGT */
gen_fcmov(TCG_COND_GT, ra, rb, rc);
break;
case 0x030:
/* CVTQL */
gen_fcvtql(rb, rc);
break;
case 0x130:
/* CVTQL/V */
case 0x530:
/* CVTQL/SV */
/* ??? I'm pretty sure there's nothing that /sv needs to do that
/v doesn't do. The only thing I can think is that /sv is a
valid instruction merely for completeness in the ISA. */
gen_fcvtql_v(ctx, rb, rc);
break;
default:
goto invalid_opc;
}
break;
case 0x18:
switch ((uint16_t)disp16) {
case 0x0000:
/* TRAPB */
/* No-op. */
break;
case 0x0400:
/* EXCB */
/* No-op. */
break;
case 0x4000:
/* MB */
/* No-op */
break;
case 0x4400:
/* WMB */
/* No-op */
break;
case 0x8000:
/* FETCH */
/* No-op */
break;
case 0xA000:
/* FETCH_M */
/* No-op */
break;
case 0xC000:
/* RPCC */
if (ra != 31) {
if (use_icount) {
gen_io_start();
gen_helper_load_pcc(cpu_ir[ra], cpu_env);
gen_io_end();
ret = EXIT_PC_STALE;
} else {
gen_helper_load_pcc(cpu_ir[ra], cpu_env);
}
}
break;
case 0xE000:
/* RC */
gen_rx(ra, 0);
break;
case 0xE800:
/* ECB */
break;
case 0xF000:
/* RS */
gen_rx(ra, 1);
break;
case 0xF800:
/* WH64 */
/* No-op */
break;
default:
goto invalid_opc;
}
break;
case 0x19:
/* HW_MFPR (PALcode) */
#ifndef CONFIG_USER_ONLY
if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
return gen_mfpr(ra, insn & 0xffff);
}
#endif
goto invalid_opc;
case 0x1A:
/* JMP, JSR, RET, JSR_COROUTINE. These only differ by the branch
prediction stack action, which of course we don't implement. */
if (rb != 31) {
tcg_gen_andi_i64(cpu_pc, cpu_ir[rb], ~3);
} else {
tcg_gen_movi_i64(cpu_pc, 0);
}
if (ra != 31) {
tcg_gen_movi_i64(cpu_ir[ra], ctx->pc);
}
ret = EXIT_PC_UPDATED;
break;
case 0x1B:
/* HW_LD (PALcode) */
#ifndef CONFIG_USER_ONLY
if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
TCGv addr;
if (ra == 31) {
break;
}
addr = tcg_temp_new();
if (rb != 31) {
tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
} else {
tcg_gen_movi_i64(addr, disp12);
}
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access (hw_ldl/p) */
gen_helper_ldl_phys(cpu_ir[ra], cpu_env, addr);
break;
case 0x1:
/* Quadword physical access (hw_ldq/p) */
gen_helper_ldq_phys(cpu_ir[ra], cpu_env, addr);
break;
case 0x2:
/* Longword physical access with lock (hw_ldl_l/p) */
gen_helper_ldl_l_phys(cpu_ir[ra], cpu_env, addr);
break;
case 0x3:
/* Quadword physical access with lock (hw_ldq_l/p) */
gen_helper_ldq_l_phys(cpu_ir[ra], cpu_env, addr);
break;
case 0x4:
/* Longword virtual PTE fetch (hw_ldl/v) */
goto invalid_opc;
case 0x5:
/* Quadword virtual PTE fetch (hw_ldq/v) */
goto invalid_opc;
break;
case 0x6:
/* Incpu_ir[ra]id */
goto invalid_opc;
case 0x7:
/* Incpu_ir[ra]id */
goto invalid_opc;
case 0x8:
/* Longword virtual access (hw_ldl) */
goto invalid_opc;
case 0x9:
/* Quadword virtual access (hw_ldq) */
goto invalid_opc;
case 0xA:
/* Longword virtual access with protection check (hw_ldl/w) */
tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LESL);
break;
case 0xB:
/* Quadword virtual access with protection check (hw_ldq/w) */
tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_KERNEL_IDX, MO_LEQ);
break;
case 0xC:
/* Longword virtual access with alt access mode (hw_ldl/a)*/
goto invalid_opc;
case 0xD:
/* Quadword virtual access with alt access mode (hw_ldq/a) */
goto invalid_opc;
case 0xE:
/* Longword virtual access with alternate access mode and
protection checks (hw_ldl/wa) */
tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LESL);
break;
case 0xF:
/* Quadword virtual access with alternate access mode and
protection checks (hw_ldq/wa) */
tcg_gen_qemu_ld_i64(cpu_ir[ra], addr, MMU_USER_IDX, MO_LEQ);
break;
}
tcg_temp_free(addr);
break;
}
#endif
goto invalid_opc;
case 0x1C:
switch (fn7) {
case 0x00:
/* SEXTB */
if ((ctx->tb->flags & TB_FLAGS_AMASK_BWX) == 0) {
goto invalid_opc;
}
if (likely(rc != 31)) {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int8_t)lit));
} else {
tcg_gen_ext8s_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
break;
case 0x01:
/* SEXTW */
if (ctx->tb->flags & TB_FLAGS_AMASK_BWX) {
if (likely(rc != 31)) {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], (int64_t)((int16_t)lit));
} else {
tcg_gen_ext16s_i64(cpu_ir[rc], cpu_ir[rb]);
}
}
break;
}
goto invalid_opc;
case 0x30:
/* CTPOP */
if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
if (likely(rc != 31)) {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], ctpop64(lit));
} else {
gen_helper_ctpop(cpu_ir[rc], cpu_ir[rb]);
}
}
break;
}
goto invalid_opc;
case 0x31:
/* PERR */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
gen_perr(ra, rb, rc, islit, lit);
break;
}
goto invalid_opc;
case 0x32:
/* CTLZ */
if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
if (likely(rc != 31)) {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], clz64(lit));
} else {
gen_helper_ctlz(cpu_ir[rc], cpu_ir[rb]);
}
}
break;
}
goto invalid_opc;
case 0x33:
/* CTTZ */
if (ctx->tb->flags & TB_FLAGS_AMASK_CIX) {
if (likely(rc != 31)) {
if (islit) {
tcg_gen_movi_i64(cpu_ir[rc], ctz64(lit));
} else {
gen_helper_cttz(cpu_ir[rc], cpu_ir[rb]);
}
}
break;
}
goto invalid_opc;
case 0x34:
/* UNPKBW */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
if (real_islit || ra != 31) {
goto invalid_opc;
}
gen_unpkbw(rb, rc);
break;
}
goto invalid_opc;
case 0x35:
/* UNPKBL */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
if (real_islit || ra != 31) {
goto invalid_opc;
}
gen_unpkbl(rb, rc);
break;
}
goto invalid_opc;
case 0x36:
/* PKWB */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
if (real_islit || ra != 31) {
goto invalid_opc;
}
gen_pkwb(rb, rc);
break;
}
goto invalid_opc;
case 0x37:
/* PKLB */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
if (real_islit || ra != 31) {
goto invalid_opc;
}
gen_pklb(rb, rc);
break;
}
goto invalid_opc;
case 0x38:
/* MINSB8 */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
gen_minsb8(ra, rb, rc, islit, lit);
break;
}
goto invalid_opc;
case 0x39:
/* MINSW4 */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
gen_minsw4(ra, rb, rc, islit, lit);
break;
}
goto invalid_opc;
case 0x3A:
/* MINUB8 */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
gen_minub8(ra, rb, rc, islit, lit);
break;
}
goto invalid_opc;
case 0x3B:
/* MINUW4 */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
gen_minuw4(ra, rb, rc, islit, lit);
break;
}
goto invalid_opc;
case 0x3C:
/* MAXUB8 */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
gen_maxub8(ra, rb, rc, islit, lit);
break;
}
goto invalid_opc;
case 0x3D:
/* MAXUW4 */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
gen_maxuw4(ra, rb, rc, islit, lit);
break;
}
goto invalid_opc;
case 0x3E:
/* MAXSB8 */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
gen_maxsb8(ra, rb, rc, islit, lit);
break;
}
goto invalid_opc;
case 0x3F:
/* MAXSW4 */
if (ctx->tb->flags & TB_FLAGS_AMASK_MVI) {
gen_maxsw4(ra, rb, rc, islit, lit);
break;
}
goto invalid_opc;
case 0x70:
/* FTOIT */
if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
goto invalid_opc;
}
if (likely(rc != 31)) {
if (ra != 31) {
tcg_gen_mov_i64(cpu_ir[rc], cpu_fir[ra]);
} else {
tcg_gen_movi_i64(cpu_ir[rc], 0);
}
}
break;
case 0x78:
/* FTOIS */
if ((ctx->tb->flags & TB_FLAGS_AMASK_FIX) == 0) {
goto invalid_opc;
}
if (rc != 31) {
TCGv_i32 tmp1 = tcg_temp_new_i32();
if (ra != 31) {
gen_helper_s_to_memory(tmp1, cpu_fir[ra]);
} else {
TCGv tmp2 = tcg_const_i64(0);
gen_helper_s_to_memory(tmp1, tmp2);
tcg_temp_free(tmp2);
}
tcg_gen_ext_i32_i64(cpu_ir[rc], tmp1);
tcg_temp_free_i32(tmp1);
}
break;
default:
goto invalid_opc;
}
break;
case 0x1D:
/* HW_MTPR (PALcode) */
#ifndef CONFIG_USER_ONLY
if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
return gen_mtpr(ctx, rb, insn & 0xffff);
}
#endif
goto invalid_opc;
case 0x1E:
/* HW_RET (PALcode) */
#ifndef CONFIG_USER_ONLY
if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
if (rb == 31) {
/* Pre-EV6 CPUs interpreted this as HW_REI, loading the return
address from EXC_ADDR. This turns out to be useful for our
emulation PALcode, so continue to accept it. */
TCGv tmp = tcg_temp_new();
tcg_gen_ld_i64(tmp, cpu_env, offsetof(CPUAlphaState, exc_addr));
gen_helper_hw_ret(cpu_env, tmp);
tcg_temp_free(tmp);
} else {
gen_helper_hw_ret(cpu_env, cpu_ir[rb]);
}
ret = EXIT_PC_UPDATED;
break;
}
#endif
goto invalid_opc;
case 0x1F:
/* HW_ST (PALcode) */
#ifndef CONFIG_USER_ONLY
if (ctx->tb->flags & TB_FLAGS_PAL_MODE) {
TCGv addr, val;
addr = tcg_temp_new();
if (rb != 31) {
tcg_gen_addi_i64(addr, cpu_ir[rb], disp12);
} else {
tcg_gen_movi_i64(addr, disp12);
}
if (ra != 31) {
val = cpu_ir[ra];
} else {
val = tcg_temp_new();
tcg_gen_movi_i64(val, 0);
}
switch ((insn >> 12) & 0xF) {
case 0x0:
/* Longword physical access */
gen_helper_stl_phys(cpu_env, addr, val);
break;
case 0x1:
/* Quadword physical access */
gen_helper_stq_phys(cpu_env, addr, val);
break;
case 0x2:
/* Longword physical access with lock */
gen_helper_stl_c_phys(val, cpu_env, addr, val);
break;
case 0x3:
/* Quadword physical access with lock */
gen_helper_stq_c_phys(val, cpu_env, addr, val);
break;
case 0x4:
/* Longword virtual access */
goto invalid_opc;
case 0x5:
/* Quadword virtual access */
goto invalid_opc;
case 0x6:
/* Invalid */
goto invalid_opc;
case 0x7:
/* Invalid */
goto invalid_opc;
case 0x8:
/* Invalid */
goto invalid_opc;
case 0x9:
/* Invalid */
goto invalid_opc;
case 0xA:
/* Invalid */
goto invalid_opc;
case 0xB:
/* Invalid */
goto invalid_opc;
case 0xC:
/* Longword virtual access with alternate access mode */
goto invalid_opc;
case 0xD:
/* Quadword virtual access with alternate access mode */
goto invalid_opc;
case 0xE:
/* Invalid */
goto invalid_opc;
case 0xF:
/* Invalid */
goto invalid_opc;
}
if (ra == 31) {
tcg_temp_free(val);
}
tcg_temp_free(addr);
break;
}
#endif
goto invalid_opc;
case 0x20:
/* LDF */
gen_load_mem(ctx, &gen_qemu_ldf, ra, rb, disp16, 1, 0);
break;
case 0x21:
/* LDG */
gen_load_mem(ctx, &gen_qemu_ldg, ra, rb, disp16, 1, 0);
break;
case 0x22:
/* LDS */
gen_load_mem(ctx, &gen_qemu_lds, ra, rb, disp16, 1, 0);
break;
case 0x23:
/* LDT */
gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 1, 0);
break;
case 0x24:
/* STF */
gen_store_mem(ctx, &gen_qemu_stf, ra, rb, disp16, 1, 0);
break;
case 0x25:
/* STG */
gen_store_mem(ctx, &gen_qemu_stg, ra, rb, disp16, 1, 0);
break;
case 0x26:
/* STS */
gen_store_mem(ctx, &gen_qemu_sts, ra, rb, disp16, 1, 0);
break;
case 0x27:
/* STT */
gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 1, 0);
break;
case 0x28:
/* LDL */
gen_load_mem(ctx, &tcg_gen_qemu_ld32s, ra, rb, disp16, 0, 0);
break;
case 0x29:
/* LDQ */
gen_load_mem(ctx, &tcg_gen_qemu_ld64, ra, rb, disp16, 0, 0);
break;
case 0x2A:
/* LDL_L */
gen_load_mem(ctx, &gen_qemu_ldl_l, ra, rb, disp16, 0, 0);
break;
case 0x2B:
/* LDQ_L */
gen_load_mem(ctx, &gen_qemu_ldq_l, ra, rb, disp16, 0, 0);
break;
case 0x2C:
/* STL */
gen_store_mem(ctx, &tcg_gen_qemu_st32, ra, rb, disp16, 0, 0);
break;
case 0x2D:
/* STQ */
gen_store_mem(ctx, &tcg_gen_qemu_st64, ra, rb, disp16, 0, 0);
break;
case 0x2E:
/* STL_C */
ret = gen_store_conditional(ctx, ra, rb, disp16, 0);
break;
case 0x2F:
/* STQ_C */
ret = gen_store_conditional(ctx, ra, rb, disp16, 1);
break;
case 0x30:
/* BR */
ret = gen_bdirect(ctx, ra, disp21);
break;
case 0x31: /* FBEQ */
ret = gen_fbcond(ctx, TCG_COND_EQ, ra, disp21);
break;
case 0x32: /* FBLT */
ret = gen_fbcond(ctx, TCG_COND_LT, ra, disp21);
break;
case 0x33: /* FBLE */
ret = gen_fbcond(ctx, TCG_COND_LE, ra, disp21);
break;
case 0x34:
/* BSR */
ret = gen_bdirect(ctx, ra, disp21);
break;
case 0x35: /* FBNE */
ret = gen_fbcond(ctx, TCG_COND_NE, ra, disp21);
break;
case 0x36: /* FBGE */
ret = gen_fbcond(ctx, TCG_COND_GE, ra, disp21);
break;
case 0x37: /* FBGT */
ret = gen_fbcond(ctx, TCG_COND_GT, ra, disp21);
break;
case 0x38:
/* BLBC */
ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 1);
break;
case 0x39:
/* BEQ */
ret = gen_bcond(ctx, TCG_COND_EQ, ra, disp21, 0);
break;
case 0x3A:
/* BLT */
ret = gen_bcond(ctx, TCG_COND_LT, ra, disp21, 0);
break;
case 0x3B:
/* BLE */
ret = gen_bcond(ctx, TCG_COND_LE, ra, disp21, 0);
break;
case 0x3C:
/* BLBS */
ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 1);
break;
case 0x3D:
/* BNE */
ret = gen_bcond(ctx, TCG_COND_NE, ra, disp21, 0);
break;
case 0x3E:
/* BGE */
ret = gen_bcond(ctx, TCG_COND_GE, ra, disp21, 0);
break;
case 0x3F:
/* BGT */
ret = gen_bcond(ctx, TCG_COND_GT, ra, disp21, 0);
break;
invalid_opc:
ret = gen_invalid(ctx);
break;
}
return ret;
}
The vulnerability label is: Non-vulnerable |
/*
 * Horizontal chroma-plane scaler (template; instantiated per-CPU via RENAME()).
 *
 * Scales the two input rows src1/src2 of width srcW horizontally to dstWidth
 * samples, writing the first plane to dst[0..dstWidth-1] and the second to
 * dst[VOFW..].  NOTE(review): src1/src2 are presumably the U and V chroma
 * rows — the "hc" prefix and the chr* filter tables suggest so; confirm
 * against callers.
 *
 * Path selection, in order:
 *   - grayscale / monochrome sources carry no chroma: return immediately;
 *   - if c->hcscale_internal is set, the input is first converted into
 *     formatConvBuffer (two planes VOFW apart) and scaled from there;
 *   - generic filter path via c->hScale() unless SWS_FAST_BILINEAR applies
 *     (and, when compiled with MMX, the MMX2 code is actually usable);
 *   - fast-bilinear path: runtime-generated MMX2 filter code, a hand-written
 *     x86 asm loop, or the portable c->hcscale_fast() fallback.
 *
 * Finally, if source and destination color ranges differ (and the output is
 * not RGB/BGR), the samples are remapped in place between the two ranges.
 */
devign_test_set_data_10308 | inline static void RENAME(hcscale)(SwsContext *c, uint16_t *dst, long dstWidth, const uint8_t *src1, const uint8_t *src2,
int srcW, int xInc, int flags, const int16_t *hChrFilter,
const int16_t *hChrFilterPos, int hChrFilterSize,
int srcFormat, uint8_t *formatConvBuffer,
uint32_t *pal)
{
/* Cached MMX2 state from the context; av_unused because non-x86 builds
 * compile these out of all uses below. */
int32_t av_unused *mmx2FilterPos = c->chrMmx2FilterPos;
int16_t av_unused *mmx2Filter = c->chrMmx2Filter;
int av_unused canMMX2BeUsed = c->canMMX2BeUsed;
void av_unused *mmx2FilterCode= c->chrMmx2FilterCode;
/* No chroma in gray / 1-bit monochrome sources — nothing to scale. */
if (isGray(srcFormat) || srcFormat==PIX_FMT_MONOBLACK || srcFormat==PIX_FMT_MONOWHITE)
return;
/* RGB32_1/BGR32_1 store the components shifted by one byte. */
if (srcFormat==PIX_FMT_RGB32_1 || srcFormat==PIX_FMT_BGR32_1) {
src1 += ALT32_CORR;
src2 += ALT32_CORR;
}
/* For 48-bit little-endian RGB, start on the high byte of each component.
 * NOTE(review): assumes only the MSB of each 16-bit sample is used — confirm. */
if (srcFormat==PIX_FMT_RGB48LE) {
src1++;
src2++;
}
/* Optional pixel-format conversion into the scratch buffer; afterwards the
 * two planes to scale live at formatConvBuffer and formatConvBuffer+VOFW. */
if (c->hcscale_internal) {
c->hcscale_internal(formatConvBuffer, formatConvBuffer+VOFW, src1, src2, srcW, pal);
src1= formatConvBuffer;
src2= formatConvBuffer+VOFW;
}
#if COMPILE_TEMPLATE_MMX
// Use the new MMX scaler if the MMX2 one can't be used (it is faster than the x86 ASM one).
if (!(flags&SWS_FAST_BILINEAR) || (!canMMX2BeUsed))
#else
if (!(flags&SWS_FAST_BILINEAR))
#endif
{
/* Generic polyphase filter: one call per chroma plane. */
c->hScale(dst , dstWidth, src1, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
c->hScale(dst+VOFW, dstWidth, src2, srcW, xInc, hChrFilter, hChrFilterPos, hChrFilterSize);
} else { // fast bilinear upscale / crap downscale
#if ARCH_X86 && CONFIG_GPL
#if COMPILE_TEMPLATE_MMX2
int i;
#if defined(PIC)
/* PIC builds cannot clobber EBX (GOT pointer); spill it manually. */
DECLARE_ALIGNED(8, uint64_t, ebxsave);
#endif
if (canMMX2BeUsed) {
/* Run the runtime-generated MMX2 filter code once per plane
 * (second pass re-targets src2 and dst+VOF). */
__asm__ volatile(
#if defined(PIC)
"mov %%"REG_b", %6 \n\t"
#endif
"pxor %%mm7, %%mm7 \n\t"
"mov %0, %%"REG_c" \n\t"
"mov %1, %%"REG_D" \n\t"
"mov %2, %%"REG_d" \n\t"
"mov %3, %%"REG_b" \n\t"
"xor %%"REG_a", %%"REG_a" \n\t" // i
PREFETCH" (%%"REG_c") \n\t"
PREFETCH" 32(%%"REG_c") \n\t"
PREFETCH" 64(%%"REG_c") \n\t"
CALL_MMX2_FILTER_CODE
CALL_MMX2_FILTER_CODE
CALL_MMX2_FILTER_CODE
CALL_MMX2_FILTER_CODE
"xor %%"REG_a", %%"REG_a" \n\t" // i
"mov %5, %%"REG_c" \n\t" // src
"mov %1, %%"REG_D" \n\t" // buf1
"add $"AV_STRINGIFY(VOF)", %%"REG_D" \n\t"
PREFETCH" (%%"REG_c") \n\t"
PREFETCH" 32(%%"REG_c") \n\t"
PREFETCH" 64(%%"REG_c") \n\t"
CALL_MMX2_FILTER_CODE
CALL_MMX2_FILTER_CODE
CALL_MMX2_FILTER_CODE
CALL_MMX2_FILTER_CODE
#if defined(PIC)
"mov %6, %%"REG_b" \n\t"
#endif
:: "m" (src1), "m" (dst), "m" (mmx2Filter), "m" (mmx2FilterPos),
"m" (mmx2FilterCode), "m" (src2)
#if defined(PIC)
,"m" (ebxsave)
#endif
: "%"REG_a, "%"REG_c, "%"REG_d, "%"REG_S, "%"REG_D
#if !defined(PIC)
,"%"REG_b
#endif
);
/* Edge handling: output pixels whose source position would read past
 * srcW-1 get the last source sample (scaled by 128, matching the
 * filter's fixed-point output). */
for (i=dstWidth-1; (i*xInc)>>16 >=srcW-1; i--) {
//printf("%d %d %d\n", dstWidth, i, srcW);
dst[i] = src1[srcW-1]*128;
dst[i+VOFW] = src2[srcW-1]*128;
}
} else {
#endif /* COMPILE_TEMPLATE_MMX2 */
/* Hand-written x86 fast-bilinear loop: integer step (xInc>>16) plus
 * 16-bit fractional accumulator with carry into the source index. */
x86_reg xInc_shr16 = (x86_reg) (xInc >> 16);
uint16_t xInc_mask = xInc & 0xffff;
__asm__ volatile(
"xor %%"REG_a", %%"REG_a" \n\t" // i
"xor %%"REG_d", %%"REG_d" \n\t" // xx
"xorl %%ecx, %%ecx \n\t" // xalpha
ASMALIGN(4)
"1: \n\t"
"mov %0, %%"REG_S" \n\t"
"movzbl (%%"REG_S", %%"REG_d"), %%edi \n\t" //src[xx]
"movzbl 1(%%"REG_S", %%"REG_d"), %%esi \n\t" //src[xx+1]
FAST_BILINEAR_X86
"movw %%si, (%%"REG_D", %%"REG_a", 2) \n\t"
"movzbl (%5, %%"REG_d"), %%edi \n\t" //src[xx]
"movzbl 1(%5, %%"REG_d"), %%esi \n\t" //src[xx+1]
FAST_BILINEAR_X86
"movw %%si, "AV_STRINGIFY(VOF)"(%%"REG_D", %%"REG_a", 2) \n\t"
"addw %4, %%cx \n\t" //xalpha += xInc&0xFFFF
"adc %3, %%"REG_d" \n\t" //xx+= xInc>>16 + carry
"add $1, %%"REG_a" \n\t"
"cmp %2, %%"REG_a" \n\t"
" jb 1b \n\t"
/* GCC 3.3 makes MPlayer crash on IA-32 machines when using "g" operand here,
which is needed to support GCC 4.0. */
#if ARCH_X86_64 && ((__GNUC__ > 3) || (__GNUC__ == 3 && __GNUC_MINOR__ >= 4))
:: "m" (src1), "m" (dst), "g" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
#else
:: "m" (src1), "m" (dst), "m" (dstWidth), "m" (xInc_shr16), "m" (xInc_mask),
#endif
"r" (src2)
: "%"REG_a, "%"REG_d, "%ecx", "%"REG_D, "%esi"
);
#if COMPILE_TEMPLATE_MMX2
} //if MMX2 can't be used
#endif
#else
/* Portable fallback for non-x86 (or non-GPL) builds. */
c->hcscale_fast(c, dst, dstWidth, src1, src2, srcW, xInc);
#endif /* ARCH_X86 */
}
/* Range remapping when src and dst color ranges differ and the destination
 * is not RGB/BGR.  NOTE(review): the affine constants appear to implement
 * the 8-bit limited (16..235) <-> full (0..255) mapping at the filter's
 * fixed-point precision — confirm against the swscale range tables. */
if(c->srcRange != c->dstRange && !(isRGB(c->dstFormat) || isBGR(c->dstFormat))) {
int i;
//FIXME all pal and rgb srcFormats could do this conversion as well
//FIXME all scalers more complex than bilinear could do half of this transform
if(c->srcRange) {
for (i=0; i<dstWidth; i++) {
dst[i ]= (dst[i ]*1799 + 4081085)>>11; //1469
dst[i+VOFW]= (dst[i+VOFW]*1799 + 4081085)>>11; //1469
}
} else {
for (i=0; i<dstWidth; i++) {
dst[i ]= (FFMIN(dst[i ],30775)*4663 - 9289992)>>12; //-264
dst[i+VOFW]= (FFMIN(dst[i+VOFW],30775)*4663 - 9289992)>>12; //-264
}
}
}
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10316 | float32 HELPER(ucf64_abss)(float32 a)
{
    /* Single-precision absolute value, delegated to the softfloat
     * float32_abs helper.  NOTE(review): presumably this just clears the
     * sign bit of the raw float32 — confirm in the softfloat sources. */
    float32 result = float32_abs(a);
    return result;
}
The vulnerability label is: Non-vulnerable |
devign_test_set_data_10322 | int qemu_thread_equal(QemuThread *thread1, QemuThread *thread2)
{
    /* Two QemuThread wrappers name the same thread iff their underlying
     * pthread handles compare equal.  pthread_t is opaque, so
     * pthread_equal() is the only portable way to compare them. */
    int ids_match = pthread_equal(thread1->thread, thread2->thread);
    return ids_match;
}
The vulnerability label is: Non-vulnerable |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.