id int32 0 27.3k | func stringlengths 26 142k | target bool 2
classes | project stringclasses 2
values | commit_id stringlengths 40 40 | func_clean stringlengths 26 131k | vul_lines dict | normalized_func stringlengths 24 132k | lines listlengths 1 2.8k | label listlengths 1 2.8k | line_no listlengths 1 2.8k |
|---|---|---|---|---|---|---|---|---|---|---|
13,089 | void ff_jpeg2000_cleanup(Jpeg2000Component *comp, Jpeg2000CodingStyle *codsty)
{
int reslevelno, bandno, precno;
for (reslevelno = 0;
comp->reslevel && reslevelno < codsty->nreslevels;
reslevelno++) {
Jpeg2000ResLevel *reslevel;
if (!comp->reslevel)
continue;
reslevel = comp->reslevel + reslevelno;
for (bandno = 0; bandno < reslevel->nbands; bandno++) {
Jpeg2000Band *band;
if (!reslevel->band)
continue;
band = reslevel->band + bandno;
for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++) {
if (band->prec) {
Jpeg2000Prec *prec = band->prec + precno;
av_freep(&prec->zerobits);
av_freep(&prec->cblkincl);
av_freep(&prec->cblk);
}
}
av_freep(&band->prec);
}
av_freep(&reslevel->band);
}
ff_dwt_destroy(&comp->dwt);
av_freep(&comp->reslevel);
av_freep(&comp->i_data);
av_freep(&comp->f_data);
}
| true | FFmpeg | 3d5822d9cf07d08bce82903e4715658f46b01b5c | void ff_jpeg2000_cleanup(Jpeg2000Component *comp, Jpeg2000CodingStyle *codsty)
{
int reslevelno, bandno, precno;
for (reslevelno = 0;
comp->reslevel && reslevelno < codsty->nreslevels;
reslevelno++) {
Jpeg2000ResLevel *reslevel;
if (!comp->reslevel)
continue;
reslevel = comp->reslevel + reslevelno;
for (bandno = 0; bandno < reslevel->nbands; bandno++) {
Jpeg2000Band *band;
if (!reslevel->band)
continue;
band = reslevel->band + bandno;
for (precno = 0; precno < reslevel->num_precincts_x * reslevel->num_precincts_y; precno++) {
if (band->prec) {
Jpeg2000Prec *prec = band->prec + precno;
av_freep(&prec->zerobits);
av_freep(&prec->cblkincl);
av_freep(&prec->cblk);
}
}
av_freep(&band->prec);
}
av_freep(&reslevel->band);
}
ff_dwt_destroy(&comp->dwt);
av_freep(&comp->reslevel);
av_freep(&comp->i_data);
av_freep(&comp->f_data);
}
| {
"code": [
" av_freep(&prec->cblk);"
],
"line_no": [
49
]
} | void FUNC_0(Jpeg2000Component *VAR_0, Jpeg2000CodingStyle *VAR_1)
{
int VAR_2, VAR_3, VAR_4;
for (VAR_2 = 0;
VAR_0->reslevel && VAR_2 < VAR_1->nreslevels;
VAR_2++) {
Jpeg2000ResLevel *reslevel;
if (!VAR_0->reslevel)
continue;
reslevel = VAR_0->reslevel + VAR_2;
for (VAR_3 = 0; VAR_3 < reslevel->nbands; VAR_3++) {
Jpeg2000Band *band;
if (!reslevel->band)
continue;
band = reslevel->band + VAR_3;
for (VAR_4 = 0; VAR_4 < reslevel->num_precincts_x * reslevel->num_precincts_y; VAR_4++) {
if (band->prec) {
Jpeg2000Prec *prec = band->prec + VAR_4;
av_freep(&prec->zerobits);
av_freep(&prec->cblkincl);
av_freep(&prec->cblk);
}
}
av_freep(&band->prec);
}
av_freep(&reslevel->band);
}
ff_dwt_destroy(&VAR_0->dwt);
av_freep(&VAR_0->reslevel);
av_freep(&VAR_0->i_data);
av_freep(&VAR_0->f_data);
}
| [
"void FUNC_0(Jpeg2000Component *VAR_0, Jpeg2000CodingStyle *VAR_1)\n{",
"int VAR_2, VAR_3, VAR_4;",
"for (VAR_2 = 0;",
"VAR_0->reslevel && VAR_2 < VAR_1->nreslevels;",
"VAR_2++) {",
"Jpeg2000ResLevel *reslevel;",
"if (!VAR_0->reslevel)\ncontinue;",
"reslevel = VAR_0->reslevel + VAR_2;",
"for (VAR_3 ... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17,
19
],
[
23
],
[
25
],
[
27
],
[
31,
33
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
],
[
47
],
[
49
],
[
51
... |
13,090 | SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
int unit, bool removable, int bootindex,
const char *serial, Error **errp)
{
const char *driver;
char *name;
DeviceState *dev;
Error *err = NULL;
driver = blk_is_sg(blk) ? "scsi-generic" : "scsi-disk";
dev = qdev_create(&bus->qbus, driver);
name = g_strdup_printf("legacy[%d]", unit);
object_property_add_child(OBJECT(bus), name, OBJECT(dev), NULL);
g_free(name);
qdev_prop_set_uint32(dev, "scsi-id", unit);
if (bootindex >= 0) {
object_property_set_int(OBJECT(dev), bootindex, "bootindex",
&error_abort);
}
if (object_property_find(OBJECT(dev), "removable", NULL)) {
qdev_prop_set_bit(dev, "removable", removable);
}
if (serial && object_property_find(OBJECT(dev), "serial", NULL)) {
qdev_prop_set_string(dev, "serial", serial);
}
qdev_prop_set_drive(dev, "drive", blk, &err);
if (err) {
qerror_report_err(err);
error_free(err);
error_setg(errp, "Setting drive property failed");
object_unparent(OBJECT(dev));
return NULL;
}
object_property_set_bool(OBJECT(dev), true, "realized", &err);
if (err != NULL) {
error_propagate(errp, err);
object_unparent(OBJECT(dev));
return NULL;
}
return SCSI_DEVICE(dev);
}
| true | qemu | 390e90a90736f98ca47f2e767d7f2a15d68d6bc4 | SCSIDevice *scsi_bus_legacy_add_drive(SCSIBus *bus, BlockBackend *blk,
int unit, bool removable, int bootindex,
const char *serial, Error **errp)
{
const char *driver;
char *name;
DeviceState *dev;
Error *err = NULL;
driver = blk_is_sg(blk) ? "scsi-generic" : "scsi-disk";
dev = qdev_create(&bus->qbus, driver);
name = g_strdup_printf("legacy[%d]", unit);
object_property_add_child(OBJECT(bus), name, OBJECT(dev), NULL);
g_free(name);
qdev_prop_set_uint32(dev, "scsi-id", unit);
if (bootindex >= 0) {
object_property_set_int(OBJECT(dev), bootindex, "bootindex",
&error_abort);
}
if (object_property_find(OBJECT(dev), "removable", NULL)) {
qdev_prop_set_bit(dev, "removable", removable);
}
if (serial && object_property_find(OBJECT(dev), "serial", NULL)) {
qdev_prop_set_string(dev, "serial", serial);
}
qdev_prop_set_drive(dev, "drive", blk, &err);
if (err) {
qerror_report_err(err);
error_free(err);
error_setg(errp, "Setting drive property failed");
object_unparent(OBJECT(dev));
return NULL;
}
object_property_set_bool(OBJECT(dev), true, "realized", &err);
if (err != NULL) {
error_propagate(errp, err);
object_unparent(OBJECT(dev));
return NULL;
}
return SCSI_DEVICE(dev);
}
| {
"code": [
" qerror_report_err(err);",
" error_free(err);",
" error_setg(errp, \"Setting drive property failed\");"
],
"line_no": [
57,
59,
61
]
} | SCSIDevice *FUNC_0(SCSIBus *bus, BlockBackend *blk,
int unit, bool removable, int bootindex,
const char *serial, Error **errp)
{
const char *VAR_0;
char *VAR_1;
DeviceState *dev;
Error *err = NULL;
VAR_0 = blk_is_sg(blk) ? "scsi-generic" : "scsi-disk";
dev = qdev_create(&bus->qbus, VAR_0);
VAR_1 = g_strdup_printf("legacy[%d]", unit);
object_property_add_child(OBJECT(bus), VAR_1, OBJECT(dev), NULL);
g_free(VAR_1);
qdev_prop_set_uint32(dev, "scsi-id", unit);
if (bootindex >= 0) {
object_property_set_int(OBJECT(dev), bootindex, "bootindex",
&error_abort);
}
if (object_property_find(OBJECT(dev), "removable", NULL)) {
qdev_prop_set_bit(dev, "removable", removable);
}
if (serial && object_property_find(OBJECT(dev), "serial", NULL)) {
qdev_prop_set_string(dev, "serial", serial);
}
qdev_prop_set_drive(dev, "drive", blk, &err);
if (err) {
qerror_report_err(err);
error_free(err);
error_setg(errp, "Setting drive property failed");
object_unparent(OBJECT(dev));
return NULL;
}
object_property_set_bool(OBJECT(dev), true, "realized", &err);
if (err != NULL) {
error_propagate(errp, err);
object_unparent(OBJECT(dev));
return NULL;
}
return SCSI_DEVICE(dev);
}
| [
"SCSIDevice *FUNC_0(SCSIBus *bus, BlockBackend *blk,\nint unit, bool removable, int bootindex,\nconst char *serial, Error **errp)\n{",
"const char *VAR_0;",
"char *VAR_1;",
"DeviceState *dev;",
"Error *err = NULL;",
"VAR_0 = blk_is_sg(blk) ? \"scsi-generic\" : \"scsi-disk\";",
"dev = qdev_create(&bus->q... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
31
],
[
33
],
[
35,
37
],
[
39
],
[
41
],
[
43
],
[
45
],
[
47
],
[... |
13,092 | static int smacker_decode_header_tree(SmackVContext *smk, BitstreamContext *bc,
int **recodes, int *last, int size)
{
int res;
HuffContext huff;
HuffContext tmp1, tmp2;
VLC vlc[2] = { { 0 } };
int escapes[3];
DBCtx ctx;
int err = 0;
if(size >= UINT_MAX>>4){ // (((size + 3) >> 2) + 3) << 2 must not overflow
av_log(smk->avctx, AV_LOG_ERROR, "size too large\n");
return AVERROR_INVALIDDATA;
}
tmp1.length = 256;
tmp1.maxlength = 0;
tmp1.current = 0;
tmp1.bits = av_mallocz(256 * 4);
tmp1.lengths = av_mallocz(256 * sizeof(int));
tmp1.values = av_mallocz(256 * sizeof(int));
tmp2.length = 256;
tmp2.maxlength = 0;
tmp2.current = 0;
tmp2.bits = av_mallocz(256 * 4);
tmp2.lengths = av_mallocz(256 * sizeof(int));
tmp2.values = av_mallocz(256 * sizeof(int));
if (!tmp1.bits || !tmp1.lengths || !tmp1.values ||
!tmp2.bits || !tmp2.lengths || !tmp2.values) {
err = AVERROR(ENOMEM);
goto error;
}
if (bitstream_read_bit(bc)) {
smacker_decode_tree(bc, &tmp1, 0, 0);
bitstream_skip(bc, 1);
res = init_vlc(&vlc[0], SMKTREE_BITS, tmp1.length,
tmp1.lengths, sizeof(int), sizeof(int),
tmp1.bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
if(res < 0) {
av_log(smk->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
err = res;
goto error;
}
} else {
av_log(smk->avctx, AV_LOG_ERROR, "Skipping low bytes tree\n");
}
if (bitstream_read_bit(bc)) {
smacker_decode_tree(bc, &tmp2, 0, 0);
bitstream_skip(bc, 1);
res = init_vlc(&vlc[1], SMKTREE_BITS, tmp2.length,
tmp2.lengths, sizeof(int), sizeof(int),
tmp2.bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
if(res < 0) {
av_log(smk->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
err = res;
goto error;
}
} else {
av_log(smk->avctx, AV_LOG_ERROR, "Skipping high bytes tree\n");
}
escapes[0] = bitstream_read(bc, 8);
escapes[0] |= bitstream_read(bc, 8) << 8;
escapes[1] = bitstream_read(bc, 8);
escapes[1] |= bitstream_read(bc, 8) << 8;
escapes[2] = bitstream_read(bc, 8);
escapes[2] |= bitstream_read(bc, 8) << 8;
last[0] = last[1] = last[2] = -1;
ctx.escapes[0] = escapes[0];
ctx.escapes[1] = escapes[1];
ctx.escapes[2] = escapes[2];
ctx.v1 = &vlc[0];
ctx.v2 = &vlc[1];
ctx.recode1 = tmp1.values;
ctx.recode2 = tmp2.values;
ctx.last = last;
huff.length = ((size + 3) >> 2) + 4;
huff.maxlength = 0;
huff.current = 0;
huff.values = av_mallocz(huff.length * sizeof(int));
if (!huff.values) {
err = AVERROR(ENOMEM);
goto error;
}
if ((res = smacker_decode_bigtree(bc, &huff, &ctx)) < 0)
err = res;
bitstream_skip(bc, 1);
if(ctx.last[0] == -1) ctx.last[0] = huff.current++;
if(ctx.last[1] == -1) ctx.last[1] = huff.current++;
if(ctx.last[2] == -1) ctx.last[2] = huff.current++;
if (ctx.last[0] >= huff.length ||
ctx.last[1] >= huff.length ||
ctx.last[2] >= huff.length) {
av_log(smk->avctx, AV_LOG_ERROR, "Huffman codes out of range\n");
err = AVERROR_INVALIDDATA;
}
*recodes = huff.values;
error:
if(vlc[0].table)
ff_free_vlc(&vlc[0]);
if(vlc[1].table)
ff_free_vlc(&vlc[1]);
av_free(tmp1.bits);
av_free(tmp1.lengths);
av_free(tmp1.values);
av_free(tmp2.bits);
av_free(tmp2.lengths);
av_free(tmp2.values);
return err;
}
| true | FFmpeg | 0ccddbad200c1d9439c5a836501917d515cddf76 | static int smacker_decode_header_tree(SmackVContext *smk, BitstreamContext *bc,
int **recodes, int *last, int size)
{
int res;
HuffContext huff;
HuffContext tmp1, tmp2;
VLC vlc[2] = { { 0 } };
int escapes[3];
DBCtx ctx;
int err = 0;
if(size >= UINT_MAX>>4){
av_log(smk->avctx, AV_LOG_ERROR, "size too large\n");
return AVERROR_INVALIDDATA;
}
tmp1.length = 256;
tmp1.maxlength = 0;
tmp1.current = 0;
tmp1.bits = av_mallocz(256 * 4);
tmp1.lengths = av_mallocz(256 * sizeof(int));
tmp1.values = av_mallocz(256 * sizeof(int));
tmp2.length = 256;
tmp2.maxlength = 0;
tmp2.current = 0;
tmp2.bits = av_mallocz(256 * 4);
tmp2.lengths = av_mallocz(256 * sizeof(int));
tmp2.values = av_mallocz(256 * sizeof(int));
if (!tmp1.bits || !tmp1.lengths || !tmp1.values ||
!tmp2.bits || !tmp2.lengths || !tmp2.values) {
err = AVERROR(ENOMEM);
goto error;
}
if (bitstream_read_bit(bc)) {
smacker_decode_tree(bc, &tmp1, 0, 0);
bitstream_skip(bc, 1);
res = init_vlc(&vlc[0], SMKTREE_BITS, tmp1.length,
tmp1.lengths, sizeof(int), sizeof(int),
tmp1.bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
if(res < 0) {
av_log(smk->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
err = res;
goto error;
}
} else {
av_log(smk->avctx, AV_LOG_ERROR, "Skipping low bytes tree\n");
}
if (bitstream_read_bit(bc)) {
smacker_decode_tree(bc, &tmp2, 0, 0);
bitstream_skip(bc, 1);
res = init_vlc(&vlc[1], SMKTREE_BITS, tmp2.length,
tmp2.lengths, sizeof(int), sizeof(int),
tmp2.bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
if(res < 0) {
av_log(smk->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
err = res;
goto error;
}
} else {
av_log(smk->avctx, AV_LOG_ERROR, "Skipping high bytes tree\n");
}
escapes[0] = bitstream_read(bc, 8);
escapes[0] |= bitstream_read(bc, 8) << 8;
escapes[1] = bitstream_read(bc, 8);
escapes[1] |= bitstream_read(bc, 8) << 8;
escapes[2] = bitstream_read(bc, 8);
escapes[2] |= bitstream_read(bc, 8) << 8;
last[0] = last[1] = last[2] = -1;
ctx.escapes[0] = escapes[0];
ctx.escapes[1] = escapes[1];
ctx.escapes[2] = escapes[2];
ctx.v1 = &vlc[0];
ctx.v2 = &vlc[1];
ctx.recode1 = tmp1.values;
ctx.recode2 = tmp2.values;
ctx.last = last;
huff.length = ((size + 3) >> 2) + 4;
huff.maxlength = 0;
huff.current = 0;
huff.values = av_mallocz(huff.length * sizeof(int));
if (!huff.values) {
err = AVERROR(ENOMEM);
goto error;
}
if ((res = smacker_decode_bigtree(bc, &huff, &ctx)) < 0)
err = res;
bitstream_skip(bc, 1);
if(ctx.last[0] == -1) ctx.last[0] = huff.current++;
if(ctx.last[1] == -1) ctx.last[1] = huff.current++;
if(ctx.last[2] == -1) ctx.last[2] = huff.current++;
if (ctx.last[0] >= huff.length ||
ctx.last[1] >= huff.length ||
ctx.last[2] >= huff.length) {
av_log(smk->avctx, AV_LOG_ERROR, "Huffman codes out of range\n");
err = AVERROR_INVALIDDATA;
}
*recodes = huff.values;
error:
if(vlc[0].table)
ff_free_vlc(&vlc[0]);
if(vlc[1].table)
ff_free_vlc(&vlc[1]);
av_free(tmp1.bits);
av_free(tmp1.lengths);
av_free(tmp1.values);
av_free(tmp2.bits);
av_free(tmp2.lengths);
av_free(tmp2.values);
return err;
}
| {
"code": [
" if ((res = smacker_decode_bigtree(bc, &huff, &ctx)) < 0)"
],
"line_no": [
183
]
} | static int FUNC_0(SmackVContext *VAR_0, BitstreamContext *VAR_1,
int **VAR_2, int *VAR_3, int VAR_4)
{
int VAR_5;
HuffContext huff;
HuffContext tmp1, tmp2;
VLC vlc[2] = { { 0 } };
int VAR_6[3];
DBCtx ctx;
int VAR_7 = 0;
if(VAR_4 >= UINT_MAX>>4){
av_log(VAR_0->avctx, AV_LOG_ERROR, "VAR_4 too large\n");
return AVERROR_INVALIDDATA;
}
tmp1.length = 256;
tmp1.maxlength = 0;
tmp1.current = 0;
tmp1.bits = av_mallocz(256 * 4);
tmp1.lengths = av_mallocz(256 * sizeof(int));
tmp1.values = av_mallocz(256 * sizeof(int));
tmp2.length = 256;
tmp2.maxlength = 0;
tmp2.current = 0;
tmp2.bits = av_mallocz(256 * 4);
tmp2.lengths = av_mallocz(256 * sizeof(int));
tmp2.values = av_mallocz(256 * sizeof(int));
if (!tmp1.bits || !tmp1.lengths || !tmp1.values ||
!tmp2.bits || !tmp2.lengths || !tmp2.values) {
VAR_7 = AVERROR(ENOMEM);
goto error;
}
if (bitstream_read_bit(VAR_1)) {
smacker_decode_tree(VAR_1, &tmp1, 0, 0);
bitstream_skip(VAR_1, 1);
VAR_5 = init_vlc(&vlc[0], SMKTREE_BITS, tmp1.length,
tmp1.lengths, sizeof(int), sizeof(int),
tmp1.bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
if(VAR_5 < 0) {
av_log(VAR_0->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
VAR_7 = VAR_5;
goto error;
}
} else {
av_log(VAR_0->avctx, AV_LOG_ERROR, "Skipping low bytes tree\n");
}
if (bitstream_read_bit(VAR_1)) {
smacker_decode_tree(VAR_1, &tmp2, 0, 0);
bitstream_skip(VAR_1, 1);
VAR_5 = init_vlc(&vlc[1], SMKTREE_BITS, tmp2.length,
tmp2.lengths, sizeof(int), sizeof(int),
tmp2.bits, sizeof(uint32_t), sizeof(uint32_t), INIT_VLC_LE);
if(VAR_5 < 0) {
av_log(VAR_0->avctx, AV_LOG_ERROR, "Cannot build VLC table\n");
VAR_7 = VAR_5;
goto error;
}
} else {
av_log(VAR_0->avctx, AV_LOG_ERROR, "Skipping high bytes tree\n");
}
VAR_6[0] = bitstream_read(VAR_1, 8);
VAR_6[0] |= bitstream_read(VAR_1, 8) << 8;
VAR_6[1] = bitstream_read(VAR_1, 8);
VAR_6[1] |= bitstream_read(VAR_1, 8) << 8;
VAR_6[2] = bitstream_read(VAR_1, 8);
VAR_6[2] |= bitstream_read(VAR_1, 8) << 8;
VAR_3[0] = VAR_3[1] = VAR_3[2] = -1;
ctx.VAR_6[0] = VAR_6[0];
ctx.VAR_6[1] = VAR_6[1];
ctx.VAR_6[2] = VAR_6[2];
ctx.v1 = &vlc[0];
ctx.v2 = &vlc[1];
ctx.recode1 = tmp1.values;
ctx.recode2 = tmp2.values;
ctx.VAR_3 = VAR_3;
huff.length = ((VAR_4 + 3) >> 2) + 4;
huff.maxlength = 0;
huff.current = 0;
huff.values = av_mallocz(huff.length * sizeof(int));
if (!huff.values) {
VAR_7 = AVERROR(ENOMEM);
goto error;
}
if ((VAR_5 = smacker_decode_bigtree(VAR_1, &huff, &ctx)) < 0)
VAR_7 = VAR_5;
bitstream_skip(VAR_1, 1);
if(ctx.VAR_3[0] == -1) ctx.VAR_3[0] = huff.current++;
if(ctx.VAR_3[1] == -1) ctx.VAR_3[1] = huff.current++;
if(ctx.VAR_3[2] == -1) ctx.VAR_3[2] = huff.current++;
if (ctx.VAR_3[0] >= huff.length ||
ctx.VAR_3[1] >= huff.length ||
ctx.VAR_3[2] >= huff.length) {
av_log(VAR_0->avctx, AV_LOG_ERROR, "Huffman codes out of range\n");
VAR_7 = AVERROR_INVALIDDATA;
}
*VAR_2 = huff.values;
error:
if(vlc[0].table)
ff_free_vlc(&vlc[0]);
if(vlc[1].table)
ff_free_vlc(&vlc[1]);
av_free(tmp1.bits);
av_free(tmp1.lengths);
av_free(tmp1.values);
av_free(tmp2.bits);
av_free(tmp2.lengths);
av_free(tmp2.values);
return VAR_7;
}
| [
"static int FUNC_0(SmackVContext *VAR_0, BitstreamContext *VAR_1,\nint **VAR_2, int *VAR_3, int VAR_4)\n{",
"int VAR_5;",
"HuffContext huff;",
"HuffContext tmp1, tmp2;",
"VLC vlc[2] = { { 0 } };",
"int VAR_6[3];",
"DBCtx ctx;",
"int VAR_7 = 0;",
"if(VAR_4 >= UINT_MAX>>4){",
"av_log(VAR_0->avctx, A... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
23
],
[
25
],
[
27
],
[
29
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
47
],
[... |
13,093 | static BlockAIOCB *hdev_aio_ioctl(BlockDriverState *bs,
unsigned long int req, void *buf,
BlockCompletionFunc *cb, void *opaque)
{
BDRVRawState *s = bs->opaque;
RawPosixAIOData *acb;
ThreadPool *pool;
if (fd_open(bs) < 0)
return NULL;
acb = g_new(RawPosixAIOData, 1);
acb->bs = bs;
acb->aio_type = QEMU_AIO_IOCTL;
acb->aio_fildes = s->fd;
acb->aio_offset = 0;
acb->aio_ioctl_buf = buf;
acb->aio_ioctl_cmd = req;
pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque); | true | qemu | 7c9e527659c67d4d7b41d9504f93d2d7ee482488 | static BlockAIOCB *hdev_aio_ioctl(BlockDriverState *bs,
unsigned long int req, void *buf,
BlockCompletionFunc *cb, void *opaque)
{
BDRVRawState *s = bs->opaque;
RawPosixAIOData *acb;
ThreadPool *pool;
if (fd_open(bs) < 0)
return NULL;
acb = g_new(RawPosixAIOData, 1);
acb->bs = bs;
acb->aio_type = QEMU_AIO_IOCTL;
acb->aio_fildes = s->fd;
acb->aio_offset = 0;
acb->aio_ioctl_buf = buf;
acb->aio_ioctl_cmd = req;
pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque); | {
"code": [],
"line_no": []
} | static BlockAIOCB *FUNC_0(BlockDriverState *bs,
unsigned long int req, void *buf,
BlockCompletionFunc *cb, void *opaque)
{
BDRVRawState *s = bs->opaque;
RawPosixAIOData *acb;
ThreadPool *pool;
if (fd_open(bs) < 0)
return NULL;
acb = g_new(RawPosixAIOData, 1);
acb->bs = bs;
acb->aio_type = QEMU_AIO_IOCTL;
acb->aio_fildes = s->fd;
acb->aio_offset = 0;
acb->aio_ioctl_buf = buf;
acb->aio_ioctl_cmd = req;
pool = aio_get_thread_pool(bdrv_get_aio_context(bs));
return thread_pool_submit_aio(pool, aio_worker, acb, cb, opaque); | [
"static BlockAIOCB *FUNC_0(BlockDriverState *bs,\nunsigned long int req, void *buf,\nBlockCompletionFunc *cb, void *opaque)\n{",
"BDRVRawState *s = bs->opaque;",
"RawPosixAIOData *acb;",
"ThreadPool *pool;",
"if (fd_open(bs) < 0)\nreturn NULL;",
"acb = g_new(RawPosixAIOData, 1);",
"acb->bs = bs;",
"ac... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
2,
3,
4
],
[
5
],
[
6
],
[
7
],
[
8,
9
],
[
10
],
[
11
],
[
12
],
[
13
],
[
14
],
[
15
],
[
16
],
[
17
],
[
18
]
] |
13,094 | static int blkdebug_debug_remove_breakpoint(BlockDriverState *bs,
const char *tag)
{
BDRVBlkdebugState *s = bs->opaque;
BlkdebugSuspendedReq *r, *r_next;
BlkdebugRule *rule, *next;
int i, ret = -ENOENT;
for (i = 0; i < BLKDBG__MAX; i++) {
QLIST_FOREACH_SAFE(rule, &s->rules[i], next, next) {
if (rule->action == ACTION_SUSPEND &&
!strcmp(rule->options.suspend.tag, tag)) {
remove_rule(rule);
ret = 0;
}
}
}
QLIST_FOREACH_SAFE(r, &s->suspended_reqs, next, r_next) {
if (!strcmp(r->tag, tag)) {
qemu_coroutine_enter(r->co, NULL);
ret = 0;
}
}
return ret;
}
| true | qemu | 0b8b8753e4d94901627b3e86431230f2319215c4 | static int blkdebug_debug_remove_breakpoint(BlockDriverState *bs,
const char *tag)
{
BDRVBlkdebugState *s = bs->opaque;
BlkdebugSuspendedReq *r, *r_next;
BlkdebugRule *rule, *next;
int i, ret = -ENOENT;
for (i = 0; i < BLKDBG__MAX; i++) {
QLIST_FOREACH_SAFE(rule, &s->rules[i], next, next) {
if (rule->action == ACTION_SUSPEND &&
!strcmp(rule->options.suspend.tag, tag)) {
remove_rule(rule);
ret = 0;
}
}
}
QLIST_FOREACH_SAFE(r, &s->suspended_reqs, next, r_next) {
if (!strcmp(r->tag, tag)) {
qemu_coroutine_enter(r->co, NULL);
ret = 0;
}
}
return ret;
}
| {
"code": [
" qemu_coroutine_enter(r->co, NULL);",
" qemu_coroutine_enter(r->co, NULL);"
],
"line_no": [
39,
39
]
} | static int FUNC_0(BlockDriverState *VAR_0,
const char *VAR_1)
{
BDRVBlkdebugState *s = VAR_0->opaque;
BlkdebugSuspendedReq *r, *r_next;
BlkdebugRule *rule, *next;
int VAR_2, VAR_3 = -ENOENT;
for (VAR_2 = 0; VAR_2 < BLKDBG__MAX; VAR_2++) {
QLIST_FOREACH_SAFE(rule, &s->rules[VAR_2], next, next) {
if (rule->action == ACTION_SUSPEND &&
!strcmp(rule->options.suspend.VAR_1, VAR_1)) {
remove_rule(rule);
VAR_3 = 0;
}
}
}
QLIST_FOREACH_SAFE(r, &s->suspended_reqs, next, r_next) {
if (!strcmp(r->VAR_1, VAR_1)) {
qemu_coroutine_enter(r->co, NULL);
VAR_3 = 0;
}
}
return VAR_3;
}
| [
"static int FUNC_0(BlockDriverState *VAR_0,\nconst char *VAR_1)\n{",
"BDRVBlkdebugState *s = VAR_0->opaque;",
"BlkdebugSuspendedReq *r, *r_next;",
"BlkdebugRule *rule, *next;",
"int VAR_2, VAR_3 = -ENOENT;",
"for (VAR_2 = 0; VAR_2 < BLKDBG__MAX; VAR_2++) {",
"QLIST_FOREACH_SAFE(rule, &s->rules[VAR_2], n... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17
],
[
19
],
[
21,
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
... |
13,095 | static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
RAMBlock *block, ram_addr_t offset,
bool last_stage,
uint64_t *bytes_transferred,
ram_addr_t dirty_ram_abs)
{
int res = 0;
/* Check the pages is dirty and if it is send it */
if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
unsigned long *unsentmap;
if (compression_switch && migrate_use_compression()) {
res = ram_save_compressed_page(f, block, offset,
last_stage,
bytes_transferred);
} else {
res = ram_save_page(f, block, offset, last_stage,
bytes_transferred);
}
if (res < 0) {
return res;
}
unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
if (unsentmap) {
clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
}
last_sent_block = block;
}
return res;
}
| true | qemu | 3fd3c4b37c116cce7e9810fcc15a0a2cf15115a5 | static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
RAMBlock *block, ram_addr_t offset,
bool last_stage,
uint64_t *bytes_transferred,
ram_addr_t dirty_ram_abs)
{
int res = 0;
if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
unsigned long *unsentmap;
if (compression_switch && migrate_use_compression()) {
res = ram_save_compressed_page(f, block, offset,
last_stage,
bytes_transferred);
} else {
res = ram_save_page(f, block, offset, last_stage,
bytes_transferred);
}
if (res < 0) {
return res;
}
unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
if (unsentmap) {
clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
}
last_sent_block = block;
}
return res;
}
| {
"code": [
" last_sent_block = block;"
],
"line_no": [
55
]
} | static int FUNC_0(MigrationState *VAR_0, QEMUFile *VAR_1,
RAMBlock *VAR_2, ram_addr_t VAR_3,
bool VAR_4,
uint64_t *VAR_5,
ram_addr_t VAR_6)
{
int VAR_7 = 0;
if (migration_bitmap_clear_dirty(VAR_6)) {
unsigned long *VAR_8;
if (compression_switch && migrate_use_compression()) {
VAR_7 = ram_save_compressed_page(VAR_1, VAR_2, VAR_3,
VAR_4,
VAR_5);
} else {
VAR_7 = ram_save_page(VAR_1, VAR_2, VAR_3, VAR_4,
VAR_5);
}
if (VAR_7 < 0) {
return VAR_7;
}
VAR_8 = atomic_rcu_read(&migration_bitmap_rcu)->VAR_8;
if (VAR_8) {
clear_bit(VAR_6 >> TARGET_PAGE_BITS, VAR_8);
}
last_sent_block = VAR_2;
}
return VAR_7;
}
| [
"static int FUNC_0(MigrationState *VAR_0, QEMUFile *VAR_1,\nRAMBlock *VAR_2, ram_addr_t VAR_3,\nbool VAR_4,\nuint64_t *VAR_5,\nram_addr_t VAR_6)\n{",
"int VAR_7 = 0;",
"if (migration_bitmap_clear_dirty(VAR_6)) {",
"unsigned long *VAR_8;",
"if (compression_switch && migrate_use_compression()) {",
"VAR_7 = ... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0
] | [
[
1,
3,
5,
7,
9,
11
],
[
13
],
[
19
],
[
21
],
[
23
],
[
25,
27,
29
],
[
31
],
[
33,
35
],
[
37
],
[
41
],
[
43
],
[
45
],
[
47
],
[
49
],
[
51
],
[
53
],
[... |
13,096 | static inline void RENAME(yuv2rgb565_1)(SwsContext *c, const uint16_t *buf0,
const uint16_t *ubuf0, const uint16_t *ubuf1,
const uint16_t *vbuf0, const uint16_t *vbuf1,
const uint16_t *abuf0, uint8_t *dest,
int dstW, int uvalpha, enum PixelFormat dstFormat,
int flags, int y)
{
x86_reg uv_off = c->uv_off << 1;
const uint16_t *buf1= buf0; //FIXME needed for RGB1/BGR1
if (uvalpha < 2048) { // note this is not correct (shifts chrominance by 0.5 pixels) but it is a bit faster
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1(%%REGBP, %5, %6)
"pxor %%mm7, %%mm7 \n\t"
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
"paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
"paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
WRITERGB16(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
"a" (&c->redDither), "m"(uv_off)
);
} else {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1b(%%REGBP, %5, %6)
"pxor %%mm7, %%mm7 \n\t"
/* mm2=B, %%mm4=G, %%mm5=R, %%mm7=0 */
#ifdef DITHER1XBPP
"paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
"paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
"paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
WRITERGB16(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
"a" (&c->redDither), "m"(uv_off)
);
}
}
| true | FFmpeg | 009f829dde811af654af7110326aea3a72c05d5e | static inline void RENAME(yuv2rgb565_1)(SwsContext *c, const uint16_t *buf0,
const uint16_t *ubuf0, const uint16_t *ubuf1,
const uint16_t *vbuf0, const uint16_t *vbuf1,
const uint16_t *abuf0, uint8_t *dest,
int dstW, int uvalpha, enum PixelFormat dstFormat,
int flags, int y)
{
x86_reg uv_off = c->uv_off << 1;
const uint16_t *buf1= buf0;
if (uvalpha < 2048) {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1(%%REGBP, %5, %6)
"pxor %%mm7, %%mm7 \n\t"
#ifdef DITHER1XBPP
"paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
"paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
"paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
WRITERGB16(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
"a" (&c->redDither), "m"(uv_off)
);
} else {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1b(%%REGBP, %5, %6)
"pxor %%mm7, %%mm7 \n\t"
#ifdef DITHER1XBPP
"paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
"paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
"paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
WRITERGB16(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (buf1), "S" (ubuf0), "D" (ubuf1), "m" (dest),
"a" (&c->redDither), "m"(uv_off)
);
}
}
| {
"code": [
" x86_reg uv_off = c->uv_off << 1;",
" \"a\" (&c->redDither), \"m\"(uv_off)",
" \"a\" (&c->redDither), \"m\"(uv_off)",
" x86_reg uv_off = c->uv_off << 1;",
" x86_reg uv_off = c->uv_off << 1;",
" x86_reg uv_off = c->uv_off << 1;",
" x86_reg uv_off = c->uv_off << 1;",
" x86_reg uv_off = c->uv_off << 1;",
" x86_reg uv_off = c->uv_off << 1;",
" YSCALEYUV2RGB1(%%REGBP, %5, %6)",
" \"a\" (&c->redDither), \"m\"(uv_off)",
" YSCALEYUV2RGB1b(%%REGBP, %5, %6)",
" \"a\" (&c->redDither), \"m\"(uv_off)",
" x86_reg uv_off = c->uv_off << 1;",
" YSCALEYUV2RGB1(%%REGBP, %5, %6)",
" \"a\" (&c->redDither), \"m\"(uv_off)",
" YSCALEYUV2RGB1b(%%REGBP, %5, %6)",
" \"a\" (&c->redDither), \"m\"(uv_off)",
" x86_reg uv_off = c->uv_off << 1;",
" YSCALEYUV2RGB1(%%REGBP, %5, %6)",
" \"a\" (&c->redDither), \"m\"(uv_off)",
" YSCALEYUV2RGB1b(%%REGBP, %5, %6)",
" \"a\" (&c->redDither), \"m\"(uv_off)",
" x86_reg uv_off = c->uv_off << 1;",
" \"a\" (&c->redDither), \"m\"(uv_off)",
" \"a\" (&c->redDither), \"m\"(uv_off)"
],
"line_no": [
15,
55,
55,
15,
15,
15,
15,
15,
15,
31,
55,
69,
55,
15,
31,
55,
69,
55,
15,
31,
55,
69,
55,
15,
55,
55
]
} | static inline void FUNC_0(yuv2rgb565_1)(SwsContext *c, const uint16_t *buf0,
const uint16_t *ubuf0, const uint16_t *ubuf1,
const uint16_t *vbuf0, const uint16_t *vbuf1,
const uint16_t *abuf0, uint8_t *dest,
int dstW, int uvalpha, enum PixelFormat dstFormat,
int flags, int y)
{
x86_reg uv_off = c->uv_off << 1;
const uint16_t *VAR_0= buf0;
if (uvalpha < 2048) {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1(%%REGBP, %5, %6)
"pxor %%mm7, %%mm7 \n\t"
#ifdef DITHER1XBPP
"paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
"paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
"paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
WRITERGB16(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (VAR_0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
"a" (&c->redDither), "m"(uv_off)
);
} else {
__asm__ volatile(
"mov %%"REG_b", "ESP_OFFSET"(%5) \n\t"
"mov %4, %%"REG_b" \n\t"
"push %%"REG_BP" \n\t"
YSCALEYUV2RGB1b(%%REGBP, %5, %6)
"pxor %%mm7, %%mm7 \n\t"
#ifdef DITHER1XBPP
"paddusb "BLUE_DITHER"(%5), %%mm2 \n\t"
"paddusb "GREEN_DITHER"(%5), %%mm4 \n\t"
"paddusb "RED_DITHER"(%5), %%mm5 \n\t"
#endif
WRITERGB16(%%REGb, 8280(%5), %%REGBP)
"pop %%"REG_BP" \n\t"
"mov "ESP_OFFSET"(%5), %%"REG_b" \n\t"
:: "c" (buf0), "d" (VAR_0), "S" (ubuf0), "D" (ubuf1), "m" (dest),
"a" (&c->redDither), "m"(uv_off)
);
}
}
| [
"static inline void FUNC_0(yuv2rgb565_1)(SwsContext *c, const uint16_t *buf0,\nconst uint16_t *ubuf0, const uint16_t *ubuf1,\nconst uint16_t *vbuf0, const uint16_t *vbuf1,\nconst uint16_t *abuf0, uint8_t *dest,\nint dstW, int uvalpha, enum PixelFormat dstFormat,\nint flags, int y)\n{",
"x86_reg uv_off = c->uv_off... | [
0,
1,
0,
0,
1,
0,
1,
0,
0
] | [
[
1,
3,
5,
7,
9,
11,
13
],
[
15
],
[
17
],
[
21
],
[
23,
25,
27,
29,
31,
33,
37,
39,
41,
43,
45,
47,
49,
51,
53,
55,
57
],
[
59
],
[
61,
63,
65,
67,
69,
71,... |
13,098 | static void coroutine_enter_cb(void *opaque, int ret)
{
Coroutine *co = opaque;
qemu_coroutine_enter(co, NULL);
}
| true | qemu | 0b8b8753e4d94901627b3e86431230f2319215c4 | static void coroutine_enter_cb(void *opaque, int ret)
{
Coroutine *co = opaque;
qemu_coroutine_enter(co, NULL);
}
| {
"code": [
" qemu_coroutine_enter(co, NULL);",
" qemu_coroutine_enter(co, NULL);",
" qemu_coroutine_enter(co, NULL);",
" qemu_coroutine_enter(co, NULL);",
" qemu_coroutine_enter(co, NULL);",
" qemu_coroutine_enter(co, NULL);",
" qemu_coroutine_enter(co, NULL);"
],
"line_no": [
7,
7,
7,
7,
7,
7,
7
]
} | static void FUNC_0(void *VAR_0, int VAR_1)
{
Coroutine *co = VAR_0;
qemu_coroutine_enter(co, NULL);
}
| [
"static void FUNC_0(void *VAR_0, int VAR_1)\n{",
"Coroutine *co = VAR_0;",
"qemu_coroutine_enter(co, NULL);",
"}"
] | [
0,
0,
1,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
]
] |
13,099 | static int ppc6xx_tlb_check (CPUState *env, mmu_ctx_t *ctx,
target_ulong eaddr, int rw, int access_type)
{
ppc_tlb_t *tlb;
int nr, best, way;
int ret;
best = -1;
ret = -1; /* No TLB found */
for (way = 0; way < env->nb_ways; way++) {
nr = ppc6xx_tlb_getnum(env, eaddr, way,
access_type == ACCESS_CODE ? 1 : 0);
tlb = &env->tlb[nr];
/* This test "emulates" the PTE index match for hardware TLBs */
if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
#if defined (DEBUG_SOFTWARE_TLB)
if (loglevel != 0) {
fprintf(logfile, "TLB %d/%d %s [%08x %08x] <> %08x\n",
nr, env->nb_tlb,
pte_is_valid(tlb->pte0) ? "valid" : "inval",
tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
}
#endif
continue;
}
#if defined (DEBUG_SOFTWARE_TLB)
if (loglevel != 0) {
fprintf(logfile, "TLB %d/%d %s %08x <> %08x %08x %c %c\n",
nr, env->nb_tlb,
pte_is_valid(tlb->pte0) ? "valid" : "inval",
tlb->EPN, eaddr, tlb->pte1,
rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
}
#endif
switch (pte_check(ctx, tlb->pte0, tlb->pte1, 0, rw)) {
case -3:
/* TLB inconsistency */
return -1;
case -2:
/* Access violation */
ret = -2;
best = nr;
break;
case -1:
default:
/* No match */
break;
case 0:
/* access granted */
/* XXX: we should go on looping to check all TLBs consistency
* but we can speed-up the whole thing as the
* result would be undefined if TLBs are not consistent.
*/
ret = 0;
best = nr;
goto done;
}
}
if (best != -1) {
done:
#if defined (DEBUG_SOFTWARE_TLB)
if (loglevel > 0) {
fprintf(logfile, "found TLB at addr 0x%08lx prot=0x%01x ret=%d\n",
ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
}
#endif
/* Update page flags */
pte_update_flags(ctx, &env->tlb[best].pte1, ret, rw);
}
return ret;
}
| true | qemu | d9bce9d99f4656ae0b0127f7472db9067b8f84ab | static int ppc6xx_tlb_check (CPUState *env, mmu_ctx_t *ctx,
target_ulong eaddr, int rw, int access_type)
{
ppc_tlb_t *tlb;
int nr, best, way;
int ret;
best = -1;
ret = -1;
for (way = 0; way < env->nb_ways; way++) {
nr = ppc6xx_tlb_getnum(env, eaddr, way,
access_type == ACCESS_CODE ? 1 : 0);
tlb = &env->tlb[nr];
if ((eaddr & TARGET_PAGE_MASK) != tlb->EPN) {
#if defined (DEBUG_SOFTWARE_TLB)
if (loglevel != 0) {
fprintf(logfile, "TLB %d/%d %s [%08x %08x] <> %08x\n",
nr, env->nb_tlb,
pte_is_valid(tlb->pte0) ? "valid" : "inval",
tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, eaddr);
}
#endif
continue;
}
#if defined (DEBUG_SOFTWARE_TLB)
if (loglevel != 0) {
fprintf(logfile, "TLB %d/%d %s %08x <> %08x %08x %c %c\n",
nr, env->nb_tlb,
pte_is_valid(tlb->pte0) ? "valid" : "inval",
tlb->EPN, eaddr, tlb->pte1,
rw ? 'S' : 'L', access_type == ACCESS_CODE ? 'I' : 'D');
}
#endif
switch (pte_check(ctx, tlb->pte0, tlb->pte1, 0, rw)) {
case -3:
return -1;
case -2:
ret = -2;
best = nr;
break;
case -1:
default:
break;
case 0:
ret = 0;
best = nr;
goto done;
}
}
if (best != -1) {
done:
#if defined (DEBUG_SOFTWARE_TLB)
if (loglevel > 0) {
fprintf(logfile, "found TLB at addr 0x%08lx prot=0x%01x ret=%d\n",
ctx->raddr & TARGET_PAGE_MASK, ctx->prot, ret);
}
#endif
pte_update_flags(ctx, &env->tlb[best].pte1, ret, rw);
}
return ret;
}
| {
"code": [
"#endif",
"#endif"
],
"line_no": [
45,
45
]
} | static int FUNC_0 (CPUState *VAR_0, mmu_ctx_t *VAR_1,
target_ulong VAR_2, int VAR_3, int VAR_4)
{
ppc_tlb_t *tlb;
int VAR_5, VAR_6, VAR_7;
int VAR_8;
VAR_6 = -1;
VAR_8 = -1;
for (VAR_7 = 0; VAR_7 < VAR_0->nb_ways; VAR_7++) {
VAR_5 = ppc6xx_tlb_getnum(VAR_0, VAR_2, VAR_7,
VAR_4 == ACCESS_CODE ? 1 : 0);
tlb = &VAR_0->tlb[VAR_5];
if ((VAR_2 & TARGET_PAGE_MASK) != tlb->EPN) {
#if defined (DEBUG_SOFTWARE_TLB)
if (loglevel != 0) {
fprintf(logfile, "TLB %d/%d %s [%08x %08x] <> %08x\n",
VAR_5, VAR_0->nb_tlb,
pte_is_valid(tlb->pte0) ? "valid" : "inval",
tlb->EPN, tlb->EPN + TARGET_PAGE_SIZE, VAR_2);
}
#endif
continue;
}
#if defined (DEBUG_SOFTWARE_TLB)
if (loglevel != 0) {
fprintf(logfile, "TLB %d/%d %s %08x <> %08x %08x %c %c\n",
VAR_5, VAR_0->nb_tlb,
pte_is_valid(tlb->pte0) ? "valid" : "inval",
tlb->EPN, VAR_2, tlb->pte1,
VAR_3 ? 'S' : 'L', VAR_4 == ACCESS_CODE ? 'I' : 'D');
}
#endif
switch (pte_check(VAR_1, tlb->pte0, tlb->pte1, 0, VAR_3)) {
case -3:
return -1;
case -2:
VAR_8 = -2;
VAR_6 = VAR_5;
break;
case -1:
default:
break;
case 0:
VAR_8 = 0;
VAR_6 = VAR_5;
goto done;
}
}
if (VAR_6 != -1) {
done:
#if defined (DEBUG_SOFTWARE_TLB)
if (loglevel > 0) {
fprintf(logfile, "found TLB at addr 0x%08lx prot=0x%01x VAR_8=%d\n",
VAR_1->raddr & TARGET_PAGE_MASK, VAR_1->prot, VAR_8);
}
#endif
pte_update_flags(VAR_1, &VAR_0->tlb[VAR_6].pte1, VAR_8, VAR_3);
}
return VAR_8;
}
| [
"static int FUNC_0 (CPUState *VAR_0, mmu_ctx_t *VAR_1,\ntarget_ulong VAR_2, int VAR_3, int VAR_4)\n{",
"ppc_tlb_t *tlb;",
"int VAR_5, VAR_6, VAR_7;",
"int VAR_8;",
"VAR_6 = -1;",
"VAR_8 = -1;",
"for (VAR_7 = 0; VAR_7 < VAR_0->nb_ways; VAR_7++) {",
"VAR_5 = ppc6xx_tlb_getnum(VAR_0, VAR_2, VAR_7,\nVAR_4... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
15
],
[
17
],
[
19
],
[
21,
23
],
[
25
],
[
29
],
[
31,
33
],
[
35,
37,
39,
41
],
[
43
],
[
45,
47
],
[
49
],
[
51,
53
... |
13,101 | int bdrv_all_goto_snapshot(const char *name, BlockDriverState **first_bad_bs)
{
int err = 0;
BlockDriverState *bs;
BdrvNextIterator *it = NULL;
while (err == 0 && (it = bdrv_next(it, &bs))) {
AioContext *ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
if (bdrv_can_snapshot(bs)) {
err = bdrv_snapshot_goto(bs, name);
}
aio_context_release(ctx);
}
*first_bad_bs = bs;
return err;
}
| true | qemu | 88be7b4be4aa17c88247e162bdd7577ea79db94f | int bdrv_all_goto_snapshot(const char *name, BlockDriverState **first_bad_bs)
{
int err = 0;
BlockDriverState *bs;
BdrvNextIterator *it = NULL;
while (err == 0 && (it = bdrv_next(it, &bs))) {
AioContext *ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
if (bdrv_can_snapshot(bs)) {
err = bdrv_snapshot_goto(bs, name);
}
aio_context_release(ctx);
}
*first_bad_bs = bs;
return err;
}
| {
"code": [
" BdrvNextIterator *it = NULL;",
" BdrvNextIterator *it = NULL;",
" BdrvNextIterator *it = NULL;",
" BlockDriverState *bs;",
" BdrvNextIterator *it = NULL;",
" BdrvNextIterator *it = NULL;",
" BdrvNextIterator *it = NULL;",
" BdrvNextIterator *it = NULL;",
" while (err == 0 && (it = bdrv_next(it, &bs))) {",
" BdrvNextIterator *it = NULL;",
" while (err == 0 && (it = bdrv_next(it, &bs))) {",
" BdrvNextIterator *it = NULL;",
" while (err == 0 && (it = bdrv_next(it, &bs))) {",
" BdrvNextIterator *it = NULL;",
" BdrvNextIterator *it = NULL;",
" BdrvNextIterator *it = NULL;",
" BdrvNextIterator *it = NULL;"
],
"line_no": [
9,
9,
9,
7,
9,
9,
9,
9,
13,
9,
13,
9,
13,
9,
9,
9,
9
]
} | int FUNC_0(const char *VAR_0, BlockDriverState **VAR_1)
{
int VAR_2 = 0;
BlockDriverState *bs;
BdrvNextIterator *it = NULL;
while (VAR_2 == 0 && (it = bdrv_next(it, &bs))) {
AioContext *ctx = bdrv_get_aio_context(bs);
aio_context_acquire(ctx);
if (bdrv_can_snapshot(bs)) {
VAR_2 = bdrv_snapshot_goto(bs, VAR_0);
}
aio_context_release(ctx);
}
*VAR_1 = bs;
return VAR_2;
}
| [
"int FUNC_0(const char *VAR_0, BlockDriverState **VAR_1)\n{",
"int VAR_2 = 0;",
"BlockDriverState *bs;",
"BdrvNextIterator *it = NULL;",
"while (VAR_2 == 0 && (it = bdrv_next(it, &bs))) {",
"AioContext *ctx = bdrv_get_aio_context(bs);",
"aio_context_acquire(ctx);",
"if (bdrv_can_snapshot(bs)) {",
"V... | [
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
33
],
[
35
],
[
37
]
] |
13,102 | int RENAME(swri_resample)(ResampleContext *c, DELEM *dst, const DELEM *src, int *consumed, int src_size, int dst_size, int update_ctx){
int dst_index, i;
int index= c->index;
int frac= c->frac;
int dst_incr_frac= c->dst_incr % c->src_incr;
int dst_incr= c->dst_incr / c->src_incr;
int compensation_distance= c->compensation_distance;
av_assert1(c->filter_shift == FILTER_SHIFT);
av_assert1(c->felem_size == sizeof(FELEM));
if(compensation_distance == 0 && c->filter_length == 1 && c->phase_shift==0){
int64_t index2= (1LL<<32)*c->frac/c->src_incr + (1LL<<32)*index;
int64_t incr= (1LL<<32) * c->dst_incr / c->src_incr;
int new_size = (src_size * (int64_t)c->src_incr - frac + c->dst_incr - 1) / c->dst_incr;
dst_size= FFMIN(dst_size, new_size);
for(dst_index=0; dst_index < dst_size; dst_index++){
dst[dst_index] = src[index2>>32];
index2 += incr;
}
index += dst_index * dst_incr;
index += (frac + dst_index * (int64_t)dst_incr_frac) / c->src_incr;
frac = (frac + dst_index * (int64_t)dst_incr_frac) % c->src_incr;
av_assert2(index >= 0);
*consumed= index;
index = 0;
} else if (compensation_distance == 0 && index >= 0) {
int64_t end_index = (1 + src_size - c->filter_length) << c->phase_shift;
int64_t delta_frac = (end_index - index) * c->src_incr - c->frac;
int delta_n = (delta_frac + c->dst_incr - 1) / c->dst_incr;
int n = FFMIN(dst_size, delta_n);
int sample_index;
if (!c->linear) {
sample_index = index >> c->phase_shift;
index &= c->phase_mask;
for (dst_index = 0; dst_index < n; dst_index++) {
FELEM *filter = ((FELEM *) c->filter_bank) + c->filter_alloc * index;
#ifdef COMMON_CORE
COMMON_CORE
#else
FELEM2 val=0;
for (i = 0; i < c->filter_length; i++) {
val += src[sample_index + i] * (FELEM2)filter[i];
}
OUT(dst[dst_index], val);
#endif
frac += dst_incr_frac;
index += dst_incr;
if (frac >= c->src_incr) {
frac -= c->src_incr;
index++;
}
sample_index += index >> c->phase_shift;
index &= c->phase_mask;
}
} else {
sample_index = index >> c->phase_shift;
index &= c->phase_mask;
for (dst_index = 0; dst_index < n; dst_index++) {
FELEM *filter = ((FELEM *) c->filter_bank) + c->filter_alloc * index;
FELEM2 val=0, v2 = 0;
#ifdef LINEAR_CORE
LINEAR_CORE
#else
for (i = 0; i < c->filter_length; i++) {
val += src[sample_index + i] * (FELEM2)filter[i];
v2 += src[sample_index + i] * (FELEM2)filter[i + c->filter_alloc];
}
#endif
val += (v2 - val) * (FELEML) frac / c->src_incr;
OUT(dst[dst_index], val);
frac += dst_incr_frac;
index += dst_incr;
if (frac >= c->src_incr) {
frac -= c->src_incr;
index++;
}
sample_index += index >> c->phase_shift;
index &= c->phase_mask;
}
}
*consumed = sample_index;
} else {
int sample_index = 0;
for(dst_index=0; dst_index < dst_size; dst_index++){
FELEM *filter;
FELEM2 val=0;
sample_index += index >> c->phase_shift;
index &= c->phase_mask;
filter = ((FELEM*)c->filter_bank) + c->filter_alloc*index;
if(sample_index + c->filter_length > src_size || -sample_index >= src_size){
break;
}else if(sample_index < 0){
for(i=0; i<c->filter_length; i++)
val += src[FFABS(sample_index + i)] * (FELEM2)filter[i];
OUT(dst[dst_index], val);
}else if(c->linear){
FELEM2 v2=0;
#ifdef LINEAR_CORE
LINEAR_CORE
#else
for(i=0; i<c->filter_length; i++){
val += src[sample_index + i] * (FELEM2)filter[i];
v2 += src[sample_index + i] * (FELEM2)filter[i + c->filter_alloc];
}
#endif
val+=(v2-val)*(FELEML)frac / c->src_incr;
OUT(dst[dst_index], val);
}else{
#ifdef COMMON_CORE
COMMON_CORE
#else
for(i=0; i<c->filter_length; i++){
val += src[sample_index + i] * (FELEM2)filter[i];
}
OUT(dst[dst_index], val);
#endif
}
frac += dst_incr_frac;
index += dst_incr;
if(frac >= c->src_incr){
frac -= c->src_incr;
index++;
}
if(dst_index + 1 == compensation_distance){
compensation_distance= 0;
dst_incr_frac= c->ideal_dst_incr % c->src_incr;
dst_incr= c->ideal_dst_incr / c->src_incr;
}
}
*consumed= FFMAX(sample_index, 0);
index += FFMIN(sample_index, 0) << c->phase_shift;
if(compensation_distance){
compensation_distance -= dst_index;
av_assert1(compensation_distance > 0);
}
}
if(update_ctx){
c->frac= frac;
c->index= index;
c->dst_incr= dst_incr_frac + c->src_incr*dst_incr;
c->compensation_distance= compensation_distance;
}
return dst_index;
}
| true | FFmpeg | 2c23f87c8553d5cfc1c130bc3e487660a3c826ec | int RENAME(swri_resample)(ResampleContext *c, DELEM *dst, const DELEM *src, int *consumed, int src_size, int dst_size, int update_ctx){
int dst_index, i;
int index= c->index;
int frac= c->frac;
int dst_incr_frac= c->dst_incr % c->src_incr;
int dst_incr= c->dst_incr / c->src_incr;
int compensation_distance= c->compensation_distance;
av_assert1(c->filter_shift == FILTER_SHIFT);
av_assert1(c->felem_size == sizeof(FELEM));
if(compensation_distance == 0 && c->filter_length == 1 && c->phase_shift==0){
int64_t index2= (1LL<<32)*c->frac/c->src_incr + (1LL<<32)*index;
int64_t incr= (1LL<<32) * c->dst_incr / c->src_incr;
int new_size = (src_size * (int64_t)c->src_incr - frac + c->dst_incr - 1) / c->dst_incr;
dst_size= FFMIN(dst_size, new_size);
for(dst_index=0; dst_index < dst_size; dst_index++){
dst[dst_index] = src[index2>>32];
index2 += incr;
}
index += dst_index * dst_incr;
index += (frac + dst_index * (int64_t)dst_incr_frac) / c->src_incr;
frac = (frac + dst_index * (int64_t)dst_incr_frac) % c->src_incr;
av_assert2(index >= 0);
*consumed= index;
index = 0;
} else if (compensation_distance == 0 && index >= 0) {
int64_t end_index = (1 + src_size - c->filter_length) << c->phase_shift;
int64_t delta_frac = (end_index - index) * c->src_incr - c->frac;
int delta_n = (delta_frac + c->dst_incr - 1) / c->dst_incr;
int n = FFMIN(dst_size, delta_n);
int sample_index;
if (!c->linear) {
sample_index = index >> c->phase_shift;
index &= c->phase_mask;
for (dst_index = 0; dst_index < n; dst_index++) {
FELEM *filter = ((FELEM *) c->filter_bank) + c->filter_alloc * index;
#ifdef COMMON_CORE
COMMON_CORE
#else
FELEM2 val=0;
for (i = 0; i < c->filter_length; i++) {
val += src[sample_index + i] * (FELEM2)filter[i];
}
OUT(dst[dst_index], val);
#endif
frac += dst_incr_frac;
index += dst_incr;
if (frac >= c->src_incr) {
frac -= c->src_incr;
index++;
}
sample_index += index >> c->phase_shift;
index &= c->phase_mask;
}
} else {
sample_index = index >> c->phase_shift;
index &= c->phase_mask;
for (dst_index = 0; dst_index < n; dst_index++) {
FELEM *filter = ((FELEM *) c->filter_bank) + c->filter_alloc * index;
FELEM2 val=0, v2 = 0;
#ifdef LINEAR_CORE
LINEAR_CORE
#else
for (i = 0; i < c->filter_length; i++) {
val += src[sample_index + i] * (FELEM2)filter[i];
v2 += src[sample_index + i] * (FELEM2)filter[i + c->filter_alloc];
}
#endif
val += (v2 - val) * (FELEML) frac / c->src_incr;
OUT(dst[dst_index], val);
frac += dst_incr_frac;
index += dst_incr;
if (frac >= c->src_incr) {
frac -= c->src_incr;
index++;
}
sample_index += index >> c->phase_shift;
index &= c->phase_mask;
}
}
*consumed = sample_index;
} else {
int sample_index = 0;
for(dst_index=0; dst_index < dst_size; dst_index++){
FELEM *filter;
FELEM2 val=0;
sample_index += index >> c->phase_shift;
index &= c->phase_mask;
filter = ((FELEM*)c->filter_bank) + c->filter_alloc*index;
if(sample_index + c->filter_length > src_size || -sample_index >= src_size){
break;
}else if(sample_index < 0){
for(i=0; i<c->filter_length; i++)
val += src[FFABS(sample_index + i)] * (FELEM2)filter[i];
OUT(dst[dst_index], val);
}else if(c->linear){
FELEM2 v2=0;
#ifdef LINEAR_CORE
LINEAR_CORE
#else
for(i=0; i<c->filter_length; i++){
val += src[sample_index + i] * (FELEM2)filter[i];
v2 += src[sample_index + i] * (FELEM2)filter[i + c->filter_alloc];
}
#endif
val+=(v2-val)*(FELEML)frac / c->src_incr;
OUT(dst[dst_index], val);
}else{
#ifdef COMMON_CORE
COMMON_CORE
#else
for(i=0; i<c->filter_length; i++){
val += src[sample_index + i] * (FELEM2)filter[i];
}
OUT(dst[dst_index], val);
#endif
}
frac += dst_incr_frac;
index += dst_incr;
if(frac >= c->src_incr){
frac -= c->src_incr;
index++;
}
if(dst_index + 1 == compensation_distance){
compensation_distance= 0;
dst_incr_frac= c->ideal_dst_incr % c->src_incr;
dst_incr= c->ideal_dst_incr / c->src_incr;
}
}
*consumed= FFMAX(sample_index, 0);
index += FFMIN(sample_index, 0) << c->phase_shift;
if(compensation_distance){
compensation_distance -= dst_index;
av_assert1(compensation_distance > 0);
}
}
if(update_ctx){
c->frac= frac;
c->index= index;
c->dst_incr= dst_incr_frac + c->src_incr*dst_incr;
c->compensation_distance= compensation_distance;
}
return dst_index;
}
| {
"code": [
" } else if (compensation_distance == 0 && index >= 0) {",
" int64_t end_index = (1 + src_size - c->filter_length) << c->phase_shift;"
],
"line_no": [
57,
59
]
} | int FUNC_0(swri_resample)(ResampleContext *c, DELEM *dst, const DELEM *src, int *consumed, int src_size, int dst_size, int update_ctx){
int VAR_0, VAR_1;
int VAR_2= c->VAR_2;
int VAR_3= c->VAR_3;
int VAR_4= c->VAR_5 % c->src_incr;
int VAR_5= c->VAR_5 / c->src_incr;
int VAR_6= c->VAR_6;
av_assert1(c->filter_shift == FILTER_SHIFT);
av_assert1(c->felem_size == sizeof(FELEM));
if(VAR_6 == 0 && c->filter_length == 1 && c->phase_shift==0){
int64_t index2= (1LL<<32)*c->VAR_3/c->src_incr + (1LL<<32)*VAR_2;
int64_t incr= (1LL<<32) * c->VAR_5 / c->src_incr;
int VAR_7 = (src_size * (int64_t)c->src_incr - VAR_3 + c->VAR_5 - 1) / c->VAR_5;
dst_size= FFMIN(dst_size, VAR_7);
for(VAR_0=0; VAR_0 < dst_size; VAR_0++){
dst[VAR_0] = src[index2>>32];
index2 += incr;
}
VAR_2 += VAR_0 * VAR_5;
VAR_2 += (VAR_3 + VAR_0 * (int64_t)VAR_4) / c->src_incr;
VAR_3 = (VAR_3 + VAR_0 * (int64_t)VAR_4) % c->src_incr;
av_assert2(VAR_2 >= 0);
*consumed= VAR_2;
VAR_2 = 0;
} else if (VAR_6 == 0 && VAR_2 >= 0) {
int64_t end_index = (1 + src_size - c->filter_length) << c->phase_shift;
int64_t delta_frac = (end_index - VAR_2) * c->src_incr - c->VAR_3;
int VAR_8 = (delta_frac + c->VAR_5 - 1) / c->VAR_5;
int VAR_9 = FFMIN(dst_size, VAR_8);
int VAR_11;
if (!c->linear) {
VAR_11 = VAR_2 >> c->phase_shift;
VAR_2 &= c->phase_mask;
for (VAR_0 = 0; VAR_0 < VAR_9; VAR_0++) {
FELEM *filter = ((FELEM *) c->filter_bank) + c->filter_alloc * VAR_2;
#ifdef COMMON_CORE
COMMON_CORE
#else
FELEM2 val=0;
for (VAR_1 = 0; VAR_1 < c->filter_length; VAR_1++) {
val += src[VAR_11 + VAR_1] * (FELEM2)filter[VAR_1];
}
OUT(dst[VAR_0], val);
#endif
VAR_3 += VAR_4;
VAR_2 += VAR_5;
if (VAR_3 >= c->src_incr) {
VAR_3 -= c->src_incr;
VAR_2++;
}
VAR_11 += VAR_2 >> c->phase_shift;
VAR_2 &= c->phase_mask;
}
} else {
VAR_11 = VAR_2 >> c->phase_shift;
VAR_2 &= c->phase_mask;
for (VAR_0 = 0; VAR_0 < VAR_9; VAR_0++) {
FELEM *filter = ((FELEM *) c->filter_bank) + c->filter_alloc * VAR_2;
FELEM2 val=0, v2 = 0;
#ifdef LINEAR_CORE
LINEAR_CORE
#else
for (VAR_1 = 0; VAR_1 < c->filter_length; VAR_1++) {
val += src[VAR_11 + VAR_1] * (FELEM2)filter[VAR_1];
v2 += src[VAR_11 + VAR_1] * (FELEM2)filter[VAR_1 + c->filter_alloc];
}
#endif
val += (v2 - val) * (FELEML) VAR_3 / c->src_incr;
OUT(dst[VAR_0], val);
VAR_3 += VAR_4;
VAR_2 += VAR_5;
if (VAR_3 >= c->src_incr) {
VAR_3 -= c->src_incr;
VAR_2++;
}
VAR_11 += VAR_2 >> c->phase_shift;
VAR_2 &= c->phase_mask;
}
}
*consumed = VAR_11;
} else {
int VAR_11 = 0;
for(VAR_0=0; VAR_0 < dst_size; VAR_0++){
FELEM *filter;
FELEM2 val=0;
VAR_11 += VAR_2 >> c->phase_shift;
VAR_2 &= c->phase_mask;
filter = ((FELEM*)c->filter_bank) + c->filter_alloc*VAR_2;
if(VAR_11 + c->filter_length > src_size || -VAR_11 >= src_size){
break;
}else if(VAR_11 < 0){
for(VAR_1=0; VAR_1<c->filter_length; VAR_1++)
val += src[FFABS(VAR_11 + VAR_1)] * (FELEM2)filter[VAR_1];
OUT(dst[VAR_0], val);
}else if(c->linear){
FELEM2 v2=0;
#ifdef LINEAR_CORE
LINEAR_CORE
#else
for(VAR_1=0; VAR_1<c->filter_length; VAR_1++){
val += src[VAR_11 + VAR_1] * (FELEM2)filter[VAR_1];
v2 += src[VAR_11 + VAR_1] * (FELEM2)filter[VAR_1 + c->filter_alloc];
}
#endif
val+=(v2-val)*(FELEML)VAR_3 / c->src_incr;
OUT(dst[VAR_0], val);
}else{
#ifdef COMMON_CORE
COMMON_CORE
#else
for(VAR_1=0; VAR_1<c->filter_length; VAR_1++){
val += src[VAR_11 + VAR_1] * (FELEM2)filter[VAR_1];
}
OUT(dst[VAR_0], val);
#endif
}
VAR_3 += VAR_4;
VAR_2 += VAR_5;
if(VAR_3 >= c->src_incr){
VAR_3 -= c->src_incr;
VAR_2++;
}
if(VAR_0 + 1 == VAR_6){
VAR_6= 0;
VAR_4= c->ideal_dst_incr % c->src_incr;
VAR_5= c->ideal_dst_incr / c->src_incr;
}
}
*consumed= FFMAX(VAR_11, 0);
VAR_2 += FFMIN(VAR_11, 0) << c->phase_shift;
if(VAR_6){
VAR_6 -= VAR_0;
av_assert1(VAR_6 > 0);
}
}
if(update_ctx){
c->VAR_3= VAR_3;
c->VAR_2= VAR_2;
c->VAR_5= VAR_4 + c->src_incr*VAR_5;
c->VAR_6= VAR_6;
}
return VAR_0;
}
| [
"int FUNC_0(swri_resample)(ResampleContext *c, DELEM *dst, const DELEM *src, int *consumed, int src_size, int dst_size, int update_ctx){",
"int VAR_0, VAR_1;",
"int VAR_2= c->VAR_2;",
"int VAR_3= c->VAR_3;",
"int VAR_4= c->VAR_5 % c->src_incr;",
"int VAR_5= c->VAR_5 / c->src_incr;",
"int VAR_6= c->... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
[
1
],
[
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17
],
[
19
],
[
23
],
[
25
],
[
27
],
[
29
],
[
33
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
],
[
47
],
[... |
13,103 | static void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel)
{
TCGv t0;
#if !defined(CONFIG_USER_ONLY)
/* The Linux kernel will emulate rdhwr if it's not supported natively.
Therefore only check the ISA in system mode. */
check_insn(ctx, ISA_MIPS32R2);
#endif
t0 = tcg_temp_new();
switch (rd) {
case 0:
gen_helper_rdhwr_cpunum(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 1:
gen_helper_rdhwr_synci_step(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 2:
if (ctx->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdhwr_cc(t0, cpu_env);
if (ctx->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
gen_store_gpr(t0, rt);
/* Break the TB to be able to take timer interrupts immediately
after reading count. BS_STOP isn't sufficient, we need to ensure
we break completely out of translated code. */
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
break;
case 3:
gen_helper_rdhwr_ccres(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 4:
check_insn(ctx, ISA_MIPS32R6);
if (sel != 0) {
/* Performance counter registers are not implemented other than
* control register 0.
*/
generate_exception(ctx, EXCP_RI);
}
gen_helper_rdhwr_performance(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 5:
check_insn(ctx, ISA_MIPS32R6);
gen_helper_rdhwr_xnp(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 29:
#if defined(CONFIG_USER_ONLY)
tcg_gen_ld_tl(t0, cpu_env,
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
gen_store_gpr(t0, rt);
break;
#else
if ((ctx->hflags & MIPS_HFLAG_CP0) ||
(ctx->hflags & MIPS_HFLAG_HWRENA_ULR)) {
tcg_gen_ld_tl(t0, cpu_env,
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
gen_store_gpr(t0, rt);
} else {
generate_exception_end(ctx, EXCP_RI);
}
break;
#endif
default: /* Invalid */
MIPS_INVAL("rdhwr");
generate_exception_end(ctx, EXCP_RI);
break;
}
tcg_temp_free(t0);
}
| true | qemu | c5a49c63fa26e8825ad101dfe86339ae4c216539 | static void gen_rdhwr(DisasContext *ctx, int rt, int rd, int sel)
{
TCGv t0;
#if !defined(CONFIG_USER_ONLY)
check_insn(ctx, ISA_MIPS32R2);
#endif
t0 = tcg_temp_new();
switch (rd) {
case 0:
gen_helper_rdhwr_cpunum(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 1:
gen_helper_rdhwr_synci_step(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 2:
if (ctx->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdhwr_cc(t0, cpu_env);
if (ctx->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
gen_store_gpr(t0, rt);
gen_save_pc(ctx->pc + 4);
ctx->bstate = BS_EXCP;
break;
case 3:
gen_helper_rdhwr_ccres(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 4:
check_insn(ctx, ISA_MIPS32R6);
if (sel != 0) {
generate_exception(ctx, EXCP_RI);
}
gen_helper_rdhwr_performance(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 5:
check_insn(ctx, ISA_MIPS32R6);
gen_helper_rdhwr_xnp(t0, cpu_env);
gen_store_gpr(t0, rt);
break;
case 29:
#if defined(CONFIG_USER_ONLY)
tcg_gen_ld_tl(t0, cpu_env,
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
gen_store_gpr(t0, rt);
break;
#else
if ((ctx->hflags & MIPS_HFLAG_CP0) ||
(ctx->hflags & MIPS_HFLAG_HWRENA_ULR)) {
tcg_gen_ld_tl(t0, cpu_env,
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
gen_store_gpr(t0, rt);
} else {
generate_exception_end(ctx, EXCP_RI);
}
break;
#endif
default:
MIPS_INVAL("rdhwr");
generate_exception_end(ctx, EXCP_RI);
break;
}
tcg_temp_free(t0);
}
| {
"code": [
" if (ctx->tb->cflags & CF_USE_ICOUNT) {",
" if (ctx->tb->cflags & CF_USE_ICOUNT) {"
],
"line_no": [
43,
43
]
} | static void FUNC_0(DisasContext *VAR_0, int VAR_1, int VAR_2, int VAR_3)
{
TCGv t0;
#if !defined(CONFIG_USER_ONLY)
check_insn(VAR_0, ISA_MIPS32R2);
#endif
t0 = tcg_temp_new();
switch (VAR_2) {
case 0:
gen_helper_rdhwr_cpunum(t0, cpu_env);
gen_store_gpr(t0, VAR_1);
break;
case 1:
gen_helper_rdhwr_synci_step(t0, cpu_env);
gen_store_gpr(t0, VAR_1);
break;
case 2:
if (VAR_0->tb->cflags & CF_USE_ICOUNT) {
gen_io_start();
}
gen_helper_rdhwr_cc(t0, cpu_env);
if (VAR_0->tb->cflags & CF_USE_ICOUNT) {
gen_io_end();
}
gen_store_gpr(t0, VAR_1);
gen_save_pc(VAR_0->pc + 4);
VAR_0->bstate = BS_EXCP;
break;
case 3:
gen_helper_rdhwr_ccres(t0, cpu_env);
gen_store_gpr(t0, VAR_1);
break;
case 4:
check_insn(VAR_0, ISA_MIPS32R6);
if (VAR_3 != 0) {
generate_exception(VAR_0, EXCP_RI);
}
gen_helper_rdhwr_performance(t0, cpu_env);
gen_store_gpr(t0, VAR_1);
break;
case 5:
check_insn(VAR_0, ISA_MIPS32R6);
gen_helper_rdhwr_xnp(t0, cpu_env);
gen_store_gpr(t0, VAR_1);
break;
case 29:
#if defined(CONFIG_USER_ONLY)
tcg_gen_ld_tl(t0, cpu_env,
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
gen_store_gpr(t0, VAR_1);
break;
#else
if ((VAR_0->hflags & MIPS_HFLAG_CP0) ||
(VAR_0->hflags & MIPS_HFLAG_HWRENA_ULR)) {
tcg_gen_ld_tl(t0, cpu_env,
offsetof(CPUMIPSState, active_tc.CP0_UserLocal));
gen_store_gpr(t0, VAR_1);
} else {
generate_exception_end(VAR_0, EXCP_RI);
}
break;
#endif
default:
MIPS_INVAL("rdhwr");
generate_exception_end(VAR_0, EXCP_RI);
break;
}
tcg_temp_free(t0);
}
| [
"static void FUNC_0(DisasContext *VAR_0, int VAR_1, int VAR_2, int VAR_3)\n{",
"TCGv t0;",
"#if !defined(CONFIG_USER_ONLY)\ncheck_insn(VAR_0, ISA_MIPS32R2);",
"#endif\nt0 = tcg_temp_new();",
"switch (VAR_2) {",
"case 0:\ngen_helper_rdhwr_cpunum(t0, cpu_env);",
"gen_store_gpr(t0, VAR_1);",
"break;",
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9,
15
],
[
17,
19
],
[
23
],
[
25,
27
],
[
29
],
[
31
],
[
33,
35
],
[
37
],
[
39
],
[
41,
43
],
[
45
],
[
47
],
[
49
],
[
51
],
[
53
],
... |
13,104 | static int dca_subframe_footer(DCAContext *s, int base_channel)
{
int in, out, aux_data_count, aux_data_end, reserved;
uint32_t nsyncaux;
/*
* Unpack optional information
*/
/* presumably optional information only appears in the core? */
if (!base_channel) {
if (s->timestamp)
skip_bits_long(&s->gb, 32);
if (s->aux_data) {
aux_data_count = get_bits(&s->gb, 6);
// align (32-bit)
skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31);
aux_data_end = 8 * aux_data_count + get_bits_count(&s->gb);
if ((nsyncaux = get_bits_long(&s->gb, 32)) != DCA_NSYNCAUX) {
av_log(s->avctx, AV_LOG_ERROR, "nSYNCAUX mismatch %#"PRIx32"\n",
nsyncaux);
return AVERROR_INVALIDDATA;
}
if (get_bits1(&s->gb)) { // bAUXTimeStampFlag
avpriv_request_sample(s->avctx,
"Auxiliary Decode Time Stamp Flag");
// align (4-bit)
skip_bits(&s->gb, (-get_bits_count(&s->gb)) & 4);
// 44 bits: nMSByte (8), nMarker (4), nLSByte (28), nMarker (4)
skip_bits_long(&s->gb, 44);
}
if ((s->core_downmix = get_bits1(&s->gb))) {
int am = get_bits(&s->gb, 3);
switch (am) {
case 0:
s->core_downmix_amode = DCA_MONO;
break;
case 1:
s->core_downmix_amode = DCA_STEREO;
break;
case 2:
s->core_downmix_amode = DCA_STEREO_TOTAL;
break;
case 3:
s->core_downmix_amode = DCA_3F;
break;
case 4:
s->core_downmix_amode = DCA_2F1R;
break;
case 5:
s->core_downmix_amode = DCA_2F2R;
break;
case 6:
s->core_downmix_amode = DCA_3F1R;
break;
default:
av_log(s->avctx, AV_LOG_ERROR,
"Invalid mode %d for embedded downmix coefficients\n",
am);
return AVERROR_INVALIDDATA;
}
for (out = 0; out < ff_dca_channels[s->core_downmix_amode]; out++) {
for (in = 0; in < s->audio_header.prim_channels + !!s->lfe; in++) {
uint16_t tmp = get_bits(&s->gb, 9);
if ((tmp & 0xFF) > 241) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid downmix coefficient code %"PRIu16"\n",
tmp);
return AVERROR_INVALIDDATA;
}
s->core_downmix_codes[in][out] = tmp;
}
}
}
align_get_bits(&s->gb); // byte align
skip_bits(&s->gb, 16); // nAUXCRC16
// additional data (reserved, cf. ETSI TS 102 114 V1.4.1)
if ((reserved = (aux_data_end - get_bits_count(&s->gb))) < 0) {
av_log(s->avctx, AV_LOG_ERROR,
"Overread auxiliary data by %d bits\n", -reserved);
return AVERROR_INVALIDDATA;
} else if (reserved) {
avpriv_request_sample(s->avctx,
"Core auxiliary data reserved content");
skip_bits_long(&s->gb, reserved);
}
}
if (s->crc_present && s->dynrange)
get_bits(&s->gb, 16);
}
return 0;
}
| true | FFmpeg | c12c085be7e86880924249e5cb3f898e45dee134 | static int dca_subframe_footer(DCAContext *s, int base_channel)
{
int in, out, aux_data_count, aux_data_end, reserved;
uint32_t nsyncaux;
if (!base_channel) {
if (s->timestamp)
skip_bits_long(&s->gb, 32);
if (s->aux_data) {
aux_data_count = get_bits(&s->gb, 6);
skip_bits_long(&s->gb, (-get_bits_count(&s->gb)) & 31);
aux_data_end = 8 * aux_data_count + get_bits_count(&s->gb);
if ((nsyncaux = get_bits_long(&s->gb, 32)) != DCA_NSYNCAUX) {
av_log(s->avctx, AV_LOG_ERROR, "nSYNCAUX mismatch %#"PRIx32"\n",
nsyncaux);
return AVERROR_INVALIDDATA;
}
if (get_bits1(&s->gb)) {
avpriv_request_sample(s->avctx,
"Auxiliary Decode Time Stamp Flag");
skip_bits(&s->gb, (-get_bits_count(&s->gb)) & 4);
skip_bits_long(&s->gb, 44);
}
if ((s->core_downmix = get_bits1(&s->gb))) {
int am = get_bits(&s->gb, 3);
switch (am) {
case 0:
s->core_downmix_amode = DCA_MONO;
break;
case 1:
s->core_downmix_amode = DCA_STEREO;
break;
case 2:
s->core_downmix_amode = DCA_STEREO_TOTAL;
break;
case 3:
s->core_downmix_amode = DCA_3F;
break;
case 4:
s->core_downmix_amode = DCA_2F1R;
break;
case 5:
s->core_downmix_amode = DCA_2F2R;
break;
case 6:
s->core_downmix_amode = DCA_3F1R;
break;
default:
av_log(s->avctx, AV_LOG_ERROR,
"Invalid mode %d for embedded downmix coefficients\n",
am);
return AVERROR_INVALIDDATA;
}
for (out = 0; out < ff_dca_channels[s->core_downmix_amode]; out++) {
for (in = 0; in < s->audio_header.prim_channels + !!s->lfe; in++) {
uint16_t tmp = get_bits(&s->gb, 9);
if ((tmp & 0xFF) > 241) {
av_log(s->avctx, AV_LOG_ERROR,
"Invalid downmix coefficient code %"PRIu16"\n",
tmp);
return AVERROR_INVALIDDATA;
}
s->core_downmix_codes[in][out] = tmp;
}
}
}
align_get_bits(&s->gb);
skip_bits(&s->gb, 16);
if ((reserved = (aux_data_end - get_bits_count(&s->gb))) < 0) {
av_log(s->avctx, AV_LOG_ERROR,
"Overread auxiliary data by %d bits\n", -reserved);
return AVERROR_INVALIDDATA;
} else if (reserved) {
avpriv_request_sample(s->avctx,
"Core auxiliary data reserved content");
skip_bits_long(&s->gb, reserved);
}
}
if (s->crc_present && s->dynrange)
get_bits(&s->gb, 16);
}
return 0;
}
| {
"code": [
" if ((reserved = (aux_data_end - get_bits_count(&s->gb))) < 0) {",
" av_log(s->avctx, AV_LOG_ERROR,",
" \"Overread auxiliary data by %d bits\\n\", -reserved);",
" return AVERROR_INVALIDDATA;",
" } else if (reserved) {"
],
"line_no": [
171,
173,
175,
51,
179
]
} | static int FUNC_0(DCAContext *VAR_0, int VAR_1)
{
int VAR_2, VAR_3, VAR_4, VAR_5, VAR_6;
uint32_t nsyncaux;
if (!VAR_1) {
if (VAR_0->timestamp)
skip_bits_long(&VAR_0->gb, 32);
if (VAR_0->aux_data) {
VAR_4 = get_bits(&VAR_0->gb, 6);
skip_bits_long(&VAR_0->gb, (-get_bits_count(&VAR_0->gb)) & 31);
VAR_5 = 8 * VAR_4 + get_bits_count(&VAR_0->gb);
if ((nsyncaux = get_bits_long(&VAR_0->gb, 32)) != DCA_NSYNCAUX) {
av_log(VAR_0->avctx, AV_LOG_ERROR, "nSYNCAUX mismatch %#"PRIx32"\n",
nsyncaux);
return AVERROR_INVALIDDATA;
}
if (get_bits1(&VAR_0->gb)) {
avpriv_request_sample(VAR_0->avctx,
"Auxiliary Decode Time Stamp Flag");
skip_bits(&VAR_0->gb, (-get_bits_count(&VAR_0->gb)) & 4);
skip_bits_long(&VAR_0->gb, 44);
}
if ((VAR_0->core_downmix = get_bits1(&VAR_0->gb))) {
int VAR_7 = get_bits(&VAR_0->gb, 3);
switch (VAR_7) {
case 0:
VAR_0->core_downmix_amode = DCA_MONO;
break;
case 1:
VAR_0->core_downmix_amode = DCA_STEREO;
break;
case 2:
VAR_0->core_downmix_amode = DCA_STEREO_TOTAL;
break;
case 3:
VAR_0->core_downmix_amode = DCA_3F;
break;
case 4:
VAR_0->core_downmix_amode = DCA_2F1R;
break;
case 5:
VAR_0->core_downmix_amode = DCA_2F2R;
break;
case 6:
VAR_0->core_downmix_amode = DCA_3F1R;
break;
default:
av_log(VAR_0->avctx, AV_LOG_ERROR,
"Invalid mode %d for embedded downmix coefficients\n",
VAR_7);
return AVERROR_INVALIDDATA;
}
for (VAR_3 = 0; VAR_3 < ff_dca_channels[VAR_0->core_downmix_amode]; VAR_3++) {
for (VAR_2 = 0; VAR_2 < VAR_0->audio_header.prim_channels + !!VAR_0->lfe; VAR_2++) {
uint16_t tmp = get_bits(&VAR_0->gb, 9);
if ((tmp & 0xFF) > 241) {
av_log(VAR_0->avctx, AV_LOG_ERROR,
"Invalid downmix coefficient code %"PRIu16"\n",
tmp);
return AVERROR_INVALIDDATA;
}
VAR_0->core_downmix_codes[VAR_2][VAR_3] = tmp;
}
}
}
align_get_bits(&VAR_0->gb);
skip_bits(&VAR_0->gb, 16);
if ((VAR_6 = (VAR_5 - get_bits_count(&VAR_0->gb))) < 0) {
av_log(VAR_0->avctx, AV_LOG_ERROR,
"Overread auxiliary data by %d bits\n", -VAR_6);
return AVERROR_INVALIDDATA;
} else if (VAR_6) {
avpriv_request_sample(VAR_0->avctx,
"Core auxiliary data VAR_6 content");
skip_bits_long(&VAR_0->gb, VAR_6);
}
}
if (VAR_0->crc_present && VAR_0->dynrange)
get_bits(&VAR_0->gb, 16);
}
return 0;
}
| [
"static int FUNC_0(DCAContext *VAR_0, int VAR_1)\n{",
"int VAR_2, VAR_3, VAR_4, VAR_5, VAR_6;",
"uint32_t nsyncaux;",
"if (!VAR_1) {",
"if (VAR_0->timestamp)\nskip_bits_long(&VAR_0->gb, 32);",
"if (VAR_0->aux_data) {",
"VAR_4 = get_bits(&VAR_0->gb, 6);",
"skip_bits_long(&VAR_0->gb, (-get_bits_count(&V... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
21
],
[
23,
25
],
[
29
],
[
31
],
[
37
],
[
41
],
[
45
],
[
47,
49
],
[
51
],
[
53
],
[
57
],
[
59,
61
],
[
65
],
[
69
],
[
71
],
... |
13,105 | static void pred_temp_direct_motion(const H264Context *const h, H264SliceContext *sl,
int *mb_type)
{
int b8_stride = 2;
int b4_stride = h->b_stride;
int mb_xy = sl->mb_xy, mb_y = sl->mb_y;
int mb_type_col[2];
const int16_t (*l1mv0)[2], (*l1mv1)[2];
const int8_t *l1ref0, *l1ref1;
const int is_b8x8 = IS_8X8(*mb_type);
unsigned int sub_mb_type;
int i8, i4;
assert(sl->ref_list[1][0].reference & 3);
await_reference_mb_row(h, sl->ref_list[1][0].parent,
sl->mb_y + !!IS_INTERLACED(*mb_type));
if (IS_INTERLACED(sl->ref_list[1][0].parent->mb_type[mb_xy])) { // AFL/AFR/FR/FL -> AFL/FL
if (!IS_INTERLACED(*mb_type)) { // AFR/FR -> AFL/FL
mb_y = (sl->mb_y & ~1) + sl->col_parity;
mb_xy = sl->mb_x +
((sl->mb_y & ~1) + sl->col_parity) * h->mb_stride;
b8_stride = 0;
} else {
mb_y += sl->col_fieldoff;
mb_xy += h->mb_stride * sl->col_fieldoff; // non-zero for FL -> FL & differ parity
}
goto single_col;
} else { // AFL/AFR/FR/FL -> AFR/FR
if (IS_INTERLACED(*mb_type)) { // AFL /FL -> AFR/FR
mb_y = sl->mb_y & ~1;
mb_xy = sl->mb_x + (sl->mb_y & ~1) * h->mb_stride;
mb_type_col[0] = sl->ref_list[1][0].parent->mb_type[mb_xy];
mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy + h->mb_stride];
b8_stride = 2 + 4 * h->mb_stride;
b4_stride *= 6;
if (IS_INTERLACED(mb_type_col[0]) !=
IS_INTERLACED(mb_type_col[1])) {
mb_type_col[0] &= ~MB_TYPE_INTERLACED;
mb_type_col[1] &= ~MB_TYPE_INTERLACED;
}
sub_mb_type = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) &&
(mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) &&
!is_b8x8) {
*mb_type |= MB_TYPE_16x8 | MB_TYPE_L0L1 |
MB_TYPE_DIRECT2; /* B_16x8 */
} else {
*mb_type |= MB_TYPE_8x8 | MB_TYPE_L0L1;
}
} else { // AFR/FR -> AFR/FR
single_col:
mb_type_col[0] =
mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy];
sub_mb_type = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
MB_TYPE_DIRECT2; /* B_SUB_8x8 */
if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) {
*mb_type |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
MB_TYPE_DIRECT2; /* B_16x16 */
} else if (!is_b8x8 &&
(mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) {
*mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 |
(mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
} else {
if (!h->ps.sps->direct_8x8_inference_flag) {
/* FIXME: save sub mb types from previous frames (or derive
* from MVs) so we know exactly what block size to use */
sub_mb_type = MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
MB_TYPE_DIRECT2; /* B_SUB_4x4 */
}
*mb_type |= MB_TYPE_8x8 | MB_TYPE_L0L1;
}
}
}
await_reference_mb_row(h, sl->ref_list[1][0].parent, mb_y);
l1mv0 = &sl->ref_list[1][0].parent->motion_val[0][h->mb2b_xy[mb_xy]];
l1mv1 = &sl->ref_list[1][0].parent->motion_val[1][h->mb2b_xy[mb_xy]];
l1ref0 = &sl->ref_list[1][0].parent->ref_index[0][4 * mb_xy];
l1ref1 = &sl->ref_list[1][0].parent->ref_index[1][4 * mb_xy];
if (!b8_stride) {
if (sl->mb_y & 1) {
l1ref0 += 2;
l1ref1 += 2;
l1mv0 += 2 * b4_stride;
l1mv1 += 2 * b4_stride;
}
}
{
const int *map_col_to_list0[2] = { sl->map_col_to_list0[0],
sl->map_col_to_list0[1] };
const int *dist_scale_factor = sl->dist_scale_factor;
int ref_offset;
if (FRAME_MBAFF(h) && IS_INTERLACED(*mb_type)) {
map_col_to_list0[0] = sl->map_col_to_list0_field[sl->mb_y & 1][0];
map_col_to_list0[1] = sl->map_col_to_list0_field[sl->mb_y & 1][1];
dist_scale_factor = sl->dist_scale_factor_field[sl->mb_y & 1];
}
ref_offset = (sl->ref_list[1][0].parent->mbaff << 4) & (mb_type_col[0] >> 3);
if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
int y_shift = 2 * !IS_INTERLACED(*mb_type);
assert(h->ps.sps->direct_8x8_inference_flag);
for (i8 = 0; i8 < 4; i8++) {
const int x8 = i8 & 1;
const int y8 = i8 >> 1;
int ref0, scale;
const int16_t (*l1mv)[2] = l1mv0;
if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
continue;
sl->sub_mb_type[i8] = sub_mb_type;
fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
if (IS_INTRA(mb_type_col[y8])) {
fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
continue;
}
ref0 = l1ref0[x8 + y8 * b8_stride];
if (ref0 >= 0)
ref0 = map_col_to_list0[0][ref0 + ref_offset];
else {
ref0 = map_col_to_list0[1][l1ref1[x8 + y8 * b8_stride] +
ref_offset];
l1mv = l1mv1;
}
scale = dist_scale_factor[ref0];
fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
ref0, 1);
{
const int16_t *mv_col = l1mv[x8 * 3 + y8 * b4_stride];
int my_col = (mv_col[1] << y_shift) / 2;
int mx = (scale * mv_col[0] + 128) >> 8;
int my = (scale * my_col + 128) >> 8;
fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
pack16to32(mx, my), 4);
fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
pack16to32(mx - mv_col[0], my - my_col), 4);
}
}
return;
}
/* one-to-one mv scaling */
if (IS_16X16(*mb_type)) {
int ref, mv0, mv1;
fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
if (IS_INTRA(mb_type_col[0])) {
ref = mv0 = mv1 = 0;
} else {
const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset]
: map_col_to_list0[1][l1ref1[0] + ref_offset];
const int scale = dist_scale_factor[ref0];
const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
int mv_l0[2];
mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
ref = ref0;
mv0 = pack16to32(mv_l0[0], mv_l0[1]);
mv1 = pack16to32(mv_l0[0] - mv_col[0], mv_l0[1] - mv_col[1]);
}
fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
} else {
for (i8 = 0; i8 < 4; i8++) {
const int x8 = i8 & 1;
const int y8 = i8 >> 1;
int ref0, scale;
const int16_t (*l1mv)[2] = l1mv0;
if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
continue;
sl->sub_mb_type[i8] = sub_mb_type;
fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
if (IS_INTRA(mb_type_col[0])) {
fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
continue;
}
assert(b8_stride == 2);
ref0 = l1ref0[i8];
if (ref0 >= 0)
ref0 = map_col_to_list0[0][ref0 + ref_offset];
else {
ref0 = map_col_to_list0[1][l1ref1[i8] + ref_offset];
l1mv = l1mv1;
}
scale = dist_scale_factor[ref0];
fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
ref0, 1);
if (IS_SUB_8X8(sub_mb_type)) {
const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride];
int mx = (scale * mv_col[0] + 128) >> 8;
int my = (scale * mv_col[1] + 128) >> 8;
fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
pack16to32(mx, my), 4);
fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
pack16to32(mx - mv_col[0], my - mv_col[1]), 4);
} else {
for (i4 = 0; i4 < 4; i4++) {
const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) +
(y8 * 2 + (i4 >> 1)) * b4_stride];
int16_t *mv_l0 = sl->mv_cache[0][scan8[i8 * 4 + i4]];
mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
AV_WN32A(sl->mv_cache[1][scan8[i8 * 4 + i4]],
pack16to32(mv_l0[0] - mv_col[0],
mv_l0[1] - mv_col[1]));
}
}
}
}
}
}
| true | FFmpeg | 04763c6f87690b31cfcd0d324cf36a451531dcd0 | static void pred_temp_direct_motion(const H264Context *const h, H264SliceContext *sl,
int *mb_type)
{
int b8_stride = 2;
int b4_stride = h->b_stride;
int mb_xy = sl->mb_xy, mb_y = sl->mb_y;
int mb_type_col[2];
const int16_t (*l1mv0)[2], (*l1mv1)[2];
const int8_t *l1ref0, *l1ref1;
const int is_b8x8 = IS_8X8(*mb_type);
unsigned int sub_mb_type;
int i8, i4;
assert(sl->ref_list[1][0].reference & 3);
await_reference_mb_row(h, sl->ref_list[1][0].parent,
sl->mb_y + !!IS_INTERLACED(*mb_type));
if (IS_INTERLACED(sl->ref_list[1][0].parent->mb_type[mb_xy])) {
if (!IS_INTERLACED(*mb_type)) {
mb_y = (sl->mb_y & ~1) + sl->col_parity;
mb_xy = sl->mb_x +
((sl->mb_y & ~1) + sl->col_parity) * h->mb_stride;
b8_stride = 0;
} else {
mb_y += sl->col_fieldoff;
mb_xy += h->mb_stride * sl->col_fieldoff;
}
goto single_col;
} else {
if (IS_INTERLACED(*mb_type)) {
mb_y = sl->mb_y & ~1;
mb_xy = sl->mb_x + (sl->mb_y & ~1) * h->mb_stride;
mb_type_col[0] = sl->ref_list[1][0].parent->mb_type[mb_xy];
mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy + h->mb_stride];
b8_stride = 2 + 4 * h->mb_stride;
b4_stride *= 6;
if (IS_INTERLACED(mb_type_col[0]) !=
IS_INTERLACED(mb_type_col[1])) {
mb_type_col[0] &= ~MB_TYPE_INTERLACED;
mb_type_col[1] &= ~MB_TYPE_INTERLACED;
}
sub_mb_type = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
MB_TYPE_DIRECT2;
if ((mb_type_col[0] & MB_TYPE_16x16_OR_INTRA) &&
(mb_type_col[1] & MB_TYPE_16x16_OR_INTRA) &&
!is_b8x8) {
*mb_type |= MB_TYPE_16x8 | MB_TYPE_L0L1 |
MB_TYPE_DIRECT2;
} else {
*mb_type |= MB_TYPE_8x8 | MB_TYPE_L0L1;
}
} else {
single_col:
mb_type_col[0] =
mb_type_col[1] = sl->ref_list[1][0].parent->mb_type[mb_xy];
sub_mb_type = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
MB_TYPE_DIRECT2;
if (!is_b8x8 && (mb_type_col[0] & MB_TYPE_16x16_OR_INTRA)) {
*mb_type |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
MB_TYPE_DIRECT2;
} else if (!is_b8x8 &&
(mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) {
*mb_type |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 |
(mb_type_col[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
} else {
if (!h->ps.sps->direct_8x8_inference_flag) {
sub_mb_type = MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
MB_TYPE_DIRECT2;
}
*mb_type |= MB_TYPE_8x8 | MB_TYPE_L0L1;
}
}
}
await_reference_mb_row(h, sl->ref_list[1][0].parent, mb_y);
l1mv0 = &sl->ref_list[1][0].parent->motion_val[0][h->mb2b_xy[mb_xy]];
l1mv1 = &sl->ref_list[1][0].parent->motion_val[1][h->mb2b_xy[mb_xy]];
l1ref0 = &sl->ref_list[1][0].parent->ref_index[0][4 * mb_xy];
l1ref1 = &sl->ref_list[1][0].parent->ref_index[1][4 * mb_xy];
if (!b8_stride) {
if (sl->mb_y & 1) {
l1ref0 += 2;
l1ref1 += 2;
l1mv0 += 2 * b4_stride;
l1mv1 += 2 * b4_stride;
}
}
{
const int *map_col_to_list0[2] = { sl->map_col_to_list0[0],
sl->map_col_to_list0[1] };
const int *dist_scale_factor = sl->dist_scale_factor;
int ref_offset;
if (FRAME_MBAFF(h) && IS_INTERLACED(*mb_type)) {
map_col_to_list0[0] = sl->map_col_to_list0_field[sl->mb_y & 1][0];
map_col_to_list0[1] = sl->map_col_to_list0_field[sl->mb_y & 1][1];
dist_scale_factor = sl->dist_scale_factor_field[sl->mb_y & 1];
}
ref_offset = (sl->ref_list[1][0].parent->mbaff << 4) & (mb_type_col[0] >> 3);
if (IS_INTERLACED(*mb_type) != IS_INTERLACED(mb_type_col[0])) {
int y_shift = 2 * !IS_INTERLACED(*mb_type);
assert(h->ps.sps->direct_8x8_inference_flag);
for (i8 = 0; i8 < 4; i8++) {
const int x8 = i8 & 1;
const int y8 = i8 >> 1;
int ref0, scale;
const int16_t (*l1mv)[2] = l1mv0;
if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
continue;
sl->sub_mb_type[i8] = sub_mb_type;
fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
if (IS_INTRA(mb_type_col[y8])) {
fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
continue;
}
ref0 = l1ref0[x8 + y8 * b8_stride];
if (ref0 >= 0)
ref0 = map_col_to_list0[0][ref0 + ref_offset];
else {
ref0 = map_col_to_list0[1][l1ref1[x8 + y8 * b8_stride] +
ref_offset];
l1mv = l1mv1;
}
scale = dist_scale_factor[ref0];
fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
ref0, 1);
{
const int16_t *mv_col = l1mv[x8 * 3 + y8 * b4_stride];
int my_col = (mv_col[1] << y_shift) / 2;
int mx = (scale * mv_col[0] + 128) >> 8;
int my = (scale * my_col + 128) >> 8;
fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
pack16to32(mx, my), 4);
fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
pack16to32(mx - mv_col[0], my - my_col), 4);
}
}
return;
}
if (IS_16X16(*mb_type)) {
int ref, mv0, mv1;
fill_rectangle(&sl->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
if (IS_INTRA(mb_type_col[0])) {
ref = mv0 = mv1 = 0;
} else {
const int ref0 = l1ref0[0] >= 0 ? map_col_to_list0[0][l1ref0[0] + ref_offset]
: map_col_to_list0[1][l1ref1[0] + ref_offset];
const int scale = dist_scale_factor[ref0];
const int16_t *mv_col = l1ref0[0] >= 0 ? l1mv0[0] : l1mv1[0];
int mv_l0[2];
mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
ref = ref0;
mv0 = pack16to32(mv_l0[0], mv_l0[1]);
mv1 = pack16to32(mv_l0[0] - mv_col[0], mv_l0[1] - mv_col[1]);
}
fill_rectangle(&sl->ref_cache[0][scan8[0]], 4, 4, 8, ref, 1);
fill_rectangle(&sl->mv_cache[0][scan8[0]], 4, 4, 8, mv0, 4);
fill_rectangle(&sl->mv_cache[1][scan8[0]], 4, 4, 8, mv1, 4);
} else {
for (i8 = 0; i8 < 4; i8++) {
const int x8 = i8 & 1;
const int y8 = i8 >> 1;
int ref0, scale;
const int16_t (*l1mv)[2] = l1mv0;
if (is_b8x8 && !IS_DIRECT(sl->sub_mb_type[i8]))
continue;
sl->sub_mb_type[i8] = sub_mb_type;
fill_rectangle(&sl->ref_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 1);
if (IS_INTRA(mb_type_col[0])) {
fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 1);
fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8, 0, 4);
fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8, 0, 4);
continue;
}
assert(b8_stride == 2);
ref0 = l1ref0[i8];
if (ref0 >= 0)
ref0 = map_col_to_list0[0][ref0 + ref_offset];
else {
ref0 = map_col_to_list0[1][l1ref1[i8] + ref_offset];
l1mv = l1mv1;
}
scale = dist_scale_factor[ref0];
fill_rectangle(&sl->ref_cache[0][scan8[i8 * 4]], 2, 2, 8,
ref0, 1);
if (IS_SUB_8X8(sub_mb_type)) {
const int16_t *mv_col = l1mv[x8 * 3 + y8 * 3 * b4_stride];
int mx = (scale * mv_col[0] + 128) >> 8;
int my = (scale * mv_col[1] + 128) >> 8;
fill_rectangle(&sl->mv_cache[0][scan8[i8 * 4]], 2, 2, 8,
pack16to32(mx, my), 4);
fill_rectangle(&sl->mv_cache[1][scan8[i8 * 4]], 2, 2, 8,
pack16to32(mx - mv_col[0], my - mv_col[1]), 4);
} else {
for (i4 = 0; i4 < 4; i4++) {
const int16_t *mv_col = l1mv[x8 * 2 + (i4 & 1) +
(y8 * 2 + (i4 >> 1)) * b4_stride];
int16_t *mv_l0 = sl->mv_cache[0][scan8[i8 * 4 + i4]];
mv_l0[0] = (scale * mv_col[0] + 128) >> 8;
mv_l0[1] = (scale * mv_col[1] + 128) >> 8;
AV_WN32A(sl->mv_cache[1][scan8[i8 * 4 + i4]],
pack16to32(mv_l0[0] - mv_col[0],
mv_l0[1] - mv_col[1]));
}
}
}
}
}
}
| {
"code": [
" await_reference_mb_row(h, sl->ref_list[1][0].parent,",
" await_reference_mb_row(h, sl->ref_list[1][0].parent, mb_y);",
" await_reference_mb_row(h, sl->ref_list[1][0].parent,",
" await_reference_mb_row(h, sl->ref_list[1][0].parent, mb_y);"
],
"line_no": [
31,
161,
31,
161
]
} | static void FUNC_0(const H264Context *const VAR_0, H264SliceContext *VAR_1,
int *VAR_2)
{
int VAR_3 = 2;
int VAR_4 = VAR_0->b_stride;
int VAR_5 = VAR_1->VAR_5, VAR_6 = VAR_1->VAR_6;
int VAR_7[2];
const FUNC_2 (*l1mv0)[2], (*l1mv1)[2];
const int8_t *VAR_8, *l1ref1;
const int VAR_9 = IS_8X8(*VAR_2);
unsigned int VAR_10;
int VAR_11, VAR_12;
assert(VAR_1->ref_list[1][0].reference & 3);
await_reference_mb_row(VAR_0, VAR_1->ref_list[1][0].parent,
VAR_1->VAR_6 + !!IS_INTERLACED(*VAR_2));
if (IS_INTERLACED(VAR_1->ref_list[1][0].parent->VAR_2[VAR_5])) {
if (!IS_INTERLACED(*VAR_2)) {
VAR_6 = (VAR_1->VAR_6 & ~1) + VAR_1->col_parity;
VAR_5 = VAR_1->mb_x +
((VAR_1->VAR_6 & ~1) + VAR_1->col_parity) * VAR_0->mb_stride;
VAR_3 = 0;
} else {
VAR_6 += VAR_1->col_fieldoff;
VAR_5 += VAR_0->mb_stride * VAR_1->col_fieldoff;
}
goto single_col;
} else {
if (IS_INTERLACED(*VAR_2)) {
VAR_6 = VAR_1->VAR_6 & ~1;
VAR_5 = VAR_1->mb_x + (VAR_1->VAR_6 & ~1) * VAR_0->mb_stride;
VAR_7[0] = VAR_1->ref_list[1][0].parent->VAR_2[VAR_5];
VAR_7[1] = VAR_1->ref_list[1][0].parent->VAR_2[VAR_5 + VAR_0->mb_stride];
VAR_3 = 2 + 4 * VAR_0->mb_stride;
VAR_4 *= 6;
if (IS_INTERLACED(VAR_7[0]) !=
IS_INTERLACED(VAR_7[1])) {
VAR_7[0] &= ~MB_TYPE_INTERLACED;
VAR_7[1] &= ~MB_TYPE_INTERLACED;
}
VAR_10 = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
MB_TYPE_DIRECT2;
if ((VAR_7[0] & MB_TYPE_16x16_OR_INTRA) &&
(VAR_7[1] & MB_TYPE_16x16_OR_INTRA) &&
!VAR_9) {
*VAR_2 |= MB_TYPE_16x8 | MB_TYPE_L0L1 |
MB_TYPE_DIRECT2;
} else {
*VAR_2 |= MB_TYPE_8x8 | MB_TYPE_L0L1;
}
} else {
single_col:
VAR_7[0] =
VAR_7[1] = VAR_1->ref_list[1][0].parent->VAR_2[VAR_5];
VAR_10 = MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
MB_TYPE_DIRECT2;
if (!VAR_9 && (VAR_7[0] & MB_TYPE_16x16_OR_INTRA)) {
*VAR_2 |= MB_TYPE_16x16 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
MB_TYPE_DIRECT2;
} else if (!VAR_9 &&
(VAR_7[0] & (MB_TYPE_16x8 | MB_TYPE_8x16))) {
*VAR_2 |= MB_TYPE_L0L1 | MB_TYPE_DIRECT2 |
(VAR_7[0] & (MB_TYPE_16x8 | MB_TYPE_8x16));
} else {
if (!VAR_0->ps.sps->direct_8x8_inference_flag) {
VAR_10 = MB_TYPE_8x8 | MB_TYPE_P0L0 | MB_TYPE_P0L1 |
MB_TYPE_DIRECT2;
}
*VAR_2 |= MB_TYPE_8x8 | MB_TYPE_L0L1;
}
}
}
await_reference_mb_row(VAR_0, VAR_1->ref_list[1][0].parent, VAR_6);
l1mv0 = &VAR_1->ref_list[1][0].parent->motion_val[0][VAR_0->mb2b_xy[VAR_5]];
l1mv1 = &VAR_1->ref_list[1][0].parent->motion_val[1][VAR_0->mb2b_xy[VAR_5]];
VAR_8 = &VAR_1->ref_list[1][0].parent->ref_index[0][4 * VAR_5];
l1ref1 = &VAR_1->ref_list[1][0].parent->ref_index[1][4 * VAR_5];
if (!VAR_3) {
if (VAR_1->VAR_6 & 1) {
VAR_8 += 2;
l1ref1 += 2;
l1mv0 += 2 * VAR_4;
l1mv1 += 2 * VAR_4;
}
}
{
const int *VAR_13[2] = { VAR_1->VAR_13[0],
VAR_1->VAR_13[1] };
const int *VAR_14 = VAR_1->VAR_14;
int VAR_15;
if (FRAME_MBAFF(VAR_0) && IS_INTERLACED(*VAR_2)) {
VAR_13[0] = VAR_1->map_col_to_list0_field[VAR_1->VAR_6 & 1][0];
VAR_13[1] = VAR_1->map_col_to_list0_field[VAR_1->VAR_6 & 1][1];
VAR_14 = VAR_1->dist_scale_factor_field[VAR_1->VAR_6 & 1];
}
VAR_15 = (VAR_1->ref_list[1][0].parent->mbaff << 4) & (VAR_7[0] >> 3);
if (IS_INTERLACED(*VAR_2) != IS_INTERLACED(VAR_7[0])) {
int VAR_16 = 2 * !IS_INTERLACED(*VAR_2);
assert(VAR_0->ps.sps->direct_8x8_inference_flag);
for (VAR_11 = 0; VAR_11 < 4; VAR_11++) {
const int VAR_29 = VAR_11 & 1;
const int VAR_29 = VAR_11 >> 1;
int VAR_29, VAR_29;
const FUNC_2 (*l1mv)[2] = l1mv0;
if (VAR_9 && !IS_DIRECT(VAR_1->VAR_10[VAR_11]))
continue;
VAR_1->VAR_10[VAR_11] = VAR_10;
fill_rectangle(&VAR_1->ref_cache[1][scan8[VAR_11 * 4]], 2, 2, 8, 0, 1);
if (IS_INTRA(VAR_7[VAR_29])) {
fill_rectangle(&VAR_1->ref_cache[0][scan8[VAR_11 * 4]], 2, 2, 8, 0, 1);
fill_rectangle(&VAR_1->mv_cache[0][scan8[VAR_11 * 4]], 2, 2, 8, 0, 4);
fill_rectangle(&VAR_1->mv_cache[1][scan8[VAR_11 * 4]], 2, 2, 8, 0, 4);
continue;
}
VAR_29 = VAR_8[VAR_29 + VAR_29 * VAR_3];
if (VAR_29 >= 0)
VAR_29 = VAR_13[0][VAR_29 + VAR_15];
else {
VAR_29 = VAR_13[1][l1ref1[VAR_29 + VAR_29 * VAR_3] +
VAR_15];
l1mv = l1mv1;
}
VAR_29 = VAR_14[VAR_29];
fill_rectangle(&VAR_1->ref_cache[0][scan8[VAR_11 * 4]], 2, 2, 8,
VAR_29, 1);
{
const FUNC_2 *VAR_29 = l1mv[VAR_29 * 3 + VAR_29 * VAR_4];
int VAR_22 = (VAR_29[1] << VAR_16) / 2;
int VAR_29 = (VAR_29 * VAR_29[0] + 128) >> 8;
int VAR_29 = (VAR_29 * VAR_22 + 128) >> 8;
fill_rectangle(&VAR_1->mv_cache[0][scan8[VAR_11 * 4]], 2, 2, 8,
pack16to32(VAR_29, VAR_29), 4);
fill_rectangle(&VAR_1->mv_cache[1][scan8[VAR_11 * 4]], 2, 2, 8,
pack16to32(VAR_29 - VAR_29[0], VAR_29 - VAR_22), 4);
}
}
return;
}
if (IS_16X16(*VAR_2)) {
int VAR_25, VAR_26, VAR_27;
fill_rectangle(&VAR_1->ref_cache[1][scan8[0]], 4, 4, 8, 0, 1);
if (IS_INTRA(VAR_7[0])) {
VAR_25 = VAR_26 = VAR_27 = 0;
} else {
const int VAR_29 = VAR_8[0] >= 0 ? VAR_13[0][VAR_8[0] + VAR_15]
: VAR_13[1][l1ref1[0] + VAR_15];
const int VAR_29 = VAR_14[VAR_29];
const FUNC_2 *VAR_29 = VAR_8[0] >= 0 ? l1mv0[0] : l1mv1[0];
int VAR_28[2];
VAR_28[0] = (VAR_29 * VAR_29[0] + 128) >> 8;
VAR_28[1] = (VAR_29 * VAR_29[1] + 128) >> 8;
VAR_25 = VAR_29;
VAR_26 = pack16to32(VAR_28[0], VAR_28[1]);
VAR_27 = pack16to32(VAR_28[0] - VAR_29[0], VAR_28[1] - VAR_29[1]);
}
fill_rectangle(&VAR_1->ref_cache[0][scan8[0]], 4, 4, 8, VAR_25, 1);
fill_rectangle(&VAR_1->mv_cache[0][scan8[0]], 4, 4, 8, VAR_26, 4);
fill_rectangle(&VAR_1->mv_cache[1][scan8[0]], 4, 4, 8, VAR_27, 4);
} else {
for (VAR_11 = 0; VAR_11 < 4; VAR_11++) {
const int VAR_29 = VAR_11 & 1;
const int VAR_29 = VAR_11 >> 1;
int VAR_29, VAR_29;
const FUNC_2 (*l1mv)[2] = l1mv0;
if (VAR_9 && !IS_DIRECT(VAR_1->VAR_10[VAR_11]))
continue;
VAR_1->VAR_10[VAR_11] = VAR_10;
fill_rectangle(&VAR_1->ref_cache[1][scan8[VAR_11 * 4]], 2, 2, 8, 0, 1);
if (IS_INTRA(VAR_7[0])) {
fill_rectangle(&VAR_1->ref_cache[0][scan8[VAR_11 * 4]], 2, 2, 8, 0, 1);
fill_rectangle(&VAR_1->mv_cache[0][scan8[VAR_11 * 4]], 2, 2, 8, 0, 4);
fill_rectangle(&VAR_1->mv_cache[1][scan8[VAR_11 * 4]], 2, 2, 8, 0, 4);
continue;
}
assert(VAR_3 == 2);
VAR_29 = VAR_8[VAR_11];
if (VAR_29 >= 0)
VAR_29 = VAR_13[0][VAR_29 + VAR_15];
else {
VAR_29 = VAR_13[1][l1ref1[VAR_11] + VAR_15];
l1mv = l1mv1;
}
VAR_29 = VAR_14[VAR_29];
fill_rectangle(&VAR_1->ref_cache[0][scan8[VAR_11 * 4]], 2, 2, 8,
VAR_29, 1);
if (IS_SUB_8X8(VAR_10)) {
const FUNC_2 *VAR_29 = l1mv[VAR_29 * 3 + VAR_29 * 3 * VAR_4];
int VAR_29 = (VAR_29 * VAR_29[0] + 128) >> 8;
int VAR_29 = (VAR_29 * VAR_29[1] + 128) >> 8;
fill_rectangle(&VAR_1->mv_cache[0][scan8[VAR_11 * 4]], 2, 2, 8,
pack16to32(VAR_29, VAR_29), 4);
fill_rectangle(&VAR_1->mv_cache[1][scan8[VAR_11 * 4]], 2, 2, 8,
pack16to32(VAR_29 - VAR_29[0], VAR_29 - VAR_29[1]), 4);
} else {
for (VAR_12 = 0; VAR_12 < 4; VAR_12++) {
const FUNC_2 *VAR_29 = l1mv[VAR_29 * 2 + (VAR_12 & 1) +
(VAR_29 * 2 + (VAR_12 >> 1)) * VAR_4];
FUNC_2 *VAR_28 = VAR_1->mv_cache[0][scan8[VAR_11 * 4 + VAR_12]];
VAR_28[0] = (VAR_29 * VAR_29[0] + 128) >> 8;
VAR_28[1] = (VAR_29 * VAR_29[1] + 128) >> 8;
AV_WN32A(VAR_1->mv_cache[1][scan8[VAR_11 * 4 + VAR_12]],
pack16to32(VAR_28[0] - VAR_29[0],
VAR_28[1] - VAR_29[1]));
}
}
}
}
}
}
| [
"static void FUNC_0(const H264Context *const VAR_0, H264SliceContext *VAR_1,\nint *VAR_2)\n{",
"int VAR_3 = 2;",
"int VAR_4 = VAR_0->b_stride;",
"int VAR_5 = VAR_1->VAR_5, VAR_6 = VAR_1->VAR_6;",
"int VAR_7[2];",
"const FUNC_2 (*l1mv0)[2], (*l1mv1)[2];",
"const int8_t *VAR_8, *l1ref1;",
"const int VAR... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0... | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
27
],
[
31,
33
],
[
37
],
[
39
],
[
41
],
[
43,
45
],
[
47
],
[
49
],
[... |
13,106 | static void ff_mpeg4_init_direct_mv(MpegEncContext *s){
//FIXME table is stored in MpegEncContext for thread-safety,
// but a static array would be faster
static const int tab_size = sizeof(s->direct_scale_mv[0])/sizeof(int16_t);
static const int tab_bias = (tab_size/2);
int i;
for(i=0; i<tab_size; i++){
s->direct_scale_mv[0][i] = (i-tab_bias)*s->pb_time/s->pp_time;
s->direct_scale_mv[1][i] = (i-tab_bias)*(s->pb_time-s->pp_time)/s->pp_time;
}
}
| true | FFmpeg | c4e2a535b3a8b192c144acfaa9f1a7bc8b7f99f3 | static void ff_mpeg4_init_direct_mv(MpegEncContext *s){
static const int tab_size = sizeof(s->direct_scale_mv[0])/sizeof(int16_t);
static const int tab_bias = (tab_size/2);
int i;
for(i=0; i<tab_size; i++){
s->direct_scale_mv[0][i] = (i-tab_bias)*s->pb_time/s->pp_time;
s->direct_scale_mv[1][i] = (i-tab_bias)*(s->pb_time-s->pp_time)/s->pp_time;
}
}
| {
"code": [
" static const int tab_size = sizeof(s->direct_scale_mv[0])/sizeof(int16_t);",
" static const int tab_bias = (tab_size/2);",
" static const int tab_size = sizeof(s->direct_scale_mv[0])/sizeof(int16_t);",
" static const int tab_bias = (tab_size/2);"
],
"line_no": [
7,
9,
7,
9
]
} | static void FUNC_0(MpegEncContext *VAR_0){
static const int VAR_1 = sizeof(VAR_0->direct_scale_mv[0])/sizeof(int16_t);
static const int VAR_2 = (VAR_1/2);
int VAR_3;
for(VAR_3=0; VAR_3<VAR_1; VAR_3++){
VAR_0->direct_scale_mv[0][VAR_3] = (VAR_3-VAR_2)*VAR_0->pb_time/VAR_0->pp_time;
VAR_0->direct_scale_mv[1][VAR_3] = (VAR_3-VAR_2)*(VAR_0->pb_time-VAR_0->pp_time)/VAR_0->pp_time;
}
}
| [
"static void FUNC_0(MpegEncContext *VAR_0){",
"static const int VAR_1 = sizeof(VAR_0->direct_scale_mv[0])/sizeof(int16_t);",
"static const int VAR_2 = (VAR_1/2);",
"int VAR_3;",
"for(VAR_3=0; VAR_3<VAR_1; VAR_3++){",
"VAR_0->direct_scale_mv[0][VAR_3] = (VAR_3-VAR_2)*VAR_0->pb_time/VAR_0->pp_time;",
"VAR... | [
0,
1,
1,
0,
0,
0,
0,
0,
0
] | [
[
1
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
]
] |
13,107 | static void video_image_display(VideoState *is)
{
Frame *vp;
Frame *sp;
AVPicture pict;
SDL_Rect rect;
int i;
vp = frame_queue_peek(&is->pictq);
if (vp->bmp) {
if (is->subtitle_st) {
if (frame_queue_nb_remaining(&is->subpq) > 0) {
sp = frame_queue_peek(&is->subpq);
if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
SDL_LockYUVOverlay (vp->bmp);
pict.data[0] = vp->bmp->pixels[0];
pict.data[1] = vp->bmp->pixels[2];
pict.data[2] = vp->bmp->pixels[1];
pict.linesize[0] = vp->bmp->pitches[0];
pict.linesize[1] = vp->bmp->pitches[2];
pict.linesize[2] = vp->bmp->pitches[1];
for (i = 0; i < sp->sub.num_rects; i++)
blend_subrect(&pict, sp->sub.rects[i],
vp->bmp->w, vp->bmp->h);
SDL_UnlockYUVOverlay (vp->bmp);
}
}
}
calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
SDL_DisplayYUVOverlay(vp->bmp, &rect);
if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
is->last_display_rect = rect;
}
}
}
| false | FFmpeg | 03037a4aad8b92c00ef2f115605ad20fc4410fe5 | static void video_image_display(VideoState *is)
{
Frame *vp;
Frame *sp;
AVPicture pict;
SDL_Rect rect;
int i;
vp = frame_queue_peek(&is->pictq);
if (vp->bmp) {
if (is->subtitle_st) {
if (frame_queue_nb_remaining(&is->subpq) > 0) {
sp = frame_queue_peek(&is->subpq);
if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
SDL_LockYUVOverlay (vp->bmp);
pict.data[0] = vp->bmp->pixels[0];
pict.data[1] = vp->bmp->pixels[2];
pict.data[2] = vp->bmp->pixels[1];
pict.linesize[0] = vp->bmp->pitches[0];
pict.linesize[1] = vp->bmp->pitches[2];
pict.linesize[2] = vp->bmp->pitches[1];
for (i = 0; i < sp->sub.num_rects; i++)
blend_subrect(&pict, sp->sub.rects[i],
vp->bmp->w, vp->bmp->h);
SDL_UnlockYUVOverlay (vp->bmp);
}
}
}
calculate_display_rect(&rect, is->xleft, is->ytop, is->width, is->height, vp->width, vp->height, vp->sar);
SDL_DisplayYUVOverlay(vp->bmp, &rect);
if (rect.x != is->last_display_rect.x || rect.y != is->last_display_rect.y || rect.w != is->last_display_rect.w || rect.h != is->last_display_rect.h || is->force_refresh) {
int bgcolor = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
fill_border(is->xleft, is->ytop, is->width, is->height, rect.x, rect.y, rect.w, rect.h, bgcolor, 1);
is->last_display_rect = rect;
}
}
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(VideoState *VAR_0)
{
Frame *vp;
Frame *sp;
AVPicture pict;
SDL_Rect rect;
int VAR_1;
vp = frame_queue_peek(&VAR_0->pictq);
if (vp->bmp) {
if (VAR_0->subtitle_st) {
if (frame_queue_nb_remaining(&VAR_0->subpq) > 0) {
sp = frame_queue_peek(&VAR_0->subpq);
if (vp->pts >= sp->pts + ((float) sp->sub.start_display_time / 1000)) {
SDL_LockYUVOverlay (vp->bmp);
pict.data[0] = vp->bmp->pixels[0];
pict.data[1] = vp->bmp->pixels[2];
pict.data[2] = vp->bmp->pixels[1];
pict.linesize[0] = vp->bmp->pitches[0];
pict.linesize[1] = vp->bmp->pitches[2];
pict.linesize[2] = vp->bmp->pitches[1];
for (VAR_1 = 0; VAR_1 < sp->sub.num_rects; VAR_1++)
blend_subrect(&pict, sp->sub.rects[VAR_1],
vp->bmp->w, vp->bmp->h);
SDL_UnlockYUVOverlay (vp->bmp);
}
}
}
calculate_display_rect(&rect, VAR_0->xleft, VAR_0->ytop, VAR_0->width, VAR_0->height, vp->width, vp->height, vp->sar);
SDL_DisplayYUVOverlay(vp->bmp, &rect);
if (rect.x != VAR_0->last_display_rect.x || rect.y != VAR_0->last_display_rect.y || rect.w != VAR_0->last_display_rect.w || rect.h != VAR_0->last_display_rect.h || VAR_0->force_refresh) {
int VAR_2 = SDL_MapRGB(screen->format, 0x00, 0x00, 0x00);
fill_border(VAR_0->xleft, VAR_0->ytop, VAR_0->width, VAR_0->height, rect.x, rect.y, rect.w, rect.h, VAR_2, 1);
VAR_0->last_display_rect = rect;
}
}
}
| [
"static void FUNC_0(VideoState *VAR_0)\n{",
"Frame *vp;",
"Frame *sp;",
"AVPicture pict;",
"SDL_Rect rect;",
"int VAR_1;",
"vp = frame_queue_peek(&VAR_0->pictq);",
"if (vp->bmp) {",
"if (VAR_0->subtitle_st) {",
"if (frame_queue_nb_remaining(&VAR_0->subpq) > 0) {",
"sp = frame_queue_peek(&VAR_0->... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
29
],
[
31
],
[
35
],
[
37
],
[
39
],
[
43
],
[
45
],
[
47
],
[
51
... |
13,108 | static int pad_count(const AVFilterPad *pads)
{
int count;
if (!pads)
return 0;
for(count = 0; pads->name; count ++) pads ++;
return count;
}
| false | FFmpeg | 7e8fe4be5fb4c98aa3c6a4ed3cec999f4e3cc3aa | static int pad_count(const AVFilterPad *pads)
{
int count;
if (!pads)
return 0;
for(count = 0; pads->name; count ++) pads ++;
return count;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(const AVFilterPad *VAR_0)
{
int VAR_1;
if (!VAR_0)
return 0;
for(VAR_1 = 0; VAR_0->name; VAR_1 ++) VAR_0 ++;
return VAR_1;
}
| [
"static int FUNC_0(const AVFilterPad *VAR_0)\n{",
"int VAR_1;",
"if (!VAR_0)\nreturn 0;",
"for(VAR_1 = 0; VAR_0->name; VAR_1 ++) VAR_0 ++;",
"return VAR_1;",
"}"
] | [
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9,
11
],
[
15
],
[
17
],
[
19
]
] |
13,109 | rtsp_read_reply (AVFormatContext *s, RTSPMessageHeader *reply,
unsigned char **content_ptr, int return_on_interleaved_data)
{
RTSPState *rt = s->priv_data;
char buf[4096], buf1[1024], *q;
unsigned char ch;
const char *p;
int ret, content_length, line_count = 0;
unsigned char *content = NULL;
memset(reply, 0, sizeof(*reply));
/* parse reply (XXX: use buffers) */
rt->last_reply[0] = '\0';
for(;;) {
q = buf;
for(;;) {
ret = url_read_complete(rt->rtsp_hd, &ch, 1);
#ifdef DEBUG_RTP_TCP
dprintf(s, "ret=%d c=%02x [%c]\n", ret, ch, ch);
#endif
if (ret != 1)
return -1;
if (ch == '\n')
break;
if (ch == '$') {
/* XXX: only parse it if first char on line ? */
if (return_on_interleaved_data) {
return 1;
} else
rtsp_skip_packet(s);
} else if (ch != '\r') {
if ((q - buf) < sizeof(buf) - 1)
*q++ = ch;
}
}
*q = '\0';
dprintf(s, "line='%s'\n", buf);
/* test if last line */
if (buf[0] == '\0')
break;
p = buf;
if (line_count == 0) {
/* get reply code */
get_word(buf1, sizeof(buf1), &p);
get_word(buf1, sizeof(buf1), &p);
reply->status_code = atoi(buf1);
} else {
rtsp_parse_line(reply, p);
av_strlcat(rt->last_reply, p, sizeof(rt->last_reply));
av_strlcat(rt->last_reply, "\n", sizeof(rt->last_reply));
}
line_count++;
}
if (rt->session_id[0] == '\0' && reply->session_id[0] != '\0')
av_strlcpy(rt->session_id, reply->session_id, sizeof(rt->session_id));
content_length = reply->content_length;
if (content_length > 0) {
/* leave some room for a trailing '\0' (useful for simple parsing) */
content = av_malloc(content_length + 1);
(void)url_read_complete(rt->rtsp_hd, content, content_length);
content[content_length] = '\0';
}
if (content_ptr)
*content_ptr = content;
else
av_free(content);
/* EOS */
if (reply->notice == 2101 /* End-of-Stream Reached */ ||
reply->notice == 2104 /* Start-of-Stream Reached */ ||
reply->notice == 2306 /* Continuous Feed Terminated */)
rt->state = RTSP_STATE_IDLE;
else if (reply->notice >= 4400 && reply->notice < 5500)
return AVERROR(EIO); /* data or server error */
else if (reply->notice == 2401 /* Ticket Expired */ ||
(reply->notice >= 5500 && reply->notice < 5600) /* end of term */ )
return AVERROR(EPERM);
return 0;
}
| false | FFmpeg | c89658008705d949c319df3fa6f400c481ad73e1 | rtsp_read_reply (AVFormatContext *s, RTSPMessageHeader *reply,
unsigned char **content_ptr, int return_on_interleaved_data)
{
RTSPState *rt = s->priv_data;
char buf[4096], buf1[1024], *q;
unsigned char ch;
const char *p;
int ret, content_length, line_count = 0;
unsigned char *content = NULL;
memset(reply, 0, sizeof(*reply));
rt->last_reply[0] = '\0';
for(;;) {
q = buf;
for(;;) {
ret = url_read_complete(rt->rtsp_hd, &ch, 1);
#ifdef DEBUG_RTP_TCP
dprintf(s, "ret=%d c=%02x [%c]\n", ret, ch, ch);
#endif
if (ret != 1)
return -1;
if (ch == '\n')
break;
if (ch == '$') {
if (return_on_interleaved_data) {
return 1;
} else
rtsp_skip_packet(s);
} else if (ch != '\r') {
if ((q - buf) < sizeof(buf) - 1)
*q++ = ch;
}
}
*q = '\0';
dprintf(s, "line='%s'\n", buf);
if (buf[0] == '\0')
break;
p = buf;
if (line_count == 0) {
get_word(buf1, sizeof(buf1), &p);
get_word(buf1, sizeof(buf1), &p);
reply->status_code = atoi(buf1);
} else {
rtsp_parse_line(reply, p);
av_strlcat(rt->last_reply, p, sizeof(rt->last_reply));
av_strlcat(rt->last_reply, "\n", sizeof(rt->last_reply));
}
line_count++;
}
if (rt->session_id[0] == '\0' && reply->session_id[0] != '\0')
av_strlcpy(rt->session_id, reply->session_id, sizeof(rt->session_id));
content_length = reply->content_length;
if (content_length > 0) {
content = av_malloc(content_length + 1);
(void)url_read_complete(rt->rtsp_hd, content, content_length);
content[content_length] = '\0';
}
if (content_ptr)
*content_ptr = content;
else
av_free(content);
if (reply->notice == 2101 ||
reply->notice == 2104 ||
reply->notice == 2306 )
rt->state = RTSP_STATE_IDLE;
else if (reply->notice >= 4400 && reply->notice < 5500)
return AVERROR(EIO);
else if (reply->notice == 2401 ||
(reply->notice >= 5500 && reply->notice < 5600) )
return AVERROR(EPERM);
return 0;
}
| {
"code": [],
"line_no": []
} | FUNC_0 (AVFormatContext *VAR_0, RTSPMessageHeader *VAR_1,
unsigned char **VAR_2, int VAR_3)
{
RTSPState *rt = VAR_0->priv_data;
char VAR_4[4096], VAR_5[1024], *VAR_6;
unsigned char VAR_7;
const char *VAR_8;
int VAR_9, VAR_10, VAR_11 = 0;
unsigned char *VAR_12 = NULL;
memset(VAR_1, 0, sizeof(*VAR_1));
rt->last_reply[0] = '\0';
for(;;) {
VAR_6 = VAR_4;
for(;;) {
VAR_9 = url_read_complete(rt->rtsp_hd, &VAR_7, 1);
#ifdef DEBUG_RTP_TCP
dprintf(VAR_0, "VAR_9=%d c=%02x [%c]\n", VAR_9, VAR_7, VAR_7);
#endif
if (VAR_9 != 1)
return -1;
if (VAR_7 == '\n')
break;
if (VAR_7 == '$') {
if (VAR_3) {
return 1;
} else
rtsp_skip_packet(VAR_0);
} else if (VAR_7 != '\r') {
if ((VAR_6 - VAR_4) < sizeof(VAR_4) - 1)
*VAR_6++ = VAR_7;
}
}
*VAR_6 = '\0';
dprintf(VAR_0, "line='%VAR_0'\n", VAR_4);
if (VAR_4[0] == '\0')
break;
VAR_8 = VAR_4;
if (VAR_11 == 0) {
get_word(VAR_5, sizeof(VAR_5), &VAR_8);
get_word(VAR_5, sizeof(VAR_5), &VAR_8);
VAR_1->status_code = atoi(VAR_5);
} else {
rtsp_parse_line(VAR_1, VAR_8);
av_strlcat(rt->last_reply, VAR_8, sizeof(rt->last_reply));
av_strlcat(rt->last_reply, "\n", sizeof(rt->last_reply));
}
VAR_11++;
}
if (rt->session_id[0] == '\0' && VAR_1->session_id[0] != '\0')
av_strlcpy(rt->session_id, VAR_1->session_id, sizeof(rt->session_id));
VAR_10 = VAR_1->VAR_10;
if (VAR_10 > 0) {
VAR_12 = av_malloc(VAR_10 + 1);
(void)url_read_complete(rt->rtsp_hd, VAR_12, VAR_10);
VAR_12[VAR_10] = '\0';
}
if (VAR_2)
*VAR_2 = VAR_12;
else
av_free(VAR_12);
if (VAR_1->notice == 2101 ||
VAR_1->notice == 2104 ||
VAR_1->notice == 2306 )
rt->state = RTSP_STATE_IDLE;
else if (VAR_1->notice >= 4400 && VAR_1->notice < 5500)
return AVERROR(EIO);
else if (VAR_1->notice == 2401 ||
(VAR_1->notice >= 5500 && VAR_1->notice < 5600) )
return AVERROR(EPERM);
return 0;
}
| [
"FUNC_0 (AVFormatContext *VAR_0, RTSPMessageHeader *VAR_1,\nunsigned char **VAR_2, int VAR_3)\n{",
"RTSPState *rt = VAR_0->priv_data;",
"char VAR_4[4096], VAR_5[1024], *VAR_6;",
"unsigned char VAR_7;",
"const char *VAR_8;",
"int VAR_9, VAR_10, VAR_11 = 0;",
"unsigned char *VAR_12 = NULL;",
"memset(VAR... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
21
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37,
39
],
[
41,
43,
45
],
[
47,
49
],
[
51
],
[... |
13,111 | int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int))
{
int total = size;
do {
int len = FFMIN(f->end - f->wptr, size);
if (func) {
if (func(src, f->wptr, len) <= 0)
break;
} else {
memcpy(f->wptr, src, len);
src = (uint8_t*)src + len;
}
// Write memory barrier needed for SMP here in theory
f->wptr += len;
if (f->wptr >= f->end)
f->wptr = f->buffer;
f->wndx += len;
size -= len;
} while (size > 0);
return total - size;
}
| false | FFmpeg | 9eb0d8bab1c475edf73c36146d1c3d31ea47f997 | int av_fifo_generic_write(AVFifoBuffer *f, void *src, int size, int (*func)(void*, void*, int))
{
int total = size;
do {
int len = FFMIN(f->end - f->wptr, size);
if (func) {
if (func(src, f->wptr, len) <= 0)
break;
} else {
memcpy(f->wptr, src, len);
src = (uint8_t*)src + len;
}
f->wptr += len;
if (f->wptr >= f->end)
f->wptr = f->buffer;
f->wndx += len;
size -= len;
} while (size > 0);
return total - size;
}
| {
"code": [],
"line_no": []
} | VAR_5intVAR_5 VAR_5av_fifo_generic_writeVAR_5(VAR_5AVFifoBufferVAR_5 *VAR_5VAR_0VAR_5, VAR_5voidVAR_5 *VAR_5VAR_1VAR_5, VAR_5intVAR_5 VAR_5VAR_2VAR_5, VAR_5intVAR_5 (*VAR_5VAR_3VAR_5)(VAR_5voidVAR_5*, VAR_5voidVAR_5*, VAR_5intVAR_5))
{
VAR_5intVAR_5 VAR_5totalVAR_5 = VAR_5VAR_2VAR_5;
VAR_5doVAR_5 {
VAR_5intVAR_5 VAR_5lenVAR_5 = VAR_5FFMINVAR_5(VAR_5VAR_0VAR_5->VAR_5endVAR_5 - VAR_5VAR_0VAR_5->VAR_5wptrVAR_5, VAR_5VAR_2VAR_5);
VAR_5ifVAR_5 (VAR_5VAR_3VAR_5) {
VAR_5ifVAR_5 (VAR_5VAR_3VAR_5(VAR_5VAR_1VAR_5, VAR_5VAR_0VAR_5->VAR_5wptrVAR_5, VAR_5lenVAR_5) <= VAR_50VAR_5)
VAR_5breakVAR_5;
} VAR_5elseVAR_5 {
VAR_5memcpyVAR_5(VAR_5VAR_0VAR_5->VAR_5wptrVAR_5, VAR_5VAR_1VAR_5, VAR_5lenVAR_5);
VAR_5VAR_1VAR_5 = (VAR_5uint8_tVAR_5*)VAR_5VAR_1VAR_5 + VAR_5lenVAR_5;
}
VAR_5VAR_0VAR_5->VAR_5wptrVAR_5 += VAR_5lenVAR_5;
VAR_5ifVAR_5 (VAR_5VAR_0VAR_5->VAR_5wptrVAR_5 >= VAR_5VAR_0VAR_5->VAR_5endVAR_5)
VAR_5VAR_0VAR_5->VAR_5wptrVAR_5 = VAR_5VAR_0VAR_5->VAR_5bufferVAR_5;
VAR_5VAR_0VAR_5->VAR_5wndxVAR_5 += VAR_5lenVAR_5;
VAR_5VAR_2VAR_5 -= VAR_5lenVAR_5;
} VAR_5whileVAR_5 (VAR_5VAR_2VAR_5 > VAR_50VAR_5);
VAR_5returnVAR_5 VAR_5totalVAR_5 - VAR_5VAR_2VAR_5;
}
| [
"VAR_5intVAR_5 VAR_5av_fifo_generic_writeVAR_5(VAR_5AVFifoBufferVAR_5 *VAR_5VAR_0VAR_5, VAR_5voidVAR_5 *VAR_5VAR_1VAR_5, VAR_5intVAR_5 VAR_5VAR_2VAR_5, VAR_5intVAR_5 (*VAR_5VAR_3VAR_5)(VAR_5voidVAR_5*, VAR_5voidVAR_5*, VAR_5intVAR_5))\n{",
"VAR_5intVAR_5 VAR_5totalVAR_5 = VAR_5VAR_2VAR_5;",
"VAR_5doVAR_5 {",
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13,
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
27
],
[
29,
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
]
] |
13,112 | int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
int max_step [4]; /* max pixel step for each plane */
int max_step_comp[4]; /* the component for each plane which has the max pixel step */
if ((unsigned)pix_fmt >= AV_PIX_FMT_NB || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
return AVERROR(EINVAL);
av_image_fill_max_pixsteps(max_step, max_step_comp, desc);
return image_get_linesize(width, plane, max_step[plane], max_step_comp[plane], desc);
}
| false | FFmpeg | ea37df2d528c15dc472e7272ac5278090f01f38e | int av_image_get_linesize(enum AVPixelFormat pix_fmt, int width, int plane)
{
const AVPixFmtDescriptor *desc = av_pix_fmt_desc_get(pix_fmt);
int max_step [4];
int max_step_comp[4];
if ((unsigned)pix_fmt >= AV_PIX_FMT_NB || desc->flags & AV_PIX_FMT_FLAG_HWACCEL)
return AVERROR(EINVAL);
av_image_fill_max_pixsteps(max_step, max_step_comp, desc);
return image_get_linesize(width, plane, max_step[plane], max_step_comp[plane], desc);
}
| {
"code": [],
"line_no": []
} | int FUNC_0(enum AVPixelFormat VAR_0, int VAR_1, int VAR_2)
{
const AVPixFmtDescriptor *VAR_3 = av_pix_fmt_desc_get(VAR_0);
int VAR_4 [4];
int VAR_5[4];
if ((unsigned)VAR_0 >= AV_PIX_FMT_NB || VAR_3->flags & AV_PIX_FMT_FLAG_HWACCEL)
return AVERROR(EINVAL);
av_image_fill_max_pixsteps(VAR_4, VAR_5, VAR_3);
return image_get_linesize(VAR_1, VAR_2, VAR_4[VAR_2], VAR_5[VAR_2], VAR_3);
}
| [
"int FUNC_0(enum AVPixelFormat VAR_0, int VAR_1, int VAR_2)\n{",
"const AVPixFmtDescriptor *VAR_3 = av_pix_fmt_desc_get(VAR_0);",
"int VAR_4 [4];",
"int VAR_5[4];",
"if ((unsigned)VAR_0 >= AV_PIX_FMT_NB || VAR_3->flags & AV_PIX_FMT_FLAG_HWACCEL)\nreturn AVERROR(EINVAL);",
"av_image_fill_max_pixsteps(V... | [
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13,
15
],
[
19
],
[
21
],
[
23
]
] |
13,113 | static int encode_slice(AVCodecContext *c, void *arg)
{
FFV1Context *fs = *(void **)arg;
FFV1Context *f = fs->avctx->priv_data;
int width = fs->slice_width;
int height = fs->slice_height;
int x = fs->slice_x;
int y = fs->slice_y;
const AVFrame *const p = f->frame;
const int ps = (av_pix_fmt_desc_get(c->pix_fmt)->flags & AV_PIX_FMT_FLAG_PLANAR)
? (f->bits_per_raw_sample > 8) + 1
: 4;
if (f->key_frame)
ffv1_clear_slice_state(f, fs);
if (f->version > 2) {
encode_slice_header(f, fs);
}
if (!fs->ac) {
if (f->version > 2)
put_rac(&fs->c, (uint8_t[]) { 129 }, 0);
fs->ac_byte_count = f->version > 2 || (!x && !y) ? ff_rac_terminate( &fs->c) : 0;
init_put_bits(&fs->pb, fs->c.bytestream_start + fs->ac_byte_count,
fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count);
}
if (f->colorspace == 0) {
const int chroma_width = -((-width) >> f->chroma_h_shift);
const int chroma_height = -((-height) >> f->chroma_v_shift);
const int cx = x >> f->chroma_h_shift;
const int cy = y >> f->chroma_v_shift;
encode_plane(fs, p->data[0] + ps * x + y * p->linesize[0],
width, height, p->linesize[0], 0);
if (f->chroma_planes) {
encode_plane(fs, p->data[1] + ps * cx + cy * p->linesize[1],
chroma_width, chroma_height, p->linesize[1], 1);
encode_plane(fs, p->data[2] + ps * cx + cy * p->linesize[2],
chroma_width, chroma_height, p->linesize[2], 1);
}
if (fs->transparency)
encode_plane(fs, p->data[3] + ps * x + y * p->linesize[3], width,
height, p->linesize[3], 2);
} else {
const uint8_t *planes[3] = { p->data[0] + ps * x + y * p->linesize[0],
p->data[1] + ps * x + y * p->linesize[1],
p->data[2] + ps * x + y * p->linesize[2] };
encode_rgb_frame(fs, planes, width, height, p->linesize);
}
emms_c();
return 0;
}
| false | FFmpeg | 4bb1070c154e49d35805fbcdac9c9e92f702ef96 | static int encode_slice(AVCodecContext *c, void *arg)
{
FFV1Context *fs = *(void **)arg;
FFV1Context *f = fs->avctx->priv_data;
int width = fs->slice_width;
int height = fs->slice_height;
int x = fs->slice_x;
int y = fs->slice_y;
const AVFrame *const p = f->frame;
const int ps = (av_pix_fmt_desc_get(c->pix_fmt)->flags & AV_PIX_FMT_FLAG_PLANAR)
? (f->bits_per_raw_sample > 8) + 1
: 4;
if (f->key_frame)
ffv1_clear_slice_state(f, fs);
if (f->version > 2) {
encode_slice_header(f, fs);
}
if (!fs->ac) {
if (f->version > 2)
put_rac(&fs->c, (uint8_t[]) { 129 }, 0);
fs->ac_byte_count = f->version > 2 || (!x && !y) ? ff_rac_terminate( &fs->c) : 0;
init_put_bits(&fs->pb, fs->c.bytestream_start + fs->ac_byte_count,
fs->c.bytestream_end - fs->c.bytestream_start - fs->ac_byte_count);
}
if (f->colorspace == 0) {
const int chroma_width = -((-width) >> f->chroma_h_shift);
const int chroma_height = -((-height) >> f->chroma_v_shift);
const int cx = x >> f->chroma_h_shift;
const int cy = y >> f->chroma_v_shift;
encode_plane(fs, p->data[0] + ps * x + y * p->linesize[0],
width, height, p->linesize[0], 0);
if (f->chroma_planes) {
encode_plane(fs, p->data[1] + ps * cx + cy * p->linesize[1],
chroma_width, chroma_height, p->linesize[1], 1);
encode_plane(fs, p->data[2] + ps * cx + cy * p->linesize[2],
chroma_width, chroma_height, p->linesize[2], 1);
}
if (fs->transparency)
encode_plane(fs, p->data[3] + ps * x + y * p->linesize[3], width,
height, p->linesize[3], 2);
} else {
const uint8_t *planes[3] = { p->data[0] + ps * x + y * p->linesize[0],
p->data[1] + ps * x + y * p->linesize[1],
p->data[2] + ps * x + y * p->linesize[2] };
encode_rgb_frame(fs, planes, width, height, p->linesize);
}
emms_c();
return 0;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(AVCodecContext *VAR_0, void *VAR_1)
{
FFV1Context *fs = *(void **)VAR_1;
FFV1Context *f = fs->avctx->priv_data;
int VAR_2 = fs->slice_width;
int VAR_3 = fs->slice_height;
int VAR_4 = fs->slice_x;
int VAR_5 = fs->slice_y;
const AVFrame *const VAR_6 = f->frame;
const int VAR_7 = (av_pix_fmt_desc_get(VAR_0->pix_fmt)->flags & AV_PIX_FMT_FLAG_PLANAR)
? (f->bits_per_raw_sample > 8) + 1
: 4;
if (f->key_frame)
ffv1_clear_slice_state(f, fs);
if (f->version > 2) {
encode_slice_header(f, fs);
}
if (!fs->ac) {
if (f->version > 2)
put_rac(&fs->VAR_0, (uint8_t[]) { 129 }, 0);
fs->ac_byte_count = f->version > 2 || (!VAR_4 && !VAR_5) ? ff_rac_terminate( &fs->VAR_0) : 0;
init_put_bits(&fs->pb, fs->VAR_0.bytestream_start + fs->ac_byte_count,
fs->VAR_0.bytestream_end - fs->VAR_0.bytestream_start - fs->ac_byte_count);
}
if (f->colorspace == 0) {
const int VAR_8 = -((-VAR_2) >> f->chroma_h_shift);
const int VAR_9 = -((-VAR_3) >> f->chroma_v_shift);
const int VAR_10 = VAR_4 >> f->chroma_h_shift;
const int VAR_11 = VAR_5 >> f->chroma_v_shift;
encode_plane(fs, VAR_6->data[0] + VAR_7 * VAR_4 + VAR_5 * VAR_6->linesize[0],
VAR_2, VAR_3, VAR_6->linesize[0], 0);
if (f->chroma_planes) {
encode_plane(fs, VAR_6->data[1] + VAR_7 * VAR_10 + VAR_11 * VAR_6->linesize[1],
VAR_8, VAR_9, VAR_6->linesize[1], 1);
encode_plane(fs, VAR_6->data[2] + VAR_7 * VAR_10 + VAR_11 * VAR_6->linesize[2],
VAR_8, VAR_9, VAR_6->linesize[2], 1);
}
if (fs->transparency)
encode_plane(fs, VAR_6->data[3] + VAR_7 * VAR_4 + VAR_5 * VAR_6->linesize[3], VAR_2,
VAR_3, VAR_6->linesize[3], 2);
} else {
const uint8_t *VAR_12[3] = { VAR_6->data[0] + VAR_7 * VAR_4 + VAR_5 * VAR_6->linesize[0],
VAR_6->data[1] + VAR_7 * VAR_4 + VAR_5 * VAR_6->linesize[1],
VAR_6->data[2] + VAR_7 * VAR_4 + VAR_5 * VAR_6->linesize[2] };
encode_rgb_frame(fs, VAR_12, VAR_2, VAR_3, VAR_6->linesize);
}
emms_c();
return 0;
}
| [
"static int FUNC_0(AVCodecContext *VAR_0, void *VAR_1)\n{",
"FFV1Context *fs = *(void **)VAR_1;",
"FFV1Context *f = fs->avctx->priv_data;",
"int VAR_2 = fs->slice_width;",
"int VAR_3 = fs->slice_height;",
"int VAR_4 = fs->slice_x;",
"int VAR_5 = fs->slice_y;",
"co... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19,
21,
23
],
[
27,
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39,
41
],
[
43
],
[
45,
47
],
[... |
13,115 | static int qemu_rbd_open(BlockDriverState *bs, const char *filename, int flags)
{
BDRVRBDState *s = bs->opaque;
char pool[RBD_MAX_POOL_NAME_SIZE];
char snap_buf[RBD_MAX_SNAP_NAME_SIZE];
char conf[RBD_MAX_CONF_SIZE];
char clientname_buf[RBD_MAX_CONF_SIZE];
char *clientname;
int r;
if (qemu_rbd_parsename(filename, pool, sizeof(pool),
snap_buf, sizeof(snap_buf),
s->name, sizeof(s->name),
conf, sizeof(conf)) < 0) {
return -EINVAL;
}
s->snap = NULL;
if (snap_buf[0] != '\0') {
s->snap = g_strdup(snap_buf);
}
clientname = qemu_rbd_parse_clientname(conf, clientname_buf);
r = rados_create(&s->cluster, clientname);
if (r < 0) {
error_report("error initializing");
return r;
}
if (strstr(conf, "conf=") == NULL) {
r = rados_conf_read_file(s->cluster, NULL);
if (r < 0) {
error_report("error reading config file");
rados_shutdown(s->cluster);
return r;
}
}
if (conf[0] != '\0') {
r = qemu_rbd_set_conf(s->cluster, conf);
if (r < 0) {
error_report("error setting config options");
rados_shutdown(s->cluster);
return r;
}
}
r = rados_connect(s->cluster);
if (r < 0) {
error_report("error connecting");
rados_shutdown(s->cluster);
return r;
}
r = rados_ioctx_create(s->cluster, pool, &s->io_ctx);
if (r < 0) {
error_report("error opening pool %s", pool);
rados_shutdown(s->cluster);
return r;
}
r = rbd_open(s->io_ctx, s->name, &s->image, s->snap);
if (r < 0) {
error_report("error reading header from %s", s->name);
rados_ioctx_destroy(s->io_ctx);
rados_shutdown(s->cluster);
return r;
}
bs->read_only = (s->snap != NULL);
s->event_reader_pos = 0;
r = qemu_pipe(s->fds);
if (r < 0) {
error_report("error opening eventfd");
goto failed;
}
fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
NULL, qemu_rbd_aio_flush_cb, NULL, s);
return 0;
failed:
rbd_close(s->image);
rados_ioctx_destroy(s->io_ctx);
rados_shutdown(s->cluster);
return r;
}
| true | qemu | eb93d5d9906cfa9fb6c3039a310d796dddfddeea | static int qemu_rbd_open(BlockDriverState *bs, const char *filename, int flags)
{
BDRVRBDState *s = bs->opaque;
char pool[RBD_MAX_POOL_NAME_SIZE];
char snap_buf[RBD_MAX_SNAP_NAME_SIZE];
char conf[RBD_MAX_CONF_SIZE];
char clientname_buf[RBD_MAX_CONF_SIZE];
char *clientname;
int r;
if (qemu_rbd_parsename(filename, pool, sizeof(pool),
snap_buf, sizeof(snap_buf),
s->name, sizeof(s->name),
conf, sizeof(conf)) < 0) {
return -EINVAL;
}
s->snap = NULL;
if (snap_buf[0] != '\0') {
s->snap = g_strdup(snap_buf);
}
clientname = qemu_rbd_parse_clientname(conf, clientname_buf);
r = rados_create(&s->cluster, clientname);
if (r < 0) {
error_report("error initializing");
return r;
}
if (strstr(conf, "conf=") == NULL) {
r = rados_conf_read_file(s->cluster, NULL);
if (r < 0) {
error_report("error reading config file");
rados_shutdown(s->cluster);
return r;
}
}
if (conf[0] != '\0') {
r = qemu_rbd_set_conf(s->cluster, conf);
if (r < 0) {
error_report("error setting config options");
rados_shutdown(s->cluster);
return r;
}
}
r = rados_connect(s->cluster);
if (r < 0) {
error_report("error connecting");
rados_shutdown(s->cluster);
return r;
}
r = rados_ioctx_create(s->cluster, pool, &s->io_ctx);
if (r < 0) {
error_report("error opening pool %s", pool);
rados_shutdown(s->cluster);
return r;
}
r = rbd_open(s->io_ctx, s->name, &s->image, s->snap);
if (r < 0) {
error_report("error reading header from %s", s->name);
rados_ioctx_destroy(s->io_ctx);
rados_shutdown(s->cluster);
return r;
}
bs->read_only = (s->snap != NULL);
s->event_reader_pos = 0;
r = qemu_pipe(s->fds);
if (r < 0) {
error_report("error opening eventfd");
goto failed;
}
fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
NULL, qemu_rbd_aio_flush_cb, NULL, s);
return 0;
failed:
rbd_close(s->image);
rados_ioctx_destroy(s->io_ctx);
rados_shutdown(s->cluster);
return r;
}
| {
"code": [
" s->snap = NULL;",
" if (snap_buf[0] != '\\0') {",
" s->snap = g_strdup(snap_buf);",
" rados_shutdown(s->cluster);",
" return r;",
" rados_shutdown(s->cluster);",
" return r;",
" rados_shutdown(s->cluster);",
" return r;",
" rados_shutdown(s->cluster);",
" return r;",
" rados_ioctx_destroy(s->io_ctx);",
" rados_shutdown(s->cluster);",
" return r;"
],
"line_no": [
33,
35,
37,
65,
67,
65,
67,
99,
51,
99,
51,
127,
99,
51
]
} | static int FUNC_0(BlockDriverState *VAR_0, const char *VAR_1, int VAR_2)
{
BDRVRBDState *s = VAR_0->opaque;
char VAR_3[RBD_MAX_POOL_NAME_SIZE];
char VAR_4[RBD_MAX_SNAP_NAME_SIZE];
char VAR_5[RBD_MAX_CONF_SIZE];
char VAR_6[RBD_MAX_CONF_SIZE];
char *VAR_7;
int VAR_8;
if (qemu_rbd_parsename(VAR_1, VAR_3, sizeof(VAR_3),
VAR_4, sizeof(VAR_4),
s->name, sizeof(s->name),
VAR_5, sizeof(VAR_5)) < 0) {
return -EINVAL;
}
s->snap = NULL;
if (VAR_4[0] != '\0') {
s->snap = g_strdup(VAR_4);
}
VAR_7 = qemu_rbd_parse_clientname(VAR_5, VAR_6);
VAR_8 = rados_create(&s->cluster, VAR_7);
if (VAR_8 < 0) {
error_report("error initializing");
return VAR_8;
}
if (strstr(VAR_5, "VAR_5=") == NULL) {
VAR_8 = rados_conf_read_file(s->cluster, NULL);
if (VAR_8 < 0) {
error_report("error reading config file");
rados_shutdown(s->cluster);
return VAR_8;
}
}
if (VAR_5[0] != '\0') {
VAR_8 = qemu_rbd_set_conf(s->cluster, VAR_5);
if (VAR_8 < 0) {
error_report("error setting config options");
rados_shutdown(s->cluster);
return VAR_8;
}
}
VAR_8 = rados_connect(s->cluster);
if (VAR_8 < 0) {
error_report("error connecting");
rados_shutdown(s->cluster);
return VAR_8;
}
VAR_8 = rados_ioctx_create(s->cluster, VAR_3, &s->io_ctx);
if (VAR_8 < 0) {
error_report("error opening VAR_3 %s", VAR_3);
rados_shutdown(s->cluster);
return VAR_8;
}
VAR_8 = rbd_open(s->io_ctx, s->name, &s->image, s->snap);
if (VAR_8 < 0) {
error_report("error reading header from %s", s->name);
rados_ioctx_destroy(s->io_ctx);
rados_shutdown(s->cluster);
return VAR_8;
}
VAR_0->read_only = (s->snap != NULL);
s->event_reader_pos = 0;
VAR_8 = qemu_pipe(s->fds);
if (VAR_8 < 0) {
error_report("error opening eventfd");
goto failed;
}
fcntl(s->fds[0], F_SETFL, O_NONBLOCK);
fcntl(s->fds[1], F_SETFL, O_NONBLOCK);
qemu_aio_set_fd_handler(s->fds[RBD_FD_READ], qemu_rbd_aio_event_reader,
NULL, qemu_rbd_aio_flush_cb, NULL, s);
return 0;
failed:
rbd_close(s->image);
rados_ioctx_destroy(s->io_ctx);
rados_shutdown(s->cluster);
return VAR_8;
}
| [
"static int FUNC_0(BlockDriverState *VAR_0, const char *VAR_1, int VAR_2)\n{",
"BDRVRBDState *s = VAR_0->opaque;",
"char VAR_3[RBD_MAX_POOL_NAME_SIZE];",
"char VAR_4[RBD_MAX_SNAP_NAME_SIZE];",
"char VAR_5[RBD_MAX_CONF_SIZE];",
"char VAR_6[RBD_MAX_CONF_SIZE];",
"char *VAR_7;",
"int VAR_8;",
"if (qemu... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
21,
23,
25,
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
43
],
[
45
],
[
47
],
[... |
13,116 | static inline int coupling_strategy(AC3DecodeContext *s, int blk,
uint8_t *bit_alloc_stages)
{
GetBitContext *bc = &s->gbc;
int fbw_channels = s->fbw_channels;
int channel_mode = s->channel_mode;
int ch;
memset(bit_alloc_stages, 3, AC3_MAX_CHANNELS);
if (!s->eac3)
s->cpl_in_use[blk] = get_bits1(bc);
if (s->cpl_in_use[blk]) {
/* coupling in use */
int cpl_start_subband, cpl_end_subband;
if (channel_mode < AC3_CHMODE_STEREO) {
av_log(s->avctx, AV_LOG_ERROR, "coupling not allowed in mono or dual-mono\n");
return AVERROR_INVALIDDATA;
}
/* check for enhanced coupling */
if (s->eac3 && get_bits1(bc)) {
/* TODO: parse enhanced coupling strategy info */
avpriv_request_sample(s->avctx, "Enhanced coupling");
return AVERROR_PATCHWELCOME;
}
/* determine which channels are coupled */
if (s->eac3 && s->channel_mode == AC3_CHMODE_STEREO) {
s->channel_in_cpl[1] = 1;
s->channel_in_cpl[2] = 1;
} else {
for (ch = 1; ch <= fbw_channels; ch++)
s->channel_in_cpl[ch] = get_bits1(bc);
}
/* phase flags in use */
if (channel_mode == AC3_CHMODE_STEREO)
s->phase_flags_in_use = get_bits1(bc);
/* coupling frequency range */
cpl_start_subband = get_bits(bc, 4);
cpl_end_subband = s->spx_in_use ? (s->spx_src_start_freq - 37) / 12 :
get_bits(bc, 4) + 3;
if (cpl_start_subband >= cpl_end_subband) {
av_log(s->avctx, AV_LOG_ERROR, "invalid coupling range (%d >= %d)\n",
cpl_start_subband, cpl_end_subband);
return AVERROR_INVALIDDATA;
}
s->start_freq[CPL_CH] = cpl_start_subband * 12 + 37;
s->end_freq[CPL_CH] = cpl_end_subband * 12 + 37;
decode_band_structure(bc, blk, s->eac3, 0, cpl_start_subband,
cpl_end_subband,
ff_eac3_default_cpl_band_struct,
&s->num_cpl_bands, s->cpl_band_sizes);
} else {
/* coupling not in use */
for (ch = 1; ch <= fbw_channels; ch++) {
s->channel_in_cpl[ch] = 0;
s->first_cpl_coords[ch] = 1;
}
s->first_cpl_leak = s->eac3;
s->phase_flags_in_use = 0;
}
return 0;
}
| true | FFmpeg | 9351a156de724edb69ba6e1f05884fe806a13a21 | static inline int coupling_strategy(AC3DecodeContext *s, int blk,
uint8_t *bit_alloc_stages)
{
GetBitContext *bc = &s->gbc;
int fbw_channels = s->fbw_channels;
int channel_mode = s->channel_mode;
int ch;
memset(bit_alloc_stages, 3, AC3_MAX_CHANNELS);
if (!s->eac3)
s->cpl_in_use[blk] = get_bits1(bc);
if (s->cpl_in_use[blk]) {
int cpl_start_subband, cpl_end_subband;
if (channel_mode < AC3_CHMODE_STEREO) {
av_log(s->avctx, AV_LOG_ERROR, "coupling not allowed in mono or dual-mono\n");
return AVERROR_INVALIDDATA;
}
if (s->eac3 && get_bits1(bc)) {
avpriv_request_sample(s->avctx, "Enhanced coupling");
return AVERROR_PATCHWELCOME;
}
if (s->eac3 && s->channel_mode == AC3_CHMODE_STEREO) {
s->channel_in_cpl[1] = 1;
s->channel_in_cpl[2] = 1;
} else {
for (ch = 1; ch <= fbw_channels; ch++)
s->channel_in_cpl[ch] = get_bits1(bc);
}
if (channel_mode == AC3_CHMODE_STEREO)
s->phase_flags_in_use = get_bits1(bc);
cpl_start_subband = get_bits(bc, 4);
cpl_end_subband = s->spx_in_use ? (s->spx_src_start_freq - 37) / 12 :
get_bits(bc, 4) + 3;
if (cpl_start_subband >= cpl_end_subband) {
av_log(s->avctx, AV_LOG_ERROR, "invalid coupling range (%d >= %d)\n",
cpl_start_subband, cpl_end_subband);
return AVERROR_INVALIDDATA;
}
s->start_freq[CPL_CH] = cpl_start_subband * 12 + 37;
s->end_freq[CPL_CH] = cpl_end_subband * 12 + 37;
decode_band_structure(bc, blk, s->eac3, 0, cpl_start_subband,
cpl_end_subband,
ff_eac3_default_cpl_band_struct,
&s->num_cpl_bands, s->cpl_band_sizes);
} else {
for (ch = 1; ch <= fbw_channels; ch++) {
s->channel_in_cpl[ch] = 0;
s->first_cpl_coords[ch] = 1;
}
s->first_cpl_leak = s->eac3;
s->phase_flags_in_use = 0;
}
return 0;
}
| {
"code": [
" } else {",
" &s->num_cpl_bands, s->cpl_band_sizes);"
],
"line_no": [
113,
111
]
} | static inline int FUNC_0(AC3DecodeContext *VAR_0, int VAR_1,
uint8_t *VAR_2)
{
GetBitContext *bc = &VAR_0->gbc;
int VAR_3 = VAR_0->VAR_3;
int VAR_4 = VAR_0->VAR_4;
int VAR_5;
memset(VAR_2, 3, AC3_MAX_CHANNELS);
if (!VAR_0->eac3)
VAR_0->cpl_in_use[VAR_1] = get_bits1(bc);
if (VAR_0->cpl_in_use[VAR_1]) {
int VAR_6, VAR_7;
if (VAR_4 < AC3_CHMODE_STEREO) {
av_log(VAR_0->avctx, AV_LOG_ERROR, "coupling not allowed in mono or dual-mono\n");
return AVERROR_INVALIDDATA;
}
if (VAR_0->eac3 && get_bits1(bc)) {
avpriv_request_sample(VAR_0->avctx, "Enhanced coupling");
return AVERROR_PATCHWELCOME;
}
if (VAR_0->eac3 && VAR_0->VAR_4 == AC3_CHMODE_STEREO) {
VAR_0->channel_in_cpl[1] = 1;
VAR_0->channel_in_cpl[2] = 1;
} else {
for (VAR_5 = 1; VAR_5 <= VAR_3; VAR_5++)
VAR_0->channel_in_cpl[VAR_5] = get_bits1(bc);
}
if (VAR_4 == AC3_CHMODE_STEREO)
VAR_0->phase_flags_in_use = get_bits1(bc);
VAR_6 = get_bits(bc, 4);
VAR_7 = VAR_0->spx_in_use ? (VAR_0->spx_src_start_freq - 37) / 12 :
get_bits(bc, 4) + 3;
if (VAR_6 >= VAR_7) {
av_log(VAR_0->avctx, AV_LOG_ERROR, "invalid coupling range (%d >= %d)\n",
VAR_6, VAR_7);
return AVERROR_INVALIDDATA;
}
VAR_0->start_freq[CPL_CH] = VAR_6 * 12 + 37;
VAR_0->end_freq[CPL_CH] = VAR_7 * 12 + 37;
decode_band_structure(bc, VAR_1, VAR_0->eac3, 0, VAR_6,
VAR_7,
ff_eac3_default_cpl_band_struct,
&VAR_0->num_cpl_bands, VAR_0->cpl_band_sizes);
} else {
for (VAR_5 = 1; VAR_5 <= VAR_3; VAR_5++) {
VAR_0->channel_in_cpl[VAR_5] = 0;
VAR_0->first_cpl_coords[VAR_5] = 1;
}
VAR_0->first_cpl_leak = VAR_0->eac3;
VAR_0->phase_flags_in_use = 0;
}
return 0;
}
| [
"static inline int FUNC_0(AC3DecodeContext *VAR_0, int VAR_1,\nuint8_t *VAR_2)\n{",
"GetBitContext *bc = &VAR_0->gbc;",
"int VAR_3 = VAR_0->VAR_3;",
"int VAR_4 = VAR_0->VAR_4;",
"int VAR_5;",
"memset(VAR_2, 3, AC3_MAX_CHANNELS);",
"if (!VAR_0->eac3)\nVAR_0->cpl_in_use[VAR_1] = get_bits1(bc);",
"if (VA... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17
],
[
19,
21
],
[
23
],
[
27
],
[
31
],
[
33
],
[
35
],
[
37
],
[
43
],
[
47
],
[
49
],
[
51
],
[
57
],
[
59
... |
13,117 | static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *buf, const uint8_t *end_buf)
{
int tag, type, count, off, value = 0;
int i, j;
uint32_t *pal;
const uint8_t *rp, *gp, *bp;
tag = tget_short(&buf, s->le);
type = tget_short(&buf, s->le);
count = tget_long(&buf, s->le);
off = tget_long(&buf, s->le);
if(count == 1){
switch(type){
case TIFF_BYTE:
case TIFF_SHORT:
buf -= 4;
value = tget(&buf, type, s->le);
buf = NULL;
break;
case TIFF_LONG:
value = off;
buf = NULL;
break;
case TIFF_STRING:
if(count <= 4){
buf -= 4;
break;
}
default:
value = -1;
buf = start + off;
}
}else if(type_sizes[type] * count <= 4){
buf -= 4;
}else{
buf = start + off;
}
if(buf && (buf < start || buf > end_buf)){
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
return -1;
}
switch(tag){
case TIFF_WIDTH:
s->width = value;
break;
case TIFF_HEIGHT:
s->height = value;
break;
case TIFF_BPP:
if(count == 1) s->bpp = value;
else{
switch(type){
case TIFF_BYTE:
s->bpp = (off & 0xFF) + ((off >> 8) & 0xFF) + ((off >> 16) & 0xFF) + ((off >> 24) & 0xFF);
break;
case TIFF_SHORT:
case TIFF_LONG:
s->bpp = 0;
for(i = 0; i < count; i++) s->bpp += tget(&buf, type, s->le);
break;
default:
s->bpp = -1;
}
}
switch(s->bpp){
case 1:
s->avctx->pix_fmt = PIX_FMT_MONOBLACK;
break;
case 8:
s->avctx->pix_fmt = PIX_FMT_PAL8;
break;
case 24:
s->avctx->pix_fmt = PIX_FMT_RGB24;
break;
case 16:
if(count == 1){
s->avctx->pix_fmt = PIX_FMT_GRAY16BE;
}else{
av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%i)\n", s->bpp);
return -1;
}
break;
case 32:
if(count == 4){
s->avctx->pix_fmt = PIX_FMT_RGBA;
}else{
av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", s->bpp, count);
return -1;
}
break;
default:
av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", s->bpp, count);
return -1;
}
if(s->width != s->avctx->width || s->height != s->avctx->height){
if(avcodec_check_dimensions(s->avctx, s->width, s->height))
return -1;
avcodec_set_dimensions(s->avctx, s->width, s->height);
}
if(s->picture.data[0])
s->avctx->release_buffer(s->avctx, &s->picture);
if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
if(s->bpp == 8){
/* make default grayscale pal */
pal = (uint32_t *) s->picture.data[1];
for(i = 0; i < 256; i++)
pal[i] = i * 0x010101;
}
break;
case TIFF_COMPR:
s->compr = value;
s->predictor = 0;
switch(s->compr){
case TIFF_RAW:
case TIFF_PACKBITS:
case TIFF_LZW:
case TIFF_CCITT_RLE:
break;
case TIFF_G3:
case TIFF_G4:
s->fax_opts = 0;
break;
case TIFF_DEFLATE:
case TIFF_ADOBE_DEFLATE:
#if CONFIG_ZLIB
break;
#else
av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
return -1;
#endif
case TIFF_JPEG:
case TIFF_NEWJPEG:
av_log(s->avctx, AV_LOG_ERROR, "JPEG compression is not supported\n");
return -1;
default:
av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n", s->compr);
return -1;
}
break;
case TIFF_ROWSPERSTRIP:
if(type == TIFF_LONG && value == -1)
value = s->avctx->height;
if(value < 1){
av_log(s->avctx, AV_LOG_ERROR, "Incorrect value of rows per strip\n");
return -1;
}
s->rps = value;
break;
case TIFF_STRIP_OFFS:
if(count == 1){
s->stripdata = NULL;
s->stripoff = value;
}else
s->stripdata = start + off;
s->strips = count;
if(s->strips == 1) s->rps = s->height;
s->sot = type;
if(s->stripdata > end_buf){
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
return -1;
}
break;
case TIFF_STRIP_SIZE:
if(count == 1){
s->stripsizes = NULL;
s->stripsize = value;
s->strips = 1;
}else{
s->stripsizes = start + off;
}
s->strips = count;
s->sstype = type;
if(s->stripsizes > end_buf){
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
return -1;
}
break;
case TIFF_PREDICTOR:
s->predictor = value;
break;
case TIFF_INVERT:
switch(value){
case 0:
s->invert = 1;
break;
case 1:
s->invert = 0;
break;
case 2:
case 3:
break;
default:
av_log(s->avctx, AV_LOG_ERROR, "Color mode %d is not supported\n", value);
return -1;
}
break;
case TIFF_PAL:
if(s->avctx->pix_fmt != PIX_FMT_PAL8){
av_log(s->avctx, AV_LOG_ERROR, "Palette met but this is not palettized format\n");
return -1;
}
pal = (uint32_t *) s->picture.data[1];
off = type_sizes[type];
rp = buf;
gp = buf + count / 3 * off;
bp = buf + count / 3 * off * 2;
off = (type_sizes[type] - 1) << 3;
for(i = 0; i < count / 3; i++){
j = (tget(&rp, type, s->le) >> off) << 16;
j |= (tget(&gp, type, s->le) >> off) << 8;
j |= tget(&bp, type, s->le) >> off;
pal[i] = j;
}
break;
case TIFF_PLANAR:
if(value == 2){
av_log(s->avctx, AV_LOG_ERROR, "Planar format is not supported\n");
return -1;
}
break;
case TIFF_T4OPTIONS:
case TIFF_T6OPTIONS:
s->fax_opts = value;
break;
}
return 0;
}
| false | FFmpeg | 9706d1c766b609961bea44d475c84c01b7ee10a5 | static int tiff_decode_tag(TiffContext *s, const uint8_t *start, const uint8_t *buf, const uint8_t *end_buf)
{
int tag, type, count, off, value = 0;
int i, j;
uint32_t *pal;
const uint8_t *rp, *gp, *bp;
tag = tget_short(&buf, s->le);
type = tget_short(&buf, s->le);
count = tget_long(&buf, s->le);
off = tget_long(&buf, s->le);
if(count == 1){
switch(type){
case TIFF_BYTE:
case TIFF_SHORT:
buf -= 4;
value = tget(&buf, type, s->le);
buf = NULL;
break;
case TIFF_LONG:
value = off;
buf = NULL;
break;
case TIFF_STRING:
if(count <= 4){
buf -= 4;
break;
}
default:
value = -1;
buf = start + off;
}
}else if(type_sizes[type] * count <= 4){
buf -= 4;
}else{
buf = start + off;
}
if(buf && (buf < start || buf > end_buf)){
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
return -1;
}
switch(tag){
case TIFF_WIDTH:
s->width = value;
break;
case TIFF_HEIGHT:
s->height = value;
break;
case TIFF_BPP:
if(count == 1) s->bpp = value;
else{
switch(type){
case TIFF_BYTE:
s->bpp = (off & 0xFF) + ((off >> 8) & 0xFF) + ((off >> 16) & 0xFF) + ((off >> 24) & 0xFF);
break;
case TIFF_SHORT:
case TIFF_LONG:
s->bpp = 0;
for(i = 0; i < count; i++) s->bpp += tget(&buf, type, s->le);
break;
default:
s->bpp = -1;
}
}
switch(s->bpp){
case 1:
s->avctx->pix_fmt = PIX_FMT_MONOBLACK;
break;
case 8:
s->avctx->pix_fmt = PIX_FMT_PAL8;
break;
case 24:
s->avctx->pix_fmt = PIX_FMT_RGB24;
break;
case 16:
if(count == 1){
s->avctx->pix_fmt = PIX_FMT_GRAY16BE;
}else{
av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%i)\n", s->bpp);
return -1;
}
break;
case 32:
if(count == 4){
s->avctx->pix_fmt = PIX_FMT_RGBA;
}else{
av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", s->bpp, count);
return -1;
}
break;
default:
av_log(s->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", s->bpp, count);
return -1;
}
if(s->width != s->avctx->width || s->height != s->avctx->height){
if(avcodec_check_dimensions(s->avctx, s->width, s->height))
return -1;
avcodec_set_dimensions(s->avctx, s->width, s->height);
}
if(s->picture.data[0])
s->avctx->release_buffer(s->avctx, &s->picture);
if(s->avctx->get_buffer(s->avctx, &s->picture) < 0){
av_log(s->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
if(s->bpp == 8){
pal = (uint32_t *) s->picture.data[1];
for(i = 0; i < 256; i++)
pal[i] = i * 0x010101;
}
break;
case TIFF_COMPR:
s->compr = value;
s->predictor = 0;
switch(s->compr){
case TIFF_RAW:
case TIFF_PACKBITS:
case TIFF_LZW:
case TIFF_CCITT_RLE:
break;
case TIFF_G3:
case TIFF_G4:
s->fax_opts = 0;
break;
case TIFF_DEFLATE:
case TIFF_ADOBE_DEFLATE:
#if CONFIG_ZLIB
break;
#else
av_log(s->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
return -1;
#endif
case TIFF_JPEG:
case TIFF_NEWJPEG:
av_log(s->avctx, AV_LOG_ERROR, "JPEG compression is not supported\n");
return -1;
default:
av_log(s->avctx, AV_LOG_ERROR, "Unknown compression method %i\n", s->compr);
return -1;
}
break;
case TIFF_ROWSPERSTRIP:
if(type == TIFF_LONG && value == -1)
value = s->avctx->height;
if(value < 1){
av_log(s->avctx, AV_LOG_ERROR, "Incorrect value of rows per strip\n");
return -1;
}
s->rps = value;
break;
case TIFF_STRIP_OFFS:
if(count == 1){
s->stripdata = NULL;
s->stripoff = value;
}else
s->stripdata = start + off;
s->strips = count;
if(s->strips == 1) s->rps = s->height;
s->sot = type;
if(s->stripdata > end_buf){
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
return -1;
}
break;
case TIFF_STRIP_SIZE:
if(count == 1){
s->stripsizes = NULL;
s->stripsize = value;
s->strips = 1;
}else{
s->stripsizes = start + off;
}
s->strips = count;
s->sstype = type;
if(s->stripsizes > end_buf){
av_log(s->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
return -1;
}
break;
case TIFF_PREDICTOR:
s->predictor = value;
break;
case TIFF_INVERT:
switch(value){
case 0:
s->invert = 1;
break;
case 1:
s->invert = 0;
break;
case 2:
case 3:
break;
default:
av_log(s->avctx, AV_LOG_ERROR, "Color mode %d is not supported\n", value);
return -1;
}
break;
case TIFF_PAL:
if(s->avctx->pix_fmt != PIX_FMT_PAL8){
av_log(s->avctx, AV_LOG_ERROR, "Palette met but this is not palettized format\n");
return -1;
}
pal = (uint32_t *) s->picture.data[1];
off = type_sizes[type];
rp = buf;
gp = buf + count / 3 * off;
bp = buf + count / 3 * off * 2;
off = (type_sizes[type] - 1) << 3;
for(i = 0; i < count / 3; i++){
j = (tget(&rp, type, s->le) >> off) << 16;
j |= (tget(&gp, type, s->le) >> off) << 8;
j |= tget(&bp, type, s->le) >> off;
pal[i] = j;
}
break;
case TIFF_PLANAR:
if(value == 2){
av_log(s->avctx, AV_LOG_ERROR, "Planar format is not supported\n");
return -1;
}
break;
case TIFF_T4OPTIONS:
case TIFF_T6OPTIONS:
s->fax_opts = value;
break;
}
return 0;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(TiffContext *VAR_0, const uint8_t *VAR_1, const uint8_t *VAR_2, const uint8_t *VAR_3)
{
int VAR_4, VAR_5, VAR_6, VAR_7, VAR_8 = 0;
int VAR_9, VAR_10;
uint32_t *pal;
const uint8_t *VAR_11, *gp, *bp;
VAR_4 = tget_short(&VAR_2, VAR_0->le);
VAR_5 = tget_short(&VAR_2, VAR_0->le);
VAR_6 = tget_long(&VAR_2, VAR_0->le);
VAR_7 = tget_long(&VAR_2, VAR_0->le);
if(VAR_6 == 1){
switch(VAR_5){
case TIFF_BYTE:
case TIFF_SHORT:
VAR_2 -= 4;
VAR_8 = tget(&VAR_2, VAR_5, VAR_0->le);
VAR_2 = NULL;
break;
case TIFF_LONG:
VAR_8 = VAR_7;
VAR_2 = NULL;
break;
case TIFF_STRING:
if(VAR_6 <= 4){
VAR_2 -= 4;
break;
}
default:
VAR_8 = -1;
VAR_2 = VAR_1 + VAR_7;
}
}else if(type_sizes[VAR_5] * VAR_6 <= 4){
VAR_2 -= 4;
}else{
VAR_2 = VAR_1 + VAR_7;
}
if(VAR_2 && (VAR_2 < VAR_1 || VAR_2 > VAR_3)){
av_log(VAR_0->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
return -1;
}
switch(VAR_4){
case TIFF_WIDTH:
VAR_0->width = VAR_8;
break;
case TIFF_HEIGHT:
VAR_0->height = VAR_8;
break;
case TIFF_BPP:
if(VAR_6 == 1) VAR_0->bpp = VAR_8;
else{
switch(VAR_5){
case TIFF_BYTE:
VAR_0->bpp = (VAR_7 & 0xFF) + ((VAR_7 >> 8) & 0xFF) + ((VAR_7 >> 16) & 0xFF) + ((VAR_7 >> 24) & 0xFF);
break;
case TIFF_SHORT:
case TIFF_LONG:
VAR_0->bpp = 0;
for(VAR_9 = 0; VAR_9 < VAR_6; VAR_9++) VAR_0->bpp += tget(&VAR_2, VAR_5, VAR_0->le);
break;
default:
VAR_0->bpp = -1;
}
}
switch(VAR_0->bpp){
case 1:
VAR_0->avctx->pix_fmt = PIX_FMT_MONOBLACK;
break;
case 8:
VAR_0->avctx->pix_fmt = PIX_FMT_PAL8;
break;
case 24:
VAR_0->avctx->pix_fmt = PIX_FMT_RGB24;
break;
case 16:
if(VAR_6 == 1){
VAR_0->avctx->pix_fmt = PIX_FMT_GRAY16BE;
}else{
av_log(VAR_0->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%VAR_9)\n", VAR_0->bpp);
return -1;
}
break;
case 32:
if(VAR_6 == 4){
VAR_0->avctx->pix_fmt = PIX_FMT_RGBA;
}else{
av_log(VAR_0->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", VAR_0->bpp, VAR_6);
return -1;
}
break;
default:
av_log(VAR_0->avctx, AV_LOG_ERROR, "This format is not supported (bpp=%d, %d components)\n", VAR_0->bpp, VAR_6);
return -1;
}
if(VAR_0->width != VAR_0->avctx->width || VAR_0->height != VAR_0->avctx->height){
if(avcodec_check_dimensions(VAR_0->avctx, VAR_0->width, VAR_0->height))
return -1;
avcodec_set_dimensions(VAR_0->avctx, VAR_0->width, VAR_0->height);
}
if(VAR_0->picture.data[0])
VAR_0->avctx->release_buffer(VAR_0->avctx, &VAR_0->picture);
if(VAR_0->avctx->get_buffer(VAR_0->avctx, &VAR_0->picture) < 0){
av_log(VAR_0->avctx, AV_LOG_ERROR, "get_buffer() failed\n");
return -1;
}
if(VAR_0->bpp == 8){
pal = (uint32_t *) VAR_0->picture.data[1];
for(VAR_9 = 0; VAR_9 < 256; VAR_9++)
pal[VAR_9] = VAR_9 * 0x010101;
}
break;
case TIFF_COMPR:
VAR_0->compr = VAR_8;
VAR_0->predictor = 0;
switch(VAR_0->compr){
case TIFF_RAW:
case TIFF_PACKBITS:
case TIFF_LZW:
case TIFF_CCITT_RLE:
break;
case TIFF_G3:
case TIFF_G4:
VAR_0->fax_opts = 0;
break;
case TIFF_DEFLATE:
case TIFF_ADOBE_DEFLATE:
#if CONFIG_ZLIB
break;
#else
av_log(VAR_0->avctx, AV_LOG_ERROR, "Deflate: ZLib not compiled in\n");
return -1;
#endif
case TIFF_JPEG:
case TIFF_NEWJPEG:
av_log(VAR_0->avctx, AV_LOG_ERROR, "JPEG compression is not supported\n");
return -1;
default:
av_log(VAR_0->avctx, AV_LOG_ERROR, "Unknown compression method %VAR_9\n", VAR_0->compr);
return -1;
}
break;
case TIFF_ROWSPERSTRIP:
if(VAR_5 == TIFF_LONG && VAR_8 == -1)
VAR_8 = VAR_0->avctx->height;
if(VAR_8 < 1){
av_log(VAR_0->avctx, AV_LOG_ERROR, "Incorrect VAR_8 of rows per strip\n");
return -1;
}
VAR_0->rps = VAR_8;
break;
case TIFF_STRIP_OFFS:
if(VAR_6 == 1){
VAR_0->stripdata = NULL;
VAR_0->stripoff = VAR_8;
}else
VAR_0->stripdata = VAR_1 + VAR_7;
VAR_0->strips = VAR_6;
if(VAR_0->strips == 1) VAR_0->rps = VAR_0->height;
VAR_0->sot = VAR_5;
if(VAR_0->stripdata > VAR_3){
av_log(VAR_0->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
return -1;
}
break;
case TIFF_STRIP_SIZE:
if(VAR_6 == 1){
VAR_0->stripsizes = NULL;
VAR_0->stripsize = VAR_8;
VAR_0->strips = 1;
}else{
VAR_0->stripsizes = VAR_1 + VAR_7;
}
VAR_0->strips = VAR_6;
VAR_0->sstype = VAR_5;
if(VAR_0->stripsizes > VAR_3){
av_log(VAR_0->avctx, AV_LOG_ERROR, "Tag referencing position outside the image\n");
return -1;
}
break;
case TIFF_PREDICTOR:
VAR_0->predictor = VAR_8;
break;
case TIFF_INVERT:
switch(VAR_8){
case 0:
VAR_0->invert = 1;
break;
case 1:
VAR_0->invert = 0;
break;
case 2:
case 3:
break;
default:
av_log(VAR_0->avctx, AV_LOG_ERROR, "Color mode %d is not supported\n", VAR_8);
return -1;
}
break;
case TIFF_PAL:
if(VAR_0->avctx->pix_fmt != PIX_FMT_PAL8){
av_log(VAR_0->avctx, AV_LOG_ERROR, "Palette met but this is not palettized format\n");
return -1;
}
pal = (uint32_t *) VAR_0->picture.data[1];
VAR_7 = type_sizes[VAR_5];
VAR_11 = VAR_2;
gp = VAR_2 + VAR_6 / 3 * VAR_7;
bp = VAR_2 + VAR_6 / 3 * VAR_7 * 2;
VAR_7 = (type_sizes[VAR_5] - 1) << 3;
for(VAR_9 = 0; VAR_9 < VAR_6 / 3; VAR_9++){
VAR_10 = (tget(&VAR_11, VAR_5, VAR_0->le) >> VAR_7) << 16;
VAR_10 |= (tget(&gp, VAR_5, VAR_0->le) >> VAR_7) << 8;
VAR_10 |= tget(&bp, VAR_5, VAR_0->le) >> VAR_7;
pal[VAR_9] = VAR_10;
}
break;
case TIFF_PLANAR:
if(VAR_8 == 2){
av_log(VAR_0->avctx, AV_LOG_ERROR, "Planar format is not supported\n");
return -1;
}
break;
case TIFF_T4OPTIONS:
case TIFF_T6OPTIONS:
VAR_0->fax_opts = VAR_8;
break;
}
return 0;
}
| [
"static int FUNC_0(TiffContext *VAR_0, const uint8_t *VAR_1, const uint8_t *VAR_2, const uint8_t *VAR_3)\n{",
"int VAR_4, VAR_5, VAR_6, VAR_7, VAR_8 = 0;",
"int VAR_9, VAR_10;",
"uint32_t *pal;",
"const uint8_t *VAR_11, *gp, *bp;",
"VAR_4 = tget_short(&VAR_2, VAR_0->le);",
"VAR_5 = tget_short(&VAR_2, VA... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
15
],
[
17
],
[
19
],
[
21
],
[
25
],
[
27
],
[
29,
31,
33
],
[
35
],
[
37
],
[
39
],
[
41,
43
],
[
45
],
[
47
],
[... |
13,119 | static int slirp_hostfwd(SlirpState *s, const char *redir_str,
int legacy_format)
{
struct in_addr host_addr = { .s_addr = INADDR_ANY };
struct in_addr guest_addr = { .s_addr = 0 };
int host_port, guest_port;
const char *p;
char buf[256];
int is_udp;
char *end;
p = redir_str;
if (!p || get_str_sep(buf, sizeof(buf), &p, ':') < 0) {
goto fail_syntax;
}
if (!strcmp(buf, "tcp") || buf[0] == '\0') {
is_udp = 0;
} else if (!strcmp(buf, "udp")) {
is_udp = 1;
} else {
goto fail_syntax;
}
if (!legacy_format) {
if (get_str_sep(buf, sizeof(buf), &p, ':') < 0) {
goto fail_syntax;
}
if (buf[0] != '\0' && !inet_aton(buf, &host_addr)) {
goto fail_syntax;
}
}
if (get_str_sep(buf, sizeof(buf), &p, legacy_format ? ':' : '-') < 0) {
goto fail_syntax;
}
host_port = strtol(buf, &end, 0);
if (*end != '\0' || host_port < 0 || host_port > 65535) {
goto fail_syntax;
}
if (get_str_sep(buf, sizeof(buf), &p, ':') < 0) {
goto fail_syntax;
}
if (buf[0] != '\0' && !inet_aton(buf, &guest_addr)) {
goto fail_syntax;
}
guest_port = strtol(p, &end, 0);
if (*end != '\0' || guest_port < 1 || guest_port > 65535) {
goto fail_syntax;
}
if (slirp_add_hostfwd(s->slirp, is_udp, host_addr, host_port, guest_addr,
guest_port) < 0) {
error_report("could not set up host forwarding rule '%s'",
redir_str);
return -1;
}
return 0;
fail_syntax:
error_report("invalid host forwarding rule '%s'", redir_str);
return -1;
}
| true | qemu | 5c843af22604edecda10d4bb89d4eede9e1bd3d0 | static int slirp_hostfwd(SlirpState *s, const char *redir_str,
int legacy_format)
{
struct in_addr host_addr = { .s_addr = INADDR_ANY };
struct in_addr guest_addr = { .s_addr = 0 };
int host_port, guest_port;
const char *p;
char buf[256];
int is_udp;
char *end;
p = redir_str;
if (!p || get_str_sep(buf, sizeof(buf), &p, ':') < 0) {
goto fail_syntax;
}
if (!strcmp(buf, "tcp") || buf[0] == '\0') {
is_udp = 0;
} else if (!strcmp(buf, "udp")) {
is_udp = 1;
} else {
goto fail_syntax;
}
if (!legacy_format) {
if (get_str_sep(buf, sizeof(buf), &p, ':') < 0) {
goto fail_syntax;
}
if (buf[0] != '\0' && !inet_aton(buf, &host_addr)) {
goto fail_syntax;
}
}
if (get_str_sep(buf, sizeof(buf), &p, legacy_format ? ':' : '-') < 0) {
goto fail_syntax;
}
host_port = strtol(buf, &end, 0);
if (*end != '\0' || host_port < 0 || host_port > 65535) {
goto fail_syntax;
}
if (get_str_sep(buf, sizeof(buf), &p, ':') < 0) {
goto fail_syntax;
}
if (buf[0] != '\0' && !inet_aton(buf, &guest_addr)) {
goto fail_syntax;
}
guest_port = strtol(p, &end, 0);
if (*end != '\0' || guest_port < 1 || guest_port > 65535) {
goto fail_syntax;
}
if (slirp_add_hostfwd(s->slirp, is_udp, host_addr, host_port, guest_addr,
guest_port) < 0) {
error_report("could not set up host forwarding rule '%s'",
redir_str);
return -1;
}
return 0;
fail_syntax:
error_report("invalid host forwarding rule '%s'", redir_str);
return -1;
}
| {
"code": [
" int legacy_format)",
" error_report(\"could not set up host forwarding rule '%s'\",",
" redir_str);",
" error_report(\"invalid host forwarding rule '%s'\", redir_str);"
],
"line_no": [
3,
109,
111,
123
]
} | static int FUNC_0(SlirpState *VAR_0, const char *VAR_1,
int VAR_2)
{
struct in_addr VAR_3 = { .s_addr = INADDR_ANY };
struct in_addr VAR_4 = { .s_addr = 0 };
int VAR_5, VAR_6;
const char *VAR_7;
char VAR_8[256];
int VAR_9;
char *VAR_10;
VAR_7 = VAR_1;
if (!VAR_7 || get_str_sep(VAR_8, sizeof(VAR_8), &VAR_7, ':') < 0) {
goto fail_syntax;
}
if (!strcmp(VAR_8, "tcp") || VAR_8[0] == '\0') {
VAR_9 = 0;
} else if (!strcmp(VAR_8, "udp")) {
VAR_9 = 1;
} else {
goto fail_syntax;
}
if (!VAR_2) {
if (get_str_sep(VAR_8, sizeof(VAR_8), &VAR_7, ':') < 0) {
goto fail_syntax;
}
if (VAR_8[0] != '\0' && !inet_aton(VAR_8, &VAR_3)) {
goto fail_syntax;
}
}
if (get_str_sep(VAR_8, sizeof(VAR_8), &VAR_7, VAR_2 ? ':' : '-') < 0) {
goto fail_syntax;
}
VAR_5 = strtol(VAR_8, &VAR_10, 0);
if (*VAR_10 != '\0' || VAR_5 < 0 || VAR_5 > 65535) {
goto fail_syntax;
}
if (get_str_sep(VAR_8, sizeof(VAR_8), &VAR_7, ':') < 0) {
goto fail_syntax;
}
if (VAR_8[0] != '\0' && !inet_aton(VAR_8, &VAR_4)) {
goto fail_syntax;
}
VAR_6 = strtol(VAR_7, &VAR_10, 0);
if (*VAR_10 != '\0' || VAR_6 < 1 || VAR_6 > 65535) {
goto fail_syntax;
}
if (slirp_add_hostfwd(VAR_0->slirp, VAR_9, VAR_3, VAR_5, VAR_4,
VAR_6) < 0) {
error_report("could not set up host forwarding rule '%VAR_0'",
VAR_1);
return -1;
}
return 0;
fail_syntax:
error_report("invalid host forwarding rule '%VAR_0'", VAR_1);
return -1;
}
| [
"static int FUNC_0(SlirpState *VAR_0, const char *VAR_1,\nint VAR_2)\n{",
"struct in_addr VAR_3 = { .s_addr = INADDR_ANY };",
"struct in_addr VAR_4 = { .s_addr = 0 };",
"int VAR_5, VAR_6;",
"const char *VAR_7;",
"char VAR_8[256];",
"int VAR_9;",
"char *VAR_10;",
"VAR_7 = VAR_1;",
"if (!VAR_7 || ge... | [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[... |
13,120 | int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
if (dma_has_iommu(dma)) {
return iommu_dma_memory_set(dma, addr, c, len);
}
do_dma_memory_set(addr, c, len);
return 0;
} | true | qemu | 7a0bac4da9c6a2e36d388412f3b4074b10429e8e | int dma_memory_set(DMAContext *dma, dma_addr_t addr, uint8_t c, dma_addr_t len)
{
if (dma_has_iommu(dma)) {
return iommu_dma_memory_set(dma, addr, c, len);
}
do_dma_memory_set(addr, c, len);
return 0;
} | {
"code": [],
"line_no": []
} | int FUNC_0(DMAContext *VAR_0, dma_addr_t VAR_1, uint8_t VAR_2, dma_addr_t VAR_3)
{
if (dma_has_iommu(VAR_0)) {
return iommu_dma_memory_set(VAR_0, VAR_1, VAR_2, VAR_3);
}
do_dma_memory_set(VAR_1, VAR_2, VAR_3);
return 0;
} | [
"int FUNC_0(DMAContext *VAR_0, dma_addr_t VAR_1, uint8_t VAR_2, dma_addr_t VAR_3)\n{",
"if (dma_has_iommu(VAR_0)) {",
"return iommu_dma_memory_set(VAR_0, VAR_1, VAR_2, VAR_3);",
"}",
"do_dma_memory_set(VAR_1, VAR_2, VAR_3);",
"return 0;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
2
],
[
3
],
[
4
],
[
5
],
[
6
],
[
7
],
[
8
]
] |
13,122 | static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
{
PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
trace_loadvm_postcopy_handle_run();
if (ps != POSTCOPY_INCOMING_LISTENING) {
error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps);
return -1;
}
mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, NULL);
qemu_bh_schedule(mis->bh);
/* We need to finish reading the stream from the package
* and also stop reading anything more from the stream that loaded the
* package (since it's now being read by the listener thread).
* LOADVM_QUIT will quit all the layers of nested loadvm loops.
*/
return LOADVM_QUIT;
}
| true | qemu | 864699227911909ef1e33ecf91bf3c900715a9b1 | static int loadvm_postcopy_handle_run(MigrationIncomingState *mis)
{
PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
trace_loadvm_postcopy_handle_run();
if (ps != POSTCOPY_INCOMING_LISTENING) {
error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps);
return -1;
}
mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, NULL);
qemu_bh_schedule(mis->bh);
return LOADVM_QUIT;
}
| {
"code": [
" mis->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, NULL);",
" qemu_bh_schedule(mis->bh);"
],
"line_no": [
21,
23
]
} | static int FUNC_0(MigrationIncomingState *VAR_0)
{
PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING);
trace_loadvm_postcopy_handle_run();
if (ps != POSTCOPY_INCOMING_LISTENING) {
error_report("CMD_POSTCOPY_RUN in wrong postcopy state (%d)", ps);
return -1;
}
VAR_0->bh = qemu_bh_new(loadvm_postcopy_handle_run_bh, NULL);
qemu_bh_schedule(VAR_0->bh);
return LOADVM_QUIT;
}
| [
"static int FUNC_0(MigrationIncomingState *VAR_0)\n{",
"PostcopyState ps = postcopy_state_set(POSTCOPY_INCOMING_RUNNING);",
"trace_loadvm_postcopy_handle_run();",
"if (ps != POSTCOPY_INCOMING_LISTENING) {",
"error_report(\"CMD_POSTCOPY_RUN in wrong postcopy state (%d)\", ps);",
"return -1;",
"}",
"VAR... | [
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
21
],
[
23
],
[
37
],
[
39
]
] |
13,123 | int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx,
uint8_t *buf, int buf_size,
const short *samples)
{
AVPacket pkt;
AVFrame *frame;
int ret, samples_size, got_packet;
av_init_packet(&pkt);
pkt.data = buf;
pkt.size = buf_size;
if (samples) {
frame = av_frame_alloc();
if (avctx->frame_size) {
frame->nb_samples = avctx->frame_size;
} else {
/* if frame_size is not set, the number of samples must be
* calculated from the buffer size */
int64_t nb_samples;
if (!av_get_bits_per_sample(avctx->codec_id)) {
av_log(avctx, AV_LOG_ERROR, "avcodec_encode_audio() does not "
"support this codec\n");
av_frame_free(&frame);
return AVERROR(EINVAL);
}
nb_samples = (int64_t)buf_size * 8 /
(av_get_bits_per_sample(avctx->codec_id) *
avctx->channels);
if (nb_samples >= INT_MAX) {
av_frame_free(&frame);
return AVERROR(EINVAL);
}
frame->nb_samples = nb_samples;
}
/* it is assumed that the samples buffer is large enough based on the
* relevant parameters */
samples_size = av_samples_get_buffer_size(NULL, avctx->channels,
frame->nb_samples,
avctx->sample_fmt, 1);
if ((ret = avcodec_fill_audio_frame(frame, avctx->channels,
avctx->sample_fmt,
(const uint8_t *)samples,
samples_size, 1)) < 0) {
av_frame_free(&frame);
return ret;
}
/* fabricate frame pts from sample count.
* this is needed because the avcodec_encode_audio() API does not have
* a way for the user to provide pts */
if (avctx->sample_rate && avctx->time_base.num)
frame->pts = ff_samples_to_time_base(avctx,
avctx->internal->sample_count);
else
frame->pts = AV_NOPTS_VALUE;
avctx->internal->sample_count += frame->nb_samples;
} else {
frame = NULL;
}
got_packet = 0;
ret = avcodec_encode_audio2(avctx, &pkt, frame, &got_packet);
if (!ret && got_packet && avctx->coded_frame) {
avctx->coded_frame->pts = pkt.pts;
avctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
}
/* free any side data since we cannot return it */
av_packet_free_side_data(&pkt);
if (frame && frame->extended_data != frame->data)
av_freep(&frame->extended_data);
av_frame_free(&frame);
return ret ? ret : pkt.size;
} | true | FFmpeg | 38004051b53ddecb518053e6dadafa9adc4fc1b2 | int attribute_align_arg avcodec_encode_audio(AVCodecContext *avctx,
uint8_t *buf, int buf_size,
const short *samples)
{
AVPacket pkt;
AVFrame *frame;
int ret, samples_size, got_packet;
av_init_packet(&pkt);
pkt.data = buf;
pkt.size = buf_size;
if (samples) {
frame = av_frame_alloc();
if (avctx->frame_size) {
frame->nb_samples = avctx->frame_size;
} else {
int64_t nb_samples;
if (!av_get_bits_per_sample(avctx->codec_id)) {
av_log(avctx, AV_LOG_ERROR, "avcodec_encode_audio() does not "
"support this codec\n");
av_frame_free(&frame);
return AVERROR(EINVAL);
}
nb_samples = (int64_t)buf_size * 8 /
(av_get_bits_per_sample(avctx->codec_id) *
avctx->channels);
if (nb_samples >= INT_MAX) {
av_frame_free(&frame);
return AVERROR(EINVAL);
}
frame->nb_samples = nb_samples;
}
samples_size = av_samples_get_buffer_size(NULL, avctx->channels,
frame->nb_samples,
avctx->sample_fmt, 1);
if ((ret = avcodec_fill_audio_frame(frame, avctx->channels,
avctx->sample_fmt,
(const uint8_t *)samples,
samples_size, 1)) < 0) {
av_frame_free(&frame);
return ret;
}
if (avctx->sample_rate && avctx->time_base.num)
frame->pts = ff_samples_to_time_base(avctx,
avctx->internal->sample_count);
else
frame->pts = AV_NOPTS_VALUE;
avctx->internal->sample_count += frame->nb_samples;
} else {
frame = NULL;
}
got_packet = 0;
ret = avcodec_encode_audio2(avctx, &pkt, frame, &got_packet);
if (!ret && got_packet && avctx->coded_frame) {
avctx->coded_frame->pts = pkt.pts;
avctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
}
av_packet_free_side_data(&pkt);
if (frame && frame->extended_data != frame->data)
av_freep(&frame->extended_data);
av_frame_free(&frame);
return ret ? ret : pkt.size;
} | {
"code": [],
"line_no": []
} | int VAR_0 avcodec_encode_audio(AVCodecContext *avctx,
uint8_t *buf, int buf_size,
const short *samples)
{
AVPacket pkt;
AVFrame *frame;
int ret, samples_size, got_packet;
av_init_packet(&pkt);
pkt.data = buf;
pkt.size = buf_size;
if (samples) {
frame = av_frame_alloc();
if (avctx->frame_size) {
frame->nb_samples = avctx->frame_size;
} else {
int64_t nb_samples;
if (!av_get_bits_per_sample(avctx->codec_id)) {
av_log(avctx, AV_LOG_ERROR, "avcodec_encode_audio() does not "
"support this codec\n");
av_frame_free(&frame);
return AVERROR(EINVAL);
}
nb_samples = (int64_t)buf_size * 8 /
(av_get_bits_per_sample(avctx->codec_id) *
avctx->channels);
if (nb_samples >= INT_MAX) {
av_frame_free(&frame);
return AVERROR(EINVAL);
}
frame->nb_samples = nb_samples;
}
samples_size = av_samples_get_buffer_size(NULL, avctx->channels,
frame->nb_samples,
avctx->sample_fmt, 1);
if ((ret = avcodec_fill_audio_frame(frame, avctx->channels,
avctx->sample_fmt,
(const uint8_t *)samples,
samples_size, 1)) < 0) {
av_frame_free(&frame);
return ret;
}
if (avctx->sample_rate && avctx->time_base.num)
frame->pts = ff_samples_to_time_base(avctx,
avctx->internal->sample_count);
else
frame->pts = AV_NOPTS_VALUE;
avctx->internal->sample_count += frame->nb_samples;
} else {
frame = NULL;
}
got_packet = 0;
ret = avcodec_encode_audio2(avctx, &pkt, frame, &got_packet);
if (!ret && got_packet && avctx->coded_frame) {
avctx->coded_frame->pts = pkt.pts;
avctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
}
av_packet_free_side_data(&pkt);
if (frame && frame->extended_data != frame->data)
av_freep(&frame->extended_data);
av_frame_free(&frame);
return ret ? ret : pkt.size;
} | [
"int VAR_0 avcodec_encode_audio(AVCodecContext *avctx,\nuint8_t *buf, int buf_size,\nconst short *samples)\n{",
"AVPacket pkt;",
"AVFrame *frame;",
"int ret, samples_size, got_packet;",
"av_init_packet(&pkt);",
"pkt.data = buf;",
"pkt.size = buf_size;",
"if (samples) {",
"frame = av_frame_alloc();",... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9
],
[
11
],
[
13
],
[
17
],
[
19
],
[
21
],
[
25
],
[
27
],
[
33
],
[
35
],
[
37
],
[
43
],
[
45
],
[
47,
49
],
[
51
],
[
53
],
[
55
],
[... |
13,124 | void main_loop_wait(int nonblocking)
{
IOHandlerRecord *ioh;
fd_set rfds, wfds, xfds;
int ret, nfds;
struct timeval tv;
int timeout;
if (nonblocking)
timeout = 0;
else {
timeout = qemu_calculate_timeout();
qemu_bh_update_timeout(&timeout);
}
os_host_main_loop_wait(&timeout);
/* poll any events */
/* XXX: separate device handlers from system ones */
nfds = -1;
FD_ZERO(&rfds);
FD_ZERO(&wfds);
FD_ZERO(&xfds);
QLIST_FOREACH(ioh, &io_handlers, next) {
if (ioh->deleted)
continue;
if (ioh->fd_read &&
(!ioh->fd_read_poll ||
ioh->fd_read_poll(ioh->opaque) != 0)) {
FD_SET(ioh->fd, &rfds);
if (ioh->fd > nfds)
nfds = ioh->fd;
}
if (ioh->fd_write) {
FD_SET(ioh->fd, &wfds);
if (ioh->fd > nfds)
nfds = ioh->fd;
}
}
tv.tv_sec = timeout / 1000;
tv.tv_usec = (timeout % 1000) * 1000;
slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
qemu_mutex_unlock_iothread();
ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
qemu_mutex_lock_iothread();
if (ret > 0) {
IOHandlerRecord *pioh;
QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
if (ioh->deleted) {
QLIST_REMOVE(ioh, next);
qemu_free(ioh);
continue;
}
if (ioh->fd_read && FD_ISSET(ioh->fd, &rfds)) {
ioh->fd_read(ioh->opaque);
}
if (ioh->fd_write && FD_ISSET(ioh->fd, &wfds)) {
ioh->fd_write(ioh->opaque);
}
}
}
slirp_select_poll(&rfds, &wfds, &xfds, (ret < 0));
qemu_run_all_timers();
/* Check bottom-halves last in case any of the earlier events triggered
them. */
qemu_bh_poll();
}
| true | qemu | 0290b57bdfec83ca78b6d119ea9847bb17943328 | void main_loop_wait(int nonblocking)
{
IOHandlerRecord *ioh;
fd_set rfds, wfds, xfds;
int ret, nfds;
struct timeval tv;
int timeout;
if (nonblocking)
timeout = 0;
else {
timeout = qemu_calculate_timeout();
qemu_bh_update_timeout(&timeout);
}
os_host_main_loop_wait(&timeout);
nfds = -1;
FD_ZERO(&rfds);
FD_ZERO(&wfds);
FD_ZERO(&xfds);
QLIST_FOREACH(ioh, &io_handlers, next) {
if (ioh->deleted)
continue;
if (ioh->fd_read &&
(!ioh->fd_read_poll ||
ioh->fd_read_poll(ioh->opaque) != 0)) {
FD_SET(ioh->fd, &rfds);
if (ioh->fd > nfds)
nfds = ioh->fd;
}
if (ioh->fd_write) {
FD_SET(ioh->fd, &wfds);
if (ioh->fd > nfds)
nfds = ioh->fd;
}
}
tv.tv_sec = timeout / 1000;
tv.tv_usec = (timeout % 1000) * 1000;
slirp_select_fill(&nfds, &rfds, &wfds, &xfds);
qemu_mutex_unlock_iothread();
ret = select(nfds + 1, &rfds, &wfds, &xfds, &tv);
qemu_mutex_lock_iothread();
if (ret > 0) {
IOHandlerRecord *pioh;
QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
if (ioh->deleted) {
QLIST_REMOVE(ioh, next);
qemu_free(ioh);
continue;
}
if (ioh->fd_read && FD_ISSET(ioh->fd, &rfds)) {
ioh->fd_read(ioh->opaque);
}
if (ioh->fd_write && FD_ISSET(ioh->fd, &wfds)) {
ioh->fd_write(ioh->opaque);
}
}
}
slirp_select_poll(&rfds, &wfds, &xfds, (ret < 0));
qemu_run_all_timers();
qemu_bh_poll();
}
| {
"code": [
" if (ioh->deleted) {",
" QLIST_REMOVE(ioh, next);",
" qemu_free(ioh);",
" continue;",
" if (ioh->fd_read && FD_ISSET(ioh->fd, &rfds)) {",
" if (ioh->fd_write && FD_ISSET(ioh->fd, &wfds)) {"
],
"line_no": [
105,
107,
109,
111,
115,
121
]
} | void FUNC_0(int VAR_0)
{
IOHandlerRecord *ioh;
fd_set rfds, wfds, xfds;
int VAR_1, VAR_2;
struct timeval VAR_3;
int VAR_4;
if (VAR_0)
VAR_4 = 0;
else {
VAR_4 = qemu_calculate_timeout();
qemu_bh_update_timeout(&VAR_4);
}
os_host_main_loop_wait(&VAR_4);
VAR_2 = -1;
FD_ZERO(&rfds);
FD_ZERO(&wfds);
FD_ZERO(&xfds);
QLIST_FOREACH(ioh, &io_handlers, next) {
if (ioh->deleted)
continue;
if (ioh->fd_read &&
(!ioh->fd_read_poll ||
ioh->fd_read_poll(ioh->opaque) != 0)) {
FD_SET(ioh->fd, &rfds);
if (ioh->fd > VAR_2)
VAR_2 = ioh->fd;
}
if (ioh->fd_write) {
FD_SET(ioh->fd, &wfds);
if (ioh->fd > VAR_2)
VAR_2 = ioh->fd;
}
}
VAR_3.tv_sec = VAR_4 / 1000;
VAR_3.tv_usec = (VAR_4 % 1000) * 1000;
slirp_select_fill(&VAR_2, &rfds, &wfds, &xfds);
qemu_mutex_unlock_iothread();
VAR_1 = select(VAR_2 + 1, &rfds, &wfds, &xfds, &VAR_3);
qemu_mutex_lock_iothread();
if (VAR_1 > 0) {
IOHandlerRecord *pioh;
QLIST_FOREACH_SAFE(ioh, &io_handlers, next, pioh) {
if (ioh->deleted) {
QLIST_REMOVE(ioh, next);
qemu_free(ioh);
continue;
}
if (ioh->fd_read && FD_ISSET(ioh->fd, &rfds)) {
ioh->fd_read(ioh->opaque);
}
if (ioh->fd_write && FD_ISSET(ioh->fd, &wfds)) {
ioh->fd_write(ioh->opaque);
}
}
}
slirp_select_poll(&rfds, &wfds, &xfds, (VAR_1 < 0));
qemu_run_all_timers();
qemu_bh_poll();
}
| [
"void FUNC_0(int VAR_0)\n{",
"IOHandlerRecord *ioh;",
"fd_set rfds, wfds, xfds;",
"int VAR_1, VAR_2;",
"struct timeval VAR_3;",
"int VAR_4;",
"if (VAR_0)\nVAR_4 = 0;",
"else {",
"VAR_4 = qemu_calculate_timeout();",
"qemu_bh_update_timeout(&VAR_4);",
"}",
"os_host_main_loop_wait(&VAR_4);",
"V... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
1,
1,
0,
1,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17,
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
31
],
[
39
],
[
41
],
[
43
],
[
45
],
[
47
],
[
49,
51
],
[
53,... |
13,125 | static void gen_lswx(DisasContext *ctx)
{
TCGv t0;
TCGv_i32 t1, t2, t3;
gen_set_access_type(ctx, ACCESS_INT);
/* NIP cannot be restored if the memory exception comes from an helper */
gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
t1 = tcg_const_i32(rD(ctx->opcode));
t2 = tcg_const_i32(rA(ctx->opcode));
t3 = tcg_const_i32(rB(ctx->opcode));
gen_helper_lswx(cpu_env, t0, t1, t2, t3);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
tcg_temp_free_i32(t3);
}
| true | qemu | e41029b378b4a65a0b89b5a8dc087aca6b5d012d | static void gen_lswx(DisasContext *ctx)
{
TCGv t0;
TCGv_i32 t1, t2, t3;
gen_set_access_type(ctx, ACCESS_INT);
gen_update_nip(ctx, ctx->nip - 4);
t0 = tcg_temp_new();
gen_addr_reg_index(ctx, t0);
t1 = tcg_const_i32(rD(ctx->opcode));
t2 = tcg_const_i32(rA(ctx->opcode));
t3 = tcg_const_i32(rB(ctx->opcode));
gen_helper_lswx(cpu_env, t0, t1, t2, t3);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
tcg_temp_free_i32(t3);
}
| {
"code": [
" gen_update_nip(ctx, ctx->nip - 4);",
" gen_update_nip(ctx, ctx->nip - 4);",
" gen_update_nip(ctx, ctx->nip - 4);",
" gen_update_nip(ctx, ctx->nip - 4);"
],
"line_no": [
13,
13,
13,
13
]
} | static void FUNC_0(DisasContext *VAR_0)
{
TCGv t0;
TCGv_i32 t1, t2, t3;
gen_set_access_type(VAR_0, ACCESS_INT);
gen_update_nip(VAR_0, VAR_0->nip - 4);
t0 = tcg_temp_new();
gen_addr_reg_index(VAR_0, t0);
t1 = tcg_const_i32(rD(VAR_0->opcode));
t2 = tcg_const_i32(rA(VAR_0->opcode));
t3 = tcg_const_i32(rB(VAR_0->opcode));
gen_helper_lswx(cpu_env, t0, t1, t2, t3);
tcg_temp_free(t0);
tcg_temp_free_i32(t1);
tcg_temp_free_i32(t2);
tcg_temp_free_i32(t3);
}
| [
"static void FUNC_0(DisasContext *VAR_0)\n{",
"TCGv t0;",
"TCGv_i32 t1, t2, t3;",
"gen_set_access_type(VAR_0, ACCESS_INT);",
"gen_update_nip(VAR_0, VAR_0->nip - 4);",
"t0 = tcg_temp_new();",
"gen_addr_reg_index(VAR_0, t0);",
"t1 = tcg_const_i32(rD(VAR_0->opcode));",
"t2 = tcg_const_i32(rA(VAR_0->opc... | [
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
]
] |
13,126 | static void get_sensor_reading(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
uint8_t *rsp, unsigned int *rsp_len,
unsigned int max_rsp_len)
{
IPMISensor *sens;
IPMI_CHECK_CMD_LEN(3);
if ((cmd[2] > MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
return;
}
sens = ibs->sensors + cmd[2];
IPMI_ADD_RSP_DATA(sens->reading);
IPMI_ADD_RSP_DATA(IPMI_SENSOR_GET_RET_STATUS(sens));
IPMI_ADD_RSP_DATA(sens->states & 0xff);
if (IPMI_SENSOR_IS_DISCRETE(sens)) {
IPMI_ADD_RSP_DATA((sens->states >> 8) & 0xff);
}
}
| true | qemu | 73d60fa5fae60c8e07e1f295d8c7fd5d04320160 | static void get_sensor_reading(IPMIBmcSim *ibs,
uint8_t *cmd, unsigned int cmd_len,
uint8_t *rsp, unsigned int *rsp_len,
unsigned int max_rsp_len)
{
IPMISensor *sens;
IPMI_CHECK_CMD_LEN(3);
if ((cmd[2] > MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(ibs->sensors + cmd[2])) {
rsp[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
return;
}
sens = ibs->sensors + cmd[2];
IPMI_ADD_RSP_DATA(sens->reading);
IPMI_ADD_RSP_DATA(IPMI_SENSOR_GET_RET_STATUS(sens));
IPMI_ADD_RSP_DATA(sens->states & 0xff);
if (IPMI_SENSOR_IS_DISCRETE(sens)) {
IPMI_ADD_RSP_DATA((sens->states >> 8) & 0xff);
}
}
| {
"code": [
" if ((cmd[2] > MAX_SENSORS) ||",
" if ((cmd[2] > MAX_SENSORS) ||",
" if ((cmd[2] > MAX_SENSORS) ||",
" if ((cmd[2] > MAX_SENSORS) ||",
" if ((cmd[2] > MAX_SENSORS) ||",
" if ((cmd[2] > MAX_SENSORS) ||",
" if ((cmd[2] > MAX_SENSORS) ||"
],
"line_no": [
17,
17,
17,
17,
17,
17,
17
]
} | static void FUNC_0(IPMIBmcSim *VAR_0,
uint8_t *VAR_1, unsigned int VAR_2,
uint8_t *VAR_3, unsigned int *VAR_4,
unsigned int VAR_5)
{
IPMISensor *sens;
IPMI_CHECK_CMD_LEN(3);
if ((VAR_1[2] > MAX_SENSORS) ||
!IPMI_SENSOR_GET_PRESENT(VAR_0->sensors + VAR_1[2])) {
VAR_3[2] = IPMI_CC_REQ_ENTRY_NOT_PRESENT;
return;
}
sens = VAR_0->sensors + VAR_1[2];
IPMI_ADD_RSP_DATA(sens->reading);
IPMI_ADD_RSP_DATA(IPMI_SENSOR_GET_RET_STATUS(sens));
IPMI_ADD_RSP_DATA(sens->states & 0xff);
if (IPMI_SENSOR_IS_DISCRETE(sens)) {
IPMI_ADD_RSP_DATA((sens->states >> 8) & 0xff);
}
}
| [
"static void FUNC_0(IPMIBmcSim *VAR_0,\nuint8_t *VAR_1, unsigned int VAR_2,\nuint8_t *VAR_3, unsigned int *VAR_4,\nunsigned int VAR_5)\n{",
"IPMISensor *sens;",
"IPMI_CHECK_CMD_LEN(3);",
"if ((VAR_1[2] > MAX_SENSORS) ||\n!IPMI_SENSOR_GET_PRESENT(VAR_0->sensors + VAR_1[2])) {",
"VAR_3[2] = IPMI_CC_REQ_ENTRY_... | [
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7,
9
],
[
11
],
[
15
],
[
17,
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
]
] |
13,127 | int spapr_tce_dma_read(VIOsPAPRDevice *dev, uint64_t taddr, void *buf,
uint32_t size)
{
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_tce_dma_write taddr=0x%llx size=0x%x\n",
(unsigned long long)taddr, size);
#endif
while (size) {
uint64_t tce;
uint32_t lsize;
uint64_t txaddr;
/* Check if we are in bound */
if (taddr >= dev->rtce_window_size) {
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_tce_dma_read out of bounds\n");
#endif
return H_DEST_PARM;
tce = dev->rtce_table[taddr >> SPAPR_VIO_TCE_PAGE_SHIFT].tce;
/* How much til end of page ? */
lsize = MIN(size, ((~taddr) & SPAPR_VIO_TCE_PAGE_MASK) + 1);
/* Check TCE */
if (!(tce & 1)) {
return H_DEST_PARM;
/* Translate */
txaddr = (tce & ~SPAPR_VIO_TCE_PAGE_MASK) |
(taddr & SPAPR_VIO_TCE_PAGE_MASK);
#ifdef DEBUG_TCE
fprintf(stderr, " -> write to txaddr=0x%llx, size=0x%x\n",
(unsigned long long)txaddr, lsize);
#endif
/* Do it */
cpu_physical_memory_read(txaddr, buf, lsize);
buf += lsize;
taddr += lsize;
size -= lsize;
return H_SUCCESS; | true | qemu | 08942ac17922d923a7cc5cf9854e9cc4b150b942 | int spapr_tce_dma_read(VIOsPAPRDevice *dev, uint64_t taddr, void *buf,
uint32_t size)
{
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_tce_dma_write taddr=0x%llx size=0x%x\n",
(unsigned long long)taddr, size);
#endif
while (size) {
uint64_t tce;
uint32_t lsize;
uint64_t txaddr;
if (taddr >= dev->rtce_window_size) {
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_tce_dma_read out of bounds\n");
#endif
return H_DEST_PARM;
tce = dev->rtce_table[taddr >> SPAPR_VIO_TCE_PAGE_SHIFT].tce;
lsize = MIN(size, ((~taddr) & SPAPR_VIO_TCE_PAGE_MASK) + 1);
if (!(tce & 1)) {
return H_DEST_PARM;
txaddr = (tce & ~SPAPR_VIO_TCE_PAGE_MASK) |
(taddr & SPAPR_VIO_TCE_PAGE_MASK);
#ifdef DEBUG_TCE
fprintf(stderr, " -> write to txaddr=0x%llx, size=0x%x\n",
(unsigned long long)txaddr, lsize);
#endif
cpu_physical_memory_read(txaddr, buf, lsize);
buf += lsize;
taddr += lsize;
size -= lsize;
return H_SUCCESS; | {
"code": [],
"line_no": []
} | int FUNC_0(VIOsPAPRDevice *VAR_0, uint64_t VAR_1, void *VAR_2,
uint32_t VAR_3)
{
#ifdef DEBUG_TCE
fprintf(stderr, "spapr_tce_dma_write VAR_1=0x%llx VAR_3=0x%x\n",
(unsigned long long)VAR_1, VAR_3);
#endif
while (VAR_3) {
uint64_t tce;
uint32_t lsize;
uint64_t txaddr;
if (VAR_1 >= VAR_0->rtce_window_size) {
#ifdef DEBUG_TCE
fprintf(stderr, "FUNC_0 out of bounds\n");
#endif
return H_DEST_PARM;
tce = VAR_0->rtce_table[VAR_1 >> SPAPR_VIO_TCE_PAGE_SHIFT].tce;
lsize = MIN(VAR_3, ((~VAR_1) & SPAPR_VIO_TCE_PAGE_MASK) + 1);
if (!(tce & 1)) {
return H_DEST_PARM;
txaddr = (tce & ~SPAPR_VIO_TCE_PAGE_MASK) |
(VAR_1 & SPAPR_VIO_TCE_PAGE_MASK);
#ifdef DEBUG_TCE
fprintf(stderr, " -> write to txaddr=0x%llx, VAR_3=0x%x\n",
(unsigned long long)txaddr, lsize);
#endif
cpu_physical_memory_read(txaddr, VAR_2, lsize);
VAR_2 += lsize;
VAR_1 += lsize;
VAR_3 -= lsize;
return H_SUCCESS; | [
"int FUNC_0(VIOsPAPRDevice *VAR_0, uint64_t VAR_1, void *VAR_2,\nuint32_t VAR_3)\n{",
"#ifdef DEBUG_TCE\nfprintf(stderr, \"spapr_tce_dma_write VAR_1=0x%llx VAR_3=0x%x\\n\",\n(unsigned long long)VAR_1, VAR_3);",
"#endif\nwhile (VAR_3) {",
"uint64_t tce;",
"uint32_t lsize;",
"uint64_t txaddr;",
"if (VAR_1... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
2,
3
],
[
4,
5,
6
],
[
7,
8
],
[
9
],
[
10
],
[
11
],
[
13
],
[
14,
15
],
[
16,
17
],
[
18
],
[
20
],
[
22
],
[
23
],
[
25,
26
],
[
27,
28,
29
],
[
... |
13,128 | static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
const uint8_t *s = src;
const uint8_t *end;
const uint8_t *mm_end;
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
::"m"(red_15mask),"m"(green_15mask));
mm_end = end - 15;
while (s < mm_end) {
__asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"movd 4%1, %%mm3 \n\t"
"punpckldq 8%1, %%mm0 \n\t"
"punpckldq 12%1, %%mm3 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm0, %%mm2 \n\t"
"movq %%mm3, %%mm4 \n\t"
"movq %%mm3, %%mm5 \n\t"
"psllq $7, %%mm0 \n\t"
"psllq $7, %%mm3 \n\t"
"pand %%mm7, %%mm0 \n\t"
"pand %%mm7, %%mm3 \n\t"
"psrlq $6, %%mm1 \n\t"
"psrlq $6, %%mm4 \n\t"
"pand %%mm6, %%mm1 \n\t"
"pand %%mm6, %%mm4 \n\t"
"psrlq $19, %%mm2 \n\t"
"psrlq $19, %%mm5 \n\t"
"pand %2, %%mm2 \n\t"
"pand %2, %%mm5 \n\t"
"por %%mm1, %%mm0 \n\t"
"por %%mm4, %%mm3 \n\t"
"por %%mm2, %%mm0 \n\t"
"por %%mm5, %%mm3 \n\t"
"psllq $16, %%mm3 \n\t"
"por %%mm3, %%mm0 \n\t"
MOVNTQ" %%mm0, %0 \n\t"
:"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
d += 4;
s += 16;
}
__asm__ volatile(SFENCE:::"memory");
__asm__ volatile(EMMS:::"memory");
while (s < end) {
register int rgb = *(const uint32_t*)s; s += 4;
*d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
}
}
| true | FFmpeg | 90540c2d5ace46a1e9789c75fde0b1f7dbb12a9b | static inline void RENAME(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
const uint8_t *s = src;
const uint8_t *end;
const uint8_t *mm_end;
uint16_t *d = (uint16_t *)dst;
end = s + src_size;
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
::"m"(red_15mask),"m"(green_15mask));
mm_end = end - 15;
while (s < mm_end) {
__asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"movd 4%1, %%mm3 \n\t"
"punpckldq 8%1, %%mm0 \n\t"
"punpckldq 12%1, %%mm3 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm0, %%mm2 \n\t"
"movq %%mm3, %%mm4 \n\t"
"movq %%mm3, %%mm5 \n\t"
"psllq $7, %%mm0 \n\t"
"psllq $7, %%mm3 \n\t"
"pand %%mm7, %%mm0 \n\t"
"pand %%mm7, %%mm3 \n\t"
"psrlq $6, %%mm1 \n\t"
"psrlq $6, %%mm4 \n\t"
"pand %%mm6, %%mm1 \n\t"
"pand %%mm6, %%mm4 \n\t"
"psrlq $19, %%mm2 \n\t"
"psrlq $19, %%mm5 \n\t"
"pand %2, %%mm2 \n\t"
"pand %2, %%mm5 \n\t"
"por %%mm1, %%mm0 \n\t"
"por %%mm4, %%mm3 \n\t"
"por %%mm2, %%mm0 \n\t"
"por %%mm5, %%mm3 \n\t"
"psllq $16, %%mm3 \n\t"
"por %%mm3, %%mm0 \n\t"
MOVNTQ" %%mm0, %0 \n\t"
:"=m"(*d):"m"(*s),"m"(blue_15mask):"memory");
d += 4;
s += 16;
}
__asm__ volatile(SFENCE:::"memory");
__asm__ volatile(EMMS:::"memory");
while (s < end) {
register int rgb = *(const uint32_t*)s; s += 4;
*d++ = ((rgb&0xF8)<<7) + ((rgb&0xF800)>>6) + ((rgb&0xF80000)>>19);
}
}
| {
"code": [
" PREFETCH\" 32%1 \\n\\t\"",
" \"movd %1, %%mm0 \\n\\t\"",
" PREFETCH\" 32%1 \\n\\t\"",
" PREFETCH\" 32%1 \\n\\t\"",
" \"movd %1, %%mm0 \\n\\t\"",
" \"movd 4%1, %%mm3 \\n\\t\"",
" \"punpckldq 8%1, %%mm0 \\n\\t\"",
" \"punpckldq 12%1, %%mm3 \\n\\t\"",
" MOVNTQ\" %%mm0, %0 \\n\\t\"",
" PREFETCH\" 32%1 \\n\\t\"",
" \"movd %1, %%mm0 \\n\\t\"",
" \"movd 4%1, %%mm3 \\n\\t\"",
" \"punpckldq 8%1, %%mm0 \\n\\t\"",
" \"punpckldq 12%1, %%mm3 \\n\\t\"",
" MOVNTQ\" %%mm0, %0 \\n\\t\"",
" :\"=m\"(*d):\"m\"(*s),\"m\"(blue_15mask):\"memory\");",
" PREFETCH\" 32%1 \\n\\t\"",
" \"movd %1, %%mm0 \\n\\t\"",
" MOVNTQ\" %%mm0, %0 \\n\\t\"",
" PREFETCH\" 32%1 \\n\\t\"",
" \"movd %1, %%mm0 \\n\\t\"",
" MOVNTQ\" %%mm0, %0 \\n\\t\"",
" PREFETCH\" 32%1 \\n\\t\"",
" \"movd %1, %%mm0 \\n\\t\"",
" MOVNTQ\" %%mm0, %0 \\n\\t\"",
" :\"=m\"(*d):\"m\"(*s),\"m\"(blue_15mask):\"memory\");",
" :\"=m\"(*d):\"m\"(*s),\"m\"(blue_15mask):\"memory\");",
" PREFETCH\" 32%1 \\n\\t\"",
" PREFETCH\" 32%1 \\n\\t\"",
" PREFETCH\" 32%1 \\n\\t\"",
" PREFETCH\" 32%1 \\n\\t\""
],
"line_no": [
31,
33,
31,
31,
33,
35,
37,
39,
85,
31,
33,
35,
37,
39,
85,
87,
31,
33,
85,
31,
33,
85,
31,
33,
85,
87,
87,
31,
31,
31,
31
]
} | static inline void FUNC_0(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)
{
const uint8_t *VAR_0 = src;
const uint8_t *VAR_1;
const uint8_t *VAR_2;
uint16_t *d = (uint16_t *)dst;
VAR_1 = VAR_0 + src_size;
__asm__ volatile(PREFETCH" %0"::"m"(*src):"memory");
__asm__ volatile(
"movq %0, %%mm7 \n\t"
"movq %1, %%mm6 \n\t"
::"m"(red_15mask),"m"(green_15mask));
VAR_2 = VAR_1 - 15;
while (VAR_0 < VAR_2) {
__asm__ volatile(
PREFETCH" 32%1 \n\t"
"movd %1, %%mm0 \n\t"
"movd 4%1, %%mm3 \n\t"
"punpckldq 8%1, %%mm0 \n\t"
"punpckldq 12%1, %%mm3 \n\t"
"movq %%mm0, %%mm1 \n\t"
"movq %%mm0, %%mm2 \n\t"
"movq %%mm3, %%mm4 \n\t"
"movq %%mm3, %%mm5 \n\t"
"psllq $7, %%mm0 \n\t"
"psllq $7, %%mm3 \n\t"
"pand %%mm7, %%mm0 \n\t"
"pand %%mm7, %%mm3 \n\t"
"psrlq $6, %%mm1 \n\t"
"psrlq $6, %%mm4 \n\t"
"pand %%mm6, %%mm1 \n\t"
"pand %%mm6, %%mm4 \n\t"
"psrlq $19, %%mm2 \n\t"
"psrlq $19, %%mm5 \n\t"
"pand %2, %%mm2 \n\t"
"pand %2, %%mm5 \n\t"
"por %%mm1, %%mm0 \n\t"
"por %%mm4, %%mm3 \n\t"
"por %%mm2, %%mm0 \n\t"
"por %%mm5, %%mm3 \n\t"
"psllq $16, %%mm3 \n\t"
"por %%mm3, %%mm0 \n\t"
MOVNTQ" %%mm0, %0 \n\t"
:"=m"(*d):"m"(*VAR_0),"m"(blue_15mask):"memory");
d += 4;
VAR_0 += 16;
}
__asm__ volatile(SFENCE:::"memory");
__asm__ volatile(EMMS:::"memory");
while (VAR_0 < VAR_1) {
register int VAR_3 = *(const uint32_t*)VAR_0; VAR_0 += 4;
*d++ = ((VAR_3&0xF8)<<7) + ((VAR_3&0xF800)>>6) + ((VAR_3&0xF80000)>>19);
}
}
| [
"static inline void FUNC_0(rgb32tobgr15)(const uint8_t *src, uint8_t *dst, int src_size)\n{",
"const uint8_t *VAR_0 = src;",
"const uint8_t *VAR_1;",
"const uint8_t *VAR_2;",
"uint16_t *d = (uint16_t *)dst;",
"VAR_1 = VAR_0 + src_size;",
"__asm__ volatile(PREFETCH\" %0\"::\"m\"(*src):\"memory\");",
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17,
19,
21,
23
],
[
25
],
[
27
],
[
29,
31,
33,
35,
37,
39,
41,
43,
45,
47,
49,
51,
53,
55,
57,
59,... |
13,129 | void usb_test_hotplug(const char *hcd_id, const int port,
void (*port_check)(void))
{
QDict *response;
char *cmd;
cmd = g_strdup_printf("{'execute': 'device_add',"
" 'arguments': {"
" 'driver': 'usb-tablet',"
" 'port': '%d',"
" 'bus': '%s.0',"
" 'id': 'usbdev%d'"
"}}", port, hcd_id, port);
response = qmp(cmd);
g_free(cmd);
g_assert(response);
g_assert(!qdict_haskey(response, "error"));
if (port_check) {
port_check();
}
cmd = g_strdup_printf("{'execute': 'device_del',"
" 'arguments': {"
" 'id': 'usbdev%d'"
"}}", port);
response = qmp(cmd);
g_free(cmd);
g_assert(response);
g_assert(qdict_haskey(response, "event"));
g_assert(!strcmp(qdict_get_str(response, "event"), "DEVICE_DELETED"));
} | true | qemu | dc491fead04a92a612df93b85b0ebf9dcc3f6684 | void usb_test_hotplug(const char *hcd_id, const int port,
void (*port_check)(void))
{
QDict *response;
char *cmd;
cmd = g_strdup_printf("{'execute': 'device_add',"
" 'arguments': {"
" 'driver': 'usb-tablet',"
" 'port': '%d',"
" 'bus': '%s.0',"
" 'id': 'usbdev%d'"
"}}", port, hcd_id, port);
response = qmp(cmd);
g_free(cmd);
g_assert(response);
g_assert(!qdict_haskey(response, "error"));
if (port_check) {
port_check();
}
cmd = g_strdup_printf("{'execute': 'device_del',"
" 'arguments': {"
" 'id': 'usbdev%d'"
"}}", port);
response = qmp(cmd);
g_free(cmd);
g_assert(response);
g_assert(qdict_haskey(response, "event"));
g_assert(!strcmp(qdict_get_str(response, "event"), "DEVICE_DELETED"));
} | {
"code": [],
"line_no": []
} | void FUNC_0(const char *VAR_0, const int VAR_1,
void (*VAR_2)(void))
{
QDict *response;
char *VAR_3;
VAR_3 = g_strdup_printf("{'execute': 'device_add',"
" 'arguments': {"
" 'driver': 'usb-tablet',"
" 'VAR_1': '%d',"
" 'bus': '%s.0',"
" 'id': 'usbdev%d'"
"}}", VAR_1, VAR_0, VAR_1);
response = qmp(VAR_3);
g_free(VAR_3);
g_assert(response);
g_assert(!qdict_haskey(response, "error"));
if (VAR_2) {
VAR_2();
}
VAR_3 = g_strdup_printf("{'execute': 'device_del',"
" 'arguments': {"
" 'id': 'usbdev%d'"
"}}", VAR_1);
response = qmp(VAR_3);
g_free(VAR_3);
g_assert(response);
g_assert(qdict_haskey(response, "event"));
g_assert(!strcmp(qdict_get_str(response, "event"), "DEVICE_DELETED"));
} | [
"void FUNC_0(const char *VAR_0, const int VAR_1,\nvoid (*VAR_2)(void))\n{",
"QDict *response;",
"char *VAR_3;",
"VAR_3 = g_strdup_printf(\"{'execute': 'device_add',\"",
"\" 'arguments': {\"",
"\" 'driver': 'usb-tablet',\"\n\" 'VAR_1': '%d',\"\n\" 'bus': '%s.0',\"\n\" 'id': 'usbdev%d'\"\n\"}}\", V... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17,
19,
21,
23,
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
38
],
[
40
],
[
42
],
[
46
],
[
48
],
[
50,
52
],
[
54
... |
13,131 | static inline int target_to_host_errno(int err)
{
if (target_to_host_errno_table[err])
return target_to_host_errno_table[err];
return err;
}
| true | qemu | 2466119c9551d606a0f92f9832e0c865bc04b488 | static inline int target_to_host_errno(int err)
{
if (target_to_host_errno_table[err])
return target_to_host_errno_table[err];
return err;
}
| {
"code": [
" if (target_to_host_errno_table[err])"
],
"line_no": [
5
]
} | static inline int FUNC_0(int VAR_0)
{
if (target_to_host_errno_table[VAR_0])
return target_to_host_errno_table[VAR_0];
return VAR_0;
}
| [
"static inline int FUNC_0(int VAR_0)\n{",
"if (target_to_host_errno_table[VAR_0])\nreturn target_to_host_errno_table[VAR_0];",
"return VAR_0;",
"}"
] | [
0,
1,
0,
0
] | [
[
1,
3
],
[
5,
7
],
[
9
],
[
11
]
] |
13,132 | int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
uint8_t ar)
{
CPUS390XState *env = &cpu->env;
S390PCIBusDevice *pbdev;
MemoryRegion *mr;
int i;
uint32_t fh;
uint8_t pcias;
uint8_t len;
uint8_t buffer[128];
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 6);
return 0;
}
fh = env->regs[r1] >> 32;
pcias = (env->regs[r1] >> 16) & 0xf;
len = env->regs[r1] & 0xff;
if (pcias > 5) {
DPRINTF("pcistb invalid space\n");
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
return 0;
}
switch (len) {
case 16:
case 32:
case 64:
case 128:
break;
default:
program_interrupt(env, PGM_SPECIFICATION, 6);
return 0;
}
pbdev = s390_pci_find_dev_by_fh(fh);
if (!pbdev) {
DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
switch (pbdev->state) {
case ZPCI_FS_RESERVED:
case ZPCI_FS_STANDBY:
case ZPCI_FS_DISABLED:
case ZPCI_FS_PERMANENT_ERROR:
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
case ZPCI_FS_ERROR:
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
return 0;
default:
break;
}
mr = pbdev->pdev->io_regions[pcias].memory;
if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
program_interrupt(env, PGM_ADDRESSING, 6);
return 0;
}
if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
return 0;
}
for (i = 0; i < len / 8; i++) {
memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
ldq_p(buffer + i * 8), 8,
MEMTXATTRS_UNSPECIFIED);
}
setcc(cpu, ZPCI_PCI_LS_OK);
return 0;
}
| true | qemu | 88ee13c7b656e5504613b527f3a51591e9afae69 | int pcistb_service_call(S390CPU *cpu, uint8_t r1, uint8_t r3, uint64_t gaddr,
uint8_t ar)
{
CPUS390XState *env = &cpu->env;
S390PCIBusDevice *pbdev;
MemoryRegion *mr;
int i;
uint32_t fh;
uint8_t pcias;
uint8_t len;
uint8_t buffer[128];
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 6);
return 0;
}
fh = env->regs[r1] >> 32;
pcias = (env->regs[r1] >> 16) & 0xf;
len = env->regs[r1] & 0xff;
if (pcias > 5) {
DPRINTF("pcistb invalid space\n");
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_INVAL_AS);
return 0;
}
switch (len) {
case 16:
case 32:
case 64:
case 128:
break;
default:
program_interrupt(env, PGM_SPECIFICATION, 6);
return 0;
}
pbdev = s390_pci_find_dev_by_fh(fh);
if (!pbdev) {
DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
switch (pbdev->state) {
case ZPCI_FS_RESERVED:
case ZPCI_FS_STANDBY:
case ZPCI_FS_DISABLED:
case ZPCI_FS_PERMANENT_ERROR:
setcc(cpu, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
case ZPCI_FS_ERROR:
setcc(cpu, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, r1, ZPCI_PCI_ST_BLOCKED);
return 0;
default:
break;
}
mr = pbdev->pdev->io_regions[pcias].memory;
if (!memory_region_access_valid(mr, env->regs[r3], len, true)) {
program_interrupt(env, PGM_ADDRESSING, 6);
return 0;
}
if (s390_cpu_virt_mem_read(cpu, gaddr, ar, buffer, len)) {
return 0;
}
for (i = 0; i < len / 8; i++) {
memory_region_dispatch_write(mr, env->regs[r3] + i * 8,
ldq_p(buffer + i * 8), 8,
MEMTXATTRS_UNSPECIFIED);
}
setcc(cpu, ZPCI_PCI_LS_OK);
return 0;
}
| {
"code": [
" program_interrupt(env, PGM_ADDRESSING, 6);",
" memory_region_dispatch_write(mr, env->regs[r3] + i * 8,"
],
"line_no": [
127,
145
]
} | int FUNC_0(S390CPU *VAR_0, uint8_t VAR_1, uint8_t VAR_2, uint64_t VAR_3,
uint8_t VAR_4)
{
CPUS390XState *env = &VAR_0->env;
S390PCIBusDevice *pbdev;
MemoryRegion *mr;
int VAR_5;
uint32_t fh;
uint8_t pcias;
uint8_t len;
uint8_t buffer[128];
if (env->psw.mask & PSW_MASK_PSTATE) {
program_interrupt(env, PGM_PRIVILEGED, 6);
return 0;
}
fh = env->regs[VAR_1] >> 32;
pcias = (env->regs[VAR_1] >> 16) & 0xf;
len = env->regs[VAR_1] & 0xff;
if (pcias > 5) {
DPRINTF("pcistb invalid space\n");
setcc(VAR_0, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, VAR_1, ZPCI_PCI_ST_INVAL_AS);
return 0;
}
switch (len) {
case 16:
case 32:
case 64:
case 128:
break;
default:
program_interrupt(env, PGM_SPECIFICATION, 6);
return 0;
}
pbdev = s390_pci_find_dev_by_fh(fh);
if (!pbdev) {
DPRINTF("pcistb no pci dev fh 0x%x\n", fh);
setcc(VAR_0, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
}
switch (pbdev->state) {
case ZPCI_FS_RESERVED:
case ZPCI_FS_STANDBY:
case ZPCI_FS_DISABLED:
case ZPCI_FS_PERMANENT_ERROR:
setcc(VAR_0, ZPCI_PCI_LS_INVAL_HANDLE);
return 0;
case ZPCI_FS_ERROR:
setcc(VAR_0, ZPCI_PCI_LS_ERR);
s390_set_status_code(env, VAR_1, ZPCI_PCI_ST_BLOCKED);
return 0;
default:
break;
}
mr = pbdev->pdev->io_regions[pcias].memory;
if (!memory_region_access_valid(mr, env->regs[VAR_2], len, true)) {
program_interrupt(env, PGM_ADDRESSING, 6);
return 0;
}
if (s390_cpu_virt_mem_read(VAR_0, VAR_3, VAR_4, buffer, len)) {
return 0;
}
for (VAR_5 = 0; VAR_5 < len / 8; VAR_5++) {
memory_region_dispatch_write(mr, env->regs[VAR_2] + VAR_5 * 8,
ldq_p(buffer + VAR_5 * 8), 8,
MEMTXATTRS_UNSPECIFIED);
}
setcc(VAR_0, ZPCI_PCI_LS_OK);
return 0;
}
| [
"int FUNC_0(S390CPU *VAR_0, uint8_t VAR_1, uint8_t VAR_2, uint64_t VAR_3,\nuint8_t VAR_4)\n{",
"CPUS390XState *env = &VAR_0->env;",
"S390PCIBusDevice *pbdev;",
"MemoryRegion *mr;",
"int VAR_5;",
"uint32_t fh;",
"uint8_t pcias;",
"uint8_t len;",
"uint8_t buffer[128];",
"if (env->psw.mask & PSW_MASK... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
25
],
[
27
],
[
29
],
[
31
],
[
35
],
[
37
],
[
39
],
[
43
],
[
45
],
[
47
],
[... |
13,133 | static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
{
unsigned int opc, rs1, rs2, rd;
TCGv cpu_src1, cpu_src2, cpu_tmp1, cpu_tmp2;
TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
target_long simm;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc->pc);
}
opc = GET_FIELD(insn, 0, 1);
rd = GET_FIELD(insn, 2, 6);
cpu_tmp1 = cpu_src1 = tcg_temp_new();
cpu_tmp2 = cpu_src2 = tcg_temp_new();
switch (opc) {
case 0: /* branches/sethi */
{
unsigned int xop = GET_FIELD(insn, 7, 9);
int32_t target;
switch (xop) {
#ifdef TARGET_SPARC64
case 0x1: /* V9 BPcc */
{
int cc;
target = GET_FIELD_SP(insn, 0, 18);
target = sign_extend(target, 19);
target <<= 2;
cc = GET_FIELD_SP(insn, 20, 21);
if (cc == 0)
do_branch(dc, target, insn, 0);
else if (cc == 2)
do_branch(dc, target, insn, 1);
else
goto illegal_insn;
goto jmp_insn;
}
case 0x3: /* V9 BPr */
{
target = GET_FIELD_SP(insn, 0, 13) |
(GET_FIELD_SP(insn, 20, 21) << 14);
target = sign_extend(target, 16);
target <<= 2;
cpu_src1 = get_src1(dc, insn);
do_branch_reg(dc, target, insn, cpu_src1);
goto jmp_insn;
}
case 0x5: /* V9 FBPcc */
{
int cc = GET_FIELD_SP(insn, 20, 21);
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
target = GET_FIELD_SP(insn, 0, 18);
target = sign_extend(target, 19);
target <<= 2;
do_fbranch(dc, target, insn, cc);
goto jmp_insn;
}
#else
case 0x7: /* CBN+x */
{
goto ncp_insn;
}
#endif
case 0x2: /* BN+x */
{
target = GET_FIELD(insn, 10, 31);
target = sign_extend(target, 22);
target <<= 2;
do_branch(dc, target, insn, 0);
goto jmp_insn;
}
case 0x6: /* FBN+x */
{
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
target = GET_FIELD(insn, 10, 31);
target = sign_extend(target, 22);
target <<= 2;
do_fbranch(dc, target, insn, 0);
goto jmp_insn;
}
case 0x4: /* SETHI */
/* Special-case %g0 because that's the canonical nop. */
if (rd) {
uint32_t value = GET_FIELD(insn, 10, 31);
TCGv t = gen_dest_gpr(dc, rd);
tcg_gen_movi_tl(t, value << 10);
gen_store_gpr(dc, rd, t);
}
break;
case 0x0: /* UNIMPL */
default:
goto illegal_insn;
}
break;
}
break;
case 1: /*CALL*/
{
target_long target = GET_FIELDs(insn, 2, 31) << 2;
TCGv o7 = gen_dest_gpr(dc, 15);
tcg_gen_movi_tl(o7, dc->pc);
gen_store_gpr(dc, 15, o7);
target += dc->pc;
gen_mov_pc_npc(dc);
#ifdef TARGET_SPARC64
if (unlikely(AM_CHECK(dc))) {
target &= 0xffffffffULL;
}
#endif
dc->npc = target;
}
goto jmp_insn;
case 2: /* FPU & Logical Operations */
{
unsigned int xop = GET_FIELD(insn, 7, 12);
if (xop == 0x3a) { /* generate trap */
int cond = GET_FIELD(insn, 3, 6);
TCGv_i32 trap;
int l1 = -1, mask;
if (cond == 0) {
/* Trap never. */
break;
}
save_state(dc);
if (cond != 8) {
/* Conditional trap. */
DisasCompare cmp;
#ifdef TARGET_SPARC64
/* V9 icc/xcc */
int cc = GET_FIELD_SP(insn, 11, 12);
if (cc == 0) {
gen_compare(&cmp, 0, cond, dc);
} else if (cc == 2) {
gen_compare(&cmp, 1, cond, dc);
} else {
goto illegal_insn;
}
#else
gen_compare(&cmp, 0, cond, dc);
#endif
l1 = gen_new_label();
tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
cmp.c1, cmp.c2, l1);
free_compare(&cmp);
}
mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
? UA2005_HTRAP_MASK : V8_TRAP_MASK);
/* Don't use the normal temporaries, as they may well have
gone out of scope with the branch above. While we're
doing that we might as well pre-truncate to 32-bit. */
trap = tcg_temp_new_i32();
rs1 = GET_FIELD_SP(insn, 14, 18);
if (IS_IMM) {
rs2 = GET_FIELD_SP(insn, 0, 6);
if (rs1 == 0) {
tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
/* Signal that the trap value is fully constant. */
mask = 0;
} else {
TCGv t1 = gen_load_gpr(dc, rs1);
tcg_gen_trunc_tl_i32(trap, t1);
tcg_gen_addi_i32(trap, trap, rs2);
}
} else {
TCGv t1, t2;
rs2 = GET_FIELD_SP(insn, 0, 4);
t1 = gen_load_gpr(dc, rs1);
t2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(t1, t1, t2);
tcg_gen_trunc_tl_i32(trap, t1);
}
if (mask != 0) {
tcg_gen_andi_i32(trap, trap, mask);
tcg_gen_addi_i32(trap, trap, TT_TRAP);
}
gen_helper_raise_exception(cpu_env, trap);
tcg_temp_free_i32(trap);
if (cond == 8) {
/* An unconditional trap ends the TB. */
dc->is_br = 1;
goto jmp_insn;
} else {
/* A conditional trap falls through to the next insn. */
gen_set_label(l1);
break;
}
} else if (xop == 0x28) {
rs1 = GET_FIELD(insn, 13, 17);
switch(rs1) {
case 0: /* rdy */
#ifndef TARGET_SPARC64
case 0x01 ... 0x0e: /* undefined in the SPARCv8
manual, rdy on the microSPARC
II */
case 0x0f: /* stbar in the SPARCv8 manual,
rdy on the microSPARC II */
case 0x10 ... 0x1f: /* implementation-dependent in the
SPARCv8 manual, rdy on the
microSPARC II */
/* Read Asr17 */
if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
TCGv t = gen_dest_gpr(dc, rd);
/* Read Asr17 for a Leon3 monoprocessor */
tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
gen_store_gpr(dc, rd, t);
break;
}
#endif
gen_store_gpr(dc, rd, cpu_y);
break;
#ifdef TARGET_SPARC64
case 0x2: /* V9 rdccr */
update_psr(dc);
gen_helper_rdccr(cpu_dst, cpu_env);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x3: /* V9 rdasi */
tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x4: /* V9 rdtick */
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_get_count(cpu_dst, r_tickptr);
tcg_temp_free_ptr(r_tickptr);
gen_store_gpr(dc, rd, cpu_dst);
}
break;
case 0x5: /* V9 rdpc */
{
TCGv t = gen_dest_gpr(dc, rd);
if (unlikely(AM_CHECK(dc))) {
tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
} else {
tcg_gen_movi_tl(t, dc->pc);
}
gen_store_gpr(dc, rd, t);
}
break;
case 0x6: /* V9 rdfprs */
tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0xf: /* V9 membar */
break; /* no effect */
case 0x13: /* Graphics Status */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_store_gpr(dc, rd, cpu_gsr);
break;
case 0x16: /* Softint */
tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x17: /* Tick compare */
gen_store_gpr(dc, rd, cpu_tick_cmpr);
break;
case 0x18: /* System tick */
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
gen_helper_tick_get_count(cpu_dst, r_tickptr);
tcg_temp_free_ptr(r_tickptr);
gen_store_gpr(dc, rd, cpu_dst);
}
break;
case 0x19: /* System tick compare */
gen_store_gpr(dc, rd, cpu_stick_cmpr);
break;
case 0x10: /* Performance Control */
case 0x11: /* Performance Instrumentation Counter */
case 0x12: /* Dispatch Control */
case 0x14: /* Softint set, WO */
case 0x15: /* Softint clear, WO */
#endif
default:
goto illegal_insn;
}
#if !defined(CONFIG_USER_ONLY)
} else if (xop == 0x29) { /* rdpsr / UA2005 rdhpr */
#ifndef TARGET_SPARC64
if (!supervisor(dc)) {
goto priv_insn;
}
update_psr(dc);
gen_helper_rdpsr(cpu_dst, cpu_env);
#else
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
goto priv_insn;
rs1 = GET_FIELD(insn, 13, 17);
switch (rs1) {
case 0: // hpstate
// gen_op_rdhpstate();
break;
case 1: // htstate
// gen_op_rdhtstate();
break;
case 3: // hintp
tcg_gen_mov_tl(cpu_dst, cpu_hintp);
break;
case 5: // htba
tcg_gen_mov_tl(cpu_dst, cpu_htba);
break;
case 6: // hver
tcg_gen_mov_tl(cpu_dst, cpu_hver);
break;
case 31: // hstick_cmpr
tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
break;
default:
goto illegal_insn;
}
#endif
gen_store_gpr(dc, rd, cpu_dst);
break;
} else if (xop == 0x2a) { /* rdwim / V9 rdpr */
if (!supervisor(dc))
goto priv_insn;
#ifdef TARGET_SPARC64
rs1 = GET_FIELD(insn, 13, 17);
switch (rs1) {
case 0: // tpc
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 1: // tnpc
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tnpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 2: // tstate
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tstate));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 3: // tt
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_i32(cpu_tmp32, r_tsptr,
offsetof(trap_state, tt));
tcg_temp_free_ptr(r_tsptr);
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
}
break;
case 4: // tick
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 5: // tba
tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
break;
case 6: // pstate
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, pstate));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 7: // tl
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, tl));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 8: // pil
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, psrpil));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 9: // cwp
gen_helper_rdcwp(cpu_tmp0, cpu_env);
break;
case 10: // cansave
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, cansave));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 11: // canrestore
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, canrestore));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 12: // cleanwin
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, cleanwin));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 13: // otherwin
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, otherwin));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 14: // wstate
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, wstate));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 16: // UA2005 gl
CHECK_IU_FEATURE(dc, GL);
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, gl));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 26: // UA2005 strand status
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
goto priv_insn;
tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
break;
case 31: // ver
tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
break;
case 15: // fq
default:
goto illegal_insn;
}
#else
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
gen_store_gpr(dc, rd, cpu_tmp0);
break;
} else if (xop == 0x2b) { /* rdtbr / V9 flushw */
#ifdef TARGET_SPARC64
save_state(dc);
gen_helper_flushw(cpu_env);
#else
if (!supervisor(dc))
goto priv_insn;
gen_store_gpr(dc, rd, cpu_tbr);
#endif
break;
#endif
} else if (xop == 0x34) { /* FPU Operations */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_op_clear_ieee_excp_and_FTT();
rs1 = GET_FIELD(insn, 13, 17);
rs2 = GET_FIELD(insn, 27, 31);
xop = GET_FIELD(insn, 18, 26);
save_state(dc);
switch (xop) {
case 0x1: /* fmovs */
cpu_src1_32 = gen_load_fpr_F(dc, rs2);
gen_store_fpr_F(dc, rd, cpu_src1_32);
break;
case 0x5: /* fnegs */
gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
break;
case 0x9: /* fabss */
gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
break;
case 0x29: /* fsqrts */
CHECK_FPU_FEATURE(dc, FSQRT);
gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
break;
case 0x2a: /* fsqrtd */
CHECK_FPU_FEATURE(dc, FSQRT);
gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
break;
case 0x2b: /* fsqrtq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
break;
case 0x41: /* fadds */
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
break;
case 0x42: /* faddd */
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
break;
case 0x43: /* faddq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
break;
case 0x45: /* fsubs */
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
break;
case 0x46: /* fsubd */
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
break;
case 0x47: /* fsubq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
break;
case 0x49: /* fmuls */
CHECK_FPU_FEATURE(dc, FMUL);
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
break;
case 0x4a: /* fmuld */
CHECK_FPU_FEATURE(dc, FMUL);
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
break;
case 0x4b: /* fmulq */
CHECK_FPU_FEATURE(dc, FLOAT128);
CHECK_FPU_FEATURE(dc, FMUL);
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
break;
case 0x4d: /* fdivs */
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
break;
case 0x4e: /* fdivd */
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
break;
case 0x4f: /* fdivq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
break;
case 0x69: /* fsmuld */
CHECK_FPU_FEATURE(dc, FSMULD);
gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
break;
case 0x6e: /* fdmulq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
break;
case 0xc4: /* fitos */
gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
break;
case 0xc6: /* fdtos */
gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
break;
case 0xc7: /* fqtos */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
break;
case 0xc8: /* fitod */
gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
break;
case 0xc9: /* fstod */
gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
break;
case 0xcb: /* fqtod */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
break;
case 0xcc: /* fitoq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
break;
case 0xcd: /* fstoq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
break;
case 0xce: /* fdtoq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
break;
case 0xd1: /* fstoi */
gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
break;
case 0xd2: /* fdtoi */
gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
break;
case 0xd3: /* fqtoi */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
break;
#ifdef TARGET_SPARC64
case 0x2: /* V9 fmovd */
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
gen_store_fpr_D(dc, rd, cpu_src1_64);
break;
case 0x3: /* V9 fmovq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_move_Q(rd, rs2);
break;
case 0x6: /* V9 fnegd */
gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
break;
case 0x7: /* V9 fnegq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
break;
case 0xa: /* V9 fabsd */
gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
break;
case 0xb: /* V9 fabsq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
break;
case 0x81: /* V9 fstox */
gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
break;
case 0x82: /* V9 fdtox */
gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
break;
case 0x83: /* V9 fqtox */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
break;
case 0x84: /* V9 fxtos */
gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
break;
case 0x88: /* V9 fxtod */
gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
break;
case 0x8c: /* V9 fxtoq */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
break;
#endif
default:
goto illegal_insn;
}
} else if (xop == 0x35) { /* FPU Operations */
#ifdef TARGET_SPARC64
int cond;
#endif
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_op_clear_ieee_excp_and_FTT();
rs1 = GET_FIELD(insn, 13, 17);
rs2 = GET_FIELD(insn, 27, 31);
xop = GET_FIELD(insn, 18, 26);
save_state(dc);
#ifdef TARGET_SPARC64
/*
 * FMOVR(sz): emit TCG for a V9 fmovr{s,d,q} instruction — conditionally
 * move FP register rs2 into rd when integer register rs1 satisfies the
 * register condition encoded in insn bits 14..17 (see gen_compare_reg).
 * NOTE: relies on (and clobbers) the enclosing scope's 'cond' and
 * 'cpu_src1' locals; only usable inside this xop == 0x35 decode arm.
 */
#define FMOVR(sz)                                          \
    do {                                                   \
        DisasCompare cmp;                                  \
        cond = GET_FIELD_SP(insn, 14, 17);                 \
        cpu_src1 = get_src1(dc, insn);                     \
        gen_compare_reg(&cmp, cond, cpu_src1);             \
        gen_fmov##sz(dc, &cmp, rd, rs2);                   \
        free_compare(&cmp);                                \
    } while (0)
if ((xop & 0x11f) == 0x005) { /* V9 fmovsr */
FMOVR(s);
break;
} else if ((xop & 0x11f) == 0x006) { // V9 fmovdr
FMOVR(d);
break;
} else if ((xop & 0x11f) == 0x007) { // V9 fmovqr
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVR(q);
break;
}
#undef FMOVR
#endif
switch (xop) {
#ifdef TARGET_SPARC64
/*
 * FMOVCC(fcc, sz): emit TCG for a V9 fmov{s,d,q}cc on a floating-point
 * condition code — move FP register rs2 into rd when %fccN (N = fcc,
 * 0..3) satisfies the condition in insn bits 14..17 (gen_fcompare).
 * NOTE: writes the enclosing scope's 'cond' local; this definition is
 * #undef'd below and redefined for the integer-cc (%icc/%xcc) cases.
 */
#define FMOVCC(fcc, sz)                                    \
    do {                                                   \
        DisasCompare cmp;                                  \
        cond = GET_FIELD_SP(insn, 14, 17);                 \
        gen_fcompare(&cmp, fcc, cond);                     \
        gen_fmov##sz(dc, &cmp, rd, rs2);                   \
        free_compare(&cmp);                                \
    } while (0)
case 0x001: /* V9 fmovscc %fcc0 */
FMOVCC(0, s);
break;
case 0x002: /* V9 fmovdcc %fcc0 */
FMOVCC(0, d);
break;
case 0x003: /* V9 fmovqcc %fcc0 */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(0, q);
break;
case 0x041: /* V9 fmovscc %fcc1 */
FMOVCC(1, s);
break;
case 0x042: /* V9 fmovdcc %fcc1 */
FMOVCC(1, d);
break;
case 0x043: /* V9 fmovqcc %fcc1 */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(1, q);
break;
case 0x081: /* V9 fmovscc %fcc2 */
FMOVCC(2, s);
break;
case 0x082: /* V9 fmovdcc %fcc2 */
FMOVCC(2, d);
break;
case 0x083: /* V9 fmovqcc %fcc2 */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(2, q);
break;
case 0x0c1: /* V9 fmovscc %fcc3 */
FMOVCC(3, s);
break;
case 0x0c2: /* V9 fmovdcc %fcc3 */
FMOVCC(3, d);
break;
case 0x0c3: /* V9 fmovqcc %fcc3 */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(3, q);
break;
#undef FMOVCC
/*
 * FMOVCC(xcc, sz): emit TCG for a V9 fmov{s,d,q}cc on an integer
 * condition code — move FP register rs2 into rd when %icc (xcc == 0)
 * or %xcc (xcc == 1) satisfies the condition in insn bits 14..17
 * (gen_compare).  Redefinition of the earlier %fcc FMOVCC macro; also
 * writes the enclosing scope's 'cond' local.
 */
#define FMOVCC(xcc, sz)                                    \
    do {                                                   \
        DisasCompare cmp;                                  \
        cond = GET_FIELD_SP(insn, 14, 17);                 \
        gen_compare(&cmp, xcc, cond, dc);                  \
        gen_fmov##sz(dc, &cmp, rd, rs2);                   \
        free_compare(&cmp);                                \
    } while (0)
case 0x101: /* V9 fmovscc %icc */
FMOVCC(0, s);
break;
case 0x102: /* V9 fmovdcc %icc */
FMOVCC(0, d);
break;
case 0x103: /* V9 fmovqcc %icc */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(0, q);
break;
case 0x181: /* V9 fmovscc %xcc */
FMOVCC(1, s);
break;
case 0x182: /* V9 fmovdcc %xcc */
FMOVCC(1, d);
break;
case 0x183: /* V9 fmovqcc %xcc */
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(1, q);
break;
#undef FMOVCC
#endif
case 0x51: /* fcmps, V9 %fcc */
cpu_src1_32 = gen_load_fpr_F(dc, rs1);
cpu_src2_32 = gen_load_fpr_F(dc, rs2);
gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
break;
case 0x52: /* fcmpd, V9 %fcc */
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
break;
case 0x53: /* fcmpq, V9 %fcc */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_op_load_fpr_QT0(QFPREG(rs1));
gen_op_load_fpr_QT1(QFPREG(rs2));
gen_op_fcmpq(rd & 3);
break;
case 0x55: /* fcmpes, V9 %fcc */
cpu_src1_32 = gen_load_fpr_F(dc, rs1);
cpu_src2_32 = gen_load_fpr_F(dc, rs2);
gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
break;
case 0x56: /* fcmped, V9 %fcc */
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
break;
case 0x57: /* fcmpeq, V9 %fcc */
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_op_load_fpr_QT0(QFPREG(rs1));
gen_op_load_fpr_QT1(QFPREG(rs2));
gen_op_fcmpeq(rd & 3);
break;
default:
goto illegal_insn;
}
} else if (xop == 0x2) {
TCGv dst = gen_dest_gpr(dc, rd);
rs1 = GET_FIELD(insn, 13, 17);
if (rs1 == 0) {
/* clr/mov shortcut : or %g0, x, y -> mov x, y */
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_movi_tl(dst, simm);
gen_store_gpr(dc, rd, dst);
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
if (rs2 == 0) {
tcg_gen_movi_tl(dst, 0);
gen_store_gpr(dc, rd, dst);
} else {
cpu_src2 = gen_load_gpr(dc, rs2);
gen_store_gpr(dc, rd, cpu_src2);
}
}
} else {
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_ori_tl(dst, cpu_src1, simm);
gen_store_gpr(dc, rd, dst);
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
if (rs2 == 0) {
/* mov shortcut: or x, %g0, y -> mov x, y */
gen_store_gpr(dc, rd, cpu_src1);
} else {
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, dst);
}
}
}
#ifdef TARGET_SPARC64
} else if (xop == 0x25) { /* sll, V9 sllx */
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 20, 31);
if (insn & (1 << 12)) {
tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
} else {
tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
}
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
if (insn & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
}
tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(dc, rd, cpu_dst);
} else if (xop == 0x26) { /* srl, V9 srlx */
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 20, 31);
if (insn & (1 << 12)) {
tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
} else {
tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
}
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
if (insn & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
}
}
gen_store_gpr(dc, rd, cpu_dst);
} else if (xop == 0x27) { /* sra, V9 srax */
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 20, 31);
if (insn & (1 << 12)) {
tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
} else {
tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
}
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
if (insn & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
}
}
gen_store_gpr(dc, rd, cpu_dst);
#endif
} else if (xop < 0x36) {
if (xop < 0x20) {
cpu_src1 = get_src1(dc, insn);
cpu_src2 = get_src2(dc, insn);
switch (xop & ~0x10) {
case 0x0: /* add */
if (xop & 0x10) {
gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
dc->cc_op = CC_OP_ADD;
} else {
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
}
break;
case 0x1: /* and */
tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x2: /* or */
tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x3: /* xor */
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x4: /* sub */
if (xop & 0x10) {
gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
dc->cc_op = CC_OP_SUB;
} else {
tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
}
break;
case 0x5: /* andn */
tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x6: /* orn */
tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x7: /* xorn */
tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x8: /* addx, V9 addc */
gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
(xop & 0x10));
break;
#ifdef TARGET_SPARC64
case 0x9: /* V9 mulx */
tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
break;
#endif
case 0xa: /* umul */
CHECK_IU_FEATURE(dc, MUL);
gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0xb: /* smul */
CHECK_IU_FEATURE(dc, MUL);
gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0xc: /* subx, V9 subc */
gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
(xop & 0x10));
break;
#ifdef TARGET_SPARC64
case 0xd: /* V9 udivx */
gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
break;
#endif
case 0xe: /* udiv */
CHECK_IU_FEATURE(dc, DIV);
if (xop & 0x10) {
gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
dc->cc_op = CC_OP_DIV;
} else {
gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
}
break;
case 0xf: /* sdiv */
CHECK_IU_FEATURE(dc, DIV);
if (xop & 0x10) {
gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
dc->cc_op = CC_OP_DIV;
} else {
gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
}
break;
default:
goto illegal_insn;
}
gen_store_gpr(dc, rd, cpu_dst);
} else {
cpu_src1 = get_src1(dc, insn);
cpu_src2 = get_src2(dc, insn);
switch (xop) {
case 0x20: /* taddcc */
gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
dc->cc_op = CC_OP_TADD;
break;
case 0x21: /* tsubcc */
gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
dc->cc_op = CC_OP_TSUB;
break;
case 0x22: /* taddcctv */
gen_helper_taddcctv(cpu_dst, cpu_env,
cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
dc->cc_op = CC_OP_TADDTV;
break;
case 0x23: /* tsubcctv */
gen_helper_tsubcctv(cpu_dst, cpu_env,
cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
dc->cc_op = CC_OP_TSUBTV;
break;
case 0x24: /* mulscc */
update_psr(dc);
gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
dc->cc_op = CC_OP_ADD;
break;
#ifndef TARGET_SPARC64
case 0x25: /* sll */
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 20, 31);
tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
} else { /* register */
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x26: /* srl */
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 20, 31);
tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
} else { /* register */
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x27: /* sra */
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 20, 31);
tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
} else { /* register */
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(dc, rd, cpu_dst);
break;
#endif
case 0x30:
{
switch(rd) {
case 0: /* wry */
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
break;
#ifndef TARGET_SPARC64
case 0x01 ... 0x0f: /* undefined in the
SPARCv8 manual, nop
on the microSPARC
II */
case 0x10 ... 0x1f: /* implementation-dependent
in the SPARCv8
manual, nop on the
microSPARC II */
break;
#else
case 0x2: /* V9 wrccr */
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
gen_helper_wrccr(cpu_env, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
dc->cc_op = CC_OP_FLAGS;
break;
case 0x3: /* V9 wrasi */
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_andi_tl(cpu_dst, cpu_dst, 0xff);
tcg_gen_trunc_tl_i32(cpu_asi, cpu_dst);
break;
case 0x6: /* V9 wrfprs */
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_trunc_tl_i32(cpu_fprs, cpu_dst);
save_state(dc);
gen_op_next_insn();
tcg_gen_exit_tb(0);
dc->is_br = 1;
break;
case 0xf: /* V9 sir, nop if user */
#if !defined(CONFIG_USER_ONLY)
if (supervisor(dc)) {
; // XXX
}
#endif
break;
case 0x13: /* Graphics Status */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
break;
case 0x14: /* Softint set */
if (!supervisor(dc))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
gen_helper_set_softint(cpu_env, cpu_tmp64);
break;
case 0x15: /* Softint clear */
if (!supervisor(dc))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
gen_helper_clear_softint(cpu_env, cpu_tmp64);
break;
case 0x16: /* Softint write */
if (!supervisor(dc))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
gen_helper_write_softint(cpu_env, cpu_tmp64);
break;
case 0x17: /* Tick compare */
#if !defined(CONFIG_USER_ONLY)
if (!supervisor(dc))
goto illegal_insn;
#endif
{
TCGv_ptr r_tickptr;
tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_set_limit(r_tickptr,
cpu_tick_cmpr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 0x18: /* System tick */
#if !defined(CONFIG_USER_ONLY)
if (!supervisor(dc))
goto illegal_insn;
#endif
{
TCGv_ptr r_tickptr;
tcg_gen_xor_tl(cpu_dst, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
gen_helper_tick_set_count(r_tickptr,
cpu_dst);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 0x19: /* System tick compare */
#if !defined(CONFIG_USER_ONLY)
if (!supervisor(dc))
goto illegal_insn;
#endif
{
TCGv_ptr r_tickptr;
tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
gen_helper_tick_set_limit(r_tickptr,
cpu_stick_cmpr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 0x10: /* Performance Control */
case 0x11: /* Performance Instrumentation
Counter */
case 0x12: /* Dispatch Control */
#endif
default:
goto illegal_insn;
}
}
break;
#if !defined(CONFIG_USER_ONLY)
case 0x31: /* wrpsr, V9 saved, restored */
{
if (!supervisor(dc))
goto priv_insn;
#ifdef TARGET_SPARC64
switch (rd) {
case 0:
gen_helper_saved(cpu_env);
break;
case 1:
gen_helper_restored(cpu_env);
break;
case 2: /* UA2005 allclean */
case 3: /* UA2005 otherw */
case 4: /* UA2005 normalw */
case 5: /* UA2005 invalw */
// XXX
default:
goto illegal_insn;
}
#else
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
gen_helper_wrpsr(cpu_env, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
dc->cc_op = CC_OP_FLAGS;
save_state(dc);
gen_op_next_insn();
tcg_gen_exit_tb(0);
dc->is_br = 1;
#endif
}
break;
case 0x32: /* wrwim, V9 wrpr */
{
if (!supervisor(dc))
goto priv_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64
switch (rd) {
case 0: // tpc
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 1: // tnpc
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tnpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 2: // tstate
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state,
tstate));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 3: // tt
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, r_tsptr,
offsetof(trap_state, tt));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 4: // tick
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_set_count(r_tickptr,
cpu_tmp0);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 5: // tba
tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
break;
case 6: // pstate
save_state(dc);
gen_helper_wrpstate(cpu_env, cpu_tmp0);
dc->npc = DYNAMIC_PC;
break;
case 7: // tl
save_state(dc);
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, tl));
dc->npc = DYNAMIC_PC;
break;
case 8: // pil
gen_helper_wrpil(cpu_env, cpu_tmp0);
break;
case 9: // cwp
gen_helper_wrcwp(cpu_env, cpu_tmp0);
break;
case 10: // cansave
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
cansave));
break;
case 11: // canrestore
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
canrestore));
break;
case 12: // cleanwin
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
cleanwin));
break;
case 13: // otherwin
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
otherwin));
break;
case 14: // wstate
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
wstate));
break;
case 16: // UA2005 gl
CHECK_IU_FEATURE(dc, GL);
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, gl));
break;
case 26: // UA2005 strand status
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
goto priv_insn;
tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
break;
default:
goto illegal_insn;
}
#else
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
if (dc->def->nwindows != 32)
tcg_gen_andi_tl(cpu_tmp32, cpu_tmp32,
(1 << dc->def->nwindows) - 1);
tcg_gen_mov_i32(cpu_wim, cpu_tmp32);
#endif
}
break;
case 0x33: /* wrtbr, UA2005 wrhpr */
{
#ifndef TARGET_SPARC64
if (!supervisor(dc))
goto priv_insn;
tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
#else
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
goto priv_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
switch (rd) {
case 0: // hpstate
// XXX gen_op_wrhpstate();
save_state(dc);
gen_op_next_insn();
tcg_gen_exit_tb(0);
dc->is_br = 1;
break;
case 1: // htstate
// XXX gen_op_wrhtstate();
break;
case 3: // hintp
tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
break;
case 5: // htba
tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
break;
case 31: // hstick_cmpr
{
TCGv_ptr r_tickptr;
tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, hstick));
gen_helper_tick_set_limit(r_tickptr,
cpu_hstick_cmpr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 6: // hver readonly
default:
goto illegal_insn;
}
#endif
}
break;
#endif
#ifdef TARGET_SPARC64
case 0x2c: /* V9 movcc */
{
int cc = GET_FIELD_SP(insn, 11, 12);
int cond = GET_FIELD_SP(insn, 14, 17);
DisasCompare cmp;
TCGv dst;
if (insn & (1 << 18)) {
if (cc == 0) {
gen_compare(&cmp, 0, cond, dc);
} else if (cc == 2) {
gen_compare(&cmp, 1, cond, dc);
} else {
goto illegal_insn;
}
} else {
gen_fcompare(&cmp, cc, cond);
}
/* The get_src2 above loaded the normal 13-bit
immediate field, not the 11-bit field we have
in movcc. But it did handle the reg case. */
if (IS_IMM) {
simm = GET_FIELD_SPs(insn, 0, 10);
tcg_gen_movi_tl(cpu_src2, simm);
}
dst = gen_load_gpr(dc, rd);
tcg_gen_movcond_tl(cmp.cond, dst,
cmp.c1, cmp.c2,
cpu_src2, dst);
free_compare(&cmp);
gen_store_gpr(dc, rd, dst);
break;
}
case 0x2d: /* V9 sdivx */
gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x2e: /* V9 popc */
gen_helper_popc(cpu_dst, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x2f: /* V9 movr */
{
int cond = GET_FIELD_SP(insn, 10, 12);
DisasCompare cmp;
TCGv dst;
gen_compare_reg(&cmp, cond, cpu_src1);
/* The get_src2 above loaded the normal 13-bit
immediate field, not the 10-bit field we have
in movr. But it did handle the reg case. */
if (IS_IMM) {
simm = GET_FIELD_SPs(insn, 0, 9);
tcg_gen_movi_tl(cpu_src2, simm);
}
dst = gen_load_gpr(dc, rd);
tcg_gen_movcond_tl(cmp.cond, dst,
cmp.c1, cmp.c2,
cpu_src2, dst);
free_compare(&cmp);
gen_store_gpr(dc, rd, dst);
break;
}
#endif
default:
goto illegal_insn;
}
}
} else if (xop == 0x36) { /* UltraSparc shutdown, VIS, V8 CPop1 */
#ifdef TARGET_SPARC64
int opf = GET_FIELD_SP(insn, 5, 13);
rs1 = GET_FIELD(insn, 13, 17);
rs2 = GET_FIELD(insn, 27, 31);
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
switch (opf) {
case 0x000: /* VIS I edge8cc */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x001: /* VIS II edge8n */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x002: /* VIS I edge8lcc */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x003: /* VIS II edge8ln */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x004: /* VIS I edge16cc */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x005: /* VIS II edge16n */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x006: /* VIS I edge16lcc */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x007: /* VIS II edge16ln */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x008: /* VIS I edge32cc */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x009: /* VIS II edge32n */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x00a: /* VIS I edge32lcc */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x00b: /* VIS II edge32ln */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x010: /* VIS I array8 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x012: /* VIS I array16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x014: /* VIS I array32 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x018: /* VIS I alignaddr */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x01a: /* VIS I alignaddrl */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x019: /* VIS II bmask */
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x020: /* VIS I fcmple16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x022: /* VIS I fcmpne16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x024: /* VIS I fcmple32 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x026: /* VIS I fcmpne32 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x028: /* VIS I fcmpgt16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x02a: /* VIS I fcmpeq16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x02c: /* VIS I fcmpgt32 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x02e: /* VIS I fcmpeq32 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x031: /* VIS I fmul8x16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
break;
case 0x033: /* VIS I fmul8x16au */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
break;
case 0x035: /* VIS I fmul8x16al */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
break;
case 0x036: /* VIS I fmul8sux16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
break;
case 0x037: /* VIS I fmul8ulx16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
break;
case 0x038: /* VIS I fmuld8sux16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
break;
case 0x039: /* VIS I fmuld8ulx16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
break;
case 0x03a: /* VIS I fpack32 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
break;
case 0x03b: /* VIS I fpack16 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
cpu_dst_32 = gen_dest_fpr_F();
gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x03d: /* VIS I fpackfix */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
cpu_dst_32 = gen_dest_fpr_F();
gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x03e: /* VIS I pdist */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
break;
case 0x048: /* VIS I faligndata */
CHECK_FPU_FEATURE(dc, VIS1);
gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
break;
case 0x04b: /* VIS I fpmerge */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
break;
case 0x04c: /* VIS II bshuffle */
CHECK_FPU_FEATURE(dc, VIS2);
gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
break;
case 0x04d: /* VIS I fexpand */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
break;
case 0x050: /* VIS I fpadd16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
break;
case 0x051: /* VIS I fpadd16s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
break;
case 0x052: /* VIS I fpadd32 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
break;
case 0x053: /* VIS I fpadd32s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
break;
case 0x054: /* VIS I fpsub16 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
break;
case 0x055: /* VIS I fpsub16s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
break;
case 0x056: /* VIS I fpsub32 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
break;
case 0x057: /* VIS I fpsub32s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
break;
case 0x060: /* VIS I fzero */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_dst_64 = gen_dest_fpr_D();
tcg_gen_movi_i64(cpu_dst_64, 0);
gen_store_fpr_D(dc, rd, cpu_dst_64);
break;
case 0x061: /* VIS I fzeros */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_dst_32 = gen_dest_fpr_F();
tcg_gen_movi_i32(cpu_dst_32, 0);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x062: /* VIS I fnor */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
break;
case 0x063: /* VIS I fnors */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
break;
case 0x064: /* VIS I fandnot2 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
break;
case 0x065: /* VIS I fandnot2s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
break;
case 0x066: /* VIS I fnot2 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
break;
case 0x067: /* VIS I fnot2s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
break;
case 0x068: /* VIS I fandnot1 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
break;
case 0x069: /* VIS I fandnot1s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
break;
case 0x06a: /* VIS I fnot1 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
break;
case 0x06b: /* VIS I fnot1s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
break;
case 0x06c: /* VIS I fxor */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
break;
case 0x06d: /* VIS I fxors */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
break;
case 0x06e: /* VIS I fnand */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
break;
case 0x06f: /* VIS I fnands */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
break;
case 0x070: /* VIS I fand */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
break;
case 0x071: /* VIS I fands */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
break;
case 0x072: /* VIS I fxnor */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
break;
case 0x073: /* VIS I fxnors */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
break;
case 0x074: /* VIS I fsrc1 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
gen_store_fpr_D(dc, rd, cpu_src1_64);
break;
case 0x075: /* VIS I fsrc1s */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_32 = gen_load_fpr_F(dc, rs1);
gen_store_fpr_F(dc, rd, cpu_src1_32);
break;
case 0x076: /* VIS I fornot2 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
break;
case 0x077: /* VIS I fornot2s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
break;
case 0x078: /* VIS I fsrc2 */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
gen_store_fpr_D(dc, rd, cpu_src1_64);
break;
case 0x079: /* VIS I fsrc2s */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_32 = gen_load_fpr_F(dc, rs2);
gen_store_fpr_F(dc, rd, cpu_src1_32);
break;
case 0x07a: /* VIS I fornot1 */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
break;
case 0x07b: /* VIS I fornot1s */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
break;
case 0x07c: /* VIS I for */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
break;
case 0x07d: /* VIS I fors */
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
break;
case 0x07e: /* VIS I fone */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_dst_64 = gen_dest_fpr_D();
tcg_gen_movi_i64(cpu_dst_64, -1);
gen_store_fpr_D(dc, rd, cpu_dst_64);
break;
case 0x07f: /* VIS I fones */
CHECK_FPU_FEATURE(dc, VIS1);
cpu_dst_32 = gen_dest_fpr_F();
tcg_gen_movi_i32(cpu_dst_32, -1);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x080: /* VIS I shutdown */
case 0x081: /* VIS II siam */
// XXX
goto illegal_insn;
default:
goto illegal_insn;
}
#else
goto ncp_insn;
#endif
} else if (xop == 0x37) { /* V8 CPop2, V9 impdep2 */
#ifdef TARGET_SPARC64
goto illegal_insn;
#else
goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
} else if (xop == 0x39) { /* V9 return */
TCGv_i32 r_const;
save_state(dc);
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
if (rs2) {
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
} else {
tcg_gen_mov_tl(cpu_dst, cpu_src1);
}
}
gen_helper_restore(cpu_env);
gen_mov_pc_npc(dc);
r_const = tcg_const_i32(3);
gen_helper_check_align(cpu_env, cpu_dst, r_const);
tcg_temp_free_i32(r_const);
tcg_gen_mov_tl(cpu_npc, cpu_dst);
dc->npc = DYNAMIC_PC;
goto jmp_insn;
#endif
} else {
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
if (rs2) {
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
} else {
tcg_gen_mov_tl(cpu_dst, cpu_src1);
}
}
switch (xop) {
case 0x38: /* jmpl */
{
TCGv t;
TCGv_i32 r_const;
t = gen_dest_gpr(dc, rd);
tcg_gen_movi_tl(t, dc->pc);
gen_store_gpr(dc, rd, t);
gen_mov_pc_npc(dc);
r_const = tcg_const_i32(3);
gen_helper_check_align(cpu_env, cpu_dst, r_const);
tcg_temp_free_i32(r_const);
gen_address_mask(dc, cpu_dst);
tcg_gen_mov_tl(cpu_npc, cpu_dst);
dc->npc = DYNAMIC_PC;
}
goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
case 0x39: /* rett, V9 return */
{
TCGv_i32 r_const;
if (!supervisor(dc))
goto priv_insn;
gen_mov_pc_npc(dc);
r_const = tcg_const_i32(3);
gen_helper_check_align(cpu_env, cpu_dst, r_const);
tcg_temp_free_i32(r_const);
tcg_gen_mov_tl(cpu_npc, cpu_dst);
dc->npc = DYNAMIC_PC;
gen_helper_rett(cpu_env);
}
goto jmp_insn;
#endif
case 0x3b: /* flush */
if (!((dc)->def->features & CPU_FEATURE_FLUSH))
goto unimp_flush;
/* nop */
break;
case 0x3c: /* save */
save_state(dc);
gen_helper_save(cpu_env);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x3d: /* restore */
save_state(dc);
gen_helper_restore(cpu_env);
gen_store_gpr(dc, rd, cpu_dst);
break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
case 0x3e: /* V9 done/retry */
{
switch (rd) {
case 0:
if (!supervisor(dc))
goto priv_insn;
dc->npc = DYNAMIC_PC;
dc->pc = DYNAMIC_PC;
gen_helper_done(cpu_env);
goto jmp_insn;
case 1:
if (!supervisor(dc))
goto priv_insn;
dc->npc = DYNAMIC_PC;
dc->pc = DYNAMIC_PC;
gen_helper_retry(cpu_env);
goto jmp_insn;
default:
goto illegal_insn;
}
}
break;
#endif
default:
goto illegal_insn;
}
}
break;
}
break;
case 3: /* load/store instructions */
{
unsigned int xop = GET_FIELD(insn, 7, 12);
cpu_src1 = get_src1(dc, insn);
if (xop == 0x3c || xop == 0x3e) { // V9 casa/casxa
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_mov_tl(cpu_addr, cpu_src1);
} else if (IS_IMM) { /* immediate */
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_addi_tl(cpu_addr, cpu_src1, simm);
} else { /* register */
rs2 = GET_FIELD(insn, 27, 31);
if (rs2 != 0) {
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(cpu_addr, cpu_src1, cpu_src2);
} else {
tcg_gen_mov_tl(cpu_addr, cpu_src1);
}
}
if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
(xop > 0x17 && xop <= 0x1d ) ||
(xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
TCGv cpu_val = gen_dest_gpr(dc, rd);
switch (xop) {
case 0x0: /* ld, V9 lduw, load unsigned word */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x1: /* ldub, load unsigned byte */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x2: /* lduh, load unsigned halfword */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x3: /* ldd, load double word */
if (rd & 1)
goto illegal_insn;
else {
TCGv_i32 r_const;
save_state(dc);
r_const = tcg_const_i32(7);
/* XXX remove alignment check */
gen_helper_check_align(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffffULL);
gen_store_gpr(dc, rd + 1, cpu_tmp0);
tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
tcg_gen_trunc_i64_tl(cpu_val, cpu_tmp64);
tcg_gen_andi_tl(cpu_val, cpu_val, 0xffffffffULL);
}
break;
case 0x9: /* ldsb, load signed byte */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0xa: /* ldsh, load signed halfword */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
break;
                case 0xd: /* ldstub -- XXX: should be atomically */
                    {
                        TCGv r_const;
                        /* Load the addressed byte into rd, then overwrite
                           that memory byte with 0xff.  As the XXX notes,
                           this load/store pair is not atomic.  */
                        gen_address_mask(dc, cpu_addr);
                        /* NOTE(review): ld8s sign-extends the loaded byte,
                           so rd sees 0xffffffff... when the byte was 0xff.
                           ldstub architecturally zero-extends; this looks
                           like it should be tcg_gen_qemu_ld8u -- confirm
                           against the SPARC manual.  */
                        tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
                        r_const = tcg_const_tl(0xff);
                        tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
                        tcg_temp_free(r_const);
                    }
                    break;
case 0x0f: /* swap, swap register with memory. Also
atomically */
CHECK_IU_FEATURE(dc, SWAP);
cpu_src1 = gen_load_gpr(dc, rd);
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
tcg_gen_mov_tl(cpu_val, cpu_tmp0);
break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
case 0x10: /* lda, V9 lduwa, load word alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
break;
case 0x11: /* lduba, load unsigned byte alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
break;
case 0x12: /* lduha, load unsigned halfword alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
break;
case 0x13: /* ldda, load double word alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
if (rd & 1)
goto illegal_insn;
save_state(dc);
gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
goto skip_move;
case 0x19: /* ldsba, load signed byte alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
break;
case 0x1a: /* ldsha, load signed halfword alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
break;
case 0x1d: /* ldstuba -- XXX: should be atomically */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ldstub_asi(cpu_val, cpu_addr, insn);
break;
case 0x1f: /* swapa, swap reg with alt. memory. Also
atomically */
CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
cpu_src1 = gen_load_gpr(dc, rd);
gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
break;
#ifndef TARGET_SPARC64
case 0x30: /* ldc */
case 0x31: /* ldcsr */
case 0x33: /* lddc */
goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
case 0x08: /* V9 ldsw */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x0b: /* V9 ldx */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x18: /* V9 ldswa */
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
break;
case 0x1b: /* V9 ldxa */
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
break;
case 0x2d: /* V9 prefetch, no effect */
goto skip_move;
case 0x30: /* V9 ldfa */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
gen_ldf_asi(cpu_addr, insn, 4, rd);
gen_update_fprs_dirty(rd);
goto skip_move;
case 0x33: /* V9 lddfa */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
gen_update_fprs_dirty(DFPREG(rd));
goto skip_move;
case 0x3d: /* V9 prefetcha, no effect */
goto skip_move;
case 0x32: /* V9 ldqfa */
CHECK_FPU_FEATURE(dc, FLOAT128);
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
gen_update_fprs_dirty(QFPREG(rd));
goto skip_move;
#endif
default:
goto illegal_insn;
}
gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
skip_move: ;
#endif
} else if (xop >= 0x20 && xop < 0x24) {
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
switch (xop) {
case 0x20: /* ldf, load fpreg */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
cpu_dst_32 = gen_dest_fpr_F();
tcg_gen_trunc_tl_i32(cpu_dst_32, cpu_tmp0);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x21: /* ldfsr, V9 ldxfsr */
#ifdef TARGET_SPARC64
gen_address_mask(dc, cpu_addr);
if (rd == 1) {
tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
gen_helper_ldxfsr(cpu_env, cpu_tmp64);
} else {
tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
gen_helper_ldfsr(cpu_env, cpu_tmp32);
}
#else
{
tcg_gen_qemu_ld32u(cpu_tmp32, cpu_addr, dc->mem_idx);
gen_helper_ldfsr(cpu_env, cpu_tmp32);
}
#endif
break;
case 0x22: /* ldqf, load quad fpreg */
{
TCGv_i32 r_const;
CHECK_FPU_FEATURE(dc, FLOAT128);
r_const = tcg_const_i32(dc->mem_idx);
gen_address_mask(dc, cpu_addr);
gen_helper_ldqf(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
gen_op_store_QT0_fpr(QFPREG(rd));
gen_update_fprs_dirty(QFPREG(rd));
}
break;
case 0x23: /* lddf, load double fpreg */
gen_address_mask(dc, cpu_addr);
cpu_dst_64 = gen_dest_fpr_D();
tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
gen_store_fpr_D(dc, rd, cpu_dst_64);
break;
default:
goto illegal_insn;
}
} else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
xop == 0xe || xop == 0x1e) {
TCGv cpu_val = gen_load_gpr(dc, rd);
switch (xop) {
case 0x4: /* st, store word */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x5: /* stb, store byte */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x6: /* sth, store halfword */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x7: /* std, store double word */
if (rd & 1)
goto illegal_insn;
else {
TCGv_i32 r_const;
TCGv lo;
save_state(dc);
gen_address_mask(dc, cpu_addr);
r_const = tcg_const_i32(7);
/* XXX remove alignment check */
gen_helper_check_align(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
lo = gen_load_gpr(dc, rd + 1);
tcg_gen_concat_tl_i64(cpu_tmp64, lo, cpu_val);
tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
}
break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
case 0x14: /* sta, V9 stwa, store word alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_st_asi(cpu_val, cpu_addr, insn, 4);
dc->npc = DYNAMIC_PC;
break;
case 0x15: /* stba, store byte alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_st_asi(cpu_val, cpu_addr, insn, 1);
dc->npc = DYNAMIC_PC;
break;
case 0x16: /* stha, store halfword alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_st_asi(cpu_val, cpu_addr, insn, 2);
dc->npc = DYNAMIC_PC;
break;
case 0x17: /* stda, store double word alternate */
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
if (rd & 1)
goto illegal_insn;
else {
save_state(dc);
gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
}
break;
#endif
#ifdef TARGET_SPARC64
case 0x0e: /* V9 stx */
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x1e: /* V9 stxa */
save_state(dc);
gen_st_asi(cpu_val, cpu_addr, insn, 8);
dc->npc = DYNAMIC_PC;
break;
#endif
default:
goto illegal_insn;
}
} else if (xop > 0x23 && xop < 0x28) {
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
switch (xop) {
case 0x24: /* stf, store fpreg */
gen_address_mask(dc, cpu_addr);
cpu_src1_32 = gen_load_fpr_F(dc, rd);
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_src1_32);
tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, dc->mem_idx);
break;
case 0x25: /* stfsr, V9 stxfsr */
#ifdef TARGET_SPARC64
gen_address_mask(dc, cpu_addr);
tcg_gen_ld_i64(cpu_tmp64, cpu_env, offsetof(CPUSPARCState, fsr));
if (rd == 1)
tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
else
tcg_gen_qemu_st32(cpu_tmp64, cpu_addr, dc->mem_idx);
#else
tcg_gen_ld_i32(cpu_tmp32, cpu_env, offsetof(CPUSPARCState, fsr));
tcg_gen_qemu_st32(cpu_tmp32, cpu_addr, dc->mem_idx);
#endif
break;
case 0x26:
#ifdef TARGET_SPARC64
/* V9 stqf, store quad fpreg */
{
TCGv_i32 r_const;
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_op_load_fpr_QT0(QFPREG(rd));
r_const = tcg_const_i32(dc->mem_idx);
gen_address_mask(dc, cpu_addr);
gen_helper_stqf(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
}
break;
#else /* !TARGET_SPARC64 */
/* stdfq, store floating point queue */
#if defined(CONFIG_USER_ONLY)
goto illegal_insn;
#else
if (!supervisor(dc))
goto priv_insn;
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
goto nfq_insn;
#endif
#endif
case 0x27: /* stdf, store double fpreg */
gen_address_mask(dc, cpu_addr);
cpu_src1_64 = gen_load_fpr_D(dc, rd);
tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
break;
default:
goto illegal_insn;
}
} else if (xop > 0x33 && xop < 0x3f) {
save_state(dc);
switch (xop) {
#ifdef TARGET_SPARC64
case 0x34: /* V9 stfa */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_stf_asi(cpu_addr, insn, 4, rd);
break;
case 0x36: /* V9 stqfa */
{
TCGv_i32 r_const;
CHECK_FPU_FEATURE(dc, FLOAT128);
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
r_const = tcg_const_i32(7);
gen_helper_check_align(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
}
break;
case 0x37: /* V9 stdfa */
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
break;
case 0x3c: /* V9 casa */
gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
break;
case 0x3e: /* V9 casxa */
gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
break;
#else
case 0x34: /* stc */
case 0x35: /* stcsr */
case 0x36: /* stdcq */
case 0x37: /* stdc */
goto ncp_insn;
#endif
default:
goto illegal_insn;
}
} else
goto illegal_insn;
}
break;
}
    /* default case for non jump instructions */
    /* Advance pc/npc for instructions that fall through.  Three cases:
       npc was already computed into cpu_npc at runtime (DYNAMIC_PC);
       npc is one of two static targets chosen by a runtime condition
       (JUMP_PC, resolved via gen_branch2 from dc->jump_pc[0/1]); or the
       common fully-static case pc = npc, npc += 4.  */
    if (dc->npc == DYNAMIC_PC) {
        dc->pc = DYNAMIC_PC;
        gen_op_next_insn();
    } else if (dc->npc == JUMP_PC) {
        /* we can do a static jump */
        gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
        dc->is_br = 1;
    } else {
        dc->pc = dc->npc;
        dc->npc = dc->npc + 4;
    }
jmp_insn:
goto egress;
illegal_insn:
{
TCGv_i32 r_const;
save_state(dc);
r_const = tcg_const_i32(TT_ILL_INSN);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free_i32(r_const);
dc->is_br = 1;
}
goto egress;
unimp_flush:
{
TCGv_i32 r_const;
save_state(dc);
r_const = tcg_const_i32(TT_UNIMP_FLUSH);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free_i32(r_const);
dc->is_br = 1;
}
goto egress;
#if !defined(CONFIG_USER_ONLY)
priv_insn:
{
TCGv_i32 r_const;
save_state(dc);
r_const = tcg_const_i32(TT_PRIV_INSN);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free_i32(r_const);
dc->is_br = 1;
}
goto egress;
#endif
nfpu_insn:
save_state(dc);
gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
dc->is_br = 1;
goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
nfq_insn:
save_state(dc);
gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
dc->is_br = 1;
goto egress;
#endif
#ifndef TARGET_SPARC64
 ncp_insn:
    {
        /* Raise a coprocessor-disabled trap (TT_NCP_INSN) and end the TB.
           Declared as TCGv_i32 to match tcg_const_i32() and freed with
           tcg_temp_free_i32(), consistent with the illegal_insn,
           unimp_flush and priv_insn handlers above.  The previous plain
           TCGv declaration only worked because TCGv happens to be 32 bits
           wide on sparc32 targets.  */
        TCGv_i32 r_const;
        save_state(dc);
        r_const = tcg_const_i32(TT_NCP_INSN);
        gen_helper_raise_exception(cpu_env, r_const);
        tcg_temp_free_i32(r_const);
        dc->is_br = 1;
    }
    goto egress;
#endif
 egress:
    /* Common exit path: release the scratch temporaries used by this
       instruction, plus any i32/tl temporaries the translation recorded
       in dc->t32[] / dc->ttl[] (freed in reverse allocation order). */
    tcg_temp_free(cpu_tmp1);
    tcg_temp_free(cpu_tmp2);
    if (dc->n_t32 != 0) {
        int i;
        for (i = dc->n_t32 - 1; i >= 0; --i) {
            tcg_temp_free_i32(dc->t32[i]);
        }
        dc->n_t32 = 0;
    }
    if (dc->n_ttl != 0) {
        int i;
        for (i = dc->n_ttl - 1; i >= 0; --i) {
            tcg_temp_free(dc->ttl[i]);
        }
        dc->n_ttl = 0;
    }
}
| true | qemu | a4273524875a960e8ef22ed676853e5988fefbea | static void disas_sparc_insn(DisasContext * dc, unsigned int insn)
{
unsigned int opc, rs1, rs2, rd;
TCGv cpu_src1, cpu_src2, cpu_tmp1, cpu_tmp2;
TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
target_long simm;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(dc->pc);
}
opc = GET_FIELD(insn, 0, 1);
rd = GET_FIELD(insn, 2, 6);
cpu_tmp1 = cpu_src1 = tcg_temp_new();
cpu_tmp2 = cpu_src2 = tcg_temp_new();
switch (opc) {
case 0:
{
unsigned int xop = GET_FIELD(insn, 7, 9);
int32_t target;
switch (xop) {
#ifdef TARGET_SPARC64
case 0x1:
{
int cc;
target = GET_FIELD_SP(insn, 0, 18);
target = sign_extend(target, 19);
target <<= 2;
cc = GET_FIELD_SP(insn, 20, 21);
if (cc == 0)
do_branch(dc, target, insn, 0);
else if (cc == 2)
do_branch(dc, target, insn, 1);
else
goto illegal_insn;
goto jmp_insn;
}
case 0x3:
{
target = GET_FIELD_SP(insn, 0, 13) |
(GET_FIELD_SP(insn, 20, 21) << 14);
target = sign_extend(target, 16);
target <<= 2;
cpu_src1 = get_src1(dc, insn);
do_branch_reg(dc, target, insn, cpu_src1);
goto jmp_insn;
}
case 0x5:
{
int cc = GET_FIELD_SP(insn, 20, 21);
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
target = GET_FIELD_SP(insn, 0, 18);
target = sign_extend(target, 19);
target <<= 2;
do_fbranch(dc, target, insn, cc);
goto jmp_insn;
}
#else
case 0x7:
{
goto ncp_insn;
}
#endif
case 0x2:
{
target = GET_FIELD(insn, 10, 31);
target = sign_extend(target, 22);
target <<= 2;
do_branch(dc, target, insn, 0);
goto jmp_insn;
}
case 0x6:
{
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
target = GET_FIELD(insn, 10, 31);
target = sign_extend(target, 22);
target <<= 2;
do_fbranch(dc, target, insn, 0);
goto jmp_insn;
}
case 0x4:
if (rd) {
uint32_t value = GET_FIELD(insn, 10, 31);
TCGv t = gen_dest_gpr(dc, rd);
tcg_gen_movi_tl(t, value << 10);
gen_store_gpr(dc, rd, t);
}
break;
case 0x0:
default:
goto illegal_insn;
}
break;
}
break;
case 1:
{
target_long target = GET_FIELDs(insn, 2, 31) << 2;
TCGv o7 = gen_dest_gpr(dc, 15);
tcg_gen_movi_tl(o7, dc->pc);
gen_store_gpr(dc, 15, o7);
target += dc->pc;
gen_mov_pc_npc(dc);
#ifdef TARGET_SPARC64
if (unlikely(AM_CHECK(dc))) {
target &= 0xffffffffULL;
}
#endif
dc->npc = target;
}
goto jmp_insn;
case 2:
{
unsigned int xop = GET_FIELD(insn, 7, 12);
if (xop == 0x3a) {
int cond = GET_FIELD(insn, 3, 6);
TCGv_i32 trap;
int l1 = -1, mask;
if (cond == 0) {
break;
}
save_state(dc);
if (cond != 8) {
DisasCompare cmp;
#ifdef TARGET_SPARC64
int cc = GET_FIELD_SP(insn, 11, 12);
if (cc == 0) {
gen_compare(&cmp, 0, cond, dc);
} else if (cc == 2) {
gen_compare(&cmp, 1, cond, dc);
} else {
goto illegal_insn;
}
#else
gen_compare(&cmp, 0, cond, dc);
#endif
l1 = gen_new_label();
tcg_gen_brcond_tl(tcg_invert_cond(cmp.cond),
cmp.c1, cmp.c2, l1);
free_compare(&cmp);
}
mask = ((dc->def->features & CPU_FEATURE_HYPV) && supervisor(dc)
? UA2005_HTRAP_MASK : V8_TRAP_MASK);
trap = tcg_temp_new_i32();
rs1 = GET_FIELD_SP(insn, 14, 18);
if (IS_IMM) {
rs2 = GET_FIELD_SP(insn, 0, 6);
if (rs1 == 0) {
tcg_gen_movi_i32(trap, (rs2 & mask) + TT_TRAP);
mask = 0;
} else {
TCGv t1 = gen_load_gpr(dc, rs1);
tcg_gen_trunc_tl_i32(trap, t1);
tcg_gen_addi_i32(trap, trap, rs2);
}
} else {
TCGv t1, t2;
rs2 = GET_FIELD_SP(insn, 0, 4);
t1 = gen_load_gpr(dc, rs1);
t2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(t1, t1, t2);
tcg_gen_trunc_tl_i32(trap, t1);
}
if (mask != 0) {
tcg_gen_andi_i32(trap, trap, mask);
tcg_gen_addi_i32(trap, trap, TT_TRAP);
}
gen_helper_raise_exception(cpu_env, trap);
tcg_temp_free_i32(trap);
if (cond == 8) {
dc->is_br = 1;
goto jmp_insn;
} else {
gen_set_label(l1);
break;
}
} else if (xop == 0x28) {
rs1 = GET_FIELD(insn, 13, 17);
switch(rs1) {
case 0:
#ifndef TARGET_SPARC64
case 0x01 ... 0x0e:
case 0x0f:
case 0x10 ... 0x1f:
if (rs1 == 0x11 && dc->def->features & CPU_FEATURE_ASR17) {
TCGv t = gen_dest_gpr(dc, rd);
tcg_gen_movi_tl(t, (1 << 8) | (dc->def->nwindows - 1));
gen_store_gpr(dc, rd, t);
break;
}
#endif
gen_store_gpr(dc, rd, cpu_y);
break;
#ifdef TARGET_SPARC64
case 0x2:
update_psr(dc);
gen_helper_rdccr(cpu_dst, cpu_env);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x3:
tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x4:
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_get_count(cpu_dst, r_tickptr);
tcg_temp_free_ptr(r_tickptr);
gen_store_gpr(dc, rd, cpu_dst);
}
break;
case 0x5:
{
TCGv t = gen_dest_gpr(dc, rd);
if (unlikely(AM_CHECK(dc))) {
tcg_gen_movi_tl(t, dc->pc & 0xffffffffULL);
} else {
tcg_gen_movi_tl(t, dc->pc);
}
gen_store_gpr(dc, rd, t);
}
break;
case 0x6:
tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0xf:
break;
case 0x13:
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_store_gpr(dc, rd, cpu_gsr);
break;
case 0x16:
tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x17:
gen_store_gpr(dc, rd, cpu_tick_cmpr);
break;
case 0x18:
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
gen_helper_tick_get_count(cpu_dst, r_tickptr);
tcg_temp_free_ptr(r_tickptr);
gen_store_gpr(dc, rd, cpu_dst);
}
break;
case 0x19:
gen_store_gpr(dc, rd, cpu_stick_cmpr);
break;
case 0x10:
case 0x11:
case 0x12:
case 0x14:
case 0x15:
#endif
default:
goto illegal_insn;
}
#if !defined(CONFIG_USER_ONLY)
} else if (xop == 0x29) {
#ifndef TARGET_SPARC64
if (!supervisor(dc)) {
goto priv_insn;
}
update_psr(dc);
gen_helper_rdpsr(cpu_dst, cpu_env);
#else
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
goto priv_insn;
rs1 = GET_FIELD(insn, 13, 17);
switch (rs1) {
case 0:
break;
case 1:
break;
case 3:
tcg_gen_mov_tl(cpu_dst, cpu_hintp);
break;
case 5:
tcg_gen_mov_tl(cpu_dst, cpu_htba);
break;
case 6:
tcg_gen_mov_tl(cpu_dst, cpu_hver);
break;
case 31:
tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
break;
default:
goto illegal_insn;
}
#endif
gen_store_gpr(dc, rd, cpu_dst);
break;
} else if (xop == 0x2a) {
if (!supervisor(dc))
goto priv_insn;
#ifdef TARGET_SPARC64
rs1 = GET_FIELD(insn, 13, 17);
switch (rs1) {
case 0:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 1:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tnpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 2:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tstate));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 3:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_i32(cpu_tmp32, r_tsptr,
offsetof(trap_state, tt));
tcg_temp_free_ptr(r_tsptr);
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
}
break;
case 4:
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 5:
tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
break;
case 6:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, pstate));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 7:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, tl));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 8:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, psrpil));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 9:
gen_helper_rdcwp(cpu_tmp0, cpu_env);
break;
case 10:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, cansave));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 11:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, canrestore));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 12:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, cleanwin));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 13:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, otherwin));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 14:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, wstate));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 16:
CHECK_IU_FEATURE(dc, GL);
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, gl));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 26:
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
goto priv_insn;
tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
break;
case 31:
tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
break;
case 15:
default:
goto illegal_insn;
}
#else
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
gen_store_gpr(dc, rd, cpu_tmp0);
break;
} else if (xop == 0x2b) {
#ifdef TARGET_SPARC64
save_state(dc);
gen_helper_flushw(cpu_env);
#else
if (!supervisor(dc))
goto priv_insn;
gen_store_gpr(dc, rd, cpu_tbr);
#endif
break;
#endif
} else if (xop == 0x34) {
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_op_clear_ieee_excp_and_FTT();
rs1 = GET_FIELD(insn, 13, 17);
rs2 = GET_FIELD(insn, 27, 31);
xop = GET_FIELD(insn, 18, 26);
save_state(dc);
switch (xop) {
case 0x1:
cpu_src1_32 = gen_load_fpr_F(dc, rs2);
gen_store_fpr_F(dc, rd, cpu_src1_32);
break;
case 0x5:
gen_ne_fop_FF(dc, rd, rs2, gen_helper_fnegs);
break;
case 0x9:
gen_ne_fop_FF(dc, rd, rs2, gen_helper_fabss);
break;
case 0x29:
CHECK_FPU_FEATURE(dc, FSQRT);
gen_fop_FF(dc, rd, rs2, gen_helper_fsqrts);
break;
case 0x2a:
CHECK_FPU_FEATURE(dc, FSQRT);
gen_fop_DD(dc, rd, rs2, gen_helper_fsqrtd);
break;
case 0x2b:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QQ(dc, rd, rs2, gen_helper_fsqrtq);
break;
case 0x41:
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fadds);
break;
case 0x42:
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_faddd);
break;
case 0x43:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_faddq);
break;
case 0x45:
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fsubs);
break;
case 0x46:
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fsubd);
break;
case 0x47:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fsubq);
break;
case 0x49:
CHECK_FPU_FEATURE(dc, FMUL);
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fmuls);
break;
case 0x4a:
CHECK_FPU_FEATURE(dc, FMUL);
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld);
break;
case 0x4b:
CHECK_FPU_FEATURE(dc, FLOAT128);
CHECK_FPU_FEATURE(dc, FMUL);
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fmulq);
break;
case 0x4d:
gen_fop_FFF(dc, rd, rs1, rs2, gen_helper_fdivs);
break;
case 0x4e:
gen_fop_DDD(dc, rd, rs1, rs2, gen_helper_fdivd);
break;
case 0x4f:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QQQ(dc, rd, rs1, rs2, gen_helper_fdivq);
break;
case 0x69:
CHECK_FPU_FEATURE(dc, FSMULD);
gen_fop_DFF(dc, rd, rs1, rs2, gen_helper_fsmuld);
break;
case 0x6e:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_QDD(dc, rd, rs1, rs2, gen_helper_fdmulq);
break;
case 0xc4:
gen_fop_FF(dc, rd, rs2, gen_helper_fitos);
break;
case 0xc6:
gen_fop_FD(dc, rd, rs2, gen_helper_fdtos);
break;
case 0xc7:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_FQ(dc, rd, rs2, gen_helper_fqtos);
break;
case 0xc8:
gen_ne_fop_DF(dc, rd, rs2, gen_helper_fitod);
break;
case 0xc9:
gen_ne_fop_DF(dc, rd, rs2, gen_helper_fstod);
break;
case 0xcb:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_DQ(dc, rd, rs2, gen_helper_fqtod);
break;
case 0xcc:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QF(dc, rd, rs2, gen_helper_fitoq);
break;
case 0xcd:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QF(dc, rd, rs2, gen_helper_fstoq);
break;
case 0xce:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QD(dc, rd, rs2, gen_helper_fdtoq);
break;
case 0xd1:
gen_fop_FF(dc, rd, rs2, gen_helper_fstoi);
break;
case 0xd2:
gen_fop_FD(dc, rd, rs2, gen_helper_fdtoi);
break;
case 0xd3:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_FQ(dc, rd, rs2, gen_helper_fqtoi);
break;
#ifdef TARGET_SPARC64
case 0x2:
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
gen_store_fpr_D(dc, rd, cpu_src1_64);
break;
case 0x3:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_move_Q(rd, rs2);
break;
case 0x6:
gen_ne_fop_DD(dc, rd, rs2, gen_helper_fnegd);
break;
case 0x7:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fnegq);
break;
case 0xa:
gen_ne_fop_DD(dc, rd, rs2, gen_helper_fabsd);
break;
case 0xb:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QQ(dc, rd, rs2, gen_helper_fabsq);
break;
case 0x81:
gen_fop_DF(dc, rd, rs2, gen_helper_fstox);
break;
case 0x82:
gen_fop_DD(dc, rd, rs2, gen_helper_fdtox);
break;
case 0x83:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_fop_DQ(dc, rd, rs2, gen_helper_fqtox);
break;
case 0x84:
gen_fop_FD(dc, rd, rs2, gen_helper_fxtos);
break;
case 0x88:
gen_fop_DD(dc, rd, rs2, gen_helper_fxtod);
break;
case 0x8c:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_ne_fop_QD(dc, rd, rs2, gen_helper_fxtoq);
break;
#endif
default:
goto illegal_insn;
}
} else if (xop == 0x35) {
#ifdef TARGET_SPARC64
int cond;
#endif
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_op_clear_ieee_excp_and_FTT();
rs1 = GET_FIELD(insn, 13, 17);
rs2 = GET_FIELD(insn, 27, 31);
xop = GET_FIELD(insn, 18, 26);
save_state(dc);
#ifdef TARGET_SPARC64
#define FMOVR(sz) \
do { \
DisasCompare cmp; \
cond = GET_FIELD_SP(insn, 14, 17); \
cpu_src1 = get_src1(dc, insn); \
gen_compare_reg(&cmp, cond, cpu_src1); \
gen_fmov##sz(dc, &cmp, rd, rs2); \
free_compare(&cmp); \
} while (0)
if ((xop & 0x11f) == 0x005) {
FMOVR(s);
break;
} else if ((xop & 0x11f) == 0x006) {
FMOVR(d);
break;
} else if ((xop & 0x11f) == 0x007) {
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVR(q);
break;
}
#undef FMOVR
#endif
switch (xop) {
#ifdef TARGET_SPARC64
#define FMOVCC(fcc, sz) \
do { \
DisasCompare cmp; \
cond = GET_FIELD_SP(insn, 14, 17); \
gen_fcompare(&cmp, fcc, cond); \
gen_fmov##sz(dc, &cmp, rd, rs2); \
free_compare(&cmp); \
} while (0)
case 0x001:
FMOVCC(0, s);
break;
case 0x002:
FMOVCC(0, d);
break;
case 0x003:
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(0, q);
break;
case 0x041:
FMOVCC(1, s);
break;
case 0x042:
FMOVCC(1, d);
break;
case 0x043:
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(1, q);
break;
case 0x081:
FMOVCC(2, s);
break;
case 0x082:
FMOVCC(2, d);
break;
case 0x083:
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(2, q);
break;
case 0x0c1:
FMOVCC(3, s);
break;
case 0x0c2:
FMOVCC(3, d);
break;
case 0x0c3:
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(3, q);
break;
#undef FMOVCC
#define FMOVCC(xcc, sz) \
do { \
DisasCompare cmp; \
cond = GET_FIELD_SP(insn, 14, 17); \
gen_compare(&cmp, xcc, cond, dc); \
gen_fmov##sz(dc, &cmp, rd, rs2); \
free_compare(&cmp); \
} while (0)
case 0x101:
FMOVCC(0, s);
break;
case 0x102:
FMOVCC(0, d);
break;
case 0x103:
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(0, q);
break;
case 0x181:
FMOVCC(1, s);
break;
case 0x182:
FMOVCC(1, d);
break;
case 0x183:
CHECK_FPU_FEATURE(dc, FLOAT128);
FMOVCC(1, q);
break;
#undef FMOVCC
#endif
case 0x51:
cpu_src1_32 = gen_load_fpr_F(dc, rs1);
cpu_src2_32 = gen_load_fpr_F(dc, rs2);
gen_op_fcmps(rd & 3, cpu_src1_32, cpu_src2_32);
break;
case 0x52:
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_op_fcmpd(rd & 3, cpu_src1_64, cpu_src2_64);
break;
case 0x53:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_op_load_fpr_QT0(QFPREG(rs1));
gen_op_load_fpr_QT1(QFPREG(rs2));
gen_op_fcmpq(rd & 3);
break;
case 0x55:
cpu_src1_32 = gen_load_fpr_F(dc, rs1);
cpu_src2_32 = gen_load_fpr_F(dc, rs2);
gen_op_fcmpes(rd & 3, cpu_src1_32, cpu_src2_32);
break;
case 0x56:
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_op_fcmped(rd & 3, cpu_src1_64, cpu_src2_64);
break;
case 0x57:
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_op_load_fpr_QT0(QFPREG(rs1));
gen_op_load_fpr_QT1(QFPREG(rs2));
gen_op_fcmpeq(rd & 3);
break;
default:
goto illegal_insn;
}
} else if (xop == 0x2) {
TCGv dst = gen_dest_gpr(dc, rd);
rs1 = GET_FIELD(insn, 13, 17);
if (rs1 == 0) {
if (IS_IMM) {
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_movi_tl(dst, simm);
gen_store_gpr(dc, rd, dst);
} else {
rs2 = GET_FIELD(insn, 27, 31);
if (rs2 == 0) {
tcg_gen_movi_tl(dst, 0);
gen_store_gpr(dc, rd, dst);
} else {
cpu_src2 = gen_load_gpr(dc, rs2);
gen_store_gpr(dc, rd, cpu_src2);
}
}
} else {
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) {
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_ori_tl(dst, cpu_src1, simm);
gen_store_gpr(dc, rd, dst);
} else {
rs2 = GET_FIELD(insn, 27, 31);
if (rs2 == 0) {
gen_store_gpr(dc, rd, cpu_src1);
} else {
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, dst);
}
}
}
#ifdef TARGET_SPARC64
} else if (xop == 0x25) {
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) {
simm = GET_FIELDs(insn, 20, 31);
if (insn & (1 << 12)) {
tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
} else {
tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
}
} else {
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
if (insn & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
}
tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(dc, rd, cpu_dst);
} else if (xop == 0x26) {
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) {
simm = GET_FIELDs(insn, 20, 31);
if (insn & (1 << 12)) {
tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
} else {
tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
}
} else {
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
if (insn & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
}
}
gen_store_gpr(dc, rd, cpu_dst);
} else if (xop == 0x27) {
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) {
simm = GET_FIELDs(insn, 20, 31);
if (insn & (1 << 12)) {
tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
} else {
tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
}
} else {
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
if (insn & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
}
}
gen_store_gpr(dc, rd, cpu_dst);
#endif
} else if (xop < 0x36) {
if (xop < 0x20) {
cpu_src1 = get_src1(dc, insn);
cpu_src2 = get_src2(dc, insn);
switch (xop & ~0x10) {
case 0x0:
if (xop & 0x10) {
gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
dc->cc_op = CC_OP_ADD;
} else {
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
}
break;
case 0x1:
tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x2:
tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x3:
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x4:
if (xop & 0x10) {
gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
dc->cc_op = CC_OP_SUB;
} else {
tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
}
break;
case 0x5:
tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x6:
tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x7:
tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0x8:
gen_op_addx_int(dc, cpu_dst, cpu_src1, cpu_src2,
(xop & 0x10));
break;
#ifdef TARGET_SPARC64
case 0x9:
tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
break;
#endif
case 0xa:
CHECK_IU_FEATURE(dc, MUL);
gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0xb:
CHECK_IU_FEATURE(dc, MUL);
gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
if (xop & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
dc->cc_op = CC_OP_LOGIC;
}
break;
case 0xc:
gen_op_subx_int(dc, cpu_dst, cpu_src1, cpu_src2,
(xop & 0x10));
break;
#ifdef TARGET_SPARC64
case 0xd:
gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
break;
#endif
case 0xe:
CHECK_IU_FEATURE(dc, DIV);
if (xop & 0x10) {
gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
dc->cc_op = CC_OP_DIV;
} else {
gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
}
break;
case 0xf:
CHECK_IU_FEATURE(dc, DIV);
if (xop & 0x10) {
gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
dc->cc_op = CC_OP_DIV;
} else {
gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
}
break;
default:
goto illegal_insn;
}
gen_store_gpr(dc, rd, cpu_dst);
} else {
cpu_src1 = get_src1(dc, insn);
cpu_src2 = get_src2(dc, insn);
switch (xop) {
case 0x20:
gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
dc->cc_op = CC_OP_TADD;
break;
case 0x21:
gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
dc->cc_op = CC_OP_TSUB;
break;
case 0x22:
gen_helper_taddcctv(cpu_dst, cpu_env,
cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
dc->cc_op = CC_OP_TADDTV;
break;
case 0x23:
gen_helper_tsubcctv(cpu_dst, cpu_env,
cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
dc->cc_op = CC_OP_TSUBTV;
break;
case 0x24:
update_psr(dc);
gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
dc->cc_op = CC_OP_ADD;
break;
#ifndef TARGET_SPARC64
case 0x25:
if (IS_IMM) {
simm = GET_FIELDs(insn, 20, 31);
tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
} else {
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x26:
if (IS_IMM) {
simm = GET_FIELDs(insn, 20, 31);
tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
} else {
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x27:
if (IS_IMM) {
simm = GET_FIELDs(insn, 20, 31);
tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
} else {
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(dc, rd, cpu_dst);
break;
#endif
case 0x30:
{
switch(rd) {
case 0:
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
break;
#ifndef TARGET_SPARC64
case 0x01 ... 0x0f:
case 0x10 ... 0x1f:
break;
#else
case 0x2:
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
gen_helper_wrccr(cpu_env, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
dc->cc_op = CC_OP_FLAGS;
break;
case 0x3:
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_andi_tl(cpu_dst, cpu_dst, 0xff);
tcg_gen_trunc_tl_i32(cpu_asi, cpu_dst);
break;
case 0x6:
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_trunc_tl_i32(cpu_fprs, cpu_dst);
save_state(dc);
gen_op_next_insn();
tcg_gen_exit_tb(0);
dc->is_br = 1;
break;
case 0xf:
#if !defined(CONFIG_USER_ONLY)
if (supervisor(dc)) {
;
}
#endif
break;
case 0x13:
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
break;
case 0x14:
if (!supervisor(dc))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
gen_helper_set_softint(cpu_env, cpu_tmp64);
break;
case 0x15:
if (!supervisor(dc))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
gen_helper_clear_softint(cpu_env, cpu_tmp64);
break;
case 0x16:
if (!supervisor(dc))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
gen_helper_write_softint(cpu_env, cpu_tmp64);
break;
case 0x17:
#if !defined(CONFIG_USER_ONLY)
if (!supervisor(dc))
goto illegal_insn;
#endif
{
TCGv_ptr r_tickptr;
tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_set_limit(r_tickptr,
cpu_tick_cmpr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 0x18:
#if !defined(CONFIG_USER_ONLY)
if (!supervisor(dc))
goto illegal_insn;
#endif
{
TCGv_ptr r_tickptr;
tcg_gen_xor_tl(cpu_dst, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
gen_helper_tick_set_count(r_tickptr,
cpu_dst);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 0x19:
#if !defined(CONFIG_USER_ONLY)
if (!supervisor(dc))
goto illegal_insn;
#endif
{
TCGv_ptr r_tickptr;
tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
gen_helper_tick_set_limit(r_tickptr,
cpu_stick_cmpr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 0x10:
case 0x11:
case 0x12:
#endif
default:
goto illegal_insn;
}
}
break;
#if !defined(CONFIG_USER_ONLY)
case 0x31:
{
if (!supervisor(dc))
goto priv_insn;
#ifdef TARGET_SPARC64
switch (rd) {
case 0:
gen_helper_saved(cpu_env);
break;
case 1:
gen_helper_restored(cpu_env);
break;
case 2:
case 3:
case 4:
case 5:
default:
goto illegal_insn;
}
#else
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
gen_helper_wrpsr(cpu_env, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
dc->cc_op = CC_OP_FLAGS;
save_state(dc);
gen_op_next_insn();
tcg_gen_exit_tb(0);
dc->is_br = 1;
#endif
}
break;
case 0x32:
{
if (!supervisor(dc))
goto priv_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64
switch (rd) {
case 0:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 1:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tnpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 2:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state,
tstate));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 3:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, r_tsptr,
offsetof(trap_state, tt));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 4:
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_set_count(r_tickptr,
cpu_tmp0);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 5:
tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
break;
case 6:
save_state(dc);
gen_helper_wrpstate(cpu_env, cpu_tmp0);
dc->npc = DYNAMIC_PC;
break;
case 7:
save_state(dc);
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, tl));
dc->npc = DYNAMIC_PC;
break;
case 8:
gen_helper_wrpil(cpu_env, cpu_tmp0);
break;
case 9:
gen_helper_wrcwp(cpu_env, cpu_tmp0);
break;
case 10:
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
cansave));
break;
case 11:
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
canrestore));
break;
case 12:
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
cleanwin));
break;
case 13:
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
otherwin));
break;
case 14:
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
wstate));
break;
case 16:
CHECK_IU_FEATURE(dc, GL);
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, gl));
break;
case 26:
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
goto priv_insn;
tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
break;
default:
goto illegal_insn;
}
#else
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
if (dc->def->nwindows != 32)
tcg_gen_andi_tl(cpu_tmp32, cpu_tmp32,
(1 << dc->def->nwindows) - 1);
tcg_gen_mov_i32(cpu_wim, cpu_tmp32);
#endif
}
break;
case 0x33:
{
#ifndef TARGET_SPARC64
if (!supervisor(dc))
goto priv_insn;
tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
#else
CHECK_IU_FEATURE(dc, HYPV);
if (!hypervisor(dc))
goto priv_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
switch (rd) {
case 0:
gen_op_wrhpstate();
save_state(dc);
gen_op_next_insn();
tcg_gen_exit_tb(0);
dc->is_br = 1;
break;
case 1:
gen_op_wrhtstate();
break;
case 3:
tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
break;
case 5:
tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
break;
case 31:
{
TCGv_ptr r_tickptr;
tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, hstick));
gen_helper_tick_set_limit(r_tickptr,
cpu_hstick_cmpr);
tcg_temp_free_ptr(r_tickptr);
}
break;
                            case 6: /* hver — readonly, writes ignored */
default:
goto illegal_insn;
}
#endif
}
break;
#endif
#ifdef TARGET_SPARC64
case 0x2c:
{
int cc = GET_FIELD_SP(insn, 11, 12);
int cond = GET_FIELD_SP(insn, 14, 17);
DisasCompare cmp;
TCGv dst;
if (insn & (1 << 18)) {
if (cc == 0) {
gen_compare(&cmp, 0, cond, dc);
} else if (cc == 2) {
gen_compare(&cmp, 1, cond, dc);
} else {
goto illegal_insn;
}
} else {
gen_fcompare(&cmp, cc, cond);
}
if (IS_IMM) {
simm = GET_FIELD_SPs(insn, 0, 10);
tcg_gen_movi_tl(cpu_src2, simm);
}
dst = gen_load_gpr(dc, rd);
tcg_gen_movcond_tl(cmp.cond, dst,
cmp.c1, cmp.c2,
cpu_src2, dst);
free_compare(&cmp);
gen_store_gpr(dc, rd, dst);
break;
}
case 0x2d:
gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x2e:
gen_helper_popc(cpu_dst, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x2f:
{
int cond = GET_FIELD_SP(insn, 10, 12);
DisasCompare cmp;
TCGv dst;
gen_compare_reg(&cmp, cond, cpu_src1);
if (IS_IMM) {
simm = GET_FIELD_SPs(insn, 0, 9);
tcg_gen_movi_tl(cpu_src2, simm);
}
dst = gen_load_gpr(dc, rd);
tcg_gen_movcond_tl(cmp.cond, dst,
cmp.c1, cmp.c2,
cpu_src2, dst);
free_compare(&cmp);
gen_store_gpr(dc, rd, dst);
break;
}
#endif
default:
goto illegal_insn;
}
}
} else if (xop == 0x36) {
#ifdef TARGET_SPARC64
int opf = GET_FIELD_SP(insn, 5, 13);
rs1 = GET_FIELD(insn, 13, 17);
rs2 = GET_FIELD(insn, 27, 31);
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
switch (opf) {
case 0x000:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x001:
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x002:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x003:
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x004:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x005:
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x006:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x007:
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x008:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x009:
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x00a:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x00b:
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_edge(dc, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x010:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x012:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x014:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x018:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x01a:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x019:
CHECK_FPU_FEATURE(dc, VIS2);
cpu_src1 = gen_load_gpr(dc, rs1);
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x020:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x022:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x024:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x026:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x028:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x02a:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x02c:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x02e:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
cpu_src2_64 = gen_load_fpr_D(dc, rs2);
gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x031:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16);
break;
case 0x033:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16au);
break;
case 0x035:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8x16al);
break;
case 0x036:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8sux16);
break;
case 0x037:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmul8ulx16);
break;
case 0x038:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8sux16);
break;
case 0x039:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fmuld8ulx16);
break;
case 0x03a:
CHECK_FPU_FEATURE(dc, VIS1);
gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpack32);
break;
case 0x03b:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
cpu_dst_32 = gen_dest_fpr_F();
gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x03d:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
cpu_dst_32 = gen_dest_fpr_F();
gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x03e:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDDD(dc, rd, rs1, rs2, gen_helper_pdist);
break;
case 0x048:
CHECK_FPU_FEATURE(dc, VIS1);
gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_faligndata);
break;
case 0x04b:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpmerge);
break;
case 0x04c:
CHECK_FPU_FEATURE(dc, VIS2);
gen_gsr_fop_DDD(dc, rd, rs1, rs2, gen_helper_bshuffle);
break;
case 0x04d:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fexpand);
break;
case 0x050:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd16);
break;
case 0x051:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpadd16s);
break;
case 0x052:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpadd32);
break;
case 0x053:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_add_i32);
break;
case 0x054:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub16);
break;
case 0x055:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, gen_helper_fpsub16s);
break;
case 0x056:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, gen_helper_fpsub32);
break;
case 0x057:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_sub_i32);
break;
case 0x060:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_dst_64 = gen_dest_fpr_D();
tcg_gen_movi_i64(cpu_dst_64, 0);
gen_store_fpr_D(dc, rd, cpu_dst_64);
break;
case 0x061:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_dst_32 = gen_dest_fpr_F();
tcg_gen_movi_i32(cpu_dst_32, 0);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x062:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nor_i64);
break;
case 0x063:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nor_i32);
break;
case 0x064:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_andc_i64);
break;
case 0x065:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_andc_i32);
break;
case 0x066:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DD(dc, rd, rs2, tcg_gen_not_i64);
break;
case 0x067:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FF(dc, rd, rs2, tcg_gen_not_i32);
break;
case 0x068:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_andc_i64);
break;
case 0x069:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_andc_i32);
break;
case 0x06a:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DD(dc, rd, rs1, tcg_gen_not_i64);
break;
case 0x06b:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FF(dc, rd, rs1, tcg_gen_not_i32);
break;
case 0x06c:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_xor_i64);
break;
case 0x06d:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_xor_i32);
break;
case 0x06e:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_nand_i64);
break;
case 0x06f:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_nand_i32);
break;
case 0x070:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_and_i64);
break;
case 0x071:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_and_i32);
break;
case 0x072:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_eqv_i64);
break;
case 0x073:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_eqv_i32);
break;
case 0x074:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs1);
gen_store_fpr_D(dc, rd, cpu_src1_64);
break;
case 0x075:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_32 = gen_load_fpr_F(dc, rs1);
gen_store_fpr_F(dc, rd, cpu_src1_32);
break;
case 0x076:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_orc_i64);
break;
case 0x077:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_orc_i32);
break;
case 0x078:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_64 = gen_load_fpr_D(dc, rs2);
gen_store_fpr_D(dc, rd, cpu_src1_64);
break;
case 0x079:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_src1_32 = gen_load_fpr_F(dc, rs2);
gen_store_fpr_F(dc, rd, cpu_src1_32);
break;
case 0x07a:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs2, rs1, tcg_gen_orc_i64);
break;
case 0x07b:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs2, rs1, tcg_gen_orc_i32);
break;
case 0x07c:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_DDD(dc, rd, rs1, rs2, tcg_gen_or_i64);
break;
case 0x07d:
CHECK_FPU_FEATURE(dc, VIS1);
gen_ne_fop_FFF(dc, rd, rs1, rs2, tcg_gen_or_i32);
break;
case 0x07e:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_dst_64 = gen_dest_fpr_D();
tcg_gen_movi_i64(cpu_dst_64, -1);
gen_store_fpr_D(dc, rd, cpu_dst_64);
break;
case 0x07f:
CHECK_FPU_FEATURE(dc, VIS1);
cpu_dst_32 = gen_dest_fpr_F();
tcg_gen_movi_i32(cpu_dst_32, -1);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x080:
case 0x081:
goto illegal_insn;
default:
goto illegal_insn;
}
#else
goto ncp_insn;
#endif
} else if (xop == 0x37) {
#ifdef TARGET_SPARC64
goto illegal_insn;
#else
goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
} else if (xop == 0x39) {
TCGv_i32 r_const;
save_state(dc);
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) {
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
} else {
rs2 = GET_FIELD(insn, 27, 31);
if (rs2) {
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
} else {
tcg_gen_mov_tl(cpu_dst, cpu_src1);
}
}
gen_helper_restore(cpu_env);
gen_mov_pc_npc(dc);
r_const = tcg_const_i32(3);
gen_helper_check_align(cpu_env, cpu_dst, r_const);
tcg_temp_free_i32(r_const);
tcg_gen_mov_tl(cpu_npc, cpu_dst);
dc->npc = DYNAMIC_PC;
goto jmp_insn;
#endif
} else {
cpu_src1 = get_src1(dc, insn);
if (IS_IMM) {
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
} else {
rs2 = GET_FIELD(insn, 27, 31);
if (rs2) {
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
} else {
tcg_gen_mov_tl(cpu_dst, cpu_src1);
}
}
switch (xop) {
case 0x38:
{
TCGv t;
TCGv_i32 r_const;
t = gen_dest_gpr(dc, rd);
tcg_gen_movi_tl(t, dc->pc);
gen_store_gpr(dc, rd, t);
gen_mov_pc_npc(dc);
r_const = tcg_const_i32(3);
gen_helper_check_align(cpu_env, cpu_dst, r_const);
tcg_temp_free_i32(r_const);
gen_address_mask(dc, cpu_dst);
tcg_gen_mov_tl(cpu_npc, cpu_dst);
dc->npc = DYNAMIC_PC;
}
goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
case 0x39:
{
TCGv_i32 r_const;
if (!supervisor(dc))
goto priv_insn;
gen_mov_pc_npc(dc);
r_const = tcg_const_i32(3);
gen_helper_check_align(cpu_env, cpu_dst, r_const);
tcg_temp_free_i32(r_const);
tcg_gen_mov_tl(cpu_npc, cpu_dst);
dc->npc = DYNAMIC_PC;
gen_helper_rett(cpu_env);
}
goto jmp_insn;
#endif
case 0x3b:
if (!((dc)->def->features & CPU_FEATURE_FLUSH))
goto unimp_flush;
break;
case 0x3c:
save_state(dc);
gen_helper_save(cpu_env);
gen_store_gpr(dc, rd, cpu_dst);
break;
case 0x3d:
save_state(dc);
gen_helper_restore(cpu_env);
gen_store_gpr(dc, rd, cpu_dst);
break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
case 0x3e:
{
switch (rd) {
case 0:
if (!supervisor(dc))
goto priv_insn;
dc->npc = DYNAMIC_PC;
dc->pc = DYNAMIC_PC;
gen_helper_done(cpu_env);
goto jmp_insn;
case 1:
if (!supervisor(dc))
goto priv_insn;
dc->npc = DYNAMIC_PC;
dc->pc = DYNAMIC_PC;
gen_helper_retry(cpu_env);
goto jmp_insn;
default:
goto illegal_insn;
}
}
break;
#endif
default:
goto illegal_insn;
}
}
break;
}
break;
case 3:
{
unsigned int xop = GET_FIELD(insn, 7, 12);
cpu_src1 = get_src1(dc, insn);
if (xop == 0x3c || xop == 0x3e) {
rs2 = GET_FIELD(insn, 27, 31);
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_mov_tl(cpu_addr, cpu_src1);
} else if (IS_IMM) {
simm = GET_FIELDs(insn, 19, 31);
tcg_gen_addi_tl(cpu_addr, cpu_src1, simm);
} else {
rs2 = GET_FIELD(insn, 27, 31);
if (rs2 != 0) {
cpu_src2 = gen_load_gpr(dc, rs2);
tcg_gen_add_tl(cpu_addr, cpu_src1, cpu_src2);
} else {
tcg_gen_mov_tl(cpu_addr, cpu_src1);
}
}
if (xop < 4 || (xop > 7 && xop < 0x14 && xop != 0x0e) ||
(xop > 0x17 && xop <= 0x1d ) ||
(xop > 0x2c && xop <= 0x33) || xop == 0x1f || xop == 0x3d) {
TCGv cpu_val = gen_dest_gpr(dc, rd);
switch (xop) {
case 0x0:
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld32u(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x1:
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld8u(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x2:
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld16u(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x3:
if (rd & 1)
goto illegal_insn;
else {
TCGv_i32 r_const;
save_state(dc);
r_const = tcg_const_i32(7);
gen_helper_check_align(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffffULL);
gen_store_gpr(dc, rd + 1, cpu_tmp0);
tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
tcg_gen_trunc_i64_tl(cpu_val, cpu_tmp64);
tcg_gen_andi_tl(cpu_val, cpu_val, 0xffffffffULL);
}
break;
case 0x9:
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0xa:
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld16s(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0xd:
{
TCGv r_const;
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld8s(cpu_val, cpu_addr, dc->mem_idx);
r_const = tcg_const_tl(0xff);
tcg_gen_qemu_st8(r_const, cpu_addr, dc->mem_idx);
tcg_temp_free(r_const);
}
break;
case 0x0f:
CHECK_IU_FEATURE(dc, SWAP);
cpu_src1 = gen_load_gpr(dc, rd);
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
tcg_gen_qemu_st32(cpu_src1, cpu_addr, dc->mem_idx);
tcg_gen_mov_tl(cpu_val, cpu_tmp0);
break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
case 0x10:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 4, 0);
break;
case 0x11:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 1, 0);
break;
case 0x12:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 2, 0);
break;
case 0x13:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
if (rd & 1)
goto illegal_insn;
save_state(dc);
gen_ldda_asi(dc, cpu_val, cpu_addr, insn, rd);
goto skip_move;
case 0x19:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 1, 1);
break;
case 0x1a:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 2, 1);
break;
case 0x1d:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_ldstub_asi(cpu_val, cpu_addr, insn);
break;
case 0x1f:
CHECK_IU_FEATURE(dc, SWAP);
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
cpu_src1 = gen_load_gpr(dc, rd);
gen_swap_asi(cpu_val, cpu_src1, cpu_addr, insn);
break;
#ifndef TARGET_SPARC64
case 0x30:
case 0x31:
case 0x33:
goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
case 0x08:
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld32s(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x0b:
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld64(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x18:
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 4, 1);
break;
case 0x1b:
save_state(dc);
gen_ld_asi(cpu_val, cpu_addr, insn, 8, 0);
break;
case 0x2d:
goto skip_move;
case 0x30:
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
gen_ldf_asi(cpu_addr, insn, 4, rd);
gen_update_fprs_dirty(rd);
goto skip_move;
case 0x33:
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
gen_ldf_asi(cpu_addr, insn, 8, DFPREG(rd));
gen_update_fprs_dirty(DFPREG(rd));
goto skip_move;
case 0x3d:
goto skip_move;
case 0x32:
CHECK_FPU_FEATURE(dc, FLOAT128);
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
gen_ldf_asi(cpu_addr, insn, 16, QFPREG(rd));
gen_update_fprs_dirty(QFPREG(rd));
goto skip_move;
#endif
default:
goto illegal_insn;
}
gen_store_gpr(dc, rd, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
skip_move: ;
#endif
} else if (xop >= 0x20 && xop < 0x24) {
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
switch (xop) {
case 0x20:
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
cpu_dst_32 = gen_dest_fpr_F();
tcg_gen_trunc_tl_i32(cpu_dst_32, cpu_tmp0);
gen_store_fpr_F(dc, rd, cpu_dst_32);
break;
case 0x21:
#ifdef TARGET_SPARC64
gen_address_mask(dc, cpu_addr);
if (rd == 1) {
tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, dc->mem_idx);
gen_helper_ldxfsr(cpu_env, cpu_tmp64);
} else {
tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, dc->mem_idx);
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
gen_helper_ldfsr(cpu_env, cpu_tmp32);
}
#else
{
tcg_gen_qemu_ld32u(cpu_tmp32, cpu_addr, dc->mem_idx);
gen_helper_ldfsr(cpu_env, cpu_tmp32);
}
#endif
break;
case 0x22:
{
TCGv_i32 r_const;
CHECK_FPU_FEATURE(dc, FLOAT128);
r_const = tcg_const_i32(dc->mem_idx);
gen_address_mask(dc, cpu_addr);
gen_helper_ldqf(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
gen_op_store_QT0_fpr(QFPREG(rd));
gen_update_fprs_dirty(QFPREG(rd));
}
break;
case 0x23:
gen_address_mask(dc, cpu_addr);
cpu_dst_64 = gen_dest_fpr_D();
tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, dc->mem_idx);
gen_store_fpr_D(dc, rd, cpu_dst_64);
break;
default:
goto illegal_insn;
}
} else if (xop < 8 || (xop >= 0x14 && xop < 0x18) ||
xop == 0xe || xop == 0x1e) {
TCGv cpu_val = gen_load_gpr(dc, rd);
switch (xop) {
case 0x4:
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st32(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x5:
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st8(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x6:
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st16(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x7:
if (rd & 1)
goto illegal_insn;
else {
TCGv_i32 r_const;
TCGv lo;
save_state(dc);
gen_address_mask(dc, cpu_addr);
r_const = tcg_const_i32(7);
gen_helper_check_align(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
lo = gen_load_gpr(dc, rd + 1);
tcg_gen_concat_tl_i64(cpu_tmp64, lo, cpu_val);
tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
}
break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
case 0x14:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_st_asi(cpu_val, cpu_addr, insn, 4);
dc->npc = DYNAMIC_PC;
break;
case 0x15:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_st_asi(cpu_val, cpu_addr, insn, 1);
dc->npc = DYNAMIC_PC;
break;
case 0x16:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
save_state(dc);
gen_st_asi(cpu_val, cpu_addr, insn, 2);
dc->npc = DYNAMIC_PC;
break;
case 0x17:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(dc))
goto priv_insn;
#endif
if (rd & 1)
goto illegal_insn;
else {
save_state(dc);
gen_stda_asi(dc, cpu_val, cpu_addr, insn, rd);
}
break;
#endif
#ifdef TARGET_SPARC64
case 0x0e:
gen_address_mask(dc, cpu_addr);
tcg_gen_qemu_st64(cpu_val, cpu_addr, dc->mem_idx);
break;
case 0x1e:
save_state(dc);
gen_st_asi(cpu_val, cpu_addr, insn, 8);
dc->npc = DYNAMIC_PC;
break;
#endif
default:
goto illegal_insn;
}
} else if (xop > 0x23 && xop < 0x28) {
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
save_state(dc);
switch (xop) {
case 0x24:
gen_address_mask(dc, cpu_addr);
cpu_src1_32 = gen_load_fpr_F(dc, rd);
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_src1_32);
tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, dc->mem_idx);
break;
case 0x25:
#ifdef TARGET_SPARC64
gen_address_mask(dc, cpu_addr);
tcg_gen_ld_i64(cpu_tmp64, cpu_env, offsetof(CPUSPARCState, fsr));
if (rd == 1)
tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, dc->mem_idx);
else
tcg_gen_qemu_st32(cpu_tmp64, cpu_addr, dc->mem_idx);
#else
tcg_gen_ld_i32(cpu_tmp32, cpu_env, offsetof(CPUSPARCState, fsr));
tcg_gen_qemu_st32(cpu_tmp32, cpu_addr, dc->mem_idx);
#endif
break;
case 0x26:
#ifdef TARGET_SPARC64
{
TCGv_i32 r_const;
CHECK_FPU_FEATURE(dc, FLOAT128);
gen_op_load_fpr_QT0(QFPREG(rd));
r_const = tcg_const_i32(dc->mem_idx);
gen_address_mask(dc, cpu_addr);
gen_helper_stqf(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
}
break;
#else
#if defined(CONFIG_USER_ONLY)
goto illegal_insn;
#else
if (!supervisor(dc))
goto priv_insn;
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
goto nfq_insn;
#endif
#endif
case 0x27:
gen_address_mask(dc, cpu_addr);
cpu_src1_64 = gen_load_fpr_D(dc, rd);
tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, dc->mem_idx);
break;
default:
goto illegal_insn;
}
} else if (xop > 0x33 && xop < 0x3f) {
save_state(dc);
switch (xop) {
#ifdef TARGET_SPARC64
case 0x34:
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_stf_asi(cpu_addr, insn, 4, rd);
break;
case 0x36:
{
TCGv_i32 r_const;
CHECK_FPU_FEATURE(dc, FLOAT128);
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
r_const = tcg_const_i32(7);
gen_helper_check_align(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
gen_stf_asi(cpu_addr, insn, 16, QFPREG(rd));
}
break;
case 0x37:
if (gen_trap_ifnofpu(dc)) {
goto jmp_insn;
}
gen_stf_asi(cpu_addr, insn, 8, DFPREG(rd));
break;
case 0x3c:
gen_cas_asi(dc, cpu_addr, cpu_src2, insn, rd);
break;
case 0x3e:
gen_casx_asi(dc, cpu_addr, cpu_src2, insn, rd);
break;
#else
case 0x34:
case 0x35:
case 0x36:
case 0x37:
goto ncp_insn;
#endif
default:
goto illegal_insn;
}
} else
goto illegal_insn;
}
break;
}
if (dc->npc == DYNAMIC_PC) {
dc->pc = DYNAMIC_PC;
gen_op_next_insn();
} else if (dc->npc == JUMP_PC) {
gen_branch2(dc, dc->jump_pc[0], dc->jump_pc[1], cpu_cond);
dc->is_br = 1;
} else {
dc->pc = dc->npc;
dc->npc = dc->npc + 4;
}
jmp_insn:
goto egress;
illegal_insn:
{
TCGv_i32 r_const;
save_state(dc);
r_const = tcg_const_i32(TT_ILL_INSN);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free_i32(r_const);
dc->is_br = 1;
}
goto egress;
unimp_flush:
{
TCGv_i32 r_const;
save_state(dc);
r_const = tcg_const_i32(TT_UNIMP_FLUSH);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free_i32(r_const);
dc->is_br = 1;
}
goto egress;
#if !defined(CONFIG_USER_ONLY)
priv_insn:
{
TCGv_i32 r_const;
save_state(dc);
r_const = tcg_const_i32(TT_PRIV_INSN);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free_i32(r_const);
dc->is_br = 1;
}
goto egress;
#endif
nfpu_insn:
save_state(dc);
gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
dc->is_br = 1;
goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
nfq_insn:
save_state(dc);
gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
dc->is_br = 1;
goto egress;
#endif
#ifndef TARGET_SPARC64
ncp_insn:
{
TCGv r_const;
save_state(dc);
r_const = tcg_const_i32(TT_NCP_INSN);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free(r_const);
dc->is_br = 1;
}
goto egress;
#endif
egress:
tcg_temp_free(cpu_tmp1);
tcg_temp_free(cpu_tmp2);
if (dc->n_t32 != 0) {
int i;
for (i = dc->n_t32 - 1; i >= 0; --i) {
tcg_temp_free_i32(dc->t32[i]);
}
dc->n_t32 = 0;
}
if (dc->n_ttl != 0) {
int i;
for (i = dc->n_ttl - 1; i >= 0; --i) {
tcg_temp_free(dc->ttl[i]);
}
dc->n_ttl = 0;
}
}
| {
"code": [
" TCGv cpu_src1, cpu_src2, cpu_tmp1, cpu_tmp2;",
" cpu_tmp1 = cpu_src1 = tcg_temp_new();",
" cpu_tmp2 = cpu_src2 = tcg_temp_new();",
" rs2 = GET_FIELD(insn, 27, 31);",
" cpu_src2 = gen_load_gpr(dc, rs2);",
" } else",
" tcg_temp_free(cpu_tmp1);",
" tcg_temp_free(cpu_tmp2);"
],
"line_no": [
7,
33,
35,
983,
4247,
5219,
5379,
5381
]
} | static void FUNC_0(DisasContext * VAR_0, unsigned int VAR_1)
{
unsigned int VAR_2, VAR_3, VAR_4, VAR_5;
TCGv cpu_src1, cpu_src2, cpu_tmp1, cpu_tmp2;
TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;
TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;
target_long simm;
if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_OP | CPU_LOG_TB_OP_OPT))) {
tcg_gen_debug_insn_start(VAR_0->pc);
}
VAR_2 = GET_FIELD(VAR_1, 0, 1);
VAR_5 = GET_FIELD(VAR_1, 2, 6);
cpu_tmp1 = cpu_src1 = tcg_temp_new();
cpu_tmp2 = cpu_src2 = tcg_temp_new();
switch (VAR_2) {
case 0:
{
unsigned int VAR_10 = GET_FIELD(VAR_1, 7, 9);
int32_t target;
switch (VAR_10) {
#ifdef TARGET_SPARC64
case 0x1:
{
int cc;
target = GET_FIELD_SP(VAR_1, 0, 18);
target = sign_extend(target, 19);
target <<= 2;
cc = GET_FIELD_SP(VAR_1, 20, 21);
if (cc == 0)
do_branch(VAR_0, target, VAR_1, 0);
else if (cc == 2)
do_branch(VAR_0, target, VAR_1, 1);
else
goto illegal_insn;
goto jmp_insn;
}
case 0x3:
{
target = GET_FIELD_SP(VAR_1, 0, 13) |
(GET_FIELD_SP(VAR_1, 20, 21) << 14);
target = sign_extend(target, 16);
target <<= 2;
cpu_src1 = get_src1(VAR_0, VAR_1);
do_branch_reg(VAR_0, target, VAR_1, cpu_src1);
goto jmp_insn;
}
case 0x5:
{
int cc = GET_FIELD_SP(VAR_1, 20, 21);
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
target = GET_FIELD_SP(VAR_1, 0, 18);
target = sign_extend(target, 19);
target <<= 2;
do_fbranch(VAR_0, target, VAR_1, cc);
goto jmp_insn;
}
#else
case 0x7:
{
goto ncp_insn;
}
#endif
case 0x2:
{
target = GET_FIELD(VAR_1, 10, 31);
target = sign_extend(target, 22);
target <<= 2;
do_branch(VAR_0, target, VAR_1, 0);
goto jmp_insn;
}
case 0x6:
{
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
target = GET_FIELD(VAR_1, 10, 31);
target = sign_extend(target, 22);
target <<= 2;
do_fbranch(VAR_0, target, VAR_1, 0);
goto jmp_insn;
}
case 0x4:
if (VAR_5) {
uint32_t value = GET_FIELD(VAR_1, 10, 31);
TCGv t = gen_dest_gpr(VAR_0, VAR_5);
tcg_gen_movi_tl(t, value << 10);
gen_store_gpr(VAR_0, VAR_5, t);
}
break;
case 0x0:
default:
goto illegal_insn;
}
break;
}
break;
case 1:
{
target_long target = GET_FIELDs(VAR_1, 2, 31) << 2;
TCGv o7 = gen_dest_gpr(VAR_0, 15);
tcg_gen_movi_tl(o7, VAR_0->pc);
gen_store_gpr(VAR_0, 15, o7);
target += VAR_0->pc;
gen_mov_pc_npc(VAR_0);
#ifdef TARGET_SPARC64
if (unlikely(AM_CHECK(VAR_0))) {
target &= 0xffffffffULL;
}
#endif
VAR_0->npc = target;
}
goto jmp_insn;
case 2:
{
unsigned int VAR_10 = GET_FIELD(VAR_1, 7, 12);
if (VAR_10 == 0x3a) {
int VAR_7 = GET_FIELD(VAR_1, 3, 6);
TCGv_i32 trap;
int VAR_8 = -1, VAR_9;
if (VAR_7 == 0) {
break;
}
save_state(VAR_0);
if (VAR_7 != 8) {
DisasCompare cmp;
#ifdef TARGET_SPARC64
int cc = GET_FIELD_SP(VAR_1, 11, 12);
if (cc == 0) {
gen_compare(&cmp, 0, VAR_7, VAR_0);
} else if (cc == 2) {
gen_compare(&cmp, 1, VAR_7, VAR_0);
} else {
goto illegal_insn;
}
#else
gen_compare(&cmp, 0, VAR_7, VAR_0);
#endif
VAR_8 = gen_new_label();
tcg_gen_brcond_tl(tcg_invert_cond(cmp.VAR_7),
cmp.c1, cmp.c2, VAR_8);
free_compare(&cmp);
}
VAR_9 = ((VAR_0->def->features & CPU_FEATURE_HYPV) && supervisor(VAR_0)
? UA2005_HTRAP_MASK : V8_TRAP_MASK);
trap = tcg_temp_new_i32();
VAR_3 = GET_FIELD_SP(VAR_1, 14, 18);
if (IS_IMM) {
VAR_4 = GET_FIELD_SP(VAR_1, 0, 6);
if (VAR_3 == 0) {
tcg_gen_movi_i32(trap, (VAR_4 & VAR_9) + TT_TRAP);
VAR_9 = 0;
} else {
TCGv t1 = gen_load_gpr(VAR_0, VAR_3);
tcg_gen_trunc_tl_i32(trap, t1);
tcg_gen_addi_i32(trap, trap, VAR_4);
}
} else {
TCGv t1, t2;
VAR_4 = GET_FIELD_SP(VAR_1, 0, 4);
t1 = gen_load_gpr(VAR_0, VAR_3);
t2 = gen_load_gpr(VAR_0, VAR_4);
tcg_gen_add_tl(t1, t1, t2);
tcg_gen_trunc_tl_i32(trap, t1);
}
if (VAR_9 != 0) {
tcg_gen_andi_i32(trap, trap, VAR_9);
tcg_gen_addi_i32(trap, trap, TT_TRAP);
}
gen_helper_raise_exception(cpu_env, trap);
tcg_temp_free_i32(trap);
if (VAR_7 == 8) {
VAR_0->is_br = 1;
goto jmp_insn;
} else {
gen_set_label(VAR_8);
break;
}
} else if (VAR_10 == 0x28) {
VAR_3 = GET_FIELD(VAR_1, 13, 17);
switch(VAR_3) {
case 0:
#ifndef TARGET_SPARC64
case 0x01 ... 0x0e:
case 0x0f:
case 0x10 ... 0x1f:
if (VAR_3 == 0x11 && VAR_0->def->features & CPU_FEATURE_ASR17) {
TCGv t = gen_dest_gpr(VAR_0, VAR_5);
tcg_gen_movi_tl(t, (1 << 8) | (VAR_0->def->nwindows - 1));
gen_store_gpr(VAR_0, VAR_5, t);
break;
}
#endif
gen_store_gpr(VAR_0, VAR_5, cpu_y);
break;
#ifdef TARGET_SPARC64
case 0x2:
update_psr(VAR_0);
gen_helper_rdccr(cpu_dst, cpu_env);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x3:
tcg_gen_ext_i32_tl(cpu_dst, cpu_asi);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x4:
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_get_count(cpu_dst, r_tickptr);
tcg_temp_free_ptr(r_tickptr);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
}
break;
case 0x5:
{
TCGv t = gen_dest_gpr(VAR_0, VAR_5);
if (unlikely(AM_CHECK(VAR_0))) {
tcg_gen_movi_tl(t, VAR_0->pc & 0xffffffffULL);
} else {
tcg_gen_movi_tl(t, VAR_0->pc);
}
gen_store_gpr(VAR_0, VAR_5, t);
}
break;
case 0x6:
tcg_gen_ext_i32_tl(cpu_dst, cpu_fprs);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0xf:
break;
case 0x13:
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
gen_store_gpr(VAR_0, VAR_5, cpu_gsr);
break;
case 0x16:
tcg_gen_ext_i32_tl(cpu_dst, cpu_softint);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x17:
gen_store_gpr(VAR_0, VAR_5, cpu_tick_cmpr);
break;
case 0x18:
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
gen_helper_tick_get_count(cpu_dst, r_tickptr);
tcg_temp_free_ptr(r_tickptr);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
}
break;
case 0x19:
gen_store_gpr(VAR_0, VAR_5, cpu_stick_cmpr);
break;
case 0x10:
case 0x11:
case 0x12:
case 0x14:
case 0x15:
#endif
default:
goto illegal_insn;
}
#if !defined(CONFIG_USER_ONLY)
} else if (VAR_10 == 0x29) {
#ifndef TARGET_SPARC64
if (!supervisor(VAR_0)) {
goto priv_insn;
}
update_psr(VAR_0);
gen_helper_rdpsr(cpu_dst, cpu_env);
#else
CHECK_IU_FEATURE(VAR_0, HYPV);
if (!hypervisor(VAR_0))
goto priv_insn;
VAR_3 = GET_FIELD(VAR_1, 13, 17);
switch (VAR_3) {
case 0:
break;
case 1:
break;
case 3:
tcg_gen_mov_tl(cpu_dst, cpu_hintp);
break;
case 5:
tcg_gen_mov_tl(cpu_dst, cpu_htba);
break;
case 6:
tcg_gen_mov_tl(cpu_dst, cpu_hver);
break;
case 31:
tcg_gen_mov_tl(cpu_dst, cpu_hstick_cmpr);
break;
default:
goto illegal_insn;
}
#endif
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
} else if (VAR_10 == 0x2a) {
if (!supervisor(VAR_0))
goto priv_insn;
#ifdef TARGET_SPARC64
VAR_3 = GET_FIELD(VAR_1, 13, 17);
switch (VAR_3) {
case 0:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 1:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tnpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 2:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tstate));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 3:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_ld_i32(cpu_tmp32, r_tsptr,
offsetof(trap_state, tt));
tcg_temp_free_ptr(r_tsptr);
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
}
break;
case 4:
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_get_count(cpu_tmp0, r_tickptr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 5:
tcg_gen_mov_tl(cpu_tmp0, cpu_tbr);
break;
case 6:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, pstate));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 7:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, tl));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 8:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, psrpil));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 9:
gen_helper_rdcwp(cpu_tmp0, cpu_env);
break;
case 10:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, cansave));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 11:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, canrestore));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 12:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, cleanwin));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 13:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, otherwin));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 14:
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, wstate));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 16:
CHECK_IU_FEATURE(VAR_0, GL);
tcg_gen_ld_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, gl));
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_tmp32);
break;
case 26:
CHECK_IU_FEATURE(VAR_0, HYPV);
if (!hypervisor(VAR_0))
goto priv_insn;
tcg_gen_mov_tl(cpu_tmp0, cpu_ssr);
break;
case 31:
tcg_gen_mov_tl(cpu_tmp0, cpu_ver);
break;
case 15:
default:
goto illegal_insn;
}
#else
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_wim);
#endif
gen_store_gpr(VAR_0, VAR_5, cpu_tmp0);
break;
} else if (VAR_10 == 0x2b) {
#ifdef TARGET_SPARC64
save_state(VAR_0);
gen_helper_flushw(cpu_env);
#else
if (!supervisor(VAR_0))
goto priv_insn;
gen_store_gpr(VAR_0, VAR_5, cpu_tbr);
#endif
break;
#endif
} else if (VAR_10 == 0x34) {
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
gen_op_clear_ieee_excp_and_FTT();
VAR_3 = GET_FIELD(VAR_1, 13, 17);
VAR_4 = GET_FIELD(VAR_1, 27, 31);
VAR_10 = GET_FIELD(VAR_1, 18, 26);
save_state(VAR_0);
switch (VAR_10) {
case 0x1:
cpu_src1_32 = gen_load_fpr_F(VAR_0, VAR_4);
gen_store_fpr_F(VAR_0, VAR_5, cpu_src1_32);
break;
case 0x5:
gen_ne_fop_FF(VAR_0, VAR_5, VAR_4, gen_helper_fnegs);
break;
case 0x9:
gen_ne_fop_FF(VAR_0, VAR_5, VAR_4, gen_helper_fabss);
break;
case 0x29:
CHECK_FPU_FEATURE(VAR_0, FSQRT);
gen_fop_FF(VAR_0, VAR_5, VAR_4, gen_helper_fsqrts);
break;
case 0x2a:
CHECK_FPU_FEATURE(VAR_0, FSQRT);
gen_fop_DD(VAR_0, VAR_5, VAR_4, gen_helper_fsqrtd);
break;
case 0x2b:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_fop_QQ(VAR_0, VAR_5, VAR_4, gen_helper_fsqrtq);
break;
case 0x41:
gen_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fadds);
break;
case 0x42:
gen_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_faddd);
break;
case 0x43:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_fop_QQQ(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_faddq);
break;
case 0x45:
gen_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fsubs);
break;
case 0x46:
gen_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fsubd);
break;
case 0x47:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_fop_QQQ(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fsubq);
break;
case 0x49:
CHECK_FPU_FEATURE(VAR_0, FMUL);
gen_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fmuls);
break;
case 0x4a:
CHECK_FPU_FEATURE(VAR_0, FMUL);
gen_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fmuld);
break;
case 0x4b:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
CHECK_FPU_FEATURE(VAR_0, FMUL);
gen_fop_QQQ(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fmulq);
break;
case 0x4d:
gen_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fdivs);
break;
case 0x4e:
gen_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fdivd);
break;
case 0x4f:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_fop_QQQ(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fdivq);
break;
case 0x69:
CHECK_FPU_FEATURE(VAR_0, FSMULD);
gen_fop_DFF(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fsmuld);
break;
case 0x6e:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_fop_QDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fdmulq);
break;
case 0xc4:
gen_fop_FF(VAR_0, VAR_5, VAR_4, gen_helper_fitos);
break;
case 0xc6:
gen_fop_FD(VAR_0, VAR_5, VAR_4, gen_helper_fdtos);
break;
case 0xc7:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_fop_FQ(VAR_0, VAR_5, VAR_4, gen_helper_fqtos);
break;
case 0xc8:
gen_ne_fop_DF(VAR_0, VAR_5, VAR_4, gen_helper_fitod);
break;
case 0xc9:
gen_ne_fop_DF(VAR_0, VAR_5, VAR_4, gen_helper_fstod);
break;
case 0xcb:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_fop_DQ(VAR_0, VAR_5, VAR_4, gen_helper_fqtod);
break;
case 0xcc:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_ne_fop_QF(VAR_0, VAR_5, VAR_4, gen_helper_fitoq);
break;
case 0xcd:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_ne_fop_QF(VAR_0, VAR_5, VAR_4, gen_helper_fstoq);
break;
case 0xce:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_ne_fop_QD(VAR_0, VAR_5, VAR_4, gen_helper_fdtoq);
break;
case 0xd1:
gen_fop_FF(VAR_0, VAR_5, VAR_4, gen_helper_fstoi);
break;
case 0xd2:
gen_fop_FD(VAR_0, VAR_5, VAR_4, gen_helper_fdtoi);
break;
case 0xd3:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_fop_FQ(VAR_0, VAR_5, VAR_4, gen_helper_fqtoi);
break;
#ifdef TARGET_SPARC64
case 0x2:
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_4);
gen_store_fpr_D(VAR_0, VAR_5, cpu_src1_64);
break;
case 0x3:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_move_Q(VAR_5, VAR_4);
break;
case 0x6:
gen_ne_fop_DD(VAR_0, VAR_5, VAR_4, gen_helper_fnegd);
break;
case 0x7:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_ne_fop_QQ(VAR_0, VAR_5, VAR_4, gen_helper_fnegq);
break;
case 0xa:
gen_ne_fop_DD(VAR_0, VAR_5, VAR_4, gen_helper_fabsd);
break;
case 0xb:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_ne_fop_QQ(VAR_0, VAR_5, VAR_4, gen_helper_fabsq);
break;
case 0x81:
gen_fop_DF(VAR_0, VAR_5, VAR_4, gen_helper_fstox);
break;
case 0x82:
gen_fop_DD(VAR_0, VAR_5, VAR_4, gen_helper_fdtox);
break;
case 0x83:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_fop_DQ(VAR_0, VAR_5, VAR_4, gen_helper_fqtox);
break;
case 0x84:
gen_fop_FD(VAR_0, VAR_5, VAR_4, gen_helper_fxtos);
break;
case 0x88:
gen_fop_DD(VAR_0, VAR_5, VAR_4, gen_helper_fxtod);
break;
case 0x8c:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_ne_fop_QD(VAR_0, VAR_5, VAR_4, gen_helper_fxtoq);
break;
#endif
default:
goto illegal_insn;
}
} else if (VAR_10 == 0x35) {
#ifdef TARGET_SPARC64
int VAR_7;
#endif
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
gen_op_clear_ieee_excp_and_FTT();
VAR_3 = GET_FIELD(VAR_1, 13, 17);
VAR_4 = GET_FIELD(VAR_1, 27, 31);
VAR_10 = GET_FIELD(VAR_1, 18, 26);
save_state(VAR_0);
#ifdef TARGET_SPARC64
/* FMOVR: emit a conditional FP move keyed on the contents of an integer
 * register: gen_compare_reg() builds the comparison from cpu_src1, then
 * gen_fmov##sz() (sz = s/d/q) performs the guarded move from VAR_4 (rs2)
 * to VAR_5 (rd).  Insn field 14..17 selects the register condition.
 * NOTE(review): expands in place and relies on VAR_7, cpu_src1, VAR_0,
 * VAR_1, VAR_4, VAR_5 being declared in the enclosing scope. */
#define FMOVR(sz)                                           \
    do {                                                    \
        DisasCompare cmp;                                   \
        VAR_7 = GET_FIELD_SP(VAR_1, 14, 17);                \
        cpu_src1 = get_src1(VAR_0, VAR_1);                  \
        gen_compare_reg(&cmp, VAR_7, cpu_src1);             \
        gen_fmov##sz(VAR_0, &cmp, VAR_5, VAR_4);            \
        free_compare(&cmp);                                 \
    } while (0)
if ((VAR_10 & 0x11f) == 0x005) {
FMOVR(s);
break;
} else if ((VAR_10 & 0x11f) == 0x006) {
FMOVR(d);
break;
} else if ((VAR_10 & 0x11f) == 0x007) {
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
FMOVR(q);
break;
}
#undef FMOVR
#endif
switch (VAR_10) {
#ifdef TARGET_SPARC64
/* FMOVCC (FP-condition variant): emit a conditional FP move keyed on
 * floating-point condition-code set 'fcc' (gen_fcompare); gen_fmov##sz
 * (sz = s/d/q) performs the guarded move from VAR_4 (rs2) to VAR_5 (rd).
 * Insn field 14..17 supplies the condition selector.
 * NOTE(review): expands in place; uses VAR_7 and other enclosing-scope
 * locals.  Redefined below (after #undef) for the integer-cc variant. */
#define FMOVCC(fcc, sz)                                     \
    do {                                                    \
        DisasCompare cmp;                                   \
        VAR_7 = GET_FIELD_SP(VAR_1, 14, 17);                \
        gen_fcompare(&cmp, fcc, VAR_7);                     \
        gen_fmov##sz(VAR_0, &cmp, VAR_5, VAR_4);            \
        free_compare(&cmp);                                 \
    } while (0)
case 0x001:
FMOVCC(0, s);
break;
case 0x002:
FMOVCC(0, d);
break;
case 0x003:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
FMOVCC(0, q);
break;
case 0x041:
FMOVCC(1, s);
break;
case 0x042:
FMOVCC(1, d);
break;
case 0x043:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
FMOVCC(1, q);
break;
case 0x081:
FMOVCC(2, s);
break;
case 0x082:
FMOVCC(2, d);
break;
case 0x083:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
FMOVCC(2, q);
break;
case 0x0c1:
FMOVCC(3, s);
break;
case 0x0c2:
FMOVCC(3, d);
break;
case 0x0c3:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
FMOVCC(3, q);
break;
#undef FMOVCC
/* FMOVCC (integer-condition variant): same shape as the FP-cc variant
 * above, but the condition comes from the integer condition codes via
 * gen_compare(&cmp, xcc, ...) — 'xcc' selects icc (0) vs xcc (1).
 * gen_fmov##sz (sz = s/d/q) then moves VAR_4 (rs2) to VAR_5 (rd) when
 * the condition holds.  Insn field 14..17 supplies the condition.
 * NOTE(review): expands in place; relies on enclosing-scope locals. */
#define FMOVCC(xcc, sz)                                     \
    do {                                                    \
        DisasCompare cmp;                                   \
        VAR_7 = GET_FIELD_SP(VAR_1, 14, 17);                \
        gen_compare(&cmp, xcc, VAR_7, VAR_0);               \
        gen_fmov##sz(VAR_0, &cmp, VAR_5, VAR_4);            \
        free_compare(&cmp);                                 \
    } while (0)
case 0x101:
FMOVCC(0, s);
break;
case 0x102:
FMOVCC(0, d);
break;
case 0x103:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
FMOVCC(0, q);
break;
case 0x181:
FMOVCC(1, s);
break;
case 0x182:
FMOVCC(1, d);
break;
case 0x183:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
FMOVCC(1, q);
break;
#undef FMOVCC
#endif
case 0x51:
cpu_src1_32 = gen_load_fpr_F(VAR_0, VAR_3);
cpu_src2_32 = gen_load_fpr_F(VAR_0, VAR_4);
gen_op_fcmps(VAR_5 & 3, cpu_src1_32, cpu_src2_32);
break;
case 0x52:
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_3);
cpu_src2_64 = gen_load_fpr_D(VAR_0, VAR_4);
gen_op_fcmpd(VAR_5 & 3, cpu_src1_64, cpu_src2_64);
break;
case 0x53:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_op_load_fpr_QT0(QFPREG(VAR_3));
gen_op_load_fpr_QT1(QFPREG(VAR_4));
gen_op_fcmpq(VAR_5 & 3);
break;
case 0x55:
cpu_src1_32 = gen_load_fpr_F(VAR_0, VAR_3);
cpu_src2_32 = gen_load_fpr_F(VAR_0, VAR_4);
gen_op_fcmpes(VAR_5 & 3, cpu_src1_32, cpu_src2_32);
break;
case 0x56:
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_3);
cpu_src2_64 = gen_load_fpr_D(VAR_0, VAR_4);
gen_op_fcmped(VAR_5 & 3, cpu_src1_64, cpu_src2_64);
break;
case 0x57:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_op_load_fpr_QT0(QFPREG(VAR_3));
gen_op_load_fpr_QT1(QFPREG(VAR_4));
gen_op_fcmpeq(VAR_5 & 3);
break;
default:
goto illegal_insn;
}
} else if (VAR_10 == 0x2) {
TCGv dst = gen_dest_gpr(VAR_0, VAR_5);
VAR_3 = GET_FIELD(VAR_1, 13, 17);
if (VAR_3 == 0) {
if (IS_IMM) {
simm = GET_FIELDs(VAR_1, 19, 31);
tcg_gen_movi_tl(dst, simm);
gen_store_gpr(VAR_0, VAR_5, dst);
} else {
VAR_4 = GET_FIELD(VAR_1, 27, 31);
if (VAR_4 == 0) {
tcg_gen_movi_tl(dst, 0);
gen_store_gpr(VAR_0, VAR_5, dst);
} else {
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_store_gpr(VAR_0, VAR_5, cpu_src2);
}
}
} else {
cpu_src1 = get_src1(VAR_0, VAR_1);
if (IS_IMM) {
simm = GET_FIELDs(VAR_1, 19, 31);
tcg_gen_ori_tl(dst, cpu_src1, simm);
gen_store_gpr(VAR_0, VAR_5, dst);
} else {
VAR_4 = GET_FIELD(VAR_1, 27, 31);
if (VAR_4 == 0) {
gen_store_gpr(VAR_0, VAR_5, cpu_src1);
} else {
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
tcg_gen_or_tl(dst, cpu_src1, cpu_src2);
gen_store_gpr(VAR_0, VAR_5, dst);
}
}
}
#ifdef TARGET_SPARC64
} else if (VAR_10 == 0x25) {
cpu_src1 = get_src1(VAR_0, VAR_1);
if (IS_IMM) {
simm = GET_FIELDs(VAR_1, 20, 31);
if (VAR_1 & (1 << 12)) {
tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x3f);
} else {
tcg_gen_shli_i64(cpu_dst, cpu_src1, simm & 0x1f);
}
} else {
VAR_4 = GET_FIELD(VAR_1, 27, 31);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
if (VAR_1 & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
}
tcg_gen_shl_i64(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
} else if (VAR_10 == 0x26) {
cpu_src1 = get_src1(VAR_0, VAR_1);
if (IS_IMM) {
simm = GET_FIELDs(VAR_1, 20, 31);
if (VAR_1 & (1 << 12)) {
tcg_gen_shri_i64(cpu_dst, cpu_src1, simm & 0x3f);
} else {
tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
tcg_gen_shri_i64(cpu_dst, cpu_dst, simm & 0x1f);
}
} else {
VAR_4 = GET_FIELD(VAR_1, 27, 31);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
if (VAR_1 & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
tcg_gen_shr_i64(cpu_dst, cpu_src1, cpu_tmp0);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_andi_i64(cpu_dst, cpu_src1, 0xffffffffULL);
tcg_gen_shr_i64(cpu_dst, cpu_dst, cpu_tmp0);
}
}
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
} else if (VAR_10 == 0x27) {
cpu_src1 = get_src1(VAR_0, VAR_1);
if (IS_IMM) {
simm = GET_FIELDs(VAR_1, 20, 31);
if (VAR_1 & (1 << 12)) {
tcg_gen_sari_i64(cpu_dst, cpu_src1, simm & 0x3f);
} else {
tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
tcg_gen_sari_i64(cpu_dst, cpu_dst, simm & 0x1f);
}
} else {
VAR_4 = GET_FIELD(VAR_1, 27, 31);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
if (VAR_1 & (1 << 12)) {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x3f);
tcg_gen_sar_i64(cpu_dst, cpu_src1, cpu_tmp0);
} else {
tcg_gen_andi_i64(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_ext32s_i64(cpu_dst, cpu_src1);
tcg_gen_sar_i64(cpu_dst, cpu_dst, cpu_tmp0);
}
}
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
#endif
} else if (VAR_10 < 0x36) {
if (VAR_10 < 0x20) {
cpu_src1 = get_src1(VAR_0, VAR_1);
cpu_src2 = get_src2(VAR_0, VAR_1);
switch (VAR_10 & ~0x10) {
case 0x0:
if (VAR_10 & 0x10) {
gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
VAR_0->cc_op = CC_OP_ADD;
} else {
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
}
break;
case 0x1:
tcg_gen_and_tl(cpu_dst, cpu_src1, cpu_src2);
if (VAR_10 & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
VAR_0->cc_op = CC_OP_LOGIC;
}
break;
case 0x2:
tcg_gen_or_tl(cpu_dst, cpu_src1, cpu_src2);
if (VAR_10 & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
VAR_0->cc_op = CC_OP_LOGIC;
}
break;
case 0x3:
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
if (VAR_10 & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
VAR_0->cc_op = CC_OP_LOGIC;
}
break;
case 0x4:
if (VAR_10 & 0x10) {
gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_SUB);
VAR_0->cc_op = CC_OP_SUB;
} else {
tcg_gen_sub_tl(cpu_dst, cpu_src1, cpu_src2);
}
break;
case 0x5:
tcg_gen_andc_tl(cpu_dst, cpu_src1, cpu_src2);
if (VAR_10 & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
VAR_0->cc_op = CC_OP_LOGIC;
}
break;
case 0x6:
tcg_gen_orc_tl(cpu_dst, cpu_src1, cpu_src2);
if (VAR_10 & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
VAR_0->cc_op = CC_OP_LOGIC;
}
break;
case 0x7:
tcg_gen_eqv_tl(cpu_dst, cpu_src1, cpu_src2);
if (VAR_10 & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
VAR_0->cc_op = CC_OP_LOGIC;
}
break;
case 0x8:
gen_op_addx_int(VAR_0, cpu_dst, cpu_src1, cpu_src2,
(VAR_10 & 0x10));
break;
#ifdef TARGET_SPARC64
case 0x9:
tcg_gen_mul_i64(cpu_dst, cpu_src1, cpu_src2);
break;
#endif
case 0xa:
CHECK_IU_FEATURE(VAR_0, MUL);
gen_op_umul(cpu_dst, cpu_src1, cpu_src2);
if (VAR_10 & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
VAR_0->cc_op = CC_OP_LOGIC;
}
break;
case 0xb:
CHECK_IU_FEATURE(VAR_0, MUL);
gen_op_smul(cpu_dst, cpu_src1, cpu_src2);
if (VAR_10 & 0x10) {
tcg_gen_mov_tl(cpu_cc_dst, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_LOGIC);
VAR_0->cc_op = CC_OP_LOGIC;
}
break;
case 0xc:
gen_op_subx_int(VAR_0, cpu_dst, cpu_src1, cpu_src2,
(VAR_10 & 0x10));
break;
#ifdef TARGET_SPARC64
case 0xd:
gen_helper_udivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
break;
#endif
case 0xe:
CHECK_IU_FEATURE(VAR_0, DIV);
if (VAR_10 & 0x10) {
gen_helper_udiv_cc(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
VAR_0->cc_op = CC_OP_DIV;
} else {
gen_helper_udiv(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
}
break;
case 0xf:
CHECK_IU_FEATURE(VAR_0, DIV);
if (VAR_10 & 0x10) {
gen_helper_sdiv_cc(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
VAR_0->cc_op = CC_OP_DIV;
} else {
gen_helper_sdiv(cpu_dst, cpu_env, cpu_src1,
cpu_src2);
}
break;
default:
goto illegal_insn;
}
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
} else {
cpu_src1 = get_src1(VAR_0, VAR_1);
cpu_src2 = get_src2(VAR_0, VAR_1);
switch (VAR_10) {
case 0x20:
gen_op_add_cc(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_TADD);
VAR_0->cc_op = CC_OP_TADD;
break;
case 0x21:
gen_op_sub_cc(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_TSUB);
VAR_0->cc_op = CC_OP_TSUB;
break;
case 0x22:
gen_helper_taddcctv(cpu_dst, cpu_env,
cpu_src1, cpu_src2);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
VAR_0->cc_op = CC_OP_TADDTV;
break;
case 0x23:
gen_helper_tsubcctv(cpu_dst, cpu_env,
cpu_src1, cpu_src2);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
VAR_0->cc_op = CC_OP_TSUBTV;
break;
case 0x24:
update_psr(VAR_0);
gen_op_mulscc(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_ADD);
VAR_0->cc_op = CC_OP_ADD;
break;
#ifndef TARGET_SPARC64
case 0x25:
if (IS_IMM) {
simm = GET_FIELDs(VAR_1, 20, 31);
tcg_gen_shli_tl(cpu_dst, cpu_src1, simm & 0x1f);
} else {
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_shl_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x26:
if (IS_IMM) {
simm = GET_FIELDs(VAR_1, 20, 31);
tcg_gen_shri_tl(cpu_dst, cpu_src1, simm & 0x1f);
} else {
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_shr_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x27:
if (IS_IMM) {
simm = GET_FIELDs(VAR_1, 20, 31);
tcg_gen_sari_tl(cpu_dst, cpu_src1, simm & 0x1f);
} else {
tcg_gen_andi_tl(cpu_tmp0, cpu_src2, 0x1f);
tcg_gen_sar_tl(cpu_dst, cpu_src1, cpu_tmp0);
}
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
#endif
case 0x30:
{
switch(VAR_5) {
case 0:
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
tcg_gen_andi_tl(cpu_y, cpu_tmp0, 0xffffffff);
break;
#ifndef TARGET_SPARC64
case 0x01 ... 0x0f:
case 0x10 ... 0x1f:
break;
#else
case 0x2:
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
gen_helper_wrccr(cpu_env, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
VAR_0->cc_op = CC_OP_FLAGS;
break;
case 0x3:
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_andi_tl(cpu_dst, cpu_dst, 0xff);
tcg_gen_trunc_tl_i32(cpu_asi, cpu_dst);
break;
case 0x6:
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_trunc_tl_i32(cpu_fprs, cpu_dst);
save_state(VAR_0);
gen_op_next_insn();
tcg_gen_exit_tb(0);
VAR_0->is_br = 1;
break;
case 0xf:
#if !defined(CONFIG_USER_ONLY)
if (supervisor(VAR_0)) {
;
}
#endif
break;
case 0x13:
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
tcg_gen_xor_tl(cpu_gsr, cpu_src1, cpu_src2);
break;
case 0x14:
if (!supervisor(VAR_0))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
gen_helper_set_softint(cpu_env, cpu_tmp64);
break;
case 0x15:
if (!supervisor(VAR_0))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
gen_helper_clear_softint(cpu_env, cpu_tmp64);
break;
case 0x16:
if (!supervisor(VAR_0))
goto illegal_insn;
tcg_gen_xor_tl(cpu_tmp64, cpu_src1, cpu_src2);
gen_helper_write_softint(cpu_env, cpu_tmp64);
break;
case 0x17:
#if !defined(CONFIG_USER_ONLY)
if (!supervisor(VAR_0))
goto illegal_insn;
#endif
{
TCGv_ptr r_tickptr;
tcg_gen_xor_tl(cpu_tick_cmpr, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_set_limit(r_tickptr,
cpu_tick_cmpr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 0x18:
#if !defined(CONFIG_USER_ONLY)
if (!supervisor(VAR_0))
goto illegal_insn;
#endif
{
TCGv_ptr r_tickptr;
tcg_gen_xor_tl(cpu_dst, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
gen_helper_tick_set_count(r_tickptr,
cpu_dst);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 0x19:
#if !defined(CONFIG_USER_ONLY)
if (!supervisor(VAR_0))
goto illegal_insn;
#endif
{
TCGv_ptr r_tickptr;
tcg_gen_xor_tl(cpu_stick_cmpr, cpu_src1,
cpu_src2);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, stick));
gen_helper_tick_set_limit(r_tickptr,
cpu_stick_cmpr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 0x10:
case 0x11:
case 0x12:
#endif
default:
goto illegal_insn;
}
}
break;
#if !defined(CONFIG_USER_ONLY)
case 0x31:
{
if (!supervisor(VAR_0))
goto priv_insn;
#ifdef TARGET_SPARC64
switch (VAR_5) {
case 0:
gen_helper_saved(cpu_env);
break;
case 1:
gen_helper_restored(cpu_env);
break;
case 2:
case 3:
case 4:
case 5:
default:
goto illegal_insn;
}
#else
tcg_gen_xor_tl(cpu_dst, cpu_src1, cpu_src2);
gen_helper_wrpsr(cpu_env, cpu_dst);
tcg_gen_movi_i32(cpu_cc_op, CC_OP_FLAGS);
VAR_0->cc_op = CC_OP_FLAGS;
save_state(VAR_0);
gen_op_next_insn();
tcg_gen_exit_tb(0);
VAR_0->is_br = 1;
#endif
}
break;
case 0x32:
{
if (!supervisor(VAR_0))
goto priv_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
#ifdef TARGET_SPARC64
switch (VAR_5) {
case 0:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 1:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state, tnpc));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 2:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_st_tl(cpu_tmp0, r_tsptr,
offsetof(trap_state,
tstate));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 3:
{
TCGv_ptr r_tsptr;
r_tsptr = tcg_temp_new_ptr();
gen_load_trap_state_at_tl(r_tsptr, cpu_env);
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, r_tsptr,
offsetof(trap_state, tt));
tcg_temp_free_ptr(r_tsptr);
}
break;
case 4:
{
TCGv_ptr r_tickptr;
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, tick));
gen_helper_tick_set_count(r_tickptr,
cpu_tmp0);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 5:
tcg_gen_mov_tl(cpu_tbr, cpu_tmp0);
break;
case 6:
save_state(VAR_0);
gen_helper_wrpstate(cpu_env, cpu_tmp0);
VAR_0->npc = DYNAMIC_PC;
break;
case 7:
save_state(VAR_0);
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, tl));
VAR_0->npc = DYNAMIC_PC;
break;
case 8:
gen_helper_wrpil(cpu_env, cpu_tmp0);
break;
case 9:
gen_helper_wrcwp(cpu_env, cpu_tmp0);
break;
case 10:
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
cansave));
break;
case 11:
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
canrestore));
break;
case 12:
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
cleanwin));
break;
case 13:
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
otherwin));
break;
case 14:
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState,
wstate));
break;
case 16:
CHECK_IU_FEATURE(VAR_0, GL);
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
tcg_gen_st_i32(cpu_tmp32, cpu_env,
offsetof(CPUSPARCState, gl));
break;
case 26:
CHECK_IU_FEATURE(VAR_0, HYPV);
if (!hypervisor(VAR_0))
goto priv_insn;
tcg_gen_mov_tl(cpu_ssr, cpu_tmp0);
break;
default:
goto illegal_insn;
}
#else
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
if (VAR_0->def->nwindows != 32)
tcg_gen_andi_tl(cpu_tmp32, cpu_tmp32,
(1 << VAR_0->def->nwindows) - 1);
tcg_gen_mov_i32(cpu_wim, cpu_tmp32);
#endif
}
break;
case 0x33:
{
#ifndef TARGET_SPARC64
if (!supervisor(VAR_0))
goto priv_insn;
tcg_gen_xor_tl(cpu_tbr, cpu_src1, cpu_src2);
#else
CHECK_IU_FEATURE(VAR_0, HYPV);
if (!hypervisor(VAR_0))
goto priv_insn;
tcg_gen_xor_tl(cpu_tmp0, cpu_src1, cpu_src2);
switch (VAR_5) {
case 0:
gen_op_wrhpstate();
save_state(VAR_0);
gen_op_next_insn();
tcg_gen_exit_tb(0);
VAR_0->is_br = 1;
break;
case 1:
gen_op_wrhtstate();
break;
case 3:
tcg_gen_mov_tl(cpu_hintp, cpu_tmp0);
break;
case 5:
tcg_gen_mov_tl(cpu_htba, cpu_tmp0);
break;
case 31:
{
TCGv_ptr r_tickptr;
tcg_gen_mov_tl(cpu_hstick_cmpr, cpu_tmp0);
r_tickptr = tcg_temp_new_ptr();
tcg_gen_ld_ptr(r_tickptr, cpu_env,
offsetof(CPUSPARCState, hstick));
gen_helper_tick_set_limit(r_tickptr,
cpu_hstick_cmpr);
tcg_temp_free_ptr(r_tickptr);
}
break;
case 6: /* readonly -- NOTE(review): comment marker lost in extraction; this register cannot be written */
default:
goto illegal_insn;
}
#endif
}
break;
#endif
#ifdef TARGET_SPARC64
case 0x2c:
{
int cc = GET_FIELD_SP(VAR_1, 11, 12);
int VAR_7 = GET_FIELD_SP(VAR_1, 14, 17);
DisasCompare cmp;
TCGv dst;
if (VAR_1 & (1 << 18)) {
if (cc == 0) {
gen_compare(&cmp, 0, VAR_7, VAR_0);
} else if (cc == 2) {
gen_compare(&cmp, 1, VAR_7, VAR_0);
} else {
goto illegal_insn;
}
} else {
gen_fcompare(&cmp, cc, VAR_7);
}
if (IS_IMM) {
simm = GET_FIELD_SPs(VAR_1, 0, 10);
tcg_gen_movi_tl(cpu_src2, simm);
}
dst = gen_load_gpr(VAR_0, VAR_5);
tcg_gen_movcond_tl(cmp.VAR_7, dst,
cmp.c1, cmp.c2,
cpu_src2, dst);
free_compare(&cmp);
gen_store_gpr(VAR_0, VAR_5, dst);
break;
}
case 0x2d:
gen_helper_sdivx(cpu_dst, cpu_env, cpu_src1, cpu_src2);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x2e:
gen_helper_popc(cpu_dst, cpu_src2);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x2f:
{
int VAR_7 = GET_FIELD_SP(VAR_1, 10, 12);
DisasCompare cmp;
TCGv dst;
gen_compare_reg(&cmp, VAR_7, cpu_src1);
if (IS_IMM) {
simm = GET_FIELD_SPs(VAR_1, 0, 9);
tcg_gen_movi_tl(cpu_src2, simm);
}
dst = gen_load_gpr(VAR_0, VAR_5);
tcg_gen_movcond_tl(cmp.VAR_7, dst,
cmp.c1, cmp.c2,
cpu_src2, dst);
free_compare(&cmp);
gen_store_gpr(VAR_0, VAR_5, dst);
break;
}
#endif
default:
goto illegal_insn;
}
}
} else if (VAR_10 == 0x36) {
#ifdef TARGET_SPARC64
int opf = GET_FIELD_SP(VAR_1, 5, 13);
VAR_3 = GET_FIELD(VAR_1, 13, 17);
VAR_4 = GET_FIELD(VAR_1, 27, 31);
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
switch (opf) {
case 0x000:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_edge(VAR_0, cpu_dst, cpu_src1, cpu_src2, 8, 1, 0);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x001:
CHECK_FPU_FEATURE(VAR_0, VIS2);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_edge(VAR_0, cpu_dst, cpu_src1, cpu_src2, 8, 0, 0);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x002:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_edge(VAR_0, cpu_dst, cpu_src1, cpu_src2, 8, 1, 1);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x003:
CHECK_FPU_FEATURE(VAR_0, VIS2);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_edge(VAR_0, cpu_dst, cpu_src1, cpu_src2, 8, 0, 1);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x004:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_edge(VAR_0, cpu_dst, cpu_src1, cpu_src2, 16, 1, 0);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x005:
CHECK_FPU_FEATURE(VAR_0, VIS2);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_edge(VAR_0, cpu_dst, cpu_src1, cpu_src2, 16, 0, 0);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x006:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_edge(VAR_0, cpu_dst, cpu_src1, cpu_src2, 16, 1, 1);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x007:
CHECK_FPU_FEATURE(VAR_0, VIS2);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_edge(VAR_0, cpu_dst, cpu_src1, cpu_src2, 16, 0, 1);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x008:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_edge(VAR_0, cpu_dst, cpu_src1, cpu_src2, 32, 1, 0);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x009:
CHECK_FPU_FEATURE(VAR_0, VIS2);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_edge(VAR_0, cpu_dst, cpu_src1, cpu_src2, 32, 0, 0);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x00a:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_edge(VAR_0, cpu_dst, cpu_src1, cpu_src2, 32, 1, 1);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x00b:
CHECK_FPU_FEATURE(VAR_0, VIS2);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_edge(VAR_0, cpu_dst, cpu_src1, cpu_src2, 32, 0, 1);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x010:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x012:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_shli_i64(cpu_dst, cpu_dst, 1);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x014:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_helper_array8(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_shli_i64(cpu_dst, cpu_dst, 2);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x018:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 0);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x01a:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
gen_alignaddr(cpu_dst, cpu_src1, cpu_src2, 1);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x019:
CHECK_FPU_FEATURE(VAR_0, VIS2);
cpu_src1 = gen_load_gpr(VAR_0, VAR_3);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
tcg_gen_deposit_tl(cpu_gsr, cpu_gsr, cpu_dst, 32, 32);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x020:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_3);
cpu_src2_64 = gen_load_fpr_D(VAR_0, VAR_4);
gen_helper_fcmple16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x022:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_3);
cpu_src2_64 = gen_load_fpr_D(VAR_0, VAR_4);
gen_helper_fcmpne16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x024:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_3);
cpu_src2_64 = gen_load_fpr_D(VAR_0, VAR_4);
gen_helper_fcmple32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x026:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_3);
cpu_src2_64 = gen_load_fpr_D(VAR_0, VAR_4);
gen_helper_fcmpne32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x028:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_3);
cpu_src2_64 = gen_load_fpr_D(VAR_0, VAR_4);
gen_helper_fcmpgt16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x02a:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_3);
cpu_src2_64 = gen_load_fpr_D(VAR_0, VAR_4);
gen_helper_fcmpeq16(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x02c:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_3);
cpu_src2_64 = gen_load_fpr_D(VAR_0, VAR_4);
gen_helper_fcmpgt32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x02e:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_3);
cpu_src2_64 = gen_load_fpr_D(VAR_0, VAR_4);
gen_helper_fcmpeq32(cpu_dst, cpu_src1_64, cpu_src2_64);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x031:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fmul8x16);
break;
case 0x033:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fmul8x16au);
break;
case 0x035:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fmul8x16al);
break;
case 0x036:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fmul8sux16);
break;
case 0x037:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fmul8ulx16);
break;
case 0x038:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fmuld8sux16);
break;
case 0x039:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fmuld8ulx16);
break;
case 0x03a:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_gsr_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fpack32);
break;
case 0x03b:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_4);
cpu_dst_32 = gen_dest_fpr_F();
gen_helper_fpack16(cpu_dst_32, cpu_gsr, cpu_src1_64);
gen_store_fpr_F(VAR_0, VAR_5, cpu_dst_32);
break;
case 0x03d:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_4);
cpu_dst_32 = gen_dest_fpr_F();
gen_helper_fpackfix(cpu_dst_32, cpu_gsr, cpu_src1_64);
gen_store_fpr_F(VAR_0, VAR_5, cpu_dst_32);
break;
case 0x03e:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_pdist);
break;
case 0x048:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_gsr_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_faligndata);
break;
case 0x04b:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fpmerge);
break;
case 0x04c:
CHECK_FPU_FEATURE(VAR_0, VIS2);
gen_gsr_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_bshuffle);
break;
case 0x04d:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fexpand);
break;
case 0x050:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fpadd16);
break;
case 0x051:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fpadd16s);
break;
case 0x052:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fpadd32);
break;
case 0x053:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_add_i32);
break;
case 0x054:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fpsub16);
break;
case 0x055:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fpsub16s);
break;
case 0x056:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, gen_helper_fpsub32);
break;
case 0x057:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_sub_i32);
break;
case 0x060:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_dst_64 = gen_dest_fpr_D();
tcg_gen_movi_i64(cpu_dst_64, 0);
gen_store_fpr_D(VAR_0, VAR_5, cpu_dst_64);
break;
case 0x061:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_dst_32 = gen_dest_fpr_F();
tcg_gen_movi_i32(cpu_dst_32, 0);
gen_store_fpr_F(VAR_0, VAR_5, cpu_dst_32);
break;
case 0x062:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_nor_i64);
break;
case 0x063:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_nor_i32);
break;
case 0x064:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_andc_i64);
break;
case 0x065:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_andc_i32);
break;
case 0x066:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DD(VAR_0, VAR_5, VAR_4, tcg_gen_not_i64);
break;
case 0x067:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FF(VAR_0, VAR_5, VAR_4, tcg_gen_not_i32);
break;
case 0x068:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_4, VAR_3, tcg_gen_andc_i64);
break;
case 0x069:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_4, VAR_3, tcg_gen_andc_i32);
break;
case 0x06a:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DD(VAR_0, VAR_5, VAR_3, tcg_gen_not_i64);
break;
case 0x06b:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FF(VAR_0, VAR_5, VAR_3, tcg_gen_not_i32);
break;
case 0x06c:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_xor_i64);
break;
case 0x06d:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_xor_i32);
break;
case 0x06e:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_nand_i64);
break;
case 0x06f:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_nand_i32);
break;
case 0x070:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_and_i64);
break;
case 0x071:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_and_i32);
break;
case 0x072:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_eqv_i64);
break;
case 0x073:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_eqv_i32);
break;
case 0x074:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_3);
gen_store_fpr_D(VAR_0, VAR_5, cpu_src1_64);
break;
case 0x075:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_32 = gen_load_fpr_F(VAR_0, VAR_3);
gen_store_fpr_F(VAR_0, VAR_5, cpu_src1_32);
break;
case 0x076:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_orc_i64);
break;
case 0x077:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_orc_i32);
break;
case 0x078:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_4);
gen_store_fpr_D(VAR_0, VAR_5, cpu_src1_64);
break;
case 0x079:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_src1_32 = gen_load_fpr_F(VAR_0, VAR_4);
gen_store_fpr_F(VAR_0, VAR_5, cpu_src1_32);
break;
case 0x07a:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_4, VAR_3, tcg_gen_orc_i64);
break;
case 0x07b:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_4, VAR_3, tcg_gen_orc_i32);
break;
case 0x07c:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_DDD(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_or_i64);
break;
case 0x07d:
CHECK_FPU_FEATURE(VAR_0, VIS1);
gen_ne_fop_FFF(VAR_0, VAR_5, VAR_3, VAR_4, tcg_gen_or_i32);
break;
case 0x07e:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_dst_64 = gen_dest_fpr_D();
tcg_gen_movi_i64(cpu_dst_64, -1);
gen_store_fpr_D(VAR_0, VAR_5, cpu_dst_64);
break;
case 0x07f:
CHECK_FPU_FEATURE(VAR_0, VIS1);
cpu_dst_32 = gen_dest_fpr_F();
tcg_gen_movi_i32(cpu_dst_32, -1);
gen_store_fpr_F(VAR_0, VAR_5, cpu_dst_32);
break;
case 0x080:
case 0x081:
goto illegal_insn;
default:
goto illegal_insn;
}
#else
goto ncp_insn;
#endif
} else if (VAR_10 == 0x37) {
#ifdef TARGET_SPARC64
goto illegal_insn;
#else
goto ncp_insn;
#endif
#ifdef TARGET_SPARC64
} else if (VAR_10 == 0x39) {
TCGv_i32 r_const;
save_state(VAR_0);
cpu_src1 = get_src1(VAR_0, VAR_1);
if (IS_IMM) {
simm = GET_FIELDs(VAR_1, 19, 31);
tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
} else {
VAR_4 = GET_FIELD(VAR_1, 27, 31);
if (VAR_4) {
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
} else {
tcg_gen_mov_tl(cpu_dst, cpu_src1);
}
}
gen_helper_restore(cpu_env);
gen_mov_pc_npc(VAR_0);
r_const = tcg_const_i32(3);
gen_helper_check_align(cpu_env, cpu_dst, r_const);
tcg_temp_free_i32(r_const);
tcg_gen_mov_tl(cpu_npc, cpu_dst);
VAR_0->npc = DYNAMIC_PC;
goto jmp_insn;
#endif
} else {
cpu_src1 = get_src1(VAR_0, VAR_1);
if (IS_IMM) {
simm = GET_FIELDs(VAR_1, 19, 31);
tcg_gen_addi_tl(cpu_dst, cpu_src1, simm);
} else {
VAR_4 = GET_FIELD(VAR_1, 27, 31);
if (VAR_4) {
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
tcg_gen_add_tl(cpu_dst, cpu_src1, cpu_src2);
} else {
tcg_gen_mov_tl(cpu_dst, cpu_src1);
}
}
switch (VAR_10) {
case 0x38:
{
TCGv t;
TCGv_i32 r_const;
t = gen_dest_gpr(VAR_0, VAR_5);
tcg_gen_movi_tl(t, VAR_0->pc);
gen_store_gpr(VAR_0, VAR_5, t);
gen_mov_pc_npc(VAR_0);
r_const = tcg_const_i32(3);
gen_helper_check_align(cpu_env, cpu_dst, r_const);
tcg_temp_free_i32(r_const);
gen_address_mask(VAR_0, cpu_dst);
tcg_gen_mov_tl(cpu_npc, cpu_dst);
VAR_0->npc = DYNAMIC_PC;
}
goto jmp_insn;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
case 0x39:
{
TCGv_i32 r_const;
if (!supervisor(VAR_0))
goto priv_insn;
gen_mov_pc_npc(VAR_0);
r_const = tcg_const_i32(3);
gen_helper_check_align(cpu_env, cpu_dst, r_const);
tcg_temp_free_i32(r_const);
tcg_gen_mov_tl(cpu_npc, cpu_dst);
VAR_0->npc = DYNAMIC_PC;
gen_helper_rett(cpu_env);
}
goto jmp_insn;
#endif
case 0x3b:
if (!((VAR_0)->def->features & CPU_FEATURE_FLUSH))
goto unimp_flush;
break;
case 0x3c:
save_state(VAR_0);
gen_helper_save(cpu_env);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
case 0x3d:
save_state(VAR_0);
gen_helper_restore(cpu_env);
gen_store_gpr(VAR_0, VAR_5, cpu_dst);
break;
#if !defined(CONFIG_USER_ONLY) && defined(TARGET_SPARC64)
case 0x3e:
{
switch (VAR_5) {
case 0:
if (!supervisor(VAR_0))
goto priv_insn;
VAR_0->npc = DYNAMIC_PC;
VAR_0->pc = DYNAMIC_PC;
gen_helper_done(cpu_env);
goto jmp_insn;
case 1:
if (!supervisor(VAR_0))
goto priv_insn;
VAR_0->npc = DYNAMIC_PC;
VAR_0->pc = DYNAMIC_PC;
gen_helper_retry(cpu_env);
goto jmp_insn;
default:
goto illegal_insn;
}
}
break;
#endif
default:
goto illegal_insn;
}
}
break;
}
break;
case 3:
{
unsigned int VAR_10 = GET_FIELD(VAR_1, 7, 12);
cpu_src1 = get_src1(VAR_0, VAR_1);
if (VAR_10 == 0x3c || VAR_10 == 0x3e) {
VAR_4 = GET_FIELD(VAR_1, 27, 31);
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
tcg_gen_mov_tl(cpu_addr, cpu_src1);
} else if (IS_IMM) {
simm = GET_FIELDs(VAR_1, 19, 31);
tcg_gen_addi_tl(cpu_addr, cpu_src1, simm);
} else {
VAR_4 = GET_FIELD(VAR_1, 27, 31);
if (VAR_4 != 0) {
cpu_src2 = gen_load_gpr(VAR_0, VAR_4);
tcg_gen_add_tl(cpu_addr, cpu_src1, cpu_src2);
} else {
tcg_gen_mov_tl(cpu_addr, cpu_src1);
}
}
if (VAR_10 < 4 || (VAR_10 > 7 && VAR_10 < 0x14 && VAR_10 != 0x0e) ||
(VAR_10 > 0x17 && VAR_10 <= 0x1d ) ||
(VAR_10 > 0x2c && VAR_10 <= 0x33) || VAR_10 == 0x1f || VAR_10 == 0x3d) {
TCGv cpu_val = gen_dest_gpr(VAR_0, VAR_5);
switch (VAR_10) {
case 0x0:
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_ld32u(cpu_val, cpu_addr, VAR_0->mem_idx);
break;
case 0x1:
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_ld8u(cpu_val, cpu_addr, VAR_0->mem_idx);
break;
case 0x2:
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_ld16u(cpu_val, cpu_addr, VAR_0->mem_idx);
break;
case 0x3:
if (VAR_5 & 1)
goto illegal_insn;
else {
TCGv_i32 r_const;
save_state(VAR_0);
r_const = tcg_const_i32(7);
gen_helper_check_align(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, VAR_0->mem_idx);
tcg_gen_trunc_i64_tl(cpu_tmp0, cpu_tmp64);
tcg_gen_andi_tl(cpu_tmp0, cpu_tmp0, 0xffffffffULL);
gen_store_gpr(VAR_0, VAR_5 + 1, cpu_tmp0);
tcg_gen_shri_i64(cpu_tmp64, cpu_tmp64, 32);
tcg_gen_trunc_i64_tl(cpu_val, cpu_tmp64);
tcg_gen_andi_tl(cpu_val, cpu_val, 0xffffffffULL);
}
break;
case 0x9:
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_ld8s(cpu_val, cpu_addr, VAR_0->mem_idx);
break;
case 0xa:
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_ld16s(cpu_val, cpu_addr, VAR_0->mem_idx);
break;
case 0xd:
{
TCGv r_const;
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_ld8s(cpu_val, cpu_addr, VAR_0->mem_idx);
r_const = tcg_const_tl(0xff);
tcg_gen_qemu_st8(r_const, cpu_addr, VAR_0->mem_idx);
tcg_temp_free(r_const);
}
break;
case 0x0f:
CHECK_IU_FEATURE(VAR_0, SWAP);
cpu_src1 = gen_load_gpr(VAR_0, VAR_5);
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, VAR_0->mem_idx);
tcg_gen_qemu_st32(cpu_src1, cpu_addr, VAR_0->mem_idx);
tcg_gen_mov_tl(cpu_val, cpu_tmp0);
break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
case 0x10:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(VAR_0))
goto priv_insn;
#endif
save_state(VAR_0);
gen_ld_asi(cpu_val, cpu_addr, VAR_1, 4, 0);
break;
case 0x11:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(VAR_0))
goto priv_insn;
#endif
save_state(VAR_0);
gen_ld_asi(cpu_val, cpu_addr, VAR_1, 1, 0);
break;
case 0x12:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(VAR_0))
goto priv_insn;
#endif
save_state(VAR_0);
gen_ld_asi(cpu_val, cpu_addr, VAR_1, 2, 0);
break;
case 0x13:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(VAR_0))
goto priv_insn;
#endif
if (VAR_5 & 1)
goto illegal_insn;
save_state(VAR_0);
gen_ldda_asi(VAR_0, cpu_val, cpu_addr, VAR_1, VAR_5);
goto skip_move;
case 0x19:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(VAR_0))
goto priv_insn;
#endif
save_state(VAR_0);
gen_ld_asi(cpu_val, cpu_addr, VAR_1, 1, 1);
break;
case 0x1a:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(VAR_0))
goto priv_insn;
#endif
save_state(VAR_0);
gen_ld_asi(cpu_val, cpu_addr, VAR_1, 2, 1);
break;
case 0x1d:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(VAR_0))
goto priv_insn;
#endif
save_state(VAR_0);
gen_ldstub_asi(cpu_val, cpu_addr, VAR_1);
break;
case 0x1f:
CHECK_IU_FEATURE(VAR_0, SWAP);
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(VAR_0))
goto priv_insn;
#endif
save_state(VAR_0);
cpu_src1 = gen_load_gpr(VAR_0, VAR_5);
gen_swap_asi(cpu_val, cpu_src1, cpu_addr, VAR_1);
break;
#ifndef TARGET_SPARC64
case 0x30:
case 0x31:
case 0x33:
goto ncp_insn;
#endif
#endif
#ifdef TARGET_SPARC64
case 0x08:
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_ld32s(cpu_val, cpu_addr, VAR_0->mem_idx);
break;
case 0x0b:
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_ld64(cpu_val, cpu_addr, VAR_0->mem_idx);
break;
case 0x18:
save_state(VAR_0);
gen_ld_asi(cpu_val, cpu_addr, VAR_1, 4, 1);
break;
case 0x1b:
save_state(VAR_0);
gen_ld_asi(cpu_val, cpu_addr, VAR_1, 8, 0);
break;
case 0x2d:
goto skip_move;
case 0x30:
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
save_state(VAR_0);
gen_ldf_asi(cpu_addr, VAR_1, 4, VAR_5);
gen_update_fprs_dirty(VAR_5);
goto skip_move;
case 0x33:
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
save_state(VAR_0);
gen_ldf_asi(cpu_addr, VAR_1, 8, DFPREG(VAR_5));
gen_update_fprs_dirty(DFPREG(VAR_5));
goto skip_move;
case 0x3d:
goto skip_move;
case 0x32:
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
save_state(VAR_0);
gen_ldf_asi(cpu_addr, VAR_1, 16, QFPREG(VAR_5));
gen_update_fprs_dirty(QFPREG(VAR_5));
goto skip_move;
#endif
default:
goto illegal_insn;
}
gen_store_gpr(VAR_0, VAR_5, cpu_val);
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
skip_move: ;
#endif
} else if (VAR_10 >= 0x20 && VAR_10 < 0x24) {
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
save_state(VAR_0);
switch (VAR_10) {
case 0x20:
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, VAR_0->mem_idx);
cpu_dst_32 = gen_dest_fpr_F();
tcg_gen_trunc_tl_i32(cpu_dst_32, cpu_tmp0);
gen_store_fpr_F(VAR_0, VAR_5, cpu_dst_32);
break;
case 0x21:
#ifdef TARGET_SPARC64
gen_address_mask(VAR_0, cpu_addr);
if (VAR_5 == 1) {
tcg_gen_qemu_ld64(cpu_tmp64, cpu_addr, VAR_0->mem_idx);
gen_helper_ldxfsr(cpu_env, cpu_tmp64);
} else {
tcg_gen_qemu_ld32u(cpu_tmp0, cpu_addr, VAR_0->mem_idx);
tcg_gen_trunc_tl_i32(cpu_tmp32, cpu_tmp0);
gen_helper_ldfsr(cpu_env, cpu_tmp32);
}
#else
{
tcg_gen_qemu_ld32u(cpu_tmp32, cpu_addr, VAR_0->mem_idx);
gen_helper_ldfsr(cpu_env, cpu_tmp32);
}
#endif
break;
case 0x22:
{
TCGv_i32 r_const;
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
r_const = tcg_const_i32(VAR_0->mem_idx);
gen_address_mask(VAR_0, cpu_addr);
gen_helper_ldqf(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
gen_op_store_QT0_fpr(QFPREG(VAR_5));
gen_update_fprs_dirty(QFPREG(VAR_5));
}
break;
case 0x23:
gen_address_mask(VAR_0, cpu_addr);
cpu_dst_64 = gen_dest_fpr_D();
tcg_gen_qemu_ld64(cpu_dst_64, cpu_addr, VAR_0->mem_idx);
gen_store_fpr_D(VAR_0, VAR_5, cpu_dst_64);
break;
default:
goto illegal_insn;
}
} else if (VAR_10 < 8 || (VAR_10 >= 0x14 && VAR_10 < 0x18) ||
VAR_10 == 0xe || VAR_10 == 0x1e) {
TCGv cpu_val = gen_load_gpr(VAR_0, VAR_5);
switch (VAR_10) {
case 0x4:
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_st32(cpu_val, cpu_addr, VAR_0->mem_idx);
break;
case 0x5:
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_st8(cpu_val, cpu_addr, VAR_0->mem_idx);
break;
case 0x6:
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_st16(cpu_val, cpu_addr, VAR_0->mem_idx);
break;
case 0x7:
if (VAR_5 & 1)
goto illegal_insn;
else {
TCGv_i32 r_const;
TCGv lo;
save_state(VAR_0);
gen_address_mask(VAR_0, cpu_addr);
r_const = tcg_const_i32(7);
gen_helper_check_align(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
lo = gen_load_gpr(VAR_0, VAR_5 + 1);
tcg_gen_concat_tl_i64(cpu_tmp64, lo, cpu_val);
tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, VAR_0->mem_idx);
}
break;
#if !defined(CONFIG_USER_ONLY) || defined(TARGET_SPARC64)
case 0x14:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(VAR_0))
goto priv_insn;
#endif
save_state(VAR_0);
gen_st_asi(cpu_val, cpu_addr, VAR_1, 4);
VAR_0->npc = DYNAMIC_PC;
break;
case 0x15:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(VAR_0))
goto priv_insn;
#endif
save_state(VAR_0);
gen_st_asi(cpu_val, cpu_addr, VAR_1, 1);
VAR_0->npc = DYNAMIC_PC;
break;
case 0x16:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(VAR_0))
goto priv_insn;
#endif
save_state(VAR_0);
gen_st_asi(cpu_val, cpu_addr, VAR_1, 2);
VAR_0->npc = DYNAMIC_PC;
break;
case 0x17:
#ifndef TARGET_SPARC64
if (IS_IMM)
goto illegal_insn;
if (!supervisor(VAR_0))
goto priv_insn;
#endif
if (VAR_5 & 1)
goto illegal_insn;
else {
save_state(VAR_0);
gen_stda_asi(VAR_0, cpu_val, cpu_addr, VAR_1, VAR_5);
}
break;
#endif
#ifdef TARGET_SPARC64
case 0x0e:
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_qemu_st64(cpu_val, cpu_addr, VAR_0->mem_idx);
break;
case 0x1e:
save_state(VAR_0);
gen_st_asi(cpu_val, cpu_addr, VAR_1, 8);
VAR_0->npc = DYNAMIC_PC;
break;
#endif
default:
goto illegal_insn;
}
} else if (VAR_10 > 0x23 && VAR_10 < 0x28) {
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
save_state(VAR_0);
switch (VAR_10) {
case 0x24:
gen_address_mask(VAR_0, cpu_addr);
cpu_src1_32 = gen_load_fpr_F(VAR_0, VAR_5);
tcg_gen_ext_i32_tl(cpu_tmp0, cpu_src1_32);
tcg_gen_qemu_st32(cpu_tmp0, cpu_addr, VAR_0->mem_idx);
break;
case 0x25:
#ifdef TARGET_SPARC64
gen_address_mask(VAR_0, cpu_addr);
tcg_gen_ld_i64(cpu_tmp64, cpu_env, offsetof(CPUSPARCState, fsr));
if (VAR_5 == 1)
tcg_gen_qemu_st64(cpu_tmp64, cpu_addr, VAR_0->mem_idx);
else
tcg_gen_qemu_st32(cpu_tmp64, cpu_addr, VAR_0->mem_idx);
#else
tcg_gen_ld_i32(cpu_tmp32, cpu_env, offsetof(CPUSPARCState, fsr));
tcg_gen_qemu_st32(cpu_tmp32, cpu_addr, VAR_0->mem_idx);
#endif
break;
case 0x26:
#ifdef TARGET_SPARC64
{
TCGv_i32 r_const;
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
gen_op_load_fpr_QT0(QFPREG(VAR_5));
r_const = tcg_const_i32(VAR_0->mem_idx);
gen_address_mask(VAR_0, cpu_addr);
gen_helper_stqf(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
}
break;
#else
#if defined(CONFIG_USER_ONLY)
goto illegal_insn;
#else
if (!supervisor(VAR_0))
goto priv_insn;
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
goto nfq_insn;
#endif
#endif
case 0x27:
gen_address_mask(VAR_0, cpu_addr);
cpu_src1_64 = gen_load_fpr_D(VAR_0, VAR_5);
tcg_gen_qemu_st64(cpu_src1_64, cpu_addr, VAR_0->mem_idx);
break;
default:
goto illegal_insn;
}
} else if (VAR_10 > 0x33 && VAR_10 < 0x3f) {
save_state(VAR_0);
switch (VAR_10) {
#ifdef TARGET_SPARC64
case 0x34:
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
gen_stf_asi(cpu_addr, VAR_1, 4, VAR_5);
break;
case 0x36:
{
TCGv_i32 r_const;
CHECK_FPU_FEATURE(VAR_0, FLOAT128);
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
r_const = tcg_const_i32(7);
gen_helper_check_align(cpu_env, cpu_addr, r_const);
tcg_temp_free_i32(r_const);
gen_stf_asi(cpu_addr, VAR_1, 16, QFPREG(VAR_5));
}
break;
case 0x37:
if (gen_trap_ifnofpu(VAR_0)) {
goto jmp_insn;
}
gen_stf_asi(cpu_addr, VAR_1, 8, DFPREG(VAR_5));
break;
case 0x3c:
gen_cas_asi(VAR_0, cpu_addr, cpu_src2, VAR_1, VAR_5);
break;
case 0x3e:
gen_casx_asi(VAR_0, cpu_addr, cpu_src2, VAR_1, VAR_5);
break;
#else
case 0x34:
case 0x35:
case 0x36:
case 0x37:
goto ncp_insn;
#endif
default:
goto illegal_insn;
}
} else
goto illegal_insn;
}
break;
}
if (VAR_0->npc == DYNAMIC_PC) {
VAR_0->pc = DYNAMIC_PC;
gen_op_next_insn();
} else if (VAR_0->npc == JUMP_PC) {
gen_branch2(VAR_0, VAR_0->jump_pc[0], VAR_0->jump_pc[1], cpu_cond);
VAR_0->is_br = 1;
} else {
VAR_0->pc = VAR_0->npc;
VAR_0->npc = VAR_0->npc + 4;
}
jmp_insn:
goto egress;
illegal_insn:
{
TCGv_i32 r_const;
save_state(VAR_0);
r_const = tcg_const_i32(TT_ILL_INSN);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free_i32(r_const);
VAR_0->is_br = 1;
}
goto egress;
unimp_flush:
{
TCGv_i32 r_const;
save_state(VAR_0);
r_const = tcg_const_i32(TT_UNIMP_FLUSH);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free_i32(r_const);
VAR_0->is_br = 1;
}
goto egress;
#if !defined(CONFIG_USER_ONLY)
priv_insn:
{
TCGv_i32 r_const;
save_state(VAR_0);
r_const = tcg_const_i32(TT_PRIV_INSN);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free_i32(r_const);
VAR_0->is_br = 1;
}
goto egress;
#endif
nfpu_insn:
save_state(VAR_0);
gen_op_fpexception_im(FSR_FTT_UNIMPFPOP);
VAR_0->is_br = 1;
goto egress;
#if !defined(CONFIG_USER_ONLY) && !defined(TARGET_SPARC64)
nfq_insn:
save_state(VAR_0);
gen_op_fpexception_im(FSR_FTT_SEQ_ERROR);
VAR_0->is_br = 1;
goto egress;
#endif
#ifndef TARGET_SPARC64
ncp_insn:
{
TCGv r_const;
save_state(VAR_0);
r_const = tcg_const_i32(TT_NCP_INSN);
gen_helper_raise_exception(cpu_env, r_const);
tcg_temp_free(r_const);
VAR_0->is_br = 1;
}
goto egress;
#endif
egress:
tcg_temp_free(cpu_tmp1);
tcg_temp_free(cpu_tmp2);
if (VAR_0->n_t32 != 0) {
int VAR_11;
for (VAR_11 = VAR_0->n_t32 - 1; VAR_11 >= 0; --VAR_11) {
tcg_temp_free_i32(VAR_0->t32[VAR_11]);
}
VAR_0->n_t32 = 0;
}
if (VAR_0->n_ttl != 0) {
int VAR_11;
for (VAR_11 = VAR_0->n_ttl - 1; VAR_11 >= 0; --VAR_11) {
tcg_temp_free(VAR_0->ttl[VAR_11]);
}
VAR_0->n_ttl = 0;
}
}
| [
"static void FUNC_0(DisasContext * VAR_0, unsigned int VAR_1)\n{",
"unsigned int VAR_2, VAR_3, VAR_4, VAR_5;",
"TCGv cpu_src1, cpu_src2, cpu_tmp1, cpu_tmp2;",
"TCGv_i32 cpu_src1_32, cpu_src2_32, cpu_dst_32;",
"TCGv_i64 cpu_src1_64, cpu_src2_64, cpu_dst_64;",
"target_long simm;",
"if (unlikely(qemu_logle... | [
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17
],
[
19
],
[
21
],
[
25
],
[
29
],
[
33
],
[
35
],
[
39
],
[
41,
43
],
[
45
],
[
47
],
[
49
],
[
51,
53,... |
13,134 | MigrationState *migrate_init(const MigrationParams *params)
{
MigrationState *s = migrate_get_current();
int64_t bandwidth_limit = s->bandwidth_limit;
bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
int64_t xbzrle_cache_size = s->xbzrle_cache_size;
int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
int compress_thread_count =
s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
int decompress_thread_count =
s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
int x_cpu_throttle_initial =
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
int x_cpu_throttle_increment =
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];
memcpy(enabled_capabilities, s->enabled_capabilities,
sizeof(enabled_capabilities));
memset(s, 0, sizeof(*s));
s->params = *params;
memcpy(s->enabled_capabilities, enabled_capabilities,
sizeof(enabled_capabilities));
s->xbzrle_cache_size = xbzrle_cache_size;
s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
compress_thread_count;
s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
decompress_thread_count;
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
x_cpu_throttle_initial;
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
x_cpu_throttle_increment;
s->bandwidth_limit = bandwidth_limit;
migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
QSIMPLEQ_INIT(&s->src_page_requests);
s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
return s;
}
| true | qemu | 389775d1f67b2c8f44f9473b1e5363735972e389 | MigrationState *migrate_init(const MigrationParams *params)
{
MigrationState *s = migrate_get_current();
int64_t bandwidth_limit = s->bandwidth_limit;
bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
int64_t xbzrle_cache_size = s->xbzrle_cache_size;
int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
int compress_thread_count =
s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
int decompress_thread_count =
s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
int x_cpu_throttle_initial =
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
int x_cpu_throttle_increment =
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];
memcpy(enabled_capabilities, s->enabled_capabilities,
sizeof(enabled_capabilities));
memset(s, 0, sizeof(*s));
s->params = *params;
memcpy(s->enabled_capabilities, enabled_capabilities,
sizeof(enabled_capabilities));
s->xbzrle_cache_size = xbzrle_cache_size;
s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;
s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
compress_thread_count;
s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
decompress_thread_count;
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
x_cpu_throttle_initial;
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
x_cpu_throttle_increment;
s->bandwidth_limit = bandwidth_limit;
migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
QSIMPLEQ_INIT(&s->src_page_requests);
s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
return s;
}
| {
"code": [
" int64_t bandwidth_limit = s->bandwidth_limit;",
" bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];",
" int64_t xbzrle_cache_size = s->xbzrle_cache_size;",
" int compress_level = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];",
" int compress_thread_count =",
" s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];",
" int decompress_thread_count =",
" s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];",
" int x_cpu_throttle_initial =",
" s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];",
" int x_cpu_throttle_increment =",
" s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];",
" memcpy(enabled_capabilities, s->enabled_capabilities,",
" sizeof(enabled_capabilities));",
" memset(s, 0, sizeof(*s));",
" memcpy(s->enabled_capabilities, enabled_capabilities,",
" sizeof(enabled_capabilities));",
" s->xbzrle_cache_size = xbzrle_cache_size;",
" s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = compress_level;",
" s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =",
" compress_thread_count;",
" s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =",
" decompress_thread_count;",
" s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =",
" x_cpu_throttle_initial;",
" s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =",
" x_cpu_throttle_increment;",
" s->bandwidth_limit = bandwidth_limit;"
],
"line_no": [
7,
9,
11,
13,
15,
17,
19,
21,
23,
25,
27,
29,
33,
35,
39,
43,
35,
47,
51,
53,
55,
57,
59,
61,
63,
65,
67,
69
]
} | MigrationState *FUNC_0(const MigrationParams *params)
{
MigrationState *s = migrate_get_current();
int64_t bandwidth_limit = s->bandwidth_limit;
bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];
int64_t xbzrle_cache_size = s->xbzrle_cache_size;
int VAR_0 = s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL];
int VAR_1 =
s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS];
int VAR_2 =
s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS];
int VAR_3 =
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
int VAR_4 =
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];
memcpy(enabled_capabilities, s->enabled_capabilities,
sizeof(enabled_capabilities));
memset(s, 0, sizeof(*s));
s->params = *params;
memcpy(s->enabled_capabilities, enabled_capabilities,
sizeof(enabled_capabilities));
s->xbzrle_cache_size = xbzrle_cache_size;
s->parameters[MIGRATION_PARAMETER_COMPRESS_LEVEL] = VAR_0;
s->parameters[MIGRATION_PARAMETER_COMPRESS_THREADS] =
VAR_1;
s->parameters[MIGRATION_PARAMETER_DECOMPRESS_THREADS] =
VAR_2;
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL] =
VAR_3;
s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT] =
VAR_4;
s->bandwidth_limit = bandwidth_limit;
migrate_set_state(s, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
QSIMPLEQ_INIT(&s->src_page_requests);
s->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
return s;
}
| [
"MigrationState *FUNC_0(const MigrationParams *params)\n{",
"MigrationState *s = migrate_get_current();",
"int64_t bandwidth_limit = s->bandwidth_limit;",
"bool enabled_capabilities[MIGRATION_CAPABILITY_MAX];",
"int64_t xbzrle_cache_size = s->xbzrle_cache_size;",
"int VAR_0 = s->parameters[MIGRATION_PARAM... | [
0,
0,
1,
1,
1,
1,
1,
1,
1,
1,
1,
1,
0,
1,
1,
1,
1,
1,
1,
1,
1,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15,
17
],
[
19,
21
],
[
23,
25
],
[
27,
29
],
[
33,
35
],
[
39
],
[
41
],
[
43,
45
],
[
47
],
[
51
],
[
53,... |
13,138 | static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
const uint8_t *src, x86_reg stride,
int rnd, int64_t shift)
{
__asm__ volatile(
"mov $3, %%"REG_c" \n\t"
LOAD_ROUNDER_MMX("%5")
"movq "MANGLE(ff_pw_9)", %%mm6 \n\t"
"1: \n\t"
"movd (%0), %%mm2 \n\t"
"add %2, %0 \n\t"
"movd (%0), %%mm3 \n\t"
"punpcklbw %%mm0, %%mm2 \n\t"
"punpcklbw %%mm0, %%mm3 \n\t"
SHIFT2_LINE( 0, 1, 2, 3, 4)
SHIFT2_LINE( 24, 2, 3, 4, 1)
SHIFT2_LINE( 48, 3, 4, 1, 2)
SHIFT2_LINE( 72, 4, 1, 2, 3)
SHIFT2_LINE( 96, 1, 2, 3, 4)
SHIFT2_LINE(120, 2, 3, 4, 1)
SHIFT2_LINE(144, 3, 4, 1, 2)
SHIFT2_LINE(168, 4, 1, 2, 3)
"sub %6, %0 \n\t"
"add $8, %1 \n\t"
"dec %%"REG_c" \n\t"
"jnz 1b \n\t"
: "+r"(src), "+r"(dst)
: "r"(stride), "r"(-2*stride),
"m"(shift), "m"(rnd), "r"(9*stride-4)
NAMED_CONSTRAINTS_ADD(ff_pw_9)
: "%"REG_c, "memory"
);
}
| false | FFmpeg | ab5f43e6342c4c07faf5c9ae87628d7d7c83abb6 | static void vc1_put_ver_16b_shift2_mmx(int16_t *dst,
const uint8_t *src, x86_reg stride,
int rnd, int64_t shift)
{
__asm__ volatile(
"mov $3, %%"REG_c" \n\t"
LOAD_ROUNDER_MMX("%5")
"movq "MANGLE(ff_pw_9)", %%mm6 \n\t"
"1: \n\t"
"movd (%0), %%mm2 \n\t"
"add %2, %0 \n\t"
"movd (%0), %%mm3 \n\t"
"punpcklbw %%mm0, %%mm2 \n\t"
"punpcklbw %%mm0, %%mm3 \n\t"
SHIFT2_LINE( 0, 1, 2, 3, 4)
SHIFT2_LINE( 24, 2, 3, 4, 1)
SHIFT2_LINE( 48, 3, 4, 1, 2)
SHIFT2_LINE( 72, 4, 1, 2, 3)
SHIFT2_LINE( 96, 1, 2, 3, 4)
SHIFT2_LINE(120, 2, 3, 4, 1)
SHIFT2_LINE(144, 3, 4, 1, 2)
SHIFT2_LINE(168, 4, 1, 2, 3)
"sub %6, %0 \n\t"
"add $8, %1 \n\t"
"dec %%"REG_c" \n\t"
"jnz 1b \n\t"
: "+r"(src), "+r"(dst)
: "r"(stride), "r"(-2*stride),
"m"(shift), "m"(rnd), "r"(9*stride-4)
NAMED_CONSTRAINTS_ADD(ff_pw_9)
: "%"REG_c, "memory"
);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(int16_t *VAR_0,
const uint8_t *VAR_1, x86_reg VAR_2,
int VAR_3, int64_t VAR_4)
{
__asm__ volatile(
"mov $3, %%"REG_c" \n\t"
LOAD_ROUNDER_MMX("%5")
"movq "MANGLE(ff_pw_9)", %%mm6 \n\t"
"1: \n\t"
"movd (%0), %%mm2 \n\t"
"add %2, %0 \n\t"
"movd (%0), %%mm3 \n\t"
"punpcklbw %%mm0, %%mm2 \n\t"
"punpcklbw %%mm0, %%mm3 \n\t"
SHIFT2_LINE( 0, 1, 2, 3, 4)
SHIFT2_LINE( 24, 2, 3, 4, 1)
SHIFT2_LINE( 48, 3, 4, 1, 2)
SHIFT2_LINE( 72, 4, 1, 2, 3)
SHIFT2_LINE( 96, 1, 2, 3, 4)
SHIFT2_LINE(120, 2, 3, 4, 1)
SHIFT2_LINE(144, 3, 4, 1, 2)
SHIFT2_LINE(168, 4, 1, 2, 3)
"sub %6, %0 \n\t"
"add $8, %1 \n\t"
"dec %%"REG_c" \n\t"
"jnz 1b \n\t"
: "+r"(VAR_1), "+r"(VAR_0)
: "r"(VAR_2), "r"(-2*VAR_2),
"m"(VAR_4), "m"(VAR_3), "r"(9*VAR_2-4)
NAMED_CONSTRAINTS_ADD(ff_pw_9)
: "%"REG_c, "memory"
);
}
| [
"static void FUNC_0(int16_t *VAR_0,\nconst uint8_t *VAR_1, x86_reg VAR_2,\nint VAR_3, int64_t VAR_4)\n{",
"__asm__ volatile(\n\"mov $3, %%\"REG_c\" \\n\\t\"\nLOAD_ROUNDER_MMX(\"%5\")\n\"movq \"MANGLE(ff_pw_9)\", %%mm6 \\n\\t\"\n\"1: \\n\\t\"\n\"movd (%0), %... | [
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9,
11,
13,
15,
17,
19,
21,
23,
25,
27,
29,
31,
33,
35,
37,
39,
41,
43,
45,
47,
49,
51,
53,
55,
57,
59,
61,
63
],
[
65
]
] |
13,139 | static int mov_read_custom(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
int64_t end = avio_tell(pb) + atom.size;
uint8_t *key = NULL, *val = NULL, *mean = NULL;
int i;
AVStream *st;
MOVStreamContext *sc;
if (c->fc->nb_streams < 1)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
sc = st->priv_data;
for (i = 0; i < 3; i++) {
uint8_t **p;
uint32_t len, tag;
int ret;
if (end - avio_tell(pb) <= 12)
break;
len = avio_rb32(pb);
tag = avio_rl32(pb);
avio_skip(pb, 4); // flags
if (len < 12 || len - 12 > end - avio_tell(pb))
break;
len -= 12;
if (tag == MKTAG('m', 'e', 'a', 'n'))
p = &mean;
else if (tag == MKTAG('n', 'a', 'm', 'e'))
p = &key;
else if (tag == MKTAG('d', 'a', 't', 'a') && len > 4) {
avio_skip(pb, 4);
len -= 4;
p = &val;
} else
break;
*p = av_malloc(len + 1);
if (!*p)
break;
ret = ffio_read_size(pb, *p, len);
if (ret < 0) {
av_freep(p);
return ret;
}
(*p)[len] = 0;
}
if (mean && key && val) {
if (strcmp(key, "iTunSMPB") == 0) {
int priming, remainder, samples;
if(sscanf(val, "%*X %X %X %X", &priming, &remainder, &samples) == 3){
if(priming>0 && priming<16384)
sc->start_pad = priming;
}
}
if (strcmp(key, "cdec") != 0) {
av_dict_set(&c->fc->metadata, key, val,
AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
key = val = NULL;
}
} else {
av_log(c->fc, AV_LOG_VERBOSE,
"Unhandled or malformed custom metadata of size %"PRId64"\n", atom.size);
}
avio_seek(pb, end, SEEK_SET);
av_freep(&key);
av_freep(&val);
av_freep(&mean);
return 0;
}
| true | FFmpeg | e22bd239c046014652a3487f542f2ab7b34f7a62 | static int mov_read_custom(MOVContext *c, AVIOContext *pb, MOVAtom atom)
{
int64_t end = avio_tell(pb) + atom.size;
uint8_t *key = NULL, *val = NULL, *mean = NULL;
int i;
AVStream *st;
MOVStreamContext *sc;
if (c->fc->nb_streams < 1)
return 0;
st = c->fc->streams[c->fc->nb_streams-1];
sc = st->priv_data;
for (i = 0; i < 3; i++) {
uint8_t **p;
uint32_t len, tag;
int ret;
if (end - avio_tell(pb) <= 12)
break;
len = avio_rb32(pb);
tag = avio_rl32(pb);
avio_skip(pb, 4);
if (len < 12 || len - 12 > end - avio_tell(pb))
break;
len -= 12;
if (tag == MKTAG('m', 'e', 'a', 'n'))
p = &mean;
else if (tag == MKTAG('n', 'a', 'm', 'e'))
p = &key;
else if (tag == MKTAG('d', 'a', 't', 'a') && len > 4) {
avio_skip(pb, 4);
len -= 4;
p = &val;
} else
break;
*p = av_malloc(len + 1);
if (!*p)
break;
ret = ffio_read_size(pb, *p, len);
if (ret < 0) {
av_freep(p);
return ret;
}
(*p)[len] = 0;
}
if (mean && key && val) {
if (strcmp(key, "iTunSMPB") == 0) {
int priming, remainder, samples;
if(sscanf(val, "%*X %X %X %X", &priming, &remainder, &samples) == 3){
if(priming>0 && priming<16384)
sc->start_pad = priming;
}
}
if (strcmp(key, "cdec") != 0) {
av_dict_set(&c->fc->metadata, key, val,
AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
key = val = NULL;
}
} else {
av_log(c->fc, AV_LOG_VERBOSE,
"Unhandled or malformed custom metadata of size %"PRId64"\n", atom.size);
}
avio_seek(pb, end, SEEK_SET);
av_freep(&key);
av_freep(&val);
av_freep(&mean);
return 0;
}
| {
"code": [
" int ret;",
" return ret;",
" return 0;"
],
"line_no": [
33,
93,
147
]
} | static int FUNC_0(MOVContext *VAR_0, AVIOContext *VAR_1, MOVAtom VAR_2)
{
int64_t end = avio_tell(VAR_1) + VAR_2.size;
uint8_t *key = NULL, *val = NULL, *mean = NULL;
int VAR_3;
AVStream *st;
MOVStreamContext *sc;
if (VAR_0->fc->nb_streams < 1)
return 0;
st = VAR_0->fc->streams[VAR_0->fc->nb_streams-1];
sc = st->priv_data;
for (VAR_3 = 0; VAR_3 < 3; VAR_3++) {
uint8_t **p;
uint32_t len, tag;
int VAR_4;
if (end - avio_tell(VAR_1) <= 12)
break;
len = avio_rb32(VAR_1);
tag = avio_rl32(VAR_1);
avio_skip(VAR_1, 4);
if (len < 12 || len - 12 > end - avio_tell(VAR_1))
break;
len -= 12;
if (tag == MKTAG('m', 'e', 'a', 'n'))
p = &mean;
else if (tag == MKTAG('n', 'a', 'm', 'e'))
p = &key;
else if (tag == MKTAG('d', 'a', 't', 'a') && len > 4) {
avio_skip(VAR_1, 4);
len -= 4;
p = &val;
} else
break;
*p = av_malloc(len + 1);
if (!*p)
break;
VAR_4 = ffio_read_size(VAR_1, *p, len);
if (VAR_4 < 0) {
av_freep(p);
return VAR_4;
}
(*p)[len] = 0;
}
if (mean && key && val) {
if (strcmp(key, "iTunSMPB") == 0) {
int VAR_5, VAR_6, VAR_7;
if(sscanf(val, "%*X %X %X %X", &VAR_5, &VAR_6, &VAR_7) == 3){
if(VAR_5>0 && VAR_5<16384)
sc->start_pad = VAR_5;
}
}
if (strcmp(key, "cdec") != 0) {
av_dict_set(&VAR_0->fc->metadata, key, val,
AV_DICT_DONT_STRDUP_KEY | AV_DICT_DONT_STRDUP_VAL);
key = val = NULL;
}
} else {
av_log(VAR_0->fc, AV_LOG_VERBOSE,
"Unhandled or malformed custom metadata of size %"PRId64"\n", VAR_2.size);
}
avio_seek(VAR_1, end, SEEK_SET);
av_freep(&key);
av_freep(&val);
av_freep(&mean);
return 0;
}
| [
"static int FUNC_0(MOVContext *VAR_0, AVIOContext *VAR_1, MOVAtom VAR_2)\n{",
"int64_t end = avio_tell(VAR_1) + VAR_2.size;",
"uint8_t *key = NULL, *val = NULL, *mean = NULL;",
"int VAR_3;",
"AVStream *st;",
"MOVStreamContext *sc;",
"if (VAR_0->fc->nb_streams < 1)\nreturn 0;",
"st = VAR_0->fc->streams... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
17,
19
],
[
21
],
[
23
],
[
27
],
[
29
],
[
31
],
[
33
],
[
37,
39
],
[
43
],
[
45
],
[
47
],
[
51,
53
],
[... |
13,141 | static AVBufferRef *dxva2_pool_alloc(void *opaque, int size)
{
AVHWFramesContext *ctx = (AVHWFramesContext*)opaque;
DXVA2FramesContext *s = ctx->internal->priv;
AVDXVA2FramesContext *hwctx = ctx->hwctx;
if (s->nb_surfaces_used < hwctx->nb_surfaces) {
s->nb_surfaces_used++;
return av_buffer_create((uint8_t*)s->surfaces_internal[s->nb_surfaces_used - 1],
sizeof(*hwctx->surfaces), NULL, 0, 0);
}
return NULL;
}
| true | FFmpeg | 3d040513a1de4797a4f81dde4984395f51db76b7 | static AVBufferRef *dxva2_pool_alloc(void *opaque, int size)
{
AVHWFramesContext *ctx = (AVHWFramesContext*)opaque;
DXVA2FramesContext *s = ctx->internal->priv;
AVDXVA2FramesContext *hwctx = ctx->hwctx;
if (s->nb_surfaces_used < hwctx->nb_surfaces) {
s->nb_surfaces_used++;
return av_buffer_create((uint8_t*)s->surfaces_internal[s->nb_surfaces_used - 1],
sizeof(*hwctx->surfaces), NULL, 0, 0);
}
return NULL;
}
| {
"code": [
" sizeof(*hwctx->surfaces), NULL, 0, 0);"
],
"line_no": [
19
]
} | static AVBufferRef *FUNC_0(void *opaque, int size)
{
AVHWFramesContext *ctx = (AVHWFramesContext*)opaque;
DXVA2FramesContext *s = ctx->internal->priv;
AVDXVA2FramesContext *hwctx = ctx->hwctx;
if (s->nb_surfaces_used < hwctx->nb_surfaces) {
s->nb_surfaces_used++;
return av_buffer_create((uint8_t*)s->surfaces_internal[s->nb_surfaces_used - 1],
sizeof(*hwctx->surfaces), NULL, 0, 0);
}
return NULL;
}
| [
"static AVBufferRef *FUNC_0(void *opaque, int size)\n{",
"AVHWFramesContext *ctx = (AVHWFramesContext*)opaque;",
"DXVA2FramesContext *s = ctx->internal->priv;",
"AVDXVA2FramesContext *hwctx = ctx->hwctx;",
"if (s->nb_surfaces_used < hwctx->nb_surfaces) {",
"s->nb_surfaces_used++;",
"return av... | [
0,
0,
0,
0,
0,
0,
1,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17,
19
],
[
21
],
[
25
],
[
27
]
] |
13,142 | void put_string(PutBitContext * pbc, char *s)
{
while(*s){
put_bits(pbc, 8, *s);
s++;
}
put_bits(pbc, 8, 0);
}
| true | FFmpeg | 99683a307776a7638ccce236a4ce5aa3e914e77d | void put_string(PutBitContext * pbc, char *s)
{
while(*s){
put_bits(pbc, 8, *s);
s++;
}
put_bits(pbc, 8, 0);
}
| {
"code": [
"void put_string(PutBitContext * pbc, char *s)",
" put_bits(pbc, 8, 0);"
],
"line_no": [
1,
13
]
} | void FUNC_0(PutBitContext * VAR_0, char *VAR_1)
{
while(*VAR_1){
put_bits(VAR_0, 8, *VAR_1);
VAR_1++;
}
put_bits(VAR_0, 8, 0);
}
| [
"void FUNC_0(PutBitContext * VAR_0, char *VAR_1)\n{",
"while(*VAR_1){",
"put_bits(VAR_0, 8, *VAR_1);",
"VAR_1++;",
"}",
"put_bits(VAR_0, 8, 0);",
"}"
] | [
1,
0,
0,
0,
0,
1,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
]
] |
13,143 | static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb)
{
YuvPixel p;
int y, y0;
for (y = 0; y < mp->avctx->height; ++y) {
if (mp->changes_map[y * mp->avctx->width] != 0) {
memset(mp->gradient_scale, 1, sizeof(mp->gradient_scale));
p = mp_get_yuv_from_rgb(mp, 0, y);
} else {
p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
p.y = av_clip(p.y, 0, 31);
if ((y & 3) == 0) {
p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
p.v = av_clip(p.v, -32, 31);
p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
p.u = av_clip(p.u, -32, 31);
}
mp->vpt[y] = p;
mp_set_rgb_from_yuv(mp, 0, y, &p);
}
}
for (y0 = 0; y0 < 2; ++y0)
for (y = y0; y < mp->avctx->height; y += 2)
mp_decode_line(mp, gb, y);
} | true | FFmpeg | 05b0337025f629d0a6c6321147f06d0da5e32a62 | static void mp_decode_frame_helper(MotionPixelsContext *mp, GetBitContext *gb)
{
YuvPixel p;
int y, y0;
for (y = 0; y < mp->avctx->height; ++y) {
if (mp->changes_map[y * mp->avctx->width] != 0) {
memset(mp->gradient_scale, 1, sizeof(mp->gradient_scale));
p = mp_get_yuv_from_rgb(mp, 0, y);
} else {
p.y += mp_gradient(mp, 0, mp_get_vlc(mp, gb));
p.y = av_clip(p.y, 0, 31);
if ((y & 3) == 0) {
p.v += mp_gradient(mp, 1, mp_get_vlc(mp, gb));
p.v = av_clip(p.v, -32, 31);
p.u += mp_gradient(mp, 2, mp_get_vlc(mp, gb));
p.u = av_clip(p.u, -32, 31);
}
mp->vpt[y] = p;
mp_set_rgb_from_yuv(mp, 0, y, &p);
}
}
for (y0 = 0; y0 < 2; ++y0)
for (y = y0; y < mp->avctx->height; y += 2)
mp_decode_line(mp, gb, y);
} | {
"code": [],
"line_no": []
} | static void FUNC_0(MotionPixelsContext *VAR_0, GetBitContext *VAR_1)
{
YuvPixel p;
int VAR_2, VAR_3;
for (VAR_2 = 0; VAR_2 < VAR_0->avctx->height; ++VAR_2) {
if (VAR_0->changes_map[VAR_2 * VAR_0->avctx->width] != 0) {
memset(VAR_0->gradient_scale, 1, sizeof(VAR_0->gradient_scale));
p = mp_get_yuv_from_rgb(VAR_0, 0, VAR_2);
} else {
p.VAR_2 += mp_gradient(VAR_0, 0, mp_get_vlc(VAR_0, VAR_1));
p.VAR_2 = av_clip(p.VAR_2, 0, 31);
if ((VAR_2 & 3) == 0) {
p.v += mp_gradient(VAR_0, 1, mp_get_vlc(VAR_0, VAR_1));
p.v = av_clip(p.v, -32, 31);
p.u += mp_gradient(VAR_0, 2, mp_get_vlc(VAR_0, VAR_1));
p.u = av_clip(p.u, -32, 31);
}
VAR_0->vpt[VAR_2] = p;
mp_set_rgb_from_yuv(VAR_0, 0, VAR_2, &p);
}
}
for (VAR_3 = 0; VAR_3 < 2; ++VAR_3)
for (VAR_2 = VAR_3; VAR_2 < VAR_0->avctx->height; VAR_2 += 2)
mp_decode_line(VAR_0, VAR_1, VAR_2);
} | [
"static void FUNC_0(MotionPixelsContext *VAR_0, GetBitContext *VAR_1)\n{",
"YuvPixel p;",
"int VAR_2, VAR_3;",
"for (VAR_2 = 0; VAR_2 < VAR_0->avctx->height; ++VAR_2) {",
"if (VAR_0->changes_map[VAR_2 * VAR_0->avctx->width] != 0) {",
"memset(VAR_0->gradient_scale, 1, sizeof(VAR_0->gradient_scale));",
"p... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
2
],
[
3
],
[
4
],
[
5
],
[
6
],
[
7
],
[
8
],
[
9
],
[
10
],
[
11
],
[
12
],
[
13
],
[
14
],
[
15
],
[
16
],
[
17
],
[
18
],
[
19
],
[
20
],
[
21
],... |
13,144 | static int find_start_code(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
{
const uint8_t *buf_ptr= *pbuf_ptr;
buf_ptr++; //gurantees that -1 is within the array
buf_end -= 2; // gurantees that +2 is within the array
while (buf_ptr < buf_end) {
if(*buf_ptr==0){
while(buf_ptr < buf_end && buf_ptr[1]==0)
buf_ptr++;
if(buf_ptr[-1] == 0 && buf_ptr[1] == 1){
*pbuf_ptr = buf_ptr+3;
return buf_ptr[2] + 0x100;
}
}
buf_ptr += 2;
}
buf_end += 2; //undo the hack above
*pbuf_ptr = buf_end;
return -1;
}
| true | FFmpeg | dd1a74d25decd6eaa7c78a7062fa12edb043efaf | static int find_start_code(const uint8_t **pbuf_ptr, const uint8_t *buf_end)
{
const uint8_t *buf_ptr= *pbuf_ptr;
buf_ptr++;
buf_end -= 2;
while (buf_ptr < buf_end) {
if(*buf_ptr==0){
while(buf_ptr < buf_end && buf_ptr[1]==0)
buf_ptr++;
if(buf_ptr[-1] == 0 && buf_ptr[1] == 1){
*pbuf_ptr = buf_ptr+3;
return buf_ptr[2] + 0x100;
}
}
buf_ptr += 2;
}
buf_end += 2;
*pbuf_ptr = buf_end;
return -1;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(const uint8_t **VAR_0, const uint8_t *VAR_1)
{
const uint8_t *VAR_2= *VAR_0;
VAR_2++;
VAR_1 -= 2;
while (VAR_2 < VAR_1) {
if(*VAR_2==0){
while(VAR_2 < VAR_1 && VAR_2[1]==0)
VAR_2++;
if(VAR_2[-1] == 0 && VAR_2[1] == 1){
*VAR_0 = VAR_2+3;
return VAR_2[2] + 0x100;
}
}
VAR_2 += 2;
}
VAR_1 += 2;
*VAR_0 = VAR_1;
return -1;
}
| [
"static int FUNC_0(const uint8_t **VAR_0, const uint8_t *VAR_1)\n{",
"const uint8_t *VAR_2= *VAR_0;",
"VAR_2++;",
"VAR_1 -= 2;",
"while (VAR_2 < VAR_1) {",
"if(*VAR_2==0){",
"while(VAR_2 < VAR_1 && VAR_2[1]==0)\nVAR_2++;",
"if(VAR_2[-1] == 0 && VAR_2[1] == 1){",
"*VAR_0 = VAR_2+3;",
"return VAR_2[... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11
],
[
15
],
[
17
],
[
19,
21
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
43
],
[
45
],
[
47
]
] |
13,145 | static void pc_q35_2_4_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_2_5_machine_options(m);
m->alias = NULL;
pcmc->broken_reserved_end = true;
pcmc->inter_dimm_gap = false;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_4);
}
| true | qemu | 340065e5a11a515382c8b1112424c97e86ad2a3f | static void pc_q35_2_4_machine_options(MachineClass *m)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(m);
pc_q35_2_5_machine_options(m);
m->alias = NULL;
pcmc->broken_reserved_end = true;
pcmc->inter_dimm_gap = false;
SET_MACHINE_COMPAT(m, PC_COMPAT_2_4);
}
| {
"code": [
" pcmc->inter_dimm_gap = false;",
" pcmc->inter_dimm_gap = false;"
],
"line_no": [
13,
13
]
} | static void FUNC_0(MachineClass *VAR_0)
{
PCMachineClass *pcmc = PC_MACHINE_CLASS(VAR_0);
pc_q35_2_5_machine_options(VAR_0);
VAR_0->alias = NULL;
pcmc->broken_reserved_end = true;
pcmc->inter_dimm_gap = false;
SET_MACHINE_COMPAT(VAR_0, PC_COMPAT_2_4);
}
| [
"static void FUNC_0(MachineClass *VAR_0)\n{",
"PCMachineClass *pcmc = PC_MACHINE_CLASS(VAR_0);",
"pc_q35_2_5_machine_options(VAR_0);",
"VAR_0->alias = NULL;",
"pcmc->broken_reserved_end = true;",
"pcmc->inter_dimm_gap = false;",
"SET_MACHINE_COMPAT(VAR_0, PC_COMPAT_2_4);",
"}"
] | [
0,
0,
0,
0,
0,
1,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
]
] |
13,146 | av_cold int ff_nvenc_encode_init(AVCodecContext *avctx)
{
int ret;
if ((ret = nvenc_load_libraries(avctx)) < 0)
return ret;
if ((ret = nvenc_setup_device(avctx)) < 0)
return ret;
if ((ret = nvenc_setup_encoder(avctx)) < 0)
return ret;
if ((ret = nvenc_setup_surfaces(avctx)) < 0)
return ret;
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
if ((ret = nvenc_setup_extradata(avctx)) < 0)
return ret;
}
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
return 0;
}
| false | FFmpeg | d6604b29ef544793479d7fb4e05ef6622bb3e534 | av_cold int ff_nvenc_encode_init(AVCodecContext *avctx)
{
int ret;
if ((ret = nvenc_load_libraries(avctx)) < 0)
return ret;
if ((ret = nvenc_setup_device(avctx)) < 0)
return ret;
if ((ret = nvenc_setup_encoder(avctx)) < 0)
return ret;
if ((ret = nvenc_setup_surfaces(avctx)) < 0)
return ret;
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
if ((ret = nvenc_setup_extradata(avctx)) < 0)
return ret;
}
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
return 0;
}
| {
"code": [],
"line_no": []
} | av_cold int FUNC_0(AVCodecContext *avctx)
{
int VAR_0;
if ((VAR_0 = nvenc_load_libraries(avctx)) < 0)
return VAR_0;
if ((VAR_0 = nvenc_setup_device(avctx)) < 0)
return VAR_0;
if ((VAR_0 = nvenc_setup_encoder(avctx)) < 0)
return VAR_0;
if ((VAR_0 = nvenc_setup_surfaces(avctx)) < 0)
return VAR_0;
if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
if ((VAR_0 = nvenc_setup_extradata(avctx)) < 0)
return VAR_0;
}
avctx->coded_frame = av_frame_alloc();
if (!avctx->coded_frame)
return AVERROR(ENOMEM);
return 0;
}
| [
"av_cold int FUNC_0(AVCodecContext *avctx)\n{",
"int VAR_0;",
"if ((VAR_0 = nvenc_load_libraries(avctx)) < 0)\nreturn VAR_0;",
"if ((VAR_0 = nvenc_setup_device(avctx)) < 0)\nreturn VAR_0;",
"if ((VAR_0 = nvenc_setup_encoder(avctx)) < 0)\nreturn VAR_0;",
"if ((VAR_0 = nvenc_setup_surfaces(avctx)) < 0)\nret... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9,
11
],
[
15,
17
],
[
21,
23
],
[
27,
29
],
[
33
],
[
35,
37
],
[
39
],
[
43
],
[
45,
47
],
[
51
],
[
53
]
] |
13,148 | static int qemu_rdma_exchange_get_response(RDMAContext *rdma,
RDMAControlHeader *head, int expecting, int idx)
{
uint32_t byte_len;
int ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx,
&byte_len);
if (ret < 0) {
fprintf(stderr, "rdma migration: recv polling control error!\n");
return ret;
}
network_to_control((void *) rdma->wr_data[idx].control);
memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader));
DDDPRINTF("CONTROL: %s receiving...\n", control_desc[expecting]);
if (expecting == RDMA_CONTROL_NONE) {
DDDPRINTF("Surprise: got %s (%d)\n",
control_desc[head->type], head->type);
} else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) {
fprintf(stderr, "Was expecting a %s (%d) control message"
", but got: %s (%d), length: %d\n",
control_desc[expecting], expecting,
control_desc[head->type], head->type, head->len);
return -EIO;
}
if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) {
fprintf(stderr, "too long length: %d\n", head->len);
return -EINVAL;
}
if (sizeof(*head) + head->len != byte_len) {
fprintf(stderr, "Malformed length: %d byte_len %d\n",
head->len, byte_len);
return -EINVAL;
}
return 0;
}
| true | qemu | 60fe637bf0e4d7989e21e50f52526444765c63b4 | static int qemu_rdma_exchange_get_response(RDMAContext *rdma,
RDMAControlHeader *head, int expecting, int idx)
{
uint32_t byte_len;
int ret = qemu_rdma_block_for_wrid(rdma, RDMA_WRID_RECV_CONTROL + idx,
&byte_len);
if (ret < 0) {
fprintf(stderr, "rdma migration: recv polling control error!\n");
return ret;
}
network_to_control((void *) rdma->wr_data[idx].control);
memcpy(head, rdma->wr_data[idx].control, sizeof(RDMAControlHeader));
DDDPRINTF("CONTROL: %s receiving...\n", control_desc[expecting]);
if (expecting == RDMA_CONTROL_NONE) {
DDDPRINTF("Surprise: got %s (%d)\n",
control_desc[head->type], head->type);
} else if (head->type != expecting || head->type == RDMA_CONTROL_ERROR) {
fprintf(stderr, "Was expecting a %s (%d) control message"
", but got: %s (%d), length: %d\n",
control_desc[expecting], expecting,
control_desc[head->type], head->type, head->len);
return -EIO;
}
if (head->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*head)) {
fprintf(stderr, "too long length: %d\n", head->len);
return -EINVAL;
}
if (sizeof(*head) + head->len != byte_len) {
fprintf(stderr, "Malformed length: %d byte_len %d\n",
head->len, byte_len);
return -EINVAL;
}
return 0;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(RDMAContext *VAR_0,
RDMAControlHeader *VAR_1, int VAR_2, int VAR_3)
{
uint32_t byte_len;
int VAR_4 = qemu_rdma_block_for_wrid(VAR_0, RDMA_WRID_RECV_CONTROL + VAR_3,
&byte_len);
if (VAR_4 < 0) {
fprintf(stderr, "VAR_0 migration: recv polling control error!\n");
return VAR_4;
}
network_to_control((void *) VAR_0->wr_data[VAR_3].control);
memcpy(VAR_1, VAR_0->wr_data[VAR_3].control, sizeof(RDMAControlHeader));
DDDPRINTF("CONTROL: %s receiving...\n", control_desc[VAR_2]);
if (VAR_2 == RDMA_CONTROL_NONE) {
DDDPRINTF("Surprise: got %s (%d)\n",
control_desc[VAR_1->type], VAR_1->type);
} else if (VAR_1->type != VAR_2 || VAR_1->type == RDMA_CONTROL_ERROR) {
fprintf(stderr, "Was VAR_2 a %s (%d) control message"
", but got: %s (%d), length: %d\n",
control_desc[VAR_2], VAR_2,
control_desc[VAR_1->type], VAR_1->type, VAR_1->len);
return -EIO;
}
if (VAR_1->len > RDMA_CONTROL_MAX_BUFFER - sizeof(*VAR_1)) {
fprintf(stderr, "too long length: %d\n", VAR_1->len);
return -EINVAL;
}
if (sizeof(*VAR_1) + VAR_1->len != byte_len) {
fprintf(stderr, "Malformed length: %d byte_len %d\n",
VAR_1->len, byte_len);
return -EINVAL;
}
return 0;
}
| [
"static int FUNC_0(RDMAContext *VAR_0,\nRDMAControlHeader *VAR_1, int VAR_2, int VAR_3)\n{",
"uint32_t byte_len;",
"int VAR_4 = qemu_rdma_block_for_wrid(VAR_0, RDMA_WRID_RECV_CONTROL + VAR_3,\n&byte_len);",
"if (VAR_4 < 0) {",
"fprintf(stderr, \"VAR_0 migration: recv polling control error!\\n\");",
"retur... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9,
11
],
[
15
],
[
17
],
[
19
],
[
21
],
[
25
],
[
27
],
[
31
],
[
35
],
[
37,
39
],
[
41
],
[
43,
45,
47,
49
],
[
51
],
[
53
],
[
55
... |
13,150 | static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
const SegmentCache *sc)
{
ksc->selector = sc->selector;
ksc->flags = sc->flags;
ksc->limit = sc->limit;
ksc->base = sc->base;
}
| false | qemu | 4a1418e07bdcfaa3177739e04707ecaec75d89e1 | static inline void kqemu_load_seg(struct kqemu_segment_cache *ksc,
const SegmentCache *sc)
{
ksc->selector = sc->selector;
ksc->flags = sc->flags;
ksc->limit = sc->limit;
ksc->base = sc->base;
}
| {
"code": [],
"line_no": []
} | static inline void FUNC_0(struct kqemu_segment_cache *VAR_0,
const SegmentCache *VAR_1)
{
VAR_0->selector = VAR_1->selector;
VAR_0->flags = VAR_1->flags;
VAR_0->limit = VAR_1->limit;
VAR_0->base = VAR_1->base;
}
| [
"static inline void FUNC_0(struct kqemu_segment_cache *VAR_0,\nconst SegmentCache *VAR_1)\n{",
"VAR_0->selector = VAR_1->selector;",
"VAR_0->flags = VAR_1->flags;",
"VAR_0->limit = VAR_1->limit;",
"VAR_0->base = VAR_1->base;",
"}"
] | [
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
]
] |
13,151 | int avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr,
uint8_t *buf, int buf_size)
{
int ret;
//FIXME remove the check below _after_ ensuring that all audio check that the available space is enough
if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){
av_log(avctx, AV_LOG_ERROR, "buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\n");
return -1;
}
if(*frame_size_ptr < FF_MIN_BUFFER_SIZE ||
*frame_size_ptr < avctx->channels * avctx->frame_size * sizeof(int16_t) ||
*frame_size_ptr < buf_size){
av_log(avctx, AV_LOG_ERROR, "buffer %d too small\n", *frame_size_ptr);
return -1;
}
if((avctx->codec->capabilities & CODEC_CAP_DELAY) || buf_size){
ret = avctx->codec->decode(avctx, samples, frame_size_ptr,
buf, buf_size);
avctx->frame_number++;
}else{
ret= 0;
*frame_size_ptr=0;
}
return ret;
}
| false | FFmpeg | 9c856d62be91f9bf4c4c671ce5cea1feef4936c7 | int avcodec_decode_audio2(AVCodecContext *avctx, int16_t *samples,
int *frame_size_ptr,
uint8_t *buf, int buf_size)
{
int ret;
if(*frame_size_ptr < AVCODEC_MAX_AUDIO_FRAME_SIZE){
av_log(avctx, AV_LOG_ERROR, "buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\n");
return -1;
}
if(*frame_size_ptr < FF_MIN_BUFFER_SIZE ||
*frame_size_ptr < avctx->channels * avctx->frame_size * sizeof(int16_t) ||
*frame_size_ptr < buf_size){
av_log(avctx, AV_LOG_ERROR, "buffer %d too small\n", *frame_size_ptr);
return -1;
}
if((avctx->codec->capabilities & CODEC_CAP_DELAY) || buf_size){
ret = avctx->codec->decode(avctx, samples, frame_size_ptr,
buf, buf_size);
avctx->frame_number++;
}else{
ret= 0;
*frame_size_ptr=0;
}
return ret;
}
| {
"code": [],
"line_no": []
} | int FUNC_0(AVCodecContext *VAR_0, int16_t *VAR_1,
int *VAR_2,
uint8_t *VAR_3, int VAR_4)
{
int VAR_5;
if(*VAR_2 < AVCODEC_MAX_AUDIO_FRAME_SIZE){
av_log(VAR_0, AV_LOG_ERROR, "buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\n");
return -1;
}
if(*VAR_2 < FF_MIN_BUFFER_SIZE ||
*VAR_2 < VAR_0->channels * VAR_0->frame_size * sizeof(int16_t) ||
*VAR_2 < VAR_4){
av_log(VAR_0, AV_LOG_ERROR, "buffer %d too small\n", *VAR_2);
return -1;
}
if((VAR_0->codec->capabilities & CODEC_CAP_DELAY) || VAR_4){
VAR_5 = VAR_0->codec->decode(VAR_0, VAR_1, VAR_2,
VAR_3, VAR_4);
VAR_0->frame_number++;
}else{
VAR_5= 0;
*VAR_2=0;
}
return VAR_5;
}
| [
"int FUNC_0(AVCodecContext *VAR_0, int16_t *VAR_1,\nint *VAR_2,\nuint8_t *VAR_3, int VAR_4)\n{",
"int VAR_5;",
"if(*VAR_2 < AVCODEC_MAX_AUDIO_FRAME_SIZE){",
"av_log(VAR_0, AV_LOG_ERROR, \"buffer smaller than AVCODEC_MAX_AUDIO_FRAME_SIZE\\n\");",
"return -1;",
"}",
"if(*VAR_2 < FF_MIN_BUFFER_SIZE ||\n*VA... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23,
25,
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37,
39
],
[
41
],
[
43
],
[
45
],
[
47
],
[
49
],
[... |
13,152 | static int64_t ffm_read_write_index(int fd)
{
uint8_t buf[8];
lseek(fd, 8, SEEK_SET);
if (read(fd, buf, 8) != 8)
return AVERROR(EIO);
return AV_RB64(buf);
}
| false | FFmpeg | 71bc8c95d7cac552d3a2cb3120e41207dfb48e50 | static int64_t ffm_read_write_index(int fd)
{
uint8_t buf[8];
lseek(fd, 8, SEEK_SET);
if (read(fd, buf, 8) != 8)
return AVERROR(EIO);
return AV_RB64(buf);
}
| {
"code": [],
"line_no": []
} | static int64_t FUNC_0(int fd)
{
uint8_t buf[8];
lseek(fd, 8, SEEK_SET);
if (read(fd, buf, 8) != 8)
return AVERROR(EIO);
return AV_RB64(buf);
}
| [
"static int64_t FUNC_0(int fd)\n{",
"uint8_t buf[8];",
"lseek(fd, 8, SEEK_SET);",
"if (read(fd, buf, 8) != 8)\nreturn AVERROR(EIO);",
"return AV_RB64(buf);",
"}"
] | [
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11,
13
],
[
15
],
[
17
]
] |
13,153 | static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIONet *n = VIRTIO_NET(vdev);
struct virtio_net_ctrl_hdr ctrl;
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
VirtQueueElement elem;
size_t s;
struct iovec *iov, *iov2;
unsigned int iov_cnt;
while (virtqueue_pop(vq, &elem)) {
if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
error_report("virtio-net ctrl missing headers");
exit(1);
}
iov_cnt = elem.out_num;
iov2 = iov = g_memdup(elem.out_sg, sizeof(struct iovec) * elem.out_num);
s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
if (s != sizeof(ctrl)) {
status = VIRTIO_NET_ERR;
} else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
}
s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
assert(s == sizeof(status));
virtqueue_push(vq, &elem, sizeof(status));
virtio_notify(vdev, vq);
g_free(iov2);
}
}
| false | qemu | 51b19ebe4320f3dcd93cea71235c1219318ddfd2 | static void virtio_net_handle_ctrl(VirtIODevice *vdev, VirtQueue *vq)
{
VirtIONet *n = VIRTIO_NET(vdev);
struct virtio_net_ctrl_hdr ctrl;
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
VirtQueueElement elem;
size_t s;
struct iovec *iov, *iov2;
unsigned int iov_cnt;
while (virtqueue_pop(vq, &elem)) {
if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
iov_size(elem.out_sg, elem.out_num) < sizeof(ctrl)) {
error_report("virtio-net ctrl missing headers");
exit(1);
}
iov_cnt = elem.out_num;
iov2 = iov = g_memdup(elem.out_sg, sizeof(struct iovec) * elem.out_num);
s = iov_to_buf(iov, iov_cnt, 0, &ctrl, sizeof(ctrl));
iov_discard_front(&iov, &iov_cnt, sizeof(ctrl));
if (s != sizeof(ctrl)) {
status = VIRTIO_NET_ERR;
} else if (ctrl.class == VIRTIO_NET_CTRL_RX) {
status = virtio_net_handle_rx_mode(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_MAC) {
status = virtio_net_handle_mac(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_VLAN) {
status = virtio_net_handle_vlan_table(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_ANNOUNCE) {
status = virtio_net_handle_announce(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_MQ) {
status = virtio_net_handle_mq(n, ctrl.cmd, iov, iov_cnt);
} else if (ctrl.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
status = virtio_net_handle_offloads(n, ctrl.cmd, iov, iov_cnt);
}
s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
assert(s == sizeof(status));
virtqueue_push(vq, &elem, sizeof(status));
virtio_notify(vdev, vq);
g_free(iov2);
}
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(VirtIODevice *VAR_0, VirtQueue *VAR_1)
{
VirtIONet *n = VIRTIO_NET(VAR_0);
struct virtio_net_ctrl_hdr VAR_2;
virtio_net_ctrl_ack status = VIRTIO_NET_ERR;
VirtQueueElement elem;
size_t s;
struct iovec *VAR_3, *VAR_4;
unsigned int VAR_5;
while (virtqueue_pop(VAR_1, &elem)) {
if (iov_size(elem.in_sg, elem.in_num) < sizeof(status) ||
iov_size(elem.out_sg, elem.out_num) < sizeof(VAR_2)) {
error_report("virtio-net VAR_2 missing headers");
exit(1);
}
VAR_5 = elem.out_num;
VAR_4 = VAR_3 = g_memdup(elem.out_sg, sizeof(struct iovec) * elem.out_num);
s = iov_to_buf(VAR_3, VAR_5, 0, &VAR_2, sizeof(VAR_2));
iov_discard_front(&VAR_3, &VAR_5, sizeof(VAR_2));
if (s != sizeof(VAR_2)) {
status = VIRTIO_NET_ERR;
} else if (VAR_2.class == VIRTIO_NET_CTRL_RX) {
status = virtio_net_handle_rx_mode(n, VAR_2.cmd, VAR_3, VAR_5);
} else if (VAR_2.class == VIRTIO_NET_CTRL_MAC) {
status = virtio_net_handle_mac(n, VAR_2.cmd, VAR_3, VAR_5);
} else if (VAR_2.class == VIRTIO_NET_CTRL_VLAN) {
status = virtio_net_handle_vlan_table(n, VAR_2.cmd, VAR_3, VAR_5);
} else if (VAR_2.class == VIRTIO_NET_CTRL_ANNOUNCE) {
status = virtio_net_handle_announce(n, VAR_2.cmd, VAR_3, VAR_5);
} else if (VAR_2.class == VIRTIO_NET_CTRL_MQ) {
status = virtio_net_handle_mq(n, VAR_2.cmd, VAR_3, VAR_5);
} else if (VAR_2.class == VIRTIO_NET_CTRL_GUEST_OFFLOADS) {
status = virtio_net_handle_offloads(n, VAR_2.cmd, VAR_3, VAR_5);
}
s = iov_from_buf(elem.in_sg, elem.in_num, 0, &status, sizeof(status));
assert(s == sizeof(status));
virtqueue_push(VAR_1, &elem, sizeof(status));
virtio_notify(VAR_0, VAR_1);
g_free(VAR_4);
}
}
| [
"static void FUNC_0(VirtIODevice *VAR_0, VirtQueue *VAR_1)\n{",
"VirtIONet *n = VIRTIO_NET(VAR_0);",
"struct virtio_net_ctrl_hdr VAR_2;",
"virtio_net_ctrl_ack status = VIRTIO_NET_ERR;",
"VirtQueueElement elem;",
"size_t s;",
"struct iovec *VAR_3, *VAR_4;",
"unsigned int VAR_5;",
"while (virtqueue_po... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
21
],
[
23,
25
],
[
27
],
[
29
],
[
31
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
],
[... |
13,157 | void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
cpu_list_add(cpu);
if (tcg_enabled() && !cc->tcg_initialized) {
cc->tcg_initialized = true;
cc->tcg_initialize();
}
#ifndef CONFIG_USER_ONLY
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
}
if (cc->vmsd != NULL) {
vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
}
#endif
}
| false | qemu | 2dda635410e95843562e5257a8f173e7115a7a1e | void cpu_exec_realizefn(CPUState *cpu, Error **errp)
{
CPUClass *cc = CPU_GET_CLASS(cpu);
cpu_list_add(cpu);
if (tcg_enabled() && !cc->tcg_initialized) {
cc->tcg_initialized = true;
cc->tcg_initialize();
}
#ifndef CONFIG_USER_ONLY
if (qdev_get_vmsd(DEVICE(cpu)) == NULL) {
vmstate_register(NULL, cpu->cpu_index, &vmstate_cpu_common, cpu);
}
if (cc->vmsd != NULL) {
vmstate_register(NULL, cpu->cpu_index, cc->vmsd, cpu);
}
#endif
}
| {
"code": [],
"line_no": []
} | void FUNC_0(CPUState *VAR_0, Error **VAR_1)
{
CPUClass *cc = CPU_GET_CLASS(VAR_0);
cpu_list_add(VAR_0);
if (tcg_enabled() && !cc->tcg_initialized) {
cc->tcg_initialized = true;
cc->tcg_initialize();
}
#ifndef CONFIG_USER_ONLY
if (qdev_get_vmsd(DEVICE(VAR_0)) == NULL) {
vmstate_register(NULL, VAR_0->cpu_index, &vmstate_cpu_common, VAR_0);
}
if (cc->vmsd != NULL) {
vmstate_register(NULL, VAR_0->cpu_index, cc->vmsd, VAR_0);
}
#endif
}
| [
"void FUNC_0(CPUState *VAR_0, Error **VAR_1)\n{",
"CPUClass *cc = CPU_GET_CLASS(VAR_0);",
"cpu_list_add(VAR_0);",
"if (tcg_enabled() && !cc->tcg_initialized) {",
"cc->tcg_initialized = true;",
"cc->tcg_initialize();",
"}",
"#ifndef CONFIG_USER_ONLY\nif (qdev_get_vmsd(DEVICE(VAR_0)) == NULL) {",
"vms... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
13
],
[
15
],
[
17
],
[
19
],
[
23,
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37,
39
]
] |
13,158 | float32 helper_fitos(CPUSPARCState *env, int32_t src)
{
/* Inexact error possible converting int to float. */
float32 ret;
clear_float_exceptions(env);
ret = int32_to_float32(src, &env->fp_status);
check_ieee_exceptions(env);
return ret;
}
| false | qemu | 7385aed20db5d83979f683b9d0048674411e963c | float32 helper_fitos(CPUSPARCState *env, int32_t src)
{
float32 ret;
clear_float_exceptions(env);
ret = int32_to_float32(src, &env->fp_status);
check_ieee_exceptions(env);
return ret;
}
| {
"code": [],
"line_no": []
} | float32 FUNC_0(CPUSPARCState *env, int32_t src)
{
float32 ret;
clear_float_exceptions(env);
ret = int32_to_float32(src, &env->fp_status);
check_ieee_exceptions(env);
return ret;
}
| [
"float32 FUNC_0(CPUSPARCState *env, int32_t src)\n{",
"float32 ret;",
"clear_float_exceptions(env);",
"ret = int32_to_float32(src, &env->fp_status);",
"check_ieee_exceptions(env);",
"return ret;",
"}"
] | [
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
]
] |
13,159 | static void test_wait_event_notifier(void)
{
EventNotifierTestData data = { .n = 0, .active = 1 };
event_notifier_init(&data.e, false);
aio_set_event_notifier(ctx, &data.e, event_ready_cb);
g_assert(!aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 0);
g_assert_cmpint(data.active, ==, 1);
event_notifier_set(&data.e);
g_assert(aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 1);
g_assert_cmpint(data.active, ==, 0);
g_assert(!aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 1);
g_assert_cmpint(data.active, ==, 0);
aio_set_event_notifier(ctx, &data.e, NULL);
g_assert(!aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 1);
event_notifier_cleanup(&data.e);
}
| false | qemu | 12d69ac03b45156356b240424623719f15d8143e | static void test_wait_event_notifier(void)
{
EventNotifierTestData data = { .n = 0, .active = 1 };
event_notifier_init(&data.e, false);
aio_set_event_notifier(ctx, &data.e, event_ready_cb);
g_assert(!aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 0);
g_assert_cmpint(data.active, ==, 1);
event_notifier_set(&data.e);
g_assert(aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 1);
g_assert_cmpint(data.active, ==, 0);
g_assert(!aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 1);
g_assert_cmpint(data.active, ==, 0);
aio_set_event_notifier(ctx, &data.e, NULL);
g_assert(!aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 1);
event_notifier_cleanup(&data.e);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(void)
{
EventNotifierTestData data = { .n = 0, .active = 1 };
event_notifier_init(&data.e, false);
aio_set_event_notifier(ctx, &data.e, event_ready_cb);
g_assert(!aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 0);
g_assert_cmpint(data.active, ==, 1);
event_notifier_set(&data.e);
g_assert(aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 1);
g_assert_cmpint(data.active, ==, 0);
g_assert(!aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 1);
g_assert_cmpint(data.active, ==, 0);
aio_set_event_notifier(ctx, &data.e, NULL);
g_assert(!aio_poll(ctx, false));
g_assert_cmpint(data.n, ==, 1);
event_notifier_cleanup(&data.e);
}
| [
"static void FUNC_0(void)\n{",
"EventNotifierTestData data = { .n = 0, .active = 1 };",
"event_notifier_init(&data.e, false);",
"aio_set_event_notifier(ctx, &data.e, event_ready_cb);",
"g_assert(!aio_poll(ctx, false));",
"g_assert_cmpint(data.n, ==, 0);",
"g_assert_cmpint(data.active, ==, 1);",
"event... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
19
],
[
21
],
[
23
],
[
25
],
[
29
],
[
31
],
[
33
],
[
37
],
[
39
],
[
41
],
[
45
],
[
47
]
] |
13,160 | static void bswap_shdr(struct elf_shdr *shdr)
{
bswap32s(&shdr->sh_name);
bswap32s(&shdr->sh_type);
bswaptls(&shdr->sh_flags);
bswaptls(&shdr->sh_addr);
bswaptls(&shdr->sh_offset);
bswaptls(&shdr->sh_size);
bswap32s(&shdr->sh_link);
bswap32s(&shdr->sh_info);
bswaptls(&shdr->sh_addralign);
bswaptls(&shdr->sh_entsize);
}
| false | qemu | 991f8f0c91d65cebf51fa931450e02b0d5209012 | static void bswap_shdr(struct elf_shdr *shdr)
{
bswap32s(&shdr->sh_name);
bswap32s(&shdr->sh_type);
bswaptls(&shdr->sh_flags);
bswaptls(&shdr->sh_addr);
bswaptls(&shdr->sh_offset);
bswaptls(&shdr->sh_size);
bswap32s(&shdr->sh_link);
bswap32s(&shdr->sh_info);
bswaptls(&shdr->sh_addralign);
bswaptls(&shdr->sh_entsize);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(struct elf_shdr *VAR_0)
{
bswap32s(&VAR_0->sh_name);
bswap32s(&VAR_0->sh_type);
bswaptls(&VAR_0->sh_flags);
bswaptls(&VAR_0->sh_addr);
bswaptls(&VAR_0->sh_offset);
bswaptls(&VAR_0->sh_size);
bswap32s(&VAR_0->sh_link);
bswap32s(&VAR_0->sh_info);
bswaptls(&VAR_0->sh_addralign);
bswaptls(&VAR_0->sh_entsize);
}
| [
"static void FUNC_0(struct elf_shdr *VAR_0)\n{",
"bswap32s(&VAR_0->sh_name);",
"bswap32s(&VAR_0->sh_type);",
"bswaptls(&VAR_0->sh_flags);",
"bswaptls(&VAR_0->sh_addr);",
"bswaptls(&VAR_0->sh_offset);",
"bswaptls(&VAR_0->sh_size);",
"bswap32s(&VAR_0->sh_link);",
"bswap32s(&VAR_0->sh_info);",
"bswap... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
]
] |
13,161 | void fdt_build_clock_node(void *host_fdt, void *guest_fdt,
uint32_t host_phandle,
uint32_t guest_phandle)
{
char *node_path = NULL;
char *nodename;
const void *r;
int ret, node_offset, prop_len, path_len = 16;
node_offset = fdt_node_offset_by_phandle(host_fdt, host_phandle);
if (node_offset <= 0) {
error_setg(&error_fatal,
"not able to locate clock handle %d in host device tree",
host_phandle);
}
node_path = g_malloc(path_len);
while ((ret = fdt_get_path(host_fdt, node_offset, node_path, path_len))
== -FDT_ERR_NOSPACE) {
path_len += 16;
node_path = g_realloc(node_path, path_len);
}
if (ret < 0) {
error_setg(&error_fatal,
"not able to retrieve node path for clock handle %d",
host_phandle);
}
r = qemu_fdt_getprop(host_fdt, node_path, "compatible", &prop_len,
&error_fatal);
if (strcmp(r, "fixed-clock")) {
error_setg(&error_fatal,
"clock handle %d is not a fixed clock", host_phandle);
}
nodename = strrchr(node_path, '/');
qemu_fdt_add_subnode(guest_fdt, nodename);
copy_properties_from_host(clock_copied_properties,
ARRAY_SIZE(clock_copied_properties),
host_fdt, guest_fdt,
node_path, nodename);
qemu_fdt_setprop_cell(guest_fdt, nodename, "phandle", guest_phandle);
g_free(node_path);
}
| false | qemu | cf5a13e370afad57f1cfab0a8871ed839f5eda48 | void fdt_build_clock_node(void *host_fdt, void *guest_fdt,
uint32_t host_phandle,
uint32_t guest_phandle)
{
char *node_path = NULL;
char *nodename;
const void *r;
int ret, node_offset, prop_len, path_len = 16;
node_offset = fdt_node_offset_by_phandle(host_fdt, host_phandle);
if (node_offset <= 0) {
error_setg(&error_fatal,
"not able to locate clock handle %d in host device tree",
host_phandle);
}
node_path = g_malloc(path_len);
while ((ret = fdt_get_path(host_fdt, node_offset, node_path, path_len))
== -FDT_ERR_NOSPACE) {
path_len += 16;
node_path = g_realloc(node_path, path_len);
}
if (ret < 0) {
error_setg(&error_fatal,
"not able to retrieve node path for clock handle %d",
host_phandle);
}
r = qemu_fdt_getprop(host_fdt, node_path, "compatible", &prop_len,
&error_fatal);
if (strcmp(r, "fixed-clock")) {
error_setg(&error_fatal,
"clock handle %d is not a fixed clock", host_phandle);
}
nodename = strrchr(node_path, '/');
qemu_fdt_add_subnode(guest_fdt, nodename);
copy_properties_from_host(clock_copied_properties,
ARRAY_SIZE(clock_copied_properties),
host_fdt, guest_fdt,
node_path, nodename);
qemu_fdt_setprop_cell(guest_fdt, nodename, "phandle", guest_phandle);
g_free(node_path);
}
| {
"code": [],
"line_no": []
} | void FUNC_0(void *VAR_0, void *VAR_1,
uint32_t VAR_2,
uint32_t VAR_3)
{
char *VAR_4 = NULL;
char *VAR_5;
const void *VAR_6;
int VAR_7, VAR_8, VAR_9, VAR_10 = 16;
VAR_8 = fdt_node_offset_by_phandle(VAR_0, VAR_2);
if (VAR_8 <= 0) {
error_setg(&error_fatal,
"not able to locate clock handle %d in host device tree",
VAR_2);
}
VAR_4 = g_malloc(VAR_10);
while ((VAR_7 = fdt_get_path(VAR_0, VAR_8, VAR_4, VAR_10))
== -FDT_ERR_NOSPACE) {
VAR_10 += 16;
VAR_4 = g_realloc(VAR_4, VAR_10);
}
if (VAR_7 < 0) {
error_setg(&error_fatal,
"not able to retrieve node path for clock handle %d",
VAR_2);
}
VAR_6 = qemu_fdt_getprop(VAR_0, VAR_4, "compatible", &VAR_9,
&error_fatal);
if (strcmp(VAR_6, "fixed-clock")) {
error_setg(&error_fatal,
"clock handle %d is not a fixed clock", VAR_2);
}
VAR_5 = strrchr(VAR_4, '/');
qemu_fdt_add_subnode(VAR_1, VAR_5);
copy_properties_from_host(clock_copied_properties,
ARRAY_SIZE(clock_copied_properties),
VAR_0, VAR_1,
VAR_4, VAR_5);
qemu_fdt_setprop_cell(VAR_1, VAR_5, "phandle", VAR_3);
g_free(VAR_4);
}
| [
"void FUNC_0(void *VAR_0, void *VAR_1,\nuint32_t VAR_2,\nuint32_t VAR_3)\n{",
"char *VAR_4 = NULL;",
"char *VAR_5;",
"const void *VAR_6;",
"int VAR_7, VAR_8, VAR_9, VAR_10 = 16;",
"VAR_8 = fdt_node_offset_by_phandle(VAR_0, VAR_2);",
"if (VAR_8 <= 0) {",
"error_setg(&error_fatal,\n\"not able to locate ... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
19
],
[
21
],
[
23,
25,
27
],
[
29
],
[
31
],
[
33,
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45,
47,
49
],
[... |
13,162 | static always_inline int _find_pte (mmu_ctx_t *ctx, int is_64b, int h, int rw)
{
target_ulong base, pte0, pte1;
int i, good = -1;
int ret, r;
ret = -1; /* No entry found */
base = ctx->pg_addr[h];
for (i = 0; i < 8; i++) {
#if defined(TARGET_PPC64)
if (is_64b) {
pte0 = ldq_phys(base + (i * 16));
pte1 = ldq_phys(base + (i * 16) + 8);
r = pte64_check(ctx, pte0, pte1, h, rw);
#if defined (DEBUG_MMU)
if (loglevel != 0) {
fprintf(logfile, "Load pte from 0x" ADDRX " => 0x" ADDRX
" 0x" ADDRX " %d %d %d 0x" ADDRX "\n",
base + (i * 16), pte0, pte1,
(int)(pte0 & 1), h, (int)((pte0 >> 1) & 1),
ctx->ptem);
}
#endif
} else
#endif
{
pte0 = ldl_phys(base + (i * 8));
pte1 = ldl_phys(base + (i * 8) + 4);
r = pte32_check(ctx, pte0, pte1, h, rw);
#if defined (DEBUG_MMU)
if (loglevel != 0) {
fprintf(logfile, "Load pte from 0x" ADDRX " => 0x" ADDRX
" 0x" ADDRX " %d %d %d 0x" ADDRX "\n",
base + (i * 8), pte0, pte1,
(int)(pte0 >> 31), h, (int)((pte0 >> 6) & 1),
ctx->ptem);
}
#endif
}
switch (r) {
case -3:
/* PTE inconsistency */
return -1;
case -2:
/* Access violation */
ret = -2;
good = i;
break;
case -1:
default:
/* No PTE match */
break;
case 0:
/* access granted */
/* XXX: we should go on looping to check all PTEs consistency
* but if we can speed-up the whole thing as the
* result would be undefined if PTEs are not consistent.
*/
ret = 0;
good = i;
goto done;
}
}
if (good != -1) {
done:
#if defined (DEBUG_MMU)
if (loglevel != 0) {
fprintf(logfile, "found PTE at addr 0x" PADDRX " prot=0x%01x "
"ret=%d\n",
ctx->raddr, ctx->prot, ret);
}
#endif
/* Update page flags */
pte1 = ctx->raddr;
if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
#if defined(TARGET_PPC64)
if (is_64b) {
stq_phys_notdirty(base + (good * 16) + 8, pte1);
} else
#endif
{
stl_phys_notdirty(base + (good * 8) + 4, pte1);
}
}
}
return ret;
}
| false | qemu | b227a8e9aa5f27d29f77ba90d5eb9d0662a1175e | static always_inline int _find_pte (mmu_ctx_t *ctx, int is_64b, int h, int rw)
{
target_ulong base, pte0, pte1;
int i, good = -1;
int ret, r;
ret = -1;
base = ctx->pg_addr[h];
for (i = 0; i < 8; i++) {
#if defined(TARGET_PPC64)
if (is_64b) {
pte0 = ldq_phys(base + (i * 16));
pte1 = ldq_phys(base + (i * 16) + 8);
r = pte64_check(ctx, pte0, pte1, h, rw);
#if defined (DEBUG_MMU)
if (loglevel != 0) {
fprintf(logfile, "Load pte from 0x" ADDRX " => 0x" ADDRX
" 0x" ADDRX " %d %d %d 0x" ADDRX "\n",
base + (i * 16), pte0, pte1,
(int)(pte0 & 1), h, (int)((pte0 >> 1) & 1),
ctx->ptem);
}
#endif
} else
#endif
{
pte0 = ldl_phys(base + (i * 8));
pte1 = ldl_phys(base + (i * 8) + 4);
r = pte32_check(ctx, pte0, pte1, h, rw);
#if defined (DEBUG_MMU)
if (loglevel != 0) {
fprintf(logfile, "Load pte from 0x" ADDRX " => 0x" ADDRX
" 0x" ADDRX " %d %d %d 0x" ADDRX "\n",
base + (i * 8), pte0, pte1,
(int)(pte0 >> 31), h, (int)((pte0 >> 6) & 1),
ctx->ptem);
}
#endif
}
switch (r) {
case -3:
return -1;
case -2:
ret = -2;
good = i;
break;
case -1:
default:
break;
case 0:
ret = 0;
good = i;
goto done;
}
}
if (good != -1) {
done:
#if defined (DEBUG_MMU)
if (loglevel != 0) {
fprintf(logfile, "found PTE at addr 0x" PADDRX " prot=0x%01x "
"ret=%d\n",
ctx->raddr, ctx->prot, ret);
}
#endif
pte1 = ctx->raddr;
if (pte_update_flags(ctx, &pte1, ret, rw) == 1) {
#if defined(TARGET_PPC64)
if (is_64b) {
stq_phys_notdirty(base + (good * 16) + 8, pte1);
} else
#endif
{
stl_phys_notdirty(base + (good * 8) + 4, pte1);
}
}
}
return ret;
}
| {
"code": [],
"line_no": []
} | static always_inline int FUNC_0 (mmu_ctx_t *ctx, int is_64b, int h, int rw)
{
target_ulong base, pte0, pte1;
int VAR_0, VAR_1 = -1;
int VAR_2, VAR_3;
VAR_2 = -1;
base = ctx->pg_addr[h];
for (VAR_0 = 0; VAR_0 < 8; VAR_0++) {
#if defined(TARGET_PPC64)
if (is_64b) {
pte0 = ldq_phys(base + (VAR_0 * 16));
pte1 = ldq_phys(base + (VAR_0 * 16) + 8);
VAR_3 = pte64_check(ctx, pte0, pte1, h, rw);
#if defined (DEBUG_MMU)
if (loglevel != 0) {
fprintf(logfile, "Load pte from 0x" ADDRX " => 0x" ADDRX
" 0x" ADDRX " %d %d %d 0x" ADDRX "\n",
base + (VAR_0 * 16), pte0, pte1,
(int)(pte0 & 1), h, (int)((pte0 >> 1) & 1),
ctx->ptem);
}
#endif
} else
#endif
{
pte0 = ldl_phys(base + (VAR_0 * 8));
pte1 = ldl_phys(base + (VAR_0 * 8) + 4);
VAR_3 = pte32_check(ctx, pte0, pte1, h, rw);
#if defined (DEBUG_MMU)
if (loglevel != 0) {
fprintf(logfile, "Load pte from 0x" ADDRX " => 0x" ADDRX
" 0x" ADDRX " %d %d %d 0x" ADDRX "\n",
base + (VAR_0 * 8), pte0, pte1,
(int)(pte0 >> 31), h, (int)((pte0 >> 6) & 1),
ctx->ptem);
}
#endif
}
switch (VAR_3) {
case -3:
return -1;
case -2:
VAR_2 = -2;
VAR_1 = VAR_0;
break;
case -1:
default:
break;
case 0:
VAR_2 = 0;
VAR_1 = VAR_0;
goto done;
}
}
if (VAR_1 != -1) {
done:
#if defined (DEBUG_MMU)
if (loglevel != 0) {
fprintf(logfile, "found PTE at addr 0x" PADDRX " prot=0x%01x "
"VAR_2=%d\n",
ctx->raddr, ctx->prot, VAR_2);
}
#endif
pte1 = ctx->raddr;
if (pte_update_flags(ctx, &pte1, VAR_2, rw) == 1) {
#if defined(TARGET_PPC64)
if (is_64b) {
stq_phys_notdirty(base + (VAR_1 * 16) + 8, pte1);
} else
#endif
{
stl_phys_notdirty(base + (VAR_1 * 8) + 4, pte1);
}
}
}
return VAR_2;
}
| [
"static always_inline int FUNC_0 (mmu_ctx_t *ctx, int is_64b, int h, int rw)\n{",
"target_ulong base, pte0, pte1;",
"int VAR_0, VAR_1 = -1;",
"int VAR_2, VAR_3;",
"VAR_2 = -1;",
"base = ctx->pg_addr[h];",
"for (VAR_0 = 0; VAR_0 < 8; VAR_0++) {",
"#if defined(TARGET_PPC64)\nif (is_64b) {",
"pte0 = ld... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
19,
21
],
[
23
],
[
25
],
[
27
],
[
29,
31
],
[
33,
35,
37,
39,
41
],
[
43
],
[
45,
47
],
[
49,
51
... |
13,164 | static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
FeatureWord w,
int bitnr)
{
Object *obj = OBJECT(cpu);
int i;
char **names;
FeatureWordInfo *fi = &feature_word_info[w];
if (!fi->feat_names[bitnr]) {
return;
}
names = g_strsplit(fi->feat_names[bitnr], "|", 0);
feat2prop(names[0]);
x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
for (i = 1; names[i]; i++) {
feat2prop(names[i]);
object_property_add_alias(obj, names[i], obj, names[0],
&error_abort);
}
g_strfreev(names);
}
| false | qemu | fc7dfd205f3287893c436d932a167bffa30579c8 | static void x86_cpu_register_feature_bit_props(X86CPU *cpu,
FeatureWord w,
int bitnr)
{
Object *obj = OBJECT(cpu);
int i;
char **names;
FeatureWordInfo *fi = &feature_word_info[w];
if (!fi->feat_names[bitnr]) {
return;
}
names = g_strsplit(fi->feat_names[bitnr], "|", 0);
feat2prop(names[0]);
x86_cpu_register_bit_prop(cpu, names[0], &cpu->env.features[w], bitnr);
for (i = 1; names[i]; i++) {
feat2prop(names[i]);
object_property_add_alias(obj, names[i], obj, names[0],
&error_abort);
}
g_strfreev(names);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(X86CPU *VAR_0,
FeatureWord VAR_1,
int VAR_2)
{
Object *obj = OBJECT(VAR_0);
int VAR_3;
char **VAR_4;
FeatureWordInfo *fi = &feature_word_info[VAR_1];
if (!fi->feat_names[VAR_2]) {
return;
}
VAR_4 = g_strsplit(fi->feat_names[VAR_2], "|", 0);
feat2prop(VAR_4[0]);
x86_cpu_register_bit_prop(VAR_0, VAR_4[0], &VAR_0->env.features[VAR_1], VAR_2);
for (VAR_3 = 1; VAR_4[VAR_3]; VAR_3++) {
feat2prop(VAR_4[VAR_3]);
object_property_add_alias(obj, VAR_4[VAR_3], obj, VAR_4[0],
&error_abort);
}
g_strfreev(VAR_4);
}
| [
"static void FUNC_0(X86CPU *VAR_0,\nFeatureWord VAR_1,\nint VAR_2)\n{",
"Object *obj = OBJECT(VAR_0);",
"int VAR_3;",
"char **VAR_4;",
"FeatureWordInfo *fi = &feature_word_info[VAR_1];",
"if (!fi->feat_names[VAR_2]) {",
"return;",
"}",
"VAR_4 = g_strsplit(fi->feat_names[VAR_2], \"|\", 0);",
"feat2... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
19
],
[
21
],
[
23
],
[
27
],
[
31
],
[
33
],
[
37
],
[
39
],
[
41,
43
],
[
45
],
[
49
],
[
51
]
] |
13,165 | static void reschedule_dma(void *opaque)
{
DMAAIOCB *dbs = (DMAAIOCB *)opaque;
qemu_bh_delete(dbs->bh);
dbs->bh = NULL;
dma_bdrv_cb(dbs, 0);
}
| false | qemu | 4be746345f13e99e468c60acbd3a355e8183e3ce | static void reschedule_dma(void *opaque)
{
DMAAIOCB *dbs = (DMAAIOCB *)opaque;
qemu_bh_delete(dbs->bh);
dbs->bh = NULL;
dma_bdrv_cb(dbs, 0);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(void *VAR_0)
{
DMAAIOCB *dbs = (DMAAIOCB *)VAR_0;
qemu_bh_delete(dbs->bh);
dbs->bh = NULL;
dma_bdrv_cb(dbs, 0);
}
| [
"static void FUNC_0(void *VAR_0)\n{",
"DMAAIOCB *dbs = (DMAAIOCB *)VAR_0;",
"qemu_bh_delete(dbs->bh);",
"dbs->bh = NULL;",
"dma_bdrv_cb(dbs, 0);",
"}"
] | [
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11
],
[
13
],
[
15
]
] |
13,166 | static inline void gen_lookup_tb(DisasContext *s)
{
tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
s->is_jmp = DISAS_JUMP;
}
| false | qemu | 8a6b28c7b5104263344508df0f4bce97f22cfcaf | static inline void gen_lookup_tb(DisasContext *s)
{
tcg_gen_movi_i32(cpu_R[15], s->pc & ~1);
s->is_jmp = DISAS_JUMP;
}
| {
"code": [],
"line_no": []
} | static inline void FUNC_0(DisasContext *VAR_0)
{
tcg_gen_movi_i32(cpu_R[15], VAR_0->pc & ~1);
VAR_0->is_jmp = DISAS_JUMP;
}
| [
"static inline void FUNC_0(DisasContext *VAR_0)\n{",
"tcg_gen_movi_i32(cpu_R[15], VAR_0->pc & ~1);",
"VAR_0->is_jmp = DISAS_JUMP;",
"}"
] | [
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
]
] |
13,168 | int kvm_arch_init_vcpu(CPUState *env)
{
struct {
struct kvm_cpuid2 cpuid;
struct kvm_cpuid_entry2 entries[100];
} __attribute__((packed)) cpuid_data;
uint32_t limit, i, j, cpuid_i;
uint32_t unused;
struct kvm_cpuid_entry2 *c;
uint32_t signature[3];
env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX);
env->cpuid_ext_features |= i;
env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
0, R_EDX);
env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
0, R_ECX);
env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(env, 0x8000000A,
0, R_EDX);
cpuid_i = 0;
/* Paravirtualization CPUIDs */
memcpy(signature, "KVMKVMKVM\0\0\0", 12);
c = &cpuid_data.entries[cpuid_i++];
memset(c, 0, sizeof(*c));
c->function = KVM_CPUID_SIGNATURE;
c->eax = 0;
c->ebx = signature[0];
c->ecx = signature[1];
c->edx = signature[2];
c = &cpuid_data.entries[cpuid_i++];
memset(c, 0, sizeof(*c));
c->function = KVM_CPUID_FEATURES;
c->eax = env->cpuid_kvm_features & kvm_arch_get_supported_cpuid(env,
KVM_CPUID_FEATURES, 0, R_EAX);
has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
for (i = 0; i <= limit; i++) {
c = &cpuid_data.entries[cpuid_i++];
switch (i) {
case 2: {
/* Keep reading function 2 till all the input is received */
int times;
c->function = i;
c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
KVM_CPUID_FLAG_STATE_READ_NEXT;
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
times = c->eax & 0xff;
for (j = 1; j < times; ++j) {
c = &cpuid_data.entries[cpuid_i++];
c->function = i;
c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
}
break;
}
case 4:
case 0xb:
case 0xd:
for (j = 0; ; j++) {
c->function = i;
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
c->index = j;
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
if (i == 4 && c->eax == 0) {
break;
}
if (i == 0xb && !(c->ecx & 0xff00)) {
break;
}
if (i == 0xd && c->eax == 0) {
break;
}
c = &cpuid_data.entries[cpuid_i++];
}
break;
default:
c->function = i;
c->flags = 0;
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
break;
}
}
cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
for (i = 0x80000000; i <= limit; i++) {
c = &cpuid_data.entries[cpuid_i++];
c->function = i;
c->flags = 0;
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
}
/* Call Centaur's CPUID instructions they are supported. */
if (env->cpuid_xlevel2 > 0) {
env->cpuid_ext4_features &=
kvm_arch_get_supported_cpuid(env, 0xC0000001, 0, R_EDX);
cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
for (i = 0xC0000000; i <= limit; i++) {
c = &cpuid_data.entries[cpuid_i++];
c->function = i;
c->flags = 0;
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
}
}
cpuid_data.cpuid.nent = cpuid_i;
if (((env->cpuid_version >> 8)&0xF) >= 6
&& (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
&& kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
uint64_t mcg_cap;
int banks;
int ret;
ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks);
if (ret < 0) {
fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
return ret;
}
if (banks > MCE_BANKS_DEF) {
banks = MCE_BANKS_DEF;
}
mcg_cap &= MCE_CAP_DEF;
mcg_cap |= banks;
ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap);
if (ret < 0) {
fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
return ret;
}
env->mcg_cap = mcg_cap;
}
qemu_add_vm_change_state_handler(cpu_update_state, env);
return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}
| false | qemu | ba9bc59e1f5dc91caf35e0ef08da137b3a5e7386 | int kvm_arch_init_vcpu(CPUState *env)
{
struct {
struct kvm_cpuid2 cpuid;
struct kvm_cpuid_entry2 entries[100];
} __attribute__((packed)) cpuid_data;
uint32_t limit, i, j, cpuid_i;
uint32_t unused;
struct kvm_cpuid_entry2 *c;
uint32_t signature[3];
env->cpuid_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_EDX);
i = env->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
env->cpuid_ext_features &= kvm_arch_get_supported_cpuid(env, 1, 0, R_ECX);
env->cpuid_ext_features |= i;
env->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
0, R_EDX);
env->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(env, 0x80000001,
0, R_ECX);
env->cpuid_svm_features &= kvm_arch_get_supported_cpuid(env, 0x8000000A,
0, R_EDX);
cpuid_i = 0;
memcpy(signature, "KVMKVMKVM\0\0\0", 12);
c = &cpuid_data.entries[cpuid_i++];
memset(c, 0, sizeof(*c));
c->function = KVM_CPUID_SIGNATURE;
c->eax = 0;
c->ebx = signature[0];
c->ecx = signature[1];
c->edx = signature[2];
c = &cpuid_data.entries[cpuid_i++];
memset(c, 0, sizeof(*c));
c->function = KVM_CPUID_FEATURES;
c->eax = env->cpuid_kvm_features & kvm_arch_get_supported_cpuid(env,
KVM_CPUID_FEATURES, 0, R_EAX);
has_msr_async_pf_en = c->eax & (1 << KVM_FEATURE_ASYNC_PF);
cpu_x86_cpuid(env, 0, 0, &limit, &unused, &unused, &unused);
for (i = 0; i <= limit; i++) {
c = &cpuid_data.entries[cpuid_i++];
switch (i) {
case 2: {
int times;
c->function = i;
c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
KVM_CPUID_FLAG_STATE_READ_NEXT;
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
times = c->eax & 0xff;
for (j = 1; j < times; ++j) {
c = &cpuid_data.entries[cpuid_i++];
c->function = i;
c->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
}
break;
}
case 4:
case 0xb:
case 0xd:
for (j = 0; ; j++) {
c->function = i;
c->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
c->index = j;
cpu_x86_cpuid(env, i, j, &c->eax, &c->ebx, &c->ecx, &c->edx);
if (i == 4 && c->eax == 0) {
break;
}
if (i == 0xb && !(c->ecx & 0xff00)) {
break;
}
if (i == 0xd && c->eax == 0) {
break;
}
c = &cpuid_data.entries[cpuid_i++];
}
break;
default:
c->function = i;
c->flags = 0;
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
break;
}
}
cpu_x86_cpuid(env, 0x80000000, 0, &limit, &unused, &unused, &unused);
for (i = 0x80000000; i <= limit; i++) {
c = &cpuid_data.entries[cpuid_i++];
c->function = i;
c->flags = 0;
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
}
if (env->cpuid_xlevel2 > 0) {
env->cpuid_ext4_features &=
kvm_arch_get_supported_cpuid(env, 0xC0000001, 0, R_EDX);
cpu_x86_cpuid(env, 0xC0000000, 0, &limit, &unused, &unused, &unused);
for (i = 0xC0000000; i <= limit; i++) {
c = &cpuid_data.entries[cpuid_i++];
c->function = i;
c->flags = 0;
cpu_x86_cpuid(env, i, 0, &c->eax, &c->ebx, &c->ecx, &c->edx);
}
}
cpuid_data.cpuid.nent = cpuid_i;
if (((env->cpuid_version >> 8)&0xF) >= 6
&& (env->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
&& kvm_check_extension(env->kvm_state, KVM_CAP_MCE) > 0) {
uint64_t mcg_cap;
int banks;
int ret;
ret = kvm_get_mce_cap_supported(env->kvm_state, &mcg_cap, &banks);
if (ret < 0) {
fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-ret));
return ret;
}
if (banks > MCE_BANKS_DEF) {
banks = MCE_BANKS_DEF;
}
mcg_cap &= MCE_CAP_DEF;
mcg_cap |= banks;
ret = kvm_vcpu_ioctl(env, KVM_X86_SETUP_MCE, &mcg_cap);
if (ret < 0) {
fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-ret));
return ret;
}
env->mcg_cap = mcg_cap;
}
qemu_add_vm_change_state_handler(cpu_update_state, env);
return kvm_vcpu_ioctl(env, KVM_SET_CPUID2, &cpuid_data);
}
| {
"code": [],
"line_no": []
} | int FUNC_0(CPUState *VAR_0)
{
struct {
struct kvm_cpuid2 cpuid;
struct kvm_cpuid_entry2 entries[100];
} __attribute__((packed)) VAR_1;
uint32_t limit, i, j, cpuid_i;
uint32_t unused;
struct kvm_cpuid_entry2 *VAR_2;
uint32_t signature[3];
VAR_0->cpuid_features &= kvm_arch_get_supported_cpuid(VAR_0, 1, 0, R_EDX);
i = VAR_0->cpuid_ext_features & CPUID_EXT_HYPERVISOR;
VAR_0->cpuid_ext_features &= kvm_arch_get_supported_cpuid(VAR_0, 1, 0, R_ECX);
VAR_0->cpuid_ext_features |= i;
VAR_0->cpuid_ext2_features &= kvm_arch_get_supported_cpuid(VAR_0, 0x80000001,
0, R_EDX);
VAR_0->cpuid_ext3_features &= kvm_arch_get_supported_cpuid(VAR_0, 0x80000001,
0, R_ECX);
VAR_0->cpuid_svm_features &= kvm_arch_get_supported_cpuid(VAR_0, 0x8000000A,
0, R_EDX);
cpuid_i = 0;
memcpy(signature, "KVMKVMKVM\0\0\0", 12);
VAR_2 = &VAR_1.entries[cpuid_i++];
memset(VAR_2, 0, sizeof(*VAR_2));
VAR_2->function = KVM_CPUID_SIGNATURE;
VAR_2->eax = 0;
VAR_2->ebx = signature[0];
VAR_2->ecx = signature[1];
VAR_2->edx = signature[2];
VAR_2 = &VAR_1.entries[cpuid_i++];
memset(VAR_2, 0, sizeof(*VAR_2));
VAR_2->function = KVM_CPUID_FEATURES;
VAR_2->eax = VAR_0->cpuid_kvm_features & kvm_arch_get_supported_cpuid(VAR_0,
KVM_CPUID_FEATURES, 0, R_EAX);
has_msr_async_pf_en = VAR_2->eax & (1 << KVM_FEATURE_ASYNC_PF);
cpu_x86_cpuid(VAR_0, 0, 0, &limit, &unused, &unused, &unused);
for (i = 0; i <= limit; i++) {
VAR_2 = &VAR_1.entries[cpuid_i++];
switch (i) {
case 2: {
int times;
VAR_2->function = i;
VAR_2->flags = KVM_CPUID_FLAG_STATEFUL_FUNC |
KVM_CPUID_FLAG_STATE_READ_NEXT;
cpu_x86_cpuid(VAR_0, i, 0, &VAR_2->eax, &VAR_2->ebx, &VAR_2->ecx, &VAR_2->edx);
times = VAR_2->eax & 0xff;
for (j = 1; j < times; ++j) {
VAR_2 = &VAR_1.entries[cpuid_i++];
VAR_2->function = i;
VAR_2->flags = KVM_CPUID_FLAG_STATEFUL_FUNC;
cpu_x86_cpuid(VAR_0, i, 0, &VAR_2->eax, &VAR_2->ebx, &VAR_2->ecx, &VAR_2->edx);
}
break;
}
case 4:
case 0xb:
case 0xd:
for (j = 0; ; j++) {
VAR_2->function = i;
VAR_2->flags = KVM_CPUID_FLAG_SIGNIFCANT_INDEX;
VAR_2->index = j;
cpu_x86_cpuid(VAR_0, i, j, &VAR_2->eax, &VAR_2->ebx, &VAR_2->ecx, &VAR_2->edx);
if (i == 4 && VAR_2->eax == 0) {
break;
}
if (i == 0xb && !(VAR_2->ecx & 0xff00)) {
break;
}
if (i == 0xd && VAR_2->eax == 0) {
break;
}
VAR_2 = &VAR_1.entries[cpuid_i++];
}
break;
default:
VAR_2->function = i;
VAR_2->flags = 0;
cpu_x86_cpuid(VAR_0, i, 0, &VAR_2->eax, &VAR_2->ebx, &VAR_2->ecx, &VAR_2->edx);
break;
}
}
cpu_x86_cpuid(VAR_0, 0x80000000, 0, &limit, &unused, &unused, &unused);
for (i = 0x80000000; i <= limit; i++) {
VAR_2 = &VAR_1.entries[cpuid_i++];
VAR_2->function = i;
VAR_2->flags = 0;
cpu_x86_cpuid(VAR_0, i, 0, &VAR_2->eax, &VAR_2->ebx, &VAR_2->ecx, &VAR_2->edx);
}
if (VAR_0->cpuid_xlevel2 > 0) {
VAR_0->cpuid_ext4_features &=
kvm_arch_get_supported_cpuid(VAR_0, 0xC0000001, 0, R_EDX);
cpu_x86_cpuid(VAR_0, 0xC0000000, 0, &limit, &unused, &unused, &unused);
for (i = 0xC0000000; i <= limit; i++) {
VAR_2 = &VAR_1.entries[cpuid_i++];
VAR_2->function = i;
VAR_2->flags = 0;
cpu_x86_cpuid(VAR_0, i, 0, &VAR_2->eax, &VAR_2->ebx, &VAR_2->ecx, &VAR_2->edx);
}
}
VAR_1.cpuid.nent = cpuid_i;
if (((VAR_0->cpuid_version >> 8)&0xF) >= 6
&& (VAR_0->cpuid_features&(CPUID_MCE|CPUID_MCA)) == (CPUID_MCE|CPUID_MCA)
&& kvm_check_extension(VAR_0->kvm_state, KVM_CAP_MCE) > 0) {
uint64_t mcg_cap;
int VAR_3;
int VAR_4;
VAR_4 = kvm_get_mce_cap_supported(VAR_0->kvm_state, &mcg_cap, &VAR_3);
if (VAR_4 < 0) {
fprintf(stderr, "kvm_get_mce_cap_supported: %s", strerror(-VAR_4));
return VAR_4;
}
if (VAR_3 > MCE_BANKS_DEF) {
VAR_3 = MCE_BANKS_DEF;
}
mcg_cap &= MCE_CAP_DEF;
mcg_cap |= VAR_3;
VAR_4 = kvm_vcpu_ioctl(VAR_0, KVM_X86_SETUP_MCE, &mcg_cap);
if (VAR_4 < 0) {
fprintf(stderr, "KVM_X86_SETUP_MCE: %s", strerror(-VAR_4));
return VAR_4;
}
VAR_0->mcg_cap = mcg_cap;
}
qemu_add_vm_change_state_handler(cpu_update_state, VAR_0);
return kvm_vcpu_ioctl(VAR_0, KVM_SET_CPUID2, &VAR_1);
}
| [
"int FUNC_0(CPUState *VAR_0)\n{",
"struct {",
"struct kvm_cpuid2 cpuid;",
"struct kvm_cpuid_entry2 entries[100];",
"} __attribute__((packed)) VAR_1;",
"uint32_t limit, i, j, cpuid_i;",
"uint32_t unused;",
"struct kvm_cpuid_entry2 *VAR_2;",
"uint32_t signature[3];",
"VAR_0->cpuid_features &= kvm_ar... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
23
],
[
27
],
[
29
],
[
31
],
[
35,
37
],
[
39,
41
],
[
43,
45
],
[
51
],
[
57
],
[... |
13,169 | static int pci_bridge_initfn(PCIDevice *dev)
{
PCIBridge *s = DO_UPCAST(PCIBridge, dev, dev);
pci_config_set_vendor_id(s->dev.config, s->vid);
pci_config_set_device_id(s->dev.config, s->did);
s->dev.config[0x04] = 0x06; // command = bus master, pci mem
s->dev.config[0x05] = 0x00;
s->dev.config[0x06] = 0xa0; // status = fast back-to-back, 66MHz, no error
s->dev.config[0x07] = 0x00; // status = fast devsel
s->dev.config[0x08] = 0x00; // revision
s->dev.config[0x09] = 0x00; // programming i/f
pci_config_set_class(s->dev.config, PCI_CLASS_BRIDGE_PCI);
s->dev.config[0x0D] = 0x10; // latency_timer
s->dev.config[PCI_HEADER_TYPE] =
PCI_HEADER_TYPE_MULTI_FUNCTION | PCI_HEADER_TYPE_BRIDGE; // header_type
s->dev.config[0x1E] = 0xa0; // secondary status
return 0;
}
| false | qemu | 74c01823badbf4637c18ac4cad5967b4f9669514 | static int pci_bridge_initfn(PCIDevice *dev)
{
PCIBridge *s = DO_UPCAST(PCIBridge, dev, dev);
pci_config_set_vendor_id(s->dev.config, s->vid);
pci_config_set_device_id(s->dev.config, s->did);
s->dev.config[0x04] = 0x06;
s->dev.config[0x05] = 0x00;
s->dev.config[0x06] = 0xa0;
s->dev.config[0x07] = 0x00;
s->dev.config[0x08] = 0x00;
s->dev.config[0x09] = 0x00;
pci_config_set_class(s->dev.config, PCI_CLASS_BRIDGE_PCI);
s->dev.config[0x0D] = 0x10;
s->dev.config[PCI_HEADER_TYPE] =
PCI_HEADER_TYPE_MULTI_FUNCTION | PCI_HEADER_TYPE_BRIDGE;
s->dev.config[0x1E] = 0xa0;
return 0;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(PCIDevice *VAR_0)
{
PCIBridge *s = DO_UPCAST(PCIBridge, VAR_0, VAR_0);
pci_config_set_vendor_id(s->VAR_0.config, s->vid);
pci_config_set_device_id(s->VAR_0.config, s->did);
s->VAR_0.config[0x04] = 0x06;
s->VAR_0.config[0x05] = 0x00;
s->VAR_0.config[0x06] = 0xa0;
s->VAR_0.config[0x07] = 0x00;
s->VAR_0.config[0x08] = 0x00;
s->VAR_0.config[0x09] = 0x00;
pci_config_set_class(s->VAR_0.config, PCI_CLASS_BRIDGE_PCI);
s->VAR_0.config[0x0D] = 0x10;
s->VAR_0.config[PCI_HEADER_TYPE] =
PCI_HEADER_TYPE_MULTI_FUNCTION | PCI_HEADER_TYPE_BRIDGE;
s->VAR_0.config[0x1E] = 0xa0;
return 0;
}
| [
"static int FUNC_0(PCIDevice *VAR_0)\n{",
"PCIBridge *s = DO_UPCAST(PCIBridge, VAR_0, VAR_0);",
"pci_config_set_vendor_id(s->VAR_0.config, s->vid);",
"pci_config_set_device_id(s->VAR_0.config, s->did);",
"s->VAR_0.config[0x04] = 0x06;",
"s->VAR_0.config[0x05] = 0x00;",
"s->VAR_0.config[0x06] = 0xa0;",
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31,
33
],
[
35
],
[
37
],
[
39
]
] |
13,170 | static void write_palette(const char *key, QObject *obj, void *opaque)
{
struct palette_cb_priv *priv = opaque;
VncState *vs = priv->vs;
uint32_t bytes = vs->clientds.pf.bytes_per_pixel;
uint8_t idx = qint_get_int(qobject_to_qint(obj));
if (bytes == 4) {
uint32_t color = tight_palette_buf2rgb(32, (uint8_t *)key);
((uint32_t*)priv->header)[idx] = color;
} else {
uint16_t color = tight_palette_buf2rgb(16, (uint8_t *)key);
((uint16_t*)priv->header)[idx] = color;
}
}
| false | qemu | 245f7b51c0ea04fb2224b1127430a096c91aee70 | static void write_palette(const char *key, QObject *obj, void *opaque)
{
struct palette_cb_priv *priv = opaque;
VncState *vs = priv->vs;
uint32_t bytes = vs->clientds.pf.bytes_per_pixel;
uint8_t idx = qint_get_int(qobject_to_qint(obj));
if (bytes == 4) {
uint32_t color = tight_palette_buf2rgb(32, (uint8_t *)key);
((uint32_t*)priv->header)[idx] = color;
} else {
uint16_t color = tight_palette_buf2rgb(16, (uint8_t *)key);
((uint16_t*)priv->header)[idx] = color;
}
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(const char *VAR_0, QObject *VAR_1, void *VAR_2)
{
struct palette_cb_priv *VAR_3 = VAR_2;
VncState *vs = VAR_3->vs;
uint32_t bytes = vs->clientds.pf.bytes_per_pixel;
uint8_t idx = qint_get_int(qobject_to_qint(VAR_1));
if (bytes == 4) {
uint32_t color = tight_palette_buf2rgb(32, (uint8_t *)VAR_0);
((uint32_t*)VAR_3->header)[idx] = color;
} else {
uint16_t color = tight_palette_buf2rgb(16, (uint8_t *)VAR_0);
((uint16_t*)VAR_3->header)[idx] = color;
}
}
| [
"static void FUNC_0(const char *VAR_0, QObject *VAR_1, void *VAR_2)\n{",
"struct palette_cb_priv *VAR_3 = VAR_2;",
"VncState *vs = VAR_3->vs;",
"uint32_t bytes = vs->clientds.pf.bytes_per_pixel;",
"uint8_t idx = qint_get_int(qobject_to_qint(VAR_1));",
"if (bytes == 4) {",
"uint32_t color = tight_palette... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
15
],
[
17
],
[
21
],
[
23
],
[
25
],
[
29
],
[
31
],
[
33
]
] |
13,171 | static void rtas_get_xive(sPAPREnvironment *spapr, uint32_t token,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets)
{
struct ics_state *ics = spapr->icp->ics;
uint32_t nr;
if ((nargs != 1) || (nret != 3)) {
rtas_st(rets, 0, -3);
return;
}
nr = rtas_ld(args, 0);
if (!ics_valid_irq(ics, nr)) {
rtas_st(rets, 0, -3);
return;
}
rtas_st(rets, 0, 0); /* Success */
rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}
| false | qemu | 210b580b106fa798149e28aa13c66b325a43204e | static void rtas_get_xive(sPAPREnvironment *spapr, uint32_t token,
uint32_t nargs, target_ulong args,
uint32_t nret, target_ulong rets)
{
struct ics_state *ics = spapr->icp->ics;
uint32_t nr;
if ((nargs != 1) || (nret != 3)) {
rtas_st(rets, 0, -3);
return;
}
nr = rtas_ld(args, 0);
if (!ics_valid_irq(ics, nr)) {
rtas_st(rets, 0, -3);
return;
}
rtas_st(rets, 0, 0);
rtas_st(rets, 1, ics->irqs[nr - ics->offset].server);
rtas_st(rets, 2, ics->irqs[nr - ics->offset].priority);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(sPAPREnvironment *VAR_0, uint32_t VAR_1,
uint32_t VAR_2, target_ulong VAR_3,
uint32_t VAR_4, target_ulong VAR_5)
{
struct ics_state *VAR_6 = VAR_0->icp->VAR_6;
uint32_t nr;
if ((VAR_2 != 1) || (VAR_4 != 3)) {
rtas_st(VAR_5, 0, -3);
return;
}
nr = rtas_ld(VAR_3, 0);
if (!ics_valid_irq(VAR_6, nr)) {
rtas_st(VAR_5, 0, -3);
return;
}
rtas_st(VAR_5, 0, 0);
rtas_st(VAR_5, 1, VAR_6->irqs[nr - VAR_6->offset].server);
rtas_st(VAR_5, 2, VAR_6->irqs[nr - VAR_6->offset].priority);
}
| [
"static void FUNC_0(sPAPREnvironment *VAR_0, uint32_t VAR_1,\nuint32_t VAR_2, target_ulong VAR_3,\nuint32_t VAR_4, target_ulong VAR_5)\n{",
"struct ics_state *VAR_6 = VAR_0->icp->VAR_6;",
"uint32_t nr;",
"if ((VAR_2 != 1) || (VAR_4 != 3)) {",
"rtas_st(VAR_5, 0, -3);",
"return;",
"}",
"nr = rtas_ld(VAR... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7
],
[
9
],
[
11
],
[
15
],
[
17
],
[
19
],
[
21
],
[
25
],
[
29
],
[
31
],
[
33
],
[
35
],
[
39
],
[
41
],
[
43
],
[
45
]
] |
13,173 | VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_RET_CORUPT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_BUF_END_BEFORE_LE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_INVALID_FILE_SELECTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_FCI_FORMAT_INVALID)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_CHANGE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_FILE_FILLED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_EXC_ERROR)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_EXC_ERROR_CHANGE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_EXC_ERROR_MEMORY_FAILURE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_WRONG_LENGTH)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_CLA_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_CHANNEL_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_SECURE_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_COMMAND_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(
VCARD7816_STATUS_ERROR_COMMAND_INCOMPATIBLE_WITH_FILE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_SECURITY_NOT_SATISFIED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_AUTHENTICATION_BLOCKED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_DATA_INVALID)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_CONDITION_NOT_SATISFIED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_DATA_NO_EF)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_SM_OBJECT_MISSING)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_SM_OBJECT_INCORRECT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_WRONG_PARAMETERS)
VCARD_RESPONSE_NEW_STATIC_STATUS(
VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_IN_DATA)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_FUNCTION_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_FILE_NOT_FOUND)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_RECORD_NOT_FOUND)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_NO_SPACE_FOR_FILE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_LC_TLV_INCONSISTENT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_P1_P2_INCORRECT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_LC_P1_P2_INCONSISTENT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_DATA_NOT_FOUND)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_2)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_INS_CODE_INVALID)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_CLA_INVALID)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_GENERAL)
/*
* return a single response code. This function cannot fail. It will always
* return a response.
*/
VCardResponse *
vcard_make_response(vcard_7816_status_t status)
{
VCardResponse *response = NULL;
switch (status) {
/* known 7816 response codes */
case VCARD7816_STATUS_SUCCESS:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_SUCCESS);
case VCARD7816_STATUS_WARNING:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING);
case VCARD7816_STATUS_WARNING_RET_CORUPT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_RET_CORUPT);
case VCARD7816_STATUS_WARNING_BUF_END_BEFORE_LE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_BUF_END_BEFORE_LE);
case VCARD7816_STATUS_WARNING_INVALID_FILE_SELECTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_INVALID_FILE_SELECTED);
case VCARD7816_STATUS_WARNING_FCI_FORMAT_INVALID:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_FCI_FORMAT_INVALID);
case VCARD7816_STATUS_WARNING_CHANGE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_CHANGE);
case VCARD7816_STATUS_WARNING_FILE_FILLED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_FILE_FILLED);
case VCARD7816_STATUS_EXC_ERROR:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_EXC_ERROR);
case VCARD7816_STATUS_EXC_ERROR_CHANGE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_EXC_ERROR_CHANGE);
case VCARD7816_STATUS_EXC_ERROR_MEMORY_FAILURE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_EXC_ERROR_MEMORY_FAILURE);
case VCARD7816_STATUS_ERROR_WRONG_LENGTH:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_WRONG_LENGTH);
case VCARD7816_STATUS_ERROR_CLA_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_CLA_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_CHANNEL_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_CHANNEL_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_SECURE_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_SECURE_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_COMMAND_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_COMMAND_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_COMMAND_INCOMPATIBLE_WITH_FILE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_COMMAND_INCOMPATIBLE_WITH_FILE);
case VCARD7816_STATUS_ERROR_SECURITY_NOT_SATISFIED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_SECURITY_NOT_SATISFIED);
case VCARD7816_STATUS_ERROR_AUTHENTICATION_BLOCKED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_AUTHENTICATION_BLOCKED);
case VCARD7816_STATUS_ERROR_DATA_INVALID:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_DATA_INVALID);
case VCARD7816_STATUS_ERROR_CONDITION_NOT_SATISFIED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_CONDITION_NOT_SATISFIED);
case VCARD7816_STATUS_ERROR_DATA_NO_EF:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_DATA_NO_EF);
case VCARD7816_STATUS_ERROR_SM_OBJECT_MISSING:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_SM_OBJECT_MISSING);
case VCARD7816_STATUS_ERROR_SM_OBJECT_INCORRECT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_SM_OBJECT_INCORRECT);
case VCARD7816_STATUS_ERROR_WRONG_PARAMETERS:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_WRONG_PARAMETERS);
case VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_IN_DATA:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_IN_DATA);
case VCARD7816_STATUS_ERROR_FUNCTION_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_FUNCTION_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_FILE_NOT_FOUND:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_FILE_NOT_FOUND);
case VCARD7816_STATUS_ERROR_RECORD_NOT_FOUND:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_RECORD_NOT_FOUND);
case VCARD7816_STATUS_ERROR_NO_SPACE_FOR_FILE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_NO_SPACE_FOR_FILE);
case VCARD7816_STATUS_ERROR_LC_TLV_INCONSISTENT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_LC_TLV_INCONSISTENT);
case VCARD7816_STATUS_ERROR_P1_P2_INCORRECT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_P1_P2_INCORRECT);
case VCARD7816_STATUS_ERROR_LC_P1_P2_INCONSISTENT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_LC_P1_P2_INCONSISTENT);
case VCARD7816_STATUS_ERROR_DATA_NOT_FOUND:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_DATA_NOT_FOUND);
case VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_2:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_2);
case VCARD7816_STATUS_ERROR_INS_CODE_INVALID:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_INS_CODE_INVALID);
case VCARD7816_STATUS_ERROR_CLA_INVALID:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_CLA_INVALID);
case VCARD7816_STATUS_ERROR_GENERAL:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_GENERAL);
default:
/* we don't know this status code, create a response buffer to
* hold it */
response = vcard_response_new_status(status);
if (response == NULL) {
/* couldn't allocate the buffer, return memmory error */
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_EXC_ERROR_MEMORY_FAILURE);
}
}
assert(response);
return response;
}
| false | qemu | 1687a089f103f9b7a1b4a1555068054cb46ee9e9 | VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_RET_CORUPT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_BUF_END_BEFORE_LE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_INVALID_FILE_SELECTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_FCI_FORMAT_INVALID)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_CHANGE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_FILE_FILLED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_EXC_ERROR)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_EXC_ERROR_CHANGE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_EXC_ERROR_MEMORY_FAILURE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_WRONG_LENGTH)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_CLA_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_CHANNEL_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_SECURE_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_COMMAND_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(
VCARD7816_STATUS_ERROR_COMMAND_INCOMPATIBLE_WITH_FILE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_SECURITY_NOT_SATISFIED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_AUTHENTICATION_BLOCKED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_DATA_INVALID)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_CONDITION_NOT_SATISFIED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_DATA_NO_EF)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_SM_OBJECT_MISSING)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_SM_OBJECT_INCORRECT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_WRONG_PARAMETERS)
VCARD_RESPONSE_NEW_STATIC_STATUS(
VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_IN_DATA)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_FUNCTION_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_FILE_NOT_FOUND)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_RECORD_NOT_FOUND)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_NO_SPACE_FOR_FILE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_LC_TLV_INCONSISTENT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_P1_P2_INCORRECT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_LC_P1_P2_INCONSISTENT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_DATA_NOT_FOUND)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_2)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_INS_CODE_INVALID)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_CLA_INVALID)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_GENERAL)
VCardResponse *
vcard_make_response(vcard_7816_status_t status)
{
VCardResponse *response = NULL;
switch (status) {
case VCARD7816_STATUS_SUCCESS:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_SUCCESS);
case VCARD7816_STATUS_WARNING:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING);
case VCARD7816_STATUS_WARNING_RET_CORUPT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_RET_CORUPT);
case VCARD7816_STATUS_WARNING_BUF_END_BEFORE_LE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_BUF_END_BEFORE_LE);
case VCARD7816_STATUS_WARNING_INVALID_FILE_SELECTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_INVALID_FILE_SELECTED);
case VCARD7816_STATUS_WARNING_FCI_FORMAT_INVALID:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_FCI_FORMAT_INVALID);
case VCARD7816_STATUS_WARNING_CHANGE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_CHANGE);
case VCARD7816_STATUS_WARNING_FILE_FILLED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_FILE_FILLED);
case VCARD7816_STATUS_EXC_ERROR:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_EXC_ERROR);
case VCARD7816_STATUS_EXC_ERROR_CHANGE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_EXC_ERROR_CHANGE);
case VCARD7816_STATUS_EXC_ERROR_MEMORY_FAILURE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_EXC_ERROR_MEMORY_FAILURE);
case VCARD7816_STATUS_ERROR_WRONG_LENGTH:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_WRONG_LENGTH);
case VCARD7816_STATUS_ERROR_CLA_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_CLA_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_CHANNEL_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_CHANNEL_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_SECURE_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_SECURE_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_COMMAND_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_COMMAND_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_COMMAND_INCOMPATIBLE_WITH_FILE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_COMMAND_INCOMPATIBLE_WITH_FILE);
case VCARD7816_STATUS_ERROR_SECURITY_NOT_SATISFIED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_SECURITY_NOT_SATISFIED);
case VCARD7816_STATUS_ERROR_AUTHENTICATION_BLOCKED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_AUTHENTICATION_BLOCKED);
case VCARD7816_STATUS_ERROR_DATA_INVALID:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_DATA_INVALID);
case VCARD7816_STATUS_ERROR_CONDITION_NOT_SATISFIED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_CONDITION_NOT_SATISFIED);
case VCARD7816_STATUS_ERROR_DATA_NO_EF:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_DATA_NO_EF);
case VCARD7816_STATUS_ERROR_SM_OBJECT_MISSING:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_SM_OBJECT_MISSING);
case VCARD7816_STATUS_ERROR_SM_OBJECT_INCORRECT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_SM_OBJECT_INCORRECT);
case VCARD7816_STATUS_ERROR_WRONG_PARAMETERS:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_WRONG_PARAMETERS);
case VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_IN_DATA:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_IN_DATA);
case VCARD7816_STATUS_ERROR_FUNCTION_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_FUNCTION_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_FILE_NOT_FOUND:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_FILE_NOT_FOUND);
case VCARD7816_STATUS_ERROR_RECORD_NOT_FOUND:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_RECORD_NOT_FOUND);
case VCARD7816_STATUS_ERROR_NO_SPACE_FOR_FILE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_NO_SPACE_FOR_FILE);
case VCARD7816_STATUS_ERROR_LC_TLV_INCONSISTENT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_LC_TLV_INCONSISTENT);
case VCARD7816_STATUS_ERROR_P1_P2_INCORRECT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_P1_P2_INCORRECT);
case VCARD7816_STATUS_ERROR_LC_P1_P2_INCONSISTENT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_LC_P1_P2_INCONSISTENT);
case VCARD7816_STATUS_ERROR_DATA_NOT_FOUND:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_DATA_NOT_FOUND);
case VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_2:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_2);
case VCARD7816_STATUS_ERROR_INS_CODE_INVALID:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_INS_CODE_INVALID);
case VCARD7816_STATUS_ERROR_CLA_INVALID:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_CLA_INVALID);
case VCARD7816_STATUS_ERROR_GENERAL:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_GENERAL);
default:
response = vcard_response_new_status(status);
if (response == NULL) {
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_EXC_ERROR_MEMORY_FAILURE);
}
}
assert(response);
return response;
}
| {
"code": [],
"line_no": []
} | VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_RET_CORUPT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_BUF_END_BEFORE_LE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_INVALID_FILE_SELECTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_FCI_FORMAT_INVALID)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_CHANGE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_FILE_FILLED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_EXC_ERROR)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_EXC_ERROR_CHANGE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_EXC_ERROR_MEMORY_FAILURE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_WRONG_LENGTH)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_CLA_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_CHANNEL_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_SECURE_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_COMMAND_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(
VCARD7816_STATUS_ERROR_COMMAND_INCOMPATIBLE_WITH_FILE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_SECURITY_NOT_SATISFIED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_AUTHENTICATION_BLOCKED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_DATA_INVALID)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_CONDITION_NOT_SATISFIED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_DATA_NO_EF)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_SM_OBJECT_MISSING)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_SM_OBJECT_INCORRECT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_WRONG_PARAMETERS)
VCARD_RESPONSE_NEW_STATIC_STATUS(
VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_IN_DATA)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_FUNCTION_NOT_SUPPORTED)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_FILE_NOT_FOUND)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_RECORD_NOT_FOUND)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_NO_SPACE_FOR_FILE)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_LC_TLV_INCONSISTENT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_P1_P2_INCORRECT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_LC_P1_P2_INCONSISTENT)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_DATA_NOT_FOUND)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_2)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_INS_CODE_INVALID)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_CLA_INVALID)
VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_ERROR_GENERAL)
VCardResponse *
vcard_make_response(vcard_7816_status_t status)
{
VCardResponse *response = NULL;
switch (status) {
case VCARD7816_STATUS_SUCCESS:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_SUCCESS);
case VCARD7816_STATUS_WARNING:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING);
case VCARD7816_STATUS_WARNING_RET_CORUPT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_RET_CORUPT);
case VCARD7816_STATUS_WARNING_BUF_END_BEFORE_LE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_BUF_END_BEFORE_LE);
case VCARD7816_STATUS_WARNING_INVALID_FILE_SELECTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_INVALID_FILE_SELECTED);
case VCARD7816_STATUS_WARNING_FCI_FORMAT_INVALID:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_FCI_FORMAT_INVALID);
case VCARD7816_STATUS_WARNING_CHANGE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_CHANGE);
case VCARD7816_STATUS_WARNING_FILE_FILLED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_WARNING_FILE_FILLED);
case VCARD7816_STATUS_EXC_ERROR:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_EXC_ERROR);
case VCARD7816_STATUS_EXC_ERROR_CHANGE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_EXC_ERROR_CHANGE);
case VCARD7816_STATUS_EXC_ERROR_MEMORY_FAILURE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_EXC_ERROR_MEMORY_FAILURE);
case VCARD7816_STATUS_ERROR_WRONG_LENGTH:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_WRONG_LENGTH);
case VCARD7816_STATUS_ERROR_CLA_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_CLA_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_CHANNEL_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_CHANNEL_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_SECURE_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_SECURE_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_COMMAND_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_COMMAND_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_COMMAND_INCOMPATIBLE_WITH_FILE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_COMMAND_INCOMPATIBLE_WITH_FILE);
case VCARD7816_STATUS_ERROR_SECURITY_NOT_SATISFIED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_SECURITY_NOT_SATISFIED);
case VCARD7816_STATUS_ERROR_AUTHENTICATION_BLOCKED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_AUTHENTICATION_BLOCKED);
case VCARD7816_STATUS_ERROR_DATA_INVALID:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_DATA_INVALID);
case VCARD7816_STATUS_ERROR_CONDITION_NOT_SATISFIED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_CONDITION_NOT_SATISFIED);
case VCARD7816_STATUS_ERROR_DATA_NO_EF:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_DATA_NO_EF);
case VCARD7816_STATUS_ERROR_SM_OBJECT_MISSING:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_SM_OBJECT_MISSING);
case VCARD7816_STATUS_ERROR_SM_OBJECT_INCORRECT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_SM_OBJECT_INCORRECT);
case VCARD7816_STATUS_ERROR_WRONG_PARAMETERS:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_WRONG_PARAMETERS);
case VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_IN_DATA:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_IN_DATA);
case VCARD7816_STATUS_ERROR_FUNCTION_NOT_SUPPORTED:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_FUNCTION_NOT_SUPPORTED);
case VCARD7816_STATUS_ERROR_FILE_NOT_FOUND:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_FILE_NOT_FOUND);
case VCARD7816_STATUS_ERROR_RECORD_NOT_FOUND:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_RECORD_NOT_FOUND);
case VCARD7816_STATUS_ERROR_NO_SPACE_FOR_FILE:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_NO_SPACE_FOR_FILE);
case VCARD7816_STATUS_ERROR_LC_TLV_INCONSISTENT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_LC_TLV_INCONSISTENT);
case VCARD7816_STATUS_ERROR_P1_P2_INCORRECT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_P1_P2_INCORRECT);
case VCARD7816_STATUS_ERROR_LC_P1_P2_INCONSISTENT:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_LC_P1_P2_INCONSISTENT);
case VCARD7816_STATUS_ERROR_DATA_NOT_FOUND:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_DATA_NOT_FOUND);
case VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_2:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_WRONG_PARAMETERS_2);
case VCARD7816_STATUS_ERROR_INS_CODE_INVALID:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_INS_CODE_INVALID);
case VCARD7816_STATUS_ERROR_CLA_INVALID:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_CLA_INVALID);
case VCARD7816_STATUS_ERROR_GENERAL:
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_ERROR_GENERAL);
default:
response = vcard_response_new_status(status);
if (response == NULL) {
return VCARD_RESPONSE_GET_STATIC(
VCARD7816_STATUS_EXC_ERROR_MEMORY_FAILURE);
}
}
assert(response);
return response;
}
| [
"VCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING)\nVCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_RET_CORUPT)\nVCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_BUF_END_BEFORE_LE)\nVCARD_RESPONSE_NEW_STATIC_STATUS(VCARD7816_STATUS_WARNING_INVALID_FILE_SELECTED)\nVCARD_RESPONSE_NEW_STATI... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7,
9,
11,
13,
15,
17,
19,
21,
23,
25,
27,
29,
31,
33,
35,
37,
39,
41,
43,
45,
47,
49,
51,
53,
55,
57,
59,
61,
63,
65,
67,
69,
71,
73,
75,
77,
89,... |
13,174 | static inline void dv_decode_video_segment(DVVideoContext *s,
uint8_t *buf_ptr1,
const uint16_t *mb_pos_ptr)
{
int quant, dc, dct_mode, class1, j;
int mb_index, mb_x, mb_y, v, last_index;
DCTELEM *block, *block1;
int c_offset;
uint8_t *y_ptr;
void (*idct_put)(uint8_t *dest, int line_size, DCTELEM *block);
uint8_t *buf_ptr;
PutBitContext pb, vs_pb;
GetBitContext gb;
BlockInfo mb_data[5 * 6], *mb, *mb1;
DCTELEM sblock[5*6][64] __align8;
uint8_t mb_bit_buffer[80 + 4]; /* allow some slack */
uint8_t vs_bit_buffer[5 * 80 + 4]; /* allow some slack */
memset(sblock, 0, sizeof(sblock));
/* pass 1 : read DC and AC coefficients in blocks */
buf_ptr = buf_ptr1;
block1 = &sblock[0][0];
mb1 = mb_data;
init_put_bits(&vs_pb, vs_bit_buffer, 5 * 80);
for(mb_index = 0; mb_index < 5; mb_index++, mb1 += 6, block1 += 6 * 64) {
/* skip header */
quant = buf_ptr[3] & 0x0f;
buf_ptr += 4;
init_put_bits(&pb, mb_bit_buffer, 80);
mb = mb1;
block = block1;
for(j = 0;j < 6; j++) {
last_index = block_sizes[j];
init_get_bits(&gb, buf_ptr, last_index);
/* get the dc */
dc = get_sbits(&gb, 9);
dct_mode = get_bits1(&gb);
mb->dct_mode = dct_mode;
mb->scan_table = s->dv_zigzag[dct_mode];
class1 = get_bits(&gb, 2);
mb->shift_table = s->dv_idct_shift[class1 == 3][dct_mode]
[quant + dv_quant_offset[class1]];
dc = dc << 2;
/* convert to unsigned because 128 is not added in the
standard IDCT */
dc += 1024;
block[0] = dc;
buf_ptr += last_index >> 3;
mb->pos = 0;
mb->partial_bit_count = 0;
#ifdef VLC_DEBUG
printf("MB block: %d, %d ", mb_index, j);
#endif
dv_decode_ac(&gb, mb, block);
/* write the remaining bits in a new buffer only if the
block is finished */
if (mb->pos >= 64)
bit_copy(&pb, &gb);
block += 64;
mb++;
}
/* pass 2 : we can do it just after */
#ifdef VLC_DEBUG
printf("***pass 2 size=%d MB#=%d\n", put_bits_count(&pb), mb_index);
#endif
block = block1;
mb = mb1;
init_get_bits(&gb, mb_bit_buffer, put_bits_count(&pb));
flush_put_bits(&pb);
for(j = 0;j < 6; j++, block += 64, mb++) {
if (mb->pos < 64 && get_bits_left(&gb) > 0) {
dv_decode_ac(&gb, mb, block);
/* if still not finished, no need to parse other blocks */
if (mb->pos < 64)
break;
}
}
/* all blocks are finished, so the extra bytes can be used at
the video segment level */
if (j >= 6)
bit_copy(&vs_pb, &gb);
}
/* we need a pass other the whole video segment */
#ifdef VLC_DEBUG
printf("***pass 3 size=%d\n", put_bits_count(&vs_pb));
#endif
block = &sblock[0][0];
mb = mb_data;
init_get_bits(&gb, vs_bit_buffer, put_bits_count(&vs_pb));
flush_put_bits(&vs_pb);
for(mb_index = 0; mb_index < 5; mb_index++) {
for(j = 0;j < 6; j++) {
if (mb->pos < 64) {
#ifdef VLC_DEBUG
printf("start %d:%d\n", mb_index, j);
#endif
dv_decode_ac(&gb, mb, block);
}
if (mb->pos >= 64 && mb->pos < 127)
av_log(NULL, AV_LOG_ERROR, "AC EOB marker is absent pos=%d\n", mb->pos);
block += 64;
mb++;
}
}
/* compute idct and place blocks */
block = &sblock[0][0];
mb = mb_data;
for(mb_index = 0; mb_index < 5; mb_index++) {
v = *mb_pos_ptr++;
mb_x = v & 0xff;
mb_y = v >> 8;
y_ptr = s->picture.data[0] + (mb_y * s->picture.linesize[0] * 8) + (mb_x * 8);
if (s->sys->pix_fmt == PIX_FMT_YUV411P)
c_offset = (mb_y * s->picture.linesize[1] * 8) + ((mb_x >> 2) * 8);
else
c_offset = ((mb_y >> 1) * s->picture.linesize[1] * 8) + ((mb_x >> 1) * 8);
for(j = 0;j < 6; j++) {
idct_put = s->idct_put[mb->dct_mode];
if (j < 4) {
if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x < (704 / 8)) {
/* NOTE: at end of line, the macroblock is handled as 420 */
idct_put(y_ptr + (j * 8), s->picture.linesize[0], block);
} else {
idct_put(y_ptr + ((j & 1) * 8) + ((j >> 1) * 8 * s->picture.linesize[0]),
s->picture.linesize[0], block);
}
} else {
if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
uint64_t aligned_pixels[64/8];
uint8_t *pixels= (uint8_t*)aligned_pixels;
uint8_t *c_ptr, *c_ptr1, *ptr;
int y, linesize;
/* NOTE: at end of line, the macroblock is handled as 420 */
idct_put(pixels, 8, block);
linesize = s->picture.linesize[6 - j];
c_ptr = s->picture.data[6 - j] + c_offset;
ptr = pixels;
for(y = 0;y < 8; y++) {
/* convert to 411P */
c_ptr1 = c_ptr + 8*linesize;
c_ptr[0]= ptr[0]; c_ptr1[0]= ptr[4];
c_ptr[1]= ptr[1]; c_ptr1[1]= ptr[5];
c_ptr[2]= ptr[2]; c_ptr1[2]= ptr[6];
c_ptr[3]= ptr[3]; c_ptr1[3]= ptr[7];
c_ptr += linesize;
ptr += 8;
}
} else {
/* don't ask me why they inverted Cb and Cr ! */
idct_put(s->picture.data[6 - j] + c_offset,
s->picture.linesize[6 - j], block);
}
}
block += 64;
mb++;
}
}
}
| false | FFmpeg | c619ff6daf93a8f3c03decf2d3345d2474c3db91 | static inline void dv_decode_video_segment(DVVideoContext *s,
uint8_t *buf_ptr1,
const uint16_t *mb_pos_ptr)
{
int quant, dc, dct_mode, class1, j;
int mb_index, mb_x, mb_y, v, last_index;
DCTELEM *block, *block1;
int c_offset;
uint8_t *y_ptr;
void (*idct_put)(uint8_t *dest, int line_size, DCTELEM *block);
uint8_t *buf_ptr;
PutBitContext pb, vs_pb;
GetBitContext gb;
BlockInfo mb_data[5 * 6], *mb, *mb1;
DCTELEM sblock[5*6][64] __align8;
uint8_t mb_bit_buffer[80 + 4];
uint8_t vs_bit_buffer[5 * 80 + 4];
memset(sblock, 0, sizeof(sblock));
buf_ptr = buf_ptr1;
block1 = &sblock[0][0];
mb1 = mb_data;
init_put_bits(&vs_pb, vs_bit_buffer, 5 * 80);
for(mb_index = 0; mb_index < 5; mb_index++, mb1 += 6, block1 += 6 * 64) {
quant = buf_ptr[3] & 0x0f;
buf_ptr += 4;
init_put_bits(&pb, mb_bit_buffer, 80);
mb = mb1;
block = block1;
for(j = 0;j < 6; j++) {
last_index = block_sizes[j];
init_get_bits(&gb, buf_ptr, last_index);
dc = get_sbits(&gb, 9);
dct_mode = get_bits1(&gb);
mb->dct_mode = dct_mode;
mb->scan_table = s->dv_zigzag[dct_mode];
class1 = get_bits(&gb, 2);
mb->shift_table = s->dv_idct_shift[class1 == 3][dct_mode]
[quant + dv_quant_offset[class1]];
dc = dc << 2;
dc += 1024;
block[0] = dc;
buf_ptr += last_index >> 3;
mb->pos = 0;
mb->partial_bit_count = 0;
#ifdef VLC_DEBUG
printf("MB block: %d, %d ", mb_index, j);
#endif
dv_decode_ac(&gb, mb, block);
if (mb->pos >= 64)
bit_copy(&pb, &gb);
block += 64;
mb++;
}
#ifdef VLC_DEBUG
printf("***pass 2 size=%d MB#=%d\n", put_bits_count(&pb), mb_index);
#endif
block = block1;
mb = mb1;
init_get_bits(&gb, mb_bit_buffer, put_bits_count(&pb));
flush_put_bits(&pb);
for(j = 0;j < 6; j++, block += 64, mb++) {
if (mb->pos < 64 && get_bits_left(&gb) > 0) {
dv_decode_ac(&gb, mb, block);
if (mb->pos < 64)
break;
}
}
if (j >= 6)
bit_copy(&vs_pb, &gb);
}
#ifdef VLC_DEBUG
printf("***pass 3 size=%d\n", put_bits_count(&vs_pb));
#endif
block = &sblock[0][0];
mb = mb_data;
init_get_bits(&gb, vs_bit_buffer, put_bits_count(&vs_pb));
flush_put_bits(&vs_pb);
for(mb_index = 0; mb_index < 5; mb_index++) {
for(j = 0;j < 6; j++) {
if (mb->pos < 64) {
#ifdef VLC_DEBUG
printf("start %d:%d\n", mb_index, j);
#endif
dv_decode_ac(&gb, mb, block);
}
if (mb->pos >= 64 && mb->pos < 127)
av_log(NULL, AV_LOG_ERROR, "AC EOB marker is absent pos=%d\n", mb->pos);
block += 64;
mb++;
}
}
block = &sblock[0][0];
mb = mb_data;
for(mb_index = 0; mb_index < 5; mb_index++) {
v = *mb_pos_ptr++;
mb_x = v & 0xff;
mb_y = v >> 8;
y_ptr = s->picture.data[0] + (mb_y * s->picture.linesize[0] * 8) + (mb_x * 8);
if (s->sys->pix_fmt == PIX_FMT_YUV411P)
c_offset = (mb_y * s->picture.linesize[1] * 8) + ((mb_x >> 2) * 8);
else
c_offset = ((mb_y >> 1) * s->picture.linesize[1] * 8) + ((mb_x >> 1) * 8);
for(j = 0;j < 6; j++) {
idct_put = s->idct_put[mb->dct_mode];
if (j < 4) {
if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x < (704 / 8)) {
idct_put(y_ptr + (j * 8), s->picture.linesize[0], block);
} else {
idct_put(y_ptr + ((j & 1) * 8) + ((j >> 1) * 8 * s->picture.linesize[0]),
s->picture.linesize[0], block);
}
} else {
if (s->sys->pix_fmt == PIX_FMT_YUV411P && mb_x >= (704 / 8)) {
uint64_t aligned_pixels[64/8];
uint8_t *pixels= (uint8_t*)aligned_pixels;
uint8_t *c_ptr, *c_ptr1, *ptr;
int y, linesize;
idct_put(pixels, 8, block);
linesize = s->picture.linesize[6 - j];
c_ptr = s->picture.data[6 - j] + c_offset;
ptr = pixels;
for(y = 0;y < 8; y++) {
c_ptr1 = c_ptr + 8*linesize;
c_ptr[0]= ptr[0]; c_ptr1[0]= ptr[4];
c_ptr[1]= ptr[1]; c_ptr1[1]= ptr[5];
c_ptr[2]= ptr[2]; c_ptr1[2]= ptr[6];
c_ptr[3]= ptr[3]; c_ptr1[3]= ptr[7];
c_ptr += linesize;
ptr += 8;
}
} else {
idct_put(s->picture.data[6 - j] + c_offset,
s->picture.linesize[6 - j], block);
}
}
block += 64;
mb++;
}
}
}
| {
"code": [],
"line_no": []
} | static inline void FUNC_0(DVVideoContext *VAR_0,
uint8_t *VAR_1,
const uint16_t *VAR_2)
{
int VAR_3, VAR_4, VAR_5, VAR_6, VAR_7;
int VAR_8, VAR_9, VAR_10, VAR_11, VAR_12;
DCTELEM *VAR_17, *block1;
int VAR_13;
uint8_t *y_ptr;
void (*VAR_14)(uint8_t *VAR_15, int VAR_16, DCTELEM *VAR_17);
uint8_t *buf_ptr;
PutBitContext pb, vs_pb;
GetBitContext gb;
BlockInfo mb_data[5 * 6], *mb, *mb1;
DCTELEM sblock[5*6][64] __align8;
uint8_t mb_bit_buffer[80 + 4];
uint8_t vs_bit_buffer[5 * 80 + 4];
memset(sblock, 0, sizeof(sblock));
buf_ptr = VAR_1;
block1 = &sblock[0][0];
mb1 = mb_data;
init_put_bits(&vs_pb, vs_bit_buffer, 5 * 80);
for(VAR_8 = 0; VAR_8 < 5; VAR_8++, mb1 += 6, block1 += 6 * 64) {
VAR_3 = buf_ptr[3] & 0x0f;
buf_ptr += 4;
init_put_bits(&pb, mb_bit_buffer, 80);
mb = mb1;
VAR_17 = block1;
for(VAR_7 = 0;VAR_7 < 6; VAR_7++) {
VAR_12 = block_sizes[VAR_7];
init_get_bits(&gb, buf_ptr, VAR_12);
VAR_4 = get_sbits(&gb, 9);
VAR_5 = get_bits1(&gb);
mb->VAR_5 = VAR_5;
mb->scan_table = VAR_0->dv_zigzag[VAR_5];
VAR_6 = get_bits(&gb, 2);
mb->shift_table = VAR_0->dv_idct_shift[VAR_6 == 3][VAR_5]
[VAR_3 + dv_quant_offset[VAR_6]];
VAR_4 = VAR_4 << 2;
VAR_4 += 1024;
VAR_17[0] = VAR_4;
buf_ptr += VAR_12 >> 3;
mb->pos = 0;
mb->partial_bit_count = 0;
#ifdef VLC_DEBUG
printf("MB VAR_17: %d, %d ", VAR_8, VAR_7);
#endif
dv_decode_ac(&gb, mb, VAR_17);
if (mb->pos >= 64)
bit_copy(&pb, &gb);
VAR_17 += 64;
mb++;
}
#ifdef VLC_DEBUG
printf("***pass 2 size=%d MB#=%d\n", put_bits_count(&pb), VAR_8);
#endif
VAR_17 = block1;
mb = mb1;
init_get_bits(&gb, mb_bit_buffer, put_bits_count(&pb));
flush_put_bits(&pb);
for(VAR_7 = 0;VAR_7 < 6; VAR_7++, VAR_17 += 64, mb++) {
if (mb->pos < 64 && get_bits_left(&gb) > 0) {
dv_decode_ac(&gb, mb, VAR_17);
if (mb->pos < 64)
break;
}
}
if (VAR_7 >= 6)
bit_copy(&vs_pb, &gb);
}
#ifdef VLC_DEBUG
printf("***pass 3 size=%d\n", put_bits_count(&vs_pb));
#endif
VAR_17 = &sblock[0][0];
mb = mb_data;
init_get_bits(&gb, vs_bit_buffer, put_bits_count(&vs_pb));
flush_put_bits(&vs_pb);
for(VAR_8 = 0; VAR_8 < 5; VAR_8++) {
for(VAR_7 = 0;VAR_7 < 6; VAR_7++) {
if (mb->pos < 64) {
#ifdef VLC_DEBUG
printf("start %d:%d\n", VAR_8, VAR_7);
#endif
dv_decode_ac(&gb, mb, VAR_17);
}
if (mb->pos >= 64 && mb->pos < 127)
av_log(NULL, AV_LOG_ERROR, "AC EOB marker is absent pos=%d\n", mb->pos);
VAR_17 += 64;
mb++;
}
}
VAR_17 = &sblock[0][0];
mb = mb_data;
for(VAR_8 = 0; VAR_8 < 5; VAR_8++) {
VAR_11 = *VAR_2++;
VAR_9 = VAR_11 & 0xff;
VAR_10 = VAR_11 >> 8;
y_ptr = VAR_0->picture.data[0] + (VAR_10 * VAR_0->picture.VAR_19[0] * 8) + (VAR_9 * 8);
if (VAR_0->sys->pix_fmt == PIX_FMT_YUV411P)
VAR_13 = (VAR_10 * VAR_0->picture.VAR_19[1] * 8) + ((VAR_9 >> 2) * 8);
else
VAR_13 = ((VAR_10 >> 1) * VAR_0->picture.VAR_19[1] * 8) + ((VAR_9 >> 1) * 8);
for(VAR_7 = 0;VAR_7 < 6; VAR_7++) {
VAR_14 = VAR_0->VAR_14[mb->VAR_5];
if (VAR_7 < 4) {
if (VAR_0->sys->pix_fmt == PIX_FMT_YUV411P && VAR_9 < (704 / 8)) {
VAR_14(y_ptr + (VAR_7 * 8), VAR_0->picture.VAR_19[0], VAR_17);
} else {
VAR_14(y_ptr + ((VAR_7 & 1) * 8) + ((VAR_7 >> 1) * 8 * VAR_0->picture.VAR_19[0]),
VAR_0->picture.VAR_19[0], VAR_17);
}
} else {
if (VAR_0->sys->pix_fmt == PIX_FMT_YUV411P && VAR_9 >= (704 / 8)) {
uint64_t aligned_pixels[64/8];
uint8_t *pixels= (uint8_t*)aligned_pixels;
uint8_t *c_ptr, *c_ptr1, *ptr;
int VAR_18, VAR_19;
VAR_14(pixels, 8, VAR_17);
VAR_19 = VAR_0->picture.VAR_19[6 - VAR_7];
c_ptr = VAR_0->picture.data[6 - VAR_7] + VAR_13;
ptr = pixels;
for(VAR_18 = 0;VAR_18 < 8; VAR_18++) {
c_ptr1 = c_ptr + 8*VAR_19;
c_ptr[0]= ptr[0]; c_ptr1[0]= ptr[4];
c_ptr[1]= ptr[1]; c_ptr1[1]= ptr[5];
c_ptr[2]= ptr[2]; c_ptr1[2]= ptr[6];
c_ptr[3]= ptr[3]; c_ptr1[3]= ptr[7];
c_ptr += VAR_19;
ptr += 8;
}
} else {
VAR_14(VAR_0->picture.data[6 - VAR_7] + VAR_13,
VAR_0->picture.VAR_19[6 - VAR_7], VAR_17);
}
}
VAR_17 += 64;
mb++;
}
}
}
| [
"static inline void FUNC_0(DVVideoContext *VAR_0,\nuint8_t *VAR_1,\nconst uint16_t *VAR_2)\n{",
"int VAR_3, VAR_4, VAR_5, VAR_6, VAR_7;",
"int VAR_8, VAR_9, VAR_10, VAR_11, VAR_12;",
"DCTELEM *VAR_17, *block1;",
"int VAR_13;",
"uint8_t *y_ptr;",
"void (*VAR_14)(uint8_t *VAR_15, int VAR_16, DCTELEM *VAR_... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
[
1,
3,
5,
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
37
],
[
43
],
[
45
],
[
47
],
[
49
... |
13,175 | BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs, Error **errp)
{
ImageInfo **p_image_info;
BlockDriverState *bs0;
BlockDeviceInfo *info = g_malloc0(sizeof(*info));
info->file = g_strdup(bs->filename);
info->ro = bs->read_only;
info->drv = g_strdup(bs->drv->format_name);
info->encrypted = bs->encrypted;
info->encryption_key_missing = bdrv_key_required(bs);
info->cache = g_new(BlockdevCacheInfo, 1);
*info->cache = (BlockdevCacheInfo) {
.writeback = bdrv_enable_write_cache(bs),
.direct = !!(bs->open_flags & BDRV_O_NOCACHE),
.no_flush = !!(bs->open_flags & BDRV_O_NO_FLUSH),
};
if (bs->node_name[0]) {
info->has_node_name = true;
info->node_name = g_strdup(bs->node_name);
}
if (bs->backing_file[0]) {
info->has_backing_file = true;
info->backing_file = g_strdup(bs->backing_file);
}
info->backing_file_depth = bdrv_get_backing_file_depth(bs);
info->detect_zeroes = bs->detect_zeroes;
if (bs->io_limits_enabled) {
ThrottleConfig cfg;
throttle_group_get_config(bs, &cfg);
info->bps = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg;
info->bps_wr = cfg.buckets[THROTTLE_BPS_WRITE].avg;
info->iops = cfg.buckets[THROTTLE_OPS_TOTAL].avg;
info->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg;
info->iops_wr = cfg.buckets[THROTTLE_OPS_WRITE].avg;
info->has_bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
info->bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
info->has_bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
info->bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
info->has_bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;
info->bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;
info->has_iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
info->iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
info->has_iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
info->iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
info->has_iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
info->iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
info->has_iops_size = cfg.op_size;
info->iops_size = cfg.op_size;
info->has_group = true;
info->group = g_strdup(throttle_group_get_name(bs));
}
info->write_threshold = bdrv_write_threshold_get(bs);
bs0 = bs;
p_image_info = &info->image;
while (1) {
Error *local_err = NULL;
bdrv_query_image_info(bs0, p_image_info, &local_err);
if (local_err) {
error_propagate(errp, local_err);
qapi_free_BlockDeviceInfo(info);
return NULL;
}
if (bs0->drv && bs0->backing) {
bs0 = bs0->backing->bs;
(*p_image_info)->has_backing_image = true;
p_image_info = &((*p_image_info)->backing_image);
} else {
break;
}
}
return info;
}
| false | qemu | a0d64a61db602696f4f1895a890c65eda5b3b618 | BlockDeviceInfo *bdrv_block_device_info(BlockDriverState *bs, Error **errp)
{
ImageInfo **p_image_info;
BlockDriverState *bs0;
BlockDeviceInfo *info = g_malloc0(sizeof(*info));
info->file = g_strdup(bs->filename);
info->ro = bs->read_only;
info->drv = g_strdup(bs->drv->format_name);
info->encrypted = bs->encrypted;
info->encryption_key_missing = bdrv_key_required(bs);
info->cache = g_new(BlockdevCacheInfo, 1);
*info->cache = (BlockdevCacheInfo) {
.writeback = bdrv_enable_write_cache(bs),
.direct = !!(bs->open_flags & BDRV_O_NOCACHE),
.no_flush = !!(bs->open_flags & BDRV_O_NO_FLUSH),
};
if (bs->node_name[0]) {
info->has_node_name = true;
info->node_name = g_strdup(bs->node_name);
}
if (bs->backing_file[0]) {
info->has_backing_file = true;
info->backing_file = g_strdup(bs->backing_file);
}
info->backing_file_depth = bdrv_get_backing_file_depth(bs);
info->detect_zeroes = bs->detect_zeroes;
if (bs->io_limits_enabled) {
ThrottleConfig cfg;
throttle_group_get_config(bs, &cfg);
info->bps = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg;
info->bps_wr = cfg.buckets[THROTTLE_BPS_WRITE].avg;
info->iops = cfg.buckets[THROTTLE_OPS_TOTAL].avg;
info->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg;
info->iops_wr = cfg.buckets[THROTTLE_OPS_WRITE].avg;
info->has_bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
info->bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
info->has_bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
info->bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
info->has_bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;
info->bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;
info->has_iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
info->iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
info->has_iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
info->iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
info->has_iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
info->iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
info->has_iops_size = cfg.op_size;
info->iops_size = cfg.op_size;
info->has_group = true;
info->group = g_strdup(throttle_group_get_name(bs));
}
info->write_threshold = bdrv_write_threshold_get(bs);
bs0 = bs;
p_image_info = &info->image;
while (1) {
Error *local_err = NULL;
bdrv_query_image_info(bs0, p_image_info, &local_err);
if (local_err) {
error_propagate(errp, local_err);
qapi_free_BlockDeviceInfo(info);
return NULL;
}
if (bs0->drv && bs0->backing) {
bs0 = bs0->backing->bs;
(*p_image_info)->has_backing_image = true;
p_image_info = &((*p_image_info)->backing_image);
} else {
break;
}
}
return info;
}
| {
"code": [],
"line_no": []
} | BlockDeviceInfo *FUNC_0(BlockDriverState *bs, Error **errp)
{
ImageInfo **p_image_info;
BlockDriverState *bs0;
BlockDeviceInfo *info = g_malloc0(sizeof(*info));
info->file = g_strdup(bs->filename);
info->ro = bs->read_only;
info->drv = g_strdup(bs->drv->format_name);
info->encrypted = bs->encrypted;
info->encryption_key_missing = bdrv_key_required(bs);
info->cache = g_new(BlockdevCacheInfo, 1);
*info->cache = (BlockdevCacheInfo) {
.writeback = bdrv_enable_write_cache(bs),
.direct = !!(bs->open_flags & BDRV_O_NOCACHE),
.no_flush = !!(bs->open_flags & BDRV_O_NO_FLUSH),
};
if (bs->node_name[0]) {
info->has_node_name = true;
info->node_name = g_strdup(bs->node_name);
}
if (bs->backing_file[0]) {
info->has_backing_file = true;
info->backing_file = g_strdup(bs->backing_file);
}
info->backing_file_depth = bdrv_get_backing_file_depth(bs);
info->detect_zeroes = bs->detect_zeroes;
if (bs->io_limits_enabled) {
ThrottleConfig cfg;
throttle_group_get_config(bs, &cfg);
info->bps = cfg.buckets[THROTTLE_BPS_TOTAL].avg;
info->bps_rd = cfg.buckets[THROTTLE_BPS_READ].avg;
info->bps_wr = cfg.buckets[THROTTLE_BPS_WRITE].avg;
info->iops = cfg.buckets[THROTTLE_OPS_TOTAL].avg;
info->iops_rd = cfg.buckets[THROTTLE_OPS_READ].avg;
info->iops_wr = cfg.buckets[THROTTLE_OPS_WRITE].avg;
info->has_bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
info->bps_max = cfg.buckets[THROTTLE_BPS_TOTAL].max;
info->has_bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
info->bps_rd_max = cfg.buckets[THROTTLE_BPS_READ].max;
info->has_bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;
info->bps_wr_max = cfg.buckets[THROTTLE_BPS_WRITE].max;
info->has_iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
info->iops_max = cfg.buckets[THROTTLE_OPS_TOTAL].max;
info->has_iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
info->iops_rd_max = cfg.buckets[THROTTLE_OPS_READ].max;
info->has_iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
info->iops_wr_max = cfg.buckets[THROTTLE_OPS_WRITE].max;
info->has_iops_size = cfg.op_size;
info->iops_size = cfg.op_size;
info->has_group = true;
info->group = g_strdup(throttle_group_get_name(bs));
}
info->write_threshold = bdrv_write_threshold_get(bs);
bs0 = bs;
p_image_info = &info->image;
while (1) {
Error *local_err = NULL;
bdrv_query_image_info(bs0, p_image_info, &local_err);
if (local_err) {
error_propagate(errp, local_err);
qapi_free_BlockDeviceInfo(info);
return NULL;
}
if (bs0->drv && bs0->backing) {
bs0 = bs0->backing->bs;
(*p_image_info)->has_backing_image = true;
p_image_info = &((*p_image_info)->backing_image);
} else {
break;
}
}
return info;
}
| [
"BlockDeviceInfo *FUNC_0(BlockDriverState *bs, Error **errp)\n{",
"ImageInfo **p_image_info;",
"BlockDriverState *bs0;",
"BlockDeviceInfo *info = g_malloc0(sizeof(*info));",
"info->file = g_strdup(bs->filename);",
"info->ro = bs->read_only;",
"info->drv ... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
25
],
[
27
],
[
29,
31,
33,
35
],
[
39
],
[
41
],
[
43
],
[
45
],
[
49
],
[
51
],
[... |
13,177 | void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size)
{
s->frame_start = start;
s->frame_end = start + size;
s->frame_reg = reg;
}
| false | qemu | b3a62939561e07bc34493444fa926b6137cba4e8 | void tcg_set_frame(TCGContext *s, int reg, intptr_t start, intptr_t size)
{
s->frame_start = start;
s->frame_end = start + size;
s->frame_reg = reg;
}
| {
"code": [],
"line_no": []
} | void FUNC_0(TCGContext *VAR_0, int VAR_1, intptr_t VAR_2, intptr_t VAR_3)
{
VAR_0->frame_start = VAR_2;
VAR_0->frame_end = VAR_2 + VAR_3;
VAR_0->frame_reg = VAR_1;
}
| [
"void FUNC_0(TCGContext *VAR_0, int VAR_1, intptr_t VAR_2, intptr_t VAR_3)\n{",
"VAR_0->frame_start = VAR_2;",
"VAR_0->frame_end = VAR_2 + VAR_3;",
"VAR_0->frame_reg = VAR_1;",
"}"
] | [
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
]
] |
13,178 | static int target_to_host_fcntl_cmd(int cmd)
{
switch(cmd) {
case TARGET_F_DUPFD:
case TARGET_F_GETFD:
case TARGET_F_SETFD:
case TARGET_F_GETFL:
case TARGET_F_SETFL:
return cmd;
case TARGET_F_GETLK:
return F_GETLK;
case TARGET_F_SETLK:
return F_SETLK;
case TARGET_F_SETLKW:
return F_SETLKW;
case TARGET_F_GETOWN:
return F_GETOWN;
case TARGET_F_SETOWN:
return F_SETOWN;
case TARGET_F_GETSIG:
return F_GETSIG;
case TARGET_F_SETSIG:
return F_SETSIG;
#if TARGET_ABI_BITS == 32
case TARGET_F_GETLK64:
return F_GETLK64;
case TARGET_F_SETLK64:
return F_SETLK64;
case TARGET_F_SETLKW64:
return F_SETLKW64;
#endif
case TARGET_F_SETLEASE:
return F_SETLEASE;
case TARGET_F_GETLEASE:
return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
case TARGET_F_DUPFD_CLOEXEC:
return F_DUPFD_CLOEXEC;
#endif
case TARGET_F_NOTIFY:
return F_NOTIFY;
#ifdef F_GETOWN_EX
case TARGET_F_GETOWN_EX:
return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
case TARGET_F_SETOWN_EX:
return F_SETOWN_EX;
#endif
default:
return -TARGET_EINVAL;
}
return -TARGET_EINVAL;
}
| false | qemu | 213d3e9ea27f7fc55db7272c05255294b52ed3e4 | static int target_to_host_fcntl_cmd(int cmd)
{
switch(cmd) {
case TARGET_F_DUPFD:
case TARGET_F_GETFD:
case TARGET_F_SETFD:
case TARGET_F_GETFL:
case TARGET_F_SETFL:
return cmd;
case TARGET_F_GETLK:
return F_GETLK;
case TARGET_F_SETLK:
return F_SETLK;
case TARGET_F_SETLKW:
return F_SETLKW;
case TARGET_F_GETOWN:
return F_GETOWN;
case TARGET_F_SETOWN:
return F_SETOWN;
case TARGET_F_GETSIG:
return F_GETSIG;
case TARGET_F_SETSIG:
return F_SETSIG;
#if TARGET_ABI_BITS == 32
case TARGET_F_GETLK64:
return F_GETLK64;
case TARGET_F_SETLK64:
return F_SETLK64;
case TARGET_F_SETLKW64:
return F_SETLKW64;
#endif
case TARGET_F_SETLEASE:
return F_SETLEASE;
case TARGET_F_GETLEASE:
return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
case TARGET_F_DUPFD_CLOEXEC:
return F_DUPFD_CLOEXEC;
#endif
case TARGET_F_NOTIFY:
return F_NOTIFY;
#ifdef F_GETOWN_EX
case TARGET_F_GETOWN_EX:
return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
case TARGET_F_SETOWN_EX:
return F_SETOWN_EX;
#endif
default:
return -TARGET_EINVAL;
}
return -TARGET_EINVAL;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(int VAR_0)
{
switch(VAR_0) {
case TARGET_F_DUPFD:
case TARGET_F_GETFD:
case TARGET_F_SETFD:
case TARGET_F_GETFL:
case TARGET_F_SETFL:
return VAR_0;
case TARGET_F_GETLK:
return F_GETLK;
case TARGET_F_SETLK:
return F_SETLK;
case TARGET_F_SETLKW:
return F_SETLKW;
case TARGET_F_GETOWN:
return F_GETOWN;
case TARGET_F_SETOWN:
return F_SETOWN;
case TARGET_F_GETSIG:
return F_GETSIG;
case TARGET_F_SETSIG:
return F_SETSIG;
#if TARGET_ABI_BITS == 32
case TARGET_F_GETLK64:
return F_GETLK64;
case TARGET_F_SETLK64:
return F_SETLK64;
case TARGET_F_SETLKW64:
return F_SETLKW64;
#endif
case TARGET_F_SETLEASE:
return F_SETLEASE;
case TARGET_F_GETLEASE:
return F_GETLEASE;
#ifdef F_DUPFD_CLOEXEC
case TARGET_F_DUPFD_CLOEXEC:
return F_DUPFD_CLOEXEC;
#endif
case TARGET_F_NOTIFY:
return F_NOTIFY;
#ifdef F_GETOWN_EX
case TARGET_F_GETOWN_EX:
return F_GETOWN_EX;
#endif
#ifdef F_SETOWN_EX
case TARGET_F_SETOWN_EX:
return F_SETOWN_EX;
#endif
default:
return -TARGET_EINVAL;
}
return -TARGET_EINVAL;
}
| [
"static int FUNC_0(int VAR_0)\n{",
"switch(VAR_0) {",
"case TARGET_F_DUPFD:\ncase TARGET_F_GETFD:\ncase TARGET_F_SETFD:\ncase TARGET_F_GETFL:\ncase TARGET_F_SETFL:\nreturn VAR_0;",
"case TARGET_F_GETLK:\nreturn F_GETLK;",
"case TARGET_F_SETLK:\nreturn F_SETLK;",
"case TARGET_F_SETLKW:\nreturn F_SETLKW;",
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7,
9,
11,
13,
15,
17
],
[
19,
21
],
[
23,
25
],
[
27,
29
],
[
31,
33
],
[
35,
37
],
[
39,
41
],
[
43,
45
],
[
47,
49,
51
],
[
53,
55
],
[... |
13,179 | static void test_io_channel_ipv6(bool async)
{
SocketAddress *listen_addr = g_new0(SocketAddress, 1);
SocketAddress *connect_addr = g_new0(SocketAddress, 1);
listen_addr->type = SOCKET_ADDRESS_KIND_INET;
listen_addr->u.inet = g_new(InetSocketAddress, 1);
*listen_addr->u.inet = (InetSocketAddress) {
.host = g_strdup("::1"),
.port = NULL, /* Auto-select */
};
connect_addr->type = SOCKET_ADDRESS_KIND_INET;
connect_addr->u.inet = g_new(InetSocketAddress, 1);
*connect_addr->u.inet = (InetSocketAddress) {
.host = g_strdup("::1"),
.port = NULL, /* Filled in later */
};
test_io_channel(async, listen_addr, connect_addr, false);
qapi_free_SocketAddress(listen_addr);
qapi_free_SocketAddress(connect_addr);
}
| false | qemu | 32bafa8fdd098d52fbf1102d5a5e48d29398c0aa | static void test_io_channel_ipv6(bool async)
{
SocketAddress *listen_addr = g_new0(SocketAddress, 1);
SocketAddress *connect_addr = g_new0(SocketAddress, 1);
listen_addr->type = SOCKET_ADDRESS_KIND_INET;
listen_addr->u.inet = g_new(InetSocketAddress, 1);
*listen_addr->u.inet = (InetSocketAddress) {
.host = g_strdup("::1"),
.port = NULL,
};
connect_addr->type = SOCKET_ADDRESS_KIND_INET;
connect_addr->u.inet = g_new(InetSocketAddress, 1);
*connect_addr->u.inet = (InetSocketAddress) {
.host = g_strdup("::1"),
.port = NULL,
};
test_io_channel(async, listen_addr, connect_addr, false);
qapi_free_SocketAddress(listen_addr);
qapi_free_SocketAddress(connect_addr);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(bool VAR_0)
{
SocketAddress *listen_addr = g_new0(SocketAddress, 1);
SocketAddress *connect_addr = g_new0(SocketAddress, 1);
listen_addr->type = SOCKET_ADDRESS_KIND_INET;
listen_addr->u.inet = g_new(InetSocketAddress, 1);
*listen_addr->u.inet = (InetSocketAddress) {
.host = g_strdup("::1"),
.port = NULL,
};
connect_addr->type = SOCKET_ADDRESS_KIND_INET;
connect_addr->u.inet = g_new(InetSocketAddress, 1);
*connect_addr->u.inet = (InetSocketAddress) {
.host = g_strdup("::1"),
.port = NULL,
};
test_io_channel(VAR_0, listen_addr, connect_addr, false);
qapi_free_SocketAddress(listen_addr);
qapi_free_SocketAddress(connect_addr);
}
| [
"static void FUNC_0(bool VAR_0)\n{",
"SocketAddress *listen_addr = g_new0(SocketAddress, 1);",
"SocketAddress *connect_addr = g_new0(SocketAddress, 1);",
"listen_addr->type = SOCKET_ADDRESS_KIND_INET;",
"listen_addr->u.inet = g_new(InetSocketAddress, 1);",
"*listen_addr->u.inet = (InetSocketAddress) {",
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
11
],
[
13
],
[
15
],
[
17,
19,
21
],
[
25
],
[
27
],
[
29
],
[
31,
33,
35
],
[
39
],
[
43
],
[
45
],
[
47
]
] |
13,181 | ssize_t nbd_receive_reply(QIOChannel *ioc, NBDReply *reply)
{
uint8_t buf[NBD_REPLY_SIZE];
uint32_t magic;
ssize_t ret;
ret = read_sync(ioc, buf, sizeof(buf));
if (ret <= 0) {
return ret;
}
if (ret != sizeof(buf)) {
LOG("read failed");
return -EINVAL;
}
/* Reply
[ 0 .. 3] magic (NBD_REPLY_MAGIC)
[ 4 .. 7] error (0 == no error)
[ 7 .. 15] handle
*/
magic = ldl_be_p(buf);
reply->error = ldl_be_p(buf + 4);
reply->handle = ldq_be_p(buf + 8);
reply->error = nbd_errno_to_system_errno(reply->error);
if (reply->error == ESHUTDOWN) {
/* This works even on mingw which lacks a native ESHUTDOWN */
LOG("server shutting down");
return -EINVAL;
}
TRACE("Got reply: { magic = 0x%" PRIx32 ", .error = % " PRId32
", handle = %" PRIu64" }",
magic, reply->error, reply->handle);
if (magic != NBD_REPLY_MAGIC) {
LOG("invalid magic (got 0x%" PRIx32 ")", magic);
return -EINVAL;
}
return 0;
}
| false | qemu | a12a712a7dfbd2e2f4882ef2c90a9b2162166dd7 | ssize_t nbd_receive_reply(QIOChannel *ioc, NBDReply *reply)
{
uint8_t buf[NBD_REPLY_SIZE];
uint32_t magic;
ssize_t ret;
ret = read_sync(ioc, buf, sizeof(buf));
if (ret <= 0) {
return ret;
}
if (ret != sizeof(buf)) {
LOG("read failed");
return -EINVAL;
}
magic = ldl_be_p(buf);
reply->error = ldl_be_p(buf + 4);
reply->handle = ldq_be_p(buf + 8);
reply->error = nbd_errno_to_system_errno(reply->error);
if (reply->error == ESHUTDOWN) {
LOG("server shutting down");
return -EINVAL;
}
TRACE("Got reply: { magic = 0x%" PRIx32 ", .error = % " PRId32
", handle = %" PRIu64" }",
magic, reply->error, reply->handle);
if (magic != NBD_REPLY_MAGIC) {
LOG("invalid magic (got 0x%" PRIx32 ")", magic);
return -EINVAL;
}
return 0;
}
| {
"code": [],
"line_no": []
} | ssize_t FUNC_0(QIOChannel *ioc, NBDReply *reply)
{
uint8_t buf[NBD_REPLY_SIZE];
uint32_t magic;
ssize_t ret;
ret = read_sync(ioc, buf, sizeof(buf));
if (ret <= 0) {
return ret;
}
if (ret != sizeof(buf)) {
LOG("read failed");
return -EINVAL;
}
magic = ldl_be_p(buf);
reply->error = ldl_be_p(buf + 4);
reply->handle = ldq_be_p(buf + 8);
reply->error = nbd_errno_to_system_errno(reply->error);
if (reply->error == ESHUTDOWN) {
LOG("server shutting down");
return -EINVAL;
}
TRACE("Got reply: { magic = 0x%" PRIx32 ", .error = % " PRId32
", handle = %" PRIu64" }",
magic, reply->error, reply->handle);
if (magic != NBD_REPLY_MAGIC) {
LOG("invalid magic (got 0x%" PRIx32 ")", magic);
return -EINVAL;
}
return 0;
}
| [
"ssize_t FUNC_0(QIOChannel *ioc, NBDReply *reply)\n{",
"uint8_t buf[NBD_REPLY_SIZE];",
"uint32_t magic;",
"ssize_t ret;",
"ret = read_sync(ioc, buf, sizeof(buf));",
"if (ret <= 0) {",
"return ret;",
"}",
"if (ret != sizeof(buf)) {",
"LOG(\"read failed\");",
"return -EINVAL;",
"}",
"magic = l... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
19
],
[
23
],
[
25
],
[
27
],
[
29
],
[
45
],
[
47
],
[
49
],
[
53
],
[
57
],
[
61
],
[
63
],
[
65
... |
13,183 | void qemu_map_cache_init(void)
{
unsigned long size;
struct rlimit rlimit_as;
mapcache = qemu_mallocz(sizeof (MapCache));
QTAILQ_INIT(&mapcache->locked_entries);
mapcache->last_address_index = -1;
getrlimit(RLIMIT_AS, &rlimit_as);
rlimit_as.rlim_cur = rlimit_as.rlim_max;
setrlimit(RLIMIT_AS, &rlimit_as);
mapcache->max_mcache_size = rlimit_as.rlim_max;
mapcache->nr_buckets =
(((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
(1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
(MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));
size = mapcache->nr_buckets * sizeof (MapCacheEntry);
size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
DPRINTF("qemu_map_cache_init, nr_buckets = %lx size %lu\n", mapcache->nr_buckets, size);
mapcache->entry = qemu_mallocz(size);
}
| false | qemu | ea6c5f8ffe6de12e04e63acbb9937683b30216e2 | void qemu_map_cache_init(void)
{
unsigned long size;
struct rlimit rlimit_as;
mapcache = qemu_mallocz(sizeof (MapCache));
QTAILQ_INIT(&mapcache->locked_entries);
mapcache->last_address_index = -1;
getrlimit(RLIMIT_AS, &rlimit_as);
rlimit_as.rlim_cur = rlimit_as.rlim_max;
setrlimit(RLIMIT_AS, &rlimit_as);
mapcache->max_mcache_size = rlimit_as.rlim_max;
mapcache->nr_buckets =
(((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
(1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
(MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));
size = mapcache->nr_buckets * sizeof (MapCacheEntry);
size = (size + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
DPRINTF("qemu_map_cache_init, nr_buckets = %lx size %lu\n", mapcache->nr_buckets, size);
mapcache->entry = qemu_mallocz(size);
}
| {
"code": [],
"line_no": []
} | void FUNC_0(void)
{
unsigned long VAR_0;
struct rlimit VAR_1;
mapcache = qemu_mallocz(sizeof (MapCache));
QTAILQ_INIT(&mapcache->locked_entries);
mapcache->last_address_index = -1;
getrlimit(RLIMIT_AS, &VAR_1);
VAR_1.rlim_cur = VAR_1.rlim_max;
setrlimit(RLIMIT_AS, &VAR_1);
mapcache->max_mcache_size = VAR_1.rlim_max;
mapcache->nr_buckets =
(((mapcache->max_mcache_size >> XC_PAGE_SHIFT) +
(1UL << (MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT)) - 1) >>
(MCACHE_BUCKET_SHIFT - XC_PAGE_SHIFT));
VAR_0 = mapcache->nr_buckets * sizeof (MapCacheEntry);
VAR_0 = (VAR_0 + XC_PAGE_SIZE - 1) & ~(XC_PAGE_SIZE - 1);
DPRINTF("FUNC_0, nr_buckets = %lx VAR_0 %lu\n", mapcache->nr_buckets, VAR_0);
mapcache->entry = qemu_mallocz(VAR_0);
}
| [
"void FUNC_0(void)\n{",
"unsigned long VAR_0;",
"struct rlimit VAR_1;",
"mapcache = qemu_mallocz(sizeof (MapCache));",
"QTAILQ_INIT(&mapcache->locked_entries);",
"mapcache->last_address_index = -1;",
"getrlimit(RLIMIT_AS, &VAR_1);",
"VAR_1.rlim_cur = VAR_1.rlim_max;",
"setrlimit(RLIMIT_AS, &VAR_1);"... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
11
],
[
15
],
[
17
],
[
21
],
[
23
],
[
25
],
[
27
],
[
31,
33,
35,
37
],
[
41
],
[
43
],
[
45
],
[
47
],
[
49
]
] |
13,184 | static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
switch (fccno) {
case 0:
gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
break;
case 1:
gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
break;
case 2:
gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
break;
case 3:
gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
break;
}
}
| false | qemu | 7385aed20db5d83979f683b9d0048674411e963c | static inline void gen_op_fcmpes(int fccno, TCGv_i32 r_rs1, TCGv_i32 r_rs2)
{
switch (fccno) {
case 0:
gen_helper_fcmpes(cpu_env, r_rs1, r_rs2);
break;
case 1:
gen_helper_fcmpes_fcc1(cpu_env, r_rs1, r_rs2);
break;
case 2:
gen_helper_fcmpes_fcc2(cpu_env, r_rs1, r_rs2);
break;
case 3:
gen_helper_fcmpes_fcc3(cpu_env, r_rs1, r_rs2);
break;
}
}
| {
"code": [],
"line_no": []
} | static inline void FUNC_0(int VAR_0, TCGv_i32 VAR_1, TCGv_i32 VAR_2)
{
switch (VAR_0) {
case 0:
gen_helper_fcmpes(cpu_env, VAR_1, VAR_2);
break;
case 1:
gen_helper_fcmpes_fcc1(cpu_env, VAR_1, VAR_2);
break;
case 2:
gen_helper_fcmpes_fcc2(cpu_env, VAR_1, VAR_2);
break;
case 3:
gen_helper_fcmpes_fcc3(cpu_env, VAR_1, VAR_2);
break;
}
}
| [
"static inline void FUNC_0(int VAR_0, TCGv_i32 VAR_1, TCGv_i32 VAR_2)\n{",
"switch (VAR_0) {",
"case 0:\ngen_helper_fcmpes(cpu_env, VAR_1, VAR_2);",
"break;",
"case 1:\ngen_helper_fcmpes_fcc1(cpu_env, VAR_1, VAR_2);",
"break;",
"case 2:\ngen_helper_fcmpes_fcc2(cpu_env, VAR_1, VAR_2);",
"break;",
"ca... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7,
9
],
[
11
],
[
13,
15
],
[
17
],
[
19,
21
],
[
23
],
[
25,
27
],
[
29
],
[
31
],
[
33
]
] |
13,185 | static inline int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){
int i, off;
uint32_t c;
const uint32_t* buf;
uint32_t* obuf = (uint32_t*) out;
/* FIXME: 64 bit platforms would be able to do 64 bits at a time.
* I'm too lazy though, should be something like
* for(i=0 ; i<bitamount/64 ; i++)
* (int64_t)out[i] = 0x37c511f237c511f2^av_be2ne64(int64_t)in[i]);
* Buffer alignment needs to be checked. */
off = (intptr_t)inbuffer & 3;
buf = (const uint32_t*) (inbuffer - off);
c = av_be2ne32((0x37c511f2 >> (off*8)) | (0x37c511f2 << (32-(off*8))));
bytes += 3 + off;
for (i = 0; i < bytes/4; i++)
obuf[i] = c ^ buf[i];
return off;
}
| false | FFmpeg | b7581b5c839d1e293bb9dc34352a76df9d3158a9 | static inline int decode_bytes(const uint8_t* inbuffer, uint8_t* out, int bytes){
int i, off;
uint32_t c;
const uint32_t* buf;
uint32_t* obuf = (uint32_t*) out;
off = (intptr_t)inbuffer & 3;
buf = (const uint32_t*) (inbuffer - off);
c = av_be2ne32((0x37c511f2 >> (off*8)) | (0x37c511f2 << (32-(off*8))));
bytes += 3 + off;
for (i = 0; i < bytes/4; i++)
obuf[i] = c ^ buf[i];
return off;
}
| {
"code": [],
"line_no": []
} | static inline int FUNC_0(const uint8_t* VAR_0, uint8_t* VAR_1, int VAR_2){
int VAR_3, VAR_4;
uint32_t c;
const uint32_t* VAR_5;
uint32_t* obuf = (uint32_t*) VAR_1;
VAR_4 = (intptr_t)VAR_0 & 3;
VAR_5 = (const uint32_t*) (VAR_0 - VAR_4);
c = av_be2ne32((0x37c511f2 >> (VAR_4*8)) | (0x37c511f2 << (32-(VAR_4*8))));
VAR_2 += 3 + VAR_4;
for (VAR_3 = 0; VAR_3 < VAR_2/4; VAR_3++)
obuf[VAR_3] = c ^ VAR_5[VAR_3];
return VAR_4;
}
| [
"static inline int FUNC_0(const uint8_t* VAR_0, uint8_t* VAR_1, int VAR_2){",
"int VAR_3, VAR_4;",
"uint32_t c;",
"const uint32_t* VAR_5;",
"uint32_t* obuf = (uint32_t*) VAR_1;",
"VAR_4 = (intptr_t)VAR_0 & 3;",
"VAR_5 = (const uint32_t*) (VAR_0 - VAR_4);",
"c = av_be2ne32((0x37c511f2 >> (VAR_4*8)) | (... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1
],
[
3
],
[
5
],
[
7
],
[
9
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
37
],
[
39
]
] |
13,187 | int net_init_socket(const Netdev *netdev, const char *name,
NetClientState *peer, Error **errp)
{
/* FIXME error_setg(errp, ...) on failure */
Error *err = NULL;
const NetdevSocketOptions *sock;
assert(netdev->type == NET_CLIENT_DRIVER_SOCKET);
sock = &netdev->u.socket;
if (sock->has_fd + sock->has_listen + sock->has_connect + sock->has_mcast +
sock->has_udp != 1) {
error_report("exactly one of fd=, listen=, connect=, mcast= or udp="
" is required");
return -1;
}
if (sock->has_localaddr && !sock->has_mcast && !sock->has_udp) {
error_report("localaddr= is only valid with mcast= or udp=");
return -1;
}
if (sock->has_fd) {
int fd;
fd = monitor_fd_param(cur_mon, sock->fd, &err);
if (fd == -1) {
error_report_err(err);
return -1;
}
qemu_set_nonblock(fd);
if (!net_socket_fd_init(peer, "socket", name, fd, 1)) {
return -1;
}
return 0;
}
if (sock->has_listen) {
if (net_socket_listen_init(peer, "socket", name, sock->listen) == -1) {
return -1;
}
return 0;
}
if (sock->has_connect) {
if (net_socket_connect_init(peer, "socket", name, sock->connect) ==
-1) {
return -1;
}
return 0;
}
if (sock->has_mcast) {
/* if sock->localaddr is missing, it has been initialized to "all bits
* zero" */
if (net_socket_mcast_init(peer, "socket", name, sock->mcast,
sock->localaddr) == -1) {
return -1;
}
return 0;
}
assert(sock->has_udp);
if (!sock->has_localaddr) {
error_report("localaddr= is mandatory with udp=");
return -1;
}
if (net_socket_udp_init(peer, "socket", name, sock->udp, sock->localaddr) ==
-1) {
return -1;
}
return 0;
}
| true | qemu | 0f8c289ad539feb5135c545bea947b310a893f4b | int net_init_socket(const Netdev *netdev, const char *name,
NetClientState *peer, Error **errp)
{
Error *err = NULL;
const NetdevSocketOptions *sock;
assert(netdev->type == NET_CLIENT_DRIVER_SOCKET);
sock = &netdev->u.socket;
if (sock->has_fd + sock->has_listen + sock->has_connect + sock->has_mcast +
sock->has_udp != 1) {
error_report("exactly one of fd=, listen=, connect=, mcast= or udp="
" is required");
return -1;
}
if (sock->has_localaddr && !sock->has_mcast && !sock->has_udp) {
error_report("localaddr= is only valid with mcast= or udp=");
return -1;
}
if (sock->has_fd) {
int fd;
fd = monitor_fd_param(cur_mon, sock->fd, &err);
if (fd == -1) {
error_report_err(err);
return -1;
}
qemu_set_nonblock(fd);
if (!net_socket_fd_init(peer, "socket", name, fd, 1)) {
return -1;
}
return 0;
}
if (sock->has_listen) {
if (net_socket_listen_init(peer, "socket", name, sock->listen) == -1) {
return -1;
}
return 0;
}
if (sock->has_connect) {
if (net_socket_connect_init(peer, "socket", name, sock->connect) ==
-1) {
return -1;
}
return 0;
}
if (sock->has_mcast) {
if (net_socket_mcast_init(peer, "socket", name, sock->mcast,
sock->localaddr) == -1) {
return -1;
}
return 0;
}
assert(sock->has_udp);
if (!sock->has_localaddr) {
error_report("localaddr= is mandatory with udp=");
return -1;
}
if (net_socket_udp_init(peer, "socket", name, sock->udp, sock->localaddr) ==
-1) {
return -1;
}
return 0;
}
| {
"code": [
" if (sock->has_fd + sock->has_listen + sock->has_connect + sock->has_mcast +",
" sock->has_udp != 1) {",
" error_report(\"exactly one of fd=, listen=, connect=, mcast= or udp=\"",
" if (!net_socket_fd_init(peer, \"socket\", name, fd, 1)) {"
],
"line_no": [
21,
23,
25,
63
]
} | int FUNC_0(const Netdev *VAR_0, const char *VAR_1,
NetClientState *VAR_2, Error **VAR_3)
{
Error *err = NULL;
const NetdevSocketOptions *VAR_4;
assert(VAR_0->type == NET_CLIENT_DRIVER_SOCKET);
VAR_4 = &VAR_0->u.socket;
if (VAR_4->has_fd + VAR_4->has_listen + VAR_4->has_connect + VAR_4->has_mcast +
VAR_4->has_udp != 1) {
error_report("exactly one of VAR_5=, listen=, connect=, mcast= or udp="
" is required");
return -1;
}
if (VAR_4->has_localaddr && !VAR_4->has_mcast && !VAR_4->has_udp) {
error_report("localaddr= is only valid with mcast= or udp=");
return -1;
}
if (VAR_4->has_fd) {
int VAR_5;
VAR_5 = monitor_fd_param(cur_mon, VAR_4->VAR_5, &err);
if (VAR_5 == -1) {
error_report_err(err);
return -1;
}
qemu_set_nonblock(VAR_5);
if (!net_socket_fd_init(VAR_2, "socket", VAR_1, VAR_5, 1)) {
return -1;
}
return 0;
}
if (VAR_4->has_listen) {
if (net_socket_listen_init(VAR_2, "socket", VAR_1, VAR_4->listen) == -1) {
return -1;
}
return 0;
}
if (VAR_4->has_connect) {
if (net_socket_connect_init(VAR_2, "socket", VAR_1, VAR_4->connect) ==
-1) {
return -1;
}
return 0;
}
if (VAR_4->has_mcast) {
if (net_socket_mcast_init(VAR_2, "socket", VAR_1, VAR_4->mcast,
VAR_4->localaddr) == -1) {
return -1;
}
return 0;
}
assert(VAR_4->has_udp);
if (!VAR_4->has_localaddr) {
error_report("localaddr= is mandatory with udp=");
return -1;
}
if (net_socket_udp_init(VAR_2, "socket", VAR_1, VAR_4->udp, VAR_4->localaddr) ==
-1) {
return -1;
}
return 0;
}
| [
"int FUNC_0(const Netdev *VAR_0, const char *VAR_1,\nNetClientState *VAR_2, Error **VAR_3)\n{",
"Error *err = NULL;",
"const NetdevSocketOptions *VAR_4;",
"assert(VAR_0->type == NET_CLIENT_DRIVER_SOCKET);",
"VAR_4 = &VAR_0->u.socket;",
"if (VAR_4->has_fd + VAR_4->has_listen + VAR_4->has_connect + VAR_4->h... | [
0,
0,
0,
0,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
9
],
[
11
],
[
15
],
[
17
],
[
21,
23
],
[
25,
27
],
[
29
],
[
31
],
[
35
],
[
37
],
[
39
],
[
41
],
[
45
],
[
47
],
[
51
],
[
53
],
[
55
],
... |
13,188 | static void isabus_fdc_realize(DeviceState *dev, Error **errp)
{
ISADevice *isadev = ISA_DEVICE(dev);
FDCtrlISABus *isa = ISA_FDC(dev);
FDCtrl *fdctrl = &isa->state;
Error *err = NULL;
isa_register_portio_list(isadev, isa->iobase, fdc_portio_list, fdctrl,
"fdc");
isa_init_irq(isadev, &fdctrl->irq, isa->irq);
fdctrl->dma_chann = isa->dma;
if (fdctrl->dma_chann != -1) {
fdctrl->dma = isa_get_dma(isa_bus_from_device(isadev), isa->dma);
assert(fdctrl->dma);
}
qdev_set_legacy_instance_id(dev, isa->iobase, 2);
fdctrl_realize_common(fdctrl, &err);
if (err != NULL) {
error_propagate(errp, err);
return;
}
}
| true | qemu | e305a16510afa74eec20390479e349402e55ef4c | static void isabus_fdc_realize(DeviceState *dev, Error **errp)
{
ISADevice *isadev = ISA_DEVICE(dev);
FDCtrlISABus *isa = ISA_FDC(dev);
FDCtrl *fdctrl = &isa->state;
Error *err = NULL;
isa_register_portio_list(isadev, isa->iobase, fdc_portio_list, fdctrl,
"fdc");
isa_init_irq(isadev, &fdctrl->irq, isa->irq);
fdctrl->dma_chann = isa->dma;
if (fdctrl->dma_chann != -1) {
fdctrl->dma = isa_get_dma(isa_bus_from_device(isadev), isa->dma);
assert(fdctrl->dma);
}
qdev_set_legacy_instance_id(dev, isa->iobase, 2);
fdctrl_realize_common(fdctrl, &err);
if (err != NULL) {
error_propagate(errp, err);
return;
}
}
| {
"code": [
" isa_register_portio_list(isadev, isa->iobase, fdc_portio_list, fdctrl,"
],
"line_no": [
15
]
} | static void FUNC_0(DeviceState *VAR_0, Error **VAR_1)
{
ISADevice *isadev = ISA_DEVICE(VAR_0);
FDCtrlISABus *isa = ISA_FDC(VAR_0);
FDCtrl *fdctrl = &isa->state;
Error *err = NULL;
isa_register_portio_list(isadev, isa->iobase, fdc_portio_list, fdctrl,
"fdc");
isa_init_irq(isadev, &fdctrl->irq, isa->irq);
fdctrl->dma_chann = isa->dma;
if (fdctrl->dma_chann != -1) {
fdctrl->dma = isa_get_dma(isa_bus_from_device(isadev), isa->dma);
assert(fdctrl->dma);
}
qdev_set_legacy_instance_id(VAR_0, isa->iobase, 2);
fdctrl_realize_common(fdctrl, &err);
if (err != NULL) {
error_propagate(VAR_1, err);
return;
}
}
| [
"static void FUNC_0(DeviceState *VAR_0, Error **VAR_1)\n{",
"ISADevice *isadev = ISA_DEVICE(VAR_0);",
"FDCtrlISABus *isa = ISA_FDC(VAR_0);",
"FDCtrl *fdctrl = &isa->state;",
"Error *err = NULL;",
"isa_register_portio_list(isadev, isa->iobase, fdc_portio_list, fdctrl,\n\"fdc\");",
"isa_init_irq(isadev, &... | [
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
15,
17
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
45
],
[
47
]
] |
13,189 | static void network_to_register(RDMARegister *reg)
{
reg->key.current_addr = ntohll(reg->key.current_addr);
reg->current_index = ntohl(reg->current_index);
reg->chunks = ntohll(reg->chunks);
}
| true | qemu | 60fe637bf0e4d7989e21e50f52526444765c63b4 | static void network_to_register(RDMARegister *reg)
{
reg->key.current_addr = ntohll(reg->key.current_addr);
reg->current_index = ntohl(reg->current_index);
reg->chunks = ntohll(reg->chunks);
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(RDMARegister *VAR_0)
{
VAR_0->key.current_addr = ntohll(VAR_0->key.current_addr);
VAR_0->current_index = ntohl(VAR_0->current_index);
VAR_0->chunks = ntohll(VAR_0->chunks);
}
| [
"static void FUNC_0(RDMARegister *VAR_0)\n{",
"VAR_0->key.current_addr = ntohll(VAR_0->key.current_addr);",
"VAR_0->current_index = ntohl(VAR_0->current_index);",
"VAR_0->chunks = ntohll(VAR_0->chunks);",
"}"
] | [
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
]
] |
13,190 | ogg_get_length (AVFormatContext * s)
{
ogg_t *ogg = s->priv_data;
int idx = -1, i;
offset_t size, end;
if(s->pb.is_streamed)
return 0;
// already set
if (s->duration != AV_NOPTS_VALUE)
return 0;
size = url_fsize(&s->pb);
if(size < 0)
return 0;
end = size > MAX_PAGE_SIZE? size - MAX_PAGE_SIZE: size;
ogg_save (s);
url_fseek (&s->pb, end, SEEK_SET);
while (!ogg_read_page (s, &i)){
if (ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0)
idx = i;
}
if (idx != -1){
s->streams[idx]->duration =
ogg_gptopts (s, idx, ogg->streams[idx].granule);
}
ogg->size = size;
ogg_restore (s, 0);
ogg_save (s);
while (!ogg_read_page (s, &i)) {
if (i == idx && ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0)
break;
}
if (i == idx) {
s->streams[idx]->start_time = ogg_gptopts (s, idx, ogg->streams[idx].granule);
s->streams[idx]->duration -= s->streams[idx]->start_time;
}
ogg_restore (s, 0);
return 0;
}
| true | FFmpeg | e22f2aaf99c59d788f292c4d7594493068eb4d69 | ogg_get_length (AVFormatContext * s)
{
ogg_t *ogg = s->priv_data;
int idx = -1, i;
offset_t size, end;
if(s->pb.is_streamed)
return 0;
if (s->duration != AV_NOPTS_VALUE)
return 0;
size = url_fsize(&s->pb);
if(size < 0)
return 0;
end = size > MAX_PAGE_SIZE? size - MAX_PAGE_SIZE: size;
ogg_save (s);
url_fseek (&s->pb, end, SEEK_SET);
while (!ogg_read_page (s, &i)){
if (ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0)
idx = i;
}
if (idx != -1){
s->streams[idx]->duration =
ogg_gptopts (s, idx, ogg->streams[idx].granule);
}
ogg->size = size;
ogg_restore (s, 0);
ogg_save (s);
while (!ogg_read_page (s, &i)) {
if (i == idx && ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0)
break;
}
if (i == idx) {
s->streams[idx]->start_time = ogg_gptopts (s, idx, ogg->streams[idx].granule);
s->streams[idx]->duration -= s->streams[idx]->start_time;
}
ogg_restore (s, 0);
return 0;
}
| {
"code": [
" if (ogg->streams[i].granule != -1 && ogg->streams[i].granule != 0)"
],
"line_no": [
45
]
} | FUNC_0 (AVFormatContext * VAR_0)
{
ogg_t *ogg = VAR_0->priv_data;
int VAR_1 = -1, VAR_2;
offset_t size, end;
if(VAR_0->pb.is_streamed)
return 0;
if (VAR_0->duration != AV_NOPTS_VALUE)
return 0;
size = url_fsize(&VAR_0->pb);
if(size < 0)
return 0;
end = size > MAX_PAGE_SIZE? size - MAX_PAGE_SIZE: size;
ogg_save (VAR_0);
url_fseek (&VAR_0->pb, end, SEEK_SET);
while (!ogg_read_page (VAR_0, &VAR_2)){
if (ogg->streams[VAR_2].granule != -1 && ogg->streams[VAR_2].granule != 0)
VAR_1 = VAR_2;
}
if (VAR_1 != -1){
VAR_0->streams[VAR_1]->duration =
ogg_gptopts (VAR_0, VAR_1, ogg->streams[VAR_1].granule);
}
ogg->size = size;
ogg_restore (VAR_0, 0);
ogg_save (VAR_0);
while (!ogg_read_page (VAR_0, &VAR_2)) {
if (VAR_2 == VAR_1 && ogg->streams[VAR_2].granule != -1 && ogg->streams[VAR_2].granule != 0)
break;
}
if (VAR_2 == VAR_1) {
VAR_0->streams[VAR_1]->start_time = ogg_gptopts (VAR_0, VAR_1, ogg->streams[VAR_1].granule);
VAR_0->streams[VAR_1]->duration -= VAR_0->streams[VAR_1]->start_time;
}
ogg_restore (VAR_0, 0);
return 0;
}
| [
"FUNC_0 (AVFormatContext * VAR_0)\n{",
"ogg_t *ogg = VAR_0->priv_data;",
"int VAR_1 = -1, VAR_2;",
"offset_t size, end;",
"if(VAR_0->pb.is_streamed)\nreturn 0;",
"if (VAR_0->duration != AV_NOPTS_VALUE)\nreturn 0;",
"size = url_fsize(&VAR_0->pb);",
"if(size < 0)\nreturn 0;",
"end = size > MAX_PAGE_SI... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13,
15
],
[
21,
23
],
[
27
],
[
29,
31
],
[
33
],
[
37
],
[
39
],
[
43
],
[
45,
47
],
[
49
],
[
53
],
[
55,
57
],
[
59
],
[... |
13,191 | static void qemu_event_increment(void)
{
SetEvent(qemu_event_handle);
}
| true | qemu | de1c90cf8b73992b4197561850d2da1075fb82eb | static void qemu_event_increment(void)
{
SetEvent(qemu_event_handle);
}
| {
"code": [
" SetEvent(qemu_event_handle);"
],
"line_no": [
5
]
} | static void FUNC_0(void)
{
SetEvent(qemu_event_handle);
}
| [
"static void FUNC_0(void)\n{",
"SetEvent(qemu_event_handle);",
"}"
] | [
0,
1,
0
] | [
[
1,
3
],
[
5
],
[
7
]
] |
13,192 | static int qemu_balloon(ram_addr_t target, MonitorCompletion cb, void *opaque)
{
if (!balloon_event_fn) {
return 0;
}
trace_balloon_event(balloon_opaque, target);
balloon_event_fn(balloon_opaque, target, cb, opaque);
return 1;
}
| true | qemu | 30fb2ca603e8b8d0f02630ef18bc0d0637a88ffa | static int qemu_balloon(ram_addr_t target, MonitorCompletion cb, void *opaque)
{
if (!balloon_event_fn) {
return 0;
}
trace_balloon_event(balloon_opaque, target);
balloon_event_fn(balloon_opaque, target, cb, opaque);
return 1;
}
| {
"code": [
"static int qemu_balloon(ram_addr_t target, MonitorCompletion cb, void *opaque)",
" balloon_event_fn(balloon_opaque, target, cb, opaque);",
" if (!balloon_event_fn) {"
],
"line_no": [
1,
13,
5
]
} | static int FUNC_0(ram_addr_t VAR_0, MonitorCompletion VAR_1, void *VAR_2)
{
if (!balloon_event_fn) {
return 0;
}
trace_balloon_event(balloon_opaque, VAR_0);
balloon_event_fn(balloon_opaque, VAR_0, VAR_1, VAR_2);
return 1;
}
| [
"static int FUNC_0(ram_addr_t VAR_0, MonitorCompletion VAR_1, void *VAR_2)\n{",
"if (!balloon_event_fn) {",
"return 0;",
"}",
"trace_balloon_event(balloon_opaque, VAR_0);",
"balloon_event_fn(balloon_opaque, VAR_0, VAR_1, VAR_2);",
"return 1;",
"}"
] | [
1,
1,
0,
0,
0,
1,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
]
] |
13,193 | void helper_wrmsr(void)
{
uint64_t val;
helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
switch((uint32_t)ECX) {
case MSR_IA32_SYSENTER_CS:
env->sysenter_cs = val & 0xffff;
case MSR_IA32_SYSENTER_ESP:
env->sysenter_esp = val;
case MSR_IA32_SYSENTER_EIP:
env->sysenter_eip = val;
case MSR_IA32_APICBASE:
cpu_set_apic_base(env, val);
case MSR_EFER:
{
uint64_t update_mask;
update_mask = 0;
if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
update_mask |= MSR_EFER_SCE;
if (env->cpuid_ext2_features & CPUID_EXT2_LM)
update_mask |= MSR_EFER_LME;
if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
update_mask |= MSR_EFER_FFXSR;
if (env->cpuid_ext2_features & CPUID_EXT2_NX)
update_mask |= MSR_EFER_NXE;
if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
update_mask |= MSR_EFER_SVME;
cpu_load_efer(env, (env->efer & ~update_mask) |
(val & update_mask));
}
case MSR_STAR:
env->star = val;
case MSR_PAT:
env->pat = val;
case MSR_VM_HSAVE_PA:
env->vm_hsave = val;
#ifdef TARGET_X86_64
case MSR_LSTAR:
env->lstar = val;
case MSR_CSTAR:
env->cstar = val;
case MSR_FMASK:
env->fmask = val;
case MSR_FSBASE:
env->segs[R_FS].base = val;
case MSR_GSBASE:
env->segs[R_GS].base = val;
case MSR_KERNELGSBASE:
env->kernelgsbase = val;
#endif
default:
/* XXX: exception ? */
}
} | true | qemu | 165d9b82eb8c877ee691a7b7bde5930bc2d07037 | void helper_wrmsr(void)
{
uint64_t val;
helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
switch((uint32_t)ECX) {
case MSR_IA32_SYSENTER_CS:
env->sysenter_cs = val & 0xffff;
case MSR_IA32_SYSENTER_ESP:
env->sysenter_esp = val;
case MSR_IA32_SYSENTER_EIP:
env->sysenter_eip = val;
case MSR_IA32_APICBASE:
cpu_set_apic_base(env, val);
case MSR_EFER:
{
uint64_t update_mask;
update_mask = 0;
if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
update_mask |= MSR_EFER_SCE;
if (env->cpuid_ext2_features & CPUID_EXT2_LM)
update_mask |= MSR_EFER_LME;
if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
update_mask |= MSR_EFER_FFXSR;
if (env->cpuid_ext2_features & CPUID_EXT2_NX)
update_mask |= MSR_EFER_NXE;
if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
update_mask |= MSR_EFER_SVME;
cpu_load_efer(env, (env->efer & ~update_mask) |
(val & update_mask));
}
case MSR_STAR:
env->star = val;
case MSR_PAT:
env->pat = val;
case MSR_VM_HSAVE_PA:
env->vm_hsave = val;
#ifdef TARGET_X86_64
case MSR_LSTAR:
env->lstar = val;
case MSR_CSTAR:
env->cstar = val;
case MSR_FMASK:
env->fmask = val;
case MSR_FSBASE:
env->segs[R_FS].base = val;
case MSR_GSBASE:
env->segs[R_GS].base = val;
case MSR_KERNELGSBASE:
env->kernelgsbase = val;
#endif
default:
}
} | {
"code": [],
"line_no": []
} | void FUNC_0(void)
{
uint64_t val;
helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);
val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);
switch((uint32_t)ECX) {
case MSR_IA32_SYSENTER_CS:
env->sysenter_cs = val & 0xffff;
case MSR_IA32_SYSENTER_ESP:
env->sysenter_esp = val;
case MSR_IA32_SYSENTER_EIP:
env->sysenter_eip = val;
case MSR_IA32_APICBASE:
cpu_set_apic_base(env, val);
case MSR_EFER:
{
uint64_t update_mask;
update_mask = 0;
if (env->cpuid_ext2_features & CPUID_EXT2_SYSCALL)
update_mask |= MSR_EFER_SCE;
if (env->cpuid_ext2_features & CPUID_EXT2_LM)
update_mask |= MSR_EFER_LME;
if (env->cpuid_ext2_features & CPUID_EXT2_FFXSR)
update_mask |= MSR_EFER_FFXSR;
if (env->cpuid_ext2_features & CPUID_EXT2_NX)
update_mask |= MSR_EFER_NXE;
if (env->cpuid_ext3_features & CPUID_EXT3_SVM)
update_mask |= MSR_EFER_SVME;
cpu_load_efer(env, (env->efer & ~update_mask) |
(val & update_mask));
}
case MSR_STAR:
env->star = val;
case MSR_PAT:
env->pat = val;
case MSR_VM_HSAVE_PA:
env->vm_hsave = val;
#ifdef TARGET_X86_64
case MSR_LSTAR:
env->lstar = val;
case MSR_CSTAR:
env->cstar = val;
case MSR_FMASK:
env->fmask = val;
case MSR_FSBASE:
env->segs[R_FS].base = val;
case MSR_GSBASE:
env->segs[R_GS].base = val;
case MSR_KERNELGSBASE:
env->kernelgsbase = val;
#endif
default:
}
} | [
"void FUNC_0(void)\n{",
"uint64_t val;",
"helper_svm_check_intercept_param(SVM_EXIT_MSR, 1);",
"val = ((uint32_t)EAX) | ((uint64_t)((uint32_t)EDX) << 32);",
"switch((uint32_t)ECX) {",
"case MSR_IA32_SYSENTER_CS:\nenv->sysenter_cs = val & 0xffff;",
"case MSR_IA32_SYSENTER_ESP:\nenv->sysenter_esp = val;",... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
13
],
[
17
],
[
19,
21
],
[
24,
26
],
[
29,
31
],
[
34,
36
],
[
39,
41
],
[
43
],
[
45
],
[
47,
49
],
[
51,
53
],
[
55,
57
],
[
59... |
13,194 | static int rm_write_header(AVFormatContext *s)
{
RMMuxContext *rm = s->priv_data;
StreamInfo *stream;
int n;
AVCodecContext *codec;
for(n=0;n<s->nb_streams;n++) {
s->streams[n]->id = n;
codec = s->streams[n]->codec;
stream = &rm->streams[n];
memset(stream, 0, sizeof(StreamInfo));
stream->num = n;
stream->bit_rate = codec->bit_rate;
stream->enc = codec;
switch(codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
rm->audio_stream = stream;
stream->frame_rate = (float)codec->sample_rate / (float)codec->frame_size;
/* XXX: dummy values */
stream->packet_max_size = 1024;
stream->nb_packets = 0;
stream->total_frames = stream->nb_packets;
break;
case AVMEDIA_TYPE_VIDEO:
rm->video_stream = stream;
stream->frame_rate = (float)codec->time_base.den / (float)codec->time_base.num;
/* XXX: dummy values */
stream->packet_max_size = 4096;
stream->nb_packets = 0;
stream->total_frames = stream->nb_packets;
break;
default:
return -1;
if (rv10_write_header(s, 0, 0))
return AVERROR_INVALIDDATA;
avio_flush(s->pb);
return 0; | true | FFmpeg | 755667eebc0b6303814faadef047870071ccb5c6 | static int rm_write_header(AVFormatContext *s)
{
RMMuxContext *rm = s->priv_data;
StreamInfo *stream;
int n;
AVCodecContext *codec;
for(n=0;n<s->nb_streams;n++) {
s->streams[n]->id = n;
codec = s->streams[n]->codec;
stream = &rm->streams[n];
memset(stream, 0, sizeof(StreamInfo));
stream->num = n;
stream->bit_rate = codec->bit_rate;
stream->enc = codec;
switch(codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
rm->audio_stream = stream;
stream->frame_rate = (float)codec->sample_rate / (float)codec->frame_size;
stream->packet_max_size = 1024;
stream->nb_packets = 0;
stream->total_frames = stream->nb_packets;
break;
case AVMEDIA_TYPE_VIDEO:
rm->video_stream = stream;
stream->frame_rate = (float)codec->time_base.den / (float)codec->time_base.num;
stream->packet_max_size = 4096;
stream->nb_packets = 0;
stream->total_frames = stream->nb_packets;
break;
default:
return -1;
if (rv10_write_header(s, 0, 0))
return AVERROR_INVALIDDATA;
avio_flush(s->pb);
return 0; | {
"code": [],
"line_no": []
} | static int FUNC_0(AVFormatContext *VAR_0)
{
RMMuxContext *rm = VAR_0->priv_data;
StreamInfo *stream;
int VAR_1;
AVCodecContext *codec;
for(VAR_1=0;VAR_1<VAR_0->nb_streams;VAR_1++) {
VAR_0->streams[VAR_1]->id = VAR_1;
codec = VAR_0->streams[VAR_1]->codec;
stream = &rm->streams[VAR_1];
memset(stream, 0, sizeof(StreamInfo));
stream->num = VAR_1;
stream->bit_rate = codec->bit_rate;
stream->enc = codec;
switch(codec->codec_type) {
case AVMEDIA_TYPE_AUDIO:
rm->audio_stream = stream;
stream->frame_rate = (float)codec->sample_rate / (float)codec->frame_size;
stream->packet_max_size = 1024;
stream->nb_packets = 0;
stream->total_frames = stream->nb_packets;
break;
case AVMEDIA_TYPE_VIDEO:
rm->video_stream = stream;
stream->frame_rate = (float)codec->time_base.den / (float)codec->time_base.num;
stream->packet_max_size = 4096;
stream->nb_packets = 0;
stream->total_frames = stream->nb_packets;
break;
default:
return -1;
if (rv10_write_header(VAR_0, 0, 0))
return AVERROR_INVALIDDATA;
avio_flush(VAR_0->pb);
return 0; | [
"static int FUNC_0(AVFormatContext *VAR_0)\n{",
"RMMuxContext *rm = VAR_0->priv_data;",
"StreamInfo *stream;",
"int VAR_1;",
"AVCodecContext *codec;",
"for(VAR_1=0;VAR_1<VAR_0->nb_streams;VAR_1++) {",
"VAR_0->streams[VAR_1]->id = VAR_1;",
"codec = VAR_0->streams[VAR_1]->codec;",
"stream = &rm->strea... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
2
],
[
3
],
[
4
],
[
5
],
[
6
],
[
7
],
[
8
],
[
9
],
[
10
],
[
11
],
[
12
],
[
13
],
[
14
],
[
15
],
[
16,
17
],
[
18
],
[
20
],
[
21
],
[
22
],
[
... |
13,195 | static inline void mix_stereo_to_mono(AC3DecodeContext *ctx)
{
int i;
float (*output)[256] = ctx->audio_block.block_output;
for (i = 0; i < 256; i++)
output[1][i] += output[2][i];
memset(output[2], 0, sizeof(output[2]));
}
| false | FFmpeg | 486637af8ef29ec215e0e0b7ecd3b5470f0e04e5 | static inline void mix_stereo_to_mono(AC3DecodeContext *ctx)
{
int i;
float (*output)[256] = ctx->audio_block.block_output;
for (i = 0; i < 256; i++)
output[1][i] += output[2][i];
memset(output[2], 0, sizeof(output[2]));
}
| {
"code": [],
"line_no": []
} | static inline void FUNC_0(AC3DecodeContext *VAR_0)
{
int VAR_1;
float (*VAR_2)[256] = VAR_0->audio_block.block_output;
for (VAR_1 = 0; VAR_1 < 256; VAR_1++)
VAR_2[1][VAR_1] += VAR_2[2][VAR_1];
memset(VAR_2[2], 0, sizeof(VAR_2[2]));
}
| [
"static inline void FUNC_0(AC3DecodeContext *VAR_0)\n{",
"int VAR_1;",
"float (*VAR_2)[256] = VAR_0->audio_block.block_output;",
"for (VAR_1 = 0; VAR_1 < 256; VAR_1++)",
"VAR_2[1][VAR_1] += VAR_2[2][VAR_1];",
"memset(VAR_2[2], 0, sizeof(VAR_2[2]));",
"}"
] | [
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
11
],
[
13
],
[
15
],
[
17
]
] |
13,196 | static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr)
{
int level, pred;
if(s->msmpeg4_version<=2){
if (n < 4) {
level = get_vlc2(&s->gb, v2_dc_lum_vlc.table, DC_VLC_BITS, 3);
} else {
level = get_vlc2(&s->gb, v2_dc_chroma_vlc.table, DC_VLC_BITS, 3);
}
if (level < 0)
return -1;
level-=256;
}else{ //FIXME optimize use unified tables & index
if (n < 4) {
level = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
} else {
level = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
}
if (level < 0){
av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n");
return -1;
}
if (level == DC_MAX) {
level = get_bits(&s->gb, 8);
if (get_bits1(&s->gb))
level = -level;
} else if (level != 0) {
if (get_bits1(&s->gb))
level = -level;
}
}
if(s->msmpeg4_version==1){
int32_t *dc_val;
pred = msmpeg4v1_pred_dc(s, n, &dc_val);
level += pred;
/* update predictor */
*dc_val= level;
}else{
int16_t *dc_val;
pred = ff_msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
level += pred;
/* update predictor */
if (n < 4) {
*dc_val = level * s->y_dc_scale;
} else {
*dc_val = level * s->c_dc_scale;
}
}
return level;
}
| false | FFmpeg | 81230e2612a9c88e5b35ed2f67d828450cc50abf | static int msmpeg4_decode_dc(MpegEncContext * s, int n, int *dir_ptr)
{
int level, pred;
if(s->msmpeg4_version<=2){
if (n < 4) {
level = get_vlc2(&s->gb, v2_dc_lum_vlc.table, DC_VLC_BITS, 3);
} else {
level = get_vlc2(&s->gb, v2_dc_chroma_vlc.table, DC_VLC_BITS, 3);
}
if (level < 0)
return -1;
level-=256;
}else{
if (n < 4) {
level = get_vlc2(&s->gb, ff_msmp4_dc_luma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
} else {
level = get_vlc2(&s->gb, ff_msmp4_dc_chroma_vlc[s->dc_table_index].table, DC_VLC_BITS, 3);
}
if (level < 0){
av_log(s->avctx, AV_LOG_ERROR, "illegal dc vlc\n");
return -1;
}
if (level == DC_MAX) {
level = get_bits(&s->gb, 8);
if (get_bits1(&s->gb))
level = -level;
} else if (level != 0) {
if (get_bits1(&s->gb))
level = -level;
}
}
if(s->msmpeg4_version==1){
int32_t *dc_val;
pred = msmpeg4v1_pred_dc(s, n, &dc_val);
level += pred;
*dc_val= level;
}else{
int16_t *dc_val;
pred = ff_msmpeg4_pred_dc(s, n, &dc_val, dir_ptr);
level += pred;
if (n < 4) {
*dc_val = level * s->y_dc_scale;
} else {
*dc_val = level * s->c_dc_scale;
}
}
return level;
}
| {
"code": [],
"line_no": []
} | static int FUNC_0(MpegEncContext * VAR_0, int VAR_1, int *VAR_2)
{
int VAR_3, VAR_4;
if(VAR_0->msmpeg4_version<=2){
if (VAR_1 < 4) {
VAR_3 = get_vlc2(&VAR_0->gb, v2_dc_lum_vlc.table, DC_VLC_BITS, 3);
} else {
VAR_3 = get_vlc2(&VAR_0->gb, v2_dc_chroma_vlc.table, DC_VLC_BITS, 3);
}
if (VAR_3 < 0)
return -1;
VAR_3-=256;
}else{
if (VAR_1 < 4) {
VAR_3 = get_vlc2(&VAR_0->gb, ff_msmp4_dc_luma_vlc[VAR_0->dc_table_index].table, DC_VLC_BITS, 3);
} else {
VAR_3 = get_vlc2(&VAR_0->gb, ff_msmp4_dc_chroma_vlc[VAR_0->dc_table_index].table, DC_VLC_BITS, 3);
}
if (VAR_3 < 0){
av_log(VAR_0->avctx, AV_LOG_ERROR, "illegal dc vlc\VAR_1");
return -1;
}
if (VAR_3 == DC_MAX) {
VAR_3 = get_bits(&VAR_0->gb, 8);
if (get_bits1(&VAR_0->gb))
VAR_3 = -VAR_3;
} else if (VAR_3 != 0) {
if (get_bits1(&VAR_0->gb))
VAR_3 = -VAR_3;
}
}
if(VAR_0->msmpeg4_version==1){
int32_t *dc_val;
VAR_4 = msmpeg4v1_pred_dc(VAR_0, VAR_1, &dc_val);
VAR_3 += VAR_4;
*dc_val= VAR_3;
}else{
int16_t *dc_val;
VAR_4 = ff_msmpeg4_pred_dc(VAR_0, VAR_1, &dc_val, VAR_2);
VAR_3 += VAR_4;
if (VAR_1 < 4) {
*dc_val = VAR_3 * VAR_0->y_dc_scale;
} else {
*dc_val = VAR_3 * VAR_0->c_dc_scale;
}
}
return VAR_3;
}
| [
"static int FUNC_0(MpegEncContext * VAR_0, int VAR_1, int *VAR_2)\n{",
"int VAR_3, VAR_4;",
"if(VAR_0->msmpeg4_version<=2){",
"if (VAR_1 < 4) {",
"VAR_3 = get_vlc2(&VAR_0->gb, v2_dc_lum_vlc.table, DC_VLC_BITS, 3);",
"} else {",
"VAR_3 = get_vlc2(&VAR_0->gb, v2_dc_chroma_vlc.table, DC_VLC_BITS, 3);",
"... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21,
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
... |
13,197 | void ff_put_h264_qpel16_mc21_msa(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avc_luma_midv_qrt_16w_msa(src - (2 * stride) - 2,
stride, dst, stride, 16, 0);
}
| false | FFmpeg | 662234a9a22f1cd0f0ac83b8bb1ffadedca90c0a | void ff_put_h264_qpel16_mc21_msa(uint8_t *dst, const uint8_t *src,
ptrdiff_t stride)
{
avc_luma_midv_qrt_16w_msa(src - (2 * stride) - 2,
stride, dst, stride, 16, 0);
}
| {
"code": [],
"line_no": []
} | void FUNC_0(uint8_t *VAR_0, const uint8_t *VAR_1,
ptrdiff_t VAR_2)
{
avc_luma_midv_qrt_16w_msa(VAR_1 - (2 * VAR_2) - 2,
VAR_2, VAR_0, VAR_2, 16, 0);
}
| [
"void FUNC_0(uint8_t *VAR_0, const uint8_t *VAR_1,\nptrdiff_t VAR_2)\n{",
"avc_luma_midv_qrt_16w_msa(VAR_1 - (2 * VAR_2) - 2,\nVAR_2, VAR_0, VAR_2, 16, 0);",
"}"
] | [
0,
0,
0
] | [
[
1,
3,
5
],
[
7,
9
],
[
11
]
] |
13,198 | av_cold int ff_ivi_init_tiles(IVIPlaneDesc *planes, int tile_width, int tile_height)
{
int p, b, x, y, x_tiles, y_tiles, t_width, t_height;
IVIBandDesc *band;
IVITile *tile, *ref_tile;
for (p = 0; p < 3; p++) {
t_width = !p ? tile_width : (tile_width + 3) >> 2;
t_height = !p ? tile_height : (tile_height + 3) >> 2;
if (!p && planes[0].num_bands == 4) {
t_width >>= 1;
t_height >>= 1;
}
if(t_width<=0 || t_height<=0)
return AVERROR(EINVAL);
for (b = 0; b < planes[p].num_bands; b++) {
band = &planes[p].bands[b];
x_tiles = IVI_NUM_TILES(band->width, t_width);
y_tiles = IVI_NUM_TILES(band->height, t_height);
band->num_tiles = x_tiles * y_tiles;
av_freep(&band->tiles);
band->tiles = av_mallocz(band->num_tiles * sizeof(IVITile));
if (!band->tiles)
return AVERROR(ENOMEM);
tile = band->tiles;
/* use the first luma band as reference for motion vectors
* and quant */
ref_tile = planes[0].bands[0].tiles;
for (y = 0; y < band->height; y += t_height) {
for (x = 0; x < band->width; x += t_width) {
tile->xpos = x;
tile->ypos = y;
tile->mb_size = band->mb_size;
tile->width = FFMIN(band->width - x, t_width);
tile->height = FFMIN(band->height - y, t_height);
tile->is_empty = tile->data_size = 0;
/* calculate number of macroblocks */
tile->num_MBs = IVI_MBs_PER_TILE(tile->width, tile->height,
band->mb_size);
av_freep(&tile->mbs);
tile->mbs = av_malloc(tile->num_MBs * sizeof(IVIMbInfo));
if (!tile->mbs)
return AVERROR(ENOMEM);
tile->ref_mbs = 0;
if (p || b) {
tile->ref_mbs = ref_tile->mbs;
ref_tile++;
}
tile++;
}
}
}// for b
}// for p
return 0;
}
| false | FFmpeg | dab70c62d20081bcf879b7b6bc3ffabc2e331542 | av_cold int ff_ivi_init_tiles(IVIPlaneDesc *planes, int tile_width, int tile_height)
{
int p, b, x, y, x_tiles, y_tiles, t_width, t_height;
IVIBandDesc *band;
IVITile *tile, *ref_tile;
for (p = 0; p < 3; p++) {
t_width = !p ? tile_width : (tile_width + 3) >> 2;
t_height = !p ? tile_height : (tile_height + 3) >> 2;
if (!p && planes[0].num_bands == 4) {
t_width >>= 1;
t_height >>= 1;
}
if(t_width<=0 || t_height<=0)
return AVERROR(EINVAL);
for (b = 0; b < planes[p].num_bands; b++) {
band = &planes[p].bands[b];
x_tiles = IVI_NUM_TILES(band->width, t_width);
y_tiles = IVI_NUM_TILES(band->height, t_height);
band->num_tiles = x_tiles * y_tiles;
av_freep(&band->tiles);
band->tiles = av_mallocz(band->num_tiles * sizeof(IVITile));
if (!band->tiles)
return AVERROR(ENOMEM);
tile = band->tiles;
ref_tile = planes[0].bands[0].tiles;
for (y = 0; y < band->height; y += t_height) {
for (x = 0; x < band->width; x += t_width) {
tile->xpos = x;
tile->ypos = y;
tile->mb_size = band->mb_size;
tile->width = FFMIN(band->width - x, t_width);
tile->height = FFMIN(band->height - y, t_height);
tile->is_empty = tile->data_size = 0;
tile->num_MBs = IVI_MBs_PER_TILE(tile->width, tile->height,
band->mb_size);
av_freep(&tile->mbs);
tile->mbs = av_malloc(tile->num_MBs * sizeof(IVIMbInfo));
if (!tile->mbs)
return AVERROR(ENOMEM);
tile->ref_mbs = 0;
if (p || b) {
tile->ref_mbs = ref_tile->mbs;
ref_tile++;
}
tile++;
}
}
}
}
return 0;
}
| {
"code": [],
"line_no": []
} | av_cold int FUNC_0(IVIPlaneDesc *planes, int tile_width, int tile_height)
{
int VAR_0, VAR_1, VAR_2, VAR_3, VAR_4, VAR_5, VAR_6, VAR_7;
IVIBandDesc *band;
IVITile *tile, *ref_tile;
for (VAR_0 = 0; VAR_0 < 3; VAR_0++) {
VAR_6 = !VAR_0 ? tile_width : (tile_width + 3) >> 2;
VAR_7 = !VAR_0 ? tile_height : (tile_height + 3) >> 2;
if (!VAR_0 && planes[0].num_bands == 4) {
VAR_6 >>= 1;
VAR_7 >>= 1;
}
if(VAR_6<=0 || VAR_7<=0)
return AVERROR(EINVAL);
for (VAR_1 = 0; VAR_1 < planes[VAR_0].num_bands; VAR_1++) {
band = &planes[VAR_0].bands[VAR_1];
VAR_4 = IVI_NUM_TILES(band->width, VAR_6);
VAR_5 = IVI_NUM_TILES(band->height, VAR_7);
band->num_tiles = VAR_4 * VAR_5;
av_freep(&band->tiles);
band->tiles = av_mallocz(band->num_tiles * sizeof(IVITile));
if (!band->tiles)
return AVERROR(ENOMEM);
tile = band->tiles;
ref_tile = planes[0].bands[0].tiles;
for (VAR_3 = 0; VAR_3 < band->height; VAR_3 += VAR_7) {
for (VAR_2 = 0; VAR_2 < band->width; VAR_2 += VAR_6) {
tile->xpos = VAR_2;
tile->ypos = VAR_3;
tile->mb_size = band->mb_size;
tile->width = FFMIN(band->width - VAR_2, VAR_6);
tile->height = FFMIN(band->height - VAR_3, VAR_7);
tile->is_empty = tile->data_size = 0;
tile->num_MBs = IVI_MBs_PER_TILE(tile->width, tile->height,
band->mb_size);
av_freep(&tile->mbs);
tile->mbs = av_malloc(tile->num_MBs * sizeof(IVIMbInfo));
if (!tile->mbs)
return AVERROR(ENOMEM);
tile->ref_mbs = 0;
if (VAR_0 || VAR_1) {
tile->ref_mbs = ref_tile->mbs;
ref_tile++;
}
tile++;
}
}
}
}
return 0;
}
| [
"av_cold int FUNC_0(IVIPlaneDesc *planes, int tile_width, int tile_height)\n{",
"int VAR_0, VAR_1, VAR_2, VAR_3, VAR_4, VAR_5, VAR_6, VAR_7;",
"IVIBandDesc *band;",
"IVITile *tile, *ref_tile;",
"for (VAR_0 = 0; VAR_0 < 3; VAR_0++) {",
"VAR_6 = !VAR_0 ? tile_width : (tile_width + 3) >> 2;",
... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29,
31
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
47
],
[
49
],
[... |
13,199 | static int mxf_read_content_storage(void *arg, AVIOContext *pb, int tag, int size, UID uid)
{
MXFContext *mxf = arg;
switch (tag) {
case 0x1901:
mxf->packages_count = avio_rb32(pb);
if (mxf->packages_count >= UINT_MAX / sizeof(UID))
return -1;
mxf->packages_refs = av_malloc(mxf->packages_count * sizeof(UID));
if (!mxf->packages_refs)
return -1;
avio_skip(pb, 4); /* useless size of objects, always 16 according to specs */
avio_read(pb, (uint8_t *)mxf->packages_refs, mxf->packages_count * sizeof(UID));
break;
}
return 0;
}
| true | FFmpeg | fd34dbea58e097609ff09cf7dcc59f74930195d3 | static int mxf_read_content_storage(void *arg, AVIOContext *pb, int tag, int size, UID uid)
{
MXFContext *mxf = arg;
switch (tag) {
case 0x1901:
mxf->packages_count = avio_rb32(pb);
if (mxf->packages_count >= UINT_MAX / sizeof(UID))
return -1;
mxf->packages_refs = av_malloc(mxf->packages_count * sizeof(UID));
if (!mxf->packages_refs)
return -1;
avio_skip(pb, 4);
avio_read(pb, (uint8_t *)mxf->packages_refs, mxf->packages_count * sizeof(UID));
break;
}
return 0;
}
| {
"code": [
"static int mxf_read_content_storage(void *arg, AVIOContext *pb, int tag, int size, UID uid)"
],
"line_no": [
1
]
} | static int FUNC_0(void *VAR_0, AVIOContext *VAR_1, int VAR_2, int VAR_3, UID VAR_4)
{
MXFContext *mxf = VAR_0;
switch (VAR_2) {
case 0x1901:
mxf->packages_count = avio_rb32(VAR_1);
if (mxf->packages_count >= UINT_MAX / sizeof(UID))
return -1;
mxf->packages_refs = av_malloc(mxf->packages_count * sizeof(UID));
if (!mxf->packages_refs)
return -1;
avio_skip(VAR_1, 4);
avio_read(VAR_1, (uint8_t *)mxf->packages_refs, mxf->packages_count * sizeof(UID));
break;
}
return 0;
}
| [
"static int FUNC_0(void *VAR_0, AVIOContext *VAR_1, int VAR_2, int VAR_3, UID VAR_4)\n{",
"MXFContext *mxf = VAR_0;",
"switch (VAR_2) {",
"case 0x1901:\nmxf->packages_count = avio_rb32(VAR_1);",
"if (mxf->packages_count >= UINT_MAX / sizeof(UID))\nreturn -1;",
"mxf->packages_refs = av_malloc(mxf->packages... | [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9,
11
],
[
13,
15
],
[
17
],
[
19,
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
]
] |
13,200 | int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd)
{
int64_t r = 0;
av_assert2(c > 0);
av_assert2(b >=0);
av_assert2((unsigned)(rnd&~AV_ROUND_PASS_MINMAX)<=5 && (rnd&~AV_ROUND_PASS_MINMAX)!=4);
if (c <= 0 || b < 0 || !((unsigned)(rnd&~AV_ROUND_PASS_MINMAX)<=5 && (rnd&~AV_ROUND_PASS_MINMAX)!=4))
return INT64_MIN;
if (rnd & AV_ROUND_PASS_MINMAX) {
if (a == INT64_MIN || a == INT64_MAX)
return a;
rnd -= AV_ROUND_PASS_MINMAX;
}
if (a < 0)
return -(uint64_t)av_rescale_rnd(-FFMAX(a, -INT64_MAX), b, c, rnd ^ ((rnd >> 1) & 1));
if (rnd == AV_ROUND_NEAR_INF)
r = c / 2;
else if (rnd & 1)
r = c - 1;
if (b <= INT_MAX && c <= INT_MAX) {
if (a <= INT_MAX)
return (a * b + r) / c;
else {
int64_t ad = a / c;
int64_t a2 = (a % c * b + r) / c;
if (ad >= INT32_MAX && ad > (INT64_MAX - a2) / b)
return INT64_MIN;
return ad * b + a2;
}
} else {
#if 1
uint64_t a0 = a & 0xFFFFFFFF;
uint64_t a1 = a >> 32;
uint64_t b0 = b & 0xFFFFFFFF;
uint64_t b1 = b >> 32;
uint64_t t1 = a0 * b1 + a1 * b0;
uint64_t t1a = t1 << 32;
int i;
a0 = a0 * b0 + t1a;
a1 = a1 * b1 + (t1 >> 32) + (a0 < t1a);
a0 += r;
a1 += a0 < r;
for (i = 63; i >= 0; i--) {
a1 += a1 + ((a0 >> i) & 1);
t1 += t1;
if (c <= a1) {
a1 -= c;
t1++;
}
}
if (t1 > INT64_MAX)
return INT64_MIN;
return t1;
}
#else
AVInteger ai;
ai = av_mul_i(av_int2i(a), av_int2i(b));
ai = av_add_i(ai, av_int2i(r));
return av_i2int(av_div_i(ai, av_int2i(c)));
}
#endif
}
| true | FFmpeg | bc8b1e694cc395fdf5e2917377ef11263c937d85 | int64_t av_rescale_rnd(int64_t a, int64_t b, int64_t c, enum AVRounding rnd)
{
int64_t r = 0;
av_assert2(c > 0);
av_assert2(b >=0);
av_assert2((unsigned)(rnd&~AV_ROUND_PASS_MINMAX)<=5 && (rnd&~AV_ROUND_PASS_MINMAX)!=4);
if (c <= 0 || b < 0 || !((unsigned)(rnd&~AV_ROUND_PASS_MINMAX)<=5 && (rnd&~AV_ROUND_PASS_MINMAX)!=4))
return INT64_MIN;
if (rnd & AV_ROUND_PASS_MINMAX) {
if (a == INT64_MIN || a == INT64_MAX)
return a;
rnd -= AV_ROUND_PASS_MINMAX;
}
if (a < 0)
return -(uint64_t)av_rescale_rnd(-FFMAX(a, -INT64_MAX), b, c, rnd ^ ((rnd >> 1) & 1));
if (rnd == AV_ROUND_NEAR_INF)
r = c / 2;
else if (rnd & 1)
r = c - 1;
if (b <= INT_MAX && c <= INT_MAX) {
if (a <= INT_MAX)
return (a * b + r) / c;
else {
int64_t ad = a / c;
int64_t a2 = (a % c * b + r) / c;
if (ad >= INT32_MAX && ad > (INT64_MAX - a2) / b)
return INT64_MIN;
return ad * b + a2;
}
} else {
#if 1
uint64_t a0 = a & 0xFFFFFFFF;
uint64_t a1 = a >> 32;
uint64_t b0 = b & 0xFFFFFFFF;
uint64_t b1 = b >> 32;
uint64_t t1 = a0 * b1 + a1 * b0;
uint64_t t1a = t1 << 32;
int i;
a0 = a0 * b0 + t1a;
a1 = a1 * b1 + (t1 >> 32) + (a0 < t1a);
a0 += r;
a1 += a0 < r;
for (i = 63; i >= 0; i--) {
a1 += a1 + ((a0 >> i) & 1);
t1 += t1;
if (c <= a1) {
a1 -= c;
t1++;
}
}
if (t1 > INT64_MAX)
return INT64_MIN;
return t1;
}
#else
AVInteger ai;
ai = av_mul_i(av_int2i(a), av_int2i(b));
ai = av_add_i(ai, av_int2i(r));
return av_i2int(av_div_i(ai, av_int2i(c)));
}
#endif
}
| {
"code": [
" if (ad >= INT32_MAX && ad > (INT64_MAX - a2) / b)"
],
"line_no": [
61
]
} | int64_t FUNC_0(int64_t a, int64_t b, int64_t c, enum AVRounding rnd)
{
int64_t r = 0;
av_assert2(c > 0);
av_assert2(b >=0);
av_assert2((unsigned)(rnd&~AV_ROUND_PASS_MINMAX)<=5 && (rnd&~AV_ROUND_PASS_MINMAX)!=4);
if (c <= 0 || b < 0 || !((unsigned)(rnd&~AV_ROUND_PASS_MINMAX)<=5 && (rnd&~AV_ROUND_PASS_MINMAX)!=4))
return INT64_MIN;
if (rnd & AV_ROUND_PASS_MINMAX) {
if (a == INT64_MIN || a == INT64_MAX)
return a;
rnd -= AV_ROUND_PASS_MINMAX;
}
if (a < 0)
return -(uint64_t)FUNC_0(-FFMAX(a, -INT64_MAX), b, c, rnd ^ ((rnd >> 1) & 1));
if (rnd == AV_ROUND_NEAR_INF)
r = c / 2;
else if (rnd & 1)
r = c - 1;
if (b <= INT_MAX && c <= INT_MAX) {
if (a <= INT_MAX)
return (a * b + r) / c;
else {
int64_t ad = a / c;
int64_t a2 = (a % c * b + r) / c;
if (ad >= INT32_MAX && ad > (INT64_MAX - a2) / b)
return INT64_MIN;
return ad * b + a2;
}
} else {
#if 1
uint64_t a0 = a & 0xFFFFFFFF;
uint64_t a1 = a >> 32;
uint64_t b0 = b & 0xFFFFFFFF;
uint64_t b1 = b >> 32;
uint64_t t1 = a0 * b1 + a1 * b0;
uint64_t t1a = t1 << 32;
int VAR_0;
a0 = a0 * b0 + t1a;
a1 = a1 * b1 + (t1 >> 32) + (a0 < t1a);
a0 += r;
a1 += a0 < r;
for (VAR_0 = 63; VAR_0 >= 0; VAR_0--) {
a1 += a1 + ((a0 >> VAR_0) & 1);
t1 += t1;
if (c <= a1) {
a1 -= c;
t1++;
}
}
if (t1 > INT64_MAX)
return INT64_MIN;
return t1;
}
#else
AVInteger ai;
ai = av_mul_i(av_int2i(a), av_int2i(b));
ai = av_add_i(ai, av_int2i(r));
return av_i2int(av_div_i(ai, av_int2i(c)));
}
#endif
}
| [
"int64_t FUNC_0(int64_t a, int64_t b, int64_t c, enum AVRounding rnd)\n{",
"int64_t r = 0;",
"av_assert2(c > 0);",
"av_assert2(b >=0);",
"av_assert2((unsigned)(rnd&~AV_ROUND_PASS_MINMAX)<=5 && (rnd&~AV_ROUND_PASS_MINMAX)!=4);",
"if (c <= 0 || b < 0 || !((unsigned)(rnd&~AV_ROUND_PASS_MINMAX)<=5 && (rnd&~AV... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
15,
17
],
[
21
],
[
23,
25
],
[
27
],
[
29
],
[
33,
35
],
[
39,
41
],
[
43,
45
],
[
49
],
[
51,
53
],
[
55
],
[
57
... |
13,201 | static void gen_check_cpenable(DisasContext *dc, unsigned cp)
{
if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) &&
!(dc->cpenable & (1 << cp))) {
gen_exception_cause(dc, COPROCESSOR0_DISABLED + cp);
dc->is_jmp = DISAS_UPDATE;
}
}
| true | qemu | 97e89ee914411384dcda771d38bf89f13726d71e | static void gen_check_cpenable(DisasContext *dc, unsigned cp)
{
if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) &&
!(dc->cpenable & (1 << cp))) {
gen_exception_cause(dc, COPROCESSOR0_DISABLED + cp);
dc->is_jmp = DISAS_UPDATE;
}
}
| {
"code": [
"static void gen_check_cpenable(DisasContext *dc, unsigned cp)"
],
"line_no": [
1
]
} | static void FUNC_0(DisasContext *VAR_0, unsigned VAR_1)
{
if (option_enabled(VAR_0, XTENSA_OPTION_COPROCESSOR) &&
!(VAR_0->cpenable & (1 << VAR_1))) {
gen_exception_cause(VAR_0, COPROCESSOR0_DISABLED + VAR_1);
VAR_0->is_jmp = DISAS_UPDATE;
}
}
| [
"static void FUNC_0(DisasContext *VAR_0, unsigned VAR_1)\n{",
"if (option_enabled(VAR_0, XTENSA_OPTION_COPROCESSOR) &&\n!(VAR_0->cpenable & (1 << VAR_1))) {",
"gen_exception_cause(VAR_0, COPROCESSOR0_DISABLED + VAR_1);",
"VAR_0->is_jmp = DISAS_UPDATE;",
"}",
"}"
] | [
1,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5,
7
],
[
9
],
[
11
],
[
13
],
[
15
]
] |
13,202 | static void gen_write_xer(TCGv src)
{
tcg_gen_andi_tl(cpu_xer, src,
~((1u << XER_SO) | (1u << XER_OV) | (1u << XER_CA)));
tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
}
| true | qemu | dd09c36159858c66ab6e47c688e4177dd3912bf0 | static void gen_write_xer(TCGv src)
{
tcg_gen_andi_tl(cpu_xer, src,
~((1u << XER_SO) | (1u << XER_OV) | (1u << XER_CA)));
tcg_gen_extract_tl(cpu_so, src, XER_SO, 1);
tcg_gen_extract_tl(cpu_ov, src, XER_OV, 1);
tcg_gen_extract_tl(cpu_ca, src, XER_CA, 1);
}
| {
"code": [
" ~((1u << XER_SO) | (1u << XER_OV) | (1u << XER_CA)));"
],
"line_no": [
7
]
} | static void FUNC_0(TCGv VAR_0)
{
tcg_gen_andi_tl(cpu_xer, VAR_0,
~((1u << XER_SO) | (1u << XER_OV) | (1u << XER_CA)));
tcg_gen_extract_tl(cpu_so, VAR_0, XER_SO, 1);
tcg_gen_extract_tl(cpu_ov, VAR_0, XER_OV, 1);
tcg_gen_extract_tl(cpu_ca, VAR_0, XER_CA, 1);
}
| [
"static void FUNC_0(TCGv VAR_0)\n{",
"tcg_gen_andi_tl(cpu_xer, VAR_0,\n~((1u << XER_SO) | (1u << XER_OV) | (1u << XER_CA)));",
"tcg_gen_extract_tl(cpu_so, VAR_0, XER_SO, 1);",
"tcg_gen_extract_tl(cpu_ov, VAR_0, XER_OV, 1);",
"tcg_gen_extract_tl(cpu_ca, VAR_0, XER_CA, 1);",
"}"
] | [
0,
1,
0,
0,
0,
0
] | [
[
1,
3
],
[
5,
7
],
[
9
],
[
11
],
[
13
],
[
15
]
] |
13,203 | void decode_mvs(VP8Context *s, VP8Macroblock *mb,
int mb_x, int mb_y, int layout)
{
VP8Macroblock *mb_edge[3] = { 0 /* top */,
mb - 1 /* left */,
0 /* top-left */ };
enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
int idx = CNT_ZERO;
int cur_sign_bias = s->sign_bias[mb->ref_frame];
int8_t *sign_bias = s->sign_bias;
VP56mv near_mv[4];
uint8_t cnt[4] = { 0 };
VP56RangeCoder *c = &s->c;
if (!layout) { // layout is inlined (s->mb_layout is not)
mb_edge[0] = mb + 2;
mb_edge[2] = mb + 1;
} else {
mb_edge[0] = mb - s->mb_width - 1;
mb_edge[2] = mb - s->mb_width - 2;
}
AV_ZERO32(&near_mv[0]);
AV_ZERO32(&near_mv[1]);
AV_ZERO32(&near_mv[2]);
/* Process MB on top, left and top-left */
#define MV_EDGE_CHECK(n) \
{ \
VP8Macroblock *edge = mb_edge[n]; \
int edge_ref = edge->ref_frame; \
if (edge_ref != VP56_FRAME_CURRENT) { \
uint32_t mv = AV_RN32A(&edge->mv); \
if (mv) { \
if (cur_sign_bias != sign_bias[edge_ref]) { \
/* SWAR negate of the values in mv. */ \
mv = ~mv; \
mv = ((mv & 0x7fff7fff) + \
0x00010001) ^ (mv & 0x80008000); \
} \
if (!n || mv != AV_RN32A(&near_mv[idx])) \
AV_WN32A(&near_mv[++idx], mv); \
cnt[idx] += 1 + (n != 2); \
} else \
cnt[CNT_ZERO] += 1 + (n != 2); \
} \
}
MV_EDGE_CHECK(0)
MV_EDGE_CHECK(1)
MV_EDGE_CHECK(2)
mb->partitioning = VP8_SPLITMVMODE_NONE;
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
mb->mode = VP8_MVMODE_MV;
/* If we have three distinct MVs, merge first and last if they're the same */
if (cnt[CNT_SPLITMV] &&
AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
cnt[CNT_NEAREST] += 1;
/* Swap near and nearest if necessary */
if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
}
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
/* Choose the best mv out of 0,0 and the nearest mv */
clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
(mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
(mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
mb->mode = VP8_MVMODE_SPLIT;
mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout) - 1];
} else {
mb->mv.y += read_mv_component(c, s->prob->mvc[0]);
mb->mv.x += read_mv_component(c, s->prob->mvc[1]);
mb->bmv[0] = mb->mv;
}
} else {
clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]);
mb->bmv[0] = mb->mv;
}
} else {
clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]);
mb->bmv[0] = mb->mv;
}
} else {
mb->mode = VP8_MVMODE_ZERO;
AV_ZERO32(&mb->mv);
mb->bmv[0] = mb->mv;
}
}
| true | FFmpeg | ac4b32df71bd932838043a4838b86d11e169707f | void decode_mvs(VP8Context *s, VP8Macroblock *mb,
int mb_x, int mb_y, int layout)
{
VP8Macroblock *mb_edge[3] = { 0 ,
mb - 1 ,
0 };
enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
int idx = CNT_ZERO;
int cur_sign_bias = s->sign_bias[mb->ref_frame];
int8_t *sign_bias = s->sign_bias;
VP56mv near_mv[4];
uint8_t cnt[4] = { 0 };
VP56RangeCoder *c = &s->c;
if (!layout) {
mb_edge[0] = mb + 2;
mb_edge[2] = mb + 1;
} else {
mb_edge[0] = mb - s->mb_width - 1;
mb_edge[2] = mb - s->mb_width - 2;
}
AV_ZERO32(&near_mv[0]);
AV_ZERO32(&near_mv[1]);
AV_ZERO32(&near_mv[2]);
#define MV_EDGE_CHECK(n) \
{ \
VP8Macroblock *edge = mb_edge[n]; \
int edge_ref = edge->ref_frame; \
if (edge_ref != VP56_FRAME_CURRENT) { \
uint32_t mv = AV_RN32A(&edge->mv); \
if (mv) { \
if (cur_sign_bias != sign_bias[edge_ref]) { \
\
mv = ~mv; \
mv = ((mv & 0x7fff7fff) + \
0x00010001) ^ (mv & 0x80008000); \
} \
if (!n || mv != AV_RN32A(&near_mv[idx])) \
AV_WN32A(&near_mv[++idx], mv); \
cnt[idx] += 1 + (n != 2); \
} else \
cnt[CNT_ZERO] += 1 + (n != 2); \
} \
}
MV_EDGE_CHECK(0)
MV_EDGE_CHECK(1)
MV_EDGE_CHECK(2)
mb->partitioning = VP8_SPLITMVMODE_NONE;
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
mb->mode = VP8_MVMODE_MV;
if (cnt[CNT_SPLITMV] &&
AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
cnt[CNT_NEAREST] += 1;
if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
}
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
clamp_mv(s, &mb->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
(mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
(mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
mb->mode = VP8_MVMODE_SPLIT;
mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout) - 1];
} else {
mb->mv.y += read_mv_component(c, s->prob->mvc[0]);
mb->mv.x += read_mv_component(c, s->prob->mvc[1]);
mb->bmv[0] = mb->mv;
}
} else {
clamp_mv(s, &mb->mv, &near_mv[CNT_NEAR]);
mb->bmv[0] = mb->mv;
}
} else {
clamp_mv(s, &mb->mv, &near_mv[CNT_NEAREST]);
mb->bmv[0] = mb->mv;
}
} else {
mb->mode = VP8_MVMODE_ZERO;
AV_ZERO32(&mb->mv);
mb->bmv[0] = mb->mv;
}
}
| {
"code": [
"void decode_mvs(VP8Context *s, VP8Macroblock *mb,",
" int mb_x, int mb_y, int layout)",
" mb->mv = mb->bmv[decode_splitmvs(s, c, mb, layout) - 1];",
" mb->mv.y += read_mv_component(c, s->prob->mvc[0]);",
" mb->mv.x += read_mv_component(c, s->prob->mvc[1]);"
],
"line_no": [
1,
3,
157,
161,
163
]
} | void FUNC_0(VP8Context *VAR_0, VP8Macroblock *VAR_1,
int VAR_2, int VAR_3, int VAR_4)
{
VP8Macroblock *mb_edge[3] = { 0 ,
VAR_1 - 1 ,
0 };
enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };
enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };
int VAR_5 = CNT_ZERO;
int VAR_6 = VAR_0->sign_bias[VAR_1->ref_frame];
int8_t *sign_bias = VAR_0->sign_bias;
VP56mv near_mv[4];
uint8_t cnt[4] = { 0 };
VP56RangeCoder *c = &VAR_0->c;
if (!VAR_4) {
mb_edge[0] = VAR_1 + 2;
mb_edge[2] = VAR_1 + 1;
} else {
mb_edge[0] = VAR_1 - VAR_0->mb_width - 1;
mb_edge[2] = VAR_1 - VAR_0->mb_width - 2;
}
AV_ZERO32(&near_mv[0]);
AV_ZERO32(&near_mv[1]);
AV_ZERO32(&near_mv[2]);
#define MV_EDGE_CHECK(n) \
{ \
VP8Macroblock *edge = mb_edge[n]; \
int VAR_7 = edge->ref_frame; \
if (VAR_7 != VP56_FRAME_CURRENT) { \
uint32_t mv = AV_RN32A(&edge->mv); \
if (mv) { \
if (VAR_6 != sign_bias[VAR_7]) { \
\
mv = ~mv; \
mv = ((mv & 0x7fff7fff) + \
0x00010001) ^ (mv & 0x80008000); \
} \
if (!n || mv != AV_RN32A(&near_mv[VAR_5])) \
AV_WN32A(&near_mv[++VAR_5], mv); \
cnt[VAR_5] += 1 + (n != 2); \
} else \
cnt[CNT_ZERO] += 1 + (n != 2); \
} \
}
MV_EDGE_CHECK(0)
MV_EDGE_CHECK(1)
MV_EDGE_CHECK(2)
VAR_1->partitioning = VP8_SPLITMVMODE_NONE;
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_ZERO]][0])) {
VAR_1->mode = VP8_MVMODE_MV;
if (cnt[CNT_SPLITMV] &&
AV_RN32A(&near_mv[1 + VP8_EDGE_TOP]) == AV_RN32A(&near_mv[1 + VP8_EDGE_TOPLEFT]))
cnt[CNT_NEAREST] += 1;
if (cnt[CNT_NEAR] > cnt[CNT_NEAREST]) {
FFSWAP(uint8_t, cnt[CNT_NEAREST], cnt[CNT_NEAR]);
FFSWAP( VP56mv, near_mv[CNT_NEAREST], near_mv[CNT_NEAR]);
}
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAREST]][1])) {
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_NEAR]][2])) {
clamp_mv(VAR_0, &VAR_1->mv, &near_mv[CNT_ZERO + (cnt[CNT_NEAREST] >= cnt[CNT_ZERO])]);
cnt[CNT_SPLITMV] = ((mb_edge[VP8_EDGE_LEFT]->mode == VP8_MVMODE_SPLIT) +
(mb_edge[VP8_EDGE_TOP]->mode == VP8_MVMODE_SPLIT)) * 2 +
(mb_edge[VP8_EDGE_TOPLEFT]->mode == VP8_MVMODE_SPLIT);
if (vp56_rac_get_prob_branchy(c, vp8_mode_contexts[cnt[CNT_SPLITMV]][3])) {
VAR_1->mode = VP8_MVMODE_SPLIT;
VAR_1->mv = VAR_1->bmv[decode_splitmvs(VAR_0, c, VAR_1, VAR_4) - 1];
} else {
VAR_1->mv.y += read_mv_component(c, VAR_0->prob->mvc[0]);
VAR_1->mv.x += read_mv_component(c, VAR_0->prob->mvc[1]);
VAR_1->bmv[0] = VAR_1->mv;
}
} else {
clamp_mv(VAR_0, &VAR_1->mv, &near_mv[CNT_NEAR]);
VAR_1->bmv[0] = VAR_1->mv;
}
} else {
clamp_mv(VAR_0, &VAR_1->mv, &near_mv[CNT_NEAREST]);
VAR_1->bmv[0] = VAR_1->mv;
}
} else {
VAR_1->mode = VP8_MVMODE_ZERO;
AV_ZERO32(&VAR_1->mv);
VAR_1->bmv[0] = VAR_1->mv;
}
}
| [
"void FUNC_0(VP8Context *VAR_0, VP8Macroblock *VAR_1,\nint VAR_2, int VAR_3, int VAR_4)\n{",
"VP8Macroblock *mb_edge[3] = { 0 ,",
"VAR_1 - 1 ,\n0 };",
"enum { CNT_ZERO, CNT_NEAREST, CNT_NEAR, CNT_SPLITMV };",
"enum { VP8_EDGE_TOP, VP8_EDGE_LEFT, VP8_EDGE_TOPLEFT };",
"int VAR_5 = CNT_ZERO;",
... | [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
1,
1,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
[
1,
3,
5
],
[
7
],
[
9,
11
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
],
[
47
... |
13,206 | static void fw_cfg_initfn(Object *obj)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
FWCfgState *s = FW_CFG(obj);
memory_region_init_io(&s->ctl_iomem, OBJECT(s), &fw_cfg_ctl_mem_ops, s,
"fwcfg.ctl", FW_CFG_SIZE);
sysbus_init_mmio(sbd, &s->ctl_iomem);
memory_region_init_io(&s->data_iomem, OBJECT(s), &fw_cfg_data_mem_ops, s,
"fwcfg.data", FW_CFG_DATA_SIZE);
sysbus_init_mmio(sbd, &s->data_iomem);
/* In case ctl and data overlap: */
memory_region_init_io(&s->comb_iomem, OBJECT(s), &fw_cfg_comb_mem_ops, s,
"fwcfg", FW_CFG_SIZE);
}
| true | qemu | 5712db6ae5101db645f71edc393368cd59bfd314 | static void fw_cfg_initfn(Object *obj)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(obj);
FWCfgState *s = FW_CFG(obj);
memory_region_init_io(&s->ctl_iomem, OBJECT(s), &fw_cfg_ctl_mem_ops, s,
"fwcfg.ctl", FW_CFG_SIZE);
sysbus_init_mmio(sbd, &s->ctl_iomem);
memory_region_init_io(&s->data_iomem, OBJECT(s), &fw_cfg_data_mem_ops, s,
"fwcfg.data", FW_CFG_DATA_SIZE);
sysbus_init_mmio(sbd, &s->data_iomem);
memory_region_init_io(&s->comb_iomem, OBJECT(s), &fw_cfg_comb_mem_ops, s,
"fwcfg", FW_CFG_SIZE);
}
| {
"code": [
"static void fw_cfg_initfn(Object *obj)",
" SysBusDevice *sbd = SYS_BUS_DEVICE(obj);",
" FWCfgState *s = FW_CFG(obj);",
" memory_region_init_io(&s->ctl_iomem, OBJECT(s), &fw_cfg_ctl_mem_ops, s,",
" \"fwcfg.ctl\", FW_CFG_SIZE);",
" sysbus_init_mmio(sbd, &s->ctl_iomem);",
" memory_region_init_io(&s->data_iomem, OBJECT(s), &fw_cfg_data_mem_ops, s,",
" \"fwcfg.data\", FW_CFG_DATA_SIZE);",
" sysbus_init_mmio(sbd, &s->data_iomem);",
" memory_region_init_io(&s->comb_iomem, OBJECT(s), &fw_cfg_comb_mem_ops, s,",
" \"fwcfg\", FW_CFG_SIZE);"
],
"line_no": [
1,
5,
7,
11,
13,
15,
17,
19,
21,
25,
27
]
} | static void FUNC_0(Object *VAR_0)
{
SysBusDevice *sbd = SYS_BUS_DEVICE(VAR_0);
FWCfgState *s = FW_CFG(VAR_0);
memory_region_init_io(&s->ctl_iomem, OBJECT(s), &fw_cfg_ctl_mem_ops, s,
"fwcfg.ctl", FW_CFG_SIZE);
sysbus_init_mmio(sbd, &s->ctl_iomem);
memory_region_init_io(&s->data_iomem, OBJECT(s), &fw_cfg_data_mem_ops, s,
"fwcfg.data", FW_CFG_DATA_SIZE);
sysbus_init_mmio(sbd, &s->data_iomem);
memory_region_init_io(&s->comb_iomem, OBJECT(s), &fw_cfg_comb_mem_ops, s,
"fwcfg", FW_CFG_SIZE);
}
| [
"static void FUNC_0(Object *VAR_0)\n{",
"SysBusDevice *sbd = SYS_BUS_DEVICE(VAR_0);",
"FWCfgState *s = FW_CFG(VAR_0);",
"memory_region_init_io(&s->ctl_iomem, OBJECT(s), &fw_cfg_ctl_mem_ops, s,\n\"fwcfg.ctl\", FW_CFG_SIZE);",
"sysbus_init_mmio(sbd, &s->ctl_iomem);",
"memory_region_init_io(&s->data_iomem, O... | [
1,
1,
1,
1,
1,
1,
1,
1,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
11,
13
],
[
15
],
[
17,
19
],
[
21
],
[
25,
27
],
[
29
]
] |
13,207 | int av_get_cpu_flags(void)
{
int flags = cpu_flags;
if (flags == -1) {
flags = get_cpu_flags();
cpu_flags = flags;
}
return flags;
}
| true | FFmpeg | fed50c4304eecb352e29ce789cdb96ea84d6162f | int av_get_cpu_flags(void)
{
int flags = cpu_flags;
if (flags == -1) {
flags = get_cpu_flags();
cpu_flags = flags;
}
return flags;
}
| {
"code": [
" int flags = cpu_flags;",
" cpu_flags = flags;"
],
"line_no": [
5,
11
]
} | int FUNC_0(void)
{
int VAR_0 = cpu_flags;
if (VAR_0 == -1) {
VAR_0 = get_cpu_flags();
cpu_flags = VAR_0;
}
return VAR_0;
}
| [
"int FUNC_0(void)\n{",
"int VAR_0 = cpu_flags;",
"if (VAR_0 == -1) {",
"VAR_0 = get_cpu_flags();",
"cpu_flags = VAR_0;",
"}",
"return VAR_0;",
"}"
] | [
0,
1,
0,
0,
1,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
17
]
] |
13,208 | static ExitStatus trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned rt = extract32(insn, 0, 5);
unsigned rb = extract32(insn, 16, 5);
unsigned ra = extract32(insn, 21, 5);
return do_fop_weww(ctx, rt, ra, rb, di->f_weww);
}
| true | qemu | eff235eb2bcd7092901f4698a7907e742f3b7f2f | static ExitStatus trans_fop_weww_0c(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned rt = extract32(insn, 0, 5);
unsigned rb = extract32(insn, 16, 5);
unsigned ra = extract32(insn, 21, 5);
return do_fop_weww(ctx, rt, ra, rb, di->f_weww);
}
| {
"code": [
" return do_fop_weww(ctx, rt, ra, rb, di->f_weww);",
" return do_fop_weww(ctx, rt, ra, rb, di->f_weww);"
],
"line_no": [
13,
13
]
} | static ExitStatus FUNC_0(DisasContext *ctx, uint32_t insn,
const DisasInsn *di)
{
unsigned VAR_0 = extract32(insn, 0, 5);
unsigned VAR_1 = extract32(insn, 16, 5);
unsigned VAR_2 = extract32(insn, 21, 5);
return do_fop_weww(ctx, VAR_0, VAR_2, VAR_1, di->f_weww);
}
| [
"static ExitStatus FUNC_0(DisasContext *ctx, uint32_t insn,\nconst DisasInsn *di)\n{",
"unsigned VAR_0 = extract32(insn, 0, 5);",
"unsigned VAR_1 = extract32(insn, 16, 5);",
"unsigned VAR_2 = extract32(insn, 21, 5);",
"return do_fop_weww(ctx, VAR_0, VAR_2, VAR_1, di->f_weww);",
"}"
] | [
0,
0,
0,
0,
1,
0
] | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
]
] |
13,209 | static int inc_refcounts(BlockDriverState *bs,
BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size,
int64_t offset, int64_t size)
{
BDRVQcow2State *s = bs->opaque;
uint64_t start, last, cluster_offset, k, refcount;
int ret;
if (size <= 0) {
return 0;
}
start = start_of_cluster(s, offset);
last = start_of_cluster(s, offset + size - 1);
for(cluster_offset = start; cluster_offset <= last;
cluster_offset += s->cluster_size) {
k = cluster_offset >> s->cluster_bits;
if (k >= *refcount_table_size) {
ret = realloc_refcount_array(s, refcount_table,
refcount_table_size, k + 1);
if (ret < 0) {
res->check_errors++;
return ret;
}
}
refcount = s->get_refcount(*refcount_table, k);
if (refcount == s->refcount_max) {
fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
"\n", cluster_offset);
res->corruptions++;
continue;
}
s->set_refcount(*refcount_table, k, refcount + 1);
}
return 0;
} | true | qemu | 03bb78ed250874f76e0854aa4d5d95af1e12613e | static int inc_refcounts(BlockDriverState *bs,
BdrvCheckResult *res,
void **refcount_table,
int64_t *refcount_table_size,
int64_t offset, int64_t size)
{
BDRVQcow2State *s = bs->opaque;
uint64_t start, last, cluster_offset, k, refcount;
int ret;
if (size <= 0) {
return 0;
}
start = start_of_cluster(s, offset);
last = start_of_cluster(s, offset + size - 1);
for(cluster_offset = start; cluster_offset <= last;
cluster_offset += s->cluster_size) {
k = cluster_offset >> s->cluster_bits;
if (k >= *refcount_table_size) {
ret = realloc_refcount_array(s, refcount_table,
refcount_table_size, k + 1);
if (ret < 0) {
res->check_errors++;
return ret;
}
}
refcount = s->get_refcount(*refcount_table, k);
if (refcount == s->refcount_max) {
fprintf(stderr, "ERROR: overflow cluster offset=0x%" PRIx64
"\n", cluster_offset);
res->corruptions++;
continue;
}
s->set_refcount(*refcount_table, k, refcount + 1);
}
return 0;
} | {
"code": [],
"line_no": []
} | static int FUNC_0(BlockDriverState *VAR_0,
BdrvCheckResult *VAR_1,
void **VAR_2,
int64_t *VAR_3,
int64_t VAR_4, int64_t VAR_5)
{
BDRVQcow2State *s = VAR_0->opaque;
uint64_t start, last, cluster_offset, k, refcount;
int VAR_6;
if (VAR_5 <= 0) {
return 0;
}
start = start_of_cluster(s, VAR_4);
last = start_of_cluster(s, VAR_4 + VAR_5 - 1);
for(cluster_offset = start; cluster_offset <= last;
cluster_offset += s->cluster_size) {
k = cluster_offset >> s->cluster_bits;
if (k >= *VAR_3) {
VAR_6 = realloc_refcount_array(s, VAR_2,
VAR_3, k + 1);
if (VAR_6 < 0) {
VAR_1->check_errors++;
return VAR_6;
}
}
refcount = s->get_refcount(*VAR_2, k);
if (refcount == s->refcount_max) {
fprintf(stderr, "ERROR: overflow cluster VAR_4=0x%" PRIx64
"\n", cluster_offset);
VAR_1->corruptions++;
continue;
}
s->set_refcount(*VAR_2, k, refcount + 1);
}
return 0;
} | [
"static int FUNC_0(BlockDriverState *VAR_0,\nBdrvCheckResult *VAR_1,\nvoid **VAR_2,\nint64_t *VAR_3,\nint64_t VAR_4, int64_t VAR_5)\n{",
"BDRVQcow2State *s = VAR_0->opaque;",
"uint64_t start, last, cluster_offset, k, refcount;",
"int VAR_6;",
"if (VAR_5 <= 0) {",
"return 0;",
"}",
"start = start_of_cl... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5,
7,
9,
11
],
[
13
],
[
15
],
[
17
],
[
21
],
[
23
],
[
25
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41,
43
],
[
45
],
[
47
],
[
49
],
[... |
13,210 | static int gdb_get_avr_reg(CPUState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
#ifdef WORDS_BIGENDIAN
stq_p(mem_buf, env->avr[n].u64[0]);
stq_p(mem_buf+8, env->avr[n].u64[1]);
#else
stq_p(mem_buf, env->avr[n].u64[1]);
stq_p(mem_buf+8, env->avr[n].u64[0]);
#endif
return 16;
}
if (n == 33) {
stl_p(mem_buf, env->vscr);
return 4;
}
if (n == 34) {
stl_p(mem_buf, (uint32_t)env->spr[SPR_VRSAVE]);
return 4;
}
return 0;
}
| true | qemu | 70976a7926b42d87e0c575412b85a8f5c1e48fad | static int gdb_get_avr_reg(CPUState *env, uint8_t *mem_buf, int n)
{
if (n < 32) {
#ifdef WORDS_BIGENDIAN
stq_p(mem_buf, env->avr[n].u64[0]);
stq_p(mem_buf+8, env->avr[n].u64[1]);
#else
stq_p(mem_buf, env->avr[n].u64[1]);
stq_p(mem_buf+8, env->avr[n].u64[0]);
#endif
return 16;
}
if (n == 33) {
stl_p(mem_buf, env->vscr);
return 4;
}
if (n == 34) {
stl_p(mem_buf, (uint32_t)env->spr[SPR_VRSAVE]);
return 4;
}
return 0;
}
| {
"code": [
" if (n == 33) {",
" if (n == 34) {",
" if (n == 33) {",
" if (n == 34) {",
" if (n == 33) {",
" if (n == 34) {",
" if (n == 33) {",
" if (n == 34) {"
],
"line_no": [
25,
33,
25,
33,
25,
33,
25,
33
]
} | static int FUNC_0(CPUState *VAR_0, uint8_t *VAR_1, int VAR_2)
{
if (VAR_2 < 32) {
#ifdef WORDS_BIGENDIAN
stq_p(VAR_1, VAR_0->avr[VAR_2].u64[0]);
stq_p(VAR_1+8, VAR_0->avr[VAR_2].u64[1]);
#else
stq_p(VAR_1, VAR_0->avr[VAR_2].u64[1]);
stq_p(VAR_1+8, VAR_0->avr[VAR_2].u64[0]);
#endif
return 16;
}
if (VAR_2 == 33) {
stl_p(VAR_1, VAR_0->vscr);
return 4;
}
if (VAR_2 == 34) {
stl_p(VAR_1, (uint32_t)VAR_0->spr[SPR_VRSAVE]);
return 4;
}
return 0;
}
| [
"static int FUNC_0(CPUState *VAR_0, uint8_t *VAR_1, int VAR_2)\n{",
"if (VAR_2 < 32) {",
"#ifdef WORDS_BIGENDIAN\nstq_p(VAR_1, VAR_0->avr[VAR_2].u64[0]);",
"stq_p(VAR_1+8, VAR_0->avr[VAR_2].u64[1]);",
"#else\nstq_p(VAR_1, VAR_0->avr[VAR_2].u64[1]);",
"stq_p(VAR_1+8, VAR_0->avr[VAR_2].u64[0]);",
"#endif\... | [
0,
0,
0,
0,
0,
0,
0,
0,
1,
0,
0,
0,
1,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7,
9
],
[
11
],
[
13,
15
],
[
17
],
[
19,
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
37
],
[
39
],
[
41
],
[
43
]
] |
13,211 | static void tpm_backend_worker_thread(gpointer data, gpointer user_data)
{
TPMBackend *s = TPM_BACKEND(user_data);
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
assert(k->handle_request != NULL);
k->handle_request(s, (TPMBackendCmd *)data);
qemu_bh_schedule(s->bh);
}
| true | qemu | ebca2df783a5a742bb93784524336d8cbb9e662b | static void tpm_backend_worker_thread(gpointer data, gpointer user_data)
{
TPMBackend *s = TPM_BACKEND(user_data);
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
assert(k->handle_request != NULL);
k->handle_request(s, (TPMBackendCmd *)data);
qemu_bh_schedule(s->bh);
}
| {
"code": [
" assert(k->handle_request != NULL);"
],
"line_no": [
11
]
} | static void FUNC_0(gpointer VAR_0, gpointer VAR_1)
{
TPMBackend *s = TPM_BACKEND(VAR_1);
TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);
assert(k->handle_request != NULL);
k->handle_request(s, (TPMBackendCmd *)VAR_0);
qemu_bh_schedule(s->bh);
}
| [
"static void FUNC_0(gpointer VAR_0, gpointer VAR_1)\n{",
"TPMBackend *s = TPM_BACKEND(VAR_1);",
"TPMBackendClass *k = TPM_BACKEND_GET_CLASS(s);",
"assert(k->handle_request != NULL);",
"k->handle_request(s, (TPMBackendCmd *)VAR_0);",
"qemu_bh_schedule(s->bh);",
"}"
] | [
0,
0,
0,
1,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
11
],
[
13
],
[
17
],
[
19
]
] |
13,212 | static uint_fast8_t vorbis_floor0_decode(vorbis_context *vc,
vorbis_floor_data *vfu, float *vec)
{
vorbis_floor0 *vf = &vfu->t0;
float *lsp = vf->lsp;
uint_fast32_t amplitude;
uint_fast32_t book_idx;
uint_fast8_t blockflag = vc->modes[vc->mode_number].blockflag;
amplitude = get_bits(&vc->gb, vf->amplitude_bits);
if (amplitude > 0) {
float last = 0;
uint_fast16_t lsp_len = 0;
uint_fast16_t idx;
vorbis_codebook codebook;
book_idx = get_bits(&vc->gb, ilog(vf->num_books));
if (book_idx >= vf->num_books) {
av_log(vc->avccontext, AV_LOG_ERROR,
"floor0 dec: booknumber too high!\n");
book_idx = 0;
//FIXME: look above
}
AV_DEBUG("floor0 dec: booknumber: %u\n", book_idx);
codebook = vc->codebooks[vf->book_list[book_idx]];
while (lsp_len<vf->order) {
int vec_off;
AV_DEBUG("floor0 dec: book dimension: %d\n", codebook.dimensions);
AV_DEBUG("floor0 dec: maximum depth: %d\n", codebook.maxdepth);
/* read temp vector */
vec_off = get_vlc2(&vc->gb, codebook.vlc.table,
codebook.nb_bits, codebook.maxdepth)
* codebook.dimensions;
AV_DEBUG("floor0 dec: vector offset: %d\n", vec_off);
/* copy each vector component and add last to it */
for (idx = 0; idx < codebook.dimensions; ++idx)
lsp[lsp_len+idx] = codebook.codevectors[vec_off+idx] + last;
last = lsp[lsp_len+idx-1]; /* set last to last vector component */
lsp_len += codebook.dimensions;
}
#ifdef V_DEBUG
/* DEBUG: output lsp coeffs */
{
int idx;
for (idx = 0; idx < lsp_len; ++idx)
AV_DEBUG("floor0 dec: coeff at %d is %f\n", idx, lsp[idx]);
}
#endif
/* synthesize floor output vector */
{
int i;
int order = vf->order;
float wstep = M_PI / vf->bark_map_size;
for (i = 0; i < order; i++)
lsp[i] = 2.0f * cos(lsp[i]);
AV_DEBUG("floor0 synth: map_size = %d; m = %d; wstep = %f\n",
vf->map_size, order, wstep);
i = 0;
while (i < vf->map_size[blockflag]) {
int j, iter_cond = vf->map[blockflag][i];
float p = 0.5f;
float q = 0.5f;
float two_cos_w = 2.0f * cos(wstep * iter_cond); // needed all times
/* similar part for the q and p products */
for (j = 0; j + 1 < order; j += 2) {
q *= lsp[j] - two_cos_w;
p *= lsp[j + 1] - two_cos_w;
}
if (j == order) { // even order
p *= p * (2.0f - two_cos_w);
q *= q * (2.0f + two_cos_w);
} else { // odd order
q *= two_cos_w-lsp[j]; // one more time for q
/* final step and square */
p *= p * (4.f - two_cos_w * two_cos_w);
q *= q;
}
/* calculate linear floor value */
{
q = exp((((amplitude*vf->amplitude_offset) /
(((1 << vf->amplitude_bits) - 1) * sqrt(p + q)))
- vf->amplitude_offset) * .11512925f);
}
/* fill vector */
do {
vec[i] = q; ++i;
} while (vf->map[blockflag][i] == iter_cond);
}
}
} else {
/* this channel is unused */
return 1;
}
AV_DEBUG(" Floor0 decoded\n");
return 0;
}
| true | FFmpeg | 3dde66752d59dfdd0f3727efd66e7202b3c75078 | static uint_fast8_t vorbis_floor0_decode(vorbis_context *vc,
vorbis_floor_data *vfu, float *vec)
{
vorbis_floor0 *vf = &vfu->t0;
float *lsp = vf->lsp;
uint_fast32_t amplitude;
uint_fast32_t book_idx;
uint_fast8_t blockflag = vc->modes[vc->mode_number].blockflag;
amplitude = get_bits(&vc->gb, vf->amplitude_bits);
if (amplitude > 0) {
float last = 0;
uint_fast16_t lsp_len = 0;
uint_fast16_t idx;
vorbis_codebook codebook;
book_idx = get_bits(&vc->gb, ilog(vf->num_books));
if (book_idx >= vf->num_books) {
av_log(vc->avccontext, AV_LOG_ERROR,
"floor0 dec: booknumber too high!\n");
book_idx = 0;
}
AV_DEBUG("floor0 dec: booknumber: %u\n", book_idx);
codebook = vc->codebooks[vf->book_list[book_idx]];
while (lsp_len<vf->order) {
int vec_off;
AV_DEBUG("floor0 dec: book dimension: %d\n", codebook.dimensions);
AV_DEBUG("floor0 dec: maximum depth: %d\n", codebook.maxdepth);
vec_off = get_vlc2(&vc->gb, codebook.vlc.table,
codebook.nb_bits, codebook.maxdepth)
* codebook.dimensions;
AV_DEBUG("floor0 dec: vector offset: %d\n", vec_off);
for (idx = 0; idx < codebook.dimensions; ++idx)
lsp[lsp_len+idx] = codebook.codevectors[vec_off+idx] + last;
last = lsp[lsp_len+idx-1];
lsp_len += codebook.dimensions;
}
#ifdef V_DEBUG
{
int idx;
for (idx = 0; idx < lsp_len; ++idx)
AV_DEBUG("floor0 dec: coeff at %d is %f\n", idx, lsp[idx]);
}
#endif
{
int i;
int order = vf->order;
float wstep = M_PI / vf->bark_map_size;
for (i = 0; i < order; i++)
lsp[i] = 2.0f * cos(lsp[i]);
AV_DEBUG("floor0 synth: map_size = %d; m = %d; wstep = %f\n",
vf->map_size, order, wstep);
i = 0;
while (i < vf->map_size[blockflag]) {
int j, iter_cond = vf->map[blockflag][i];
float p = 0.5f;
float q = 0.5f;
float two_cos_w = 2.0f * cos(wstep * iter_cond);
for (j = 0; j + 1 < order; j += 2) {
q *= lsp[j] - two_cos_w;
p *= lsp[j + 1] - two_cos_w;
}
if (j == order) {
p *= p * (2.0f - two_cos_w);
q *= q * (2.0f + two_cos_w);
} else {
q *= two_cos_w-lsp[j];
p *= p * (4.f - two_cos_w * two_cos_w);
q *= q;
}
{
q = exp((((amplitude*vf->amplitude_offset) /
(((1 << vf->amplitude_bits) - 1) * sqrt(p + q)))
- vf->amplitude_offset) * .11512925f);
}
do {
vec[i] = q; ++i;
} while (vf->map[blockflag][i] == iter_cond);
}
}
} else {
return 1;
}
AV_DEBUG(" Floor0 decoded\n");
return 0;
}
| {
"code": [
"static uint_fast8_t vorbis_floor0_decode(vorbis_context *vc,",
"static uint_fast8_t vorbis_floor0_decode(vorbis_context *vc,",
" vorbis_floor_data *vfu, float *vec)",
" vorbis_floor_data *vfu, float *vec)"
],
"line_no": [
1,
1,
3,
3
]
} | static uint_fast8_t FUNC_0(vorbis_context *vc,
vorbis_floor_data *vfu, float *vec)
{
vorbis_floor0 *vf = &vfu->t0;
float *VAR_0 = vf->VAR_0;
uint_fast32_t amplitude;
uint_fast32_t book_idx;
uint_fast8_t blockflag = vc->modes[vc->mode_number].blockflag;
amplitude = get_bits(&vc->gb, vf->amplitude_bits);
if (amplitude > 0) {
float VAR_1 = 0;
uint_fast16_t lsp_len = 0;
uint_fast16_t idx;
vorbis_codebook codebook;
book_idx = get_bits(&vc->gb, ilog(vf->num_books));
if (book_idx >= vf->num_books) {
av_log(vc->avccontext, AV_LOG_ERROR,
"floor0 dec: booknumber too high!\n");
book_idx = 0;
}
AV_DEBUG("floor0 dec: booknumber: %u\n", book_idx);
codebook = vc->codebooks[vf->book_list[book_idx]];
while (lsp_len<vf->VAR_4) {
int VAR_2;
AV_DEBUG("floor0 dec: book dimension: %d\n", codebook.dimensions);
AV_DEBUG("floor0 dec: maximum depth: %d\n", codebook.maxdepth);
VAR_2 = get_vlc2(&vc->gb, codebook.vlc.table,
codebook.nb_bits, codebook.maxdepth)
* codebook.dimensions;
AV_DEBUG("floor0 dec: vector offset: %d\n", VAR_2);
for (idx = 0; idx < codebook.dimensions; ++idx)
VAR_0[lsp_len+idx] = codebook.codevectors[VAR_2+idx] + VAR_1;
VAR_1 = VAR_0[lsp_len+idx-1];
lsp_len += codebook.dimensions;
}
#ifdef V_DEBUG
{
int idx;
for (idx = 0; idx < lsp_len; ++idx)
AV_DEBUG("floor0 dec: coeff at %d is %f\n", idx, VAR_0[idx]);
}
#endif
{
int VAR_3;
int VAR_4 = vf->VAR_4;
float VAR_5 = M_PI / vf->bark_map_size;
for (VAR_3 = 0; VAR_3 < VAR_4; VAR_3++)
VAR_0[VAR_3] = 2.0f * cos(VAR_0[VAR_3]);
AV_DEBUG("floor0 synth: map_size = %d; m = %d; VAR_5 = %f\n",
vf->map_size, VAR_4, VAR_5);
VAR_3 = 0;
while (VAR_3 < vf->map_size[blockflag]) {
int VAR_6, VAR_7 = vf->map[blockflag][VAR_3];
float VAR_8 = 0.5f;
float VAR_9 = 0.5f;
float VAR_10 = 2.0f * cos(VAR_5 * VAR_7);
for (VAR_6 = 0; VAR_6 + 1 < VAR_4; VAR_6 += 2) {
VAR_9 *= VAR_0[VAR_6] - VAR_10;
VAR_8 *= VAR_0[VAR_6 + 1] - VAR_10;
}
if (VAR_6 == VAR_4) {
VAR_8 *= VAR_8 * (2.0f - VAR_10);
VAR_9 *= VAR_9 * (2.0f + VAR_10);
} else {
VAR_9 *= VAR_10-VAR_0[VAR_6];
VAR_8 *= VAR_8 * (4.f - VAR_10 * VAR_10);
VAR_9 *= VAR_9;
}
{
VAR_9 = exp((((amplitude*vf->amplitude_offset) /
(((1 << vf->amplitude_bits) - 1) * sqrt(VAR_8 + VAR_9)))
- vf->amplitude_offset) * .11512925f);
}
do {
vec[VAR_3] = VAR_9; ++VAR_3;
} while (vf->map[blockflag][VAR_3] == VAR_7);
}
}
} else {
return 1;
}
AV_DEBUG(" Floor0 decoded\n");
return 0;
}
| [
"static uint_fast8_t FUNC_0(vorbis_context *vc,\nvorbis_floor_data *vfu, float *vec)\n{",
"vorbis_floor0 *vf = &vfu->t0;",
"float *VAR_0 = vf->VAR_0;",
"uint_fast32_t amplitude;",
"uint_fast32_t book_idx;",
"uint_fast8_t blockflag = vc->modes[vc->mode_number].blockflag;",
"amplitude = get_bits(&vc->gb, ... | [
1,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0... | [
[
1,
3,
5
],
[
7
],
[
9
],
[
11
],
[
13
],
[
15
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
33
],
[
35
],
[
37,
39
],
[
41
],
[
45
],
[
47
],
[
49
... |
13,214 | static void add_entry(TiffEncoderContext *s, enum TiffTags tag,
enum TiffTypes type, int count, const void *ptr_val)
{
uint8_t *entries_ptr = s->entries + 12 * s->num_entries;
assert(s->num_entries < TIFF_MAX_ENTRY);
bytestream_put_le16(&entries_ptr, tag);
bytestream_put_le16(&entries_ptr, type);
bytestream_put_le32(&entries_ptr, count);
if (type_sizes[type] * count <= 4) {
tnput(&entries_ptr, count, ptr_val, type, 0);
} else {
bytestream_put_le32(&entries_ptr, *s->buf - s->buf_start);
check_size(s, count * type_sizes2[type]);
tnput(s->buf, count, ptr_val, type, 0);
}
s->num_entries++;
}
| false | FFmpeg | 3c27275c1309190f2d6ed69140b67d014215b6c9 | static void add_entry(TiffEncoderContext *s, enum TiffTags tag,
enum TiffTypes type, int count, const void *ptr_val)
{
uint8_t *entries_ptr = s->entries + 12 * s->num_entries;
assert(s->num_entries < TIFF_MAX_ENTRY);
bytestream_put_le16(&entries_ptr, tag);
bytestream_put_le16(&entries_ptr, type);
bytestream_put_le32(&entries_ptr, count);
if (type_sizes[type] * count <= 4) {
tnput(&entries_ptr, count, ptr_val, type, 0);
} else {
bytestream_put_le32(&entries_ptr, *s->buf - s->buf_start);
check_size(s, count * type_sizes2[type]);
tnput(s->buf, count, ptr_val, type, 0);
}
s->num_entries++;
}
| {
"code": [],
"line_no": []
} | static void FUNC_0(TiffEncoderContext *VAR_0, enum TiffTags VAR_1,
enum TiffTypes VAR_2, int VAR_3, const void *VAR_4)
{
uint8_t *entries_ptr = VAR_0->entries + 12 * VAR_0->num_entries;
assert(VAR_0->num_entries < TIFF_MAX_ENTRY);
bytestream_put_le16(&entries_ptr, VAR_1);
bytestream_put_le16(&entries_ptr, VAR_2);
bytestream_put_le32(&entries_ptr, VAR_3);
if (type_sizes[VAR_2] * VAR_3 <= 4) {
tnput(&entries_ptr, VAR_3, VAR_4, VAR_2, 0);
} else {
bytestream_put_le32(&entries_ptr, *VAR_0->buf - VAR_0->buf_start);
check_size(VAR_0, VAR_3 * type_sizes2[VAR_2]);
tnput(VAR_0->buf, VAR_3, VAR_4, VAR_2, 0);
}
VAR_0->num_entries++;
}
| [
"static void FUNC_0(TiffEncoderContext *VAR_0, enum TiffTags VAR_1,\nenum TiffTypes VAR_2, int VAR_3, const void *VAR_4)\n{",
"uint8_t *entries_ptr = VAR_0->entries + 12 * VAR_0->num_entries;",
"assert(VAR_0->num_entries < TIFF_MAX_ENTRY);",
"bytestream_put_le16(&entries_ptr, VAR_1);",
"bytestream_put_le16(... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3,
5
],
[
7
],
[
11
],
[
15
],
[
17
],
[
19
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35
],
[
39
],
[
41
]
] |
13,215 | static int set_hwframe_ctx(AVCodecContext *ctx, AVBufferRef *hw_device_ctx)
{
AVBufferRef *hw_frames_ref;
AVHWFramesContext *frames_ctx = NULL;
int err = 0;
if (!(hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx))) {
fprintf(stderr, "Failed to create VAAPI frame context.\n");
return -1;
}
frames_ctx = (AVHWFramesContext *)(hw_frames_ref->data);
frames_ctx->format = AV_PIX_FMT_VAAPI;
frames_ctx->sw_format = AV_PIX_FMT_NV12;
frames_ctx->width = width;
frames_ctx->height = height;
frames_ctx->initial_pool_size = 20;
if ((err = av_hwframe_ctx_init(hw_frames_ref)) < 0) {
fprintf(stderr, "Failed to initialize VAAPI frame context."
"Error code: %s\n",av_err2str(err));
return err;
}
ctx->hw_frames_ctx = av_buffer_ref(hw_frames_ref);
if (!ctx->hw_frames_ctx)
err = AVERROR(ENOMEM);
return err;
} | true | FFmpeg | a763d278274cfbda4e78e21b338b9b525fe22eab | static int set_hwframe_ctx(AVCodecContext *ctx, AVBufferRef *hw_device_ctx)
{
AVBufferRef *hw_frames_ref;
AVHWFramesContext *frames_ctx = NULL;
int err = 0;
if (!(hw_frames_ref = av_hwframe_ctx_alloc(hw_device_ctx))) {
fprintf(stderr, "Failed to create VAAPI frame context.\n");
return -1;
}
frames_ctx = (AVHWFramesContext *)(hw_frames_ref->data);
frames_ctx->format = AV_PIX_FMT_VAAPI;
frames_ctx->sw_format = AV_PIX_FMT_NV12;
frames_ctx->width = width;
frames_ctx->height = height;
frames_ctx->initial_pool_size = 20;
if ((err = av_hwframe_ctx_init(hw_frames_ref)) < 0) {
fprintf(stderr, "Failed to initialize VAAPI frame context."
"Error code: %s\n",av_err2str(err));
return err;
}
ctx->hw_frames_ctx = av_buffer_ref(hw_frames_ref);
if (!ctx->hw_frames_ctx)
err = AVERROR(ENOMEM);
return err;
} | {
"code": [],
"line_no": []
} | static int FUNC_0(AVCodecContext *VAR_0, AVBufferRef *VAR_1)
{
AVBufferRef *hw_frames_ref;
AVHWFramesContext *frames_ctx = NULL;
int VAR_2 = 0;
if (!(hw_frames_ref = av_hwframe_ctx_alloc(VAR_1))) {
fprintf(stderr, "Failed to create VAAPI frame context.\n");
return -1;
}
frames_ctx = (AVHWFramesContext *)(hw_frames_ref->data);
frames_ctx->format = AV_PIX_FMT_VAAPI;
frames_ctx->sw_format = AV_PIX_FMT_NV12;
frames_ctx->width = width;
frames_ctx->height = height;
frames_ctx->initial_pool_size = 20;
if ((VAR_2 = av_hwframe_ctx_init(hw_frames_ref)) < 0) {
fprintf(stderr, "Failed to initialize VAAPI frame context."
"Error code: %s\n",av_err2str(VAR_2));
return VAR_2;
}
VAR_0->hw_frames_ctx = av_buffer_ref(hw_frames_ref);
if (!VAR_0->hw_frames_ctx)
VAR_2 = AVERROR(ENOMEM);
return VAR_2;
} | [
"static int FUNC_0(AVCodecContext *VAR_0, AVBufferRef *VAR_1)\n{",
"AVBufferRef *hw_frames_ref;",
"AVHWFramesContext *frames_ctx = NULL;",
"int VAR_2 = 0;",
"if (!(hw_frames_ref = av_hwframe_ctx_alloc(VAR_1))) {",
"fprintf(stderr, \"Failed to create VAAPI frame context.\\n\");",
"return -1;",
"}",
"... | [
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0,
0
] | [
[
1,
3
],
[
5
],
[
7
],
[
9
],
[
13
],
[
15
],
[
17
],
[
19
],
[
21
],
[
23
],
[
25
],
[
27
],
[
29
],
[
31
],
[
33
],
[
35,
37
],
[
40
],
[
42
],
[
44
],
[... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.