repo_name
stringlengths
6
97
path
stringlengths
3
341
text
stringlengths
8
1.02M
pmp-tool/PMP
src/qemu/src-pmp/hw/audio/wm8750.c
<filename>src/qemu/src-pmp/hw/audio/wm8750.c /* * WM8750 audio CODEC. * * Copyright (c) 2006 Openedhand Ltd. * Written by <NAME> <<EMAIL>> * * This file is licensed under GNU GPL. */ #include "qemu/osdep.h" #include "hw/i2c/i2c.h" #include "hw/audio/wm8750.h" #include "audio/audio.h" #define IN_PORT_N 3 #define OUT_PORT_N 3 #define CODEC "wm8750" typedef struct { int adc; int adc_hz; int dac; int dac_hz; } WMRate; #define WM8750(obj) OBJECT_CHECK(WM8750State, (obj), TYPE_WM8750) typedef struct WM8750State { I2CSlave parent_obj; uint8_t i2c_data[2]; int i2c_len; QEMUSoundCard card; SWVoiceIn *adc_voice[IN_PORT_N]; SWVoiceOut *dac_voice[OUT_PORT_N]; int enable; void (*data_req)(void *, int, int); void *opaque; uint8_t data_in[4096]; uint8_t data_out[4096]; int idx_in, req_in; int idx_out, req_out; SWVoiceOut **out[2]; uint8_t outvol[7], outmute[2]; SWVoiceIn **in[2]; uint8_t invol[4], inmute[2]; uint8_t diff[2], pol, ds, monomix[2], alc, mute; uint8_t path[4], mpath[2], power, format; const WMRate *rate; uint8_t rate_vmstate; int adc_hz, dac_hz, ext_adc_hz, ext_dac_hz, master; } WM8750State; /* pow(10.0, -i / 20.0) * 255, i = 0..42 */ static const uint8_t wm8750_vol_db_table[] = { 255, 227, 203, 181, 161, 143, 128, 114, 102, 90, 81, 72, 64, 57, 51, 45, 40, 36, 32, 29, 26, 23, 20, 18, 16, 14, 13, 11, 10, 9, 8, 7, 6, 6, 5, 5, 4, 4, 3, 3, 3, 2, 2 }; #define WM8750_OUTVOL_TRANSFORM(x) wm8750_vol_db_table[(0x7f - x) / 3] #define WM8750_INVOL_TRANSFORM(x) (x << 2) static inline void wm8750_in_load(WM8750State *s) { if (s->idx_in + s->req_in <= sizeof(s->data_in)) return; s->idx_in = audio_MAX(0, (int) sizeof(s->data_in) - s->req_in); AUD_read(*s->in[0], s->data_in + s->idx_in, sizeof(s->data_in) - s->idx_in); } static inline void wm8750_out_flush(WM8750State *s) { int sent = 0; while (sent < s->idx_out) sent += AUD_write(*s->out[0], s->data_out + sent, s->idx_out - sent) ?: s->idx_out; s->idx_out = 0; } static void wm8750_audio_in_cb(void *opaque, int avail_b) { 
WM8750State *s = (WM8750State *) opaque; s->req_in = avail_b; s->data_req(s->opaque, s->req_out >> 2, avail_b >> 2); } static void wm8750_audio_out_cb(void *opaque, int free_b) { WM8750State *s = (WM8750State *) opaque; if (s->idx_out >= free_b) { s->idx_out = free_b; s->req_out = 0; wm8750_out_flush(s); } else s->req_out = free_b - s->idx_out; s->data_req(s->opaque, s->req_out >> 2, s->req_in >> 2); } static const WMRate wm_rate_table[] = { { 256, 48000, 256, 48000 }, /* SR: 00000 */ { 384, 48000, 384, 48000 }, /* SR: 00001 */ { 256, 48000, 1536, 8000 }, /* SR: 00010 */ { 384, 48000, 2304, 8000 }, /* SR: 00011 */ { 1536, 8000, 256, 48000 }, /* SR: 00100 */ { 2304, 8000, 384, 48000 }, /* SR: 00101 */ { 1536, 8000, 1536, 8000 }, /* SR: 00110 */ { 2304, 8000, 2304, 8000 }, /* SR: 00111 */ { 1024, 12000, 1024, 12000 }, /* SR: 01000 */ { 1526, 12000, 1536, 12000 }, /* SR: 01001 */ { 768, 16000, 768, 16000 }, /* SR: 01010 */ { 1152, 16000, 1152, 16000 }, /* SR: 01011 */ { 384, 32000, 384, 32000 }, /* SR: 01100 */ { 576, 32000, 576, 32000 }, /* SR: 01101 */ { 128, 96000, 128, 96000 }, /* SR: 01110 */ { 192, 96000, 192, 96000 }, /* SR: 01111 */ { 256, 44100, 256, 44100 }, /* SR: 10000 */ { 384, 44100, 384, 44100 }, /* SR: 10001 */ { 256, 44100, 1408, 8018 }, /* SR: 10010 */ { 384, 44100, 2112, 8018 }, /* SR: 10011 */ { 1408, 8018, 256, 44100 }, /* SR: 10100 */ { 2112, 8018, 384, 44100 }, /* SR: 10101 */ { 1408, 8018, 1408, 8018 }, /* SR: 10110 */ { 2112, 8018, 2112, 8018 }, /* SR: 10111 */ { 1024, 11025, 1024, 11025 }, /* SR: 11000 */ { 1536, 11025, 1536, 11025 }, /* SR: 11001 */ { 512, 22050, 512, 22050 }, /* SR: 11010 */ { 768, 22050, 768, 22050 }, /* SR: 11011 */ { 512, 24000, 512, 24000 }, /* SR: 11100 */ { 768, 24000, 768, 24000 }, /* SR: 11101 */ { 128, 88200, 128, 88200 }, /* SR: 11110 */ { 192, 88200, 192, 88200 }, /* SR: 11111 */ }; static void wm8750_vol_update(WM8750State *s) { /* FIXME: multiply all volumes by s->invol[2], s->invol[3] */ 
AUD_set_volume_in(s->adc_voice[0], s->mute, s->inmute[0] ? 0 : WM8750_INVOL_TRANSFORM(s->invol[0]), s->inmute[1] ? 0 : WM8750_INVOL_TRANSFORM(s->invol[1])); AUD_set_volume_in(s->adc_voice[1], s->mute, s->inmute[0] ? 0 : WM8750_INVOL_TRANSFORM(s->invol[0]), s->inmute[1] ? 0 : WM8750_INVOL_TRANSFORM(s->invol[1])); AUD_set_volume_in(s->adc_voice[2], s->mute, s->inmute[0] ? 0 : WM8750_INVOL_TRANSFORM(s->invol[0]), s->inmute[1] ? 0 : WM8750_INVOL_TRANSFORM(s->invol[1])); /* FIXME: multiply all volumes by s->outvol[0], s->outvol[1] */ /* Speaker: LOUT2VOL ROUT2VOL */ AUD_set_volume_out(s->dac_voice[0], s->mute, s->outmute[0] ? 0 : WM8750_OUTVOL_TRANSFORM(s->outvol[4]), s->outmute[1] ? 0 : WM8750_OUTVOL_TRANSFORM(s->outvol[5])); /* Headphone: LOUT1VOL ROUT1VOL */ AUD_set_volume_out(s->dac_voice[1], s->mute, s->outmute[0] ? 0 : WM8750_OUTVOL_TRANSFORM(s->outvol[2]), s->outmute[1] ? 0 : WM8750_OUTVOL_TRANSFORM(s->outvol[3])); /* MONOOUT: MONOVOL MONOVOL */ AUD_set_volume_out(s->dac_voice[2], s->mute, s->outmute[0] ? 0 : WM8750_OUTVOL_TRANSFORM(s->outvol[6]), s->outmute[1] ? 
0 : WM8750_OUTVOL_TRANSFORM(s->outvol[6])); } static void wm8750_set_format(WM8750State *s) { int i; struct audsettings in_fmt; struct audsettings out_fmt; wm8750_out_flush(s); if (s->in[0] && *s->in[0]) AUD_set_active_in(*s->in[0], 0); if (s->out[0] && *s->out[0]) AUD_set_active_out(*s->out[0], 0); for (i = 0; i < IN_PORT_N; i ++) if (s->adc_voice[i]) { AUD_close_in(&s->card, s->adc_voice[i]); s->adc_voice[i] = NULL; } for (i = 0; i < OUT_PORT_N; i ++) if (s->dac_voice[i]) { AUD_close_out(&s->card, s->dac_voice[i]); s->dac_voice[i] = NULL; } if (!s->enable) return; /* Setup input */ in_fmt.endianness = 0; in_fmt.nchannels = 2; in_fmt.freq = s->adc_hz; in_fmt.fmt = AUDIO_FORMAT_S16; s->adc_voice[0] = AUD_open_in(&s->card, s->adc_voice[0], CODEC ".input1", s, wm8750_audio_in_cb, &in_fmt); s->adc_voice[1] = AUD_open_in(&s->card, s->adc_voice[1], CODEC ".input2", s, wm8750_audio_in_cb, &in_fmt); s->adc_voice[2] = AUD_open_in(&s->card, s->adc_voice[2], CODEC ".input3", s, wm8750_audio_in_cb, &in_fmt); /* Setup output */ out_fmt.endianness = 0; out_fmt.nchannels = 2; out_fmt.freq = s->dac_hz; out_fmt.fmt = AUDIO_FORMAT_S16; s->dac_voice[0] = AUD_open_out(&s->card, s->dac_voice[0], CODEC ".speaker", s, wm8750_audio_out_cb, &out_fmt); s->dac_voice[1] = AUD_open_out(&s->card, s->dac_voice[1], CODEC ".headphone", s, wm8750_audio_out_cb, &out_fmt); /* MONOMIX is also in stereo for simplicity */ s->dac_voice[2] = AUD_open_out(&s->card, s->dac_voice[2], CODEC ".monomix", s, wm8750_audio_out_cb, &out_fmt); /* no sense emulating OUT3 which is a mix of other outputs */ wm8750_vol_update(s); /* We should connect the left and right channels to their * respective inputs/outputs but we have completely no need * for mixing or combining paths to different ports, so we * connect both channels to where the left channel is routed. 
*/ if (s->in[0] && *s->in[0]) AUD_set_active_in(*s->in[0], 1); if (s->out[0] && *s->out[0]) AUD_set_active_out(*s->out[0], 1); } static void wm8750_clk_update(WM8750State *s, int ext) { if (s->master || !s->ext_dac_hz) s->dac_hz = s->rate->dac_hz; else s->dac_hz = s->ext_dac_hz; if (s->master || !s->ext_adc_hz) s->adc_hz = s->rate->adc_hz; else s->adc_hz = s->ext_adc_hz; if (s->master || (!s->ext_dac_hz && !s->ext_adc_hz)) { if (!ext) wm8750_set_format(s); } else { if (ext) wm8750_set_format(s); } } static void wm8750_reset(I2CSlave *i2c) { WM8750State *s = WM8750(i2c); s->rate = &wm_rate_table[0]; s->enable = 0; wm8750_clk_update(s, 1); s->diff[0] = 0; s->diff[1] = 0; s->ds = 0; s->alc = 0; s->in[0] = &s->adc_voice[0]; s->invol[0] = 0x17; s->invol[1] = 0x17; s->invol[2] = 0xc3; s->invol[3] = 0xc3; s->out[0] = &s->dac_voice[0]; s->outvol[0] = 0xff; s->outvol[1] = 0xff; s->outvol[2] = 0x79; s->outvol[3] = 0x79; s->outvol[4] = 0x79; s->outvol[5] = 0x79; s->outvol[6] = 0x79; s->inmute[0] = 0; s->inmute[1] = 0; s->outmute[0] = 0; s->outmute[1] = 0; s->mute = 1; s->path[0] = 0; s->path[1] = 0; s->path[2] = 0; s->path[3] = 0; s->mpath[0] = 0; s->mpath[1] = 0; s->format = 0x0a; s->idx_in = sizeof(s->data_in); s->req_in = 0; s->idx_out = 0; s->req_out = 0; wm8750_vol_update(s); s->i2c_len = 0; } static int wm8750_event(I2CSlave *i2c, enum i2c_event event) { WM8750State *s = WM8750(i2c); switch (event) { case I2C_START_SEND: s->i2c_len = 0; break; case I2C_FINISH: #ifdef VERBOSE if (s->i2c_len < 2) printf("%s: message too short (%i bytes)\n", __func__, s->i2c_len); #endif break; default: break; } return 0; } #define WM8750_LINVOL 0x00 #define WM8750_RINVOL 0x01 #define WM8750_LOUT1V 0x02 #define WM8750_ROUT1V 0x03 #define WM8750_ADCDAC 0x05 #define WM8750_IFACE 0x07 #define WM8750_SRATE 0x08 #define WM8750_LDAC 0x0a #define WM8750_RDAC 0x0b #define WM8750_BASS 0x0c #define WM8750_TREBLE 0x0d #define WM8750_RESET 0x0f #define WM8750_3D 0x10 #define WM8750_ALC1 0x11 #define 
WM8750_ALC2 0x12 #define WM8750_ALC3 0x13 #define WM8750_NGATE 0x14 #define WM8750_LADC 0x15 #define WM8750_RADC 0x16 #define WM8750_ADCTL1 0x17 #define WM8750_ADCTL2 0x18 #define WM8750_PWR1 0x19 #define WM8750_PWR2 0x1a #define WM8750_ADCTL3 0x1b #define WM8750_ADCIN 0x1f #define WM8750_LADCIN 0x20 #define WM8750_RADCIN 0x21 #define WM8750_LOUTM1 0x22 #define WM8750_LOUTM2 0x23 #define WM8750_ROUTM1 0x24 #define WM8750_ROUTM2 0x25 #define WM8750_MOUTM1 0x26 #define WM8750_MOUTM2 0x27 #define WM8750_LOUT2V 0x28 #define WM8750_ROUT2V 0x29 #define WM8750_MOUTV 0x2a static int wm8750_tx(I2CSlave *i2c, uint8_t data) { WM8750State *s = WM8750(i2c); uint8_t cmd; uint16_t value; if (s->i2c_len >= 2) { #ifdef VERBOSE printf("%s: long message (%i bytes)\n", __func__, s->i2c_len); #endif return 1; } s->i2c_data[s->i2c_len ++] = data; if (s->i2c_len != 2) return 0; cmd = s->i2c_data[0] >> 1; value = ((s->i2c_data[0] << 8) | s->i2c_data[1]) & 0x1ff; switch (cmd) { case WM8750_LADCIN: /* ADC Signal Path Control (Left) */ s->diff[0] = (((value >> 6) & 3) == 3); /* LINSEL */ if (s->diff[0]) s->in[0] = &s->adc_voice[0 + s->ds * 1]; else s->in[0] = &s->adc_voice[((value >> 6) & 3) * 1 + 0]; break; case WM8750_RADCIN: /* ADC Signal Path Control (Right) */ s->diff[1] = (((value >> 6) & 3) == 3); /* RINSEL */ if (s->diff[1]) s->in[1] = &s->adc_voice[0 + s->ds * 1]; else s->in[1] = &s->adc_voice[((value >> 6) & 3) * 1 + 0]; break; case WM8750_ADCIN: /* ADC Input Mode */ s->ds = (value >> 8) & 1; /* DS */ if (s->diff[0]) s->in[0] = &s->adc_voice[0 + s->ds * 1]; if (s->diff[1]) s->in[1] = &s->adc_voice[0 + s->ds * 1]; s->monomix[0] = (value >> 6) & 3; /* MONOMIX */ break; case WM8750_ADCTL1: /* Additional Control (1) */ s->monomix[1] = (value >> 1) & 1; /* DMONOMIX */ break; case WM8750_PWR1: /* Power Management (1) */ s->enable = ((value >> 6) & 7) == 3; /* VMIDSEL, VREF */ wm8750_set_format(s); break; case WM8750_LINVOL: /* Left Channel PGA */ s->invol[0] = value & 0x3f; /* LINVOL */ 
s->inmute[0] = (value >> 7) & 1; /* LINMUTE */ wm8750_vol_update(s); break; case WM8750_RINVOL: /* Right Channel PGA */ s->invol[1] = value & 0x3f; /* RINVOL */ s->inmute[1] = (value >> 7) & 1; /* RINMUTE */ wm8750_vol_update(s); break; case WM8750_ADCDAC: /* ADC and DAC Control */ s->pol = (value >> 5) & 3; /* ADCPOL */ s->mute = (value >> 3) & 1; /* DACMU */ wm8750_vol_update(s); break; case WM8750_ADCTL3: /* Additional Control (3) */ break; case WM8750_LADC: /* Left ADC Digital Volume */ s->invol[2] = value & 0xff; /* LADCVOL */ wm8750_vol_update(s); break; case WM8750_RADC: /* Right ADC Digital Volume */ s->invol[3] = value & 0xff; /* RADCVOL */ wm8750_vol_update(s); break; case WM8750_ALC1: /* ALC Control (1) */ s->alc = (value >> 7) & 3; /* ALCSEL */ break; case WM8750_NGATE: /* Noise Gate Control */ case WM8750_3D: /* 3D enhance */ break; case WM8750_LDAC: /* Left Channel Digital Volume */ s->outvol[0] = value & 0xff; /* LDACVOL */ wm8750_vol_update(s); break; case WM8750_RDAC: /* Right Channel Digital Volume */ s->outvol[1] = value & 0xff; /* RDACVOL */ wm8750_vol_update(s); break; case WM8750_BASS: /* Bass Control */ break; case WM8750_LOUTM1: /* Left Mixer Control (1) */ s->path[0] = (value >> 8) & 1; /* LD2LO */ /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_LOUTM2: /* Left Mixer Control (2) */ s->path[1] = (value >> 8) & 1; /* RD2LO */ /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_ROUTM1: /* Right Mixer Control (1) */ s->path[2] = (value >> 8) & 1; /* LD2RO */ /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_ROUTM2: /* Right Mixer Control (2) */ s->path[3] = (value >> 8) & 1; /* RD2RO */ /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_MOUTM1: /* Mono Mixer Control (1) */ s->mpath[0] = (value >> 8) & 1; /* LD2MO */ /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_MOUTM2: /* Mono Mixer 
Control (2) */ s->mpath[1] = (value >> 8) & 1; /* RD2MO */ /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_LOUT1V: /* LOUT1 Volume */ s->outvol[2] = value & 0x7f; /* LOUT1VOL */ wm8750_vol_update(s); break; case WM8750_LOUT2V: /* LOUT2 Volume */ s->outvol[4] = value & 0x7f; /* LOUT2VOL */ wm8750_vol_update(s); break; case WM8750_ROUT1V: /* ROUT1 Volume */ s->outvol[3] = value & 0x7f; /* ROUT1VOL */ wm8750_vol_update(s); break; case WM8750_ROUT2V: /* ROUT2 Volume */ s->outvol[5] = value & 0x7f; /* ROUT2VOL */ wm8750_vol_update(s); break; case WM8750_MOUTV: /* MONOOUT Volume */ s->outvol[6] = value & 0x7f; /* MONOOUTVOL */ wm8750_vol_update(s); break; case WM8750_ADCTL2: /* Additional Control (2) */ break; case WM8750_PWR2: /* Power Management (2) */ s->power = value & 0x7e; /* TODO: mute/unmute respective paths */ wm8750_vol_update(s); break; case WM8750_IFACE: /* Digital Audio Interface Format */ s->format = value; s->master = (value >> 6) & 1; /* MS */ wm8750_clk_update(s, s->master); break; case WM8750_SRATE: /* Clocking and Sample Rate Control */ s->rate = &wm_rate_table[(value >> 1) & 0x1f]; wm8750_clk_update(s, 0); break; case WM8750_RESET: /* Reset */ wm8750_reset(I2C_SLAVE(s)); break; #ifdef VERBOSE default: printf("%s: unknown register %02x\n", __func__, cmd); #endif } return 0; } static uint8_t wm8750_rx(I2CSlave *i2c) { return 0x00; } static int wm8750_pre_save(void *opaque) { WM8750State *s = opaque; s->rate_vmstate = s->rate - wm_rate_table; return 0; } static int wm8750_post_load(void *opaque, int version_id) { WM8750State *s = opaque; s->rate = &wm_rate_table[s->rate_vmstate & 0x1f]; return 0; } static const VMStateDescription vmstate_wm8750 = { .name = CODEC, .version_id = 0, .minimum_version_id = 0, .pre_save = wm8750_pre_save, .post_load = wm8750_post_load, .fields = (VMStateField[]) { VMSTATE_UINT8_ARRAY(i2c_data, WM8750State, 2), VMSTATE_INT32(i2c_len, WM8750State), VMSTATE_INT32(enable, WM8750State), 
VMSTATE_INT32(idx_in, WM8750State), VMSTATE_INT32(req_in, WM8750State), VMSTATE_INT32(idx_out, WM8750State), VMSTATE_INT32(req_out, WM8750State), VMSTATE_UINT8_ARRAY(outvol, WM8750State, 7), VMSTATE_UINT8_ARRAY(outmute, WM8750State, 2), VMSTATE_UINT8_ARRAY(invol, WM8750State, 4), VMSTATE_UINT8_ARRAY(inmute, WM8750State, 2), VMSTATE_UINT8_ARRAY(diff, WM8750State, 2), VMSTATE_UINT8(pol, WM8750State), VMSTATE_UINT8(ds, WM8750State), VMSTATE_UINT8_ARRAY(monomix, WM8750State, 2), VMSTATE_UINT8(alc, WM8750State), VMSTATE_UINT8(mute, WM8750State), VMSTATE_UINT8_ARRAY(path, WM8750State, 4), VMSTATE_UINT8_ARRAY(mpath, WM8750State, 2), VMSTATE_UINT8(format, WM8750State), VMSTATE_UINT8(power, WM8750State), VMSTATE_UINT8(rate_vmstate, WM8750State), VMSTATE_I2C_SLAVE(parent_obj, WM8750State), VMSTATE_END_OF_LIST() } }; static void wm8750_realize(DeviceState *dev, Error **errp) { WM8750State *s = WM8750(dev); AUD_register_card(CODEC, &s->card); wm8750_reset(I2C_SLAVE(s)); } #if 0 static void wm8750_fini(I2CSlave *i2c) { WM8750State *s = WM8750(i2c); wm8750_reset(I2C_SLAVE(s)); AUD_remove_card(&s->card); g_free(s); } #endif void wm8750_data_req_set(DeviceState *dev, data_req_cb *data_req, void *opaque) { WM8750State *s = WM8750(dev); s->data_req = data_req; s->opaque = opaque; } void wm8750_dac_dat(void *opaque, uint32_t sample) { WM8750State *s = (WM8750State *) opaque; *(uint32_t *) &s->data_out[s->idx_out] = sample; s->req_out -= 4; s->idx_out += 4; if (s->idx_out >= sizeof(s->data_out) || s->req_out <= 0) wm8750_out_flush(s); } void *wm8750_dac_buffer(void *opaque, int samples) { WM8750State *s = (WM8750State *) opaque; /* XXX: Should check if there are <i>samples</i> free samples available */ void *ret = s->data_out + s->idx_out; s->idx_out += samples << 2; s->req_out -= samples << 2; return ret; } void wm8750_dac_commit(void *opaque) { WM8750State *s = (WM8750State *) opaque; wm8750_out_flush(s); } uint32_t wm8750_adc_dat(void *opaque) { WM8750State *s = (WM8750State *) 
opaque; uint32_t *data; if (s->idx_in >= sizeof(s->data_in)) { wm8750_in_load(s); if (s->idx_in >= sizeof(s->data_in)) { return 0x80008000; /* silence in AUDIO_FORMAT_S16 sample format */ } } data = (uint32_t *) &s->data_in[s->idx_in]; s->req_in -= 4; s->idx_in += 4; return *data; } void wm8750_set_bclk_in(void *opaque, int new_hz) { WM8750State *s = (WM8750State *) opaque; s->ext_adc_hz = new_hz; s->ext_dac_hz = new_hz; wm8750_clk_update(s, 1); } static void wm8750_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); dc->realize = wm8750_realize; sc->event = wm8750_event; sc->recv = wm8750_rx; sc->send = wm8750_tx; dc->vmsd = &vmstate_wm8750; } static const TypeInfo wm8750_info = { .name = TYPE_WM8750, .parent = TYPE_I2C_SLAVE, .instance_size = sizeof(WM8750State), .class_init = wm8750_class_init, }; static void wm8750_register_types(void) { type_register_static(&wm8750_info); } type_init(wm8750_register_types)
pmp-tool/PMP
src/qemu/src-pmp/hw/virtio/vhost-user-blk-pci.c
<reponame>pmp-tool/PMP /* * Vhost user blk PCI Bindings * * Copyright(C) 2017 Intel Corporation. * * Authors: * <NAME> <<EMAIL>> * * Largely based on the "vhost-user-scsi.c" and "vhost-scsi.c" implemented by: * <NAME> <<EMAIL>> * <NAME> <<EMAIL>> * <NAME> <<EMAIL>> * * This work is licensed under the terms of the GNU LGPL, version 2 or later. * See the COPYING.LIB file in the top-level directory. * */ #include "qemu/osdep.h" #include "standard-headers/linux/virtio_pci.h" #include "hw/virtio/virtio.h" #include "hw/virtio/vhost-user-blk.h" #include "hw/pci/pci.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "virtio-pci.h" typedef struct VHostUserBlkPCI VHostUserBlkPCI; /* * vhost-user-blk-pci: This extends VirtioPCIProxy. */ #define TYPE_VHOST_USER_BLK_PCI "vhost-user-blk-pci-base" #define VHOST_USER_BLK_PCI(obj) \ OBJECT_CHECK(VHostUserBlkPCI, (obj), TYPE_VHOST_USER_BLK_PCI) struct VHostUserBlkPCI { VirtIOPCIProxy parent_obj; VHostUserBlk vdev; }; static Property vhost_user_blk_pci_properties[] = { DEFINE_PROP_UINT32("class", VirtIOPCIProxy, class_code, 0), DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED), DEFINE_PROP_END_OF_LIST(), }; static void vhost_user_blk_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { VHostUserBlkPCI *dev = VHOST_USER_BLK_PCI(vpci_dev); DeviceState *vdev = DEVICE(&dev->vdev); if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { vpci_dev->nvectors = dev->vdev.num_queues + 1; } qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus)); object_property_set_bool(OBJECT(vdev), true, "realized", errp); } static void vhost_user_blk_pci_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); dc->props = vhost_user_blk_pci_properties; k->realize = vhost_user_blk_pci_realize; pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; 
pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK; pcidev_k->revision = VIRTIO_PCI_ABI_VERSION; pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI; } static void vhost_user_blk_pci_instance_init(Object *obj) { VHostUserBlkPCI *dev = VHOST_USER_BLK_PCI(obj); virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), TYPE_VHOST_USER_BLK); object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev), "bootindex", &error_abort); } static const VirtioPCIDeviceTypeInfo vhost_user_blk_pci_info = { .base_name = TYPE_VHOST_USER_BLK_PCI, .generic_name = "vhost-user-blk-pci", .transitional_name = "vhost-user-blk-pci-transitional", .non_transitional_name = "vhost-user-blk-pci-non-transitional", .instance_size = sizeof(VHostUserBlkPCI), .instance_init = vhost_user_blk_pci_instance_init, .class_init = vhost_user_blk_pci_class_init, }; static void vhost_user_blk_pci_register(void) { virtio_pci_types_register(&vhost_user_blk_pci_info); } type_init(vhost_user_blk_pci_register)
pmp-tool/PMP
src/qemu/src-pmp/ui/qemu-pixman.c
/* * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "qemu-common.h" #include "ui/console.h" #include "standard-headers/drm/drm_fourcc.h" PixelFormat qemu_pixelformat_from_pixman(pixman_format_code_t format) { PixelFormat pf; uint8_t bpp; bpp = pf.bits_per_pixel = PIXMAN_FORMAT_BPP(format); pf.bytes_per_pixel = PIXMAN_FORMAT_BPP(format) / 8; pf.depth = PIXMAN_FORMAT_DEPTH(format); pf.abits = PIXMAN_FORMAT_A(format); pf.rbits = PIXMAN_FORMAT_R(format); pf.gbits = PIXMAN_FORMAT_G(format); pf.bbits = PIXMAN_FORMAT_B(format); switch (PIXMAN_FORMAT_TYPE(format)) { case PIXMAN_TYPE_ARGB: pf.ashift = pf.bbits + pf.gbits + pf.rbits; pf.rshift = pf.bbits + pf.gbits; pf.gshift = pf.bbits; pf.bshift = 0; break; case PIXMAN_TYPE_ABGR: pf.ashift = pf.rbits + pf.gbits + pf.bbits; pf.bshift = pf.rbits + pf.gbits; pf.gshift = pf.rbits; pf.rshift = 0; break; case PIXMAN_TYPE_BGRA: pf.bshift = bpp - pf.bbits; pf.gshift = bpp - (pf.bbits + pf.gbits); pf.rshift = bpp - (pf.bbits + pf.gbits + pf.rbits); pf.ashift = 0; break; case PIXMAN_TYPE_RGBA: pf.rshift = bpp - pf.rbits; pf.gshift = bpp - (pf.rbits + pf.gbits); pf.bshift = bpp - (pf.rbits + pf.gbits + pf.bbits); pf.ashift = 0; break; default: g_assert_not_reached(); break; } pf.amax = (1 << pf.abits) - 1; pf.rmax = (1 << pf.rbits) - 1; pf.gmax = (1 << pf.gbits) - 1; pf.bmax = (1 << pf.bbits) - 1; pf.amask = pf.amax << pf.ashift; pf.rmask = pf.rmax << pf.rshift; pf.gmask = pf.gmax << pf.gshift; pf.bmask = pf.bmax << pf.bshift; return pf; } pixman_format_code_t qemu_default_pixman_format(int bpp, bool native_endian) { if (native_endian) { switch (bpp) { case 15: return PIXMAN_x1r5g5b5; case 16: return PIXMAN_r5g6b5; case 24: return PIXMAN_r8g8b8; case 32: return PIXMAN_x8r8g8b8; } } else { switch (bpp) { case 24: return PIXMAN_b8g8r8; case 32: return PIXMAN_b8g8r8x8; break; } } return 0; } /* Note: drm is little endian, 
pixman is native endian */ pixman_format_code_t qemu_drm_format_to_pixman(uint32_t drm_format) { static const struct { uint32_t drm_format; pixman_format_code_t pixman; } map[] = { { DRM_FORMAT_RGB888, PIXMAN_LE_r8g8b8 }, { DRM_FORMAT_ARGB8888, PIXMAN_LE_a8r8g8b8 }, { DRM_FORMAT_XRGB8888, PIXMAN_LE_x8r8g8b8 } }; int i; for (i = 0; i < ARRAY_SIZE(map); i++) { if (drm_format == map[i].drm_format) { return map[i].pixman; } } return 0; } int qemu_pixman_get_type(int rshift, int gshift, int bshift) { int type = PIXMAN_TYPE_OTHER; if (rshift > gshift && gshift > bshift) { if (bshift == 0) { type = PIXMAN_TYPE_ARGB; } else { type = PIXMAN_TYPE_RGBA; } } else if (rshift < gshift && gshift < bshift) { if (rshift == 0) { type = PIXMAN_TYPE_ABGR; } else { type = PIXMAN_TYPE_BGRA; } } return type; } pixman_format_code_t qemu_pixman_get_format(PixelFormat *pf) { pixman_format_code_t format; int type; type = qemu_pixman_get_type(pf->rshift, pf->gshift, pf->bshift); format = PIXMAN_FORMAT(pf->bits_per_pixel, type, pf->abits, pf->rbits, pf->gbits, pf->bbits); if (!pixman_format_supported_source(format)) { return 0; } return format; } /* * Return true for known-good pixman conversions. 
* * UIs using pixman for format conversion can hook this into * DisplayChangeListenerOps->dpy_gfx_check_format */ bool qemu_pixman_check_format(DisplayChangeListener *dcl, pixman_format_code_t format) { switch (format) { /* 32 bpp */ case PIXMAN_x8r8g8b8: case PIXMAN_a8r8g8b8: case PIXMAN_b8g8r8x8: case PIXMAN_b8g8r8a8: /* 24 bpp */ case PIXMAN_r8g8b8: case PIXMAN_b8g8r8: /* 16 bpp */ case PIXMAN_x1r5g5b5: case PIXMAN_r5g6b5: return true; default: return false; } } pixman_image_t *qemu_pixman_linebuf_create(pixman_format_code_t format, int width) { pixman_image_t *image = pixman_image_create_bits(format, width, 1, NULL, 0); assert(image != NULL); return image; } /* fill linebuf from framebuffer */ void qemu_pixman_linebuf_fill(pixman_image_t *linebuf, pixman_image_t *fb, int width, int x, int y) { pixman_image_composite(PIXMAN_OP_SRC, fb, NULL, linebuf, x, y, 0, 0, 0, 0, width, 1); } /* copy linebuf to framebuffer */ void qemu_pixman_linebuf_copy(pixman_image_t *fb, int width, int x, int y, pixman_image_t *linebuf) { pixman_image_composite(PIXMAN_OP_SRC, linebuf, NULL, fb, 0, 0, 0, 0, x, y, width, 1); } pixman_image_t *qemu_pixman_mirror_create(pixman_format_code_t format, pixman_image_t *image) { return pixman_image_create_bits(format, pixman_image_get_width(image), pixman_image_get_height(image), NULL, pixman_image_get_stride(image)); } void qemu_pixman_image_unref(pixman_image_t *image) { if (image == NULL) { return; } pixman_image_unref(image); } pixman_color_t qemu_pixman_color(PixelFormat *pf, uint32_t color) { pixman_color_t c; c.red = ((color & pf->rmask) >> pf->rshift) << (16 - pf->rbits); c.green = ((color & pf->gmask) >> pf->gshift) << (16 - pf->gbits); c.blue = ((color & pf->bmask) >> pf->bshift) << (16 - pf->bbits); c.alpha = ((color & pf->amask) >> pf->ashift) << (16 - pf->abits); return c; } pixman_image_t *qemu_pixman_glyph_from_vgafont(int height, const uint8_t *font, unsigned int ch) { pixman_image_t *glyph; uint8_t *data; bool bit; int x, y; 
glyph = pixman_image_create_bits(PIXMAN_a8, 8, height, NULL, 0); data = (uint8_t *)pixman_image_get_data(glyph); font += height * ch; for (y = 0; y < height; y++, font++) { for (x = 0; x < 8; x++, data++) { bit = (*font) & (1 << (7-x)); *data = bit ? 0xff : 0x00; } } return glyph; } void qemu_pixman_glyph_render(pixman_image_t *glyph, pixman_image_t *surface, pixman_color_t *fgcol, pixman_color_t *bgcol, int x, int y, int cw, int ch) { pixman_image_t *ifg = pixman_image_create_solid_fill(fgcol); pixman_image_t *ibg = pixman_image_create_solid_fill(bgcol); pixman_image_composite(PIXMAN_OP_SRC, ibg, NULL, surface, 0, 0, 0, 0, cw * x, ch * y, cw, ch); pixman_image_composite(PIXMAN_OP_OVER, ifg, glyph, surface, 0, 0, 0, 0, cw * x, ch * y, cw, ch); pixman_image_unref(ifg); pixman_image_unref(ibg); }
pmp-tool/PMP
src/qemu/src-pmp/hw/ide/ioport.c
<filename>src/qemu/src-pmp/hw/ide/ioport.c<gh_stars>1-10 /* * QEMU IDE disk and CD/DVD-ROM Emulator * * Copyright (c) 2003 <NAME> * Copyright (c) 2006 Openedhand Ltd. * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #include "qemu/osdep.h" #include "hw/hw.h" #include "hw/isa/isa.h" #include "qemu/error-report.h" #include "qemu/timer.h" #include "sysemu/sysemu.h" #include "sysemu/blockdev.h" #include "sysemu/dma.h" #include "hw/block/block.h" #include "sysemu/block-backend.h" #include "qapi/error.h" #include "qemu/cutils.h" #include "sysemu/replay.h" #include "hw/ide/internal.h" #include "trace.h" static const MemoryRegionPortio ide_portio_list[] = { { 0, 8, 1, .read = ide_ioport_read, .write = ide_ioport_write }, { 0, 1, 2, .read = ide_data_readw, .write = ide_data_writew }, { 0, 1, 4, .read = ide_data_readl, .write = ide_data_writel }, PORTIO_END_OF_LIST(), }; static const MemoryRegionPortio ide_portio2_list[] = { { 0, 1, 1, .read = ide_status_read, .write = ide_cmd_write }, PORTIO_END_OF_LIST(), }; void ide_init_ioport(IDEBus *bus, ISADevice *dev, int iobase, int iobase2) { /* ??? Assume only ISA and PCI configurations, and that the PCI-ISA bridge has been setup properly to always register with ISA. */ isa_register_portio_list(dev, &bus->portio_list, iobase, ide_portio_list, bus, "ide"); if (iobase2) { isa_register_portio_list(dev, &bus->portio2_list, iobase2, ide_portio2_list, bus, "ide"); } }
pmp-tool/PMP
src/qemu/src-pmp/target/openrisc/gdbstub.c
<reponame>pmp-tool/PMP<gh_stars>1-10 /* * OpenRISC gdb server stub * * Copyright (c) 2003-2005 <NAME> * Copyright (c) 2013 SUSE LINUX Products GmbH * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "qemu-common.h" #include "cpu.h" #include "exec/gdbstub.h" int openrisc_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n) { OpenRISCCPU *cpu = OPENRISC_CPU(cs); CPUOpenRISCState *env = &cpu->env; if (n < 32) { return gdb_get_reg32(mem_buf, cpu_get_gpr(env, n)); } else { switch (n) { case 32: /* PPC */ return gdb_get_reg32(mem_buf, env->ppc); case 33: /* NPC (equals PC) */ return gdb_get_reg32(mem_buf, env->pc); case 34: /* SR */ return gdb_get_reg32(mem_buf, cpu_get_sr(env)); default: break; } } return 0; } int openrisc_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n) { OpenRISCCPU *cpu = OPENRISC_CPU(cs); CPUClass *cc = CPU_GET_CLASS(cs); CPUOpenRISCState *env = &cpu->env; uint32_t tmp; if (n > cc->gdb_num_core_regs) { return 0; } tmp = ldl_p(mem_buf); if (n < 32) { cpu_set_gpr(env, n, tmp); } else { switch (n) { case 32: /* PPC */ env->ppc = tmp; break; case 33: /* NPC (equals PC) */ /* If setting PC to something different, also clear delayed branch status. */ if (env->pc != tmp) { env->pc = tmp; env->dflag = 0; } break; case 34: /* SR */ cpu_set_sr(env, tmp); break; default: break; } } return 4; }
pmp-tool/PMP
src/qemu/src-pmp/hw/intc/pnv_xive.c
<gh_stars>1-10 /* * QEMU PowerPC XIVE interrupt controller model * * Copyright (c) 2017-2019, IBM Corporation. * * This code is licensed under the GPL version 2 or later. See the * COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "qemu/log.h" #include "qapi/error.h" #include "target/ppc/cpu.h" #include "sysemu/cpus.h" #include "sysemu/dma.h" #include "monitor/monitor.h" #include "hw/ppc/fdt.h" #include "hw/ppc/pnv.h" #include "hw/ppc/pnv_core.h" #include "hw/ppc/pnv_xscom.h" #include "hw/ppc/pnv_xive.h" #include "hw/ppc/xive_regs.h" #include "hw/ppc/ppc.h" #include <libfdt.h> #include "pnv_xive_regs.h" #define XIVE_DEBUG /* * Virtual structures table (VST) */ #define SBE_PER_BYTE 4 typedef struct XiveVstInfo { const char *name; uint32_t size; uint32_t max_blocks; } XiveVstInfo; static const XiveVstInfo vst_infos[] = { [VST_TSEL_IVT] = { "EAT", sizeof(XiveEAS), 16 }, [VST_TSEL_SBE] = { "SBE", 1, 16 }, [VST_TSEL_EQDT] = { "ENDT", sizeof(XiveEND), 16 }, [VST_TSEL_VPDT] = { "VPDT", sizeof(XiveNVT), 32 }, /* * Interrupt fifo backing store table (not modeled) : * * 0 - IPI, * 1 - HWD, * 2 - First escalate, * 3 - Second escalate, * 4 - Redistribution, * 5 - IPI cascaded queue ? */ [VST_TSEL_IRQ] = { "IRQ", 1, 6 }, }; #define xive_error(xive, fmt, ...) \ qemu_log_mask(LOG_GUEST_ERROR, "XIVE[%x] - " fmt "\n", \ (xive)->chip->chip_id, ## __VA_ARGS__); /* * QEMU version of the GETFIELD/SETFIELD macros * * TODO: It might be better to use the existing extract64() and * deposit64() but this means that all the register definitions will * change and become incompatible with the ones found in skiboot. * * Keep it as it is for now until we find a common ground. */ static inline uint64_t GETFIELD(uint64_t mask, uint64_t word) { return (word & mask) >> ctz64(mask); } static inline uint64_t SETFIELD(uint64_t mask, uint64_t word, uint64_t value) { return (word & ~mask) | ((value << ctz64(mask)) & mask); } /* * Remote access to controllers. HW uses MMIOs. 
For now, a simple scan * of the chips is good enough. * * TODO: Block scope support */ static PnvXive *pnv_xive_get_ic(uint8_t blk) { PnvMachineState *pnv = PNV_MACHINE(qdev_get_machine()); int i; for (i = 0; i < pnv->num_chips; i++) { Pnv9Chip *chip9 = PNV9_CHIP(pnv->chips[i]); PnvXive *xive = &chip9->xive; if (xive->chip->chip_id == blk) { return xive; } } return NULL; } /* * VST accessors for SBE, EAT, ENDT, NVT * * Indirect VST tables are arrays of VSDs pointing to a page (of same * size). Each page is a direct VST table. */ #define XIVE_VSD_SIZE 8 /* Indirect page size can be 4K, 64K, 2M, 16M. */ static uint64_t pnv_xive_vst_page_size_allowed(uint32_t page_shift) { return page_shift == 12 || page_shift == 16 || page_shift == 21 || page_shift == 24; } static uint64_t pnv_xive_vst_size(uint64_t vsd) { uint64_t vst_tsize = 1ull << (GETFIELD(VSD_TSIZE, vsd) + 12); /* * Read the first descriptor to get the page size of the indirect * table. */ if (VSD_INDIRECT & vsd) { uint32_t nr_pages = vst_tsize / XIVE_VSD_SIZE; uint32_t page_shift; vsd = ldq_be_dma(&address_space_memory, vsd & VSD_ADDRESS_MASK); page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; if (!pnv_xive_vst_page_size_allowed(page_shift)) { return 0; } return nr_pages * (1ull << page_shift); } return vst_tsize; } static uint64_t pnv_xive_vst_addr_direct(PnvXive *xive, uint32_t type, uint64_t vsd, uint32_t idx) { const XiveVstInfo *info = &vst_infos[type]; uint64_t vst_addr = vsd & VSD_ADDRESS_MASK; return vst_addr + idx * info->size; } static uint64_t pnv_xive_vst_addr_indirect(PnvXive *xive, uint32_t type, uint64_t vsd, uint32_t idx) { const XiveVstInfo *info = &vst_infos[type]; uint64_t vsd_addr; uint32_t vsd_idx; uint32_t page_shift; uint32_t vst_per_page; /* Get the page size of the indirect table. 
*/ vsd_addr = vsd & VSD_ADDRESS_MASK; vsd = ldq_be_dma(&address_space_memory, vsd_addr); if (!(vsd & VSD_ADDRESS_MASK)) { xive_error(xive, "VST: invalid %s entry %x !?", info->name, 0); return 0; } page_shift = GETFIELD(VSD_TSIZE, vsd) + 12; if (!pnv_xive_vst_page_size_allowed(page_shift)) { xive_error(xive, "VST: invalid %s page shift %d", info->name, page_shift); return 0; } vst_per_page = (1ull << page_shift) / info->size; vsd_idx = idx / vst_per_page; /* Load the VSD we are looking for, if not already done */ if (vsd_idx) { vsd_addr = vsd_addr + vsd_idx * XIVE_VSD_SIZE; vsd = ldq_be_dma(&address_space_memory, vsd_addr); if (!(vsd & VSD_ADDRESS_MASK)) { xive_error(xive, "VST: invalid %s entry %x !?", info->name, 0); return 0; } /* * Check that the pages have a consistent size across the * indirect table */ if (page_shift != GETFIELD(VSD_TSIZE, vsd) + 12) { xive_error(xive, "VST: %s entry %x indirect page size differ !?", info->name, idx); return 0; } } return pnv_xive_vst_addr_direct(xive, type, vsd, (idx % vst_per_page)); } static uint64_t pnv_xive_vst_addr(PnvXive *xive, uint32_t type, uint8_t blk, uint32_t idx) { const XiveVstInfo *info = &vst_infos[type]; uint64_t vsd; uint32_t idx_max; if (blk >= info->max_blocks) { xive_error(xive, "VST: invalid block id %d for VST %s %d !?", blk, info->name, idx); return 0; } vsd = xive->vsds[type][blk]; /* Remote VST access */ if (GETFIELD(VSD_MODE, vsd) == VSD_MODE_FORWARD) { xive = pnv_xive_get_ic(blk); return xive ? pnv_xive_vst_addr(xive, type, blk, idx) : 0; } idx_max = pnv_xive_vst_size(vsd) / info->size - 1; if (idx > idx_max) { #ifdef XIVE_DEBUG xive_error(xive, "VST: %s entry %x/%x out of range [ 0 .. 
%x ] !?", info->name, blk, idx, idx_max); #endif return 0; } if (VSD_INDIRECT & vsd) { return pnv_xive_vst_addr_indirect(xive, type, vsd, idx); } return pnv_xive_vst_addr_direct(xive, type, vsd, idx); } static int pnv_xive_vst_read(PnvXive *xive, uint32_t type, uint8_t blk, uint32_t idx, void *data) { const XiveVstInfo *info = &vst_infos[type]; uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); if (!addr) { return -1; } cpu_physical_memory_read(addr, data, info->size); return 0; } #define XIVE_VST_WORD_ALL -1 static int pnv_xive_vst_write(PnvXive *xive, uint32_t type, uint8_t blk, uint32_t idx, void *data, uint32_t word_number) { const XiveVstInfo *info = &vst_infos[type]; uint64_t addr = pnv_xive_vst_addr(xive, type, blk, idx); if (!addr) { return -1; } if (word_number == XIVE_VST_WORD_ALL) { cpu_physical_memory_write(addr, data, info->size); } else { cpu_physical_memory_write(addr + word_number * 4, data + word_number * 4, 4); } return 0; } static int pnv_xive_get_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, XiveEND *end) { return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end); } static int pnv_xive_write_end(XiveRouter *xrtr, uint8_t blk, uint32_t idx, XiveEND *end, uint8_t word_number) { return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_EQDT, blk, idx, end, word_number); } static int pnv_xive_end_update(PnvXive *xive, uint8_t blk, uint32_t idx) { int i; uint64_t eqc_watch[4]; for (i = 0; i < ARRAY_SIZE(eqc_watch); i++) { eqc_watch[i] = cpu_to_be64(xive->regs[(VC_EQC_CWATCH_DAT0 >> 3) + i]); } return pnv_xive_vst_write(xive, VST_TSEL_EQDT, blk, idx, eqc_watch, XIVE_VST_WORD_ALL); } static int pnv_xive_get_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, XiveNVT *nvt) { return pnv_xive_vst_read(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt); } static int pnv_xive_write_nvt(XiveRouter *xrtr, uint8_t blk, uint32_t idx, XiveNVT *nvt, uint8_t word_number) { return pnv_xive_vst_write(PNV_XIVE(xrtr), VST_TSEL_VPDT, blk, idx, nvt, 
word_number); } static int pnv_xive_nvt_update(PnvXive *xive, uint8_t blk, uint32_t idx) { int i; uint64_t vpc_watch[8]; for (i = 0; i < ARRAY_SIZE(vpc_watch); i++) { vpc_watch[i] = cpu_to_be64(xive->regs[(PC_VPC_CWATCH_DAT0 >> 3) + i]); } return pnv_xive_vst_write(xive, VST_TSEL_VPDT, blk, idx, vpc_watch, XIVE_VST_WORD_ALL); } static int pnv_xive_get_eas(XiveRouter *xrtr, uint8_t blk, uint32_t idx, XiveEAS *eas) { PnvXive *xive = PNV_XIVE(xrtr); if (pnv_xive_get_ic(blk) != xive) { xive_error(xive, "VST: EAS %x is remote !?", XIVE_SRCNO(blk, idx)); return -1; } return pnv_xive_vst_read(xive, VST_TSEL_IVT, blk, idx, eas); } static int pnv_xive_eas_update(PnvXive *xive, uint8_t blk, uint32_t idx) { /* All done. */ return 0; } static XiveTCTX *pnv_xive_get_tctx(XiveRouter *xrtr, CPUState *cs) { PowerPCCPU *cpu = POWERPC_CPU(cs); XiveTCTX *tctx = XIVE_TCTX(pnv_cpu_state(cpu)->intc); PnvXive *xive = NULL; CPUPPCState *env = &cpu->env; int pir = env->spr_cb[SPR_PIR].default_value; /* * Perform an extra check on the HW thread enablement. * * The TIMA is shared among the chips and to identify the chip * from which the access is being done, we extract the chip id * from the PIR. */ xive = pnv_xive_get_ic((pir >> 8) & 0xf); if (!xive) { return NULL; } if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) { xive_error(PNV_XIVE(xrtr), "IC: CPU %x is not enabled", pir); } return tctx; } /* * The internal sources (IPIs) of the interrupt controller have no * knowledge of the XIVE chip on which they reside. Encode the block * id in the source interrupt number before forwarding the source * event notification to the Router. This is required on a multichip * system. 
*/ static void pnv_xive_notify(XiveNotifier *xn, uint32_t srcno) { PnvXive *xive = PNV_XIVE(xn); uint8_t blk = xive->chip->chip_id; xive_router_notify(xn, XIVE_SRCNO(blk, srcno)); } /* * XIVE helpers */ static uint64_t pnv_xive_vc_size(PnvXive *xive) { return (~xive->regs[CQ_VC_BARM >> 3] + 1) & CQ_VC_BARM_MASK; } static uint64_t pnv_xive_edt_shift(PnvXive *xive) { return ctz64(pnv_xive_vc_size(xive) / XIVE_TABLE_EDT_MAX); } static uint64_t pnv_xive_pc_size(PnvXive *xive) { return (~xive->regs[CQ_PC_BARM >> 3] + 1) & CQ_PC_BARM_MASK; } static uint32_t pnv_xive_nr_ipis(PnvXive *xive) { uint8_t blk = xive->chip->chip_id; return pnv_xive_vst_size(xive->vsds[VST_TSEL_SBE][blk]) * SBE_PER_BYTE; } static uint32_t pnv_xive_nr_ends(PnvXive *xive) { uint8_t blk = xive->chip->chip_id; return pnv_xive_vst_size(xive->vsds[VST_TSEL_EQDT][blk]) / vst_infos[VST_TSEL_EQDT].size; } /* * EDT Table * * The Virtualization Controller MMIO region containing the IPI ESB * pages and END ESB pages is sub-divided into "sets" which map * portions of the VC region to the different ESB pages. It is * configured at runtime through the EDT "Domain Table" to let the * firmware decide how to split the VC address space between IPI ESB * pages and END ESB pages. 
*/ /* * Computes the overall size of the IPI or the END ESB pages */ static uint64_t pnv_xive_edt_size(PnvXive *xive, uint64_t type) { uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); uint64_t size = 0; int i; for (i = 0; i < XIVE_TABLE_EDT_MAX; i++) { uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); if (edt_type == type) { size += edt_size; } } return size; } /* * Maps an offset of the VC region in the IPI or END region using the * layout defined by the EDT "Domaine Table" */ static uint64_t pnv_xive_edt_offset(PnvXive *xive, uint64_t vc_offset, uint64_t type) { int i; uint64_t edt_size = 1ull << pnv_xive_edt_shift(xive); uint64_t edt_offset = vc_offset; for (i = 0; i < XIVE_TABLE_EDT_MAX && (i * edt_size) < vc_offset; i++) { uint64_t edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[i]); if (edt_type != type) { edt_offset -= edt_size; } } return edt_offset; } static void pnv_xive_edt_resize(PnvXive *xive) { uint64_t ipi_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_IPI); uint64_t end_edt_size = pnv_xive_edt_size(xive, CQ_TDR_EDT_EQ); memory_region_set_size(&xive->ipi_edt_mmio, ipi_edt_size); memory_region_add_subregion(&xive->ipi_mmio, 0, &xive->ipi_edt_mmio); memory_region_set_size(&xive->end_edt_mmio, end_edt_size); memory_region_add_subregion(&xive->end_mmio, 0, &xive->end_edt_mmio); } /* * XIVE Table configuration. Only EDT is supported. 
*/
/*
 * Write one entry of the table currently selected by CQ_TAR.
 * Returns 0 on success, -1 on an invalid selector or index.
 */
static int pnv_xive_table_set_data(PnvXive *xive, uint64_t val)
{
    uint64_t tsel = xive->regs[CQ_TAR >> 3] & CQ_TAR_TSEL;
    uint8_t tsel_index = GETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3]);
    uint64_t *xive_table;
    uint8_t max_index;

    switch (tsel) {
    case CQ_TAR_TSEL_BLK:
        max_index = ARRAY_SIZE(xive->blk);
        xive_table = xive->blk;
        break;
    case CQ_TAR_TSEL_MIG:
        max_index = ARRAY_SIZE(xive->mig);
        xive_table = xive->mig;
        break;
    case CQ_TAR_TSEL_EDT:
        max_index = ARRAY_SIZE(xive->edt);
        xive_table = xive->edt;
        break;
    case CQ_TAR_TSEL_VDT:
        max_index = ARRAY_SIZE(xive->vdt);
        xive_table = xive->vdt;
        break;
    default:
        xive_error(xive, "IC: invalid table %d", (int) tsel);
        return -1;
    }

    if (tsel_index >= max_index) {
        xive_error(xive, "IC: invalid index %d", (int) tsel_index);
        return -1;
    }

    xive_table[tsel_index] = val;

    /* Auto-increment the index in CQ_TAR when requested by FW */
    if (xive->regs[CQ_TAR >> 3] & CQ_TAR_TBL_AUTOINC) {
        xive->regs[CQ_TAR >> 3] =
            SETFIELD(CQ_TAR_TSEL_INDEX, xive->regs[CQ_TAR >> 3], ++tsel_index);
    }

    /*
     * EDT configuration is complete. Resize the MMIO windows exposing
     * the IPI and the END ESBs in the VC region.
     */
    if (tsel == CQ_TAR_TSEL_EDT && tsel_index == ARRAY_SIZE(xive->edt)) {
        pnv_xive_edt_resize(xive);
    }

    return 0;
}

/*
 * Virtual Structure Tables (VST) configuration
 */
/*
 * Record a VSD in exclusive mode for table @type/@blk and tune the
 * models (ESB MMIO window sizes) with the FW-provided configuration.
 */
static void pnv_xive_vst_set_exclusive(PnvXive *xive, uint8_t type,
                                       uint8_t blk, uint64_t vsd)
{
    XiveENDSource *end_xsrc = &xive->end_source;
    XiveSource *xsrc = &xive->ipi_source;
    const XiveVstInfo *info = &vst_infos[type];
    uint32_t page_shift = GETFIELD(VSD_TSIZE, vsd) + 12;
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    /* Basic checks */

    if (VSD_INDIRECT & vsd) {
        if (!(xive->regs[VC_GLOBAL_CONFIG >> 3] & VC_GCONF_INDIRECT)) {
            xive_error(xive, "VST: %s indirect tables are not enabled",
                       info->name);
            return;
        }

        if (!pnv_xive_vst_page_size_allowed(page_shift)) {
            xive_error(xive, "VST: invalid %s page shift %d", info->name,
                       page_shift);
            return;
        }
    }

    if (!QEMU_IS_ALIGNED(vst_addr, 1ull << page_shift)) {
        xive_error(xive, "VST: %s table address 0x%"PRIx64" is not aligned with"
                   " page shift %d", info->name, vst_addr, page_shift);
        return;
    }

    /* Record the table configuration (in SRAM on HW) */
    xive->vsds[type][blk] = vsd;

    /* Now tune the models with the configuration provided by the FW */

    switch (type) {
    case VST_TSEL_IVT:  /* Nothing to be done */
        break;

    case VST_TSEL_EQDT:
        /*
         * Backing store pages for the END. Compute the number of ENDs
         * provisioned by FW and resize the END ESB window accordingly.
         */
        memory_region_set_size(&end_xsrc->esb_mmio, pnv_xive_nr_ends(xive) *
                               (1ull << (end_xsrc->esb_shift + 1)));
        memory_region_add_subregion(&xive->end_edt_mmio, 0,
                                    &end_xsrc->esb_mmio);
        break;

    case VST_TSEL_SBE:
        /*
         * Backing store pages for the source PQ bits. The model does
         * not use these PQ bits backed in RAM because the XiveSource
         * model has its own. Compute the number of IRQs provisioned
         * by FW and resize the IPI ESB window accordingly.
         */
        memory_region_set_size(&xsrc->esb_mmio, pnv_xive_nr_ipis(xive) *
                               (1ull << xsrc->esb_shift));
        memory_region_add_subregion(&xive->ipi_edt_mmio, 0, &xsrc->esb_mmio);
        break;

    case VST_TSEL_VPDT: /* Not modeled */
    case VST_TSEL_IRQ:  /* Not modeled */
        /*
         * These tables contains the backing store pages for the
         * interrupt fifos of the VC sub-engine in case of overflow.
         */
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Both PC and VC sub-engines are configured as each use the Virtual
 * Structure Tables : SBE, EAS, END and NVT.
 */
static void pnv_xive_vst_set_data(PnvXive *xive, uint64_t vsd, bool pc_engine)
{
    uint8_t mode = GETFIELD(VSD_MODE, vsd);
    uint8_t type = GETFIELD(VST_TABLE_SELECT,
                            xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint8_t blk = GETFIELD(VST_TABLE_BLOCK,
                           xive->regs[VC_VSD_TABLE_ADDR >> 3]);
    uint64_t vst_addr = vsd & VSD_ADDRESS_MASK;

    if (type > VST_TSEL_IRQ) {
        xive_error(xive, "VST: invalid table type %d", type);
        return;
    }

    if (blk >= vst_infos[type].max_blocks) {
        xive_error(xive, "VST: invalid block id %d for"
                   " %s table", blk, vst_infos[type].name);
        return;
    }

    /*
     * Only take the VC sub-engine configuration into account because
     * the XiveRouter model combines both VC and PC sub-engines
     */
    if (pc_engine) {
        return;
    }

    if (!vst_addr) {
        xive_error(xive, "VST: invalid %s table address", vst_infos[type].name);
        return;
    }

    switch (mode) {
    case VSD_MODE_FORWARD:
        /* Table lives on a remote chip; keep the forward descriptor */
        xive->vsds[type][blk] = vsd;
        break;

    case VSD_MODE_EXCLUSIVE:
        pnv_xive_vst_set_exclusive(xive, type, blk, vsd);
        break;

    default:
        xive_error(xive, "VST: unsupported table mode %d", mode);
        return;
    }
}

/*
 * Interrupt controller MMIO region.
The layout is compatible between
 * 4K and 64K pages :
 *
 * Page 0           sub-engine BARs
 *     0x000 - 0x3FF        IC registers
 *     0x400 - 0x7FF        PC registers
 *     0x800 - 0xFFF        VC registers
 *
 * Page 1           Notify page (writes only)
 *     0x000 - 0x7FF        HW interrupt triggers (PSI, PHB)
 *     0x800 - 0xFFF        forwards and syncs
 *
 * Page 2           LSI Trigger page (writes only) (not modeled)
 * Page 3           LSI SB EOI page (reads only) (not modeled)
 *
 * Page 4-7         indirect TIMA
 */

/*
 * IC - registers MMIO
 */
static void pnv_xive_ic_reg_write(void *opaque, hwaddr offset,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    MemoryRegion *sysmem = get_system_memory();
    uint32_t reg = offset >> 3;
    bool is_chip0 = xive->chip->chip_id == 0;

    switch (offset) {

    /*
     * XIVE CQ (PowerBus bridge) settings
     */
    case CQ_MSGSND:     /* msgsnd for doorbells */
    case CQ_FIRMASK_OR: /* FIR error reporting */
        break;
    case CQ_PBI_CTL:
        if (val & CQ_PBI_PC_64K) {
            xive->pc_shift = 16;
        }
        if (val & CQ_PBI_VC_64K) {
            xive->vc_shift = 16;
        }
        break;
    case CQ_CFG_PB_GEN: /* PowerBus General Configuration */
        /*
         * TODO: CQ_INT_ADDR_OPT for 1-block-per-chip mode
         */
        break;

    /*
     * XIVE Virtualization Controller settings
     */
    case VC_GLOBAL_CONFIG:
        break;

    /*
     * XIVE Presenter Controller settings
     */
    case PC_GLOBAL_CONFIG:
        /*
         * PC_GCONF_CHIPID_OVR
         *   Overrides Int command Chip ID with the Chip ID field (DEBUG)
         */
        break;
    case PC_TCTXT_CFG:
        /*
         * TODO: block group support
         *
         * PC_TCTXT_CFG_BLKGRP_EN
         * PC_TCTXT_CFG_HARD_CHIPID_BLK :
         *   Moves the chipid into block field for hardwired CAM compares.
         *   Block offset value is adjusted to 0b0..01 & ThrdId
         *
         *   Will require changes in xive_presenter_tctx_match(). I am
         *   not sure how to handle that yet.
         */

        /* Overrides hardwired chip ID with the chip ID field */
        if (val & PC_TCTXT_CHIPID_OVERRIDE) {
            xive->tctx_chipid = GETFIELD(PC_TCTXT_CHIPID, val);
        }
        break;
    case PC_TCTXT_TRACK:
        /*
         * PC_TCTXT_TRACK_EN:
         *   enable block tracking and exchange of block ownership
         *   information between Interrupt controllers
         */
        break;

    /*
     * Misc settings
     */
    case VC_SBC_CONFIG: /* Store EOI configuration */
        /*
         * Configure store EOI if required by firwmare (skiboot has removed
         * support recently though)
         */
        if (val & (VC_SBC_CONF_CPLX_CIST | VC_SBC_CONF_CIST_BOTH)) {
            object_property_set_int(OBJECT(&xive->ipi_source),
                                    XIVE_SRC_STORE_EOI, "flags", &error_fatal);
        }
        break;

    case VC_EQC_CONFIG: /* TODO: silent escalation */
    case VC_AIB_TX_ORDER_TAG2: /* relax ordering */
        break;

    /*
     * XIVE BAR settings (XSCOM only)
     */
    case CQ_RST_CTL:
        /* bit4: resets all BAR registers */
        break;

    case CQ_IC_BAR: /* IC BAR. 8 pages */
        xive->ic_shift = val & CQ_IC_BAR_64K ? 16 : 12;
        if (!(val & CQ_IC_BAR_VALID)) {
            xive->ic_base = 0;
            if (xive->regs[reg] & CQ_IC_BAR_VALID) {
                /* BAR invalidated: unmap all IC sub-windows */
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_reg_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_notify_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->ic_lsi_mmio);
                memory_region_del_subregion(&xive->ic_mmio,
                                            &xive->tm_indirect_mmio);

                memory_region_del_subregion(sysmem, &xive->ic_mmio);
            }
        } else {
            xive->ic_base = val & ~(CQ_IC_BAR_VALID | CQ_IC_BAR_64K);
            if (!(xive->regs[reg] & CQ_IC_BAR_VALID)) {
                /* BAR becomes valid: map the IC region and its pages */
                memory_region_add_subregion(sysmem, xive->ic_base,
                                            &xive->ic_mmio);

                memory_region_add_subregion(&xive->ic_mmio,  0,
                                            &xive->ic_reg_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            1ul << xive->ic_shift,
                                            &xive->ic_notify_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            2ul << xive->ic_shift,
                                            &xive->ic_lsi_mmio);
                memory_region_add_subregion(&xive->ic_mmio,
                                            4ull << xive->ic_shift,
                                            &xive->tm_indirect_mmio);
            }
        }
        break;

    case CQ_TM1_BAR: /* TM BAR. 4 pages. Map only once */
    case CQ_TM2_BAR: /* second TM BAR. for hotplug. Not modeled */
        xive->tm_shift = val & CQ_TM_BAR_64K ? 16 : 12;
        if (!(val & CQ_TM_BAR_VALID)) {
            xive->tm_base = 0;
            /* The shared TM region is only mapped/unmapped by chip 0 */
            if (xive->regs[reg] & CQ_TM_BAR_VALID && is_chip0) {
                memory_region_del_subregion(sysmem, &xive->tm_mmio);
            }
        } else {
            xive->tm_base = val & ~(CQ_TM_BAR_VALID | CQ_TM_BAR_64K);
            if (!(xive->regs[reg] & CQ_TM_BAR_VALID) && is_chip0) {
                memory_region_add_subregion(sysmem, xive->tm_base,
                                            &xive->tm_mmio);
            }
        }
        break;

    case CQ_PC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->pc_mmio, pnv_xive_pc_size(xive));
        break;
    case CQ_PC_BAR: /* From 32M to 512G */
        if (!(val & CQ_PC_BAR_VALID)) {
            xive->pc_base = 0;
            if (xive->regs[reg] & CQ_PC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->pc_mmio);
            }
        } else {
            xive->pc_base = val & ~(CQ_PC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_PC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->pc_base,
                                            &xive->pc_mmio);
            }
        }
        break;

    case CQ_VC_BARM:
        xive->regs[reg] = val;
        memory_region_set_size(&xive->vc_mmio, pnv_xive_vc_size(xive));
        break;
    case CQ_VC_BAR: /* From 64M to 4TB */
        if (!(val & CQ_VC_BAR_VALID)) {
            xive->vc_base = 0;
            if (xive->regs[reg] & CQ_VC_BAR_VALID) {
                memory_region_del_subregion(sysmem, &xive->vc_mmio);
            }
        } else {
            xive->vc_base = val & ~(CQ_VC_BAR_VALID);
            if (!(xive->regs[reg] & CQ_VC_BAR_VALID)) {
                memory_region_add_subregion(sysmem, xive->vc_base,
                                            &xive->vc_mmio);
            }
        }
        break;

    /*
     * XIVE Table settings.
     */
    case CQ_TAR: /* Table Address */
        break;
    case CQ_TDR: /* Table Data */
        pnv_xive_table_set_data(xive, val);
        break;

    /*
     * XIVE VC & PC Virtual Structure Table settings
     */
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR: /* Virtual table selector */
        break;
    case VC_VSD_TABLE_DATA: /* Virtual table setting */
    case PC_VSD_TABLE_DATA:
        pnv_xive_vst_set_data(xive, val, offset == PC_VSD_TABLE_DATA);
        break;

    /*
     * Interrupt fifo overflow in memory backing store (Not modeled)
     */
    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0: /* Physical Thread Enable */
    case PC_THREAD_EN_REG1: /* Physical Thread Enable (fused core) */
        break;

    case PC_THREAD_EN_REG0_SET:
        xive->regs[PC_THREAD_EN_REG0 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG1_SET:
        xive->regs[PC_THREAD_EN_REG1 >> 3] |= val;
        break;
    case PC_THREAD_EN_REG0_CLR:
        xive->regs[PC_THREAD_EN_REG0 >> 3] &= ~val;
        break;
    case PC_THREAD_EN_REG1_CLR:
        xive->regs[PC_THREAD_EN_REG1 >> 3] &= ~val;
        break;

    /*
     * Indirect TIMA access set up. Defines the PIR of the HW thread
     * to use.
     */
    case PC_TCTXT_INDIR0 ... PC_TCTXT_INDIR3:
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case VC_IVC_SCRUB_MASK:
        break;
    case VC_IVC_SCRUB_TRIG:
        pnv_xive_eas_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val),
                            GETFIELD(VC_SCRUB_OFFSET, val));
        break;

    case VC_EQC_SCRUB_MASK:
    case VC_EQC_CWATCH_SPEC:
    case VC_EQC_CWATCH_DAT0 ... VC_EQC_CWATCH_DAT3:
        break;
    case VC_EQC_SCRUB_TRIG:
        pnv_xive_end_update(xive, GETFIELD(VC_SCRUB_BLOCK_ID, val),
                            GETFIELD(VC_SCRUB_OFFSET, val));
        break;

    case PC_VPC_SCRUB_MASK:
    case PC_VPC_CWATCH_SPEC:
    case PC_VPC_CWATCH_DAT0 ... PC_VPC_CWATCH_DAT7:
        break;
    case PC_VPC_SCRUB_TRIG:
        pnv_xive_nvt_update(xive, GETFIELD(PC_SCRUB_BLOCK_ID, val),
                            GETFIELD(PC_SCRUB_OFFSET, val));
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        break;
    case VC_AT_MACRO_KILL:
        break;
    case PC_AT_KILL_MASK:
    case VC_AT_MACRO_KILL_MASK:
        break;

    default:
        xive_error(xive, "IC: invalid write to reg=0x%"HWADDR_PRIx, offset);
        return;
    }

    /* Latch the written value for later reads */
    xive->regs[reg] = val;
}

static uint64_t pnv_xive_ic_reg_read(void *opaque, hwaddr offset, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t val = 0;
    uint32_t reg = offset >> 3;

    switch (offset) {
    /* Plain read-back of the latched register value */
    case CQ_CFG_PB_GEN:
    case CQ_IC_BAR:
    case CQ_TM1_BAR:
    case CQ_TM2_BAR:
    case CQ_PC_BAR:
    case CQ_PC_BARM:
    case CQ_VC_BAR:
    case CQ_VC_BARM:
    case CQ_TAR:
    case CQ_TDR:
    case CQ_PBI_CTL:

    case PC_TCTXT_CFG:
    case PC_TCTXT_TRACK:
    case PC_TCTXT_INDIR0:
    case PC_TCTXT_INDIR1:
    case PC_TCTXT_INDIR2:
    case PC_TCTXT_INDIR3:
    case PC_GLOBAL_CONFIG:

    case PC_VPC_SCRUB_MASK:
    case PC_VPC_CWATCH_SPEC:
    case PC_VPC_CWATCH_DAT0:
    case PC_VPC_CWATCH_DAT1:
    case PC_VPC_CWATCH_DAT2:
    case PC_VPC_CWATCH_DAT3:
    case PC_VPC_CWATCH_DAT4:
    case PC_VPC_CWATCH_DAT5:
    case PC_VPC_CWATCH_DAT6:
    case PC_VPC_CWATCH_DAT7:

    case VC_GLOBAL_CONFIG:
    case VC_AIB_TX_ORDER_TAG2:

    case VC_IRQ_CONFIG_IPI:
    case VC_IRQ_CONFIG_HW:
    case VC_IRQ_CONFIG_CASCADE1:
    case VC_IRQ_CONFIG_CASCADE2:
    case VC_IRQ_CONFIG_REDIST:
    case VC_IRQ_CONFIG_IPI_CASC:

    case VC_EQC_SCRUB_MASK:
    case VC_EQC_CWATCH_DAT0:
    case VC_EQC_CWATCH_DAT1:
    case VC_EQC_CWATCH_DAT2:
    case VC_EQC_CWATCH_DAT3:

    case VC_EQC_CWATCH_SPEC:
    case VC_IVC_SCRUB_MASK:
    case VC_SBC_CONFIG:
    case VC_AT_MACRO_KILL_MASK:
    case VC_VSD_TABLE_ADDR:
    case PC_VSD_TABLE_ADDR:
    case VC_VSD_TABLE_DATA:
    case PC_VSD_TABLE_DATA:
    case PC_THREAD_EN_REG0:
    case PC_THREAD_EN_REG1:
        val = xive->regs[reg];
        break;

    /*
     * XIVE hardware thread enablement
     */
    case PC_THREAD_EN_REG0_SET:
    case PC_THREAD_EN_REG0_CLR:
        val = xive->regs[PC_THREAD_EN_REG0 >> 3];
        break;
    case PC_THREAD_EN_REG1_SET:
    case PC_THREAD_EN_REG1_CLR:
        val = xive->regs[PC_THREAD_EN_REG1 >> 3];
        break;

    case CQ_MSGSND: /* Identifies which cores have msgsnd enabled. */
        val = 0xffffff0000000000;
        break;

    /*
     * XIVE PC & VC cache updates for EAS, NVT and END
     */
    case PC_VPC_SCRUB_TRIG:
    case VC_IVC_SCRUB_TRIG:
    case VC_EQC_SCRUB_TRIG:
        /* Reading the TRIG register reports scrub completion */
        xive->regs[reg] &= ~VC_SCRUB_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE PC & VC cache invalidation
     */
    case PC_AT_KILL:
        xive->regs[reg] &= ~PC_AT_KILL_VALID;
        val = xive->regs[reg];
        break;
    case VC_AT_MACRO_KILL:
        xive->regs[reg] &= ~VC_KILL_VALID;
        val = xive->regs[reg];
        break;

    /*
     * XIVE synchronisation
     */
    case VC_EQC_CONFIG:
        val = VC_EQC_SYNC_MASK;
        break;

    default:
        xive_error(xive, "IC: invalid read reg=0x%"HWADDR_PRIx, offset);
    }

    return val;
}

static const MemoryRegionOps pnv_xive_ic_reg_ops = {
    .read = pnv_xive_ic_reg_read,
    .write = pnv_xive_ic_reg_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Notify MMIO port page (write only)
 */
#define PNV_XIVE_FORWARD_IPI    0x800 /* Forward IPI */
#define PNV_XIVE_FORWARD_HW     0x880 /* Forward HW */
#define PNV_XIVE_FORWARD_OS_ESC 0x900 /* Forward OS escalation */
#define PNV_XIVE_FORWARD_HW_ESC 0x980 /* Forward Hyp escalation */
#define PNV_XIVE_FORWARD_REDIS  0xa00 /* Forward Redistribution */
#define PNV_XIVE_RESERVED5      0xa80 /* Cache line 5 PowerBUS operation */
#define PNV_XIVE_RESERVED6      0xb00 /* Cache line 6 PowerBUS operation */
#define PNV_XIVE_RESERVED7      0xb80 /* Cache line 7 PowerBUS operation */

/* VC synchronisation */
#define PNV_XIVE_SYNC_IPI       0xc00 /* Sync IPI */
#define PNV_XIVE_SYNC_HW        0xc80 /* Sync HW */
#define PNV_XIVE_SYNC_OS_ESC    0xd00 /* Sync OS escalation */
#define PNV_XIVE_SYNC_HW_ESC    0xd80 /* Sync Hyp escalation */
#define PNV_XIVE_SYNC_REDIS     0xe00 /* Sync Redistribution */

/* PC synchronisation */
#define PNV_XIVE_SYNC_PULL      0xe80 /* Sync pull context */
#define PNV_XIVE_SYNC_PUSH      0xf00 /* Sync push context */
#define \
PNV_XIVE_SYNC_VPC       0xf80 /* Sync remove VPC store */

/* Forward a HW source event notification straight to the router */
static void pnv_xive_ic_hw_trigger(PnvXive *xive, hwaddr addr, uint64_t val)
{
    /*
     * Forward the source event notification directly to the Router.
     * The source interrupt number should already be correctly encoded
     * with the chip block id by the sending device (PHB, PSI).
     */
    xive_router_notify(XIVE_NOTIFIER(xive), val);
}

static void pnv_xive_ic_notify_write(void *opaque, hwaddr addr, uint64_t val,
                                     unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* VC: HW triggers */
    switch (addr) {
    case 0x000 ... 0x7FF:
        pnv_xive_ic_hw_trigger(opaque, addr, val);
        break;

    /* VC: Forwarded IRQs */
    case PNV_XIVE_FORWARD_IPI:
    case PNV_XIVE_FORWARD_HW:
    case PNV_XIVE_FORWARD_OS_ESC:
    case PNV_XIVE_FORWARD_HW_ESC:
    case PNV_XIVE_FORWARD_REDIS:
        /* TODO: forwarded IRQs. Should be like HW triggers */
        xive_error(xive, "IC: forwarded at @0x%"HWADDR_PRIx" IRQ 0x%"PRIx64,
                   addr, val);
        break;

    /* VC syncs */
    case PNV_XIVE_SYNC_IPI:
    case PNV_XIVE_SYNC_HW:
    case PNV_XIVE_SYNC_OS_ESC:
    case PNV_XIVE_SYNC_HW_ESC:
    case PNV_XIVE_SYNC_REDIS:
        break;

    /* PC syncs */
    case PNV_XIVE_SYNC_PULL:
    case PNV_XIVE_SYNC_PUSH:
    case PNV_XIVE_SYNC_VPC:
        break;

    default:
        xive_error(xive, "IC: invalid notify write @%"HWADDR_PRIx, addr);
    }
}

static uint64_t pnv_xive_ic_notify_read(void *opaque, hwaddr addr,
                                        unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* loads are invalid */
    xive_error(xive, "IC: invalid notify read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_notify_ops = {
    .read = pnv_xive_ic_notify_read,
    .write = pnv_xive_ic_notify_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - LSI MMIO handlers (not modeled)
 */

static void pnv_xive_ic_lsi_write(void *opaque, hwaddr addr,
                                  uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid write @%"HWADDR_PRIx, addr);
}

static uint64_t pnv_xive_ic_lsi_read(void *opaque, hwaddr addr, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    xive_error(xive, "IC: LSI invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static const MemoryRegionOps pnv_xive_ic_lsi_ops = {
    .read = pnv_xive_ic_lsi_read,
    .write = pnv_xive_ic_lsi_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * IC - Indirect TIMA MMIO handlers
 */

/*
 * When the TIMA is accessed from the indirect page, the thread id
 * (PIR) has to be configured in the IC registers before. This is used
 * for resets and for debug purpose also.
 */
static XiveTCTX *pnv_xive_get_indirect_tctx(PnvXive *xive)
{
    uint64_t tctxt_indir = xive->regs[PC_TCTXT_INDIR0 >> 3];
    PowerPCCPU *cpu = NULL;
    int pir;

    if (!(tctxt_indir & PC_TCTXT_INDIR_VALID)) {
        xive_error(xive, "IC: no indirect TIMA access in progress");
        return NULL;
    }

    pir = GETFIELD(PC_TCTXT_INDIR_THRDID, tctxt_indir) & 0xff;
    cpu = ppc_get_vcpu_by_pir(pir);
    if (!cpu) {
        xive_error(xive, "IC: invalid PIR %x for indirect access", pir);
        return NULL;
    }

    /* Check that HW thread is XIVE enabled */
    if (!(xive->regs[PC_THREAD_EN_REG0 >> 3] & PPC_BIT(pir & 0x3f))) {
        xive_error(xive, "IC: CPU %x is not enabled", pir);
    }

    return XIVE_TCTX(pnv_cpu_state(cpu)->intc);
}

static void xive_tm_indirect_write(void *opaque, hwaddr offset,
                                   uint64_t value, unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    xive_tctx_tm_write(tctx, offset, value, size);
}

static uint64_t xive_tm_indirect_read(void *opaque, hwaddr offset,
                                      unsigned size)
{
    XiveTCTX *tctx = pnv_xive_get_indirect_tctx(PNV_XIVE(opaque));

    return xive_tctx_tm_read(tctx, offset, size);
}

static const MemoryRegionOps xive_tm_indirect_ops = {
    .read = xive_tm_indirect_read,
    .write = xive_tm_indirect_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 1,
        .max_access_size = 8,
    },
};

/*
 * Interrupt controller XSCOM region.
 */
static uint64_t pnv_xive_xscom_read(void *opaque, hwaddr addr, unsigned size)
{
    switch (addr >> 3) {
    case X_VC_EQC_CONFIG:
        /* FIXME (skiboot): This is the only XSCOM load. Bizarre. */
        return VC_EQC_SYNC_MASK;
    default:
        /* Fall back to the MMIO register handler */
        return pnv_xive_ic_reg_read(opaque, addr, size);
    }
}

static void pnv_xive_xscom_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
    pnv_xive_ic_reg_write(opaque, addr, val, size);
}

static const MemoryRegionOps pnv_xive_xscom_ops = {
    .read = pnv_xive_xscom_read,
    .write = pnv_xive_xscom_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    }
};

/*
 * Virtualization Controller MMIO region containing the IPI and END ESB pages
 */
static uint64_t pnv_xive_vc_read(void *opaque, hwaddr offset,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;
    uint64_t ret = -1;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for read @%"HWADDR_PRIx, offset);
        return -1;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    ret = address_space_ldq(edt_as, edt_offset, MEMTXATTRS_UNSPECIFIED,
                            &result);

    if (result != MEMTX_OK) {
        xive_error(xive, "VC: %s read failed at @0x%"HWADDR_PRIx " -> @0x%"
                   HWADDR_PRIx, edt_type == CQ_TDR_EDT_IPI ? "IPI" : "END",
                   offset, edt_offset);
        return -1;
    }

    return ret;
}

static void pnv_xive_vc_write(void *opaque, hwaddr offset,
                              uint64_t val, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);
    uint64_t edt_index = offset >> pnv_xive_edt_shift(xive);
    uint64_t edt_type = 0;
    uint64_t edt_offset;
    MemTxResult result;
    AddressSpace *edt_as = NULL;

    if (edt_index < XIVE_TABLE_EDT_MAX) {
        edt_type = GETFIELD(CQ_TDR_EDT_TYPE, xive->edt[edt_index]);
    }

    switch (edt_type) {
    case CQ_TDR_EDT_IPI:
        edt_as = &xive->ipi_as;
        break;
    case CQ_TDR_EDT_EQ:
        edt_as = &xive->end_as;
        break;
    default:
        xive_error(xive, "VC: invalid EDT type for write @%"HWADDR_PRIx,
                   offset);
        return;
    }

    /* Remap the offset for the targeted address space */
    edt_offset = pnv_xive_edt_offset(xive, offset, edt_type);

    address_space_stq(edt_as, edt_offset, val, MEMTXATTRS_UNSPECIFIED, &result);
    if (result != MEMTX_OK) {
        xive_error(xive, "VC: write failed at @0x%"HWADDR_PRIx, edt_offset);
    }
}

static const MemoryRegionOps pnv_xive_vc_ops = {
    .read = pnv_xive_vc_read,
    .write = pnv_xive_vc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/*
 * Presenter Controller MMIO region. The Virtualization Controller
 * updates the IPB in the NVT table when required. Not modeled.
 */
static uint64_t pnv_xive_pc_read(void *opaque, hwaddr addr,
                                 unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* PC region is not modeled: any access is reported as an error */
    xive_error(xive, "PC: invalid read @%"HWADDR_PRIx, addr);
    return -1;
}

static void pnv_xive_pc_write(void *opaque, hwaddr addr,
                              uint64_t value, unsigned size)
{
    PnvXive *xive = PNV_XIVE(opaque);

    /* NOTE(review): message says "to VC" although this is the PC region */
    xive_error(xive, "PC: invalid write to VC @%"HWADDR_PRIx, addr);
}

static const MemoryRegionOps pnv_xive_pc_ops = {
    .read = pnv_xive_pc_read,
    .write = pnv_xive_pc_write,
    .endianness = DEVICE_BIG_ENDIAN,
    .valid = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
    .impl = {
        .min_access_size = 8,
        .max_access_size = 8,
    },
};

/* Dump the sources, EAT and ENDT of this chip's XIVE on the monitor */
void pnv_xive_pic_print_info(PnvXive *xive, Monitor *mon)
{
    XiveRouter *xrtr = XIVE_ROUTER(xive);
    uint8_t blk = xive->chip->chip_id;
    uint32_t srcno0 = XIVE_SRCNO(blk, 0);
    uint32_t nr_ipis = pnv_xive_nr_ipis(xive);
    uint32_t nr_ends = pnv_xive_nr_ends(xive);
    XiveEAS eas;
    XiveEND end;
    int i;

    monitor_printf(mon, "XIVE[%x] Source %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    xive_source_pic_print_info(&xive->ipi_source, srcno0, mon);

    monitor_printf(mon, "XIVE[%x] EAT %08x .. %08x\n", blk, srcno0,
                   srcno0 + nr_ipis - 1);
    for (i = 0; i < nr_ipis; i++) {
        /* stop at the first unprovisioned entry */
        if (xive_router_get_eas(xrtr, blk, i, &eas)) {
            break;
        }
        if (!xive_eas_is_masked(&eas)) {
            xive_eas_pic_print_info(&eas, i, mon);
        }
    }

    monitor_printf(mon, "XIVE[%x] ENDT %08x .. %08x\n", blk, 0, nr_ends - 1);
    for (i = 0; i < nr_ends; i++) {
        if (xive_router_get_end(xrtr, blk, i, &end)) {
            break;
        }
        xive_end_pic_print_info(&end, i, mon);
    }
}

static void pnv_xive_reset(void *dev)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;

    /*
     * Use the PnvChip id to identify the XIVE interrupt controller.
     * It can be overriden by configuration at runtime.
     */
    xive->tctx_chipid = xive->chip->chip_id;

    /* Default page size (Should be changed at runtime to 64k) */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* Clear subregions so the FW can reconfigure the layout from scratch */
    if (memory_region_is_mapped(&xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->ipi_edt_mmio, &xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->ipi_edt_mmio)) {
        memory_region_del_subregion(&xive->ipi_mmio, &xive->ipi_edt_mmio);
    }

    if (memory_region_is_mapped(&end_xsrc->esb_mmio)) {
        memory_region_del_subregion(&xive->end_edt_mmio, &end_xsrc->esb_mmio);
    }

    if (memory_region_is_mapped(&xive->end_edt_mmio)) {
        memory_region_del_subregion(&xive->end_mmio, &xive->end_edt_mmio);
    }
}

static void pnv_xive_init(Object *obj)
{
    PnvXive *xive = PNV_XIVE(obj);

    /* child source objects; realized with full HW limits in realize() */
    object_initialize_child(obj, "ipi_source", &xive->ipi_source,
                            sizeof(xive->ipi_source), TYPE_XIVE_SOURCE,
                            &error_abort, NULL);
    object_initialize_child(obj, "end_source", &xive->end_source,
                            sizeof(xive->end_source), TYPE_XIVE_END_SOURCE,
                            &error_abort, NULL);
}

/*
 *  Maximum number of IRQs and ENDs supported by HW
 */
#define PNV_XIVE_NR_IRQS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))
#define PNV_XIVE_NR_ENDS (PNV9_XIVE_VC_SIZE / (1ull << XIVE_ESB_64K_2PAGE))

static void pnv_xive_realize(DeviceState *dev, Error **errp)
{
    PnvXive *xive = PNV_XIVE(dev);
    XiveSource *xsrc = &xive->ipi_source;
    XiveENDSource *end_xsrc = &xive->end_source;
    Error *local_err = NULL;
    Object *obj;

    obj = object_property_get_link(OBJECT(dev), "chip", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'chip' not found: ");
        return;
    }

    /* The PnvChip id identifies the XIVE interrupt controller. */
    xive->chip = PNV_CHIP(obj);

    /*
     * The XiveSource and XiveENDSource objects are realized with the
     * maximum allowed HW configuration. The ESB MMIO regions will be
     * resized dynamically when the controller is configured by the FW
     * to limit accesses to resources not provisioned.
     */
    object_property_set_int(OBJECT(xsrc), PNV_XIVE_NR_IRQS, "nr-irqs",
                            &error_fatal);
    object_property_add_const_link(OBJECT(xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    object_property_set_int(OBJECT(end_xsrc), PNV_XIVE_NR_ENDS, "nr-ends",
                            &error_fatal);
    object_property_add_const_link(OBJECT(end_xsrc), "xive", OBJECT(xive),
                                   &error_fatal);
    object_property_set_bool(OBJECT(end_xsrc), true, "realized", &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    /* Default page size. Generally changed at runtime to 64k */
    xive->ic_shift = xive->vc_shift = xive->pc_shift = 12;

    /* XSCOM region, used for initial configuration of the BARs */
    memory_region_init_io(&xive->xscom_regs, OBJECT(dev), &pnv_xive_xscom_ops,
                          xive, "xscom-xive", PNV9_XSCOM_XIVE_SIZE << 3);

    /* Interrupt controller MMIO regions */
    memory_region_init(&xive->ic_mmio, OBJECT(dev), "xive-ic",
                       PNV9_XIVE_IC_SIZE);

    memory_region_init_io(&xive->ic_reg_mmio, OBJECT(dev), &pnv_xive_ic_reg_ops,
                          xive, "xive-ic-reg", 1 << xive->ic_shift);
    memory_region_init_io(&xive->ic_notify_mmio, OBJECT(dev),
                          &pnv_xive_ic_notify_ops,
                          xive, "xive-ic-notify", 1 << xive->ic_shift);

    /* The Pervasive LSI trigger and EOI pages (not modeled) */
    memory_region_init_io(&xive->ic_lsi_mmio, OBJECT(dev),
                          &pnv_xive_ic_lsi_ops,
                          xive, "xive-ic-lsi", 2 << xive->ic_shift);

    /* Thread Interrupt Management Area (Indirect) */
    memory_region_init_io(&xive->tm_indirect_mmio, OBJECT(dev),
                          &xive_tm_indirect_ops,
                          xive, "xive-tima-indirect", PNV9_XIVE_TM_SIZE);

    /*
     * Overall Virtualization Controller MMIO region containing the
     * IPI ESB pages and END ESB pages. The layout is defined by the
     * EDT "Domain table" and the accesses are dispatched using
     * address spaces for each.
     */
    memory_region_init_io(&xive->vc_mmio, OBJECT(xive), &pnv_xive_vc_ops, xive,
                          "xive-vc", PNV9_XIVE_VC_SIZE);

    memory_region_init(&xive->ipi_mmio, OBJECT(xive), "xive-vc-ipi",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->ipi_as, &xive->ipi_mmio, "xive-vc-ipi");
    memory_region_init(&xive->end_mmio, OBJECT(xive), "xive-vc-end",
                       PNV9_XIVE_VC_SIZE);
    address_space_init(&xive->end_as, &xive->end_mmio, "xive-vc-end");

    /*
     * The MMIO windows exposing the IPI ESBs and the END ESBs in the
     * VC region. Their size is configured by the FW in the EDT table.
     */
    memory_region_init(&xive->ipi_edt_mmio, OBJECT(xive), "xive-vc-ipi-edt", 0);
    memory_region_init(&xive->end_edt_mmio, OBJECT(xive), "xive-vc-end-edt", 0);

    /* Presenter Controller MMIO region (not modeled) */
    memory_region_init_io(&xive->pc_mmio, OBJECT(xive), &pnv_xive_pc_ops, xive,
                          "xive-pc", PNV9_XIVE_PC_SIZE);

    /* Thread Interrupt Management Area (Direct) */
    memory_region_init_io(&xive->tm_mmio, OBJECT(xive), &xive_tm_ops, xive,
                          "xive-tima", PNV9_XIVE_TM_SIZE);

    qemu_register_reset(pnv_xive_reset, dev);
}

/* Add the controller's XSCOM node to the device tree */
static int pnv_xive_dt_xscom(PnvXScomInterface *dev, void *fdt,
                             int xscom_offset)
{
    const char compat[] = "ibm,power9-xive-x";
    char *name;
    int offset;
    uint32_t lpc_pcba = PNV9_XSCOM_XIVE_BASE;
    uint32_t reg[] = {
        cpu_to_be32(lpc_pcba),
        cpu_to_be32(PNV9_XSCOM_XIVE_SIZE)
    };

    name = g_strdup_printf("xive@%x", lpc_pcba);
    offset = fdt_add_subnode(fdt, xscom_offset, name);
    _FDT(offset);
    g_free(name);

    _FDT((fdt_setprop(fdt, offset, "reg", reg, sizeof(reg))));
    _FDT((fdt_setprop(fdt, offset, "compatible", compat,
                      sizeof(compat))));
    return 0;
}

/* BAR addresses, set by the board at machine build time */
static Property pnv_xive_properties[] = {
    DEFINE_PROP_UINT64("ic-bar", PnvXive, ic_base, 0),
    DEFINE_PROP_UINT64("vc-bar", PnvXive, vc_base, 0),
    DEFINE_PROP_UINT64("pc-bar", PnvXive, pc_base, 0),
    DEFINE_PROP_UINT64("tm-bar", PnvXive, tm_base, 0),
    DEFINE_PROP_END_OF_LIST(),
};

static void pnv_xive_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PnvXScomInterfaceClass *xdc = PNV_XSCOM_INTERFACE_CLASS(klass);
    XiveRouterClass *xrc = XIVE_ROUTER_CLASS(klass);
    XiveNotifierClass *xnc = XIVE_NOTIFIER_CLASS(klass);

    xdc->dt_xscom = pnv_xive_dt_xscom;

    dc->desc = "PowerNV XIVE Interrupt Controller";
    dc->realize = pnv_xive_realize;
    dc->props = pnv_xive_properties;

    xrc->get_eas = pnv_xive_get_eas;
    xrc->get_end = pnv_xive_get_end;
    xrc->write_end = pnv_xive_write_end;
    xrc->get_nvt = pnv_xive_get_nvt;
    xrc->write_nvt = pnv_xive_write_nvt;
    xrc->get_tctx = pnv_xive_get_tctx;

    xnc->notify = pnv_xive_notify;
}; /* NOTE(review): stray ';' after function body, kept verbatim */

static const TypeInfo pnv_xive_info = {
    .name = TYPE_PNV_XIVE,
    .parent = TYPE_XIVE_ROUTER,
    .instance_init = pnv_xive_init,
    .instance_size = sizeof(PnvXive),
    .class_init = pnv_xive_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_PNV_XSCOM_INTERFACE },
        { }
    }
};

static void pnv_xive_register_types(void)
{
    type_register_static(&pnv_xive_info);
}

type_init(pnv_xive_register_types)
pmp-tool/PMP
src/qemu/src-pmp/hw/intc/xics_spapr.c
<filename>src/qemu/src-pmp/hw/intc/xics_spapr.c<gh_stars>1-10
/*
 * QEMU PowerPC pSeries Logical Partition (aka sPAPR) hardware System Emulator
 *
 * PAPR Virtualized Interrupt System, aka ICS/ICP aka xics
 *
 * Copyright (c) 2010,2011 <NAME>, IBM Corporation.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "hw/hw.h"
#include "trace.h"
#include "qemu/timer.h"
#include "hw/ppc/spapr.h"
#include "hw/ppc/spapr_cpu_core.h"
#include "hw/ppc/xics.h"
#include "hw/ppc/xics_spapr.h"
#include "hw/ppc/fdt.h"
#include "qapi/visitor.h"

/*
 * Guest interfaces
 */

/* H_CPPR: set the Current Processor Priority Register of the calling vCPU */
static target_ulong h_cppr(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    target_ulong cppr = args[0];

    icp_set_cppr(spapr_cpu_state(cpu)->icp, cppr);
    return H_SUCCESS;
}

/* H_IPI: raise an inter-processor interrupt; args = (server, mfrr) */
static target_ulong h_ipi(PowerPCCPU *cpu, SpaprMachineState *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong mfrr = args[1];
    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), args[0]);

    if (!icp) {
        return H_PARAMETER;
    }

    icp_set_mfrr(icp, mfrr);
    return H_SUCCESS;
}

/* H_XIRR: accept the highest pending interrupt on the calling vCPU */
static target_ulong h_xirr(PowerPCCPU *cpu, SpaprMachineState *spapr,
                           target_ulong opcode, target_ulong *args)
{
    uint32_t xirr = icp_accept(spapr_cpu_state(cpu)->icp);

    args[0] = xirr;
    return H_SUCCESS;
}

/* H_XIRR_X: like H_XIRR but also returns a timestamp in args[1] */
static target_ulong h_xirr_x(PowerPCCPU *cpu, SpaprMachineState *spapr,
                             target_ulong opcode, target_ulong *args)
{
    uint32_t xirr = icp_accept(spapr_cpu_state(cpu)->icp);

    args[0] = xirr;
    args[1] = cpu_get_host_ticks();
    return H_SUCCESS;
}

/* H_EOI: signal end-of-interrupt for the XIRR value in args[0] */
static target_ulong h_eoi(PowerPCCPU *cpu, SpaprMachineState *spapr,
                          target_ulong opcode, target_ulong *args)
{
    target_ulong xirr = args[0];

    icp_eoi(spapr_cpu_state(cpu)->icp, xirr);
    return H_SUCCESS;
}

/* H_IPOLL: poll a server's pending interrupt without accepting it */
static target_ulong h_ipoll(PowerPCCPU *cpu, SpaprMachineState *spapr,
                            target_ulong opcode, target_ulong *args)
{
    ICPState *icp = xics_icp_get(XICS_FABRIC(spapr), args[0]);
    uint32_t mfrr;
    uint32_t xirr;

    if (!icp) {
        return H_PARAMETER;
    }

    xirr = icp_ipoll(icp, &mfrr);

    args[0] = xirr;
    args[1] = mfrr;

    return H_SUCCESS;
}

/* ibm,set-xive: set (server, priority) routing for an interrupt source */
static void rtas_set_xive(PowerPCCPU *cpu, SpaprMachineState *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->ics;
    uint32_t nr, srcno, server, priority;

    if ((nargs != 3) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }
    if (!ics) {
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);
    server = rtas_ld(args, 1);
    priority = rtas_ld(args, 2);

    if (!ics_valid_irq(ics, nr) || !xics_icp_get(XICS_FABRIC(spapr), server)
        || (priority > 0xff)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    srcno = nr - ics->offset;
    /* saved_priority is set to the same value as priority here */
    ics_simple_write_xive(ics, srcno, server, priority, priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

/* ibm,get-xive: return the (server, priority) routing of a source */
static void rtas_get_xive(PowerPCCPU *cpu, SpaprMachineState *spapr,
                          uint32_t token,
                          uint32_t nargs, target_ulong args,
                          uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->ics;
    uint32_t nr, srcno;

    if ((nargs != 1) || (nret != 3)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }
    if (!ics) {
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
    srcno = nr - ics->offset;
    rtas_st(rets, 1, ics->irqs[srcno].server);
    rtas_st(rets, 2, ics->irqs[srcno].priority);
}

/* ibm,int-off: mask a source (priority 0xff), remembering the old priority */
static void rtas_int_off(PowerPCCPU *cpu, SpaprMachineState *spapr,
                         uint32_t token,
                         uint32_t nargs, target_ulong args,
                         uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->ics;
    uint32_t nr, srcno;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }
    if (!ics) {
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    srcno = nr - ics->offset;
    ics_simple_write_xive(ics, srcno, ics->irqs[srcno].server, 0xff,
                          ics->irqs[srcno].priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

/* ibm,int-on: unmask a source by restoring its saved priority */
static void rtas_int_on(PowerPCCPU *cpu, SpaprMachineState *spapr,
                        uint32_t token,
                        uint32_t nargs, target_ulong args,
                        uint32_t nret, target_ulong rets)
{
    ICSState *ics = spapr->ics;
    uint32_t nr, srcno;

    if ((nargs != 1) || (nret != 1)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }
    if (!ics) {
        rtas_st(rets, 0, RTAS_OUT_HW_ERROR);
        return;
    }

    nr = rtas_ld(args, 0);

    if (!ics_valid_irq(ics, nr)) {
        rtas_st(rets, 0, RTAS_OUT_PARAM_ERROR);
        return;
    }

    srcno = nr - ics->offset;
    ics_simple_write_xive(ics, srcno, ics->irqs[srcno].server,
                          ics->irqs[srcno].saved_priority,
                          ics->irqs[srcno].saved_priority);

    rtas_st(rets, 0, RTAS_OUT_SUCCESS);
}

/* Register the XICS RTAS calls and hypercalls with the machine */
void xics_spapr_init(SpaprMachineState *spapr)
{
    /* Registration of global state belongs into realize */
    spapr_rtas_register(RTAS_IBM_SET_XIVE, "ibm,set-xive", rtas_set_xive);
    spapr_rtas_register(RTAS_IBM_GET_XIVE, "ibm,get-xive", rtas_get_xive);
    spapr_rtas_register(RTAS_IBM_INT_OFF, "ibm,int-off", rtas_int_off);
    spapr_rtas_register(RTAS_IBM_INT_ON, "ibm,int-on", rtas_int_on);

    spapr_register_hypercall(H_CPPR, h_cppr);
    spapr_register_hypercall(H_IPI, h_ipi);
    spapr_register_hypercall(H_XIRR, h_xirr);
    spapr_register_hypercall(H_XIRR_X, h_xirr_x);
    spapr_register_hypercall(H_EOI, h_eoi);
    spapr_register_hypercall(H_IPOLL, h_ipoll);
}

/* Build the XICS interrupt-presentation node of the guest device tree */
void spapr_dt_xics(SpaprMachineState *spapr, uint32_t nr_servers, void *fdt,
                   uint32_t phandle)
{
    /* first cell is 0 so no byte swap is needed for it */
    uint32_t interrupt_server_ranges_prop[] = {
        0, cpu_to_be32(nr_servers),
    };
    int node;

    _FDT(node = fdt_add_subnode(fdt, 0, XICS_NODENAME));

    _FDT(fdt_setprop_string(fdt, node, "device_type",
                            "PowerPC-External-Interrupt-Presentation"));
    _FDT(fdt_setprop_string(fdt, node, "compatible", "IBM,ppc-xicp"));
    _FDT(fdt_setprop(fdt, node, "interrupt-controller", NULL, 0));
    _FDT(fdt_setprop(fdt, node, "ibm,interrupt-server-ranges",
                     interrupt_server_ranges_prop,
                     sizeof(interrupt_server_ranges_prop)));
    _FDT(fdt_setprop_cell(fdt, node, "#interrupt-cells", 2));
    _FDT(fdt_setprop_cell(fdt, node, "linux,phandle", phandle));
    _FDT(fdt_setprop_cell(fdt, node, "phandle", phandle));
}
pmp-tool/PMP
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/int-dot-product/test_msa_dotp_u_h.c
<reponame>pmp-tool/PMP
/*
 *  Test program for MSA instruction DOTP_U.H
 *
 *  Copyright (C) 2018  Wave Computing, Inc.
 *  Copyright (C) 2018  <NAME> <<EMAIL>>
 *
 *  This program is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation, either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program. If not, see <https://www.gnu.org/licenses/>.
 *
 */

#include <sys/time.h>
#include <stdint.h>

#include "../../../../include/wrappers_msa.h"
#include "../../../../include/test_inputs.h"
#include "../../../../include/test_utils.h"

/* one result per (operand, operand) pair over both input sets */
#define TEST_COUNT_TOTAL (                                                \
            (PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT) + \
            (RANDOM_INPUTS_SHORT_COUNT) * (RANDOM_INPUTS_SHORT_COUNT))

int32_t main(void)
{
    char *instruction_name = "DOTP_U.H";
    int32_t ret;
    uint32_t i, j;
    struct timeval start, end;
    double elapsed_time;

    uint64_t b128_result[TEST_COUNT_TOTAL][2];
    /*
     * Golden reference values, indexed like the result array.
     * NOTE(review): this table appears truncated in this copy of the
     * file (fewer rows than TEST_COUNT_TOTAL would suggest) - verify
     * against the upstream QEMU source.
     */
    uint64_t b128_expect[TEST_COUNT_TOTAL][2] = {
        { 0xfc02fc02fc02fc02ULL, 0xfc02fc02fc02fc02ULL, },    /*   0  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x52ac52ac52ac52acULL, 0x52ac52ac52ac52acULL, },
        { 0xa956a956a956a956ULL, 0xa956a956a956a956ULL, },
        { 0x9668966896689668ULL, 0x9668966896689668ULL, },
        { 0x659a659a659a659aULL, 0x659a659a659a659aULL, },
        { 0x6f8f19e5c53a6f8fULL, 0x19e5c53a6f8f19e5ULL, },
        { 0x8c73e21d36c88c73ULL, 0xe21d36c88c73e21dULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },    /*   8  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x52ac52ac52ac52acULL, 0x52ac52ac52ac52acULL, },    /*  16  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0xe1c8e1c8e1c8e1c8ULL, 0xe1c8e1c8e1c8e1c8ULL, },
        { 0x70e470e470e470e4ULL, 0x70e470e470e470e4ULL, },
        { 0x0ef00ef00ef00ef0ULL, 0x0ef00ef00ef00ef0ULL, },
        { 0x43bc43bc43bc43bcULL, 0x43bc43bc43bc43bcULL, },
        { 0xf50abbee837cf50aULL, 0xbbee837cf50abbeeULL, },
        { 0x5da296becf305da2ULL, 0x96becf305da296beULL, },
        { 0xa956a956a956a956ULL, 0xa956a956a956a956ULL, },    /*  24  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x70e470e470e470e4ULL, 0x70e470e470e470e4ULL, },
        { 0x3872387238723872ULL, 0x3872387238723872ULL, },
        { 0x8778877887788778ULL, 0x8778877887788778ULL, },
        { 0x21de21de21de21deULL, 0x21de21de21de21deULL, },
        { 0x7a855df741be7a85ULL, 0x5df741be7a855df7ULL, },
        { 0x2ed14b5f67982ed1ULL, 0x4b5f67982ed14b5fULL, },
        { 0x9668966896689668ULL, 0x9668966896689668ULL, },    /*  32  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x0ef00ef00ef00ef0ULL, 0x0ef00ef00ef00ef0ULL, },
        { 0x8778877887788778ULL, 0x8778877887788778ULL, },
        { 0x4520452045204520ULL, 0x4520452045204520ULL, },
        { 0x5148514851485148ULL, 0x5148514851485148ULL, },
        { 0x260ce1849dc8260cULL, 0xe1849dc8260ce184ULL, },
        { 0x705cb4e4f8a0705cULL, 0xb4e4f8a0705cb4e4ULL, },
        { 0x659a659a659a659aULL, 0x659a659a659a659aULL, },    /*  40  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x43bc43bc43bc43bcULL, 0x43bc43bc43bc43bcULL, },
        { 0x21de21de21de21deULL, 0x21de21de21de21deULL, },
        { 0x5148514851485148ULL, 0x5148514851485148ULL, },
        { 0x1452145214521452ULL, 0x1452145214521452ULL, },
        { 0x4983386127724983ULL, 0x3861277249833861ULL, },
        { 0x1c172d393e281c17ULL, 0x2d393e281c172d39ULL, },
        { 0x6f8f19e5c53a6f8fULL, 0x19e5c53a6f8f19e5ULL, },    /*  48  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0xf50abbee837cf50aULL, 0xbbee837cf50abbeeULL, },
        { 0x7a855df741be7a85ULL, 0x5df741be7a855df7ULL, },
        { 0x260ce1849dc8260cULL, 0xe1849dc8260ce184ULL, },
        { 0x4983386127724983ULL, 0x3861277249833861ULL, },
        { 0x180dd5895b04180dULL, 0xd5895b04180dd589ULL, },
        { 0x5782445c6a365782ULL, 0x445c6a365782445cULL, },
        { 0x8c73e21d36c88c73ULL, 0xe21d36c88c73e21dULL, },    /*  56  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x5da296becf305da2ULL, 0x96becf305da296beULL, },
        { 0x2ed14b5f67982ed1ULL, 0x4b5f67982ed14b5fULL, },
        { 0x705cb4e4f8a0705cULL, 0xb4e4f8a0705cb4e4ULL, },
        { 0x1c172d393e281c17ULL, 0x2d393e281c172d39ULL, },
        { 0x5782445c6a365782ULL, 0x445c6a365782445cULL, },
        { 0x34f19dc1cc9234f1ULL, 0x9dc1cc9234f19dc1ULL, },
        { 0x742471342bc42c39ULL, 0x3f6a22fd371d7990ULL, },    /*  64  */
        { 0xd4044ee4444e4413ULL, 0x68a71195331b4430ULL, },
        { 0x80a423cc6c264e27ULL, 0x62556624be531a60ULL, },
        { 0x5c36512021725e8aULL, 0x8a465528c764a2e0ULL, },
        { 0xd4044ee4444e4413ULL, 0x68a71195331b4430ULL, },
        { 0x831d26496b929af1ULL, 0xef958b3d113a1254ULL, },
        { 0xeb7041beae82700dULL, 0xd326aa88189c1f8aULL, },
        { 0xa8721dc73869b21eULL, 0xf27179481e1be5e4ULL, },
        { 0x80a423cc6c264e27ULL, 0x62556624be531a60ULL, },    /*  72  */
        { 0xeb7041beae82700dULL, 0xd326aa88189c1f8aULL, },
        { 0x9334e7282d128b79ULL, 0xbc319725797206e9ULL, },
        { 0x670642166b8da1b6ULL, 0xe0d340587bf92d2aULL, },
        { 0x5c36512021725e8aULL, 0x8a465528c764a2e0ULL, },
        { 0xa8721dc73869b21eULL, 0xf27179481e1be5e4ULL, },
    };

    gettimeofday(&start, NULL);

    /* all (i, j) pairs of the fixed bit-pattern inputs */
    for (i = 0; i < PATTERN_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < PATTERN_INPUTS_SHORT_COUNT; j++) {
            do_msa_DOTP_U_H(b128_pattern[i], b128_pattern[j],
                            b128_result[PATTERN_INPUTS_SHORT_COUNT * i + j]);
        }
    }

    /* all (i, j) pairs of the random inputs, appended after the above */
    for (i = 0; i < RANDOM_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < RANDOM_INPUTS_SHORT_COUNT; j++) {
            do_msa_DOTP_U_H(b128_random[i], b128_random[j],
                            b128_result[((PATTERN_INPUTS_SHORT_COUNT) *
                                         (PATTERN_INPUTS_SHORT_COUNT)) +
                                        RANDOM_INPUTS_SHORT_COUNT * i + j]);
        }
    }

    gettimeofday(&end, NULL);

    /* wall-clock time in milliseconds, reported by check_results() */
    elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0;
    elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0;

    ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time,
                        &b128_result[0][0], &b128_expect[0][0]);

    return ret;
}
pmp-tool/PMP
src/qemu/src-pmp/include/qemu-common.h
/* Common header file that is included by all of QEMU.
 *
 * This file is supposed to be included only by .c files. No header file should
 * depend on qemu-common.h, as this would easily lead to circular header
 * dependencies.
 *
 * If a header file uses a definition from qemu-common.h, that definition
 * must be moved to a separate header file, and the header that uses it
 * must include that header.
 */
#ifndef QEMU_COMMON_H
#define QEMU_COMMON_H

#include "qemu/fprintf-fn.h"

/* Retry an expression returning -1 while it fails with EINTR */
#define TFR(expr) do { if ((expr) != -1) break; } while (errno == EINTR)

/* Copyright string for -version arguments, About dialogs, etc */
#define QEMU_COPYRIGHT "Copyright (c) 2003-2019 " \
    "<NAME> and the QEMU Project developers"

/* Bug reporting information for --help arguments, About dialogs, etc */
#define QEMU_HELP_BOTTOM \
    "See <https://qemu.org/contribute/report-a-bug> for how to report bugs.\n" \
    "More information on the QEMU project at <https://qemu.org>."

/* main function, renamed */
#if defined(CONFIG_COCOA)
int qemu_main(int argc, char **argv, char **envp);
#endif

void qemu_get_timedate(struct tm *tm, int offset);
int qemu_timedate_diff(struct tm *tm);

/*
 * <ctype.h> wrappers: cast to unsigned char first, since passing a
 * plain (possibly negative) char to the ctype functions is undefined.
 */
#define qemu_isalnum(c)         isalnum((unsigned char)(c))
#define qemu_isalpha(c)         isalpha((unsigned char)(c))
#define qemu_iscntrl(c)         iscntrl((unsigned char)(c))
#define qemu_isdigit(c)         isdigit((unsigned char)(c))
#define qemu_isgraph(c)         isgraph((unsigned char)(c))
#define qemu_islower(c)         islower((unsigned char)(c))
#define qemu_isprint(c)         isprint((unsigned char)(c))
#define qemu_ispunct(c)         ispunct((unsigned char)(c))
#define qemu_isspace(c)         isspace((unsigned char)(c))
#define qemu_isupper(c)         isupper((unsigned char)(c))
#define qemu_isxdigit(c)        isxdigit((unsigned char)(c))
#define qemu_tolower(c)         tolower((unsigned char)(c))
#define qemu_toupper(c)         toupper((unsigned char)(c))
#define qemu_isascii(c)         isascii((unsigned char)(c))
#define qemu_toascii(c)         toascii((unsigned char)(c))

void *qemu_oom_check(void *ptr);

/* write(2) wrapper that retries on short writes and EINTR */
ssize_t qemu_write_full(int fd, const void *buf, size_t count)
    QEMU_WARN_UNUSED_RESULT;

#ifndef _WIN32
int qemu_pipe(int pipefd[2]);
/* like openpty() but also makes it raw; return master fd */
int qemu_openpty_raw(int *aslave, char *pty_name);
#endif

#ifdef _WIN32
/* MinGW needs type casts for the 'buf' and 'optval' arguments. */
#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \
    getsockopt(sockfd, level, optname, (void *)optval, optlen)
#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \
    setsockopt(sockfd, level, optname, (const void *)optval, optlen)
#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, (void *)buf, len, flags)
#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \
    sendto(sockfd, (const void *)buf, len, flags, destaddr, addrlen)
#else
#define qemu_getsockopt(sockfd, level, optname, optval, optlen) \
    getsockopt(sockfd, level, optname, optval, optlen)
#define qemu_setsockopt(sockfd, level, optname, optval, optlen) \
    setsockopt(sockfd, level, optname, optval, optlen)
#define qemu_recv(sockfd, buf, len, flags) recv(sockfd, buf, len, flags)
#define qemu_sendto(sockfd, buf, len, flags, destaddr, addrlen) \
    sendto(sockfd, buf, len, flags, destaddr, addrlen)
#endif

extern bool tcg_allowed;
void tcg_exec_init(unsigned long tb_size);
#ifdef CONFIG_TCG
#define tcg_enabled() (tcg_allowed)
#else
#define tcg_enabled() 0
#endif

void cpu_exec_init_all(void);
void cpu_exec_step_atomic(CPUState *cpu);

/**
 * set_preferred_target_page_bits:
 * @bits: number of bits needed to represent an address within the page
 *
 * Set the preferred target page size (the actual target page
 * size may be smaller than any given CPU's preference).
 * Returns true on success, false on failure (which can only happen
 * if this is called after the system has already finalized its
 * choice of page size and the requested page size is smaller than that).
 */
bool set_preferred_target_page_bits(int bits);

/**
 * Sends a (part of) iovec down a socket, yielding when the socket is full, or
 * Receives data into a (part of) iovec from a socket,
 * yielding when there is no data in the socket.
 * The same interface as qemu_sendv_recvv(), with added yielding.
 * XXX should mark these as coroutine_fn
 */
ssize_t qemu_co_sendv_recvv(int sockfd, struct iovec *iov, unsigned iov_cnt,
                            size_t offset, size_t bytes, bool do_send);
#define qemu_co_recvv(sockfd, iov, iov_cnt, offset, bytes) \
  qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, false)
#define qemu_co_sendv(sockfd, iov, iov_cnt, offset, bytes) \
  qemu_co_sendv_recvv(sockfd, iov, iov_cnt, offset, bytes, true)

/**
 * The same as above, but with just a single buffer
 */
ssize_t qemu_co_send_recv(int sockfd, void *buf, size_t bytes, bool do_send);
#define qemu_co_recv(sockfd, buf, bytes) \
  qemu_co_send_recv(sockfd, buf, bytes, false)
#define qemu_co_send(sockfd, buf, bytes) \
  qemu_co_send_recv(sockfd, buf, bytes, true)

void qemu_progress_init(int enabled, float min_skip);
void qemu_progress_end(void);
void qemu_progress_print(float delta, int max);
const char *qemu_get_vm_name(void);

/* selectors for qemu_find_file() */
#define QEMU_FILE_TYPE_BIOS   0
#define QEMU_FILE_TYPE_KEYMAP 1
char *qemu_find_file(int type, const char *name);

/* OS specific functions */
void os_setup_early_signal_handling(void);
char *os_find_datadir(void);
int os_parse_cmd_args(int index, const char *optarg);

#include "qemu/module.h"

/*
 * Hexdump a buffer to a file. An optional string prefix is added to every line
 */

void qemu_hexdump(const char *buf, FILE *fp, const char *prefix, size_t size);

/*
 * helper to parse debug environment variables
 */
int parse_debug_env(const char *name, int max, int initial);

const char *qemu_ether_ntoa(const MACAddr *mac);
char *size_to_str(uint64_t val);
void page_size_init(void);

/* returns non-zero if dump is in progress, otherwise zero is
 * returned. */
bool dump_in_progress(void);

#endif
pmp-tool/PMP
src/qemu/src-pmp/hw/virtio/vhost-vsock-pci.c
<filename>src/qemu/src-pmp/hw/virtio/vhost-vsock-pci.c
/*
 * Vhost vsock PCI Bindings
 *
 * Copyright 2015 Red Hat, Inc.
 *
 * Authors:
 *  <NAME>   <<EMAIL>>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * (at your option) any later version.  See the COPYING file in the
 * top-level directory.
 */

#include "qemu/osdep.h"
#include "virtio-pci.h"
#include "hw/virtio/vhost-vsock.h"

typedef struct VHostVSockPCI VHostVSockPCI;

/*
 * vhost-vsock-pci: This extends VirtioPCIProxy.
 */
#define TYPE_VHOST_VSOCK_PCI "vhost-vsock-pci-base"
#define VHOST_VSOCK_PCI(obj) \
        OBJECT_CHECK(VHostVSockPCI, (obj), TYPE_VHOST_VSOCK_PCI)

struct VHostVSockPCI {
    VirtIOPCIProxy parent_obj;   /* PCI proxy; must be first member */
    VHostVSock vdev;             /* embedded vhost-vsock virtio device */
};

/* vhost-vsock-pci */

static Property vhost_vsock_pci_properties[] = {
    /* 3 MSI-X vectors: config + 2 queues */
    DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
    DEFINE_PROP_END_OF_LIST(),
};

/* Plug the embedded virtio device onto the proxy's bus and realize it */
static void vhost_vsock_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VHostVSockPCI *dev = VHOST_VSOCK_PCI(vpci_dev);
    DeviceState *vdev = DEVICE(&dev->vdev);

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    object_property_set_bool(OBJECT(vdev), true, "realized", errp);
}

static void vhost_vsock_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    k->realize = vhost_vsock_pci_realize;
    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    dc->props = vhost_vsock_pci_properties;
    pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_VSOCK;
    pcidev_k->revision = 0x00;
    pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
}

static void vhost_vsock_pci_instance_init(Object *obj)
{
    VHostVSockPCI *dev = VHOST_VSOCK_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VHOST_VSOCK);
}

/* Registers base, generic, transitional and non-transitional variants */
static const VirtioPCIDeviceTypeInfo vhost_vsock_pci_info = {
    .base_name             = TYPE_VHOST_VSOCK_PCI,
    .generic_name          = "vhost-vsock-pci",
    .transitional_name     = "vhost-vsock-pci-transitional",
    .non_transitional_name = "vhost-vsock-pci-non-transitional",
    .instance_size = sizeof(VHostVSockPCI),
    .instance_init = vhost_vsock_pci_instance_init,
    .class_init    = vhost_vsock_pci_class_init,
};

static void virtio_pci_vhost_register(void)
{
    virtio_pci_types_register(&vhost_vsock_pci_info);
}

type_init(virtio_pci_vhost_register)
pmp-tool/PMP
src/qemu/src-pmp/include/qemu/filemonitor.h
<filename>src/qemu/src-pmp/include/qemu/filemonitor.h
/*
 * QEMU file monitor helper
 *
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

#ifndef QEMU_FILE_MONITOR_H
#define QEMU_FILE_MONITOR_H

#include "qemu-common.h"

/* Opaque handle; definition is private to the implementation */
typedef struct QFileMonitor QFileMonitor;

typedef enum {
    /* File has been created in a dir */
    QFILE_MONITOR_EVENT_CREATED,
    /* File has been modified in a dir */
    QFILE_MONITOR_EVENT_MODIFIED,
    /* File has been deleted in a dir */
    QFILE_MONITOR_EVENT_DELETED,
    /* File has attributes changed */
    QFILE_MONITOR_EVENT_ATTRIBUTES,
    /* Dir is no longer being monitored (due to deletion) */
    QFILE_MONITOR_EVENT_IGNORED,
} QFileMonitorEvent;

/**
 * QFileMonitorHandler:
 * @id: id from qemu_file_monitor_add_watch()
 * @event: the file change that occurred
 * @filename: the name of the file affected
 * @opaque: opaque data provided to qemu_file_monitor_add_watch()
 *
 * Invoked whenever a file changes. If @event is
 * QFILE_MONITOR_EVENT_IGNORED, @filename will be
 * empty.
 *
 */
typedef void (*QFileMonitorHandler)(int64_t id,
                                    QFileMonitorEvent event,
                                    const char *filename,
                                    void *opaque);

/**
 * qemu_file_monitor_new:
 * @errp: pointer to a NULL-initialized error object
 *
 * Create a handle for a file monitoring object.
 *
 * This object does locking internally to enable it to be
 * safe to use from multiple threads
 *
 * If the platform does not support file monitoring, an
 * error will be reported. Likewise an error is reported if
 * file monitoring is supported but cannot be initialized.
 *
 * Currently this is implemented on Linux platforms with
 * the inotify subsystem.
 *
 * Returns: the new monitoring object, or NULL on error
 */
QFileMonitor *qemu_file_monitor_new(Error **errp);

/**
 * qemu_file_monitor_free:
 * @mon: the file monitor context
 *
 * Free resources associated with the file monitor,
 * including any currently registered watches.
 */
void qemu_file_monitor_free(QFileMonitor *mon);

/**
 * qemu_file_monitor_add_watch:
 * @mon: the file monitor context
 * @dirpath: the directory whose contents to watch
 * @filename: optional filename to filter on
 * @cb: the function to invoke when @dirpath has changes
 * @opaque: data to pass to @cb
 * @errp: pointer to a NULL-initialized error object
 *
 * Register to receive notifications of changes
 * in the directory @dirpath. All files in the
 * directory will be monitored. If the caller is
 * only interested in one specific file, @filename
 * can be used to filter events.
 *
 * Returns: a positive integer watch ID, or -1 on error
 */
int64_t qemu_file_monitor_add_watch(QFileMonitor *mon,
                                    const char *dirpath,
                                    const char *filename,
                                    QFileMonitorHandler cb,
                                    void *opaque,
                                    Error **errp);

/**
 * qemu_file_monitor_remove_watch:
 * @mon: the file monitor context
 * @dirpath: the directory whose contents to unwatch
 * @id: id of the watch to remove
 *
 * Removes the file monitoring watch @id, associated
 * with the directory @dirpath. This must never be
 * called from a QFileMonitorHandler callback, or a
 * deadlock will result.
 */
void qemu_file_monitor_remove_watch(QFileMonitor *mon,
                                    const char *dirpath,
                                    int64_t id);

#endif /* QEMU_FILE_MONITOR_H */
pmp-tool/PMP
src/qemu/src-pmp/chardev/char-mux.c
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 <NAME>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/option.h"
#include "chardev/char.h"
#include "sysemu/block-backend.h"
#include "sysemu/sysemu.h"
#include "chardev/char-mux.h"

/* MUX driver for serial I/O splitting */

/*
 * Write @len bytes from @buf to the underlying chardev. When timestamps
 * are enabled, a "[HH:MM:SS.mmm] " prefix is emitted at each line start.
 * Returns the number of bytes accepted (prefix bytes are not counted).
 *
 * Called with chr_write_lock held.
 */
static int mux_chr_write(Chardev *chr, const uint8_t *buf, int len)
{
    MuxChardev *d = MUX_CHARDEV(chr);
    int ret;
    if (!d->timestamps) {
        ret = qemu_chr_fe_write(&d->chr, buf, len);
    } else {
        int i;

        ret = 0;
        for (i = 0; i < len; i++) {
            if (d->linestart) {
                char buf1[64];
                int64_t ti;
                int secs;

                ti = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
                /* First timestamped byte establishes the time origin. */
                if (d->timestamps_start == -1) {
                    d->timestamps_start = ti;
                }
                ti -= d->timestamps_start;
                secs = ti / 1000;
                snprintf(buf1, sizeof(buf1),
                         "[%02d:%02d:%02d.%03d] ",
                         secs / 3600,
                         (secs / 60) % 60,
                         secs % 60,
                         (int)(ti % 1000));
                /* XXX this blocks entire thread. Rewrite to use
                 * qemu_chr_fe_write and background I/O callbacks */
                qemu_chr_fe_write_all(&d->chr, (uint8_t *)buf1, strlen(buf1));
                d->linestart = 0;
            }
            ret += qemu_chr_fe_write(&d->chr, buf + i, 1);
            if (buf[i] == '\n') {
                d->linestart = 1;
            }
        }
    }
    return ret;
}

/* Help text shown on escape-h; '%' is replaced with the escape char name. */
static const char * const mux_help[] = {
    "% h print this help\n\r",
    "% x exit emulator\n\r",
    "% s save disk data back to file (if -snapshot)\n\r",
    "% t toggle console timestamps\n\r",
    "% b send break (magic sysrq)\n\r",
    "% c switch between console and monitor\n\r",
    "% % sends %\n\r",
    NULL
};

int term_escape_char = 0x01; /* ctrl-a is used for escape */

/* Print the mux_help table, substituting the current escape character. */
static void mux_print_help(Chardev *chr)
{
    int i, j;
    char ebuf[15] = "Escape-Char";
    char cbuf[50] = "\n\r";

    if (term_escape_char > 0 && term_escape_char < 26) {
        /* Escape char is a control key: render it as "C-<letter>". */
        snprintf(cbuf, sizeof(cbuf), "\n\r");
        snprintf(ebuf, sizeof(ebuf), "C-%c", term_escape_char - 1 + 'a');
    } else {
        snprintf(cbuf, sizeof(cbuf),
                 "\n\rEscape-Char set to Ascii: 0x%02x\n\r\n\r",
                 term_escape_char);
    }
    /* XXX this blocks entire thread. Rewrite to use
     * qemu_chr_fe_write and background I/O callbacks */
    qemu_chr_write_all(chr, (uint8_t *)cbuf, strlen(cbuf));
    for (i = 0; mux_help[i] != NULL; i++) {
        for (j = 0; mux_help[i][j] != '\0'; j++) {
            if (mux_help[i][j] == '%') {
                qemu_chr_write_all(chr, (uint8_t *)ebuf, strlen(ebuf));
            } else {
                qemu_chr_write_all(chr, (uint8_t *)&mux_help[i][j], 1);
            }
        }
    }
}

/* Deliver @event to the front end registered at mux slot @mux_nr, if any. */
static void mux_chr_send_event(MuxChardev *d, int mux_nr, int event)
{
    CharBackend *be = d->backends[mux_nr];

    if (be && be->chr_event) {
        be->chr_event(be->opaque, event);
    }
}

/* Forward a backend event to whichever front end currently has focus. */
static void mux_chr_be_event(Chardev *chr, int event)
{
    MuxChardev *d = MUX_CHARDEV(chr);

    if (d->focus != -1) {
        mux_chr_send_event(d, d->focus, event);
    }
}

/*
 * Process one input byte, handling the two-byte escape sequences
 * (escape char followed by a command key). Returns 1 if @ch should be
 * passed through to the focused front end, 0 if it was consumed here.
 */
static int mux_proc_byte(Chardev *chr, MuxChardev *d, int ch)
{
    if (d->term_got_escape) {
        d->term_got_escape = 0;
        if (ch == term_escape_char) {
            /* Doubled escape char sends the char itself. */
            goto send_char;
        }
        switch (ch) {
        case '?':
        case 'h':
            mux_print_help(chr);
            break;
        case 'x':
            {
                const char *term = "QEMU: Terminated\n\r";
                qemu_chr_write_all(chr, (uint8_t *)term, strlen(term));
                exit(0);
                break;
            }
        case 's':
            blk_commit_all();
            break;
        case 'b':
            qemu_chr_be_event(chr, CHR_EVENT_BREAK);
            break;
        case 'c':
            assert(d->mux_cnt > 0); /* handler registered with first fe */
            /* Switch to the next registered device */
            mux_set_focus(chr, (d->focus + 1) % d->mux_cnt);
            break;
        case 't':
            d->timestamps = !d->timestamps;
            d->timestamps_start = -1;
            d->linestart = 0;
            break;
        }
    } else if (ch == term_escape_char) {
        d->term_got_escape = 1;
    } else {
    send_char:
        return 1;
    }
    return 0;
}

/* Drain buffered bytes to the focused front end while it can accept them. */
static void mux_chr_accept_input(Chardev *chr)
{
    MuxChardev *d = MUX_CHARDEV(chr);
    int m = d->focus;
    CharBackend *be = d->backends[m];

    while (be && d->prod[m] != d->cons[m] &&
           be->chr_can_read && be->chr_can_read(be->opaque)) {
        be->chr_read(be->opaque,
                     &d->buffer[m][d->cons[m]++ & MUX_BUFFER_MASK], 1);
    }
}

/*
 * Backend can-read callback: accept input while the per-focus ring
 * buffer has room, otherwise defer to the focused front end.
 */
static int mux_chr_can_read(void *opaque)
{
    MuxChardev *d = MUX_CHARDEV(opaque);
    int m = d->focus;
    CharBackend *be = d->backends[m];

    if ((d->prod[m] - d->cons[m]) < MUX_BUFFER_SIZE) {
        return 1;
    }

    if (be && be->chr_can_read) {
        return be->chr_can_read(be->opaque);
    }

    return 0;
}

/*
 * Backend read callback: run each byte through escape processing, then
 * either deliver it directly (buffer empty and front end ready) or queue
 * it in the focused slot's ring buffer.
 */
static void mux_chr_read(void *opaque, const uint8_t *buf, int size)
{
    Chardev *chr = CHARDEV(opaque);
    MuxChardev *d = MUX_CHARDEV(opaque);
    int m = d->focus;
    CharBackend *be = d->backends[m];
    int i;

    mux_chr_accept_input(opaque);

    for (i = 0; i < size; i++)
        if (mux_proc_byte(chr, d, buf[i])) {
            if (d->prod[m] == d->cons[m] &&
                be && be->chr_can_read &&
                be->chr_can_read(be->opaque)) {
                be->chr_read(be->opaque, &buf[i], 1);
            } else {
                d->buffer[m][d->prod[m]++ & MUX_BUFFER_MASK] = buf[i];
            }
        }
}

/* Broadcast @event to every front end attached to this mux. */
void mux_chr_send_all_event(Chardev *chr, int event)
{
    MuxChardev *d = MUX_CHARDEV(chr);
    int i;

    /* Events are deferred until machine init completes (see open_muxes). */
    if (!machine_init_done) {
        return;
    }

    /* Send the event to all registered listeners */
    for (i = 0; i < d->mux_cnt; i++) {
        mux_chr_send_event(d, i, event);
    }
}

static void mux_chr_event(void *opaque, int event)
{
    mux_chr_send_all_event(CHARDEV(opaque), event);
}

/* Delegate watch creation to the underlying chardev driver, if supported. */
static GSource *mux_chr_add_watch(Chardev *s, GIOCondition cond)
{
    MuxChardev *d = MUX_CHARDEV(s);
    Chardev *chr = qemu_chr_fe_get_driver(&d->chr);
    ChardevClass *cc = CHARDEV_GET_CLASS(chr);

    if (!cc->chr_add_watch) {
        return NULL;
    }

    return cc->chr_add_watch(chr, cond);
}

/* Detach all front ends and release the underlying chardev. */
static void char_mux_finalize(Object *obj)
{
    MuxChardev *d = MUX_CHARDEV(obj);
    int i;

    for (i = 0; i < d->mux_cnt; i++) {
        CharBackend *be = d->backends[i];
        if (be) {
            be->chr = NULL;
        }
    }
    qemu_chr_fe_deinit(&d->chr, false);
}

static void mux_chr_update_read_handlers(Chardev *chr)
{
    MuxChardev *d = MUX_CHARDEV(chr);

    /* Fix up the real driver with mux routines */
    qemu_chr_fe_set_handlers_full(&d->chr,
                                  mux_chr_can_read,
                                  mux_chr_read,
                                  mux_chr_event,
                                  NULL,
                                  chr,
                                  chr->gcontext, true, false);
}

/* Switch input focus to slot @focus, notifying old and new front ends. */
void mux_set_focus(Chardev *chr, int focus)
{
    MuxChardev *d = MUX_CHARDEV(chr);

    assert(focus >= 0);
    assert(focus < d->mux_cnt);

    if (d->focus != -1) {
        mux_chr_send_event(d, d->focus, CHR_EVENT_MUX_OUT);
    }

    d->focus = focus;
    chr->be = d->backends[focus];
    mux_chr_send_event(d, d->focus, CHR_EVENT_MUX_IN);
}

/* Open hook: bind the mux to its base chardev named by the backend config. */
static void qemu_chr_open_mux(Chardev *chr,
                              ChardevBackend *backend,
                              bool *be_opened,
                              Error **errp)
{
    ChardevMux *mux = backend->u.mux.data;
    Chardev *drv;
    MuxChardev *d = MUX_CHARDEV(chr);

    drv = qemu_chr_find(mux->chardev);
    if (drv == NULL) {
        error_setg(errp, "mux: base chardev %s not found", mux->chardev);
        return;
    }

    d->focus = -1;
    /* only default to opened state if we've realized the initial
     * set of muxes
     */
    *be_opened = machine_init_done;
    qemu_chr_fe_init(&d->chr, drv, errp);
}

/* Parse hook: require a "chardev" option naming the base chardev. */
static void qemu_chr_parse_mux(QemuOpts *opts, ChardevBackend *backend,
                               Error **errp)
{
    const char *chardev = qemu_opt_get(opts, "chardev");
    ChardevMux *mux;

    if (chardev == NULL) {
        error_setg(errp, "chardev: mux: no chardev given");
        return;
    }
    backend->type = CHARDEV_BACKEND_KIND_MUX;
    mux = backend->u.mux.data = g_new0(ChardevMux, 1);
    qemu_chr_parse_common(opts, qapi_ChardevMux_base(mux));
    mux->chardev = g_strdup(chardev);
}

/**
 * Called after processing of default and command-line-specified
 * chardevs to deliver CHR_EVENT_OPENED events to any FEs attached
 * to a mux chardev. This is done here to ensure that
 * output/prompts/banners are only displayed for the FE that has
 * focus when initial command-line processing/machine init is
 * completed.
 *
 * After this point, any new FE attached to any new or existing
 * mux will receive CHR_EVENT_OPENED notifications for the BE
 * immediately.
 */
static int open_muxes(Chardev *chr)
{
    /* send OPENED to all already-attached FEs */
    mux_chr_send_all_event(chr, CHR_EVENT_OPENED);
    /*
     * mark mux as OPENED so any new FEs will immediately receive
     * OPENED event
     */
    chr->be_open = 1;

    return 0;
}

static void char_mux_class_init(ObjectClass *oc, void *data)
{
    ChardevClass *cc = CHARDEV_CLASS(oc);

    cc->parse = qemu_chr_parse_mux;
    cc->open = qemu_chr_open_mux;
    cc->chr_write = mux_chr_write;
    cc->chr_accept_input = mux_chr_accept_input;
    cc->chr_add_watch = mux_chr_add_watch;
    cc->chr_be_event = mux_chr_be_event;
    cc->chr_machine_done = open_muxes;
    cc->chr_update_read_handler = mux_chr_update_read_handlers;
}

static const TypeInfo char_mux_type_info = {
    .name = TYPE_CHARDEV_MUX,
    .parent = TYPE_CHARDEV,
    .class_init = char_mux_class_init,
    .instance_size = sizeof(MuxChardev),
    .instance_finalize = char_mux_finalize,
};

static void register_types(void)
{
    type_register_static(&char_mux_type_info);
}

type_init(register_types);
pmp-tool/PMP
src/qemu/src-pmp/target/riscv/cpu_user.h
#define xRA 1 /* return address (aka link register) */ #define xSP 2 /* stack pointer */ #define xGP 3 /* global pointer */ #define xTP 4 /* thread pointer */ #define xA0 10 /* gpr[10-17] are syscall arguments */ #define xA1 11 #define xA2 12 #define xA3 13 #define xA4 14 #define xA5 15 #define xA6 16 #define xA7 17 /* syscall number for RVI ABI */ #define xT0 5 /* syscall number for RVE ABI */
pmp-tool/PMP
src/qemu/src-pmp/tests/e1000-test.c
/* * QTest testcase for e1000 NIC * * Copyright (c) 2013-2014 SUSE LINUX Products GmbH * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "libqtest.h" #include "libqos/qgraph.h" #include "libqos/pci.h" typedef struct QE1000 QE1000; struct QE1000 { QOSGraphObject obj; QPCIDevice dev; }; static const char *models[] = { "e1000", "e1000-82540em", "e1000-82544gc", "e1000-82545em", }; static void *e1000_get_driver(void *obj, const char *interface) { QE1000 *e1000 = obj; if (!g_strcmp0(interface, "pci-device")) { return &e1000->dev; } fprintf(stderr, "%s not present in e1000e\n", interface); g_assert_not_reached(); } static void *e1000_create(void *pci_bus, QGuestAllocator *alloc, void *addr) { QE1000 *e1000 = g_new0(QE1000, 1); QPCIBus *bus = pci_bus; qpci_device_init(&e1000->dev, bus, addr); e1000->obj.get_driver = e1000_get_driver; return &e1000->obj; } static void e1000_register_nodes(void) { int i; QOSGraphEdgeOptions opts = { .extra_device_opts = "addr=04.0", }; add_qpci_address(&opts, &(QPCIAddress) { .devfn = QPCI_DEVFN(4, 0) }); for (i = 0; i < ARRAY_SIZE(models); i++) { qos_node_create_driver(models[i], e1000_create); qos_node_consumes(models[i], "pci-bus", &opts); qos_node_produces(models[i], "pci-device"); } } libqos_init(e1000_register_nodes);
pmp-tool/PMP
src/qemu/src-pmp/hw/ppc/pnv_occ.c
/*
 * QEMU PowerPC PowerNV Emulation of a few OCC related registers
 *
 * Copyright (c) 2015-2017, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "sysemu/sysemu.h"
#include "target/ppc/cpu.h"
#include "qapi/error.h"
#include "qemu/log.h"

#include "hw/ppc/pnv.h"
#include "hw/ppc/pnv_xscom.h"
#include "hw/ppc/pnv_occ.h"

/* POWER8 OCB registers, as 8-byte XSCOM word offsets */
#define OCB_OCI_OCCMISC         0x4020
#define OCB_OCI_OCCMISC_AND     0x4021
#define OCB_OCI_OCCMISC_OR      0x4022

/*
 * Update OCCMISC (only the top 16 bits are kept) and drive the PSI
 * interrupt line from the register's MSB.
 */
static void pnv_occ_set_misc(PnvOCC *occ, uint64_t val)
{
    bool irq_state;
    PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);

    val &= 0xffff000000000000ull;

    occ->occmisc = val;
    irq_state = !!(val >> 63);
    pnv_psi_irq_set(occ->psi, poc->psi_irq, irq_state);
}

/* XSCOM read handler (POWER8); unimplemented offsets read as 0. */
static uint64_t pnv_occ_power8_xscom_read(void *opaque, hwaddr addr,
                                          unsigned size)
{
    PnvOCC *occ = PNV_OCC(opaque);
    uint32_t offset = addr >> 3;
    uint64_t val = 0;

    switch (offset) {
    case OCB_OCI_OCCMISC:
        val = occ->occmisc;
        break;
    default:
        /* Bug fix: the log message printed "Ox%..." instead of "0x%...". */
        qemu_log_mask(LOG_UNIMP, "OCC Unimplemented register: 0x%"
                      HWADDR_PRIx "\n", addr >> 3);
    }
    return val;
}

/* XSCOM write handler (POWER8); writes to unimplemented offsets are logged. */
static void pnv_occ_power8_xscom_write(void *opaque, hwaddr addr,
                                       uint64_t val, unsigned size)
{
    PnvOCC *occ = PNV_OCC(opaque);
    uint32_t offset = addr >> 3;

    switch (offset) {
    case OCB_OCI_OCCMISC_AND:
        pnv_occ_set_misc(occ, occ->occmisc & val);
        break;
    case OCB_OCI_OCCMISC_OR:
        pnv_occ_set_misc(occ, occ->occmisc | val);
        break;
    case OCB_OCI_OCCMISC:
        pnv_occ_set_misc(occ, val);
        break;
    default:
        /* Bug fix: the log message printed "Ox%..." instead of "0x%...". */
        qemu_log_mask(LOG_UNIMP, "OCC Unimplemented register: 0x%"
                      HWADDR_PRIx "\n", addr >> 3);
    }
}

static const MemoryRegionOps pnv_occ_power8_xscom_ops = {
    .read = pnv_occ_power8_xscom_read,
    .write = pnv_occ_power8_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};

static void pnv_occ_power8_class_init(ObjectClass *klass, void *data)
{
    PnvOCCClass *poc = PNV_OCC_CLASS(klass);

    poc->xscom_size = PNV_XSCOM_OCC_SIZE;
    poc->xscom_ops = &pnv_occ_power8_xscom_ops;
    poc->psi_irq = PSIHB_IRQ_OCC;
}

static const TypeInfo pnv_occ_power8_type_info = {
    .name          = TYPE_PNV8_OCC,
    .parent        = TYPE_PNV_OCC,
    .instance_size = sizeof(PnvOCC),
    .class_init    = pnv_occ_power8_class_init,
};

/* POWER9 OCB registers, as 8-byte XSCOM word offsets */
#define P9_OCB_OCI_OCCMISC              0x6080
#define P9_OCB_OCI_OCCMISC_CLEAR        0x6081
#define P9_OCB_OCI_OCCMISC_OR           0x6082

/* XSCOM read handler (POWER9); unimplemented offsets read as 0. */
static uint64_t pnv_occ_power9_xscom_read(void *opaque, hwaddr addr,
                                          unsigned size)
{
    PnvOCC *occ = PNV_OCC(opaque);
    uint32_t offset = addr >> 3;
    uint64_t val = 0;

    switch (offset) {
    case P9_OCB_OCI_OCCMISC:
        val = occ->occmisc;
        break;
    default:
        /* Bug fix: the log message printed "Ox%..." instead of "0x%...". */
        qemu_log_mask(LOG_UNIMP, "OCC Unimplemented register: 0x%"
                      HWADDR_PRIx "\n", addr >> 3);
    }
    return val;
}

/* XSCOM write handler (POWER9); CLEAR resets OCCMISC entirely. */
static void pnv_occ_power9_xscom_write(void *opaque, hwaddr addr,
                                       uint64_t val, unsigned size)
{
    PnvOCC *occ = PNV_OCC(opaque);
    uint32_t offset = addr >> 3;

    switch (offset) {
    case P9_OCB_OCI_OCCMISC_CLEAR:
        pnv_occ_set_misc(occ, 0);
        break;
    case P9_OCB_OCI_OCCMISC_OR:
        pnv_occ_set_misc(occ, occ->occmisc | val);
        break;
    case P9_OCB_OCI_OCCMISC:
        pnv_occ_set_misc(occ, val);
        break;
    default:
        /* Bug fix: the log message printed "Ox%..." instead of "0x%...". */
        qemu_log_mask(LOG_UNIMP, "OCC Unimplemented register: 0x%"
                      HWADDR_PRIx "\n", addr >> 3);
    }
}

static const MemoryRegionOps pnv_occ_power9_xscom_ops = {
    .read = pnv_occ_power9_xscom_read,
    .write = pnv_occ_power9_xscom_write,
    .valid.min_access_size = 8,
    .valid.max_access_size = 8,
    .impl.min_access_size = 8,
    .impl.max_access_size = 8,
    .endianness = DEVICE_BIG_ENDIAN,
};

static void pnv_occ_power9_class_init(ObjectClass *klass, void *data)
{
    PnvOCCClass *poc = PNV_OCC_CLASS(klass);

    poc->xscom_size = PNV9_XSCOM_OCC_SIZE;
    poc->xscom_ops = &pnv_occ_power9_xscom_ops;
    poc->psi_irq = PSIHB9_IRQ_OCC;
}

static const TypeInfo pnv_occ_power9_type_info = {
    .name          = TYPE_PNV9_OCC,
    .parent        = TYPE_PNV_OCC,
    .instance_size = sizeof(PnvOCC),
    .class_init    = pnv_occ_power9_class_init,
};

/* Common realize: resolve the required "psi" link and map the XSCOM region. */
static void pnv_occ_realize(DeviceState *dev, Error **errp)
{
    PnvOCC *occ = PNV_OCC(dev);
    PnvOCCClass *poc = PNV_OCC_GET_CLASS(occ);
    Object *obj;
    Error *local_err = NULL;

    occ->occmisc = 0;

    obj = object_property_get_link(OBJECT(dev), "psi", &local_err);
    if (!obj) {
        error_propagate(errp, local_err);
        error_prepend(errp, "required link 'psi' not found: ");
        return;
    }
    occ->psi = PNV_PSI(obj);

    /* XScom region for OCC registers */
    pnv_xscom_region_init(&occ->xscom_regs, OBJECT(dev), poc->xscom_ops,
                          occ, "xscom-occ", poc->xscom_size);
}

static void pnv_occ_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = pnv_occ_realize;
    dc->desc = "PowerNV OCC Controller";
}

static const TypeInfo pnv_occ_type_info = {
    .name          = TYPE_PNV_OCC,
    .parent        = TYPE_DEVICE,
    .instance_size = sizeof(PnvOCC),
    .class_init    = pnv_occ_class_init,
    .class_size    = sizeof(PnvOCCClass),
    .abstract      = true,
};

static void pnv_occ_register_types(void)
{
    type_register_static(&pnv_occ_type_info);
    type_register_static(&pnv_occ_power8_type_info);
    type_register_static(&pnv_occ_power9_type_info);
}

type_init(pnv_occ_register_types);
pmp-tool/PMP
src/qemu/src-pmp/target/riscv/insn_trans/trans_rva.inc.c
/*
 * RISC-V translation routines for the RV64A Standard Extension.
 *
 * Copyright (c) 2016-2017 <NAME>, <EMAIL>
 * Copyright (c) 2018 <NAME>, <EMAIL>
 *                    <NAME>, <EMAIL>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

/*
 * Emit code for LR.W/LR.D: load with reservation, honouring the
 * acquire/release bits via explicit memory barriers.
 */
static inline bool gen_lr(DisasContext *ctx, arg_atomic *a, TCGMemOp mop)
{
    TCGv src1 = tcg_temp_new();
    /* Put addr in load_res, data in load_val. */
    gen_get_gpr(src1, a->rs1);
    if (a->rl) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_STRL);
    }
    tcg_gen_qemu_ld_tl(load_val, src1, ctx->mem_idx, mop);
    if (a->aq) {
        tcg_gen_mb(TCG_MO_ALL | TCG_BAR_LDAQ);
    }
    tcg_gen_mov_tl(load_res, src1);
    gen_set_gpr(a->rd, load_val);

    tcg_temp_free(src1);
    return true;
}

/*
 * Emit code for SC.W/SC.D: store-conditional implemented with a
 * compare-and-swap against the reserved address/value. rd receives
 * 0 on success, 1 on failure.
 */
static inline bool gen_sc(DisasContext *ctx, arg_atomic *a, TCGMemOp mop)
{
    TCGv src1 = tcg_temp_new();
    TCGv src2 = tcg_temp_new();
    TCGv dat = tcg_temp_new();
    TCGLabel *l1 = gen_new_label();
    TCGLabel *l2 = gen_new_label();

    gen_get_gpr(src1, a->rs1);
    tcg_gen_brcond_tl(TCG_COND_NE, load_res, src1, l1);

    gen_get_gpr(src2, a->rs2);
    /*
     * Note that the TCG atomic primitives are SC,
     * so we can ignore AQ/RL along this path.
     */
    tcg_gen_atomic_cmpxchg_tl(src1, load_res, load_val, src2,
                              ctx->mem_idx, mop);
    tcg_gen_setcond_tl(TCG_COND_NE, dat, src1, load_val);
    gen_set_gpr(a->rd, dat);
    tcg_gen_br(l2);

    gen_set_label(l1);
    /*
     * Address comparison failure. However, we still need to
     * provide the memory barrier implied by AQ/RL.
     */
    tcg_gen_mb(TCG_MO_ALL + a->aq * TCG_BAR_LDAQ + a->rl * TCG_BAR_STRL);
    tcg_gen_movi_tl(dat, 1);
    gen_set_gpr(a->rd, dat);

    gen_set_label(l2);
    tcg_temp_free(dat);
    tcg_temp_free(src1);
    tcg_temp_free(src2);
    return true;
}

/*
 * Common emitter for all AMO instructions: rd = mem[rs1];
 * mem[rs1] = op(mem[rs1], rs2), using the given TCG atomic op.
 */
static bool gen_amo(DisasContext *ctx, arg_atomic *a,
                    void(*func)(TCGv, TCGv, TCGv, TCGArg, TCGMemOp),
                    TCGMemOp mop)
{
    TCGv src1 = tcg_temp_new();
    TCGv src2 = tcg_temp_new();

    gen_get_gpr(src1, a->rs1);
    gen_get_gpr(src2, a->rs2);

    (*func)(src2, src1, src2, ctx->mem_idx, mop);

    gen_set_gpr(a->rd, src2);
    tcg_temp_free(src1);
    tcg_temp_free(src2);
    return true;
}

static bool trans_lr_w(DisasContext *ctx, arg_lr_w *a)
{
    REQUIRE_EXT(ctx, RVA);
    return gen_lr(ctx, a, (MO_ALIGN | MO_TESL));
}

static bool trans_sc_w(DisasContext *ctx, arg_sc_w *a)
{
    REQUIRE_EXT(ctx, RVA);
    return gen_sc(ctx, a, (MO_ALIGN | MO_TESL));
}

static bool trans_amoswap_w(DisasContext *ctx, arg_amoswap_w *a)
{
    REQUIRE_EXT(ctx, RVA);
    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amoadd_w(DisasContext *ctx, arg_amoadd_w *a)
{
    REQUIRE_EXT(ctx, RVA);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amoxor_w(DisasContext *ctx, arg_amoxor_w *a)
{
    REQUIRE_EXT(ctx, RVA);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amoand_w(DisasContext *ctx, arg_amoand_w *a)
{
    REQUIRE_EXT(ctx, RVA);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amoor_w(DisasContext *ctx, arg_amoor_w *a)
{
    REQUIRE_EXT(ctx, RVA);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amomin_w(DisasContext *ctx, arg_amomin_w *a)
{
    REQUIRE_EXT(ctx, RVA);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amomax_w(DisasContext *ctx, arg_amomax_w *a)
{
    REQUIRE_EXT(ctx, RVA);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amominu_w(DisasContext *ctx, arg_amominu_w *a)
{
    REQUIRE_EXT(ctx, RVA);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TESL));
}

static bool trans_amomaxu_w(DisasContext *ctx, arg_amomaxu_w *a)
{
    REQUIRE_EXT(ctx, RVA);
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TESL));
}

#ifdef TARGET_RISCV64
/* 64-bit (double-word) variants; only compiled for the RV64 target. */

static bool trans_lr_d(DisasContext *ctx, arg_lr_d *a)
{
    return gen_lr(ctx, a, MO_ALIGN | MO_TEQ);
}

static bool trans_sc_d(DisasContext *ctx, arg_sc_d *a)
{
    return gen_sc(ctx, a, (MO_ALIGN | MO_TEQ));
}

static bool trans_amoswap_d(DisasContext *ctx, arg_amoswap_d *a)
{
    return gen_amo(ctx, a, &tcg_gen_atomic_xchg_tl, (MO_ALIGN | MO_TEQ));
}

static bool trans_amoadd_d(DisasContext *ctx, arg_amoadd_d *a)
{
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_add_tl, (MO_ALIGN | MO_TEQ));
}

static bool trans_amoxor_d(DisasContext *ctx, arg_amoxor_d *a)
{
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_xor_tl, (MO_ALIGN | MO_TEQ));
}

static bool trans_amoand_d(DisasContext *ctx, arg_amoand_d *a)
{
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_and_tl, (MO_ALIGN | MO_TEQ));
}

static bool trans_amoor_d(DisasContext *ctx, arg_amoor_d *a)
{
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_or_tl, (MO_ALIGN | MO_TEQ));
}

static bool trans_amomin_d(DisasContext *ctx, arg_amomin_d *a)
{
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smin_tl, (MO_ALIGN | MO_TEQ));
}

static bool trans_amomax_d(DisasContext *ctx, arg_amomax_d *a)
{
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_smax_tl, (MO_ALIGN | MO_TEQ));
}

static bool trans_amominu_d(DisasContext *ctx, arg_amominu_d *a)
{
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umin_tl, (MO_ALIGN | MO_TEQ));
}

static bool trans_amomaxu_d(DisasContext *ctx, arg_amomaxu_d *a)
{
    return gen_amo(ctx, a, &tcg_gen_atomic_fetch_umax_tl, (MO_ALIGN | MO_TEQ));
}
#endif
pmp-tool/PMP
src/qemu/src-pmp/target/xtensa/translate.c
<filename>src/qemu/src-pmp/target/xtensa/translate.c /* * Xtensa ISA: * http://www.tensilica.com/products/literature-docs/documentation/xtensa-isa-databook.htm * * Copyright (c) 2011, <NAME>, Open Source and Linux Lab. * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the Open Source and Linux Lab nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
*/ #include "qemu/osdep.h" #include "cpu.h" #include "exec/exec-all.h" #include "disas/disas.h" #include "tcg-op.h" #include "qemu/log.h" #include "sysemu/sysemu.h" #include "exec/cpu_ldst.h" #include "exec/semihost.h" #include "exec/translator.h" #include "exec/helper-proto.h" #include "exec/helper-gen.h" #include "trace-tcg.h" #include "exec/log.h" struct DisasContext { DisasContextBase base; const XtensaConfig *config; uint32_t pc; int cring; int ring; uint32_t lbeg_off; uint32_t lend; bool sar_5bit; bool sar_m32_5bit; bool sar_m32_allocated; TCGv_i32 sar_m32; unsigned window; unsigned callinc; bool cwoe; bool debug; bool icount; TCGv_i32 next_icount; unsigned cpenable; uint32_t op_flags; xtensa_insnbuf insnbuf; xtensa_insnbuf slotbuf; }; static TCGv_i32 cpu_pc; static TCGv_i32 cpu_R[16]; static TCGv_i32 cpu_FR[16]; static TCGv_i32 cpu_MR[4]; static TCGv_i32 cpu_BR[16]; static TCGv_i32 cpu_BR4[4]; static TCGv_i32 cpu_BR8[2]; static TCGv_i32 cpu_SR[256]; static TCGv_i32 cpu_UR[256]; static TCGv_i32 cpu_windowbase_next; static GHashTable *xtensa_regfile_table; #include "exec/gen-icount.h" typedef struct XtensaReg { const char *name; uint64_t opt_bits; enum { SR_R = 1, SR_W = 2, SR_X = 4, SR_RW = 3, SR_RWX = 7, } access; } XtensaReg; #define XTENSA_REG_ACCESS(regname, opt, acc) { \ .name = (regname), \ .opt_bits = XTENSA_OPTION_BIT(opt), \ .access = (acc), \ } #define XTENSA_REG(regname, opt) XTENSA_REG_ACCESS(regname, opt, SR_RWX) #define XTENSA_REG_BITS_ACCESS(regname, opt, acc) { \ .name = (regname), \ .opt_bits = (opt), \ .access = (acc), \ } #define XTENSA_REG_BITS(regname, opt) \ XTENSA_REG_BITS_ACCESS(regname, opt, SR_RWX) static const XtensaReg sregnames[256] = { [LBEG] = XTENSA_REG("LBEG", XTENSA_OPTION_LOOP), [LEND] = XTENSA_REG("LEND", XTENSA_OPTION_LOOP), [LCOUNT] = XTENSA_REG("LCOUNT", XTENSA_OPTION_LOOP), [SAR] = XTENSA_REG_BITS("SAR", XTENSA_OPTION_ALL), [BR] = XTENSA_REG("BR", XTENSA_OPTION_BOOLEAN), [LITBASE] = XTENSA_REG("LITBASE", 
XTENSA_OPTION_EXTENDED_L32R), [SCOMPARE1] = XTENSA_REG("SCOMPARE1", XTENSA_OPTION_CONDITIONAL_STORE), [ACCLO] = XTENSA_REG("ACCLO", XTENSA_OPTION_MAC16), [ACCHI] = XTENSA_REG("ACCHI", XTENSA_OPTION_MAC16), [MR] = XTENSA_REG("MR0", XTENSA_OPTION_MAC16), [MR + 1] = XTENSA_REG("MR1", XTENSA_OPTION_MAC16), [MR + 2] = XTENSA_REG("MR2", XTENSA_OPTION_MAC16), [MR + 3] = XTENSA_REG("MR3", XTENSA_OPTION_MAC16), [PREFCTL] = XTENSA_REG_BITS("PREFCTL", XTENSA_OPTION_ALL), [WINDOW_BASE] = XTENSA_REG("WINDOW_BASE", XTENSA_OPTION_WINDOWED_REGISTER), [WINDOW_START] = XTENSA_REG("WINDOW_START", XTENSA_OPTION_WINDOWED_REGISTER), [PTEVADDR] = XTENSA_REG("PTEVADDR", XTENSA_OPTION_MMU), [MMID] = XTENSA_REG_BITS("MMID", XTENSA_OPTION_ALL), [RASID] = XTENSA_REG("RASID", XTENSA_OPTION_MMU), [ITLBCFG] = XTENSA_REG("ITLBCFG", XTENSA_OPTION_MMU), [DTLBCFG] = XTENSA_REG("DTLBCFG", XTENSA_OPTION_MMU), [IBREAKENABLE] = XTENSA_REG("IBREAKENABLE", XTENSA_OPTION_DEBUG), [MEMCTL] = XTENSA_REG_BITS("MEMCTL", XTENSA_OPTION_ALL), [CACHEATTR] = XTENSA_REG("CACHEATTR", XTENSA_OPTION_CACHEATTR), [ATOMCTL] = XTENSA_REG("ATOMCTL", XTENSA_OPTION_ATOMCTL), [DDR] = XTENSA_REG("DDR", XTENSA_OPTION_DEBUG), [IBREAKA] = XTENSA_REG("IBREAKA0", XTENSA_OPTION_DEBUG), [IBREAKA + 1] = XTENSA_REG("IBREAKA1", XTENSA_OPTION_DEBUG), [DBREAKA] = XTENSA_REG("DBREAKA0", XTENSA_OPTION_DEBUG), [DBREAKA + 1] = XTENSA_REG("DBREAKA1", XTENSA_OPTION_DEBUG), [DBREAKC] = XTENSA_REG("DBREAKC0", XTENSA_OPTION_DEBUG), [DBREAKC + 1] = XTENSA_REG("DBREAKC1", XTENSA_OPTION_DEBUG), [CONFIGID0] = XTENSA_REG_BITS_ACCESS("CONFIGID0", XTENSA_OPTION_ALL, SR_R), [EPC1] = XTENSA_REG("EPC1", XTENSA_OPTION_EXCEPTION), [EPC1 + 1] = XTENSA_REG("EPC2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPC1 + 2] = XTENSA_REG("EPC3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPC1 + 3] = XTENSA_REG("EPC4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPC1 + 4] = XTENSA_REG("EPC5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPC1 + 5] = XTENSA_REG("EPC6", 
XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPC1 + 6] = XTENSA_REG("EPC7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [DEPC] = XTENSA_REG("DEPC", XTENSA_OPTION_EXCEPTION), [EPS2] = XTENSA_REG("EPS2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPS2 + 1] = XTENSA_REG("EPS3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPS2 + 2] = XTENSA_REG("EPS4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPS2 + 3] = XTENSA_REG("EPS5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPS2 + 4] = XTENSA_REG("EPS6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EPS2 + 5] = XTENSA_REG("EPS7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [CONFIGID1] = XTENSA_REG_BITS_ACCESS("CONFIGID1", XTENSA_OPTION_ALL, SR_R), [EXCSAVE1] = XTENSA_REG("EXCSAVE1", XTENSA_OPTION_EXCEPTION), [EXCSAVE1 + 1] = XTENSA_REG("EXCSAVE2", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EXCSAVE1 + 2] = XTENSA_REG("EXCSAVE3", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EXCSAVE1 + 3] = XTENSA_REG("EXCSAVE4", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EXCSAVE1 + 4] = XTENSA_REG("EXCSAVE5", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EXCSAVE1 + 5] = XTENSA_REG("EXCSAVE6", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [EXCSAVE1 + 6] = XTENSA_REG("EXCSAVE7", XTENSA_OPTION_HIGH_PRIORITY_INTERRUPT), [CPENABLE] = XTENSA_REG("CPENABLE", XTENSA_OPTION_COPROCESSOR), [INTSET] = XTENSA_REG_ACCESS("INTSET", XTENSA_OPTION_INTERRUPT, SR_RW), [INTCLEAR] = XTENSA_REG_ACCESS("INTCLEAR", XTENSA_OPTION_INTERRUPT, SR_W), [INTENABLE] = XTENSA_REG("INTENABLE", XTENSA_OPTION_INTERRUPT), [PS] = XTENSA_REG_BITS("PS", XTENSA_OPTION_ALL), [VECBASE] = XTENSA_REG("VECBASE", XTENSA_OPTION_RELOCATABLE_VECTOR), [EXCCAUSE] = XTENSA_REG("EXCCAUSE", XTENSA_OPTION_EXCEPTION), [DEBUGCAUSE] = XTENSA_REG_ACCESS("DEBUGCAUSE", XTENSA_OPTION_DEBUG, SR_R), [CCOUNT] = XTENSA_REG("CCOUNT", XTENSA_OPTION_TIMER_INTERRUPT), [PRID] = XTENSA_REG_ACCESS("PRID", XTENSA_OPTION_PROCESSOR_ID, SR_R), [ICOUNT] = XTENSA_REG("ICOUNT", XTENSA_OPTION_DEBUG), [ICOUNTLEVEL] = XTENSA_REG("ICOUNTLEVEL", XTENSA_OPTION_DEBUG), 
    [EXCVADDR] = XTENSA_REG("EXCVADDR", XTENSA_OPTION_EXCEPTION),
    /* Timer compare registers CCOMPARE0..2 */
    [CCOMPARE] = XTENSA_REG("CCOMPARE0", XTENSA_OPTION_TIMER_INTERRUPT),
    [CCOMPARE + 1] = XTENSA_REG("CCOMPARE1", XTENSA_OPTION_TIMER_INTERRUPT),
    [CCOMPARE + 2] = XTENSA_REG("CCOMPARE2", XTENSA_OPTION_TIMER_INTERRUPT),
    [MISC] = XTENSA_REG("MISC0", XTENSA_OPTION_MISC_SR),
    [MISC + 1] = XTENSA_REG("MISC1", XTENSA_OPTION_MISC_SR),
    [MISC + 2] = XTENSA_REG("MISC2", XTENSA_OPTION_MISC_SR),
    [MISC + 3] = XTENSA_REG("MISC3", XTENSA_OPTION_MISC_SR),
};

/* User-register table, indexed by user register number; each entry pairs
 * the UR name with the configuration option that enables it. */
static const XtensaReg uregnames[256] = {
    [EXPSTATE] = XTENSA_REG_BITS("EXPSTATE", XTENSA_OPTION_ALL),
    [THREADPTR] = XTENSA_REG("THREADPTR", XTENSA_OPTION_THREAD_POINTER),
    [FCR] = XTENSA_REG("FCR", XTENSA_OPTION_FP_COPROCESSOR),
    [FSR] = XTENSA_REG("FSR", XTENSA_OPTION_FP_COPROCESSOR),
};

/*
 * Allocate the TCG globals that mirror CPUXtensaState: the PC, the AR
 * window (cpu_R), FP registers (cpu_FR), MAC16 registers (cpu_MR), boolean
 * registers (cpu_BR/BR4/BR8), every configured SR and UR, and
 * windowbase_next.  Called once at translator start-up.
 */
void xtensa_translate_init(void)
{
    static const char * const regnames[] = {
        "ar0", "ar1", "ar2", "ar3",
        "ar4", "ar5", "ar6", "ar7",
        "ar8", "ar9", "ar10", "ar11",
        "ar12", "ar13", "ar14", "ar15",
    };
    static const char * const fregnames[] = {
        "f0", "f1", "f2", "f3",
        "f4", "f5", "f6", "f7",
        "f8", "f9", "f10", "f11",
        "f12", "f13", "f14", "f15",
    };
    static const char * const mregnames[] = {
        "m0", "m1", "m2", "m3",
    };
    static const char * const bregnames[] = {
        "b0", "b1", "b2", "b3",
        "b4", "b5", "b6", "b7",
        "b8", "b9", "b10", "b11",
        "b12", "b13", "b14", "b15",
    };
    int i;

    cpu_pc = tcg_global_mem_new_i32(cpu_env,
                                    offsetof(CPUXtensaState, pc), "pc");

    for (i = 0; i < 16; i++) {
        cpu_R[i] = tcg_global_mem_new_i32(cpu_env,
                                          offsetof(CPUXtensaState, regs[i]),
                                          regnames[i]);
    }

    for (i = 0; i < 16; i++) {
        cpu_FR[i] = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUXtensaState,
                                                    fregs[i].f32[FP_F32_LOW]),
                                           fregnames[i]);
    }

    for (i = 0; i < 4; i++) {
        cpu_MR[i] = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUXtensaState,
                                                    sregs[MR + i]),
                                           mregnames[i]);
    }

    /* All boolean-register globals (b0..b15, and the 4-/8-wide group views)
     * alias the single sregs[BR] word: same offset, different names. */
    for (i = 0; i < 16; i++) {
        cpu_BR[i] = tcg_global_mem_new_i32(cpu_env,
                                           offsetof(CPUXtensaState,
                                                    sregs[BR]),
                                           bregnames[i]);
        if (i % 4 == 0) {
            cpu_BR4[i / 4] = tcg_global_mem_new_i32(cpu_env,
                                                    offsetof(CPUXtensaState,
                                                             sregs[BR]),
                                                    bregnames[i]);
        }
        if (i % 8 == 0) {
            cpu_BR8[i / 8] = tcg_global_mem_new_i32(cpu_env,
                                                    offsetof(CPUXtensaState,
                                                             sregs[BR]),
                                                    bregnames[i]);
        }
    }

    /* Only SRs/URs with a name in the tables above get a TCG global. */
    for (i = 0; i < 256; ++i) {
        if (sregnames[i].name) {
            cpu_SR[i] = tcg_global_mem_new_i32(cpu_env,
                                               offsetof(CPUXtensaState,
                                                        sregs[i]),
                                               sregnames[i].name);
        }
    }

    for (i = 0; i < 256; ++i) {
        if (uregnames[i].name) {
            cpu_UR[i] = tcg_global_mem_new_i32(cpu_env,
                                               offsetof(CPUXtensaState,
                                                        uregs[i]),
                                               uregnames[i].name);
        }
    }

    cpu_windowbase_next =
        tcg_global_mem_new_i32(cpu_env,
                               offsetof(CPUXtensaState, windowbase_next),
                               "windowbase_next");
}

/*
 * Map a register-file name ("AR", "MR", "FR", "BR", "BR4", "BR8") to its
 * array of TCG globals.  The lookup table is built lazily on first call.
 * NOTE(review): not thread-safe if first called concurrently — presumably
 * only invoked from single-threaded translator setup; confirm at call sites.
 */
void **xtensa_get_regfile_by_name(const char *name)
{
    if (xtensa_regfile_table == NULL) {
        xtensa_regfile_table = g_hash_table_new(g_str_hash, g_str_equal);
        g_hash_table_insert(xtensa_regfile_table,
                            (void *)"AR", (void *)cpu_R);
        g_hash_table_insert(xtensa_regfile_table,
                            (void *)"MR", (void *)cpu_MR);
        g_hash_table_insert(xtensa_regfile_table,
                            (void *)"FR", (void *)cpu_FR);
        g_hash_table_insert(xtensa_regfile_table,
                            (void *)"BR", (void *)cpu_BR);
        g_hash_table_insert(xtensa_regfile_table,
                            (void *)"BR4", (void *)cpu_BR4);
        g_hash_table_insert(xtensa_regfile_table,
                            (void *)"BR8", (void *)cpu_BR8);
    }
    return (void **)g_hash_table_lookup(xtensa_regfile_table, (void *)name);
}

/* True iff the given Xtensa configuration option is enabled for this core. */
static inline bool option_enabled(DisasContext *dc, int opt)
{
    return xtensa_option_enabled(dc->config, opt);
}

/* Reset the per-TB SAR tracking state (no 5-bit/32-minus-5-bit knowledge,
 * no sar_m32 temp allocated yet). */
static void init_sar_tracker(DisasContext *dc)
{
    dc->sar_5bit = false;
    dc->sar_m32_5bit = false;
    dc->sar_m32_allocated = false;
}

/* Release the sar_m32 temporary if gen_left_shift_sar() allocated one. */
static void reset_sar_tracker(DisasContext *dc)
{
    if (dc->sar_m32_allocated) {
        tcg_temp_free(dc->sar_m32);
    }
}

/* SAR := sa & 0x1f for right shifts; remember SAR currently fits in 5 bits
 * so later shift ops can use it directly. */
static void gen_right_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    tcg_gen_andi_i32(cpu_SR[SAR], sa, 0x1f);
    if (dc->sar_m32_5bit) {
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = true;
    dc->sar_m32_5bit = false;
}

/* SAR := 32 - (sa & 0x1f) for left shifts; the raw 5-bit amount is kept
 * in the local sar_m32 temp. */
static void gen_left_shift_sar(DisasContext *dc, TCGv_i32 sa)
{
    TCGv_i32 tmp = tcg_const_i32(32);
    if
/* (continuation of gen_left_shift_sar) */
       (!dc->sar_m32_allocated) {
        /* Lazily allocate a local temp that survives across branches. */
        dc->sar_m32 = tcg_temp_local_new_i32();
        dc->sar_m32_allocated = true;
    }
    tcg_gen_andi_i32(dc->sar_m32, sa, 0x1f);
    tcg_gen_sub_i32(cpu_SR[SAR], tmp, dc->sar_m32);
    dc->sar_5bit = false;
    dc->sar_m32_5bit = true;
    tcg_temp_free(tmp);
}

/* Emit a call to the generic exception helper with exception number excp. */
static void gen_exception(DisasContext *dc, int excp)
{
    TCGv_i32 tmp = tcg_const_i32(excp);
    gen_helper_exception(cpu_env, tmp);
    tcg_temp_free(tmp);
}

/* Raise an Xtensa exception with the given cause at the current PC.
 * Translation stops only for causes that cannot fall through
 * (illegal instruction, syscall). */
static void gen_exception_cause(DisasContext *dc, uint32_t cause)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_helper_exception_cause(cpu_env, tpc, tcause);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
    if (cause == ILLEGAL_INSTRUCTION_CAUSE ||
        cause == SYSCALL_CAUSE) {
        dc->base.is_jmp = DISAS_NORETURN;
    }
}

/* As gen_exception_cause(), but also passes the faulting virtual address
 * (used for load/store exceptions). */
static void gen_exception_cause_vaddr(DisasContext *dc, uint32_t cause,
                                      TCGv_i32 vaddr)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_helper_exception_cause_vaddr(cpu_env, tpc, tcause, vaddr);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
}

/* Raise a debug exception with the given DEBUGCAUSE bits; breakpoint-type
 * causes end the TB. */
static void gen_debug_exception(DisasContext *dc, uint32_t cause)
{
    TCGv_i32 tpc = tcg_const_i32(dc->pc);
    TCGv_i32 tcause = tcg_const_i32(cause);
    gen_helper_debug_exception(cpu_env, tpc, tcause);
    tcg_temp_free(tpc);
    tcg_temp_free(tcause);
    if (cause & (DEBUGCAUSE_IB | DEBUGCAUSE_BI | DEBUGCAUSE_BN)) {
        dc->base.is_jmp = DISAS_NORETURN;
    }
}

/* Return true if the current ring may execute privileged instructions;
 * otherwise emit a privileged-instruction exception and end the TB.
 * In user-only builds every privileged instruction traps. */
static bool gen_check_privilege(DisasContext *dc)
{
#ifndef CONFIG_USER_ONLY
    if (!dc->cring) {
        return true;
    }
#endif
    gen_exception_cause(dc, PRIVILEGED_CAUSE);
    dc->base.is_jmp = DISAS_NORETURN;
    return false;
}

/* Check CPENABLE for the coprocessors in cp_mask; on a disabled one, raise
 * CoprocessorNDisabled for the lowest disabled coprocessor and end the TB. */
static bool gen_check_cpenable(DisasContext *dc, uint32_t cp_mask)
{
    cp_mask &= ~dc->cpenable;

    if (option_enabled(dc, XTENSA_OPTION_COPROCESSOR) && cp_mask) {
        gen_exception_cause(dc, COPROCESSOR0_DISABLED + ctz32(cp_mask));
        dc->base.is_jmp = DISAS_NORETURN;
        return false;
    }
    return true;
}

static int gen_postprocess(DisasContext *dc, int slot);

/* Set PC to dest and leave the TB, via goto_tb slot `slot` when >= 0.
 * Runs pending postprocessing (interrupt check, window sync) first and
 * honours single-step and ICOUNT updates. */
static void gen_jump_slot(DisasContext *dc, TCGv dest, int slot)
{
    tcg_gen_mov_i32(cpu_pc, dest);
    if (dc->icount) {
        tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount);
    }
    if (dc->base.singlestep_enabled) {
        gen_exception(dc, EXCP_DEBUG);
    } else {
        if (dc->op_flags & XTENSA_OP_POSTPROCESS) {
            slot = gen_postprocess(dc, slot);
        }
        if (slot >= 0) {
            tcg_gen_goto_tb(slot);
            tcg_gen_exit_tb(dc->base.tb, slot);
        } else {
            tcg_gen_exit_tb(NULL, 0);
        }
    }
    dc->base.is_jmp = DISAS_NORETURN;
}

/* Indirect jump: no goto_tb chaining possible. */
static void gen_jump(DisasContext *dc, TCGv dest)
{
    gen_jump_slot(dc, dest, -1);
}

/* Direct chaining is only allowed within the same guest page as the start
 * of the TB; otherwise fall back to a plain exit (-1). */
static int adjust_jump_slot(DisasContext *dc, uint32_t dest, int slot)
{
    if (((dc->base.pc_first ^ dest) & TARGET_PAGE_MASK) != 0) {
        return -1;
    } else {
        return slot;
    }
}

/* Jump to a compile-time-known destination, chaining when permitted. */
static void gen_jumpi(DisasContext *dc, uint32_t dest, int slot)
{
    TCGv_i32 tmp = tcg_const_i32(dest);
    gen_jump_slot(dc, tmp, adjust_jump_slot(dc, dest, slot));
    tcg_temp_free(tmp);
}

/* Windowed call: record callinc in PS.CALLINC, store the return address
 * (with the window-increment encoded in the top two bits) into
 * a(callinc*4), then jump to dest. */
static void gen_callw_slot(DisasContext *dc, int callinc, TCGv_i32 dest,
                           int slot)
{
    TCGv_i32 tcallinc = tcg_const_i32(callinc);

    tcg_gen_deposit_i32(cpu_SR[PS], cpu_SR[PS],
                        tcallinc, PS_CALLINC_SHIFT, PS_CALLINC_LEN);
    tcg_temp_free(tcallinc);
    tcg_gen_movi_i32(cpu_R[callinc << 2],
                     (callinc << 30) | (dc->base.pc_next & 0x3fffffff));
    gen_jump_slot(dc, dest, slot);
}

/* If the next PC is the end of a zero-overhead loop, emit the loop-back
 * logic: decrement LCOUNT and branch to LBEG unless LCOUNT was zero.
 * Returns true when loop-end code was emitted. */
static bool gen_check_loop_end(DisasContext *dc, int slot)
{
    if (dc->base.pc_next == dc->lend) {
        TCGLabel *label = gen_new_label();

        tcg_gen_brcondi_i32(TCG_COND_EQ, cpu_SR[LCOUNT], 0, label);
        tcg_gen_subi_i32(cpu_SR[LCOUNT], cpu_SR[LCOUNT], 1);
        if (dc->lbeg_off) {
            /* Loop start is known statically relative to the loop end. */
            gen_jumpi(dc, dc->base.pc_next - dc->lbeg_off, slot);
        } else {
            gen_jump(dc, cpu_SR[LBEG]);
        }
        gen_set_label(label);
        gen_jumpi(dc, dc->base.pc_next, -1);
        return true;
    }
    return false;
}

/* Fall through to the next instruction, honouring zero-overhead loops. */
static void gen_jumpi_check_loop_end(DisasContext *dc, int slot)
{
    if (!gen_check_loop_end(dc, slot)) {
        gen_jumpi(dc, dc->base.pc_next, slot);
    }
}

/* Conditional branch: taken edge goes to addr, fall-through edge continues
 * at the next instruction (with loop-end handling). */
static void gen_brcond(DisasContext *dc, TCGCond cond,
                       TCGv_i32 t0, TCGv_i32 t1, uint32_t addr)
{
    TCGLabel *label = gen_new_label();

    tcg_gen_brcond_i32(cond, t0, t1, label);
/* (continuation of gen_brcond) */
    gen_jumpi_check_loop_end(dc, 0);
    gen_set_label(label);
    gen_jumpi(dc, addr, 1);
}

/* gen_brcond() with an immediate second comparand. */
static void gen_brcondi(DisasContext *dc, TCGCond cond,
                        TCGv_i32 t0, uint32_t t1, uint32_t addr)
{
    TCGv_i32 tmp = tcg_const_i32(t1);
    gen_brcond(dc, cond, t0, tmp, addr);
    tcg_temp_free(tmp);
}

/* Validate an SR access: the register must be configured for this core and
 * must permit the requested access kind (rsr/wsr/xsr).  Logs and returns
 * false on violation; the caller decides how to fault. */
static bool check_sr(DisasContext *dc, uint32_t sr, unsigned access)
{
    if (!xtensa_option_bits_enabled(dc->config, sregnames[sr].opt_bits)) {
        if (sregnames[sr].name) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "SR %s is not configured\n", sregnames[sr].name);
        } else {
            qemu_log_mask(LOG_UNIMP,
                          "SR %d is not implemented\n", sr);
        }
        return false;
    } else if (!(sregnames[sr].access & access)) {
        static const char * const access_text[] = {
            [SR_R] = "rsr",
            [SR_W] = "wsr",
            [SR_X] = "xsr",
        };
        assert(access < ARRAY_SIZE(access_text) && access_text[access]);
        qemu_log_mask(LOG_GUEST_ERROR,
                      "SR %s is not available for %s\n",
                      sregnames[sr].name, access_text[access]);
        return false;
    }
    return true;
}

#ifndef CONFIG_USER_ONLY
/* RSR CCOUNT (also used for INTSET): bring CCOUNT up to date via helper
 * before reading; bracket with io_start/io_end under icount. */
static void gen_rsr_ccount(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_update_ccount(cpu_env);
    tcg_gen_mov_i32(d, cpu_SR[sr]);
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

/* RSR PTEVADDR: combine EXCVADDR[31:10] with the PTEVADDR base, masking
 * the low two bits. */
static void gen_rsr_ptevaddr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    tcg_gen_shri_i32(d, cpu_SR[EXCVADDR], 10);
    tcg_gen_or_i32(d, d, cpu_SR[sr]);
    tcg_gen_andi_i32(d, d, 0xfffffffc);
}
#endif

/* Read special register sr into d, dispatching to a per-SR handler when
 * one exists, else a plain move from the SR global. */
static void gen_rsr(DisasContext *dc, TCGv_i32 d, uint32_t sr)
{
    static void (* const rsr_handler[256])(DisasContext *dc,
                                           TCGv_i32 d, uint32_t sr) = {
#ifndef CONFIG_USER_ONLY
        [CCOUNT] = gen_rsr_ccount,
        [INTSET] = gen_rsr_ccount,
        [PTEVADDR] = gen_rsr_ptevaddr,
#endif
    };

    if (rsr_handler[sr]) {
        rsr_handler[sr](dc, d, sr);
    } else {
        tcg_gen_mov_i32(d, cpu_SR[sr]);
    }
}

/* WSR SAR: keep only 6 bits and invalidate the cached shift-amount state. */
static void gen_wsr_sar(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0x3f);
    if (dc->sar_m32_5bit) {
        tcg_gen_discard_i32(dc->sar_m32);
    }
    dc->sar_5bit = false;
    dc->sar_m32_5bit = false;
}

/* WSR BR: only 16 boolean bits exist. */
static void gen_wsr_br(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0xffff);
}

/* WSR LITBASE: keep the base address bits and the enable bit. */
static void gen_wsr_litbase(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_andi_i32(cpu_SR[sr], s, 0xfffff001);
}

/* WSR ACCHI: the high accumulator is 8 bits, sign-extended. */
static void gen_wsr_acchi(DisasContext *dc, uint32_t sr, TCGv_i32 s)
{
    tcg_gen_ext8s_i32(cpu_SR[sr], s);
}

#ifndef CONFIG_USER_ONLY
/* WSR WINDOW_BASE: staged in windowbase_next; the actual rotation is
 * applied by later window-sync postprocessing. */
static void gen_wsr_windowbase(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_mov_i32(cpu_windowbase_next, v);
}

/* WSR WINDOW_START: one valid bit per 4-register window frame. */
static void gen_wsr_windowstart(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, (1 << dc->config->nareg / 4) - 1);
}

/* WSR PTEVADDR: only the page-table base bits are writable. */
static void gen_wsr_ptevaddr(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xffc00000);
}

/* WSR RASID: delegated to a helper (affects TLB state). */
static void gen_wsr_rasid(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_rasid(cpu_env, v);
}

/* WSR ITLBCFG/DTLBCFG: mask to the implemented way-configuration bits. */
static void gen_wsr_tlbcfg(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0x01130000);
}

/* WSR IBREAKENABLE: helper updates breakpoint state. */
static void gen_wsr_ibreakenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_ibreakenable(cpu_env, v);
}

/* WSR MEMCTL: helper applies the implemented-bits policy. */
static void gen_wsr_memctl(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_wsr_memctl(cpu_env, v);
}

/* WSR ATOMCTL: 6 implemented bits. */
static void gen_wsr_atomctl(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0x3f);
}

/* WSR IBREAKA0/1: helper re-arms the instruction breakpoint. */
static void gen_wsr_ibreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    unsigned id = sr - IBREAKA;
    TCGv_i32 tmp = tcg_const_i32(id);

    assert(id < dc->config->nibreak);
    gen_helper_wsr_ibreaka(cpu_env, tmp, v);
    tcg_temp_free(tmp);
}

/* WSR DBREAKA0/1: helper re-arms the data breakpoint address. */
static void gen_wsr_dbreaka(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    unsigned id = sr - DBREAKA;
    TCGv_i32 tmp = tcg_const_i32(id);

    assert(id < dc->config->ndbreak);
    gen_helper_wsr_dbreaka(cpu_env, tmp, v);
    tcg_temp_free(tmp);
}

/* WSR DBREAKC0/1: helper re-arms the data breakpoint control. */
static void gen_wsr_dbreakc(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    unsigned id = sr - DBREAKC;
    TCGv_i32 tmp = tcg_const_i32(id);

    assert(id < dc->config->ndbreak);
    gen_helper_wsr_dbreakc(cpu_env, tmp, v);
    tcg_temp_free(tmp);
}

/* WSR CPENABLE: one enable bit per coprocessor (8 max). */
static void gen_wsr_cpenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xff);
}

/* Re-evaluate pending interrupts via helper, guarded by io_start/io_end
 * under icount. */
static void gen_check_interrupts(DisasContext *dc)
{
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_check_interrupts(cpu_env);
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

/* WSR INTSET: helper raises software interrupts. */
static void gen_wsr_intset(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_intset(cpu_env, v);
}

/* WSR INTCLEAR: helper clears edge/software interrupts. */
static void gen_wsr_intclear(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    gen_helper_intclear(cpu_env, v);
}

/* WSR INTENABLE: plain move; interrupt recheck happens in postprocessing. */
static void gen_wsr_intenable(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_mov_i32(cpu_SR[sr], v);
}

/* WSR PS: mask to the architecturally writable fields; RING only exists
 * with the MMU option. */
static void gen_wsr_ps(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    uint32_t mask = PS_WOE | PS_CALLINC | PS_OWB |
        PS_UM | PS_EXCM | PS_INTLEVEL;

    if (option_enabled(dc, XTENSA_OPTION_MMU)) {
        mask |= PS_RING;
    }
    tcg_gen_andi_i32(cpu_SR[sr], v, mask);
}

/* WSR CCOUNT: helper rebases the cycle counter; io-guarded under icount. */
static void gen_wsr_ccount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_wsr_ccount(cpu_env, v);
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
}

/* WSR ICOUNT: while icount-stepping, the new value is staged in
 * next_icount and committed at instruction end. */
static void gen_wsr_icount(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    if (dc->icount) {
        tcg_gen_mov_i32(dc->next_icount, v);
    } else {
        tcg_gen_mov_i32(cpu_SR[sr], v);
    }
}

/* WSR ICOUNTLEVEL: 4-bit interrupt level threshold. */
static void gen_wsr_icountlevel(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    tcg_gen_andi_i32(cpu_SR[sr], v, 0xf);
}

/* WSR CCOMPAREn: store the value and let the helper reprogram the timer;
 * io-guarded under icount. */
static void gen_wsr_ccompare(DisasContext *dc, uint32_t sr, TCGv_i32 v)
{
    uint32_t id = sr - CCOMPARE;
    TCGv_i32 tmp = tcg_const_i32(id);

    assert(id < dc->config->nccompare);
    tcg_gen_mov_i32(cpu_SR[sr], v);
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_update_ccompare(cpu_env, tmp);
    tcg_temp_free(tmp);
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
}
#else
/* User-only: no interrupt machinery. */
static void gen_check_interrupts(DisasContext *dc)
{
}
#endif

/* Write special register sr from s, dispatching to a per-SR handler when
 * one exists, else a plain move into the SR global. */
static void gen_wsr(DisasContext *dc,
/* (continuation of gen_wsr) */
                    uint32_t sr, TCGv_i32 s)
{
    static void (* const wsr_handler[256])(DisasContext *dc,
                                           uint32_t sr, TCGv_i32 v) = {
        [SAR] = gen_wsr_sar,
        [BR] = gen_wsr_br,
        [LITBASE] = gen_wsr_litbase,
        [ACCHI] = gen_wsr_acchi,
#ifndef CONFIG_USER_ONLY
        [WINDOW_BASE] = gen_wsr_windowbase,
        [WINDOW_START] = gen_wsr_windowstart,
        [PTEVADDR] = gen_wsr_ptevaddr,
        [RASID] = gen_wsr_rasid,
        [ITLBCFG] = gen_wsr_tlbcfg,
        [DTLBCFG] = gen_wsr_tlbcfg,
        [IBREAKENABLE] = gen_wsr_ibreakenable,
        [MEMCTL] = gen_wsr_memctl,
        [ATOMCTL] = gen_wsr_atomctl,
        [IBREAKA] = gen_wsr_ibreaka,
        [IBREAKA + 1] = gen_wsr_ibreaka,
        [DBREAKA] = gen_wsr_dbreaka,
        [DBREAKA + 1] = gen_wsr_dbreaka,
        [DBREAKC] = gen_wsr_dbreakc,
        [DBREAKC + 1] = gen_wsr_dbreakc,
        [CPENABLE] = gen_wsr_cpenable,
        [INTSET] = gen_wsr_intset,
        [INTCLEAR] = gen_wsr_intclear,
        [INTENABLE] = gen_wsr_intenable,
        [PS] = gen_wsr_ps,
        [CCOUNT] = gen_wsr_ccount,
        [ICOUNT] = gen_wsr_icount,
        [ICOUNTLEVEL] = gen_wsr_icountlevel,
        [CCOMPARE] = gen_wsr_ccompare,
        [CCOMPARE + 1] = gen_wsr_ccompare,
        [CCOMPARE + 2] = gen_wsr_ccompare,
#endif
    };

    if (wsr_handler[sr]) {
        wsr_handler[sr](dc, sr, s);
    } else {
        tcg_gen_mov_i32(cpu_SR[sr], s);
    }
}

/* Write user register ur from s.  FCR goes through a helper; FSR masks
 * off the reserved low bits; everything else is a plain move. */
static void gen_wur(uint32_t ur, TCGv_i32 s)
{
    switch (ur) {
    case FCR:
        gen_helper_wur_fcr(cpu_env, s);
        break;

    case FSR:
        tcg_gen_andi_i32(cpu_UR[ur], s, 0xffffff80);
        break;

    default:
        tcg_gen_mov_i32(cpu_UR[ur], s);
        break;
    }
}

/* Enforce load/store alignment for a (1 << shift)-byte access.  Without
 * the unaligned-exception option the low bits are silently dropped;
 * with hardware alignment available, only ops flagged no_hw_alignment
 * get the explicit alignment check + exception. */
static void gen_load_store_alignment(DisasContext *dc, int shift,
                                     TCGv_i32 addr, bool no_hw_alignment)
{
    if (!option_enabled(dc, XTENSA_OPTION_UNALIGNED_EXCEPTION)) {
        tcg_gen_andi_i32(addr, addr, ~0 << shift);
    } else if (option_enabled(dc, XTENSA_OPTION_HW_ALIGNMENT) &&
               no_hw_alignment) {
        TCGLabel *label = gen_new_label();
        TCGv_i32 tmp = tcg_temp_new_i32();

        tcg_gen_andi_i32(tmp, addr, ~(~0 << shift));
        tcg_gen_brcondi_i32(TCG_COND_EQ, tmp, 0, label);
        gen_exception_cause_vaddr(dc, LOAD_STORE_ALIGNMENT_CAUSE, addr);
        gen_set_label(label);
        tcg_temp_free(tmp);
    }
}

#ifndef CONFIG_USER_ONLY
/* WAITI: lower PS.INTLEVEL to imm4 and wait for an interrupt (helper);
 * io-guarded under icount. */
static void gen_waiti(DisasContext *dc, uint32_t imm4)
{
    TCGv_i32 pc = tcg_const_i32(dc->base.pc_next);
    TCGv_i32 intlevel = tcg_const_i32(imm4);

    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_start();
    }
    gen_helper_waiti(cpu_env, pc, intlevel);
    if (tb_cflags(dc->base.tb) & CF_USE_ICOUNT) {
        gen_io_end();
    }
    tcg_temp_free(pc);
    tcg_temp_free(intlevel);
}
#endif

/* Check that the highest AR referenced by mask fits in the live register
 * window; otherwise call the window-check helper (which raises the
 * overflow exception) and end the TB. */
static bool gen_window_check(DisasContext *dc, uint32_t mask)
{
    unsigned r = 31 - clz32(mask);

    if (r / 4 > dc->window) {
        TCGv_i32 pc = tcg_const_i32(dc->pc);
        TCGv_i32 w = tcg_const_i32(r / 4);

        gen_helper_window_check(cpu_env, pc, w);
        dc->base.is_jmp = DISAS_NORETURN;
        return false;
    }
    return true;
}

/* Extract one 16-bit MAC16 multiplicand from v: high or low half,
 * zero- or sign-extended.  Returns a fresh temp owned by the caller. */
static TCGv_i32 gen_mac16_m(TCGv_i32 v, bool hi, bool is_unsigned)
{
    TCGv_i32 m = tcg_temp_new_i32();

    if (hi) {
        (is_unsigned ? tcg_gen_shri_i32 : tcg_gen_sari_i32)(m, v, 16);
    } else {
        (is_unsigned ? tcg_gen_ext16u_i32 : tcg_gen_ext16s_i32)(m, v);
    }
    return m;
}

/* Emit a divide-by-zero check on the divisor operand (arg[2]). */
static void gen_zero_check(DisasContext *dc, const OpcodeArg arg[])
{
    TCGLabel *label = gen_new_label();

    tcg_gen_brcondi_i32(TCG_COND_NE, arg[2].in, 0, label);
    gen_exception_cause(dc, INTEGER_DIVIDE_BY_ZERO_CAUSE);
    gen_set_label(label);
}

/* Instruction length as determined by its first byte (op0). */
static inline unsigned xtensa_op0_insn_len(DisasContext *dc, uint8_t op0)
{
    return xtensa_isa_length_from_chars(dc->config->isa, &op0);
}

/* Emit deferred per-instruction work (interrupt recheck, window-base
 * sync) and return the goto_tb slot to use (-1 forces a plain exit when
 * the TB must not be chained). */
static int gen_postprocess(DisasContext *dc, int slot)
{
    uint32_t op_flags = dc->op_flags;

    if (op_flags & XTENSA_OP_CHECK_INTERRUPTS) {
        gen_check_interrupts(dc);
    }
    if (op_flags & XTENSA_OP_SYNC_REGISTER_WINDOW) {
        gen_helper_sync_windowbase(cpu_env);
    }
    if (op_flags & XTENSA_OP_EXIT_TB_M1) {
        slot = -1;
    }
    return slot;
}

/* Record of a temporary copy made to break an inter-slot dependency. */
struct opcode_arg_copy {
    uint32_t resource;
    void *temp;
    OpcodeArg *arg;
};

/* One input/output resource of a slot opcode; index is the visible
 * operand index, or -1 for invisible/state operands. */
struct opcode_arg_info {
    uint32_t resource;
    int index;
};

/* Per-slot decode result: ops table, decoded args, and sorted
 * input/output resource lists used for dependency ordering. */
struct slot_prop {
    XtensaOpcodeOps *ops;
    OpcodeArg arg[MAX_OPCODE_ARGS];
    struct opcode_arg_info in[MAX_OPCODE_ARGS];
    struct opcode_arg_info out[MAX_OPCODE_ARGS];
    unsigned n_in;
    unsigned n_out;
    uint32_t op_flags;
};

enum resource_type {
    RES_REGFILE,
    RES_STATE,
    RES_MAX,
};

static
/* Pack a resource identity (type, group, number) into one comparable
 * 32-bit key. */
       uint32_t encode_resource(enum resource_type r, unsigned g, unsigned n)
{
    assert(r < RES_MAX && g < 256 && n < 65536);
    return (r << 24) | (g << 16) | n;
}

static enum resource_type get_resource_type(uint32_t resource)
{
    return resource >> 24;
}

/*
 * a depends on b if b must be executed before a,
 * because a's side effects will destroy b's inputs.
 */
static bool op_depends_on(const struct slot_prop *a,
                          const struct slot_prop *b)
{
    unsigned i = 0;
    unsigned j = 0;

    /* Control flow always goes last. */
    if (a->op_flags & XTENSA_OP_CONTROL_FLOW) {
        return true;
    }
    /* Load/store always goes first. */
    if ((a->op_flags & XTENSA_OP_LOAD_STORE) <
        (b->op_flags & XTENSA_OP_LOAD_STORE)) {
        return true;
    }
    /* Merge-scan the sorted resource lists for an output of a that is
     * an input of b. */
    while (i < a->n_out && j < b->n_in) {
        if (a->out[i].resource < b->in[j].resource) {
            ++i;
        } else if (a->out[i].resource > b->in[j].resource) {
            ++j;
        } else {
            return true;
        }
    }
    return false;
}

/*
 * Try to break a dependency on b, append temporary register copy records
 * to the end of copy and update n_copy in case of success.
 * This is not always possible: e.g. control flow must always be the last,
 * load/store must be first and state dependencies are not supported yet.
 */
static bool break_dependency(struct slot_prop *a,
                             struct slot_prop *b,
                             struct opcode_arg_copy *copy,
                             unsigned *n_copy)
{
    unsigned i = 0;
    unsigned j = 0;
    unsigned n = *n_copy;
    bool rv = false;

    if (a->op_flags & XTENSA_OP_CONTROL_FLOW) {
        return false;
    }
    if ((a->op_flags & XTENSA_OP_LOAD_STORE) <
        (b->op_flags & XTENSA_OP_LOAD_STORE)) {
        return false;
    }
    while (i < a->n_out && j < b->n_in) {
        if (a->out[i].resource < b->in[j].resource) {
            ++i;
        } else if (a->out[i].resource > b->in[j].resource) {
            ++j;
        } else {
            int index = b->in[j].index;

            /* Only visible register-file inputs can be redirected to a
             * temporary copy. */
            if (get_resource_type(a->out[i].resource) != RES_REGFILE ||
                index < 0) {
                return false;
            }
            copy[n].resource = b->in[j].resource;
            copy[n].arg = b->arg + index;
            ++n;
            ++j;
            rv = true;
        }
    }
    *n_copy = n;
    return rv;
}

/*
 * Calculate evaluation order for slot opcodes.
 * Build opcode order graph and output its nodes in topological sort order.
 * An edge a -> b in the graph means that opcode a must be followed by
 * opcode b.
 */
static bool tsort(struct slot_prop *slot,
                  struct slot_prop *sorted[],
                  unsigned n,
                  struct opcode_arg_copy *copy,
                  unsigned *n_copy)
{
    struct tsnode {
        unsigned n_in_edge;
        unsigned n_out_edge;
        unsigned out_edge[MAX_INSN_SLOTS];
    } node[MAX_INSN_SLOTS];

    unsigned in[MAX_INSN_SLOTS];
    unsigned i, j;
    unsigned n_in = 0;
    unsigned n_out = 0;
    unsigned n_edge = 0;
    unsigned in_idx = 0;
    unsigned node_idx = 0;

    for (i = 0; i < n; ++i) {
        node[i].n_in_edge = 0;
        node[i].n_out_edge = 0;
    }

    /* Build the dependency graph: edge i -> j when j depends on i. */
    for (i = 0; i < n; ++i) {
        unsigned n_out_edge = 0;

        for (j = 0; j < n; ++j) {
            if (i != j && op_depends_on(slot + j, slot + i)) {
                node[i].out_edge[n_out_edge] = j;
                ++node[j].n_in_edge;
                ++n_out_edge;
                ++n_edge;
            }
        }
        node[i].n_out_edge = n_out_edge;
    }

    /* Seed the work queue with nodes that have no incoming edges. */
    for (i = 0; i < n; ++i) {
        if (!node[i].n_in_edge) {
            in[n_in] = i;
            ++n_in;
        }
    }

again:
    /* Kahn's algorithm: emit ready nodes, releasing their successors. */
    for (; in_idx < n_in; ++in_idx) {
        i = in[in_idx];
        sorted[n_out] = slot + i;
        ++n_out;
        for (j = 0; j < node[i].n_out_edge; ++j) {
            --n_edge;
            if (--node[node[i].out_edge[j]].n_in_edge == 0) {
                in[n_in] = node[i].out_edge[j];
                ++n_in;
            }
        }
    }
    /* Edges remain => a cycle; try to break one dependency with a
     * temporary copy and resume the sort. */
    if (n_edge) {
        for (; node_idx < n; ++node_idx) {
            struct tsnode *cnode = node + node_idx;

            if (cnode->n_in_edge) {
                for (j = 0; j < cnode->n_out_edge; ++j) {
                    unsigned k = cnode->out_edge[j];

                    if (break_dependency(slot + k, slot + node_idx,
                                         copy, n_copy) &&
                        --node[k].n_in_edge == 0) {
                        in[n_in] = k;
                        ++n_in;
                        --n_edge;
                        cnode->out_edge[j] =
                            cnode->out_edge[cnode->n_out_edge - 1];
                        --cnode->n_out_edge;
                        goto again;
                    }
                }
            }
        }
    }
    return n_edge == 0;
}

/* Append a resource to a slot's input and/or output list according to the
 * operand's inout direction ('i' = in, 'o' = out, 'm' = both). */
static void opcode_add_resource(struct slot_prop *op,
                                uint32_t resource, char direction,
                                int index)
{
    switch (direction) {
    case 'm':
    case 'i':
        assert(op->n_in < ARRAY_SIZE(op->in));
        op->in[op->n_in].resource = resource;
        op->in[op->n_in].index = index;
        ++op->n_in;
        /* fall through */
    case 'o':
        if (direction == 'm' || direction == 'o') {
            assert(op->n_out < ARRAY_SIZE(op->out));
            op->out[op->n_out].resource = resource;
/* (continuation of opcode_add_resource) */
            op->out[op->n_out].index = index;
            ++op->n_out;
        }
        break;

    default:
        g_assert_not_reached();
    }
}

/* qsort comparator ordering opcode_arg_info by resource key. */
static int resource_compare(const void *a, const void *b)
{
    const struct opcode_arg_info *pa = a;
    const struct opcode_arg_info *pb = b;

    return pa->resource < pb->resource ?
        -1 : (pa->resource > pb->resource ? 1 : 0);
}

/* qsort comparator ordering opcode_arg_copy by resource key. */
static int arg_copy_compare(const void *a, const void *b)
{
    const struct opcode_arg_copy *pa = a;
    const struct opcode_arg_copy *pb = b;

    return pa->resource < pb->resource ?
        -1 : (pa->resource > pb->resource ? 1 : 0);
}

/*
 * Decode and translate one guest instruction (possibly a multi-slot FLIX
 * bundle): fetch its bytes, decode format/opcodes/operands, run the
 * per-instruction checks (privilege, syscall, debug break, register
 * window, coprocessor enable), order the slots by data dependencies,
 * then invoke each slot's translate callback.
 */
static void disas_xtensa_insn(CPUXtensaState *env, DisasContext *dc)
{
    xtensa_isa isa = dc->config->isa;
    unsigned char b[MAX_INSN_LENGTH] = {cpu_ldub_code(env, dc->pc)};
    unsigned len = xtensa_op0_insn_len(dc, b[0]);
    xtensa_format fmt;
    int slot, slots;
    unsigned i;
    uint32_t op_flags = 0;
    struct slot_prop slot_prop[MAX_INSN_SLOTS];
    struct slot_prop *ordered[MAX_INSN_SLOTS];
    struct opcode_arg_copy arg_copy[MAX_INSN_SLOTS * MAX_OPCODE_ARGS];
    unsigned n_arg_copy = 0;
    uint32_t debug_cause = 0;
    uint32_t windowed_register = 0;
    uint32_t coprocessor = 0;

    if (len == XTENSA_UNDEFINED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "unknown instruction length (pc = %08x)\n",
                      dc->pc);
        gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
        return;
    }

    dc->base.pc_next = dc->pc + len;
    /* b[0] was already fetched to determine the length. */
    for (i = 1; i < len; ++i) {
        b[i] = cpu_ldub_code(env, dc->pc + i);
    }
    xtensa_insnbuf_from_chars(isa, dc->insnbuf, b, len);
    fmt = xtensa_format_decode(isa, dc->insnbuf);
    if (fmt == XTENSA_UNDEFINED) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "unrecognized instruction format (pc = %08x)\n",
                      dc->pc);
        gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
        return;
    }
    slots = xtensa_format_num_slots(isa, fmt);
    for (slot = 0; slot < slots; ++slot) {
        xtensa_opcode opc;
        int opnd, vopnd, opnds;
        OpcodeArg *arg = slot_prop[slot].arg;
        XtensaOpcodeOps *ops;

        xtensa_format_get_slot(isa, fmt, slot, dc->insnbuf, dc->slotbuf);
        opc = xtensa_opcode_decode(isa, fmt, slot, dc->slotbuf);
        if (opc == XTENSA_UNDEFINED) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "unrecognized opcode in slot %d (pc = %08x)\n",
                          slot, dc->pc);
            gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
            return;
        }
        opnds = xtensa_opcode_num_operands(isa, opc);
        /* Decode operands: collect AR usage for the window check and fill
         * in OpcodeArg entries for the visible operands. */
        for (opnd = vopnd = 0; opnd < opnds; ++opnd) {
            void **register_file = NULL;

            if (xtensa_operand_is_register(isa, opc, opnd)) {
                xtensa_regfile rf = xtensa_operand_regfile(isa, opc, opnd);

                register_file = dc->config->regfile[rf];

                if (rf == dc->config->a_regfile) {
                    uint32_t v;

                    xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
                                             dc->slotbuf, &v);
                    xtensa_operand_decode(isa, opc, opnd, &v);
                    windowed_register |= 1u << v;
                }
            }
            if (xtensa_operand_is_visible(isa, opc, opnd)) {
                uint32_t v;

                xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
                                         dc->slotbuf, &v);
                xtensa_operand_decode(isa, opc, opnd, &v);
                arg[vopnd].raw_imm = v;
                if (xtensa_operand_is_PCrelative(isa, opc, opnd)) {
                    xtensa_operand_undo_reloc(isa, opc, opnd, &v, dc->pc);
                }
                arg[vopnd].imm = v;
                if (register_file) {
                    arg[vopnd].in = register_file[v];
                    arg[vopnd].out = register_file[v];
                }
                ++vopnd;
            }
        }
        ops = dc->config->opcode_ops[opc];
        slot_prop[slot].ops = ops;
        if (ops) {
            op_flags |= ops->op_flags;
        } else {
            qemu_log_mask(LOG_UNIMP,
                          "unimplemented opcode '%s' in slot %d (pc = %08x)\n",
                          xtensa_opcode_name(isa, opc), slot, dc->pc);
            op_flags |= XTENSA_OP_ILL;
        }
        if ((op_flags & XTENSA_OP_ILL) ||
            (ops && ops->test_ill && ops->test_ill(dc, arg, ops->par))) {
            gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
            return;
        }
        if (ops->op_flags & XTENSA_OP_DEBUG_BREAK) {
            debug_cause |= ops->par[0];
        }
        if (ops->test_overflow) {
            windowed_register |= ops->test_overflow(dc, arg, ops->par);
        }
        coprocessor |= ops->coprocessor;

        /* For multi-slot bundles, build per-slot resource lists so the
         * slots can be topologically ordered below. */
        if (slots > 1) {
            slot_prop[slot].n_in = 0;
            slot_prop[slot].n_out = 0;
            slot_prop[slot].op_flags = ops->op_flags & XTENSA_OP_LOAD_STORE;

            opnds = xtensa_opcode_num_operands(isa, opc);

            for (opnd = vopnd = 0; opnd < opnds; ++opnd) {
                bool visible = xtensa_operand_is_visible(isa, opc, opnd);

                if (xtensa_operand_is_register(isa, opc, opnd)) {
                    xtensa_regfile rf = xtensa_operand_regfile(isa, opc, opnd);
                    uint32_t v = 0;

                    xtensa_operand_get_field(isa, opc, opnd, fmt, slot,
                                             dc->slotbuf, &v);
                    xtensa_operand_decode(isa, opc, opnd, &v);
                    opcode_add_resource(slot_prop + slot,
                                        encode_resource(RES_REGFILE, rf, v),
                                        xtensa_operand_inout(isa, opc, opnd),
                                        visible ? vopnd : -1);
                }
                if (visible) {
                    ++vopnd;
                }
            }

            opnds = xtensa_opcode_num_stateOperands(isa, opc);

            for (opnd = 0; opnd < opnds; ++opnd) {
                xtensa_state state = xtensa_stateOperand_state(isa, opc, opnd);

                opcode_add_resource(slot_prop + slot,
                                    encode_resource(RES_STATE, 0, state),
                                    xtensa_stateOperand_inout(isa, opc, opnd),
                                    -1);
            }
            if (xtensa_opcode_is_branch(isa, opc) ||
                xtensa_opcode_is_jump(isa, opc) ||
                xtensa_opcode_is_loop(isa, opc) ||
                xtensa_opcode_is_call(isa, opc)) {
                slot_prop[slot].op_flags |= XTENSA_OP_CONTROL_FLOW;
            }

            /* op_depends_on()/break_dependency() merge-scan these lists,
             * so keep them sorted by resource key. */
            qsort(slot_prop[slot].in, slot_prop[slot].n_in,
                  sizeof(slot_prop[slot].in[0]), resource_compare);
            qsort(slot_prop[slot].out, slot_prop[slot].n_out,
                  sizeof(slot_prop[slot].out[0]), resource_compare);
        }
    }

    if (slots > 1) {
        if (!tsort(slot_prop, ordered, slots, arg_copy, &n_arg_copy)) {
            qemu_log_mask(LOG_UNIMP,
                          "Circular resource dependencies (pc = %08x)\n",
                          dc->pc);
            gen_exception_cause(dc, ILLEGAL_INSTRUCTION_CAUSE);
            return;
        }
    } else {
        ordered[0] = slot_prop + 0;
    }

    if ((op_flags & XTENSA_OP_PRIVILEGED) &&
        !gen_check_privilege(dc)) {
        return;
    }

    if (op_flags & XTENSA_OP_SYSCALL) {
        gen_exception_cause(dc, SYSCALL_CAUSE);
        return;
    }

    if ((op_flags & XTENSA_OP_DEBUG_BREAK) && dc->debug) {
        gen_debug_exception(dc, debug_cause);
        return;
    }

    if (windowed_register && !gen_window_check(dc, windowed_register)) {
        return;
    }

    if (op_flags & XTENSA_OP_UNDERFLOW) {
        TCGv_i32 tmp = tcg_const_i32(dc->pc);

        gen_helper_test_underflow_retw(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    if (op_flags & XTENSA_OP_ALLOCA) {
        TCGv_i32 tmp = tcg_const_i32(dc->pc);

        gen_helper_movsp(cpu_env, tmp);
        tcg_temp_free(tmp);
    }

    if (coprocessor && !gen_check_cpenable(dc, coprocessor)) {
        return;
    }

    /* Materialize the temporary copies recorded by break_dependency():
     * one temp per distinct resource, shared by all readers. */
    if (n_arg_copy) {
        uint32_t resource;
        void *temp;
        unsigned j;

        qsort(arg_copy, n_arg_copy, sizeof(*arg_copy), arg_copy_compare);
        for (i = j = 0; i < n_arg_copy; ++i) {
            if (i == 0 || arg_copy[i].resource != resource) {
                resource = arg_copy[i].resource;
                temp = tcg_temp_local_new();
                tcg_gen_mov_i32(temp, arg_copy[i].arg->in);
                arg_copy[i].temp = temp;

                if (i != j) {
                    arg_copy[j] = arg_copy[i];
                }
                ++j;
            }
            arg_copy[i].arg->in = temp;
        }
        n_arg_copy = j;
    }

    if (op_flags & XTENSA_OP_DIVIDE_BY_ZERO) {
        for (slot = 0; slot < slots; ++slot) {
            if (slot_prop[slot].ops->op_flags & XTENSA_OP_DIVIDE_BY_ZERO) {
                gen_zero_check(dc, slot_prop[slot].arg);
            }
        }
    }

    dc->op_flags = op_flags;

    /* Translate the slots in dependency order. */
    for (slot = 0; slot < slots; ++slot) {
        struct slot_prop *pslot = ordered[slot];
        XtensaOpcodeOps *ops = pslot->ops;

        ops->translate(dc, pslot->arg, ops->par);
    }

    for (i = 0; i < n_arg_copy; ++i) {
        tcg_temp_free(arg_copy[i].temp);
    }

    if (dc->base.is_jmp == DISAS_NEXT) {
        gen_postprocess(dc, 0);
        dc->op_flags = 0;
        if (op_flags & XTENSA_OP_EXIT_TB_M1) {
            /* Change in mmu index, memory mapping or tb->flags; exit tb */
            gen_jumpi_check_loop_end(dc, -1);
        } else if (op_flags & XTENSA_OP_EXIT_TB_0) {
            gen_jumpi_check_loop_end(dc, 0);
        } else {
            gen_check_loop_end(dc, 0);
        }
    }
    dc->pc = dc->base.pc_next;
}

/* Length of the instruction at the current PC (first byte only fetched). */
static inline unsigned xtensa_insn_len(CPUXtensaState *env, DisasContext *dc)
{
    uint8_t b0 = cpu_ldub_code(env, dc->pc);
    return xtensa_op0_insn_len(dc, b0);
}

/* Raise DEBUGCAUSE.IB if an enabled hardware instruction breakpoint
 * matches the current PC. */
static void gen_ibreak_check(CPUXtensaState *env, DisasContext *dc)
{
    unsigned i;

    for (i = 0; i < dc->config->nibreak; ++i) {
        if ((env->sregs[IBREAKENABLE] & (1 << i)) &&
            env->sregs[IBREAKA + i] == dc->pc) {
            gen_debug_exception(dc, DEBUGCAUSE_IB);
            break;
        }
    }
}

/* TranslatorOps hook: populate DisasContext from the TB flags/cs_base. */
static void xtensa_tr_init_disas_context(DisasContextBase *dcbase,
                                         CPUState *cpu)
{
    DisasContext *dc = container_of(dcbase, DisasContext, base);
    CPUXtensaState *env = cpu->env_ptr;
    uint32_t tb_flags = dc->base.tb->flags;

    dc->config = env->config;
    dc->pc = dc->base.pc_first;
    dc->ring = tb_flags & XTENSA_TBFLAG_RING_MASK;
dc->cring = (tb_flags & XTENSA_TBFLAG_EXCM) ? 0 : dc->ring; dc->lbeg_off = (dc->base.tb->cs_base & XTENSA_CSBASE_LBEG_OFF_MASK) >> XTENSA_CSBASE_LBEG_OFF_SHIFT; dc->lend = (dc->base.tb->cs_base & XTENSA_CSBASE_LEND_MASK) + (dc->base.pc_first & TARGET_PAGE_MASK); dc->debug = tb_flags & XTENSA_TBFLAG_DEBUG; dc->icount = tb_flags & XTENSA_TBFLAG_ICOUNT; dc->cpenable = (tb_flags & XTENSA_TBFLAG_CPENABLE_MASK) >> XTENSA_TBFLAG_CPENABLE_SHIFT; dc->window = ((tb_flags & XTENSA_TBFLAG_WINDOW_MASK) >> XTENSA_TBFLAG_WINDOW_SHIFT); dc->cwoe = tb_flags & XTENSA_TBFLAG_CWOE; dc->callinc = ((tb_flags & XTENSA_TBFLAG_CALLINC_MASK) >> XTENSA_TBFLAG_CALLINC_SHIFT); if (dc->config->isa) { dc->insnbuf = xtensa_insnbuf_alloc(dc->config->isa); dc->slotbuf = xtensa_insnbuf_alloc(dc->config->isa); } init_sar_tracker(dc); } static void xtensa_tr_tb_start(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); if (dc->icount) { dc->next_icount = tcg_temp_local_new_i32(); } } static void xtensa_tr_insn_start(DisasContextBase *dcbase, CPUState *cpu) { tcg_gen_insn_start(dcbase->pc_next); } static bool xtensa_tr_breakpoint_check(DisasContextBase *dcbase, CPUState *cpu, const CPUBreakpoint *bp) { DisasContext *dc = container_of(dcbase, DisasContext, base); tcg_gen_movi_i32(cpu_pc, dc->base.pc_next); gen_exception(dc, EXCP_DEBUG); dc->base.is_jmp = DISAS_NORETURN; /* The address covered by the breakpoint must be included in [tb->pc, tb->pc + tb->size) in order to for it to be properly cleared -- thus we increment the PC here so that the logic setting tb->size below does the right thing. */ dc->base.pc_next += 2; return true; } static void xtensa_tr_translate_insn(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); CPUXtensaState *env = cpu->env_ptr; target_ulong page_start; /* These two conditions only apply to the first insn in the TB, but this is the first TranslateOps hook that allows exiting. 
*/ if ((tb_cflags(dc->base.tb) & CF_USE_ICOUNT) && (dc->base.tb->flags & XTENSA_TBFLAG_YIELD)) { gen_exception(dc, EXCP_YIELD); dc->base.is_jmp = DISAS_NORETURN; return; } if (dc->base.tb->flags & XTENSA_TBFLAG_EXCEPTION) { gen_exception(dc, EXCP_DEBUG); dc->base.is_jmp = DISAS_NORETURN; return; } if (dc->icount) { TCGLabel *label = gen_new_label(); tcg_gen_addi_i32(dc->next_icount, cpu_SR[ICOUNT], 1); tcg_gen_brcondi_i32(TCG_COND_NE, dc->next_icount, 0, label); tcg_gen_mov_i32(dc->next_icount, cpu_SR[ICOUNT]); if (dc->debug) { gen_debug_exception(dc, DEBUGCAUSE_IC); } gen_set_label(label); } if (dc->debug) { gen_ibreak_check(env, dc); } disas_xtensa_insn(env, dc); if (dc->icount) { tcg_gen_mov_i32(cpu_SR[ICOUNT], dc->next_icount); } /* End the TB if the next insn will cross into the next page. */ page_start = dc->base.pc_first & TARGET_PAGE_MASK; if (dc->base.is_jmp == DISAS_NEXT && (dc->pc - page_start >= TARGET_PAGE_SIZE || dc->pc - page_start + xtensa_insn_len(env, dc) > TARGET_PAGE_SIZE)) { dc->base.is_jmp = DISAS_TOO_MANY; } } static void xtensa_tr_tb_stop(DisasContextBase *dcbase, CPUState *cpu) { DisasContext *dc = container_of(dcbase, DisasContext, base); reset_sar_tracker(dc); if (dc->config->isa) { xtensa_insnbuf_free(dc->config->isa, dc->insnbuf); xtensa_insnbuf_free(dc->config->isa, dc->slotbuf); } if (dc->icount) { tcg_temp_free(dc->next_icount); } switch (dc->base.is_jmp) { case DISAS_NORETURN: break; case DISAS_TOO_MANY: if (dc->base.singlestep_enabled) { tcg_gen_movi_i32(cpu_pc, dc->pc); gen_exception(dc, EXCP_DEBUG); } else { gen_jumpi(dc, dc->pc, 0); } break; default: g_assert_not_reached(); } } static void xtensa_tr_disas_log(const DisasContextBase *dcbase, CPUState *cpu) { qemu_log("IN: %s\n", lookup_symbol(dcbase->pc_first)); log_target_disas(cpu, dcbase->pc_first, dcbase->tb->size); } static const TranslatorOps xtensa_translator_ops = { .init_disas_context = xtensa_tr_init_disas_context, .tb_start = xtensa_tr_tb_start, .insn_start = 
xtensa_tr_insn_start, .breakpoint_check = xtensa_tr_breakpoint_check, .translate_insn = xtensa_tr_translate_insn, .tb_stop = xtensa_tr_tb_stop, .disas_log = xtensa_tr_disas_log, };

/* Entry point from the TCG core: translate one TB via the hook table above. */
void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb) { DisasContext dc = {};
    translator_loop(&xtensa_translator_ops, &dc.base, cpu, tb); }

/* Debug/monitor dump: PC, configured special and user registers, the A
 * window, all physical AR registers (with window-start/base markers), and
 * optionally the FP register file. */
void xtensa_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf, int flags) { XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;
    int i, j;
    cpu_fprintf(f, "PC=%08x\n\n", env->pc);
    /* Special registers, only those enabled by the core's option bits. */
    for (i = j = 0; i < 256; ++i) {
        if (xtensa_option_bits_enabled(env->config, sregnames[i].opt_bits)) {
            cpu_fprintf(f, "%12s=%08x%c", sregnames[i].name, env->sregs[i], (j++ % 4) == 3 ? '\n' : ' ');
        }
    }
    cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
    /* User registers, same filtering. */
    for (i = j = 0; i < 256; ++i) {
        if (xtensa_option_bits_enabled(env->config, uregnames[i].opt_bits)) {
            cpu_fprintf(f, "%s=%08x%c", uregnames[i].name, env->uregs[i], (j++ % 4) == 3 ? '\n' : ' ');
        }
    }
    cpu_fprintf(f, (j % 4) == 0 ? "\n" : "\n\n");
    /* Current window of A registers. */
    for (i = 0; i < 16; ++i) {
        cpu_fprintf(f, " A%02d=%08x%c", i, env->regs[i], (i % 4) == 3 ? '\n' : ' ');
    }
    /* Flush the live window back so phys_regs is current before dumping. */
    xtensa_sync_phys_from_window(env);
    cpu_fprintf(f, "\n");
    for (i = 0; i < env->config->nareg; ++i) {
        cpu_fprintf(f, "AR%02d=%08x ", i, env->phys_regs[i]);
        if (i % 4 == 3) {
            /* '<' marks a set WINDOW_START bit, '=' the current WINDOW_BASE. */
            bool ws = (env->sregs[WINDOW_START] & (1 << (i / 4))) != 0;
            bool cw = env->sregs[WINDOW_BASE] == i / 4;
            cpu_fprintf(f, "%c%c\n", ws ? '<' : ' ', cw ? '=' : ' ');
        }
    }
    if ((flags & CPU_DUMP_FPU) && xtensa_option_enabled(env->config, XTENSA_OPTION_FP_COPROCESSOR)) {
        cpu_fprintf(f, "\n");
        for (i = 0; i < 16; ++i) {
            cpu_fprintf(f, "F%02d=%08x (%+10.8e)%c", i, float32_val(env->fregs[i].f32[FP_F32_LOW]), *(float *)(env->fregs[i].f32 + FP_F32_LOW), (i % 2) == 1 ? '\n' : ' ');
        }
    } }

/* Recover guest PC from the insn-start data recorded at translation time. */
void restore_state_to_opc(CPUXtensaState *env, TranslationBlock *tb, target_ulong *data) { env->pc = data[0]; }

/* ABS: absolute value without branches, via movcond on sign. */
static void translate_abs(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 neg = tcg_temp_new_i32();
    tcg_gen_neg_i32(neg, arg[1].in);
    tcg_gen_movcond_i32(TCG_COND_GE, arg[0].out, arg[1].in, zero, arg[1].in, neg);
    tcg_temp_free(neg);
    tcg_temp_free(zero); }

/* ADD / ADD.N. */
static void translate_add(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_add_i32(arg[0].out, arg[1].in, arg[2].in); }

/* ADDI / ADDI.N / ADDMI: add immediate. */
static void translate_addi(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_addi_i32(arg[0].out, arg[1].in, arg[2].imm); }

/* ADDX2/4/8: (a1 << par[0]) + a2. */
static void translate_addx(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shli_i32(tmp, arg[1].in, par[0]);
    tcg_gen_add_i32(arg[0].out, tmp, arg[2].in);
    tcg_temp_free(tmp); }

/* ALL4/ALL8/ANY4/ANY8: reduce a group of par[1] boolean bits into one bit
 * of the target boolean register; par[0] selects all (AND) vs any (OR),
 * implemented arithmetically via carry out of the masked field. */
static void translate_all(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { uint32_t shift = par[1];
    TCGv_i32 mask = tcg_const_i32(((1 << shift) - 1) << arg[1].imm);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_and_i32(tmp, arg[1].in, mask);
    if (par[0]) {
        tcg_gen_addi_i32(tmp, tmp, 1 << arg[1].imm);
    } else {
        tcg_gen_add_i32(tmp, tmp, mask);
    }
    tcg_gen_shri_i32(tmp, tmp, arg[1].imm + shift);
    tcg_gen_deposit_i32(arg[0].out, arg[0].out, tmp, arg[0].imm, 1);
    tcg_temp_free(mask);
    tcg_temp_free(tmp); }

/* AND. */
static void translate_and(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_and_i32(arg[0].out, arg[1].in, arg[2].in); }

/* BALL/BNALL: branch if all (par[0]=EQ) / not all (NE) masked bits set. */
static void translate_ball(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_and_i32(tmp, arg[0].in, arg[1].in);
    gen_brcond(dc, par[0], tmp, arg[1].in, arg[2].imm);
    tcg_temp_free(tmp); }

/* BANY/BNONE: branch if any (NE) / none (EQ) of the masked bits set. */
static void translate_bany(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = 
tcg_temp_new_i32();
    tcg_gen_and_i32(tmp, arg[0].in, arg[1].in);
    gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
    tcg_temp_free(tmp); }

/* Bcc reg-reg branches (BEQ/BNE/BGE/BLT/...): condition in par[0]. */
static void translate_b(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_brcond(dc, par[0], arg[0].in, arg[1].in, arg[2].imm); }

/* BBC/BBS: branch on a single bit selected by a register; the probe bit
 * starts at the MSB (big-endian bit numbering) or LSB and is shifted to
 * the tested position. */
static void translate_bb(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) {
#ifdef TARGET_WORDS_BIGENDIAN
    TCGv_i32 bit = tcg_const_i32(0x80000000u);
#else
    TCGv_i32 bit = tcg_const_i32(0x00000001u);
#endif
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, arg[1].in, 0x1f);
#ifdef TARGET_WORDS_BIGENDIAN
    tcg_gen_shr_i32(bit, bit, tmp);
#else
    tcg_gen_shl_i32(bit, bit, tmp);
#endif
    tcg_gen_and_i32(tmp, arg[0].in, bit);
    gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
    tcg_temp_free(tmp);
    tcg_temp_free(bit); }

/* BBCI/BBSI: branch on a single immediate-selected bit. */
static void translate_bbi(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_new_i32();
#ifdef TARGET_WORDS_BIGENDIAN
    tcg_gen_andi_i32(tmp, arg[0].in, 0x80000000u >> arg[1].imm);
#else
    tcg_gen_andi_i32(tmp, arg[0].in, 0x00000001u << arg[1].imm);
#endif
    gen_brcondi(dc, par[0], tmp, 0, arg[2].imm);
    tcg_temp_free(tmp); }

/* Bcc-immediate branches (BEQI/BNEI/...): compare reg against immediate. */
static void translate_bi(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_brcondi(dc, par[0], arg[0].in, arg[1].imm, arg[2].imm); }

/* Bccz branches (BEQZ/BNEZ/...): compare reg against zero. */
static void translate_bz(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_brcondi(dc, par[0], arg[0].in, 0, arg[1].imm); }

/* Selector for the boolean-register ops below. */
enum { BOOLEAN_AND, BOOLEAN_ANDC, BOOLEAN_OR, BOOLEAN_ORC, BOOLEAN_XOR, };

/* ANDB/ANDBC/ORB/ORBC/XORB: combine two boolean-register bits and deposit
 * the result bit; the operation is dispatched through a function table. */
static void translate_boolean(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { static void (* const op[])(TCGv_i32, TCGv_i32, TCGv_i32) = { [BOOLEAN_AND] = tcg_gen_and_i32, [BOOLEAN_ANDC] = tcg_gen_andc_i32, [BOOLEAN_OR] = tcg_gen_or_i32, [BOOLEAN_ORC] = tcg_gen_orc_i32, [BOOLEAN_XOR] = tcg_gen_xor_i32, };
    TCGv_i32 tmp1 = tcg_temp_new_i32();
    TCGv_i32 tmp2 = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp1, arg[1].in, arg[1].imm);
    tcg_gen_shri_i32(tmp2, arg[2].in, arg[2].imm);
    op[par[0]](tmp1, tmp1, tmp2);
    tcg_gen_deposit_i32(arg[0].out, arg[0].out, tmp1, arg[0].imm, 1);
    tcg_temp_free(tmp1);
    tcg_temp_free(tmp2); }

/* BF/BT: branch on a boolean-register bit clear (EQ) / set (NE). */
static void translate_bp(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, arg[0].in, 1 << arg[0].imm);
    gen_brcondi(dc, par[0], tmp, 0, arg[1].imm);
    tcg_temp_free(tmp); }

/* CALL0: save return address in a0, jump to immediate target. */
static void translate_call0(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_movi_i32(cpu_R[0], dc->base.pc_next);
    gen_jumpi(dc, arg[0].imm, 0); }

/* CALL4/8/12: windowed call to an immediate target; par[0] = callinc. */
static void translate_callw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_const_i32(arg[0].imm);
    gen_callw_slot(dc, par[0], tmp, adjust_jump_slot(dc, arg[0].imm, 0));
    tcg_temp_free(tmp); }

/* CALLX0: indirect call; copy the target before clobbering a0. */
static void translate_callx0(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, arg[0].in);
    tcg_gen_movi_i32(cpu_R[0], dc->base.pc_next);
    gen_jump(dc, tmp);
    tcg_temp_free(tmp); }

/* CALLX4/8/12: windowed indirect call. */
static void translate_callxw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, arg[0].in);
    gen_callw_slot(dc, par[0], tmp, -1);
    tcg_temp_free(tmp); }

/* CLAMPS: clamp to the signed range of a (arg[2].imm + 1)-bit value
 * via smax with the lower bound then smin with the upper bound. */
static void translate_clamps(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp1 = tcg_const_i32(-1u << arg[2].imm);
    TCGv_i32 tmp2 = tcg_const_i32((1 << arg[2].imm) - 1);
    tcg_gen_smax_i32(tmp1, tmp1, arg[1].in);
    tcg_gen_smin_i32(arg[0].out, tmp1, tmp2);
    tcg_temp_free(tmp1);
    tcg_temp_free(tmp2); }

/* CLRB_EXPSTATE: clear one bit of the EXPSTATE user register. */
static void translate_clrb_expstate(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { /* TODO: GPIO32 may be a part of coprocessor */
    tcg_gen_andi_i32(cpu_UR[EXPSTATE], cpu_UR[EXPSTATE], ~(1u << arg[0].imm)); }

/* CONST16: shift the old low half up and insert the 16-bit immediate. */
static void translate_const16(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 c = tcg_const_i32(arg[1].imm);
    tcg_gen_deposit_i32(arg[0].out, c, 
arg[0].in, 16, 16);
    tcg_temp_free(c); }

/* DHI/DHU/DHWB/DHWBI/DPFL: data-cache ops are modeled as a byte load so
 * that address translation faults are still raised. */
static void translate_dcache(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 addr = tcg_temp_new_i32();
    TCGv_i32 res = tcg_temp_new_i32();
    tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
    tcg_gen_qemu_ld8u(res, addr, dc->cring);
    tcg_temp_free(addr);
    tcg_temp_free(res); }

/* DEPBITS: deposit arg[0] into a bit field of arg[1]. */
static void translate_depbits(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_deposit_i32(arg[1].out, arg[1].in, arg[0].in, arg[2].imm, arg[3].imm); }

/* ENTRY legality check: source register must be a0..a3 and windowed
 * call-overflow handling must be enabled (CWOE). */
static bool test_ill_entry(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { if (arg[0].imm > 3 || !dc->cwoe) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal entry instruction(pc = %08x)\n", dc->pc);
        return true;
    } else {
        return false;
    } }

/* ENTRY overflow test: window-start bit corresponding to the call size. */
static uint32_t test_overflow_entry(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { return 1 << (dc->callinc * 4); }

/* ENTRY: delegate window rotation and stack adjustment to a helper. */
static void translate_entry(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 pc = tcg_const_i32(dc->pc);
    TCGv_i32 s = tcg_const_i32(arg[0].imm);
    TCGv_i32 imm = tcg_const_i32(arg[1].imm);
    gen_helper_entry(cpu_env, pc, s, imm);
    tcg_temp_free(imm);
    tcg_temp_free(s);
    tcg_temp_free(pc); }

/* EXTUI: extract an unsigned bit field (shift right then mask). */
static void translate_extui(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { int maskimm = (1 << arg[3].imm) - 1;
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shri_i32(tmp, arg[1].in, arg[2].imm);
    tcg_gen_andi_i32(arg[0].out, tmp, maskimm);
    tcg_temp_free(tmp); }

/* IHI/IPFL etc.: instruction-cache ops only need an ITLB hit test. */
static void translate_icache(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) {
#ifndef CONFIG_USER_ONLY
    TCGv_i32 addr = tcg_temp_new_i32();
    tcg_gen_movi_i32(cpu_pc, dc->pc);
    tcg_gen_addi_i32(addr, arg[0].in, arg[1].imm);
    gen_helper_itlb_hit_test(cpu_env, addr);
    tcg_temp_free(addr);
#endif
}

/* IITLB/IDTLB: invalidate a TLB entry; par[0] selects DTLB vs ITLB. */
static void translate_itlb(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) {
#ifndef CONFIG_USER_ONLY
    TCGv_i32 dtlb = tcg_const_i32(par[0]);
    gen_helper_itlb(cpu_env, arg[0].in, dtlb);
    tcg_temp_free(dtlb);
#endif
}

/* J: unconditional jump to immediate target. */
static void translate_j(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_jumpi(dc, arg[0].imm, 0); }

/* JX: unconditional jump to register target. */
static void translate_jx(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_jump(dc, arg[0].in); }

/* L32E: window-exception load using the previous ring (dc->ring). */
static void translate_l32e(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 addr = tcg_temp_new_i32();
    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    gen_load_store_alignment(dc, 2, addr, false);
    tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->ring, MO_TEUL);
    tcg_temp_free(addr);
}

/* Generic load/store: par[0] = memop, par[1] = acquire/release flag,
 * par[2] = store (vs load); barriers implement L32AI/S32RI ordering. */
static void translate_ldst(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 addr = tcg_temp_new_i32();
    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    if (par[0] & MO_SIZE) {
        gen_load_store_alignment(dc, par[0] & MO_SIZE, addr, par[1]);
    }
    if (par[2]) {
        if (par[1]) {
            tcg_gen_mb(TCG_BAR_STRL | TCG_MO_ALL);
        }
        tcg_gen_qemu_st_tl(arg[0].in, addr, dc->cring, par[0]);
    } else {
        tcg_gen_qemu_ld_tl(arg[0].out, addr, dc->cring, par[0]);
        if (par[1]) {
            tcg_gen_mb(TCG_BAR_LDAQ | TCG_MO_ALL);
        }
    }
    tcg_temp_free(addr); }

/* L32R: PC-relative literal load, optionally rebased on LITBASE. */
static void translate_l32r(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp;
    if (dc->base.tb->flags & XTENSA_TBFLAG_LITBASE) {
        tmp = tcg_const_i32(arg[1].raw_imm - 1);
        tcg_gen_add_i32(tmp, cpu_SR[LITBASE], tmp);
    } else {
        tmp = tcg_const_i32(arg[1].imm);
    }
    tcg_gen_qemu_ld32u(arg[0].out, tmp, dc->cring);
    tcg_temp_free(tmp); }

/* LOOP/LOOPGTZ/LOOPNEZ: set up LBEG/LEND/LCOUNT; par[0] is the condition
 * under which the loop body is skipped entirely (NEVER for plain LOOP). */
static void translate_loop(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { uint32_t lend = arg[1].imm;
    tcg_gen_subi_i32(cpu_SR[LCOUNT], arg[0].in, 1);
    tcg_gen_movi_i32(cpu_SR[LBEG], dc->base.pc_next);
    tcg_gen_movi_i32(cpu_SR[LEND], lend);
    if (par[0] != TCG_COND_NEVER) {
        TCGLabel *label = gen_new_label();
        tcg_gen_brcondi_i32(par[0], arg[0].in, 0, label);
        gen_jumpi(dc, lend, 1);
        gen_set_label(label);
    }
    gen_jumpi(dc, dc->base.pc_next, 0); }

/* MAC16 operation selector. */
enum { MAC16_UMUL, MAC16_MUL, MAC16_MULA, MAC16_MULS, MAC16_NONE, };

/* MAC16 half-word selectors (low/high for each operand). */
enum { 
MAC16_LL, MAC16_HL, MAC16_LH, MAC16_HH, MAC16_HX = 0x1, MAC16_XH = 0x2, };

/* MAC16 family: optional pre-load of an MR register via ld_offset, a
 * 16x16 multiply on the selected operand halves, and accumulate into
 * ACCLO/ACCHI (40-bit accumulator, hence the final ext8s on ACCHI). */
static void translate_mac16(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { int op = par[0];
    unsigned half = par[1];
    uint32_t ld_offset = par[2];
    /* With a load, the multiply operands shift from arg[0..1] to arg[2..3]. */
    unsigned off = ld_offset ? 2 : 0;
    TCGv_i32 vaddr = tcg_temp_new_i32();
    TCGv_i32 mem32 = tcg_temp_new_i32();
    if (ld_offset) {
        tcg_gen_addi_i32(vaddr, arg[1].in, ld_offset);
        gen_load_store_alignment(dc, 2, vaddr, false);
        tcg_gen_qemu_ld32u(mem32, vaddr, dc->cring);
    }
    if (op != MAC16_NONE) {
        TCGv_i32 m1 = gen_mac16_m(arg[off].in, half & MAC16_HX, op == MAC16_UMUL);
        TCGv_i32 m2 = gen_mac16_m(arg[off + 1].in, half & MAC16_XH, op == MAC16_UMUL);
        if (op == MAC16_MUL || op == MAC16_UMUL) {
            tcg_gen_mul_i32(cpu_SR[ACCLO], m1, m2);
            if (op == MAC16_UMUL) {
                tcg_gen_movi_i32(cpu_SR[ACCHI], 0);
            } else {
                tcg_gen_sari_i32(cpu_SR[ACCHI], cpu_SR[ACCLO], 31);
            }
        } else {
            TCGv_i32 lo = tcg_temp_new_i32();
            TCGv_i32 hi = tcg_temp_new_i32();
            tcg_gen_mul_i32(lo, m1, m2);
            tcg_gen_sari_i32(hi, lo, 31);
            if (op == MAC16_MULA) {
                tcg_gen_add2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI], cpu_SR[ACCLO], cpu_SR[ACCHI], lo, hi);
            } else {
                tcg_gen_sub2_i32(cpu_SR[ACCLO], cpu_SR[ACCHI], cpu_SR[ACCLO], cpu_SR[ACCHI], lo, hi);
            }
            /* Keep the accumulator within 40 bits. */
            tcg_gen_ext8s_i32(cpu_SR[ACCHI], cpu_SR[ACCHI]);
            tcg_temp_free_i32(lo);
            tcg_temp_free_i32(hi);
        }
        tcg_temp_free(m1);
        tcg_temp_free(m2);
    }
    if (ld_offset) {
        /* Write back the incremented address and the loaded MR value. */
        tcg_gen_mov_i32(arg[1].out, vaddr);
        tcg_gen_mov_i32(cpu_SR[MR + arg[0].imm], mem32);
    }
    tcg_temp_free(vaddr);
    tcg_temp_free(mem32); }

/* MEMW (and EXTW): full memory barrier. */
static void translate_memw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_mb(TCG_BAR_SC | TCG_MO_ALL); }

/* MIN: signed minimum. */
static void translate_smin(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_smin_i32(arg[0].out, arg[1].in, arg[2].in); }

/* MINU: unsigned minimum. */
static void translate_umin(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_umin_i32(arg[0].out, arg[1].in, arg[2].in); }

/* MAX: signed maximum. */
static void translate_smax(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_smax_i32(arg[0].out, arg[1].in, arg[2].in); }

/* MAXU: unsigned maximum. */
static void translate_umax(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_umax_i32(arg[0].out, arg[1].in, arg[2].in); }

/* MOV / MOV.N. */
static void translate_mov(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_mov_i32(arg[0].out, arg[1].in); }

/* MOVEQZ/MOVNEZ/MOVLTZ/MOVGEZ: conditional move on arg[2] vs zero. */
static void translate_movcond(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 zero = tcg_const_i32(0);
    tcg_gen_movcond_i32(par[0], arg[0].out, arg[2].in, zero, arg[1].in, arg[0].in);
    tcg_temp_free(zero); }

/* MOVI / MOVI.N. */
static void translate_movi(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_movi_i32(arg[0].out, arg[1].imm); }

/* MOVF/MOVT: conditional move on a boolean-register bit. */
static void translate_movp(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_andi_i32(tmp, arg[2].in, 1 << arg[2].imm);
    tcg_gen_movcond_i32(par[0], arg[0].out, tmp, zero, arg[1].in, arg[0].in);
    tcg_temp_free(tmp);
    tcg_temp_free(zero); }

/* MOVSP: plain move here; window checks are handled elsewhere. */
static void translate_movsp(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_mov_i32(arg[0].out, arg[1].in); }

/* MUL16S/MUL16U: 16x16->32 multiply; par[0] selects signed extension. */
static void translate_mul16(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 v1 = tcg_temp_new_i32();
    TCGv_i32 v2 = tcg_temp_new_i32();
    if (par[0]) {
        tcg_gen_ext16s_i32(v1, arg[1].in);
        tcg_gen_ext16s_i32(v2, arg[2].in);
    } else {
        tcg_gen_ext16u_i32(v1, arg[1].in);
        tcg_gen_ext16u_i32(v2, arg[2].in);
    }
    tcg_gen_mul_i32(arg[0].out, v1, v2);
    tcg_temp_free(v2);
    tcg_temp_free(v1); }

/* MULL: low 32 bits of the product. */
static void translate_mull(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_mul_i32(arg[0].out, arg[1].in, arg[2].in); }

/* MULSH/MULUH: high 32 bits of the product; par[0] selects signed. */
static void translate_mulh(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 lo = tcg_temp_new();
    if (par[0]) {
        tcg_gen_muls2_i32(lo, arg[0].out, arg[1].in, arg[2].in);
    } else {
        tcg_gen_mulu2_i32(lo, arg[0].out, 
arg[1].in, arg[2].in);
    }
    tcg_temp_free(lo); }

/* NEG. */
static void translate_neg(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_neg_i32(arg[0].out, arg[1].in); }

/* NOP (also used for cache ops modeled as no-ops). */
static void translate_nop(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { }

/* NSA: normalization shift amount (count of redundant sign bits). */
static void translate_nsa(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_clrsb_i32(arg[0].out, arg[1].in); }

/* NSAU: count leading zeros (32 when the input is zero). */
static void translate_nsau(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_clzi_i32(arg[0].out, arg[1].in, 32); }

/* OR. */
static void translate_or(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_or_i32(arg[0].out, arg[1].in, arg[2].in); }

/* PITLB/PDTLB: probe a TLB; par[0] selects DTLB vs ITLB. */
static void translate_ptlb(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) {
#ifndef CONFIG_USER_ONLY
    TCGv_i32 dtlb = tcg_const_i32(par[0]);
    tcg_gen_movi_i32(cpu_pc, dc->pc);
    gen_helper_ptlb(arg[0].out, cpu_env, arg[1].in, dtlb);
    tcg_temp_free(dtlb);
#endif
}

/* QUOS/REMS: signed divide/remainder with explicit handling of the
 * INT_MIN / -1 overflow case (which is UB for the host's TCG div). */
static void translate_quos(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGLabel *label1 = gen_new_label();
    TCGLabel *label2 = gen_new_label();
    tcg_gen_brcondi_i32(TCG_COND_NE, arg[1].in, 0x80000000, label1);
    tcg_gen_brcondi_i32(TCG_COND_NE, arg[2].in, 0xffffffff, label1);
    tcg_gen_movi_i32(arg[0].out, par[0] ? 0x80000000 : 0);
    tcg_gen_br(label2);
    gen_set_label(label1);
    if (par[0]) {
        tcg_gen_div_i32(arg[0].out, arg[1].in, arg[2].in);
    } else {
        tcg_gen_rem_i32(arg[0].out, arg[1].in, arg[2].in);
    }
    gen_set_label(label2); }

/* QUOU: unsigned divide. */
static void translate_quou(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_divu_i32(arg[0].out, arg[1].in, arg[2].in); }

/* READ_IMPWIRE: always reads 0 here. */
static void translate_read_impwire(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { /* TODO: GPIO32 may be a part of coprocessor */
    tcg_gen_movi_i32(arg[0].out, 0); }

/* REMU: unsigned remainder. */
static void translate_remu(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_remu_i32(arg[0].out, arg[1].in, arg[2].in); }

/* RER: external-register read via helper. */
static void translate_rer(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_helper_rer(arg[0].out, cpu_env, arg[1].in); }

/* RET / RET.N: jump to a0. */
static void translate_ret(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_jump(dc, cpu_R[0]); }

/* RETW legality check: requires CWOE; further validation in a helper. */
static bool test_ill_retw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { if (!dc->cwoe) {
        qemu_log_mask(LOG_GUEST_ERROR, "Illegal retw instruction(pc = %08x)\n", dc->pc);
        return true;
    } else {
        TCGv_i32 tmp = tcg_const_i32(dc->pc);
        gen_helper_test_ill_retw(cpu_env, tmp);
        tcg_temp_free(tmp);
        return false;
    } }

/* RETW: clear the current WINDOW_START bit, rebuild the return PC from
 * the current PC's high bits and a0's low 30 bits, rotate the window
 * back via the helper, then jump. */
static void translate_retw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_const_i32(1);
    tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
    tcg_gen_andc_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp);
    tcg_gen_movi_i32(tmp, dc->pc);
    tcg_gen_deposit_i32(tmp, tmp, cpu_R[0], 0, 30);
    gen_helper_retw(cpu_env, cpu_R[0]);
    gen_jump(dc, tmp);
    tcg_temp_free(tmp); }

/* RFDE: return from double exception via DEPC (if configured) or EPC1. */
static void translate_rfde(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_jump(dc, cpu_SR[dc->config->ndepc ? 
DEPC : EPC1]); }

/* RFE: clear PS.EXCM and return to EPC1. */
static void translate_rfe(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
    gen_jump(dc, cpu_SR[EPC1]); }

/* RFI n: restore PS from EPSn and return to EPCn. */
static void translate_rfi(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_mov_i32(cpu_SR[PS], cpu_SR[EPS2 + arg[0].imm - 2]);
    gen_jump(dc, cpu_SR[EPC1 + arg[0].imm - 1]); }

/* RFWO/RFWU: return from window overflow/underflow; par[0] selects
 * clearing (overflow) vs setting (underflow) the WINDOW_START bit. */
static void translate_rfw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_const_i32(1);
    tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_EXCM);
    tcg_gen_shl_i32(tmp, tmp, cpu_SR[WINDOW_BASE]);
    if (par[0]) {
        tcg_gen_andc_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp);
    } else {
        tcg_gen_or_i32(cpu_SR[WINDOW_START], cpu_SR[WINDOW_START], tmp);
    }
    tcg_temp_free(tmp);
    gen_helper_restore_owb(cpu_env);
    gen_jump(dc, cpu_SR[EPC1]); }

/* ROTW: stage the new WINDOW_BASE; committed by the window-sync code. */
static void translate_rotw(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_addi_i32(cpu_windowbase_next, cpu_SR[WINDOW_BASE], arg[0].imm); }

/* RSIL: read old PS and set the new interrupt level. */
static void translate_rsil(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_mov_i32(arg[0].out, cpu_SR[PS]);
    tcg_gen_andi_i32(cpu_SR[PS], cpu_SR[PS], ~PS_INTLEVEL);
    tcg_gen_ori_i32(cpu_SR[PS], cpu_SR[PS], arg[1].imm); }

/* RSR legality: special register must be readable. */
static bool test_ill_rsr(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { return !check_sr(dc, par[0], SR_R); }

/* RSR: read special register par[0]. */
static void translate_rsr(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_rsr(dc, arg[0].out, par[0]); }

/* RITLB0/RITLB1/RDTLB0/RDTLB1: read TLB entry fields via helpers;
 * par[0] = DTLB flag, par[1] = field selector. */
static void translate_rtlb(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) {
#ifndef CONFIG_USER_ONLY
    static void (* const helper[])(TCGv_i32 r, TCGv_env env, TCGv_i32 a1, TCGv_i32 a2) = { gen_helper_rtlb0, gen_helper_rtlb1, };
    TCGv_i32 dtlb = tcg_const_i32(par[0]);
    helper[par[1]](arg[0].out, cpu_env, arg[1].in, dtlb);
    tcg_temp_free(dtlb);
#endif
}

/* RUR: read user register if implemented. */
static void translate_rur(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { if (uregnames[par[0]].name) {
        tcg_gen_mov_i32(arg[0].out, cpu_UR[par[0]]);
    } else {
        qemu_log_mask(LOG_UNIMP, "RUR %d not implemented\n", par[0]);
    } }

/* SETB_EXPSTATE: set one bit of the EXPSTATE user register. */
static void translate_setb_expstate(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { /* TODO: GPIO32 may be a part of coprocessor */
    tcg_gen_ori_i32(cpu_UR[EXPSTATE], cpu_UR[EXPSTATE], 1u << arg[0].imm); }

/* ATOMCTL check is only meaningful with the softmmu memory model. */
#ifdef CONFIG_USER_ONLY
static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr) { }
#else
static void gen_check_atomctl(DisasContext *dc, TCGv_i32 addr) { TCGv_i32 tpc = tcg_const_i32(dc->pc);
    gen_helper_check_atomctl(cpu_env, tpc, addr);
    tcg_temp_free(tpc); }
#endif

/* S32C1I: compare-and-swap against SCOMPARE1; locals must survive the
 * potential helper call, hence tcg_temp_local_new. */
static void translate_s32c1i(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_local_new_i32();
    TCGv_i32 addr = tcg_temp_local_new_i32();
    tcg_gen_mov_i32(tmp, arg[0].in);
    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    gen_load_store_alignment(dc, 2, addr, true);
    gen_check_atomctl(dc, addr);
    tcg_gen_atomic_cmpxchg_i32(arg[0].out, addr, cpu_SR[SCOMPARE1], tmp, dc->cring, MO_TEUL);
    tcg_temp_free(addr);
    tcg_temp_free(tmp); }

/* S32E: window-exception store using the previous ring (dc->ring). */
static void translate_s32e(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 addr = tcg_temp_new_i32();
    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    gen_load_store_alignment(dc, 2, addr, false);
    tcg_gen_qemu_st_tl(arg[0].in, addr, dc->ring, MO_TEUL);
    tcg_temp_free(addr); }

/* SALT/SALTU: set register to comparison result. */
static void translate_salt(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_setcond_i32(par[0], arg[0].out, arg[1].in, arg[2].in); }

/* SEXT: sign-extend from bit arg[2].imm; fast paths for 8/16 bits. */
static void translate_sext(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { int shift = 31 - arg[2].imm;
    if (shift == 24) {
        tcg_gen_ext8s_i32(arg[0].out, arg[1].in);
    } else if (shift == 16) {
        tcg_gen_ext16s_i32(arg[0].out, arg[1].in);
    } else {
        TCGv_i32 tmp = tcg_temp_new_i32();
        tcg_gen_shli_i32(tmp, arg[1].in, shift);
        tcg_gen_sari_i32(arg[0].out, tmp, shift);
        tcg_temp_free(tmp);
    } }

/* SIMCALL legality: only allowed when semihosting is enabled. */
static bool 
test_ill_simcall(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) {
#ifdef CONFIG_USER_ONLY
    bool ill = true;
#else
    bool ill = !semihosting_enabled();
#endif
    if (ill) {
        qemu_log_mask(LOG_GUEST_ERROR, "SIMCALL but semihosting is disabled\n");
    }
    return ill; }

/* SIMCALL: semihosting call (softmmu only). */
static void translate_simcall(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) {
#ifndef CONFIG_USER_ONLY
    gen_helper_simcall(cpu_env);
#endif
}

/* * Note: 64 bit ops are used here solely because SAR values * have range 0..63 */
#define gen_shift_reg(cmd, reg) do { \
    TCGv_i64 tmp = tcg_temp_new_i64(); \
    tcg_gen_extu_i32_i64(tmp, reg); \
    tcg_gen_##cmd##_i64(v, v, tmp); \
    tcg_gen_extrl_i64_i32(arg[0].out, v); \
    tcg_temp_free_i64(v); \
    tcg_temp_free_i64(tmp); \
} while (0)

#define gen_shift(cmd) gen_shift_reg(cmd, cpu_SR[SAR])

/* SLL: shift left by SAR; fast path when SAR is a tracked 5-bit value,
 * otherwise shift the 64-bit widened value by (32 - SAR) & 0x3f. */
static void translate_sll(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { if (dc->sar_m32_5bit) {
        tcg_gen_shl_i32(arg[0].out, arg[1].in, dc->sar_m32);
    } else {
        TCGv_i64 v = tcg_temp_new_i64();
        TCGv_i32 s = tcg_const_i32(32);
        tcg_gen_sub_i32(s, s, cpu_SR[SAR]);
        tcg_gen_andi_i32(s, s, 0x3f);
        tcg_gen_extu_i32_i64(v, arg[1].in);
        gen_shift_reg(shl, s);
        tcg_temp_free(s);
    } }

/* SLLI: immediate shift left; shift of 32 is architecturally undefined. */
static void translate_slli(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { if (arg[2].imm == 32) {
        qemu_log_mask(LOG_GUEST_ERROR, "slli a%d, a%d, 32 is undefined\n", arg[0].imm, arg[1].imm);
    }
    tcg_gen_shli_i32(arg[0].out, arg[1].in, arg[2].imm & 0x1f); }

/* SRA: arithmetic shift right by SAR. */
static void translate_sra(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { if (dc->sar_m32_5bit) {
        tcg_gen_sar_i32(arg[0].out, arg[1].in, cpu_SR[SAR]);
    } else {
        TCGv_i64 v = tcg_temp_new_i64();
        tcg_gen_ext_i32_i64(v, arg[1].in);
        gen_shift(sar);
    } }

/* SRAI: immediate arithmetic shift right. */
static void translate_srai(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_sari_i32(arg[0].out, arg[1].in, arg[2].imm); }

/* SRC: funnel shift of the arg[2]:arg[1] pair right by SAR. */
static void translate_src(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i64 v = tcg_temp_new_i64();
    tcg_gen_concat_i32_i64(v, arg[2].in, arg[1].in);
    gen_shift(shr); }

/* SRL: logical shift right by SAR. */
static void translate_srl(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { if (dc->sar_m32_5bit) {
        tcg_gen_shr_i32(arg[0].out, arg[1].in, cpu_SR[SAR]);
    } else {
        TCGv_i64 v = tcg_temp_new_i64();
        tcg_gen_extu_i32_i64(v, arg[1].in);
        gen_shift(shr);
    } }

#undef gen_shift
#undef gen_shift_reg

/* SRLI: immediate logical shift right. */
static void translate_srli(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_shri_i32(arg[0].out, arg[1].in, arg[2].imm); }

/* SSA8B: set SAR for a left byte-shift (bits = bytes * 8). */
static void translate_ssa8b(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shli_i32(tmp, arg[0].in, 3);
    gen_left_shift_sar(dc, tmp);
    tcg_temp_free(tmp); }

/* SSA8L: set SAR for a right byte-shift. */
static void translate_ssa8l(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shli_i32(tmp, arg[0].in, 3);
    gen_right_shift_sar(dc, tmp);
    tcg_temp_free(tmp); }

/* SSAI: set SAR from an immediate (right-shift form). */
static void translate_ssai(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_const_i32(arg[0].imm);
    gen_right_shift_sar(dc, tmp);
    tcg_temp_free(tmp); }

/* SSL: set SAR for a left shift from a register. */
static void translate_ssl(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_left_shift_sar(dc, arg[0].in); }

/* SSR: set SAR for a right shift from a register. */
static void translate_ssr(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_right_shift_sar(dc, arg[0].in); }

/* SUB. */
static void translate_sub(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_sub_i32(arg[0].out, arg[1].in, arg[2].in); }

/* SUBX2/4/8: (a1 << par[0]) - a2. */
static void translate_subx(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_shli_i32(tmp, arg[1].in, par[0]);
    tcg_gen_sub_i32(arg[0].out, tmp, arg[2].in);
    tcg_temp_free(tmp); }

/* WAITI: wait for interrupt (softmmu only). */
static void translate_waiti(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) {
#ifndef CONFIG_USER_ONLY
    gen_waiti(dc, arg[0].imm);
#endif
}

/* WITLB/WDTLB: write a TLB entry; par[0] selects DTLB vs ITLB. */
static void translate_wtlb(DisasContext *dc, const 
OpcodeArg arg[], const uint32_t par[]) {
#ifndef CONFIG_USER_ONLY
    TCGv_i32 dtlb = tcg_const_i32(par[0]);
    gen_helper_wtlb(cpu_env, arg[0].in, arg[1].in, dtlb);
    tcg_temp_free(dtlb);
#endif
}

/* WER: external-register write via helper. */
static void translate_wer(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_helper_wer(cpu_env, arg[0].in, arg[1].in); }

/* WRMSK_EXPSTATE: write EXPSTATE masked by the second operand. */
static void translate_wrmsk_expstate(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { /* TODO: GPIO32 may be a part of coprocessor */
    tcg_gen_and_i32(cpu_UR[EXPSTATE], arg[0].in, arg[1].in); }

/* WSR legality: special register must be writable. */
static bool test_ill_wsr(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { return !check_sr(dc, par[0], SR_W); }

/* WSR: write special register par[0]. */
static void translate_wsr(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { gen_wsr(dc, par[0], arg[0].in); }

/* WUR: write user register if implemented. */
static void translate_wur(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { if (uregnames[par[0]].name) {
        gen_wur(par[0], arg[0].in);
    } else {
        qemu_log_mask(LOG_UNIMP, "WUR %d not implemented\n", par[0]);
    } }

/* XOR. */
static void translate_xor(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { tcg_gen_xor_i32(arg[0].out, arg[1].in, arg[2].in); }

/* XSR legality: special register must support exchange. */
static bool test_ill_xsr(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { return !check_sr(dc, par[0], SR_X); }

/* XSR: exchange a register with a special register; the old value is
 * copied first so read and write do not interfere. */
static void translate_xsr(DisasContext *dc, const OpcodeArg arg[], const uint32_t par[]) { TCGv_i32 tmp = tcg_temp_new_i32();
    tcg_gen_mov_i32(tmp, arg[0].in);
    gen_rsr(dc, arg[0].out, par[0]);
    gen_wsr(dc, par[0], tmp);
    tcg_temp_free(tmp); }

/* Core opcode table: maps mnemonic(s) to translate/test callbacks,
 * constant parameters, and op flags. XTENSA_OP_NAME_ARRAY marks entries
 * whose .name is a NULL-terminated list of aliases. */
static const XtensaOpcodeOps core_ops[] = { { .name = "abs", .translate = translate_abs, },
    { .name = (const char * const[]) { "add", "add.n", NULL, }, .translate = translate_add, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "addi", "addi.n", NULL, }, .translate = translate_addi, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = "addmi", .translate = translate_addi, },
    { .name = "addx2", .translate = translate_addx, .par = (const uint32_t[]){1}, },
    { .name = "addx4", .translate = translate_addx, .par = (const uint32_t[]){2}, },
    { .name = "addx8", .translate = translate_addx, .par = (const uint32_t[]){3}, },
    { .name = "all4", .translate = translate_all, .par = (const uint32_t[]){true, 4}, },
    { .name = "all8", .translate = translate_all, .par = (const uint32_t[]){true, 8}, },
    { .name = "and", .translate = translate_and, },
    { .name = "andb", .translate = translate_boolean, .par = (const uint32_t[]){BOOLEAN_AND}, },
    { .name = "andbc", .translate = translate_boolean, .par = (const uint32_t[]){BOOLEAN_ANDC}, },
    { .name = "any4", .translate = translate_all, .par = (const uint32_t[]){false, 4}, },
    { .name = "any8", .translate = translate_all, .par = (const uint32_t[]){false, 8}, },
    { .name = (const char * const[]) { "ball", "ball.w15", "ball.w18", NULL, }, .translate = translate_ball, .par = (const uint32_t[]){TCG_COND_EQ}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bany", "bany.w15", "bany.w18", NULL, }, .translate = translate_bany, .par = (const uint32_t[]){TCG_COND_NE}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bbc", "bbc.w15", "bbc.w18", NULL, }, .translate = translate_bb, .par = (const uint32_t[]){TCG_COND_EQ}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bbci", "bbci.w15", "bbci.w18", NULL, }, .translate = translate_bbi, .par = (const uint32_t[]){TCG_COND_EQ}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bbs", "bbs.w15", "bbs.w18", NULL, }, .translate = translate_bb, .par = (const uint32_t[]){TCG_COND_NE}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bbsi", "bbsi.w15", "bbsi.w18", NULL, }, .translate = translate_bbi, .par = (const uint32_t[]){TCG_COND_NE}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "beq", "beq.w15", "beq.w18", NULL, }, .translate = translate_b, .par = (const uint32_t[]){TCG_COND_EQ}, .op_flags = 
XTENSA_OP_NAME_ARRAY, },
    /* Conditional branch entries: .par[0] carries the TCG comparison. */
    { .name = (const char * const[]) { "beqi", "beqi.w15", "beqi.w18", NULL, }, .translate = translate_bi, .par = (const uint32_t[]){TCG_COND_EQ}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "beqz", "beqz.n", "beqz.w15", "beqz.w18", NULL, }, .translate = translate_bz, .par = (const uint32_t[]){TCG_COND_EQ}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = "bf", .translate = translate_bp, .par = (const uint32_t[]){TCG_COND_EQ}, },
    { .name = (const char * const[]) { "bge", "bge.w15", "bge.w18", NULL, }, .translate = translate_b, .par = (const uint32_t[]){TCG_COND_GE}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bgei", "bgei.w15", "bgei.w18", NULL, }, .translate = translate_bi, .par = (const uint32_t[]){TCG_COND_GE}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bgeu", "bgeu.w15", "bgeu.w18", NULL, }, .translate = translate_b, .par = (const uint32_t[]){TCG_COND_GEU}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bgeui", "bgeui.w15", "bgeui.w18", NULL, }, .translate = translate_bi, .par = (const uint32_t[]){TCG_COND_GEU}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bgez", "bgez.w15", "bgez.w18", NULL, }, .translate = translate_bz, .par = (const uint32_t[]){TCG_COND_GE}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "blt", "blt.w15", "blt.w18", NULL, }, .translate = translate_b, .par = (const uint32_t[]){TCG_COND_LT}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "blti", "blti.w15", "blti.w18", NULL, }, .translate = translate_bi, .par = (const uint32_t[]){TCG_COND_LT}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bltu", "bltu.w15", "bltu.w18", NULL, }, .translate = translate_b, .par = (const uint32_t[]){TCG_COND_LTU}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bltui", "bltui.w15", "bltui.w18", NULL, }, .translate = translate_bi, .par = (const uint32_t[]){TCG_COND_LTU}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bltz", "bltz.w15", "bltz.w18", NULL, }, .translate = translate_bz, .par = (const uint32_t[]){TCG_COND_LT}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bnall", "bnall.w15", "bnall.w18", NULL, }, .translate = translate_ball, .par = (const uint32_t[]){TCG_COND_NE}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bne", "bne.w15", "bne.w18", NULL, }, .translate = translate_b, .par = (const uint32_t[]){TCG_COND_NE}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bnei", "bnei.w15", "bnei.w18", NULL, }, .translate = translate_bi, .par = (const uint32_t[]){TCG_COND_NE}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bnez", "bnez.n", "bnez.w15", "bnez.w18", NULL, }, .translate = translate_bz, .par = (const uint32_t[]){TCG_COND_NE}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    { .name = (const char * const[]) { "bnone", "bnone.w15", "bnone.w18", NULL, }, .translate = translate_bany, .par = (const uint32_t[]){TCG_COND_EQ}, .op_flags = XTENSA_OP_NAME_ARRAY, },
    /* BREAK: translated as a nop but flagged so the debug machinery fires. */
    { .name = "break", .translate = translate_nop, .par = (const uint32_t[]){DEBUGCAUSE_BI}, .op_flags = XTENSA_OP_DEBUG_BREAK, },
    { .name = "break.n", .translate = translate_nop, .par = (const uint32_t[]){DEBUGCAUSE_BN}, .op_flags = XTENSA_OP_DEBUG_BREAK, },
    { .name = "bt", .translate = translate_bp, .par = (const uint32_t[]){TCG_COND_NE}, },
    /* Call entries: .par[0] is the window increment (callinc). */
    { .name = "call0", .translate = translate_call0, },
    { .name = "call12", .translate = translate_callw, .par = (const uint32_t[]){3}, },
    { .name = "call4", .translate = translate_callw, .par = (const uint32_t[]){1}, },
    { .name = "call8", .translate = translate_callw, .par = (const uint32_t[]){2}, },
    { .name = "callx0", .translate = translate_callx0, },
    { .name = "callx12", .translate = translate_callxw, .par = (const uint32_t[]){3}, },
    { .name = "callx4", .translate = translate_callxw, .par = (const uint32_t[]){1}, },
    { .name = "callx8", .translate = translate_callxw, .par = (const uint32_t[]){2}, },
    { .name = "clamps", .translate = translate_clamps, },
    { .name = "clrb_expstate", .translate = translate_clrb_expstate, },
    { .name = "const16", .translate = translate_const16, },
    { .name = "depbits", .translate = translate_depbits, },
    /* Data-cache ops: privileged variants touch locked/indexed lines. */
    { .name = "dhi", .translate = translate_dcache, .op_flags = XTENSA_OP_PRIVILEGED, },
    { .name = "dhu", .translate = translate_dcache, .op_flags = XTENSA_OP_PRIVILEGED, },
    { .name = "dhwb", .translate = translate_dcache, },
    { .name = "dhwbi", .translate = translate_dcache, },
    { .name = "dii", .translate = translate_nop, .op_flags = XTENSA_OP_PRIVILEGED, },
    { .name = "diu", .translate = translate_nop, .op_flags = XTENSA_OP_PRIVILEGED, },
    { .name = "diwb", .translate = translate_nop, .op_flags = XTENSA_OP_PRIVILEGED, },
    { .name = "diwbi", .translate = translate_nop, .op_flags = XTENSA_OP_PRIVILEGED, },
    { .name = "dpfl", .translate = translate_dcache, .op_flags = XTENSA_OP_PRIVILEGED, },
    { .name = "dpfr", .translate = translate_nop, },
    { .name = "dpfro", .translate = translate_nop, },
    { .name = "dpfw", .translate = translate_nop, },
    { .name = "dpfwo", .translate = translate_nop, },
    { .name = "dsync", .translate = translate_nop, },
    { .name = "entry", .translate = translate_entry, .test_ill = test_ill_entry, .test_overflow = test_overflow_entry, .op_flags = XTENSA_OP_EXIT_TB_M1 | XTENSA_OP_SYNC_REGISTER_WINDOW, },
    { .name = "esync", .translate = translate_nop, },
    { .name = "excw", .translate = translate_nop, },
    { .name = "extui", .translate = translate_extui, },
    { .name = "extw", .translate = translate_memw, },
    { .name = "hwwdtlba", .op_flags = XTENSA_OP_ILL, },
    { .name = "hwwitlba", .op_flags = XTENSA_OP_ILL, },
    { .name = "idtlb", .translate = translate_itlb, .par = (const uint32_t[]){true}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1, },
    { .name = "ihi", .translate = 
translate_icache, }, { .name = "ihu", .translate = translate_icache, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "iii", .translate = translate_nop, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "iitlb", .translate = translate_itlb, .par = (const uint32_t[]){false}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1, }, { .name = "iiu", .translate = translate_nop, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = (const char * const[]) { "ill", "ill.n", NULL, }, .op_flags = XTENSA_OP_ILL | XTENSA_OP_NAME_ARRAY, }, { .name = "ipf", .translate = translate_nop, }, { .name = "ipfl", .translate = translate_icache, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "isync", .translate = translate_nop, }, { .name = "j", .translate = translate_j, }, { .name = "jx", .translate = translate_jx, }, { .name = "l16si", .translate = translate_ldst, .par = (const uint32_t[]){MO_TESW, false, false}, .op_flags = XTENSA_OP_LOAD, }, { .name = "l16ui", .translate = translate_ldst, .par = (const uint32_t[]){MO_TEUW, false, false}, .op_flags = XTENSA_OP_LOAD, }, { .name = "l32ai", .translate = translate_ldst, .par = (const uint32_t[]){MO_TEUL, true, false}, .op_flags = XTENSA_OP_LOAD, }, { .name = "l32e", .translate = translate_l32e, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_LOAD, }, { .name = (const char * const[]) { "l32i", "l32i.n", NULL, }, .translate = translate_ldst, .par = (const uint32_t[]){MO_TEUL, false, false}, .op_flags = XTENSA_OP_NAME_ARRAY | XTENSA_OP_LOAD, }, { .name = "l32r", .translate = translate_l32r, .op_flags = XTENSA_OP_LOAD, }, { .name = "l8ui", .translate = translate_ldst, .par = (const uint32_t[]){MO_UB, false, false}, .op_flags = XTENSA_OP_LOAD, }, { .name = "lddec", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_NONE, 0, -4}, .op_flags = XTENSA_OP_LOAD, }, { .name = "ldinc", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_NONE, 0, 4}, .op_flags = XTENSA_OP_LOAD, }, { .name = "ldpte", .op_flags = XTENSA_OP_ILL, }, { .name = 
(const char * const[]) { "loop", "loop.w15", NULL, }, .translate = translate_loop, .par = (const uint32_t[]){TCG_COND_NEVER}, .op_flags = XTENSA_OP_NAME_ARRAY, }, { .name = (const char * const[]) { "loopgtz", "loopgtz.w15", NULL, }, .translate = translate_loop, .par = (const uint32_t[]){TCG_COND_GT}, .op_flags = XTENSA_OP_NAME_ARRAY, }, { .name = (const char * const[]) { "loopnez", "loopnez.w15", NULL, }, .translate = translate_loop, .par = (const uint32_t[]){TCG_COND_NE}, .op_flags = XTENSA_OP_NAME_ARRAY, }, { .name = "max", .translate = translate_smax, }, { .name = "maxu", .translate = translate_umax, }, { .name = "memw", .translate = translate_memw, }, { .name = "min", .translate = translate_smin, }, { .name = "minu", .translate = translate_umin, }, { .name = (const char * const[]) { "mov", "mov.n", NULL, }, .translate = translate_mov, .op_flags = XTENSA_OP_NAME_ARRAY, }, { .name = "moveqz", .translate = translate_movcond, .par = (const uint32_t[]){TCG_COND_EQ}, }, { .name = "movf", .translate = translate_movp, .par = (const uint32_t[]){TCG_COND_EQ}, }, { .name = "movgez", .translate = translate_movcond, .par = (const uint32_t[]){TCG_COND_GE}, }, { .name = "movi", .translate = translate_movi, }, { .name = "movi.n", .translate = translate_movi, }, { .name = "movltz", .translate = translate_movcond, .par = (const uint32_t[]){TCG_COND_LT}, }, { .name = "movnez", .translate = translate_movcond, .par = (const uint32_t[]){TCG_COND_NE}, }, { .name = "movsp", .translate = translate_movsp, .op_flags = XTENSA_OP_ALLOCA, }, { .name = "movt", .translate = translate_movp, .par = (const uint32_t[]){TCG_COND_NE}, }, { .name = "mul.aa.hh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_HH, 0}, }, { .name = "mul.aa.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_HL, 0}, }, { .name = "mul.aa.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_LH, 0}, }, { .name = "mul.aa.ll", .translate = 
translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_LL, 0}, }, { .name = "mul.ad.hh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_HH, 0}, }, { .name = "mul.ad.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_HL, 0}, }, { .name = "mul.ad.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_LH, 0}, }, { .name = "mul.ad.ll", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_LL, 0}, }, { .name = "mul.da.hh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_HH, 0}, }, { .name = "mul.da.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_HL, 0}, }, { .name = "mul.da.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_LH, 0}, }, { .name = "mul.da.ll", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_LL, 0}, }, { .name = "mul.dd.hh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_HH, 0}, }, { .name = "mul.dd.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_HL, 0}, }, { .name = "mul.dd.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_LH, 0}, }, { .name = "mul.dd.ll", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MUL, MAC16_LL, 0}, }, { .name = "mul16s", .translate = translate_mul16, .par = (const uint32_t[]){true}, }, { .name = "mul16u", .translate = translate_mul16, .par = (const uint32_t[]){false}, }, { .name = "mula.aa.hh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, 0}, }, { .name = "mula.aa.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, 0}, }, { .name = "mula.aa.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, 0}, }, { .name = "mula.aa.ll", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, 0}, }, { .name = "mula.ad.hh", .translate 
= translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, 0}, }, { .name = "mula.ad.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, 0}, }, { .name = "mula.ad.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, 0}, }, { .name = "mula.ad.ll", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, 0}, }, { .name = "mula.da.hh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, 0}, }, { .name = "mula.da.hh.lddec", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, -4}, }, { .name = "mula.da.hh.ldinc", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, 4}, }, { .name = "mula.da.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, 0}, }, { .name = "mula.da.hl.lddec", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, -4}, }, { .name = "mula.da.hl.ldinc", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, 4}, }, { .name = "mula.da.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, 0}, }, { .name = "mula.da.lh.lddec", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, -4}, }, { .name = "mula.da.lh.ldinc", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, 4}, }, { .name = "mula.da.ll", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, 0}, }, { .name = "mula.da.ll.lddec", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, -4}, }, { .name = "mula.da.ll.ldinc", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, 4}, }, { .name = "mula.dd.hh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, 0}, }, { .name = "mula.dd.hh.lddec", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, -4}, }, { .name = 
"mula.dd.hh.ldinc", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HH, 4}, }, { .name = "mula.dd.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, 0}, }, { .name = "mula.dd.hl.lddec", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, -4}, }, { .name = "mula.dd.hl.ldinc", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_HL, 4}, }, { .name = "mula.dd.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, 0}, }, { .name = "mula.dd.lh.lddec", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, -4}, }, { .name = "mula.dd.lh.ldinc", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LH, 4}, }, { .name = "mula.dd.ll", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, 0}, }, { .name = "mula.dd.ll.lddec", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, -4}, }, { .name = "mula.dd.ll.ldinc", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULA, MAC16_LL, 4}, }, { .name = "mull", .translate = translate_mull, }, { .name = "muls.aa.hh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_HH, 0}, }, { .name = "muls.aa.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_HL, 0}, }, { .name = "muls.aa.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_LH, 0}, }, { .name = "muls.aa.ll", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_LL, 0}, }, { .name = "muls.ad.hh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_HH, 0}, }, { .name = "muls.ad.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_HL, 0}, }, { .name = "muls.ad.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_LH, 0}, }, { .name = "muls.ad.ll", .translate = translate_mac16, .par = 
(const uint32_t[]){MAC16_MULS, MAC16_LL, 0}, }, { .name = "muls.da.hh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_HH, 0}, }, { .name = "muls.da.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_HL, 0}, }, { .name = "muls.da.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_LH, 0}, }, { .name = "muls.da.ll", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_LL, 0}, }, { .name = "muls.dd.hh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_HH, 0}, }, { .name = "muls.dd.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_HL, 0}, }, { .name = "muls.dd.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_LH, 0}, }, { .name = "muls.dd.ll", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_MULS, MAC16_LL, 0}, }, { .name = "mulsh", .translate = translate_mulh, .par = (const uint32_t[]){true}, }, { .name = "muluh", .translate = translate_mulh, .par = (const uint32_t[]){false}, }, { .name = "neg", .translate = translate_neg, }, { .name = (const char * const[]) { "nop", "nop.n", NULL, }, .translate = translate_nop, .op_flags = XTENSA_OP_NAME_ARRAY, }, { .name = "nsa", .translate = translate_nsa, }, { .name = "nsau", .translate = translate_nsau, }, { .name = "or", .translate = translate_or, }, { .name = "orb", .translate = translate_boolean, .par = (const uint32_t[]){BOOLEAN_OR}, }, { .name = "orbc", .translate = translate_boolean, .par = (const uint32_t[]){BOOLEAN_ORC}, }, { .name = "pdtlb", .translate = translate_ptlb, .par = (const uint32_t[]){true}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "pitlb", .translate = translate_ptlb, .par = (const uint32_t[]){false}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "quos", .translate = translate_quos, .par = (const uint32_t[]){true}, .op_flags = XTENSA_OP_DIVIDE_BY_ZERO, }, { .name = "quou", .translate = translate_quou, 
.op_flags = XTENSA_OP_DIVIDE_BY_ZERO, }, { .name = "rdtlb0", .translate = translate_rtlb, .par = (const uint32_t[]){true, 0}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rdtlb1", .translate = translate_rtlb, .par = (const uint32_t[]){true, 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "read_impwire", .translate = translate_read_impwire, }, { .name = "rems", .translate = translate_quos, .par = (const uint32_t[]){false}, .op_flags = XTENSA_OP_DIVIDE_BY_ZERO, }, { .name = "remu", .translate = translate_remu, .op_flags = XTENSA_OP_DIVIDE_BY_ZERO, }, { .name = "rer", .translate = translate_rer, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = (const char * const[]) { "ret", "ret.n", NULL, }, .translate = translate_ret, .op_flags = XTENSA_OP_NAME_ARRAY, }, { .name = (const char * const[]) { "retw", "retw.n", NULL, }, .translate = translate_retw, .test_ill = test_ill_retw, .op_flags = XTENSA_OP_UNDERFLOW | XTENSA_OP_NAME_ARRAY, }, { .name = "rfdd", .op_flags = XTENSA_OP_ILL, }, { .name = "rfde", .translate = translate_rfde, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rfdo", .op_flags = XTENSA_OP_ILL, }, { .name = "rfe", .translate = translate_rfe, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "rfi", .translate = translate_rfi, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "rfwo", .translate = translate_rfw, .par = (const uint32_t[]){true}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "rfwu", .translate = translate_rfw, .par = (const uint32_t[]){false}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "ritlb0", .translate = translate_rtlb, .par = (const uint32_t[]){false, 0}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "ritlb1", .translate = translate_rtlb, .par = (const uint32_t[]){false, 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rotw", .translate = translate_rotw, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1 | 
XTENSA_OP_SYNC_REGISTER_WINDOW, }, { .name = "rsil", .translate = translate_rsil, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0 | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "rsr.176", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){176}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.208", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){208}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.acchi", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){ACCHI}, }, { .name = "rsr.acclo", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){ACCLO}, }, { .name = "rsr.atomctl", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){ATOMCTL}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.br", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){BR}, }, { .name = "rsr.cacheattr", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){CACHEATTR}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.ccompare0", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){CCOMPARE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.ccompare1", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){CCOMPARE + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.ccompare2", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){CCOMPARE + 2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.ccount", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){CCOUNT}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "rsr.configid0", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){CONFIGID0}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.configid1", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const 
uint32_t[]){CONFIGID1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.cpenable", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){CPENABLE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.dbreaka0", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){DBREAKA}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.dbreaka1", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){DBREAKA + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.dbreakc0", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){DBREAKC}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.dbreakc1", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){DBREAKC + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.ddr", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){DDR}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.debugcause", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){DEBUGCAUSE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.depc", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){DEPC}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.dtlbcfg", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){DTLBCFG}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.epc1", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPC1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.epc2", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPC1 + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.epc3", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPC1 + 2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.epc4", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPC1 + 3}, 
.op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.epc5", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPC1 + 4}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.epc6", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPC1 + 5}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.epc7", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPC1 + 6}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.eps2", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPS2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.eps3", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPS2 + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.eps4", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPS2 + 2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.eps5", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPS2 + 3}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.eps6", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPS2 + 4}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.eps7", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EPS2 + 5}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.exccause", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EXCCAUSE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.excsave1", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EXCSAVE1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.excsave2", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EXCSAVE1 + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.excsave3", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EXCSAVE1 + 2}, .op_flags = 
XTENSA_OP_PRIVILEGED, }, { .name = "rsr.excsave4", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EXCSAVE1 + 3}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.excsave5", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EXCSAVE1 + 4}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.excsave6", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EXCSAVE1 + 5}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.excsave7", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EXCSAVE1 + 6}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.excvaddr", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){EXCVADDR}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.ibreaka0", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){IBREAKA}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.ibreaka1", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){IBREAKA + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.ibreakenable", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){IBREAKENABLE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.icount", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){ICOUNT}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.icountlevel", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){ICOUNTLEVEL}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.intclear", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){INTCLEAR}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.intenable", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){INTENABLE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.interrupt", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const 
uint32_t[]){INTSET}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "rsr.intset", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){INTSET}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "rsr.itlbcfg", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){ITLBCFG}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.lbeg", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){LBEG}, }, { .name = "rsr.lcount", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){LCOUNT}, }, { .name = "rsr.lend", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){LEND}, }, { .name = "rsr.litbase", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){LITBASE}, }, { .name = "rsr.m0", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){MR}, }, { .name = "rsr.m1", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){MR + 1}, }, { .name = "rsr.m2", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){MR + 2}, }, { .name = "rsr.m3", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){MR + 3}, }, { .name = "rsr.memctl", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){MEMCTL}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.misc0", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){MISC}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.misc1", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){MISC + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.misc2", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){MISC + 2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.misc3", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const 
uint32_t[]){MISC + 3}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.prefctl", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){PREFCTL}, }, { .name = "rsr.prid", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){PRID}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.ps", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){PS}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.ptevaddr", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){PTEVADDR}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.rasid", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){RASID}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.sar", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){SAR}, }, { .name = "rsr.scompare1", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){SCOMPARE1}, }, { .name = "rsr.vecbase", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){VECBASE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.windowbase", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){WINDOW_BASE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsr.windowstart", .translate = translate_rsr, .test_ill = test_ill_rsr, .par = (const uint32_t[]){WINDOW_START}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "rsync", .translate = translate_nop, }, { .name = "rur.expstate", .translate = translate_rur, .par = (const uint32_t[]){EXPSTATE}, }, { .name = "rur.fcr", .translate = translate_rur, .par = (const uint32_t[]){FCR}, .coprocessor = 0x1, }, { .name = "rur.fsr", .translate = translate_rur, .par = (const uint32_t[]){FSR}, .coprocessor = 0x1, }, { .name = "rur.threadptr", .translate = translate_rur, .par = (const uint32_t[]){THREADPTR}, }, { .name = "s16i", .translate = translate_ldst, .par = (const 
uint32_t[]){MO_TEUW, false, true}, .op_flags = XTENSA_OP_STORE, }, { .name = "s32c1i", .translate = translate_s32c1i, .op_flags = XTENSA_OP_LOAD | XTENSA_OP_STORE, }, { .name = "s32e", .translate = translate_s32e, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_STORE, }, { .name = (const char * const[]) { "s32i", "s32i.n", "s32nb", NULL, }, .translate = translate_ldst, .par = (const uint32_t[]){MO_TEUL, false, true}, .op_flags = XTENSA_OP_NAME_ARRAY | XTENSA_OP_STORE, }, { .name = "s32ri", .translate = translate_ldst, .par = (const uint32_t[]){MO_TEUL, true, true}, .op_flags = XTENSA_OP_STORE, }, { .name = "s8i", .translate = translate_ldst, .par = (const uint32_t[]){MO_UB, false, true}, .op_flags = XTENSA_OP_STORE, }, { .name = "salt", .translate = translate_salt, .par = (const uint32_t[]){TCG_COND_LT}, }, { .name = "saltu", .translate = translate_salt, .par = (const uint32_t[]){TCG_COND_LTU}, }, { .name = "setb_expstate", .translate = translate_setb_expstate, }, { .name = "sext", .translate = translate_sext, }, { .name = "simcall", .translate = translate_simcall, .test_ill = test_ill_simcall, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "sll", .translate = translate_sll, }, { .name = "slli", .translate = translate_slli, }, { .name = "sra", .translate = translate_sra, }, { .name = "srai", .translate = translate_srai, }, { .name = "src", .translate = translate_src, }, { .name = "srl", .translate = translate_srl, }, { .name = "srli", .translate = translate_srli, }, { .name = "ssa8b", .translate = translate_ssa8b, }, { .name = "ssa8l", .translate = translate_ssa8l, }, { .name = "ssai", .translate = translate_ssai, }, { .name = "ssl", .translate = translate_ssl, }, { .name = "ssr", .translate = translate_ssr, }, { .name = "sub", .translate = translate_sub, }, { .name = "subx2", .translate = translate_subx, .par = (const uint32_t[]){1}, }, { .name = "subx4", .translate = translate_subx, .par = (const uint32_t[]){2}, }, { .name = "subx8", .translate = translate_subx, 
.par = (const uint32_t[]){3}, }, { .name = "syscall", .op_flags = XTENSA_OP_SYSCALL, }, { .name = "umul.aa.hh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_UMUL, MAC16_HH, 0}, }, { .name = "umul.aa.hl", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_UMUL, MAC16_HL, 0}, }, { .name = "umul.aa.lh", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_UMUL, MAC16_LH, 0}, }, { .name = "umul.aa.ll", .translate = translate_mac16, .par = (const uint32_t[]){MAC16_UMUL, MAC16_LL, 0}, }, { .name = "waiti", .translate = translate_waiti, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "wdtlb", .translate = translate_wtlb, .par = (const uint32_t[]){true}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1, }, { .name = "wer", .translate = translate_wer, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "witlb", .translate = translate_wtlb, .par = (const uint32_t[]){false}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1, }, { .name = "wrmsk_expstate", .translate = translate_wrmsk_expstate, }, { .name = "wsr.176", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){176}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.208", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){208}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.acchi", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){ACCHI}, }, { .name = "wsr.acclo", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){ACCLO}, }, { .name = "wsr.atomctl", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){ATOMCTL}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.br", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){BR}, }, { .name = "wsr.cacheattr", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){CACHEATTR}, .op_flags = XTENSA_OP_PRIVILEGED, }, 
{ .name = "wsr.ccompare0", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){CCOMPARE}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "wsr.ccompare1", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){CCOMPARE + 1}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "wsr.ccompare2", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){CCOMPARE + 2}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "wsr.ccount", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){CCOUNT}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "wsr.configid0", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){CONFIGID0}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.configid1", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){CONFIGID1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.cpenable", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){CPENABLE}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1, }, { .name = "wsr.dbreaka0", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){DBREAKA}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.dbreaka1", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){DBREAKA + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.dbreakc0", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){DBREAKC}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.dbreakc1", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){DBREAKC + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.ddr", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){DDR}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.debugcause", 
.translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){DEBUGCAUSE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.depc", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){DEPC}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.dtlbcfg", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){DTLBCFG}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.epc1", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EPC1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.epc2", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EPC1 + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.epc3", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EPC1 + 2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.epc4", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EPC1 + 3}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.epc5", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EPC1 + 4}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.epc6", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EPC1 + 5}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.epc7", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EPC1 + 6}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.eps2", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EPS2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.eps3", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EPS2 + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.eps4", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EPS2 + 2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.eps5", .translate = translate_wsr, .test_ill = test_ill_wsr, 
.par = (const uint32_t[]){EPS2 + 3}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.eps6", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EPS2 + 4}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.eps7", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EPS2 + 5}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.exccause", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EXCCAUSE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.excsave1", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EXCSAVE1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.excsave2", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EXCSAVE1 + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.excsave3", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EXCSAVE1 + 2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.excsave4", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EXCSAVE1 + 3}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.excsave5", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EXCSAVE1 + 4}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.excsave6", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EXCSAVE1 + 5}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.excsave7", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EXCSAVE1 + 6}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.excvaddr", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){EXCVADDR}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.ibreaka0", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){IBREAKA}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "wsr.ibreaka1", .translate = 
translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){IBREAKA + 1}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "wsr.ibreakenable", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){IBREAKENABLE}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "wsr.icount", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){ICOUNT}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.icountlevel", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){ICOUNTLEVEL}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1, }, { .name = "wsr.intclear", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){INTCLEAR}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0 | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "wsr.intenable", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){INTENABLE}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0 | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "wsr.interrupt", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){INTSET}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0 | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "wsr.intset", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){INTSET}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0 | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "wsr.itlbcfg", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){ITLBCFG}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.lbeg", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){LBEG}, .op_flags = XTENSA_OP_EXIT_TB_M1, }, { .name = "wsr.lcount", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){LCOUNT}, }, { .name = "wsr.lend", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){LEND}, 
.op_flags = XTENSA_OP_EXIT_TB_M1, }, { .name = "wsr.litbase", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){LITBASE}, .op_flags = XTENSA_OP_EXIT_TB_M1, }, { .name = "wsr.m0", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){MR}, }, { .name = "wsr.m1", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){MR + 1}, }, { .name = "wsr.m2", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){MR + 2}, }, { .name = "wsr.m3", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){MR + 3}, }, { .name = "wsr.memctl", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){MEMCTL}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.misc0", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){MISC}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.misc1", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){MISC + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.misc2", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){MISC + 2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.misc3", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){MISC + 3}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.mmid", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){MMID}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.prefctl", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){PREFCTL}, }, { .name = "wsr.prid", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){PRID}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.ps", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){PS}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1 | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = 
"wsr.ptevaddr", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){PTEVADDR}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.rasid", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){RASID}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1, }, { .name = "wsr.sar", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){SAR}, }, { .name = "wsr.scompare1", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){SCOMPARE1}, }, { .name = "wsr.vecbase", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){VECBASE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "wsr.windowbase", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){WINDOW_BASE}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1 | XTENSA_OP_SYNC_REGISTER_WINDOW, }, { .name = "wsr.windowstart", .translate = translate_wsr, .test_ill = test_ill_wsr, .par = (const uint32_t[]){WINDOW_START}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1, }, { .name = "wur.expstate", .translate = translate_wur, .par = (const uint32_t[]){EXPSTATE}, }, { .name = "wur.fcr", .translate = translate_wur, .par = (const uint32_t[]){FCR}, .coprocessor = 0x1, }, { .name = "wur.fsr", .translate = translate_wur, .par = (const uint32_t[]){FSR}, .coprocessor = 0x1, }, { .name = "wur.threadptr", .translate = translate_wur, .par = (const uint32_t[]){THREADPTR}, }, { .name = "xor", .translate = translate_xor, }, { .name = "xorb", .translate = translate_boolean, .par = (const uint32_t[]){BOOLEAN_XOR}, }, { .name = "xsr.176", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){176}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.208", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){208}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.acchi", .translate = translate_xsr, .test_ill = 
test_ill_xsr, .par = (const uint32_t[]){ACCHI}, }, { .name = "xsr.acclo", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){ACCLO}, }, { .name = "xsr.atomctl", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){ATOMCTL}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.br", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){BR}, }, { .name = "xsr.cacheattr", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){CACHEATTR}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.ccompare0", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){CCOMPARE}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "xsr.ccompare1", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){CCOMPARE + 1}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "xsr.ccompare2", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){CCOMPARE + 2}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "xsr.ccount", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){CCOUNT}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "xsr.configid0", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){CONFIGID0}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.configid1", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){CONFIGID1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.cpenable", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){CPENABLE}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1, }, { .name = "xsr.dbreaka0", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){DBREAKA}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.dbreaka1", .translate = translate_xsr, .test_ill = 
test_ill_xsr, .par = (const uint32_t[]){DBREAKA + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.dbreakc0", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){DBREAKC}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.dbreakc1", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){DBREAKC + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.ddr", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){DDR}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.debugcause", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){DEBUGCAUSE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.depc", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){DEPC}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.dtlbcfg", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){DTLBCFG}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.epc1", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EPC1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.epc2", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EPC1 + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.epc3", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EPC1 + 2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.epc4", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EPC1 + 3}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.epc5", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EPC1 + 4}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.epc6", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EPC1 + 5}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.epc7", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const 
uint32_t[]){EPC1 + 6}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.eps2", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EPS2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.eps3", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EPS2 + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.eps4", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EPS2 + 2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.eps5", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EPS2 + 3}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.eps6", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EPS2 + 4}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.eps7", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EPS2 + 5}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.exccause", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EXCCAUSE}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.excsave1", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EXCSAVE1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.excsave2", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EXCSAVE1 + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.excsave3", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EXCSAVE1 + 2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.excsave4", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EXCSAVE1 + 3}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.excsave5", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EXCSAVE1 + 4}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.excsave6", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const 
uint32_t[]){EXCSAVE1 + 5}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.excsave7", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EXCSAVE1 + 6}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.excvaddr", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){EXCVADDR}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.ibreaka0", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){IBREAKA}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "xsr.ibreaka1", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){IBREAKA + 1}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "xsr.ibreakenable", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){IBREAKENABLE}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0, }, { .name = "xsr.icount", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){ICOUNT}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.icountlevel", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){ICOUNTLEVEL}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1, }, { .name = "xsr.intclear", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){INTCLEAR}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0 | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "xsr.intenable", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){INTENABLE}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0 | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "xsr.interrupt", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){INTSET}, .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0 | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "xsr.intset", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){INTSET}, .op_flags = 
XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_0 | XTENSA_OP_CHECK_INTERRUPTS, }, { .name = "xsr.itlbcfg", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){ITLBCFG}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.lbeg", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){LBEG}, .op_flags = XTENSA_OP_EXIT_TB_M1, }, { .name = "xsr.lcount", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){LCOUNT}, }, { .name = "xsr.lend", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){LEND}, .op_flags = XTENSA_OP_EXIT_TB_M1, }, { .name = "xsr.litbase", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){LITBASE}, .op_flags = XTENSA_OP_EXIT_TB_M1, }, { .name = "xsr.m0", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){MR}, }, { .name = "xsr.m1", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){MR + 1}, }, { .name = "xsr.m2", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){MR + 2}, }, { .name = "xsr.m3", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){MR + 3}, }, { .name = "xsr.memctl", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){MEMCTL}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.misc0", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){MISC}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.misc1", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){MISC + 1}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.misc2", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){MISC + 2}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = "xsr.misc3", .translate = translate_xsr, .test_ill = test_ill_xsr, .par = (const uint32_t[]){MISC + 3}, .op_flags = XTENSA_OP_PRIVILEGED, }, { .name = 
"xsr.prefctl", .translate = translate_xsr,
        .test_ill = test_ill_xsr,
        .par = (const uint32_t[]){PREFCTL},
    }, {
        .name = "xsr.prid",
        .translate = translate_xsr,
        .test_ill = test_ill_xsr,
        .par = (const uint32_t[]){PRID},
        .op_flags = XTENSA_OP_PRIVILEGED,
    }, {
        .name = "xsr.ps",
        .translate = translate_xsr,
        .test_ill = test_ill_xsr,
        .par = (const uint32_t[]){PS},
        /* PS writes may unmask pending interrupts; stop translation and
         * recheck them after this instruction.
         */
        .op_flags = XTENSA_OP_PRIVILEGED |
            XTENSA_OP_EXIT_TB_M1 |
            XTENSA_OP_CHECK_INTERRUPTS,
    }, {
        .name = "xsr.ptevaddr",
        .translate = translate_xsr,
        .test_ill = test_ill_xsr,
        .par = (const uint32_t[]){PTEVADDR},
        .op_flags = XTENSA_OP_PRIVILEGED,
    }, {
        .name = "xsr.rasid",
        .translate = translate_xsr,
        .test_ill = test_ill_xsr,
        .par = (const uint32_t[]){RASID},
        .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
    }, {
        .name = "xsr.sar",
        .translate = translate_xsr,
        .test_ill = test_ill_xsr,
        .par = (const uint32_t[]){SAR},
    }, {
        .name = "xsr.scompare1",
        .translate = translate_xsr,
        .test_ill = test_ill_xsr,
        .par = (const uint32_t[]){SCOMPARE1},
    }, {
        .name = "xsr.vecbase",
        .translate = translate_xsr,
        .test_ill = test_ill_xsr,
        .par = (const uint32_t[]){VECBASE},
        .op_flags = XTENSA_OP_PRIVILEGED,
    }, {
        .name = "xsr.windowbase",
        .translate = translate_xsr,
        .test_ill = test_ill_xsr,
        .par = (const uint32_t[]){WINDOW_BASE},
        .op_flags = XTENSA_OP_PRIVILEGED |
            XTENSA_OP_EXIT_TB_M1 |
            XTENSA_OP_SYNC_REGISTER_WINDOW,
    }, {
        .name = "xsr.windowstart",
        .translate = translate_xsr,
        .test_ill = test_ill_xsr,
        .par = (const uint32_t[]){WINDOW_START},
        .op_flags = XTENSA_OP_PRIVILEGED | XTENSA_OP_EXIT_TB_M1,
    },
};

/* Core ISA opcode table exported to the decoder. */
const XtensaOpcodeTranslators xtensa_core_opcodes = {
    .num_opcodes = ARRAY_SIZE(core_ops),
    .opcode = core_ops,
};

/*
 * FPU2000 coprocessor translate helpers.
 *
 * All helpers share the common signature (dc, arg[], par[]):
 * arg[] carries the decoded operands (.in/.out TCG values, .imm
 * immediates), par[] carries per-opcode constants from the fpu2000_ops
 * table below.
 */

/* abs.s: arg[0] = |arg[1]| via the abs_s helper. */
static void translate_abs_s(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    gen_helper_abs_s(arg[0].out, arg[1].in);
}

/* add.s: arg[0] = arg[1] + arg[2]; helper takes cpu_env for FP state. */
static void translate_add_s(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    gen_helper_add_s(arg[0].out, cpu_env, arg[1].in, arg[2].in);
}

/* Comparison predicate selectors; passed as par[0] to translate_compare_s. */
enum {
    COMPARE_UN,
    COMPARE_OEQ,
    COMPARE_UEQ,
    COMPARE_OLT,
    COMPARE_ULT,
    COMPARE_OLE,
    COMPARE_ULE,
};

/*
 * FP compare: dispatch on par[0] to the matching compare helper.
 * The helper receives a mask (1 << arg[0].imm) selecting the boolean
 * register bit to update with the comparison result.
 */
static void translate_compare_s(DisasContext *dc, const OpcodeArg arg[],
                                const uint32_t par[])
{
    static void (* const helper[])(TCGv_env env, TCGv_i32 bit,
                                   TCGv_i32 s, TCGv_i32 t) = {
        [COMPARE_UN] = gen_helper_un_s,
        [COMPARE_OEQ] = gen_helper_oeq_s,
        [COMPARE_UEQ] = gen_helper_ueq_s,
        [COMPARE_OLT] = gen_helper_olt_s,
        [COMPARE_ULT] = gen_helper_ult_s,
        [COMPARE_OLE] = gen_helper_ole_s,
        [COMPARE_ULE] = gen_helper_ule_s,
    };
    TCGv_i32 bit = tcg_const_i32(1 << arg[0].imm);

    helper[par[0]](cpu_env, bit, arg[1].in, arg[2].in);
    tcg_temp_free(bit);
}

/*
 * float.s/ufloat.s: integer-to-float conversion with scale -arg[2].imm;
 * par[0] selects unsigned (uitof) vs signed (itof) conversion.
 */
static void translate_float_s(DisasContext *dc, const OpcodeArg arg[],
                              const uint32_t par[])
{
    TCGv_i32 scale = tcg_const_i32(-arg[2].imm);

    if (par[0]) {
        gen_helper_uitof(arg[0].out, cpu_env, arg[1].in, scale);
    } else {
        gen_helper_itof(arg[0].out, cpu_env, arg[1].in, scale);
    }
    tcg_temp_free(scale);
}

/*
 * Float-to-integer conversion: rounding mode in par[0], scale in
 * arg[2].imm; par[1] selects unsigned (ftoui) vs signed (ftoi) result.
 * Shared by ceil.s/floor.s/round.s/trunc.s/utrunc.s via the table.
 */
static void translate_ftoi_s(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    TCGv_i32 rounding_mode = tcg_const_i32(par[0]);
    TCGv_i32 scale = tcg_const_i32(arg[2].imm);

    if (par[1]) {
        gen_helper_ftoui(arg[0].out, arg[1].in, rounding_mode, scale);
    } else {
        gen_helper_ftoi(arg[0].out, arg[1].in, rounding_mode, scale);
    }
    tcg_temp_free(rounding_mode);
    tcg_temp_free(scale);
}

/*
 * FP load/store with immediate offset (lsi/lsiu/ssi/ssiu):
 * address = arg[1] + arg[2].imm; par[0] selects store vs load,
 * par[1] enables address writeback into arg[1].out.
 */
static void translate_ldsti(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 addr = tcg_temp_new_i32();

    tcg_gen_addi_i32(addr, arg[1].in, arg[2].imm);
    gen_load_store_alignment(dc, 2, addr, false);
    if (par[0]) {
        tcg_gen_qemu_st32(arg[0].in, addr, dc->cring);
    } else {
        tcg_gen_qemu_ld32u(arg[0].out, addr, dc->cring);
    }
    if (par[1]) {
        tcg_gen_mov_i32(arg[1].out, addr);
    }
    tcg_temp_free(addr);
}

/*
 * FP load/store with register index (lsx/lsxu/ssx/ssxu):
 * address = arg[1] + arg[2]; par[0]/par[1] as in translate_ldsti.
 */
static void translate_ldstx(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    TCGv_i32 addr = tcg_temp_new_i32();

    tcg_gen_add_i32(addr, arg[1].in, arg[2].in);
    gen_load_store_alignment(dc, 2, addr, false);
    if (par[0]) {
        tcg_gen_qemu_st32(arg[0].in, addr, dc->cring);
    } else {
        tcg_gen_qemu_ld32u(arg[0].out, addr, dc->cring);
    }
    if (par[1]) {
        tcg_gen_mov_i32(arg[1].out, addr);
    }
    tcg_temp_free(addr);
}

/* madd.s: arg[0] = helper(arg[0], arg[1], arg[2]) — multiply-add
 * accumulating into arg[0] (exact semantics in the madd_s helper).
 */
static void translate_madd_s(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    gen_helper_madd_s(arg[0].out, cpu_env, arg[0].in, arg[1].in,
                      arg[2].in);
}

/* mov.s: plain FP register copy. */
static void translate_mov_s(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    tcg_gen_mov_i32(arg[0].out, arg[1].in);
}

/*
 * moveqz.s/movgez.s/movltz.s/movnez.s: conditional FP move,
 * arg[0] = (arg[2] <par[0]-cond> 0) ? arg[1] : arg[0].
 */
static void translate_movcond_s(DisasContext *dc, const OpcodeArg arg[],
                                const uint32_t par[])
{
    TCGv_i32 zero = tcg_const_i32(0);

    tcg_gen_movcond_i32(par[0], arg[0].out, arg[2].in, zero,
                        arg[1].in, arg[0].in);
    tcg_temp_free(zero);
}

/*
 * movf.s/movt.s: FP move conditional on boolean register bit
 * arg[2].imm of arg[2].in; par[0] is the TCG condition against zero
 * (EQ for movf, NE for movt).
 */
static void translate_movp_s(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    TCGv_i32 zero = tcg_const_i32(0);
    TCGv_i32 tmp = tcg_temp_new_i32();

    tcg_gen_andi_i32(tmp, arg[2].in, 1 << arg[2].imm);
    tcg_gen_movcond_i32(par[0], arg[0].out, tmp, zero,
                        arg[1].in, arg[0].in);
    tcg_temp_free(tmp);
    tcg_temp_free(zero);
}

/* mul.s: arg[0] = arg[1] * arg[2]. */
static void translate_mul_s(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    gen_helper_mul_s(arg[0].out, cpu_env, arg[1].in, arg[2].in);
}

/* msub.s: multiply-subtract counterpart of madd.s. */
static void translate_msub_s(DisasContext *dc, const OpcodeArg arg[],
                             const uint32_t par[])
{
    gen_helper_msub_s(arg[0].out, cpu_env, arg[0].in, arg[1].in,
                      arg[2].in);
}

/* neg.s: arg[0] = -arg[1] via the neg_s helper. */
static void translate_neg_s(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    gen_helper_neg_s(arg[0].out, arg[1].in);
}

/* rfr: read FP register into an AR register (raw bit copy). */
static void translate_rfr_s(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    tcg_gen_mov_i32(arg[0].out, arg[1].in);
}

/* sub.s: arg[0] = arg[1] - arg[2]. */
static void translate_sub_s(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    gen_helper_sub_s(arg[0].out, cpu_env, arg[1].in, arg[2].in);
}

/* wfr: write AR register into an FP register (raw bit copy). */
static void translate_wfr_s(DisasContext *dc, const OpcodeArg arg[],
                            const uint32_t par[])
{
    tcg_gen_mov_i32(arg[0].out, arg[1].in);
}

/*
 * FPU2000 opcode table; every entry requires coprocessor 1
 * (.coprocessor = 0x1), checked before the translate callback runs.
 */
static const XtensaOpcodeOps fpu2000_ops[] = {
    {
        .name = "abs.s",
        .translate = translate_abs_s,
.coprocessor = 0x1,
    }, {
        .name = "add.s",
        .translate = translate_add_s,
        .coprocessor = 0x1,
    }, {
        /* Float-to-int conversions: par = {rounding mode, unsigned?}. */
        .name = "ceil.s",
        .translate = translate_ftoi_s,
        .par = (const uint32_t[]){float_round_up, false},
        .coprocessor = 0x1,
    }, {
        .name = "float.s",
        .translate = translate_float_s,
        .par = (const uint32_t[]){false},
        .coprocessor = 0x1,
    }, {
        .name = "floor.s",
        .translate = translate_ftoi_s,
        .par = (const uint32_t[]){float_round_down, false},
        .coprocessor = 0x1,
    }, {
        /* FP loads/stores: par = {is_store, writeback}. */
        .name = "lsi",
        .translate = translate_ldsti,
        .par = (const uint32_t[]){false, false},
        .op_flags = XTENSA_OP_LOAD,
        .coprocessor = 0x1,
    }, {
        .name = "lsiu",
        .translate = translate_ldsti,
        .par = (const uint32_t[]){false, true},
        .op_flags = XTENSA_OP_LOAD,
        .coprocessor = 0x1,
    }, {
        .name = "lsx",
        .translate = translate_ldstx,
        .par = (const uint32_t[]){false, false},
        .op_flags = XTENSA_OP_LOAD,
        .coprocessor = 0x1,
    }, {
        .name = "lsxu",
        .translate = translate_ldstx,
        .par = (const uint32_t[]){false, true},
        .op_flags = XTENSA_OP_LOAD,
        .coprocessor = 0x1,
    }, {
        .name = "madd.s",
        .translate = translate_madd_s,
        .coprocessor = 0x1,
    }, {
        .name = "mov.s",
        .translate = translate_mov_s,
        .coprocessor = 0x1,
    }, {
        /* Conditional moves: par = TCG condition vs zero. */
        .name = "moveqz.s",
        .translate = translate_movcond_s,
        .par = (const uint32_t[]){TCG_COND_EQ},
        .coprocessor = 0x1,
    }, {
        .name = "movf.s",
        .translate = translate_movp_s,
        .par = (const uint32_t[]){TCG_COND_EQ},
        .coprocessor = 0x1,
    }, {
        .name = "movgez.s",
        .translate = translate_movcond_s,
        .par = (const uint32_t[]){TCG_COND_GE},
        .coprocessor = 0x1,
    }, {
        .name = "movltz.s",
        .translate = translate_movcond_s,
        .par = (const uint32_t[]){TCG_COND_LT},
        .coprocessor = 0x1,
    }, {
        .name = "movnez.s",
        .translate = translate_movcond_s,
        .par = (const uint32_t[]){TCG_COND_NE},
        .coprocessor = 0x1,
    }, {
        .name = "movt.s",
        .translate = translate_movp_s,
        .par = (const uint32_t[]){TCG_COND_NE},
        .coprocessor = 0x1,
    }, {
        .name = "msub.s",
        .translate = translate_msub_s,
        .coprocessor = 0x1,
    }, {
        .name = "mul.s",
        .translate = translate_mul_s,
        .coprocessor = 0x1,
    }, {
        .name = "neg.s",
        .translate = translate_neg_s,
        .coprocessor = 0x1,
    }, {
        /* FP comparisons: par = predicate selector for translate_compare_s. */
        .name = "oeq.s",
        .translate = translate_compare_s,
        .par = (const uint32_t[]){COMPARE_OEQ},
        .coprocessor = 0x1,
    }, {
        .name = "ole.s",
        .translate = translate_compare_s,
        .par = (const uint32_t[]){COMPARE_OLE},
        .coprocessor = 0x1,
    }, {
        .name = "olt.s",
        .translate = translate_compare_s,
        .par = (const uint32_t[]){COMPARE_OLT},
        .coprocessor = 0x1,
    }, {
        .name = "rfr",
        .translate = translate_rfr_s,
        .coprocessor = 0x1,
    }, {
        .name = "round.s",
        .translate = translate_ftoi_s,
        .par = (const uint32_t[]){float_round_nearest_even, false},
        .coprocessor = 0x1,
    }, {
        .name = "ssi",
        .translate = translate_ldsti,
        .par = (const uint32_t[]){true, false},
        .op_flags = XTENSA_OP_STORE,
        .coprocessor = 0x1,
    }, {
        .name = "ssiu",
        .translate = translate_ldsti,
        .par = (const uint32_t[]){true, true},
        .op_flags = XTENSA_OP_STORE,
        .coprocessor = 0x1,
    }, {
        .name = "ssx",
        .translate = translate_ldstx,
        .par = (const uint32_t[]){true, false},
        .op_flags = XTENSA_OP_STORE,
        .coprocessor = 0x1,
    }, {
        .name = "ssxu",
        .translate = translate_ldstx,
        .par = (const uint32_t[]){true, true},
        .op_flags = XTENSA_OP_STORE,
        .coprocessor = 0x1,
    }, {
        .name = "sub.s",
        .translate = translate_sub_s,
        .coprocessor = 0x1,
    }, {
        .name = "trunc.s",
        .translate = translate_ftoi_s,
        .par = (const uint32_t[]){float_round_to_zero, false},
        .coprocessor = 0x1,
    }, {
        .name = "ueq.s",
        .translate = translate_compare_s,
        .par = (const uint32_t[]){COMPARE_UEQ},
        .coprocessor = 0x1,
    }, {
        .name = "ufloat.s",
        .translate = translate_float_s,
        .par = (const uint32_t[]){true},
        .coprocessor = 0x1,
    }, {
        .name = "ule.s",
        .translate = translate_compare_s,
        .par = (const uint32_t[]){COMPARE_ULE},
        .coprocessor = 0x1,
    }, {
        .name = "ult.s",
        .translate = translate_compare_s,
        .par = (const uint32_t[]){COMPARE_ULT},
        .coprocessor = 0x1,
    }, {
        .name = "un.s",
        .translate = translate_compare_s,
        .par = (const uint32_t[]){COMPARE_UN},
        .coprocessor = 0x1,
    }, {
        .name = "utrunc.s",
        .translate = translate_ftoi_s,
        .par = (const uint32_t[]){float_round_to_zero, true},
        .coprocessor = 0x1,
    }, {
        .name = "wfr",
        .translate = translate_wfr_s,
        .coprocessor = 0x1,
    },
};

/* FPU2000 opcode table exported to the decoder. */
const XtensaOpcodeTranslators xtensa_fpu2000_opcodes = {
    .num_opcodes = ARRAY_SIZE(fpu2000_ops),
    .opcode = fpu2000_ops,
};
pmp-tool/PMP
src/qemu/src-pmp/tests/check-qjson.c
<filename>src/qemu/src-pmp/tests/check-qjson.c /* * Copyright IBM, Corp. 2009 * Copyright (c) 2013, 2015 Red Hat Inc. * * Authors: * <NAME> <<EMAIL>> * <NAME> <<EMAIL>> * * This work is licensed under the terms of the GNU LGPL, version 2.1 or later. * See the COPYING.LIB file in the top-level directory. * */ #include "qemu/osdep.h" #include "qapi/error.h" #include "qapi/qmp/qbool.h" #include "qapi/qmp/qjson.h" #include "qapi/qmp/qlit.h" #include "qapi/qmp/qnull.h" #include "qapi/qmp/qnum.h" #include "qapi/qmp/qstring.h" #include "qemu/unicode.h" #include "qemu-common.h" static QString *from_json_str(const char *jstr, bool single, Error **errp) { char quote = single ? '\'' : '"'; char *qjstr = g_strdup_printf("%c%s%c", quote, jstr, quote); QString *ret = qobject_to(QString, qobject_from_json(qjstr, errp)); g_free(qjstr); return ret; } static char *to_json_str(QString *str) { QString *json = qobject_to_json(QOBJECT(str)); char *jstr; if (!json) { return NULL; } /* peel off double quotes */ jstr = g_strndup(qstring_get_str(json) + 1, qstring_get_length(json) - 2); qobject_unref(json); return jstr; } static void escaped_string(void) { struct { /* Content of JSON string to parse with qobject_from_json() */ const char *json_in; /* Expected parse output; to unparse with qobject_to_json() */ const char *utf8_out; int skip; } test_cases[] = { { "\\b\\f\\n\\r\\t\\\\\\\"", "\b\f\n\r\t\\\"" }, { "\\/\\'", "/'", .skip = 1 }, { "single byte utf-8 \\u0020", "single byte utf-8 ", .skip = 1 }, { "double byte utf-8 \\u00A2", "double byte utf-8 \xc2\xa2" }, { "triple byte utf-8 \\u20AC", "triple byte utf-8 \xe2\x82\xac" }, { "quadruple byte utf-8 \\uD834\\uDD1E", /* U+1D11E */ "quadruple byte utf-8 \xF0\x9D\x84\x9E" }, { "\\", NULL }, { "\\z", NULL }, { "\\ux", NULL }, { "\\u1x", NULL }, { "\\u12x", NULL }, { "\\u123x", NULL }, { "\\u12345", "\341\210\2645" }, { "\\u0000x", "\xC0\x80x" }, { "unpaired leading surrogate \\uD800", NULL }, { "unpaired leading surrogate \\uD800\\uCAFE", 
NULL }, { "unpaired leading surrogate \\uD800\\uD801\\uDC02", NULL }, { "unpaired trailing surrogate \\uDC00", NULL }, { "backward surrogate pair \\uDC00\\uD800", NULL }, { "noncharacter U+FDD0 \\uFDD0", NULL }, { "noncharacter U+FDEF \\uFDEF", NULL }, { "noncharacter U+1FFFE \\uD87F\\uDFFE", NULL }, { "noncharacter U+10FFFF \\uDC3F\\uDFFF", NULL }, {} }; int i, j; QString *cstr; char *jstr; for (i = 0; test_cases[i].json_in; i++) { for (j = 0; j < 2; j++) { if (test_cases[i].utf8_out) { cstr = from_json_str(test_cases[i].json_in, j, &error_abort); g_assert_cmpstr(qstring_get_try_str(cstr), ==, test_cases[i].utf8_out); if (!test_cases[i].skip) { jstr = to_json_str(cstr); g_assert_cmpstr(jstr, ==, test_cases[i].json_in); g_free(jstr); } qobject_unref(cstr); } else { cstr = from_json_str(test_cases[i].json_in, j, NULL); g_assert(!cstr); } } } } static void string_with_quotes(void) { const char *test_cases[] = { "\"the bee's knees\"", "'double quote \"'", NULL }; int i; QString *str; char *cstr; for (i = 0; test_cases[i]; i++) { str = qobject_to(QString, qobject_from_json(test_cases[i], &error_abort)); g_assert(str); cstr = g_strndup(test_cases[i] + 1, strlen(test_cases[i]) - 2); g_assert_cmpstr(qstring_get_str(str), ==, cstr); g_free(cstr); qobject_unref(str); } } static void utf8_string(void) { /* * Most test cases are scraped from <NAME>'s UTF-8 decoder * capability and stress test at * http://www.cl.cam.ac.uk/~mgk25/ucs/examples/UTF-8-test.txt */ static const struct { /* Content of JSON string to parse with qobject_from_json() */ const char *json_in; /* Expected parse output */ const char *utf8_out; /* Expected unparse output, defaults to @json_in */ const char *json_out; } test_cases[] = { /* 0 Control characters */ { /* * Note: \x00 is impossible, other representations of * U+0000 are covered under 4.3 */ "\x01\x02\x03\x04\x05\x06\x07" "\x08\x09\x0A\x0B\x0C\x0D\x0E\x0F" "\x10\x11\x12\x13\x14\x15\x16\x17" "\x18\x19\x1A\x1B\x1C\x1D\x1E\x1F", NULL, 
"\\u0001\\u0002\\u0003\\u0004\\u0005\\u0006\\u0007" "\\b\\t\\n\\u000B\\f\\r\\u000E\\u000F" "\\u0010\\u0011\\u0012\\u0013\\u0014\\u0015\\u0016\\u0017" "\\u0018\\u0019\\u001A\\u001B\\u001C\\u001D\\u001E\\u001F", }, /* 1 Some correct UTF-8 text */ { /* a bit of German */ "Falsches \xC3\x9C" "ben von Xylophonmusik qu\xC3\xA4lt" " jeden gr\xC3\xB6\xC3\x9F" "eren Zwerg.", "Falsches \xC3\x9C" "ben von Xylophonmusik qu\xC3\xA4lt" " jeden gr\xC3\xB6\xC3\x9F" "eren Zwerg.", "Falsches \\u00DCben von Xylophonmusik qu\\u00E4lt" " jeden gr\\u00F6\\u00DFeren Zwerg.", }, { /* a bit of Greek */ "\xCE\xBA\xE1\xBD\xB9\xCF\x83\xCE\xBC\xCE\xB5", "\xCE\xBA\xE1\xBD\xB9\xCF\x83\xCE\xBC\xCE\xB5", "\\u03BA\\u1F79\\u03C3\\u03BC\\u03B5", }, /* '%' character when not interpolating */ { "100%", "100%", }, /* 2 Boundary condition test cases */ /* 2.1 First possible sequence of a certain length */ /* * 2.1.1 1 byte U+0020 * Control characters are already covered by their own test * case under 0. Test the first 1 byte non-control character * here. */ { " ", " ", }, /* 2.1.2 2 bytes U+0080 */ { "\xC2\x80", "\xC2\x80", "\\u0080", }, /* 2.1.3 3 bytes U+0800 */ { "\xE0\xA0\x80", "\xE0\xA0\x80", "\\u0800", }, /* 2.1.4 4 bytes U+10000 */ { "\xF0\x90\x80\x80", "\xF0\x90\x80\x80", "\\uD800\\uDC00", }, /* 2.1.5 5 bytes U+200000 */ { "\xF8\x88\x80\x80\x80", NULL, "\\uFFFD", }, /* 2.1.6 6 bytes U+4000000 */ { "\xFC\x84\x80\x80\x80\x80", NULL, "\\uFFFD", }, /* 2.2 Last possible sequence of a certain length */ /* 2.2.1 1 byte U+007F */ { "\x7F", "\x7F", "\\u007F", }, /* 2.2.2 2 bytes U+07FF */ { "\xDF\xBF", "\xDF\xBF", "\\u07FF", }, /* * 2.2.3 3 bytes U+FFFC * The last possible sequence is actually U+FFFF. But that's * a noncharacter, and already covered by its own test case * under 5.3. Same for U+FFFE. U+FFFD is the last character * in the BMP, and covered under 2.3. Because of U+FFFD's * special role as replacement character, it's worth testing * U+FFFC here. 
*/ { "\xEF\xBF\xBC", "\xEF\xBF\xBC", "\\uFFFC", }, /* 2.2.4 4 bytes U+1FFFFF */ { "\xF7\xBF\xBF\xBF", NULL, "\\uFFFD", }, /* 2.2.5 5 bytes U+3FFFFFF */ { "\xFB\xBF\xBF\xBF\xBF", NULL, "\\uFFFD", }, /* 2.2.6 6 bytes U+7FFFFFFF */ { "\xFD\xBF\xBF\xBF\xBF\xBF", NULL, "\\uFFFD", }, /* 2.3 Other boundary conditions */ { /* last one before surrogate range: U+D7FF */ "\xED\x9F\xBF", "\xED\x9F\xBF", "\\uD7FF", }, { /* first one after surrogate range: U+E000 */ "\xEE\x80\x80", "\xEE\x80\x80", "\\uE000", }, { /* last one in BMP: U+FFFD */ "\xEF\xBF\xBD", "\xEF\xBF\xBD", "\\uFFFD", }, { /* last one in last plane: U+10FFFD */ "\xF4\x8F\xBF\xBD", "\xF4\x8F\xBF\xBD", "\\uDBFF\\uDFFD" }, { /* first one beyond Unicode range: U+110000 */ "\xF4\x90\x80\x80", NULL, "\\uFFFD", }, /* 3 Malformed sequences */ /* 3.1 Unexpected continuation bytes */ /* 3.1.1 First continuation byte */ { "\x80", NULL, "\\uFFFD", }, /* 3.1.2 Last continuation byte */ { "\xBF", NULL, "\\uFFFD", }, /* 3.1.3 2 continuation bytes */ { "\x80\xBF", NULL, "\\uFFFD\\uFFFD", }, /* 3.1.4 3 continuation bytes */ { "\x80\xBF\x80", NULL, "\\uFFFD\\uFFFD\\uFFFD", }, /* 3.1.5 4 continuation bytes */ { "\x80\xBF\x80\xBF", NULL, "\\uFFFD\\uFFFD\\uFFFD\\uFFFD", }, /* 3.1.6 5 continuation bytes */ { "\x80\xBF\x80\xBF\x80", NULL, "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD", }, /* 3.1.7 6 continuation bytes */ { "\x80\xBF\x80\xBF\x80\xBF", NULL, "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD", }, /* 3.1.8 7 continuation bytes */ { "\x80\xBF\x80\xBF\x80\xBF\x80", NULL, "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD", }, /* 3.1.9 Sequence of all 64 possible continuation bytes */ { "\x80\x81\x82\x83\x84\x85\x86\x87" "\x88\x89\x8A\x8B\x8C\x8D\x8E\x8F" "\x90\x91\x92\x93\x94\x95\x96\x97" "\x98\x99\x9A\x9B\x9C\x9D\x9E\x9F" "\xA0\xA1\xA2\xA3\xA4\xA5\xA6\xA7" "\xA8\xA9\xAA\xAB\xAC\xAD\xAE\xAF" "\xB0\xB1\xB2\xB3\xB4\xB5\xB6\xB7" "\xB8\xB9\xBA\xBB\xBC\xBD\xBE\xBF", NULL, "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD" 
"\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD" "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD" "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD" "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD" "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD" "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD" "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD", }, /* 3.2 Lonely start characters */ /* 3.2.1 All 32 first bytes of 2-byte sequences, followed by space */ { "\xC0 \xC1 \xC2 \xC3 \xC4 \xC5 \xC6 \xC7 " "\xC8 \xC9 \xCA \xCB \xCC \xCD \xCE \xCF " "\xD0 \xD1 \xD2 \xD3 \xD4 \xD5 \xD6 \xD7 " "\xD8 \xD9 \xDA \xDB \xDC \xDD \xDE \xDF ", NULL, "\\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD " "\\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD " "\\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD " "\\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD ", }, /* 3.2.2 All 16 first bytes of 3-byte sequences, followed by space */ { "\xE0 \xE1 \xE2 \xE3 \xE4 \xE5 \xE6 \xE7 " "\xE8 \xE9 \xEA \xEB \xEC \xED \xEE \xEF ", NULL, "\\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD " "\\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD ", }, /* 3.2.3 All 8 first bytes of 4-byte sequences, followed by space */ { "\xF0 \xF1 \xF2 \xF3 \xF4 \xF5 \xF6 \xF7 ", NULL, "\\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD \\uFFFD ", }, /* 3.2.4 All 4 first bytes of 5-byte sequences, followed by space */ { "\xF8 \xF9 \xFA \xFB ", NULL, "\\uFFFD \\uFFFD \\uFFFD \\uFFFD ", }, /* 3.2.5 All 2 first bytes of 6-byte sequences, followed by space */ { "\xFC \xFD ", NULL, "\\uFFFD \\uFFFD ", }, /* 3.3 Sequences with last continuation byte missing */ /* 3.3.1 2-byte sequence with last byte missing (U+0000) */ { "\xC0", NULL, "\\uFFFD", }, /* 3.3.2 3-byte sequence with last byte missing (U+0000) */ { "\xE0\x80", NULL, "\\uFFFD", }, /* 3.3.3 4-byte sequence with last 
byte missing (U+0000) */ { "\xF0\x80\x80", NULL, "\\uFFFD", }, /* 3.3.4 5-byte sequence with last byte missing (U+0000) */ { "\xF8\x80\x80\x80", NULL, "\\uFFFD", }, /* 3.3.5 6-byte sequence with last byte missing (U+0000) */ { "\xFC\x80\x80\x80\x80", NULL, "\\uFFFD", }, /* 3.3.6 2-byte sequence with last byte missing (U+07FF) */ { "\xDF", NULL, "\\uFFFD", }, /* 3.3.7 3-byte sequence with last byte missing (U+FFFF) */ { "\xEF\xBF", NULL, "\\uFFFD", }, /* 3.3.8 4-byte sequence with last byte missing (U+1FFFFF) */ { "\xF7\xBF\xBF", NULL, "\\uFFFD", }, /* 3.3.9 5-byte sequence with last byte missing (U+3FFFFFF) */ { "\xFB\xBF\xBF\xBF", NULL, "\\uFFFD", }, /* 3.3.10 6-byte sequence with last byte missing (U+7FFFFFFF) */ { "\xFD\xBF\xBF\xBF\xBF", NULL, "\\uFFFD", }, /* 3.4 Concatenation of incomplete sequences */ { "\xC0\xE0\x80\xF0\x80\x80\xF8\x80\x80\x80\xFC\x80\x80\x80\x80" "\xDF\xEF\xBF\xF7\xBF\xBF\xFB\xBF\xBF\xBF\xFD\xBF\xBF\xBF\xBF", NULL, "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD" "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD", }, /* 3.5 Impossible bytes */ { "\xFE", NULL, "\\uFFFD", }, { "\xFF", NULL, "\\uFFFD", }, { "\xFE\xFE\xFF\xFF", NULL, "\\uFFFD\\uFFFD\\uFFFD\\uFFFD", }, /* 4 Overlong sequences */ /* 4.1 Overlong '/' */ { "\xC0\xAF", NULL, "\\uFFFD", }, { "\xE0\x80\xAF", NULL, "\\uFFFD", }, { "\xF0\x80\x80\xAF", NULL, "\\uFFFD", }, { "\xF8\x80\x80\x80\xAF", NULL, "\\uFFFD", }, { "\xFC\x80\x80\x80\x80\xAF", NULL, "\\uFFFD", }, /* * 4.2 Maximum overlong sequences * Highest Unicode value that is still resulting in an * overlong sequence if represented with the given number of * bytes. This is a boundary test for safe UTF-8 decoders. */ { /* \U+007F */ "\xC1\xBF", NULL, "\\uFFFD", }, { /* \U+07FF */ "\xE0\x9F\xBF", NULL, "\\uFFFD", }, { /* * \U+FFFC * The actual maximum would be U+FFFF, but that's a * noncharacter. Testing U+FFFC seems more useful. 
See * also 2.2.3 */ "\xF0\x8F\xBF\xBC", NULL, "\\uFFFD", }, { /* \U+1FFFFF */ "\xF8\x87\xBF\xBF\xBF", NULL, "\\uFFFD", }, { /* \U+3FFFFFF */ "\xFC\x83\xBF\xBF\xBF\xBF", NULL, "\\uFFFD", }, /* 4.3 Overlong representation of the NUL character */ { /* \U+0000 */ "\xC0\x80", "\xC0\x80", "\\u0000", }, { /* \U+0000 */ "\xE0\x80\x80", NULL, "\\uFFFD", }, { /* \U+0000 */ "\xF0\x80\x80\x80", NULL, "\\uFFFD", }, { /* \U+0000 */ "\xF8\x80\x80\x80\x80", NULL, "\\uFFFD", }, { /* \U+0000 */ "\xFC\x80\x80\x80\x80\x80", NULL, "\\uFFFD", }, /* 5 Illegal code positions */ /* 5.1 Single UTF-16 surrogates */ { /* \U+D800 */ "\xED\xA0\x80", NULL, "\\uFFFD", }, { /* \U+DB7F */ "\xED\xAD\xBF", NULL, "\\uFFFD", }, { /* \U+DB80 */ "\xED\xAE\x80", NULL, "\\uFFFD", }, { /* \U+DBFF */ "\xED\xAF\xBF", NULL, "\\uFFFD", }, { /* \U+DC00 */ "\xED\xB0\x80", NULL, "\\uFFFD", }, { /* \U+DF80 */ "\xED\xBE\x80", NULL, "\\uFFFD", }, { /* \U+DFFF */ "\xED\xBF\xBF", NULL, "\\uFFFD", }, /* 5.2 Paired UTF-16 surrogates */ { /* \U+D800\U+DC00 */ "\xED\xA0\x80\xED\xB0\x80", NULL, "\\uFFFD\\uFFFD", }, { /* \U+D800\U+DFFF */ "\xED\xA0\x80\xED\xBF\xBF", NULL, "\\uFFFD\\uFFFD", }, { /* \U+DB7F\U+DC00 */ "\xED\xAD\xBF\xED\xB0\x80", NULL, "\\uFFFD\\uFFFD", }, { /* \U+DB7F\U+DFFF */ "\xED\xAD\xBF\xED\xBF\xBF", NULL, "\\uFFFD\\uFFFD", }, { /* \U+DB80\U+DC00 */ "\xED\xAE\x80\xED\xB0\x80", NULL, "\\uFFFD\\uFFFD", }, { /* \U+DB80\U+DFFF */ "\xED\xAE\x80\xED\xBF\xBF", NULL, "\\uFFFD\\uFFFD", }, { /* \U+DBFF\U+DC00 */ "\xED\xAF\xBF\xED\xB0\x80", NULL, "\\uFFFD\\uFFFD", }, { /* \U+DBFF\U+DFFF */ "\xED\xAF\xBF\xED\xBF\xBF", NULL, "\\uFFFD\\uFFFD", }, /* 5.3 Other illegal code positions */ /* BMP noncharacters */ { /* \U+FFFE */ "\xEF\xBF\xBE", NULL, "\\uFFFD", }, { /* \U+FFFF */ "\xEF\xBF\xBF", NULL, "\\uFFFD", }, { /* U+FDD0 */ "\xEF\xB7\x90", NULL, "\\uFFFD", }, { /* U+FDEF */ "\xEF\xB7\xAF", NULL, "\\uFFFD", }, /* Plane 1 .. 16 noncharacters */ { /* U+1FFFE U+1FFFF U+2FFFE U+2FFFF ... 
U+10FFFE U+10FFFF */ "\xF0\x9F\xBF\xBE\xF0\x9F\xBF\xBF" "\xF0\xAF\xBF\xBE\xF0\xAF\xBF\xBF" "\xF0\xBF\xBF\xBE\xF0\xBF\xBF\xBF" "\xF1\x8F\xBF\xBE\xF1\x8F\xBF\xBF" "\xF1\x9F\xBF\xBE\xF1\x9F\xBF\xBF" "\xF1\xAF\xBF\xBE\xF1\xAF\xBF\xBF" "\xF1\xBF\xBF\xBE\xF1\xBF\xBF\xBF" "\xF2\x8F\xBF\xBE\xF2\x8F\xBF\xBF" "\xF2\x9F\xBF\xBE\xF2\x9F\xBF\xBF" "\xF2\xAF\xBF\xBE\xF2\xAF\xBF\xBF" "\xF2\xBF\xBF\xBE\xF2\xBF\xBF\xBF" "\xF3\x8F\xBF\xBE\xF3\x8F\xBF\xBF" "\xF3\x9F\xBF\xBE\xF3\x9F\xBF\xBF" "\xF3\xAF\xBF\xBE\xF3\xAF\xBF\xBF" "\xF3\xBF\xBF\xBE\xF3\xBF\xBF\xBF" "\xF4\x8F\xBF\xBE\xF4\x8F\xBF\xBF", NULL, "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD" "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD" "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD" "\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD\\uFFFD", }, {} }; int i, j; QString *str; const char *json_in, *utf8_out, *utf8_in, *json_out, *tail; char *end, *in, *jstr; for (i = 0; test_cases[i].json_in; i++) { for (j = 0; j < 2; j++) { json_in = test_cases[i].json_in; utf8_out = test_cases[i].utf8_out; utf8_in = test_cases[i].utf8_out ?: test_cases[i].json_in; json_out = test_cases[i].json_out ?: test_cases[i].json_in; /* Parse @json_in, expect @utf8_out */ if (utf8_out) { str = from_json_str(json_in, j, &error_abort); g_assert_cmpstr(qstring_get_try_str(str), ==, utf8_out); qobject_unref(str); } else { str = from_json_str(json_in, j, NULL); g_assert(!str); /* * Failure may be due to any sequence, but *all* sequences * are expected to fail. Test each one in isolation. 
*/ for (tail = json_in; *tail; tail = end) { mod_utf8_codepoint(tail, 6, &end); if (*end == ' ') { end++; } in = strndup(tail, end - tail); str = from_json_str(in, j, NULL); g_assert(!str); g_free(in); } } /* Unparse @utf8_in, expect @json_out */ str = qstring_from_str(utf8_in); jstr = to_json_str(str); g_assert_cmpstr(jstr, ==, json_out); qobject_unref(str); g_free(jstr); /* Parse @json_out right back, unless it has replacements */ if (!strstr(json_out, "\\uFFFD")) { str = from_json_str(json_out, j, &error_abort); g_assert_cmpstr(qstring_get_try_str(str), ==, utf8_in); qobject_unref(str); } } } } static void simple_number(void) { int i; struct { const char *encoded; int64_t decoded; int skip; } test_cases[] = { { "0", 0 }, { "1234", 1234 }, { "1", 1 }, { "-32", -32 }, { "-0", 0, .skip = 1 }, { }, }; for (i = 0; test_cases[i].encoded; i++) { QNum *qnum; int64_t val; qnum = qobject_to(QNum, qobject_from_json(test_cases[i].encoded, &error_abort)); g_assert(qnum); g_assert(qnum_get_try_int(qnum, &val)); g_assert_cmpint(val, ==, test_cases[i].decoded); if (test_cases[i].skip == 0) { QString *str; str = qobject_to_json(QOBJECT(qnum)); g_assert(strcmp(qstring_get_str(str), test_cases[i].encoded) == 0); qobject_unref(str); } qobject_unref(qnum); } } static void large_number(void) { const char *maxu64 = "18446744073709551615"; /* 2^64-1 */ const char *gtu64 = "18446744073709551616"; /* 2^64 */ const char *lti64 = "-9223372036854775809"; /* -2^63 - 1 */ QNum *qnum; QString *str; uint64_t val; int64_t ival; qnum = qobject_to(QNum, qobject_from_json(maxu64, &error_abort)); g_assert(qnum); g_assert_cmpuint(qnum_get_uint(qnum), ==, 18446744073709551615U); g_assert(!qnum_get_try_int(qnum, &ival)); str = qobject_to_json(QOBJECT(qnum)); g_assert_cmpstr(qstring_get_str(str), ==, maxu64); qobject_unref(str); qobject_unref(qnum); qnum = qobject_to(QNum, qobject_from_json(gtu64, &error_abort)); g_assert(qnum); g_assert_cmpfloat(qnum_get_double(qnum), ==, 18446744073709552e3); 
g_assert(!qnum_get_try_uint(qnum, &val)); g_assert(!qnum_get_try_int(qnum, &ival)); str = qobject_to_json(QOBJECT(qnum)); g_assert_cmpstr(qstring_get_str(str), ==, gtu64); qobject_unref(str); qobject_unref(qnum); qnum = qobject_to(QNum, qobject_from_json(lti64, &error_abort)); g_assert(qnum); g_assert_cmpfloat(qnum_get_double(qnum), ==, -92233720368547758e2); g_assert(!qnum_get_try_uint(qnum, &val)); g_assert(!qnum_get_try_int(qnum, &ival)); str = qobject_to_json(QOBJECT(qnum)); g_assert_cmpstr(qstring_get_str(str), ==, "-9223372036854775808"); qobject_unref(str); qobject_unref(qnum); } static void float_number(void) { int i; struct { const char *encoded; double decoded; int skip; } test_cases[] = { { "32.43", 32.43 }, { "0.222", 0.222 }, { "-32.12313", -32.12313 }, { "-32.20e-10", -32.20e-10, .skip = 1 }, { }, }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QNum *qnum; obj = qobject_from_json(test_cases[i].encoded, &error_abort); qnum = qobject_to(QNum, obj); g_assert(qnum); g_assert(qnum_get_double(qnum) == test_cases[i].decoded); if (test_cases[i].skip == 0) { QString *str; str = qobject_to_json(obj); g_assert(strcmp(qstring_get_str(str), test_cases[i].encoded) == 0); qobject_unref(str); } qobject_unref(qnum); } } static void keyword_literal(void) { QObject *obj; QBool *qbool; QNull *null; QString *str; obj = qobject_from_json("true", &error_abort); qbool = qobject_to(QBool, obj); g_assert(qbool); g_assert(qbool_get_bool(qbool) == true); str = qobject_to_json(obj); g_assert(strcmp(qstring_get_str(str), "true") == 0); qobject_unref(str); qobject_unref(qbool); obj = qobject_from_json("false", &error_abort); qbool = qobject_to(QBool, obj); g_assert(qbool); g_assert(qbool_get_bool(qbool) == false); str = qobject_to_json(obj); g_assert(strcmp(qstring_get_str(str), "false") == 0); qobject_unref(str); qobject_unref(qbool); obj = qobject_from_json("null", &error_abort); g_assert(obj != NULL); g_assert(qobject_type(obj) == QTYPE_QNULL); null = qnull(); 
g_assert(QOBJECT(null) == obj); qobject_unref(obj); qobject_unref(null); } static void interpolation_valid(void) { long long value_lld = 0x123456789abcdefLL; int64_t value_d64 = value_lld; long value_ld = (long)value_lld; int value_d = (int)value_lld; unsigned long long value_llu = 0xfedcba9876543210ULL; uint64_t value_u64 = value_llu; unsigned long value_lu = (unsigned long)value_llu; unsigned value_u = (unsigned)value_llu; double value_f = 2.323423423; const char *value_s = "hello world"; QObject *value_p = QOBJECT(qnull()); QBool *qbool; QNum *qnum; QString *qstr; QObject *qobj; /* bool */ qbool = qobject_to(QBool, qobject_from_jsonf_nofail("%i", false)); g_assert(qbool); g_assert(qbool_get_bool(qbool) == false); qobject_unref(qbool); /* Test that non-zero values other than 1 get collapsed to true */ qbool = qobject_to(QBool, qobject_from_jsonf_nofail("%i", 2)); g_assert(qbool); g_assert(qbool_get_bool(qbool) == true); qobject_unref(qbool); /* number */ qnum = qobject_to(QNum, qobject_from_jsonf_nofail("%d", value_d)); g_assert_cmpint(qnum_get_int(qnum), ==, value_d); qobject_unref(qnum); qnum = qobject_to(QNum, qobject_from_jsonf_nofail("%ld", value_ld)); g_assert_cmpint(qnum_get_int(qnum), ==, value_ld); qobject_unref(qnum); qnum = qobject_to(QNum, qobject_from_jsonf_nofail("%lld", value_lld)); g_assert_cmpint(qnum_get_int(qnum), ==, value_lld); qobject_unref(qnum); qnum = qobject_to(QNum, qobject_from_jsonf_nofail("%" PRId64, value_d64)); g_assert_cmpint(qnum_get_int(qnum), ==, value_lld); qobject_unref(qnum); qnum = qobject_to(QNum, qobject_from_jsonf_nofail("%u", value_u)); g_assert_cmpuint(qnum_get_uint(qnum), ==, value_u); qobject_unref(qnum); qnum = qobject_to(QNum, qobject_from_jsonf_nofail("%lu", value_lu)); g_assert_cmpuint(qnum_get_uint(qnum), ==, value_lu); qobject_unref(qnum); qnum = qobject_to(QNum, qobject_from_jsonf_nofail("%llu", value_llu)); g_assert_cmpuint(qnum_get_uint(qnum), ==, value_llu); qobject_unref(qnum); qnum = qobject_to(QNum, 
qobject_from_jsonf_nofail("%" PRIu64, value_u64)); g_assert_cmpuint(qnum_get_uint(qnum), ==, value_llu); qobject_unref(qnum); qnum = qobject_to(QNum, qobject_from_jsonf_nofail("%f", value_f)); g_assert(qnum_get_double(qnum) == value_f); qobject_unref(qnum); /* string */ qstr = qobject_to(QString, qobject_from_jsonf_nofail("%s", value_s)); g_assert_cmpstr(qstring_get_try_str(qstr), ==, value_s); qobject_unref(qstr); /* object */ qobj = qobject_from_jsonf_nofail("%p", value_p); g_assert(qobj == value_p); } static void interpolation_unknown(void) { if (g_test_subprocess()) { qobject_from_jsonf_nofail("%x", 666); } g_test_trap_subprocess(NULL, 0, 0); g_test_trap_assert_failed(); g_test_trap_assert_stderr("*Unexpected error*" "invalid interpolation '%x'*"); } static void interpolation_string(void) { if (g_test_subprocess()) { qobject_from_jsonf_nofail("['%s', %s]", "eins", "zwei"); } g_test_trap_subprocess(NULL, 0, 0); g_test_trap_assert_failed(); g_test_trap_assert_stderr("*Unexpected error*" "can't interpolate into string*"); } static void simple_dict(void) { int i; struct { const char *encoded; QLitObject decoded; } test_cases[] = { { .encoded = "{\"foo\": 42, \"bar\": \"hello world\"}", .decoded = QLIT_QDICT(((QLitDictEntry[]){ { "foo", QLIT_QNUM(42) }, { "bar", QLIT_QSTR("hello world") }, { } })), }, { .encoded = "{}", .decoded = QLIT_QDICT(((QLitDictEntry[]){ { } })), }, { .encoded = "{\"foo\": 43}", .decoded = QLIT_QDICT(((QLitDictEntry[]){ { "foo", QLIT_QNUM(43) }, { } })), }, { } }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QString *str; obj = qobject_from_json(test_cases[i].encoded, &error_abort); g_assert(qlit_equal_qobject(&test_cases[i].decoded, obj)); str = qobject_to_json(obj); qobject_unref(obj); obj = qobject_from_json(qstring_get_str(str), &error_abort); g_assert(qlit_equal_qobject(&test_cases[i].decoded, obj)); qobject_unref(obj); qobject_unref(str); } } /* * this generates json of the form: * a(0,m) = [0, 1, ..., m-1] * a(n,m) = { * 
'key0': a(0,m), * 'key1': a(1,m), * ... * 'key(n-1)': a(n-1,m) * } */ static void gen_test_json(GString *gstr, int nest_level_max, int elem_count) { int i; g_assert(gstr); if (nest_level_max == 0) { g_string_append(gstr, "["); for (i = 0; i < elem_count; i++) { g_string_append_printf(gstr, "%d", i); if (i < elem_count - 1) { g_string_append_printf(gstr, ", "); } } g_string_append(gstr, "]"); return; } g_string_append(gstr, "{"); for (i = 0; i < nest_level_max; i++) { g_string_append_printf(gstr, "'key%d': ", i); gen_test_json(gstr, i, elem_count); if (i < nest_level_max - 1) { g_string_append(gstr, ","); } } g_string_append(gstr, "}"); } static void large_dict(void) { GString *gstr = g_string_new(""); QObject *obj; gen_test_json(gstr, 10, 100); obj = qobject_from_json(gstr->str, &error_abort); g_assert(obj != NULL); qobject_unref(obj); g_string_free(gstr, true); } static void simple_list(void) { int i; struct { const char *encoded; QLitObject decoded; } test_cases[] = { { .encoded = "[43,42]", .decoded = QLIT_QLIST(((QLitObject[]){ QLIT_QNUM(43), QLIT_QNUM(42), { } })), }, { .encoded = "[43]", .decoded = QLIT_QLIST(((QLitObject[]){ QLIT_QNUM(43), { } })), }, { .encoded = "[]", .decoded = QLIT_QLIST(((QLitObject[]){ { } })), }, { .encoded = "[{}]", .decoded = QLIT_QLIST(((QLitObject[]){ QLIT_QDICT(((QLitDictEntry[]){ {}, })), {}, })), }, { } }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QString *str; obj = qobject_from_json(test_cases[i].encoded, &error_abort); g_assert(qlit_equal_qobject(&test_cases[i].decoded, obj)); str = qobject_to_json(obj); qobject_unref(obj); obj = qobject_from_json(qstring_get_str(str), &error_abort); g_assert(qlit_equal_qobject(&test_cases[i].decoded, obj)); qobject_unref(obj); qobject_unref(str); } } static void simple_whitespace(void) { int i; struct { const char *encoded; QLitObject decoded; } test_cases[] = { { .encoded = " [ 43 , 42 ]", .decoded = QLIT_QLIST(((QLitObject[]){ QLIT_QNUM(43), QLIT_QNUM(42), { } })), }, { 
.encoded = "\t[ 43 , { 'h' : 'b' },\r\n\t[ ], 42 ]\n", .decoded = QLIT_QLIST(((QLitObject[]){ QLIT_QNUM(43), QLIT_QDICT(((QLitDictEntry[]){ { "h", QLIT_QSTR("b") }, { }})), QLIT_QLIST(((QLitObject[]){ { }})), QLIT_QNUM(42), { } })), }, { .encoded = " [ 43 , { 'h' : 'b' , 'a' : 32 }, [ ], 42 ]", .decoded = QLIT_QLIST(((QLitObject[]){ QLIT_QNUM(43), QLIT_QDICT(((QLitDictEntry[]){ { "h", QLIT_QSTR("b") }, { "a", QLIT_QNUM(32) }, { }})), QLIT_QLIST(((QLitObject[]){ { }})), QLIT_QNUM(42), { } })), }, { } }; for (i = 0; test_cases[i].encoded; i++) { QObject *obj; QString *str; obj = qobject_from_json(test_cases[i].encoded, &error_abort); g_assert(qlit_equal_qobject(&test_cases[i].decoded, obj)); str = qobject_to_json(obj); qobject_unref(obj); obj = qobject_from_json(qstring_get_str(str), &error_abort); g_assert(qlit_equal_qobject(&test_cases[i].decoded, obj)); qobject_unref(obj); qobject_unref(str); } } static void simple_interpolation(void) { QObject *embedded_obj; QObject *obj; QLitObject decoded = QLIT_QLIST(((QLitObject[]){ QLIT_QNUM(1), QLIT_QSTR("100%"), QLIT_QLIST(((QLitObject[]){ QLIT_QNUM(32), QLIT_QNUM(42), {}})), {}})); embedded_obj = qobject_from_json("[32, 42]", &error_abort); g_assert(embedded_obj != NULL); obj = qobject_from_jsonf_nofail("[%d, '100%%', %p]", 1, embedded_obj); g_assert(qlit_equal_qobject(&decoded, obj)); qobject_unref(obj); } static void empty_input(void) { Error *err = NULL; QObject *obj; obj = qobject_from_json("", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void blank_input(void) { Error *err = NULL; QObject *obj; obj = qobject_from_json("\n ", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void junk_input(void) { /* Note: junk within strings is covered elsewhere */ Error *err = NULL; QObject *obj; obj = qobject_from_json("@", &err); error_free_or_abort(&err); g_assert(obj == NULL); obj = qobject_from_json("{\x01", &err); error_free_or_abort(&err); g_assert(obj == NULL); obj = 
qobject_from_json("[0\xFF]", &err); error_free_or_abort(&err); g_assert(obj == NULL); obj = qobject_from_json("00", &err); error_free_or_abort(&err); g_assert(obj == NULL); obj = qobject_from_json("[1e", &err); error_free_or_abort(&err); g_assert(obj == NULL); obj = qobject_from_json("truer", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void unterminated_string(void) { Error *err = NULL; QObject *obj = qobject_from_json("\"abc", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void unterminated_sq_string(void) { Error *err = NULL; QObject *obj = qobject_from_json("'abc", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void unterminated_escape(void) { Error *err = NULL; QObject *obj = qobject_from_json("\"abc\\\"", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void unterminated_array(void) { Error *err = NULL; QObject *obj = qobject_from_json("[32", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void unterminated_array_comma(void) { Error *err = NULL; QObject *obj = qobject_from_json("[32,", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void invalid_array_comma(void) { Error *err = NULL; QObject *obj = qobject_from_json("[32,}", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void unterminated_dict(void) { Error *err = NULL; QObject *obj = qobject_from_json("{'abc':32", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void unterminated_dict_comma(void) { Error *err = NULL; QObject *obj = qobject_from_json("{'abc':32,", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void invalid_dict_comma(void) { Error *err = NULL; QObject *obj = qobject_from_json("{'abc':32,}", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void unterminated_literal(void) { Error *err = NULL; QObject *obj = qobject_from_json("nul", &err); error_free_or_abort(&err); g_assert(obj == NULL); } static char 
*make_nest(char *buf, size_t cnt) { memset(buf, '[', cnt - 1); buf[cnt - 1] = '{'; buf[cnt] = '}'; memset(buf + cnt + 1, ']', cnt - 1); buf[2 * cnt] = 0; return buf; } static void limits_nesting(void) { Error *err = NULL; enum { max_nesting = 1024 }; /* see qobject/json-streamer.c */ char buf[2 * (max_nesting + 1) + 1]; QObject *obj; obj = qobject_from_json(make_nest(buf, max_nesting), &error_abort); g_assert(obj != NULL); qobject_unref(obj); obj = qobject_from_json(make_nest(buf, max_nesting + 1), &err); error_free_or_abort(&err); g_assert(obj == NULL); } static void multiple_values(void) { Error *err = NULL; QObject *obj; obj = qobject_from_json("false true", &err); error_free_or_abort(&err); g_assert(obj == NULL); obj = qobject_from_json("} true", &err); error_free_or_abort(&err); g_assert(obj == NULL); } int main(int argc, char **argv) { g_test_init(&argc, &argv, NULL); g_test_add_func("/literals/string/escaped", escaped_string); g_test_add_func("/literals/string/quotes", string_with_quotes); g_test_add_func("/literals/string/utf8", utf8_string); g_test_add_func("/literals/number/simple", simple_number); g_test_add_func("/literals/number/large", large_number); g_test_add_func("/literals/number/float", float_number); g_test_add_func("/literals/keyword", keyword_literal); g_test_add_func("/literals/interpolation/valid", interpolation_valid); g_test_add_func("/literals/interpolation/unkown", interpolation_unknown); g_test_add_func("/literals/interpolation/string", interpolation_string); g_test_add_func("/dicts/simple_dict", simple_dict); g_test_add_func("/dicts/large_dict", large_dict); g_test_add_func("/lists/simple_list", simple_list); g_test_add_func("/mixed/simple_whitespace", simple_whitespace); g_test_add_func("/mixed/interpolation", simple_interpolation); g_test_add_func("/errors/empty", empty_input); g_test_add_func("/errors/blank", blank_input); g_test_add_func("/errors/junk", junk_input); g_test_add_func("/errors/unterminated/string", 
unterminated_string); g_test_add_func("/errors/unterminated/escape", unterminated_escape); g_test_add_func("/errors/unterminated/sq_string", unterminated_sq_string); g_test_add_func("/errors/unterminated/array", unterminated_array); g_test_add_func("/errors/unterminated/array_comma", unterminated_array_comma); g_test_add_func("/errors/unterminated/dict", unterminated_dict); g_test_add_func("/errors/unterminated/dict_comma", unterminated_dict_comma); g_test_add_func("/errors/invalid_array_comma", invalid_array_comma); g_test_add_func("/errors/invalid_dict_comma", invalid_dict_comma); g_test_add_func("/errors/unterminated/literal", unterminated_literal); g_test_add_func("/errors/limits/nesting", limits_nesting); g_test_add_func("/errors/multiple_values", multiple_values); return g_test_run(); }
pmp-tool/PMP
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/int-add/test_msa_add_a_b.c
/*
 *  Test program for MSA instruction ADD_A.B
 *
 *  Copyright (C) 2019  RT-RK Computer Based Systems LLC
 *  Copyright (C) 2019  <NAME> <<EMAIL>>
 *
 *  This program is free software: you can redistribute it and/or modify
 *  it under the terms of the GNU General Public License as published by
 *  the Free Software Foundation, either version 2 of the License, or
 *  (at your option) any later version.
 *
 *  This program is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 *  GNU General Public License for more details.
 *
 *  You should have received a copy of the GNU General Public License
 *  along with this program. If not, see <https://www.gnu.org/licenses/>.
 *
 */

#include <sys/time.h>
#include <stdint.h>

#include "../../../../include/wrappers_msa.h"
#include "../../../../include/test_inputs_128.h"
#include "../../../../include/test_utils_128.h"

/* One test per (input, input) pair, for both the pattern and random sets */
#define TEST_COUNT_TOTAL (                                                \
            (PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT) + \
            (RANDOM_INPUTS_SHORT_COUNT) * (RANDOM_INPUTS_SHORT_COUNT))


int32_t main(void)
{
    char *instruction_name = "ADD_A.B";
    int32_t ret;
    uint32_t i, j;
    struct timeval start, end;
    double elapsed_time;

    /* Results computed by executing ADD_A.B on the target/emulator */
    uint64_t b128_result[TEST_COUNT_TOTAL][2];
    /*
     * Precomputed reference values (two 64-bit halves of each 128-bit
     * vector), in the same order as the loops below fill b128_result:
     * first all pattern x pattern pairs, then all random x random pairs.
     */
    uint64_t b128_expect[TEST_COUNT_TOTAL][2] = {
        { 0x0202020202020202ULL, 0x0202020202020202ULL, },    /*   0  */
        { 0x0101010101010101ULL, 0x0101010101010101ULL, },
        { 0x5757575757575757ULL, 0x5757575757575757ULL, },
        { 0x5656565656565656ULL, 0x5656565656565656ULL, },
        { 0x3535353535353535ULL, 0x3535353535353535ULL, },
        { 0x3434343434343434ULL, 0x3434343434343434ULL, },
        { 0x1e73391e73391e73ULL, 0x391e73391e73391eULL, },
        { 0x1d723a1d723a1d72ULL, 0x3a1d723a1d723a1dULL, },
        { 0x0101010101010101ULL, 0x0101010101010101ULL, },    /*   8  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0x5656565656565656ULL, 0x5656565656565656ULL, },
        { 0x5555555555555555ULL, 0x5555555555555555ULL, },
        { 0x3434343434343434ULL, 0x3434343434343434ULL, },
        { 0x3333333333333333ULL, 0x3333333333333333ULL, },
        { 0x1d72381d72381d72ULL, 0x381d72381d72381dULL, },
        { 0x1c71391c71391c71ULL, 0x391c71391c71391cULL, },
        { 0x5757575757575757ULL, 0x5757575757575757ULL, },    /*  16  */
        { 0x5656565656565656ULL, 0x5656565656565656ULL, },
        { 0xacacacacacacacacULL, 0xacacacacacacacacULL, },
        { 0xababababababababULL, 0xababababababababULL, },
        { 0x8a8a8a8a8a8a8a8aULL, 0x8a8a8a8a8a8a8a8aULL, },
        { 0x8989898989898989ULL, 0x8989898989898989ULL, },
        { 0x73c88e73c88e73c8ULL, 0x8e73c88e73c88e73ULL, },
        { 0x72c78f72c78f72c7ULL, 0x8f72c78f72c78f72ULL, },
        { 0x5656565656565656ULL, 0x5656565656565656ULL, },    /*  24  */
        { 0x5555555555555555ULL, 0x5555555555555555ULL, },
        { 0xababababababababULL, 0xababababababababULL, },
        { 0xaaaaaaaaaaaaaaaaULL, 0xaaaaaaaaaaaaaaaaULL, },
        { 0x8989898989898989ULL, 0x8989898989898989ULL, },
        { 0x8888888888888888ULL, 0x8888888888888888ULL, },
        { 0x72c78d72c78d72c7ULL, 0x8d72c78d72c78d72ULL, },
        { 0x71c68e71c68e71c6ULL, 0x8e71c68e71c68e71ULL, },
        { 0x3535353535353535ULL, 0x3535353535353535ULL, },    /*  32  */
        { 0x3434343434343434ULL, 0x3434343434343434ULL, },
        { 0x8a8a8a8a8a8a8a8aULL, 0x8a8a8a8a8a8a8a8aULL, },
        { 0x8989898989898989ULL, 0x8989898989898989ULL, },
        { 0x6868686868686868ULL, 0x6868686868686868ULL, },
        { 0x6767676767676767ULL, 0x6767676767676767ULL, },
        { 0x51a66c51a66c51a6ULL, 0x6c51a66c51a66c51ULL, },
        { 0x50a56d50a56d50a5ULL, 0x6d50a56d50a56d50ULL, },
        { 0x3434343434343434ULL, 0x3434343434343434ULL, },    /*  40  */
        { 0x3333333333333333ULL, 0x3333333333333333ULL, },
        { 0x8989898989898989ULL, 0x8989898989898989ULL, },
        { 0x8888888888888888ULL, 0x8888888888888888ULL, },
        { 0x6767676767676767ULL, 0x6767676767676767ULL, },
        { 0x6666666666666666ULL, 0x6666666666666666ULL, },
        { 0x50a56b50a56b50a5ULL, 0x6b50a56b50a56b50ULL, },
        { 0x4fa46c4fa46c4fa4ULL, 0x6c4fa46c4fa46c4fULL, },
        { 0x1e73391e73391e73ULL, 0x391e73391e73391eULL, },    /*  48  */
        { 0x1d72381d72381d72ULL, 0x381d72381d72381dULL, },
        { 0x73c88e73c88e73c8ULL, 0x8e73c88e73c88e73ULL, },
        { 0x72c78d72c78d72c7ULL, 0x8d72c78d72c78d72ULL, },
        { 0x51a66c51a66c51a6ULL, 0x6c51a66c51a66c51ULL, },
        { 0x50a56b50a56b50a5ULL, 0x6b50a56b50a56b50ULL, },
        { 0x3ae4703ae4703ae4ULL, 0x703ae4703ae4703aULL, },
        { 0x39e37139e37139e3ULL, 0x7139e37139e37139ULL, },
        { 0x1d723a1d723a1d72ULL, 0x3a1d723a1d723a1dULL, },    /*  56  */
        { 0x1c71391c71391c71ULL, 0x391c71391c71391cULL, },
        { 0x72c78f72c78f72c7ULL, 0x8f72c78f72c78f72ULL, },
        { 0x71c68e71c68e71c6ULL, 0x8e71c68e71c68e71ULL, },
        { 0x50a56d50a56d50a5ULL, 0x6d50a56d50a56d50ULL, },
        { 0x4fa46c4fa46c4fa4ULL, 0x6c4fa46c4fa46c4fULL, },
        { 0x39e37139e37139e3ULL, 0x7139e37139e37139ULL, },
        { 0x38e27238e27238e2ULL, 0x7238e27238e27238ULL, },
        { 0xf0d4346850c4aa80ULL, 0x96ce16bc04f6a018ULL, },    /*  64  */
        { 0x7dac1a9775cf8e48ULL, 0x5d70507817baa210ULL, },
        { 0xccc46c8a6f93cac0ULL, 0x728f455f57a67520ULL, },
        { 0xe8b930818693738eULL, 0xbe76838659bd6e6cULL, },
        { 0x7dac1a9775cf8e48ULL, 0x5d70507817baa210ULL, },
        { 0x0a8400c69ada7210ULL, 0x24128a342a7ea408ULL, },
        { 0x599c52b9949eae88ULL, 0x39317f1b6a6a7718ULL, },
        { 0x759116b0ab9e5756ULL, 0x8518bd426c817064ULL, },
        { 0xccc46c8a6f93cac0ULL, 0x728f455f57a67520ULL, },    /*  72  */
        { 0x599c52b9949eae88ULL, 0x39317f1b6a6a7718ULL, },
        { 0xa8b4a4ac8e62ea00ULL, 0x4e507402aa564a28ULL, },
        { 0xc4a968a3a56293ceULL, 0x9a37b229ac6d4374ULL, },
        { 0xe8b930818693738eULL, 0xbe76838659bd6e6cULL, },
        { 0x759116b0ab9e5756ULL, 0x8518bd426c817064ULL, },
    };

    /* Time the whole run; check_results() reports elapsed milliseconds */
    gettimeofday(&start, NULL);

    /* Pattern inputs: all ordered pairs (i, j) */
    for (i = 0; i < PATTERN_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < PATTERN_INPUTS_SHORT_COUNT; j++) {
            do_msa_ADD_A_B(b128_pattern[i], b128_pattern[j],
                           b128_result[PATTERN_INPUTS_SHORT_COUNT * i + j]);
        }
    }
    /* Random inputs: stored after all the pattern results */
    for (i = 0; i < RANDOM_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < RANDOM_INPUTS_SHORT_COUNT; j++) {
            do_msa_ADD_A_B(b128_random[i], b128_random[j],
                           b128_result[((PATTERN_INPUTS_SHORT_COUNT) *
                                        (PATTERN_INPUTS_SHORT_COUNT)) +
                                       RANDOM_INPUTS_SHORT_COUNT * i + j]);
        }
    }

    gettimeofday(&end, NULL);

    elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0;
    elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0;

    /* Compare computed results against the reference table */
    ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time,
                        &b128_result[0][0], &b128_expect[0][0]);

    return ret;
}
pmp-tool/PMP
src/qemu/src-pmp/hw/display/virtio-gpu-pci.c
/*
 * Virtio video device
 *
 * Copyright Red Hat
 *
 * Authors:
 *  <NAME>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "hw/pci/pci.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-bus.h"
#include "hw/virtio/virtio-pci.h"
#include "hw/virtio/virtio-gpu.h"

typedef struct VirtIOGPUPCI VirtIOGPUPCI;

/*
 * virtio-gpu-pci: This extends VirtioPCIProxy.
 */
#define TYPE_VIRTIO_GPU_PCI "virtio-gpu-pci"
#define VIRTIO_GPU_PCI(obj) \
        OBJECT_CHECK(VirtIOGPUPCI, (obj), TYPE_VIRTIO_GPU_PCI)

struct VirtIOGPUPCI {
    VirtIOPCIProxy parent_obj; /* PCI proxy; must be the first member (QOM) */
    VirtIOGPU vdev;            /* embedded virtio-gpu device */
};

static Property virtio_gpu_pci_properties[] = {
    DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
    DEFINE_PROP_END_OF_LIST(),
};

/*
 * Realize the PCI proxy: attach and realize the embedded virtio-gpu
 * device, then point each scanout's console at this PCI device.
 */
static void virtio_gpu_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOGPUPCI *vgpu = VIRTIO_GPU_PCI(vpci_dev);
    VirtIOGPU *g = &vgpu->vdev;
    DeviceState *vdev = DEVICE(&vgpu->vdev);
    int i;
    Error *local_error = NULL;

    qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
    /* virtio-gpu is virtio 1.0 only; disable legacy mode */
    virtio_pci_force_virtio_1(vpci_dev);
    object_property_set_bool(OBJECT(vdev), true, "realized", &local_error);

    if (local_error) {
        error_propagate(errp, local_error);
        return;
    }

    /*
     * NOTE(review): errp is reused across loop iterations without being
     * checked in between — presumably setting the link cannot fail here;
     * verify against object_property_set_link()'s contract.
     */
    for (i = 0; i < g->conf.max_outputs; i++) {
        object_property_set_link(OBJECT(g->scanout[i].con),
                                 OBJECT(vpci_dev),
                                 "device", errp);
    }
}

static void virtio_gpu_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_gpu_pci_properties;
    dc->hotpluggable = false;
    k->realize = virtio_gpu_pci_realize;
    pcidev_k->class_id = PCI_CLASS_DISPLAY_OTHER;
}

/* Instance init: set up the embedded virtio-gpu child object */
static void virtio_gpu_initfn(Object *obj)
{
    VirtIOGPUPCI *dev = VIRTIO_GPU_PCI(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_GPU);
}

static const VirtioPCIDeviceTypeInfo virtio_gpu_pci_info = {
    .generic_name = TYPE_VIRTIO_GPU_PCI,
    .instance_size = sizeof(VirtIOGPUPCI),
    .instance_init = virtio_gpu_initfn,
    .class_init = virtio_gpu_pci_class_init,
};

static void virtio_gpu_pci_register_types(void)
{
    virtio_pci_types_register(&virtio_gpu_pci_info);
}

type_init(virtio_gpu_pci_register_types)
pmp-tool/PMP
src/qemu/src-pmp/hw/i386/pc_sysfw.c
/*
 * QEMU PC System Firmware
 *
 * Copyright (c) 2003-2004 <NAME>
 * Copyright (c) 2011-2012 Intel Corporation
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "sysemu/block-backend.h"
#include "qemu/error-report.h"
#include "qemu/option.h"
#include "qemu/units.h"
#include "hw/sysbus.h"
#include "hw/hw.h"
#include "hw/i386/pc.h"
#include "hw/boards.h"
#include "hw/loader.h"
#include "sysemu/sysemu.h"
#include "hw/block/flash.h"
#include "sysemu/kvm.h"

#define BIOS_FILENAME "bios.bin"

/*
 * We don't have a theoretically justifiable exact lower bound on the base
 * address of any flash mapping. In practice, the IO-APIC MMIO range is
 * [0xFEE00000..0xFEE01000] -- see IO_APIC_DEFAULT_ADDRESS --, leaving free
 * only 18MB-4KB below 4G. For now, restrict the cumulative mapping to 8MB in
 * size.
 */
#define FLASH_SIZE_LIMIT (8 * MiB)

#define FLASH_SECTOR_SIZE 4096

/*
 * Mirror the top (up to) 128KB of the firmware flash into the legacy
 * ISA BIOS region just below 1MB, as a read-only RAM copy.
 * Note: ram_size is currently unused.
 */
static void pc_isa_bios_init(MemoryRegion *rom_memory,
                             MemoryRegion *flash_mem,
                             int ram_size)
{
    int isa_bios_size;
    MemoryRegion *isa_bios;
    uint64_t flash_size;
    void *flash_ptr, *isa_bios_ptr;

    flash_size = memory_region_size(flash_mem);

    /* map the last 128KB of the BIOS in ISA space */
    isa_bios_size = MIN(flash_size, 128 * KiB);
    isa_bios = g_malloc(sizeof(*isa_bios));
    memory_region_init_ram(isa_bios, NULL, "isa-bios", isa_bios_size,
                           &error_fatal);
    memory_region_add_subregion_overlap(rom_memory,
                                        0x100000 - isa_bios_size,
                                        isa_bios,
                                        1);

    /* copy ISA rom image from top of flash memory */
    flash_ptr = memory_region_get_ram_ptr(flash_mem);
    isa_bios_ptr = memory_region_get_ram_ptr(isa_bios);
    memcpy(isa_bios_ptr,
           ((uint8_t*)flash_ptr) + (flash_size - isa_bios_size),
           isa_bios_size);

    memory_region_set_readonly(isa_bios, true);
}

/*
 * Create (but do not realize) one CFI01 pflash device as a QOM child of
 * the machine, and expose its "drive" property under @alias_prop_name.
 * The number of blocks is set later, in pc_system_flash_map().
 */
static PFlashCFI01 *pc_pflash_create(PCMachineState *pcms,
                                     const char *name,
                                     const char *alias_prop_name)
{
    DeviceState *dev = qdev_create(NULL, TYPE_PFLASH_CFI01);

    qdev_prop_set_uint64(dev, "sector-length", FLASH_SECTOR_SIZE);
    qdev_prop_set_uint8(dev, "width", 1);
    qdev_prop_set_string(dev, "name", name);
    object_property_add_child(OBJECT(pcms), name, OBJECT(dev),
                              &error_abort);
    object_property_add_alias(OBJECT(pcms), alias_prop_name,
                              OBJECT(dev), "drive", &error_abort);
    return PFLASH_CFI01(dev);
}

/* Create the two system flash devices; only meaningful on PCI machines */
void pc_system_flash_create(PCMachineState *pcms)
{
    PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);

    if (pcmc->pci_enabled) {
        pcms->flash[0] = pc_pflash_create(pcms, "system.flash0",
                                          "pflash0");
        pcms->flash[1] = pc_pflash_create(pcms, "system.flash1",
                                          "pflash1");
    }
}

/*
 * Destroy every flash device that was created but never realized
 * (i.e. got no backing drive), removing its machine property alias.
 */
static void pc_system_flash_cleanup_unused(PCMachineState *pcms)
{
    char *prop_name;
    int i;
    Object *dev_obj;

    assert(PC_MACHINE_GET_CLASS(pcms)->pci_enabled);

    for (i = 0; i < ARRAY_SIZE(pcms->flash); i++) {
        dev_obj = OBJECT(pcms->flash[i]);
        if (!object_property_get_bool(dev_obj, "realized", &error_abort)) {
            prop_name = g_strdup_printf("pflash%d", i);
            object_property_del(OBJECT(pcms), prop_name, &error_abort);
            g_free(prop_name);
            object_unparent(dev_obj);
            pcms->flash[i] = NULL;
        }
    }
}

/*
 * Map the pcms->flash[] from 4GiB downward, and realize.
 * Map them in descending order, i.e. pcms->flash[0] at the top,
 * without gaps.
 * Stop at the first pcms->flash[0] lacking a block backend.
 * Set each flash's size from its block backend.  Fatal error if the
 * size isn't a non-zero multiple of 4KiB, or the total size exceeds
 * FLASH_SIZE_LIMIT.
 *
 * If pcms->flash[0] has a block backend, its memory is passed to
 * pc_isa_bios_init().  Merging several flash devices for isa-bios is
 * not supported.
 */
static void pc_system_flash_map(PCMachineState *pcms,
                                MemoryRegion *rom_memory)
{
    hwaddr total_size = 0;
    int i;
    BlockBackend *blk;
    int64_t size;
    PFlashCFI01 *system_flash;
    MemoryRegion *flash_mem;
    void *flash_ptr;
    int ret, flash_size;

    assert(PC_MACHINE_GET_CLASS(pcms)->pci_enabled);

    for (i = 0; i < ARRAY_SIZE(pcms->flash); i++) {
        system_flash = pcms->flash[i];
        blk = pflash_cfi01_get_blk(system_flash);
        if (!blk) {
            break;
        }
        size = blk_getlength(blk);
        if (size < 0) {
            error_report("can't get size of block device %s: %s",
                         blk_name(blk), strerror(-size));
            exit(1);
        }
        if (size == 0 || size % FLASH_SECTOR_SIZE != 0) {
            error_report("system firmware block device %s has invalid size "
                         "%" PRId64,
                         blk_name(blk), size);
            info_report("its size must be a non-zero multiple of 0x%x",
                        FLASH_SECTOR_SIZE);
            exit(1);
        }
        /* guard against overflow before exceeding the cumulative limit */
        if ((hwaddr)size != size
            || total_size > HWADDR_MAX - size
            || total_size + size > FLASH_SIZE_LIMIT) {
            error_report("combined size of system firmware exceeds "
                         "%" PRIu64 " bytes",
                         FLASH_SIZE_LIMIT);
            exit(1);
        }

        total_size += size;
        qdev_prop_set_uint32(DEVICE(system_flash), "num-blocks",
                             size / FLASH_SECTOR_SIZE);
        qdev_init_nofail(DEVICE(system_flash));
        /* map downward from 4GiB so flash[0] ends exactly at 4GiB */
        sysbus_mmio_map(SYS_BUS_DEVICE(system_flash), 0,
                        0x100000000ULL - total_size);

        if (i == 0) {
            flash_mem = pflash_cfi01_get_memory(system_flash);
            pc_isa_bios_init(rom_memory, flash_mem, size);

            /* Encrypt the pflash boot ROM */
            if (kvm_memcrypt_enabled()) {
                flash_ptr = memory_region_get_ram_ptr(flash_mem);
                flash_size = memory_region_size(flash_mem);
                ret = kvm_memcrypt_encrypt_data(flash_ptr, flash_size);
                if (ret) {
                    error_report("failed to encrypt pflash rom");
                    exit(1);
                }
            }
        }
    }
}

/*
 * Legacy (non-pflash) firmware setup: load the BIOS image as a ROM at
 * the top of the 4GiB address space and alias its tail into ISA space.
 * With isapc_ram_fw the "ROM" stays writable (ISA PC RAM firmware).
 */
static void old_pc_system_rom_init(MemoryRegion *rom_memory, bool isapc_ram_fw)
{
    char *filename;
    MemoryRegion *bios, *isa_bios;
    int bios_size, isa_bios_size;
    int ret;

    /* BIOS load */
    if (bios_name == NULL) {
        bios_name = BIOS_FILENAME;
    }
    filename = qemu_find_file(QEMU_FILE_TYPE_BIOS, bios_name);
    if (filename) {
        bios_size = get_image_size(filename);
    } else {
        bios_size = -1;
    }
    /* size must be a positive multiple of 64KB; the goto below jumps
     * into the error branch of the load that follows */
    if (bios_size <= 0 ||
        (bios_size % 65536) != 0) {
        goto bios_error;
    }
    bios = g_malloc(sizeof(*bios));
    memory_region_init_ram(bios, NULL, "pc.bios", bios_size, &error_fatal);
    if (!isapc_ram_fw) {
        memory_region_set_readonly(bios, true);
    }
    ret = rom_add_file_fixed(bios_name, (uint32_t)(-bios_size), -1);
    if (ret != 0) {
    bios_error:
        fprintf(stderr, "qemu: could not load PC BIOS '%s'\n", bios_name);
        exit(1);
    }
    g_free(filename);

    /* map the last 128KB of the BIOS in ISA space */
    isa_bios_size = MIN(bios_size, 128 * KiB);
    isa_bios = g_malloc(sizeof(*isa_bios));
    memory_region_init_alias(isa_bios, NULL, "isa-bios", bios,
                             bios_size - isa_bios_size, isa_bios_size);
    memory_region_add_subregion_overlap(rom_memory,
                                        0x100000 - isa_bios_size,
                                        isa_bios,
                                        1);
    if (!isapc_ram_fw) {
        memory_region_set_readonly(isa_bios, true);
    }

    /* map all the bios at the top of memory */
    memory_region_add_subregion(rom_memory,
                                (uint32_t)(-bios_size),
                                bios);
}

/*
 * Top-level firmware init: pick pflash mode (pflash0 backed) or legacy
 * ROM mode, wire legacy -drive if=pflash options to the machine
 * properties, and reject gap-y pflash configurations.
 */
void pc_system_firmware_init(PCMachineState *pcms,
                             MemoryRegion *rom_memory)
{
    PCMachineClass *pcmc = PC_MACHINE_GET_CLASS(pcms);
    int i;
    DriveInfo *pflash_drv;
    BlockBackend *pflash_blk[ARRAY_SIZE(pcms->flash)];
    Location loc;

    if (!pcmc->pci_enabled) {
        old_pc_system_rom_init(rom_memory, true);
        return;
    }

    /* Map legacy -drive if=pflash to machine properties */
    for (i = 0; i < ARRAY_SIZE(pcms->flash); i++) {
        pflash_blk[i] = pflash_cfi01_get_blk(pcms->flash[i]);
        pflash_drv = drive_get(IF_PFLASH, 0, i);
        if (!pflash_drv) {
            continue;
        }
        loc_push_none(&loc);
        qemu_opts_loc_restore(pflash_drv->opts);
        if (pflash_blk[i]) {
            error_report("clashes with -machine");
            exit(1);
        }
        pflash_blk[i] = blk_by_legacy_dinfo(pflash_drv);
        qdev_prop_set_drive(DEVICE(pcms->flash[i]),
                            "drive", pflash_blk[i], &error_fatal);
        loc_pop(&loc);
    }

    /* Reject gaps */
    for (i = 1; i < ARRAY_SIZE(pcms->flash); i++) {
        if (pflash_blk[i] && !pflash_blk[i - 1]) {
            error_report("pflash%d requires pflash%d", i, i - 1);
            exit(1);
        }
    }

    if (!pflash_blk[0]) {
        /* Machine property pflash0 not set, use ROM mode */
        old_pc_system_rom_init(rom_memory, false);
    } else {
        if (kvm_enabled() && !kvm_readonly_mem_enabled()) {
            /*
             * Older KVM cannot execute from device memory. So, flash
             * memory cannot be used unless the readonly memory kvm
             * capability is present.
             */
            error_report("pflash with kvm requires KVM readonly memory support");
            exit(1);
        }

        pc_system_flash_map(pcms, rom_memory);
    }

    pc_system_flash_cleanup_unused(pcms);
}
pmp-tool/PMP
src/qemu/src-pmp/slirp/src/vmstate.h
/* SPDX-License-Identifier: BSD-3-Clause */
/*
 * QEMU migration/snapshot declarations
 *
 * Copyright (c) 2009-2011 Red Hat, Inc.
 *
 * Original author: <NAME> <<EMAIL>>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * 1. Redistributions of source code must retain the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above
 *    copyright notice, this list of conditions and the following
 *    disclaimer in the documentation and/or other materials provided
 *    with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
 * OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef VMSTATE_H_
#define VMSTATE_H_

#include <unistd.h>
#include <stdint.h>
#include <stdbool.h>

#include "slirp.h"
#include "stream.h"

/* Two-level expansion so that macro arguments are expanded before
 * being stringized */
#define stringify(s) tostring(s)
#define tostring(s) #s

typedef struct VMStateInfo VMStateInfo;
typedef struct VMStateDescription VMStateDescription;
typedef struct VMStateField VMStateField;

int slirp_vmstate_save_state(SlirpOStream *f, const VMStateDescription *vmsd,
                             void *opaque);
int slirp_vmstate_load_state(SlirpIStream *f, const VMStateDescription *vmsd,
                             void *opaque, int version_id);

/* VMStateInfo allows customized migration of objects that don't fit in
 * any category in VMStateFlags. Additional information is always passed
 * into get and put in terms of field and vmdesc parameters. However
 * these two parameters should only be used in cases when customized
 * handling is needed, such as QTAILQ. For primitive data types such as
 * integer, field and vmdesc parameters should be ignored inside get/put. */
struct VMStateInfo {
    const char *name;
    int (*get)(SlirpIStream *f, void *pv, size_t size,
               const VMStateField *field);
    int (*put)(SlirpOStream *f, void *pv, size_t size,
               const VMStateField *field);
};

enum VMStateFlags {
    /* Ignored */
    VMS_SINGLE = 0x001,

    /* The struct member at opaque + VMStateField.offset is a pointer
     * to the actual field (e.g. struct a { uint8_t *b;
     * }). Dereference the pointer before using it as basis for
     * further pointer arithmetic (see e.g. VMS_ARRAY). Does not
     * affect the meaning of VMStateField.num_offset or
     * VMStateField.size_offset; see VMS_VARRAY* and VMS_VBUFFER for
     * those. */
    VMS_POINTER = 0x002,

    /* The field is an array of fixed size. VMStateField.num contains
     * the number of entries in the array. The size of each entry is
     * given by VMStateField.size and / or opaque +
     * VMStateField.size_offset; see VMS_VBUFFER and
     * VMS_MULTIPLY. Each array entry will be processed individually
     * (VMStateField.info.get()/put() if VMS_STRUCT is not set,
     * recursion into VMStateField.vmsd if VMS_STRUCT is set). May not
     * be combined with VMS_VARRAY*. */
    VMS_ARRAY = 0x004,

    /* The field is itself a struct, containing one or more
     * fields. Recurse into VMStateField.vmsd. Most useful in
     * combination with VMS_ARRAY / VMS_VARRAY*, recursing into each
     * array entry. */
    VMS_STRUCT = 0x008,

    /* The field is an array of variable size. The int32_t at opaque +
     * VMStateField.num_offset contains the number of entries in the
     * array. See the VMS_ARRAY description regarding array handling
     * in general. May not be combined with VMS_ARRAY or any other
     * VMS_VARRAY*. */
    VMS_VARRAY_INT32 = 0x010,

    /* Ignored */
    VMS_BUFFER = 0x020,

    /* The field is a (fixed-size or variable-size) array of pointers
     * (e.g. struct a { uint8_t *b[]; }). Dereference each array entry
     * before using it. Note: Does not imply any one of VMS_ARRAY /
     * VMS_VARRAY*; these need to be set explicitly. */
    VMS_ARRAY_OF_POINTER = 0x040,

    /* The field is an array of variable size. The uint16_t at opaque
     * + VMStateField.num_offset (subject to VMS_MULTIPLY_ELEMENTS)
     * contains the number of entries in the array. See the VMS_ARRAY
     * description regarding array handling in general. May not be
     * combined with VMS_ARRAY or any other VMS_VARRAY*. */
    VMS_VARRAY_UINT16 = 0x080,

    /* The size of the individual entries (a single array entry if
     * VMS_ARRAY or any of VMS_VARRAY* are set, or the field itself if
     * neither is set) is variable (i.e. not known at compile-time),
     * but the same for all entries. Use the int32_t at opaque +
     * VMStateField.size_offset (subject to VMS_MULTIPLY) to determine
     * the size of each (and every) entry. */
    VMS_VBUFFER = 0x100,

    /* Multiply the entry size given by the int32_t at opaque +
     * VMStateField.size_offset (see VMS_VBUFFER description) with
     * VMStateField.size to determine the number of bytes to be
     * allocated. Only valid in combination with VMS_VBUFFER. */
    VMS_MULTIPLY = 0x200,

    /* The field is an array of variable size. The uint8_t at opaque +
     * VMStateField.num_offset (subject to VMS_MULTIPLY_ELEMENTS)
     * contains the number of entries in the array. See the VMS_ARRAY
     * description regarding array handling in general. May not be
     * combined with VMS_ARRAY or any other VMS_VARRAY*. */
    VMS_VARRAY_UINT8 = 0x400,

    /* The field is an array of variable size. The uint32_t at opaque
     * + VMStateField.num_offset (subject to VMS_MULTIPLY_ELEMENTS)
     * contains the number of entries in the array. See the VMS_ARRAY
     * description regarding array handling in general. May not be
     * combined with VMS_ARRAY or any other VMS_VARRAY*. */
    VMS_VARRAY_UINT32 = 0x800,

    /* Fail loading the serialised VM state if this field is missing
     * from the input. */
    VMS_MUST_EXIST = 0x1000,

    /* When loading serialised VM state, allocate memory for the
     * (entire) field. Only valid in combination with
     * VMS_POINTER. Note: Not all combinations with other flags are
     * currently supported, e.g. VMS_ALLOC|VMS_ARRAY_OF_POINTER won't
     * cause the individual entries to be allocated. */
    VMS_ALLOC = 0x2000,

    /* Multiply the number of entries given by the integer at opaque +
     * VMStateField.num_offset (see VMS_VARRAY*) with VMStateField.num
     * to determine the number of entries in the array. Only valid in
     * combination with one of VMS_VARRAY*. */
    VMS_MULTIPLY_ELEMENTS = 0x4000,

    /* A structure field that is like VMS_STRUCT, but uses
     * VMStateField.struct_version_id to tell which version of the
     * structure we are referencing to use. */
    VMS_VSTRUCT = 0x8000,
};

/* Description of one migrated field of a state struct */
struct VMStateField {
    const char *name;
    size_t offset;
    size_t size;
    size_t start;
    int num;
    size_t num_offset;
    size_t size_offset;
    const VMStateInfo *info;
    enum VMStateFlags flags;
    const VMStateDescription *vmsd;
    int version_id;
    int struct_version_id;
    bool (*field_exists)(void *opaque, int version_id);
};

/* Description of a migrated state struct: version, hooks, field list */
struct VMStateDescription {
    const char *name;
    int version_id;
    int (*pre_load)(void *opaque);
    int (*post_load)(void *opaque, int version_id);
    int (*pre_save)(void *opaque);
    VMStateField *fields;
};

extern const VMStateInfo slirp_vmstate_info_int16;
extern const VMStateInfo slirp_vmstate_info_int32;
extern const VMStateInfo slirp_vmstate_info_uint8;
extern const VMStateInfo slirp_vmstate_info_uint16;
extern const VMStateInfo slirp_vmstate_info_uint32;

/** Put this in the stream when migrating a null pointer.*/
#define VMS_NULLPTR_MARKER (0x30U) /* '0' */
extern const VMStateInfo slirp_vmstate_info_nullptr;

extern const VMStateInfo slirp_vmstate_info_buffer;
extern const VMStateInfo slirp_vmstate_info_tmp;

/* Compile-time type checks: each yields 0 but fails to compile when the
 * field's type does not match the declared _type */
#define type_check_array(t1,t2,n) ((t1(*)[n])0 - (t2*)0)
#define type_check_pointer(t1,t2) ((t1**)0 - (t2*)0)
#define typeof_field(type, field) typeof(((type *)0)->field)
#define type_check(t1,t2) ((t1*)0 - (t2*)0)

#define vmstate_offset_value(_state, _field, _type)                  \
    (offsetof(_state, _field) +                                      \
     type_check(_type, typeof_field(_state, _field)))

#define vmstate_offset_pointer(_state, _field, _type)                \
    (offsetof(_state, _field) +                                      \
     type_check_pointer(_type, typeof_field(_state, _field)))

#define vmstate_offset_array(_state, _field, _type, _num)            \
    (offsetof(_state, _field) +                                      \
     type_check_array(_type, typeof_field(_state, _field), _num))

#define vmstate_offset_buffer(_state, _field)                        \
    vmstate_offset_array(_state, _field, uint8_t,                    \
                         sizeof(typeof_field(_state, _field)))

/* In the macros below, if there is a _version, that means the macro's
 * field will be processed only if the version being received is >=
 * the _version specified.  In general, if you add a new field, you
 * would increment the structure's version and put that version
 * number into the new field so it would only be processed with the
 * new version.
 *
 * In particular, for VMSTATE_STRUCT() and friends the _version does
 * *NOT* pick the version of the sub-structure.  It works just as
 * specified above.  The version of the top-level structure received
 * is passed down to all sub-structures.  This means that the
 * sub-structures must have version that are compatible with all the
 * structures that use them.
 *
 * If you want to specify the version of the sub-structure, use
 * VMSTATE_VSTRUCT(), which allows the specific sub-structure version
 * to be directly specified.
 */

#define VMSTATE_SINGLE_TEST(_field, _state, _test, _version, _info, _type) { \
    .name         = (stringify(_field)),                             \
    .version_id   = (_version),                                      \
    .field_exists = (_test),                                         \
    .size         = sizeof(_type),                                   \
    .info         = &(_info),                                        \
    .flags        = VMS_SINGLE,                                      \
    .offset       = vmstate_offset_value(_state, _field, _type),     \
}

#define VMSTATE_ARRAY(_field, _state, _num, _version, _info, _type) {\
    .name       = (stringify(_field)),                               \
    .version_id = (_version),                                        \
    .num        = (_num),                                            \
    .info       = &(_info),                                          \
    .size       = sizeof(_type),                                     \
    .flags      = VMS_ARRAY,                                         \
    .offset     = vmstate_offset_array(_state, _field, _type, _num), \
}

#define VMSTATE_STRUCT_TEST(_field, _state, _test, _version, _vmsd, _type) { \
    .name         = (stringify(_field)),                             \
    .version_id   = (_version),                                      \
    .field_exists = (_test),                                         \
    .vmsd         = &(_vmsd),                                        \
    .size         = sizeof(_type),                                   \
    .flags        = VMS_STRUCT,                                      \
    .offset       = vmstate_offset_value(_state, _field, _type),     \
}

#define VMSTATE_STRUCT_POINTER_V(_field, _state, _version, _vmsd, _type) { \
    .name       = (stringify(_field)),                               \
    .version_id = (_version),                                        \
    .vmsd       = &(_vmsd),                                          \
    .size       = sizeof(_type *),                                   \
    .flags      = VMS_STRUCT|VMS_POINTER,                            \
    .offset     = vmstate_offset_pointer(_state, _field, _type),     \
}

#define VMSTATE_STRUCT_ARRAY_TEST(_field, _state, _num, _test, _version, _vmsd, _type) { \
    .name         = (stringify(_field)),                             \
    .num          = (_num),                                          \
    .field_exists = (_test),                                         \
    .version_id   = (_version),                                      \
    .vmsd         = &(_vmsd),                                        \
    .size         = sizeof(_type),                                   \
    .flags        = VMS_STRUCT|VMS_ARRAY,                            \
    .offset       = vmstate_offset_array(_state, _field, _type, _num),\
}

#define VMSTATE_STATIC_BUFFER(_field, _state, _version, _test, _start, _size) { \
    .name         = (stringify(_field)),                             \
    .version_id   = (_version),                                      \
    .field_exists = (_test),                                         \
    .size         = (_size - _start),                                \
    .info         = &slirp_vmstate_info_buffer,                      \
    .flags        = VMS_BUFFER,                                      \
    .offset       = vmstate_offset_buffer(_state, _field) + _start,  \
}

#define VMSTATE_VBUFFER_UINT32(_field, _state, _version, _test, _field_size) { \
    .name         = (stringify(_field)),                             \
    .version_id   = (_version),                                      \
    .field_exists = (_test),                                         \
    .size_offset  = vmstate_offset_value(_state, _field_size, uint32_t),\
    .info         = &slirp_vmstate_info_buffer,                      \
    .flags        = VMS_VBUFFER|VMS_POINTER,                         \
    .offset       = offsetof(_state, _field),                        \
}

/* Compile-time assertion helper: the bitfield width is -1 (invalid)
 * when x is true, so compilation fails */
#define QEMU_BUILD_BUG_ON_STRUCT(x) \
    struct { \
        int:(x) ? -1 : 1; \
    }

#define QEMU_BUILD_BUG_ON_ZERO(x) (sizeof(QEMU_BUILD_BUG_ON_STRUCT(x)) - \
                                   sizeof(QEMU_BUILD_BUG_ON_STRUCT(x)))

/* Allocate a temporary of type 'tmp_type', set tmp->parent to _state
 * and execute the vmsd on the temporary.  Note that we're working with
 * the whole of _state here, not a field within it.
 * We compile time check that:
 *    That _tmp_type contains a 'parent' member that's a pointer to the
 *        '_state' type
 *    That the pointer is right at the start of _tmp_type.
 */
#define VMSTATE_WITH_TMP(_state, _tmp_type, _vmsd) {                 \
    .name         = "tmp",                                           \
    .size         = sizeof(_tmp_type) +                              \
                    QEMU_BUILD_BUG_ON_ZERO(offsetof(_tmp_type, parent) != 0) + \
                    type_check_pointer(_state,                       \
                        typeof_field(_tmp_type, parent)),            \
    .vmsd         = &(_vmsd),                                        \
    .info         = &slirp_vmstate_info_tmp,                         \
}

/* _f : field name
   _s : struct state name
   _v : version (0 when versioning is not needed) */

#define VMSTATE_SINGLE(_field, _state, _version, _info, _type)        \
    VMSTATE_SINGLE_TEST(_field, _state, NULL, _version, _info, _type)

#define VMSTATE_STRUCT(_field, _state, _version, _vmsd, _type)        \
    VMSTATE_STRUCT_TEST(_field, _state, NULL, _version, _vmsd, _type)

#define VMSTATE_STRUCT_POINTER(_field, _state, _vmsd, _type)          \
    VMSTATE_STRUCT_POINTER_V(_field, _state, 0, _vmsd, _type)

#define VMSTATE_STRUCT_ARRAY(_field, _state, _num, _version, _vmsd, _type) \
    VMSTATE_STRUCT_ARRAY_TEST(_field, _state, _num, NULL, _version,   \
                              _vmsd, _type)

#define VMSTATE_INT16_V(_f, _s, _v)                                   \
    VMSTATE_SINGLE(_f, _s, _v, slirp_vmstate_info_int16, int16_t)
#define VMSTATE_INT32_V(_f, _s, _v)                                   \
    VMSTATE_SINGLE(_f, _s, _v, slirp_vmstate_info_int32, int32_t)

#define VMSTATE_UINT8_V(_f, _s, _v)                                   \
    VMSTATE_SINGLE(_f, _s, _v, slirp_vmstate_info_uint8, uint8_t)
#define VMSTATE_UINT16_V(_f, _s, _v)                                  \
    VMSTATE_SINGLE(_f, _s, _v, slirp_vmstate_info_uint16, uint16_t)
#define VMSTATE_UINT32_V(_f, _s, _v)                                  \
    VMSTATE_SINGLE(_f, _s, _v, slirp_vmstate_info_uint32, uint32_t)

#define VMSTATE_INT16(_f, _s)                                         \
    VMSTATE_INT16_V(_f, _s, 0)
#define VMSTATE_INT32(_f, _s)                                         \
    VMSTATE_INT32_V(_f, _s, 0)

#define VMSTATE_UINT8(_f, _s)                                         \
    VMSTATE_UINT8_V(_f, _s, 0)
#define VMSTATE_UINT16(_f, _s)                                        \
    VMSTATE_UINT16_V(_f, _s, 0)
#define VMSTATE_UINT32(_f, _s)                                        \
    VMSTATE_UINT32_V(_f, _s, 0)

#define VMSTATE_UINT16_TEST(_f, _s, _t)                               \
    VMSTATE_SINGLE_TEST(_f, _s, _t, 0, slirp_vmstate_info_uint16, uint16_t)

#define VMSTATE_UINT32_TEST(_f, _s, _t)                               \
    VMSTATE_SINGLE_TEST(_f, _s, _t, 0, slirp_vmstate_info_uint32, uint32_t)

#define VMSTATE_INT16_ARRAY_V(_f, _s, _n, _v)                         \
    VMSTATE_ARRAY(_f, _s, _n, _v, slirp_vmstate_info_int16, int16_t)

#define VMSTATE_INT16_ARRAY(_f, _s, _n)                               \
    VMSTATE_INT16_ARRAY_V(_f, _s, _n, 0)

#define VMSTATE_BUFFER_V(_f, _s, _v)                                  \
    VMSTATE_STATIC_BUFFER(_f, _s, _v, NULL, 0, sizeof(typeof_field(_s, _f)))

#define VMSTATE_BUFFER(_f, _s)                                        \
    VMSTATE_BUFFER_V(_f, _s, 0)

/* Zero-filled terminator for a VMStateField array */
#define VMSTATE_END_OF_LIST()                                         \
    {}

#endif
pmp-tool/PMP
src/qemu/src-pmp/include/hw/ppc/pnv_psi.h
<filename>src/qemu/src-pmp/include/hw/ppc/pnv_psi.h /* * QEMU PowerPC PowerNV Processor Service Interface (PSI) model * * Copyright (c) 2015-2017, IBM Corporation. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #ifndef _PPC_PNV_PSI_H #define _PPC_PNV_PSI_H #include "hw/sysbus.h" #include "hw/ppc/xics.h" #include "hw/ppc/xive.h" #define TYPE_PNV_PSI "pnv-psi" #define PNV_PSI(obj) \ OBJECT_CHECK(PnvPsi, (obj), TYPE_PNV_PSI) #define PSIHB_XSCOM_MAX 0x20 typedef struct PnvPsi { SysBusDevice parent; MemoryRegion regs_mr; uint64_t bar; /* FSP region not supported */ /* MemoryRegion fsp_mr; */ uint64_t fsp_bar; /* Interrupt generation */ qemu_irq *qirqs; /* Registers */ uint64_t regs[PSIHB_XSCOM_MAX]; MemoryRegion xscom_regs; } PnvPsi; #define TYPE_PNV8_PSI TYPE_PNV_PSI "-POWER8" #define PNV8_PSI(obj) \ OBJECT_CHECK(Pnv8Psi, (obj), TYPE_PNV8_PSI) typedef struct Pnv8Psi { PnvPsi parent; ICSState ics; } Pnv8Psi; #define TYPE_PNV9_PSI TYPE_PNV_PSI "-POWER9" #define PNV9_PSI(obj) \ OBJECT_CHECK(Pnv9Psi, (obj), TYPE_PNV9_PSI) typedef struct Pnv9Psi { PnvPsi parent; XiveSource source; } Pnv9Psi; #define PNV_PSI_CLASS(klass) \ OBJECT_CLASS_CHECK(PnvPsiClass, (klass), TYPE_PNV_PSI) #define PNV_PSI_GET_CLASS(obj) \ OBJECT_GET_CLASS(PnvPsiClass, (obj), TYPE_PNV_PSI) typedef struct PnvPsiClass { SysBusDeviceClass parent_class; int chip_type; uint32_t xscom_pcba; uint32_t xscom_size; 
uint64_t bar_mask; void (*irq_set)(PnvPsi *psi, int, bool state); } PnvPsiClass; /* The PSI and FSP interrupts are muxed on the same IRQ number */ typedef enum PnvPsiIrq { PSIHB_IRQ_PSI, /* internal use only */ PSIHB_IRQ_FSP, /* internal use only */ PSIHB_IRQ_OCC, PSIHB_IRQ_FSI, PSIHB_IRQ_LPC_I2C, PSIHB_IRQ_LOCAL_ERR, PSIHB_IRQ_EXTERNAL, } PnvPsiIrq; #define PSI_NUM_INTERRUPTS 6 void pnv_psi_irq_set(PnvPsi *psi, int irq, bool state); /* P9 PSI Interrupts */ #define PSIHB9_IRQ_PSI 0 #define PSIHB9_IRQ_OCC 1 #define PSIHB9_IRQ_FSI 2 #define PSIHB9_IRQ_LPCHC 3 #define PSIHB9_IRQ_LOCAL_ERR 4 #define PSIHB9_IRQ_GLOBAL_ERR 5 #define PSIHB9_IRQ_TPM 6 #define PSIHB9_IRQ_LPC_SIRQ0 7 #define PSIHB9_IRQ_LPC_SIRQ1 8 #define PSIHB9_IRQ_LPC_SIRQ2 9 #define PSIHB9_IRQ_LPC_SIRQ3 10 #define PSIHB9_IRQ_SBE_I2C 11 #define PSIHB9_IRQ_DIO 12 #define PSIHB9_IRQ_PSU 13 #define PSIHB9_NUM_IRQS 14 void pnv_psi_pic_print_info(Pnv9Psi *psi, Monitor *mon); #endif /* _PPC_PNV_PSI_H */
pmp-tool/PMP
src/qemu/src-pmp/tests/libqos/virtio-blk.c
/* * libqos driver framework * * Copyright (c) 2018 <NAME> <<EMAIL>> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2 as published by the Free Software Foundation. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/> */ #include "qemu/osdep.h" #include "libqtest.h" #include "standard-headers/linux/virtio_blk.h" #include "libqos/qgraph.h" #include "libqos/virtio-blk.h" #define PCI_SLOT 0x04 #define PCI_FN 0x00 /* virtio-blk-device */ static void *qvirtio_blk_get_driver(QVirtioBlk *v_blk, const char *interface) { if (!g_strcmp0(interface, "virtio-blk")) { return v_blk; } if (!g_strcmp0(interface, "virtio")) { return v_blk->vdev; } fprintf(stderr, "%s not present in virtio-blk-device\n", interface); g_assert_not_reached(); } static void *qvirtio_blk_device_get_driver(void *object, const char *interface) { QVirtioBlkDevice *v_blk = object; return qvirtio_blk_get_driver(&v_blk->blk, interface); } static void *virtio_blk_device_create(void *virtio_dev, QGuestAllocator *t_alloc, void *addr) { QVirtioBlkDevice *virtio_blk = g_new0(QVirtioBlkDevice, 1); QVirtioBlk *interface = &virtio_blk->blk; interface->vdev = virtio_dev; virtio_blk->obj.get_driver = qvirtio_blk_device_get_driver; return &virtio_blk->obj; } /* virtio-blk-pci */ static void *qvirtio_blk_pci_get_driver(void *object, const char *interface) { QVirtioBlkPCI *v_blk = object; if (!g_strcmp0(interface, "pci-device")) { return v_blk->pci_vdev.pdev; } return qvirtio_blk_get_driver(&v_blk->blk, interface); } static void *virtio_blk_pci_create(void *pci_bus, QGuestAllocator *t_alloc, 
void *addr) { QVirtioBlkPCI *virtio_blk = g_new0(QVirtioBlkPCI, 1); QVirtioBlk *interface = &virtio_blk->blk; QOSGraphObject *obj = &virtio_blk->pci_vdev.obj; virtio_pci_init(&virtio_blk->pci_vdev, pci_bus, addr); interface->vdev = &virtio_blk->pci_vdev.vdev; g_assert_cmphex(interface->vdev->device_type, ==, VIRTIO_ID_BLOCK); obj->get_driver = qvirtio_blk_pci_get_driver; return obj; } static void virtio_blk_register_nodes(void) { /* FIXME: every test using these two nodes needs to setup a * -drive,id=drive0 otherwise QEMU is not going to start. * Therefore, we do not include "produces" edge for virtio * and pci-device yet. */ char *arg = g_strdup_printf("id=drv0,drive=drive0,addr=%x.%x", PCI_SLOT, PCI_FN); QPCIAddress addr = { .devfn = QPCI_DEVFN(PCI_SLOT, PCI_FN), }; QOSGraphEdgeOptions opts = { }; /* virtio-blk-device */ opts.extra_device_opts = "drive=drive0"; qos_node_create_driver("virtio-blk-device", virtio_blk_device_create); qos_node_consumes("virtio-blk-device", "virtio-bus", &opts); qos_node_produces("virtio-blk-device", "virtio-blk"); /* virtio-blk-pci */ opts.extra_device_opts = arg; add_qpci_address(&opts, &addr); qos_node_create_driver("virtio-blk-pci", virtio_blk_pci_create); qos_node_consumes("virtio-blk-pci", "pci-bus", &opts); qos_node_produces("virtio-blk-pci", "virtio-blk"); g_free(arg); } libqos_init(virtio_blk_register_nodes);
pmp-tool/PMP
src/qemu/src-pmp/crypto/afsplit.c
<reponame>pmp-tool/PMP<filename>src/qemu/src-pmp/crypto/afsplit.c /* * QEMU Crypto anti forensic information splitter * * Copyright (c) 2015-2016 Red Hat, Inc. * * Derived from cryptsetup package lib/luks1/af.c * * Copyright (C) 2004, <NAME> <<EMAIL>> * Copyright (C) 2009-2012, Red Hat, Inc. All rights reserved. * * This program is free software; you can redistribute it and/or * modify it under the terms of the GNU General Public License * as published by the Free Software Foundation; either version 2 * of the License, or (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "qemu/bswap.h" #include "crypto/afsplit.h" #include "crypto/random.h" static void qcrypto_afsplit_xor(size_t blocklen, const uint8_t *in1, const uint8_t *in2, uint8_t *out) { size_t i; for (i = 0; i < blocklen; i++) { out[i] = in1[i] ^ in2[i]; } } static int qcrypto_afsplit_hash(QCryptoHashAlgorithm hash, size_t blocklen, uint8_t *block, Error **errp) { size_t digestlen = qcrypto_hash_digest_len(hash); size_t hashcount = blocklen / digestlen; size_t finallen = blocklen % digestlen; uint32_t i; if (finallen) { hashcount++; } else { finallen = digestlen; } for (i = 0; i < hashcount; i++) { uint8_t *out = NULL; size_t outlen = 0; uint32_t iv = cpu_to_be32(i); struct iovec in[] = { { .iov_base = &iv, .iov_len = sizeof(iv) }, { .iov_base = block + (i * digestlen), .iov_len = (i == (hashcount - 1)) ? 
finallen : digestlen }, }; if (qcrypto_hash_bytesv(hash, in, G_N_ELEMENTS(in), &out, &outlen, errp) < 0) { return -1; } assert(outlen == digestlen); memcpy(block + (i * digestlen), out, (i == (hashcount - 1)) ? finallen : digestlen); g_free(out); } return 0; } int qcrypto_afsplit_encode(QCryptoHashAlgorithm hash, size_t blocklen, uint32_t stripes, const uint8_t *in, uint8_t *out, Error **errp) { uint8_t *block = g_new0(uint8_t, blocklen); size_t i; int ret = -1; for (i = 0; i < (stripes - 1); i++) { if (qcrypto_random_bytes(out + (i * blocklen), blocklen, errp) < 0) { goto cleanup; } qcrypto_afsplit_xor(blocklen, out + (i * blocklen), block, block); if (qcrypto_afsplit_hash(hash, blocklen, block, errp) < 0) { goto cleanup; } } qcrypto_afsplit_xor(blocklen, in, block, out + (i * blocklen)); ret = 0; cleanup: g_free(block); return ret; } int qcrypto_afsplit_decode(QCryptoHashAlgorithm hash, size_t blocklen, uint32_t stripes, const uint8_t *in, uint8_t *out, Error **errp) { uint8_t *block = g_new0(uint8_t, blocklen); size_t i; int ret = -1; for (i = 0; i < (stripes - 1); i++) { qcrypto_afsplit_xor(blocklen, in + (i * blocklen), block, block); if (qcrypto_afsplit_hash(hash, blocklen, block, errp) < 0) { goto cleanup; } } qcrypto_afsplit_xor(blocklen, in + (i * blocklen), block, out); ret = 0; cleanup: g_free(block); return ret; }
pmp-tool/PMP
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/int-add/test_msa_adds_a_h.c
/* * Test program for MSA instruction ADDS_A.H * * Copyright (C) 2019 RT-RK Computer Based Systems LLC * Copyright (C) 2019 <NAME> <<EMAIL>> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. * */ #include <sys/time.h> #include <stdint.h> #include "../../../../include/wrappers_msa.h" #include "../../../../include/test_inputs_128.h" #include "../../../../include/test_utils_128.h" #define TEST_COUNT_TOTAL ( \ (PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT) + \ (RANDOM_INPUTS_SHORT_COUNT) * (RANDOM_INPUTS_SHORT_COUNT)) int32_t main(void) { char *instruction_name = "ADDS_A.H"; int32_t ret; uint32_t i, j; struct timeval start, end; double elapsed_time; uint64_t b128_result[TEST_COUNT_TOTAL][2]; uint64_t b128_expect[TEST_COUNT_TOTAL][2] = { { 0x0002000200020002ULL, 0x0002000200020002ULL, }, /* 0 */ { 0x0001000100010001ULL, 0x0001000100010001ULL, }, { 0x5557555755575557ULL, 0x5557555755575557ULL, }, { 0x5556555655565556ULL, 0x5556555655565556ULL, }, { 0x3335333533353335ULL, 0x3335333533353335ULL, }, { 0x3334333433343334ULL, 0x3334333433343334ULL, }, { 0x1c7338e471c91c73ULL, 0x38e471c91c7338e4ULL, }, { 0x1c7238e571c81c72ULL, 0x38e571c81c7238e5ULL, }, { 0x0001000100010001ULL, 0x0001000100010001ULL, }, /* 8 */ { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x5556555655565556ULL, 0x5556555655565556ULL, }, { 0x5555555555555555ULL, 0x5555555555555555ULL, }, { 0x3334333433343334ULL, 
0x3334333433343334ULL, }, { 0x3333333333333333ULL, 0x3333333333333333ULL, }, { 0x1c7238e371c81c72ULL, 0x38e371c81c7238e3ULL, }, { 0x1c7138e471c71c71ULL, 0x38e471c71c7138e4ULL, }, { 0x5557555755575557ULL, 0x5557555755575557ULL, }, /* 16 */ { 0x5556555655565556ULL, 0x5556555655565556ULL, }, { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, }, { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, }, { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, }, { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, }, { 0x71c87fff7fff71c8ULL, 0x7fff7fff71c87fffULL, }, { 0x71c77fff7fff71c7ULL, 0x7fff7fff71c77fffULL, }, { 0x5556555655565556ULL, 0x5556555655565556ULL, }, /* 24 */ { 0x5555555555555555ULL, 0x5555555555555555ULL, }, { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, }, { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, }, { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, }, { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, }, { 0x71c77fff7fff71c7ULL, 0x7fff7fff71c77fffULL, }, { 0x71c67fff7fff71c6ULL, 0x7fff7fff71c67fffULL, }, { 0x3335333533353335ULL, 0x3335333533353335ULL, }, /* 32 */ { 0x3334333433343334ULL, 0x3334333433343334ULL, }, { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, }, { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, }, { 0x6668666866686668ULL, 0x6668666866686668ULL, }, { 0x6667666766676667ULL, 0x6667666766676667ULL, }, { 0x4fa66c177fff4fa6ULL, 0x6c177fff4fa66c17ULL, }, { 0x4fa56c187fff4fa5ULL, 0x6c187fff4fa56c18ULL, }, { 0x3334333433343334ULL, 0x3334333433343334ULL, }, /* 40 */ { 0x3333333333333333ULL, 0x3333333333333333ULL, }, { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, }, { 0x7fff7fff7fff7fffULL, 0x7fff7fff7fff7fffULL, }, { 0x6667666766676667ULL, 0x6667666766676667ULL, }, { 0x6666666666666666ULL, 0x6666666666666666ULL, }, { 0x4fa56c167fff4fa5ULL, 0x6c167fff4fa56c16ULL, }, { 0x4fa46c177fff4fa4ULL, 0x6c177fff4fa46c17ULL, }, { 0x1c7338e471c91c73ULL, 0x38e471c91c7338e4ULL, }, /* 48 */ { 0x1c7238e371c81c72ULL, 0x38e371c81c7238e3ULL, }, { 0x71c87fff7fff71c8ULL, 
0x7fff7fff71c87fffULL, }, { 0x71c77fff7fff71c7ULL, 0x7fff7fff71c77fffULL, }, { 0x4fa66c177fff4fa6ULL, 0x6c177fff4fa66c17ULL, }, { 0x4fa56c167fff4fa5ULL, 0x6c167fff4fa56c16ULL, }, { 0x38e471c67fff38e4ULL, 0x71c67fff38e471c6ULL, }, { 0x38e371c77fff38e3ULL, 0x71c77fff38e371c7ULL, }, { 0x1c7238e571c81c72ULL, 0x38e571c81c7238e5ULL, }, /* 56 */ { 0x1c7138e471c71c71ULL, 0x38e471c71c7138e4ULL, }, { 0x71c77fff7fff71c7ULL, 0x7fff7fff71c77fffULL, }, { 0x71c67fff7fff71c6ULL, 0x7fff7fff71c67fffULL, }, { 0x4fa56c187fff4fa5ULL, 0x6c187fff4fa56c18ULL, }, { 0x4fa46c177fff4fa4ULL, 0x6c177fff4fa46c17ULL, }, { 0x38e371c77fff38e3ULL, 0x71c77fff38e371c7ULL, }, { 0x38e271c87fff38e2ULL, 0x71c87fff38e271c8ULL, }, { 0x7fff326850c47fffULL, 0x7fff16bc030a7fffULL, }, /* 64 */ { 0x7bd8199775f57fffULL, 0x5e5e504416c47fffULL, }, { 0x7fff6a8a6e937fffULL, 0x733f445f565a7508ULL, }, { 0x7fff2f817fff72f2ULL, 0x7fff7fff58436d54ULL, }, { 0x7bd8199775f57fffULL, 0x5e5e504416c47fffULL, }, { 0x088400c67fff71f0ULL, 0x25ee7fff2a7e7fffULL, }, { 0x57e851b97fff7fffULL, 0x3acf7de76a147810ULL, }, { 0x749116b07fff56aaULL, 0x7fff7fff6bfd705cULL, }, { 0x7fff6a8a6e937fffULL, 0x733f445f565a7508ULL, }, /* 72 */ { 0x57e851b97fff7fffULL, 0x3acf7de76a147810ULL, }, { 0x7fff7fff7fff7fffULL, 0x4fb072027fff4a28ULL, }, { 0x7fff67a37fff7fffULL, 0x7fff7fff7fff4274ULL, }, { 0x7fff2f817fff72f2ULL, 0x7fff7fff58436d54ULL, }, { 0x749116b07fff56aaULL, 0x7fff7fff6bfd705cULL, }, }; gettimeofday(&start, NULL); for (i = 0; i < PATTERN_INPUTS_SHORT_COUNT; i++) { for (j = 0; j < PATTERN_INPUTS_SHORT_COUNT; j++) { do_msa_ADDS_A_H(b128_pattern[i], b128_pattern[j], b128_result[PATTERN_INPUTS_SHORT_COUNT * i + j]); } } for (i = 0; i < RANDOM_INPUTS_SHORT_COUNT; i++) { for (j = 0; j < RANDOM_INPUTS_SHORT_COUNT; j++) { do_msa_ADDS_A_H(b128_random[i], b128_random[j], b128_result[((PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT)) + RANDOM_INPUTS_SHORT_COUNT * i + j]); } } gettimeofday(&end, NULL); elapsed_time = (end.tv_sec - 
start.tv_sec) * 1000.0; elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time, &b128_result[0][0], &b128_expect[0][0]); return ret; }
pmp-tool/PMP
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/interleave/test_msa_ilvev_w.c
/* * Test program for MSA instruction ILVEV.W * * Copyright (C) 2019 Wave Computing, Inc. * Copyright (C) 2019 <NAME> <<EMAIL>> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. * */ #include <sys/time.h> #include <stdint.h> #include "../../../../include/wrappers_msa.h" #include "../../../../include/test_inputs_128.h" #include "../../../../include/test_utils_128.h" #define TEST_COUNT_TOTAL ( \ (PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT) + \ (RANDOM_INPUTS_SHORT_COUNT) * (RANDOM_INPUTS_SHORT_COUNT)) int32_t main(void) { char *instruction_name = "ILVEV.W"; int32_t ret; uint32_t i, j; struct timeval start, end; double elapsed_time; uint64_t b128_result[TEST_COUNT_TOTAL][2]; uint64_t b128_expect[TEST_COUNT_TOTAL][2] = { { 0xffffffffffffffffULL, 0xffffffffffffffffULL, }, /* 0 */ { 0xffffffff00000000ULL, 0xffffffff00000000ULL, }, { 0xffffffffaaaaaaaaULL, 0xffffffffaaaaaaaaULL, }, { 0xffffffff55555555ULL, 0xffffffff55555555ULL, }, { 0xffffffffccccccccULL, 0xffffffffccccccccULL, }, { 0xffffffff33333333ULL, 0xffffffff33333333ULL, }, { 0xffffffff8e38e38eULL, 0xffffffffe38e38e3ULL, }, { 0xffffffff71c71c71ULL, 0xffffffff1c71c71cULL, }, { 0x00000000ffffffffULL, 0x00000000ffffffffULL, }, /* 8 */ { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x00000000aaaaaaaaULL, 0x00000000aaaaaaaaULL, }, { 0x0000000055555555ULL, 0x0000000055555555ULL, }, { 0x00000000ccccccccULL, 
0x00000000ccccccccULL, }, { 0x0000000033333333ULL, 0x0000000033333333ULL, }, { 0x000000008e38e38eULL, 0x00000000e38e38e3ULL, }, { 0x0000000071c71c71ULL, 0x000000001c71c71cULL, }, { 0xaaaaaaaaffffffffULL, 0xaaaaaaaaffffffffULL, }, /* 16 */ { 0xaaaaaaaa00000000ULL, 0xaaaaaaaa00000000ULL, }, { 0xaaaaaaaaaaaaaaaaULL, 0xaaaaaaaaaaaaaaaaULL, }, { 0xaaaaaaaa55555555ULL, 0xaaaaaaaa55555555ULL, }, { 0xaaaaaaaaccccccccULL, 0xaaaaaaaaccccccccULL, }, { 0xaaaaaaaa33333333ULL, 0xaaaaaaaa33333333ULL, }, { 0xaaaaaaaa8e38e38eULL, 0xaaaaaaaae38e38e3ULL, }, { 0xaaaaaaaa71c71c71ULL, 0xaaaaaaaa1c71c71cULL, }, { 0x55555555ffffffffULL, 0x55555555ffffffffULL, }, /* 24 */ { 0x5555555500000000ULL, 0x5555555500000000ULL, }, { 0x55555555aaaaaaaaULL, 0x55555555aaaaaaaaULL, }, { 0x5555555555555555ULL, 0x5555555555555555ULL, }, { 0x55555555ccccccccULL, 0x55555555ccccccccULL, }, { 0x5555555533333333ULL, 0x5555555533333333ULL, }, { 0x555555558e38e38eULL, 0x55555555e38e38e3ULL, }, { 0x5555555571c71c71ULL, 0x555555551c71c71cULL, }, { 0xccccccccffffffffULL, 0xccccccccffffffffULL, }, /* 32 */ { 0xcccccccc00000000ULL, 0xcccccccc00000000ULL, }, { 0xccccccccaaaaaaaaULL, 0xccccccccaaaaaaaaULL, }, { 0xcccccccc55555555ULL, 0xcccccccc55555555ULL, }, { 0xccccccccccccccccULL, 0xccccccccccccccccULL, }, { 0xcccccccc33333333ULL, 0xcccccccc33333333ULL, }, { 0xcccccccc8e38e38eULL, 0xcccccccce38e38e3ULL, }, { 0xcccccccc71c71c71ULL, 0xcccccccc1c71c71cULL, }, { 0x33333333ffffffffULL, 0x33333333ffffffffULL, }, /* 40 */ { 0x3333333300000000ULL, 0x3333333300000000ULL, }, { 0x33333333aaaaaaaaULL, 0x33333333aaaaaaaaULL, }, { 0x3333333355555555ULL, 0x3333333355555555ULL, }, { 0x33333333ccccccccULL, 0x33333333ccccccccULL, }, { 0x3333333333333333ULL, 0x3333333333333333ULL, }, { 0x333333338e38e38eULL, 0x33333333e38e38e3ULL, }, { 0x3333333371c71c71ULL, 0x333333331c71c71cULL, }, { 0x8e38e38effffffffULL, 0xe38e38e3ffffffffULL, }, /* 48 */ { 0x8e38e38e00000000ULL, 0xe38e38e300000000ULL, }, { 0x8e38e38eaaaaaaaaULL, 
0xe38e38e3aaaaaaaaULL, }, { 0x8e38e38e55555555ULL, 0xe38e38e355555555ULL, }, { 0x8e38e38eccccccccULL, 0xe38e38e3ccccccccULL, }, { 0x8e38e38e33333333ULL, 0xe38e38e333333333ULL, }, { 0x8e38e38e8e38e38eULL, 0xe38e38e3e38e38e3ULL, }, { 0x8e38e38e71c71c71ULL, 0xe38e38e31c71c71cULL, }, { 0x71c71c71ffffffffULL, 0x1c71c71cffffffffULL, }, /* 56 */ { 0x71c71c7100000000ULL, 0x1c71c71c00000000ULL, }, { 0x71c71c71aaaaaaaaULL, 0x1c71c71caaaaaaaaULL, }, { 0x71c71c7155555555ULL, 0x1c71c71c55555555ULL, }, { 0x71c71c71ccccccccULL, 0x1c71c71cccccccccULL, }, { 0x71c71c7133333333ULL, 0x1c71c71c33333333ULL, }, { 0x71c71c718e38e38eULL, 0x1c71c71ce38e38e3ULL, }, { 0x71c71c7171c71c71ULL, 0x1c71c71c1c71c71cULL, }, { 0x2862554028625540ULL, 0xfe7bb00cfe7bb00cULL, }, /* 64 */ { 0x286255404d93c708ULL, 0xfe7bb00c153f52fcULL, }, { 0x28625540b9cf8b80ULL, 0xfe7bb00cab2b2514ULL, }, { 0x286255405e31e24eULL, 0xfe7bb00ca942e2a0ULL, }, { 0x4d93c70828625540ULL, 0x153f52fcfe7bb00cULL, }, { 0x4d93c7084d93c708ULL, 0x153f52fc153f52fcULL, }, { 0x4d93c708b9cf8b80ULL, 0x153f52fcab2b2514ULL, }, { 0x4d93c7085e31e24eULL, 0x153f52fca942e2a0ULL, }, { 0xb9cf8b8028625540ULL, 0xab2b2514fe7bb00cULL, }, /* 72 */ { 0xb9cf8b804d93c708ULL, 0xab2b2514153f52fcULL, }, { 0xb9cf8b80b9cf8b80ULL, 0xab2b2514ab2b2514ULL, }, { 0xb9cf8b805e31e24eULL, 0xab2b2514a942e2a0ULL, }, { 0x5e31e24e28625540ULL, 0xa942e2a0fe7bb00cULL, }, { 0x5e31e24e4d93c708ULL, 0xa942e2a0153f52fcULL, }, { 0x5e31e24eb9cf8b80ULL, 0xa942e2a0ab2b2514ULL, }, { 0x5e31e24e5e31e24eULL, 0xa942e2a0a942e2a0ULL, }, }; gettimeofday(&start, NULL); for (i = 0; i < PATTERN_INPUTS_SHORT_COUNT; i++) { for (j = 0; j < PATTERN_INPUTS_SHORT_COUNT; j++) { do_msa_ILVEV_W(b128_pattern[i], b128_pattern[j], b128_result[PATTERN_INPUTS_SHORT_COUNT * i + j]); } } for (i = 0; i < RANDOM_INPUTS_SHORT_COUNT; i++) { for (j = 0; j < RANDOM_INPUTS_SHORT_COUNT; j++) { do_msa_ILVEV_W(b128_random[i], b128_random[j], b128_result[((PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT)) + 
RANDOM_INPUTS_SHORT_COUNT * i + j]); } } gettimeofday(&end, NULL); elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time, &b128_result[0][0], &b128_expect[0][0]); return ret; }
pmp-tool/PMP
src/qemu/src-pmp/hw/timer/m41t80.c
/* * M41T80 serial rtc emulation * * Copyright (c) 2018 <NAME> * * This work is licensed under the GNU GPL license version 2 or later. * */ #include "qemu/osdep.h" #include "qemu/log.h" #include "qemu/timer.h" #include "qemu/bcd.h" #include "hw/i2c/i2c.h" #define TYPE_M41T80 "m41t80" #define M41T80(obj) OBJECT_CHECK(M41t80State, (obj), TYPE_M41T80) typedef struct M41t80State { I2CSlave parent_obj; int8_t addr; } M41t80State; static void m41t80_realize(DeviceState *dev, Error **errp) { M41t80State *s = M41T80(dev); s->addr = -1; } static int m41t80_send(I2CSlave *i2c, uint8_t data) { M41t80State *s = M41T80(i2c); if (s->addr < 0) { s->addr = data; } else { s->addr++; } return 0; } static uint8_t m41t80_recv(I2CSlave *i2c) { M41t80State *s = M41T80(i2c); struct tm now; qemu_timeval tv; if (s->addr < 0) { s->addr = 0; } if (s->addr >= 1 && s->addr <= 7) { qemu_get_timedate(&now, -1); } switch (s->addr++) { case 0: qemu_gettimeofday(&tv); return to_bcd(tv.tv_usec / 10000); case 1: return to_bcd(now.tm_sec); case 2: return to_bcd(now.tm_min); case 3: return to_bcd(now.tm_hour); case 4: return to_bcd(now.tm_wday); case 5: return to_bcd(now.tm_mday); case 6: return to_bcd(now.tm_mon + 1); case 7: return to_bcd(now.tm_year % 100); case 8 ... 
19: qemu_log_mask(LOG_UNIMP, "%s: unimplemented register: %d\n", __func__, s->addr - 1); return 0; default: qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid register: %d\n", __func__, s->addr - 1); return 0; } } static int m41t80_event(I2CSlave *i2c, enum i2c_event event) { M41t80State *s = M41T80(i2c); if (event == I2C_START_SEND) { s->addr = -1; } return 0; } static void m41t80_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); I2CSlaveClass *sc = I2C_SLAVE_CLASS(klass); dc->realize = m41t80_realize; sc->send = m41t80_send; sc->recv = m41t80_recv; sc->event = m41t80_event; } static const TypeInfo m41t80_info = { .name = TYPE_M41T80, .parent = TYPE_I2C_SLAVE, .instance_size = sizeof(M41t80State), .class_init = m41t80_class_init, }; static void m41t80_register_types(void) { type_register_static(&m41t80_info); } type_init(m41t80_register_types)
pmp-tool/PMP
src/qemu/src-pmp/hw/scsi/vhost-scsi.c
<reponame>pmp-tool/PMP<filename>src/qemu/src-pmp/hw/scsi/vhost-scsi.c /* * vhost_scsi host device * * Copyright IBM, Corp. 2011 * * Authors: * <NAME> <<EMAIL>> * * Changes for QEMU mainline + tcm_vhost kernel upstream: * <NAME> <<EMAIL>> * * This work is licensed under the terms of the GNU LGPL, version 2 or later. * See the COPYING.LIB file in the top-level directory. * */ #include "qemu/osdep.h" #include <linux/vhost.h> #include <sys/ioctl.h> #include "qapi/error.h" #include "qemu/error-report.h" #include "qemu/queue.h" #include "monitor/monitor.h" #include "migration/blocker.h" #include "hw/virtio/vhost-scsi.h" #include "hw/virtio/vhost.h" #include "hw/virtio/virtio-scsi.h" #include "hw/virtio/virtio-bus.h" #include "hw/virtio/virtio-access.h" #include "hw/fw-path-provider.h" #include "qemu/cutils.h" /* Features supported by host kernel. */ static const int kernel_feature_bits[] = { VIRTIO_F_NOTIFY_ON_EMPTY, VIRTIO_RING_F_INDIRECT_DESC, VIRTIO_RING_F_EVENT_IDX, VIRTIO_SCSI_F_HOTPLUG, VHOST_INVALID_FEATURE_BIT }; static int vhost_scsi_set_endpoint(VHostSCSI *s) { VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); const VhostOps *vhost_ops = vsc->dev.vhost_ops; struct vhost_scsi_target backend; int ret; memset(&backend, 0, sizeof(backend)); pstrcpy(backend.vhost_wwpn, sizeof(backend.vhost_wwpn), vs->conf.wwpn); ret = vhost_ops->vhost_scsi_set_endpoint(&vsc->dev, &backend); if (ret < 0) { return -errno; } return 0; } static void vhost_scsi_clear_endpoint(VHostSCSI *s) { VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(s); VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); struct vhost_scsi_target backend; const VhostOps *vhost_ops = vsc->dev.vhost_ops; memset(&backend, 0, sizeof(backend)); pstrcpy(backend.vhost_wwpn, sizeof(backend.vhost_wwpn), vs->conf.wwpn); vhost_ops->vhost_scsi_clear_endpoint(&vsc->dev, &backend); } static int vhost_scsi_start(VHostSCSI *s) { int ret, abi_version; VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); const 
VhostOps *vhost_ops = vsc->dev.vhost_ops; ret = vhost_ops->vhost_scsi_get_abi_version(&vsc->dev, &abi_version); if (ret < 0) { return -errno; } if (abi_version > VHOST_SCSI_ABI_VERSION) { error_report("vhost-scsi: The running tcm_vhost kernel abi_version:" " %d is greater than vhost_scsi userspace supports: %d," " please upgrade your version of QEMU", abi_version, VHOST_SCSI_ABI_VERSION); return -ENOSYS; } ret = vhost_scsi_common_start(vsc); if (ret < 0) { return ret; } ret = vhost_scsi_set_endpoint(s); if (ret < 0) { error_report("Error setting vhost-scsi endpoint"); vhost_scsi_common_stop(vsc); } return ret; } static void vhost_scsi_stop(VHostSCSI *s) { VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); vhost_scsi_clear_endpoint(s); vhost_scsi_common_stop(vsc); } static void vhost_scsi_set_status(VirtIODevice *vdev, uint8_t val) { VHostSCSI *s = VHOST_SCSI(vdev); VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s); bool start = (val & VIRTIO_CONFIG_S_DRIVER_OK); if (vsc->dev.started == start) { return; } if (start) { int ret; ret = vhost_scsi_start(s); if (ret < 0) { error_report("unable to start vhost-scsi: %s", strerror(-ret)); exit(1); } } else { vhost_scsi_stop(s); } } static void vhost_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq) { } static void vhost_scsi_realize(DeviceState *dev, Error **errp) { VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev); VHostSCSICommon *vsc = VHOST_SCSI_COMMON(dev); Error *err = NULL; int vhostfd = -1; int ret; if (!vs->conf.wwpn) { error_setg(errp, "vhost-scsi: missing wwpn"); return; } if (vs->conf.vhostfd) { vhostfd = monitor_fd_param(cur_mon, vs->conf.vhostfd, errp); if (vhostfd == -1) { error_prepend(errp, "vhost-scsi: unable to parse vhostfd: "); return; } } else { vhostfd = open("/dev/vhost-scsi", O_RDWR); if (vhostfd < 0) { error_setg(errp, "vhost-scsi: open vhost char device failed: %s", strerror(errno)); return; } } virtio_scsi_common_realize(dev, vhost_dummy_handle_output, vhost_dummy_handle_output, vhost_dummy_handle_output, 
&err); if (err != NULL) { error_propagate(errp, err); goto close_fd; } error_setg(&vsc->migration_blocker, "vhost-scsi does not support migration"); migrate_add_blocker(vsc->migration_blocker, &err); if (err) { error_propagate(errp, err); error_free(vsc->migration_blocker); goto close_fd; } vsc->dev.nvqs = VHOST_SCSI_VQ_NUM_FIXED + vs->conf.num_queues; vsc->dev.vqs = g_new0(struct vhost_virtqueue, vsc->dev.nvqs); vsc->dev.vq_index = 0; vsc->dev.backend_features = 0; ret = vhost_dev_init(&vsc->dev, (void *)(uintptr_t)vhostfd, VHOST_BACKEND_TYPE_KERNEL, 0); if (ret < 0) { error_setg(errp, "vhost-scsi: vhost initialization failed: %s", strerror(-ret)); goto free_vqs; } /* At present, channel and lun both are 0 for bootable vhost-scsi disk */ vsc->channel = 0; vsc->lun = 0; /* Note: we can also get the minimum tpgt from kernel */ vsc->target = vs->conf.boot_tpgt; return; free_vqs: migrate_del_blocker(vsc->migration_blocker); g_free(vsc->dev.vqs); close_fd: close(vhostfd); return; } static void vhost_scsi_unrealize(DeviceState *dev, Error **errp) { VirtIODevice *vdev = VIRTIO_DEVICE(dev); VHostSCSICommon *vsc = VHOST_SCSI_COMMON(dev); struct vhost_virtqueue *vqs = vsc->dev.vqs; migrate_del_blocker(vsc->migration_blocker); error_free(vsc->migration_blocker); /* This will stop vhost backend. 
*/ vhost_scsi_set_status(vdev, 0); vhost_dev_cleanup(&vsc->dev); g_free(vqs); virtio_scsi_common_unrealize(dev, errp); } static Property vhost_scsi_properties[] = { DEFINE_PROP_STRING("vhostfd", VirtIOSCSICommon, conf.vhostfd), DEFINE_PROP_STRING("wwpn", VirtIOSCSICommon, conf.wwpn), DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0), DEFINE_PROP_UINT32("num_queues", VirtIOSCSICommon, conf.num_queues, 1), DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSICommon, conf.virtqueue_size, 128), DEFINE_PROP_UINT32("max_sectors", VirtIOSCSICommon, conf.max_sectors, 0xFFFF), DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSICommon, conf.cmd_per_lun, 128), DEFINE_PROP_BIT64("t10_pi", VHostSCSICommon, host_features, VIRTIO_SCSI_F_T10_PI, false), DEFINE_PROP_END_OF_LIST(), }; static void vhost_scsi_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(klass); dc->props = vhost_scsi_properties; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); vdc->realize = vhost_scsi_realize; vdc->unrealize = vhost_scsi_unrealize; vdc->get_features = vhost_scsi_common_get_features; vdc->set_config = vhost_scsi_common_set_config; vdc->set_status = vhost_scsi_set_status; fwc->get_dev_path = vhost_scsi_common_get_fw_dev_path; } static void vhost_scsi_instance_init(Object *obj) { VHostSCSICommon *vsc = VHOST_SCSI_COMMON(obj); vsc->feature_bits = kernel_feature_bits; device_add_bootindex_property(obj, &vsc->bootindex, "bootindex", NULL, DEVICE(vsc), NULL); } static const TypeInfo vhost_scsi_info = { .name = TYPE_VHOST_SCSI, .parent = TYPE_VHOST_SCSI_COMMON, .instance_size = sizeof(VHostSCSI), .class_init = vhost_scsi_class_init, .instance_init = vhost_scsi_instance_init, .interfaces = (InterfaceInfo[]) { { TYPE_FW_PATH_PROVIDER }, { } }, }; static void virtio_register_types(void) { type_register_static(&vhost_scsi_info); } type_init(virtio_register_types)
pmp-tool/PMP
src/qemu/src-pmp/util/qsp.c
<filename>src/qemu/src-pmp/util/qsp.c /* * qsp.c - QEMU Synchronization Profiler * * Copyright (C) 2018, <NAME> <<EMAIL>> * * License: GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * * QSP profiles the time spent in synchronization primitives, which can * help diagnose performance problems, e.g. scalability issues when * contention is high. * * The primitives currently supported are mutexes, recursive mutexes and * condition variables. Note that not all related functions are intercepted; * instead we profile only those functions that can have a performance impact, * either due to blocking (e.g. cond_wait, mutex_lock) or cache line * contention (e.g. mutex_lock, mutex_trylock). * * QSP's design focuses on speed and scalability. This is achieved * by having threads do their profiling entirely on thread-local data. * The appropriate thread-local data is found via a QHT, i.e. a concurrent hash * table. To aggregate data in order to generate a report, we iterate over * all entries in the hash table. Depending on the number of threads and * synchronization objects this might be expensive, but note that it is * very rarely called -- reports are generated only when requested by users. * * Reports are generated as a table where each row represents a call site. A * call site is the triplet formed by the __file__ and __LINE__ of the caller * as well as the address of the "object" (i.e. mutex, rec. mutex or condvar) * being operated on. Optionally, call sites that operate on different objects * of the same type can be coalesced, which can be particularly useful when * profiling dynamically-allocated objects. * * Alternative designs considered: * * - Use an off-the-shelf profiler such as mutrace. This is not a viable option * for us because QEMU has __malloc_hook set (by one of the libraries it * uses); leaving this hook unset is required to avoid deadlock in mutrace. * * - Use a glib HT for each thread, protecting each HT with its own lock. 
* This isn't simpler than the current design, and is 10% slower in the * atomic_add-bench microbenchmark (-m option). * * - For reports, just use a binary tree as we aggregate data, instead of having * an intermediate hash table. This would simplify the code only slightly, but * would perform badly if there were many threads and objects to track. * * - Wrap operations on qsp entries with RCU read-side critical sections, so * that qsp_reset() can delete entries. Unfortunately, the overhead of calling * rcu_read_lock/unlock slows down atomic_add-bench -m by 24%. Having * a snapshot that is updated on qsp_reset() avoids this overhead. * * Related Work: * - <NAME>'s mutrace: http://0pointer.de/blog/projects/mutrace.html * - Lozi, David, Thomas, Lawall and Muller. "Remote Core Locking: Migrating * Critical-Section Execution to Improve the Performance of Multithreaded * Applications", USENIX ATC'12. */ #include "qemu/osdep.h" #include "qemu/thread.h" #include "qemu/timer.h" #include "qemu/qht.h" #include "qemu/rcu.h" #include "qemu/xxhash.h" enum QSPType { QSP_MUTEX, QSP_BQL_MUTEX, QSP_REC_MUTEX, QSP_CONDVAR, }; struct QSPCallSite { const void *obj; const char *file; /* i.e. __FILE__; shortened later */ int line; enum QSPType type; }; typedef struct QSPCallSite QSPCallSite; struct QSPEntry { void *thread_ptr; const QSPCallSite *callsite; uint64_t n_acqs; uint64_t ns; unsigned int n_objs; /* count of coalesced objs; only used for reporting */ }; typedef struct QSPEntry QSPEntry; struct QSPSnapshot { struct rcu_head rcu; struct qht ht; }; typedef struct QSPSnapshot QSPSnapshot; /* initial sizing for hash tables */ #define QSP_INITIAL_SIZE 64 /* If this file is moved, QSP_REL_PATH should be updated accordingly */ #define QSP_REL_PATH "util/qsp.c" /* this file's full path. 
Used to present all call sites with relative paths */ static size_t qsp_qemu_path_len; /* the address of qsp_thread gives us a unique 'thread ID' */ static __thread int qsp_thread; /* * Call sites are the same for all threads, so we track them in a separate hash * table to save memory. */ static struct qht qsp_callsite_ht; static struct qht qsp_ht; static QSPSnapshot *qsp_snapshot; static bool qsp_initialized, qsp_initializing; static const char * const qsp_typenames[] = { [QSP_MUTEX] = "mutex", [QSP_BQL_MUTEX] = "BQL mutex", [QSP_REC_MUTEX] = "rec_mutex", [QSP_CONDVAR] = "condvar", }; QemuMutexLockFunc qemu_bql_mutex_lock_func = qemu_mutex_lock_impl; QemuMutexLockFunc qemu_mutex_lock_func = qemu_mutex_lock_impl; QemuMutexTrylockFunc qemu_mutex_trylock_func = qemu_mutex_trylock_impl; QemuRecMutexLockFunc qemu_rec_mutex_lock_func = qemu_rec_mutex_lock_impl; QemuRecMutexTrylockFunc qemu_rec_mutex_trylock_func = qemu_rec_mutex_trylock_impl; QemuCondWaitFunc qemu_cond_wait_func = qemu_cond_wait_impl; /* * It pays off to _not_ hash callsite->file; hashing a string is slow, and * without it we still get a pretty unique hash. 
*/ static inline uint32_t do_qsp_callsite_hash(const QSPCallSite *callsite, uint64_t ab) { uint64_t cd = (uint64_t)(uintptr_t)callsite->obj; uint32_t e = callsite->line; uint32_t f = callsite->type; return qemu_xxhash6(ab, cd, e, f); } static inline uint32_t qsp_callsite_hash(const QSPCallSite *callsite) { return do_qsp_callsite_hash(callsite, 0); } static inline uint32_t do_qsp_entry_hash(const QSPEntry *entry, uint64_t a) { return do_qsp_callsite_hash(entry->callsite, a); } static uint32_t qsp_entry_hash(const QSPEntry *entry) { return do_qsp_entry_hash(entry, (uint64_t)(uintptr_t)entry->thread_ptr); } static uint32_t qsp_entry_no_thread_hash(const QSPEntry *entry) { return do_qsp_entry_hash(entry, 0); } /* without the objects we need to hash the file name to get a decent hash */ static uint32_t qsp_entry_no_thread_obj_hash(const QSPEntry *entry) { const QSPCallSite *callsite = entry->callsite; uint64_t ab = g_str_hash(callsite->file); uint64_t cd = callsite->line; uint32_t e = callsite->type; return qemu_xxhash5(ab, cd, e); } static bool qsp_callsite_cmp(const void *ap, const void *bp) { const QSPCallSite *a = ap; const QSPCallSite *b = bp; return a == b || (a->obj == b->obj && a->line == b->line && a->type == b->type && (a->file == b->file || !strcmp(a->file, b->file))); } static bool qsp_callsite_no_obj_cmp(const void *ap, const void *bp) { const QSPCallSite *a = ap; const QSPCallSite *b = bp; return a == b || (a->line == b->line && a->type == b->type && (a->file == b->file || !strcmp(a->file, b->file))); } static bool qsp_entry_no_thread_cmp(const void *ap, const void *bp) { const QSPEntry *a = ap; const QSPEntry *b = bp; return qsp_callsite_cmp(a->callsite, b->callsite); } static bool qsp_entry_no_thread_obj_cmp(const void *ap, const void *bp) { const QSPEntry *a = ap; const QSPEntry *b = bp; return qsp_callsite_no_obj_cmp(a->callsite, b->callsite); } static bool qsp_entry_cmp(const void *ap, const void *bp) { const QSPEntry *a = ap; const QSPEntry *b = bp; 
return a->thread_ptr == b->thread_ptr && qsp_callsite_cmp(a->callsite, b->callsite); } /* * Normally we'd call this from a constructor function, but we want it to work * via libutil as well. */ static void qsp_do_init(void) { /* make sure this file's path in the tree is up to date with QSP_REL_PATH */ g_assert(strstr(__FILE__, QSP_REL_PATH)); qsp_qemu_path_len = strlen(__FILE__) - strlen(QSP_REL_PATH); qht_init(&qsp_ht, qsp_entry_cmp, QSP_INITIAL_SIZE, QHT_MODE_AUTO_RESIZE | QHT_MODE_RAW_MUTEXES); qht_init(&qsp_callsite_ht, qsp_callsite_cmp, QSP_INITIAL_SIZE, QHT_MODE_AUTO_RESIZE | QHT_MODE_RAW_MUTEXES); } static __attribute__((noinline)) void qsp_init__slowpath(void) { if (atomic_cmpxchg(&qsp_initializing, false, true) == false) { qsp_do_init(); atomic_set(&qsp_initialized, true); } else { while (!atomic_read(&qsp_initialized)) { cpu_relax(); } } } /* qsp_init() must be called from _all_ exported functions */ static inline void qsp_init(void) { if (likely(atomic_read(&qsp_initialized))) { return; } qsp_init__slowpath(); } static QSPCallSite *qsp_callsite_find(const QSPCallSite *orig) { QSPCallSite *callsite; uint32_t hash; hash = qsp_callsite_hash(orig); callsite = qht_lookup(&qsp_callsite_ht, orig, hash); if (callsite == NULL) { void *existing = NULL; callsite = g_new(QSPCallSite, 1); memcpy(callsite, orig, sizeof(*callsite)); qht_insert(&qsp_callsite_ht, callsite, hash, &existing); if (unlikely(existing)) { g_free(callsite); callsite = existing; } } return callsite; } static QSPEntry * qsp_entry_create(struct qht *ht, const QSPEntry *entry, uint32_t hash) { QSPEntry *e; void *existing = NULL; e = g_new0(QSPEntry, 1); e->thread_ptr = entry->thread_ptr; e->callsite = qsp_callsite_find(entry->callsite); qht_insert(ht, e, hash, &existing); if (unlikely(existing)) { g_free(e); e = existing; } return e; } static QSPEntry * qsp_entry_find(struct qht *ht, const QSPEntry *entry, uint32_t hash) { QSPEntry *e; e = qht_lookup(ht, entry, hash); if (e == NULL) { e = 
qsp_entry_create(ht, entry, hash); } return e; } /* * Note: Entries are never removed, so callers do not have to be in an RCU * read-side critical section. */ static QSPEntry *qsp_entry_get(const void *obj, const char *file, int line, enum QSPType type) { QSPCallSite callsite = { .obj = obj, .file = file, .line = line, .type = type, }; QSPEntry orig; uint32_t hash; qsp_init(); orig.thread_ptr = &qsp_thread; orig.callsite = &callsite; hash = qsp_entry_hash(&orig); return qsp_entry_find(&qsp_ht, &orig, hash); } /* * @e is in the global hash table; it is only written to by the current thread, * so we write to it atomically (as in "write once") to prevent torn reads. */ static inline void do_qsp_entry_record(QSPEntry *e, int64_t delta, bool acq) { atomic_set_u64(&e->ns, e->ns + delta); if (acq) { atomic_set_u64(&e->n_acqs, e->n_acqs + 1); } } static inline void qsp_entry_record(QSPEntry *e, int64_t delta) { do_qsp_entry_record(e, delta, true); } #define QSP_GEN_VOID(type_, qsp_t_, func_, impl_) \ static void func_(type_ *obj, const char *file, int line) \ { \ QSPEntry *e; \ int64_t t0, t1; \ \ t0 = get_clock(); \ impl_(obj, file, line); \ t1 = get_clock(); \ \ e = qsp_entry_get(obj, file, line, qsp_t_); \ qsp_entry_record(e, t1 - t0); \ } #define QSP_GEN_RET1(type_, qsp_t_, func_, impl_) \ static int func_(type_ *obj, const char *file, int line) \ { \ QSPEntry *e; \ int64_t t0, t1; \ int err; \ \ t0 = get_clock(); \ err = impl_(obj, file, line); \ t1 = get_clock(); \ \ e = qsp_entry_get(obj, file, line, qsp_t_); \ do_qsp_entry_record(e, t1 - t0, !err); \ return err; \ } QSP_GEN_VOID(QemuMutex, QSP_BQL_MUTEX, qsp_bql_mutex_lock, qemu_mutex_lock_impl) QSP_GEN_VOID(QemuMutex, QSP_MUTEX, qsp_mutex_lock, qemu_mutex_lock_impl) QSP_GEN_RET1(QemuMutex, QSP_MUTEX, qsp_mutex_trylock, qemu_mutex_trylock_impl) QSP_GEN_VOID(QemuRecMutex, QSP_REC_MUTEX, qsp_rec_mutex_lock, qemu_rec_mutex_lock_impl) QSP_GEN_RET1(QemuRecMutex, QSP_REC_MUTEX, qsp_rec_mutex_trylock, 
qemu_rec_mutex_trylock_impl) #undef QSP_GEN_RET1 #undef QSP_GEN_VOID static void qsp_cond_wait(QemuCond *cond, QemuMutex *mutex, const char *file, int line) { QSPEntry *e; int64_t t0, t1; t0 = get_clock(); qemu_cond_wait_impl(cond, mutex, file, line); t1 = get_clock(); e = qsp_entry_get(cond, file, line, QSP_CONDVAR); qsp_entry_record(e, t1 - t0); } bool qsp_is_enabled(void) { return atomic_read(&qemu_mutex_lock_func) == qsp_mutex_lock; } void qsp_enable(void) { atomic_set(&qemu_mutex_lock_func, qsp_mutex_lock); atomic_set(&qemu_mutex_trylock_func, qsp_mutex_trylock); atomic_set(&qemu_bql_mutex_lock_func, qsp_bql_mutex_lock); atomic_set(&qemu_rec_mutex_lock_func, qsp_rec_mutex_lock); atomic_set(&qemu_rec_mutex_trylock_func, qsp_rec_mutex_trylock); atomic_set(&qemu_cond_wait_func, qsp_cond_wait); } void qsp_disable(void) { atomic_set(&qemu_mutex_lock_func, qemu_mutex_lock_impl); atomic_set(&qemu_mutex_trylock_func, qemu_mutex_trylock_impl); atomic_set(&qemu_bql_mutex_lock_func, qemu_mutex_lock_impl); atomic_set(&qemu_rec_mutex_lock_func, qemu_rec_mutex_lock_impl); atomic_set(&qemu_rec_mutex_trylock_func, qemu_rec_mutex_trylock_impl); atomic_set(&qemu_cond_wait_func, qemu_cond_wait_impl); } static gint qsp_tree_cmp(gconstpointer ap, gconstpointer bp, gpointer up) { const QSPEntry *a = ap; const QSPEntry *b = bp; enum QSPSortBy sort_by = *(enum QSPSortBy *)up; const QSPCallSite *ca; const QSPCallSite *cb; switch (sort_by) { case QSP_SORT_BY_TOTAL_WAIT_TIME: if (a->ns > b->ns) { return -1; } else if (a->ns < b->ns) { return 1; } break; case QSP_SORT_BY_AVG_WAIT_TIME: { double avg_a = a->n_acqs ? a->ns / a->n_acqs : 0; double avg_b = b->n_acqs ? b->ns / b->n_acqs : 0; if (avg_a > avg_b) { return -1; } else if (avg_a < avg_b) { return 1; } break; } default: g_assert_not_reached(); } ca = a->callsite; cb = b->callsite; /* Break the tie with the object's address */ if (ca->obj < cb->obj) { return -1; } else if (ca->obj > cb->obj) { return 1; } else { int cmp; /* same obj. 
Break the tie with the callsite's file */ cmp = strcmp(ca->file, cb->file); if (cmp) { return cmp; } /* same callsite file. Break the tie with the callsite's line */ g_assert(ca->line != cb->line); if (ca->line < cb->line) { return -1; } else if (ca->line > cb->line) { return 1; } else { /* break the tie with the callsite's type */ return cb->type - ca->type; } } } static void qsp_sort(void *p, uint32_t h, void *userp) { QSPEntry *e = p; GTree *tree = userp; g_tree_insert(tree, e, NULL); } static void qsp_aggregate(void *p, uint32_t h, void *up) { struct qht *ht = up; const QSPEntry *e = p; QSPEntry *agg; uint32_t hash; hash = qsp_entry_no_thread_hash(e); agg = qsp_entry_find(ht, e, hash); /* * The entry is in the global hash table; read from it atomically (as in * "read once"). */ agg->ns += atomic_read_u64(&e->ns); agg->n_acqs += atomic_read_u64(&e->n_acqs); } static void qsp_iter_diff(void *p, uint32_t hash, void *htp) { struct qht *ht = htp; QSPEntry *old = p; QSPEntry *new; new = qht_lookup(ht, old, hash); /* entries are never deleted, so we must have this one */ g_assert(new != NULL); /* our reading of the stats happened after the snapshot was taken */ g_assert(new->n_acqs >= old->n_acqs); g_assert(new->ns >= old->ns); new->n_acqs -= old->n_acqs; new->ns -= old->ns; /* No point in reporting an empty entry */ if (new->n_acqs == 0 && new->ns == 0) { bool removed = qht_remove(ht, new, hash); g_assert(removed); g_free(new); } } static void qsp_diff(struct qht *orig, struct qht *new) { qht_iter(orig, qsp_iter_diff, new); } static void qsp_iter_callsite_coalesce(void *p, uint32_t h, void *htp) { struct qht *ht = htp; QSPEntry *old = p; QSPEntry *e; uint32_t hash; hash = qsp_entry_no_thread_obj_hash(old); e = qht_lookup(ht, old, hash); if (e == NULL) { e = qsp_entry_create(ht, old, hash); e->n_objs = 1; } else if (e->callsite->obj != old->callsite->obj) { e->n_objs++; } e->ns += old->ns; e->n_acqs += old->n_acqs; } static void qsp_ht_delete(void *p, uint32_t h, void 
*htp) { g_free(p); } static void qsp_mktree(GTree *tree, bool callsite_coalesce) { QSPSnapshot *snap; struct qht ht, coalesce_ht; struct qht *htp; /* * First, see if there's a prior snapshot, so that we read the global hash * table _after_ the snapshot has been created, which guarantees that * the entries we'll read will be a superset of the snapshot's entries. * * We must remain in an RCU read-side critical section until we're done * with the snapshot. */ rcu_read_lock(); snap = atomic_rcu_read(&qsp_snapshot); /* Aggregate all results from the global hash table into a local one */ qht_init(&ht, qsp_entry_no_thread_cmp, QSP_INITIAL_SIZE, QHT_MODE_AUTO_RESIZE | QHT_MODE_RAW_MUTEXES); qht_iter(&qsp_ht, qsp_aggregate, &ht); /* compute the difference wrt the snapshot, if any */ if (snap) { qsp_diff(&snap->ht, &ht); } /* done with the snapshot; RCU can reclaim it */ rcu_read_unlock(); htp = &ht; if (callsite_coalesce) { qht_init(&coalesce_ht, qsp_entry_no_thread_obj_cmp, QSP_INITIAL_SIZE, QHT_MODE_AUTO_RESIZE | QHT_MODE_RAW_MUTEXES); qht_iter(&ht, qsp_iter_callsite_coalesce, &coalesce_ht); /* free the previous hash table, and point htp to coalesce_ht */ qht_iter(&ht, qsp_ht_delete, NULL); qht_destroy(&ht); htp = &coalesce_ht; } /* sort the hash table elements by using a tree */ qht_iter(htp, qsp_sort, tree); /* free the hash table, but keep the elements (those are in the tree now) */ qht_destroy(htp); } /* free string with g_free */ static char *qsp_at(const QSPCallSite *callsite) { GString *s = g_string_new(NULL); const char *shortened; /* remove the absolute path to qemu */ if (unlikely(strlen(callsite->file) < qsp_qemu_path_len)) { shortened = callsite->file; } else { shortened = callsite->file + qsp_qemu_path_len; } g_string_append_printf(s, "%s:%u", shortened, callsite->line); return g_string_free(s, FALSE); } struct QSPReportEntry { const void *obj; char *callsite_at; const char *typename; double time_s; double ns_avg; uint64_t n_acqs; unsigned int n_objs; }; 
typedef struct QSPReportEntry QSPReportEntry; struct QSPReport { QSPReportEntry *entries; size_t n_entries; size_t max_n_entries; }; typedef struct QSPReport QSPReport; static gboolean qsp_tree_report(gpointer key, gpointer value, gpointer udata) { const QSPEntry *e = key; QSPReport *report = udata; QSPReportEntry *entry; if (report->n_entries == report->max_n_entries) { return TRUE; } entry = &report->entries[report->n_entries]; report->n_entries++; entry->obj = e->callsite->obj; entry->n_objs = e->n_objs; entry->callsite_at = qsp_at(e->callsite); entry->typename = qsp_typenames[e->callsite->type]; entry->time_s = e->ns * 1e-9; entry->n_acqs = e->n_acqs; entry->ns_avg = e->n_acqs ? e->ns / e->n_acqs : 0; return FALSE; } static void pr_report(const QSPReport *rep, FILE *f, fprintf_function pr) { char *dashes; size_t max_len = 0; int callsite_len = 0; int callsite_rspace; int n_dashes; size_t i; /* find out the maximum length of all 'callsite' fields */ for (i = 0; i < rep->n_entries; i++) { const QSPReportEntry *e = &rep->entries[i]; size_t len = strlen(e->callsite_at); if (len > max_len) { max_len = len; } } callsite_len = MAX(max_len, strlen("Call site")); /* white space to leave to the right of "Call site" */ callsite_rspace = callsite_len - strlen("Call site"); pr(f, "Type Object Call site%*s Wait Time (s) " " Count Average (us)\n", callsite_rspace, ""); /* build a horizontal rule with dashes */ n_dashes = 79 + callsite_rspace; dashes = g_malloc(n_dashes + 1); memset(dashes, '-', n_dashes); dashes[n_dashes] = '\0'; pr(f, "%s\n", dashes); for (i = 0; i < rep->n_entries; i++) { const QSPReportEntry *e = &rep->entries[i]; GString *s = g_string_new(NULL); g_string_append_printf(s, "%-9s ", e->typename); if (e->n_objs > 1) { g_string_append_printf(s, "[%12u]", e->n_objs); } else { g_string_append_printf(s, "%14p", e->obj); } g_string_append_printf(s, " %s%*s %13.5f %12" PRIu64 " %12.2f\n", e->callsite_at, callsite_len - (int)strlen(e->callsite_at), "", e->time_s, 
e->n_acqs, e->ns_avg * 1e-3); pr(f, "%s", s->str); g_string_free(s, TRUE); } pr(f, "%s\n", dashes); g_free(dashes); } static void report_destroy(QSPReport *rep) { size_t i; for (i = 0; i < rep->n_entries; i++) { QSPReportEntry *e = &rep->entries[i]; g_free(e->callsite_at); } g_free(rep->entries); } void qsp_report(FILE *f, fprintf_function cpu_fprintf, size_t max, enum QSPSortBy sort_by, bool callsite_coalesce) { GTree *tree = g_tree_new_full(qsp_tree_cmp, &sort_by, g_free, NULL); QSPReport rep; qsp_init(); rep.entries = g_new0(QSPReportEntry, max); rep.n_entries = 0; rep.max_n_entries = max; qsp_mktree(tree, callsite_coalesce); g_tree_foreach(tree, qsp_tree_report, &rep); g_tree_destroy(tree); pr_report(&rep, f, cpu_fprintf); report_destroy(&rep); } static void qsp_snapshot_destroy(QSPSnapshot *snap) { qht_iter(&snap->ht, qsp_ht_delete, NULL); qht_destroy(&snap->ht); g_free(snap); } void qsp_reset(void) { QSPSnapshot *new = g_new(QSPSnapshot, 1); QSPSnapshot *old; qsp_init(); qht_init(&new->ht, qsp_entry_cmp, QSP_INITIAL_SIZE, QHT_MODE_AUTO_RESIZE | QHT_MODE_RAW_MUTEXES); /* take a snapshot of the current state */ qht_iter(&qsp_ht, qsp_aggregate, &new->ht); /* replace the previous snapshot, if any */ old = atomic_xchg(&qsp_snapshot, new); if (old) { call_rcu(old, qsp_snapshot_destroy, rcu); } }
pmp-tool/PMP
src/qemu/src-pmp/include/hw/virtio/vhost-backend.h
/* * vhost-backend * * Copyright (c) 2013 Virtual Open Systems Sarl. * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * */ #ifndef VHOST_BACKEND_H #define VHOST_BACKEND_H #include "exec/memory.h" typedef enum VhostBackendType { VHOST_BACKEND_TYPE_NONE = 0, VHOST_BACKEND_TYPE_KERNEL = 1, VHOST_BACKEND_TYPE_USER = 2, VHOST_BACKEND_TYPE_MAX = 3, } VhostBackendType; typedef enum VhostSetConfigType { VHOST_SET_CONFIG_TYPE_MASTER = 0, VHOST_SET_CONFIG_TYPE_MIGRATION = 1, } VhostSetConfigType; struct vhost_inflight; struct vhost_dev; struct vhost_log; struct vhost_memory; struct vhost_vring_file; struct vhost_vring_state; struct vhost_vring_addr; struct vhost_scsi_target; struct vhost_iotlb_msg; typedef int (*vhost_backend_init)(struct vhost_dev *dev, void *opaque); typedef int (*vhost_backend_cleanup)(struct vhost_dev *dev); typedef int (*vhost_backend_memslots_limit)(struct vhost_dev *dev); typedef int (*vhost_net_set_backend_op)(struct vhost_dev *dev, struct vhost_vring_file *file); typedef int (*vhost_net_set_mtu_op)(struct vhost_dev *dev, uint16_t mtu); typedef int (*vhost_scsi_set_endpoint_op)(struct vhost_dev *dev, struct vhost_scsi_target *target); typedef int (*vhost_scsi_clear_endpoint_op)(struct vhost_dev *dev, struct vhost_scsi_target *target); typedef int (*vhost_scsi_get_abi_version_op)(struct vhost_dev *dev, int *version); typedef int (*vhost_set_log_base_op)(struct vhost_dev *dev, uint64_t base, struct vhost_log *log); typedef int (*vhost_set_mem_table_op)(struct vhost_dev *dev, struct vhost_memory *mem); typedef int (*vhost_set_vring_addr_op)(struct vhost_dev *dev, struct vhost_vring_addr *addr); typedef int (*vhost_set_vring_endian_op)(struct vhost_dev *dev, struct vhost_vring_state *ring); typedef int (*vhost_set_vring_num_op)(struct vhost_dev *dev, struct vhost_vring_state *ring); typedef int (*vhost_set_vring_base_op)(struct vhost_dev *dev, struct vhost_vring_state *ring); 
typedef int (*vhost_get_vring_base_op)(struct vhost_dev *dev, struct vhost_vring_state *ring); typedef int (*vhost_set_vring_kick_op)(struct vhost_dev *dev, struct vhost_vring_file *file); typedef int (*vhost_set_vring_call_op)(struct vhost_dev *dev, struct vhost_vring_file *file); typedef int (*vhost_set_vring_busyloop_timeout_op)(struct vhost_dev *dev, struct vhost_vring_state *r); typedef int (*vhost_set_features_op)(struct vhost_dev *dev, uint64_t features); typedef int (*vhost_get_features_op)(struct vhost_dev *dev, uint64_t *features); typedef int (*vhost_set_owner_op)(struct vhost_dev *dev); typedef int (*vhost_reset_device_op)(struct vhost_dev *dev); typedef int (*vhost_get_vq_index_op)(struct vhost_dev *dev, int idx); typedef int (*vhost_set_vring_enable_op)(struct vhost_dev *dev, int enable); typedef bool (*vhost_requires_shm_log_op)(struct vhost_dev *dev); typedef int (*vhost_migration_done_op)(struct vhost_dev *dev, char *mac_addr); typedef bool (*vhost_backend_can_merge_op)(struct vhost_dev *dev, uint64_t start1, uint64_t size1, uint64_t start2, uint64_t size2); typedef int (*vhost_vsock_set_guest_cid_op)(struct vhost_dev *dev, uint64_t guest_cid); typedef int (*vhost_vsock_set_running_op)(struct vhost_dev *dev, int start); typedef void (*vhost_set_iotlb_callback_op)(struct vhost_dev *dev, int enabled); typedef int (*vhost_send_device_iotlb_msg_op)(struct vhost_dev *dev, struct vhost_iotlb_msg *imsg); typedef int (*vhost_set_config_op)(struct vhost_dev *dev, const uint8_t *data, uint32_t offset, uint32_t size, uint32_t flags); typedef int (*vhost_get_config_op)(struct vhost_dev *dev, uint8_t *config, uint32_t config_len); typedef int (*vhost_crypto_create_session_op)(struct vhost_dev *dev, void *session_info, uint64_t *session_id); typedef int (*vhost_crypto_close_session_op)(struct vhost_dev *dev, uint64_t session_id); typedef bool (*vhost_backend_mem_section_filter_op)(struct vhost_dev *dev, MemoryRegionSection *section); typedef int 
(*vhost_get_inflight_fd_op)(struct vhost_dev *dev, uint16_t queue_size, struct vhost_inflight *inflight); typedef int (*vhost_set_inflight_fd_op)(struct vhost_dev *dev, struct vhost_inflight *inflight); typedef struct VhostOps { VhostBackendType backend_type; vhost_backend_init vhost_backend_init; vhost_backend_cleanup vhost_backend_cleanup; vhost_backend_memslots_limit vhost_backend_memslots_limit; vhost_net_set_backend_op vhost_net_set_backend; vhost_net_set_mtu_op vhost_net_set_mtu; vhost_scsi_set_endpoint_op vhost_scsi_set_endpoint; vhost_scsi_clear_endpoint_op vhost_scsi_clear_endpoint; vhost_scsi_get_abi_version_op vhost_scsi_get_abi_version; vhost_set_log_base_op vhost_set_log_base; vhost_set_mem_table_op vhost_set_mem_table; vhost_set_vring_addr_op vhost_set_vring_addr; vhost_set_vring_endian_op vhost_set_vring_endian; vhost_set_vring_num_op vhost_set_vring_num; vhost_set_vring_base_op vhost_set_vring_base; vhost_get_vring_base_op vhost_get_vring_base; vhost_set_vring_kick_op vhost_set_vring_kick; vhost_set_vring_call_op vhost_set_vring_call; vhost_set_vring_busyloop_timeout_op vhost_set_vring_busyloop_timeout; vhost_set_features_op vhost_set_features; vhost_get_features_op vhost_get_features; vhost_set_owner_op vhost_set_owner; vhost_reset_device_op vhost_reset_device; vhost_get_vq_index_op vhost_get_vq_index; vhost_set_vring_enable_op vhost_set_vring_enable; vhost_requires_shm_log_op vhost_requires_shm_log; vhost_migration_done_op vhost_migration_done; vhost_backend_can_merge_op vhost_backend_can_merge; vhost_vsock_set_guest_cid_op vhost_vsock_set_guest_cid; vhost_vsock_set_running_op vhost_vsock_set_running; vhost_set_iotlb_callback_op vhost_set_iotlb_callback; vhost_send_device_iotlb_msg_op vhost_send_device_iotlb_msg; vhost_get_config_op vhost_get_config; vhost_set_config_op vhost_set_config; vhost_crypto_create_session_op vhost_crypto_create_session; vhost_crypto_close_session_op vhost_crypto_close_session; vhost_backend_mem_section_filter_op 
vhost_backend_mem_section_filter; vhost_get_inflight_fd_op vhost_get_inflight_fd; vhost_set_inflight_fd_op vhost_set_inflight_fd; } VhostOps; extern const VhostOps user_ops; int vhost_set_backend_type(struct vhost_dev *dev, VhostBackendType backend_type); int vhost_backend_update_device_iotlb(struct vhost_dev *dev, uint64_t iova, uint64_t uaddr, uint64_t len, IOMMUAccessFlags perm); int vhost_backend_invalidate_device_iotlb(struct vhost_dev *dev, uint64_t iova, uint64_t len); int vhost_backend_handle_iotlb_msg(struct vhost_dev *dev, struct vhost_iotlb_msg *imsg); #endif /* VHOST_BACKEND_H */
pmp-tool/PMP
src/qemu/src-pmp/include/crypto/block.h
/* * QEMU Crypto block device encryption * * Copyright (c) 2015-2016 Red Hat, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. * */ #ifndef QCRYPTO_BLOCK_H #define QCRYPTO_BLOCK_H #include "crypto/cipher.h" #include "crypto/ivgen.h" typedef struct QCryptoBlock QCryptoBlock; /* See also QCryptoBlockFormat, QCryptoBlockCreateOptions * and QCryptoBlockOpenOptions in qapi/crypto.json */ typedef ssize_t (*QCryptoBlockReadFunc)(QCryptoBlock *block, size_t offset, uint8_t *buf, size_t buflen, void *opaque, Error **errp); typedef ssize_t (*QCryptoBlockInitFunc)(QCryptoBlock *block, size_t headerlen, void *opaque, Error **errp); typedef ssize_t (*QCryptoBlockWriteFunc)(QCryptoBlock *block, size_t offset, const uint8_t *buf, size_t buflen, void *opaque, Error **errp); /** * qcrypto_block_has_format: * @format: the encryption format * @buf: the data from head of the volume * @len: the length of @buf in bytes * * Given @len bytes of data from the head of a storage volume * in @buf, probe to determine if the volume has the encryption * format specified in @format. 
* * Returns: true if the data in @buf matches @format */ bool qcrypto_block_has_format(QCryptoBlockFormat format, const uint8_t *buf, size_t buflen); typedef enum { QCRYPTO_BLOCK_OPEN_NO_IO = (1 << 0), } QCryptoBlockOpenFlags; /** * qcrypto_block_open: * @options: the encryption options * @optprefix: name prefix for options * @readfunc: callback for reading data from the volume * @opaque: data to pass to @readfunc * @flags: bitmask of QCryptoBlockOpenFlags values * @n_threads: allow concurrent I/O from up to @n_threads threads * @errp: pointer to a NULL-initialized error object * * Create a new block encryption object for an existing * storage volume encrypted with format identified by * the parameters in @options. * * This will use @readfunc to initialize the encryption * context based on the volume header(s), extracting the * master key(s) as required. * * If @flags contains QCRYPTO_BLOCK_OPEN_NO_IO then * the open process will be optimized to skip any parts * that are only required to perform I/O. In particular * this would usually avoid the need to decrypt any * master keys. The only thing that can be done with * the resulting QCryptoBlock object would be to query * metadata such as the payload offset. There will be * no cipher or ivgen objects available. * * If any part of initializing the encryption context * fails an error will be returned. This could be due * to the volume being in the wrong format, a cipher * or IV generator algorithm that is not supported, * or incorrect passphrases. 
* * Returns: a block encryption format, or NULL on error */ QCryptoBlock *qcrypto_block_open(QCryptoBlockOpenOptions *options, const char *optprefix, QCryptoBlockReadFunc readfunc, void *opaque, unsigned int flags, size_t n_threads, Error **errp); /** * qcrypto_block_create: * @options: the encryption options * @optprefix: name prefix for options * @initfunc: callback for initializing volume header * @writefunc: callback for writing data to the volume header * @opaque: data to pass to @initfunc and @writefunc * @errp: pointer to a NULL-initialized error object * * Create a new block encryption object for initializing * a storage volume to be encrypted with format identified * by the parameters in @options. * * This method will allocate space for a new volume header * using @initfunc and then write header data using @writefunc, * generating new master keys, etc as required. Any existing * data present on the volume will be irrevocably destroyed. * * If any part of initializing the encryption context * fails an error will be returned. This could be due * to the volume being in the wrong format, a cipher * or IV generator algorithm that is not supported, * or incorrect passphrases. * * Returns: a block encryption format, or NULL on error */ QCryptoBlock *qcrypto_block_create(QCryptoBlockCreateOptions *options, const char *optprefix, QCryptoBlockInitFunc initfunc, QCryptoBlockWriteFunc writefunc, void *opaque, Error **errp); /** * qcrypto_block_get_info: * @block: the block encryption object * @errp: pointer to a NULL-initialized error object * * Get information about the configuration options for the * block encryption object. This includes details such as * the cipher algorithms, modes, and initialization vector * generators. 
* * Returns: a block encryption info object, or NULL on error */ QCryptoBlockInfo *qcrypto_block_get_info(QCryptoBlock *block, Error **errp); /** * @qcrypto_block_decrypt: * @block: the block encryption object * @offset: the position at which @iov was read * @buf: the buffer to decrypt * @len: the length of @buf in bytes * @errp: pointer to a NULL-initialized error object * * Decrypt @len bytes of cipher text in @buf, writing * plain text back into @buf. @len and @offset must be * a multiple of the encryption format sector size. * * Returns 0 on success, -1 on failure */ int qcrypto_block_decrypt(QCryptoBlock *block, uint64_t offset, uint8_t *buf, size_t len, Error **errp); /** * @qcrypto_block_encrypt: * @block: the block encryption object * @offset: the position at which @iov will be written * @buf: the buffer to decrypt * @len: the length of @buf in bytes * @errp: pointer to a NULL-initialized error object * * Encrypt @len bytes of plain text in @buf, writing * cipher text back into @buf. @len and @offset must be * a multiple of the encryption format sector size. 
 *
 * Returns 0 on success, -1 on failure
 */
int qcrypto_block_encrypt(QCryptoBlock *block,
                          uint64_t offset,
                          uint8_t *buf,
                          size_t len,
                          Error **errp);

/**
 * qcrypto_block_get_cipher:
 * @block: the block encryption object
 *
 * Get the cipher to use for payload encryption
 *
 * Returns: the cipher object
 */
QCryptoCipher *qcrypto_block_get_cipher(QCryptoBlock *block);

/**
 * qcrypto_block_get_ivgen:
 * @block: the block encryption object
 *
 * Get the initialization vector generator to use for
 * payload encryption
 *
 * Returns: the IV generator object
 */
QCryptoIVGen *qcrypto_block_get_ivgen(QCryptoBlock *block);

/**
 * qcrypto_block_get_kdf_hash:
 * @block: the block encryption object
 *
 * Get the hash algorithm used with the key derivation
 * function
 *
 * Returns: the hash algorithm
 */
QCryptoHashAlgorithm qcrypto_block_get_kdf_hash(QCryptoBlock *block);

/**
 * qcrypto_block_get_payload_offset:
 * @block: the block encryption object
 *
 * Get the offset to the payload indicated by the
 * encryption header, in bytes.
 *
 * Returns: the payload offset in bytes
 */
uint64_t qcrypto_block_get_payload_offset(QCryptoBlock *block);

/**
 * qcrypto_block_get_sector_size:
 * @block: the block encryption object
 *
 * Get the size of sectors used for payload encryption. A new
 * IV is used at the start of each sector. The encryption
 * sector size is not required to match the sector size of the
 * underlying storage. For example LUKS will always use a 512
 * byte sector size, even if the volume is on a disk with 4k
 * sectors.
 *
 * Returns: the sector size in bytes
 */
uint64_t qcrypto_block_get_sector_size(QCryptoBlock *block);

/**
 * qcrypto_block_free:
 * @block: the block encryption object
 *
 * Release all resources associated with the encryption
 * object
 */
void qcrypto_block_free(QCryptoBlock *block);

#endif /* QCRYPTO_BLOCK_H */
pmp-tool/PMP
src/qemu/src-pmp/accel/tcg/cputlb.c
<filename>src/qemu/src-pmp/accel/tcg/cputlb.c<gh_stars>1-10 /* * Common CPU TLB handling * * Copyright (c) 2003 <NAME> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "qemu/main-loop.h" #include "cpu.h" #include "exec/exec-all.h" #include "exec/memory.h" #include "exec/address-spaces.h" #include "exec/cpu_ldst.h" #include "exec/cputlb.h" #include "exec/memory-internal.h" #include "exec/ram_addr.h" #include "tcg/tcg.h" #include "qemu/error-report.h" #include "exec/log.h" #include "exec/helper-proto.h" #include "qemu/atomic.h" #include "qemu/atomic128.h" /* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */ /* #define DEBUG_TLB */ /* #define DEBUG_TLB_LOG */ #ifdef DEBUG_TLB # define DEBUG_TLB_GATE 1 # ifdef DEBUG_TLB_LOG # define DEBUG_TLB_LOG_GATE 1 # else # define DEBUG_TLB_LOG_GATE 0 # endif #else # define DEBUG_TLB_GATE 0 # define DEBUG_TLB_LOG_GATE 0 #endif #define tlb_debug(fmt, ...) 
do { \ if (DEBUG_TLB_LOG_GATE) { \ qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \ ## __VA_ARGS__); \ } else if (DEBUG_TLB_GATE) { \ fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \ } \ } while (0) #define assert_cpu_is_self(cpu) do { \ if (DEBUG_TLB_GATE) { \ g_assert(!(cpu)->created || qemu_cpu_is_self(cpu)); \ } \ } while (0) /* run_on_cpu_data.target_ptr should always be big enough for a * target_ulong even on 32 bit builds */ QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data)); /* We currently can't handle more than 16 bits in the MMUIDX bitmask. */ QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16); #define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1) static inline size_t sizeof_tlb(CPUArchState *env, uintptr_t mmu_idx) { return env->tlb_mask[mmu_idx] + (1 << CPU_TLB_ENTRY_BITS); } static void tlb_window_reset(CPUTLBWindow *window, int64_t ns, size_t max_entries) { window->begin_ns = ns; window->max_entries = max_entries; } static void tlb_dyn_init(CPUArchState *env) { int i; for (i = 0; i < NB_MMU_MODES; i++) { CPUTLBDesc *desc = &env->tlb_d[i]; size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS; tlb_window_reset(&desc->window, get_clock_realtime(), 0); desc->n_used_entries = 0; env->tlb_mask[i] = (n_entries - 1) << CPU_TLB_ENTRY_BITS; env->tlb_table[i] = g_new(CPUTLBEntry, n_entries); env->iotlb[i] = g_new(CPUIOTLBEntry, n_entries); } } /** * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary * @env: CPU that owns the TLB * @mmu_idx: MMU index of the TLB * * Called with tlb_lock_held. * * We have two main constraints when resizing a TLB: (1) we only resize it * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing * the array or unnecessarily flushing it), which means we do not control how * frequently the resizing can occur; (2) we don't have access to the guest's * future scheduling decisions, and therefore have to decide the magnitude of * the resize based on past observations. 
* * In general, a memory-hungry process can benefit greatly from an appropriately * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that * we just have to make the TLB as large as possible; while an oversized TLB * results in minimal TLB miss rates, it also takes longer to be flushed * (flushes can be _very_ frequent), and the reduced locality can also hurt * performance. * * To achieve near-optimal performance for all kinds of workloads, we: * * 1. Aggressively increase the size of the TLB when the use rate of the * TLB being flushed is high, since it is likely that in the near future this * memory-hungry process will execute again, and its memory hungriness will * probably be similar. * * 2. Slowly reduce the size of the TLB as the use rate declines over a * reasonably large time window. The rationale is that if in such a time window * we have not observed a high TLB use rate, it is likely that we won't observe * it in the near future. In that case, once a time window expires we downsize * the TLB to match the maximum use rate observed in the window. * * 3. Try to keep the maximum use rate in a time window in the 30-70% range, * since in that range performance is likely near-optimal. Recall that the TLB * is direct mapped, so we want the use rate to be low (or at least not too * high), since otherwise we are likely to have a significant amount of * conflict misses. 
 */
static void tlb_mmu_resize_locked(CPUArchState *env, int mmu_idx)
{
    CPUTLBDesc *desc = &env->tlb_d[mmu_idx];
    size_t old_size = tlb_n_entries(env, mmu_idx);
    size_t rate;
    size_t new_size = old_size;
    int64_t now = get_clock_realtime();
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window.begin_ns + window_len_ns;

    /* Track the peak occupancy seen inside the current time window. */
    if (desc->n_used_entries > desc->window.max_entries) {
        desc->window.max_entries = desc->n_used_entries;
    }
    /* Use rate in percent of the (direct-mapped) table's capacity. */
    rate = desc->window.max_entries * 100 / old_size;

    if (rate > 70) {
        /* Grow aggressively: double, capped at the maximum table size. */
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window.max_entries);
        size_t expected_rate = desc->window.max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        /* No resize; just roll the window over when it has expired. */
        if (window_expired) {
            tlb_window_reset(&desc->window, now, desc->n_used_entries);
        }
        return;
    }

    /* Free first so the retry loop below starts with memory released. */
    g_free(env->tlb_table[mmu_idx]);
    g_free(env->iotlb[mmu_idx]);

    tlb_window_reset(&desc->window, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
    env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (env->tlb_table[mmu_idx] == NULL || env->iotlb[mmu_idx] == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            /* NOTE(review): errno may be stale here — g_try_new() is not
             * documented to set errno on failure; verify the message source. */
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        env->tlb_mask[mmu_idx] = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        /* g_free(NULL) is a no-op, so freeing both unconditionally is safe. */
        g_free(env->tlb_table[mmu_idx]);
        g_free(env->iotlb[mmu_idx]);
        env->tlb_table[mmu_idx] = g_try_new(CPUTLBEntry, new_size);
        env->iotlb[mmu_idx] = g_try_new(CPUIOTLBEntry, new_size);
    }
}

/*
 * Reset one mmu_idx's TLB to all-invalid, resizing it first according to
 * the use-rate bookkeeping above.  Filling entries with -1 makes them
 * "empty" in the sense of tlb_entry_is_empty().
 */
static inline void tlb_table_flush_by_mmuidx(CPUArchState *env, int mmu_idx)
{
    tlb_mmu_resize_locked(env, mmu_idx);
    memset(env->tlb_table[mmu_idx], -1, sizeof_tlb(env, mmu_idx));
    env->tlb_d[mmu_idx].n_used_entries = 0;
}

/* Occupancy counters feeding the resize heuristic in tlb_mmu_resize_locked. */
static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env->tlb_d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env->tlb_d[mmu_idx].n_used_entries--;
}

/* Initialize this vCPU's TLB state: lock, dirty mask, and dynamic sizing. */
void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;

    qemu_spin_init(&env->tlb_c.lock);

    /* Ensure that cpu_reset performs a full flush.  */
    env->tlb_c.dirty = ALL_MMUIDX_BITS;

    tlb_dyn_init(env);
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
*/ static void flush_all_helper(CPUState *src, run_on_cpu_func fn, run_on_cpu_data d) { CPUState *cpu; CPU_FOREACH(cpu) { if (cpu != src) { async_run_on_cpu(cpu, fn, d); } } } void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide) { CPUState *cpu; size_t full = 0, part = 0, elide = 0; CPU_FOREACH(cpu) { CPUArchState *env = cpu->env_ptr; full += atomic_read(&env->tlb_c.full_flush_count); part += atomic_read(&env->tlb_c.part_flush_count); elide += atomic_read(&env->tlb_c.elide_flush_count); } *pfull = full; *ppart = part; *pelide = elide; } static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx) { tlb_table_flush_by_mmuidx(env, mmu_idx); memset(env->tlb_v_table[mmu_idx], -1, sizeof(env->tlb_v_table[0])); env->tlb_d[mmu_idx].large_page_addr = -1; env->tlb_d[mmu_idx].large_page_mask = -1; env->tlb_d[mmu_idx].vindex = 0; } static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) { CPUArchState *env = cpu->env_ptr; uint16_t asked = data.host_int; uint16_t all_dirty, work, to_clean; assert_cpu_is_self(cpu); tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked); qemu_spin_lock(&env->tlb_c.lock); all_dirty = env->tlb_c.dirty; to_clean = asked & all_dirty; all_dirty &= ~to_clean; env->tlb_c.dirty = all_dirty; for (work = to_clean; work != 0; work &= work - 1) { int mmu_idx = ctz32(work); tlb_flush_one_mmuidx_locked(env, mmu_idx); } qemu_spin_unlock(&env->tlb_c.lock); cpu_tb_jmp_cache_clear(cpu); if (to_clean == ALL_MMUIDX_BITS) { atomic_set(&env->tlb_c.full_flush_count, env->tlb_c.full_flush_count + 1); } else { atomic_set(&env->tlb_c.part_flush_count, env->tlb_c.part_flush_count + ctpop16(to_clean)); if (to_clean != asked) { atomic_set(&env->tlb_c.elide_flush_count, env->tlb_c.elide_flush_count + ctpop16(asked & ~to_clean)); } } } void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap) { tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap); if (cpu->created && !qemu_cpu_is_self(cpu)) { async_run_on_cpu(cpu, 
tlb_flush_by_mmuidx_async_work, RUN_ON_CPU_HOST_INT(idxmap)); } else { tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap)); } } void tlb_flush(CPUState *cpu) { tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS); } void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap) { const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap)); } void tlb_flush_all_cpus(CPUState *src_cpu) { tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS); } void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap) { const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work; tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap); flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap)); } void tlb_flush_all_cpus_synced(CPUState *src_cpu) { tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS); } static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry, target_ulong page) { return tlb_hit_page(tlb_entry->addr_read, page) || tlb_hit_page(tlb_addr_write(tlb_entry), page) || tlb_hit_page(tlb_entry->addr_code, page); } /** * tlb_entry_is_empty - return true if the entry is not in use * @te: pointer to CPUTLBEntry */ static inline bool tlb_entry_is_empty(const CPUTLBEntry *te) { return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1; } /* Called with tlb_c.lock held */ static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry, target_ulong page) { if (tlb_hit_page_anyprot(tlb_entry, page)) { memset(tlb_entry, -1, sizeof(*tlb_entry)); return true; } return false; } /* Called with tlb_c.lock held */ static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx, target_ulong page) { int k; assert_cpu_is_self(ENV_GET_CPU(env)); for (k = 0; k < CPU_VTLB_SIZE; k++) { if 
(tlb_flush_entry_locked(&env->tlb_v_table[mmu_idx][k], page)) { tlb_n_used_entries_dec(env, mmu_idx); } } } static void tlb_flush_page_locked(CPUArchState *env, int midx, target_ulong page) { target_ulong lp_addr = env->tlb_d[midx].large_page_addr; target_ulong lp_mask = env->tlb_d[midx].large_page_mask; /* Check if we need to flush due to large pages. */ if ((page & lp_mask) == lp_addr) { tlb_debug("forcing full flush midx %d (" TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", midx, lp_addr, lp_mask); tlb_flush_one_mmuidx_locked(env, midx); } else { if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) { tlb_n_used_entries_dec(env, midx); } tlb_flush_vtlb_page_locked(env, midx, page); } } /* As we are going to hijack the bottom bits of the page address for a * mmuidx bit mask we need to fail to build if we can't do that */ QEMU_BUILD_BUG_ON(NB_MMU_MODES > TARGET_PAGE_BITS_MIN); static void tlb_flush_page_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data) { CPUArchState *env = cpu->env_ptr; target_ulong addr_and_mmuidx = (target_ulong) data.target_ptr; target_ulong addr = addr_and_mmuidx & TARGET_PAGE_MASK; unsigned long mmu_idx_bitmap = addr_and_mmuidx & ALL_MMUIDX_BITS; int mmu_idx; assert_cpu_is_self(cpu); tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%lx\n", addr, mmu_idx_bitmap); qemu_spin_lock(&env->tlb_c.lock); for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { if (test_bit(mmu_idx, &mmu_idx_bitmap)) { tlb_flush_page_locked(env, mmu_idx, addr); } } qemu_spin_unlock(&env->tlb_c.lock); tb_flush_jmp_cache(cpu, addr); } void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap) { target_ulong addr_and_mmu_idx; tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap); /* This should already be page aligned */ addr_and_mmu_idx = addr & TARGET_PAGE_MASK; addr_and_mmu_idx |= idxmap; if (!qemu_cpu_is_self(cpu)) { async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_work, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); } else 
{ tlb_flush_page_by_mmuidx_async_work( cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); } } void tlb_flush_page(CPUState *cpu, target_ulong addr) { tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS); } void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr, uint16_t idxmap) { const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work; target_ulong addr_and_mmu_idx; tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); /* This should already be page aligned */ addr_and_mmu_idx = addr & TARGET_PAGE_MASK; addr_and_mmu_idx |= idxmap; flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); fn(src_cpu, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); } void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr) { tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS); } void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu, target_ulong addr, uint16_t idxmap) { const run_on_cpu_func fn = tlb_flush_page_by_mmuidx_async_work; target_ulong addr_and_mmu_idx; tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap); /* This should already be page aligned */ addr_and_mmu_idx = addr & TARGET_PAGE_MASK; addr_and_mmu_idx |= idxmap; flush_all_helper(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_TARGET_PTR(addr_and_mmu_idx)); } void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr) { tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS); } /* update the TLBs so that writes to code in the virtual page 'addr' can be detected */ void tlb_protect_code(ram_addr_t ram_addr) { cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE, DIRTY_MEMORY_CODE); } /* update the TLB so that writes in physical page 'phys_addr' are no longer tested for self modifying code */ void tlb_unprotect_code(ram_addr_t ram_addr) { cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); } /* * Dirty write flag handling * * When 
the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with atomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    /* Skip entries that are invalid or already take the slow path. */
    if ((addr & (TLB_INVALID_MASK | TLB_MMIO | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            atomic_set(&tlb_entry->addr_write,
                       tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(env, mmu_idx);

        /* Main table first, then the victim TLB for the same mmu_idx. */
        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_table[mmu_idx][i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env->tlb_v_table[mmu_idx][i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    /* Drop the NOTDIRTY flag only on an exact match for this page. */
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env->tlb_c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env->tlb_v_table[mmu_idx][k], vaddr);
        }
    }
    qemu_spin_unlock(&env->tlb_c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated.  */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env->tlb_d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page.  */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB.
*/ lp_mask &= env->tlb_d[mmu_idx].large_page_mask; while (((lp_addr ^ vaddr) & lp_mask) != 0) { lp_mask <<= 1; } } env->tlb_d[mmu_idx].large_page_addr = lp_addr & lp_mask; env->tlb_d[mmu_idx].large_page_mask = lp_mask; } /* Add a new TLB entry. At most one entry for a given virtual address * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the * supplied size is only used by tlb_flush_page. * * Called from TCG-generated code, which is under an RCU read-side * critical section. */ void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, hwaddr paddr, MemTxAttrs attrs, int prot, int mmu_idx, target_ulong size) { CPUArchState *env = cpu->env_ptr; MemoryRegionSection *section; unsigned int index; target_ulong address; target_ulong code_address; uintptr_t addend; CPUTLBEntry *te, tn; hwaddr iotlb, xlat, sz, paddr_page; target_ulong vaddr_page; int asidx = cpu_asidx_from_attrs(cpu, attrs); assert_cpu_is_self(cpu); if (size <= TARGET_PAGE_SIZE) { sz = TARGET_PAGE_SIZE; } else { tlb_add_large_page(env, mmu_idx, vaddr, size); sz = size; } vaddr_page = vaddr & TARGET_PAGE_MASK; paddr_page = paddr & TARGET_PAGE_MASK; section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, &xlat, &sz, attrs, &prot); assert(sz >= TARGET_PAGE_SIZE); tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx " prot=%x idx=%d\n", vaddr, paddr, prot, mmu_idx); address = vaddr_page; if (size < TARGET_PAGE_SIZE) { /* * Slow-path the TLB entries; we will repeat the MMU check and TLB * fill on every access. 
*/ address |= TLB_RECHECK; } if (!memory_region_is_ram(section->mr) && !memory_region_is_romd(section->mr)) { /* IO memory case */ address |= TLB_MMIO; addend = 0; } else { /* TLB_MMIO for rom/romd handled below */ addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; } code_address = address; iotlb = memory_region_section_get_iotlb(cpu, section, vaddr_page, paddr_page, xlat, prot, &address); index = tlb_index(env, mmu_idx, vaddr_page); te = tlb_entry(env, mmu_idx, vaddr_page); /* * Hold the TLB lock for the rest of the function. We could acquire/release * the lock several times in the function, but it is faster to amortize the * acquisition cost by acquiring it just once. Note that this leads to * a longer critical section, but this is not a concern since the TLB lock * is unlikely to be contended. */ qemu_spin_lock(&env->tlb_c.lock); /* Note that the tlb is no longer clean. */ env->tlb_c.dirty |= 1 << mmu_idx; /* Make sure there's no cached translation for the new page. */ tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); /* * Only evict the old entry to the victim tlb if it's for a * different page; otherwise just overwrite the stale data. */ if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { unsigned vidx = env->tlb_d[mmu_idx].vindex++ % CPU_VTLB_SIZE; CPUTLBEntry *tv = &env->tlb_v_table[mmu_idx][vidx]; /* Evict the old entry into the victim tlb. 
*/ copy_tlb_helper_locked(tv, te); env->iotlb_v[mmu_idx][vidx] = env->iotlb[mmu_idx][index]; tlb_n_used_entries_dec(env, mmu_idx); } /* refill the tlb */ /* * At this point iotlb contains a physical section number in the lower * TARGET_PAGE_BITS, and either * + the ram_addr_t of the page base of the target RAM (if NOTDIRTY or ROM) * + the offset within section->mr of the page base (otherwise) * We subtract the vaddr_page (which is page aligned and thus won't * disturb the low bits) to give an offset which can be added to the * (non-page-aligned) vaddr of the eventual memory access to get * the MemoryRegion offset for the access. Note that the vaddr we * subtract here is that of the page base, and not the same as the * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). */ env->iotlb[mmu_idx][index].addr = iotlb - vaddr_page; env->iotlb[mmu_idx][index].attrs = attrs; /* Now calculate the new entry */ tn.addend = addend - vaddr_page; if (prot & PAGE_READ) { tn.addr_read = address; } else { tn.addr_read = -1; } if (prot & PAGE_EXEC) { tn.addr_code = code_address; } else { tn.addr_code = -1; } tn.addr_write = -1; if (prot & PAGE_WRITE) { if ((memory_region_is_ram(section->mr) && section->readonly) || memory_region_is_romd(section->mr)) { /* Write access calls the I/O callback. */ tn.addr_write = address | TLB_MMIO; } else if (memory_region_is_ram(section->mr) && cpu_physical_memory_is_clean( memory_region_get_ram_addr(section->mr) + xlat)) { tn.addr_write = address | TLB_NOTDIRTY; } else { tn.addr_write = address; } if (prot & PAGE_WRITE_INV) { tn.addr_write |= TLB_INVALID_MASK; } } copy_tlb_helper_locked(te, &tn); tlb_n_used_entries_inc(env, mmu_idx); qemu_spin_unlock(&env->tlb_c.lock); } /* Add a new TLB entry, but without specifying the memory * transaction attributes to be used. 
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size)
{
    /* Delegate to the attrs-taking variant with unspecified attributes. */
    tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
                            prot, mmu_idx, size);
}

/*
 * Translate a host pointer back into a ram_addr_t, aborting (rather than
 * returning an error) when the pointer does not point into guest RAM.
 */
static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
{
    ram_addr_t ram_addr;

    ram_addr = qemu_ram_addr_from_host(ptr);
    if (ram_addr == RAM_ADDR_INVALID) {
        error_report("Bad ram pointer %p", ptr);
        abort();
    }
    return ram_addr;
}

/*
 * Perform a load through the memory-region dispatch path for a TLB entry
 * flagged as I/O (or TLB_RECHECK, in which case the MMU check is redone
 * first and a plain RAM load may be taken instead).
 */
static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
                         int mmu_idx,
                         target_ulong addr, uintptr_t retaddr,
                         bool recheck, MMUAccessType access_type, int size)
{
    CPUState *cpu = ENV_GET_CPU(env);
    hwaddr mr_offset;
    MemoryRegionSection *section;
    MemoryRegion *mr;
    uint64_t val;
    bool locked = false;
    MemTxResult r;

    if (recheck) {
        /*
         * This is a TLB_RECHECK access, where the MMU protection
         * covers a smaller range than a target page, and we must
         * repeat the MMU check here. This tlb_fill() call might
         * longjump out if this access should cause a guest exception.
*/ CPUTLBEntry *entry; target_ulong tlb_addr; tlb_fill(cpu, addr, size, MMU_DATA_LOAD, mmu_idx, retaddr); entry = tlb_entry(env, mmu_idx, addr); tlb_addr = entry->addr_read; if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { /* RAM access */ uintptr_t haddr = addr + entry->addend; return ldn_p((void *)haddr, size); } /* Fall through for handling IO accesses */ } section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); mr = section->mr; mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; cpu->mem_io_pc = retaddr; if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) { cpu_io_recompile(cpu, retaddr); } cpu->mem_io_vaddr = addr; cpu->mem_io_access_type = access_type; if (mr->global_locking && !qemu_mutex_iothread_locked()) { qemu_mutex_lock_iothread(); locked = true; } r = memory_region_dispatch_read(mr, mr_offset, &val, size, iotlbentry->attrs); if (r != MEMTX_OK) { hwaddr physaddr = mr_offset + section->offset_within_address_space - section->offset_within_region; cpu_transaction_failed(cpu, physaddr, addr, size, access_type, mmu_idx, iotlbentry->attrs, r, retaddr); } if (locked) { qemu_mutex_unlock_iothread(); } return val; } static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, int mmu_idx, uint64_t val, target_ulong addr, uintptr_t retaddr, bool recheck, int size) { CPUState *cpu = ENV_GET_CPU(env); hwaddr mr_offset; MemoryRegionSection *section; MemoryRegion *mr; bool locked = false; MemTxResult r; if (recheck) { /* * This is a TLB_RECHECK access, where the MMU protection * covers a smaller range than a target page, and we must * repeat the MMU check here. This tlb_fill() call might * longjump out if this access should cause a guest exception. 
*/ CPUTLBEntry *entry; target_ulong tlb_addr; tlb_fill(cpu, addr, size, MMU_DATA_STORE, mmu_idx, retaddr); entry = tlb_entry(env, mmu_idx, addr); tlb_addr = tlb_addr_write(entry); if (!(tlb_addr & ~(TARGET_PAGE_MASK | TLB_RECHECK))) { /* RAM access */ uintptr_t haddr = addr + entry->addend; stn_p((void *)haddr, size, val); return; } /* Fall through for handling IO accesses */ } section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); mr = section->mr; mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; if (mr != &io_mem_rom && mr != &io_mem_notdirty && !cpu->can_do_io) { cpu_io_recompile(cpu, retaddr); } cpu->mem_io_vaddr = addr; cpu->mem_io_pc = retaddr; if (mr->global_locking && !qemu_mutex_iothread_locked()) { qemu_mutex_lock_iothread(); locked = true; } r = memory_region_dispatch_write(mr, mr_offset, val, size, iotlbentry->attrs); if (r != MEMTX_OK) { hwaddr physaddr = mr_offset + section->offset_within_address_space - section->offset_within_region; cpu_transaction_failed(cpu, physaddr, addr, size, MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, retaddr); } if (locked) { qemu_mutex_unlock_iothread(); } } /* Return true if ADDR is present in the victim tlb, and has been copied back to the main tlb. */ static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, size_t elt_ofs, target_ulong page) { size_t vidx; assert_cpu_is_self(ENV_GET_CPU(env)); for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { CPUTLBEntry *vtlb = &env->tlb_v_table[mmu_idx][vidx]; target_ulong cmp; /* elt_ofs might correspond to .addr_write, so use atomic_read */ #if TCG_OVERSIZED_GUEST cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); #else cmp = atomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); #endif if (cmp == page) { /* Found entry in victim tlb, swap tlb and iotlb. 
*/ CPUTLBEntry tmptlb, *tlb = &env->tlb_table[mmu_idx][index]; qemu_spin_lock(&env->tlb_c.lock); copy_tlb_helper_locked(&tmptlb, tlb); copy_tlb_helper_locked(tlb, vtlb); copy_tlb_helper_locked(vtlb, &tmptlb); qemu_spin_unlock(&env->tlb_c.lock); CPUIOTLBEntry tmpio, *io = &env->iotlb[mmu_idx][index]; CPUIOTLBEntry *vio = &env->iotlb_v[mmu_idx][vidx]; tmpio = *io; *io = *vio; *vio = tmpio; return true; } } return false; } /* Macro to call the above, with local variables from the use context. */ #define VICTIM_TLB_HIT(TY, ADDR) \ victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ (ADDR) & TARGET_PAGE_MASK) /* NOTE: this function can trigger an exception */ /* NOTE2: the returned address is not exactly the physical address: it * is actually a ram_addr_t (in system mode; the user mode emulation * version of this function returns a guest virtual address). */ tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) { uintptr_t mmu_idx = cpu_mmu_index(env, true); uintptr_t index = tlb_index(env, mmu_idx, addr); CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); void *p; if (unlikely(!tlb_hit(entry->addr_code, addr))) { if (!VICTIM_TLB_HIT(addr_code, addr)) { tlb_fill(ENV_GET_CPU(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); index = tlb_index(env, mmu_idx, addr); entry = tlb_entry(env, mmu_idx, addr); } assert(tlb_hit(entry->addr_code, addr)); } if (unlikely(entry->addr_code & (TLB_RECHECK | TLB_MMIO))) { /* * Return -1 if we can't translate and execute from an entire * page of RAM here, which will cause us to execute by loading * and translating one insn at a time, without caching: * - TLB_RECHECK: means the MMU protection covers a smaller range * than a target page, so we must redo the MMU check every insn * - TLB_MMIO: region is not backed by RAM */ return -1; } p = (void *)((uintptr_t)addr + entry->addend); return qemu_ram_addr_from_host_nofail(p); } /* Probe for whether the specified guest write access is permitted. 
 * If it is not permitted then an exception will be taken in the same
 * way as if this were a real write access (and we will not return).
 * Otherwise the function will return, and there will be a valid
 * entry in the TLB for this access.
 */
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr)
{
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);

    if (!tlb_hit(tlb_addr_write(entry), addr)) {
        /* TLB entry is for a different page */
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            /* tlb_fill raises the guest exception itself on failure. */
            tlb_fill(ENV_GET_CPU(env), addr, size, MMU_DATA_STORE,
                     mmu_idx, retaddr);
        }
    }
}

/* Probe for a read-modify-write atomic operation.  Do not allow unaligned
 * operations, or io operations to proceed.  Return the host address.  */
static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
                               TCGMemOpIdx oi, uintptr_t retaddr,
                               NotDirtyInfo *ndi)
{
    size_t mmu_idx = get_mmuidx(oi);
    uintptr_t index = tlb_index(env, mmu_idx, addr);
    CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
    target_ulong tlb_addr = tlb_addr_write(tlbe);
    TCGMemOp mop = get_memop(oi);
    int a_bits = get_alignment_bits(mop);
    int s_bits = mop & MO_SIZE;
    void *hostaddr;

    /* Adjust the given return address.  */
    retaddr -= GETPC_ADJ;

    /* Enforce guest required alignment.  */
    if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
        /* ??? Maybe indicate atomic op to cpu_unaligned_access */
        cpu_unaligned_access(ENV_GET_CPU(env), addr, MMU_DATA_STORE,
                             mmu_idx, retaddr);
    }

    /* Enforce qemu required alignment.  */
    if (unlikely(addr & ((1 << s_bits) - 1))) {
        /* We get here if guest alignment was not requested,
           or was not enforced by cpu_unaligned_access above.
           We might widen the access and emulate, but for now
           mark an exception and exit the cpu loop.  */
        goto stop_the_world;
    }

    /* Check TLB entry and enforce page permissions.  */
    if (!tlb_hit(tlb_addr, addr)) {
        if (!VICTIM_TLB_HIT(addr_write, addr)) {
            tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_STORE,
                     mmu_idx, retaddr);
            /* tlb_fill may have moved the entry: re-derive. */
            index = tlb_index(env, mmu_idx, addr);
            tlbe = tlb_entry(env, mmu_idx, addr);
        }
        tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
    }

    /* Notice an IO access or a needs-MMU-lookup access */
    if (unlikely(tlb_addr & (TLB_MMIO | TLB_RECHECK))) {
        /* There's really nothing that can be done to
           support this apart from stop-the-world.  */
        goto stop_the_world;
    }

    /* Let the guest notice RMW on a write-only page.  */
    if (unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
        tlb_fill(ENV_GET_CPU(env), addr, 1 << s_bits, MMU_DATA_LOAD,
                 mmu_idx, retaddr);
        /* Since we don't support reads and writes to different addresses,
           and we do have the proper page loaded for write, this shouldn't
           ever return.  But just in case, handle via stop-the-world.  */
        goto stop_the_world;
    }

    hostaddr = (void *)((uintptr_t)addr + tlbe->addend);

    ndi->active = false;
    if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
        /* Defer the dirty-bit bookkeeping; caller completes it via
           memory_notdirty_write_complete (see ATOMIC_MMU_CLEANUP below). */
        ndi->active = true;
        memory_notdirty_write_prepare(ndi, ENV_GET_CPU(env), addr,
                                      qemu_ram_addr_from_host_nofail(hostaddr),
                                      1 << s_bits);
    }

    return hostaddr;

 stop_the_world:
    cpu_loop_exit_atomic(ENV_GET_CPU(env), retaddr);
}

#ifdef TARGET_WORDS_BIGENDIAN
# define TGT_BE(X)  (X)
# define TGT_LE(X)  BSWAP(X)
#else
# define TGT_BE(X)  BSWAP(X)
# define TGT_LE(X)  (X)
#endif

#define MMUSUFFIX _mmu

/* Instantiate the out-of-line softmmu load/store helpers for
   1/2/4/8-byte accesses. */
#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"

/* First set of helpers allows passing in of OI and RETADDR.  This makes
   them callable from other helpers.  */

#define EXTRA_ARGS , TCGMemOpIdx oi, uintptr_t retaddr
#define ATOMIC_NAME(X) \
    HELPER(glue(glue(glue(atomic_ ## X, SUFFIX), END), _mmu))
#define ATOMIC_MMU_DECLS NotDirtyInfo ndi
#define ATOMIC_MMU_LOOKUP atomic_mmu_lookup(env, addr, oi, retaddr, &ndi)
#define ATOMIC_MMU_CLEANUP                              \
    do {                                                \
        if (unlikely(ndi.active)) {                     \
            memory_notdirty_write_complete(&ndi);       \
        }                                               \
    } while (0)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
#define DATA_SIZE 16
#include "atomic_template.h"
#endif

/* Second set of helpers are directly callable from TCG as helpers.  */

#undef EXTRA_ARGS
#undef ATOMIC_NAME
#undef ATOMIC_MMU_LOOKUP
#define EXTRA_ARGS         , TCGMemOpIdx oi
#define ATOMIC_NAME(X)     HELPER(glue(glue(atomic_ ## X, SUFFIX), END))
#define ATOMIC_MMU_LOOKUP  atomic_mmu_lookup(env, addr, oi, GETPC(), &ndi)

#define DATA_SIZE 1
#include "atomic_template.h"

#define DATA_SIZE 2
#include "atomic_template.h"

#define DATA_SIZE 4
#include "atomic_template.h"

#ifdef CONFIG_ATOMIC64
#define DATA_SIZE 8
#include "atomic_template.h"
#endif

/* Code access functions.  */

#undef MMUSUFFIX
#define MMUSUFFIX _cmmu
#undef GETPC
#define GETPC() ((uintptr_t)0)
#define SOFTMMU_CODE_ACCESS

#define DATA_SIZE 1
#include "softmmu_template.h"

#define DATA_SIZE 2
#include "softmmu_template.h"

#define DATA_SIZE 4
#include "softmmu_template.h"

#define DATA_SIZE 8
#include "softmmu_template.h"
pmp-tool/PMP
src/qemu/src-pmp/tcg/riscv/tcg-target.inc.c
<reponame>pmp-tool/PMP /* * Tiny Code Generator for QEMU * * Copyright (c) 2018 SiFive, Inc * Copyright (c) 2008-2009 <NAME> <<EMAIL>> * Copyright (c) 2009 <NAME> <<EMAIL>> * Copyright (c) 2008 <NAME> * * Based on i386/tcg-target.c and mips/tcg-target.c * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
 */

#include "tcg-pool.inc.c"

#ifdef CONFIG_DEBUG_TCG
/* Register names for debug dumps, indexed by TCGReg / RISC-V ABI number. */
static const char * const tcg_target_reg_names[TCG_TARGET_NB_REGS] = {
    "zero", "ra", "sp", "gp", "tp", "t0", "t1", "t2",
    "s0", "s1", "a0", "a1", "a2", "a3", "a4", "a5",
    "a6", "a7", "s2", "s3", "s4", "s5", "s6", "s7",
    "s8", "s9", "s10", "s11", "t3", "t4", "t5", "t6"
};
#endif

/* Preferred allocation order: call-saved first, then temporaries,
   then the argument registers. */
static const int tcg_target_reg_alloc_order[] = {
    /* Call saved registers */
    /* TCG_REG_S0 reserved for TCG_AREG0 */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,

    /* Call clobbered registers */
    TCG_REG_T0,
    TCG_REG_T1,
    TCG_REG_T2,
    TCG_REG_T3,
    TCG_REG_T4,
    TCG_REG_T5,
    TCG_REG_T6,

    /* Argument registers */
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_iarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
    TCG_REG_A2,
    TCG_REG_A3,
    TCG_REG_A4,
    TCG_REG_A5,
    TCG_REG_A6,
    TCG_REG_A7,
};

static const int tcg_target_call_oarg_regs[] = {
    TCG_REG_A0,
    TCG_REG_A1,
};

/* Constant-constraint flags, OR'd into TCGArgConstraint.ct. */
#define TCG_CT_CONST_ZERO  0x100
#define TCG_CT_CONST_S12   0x200
#define TCG_CT_CONST_N12   0x400
#define TCG_CT_CONST_M12   0x800

/* Sign-extract LEN bits at POS, using the width matching the host. */
static inline tcg_target_long sextreg(tcg_target_long val, int pos, int len)
{
    if (TCG_TARGET_REG_BITS == 32) {
        return sextract32(val, pos, len);
    } else {
        return sextract64(val, pos, len);
    }
}

/* parse target specific constraints */
static const char *target_parse_constraint(TCGArgConstraint *ct,
                                           const char *ct_str, TCGType type)
{
    switch (*ct_str++) {
    case 'r':
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        break;
    case 'L':
        /* qemu_ld/qemu_st constraint */
        ct->ct |= TCG_CT_REG;
        ct->u.regs = 0xffffffff;
        /* qemu_ld/qemu_st uses TCG_REG_TMP0 */
#if defined(CONFIG_SOFTMMU)
        /* Keep the call-argument registers free for the slow-path helper. */
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[0]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[1]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[2]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[3]);
        tcg_regset_reset_reg(ct->u.regs, tcg_target_call_iarg_regs[4]);
#endif
        break;
    case 'I':
        ct->ct |= TCG_CT_CONST_S12;
        break;
    case 'N':
        ct->ct |= TCG_CT_CONST_N12;
        break;
    case 'M':
        ct->ct |= TCG_CT_CONST_M12;
        break;
    case 'Z':
        /* we can use a zero immediate as a zero register argument. */
        ct->ct |= TCG_CT_CONST_ZERO;
        break;
    default:
        return NULL;
    }
    return ct_str;
}

/* test if a constant matches the constraint */
static int tcg_target_const_match(tcg_target_long val, TCGType type,
                                  const TCGArgConstraint *arg_ct)
{
    int ct = arg_ct->ct;
    if (ct & TCG_CT_CONST) {
        return 1;
    }
    if ((ct & TCG_CT_CONST_ZERO) && val == 0) {
        return 1;
    }
    /* 'I': signed 12-bit immediate. */
    if ((ct & TCG_CT_CONST_S12) && val == sextreg(val, 0, 12)) {
        return 1;
    }
    /* 'N': negated value fits in a signed 12-bit immediate. */
    if ((ct & TCG_CT_CONST_N12) && -val == sextreg(-val, 0, 12)) {
        return 1;
    }
    /* 'M': value or its negation fits (symmetric +/-0xfff range). */
    if ((ct & TCG_CT_CONST_M12) && val >= -0xfff && val <= 0xfff) {
        return 1;
    }
    return 0;
}

/*
 * RISC-V Base ISA opcodes (IM)
 */

typedef enum {
    OPC_ADD = 0x33,
    OPC_ADDI = 0x13,
    OPC_AND = 0x7033,
    OPC_ANDI = 0x7013,
    OPC_AUIPC = 0x17,
    OPC_BEQ = 0x63,
    OPC_BGE = 0x5063,
    OPC_BGEU = 0x7063,
    OPC_BLT = 0x4063,
    OPC_BLTU = 0x6063,
    OPC_BNE = 0x1063,
    OPC_DIV = 0x2004033,
    OPC_DIVU = 0x2005033,
    OPC_JAL = 0x6f,
    OPC_JALR = 0x67,
    OPC_LB = 0x3,
    OPC_LBU = 0x4003,
    OPC_LD = 0x3003,
    OPC_LH = 0x1003,
    OPC_LHU = 0x5003,
    OPC_LUI = 0x37,
    OPC_LW = 0x2003,
    OPC_LWU = 0x6003,
    OPC_MUL = 0x2000033,
    OPC_MULH = 0x2001033,
    OPC_MULHSU = 0x2002033,
    OPC_MULHU = 0x2003033,
    OPC_OR = 0x6033,
    OPC_ORI = 0x6013,
    OPC_REM = 0x2006033,
    OPC_REMU = 0x2007033,
    OPC_SB = 0x23,
    OPC_SD = 0x3023,
    OPC_SH = 0x1023,
    OPC_SLL = 0x1033,
    OPC_SLLI = 0x1013,
    OPC_SLT = 0x2033,
    OPC_SLTI = 0x2013,
    OPC_SLTIU = 0x3013,
    OPC_SLTU = 0x3033,
    OPC_SRA = 0x40005033,
    OPC_SRAI = 0x40005013,
    OPC_SRL = 0x5033,
    OPC_SRLI = 0x5013,
    OPC_SUB = 0x40000033,
    OPC_SW = 0x2023,
    OPC_XOR = 0x4033,
    OPC_XORI = 0x4013,

#if TCG_TARGET_REG_BITS == 64
    OPC_ADDIW = 0x1b,
    OPC_ADDW = 0x3b,
    OPC_DIVUW = 0x200503b,
    OPC_DIVW = 0x200403b,
    OPC_MULW = 0x200003b,
    OPC_REMUW = 0x200703b,
/* continuation of the RISCVInsn opcode enum (RV64 W-suffix forms) */
    OPC_REMW = 0x200603b,
    OPC_SLLIW = 0x101b,
    OPC_SLLW = 0x103b,
    OPC_SRAIW = 0x4000501b,
    OPC_SRAW = 0x4000503b,
    OPC_SRLIW = 0x501b,
    OPC_SRLW = 0x503b,
    OPC_SUBW = 0x4000003b,
#else
    /* Simplify code throughout by defining aliases for RV32. */
    OPC_ADDIW = OPC_ADDI,
    OPC_ADDW = OPC_ADD,
    OPC_DIVUW = OPC_DIVU,
    OPC_DIVW = OPC_DIV,
    OPC_MULW = OPC_MUL,
    OPC_REMUW = OPC_REMU,
    OPC_REMW = OPC_REM,
    OPC_SLLIW = OPC_SLLI,
    OPC_SLLW = OPC_SLL,
    OPC_SRAIW = OPC_SRAI,
    OPC_SRAW = OPC_SRA,
    OPC_SRLIW = OPC_SRLI,
    OPC_SRLW = OPC_SRL,
    OPC_SUBW = OPC_SUB,
#endif

    OPC_FENCE = 0x0000000f,
} RISCVInsn;

/*
 * RISC-V immediate and instruction encoders (excludes 16-bit RVC)
 */

/* Type-R */
static int32_t encode_r(RISCVInsn opc, TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20;
}

/* Type-I */
static int32_t encode_imm12(uint32_t imm)
{
    return (imm & 0xfff) << 20;
}

static int32_t encode_i(RISCVInsn opc, TCGReg rd, TCGReg rs1, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | (rs1 & 0x1f) << 15 | encode_imm12(imm);
}

/* Type-S: 12-bit store offset split across imm[11:5] and imm[4:0]. */
static int32_t encode_simm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0xFE0) << 20;
    ret |= (imm & 0x1F) << 7;

    return ret;
}

static int32_t encode_s(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_simm12(imm);
}

/* Type-SB: 13-bit branch offset (bit 0 implicit) scattered per the ISA. */
static int32_t encode_sbimm12(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x1000) << 19;
    ret |= (imm & 0x7e0) << 20;
    ret |= (imm & 0x1e) << 7;
    ret |= (imm & 0x800) >> 4;

    return ret;
}

static int32_t encode_sb(RISCVInsn opc, TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    return opc | (rs1 & 0x1f) << 15 | (rs2 & 0x1f) << 20 | encode_sbimm12(imm);
}

/* Type-U: upper 20 bits (LUI/AUIPC). */
static int32_t encode_uimm20(uint32_t imm)
{
    return imm & 0xfffff000;
}

static int32_t encode_u(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_uimm20(imm);
}

/* Type-UJ: 21-bit jump offset (bit 0 implicit) scattered per the ISA. */
static int32_t encode_ujimm20(uint32_t imm)
{
    int32_t ret = 0;

    ret |= (imm & 0x0007fe) << (21 - 1);
    ret |= (imm & 0x000800) << (20 - 11);
    ret |= (imm & 0x0ff000) << (12 - 12);
    ret |= (imm & 0x100000) << (31 - 20);

    return ret;
}

static int32_t encode_uj(RISCVInsn opc, TCGReg rd, uint32_t imm)
{
    return opc | (rd & 0x1f) << 7 | encode_ujimm20(imm);
}

/*
 * RISC-V instruction emitters
 */

static void tcg_out_opc_reg(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGReg rs2)
{
    tcg_out32(s, encode_r(opc, rd, rs1, rs2));
}

static void tcg_out_opc_imm(TCGContext *s, RISCVInsn opc,
                            TCGReg rd, TCGReg rs1, TCGArg imm)
{
    tcg_out32(s, encode_i(opc, rd, rs1, imm));
}

static void tcg_out_opc_store(TCGContext *s, RISCVInsn opc,
                              TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_s(opc, rs1, rs2, imm));
}

static void tcg_out_opc_branch(TCGContext *s, RISCVInsn opc,
                               TCGReg rs1, TCGReg rs2, uint32_t imm)
{
    tcg_out32(s, encode_sb(opc, rs1, rs2, imm));
}

static void tcg_out_opc_upper(TCGContext *s, RISCVInsn opc,
                              TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_u(opc, rd, imm));
}

static void tcg_out_opc_jump(TCGContext *s, RISCVInsn opc,
                             TCGReg rd, uint32_t imm)
{
    tcg_out32(s, encode_uj(opc, rd, imm));
}

/* Fill COUNT insn slots with "addi zero, zero, 0" (the canonical NOP). */
static void tcg_out_nop_fill(tcg_insn_unit *p, int count)
{
    int i;
    for (i = 0; i < count; ++i) {
        p[i] = encode_i(OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);
    }
}

/*
 * Relocations
 */

static bool reloc_sbimm12(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;

    /* Succeed only if the offset fits the signed 13-bit branch range. */
    if (offset == sextreg(offset, 1, 12) << 1) {
        code_ptr[0] |= encode_sbimm12(offset);
        return true;
    }

    return false;
}

static bool reloc_jimm20(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;

    /* Succeed only if the offset fits the signed 21-bit JAL range. */
    if (offset == sextreg(offset, 1, 20) << 1) {
        code_ptr[0] |= encode_ujimm20(offset);
        return true;
    }

    return false;
}

/* Patch an AUIPC+12-bit-immediate pair; hi/lo split compensates for the
   sign-extension of the low part. */
static bool reloc_call(tcg_insn_unit *code_ptr, tcg_insn_unit *target)
{
    intptr_t offset = (intptr_t)target - (intptr_t)code_ptr;
    int32_t lo = sextreg(offset, 0, 12);
    int32_t hi = offset - lo;

    /* hi is only 32 bits wide; fail if the full offset does not survive. */
    if (offset == hi + lo) {
        code_ptr[0] |= encode_uimm20(hi);
        code_ptr[1] |= encode_imm12(lo);
        return true;
    }

    return false;
}

static bool patch_reloc(tcg_insn_unit *code_ptr, int type,
                        intptr_t value, intptr_t addend)
{
    uint32_t insn = *code_ptr;
    intptr_t diff;
    bool short_jmp;

    tcg_debug_assert(addend == 0);

    switch (type) {
    case R_RISCV_BRANCH:
        diff = value - (uintptr_t)code_ptr;
        short_jmp = diff == sextreg(diff, 0, 12);
        if (short_jmp) {
            return reloc_sbimm12(code_ptr, (tcg_insn_unit *)value);
        } else {
            /* Out of conditional-branch range: rewrite as an inverted
               branch over a JAL that was emitted as a NOP slot. */
            /* Invert the condition */
            insn = insn ^ (1 << 12);
            /* Clear the offset */
            insn &= 0x01fff07f;
            /* Set the offset to the PC + 8 */
            insn |= encode_sbimm12(8);

            /* Move forward */
            code_ptr[0] = insn;

            /* Overwrite the NOP with jal x0,value */
            diff = value - (uintptr_t)(code_ptr + 1);
            insn = encode_uj(OPC_JAL, TCG_REG_ZERO, diff);
            code_ptr[1] = insn;

            return true;
        }
        break;
    case R_RISCV_JAL:
        return reloc_jimm20(code_ptr, (tcg_insn_unit *)value);
        break;
    case R_RISCV_CALL:
        return reloc_call(code_ptr, (tcg_insn_unit *)value);
        break;
    default:
        tcg_abort();
    }
}

/*
 * TCG intrinsics
 */

static void tcg_out_mov(TCGContext *s, TCGType type, TCGReg ret, TCGReg arg)
{
    if (ret == arg) {
        return;
    }
    switch (type) {
    case TCG_TYPE_I32:
    case TCG_TYPE_I64:
        /* mv is addi rd, rs, 0. */
        tcg_out_opc_imm(s, OPC_ADDI, ret, arg, 0);
        break;
    default:
        g_assert_not_reached();
    }
}

/* Materialize an arbitrary constant, trying progressively more
   expensive sequences before falling back to the constant pool. */
static void tcg_out_movi(TCGContext *s, TCGType type, TCGReg rd,
                         tcg_target_long val)
{
    tcg_target_long lo, hi, tmp;
    int shift, ret;

    if (TCG_TARGET_REG_BITS == 64 && type == TCG_TYPE_I32) {
        val = (int32_t)val;
    }

    lo = sextreg(val, 0, 12);
    if (val == lo) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, lo);
        return;
    }

    hi = val - lo;
    if (TCG_TARGET_REG_BITS == 32 || val == (int32_t)val) {
        tcg_out_opc_upper(s, OPC_LUI, rd, hi);
        if (lo != 0) {
            tcg_out_opc_imm(s, OPC_ADDIW, rd, rd, lo);
        }
        return;
    }

    /* We can only be here if TCG_TARGET_REG_BITS != 32 */
    tmp = tcg_pcrel_diff(s, (void *)val);
    if (tmp == (int32_t)tmp) {
        /* PC-relative reach: auipc + addi, patched by reloc_call. */
        tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
        tcg_out_opc_imm(s, OPC_ADDI, rd, rd, 0);
        ret = reloc_call(s->code_ptr - 2, (tcg_insn_unit *)val);
        tcg_debug_assert(ret == true);
        return;
    }

    /* Look for a single 20-bit section.  */
    shift = ctz64(val);
    tmp = val >> shift;
    if (tmp == sextreg(tmp, 0, 20)) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp << 12);
        if (shift > 12) {
            tcg_out_opc_imm(s, OPC_SLLI, rd, rd, shift - 12);
        } else {
            tcg_out_opc_imm(s, OPC_SRAI, rd, rd, 12 - shift);
        }
        return;
    }

    /* Look for a few high zero bits, with lots of bits set in the middle.  */
    shift = clz64(val);
    tmp = val << shift;
    if (tmp == sextreg(tmp, 12, 20) << 12) {
        tcg_out_opc_upper(s, OPC_LUI, rd, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    } else if (tmp == sextreg(tmp, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ADDI, rd, TCG_REG_ZERO, tmp);
        tcg_out_opc_imm(s, OPC_SRLI, rd, rd, shift);
        return;
    }

    /* Drop into the constant pool.  */
    new_pool_label(s, val, R_RISCV_CALL, s->code_ptr, 0);
    tcg_out_opc_upper(s, OPC_AUIPC, rd, 0);
    tcg_out_opc_imm(s, OPC_LD, rd, rd, 0);
}

/* Zero-extend the low 8 bits. */
static void tcg_out_ext8u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ANDI, ret, arg, 0xff);
}

/* Zero-extend the low 16 bits via shift-left/shift-right. */
static void tcg_out_ext16u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRLIW, ret, ret, 16);
}

/* Zero-extend the low 32 bits (RV64). */
static void tcg_out_ext32u(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLI, ret, arg, 32);
    tcg_out_opc_imm(s, OPC_SRLI, ret, ret, 32);
}

/* Sign-extend the low 8 bits. */
static void tcg_out_ext8s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 24);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 24);
}

/* Sign-extend the low 16 bits. */
static void tcg_out_ext16s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_SLLIW, ret, arg, 16);
    tcg_out_opc_imm(s, OPC_SRAIW, ret, ret, 16);
}

/* Sign-extend the low 32 bits (addiw rd, rs, 0 == sext.w). */
static void tcg_out_ext32s(TCGContext *s, TCGReg ret, TCGReg arg)
{
    tcg_out_opc_imm(s, OPC_ADDIW, ret, arg, 0);
}

/* Emit a load or store with an arbitrary offset, synthesizing the
   address in TCG_REG_TMP2 when the offset exceeds 12 signed bits. */
static void tcg_out_ldst(TCGContext *s, RISCVInsn opc, TCGReg data,
                         TCGReg addr, intptr_t offset)
{
    intptr_t imm12 = sextreg(offset, 0, 12);

    if (offset != imm12) {
        intptr_t diff = offset - (uintptr_t)s->code_ptr;

        if (addr == TCG_REG_ZERO && diff == (int32_t)diff) {
            /* Absolute address within PC-relative reach: use auipc. */
            imm12 = sextreg(diff, 0, 12);
            tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP2, diff - imm12);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP2, offset - imm12);
            if (addr != TCG_REG_ZERO) {
                tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, addr);
            }
        }
        addr = TCG_REG_TMP2;
    }

    switch (opc) {
    case OPC_SB:
    case OPC_SH:
    case OPC_SW:
    case OPC_SD:
        tcg_out_opc_store(s, opc, addr, data, imm12);
        break;
    case OPC_LB:
    case OPC_LBU:
    case OPC_LH:
    case OPC_LHU:
    case OPC_LW:
    case OPC_LWU:
    case OPC_LD:
        tcg_out_opc_imm(s, opc, data, addr, imm12);
        break;
    default:
        g_assert_not_reached();
    }
}

static void tcg_out_ld(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_LW : OPC_LD, arg, arg1, arg2);
}

static void tcg_out_st(TCGContext *s, TCGType type, TCGReg arg,
                       TCGReg arg1, intptr_t arg2)
{
    bool is32bit = (TCG_TARGET_REG_BITS == 32 || type == TCG_TYPE_I32);
    tcg_out_ldst(s, is32bit ? OPC_SW : OPC_SD, arg, arg1, arg2);
}

/* Store a constant; only zero can be stored without a scratch register. */
static bool tcg_out_sti(TCGContext *s, TCGType type, TCGArg val,
                        TCGReg base, intptr_t ofs)
{
    if (val == 0) {
        tcg_out_st(s, type, TCG_REG_ZERO, base, ofs);
        return true;
    }
    return false;
}

/* Emit a double-word add/sub with carry, (rh:rl) = (ah:al) op (bh:bl);
   cbl/cbh flag whether bl/bh are constants. */
static void tcg_out_addsub2(TCGContext *s,
                            TCGReg rl, TCGReg rh,
                            TCGReg al, TCGReg ah,
                            TCGArg bl, TCGArg bh,
                            bool cbl, bool cbh, bool is_sub, bool is32bit)
{
    const RISCVInsn opc_add = is32bit ? OPC_ADDW : OPC_ADD;
    const RISCVInsn opc_addi = is32bit ? OPC_ADDIW : OPC_ADDI;
    const RISCVInsn opc_sub = is32bit ? OPC_SUBW : OPC_SUB;
    TCGReg th = TCG_REG_TMP1;

    /* If we have a negative constant such that negating it would
       make the high part zero, we can (usually) eliminate one insn. */
    if (cbl && cbh && bh == -1 && bl != 0) {
        bl = -bl;
        bh = 0;
        is_sub = !is_sub;
    }

    /* By operating on the high part first, we get to use the final
       carry operation to move back from the temporary. */
    if (!cbh) {
        tcg_out_opc_reg(s, (is_sub ? opc_sub : opc_add), th, ah, bh);
    } else if (bh != 0 || ah == rl) {
        tcg_out_opc_imm(s, opc_addi, th, ah, (is_sub ? -bh : bh));
    } else {
        th = ah;
    }

    /* Note that tcg optimization should eliminate the bl == 0 case. */
    if (is_sub) {
        if (cbl) {
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_imm(s, opc_addi, rl, al, -bl);
        } else {
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0, al, bl);
            tcg_out_opc_reg(s, opc_sub, rl, al, bl);
        }
        tcg_out_opc_reg(s, opc_sub, rh, th, TCG_REG_TMP0);
    } else {
        if (cbl) {
            tcg_out_opc_imm(s, opc_addi, rl, al, bl);
            tcg_out_opc_imm(s, OPC_SLTIU, TCG_REG_TMP0, rl, bl);
        } else if (rl == al && rl == bl) {
            /* NOTE(review): opc_addi is passed to the *register*-form
             * emitter here; this mirrors the upstream code of this era —
             * verify against current upstream before relying on it. */
            tcg_out_opc_imm(s, OPC_SLTI, TCG_REG_TMP0, al, 0);
            tcg_out_opc_reg(s, opc_addi, rl, al, bl);
        } else {
            tcg_out_opc_reg(s, opc_add, rl, al, bl);
            tcg_out_opc_reg(s, OPC_SLTU, TCG_REG_TMP0,
                            rl, (rl == bl ?
al : bl)); } tcg_out_opc_reg(s, opc_add, rh, th, TCG_REG_TMP0); } } static const struct { RISCVInsn op; bool swap; } tcg_brcond_to_riscv[] = { [TCG_COND_EQ] = { OPC_BEQ, false }, [TCG_COND_NE] = { OPC_BNE, false }, [TCG_COND_LT] = { OPC_BLT, false }, [TCG_COND_GE] = { OPC_BGE, false }, [TCG_COND_LE] = { OPC_BGE, true }, [TCG_COND_GT] = { OPC_BLT, true }, [TCG_COND_LTU] = { OPC_BLTU, false }, [TCG_COND_GEU] = { OPC_BGEU, false }, [TCG_COND_LEU] = { OPC_BGEU, true }, [TCG_COND_GTU] = { OPC_BLTU, true } }; static void tcg_out_brcond(TCGContext *s, TCGCond cond, TCGReg arg1, TCGReg arg2, TCGLabel *l) { RISCVInsn op = tcg_brcond_to_riscv[cond].op; tcg_debug_assert(op != 0); if (tcg_brcond_to_riscv[cond].swap) { TCGReg t = arg1; arg1 = arg2; arg2 = t; } if (l->has_value) { intptr_t diff = tcg_pcrel_diff(s, l->u.value_ptr); if (diff == sextreg(diff, 0, 12)) { tcg_out_opc_branch(s, op, arg1, arg2, diff); } else { /* Invert the conditional branch. */ tcg_out_opc_branch(s, op ^ (1 << 12), arg1, arg2, 8); tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, diff - 4); } } else { tcg_out_reloc(s, s->code_ptr, R_RISCV_BRANCH, l, 0); tcg_out_opc_branch(s, op, arg1, arg2, 0); /* NOP to allow patching later */ tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0); } } static void tcg_out_setcond(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg arg1, TCGReg arg2) { switch (cond) { case TCG_COND_EQ: tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2); tcg_out_opc_imm(s, OPC_SLTIU, ret, ret, 1); break; case TCG_COND_NE: tcg_out_opc_reg(s, OPC_SUB, ret, arg1, arg2); tcg_out_opc_reg(s, OPC_SLTU, ret, TCG_REG_ZERO, ret); break; case TCG_COND_LT: tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2); break; case TCG_COND_GE: tcg_out_opc_reg(s, OPC_SLT, ret, arg1, arg2); tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1); break; case TCG_COND_LE: tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1); tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1); break; case TCG_COND_GT: tcg_out_opc_reg(s, OPC_SLT, ret, arg2, arg1); break; case 
TCG_COND_LTU: tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2); break; case TCG_COND_GEU: tcg_out_opc_reg(s, OPC_SLTU, ret, arg1, arg2); tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1); break; case TCG_COND_LEU: tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1); tcg_out_opc_imm(s, OPC_XORI, ret, ret, 1); break; case TCG_COND_GTU: tcg_out_opc_reg(s, OPC_SLTU, ret, arg2, arg1); break; default: g_assert_not_reached(); break; } } static void tcg_out_brcond2(TCGContext *s, TCGCond cond, TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh, TCGLabel *l) { /* todo */ g_assert_not_reached(); } static void tcg_out_setcond2(TCGContext *s, TCGCond cond, TCGReg ret, TCGReg al, TCGReg ah, TCGReg bl, TCGReg bh) { /* todo */ g_assert_not_reached(); } static inline void tcg_out_goto(TCGContext *s, tcg_insn_unit *target) { ptrdiff_t offset = tcg_pcrel_diff(s, target); tcg_debug_assert(offset == sextreg(offset, 1, 20) << 1); tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, offset); } static void tcg_out_call_int(TCGContext *s, tcg_insn_unit *arg, bool tail) { TCGReg link = tail ? 
TCG_REG_ZERO : TCG_REG_RA; ptrdiff_t offset = tcg_pcrel_diff(s, arg); int ret; if (offset == sextreg(offset, 1, 20) << 1) { /* short jump: -2097150 to 2097152 */ tcg_out_opc_jump(s, OPC_JAL, link, offset); } else if (TCG_TARGET_REG_BITS == 32 || offset == sextreg(offset, 1, 31) << 1) { /* long jump: -2147483646 to 2147483648 */ tcg_out_opc_upper(s, OPC_AUIPC, TCG_REG_TMP0, 0); tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, 0); ret = reloc_call(s->code_ptr - 2, arg);\ tcg_debug_assert(ret == true); } else if (TCG_TARGET_REG_BITS == 64) { /* far jump: 64-bit */ tcg_target_long imm = sextreg((tcg_target_long)arg, 0, 12); tcg_target_long base = (tcg_target_long)arg - imm; tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_TMP0, base); tcg_out_opc_imm(s, OPC_JALR, link, TCG_REG_TMP0, imm); } else { g_assert_not_reached(); } } static void tcg_out_call(TCGContext *s, tcg_insn_unit *arg) { tcg_out_call_int(s, arg, false); } static void tcg_out_mb(TCGContext *s, TCGArg a0) { tcg_insn_unit insn = OPC_FENCE; if (a0 & TCG_MO_LD_LD) { insn |= 0x02200000; } if (a0 & TCG_MO_ST_LD) { insn |= 0x01200000; } if (a0 & TCG_MO_LD_ST) { insn |= 0x02100000; } if (a0 & TCG_MO_ST_ST) { insn |= 0x02200000; } tcg_out32(s, insn); } /* * Load/store and TLB */ #if defined(CONFIG_SOFTMMU) #include "tcg-ldst.inc.c" /* helper signature: helper_ret_ld_mmu(CPUState *env, target_ulong addr, * TCGMemOpIdx oi, uintptr_t ra) */ static void * const qemu_ld_helpers[16] = { [MO_UB] = helper_ret_ldub_mmu, [MO_SB] = helper_ret_ldsb_mmu, [MO_LEUW] = helper_le_lduw_mmu, [MO_LESW] = helper_le_ldsw_mmu, [MO_LEUL] = helper_le_ldul_mmu, #if TCG_TARGET_REG_BITS == 64 [MO_LESL] = helper_le_ldsl_mmu, #endif [MO_LEQ] = helper_le_ldq_mmu, [MO_BEUW] = helper_be_lduw_mmu, [MO_BESW] = helper_be_ldsw_mmu, [MO_BEUL] = helper_be_ldul_mmu, #if TCG_TARGET_REG_BITS == 64 [MO_BESL] = helper_be_ldsl_mmu, #endif [MO_BEQ] = helper_be_ldq_mmu, }; /* helper signature: helper_ret_st_mmu(CPUState *env, target_ulong addr, * uintxx_t val, 
 *                                     TCGMemOpIdx oi,
 *                                     uintptr_t ra)
 */
static void * const qemu_st_helpers[16] = {
    [MO_UB]   = helper_ret_stb_mmu,
    [MO_LEUW] = helper_le_stw_mmu,
    [MO_LEUL] = helper_le_stl_mmu,
    [MO_LEQ]  = helper_le_stq_mmu,
    [MO_BEUW] = helper_be_stw_mmu,
    [MO_BEUL] = helper_be_stl_mmu,
    [MO_BEQ]  = helper_be_stq_mmu,
};

/* We don't support oversize guests */
QEMU_BUILD_BUG_ON(TCG_TARGET_REG_BITS < TARGET_LONG_BITS);

/* We expect tlb_mask to be before tlb_table.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) <
                  offsetof(CPUArchState, tlb_mask));

/* We expect tlb_mask to be "near" tlb_table.  */
QEMU_BUILD_BUG_ON(offsetof(CPUArchState, tlb_table) -
                  offsetof(CPUArchState, tlb_mask) >= 0x800);

/* Emit the inline softmmu TLB check.  On a hit, TCG_REG_TMP0 holds the
   translated host address; on a miss, the branch recorded in label_ptr[0]
   (followed by a patchable NOP) jumps to the slow path. */
static void tcg_out_tlb_load(TCGContext *s, TCGReg addrl,
                             TCGReg addrh, TCGMemOpIdx oi,
                             tcg_insn_unit **label_ptr, bool is_load)
{
    TCGMemOp opc = get_memop(oi);
    unsigned s_bits = opc & MO_SIZE;
    unsigned a_bits = get_alignment_bits(opc);
    tcg_target_long compare_mask;
    int mem_index = get_mmuidx(oi);
    int mask_off, table_off;
    TCGReg mask_base = TCG_AREG0, table_base = TCG_AREG0;

    mask_off = offsetof(CPUArchState, tlb_mask[mem_index]);
    table_off = offsetof(CPUArchState, tlb_table[mem_index]);
    if (table_off > 0x7ff) {
        /* Offsets exceed a 12-bit displacement: build base addresses. */
        int mask_hi = mask_off - sextreg(mask_off, 0, 12);
        int table_hi = table_off - sextreg(table_off, 0, 12);

        if (likely(mask_hi == table_hi)) {
            mask_base = table_base = TCG_REG_TMP1;
            tcg_out_opc_upper(s, OPC_LUI, mask_base, mask_hi);
            tcg_out_opc_reg(s, OPC_ADD, mask_base, mask_base, TCG_AREG0);
            mask_off -= mask_hi;
            table_off -= mask_hi;
        } else {
            mask_base = TCG_REG_TMP0;
            table_base = TCG_REG_TMP1;
            tcg_out_opc_upper(s, OPC_LUI, mask_base, mask_hi);
            tcg_out_opc_reg(s, OPC_ADD, mask_base, mask_base, TCG_AREG0);
            table_off -= mask_off;
            mask_off -= mask_hi;
            tcg_out_opc_imm(s, OPC_ADDI, table_base, mask_base, mask_off);
        }
    }

    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, mask_base, mask_off);
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP1, table_base, table_off);

    /* TMP2 = &tlb_table[mmu_idx][(addr >> PAGE_BITS) & mask]. */
    tcg_out_opc_imm(s, OPC_SRLI, TCG_REG_TMP2, addrl,
                    TARGET_PAGE_BITS - CPU_TLB_ENTRY_BITS);
    tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP0);
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP2, TCG_REG_TMP2, TCG_REG_TMP1);

    /* Load the tlb comparator and the addend.  */
    tcg_out_ld(s, TCG_TYPE_TL, TCG_REG_TMP0, TCG_REG_TMP2,
               is_load ? offsetof(CPUTLBEntry, addr_read)
                       : offsetof(CPUTLBEntry, addr_write));
    tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP2, TCG_REG_TMP2,
               offsetof(CPUTLBEntry, addend));

    /* We don't support unaligned accesses. */
    if (a_bits < s_bits) {
        a_bits = s_bits;
    }
    /* Clear the non-page, non-alignment bits from the address.  */
    compare_mask = (tcg_target_long)TARGET_PAGE_MASK | ((1 << a_bits) - 1);
    if (compare_mask == sextreg(compare_mask, 0, 12)) {
        tcg_out_opc_imm(s, OPC_ANDI, TCG_REG_TMP1, addrl, compare_mask);
    } else {
        tcg_out_movi(s, TCG_TYPE_TL, TCG_REG_TMP1, compare_mask);
        tcg_out_opc_reg(s, OPC_AND, TCG_REG_TMP1, TCG_REG_TMP1, addrl);
    }

    /* Compare masked address with the TLB entry. */
    label_ptr[0] = s->code_ptr;
    tcg_out_opc_branch(s, OPC_BNE, TCG_REG_TMP0, TCG_REG_TMP1, 0);
    /* NOP to allow patching later */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_ZERO, TCG_REG_ZERO, 0);

    /* TLB Hit - translate address using addend.  */
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, TCG_REG_TMP0, addrl);
        addrl = TCG_REG_TMP0;
    }
    tcg_out_opc_reg(s, OPC_ADD, TCG_REG_TMP0, TCG_REG_TMP2, addrl);
}

/* Record a slow-path fixup for a qemu_ld/qemu_st just emitted. */
static void add_qemu_ldst_label(TCGContext *s, int is_ld, TCGMemOpIdx oi,
                                TCGType ext,
                                TCGReg datalo, TCGReg datahi,
                                TCGReg addrlo, TCGReg addrhi,
                                void *raddr, tcg_insn_unit **label_ptr)
{
    TCGLabelQemuLdst *label = new_ldst_label(s);

    label->is_ld = is_ld;
    label->oi = oi;
    label->type = ext;
    label->datalo_reg = datalo;
    label->datahi_reg = datahi;
    label->addrlo_reg = addrlo;
    label->addrhi_reg = addrhi;
    label->raddr = raddr;
    label->label_ptr[0] = label_ptr[0];
}

/* Emit the out-of-line code for a load TLB miss: call the helper,
   move the result into place, and jump back to the fast path. */
static void tcg_out_qemu_ld_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    TCGMemOp opc = get_memop(oi);
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
        g_assert_not_reached();
    }

    /* resolve label address */
    patch_reloc(l->label_ptr[0], R_RISCV_BRANCH, (intptr_t) s->code_ptr, 0);

    /* call load helper */
    tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0);
    tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg);
    tcg_out_movi(s, TCG_TYPE_PTR, a2, oi);
    tcg_out_movi(s, TCG_TYPE_PTR, a3, (tcg_target_long)l->raddr);

    tcg_out_call(s, qemu_ld_helpers[opc & (MO_BSWAP | MO_SSIZE)]);
    /* NOTE(review): a bool is passed where a TCGType is expected; this
     * relies on TCG_TYPE_I32 == 0 and TCG_TYPE_I64 == 1 — confirm
     * against tcg.h. */
    tcg_out_mov(s, (opc & MO_SIZE) == MO_64, l->datalo_reg, a0);

    tcg_out_goto(s, l->raddr);
}

/* Emit the out-of-line code for a store TLB miss. */
static void tcg_out_qemu_st_slow_path(TCGContext *s, TCGLabelQemuLdst *l)
{
    TCGMemOpIdx oi = l->oi;
    TCGMemOp opc = get_memop(oi);
    TCGMemOp s_bits = opc & MO_SIZE;
    TCGReg a0 = tcg_target_call_iarg_regs[0];
    TCGReg a1 = tcg_target_call_iarg_regs[1];
    TCGReg a2 = tcg_target_call_iarg_regs[2];
    TCGReg a3 = tcg_target_call_iarg_regs[3];
    TCGReg a4 = tcg_target_call_iarg_regs[4];

    /* We don't support oversize guests */
    if (TCG_TARGET_REG_BITS < TARGET_LONG_BITS) {
g_assert_not_reached(); } /* resolve label address */ patch_reloc(l->label_ptr[0], R_RISCV_BRANCH, (intptr_t) s->code_ptr, 0); /* call store helper */ tcg_out_mov(s, TCG_TYPE_PTR, a0, TCG_AREG0); tcg_out_mov(s, TCG_TYPE_PTR, a1, l->addrlo_reg); tcg_out_mov(s, TCG_TYPE_PTR, a2, l->datalo_reg); switch (s_bits) { case MO_8: tcg_out_ext8u(s, a2, a2); break; case MO_16: tcg_out_ext16u(s, a2, a2); break; default: break; } tcg_out_movi(s, TCG_TYPE_PTR, a3, oi); tcg_out_movi(s, TCG_TYPE_PTR, a4, (tcg_target_long)l->raddr); tcg_out_call(s, qemu_st_helpers[opc & (MO_BSWAP | MO_SSIZE)]); tcg_out_goto(s, l->raddr); } #endif /* CONFIG_SOFTMMU */ static void tcg_out_qemu_ld_direct(TCGContext *s, TCGReg lo, TCGReg hi, TCGReg base, TCGMemOp opc, bool is_64) { const TCGMemOp bswap = opc & MO_BSWAP; /* We don't yet handle byteswapping, assert */ g_assert(!bswap); switch (opc & (MO_SSIZE)) { case MO_UB: tcg_out_opc_imm(s, OPC_LBU, lo, base, 0); break; case MO_SB: tcg_out_opc_imm(s, OPC_LB, lo, base, 0); break; case MO_UW: tcg_out_opc_imm(s, OPC_LHU, lo, base, 0); break; case MO_SW: tcg_out_opc_imm(s, OPC_LH, lo, base, 0); break; case MO_UL: if (TCG_TARGET_REG_BITS == 64 && is_64) { tcg_out_opc_imm(s, OPC_LWU, lo, base, 0); break; } /* FALLTHRU */ case MO_SL: tcg_out_opc_imm(s, OPC_LW, lo, base, 0); break; case MO_Q: /* Prefer to load from offset 0 first, but allow for overlap. 
*/
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_imm(s, OPC_LD, lo, base, 0);
        } else if (lo != base) {
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
        } else {
            /* lo aliases base: load the high word first so the address
               is still intact for the second load. */
            tcg_out_opc_imm(s, OPC_LW, hi, base, 4);
            tcg_out_opc_imm(s, OPC_LW, lo, base, 0);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Emit a qemu_ld opcode: perform the TLB lookup (softmmu) or add the
 * guest_base displacement (user-mode), then the load itself via
 * tcg_out_qemu_ld_direct().  Softmmu misses are handled out of line
 * through add_qemu_ldst_label().
 */
static void tcg_out_qemu_ld(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#endif
    TCGReg base = TCG_REG_TMP0;

    /* Decode the variable-length argument list of the TCG opcode. */
    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 1);
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
    add_qemu_ldst_label(s, 1, oi,
                        (is_64 ? TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }

    if (guest_base == 0) {
        /* ADD with the zero register: plain copy of the address. */
        tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO);
    } else {
        tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
    }
    tcg_out_qemu_ld_direct(s, data_regl, data_regh, base, opc, is_64);
#endif
}

/*
 * Emit the actual guest store of lo (and hi for a 64-bit store on a
 * 32-bit host) to [base + 0].  Byte-swapped accesses unsupported.
 */
static void tcg_out_qemu_st_direct(TCGContext *s, TCGReg lo, TCGReg hi,
                                   TCGReg base, TCGMemOp opc)
{
    const TCGMemOp bswap = opc & MO_BSWAP;

    /* We don't yet handle byteswapping, assert */
    g_assert(!bswap);

    switch (opc & (MO_SSIZE)) {
    case MO_8:
        tcg_out_opc_store(s, OPC_SB, base, lo, 0);
        break;
    case MO_16:
        tcg_out_opc_store(s, OPC_SH, base, lo, 0);
        break;
    case MO_32:
        tcg_out_opc_store(s, OPC_SW, base, lo, 0);
        break;
    case MO_64:
        if (TCG_TARGET_REG_BITS == 64) {
            tcg_out_opc_store(s, OPC_SD, base, lo, 0);
        } else {
            tcg_out_opc_store(s, OPC_SW, base, lo, 0);
            tcg_out_opc_store(s, OPC_SW, base, hi, 4);
        }
        break;
    default:
        g_assert_not_reached();
    }
}

/*
 * Emit a qemu_st opcode; structure mirrors tcg_out_qemu_ld() above.
 */
static void tcg_out_qemu_st(TCGContext *s, const TCGArg *args, bool is_64)
{
    TCGReg addr_regl, addr_regh __attribute__((unused));
    TCGReg data_regl, data_regh;
    TCGMemOpIdx oi;
    TCGMemOp opc;
#if defined(CONFIG_SOFTMMU)
    tcg_insn_unit *label_ptr[1];
#endif
    TCGReg base = TCG_REG_TMP0;

    data_regl = *args++;
    data_regh = (TCG_TARGET_REG_BITS == 32 && is_64 ? *args++ : 0);
    addr_regl = *args++;
    addr_regh = (TCG_TARGET_REG_BITS < TARGET_LONG_BITS ? *args++ : 0);
    oi = *args++;
    opc = get_memop(oi);

#if defined(CONFIG_SOFTMMU)
    tcg_out_tlb_load(s, addr_regl, addr_regh, oi, label_ptr, 0);
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
    add_qemu_ldst_label(s, 0, oi, (is_64 ?
TCG_TYPE_I64 : TCG_TYPE_I32),
                        data_regl, data_regh, addr_regl, addr_regh,
                        s->code_ptr, label_ptr);
#else
    if (TCG_TARGET_REG_BITS > TARGET_LONG_BITS) {
        tcg_out_ext32u(s, base, addr_regl);
        addr_regl = base;
    }

    if (guest_base == 0) {
        tcg_out_opc_reg(s, OPC_ADD, base, addr_regl, TCG_REG_ZERO);
    } else {
        tcg_out_opc_reg(s, OPC_ADD, base, TCG_GUEST_BASE_REG, addr_regl);
    }
    tcg_out_qemu_st_direct(s, data_regl, data_regh, base, opc);
#endif
}

/* Address of the common TB epilogue, set in tcg_target_qemu_prologue(). */
static tcg_insn_unit *tb_ret_addr;

/*
 * Emit RISC-V host code for one TCG opcode.  a0..a2 preload the common
 * argument slots; c2 selects the immediate form of binary ops when
 * args[2] is a constant.  RV64 *W opcodes implement the i32 variants.
 */
static void tcg_out_op(TCGContext *s, TCGOpcode opc,
                       const TCGArg *args, const int *const_args)
{
    TCGArg a0 = args[0];
    TCGArg a1 = args[1];
    TCGArg a2 = args[2];
    int c2 = const_args[2];

    switch (opc) {
    case INDEX_op_exit_tb:
        /* Reuse the zeroing that exists for goto_ptr.  */
        if (a0 == 0) {
            tcg_out_call_int(s, s->code_gen_epilogue, true);
        } else {
            tcg_out_movi(s, TCG_TYPE_PTR, TCG_REG_A0, a0);
            tcg_out_call_int(s, tb_ret_addr, true);
        }
        break;

    case INDEX_op_goto_tb:
        assert(s->tb_jmp_insn_offset == 0);
        /* indirect jump method */
        tcg_out_ld(s, TCG_TYPE_PTR, TCG_REG_TMP0, TCG_REG_ZERO,
                   (uintptr_t)(s->tb_jmp_target_addr + a0));
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_TMP0, 0);
        set_jmp_reset_offset(s, a0);
        break;

    case INDEX_op_goto_ptr:
        tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, a0, 0);
        break;

    case INDEX_op_br:
        /* Forward branch: emit JAL and record the relocation. */
        tcg_out_reloc(s, s->code_ptr, R_RISCV_JAL, arg_label(a0), 0);
        tcg_out_opc_jump(s, OPC_JAL, TCG_REG_ZERO, 0);
        break;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8u_i64:
        tcg_out_ldst(s, OPC_LBU, a0, a1, a2);
        break;
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld8s_i64:
        tcg_out_ldst(s, OPC_LB, a0, a1, a2);
        break;
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16u_i64:
        tcg_out_ldst(s, OPC_LHU, a0, a1, a2);
        break;
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld16s_i64:
        tcg_out_ldst(s, OPC_LH, a0, a1, a2);
        break;
    case INDEX_op_ld32u_i64:
        tcg_out_ldst(s, OPC_LWU, a0, a1, a2);
        break;
    case INDEX_op_ld_i32:
    case INDEX_op_ld32s_i64:
        tcg_out_ldst(s, OPC_LW, a0, a1, a2);
        break;
    case INDEX_op_ld_i64:
        tcg_out_ldst(s, OPC_LD, a0, a1, a2);
        break;

    case INDEX_op_st8_i32:
    case INDEX_op_st8_i64:
        tcg_out_ldst(s, OPC_SB, a0, a1, a2);
        break;
    case INDEX_op_st16_i32:
    case INDEX_op_st16_i64:
        tcg_out_ldst(s, OPC_SH, a0, a1, a2);
        break;
    case INDEX_op_st_i32:
    case INDEX_op_st32_i64:
        tcg_out_ldst(s, OPC_SW, a0, a1, a2);
        break;
    case INDEX_op_st_i64:
        tcg_out_ldst(s, OPC_SD, a0, a1, a2);
        break;

    case INDEX_op_add_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADDW, a0, a1, a2);
        }
        break;
    case INDEX_op_add_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_ADD, a0, a1, a2);
        }
        break;

    case INDEX_op_sub_i32:
        /* Subtract-immediate is ADDI with the negated constant. */
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDIW, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUBW, a0, a1, a2);
        }
        break;
    case INDEX_op_sub_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ADDI, a0, a1, -a2);
        } else {
            tcg_out_opc_reg(s, OPC_SUB, a0, a1, a2);
        }
        break;

    case INDEX_op_and_i32:
    case INDEX_op_and_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ANDI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_AND, a0, a1, a2);
        }
        break;

    case INDEX_op_or_i32:
    case INDEX_op_or_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_ORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_OR, a0, a1, a2);
        }
        break;

    case INDEX_op_xor_i32:
    case INDEX_op_xor_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_XORI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_XOR, a0, a1, a2);
        }
        break;

    case INDEX_op_not_i32:
    case INDEX_op_not_i64:
        /* NOT is XOR with all-ones. */
        tcg_out_opc_imm(s, OPC_XORI, a0, a1, -1);
        break;

    case INDEX_op_neg_i32:
        /* NEG is subtraction from the zero register. */
        tcg_out_opc_reg(s, OPC_SUBW, a0, TCG_REG_ZERO, a1);
        break;
    case INDEX_op_neg_i64:
        tcg_out_opc_reg(s, OPC_SUB, a0, TCG_REG_ZERO, a1);
        break;

    case INDEX_op_mul_i32:
        tcg_out_opc_reg(s, OPC_MULW, a0, a1, a2);
        break;
    case INDEX_op_mul_i64:
        tcg_out_opc_reg(s, OPC_MUL, a0, a1, a2);
        break;

    case INDEX_op_div_i32:
        tcg_out_opc_reg(s, OPC_DIVW, a0, a1, a2);
        break;
    case INDEX_op_div_i64:
        tcg_out_opc_reg(s, OPC_DIV, a0, a1, a2);
        break;

    case INDEX_op_divu_i32:
        tcg_out_opc_reg(s, OPC_DIVUW, a0, a1, a2);
        break;
    case INDEX_op_divu_i64:
        tcg_out_opc_reg(s, OPC_DIVU, a0, a1, a2);
        break;

    case INDEX_op_rem_i32:
        tcg_out_opc_reg(s, OPC_REMW, a0, a1, a2);
        break;
    case INDEX_op_rem_i64:
        tcg_out_opc_reg(s, OPC_REM, a0, a1, a2);
        break;

    case INDEX_op_remu_i32:
        tcg_out_opc_reg(s, OPC_REMUW, a0, a1, a2);
        break;
    case INDEX_op_remu_i64:
        tcg_out_opc_reg(s, OPC_REMU, a0, a1, a2);
        break;

    case INDEX_op_shl_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SLLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shl_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SLLI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SLL, a0, a1, a2);
        }
        break;

    case INDEX_op_shr_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SRLW, a0, a1, a2);
        }
        break;
    case INDEX_op_shr_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRLI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SRL, a0, a1, a2);
        }
        break;

    case INDEX_op_sar_i32:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAIW, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SRAW, a0, a1, a2);
        }
        break;
    case INDEX_op_sar_i64:
        if (c2) {
            tcg_out_opc_imm(s, OPC_SRAI, a0, a1, a2);
        } else {
            tcg_out_opc_reg(s, OPC_SRA, a0, a1, a2);
        }
        break;

    /* Double-word add/sub; the trailing bools select sub and 32-bit. */
    case INDEX_op_add2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, true);
        break;
    case INDEX_op_add2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], false, false);
        break;
    case INDEX_op_sub2_i32:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, true);
        break;
    case INDEX_op_sub2_i64:
        tcg_out_addsub2(s, a0, a1, a2, args[3], args[4], args[5],
                        const_args[4], const_args[5], true, false);
        break;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        tcg_out_brcond(s, a2, a0, a1, arg_label(args[3]));
        break;
    case INDEX_op_brcond2_i32:
        tcg_out_brcond2(s, args[4], a0, a1, a2, args[3], arg_label(args[5]));
        break;

    case INDEX_op_setcond_i32:
    case INDEX_op_setcond_i64:
        tcg_out_setcond(s, args[3], a0, a1, a2);
        break;
    case INDEX_op_setcond2_i32:
        tcg_out_setcond2(s, args[5], a0, a1, a2, args[3], args[4]);
        break;

    case INDEX_op_qemu_ld_i32:
        tcg_out_qemu_ld(s, args, false);
        break;
    case INDEX_op_qemu_ld_i64:
        tcg_out_qemu_ld(s, args, true);
        break;
    case INDEX_op_qemu_st_i32:
        tcg_out_qemu_st(s, args, false);
        break;
    case INDEX_op_qemu_st_i64:
        tcg_out_qemu_st(s, args, true);
        break;

    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
        tcg_out_ext8u(s, a0, a1);
        break;
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
        tcg_out_ext16u(s, a0, a1);
        break;
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
        tcg_out_ext32u(s, a0, a1);
        break;

    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
        tcg_out_ext8s(s, a0, a1);
        break;
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
        tcg_out_ext16s(s, a0, a1);
        break;
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_ext_i32_i64:
        tcg_out_ext32s(s, a0, a1);
        break;

    case INDEX_op_extrh_i64_i32:
        /* High half of a 64-bit value: arithmetic shift right by 32. */
        tcg_out_opc_imm(s, OPC_SRAI, a0, a1, 32);
        break;

    case INDEX_op_mulsh_i32:
    case INDEX_op_mulsh_i64:
        tcg_out_opc_reg(s, OPC_MULH, a0, a1, a2);
        break;

    case INDEX_op_muluh_i32:
    case INDEX_op_muluh_i64:
        tcg_out_opc_reg(s, OPC_MULHU, a0, a1, a2);
        break;

    case INDEX_op_mb:
        tcg_out_mb(s, a0);
        break;

    case INDEX_op_mov_i32:  /* Always emitted via tcg_out_mov.  */
    case INDEX_op_mov_i64:
    case INDEX_op_movi_i32: /* Always emitted via tcg_out_movi.  */
    case INDEX_op_movi_i64:
    case INDEX_op_call:     /* Always emitted via tcg_out_call.
*/
    default:
        g_assert_not_reached();
    }
}

/*
 * Return the register-constraint set for a TCG opcode, or NULL if the
 * opcode is unsupported.  Constraint letters: "r" any register, "Z"
 * allow the zero register, "I"/"N" signed 12-bit immediate (positive /
 * negatable), "M" as used by add2/sub2, "L" qemu_ld/st address class.
 */
static const TCGTargetOpDef *tcg_target_op_def(TCGOpcode op)
{
    static const TCGTargetOpDef r
        = { .args_ct_str = { "r" } };
    static const TCGTargetOpDef r_r
        = { .args_ct_str = { "r", "r" } };
    static const TCGTargetOpDef rZ_r
        = { .args_ct_str = { "rZ", "r" } };
    static const TCGTargetOpDef rZ_rZ
        = { .args_ct_str = { "rZ", "rZ" } };
    static const TCGTargetOpDef rZ_rZ_rZ_rZ
        = { .args_ct_str = { "rZ", "rZ", "rZ", "rZ" } };
    static const TCGTargetOpDef r_r_ri
        = { .args_ct_str = { "r", "r", "ri" } };
    static const TCGTargetOpDef r_r_rI
        = { .args_ct_str = { "r", "r", "rI" } };
    static const TCGTargetOpDef r_rZ_rN
        = { .args_ct_str = { "r", "rZ", "rN" } };
    static const TCGTargetOpDef r_rZ_rZ
        = { .args_ct_str = { "r", "rZ", "rZ" } };
    static const TCGTargetOpDef r_rZ_rZ_rZ_rZ
        = { .args_ct_str = { "r", "rZ", "rZ", "rZ", "rZ" } };
    static const TCGTargetOpDef r_L
        = { .args_ct_str = { "r", "L" } };
    static const TCGTargetOpDef r_r_L
        = { .args_ct_str = { "r", "r", "L" } };
    static const TCGTargetOpDef r_L_L
        = { .args_ct_str = { "r", "L", "L" } };
    static const TCGTargetOpDef r_r_L_L
        = { .args_ct_str = { "r", "r", "L", "L" } };
    static const TCGTargetOpDef LZ_L
        = { .args_ct_str = { "LZ", "L" } };
    static const TCGTargetOpDef LZ_L_L
        = { .args_ct_str = { "LZ", "L", "L" } };
    static const TCGTargetOpDef LZ_LZ_L
        = { .args_ct_str = { "LZ", "LZ", "L" } };
    static const TCGTargetOpDef LZ_LZ_L_L
        = { .args_ct_str = { "LZ", "LZ", "L", "L" } };
    static const TCGTargetOpDef r_r_rZ_rZ_rM_rM
        = { .args_ct_str = { "r", "r", "rZ", "rZ", "rM", "rM" } };

    switch (op) {
    case INDEX_op_goto_ptr:
        return &r;

    case INDEX_op_ld8u_i32:
    case INDEX_op_ld8s_i32:
    case INDEX_op_ld16u_i32:
    case INDEX_op_ld16s_i32:
    case INDEX_op_ld_i32:
    case INDEX_op_not_i32:
    case INDEX_op_neg_i32:
    case INDEX_op_ld8u_i64:
    case INDEX_op_ld8s_i64:
    case INDEX_op_ld16u_i64:
    case INDEX_op_ld16s_i64:
    case INDEX_op_ld32s_i64:
    case INDEX_op_ld32u_i64:
    case INDEX_op_ld_i64:
    case INDEX_op_not_i64:
    case INDEX_op_neg_i64:
    case INDEX_op_ext8u_i32:
    case INDEX_op_ext8u_i64:
    case INDEX_op_ext16u_i32:
    case INDEX_op_ext16u_i64:
    case INDEX_op_ext32u_i64:
    case INDEX_op_extu_i32_i64:
    case INDEX_op_ext8s_i32:
    case INDEX_op_ext8s_i64:
    case INDEX_op_ext16s_i32:
    case INDEX_op_ext16s_i64:
    case INDEX_op_ext32s_i64:
    case INDEX_op_extrl_i64_i32:
    case INDEX_op_extrh_i64_i32:
    case INDEX_op_ext_i32_i64:
        return &r_r;

    case INDEX_op_st8_i32:
    case INDEX_op_st16_i32:
    case INDEX_op_st_i32:
    case INDEX_op_st8_i64:
    case INDEX_op_st16_i64:
    case INDEX_op_st32_i64:
    case INDEX_op_st_i64:
        return &rZ_r;

    case INDEX_op_add_i32:
    case INDEX_op_and_i32:
    case INDEX_op_or_i32:
    case INDEX_op_xor_i32:
    case INDEX_op_add_i64:
    case INDEX_op_and_i64:
    case INDEX_op_or_i64:
    case INDEX_op_xor_i64:
        return &r_r_rI;

    case INDEX_op_sub_i32:
    case INDEX_op_sub_i64:
        return &r_rZ_rN;

    case INDEX_op_mul_i32:
    case INDEX_op_mulsh_i32:
    case INDEX_op_muluh_i32:
    case INDEX_op_div_i32:
    case INDEX_op_divu_i32:
    case INDEX_op_rem_i32:
    case INDEX_op_remu_i32:
    case INDEX_op_setcond_i32:
    case INDEX_op_mul_i64:
    case INDEX_op_mulsh_i64:
    case INDEX_op_muluh_i64:
    case INDEX_op_div_i64:
    case INDEX_op_divu_i64:
    case INDEX_op_rem_i64:
    case INDEX_op_remu_i64:
    case INDEX_op_setcond_i64:
        return &r_rZ_rZ;

    case INDEX_op_shl_i32:
    case INDEX_op_shr_i32:
    case INDEX_op_sar_i32:
    case INDEX_op_shl_i64:
    case INDEX_op_shr_i64:
    case INDEX_op_sar_i64:
        return &r_r_ri;

    case INDEX_op_brcond_i32:
    case INDEX_op_brcond_i64:
        return &rZ_rZ;

    case INDEX_op_add2_i32:
    case INDEX_op_add2_i64:
    case INDEX_op_sub2_i32:
    case INDEX_op_sub2_i64:
        return &r_r_rZ_rZ_rM_rM;

    case INDEX_op_brcond2_i32:
        return &rZ_rZ_rZ_rZ;

    case INDEX_op_setcond2_i32:
        return &r_rZ_rZ_rZ_rZ;

    /* qemu_ld/st: arity depends on whether the guest address and the
       data each need one host register or a 32-bit pair. */
    case INDEX_op_qemu_ld_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_L : &r_L_L;
    case INDEX_op_qemu_st_i32:
        return TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_L : &LZ_L_L;
    case INDEX_op_qemu_ld_i64:
        return TCG_TARGET_REG_BITS == 64 ? &r_L
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &r_r_L
               : &r_r_L_L;
    case INDEX_op_qemu_st_i64:
        return TCG_TARGET_REG_BITS == 64 ? &LZ_L
               : TARGET_LONG_BITS <= TCG_TARGET_REG_BITS ? &LZ_LZ_L
               : &LZ_LZ_L_L;

    default:
        return NULL;
    }
}

/* Registers the prologue must preserve across generated code. */
static const int tcg_target_callee_save_regs[] = {
    TCG_REG_S0,       /* used for the global env (TCG_AREG0) */
    TCG_REG_S1,
    TCG_REG_S2,
    TCG_REG_S3,
    TCG_REG_S4,
    TCG_REG_S5,
    TCG_REG_S6,
    TCG_REG_S7,
    TCG_REG_S8,
    TCG_REG_S9,
    TCG_REG_S10,
    TCG_REG_S11,
    TCG_REG_RA,       /* should be last for ABI compliance */
};

/* Stack frame parameters.  */
#define REG_SIZE   (TCG_TARGET_REG_BITS / 8)
#define SAVE_SIZE  ((int)ARRAY_SIZE(tcg_target_callee_save_regs) * REG_SIZE)
#define TEMP_SIZE  (CPU_TEMP_BUF_NLONGS * (int)sizeof(long))
#define FRAME_SIZE ((TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE + SAVE_SIZE \
                     + TCG_TARGET_STACK_ALIGN - 1) \
                    & -TCG_TARGET_STACK_ALIGN)
#define SAVE_OFS   (TCG_STATIC_CALL_ARGS_SIZE + TEMP_SIZE)

/* We're expecting to be able to use an immediate for frame allocation.  */
QEMU_BUILD_BUG_ON(FRAME_SIZE > 0x7ff);

/* Generate global QEMU prologue and epilogue code */
static void tcg_target_qemu_prologue(TCGContext *s)
{
    int i;

    tcg_set_frame(s, TCG_REG_SP, TCG_STATIC_CALL_ARGS_SIZE, TEMP_SIZE);

    /* TB prologue: allocate the frame and save callee-saved registers. */
    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, -FRAME_SIZE);
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_st(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

#if !defined(CONFIG_SOFTMMU)
    tcg_out_movi(s, TCG_TYPE_PTR, TCG_GUEST_BASE_REG, guest_base);
    tcg_regset_set_reg(s->reserved_regs, TCG_GUEST_BASE_REG);
#endif

    /* Call generated code */
    tcg_out_mov(s, TCG_TYPE_PTR, TCG_AREG0, tcg_target_call_iarg_regs[0]);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, tcg_target_call_iarg_regs[1], 0);

    /* Return path for goto_ptr.
Set return value to 0 */
    s->code_gen_epilogue = s->code_ptr;
    tcg_out_mov(s, TCG_TYPE_REG, TCG_REG_A0, TCG_REG_ZERO);

    /* TB epilogue: restore callee-saved registers and return. */
    tb_ret_addr = s->code_ptr;
    for (i = 0; i < ARRAY_SIZE(tcg_target_callee_save_regs); i++) {
        tcg_out_ld(s, TCG_TYPE_REG, tcg_target_callee_save_regs[i],
                   TCG_REG_SP, SAVE_OFS + i * REG_SIZE);
    }

    tcg_out_opc_imm(s, OPC_ADDI, TCG_REG_SP, TCG_REG_SP, FRAME_SIZE);
    tcg_out_opc_imm(s, OPC_JALR, TCG_REG_ZERO, TCG_REG_RA, 0);
}

/*
 * One-time backend initialisation: declare the available registers,
 * the call-clobbered set (everything except s0-s11), and the registers
 * the allocator must never hand out.
 */
static void tcg_target_init(TCGContext *s)
{
    tcg_target_available_regs[TCG_TYPE_I32] = 0xffffffff;
    if (TCG_TARGET_REG_BITS == 64) {
        tcg_target_available_regs[TCG_TYPE_I64] = 0xffffffff;
    }

    tcg_target_call_clobber_regs = -1u;
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S0);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S1);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S2);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S3);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S4);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S5);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S6);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S7);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S8);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S9);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S10);
    tcg_regset_reset_reg(tcg_target_call_clobber_regs, TCG_REG_S11);

    s->reserved_regs = 0;
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_ZERO);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP0);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP1);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TMP2);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_SP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_GP);
    tcg_regset_set_reg(s->reserved_regs, TCG_REG_TP);
}

/* DWARF unwind description of the prologue's stack frame. */
typedef struct {
    DebugFrameHeader h;
    uint8_t fde_def_cfa[4];
    uint8_t fde_reg_ofs[ARRAY_SIZE(tcg_target_callee_save_regs) * 2];
} DebugFrame;

#define ELF_HOST_MACHINE EM_RISCV

static const DebugFrame debug_frame = {
    .h.cie.len = sizeof(DebugFrameCIE) - 4, /* length after .len member */
    .h.cie.id = -1,
    .h.cie.version = 1,
    .h.cie.code_align = 1,
    .h.cie.data_align = -(TCG_TARGET_REG_BITS / 8) & 0x7f, /* sleb128 */
    .h.cie.return_column = TCG_REG_RA,

    /* Total FDE size does not include the "len" member.  */
    .h.fde.len = sizeof(DebugFrame) - offsetof(DebugFrame, h.fde.cie_offset),

    .fde_def_cfa = {
        12, TCG_REG_SP,                 /* DW_CFA_def_cfa sp, ...  */
        (FRAME_SIZE & 0x7f) | 0x80,     /* ... uleb128 FRAME_SIZE */
        (FRAME_SIZE >> 7)
    },
    .fde_reg_ofs = {
        0x80 + 9,  12,                  /* DW_CFA_offset, s1, -96 */
        0x80 + 18, 11,                  /* DW_CFA_offset, s2, -88 */
        0x80 + 19, 10,                  /* DW_CFA_offset, s3, -80 */
        0x80 + 20, 9,                   /* DW_CFA_offset, s4, -72 */
        0x80 + 21, 8,                   /* DW_CFA_offset, s5, -64 */
        0x80 + 22, 7,                   /* DW_CFA_offset, s6, -56 */
        0x80 + 23, 6,                   /* DW_CFA_offset, s7, -48 */
        0x80 + 24, 5,                   /* DW_CFA_offset, s8, -40 */
        0x80 + 25, 4,                   /* DW_CFA_offset, s9, -32 */
        0x80 + 26, 3,                   /* DW_CFA_offset, s10, -24 */
        0x80 + 27, 2,                   /* DW_CFA_offset, s11, -16 */
        0x80 + 1,  1,                   /* DW_CFA_offset, ra, -8 */
    }
};

/* Register the generated-code buffer with the JIT unwinder. */
void tcg_register_jit(void *buf, size_t buf_size)
{
    tcg_register_jit_int(buf, buf_size, &debug_frame, sizeof(debug_frame));
}
pmp-tool/PMP
src/qemu/src-pmp/tests/test-arm-mptimer.c
<gh_stars>1-10
/*
 * QTest testcase for the ARM MPTimer
 *
 * Copyright (c) 2016 <NAME> <<EMAIL>>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/timer.h"
#include "libqtest.h"

/* Clock ticks consumed per counter decrement for a given prescaler. */
#define TIMER_BLOCK_SCALE(s)    ((((s) & 0xff) + 1) * 10)

/* Advance the qtest virtual clock by steps_nb counter steps (+1 tick
   so the final decrement has definitely happened). */
#define TIMER_BLOCK_STEP(scaler, steps_nb) \
    clock_step(TIMER_BLOCK_SCALE(scaler) * (int64_t)(steps_nb) + 1)

/* Base address of the private timer block under test. */
#define TIMER_BASE_PHYS 0x1e000600

/* Register offsets. */
#define TIMER_LOAD      0x00
#define TIMER_COUNTER   0x04
#define TIMER_CONTROL   0x08
#define TIMER_INTSTAT   0x0C

#define TIMER_CONTROL_ENABLE    (1 << 0)
#define TIMER_CONTROL_PERIODIC  (1 << 1)
#define TIMER_CONTROL_IT_ENABLE (1 << 2)
#define TIMER_CONTROL_PRESCALER(p) (((p) & 0xff) << 8)

#define PERIODIC 1
#define ONESHOT  0
#define NOSCALE  0

/* The two prescaler settings every data-driven test runs with. */
static int nonscaled = NOSCALE;
static int scaled = 122;

/* Write the timer's load register (also reloads the counter in hw). */
static void timer_load(uint32_t load)
{
    writel(TIMER_BASE_PHYS + TIMER_LOAD, load);
}

/* Enable the timer with the given mode and prescaler, IRQ asserted. */
static void timer_start(int periodic, uint32_t scale)
{
    uint32_t ctl = TIMER_CONTROL_ENABLE | TIMER_CONTROL_PRESCALER(scale);

    if (periodic) {
        ctl |= TIMER_CONTROL_PERIODIC;
    }

    writel(TIMER_BASE_PHYS + TIMER_CONTROL, ctl);
}

/* Clear the control register entirely, stopping the timer. */
static void timer_stop(void)
{
    writel(TIMER_BASE_PHYS + TIMER_CONTROL, 0);
}

/* Acknowledge (clear) the interrupt status flag. */
static void timer_int_clr(void)
{
    writel(TIMER_BASE_PHYS + TIMER_INTSTAT, 1);
}

/* Bring the timer to a known state: stopped, load 0, no pending IRQ. */
static void timer_reset(void)
{
    timer_stop();
    timer_load(0);
    timer_int_clr();
}

/* Read the interrupt status and clear it if it was set. */
static uint32_t timer_get_and_clr_int_sts(void)
{
    uint32_t int_sts = readl(TIMER_BASE_PHYS + TIMER_INTSTAT);

    if (int_sts) {
        timer_int_clr();
    }

    return int_sts;
}

/* Read the current counter value. */
static uint32_t timer_counter(void)
{
    return readl(TIMER_BASE_PHYS + TIMER_COUNTER);
}

/* Write the counter register directly (without touching LOAD). */
static void timer_set_counter(uint32_t value)
{
    writel(TIMER_BASE_PHYS + TIMER_COUNTER, value);
}

/* One-shot mode: counts down once, raises IRQ at zero, then stays at 0. */
static void test_timer_oneshot(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_load(9999999);
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 9999);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
    g_assert_cmpuint(timer_counter(), ==, 9990000);

    TIMER_BLOCK_STEP(scaler, 9990000);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1);

    TIMER_BLOCK_STEP(scaler, 9990000);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
}

/* Disabling the timer freezes the counter; re-enabling resumes it. */
static void test_timer_pause(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_load(999999999);
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 999);

    g_assert_cmpuint(timer_counter(), ==, 999999000);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    TIMER_BLOCK_STEP(scaler, 9000);

    g_assert_cmpuint(timer_counter(), ==, 999990000);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_stop();

    g_assert_cmpuint(timer_counter(), ==, 999990000);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    TIMER_BLOCK_STEP(scaler, 90000);

    g_assert_cmpuint(timer_counter(), ==, 999990000);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 999990000);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1);
    g_assert_cmpuint(timer_counter(), ==, 0);

    TIMER_BLOCK_STEP(scaler, 999990000);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
    g_assert_cmpuint(timer_counter(), ==, 0);
}

/* Writing LOAD while running restarts the countdown from the new value. */
static void test_timer_reload(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_load(UINT32_MAX);
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 90000);

    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX - 90000);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_load(UINT32_MAX);

    TIMER_BLOCK_STEP(scaler, 90000);

    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX - 90000);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
}

/* Periodic mode: counter reloads from LOAD and the IRQ fires each lap. */
static void test_timer_periodic(gconstpointer arg)
{
    int scaler = *((int *) arg);
    int repeat = 10;

    timer_reset();
    timer_load(100);
    timer_start(PERIODIC, scaler);

    while (repeat--) {
        /* Step past the wrap point, then back-off before the next one. */
        clock_step(TIMER_BLOCK_SCALE(scaler) * (101 + repeat) + 1);

        g_assert_cmpuint(timer_counter(), ==, 100 - repeat);
        g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1);

        clock_step(TIMER_BLOCK_SCALE(scaler) * (101 - repeat) - 1);
    }
}

/* Switching a running one-shot timer into periodic mode keeps counting. */
static void test_timer_oneshot_to_periodic(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_load(10000);
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 1000);

    g_assert_cmpuint(timer_counter(), ==, 9000);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_start(PERIODIC, scaler);

    TIMER_BLOCK_STEP(scaler, 14001);

    g_assert_cmpuint(timer_counter(), ==, 5000);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1);
}

/* Switching a running periodic timer into one-shot mode keeps counting. */
static void test_timer_periodic_to_oneshot(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_load(99999999);
    timer_start(PERIODIC, scaler);

    TIMER_BLOCK_STEP(scaler, 999);

    g_assert_cmpuint(timer_counter(), ==, 99999000);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 99999009);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1);
}

/* The prescaler stretches each counter step by the expected factor. */
static void test_timer_prescaler(void)
{
    timer_reset();
    timer_load(9999999);
    timer_start(ONESHOT, NOSCALE);

    TIMER_BLOCK_STEP(NOSCALE, 9999998);

    g_assert_cmpuint(timer_counter(), ==, 1);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    TIMER_BLOCK_STEP(NOSCALE, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1);

    timer_reset();
    timer_load(9999999);
    timer_start(ONESHOT, 0xAB);

    TIMER_BLOCK_STEP(0xAB, 9999998);

    g_assert_cmpuint(timer_counter(), ==, 1);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    TIMER_BLOCK_STEP(0xAB, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1);
}

/* Changing the prescaler of a running timer takes effect immediately. */
static void test_timer_prescaler_on_the_fly(void)
{
    timer_reset();
    timer_load(9999999);
    timer_start(ONESHOT, NOSCALE);

    TIMER_BLOCK_STEP(NOSCALE, 999);
g_assert_cmpuint(timer_counter(), ==, 9999000);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_start(ONESHOT, 0xAB);

    TIMER_BLOCK_STEP(0xAB, 9000);

    g_assert_cmpuint(timer_counter(), ==, 9990000);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
}

/* Writing 0 to the counter of a running one-shot timer fires the IRQ
   (only when prescaled — the !!scaler asserts encode that difference). */
static void test_timer_set_oneshot_counter_to_0(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_load(UINT32_MAX);
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX - 1);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_set_counter(0);

    TIMER_BLOCK_STEP(scaler, 10);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
}

/* Writing 0 to the counter of a running periodic timer reloads and
   raises the IRQ; exact value differs between scaled/non-scaled. */
static void test_timer_set_periodic_counter_to_0(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_load(UINT32_MAX);
    timer_start(PERIODIC, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX - 1);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_set_counter(0);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX - (scaler ? 0 : 1));
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    timer_reset();
    timer_set_counter(UINT32_MAX);
    timer_start(PERIODIC, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX - 1);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_set_counter(0);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
}

/* Enabling one-shot mode without ever writing LOAD. */
static void test_timer_noload_oneshot(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
}

/* Enabling periodic mode without ever writing LOAD: IRQ repeats. */
static void test_timer_noload_periodic(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_start(PERIODIC, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
}

/* Explicitly writing LOAD = 0 while running, one-shot mode. */
static void test_timer_zero_load_oneshot(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
    g_assert_cmpuint(timer_counter(), ==, 0);

    timer_load(0);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
}

/* Explicitly writing LOAD = 0 while running, periodic mode. */
static void test_timer_zero_load_periodic(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_start(PERIODIC, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
    g_assert_cmpuint(timer_counter(), ==, 0);

    timer_load(0);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
}

/* LOAD 0 followed by a non-zero LOAD resumes normal one-shot counting. */
static void test_timer_zero_load_oneshot_to_nonzero(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    timer_load(0);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
    g_assert_cmpuint(timer_counter(), ==, 0);

    timer_load(999);

    TIMER_BLOCK_STEP(scaler, 1001);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1);
}

/* LOAD 0 followed by a non-zero LOAD resumes periodic counting/IRQs. */
static void test_timer_zero_load_periodic_to_nonzero(gconstpointer arg)
{
    int scaler = *((int *) arg);
    int i;

    timer_reset();
    timer_start(PERIODIC, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
    g_assert_cmpuint(timer_counter(), ==, 0);

    timer_load(0);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    timer_load(1999999);

    for (i = 1; i < 10; i++) {
        TIMER_BLOCK_STEP(scaler, 2000001);

        g_assert_cmpuint(timer_counter(), ==, 1999999 - i);
        g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1);
        g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
    }
}

/* Non-zero LOAD immediately overwritten with 0, one-shot mode. */
static void test_timer_nonzero_load_oneshot_to_zero(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
    g_assert_cmpuint(timer_counter(), ==, 0);

    timer_load(UINT32_MAX);
    timer_load(0);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_counter(), ==, 0);
g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
}

/* Non-zero LOAD immediately overwritten with 0, periodic mode. */
static void test_timer_nonzero_load_periodic_to_zero(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_start(PERIODIC, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    timer_load(UINT32_MAX);
    timer_load(0);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
}

/* Writing the counter of a running periodic timer takes effect at once. */
static void test_timer_set_periodic_counter_on_the_fly(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_load(UINT32_MAX / 2);
    timer_start(PERIODIC, scaler);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX / 2 - 100);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_set_counter(UINT32_MAX);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX - 100);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
}

/* Enable first, then write the counter: counting continues from it. */
static void test_timer_enable_and_set_counter(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    timer_set_counter(UINT32_MAX);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX - 100);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
}

/* Write the counter first, then enable: counting starts from it. */
static void test_timer_set_counter_and_enable(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_set_counter(UINT32_MAX);
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX - 100);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
}

/* A disabled timer's counter must not move. */
static void test_timer_set_counter_disabled(void)
{
    timer_reset();
    timer_set_counter(999999999);

    TIMER_BLOCK_STEP(NOSCALE, 100);

    g_assert_cmpuint(timer_counter(), ==, 999999999);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
}

/* Writing LOAD while disabled must not start counting. */
static void test_timer_load_disabled(void)
{
    timer_reset();
    timer_load(999999999);

    TIMER_BLOCK_STEP(NOSCALE, 100);

    g_assert_cmpuint(timer_counter(), ==, 999999999);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
}

/* Counter forced to 0 before enabling one-shot mode. */
static void test_timer_oneshot_with_counter_0_on_start(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_load(999);
    timer_set_counter(0);
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
}

/* Counter forced to 0 before enabling periodic mode: reload happens. */
static void test_timer_periodic_with_counter_0_on_start(gconstpointer arg)
{
    int scaler = *((int *) arg);
    int i;

    timer_reset();
    timer_load(UINT32_MAX);
    timer_set_counter(0);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
    g_assert_cmpuint(timer_counter(), ==, 0);

    timer_start(PERIODIC, scaler);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX + (scaler ? 1 : 0) - 100);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX + (scaler ? 1 : 0) - 200);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_reset();
    timer_load(1999999);
    timer_set_counter(0);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_start(PERIODIC, scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    /* Scaled timers consumed one extra step above, hence the offset. */
    for (i = 2 - (!!scaler ? 1 : 0); i < 10; i++) {
        TIMER_BLOCK_STEP(scaler, 2000001);

        g_assert_cmpuint(timer_counter(), ==, 1999999 - i);
        g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1);
        g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
    }
}

/* Observe the counter wrap lap-by-lap in periodic mode. */
static void test_periodic_counter(gconstpointer arg)
{
    const int test_load = 10;
    int scaler = *((int *) arg);
    int test_val;

    timer_reset();
    timer_load(test_load);
    timer_start(PERIODIC, scaler);

    clock_step(1);

    for (test_val = 0; test_val <= test_load; test_val++) {
        clock_step(TIMER_BLOCK_SCALE(scaler) * test_load);
        g_assert_cmpint(timer_counter(), ==, test_val);
    }
}

/* Writing the counter while LOAD is 0 in periodic mode. */
static void test_timer_set_counter_periodic_with_zero_load(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_start(PERIODIC, scaler);
    timer_load(0);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    timer_set_counter(999);

    TIMER_BLOCK_STEP(scaler, 999);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1);

    TIMER_BLOCK_STEP(scaler, 1);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);
}

/* Writing LOAD = 0 into a running one-shot timer stops it at zero. */
static void test_timer_set_oneshot_load_to_0(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_load(UINT32_MAX);
    timer_start(ONESHOT, scaler);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_counter(), ==, UINT32_MAX - 100);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);

    timer_load(0);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler);

    TIMER_BLOCK_STEP(scaler, 100);

    g_assert_cmpuint(timer_counter(), ==, 0);
    g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0);
}

/* Writing LOAD = 0 into a running periodic timer (continues past view). */
static void test_timer_set_periodic_load_to_0(gconstpointer arg)
{
    int scaler = *((int *) arg);

    timer_reset();
    timer_load(UINT32_MAX);
    timer_start(PERIODIC, scaler);

    TIMER_BLOCK_STEP(scaler,
100); g_assert_cmpuint(timer_counter(), ==, UINT32_MAX - 100); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); timer_load(0); TIMER_BLOCK_STEP(scaler, 100); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler); TIMER_BLOCK_STEP(scaler, 100); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler); g_assert_cmpuint(timer_counter(), ==, 0); } static void test_deferred_trigger(void) { int mode = ONESHOT; again: timer_reset(); timer_start(mode, 255); clock_step(100); g_assert_cmpuint(timer_counter(), ==, 0); TIMER_BLOCK_STEP(255, 1); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1); timer_reset(); timer_load(2); timer_start(mode, 255); clock_step(100); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); TIMER_BLOCK_STEP(255, 1); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); TIMER_BLOCK_STEP(255, 1); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1); timer_reset(); timer_load(UINT32_MAX); timer_start(mode, 255); clock_step(100); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); timer_set_counter(0); clock_step(100); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); TIMER_BLOCK_STEP(255, 1); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1); timer_reset(); timer_load(UINT32_MAX); timer_start(mode, 255); clock_step(100); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); timer_load(0); clock_step(100); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); TIMER_BLOCK_STEP(255, 1); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1); if (mode == ONESHOT) { mode = PERIODIC; goto again; } } static void test_timer_zero_load_mode_switch(gconstpointer arg) { int scaler = *((int *) arg); timer_reset(); timer_load(0); timer_start(PERIODIC, scaler); TIMER_BLOCK_STEP(scaler, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler); TIMER_BLOCK_STEP(scaler, 1); timer_start(ONESHOT, scaler); TIMER_BLOCK_STEP(scaler, 1); 
g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler); TIMER_BLOCK_STEP(scaler, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); TIMER_BLOCK_STEP(scaler, 1); timer_start(PERIODIC, scaler); TIMER_BLOCK_STEP(scaler, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, !!scaler); } static void test_timer_zero_load_prescaled_periodic_to_nonscaled_oneshot(void) { timer_reset(); timer_load(0); timer_start(PERIODIC, 255); TIMER_BLOCK_STEP(255, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); TIMER_BLOCK_STEP(255, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); TIMER_BLOCK_STEP(255, 1); timer_start(ONESHOT, NOSCALE); TIMER_BLOCK_STEP(NOSCALE, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); TIMER_BLOCK_STEP(NOSCALE, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); } static void test_timer_zero_load_prescaled_oneshot_to_nonscaled_periodic(void) { timer_reset(); timer_load(0); timer_start(ONESHOT, 255); TIMER_BLOCK_STEP(255, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); timer_start(PERIODIC, NOSCALE); TIMER_BLOCK_STEP(NOSCALE, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); } static void test_timer_zero_load_nonscaled_oneshot_to_prescaled_periodic(void) { timer_reset(); timer_load(0); timer_start(ONESHOT, NOSCALE); TIMER_BLOCK_STEP(NOSCALE, 1); g_assert_cmpuint(timer_counter(), ==, 0); 
g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); timer_start(PERIODIC, 255); TIMER_BLOCK_STEP(255, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); TIMER_BLOCK_STEP(255, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); } static void test_timer_zero_load_nonscaled_periodic_to_prescaled_oneshot(void) { timer_reset(); timer_load(0); timer_start(PERIODIC, NOSCALE); TIMER_BLOCK_STEP(NOSCALE, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); timer_start(ONESHOT, 255); TIMER_BLOCK_STEP(255, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 1); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); TIMER_BLOCK_STEP(255, 1); g_assert_cmpuint(timer_counter(), ==, 0); g_assert_cmpuint(timer_get_and_clr_int_sts(), ==, 0); } /* * Add a qtest test that comes in two versions: one with * a timer scaler setting, and one with the timer nonscaled. */ static void add_scaler_test(const char *str, bool scale, void (*fn)(const void *)) { char *name; int *scaler = scale ? 
&scaled : &nonscaled; name = g_strdup_printf("%s=%d", str, *scaler); qtest_add_data_func(name, scaler, fn); g_free(name); } int main(int argc, char **argv) { int ret; int scale; g_test_init(&argc, &argv, NULL); qtest_add_func("mptimer/deferred_trigger", test_deferred_trigger); qtest_add_func("mptimer/load_disabled", test_timer_load_disabled); qtest_add_func("mptimer/set_counter_disabled", test_timer_set_counter_disabled); qtest_add_func("mptimer/zero_load_prescaled_periodic_to_nonscaled_oneshot", test_timer_zero_load_prescaled_periodic_to_nonscaled_oneshot); qtest_add_func("mptimer/zero_load_prescaled_oneshot_to_nonscaled_periodic", test_timer_zero_load_prescaled_oneshot_to_nonscaled_periodic); qtest_add_func("mptimer/zero_load_nonscaled_oneshot_to_prescaled_periodic", test_timer_zero_load_nonscaled_oneshot_to_prescaled_periodic); qtest_add_func("mptimer/zero_load_nonscaled_periodic_to_prescaled_oneshot", test_timer_zero_load_nonscaled_periodic_to_prescaled_oneshot); qtest_add_func("mptimer/prescaler", test_timer_prescaler); qtest_add_func("mptimer/prescaler_on_the_fly", test_timer_prescaler_on_the_fly); for (scale = 0; scale < 2; scale++) { add_scaler_test("mptimer/oneshot scaler", scale, test_timer_oneshot); add_scaler_test("mptimer/pause scaler", scale, test_timer_pause); add_scaler_test("mptimer/reload scaler", scale, test_timer_reload); add_scaler_test("mptimer/periodic scaler", scale, test_timer_periodic); add_scaler_test("mptimer/oneshot_to_periodic scaler", scale, test_timer_oneshot_to_periodic); add_scaler_test("mptimer/periodic_to_oneshot scaler", scale, test_timer_periodic_to_oneshot); add_scaler_test("mptimer/set_oneshot_counter_to_0 scaler", scale, test_timer_set_oneshot_counter_to_0); add_scaler_test("mptimer/set_periodic_counter_to_0 scaler", scale, test_timer_set_periodic_counter_to_0); add_scaler_test("mptimer/noload_oneshot scaler", scale, test_timer_noload_oneshot); add_scaler_test("mptimer/noload_periodic scaler", scale, 
test_timer_noload_periodic); add_scaler_test("mptimer/zero_load_oneshot scaler", scale, test_timer_zero_load_oneshot); add_scaler_test("mptimer/zero_load_periodic scaler", scale, test_timer_zero_load_periodic); add_scaler_test("mptimer/zero_load_oneshot_to_nonzero scaler", scale, test_timer_zero_load_oneshot_to_nonzero); add_scaler_test("mptimer/zero_load_periodic_to_nonzero scaler", scale, test_timer_zero_load_periodic_to_nonzero); add_scaler_test("mptimer/nonzero_load_oneshot_to_zero scaler", scale, test_timer_nonzero_load_oneshot_to_zero); add_scaler_test("mptimer/nonzero_load_periodic_to_zero scaler", scale, test_timer_nonzero_load_periodic_to_zero); add_scaler_test("mptimer/set_periodic_counter_on_the_fly scaler", scale, test_timer_set_periodic_counter_on_the_fly); add_scaler_test("mptimer/enable_and_set_counter scaler", scale, test_timer_enable_and_set_counter); add_scaler_test("mptimer/set_counter_and_enable scaler", scale, test_timer_set_counter_and_enable); add_scaler_test("mptimer/oneshot_with_counter_0_on_start scaler", scale, test_timer_oneshot_with_counter_0_on_start); add_scaler_test("mptimer/periodic_with_counter_0_on_start scaler", scale, test_timer_periodic_with_counter_0_on_start); add_scaler_test("mptimer/periodic_counter scaler", scale, test_periodic_counter); add_scaler_test("mptimer/set_counter_periodic_with_zero_load scaler", scale, test_timer_set_counter_periodic_with_zero_load); add_scaler_test("mptimer/set_oneshot_load_to_0 scaler", scale, test_timer_set_oneshot_load_to_0); add_scaler_test("mptimer/set_periodic_load_to_0 scaler", scale, test_timer_set_periodic_load_to_0); add_scaler_test("mptimer/zero_load_mode_switch scaler", scale, test_timer_zero_load_mode_switch); } qtest_start("-machine vexpress-a9"); ret = g_test_run(); qtest_end(); return ret; }
pmp-tool/PMP
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/int-add/test_msa_addv_b.c
/*
 * Test program for MSA instruction ADDV.B
 *
 * Copyright (C) 2019 RT-RK Computer Based Systems LLC
 * Copyright (C) 2019 <NAME> <<EMAIL>>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <https://www.gnu.org/licenses/>.
 *
 */

#include <sys/time.h>
#include <stdint.h>

#include "../../../../include/wrappers_msa.h"
#include "../../../../include/test_inputs_128.h"
#include "../../../../include/test_utils_128.h"

/* One test case per ordered pair of pattern inputs plus one per ordered
 * pair of random inputs (counts come from test_inputs_128.h). */
#define TEST_COUNT_TOTAL (                                                \
        (PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT) +     \
        (RANDOM_INPUTS_SHORT_COUNT) * (RANDOM_INPUTS_SHORT_COUNT))


int32_t main(void)
{
    char *instruction_name = "ADDV.B";
    int32_t ret;
    uint32_t i, j;
    struct timeval start, end;
    double elapsed_time;

    /* Each 128-bit MSA result/expectation is stored as two 64-bit halves. */
    uint64_t b128_result[TEST_COUNT_TOTAL][2];
    /*
     * Pre-computed expected values, indexed in the same order the loops
     * below fill b128_result (pattern x pattern first, then random x
     * random).
     *
     * NOTE(review): this initializer list appears to contain 78 entries;
     * if TEST_COUNT_TOTAL is larger, the remaining rows zero-initialize
     * per C semantics — verify the table against the upstream source.
     */
    uint64_t b128_expect[TEST_COUNT_TOTAL][2] = {
        { 0xfefefefefefefefeULL, 0xfefefefefefefefeULL, },    /*   0  */
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0xa9a9a9a9a9a9a9a9ULL, 0xa9a9a9a9a9a9a9a9ULL, },
        { 0x5454545454545454ULL, 0x5454545454545454ULL, },
        { 0xcbcbcbcbcbcbcbcbULL, 0xcbcbcbcbcbcbcbcbULL, },
        { 0x3232323232323232ULL, 0x3232323232323232ULL, },
        { 0xe28d37e28d37e28dULL, 0x37e28d37e28d37e2ULL, },
        { 0x1b70c61b70c61b70ULL, 0xc61b70c61b70c61bULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },    /*   8  */
        { 0x0000000000000000ULL, 0x0000000000000000ULL, },
        { 0xaaaaaaaaaaaaaaaaULL, 0xaaaaaaaaaaaaaaaaULL, },
        { 0x5555555555555555ULL, 0x5555555555555555ULL, },
        { 0xccccccccccccccccULL, 0xccccccccccccccccULL, },
        { 0x3333333333333333ULL, 0x3333333333333333ULL, },
        { 0xe38e38e38e38e38eULL, 0x38e38e38e38e38e3ULL, },
        { 0x1c71c71c71c71c71ULL, 0xc71c71c71c71c71cULL, },
        { 0xa9a9a9a9a9a9a9a9ULL, 0xa9a9a9a9a9a9a9a9ULL, },    /*  16  */
        { 0xaaaaaaaaaaaaaaaaULL, 0xaaaaaaaaaaaaaaaaULL, },
        { 0x5454545454545454ULL, 0x5454545454545454ULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0x7676767676767676ULL, 0x7676767676767676ULL, },
        { 0xddddddddddddddddULL, 0xddddddddddddddddULL, },
        { 0x8d38e28d38e28d38ULL, 0xe28d38e28d38e28dULL, },
        { 0xc61b71c61b71c61bULL, 0x71c61b71c61b71c6ULL, },
        { 0x5454545454545454ULL, 0x5454545454545454ULL, },    /*  24  */
        { 0x5555555555555555ULL, 0x5555555555555555ULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0xaaaaaaaaaaaaaaaaULL, 0xaaaaaaaaaaaaaaaaULL, },
        { 0x2121212121212121ULL, 0x2121212121212121ULL, },
        { 0x8888888888888888ULL, 0x8888888888888888ULL, },
        { 0x38e38d38e38d38e3ULL, 0x8d38e38d38e38d38ULL, },
        { 0x71c61c71c61c71c6ULL, 0x1c71c61c71c61c71ULL, },
        { 0xcbcbcbcbcbcbcbcbULL, 0xcbcbcbcbcbcbcbcbULL, },    /*  32  */
        { 0xccccccccccccccccULL, 0xccccccccccccccccULL, },
        { 0x7676767676767676ULL, 0x7676767676767676ULL, },
        { 0x2121212121212121ULL, 0x2121212121212121ULL, },
        { 0x9898989898989898ULL, 0x9898989898989898ULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0xaf5a04af5a04af5aULL, 0x04af5a04af5a04afULL, },
        { 0xe83d93e83d93e83dULL, 0x93e83d93e83d93e8ULL, },
        { 0x3232323232323232ULL, 0x3232323232323232ULL, },    /*  40  */
        { 0x3333333333333333ULL, 0x3333333333333333ULL, },
        { 0xddddddddddddddddULL, 0xddddddddddddddddULL, },
        { 0x8888888888888888ULL, 0x8888888888888888ULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0x6666666666666666ULL, 0x6666666666666666ULL, },
        { 0x16c16b16c16b16c1ULL, 0x6b16c16b16c16b16ULL, },
        { 0x4fa4fa4fa4fa4fa4ULL, 0xfa4fa4fa4fa4fa4fULL, },
        { 0xe28d37e28d37e28dULL, 0x37e28d37e28d37e2ULL, },    /*  48  */
        { 0xe38e38e38e38e38eULL, 0x38e38e38e38e38e3ULL, },
        { 0x8d38e28d38e28d38ULL, 0xe28d38e28d38e28dULL, },
        { 0x38e38d38e38d38e3ULL, 0x8d38e38d38e38d38ULL, },
        { 0xaf5a04af5a04af5aULL, 0x04af5a04af5a04afULL, },
        { 0x16c16b16c16b16c1ULL, 0x6b16c16b16c16b16ULL, },
        { 0xc61c70c61c70c61cULL, 0x70c61c70c61c70c6ULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0x1b70c61b70c61b70ULL, 0xc61b70c61b70c61bULL, },    /*  56  */
        { 0x1c71c71c71c71c71ULL, 0xc71c71c71c71c71cULL, },
        { 0xc61b71c61b71c61bULL, 0x71c61b71c61b71c6ULL, },
        { 0x71c61c71c61c71c6ULL, 0x1c71c61c71c61c71ULL, },
        { 0xe83d93e83d93e83dULL, 0x93e83d93e83d93e8ULL, },
        { 0x4fa4fa4fa4fa4fa4ULL, 0xfa4fa4fa4fa4fa4fULL, },
        { 0xffffffffffffffffULL, 0xffffffffffffffffULL, },
        { 0x38e28e38e28e38e2ULL, 0x8e38e28e38e28e38ULL, },
        { 0x10d4cc9850c4aa80ULL, 0x96ce16bcfcf66018ULL, },    /*  64  */
        { 0x8328e62f75f51c48ULL, 0x5d5ec67813ba0208ULL, },
        { 0x34c49476e131e0c0ULL, 0x723fd15da9a6d520ULL, },
        { 0xf8b9fc198693378eULL, 0xd8589336a7bd92acULL, },
        { 0x8328e62f75f51c48ULL, 0x5d5ec67813ba0208ULL, },
        { 0xf67c00c69a268e10ULL, 0x24ee76342a7ea4f8ULL, },
        { 0xa718ae0d06625288ULL, 0x39cf8119c06a7710ULL, },
        { 0x6b0d16b0abc4a956ULL, 0x9fe843f2be81349cULL, },
        { 0x34c49476e131e0c0ULL, 0x723fd15da9a6d520ULL, },    /*  72  */
        { 0xa718ae0d06625288ULL, 0x39cf8119c06a7710ULL, },
        { 0x58b45c54729e1600ULL, 0x4eb08cfe56564a28ULL, },
        { 0x1ca9c4f717006dceULL, 0xb4c94ed7546d07b4ULL, },
        { 0xf8b9fc198693378eULL, 0xd8589336a7bd92acULL, },
        { 0x6b0d16b0abc4a956ULL, 0x9fe843f2be81349cULL, },
    };

    gettimeofday(&start, NULL);

    /* Run ADDV.B over every ordered pair of pattern inputs. */
    for (i = 0; i < PATTERN_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < PATTERN_INPUTS_SHORT_COUNT; j++) {
            do_msa_ADDV_B(b128_pattern[i], b128_pattern[j],
                          b128_result[PATTERN_INPUTS_SHORT_COUNT * i + j]);
        }
    }

    /* Then every ordered pair of random inputs, appended after the
     * pattern results. */
    for (i = 0; i < RANDOM_INPUTS_SHORT_COUNT; i++) {
        for (j = 0; j < RANDOM_INPUTS_SHORT_COUNT; j++) {
            do_msa_ADDV_B(b128_random[i], b128_random[j],
                          b128_result[((PATTERN_INPUTS_SHORT_COUNT) *
                                       (PATTERN_INPUTS_SHORT_COUNT)) +
                                      RANDOM_INPUTS_SHORT_COUNT * i + j]);
        }
    }

    gettimeofday(&end, NULL);

    /* Wall-clock time in milliseconds, reported by check_results(). */
    elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0;
    elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0;

    ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time,
                        &b128_result[0][0], &b128_expect[0][0]);

    return ret;
}
pmp-tool/PMP
src/qemu/src-pmp/contrib/elf2dmp/pdb.c
/* * Copyright (c) 2018 Virtuozzo International GmbH * * Based on source of Wine project * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2.1 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, write to the Free Software * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA */ #include <inttypes.h> #include "qemu/osdep.h" #include "pdb.h" #include "err.h" static uint32_t pdb_get_file_size(const struct pdb_reader *r, unsigned idx) { return r->ds.toc->file_size[idx]; } static pdb_seg *get_seg_by_num(struct pdb_reader *r, size_t n) { size_t i = 0; char *ptr; for (ptr = r->segs; (ptr < r->segs + r->segs_size); ) { i++; ptr += 8; if (i == n) { break; } ptr += sizeof(pdb_seg); } return (pdb_seg *)ptr; } uint64_t pdb_find_public_v3_symbol(struct pdb_reader *r, const char *name) { size_t size = pdb_get_file_size(r, r->symbols->gsym_file); int length; const union codeview_symbol *sym; const uint8_t *root = r->modimage; size_t i; for (i = 0; i < size; i += length) { sym = (const void *)(root + i); length = sym->generic.len + 2; if (!sym->generic.id || length < 4) { break; } if (sym->generic.id == S_PUB_V3 && !strcmp(name, sym->public_v3.name)) { pdb_seg *segment = get_seg_by_num(r, sym->public_v3.segment); uint32_t sect_rva = segment->dword[1]; uint64_t rva = sect_rva + sym->public_v3.offset; printf("%s: 0x%016x(%d:\'%.8s\') + 0x%08x = 0x%09"PRIx64"\n", name, sect_rva, sym->public_v3.segment, ((char *)segment - 8), sym->public_v3.offset, rva); 
return rva; } } return 0; } uint64_t pdb_resolve(uint64_t img_base, struct pdb_reader *r, const char *name) { uint64_t rva = pdb_find_public_v3_symbol(r, name); if (!rva) { return 0; } return img_base + rva; } static void pdb_reader_ds_exit(struct pdb_reader *r) { free(r->ds.toc); } static void pdb_exit_symbols(struct pdb_reader *r) { free(r->modimage); free(r->symbols); } static void pdb_exit_segments(struct pdb_reader *r) { free(r->segs); } static void *pdb_ds_read(const PDB_DS_HEADER *header, const uint32_t *block_list, int size) { int i, nBlocks; uint8_t *buffer; if (!size) { return NULL; } nBlocks = (size + header->block_size - 1) / header->block_size; buffer = malloc(nBlocks * header->block_size); if (!buffer) { return NULL; } for (i = 0; i < nBlocks; i++) { memcpy(buffer + i * header->block_size, (const char *)header + block_list[i] * header->block_size, header->block_size); } return buffer; } static void *pdb_ds_read_file(struct pdb_reader* r, uint32_t file_number) { const uint32_t *block_list; uint32_t block_size; const uint32_t *file_size; size_t i; if (!r->ds.toc || file_number >= r->ds.toc->num_files) { return NULL; } file_size = r->ds.toc->file_size; r->file_used[file_number / 32] |= 1 << (file_number % 32); if (file_size[file_number] == 0 || file_size[file_number] == 0xFFFFFFFF) { return NULL; } block_list = file_size + r->ds.toc->num_files; block_size = r->ds.header->block_size; for (i = 0; i < file_number; i++) { block_list += (file_size[i] + block_size - 1) / block_size; } return pdb_ds_read(r->ds.header, block_list, file_size[file_number]); } static int pdb_init_segments(struct pdb_reader *r) { char *segs; unsigned stream_idx = r->sidx.segments; segs = pdb_ds_read_file(r, stream_idx); if (!segs) { return 1; } r->segs = segs; r->segs_size = pdb_get_file_size(r, stream_idx); return 0; } static int pdb_init_symbols(struct pdb_reader *r) { int err = 0; PDB_SYMBOLS *symbols; PDB_STREAM_INDEXES *sidx = &r->sidx; memset(sidx, -1, sizeof(*sidx)); symbols 
= pdb_ds_read_file(r, 3); if (!symbols) { return 1; } r->symbols = symbols; if (symbols->stream_index_size != sizeof(PDB_STREAM_INDEXES)) { err = 1; goto out_symbols; } memcpy(sidx, (const char *)symbols + sizeof(PDB_SYMBOLS) + symbols->module_size + symbols->offset_size + symbols->hash_size + symbols->srcmodule_size + symbols->pdbimport_size + symbols->unknown2_size, sizeof(*sidx)); /* Read global symbol table */ r->modimage = pdb_ds_read_file(r, symbols->gsym_file); if (!r->modimage) { err = 1; goto out_symbols; } return 0; out_symbols: free(symbols); return err; } static int pdb_reader_ds_init(struct pdb_reader *r, PDB_DS_HEADER *hdr) { memset(r->file_used, 0, sizeof(r->file_used)); r->ds.header = hdr; r->ds.toc = pdb_ds_read(hdr, (uint32_t *)((uint8_t *)hdr + hdr->toc_page * hdr->block_size), hdr->toc_size); if (!r->ds.toc) { return 1; } return 0; } static int pdb_reader_init(struct pdb_reader *r, void *data) { int err = 0; const char pdb7[] = "Microsoft C/C++ MSF 7.00"; if (memcmp(data, pdb7, sizeof(pdb7) - 1)) { return 1; } if (pdb_reader_ds_init(r, data)) { return 1; } r->ds.root = pdb_ds_read_file(r, 1); if (!r->ds.root) { err = 1; goto out_ds; } if (pdb_init_symbols(r)) { err = 1; goto out_root; } if (pdb_init_segments(r)) { err = 1; goto out_sym; } return 0; out_sym: pdb_exit_symbols(r); out_root: free(r->ds.root); out_ds: pdb_reader_ds_exit(r); return err; } static void pdb_reader_exit(struct pdb_reader *r) { pdb_exit_segments(r); pdb_exit_symbols(r); free(r->ds.root); pdb_reader_ds_exit(r); } int pdb_init_from_file(const char *name, struct pdb_reader *reader) { GError *gerr = NULL; int err = 0; void *map; reader->gmf = g_mapped_file_new(name, TRUE, &gerr); if (gerr) { eprintf("Failed to map PDB file \'%s\'\n", name); return 1; } reader->file_size = g_mapped_file_get_length(reader->gmf); map = g_mapped_file_get_contents(reader->gmf); if (pdb_reader_init(reader, map)) { err = 1; goto out_unmap; } return 0; out_unmap: g_mapped_file_unref(reader->gmf); 
return err; } void pdb_exit(struct pdb_reader *reader) { g_mapped_file_unref(reader->gmf); pdb_reader_exit(reader); }
pmp-tool/PMP
src/qemu/src-pmp/include/block/dirty-bitmap.h
#ifndef BLOCK_DIRTY_BITMAP_H
#define BLOCK_DIRTY_BITMAP_H

#include "qemu-common.h"
#include "qapi/qapi-types-block-core.h"
#include "qemu/hbitmap.h"

/*
 * Conditions bdrv_dirty_bitmap_check() can reject a bitmap for.
 * Callers pass a mask of the conditions they cannot tolerate.
 */
typedef enum BitmapCheckFlags {
    BDRV_BITMAP_BUSY = 1,
    BDRV_BITMAP_RO = 2,
    BDRV_BITMAP_INCONSISTENT = 4,
} BitmapCheckFlags;

/* Reject busy, read-only and inconsistent bitmaps alike. */
#define BDRV_BITMAP_DEFAULT (BDRV_BITMAP_BUSY | BDRV_BITMAP_RO |        \
                             BDRV_BITMAP_INCONSISTENT)
/* Like DEFAULT, but a read-only bitmap is acceptable. */
#define BDRV_BITMAP_ALLOW_RO (BDRV_BITMAP_BUSY | BDRV_BITMAP_INCONSISTENT)

/* Creation, lookup and release. */
BdrvDirtyBitmap *bdrv_create_dirty_bitmap(BlockDriverState *bs,
                                          uint32_t granularity,
                                          const char *name,
                                          Error **errp);
void bdrv_create_meta_dirty_bitmap(BdrvDirtyBitmap *bitmap,
                                   int chunk_size);
void bdrv_release_meta_dirty_bitmap(BdrvDirtyBitmap *bitmap);

/* Successor management (used while a bitmap is frozen by an operation). */
int bdrv_dirty_bitmap_create_successor(BlockDriverState *bs,
                                       BdrvDirtyBitmap *bitmap,
                                       Error **errp);
BdrvDirtyBitmap *bdrv_dirty_bitmap_abdicate(BlockDriverState *bs,
                                            BdrvDirtyBitmap *bitmap,
                                            Error **errp);
BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap(BlockDriverState *bs,
                                           BdrvDirtyBitmap *bitmap,
                                           Error **errp);
void bdrv_dirty_bitmap_enable_successor(BdrvDirtyBitmap *bitmap);

BdrvDirtyBitmap *bdrv_find_dirty_bitmap(BlockDriverState *bs,
                                        const char *name);
int bdrv_dirty_bitmap_check(const BdrvDirtyBitmap *bitmap, uint32_t flags,
                            Error **errp);
void bdrv_release_dirty_bitmap(BlockDriverState *bs,
                               BdrvDirtyBitmap *bitmap);
void bdrv_release_named_dirty_bitmaps(BlockDriverState *bs);
void bdrv_remove_persistent_dirty_bitmap(BlockDriverState *bs,
                                         const char *name,
                                         Error **errp);

/* Enable/disable dirty tracking; "_locked" variants assume caller holds
 * the bitmap lock (see the manual-locking section below). */
void bdrv_disable_dirty_bitmap(BdrvDirtyBitmap *bitmap);
void bdrv_enable_dirty_bitmap(BdrvDirtyBitmap *bitmap);
void bdrv_enable_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap);

/* Queries and property accessors. */
BlockDirtyInfoList *bdrv_query_dirty_bitmaps(BlockDriverState *bs);
uint32_t bdrv_get_default_bitmap_granularity(BlockDriverState *bs);
uint32_t bdrv_dirty_bitmap_granularity(const BdrvDirtyBitmap *bitmap);
bool bdrv_dirty_bitmap_enabled(BdrvDirtyBitmap *bitmap);
bool bdrv_dirty_bitmap_has_successor(BdrvDirtyBitmap *bitmap);
const char *bdrv_dirty_bitmap_name(const BdrvDirtyBitmap *bitmap);
int64_t bdrv_dirty_bitmap_size(const BdrvDirtyBitmap *bitmap);
DirtyBitmapStatus bdrv_dirty_bitmap_status(BdrvDirtyBitmap *bitmap);

/* Set/clear dirty state for a byte range. */
void bdrv_set_dirty_bitmap(BdrvDirtyBitmap *bitmap,
                           int64_t offset, int64_t bytes);
void bdrv_reset_dirty_bitmap(BdrvDirtyBitmap *bitmap,
                             int64_t offset, int64_t bytes);

/* Iterators over dirty bits. */
BdrvDirtyBitmapIter *bdrv_dirty_meta_iter_new(BdrvDirtyBitmap *bitmap);
BdrvDirtyBitmapIter *bdrv_dirty_iter_new(BdrvDirtyBitmap *bitmap);
void bdrv_dirty_iter_free(BdrvDirtyBitmapIter *iter);

/* (De)serialization of bitmap ranges (byte-granularity offsets). */
uint64_t bdrv_dirty_bitmap_serialization_size(const BdrvDirtyBitmap *bitmap,
                                              uint64_t offset, uint64_t bytes);
uint64_t bdrv_dirty_bitmap_serialization_align(const BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_serialize_part(const BdrvDirtyBitmap *bitmap,
                                      uint8_t *buf, uint64_t offset,
                                      uint64_t bytes);
void bdrv_dirty_bitmap_deserialize_part(BdrvDirtyBitmap *bitmap,
                                        uint8_t *buf, uint64_t offset,
                                        uint64_t bytes, bool finish);
void bdrv_dirty_bitmap_deserialize_zeroes(BdrvDirtyBitmap *bitmap,
                                          uint64_t offset, uint64_t bytes,
                                          bool finish);
void bdrv_dirty_bitmap_deserialize_ones(BdrvDirtyBitmap *bitmap,
                                        uint64_t offset, uint64_t bytes,
                                        bool finish);
void bdrv_dirty_bitmap_deserialize_finish(BdrvDirtyBitmap *bitmap);

/* Flag setters. */
void bdrv_dirty_bitmap_set_readonly(BdrvDirtyBitmap *bitmap, bool value);
void bdrv_dirty_bitmap_set_persistence(BdrvDirtyBitmap *bitmap,
                                       bool persistent);
void bdrv_dirty_bitmap_set_inconsistent(BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_set_busy(BdrvDirtyBitmap *bitmap, bool busy);
void bdrv_merge_dirty_bitmap(BdrvDirtyBitmap *dest, const BdrvDirtyBitmap *src,
                             HBitmap **backup, Error **errp);
void bdrv_dirty_bitmap_set_migration(BdrvDirtyBitmap *bitmap, bool migration);

/* Functions that require manual locking.  Take the bitmap lock with
 * bdrv_dirty_bitmap_lock() before calling, release it afterwards. */
void bdrv_dirty_bitmap_lock(BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_unlock(BdrvDirtyBitmap *bitmap);
bool bdrv_get_dirty_locked(BlockDriverState *bs, BdrvDirtyBitmap *bitmap,
                           int64_t offset);
void bdrv_set_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
                                  int64_t offset, int64_t bytes);
void bdrv_reset_dirty_bitmap_locked(BdrvDirtyBitmap *bitmap,
                                    int64_t offset, int64_t bytes);
int64_t bdrv_dirty_iter_next(BdrvDirtyBitmapIter *iter);
void bdrv_set_dirty_iter(BdrvDirtyBitmapIter *hbi, int64_t offset);
int64_t bdrv_get_dirty_count(BdrvDirtyBitmap *bitmap);
int64_t bdrv_get_meta_dirty_count(BdrvDirtyBitmap *bitmap);
void bdrv_dirty_bitmap_truncate(BlockDriverState *bs, int64_t bytes);
bool bdrv_dirty_bitmap_readonly(const BdrvDirtyBitmap *bitmap);
bool bdrv_has_readonly_bitmaps(BlockDriverState *bs);
bool bdrv_dirty_bitmap_get_autoload(const BdrvDirtyBitmap *bitmap);
bool bdrv_dirty_bitmap_get_persistence(BdrvDirtyBitmap *bitmap);
bool bdrv_dirty_bitmap_inconsistent(const BdrvDirtyBitmap *bitmap);
bool bdrv_has_changed_persistent_bitmaps(BlockDriverState *bs);
BdrvDirtyBitmap *bdrv_dirty_bitmap_next(BlockDriverState *bs,
                                        BdrvDirtyBitmap *bitmap);
char *bdrv_dirty_bitmap_sha256(const BdrvDirtyBitmap *bitmap, Error **errp);
int64_t bdrv_dirty_bitmap_next_zero(BdrvDirtyBitmap *bitmap, uint64_t offset,
                                    uint64_t bytes);
bool bdrv_dirty_bitmap_next_dirty_area(BdrvDirtyBitmap *bitmap,
                                       uint64_t *offset, uint64_t *bytes);
BdrvDirtyBitmap *bdrv_reclaim_dirty_bitmap_locked(BlockDriverState *bs,
                                                  BdrvDirtyBitmap *bitmap,
                                                  Error **errp);

#endif
pmp-tool/PMP
src/qemu/src-pmp/backends/hostmem-file.c
<reponame>pmp-tool/PMP
/*
 * QEMU Host Memory Backend for hugetlbfs
 *
 * Copyright (C) 2013-2014 Red Hat Inc
 *
 * Authors:
 *   <NAME> <<EMAIL>>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "qemu/error-report.h"
#include "sysemu/hostmem.h"
#include "sysemu/sysemu.h"
#include "qom/object_interfaces.h"

/* hostmem-file.c */

/**
 * @TYPE_MEMORY_BACKEND_FILE:
 * name of backend that uses mmap on a file descriptor
 */
#define TYPE_MEMORY_BACKEND_FILE "memory-backend-file"

#define MEMORY_BACKEND_FILE(obj) \
    OBJECT_CHECK(HostMemoryBackendFile, (obj), TYPE_MEMORY_BACKEND_FILE)

typedef struct HostMemoryBackendFile HostMemoryBackendFile;

/* Per-instance state: the backing file path plus the user-settable
 * QOM properties registered in file_backend_class_init(). */
struct HostMemoryBackendFile {
    HostMemoryBackend parent_obj;

    char *mem_path;     /* "mem-path": file to mmap for guest RAM */
    uint64_t align;     /* "align": requested mapping alignment, 0 = default */
    bool discard_data;  /* "discard-data": MADV_REMOVE pages on unparent */
    bool is_pmem;       /* "pmem": map the file as persistent memory */
};

/*
 * HostMemoryBackendClass::alloc hook: back the memory region with an
 * mmap of fb->mem_path.  Validates size and mem-path, and for pmem
 * additionally verifies the backend size fits inside the pmem file
 * before creating the region.
 */
static void
file_backend_memory_alloc(HostMemoryBackend *backend, Error **errp)
{
#ifndef CONFIG_POSIX
    error_setg(errp, "backend '%s' not supported on this host",
               object_get_typename(OBJECT(backend)));
#else
    HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(backend);
    gchar *name;

    if (!backend->size) {
        error_setg(errp, "can't create backend with size 0");
        return;
    }

    if (!fb->mem_path) {
        error_setg(errp, "mem-path property not set");
        return;
    }

    /*
     * Verify pmem file size since starting a guest with an incorrect size
     * leads to confusing failures inside the guest.
     */
    if (fb->is_pmem) {
        Error *local_err = NULL;
        uint64_t size;

        /* A zero return means the size query itself failed. */
        size = qemu_get_pmem_size(fb->mem_path, &local_err);
        if (!size) {
            error_propagate(errp, local_err);
            return;
        }

        if (backend->size > size) {
            error_setg(errp, "size property %" PRIu64 " is larger than "
                       "pmem file \"%s\" size %" PRIu64, backend->size,
                       fb->mem_path, size);
            return;
        }
    }

    backend->force_prealloc = mem_prealloc;
    name = host_memory_backend_get_name(backend);
    /* Share/pmem flags are combined into the region's RAM flags. */
    memory_region_init_ram_from_file(&backend->mr, OBJECT(backend), name,
                                     backend->size, fb->align,
                                     (backend->share ? RAM_SHARED : 0) |
                                     (fb->is_pmem ? RAM_PMEM : 0),
                                     fb->mem_path, errp);
    g_free(name);
#endif
}

/* "mem-path" getter: returns a copy owned (and freed) by the caller. */
static char *get_mem_path(Object *o, Error **errp)
{
    HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);

    return g_strdup(fb->mem_path);
}

/* "mem-path" setter; rejected once the memory region is initialized. */
static void set_mem_path(Object *o, const char *str, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);
    HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(errp, "cannot change property 'mem-path' of %s",
                   object_get_typename(o));
        return;
    }
    g_free(fb->mem_path);
    fb->mem_path = g_strdup(str);
}

/* "discard-data" getter. */
static bool file_memory_backend_get_discard_data(Object *o, Error **errp)
{
    return MEMORY_BACKEND_FILE(o)->discard_data;
}

/* "discard-data" setter; may be toggled at any time (only read at
 * unparent). */
static void file_memory_backend_set_discard_data(Object *o, bool value,
                                             Error **errp)
{
    MEMORY_BACKEND_FILE(o)->discard_data = value;
}

/* "align" getter, visited as a size value. */
static void file_memory_backend_get_align(Object *o, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
    uint64_t val = fb->align;

    visit_type_size(v, name, &val, errp);
}

/* "align" setter; rejected once the memory region is initialized. */
static void file_memory_backend_set_align(Object *o, Visitor *v,
                                          const char *name, void *opaque,
                                          Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);
    HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);
    Error *local_err = NULL;
    uint64_t val;

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(&local_err, "cannot change property '%s' of %s", name,
                   object_get_typename(o));
        goto out;
    }

    visit_type_size(v, name, &val, &local_err);
    if (local_err) {
        goto out;
    }
    fb->align = val;

 out:
    error_propagate(errp, local_err);
}

/* "pmem" getter. */
static bool file_memory_backend_get_pmem(Object *o, Error **errp)
{
    return MEMORY_BACKEND_FILE(o)->is_pmem;
}

/*
 * "pmem" setter; rejected once the memory region is initialized, and
 * rejected outright when QEMU was built without libpmem (data
 * persistence cannot be guaranteed then).
 */
static void file_memory_backend_set_pmem(Object *o, bool value, Error **errp)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(o);
    HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);

    if (host_memory_backend_mr_inited(backend)) {
        error_setg(errp, "cannot change property 'pmem' of %s.",
                   object_get_typename(o));
        return;
    }

#ifndef CONFIG_LIBPMEM
    if (value) {
        Error *local_err = NULL;
        error_setg(&local_err,
                   "Lack of libpmem support while setting the 'pmem=on'"
                   " of %s. We can't ensure data persistence.",
                   object_get_typename(o));
        error_propagate(errp, local_err);
        return;
    }
#endif

    fb->is_pmem = value;
}

/* Object::unparent hook: when discard-data is set, punch out the
 * backing file's pages so they are not written back on teardown. */
static void file_backend_unparent(Object *obj)
{
    HostMemoryBackend *backend = MEMORY_BACKEND(obj);
    HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(obj);

    if (host_memory_backend_mr_inited(backend) && fb->discard_data) {
        void *ptr = memory_region_get_ram_ptr(&backend->mr);
        uint64_t sz = memory_region_size(&backend->mr);

        qemu_madvise(ptr, sz, QEMU_MADV_REMOVE);
    }
}

/* Class init: install the alloc/unparent hooks and register all four
 * QOM properties. */
static void
file_backend_class_init(ObjectClass *oc, void *data)
{
    HostMemoryBackendClass *bc = MEMORY_BACKEND_CLASS(oc);

    bc->alloc = file_backend_memory_alloc;
    oc->unparent = file_backend_unparent;

    object_class_property_add_bool(oc, "discard-data",
        file_memory_backend_get_discard_data, file_memory_backend_set_discard_data,
        &error_abort);
    object_class_property_add_str(oc, "mem-path",
        get_mem_path, set_mem_path,
        &error_abort);
    object_class_property_add(oc, "align", "int",
        file_memory_backend_get_align,
        file_memory_backend_set_align,
        NULL, NULL, &error_abort);
    object_class_property_add_bool(oc, "pmem",
        file_memory_backend_get_pmem, file_memory_backend_set_pmem,
        &error_abort);
}

/* Instance finalize: release the duplicated mem-path string. */
static void file_backend_instance_finalize(Object *o)
{
    HostMemoryBackendFile *fb = MEMORY_BACKEND_FILE(o);

    g_free(fb->mem_path);
}

static const TypeInfo file_backend_info = {
    .name = TYPE_MEMORY_BACKEND_FILE,
    .parent = TYPE_MEMORY_BACKEND,
    .class_init = file_backend_class_init,
    .instance_finalize = file_backend_instance_finalize,
    .instance_size = sizeof(HostMemoryBackendFile),
};

static void register_types(void)
{
    type_register_static(&file_backend_info);
}

type_init(register_types);
pmp-tool/PMP
src/qemu/src-pmp/hw/audio/lm4549.c
<filename>src/qemu/src-pmp/hw/audio/lm4549.c<gh_stars>1-10
/*
 * LM4549 Audio Codec Interface
 *
 * Copyright (c) 2011
 * Written by <NAME> - www.elasticsheep.com
 *
 * This code is licensed under the GPL.
 *
 * *****************************************************************
 *
 * This driver emulates the LM4549 codec.
 *
 * It supports only one playback voice and no record voice.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "audio/audio.h"
#include "lm4549.h"

#if 0
#define LM4549_DEBUG 1
#endif

#if 0
#define LM4549_DUMP_DAC_INPUT 1
#endif

#ifdef LM4549_DEBUG
#define DPRINTF(fmt, ...) \
do { printf("lm4549: " fmt , ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) do {} while (0)
#endif

#if defined(LM4549_DUMP_DAC_INPUT)
static FILE *fp_dac_input;
#endif

/* LM4549 register list (AC'97-style register offsets) */
enum {
    LM4549_Reset                    = 0x00,
    LM4549_Master_Volume            = 0x02,
    LM4549_Line_Out_Volume          = 0x04,
    LM4549_Master_Volume_Mono       = 0x06,
    LM4549_PC_Beep_Volume           = 0x0A,
    LM4549_Phone_Volume             = 0x0C,
    LM4549_Mic_Volume               = 0x0E,
    LM4549_Line_In_Volume           = 0x10,
    LM4549_CD_Volume                = 0x12,
    LM4549_Video_Volume             = 0x14,
    LM4549_Aux_Volume               = 0x16,
    LM4549_PCM_Out_Volume           = 0x18,
    LM4549_Record_Select            = 0x1A,
    LM4549_Record_Gain              = 0x1C,
    LM4549_General_Purpose          = 0x20,
    LM4549_3D_Control               = 0x22,
    LM4549_Powerdown_Ctrl_Stat      = 0x26,
    LM4549_Ext_Audio_ID             = 0x28,
    LM4549_Ext_Audio_Stat_Ctrl      = 0x2A,
    LM4549_PCM_Front_DAC_Rate       = 0x2C,
    LM4549_PCM_ADC_Rate             = 0x32,
    LM4549_Vendor_ID1               = 0x7C,
    LM4549_Vendor_ID2               = 0x7E
};

/* Load the register file with the codec's reset defaults
 * (0xbb80 == 48000, the default DAC/ADC sample rate). */
static void lm4549_reset(lm4549_state *s)
{
    uint16_t *regfile = s->regfile;

    regfile[LM4549_Reset]               = 0x0d50;
    regfile[LM4549_Master_Volume]       = 0x8008;
    regfile[LM4549_Line_Out_Volume]     = 0x8000;
    regfile[LM4549_Master_Volume_Mono]  = 0x8000;
    regfile[LM4549_PC_Beep_Volume]      = 0x0000;
    regfile[LM4549_Phone_Volume]        = 0x8008;
    regfile[LM4549_Mic_Volume]          = 0x8008;
    regfile[LM4549_Line_In_Volume]      = 0x8808;
    regfile[LM4549_CD_Volume]           = 0x8808;
    regfile[LM4549_Video_Volume]        = 0x8808;
    regfile[LM4549_Aux_Volume]          = 0x8808;
    regfile[LM4549_PCM_Out_Volume]      = 0x8808;
    regfile[LM4549_Record_Select]       = 0x0000;
    regfile[LM4549_Record_Gain]         = 0x8000;
    regfile[LM4549_General_Purpose]     = 0x0000;
    regfile[LM4549_3D_Control]          = 0x0101;
    regfile[LM4549_Powerdown_Ctrl_Stat] = 0x000f;
    regfile[LM4549_Ext_Audio_ID]        = 0x0001;
    regfile[LM4549_Ext_Audio_Stat_Ctrl] = 0x0000;
    regfile[LM4549_PCM_Front_DAC_Rate]  = 0xbb80;
    regfile[LM4549_PCM_ADC_Rate]        = 0xbb80;
    regfile[LM4549_Vendor_ID1]          = 0x4e53;
    regfile[LM4549_Vendor_ID2]          = 0x4331;
}

/*
 * Push as much of s->buffer as the audio backend accepts, then slide
 * any unwritten samples down to the start of the buffer.  Also
 * (re)activates the playback voice.
 */
static void lm4549_audio_transfer(lm4549_state *s)
{
    uint32_t written_bytes, written_samples;
    uint32_t i;

    /* Activate the voice */
    AUD_set_active_out(s->voice, 1);
    s->voice_is_active = 1;

    /* Try to write the buffer content */
    written_bytes = AUD_write(s->voice, s->buffer,
                              s->buffer_level * sizeof(uint16_t));
    /* AUD_write returns bytes; two bytes per 16-bit sample */
    written_samples = written_bytes >> 1;

#if defined(LM4549_DUMP_DAC_INPUT)
    fwrite(s->buffer, sizeof(uint8_t), written_bytes, fp_dac_input);
#endif

    s->buffer_level -= written_samples;

    if (s->buffer_level > 0) {
        /* Move the data back to the start of the buffer */
        for (i = 0; i < s->buffer_level; i++) {
            s->buffer[i] = s->buffer[i + written_samples];
        }
    }
}

/*
 * Audio backend "need more data" callback.  If the producer stalled
 * (buffer level unchanged since the previous callback) the voice is
 * deactivated; a full buffer triggers a transfer plus a data request
 * upstream.
 */
static void lm4549_audio_out_callback(void *opaque, int free)
{
    lm4549_state *s = (lm4549_state *)opaque;
    /* NOTE(review): 'static' means this level is shared by all LM4549
     * instances and survives reset/migration — appears to assume a
     * single codec instance per VM.  TODO confirm. */
    static uint32_t prev_buffer_level;

#ifdef LM4549_DEBUG
    int size = AUD_get_buffer_size_out(s->voice);
    DPRINTF("audio_out_callback size = %i free = %i\n", size, free);
#endif

    /* Detect that no data are consumed => disable the voice */
    if (s->buffer_level == prev_buffer_level) {
        AUD_set_active_out(s->voice, 0);
        s->voice_is_active = 0;
    }
    prev_buffer_level = s->buffer_level;

    /* Check if a buffer transfer is pending */
    if (s->buffer_level == LM4549_BUFFER_SIZE) {
        lm4549_audio_transfer(s);

        /* Request more data */
        if (s->data_req_cb != NULL) {
            (s->data_req_cb)(s->opaque);
        }
    }
}

/* Read a codec register; offset must be a valid regfile index (< 128). */
uint32_t lm4549_read(lm4549_state *s, hwaddr offset)
{
    uint16_t *regfile = s->regfile;
    uint32_t value = 0;

    /* Read the stored value */
    assert(offset < 128);
    value = regfile[offset];

    DPRINTF("read [0x%02x] = 0x%04x\n", offset, value);

    return value;
}

/*
 * Write a codec register.  Special cases: Reset restores defaults,
 * a DAC-rate write re-opens the voice at the new frequency, the low
 * nibble of Powerdown_Ctrl_Stat is read-only status, and the ID
 * registers are read-only.
 */
void lm4549_write(lm4549_state *s,
                  hwaddr offset, uint32_t value)
{
    uint16_t *regfile = s->regfile;

    assert(offset < 128);
    DPRINTF("write [0x%02x] = 0x%04x\n", offset, value);

    switch (offset) {
    case LM4549_Reset:
        lm4549_reset(s);
        break;

    case LM4549_PCM_Front_DAC_Rate:
        regfile[LM4549_PCM_Front_DAC_Rate] = value;
        DPRINTF("DAC rate change = %i\n", value);

        /* Re-open a voice with the new sample rate */
        struct audsettings as;
        as.freq = value;
        as.nchannels = 2;
        as.fmt = AUDIO_FORMAT_S16;
        as.endianness = 0;

        s->voice = AUD_open_out(
            &s->card,
            s->voice,
            "lm4549.out",
            s,
            lm4549_audio_out_callback,
            &as
        );
        break;

    case LM4549_Powerdown_Ctrl_Stat:
        /* Preserve the read-only status nibble in bits [3:0]. */
        value &= ~0xf;
        value |= regfile[LM4549_Powerdown_Ctrl_Stat] & 0xf;
        regfile[LM4549_Powerdown_Ctrl_Stat] = value;
        break;

    case LM4549_Ext_Audio_ID:
    case LM4549_Vendor_ID1:
    case LM4549_Vendor_ID2:
        DPRINTF("Write to read-only register 0x%x\n", (int)offset);
        break;

    default:
        /* Store the new value */
        regfile[offset] = value;
        break;
    }
}

/*
 * Queue one stereo sample pair.  Returns 1 on success, 0 when the
 * buffer has no room for two more samples.  A full buffer is flushed
 * to the audio host immediately.
 */
uint32_t lm4549_write_samples(lm4549_state *s, uint32_t left, uint32_t right)
{
    /* The left and right samples are in 20-bit resolution.
       The LM4549 has 18-bit resolution and only uses the bits [19:2].
       This model supports 16-bit playback.
    */

    if (s->buffer_level > LM4549_BUFFER_SIZE - 2) {
        DPRINTF("write_sample Buffer full\n");
        return 0;
    }

    /* Store 16-bit samples in the buffer (drop the 4 low bits) */
    s->buffer[s->buffer_level++] = (left >> 4);
    s->buffer[s->buffer_level++] = (right >> 4);

    if (s->buffer_level == LM4549_BUFFER_SIZE) {
        /* Trigger the transfer of the buffer to the audio host */
        lm4549_audio_transfer(s);
    }

    return 1;
}

/* Migration post-load: re-open the voice at the migrated DAC rate and
 * restart the data flow if the voice was active on the source. */
static int lm4549_post_load(void *opaque, int version_id)
{
    lm4549_state *s = (lm4549_state *)opaque;
    uint16_t *regfile = s->regfile;

    /* Re-open a voice with the current sample rate */
    uint32_t freq = regfile[LM4549_PCM_Front_DAC_Rate];

    DPRINTF("post_load freq = %i\n", freq);
    DPRINTF("post_load voice_is_active = %i\n", s->voice_is_active);

    struct audsettings as;
    as.freq = freq;
    as.nchannels = 2;
    as.fmt = AUDIO_FORMAT_S16;
    as.endianness = 0;

    s->voice = AUD_open_out(
        &s->card,
        s->voice,
        "lm4549.out",
        s,
        lm4549_audio_out_callback,
        &as
    );

    /* Request data */
    if (s->voice_is_active == 1) {
        lm4549_audio_out_callback(s, AUD_get_buffer_size_out(s->voice));
    }

    return 0;
}

/*
 * One-time device init: store the upstream data-request callback,
 * reset the registers, register an audio card and open the default
 * 48 kHz stereo S16 voice.
 */
void lm4549_init(lm4549_state *s, lm4549_callback data_req_cb, void* opaque)
{
    struct audsettings as;

    /* Store the callback and opaque pointer */
    s->data_req_cb = data_req_cb;
    s->opaque = opaque;

    /* Init the registers */
    lm4549_reset(s);

    /* Register an audio card */
    AUD_register_card("lm4549", &s->card);

    /* Open a default voice */
    as.freq = 48000;
    as.nchannels = 2;
    as.fmt = AUDIO_FORMAT_S16;
    as.endianness = 0;

    s->voice = AUD_open_out(
        &s->card,
        s->voice,
        "lm4549.out",
        s,
        lm4549_audio_out_callback,
        &as
    );

    AUD_set_volume_out(s->voice, 0, 255, 255);

    s->voice_is_active = 0;

    /* Reset the input buffer */
    memset(s->buffer, 0x00, sizeof(s->buffer));
    s->buffer_level = 0;

#if defined(LM4549_DUMP_DAC_INPUT)
    fp_dac_input = fopen("lm4549_dac_input.pcm", "wb");
    if (!fp_dac_input) {
        hw_error("Unable to open lm4549_dac_input.pcm for writing\n");
    }
#endif
}

const VMStateDescription vmstate_lm4549_state = {
    .name = "lm4549_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .post_load = lm4549_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(voice_is_active, lm4549_state),
        VMSTATE_UINT16_ARRAY(regfile, lm4549_state, 128),
        VMSTATE_UINT16_ARRAY(buffer, lm4549_state, LM4549_BUFFER_SIZE),
        VMSTATE_UINT32(buffer_level, lm4549_state),
        VMSTATE_END_OF_LIST()
    }
};
pmp-tool/PMP
src/qemu/src-pmp/tests/test-filter-mirror.c
/*
 * QTest testcase for filter-mirror
 *
 * Copyright (c) 2016 FUJITSU LIMITED
 * Author: <NAME> <<EMAIL>>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later.  See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "libqtest.h"
#include "qapi/qmp/qdict.h"
#include "qemu/iov.h"
#include "qemu/sockets.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"

/* TODO actually test the results and get rid of this */
#define qmp_discard_response(qs, ...) qobject_unref(qtest_qmp(qs, __VA_ARGS__))

/*
 * Feed a length-prefixed packet into the guest NIC's netdev socket and
 * verify that filter-mirror copies it verbatim to the mirror chardev.
 *
 * Fixes vs the original:
 *  - 'ret' is a signed int: with the previous uint32_t, the
 *    g_assert_cmpint(ret, !=, -1) checks could never fire on a failed
 *    socketpair() (the -1 wrapped to UINT32_MAX before the comparison).
 *  - send_sock[0] was closed twice (once after iov_send to signal EOF
 *    and once again in the cleanup block) — a double close can hit an
 *    unrelated fd; the duplicate is gone.
 *  - the payload qemu_recv() result is now asserted as well.
 */
static void test_mirror(void)
{
    int send_sock[2], recv_sock[2];
    int ret;
    uint32_t len = 0;
    char send_buf[] = "Hello! filter-mirror~";
    char *recv_buf;
    uint32_t size = sizeof(send_buf);
    size = htonl(size);                 /* wire format: big-endian prefix */
    const char *devstr = "e1000";
    QTestState *qts;

    if (g_str_equal(qtest_get_arch(), "s390x")) {
        devstr = "virtio-net-ccw";
    }

    ret = socketpair(PF_UNIX, SOCK_STREAM, 0, send_sock);
    g_assert_cmpint(ret, !=, -1);

    ret = socketpair(PF_UNIX, SOCK_STREAM, 0, recv_sock);
    g_assert_cmpint(ret, !=, -1);

    qts = qtest_initf(
        "-netdev socket,id=qtest-bn0,fd=%d "
        "-device %s,netdev=qtest-bn0,id=qtest-e0 "
        "-chardev socket,id=mirror0,fd=%d "
        "-object filter-mirror,id=qtest-f0,netdev=qtest-bn0,queue=tx,outdev=mirror0 "
        , send_sock[1], devstr, recv_sock[1]);

    struct iovec iov[] = {
        {
            .iov_base = &size,
            .iov_len = sizeof(size),
        }, {
            .iov_base = send_buf,
            .iov_len = sizeof(send_buf),
        },
    };

    /* send a qmp command to guarantee that 'connected' is setting to true. */
    qmp_discard_response(qts, "{ 'execute' : 'query-status'}");
    ret = iov_send(send_sock[0], iov, 2, 0, sizeof(size) + sizeof(send_buf));
    g_assert_cmpint(ret, ==, sizeof(send_buf) + sizeof(size));
    /* Close the sending end once: signals EOF to the netdev socket. */
    close(send_sock[0]);

    /* The mirror output is also length-prefixed. */
    ret = qemu_recv(recv_sock[0], &len, sizeof(len), 0);
    g_assert_cmpint(ret, ==, sizeof(len));
    len = ntohl(len);

    g_assert_cmpint(len, ==, sizeof(send_buf));
    recv_buf = g_malloc(len);
    ret = qemu_recv(recv_sock[0], recv_buf, len, 0);
    g_assert_cmpint(ret, ==, len);
    g_assert_cmpstr(recv_buf, ==, send_buf);

    g_free(recv_buf);
    close(send_sock[1]);
    close(recv_sock[0]);
    close(recv_sock[1]);
    qtest_quit(qts);
}

int main(int argc, char **argv)
{
    int ret;

    g_test_init(&argc, &argv, NULL);
    qtest_add_func("/netfilter/mirror", test_mirror);
    ret = g_test_run();

    return ret;
}
pmp-tool/PMP
src/qemu/src-pmp/util/qemu-coroutine-sleep.c
<gh_stars>1-10 /* * QEMU coroutine sleep * * Copyright IBM, Corp. 2011 * * Authors: * <NAME> <<EMAIL>> * * This work is licensed under the terms of the GNU LGPL, version 2 or later. * See the COPYING.LIB file in the top-level directory. * */ #include "qemu/osdep.h" #include "qemu/coroutine.h" #include "qemu/coroutine_int.h" #include "qemu/timer.h" #include "block/aio.h" static void co_sleep_cb(void *opaque) { Coroutine *co = opaque; /* Write of schedule protected by barrier write in aio_co_schedule */ atomic_set(&co->scheduled, NULL); aio_co_wake(co); } void coroutine_fn qemu_co_sleep_ns(QEMUClockType type, int64_t ns) { AioContext *ctx = qemu_get_current_aio_context(); QEMUTimer *ts; Coroutine *co = qemu_coroutine_self(); const char *scheduled = atomic_cmpxchg(&co->scheduled, NULL, __func__); if (scheduled) { fprintf(stderr, "%s: Co-routine was already scheduled in '%s'\n", __func__, scheduled); abort(); } ts = aio_timer_new(ctx, type, SCALE_NS, co_sleep_cb, co); timer_mod(ts, qemu_clock_get_ns(type) + ns); qemu_coroutine_yield(); timer_del(ts); timer_free(ts); }
pmp-tool/PMP
src/qemu/src-pmp/tests/migration-test.c
/* * QTest testcase for migration * * Copyright (c) 2016-2018 Red Hat, Inc. and/or its affiliates * based on the vhost-user-test.c that is: * Copyright (c) 2014 Virtual Open Systems Sarl. * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * */ #include "qemu/osdep.h" #include "libqtest.h" #include "qapi/qmp/qdict.h" #include "qapi/qmp/qjson.h" #include "qemu/option.h" #include "qemu/range.h" #include "qemu/sockets.h" #include "chardev/char.h" #include "sysemu/sysemu.h" #include "qapi/qapi-visit-sockets.h" #include "qapi/qobject-input-visitor.h" #include "qapi/qobject-output-visitor.h" #include "migration/migration-test.h" /* TODO actually test the results and get rid of this */ #define qtest_qmp_discard_response(...) qobject_unref(qtest_qmp(__VA_ARGS__)) unsigned start_address; unsigned end_address; bool got_stop; static bool uffd_feature_thread_id; #if defined(__linux__) #include <sys/syscall.h> #include <sys/vfs.h> #endif #if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD) #include <sys/eventfd.h> #include <sys/ioctl.h> #include <linux/userfaultfd.h> static bool ufd_version_check(void) { struct uffdio_api api_struct; uint64_t ioctl_mask; int ufd = syscall(__NR_userfaultfd, O_CLOEXEC); if (ufd == -1) { g_test_message("Skipping test: userfaultfd not available"); return false; } api_struct.api = UFFD_API; api_struct.features = 0; if (ioctl(ufd, UFFDIO_API, &api_struct)) { g_test_message("Skipping test: UFFDIO_API failed"); return false; } uffd_feature_thread_id = api_struct.features & UFFD_FEATURE_THREAD_ID; ioctl_mask = (__u64)1 << _UFFDIO_REGISTER | (__u64)1 << _UFFDIO_UNREGISTER; if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) { g_test_message("Skipping test: Missing userfault feature"); return false; } return true; } #else static bool ufd_version_check(void) { g_test_message("Skipping test: Userfault not available (builtdtime)"); return false; } #endif 
static const char *tmpfs; /* The boot file modifies memory area in [start_address, end_address) * repeatedly. It outputs a 'B' at a fixed rate while it's still running. */ #include "tests/migration/i386/a-b-bootblock.h" #include "tests/migration/aarch64/a-b-kernel.h" static void init_bootfile(const char *bootpath, void *content) { FILE *bootfile = fopen(bootpath, "wb"); g_assert_cmpint(fwrite(content, 512, 1, bootfile), ==, 1); fclose(bootfile); } #include "tests/migration/s390x/a-b-bios.h" static void init_bootfile_s390x(const char *bootpath) { FILE *bootfile = fopen(bootpath, "wb"); size_t len = sizeof(s390x_elf); g_assert_cmpint(fwrite(s390x_elf, len, 1, bootfile), ==, 1); fclose(bootfile); } /* * Wait for some output in the serial output file, * we get an 'A' followed by an endless string of 'B's * but on the destination we won't have the A. */ static void wait_for_serial(const char *side) { char *serialpath = g_strdup_printf("%s/%s", tmpfs, side); FILE *serialfile = fopen(serialpath, "r"); const char *arch = qtest_get_arch(); int started = (strcmp(side, "src_serial") == 0 && strcmp(arch, "ppc64") == 0) ? 0 : 1; g_free(serialpath); do { int readvalue = fgetc(serialfile); if (!started) { /* SLOF prints its banner before starting test, * to ignore it, mark the start of the test with '_', * ignore all characters until this marker */ switch (readvalue) { case '_': started = 1; break; case EOF: fseek(serialfile, 0, SEEK_SET); usleep(1000); break; } continue; } switch (readvalue) { case 'A': /* Fine */ break; case 'B': /* It's alive! */ fclose(serialfile); return; case EOF: started = (strcmp(side, "src_serial") == 0 && strcmp(arch, "ppc64") == 0) ? 
0 : 1; fseek(serialfile, 0, SEEK_SET); usleep(1000); break; default: fprintf(stderr, "Unexpected %d on %s serial\n", readvalue, side); g_assert_not_reached(); } } while (true); } static void stop_cb(void *opaque, const char *name, QDict *data) { if (!strcmp(name, "STOP")) { got_stop = true; } } /* * Events can get in the way of responses we are actually waiting for. */ GCC_FMT_ATTR(2, 3) static QDict *wait_command(QTestState *who, const char *command, ...) { va_list ap; va_start(ap, command); qtest_qmp_vsend(who, command, ap); va_end(ap); return qtest_qmp_receive_success(who, stop_cb, NULL); } /* * Note: caller is responsible to free the returned object via * qobject_unref() after use */ static QDict *migrate_query(QTestState *who) { return wait_command(who, "{ 'execute': 'query-migrate' }"); } /* * Note: caller is responsible to free the returned object via * g_free() after use */ static gchar *migrate_query_status(QTestState *who) { QDict *rsp_return = migrate_query(who); gchar *status = g_strdup(qdict_get_str(rsp_return, "status")); g_assert(status); qobject_unref(rsp_return); return status; } /* * It's tricky to use qemu's migration event capability with qtest, * events suddenly appearing confuse the qmp()/hmp() responses. 
*/ static int64_t read_ram_property_int(QTestState *who, const char *property) { QDict *rsp_return, *rsp_ram; int64_t result; rsp_return = migrate_query(who); if (!qdict_haskey(rsp_return, "ram")) { /* Still in setup */ result = 0; } else { rsp_ram = qdict_get_qdict(rsp_return, "ram"); result = qdict_get_try_int(rsp_ram, property, 0); } qobject_unref(rsp_return); return result; } static uint64_t get_migration_pass(QTestState *who) { return read_ram_property_int(who, "dirty-sync-count"); } static void read_blocktime(QTestState *who) { QDict *rsp_return; rsp_return = migrate_query(who); g_assert(qdict_haskey(rsp_return, "postcopy-blocktime")); qobject_unref(rsp_return); } static void wait_for_migration_status(QTestState *who, const char *goal) { while (true) { bool completed; char *status; status = migrate_query_status(who); completed = strcmp(status, goal) == 0; g_assert_cmpstr(status, !=, "failed"); g_free(status); if (completed) { return; } usleep(1000); } } static void wait_for_migration_complete(QTestState *who) { wait_for_migration_status(who, "completed"); } static void wait_for_migration_pass(QTestState *who) { uint64_t initial_pass = get_migration_pass(who); uint64_t pass; /* Wait for the 1st sync */ while (!got_stop && !initial_pass) { usleep(1000); initial_pass = get_migration_pass(who); } do { usleep(1000); pass = get_migration_pass(who); } while (pass == initial_pass && !got_stop); } static void check_guests_ram(QTestState *who) { /* Our ASM test will have been incrementing one byte from each page from * start_address to < end_address in order. This gives us a constraint * that any page's byte should be equal or less than the previous pages * byte (mod 256); and they should all be equal except for one transition * at the point where we meet the incrementer. (We're running this with * the guest stopped). 
*/ unsigned address; uint8_t first_byte; uint8_t last_byte; bool hit_edge = false; bool bad = false; qtest_memread(who, start_address, &first_byte, 1); last_byte = first_byte; for (address = start_address + TEST_MEM_PAGE_SIZE; address < end_address; address += TEST_MEM_PAGE_SIZE) { uint8_t b; qtest_memread(who, address, &b, 1); if (b != last_byte) { if (((b + 1) % 256) == last_byte && !hit_edge) { /* This is OK, the guest stopped at the point of * incrementing the previous page but didn't get * to us yet. */ hit_edge = true; last_byte = b; } else { fprintf(stderr, "Memory content inconsistency at %x" " first_byte = %x last_byte = %x current = %x" " hit_edge = %x\n", address, first_byte, last_byte, b, hit_edge); bad = true; } } } g_assert_false(bad); } static void cleanup(const char *filename) { char *path = g_strdup_printf("%s/%s", tmpfs, filename); unlink(path); g_free(path); } static char *get_shmem_opts(const char *mem_size, const char *shmem_path) { return g_strdup_printf("-object memory-backend-file,id=mem0,size=%s" ",mem-path=%s,share=on -numa node,memdev=mem0", mem_size, shmem_path); } static char *SocketAddress_to_str(SocketAddress *addr) { switch (addr->type) { case SOCKET_ADDRESS_TYPE_INET: return g_strdup_printf("tcp:%s:%s", addr->u.inet.host, addr->u.inet.port); case SOCKET_ADDRESS_TYPE_UNIX: return g_strdup_printf("unix:%s", addr->u.q_unix.path); case SOCKET_ADDRESS_TYPE_FD: return g_strdup_printf("fd:%s", addr->u.fd.str); case SOCKET_ADDRESS_TYPE_VSOCK: return g_strdup_printf("tcp:%s:%s", addr->u.vsock.cid, addr->u.vsock.port); default: return g_strdup("unknown address type"); } } static char *migrate_get_socket_address(QTestState *who, const char *parameter) { QDict *rsp; char *result; Error *local_err = NULL; SocketAddressList *addrs; Visitor *iv = NULL; QObject *object; rsp = migrate_query(who); object = qdict_get(rsp, parameter); iv = qobject_input_visitor_new(object); visit_type_SocketAddressList(iv, NULL, &addrs, &local_err); visit_free(iv); /* 
we are only using a single address */ result = SocketAddress_to_str(addrs->value); qapi_free_SocketAddressList(addrs); qobject_unref(rsp); return result; } static long long migrate_get_parameter(QTestState *who, const char *parameter) { QDict *rsp; long long result; rsp = wait_command(who, "{ 'execute': 'query-migrate-parameters' }"); result = qdict_get_int(rsp, parameter); qobject_unref(rsp); return result; } static void migrate_check_parameter(QTestState *who, const char *parameter, long long value) { long long result; result = migrate_get_parameter(who, parameter); g_assert_cmpint(result, ==, value); } static void migrate_set_parameter(QTestState *who, const char *parameter, long long value) { QDict *rsp; rsp = qtest_qmp(who, "{ 'execute': 'migrate-set-parameters'," "'arguments': { %s: %lld } }", parameter, value); g_assert(qdict_haskey(rsp, "return")); qobject_unref(rsp); migrate_check_parameter(who, parameter, value); } static void migrate_pause(QTestState *who) { QDict *rsp; rsp = wait_command(who, "{ 'execute': 'migrate-pause' }"); qobject_unref(rsp); } static void migrate_recover(QTestState *who, const char *uri) { QDict *rsp; rsp = wait_command(who, "{ 'execute': 'migrate-recover', " " 'id': 'recover-cmd', " " 'arguments': { 'uri': %s } }", uri); qobject_unref(rsp); } static void migrate_set_capability(QTestState *who, const char *capability, bool value) { QDict *rsp; rsp = qtest_qmp(who, "{ 'execute': 'migrate-set-capabilities'," "'arguments': { " "'capabilities': [ { " "'capability': %s, 'state': %i } ] } }", capability, value); g_assert(qdict_haskey(rsp, "return")); qobject_unref(rsp); } /* * Send QMP command "migrate". * Arguments are built from @fmt... (formatted like * qobject_from_jsonf_nofail()) with "uri": @uri spliced in. */ GCC_FMT_ATTR(3, 4) static void migrate(QTestState *who, const char *uri, const char *fmt, ...) 
{ va_list ap; QDict *args, *rsp; va_start(ap, fmt); args = qdict_from_vjsonf_nofail(fmt, ap); va_end(ap); g_assert(!qdict_haskey(args, "uri")); qdict_put_str(args, "uri", uri); rsp = qmp("{ 'execute': 'migrate', 'arguments': %p}", args); g_assert(qdict_haskey(rsp, "return")); qobject_unref(rsp); } static void migrate_postcopy_start(QTestState *from, QTestState *to) { QDict *rsp; rsp = wait_command(from, "{ 'execute': 'migrate-start-postcopy' }"); qobject_unref(rsp); if (!got_stop) { qtest_qmp_eventwait(from, "STOP"); } qtest_qmp_eventwait(to, "RESUME"); } static int test_migrate_start(QTestState **from, QTestState **to, const char *uri, bool hide_stderr, bool use_shmem) { gchar *cmd_src, *cmd_dst; char *bootpath = NULL; char *extra_opts = NULL; char *shmem_path = NULL; const char *arch = qtest_get_arch(); const char *accel = "kvm:tcg"; if (use_shmem) { if (!g_file_test("/dev/shm", G_FILE_TEST_IS_DIR)) { g_test_skip("/dev/shm is not supported"); return -1; } shmem_path = g_strdup_printf("/dev/shm/qemu-%d", getpid()); } got_stop = false; bootpath = g_strdup_printf("%s/bootsect", tmpfs); if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) { init_bootfile(bootpath, x86_bootsect); extra_opts = use_shmem ? get_shmem_opts("150M", shmem_path) : NULL; cmd_src = g_strdup_printf("-machine accel=%s -m 150M" " -name source,debug-threads=on" " -serial file:%s/src_serial" " -drive file=%s,format=raw %s", accel, tmpfs, bootpath, extra_opts ? extra_opts : ""); cmd_dst = g_strdup_printf("-machine accel=%s -m 150M" " -name target,debug-threads=on" " -serial file:%s/dest_serial" " -drive file=%s,format=raw" " -incoming %s %s", accel, tmpfs, bootpath, uri, extra_opts ? extra_opts : ""); start_address = X86_TEST_MEM_START; end_address = X86_TEST_MEM_END; } else if (g_str_equal(arch, "s390x")) { init_bootfile_s390x(bootpath); extra_opts = use_shmem ? 
get_shmem_opts("128M", shmem_path) : NULL; cmd_src = g_strdup_printf("-machine accel=%s -m 128M" " -name source,debug-threads=on" " -serial file:%s/src_serial -bios %s %s", accel, tmpfs, bootpath, extra_opts ? extra_opts : ""); cmd_dst = g_strdup_printf("-machine accel=%s -m 128M" " -name target,debug-threads=on" " -serial file:%s/dest_serial -bios %s" " -incoming %s %s", accel, tmpfs, bootpath, uri, extra_opts ? extra_opts : ""); start_address = S390_TEST_MEM_START; end_address = S390_TEST_MEM_END; } else if (strcmp(arch, "ppc64") == 0) { extra_opts = use_shmem ? get_shmem_opts("256M", shmem_path) : NULL; cmd_src = g_strdup_printf("-machine accel=%s -m 256M -nodefaults" " -name source,debug-threads=on" " -serial file:%s/src_serial" " -prom-env 'use-nvramrc?=true' -prom-env " "'nvramrc=hex .\" _\" begin %x %x " "do i c@ 1 + i c! 1000 +loop .\" B\" 0 " "until' %s", accel, tmpfs, end_address, start_address, extra_opts ? extra_opts : ""); cmd_dst = g_strdup_printf("-machine accel=%s -m 256M" " -name target,debug-threads=on" " -serial file:%s/dest_serial" " -incoming %s %s", accel, tmpfs, uri, extra_opts ? extra_opts : ""); start_address = PPC_TEST_MEM_START; end_address = PPC_TEST_MEM_END; } else if (strcmp(arch, "aarch64") == 0) { init_bootfile(bootpath, aarch64_kernel); extra_opts = use_shmem ? get_shmem_opts("150M", shmem_path) : NULL; cmd_src = g_strdup_printf("-machine virt,accel=%s,gic-version=max " "-name vmsource,debug-threads=on -cpu max " "-m 150M -serial file:%s/src_serial " "-kernel %s %s", accel, tmpfs, bootpath, extra_opts ? extra_opts : ""); cmd_dst = g_strdup_printf("-machine virt,accel=%s,gic-version=max " "-name vmdest,debug-threads=on -cpu max " "-m 150M -serial file:%s/dest_serial " "-kernel %s " "-incoming %s %s", accel, tmpfs, bootpath, uri, extra_opts ? 
extra_opts : ""); start_address = ARM_TEST_MEM_START; end_address = ARM_TEST_MEM_END; g_assert(sizeof(aarch64_kernel) <= ARM_TEST_MAX_KERNEL_SIZE); } else { g_assert_not_reached(); } g_free(bootpath); g_free(extra_opts); if (hide_stderr) { gchar *tmp; tmp = g_strdup_printf("%s 2>/dev/null", cmd_src); g_free(cmd_src); cmd_src = tmp; tmp = g_strdup_printf("%s 2>/dev/null", cmd_dst); g_free(cmd_dst); cmd_dst = tmp; } *from = qtest_start(cmd_src); g_free(cmd_src); *to = qtest_init(cmd_dst); g_free(cmd_dst); /* * Remove shmem file immediately to avoid memory leak in test failed case. * It's valid becase QEMU has already opened this file */ if (use_shmem) { unlink(shmem_path); g_free(shmem_path); } return 0; } static void test_migrate_end(QTestState *from, QTestState *to, bool test_dest) { unsigned char dest_byte_a, dest_byte_b, dest_byte_c, dest_byte_d; qtest_quit(from); if (test_dest) { qtest_memread(to, start_address, &dest_byte_a, 1); /* Destination still running, wait for a byte to change */ do { qtest_memread(to, start_address, &dest_byte_b, 1); usleep(1000 * 10); } while (dest_byte_a == dest_byte_b); qtest_qmp_discard_response(to, "{ 'execute' : 'stop'}"); /* With it stopped, check nothing changes */ qtest_memread(to, start_address, &dest_byte_c, 1); usleep(1000 * 200); qtest_memread(to, start_address, &dest_byte_d, 1); g_assert_cmpint(dest_byte_c, ==, dest_byte_d); check_guests_ram(to); } qtest_quit(to); cleanup("bootsect"); cleanup("migsocket"); cleanup("src_serial"); cleanup("dest_serial"); } static void deprecated_set_downtime(QTestState *who, const double value) { QDict *rsp; rsp = qtest_qmp(who, "{ 'execute': 'migrate_set_downtime'," " 'arguments': { 'value': %f } }", value); g_assert(qdict_haskey(rsp, "return")); qobject_unref(rsp); migrate_check_parameter(who, "downtime-limit", value * 1000); } static void deprecated_set_speed(QTestState *who, long long value) { QDict *rsp; rsp = qtest_qmp(who, "{ 'execute': 'migrate_set_speed'," "'arguments': { 'value': 
%lld } }", value); g_assert(qdict_haskey(rsp, "return")); qobject_unref(rsp); migrate_check_parameter(who, "max-bandwidth", value); } static void deprecated_set_cache_size(QTestState *who, long long value) { QDict *rsp; rsp = qtest_qmp(who, "{ 'execute': 'migrate-set-cache-size'," "'arguments': { 'value': %lld } }", value); g_assert(qdict_haskey(rsp, "return")); qobject_unref(rsp); migrate_check_parameter(who, "xbzrle-cache-size", value); } static void test_deprecated(void) { QTestState *from; from = qtest_start("-machine none"); deprecated_set_downtime(from, 0.12345); deprecated_set_speed(from, 12345); deprecated_set_cache_size(from, 4096); qtest_quit(from); } static int migrate_postcopy_prepare(QTestState **from_ptr, QTestState **to_ptr, bool hide_error) { char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); QTestState *from, *to; if (test_migrate_start(&from, &to, uri, hide_error, false)) { return -1; } migrate_set_capability(from, "postcopy-ram", true); migrate_set_capability(to, "postcopy-ram", true); migrate_set_capability(to, "postcopy-blocktime", true); /* We want to pick a speed slow enough that the test completes * quickly, but that it doesn't complete precopy even on a slow * machine, so also set the downtime. 
*/ migrate_set_parameter(from, "max-bandwidth", 100000000); migrate_set_parameter(from, "downtime-limit", 1); /* Wait for the first serial output from the source */ wait_for_serial("src_serial"); migrate(from, uri, "{}"); g_free(uri); wait_for_migration_pass(from); *from_ptr = from; *to_ptr = to; return 0; } static void migrate_postcopy_complete(QTestState *from, QTestState *to) { wait_for_migration_complete(from); /* Make sure we get at least one "B" on destination */ wait_for_serial("dest_serial"); if (uffd_feature_thread_id) { read_blocktime(to); } test_migrate_end(from, to, true); } static void test_postcopy(void) { QTestState *from, *to; if (migrate_postcopy_prepare(&from, &to, false)) { return; } migrate_postcopy_start(from, to); migrate_postcopy_complete(from, to); } static void test_postcopy_recovery(void) { QTestState *from, *to; char *uri; if (migrate_postcopy_prepare(&from, &to, true)) { return; } /* Turn postcopy speed down, 4K/s is slow enough on any machines */ migrate_set_parameter(from, "max-postcopy-bandwidth", 4096); /* Now we start the postcopy */ migrate_postcopy_start(from, to); /* * Wait until postcopy is really started; we can only run the * migrate-pause command during a postcopy */ wait_for_migration_status(from, "postcopy-active"); /* * Manually stop the postcopy migration. This emulates a network * failure with the migration socket */ migrate_pause(from); /* * Wait for destination side to reach postcopy-paused state. 
The * migrate-recover command can only succeed if destination machine * is in the paused state */ wait_for_migration_status(to, "postcopy-paused"); /* * Create a new socket to emulate a new channel that is different * from the broken migration channel; tell the destination to * listen to the new port */ uri = g_strdup_printf("unix:%s/migsocket-recover", tmpfs); migrate_recover(to, uri); /* * Try to rebuild the migration channel using the resume flag and * the newly created channel */ wait_for_migration_status(from, "postcopy-paused"); migrate(from, uri, "{'resume': true}"); g_free(uri); /* Restore the postcopy bandwidth to unlimited */ migrate_set_parameter(from, "max-postcopy-bandwidth", 0); migrate_postcopy_complete(from, to); } static void test_baddest(void) { QTestState *from, *to; QDict *rsp_return; char *status; bool failed; if (test_migrate_start(&from, &to, "tcp:0:0", true, false)) { return; } migrate(from, "tcp:0:0", "{}"); do { status = migrate_query_status(from); g_assert(!strcmp(status, "setup") || !(strcmp(status, "failed"))); failed = !strcmp(status, "failed"); g_free(status); } while (!failed); /* Is the machine currently running? */ rsp_return = wait_command(from, "{ 'execute': 'query-status' }"); g_assert(qdict_haskey(rsp_return, "running")); g_assert(qdict_get_bool(rsp_return, "running")); qobject_unref(rsp_return); test_migrate_end(from, to, false); } static void test_precopy_unix(void) { char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); QTestState *from, *to; if (test_migrate_start(&from, &to, uri, false, false)) { return; } /* We want to pick a speed slow enough that the test completes * quickly, but that it doesn't complete precopy even on a slow * machine, so also set the downtime. 
*/ /* 1 ms should make it not converge*/ migrate_set_parameter(from, "downtime-limit", 1); /* 1GB/s */ migrate_set_parameter(from, "max-bandwidth", 1000000000); /* Wait for the first serial output from the source */ wait_for_serial("src_serial"); migrate(from, uri, "{}"); wait_for_migration_pass(from); /* 300 ms should converge */ migrate_set_parameter(from, "downtime-limit", 300); if (!got_stop) { qtest_qmp_eventwait(from, "STOP"); } qtest_qmp_eventwait(to, "RESUME"); wait_for_serial("dest_serial"); wait_for_migration_complete(from); test_migrate_end(from, to, true); g_free(uri); } #if 0 /* Currently upset on aarch64 TCG */ static void test_ignore_shared(void) { char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); QTestState *from, *to; if (test_migrate_start(&from, &to, uri, false, true)) { return; } migrate_set_capability(from, "x-ignore-shared", true); migrate_set_capability(to, "x-ignore-shared", true); /* Wait for the first serial output from the source */ wait_for_serial("src_serial"); migrate(from, uri, "{}"); wait_for_migration_pass(from); if (!got_stop) { qtest_qmp_eventwait(from, "STOP"); } qtest_qmp_eventwait(to, "RESUME"); wait_for_serial("dest_serial"); wait_for_migration_complete(from); /* Check whether shared RAM has been really skipped */ g_assert_cmpint(read_ram_property_int(from, "transferred"), <, 1024 * 1024); test_migrate_end(from, to, true); g_free(uri); } #endif static void test_xbzrle(const char *uri) { QTestState *from, *to; if (test_migrate_start(&from, &to, uri, false, false)) { return; } /* * We want to pick a speed slow enough that the test completes * quickly, but that it doesn't complete precopy even on a slow * machine, so also set the downtime. 
*/ /* 1 ms should make it not converge*/ migrate_set_parameter(from, "downtime-limit", 1); /* 1GB/s */ migrate_set_parameter(from, "max-bandwidth", 1000000000); migrate_set_parameter(from, "xbzrle-cache-size", 33554432); migrate_set_capability(from, "xbzrle", "true"); migrate_set_capability(to, "xbzrle", "true"); /* Wait for the first serial output from the source */ wait_for_serial("src_serial"); migrate(from, uri, "{}"); wait_for_migration_pass(from); /* 300ms should converge */ migrate_set_parameter(from, "downtime-limit", 300); if (!got_stop) { qtest_qmp_eventwait(from, "STOP"); } qtest_qmp_eventwait(to, "RESUME"); wait_for_serial("dest_serial"); wait_for_migration_complete(from); test_migrate_end(from, to, true); } static void test_xbzrle_unix(void) { char *uri = g_strdup_printf("unix:%s/migsocket", tmpfs); test_xbzrle(uri); g_free(uri); } static void test_precopy_tcp(void) { char *uri; QTestState *from, *to; if (test_migrate_start(&from, &to, "tcp:127.0.0.1:0", false, false)) { return; } /* * We want to pick a speed slow enough that the test completes * quickly, but that it doesn't complete precopy even on a slow * machine, so also set the downtime. 
*/ /* 1 ms should make it not converge*/ migrate_set_parameter(from, "downtime-limit", 1); /* 1GB/s */ migrate_set_parameter(from, "max-bandwidth", 1000000000); /* Wait for the first serial output from the source */ wait_for_serial("src_serial"); uri = migrate_get_socket_address(to, "socket-address"); migrate(from, uri, "{}"); wait_for_migration_pass(from); /* 300ms should converge */ migrate_set_parameter(from, "downtime-limit", 300); if (!got_stop) { qtest_qmp_eventwait(from, "STOP"); } qtest_qmp_eventwait(to, "RESUME"); wait_for_serial("dest_serial"); wait_for_migration_complete(from); test_migrate_end(from, to, true); g_free(uri); } int main(int argc, char **argv) { char template[] = "/tmp/migration-test-XXXXXX"; int ret; g_test_init(&argc, &argv, NULL); if (!ufd_version_check()) { return g_test_run(); } /* * On ppc64, the test only works with kvm-hv, but not with kvm-pr and TCG * is touchy due to race conditions on dirty bits (especially on PPC for * some reason) */ if (g_str_equal(qtest_get_arch(), "ppc64") && access("/sys/module/kvm_hv", F_OK)) { g_test_message("Skipping test: kvm_hv not available"); return g_test_run(); } /* * Similar to ppc64, s390x seems to be touchy with TCG, so disable it * there until the problems are resolved */ if (g_str_equal(qtest_get_arch(), "s390x")) { #if defined(HOST_S390X) if (access("/dev/kvm", R_OK | W_OK)) { g_test_message("Skipping test: kvm not available"); return g_test_run(); } #else g_test_message("Skipping test: Need s390x host to work properly"); return g_test_run(); #endif } tmpfs = mkdtemp(template); if (!tmpfs) { g_test_message("mkdtemp on path (%s): %s", template, strerror(errno)); } g_assert(tmpfs); module_call_init(MODULE_INIT_QOM); qtest_add_func("/migration/postcopy/unix", test_postcopy); qtest_add_func("/migration/postcopy/recovery", test_postcopy_recovery); qtest_add_func("/migration/deprecated", test_deprecated); qtest_add_func("/migration/bad_dest", test_baddest); qtest_add_func("/migration/precopy/unix", 
test_precopy_unix); qtest_add_func("/migration/precopy/tcp", test_precopy_tcp); /* qtest_add_func("/migration/ignore_shared", test_ignore_shared); */ qtest_add_func("/migration/xbzrle/unix", test_xbzrle_unix); ret = g_test_run(); g_assert_cmpint(ret, ==, 0); ret = rmdir(tmpfs); if (ret != 0) { g_test_message("unable to rmdir: path (%s): %s", tmpfs, strerror(errno)); } return ret; }
pmp-tool/PMP
src/qemu/src-pmp/include/chardev/char-fe.h
#ifndef QEMU_CHAR_FE_H
#define QEMU_CHAR_FE_H

#include "chardev/char.h"

/* Event callback: invoked with CHR_EVENT_* codes (open, close, break, ...) */
typedef void IOEventHandler(void *opaque, int event);
/* Backend-change callback: return < 0 to veto a chardev hotswap */
typedef int BackendChangeHandler(void *opaque);

/* This is the backend as seen by frontend, the actual backend is
 * Chardev */
struct CharBackend {
    Chardev *chr;
    IOEventHandler *chr_event;
    IOCanReadHandler *chr_can_read;
    IOReadHandler *chr_read;
    BackendChangeHandler *chr_be_change;
    void *opaque;           /* opaque pointer passed to the handlers above */
    int tag;                /* mux focus tag (used when chr is a mux) */
    int fe_open;            /* whether the front end is currently open */
};

/**
 * qemu_chr_fe_init:
 * @b: the front end to initialize
 * @s: the character backend to associate with @b
 * @errp: pointer to a NULL-initialized error object
 *
 * Initializes a front end for the given CharBackend and
 * Chardev. Call qemu_chr_fe_deinit() to remove the association and
 * release the driver.
 *
 * Returns: false on error.
 */
bool qemu_chr_fe_init(CharBackend *b, Chardev *s, Error **errp);

/**
 * qemu_chr_fe_deinit:
 * @b: a CharBackend
 * @del: if true, delete the chardev backend
 *
 * Dissociate the CharBackend from the Chardev.
 *
 * Safe to call without associated Chardev.
 */
void qemu_chr_fe_deinit(CharBackend *b, bool del);

/**
 * qemu_chr_fe_get_driver:
 *
 * Returns: the driver associated with a CharBackend or NULL if no
 * associated Chardev.
 * Note: avoid this function as the driver should never be accessed directly,
 * especially by the frontends that support chardevice hotswap.
 * Consider qemu_chr_fe_backend_connected() to check for driver existence
 */
Chardev *qemu_chr_fe_get_driver(CharBackend *be);

/**
 * qemu_chr_fe_backend_connected:
 *
 * Returns: true if there is a chardevice associated with @be.
 */
bool qemu_chr_fe_backend_connected(CharBackend *be);

/**
 * qemu_chr_fe_backend_open:
 *
 * Returns: true if chardevice associated with @be is open.
 */
bool qemu_chr_fe_backend_open(CharBackend *be);

/**
 * qemu_chr_fe_set_handlers_full:
 * @b: a CharBackend
 * @fd_can_read: callback to get the amount of data the frontend may
 *               receive
 * @fd_read: callback to receive data from char
 * @fd_event: event callback
 * @be_change: backend change callback; passing NULL means hot backend change
 *             is not supported and will not be attempted
 * @opaque: an opaque pointer for the callbacks
 * @context: a main loop context or NULL for the default
 * @set_open: whether to call qemu_chr_fe_set_open() implicitly when
 * any of the handler is non-NULL
 * @sync_state: whether to issue event callback with updated state
 *
 * Set the front end char handlers. The front end takes the focus if
 * any of the handler is non-NULL.
 *
 * Without associated Chardev, nothing is changed.
 */
void qemu_chr_fe_set_handlers_full(CharBackend *b,
                                   IOCanReadHandler *fd_can_read,
                                   IOReadHandler *fd_read,
                                   IOEventHandler *fd_event,
                                   BackendChangeHandler *be_change,
                                   void *opaque,
                                   GMainContext *context,
                                   bool set_open,
                                   bool sync_state);

/**
 * qemu_chr_fe_set_handlers:
 *
 * Version of qemu_chr_fe_set_handlers_full() with sync_state = true.
 */
void qemu_chr_fe_set_handlers(CharBackend *b,
                              IOCanReadHandler *fd_can_read,
                              IOReadHandler *fd_read,
                              IOEventHandler *fd_event,
                              BackendChangeHandler *be_change,
                              void *opaque,
                              GMainContext *context,
                              bool set_open);

/**
 * qemu_chr_fe_take_focus:
 *
 * Take the focus (if the front end is muxed).
 *
 * Without associated Chardev, nothing is changed.
 */
void qemu_chr_fe_take_focus(CharBackend *b);

/**
 * qemu_chr_fe_accept_input:
 *
 * Notify that the frontend is ready to receive data
 */
void qemu_chr_fe_accept_input(CharBackend *be);

/**
 * qemu_chr_fe_disconnect:
 *
 * Close a fd accepted by character backend.
 * Without associated Chardev, do nothing.
 */
void qemu_chr_fe_disconnect(CharBackend *be);

/**
 * qemu_chr_fe_wait_connected:
 *
 * Wait for character backend to be connected, return < 0 on error or
 * if no associated Chardev.
 */
int qemu_chr_fe_wait_connected(CharBackend *be, Error **errp);

/**
 * qemu_chr_fe_set_echo:
 * @echo: true to enable echo, false to disable echo
 *
 * Ask the backend to override its normal echo setting. This only really
 * applies to the stdio backend and is used by the QMP server such that you
 * can see what you type if you try to type QMP commands.
 * Without associated Chardev, do nothing.
 */
void qemu_chr_fe_set_echo(CharBackend *be, bool echo);

/**
 * qemu_chr_fe_set_open:
 * @fe_open: non-zero to mark the front end open, zero to mark it closed
 *
 * Set character frontend open status. This is an indication that the
 * front end is ready (or not) to begin doing I/O.
 * Without associated Chardev, do nothing.
 */
void qemu_chr_fe_set_open(CharBackend *be, int fe_open);

/**
 * qemu_chr_fe_printf:
 * @fmt: see #printf
 *
 * Write to a character backend using a printf style interface. This
 * function is thread-safe. It does nothing without associated
 * Chardev.
 */
void qemu_chr_fe_printf(CharBackend *be, const char *fmt, ...)
    GCC_FMT_ATTR(2, 3);

/**
 * qemu_chr_fe_add_watch:
 * @cond: the condition to poll for
 * @func: the function to call when the condition happens
 * @user_data: the opaque pointer to pass to @func
 *
 * If the backend is connected, create and add a #GSource that fires
 * when the given condition (typically G_IO_OUT|G_IO_HUP or G_IO_HUP)
 * is active; return the #GSource's tag. If it is disconnected,
 * or without associated Chardev, return 0.
 *
 * Note that you are responsible to update the front-end sources if
 * you are switching the main context with qemu_chr_fe_set_handlers().
 *
 * Returns: the source tag
 */
guint qemu_chr_fe_add_watch(CharBackend *be, GIOCondition cond,
                            GIOFunc func, void *user_data);

/**
 * qemu_chr_fe_write:
 * @buf: the data
 * @len: the number of bytes to send
 *
 * Write data to a character backend from the front end. This function
 * will send data from the front end to the back end. This function
 * is thread-safe.
 *
 * Returns: the number of bytes consumed (0 if no associated Chardev)
 */
int qemu_chr_fe_write(CharBackend *be, const uint8_t *buf, int len);

/**
 * qemu_chr_fe_write_all:
 * @buf: the data
 * @len: the number of bytes to send
 *
 * Write data to a character backend from the front end. This function will
 * send data from the front end to the back end. Unlike @qemu_chr_fe_write,
 * this function will block if the back end cannot consume all of the data
 * attempted to be written. This function is thread-safe.
 *
 * Returns: the number of bytes consumed (0 if no associated Chardev)
 */
int qemu_chr_fe_write_all(CharBackend *be, const uint8_t *buf, int len);

/**
 * qemu_chr_fe_read_all:
 * @buf: the data buffer
 * @len: the number of bytes to read
 *
 * Read data to a buffer from the back end.
 *
 * Returns: the number of bytes read (0 if no associated Chardev)
 */
int qemu_chr_fe_read_all(CharBackend *be, uint8_t *buf, int len);

/**
 * qemu_chr_fe_ioctl:
 * @cmd: see CHR_IOCTL_*
 * @arg: the data associated with @cmd
 *
 * Issue a device specific ioctl to a backend.  This function is thread-safe.
 *
 * Returns: if @cmd is not supported by the backend or there is no
 *          associated Chardev, -ENOTSUP, otherwise the return
 *          value depends on the semantics of @cmd
 */
int qemu_chr_fe_ioctl(CharBackend *be, int cmd, void *arg);

/**
 * qemu_chr_fe_get_msgfd:
 *
 * For backends capable of fd passing, return the latest file descriptor passed
 * by a client.
 *
 * Returns: -1 if fd passing isn't supported or there is no pending file
 *          descriptor.  If a file descriptor is returned, subsequent calls to
 *          this function will return -1 until a client sends a new file
 *          descriptor.
 */
int qemu_chr_fe_get_msgfd(CharBackend *be);

/**
 * qemu_chr_fe_get_msgfds:
 *
 * For backends capable of fd passing, return the number of received file
 * descriptors and fill the fds array up to num elements
 *
 * Returns: -1 if fd passing isn't supported or there are no pending file
 *          descriptors.  If file descriptors are returned, subsequent calls to
 *          this function will return -1 until a client sends a new set of file
 *          descriptors.
 */
int qemu_chr_fe_get_msgfds(CharBackend *be, int *fds, int num);

/**
 * qemu_chr_fe_set_msgfds:
 *
 * For backends capable of fd passing, set an array of fds to be passed with
 * the next send operation.
 * A subsequent call to this function before calling a write function will
 * result in overwriting the fd array with the new value without being sent.
 * Upon writing the message the fd array is freed.
 *
 * Returns: -1 if fd passing isn't supported or no associated Chardev.
 */
int qemu_chr_fe_set_msgfds(CharBackend *be, int *fds, int num);

#endif /* QEMU_CHAR_FE_H */
pmp-tool/PMP
src/qemu/src-pmp/tests/libqos/arm-sabrelite-machine.c
<filename>src/qemu/src-pmp/tests/libqos/arm-sabrelite-machine.c<gh_stars>1-10 /* * libqos driver framework * * Copyright (c) 2018 <NAME> <<EMAIL>> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2 as published by the Free Software Foundation. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/> */ #include "qemu/osdep.h" #include "libqtest.h" #include "libqos/malloc.h" #include "libqos/qgraph.h" #include "sdhci.h" #define ARM_PAGE_SIZE 4096 #define SABRELITE_RAM_START 0x10000000 #define SABRELITE_RAM_END 0x30000000 typedef struct QSabreliteMachine QSabreliteMachine; struct QSabreliteMachine { QOSGraphObject obj; QGuestAllocator alloc; QSDHCI_MemoryMapped sdhci; }; static void *sabrelite_get_driver(void *object, const char *interface) { QSabreliteMachine *machine = object; if (!g_strcmp0(interface, "memory")) { return &machine->alloc; } fprintf(stderr, "%s not present in arm/sabrelite\n", interface); g_assert_not_reached(); } static QOSGraphObject *sabrelite_get_device(void *obj, const char *device) { QSabreliteMachine *machine = obj; if (!g_strcmp0(device, "generic-sdhci")) { return &machine->sdhci.obj; } fprintf(stderr, "%s not present in arm/sabrelite\n", device); g_assert_not_reached(); } static void sabrelite_destructor(QOSGraphObject *obj) { QSabreliteMachine *machine = (QSabreliteMachine *) obj; alloc_destroy(&machine->alloc); } static void *qos_create_machine_arm_sabrelite(QTestState *qts) { QSabreliteMachine *machine = g_new0(QSabreliteMachine, 1); alloc_init(&machine->alloc, 0, SABRELITE_RAM_START, SABRELITE_RAM_END, 
ARM_PAGE_SIZE); machine->obj.get_device = sabrelite_get_device; machine->obj.get_driver = sabrelite_get_driver; machine->obj.destructor = sabrelite_destructor; qos_init_sdhci_mm(&machine->sdhci, qts, 0x02190000, &(QSDHCIProperties) { .version = 3, .baseclock = 0, .capab.sdma = true, .capab.reg = 0x057834b4, }); return &machine->obj; } static void sabrelite_register_nodes(void) { qos_node_create_machine("arm/sabrelite", qos_create_machine_arm_sabrelite); qos_node_contains("arm/sabrelite", "generic-sdhci", NULL); } libqos_init(sabrelite_register_nodes);
pmp-tool/PMP
src/qemu/src-pmp/hw/misc/mips_cpc.c
<gh_stars>1-10 /* * Cluster Power Controller emulation * * Copyright (c) 2016 Imagination Technologies * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. */ #include "qemu/osdep.h" #include "qapi/error.h" #include "cpu.h" #include "qemu/log.h" #include "hw/sysbus.h" #include "hw/misc/mips_cpc.h" static inline uint64_t cpc_vp_run_mask(MIPSCPCState *cpc) { return (1ULL << cpc->num_vp) - 1; } static void mips_cpu_reset_async_work(CPUState *cs, run_on_cpu_data data) { MIPSCPCState *cpc = (MIPSCPCState *) data.host_ptr; cpu_reset(cs); cpc->vp_running |= 1ULL << cs->cpu_index; } static void cpc_run_vp(MIPSCPCState *cpc, uint64_t vp_run) { CPUState *cs = first_cpu; CPU_FOREACH(cs) { uint64_t i = 1ULL << cs->cpu_index; if (i & vp_run & ~cpc->vp_running) { /* * To avoid racing with a CPU we are just kicking off. * We do the final bit of preparation for the work in * the target CPUs context. 
*/ async_safe_run_on_cpu(cs, mips_cpu_reset_async_work, RUN_ON_CPU_HOST_PTR(cpc)); } } } static void cpc_stop_vp(MIPSCPCState *cpc, uint64_t vp_stop) { CPUState *cs = first_cpu; CPU_FOREACH(cs) { uint64_t i = 1ULL << cs->cpu_index; if (i & vp_stop & cpc->vp_running) { cpu_interrupt(cs, CPU_INTERRUPT_HALT); cpc->vp_running &= ~i; } } } static void cpc_write(void *opaque, hwaddr offset, uint64_t data, unsigned size) { MIPSCPCState *s = opaque; switch (offset) { case CPC_CL_BASE_OFS + CPC_VP_RUN_OFS: case CPC_CO_BASE_OFS + CPC_VP_RUN_OFS: cpc_run_vp(s, data & cpc_vp_run_mask(s)); break; case CPC_CL_BASE_OFS + CPC_VP_STOP_OFS: case CPC_CO_BASE_OFS + CPC_VP_STOP_OFS: cpc_stop_vp(s, data & cpc_vp_run_mask(s)); break; default: qemu_log_mask(LOG_UNIMP, "%s: Bad offset 0x%x\n", __func__, (int)offset); break; } return; } static uint64_t cpc_read(void *opaque, hwaddr offset, unsigned size) { MIPSCPCState *s = opaque; switch (offset) { case CPC_CL_BASE_OFS + CPC_VP_RUNNING_OFS: case CPC_CO_BASE_OFS + CPC_VP_RUNNING_OFS: return s->vp_running; default: qemu_log_mask(LOG_UNIMP, "%s: Bad offset 0x%x\n", __func__, (int)offset); return 0; } } static const MemoryRegionOps cpc_ops = { .read = cpc_read, .write = cpc_write, .endianness = DEVICE_NATIVE_ENDIAN, .impl = { .max_access_size = 8, }, }; static void mips_cpc_init(Object *obj) { SysBusDevice *sbd = SYS_BUS_DEVICE(obj); MIPSCPCState *s = MIPS_CPC(obj); memory_region_init_io(&s->mr, OBJECT(s), &cpc_ops, s, "mips-cpc", CPC_ADDRSPACE_SZ); sysbus_init_mmio(sbd, &s->mr); } static void mips_cpc_realize(DeviceState *dev, Error **errp) { MIPSCPCState *s = MIPS_CPC(dev); if (s->vp_start_running > cpc_vp_run_mask(s)) { error_setg(errp, "incorrect vp_start_running 0x%" PRIx64 " for num_vp = %d", s->vp_running, s->num_vp); return; } } static void mips_cpc_reset(DeviceState *dev) { MIPSCPCState *s = MIPS_CPC(dev); /* Reflect the fact that all VPs are halted on reset */ s->vp_running = 0; /* Put selected VPs into run state */ cpc_run_vp(s, 
s->vp_start_running); } static const VMStateDescription vmstate_mips_cpc = { .name = "mips-cpc", .version_id = 0, .minimum_version_id = 0, .fields = (VMStateField[]) { VMSTATE_UINT64(vp_running, MIPSCPCState), VMSTATE_END_OF_LIST() }, }; static Property mips_cpc_properties[] = { DEFINE_PROP_UINT32("num-vp", MIPSCPCState, num_vp, 0x1), DEFINE_PROP_UINT64("vp-start-running", MIPSCPCState, vp_start_running, 0x1), DEFINE_PROP_END_OF_LIST(), }; static void mips_cpc_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = mips_cpc_realize; dc->reset = mips_cpc_reset; dc->vmsd = &vmstate_mips_cpc; dc->props = mips_cpc_properties; } static const TypeInfo mips_cpc_info = { .name = TYPE_MIPS_CPC, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(MIPSCPCState), .instance_init = mips_cpc_init, .class_init = mips_cpc_class_init, }; static void mips_cpc_register_types(void) { type_register_static(&mips_cpc_info); } type_init(mips_cpc_register_types)
pmp-tool/PMP
src/qemu/src-pmp/target/ppc/mmu-radix64.c
/*
 * PowerPC Radix MMU emulation helpers for QEMU.
 *
 * Copyright (c) 2016 <NAME>, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"

/*
 * Derive the (lpid, pid) pair that qualifies @eaddr, based on MSR[HV]
 * and the address quadrant.  Returns false for quadrants that are
 * illegal in guest mode (a segment interrupt should then be raised).
 */
static bool ppc_radix64_get_fully_qualified_addr(CPUPPCState *env, vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        }
    } else {  /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        }
    }

    return true;
}

/* Raise an Instruction/Data Segment Interrupt for @eaddr */
static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
    } else { /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
    }
    env->error_code = 0;
}

/* Raise an Instruction/Data Storage Interrupt with DSISR/SRR1 @cause */
static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr,
                                 uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
    } else { /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        if (rwx == 1) { /* Write -> Store */
            cause |= DSISR_ISSTORE;
        }
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
    }
}

/*
 * Check whether the access @rwx is permitted by leaf PTE @pte.
 * On denial, returns true and accumulates the reason into @fault_cause;
 * on success, returns false with the allowed protection in @prot.
 */
static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte,
                                   int *fault_cause, int *prot)
{
    CPUPPCState *env = &cpu->env;
    const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC };

    /* Check Page Attributes (pte58:59) */
    if (((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO) && (rwx == 2)) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if ((pte & R_PTE_EAA_PRIV) && msr_pr) { /* Insufficient Privilege */
        *prot = 0;
    } else if (msr_pr || (pte & R_PTE_EAA_PRIV)) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */
        *fault_cause |= DSISR_PROTFAULT;
        return true;
    }

    return false;
}

/*
 * Update the Reference and Change bits of the PTE in guest memory, and
 * restrict @prot so that a first store still traps here to set C.
 */
static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
                               hwaddr pte_addr, int *prot)
{
    CPUState *cs = CPU(cpu);
    uint64_t npte;

    npte = pte | R_PTE_R; /* Always set reference bit */

    if (rwx == 1) { /* Store/Write */
        npte |= R_PTE_C; /* Set change bit */
    } else {
        /*
         * Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit.
         */
        *prot &= ~PAGE_WRITE;
    }

    if (pte ^ npte) { /* If pte has changed then write it back */
        stq_phys(cs->as, pte_addr, npte);
    }
}

/*
 * Recursively walk the radix tree rooted at @base_addr.  Returns the
 * leaf PTE (0 on failure, with @fault_cause set), and fills in the
 * translated @raddr, remaining @psize and the PTE's address @pte_addr.
 */
static uint64_t ppc_radix64_walk_tree(PowerPCCPU *cpu, vaddr eaddr,
                                      uint64_t base_addr, uint64_t nls,
                                      hwaddr *raddr, int *psize,
                                      int *fault_cause, hwaddr *pte_addr)
{
    CPUState *cs = CPU(cpu);
    uint64_t index, pde;

    if (nls < 5) { /* Directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 0;
    }

    /* Read page <directory/table> entry from guest address space */
    index = eaddr >> (*psize - nls); /* Shift */
    index &= ((1UL << nls) - 1); /* Mask */
    pde = ldq_phys(cs->as, base_addr + (index * sizeof(pde)));
    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 0;
    }

    *psize -= nls;

    /* Check if Leaf Entry -> Page Table Entry -> Stop the Search */
    if (pde & R_PTE_LEAF) {
        uint64_t rpn = pde & R_PTE_RPN;
        uint64_t mask = (1UL << *psize) - 1;

        /* Or high bits of rpn and low bits to ea to form whole real addr */
        *raddr = (rpn & ~mask) | (eaddr & mask);
        *pte_addr = base_addr + (index * sizeof(pde));
        return pde;
    }

    /* Next Level of Radix Tree */
    return ppc_radix64_walk_tree(cpu, eaddr, pde & R_PDE_NLB, pde & R_PDE_NLS,
                                 raddr, psize, fault_cause, pte_addr);
}

/* Sanity-check a Partition Table Entry before using it for translation */
static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
{
    CPUPPCState *env = &cpu->env;

    if (!(pate->dw0 & PATE0_HR)) {
        return false;
    }
    if (lpid == 0 && !msr_hv) {
        return false;
    }
    /* More checks ... */
    return true;
}

/*
 * TLB-miss handler for radix translation: resolve @eaddr for access
 * type @rwx (0 = load, 1 = store, 2 = fetch), installing a TLB entry
 * on success.  Returns 0 on success, 1 if an interrupt was raised.
 */
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                 int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PPCVirtualHypervisorClass *vhc;
    hwaddr raddr, pte_addr;
    uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
    int page_size, prot, fault_cause = 0;
    ppc_v3_pate_t pate;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));
    assert(ppc64_use_proc_tbl(cpu));

    /* Real Mode Access */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
        ppc_radix64_raise_segi(cpu, rwx, eaddr);
        return 1;
    }

    /* Get Process Table */
    if (cpu->vhyp) {
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->get_pate(cpu->vhyp, &pate);
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
            return 1;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
            /*
             * Do not keep translating with a PATE that failed
             * validation; deliver the interrupt we just raised.
             */
            return 1;
        }
        /* We don't support guest mode yet */
        if (lpid != 0) {
            error_report("PowerNV guest support Unimplemented");
            exit(1);
        }
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
        return 1;
    }
    prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    page_size = PRTBE_R_GET_RTS(prtbe0);
    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
                                &raddr, &page_size, &fault_cause, &pte_addr);
    if (!pte || ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, &prot)) {
        /* Couldn't get pte or access denied due to protection */
        ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
        return 1;
    }

    /* Update Reference and Change Bits */
    ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, &prot);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1UL << page_size);
    return 0;
}

/*
 * gdbstub/monitor address translation: same walk as the fault handler
 * but raises no interrupts and performs no R/C updates.  Returns the
 * real address or -1 if the address does not translate.
 */
hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    PPCVirtualHypervisorClass *vhc;
    hwaddr raddr, pte_addr;
    uint64_t lpid = 0, pid = 0, offset, size, prtbe0, pte;
    int page_size, fault_cause = 0;
    ppc_v3_pate_t pate;

    /* Handle Real Mode */
    if (msr_dr == 0) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        return eaddr & 0x0FFFFFFFFFFFFFFFULL;
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(env, eaddr, &lpid, &pid)) {
        return -1;
    }

    /* Get Process Table */
    if (cpu->vhyp) {
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->get_pate(cpu->vhyp, &pate);
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            return -1;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            return -1;
        }
        /* We don't support guest mode yet */
        if (lpid != 0) {
            error_report("PowerNV guest support Unimplemented");
            exit(1);
        }
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        return -1;
    }
    prtbe0 = ldq_phys(cs->as, (pate.dw1 & PATE1_R_PRTB) + offset);

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    page_size = PRTBE_R_GET_RTS(prtbe0);
    pte = ppc_radix64_walk_tree(cpu, eaddr & R_EADDR_MASK,
                                prtbe0 & PRTBE_R_RPDB, prtbe0 & PRTBE_R_RPDS,
                                &raddr, &page_size, &fault_cause, &pte_addr);
    if (!pte) {
        return -1;
    }

    return raddr & TARGET_PAGE_MASK;
}
pmp-tool/PMP
src/qemu/src-pmp/hw/display/virtio-vga.c
#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/pci/pci.h"
#include "vga_int.h"
#include "hw/virtio/virtio-pci.h"
#include "hw/virtio/virtio-gpu.h"
#include "qapi/error.h"

/*
 * virtio-vga: This extends VirtioPCIProxy.
 *
 * A virtio-gpu device embedded in a PCI proxy together with a legacy
 * VGA-compat core, so guests without virtio drivers still get a display.
 * Console callbacks are routed to the virtio-gpu side while it is
 * enabled, and to the VGA core otherwise.
 */
#define TYPE_VIRTIO_VGA "virtio-vga"
#define VIRTIO_VGA(obj) \
    OBJECT_CHECK(VirtIOVGA, (obj), TYPE_VIRTIO_VGA)
#define VIRTIO_VGA_GET_CLASS(obj) \
    OBJECT_GET_CLASS(VirtIOVGAClass, obj, TYPE_VIRTIO_VGA)
#define VIRTIO_VGA_CLASS(klass) \
    OBJECT_CLASS_CHECK(VirtIOVGAClass, klass, TYPE_VIRTIO_VGA)

typedef struct VirtIOVGA {
    VirtIOPCIProxy parent_obj;   /* PCI proxy we extend */
    VirtIOGPU vdev;              /* embedded virtio-gpu device */
    VGACommonState vga;          /* legacy VGA-compat state */
    MemoryRegion vga_mrs[3];     /* stdvga mmio regions placed in bar #2 */
} VirtIOVGA;

typedef struct VirtIOVGAClass {
    VirtioPCIClass parent_class;
    DeviceReset parent_reset;    /* saved virtio-gpu reset, chained in virtio_vga_reset */
} VirtIOVGAClass;

/* Console op: invalidate via virtio-gpu when enabled, else via VGA core. */
static void virtio_vga_invalidate_display(void *opaque)
{
    VirtIOVGA *vvga = opaque;

    if (vvga->vdev.enable) {
        virtio_gpu_ops.invalidate(&vvga->vdev);
    } else {
        vvga->vga.hw_ops->invalidate(&vvga->vga);
    }
}

/* Console op: refresh the display, dispatched like invalidate above. */
static void virtio_vga_update_display(void *opaque)
{
    VirtIOVGA *vvga = opaque;

    if (vvga->vdev.enable) {
        virtio_gpu_ops.gfx_update(&vvga->vdev);
    } else {
        vvga->vga.hw_ops->gfx_update(&vvga->vga);
    }
}

/* Console op: text-mode update; optional on both backends, so check first. */
static void virtio_vga_text_update(void *opaque, console_ch_t *chardata)
{
    VirtIOVGA *vvga = opaque;

    if (vvga->vdev.enable) {
        if (virtio_gpu_ops.text_update) {
            virtio_gpu_ops.text_update(&vvga->vdev, chardata);
        }
    } else {
        if (vvga->vga.hw_ops->text_update) {
            vvga->vga.hw_ops->text_update(&vvga->vga, chardata);
        }
    }
}

/* Console op: forward UI geometry hints to virtio-gpu; -1 if unsupported. */
static int virtio_vga_ui_info(void *opaque, uint32_t idx, QemuUIInfo *info)
{
    VirtIOVGA *vvga = opaque;

    if (virtio_gpu_ops.ui_info) {
        return virtio_gpu_ops.ui_info(&vvga->vdev, idx, info);
    }
    return -1;
}

/* Console op: forward GL pipeline block/unblock to virtio-gpu if present. */
static void virtio_vga_gl_block(void *opaque, bool block)
{
    VirtIOVGA *vvga = opaque;

    if (virtio_gpu_ops.gl_block) {
        virtio_gpu_ops.gl_block(&vvga->vdev, block);
    }
}

static const GraphicHwOps virtio_vga_ops = {
    .invalidate = virtio_vga_invalidate_display,
    .gfx_update = virtio_vga_update_display,
    .text_update = virtio_vga_text_update,
    .ui_info = virtio_vga_ui_info,
    .gl_block = virtio_vga_gl_block,
};

static const VMStateDescription vmstate_virtio_vga = {
    .name = "virtio-vga",
    .version_id = 2,
    .minimum_version_id = 2,
    .fields = (VMStateField[]) {
        /* no pci stuff here, saving the virtio device will handle that */
        VMSTATE_STRUCT(vga, VirtIOVGA, 0, vmstate_vga_common, VGACommonState),
        VMSTATE_END_OF_LIST()
    }
};

/* VGA device wrapper around PCI device around virtio GPU */
static void virtio_vga_realize(VirtIOPCIProxy *vpci_dev, Error **errp)
{
    VirtIOVGA *vvga = VIRTIO_VGA(vpci_dev);
    VirtIOGPU *g = &vvga->vdev;
    VGACommonState *vga = &vvga->vga;
    Error *err = NULL;
    uint32_t offset;
    int i;

    /* init vga compat bits: 8 MB vram in a prefetchable bar #0 */
    vga->vram_size_mb = 8;
    vga_common_init(vga, OBJECT(vpci_dev));
    vga_init(vga, OBJECT(vpci_dev), pci_address_space(&vpci_dev->pci_dev),
             pci_address_space_io(&vpci_dev->pci_dev), true);
    pci_register_bar(&vpci_dev->pci_dev, 0,
                     PCI_BASE_ADDRESS_MEM_PREFETCH, &vga->vram);

    /*
     * Configure virtio bar and regions
     *
     * We use bar #2 for the mmio regions, to be compatible with stdvga.
     * virtio regions are moved to the end of bar #2, to make room for
     * the stdvga mmio registers at the start of bar #2.
     */
    vpci_dev->modern_mem_bar_idx = 2;
    vpci_dev->msix_bar_idx = 4;

    if (!(vpci_dev->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ)) {
        /*
         * with page-per-vq=off there is no padding space we can use
         * for the stdvga registers.  Make the common and isr regions
         * smaller then.
         */
        vpci_dev->common.size /= 2;
        vpci_dev->isr.size /= 2;
    }

    /* Pack the virtio regions back-to-back at the end of bar #2. */
    offset = memory_region_size(&vpci_dev->modern_bar);
    offset -= vpci_dev->notify.size;
    vpci_dev->notify.offset = offset;
    offset -= vpci_dev->device.size;
    vpci_dev->device.offset = offset;
    offset -= vpci_dev->isr.size;
    vpci_dev->isr.offset = offset;
    offset -= vpci_dev->common.size;
    vpci_dev->common.offset = offset;

    /* init virtio bits */
    qdev_set_parent_bus(DEVICE(g), BUS(&vpci_dev->bus));
    virtio_pci_force_virtio_1(vpci_dev);
    object_property_set_bool(OBJECT(g), true, "realized", &err);
    if (err) {
        error_propagate(errp, err);
        return;
    }

    /* add stdvga mmio regions */
    pci_std_vga_mmio_region_init(vga, OBJECT(vvga), &vpci_dev->modern_bar,
                                 vvga->vga_mrs, true, false);

    /* Share the first virtio-gpu scanout's console with the VGA core. */
    vga->con = g->scanout[0].con;
    graphic_console_set_hwops(vga->con, &virtio_vga_ops, vvga);

    for (i = 0; i < g->conf.max_outputs; i++) {
        object_property_set_link(OBJECT(g->scanout[i].con),
                                 OBJECT(vpci_dev),
                                 "device", errp);
    }
}

/* Reset the embedded virtio-gpu first (chained parent reset), then VGA. */
static void virtio_vga_reset(DeviceState *dev)
{
    VirtIOVGAClass *klass = VIRTIO_VGA_GET_CLASS(dev);
    VirtIOVGA *vvga = VIRTIO_VGA(dev);

    /* reset virtio-gpu */
    klass->parent_reset(dev);

    /* reset vga */
    vga_common_reset(&vvga->vga);
    vga_dirty_log_start(&vvga->vga);
}

static Property virtio_vga_properties[] = {
    DEFINE_VIRTIO_GPU_PCI_PROPERTIES(VirtIOPCIProxy),
    DEFINE_PROP_END_OF_LIST(),
};

/* QOM class init: wire properties, vmstate, reset chain and PCI identity. */
static void virtio_vga_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
    VirtIOVGAClass *v = VIRTIO_VGA_CLASS(klass);
    PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);

    set_bit(DEVICE_CATEGORY_DISPLAY, dc->categories);
    dc->props = virtio_vga_properties;
    dc->vmsd = &vmstate_virtio_vga;
    dc->hotpluggable = false;
    device_class_set_parent_reset(dc, virtio_vga_reset,
                                  &v->parent_reset);

    k->realize = virtio_vga_realize;
    pcidev_k->romfile = "vgabios-virtio.bin";
    pcidev_k->class_id = PCI_CLASS_DISPLAY_VGA;
}

/* Instance init: embed the virtio-gpu child device inside this object. */
static void virtio_vga_inst_initfn(Object *obj)
{
    VirtIOVGA *dev = VIRTIO_VGA(obj);

    virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev),
                                TYPE_VIRTIO_GPU);
}

static VirtioPCIDeviceTypeInfo virtio_vga_info = {
    .generic_name = TYPE_VIRTIO_VGA,
    .instance_size = sizeof(struct VirtIOVGA),
    .instance_init = virtio_vga_inst_initfn,
    .class_size = sizeof(struct VirtIOVGAClass),
    .class_init = virtio_vga_class_init,
};

static void virtio_vga_register_types(void)
{
    virtio_pci_types_register(&virtio_vga_info);
}

type_init(virtio_vga_register_types)
pmp-tool/PMP
src/qemu/src-pmp/tests/tcg/minilib/printf.c
<filename>src/qemu/src-pmp/tests/tcg/minilib/printf.c /* * Copyright (C) 2015 Virtual Open Systems SAS * Author: <NAME> <<EMAIL>> * * printf based on implementation by <NAME> <<EMAIL>> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation. * * SPDX-License-Identifier: GPL-2.0-only */ #include "minilib.h" typedef __builtin_va_list va_list; #define va_start(ap, X) __builtin_va_start(ap, X) #define va_arg(ap, type) __builtin_va_arg(ap, type) #define va_end(ap) __builtin_va_end(ap) static void print_str(char *s) { while (*s) { __sys_outc(*s++); } } static void print_num(unsigned long long value, int base) { char digits[] = "0123456789abcdef"; char buf[32]; int i = sizeof(buf) - 2, j; /* Set the buffer to 0. See problem of before. */ for (j = 0; j < 32; j++) { buf[j] = 0; } do { buf[i--] = digits[value % base]; value /= base; } while (value); print_str(&buf[i + 1]); } void ml_printf(const char *fmt, ...) 
{ va_list ap; char *str; int base; int has_long; int alt_form; unsigned long long val; va_start(ap, fmt); for (; *fmt; fmt++) { if (*fmt != '%') { __sys_outc(*fmt); continue; } fmt++; if (*fmt == '#') { fmt++; alt_form = 1; } else { alt_form = 0; } if (*fmt == 'l') { fmt++; if (*fmt == 'l') { fmt++; has_long = 2; } else { has_long = 1; } } else { has_long = 0; } switch (*fmt) { case 'x': case 'p': base = 16; goto convert_number; case 'd': case 'i': case 'u': base = 10; goto convert_number; case 'o': base = 8; goto convert_number; convert_number: switch (has_long) { case 0: val = va_arg(ap, unsigned int); break; case 1: val = va_arg(ap, unsigned long); break; case 2: val = va_arg(ap, unsigned long long); break; } if (alt_form && base == 16) { print_str("0x"); } print_num(val, base); break; case 's': str = va_arg(ap, char*); print_str(str); break; case '%': __sys_outc(*fmt); break; default: __sys_outc('%'); __sys_outc(*fmt); break; } } va_end(ap); }
pmp-tool/PMP
src/qemu/src-pmp/include/qemu/uuid.h
<gh_stars>1-10 /* * QEMU UUID functions * * Copyright 2016 Red Hat, Inc. * * Authors: * <NAME> <<EMAIL>> * * This program is free software; you can redistribute it and/or modify it * under the terms of the GNU General Public License as published by the Free * Software Foundation; either version 2 of the License, or (at your option) * any later version. * */ #ifndef QEMU_UUID_H #define QEMU_UUID_H #include "qemu-common.h" /* Version 4 UUID (pseudo random numbers), RFC4122 4.4. */ typedef struct { union { unsigned char data[16]; struct { /* Generated in BE endian, can be swapped with qemu_uuid_bswap. */ uint32_t time_low; uint16_t time_mid; uint16_t time_high_and_version; uint8_t clock_seq_and_reserved; uint8_t clock_seq_low; uint8_t node[6]; } fields; }; } QemuUUID; #define UUID_FMT "%02hhx%02hhx%02hhx%02hhx-" \ "%02hhx%02hhx-%02hhx%02hhx-" \ "%02hhx%02hhx-" \ "%02hhx%02hhx%02hhx%02hhx%02hhx%02hhx" #define UUID_FMT_LEN 36 #define UUID_NONE "00000000-0000-0000-0000-000000000000" void qemu_uuid_generate(QemuUUID *out); int qemu_uuid_is_null(const QemuUUID *uu); int qemu_uuid_is_equal(const QemuUUID *lhv, const QemuUUID *rhv); void qemu_uuid_unparse(const QemuUUID *uuid, char *out); char *qemu_uuid_unparse_strdup(const QemuUUID *uuid); int qemu_uuid_parse(const char *str, QemuUUID *uuid); QemuUUID qemu_uuid_bswap(QemuUUID uuid); #endif
pmp-tool/PMP
src/qemu/src-pmp/dump.c
/* * QEMU dump * * Copyright Fujitsu, Corp. 2011, 2012 * * Authors: * <NAME> <<EMAIL>> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * */ #include "qemu/osdep.h" #include "qemu/cutils.h" #include "elf.h" #include "cpu.h" #include "exec/hwaddr.h" #include "monitor/monitor.h" #include "sysemu/kvm.h" #include "sysemu/dump.h" #include "sysemu/sysemu.h" #include "sysemu/memory_mapping.h" #include "sysemu/cpus.h" #include "qapi/error.h" #include "qapi/qapi-commands-misc.h" #include "qapi/qapi-events-misc.h" #include "qapi/qmp/qerror.h" #include "qemu/error-report.h" #include "hw/misc/vmcoreinfo.h" #ifdef TARGET_X86_64 #include "win_dump.h" #endif #include <zlib.h> #ifdef CONFIG_LZO #include <lzo/lzo1x.h> #endif #ifdef CONFIG_SNAPPY #include <snappy-c.h> #endif #ifndef ELF_MACHINE_UNAME #define ELF_MACHINE_UNAME "Unknown" #endif #define MAX_GUEST_NOTE_SIZE (1 << 20) /* 1MB should be enough */ #define ELF_NOTE_SIZE(hdr_size, name_size, desc_size) \ ((DIV_ROUND_UP((hdr_size), 4) + \ DIV_ROUND_UP((name_size), 4) + \ DIV_ROUND_UP((desc_size), 4)) * 4) uint16_t cpu_to_dump16(DumpState *s, uint16_t val) { if (s->dump_info.d_endian == ELFDATA2LSB) { val = cpu_to_le16(val); } else { val = cpu_to_be16(val); } return val; } uint32_t cpu_to_dump32(DumpState *s, uint32_t val) { if (s->dump_info.d_endian == ELFDATA2LSB) { val = cpu_to_le32(val); } else { val = cpu_to_be32(val); } return val; } uint64_t cpu_to_dump64(DumpState *s, uint64_t val) { if (s->dump_info.d_endian == ELFDATA2LSB) { val = cpu_to_le64(val); } else { val = cpu_to_be64(val); } return val; } static int dump_cleanup(DumpState *s) { guest_phys_blocks_free(&s->guest_phys_blocks); memory_mapping_list_free(&s->list); close(s->fd); g_free(s->guest_note); s->guest_note = NULL; if (s->resume) { if (s->detached) { qemu_mutex_lock_iothread(); } vm_start(); if (s->detached) { qemu_mutex_unlock_iothread(); } } return 0; } static int 
fd_write_vmcore(const void *buf, size_t size, void *opaque) { DumpState *s = opaque; size_t written_size; written_size = qemu_write_full(s->fd, buf, size); if (written_size != size) { return -errno; } return 0; } static void write_elf64_header(DumpState *s, Error **errp) { Elf64_Ehdr elf_header; int ret; memset(&elf_header, 0, sizeof(Elf64_Ehdr)); memcpy(&elf_header, ELFMAG, SELFMAG); elf_header.e_ident[EI_CLASS] = ELFCLASS64; elf_header.e_ident[EI_DATA] = s->dump_info.d_endian; elf_header.e_ident[EI_VERSION] = EV_CURRENT; elf_header.e_type = cpu_to_dump16(s, ET_CORE); elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine); elf_header.e_version = cpu_to_dump32(s, EV_CURRENT); elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); elf_header.e_phoff = cpu_to_dump64(s, sizeof(Elf64_Ehdr)); elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf64_Phdr)); elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num); if (s->have_section) { uint64_t shoff = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info; elf_header.e_shoff = cpu_to_dump64(s, shoff); elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf64_Shdr)); elf_header.e_shnum = cpu_to_dump16(s, 1); } ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s); if (ret < 0) { error_setg_errno(errp, -ret, "dump: failed to write elf header"); } } static void write_elf32_header(DumpState *s, Error **errp) { Elf32_Ehdr elf_header; int ret; memset(&elf_header, 0, sizeof(Elf32_Ehdr)); memcpy(&elf_header, ELFMAG, SELFMAG); elf_header.e_ident[EI_CLASS] = ELFCLASS32; elf_header.e_ident[EI_DATA] = s->dump_info.d_endian; elf_header.e_ident[EI_VERSION] = EV_CURRENT; elf_header.e_type = cpu_to_dump16(s, ET_CORE); elf_header.e_machine = cpu_to_dump16(s, s->dump_info.d_machine); elf_header.e_version = cpu_to_dump32(s, EV_CURRENT); elf_header.e_ehsize = cpu_to_dump16(s, sizeof(elf_header)); elf_header.e_phoff = cpu_to_dump32(s, sizeof(Elf32_Ehdr)); elf_header.e_phentsize = cpu_to_dump16(s, sizeof(Elf32_Phdr)); 
elf_header.e_phnum = cpu_to_dump16(s, s->phdr_num); if (s->have_section) { uint32_t shoff = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info; elf_header.e_shoff = cpu_to_dump32(s, shoff); elf_header.e_shentsize = cpu_to_dump16(s, sizeof(Elf32_Shdr)); elf_header.e_shnum = cpu_to_dump16(s, 1); } ret = fd_write_vmcore(&elf_header, sizeof(elf_header), s); if (ret < 0) { error_setg_errno(errp, -ret, "dump: failed to write elf header"); } } static void write_elf64_load(DumpState *s, MemoryMapping *memory_mapping, int phdr_index, hwaddr offset, hwaddr filesz, Error **errp) { Elf64_Phdr phdr; int ret; memset(&phdr, 0, sizeof(Elf64_Phdr)); phdr.p_type = cpu_to_dump32(s, PT_LOAD); phdr.p_offset = cpu_to_dump64(s, offset); phdr.p_paddr = cpu_to_dump64(s, memory_mapping->phys_addr); phdr.p_filesz = cpu_to_dump64(s, filesz); phdr.p_memsz = cpu_to_dump64(s, memory_mapping->length); phdr.p_vaddr = cpu_to_dump64(s, memory_mapping->virt_addr) ?: phdr.p_paddr; assert(memory_mapping->length >= filesz); ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s); if (ret < 0) { error_setg_errno(errp, -ret, "dump: failed to write program header table"); } } static void write_elf32_load(DumpState *s, MemoryMapping *memory_mapping, int phdr_index, hwaddr offset, hwaddr filesz, Error **errp) { Elf32_Phdr phdr; int ret; memset(&phdr, 0, sizeof(Elf32_Phdr)); phdr.p_type = cpu_to_dump32(s, PT_LOAD); phdr.p_offset = cpu_to_dump32(s, offset); phdr.p_paddr = cpu_to_dump32(s, memory_mapping->phys_addr); phdr.p_filesz = cpu_to_dump32(s, filesz); phdr.p_memsz = cpu_to_dump32(s, memory_mapping->length); phdr.p_vaddr = cpu_to_dump32(s, memory_mapping->virt_addr) ?: phdr.p_paddr; assert(memory_mapping->length >= filesz); ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s); if (ret < 0) { error_setg_errno(errp, -ret, "dump: failed to write program header table"); } } static void write_elf64_note(DumpState *s, Error **errp) { Elf64_Phdr phdr; hwaddr begin = s->memory_offset - s->note_size; int ret; 
memset(&phdr, 0, sizeof(Elf64_Phdr)); phdr.p_type = cpu_to_dump32(s, PT_NOTE); phdr.p_offset = cpu_to_dump64(s, begin); phdr.p_paddr = 0; phdr.p_filesz = cpu_to_dump64(s, s->note_size); phdr.p_memsz = cpu_to_dump64(s, s->note_size); phdr.p_vaddr = 0; ret = fd_write_vmcore(&phdr, sizeof(Elf64_Phdr), s); if (ret < 0) { error_setg_errno(errp, -ret, "dump: failed to write program header table"); } } static inline int cpu_index(CPUState *cpu) { return cpu->cpu_index + 1; } static void write_guest_note(WriteCoreDumpFunction f, DumpState *s, Error **errp) { int ret; if (s->guest_note) { ret = f(s->guest_note, s->guest_note_size, s); if (ret < 0) { error_setg(errp, "dump: failed to write guest note"); } } } static void write_elf64_notes(WriteCoreDumpFunction f, DumpState *s, Error **errp) { CPUState *cpu; int ret; int id; CPU_FOREACH(cpu) { id = cpu_index(cpu); ret = cpu_write_elf64_note(f, cpu, id, s); if (ret < 0) { error_setg(errp, "dump: failed to write elf notes"); return; } } CPU_FOREACH(cpu) { ret = cpu_write_elf64_qemunote(f, cpu, s); if (ret < 0) { error_setg(errp, "dump: failed to write CPU status"); return; } } write_guest_note(f, s, errp); } static void write_elf32_note(DumpState *s, Error **errp) { hwaddr begin = s->memory_offset - s->note_size; Elf32_Phdr phdr; int ret; memset(&phdr, 0, sizeof(Elf32_Phdr)); phdr.p_type = cpu_to_dump32(s, PT_NOTE); phdr.p_offset = cpu_to_dump32(s, begin); phdr.p_paddr = 0; phdr.p_filesz = cpu_to_dump32(s, s->note_size); phdr.p_memsz = cpu_to_dump32(s, s->note_size); phdr.p_vaddr = 0; ret = fd_write_vmcore(&phdr, sizeof(Elf32_Phdr), s); if (ret < 0) { error_setg_errno(errp, -ret, "dump: failed to write program header table"); } } static void write_elf32_notes(WriteCoreDumpFunction f, DumpState *s, Error **errp) { CPUState *cpu; int ret; int id; CPU_FOREACH(cpu) { id = cpu_index(cpu); ret = cpu_write_elf32_note(f, cpu, id, s); if (ret < 0) { error_setg(errp, "dump: failed to write elf notes"); return; } } CPU_FOREACH(cpu) { ret 
= cpu_write_elf32_qemunote(f, cpu, s); if (ret < 0) { error_setg(errp, "dump: failed to write CPU status"); return; } } write_guest_note(f, s, errp); } static void write_elf_section(DumpState *s, int type, Error **errp) { Elf32_Shdr shdr32; Elf64_Shdr shdr64; int shdr_size; void *shdr; int ret; if (type == 0) { shdr_size = sizeof(Elf32_Shdr); memset(&shdr32, 0, shdr_size); shdr32.sh_info = cpu_to_dump32(s, s->sh_info); shdr = &shdr32; } else { shdr_size = sizeof(Elf64_Shdr); memset(&shdr64, 0, shdr_size); shdr64.sh_info = cpu_to_dump32(s, s->sh_info); shdr = &shdr64; } ret = fd_write_vmcore(&shdr, shdr_size, s); if (ret < 0) { error_setg_errno(errp, -ret, "dump: failed to write section header table"); } } static void write_data(DumpState *s, void *buf, int length, Error **errp) { int ret; ret = fd_write_vmcore(buf, length, s); if (ret < 0) { error_setg_errno(errp, -ret, "dump: failed to save memory"); } else { s->written_size += length; } } /* write the memory to vmcore. 1 page per I/O. 
*/ static void write_memory(DumpState *s, GuestPhysBlock *block, ram_addr_t start, int64_t size, Error **errp) { int64_t i; Error *local_err = NULL; for (i = 0; i < size / s->dump_info.page_size; i++) { write_data(s, block->host_addr + start + i * s->dump_info.page_size, s->dump_info.page_size, &local_err); if (local_err) { error_propagate(errp, local_err); return; } } if ((size % s->dump_info.page_size) != 0) { write_data(s, block->host_addr + start + i * s->dump_info.page_size, size % s->dump_info.page_size, &local_err); if (local_err) { error_propagate(errp, local_err); return; } } } /* get the memory's offset and size in the vmcore */ static void get_offset_range(hwaddr phys_addr, ram_addr_t mapping_length, DumpState *s, hwaddr *p_offset, hwaddr *p_filesz) { GuestPhysBlock *block; hwaddr offset = s->memory_offset; int64_t size_in_block, start; /* When the memory is not stored into vmcore, offset will be -1 */ *p_offset = -1; *p_filesz = 0; if (s->has_filter) { if (phys_addr < s->begin || phys_addr >= s->begin + s->length) { return; } } QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { if (s->has_filter) { if (block->target_start >= s->begin + s->length || block->target_end <= s->begin) { /* This block is out of the range */ continue; } if (s->begin <= block->target_start) { start = block->target_start; } else { start = s->begin; } size_in_block = block->target_end - start; if (s->begin + s->length < block->target_end) { size_in_block -= block->target_end - (s->begin + s->length); } } else { start = block->target_start; size_in_block = block->target_end - block->target_start; } if (phys_addr >= start && phys_addr < start + size_in_block) { *p_offset = phys_addr - start + offset; /* The offset range mapped from the vmcore file must not spill over * the GuestPhysBlock, clamp it. The rest of the mapping will be * zero-filled in memory at load time; see * <http://refspecs.linuxbase.org/elf/gabi4+/ch5.pheader.html>. 
*/ *p_filesz = phys_addr + mapping_length <= start + size_in_block ? mapping_length : size_in_block - (phys_addr - start); return; } offset += size_in_block; } } static void write_elf_loads(DumpState *s, Error **errp) { hwaddr offset, filesz; MemoryMapping *memory_mapping; uint32_t phdr_index = 1; uint32_t max_index; Error *local_err = NULL; if (s->have_section) { max_index = s->sh_info; } else { max_index = s->phdr_num; } QTAILQ_FOREACH(memory_mapping, &s->list.head, next) { get_offset_range(memory_mapping->phys_addr, memory_mapping->length, s, &offset, &filesz); if (s->dump_info.d_class == ELFCLASS64) { write_elf64_load(s, memory_mapping, phdr_index++, offset, filesz, &local_err); } else { write_elf32_load(s, memory_mapping, phdr_index++, offset, filesz, &local_err); } if (local_err) { error_propagate(errp, local_err); return; } if (phdr_index >= max_index) { break; } } } /* write elf header, PT_NOTE and elf note to vmcore. */ static void dump_begin(DumpState *s, Error **errp) { Error *local_err = NULL; /* * the vmcore's format is: * -------------- * | elf header | * -------------- * | PT_NOTE | * -------------- * | PT_LOAD | * -------------- * | ...... | * -------------- * | PT_LOAD | * -------------- * | sec_hdr | * -------------- * | elf note | * -------------- * | memory | * -------------- * * we only know where the memory is saved after we write elf note into * vmcore. 
*/ /* write elf header to vmcore */ if (s->dump_info.d_class == ELFCLASS64) { write_elf64_header(s, &local_err); } else { write_elf32_header(s, &local_err); } if (local_err) { error_propagate(errp, local_err); return; } if (s->dump_info.d_class == ELFCLASS64) { /* write PT_NOTE to vmcore */ write_elf64_note(s, &local_err); if (local_err) { error_propagate(errp, local_err); return; } /* write all PT_LOAD to vmcore */ write_elf_loads(s, &local_err); if (local_err) { error_propagate(errp, local_err); return; } /* write section to vmcore */ if (s->have_section) { write_elf_section(s, 1, &local_err); if (local_err) { error_propagate(errp, local_err); return; } } /* write notes to vmcore */ write_elf64_notes(fd_write_vmcore, s, &local_err); if (local_err) { error_propagate(errp, local_err); return; } } else { /* write PT_NOTE to vmcore */ write_elf32_note(s, &local_err); if (local_err) { error_propagate(errp, local_err); return; } /* write all PT_LOAD to vmcore */ write_elf_loads(s, &local_err); if (local_err) { error_propagate(errp, local_err); return; } /* write section to vmcore */ if (s->have_section) { write_elf_section(s, 0, &local_err); if (local_err) { error_propagate(errp, local_err); return; } } /* write notes to vmcore */ write_elf32_notes(fd_write_vmcore, s, &local_err); if (local_err) { error_propagate(errp, local_err); return; } } } static int get_next_block(DumpState *s, GuestPhysBlock *block) { while (1) { block = QTAILQ_NEXT(block, next); if (!block) { /* no more block */ return 1; } s->start = 0; s->next_block = block; if (s->has_filter) { if (block->target_start >= s->begin + s->length || block->target_end <= s->begin) { /* This block is out of the range */ continue; } if (s->begin > block->target_start) { s->start = s->begin - block->target_start; } } return 0; } } /* write all memory to vmcore */ static void dump_iterate(DumpState *s, Error **errp) { GuestPhysBlock *block; int64_t size; Error *local_err = NULL; do { block = s->next_block; size = 
block->target_end - block->target_start; if (s->has_filter) { size -= s->start; if (s->begin + s->length < block->target_end) { size -= block->target_end - (s->begin + s->length); } } write_memory(s, block, s->start, size, &local_err); if (local_err) { error_propagate(errp, local_err); return; } } while (!get_next_block(s, block)); } static void create_vmcore(DumpState *s, Error **errp) { Error *local_err = NULL; dump_begin(s, &local_err); if (local_err) { error_propagate(errp, local_err); return; } dump_iterate(s, errp); } static int write_start_flat_header(int fd) { MakedumpfileHeader *mh; int ret = 0; QEMU_BUILD_BUG_ON(sizeof *mh > MAX_SIZE_MDF_HEADER); mh = g_malloc0(MAX_SIZE_MDF_HEADER); memcpy(mh->signature, MAKEDUMPFILE_SIGNATURE, MIN(sizeof mh->signature, sizeof MAKEDUMPFILE_SIGNATURE)); mh->type = cpu_to_be64(TYPE_FLAT_HEADER); mh->version = cpu_to_be64(VERSION_FLAT_HEADER); size_t written_size; written_size = qemu_write_full(fd, mh, MAX_SIZE_MDF_HEADER); if (written_size != MAX_SIZE_MDF_HEADER) { ret = -1; } g_free(mh); return ret; } static int write_end_flat_header(int fd) { MakedumpfileDataHeader mdh; mdh.offset = END_FLAG_FLAT_HEADER; mdh.buf_size = END_FLAG_FLAT_HEADER; size_t written_size; written_size = qemu_write_full(fd, &mdh, sizeof(mdh)); if (written_size != sizeof(mdh)) { return -1; } return 0; } static int write_buffer(int fd, off_t offset, const void *buf, size_t size) { size_t written_size; MakedumpfileDataHeader mdh; mdh.offset = cpu_to_be64(offset); mdh.buf_size = cpu_to_be64(size); written_size = qemu_write_full(fd, &mdh, sizeof(mdh)); if (written_size != sizeof(mdh)) { return -1; } written_size = qemu_write_full(fd, buf, size); if (written_size != size) { return -1; } return 0; } static int buf_write_note(const void *buf, size_t size, void *opaque) { DumpState *s = opaque; /* note_buf is not enough */ if (s->note_buf_offset + size > s->note_size) { return -1; } memcpy(s->note_buf + s->note_buf_offset, buf, size); s->note_buf_offset += 
size; return 0; } /* * This function retrieves various sizes from an elf header. * * @note has to be a valid ELF note. The return sizes are unmodified * (not padded or rounded up to be multiple of 4). */ static void get_note_sizes(DumpState *s, const void *note, uint64_t *note_head_size, uint64_t *name_size, uint64_t *desc_size) { uint64_t note_head_sz; uint64_t name_sz; uint64_t desc_sz; if (s->dump_info.d_class == ELFCLASS64) { const Elf64_Nhdr *hdr = note; note_head_sz = sizeof(Elf64_Nhdr); name_sz = tswap64(hdr->n_namesz); desc_sz = tswap64(hdr->n_descsz); } else { const Elf32_Nhdr *hdr = note; note_head_sz = sizeof(Elf32_Nhdr); name_sz = tswap32(hdr->n_namesz); desc_sz = tswap32(hdr->n_descsz); } if (note_head_size) { *note_head_size = note_head_sz; } if (name_size) { *name_size = name_sz; } if (desc_size) { *desc_size = desc_sz; } } static bool note_name_equal(DumpState *s, const uint8_t *note, const char *name) { int len = strlen(name) + 1; uint64_t head_size, name_size; get_note_sizes(s, note, &head_size, &name_size, NULL); head_size = ROUND_UP(head_size, 4); return name_size == len && memcmp(note + head_size, name, len) == 0; } /* write common header, sub header and elf note to vmcore */ static void create_header32(DumpState *s, Error **errp) { DiskDumpHeader32 *dh = NULL; KdumpSubHeader32 *kh = NULL; size_t size; uint32_t block_size; uint32_t sub_hdr_size; uint32_t bitmap_blocks; uint32_t status = 0; uint64_t offset_note; Error *local_err = NULL; /* write common header, the version of kdump-compressed format is 6th */ size = sizeof(DiskDumpHeader32); dh = g_malloc0(size); memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN); dh->header_version = cpu_to_dump32(s, 6); block_size = s->dump_info.page_size; dh->block_size = cpu_to_dump32(s, block_size); sub_hdr_size = sizeof(struct KdumpSubHeader32) + s->note_size; sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size); dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size); /* dh->max_mapnr may be truncated, full 64bit 
is in kh.max_mapnr_64 */ dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX)); dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus); bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2; dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks); strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine)); if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) { status |= DUMP_DH_COMPRESSED_ZLIB; } #ifdef CONFIG_LZO if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) { status |= DUMP_DH_COMPRESSED_LZO; } #endif #ifdef CONFIG_SNAPPY if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) { status |= DUMP_DH_COMPRESSED_SNAPPY; } #endif dh->status = cpu_to_dump32(s, status); if (write_buffer(s->fd, 0, dh, size) < 0) { error_setg(errp, "dump: failed to write disk dump header"); goto out; } /* write sub header */ size = sizeof(KdumpSubHeader32); kh = g_malloc0(size); /* 64bit max_mapnr_64 */ kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr); kh->phys_base = cpu_to_dump32(s, s->dump_info.phys_base); kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL); offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size; if (s->guest_note && note_name_equal(s, s->guest_note, "VMCOREINFO")) { uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo; get_note_sizes(s, s->guest_note, &hsize, &name_size, &size_vmcoreinfo_desc); offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size + (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4; kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo); kh->size_vmcoreinfo = cpu_to_dump32(s, size_vmcoreinfo_desc); } kh->offset_note = cpu_to_dump64(s, offset_note); kh->note_size = cpu_to_dump32(s, s->note_size); if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS * block_size, kh, size) < 0) { error_setg(errp, "dump: failed to write kdump sub header"); goto out; } /* write note */ s->note_buf = g_malloc0(s->note_size); s->note_buf_offset = 0; /* use s->note_buf to store notes temporarily */ write_elf32_notes(buf_write_note, 
s, &local_err); if (local_err) { error_propagate(errp, local_err); goto out; } if (write_buffer(s->fd, offset_note, s->note_buf, s->note_size) < 0) { error_setg(errp, "dump: failed to write notes"); goto out; } /* get offset of dump_bitmap */ s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) * block_size; /* get offset of page */ s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) * block_size; out: g_free(dh); g_free(kh); g_free(s->note_buf); } /* write common header, sub header and elf note to vmcore */ static void create_header64(DumpState *s, Error **errp) { DiskDumpHeader64 *dh = NULL; KdumpSubHeader64 *kh = NULL; size_t size; uint32_t block_size; uint32_t sub_hdr_size; uint32_t bitmap_blocks; uint32_t status = 0; uint64_t offset_note; Error *local_err = NULL; /* write common header, the version of kdump-compressed format is 6th */ size = sizeof(DiskDumpHeader64); dh = g_malloc0(size); memcpy(dh->signature, KDUMP_SIGNATURE, SIG_LEN); dh->header_version = cpu_to_dump32(s, 6); block_size = s->dump_info.page_size; dh->block_size = cpu_to_dump32(s, block_size); sub_hdr_size = sizeof(struct KdumpSubHeader64) + s->note_size; sub_hdr_size = DIV_ROUND_UP(sub_hdr_size, block_size); dh->sub_hdr_size = cpu_to_dump32(s, sub_hdr_size); /* dh->max_mapnr may be truncated, full 64bit is in kh.max_mapnr_64 */ dh->max_mapnr = cpu_to_dump32(s, MIN(s->max_mapnr, UINT_MAX)); dh->nr_cpus = cpu_to_dump32(s, s->nr_cpus); bitmap_blocks = DIV_ROUND_UP(s->len_dump_bitmap, block_size) * 2; dh->bitmap_blocks = cpu_to_dump32(s, bitmap_blocks); strncpy(dh->utsname.machine, ELF_MACHINE_UNAME, sizeof(dh->utsname.machine)); if (s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) { status |= DUMP_DH_COMPRESSED_ZLIB; } #ifdef CONFIG_LZO if (s->flag_compress & DUMP_DH_COMPRESSED_LZO) { status |= DUMP_DH_COMPRESSED_LZO; } #endif #ifdef CONFIG_SNAPPY if (s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) { status |= DUMP_DH_COMPRESSED_SNAPPY; } #endif dh->status = 
cpu_to_dump32(s, status); if (write_buffer(s->fd, 0, dh, size) < 0) { error_setg(errp, "dump: failed to write disk dump header"); goto out; } /* write sub header */ size = sizeof(KdumpSubHeader64); kh = g_malloc0(size); /* 64bit max_mapnr_64 */ kh->max_mapnr_64 = cpu_to_dump64(s, s->max_mapnr); kh->phys_base = cpu_to_dump64(s, s->dump_info.phys_base); kh->dump_level = cpu_to_dump32(s, DUMP_LEVEL); offset_note = DISKDUMP_HEADER_BLOCKS * block_size + size; if (s->guest_note && note_name_equal(s, s->guest_note, "VMCOREINFO")) { uint64_t hsize, name_size, size_vmcoreinfo_desc, offset_vmcoreinfo; get_note_sizes(s, s->guest_note, &hsize, &name_size, &size_vmcoreinfo_desc); offset_vmcoreinfo = offset_note + s->note_size - s->guest_note_size + (DIV_ROUND_UP(hsize, 4) + DIV_ROUND_UP(name_size, 4)) * 4; kh->offset_vmcoreinfo = cpu_to_dump64(s, offset_vmcoreinfo); kh->size_vmcoreinfo = cpu_to_dump64(s, size_vmcoreinfo_desc); } kh->offset_note = cpu_to_dump64(s, offset_note); kh->note_size = cpu_to_dump64(s, s->note_size); if (write_buffer(s->fd, DISKDUMP_HEADER_BLOCKS * block_size, kh, size) < 0) { error_setg(errp, "dump: failed to write kdump sub header"); goto out; } /* write note */ s->note_buf = g_malloc0(s->note_size); s->note_buf_offset = 0; /* use s->note_buf to store notes temporarily */ write_elf64_notes(buf_write_note, s, &local_err); if (local_err) { error_propagate(errp, local_err); goto out; } if (write_buffer(s->fd, offset_note, s->note_buf, s->note_size) < 0) { error_setg(errp, "dump: failed to write notes"); goto out; } /* get offset of dump_bitmap */ s->offset_dump_bitmap = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size) * block_size; /* get offset of page */ s->offset_page = (DISKDUMP_HEADER_BLOCKS + sub_hdr_size + bitmap_blocks) * block_size; out: g_free(dh); g_free(kh); g_free(s->note_buf); } static void write_dump_header(DumpState *s, Error **errp) { Error *local_err = NULL; if (s->dump_info.d_class == ELFCLASS32) { create_header32(s, &local_err); } else { 
create_header64(s, &local_err);
    }
    error_propagate(errp, local_err);
}

/* Size in bytes of one dump-bitmap write buffer: one guest page. */
static size_t dump_bitmap_get_bufsize(DumpState *s)
{
    return s->dump_info.page_size;
}

/*
 * Set bits in dump_bitmap sequentially.  The bit before last_pfn is not
 * allowed to be rewritten, so if the first bit needs to be set, set both
 * last_pfn and pfn to 0.  set_dump_bitmap will always leave the most
 * recently set bit un-synced; setting (last bit + sizeof(buf) * 8) to 0
 * flushes the content of buf into the vmcore, i.e. synchronizes the
 * un-synced bit into the vmcore.
 *
 * Returns 0 on success, -1 if writing either bitmap copy fails.
 */
static int set_dump_bitmap(uint64_t last_pfn, uint64_t pfn, bool value,
                           uint8_t *buf, DumpState *s)
{
    off_t old_offset, new_offset;
    off_t offset_bitmap1, offset_bitmap2;
    uint32_t byte, bit;
    size_t bitmap_bufsize = dump_bitmap_get_bufsize(s);
    size_t bits_per_buf = bitmap_bufsize * CHAR_BIT;

    /* should not set the previous place */
    assert(last_pfn <= pfn);

    /*
     * If the bit to be set is not cached in buf, flush the data in buf
     * to the vmcore first.  Making new_offset bigger than old_offset also
     * syncs any remaining data into the vmcore.
     */
    old_offset = bitmap_bufsize * (last_pfn / bits_per_buf);
    new_offset = bitmap_bufsize * (pfn / bits_per_buf);

    while (old_offset < new_offset) {
        /* calculate the offset and write dump_bitmap */
        offset_bitmap1 = s->offset_dump_bitmap + old_offset;
        if (write_buffer(s->fd, offset_bitmap1, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        /* dump level 1 is chosen, so 1st and 2nd bitmap are same */
        offset_bitmap2 = s->offset_dump_bitmap + s->len_dump_bitmap +
                         old_offset;
        if (write_buffer(s->fd, offset_bitmap2, buf,
                         bitmap_bufsize) < 0) {
            return -1;
        }

        memset(buf, 0, bitmap_bufsize);
        old_offset += bitmap_bufsize;
    }

    /* get the exact place of the bit in the buf, and set it */
    byte = (pfn % bits_per_buf) / CHAR_BIT;
    bit = (pfn % bits_per_buf) % CHAR_BIT;
    if (value) {
        buf[byte] |= 1u << bit;
    } else {
        buf[byte] &= ~(1u << bit);
    }

    return 0;
}

/* Convert a guest physical address to a page frame number. */
static uint64_t dump_paddr_to_pfn(DumpState *s, uint64_t addr)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (addr >> target_page_shift) - ARCH_PFN_OFFSET;
}

/* Convert a page frame number back to a guest physical address. */
static uint64_t dump_pfn_to_paddr(DumpState *s, uint64_t pfn)
{
    int target_page_shift = ctz32(s->dump_info.page_size);

    return (pfn + ARCH_PFN_OFFSET) << target_page_shift;
}

/*
 * Examine every page and return the page frame number and the address of
 * the page.  bufptr can be NULL.  Note: the blocks here are supposed to
 * reflect guest-phys blocks, so block->target_start and block->target_end
 * should be integral multiples of the target page size.
*/ static bool get_next_page(GuestPhysBlock **blockptr, uint64_t *pfnptr, uint8_t **bufptr, DumpState *s) { GuestPhysBlock *block = *blockptr; hwaddr addr, target_page_mask = ~((hwaddr)s->dump_info.page_size - 1); uint8_t *buf; /* block == NULL means the start of the iteration */ if (!block) { block = QTAILQ_FIRST(&s->guest_phys_blocks.head); *blockptr = block; assert((block->target_start & ~target_page_mask) == 0); assert((block->target_end & ~target_page_mask) == 0); *pfnptr = dump_paddr_to_pfn(s, block->target_start); if (bufptr) { *bufptr = block->host_addr; } return true; } *pfnptr = *pfnptr + 1; addr = dump_pfn_to_paddr(s, *pfnptr); if ((addr >= block->target_start) && (addr + s->dump_info.page_size <= block->target_end)) { buf = block->host_addr + (addr - block->target_start); } else { /* the next page is in the next block */ block = QTAILQ_NEXT(block, next); *blockptr = block; if (!block) { return false; } assert((block->target_start & ~target_page_mask) == 0); assert((block->target_end & ~target_page_mask) == 0); *pfnptr = dump_paddr_to_pfn(s, block->target_start); buf = block->host_addr; } if (bufptr) { *bufptr = buf; } return true; } static void write_dump_bitmap(DumpState *s, Error **errp) { int ret = 0; uint64_t last_pfn, pfn; void *dump_bitmap_buf; size_t num_dumpable; GuestPhysBlock *block_iter = NULL; size_t bitmap_bufsize = dump_bitmap_get_bufsize(s); size_t bits_per_buf = bitmap_bufsize * CHAR_BIT; /* dump_bitmap_buf is used to store dump_bitmap temporarily */ dump_bitmap_buf = g_malloc0(bitmap_bufsize); num_dumpable = 0; last_pfn = 0; /* * exam memory page by page, and set the bit in dump_bitmap corresponded * to the existing page. */ while (get_next_page(&block_iter, &pfn, NULL, s)) { ret = set_dump_bitmap(last_pfn, pfn, true, dump_bitmap_buf, s); if (ret < 0) { error_setg(errp, "dump: failed to set dump_bitmap"); goto out; } last_pfn = pfn; num_dumpable++; } /* * set_dump_bitmap will always leave the recently set bit un-sync. 
Here we set the remaining bits from last_pfn to the end of the
     * bitmap buffer to 0.  With those set, the un-synced bit will be
     * synchronized into the vmcore.
     */
    if (num_dumpable > 0) {
        ret = set_dump_bitmap(last_pfn, last_pfn + bits_per_buf, false,
                              dump_bitmap_buf, s);
        if (ret < 0) {
            error_setg(errp, "dump: failed to sync dump_bitmap");
            goto out;
        }
    }

    /* number of dumpable pages that will be dumped later */
    s->num_dumpable = num_dumpable;
out:
    g_free(dump_bitmap_buf);
}

/*
 * Initialize a DataCache that buffers sequential writes to s->fd starting
 * at the given file offset.  The buffer holds four bitmap buffers' worth
 * of data before write_cache() flushes it.
 */
static void prepare_data_cache(DataCache *data_cache, DumpState *s,
                               off_t offset)
{
    data_cache->fd = s->fd;
    data_cache->data_size = 0;
    data_cache->buf_size = 4 * dump_bitmap_get_bufsize(s);
    data_cache->buf = g_malloc0(data_cache->buf_size);
    data_cache->offset = offset;
}

/*
 * Append size bytes from buf to the cache, flushing the cached data to
 * dc->fd when it would overflow.  If flag_sync is true, buf is not
 * consumed and any buffered data is flushed to the file instead.
 *
 * Returns 0 on success, -1 on write failure.
 */
static int write_cache(DataCache *dc, const void *buf, size_t size,
                       bool flag_sync)
{
    /*
     * dc->buf_size should not be less than size, otherwise dc will never
     * be enough
     */
    assert(size <= dc->buf_size);

    /*
     * if flag_sync is set, synchronize data in dc->buf into vmcore.
     * otherwise check if the space is enough for caching data in buf, if not,
     * write the data in dc->buf to dc->fd and reset dc->buf
     */
    if ((!flag_sync && dc->data_size + size > dc->buf_size) ||
        (flag_sync && dc->data_size > 0)) {
        if (write_buffer(dc->fd, dc->offset, dc->buf, dc->data_size) < 0) {
            return -1;
        }

        dc->offset += dc->data_size;
        dc->data_size = 0;
    }

    if (!flag_sync) {
        memcpy(dc->buf + dc->data_size, buf, size);
        dc->data_size += size;
    }

    return 0;
}

/* Release the buffer owned by a DataCache. */
static void free_data_cache(DataCache *data_cache)
{
    g_free(data_cache->buf);
}

/*
 * Worst-case size of the compression output buffer for one page of input,
 * for the selected compression method.  Returns 0 for an unknown method.
 */
static size_t get_len_buf_out(size_t page_size, uint32_t flag_compress)
{
    switch (flag_compress) {
    case DUMP_DH_COMPRESSED_ZLIB:
        return compressBound(page_size);

    case DUMP_DH_COMPRESSED_LZO:
        /*
         * LZO will expand incompressible data by a little amount.
Please check * the following URL to see the expansion calculation: * http://www.oberhumer.com/opensource/lzo/lzofaq.php */ return page_size + page_size / 16 + 64 + 3; #ifdef CONFIG_SNAPPY case DUMP_DH_COMPRESSED_SNAPPY: return snappy_max_compressed_length(page_size); #endif } return 0; } /* * check if the page is all 0 */ static inline bool is_zero_page(const uint8_t *buf, size_t page_size) { return buffer_is_zero(buf, page_size); } static void write_dump_pages(DumpState *s, Error **errp) { int ret = 0; DataCache page_desc, page_data; size_t len_buf_out, size_out; #ifdef CONFIG_LZO lzo_bytep wrkmem = NULL; #endif uint8_t *buf_out = NULL; off_t offset_desc, offset_data; PageDescriptor pd, pd_zero; uint8_t *buf; GuestPhysBlock *block_iter = NULL; uint64_t pfn_iter; /* get offset of page_desc and page_data in dump file */ offset_desc = s->offset_page; offset_data = offset_desc + sizeof(PageDescriptor) * s->num_dumpable; prepare_data_cache(&page_desc, s, offset_desc); prepare_data_cache(&page_data, s, offset_data); /* prepare buffer to store compressed data */ len_buf_out = get_len_buf_out(s->dump_info.page_size, s->flag_compress); assert(len_buf_out != 0); #ifdef CONFIG_LZO wrkmem = g_malloc(LZO1X_1_MEM_COMPRESS); #endif buf_out = g_malloc(len_buf_out); /* * init zero page's page_desc and page_data, because every zero page * uses the same page_data */ pd_zero.size = cpu_to_dump32(s, s->dump_info.page_size); pd_zero.flags = cpu_to_dump32(s, 0); pd_zero.offset = cpu_to_dump64(s, offset_data); pd_zero.page_flags = cpu_to_dump64(s, 0); buf = g_malloc0(s->dump_info.page_size); ret = write_cache(&page_data, buf, s->dump_info.page_size, false); g_free(buf); if (ret < 0) { error_setg(errp, "dump: failed to write page data (zero page)"); goto out; } offset_data += s->dump_info.page_size; /* * dump memory to vmcore page by page. 
zero page will all be resided in the * first page of page section */ while (get_next_page(&block_iter, &pfn_iter, &buf, s)) { /* check zero page */ if (is_zero_page(buf, s->dump_info.page_size)) { ret = write_cache(&page_desc, &pd_zero, sizeof(PageDescriptor), false); if (ret < 0) { error_setg(errp, "dump: failed to write page desc"); goto out; } } else { /* * not zero page, then: * 1. compress the page * 2. write the compressed page into the cache of page_data * 3. get page desc of the compressed page and write it into the * cache of page_desc * * only one compression format will be used here, for * s->flag_compress is set. But when compression fails to work, * we fall back to save in plaintext. */ size_out = len_buf_out; if ((s->flag_compress & DUMP_DH_COMPRESSED_ZLIB) && (compress2(buf_out, (uLongf *)&size_out, buf, s->dump_info.page_size, Z_BEST_SPEED) == Z_OK) && (size_out < s->dump_info.page_size)) { pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_ZLIB); pd.size = cpu_to_dump32(s, size_out); ret = write_cache(&page_data, buf_out, size_out, false); if (ret < 0) { error_setg(errp, "dump: failed to write page data"); goto out; } #ifdef CONFIG_LZO } else if ((s->flag_compress & DUMP_DH_COMPRESSED_LZO) && (lzo1x_1_compress(buf, s->dump_info.page_size, buf_out, (lzo_uint *)&size_out, wrkmem) == LZO_E_OK) && (size_out < s->dump_info.page_size)) { pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_LZO); pd.size = cpu_to_dump32(s, size_out); ret = write_cache(&page_data, buf_out, size_out, false); if (ret < 0) { error_setg(errp, "dump: failed to write page data"); goto out; } #endif #ifdef CONFIG_SNAPPY } else if ((s->flag_compress & DUMP_DH_COMPRESSED_SNAPPY) && (snappy_compress((char *)buf, s->dump_info.page_size, (char *)buf_out, &size_out) == SNAPPY_OK) && (size_out < s->dump_info.page_size)) { pd.flags = cpu_to_dump32(s, DUMP_DH_COMPRESSED_SNAPPY); pd.size = cpu_to_dump32(s, size_out); ret = write_cache(&page_data, buf_out, size_out, false); if (ret < 0) { 
error_setg(errp, "dump: failed to write page data"); goto out; } #endif } else { /* * fall back to save in plaintext, size_out should be * assigned the target's page size */ pd.flags = cpu_to_dump32(s, 0); size_out = s->dump_info.page_size; pd.size = cpu_to_dump32(s, size_out); ret = write_cache(&page_data, buf, s->dump_info.page_size, false); if (ret < 0) { error_setg(errp, "dump: failed to write page data"); goto out; } } /* get and write page desc here */ pd.page_flags = cpu_to_dump64(s, 0); pd.offset = cpu_to_dump64(s, offset_data); offset_data += size_out; ret = write_cache(&page_desc, &pd, sizeof(PageDescriptor), false); if (ret < 0) { error_setg(errp, "dump: failed to write page desc"); goto out; } } s->written_size += s->dump_info.page_size; } ret = write_cache(&page_desc, NULL, 0, true); if (ret < 0) { error_setg(errp, "dump: failed to sync cache for page_desc"); goto out; } ret = write_cache(&page_data, NULL, 0, true); if (ret < 0) { error_setg(errp, "dump: failed to sync cache for page_data"); goto out; } out: free_data_cache(&page_desc); free_data_cache(&page_data); #ifdef CONFIG_LZO g_free(wrkmem); #endif g_free(buf_out); } static void create_kdump_vmcore(DumpState *s, Error **errp) { int ret; Error *local_err = NULL; /* * the kdump-compressed format is: * File offset * +------------------------------------------+ 0x0 * | main header (struct disk_dump_header) | * |------------------------------------------+ block 1 * | sub header (struct kdump_sub_header) | * |------------------------------------------+ block 2 * | 1st-dump_bitmap | * |------------------------------------------+ block 2 + X blocks * | 2nd-dump_bitmap | (aligned by block) * |------------------------------------------+ block 2 + 2 * X blocks * | page desc for pfn 0 (struct page_desc) | (aligned by block) * | page desc for pfn 1 (struct page_desc) | * | : | * |------------------------------------------| (not aligned by block) * | page data (pfn 0) | * | page data (pfn 1) | * | : | * 
+------------------------------------------+ */ ret = write_start_flat_header(s->fd); if (ret < 0) { error_setg(errp, "dump: failed to write start flat header"); return; } write_dump_header(s, &local_err); if (local_err) { error_propagate(errp, local_err); return; } write_dump_bitmap(s, &local_err); if (local_err) { error_propagate(errp, local_err); return; } write_dump_pages(s, &local_err); if (local_err) { error_propagate(errp, local_err); return; } ret = write_end_flat_header(s->fd); if (ret < 0) { error_setg(errp, "dump: failed to write end flat header"); return; } } static ram_addr_t get_start_block(DumpState *s) { GuestPhysBlock *block; if (!s->has_filter) { s->next_block = QTAILQ_FIRST(&s->guest_phys_blocks.head); return 0; } QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { if (block->target_start >= s->begin + s->length || block->target_end <= s->begin) { /* This block is out of the range */ continue; } s->next_block = block; if (s->begin > block->target_start) { s->start = s->begin - block->target_start; } else { s->start = 0; } return s->start; } return -1; } static void get_max_mapnr(DumpState *s) { GuestPhysBlock *last_block; last_block = QTAILQ_LAST(&s->guest_phys_blocks.head); s->max_mapnr = dump_paddr_to_pfn(s, last_block->target_end); } static DumpState dump_state_global = { .status = DUMP_STATUS_NONE }; static void dump_state_prepare(DumpState *s) { /* zero the struct, setting status to active */ *s = (DumpState) { .status = DUMP_STATUS_ACTIVE }; } bool dump_in_progress(void) { DumpState *state = &dump_state_global; return (atomic_read(&state->status) == DUMP_STATUS_ACTIVE); } /* calculate total size of memory to be dumped (taking filter into * acoount.) */ static int64_t dump_calculate_size(DumpState *s) { GuestPhysBlock *block; int64_t size = 0, total = 0, left = 0, right = 0; QTAILQ_FOREACH(block, &s->guest_phys_blocks.head, next) { if (s->has_filter) { /* calculate the overlapped region. 
*/ left = MAX(s->begin, block->target_start); right = MIN(s->begin + s->length, block->target_end); size = right - left; size = size > 0 ? size : 0; } else { /* count the whole region in */ size = (block->target_end - block->target_start); } total += size; } return total; } static void vmcoreinfo_update_phys_base(DumpState *s) { uint64_t size, note_head_size, name_size, phys_base; char **lines; uint8_t *vmci; size_t i; if (!note_name_equal(s, s->guest_note, "VMCOREINFO")) { return; } get_note_sizes(s, s->guest_note, &note_head_size, &name_size, &size); note_head_size = ROUND_UP(note_head_size, 4); vmci = s->guest_note + note_head_size + ROUND_UP(name_size, 4); *(vmci + size) = '\0'; lines = g_strsplit((char *)vmci, "\n", -1); for (i = 0; lines[i]; i++) { const char *prefix = NULL; if (s->dump_info.d_machine == EM_X86_64) { prefix = "NUMBER(phys_base)="; } else if (s->dump_info.d_machine == EM_AARCH64) { prefix = "NUMBER(PHYS_OFFSET)="; } if (prefix && g_str_has_prefix(lines[i], prefix)) { if (qemu_strtou64(lines[i] + strlen(prefix), NULL, 16, &phys_base) < 0) { warn_report("Failed to read %s", prefix); } else { s->dump_info.phys_base = phys_base; } break; } } g_strfreev(lines); } static void dump_init(DumpState *s, int fd, bool has_format, DumpGuestMemoryFormat format, bool paging, bool has_filter, int64_t begin, int64_t length, Error **errp) { VMCoreInfoState *vmci = vmcoreinfo_find(); CPUState *cpu; int nr_cpus; Error *err = NULL; int ret; s->has_format = has_format; s->format = format; s->written_size = 0; /* kdump-compressed is conflict with paging and filter */ if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) { assert(!paging && !has_filter); } if (runstate_is_running()) { vm_stop(RUN_STATE_SAVE_VM); s->resume = true; } else { s->resume = false; } /* If we use KVM, we should synchronize the registers before we get dump * info or physmap info. 
*/ cpu_synchronize_all_states(); nr_cpus = 0; CPU_FOREACH(cpu) { nr_cpus++; } s->fd = fd; s->has_filter = has_filter; s->begin = begin; s->length = length; memory_mapping_list_init(&s->list); guest_phys_blocks_init(&s->guest_phys_blocks); guest_phys_blocks_append(&s->guest_phys_blocks); s->total_size = dump_calculate_size(s); #ifdef DEBUG_DUMP_GUEST_MEMORY fprintf(stderr, "DUMP: total memory to dump: %lu\n", s->total_size); #endif /* it does not make sense to dump non-existent memory */ if (!s->total_size) { error_setg(errp, "dump: no guest memory to dump"); goto cleanup; } s->start = get_start_block(s); if (s->start == -1) { error_setg(errp, QERR_INVALID_PARAMETER, "begin"); goto cleanup; } /* get dump info: endian, class and architecture. * If the target architecture is not supported, cpu_get_dump_info() will * return -1. */ ret = cpu_get_dump_info(&s->dump_info, &s->guest_phys_blocks); if (ret < 0) { error_setg(errp, QERR_UNSUPPORTED); goto cleanup; } if (!s->dump_info.page_size) { s->dump_info.page_size = TARGET_PAGE_SIZE; } s->note_size = cpu_get_note_size(s->dump_info.d_class, s->dump_info.d_machine, nr_cpus); if (s->note_size < 0) { error_setg(errp, QERR_UNSUPPORTED); goto cleanup; } /* * The goal of this block is to (a) update the previously guessed * phys_base, (b) copy the guest note out of the guest. * Failure to do so is not fatal for dumping. */ if (vmci) { uint64_t addr, note_head_size, name_size, desc_size; uint32_t size; uint16_t format; note_head_size = s->dump_info.d_class == ELFCLASS32 ? 
sizeof(Elf32_Nhdr) : sizeof(Elf64_Nhdr); format = le16_to_cpu(vmci->vmcoreinfo.guest_format); size = le32_to_cpu(vmci->vmcoreinfo.size); addr = le64_to_cpu(vmci->vmcoreinfo.paddr); if (!vmci->has_vmcoreinfo) { warn_report("guest note is not present"); } else if (size < note_head_size || size > MAX_GUEST_NOTE_SIZE) { warn_report("guest note size is invalid: %" PRIu32, size); } else if (format != FW_CFG_VMCOREINFO_FORMAT_ELF) { warn_report("guest note format is unsupported: %" PRIu16, format); } else { s->guest_note = g_malloc(size + 1); /* +1 for adding \0 */ cpu_physical_memory_read(addr, s->guest_note, size); get_note_sizes(s, s->guest_note, NULL, &name_size, &desc_size); s->guest_note_size = ELF_NOTE_SIZE(note_head_size, name_size, desc_size); if (name_size > MAX_GUEST_NOTE_SIZE || desc_size > MAX_GUEST_NOTE_SIZE || s->guest_note_size > size) { warn_report("Invalid guest note header"); g_free(s->guest_note); s->guest_note = NULL; } else { vmcoreinfo_update_phys_base(s); s->note_size += s->guest_note_size; } } } /* get memory mapping */ if (paging) { qemu_get_guest_memory_mapping(&s->list, &s->guest_phys_blocks, &err); if (err != NULL) { error_propagate(errp, err); goto cleanup; } } else { qemu_get_guest_simple_memory_mapping(&s->list, &s->guest_phys_blocks); } s->nr_cpus = nr_cpus; get_max_mapnr(s); uint64_t tmp; tmp = DIV_ROUND_UP(DIV_ROUND_UP(s->max_mapnr, CHAR_BIT), s->dump_info.page_size); s->len_dump_bitmap = tmp * s->dump_info.page_size; /* init for kdump-compressed format */ if (has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) { switch (format) { case DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB: s->flag_compress = DUMP_DH_COMPRESSED_ZLIB; break; case DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO: #ifdef CONFIG_LZO if (lzo_init() != LZO_E_OK) { error_setg(errp, "failed to initialize the LZO library"); goto cleanup; } #endif s->flag_compress = DUMP_DH_COMPRESSED_LZO; break; case DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY: s->flag_compress = DUMP_DH_COMPRESSED_SNAPPY; break; 
default: s->flag_compress = 0; } return; } if (s->has_filter) { memory_mapping_filter(&s->list, s->begin, s->length); } /* * calculate phdr_num * * the type of ehdr->e_phnum is uint16_t, so we should avoid overflow */ s->phdr_num = 1; /* PT_NOTE */ if (s->list.num < UINT16_MAX - 2) { s->phdr_num += s->list.num; s->have_section = false; } else { s->have_section = true; s->phdr_num = PN_XNUM; s->sh_info = 1; /* PT_NOTE */ /* the type of shdr->sh_info is uint32_t, so we should avoid overflow */ if (s->list.num <= UINT32_MAX - 1) { s->sh_info += s->list.num; } else { s->sh_info = UINT32_MAX; } } if (s->dump_info.d_class == ELFCLASS64) { if (s->have_section) { s->memory_offset = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->sh_info + sizeof(Elf64_Shdr) + s->note_size; } else { s->memory_offset = sizeof(Elf64_Ehdr) + sizeof(Elf64_Phdr) * s->phdr_num + s->note_size; } } else { if (s->have_section) { s->memory_offset = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->sh_info + sizeof(Elf32_Shdr) + s->note_size; } else { s->memory_offset = sizeof(Elf32_Ehdr) + sizeof(Elf32_Phdr) * s->phdr_num + s->note_size; } } return; cleanup: dump_cleanup(s); } /* this operation might be time consuming. */ static void dump_process(DumpState *s, Error **errp) { Error *local_err = NULL; DumpQueryResult *result = NULL; if (s->has_format && s->format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) { #ifdef TARGET_X86_64 create_win_dump(s, &local_err); #endif } else if (s->has_format && s->format != DUMP_GUEST_MEMORY_FORMAT_ELF) { create_kdump_vmcore(s, &local_err); } else { create_vmcore(s, &local_err); } /* make sure status is written after written_size updates */ smp_wmb(); atomic_set(&s->status, (local_err ? DUMP_STATUS_FAILED : DUMP_STATUS_COMPLETED)); /* send DUMP_COMPLETED message (unconditionally) */ result = qmp_query_dump(NULL); /* should never fail */ assert(result); qapi_event_send_dump_completed(result, !!local_err, (local_err ? 
error_get_pretty(local_err) : NULL));
    qapi_free_DumpQueryResult(result);

    error_propagate(errp, local_err);
    dump_cleanup(s);
}

/*
 * Entry point of the detached dump thread; errors are not propagated to a
 * caller (dump_process reports completion/failure via the QMP event).
 */
static void *dump_thread(void *data)
{
    DumpState *s = (DumpState *)data;
    dump_process(s, NULL);
    return NULL;
}

/* QMP query-dump: report the status and progress of the current dump. */
DumpQueryResult *qmp_query_dump(Error **errp)
{
    DumpQueryResult *result = g_new(DumpQueryResult, 1);
    DumpState *state = &dump_state_global;
    result->status = atomic_read(&state->status);
    /* make sure we are reading status and written_size in order */
    smp_rmb();
    result->completed = state->written_size;
    result->total = state->total_size;
    return result;
}

/*
 * QMP dump-guest-memory: validate the request, open the destination
 * ("fd:" or "file:" protocol), then run the dump either synchronously or
 * in a detached thread.
 */
void qmp_dump_guest_memory(bool paging, const char *file, bool has_detach,
                           bool detach, bool has_begin, int64_t begin,
                           bool has_length, int64_t length, bool has_format,
                           DumpGuestMemoryFormat format, Error **errp)
{
    const char *p;
    int fd = -1;
    DumpState *s;
    Error *local_err = NULL;
    bool detach_p = false;

    if (runstate_check(RUN_STATE_INMIGRATE)) {
        error_setg(errp, "Dump not allowed during incoming migration.");
        return;
    }

    /* if there is a dump in background, we should wait until the dump
     * finished */
    if (dump_in_progress()) {
        error_setg(errp, "There is a dump in process, please wait.");
        return;
    }

    /*
     * kdump-compressed format needs the whole memory dumped, so paging or
     * filter is not supported here.
*/ if ((has_format && format != DUMP_GUEST_MEMORY_FORMAT_ELF) && (paging || has_begin || has_length)) { error_setg(errp, "kdump-compressed format doesn't support paging or " "filter"); return; } if (has_begin && !has_length) { error_setg(errp, QERR_MISSING_PARAMETER, "length"); return; } if (!has_begin && has_length) { error_setg(errp, QERR_MISSING_PARAMETER, "begin"); return; } if (has_detach) { detach_p = detach; } /* check whether lzo/snappy is supported */ #ifndef CONFIG_LZO if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO) { error_setg(errp, "kdump-lzo is not available now"); return; } #endif #ifndef CONFIG_SNAPPY if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY) { error_setg(errp, "kdump-snappy is not available now"); return; } #endif #ifndef TARGET_X86_64 if (has_format && format == DUMP_GUEST_MEMORY_FORMAT_WIN_DMP) { error_setg(errp, "Windows dump is only available for x86-64"); return; } #endif #if !defined(WIN32) if (strstart(file, "fd:", &p)) { fd = monitor_get_fd(cur_mon, p, errp); if (fd == -1) { return; } } #endif if (strstart(file, "file:", &p)) { fd = qemu_open(p, O_WRONLY | O_CREAT | O_TRUNC | O_BINARY, S_IRUSR); if (fd < 0) { error_setg_file_open(errp, errno, p); return; } } if (fd == -1) { error_setg(errp, QERR_INVALID_PARAMETER, "protocol"); return; } s = &dump_state_global; dump_state_prepare(s); dump_init(s, fd, has_format, format, paging, has_begin, begin, length, &local_err); if (local_err) { error_propagate(errp, local_err); atomic_set(&s->status, DUMP_STATUS_FAILED); return; } if (detach_p) { /* detached dump */ s->detached = true; qemu_thread_create(&s->dump_thread, "dump_thread", dump_thread, s, QEMU_THREAD_DETACHED); } else { /* sync dump */ dump_process(s, errp); } } DumpGuestMemoryCapability *qmp_query_dump_guest_memory_capability(Error **errp) { DumpGuestMemoryFormatList *item; DumpGuestMemoryCapability *cap = g_malloc0(sizeof(DumpGuestMemoryCapability)); /* elf is always available */ item = 
g_malloc0(sizeof(DumpGuestMemoryFormatList));
    cap->formats = item;
    item->value = DUMP_GUEST_MEMORY_FORMAT_ELF;

    /* kdump-zlib is always available */
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_ZLIB;

    /* add new item if kdump-lzo is available */
#ifdef CONFIG_LZO
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_LZO;
#endif

    /* add new item if kdump-snappy is available */
#ifdef CONFIG_SNAPPY
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_KDUMP_SNAPPY;
#endif

    /* Windows dump is available only if target is x86_64 */
#ifdef TARGET_X86_64
    item->next = g_malloc0(sizeof(DumpGuestMemoryFormatList));
    item = item->next;
    item->value = DUMP_GUEST_MEMORY_FORMAT_WIN_DMP;
#endif

    return cap;
}
pmp-tool/PMP
src/qemu/src-pmp/hw/display/ati_2d.c
/*
 * QEMU ATI SVGA emulation
 * 2D engine functions
 *
 * Copyright (c) 2019 <NAME>
 *
 * This work is licensed under the GNU GPL license version 2 or later.
 */

#include "ati_int.h"
#include "ati_regs.h"
#include "qemu/log.h"
#include "ui/pixel_ops.h"

/*
 * NOTE:
 * This is 2D _acceleration_ and supposed to be fast. Therefore, don't try to
 * reinvent the wheel (unlikely to get better with a naive implementation than
 * existing libraries) and avoid (poorly) reimplementing gfx primitives.
 * That is unnecessary and would become a performance problem. Instead, try to
 * map to and reuse existing optimised facilities (e.g. pixman) wherever
 * possible.
 */

/*
 * Bits per pixel encoded in the low nibble of the DP_DATATYPE register;
 * returns 0 (and logs) for an unknown encoding.
 */
static int ati_bpp_from_datatype(ATIVGAState *s)
{
    switch (s->regs.dp_datatype & 0xf) {
    case 2:
        return 8;
    case 3:
    case 4:
        return 16;
    case 5:
        return 24;
    case 6:
        return 32;
    default:
        qemu_log_mask(LOG_UNIMP, "Unknown dst datatype %d\n",
                      s->regs.dp_datatype & 0xf);
        return 0;
    }
}

/*
 * Execute the 2D operation currently described by the engine registers.
 * Dispatches on the ROP3 code in DP_MIX: source copy is mapped to
 * pixman_blt(), solid fills (PATCOPY/BLACKNESS/WHITENESS) to
 * pixman_fill(); anything else is logged as unimplemented.  Writes that
 * land inside the visible VBE framebuffer mark the region dirty.
 */
void ati_2d_blt(ATIVGAState *s)
{
    /* FIXME it is probably more complex than this and may need to be */
    /* rewritten but for now as a start just to get some output: */
    DisplaySurface *ds = qemu_console_surface(s->vga.con);
    DPRINTF("%p %u ds: %p %d %d rop: %x\n", s->vga.vram_ptr,
            s->vga.vbe_start_addr, surface_data(ds), surface_stride(ds),
            surface_bits_per_pixel(ds),
            (s->regs.dp_mix & GMC_ROP3_MASK) >> 16);
    DPRINTF("%d %d, %d %d, (%d,%d) -> (%d,%d) %dx%d\n", s->regs.src_offset,
            s->regs.dst_offset, s->regs.src_pitch, s->regs.dst_pitch,
            s->regs.src_x, s->regs.src_y, s->regs.dst_x, s->regs.dst_y,
            s->regs.dst_width, s->regs.dst_height);
    switch (s->regs.dp_mix & GMC_ROP3_MASK) {
    case ROP3_SRCCOPY:
    {
        uint8_t *src_bits, *dst_bits, *end;
        int src_stride, dst_stride, bpp = ati_bpp_from_datatype(s);
        src_bits = s->vga.vram_ptr + s->regs.src_offset;
        dst_bits = s->vga.vram_ptr + s->regs.dst_offset;
        src_stride = s->regs.src_pitch;
        dst_stride = s->regs.dst_pitch;

        /*
         * NOTE(review): on Rage128 the offsets are relative to crtc_offset
         * and the pitch register presumably uses different units (hence
         * the bpp scaling before the division into 32-bit words below) --
         * verify against the datasheet.
         */
        if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) {
            src_bits += s->regs.crtc_offset & 0x07ffffff;
            dst_bits += s->regs.crtc_offset & 0x07ffffff;
            src_stride *= bpp;
            dst_stride *= bpp;
        }
        /* pixman strides are in units of uint32_t */
        src_stride /= sizeof(uint32_t);
        dst_stride /= sizeof(uint32_t);

        DPRINTF("pixman_blt(%p, %p, %d, %d, %d, %d, %d, %d, %d, %d, %d, %d)\n",
                src_bits, dst_bits, src_stride, dst_stride, bpp, bpp,
                s->regs.src_x, s->regs.src_y, s->regs.dst_x, s->regs.dst_y,
                s->regs.dst_width, s->regs.dst_height);
        /* refuse any blit whose source or destination leaves vram */
        end = s->vga.vram_ptr + s->vga.vram_size;
        if (src_bits >= end || dst_bits >= end ||
            src_bits + (s->regs.src_y + s->regs.dst_height) * src_stride
            + s->regs.src_x >= end ||
            dst_bits + (s->regs.dst_y + s->regs.dst_height) * dst_stride
            + s->regs.dst_x >= end) {
            qemu_log_mask(LOG_UNIMP, "blt outside vram not implemented\n");
            return;
        }
        pixman_blt((uint32_t *)src_bits, (uint32_t *)dst_bits,
                   src_stride, dst_stride, bpp, bpp,
                   s->regs.src_x, s->regs.src_y,
                   s->regs.dst_x, s->regs.dst_y,
                   s->regs.dst_width, s->regs.dst_height);
        /* if the destination overlaps the scanout, mark it dirty */
        if (dst_bits >= s->vga.vram_ptr + s->vga.vbe_start_addr &&
            dst_bits < s->vga.vram_ptr + s->vga.vbe_start_addr +
            s->vga.vbe_regs[VBE_DISPI_INDEX_YRES] * s->vga.vbe_line_offset) {
            memory_region_set_dirty(&s->vga.vram, s->vga.vbe_start_addr +
                                    s->regs.dst_offset +
                                    s->regs.dst_y * surface_stride(ds),
                                    s->regs.dst_height * surface_stride(ds));
        }
        s->regs.dst_x += s->regs.dst_width;
        s->regs.dst_y += s->regs.dst_height;
        break;
    }
    case ROP3_PATCOPY:
    case ROP3_BLACKNESS:
    case ROP3_WHITENESS:
    {
        uint8_t *dst_bits, *end;
        int dst_stride, bpp = ati_bpp_from_datatype(s);
        uint32_t filler = 0;
        dst_bits = s->vga.vram_ptr + s->regs.dst_offset;
        dst_stride = s->regs.dst_pitch;

        if (s->dev_id == PCI_DEVICE_ID_ATI_RAGE128_PF) {
            dst_bits += s->regs.crtc_offset & 0x07ffffff;
            dst_stride *= bpp;
        }
        dst_stride /= sizeof(uint32_t);

        /* pick the fill value for the three supported fill ROPs */
        switch (s->regs.dp_mix & GMC_ROP3_MASK) {
        case ROP3_PATCOPY:
            filler = bswap32(s->regs.dp_brush_frgd_clr);
            break;
        case ROP3_BLACKNESS:
            filler = rgb_to_pixel32(s->vga.palette[0], s->vga.palette[1],
                                    s->vga.palette[2]) << 8 | 0xff;
            break;
        case ROP3_WHITENESS:
            filler = rgb_to_pixel32(s->vga.palette[3], s->vga.palette[4],
                                    s->vga.palette[5]) << 8 | 0xff;
            break;
        }

        DPRINTF("pixman_fill(%p, %d, %d, %d, %d, %d, %d, %x)\n",
                dst_bits, dst_stride, bpp,
                s->regs.dst_x, s->regs.dst_y,
                s->regs.dst_width, s->regs.dst_height,
                filler);
        end = s->vga.vram_ptr + s->vga.vram_size;
        if (dst_bits >= end ||
            dst_bits + (s->regs.dst_y + s->regs.dst_height) * dst_stride
            + s->regs.dst_x >= end) {
            qemu_log_mask(LOG_UNIMP, "blt outside vram not implemented\n");
            return;
        }
        pixman_fill((uint32_t *)dst_bits, dst_stride, bpp,
                    s->regs.dst_x, s->regs.dst_y,
                    s->regs.dst_width, s->regs.dst_height,
                    filler);
        if (dst_bits >= s->vga.vram_ptr + s->vga.vbe_start_addr &&
            dst_bits < s->vga.vram_ptr + s->vga.vbe_start_addr +
            s->vga.vbe_regs[VBE_DISPI_INDEX_YRES] * s->vga.vbe_line_offset) {
            memory_region_set_dirty(&s->vga.vram, s->vga.vbe_start_addr +
                                    s->regs.dst_offset +
                                    s->regs.dst_y * surface_stride(ds),
                                    s->regs.dst_height * surface_stride(ds));
        }
        s->regs.dst_y += s->regs.dst_height;
        break;
    }
    default:
        qemu_log_mask(LOG_UNIMP, "Unimplemented ati_2d blt op %x\n",
                      (s->regs.dp_mix & GMC_ROP3_MASK) >> 16);
    }
}
pmp-tool/PMP
src/qemu/src-pmp/tests/virtio-rng-test.c
/*
 * QTest testcase for VirtIO RNG
 *
 * Copyright (c) 2014 SUSE LINUX Products GmbH
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "libqtest.h"
#include "libqos/qgraph.h"
#include "libqos/virtio-rng.h"

/* PCI slot used for the hot-plugged device */
#define PCI_SLOT_HP 0x06

/*
 * Hot-plug a second virtio-rng-pci device into slot PCI_SLOT_HP and, on
 * i386/x86_64 only, unplug it again via the ACPI unplug test helper.
 */
static void rng_hotplug(void *obj, void *data, QGuestAllocator *alloc)
{
    const char *arch = qtest_get_arch();

    qtest_qmp_device_add("virtio-rng-pci", "rng1", "{'addr': %s}",
                         stringify(PCI_SLOT_HP));

    /* ACPI-based PCI unplug is only exercised on x86 targets */
    if (strcmp(arch, "i386") == 0 || strcmp(arch, "x86_64") == 0) {
        qpci_unplug_acpi_device_test("rng1", PCI_SLOT_HP);
    }
}

/* Register the qgraph test against the virtio-rng-pci node. */
static void register_virtio_rng_test(void)
{
    qos_add_test("hotplug", "virtio-rng-pci", rng_hotplug, NULL);
}

libqos_init(register_virtio_rng_test);
pmp-tool/PMP
src/qemu/src-pmp/target/s390x/cpu_features.c
/* * CPU features/facilities for s390x * * Copyright IBM Corp. 2016, 2018 * * Author(s): <NAME> <<EMAIL>> * * This work is licensed under the terms of the GNU GPL, version 2 or (at * your option) any later version. See the COPYING file in the top-level * directory. */ #include "qemu/osdep.h" #include "qemu/module.h" #include "cpu_features.h" #define FEAT_INIT(_name, _type, _bit, _desc) \ { \ .name = _name, \ .type = _type, \ .bit = _bit, \ .desc = _desc, \ } /* S390FeatDef.bit is not applicable as there is no feature block. */ #define FEAT_INIT_MISC(_name, _desc) \ FEAT_INIT(_name, S390_FEAT_TYPE_MISC, 0, _desc) /* indexed by feature number for easy lookup */ static const S390FeatDef s390_features[] = { FEAT_INIT("esan3", S390_FEAT_TYPE_STFL, 0, "Instructions marked as n3"), FEAT_INIT("zarch", S390_FEAT_TYPE_STFL, 1, "z/Architecture architectural mode"), FEAT_INIT("dateh", S390_FEAT_TYPE_STFL, 3, "DAT-enhancement facility"), FEAT_INIT("idtes", S390_FEAT_TYPE_STFL, 4, "IDTE selective TLB segment-table clearing"), FEAT_INIT("idter", S390_FEAT_TYPE_STFL, 5, "IDTE selective TLB region-table clearing"), FEAT_INIT("asnlxr", S390_FEAT_TYPE_STFL, 6, "ASN-and-LX reuse facility"), FEAT_INIT("stfle", S390_FEAT_TYPE_STFL, 7, "Store-facility-list-extended facility"), FEAT_INIT("edat", S390_FEAT_TYPE_STFL, 8, "Enhanced-DAT facility"), FEAT_INIT("srs", S390_FEAT_TYPE_STFL, 9, "Sense-running-status facility"), FEAT_INIT("csske", S390_FEAT_TYPE_STFL, 10, "Conditional-SSKE facility"), FEAT_INIT("ctop", S390_FEAT_TYPE_STFL, 11, "Configuration-topology facility"), FEAT_INIT("apqci", S390_FEAT_TYPE_STFL, 12, "Query AP Configuration Information facility"), FEAT_INIT("ipter", S390_FEAT_TYPE_STFL, 13, "IPTE-range facility"), FEAT_INIT("nonqks", S390_FEAT_TYPE_STFL, 14, "Nonquiescing key-setting facility"), FEAT_INIT("apft", S390_FEAT_TYPE_STFL, 15, "AP Facilities Test facility"), FEAT_INIT("etf2", S390_FEAT_TYPE_STFL, 16, "Extended-translation facility 2"), FEAT_INIT("msa-base", 
S390_FEAT_TYPE_STFL, 17, "Message-security-assist facility (excluding subfunctions)"), FEAT_INIT("ldisp", S390_FEAT_TYPE_STFL, 18, "Long-displacement facility"), FEAT_INIT("ldisphp", S390_FEAT_TYPE_STFL, 19, "Long-displacement facility has high performance"), FEAT_INIT("hfpm", S390_FEAT_TYPE_STFL, 20, "HFP-multiply-add/subtract facility"), FEAT_INIT("eimm", S390_FEAT_TYPE_STFL, 21, "Extended-immediate facility"), FEAT_INIT("etf3", S390_FEAT_TYPE_STFL, 22, "Extended-translation facility 3"), FEAT_INIT("hfpue", S390_FEAT_TYPE_STFL, 23, "HFP-unnormalized-extension facility"), FEAT_INIT("etf2eh", S390_FEAT_TYPE_STFL, 24, "ETF2-enhancement facility"), FEAT_INIT("stckf", S390_FEAT_TYPE_STFL, 25, "Store-clock-fast facility"), FEAT_INIT("parseh", S390_FEAT_TYPE_STFL, 26, "Parsing-enhancement facility"), FEAT_INIT("mvcos", S390_FEAT_TYPE_STFL, 27, "Move-with-optional-specification facility"), FEAT_INIT("tods-base", S390_FEAT_TYPE_STFL, 28, "TOD-clock-steering facility (excluding subfunctions)"), FEAT_INIT("etf3eh", S390_FEAT_TYPE_STFL, 30, "ETF3-enhancement facility"), FEAT_INIT("ectg", S390_FEAT_TYPE_STFL, 31, "Extract-CPU-time facility"), FEAT_INIT("csst", S390_FEAT_TYPE_STFL, 32, "Compare-and-swap-and-store facility"), FEAT_INIT("csst2", S390_FEAT_TYPE_STFL, 33, "Compare-and-swap-and-store facility 2"), FEAT_INIT("ginste", S390_FEAT_TYPE_STFL, 34, "General-instructions-extension facility"), FEAT_INIT("exrl", S390_FEAT_TYPE_STFL, 35, "Execute-extensions facility"), FEAT_INIT("emon", S390_FEAT_TYPE_STFL, 36, "Enhanced-monitor facility"), FEAT_INIT("fpe", S390_FEAT_TYPE_STFL, 37, "Floating-point extension facility"), FEAT_INIT("opc", S390_FEAT_TYPE_STFL, 38, "Order Preserving Compression facility"), FEAT_INIT("sprogp", S390_FEAT_TYPE_STFL, 40, "Set-program-parameters facility"), FEAT_INIT("fpseh", S390_FEAT_TYPE_STFL, 41, "Floating-point-support-enhancement facilities"), FEAT_INIT("dfp", S390_FEAT_TYPE_STFL, 42, "DFP (decimal-floating-point) facility"), FEAT_INIT("dfphp", 
S390_FEAT_TYPE_STFL, 43, "DFP (decimal-floating-point) facility has high performance"), FEAT_INIT("pfpo", S390_FEAT_TYPE_STFL, 44, "PFPO instruction"), FEAT_INIT("stfle45", S390_FEAT_TYPE_STFL, 45, "Various facilities introduced with z196"), FEAT_INIT("cmpsceh", S390_FEAT_TYPE_STFL, 47, "CMPSC-enhancement facility"), FEAT_INIT("dfpzc", S390_FEAT_TYPE_STFL, 48, "Decimal-floating-point zoned-conversion facility"), FEAT_INIT("stfle49", S390_FEAT_TYPE_STFL, 49, "Various facilities introduced with zEC12"), FEAT_INIT("cte", S390_FEAT_TYPE_STFL, 50, "Constrained transactional-execution facility"), FEAT_INIT("ltlbc", S390_FEAT_TYPE_STFL, 51, "Local-TLB-clearing facility"), FEAT_INIT("iacc2", S390_FEAT_TYPE_STFL, 52, "Interlocked-access facility 2"), FEAT_INIT("stfle53", S390_FEAT_TYPE_STFL, 53, "Various facilities introduced with z13"), FEAT_INIT("eec", S390_FEAT_TYPE_STFL, 54, "Entropy encoding compression facility"), FEAT_INIT("msa5-base", S390_FEAT_TYPE_STFL, 57, "Message-security-assist-extension-5 facility (excluding subfunctions)"), FEAT_INIT("minste2", S390_FEAT_TYPE_STFL, 58, "Miscellaneous-instruction-extensions facility 2"), FEAT_INIT("sema", S390_FEAT_TYPE_STFL, 59, "Semaphore-assist facility"), FEAT_INIT("tsi", S390_FEAT_TYPE_STFL, 60, "Time-slice Instrumentation facility"), FEAT_INIT("ri", S390_FEAT_TYPE_STFL, 64, "CPU runtime-instrumentation facility"), FEAT_INIT("zpci", S390_FEAT_TYPE_STFL, 69, "z/PCI facility"), FEAT_INIT("aen", S390_FEAT_TYPE_STFL, 71, "General-purpose-adapter-event-notification facility"), FEAT_INIT("ais", S390_FEAT_TYPE_STFL, 72, "General-purpose-adapter-interruption-suppression facility"), FEAT_INIT("te", S390_FEAT_TYPE_STFL, 73, "Transactional-execution facility"), FEAT_INIT("sthyi", S390_FEAT_TYPE_STFL, 74, "Store-hypervisor-information facility"), FEAT_INIT("aefsi", S390_FEAT_TYPE_STFL, 75, "Access-exception-fetch/store-indication facility"), FEAT_INIT("msa3-base", S390_FEAT_TYPE_STFL, 76, "Message-security-assist-extension-3 
facility (excluding subfunctions)"), FEAT_INIT("msa4-base", S390_FEAT_TYPE_STFL, 77, "Message-security-assist-extension-4 facility (excluding subfunctions)"), FEAT_INIT("edat2", S390_FEAT_TYPE_STFL, 78, "Enhanced-DAT facility 2"), FEAT_INIT("dfppc", S390_FEAT_TYPE_STFL, 80, "Decimal-floating-point packed-conversion facility"), FEAT_INIT("ppa15", S390_FEAT_TYPE_STFL, 81, "PPA15 is installed"), FEAT_INIT("bpb", S390_FEAT_TYPE_STFL, 82, "Branch prediction blocking"), FEAT_INIT("vx", S390_FEAT_TYPE_STFL, 129, "Vector facility"), FEAT_INIT("iep", S390_FEAT_TYPE_STFL, 130, "Instruction-execution-protection facility"), FEAT_INIT("sea_esop2", S390_FEAT_TYPE_STFL, 131, "Side-effect-access facility and Enhanced-suppression-on-protection facility 2"), FEAT_INIT("gs", S390_FEAT_TYPE_STFL, 133, "Guarded-storage facility"), FEAT_INIT("vxpd", S390_FEAT_TYPE_STFL, 134, "Vector packed decimal facility"), FEAT_INIT("vxeh", S390_FEAT_TYPE_STFL, 135, "Vector enhancements facility"), FEAT_INIT("mepoch", S390_FEAT_TYPE_STFL, 139, "Multiple-epoch facility"), FEAT_INIT("tpei", S390_FEAT_TYPE_STFL, 144, "Test-pending-external-interruption facility"), FEAT_INIT("irbm", S390_FEAT_TYPE_STFL, 145, "Insert-reference-bits-multiple facility"), FEAT_INIT("msa8-base", S390_FEAT_TYPE_STFL, 146, "Message-security-assist-extension-8 facility (excluding subfunctions)"), FEAT_INIT("cmmnt", S390_FEAT_TYPE_STFL, 147, "CMM: ESSA-enhancement (no translate) facility"), FEAT_INIT("etoken", S390_FEAT_TYPE_STFL, 156, "Etoken facility"), /* SCLP SCCB Byte 80 - 98 (bit numbers relative to byte-80) */ FEAT_INIT("gsls", S390_FEAT_TYPE_SCLP_CONF_CHAR, 40, "SIE: Guest-storage-limit-suppression facility"), FEAT_INIT("esop", S390_FEAT_TYPE_SCLP_CONF_CHAR, 46, "Enhanced-suppression-on-protection facility"), FEAT_INIT("hpma2", S390_FEAT_TYPE_SCLP_CONF_CHAR, 90, "Host page management assist 2 Facility"), /* 91-2 */ FEAT_INIT("kss", S390_FEAT_TYPE_SCLP_CONF_CHAR, 151, "SIE: Keyless-subset facility"), /* 98-7 */ /* SCLP 
SCCB Byte 116 - 119 (bit numbers relative to byte-116) */ FEAT_INIT("64bscao", S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, 0, "SIE: 64-bit-SCAO facility"), FEAT_INIT("cmma", S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, 1, "SIE: Collaborative-memory-management assist"), FEAT_INIT("pfmfi", S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, 9, "SIE: PFMF interpretation facility"), FEAT_INIT("ibs", S390_FEAT_TYPE_SCLP_CONF_CHAR_EXT, 10, "SIE: Interlock-and-broadcast-suppression facility"), FEAT_INIT("sief2", S390_FEAT_TYPE_SCLP_CPU, 4, "SIE: interception format 2 (Virtual SIE)"), FEAT_INIT("skey", S390_FEAT_TYPE_SCLP_CPU, 5, "SIE: Storage-key facility"), FEAT_INIT("gpereh", S390_FEAT_TYPE_SCLP_CPU, 10, "SIE: Guest-PER enhancement facility"), FEAT_INIT("siif", S390_FEAT_TYPE_SCLP_CPU, 11, "SIE: Shared IPTE-interlock facility"), FEAT_INIT("sigpif", S390_FEAT_TYPE_SCLP_CPU, 12, "SIE: SIGP interpretation facility"), FEAT_INIT("ib", S390_FEAT_TYPE_SCLP_CPU, 42, "SIE: Intervention bypass facility"), FEAT_INIT("cei", S390_FEAT_TYPE_SCLP_CPU, 43, "SIE: Conditional-external-interception facility"), FEAT_INIT_MISC("dateh2", "DAT-enhancement facility 2"), FEAT_INIT_MISC("cmm", "Collaborative-memory-management facility"), FEAT_INIT_MISC("ap", "AP instructions installed"), FEAT_INIT("plo-cl", S390_FEAT_TYPE_PLO, 0, "PLO Compare and load (32 bit in general registers)"), FEAT_INIT("plo-clg", S390_FEAT_TYPE_PLO, 1, "PLO Compare and load (64 bit in parameter list)"), FEAT_INIT("plo-clgr", S390_FEAT_TYPE_PLO, 2, "PLO Compare and load (32 bit in general registers)"), FEAT_INIT("plo-clx", S390_FEAT_TYPE_PLO, 3, "PLO Compare and load (128 bit in parameter list)"), FEAT_INIT("plo-cs", S390_FEAT_TYPE_PLO, 4, "PLO Compare and swap (32 bit in general registers)"), FEAT_INIT("plo-csg", S390_FEAT_TYPE_PLO, 5, "PLO Compare and swap (64 bit in parameter list)"), FEAT_INIT("plo-csgr", S390_FEAT_TYPE_PLO, 6, "PLO Compare and swap (32 bit in general registers)"), FEAT_INIT("plo-csx", S390_FEAT_TYPE_PLO, 7, "PLO Compare and swap (128 
bit in parameter list)"), FEAT_INIT("plo-dcs", S390_FEAT_TYPE_PLO, 8, "PLO Double compare and swap (32 bit in general registers)"), FEAT_INIT("plo-dcsg", S390_FEAT_TYPE_PLO, 9, "PLO Double compare and swap (64 bit in parameter list)"), FEAT_INIT("plo-dcsgr", S390_FEAT_TYPE_PLO, 10, "PLO Double compare and swap (32 bit in general registers)"), FEAT_INIT("plo-dcsx", S390_FEAT_TYPE_PLO, 11, "PLO Double compare and swap (128 bit in parameter list)"), FEAT_INIT("plo-csst", S390_FEAT_TYPE_PLO, 12, "PLO Compare and swap and store (32 bit in general registers)"), FEAT_INIT("plo-csstg", S390_FEAT_TYPE_PLO, 13, "PLO Compare and swap and store (64 bit in parameter list)"), FEAT_INIT("plo-csstgr", S390_FEAT_TYPE_PLO, 14, "PLO Compare and swap and store (32 bit in general registers)"), FEAT_INIT("plo-csstx", S390_FEAT_TYPE_PLO, 15, "PLO Compare and swap and store (128 bit in parameter list)"), FEAT_INIT("plo-csdst", S390_FEAT_TYPE_PLO, 16, "PLO Compare and swap and double store (32 bit in general registers)"), FEAT_INIT("plo-csdstg", S390_FEAT_TYPE_PLO, 17, "PLO Compare and swap and double store (64 bit in parameter list)"), FEAT_INIT("plo-csdstgr", S390_FEAT_TYPE_PLO, 18, "PLO Compare and swap and double store (32 bit in general registers)"), FEAT_INIT("plo-csdstx", S390_FEAT_TYPE_PLO, 19, "PLO Compare and swap and double store (128 bit in parameter list)"), FEAT_INIT("plo-cstst", S390_FEAT_TYPE_PLO, 20, "PLO Compare and swap and triple store (32 bit in general registers)"), FEAT_INIT("plo-cststg", S390_FEAT_TYPE_PLO, 21, "PLO Compare and swap and triple store (64 bit in parameter list)"), FEAT_INIT("plo-cststgr", S390_FEAT_TYPE_PLO, 22, "PLO Compare and swap and triple store (32 bit in general registers)"), FEAT_INIT("plo-cststx", S390_FEAT_TYPE_PLO, 23, "PLO Compare and swap and triple store (128 bit in parameter list)"), FEAT_INIT("ptff-qto", S390_FEAT_TYPE_PTFF, 1, "PTFF Query TOD Offset"), FEAT_INIT("ptff-qsi", S390_FEAT_TYPE_PTFF, 2, "PTFF Query Steering Information"), 
FEAT_INIT("ptff-qpc", S390_FEAT_TYPE_PTFF, 3, "PTFF Query Physical Clock"), FEAT_INIT("ptff-qui", S390_FEAT_TYPE_PTFF, 4, "PTFF Query UTC Information"), FEAT_INIT("ptff-qtou", S390_FEAT_TYPE_PTFF, 5, "PTFF Query TOD Offset User"), FEAT_INIT("ptff-qsie", S390_FEAT_TYPE_PTFF, 10, "PTFF Query Steering Information Extended"), FEAT_INIT("ptff-qtoue", S390_FEAT_TYPE_PTFF, 13, "PTFF Query TOD Offset User Extended"), FEAT_INIT("ptff-sto", S390_FEAT_TYPE_PTFF, 65, "PTFF Set TOD Offset"), FEAT_INIT("ptff-stou", S390_FEAT_TYPE_PTFF, 69, "PTFF Set TOD Offset User"), FEAT_INIT("ptff-stoe", S390_FEAT_TYPE_PTFF, 73, "PTFF Set TOD Offset Extended"), FEAT_INIT("ptff-stoue", S390_FEAT_TYPE_PTFF, 77, "PTFF Set TOD Offset User Extended"), FEAT_INIT("kmac-dea", S390_FEAT_TYPE_KMAC, 1, "KMAC DEA"), FEAT_INIT("kmac-tdea-128", S390_FEAT_TYPE_KMAC, 2, "KMAC TDEA-128"), FEAT_INIT("kmac-tdea-192", S390_FEAT_TYPE_KMAC, 3, "KMAC TDEA-192"), FEAT_INIT("kmac-edea", S390_FEAT_TYPE_KMAC, 9, "KMAC Encrypted-DEA"), FEAT_INIT("kmac-etdea-128", S390_FEAT_TYPE_KMAC, 10, "KMAC Encrypted-TDEA-128"), FEAT_INIT("kmac-etdea-192", S390_FEAT_TYPE_KMAC, 11, "KMAC Encrypted-TDEA-192"), FEAT_INIT("kmac-aes-128", S390_FEAT_TYPE_KMAC, 18, "KMAC AES-128"), FEAT_INIT("kmac-aes-192", S390_FEAT_TYPE_KMAC, 19, "KMAC AES-192"), FEAT_INIT("kmac-aes-256", S390_FEAT_TYPE_KMAC, 20, "KMAC AES-256"), FEAT_INIT("kmac-eaes-128", S390_FEAT_TYPE_KMAC, 26, "KMAC Encrypted-AES-128"), FEAT_INIT("kmac-eaes-192", S390_FEAT_TYPE_KMAC, 27, "KMAC Encrypted-AES-192"), FEAT_INIT("kmac-eaes-256", S390_FEAT_TYPE_KMAC, 28, "KMAC Encrypted-AES-256"), FEAT_INIT("kmc-dea", S390_FEAT_TYPE_KMC, 1, "KMC DEA"), FEAT_INIT("kmc-tdea-128", S390_FEAT_TYPE_KMC, 2, "KMC TDEA-128"), FEAT_INIT("kmc-tdea-192", S390_FEAT_TYPE_KMC, 3, "KMC TDEA-192"), FEAT_INIT("kmc-edea", S390_FEAT_TYPE_KMC, 9, "KMC Encrypted-DEA"), FEAT_INIT("kmc-etdea-128", S390_FEAT_TYPE_KMC, 10, "KMC Encrypted-TDEA-128"), FEAT_INIT("kmc-etdea-192", S390_FEAT_TYPE_KMC, 11, "KMC 
Encrypted-TDEA-192"), FEAT_INIT("kmc-aes-128", S390_FEAT_TYPE_KMC, 18, "KMC AES-128"), FEAT_INIT("kmc-aes-192", S390_FEAT_TYPE_KMC, 19, "KMC AES-192"), FEAT_INIT("kmc-aes-256", S390_FEAT_TYPE_KMC, 20, "KMC AES-256"), FEAT_INIT("kmc-eaes-128", S390_FEAT_TYPE_KMC, 26, "KMC Encrypted-AES-128"), FEAT_INIT("kmc-eaes-192", S390_FEAT_TYPE_KMC, 27, "KMC Encrypted-AES-192"), FEAT_INIT("kmc-eaes-256", S390_FEAT_TYPE_KMC, 28, "KMC Encrypted-AES-256"), FEAT_INIT("kmc-prng", S390_FEAT_TYPE_KMC, 67, "KMC PRNG"), FEAT_INIT("km-dea", S390_FEAT_TYPE_KM, 1, "KM DEA"), FEAT_INIT("km-tdea-128", S390_FEAT_TYPE_KM, 2, "KM TDEA-128"), FEAT_INIT("km-tdea-192", S390_FEAT_TYPE_KM, 3, "KM TDEA-192"), FEAT_INIT("km-edea", S390_FEAT_TYPE_KM, 9, "KM Encrypted-DEA"), FEAT_INIT("km-etdea-128", S390_FEAT_TYPE_KM, 10, "KM Encrypted-TDEA-128"), FEAT_INIT("km-etdea-192", S390_FEAT_TYPE_KM, 11, "KM Encrypted-TDEA-192"), FEAT_INIT("km-aes-128", S390_FEAT_TYPE_KM, 18, "KM AES-128"), FEAT_INIT("km-aes-192", S390_FEAT_TYPE_KM, 19, "KM AES-192"), FEAT_INIT("km-aes-256", S390_FEAT_TYPE_KM, 20, "KM AES-256"), FEAT_INIT("km-eaes-128", S390_FEAT_TYPE_KM, 26, "KM Encrypted-AES-128"), FEAT_INIT("km-eaes-192", S390_FEAT_TYPE_KM, 27, "KM Encrypted-AES-192"), FEAT_INIT("km-eaes-256", S390_FEAT_TYPE_KM, 28, "KM Encrypted-AES-256"), FEAT_INIT("km-xts-aes-128", S390_FEAT_TYPE_KM, 50, "KM XTS-AES-128"), FEAT_INIT("km-xts-aes-256", S390_FEAT_TYPE_KM, 52, "KM XTS-AES-256"), FEAT_INIT("km-xts-eaes-128", S390_FEAT_TYPE_KM, 58, "KM XTS-Encrypted-AES-128"), FEAT_INIT("km-xts-eaes-256", S390_FEAT_TYPE_KM, 60, "KM XTS-Encrypted-AES-256"), FEAT_INIT("kimd-sha-1", S390_FEAT_TYPE_KIMD, 1, "KIMD SHA-1"), FEAT_INIT("kimd-sha-256", S390_FEAT_TYPE_KIMD, 2, "KIMD SHA-256"), FEAT_INIT("kimd-sha-512", S390_FEAT_TYPE_KIMD, 3, "KIMD SHA-512"), FEAT_INIT("kimd-sha3-224", S390_FEAT_TYPE_KIMD, 32, "KIMD SHA3-224"), FEAT_INIT("kimd-sha3-256", S390_FEAT_TYPE_KIMD, 33, "KIMD SHA3-256"), FEAT_INIT("kimd-sha3-384", S390_FEAT_TYPE_KIMD, 34, "KIMD 
SHA3-384"), FEAT_INIT("kimd-sha3-512", S390_FEAT_TYPE_KIMD, 35, "KIMD SHA3-512"), FEAT_INIT("kimd-shake-128", S390_FEAT_TYPE_KIMD, 36, "KIMD SHAKE-128"), FEAT_INIT("kimd-shake-256", S390_FEAT_TYPE_KIMD, 37, "KIMD SHAKE-256"), FEAT_INIT("kimd-ghash", S390_FEAT_TYPE_KIMD, 65, "KIMD GHASH"), FEAT_INIT("klmd-sha-1", S390_FEAT_TYPE_KLMD, 1, "KLMD SHA-1"), FEAT_INIT("klmd-sha-256", S390_FEAT_TYPE_KLMD, 2, "KLMD SHA-256"), FEAT_INIT("klmd-sha-512", S390_FEAT_TYPE_KLMD, 3, "KLMD SHA-512"), FEAT_INIT("klmd-sha3-224", S390_FEAT_TYPE_KLMD, 32, "KLMD SHA3-224"), FEAT_INIT("klmd-sha3-256", S390_FEAT_TYPE_KLMD, 33, "KLMD SHA3-256"), FEAT_INIT("klmd-sha3-384", S390_FEAT_TYPE_KLMD, 34, "KLMD SHA3-384"), FEAT_INIT("klmd-sha3-512", S390_FEAT_TYPE_KLMD, 35, "KLMD SHA3-512"), FEAT_INIT("klmd-shake-128", S390_FEAT_TYPE_KLMD, 36, "KLMD SHAKE-128"), FEAT_INIT("klmd-shake-256", S390_FEAT_TYPE_KLMD, 37, "KLMD SHAKE-256"), FEAT_INIT("pckmo-edea", S390_FEAT_TYPE_PCKMO, 1, "PCKMO Encrypted-DEA-Key"), FEAT_INIT("pckmo-etdea-128", S390_FEAT_TYPE_PCKMO, 2, "PCKMO Encrypted-TDEA-128-Key"), FEAT_INIT("pckmo-etdea-192", S390_FEAT_TYPE_PCKMO, 3, "PCKMO Encrypted-TDEA-192-Key"), FEAT_INIT("pckmo-aes-128", S390_FEAT_TYPE_PCKMO, 18, "PCKMO Encrypted-AES-128-Key"), FEAT_INIT("pckmo-aes-192", S390_FEAT_TYPE_PCKMO, 19, "PCKMO Encrypted-AES-192-Key"), FEAT_INIT("pckmo-aes-256", S390_FEAT_TYPE_PCKMO, 20, "PCKMO Encrypted-AES-256-Key"), FEAT_INIT("kmctr-dea", S390_FEAT_TYPE_KMCTR, 1, "KMCTR DEA"), FEAT_INIT("kmctr-tdea-128", S390_FEAT_TYPE_KMCTR, 2, "KMCTR TDEA-128"), FEAT_INIT("kmctr-tdea-192", S390_FEAT_TYPE_KMCTR, 3, "KMCTR TDEA-192"), FEAT_INIT("kmctr-edea", S390_FEAT_TYPE_KMCTR, 9, "KMCTR Encrypted-DEA"), FEAT_INIT("kmctr-etdea-128", S390_FEAT_TYPE_KMCTR, 10, "KMCTR Encrypted-TDEA-128"), FEAT_INIT("kmctr-etdea-192", S390_FEAT_TYPE_KMCTR, 11, "KMCTR Encrypted-TDEA-192"), FEAT_INIT("kmctr-aes-128", S390_FEAT_TYPE_KMCTR, 18, "KMCTR AES-128"), FEAT_INIT("kmctr-aes-192", S390_FEAT_TYPE_KMCTR, 19, "KMCTR 
AES-192"), FEAT_INIT("kmctr-aes-256", S390_FEAT_TYPE_KMCTR, 20, "KMCTR AES-256"), FEAT_INIT("kmctr-eaes-128", S390_FEAT_TYPE_KMCTR, 26, "KMCTR Encrypted-AES-128"), FEAT_INIT("kmctr-eaes-192", S390_FEAT_TYPE_KMCTR, 27, "KMCTR Encrypted-AES-192"), FEAT_INIT("kmctr-eaes-256", S390_FEAT_TYPE_KMCTR, 28, "KMCTR Encrypted-AES-256"), FEAT_INIT("kmf-dea", S390_FEAT_TYPE_KMF, 1, "KMF DEA"), FEAT_INIT("kmf-tdea-128", S390_FEAT_TYPE_KMF, 2, "KMF TDEA-128"), FEAT_INIT("kmf-tdea-192", S390_FEAT_TYPE_KMF, 3, "KMF TDEA-192"), FEAT_INIT("kmf-edea", S390_FEAT_TYPE_KMF, 9, "KMF Encrypted-DEA"), FEAT_INIT("kmf-etdea-128", S390_FEAT_TYPE_KMF, 10, "KMF Encrypted-TDEA-128"), FEAT_INIT("kmf-etdea-192", S390_FEAT_TYPE_KMF, 11, "KMF Encrypted-TDEA-192"), FEAT_INIT("kmf-aes-128", S390_FEAT_TYPE_KMF, 18, "KMF AES-128"), FEAT_INIT("kmf-aes-192", S390_FEAT_TYPE_KMF, 19, "KMF AES-192"), FEAT_INIT("kmf-aes-256", S390_FEAT_TYPE_KMF, 20, "KMF AES-256"), FEAT_INIT("kmf-eaes-128", S390_FEAT_TYPE_KMF, 26, "KMF Encrypted-AES-128"), FEAT_INIT("kmf-eaes-192", S390_FEAT_TYPE_KMF, 27, "KMF Encrypted-AES-192"), FEAT_INIT("kmf-eaes-256", S390_FEAT_TYPE_KMF, 28, "KMF Encrypted-AES-256"), FEAT_INIT("kmo-dea", S390_FEAT_TYPE_KMO, 1, "KMO DEA"), FEAT_INIT("kmo-tdea-128", S390_FEAT_TYPE_KMO, 2, "KMO TDEA-128"), FEAT_INIT("kmo-tdea-192", S390_FEAT_TYPE_KMO, 3, "KMO TDEA-192"), FEAT_INIT("kmo-edea", S390_FEAT_TYPE_KMO, 9, "KMO Encrypted-DEA"), FEAT_INIT("kmo-etdea-128", S390_FEAT_TYPE_KMO, 10, "KMO Encrypted-TDEA-128"), FEAT_INIT("kmo-etdea-192", S390_FEAT_TYPE_KMO, 11, "KMO Encrypted-TDEA-192"), FEAT_INIT("kmo-aes-128", S390_FEAT_TYPE_KMO, 18, "KMO AES-128"), FEAT_INIT("kmo-aes-192", S390_FEAT_TYPE_KMO, 19, "KMO AES-192"), FEAT_INIT("kmo-aes-256", S390_FEAT_TYPE_KMO, 20, "KMO AES-256"), FEAT_INIT("kmo-eaes-128", S390_FEAT_TYPE_KMO, 26, "KMO Encrypted-AES-128"), FEAT_INIT("kmo-eaes-192", S390_FEAT_TYPE_KMO, 27, "KMO Encrypted-AES-192"), FEAT_INIT("kmo-eaes-256", S390_FEAT_TYPE_KMO, 28, "KMO Encrypted-AES-256"), 
FEAT_INIT("pcc-cmac-dea", S390_FEAT_TYPE_PCC, 1, "PCC Compute-Last-Block-CMAC-Using-DEA"), FEAT_INIT("pcc-cmac-tdea-128", S390_FEAT_TYPE_PCC, 2, "PCC Compute-Last-Block-CMAC-Using-TDEA-128"), FEAT_INIT("pcc-cmac-tdea-192", S390_FEAT_TYPE_PCC, 3, "PCC Compute-Last-Block-CMAC-Using-TDEA-192"), FEAT_INIT("pcc-cmac-edea", S390_FEAT_TYPE_PCC, 9, "PCC Compute-Last-Block-CMAC-Using-Encrypted-DEA"), FEAT_INIT("pcc-cmac-etdea-128", S390_FEAT_TYPE_PCC, 10, "PCC Compute-Last-Block-CMAC-Using-Encrypted-TDEA-128"), FEAT_INIT("pcc-cmac-etdea-192", S390_FEAT_TYPE_PCC, 11, "PCC Compute-Last-Block-CMAC-Using-EncryptedTDEA-192"), FEAT_INIT("pcc-cmac-aes-128", S390_FEAT_TYPE_PCC, 18, "PCC Compute-Last-Block-CMAC-Using-AES-128"), FEAT_INIT("pcc-cmac-aes-192", S390_FEAT_TYPE_PCC, 19, "PCC Compute-Last-Block-CMAC-Using-AES-192"), FEAT_INIT("pcc-cmac-eaes-256", S390_FEAT_TYPE_PCC, 20, "PCC Compute-Last-Block-CMAC-Using-AES-256"), FEAT_INIT("pcc-cmac-eaes-128", S390_FEAT_TYPE_PCC, 26, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-128"), FEAT_INIT("pcc-cmac-eaes-192", S390_FEAT_TYPE_PCC, 27, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-192"), FEAT_INIT("pcc-cmac-eaes-256", S390_FEAT_TYPE_PCC, 28, "PCC Compute-Last-Block-CMAC-Using-Encrypted-AES-256"), FEAT_INIT("pcc-xts-aes-128", S390_FEAT_TYPE_PCC, 50, "PCC Compute-XTS-Parameter-Using-AES-128"), FEAT_INIT("pcc-xts-aes-256", S390_FEAT_TYPE_PCC, 52, "PCC Compute-XTS-Parameter-Using-AES-256"), FEAT_INIT("pcc-xts-eaes-128", S390_FEAT_TYPE_PCC, 58, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-128"), FEAT_INIT("pcc-xts-eaes-256", S390_FEAT_TYPE_PCC, 60, "PCC Compute-XTS-Parameter-Using-Encrypted-AES-256"), FEAT_INIT("ppno-sha-512-drng", S390_FEAT_TYPE_PPNO, 3, "PPNO SHA-512-DRNG"), FEAT_INIT("prno-trng-qrtcr", S390_FEAT_TYPE_PPNO, 112, "PRNO TRNG-Query-Raw-to-Conditioned-Ratio"), FEAT_INIT("prno-trng", S390_FEAT_TYPE_PPNO, 114, "PRNO TRNG"), FEAT_INIT("kma-gcm-aes-128", S390_FEAT_TYPE_KMA, 18, "KMA GCM-AES-128"), 
FEAT_INIT("kma-gcm-aes-192", S390_FEAT_TYPE_KMA, 19, "KMA GCM-AES-192"), FEAT_INIT("kma-gcm-aes-256", S390_FEAT_TYPE_KMA, 20, "KMA GCM-AES-256"), FEAT_INIT("kma-gcm-eaes-128", S390_FEAT_TYPE_KMA, 26, "KMA GCM-Encrypted-AES-128"), FEAT_INIT("kma-gcm-eaes-192", S390_FEAT_TYPE_KMA, 27, "KMA GCM-Encrypted-AES-192"), FEAT_INIT("kma-gcm-eaes-256", S390_FEAT_TYPE_KMA, 28, "KMA GCM-Encrypted-AES-256"), }; const S390FeatDef *s390_feat_def(S390Feat feat) { return &s390_features[feat]; } S390Feat s390_feat_by_type_and_bit(S390FeatType type, int bit) { S390Feat feat; for (feat = 0; feat < ARRAY_SIZE(s390_features); feat++) { if (s390_features[feat].type == type && s390_features[feat].bit == bit) { return feat; } } return S390_FEAT_MAX; } void s390_init_feat_bitmap(const S390FeatInit init, S390FeatBitmap bitmap) { int i, j; for (i = 0; i < (S390_FEAT_MAX / 64 + 1); i++) { if (init[i]) { for (j = 0; j < 64; j++) { if (init[i] & 1ULL << j) { set_bit(i * 64 + j, bitmap); } } } } } void s390_fill_feat_block(const S390FeatBitmap features, S390FeatType type, uint8_t *data) { S390Feat feat; int bit_nr; switch (type) { case S390_FEAT_TYPE_STFL: if (test_bit(S390_FEAT_ZARCH, features)) { /* Features that are always active */ set_be_bit(2, data); /* z/Architecture */ set_be_bit(138, data); /* Configuration-z-architectural-mode */ } break; case S390_FEAT_TYPE_PTFF: case S390_FEAT_TYPE_KMAC: case S390_FEAT_TYPE_KMC: case S390_FEAT_TYPE_KM: case S390_FEAT_TYPE_KIMD: case S390_FEAT_TYPE_KLMD: case S390_FEAT_TYPE_PCKMO: case S390_FEAT_TYPE_KMCTR: case S390_FEAT_TYPE_KMF: case S390_FEAT_TYPE_KMO: case S390_FEAT_TYPE_PCC: case S390_FEAT_TYPE_PPNO: case S390_FEAT_TYPE_KMA: set_be_bit(0, data); /* query is always available */ break; default: break; }; feat = find_first_bit(features, S390_FEAT_MAX); while (feat < S390_FEAT_MAX) { if (s390_features[feat].type == type) { bit_nr = s390_features[feat].bit; /* big endian on uint8_t array */ set_be_bit(bit_nr, data); } feat = find_next_bit(features, 
S390_FEAT_MAX, feat + 1); } } void s390_add_from_feat_block(S390FeatBitmap features, S390FeatType type, uint8_t *data) { int nr_bits, le_bit; switch (type) { case S390_FEAT_TYPE_STFL: nr_bits = 16384; break; case S390_FEAT_TYPE_PLO: nr_bits = 256; break; default: /* all cpu subfunctions have 128 bit */ nr_bits = 128; }; le_bit = find_first_bit((unsigned long *) data, nr_bits); while (le_bit < nr_bits) { /* convert the bit number to a big endian bit nr */ S390Feat feat = s390_feat_by_type_and_bit(type, BE_BIT_NR(le_bit)); /* ignore unknown bits */ if (feat < S390_FEAT_MAX) { set_bit(feat, features); } le_bit = find_next_bit((unsigned long *) data, nr_bits, le_bit + 1); } } void s390_feat_bitmap_to_ascii(const S390FeatBitmap features, void *opaque, void (*fn)(const char *name, void *opaque)) { S390FeatBitmap bitmap, tmp; S390FeatGroup group; S390Feat feat; bitmap_copy(bitmap, features, S390_FEAT_MAX); /* process whole groups first */ for (group = 0; group < S390_FEAT_GROUP_MAX; group++) { const S390FeatGroupDef *def = s390_feat_group_def(group); bitmap_and(tmp, bitmap, def->feat, S390_FEAT_MAX); if (bitmap_equal(tmp, def->feat, S390_FEAT_MAX)) { bitmap_andnot(bitmap, bitmap, def->feat, S390_FEAT_MAX); fn(def->name, opaque); } } /* report leftovers as separate features */ feat = find_first_bit(bitmap, S390_FEAT_MAX); while (feat < S390_FEAT_MAX) { fn(s390_feat_def(feat)->name, opaque); feat = find_next_bit(bitmap, S390_FEAT_MAX, feat + 1); }; } #define FEAT_GROUP_INIT(_name, _group, _desc) \ { \ .name = _name, \ .desc = _desc, \ .init = { S390_FEAT_GROUP_LIST_ ## _group }, \ } /* indexed by feature group number for easy lookup */ static S390FeatGroupDef s390_feature_groups[] = { FEAT_GROUP_INIT("plo", PLO, "Perform-locked-operation facility"), FEAT_GROUP_INIT("tods", TOD_CLOCK_STEERING, "Tod-clock-steering facility"), FEAT_GROUP_INIT("gen13ptff", GEN13_PTFF, "PTFF enhancements introduced with z13"), FEAT_GROUP_INIT("msa", MSA, "Message-security-assist facility"), 
FEAT_GROUP_INIT("msa1", MSA_EXT_1, "Message-security-assist-extension 1 facility"), FEAT_GROUP_INIT("msa2", MSA_EXT_2, "Message-security-assist-extension 2 facility"), FEAT_GROUP_INIT("msa3", MSA_EXT_3, "Message-security-assist-extension 3 facility"), FEAT_GROUP_INIT("msa4", MSA_EXT_4, "Message-security-assist-extension 4 facility"), FEAT_GROUP_INIT("msa5", MSA_EXT_5, "Message-security-assist-extension 5 facility"), FEAT_GROUP_INIT("msa6", MSA_EXT_6, "Message-security-assist-extension 6 facility"), FEAT_GROUP_INIT("msa7", MSA_EXT_7, "Message-security-assist-extension 7 facility"), FEAT_GROUP_INIT("msa8", MSA_EXT_8, "Message-security-assist-extension 8 facility"), FEAT_GROUP_INIT("mepochptff", MULTIPLE_EPOCH_PTFF, "PTFF enhancements introduced with Multiple-epoch facility"), }; const S390FeatGroupDef *s390_feat_group_def(S390FeatGroup group) { return &s390_feature_groups[group]; } static void init_groups(void) { int i; /* init all bitmaps from gnerated data initially */ for (i = 0; i < ARRAY_SIZE(s390_feature_groups); i++) { s390_init_feat_bitmap(s390_feature_groups[i].init, s390_feature_groups[i].feat); } } type_init(init_groups)
pmp-tool/PMP
src/qemu/src-pmp/hw/virtio/vhost-user-scsi-pci.c
/* * Vhost user scsi PCI Bindings * * Copyright (c) 2016 Nutanix Inc. All rights reserved. * * Author: * <NAME> <<EMAIL>> * * This work is largely based on the "vhost-scsi" implementation by: * <NAME> <<EMAIL>> * <NAME> <<EMAIL>> * * This work is licensed under the terms of the GNU LGPL, version 2 or later. * See the COPYING.LIB file in the top-level directory. * */ #include "qemu/osdep.h" #include "standard-headers/linux/virtio_pci.h" #include "hw/virtio/vhost-user-scsi.h" #include "hw/virtio/virtio.h" #include "hw/virtio/virtio-scsi.h" #include "hw/pci/pci.h" #include "qapi/error.h" #include "qemu/error-report.h" #include "hw/pci/msi.h" #include "hw/pci/msix.h" #include "hw/loader.h" #include "sysemu/kvm.h" #include "virtio-pci.h" typedef struct VHostUserSCSIPCI VHostUserSCSIPCI; #define TYPE_VHOST_USER_SCSI_PCI "vhost-user-scsi-pci-base" #define VHOST_USER_SCSI_PCI(obj) \ OBJECT_CHECK(VHostUserSCSIPCI, (obj), TYPE_VHOST_USER_SCSI_PCI) struct VHostUserSCSIPCI { VirtIOPCIProxy parent_obj; VHostUserSCSI vdev; }; static Property vhost_user_scsi_pci_properties[] = { DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, DEV_NVECTORS_UNSPECIFIED), DEFINE_PROP_END_OF_LIST(), }; static void vhost_user_scsi_pci_realize(VirtIOPCIProxy *vpci_dev, Error **errp) { VHostUserSCSIPCI *dev = VHOST_USER_SCSI_PCI(vpci_dev); DeviceState *vdev = DEVICE(&dev->vdev); VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(vdev); if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) { vpci_dev->nvectors = vs->conf.num_queues + 3; } qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus)); object_property_set_bool(OBJECT(vdev), true, "realized", errp); } static void vhost_user_scsi_pci_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass); PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass); k->realize = vhost_user_scsi_pci_realize; set_bit(DEVICE_CATEGORY_STORAGE, dc->categories); dc->props = vhost_user_scsi_pci_properties; 
pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI; pcidev_k->revision = 0x00; pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI; } static void vhost_user_scsi_pci_instance_init(Object *obj) { VHostUserSCSIPCI *dev = VHOST_USER_SCSI_PCI(obj); virtio_instance_init_common(obj, &dev->vdev, sizeof(dev->vdev), TYPE_VHOST_USER_SCSI); object_property_add_alias(obj, "bootindex", OBJECT(&dev->vdev), "bootindex", &error_abort); } static const VirtioPCIDeviceTypeInfo vhost_user_scsi_pci_info = { .base_name = TYPE_VHOST_USER_SCSI_PCI, .generic_name = "vhost-user-scsi-pci", .transitional_name = "vhost-user-scsi-pci-transitional", .non_transitional_name = "vhost-user-scsi-pci-non-transitional", .instance_size = sizeof(VHostUserSCSIPCI), .instance_init = vhost_user_scsi_pci_instance_init, .class_init = vhost_user_scsi_pci_class_init, }; static void vhost_user_scsi_pci_register(void) { virtio_pci_types_register(&vhost_user_scsi_pci_info); } type_init(vhost_user_scsi_pci_register)
pmp-tool/PMP
src/qemu/src-pmp/tests/virtio-blk-test.c
/* * QTest testcase for VirtIO Block Device * * Copyright (c) 2014 SUSE LINUX Products GmbH * Copyright (c) 2014 <NAME> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "libqtest.h" #include "qemu/bswap.h" #include "standard-headers/linux/virtio_blk.h" #include "standard-headers/linux/virtio_pci.h" #include "libqos/qgraph.h" #include "libqos/virtio-blk.h" /* TODO actually test the results and get rid of this */ #define qmp_discard_response(...) qobject_unref(qmp(__VA_ARGS__)) #define TEST_IMAGE_SIZE (64 * 1024 * 1024) #define QVIRTIO_BLK_TIMEOUT_US (30 * 1000 * 1000) #define PCI_SLOT_HP 0x06 typedef struct QVirtioBlkReq { uint32_t type; uint32_t ioprio; uint64_t sector; char *data; uint8_t status; } QVirtioBlkReq; #ifdef HOST_WORDS_BIGENDIAN const bool host_is_big_endian = true; #else const bool host_is_big_endian; /* false */ #endif static void drive_destroy(void *path) { unlink(path); g_free(path); qos_invalidate_command_line(); } static char *drive_create(void) { int fd, ret; char *t_path = g_strdup("/tmp/qtest.XXXXXX"); /* Create a temporary raw image */ fd = mkstemp(t_path); g_assert_cmpint(fd, >=, 0); ret = ftruncate(fd, TEST_IMAGE_SIZE); g_assert_cmpint(ret, ==, 0); close(fd); g_test_queue_destroy(drive_destroy, t_path); return t_path; } static inline void virtio_blk_fix_request(QVirtioDevice *d, QVirtioBlkReq *req) { if (qvirtio_is_big_endian(d) != host_is_big_endian) { req->type = bswap32(req->type); req->ioprio = bswap32(req->ioprio); req->sector = bswap64(req->sector); } } static inline void virtio_blk_fix_dwz_hdr(QVirtioDevice *d, struct virtio_blk_discard_write_zeroes *dwz_hdr) { if (qvirtio_is_big_endian(d) != host_is_big_endian) { dwz_hdr->sector = bswap64(dwz_hdr->sector); dwz_hdr->num_sectors = bswap32(dwz_hdr->num_sectors); dwz_hdr->flags = bswap32(dwz_hdr->flags); } } static uint64_t virtio_blk_request(QGuestAllocator *alloc, 
QVirtioDevice *d, QVirtioBlkReq *req, uint64_t data_size) { uint64_t addr; uint8_t status = 0xFF; switch (req->type) { case VIRTIO_BLK_T_IN: case VIRTIO_BLK_T_OUT: g_assert_cmpuint(data_size % 512, ==, 0); break; case VIRTIO_BLK_T_DISCARD: case VIRTIO_BLK_T_WRITE_ZEROES: g_assert_cmpuint(data_size % sizeof(struct virtio_blk_discard_write_zeroes), ==, 0); break; default: g_assert_cmpuint(data_size, ==, 0); } addr = guest_alloc(alloc, sizeof(*req) + data_size); virtio_blk_fix_request(d, req); memwrite(addr, req, 16); memwrite(addr + 16, req->data, data_size); memwrite(addr + 16 + data_size, &status, sizeof(status)); return addr; } static void test_basic(QVirtioDevice *dev, QGuestAllocator *alloc, QVirtQueue *vq) { QVirtioBlkReq req; uint64_t req_addr; uint64_t capacity; uint32_t features; uint32_t free_head; uint8_t status; char *data; capacity = qvirtio_config_readq(dev, 0); g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512); features = qvirtio_get_features(dev); features = features & ~(QVIRTIO_F_BAD_FEATURE | (1u << VIRTIO_RING_F_INDIRECT_DESC) | (1u << VIRTIO_RING_F_EVENT_IDX) | (1u << VIRTIO_BLK_F_SCSI)); qvirtio_set_features(dev, features); qvirtio_set_driver_ok(dev); /* Write and read with 3 descriptor layout */ /* Write request */ req.type = VIRTIO_BLK_T_OUT; req.ioprio = 1; req.sector = 0; req.data = g_malloc0(512); strcpy(req.data, "TEST"); req_addr = virtio_blk_request(alloc, dev, &req, 512); g_free(req.data); free_head = qvirtqueue_add(vq, req_addr, 16, false, true); qvirtqueue_add(vq, req_addr + 16, 512, false, true); qvirtqueue_add(vq, req_addr + 528, 1, true, false); qvirtqueue_kick(dev, vq, free_head); qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); guest_free(alloc, req_addr); /* Read request */ req.type = VIRTIO_BLK_T_IN; req.ioprio = 1; req.sector = 0; req.data = g_malloc0(512); req_addr = virtio_blk_request(alloc, dev, &req, 512); g_free(req.data); free_head 
= qvirtqueue_add(vq, req_addr, 16, false, true); qvirtqueue_add(vq, req_addr + 16, 512, true, true); qvirtqueue_add(vq, req_addr + 528, 1, true, false); qvirtqueue_kick(dev, vq, free_head); qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); data = g_malloc0(512); memread(req_addr + 16, data, 512); g_assert_cmpstr(data, ==, "TEST"); g_free(data); guest_free(alloc, req_addr); if (features & (1u << VIRTIO_BLK_F_WRITE_ZEROES)) { struct virtio_blk_discard_write_zeroes dwz_hdr; void *expected; /* * WRITE_ZEROES request on the same sector of previous test where * we wrote "TEST". */ req.type = VIRTIO_BLK_T_WRITE_ZEROES; req.data = (char *) &dwz_hdr; dwz_hdr.sector = 0; dwz_hdr.num_sectors = 1; dwz_hdr.flags = 0; virtio_blk_fix_dwz_hdr(dev, &dwz_hdr); req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr)); free_head = qvirtqueue_add(vq, req_addr, 16, false, true); qvirtqueue_add(vq, req_addr + 16, sizeof(dwz_hdr), false, true); qvirtqueue_add(vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, false); qvirtqueue_kick(dev, vq, free_head); qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 16 + sizeof(dwz_hdr)); g_assert_cmpint(status, ==, 0); guest_free(alloc, req_addr); /* Read request to check if the sector contains all zeroes */ req.type = VIRTIO_BLK_T_IN; req.ioprio = 1; req.sector = 0; req.data = g_malloc0(512); req_addr = virtio_blk_request(alloc, dev, &req, 512); g_free(req.data); free_head = qvirtqueue_add(vq, req_addr, 16, false, true); qvirtqueue_add(vq, req_addr + 16, 512, true, true); qvirtqueue_add(vq, req_addr + 528, 1, true, false); qvirtqueue_kick(dev, vq, free_head); qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); data = g_malloc(512); expected = g_malloc0(512); memread(req_addr + 16, data, 512); g_assert_cmpmem(data, 512, 
expected, 512); g_free(expected); g_free(data); guest_free(alloc, req_addr); } if (features & (1u << VIRTIO_BLK_F_DISCARD)) { struct virtio_blk_discard_write_zeroes dwz_hdr; req.type = VIRTIO_BLK_T_DISCARD; req.data = (char *) &dwz_hdr; dwz_hdr.sector = 0; dwz_hdr.num_sectors = 1; dwz_hdr.flags = 0; virtio_blk_fix_dwz_hdr(dev, &dwz_hdr); req_addr = virtio_blk_request(alloc, dev, &req, sizeof(dwz_hdr)); free_head = qvirtqueue_add(vq, req_addr, 16, false, true); qvirtqueue_add(vq, req_addr + 16, sizeof(dwz_hdr), false, true); qvirtqueue_add(vq, req_addr + 16 + sizeof(dwz_hdr), 1, true, false); qvirtqueue_kick(dev, vq, free_head); qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 16 + sizeof(dwz_hdr)); g_assert_cmpint(status, ==, 0); guest_free(alloc, req_addr); } if (features & (1u << VIRTIO_F_ANY_LAYOUT)) { /* Write and read with 2 descriptor layout */ /* Write request */ req.type = VIRTIO_BLK_T_OUT; req.ioprio = 1; req.sector = 1; req.data = g_malloc0(512); strcpy(req.data, "TEST"); req_addr = virtio_blk_request(alloc, dev, &req, 512); g_free(req.data); free_head = qvirtqueue_add(vq, req_addr, 528, false, true); qvirtqueue_add(vq, req_addr + 528, 1, true, false); qvirtqueue_kick(dev, vq, free_head); qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); guest_free(alloc, req_addr); /* Read request */ req.type = VIRTIO_BLK_T_IN; req.ioprio = 1; req.sector = 1; req.data = g_malloc0(512); req_addr = virtio_blk_request(alloc, dev, &req, 512); g_free(req.data); free_head = qvirtqueue_add(vq, req_addr, 16, false, true); qvirtqueue_add(vq, req_addr + 16, 513, true, false); qvirtqueue_kick(dev, vq, free_head); qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); data = g_malloc0(512); memread(req_addr + 16, data, 512); g_assert_cmpstr(data, ==, "TEST"); 
g_free(data); guest_free(alloc, req_addr); } } static void basic(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtioBlk *blk_if = obj; QVirtQueue *vq; vq = qvirtqueue_setup(blk_if->vdev, t_alloc, 0); test_basic(blk_if->vdev, t_alloc, vq); qvirtqueue_cleanup(blk_if->vdev->bus, vq, t_alloc); } static void indirect(void *obj, void *u_data, QGuestAllocator *t_alloc) { QVirtQueue *vq; QVirtioBlk *blk_if = obj; QVirtioDevice *dev = blk_if->vdev; QVirtioBlkReq req; QVRingIndirectDesc *indirect; uint64_t req_addr; uint64_t capacity; uint32_t features; uint32_t free_head; uint8_t status; char *data; capacity = qvirtio_config_readq(dev, 0); g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512); features = qvirtio_get_features(dev); g_assert_cmphex(features & (1u << VIRTIO_RING_F_INDIRECT_DESC), !=, 0); features = features & ~(QVIRTIO_F_BAD_FEATURE | (1u << VIRTIO_RING_F_EVENT_IDX) | (1u << VIRTIO_BLK_F_SCSI)); qvirtio_set_features(dev, features); vq = qvirtqueue_setup(dev, t_alloc, 0); qvirtio_set_driver_ok(dev); /* Write request */ req.type = VIRTIO_BLK_T_OUT; req.ioprio = 1; req.sector = 0; req.data = g_malloc0(512); strcpy(req.data, "TEST"); req_addr = virtio_blk_request(t_alloc, dev, &req, 512); g_free(req.data); indirect = qvring_indirect_desc_setup(dev, t_alloc, 2); qvring_indirect_desc_add(indirect, req_addr, 528, false); qvring_indirect_desc_add(indirect, req_addr + 528, 1, true); free_head = qvirtqueue_add_indirect(vq, indirect); qvirtqueue_kick(dev, vq, free_head); qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); g_free(indirect); guest_free(t_alloc, req_addr); /* Read request */ req.type = VIRTIO_BLK_T_IN; req.ioprio = 1; req.sector = 0; req.data = g_malloc0(512); strcpy(req.data, "TEST"); req_addr = virtio_blk_request(t_alloc, dev, &req, 512); g_free(req.data); indirect = qvring_indirect_desc_setup(dev, t_alloc, 2); qvring_indirect_desc_add(indirect, req_addr, 16, 
false); qvring_indirect_desc_add(indirect, req_addr + 16, 513, true); free_head = qvirtqueue_add_indirect(vq, indirect); qvirtqueue_kick(dev, vq, free_head); qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); data = g_malloc0(512); memread(req_addr + 16, data, 512); g_assert_cmpstr(data, ==, "TEST"); g_free(data); g_free(indirect); guest_free(t_alloc, req_addr); qvirtqueue_cleanup(dev->bus, vq, t_alloc); } static void config(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtioBlk *blk_if = obj; QVirtioDevice *dev = blk_if->vdev; int n_size = TEST_IMAGE_SIZE / 2; uint64_t capacity; capacity = qvirtio_config_readq(dev, 0); g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512); qvirtio_set_driver_ok(dev); qmp_discard_response("{ 'execute': 'block_resize', " " 'arguments': { 'device': 'drive0', " " 'size': %d } }", n_size); qvirtio_wait_config_isr(dev, QVIRTIO_BLK_TIMEOUT_US); capacity = qvirtio_config_readq(dev, 0); g_assert_cmpint(capacity, ==, n_size / 512); } static void msix(void *obj, void *u_data, QGuestAllocator *t_alloc) { QVirtQueue *vq; QVirtioBlkPCI *blk = obj; QVirtioPCIDevice *pdev = &blk->pci_vdev; QVirtioDevice *dev = &pdev->vdev; QVirtioBlkReq req; int n_size = TEST_IMAGE_SIZE / 2; uint64_t req_addr; uint64_t capacity; uint32_t features; uint32_t free_head; uint8_t status; char *data; QOSGraphObject *blk_object = obj; QPCIDevice *pci_dev = blk_object->get_driver(blk_object, "pci-device"); if (qpci_check_buggy_msi(pci_dev)) { return; } qpci_msix_enable(pdev->pdev); qvirtio_pci_set_msix_configuration_vector(pdev, t_alloc, 0); capacity = qvirtio_config_readq(dev, 0); g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512); features = qvirtio_get_features(dev); features = features & ~(QVIRTIO_F_BAD_FEATURE | (1u << VIRTIO_RING_F_INDIRECT_DESC) | (1u << VIRTIO_RING_F_EVENT_IDX) | (1u << VIRTIO_BLK_F_SCSI)); qvirtio_set_features(dev, features); vq = qvirtqueue_setup(dev, 
t_alloc, 0); qvirtqueue_pci_msix_setup(pdev, (QVirtQueuePCI *)vq, t_alloc, 1); qvirtio_set_driver_ok(dev); qmp_discard_response("{ 'execute': 'block_resize', " " 'arguments': { 'device': 'drive0', " " 'size': %d } }", n_size); qvirtio_wait_config_isr(dev, QVIRTIO_BLK_TIMEOUT_US); capacity = qvirtio_config_readq(dev, 0); g_assert_cmpint(capacity, ==, n_size / 512); /* Write request */ req.type = VIRTIO_BLK_T_OUT; req.ioprio = 1; req.sector = 0; req.data = g_malloc0(512); strcpy(req.data, "TEST"); req_addr = virtio_blk_request(t_alloc, dev, &req, 512); g_free(req.data); free_head = qvirtqueue_add(vq, req_addr, 16, false, true); qvirtqueue_add(vq, req_addr + 16, 512, false, true); qvirtqueue_add(vq, req_addr + 528, 1, true, false); qvirtqueue_kick(dev, vq, free_head); qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); guest_free(t_alloc, req_addr); /* Read request */ req.type = VIRTIO_BLK_T_IN; req.ioprio = 1; req.sector = 0; req.data = g_malloc0(512); req_addr = virtio_blk_request(t_alloc, dev, &req, 512); g_free(req.data); free_head = qvirtqueue_add(vq, req_addr, 16, false, true); qvirtqueue_add(vq, req_addr + 16, 512, true, true); qvirtqueue_add(vq, req_addr + 528, 1, true, false); qvirtqueue_kick(dev, vq, free_head); qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); data = g_malloc0(512); memread(req_addr + 16, data, 512); g_assert_cmpstr(data, ==, "TEST"); g_free(data); guest_free(t_alloc, req_addr); /* End test */ qpci_msix_disable(pdev->pdev); qvirtqueue_cleanup(dev->bus, vq, t_alloc); } static void idx(void *obj, void *u_data, QGuestAllocator *t_alloc) { QVirtQueue *vq; QVirtioBlkPCI *blk = obj; QVirtioPCIDevice *pdev = &blk->pci_vdev; QVirtioDevice *dev = &pdev->vdev; QVirtioBlkReq req; uint64_t req_addr; uint64_t capacity; uint32_t features; uint32_t free_head; uint32_t write_head; 
uint32_t desc_idx; uint8_t status; char *data; QOSGraphObject *blk_object = obj; QPCIDevice *pci_dev = blk_object->get_driver(blk_object, "pci-device"); if (qpci_check_buggy_msi(pci_dev)) { return; } qpci_msix_enable(pdev->pdev); qvirtio_pci_set_msix_configuration_vector(pdev, t_alloc, 0); capacity = qvirtio_config_readq(dev, 0); g_assert_cmpint(capacity, ==, TEST_IMAGE_SIZE / 512); features = qvirtio_get_features(dev); features = features & ~(QVIRTIO_F_BAD_FEATURE | (1u << VIRTIO_RING_F_INDIRECT_DESC) | (1u << VIRTIO_F_NOTIFY_ON_EMPTY) | (1u << VIRTIO_BLK_F_SCSI)); qvirtio_set_features(dev, features); vq = qvirtqueue_setup(dev, t_alloc, 0); qvirtqueue_pci_msix_setup(pdev, (QVirtQueuePCI *)vq, t_alloc, 1); qvirtio_set_driver_ok(dev); /* Write request */ req.type = VIRTIO_BLK_T_OUT; req.ioprio = 1; req.sector = 0; req.data = g_malloc0(512); strcpy(req.data, "TEST"); req_addr = virtio_blk_request(t_alloc, dev, &req, 512); g_free(req.data); free_head = qvirtqueue_add(vq, req_addr, 16, false, true); qvirtqueue_add(vq, req_addr + 16, 512, false, true); qvirtqueue_add(vq, req_addr + 528, 1, true, false); qvirtqueue_kick(dev, vq, free_head); qvirtio_wait_used_elem(dev, vq, free_head, NULL, QVIRTIO_BLK_TIMEOUT_US); /* Write request */ req.type = VIRTIO_BLK_T_OUT; req.ioprio = 1; req.sector = 1; req.data = g_malloc0(512); strcpy(req.data, "TEST"); req_addr = virtio_blk_request(t_alloc, dev, &req, 512); g_free(req.data); /* Notify after processing the third request */ qvirtqueue_set_used_event(vq, 2); free_head = qvirtqueue_add(vq, req_addr, 16, false, true); qvirtqueue_add(vq, req_addr + 16, 512, false, true); qvirtqueue_add(vq, req_addr + 528, 1, true, false); qvirtqueue_kick(dev, vq, free_head); write_head = free_head; /* No notification expected */ status = qvirtio_wait_status_byte_no_isr(dev, vq, req_addr + 528, QVIRTIO_BLK_TIMEOUT_US); g_assert_cmpint(status, ==, 0); guest_free(t_alloc, req_addr); /* Read request */ req.type = VIRTIO_BLK_T_IN; req.ioprio = 1; 
req.sector = 1; req.data = g_malloc0(512); req_addr = virtio_blk_request(t_alloc, dev, &req, 512); g_free(req.data); free_head = qvirtqueue_add(vq, req_addr, 16, false, true); qvirtqueue_add(vq, req_addr + 16, 512, true, true); qvirtqueue_add(vq, req_addr + 528, 1, true, false); qvirtqueue_kick(dev, vq, free_head); /* We get just one notification for both requests */ qvirtio_wait_used_elem(dev, vq, write_head, NULL, QVIRTIO_BLK_TIMEOUT_US); g_assert(qvirtqueue_get_buf(vq, &desc_idx, NULL)); g_assert_cmpint(desc_idx, ==, free_head); status = readb(req_addr + 528); g_assert_cmpint(status, ==, 0); data = g_malloc0(512); memread(req_addr + 16, data, 512); g_assert_cmpstr(data, ==, "TEST"); g_free(data); guest_free(t_alloc, req_addr); /* End test */ qpci_msix_disable(pdev->pdev); qvirtqueue_cleanup(dev->bus, vq, t_alloc); } static void pci_hotplug(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtioPCIDevice *dev1 = obj; QVirtioPCIDevice *dev; /* plug secondary disk */ qtest_qmp_device_add("virtio-blk-pci", "drv1", "{'addr': %s, 'drive': 'drive1'}", stringify(PCI_SLOT_HP) ".0"); dev = virtio_pci_new(dev1->pdev->bus, &(QPCIAddress) { .devfn = QPCI_DEVFN(PCI_SLOT_HP, 0) }); g_assert_nonnull(dev); g_assert_cmpint(dev->vdev.device_type, ==, VIRTIO_ID_BLOCK); qvirtio_pci_device_disable(dev); qos_object_destroy((QOSGraphObject *)dev); /* unplug secondary disk */ qpci_unplug_acpi_device_test("drv1", PCI_SLOT_HP); } /* * Check that setting the vring addr on a non-existent virtqueue does * not crash. 
*/ static void test_nonexistent_virtqueue(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtioBlkPCI *blk = obj; QVirtioPCIDevice *pdev = &blk->pci_vdev; QPCIBar bar0; QPCIDevice *dev; dev = qpci_device_find(pdev->pdev->bus, QPCI_DEVFN(4, 0)); g_assert(dev != NULL); qpci_device_enable(dev); bar0 = qpci_iomap(dev, 0, NULL); qpci_io_writeb(dev, bar0, VIRTIO_PCI_QUEUE_SEL, 2); qpci_io_writel(dev, bar0, VIRTIO_PCI_QUEUE_PFN, 1); g_free(dev); } static void resize(void *obj, void *data, QGuestAllocator *t_alloc) { QVirtioBlk *blk_if = obj; QVirtioDevice *dev = blk_if->vdev; int n_size = TEST_IMAGE_SIZE / 2; uint64_t capacity; QVirtQueue *vq; vq = qvirtqueue_setup(dev, t_alloc, 0); test_basic(dev, t_alloc, vq); qmp_discard_response("{ 'execute': 'block_resize', " " 'arguments': { 'device': 'drive0', " " 'size': %d } }", n_size); qvirtio_wait_queue_isr(dev, vq, QVIRTIO_BLK_TIMEOUT_US); capacity = qvirtio_config_readq(dev, 0); g_assert_cmpint(capacity, ==, n_size / 512); qvirtqueue_cleanup(dev->bus, vq, t_alloc); } static void *virtio_blk_test_setup(GString *cmd_line, void *arg) { char *tmp_path = drive_create(); g_string_append_printf(cmd_line, " -drive if=none,id=drive0,file=%s,format=raw,auto-read-only=off " "-drive if=none,id=drive1,file=null-co://,format=raw ", tmp_path); return arg; } static void register_virtio_blk_test(void) { QOSGraphTestOptions opts = { .before = virtio_blk_test_setup, }; qos_add_test("indirect", "virtio-blk", indirect, &opts); qos_add_test("config", "virtio-blk", config, &opts); qos_add_test("basic", "virtio-blk", basic, &opts); qos_add_test("resize", "virtio-blk", resize, &opts); /* tests just for virtio-blk-pci */ qos_add_test("msix", "virtio-blk-pci", msix, &opts); qos_add_test("idx", "virtio-blk-pci", idx, &opts); qos_add_test("nxvirtq", "virtio-blk-pci", test_nonexistent_virtqueue, &opts); qos_add_test("hotplug", "virtio-blk-pci", pci_hotplug, &opts); } libqos_init(register_virtio_blk_test);
pmp-tool/PMP
src/qemu/src-pmp/hw/xen/xen_devconfig.c
<gh_stars>1-10 #include "qemu/osdep.h" #include "hw/xen/xen-legacy-backend.h" #include "qemu/option.h" #include "sysemu/blockdev.h" /* ------------------------------------------------------------- */ static int xen_config_dev_dirs(const char *ftype, const char *btype, int vdev, char *fe, char *be, int len) { char *dom; dom = xs_get_domain_path(xenstore, xen_domid); snprintf(fe, len, "%s/device/%s/%d", dom, ftype, vdev); free(dom); dom = xs_get_domain_path(xenstore, 0); snprintf(be, len, "%s/backend/%s/%d/%d", dom, btype, xen_domid, vdev); free(dom); xenstore_mkdir(fe, XS_PERM_READ | XS_PERM_WRITE); xenstore_mkdir(be, XS_PERM_READ); return 0; } static int xen_config_dev_all(char *fe, char *be) { /* frontend */ if (xen_protocol) xenstore_write_str(fe, "protocol", xen_protocol); xenstore_write_int(fe, "state", XenbusStateInitialising); xenstore_write_int(fe, "backend-id", 0); xenstore_write_str(fe, "backend", be); /* backend */ xenstore_write_str(be, "domain", qemu_name ? qemu_name : "no-name"); xenstore_write_int(be, "online", 1); xenstore_write_int(be, "state", XenbusStateInitialising); xenstore_write_int(be, "frontend-id", xen_domid); xenstore_write_str(be, "frontend", fe); return 0; } /* ------------------------------------------------------------- */ int xen_config_dev_blk(DriveInfo *disk) { char fe[256], be[256], device_name[32]; int vdev = 202 * 256 + 16 * disk->unit; int cdrom = disk->media_cd; const char *devtype = cdrom ? "cdrom" : "disk"; const char *mode = cdrom ? 
"r" : "w"; const char *filename = qemu_opt_get(disk->opts, "file"); snprintf(device_name, sizeof(device_name), "xvd%c", 'a' + disk->unit); xen_pv_printf(NULL, 1, "config disk %d [%s]: %s\n", disk->unit, device_name, filename); xen_config_dev_dirs("vbd", "qdisk", vdev, fe, be, sizeof(fe)); /* frontend */ xenstore_write_int(fe, "virtual-device", vdev); xenstore_write_str(fe, "device-type", devtype); /* backend */ xenstore_write_str(be, "dev", device_name); xenstore_write_str(be, "type", "file"); xenstore_write_str(be, "params", filename); xenstore_write_str(be, "mode", mode); /* common stuff */ return xen_config_dev_all(fe, be); } int xen_config_dev_nic(NICInfo *nic) { char fe[256], be[256]; char mac[20]; int vlan_id = -1; net_hub_id_for_client(nic->netdev, &vlan_id); snprintf(mac, sizeof(mac), "%02x:%02x:%02x:%02x:%02x:%02x", nic->macaddr.a[0], nic->macaddr.a[1], nic->macaddr.a[2], nic->macaddr.a[3], nic->macaddr.a[4], nic->macaddr.a[5]); xen_pv_printf(NULL, 1, "config nic %d: mac=\"%s\"\n", vlan_id, mac); xen_config_dev_dirs("vif", "qnic", vlan_id, fe, be, sizeof(fe)); /* frontend */ xenstore_write_int(fe, "handle", vlan_id); xenstore_write_str(fe, "mac", mac); /* backend */ xenstore_write_int(be, "handle", vlan_id); xenstore_write_str(be, "mac", mac); /* common stuff */ return xen_config_dev_all(fe, be); } int xen_config_dev_vfb(int vdev, const char *type) { char fe[256], be[256]; xen_config_dev_dirs("vfb", "vfb", vdev, fe, be, sizeof(fe)); /* backend */ xenstore_write_str(be, "type", type); /* common stuff */ return xen_config_dev_all(fe, be); } int xen_config_dev_vkbd(int vdev) { char fe[256], be[256]; xen_config_dev_dirs("vkbd", "vkbd", vdev, fe, be, sizeof(fe)); return xen_config_dev_all(fe, be); } int xen_config_dev_console(int vdev) { char fe[256], be[256]; xen_config_dev_dirs("console", "console", vdev, fe, be, sizeof(fe)); return xen_config_dev_all(fe, be); }
pmp-tool/PMP
src/qemu/src-pmp/authz/simple.c
<filename>src/qemu/src-pmp/authz/simple.c /* * QEMU simple authorization driver * * Copyright (c) 2018 Red Hat, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. * */ #include "qemu/osdep.h" #include "authz/simple.h" #include "authz/trace.h" #include "qom/object_interfaces.h" static bool qauthz_simple_is_allowed(QAuthZ *authz, const char *identity, Error **errp) { QAuthZSimple *sauthz = QAUTHZ_SIMPLE(authz); trace_qauthz_simple_is_allowed(authz, sauthz->identity, identity); return g_str_equal(identity, sauthz->identity); } static void qauthz_simple_prop_set_identity(Object *obj, const char *value, Error **errp G_GNUC_UNUSED) { QAuthZSimple *sauthz = QAUTHZ_SIMPLE(obj); g_free(sauthz->identity); sauthz->identity = g_strdup(value); } static char * qauthz_simple_prop_get_identity(Object *obj, Error **errp G_GNUC_UNUSED) { QAuthZSimple *sauthz = QAUTHZ_SIMPLE(obj); return g_strdup(sauthz->identity); } static void qauthz_simple_finalize(Object *obj) { QAuthZSimple *sauthz = QAUTHZ_SIMPLE(obj); g_free(sauthz->identity); } static void qauthz_simple_class_init(ObjectClass *oc, void *data) { QAuthZClass *authz = QAUTHZ_CLASS(oc); authz->is_allowed = qauthz_simple_is_allowed; object_class_property_add_str(oc, "identity", qauthz_simple_prop_get_identity, qauthz_simple_prop_set_identity, NULL); } QAuthZSimple *qauthz_simple_new(const char *id, const char *identity, Error **errp) 
{ return QAUTHZ_SIMPLE( object_new_with_props(TYPE_QAUTHZ_SIMPLE, object_get_objects_root(), id, errp, "identity", identity, NULL)); } static const TypeInfo qauthz_simple_info = { .parent = TYPE_QAUTHZ, .name = TYPE_QAUTHZ_SIMPLE, .instance_size = sizeof(QAuthZSimple), .instance_finalize = qauthz_simple_finalize, .class_size = sizeof(QAuthZSimpleClass), .class_init = qauthz_simple_class_init, .interfaces = (InterfaceInfo[]) { { TYPE_USER_CREATABLE }, { } } }; static void qauthz_simple_register_types(void) { type_register_static(&qauthz_simple_info); } type_init(qauthz_simple_register_types);
pmp-tool/PMP
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/interleave/test_msa_ilvr_b.c
<filename>src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/interleave/test_msa_ilvr_b.c /* * Test program for MSA instruction ILVR.B * * Copyright (C) 2019 Wave Computing, Inc. * Copyright (C) 2019 <NAME> <<EMAIL>> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. * */ #include <sys/time.h> #include <stdint.h> #include "../../../../include/wrappers_msa.h" #include "../../../../include/test_inputs_128.h" #include "../../../../include/test_utils_128.h" #define TEST_COUNT_TOTAL ( \ (PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT) + \ (RANDOM_INPUTS_SHORT_COUNT) * (RANDOM_INPUTS_SHORT_COUNT)) int32_t main(void) { char *instruction_name = "ILVR.B"; int32_t ret; uint32_t i, j; struct timeval start, end; double elapsed_time; uint64_t b128_result[TEST_COUNT_TOTAL][2]; uint64_t b128_expect[TEST_COUNT_TOTAL][2] = { { 0xffffffffffffffffULL, 0xffffffffffffffffULL, }, /* 0 */ { 0xff00ff00ff00ff00ULL, 0xff00ff00ff00ff00ULL, }, { 0xffaaffaaffaaffaaULL, 0xffaaffaaffaaffaaULL, }, { 0xff55ff55ff55ff55ULL, 0xff55ff55ff55ff55ULL, }, { 0xffccffccffccffccULL, 0xffccffccffccffccULL, }, { 0xff33ff33ff33ff33ULL, 0xff33ff33ff33ff33ULL, }, { 0xff8eff38ffe3ff8eULL, 0xffe3ff8eff38ffe3ULL, }, { 0xff71ffc7ff1cff71ULL, 0xff1cff71ffc7ff1cULL, }, { 0x00ff00ff00ff00ffULL, 0x00ff00ff00ff00ffULL, }, /* 8 */ { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x00aa00aa00aa00aaULL, 0x00aa00aa00aa00aaULL, }, { 
0x0055005500550055ULL, 0x0055005500550055ULL, }, { 0x00cc00cc00cc00ccULL, 0x00cc00cc00cc00ccULL, }, { 0x0033003300330033ULL, 0x0033003300330033ULL, }, { 0x008e003800e3008eULL, 0x00e3008e003800e3ULL, }, { 0x007100c7001c0071ULL, 0x001c007100c7001cULL, }, { 0xaaffaaffaaffaaffULL, 0xaaffaaffaaffaaffULL, }, /* 16 */ { 0xaa00aa00aa00aa00ULL, 0xaa00aa00aa00aa00ULL, }, { 0xaaaaaaaaaaaaaaaaULL, 0xaaaaaaaaaaaaaaaaULL, }, { 0xaa55aa55aa55aa55ULL, 0xaa55aa55aa55aa55ULL, }, { 0xaaccaaccaaccaaccULL, 0xaaccaaccaaccaaccULL, }, { 0xaa33aa33aa33aa33ULL, 0xaa33aa33aa33aa33ULL, }, { 0xaa8eaa38aae3aa8eULL, 0xaae3aa8eaa38aae3ULL, }, { 0xaa71aac7aa1caa71ULL, 0xaa1caa71aac7aa1cULL, }, { 0x55ff55ff55ff55ffULL, 0x55ff55ff55ff55ffULL, }, /* 24 */ { 0x5500550055005500ULL, 0x5500550055005500ULL, }, { 0x55aa55aa55aa55aaULL, 0x55aa55aa55aa55aaULL, }, { 0x5555555555555555ULL, 0x5555555555555555ULL, }, { 0x55cc55cc55cc55ccULL, 0x55cc55cc55cc55ccULL, }, { 0x5533553355335533ULL, 0x5533553355335533ULL, }, { 0x558e553855e3558eULL, 0x55e3558e553855e3ULL, }, { 0x557155c7551c5571ULL, 0x551c557155c7551cULL, }, { 0xccffccffccffccffULL, 0xccffccffccffccffULL, }, /* 32 */ { 0xcc00cc00cc00cc00ULL, 0xcc00cc00cc00cc00ULL, }, { 0xccaaccaaccaaccaaULL, 0xccaaccaaccaaccaaULL, }, { 0xcc55cc55cc55cc55ULL, 0xcc55cc55cc55cc55ULL, }, { 0xccccccccccccccccULL, 0xccccccccccccccccULL, }, { 0xcc33cc33cc33cc33ULL, 0xcc33cc33cc33cc33ULL, }, { 0xcc8ecc38cce3cc8eULL, 0xcce3cc8ecc38cce3ULL, }, { 0xcc71ccc7cc1ccc71ULL, 0xcc1ccc71ccc7cc1cULL, }, { 0x33ff33ff33ff33ffULL, 0x33ff33ff33ff33ffULL, }, /* 40 */ { 0x3300330033003300ULL, 0x3300330033003300ULL, }, { 0x33aa33aa33aa33aaULL, 0x33aa33aa33aa33aaULL, }, { 0x3355335533553355ULL, 0x3355335533553355ULL, }, { 0x33cc33cc33cc33ccULL, 0x33cc33cc33cc33ccULL, }, { 0x3333333333333333ULL, 0x3333333333333333ULL, }, { 0x338e333833e3338eULL, 0x33e3338e333833e3ULL, }, { 0x337133c7331c3371ULL, 0x331c337133c7331cULL, }, { 0x8eff38ffe3ff8effULL, 0xe3ff8eff38ffe3ffULL, }, /* 48 */ { 
0x8e003800e3008e00ULL, 0xe3008e003800e300ULL, }, { 0x8eaa38aae3aa8eaaULL, 0xe3aa8eaa38aae3aaULL, }, { 0x8e553855e3558e55ULL, 0xe3558e553855e355ULL, }, { 0x8ecc38cce3cc8eccULL, 0xe3cc8ecc38cce3ccULL, }, { 0x8e333833e3338e33ULL, 0xe3338e333833e333ULL, }, { 0x8e8e3838e3e38e8eULL, 0xe3e38e8e3838e3e3ULL, }, { 0x8e7138c7e31c8e71ULL, 0xe31c8e7138c7e31cULL, }, { 0x71ffc7ff1cff71ffULL, 0x1cff71ffc7ff1cffULL, }, /* 56 */ { 0x7100c7001c007100ULL, 0x1c007100c7001c00ULL, }, { 0x71aac7aa1caa71aaULL, 0x1caa71aac7aa1caaULL, }, { 0x7155c7551c557155ULL, 0x1c557155c7551c55ULL, }, { 0x71ccc7cc1ccc71ccULL, 0x1ccc71ccc7cc1cccULL, }, { 0x7133c7331c337133ULL, 0x1c337133c7331c33ULL, }, { 0x718ec7381ce3718eULL, 0x1ce3718ec7381ce3ULL, }, { 0x7171c7c71c1c7171ULL, 0x1c1c7171c7c71c1cULL, }, { 0x2828626255554040ULL, 0x88886a6ae6e6ccccULL, }, /* 64 */ { 0x284d629355c74008ULL, 0x88fb6abee600cc63ULL, }, { 0x28b962cf558b4080ULL, 0x88ac6a5ae6aeccaaULL, }, { 0x285e623155e2404eULL, 0x88706a4fe616cc4dULL, }, { 0x4d289362c7550840ULL, 0xfb88be6a00e663ccULL, }, { 0x4d4d9393c7c70808ULL, 0xfbfbbebe00006363ULL, }, { 0x4db993cfc78b0880ULL, 0xfbacbe5a00ae63aaULL, }, { 0x4d5e9331c7e2084eULL, 0xfb70be4f0016634dULL, }, { 0xb928cf628b558040ULL, 0xac885a6aaee6aaccULL, }, /* 72 */ { 0xb94dcf938bc78008ULL, 0xacfb5abeae00aa63ULL, }, { 0xb9b9cfcf8b8b8080ULL, 0xacac5a5aaeaeaaaaULL, }, { 0xb95ecf318be2804eULL, 0xac705a4fae16aa4dULL, }, { 0x5e283162e2554e40ULL, 0x70884f6a16e64dccULL, }, { 0x5e4d3193e2c74e08ULL, 0x70fb4fbe16004d63ULL, }, { 0x5eb931cfe28b4e80ULL, 0x70ac4f5a16ae4daaULL, }, { 0x5e5e3131e2e24e4eULL, 0x70704f4f16164d4dULL, }, }; gettimeofday(&start, NULL); for (i = 0; i < PATTERN_INPUTS_SHORT_COUNT; i++) { for (j = 0; j < PATTERN_INPUTS_SHORT_COUNT; j++) { do_msa_ILVR_B(b128_pattern[i], b128_pattern[j], b128_result[PATTERN_INPUTS_SHORT_COUNT * i + j]); } } for (i = 0; i < RANDOM_INPUTS_SHORT_COUNT; i++) { for (j = 0; j < RANDOM_INPUTS_SHORT_COUNT; j++) { do_msa_ILVR_B(b128_random[i], b128_random[j], 
b128_result[((PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT)) + RANDOM_INPUTS_SHORT_COUNT * i + j]); } } gettimeofday(&end, NULL); elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time, &b128_result[0][0], &b128_expect[0][0]); return ret; }
pmp-tool/PMP
src/qemu/src-pmp/roms/openbios/drivers/pmu.c
/* * Device driver for the via-pmu on Apple Powermacs. * * The VIA (versatile interface adapter) interfaces to the PMU, * a 6805 microprocessor core whose primary function is to control * battery charging and system power on the PowerBook 3400 and 2400. * The PMU also controls the ADB (Apple Desktop Bus) which connects * to the keyboard and mouse, as well as the non-volatile RAM * and the RTC (real time clock) chip. * * Copyright (C) 1998 <NAME> and <NAME>. * Copyright (C) 2001-2002 <NAME> * Copyright (C) 2006-2007 <NAME> * */ #include "config.h" #include "libopenbios/bindings.h" #include "drivers/drivers.h" #include "libc/byteorder.h" #include "libc/vsprintf.h" #include "macio.h" #include "pmu.h" #undef DEBUG_PMU #ifdef DEBUG_PMU #define PMU_DPRINTF(fmt, args...) \ do { printk("PMU - %s: " fmt, __func__ , ##args); } while (0) #else #define PMU_DPRINTF(fmt, args...) do { } while (0) #endif #define IO_PMU_OFFSET 0x00016000 #define IO_PMU_SIZE 0x00002000 /* VIA registers - spaced 0x200 bytes apart */ #define RS 0x200 /* skip between registers */ #define B 0 /* B-side data */ #define A RS /* A-side data */ #define DIRB (2*RS) /* B-side direction (1=output) */ #define DIRA (3*RS) /* A-side direction (1=output) */ #define T1CL (4*RS) /* Timer 1 ctr/latch (low 8 bits) */ #define T1CH (5*RS) /* Timer 1 counter (high 8 bits) */ #define T1LL (6*RS) /* Timer 1 latch (low 8 bits) */ #define T1LH (7*RS) /* Timer 1 latch (high 8 bits) */ #define T2CL (8*RS) /* Timer 2 ctr/latch (low 8 bits) */ #define T2CH (9*RS) /* Timer 2 counter (high 8 bits) */ #define SR (10*RS) /* Shift register */ #define ACR (11*RS) /* Auxiliary control register */ #define PCR (12*RS) /* Peripheral control register */ #define IFR (13*RS) /* Interrupt flag register */ #define IER (14*RS) /* Interrupt enable register */ #define ANH (15*RS) /* A-side data, no handshake */ /* Bits in B data register: all active low */ #define TACK 0x08 /* Transfer request (input) */ #define TREQ 0x10 /* Transfer acknowledge 
(output) */ /* Bits in ACR */ #define SR_CTRL 0x1c /* Shift register control bits */ #define SR_EXT 0x0c /* Shift on external clock */ #define SR_OUT 0x10 /* Shift out if 1 */ /* Bits in IFR and IER */ #define IER_SET 0x80 /* set bits in IER */ #define IER_CLR 0 /* clear bits in IER */ #define SR_INT 0x04 /* Shift register full/empty */ /* * This table indicates for each PMU opcode: * - the number of data bytes to be sent with the command, or -1 * if a length byte should be sent, * - the number of response bytes which the PMU will return, or * -1 if it will send a length byte. */ static const int8_t pmu_data_len[256][2] = { /* 0 1 2 3 4 5 6 7 */ /*00*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*08*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, /*10*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*18*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0, 0}, /*20*/ {-1, 0},{ 0, 0},{ 2, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0}, /*28*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{ 0,-1}, /*30*/ { 4, 0},{20, 0},{-1, 0},{ 3, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*38*/ { 0, 4},{ 0,20},{ 2,-1},{ 2, 1},{ 3,-1},{-1,-1},{-1,-1},{ 4, 0}, /*40*/ { 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*48*/ { 0, 1},{ 0, 1},{-1,-1},{ 1, 0},{ 1, 0},{-1,-1},{-1,-1},{-1,-1}, /*50*/ { 1, 0},{ 0, 0},{ 2, 0},{ 2, 0},{-1, 0},{ 1, 0},{ 3, 0},{ 1, 0}, /*58*/ { 0, 1},{ 1, 0},{ 0, 2},{ 0, 2},{ 0,-1},{-1,-1},{-1,-1},{-1,-1}, /*60*/ { 2, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*68*/ { 0, 3},{ 0, 3},{ 0, 2},{ 0, 8},{ 0,-1},{ 0,-1},{-1,-1},{-1,-1}, /*70*/ { 1, 0},{ 1, 0},{ 1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*78*/ { 0,-1},{ 0,-1},{-1,-1},{-1,-1},{-1,-1},{ 5, 1},{ 4, 1},{ 4, 1}, /*80*/ { 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*88*/ { 0, 5},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, /*90*/ { 1, 0},{ 2, 0},{-1, 0},{-1, 0},{-1, 
0},{-1, 0},{-1, 0},{-1, 0}, /*98*/ { 0, 1},{ 0, 1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, /*a0*/ { 2, 0},{ 2, 0},{ 2, 0},{ 4, 0},{-1, 0},{ 0, 0},{-1, 0},{-1, 0}, /*a8*/ { 1, 1},{ 1, 0},{ 3, 0},{ 2, 0},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, /*b0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*b8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, /*c0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*c8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, /*d0*/ { 0, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*d8*/ { 1, 1},{ 1, 1},{-1,-1},{-1,-1},{ 0, 1},{ 0,-1},{-1,-1},{-1,-1}, /*e0*/ {-1, 0},{ 4, 0},{ 0, 1},{-1, 0},{-1, 0},{ 4, 0},{-1, 0},{-1, 0}, /*e8*/ { 3,-1},{-1,-1},{ 0, 1},{-1,-1},{ 0,-1},{-1,-1},{-1,-1},{ 0, 0}, /*f0*/ {-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0},{-1, 0}, /*f8*/ {-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1},{-1,-1}, }; /* * PMU commands */ #define PMU_POWER_CTRL0 0x10 /* control power of some devices */ #define PMU_POWER_CTRL 0x11 /* control power of some devices */ #define PMU_ADB_CMD 0x20 /* send ADB packet */ #define PMU_ADB_POLL_OFF 0x21 /* disable ADB auto-poll */ #define PMU_WRITE_NVRAM 0x33 /* write non-volatile RAM */ #define PMU_READ_NVRAM 0x3b /* read non-volatile RAM */ #define PMU_SET_RTC 0x30 /* set real-time clock */ #define PMU_READ_RTC 0x38 /* read real-time clock */ #define PMU_SET_VOLBUTTON 0x40 /* set volume up/down position */ #define PMU_BACKLIGHT_BRIGHT 0x41 /* set backlight brightness */ #define PMU_GET_VOLBUTTON 0x48 /* get volume up/down position */ #define PMU_PCEJECT 0x4c /* eject PC-card from slot */ #define PMU_BATTERY_STATE 0x6b /* report battery state etc. 
*/ #define PMU_SMART_BATTERY_STATE 0x6f /* report battery state (new way) */ #define PMU_SET_INTR_MASK 0x70 /* set PMU interrupt mask */ #define PMU_INT_ACK 0x78 /* read interrupt bits */ #define PMU_SHUTDOWN 0x7e /* turn power off */ #define PMU_CPU_SPEED 0x7d /* control CPU speed on some models */ #define PMU_SLEEP 0x7f /* put CPU to sleep */ #define PMU_POWER_EVENTS 0x8f /* Send power-event commands to PMU */ #define PMU_I2C_CMD 0x9a /* I2C operations */ #define PMU_RESET 0xd0 /* reset CPU */ #define PMU_GET_BRIGHTBUTTON 0xd9 /* report brightness up/down pos */ #define PMU_GET_COVER 0xdc /* report cover open/closed */ #define PMU_SYSTEM_READY 0xdf /* tell PMU we are awake */ #define PMU_GET_VERSION 0xea /* read the PMU version */ /* Bits to use with the PMU_POWER_CTRL0 command */ #define PMU_POW0_ON 0x80 /* OR this to power ON the device */ #define PMU_POW0_OFF 0x00 /* leave bit 7 to 0 to power it OFF */ #define PMU_POW0_HARD_DRIVE 0x04 /* Hard drive power (on wallstreet/lombard ?) */ /* Bits to use with the PMU_POWER_CTRL command */ #define PMU_POW_ON 0x80 /* OR this to power ON the device */ #define PMU_POW_OFF 0x00 /* leave bit 7 to 0 to power it OFF */ #define PMU_POW_BACKLIGHT 0x01 /* backlight power */ #define PMU_POW_CHARGER 0x02 /* battery charger power */ #define PMU_POW_IRLED 0x04 /* IR led power (on wallstreet) */ #define PMU_POW_MEDIABAY 0x08 /* media bay power (wallstreet/lombard ?) 
*/ /* Bits in PMU interrupt and interrupt mask bytes */ #define PMU_INT_PCEJECT 0x04 /* PC-card eject buttons */ #define PMU_INT_SNDBRT 0x08 /* sound/brightness up/down buttons */ #define PMU_INT_ADB 0x10 /* ADB autopoll or reply data */ #define PMU_INT_BATTERY 0x20 /* Battery state change */ #define PMU_INT_ENVIRONMENT 0x40 /* Environment interrupts */ #define PMU_INT_TICK 0x80 /* 1-second tick interrupt */ /* Other bits in PMU interrupt valid when PMU_INT_ADB is set */ #define PMU_INT_ADB_AUTO 0x04 /* ADB autopoll, when PMU_INT_ADB */ #define PMU_INT_WAITING_CHARGER 0x01 /* ??? */ #define PMU_INT_AUTO_SRQ_POLL 0x02 /* ??? */ /* Bits in the environement message (either obtained via PMU_GET_COVER, * or via PMU_INT_ENVIRONMENT on core99 */ #define PMU_ENV_LID_CLOSED 0x01 /* The lid is closed */ /* I2C related definitions */ #define PMU_I2C_MODE_SIMPLE 0 #define PMU_I2C_MODE_STDSUB 1 #define PMU_I2C_MODE_COMBINED 2 #define PMU_I2C_BUS_STATUS 0 #define PMU_I2C_BUS_SYSCLK 1 #define PMU_I2C_BUS_POWER 2 #define PMU_I2C_STATUS_OK 0 #define PMU_I2C_STATUS_DATAREAD 1 #define PMU_I2C_STATUS_BUSY 0xfe /* PMU PMU_POWER_EVENTS commands */ enum { PMU_PWR_GET_POWERUP_EVENTS = 0x00, PMU_PWR_SET_POWERUP_EVENTS = 0x01, PMU_PWR_CLR_POWERUP_EVENTS = 0x02, PMU_PWR_GET_WAKEUP_EVENTS = 0x03, PMU_PWR_SET_WAKEUP_EVENTS = 0x04, PMU_PWR_CLR_WAKEUP_EVENTS = 0x05, }; /* Power events wakeup bits */ enum { PMU_PWR_WAKEUP_KEY = 0x01, /* Wake on key press */ PMU_PWR_WAKEUP_AC_INSERT = 0x02, /* Wake on AC adapter plug */ PMU_PWR_WAKEUP_AC_CHANGE = 0x04, PMU_PWR_WAKEUP_LID_OPEN = 0x08, PMU_PWR_WAKEUP_RING = 0x10, }; static uint8_t pmu_readb(pmu_t *dev, int reg) { return *(volatile uint8_t *)(dev->base + reg); asm volatile("eieio" : : : "memory"); } static void pmu_writeb(pmu_t *dev, int reg, uint8_t val) { *(volatile uint8_t *)(dev->base + reg) = val; asm volatile("eieio" : : : "memory"); } static void pmu_handshake(pmu_t *dev) { pmu_writeb(dev, B, pmu_readb(dev, B) & ~TREQ); while ((pmu_readb(dev, 
B) & TACK) != 0); pmu_writeb(dev, B, pmu_readb(dev, B) | TREQ); while ((pmu_readb(dev, B) & TACK) == 0); } static void pmu_send_byte(pmu_t *dev, uint8_t val) { pmu_writeb(dev, ACR, pmu_readb(dev, ACR) | SR_OUT | SR_EXT); pmu_writeb(dev, SR, val); pmu_handshake(dev); } static uint8_t pmu_recv_byte(pmu_t *dev) { pmu_writeb(dev, ACR, (pmu_readb(dev, ACR) & ~SR_OUT) | SR_EXT); pmu_readb(dev, SR); pmu_handshake(dev); return pmu_readb(dev, SR); } int pmu_request(pmu_t *dev, uint8_t cmd, uint8_t in_len, uint8_t *in_data, uint8_t *out_len, uint8_t *out_data) { int i, l, out_sz; uint8_t d; /* Check command data size */ l = pmu_data_len[cmd][0]; if (l >= 0 && in_len != l) { printk("PMU: Error, request %02x wants %d args, got %d\n", cmd, l, in_len); return -1; } /* Make sure PMU is idle */ while ((pmu_readb(dev, B) & TACK) == 0); /* Send command */ pmu_send_byte(dev, cmd); /* Optionally send data length */ if (l < 0) { pmu_send_byte(dev, in_len); /* Send data */ } for (i = 0; i < in_len; i++) { pmu_send_byte(dev, in_data[i]); } /* Check response size */ l = pmu_data_len[cmd][1]; if (l < 0) { l = pmu_recv_byte(dev); } if (out_len) { out_sz = *out_len; *out_len = 0; } else { out_sz = 0; } if (l > out_sz) { printk("PMU: Error, request %02x returns %d bytes" ", room for %d\n", cmd, l, out_sz); } for (i = 0; i < l; i++) { d = pmu_recv_byte(dev); if (i < out_sz) { out_data[i] = d; (*out_len)++; } } return 0; } #define MAX_REQ_SIZE 128 #ifdef CONFIG_DRIVER_ADB static int pmu_adb_req(void *host, const uint8_t *snd_buf, int len, uint8_t *rcv_buf) { uint8_t buffer[MAX_REQ_SIZE], *pos, olen; int rc; PMU_DPRINTF("pmu_adb_req: len=%d: %02x %02x %02x...\n", len, snd_buf[0], snd_buf[1], snd_buf[2]); if (len >= (MAX_REQ_SIZE - 1)) { printk("pmu_adb_req: too big ! 
(%d)\n", len); return -1; } buffer[0] = snd_buf[0]; buffer[1] = 0; /* We don't do autopoll */ buffer[2] = len - 1; if (len > 1) { memcpy(&buffer[3], &snd_buf[1], len - 1); } rc = pmu_request(host, PMU_ADB_CMD, len + 2, buffer, NULL, NULL); if (rc) { printk("PMU adb request failure %d\n", rc); return 0; } olen = MAX_REQ_SIZE; rc = pmu_request(host, PMU_INT_ACK, 0, NULL, &olen, buffer); if (rc) { printk("PMU intack request failure %d\n", rc); return 0; } PMU_DPRINTF("pmu_resp=%d int=0x%02x\n", olen, buffer[0]); if (olen <= 2) { return 0; } else { pos = &buffer[3]; olen -= 3; PMU_DPRINTF("ADB resp: 0x%02x 0x%02x\n", buffer[3], buffer[4]); } memcpy(rcv_buf, pos, olen); return olen; } #endif DECLARE_UNNAMED_NODE(ob_pmu, INSTALL_OPEN, sizeof(int)); static pmu_t *main_pmu; static void pmu_reset_all(void) { pmu_request(main_pmu, PMU_RESET, 0, NULL, NULL, NULL); } static void pmu_poweroff(void) { uint8_t params[] = "MATT"; pmu_request(main_pmu, PMU_SHUTDOWN, 4, params, NULL, NULL); } static void ob_pmu_initialize (int *idx) { phandle_t ph=get_cur_dev(); int props[2]; push_str("via-pmu"); fword("device-type"); set_int_property(ph, "#address-cells", 1); set_int_property(ph, "#size-cells", 0); set_property(ph, "compatible", "pmu", 4); props[0] = __cpu_to_be32(IO_PMU_OFFSET); props[1] = __cpu_to_be32(IO_PMU_SIZE); set_property(ph, "reg", (char *)&props, sizeof(props)); /* On newworld machines the PMU is on interrupt 0x19 */ props[0] = 0x19; props[1] = 1; set_property(ph, "interrupts", (char *)props, sizeof(props)); set_int_property(ph, "pmu-version", 0xd0330c); bind_func("pmu-reset-all", pmu_reset_all); feval("['] pmu-reset-all to reset-all"); } static void ob_pmu_open(int *idx) { RET(-1); } static void ob_pmu_close(int *idx) { } NODE_METHODS(ob_pmu) = { { NULL, ob_pmu_initialize }, { "open", ob_pmu_open }, { "close", ob_pmu_close }, }; DECLARE_UNNAMED_NODE(rtc, INSTALL_OPEN, sizeof(int)); static void rtc_open(int *idx) { RET(-1); } /* * get-time ( -- second minute hour day 
month year ) * */ static const int days_month[12] = { 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; static const int days_month_leap[12] = { 31, 29, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31 }; static inline int is_leap(int year) { return ((year % 4 == 0) && (year % 100 != 0)) || (year % 400 == 0); } static void rtc_get_time(int *idx) { uint8_t obuf[4], olen; ucell second, minute, hour, day, month, year; uint32_t now; int current; const int *days; olen = 4; pmu_request(main_pmu, PMU_READ_RTC, 0, NULL, &olen, obuf); /* seconds since 01/01/1904 */ now = (obuf[0] << 24) + (obuf[1] << 16) + (obuf[2] << 8) + obuf[3]; second = now % 60; now /= 60; minute = now % 60; now /= 60; hour = now % 24; now /= 24; year = now * 100 / 36525; now -= year * 36525 / 100; year += 1904; days = is_leap(year) ? days_month_leap : days_month; current = 0; month = 0; while (month < 12) { if (now <= current + days[month]) { break; } current += days[month]; month++; } month++; day = now - current; PUSH(second); PUSH(minute); PUSH(hour); PUSH(day); PUSH(month); PUSH(year); } /* * set-time ( second minute hour day month year -- ) * */ static void rtc_set_time(int *idx) { uint8_t ibuf[4]; ucell second, minute, hour, day, month, year; const int *days; uint32_t now; unsigned int nb_days; int i; year = POP(); month = POP(); day = POP(); hour = POP(); minute = POP(); second = POP(); days = is_leap(year) ? 
days_month_leap : days_month; nb_days = (year - 1904) * 36525 / 100 + day; for (i = 0; i < month - 1; i++) { nb_days += days[i]; } now = (((nb_days * 24) + hour) * 60 + minute) * 60 + second; ibuf[0] = now >> 24; ibuf[1] = now >> 16; ibuf[2] = now >> 8; ibuf[3] = now; pmu_request(main_pmu, PMU_SET_RTC, 4, ibuf, NULL, NULL); } NODE_METHODS(rtc) = { { "open", rtc_open }, { "get-time", rtc_get_time }, { "set-time", rtc_set_time }, }; static void rtc_init(char *path) { phandle_t ph, aliases; char buf[64]; snprintf(buf, sizeof(buf), "%s/rtc", path); REGISTER_NAMED_NODE(rtc, buf); ph = find_dev(buf); set_property(ph, "device_type", "rtc", 4); set_property(ph, "compatible", "rtc,via-pmu", 12); aliases = find_dev("/aliases"); set_property(aliases, "rtc", buf, strlen(buf) + 1); device_end(); } static void powermgt_init(char *path) { phandle_t ph; char buf[64]; /* This is a bunch of magic "Feature" bits for which we only have * partial definitions from Darwin. These are taken from a * PowerMac3,1 device-tree. They are also identical in a * PowerMac5,1 "Cube". Note that more recent machines such as * the MacMini (PowerMac10,1) do not have this property, however * MacOS 9 seems to require it (it hangs during boot otherwise). 
*/ const char prim[] = { 0x00, 0x00, 0x00, 0xff, 0x00, 0x00, 0x00, 0x2c, 0x00, 0x03, 0x0d, 0x40, /* Public PM features */ /* 0x00000001 : Wake timer supported */ /* 0x00000004 : Processor cycling supported */ /* 0x00000100 : Can wake on modem ring */ /* 0x00000200 : Has monitor dimming support */ /* 0x00000400 : Can program startup timer */ /* 0x00002000 : Supports wake on LAN */ /* 0x00004000 : Can wake on LID/case open */ /* 0x00008000 : Can power off PCI on sleep */ /* 0x00010000 : Supports deep sleep */ 0x00, 0x01, 0xe7, 0x05, /* Private PM features */ /* 0x00000400 : Supports ICT control */ /* 0x00001000 : Supports Idle2 in hardware */ /* 0x00002000 : Open case prevents sleep */ 0x00, 0x00, 0x34, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /* # of batteries supported */ 0x26, 0x0d, 0x46, 0x00, 0x02, 0x78, 0x78, 0x3c, 0x00 }; snprintf(buf, sizeof(buf), "%s/power-mgt", path); REGISTER_NAMED_NODE(rtc, buf); // XXX ? ph = find_dev(buf); set_property(ph, "device_type", "power-mgt", 10); set_property(ph, "compatible", "via-pmu-99", 11); set_property(ph, "registry-name", "extint-gpio1", 13); set_property(ph, "prim-info", prim, sizeof(prim)); device_end(); } pmu_t *pmu_init(const char *path, phys_addr_t base) { pmu_t *pmu; char buf[64]; phandle_t aliases; base += IO_PMU_OFFSET; PMU_DPRINTF(" base=" FMT_plx "\n", base); pmu = malloc(sizeof(pmu_t)); if (pmu == NULL) { return NULL; } snprintf(buf, sizeof(buf), "%s/via-pmu", path); REGISTER_NAMED_NODE(ob_pmu, buf); aliases = find_dev("/aliases"); set_property(aliases, "via-pmu", buf, strlen(buf) + 1); pmu->base = base; #ifdef CONFIG_DRIVER_ADB if (has_adb()) { pmu->adb_bus = adb_bus_new(pmu, &pmu_adb_req); adb_bus_init(buf, pmu->adb_bus); } #endif rtc_init(buf); powermgt_init(buf); main_pmu = pmu; device_end(); bind_func("poweroff", pmu_poweroff); return pmu; }
pmp-tool/PMP
src/qemu/src-pmp/tests/test-block-iothread.c
<filename>src/qemu/src-pmp/tests/test-block-iothread.c /* * Block tests for iothreads * * Copyright (c) 2018 <NAME> <<EMAIL>> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/

#include "qemu/osdep.h"
#include "block/block.h"
#include "block/blockjob_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "iothread.h"

/* Dummy driver callbacks: every operation succeeds immediately without
 * doing any I/O, so the tests only exercise the generic block layer. */
static int coroutine_fn bdrv_test_co_prwv(BlockDriverState *bs,
                                          uint64_t offset, uint64_t bytes,
                                          QEMUIOVector *qiov, int flags)
{
    return 0;
}

static int coroutine_fn bdrv_test_co_pdiscard(BlockDriverState *bs,
                                              int64_t offset, int bytes)
{
    return 0;
}

static int coroutine_fn
bdrv_test_co_truncate(BlockDriverState *bs, int64_t offset,
                      PreallocMode prealloc, Error **errp)
{
    return 0;
}

/* Reports the whole queried range as a single extent */
static int coroutine_fn bdrv_test_co_block_status(BlockDriverState *bs,
                                                  bool want_zero,
                                                  int64_t offset, int64_t count,
                                                  int64_t *pnum, int64_t *map,
                                                  BlockDriverState **file)
{
    *pnum = count;
    return 0;
}

/* Minimal in-memory driver used as the backing of all test nodes */
static BlockDriver bdrv_test = {
    .format_name            = "test",
    .instance_size          = 1,

    .bdrv_co_preadv         = bdrv_test_co_prwv,
    .bdrv_co_pwritev        = bdrv_test_co_prwv,
    .bdrv_co_pdiscard       = bdrv_test_co_pdiscard,
    .bdrv_co_truncate       = bdrv_test_co_truncate,
    .bdrv_co_block_status   = bdrv_test_co_block_status,
};

static void test_sync_op_pread(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pread(c, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = bdrv_pread(c, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_pwrite(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = bdrv_pwrite(c, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = bdrv_pwrite(c, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}

/* Same as test_sync_op_pread, but through the BlockBackend layer */
static void test_sync_op_blk_pread(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pread(blk, 0, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = blk_pread(blk, -2, buf, sizeof(buf));
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pwrite(BlockBackend *blk)
{
    uint8_t buf[512];
    int ret;

    /* Success */
    ret = blk_pwrite(blk, 0, buf, sizeof(buf), 0);
    g_assert_cmpint(ret, ==, 512);

    /* Early error: Negative offset */
    ret = blk_pwrite(blk, -2, buf, sizeof(buf), 0);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_load_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_load_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

static void test_sync_op_save_vmstate(BdrvChild *c)
{
    uint8_t buf[512];
    int ret;

    /* Error: Driver does not support snapshots */
    ret = bdrv_save_vmstate(c->bs, buf, 0, sizeof(buf));
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

static void test_sync_op_pdiscard(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    c->bs->open_flags |= BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: UNMAP not supported */
    c->bs->open_flags &= ~BDRV_O_UNMAP;
    ret = bdrv_pdiscard(c, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_pdiscard(c, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_blk_pdiscard(BlockBackend *blk)
{
    int ret;

    /* Early success: UNMAP not supported */
    ret = blk_pdiscard(blk, 0, 512);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = blk_pdiscard(blk, -2, 512);
    g_assert_cmpint(ret, ==, -EIO);
}

static void test_sync_op_truncate(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_truncate(c, 65536, PREALLOC_MODE_OFF, NULL);
    g_assert_cmpint(ret, ==, 0);

    /* Early error: Negative offset */
    ret = bdrv_truncate(c, -2, PREALLOC_MODE_OFF, NULL);
    g_assert_cmpint(ret, ==, -EINVAL);

    /* Error: Read-only image.  Flags are restored afterwards so later
     * tests see a writable node again. */
    c->bs->read_only = true;
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_truncate(c, 65536, PREALLOC_MODE_OFF, NULL);
    g_assert_cmpint(ret, ==, -EACCES);

    c->bs->read_only = false;
    c->bs->open_flags |= BDRV_O_RDWR;
}

static void test_sync_op_block_status(BdrvChild *c)
{
    int ret;
    int64_t n;

    /* Normal success path */
    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: No driver support.  NOTE(review): this mutates the
     * global bdrv_test driver and is not undone; tests relying on
     * .bdrv_co_block_status must run before this one. */
    bdrv_test.bdrv_co_block_status = NULL;
    ret = bdrv_is_allocated(c->bs, 0, 65536, &n);
    g_assert_cmpint(ret, ==, 1);

    /* Early success: bytes = 0 */
    ret = bdrv_is_allocated(c->bs, 0, 0, &n);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Offset > image size*/
    ret = bdrv_is_allocated(c->bs, 0x1000000, 0x1000000, &n);
    g_assert_cmpint(ret, ==, 0);
}

static void test_sync_op_flush(BdrvChild *c)
{
    int ret;

    /* Normal success path */
    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    c->bs->read_only = true;
    c->bs->open_flags &= ~BDRV_O_RDWR;

    ret = bdrv_flush(c->bs);
    g_assert_cmpint(ret, ==, 0);

    c->bs->read_only = false;
    c->bs->open_flags |= BDRV_O_RDWR;
}

static void test_sync_op_blk_flush(BlockBackend *blk)
{
    BlockDriverState *bs = blk_bs(blk);
    int ret;

    /* Normal success path */
    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    /* Early success: Read-only image */
    bs->read_only = true;
    bs->open_flags &= ~BDRV_O_RDWR;

    ret = blk_flush(blk);
    g_assert_cmpint(ret, ==, 0);

    bs->read_only = false;
    bs->open_flags |= BDRV_O_RDWR;
}

static void test_sync_op_check(BdrvChild *c)
{
    BdrvCheckResult result;
    int ret;

    /* Error: Driver does not implement check */
    ret = bdrv_check(c->bs, &result, 0);
    g_assert_cmpint(ret, ==, -ENOTSUP);
}

static void test_sync_op_invalidate_cache(BdrvChild *c)
{
    /* Early success: Image is not inactive */
    bdrv_invalidate_cache(c->bs, NULL);
}

/* One entry per synchronous operation; blkfn is optional and exercises
 * the same operation through a BlockBackend. */
typedef struct SyncOpTest {
    const char *name;
    void (*fn)(BdrvChild *c);
    void (*blkfn)(BlockBackend *blk);
} SyncOpTest;

const SyncOpTest sync_op_tests[] = {
    {
        .name   = "/sync-op/pread",
        .fn     = test_sync_op_pread,
        .blkfn  = test_sync_op_blk_pread,
    }, {
        .name   = "/sync-op/pwrite",
        .fn     = test_sync_op_pwrite,
        .blkfn  = test_sync_op_blk_pwrite,
    }, {
        .name   = "/sync-op/load_vmstate",
        .fn     = test_sync_op_load_vmstate,
    }, {
        .name   = "/sync-op/save_vmstate",
        .fn     = test_sync_op_save_vmstate,
    }, {
        .name   = "/sync-op/pdiscard",
        .fn     = test_sync_op_pdiscard,
        .blkfn  = test_sync_op_blk_pdiscard,
    }, {
        .name   = "/sync-op/truncate",
        .fn     = test_sync_op_truncate,
    }, {
        .name   = "/sync-op/block_status",
        .fn     = test_sync_op_block_status,
    }, {
        .name   = "/sync-op/flush",
        .fn     = test_sync_op_flush,
        .blkfn  = test_sync_op_blk_flush,
    }, {
        .name   = "/sync-op/check",
        .fn     = test_sync_op_check,
    }, {
        .name   = "/sync-op/invalidate_cache",
        .fn     = test_sync_op_invalidate_cache,
    },
};

/* Test synchronous operations that run in a different iothread, so we have to
 * poll for the coroutine there to return. */
static void test_sync_op(const void *opaque)
{
    const SyncOpTest *t = opaque;
    IOThread *iothread = iothread_new();
    AioContext *ctx = iothread_get_aio_context(iothread);
    BlockBackend *blk;
    BlockDriverState *bs;
    BdrvChild *c;

    blk = blk_new(BLK_PERM_ALL, BLK_PERM_ALL);
    bs = bdrv_new_open_driver(&bdrv_test, "base", BDRV_O_RDWR, &error_abort);
    bs->total_sectors = 65536 / BDRV_SECTOR_SIZE;
    blk_insert_bs(blk, bs, &error_abort);
    c = QLIST_FIRST(&bs->parents);

    /* Move the node to the iothread, run the test while holding that
     * context, then move it back before tearing down. */
    blk_set_aio_context(blk, ctx);
    aio_context_acquire(ctx);
    t->fn(c);
    if (t->blkfn) {
        t->blkfn(blk);
    }
    aio_context_release(ctx);
    blk_set_aio_context(blk, qemu_get_aio_context());

    bdrv_unref(bs);
    blk_unref(blk);
}

int main(int argc, char **argv)
{
    int i;

    bdrv_init();
    qemu_init_main_loop(&error_abort);

    g_test_init(&argc, &argv, NULL);

    for (i = 0; i < ARRAY_SIZE(sync_op_tests); i++) {
        const SyncOpTest *t = &sync_op_tests[i];
        g_test_add_data_func(t->name, t, test_sync_op);
    }

    return g_test_run();
}
pmp-tool/PMP
src/qemu/src-pmp/tests/e1000e-test.c
/*
 * QTest testcase for e1000e NIC
 *
 * Copyright (c) 2015 Ravello Systems LTD (http://ravellosystems.com)
 * Developed by Daynix Computing LTD (http://www.daynix.com)
 *
 * Authors:
 * <NAME> <<EMAIL>>
 * <NAME> <<EMAIL>>
 * <NAME> <<EMAIL>>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "libqtest.h"
#include "qemu-common.h"
#include "libqos/pci-pc.h"
#include "qemu/sockets.h"
#include "qemu/iov.h"
#include "qemu/bitops.h"
#include "libqos/malloc.h"
#include "libqos/e1000e.h"

/* Push one TX descriptor containing "TEST" through the device and
 * verify both the descriptor writeback and the bytes arriving on the
 * socket backend. */
static void e1000e_send_verify(QE1000E *d, int *test_sockets, QGuestAllocator *alloc)
{
    /* Extended (DTYP=data) TX descriptor layout */
    struct {
        uint64_t buffer_addr;
        union {
            uint32_t data;
            struct {
                uint16_t length;
                uint8_t cso;
                uint8_t cmd;
            } flags;
        } lower;
        union {
            uint32_t data;
            struct {
                uint8_t status;
                uint8_t css;
                uint16_t special;
            } fields;
        } upper;
    } descr;

    static const uint32_t dtyp_data = BIT(20);
    static const uint32_t dtyp_ext  = BIT(29);
    static const uint32_t dcmd_rs   = BIT(27);
    static const uint32_t dcmd_eop  = BIT(24);
    static const uint32_t dsta_dd   = BIT(0);
    static const int data_len = 64;
    char buffer[64];
    int ret;
    uint32_t recv_len;

    /* Prepare test data buffer */
    uint64_t data = guest_alloc(alloc, data_len);
    memwrite(data, "TEST", 5);

    /* Prepare TX descriptor */
    memset(&descr, 0, sizeof(descr));
    descr.buffer_addr = cpu_to_le64(data);
    descr.lower.data = cpu_to_le32(dcmd_rs   |
                                   dcmd_eop  |
                                   dtyp_ext  |
                                   dtyp_data |
                                   data_len);

    /* Put descriptor to the ring */
    e1000e_tx_ring_push(d, &descr);

    /* Wait for TX WB interrupt */
    e1000e_wait_isr(d, E1000E_TX0_MSG_ID);

    /* Check DD bit */
    g_assert_cmphex(le32_to_cpu(descr.upper.data) & dsta_dd, ==, dsta_dd);

    /* Check data sent to the backend: the socket netdev prefixes each
     * frame with a 4-byte length */
    ret = qemu_recv(test_sockets[0], &recv_len, sizeof(recv_len), 0);
    g_assert_cmpint(ret, == , sizeof(recv_len));
    qemu_recv(test_sockets[0], buffer, 64, 0);
    g_assert_cmpstr(buffer, == , "TEST");

    /* Free test data buffer */
    guest_free(alloc, data);
}

/* Inject one frame on the socket backend and verify the device DMAs it
 * into the guest buffer described by an RX descriptor. */
static void e1000e_receive_verify(QE1000E *d, int *test_sockets, QGuestAllocator *alloc)
{
    /* Extended RX descriptor: read layout on submit, writeback on
     * completion (the two overlay each other) */
    union {
        struct {
            uint64_t buffer_addr;
            uint64_t reserved;
        } read;
        struct {
            struct {
                uint32_t mrq;
                union {
                    uint32_t rss;
                    struct {
                        uint16_t ip_id;
                        uint16_t csum;
                    } csum_ip;
                } hi_dword;
            } lower;
            struct {
                uint32_t status_error;
                uint16_t length;
                uint16_t vlan;
            } upper;
        } wb;
    } descr;

    static const uint32_t esta_dd = BIT(0);

    char test[] = "TEST";
    int len = htonl(sizeof(test));
    struct iovec iov[] = {
        {
            .iov_base = &len,
            .iov_len = sizeof(len),
        },{
            .iov_base = test,
            .iov_len = sizeof(test),
        },
    };

    static const int data_len = 64;
    char buffer[64];
    int ret;

    /* Send a dummy packet to device's socket*/
    ret = iov_send(test_sockets[0], iov, 2, 0, sizeof(len) + sizeof(test));
    g_assert_cmpint(ret, == , sizeof(test) + sizeof(len));

    /* Prepare test data buffer */
    uint64_t data = guest_alloc(alloc, data_len);

    /* Prepare RX descriptor */
    memset(&descr, 0, sizeof(descr));
    descr.read.buffer_addr = cpu_to_le64(data);

    /* Put descriptor to the ring */
    e1000e_rx_ring_push(d, &descr);

    /* Wait for TX WB interrupt */
    e1000e_wait_isr(d, E1000E_RX0_MSG_ID);

    /* Check DD bit */
    g_assert_cmphex(le32_to_cpu(descr.wb.upper.status_error) &
        esta_dd, ==, esta_dd);

    /* Check data sent to the backend */
    memread(data, buffer, sizeof(buffer));
    g_assert_cmpstr(buffer, == , "TEST");

    /* Free test data buffer */
    guest_free(alloc, data);
}

static void test_e1000e_init(void *obj, void *data, QGuestAllocator * alloc)
{
    /* init does nothing */
}

static void test_e1000e_tx(void *obj, void *data, QGuestAllocator * alloc)
{
    QE1000E_PCI *e1000e = obj;
    QE1000E *d = &e1000e->e1000e;
    QOSGraphObject *e_object = obj;
    QPCIDevice *dev = e_object->get_driver(e_object, "pci-device");

    /* FIXME: add spapr support */
    if (qpci_check_buggy_msi(dev)) {
        return;
    }

    e1000e_send_verify(d, data, alloc);
}

static void test_e1000e_rx(void *obj, void *data, QGuestAllocator * alloc)
{
    QE1000E_PCI *e1000e = obj;
    QE1000E *d = &e1000e->e1000e;
    QOSGraphObject *e_object = obj;
    QPCIDevice *dev = e_object->get_driver(e_object, "pci-device");

    /* FIXME: add spapr support */
    if (qpci_check_buggy_msi(dev)) {
        return;
    }

    e1000e_receive_verify(d, data, alloc);
}

/* Stress both directions repeatedly to shake out ring-wrap issues */
static void test_e1000e_multiple_transfers(void *obj, void *data,
                                           QGuestAllocator *alloc)
{
    static const long iterations = 4 * 1024;
    long i;

    QE1000E_PCI *e1000e = obj;
    QE1000E *d = &e1000e->e1000e;
    QOSGraphObject *e_object = obj;
    QPCIDevice *dev = e_object->get_driver(e_object, "pci-device");

    /* FIXME: add spapr support */
    if (qpci_check_buggy_msi(dev)) {
        return;
    }

    for (i = 0; i < iterations; i++) {
        e1000e_send_verify(d, data, alloc);
        e1000e_receive_verify(d, data, alloc);
    }
}

static void test_e1000e_hotplug(void *obj, void *data, QGuestAllocator * alloc)
{
    qtest_qmp_device_add("e1000e", "e1000e_net", "{'addr': '0x06'}");
    qpci_unplug_acpi_device_test("e1000e_net", 0x06);
}

/* Teardown for the socketpair backend created in data_test_init */
static void data_test_clear(void *sockets)
{
    int *test_sockets = sockets;

    close(test_sockets[0]);
    qos_invalidate_command_line();
    close(test_sockets[1]);
    g_free(test_sockets);
}

/* Create a socketpair netdev backend; one end goes to QEMU, the other
 * is kept by the test to inject/inspect frames. */
static void *data_test_init(GString *cmd_line, void *arg)
{
    int *test_sockets = g_new(int, 2);
    int ret = socketpair(PF_UNIX, SOCK_STREAM, 0, test_sockets);
    g_assert_cmpint(ret, != , -1);

    g_string_append_printf(cmd_line, " -netdev socket,fd=%d,id=hs0 ",
                           test_sockets[1]);

    g_test_queue_destroy(data_test_clear, test_sockets);
    return test_sockets;
}

static void register_e1000e_test(void)
{
    QOSGraphTestOptions opts = {
        .before = data_test_init,
    };

    qos_add_test("init", "e1000e", test_e1000e_init, &opts);
    qos_add_test("tx", "e1000e", test_e1000e_tx, &opts);
    qos_add_test("rx", "e1000e", test_e1000e_rx, &opts);
    qos_add_test("multiple_transfers", "e1000e",
                 test_e1000e_multiple_transfers, &opts);
    qos_add_test("hotplug", "e1000e", test_e1000e_hotplug, &opts);
}

libqos_init(register_e1000e_test);
pmp-tool/PMP
src/qemu/src-pmp/roms/skiboot/include/npu2.h
/* Copyright 2013-2016 IBM Corp.
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * 	http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
 * implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#ifndef __NPU2_H
#define __NPU2_H

#include <pci.h>
#include <phys-map.h>
#include <npu2-regs.h>

/* Debugging options */
#define NPU2DBG(p, fmt, a...)	prlog(PR_DEBUG, "NPU%d: " fmt, \
				      (p)->phb_nvlink.opal_id, ##a)
#define NPU2INF(p, fmt, a...)	prlog(PR_INFO,  "NPU%d: " fmt, \
				      (p)->phb_nvlink.opal_id, ##a)
#define NPU2ERR(p, fmt, a...)	prlog(PR_ERR,   "NPU%d: " fmt, \
				      (p)->phb_nvlink.opal_id, ##a)

/* Per-device logging: prefixes messages with the NVLink device BDF */
#define NPU2DEVLOG(l, p, fmt, a...)	prlog(l, "NPU%d:%d:%d.%d " fmt, \
					      (p)->npu->phb_nvlink.opal_id, \
					      ((p)->bdfn >> 8) & 0xff, \
					      ((p)->bdfn >> 3) & 0x1f, \
					      (p)->bdfn & 0x7, ##a)
#define NPU2DEVDBG(p, fmt, a...)	NPU2DEVLOG(PR_DEBUG, p, fmt, ##a)
#define NPU2DEVINF(p, fmt, a...)	NPU2DEVLOG(PR_INFO, p, fmt, ##a)
#define NPU2DEVERR(p, fmt, a...)	NPU2DEVLOG(PR_ERR, p, fmt, ##a)

#define OCAPIDBG(dev, fmt, a...)	prlog(PR_DEBUG, "OCAPI[%d:%d]: " fmt, \
					      dev->npu->chip_id, dev->brick_index, ## a)
#define OCAPIINF(dev, fmt, a...)	prlog(PR_INFO, "OCAPI[%d:%d]: " fmt, \
					      dev->npu->chip_id, dev->brick_index, ## a)
#define OCAPIERR(dev, fmt, a...)	prlog(PR_ERR, "OCAPI[%d:%d]: " fmt, \
					      dev->npu->chip_id, dev->brick_index, ## a)

/* Number of PEs supported */
#define NPU2_MAX_PE_NUM		16
#define NPU2_RESERVED_PE_NUM	15

#define NPU2_LINKS_PER_CHIP	6

/* Link flags */
#define NPU2_DEV_PCI_LINKED	0x1
#define NPU2_DEV_DL_RESET	0x2

/* Return the stack (0-2) of a device */
#define NPU2DEV_STACK(ndev) ((ndev)->brick_index / 2)

/* Return the brick number (0-1) within a stack */
#define NPU2DEV_BRICK(ndev) ((ndev)->brick_index % 2)

/* This represents the state of the actual hardware BARs not the
 * emulated PCIe BARs. The is a subtle difference between the two as
 * not all BARs are exposed outside of skiboot. */
struct npu2_bar {
	enum phys_map_type	type;
	int			index;
#define NPU2_BAR_FLAG_ENABLED	0x0010

/* Generation ID's are a single space in the hardware but we split
 * them in two for the emulated PCIe devices so we need to keep track
 * of which one has been enabled/disabled. */
#define NPU2_BAR_FLAG_ENABLED0	0x0080
#define NPU2_BAR_FLAG_ENABLED1	0x0100
	uint32_t		flags;
	uint64_t		base;
	uint64_t		size;
	uint64_t		reg;	/* xscom register backing this BAR */
};

/* Represents a BAR that is exposed via the PCIe emulated
 * devices */
struct npu2_pcie_bar {
#define NPU2_PCIE_BAR_FLAG_SIZE_HI	0x0020
#define NPU2_PCIE_BAR_FLAG_TRAPPED	0x0040
	uint32_t		flags;
	struct npu2_bar		npu2_bar;
};

enum npu2_dev_type {
	NPU2_DEV_TYPE_UNKNOWN,
	NPU2_DEV_TYPE_NVLINK,
	NPU2_DEV_TYPE_OPENCAPI,
};

struct npu2;

/* NVLink-mode-only state of a brick */
struct npu2_dev_nvlink {
	/* For NVLink, device and function numbers are allocated based
	 * on GPU association. Links to connected to the same GPU will
	 * be exposed as different functions of the same
	 * bus/device. */
	uint32_t		gpu_bdfn;

	/* PCI virtual device and the associated GPU device */
	struct pci_virt_device	*pvd;
	struct phb		*phb;
	struct pci_device	*pd;

	uint8_t			link_flags;

	/* Used to associate the NPU device with GPU PCI devices */
	const char		*slot_label;
};

/* One brick (link) of an NPU, in either NVLink or OpenCAPI mode */
struct npu2_dev {
	enum npu2_dev_type	type;
	uint32_t		link_index;
	uint32_t		brick_index;
	uint64_t		pl_xscom_base;
	struct dt_node		*dt_node;
	struct npu2_pcie_bar	bars[2];
	struct npu2		*npu;

	uint32_t		bdfn;

	/* Which PHY lanes this device is associated with */
	uint32_t		lane_mask;

	uint64_t		link_speed; /* not used for NVLink */

	/* Track currently running procedure and step number */
	uint16_t		procedure_number;
	uint16_t		procedure_step;
	unsigned long		procedure_tb;
	uint32_t		procedure_status;

	/* NVLink */
	struct npu2_dev_nvlink	nvlink;

	/* OpenCAPI */
	struct phb		phb_ocapi;
	bool			train_need_fence;
	bool			train_fenced;
};

/* One NPU (NVLink Processing Unit) instance on a chip */
struct npu2 {
	uint32_t	index;
	struct dt_node	*dt_node;
	uint32_t	chip_id;
	uint64_t	xscom_base;
	void		*regs;
	uint64_t	mm_base;
	uint64_t	mm_size;
	uint32_t	base_lsi;
	uint32_t	irq_base;
	uint32_t	total_devices;
	struct npu2_dev	*devices;
	enum phys_map_type gpu_map_type;

	/* IODA cache */
	uint64_t	tve_cache[16];
	bool		tx_zcal_complete[2];

	/* Used to protect global MMIO space, in particular the XTS
	 * tables. */
	struct lock	lock;

	/* NVLink */
	struct phb	phb_nvlink;
	uint32_t	phb_index;
	int		ctx_ref[NPU2_XTS_BDF_MAP_SIZE];

	/* OpenCAPI */
	uint64_t	i2c_port_id_ocapi;
};

/* Recover the npu2 from its embedded NVLink phb */
static inline struct npu2 *phb_to_npu2_nvlink(struct phb *phb)
{
	assert(phb->phb_type == phb_type_npu_v2);
	return container_of(phb, struct npu2, phb_nvlink);
}

/* Recover the npu2_dev from its embedded OpenCAPI phb */
static inline struct npu2_dev *phb_to_npu2_dev_ocapi(struct phb *phb)
{
	assert(phb->phb_type == phb_type_npu_v2_opencapi);
	return container_of(phb, struct npu2_dev, phb_ocapi);
}

/* Return the phb backing a device, whichever mode it is in */
static inline struct phb *npu2_dev_to_phb(struct npu2_dev *ndev)
{
	switch (ndev->type) {
	case NPU2_DEV_TYPE_NVLINK:
		return &ndev->npu->phb_nvlink;
	case NPU2_DEV_TYPE_OPENCAPI:
		return &ndev->phb_ocapi;
	default:
		assert(false);
	}
}

void npu2_i2c_presence_detect(struct npu2 *npu);
int npu2_opencapi_init_npu(struct npu2 *npu);
int npu2_nvlink_init_npu(struct npu2 *npu);
void npu2_nvlink_create_phb(struct npu2 *npu, struct dt_node *dn);

/* Register accessors (4-byte and 8-byte, with optional mask forms) */
void npu2_write_4b(struct npu2 *p, uint64_t reg, uint32_t val);
uint32_t npu2_read_4b(struct npu2 *p, uint64_t reg);
void npu2_write(struct npu2 *p, uint64_t reg, uint64_t val);
uint64_t npu2_read(struct npu2 *p, uint64_t reg);
void npu2_write_mask(struct npu2 *p, uint64_t reg, uint64_t val, uint64_t mask);
void npu2_write_mask_4b(struct npu2 *p, uint64_t reg, uint32_t val, uint32_t mask);

/* Link-training procedure state machine, driven through PCI config
 * space accesses */
int64_t npu2_dev_procedure(void *dev, struct pci_cfg_reg_filter *pcrf,
			   uint32_t offset, uint32_t len, uint32_t *data,
			   bool write);
void npu2_dev_procedure_reset(struct npu2_dev *dev);

void npu2_set_link_flag(struct npu2_dev *ndev, uint8_t flag);
void npu2_clear_link_flag(struct npu2_dev *ndev, uint8_t flag);
uint32_t reset_ntl(struct npu2_dev *ndev);
extern int nv_zcal_nominal;
void npu2_opencapi_phy_setup(struct npu2_dev *dev);
void npu2_opencapi_phy_prbs31(struct npu2_dev *dev);
void npu2_opencapi_bump_ui_lane(struct npu2_dev *dev);
int64_t npu2_freeze_status(struct phb *phb __unused,
			   uint64_t pe_number __unused,
			   uint8_t *freeze_state,
			   uint16_t *pci_error_type __unused,
			   uint16_t *severity __unused);

#endif /* __NPU2_H */
pmp-tool/PMP
src/qemu/src-pmp/include/authz/base.h
<reponame>pmp-tool/PMP
/*
 * QEMU authorization framework base class
 *
 * Copyright (c) 2018 Red Hat, Inc.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 *
 */

/* NOTE(review): identifiers containing "__" are reserved for the
 * implementation; a guard like QAUTHZ_BASE_H would be preferable,
 * but cannot be changed here without touching all users. */
#ifndef QAUTHZ_BASE_H__
#define QAUTHZ_BASE_H__

#include "qemu-common.h"
#include "qapi/error.h"
#include "qom/object.h"

/* QOM type name and the usual cast/class-lookup macro trio */
#define TYPE_QAUTHZ "authz"

#define QAUTHZ_CLASS(klass) \
     OBJECT_CLASS_CHECK(QAuthZClass, (klass), \
                        TYPE_QAUTHZ)
#define QAUTHZ_GET_CLASS(obj) \
     OBJECT_GET_CLASS(QAuthZClass, (obj), \
                      TYPE_QAUTHZ)
#define QAUTHZ(obj) \
     OBJECT_CHECK(QAuthZ, (obj), \
                  TYPE_QAUTHZ)

typedef struct QAuthZ QAuthZ;
typedef struct QAuthZClass QAuthZClass;

/**
 * QAuthZ:
 *
 * The QAuthZ class defines an API contract to be used
 * for providing an authorization driver for services
 * with user identities.
 */
struct QAuthZ {
    Object parent_obj;
};

/* Abstract class: subclasses must implement is_allowed() */
struct QAuthZClass {
    ObjectClass parent_class;

    bool (*is_allowed)(QAuthZ *authz,
                       const char *identity,
                       Error **errp);
};

/**
 * qauthz_is_allowed:
 * @authz: the authorization object
 * @identity: the user identity to authorize
 * @errp: pointer to a NULL initialized error object
 *
 * Check if a user @identity is authorized. If an error
 * occurs this method will return false to indicate
 * denial, as well as setting @errp to contain the details.
 * Callers are recommended to treat the denial and error
 * scenarios identically. Specifically the error info in
 * @errp should never be fed back to the user being
 * authorized, it is merely for benefit of administrator
 * debugging.
 *
 * Returns: true if @identity is authorized, false if denied or if
 *          an error occurred.
 */
bool qauthz_is_allowed(QAuthZ *authz,
                       const char *identity,
                       Error **errp);

/**
 * qauthz_is_allowed_by_id:
 * @authzid: ID of the authorization object
 * @identity: the user identity to authorize
 * @errp: pointer to a NULL initialized error object
 *
 * Check if a user @identity is authorized. If an error
 * occurs this method will return false to indicate
 * denial, as well as setting @errp to contain the details.
 * Callers are recommended to treat the denial and error
 * scenarios identically. Specifically the error info in
 * @errp should never be fed back to the user being
 * authorized, it is merely for benefit of administrator
 * debugging.
 *
 * Returns: true if @identity is authorized, false if denied or if
 *          an error occurred.
 */
bool qauthz_is_allowed_by_id(const char *authzid,
                             const char *identity,
                             Error **errp);

#endif /* QAUTHZ_BASE_H__ */
pmp-tool/PMP
src/qemu/src-pmp/include/hw/riscv/sifive_u.h
/*
 * SiFive U series machine interface
 *
 * Copyright (c) 2017 SiFive, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef HW_SIFIVE_U_H
#define HW_SIFIVE_U_H

#include "hw/net/cadence_gem.h"

#define TYPE_RISCV_U_SOC "riscv.sifive.u.soc"
#define RISCV_U_SOC(obj) \
    OBJECT_CHECK(SiFiveUSoCState, (obj), TYPE_RISCV_U_SOC)

/* On-chip state: hart array, PLIC interrupt controller, GEM NIC */
typedef struct SiFiveUSoCState {
    /*< private >*/
    SysBusDevice parent_obj;

    /*< public >*/
    RISCVHartArrayState cpus;
    DeviceState *plic;
    CadenceGEMState gem;
} SiFiveUSoCState;

/* Machine state: the SoC plus the generated device tree blob */
typedef struct SiFiveUState {
    /*< private >*/
    SysBusDevice parent_obj;

    /*< public >*/
    SiFiveUSoCState soc;
    void *fdt;
    int fdt_size;
} SiFiveUState;

/* Indices into the board's memory map table */
enum {
    SIFIVE_U_DEBUG,
    SIFIVE_U_MROM,
    SIFIVE_U_CLINT,
    SIFIVE_U_PLIC,
    SIFIVE_U_UART0,
    SIFIVE_U_UART1,
    SIFIVE_U_DRAM,
    SIFIVE_U_GEM
};

/* PLIC interrupt source numbers */
enum {
    SIFIVE_U_UART0_IRQ = 3,
    SIFIVE_U_UART1_IRQ = 4,
    SIFIVE_U_GEM_IRQ = 0x35
};

/* Clock frequencies in Hz */
enum {
    SIFIVE_U_CLOCK_FREQ = 1000000000,
    SIFIVE_U_GEM_CLOCK_FREQ = 125000000
};

#define SIFIVE_U_PLIC_HART_CONFIG "MS"
#define SIFIVE_U_PLIC_NUM_SOURCES 127
#define SIFIVE_U_PLIC_NUM_PRIORITIES 7
#define SIFIVE_U_PLIC_PRIORITY_BASE 0x0
#define SIFIVE_U_PLIC_PENDING_BASE 0x1000
#define SIFIVE_U_PLIC_ENABLE_BASE 0x2000
#define SIFIVE_U_PLIC_ENABLE_STRIDE 0x80
#define SIFIVE_U_PLIC_CONTEXT_BASE 0x200000
#define SIFIVE_U_PLIC_CONTEXT_STRIDE 0x1000

/* CPU model depends on target bitness; SIFIVE_U_CPU is deliberately
 * left undefined for any other target so misuse fails at compile time. */
#if defined(TARGET_RISCV32)
#define SIFIVE_U_CPU TYPE_RISCV_CPU_SIFIVE_U34
#elif defined(TARGET_RISCV64)
#define SIFIVE_U_CPU TYPE_RISCV_CPU_SIFIVE_U54
#endif

#endif
pmp-tool/PMP
src/qemu/src-pmp/roms/SLOF/lib/libhvcall/libhvcall.h
#ifndef __LIBHVCALL_H__
#define __LIBHVCALL_H__

/* PAPR hypervisor call return codes */
#define H_SUCCESS	0
#define H_HARDWARE	-1
#define H_PRIVILEGE	-3	/* Caller not privileged */

/* PAPR hypervisor call opcodes */
#define H_GET_TCE	0x1C
#define H_PUT_TCE	0x20
#define H_LOGICAL_CI_LOAD	0x3c
#define H_LOGICAL_CI_STORE	0x40
#define H_GET_TERM_CHAR	0x54
#define H_PUT_TERM_CHAR	0x58
#define H_REG_CRQ	0xFC
#define H_FREE_CRQ	0x100
#define H_SEND_CRQ	0x108
#define H_REGISTER_LOGICAL_LAN	0x114
#define H_FREE_LOGICAL_LAN	0x118
#define H_ADD_LOGICAL_LAN_BUFFER	0x11C
#define H_SEND_LOGICAL_LAN	0x120

/* KVM specific ones */
#define KVMPPC_HCALL_BASE	0xf000
#define KVMPPC_H_RTAS		(KVMPPC_HCALL_BASE + 0x0)
#define KVMPPC_H_LOGICAL_MEMOP	(KVMPPC_HCALL_BASE + 0x1)
/* Client Architecture support */
#define KVMPPC_H_CAS		(KVMPPC_HCALL_BASE + 0x2)
#define KVMPPC_H_UPDATE_DT	(KVMPPC_HCALL_BASE + 0x3)
#define KVMPPC_HCALL_MAX	KVMPPC_H_UPDATE_DT

#ifndef __ASSEMBLY__

/* Variadic trampoline performing the actual sc-1 hypercall;
 * implemented in assembly elsewhere in the library. */
extern long hv_generic(unsigned long opcode, ...);

extern void hv_putchar(char c, int hvtermno);
extern char hv_getchar(int hvtermno);
extern char hv_haschar(int hvtermno);
extern void get_print_banner(unsigned long addr);
extern int hv_send_crq(unsigned int unit, uint64_t *msgaddr);

/* Register a Command Request Queue for a VIO unit */
static inline long hv_reg_crq(unsigned int unit, unsigned long qaddr,
			      unsigned long qsize)
{
	return hv_generic(H_REG_CRQ, unit, qaddr, qsize);
}

/* Release a previously registered CRQ; return value is ignored */
static inline void hv_free_crq(unsigned int unit)
{
	hv_generic(H_FREE_CRQ, unit);
}

extern long hv_send_logical_lan(unsigned long unit_address,
	unsigned long desc1, unsigned long desc2, unsigned long desc3,
	unsigned long desc4, unsigned long desc5, unsigned long desc6);

static inline long h_register_logical_lan(unsigned long unit_address,
	unsigned long buf_list, unsigned long rec_q,
	unsigned long filter_list, unsigned long mac_address)
{
	return hv_generic(H_REGISTER_LOGICAL_LAN, unit_address, buf_list,
			  rec_q, filter_list, mac_address);
}

static inline long h_free_logical_lan(unsigned long unit_address)
{
	return hv_generic(H_FREE_LOGICAL_LAN, unit_address);
}

static inline long h_add_logical_lan_buffer(unsigned long unit_address,
					    unsigned long buffer)
{
	return hv_generic(H_ADD_LOGICAL_LAN_BUFFER, unit_address, buffer);
}

#define HV_RTAS_MAX_ARGRET 5

/* In-memory argument buffer passed by address to KVMPPC_H_RTAS */
struct hv_rtas_call {
	uint32_t token;
	uint32_t nargs;
	uint32_t nrets;
	uint32_t argret[HV_RTAS_MAX_ARGRET];
};

static inline unsigned long h_rtas(struct hv_rtas_call *rtas_buf)
{
	return hv_generic(KVMPPC_H_RTAS, (unsigned long)rtas_buf);
}

extern unsigned long hv_logical_ci_load(unsigned long size, unsigned long addr);
extern unsigned long hv_logical_ci_store(unsigned long size, unsigned long addr,
					 unsigned long value);
extern unsigned long hv_logical_memop(unsigned long dst, unsigned long src,
				      unsigned long esize, unsigned long count,
				      unsigned long op);
extern int patch_broken_sc1(void *start, void *end, uint32_t *test_ins);
extern unsigned long hv_cas(unsigned long vec, unsigned long buf,
			    unsigned long size);

#endif /* __ASSEMBLY__ */

#endif /* __LIBHVCALL_H__ */
pmp-tool/PMP
src/qemu/src-pmp/include/hw/i386/intel_iommu.h
/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 *   (DMA Remapping device)
 *
 * Copyright (C) 2013 <NAME>, Oracle <<EMAIL>>
 * Copyright (C) 2014 <NAME>, <<EMAIL>>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef INTEL_IOMMU_H
#define INTEL_IOMMU_H

#include "hw/qdev.h"
#include "sysemu/dma.h"
#include "hw/i386/x86-iommu.h"
#include "hw/i386/ioapic.h"
#include "hw/pci/msi.h"
#include "hw/sysbus.h"
#include "qemu/iova-tree.h"

#define TYPE_INTEL_IOMMU_DEVICE "intel-iommu"
#define INTEL_IOMMU_DEVICE(obj) \
     OBJECT_CHECK(IntelIOMMUState, (obj), TYPE_INTEL_IOMMU_DEVICE)

#define TYPE_INTEL_IOMMU_MEMORY_REGION "intel-iommu-iommu-memory-region"

/* DMAR Hardware Unit Definition address (IOMMU unit) */
#define Q35_HOST_BRIDGE_IOMMU_ADDR  0xfed90000ULL

#define VTD_PCI_BUS_MAX             256
#define VTD_PCI_SLOT_MAX            32
#define VTD_PCI_FUNC_MAX            8
#define VTD_PCI_SLOT(devfn)         (((devfn) >> 3) & 0x1f)
#define VTD_PCI_FUNC(devfn)         ((devfn) & 0x07)
#define VTD_SID_TO_BUS(sid)         (((sid) >> 8) & 0xff)
#define VTD_SID_TO_DEVFN(sid)       ((sid) & 0xff)

#define DMAR_REG_SIZE               0x230
#define VTD_HOST_AW_39BIT           39
#define VTD_HOST_AW_48BIT           48
#define VTD_HOST_ADDRESS_WIDTH      VTD_HOST_AW_39BIT
#define VTD_HAW_MASK(aw)            ((1ULL << (aw)) - 1)

#define DMAR_REPORT_F_INTR          (1)

#define VTD_MSI_ADDR_HI_MASK        (0xffffffff00000000ULL)
#define VTD_MSI_ADDR_HI_SHIFT       (32)
#define VTD_MSI_ADDR_LO_MASK        (0x00000000ffffffffULL)

typedef struct VTDContextEntry VTDContextEntry;
typedef struct VTDContextCacheEntry VTDContextCacheEntry;
typedef struct IntelIOMMUState IntelIOMMUState;
typedef struct VTDAddressSpace VTDAddressSpace;
typedef struct VTDIOTLBEntry VTDIOTLBEntry;
typedef struct VTDBus VTDBus;
typedef union VTD_IR_TableEntry VTD_IR_TableEntry;
typedef union VTD_IR_MSIAddress VTD_IR_MSIAddress;
typedef struct VTDPASIDDirEntry VTDPASIDDirEntry;
typedef struct VTDPASIDEntry VTDPASIDEntry;

/* Context-Entry */
struct VTDContextEntry {
    union {
        /* NOTE(review): lo/hi covers 16 bytes while val[4] covers 32;
         * presumably the larger layout is for scalable-mode root/context
         * entries -- confirm against the VT-d spec before relying on it. */
        struct {
            uint64_t lo;
            uint64_t hi;
        };
        struct {
            uint64_t val[4];
        };
    };
};

struct VTDContextCacheEntry {
    /* The cache entry is obsolete if
     * context_cache_gen!=IntelIOMMUState.context_cache_gen
     */
    uint32_t context_cache_gen;
    struct VTDContextEntry context_entry;
};

/* PASID Directory Entry */
struct VTDPASIDDirEntry {
    uint64_t val;
};

/* PASID Table Entry */
struct VTDPASIDEntry {
    uint64_t val[8];
};

/* Per-device (bus, devfn) DMA address space managed by the IOMMU */
struct VTDAddressSpace {
    PCIBus *bus;
    uint8_t devfn;
    AddressSpace as;
    IOMMUMemoryRegion iommu;
    MemoryRegion root;          /* The root container of the device */
    MemoryRegion nodmar;        /* The alias of shared nodmar MR */
    MemoryRegion iommu_ir;      /* Interrupt region: 0xfeeXXXXX */
    IntelIOMMUState *iommu_state;
    VTDContextCacheEntry context_cache_entry;
    QLIST_ENTRY(VTDAddressSpace) next;
    /* Superset of notifier flags that this address space has */
    IOMMUNotifierFlag notifier_flags;
    IOVATree *iova_tree;          /* Traces mapped IOVA ranges */
};

struct VTDBus {
    PCIBus* bus;		/* A reference to the bus to provide translation for */
    /* A table of VTDAddressSpace objects indexed by devfn.
     * NOTE(review): [0] is the pre-C99 flexible-array idiom; the
     * standard form today would be dev_as[]. Left unchanged here. */
    VTDAddressSpace *dev_as[0];
};

struct VTDIOTLBEntry {
    uint64_t gfn;
    uint16_t domain_id;
    uint64_t slpte;
    uint64_t mask;
    uint8_t access_flags;
};

/* VT-d Source-ID Qualifier types */
enum {
    VTD_SQ_FULL = 0x00,     /* Full SID verification */
    VTD_SQ_IGN_3 = 0x01,    /* Ignore bit 3 */
    VTD_SQ_IGN_2_3 = 0x02,  /* Ignore bits 2 & 3 */
    VTD_SQ_IGN_1_3 = 0x03,  /* Ignore bits 1-3 */
    VTD_SQ_MAX,
};

/* VT-d Source Validation Types */
enum {
    VTD_SVT_NONE = 0x00,    /* No validation */
    VTD_SVT_ALL = 0x01,     /* Do full validation */
    VTD_SVT_BUS = 0x02,     /* Validate bus range */
    VTD_SVT_MAX,
};

/* Interrupt Remapping Table Entry Definition */
union VTD_IR_TableEntry {
    struct {
#ifdef HOST_WORDS_BIGENDIAN
        uint32_t __reserved_1:8;     /* Reserved 1 */
        uint32_t vector:8;           /* Interrupt Vector */
        uint32_t irte_mode:1;        /* IRTE Mode */
        uint32_t __reserved_0:3;     /* Reserved 0 */
        uint32_t __avail:4;          /* Available spaces for software */
        uint32_t delivery_mode:3;    /* Delivery Mode */
        uint32_t trigger_mode:1;     /* Trigger Mode */
        uint32_t redir_hint:1;       /* Redirection Hint */
        uint32_t dest_mode:1;        /* Destination Mode */
        uint32_t fault_disable:1;    /* Fault Processing Disable */
        uint32_t present:1;          /* Whether entry present/available */
#else
        uint32_t present:1;          /* Whether entry present/available */
        uint32_t fault_disable:1;    /* Fault Processing Disable */
        uint32_t dest_mode:1;        /* Destination Mode */
        uint32_t redir_hint:1;       /* Redirection Hint */
        uint32_t trigger_mode:1;     /* Trigger Mode */
        uint32_t delivery_mode:3;    /* Delivery Mode */
        uint32_t __avail:4;          /* Available spaces for software */
        uint32_t __reserved_0:3;     /* Reserved 0 */
        uint32_t irte_mode:1;        /* IRTE Mode */
        uint32_t vector:8;           /* Interrupt Vector */
        uint32_t __reserved_1:8;     /* Reserved 1 */
#endif
        uint32_t dest_id;            /* Destination ID */
        uint16_t source_id;          /* Source-ID */
#ifdef HOST_WORDS_BIGENDIAN
        uint64_t __reserved_2:44;    /* Reserved 2 */
        uint64_t sid_vtype:2;        /* Source-ID Validation Type */
        uint64_t sid_q:2;            /* Source-ID Qualifier */
#else
        uint64_t sid_q:2;            /* Source-ID Qualifier */
        uint64_t sid_vtype:2;        /* Source-ID Validation Type */
        uint64_t __reserved_2:44;    /* Reserved 2 */
#endif
    } QEMU_PACKED irte;
    uint64_t data[2];
};

#define VTD_IR_INT_FORMAT_COMPAT    (0) /* Compatible Interrupt */
#define VTD_IR_INT_FORMAT_REMAP     (1) /* Remappable Interrupt */

/* Programming format for MSI/MSI-X addresses */
union VTD_IR_MSIAddress {
    struct {
#ifdef HOST_WORDS_BIGENDIAN
        uint32_t __head:12;          /* Should always be: 0x0fee */
        uint32_t index_l:15;         /* Interrupt index bit 14-0 */
        uint32_t int_mode:1;         /* Interrupt format */
        uint32_t sub_valid:1;        /* SHV: Sub-Handle Valid bit */
        uint32_t index_h:1;          /* Interrupt index bit 15 */
        uint32_t __not_care:2;
#else
        uint32_t __not_care:2;
        uint32_t index_h:1;          /* Interrupt index bit 15 */
        uint32_t sub_valid:1;        /* SHV: Sub-Handle Valid bit */
        uint32_t int_mode:1;         /* Interrupt format */
        uint32_t index_l:15;         /* Interrupt index bit 14-0 */
        uint32_t __head:12;          /* Should always be: 0x0fee */
#endif
    } QEMU_PACKED addr;
    uint32_t data;
};

/* When IR is enabled, all MSI/MSI-X data bits should be zero */
#define VTD_IR_MSI_DATA          (0)

/* The iommu (DMAR) device state struct */
struct IntelIOMMUState {
    X86IOMMUState x86_iommu;
    MemoryRegion csrmem;
    MemoryRegion mr_nodmar;
    MemoryRegion mr_ir;
    MemoryRegion mr_sys_alias;
    uint8_t csr[DMAR_REG_SIZE];     /* register values */
    uint8_t wmask[DMAR_REG_SIZE];   /* R/W bytes */
    uint8_t w1cmask[DMAR_REG_SIZE]; /* RW1C(Write 1 to Clear) bytes */
    uint8_t womask[DMAR_REG_SIZE];  /* WO (write only - read returns 0) */
    uint32_t version;

    bool caching_mode;              /* RO - is cap CM enabled? */
    bool scalable_mode;             /* RO - is Scalable Mode supported? */

    dma_addr_t root;                /* Current root table pointer */
    bool root_extended;             /* Type of root table (extended or not) */
    bool root_scalable;             /* Type of root table (scalable or not) */
    bool dmar_enabled;              /* Set if DMA remapping is enabled */

    uint16_t iq_head;               /* Current invalidation queue head */
    uint16_t iq_tail;               /* Current invalidation queue tail */
    dma_addr_t iq;                  /* Current invalidation queue pointer */
    uint16_t iq_size;               /* IQ Size in number of entries */
    bool iq_dw;                     /* IQ descriptor width 256bit or not */
    bool qi_enabled;                /* Set if the QI is enabled */
    uint8_t iq_last_desc_type;      /* The type of last completed descriptor */

    /* The index of the Fault Recording Register to be used next.
     * Wraps around from N-1 to 0, where N is the number of FRCD_REG.
     */
    uint16_t next_frcd_reg;

    uint64_t cap;                   /* The value of capability reg */
    uint64_t ecap;                  /* The value of extended capability reg */

    uint32_t context_cache_gen;     /* Should be in [1,MAX] */
    GHashTable *iotlb;              /* IOTLB */

    GHashTable *vtd_as_by_busptr;   /* VTDBus objects indexed by PCIBus* reference */
    VTDBus *vtd_as_by_bus_num[VTD_PCI_BUS_MAX]; /* VTDBus objects indexed by bus number */
    /* list of registered notifiers */
    QLIST_HEAD(, VTDAddressSpace) vtd_as_with_notifiers;

    /* interrupt remapping */
    bool intr_enabled;              /* Whether guest enabled IR */
    dma_addr_t intr_root;           /* Interrupt remapping table pointer */
    uint32_t intr_size;             /* Number of IR table entries */
    bool intr_eime;                 /* Extended interrupt mode enabled */
    OnOffAuto intr_eim;             /* Toggle for EIM cabability */
    bool buggy_eim;                 /* Force buggy EIM unless eim=off */
    uint8_t aw_bits;                /* Host/IOVA address width (in bits) */
    bool dma_drain;                 /* Whether DMA r/w draining enabled */

    /*
     * Protects IOMMU states in general.  Currently it protects the
     * per-IOMMU IOTLB cache, and context entry cache in VTDAddressSpace.
     */
    QemuMutex iommu_lock;
};

/* Find the VTD Address space associated with the given bus pointer,
 * create a new one if none exists
 */
VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn);

#endif
pmp-tool/PMP
src/qemu/src-pmp/hw/scsi/vhost-user-scsi.c
/*
 * vhost-user-scsi host device
 *
 * Copyright (c) 2016 Nutanix Inc. All rights reserved.
 *
 * Author:
 *  <NAME> <<EMAIL>>
 *
 * This work is largely based on the "vhost-scsi" implementation by:
 *  <NAME> <<EMAIL>>
 *  <NAME>  <<EMAIL>>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qom/object.h"
#include "hw/fw-path-provider.h"
#include "hw/qdev-core.h"
#include "hw/virtio/vhost.h"
#include "hw/virtio/vhost-backend.h"
#include "hw/virtio/vhost-user-scsi.h"
#include "hw/virtio/virtio.h"
#include "hw/virtio/virtio-access.h"
#include "chardev/char-fe.h"

/* Features supported by the host application */
static const int user_feature_bits[] = {
    VIRTIO_F_NOTIFY_ON_EMPTY,
    VIRTIO_RING_F_INDIRECT_DESC,
    VIRTIO_RING_F_EVENT_IDX,
    VIRTIO_SCSI_F_HOTPLUG,
    VHOST_INVALID_FEATURE_BIT
};

/* Start/stop the vhost backend when the guest driver flips DRIVER_OK */
static void vhost_user_scsi_set_status(VirtIODevice *vdev, uint8_t status)
{
    VHostUserSCSI *s = (VHostUserSCSI *)vdev;
    VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
    bool start = (status & VIRTIO_CONFIG_S_DRIVER_OK) && vdev->vm_running;

    if (vsc->dev.started == start) {
        return;
    }

    if (start) {
        int ret;

        ret = vhost_scsi_common_start(vsc);
        if (ret < 0) {
            error_report("unable to start vhost-user-scsi: %s",
                         strerror(-ret));
            exit(1);
        }
    } else {
        vhost_scsi_common_stop(vsc);
    }
}

/* Queues are serviced by the external vhost-user backend, so the
 * in-QEMU virtqueue handlers intentionally do nothing. */
static void vhost_dummy_handle_output(VirtIODevice *vdev, VirtQueue *vq)
{
}

/*
 * Realize the device: set up the common virtio-scsi state, connect to
 * the vhost-user backend over the chardev and initialize the vhost
 * device.  Any failure after virtio_scsi_common_realize() succeeds
 * must unwind what was already set up (previously these paths leaked
 * the virtio-scsi state and the vqs array).
 */
static void vhost_user_scsi_realize(DeviceState *dev, Error **errp)
{
    VirtIOSCSICommon *vs = VIRTIO_SCSI_COMMON(dev);
    VHostUserSCSI *s = VHOST_USER_SCSI(dev);
    VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
    Error *err = NULL;
    int ret;

    if (!vs->conf.chardev.chr) {
        error_setg(errp, "vhost-user-scsi: missing chardev");
        return;
    }

    virtio_scsi_common_realize(dev, vhost_dummy_handle_output,
                               vhost_dummy_handle_output,
                               vhost_dummy_handle_output, &err);
    if (err != NULL) {
        error_propagate(errp, err);
        return;
    }

    if (!vhost_user_init(&s->vhost_user, &vs->conf.chardev, errp)) {
        goto free_virtio;
    }

    /* 2 fixed queues (control + event) plus the request queues */
    vsc->dev.nvqs = 2 + vs->conf.num_queues;
    vsc->dev.vqs = g_new(struct vhost_virtqueue, vsc->dev.nvqs);
    vsc->dev.vq_index = 0;
    vsc->dev.backend_features = 0;

    ret = vhost_dev_init(&vsc->dev, &s->vhost_user,
                         VHOST_BACKEND_TYPE_USER, 0);
    if (ret < 0) {
        error_setg(errp, "vhost-user-scsi: vhost initialization failed: %s",
                   strerror(-ret));
        goto free_vhost;
    }

    /* Channel and lun both are 0 for bootable vhost-user-scsi disk */
    vsc->channel = 0;
    vsc->lun = 0;
    vsc->target = vs->conf.boot_tpgt;
    return;

free_vhost:
    /* vhost_dev_init() does not free the caller-allocated vqs array */
    vhost_user_cleanup(&s->vhost_user);
    g_free(vsc->dev.vqs);
    vsc->dev.vqs = NULL;
free_virtio:
    virtio_scsi_common_unrealize(dev, NULL);
}

/* Tear down in reverse order of realize */
static void vhost_user_scsi_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VHostUserSCSI *s = VHOST_USER_SCSI(dev);
    VHostSCSICommon *vsc = VHOST_SCSI_COMMON(s);
    struct vhost_virtqueue *vqs = vsc->dev.vqs;

    /* This will stop the vhost backend. */
    vhost_user_scsi_set_status(vdev, 0);

    vhost_dev_cleanup(&vsc->dev);
    g_free(vqs);

    virtio_scsi_common_unrealize(dev, errp);
    vhost_user_cleanup(&s->vhost_user);
}

static Property vhost_user_scsi_properties[] = {
    DEFINE_PROP_CHR("chardev", VirtIOSCSICommon, conf.chardev),
    DEFINE_PROP_UINT32("boot_tpgt", VirtIOSCSICommon, conf.boot_tpgt, 0),
    DEFINE_PROP_UINT32("num_queues", VirtIOSCSICommon, conf.num_queues, 1),
    DEFINE_PROP_UINT32("virtqueue_size", VirtIOSCSICommon, conf.virtqueue_size,
                       128),
    DEFINE_PROP_UINT32("max_sectors", VirtIOSCSICommon, conf.max_sectors,
                       0xFFFF),
    DEFINE_PROP_UINT32("cmd_per_lun", VirtIOSCSICommon, conf.cmd_per_lun, 128),
    DEFINE_PROP_BIT64("hotplug", VHostSCSICommon, host_features,
                      VIRTIO_SCSI_F_HOTPLUG, true),
    DEFINE_PROP_BIT64("param_change", VHostSCSICommon, host_features,
                      VIRTIO_SCSI_F_CHANGE, true),
    DEFINE_PROP_BIT64("t10_pi", VHostSCSICommon, host_features,
                      VIRTIO_SCSI_F_T10_PI, false),
    DEFINE_PROP_END_OF_LIST(),
};

static const VMStateDescription vmstate_vhost_scsi = {
    .name = "virtio-scsi",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static void vhost_user_scsi_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);
    FWPathProviderClass *fwc = FW_PATH_PROVIDER_CLASS(klass);

    dc->props = vhost_user_scsi_properties;
    dc->vmsd = &vmstate_vhost_scsi;
    set_bit(DEVICE_CATEGORY_STORAGE, dc->categories);
    vdc->realize = vhost_user_scsi_realize;
    vdc->unrealize = vhost_user_scsi_unrealize;
    vdc->get_features = vhost_scsi_common_get_features;
    vdc->set_config = vhost_scsi_common_set_config;
    vdc->set_status = vhost_user_scsi_set_status;
    fwc->get_dev_path = vhost_scsi_common_get_fw_dev_path;
}

static void vhost_user_scsi_instance_init(Object *obj)
{
    VHostSCSICommon *vsc = VHOST_SCSI_COMMON(obj);

    vsc->feature_bits = user_feature_bits;

    /* Add the bootindex property for this object */
    device_add_bootindex_property(obj, &vsc->bootindex, "bootindex", NULL,
                                  DEVICE(vsc), NULL);
}

static const TypeInfo vhost_user_scsi_info = {
    .name = TYPE_VHOST_USER_SCSI,
    .parent = TYPE_VHOST_SCSI_COMMON,
    .instance_size = sizeof(VHostUserSCSI),
    .class_init = vhost_user_scsi_class_init,
    .instance_init = vhost_user_scsi_instance_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_FW_PATH_PROVIDER },
        { }
    },
};

static void virtio_register_types(void)
{
    type_register_static(&vhost_user_scsi_info);
}

type_init(virtio_register_types)
pmp-tool/PMP
src/qemu/src-pmp/authz/base.c
<filename>src/qemu/src-pmp/authz/base.c /* * QEMU authorization framework base class * * Copyright (c) 2018 Red Hat, Inc. * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License as published by the Free Software Foundation; either * version 2 of the License, or (at your option) any later version. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/>. * */ #include "qemu/osdep.h" #include "authz/base.h" #include "authz/trace.h" bool qauthz_is_allowed(QAuthZ *authz, const char *identity, Error **errp) { QAuthZClass *cls = QAUTHZ_GET_CLASS(authz); bool allowed; allowed = cls->is_allowed(authz, identity, errp); trace_qauthz_is_allowed(authz, identity, allowed); return allowed; } bool qauthz_is_allowed_by_id(const char *authzid, const char *identity, Error **errp) { QAuthZ *authz; Object *obj; Object *container; container = object_get_objects_root(); obj = object_resolve_path_component(container, authzid); if (!obj) { error_setg(errp, "Cannot find QAuthZ object ID %s", authzid); return false; } if (!object_dynamic_cast(obj, TYPE_QAUTHZ)) { error_setg(errp, "Object '%s' is not a QAuthZ subclass", authzid); return false; } authz = QAUTHZ(obj); return qauthz_is_allowed(authz, identity, errp); } static const TypeInfo authz_info = { .parent = TYPE_OBJECT, .name = TYPE_QAUTHZ, .instance_size = sizeof(QAuthZ), .class_size = sizeof(QAuthZClass), .abstract = true, }; static void qauthz_register_types(void) { type_register_static(&authz_info); } type_init(qauthz_register_types)
pmp-tool/PMP
src/qemu/src-pmp/include/hw/ppc/pnv_occ.h
/*
 * QEMU PowerPC PowerNV Emulation of a few OCC related registers
 *
 * Copyright (c) 2015-2017, IBM Corporation.
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

/* NOTE(review): guard names starting with underscore + capital are
 * reserved identifiers; PPC_PNV_OCC_H would be preferable, but the
 * existing name is kept for compatibility. */
#ifndef _PPC_PNV_OCC_H
#define _PPC_PNV_OCC_H

#include "hw/ppc/pnv_psi.h"

/* Base QOM type plus POWER8/POWER9-specific subtypes */
#define TYPE_PNV_OCC "pnv-occ"
#define PNV_OCC(obj) OBJECT_CHECK(PnvOCC, (obj), TYPE_PNV_OCC)
#define TYPE_PNV8_OCC TYPE_PNV_OCC "-POWER8"
#define PNV8_OCC(obj) OBJECT_CHECK(PnvOCC, (obj), TYPE_PNV8_OCC)
#define TYPE_PNV9_OCC TYPE_PNV_OCC "-POWER9"
#define PNV9_OCC(obj) OBJECT_CHECK(PnvOCC, (obj), TYPE_PNV9_OCC)

typedef struct PnvOCC {
    DeviceState xd;

    /* OCC Misc interrupt */
    uint64_t occmisc;

    PnvPsi *psi;

    MemoryRegion xscom_regs;
} PnvOCC;

#define PNV_OCC_CLASS(klass) \
     OBJECT_CLASS_CHECK(PnvOCCClass, (klass), TYPE_PNV_OCC)
#define PNV_OCC_GET_CLASS(obj) \
     OBJECT_GET_CLASS(PnvOCCClass, (obj), TYPE_PNV_OCC)

/* Per-generation parameters: XSCOM window size/ops and PSI IRQ number */
typedef struct PnvOCCClass {
    DeviceClass parent_class;

    int xscom_size;
    const MemoryRegionOps *xscom_ops;
    int psi_irq;
} PnvOCCClass;

#endif /* _PPC_PNV_OCC_H */
pmp-tool/PMP
src/qemu/src-pmp/slirp/src/state.c
<reponame>pmp-tool/PMP<gh_stars>1-10 /* SPDX-License-Identifier: MIT */ /* * libslirp * * Copyright (c) 2004-2008 <NAME> * * Permission is hereby granted, free of charge, to any person obtaining a copy * of this software and associated documentation files (the "Software"), to deal * in the Software without restriction, including without limitation the rights * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell * copies of the Software, and to permit persons to whom the Software is * furnished to do so, subject to the following conditions: * * The above copyright notice and this permission notice shall be included in * all copies or substantial portions of the Software. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN * THE SOFTWARE. 
*/ #include "slirp.h" #include "vmstate.h" #include "stream.h" static int slirp_tcp_post_load(void *opaque, int version) { tcp_template((struct tcpcb *)opaque); return 0; } static const VMStateDescription vmstate_slirp_tcp = { .name = "slirp-tcp", .version_id = 0, .post_load = slirp_tcp_post_load, .fields = (VMStateField[]) { VMSTATE_INT16(t_state, struct tcpcb), VMSTATE_INT16_ARRAY(t_timer, struct tcpcb, TCPT_NTIMERS), VMSTATE_INT16(t_rxtshift, struct tcpcb), VMSTATE_INT16(t_rxtcur, struct tcpcb), VMSTATE_INT16(t_dupacks, struct tcpcb), VMSTATE_UINT16(t_maxseg, struct tcpcb), VMSTATE_UINT8(t_force, struct tcpcb), VMSTATE_UINT16(t_flags, struct tcpcb), VMSTATE_UINT32(snd_una, struct tcpcb), VMSTATE_UINT32(snd_nxt, struct tcpcb), VMSTATE_UINT32(snd_up, struct tcpcb), VMSTATE_UINT32(snd_wl1, struct tcpcb), VMSTATE_UINT32(snd_wl2, struct tcpcb), VMSTATE_UINT32(iss, struct tcpcb), VMSTATE_UINT32(snd_wnd, struct tcpcb), VMSTATE_UINT32(rcv_wnd, struct tcpcb), VMSTATE_UINT32(rcv_nxt, struct tcpcb), VMSTATE_UINT32(rcv_up, struct tcpcb), VMSTATE_UINT32(irs, struct tcpcb), VMSTATE_UINT32(rcv_adv, struct tcpcb), VMSTATE_UINT32(snd_max, struct tcpcb), VMSTATE_UINT32(snd_cwnd, struct tcpcb), VMSTATE_UINT32(snd_ssthresh, struct tcpcb), VMSTATE_INT16(t_idle, struct tcpcb), VMSTATE_INT16(t_rtt, struct tcpcb), VMSTATE_UINT32(t_rtseq, struct tcpcb), VMSTATE_INT16(t_srtt, struct tcpcb), VMSTATE_INT16(t_rttvar, struct tcpcb), VMSTATE_UINT16(t_rttmin, struct tcpcb), VMSTATE_UINT32(max_sndwnd, struct tcpcb), VMSTATE_UINT8(t_oobflags, struct tcpcb), VMSTATE_UINT8(t_iobc, struct tcpcb), VMSTATE_INT16(t_softerror, struct tcpcb), VMSTATE_UINT8(snd_scale, struct tcpcb), VMSTATE_UINT8(rcv_scale, struct tcpcb), VMSTATE_UINT8(request_r_scale, struct tcpcb), VMSTATE_UINT8(requested_s_scale, struct tcpcb), VMSTATE_UINT32(ts_recent, struct tcpcb), VMSTATE_UINT32(ts_recent_age, struct tcpcb), VMSTATE_UINT32(last_ack_sent, struct tcpcb), VMSTATE_END_OF_LIST() } }; /* The sbuf has a pair of pointers 
that are migrated as offsets; * we calculate the offsets and restore the pointers using * pre_save/post_load on a tmp structure. */ struct sbuf_tmp { struct sbuf *parent; uint32_t roff, woff; }; static int sbuf_tmp_pre_save(void *opaque) { struct sbuf_tmp *tmp = opaque; tmp->woff = tmp->parent->sb_wptr - tmp->parent->sb_data; tmp->roff = tmp->parent->sb_rptr - tmp->parent->sb_data; return 0; } static int sbuf_tmp_post_load(void *opaque, int version) { struct sbuf_tmp *tmp = opaque; uint32_t requested_len = tmp->parent->sb_datalen; /* Allocate the buffer space used by the field after the tmp */ sbreserve(tmp->parent, tmp->parent->sb_datalen); if (tmp->parent->sb_datalen != requested_len) { return -ENOMEM; } if (tmp->woff >= requested_len || tmp->roff >= requested_len) { g_critical("invalid sbuf offsets r/w=%u/%u len=%u", tmp->roff, tmp->woff, requested_len); return -EINVAL; } tmp->parent->sb_wptr = tmp->parent->sb_data + tmp->woff; tmp->parent->sb_rptr = tmp->parent->sb_data + tmp->roff; return 0; } static const VMStateDescription vmstate_slirp_sbuf_tmp = { .name = "slirp-sbuf-tmp", .post_load = sbuf_tmp_post_load, .pre_save = sbuf_tmp_pre_save, .version_id = 0, .fields = (VMStateField[]) { VMSTATE_UINT32(woff, struct sbuf_tmp), VMSTATE_UINT32(roff, struct sbuf_tmp), VMSTATE_END_OF_LIST() } }; static const VMStateDescription vmstate_slirp_sbuf = { .name = "slirp-sbuf", .version_id = 0, .fields = (VMStateField[]) { VMSTATE_UINT32(sb_cc, struct sbuf), VMSTATE_UINT32(sb_datalen, struct sbuf), VMSTATE_WITH_TMP(struct sbuf, struct sbuf_tmp, vmstate_slirp_sbuf_tmp), VMSTATE_VBUFFER_UINT32(sb_data, struct sbuf, 0, NULL, sb_datalen), VMSTATE_END_OF_LIST() } }; static bool slirp_older_than_v4(void *opaque, int version_id) { return version_id < 4; } static bool slirp_family_inet(void *opaque, int version_id) { union slirp_sockaddr *ssa = (union slirp_sockaddr *)opaque; return ssa->ss.ss_family == AF_INET; } static int slirp_socket_pre_load(void *opaque) { struct socket *so = 
opaque; if (tcp_attach(so) < 0) { return -ENOMEM; } /* Older versions don't load these fields */ so->so_ffamily = AF_INET; so->so_lfamily = AF_INET; return 0; } #ifndef _WIN32 #define VMSTATE_SIN4_ADDR(f, s, t) VMSTATE_UINT32_TEST(f, s, t) #else /* Win uses u_long rather than uint32_t - but it's still 32bits long */ #define VMSTATE_SIN4_ADDR(f, s, t) VMSTATE_SINGLE_TEST(f, s, t, 0, \ slirp_vmstate_info_uint32, u_long) #endif /* The OS provided ss_family field isn't that portable; it's size * and type varies (16/8 bit, signed, unsigned) * and the values it contains aren't fully portable. */ typedef struct SS_FamilyTmpStruct { union slirp_sockaddr *parent; uint16_t portable_family; } SS_FamilyTmpStruct; #define SS_FAMILY_MIG_IPV4 2 /* Linux, BSD, Win... */ #define SS_FAMILY_MIG_IPV6 10 /* Linux */ #define SS_FAMILY_MIG_OTHER 0xffff static int ss_family_pre_save(void *opaque) { SS_FamilyTmpStruct *tss = opaque; tss->portable_family = SS_FAMILY_MIG_OTHER; if (tss->parent->ss.ss_family == AF_INET) { tss->portable_family = SS_FAMILY_MIG_IPV4; } else if (tss->parent->ss.ss_family == AF_INET6) { tss->portable_family = SS_FAMILY_MIG_IPV6; } return 0; } static int ss_family_post_load(void *opaque, int version_id) { SS_FamilyTmpStruct *tss = opaque; switch (tss->portable_family) { case SS_FAMILY_MIG_IPV4: tss->parent->ss.ss_family = AF_INET; break; case SS_FAMILY_MIG_IPV6: case 23: /* compatibility: AF_INET6 from mingw */ case 28: /* compatibility: AF_INET6 from FreeBSD sys/socket.h */ tss->parent->ss.ss_family = AF_INET6; break; default: g_critical("invalid ss_family type %x", tss->portable_family); return -EINVAL; } return 0; } static const VMStateDescription vmstate_slirp_ss_family = { .name = "slirp-socket-addr/ss_family", .pre_save = ss_family_pre_save, .post_load = ss_family_post_load, .fields = (VMStateField[]) { VMSTATE_UINT16(portable_family, SS_FamilyTmpStruct), VMSTATE_END_OF_LIST() } }; static const VMStateDescription vmstate_slirp_socket_addr = { .name = 
"slirp-socket-addr", .version_id = 4, .fields = (VMStateField[]) { VMSTATE_WITH_TMP(union slirp_sockaddr, SS_FamilyTmpStruct, vmstate_slirp_ss_family), VMSTATE_SIN4_ADDR(sin.sin_addr.s_addr, union slirp_sockaddr, slirp_family_inet), VMSTATE_UINT16_TEST(sin.sin_port, union slirp_sockaddr, slirp_family_inet), #if 0 /* Untested: Needs checking by someone with IPv6 test */ VMSTATE_BUFFER_TEST(sin6.sin6_addr, union slirp_sockaddr, slirp_family_inet6), VMSTATE_UINT16_TEST(sin6.sin6_port, union slirp_sockaddr, slirp_family_inet6), VMSTATE_UINT32_TEST(sin6.sin6_flowinfo, union slirp_sockaddr, slirp_family_inet6), VMSTATE_UINT32_TEST(sin6.sin6_scope_id, union slirp_sockaddr, slirp_family_inet6), #endif VMSTATE_END_OF_LIST() } }; static const VMStateDescription vmstate_slirp_socket = { .name = "slirp-socket", .version_id = 4, .pre_load = slirp_socket_pre_load, .fields = (VMStateField[]) { VMSTATE_UINT32(so_urgc, struct socket), /* Pre-v4 versions */ VMSTATE_SIN4_ADDR(so_faddr.s_addr, struct socket, slirp_older_than_v4), VMSTATE_SIN4_ADDR(so_laddr.s_addr, struct socket, slirp_older_than_v4), VMSTATE_UINT16_TEST(so_fport, struct socket, slirp_older_than_v4), VMSTATE_UINT16_TEST(so_lport, struct socket, slirp_older_than_v4), /* v4 and newer */ VMSTATE_STRUCT(fhost, struct socket, 4, vmstate_slirp_socket_addr, union slirp_sockaddr), VMSTATE_STRUCT(lhost, struct socket, 4, vmstate_slirp_socket_addr, union slirp_sockaddr), VMSTATE_UINT8(so_iptos, struct socket), VMSTATE_UINT8(so_emu, struct socket), VMSTATE_UINT8(so_type, struct socket), VMSTATE_INT32(so_state, struct socket), VMSTATE_STRUCT(so_rcv, struct socket, 0, vmstate_slirp_sbuf, struct sbuf), VMSTATE_STRUCT(so_snd, struct socket, 0, vmstate_slirp_sbuf, struct sbuf), VMSTATE_STRUCT_POINTER(so_tcpcb, struct socket, vmstate_slirp_tcp, struct tcpcb), VMSTATE_END_OF_LIST() } }; static const VMStateDescription vmstate_slirp_bootp_client = { .name = "slirp_bootpclient", .fields = (VMStateField[]) { VMSTATE_UINT16(allocated, 
BOOTPClient), VMSTATE_BUFFER(macaddr, BOOTPClient), VMSTATE_END_OF_LIST() } }; static const VMStateDescription vmstate_slirp = { .name = "slirp", .version_id = 4, .fields = (VMStateField[]) { VMSTATE_UINT16_V(ip_id, Slirp, 2), VMSTATE_STRUCT_ARRAY(bootp_clients, Slirp, NB_BOOTP_CLIENTS, 3, vmstate_slirp_bootp_client, BOOTPClient), VMSTATE_END_OF_LIST() } }; void slirp_state_save(Slirp *slirp, SlirpWriteCb write_cb, void *opaque) { struct gfwd_list *ex_ptr; SlirpOStream f = { .write_cb = write_cb, .opaque = opaque, }; for (ex_ptr = slirp->guestfwd_list; ex_ptr; ex_ptr = ex_ptr->ex_next) if (ex_ptr->write_cb) { struct socket *so; so = slirp_find_ctl_socket(slirp, ex_ptr->ex_addr, ntohs(ex_ptr->ex_fport)); if (!so) { continue; } slirp_ostream_write_u8(&f, 42); slirp_vmstate_save_state(&f, &vmstate_slirp_socket, so); } slirp_ostream_write_u8(&f, 0); slirp_vmstate_save_state(&f, &vmstate_slirp, slirp); } int slirp_state_load(Slirp *slirp, int version_id, SlirpReadCb read_cb, void *opaque) { struct gfwd_list *ex_ptr; SlirpIStream f = { .read_cb = read_cb, .opaque = opaque, }; while (slirp_istream_read_u8(&f)) { int ret; struct socket *so = socreate(slirp); ret = slirp_vmstate_load_state(&f, &vmstate_slirp_socket, so, version_id); if (ret < 0) { return ret; } if ((so->so_faddr.s_addr & slirp->vnetwork_mask.s_addr) != slirp->vnetwork_addr.s_addr) { return -EINVAL; } for (ex_ptr = slirp->guestfwd_list; ex_ptr; ex_ptr = ex_ptr->ex_next) { if (ex_ptr->write_cb && so->so_faddr.s_addr == ex_ptr->ex_addr.s_addr && so->so_fport == ex_ptr->ex_fport) { break; } } if (!ex_ptr) { return -EINVAL; } } return slirp_vmstate_load_state(&f, &vmstate_slirp, slirp, version_id); } int slirp_state_version(void) { return 4; }
pmp-tool/PMP
src/qemu/src-pmp/hw/block/dataplane/xen-block.h
<reponame>pmp-tool/PMP<gh_stars>1-10
/*
 * Copyright (c) 2018 Citrix Systems Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef HW_BLOCK_DATAPLANE_XEN_BLOCK_H
#define HW_BLOCK_DATAPLANE_XEN_BLOCK_H

#include "hw/block/block.h"
#include "hw/xen/xen-bus.h"
#include "sysemu/iothread.h"

/* Opaque handle for the xen-block dataplane; the definition lives in the
 * corresponding .c file. */
typedef struct XenBlockDataPlane XenBlockDataPlane;

/* Allocate dataplane state for @xendev with block configuration @conf.
 * @iothread selects the IOThread the dataplane runs in (NOTE(review):
 * presumably NULL falls back to the main loop -- confirm in the .c file). */
XenBlockDataPlane *xen_block_dataplane_create(XenDevice *xendev,
                                              BlockConf *conf,
                                              IOThread *iothread);

/* Release a dataplane previously returned by xen_block_dataplane_create(). */
void xen_block_dataplane_destroy(XenBlockDataPlane *dataplane);

/* Attach the shared ring and begin servicing requests.
 * @ring_ref holds @nr_ring_ref grant references for the ring pages,
 * @event_channel is the guest notification port, and @protocol selects the
 * blkif protocol variant.  Failures are reported through @errp. */
void xen_block_dataplane_start(XenBlockDataPlane *dataplane,
                               const unsigned int ring_ref[],
                               unsigned int nr_ring_ref,
                               unsigned int event_channel,
                               unsigned int protocol,
                               Error **errp);

/* Quiesce the dataplane (counterpart of xen_block_dataplane_start). */
void xen_block_dataplane_stop(XenBlockDataPlane *dataplane);

#endif /* HW_BLOCK_DATAPLANE_XEN_BLOCK_H */
pmp-tool/PMP
src/qemu/src-pmp/hw/microblaze/petalogix_ml605_mmu.c
<filename>src/qemu/src-pmp/hw/microblaze/petalogix_ml605_mmu.c
/*
 * Model of Petalogix linux reference design targeting Xilinx Spartan ml605
 * board.
 *
 * Copyright (c) 2011 <NAME> <<EMAIL>>
 * Copyright (c) 2011 PetaLogix
 * Copyright (c) 2009 <NAME>.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "cpu.h"
#include "hw/sysbus.h"
#include "hw/hw.h"
#include "net/net.h"
#include "hw/block/flash.h"
#include "sysemu/sysemu.h"
#include "hw/boards.h"
#include "hw/char/serial.h"
#include "exec/address-spaces.h"
#include "hw/ssi/ssi.h"
#include "boot.h"
#include "hw/stream.h"

#define LMB_BRAM_SIZE (128 * KiB)
#define FLASH_SIZE (32 * MiB)

#define BINARY_DEVICE_TREE_FILE "petalogix-ml605.dtb"

#define NUM_SPI_FLASHES 4

/* Physical base addresses of the peripherals in the reference design. */
#define SPI_BASEADDR 0x40a00000
#define MEMORY_BASEADDR 0x50000000
#define FLASH_BASEADDR 0x86000000
#define INTC_BASEADDR 0x81800000
#define TIMER_BASEADDR 0x83c00000
#define UART16550_BASEADDR 0x83e00000
#define AXIENET_BASEADDR 0x82780000
#define AXIDMA_BASEADDR 0x84600000

/* Interrupt lines on the xlnx.xps-intc controller. */
#define AXIDMA_IRQ1 0
#define AXIDMA_IRQ0 1
#define TIMER_IRQ 2
#define AXIENET_IRQ 3
#define SPI_IRQ 4
#define UART16550_IRQ 5

/*
 * Build the whole machine: MicroBlaze CPU, LMB BRAM, DDR RAM, CFI flash,
 * interrupt controller, 16550 UART, dual timer, AXI ethernet + DMA, SPI
 * flashes, then boot the kernel.  Device creation order matters: the intc
 * must exist before the irq[] array is filled from it.
 */
static void petalogix_ml605_init(MachineState *machine)
{
    ram_addr_t ram_size = machine->ram_size;
    MemoryRegion *address_space_mem = get_system_memory();
    DeviceState *dev, *dma, *eth0;
    Object *ds, *cs;
    MicroBlazeCPU *cpu;
    SysBusDevice *busdev;
    DriveInfo *dinfo;
    int i;
    MemoryRegion *phys_lmb_bram = g_new(MemoryRegion, 1);
    MemoryRegion *phys_ram = g_new(MemoryRegion, 1);
    qemu_irq irq[32];

    /* init CPUs */
    cpu = MICROBLAZE_CPU(object_new(TYPE_MICROBLAZE_CPU));
    object_property_set_str(OBJECT(cpu), "8.10.a", "version", &error_abort);
    /* Use FPU but don't use floating point conversion and square
     * root instructions
     */
    object_property_set_int(OBJECT(cpu), 1, "use-fpu", &error_abort);
    object_property_set_bool(OBJECT(cpu), true, "dcache-writeback",
                             &error_abort);
    object_property_set_bool(OBJECT(cpu), true, "endianness", &error_abort);
    object_property_set_bool(OBJECT(cpu), true, "realized", &error_abort);

    /* Attach emulated BRAM through the LMB. */
    memory_region_init_ram(phys_lmb_bram, NULL, "petalogix_ml605.lmb_bram",
                           LMB_BRAM_SIZE, &error_fatal);
    memory_region_add_subregion(address_space_mem, 0x00000000, phys_lmb_bram);

    memory_region_init_ram(phys_ram, NULL, "petalogix_ml605.ram", ram_size,
                           &error_fatal);
    memory_region_add_subregion(address_space_mem, MEMORY_BASEADDR, phys_ram);

    dinfo = drive_get(IF_PFLASH, 0, 0);
    /* 5th parameter 2 means bank-width
     * 10th parameter 0 means little-endian */
    pflash_cfi01_register(FLASH_BASEADDR, "petalogix_ml605.flash", FLASH_SIZE,
                          dinfo ? blk_by_legacy_dinfo(dinfo) : NULL,
                          64 * KiB, 2, 0x89, 0x18, 0x0000, 0x0, 0);

    /* Interrupt controller; all device irqs below are routed through it. */
    dev = qdev_create(NULL, "xlnx.xps-intc");
    qdev_prop_set_uint32(dev, "kind-of-intr", 1 << TIMER_IRQ);
    qdev_init_nofail(dev);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, INTC_BASEADDR);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0,
                       qdev_get_gpio_in(DEVICE(cpu), MB_CPU_IRQ));
    for (i = 0; i < 32; i++) {
        irq[i] = qdev_get_gpio_in(dev, i);
    }

    serial_mm_init(address_space_mem, UART16550_BASEADDR + 0x1000, 2,
                   irq[UART16550_IRQ], 115200, serial_hd(0),
                   DEVICE_LITTLE_ENDIAN);

    /* 2 timers at irq 2 @ 100 Mhz.  */
    dev = qdev_create(NULL, "xlnx.xps-timer");
    qdev_prop_set_uint32(dev, "one-timer-only", 0);
    qdev_prop_set_uint32(dev, "clock-frequency", 100 * 1000000);
    qdev_init_nofail(dev);
    sysbus_mmio_map(SYS_BUS_DEVICE(dev), 0, TIMER_BASEADDR);
    sysbus_connect_irq(SYS_BUS_DEVICE(dev), 0, irq[TIMER_IRQ]);

    /* axi ethernet and dma initialization. */
    qemu_check_nic_model(&nd_table[0], "xlnx.axi-ethernet");
    eth0 = qdev_create(NULL, "xlnx.axi-ethernet");
    dma = qdev_create(NULL, "xlnx.axi-dma");

    /* FIXME: attach to the sysbus instead */
    object_property_add_child(qdev_get_machine(), "xilinx-eth",
                              OBJECT(eth0), NULL);
    object_property_add_child(qdev_get_machine(), "xilinx-dma",
                              OBJECT(dma), NULL);

    /* Cross-wire the AXI stream links: eth0 -> dma targets, then
     * dma -> eth0 targets.  The links must be set before qdev_init_nofail. */
    ds = object_property_get_link(OBJECT(dma),
                                  "axistream-connected-target", NULL);
    cs = object_property_get_link(OBJECT(dma),
                                  "axistream-control-connected-target", NULL);
    qdev_set_nic_properties(eth0, &nd_table[0]);
    qdev_prop_set_uint32(eth0, "rxmem", 0x1000);
    qdev_prop_set_uint32(eth0, "txmem", 0x1000);
    object_property_set_link(OBJECT(eth0), OBJECT(ds),
                             "axistream-connected", &error_abort);
    object_property_set_link(OBJECT(eth0), OBJECT(cs),
                             "axistream-control-connected", &error_abort);
    qdev_init_nofail(eth0);
    sysbus_mmio_map(SYS_BUS_DEVICE(eth0), 0, AXIENET_BASEADDR);
    sysbus_connect_irq(SYS_BUS_DEVICE(eth0), 0, irq[AXIENET_IRQ]);

    ds = object_property_get_link(OBJECT(eth0),
                                  "axistream-connected-target", NULL);
    cs = object_property_get_link(OBJECT(eth0),
                                  "axistream-control-connected-target", NULL);
    qdev_prop_set_uint32(dma, "freqhz", 100 * 1000000);
    object_property_set_link(OBJECT(dma), OBJECT(ds),
                             "axistream-connected", &error_abort);
    object_property_set_link(OBJECT(dma), OBJECT(cs),
                             "axistream-control-connected", &error_abort);
    qdev_init_nofail(dma);
    sysbus_mmio_map(SYS_BUS_DEVICE(dma), 0, AXIDMA_BASEADDR);
    sysbus_connect_irq(SYS_BUS_DEVICE(dma), 0, irq[AXIDMA_IRQ0]);
    sysbus_connect_irq(SYS_BUS_DEVICE(dma), 1, irq[AXIDMA_IRQ1]);

    /* SPI controller with NUM_SPI_FLASHES n25q128 flashes on its bus;
     * chip-select lines are controller irqs 1..NUM_SPI_FLASHES. */
    {
        SSIBus *spi;

        dev = qdev_create(NULL, "xlnx.xps-spi");
        qdev_prop_set_uint8(dev, "num-ss-bits", NUM_SPI_FLASHES);
        qdev_init_nofail(dev);
        busdev = SYS_BUS_DEVICE(dev);
        sysbus_mmio_map(busdev, 0, SPI_BASEADDR);
        sysbus_connect_irq(busdev, 0, irq[SPI_IRQ]);

        spi = (SSIBus *)qdev_get_child_bus(dev, "spi");

        for (i = 0; i < NUM_SPI_FLASHES; i++) {
            DriveInfo *dinfo = drive_get_next(IF_MTD);
            qemu_irq cs_line;

            dev = ssi_create_slave_no_init(spi, "n25q128");
            if (dinfo) {
                qdev_prop_set_drive(dev, "drive", blk_by_legacy_dinfo(dinfo),
                                    &error_fatal);
            }
            qdev_init_nofail(dev);
            cs_line = qdev_get_gpio_in_named(dev, SSI_GPIO_CS, 0);
            sysbus_connect_irq(busdev, i+1, cs_line);
        }
    }

    /* setup PVR to match kernel settings */
    cpu->env.pvr.regs[4] = 0xc56b8000;
    cpu->env.pvr.regs[5] = 0xc56be000;
    cpu->env.pvr.regs[10] = 0x0e000000; /* virtex 6 */

    microblaze_load_kernel(cpu, MEMORY_BASEADDR, ram_size,
                           machine->initrd_filename,
                           BINARY_DEVICE_TREE_FILE,
                           NULL);
}

static void petalogix_ml605_machine_init(MachineClass *mc)
{
    mc->desc = "PetaLogix linux refdesign for xilinx ml605 little endian";
    mc->init = petalogix_ml605_init;
    mc->is_default = 0;
}

DEFINE_MACHINE("petalogix-ml605", petalogix_ml605_machine_init)
pmp-tool/PMP
src/qemu/src-pmp/tests/libqos/e1000e.h
<gh_stars>1-10
/*
 * libqos driver framework
 *
 * Copyright (c) 2018 <NAME> <<EMAIL>>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License version 2 as published by the Free Software Foundation.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>
 */

#ifndef QGRAPH_E1000E
#define QGRAPH_E1000E

#include "libqos/qgraph.h"
#include "pci.h"

/* MSI-X message (vector) indices used by the e1000e tests. */
#define E1000E_RX0_MSG_ID (0)
#define E1000E_TX0_MSG_ID (1)
#define E1000E_OTHER_MSG_ID (2)

/* MMIO offsets of the descriptor ring registers; names follow the e1000e
 * datasheet (TDLEN/RDLEN = TX/RX descriptor ring length, TDT/RDT = tail). */
#define E1000E_TDLEN (0x3808)
#define E1000E_TDT (0x3818)
#define E1000E_RDLEN (0x2808)
#define E1000E_RDT (0x2818)

typedef struct QE1000E QE1000E;
typedef struct QE1000E_PCI QE1000E_PCI;

/* Core device state: guest-physical addresses of the two descriptor rings. */
struct QE1000E {
    uint64_t tx_ring;
    uint64_t rx_ring;
};

struct QE1000E_PCI {
    QOSGraphObject obj; /* NOTE(review): presumably must stay first for qgraph casts */
    QPCIDevice pci_dev;
    QPCIBar mac_regs;   /* BAR mapping the MAC register block */
    QE1000E e1000e;
};

/* Wait until the interrupt for MSI-X message @msg_id has fired. */
void e1000e_wait_isr(QE1000E *d, uint16_t msg_id);
/* Queue one descriptor @descr on the TX/RX ring (presumably also advances
 * the corresponding tail register -- see the implementation). */
void e1000e_tx_ring_push(QE1000E *d, void *descr);
void e1000e_rx_ring_push(QE1000E *d, void *descr);

#endif
pmp-tool/PMP
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/bit-count/test_msa_nloc_d.c
/* * Test program for MSA instruction NLOC.D * * Copyright (C) 2019 Wave Computing, Inc. * Copyright (C) 2019 <NAME> <<EMAIL>> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. * */ #include <sys/time.h> #include <stdint.h> #include "../../../../include/wrappers_msa.h" #include "../../../../include/test_inputs_128.h" #include "../../../../include/test_utils_128.h" #define TEST_COUNT_TOTAL (PATTERN_INPUTS_COUNT + RANDOM_INPUTS_COUNT) int32_t main(void) { char *instruction_name = "NLOC.D"; int32_t ret; uint32_t i; struct timeval start, end; double elapsed_time; uint64_t b128_result[TEST_COUNT_TOTAL][2]; uint64_t b128_expect[TEST_COUNT_TOTAL][2] = { { 0x0000000000000040ULL, 0x0000000000000040ULL, }, /* 0 */ { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000001ULL, 0x0000000000000001ULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000002ULL, 0x0000000000000002ULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000003ULL, 0x0000000000000000ULL, }, { 0x0000000000000000ULL, 0x0000000000000002ULL, }, { 0x0000000000000004ULL, 0x0000000000000004ULL, }, /* 8 */ { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000005ULL, 0x0000000000000001ULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000006ULL, 0x0000000000000002ULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000007ULL, 
0x0000000000000000ULL, }, { 0x0000000000000000ULL, 0x0000000000000006ULL, }, { 0x0000000000000008ULL, 0x0000000000000008ULL, }, /* 16 */ { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000009ULL, 0x0000000000000000ULL, }, { 0x0000000000000000ULL, 0x0000000000000008ULL, }, { 0x000000000000000aULL, 0x0000000000000006ULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x000000000000000bULL, 0x0000000000000000ULL, }, { 0x0000000000000000ULL, 0x0000000000000002ULL, }, { 0x000000000000000cULL, 0x0000000000000000ULL, }, /* 24 */ { 0x0000000000000000ULL, 0x0000000000000008ULL, }, { 0x000000000000000dULL, 0x0000000000000001ULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x000000000000000eULL, 0x0000000000000006ULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x000000000000000fULL, 0x000000000000000bULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000010ULL, 0x0000000000000010ULL, }, /* 32 */ { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000011ULL, 0x0000000000000000ULL, }, { 0x0000000000000000ULL, 0x0000000000000004ULL, }, { 0x0000000000000012ULL, 0x0000000000000000ULL, }, { 0x0000000000000000ULL, 0x0000000000000008ULL, }, { 0x0000000000000013ULL, 0x0000000000000000ULL, }, { 0x0000000000000000ULL, 0x000000000000000cULL, }, { 0x0000000000000014ULL, 0x0000000000000000ULL, }, /* 40 */ { 0x0000000000000000ULL, 0x0000000000000010ULL, }, { 0x0000000000000015ULL, 0x0000000000000000ULL, }, { 0x0000000000000000ULL, 0x0000000000000014ULL, }, { 0x0000000000000016ULL, 0x0000000000000002ULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000017ULL, 0x0000000000000005ULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000018ULL, 0x0000000000000008ULL, }, /* 48 */ { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000019ULL, 0x000000000000000bULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x000000000000001aULL, 
0x000000000000000eULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x000000000000001bULL, 0x0000000000000011ULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x000000000000001cULL, 0x0000000000000014ULL, }, /* 56 */ { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x000000000000001dULL, 0x0000000000000017ULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x000000000000001eULL, 0x000000000000001aULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x000000000000001fULL, 0x000000000000001dULL, }, { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000000000000001ULL, 0x0000000000000000ULL, }, /* 64 */ { 0x0000000000000005ULL, 0x0000000000000000ULL, }, { 0x0000000000000001ULL, 0x0000000000000000ULL, }, { 0x0000000000000000ULL, 0x0000000000000001ULL, }, { 0x0000000000000001ULL, 0x0000000000000001ULL, }, { 0x0000000000000002ULL, 0x0000000000000000ULL, }, { 0x0000000000000001ULL, 0x0000000000000001ULL, }, { 0x0000000000000006ULL, 0x0000000000000001ULL, }, { 0x0000000000000000ULL, 0x0000000000000003ULL, }, /* 72 */ { 0x0000000000000001ULL, 0x0000000000000001ULL, }, { 0x0000000000000001ULL, 0x0000000000000001ULL, }, { 0x0000000000000001ULL, 0x0000000000000002ULL, }, { 0x0000000000000003ULL, 0x0000000000000001ULL, }, { 0x0000000000000001ULL, 0x0000000000000004ULL, }, { 0x0000000000000000ULL, 0x0000000000000003ULL, }, { 0x0000000000000001ULL, 0x0000000000000002ULL, }, }; gettimeofday(&start, NULL); for (i = 0; i < TEST_COUNT_TOTAL; i++) { if (i < PATTERN_INPUTS_COUNT) { do_msa_NLOC_D(b128_pattern[i], b128_result[i]); } else { do_msa_NLOC_D(b128_random[i - PATTERN_INPUTS_COUNT], b128_result[i]); } } gettimeofday(&end, NULL); elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time, &b128_result[0][0], &b128_expect[0][0]); return ret; }
pmp-tool/PMP
src/PMP/hook/hook.c
#define _GNU_SOURCE
#include <dlfcn.h>
#include <fcntl.h>
#include <malloc.h>
#include <stdio.h>
#include <stdlib.h>
#include <stdarg.h>
#include <string.h>
#include <unistd.h>

/* Pointers below this address are assumed to belong to a reserved low-memory
 * pool and must never be handed back to the real allocator -- TODO confirm
 * this against whatever maps that region. */
#define ALLOCATE_SIZE 0x400000
#define TARGET_LONG_SIZE 8
#define __NR_printmsg 328 /* debug syscall number; kept for reference */

static int (*real_open)(const char*, int, ...);
static FILE* (*real_fopen)(const char*, const char*);
static void* (*real_malloc)(size_t);
static void (*real_free)(void*);

/* Pattern written into every freshly allocated chunk (see malloc below). */
static inline unsigned long fill_value(void)
{
    return 0x0;
}

/*
 * Build the fallback path used when the target's open/fopen fails:
 * read-only accesses are redirected to $WORKDIR/input_scheme, writable
 * accesses to /dev/null.  Returns 0 on success, -1 when no redirect target
 * can be produced (e.g. WORKDIR unset).
 */
static int redirect_path(char *buf, size_t len, int rdonly)
{
    if (rdonly) {
        const char *workdir = getenv("WORKDIR");
        if (!workdir) {
            /* BUG fix: the old code passed a possibly-NULL getenv() result
             * straight into sprintf (undefined behavior). */
            return -1;
        }
        snprintf(buf, len, "%s/input_scheme", workdir);
    } else {
        snprintf(buf, len, "/dev/null");
    }
    return 0;
}

/*
 * open(2) interposer: on failure, retry with a redirected path so the
 * instrumented program always gets a usable file descriptor.
 */
int open(const char *filename, int flags, ...)
{
    if (!real_open)
        real_open = dlsym(RTLD_NEXT, "open");

    /* BUG fix: the mode argument was previously dropped, so files created
     * with O_CREAT got indeterminate permissions.  mode_t promotes to int
     * through varargs. */
    int mode = 0;
    if (flags & O_CREAT) {
        va_list ap;
        va_start(ap, flags);
        mode = va_arg(ap, int);
        va_end(ap);
    }

    int fd = real_open(filename, flags, mode);
    if (fd == -1) {
        /* BUG fix: compare the access mode via O_ACCMODE.  The old
         * `flags == O_RDONLY` tests failed whenever extra flags
         * (O_NONBLOCK, O_CLOEXEC, ...) were present, leaving the redirect
         * buffer uninitialized before use. */
        char redirect[128];
        int rdonly = (flags & O_ACCMODE) == O_RDONLY;
        if (redirect_path(redirect, sizeof redirect, rdonly) == 0)
            fd = real_open(redirect, flags, mode);
    }
    return fd;
}

/*
 * fopen(3) interposer, mirroring the open() redirect policy: modes that can
 * write go to /dev/null, pure-read modes to $WORKDIR/input_scheme.
 */
FILE* fopen(const char *filename, const char *mode)
{
    if (!real_fopen)
        real_fopen = dlsym(RTLD_NEXT, "fopen");

    FILE *file = real_fopen(filename, mode);
    if (!file) {
        char redirect[128];
        int has_read  = strchr(mode, 'r') != NULL;
        /* 'w'/'a' win over 'r', matching the original sequential checks. */
        int has_write = strchr(mode, 'w') != NULL || strchr(mode, 'a') != NULL;
        /* BUG fix: if the mode string contained none of r/w/a the old code
         * read an uninitialized buffer; now we simply keep the failure. */
        if (has_read || has_write) {
            if (redirect_path(redirect, sizeof redirect, !has_write) == 0)
                file = real_fopen(redirect, mode);
        }
    }
    return file;
}

/*
 * malloc(3) interposer: deterministically fill each new block with
 * fill_value() so the target never observes uninitialized heap memory.
 * Only whole TARGET_LONG_SIZE words are filled (a trailing partial word is
 * left as delivered), matching the original behavior.
 */
void* malloc(size_t size)
{
    if (!real_malloc)
        real_malloc = dlsym(RTLD_NEXT, "malloc");

    void *memory = real_malloc(size);
    if (!memory) {
        /* BUG fix: the old fill loop wrote through a NULL result. */
        return NULL;
    }

    unsigned char *p = memory; /* avoid GNU void* arithmetic */
    for (size_t i = 0; i + TARGET_LONG_SIZE <= size; i += TARGET_LONG_SIZE) {
        unsigned long val = fill_value();
        memcpy(p + i, &val, TARGET_LONG_SIZE);
    }
    return memory;
}

/*
 * free(3) interposer: pointers inside the reserved low region (including
 * NULL) are intentionally never released to the real allocator.
 */
void free(void *ptr)
{
    if (!real_free)
        real_free = dlsym(RTLD_NEXT, "free");
    if ((unsigned long)(ptr) >= ALLOCATE_SIZE)
        real_free(ptr);
}

/* Empty lifecycle hooks; presumably wired up via linker -init/-fini flags --
 * keep the symbols even though they do nothing. */
void __hook_init(void)
{
}

void __hook_fini(void)
{
}
pmp-tool/PMP
src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/interleave/test_msa_ilvl_h.c
<filename>src/qemu/src-pmp/tests/tcg/mips/user/ase/msa/interleave/test_msa_ilvl_h.c /* * Test program for MSA instruction ILVL.H * * Copyright (C) 2019 Wave Computing, Inc. * Copyright (C) 2019 <NAME> <<EMAIL>> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. * */ #include <sys/time.h> #include <stdint.h> #include "../../../../include/wrappers_msa.h" #include "../../../../include/test_inputs_128.h" #include "../../../../include/test_utils_128.h" #define TEST_COUNT_TOTAL ( \ (PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT) + \ (RANDOM_INPUTS_SHORT_COUNT) * (RANDOM_INPUTS_SHORT_COUNT)) int32_t main(void) { char *instruction_name = "ILVL.H"; int32_t ret; uint32_t i, j; struct timeval start, end; double elapsed_time; uint64_t b128_result[TEST_COUNT_TOTAL][2]; uint64_t b128_expect[TEST_COUNT_TOTAL][2] = { { 0xffffffffffffffffULL, 0xffffffffffffffffULL, }, /* 0 */ { 0xffff0000ffff0000ULL, 0xffff0000ffff0000ULL, }, { 0xffffaaaaffffaaaaULL, 0xffffaaaaffffaaaaULL, }, { 0xffff5555ffff5555ULL, 0xffff5555ffff5555ULL, }, { 0xffffccccffffccccULL, 0xffffccccffffccccULL, }, { 0xffff3333ffff3333ULL, 0xffff3333ffff3333ULL, }, { 0xffffe38effff38e3ULL, 0xffff38e3ffff8e38ULL, }, { 0xffff1c71ffffc71cULL, 0xffffc71cffff71c7ULL, }, { 0x0000ffff0000ffffULL, 0x0000ffff0000ffffULL, }, /* 8 */ { 0x0000000000000000ULL, 0x0000000000000000ULL, }, { 0x0000aaaa0000aaaaULL, 0x0000aaaa0000aaaaULL, }, { 
0x0000555500005555ULL, 0x0000555500005555ULL, }, { 0x0000cccc0000ccccULL, 0x0000cccc0000ccccULL, }, { 0x0000333300003333ULL, 0x0000333300003333ULL, }, { 0x0000e38e000038e3ULL, 0x000038e300008e38ULL, }, { 0x00001c710000c71cULL, 0x0000c71c000071c7ULL, }, { 0xaaaaffffaaaaffffULL, 0xaaaaffffaaaaffffULL, }, /* 16 */ { 0xaaaa0000aaaa0000ULL, 0xaaaa0000aaaa0000ULL, }, { 0xaaaaaaaaaaaaaaaaULL, 0xaaaaaaaaaaaaaaaaULL, }, { 0xaaaa5555aaaa5555ULL, 0xaaaa5555aaaa5555ULL, }, { 0xaaaaccccaaaaccccULL, 0xaaaaccccaaaaccccULL, }, { 0xaaaa3333aaaa3333ULL, 0xaaaa3333aaaa3333ULL, }, { 0xaaaae38eaaaa38e3ULL, 0xaaaa38e3aaaa8e38ULL, }, { 0xaaaa1c71aaaac71cULL, 0xaaaac71caaaa71c7ULL, }, { 0x5555ffff5555ffffULL, 0x5555ffff5555ffffULL, }, /* 24 */ { 0x5555000055550000ULL, 0x5555000055550000ULL, }, { 0x5555aaaa5555aaaaULL, 0x5555aaaa5555aaaaULL, }, { 0x5555555555555555ULL, 0x5555555555555555ULL, }, { 0x5555cccc5555ccccULL, 0x5555cccc5555ccccULL, }, { 0x5555333355553333ULL, 0x5555333355553333ULL, }, { 0x5555e38e555538e3ULL, 0x555538e355558e38ULL, }, { 0x55551c715555c71cULL, 0x5555c71c555571c7ULL, }, { 0xccccffffccccffffULL, 0xccccffffccccffffULL, }, /* 32 */ { 0xcccc0000cccc0000ULL, 0xcccc0000cccc0000ULL, }, { 0xccccaaaaccccaaaaULL, 0xccccaaaaccccaaaaULL, }, { 0xcccc5555cccc5555ULL, 0xcccc5555cccc5555ULL, }, { 0xccccccccccccccccULL, 0xccccccccccccccccULL, }, { 0xcccc3333cccc3333ULL, 0xcccc3333cccc3333ULL, }, { 0xcccce38ecccc38e3ULL, 0xcccc38e3cccc8e38ULL, }, { 0xcccc1c71ccccc71cULL, 0xccccc71ccccc71c7ULL, }, { 0x3333ffff3333ffffULL, 0x3333ffff3333ffffULL, }, /* 40 */ { 0x3333000033330000ULL, 0x3333000033330000ULL, }, { 0x3333aaaa3333aaaaULL, 0x3333aaaa3333aaaaULL, }, { 0x3333555533335555ULL, 0x3333555533335555ULL, }, { 0x3333cccc3333ccccULL, 0x3333cccc3333ccccULL, }, { 0x3333333333333333ULL, 0x3333333333333333ULL, }, { 0x3333e38e333338e3ULL, 0x333338e333338e38ULL, }, { 0x33331c713333c71cULL, 0x3333c71c333371c7ULL, }, { 0xe38effff38e3ffffULL, 0x38e3ffff8e38ffffULL, }, /* 48 */ { 
0xe38e000038e30000ULL, 0x38e300008e380000ULL, }, { 0xe38eaaaa38e3aaaaULL, 0x38e3aaaa8e38aaaaULL, }, { 0xe38e555538e35555ULL, 0x38e355558e385555ULL, }, { 0xe38ecccc38e3ccccULL, 0x38e3cccc8e38ccccULL, }, { 0xe38e333338e33333ULL, 0x38e333338e383333ULL, }, { 0xe38ee38e38e338e3ULL, 0x38e338e38e388e38ULL, }, { 0xe38e1c7138e3c71cULL, 0x38e3c71c8e3871c7ULL, }, { 0x1c71ffffc71cffffULL, 0xc71cffff71c7ffffULL, }, /* 56 */ { 0x1c710000c71c0000ULL, 0xc71c000071c70000ULL, }, { 0x1c71aaaac71caaaaULL, 0xc71caaaa71c7aaaaULL, }, { 0x1c715555c71c5555ULL, 0xc71c555571c75555ULL, }, { 0x1c71ccccc71cccccULL, 0xc71ccccc71c7ccccULL, }, { 0x1c713333c71c3333ULL, 0xc71c333371c73333ULL, }, { 0x1c71e38ec71c38e3ULL, 0xc71c38e371c78e38ULL, }, { 0x1c711c71c71cc71cULL, 0xc71cc71c71c771c7ULL, }, { 0xfe7bfe7bb00cb00cULL, 0x4b674b670b5e0b5eULL, }, /* 64 */ { 0xfe7b153fb00c52fcULL, 0x4b6712f70b5ebb1aULL, }, { 0xfe7bab2bb00c2514ULL, 0x4b6727d80b5ec6ffULL, }, { 0xfe7ba942b00ce2a0ULL, 0x4b678df10b5e88d8ULL, }, { 0x153ffe7b52fcb00cULL, 0x12f74b67bb1a0b5eULL, }, { 0x153f153f52fc52fcULL, 0x12f712f7bb1abb1aULL, }, { 0x153fab2b52fc2514ULL, 0x12f727d8bb1ac6ffULL, }, { 0x153fa94252fce2a0ULL, 0x12f78df1bb1a88d8ULL, }, { 0xab2bfe7b2514b00cULL, 0x27d84b67c6ff0b5eULL, }, /* 72 */ { 0xab2b153f251452fcULL, 0x27d812f7c6ffbb1aULL, }, { 0xab2bab2b25142514ULL, 0x27d827d8c6ffc6ffULL, }, { 0xab2ba9422514e2a0ULL, 0x27d88df1c6ff88d8ULL, }, { 0xa942fe7be2a0b00cULL, 0x8df14b6788d80b5eULL, }, { 0xa942153fe2a052fcULL, 0x8df112f788d8bb1aULL, }, { 0xa942ab2be2a02514ULL, 0x8df127d888d8c6ffULL, }, { 0xa942a942e2a0e2a0ULL, 0x8df18df188d888d8ULL, }, }; gettimeofday(&start, NULL); for (i = 0; i < PATTERN_INPUTS_SHORT_COUNT; i++) { for (j = 0; j < PATTERN_INPUTS_SHORT_COUNT; j++) { do_msa_ILVL_H(b128_pattern[i], b128_pattern[j], b128_result[PATTERN_INPUTS_SHORT_COUNT * i + j]); } } for (i = 0; i < RANDOM_INPUTS_SHORT_COUNT; i++) { for (j = 0; j < RANDOM_INPUTS_SHORT_COUNT; j++) { do_msa_ILVL_H(b128_random[i], b128_random[j], 
b128_result[((PATTERN_INPUTS_SHORT_COUNT) * (PATTERN_INPUTS_SHORT_COUNT)) + RANDOM_INPUTS_SHORT_COUNT * i + j]); } } gettimeofday(&end, NULL); elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; ret = check_results(instruction_name, TEST_COUNT_TOTAL, elapsed_time, &b128_result[0][0], &b128_expect[0][0]); return ret; }
pmp-tool/PMP
src/qemu/src-pmp/include/hw/virtio/virtio-balloon.h
<reponame>pmp-tool/PMP<gh_stars>1-10
/*
 * Virtio Support
 *
 * Copyright IBM, Corp. 2007-2008
 *
 * Authors:
 *  <NAME> <<EMAIL>>
 *  <NAME> <<EMAIL>>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_VIRTIO_BALLOON_H
#define QEMU_VIRTIO_BALLOON_H

#include "standard-headers/linux/virtio_balloon.h"
#include "hw/virtio/virtio.h"
#include "sysemu/iothread.h"

#define TYPE_VIRTIO_BALLOON "virtio-balloon-device"
#define VIRTIO_BALLOON(obj) \
        OBJECT_CHECK(VirtIOBalloon, (obj), TYPE_VIRTIO_BALLOON)

/* Lowest valid free-page-report command id (see free_page_report_cmd_id). */
#define VIRTIO_BALLOON_FREE_PAGE_REPORT_CMD_ID_MIN 0x80000000

typedef struct virtio_balloon_stat VirtIOBalloonStat;

/* Wire format of one balloon statistic entry ("modern" layout). */
typedef struct virtio_balloon_stat_modern {
   uint16_t tag;
   uint8_t reserved[6];
   uint64_t val;
} VirtIOBalloonStatModern;

typedef struct PartiallyBalloonedPage PartiallyBalloonedPage;

/* States of the free-page-reporting run (held in free_page_report_status). */
enum virtio_balloon_free_page_report_status {
    FREE_PAGE_REPORT_S_STOP = 0,
    FREE_PAGE_REPORT_S_REQUESTED = 1,
    FREE_PAGE_REPORT_S_START = 2,
    FREE_PAGE_REPORT_S_DONE = 3,
};

typedef struct VirtIOBalloon {
    VirtIODevice parent_obj;
    /* NOTE(review): presumably inflate, deflate, stats and free-page-hint
     * virtqueues respectively -- confirm against the device realize code. */
    VirtQueue *ivq, *dvq, *svq, *free_page_vq;
    uint32_t free_page_report_status; /* enum ..._free_page_report_status */
    uint32_t num_pages;
    uint32_t actual;
    uint32_t free_page_report_cmd_id;
    uint64_t stats[VIRTIO_BALLOON_S_NR];
    VirtQueueElement *stats_vq_elem;
    size_t stats_vq_offset;
    QEMUTimer *stats_timer;
    IOThread *iothread;
    QEMUBH *free_page_bh;
    /*
     * Lock to synchronize threads to access the free page reporting related
     * fields (e.g. free_page_report_status).
     */
    QemuMutex free_page_lock;
    QemuCond free_page_cond;
    /*
     * Set to block iothread to continue reading free page hints as the VM is
     * stopped.
     */
    bool block_iothread;
    NotifierWithReturn free_page_report_notify;
    int64_t stats_last_update;
    int64_t stats_poll_interval;
    uint32_t host_features;
    PartiallyBalloonedPage *pbp;
} VirtIOBalloon;

#endif
pmp-tool/PMP
src/qemu/src-pmp/hw/acpi/core.c
/* * ACPI implementation * * Copyright (c) 2006 <NAME> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2 as published by the Free Software Foundation. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/> * * Contributions after 2012-01-13 are licensed under the terms of the * GNU GPL, version 2 or (at your option) any later version. */ #include "qemu/osdep.h" #include "sysemu/sysemu.h" #include "hw/hw.h" #include "hw/acpi/acpi.h" #include "hw/nvram/fw_cfg.h" #include "qemu/config-file.h" #include "qapi/error.h" #include "qapi/opts-visitor.h" #include "qapi/qapi-events-run-state.h" #include "qapi/qapi-visit-misc.h" #include "qemu/error-report.h" #include "qemu/option.h" struct acpi_table_header { uint16_t _length; /* our length, not actual part of the hdr */ /* allows easier parsing for fw_cfg clients */ char sig[4] QEMU_NONSTRING; /* ACPI signature (4 ASCII characters) */ uint32_t length; /* Length of table, in bytes, including header */ uint8_t revision; /* ACPI Specification minor version # */ uint8_t checksum; /* To make sum of entire table == 0 */ char oem_id[6] QEMU_NONSTRING; /* OEM identification */ char oem_table_id[8] QEMU_NONSTRING; /* OEM table identification */ uint32_t oem_revision; /* OEM revision number */ char asl_compiler_id[4] QEMU_NONSTRING; /* ASL compiler vendor ID */ uint32_t asl_compiler_revision; /* ASL compiler revision number */ } QEMU_PACKED; #define ACPI_TABLE_HDR_SIZE sizeof(struct acpi_table_header) #define ACPI_TABLE_PFX_SIZE sizeof(uint16_t) /* size of the extra prefix */ static const char unsigned 
dfl_hdr[ACPI_TABLE_HDR_SIZE - ACPI_TABLE_PFX_SIZE] = "QEMU\0\0\0\0\1\0" /* sig (4), len(4), revno (1), csum (1) */ "QEMUQEQEMUQEMU\1\0\0\0" /* OEM id (6), table (8), revno (4) */ "QEMU\1\0\0\0" /* ASL compiler ID (4), version (4) */ ; char unsigned *acpi_tables; size_t acpi_tables_len; static QemuOptsList qemu_acpi_opts = { .name = "acpi", .implied_opt_name = "data", .head = QTAILQ_HEAD_INITIALIZER(qemu_acpi_opts.head), .desc = { { 0 } } /* validated with OptsVisitor */ }; static void acpi_register_config(void) { qemu_add_opts(&qemu_acpi_opts); } opts_init(acpi_register_config); static int acpi_checksum(const uint8_t *data, int len) { int sum, i; sum = 0; for (i = 0; i < len; i++) { sum += data[i]; } return (-sum) & 0xff; } /* Install a copy of the ACPI table specified in @blob. * * If @has_header is set, @blob starts with the System Description Table Header * structure. Otherwise, "dfl_hdr" is prepended. In any case, each header field * is optionally overwritten from @hdrs. * * It is valid to call this function with * (@blob == NULL && bloblen == 0 && !has_header). * * @hdrs->file and @hdrs->data are ignored. * * SIZE_MAX is considered "infinity" in this function. * * The number of tables that can be installed is not limited, but the 16-bit * counter at the beginning of "acpi_tables" wraps around after UINT16_MAX. */ static void acpi_table_install(const char unsigned *blob, size_t bloblen, bool has_header, const struct AcpiTableOptions *hdrs, Error **errp) { size_t body_start; const char unsigned *hdr_src; size_t body_size, acpi_payload_size; struct acpi_table_header *ext_hdr; unsigned changed_fields; /* Calculate where the ACPI table body starts within the blob, plus where * to copy the ACPI table header from. 
*/ if (has_header) { /* _length | ACPI header in blob | blob body * ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^ * ACPI_TABLE_PFX_SIZE sizeof dfl_hdr body_size * == body_start * * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * acpi_payload_size == bloblen */ body_start = sizeof dfl_hdr; if (bloblen < body_start) { error_setg(errp, "ACPI table claiming to have header is too " "short, available: %zu, expected: %zu", bloblen, body_start); return; } hdr_src = blob; } else { /* _length | ACPI header in template | blob body * ^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^ * ACPI_TABLE_PFX_SIZE sizeof dfl_hdr body_size * == bloblen * * ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ * acpi_payload_size */ body_start = 0; hdr_src = dfl_hdr; } body_size = bloblen - body_start; acpi_payload_size = sizeof dfl_hdr + body_size; if (acpi_payload_size > UINT16_MAX) { error_setg(errp, "ACPI table too big, requested: %zu, max: %u", acpi_payload_size, (unsigned)UINT16_MAX); return; } /* We won't fail from here on. Initialize / extend the globals. */ if (acpi_tables == NULL) { acpi_tables_len = sizeof(uint16_t); acpi_tables = g_malloc0(acpi_tables_len); } acpi_tables = g_realloc(acpi_tables, acpi_tables_len + ACPI_TABLE_PFX_SIZE + sizeof dfl_hdr + body_size); ext_hdr = (struct acpi_table_header *)(acpi_tables + acpi_tables_len); acpi_tables_len += ACPI_TABLE_PFX_SIZE; memcpy(acpi_tables + acpi_tables_len, hdr_src, sizeof dfl_hdr); acpi_tables_len += sizeof dfl_hdr; if (blob != NULL) { memcpy(acpi_tables + acpi_tables_len, blob + body_start, body_size); acpi_tables_len += body_size; } /* increase number of tables */ stw_le_p(acpi_tables, lduw_le_p(acpi_tables) + 1u); /* Update the header fields. The strings need not be NUL-terminated. 
*/ changed_fields = 0; ext_hdr->_length = cpu_to_le16(acpi_payload_size); if (hdrs->has_sig) { strncpy(ext_hdr->sig, hdrs->sig, sizeof ext_hdr->sig); ++changed_fields; } if (has_header && le32_to_cpu(ext_hdr->length) != acpi_payload_size) { warn_report("ACPI table has wrong length, header says " "%" PRIu32 ", actual size %zu bytes", le32_to_cpu(ext_hdr->length), acpi_payload_size); } ext_hdr->length = cpu_to_le32(acpi_payload_size); if (hdrs->has_rev) { ext_hdr->revision = hdrs->rev; ++changed_fields; } ext_hdr->checksum = 0; if (hdrs->has_oem_id) { strncpy(ext_hdr->oem_id, hdrs->oem_id, sizeof ext_hdr->oem_id); ++changed_fields; } if (hdrs->has_oem_table_id) { strncpy(ext_hdr->oem_table_id, hdrs->oem_table_id, sizeof ext_hdr->oem_table_id); ++changed_fields; } if (hdrs->has_oem_rev) { ext_hdr->oem_revision = cpu_to_le32(hdrs->oem_rev); ++changed_fields; } if (hdrs->has_asl_compiler_id) { strncpy(ext_hdr->asl_compiler_id, hdrs->asl_compiler_id, sizeof ext_hdr->asl_compiler_id); ++changed_fields; } if (hdrs->has_asl_compiler_rev) { ext_hdr->asl_compiler_revision = cpu_to_le32(hdrs->asl_compiler_rev); ++changed_fields; } if (!has_header && changed_fields == 0) { warn_report("ACPI table: no headers are specified"); } /* recalculate checksum */ ext_hdr->checksum = acpi_checksum((const char unsigned *)ext_hdr + ACPI_TABLE_PFX_SIZE, acpi_payload_size); } void acpi_table_add(const QemuOpts *opts, Error **errp) { AcpiTableOptions *hdrs = NULL; Error *err = NULL; char **pathnames = NULL; char **cur; size_t bloblen = 0; char unsigned *blob = NULL; { Visitor *v; v = opts_visitor_new(opts); visit_type_AcpiTableOptions(v, NULL, &hdrs, &err); visit_free(v); } if (err) { goto out; } if (hdrs->has_file == hdrs->has_data) { error_setg(&err, "'-acpitable' requires one of 'data' or 'file'"); goto out; } pathnames = g_strsplit(hdrs->has_file ? 
hdrs->file : hdrs->data, ":", 0); if (pathnames == NULL || pathnames[0] == NULL) { error_setg(&err, "'-acpitable' requires at least one pathname"); goto out; } /* now read in the data files, reallocating buffer as needed */ for (cur = pathnames; *cur; ++cur) { int fd = open(*cur, O_RDONLY | O_BINARY); if (fd < 0) { error_setg(&err, "can't open file %s: %s", *cur, strerror(errno)); goto out; } for (;;) { char unsigned data[8192]; ssize_t r; r = read(fd, data, sizeof data); if (r == 0) { break; } else if (r > 0) { blob = g_realloc(blob, bloblen + r); memcpy(blob + bloblen, data, r); bloblen += r; } else if (errno != EINTR) { error_setg(&err, "can't read file %s: %s", *cur, strerror(errno)); close(fd); goto out; } } close(fd); } acpi_table_install(blob, bloblen, hdrs->has_file, hdrs, &err); out: g_free(blob); g_strfreev(pathnames); qapi_free_AcpiTableOptions(hdrs); error_propagate(errp, err); } unsigned acpi_table_len(void *current) { struct acpi_table_header *hdr = current - sizeof(hdr->_length); return hdr->_length; } static void *acpi_table_hdr(void *h) { struct acpi_table_header *hdr = h; return &hdr->sig; } uint8_t *acpi_table_first(void) { if (!acpi_tables) { return NULL; } return acpi_table_hdr(acpi_tables + ACPI_TABLE_PFX_SIZE); } uint8_t *acpi_table_next(uint8_t *current) { uint8_t *next = current + acpi_table_len(current); if (next - acpi_tables >= acpi_tables_len) { return NULL; } else { return acpi_table_hdr(next); } } int acpi_get_slic_oem(AcpiSlicOem *oem) { uint8_t *u; for (u = acpi_table_first(); u; u = acpi_table_next(u)) { struct acpi_table_header *hdr = (void *)(u - sizeof(hdr->_length)); if (memcmp(hdr->sig, "SLIC", 4) == 0) { oem->id = hdr->oem_id; oem->table_id = hdr->oem_table_id; return 0; } } return -1; } static void acpi_notify_wakeup(Notifier *notifier, void *data) { ACPIREGS *ar = container_of(notifier, ACPIREGS, wakeup); WakeupReason *reason = data; switch (*reason) { case QEMU_WAKEUP_REASON_RTC: ar->pm1.evt.sts |= 
(ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_RT_CLOCK_STATUS); break; case QEMU_WAKEUP_REASON_PMTIMER: ar->pm1.evt.sts |= (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_TIMER_STATUS); break; case QEMU_WAKEUP_REASON_OTHER: /* ACPI_BITMASK_WAKE_STATUS should be set on resume. Pretend that resume was caused by power button */ ar->pm1.evt.sts |= (ACPI_BITMASK_WAKE_STATUS | ACPI_BITMASK_POWER_BUTTON_STATUS); break; default: break; } } /* ACPI PM1a EVT */ uint16_t acpi_pm1_evt_get_sts(ACPIREGS *ar) { /* Compare ns-clock, not PM timer ticks, because acpi_pm_tmr_update function uses ns for setting the timer. */ int64_t d = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL); if (d >= muldiv64(ar->tmr.overflow_time, NANOSECONDS_PER_SECOND, PM_TIMER_FREQUENCY)) { ar->pm1.evt.sts |= ACPI_BITMASK_TIMER_STATUS; } return ar->pm1.evt.sts; } static void acpi_pm1_evt_write_sts(ACPIREGS *ar, uint16_t val) { uint16_t pm1_sts = acpi_pm1_evt_get_sts(ar); if (pm1_sts & val & ACPI_BITMASK_TIMER_STATUS) { /* if TMRSTS is reset, then compute the new overflow time */ acpi_pm_tmr_calc_overflow_time(ar); } ar->pm1.evt.sts &= ~val; } static void acpi_pm1_evt_write_en(ACPIREGS *ar, uint16_t val) { ar->pm1.evt.en = val; qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_RTC, val & ACPI_BITMASK_RT_CLOCK_ENABLE); qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_PMTIMER, val & ACPI_BITMASK_TIMER_ENABLE); } void acpi_pm1_evt_power_down(ACPIREGS *ar) { if (ar->pm1.evt.en & ACPI_BITMASK_POWER_BUTTON_ENABLE) { ar->pm1.evt.sts |= ACPI_BITMASK_POWER_BUTTON_STATUS; ar->tmr.update_sci(ar); } } void acpi_pm1_evt_reset(ACPIREGS *ar) { ar->pm1.evt.sts = 0; ar->pm1.evt.en = 0; qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_RTC, 0); qemu_system_wakeup_enable(QEMU_WAKEUP_REASON_PMTIMER, 0); } static uint64_t acpi_pm_evt_read(void *opaque, hwaddr addr, unsigned width) { ACPIREGS *ar = opaque; switch (addr) { case 0: return acpi_pm1_evt_get_sts(ar); case 2: return ar->pm1.evt.en; default: return 0; } } static void acpi_pm_evt_write(void *opaque, 
hwaddr addr, uint64_t val, unsigned width) { ACPIREGS *ar = opaque; switch (addr) { case 0: acpi_pm1_evt_write_sts(ar, val); ar->pm1.evt.update_sci(ar); break; case 2: acpi_pm1_evt_write_en(ar, val); ar->pm1.evt.update_sci(ar); break; } } static const MemoryRegionOps acpi_pm_evt_ops = { .read = acpi_pm_evt_read, .write = acpi_pm_evt_write, .valid.min_access_size = 2, .valid.max_access_size = 2, .endianness = DEVICE_LITTLE_ENDIAN, }; void acpi_pm1_evt_init(ACPIREGS *ar, acpi_update_sci_fn update_sci, MemoryRegion *parent) { ar->pm1.evt.update_sci = update_sci; memory_region_init_io(&ar->pm1.evt.io, memory_region_owner(parent), &acpi_pm_evt_ops, ar, "acpi-evt", 4); memory_region_add_subregion(parent, 0, &ar->pm1.evt.io); } /* ACPI PM_TMR */ void acpi_pm_tmr_update(ACPIREGS *ar, bool enable) { int64_t expire_time; /* schedule a timer interruption if needed */ if (enable) { expire_time = muldiv64(ar->tmr.overflow_time, NANOSECONDS_PER_SECOND, PM_TIMER_FREQUENCY); timer_mod(ar->tmr.timer, expire_time); } else { timer_del(ar->tmr.timer); } } static inline int64_t acpi_pm_tmr_get_clock(void) { return muldiv64(qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL), PM_TIMER_FREQUENCY, NANOSECONDS_PER_SECOND); } void acpi_pm_tmr_calc_overflow_time(ACPIREGS *ar) { int64_t d = acpi_pm_tmr_get_clock(); ar->tmr.overflow_time = (d + 0x800000LL) & ~0x7fffffLL; } static uint32_t acpi_pm_tmr_get(ACPIREGS *ar) { uint32_t d = acpi_pm_tmr_get_clock(); return d & 0xffffff; } static void acpi_pm_tmr_timer(void *opaque) { ACPIREGS *ar = opaque; qemu_system_wakeup_request(QEMU_WAKEUP_REASON_PMTIMER, NULL); ar->tmr.update_sci(ar); } static uint64_t acpi_pm_tmr_read(void *opaque, hwaddr addr, unsigned width) { return acpi_pm_tmr_get(opaque); } static void acpi_pm_tmr_write(void *opaque, hwaddr addr, uint64_t val, unsigned width) { /* nothing */ } static const MemoryRegionOps acpi_pm_tmr_ops = { .read = acpi_pm_tmr_read, .write = acpi_pm_tmr_write, .valid.min_access_size = 4, .valid.max_access_size = 4, 
.endianness = DEVICE_LITTLE_ENDIAN, }; void acpi_pm_tmr_init(ACPIREGS *ar, acpi_update_sci_fn update_sci, MemoryRegion *parent) { ar->tmr.update_sci = update_sci; ar->tmr.timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, acpi_pm_tmr_timer, ar); memory_region_init_io(&ar->tmr.io, memory_region_owner(parent), &acpi_pm_tmr_ops, ar, "acpi-tmr", 4); memory_region_add_subregion(parent, 8, &ar->tmr.io); } void acpi_pm_tmr_reset(ACPIREGS *ar) { ar->tmr.overflow_time = 0; timer_del(ar->tmr.timer); } /* ACPI PM1aCNT */ static void acpi_pm1_cnt_write(ACPIREGS *ar, uint16_t val) { ar->pm1.cnt.cnt = val & ~(ACPI_BITMASK_SLEEP_ENABLE); if (val & ACPI_BITMASK_SLEEP_ENABLE) { /* change suspend type */ uint16_t sus_typ = (val >> 10) & 7; switch(sus_typ) { case 0: /* soft power off */ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); break; case 1: qemu_system_suspend_request(); break; default: if (sus_typ == ar->pm1.cnt.s4_val) { /* S4 request */ qapi_event_send_suspend_disk(); qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); } break; } } } void acpi_pm1_cnt_update(ACPIREGS *ar, bool sci_enable, bool sci_disable) { /* ACPI specs 3.0, 4.7.2.5 */ if (sci_enable) { ar->pm1.cnt.cnt |= ACPI_BITMASK_SCI_ENABLE; } else if (sci_disable) { ar->pm1.cnt.cnt &= ~ACPI_BITMASK_SCI_ENABLE; } } static uint64_t acpi_pm_cnt_read(void *opaque, hwaddr addr, unsigned width) { ACPIREGS *ar = opaque; return ar->pm1.cnt.cnt; } static void acpi_pm_cnt_write(void *opaque, hwaddr addr, uint64_t val, unsigned width) { acpi_pm1_cnt_write(opaque, val); } static const MemoryRegionOps acpi_pm_cnt_ops = { .read = acpi_pm_cnt_read, .write = acpi_pm_cnt_write, .valid.min_access_size = 2, .valid.max_access_size = 2, .endianness = DEVICE_LITTLE_ENDIAN, }; void acpi_pm1_cnt_init(ACPIREGS *ar, MemoryRegion *parent, bool disable_s3, bool disable_s4, uint8_t s4_val) { FWCfgState *fw_cfg; ar->pm1.cnt.s4_val = s4_val; ar->wakeup.notify = acpi_notify_wakeup; qemu_register_wakeup_notifier(&ar->wakeup); /* * 
Register wake-up support in QMP query-current-machine API */ qemu_register_wakeup_support(); memory_region_init_io(&ar->pm1.cnt.io, memory_region_owner(parent), &acpi_pm_cnt_ops, ar, "acpi-cnt", 2); memory_region_add_subregion(parent, 4, &ar->pm1.cnt.io); fw_cfg = fw_cfg_find(); if (fw_cfg) { uint8_t suspend[6] = {128, 0, 0, 129, 128, 128}; suspend[3] = 1 | ((!disable_s3) << 7); suspend[4] = s4_val | ((!disable_s4) << 7); fw_cfg_add_file(fw_cfg, "etc/system-states", g_memdup(suspend, 6), 6); } } void acpi_pm1_cnt_reset(ACPIREGS *ar) { ar->pm1.cnt.cnt = 0; } /* ACPI GPE */ void acpi_gpe_init(ACPIREGS *ar, uint8_t len) { ar->gpe.len = len; /* Only first len / 2 bytes are ever used, * but the caller in ich9.c migrates full len bytes. * TODO: fix ich9.c and drop the extra allocation. */ ar->gpe.sts = g_malloc0(len); ar->gpe.en = g_malloc0(len); } void acpi_gpe_reset(ACPIREGS *ar) { memset(ar->gpe.sts, 0, ar->gpe.len / 2); memset(ar->gpe.en, 0, ar->gpe.len / 2); } static uint8_t *acpi_gpe_ioport_get_ptr(ACPIREGS *ar, uint32_t addr) { uint8_t *cur = NULL; if (addr < ar->gpe.len / 2) { cur = ar->gpe.sts + addr; } else if (addr < ar->gpe.len) { cur = ar->gpe.en + addr - ar->gpe.len / 2; } else { abort(); } return cur; } void acpi_gpe_ioport_writeb(ACPIREGS *ar, uint32_t addr, uint32_t val) { uint8_t *cur; cur = acpi_gpe_ioport_get_ptr(ar, addr); if (addr < ar->gpe.len / 2) { /* GPE_STS */ *cur = (*cur) & ~val; } else if (addr < ar->gpe.len) { /* GPE_EN */ *cur = val; } else { abort(); } } uint32_t acpi_gpe_ioport_readb(ACPIREGS *ar, uint32_t addr) { uint8_t *cur; uint32_t val; cur = acpi_gpe_ioport_get_ptr(ar, addr); val = 0; if (cur != NULL) { val = *cur; } return val; } void acpi_send_gpe_event(ACPIREGS *ar, qemu_irq irq, AcpiEventStatusBits status) { ar->gpe.sts[0] |= status; acpi_update_sci(ar, irq); } void acpi_update_sci(ACPIREGS *regs, qemu_irq irq) { int sci_level, pm1a_sts; pm1a_sts = acpi_pm1_evt_get_sts(regs); sci_level = ((pm1a_sts & regs->pm1.evt.en & 
ACPI_BITMASK_PM1_COMMON_ENABLED) != 0) || ((regs->gpe.sts[0] & regs->gpe.en[0]) != 0); qemu_set_irq(irq, sci_level); /* schedule a timer interruption if needed */ acpi_pm_tmr_update(regs, (regs->pm1.evt.en & ACPI_BITMASK_TIMER_ENABLE) && !(pm1a_sts & ACPI_BITMASK_TIMER_STATUS)); }
pmp-tool/PMP
src/qemu/src-pmp/tests/tcg/mips/user/isa/mips64r6/shift/test_mips64r6_dsrlv.c
<filename>src/qemu/src-pmp/tests/tcg/mips/user/isa/mips64r6/shift/test_mips64r6_dsrlv.c /* * Test program for MIPS64R6 instruction DSRLV * * Copyright (C) 2019 Wave Computing, Inc. * Copyright (C) 2019 <NAME> <<EMAIL>> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. * */ #include <sys/time.h> #include <stdint.h> #include "../../../../include/wrappers_mips64r6.h" #include "../../../../include/test_inputs_64.h" #include "../../../../include/test_utils_64.h" #define TEST_COUNT_TOTAL (PATTERN_INPUTS_64_COUNT + RANDOM_INPUTS_64_COUNT) int32_t main(void) { char *instruction_name = "DSRLV"; int32_t ret; uint32_t i, j; struct timeval start, end; double elapsed_time; uint64_t b64_result[TEST_COUNT_TOTAL]; uint64_t b64_expect[TEST_COUNT_TOTAL] = { 0x0000000000000001ULL, /* 0 */ 0xffffffffffffffffULL, 0x00000000003fffffULL, 0x000007ffffffffffULL, 0x000fffffffffffffULL, 0x0000000000001fffULL, 0x0003ffffffffffffULL, 0x0000000000007fffULL, 0x0000000000000000ULL, /* 8 */ 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0000000000000001ULL, /* 16 */ 0xaaaaaaaaaaaaaaaaULL, 0x00000000002aaaaaULL, 0x0000055555555555ULL, 0x000aaaaaaaaaaaaaULL, 0x0000000000001555ULL, 0x0002aaaaaaaaaaaaULL, 0x0000000000005555ULL, 0x0000000000000000ULL, /* 24 */ 0x5555555555555555ULL, 0x0000000000155555ULL, 
0x000002aaaaaaaaaaULL, 0x0005555555555555ULL, 0x0000000000000aaaULL, 0x0001555555555555ULL, 0x0000000000002aaaULL, 0x0000000000000001ULL, /* 32 */ 0xccccccccccccccccULL, 0x0000000000333333ULL, 0x0000066666666666ULL, 0x000cccccccccccccULL, 0x0000000000001999ULL, 0x0003333333333333ULL, 0x0000000000006666ULL, 0x0000000000000000ULL, /* 40 */ 0x3333333333333333ULL, 0x00000000000cccccULL, 0x0000019999999999ULL, 0x0003333333333333ULL, 0x0000000000000666ULL, 0x0000ccccccccccccULL, 0x0000000000001999ULL, 0x0000000000000001ULL, /* 48 */ 0xe38e38e38e38e38eULL, 0x000000000038e38eULL, 0x0000071c71c71c71ULL, 0x000e38e38e38e38eULL, 0x0000000000001c71ULL, 0x00038e38e38e38e3ULL, 0x00000000000071c7ULL, 0x0000000000000000ULL, /* 56 */ 0x1c71c71c71c71c71ULL, 0x0000000000071c71ULL, 0x000000e38e38e38eULL, 0x0001c71c71c71c71ULL, 0x000000000000038eULL, 0x000071c71c71c71cULL, 0x0000000000000e38ULL, 0x886ae6cc28625540ULL, /* 64 */ 0x00886ae6cc286255ULL, 0x886ae6cc28625540ULL, 0x000221ab9b30a189ULL, 0xfbbe00634d93c708ULL, 0x00fbbe00634d93c7ULL, 0xfbbe00634d93c708ULL, 0x0003eef8018d364fULL, 0xac5aaeaab9cf8b80ULL, /* 72 */ 0x00ac5aaeaab9cf8bULL, 0xac5aaeaab9cf8b80ULL, 0x0002b16abaaae73eULL, 0x704f164d5e31e24eULL, 0x00704f164d5e31e2ULL, 0x704f164d5e31e24eULL, 0x0001c13c593578c7ULL, }; gettimeofday(&start, NULL); for (i = 0; i < PATTERN_INPUTS_64_SHORT_COUNT; i++) { for (j = 0; j < PATTERN_INPUTS_64_SHORT_COUNT; j++) { do_mips64r6_DSRLV(b64_pattern + i, b64_pattern + j, b64_result + (PATTERN_INPUTS_64_SHORT_COUNT * i + j)); } } for (i = 0; i < RANDOM_INPUTS_64_SHORT_COUNT; i++) { for (j = 0; j < RANDOM_INPUTS_64_SHORT_COUNT; j++) { do_mips64r6_DSRLV(b64_random + i, b64_random + j, b64_result + (((PATTERN_INPUTS_64_SHORT_COUNT) * (PATTERN_INPUTS_64_SHORT_COUNT)) + RANDOM_INPUTS_64_SHORT_COUNT * i + j)); } } gettimeofday(&end, NULL); elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; ret = check_results_64(instruction_name, 
TEST_COUNT_TOTAL, elapsed_time, b64_result, b64_expect); return ret; }
pmp-tool/PMP
src/qemu/src-pmp/roms/seabios/src/fw/paravirt.h
<gh_stars>1-10 #ifndef __PV_H #define __PV_H #include "config.h" // CONFIG_* #include "biosvar.h" // GET_GLOBAL #include "romfile.h" // struct romfile_s // Types of paravirtualized platforms. #define PF_QEMU (1<<0) #define PF_XEN (1<<1) #define PF_KVM (1<<2) typedef struct QemuCfgDmaAccess { u32 control; u32 length; u64 address; } PACKED QemuCfgDmaAccess; extern u32 RamSize; extern u64 RamSizeOver4G; extern int PlatformRunningOn; static inline int runningOnQEMU(void) { return CONFIG_QEMU || ( CONFIG_QEMU_HARDWARE && GET_GLOBAL(PlatformRunningOn) & PF_QEMU); } static inline int runningOnXen(void) { return CONFIG_XEN && GET_GLOBAL(PlatformRunningOn) & PF_XEN; } static inline int runningOnKVM(void) { return CONFIG_QEMU && GET_GLOBAL(PlatformRunningOn) & PF_KVM; } // Common paravirt ports. #define PORT_SMI_CMD 0x00b2 #define PORT_SMI_STATUS 0x00b3 #define PORT_QEMU_CFG_CTL 0x0510 #define PORT_QEMU_CFG_DATA 0x0511 #define PORT_QEMU_CFG_DMA_ADDR_HIGH 0x0514 #define PORT_QEMU_CFG_DMA_ADDR_LOW 0x0518 // QEMU_CFG_DMA_CONTROL bits #define QEMU_CFG_DMA_CTL_ERROR 0x01 #define QEMU_CFG_DMA_CTL_READ 0x02 #define QEMU_CFG_DMA_CTL_SKIP 0x04 #define QEMU_CFG_DMA_CTL_SELECT 0x08 #define QEMU_CFG_DMA_CTL_WRITE 0x10 // QEMU_CFG_DMA ID bit #define QEMU_CFG_VERSION_DMA 2 int qemu_cfg_enabled(void); int qemu_cfg_dma_enabled(void); void qemu_preinit(void); void qemu_platform_setup(void); void qemu_cfg_init(void); u16 qemu_get_present_cpus_count(void); int qemu_cfg_write_file(void *src, struct romfile_s *file, u32 offset, u32 len); int qemu_cfg_write_file_simple(void *src, u16 key, u32 offset, u32 len); u16 qemu_get_romfile_key(struct romfile_s *file); #endif
pmp-tool/PMP
src/qemu/src-pmp/nbd/nbd-internal.h
<filename>src/qemu/src-pmp/nbd/nbd-internal.h /* * NBD Internal Declarations * * Copyright (C) 2016 Red Hat, Inc. * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #ifndef NBD_INTERNAL_H #define NBD_INTERNAL_H #include "block/nbd.h" #include "sysemu/block-backend.h" #include "io/channel-tls.h" #include "qemu/coroutine.h" #include "qemu/iov.h" #ifndef _WIN32 #include <sys/ioctl.h> #endif #if defined(__sun__) || defined(__HAIKU__) #include <sys/ioccom.h> #endif #ifdef __linux__ #include <linux/fs.h> #endif #include "qemu/bswap.h" #include "qemu/queue.h" #include "qemu/main-loop.h" /* This is all part of the "official" NBD API. * * The most up-to-date documentation is available at: * https://github.com/yoe/nbd/blob/master/doc/proto.md */ /* Size of all NBD_OPT_*, without payload */ #define NBD_REQUEST_SIZE (4 + 2 + 2 + 8 + 8 + 4) /* Size of all NBD_REP_* sent in answer to most NBD_OPT_*, without payload */ #define NBD_REPLY_SIZE (4 + 4 + 8) /* Size of reply to NBD_OPT_EXPORT_NAME */ #define NBD_REPLY_EXPORT_NAME_SIZE (8 + 2 + 124) /* Size of oldstyle negotiation */ #define NBD_OLDSTYLE_NEGOTIATE_SIZE (8 + 8 + 8 + 4 + 124) #define NBD_INIT_MAGIC 0x4e42444d41474943LL /* ASCII "NBDMAGIC" */ #define NBD_REQUEST_MAGIC 0x25609513 #define NBD_OPTS_MAGIC 0x49484156454F5054LL /* ASCII "IHAVEOPT" */ #define NBD_CLIENT_MAGIC 0x0000420281861253LL #define NBD_REP_MAGIC 0x0003e889045565a9LL #define NBD_SET_SOCK _IO(0xab, 0) #define NBD_SET_BLKSIZE _IO(0xab, 1) #define NBD_SET_SIZE _IO(0xab, 2) #define NBD_DO_IT _IO(0xab, 3) #define NBD_CLEAR_SOCK _IO(0xab, 4) #define NBD_CLEAR_QUE _IO(0xab, 5) #define NBD_PRINT_DEBUG _IO(0xab, 6) #define NBD_SET_SIZE_BLOCKS _IO(0xab, 7) #define NBD_DISCONNECT _IO(0xab, 8) #define NBD_SET_TIMEOUT _IO(0xab, 9) #define NBD_SET_FLAGS _IO(0xab, 10) /* nbd_write * Writes @size bytes to @ioc. Returns 0 on success. 
*/ static inline int nbd_write(QIOChannel *ioc, const void *buffer, size_t size, Error **errp) { return qio_channel_write_all(ioc, buffer, size, errp) < 0 ? -EIO : 0; } struct NBDTLSHandshakeData { GMainLoop *loop; bool complete; Error *error; }; void nbd_tls_handshake(QIOTask *task, void *opaque); int nbd_drop(QIOChannel *ioc, size_t size, Error **errp); #endif
pmp-tool/PMP
src/qemu/src-pmp/include/qemu/typedefs.h
#ifndef QEMU_TYPEDEFS_H #define QEMU_TYPEDEFS_H /* A load of opaque types so that device init declarations don't have to pull in all the real definitions. */ /* Please keep this list in case-insensitive alphabetical order */ typedef struct AdapterInfo AdapterInfo; typedef struct AddressSpace AddressSpace; typedef struct AioContext AioContext; typedef struct AnnounceTimer AnnounceTimer; typedef struct BdrvDirtyBitmap BdrvDirtyBitmap; typedef struct BdrvDirtyBitmapIter BdrvDirtyBitmapIter; typedef struct BlockBackend BlockBackend; typedef struct BlockBackendRootState BlockBackendRootState; typedef struct BlockDriverState BlockDriverState; typedef struct BusClass BusClass; typedef struct BusState BusState; typedef struct Chardev Chardev; typedef struct CompatProperty CompatProperty; typedef struct CoMutex CoMutex; typedef struct CPUAddressSpace CPUAddressSpace; typedef struct CPUState CPUState; typedef struct DeviceListener DeviceListener; typedef struct DeviceState DeviceState; typedef struct DirtyBitmapSnapshot DirtyBitmapSnapshot; typedef struct DisplayChangeListener DisplayChangeListener; typedef struct DriveInfo DriveInfo; typedef struct Error Error; typedef struct EventNotifier EventNotifier; typedef struct FlatView FlatView; typedef struct FWCfgEntry FWCfgEntry; typedef struct FWCfgIoState FWCfgIoState; typedef struct FWCfgMemState FWCfgMemState; typedef struct FWCfgState FWCfgState; typedef struct HVFX86EmulatorState HVFX86EmulatorState; typedef struct I2CBus I2CBus; typedef struct I2SCodec I2SCodec; typedef struct IOMMUMemoryRegion IOMMUMemoryRegion; typedef struct ISABus ISABus; typedef struct ISADevice ISADevice; typedef struct IsaDma IsaDma; typedef struct MACAddr MACAddr; typedef struct MachineClass MachineClass; typedef struct MachineState MachineState; typedef struct MemoryListener MemoryListener; typedef struct MemoryMappingList MemoryMappingList; typedef struct MemoryRegion MemoryRegion; typedef struct MemoryRegionCache MemoryRegionCache; typedef 
struct MemoryRegionSection MemoryRegionSection; typedef struct MigrationIncomingState MigrationIncomingState; typedef struct MigrationState MigrationState; typedef struct Monitor Monitor; typedef struct MonitorDef MonitorDef; typedef struct MSIMessage MSIMessage; typedef struct NetClientState NetClientState; typedef struct NetFilterState NetFilterState; typedef struct NICInfo NICInfo; typedef struct NodeInfo NodeInfo; typedef struct NumaNodeMem NumaNodeMem; typedef struct ObjectClass ObjectClass; typedef struct PCIBridge PCIBridge; typedef struct PCIBus PCIBus; typedef struct PCIDevice PCIDevice; typedef struct PCIEAERErr PCIEAERErr; typedef struct PCIEAERLog PCIEAERLog; typedef struct PCIEAERMsg PCIEAERMsg; typedef struct PCIEPort PCIEPort; typedef struct PCIESlot PCIESlot; typedef struct PCIExpressDevice PCIExpressDevice; typedef struct PCIExpressHost PCIExpressHost; typedef struct PCIHostDeviceAddress PCIHostDeviceAddress; typedef struct PCIHostState PCIHostState; typedef struct PCMachineState PCMachineState; typedef struct PostcopyDiscardState PostcopyDiscardState; typedef struct Property Property; typedef struct PropertyInfo PropertyInfo; typedef struct QBool QBool; typedef struct QDict QDict; typedef struct QEMUBH QEMUBH; typedef struct QemuConsole QemuConsole; typedef struct QEMUFile QEMUFile; typedef struct QemuLockable QemuLockable; typedef struct QemuMutex QemuMutex; typedef struct QemuOpt QemuOpt; typedef struct QemuOpts QemuOpts; typedef struct QemuOptsList QemuOptsList; typedef struct QEMUSGList QEMUSGList; typedef struct QemuSpin QemuSpin; typedef struct QEMUTimer QEMUTimer; typedef struct QEMUTimerListGroup QEMUTimerListGroup; typedef struct QJSON QJSON; typedef struct QList QList; typedef struct QNull QNull; typedef struct QNum QNum; typedef struct QObject QObject; typedef struct QString QString; typedef struct RAMBlock RAMBlock; typedef struct Range Range; typedef struct SHPCDevice SHPCDevice; typedef struct SSIBus SSIBus; typedef struct uWireSlave 
uWireSlave; typedef struct VirtIODevice VirtIODevice; typedef struct Visitor Visitor; typedef void SaveStateHandler(QEMUFile *f, void *opaque); typedef int LoadStateHandler(QEMUFile *f, void *opaque, int version_id); #endif /* QEMU_TYPEDEFS_H */
pmp-tool/PMP
src/qemu/src-pmp/include/hw/block/block.h
<reponame>pmp-tool/PMP /* * Common code for block device models * * Copyright (C) 2012 Red Hat, Inc. * Copyright (c) 2003-2008 <NAME> * * This work is licensed under the terms of the GNU GPL, version 2 or * later. See the COPYING file in the top-level directory. */ #ifndef HW_BLOCK_H #define HW_BLOCK_H #include "exec/hwaddr.h" #include "qapi/qapi-types-block-core.h" /* Configuration */ typedef struct BlockConf { BlockBackend *blk; uint16_t physical_block_size; uint16_t logical_block_size; uint16_t min_io_size; uint32_t opt_io_size; int32_t bootindex; uint32_t discard_granularity; /* geometry, not all devices use this */ uint32_t cyls, heads, secs; OnOffAuto wce; bool share_rw; BlockdevOnError rerror; BlockdevOnError werror; } BlockConf; static inline unsigned int get_physical_block_exp(BlockConf *conf) { unsigned int exp = 0, size; for (size = conf->physical_block_size; size > conf->logical_block_size; size >>= 1) { exp++; } return exp; } #define DEFINE_BLOCK_PROPERTIES(_state, _conf) \ DEFINE_PROP_DRIVE("drive", _state, _conf.blk), \ DEFINE_PROP_BLOCKSIZE("logical_block_size", _state, \ _conf.logical_block_size), \ DEFINE_PROP_BLOCKSIZE("physical_block_size", _state, \ _conf.physical_block_size), \ DEFINE_PROP_UINT16("min_io_size", _state, _conf.min_io_size, 0), \ DEFINE_PROP_UINT32("opt_io_size", _state, _conf.opt_io_size, 0), \ DEFINE_PROP_UINT32("discard_granularity", _state, \ _conf.discard_granularity, -1), \ DEFINE_PROP_ON_OFF_AUTO("write-cache", _state, _conf.wce, \ ON_OFF_AUTO_AUTO), \ DEFINE_PROP_BOOL("share-rw", _state, _conf.share_rw, false) #define DEFINE_BLOCK_CHS_PROPERTIES(_state, _conf) \ DEFINE_PROP_UINT32("cyls", _state, _conf.cyls, 0), \ DEFINE_PROP_UINT32("heads", _state, _conf.heads, 0), \ DEFINE_PROP_UINT32("secs", _state, _conf.secs, 0) #define DEFINE_BLOCK_ERROR_PROPERTIES(_state, _conf) \ DEFINE_PROP_BLOCKDEV_ON_ERROR("rerror", _state, _conf.rerror, \ BLOCKDEV_ON_ERROR_AUTO), \ DEFINE_PROP_BLOCKDEV_ON_ERROR("werror", _state, _conf.werror, 
\ BLOCKDEV_ON_ERROR_AUTO) /* Backend access helpers */ bool blk_check_size_and_read_all(BlockBackend *blk, void *buf, hwaddr size, Error **errp); /* Configuration helpers */ bool blkconf_geometry(BlockConf *conf, int *trans, unsigned cyls_max, unsigned heads_max, unsigned secs_max, Error **errp); void blkconf_blocksizes(BlockConf *conf); bool blkconf_apply_backend_options(BlockConf *conf, bool readonly, bool resizable, Error **errp); /* Hard disk geometry */ void hd_geometry_guess(BlockBackend *blk, uint32_t *pcyls, uint32_t *pheads, uint32_t *psecs, int *ptrans); int hd_bios_chs_auto_trans(uint32_t cyls, uint32_t heads, uint32_t secs); #endif
pmp-tool/PMP
src/qemu/src-pmp/hw/misc/puv3_pm.c
/*
 * Power Management device simulation in PKUnity SoC
 *
 * Copyright (C) 2010-2012 <NAME>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation, or any later version.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "hw/hw.h"
#include "hw/sysbus.h"

#undef DEBUG_PUV3
#include "hw/unicore32/puv3.h"

#define TYPE_PUV3_PM "puv3_pm"
#define PUV3_PM(obj) OBJECT_CHECK(PUV3PMState, (obj), TYPE_PUV3_PM)

/* Register file of the PM block; only the configuration registers are
 * backed by state, the status registers are synthesized on read. */
typedef struct PUV3PMState {
    SysBusDevice parent_obj;

    MemoryRegion iomem;
    uint32_t reg_PMCR;          /* power mode control */
    uint32_t reg_PCGR;          /* peripheral clock gating */
    uint32_t reg_PLL_SYS_CFG;   /* system PLL configuration */
    uint32_t reg_PLL_DDR_CFG;   /* DDR PLL configuration */
    uint32_t reg_PLL_VGA_CFG;   /* VGA PLL configuration */
    uint32_t reg_DIVCFG;        /* clock divider configuration */
} PUV3PMState;

/*
 * MMIO read handler.
 *
 * Configuration registers return the last value written; the various
 * status registers (0x28..0x44) return fixed "everything locked/done"
 * values so guest PLL/divider polling loops terminate immediately.
 */
static uint64_t puv3_pm_read(void *opaque, hwaddr offset, unsigned size)
{
    PUV3PMState *s = opaque;
    uint32_t ret = 0;

    switch (offset) {
    case 0x14:
        ret = s->reg_PCGR;
        break;
    case 0x18:
        ret = s->reg_PLL_SYS_CFG;
        break;
    case 0x1c:
        ret = s->reg_PLL_DDR_CFG;
        break;
    case 0x20:
        ret = s->reg_PLL_VGA_CFG;
        break;
    case 0x24:
        ret = s->reg_DIVCFG;
        break;
    case 0x28: /* PLL SYS STATUS */
        ret = 0x00002401;
        break;
    case 0x2c: /* PLL DDR STATUS */
        ret = 0x00100c00;
        break;
    case 0x30: /* PLL VGA STATUS */
        ret = 0x00003801;
        break;
    case 0x34: /* DIV STATUS */
        ret = 0x22f52015;
        break;
    case 0x38: /* SW RESET */
        ret = 0x0;
        break;
    case 0x44: /* PLL DFC DONE */
        ret = 0x7;
        break;
    default:
        /* hwaddr is 64-bit: %x here was UB when DEBUG_PUV3 is enabled,
         * use HWADDR_PRIx instead. */
        DPRINTF("Bad offset 0x%" HWADDR_PRIx "\n", offset);
    }
    DPRINTF("offset 0x%" HWADDR_PRIx ", value 0x%x\n", offset, ret);

    return ret;
}

/*
 * MMIO write handler.
 *
 * Writes to configuration registers are latched; writes to 0x24 (DIVCFG)
 * and 0x38 (SW RESET) are accepted but deliberately ignored.
 */
static void puv3_pm_write(void *opaque, hwaddr offset,
                          uint64_t value, unsigned size)
{
    PUV3PMState *s = opaque;

    switch (offset) {
    case 0x0:
        s->reg_PMCR = value;
        break;
    case 0x14:
        s->reg_PCGR = value;
        break;
    case 0x18:
        s->reg_PLL_SYS_CFG = value;
        break;
    case 0x1c:
        s->reg_PLL_DDR_CFG = value;
        break;
    case 0x20:
        s->reg_PLL_VGA_CFG = value;
        break;
    case 0x24:
    case 0x38:
        break;
    default:
        DPRINTF("Bad offset 0x%" HWADDR_PRIx "\n", offset);
    }
    /* value is uint64_t: match the format specifier (was %x, UB). */
    DPRINTF("offset 0x%" HWADDR_PRIx ", value 0x%" PRIx64 "\n",
            offset, value);
}

static const MemoryRegionOps puv3_pm_ops = {
    .read = puv3_pm_read,
    .write = puv3_pm_write,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .endianness = DEVICE_NATIVE_ENDIAN,
};

/* Realize: reset the clock-gating register and map the register window. */
static void puv3_pm_realize(DeviceState *dev, Error **errp)
{
    PUV3PMState *s = PUV3_PM(dev);

    s->reg_PCGR = 0x0;

    memory_region_init_io(&s->iomem, OBJECT(s), &puv3_pm_ops, s, "puv3_pm",
                          PUV3_REGS_OFFSET);
    sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem);
}

static void puv3_pm_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->realize = puv3_pm_realize;
}

static const TypeInfo puv3_pm_info = {
    .name = TYPE_PUV3_PM,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(PUV3PMState),
    .class_init = puv3_pm_class_init,
};

static void puv3_pm_register_type(void)
{
    type_register_static(&puv3_pm_info);
}

type_init(puv3_pm_register_type)
pmp-tool/PMP
src/qemu/src-pmp/hw/rdma/rdma_backend_defs.h
<filename>src/qemu/src-pmp/hw/rdma/rdma_backend_defs.h /* * RDMA device: Definitions of Backend Device structures * * Copyright (C) 2018 Oracle * Copyright (C) 2018 Red Hat Inc * * Authors: * <NAME> <<EMAIL>> * <NAME> <<EMAIL>> * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * */ #ifndef RDMA_BACKEND_DEFS_H #define RDMA_BACKEND_DEFS_H #include "qemu/thread.h" #include "chardev/char-fe.h" #include <infiniband/verbs.h> #include "contrib/rdmacm-mux/rdmacm-mux.h" #include "rdma_utils.h" typedef struct RdmaDeviceResources RdmaDeviceResources; typedef struct RdmaBackendThread { QemuThread thread; bool run; /* Set by thread manager to let thread know it should exit */ bool is_running; /* Set by the thread to report its status */ } RdmaBackendThread; typedef struct RdmaCmMux { CharBackend *chr_be; int can_receive; } RdmaCmMux; typedef struct RdmaBackendDev { RdmaBackendThread comp_thread; PCIDevice *dev; RdmaDeviceResources *rdma_dev_res; struct ibv_device *ib_dev; struct ibv_context *context; struct ibv_comp_channel *channel; uint8_t port_num; RdmaProtectedQList recv_mads_list; RdmaCmMux rdmacm_mux; } RdmaBackendDev; typedef struct RdmaBackendPD { struct ibv_pd *ibpd; } RdmaBackendPD; typedef struct RdmaBackendMR { struct ibv_pd *ibpd; struct ibv_mr *ibmr; } RdmaBackendMR; typedef struct RdmaBackendCQ { RdmaBackendDev *backend_dev; struct ibv_cq *ibcq; } RdmaBackendCQ; typedef struct RdmaBackendQP { struct ibv_pd *ibpd; struct ibv_qp *ibqp; uint8_t sgid_idx; RdmaProtectedGSList cqe_ctx_list; } RdmaBackendQP; #endif
pmp-tool/PMP
src/qemu/src-pmp/tests/tcg/mips/include/test_utils_128.h
/* * Header file for test utilities * * Copyright (C) 2019 Wave Computing, Inc. * Copyright (C) 2019 <NAME> <<EMAIL>> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. * */ #ifndef TEST_UTILS_128_H #define TEST_UTILS_128_H #include <stdio.h> #include <stdint.h> #include <inttypes.h> #include <string.h> #define PRINT_RESULTS 0 static inline int32_t check_results(const char *instruction_name, const uint32_t test_count, const double elapsed_time, const uint64_t *b128_result, const uint64_t *b128_expect) { #if PRINT_RESULTS uint32_t ii; printf("\n"); for (ii = 0; ii < test_count; ii++) { uint64_t a, b; memcpy(&a, (b128_result + 2 * ii), 8); memcpy(&b, (b128_result + 2 * ii + 1), 8); if (ii % 8 != 0) { printf(" { 0x%016llxULL, 0x%016llxULL, },\n", a, b); } else { printf(" { 0x%016llxULL, 0x%016llxULL, }, /* %3d */\n", a, b, ii); } } printf("\n"); #endif uint32_t i; uint32_t pass_count = 0; uint32_t fail_count = 0; printf("%s: ", instruction_name); for (i = 0; i < test_count; i++) { if ((b128_result[2 * i] == b128_expect[2 * i]) && (b128_result[2 * i + 1] == b128_expect[2 * i + 1])) { pass_count++; } else { fail_count++; } } printf("PASS: %3d FAIL: %3d elapsed time: %5.2f ms\n", pass_count, fail_count, elapsed_time); if (fail_count > 0) { return -1; } else { return 0; } } #endif
pmp-tool/PMP
src/qemu/src-pmp/include/sysemu/whpx.h
<filename>src/qemu/src-pmp/include/sysemu/whpx.h<gh_stars>1-10 /* * QEMU Windows Hypervisor Platform accelerator (WHPX) support * * Copyright Microsoft, Corp. 2017 * * Authors: * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. * */ #ifndef QEMU_WHPX_H #define QEMU_WHPX_H #include "qemu-common.h" int whpx_init_vcpu(CPUState *cpu); int whpx_vcpu_exec(CPUState *cpu); void whpx_destroy_vcpu(CPUState *cpu); void whpx_vcpu_kick(CPUState *cpu); void whpx_cpu_synchronize_state(CPUState *cpu); void whpx_cpu_synchronize_post_reset(CPUState *cpu); void whpx_cpu_synchronize_post_init(CPUState *cpu); void whpx_cpu_synchronize_pre_loadvm(CPUState *cpu); #ifdef CONFIG_WHPX int whpx_enabled(void); #else /* CONFIG_WHPX */ #define whpx_enabled() (0) #endif /* CONFIG_WHPX */ #endif /* QEMU_WHPX_H */
pmp-tool/PMP
src/qemu/src-pmp/include/hw/xen/xen-bus.h
/*
 * Copyright (c) 2018 Citrix Systems Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#ifndef HW_XEN_BUS_H
#define HW_XEN_BUS_H

#include "hw/xen/xen_common.h"
#include "hw/sysbus.h"
#include "qemu/notify.h"

/* Callback invoked when a watched xenstore node changes. */
typedef void (*XenWatchHandler)(void *opaque);

typedef struct XenWatch XenWatch;

/*
 * A paravirtual device instance on the xen-bus: tracks the frontend
 * domain, the backend/frontend xenstore paths and states, the watches
 * on those states, and the grant-table / event-channel handles.
 */
typedef struct XenDevice {
    DeviceState qdev;
    domid_t frontend_id;
    char *name;
    char *backend_path, *frontend_path;
    enum xenbus_state backend_state, frontend_state;
    Notifier exit;
    XenWatch *backend_state_watch, *frontend_state_watch;
    bool backend_online;
    XenWatch *backend_online_watch;
    xengnttab_handle *xgth;
    bool feature_grant_copy;
    xenevtchn_handle *xeh;
    NotifierList event_notifiers;
} XenDevice;

/* Device-class hooks implemented by concrete xen-bus devices. */
typedef char *(*XenDeviceGetName)(XenDevice *xendev, Error **errp);
typedef void (*XenDeviceRealize)(XenDevice *xendev, Error **errp);
typedef void (*XenDeviceFrontendChanged)(XenDevice *xendev,
                                         enum xenbus_state frontend_state,
                                         Error **errp);
typedef void (*XenDeviceUnrealize)(XenDevice *xendev, Error **errp);

typedef struct XenDeviceClass {
    /*< private >*/
    DeviceClass parent_class;
    /*< public >*/
    const char *backend;
    const char *device;
    XenDeviceGetName get_name;
    XenDeviceRealize realize;
    XenDeviceFrontendChanged frontend_changed;
    XenDeviceUnrealize unrealize;
} XenDeviceClass;

#define TYPE_XEN_DEVICE "xen-device"
#define XEN_DEVICE(obj) \
    OBJECT_CHECK(XenDevice, (obj), TYPE_XEN_DEVICE)
#define XEN_DEVICE_CLASS(class) \
    OBJECT_CLASS_CHECK(XenDeviceClass, (class), TYPE_XEN_DEVICE)
#define XEN_DEVICE_GET_CLASS(obj) \
    OBJECT_GET_CLASS(XenDeviceClass, (obj), TYPE_XEN_DEVICE)

/* The bus itself: owns the xenstore connection and the backend watch. */
typedef struct XenBus {
    BusState qbus;
    domid_t backend_id;
    struct xs_handle *xsh;
    NotifierList watch_notifiers;
    XenWatch *backend_watch;
} XenBus;

typedef struct XenBusClass {
    /*< private >*/
    BusClass parent_class;
} XenBusClass;

#define TYPE_XEN_BUS "xen-bus"
#define XEN_BUS(obj) \
    OBJECT_CHECK(XenBus, (obj), TYPE_XEN_BUS)
#define XEN_BUS_CLASS(class) \
    OBJECT_CLASS_CHECK(XenBusClass, (class), TYPE_XEN_BUS)
#define XEN_BUS_GET_CLASS(obj) \
    OBJECT_GET_CLASS(XenBusClass, (obj), TYPE_XEN_BUS)

void xen_bus_init(void);

/* Backend xenbus state accessors (also mirrored into xenstore). */
void xen_device_backend_set_state(XenDevice *xendev,
                                  enum xenbus_state state);
enum xenbus_state xen_device_backend_get_state(XenDevice *xendev);

/* printf/scanf-style access to per-device xenstore keys. */
void xen_device_backend_printf(XenDevice *xendev, const char *key,
                               const char *fmt, ...)
    GCC_FMT_ATTR(3, 4);
void xen_device_frontend_printf(XenDevice *xendev, const char *key,
                                const char *fmt, ...)
    GCC_FMT_ATTR(3, 4);

int xen_device_frontend_scanf(XenDevice *xendev, const char *key,
                              const char *fmt, ...);

/* Grant-table operations: map/unmap foreign pages by grant reference. */
void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
                                   Error **errp);
void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs,
                                unsigned int nr_refs, int prot,
                                Error **errp);
void xen_device_unmap_grant_refs(XenDevice *xendev, void *map,
                                 unsigned int nr_refs, Error **errp);

/* One scatter/gather segment for a grant-copy: each side is either a
 * local virtual address or a (grant ref, offset) pair in the peer. */
typedef struct XenDeviceGrantCopySegment {
    union {
        void *virt;
        struct {
            uint32_t ref;
            off_t offset;
        } foreign;
    } source, dest;
    size_t len;
} XenDeviceGrantCopySegment;

void xen_device_copy_grant_refs(XenDevice *xendev, bool to_domain,
                                XenDeviceGrantCopySegment segs[],
                                unsigned int nr_segs, Error **errp);

typedef struct XenEventChannel XenEventChannel;

typedef void (*XenEventHandler)(void *opaque);

/* Event-channel operations: bind a handler to a frontend port, notify
 * the peer, and unbind again. */
XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev,
                                               unsigned int port,
                                               XenEventHandler handler,
                                               void *opaque, Error **errp);
void xen_device_notify_event_channel(XenDevice *xendev,
                                     XenEventChannel *channel,
                                     Error **errp);
void xen_device_unbind_event_channel(XenDevice *xendev,
                                     XenEventChannel *channel,
                                     Error **errp);

#endif /* HW_XEN_BUS_H */
pmp-tool/PMP
src/qemu/src-pmp/tests/nvme-test.c
/*
 * QTest testcase for NVMe
 *
 * Copyright (c) 2014 SUSE LINUX Products GmbH
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "libqtest.h"
#include "libqos/qgraph.h"
#include "libqos/pci.h"

typedef struct QNvme QNvme;

/* qgraph node wrapping the NVMe controller's PCI device. */
struct QNvme {
    QOSGraphObject obj;
    QPCIDevice dev;
};

/* qgraph driver lookup: the only interface exposed is "pci-device". */
static void *nvme_get_driver(void *obj, const char *interface)
{
    QNvme *nvme = obj;

    if (!g_strcmp0(interface, "pci-device")) {
        return &nvme->dev;
    }

    fprintf(stderr, "%s not present in nvme\n", interface);
    g_assert_not_reached();
}

/* qgraph constructor: initialize the PCI device at the given address. */
static void *nvme_create(void *pci_bus, QGuestAllocator *alloc, void *addr)
{
    QNvme *nvme = g_new0(QNvme, 1);
    QPCIBus *bus = pci_bus;

    qpci_device_init(&nvme->dev, bus, addr);
    nvme->obj.get_driver = nvme_get_driver;

    return &nvme->obj;
}

/* This used to cause a NULL pointer dereference. */
static void nvmetest_oob_cmb_test(void *obj, void *data, QGuestAllocator *alloc)
{
    const int cmb_bar_size = 2 * MiB;
    QNvme *nvme = obj;
    QPCIDevice *pdev = &nvme->dev;
    QPCIBar bar;

    qpci_device_enable(pdev);
    /* BAR 2 is the Controller Memory Buffer (cmb_size_mb=2 below). */
    bar = qpci_iomap(pdev, 2, NULL);

    /* In-bounds write, then narrow readbacks of the low bytes. */
    qpci_io_writel(pdev, bar, 0, 0xccbbaa99);
    g_assert_cmpint(qpci_io_readb(pdev, bar, 0), ==, 0x99);
    g_assert_cmpint(qpci_io_readw(pdev, bar, 0), ==, 0xaa99);

    /* Test partially out-of-bounds accesses: only the last in-bounds
     * byte should be stored; wider reads must not see the full value. */
    qpci_io_writel(pdev, bar, cmb_bar_size - 1, 0x44332211);
    g_assert_cmpint(qpci_io_readb(pdev, bar, cmb_bar_size - 1), ==, 0x11);
    g_assert_cmpint(qpci_io_readw(pdev, bar, cmb_bar_size - 1), !=, 0x2211);
    g_assert_cmpint(qpci_io_readl(pdev, bar, cmb_bar_size - 1), !=,
                    0x44332211);
}

/* Register the nvme qgraph node, its bus edge, and the CMB OOB test. */
static void nvme_register_nodes(void)
{
    QOSGraphEdgeOptions opts = {
        .extra_device_opts = "addr=04.0,drive=drv0,serial=foo",
        .before_cmd_line = "-drive id=drv0,if=none,file=null-co://,format=raw",
    };

    add_qpci_address(&opts, &(QPCIAddress) { .devfn = QPCI_DEVFN(4, 0) });

    qos_node_create_driver("nvme", nvme_create);
    qos_node_consumes("nvme", "pci-bus", &opts);
    qos_node_produces("nvme", "pci-device");

    qos_add_test("oob-cmb-access", "nvme", nvmetest_oob_cmb_test,
                 &(QOSGraphTestOptions) {
        .edge.extra_device_opts = "cmb_size_mb=2"
    });
}

libqos_init(nvme_register_nodes);
pmp-tool/PMP
src/qemu/src-pmp/tests/tcg/mips/user/isa/mips64r6/int-multiply/test_mips64r6_dmul.c
<gh_stars>1-10 /* * Test program for MIPS64R6 instruction DMUL * * Copyright (C) 2019 Wave Computing, Inc. * Copyright (C) 2019 <NAME> <<EMAIL>> * * This program is free software: you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation, either version 2 of the License, or * (at your option) any later version. * * This program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with this program. If not, see <https://www.gnu.org/licenses/>. * */ #include <sys/time.h> #include <stdint.h> #include "../../../../include/wrappers_mips64r6.h" #include "../../../../include/test_inputs_64.h" #include "../../../../include/test_utils_64.h" #define TEST_COUNT_TOTAL (PATTERN_INPUTS_64_COUNT + RANDOM_INPUTS_64_COUNT) int32_t main(void) { char *instruction_name = "DMUL"; int32_t ret; uint32_t i, j; struct timeval start, end; double elapsed_time; uint64_t b64_result[TEST_COUNT_TOTAL]; uint64_t b64_expect[TEST_COUNT_TOTAL] = { 0x0000000000000001ULL, /* 0 */ 0x0000000000000000ULL, 0x5555555555555556ULL, 0xaaaaaaaaaaaaaaabULL, 0x3333333333333334ULL, 0xcccccccccccccccdULL, 0x1c71c71c71c71c72ULL, 0xe38e38e38e38e38fULL, 0x0000000000000000ULL, /* 8 */ 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0000000000000000ULL, 0x0000000000000000ULL, 0x5555555555555556ULL, /* 16 */ 0x0000000000000000ULL, 0x38e38e38e38e38e4ULL, 0x1c71c71c71c71c72ULL, 0x7777777777777778ULL, 0xdddddddddddddddeULL, 0x12f684bda12f684cULL, 0x425ed097b425ed0aULL, 0xaaaaaaaaaaaaaaabULL, /* 24 */ 0x0000000000000000ULL, 0x1c71c71c71c71c72ULL, 0x8e38e38e38e38e39ULL, 0xbbbbbbbbbbbbbbbcULL, 0xeeeeeeeeeeeeeeefULL, 
0x097b425ed097b426ULL, 0xa12f684bda12f685ULL, 0x3333333333333334ULL, /* 32 */ 0x0000000000000000ULL, 0x7777777777777778ULL, 0xbbbbbbbbbbbbbbbcULL, 0xf5c28f5c28f5c290ULL, 0x3d70a3d70a3d70a4ULL, 0x7d27d27d27d27d28ULL, 0xb60b60b60b60b60cULL, 0xcccccccccccccccdULL, /* 40 */ 0x0000000000000000ULL, 0xdddddddddddddddeULL, 0xeeeeeeeeeeeeeeefULL, 0x3d70a3d70a3d70a4ULL, 0x8f5c28f5c28f5c29ULL, 0x9f49f49f49f49f4aULL, 0x2d82d82d82d82d83ULL, 0x1c71c71c71c71c72ULL, /* 48 */ 0x0000000000000000ULL, 0x12f684bda12f684cULL, 0x097b425ed097b426ULL, 0x7d27d27d27d27d28ULL, 0x9f49f49f49f49f4aULL, 0xb0fcd6e9e06522c4ULL, 0x6b74f0329161f9aeULL, 0xe38e38e38e38e38fULL, /* 56 */ 0x0000000000000000ULL, 0x425ed097b425ed0aULL, 0xa12f684bda12f685ULL, 0xb60b60b60b60b60cULL, 0x2d82d82d82d82d83ULL, 0x6b74f0329161f9aeULL, 0x781948b0fcd6e9e1ULL, 0xad45be6961639000ULL, /* 64 */ 0xefa7a5a0e7176a00ULL, 0x08c6139fc4346000ULL, 0xfbe1883aee787980ULL, 0xefa7a5a0e7176a00ULL, 0x37ae2b38fded7040ULL, 0x6acb3d68be6cdc00ULL, 0xedbf72842143b470ULL, 0x08c6139fc4346000ULL, /* 72 */ 0x6acb3d68be6cdc00ULL, 0x8624e5e1e5044000ULL, 0x76a5ab8089e38100ULL, 0xfbe1883aee787980ULL, 0xedbf72842143b470ULL, 0x76a5ab8089e38100ULL, 0x4bb436d5b1e9cfc4ULL, }; gettimeofday(&start, NULL); for (i = 0; i < PATTERN_INPUTS_64_SHORT_COUNT; i++) { for (j = 0; j < PATTERN_INPUTS_64_SHORT_COUNT; j++) { do_mips64r6_DMUL(b64_pattern + i, b64_pattern + j, b64_result + (PATTERN_INPUTS_64_SHORT_COUNT * i + j)); } } for (i = 0; i < RANDOM_INPUTS_64_SHORT_COUNT; i++) { for (j = 0; j < RANDOM_INPUTS_64_SHORT_COUNT; j++) { do_mips64r6_DMUL(b64_random + i, b64_random + j, b64_result + (((PATTERN_INPUTS_64_SHORT_COUNT) * (PATTERN_INPUTS_64_SHORT_COUNT)) + RANDOM_INPUTS_64_SHORT_COUNT * i + j)); } } gettimeofday(&end, NULL); elapsed_time = (end.tv_sec - start.tv_sec) * 1000.0; elapsed_time += (end.tv_usec - start.tv_usec) / 1000.0; ret = check_results_64(instruction_name, TEST_COUNT_TOTAL, elapsed_time, b64_result, b64_expect); return ret; }
pmp-tool/PMP
src/qemu/src-pmp/include/qemu/osdep.h
<reponame>pmp-tool/PMP /* * OS includes and handling of OS dependencies * * This header exists to pull in some common system headers that * most code in QEMU will want, and to fix up some possible issues with * it (missing defines, Windows weirdness, and so on). * * To avoid getting into possible circular include dependencies, this * file should not include any other QEMU headers, with the exceptions * of config-host.h, config-target.h, qemu/compiler.h, * sysemu/os-posix.h, sysemu/os-win32.h, glib-compat.h and * qemu/typedefs.h, all of which are doing a similar job to this file * and are under similar constraints. * * This header also contains prototypes for functions defined in * os-*.c and util/oslib-*.c; those would probably be better split * out into separate header files. * * In an ideal world this header would contain only: * (1) things which everybody needs * (2) things without which code would work on most platforms but * fail to compile or misbehave on a minority of host OSes * * This work is licensed under the terms of the GNU GPL, version 2 or later. * See the COPYING file in the top-level directory. */ #ifndef QEMU_OSDEP_H #define QEMU_OSDEP_H #include "config-host.h" #ifdef NEED_CPU_H #include "config-target.h" #else #include "exec/poison.h" #endif #ifdef __COVERITY__ /* Coverity does not like the new _Float* types that are used by * recent glibc, and croaks on every single file that includes * stdlib.h. These typedefs are enough to please it. * * Note that these fix parse errors so they cannot be placed in * scripts/coverity-model.c. */ typedef float _Float32; typedef double _Float32x; typedef double _Float64; typedef __float80 _Float64x; typedef __float128 _Float128; #endif #include "qemu/compiler.h" /* Older versions of C++ don't get definitions of various macros from * stdlib.h unless we define these macros before first inclusion of * that system header. 
*/ #ifndef __STDC_CONSTANT_MACROS #define __STDC_CONSTANT_MACROS #endif #ifndef __STDC_LIMIT_MACROS #define __STDC_LIMIT_MACROS #endif #ifndef __STDC_FORMAT_MACROS #define __STDC_FORMAT_MACROS #endif /* The following block of code temporarily renames the daemon() function so the * compiler does not see the warning associated with it in stdlib.h on OSX */ #ifdef __APPLE__ #define daemon qemu_fake_daemon_function #include <stdlib.h> #undef daemon extern int daemon(int, int); #endif #ifdef _WIN32 /* as defined in sdkddkver.h */ #ifndef _WIN32_WINNT #define _WIN32_WINNT 0x0600 /* Vista */ #endif /* reduces the number of implicitly included headers */ #ifndef WIN32_LEAN_AND_MEAN #define WIN32_LEAN_AND_MEAN #endif #endif #include <stdarg.h> #include <stddef.h> #include <stdbool.h> #include <stdint.h> #include <sys/types.h> #include <stdlib.h> /* enable C99/POSIX format strings (needs mingw32-runtime 3.15 or later) */ #ifdef __MINGW32__ #define __USE_MINGW_ANSI_STDIO 1 #endif #include <stdio.h> #include <string.h> #include <strings.h> #include <inttypes.h> #include <limits.h> /* Put unistd.h before time.h as that triggers localtime_r/gmtime_r * function availability on recentish Mingw-w64 platforms. */ #include <unistd.h> #include <time.h> #include <ctype.h> #include <errno.h> #include <fcntl.h> #include <getopt.h> #include <sys/stat.h> #include <sys/time.h> #include <assert.h> /* setjmp must be declared before sysemu/os-win32.h * because it is redefined there. 
*/ #include <setjmp.h> #include <signal.h> #ifdef __OpenBSD__ #include <sys/signal.h> #endif #ifndef _WIN32 #include <sys/wait.h> #else #define WIFEXITED(x) 1 #define WEXITSTATUS(x) (x) #endif #ifdef _WIN32 #include "sysemu/os-win32.h" #endif #ifdef CONFIG_POSIX #include "sysemu/os-posix.h" #endif #include "glib-compat.h" #include "qemu/typedefs.h" /* * For mingw, as of v6.0.0, the function implementing the assert macro is * not marked as noreturn, so the compiler cannot delete code following an * assert(false) as unused. We rely on this within the code base to delete * code that is unreachable when features are disabled. * All supported versions of Glib's g_assert() satisfy this requirement. */ #ifdef __MINGW32__ #undef assert #define assert(x) g_assert(x) #endif /* * According to waitpid man page: * WCOREDUMP * This macro is not specified in POSIX.1-2001 and is not * available on some UNIX implementations (e.g., AIX, SunOS). * Therefore, enclose its use inside #ifdef WCOREDUMP ... #endif. */ #ifndef WCOREDUMP #define WCOREDUMP(status) 0 #endif /* * We have a lot of unaudited code that may fail in strange ways, or * even be a security risk during migration, if you disable assertions * at compile-time. You may comment out these safety checks if you * absolutely want to disable assertion overhead, but it is not * supported upstream so the risk is all yours. Meanwhile, please * submit patches to remove any side-effects inside an assertion, or * fixing error handling that should use Error instead of assert. 
*/ #ifdef NDEBUG #error building with NDEBUG is not supported #endif #ifdef G_DISABLE_ASSERT #error building with G_DISABLE_ASSERT is not supported #endif #ifndef O_LARGEFILE #define O_LARGEFILE 0 #endif #ifndef O_BINARY #define O_BINARY 0 #endif #ifndef MAP_ANONYMOUS #define MAP_ANONYMOUS MAP_ANON #endif #ifndef ENOMEDIUM #define ENOMEDIUM ENODEV #endif #if !defined(ENOTSUP) #define ENOTSUP 4096 #endif #if !defined(ECANCELED) #define ECANCELED 4097 #endif #if !defined(EMEDIUMTYPE) #define EMEDIUMTYPE 4098 #endif #if !defined(ESHUTDOWN) #define ESHUTDOWN 4099 #endif /* time_t may be either 32 or 64 bits depending on the host OS, and * can be either signed or unsigned, so we can't just hardcode a * specific maximum value. This is not a C preprocessor constant, * so you can't use TIME_MAX in an #ifdef, but for our purposes * this isn't a problem. */ /* The macros TYPE_SIGNED, TYPE_WIDTH, and TYPE_MAXIMUM are from * Gnulib, and are under the LGPL v2.1 or (at your option) any * later version. */ /* True if the real type T is signed. */ #define TYPE_SIGNED(t) (!((t)0 < (t)-1)) /* The width in bits of the integer type or expression T. * Padding bits are not supported. */ #define TYPE_WIDTH(t) (sizeof(t) * CHAR_BIT) /* The maximum and minimum values for the integer type T. */ #define TYPE_MAXIMUM(t) \ ((t) (!TYPE_SIGNED(t) \ ? (t)-1 \ : ((((t)1 << (TYPE_WIDTH(t) - 2)) - 1) * 2 + 1))) #ifndef TIME_MAX #define TIME_MAX TYPE_MAXIMUM(time_t) #endif /* HOST_LONG_BITS is the size of a native pointer in bits. */ #if UINTPTR_MAX == UINT32_MAX # define HOST_LONG_BITS 32 #elif UINTPTR_MAX == UINT64_MAX # define HOST_LONG_BITS 64 #else # error Unknown pointer size #endif /* Mac OSX has a <stdint.h> bug that incorrectly defines SIZE_MAX with * the wrong type. Our replacement isn't usable in preprocessor * expressions, but it is sufficient for our needs. 
*/ #if defined(HAVE_BROKEN_SIZE_MAX) && HAVE_BROKEN_SIZE_MAX #undef SIZE_MAX #define SIZE_MAX ((size_t)-1) #endif #ifndef MIN #define MIN(a, b) (((a) < (b)) ? (a) : (b)) #endif #ifndef MAX #define MAX(a, b) (((a) > (b)) ? (a) : (b)) #endif /* Minimum function that returns zero only iff both values are zero. * Intended for use with unsigned values only. */ #ifndef MIN_NON_ZERO #define MIN_NON_ZERO(a, b) ((a) == 0 ? (b) : \ ((b) == 0 ? (a) : (MIN(a, b)))) #endif /* Round number down to multiple */ #define QEMU_ALIGN_DOWN(n, m) ((n) / (m) * (m)) /* Round number up to multiple. Safe when m is not a power of 2 (see * ROUND_UP for a faster version when a power of 2 is guaranteed) */ #define QEMU_ALIGN_UP(n, m) QEMU_ALIGN_DOWN((n) + (m) - 1, (m)) /* Check if n is a multiple of m */ #define QEMU_IS_ALIGNED(n, m) (((n) % (m)) == 0) /* n-byte align pointer down */ #define QEMU_ALIGN_PTR_DOWN(p, n) \ ((typeof(p))QEMU_ALIGN_DOWN((uintptr_t)(p), (n))) /* n-byte align pointer up */ #define QEMU_ALIGN_PTR_UP(p, n) \ ((typeof(p))QEMU_ALIGN_UP((uintptr_t)(p), (n))) /* Check if pointer p is n-bytes aligned */ #define QEMU_PTR_IS_ALIGNED(p, n) QEMU_IS_ALIGNED((uintptr_t)(p), (n)) /* Round number up to multiple. Requires that d be a power of 2 (see * QEMU_ALIGN_UP for a safer but slower version on arbitrary * numbers); works even if d is a smaller type than n. */ #ifndef ROUND_UP #define ROUND_UP(n, d) (((n) + (d) - 1) & -(0 ? (n) : (d))) #endif #ifndef DIV_ROUND_UP #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d)) #endif /* * &(x)[0] is always a pointer - if it's same type as x then the argument is a * pointer, not an array. 
*/ #define QEMU_IS_ARRAY(x) (!__builtin_types_compatible_p(typeof(x), \ typeof(&(x)[0]))) #ifndef ARRAY_SIZE #define ARRAY_SIZE(x) ((sizeof(x) / sizeof((x)[0])) + \ QEMU_BUILD_BUG_ON_ZERO(!QEMU_IS_ARRAY(x))) #endif int qemu_daemon(int nochdir, int noclose); void *qemu_try_memalign(size_t alignment, size_t size); void *qemu_memalign(size_t alignment, size_t size); void *qemu_anon_ram_alloc(size_t size, uint64_t *align, bool shared); void qemu_vfree(void *ptr); void qemu_anon_ram_free(void *ptr, size_t size); #define QEMU_MADV_INVALID -1 #if defined(CONFIG_MADVISE) #define QEMU_MADV_WILLNEED MADV_WILLNEED #define QEMU_MADV_DONTNEED MADV_DONTNEED #ifdef MADV_DONTFORK #define QEMU_MADV_DONTFORK MADV_DONTFORK #else #define QEMU_MADV_DONTFORK QEMU_MADV_INVALID #endif #ifdef MADV_MERGEABLE #define QEMU_MADV_MERGEABLE MADV_MERGEABLE #else #define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID #endif #ifdef MADV_UNMERGEABLE #define QEMU_MADV_UNMERGEABLE MADV_UNMERGEABLE #else #define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID #endif #ifdef MADV_DODUMP #define QEMU_MADV_DODUMP MADV_DODUMP #else #define QEMU_MADV_DODUMP QEMU_MADV_INVALID #endif #ifdef MADV_DONTDUMP #define QEMU_MADV_DONTDUMP MADV_DONTDUMP #else #define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID #endif #ifdef MADV_HUGEPAGE #define QEMU_MADV_HUGEPAGE MADV_HUGEPAGE #else #define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID #endif #ifdef MADV_NOHUGEPAGE #define QEMU_MADV_NOHUGEPAGE MADV_NOHUGEPAGE #else #define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID #endif #ifdef MADV_REMOVE #define QEMU_MADV_REMOVE MADV_REMOVE #else #define QEMU_MADV_REMOVE QEMU_MADV_INVALID #endif #elif defined(CONFIG_POSIX_MADVISE) #define QEMU_MADV_WILLNEED POSIX_MADV_WILLNEED #define QEMU_MADV_DONTNEED POSIX_MADV_DONTNEED #define QEMU_MADV_DONTFORK QEMU_MADV_INVALID #define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID #define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID #define QEMU_MADV_DODUMP QEMU_MADV_INVALID #define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID #define 
QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID #define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID #define QEMU_MADV_REMOVE QEMU_MADV_INVALID #else /* no-op */ #define QEMU_MADV_WILLNEED QEMU_MADV_INVALID #define QEMU_MADV_DONTNEED QEMU_MADV_INVALID #define QEMU_MADV_DONTFORK QEMU_MADV_INVALID #define QEMU_MADV_MERGEABLE QEMU_MADV_INVALID #define QEMU_MADV_UNMERGEABLE QEMU_MADV_INVALID #define QEMU_MADV_DODUMP QEMU_MADV_INVALID #define QEMU_MADV_DONTDUMP QEMU_MADV_INVALID #define QEMU_MADV_HUGEPAGE QEMU_MADV_INVALID #define QEMU_MADV_NOHUGEPAGE QEMU_MADV_INVALID #define QEMU_MADV_REMOVE QEMU_MADV_INVALID #endif #ifdef _WIN32 #define HAVE_CHARDEV_SERIAL 1 #elif defined(__linux__) || defined(__sun__) || defined(__FreeBSD__) \ || defined(__NetBSD__) || defined(__OpenBSD__) || defined(__DragonFly__) \ || defined(__GLIBC__) #define HAVE_CHARDEV_SERIAL 1 #endif #if defined(__linux__) || defined(__FreeBSD__) || \ defined(__FreeBSD_kernel__) || defined(__DragonFly__) #define HAVE_CHARDEV_PARPORT 1 #endif #if defined(CONFIG_LINUX) #ifndef BUS_MCEERR_AR #define BUS_MCEERR_AR 4 #endif #ifndef BUS_MCEERR_AO #define BUS_MCEERR_AO 5 #endif #endif #if defined(__linux__) && \ (defined(__x86_64__) || defined(__arm__) || defined(__aarch64__) \ || defined(__powerpc64__)) /* Use 2 MiB alignment so transparent hugepages can be used by KVM. Valgrind does not support alignments larger than 1 MiB, therefore we need special code which handles running on Valgrind. */ # define QEMU_VMALLOC_ALIGN (512 * 4096) #elif defined(__linux__) && defined(__s390x__) /* Use 1 MiB (segment size) alignment so gmap can be used by KVM. 
*/ # define QEMU_VMALLOC_ALIGN (256 * 4096) #elif defined(__linux__) && defined(__sparc__) #include <sys/shm.h> # define QEMU_VMALLOC_ALIGN MAX(getpagesize(), SHMLBA) #else # define QEMU_VMALLOC_ALIGN getpagesize() #endif #ifdef CONFIG_POSIX struct qemu_signalfd_siginfo { uint32_t ssi_signo; /* Signal number */ int32_t ssi_errno; /* Error number (unused) */ int32_t ssi_code; /* Signal code */ uint32_t ssi_pid; /* PID of sender */ uint32_t ssi_uid; /* Real UID of sender */ int32_t ssi_fd; /* File descriptor (SIGIO) */ uint32_t ssi_tid; /* Kernel timer ID (POSIX timers) */ uint32_t ssi_band; /* Band event (SIGIO) */ uint32_t ssi_overrun; /* POSIX timer overrun count */ uint32_t ssi_trapno; /* Trap number that caused signal */ int32_t ssi_status; /* Exit status or signal (SIGCHLD) */ int32_t ssi_int; /* Integer sent by sigqueue(2) */ uint64_t ssi_ptr; /* Pointer sent by sigqueue(2) */ uint64_t ssi_utime; /* User CPU time consumed (SIGCHLD) */ uint64_t ssi_stime; /* System CPU time consumed (SIGCHLD) */ uint64_t ssi_addr; /* Address that generated signal (for hardware-generated signals) */ uint8_t pad[48]; /* Pad size to 128 bytes (allow for additional fields in the future) */ }; int qemu_signalfd(const sigset_t *mask); void sigaction_invoke(struct sigaction *action, struct qemu_signalfd_siginfo *info); #endif int qemu_madvise(void *addr, size_t len, int advice); int qemu_mprotect_rwx(void *addr, size_t size); int qemu_mprotect_none(void *addr, size_t size); int qemu_open(const char *name, int flags, ...); int qemu_close(int fd); #ifndef _WIN32 int qemu_dup(int fd); #endif int qemu_lock_fd(int fd, int64_t start, int64_t len, bool exclusive); int qemu_unlock_fd(int fd, int64_t start, int64_t len); int qemu_lock_fd_test(int fd, int64_t start, int64_t len, bool exclusive); bool qemu_has_ofd_lock(void); #if defined(__HAIKU__) && defined(__i386__) #define FMT_pid "%ld" #elif defined(WIN64) #define FMT_pid "%" PRId64 #else #define FMT_pid "%d" #endif bool 
qemu_write_pidfile(const char *pidfile, Error **errp); int qemu_get_thread_id(void); #ifndef CONFIG_IOVEC struct iovec { void *iov_base; size_t iov_len; }; /* * Use the same value as Linux for now. */ #define IOV_MAX 1024 ssize_t readv(int fd, const struct iovec *iov, int iov_cnt); ssize_t writev(int fd, const struct iovec *iov, int iov_cnt); #else #include <sys/uio.h> #endif #ifdef _WIN32 static inline void qemu_timersub(const struct timeval *val1, const struct timeval *val2, struct timeval *res) { res->tv_sec = val1->tv_sec - val2->tv_sec; if (val1->tv_usec < val2->tv_usec) { res->tv_sec--; res->tv_usec = val1->tv_usec - val2->tv_usec + 1000 * 1000; } else { res->tv_usec = val1->tv_usec - val2->tv_usec; } } #else #define qemu_timersub timersub #endif void qemu_set_cloexec(int fd); /* Starting on QEMU 2.5, qemu_hw_version() returns "2.5+" by default * instead of QEMU_VERSION, so setting hw_version on MachineClass * is no longer mandatory. * * Do NOT change this string, or it will break compatibility on all * machine classes that don't set hw_version. */ #define QEMU_HW_VERSION "2.5+" /* QEMU "hardware version" setting. Used to replace code that exposed * QEMU_VERSION to guests in the past and need to keep compatibility. * Do not use qemu_hw_version() in new code. */ void qemu_set_hw_version(const char *); const char *qemu_hw_version(void); void fips_set_state(bool requested); bool fips_get_state(void); /* Return a dynamically allocated pathname denoting a file or directory that is * appropriate for storing local state. * * @relative_pathname need not start with a directory separator; one will be * added automatically. * * The caller is responsible for releasing the value returned with g_free() * after use. */ char *qemu_get_local_state_pathname(const char *relative_pathname); /* Find program directory, and save it for later usage with * qemu_get_exec_dir(). * Try OS specific API first, if not working, parse from argv0. 
*/ void qemu_init_exec_dir(const char *argv0); /* Get the saved exec dir. * Caller needs to release the returned string by g_free() */ char *qemu_get_exec_dir(void); /** * qemu_getauxval: * @type: the auxiliary vector key to lookup * * Search the auxiliary vector for @type, returning the value * or 0 if @type is not present. */ unsigned long qemu_getauxval(unsigned long type); void qemu_set_tty_echo(int fd, bool echo); void os_mem_prealloc(int fd, char *area, size_t sz, int smp_cpus, Error **errp); /** * qemu_get_pmem_size: * @filename: path to a pmem file * @errp: pointer to a NULL-initialized error object * * Determine the size of a persistent memory file. Besides supporting files on * DAX file systems, this function also supports Linux devdax character * devices. * * Returns: the size or 0 on failure */ uint64_t qemu_get_pmem_size(const char *filename, Error **errp); /** * qemu_get_pid_name: * @pid: pid of a process * * For given @pid fetch its name. Caller is responsible for * freeing the string when no longer needed. * Returns allocated string on success, NULL on failure. */ char *qemu_get_pid_name(pid_t pid); /** * qemu_fork: * * A version of fork that avoids signal handler race * conditions that can lead to child process getting * signals that are otherwise only expected by the * parent. It also resets all signal handlers to the * default settings. * * Returns 0 to child process, pid number to parent * or -1 on failure. */ pid_t qemu_fork(Error **errp); /* Using intptr_t ensures that qemu_*_page_mask is sign-extended even * when intptr_t is 32-bit and we are aligning a long long. */ extern uintptr_t qemu_real_host_page_size; extern intptr_t qemu_real_host_page_mask; extern int qemu_icache_linesize; extern int qemu_icache_linesize_log; extern int qemu_dcache_linesize; extern int qemu_dcache_linesize_log; /* * After using getopt or getopt_long, if you need to parse another set * of options, then you must reset optind. 
Unfortunately the way to * do this varies between implementations of getopt. */ static inline void qemu_reset_optind(void) { #ifdef HAVE_OPTRESET optind = 1; optreset = 1; #else optind = 0; #endif } #endif
pmp-tool/PMP
src/qemu/src-pmp/tests/virtio-scsi-test.c
/*
 * QTest testcase for VirtIO SCSI
 *
 * Copyright (c) 2014 SUSE LINUX Products GmbH
 * Copyright (c) 2015 Red Hat Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "libqtest.h"
#include "scsi/constants.h"
#include "libqos/libqos-pc.h"
#include "libqos/libqos-spapr.h"
#include "libqos/virtio.h"
#include "libqos/virtio-pci.h"
#include "standard-headers/linux/virtio_ids.h"
#include "standard-headers/linux/virtio_pci.h"
#include "standard-headers/linux/virtio_scsi.h"
#include "libqos/virtio-scsi.h"
#include "libqos/qgraph.h"

#define PCI_SLOT                0x02
#define PCI_FN                  0x00
#define QVIRTIO_SCSI_TIMEOUT_US (1 * 1000 * 1000)
#define MAX_NUM_QUEUES 64

/*
 * Per-test bookkeeping for a virtio-scsi device: the device handle, the
 * number of request queues it advertises, and the virtqueues themselves.
 * The "+ 2" slots hold the control queue (index 0) and event queue
 * (index 1) that precede the request queues in the virtio-scsi layout.
 */
typedef struct {
    QVirtioDevice *dev;
    int num_queues;
    QVirtQueue *vq[MAX_NUM_QUEUES + 2];
} QVirtioSCSIQueues;

/* Guest allocator shared by all helpers; set per-test from t_alloc. */
static QGuestAllocator *alloc;

/* Tear down all virtqueues (control, event, and request) and free vs. */
static void qvirtio_scsi_pci_free(QVirtioSCSIQueues *vs)
{
    int i;

    for (i = 0; i < vs->num_queues + 2; i++) {
        qvirtqueue_cleanup(vs->dev->bus, vs->vq[i], alloc);
    }
    g_free(vs);
}

/*
 * Allocate alloc_size bytes of guest memory and, if data is non-NULL,
 * copy it in.  Returns the guest-physical address; caller frees with
 * guest_free().  (The vs parameter is currently unused.)
 */
static uint64_t qvirtio_scsi_alloc(QVirtioSCSIQueues *vs, size_t alloc_size,
                                   const void *data)
{
    uint64_t addr;

    addr = guest_alloc(alloc, alloc_size);
    if (data) {
        memwrite(addr, data, alloc_size);
    }

    return addr;
}

/*
 * Submit one SCSI command on the first request queue (vq[2]) and wait for
 * completion.  Descriptor order matters and follows the virtio-scsi spec:
 * request header, optional data-out, response header, optional data-in.
 * Returns the virtio response code (VIRTIO_SCSI_S_*, 0 == OK); if resp_out
 * is non-NULL the full response structure is copied back to the caller.
 */
static uint8_t virtio_scsi_do_command(QVirtioSCSIQueues *vs,
                                      const uint8_t *cdb,
                                      const uint8_t *data_in,
                                      size_t data_in_len,
                                      uint8_t *data_out, size_t data_out_len,
                                      struct virtio_scsi_cmd_resp *resp_out)
{
    QVirtQueue *vq;
    struct virtio_scsi_cmd_req req = { { 0 } };
    /* Poison response fields so a missing device write is detectable. */
    struct virtio_scsi_cmd_resp resp = { .response = 0xff, .status = 0xff };
    uint64_t req_addr, resp_addr, data_in_addr = 0, data_out_addr = 0;
    uint8_t response;
    uint32_t free_head;

    vq = vs->vq[2];

    req.lun[0] = 1; /* Select LUN */
    req.lun[1] = 1; /* Select target 1 */
    memcpy(req.cdb, cdb, VIRTIO_SCSI_CDB_SIZE);

    /* XXX: Fix endian if any multi-byte field in req/resp is used */

    /* Add request header */
    req_addr = qvirtio_scsi_alloc(vs, sizeof(req), &req);
    free_head = qvirtqueue_add(vq, req_addr, sizeof(req), false, true);

    if (data_out_len) {
        data_out_addr = qvirtio_scsi_alloc(vs, data_out_len, data_out);
        qvirtqueue_add(vq, data_out_addr, data_out_len, false, true);
    }

    /* Add response header */
    resp_addr = qvirtio_scsi_alloc(vs, sizeof(resp), &resp);
    qvirtqueue_add(vq, resp_addr, sizeof(resp), true, !!data_in_len);

    if (data_in_len) {
        data_in_addr = qvirtio_scsi_alloc(vs, data_in_len, data_in);
        qvirtqueue_add(vq, data_in_addr, data_in_len, true, false);
    }

    qvirtqueue_kick(vs->dev, vq, free_head);
    qvirtio_wait_used_elem(vs->dev, vq, free_head, NULL,
                           QVIRTIO_SCSI_TIMEOUT_US);

    response = readb(resp_addr +
                     offsetof(struct virtio_scsi_cmd_resp, response));

    if (resp_out) {
        memread(resp_addr, resp_out, sizeof(*resp_out));
    }

    /* guest_free() on the 0 sentinel addresses is a no-op. */
    guest_free(alloc, req_addr);
    guest_free(alloc, resp_addr);
    guest_free(alloc, data_in_addr);
    guest_free(alloc, data_out_addr);

    return response;
}

/*
 * Read the queue count from config space, set up every virtqueue, and
 * clear the initial POWER ON unit attention with a TEST UNIT READY so
 * later commands start from a clean state.  Asserts the expected fixed
 * format sense data on the way.
 */
static QVirtioSCSIQueues *qvirtio_scsi_init(QVirtioDevice *dev)
{
    QVirtioSCSIQueues *vs;
    const uint8_t test_unit_ready_cdb[VIRTIO_SCSI_CDB_SIZE] = {};
    struct virtio_scsi_cmd_resp resp;
    int i;

    vs = g_new0(QVirtioSCSIQueues, 1);
    vs->dev = dev;
    vs->num_queues = qvirtio_config_readl(dev, 0);

    g_assert_cmpint(vs->num_queues, <, MAX_NUM_QUEUES);

    for (i = 0; i < vs->num_queues + 2; i++) {
        vs->vq[i] = qvirtqueue_setup(dev, alloc, i);
    }

    /* Clear the POWER ON OCCURRED unit attention */
    g_assert_cmpint(virtio_scsi_do_command(vs, test_unit_ready_cdb,
                                           NULL, 0, NULL, 0, &resp),
                    ==, 0);
    g_assert_cmpint(resp.status, ==, CHECK_CONDITION);
    g_assert_cmpint(resp.sense[0], ==, 0x70); /* Fixed format sense buffer */
    g_assert_cmpint(resp.sense[2], ==, UNIT_ATTENTION);
    g_assert_cmpint(resp.sense[12], ==, 0x29); /* POWER ON */
    g_assert_cmpint(resp.sense[13], ==, 0x00);

    return vs;
}

/* Hot-add then hot-remove a scsi-hd device via QMP. */
static void hotplug(void *obj, void *data, QGuestAllocator *alloc)
{
    qtest_qmp_device_add("scsi-hd", "scsihd", "{'drive': 'drv1'}");
    qtest_qmp_device_del("scsihd");
}

/* Test WRITE SAME with the lba not aligned */
static void test_unaligned_write_same(void *obj, void *data,
                                      QGuestAllocator *t_alloc)
{
    QVirtioSCSI *scsi = obj;
    QVirtioSCSIQueues *vs;
    uint8_t buf1[512] = { 0 };
    uint8_t buf2[512] = { 1 };
    /* WRITE SAME (16) at LBA 0x100, 0x200 blocks. */
    const uint8_t write_same_cdb_1[VIRTIO_SCSI_CDB_SIZE] = {
        0x41, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x02, 0x00
    };
    /* WRITE SAME (16) at LBA 0x100, 0x3300 blocks. */
    const uint8_t write_same_cdb_2[VIRTIO_SCSI_CDB_SIZE] = {
        0x41, 0x00, 0x00, 0x00, 0x00, 0x01, 0x00, 0x33, 0x00, 0x00
    };
    /* As above but with the NDOB bit set (no data-out buffer). */
    const uint8_t write_same_cdb_ndob[VIRTIO_SCSI_CDB_SIZE] = {
        0x41, 0x01, 0x00, 0x00, 0x00, 0x01, 0x00, 0x33, 0x00, 0x00
    };

    alloc = t_alloc;
    vs = qvirtio_scsi_init(scsi->vdev);

    g_assert_cmphex(0, ==,
        virtio_scsi_do_command(vs, write_same_cdb_1, NULL, 0, buf1, 512,
                               NULL));

    g_assert_cmphex(0, ==,
        virtio_scsi_do_command(vs, write_same_cdb_2, NULL, 0, buf2, 512,
                               NULL));

    g_assert_cmphex(0, ==,
        virtio_scsi_do_command(vs, write_same_cdb_ndob, NULL, 0, NULL, 0,
                               NULL));

    qvirtio_scsi_pci_free(vs);
}

/* Add the backing drive the hotplug test attaches to. */
static void *virtio_scsi_hotplug_setup(GString *cmd_line, void *arg)
{
    g_string_append(cmd_line,
                    " -drive id=drv1,if=none,file=null-co://,format=raw");
    return arg;
}

/* Attach a 4k-aligned blkdebug null drive as target 1 / LUN 0. */
static void *virtio_scsi_setup(GString *cmd_line, void *arg)
{
    g_string_append(cmd_line,
                    " -drive file=blkdebug::null-co://,"
                    "if=none,id=dr1,format=raw,file.align=4k "
                    "-device scsi-hd,drive=dr1,lun=0,scsi-id=1");
    return arg;
}

/* Register both tests with the qgraph framework. */
static void register_virtio_scsi_test(void)
{
    QOSGraphTestOptions opts = { };

    opts.before = virtio_scsi_hotplug_setup;
    qos_add_test("hotplug", "virtio-scsi", hotplug, &opts);

    opts.before = virtio_scsi_setup;
    qos_add_test("unaligned-write-same", "virtio-scsi",
                 test_unaligned_write_same, &opts);
}

libqos_init(register_virtio_scsi_test);
pmp-tool/PMP
src/qemu/src-pmp/include/sysemu/balloon.h
/*
 * Balloon
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  <NAME> <<EMAIL>>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_BALLOON_H
#define QEMU_BALLOON_H

#include "qapi/qapi-types-misc.h"

/* Callback invoked to request the balloon device resize guest memory
 * towards @target bytes. */
typedef void (QEMUBalloonEvent)(void *opaque, ram_addr_t target);
/* Callback invoked to fill @info with the device's current balloon state. */
typedef void (QEMUBalloonStatus)(void *opaque, BalloonInfo *info);

/*
 * Register the (single) balloon device's callbacks; @opaque is passed
 * back to both.  Returns 0 on success, negative on failure (e.g. a
 * handler is already registered — confirm against implementation).
 */
int qemu_add_balloon_handler(QEMUBalloonEvent *event_func,
                             QEMUBalloonStatus *stat_func, void *opaque);
/* Unregister the handlers previously registered with @opaque. */
void qemu_remove_balloon_handler(void *opaque);
/* Query whether ballooning is currently inhibited. */
bool qemu_balloon_is_inhibited(void);
/* Increment/decrement the balloon inhibition state (true = inhibit). */
void qemu_balloon_inhibit(bool state);

#endif
pmp-tool/PMP
src/qemu/src-pmp/tests/libqos/sdhci.h
<reponame>pmp-tool/PMP<gh_stars>1-10 /* * libqos driver framework * * Copyright (c) 2018 <NAME> <<EMAIL>> * * This library is free software; you can redistribute it and/or * modify it under the terms of the GNU Lesser General Public * License version 2 as published by the Free Software Foundation. * * This library is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU * Lesser General Public License for more details. * * You should have received a copy of the GNU Lesser General Public * License along with this library; if not, see <http://www.gnu.org/licenses/> */ #ifndef QGRAPH_QSDHCI #define QGRAPH_QSDHCI #include "libqos/qgraph.h" #include "pci.h" typedef struct QSDHCI QSDHCI; typedef struct QSDHCI_MemoryMapped QSDHCI_MemoryMapped; typedef struct QSDHCI_PCI QSDHCI_PCI; typedef struct QSDHCIProperties QSDHCIProperties; /* Properties common to all QSDHCI devices */ struct QSDHCIProperties { uint8_t version; uint8_t baseclock; struct { bool sdma; uint64_t reg; } capab; }; struct QSDHCI { uint16_t (*readw)(QSDHCI *s, uint32_t reg); uint64_t (*readq)(QSDHCI *s, uint32_t reg); void (*writeq)(QSDHCI *s, uint32_t reg, uint64_t val); QSDHCIProperties props; }; /* Memory Mapped implementation of QSDHCI */ struct QSDHCI_MemoryMapped { QOSGraphObject obj; QTestState *qts; QSDHCI sdhci; uint64_t addr; }; /* PCI implementation of QSDHCI */ struct QSDHCI_PCI { QOSGraphObject obj; QPCIDevice dev; QSDHCI sdhci; QPCIBar mem_bar; }; /** * qos_init_sdhci_mm(): external constructor used by all drivers/machines * that "contain" a #QSDHCI_MemoryMapped driver */ void qos_init_sdhci_mm(QSDHCI_MemoryMapped *sdhci, QTestState *qts, uint32_t addr, QSDHCIProperties *common); #endif
pmp-tool/PMP
src/qemu/src-pmp/hw/xen/xen-bus.c
/*
 * Copyright (c) 2018  Citrix Systems Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "qemu/uuid.h"
#include "hw/hw.h"
#include "hw/sysbus.h"
#include "hw/xen/xen.h"
#include "hw/xen/xen-backend.h"
#include "hw/xen/xen-bus.h"
#include "hw/xen/xen-bus-helper.h"
#include "monitor/monitor.h"
#include "qapi/error.h"
#include "qapi/qmp/qdict.h"
#include "sysemu/sysemu.h"
#include "trace.h"

/*
 * Build the xenstore backend path for a device:
 * /local/domain/<backend-domid>/backend/<type>/<frontend-domid>/<name>.
 * The class may override the <type> component via its "backend" field.
 */
static char *xen_device_get_backend_path(XenDevice *xendev)
{
    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
    XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev);
    const char *type = object_get_typename(OBJECT(xendev));
    const char *backend = xendev_class->backend;

    if (!backend) {
        backend = type;
    }

    return g_strdup_printf("/local/domain/%u/backend/%s/%u/%s",
                           xenbus->backend_id, backend, xendev->frontend_id,
                           xendev->name);
}

/*
 * Build the xenstore frontend path for a device:
 * /local/domain/<frontend-domid>/device/<device>/<name>.
 */
static char *xen_device_get_frontend_path(XenDevice *xendev)
{
    XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev);
    const char *type = object_get_typename(OBJECT(xendev));
    const char *device = xendev_class->device;

    if (!device) {
        device = type;
    }

    return g_strdup_printf("/local/domain/%u/device/%s/%s",
                           xendev->frontend_id, device, xendev->name);
}

/*
 * Request an unplug the way the Xen toolstack does: within one xenstore
 * transaction set "online" to 0 and "state" to Closing.  The transaction
 * is retried on EAGAIN (xenstore's optimistic-concurrency conflict).
 */
static void xen_device_unplug(XenDevice *xendev, Error **errp)
{
    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
    const char *type = object_get_typename(OBJECT(xendev));
    Error *local_err = NULL;
    xs_transaction_t tid;

    trace_xen_device_unplug(type, xendev->name);

    /* Mimic the way the Xen toolstack does an unplug */
again:
    tid = xs_transaction_start(xenbus->xsh);
    if (tid == XBT_NULL) {
        error_setg_errno(errp, errno, "failed xs_transaction_start");
        return;
    }

    xs_node_printf(xenbus->xsh, tid, xendev->backend_path, "online",
                   &local_err, "%u", 0);
    if (local_err) {
        goto abort;
    }

    xs_node_printf(xenbus->xsh, tid, xendev->backend_path, "state",
                   &local_err, "%u", XenbusStateClosing);
    if (local_err) {
        goto abort;
    }

    if (!xs_transaction_end(xenbus->xsh, tid, false)) {
        if (errno == EAGAIN) {
            goto again;
        }

        error_setg_errno(errp, errno, "failed xs_transaction_end");
    }

    return;

abort:
    /*
     * We only abort if there is already a failure so ignore any error
     * from ending the transaction.
     */
    xs_transaction_end(xenbus->xsh, tid, true);
    error_propagate(errp, local_err);
}

/* Monitor "info qtree" pretty-printer for devices on this bus. */
static void xen_bus_print_dev(Monitor *mon, DeviceState *dev, int indent)
{
    XenDevice *xendev = XEN_DEVICE(dev);

    monitor_printf(mon, "%*sname = '%s' frontend_id = %u\n",
                   indent, "", xendev->name, xendev->frontend_id);
}

/* Canonical device path: the xenstore backend path. */
static char *xen_bus_get_dev_path(DeviceState *dev)
{
    return xen_device_get_backend_path(XEN_DEVICE(dev));
}

/*
 * A registered xenstore watch.  The token is a generated UUID string used
 * to match incoming watch events (delivered to all notifiers) back to the
 * watch that registered them.
 */
struct XenWatch {
    char *node, *key;
    char *token;
    XenWatchHandler handler;
    void *opaque;
    Notifier notifier;
};

/* Notifier trampoline: fire the handler only if the token matches. */
static void watch_notify(Notifier *n, void *data)
{
    XenWatch *watch = container_of(n, XenWatch, notifier);
    const char *token = data;

    if (!strcmp(watch->token, token)) {
        watch->handler(watch->opaque);
    }
}

/* Allocate a XenWatch with a fresh UUID token; does not register it. */
static XenWatch *new_watch(const char *node, const char *key,
                           XenWatchHandler handler, void *opaque)
{
    XenWatch *watch = g_new0(XenWatch, 1);
    QemuUUID uuid;

    qemu_uuid_generate(&uuid);

    watch->token = qemu_uuid_unparse_strdup(&uuid);
    watch->node = g_strdup(node);
    watch->key = g_strdup(key);
    watch->handler = handler;
    watch->opaque = opaque;
    watch->notifier.notify = watch_notify;

    return watch;
}

/* Free a XenWatch and its owned strings. */
static void free_watch(XenWatch *watch)
{
    g_free(watch->token);
    g_free(watch->key);
    g_free(watch->node);

    g_free(watch);
}

/*
 * Register a watch on <node>/<key> with xenstore and hook it into the
 * bus's notifier list.  On failure the watch is unhooked and freed and
 * NULL is returned.
 */
static XenWatch *xen_bus_add_watch(XenBus *xenbus, const char *node,
                                   const char *key, XenWatchHandler handler,
                                   void *opaque, Error **errp)
{
    XenWatch *watch = new_watch(node, key, handler, opaque);
    Error *local_err = NULL;

    trace_xen_bus_add_watch(watch->node, watch->key, watch->token);

    /* Hook the notifier first so no event can be missed once xenstore
     * starts delivering. */
    notifier_list_add(&xenbus->watch_notifiers, &watch->notifier);

    xs_node_watch(xenbus->xsh, node, key, watch->token, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);

        notifier_remove(&watch->notifier);
        free_watch(watch);

        return NULL;
    }

    return watch;
}

/* Unregister a watch from xenstore and free it. */
static void xen_bus_remove_watch(XenBus *xenbus, XenWatch *watch,
                                 Error **errp)
{
    trace_xen_bus_remove_watch(watch->node, watch->key, watch->token);

    xs_node_unwatch(xenbus->xsh, watch->node, watch->key, watch->token,
                    errp);

    notifier_remove(&watch->notifier);
    free_watch(watch);
}

/*
 * Instantiate a backend device from an existing xenstore area (e.g. one
 * created by the toolstack).  All keys other than the well-known generic
 * ones are read within one transaction (retried on EAGAIN) and handed to
 * xen_backend_device_create() as configuration options.
 */
static void xen_bus_backend_create(XenBus *xenbus, const char *type,
                                   const char *name, char *path,
                                   Error **errp)
{
    xs_transaction_t tid;
    char **key;
    QDict *opts;
    unsigned int i, n;
    Error *local_err = NULL;

    trace_xen_bus_backend_create(type, path);

again:
    tid = xs_transaction_start(xenbus->xsh);
    if (tid == XBT_NULL) {
        error_setg(errp, "failed xs_transaction_start");
        return;
    }

    key = xs_directory(xenbus->xsh, tid, path, &n);
    if (!key) {
        if (!xs_transaction_end(xenbus->xsh, tid, true)) {
            error_setg_errno(errp, errno, "failed xs_transaction_end");
        }
        return;
    }

    opts = qdict_new();
    for (i = 0; i < n; i++) {
        char *val;

        /*
         * Assume anything found in the xenstore backend area, other than
         * the keys created for a generic XenDevice, are parameters
         * to be used to configure the backend.
         */
        if (!strcmp(key[i], "state") ||
            !strcmp(key[i], "online") ||
            !strcmp(key[i], "frontend") ||
            !strcmp(key[i], "frontend-id") ||
            !strcmp(key[i], "hotplug-status"))
            continue;

        if (xs_node_scanf(xenbus->xsh, tid, path, key[i], NULL, "%ms",
                          &val) == 1) {
            qdict_put_str(opts, key[i], val);
            free(val); /* %ms memory comes from malloc(), not glib */
        }
    }

    free(key);

    if (!xs_transaction_end(xenbus->xsh, tid, false)) {
        qobject_unref(opts);

        if (errno == EAGAIN) {
            goto again;
        }

        error_setg_errno(errp, errno, "failed xs_transaction_end");
        return;
    }

    xen_backend_device_create(xenbus, type, name, opts, &local_err);
    qobject_unref(opts);

    if (local_err) {
        error_propagate_prepend(errp, local_err,
                                "failed to create '%s' device '%s': ",
                                type, name);
    }
}

/*
 * Scan backend/<type>/<domid> for device instances still in the
 * Initialising state and create a backend device for each.
 */
static void xen_bus_type_enumerate(XenBus *xenbus, const char *type)
{
    char *domain_path = g_strdup_printf("backend/%s/%u", type, xen_domid);
    char **backend;
    unsigned int i, n;

    trace_xen_bus_type_enumerate(type);

    backend = xs_directory(xenbus->xsh, XBT_NULL, domain_path, &n);
    if (!backend) {
        goto out;
    }

    for (i = 0; i < n; i++) {
        char *backend_path = g_strdup_printf("%s/%s", domain_path,
                                             backend[i]);
        enum xenbus_state backend_state;

        if (xs_node_scanf(xenbus->xsh, XBT_NULL, backend_path, "state",
                          NULL, "%u", &backend_state) != 1)
            backend_state = XenbusStateUnknown;

        if (backend_state == XenbusStateInitialising) {
            Error *local_err = NULL;

            xen_bus_backend_create(xenbus, type, backend[i], backend_path,
                                   &local_err);
            if (local_err) {
                error_report_err(local_err);
            }
        }

        g_free(backend_path);
    }

    free(backend);

out:
    g_free(domain_path);
}

/* Watch handler: enumerate every backend type under "backend". */
static void xen_bus_enumerate(void *opaque)
{
    XenBus *xenbus = opaque;
    char **type;
    unsigned int i, n;

    trace_xen_bus_enumerate();

    type = xs_directory(xenbus->xsh, XBT_NULL, "backend", &n);
    if (!type) {
        return;
    }

    for (i = 0; i < n; i++) {
        xen_bus_type_enumerate(xenbus, type[i]);
    }

    free(type);
}

/*
 * Tear down the bus: drop the enumeration watch, the fd handler and the
 * xenstore connection.  Safe to call from the realize failure path where
 * xsh may still be NULL.
 * NOTE(review): xenbus->xsh is not set to NULL after xs_close(), so a
 * second unrealize would use a stale handle — confirm callers never do.
 */
static void xen_bus_unrealize(BusState *bus, Error **errp)
{
    XenBus *xenbus = XEN_BUS(bus);

    trace_xen_bus_unrealize();

    if (xenbus->backend_watch) {
        xen_bus_remove_watch(xenbus, xenbus->backend_watch, NULL);
        xenbus->backend_watch = NULL;
    }

    if (!xenbus->xsh) {
        return;
    }

    qemu_set_fd_handler(xs_fileno(xenbus->xsh), NULL, NULL, NULL);

    xs_close(xenbus->xsh);
}

/*
 * fd handler: drain one pending xenstore watch event and dispatch it to
 * all registered notifiers keyed by the event's token.
 */
static void xen_bus_watch(void *opaque)
{
    XenBus *xenbus = opaque;
    char **v;
    const char *token;

    g_assert(xenbus->xsh);

    v = xs_check_watch(xenbus->xsh);
    if (!v) {
        return;
    }

    token = v[XS_WATCH_TOKEN];

    trace_xen_bus_watch(token);

    notifier_list_notify(&xenbus->watch_notifiers, (void *)token);

    free(v); /* xs_check_watch() result is a single malloc'd block */
}

/*
 * Bring up the bus: open xenstore, determine our backend domid, install
 * the watch-event fd handler and register a watch on "backend" to
 * enumerate devices created by the toolstack.
 */
static void xen_bus_realize(BusState *bus, Error **errp)
{
    XenBus *xenbus = XEN_BUS(bus);
    unsigned int domid;
    Error *local_err = NULL;

    trace_xen_bus_realize();

    xenbus->xsh = xs_open(0);
    if (!xenbus->xsh) {
        error_setg_errno(errp, errno, "failed xs_open");
        goto fail;
    }

    if (xs_node_scanf(xenbus->xsh, XBT_NULL, "", /* domain root node */
                      "domid", NULL, "%u", &domid) == 1) {
        xenbus->backend_id = domid;
    } else {
        xenbus->backend_id = 0; /* Assume lack of node means dom0 */
    }

    notifier_list_init(&xenbus->watch_notifiers);
    qemu_set_fd_handler(xs_fileno(xenbus->xsh), xen_bus_watch, NULL,
                        xenbus);

    module_call_init(MODULE_INIT_XEN_BACKEND);

    xenbus->backend_watch =
        xen_bus_add_watch(xenbus, "", /* domain root node */
                          "backend", xen_bus_enumerate, xenbus, &local_err);
    if (local_err) {
        /* This need not be treated as a hard error so don't propagate */
        error_reportf_err(local_err,
                          "failed to set up enumeration watch: ");
    }

    return;

fail:
    xen_bus_unrealize(bus, &error_abort);
}

/* HotplugHandler hook: translate an unplug request into the xenstore
 * Closing handshake. */
static void xen_bus_unplug_request(HotplugHandler *hotplug,
                                   DeviceState *dev,
                                   Error **errp)
{
    XenDevice *xendev = XEN_DEVICE(dev);

    xen_device_unplug(xendev, errp);
}

static void xen_bus_class_init(ObjectClass *class, void *data)
{
    BusClass *bus_class = BUS_CLASS(class);
    HotplugHandlerClass *hotplug_class = HOTPLUG_HANDLER_CLASS(class);

    bus_class->print_dev = xen_bus_print_dev;
    bus_class->get_dev_path = xen_bus_get_dev_path;
    bus_class->realize = xen_bus_realize;
    bus_class->unrealize = xen_bus_unrealize;

    hotplug_class->unplug_request = xen_bus_unplug_request;
}

static const TypeInfo xen_bus_type_info = {
    .name = TYPE_XEN_BUS,
    .parent = TYPE_BUS,
    .instance_size = sizeof(XenBus),
    .class_size = sizeof(XenBusClass),
    .class_init = xen_bus_class_init,
    .interfaces = (InterfaceInfo[]) {
        { TYPE_HOTPLUG_HANDLER },
        { }
    },
};

/* printf-style write of <backend_path>/<key>; errors are only reported,
 * not propagated, since callers cannot usefully recover. */
void xen_device_backend_printf(XenDevice *xendev, const char *key,
                               const char *fmt, ...)
{
    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
    Error *local_err = NULL;
    va_list ap;

    g_assert(xenbus->xsh);

    va_start(ap, fmt);
    xs_node_vprintf(xenbus->xsh, XBT_NULL, xendev->backend_path, key,
                    &local_err, fmt, ap);
    va_end(ap);

    if (local_err) {
        error_report_err(local_err);
    }
}

/* scanf-style read of <backend_path>/<key>; returns the number of
 * conversions, as vscanf does. */
static int xen_device_backend_scanf(XenDevice *xendev, const char *key,
                                    const char *fmt, ...)
{
    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
    va_list ap;
    int rc;

    g_assert(xenbus->xsh);

    va_start(ap, fmt);
    rc = xs_node_vscanf(xenbus->xsh, XBT_NULL, xendev->backend_path, key,
                        NULL, fmt, ap);
    va_end(ap);

    return rc;
}

/* Record the new backend state and publish it to xenstore (no-op if
 * unchanged). */
void xen_device_backend_set_state(XenDevice *xendev,
                                  enum xenbus_state state)
{
    const char *type = object_get_typename(OBJECT(xendev));

    if (xendev->backend_state == state) {
        return;
    }

    trace_xen_device_backend_state(type, xendev->name,
                                   xs_strstate(state));

    xendev->backend_state = state;
    xen_device_backend_printf(xendev, "state", "%u", state);
}

enum xenbus_state xen_device_backend_get_state(XenDevice *xendev)
{
    return xendev->backend_state;
}

/* Record and publish the backend "online" flag (no-op if unchanged). */
static void xen_device_backend_set_online(XenDevice *xendev, bool online)
{
    const char *type = object_get_typename(OBJECT(xendev));

    if (xendev->backend_online == online) {
        return;
    }

    trace_xen_device_backend_online(type, xendev->name, online);

    xendev->backend_online = online;
    xen_device_backend_printf(xendev, "online", "%u", online);
}

/*
 * Watch handler for the backend "state"/"online" nodes.  Re-reads both,
 * completes the Closing -> Closed transition when no frontend is
 * connected, and destroys the device once it is offline and closed
 * (or never got past initialisation).
 */
static void xen_device_backend_changed(void *opaque)
{
    XenDevice *xendev = opaque;
    const char *type = object_get_typename(OBJECT(xendev));
    enum xenbus_state state;
    unsigned int online;

    trace_xen_device_backend_changed(type, xendev->name);

    if (xen_device_backend_scanf(xendev, "state", "%u", &state) != 1) {
        state = XenbusStateUnknown;
    }

    xen_device_backend_set_state(xendev, state);

    if (xen_device_backend_scanf(xendev, "online", "%u", &online) != 1) {
        online = 0;
    }

    xen_device_backend_set_online(xendev, !!online);

    /*
     * If the toolstack (or unplug request callback) has set the backend
     * state to Closing, but there is no active frontend (i.e. the
     * state is not Connected) then set the backend state to Closed.
     */
    if (xendev->backend_state == XenbusStateClosing &&
        xendev->frontend_state != XenbusStateConnected) {
        xen_device_backend_set_state(xendev, XenbusStateClosed);
    }

    /*
     * If a backend is still 'online' then we should leave it alone but,
     * if a backend is not 'online', then the device should be destroyed
     * once the state is Closed.
     */
    if (!xendev->backend_online &&
        (xendev->backend_state == XenbusStateClosed ||
         xendev->backend_state == XenbusStateInitialising ||
         xendev->backend_state == XenbusStateInitWait ||
         xendev->backend_state == XenbusStateUnknown)) {
        Error *local_err = NULL;

        if (!xen_backend_try_device_destroy(xendev, &local_err)) {
            object_unparent(OBJECT(xendev));
        }

        if (local_err) {
            error_report_err(local_err);
        }
    }
}

/*
 * Create the device's backend xenstore area (backend-writable,
 * frontend-readable) and register watches on its "state" and "online"
 * nodes.
 */
static void xen_device_backend_create(XenDevice *xendev, Error **errp)
{
    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
    struct xs_permissions perms[2];
    Error *local_err = NULL;

    xendev->backend_path = xen_device_get_backend_path(xendev);

    perms[0].id = xenbus->backend_id;
    perms[0].perms = XS_PERM_NONE;
    perms[1].id = xendev->frontend_id;
    perms[1].perms = XS_PERM_READ;

    g_assert(xenbus->xsh);

    xs_node_create(xenbus->xsh, XBT_NULL, xendev->backend_path, perms,
                   ARRAY_SIZE(perms), &local_err);
    if (local_err) {
        error_propagate_prepend(errp, local_err,
                                "failed to create backend: ");
        return;
    }

    xendev->backend_state_watch =
        xen_bus_add_watch(xenbus, xendev->backend_path,
                          "state", xen_device_backend_changed,
                          xendev, &local_err);
    if (local_err) {
        error_propagate_prepend(errp, local_err,
                                "failed to watch backend state: ");
        return;
    }

    xendev->backend_online_watch =
        xen_bus_add_watch(xenbus, xendev->backend_path,
                          "online", xen_device_backend_changed,
                          xendev, &local_err);
    if (local_err) {
        error_propagate_prepend(errp, local_err,
                                "failed to watch backend online: ");
        return;
    }
}

/* Remove backend watches and delete the backend xenstore area. */
static void xen_device_backend_destroy(XenDevice *xendev)
{
    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
    Error *local_err = NULL;

    if (xendev->backend_online_watch) {
        xen_bus_remove_watch(xenbus, xendev->backend_online_watch, NULL);
        xendev->backend_online_watch = NULL;
    }

    if (xendev->backend_state_watch) {
        xen_bus_remove_watch(xenbus, xendev->backend_state_watch, NULL);
        xendev->backend_state_watch = NULL;
    }

    if (!xendev->backend_path) {
        return;
    }

    g_assert(xenbus->xsh);

    xs_node_destroy(xenbus->xsh, XBT_NULL, xendev->backend_path,
                    &local_err);
    g_free(xendev->backend_path);
    xendev->backend_path = NULL;

    if (local_err) {
        error_report_err(local_err);
    }
}

/* printf-style write of <frontend_path>/<key>; errors only reported. */
void xen_device_frontend_printf(XenDevice *xendev, const char *key,
                                const char *fmt, ...)
{
    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
    Error *local_err = NULL;
    va_list ap;

    g_assert(xenbus->xsh);

    va_start(ap, fmt);
    xs_node_vprintf(xenbus->xsh, XBT_NULL, xendev->frontend_path, key,
                    &local_err, fmt, ap);
    va_end(ap);

    if (local_err) {
        error_report_err(local_err);
    }
}

/* scanf-style read of <frontend_path>/<key>; returns conversion count. */
int xen_device_frontend_scanf(XenDevice *xendev, const char *key,
                              const char *fmt, ...)
{
    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
    va_list ap;
    int rc;

    g_assert(xenbus->xsh);

    va_start(ap, fmt);
    rc = xs_node_vscanf(xenbus->xsh, XBT_NULL, xendev->frontend_path, key,
                        NULL, fmt, ap);
    va_end(ap);

    return rc;
}

/* Record and publish the frontend state (no-op if unchanged). */
static void xen_device_frontend_set_state(XenDevice *xendev,
                                          enum xenbus_state state)
{
    const char *type = object_get_typename(OBJECT(xendev));

    if (xendev->frontend_state == state) {
        return;
    }

    trace_xen_device_frontend_state(type, xendev->name,
                                    xs_strstate(state));

    xendev->frontend_state = state;
    xen_device_frontend_printf(xendev, "state", "%u", state);
}

/*
 * Watch handler for the frontend "state" node.  Handles a frontend
 * re-initialising against a closed-but-online backend, then forwards
 * the new state to the device class's frontend_changed hook.
 */
static void xen_device_frontend_changed(void *opaque)
{
    XenDevice *xendev = opaque;
    XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev);
    const char *type = object_get_typename(OBJECT(xendev));
    enum xenbus_state state;

    trace_xen_device_frontend_changed(type, xendev->name);

    if (xen_device_frontend_scanf(xendev, "state", "%u", &state) != 1) {
        state = XenbusStateUnknown;
    }

    xen_device_frontend_set_state(xendev, state);

    if (state == XenbusStateInitialising &&
        xendev->backend_state == XenbusStateClosed &&
        xendev->backend_online) {
        /*
         * The frontend is re-initializing so switch back to
         * InitWait.
         */
        xen_device_backend_set_state(xendev, XenbusStateInitWait);
        return;
    }

    if (xendev_class->frontend_changed) {
        Error *local_err = NULL;

        xendev_class->frontend_changed(xendev, state, &local_err);

        if (local_err) {
            error_reportf_err(local_err, "frontend change error: ");
        }
    }
}

/*
 * Create the device's frontend xenstore area (frontend-owned,
 * backend read/write) and register a watch on its "state" node.
 */
static void xen_device_frontend_create(XenDevice *xendev, Error **errp)
{
    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
    struct xs_permissions perms[2];
    Error *local_err = NULL;

    xendev->frontend_path = xen_device_get_frontend_path(xendev);

    perms[0].id = xendev->frontend_id;
    perms[0].perms = XS_PERM_NONE;
    perms[1].id = xenbus->backend_id;
    perms[1].perms = XS_PERM_READ | XS_PERM_WRITE;

    g_assert(xenbus->xsh);

    xs_node_create(xenbus->xsh, XBT_NULL, xendev->frontend_path, perms,
                   ARRAY_SIZE(perms), &local_err);
    if (local_err) {
        error_propagate_prepend(errp, local_err,
                                "failed to create frontend: ");
        return;
    }

    xendev->frontend_state_watch =
        xen_bus_add_watch(xenbus, xendev->frontend_path, "state",
                          xen_device_frontend_changed, xendev, &local_err);
    if (local_err) {
        error_propagate_prepend(errp, local_err,
                                "failed to watch frontend state: ");
    }
}

/* Remove the frontend watch and delete the frontend xenstore area. */
static void xen_device_frontend_destroy(XenDevice *xendev)
{
    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
    Error *local_err = NULL;

    if (xendev->frontend_state_watch) {
        xen_bus_remove_watch(xenbus, xendev->frontend_state_watch, NULL);
        xendev->frontend_state_watch = NULL;
    }

    if (!xendev->frontend_path) {
        return;
    }

    g_assert(xenbus->xsh);

    xs_node_destroy(xenbus->xsh, XBT_NULL, xendev->frontend_path,
                    &local_err);
    g_free(xendev->frontend_path);
    xendev->frontend_path = NULL;

    if (local_err) {
        error_report_err(local_err);
    }
}

/* Raise the cap on simultaneously mapped grant refs for this device. */
void xen_device_set_max_grant_refs(XenDevice *xendev, unsigned int nr_refs,
                                   Error **errp)
{
    if (xengnttab_set_max_grants(xendev->xgth, nr_refs)) {
        error_setg_errno(errp, errno, "xengnttab_set_max_grants failed");
    }
}

/* Map nr_refs frontend grant references into our address space. */
void *xen_device_map_grant_refs(XenDevice *xendev, uint32_t *refs,
                                unsigned int nr_refs, int prot,
                                Error **errp)
{
    void *map = xengnttab_map_domain_grant_refs(xendev->xgth, nr_refs,
                                                xendev->frontend_id, refs,
                                                prot);

    if (!map) {
        error_setg_errno(errp, errno,
                         "xengnttab_map_domain_grant_refs failed");
    }

    return map;
}

/* Unmap a region previously mapped with xen_device_map_grant_refs(). */
void xen_device_unmap_grant_refs(XenDevice *xendev, void *map,
                                 unsigned int nr_refs, Error **errp)
{
    if (xengnttab_unmap(xendev->xgth, map, nr_refs)) {
        error_setg_errno(errp, errno, "xengnttab_unmap failed");
    }
}

/*
 * Fallback grant-copy for hosts lacking xengnttab_grant_copy(): map all
 * the foreign pages, memcpy each segment in the requested direction,
 * then unmap.
 */
static void compat_copy_grant_refs(XenDevice *xendev, bool to_domain,
                                   XenDeviceGrantCopySegment segs[],
                                   unsigned int nr_segs, Error **errp)
{
    uint32_t *refs = g_new(uint32_t, nr_segs);
    int prot = to_domain ? PROT_WRITE : PROT_READ;
    void *map;
    unsigned int i;

    for (i = 0; i < nr_segs; i++) {
        XenDeviceGrantCopySegment *seg = &segs[i];

        refs[i] = to_domain ? seg->dest.foreign.ref :
            seg->source.foreign.ref;
    }

    map = xengnttab_map_domain_grant_refs(xendev->xgth, nr_segs,
                                          xendev->frontend_id, refs,
                                          prot);
    if (!map) {
        error_setg_errno(errp, errno,
                         "xengnttab_map_domain_grant_refs failed");
        goto done;
    }

    for (i = 0; i < nr_segs; i++) {
        XenDeviceGrantCopySegment *seg = &segs[i];
        void *page = map + (i * XC_PAGE_SIZE);

        if (to_domain) {
            memcpy(page + seg->dest.foreign.offset, seg->source.virt,
                   seg->len);
        } else {
            memcpy(seg->dest.virt, page + seg->source.foreign.offset,
                   seg->len);
        }
    }

    if (xengnttab_unmap(xendev->xgth, map, nr_segs)) {
        error_setg_errno(errp, errno, "xengnttab_unmap failed");
    }

done:
    g_free(refs);
}

/*
 * Copy data between local buffers and frontend grant references,
 * preferring the hypervisor grant-copy operation when available
 * (feature_grant_copy probed at realize time).
 */
void xen_device_copy_grant_refs(XenDevice *xendev, bool to_domain,
                                XenDeviceGrantCopySegment segs[],
                                unsigned int nr_segs, Error **errp)
{
    xengnttab_grant_copy_segment_t *xengnttab_segs;
    unsigned int i;

    if (!xendev->feature_grant_copy) {
        compat_copy_grant_refs(xendev, to_domain, segs, nr_segs, errp);
        return;
    }

    xengnttab_segs = g_new0(xengnttab_grant_copy_segment_t, nr_segs);

    for (i = 0; i < nr_segs; i++) {
        XenDeviceGrantCopySegment *seg = &segs[i];
        xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i];

        if (to_domain) {
            xengnttab_seg->flags = GNTCOPY_dest_gref;
            xengnttab_seg->dest.foreign.domid = xendev->frontend_id;
            xengnttab_seg->dest.foreign.ref = seg->dest.foreign.ref;
            xengnttab_seg->dest.foreign.offset = seg->dest.foreign.offset;
            xengnttab_seg->source.virt = seg->source.virt;
        } else {
            xengnttab_seg->flags = GNTCOPY_source_gref;
            xengnttab_seg->source.foreign.domid = xendev->frontend_id;
            xengnttab_seg->source.foreign.ref = seg->source.foreign.ref;
            xengnttab_seg->source.foreign.offset =
                seg->source.foreign.offset;
            xengnttab_seg->dest.virt = seg->dest.virt;
        }

        xengnttab_seg->len = seg->len;
    }

    if (xengnttab_grant_copy(xendev->xgth, nr_segs, xengnttab_segs)) {
        error_setg_errno(errp, errno, "xengnttab_grant_copy failed");
        goto done;
    }

    for (i = 0; i < nr_segs; i++) {
        xengnttab_grant_copy_segment_t *xengnttab_seg = &xengnttab_segs[i];

        if (xengnttab_seg->status != GNTST_okay) {
            error_setg(errp, "xengnttab_grant_copy seg[%u] failed", i);
            break;
        }
    }

done:
    g_free(xengnttab_segs);
}

/*
 * An event channel bound to the frontend; dispatched through the
 * device's notifier list and matched by local port number.
 */
struct XenEventChannel {
    evtchn_port_t local_port;
    XenEventHandler handler;
    void *opaque;
    Notifier notifier;
};

/* Notifier trampoline: fire the handler only for the matching port. */
static void event_notify(Notifier *n, void *data)
{
    XenEventChannel *channel = container_of(n, XenEventChannel, notifier);
    unsigned long port = (unsigned long)data;

    if (port == channel->local_port) {
        channel->handler(channel->opaque);
    }
}

/* Bind a frontend event-channel port; returns NULL (and sets errp) on
 * failure. */
XenEventChannel *xen_device_bind_event_channel(XenDevice *xendev,
                                               unsigned int port,
                                               XenEventHandler handler,
                                               void *opaque, Error **errp)
{
    XenEventChannel *channel = g_new0(XenEventChannel, 1);
    xenevtchn_port_or_error_t local_port;

    local_port = xenevtchn_bind_interdomain(xendev->xeh,
                                            xendev->frontend_id,
                                            port);
    if (local_port < 0) {
        error_setg_errno(errp, errno, "xenevtchn_bind_interdomain failed");

        g_free(channel);
        return NULL;
    }

    channel->local_port = local_port;
    channel->handler = handler;
    channel->opaque = opaque;

    channel->notifier.notify = event_notify;
    notifier_list_add(&xendev->event_notifiers, &channel->notifier);

    return channel;
}

/* Signal the peer over a bound event channel. */
void xen_device_notify_event_channel(XenDevice *xendev,
                                     XenEventChannel *channel,
                                     Error **errp)
{
    if (!channel) {
        error_setg(errp, "bad channel");
        return;
    }

    if (xenevtchn_notify(xendev->xeh, channel->local_port) < 0) {
        error_setg_errno(errp, errno, "xenevtchn_notify failed");
    }
}

/* Unbind and free an event channel created by bind_event_channel(). */
void xen_device_unbind_event_channel(XenDevice *xendev,
                                     XenEventChannel *channel,
                                     Error **errp)
{
    if (!channel) {
        error_setg(errp, "bad channel");
        return;
    }

    notifier_remove(&channel->notifier);

    if (xenevtchn_unbind(xendev->xeh, channel->local_port) < 0) {
        error_setg_errno(errp, errno, "xenevtchn_unbind failed");
    }

    g_free(channel);
}

/*
 * Device teardown, also used directly by the realize failure path and
 * the process exit notifier.  Order matters: class unrealize runs before
 * the xenstore areas and handles are torn down.  Early-outs if realize
 * never got as far as naming the device.
 */
static void xen_device_unrealize(DeviceState *dev, Error **errp)
{
    XenDevice *xendev = XEN_DEVICE(dev);
    XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev);
    const char *type = object_get_typename(OBJECT(xendev));

    if (!xendev->name) {
        return;
    }

    trace_xen_device_unrealize(type, xendev->name);

    if (xendev->exit.notify) {
        qemu_remove_exit_notifier(&xendev->exit);
        xendev->exit.notify = NULL;
    }

    if (xendev_class->unrealize) {
        xendev_class->unrealize(xendev, errp);
    }

    xen_device_frontend_destroy(xendev);
    xen_device_backend_destroy(xendev);

    if (xendev->xeh) {
        qemu_set_fd_handler(xenevtchn_fd(xendev->xeh), NULL, NULL, NULL);
        xenevtchn_close(xendev->xeh);
        xendev->xeh = NULL;
    }

    if (xendev->xgth) {
        xengnttab_close(xendev->xgth);
        xendev->xgth = NULL;
    }

    g_free(xendev->name);
    xendev->name = NULL;
}

/* Exit notifier: make sure the device is cleanly unrealized on QEMU
 * shutdown. */
static void xen_device_exit(Notifier *n, void *data)
{
    XenDevice *xendev = container_of(n, XenDevice, exit);

    xen_device_unrealize(DEVICE(xendev), &error_abort);
}

/* fd handler: fetch the pending event port, dispatch to channel
 * notifiers, then unmask the port. */
static void xen_device_event(void *opaque)
{
    XenDevice *xendev = opaque;
    unsigned long port = xenevtchn_pending(xendev->xeh);

    notifier_list_notify(&xendev->event_notifiers, (void *)port);

    xenevtchn_unmask(xendev->xeh, port);
}

/*
 * Realize a XenDevice: validate the frontend domid, name the device,
 * open grant-table and event-channel handles (probing for the grant-copy
 * feature), create the backend and frontend xenstore areas, run the
 * class realize hook, and publish the initial handshake state
 * (backend InitWait, frontend Initialising).  Any failure unwinds via
 * xen_device_unrealize().
 */
static void xen_device_realize(DeviceState *dev, Error **errp)
{
    XenDevice *xendev = XEN_DEVICE(dev);
    XenDeviceClass *xendev_class = XEN_DEVICE_GET_CLASS(xendev);
    XenBus *xenbus = XEN_BUS(qdev_get_parent_bus(DEVICE(xendev)));
    const char *type = object_get_typename(OBJECT(xendev));
    Error *local_err = NULL;

    if (xendev->frontend_id == DOMID_INVALID) {
        xendev->frontend_id = xen_domid; /* Ensure backward compatibility */
    }

    if (xendev->frontend_id >= DOMID_FIRST_RESERVED) {
        error_setg(errp, "invalid frontend-id");
        goto unrealize;
    }

    if (!xendev_class->get_name) {
        error_setg(errp, "get_name method not implemented");
        goto unrealize;
    }

    xendev->name = xendev_class->get_name(xendev, &local_err);
    if (local_err) {
        error_propagate_prepend(errp, local_err,
                                "failed to get device name: ");
        goto unrealize;
    }

    trace_xen_device_realize(type, xendev->name);

    xendev->xgth = xengnttab_open(NULL, 0);
    if (!xendev->xgth) {
        error_setg_errno(errp, errno, "failed xengnttab_open");
        goto unrealize;
    }

    /* A zero-length grant copy succeeds iff the op is supported. */
    xendev->feature_grant_copy =
        (xengnttab_grant_copy(xendev->xgth, 0, NULL) == 0);

    xendev->xeh = xenevtchn_open(NULL, 0);
    if (!xendev->xeh) {
        error_setg_errno(errp, errno, "failed xenevtchn_open");
        goto unrealize;
    }

    notifier_list_init(&xendev->event_notifiers);
    qemu_set_fd_handler(xenevtchn_fd(xendev->xeh), xen_device_event, NULL,
                        xendev);

    xen_device_backend_create(xendev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto unrealize;
    }

    xen_device_frontend_create(xendev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto unrealize;
    }

    if (xendev_class->realize) {
        xendev_class->realize(xendev, &local_err);
        if (local_err) {
            error_propagate(errp, local_err);
            goto unrealize;
        }
    }

    xen_device_backend_printf(xendev, "frontend", "%s",
                              xendev->frontend_path);
    xen_device_backend_printf(xendev, "frontend-id", "%u",
                              xendev->frontend_id);
    xen_device_backend_printf(xendev, "hotplug-status", "connected");

    xen_device_backend_set_online(xendev, true);
    xen_device_backend_set_state(xendev, XenbusStateInitWait);

    xen_device_frontend_printf(xendev, "backend", "%s",
                               xendev->backend_path);
    xen_device_frontend_printf(xendev, "backend-id", "%u",
                               xenbus->backend_id);

    xen_device_frontend_set_state(xendev, XenbusStateInitialising);

    xendev->exit.notify = xen_device_exit;
    qemu_add_exit_notifier(&xendev->exit);
    return;

unrealize:
    xen_device_unrealize(dev, &error_abort);
}

static Property xen_device_props[] = {
    DEFINE_PROP_UINT16("frontend-id", XenDevice, frontend_id,
                       DOMID_INVALID),
    DEFINE_PROP_END_OF_LIST()
};

static void xen_device_class_init(ObjectClass *class, void *data)
{
    DeviceClass *dev_class = DEVICE_CLASS(class);

    dev_class->realize = xen_device_realize;
    dev_class->unrealize = xen_device_unrealize;
    dev_class->props = xen_device_props;
    dev_class->bus_type = TYPE_XEN_BUS;
}

static const TypeInfo xen_device_type_info = {
    .name = TYPE_XEN_DEVICE,
    .parent = TYPE_DEVICE,
    .instance_size = sizeof(XenDevice),
    .abstract = true,
    .class_size = sizeof(XenDeviceClass),
    .class_init = xen_device_class_init,
};

/* Dummy sysbus device acting as the parent of the xen-bus. */
typedef struct XenBridge {
    SysBusDevice busdev;
} XenBridge;

#define TYPE_XEN_BRIDGE "xen-bridge"

static const TypeInfo xen_bridge_type_info = {
    .name = TYPE_XEN_BRIDGE,
    .parent = TYPE_SYS_BUS_DEVICE,
    .instance_size = sizeof(XenBridge),
};

static void xen_register_types(void)
{
    type_register_static(&xen_bridge_type_info);
    type_register_static(&xen_bus_type_info);
    type_register_static(&xen_device_type_info);
}

type_init(xen_register_types)

/* Create the bridge device and the xen-bus hanging off it. */
void xen_bus_init(void)
{
    DeviceState *dev = qdev_create(NULL, TYPE_XEN_BRIDGE);
    BusState *bus = qbus_create(TYPE_XEN_BUS, dev, NULL);

    qdev_init_nofail(dev);
    qbus_set_bus_hotplug_handler(bus, &error_abort);
}
pmp-tool/PMP
src/qemu/src-pmp/target/xtensa/exc_helper.c
/*
 * Copyright (c) 2011 - 2019, <NAME>, Open Source and Linux Lab.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of the Open Source and Linux Lab nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

/* Xtensa exception and interrupt delivery helpers. */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "cpu.h"
#include "exec/helper-proto.h"
#include "qemu/host-utils.h"
#include "exec/exec-all.h"

/*
 * Raise a generic QEMU exception and leave the translated-code loop.
 * EXCP_YIELD and EXCP_DEBUG additionally clear the corresponding
 * per-CPU bookkeeping flags before exiting.  Does not return.
 */
void HELPER(exception)(CPUXtensaState *env, uint32_t excp)
{
    CPUState *cs = CPU(xtensa_env_get_cpu(env));

    cs->exception_index = excp;
    if (excp == EXCP_YIELD) {
        env->yield_needed = 0;
    }
    if (excp == EXCP_DEBUG) {
        env->exception_taken = 0;
    }
    cpu_loop_exit(cs);
}

/*
 * Raise an architectural exception with the given cause at pc.
 * If we are already in exception mode (PS.EXCM set) this becomes a
 * double exception: pc is saved in DEPC when the core has one,
 * otherwise in EPC1.  Otherwise the user/kernel vector is selected
 * from PS.UM.  Sets EXCCAUSE and PS.EXCM, then never returns.
 */
void HELPER(exception_cause)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    uint32_t vector;

    env->pc = pc;
    if (env->sregs[PS] & PS_EXCM) {
        if (env->config->ndepc) {
            env->sregs[DEPC] = pc;
        } else {
            env->sregs[EPC1] = pc;
        }
        vector = EXC_DOUBLE;
    } else {
        env->sregs[EPC1] = pc;
        vector = (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
    }

    env->sregs[EXCCAUSE] = cause;
    env->sregs[PS] |= PS_EXCM;

    HELPER(exception)(env, vector);
}

/*
 * Same as exception_cause, but also records the faulting virtual
 * address in EXCVADDR (used for memory access exceptions).
 */
void HELPER(exception_cause_vaddr)(CPUXtensaState *env,
                                   uint32_t pc, uint32_t cause,
                                   uint32_t vaddr)
{
    env->sregs[EXCVADDR] = vaddr;
    HELPER(exception_cause)(env, pc, cause);
}

/*
 * Raise a debug exception at the current pc, but only when the current
 * interrupt level is below the configured debug level (i.e. debug
 * exceptions are not masked).
 */
void debug_exception_env(CPUXtensaState *env, uint32_t cause)
{
    if (xtensa_get_cintlevel(env) < env->config->debug_level) {
        HELPER(debug_exception)(env, env->pc, cause);
    }
}

/*
 * Deliver a debug exception: save pc/PS in the EPC/EPS registers of
 * the debug interrupt level, record DEBUGCAUSE, raise the interrupt
 * level to the debug level with PS.EXCM set, and exit to EXC_DEBUG.
 */
void HELPER(debug_exception)(CPUXtensaState *env, uint32_t pc, uint32_t cause)
{
    unsigned level = env->config->debug_level;

    env->pc = pc;
    env->sregs[DEBUGCAUSE] = cause;
    env->sregs[EPC1 + level - 1] = pc;
    env->sregs[EPS2 + level - 2] = env->sregs[PS];
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) | PS_EXCM |
        (level << PS_INTLEVEL_SHIFT);
    HELPER(exception)(env, EXC_DEBUG);
}

#ifndef CONFIG_USER_ONLY

/*
 * WAITI instruction: lower PS.INTLEVEL to the given level and halt the
 * CPU until an interrupt arrives.  If an interrupt is already pending
 * after re-checking (under the BQL), exit the TB loop immediately
 * instead of halting.
 */
void HELPER(waiti)(CPUXtensaState *env, uint32_t pc, uint32_t intlevel)
{
    CPUState *cpu;

    env->pc = pc;
    env->sregs[PS] = (env->sregs[PS] & ~PS_INTLEVEL) |
        (intlevel << PS_INTLEVEL_SHIFT);

    /* check_interrupts() touches IRQ state shared with the iothread */
    qemu_mutex_lock_iothread();
    check_interrupts(env);
    qemu_mutex_unlock_iothread();

    if (env->pending_irq_level) {
        cpu_loop_exit(CPU(xtensa_env_get_cpu(env)));
        return;
    }
    cpu = CPU(xtensa_env_get_cpu(env));
    cpu->halted = 1;
    HELPER(exception)(env, EXCP_HLT);
}

/* Re-evaluate pending interrupts while holding the BQL. */
void HELPER(check_interrupts)(CPUXtensaState *env)
{
    qemu_mutex_lock_iothread();
    check_interrupts(env);
    qemu_mutex_unlock_iothread();
}

/* Set software interrupt bits in INTSET (atomically, WSR.INTSET). */
void HELPER(intset)(CPUXtensaState *env, uint32_t v)
{
    atomic_or(&env->sregs[INTSET],
              v & env->config->inttype_mask[INTTYPE_SOFTWARE]);
}

/* Clear software/edge interrupt bits in INTSET (atomically, WSR.INTCLEAR). */
void HELPER(intclear)(CPUXtensaState *env, uint32_t v)
{
    atomic_and(&env->sregs[INTSET],
               ~(v & (env->config->inttype_mask[INTTYPE_SOFTWARE] |
                      env->config->inttype_mask[INTTYPE_EDGE])));
}

/*
 * Translate a static vector address through VECBASE when the
 * relocatable-vector option is enabled; otherwise return it unchanged.
 */
static uint32_t relocated_vector(CPUXtensaState *env, uint32_t vector)
{
    if (xtensa_option_enabled(env->config,
                              XTENSA_OPTION_RELOCATABLE_VECTOR)) {
        return vector - env->config->vecbase + env->sregs[VECBASE];
    } else {
        return vector;
    }
}

/*!
 * Handle pending IRQ.
 * For the high priority interrupt jump to the corresponding interrupt vector.
 * For the level-1 interrupt convert it to either user, kernel or double
 * exception with the 'level-1 interrupt' exception cause.
 */
static void handle_interrupt(CPUXtensaState *env)
{
    int level = env->pending_irq_level;

    /* only deliver if the level beats the current mask and is enabled */
    if (level > xtensa_get_cintlevel(env) &&
        level <= env->config->nlevel &&
        (env->config->level_mask[level] &
         env->sregs[INTSET] &
         env->sregs[INTENABLE])) {
        CPUState *cs = CPU(xtensa_env_get_cpu(env));

        if (level > 1) {
            /* high-priority: save state in level-specific EPC/EPS and jump */
            env->sregs[EPC1 + level - 1] = env->pc;
            env->sregs[EPS2 + level - 2] = env->sregs[PS];
            env->sregs[PS] =
                (env->sregs[PS] & ~PS_INTLEVEL) | level | PS_EXCM;
            env->pc = relocated_vector(env,
                                       env->config->interrupt_vector[level]);
        } else {
            /* level-1: route through the generic exception machinery */
            env->sregs[EXCCAUSE] = LEVEL1_INTERRUPT_CAUSE;

            if (env->sregs[PS] & PS_EXCM) {
                if (env->config->ndepc) {
                    env->sregs[DEPC] = env->pc;
                } else {
                    env->sregs[EPC1] = env->pc;
                }
                cs->exception_index = EXC_DOUBLE;
            } else {
                env->sregs[EPC1] = env->pc;
                cs->exception_index =
                    (env->sregs[PS] & PS_UM) ? EXC_USER : EXC_KERNEL;
            }
            env->sregs[PS] |= PS_EXCM;
        }
        env->exception_taken = 1;
    }
}

/* Called from cpu_handle_interrupt with BQL held */
void xtensa_cpu_do_interrupt(CPUState *cs)
{
    XtensaCPU *cpu = XTENSA_CPU(cs);
    CPUXtensaState *env = &cpu->env;

    if (cs->exception_index == EXC_IRQ) {
        qemu_log_mask(CPU_LOG_INT,
                      "%s(EXC_IRQ) level = %d, cintlevel = %d, "
                      "pc = %08x, a0 = %08x, ps = %08x, "
                      "intset = %08x, intenable = %08x, "
                      "ccount = %08x\n",
                      __func__, env->pending_irq_level,
                      xtensa_get_cintlevel(env),
                      env->pc, env->regs[0], env->sregs[PS],
                      env->sregs[INTSET], env->sregs[INTENABLE],
                      env->sregs[CCOUNT]);
        /* may rewrite cs->exception_index for level-1 interrupts */
        handle_interrupt(env);
    }

    switch (cs->exception_index) {
    case EXC_WINDOW_OVERFLOW4:
    case EXC_WINDOW_UNDERFLOW4:
    case EXC_WINDOW_OVERFLOW8:
    case EXC_WINDOW_UNDERFLOW8:
    case EXC_WINDOW_OVERFLOW12:
    case EXC_WINDOW_UNDERFLOW12:
    case EXC_KERNEL:
    case EXC_USER:
    case EXC_DOUBLE:
    case EXC_DEBUG:
        qemu_log_mask(CPU_LOG_INT, "%s(%d) "
                      "pc = %08x, a0 = %08x, ps = %08x, ccount = %08x\n",
                      __func__, cs->exception_index,
                      env->pc, env->regs[0], env->sregs[PS],
                      env->sregs[CCOUNT]);
        /* jump to the (possibly relocated) vector for this exception */
        if (env->config->exception_vector[cs->exception_index]) {
            uint32_t vector;

            vector = env->config->exception_vector[cs->exception_index];
            env->pc = relocated_vector(env, vector);
            env->exception_taken = 1;
        } else {
            qemu_log_mask(CPU_LOG_INT,
                          "%s(pc = %08x) bad exception_index: %d\n",
                          __func__, env->pc, cs->exception_index);
        }
        break;

    case EXC_IRQ:
        break;

    default:
        qemu_log("%s(pc = %08x) unknown exception_index: %d\n",
                 __func__, env->pc, cs->exception_index);
        break;
    }
    check_interrupts(env);
}
#else
void xtensa_cpu_do_interrupt(CPUState *cs)
{
}
#endif

/* Convert a hard interrupt request into an EXC_IRQ delivery. */
bool xtensa_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        cs->exception_index = EXC_IRQ;
        xtensa_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}
pmp-tool/PMP
src/qemu/src-pmp/slirp/src/tcp_subr.c
/* SPDX-License-Identifier: BSD-3-Clause */
/*
 * Copyright (c) 1982, 1986, 1988, 1990, 1993
 *      The Regents of the University of California.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 * 3. Neither the name of the University nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * @(#)tcp_subr.c      8.1 (Berkeley) 6/10/93
 * tcp_subr.c,v 1.5 1994/10/08 22:39:58 phk Exp
 */

/*
 * Changes and additions relating to SLiRP
 * Copyright (c) 1995 <NAME>.
 */

#include "slirp.h"

/* patchable/settable parameters for tcp */

/* Don't do rfc1323 performance enhancements */
#define TCP_DO_RFC1323 0

/*
 * Tcp initialization: seed the ISS and set up the empty circular
 * list of control blocks rooted at slirp->tcb.
 */
void tcp_init(Slirp *slirp)
{
    slirp->tcp_iss = 1; /* wrong */
    slirp->tcb.so_next = slirp->tcb.so_prev = &slirp->tcb;
    slirp->tcp_last_so = &slirp->tcb;
}

/* Tear down every remaining TCP control block at instance shutdown. */
void tcp_cleanup(Slirp *slirp)
{
    while (slirp->tcb.so_next != &slirp->tcb) {
        tcp_close(sototcpcb(slirp->tcb.so_next));
    }
}

/*
 * Create template to be used to send tcp packets on a connection.
 * Call after host entry created, fills
 * in a skeletal tcp/ip header, minimizing the amount of work
 * necessary when the connection is used.
 */
void tcp_template(struct tcpcb *tp)
{
    struct socket *so = tp->t_socket;
    register struct tcpiphdr *n = &tp->t_template;

    n->ti_mbuf = NULL;
    memset(&n->ti, 0, sizeof(n->ti));
    n->ti_x0 = 0;
    switch (so->so_ffamily) {
    case AF_INET:
        n->ti_pr = IPPROTO_TCP;
        n->ti_len = htons(sizeof(struct tcphdr));
        n->ti_src = so->so_faddr;
        n->ti_dst = so->so_laddr;
        n->ti_sport = so->so_fport;
        n->ti_dport = so->so_lport;
        break;

    case AF_INET6:
        n->ti_nh6 = IPPROTO_TCP;
        n->ti_len = htons(sizeof(struct tcphdr));
        n->ti_src6 = so->so_faddr6;
        n->ti_dst6 = so->so_laddr6;
        n->ti_sport = so->so_fport6;
        n->ti_dport = so->so_lport6;
        break;

    default:
        g_assert_not_reached();
    }

    n->ti_seq = 0;
    n->ti_ack = 0;
    n->ti_x2 = 0;
    n->ti_off = 5;
    n->ti_flags = 0;
    n->ti_win = 0;
    n->ti_sum = 0;
    n->ti_urp = 0;
}

/*
 * Send a single message to the TCP at address specified by
 * the given TCP/IP header.  If m == 0, then we make a copy
 * of the tcpiphdr at ti and send directly to the addressed host.
 * This is used to force keep alive messages out using the TCP
 * template for a connection tp->t_template.  If flags are given
 * then we send a message back to the TCP which originated the
 * segment ti, and discard the mbuf containing it and any other
 * attached mbufs.
 *
 * In any case the ack and sequence number of the transmitted
 * segment are as specified by the parameters.
 */
void tcp_respond(struct tcpcb *tp, struct tcpiphdr *ti, struct mbuf *m,
                 tcp_seq ack, tcp_seq seq, int flags, unsigned short af)
{
    register int tlen;
    int win = 0;

    DEBUG_CALL("tcp_respond");
    DEBUG_ARG("tp = %p", tp);
    DEBUG_ARG("ti = %p", ti);
    DEBUG_ARG("m = %p", m);
    DEBUG_ARG("ack = %u", ack);
    DEBUG_ARG("seq = %u", seq);
    DEBUG_ARG("flags = %x", flags);

    if (tp)
        win = sbspace(&tp->t_socket->so_rcv);
    if (m == NULL) {
        /* no segment given: build one from the connection template */
        if (!tp || (m = m_get(tp->t_socket->slirp)) == NULL)
            return;
        tlen = 0;
        m->m_data += IF_MAXLINKHDR;
        *mtod(m, struct tcpiphdr *) = *ti;
        ti = mtod(m, struct tcpiphdr *);
        switch (af) {
        case AF_INET:
            ti->ti.ti_i4.ih_x1 = 0;
            break;
        case AF_INET6:
            ti->ti.ti_i6.ih_x1 = 0;
            break;
        default:
            g_assert_not_reached();
        }
        flags = TH_ACK;
    } else {
        /*
         * ti points into m so the next line is just making
         * the mbuf point to ti
         */
        m->m_data = (char *)ti;
        m->m_len = sizeof (struct tcpiphdr);
        tlen = 0;
        /* reply goes back the way the segment came: swap src/dst */
#define xchg(a,b,type) { type t; t=a; a=b; b=t; }
        switch (af) {
        case AF_INET:
            xchg(ti->ti_dst.s_addr, ti->ti_src.s_addr, uint32_t);
            xchg(ti->ti_dport, ti->ti_sport, uint16_t);
            break;
        case AF_INET6:
            xchg(ti->ti_dst6, ti->ti_src6, struct in6_addr);
            xchg(ti->ti_dport, ti->ti_sport, uint16_t);
            break;
        default:
            g_assert_not_reached();
        }
#undef xchg
    }
    ti->ti_len = htons((uint16_t)(sizeof (struct tcphdr) + tlen));
    tlen += sizeof (struct tcpiphdr);
    m->m_len = tlen;

    ti->ti_mbuf = NULL;
    ti->ti_x0 = 0;
    ti->ti_seq = htonl(seq);
    ti->ti_ack = htonl(ack);
    ti->ti_x2 = 0;
    ti->ti_off = sizeof (struct tcphdr) >> 2;
    ti->ti_flags = flags;
    if (tp)
        ti->ti_win = htons((uint16_t) (win >> tp->rcv_scale));
    else
        ti->ti_win = htons((uint16_t)win);
    ti->ti_urp = 0;
    ti->ti_sum = 0;
    ti->ti_sum = cksum(m, tlen);

    /* save the pseudo-header before rewriting it into a real IP header */
    struct tcpiphdr tcpiph_save = *(mtod(m, struct tcpiphdr *));
    struct ip *ip;
    struct ip6 *ip6;

    switch (af) {
    case AF_INET:
        m->m_data += sizeof(struct tcpiphdr) - sizeof(struct tcphdr)
                     - sizeof(struct ip);
        m->m_len -= sizeof(struct tcpiphdr) - sizeof(struct tcphdr)
                    - sizeof(struct ip);
        ip = mtod(m, struct ip *);
        ip->ip_len = m->m_len;
        ip->ip_dst = tcpiph_save.ti_dst;
        ip->ip_src = tcpiph_save.ti_src;
        ip->ip_p = tcpiph_save.ti_pr;

        if (flags & TH_RST) {
            ip->ip_ttl = MAXTTL;
        } else {
            ip->ip_ttl = IPDEFTTL;
        }

        ip_output(NULL, m);
        break;

    case AF_INET6:
        m->m_data += sizeof(struct tcpiphdr) - sizeof(struct tcphdr)
                     - sizeof(struct ip6);
        m->m_len -= sizeof(struct tcpiphdr) - sizeof(struct tcphdr)
                    - sizeof(struct ip6);
        ip6 = mtod(m, struct ip6 *);
        ip6->ip_pl = tcpiph_save.ti_len;
        ip6->ip_dst = tcpiph_save.ti_dst6;
        ip6->ip_src = tcpiph_save.ti_src6;
        ip6->ip_nh = tcpiph_save.ti_nh6;

        ip6_output(NULL, m, 0);
        break;

    default:
        g_assert_not_reached();
    }
}

/*
 * Create a new TCP control block, making an
 * empty reassembly queue and hooking it to the argument
 * protocol control block.  Returns NULL on allocation failure.
 */
struct tcpcb *
tcp_newtcpcb(struct socket *so)
{
    register struct tcpcb *tp;

    tp = (struct tcpcb *)malloc(sizeof(*tp));
    if (tp == NULL)
        return ((struct tcpcb *)0);

    memset((char *) tp, 0, sizeof(struct tcpcb));
    tp->seg_next = tp->seg_prev = (struct tcpiphdr*)tp;
    tp->t_maxseg = (so->so_ffamily == AF_INET) ? TCP_MSS : TCP6_MSS;

    tp->t_flags = TCP_DO_RFC1323 ? (TF_REQ_SCALE|TF_REQ_TSTMP) : 0;
    tp->t_socket = so;

    /*
     * Init srtt to TCPTV_SRTTBASE (0), so we can tell that we have no
     * rtt estimate.  Set rttvar so that srtt + 2 * rttvar gives
     * reasonable initial retransmit time.
     */
    tp->t_srtt = TCPTV_SRTTBASE;
    tp->t_rttvar = TCPTV_SRTTDFLT << 2;
    tp->t_rttmin = TCPTV_MIN;
    TCPT_RANGESET(tp->t_rxtcur,
                  ((TCPTV_SRTTBASE >> 2) + (TCPTV_SRTTDFLT << 2)) >> 1,
                  TCPTV_MIN, TCPTV_REXMTMAX);
    tp->snd_cwnd = TCP_MAXWIN << TCP_MAX_WINSHIFT;
    tp->snd_ssthresh = TCP_MAXWIN << TCP_MAX_WINSHIFT;
    tp->t_state = TCPS_CLOSED;

    so->so_tcpcb = tp;

    return (tp);
}

/*
 * Drop a TCP connection, reporting
 * the specified error.  If connection is synchronized,
 * then send a RST to peer.
 */
struct tcpcb *tcp_drop(struct tcpcb *tp, int err)
{
    DEBUG_CALL("tcp_drop");
    DEBUG_ARG("tp = %p", tp);
    /* NOTE(review): logs the global errno, not the err parameter —
     * err is currently unused; confirm against upstream intent. */
    DEBUG_ARG("errno = %d", errno);

    if (TCPS_HAVERCVDSYN(tp->t_state)) {
        tp->t_state = TCPS_CLOSED;
        (void) tcp_output(tp);
    }

    return (tcp_close(tp));
}

/*
 * Close a TCP control block:
 *      discard all space held by the tcp
 *      discard internet protocol block
 *      wake up any sleepers
 * Always returns NULL; frees tp and the associated socket.
 */
struct tcpcb *
tcp_close(struct tcpcb *tp)
{
    register struct tcpiphdr *t;
    struct socket *so = tp->t_socket;
    Slirp *slirp = so->slirp;
    register struct mbuf *m;

    DEBUG_CALL("tcp_close");
    DEBUG_ARG("tp = %p", tp);

    /* free the reassembly queue, if any */
    t = tcpfrag_list_first(tp);
    while (!tcpfrag_list_end(t, tp)) {
        t = tcpiphdr_next(t);
        m = tcpiphdr_prev(t)->ti_mbuf;
        remque(tcpiphdr2qlink(tcpiphdr_prev(t)));
        m_free(m);
    }
    free(tp);
    so->so_tcpcb = NULL;
    /* clobber input socket cache if we're closing the cached connection */
    if (so == slirp->tcp_last_so)
        slirp->tcp_last_so = &slirp->tcb;
    so->slirp->cb->unregister_poll_fd(so->s, so->slirp->opaque);
    closesocket(so->s);
    sbfree(&so->so_rcv);
    sbfree(&so->so_snd);
    sofree(so);
    return ((struct tcpcb *)0);
}

/*
 * TCP protocol interface to socket abstraction.
 */

/*
 * User issued close, and wish to trail through shutdown states:
 * if never received SYN, just forget it.  If got a SYN from peer,
 * but haven't sent FIN, then go to FIN_WAIT_1 state to send peer a FIN.
 * If already got a FIN from peer, then almost done; go to LAST_ACK
 * state.  In all other cases, have already sent FIN to peer (e.g.
 * after PRU_SHUTDOWN), and just have to play tedious game waiting
 * for peer to send FIN or not respond to keep-alives, etc.
 * We can let the user exit from the close as soon as the FIN is acked.
 */
void tcp_sockclosed(struct tcpcb *tp)
{
    DEBUG_CALL("tcp_sockclosed");
    DEBUG_ARG("tp = %p", tp);

    if (!tp) {
        return;
    }
    switch (tp->t_state) {
    case TCPS_CLOSED:
    case TCPS_LISTEN:
    case TCPS_SYN_SENT:
        tp->t_state = TCPS_CLOSED;
        tp = tcp_close(tp);
        break;

    case TCPS_SYN_RECEIVED:
    case TCPS_ESTABLISHED:
        tp->t_state = TCPS_FIN_WAIT_1;
        break;

    case TCPS_CLOSE_WAIT:
        tp->t_state = TCPS_LAST_ACK;
        break;
    }
    /* NOTE(review): tcp_output(tp) after tcp_close() in the first case
     * group relies on tp being reassigned to NULL-ish return; tp is the
     * NULL return of tcp_close there — tcp_output is still invoked with
     * it.  Preserved as-is; verify against upstream slirp behavior. */
    tcp_output(tp);
}

/*
 * Connect to a host on the Internet
 * Called by tcp_input
 * Only do a connect, the tcp fields will be set in tcp_input
 * return 0 if there's a result of the connect,
 * else return -1 means we're still connecting
 * The return value is almost always -1 since the socket is
 * nonblocking.  Connect returns after the SYN is sent, and does
 * not wait for ACK+SYN.
 */
int tcp_fconnect(struct socket *so, unsigned short af)
{
    int ret=0;

    DEBUG_CALL("tcp_fconnect");
    DEBUG_ARG("so = %p", so);

    ret = so->s = slirp_socket(af, SOCK_STREAM, 0);
    if (ret >= 0) {
        int opt, s=so->s;
        struct sockaddr_storage addr;

        slirp_set_nonblock(s);
        so->slirp->cb->register_poll_fd(so->s, so->slirp->opaque);
        slirp_socket_set_fast_reuse(s);
        opt = 1;
        setsockopt(s, SOL_SOCKET, SO_OOBINLINE, &opt, sizeof(opt));
        opt = 1;
        setsockopt(s, IPPROTO_TCP, TCP_NODELAY, &opt, sizeof(opt));

        addr = so->fhost.ss;
        DEBUG_CALL(" connect()ing");
        sotranslate_out(so, &addr);

        /* We don't care what port we get */
        ret = connect(s, (struct sockaddr *)&addr, sockaddr_size(&addr));

        /*
         * If it's not in progress, it failed, so we just return 0,
         * without clearing SS_NOFDREF
         */
        soisfconnecting(so);
    }

    return(ret);
}

/*
 * Accept the socket and connect to the local-host
 *
 * We have a problem. The correct thing to do would be
 * to first connect to the local-host, and only if the
 * connection is accepted, then do an accept() here.
 * But, a) we need to know who's trying to connect
 * to the socket to be able to SYN the local-host, and
 * b) we are already connected to the foreign host by
 * the time it gets to accept(), so... We simply accept
 * here and SYN the local-host.
 */
void tcp_connect(struct socket *inso)
{
    Slirp *slirp = inso->slirp;
    struct socket *so;
    struct sockaddr_storage addr;
    socklen_t addrlen = sizeof(struct sockaddr_storage);
    struct tcpcb *tp;
    int s, opt;

    DEBUG_CALL("tcp_connect");
    DEBUG_ARG("inso = %p", inso);

    /*
     * If it's an SS_ACCEPTONCE socket, no need to socreate()
     * another socket, just use the accept() socket.
     */
    if (inso->so_state & SS_FACCEPTONCE) {
        /* FACCEPTONCE already have a tcpcb */
        so = inso;
    } else {
        so = socreate(slirp);
        if (tcp_attach(so) < 0) {
            g_free(so); /* NOT sofree */
            return;
        }
        so->lhost = inso->lhost;
        so->so_ffamily = inso->so_ffamily;
    }

    tcp_mss(sototcpcb(so), 0);

    s = accept(inso->s, (struct sockaddr *)&addr, &addrlen);
    if (s < 0) {
        tcp_close(sototcpcb(so)); /* This will sofree() as well */
        return;
    }
    slirp_set_nonblock(s);
    so->slirp->cb->register_poll_fd(so->s, so->slirp->opaque);
    slirp_socket_set_fast_reuse(s);
    opt = 1;
    setsockopt(s, SOL_SOCKET, SO_OOBINLINE, &opt, sizeof(int));
    slirp_socket_set_nodelay(s);

    so->fhost.ss = addr;
    sotranslate_accept(so);

    /* Close the accept() socket, set right state */
    if (inso->so_state & SS_FACCEPTONCE) {
        /* If we only accept once, close the accept() socket */
        so->slirp->cb->unregister_poll_fd(so->s, so->slirp->opaque);
        closesocket(so->s);

        /* Don't select it yet, even though we have an FD */
        /* if it's not FACCEPTONCE, it's already NOFDREF */
        so->so_state = SS_NOFDREF;
    }
    so->s = s;
    so->so_state |= SS_INCOMING;

    so->so_iptos = tcp_tos(so);
    tp = sototcpcb(so);

    tcp_template(tp);

    tp->t_state = TCPS_SYN_SENT;
    tp->t_timer[TCPT_KEEP] = TCPTV_KEEP_INIT;
    tp->iss = slirp->tcp_iss;
    slirp->tcp_iss += TCP_ISSINCR/2;
    tcp_sendseqinit(tp);
    tcp_output(tp);
}

/*
 * Attach a TCPCB to a socket.  Returns 0 on success, -1 if the
 * control block could not be allocated.
 */
int tcp_attach(struct socket *so)
{
    if ((so->so_tcpcb = tcp_newtcpcb(so)) == NULL)
        return -1;

    insque(so, &so->slirp->tcb);

    return 0;
}

/*
 * Set the socket's type of service field
 */
static const struct tos_t tcptos[] = {
    {0, 20, IPTOS_THROUGHPUT, 0}, /* ftp data */
    {21, 21, IPTOS_LOWDELAY, EMU_FTP}, /* ftp control */
    {0, 23, IPTOS_LOWDELAY, 0}, /* telnet */
    {0, 80, IPTOS_THROUGHPUT, 0}, /* WWW */
    {0, 513, IPTOS_LOWDELAY, EMU_RLOGIN|EMU_NOCONNECT}, /* rlogin */
    {0, 544, IPTOS_LOWDELAY, EMU_KSH}, /* kshell */
    {0, 543, IPTOS_LOWDELAY, 0}, /* klogin */
    {0, 6667, IPTOS_THROUGHPUT, EMU_IRC}, /* IRC */
    {0, 6668, IPTOS_THROUGHPUT, EMU_IRC}, /* IRC undernet */
    {0, 7070, IPTOS_LOWDELAY, EMU_REALAUDIO }, /* RealAudio control */
    {0, 113, IPTOS_LOWDELAY, EMU_IDENT }, /* identd protocol */
    {0, 0, 0, 0}
};

static struct emu_t *tcpemu = NULL;

/*
 * Return TOS according to the above table; as a side effect selects
 * the protocol emulation (so->so_emu) for the connection.
 */
uint8_t tcp_tos(struct socket *so)
{
    int i = 0;
    struct emu_t *emup;

    while(tcptos[i].tos) {
        if ((tcptos[i].fport && (ntohs(so->so_fport) == tcptos[i].fport)) ||
            (tcptos[i].lport && (ntohs(so->so_lport) == tcptos[i].lport))) {
            so->so_emu = tcptos[i].emu;
            return tcptos[i].tos;
        }
        i++;
    }

    /* Nope, lets see if there's a user-added one */
    for (emup = tcpemu; emup; emup = emup->next) {
        if ((emup->fport && (ntohs(so->so_fport) == emup->fport)) ||
            (emup->lport && (ntohs(so->so_lport) == emup->lport))) {
            so->so_emu = emup->emu;
            return emup->tos;
        }
    }

    return 0;
}

/*
 * Emulate programs that try and connect to us
 * This includes ftp (the data connection is
 * initiated by the server) and IRC (DCC CHAT and
 * DCC SEND) for now
 *
 * NOTE: It's possible to crash SLiRP by sending it
 * unstandard strings to emulate... if this is a problem,
 * more checks are needed here
 *
 * XXX Assumes the whole command came in one packet
 *
 * XXX Some ftp clients will have their TOS set to
 * LOWDELAY and so Nagel will kick in.  Because of this,
 * we'll get the first letter, followed by the rest, so
 * we simply scan for ORT instead of PORT...
 * DCC doesn't have this problem because there's other stuff
 * in the packet before the DCC command.
 *
 * Return 1 if the mbuf m is still valid and should be
 * sbappend()ed
 *
 * NOTE: if you return 0 you MUST m_free() the mbuf!
 */
int tcp_emu(struct socket *so, struct mbuf *m)
{
    Slirp *slirp = so->slirp;
    unsigned n1, n2, n3, n4, n5, n6;
    char buff[257];
    uint32_t laddr;
    unsigned lport;
    char *bptr;

    DEBUG_CALL("tcp_emu");
    DEBUG_ARG("so = %p", so);
    DEBUG_ARG("m = %p", m);

    switch(so->so_emu) {
        int x, i;

        /* TODO: IPv6 */
    case EMU_IDENT:
        /*
         * Identification protocol as per rfc-1413
         */
        {
            struct socket *tmpso;
            struct sockaddr_in addr;
            socklen_t addrlen = sizeof(struct sockaddr_in);
            struct sbuf *so_rcv = &so->so_rcv;

            /* drop the request if it does not fit in the receive buffer */
            if (m->m_len > so_rcv->sb_datalen
                           - (so_rcv->sb_wptr - so_rcv->sb_data)) {
                return 1;
            }

            memcpy(so_rcv->sb_wptr, m->m_data, m->m_len);
            so_rcv->sb_wptr += m->m_len;
            so_rcv->sb_rptr += m->m_len;
            m->m_data[m->m_len] = 0; /* NULL terminate */
            if (strchr(m->m_data, '\r') || strchr(m->m_data, '\n')) {
                if (sscanf(so_rcv->sb_data, "%u%*[ ,]%u", &n1, &n2) == 2) {
                    HTONS(n1);
                    HTONS(n2);
                    /* n2 is the one on our host */
                    for (tmpso = slirp->tcb.so_next;
                         tmpso != &slirp->tcb;
                         tmpso = tmpso->so_next) {
                        if (tmpso->so_laddr.s_addr == so->so_laddr.s_addr &&
                            tmpso->so_lport == n2 &&
                            tmpso->so_faddr.s_addr == so->so_faddr.s_addr &&
                            tmpso->so_fport == n1) {
                            /* report the real host-side port */
                            if (getsockname(tmpso->s,
                                    (struct sockaddr *)&addr, &addrlen) == 0)
                                n2 = addr.sin_port;
                            break;
                        }
                    }
                    NTOHS(n1);
                    NTOHS(n2);
                    so_rcv->sb_cc = snprintf(so_rcv->sb_data,
                                             so_rcv->sb_datalen,
                                             "%d,%d\r\n", n1, n2);
                    so_rcv->sb_rptr = so_rcv->sb_data;
                    so_rcv->sb_wptr = so_rcv->sb_data + so_rcv->sb_cc;
                }
            }
            m_free(m);
            return 0;
        }

    case EMU_FTP: /* ftp */
        *(m->m_data+m->m_len) = 0; /* NUL terminate for strstr */
        if ((bptr = (char *)strstr(m->m_data, "ORT")) != NULL) {
            /*
             * Need to emulate the PORT command
             */
            x = sscanf(bptr, "ORT %u,%u,%u,%u,%u,%u\r\n%256[^\177]",
                       &n1, &n2, &n3, &n4, &n5, &n6, buff);
            if (x < 6)
                return 1;

            laddr = htonl((n1 << 24) | (n2 << 16) | (n3 << 8) | (n4));
            lport = htons((n5 << 8) | (n6));

            if ((so = tcp_listen(slirp, INADDR_ANY, 0, laddr,
                                 lport, SS_FACCEPTONCE)) == NULL) {
                return 1;
            }
            n6 = ntohs(so->so_fport);

            n5 = (n6 >> 8) & 0xff;
            n6 &= 0xff;

            laddr = ntohl(so->so_faddr.s_addr);

            n1 = ((laddr >> 24) & 0xff);
            n2 = ((laddr >> 16) & 0xff);
            n3 = ((laddr >> 8) & 0xff);
            n4 = (laddr & 0xff);

            m->m_len = bptr - m->m_data; /* Adjust length */
            m->m_len += snprintf(bptr, m->m_size - m->m_len,
                                 "ORT %d,%d,%d,%d,%d,%d\r\n%s",
                                 n1, n2, n3, n4, n5, n6, x==7?buff:"");
            return 1;
        } else if ((bptr = (char *)strstr(m->m_data, "27 Entering")) != NULL) {
            /*
             * Need to emulate the PASV response
             */
            x = sscanf(bptr,
                "27 Entering Passive Mode (%u,%u,%u,%u,%u,%u)\r\n%256[^\177]",
                &n1, &n2, &n3, &n4, &n5, &n6, buff);
            if (x < 6)
                return 1;

            laddr = htonl((n1 << 24) | (n2 << 16) | (n3 << 8) | (n4));
            lport = htons((n5 << 8) | (n6));

            if ((so = tcp_listen(slirp, INADDR_ANY, 0, laddr,
                                 lport, SS_FACCEPTONCE)) == NULL) {
                return 1;
            }
            n6 = ntohs(so->so_fport);

            n5 = (n6 >> 8) & 0xff;
            n6 &= 0xff;

            laddr = ntohl(so->so_faddr.s_addr);

            n1 = ((laddr >> 24) & 0xff);
            n2 = ((laddr >> 16) & 0xff);
            n3 = ((laddr >> 8) & 0xff);
            n4 = (laddr & 0xff);

            m->m_len = bptr - m->m_data; /* Adjust length */
            m->m_len += snprintf(bptr, m->m_size - m->m_len,
                "27 Entering Passive Mode (%d,%d,%d,%d,%d,%d)\r\n%s",
                n1, n2, n3, n4, n5, n6, x==7?buff:"");
            return 1;
        }

        return 1;

    case EMU_KSH:
        /*
         * The kshell (Kerberos rsh) and shell services both pass
         * a local port number to carry signals to the server
         * and stderr to the client.  It is passed at the beginning
         * of the connection as a NUL-terminated decimal ASCII string.
         */
        so->so_emu = 0;
        for (lport = 0, i = 0; i < m->m_len-1; ++i) {
            if (m->m_data[i] < '0' || m->m_data[i] > '9')
                return 1; /* invalid number */
            lport *= 10;
            lport += m->m_data[i] - '0';
        }
        if (m->m_data[m->m_len-1] == '\0' && lport != 0 &&
            (so = tcp_listen(slirp, INADDR_ANY, 0, so->so_laddr.s_addr,
                             htons(lport), SS_FACCEPTONCE)) != NULL)
            m->m_len = snprintf(m->m_data, m->m_size, "%d",
                                ntohs(so->so_fport)) + 1;
        return 1;

    case EMU_IRC:
        /*
         * Need to emulate DCC CHAT, DCC SEND and DCC MOVE
         */
        *(m->m_data+m->m_len) = 0; /* NULL terminate the string for strstr */
        if ((bptr = (char *)strstr(m->m_data, "DCC")) == NULL)
            return 1;

        /* The %256s is for the broken mIRC */
        if (sscanf(bptr, "DCC CHAT %256s %u %u", buff, &laddr, &lport) == 3) {
            if ((so = tcp_listen(slirp, INADDR_ANY, 0, htonl(laddr),
                                 htons(lport), SS_FACCEPTONCE)) == NULL) {
                return 1;
            }
            m->m_len = bptr - m->m_data; /* Adjust length */
            m->m_len += snprintf(bptr, m->m_size,
                                 "DCC CHAT chat %lu %u%c\n",
                                 (unsigned long)ntohl(so->so_faddr.s_addr),
                                 ntohs(so->so_fport), 1);
        } else if (sscanf(bptr, "DCC SEND %256s %u %u %u", buff, &laddr,
                          &lport, &n1) == 4) {
            if ((so = tcp_listen(slirp, INADDR_ANY, 0, htonl(laddr),
                                 htons(lport), SS_FACCEPTONCE)) == NULL) {
                return 1;
            }
            m->m_len = bptr - m->m_data; /* Adjust length */
            m->m_len += snprintf(bptr, m->m_size,
                                 "DCC SEND %s %lu %u %u%c\n", buff,
                                 (unsigned long)ntohl(so->so_faddr.s_addr),
                                 ntohs(so->so_fport), n1, 1);
        } else if (sscanf(bptr, "DCC MOVE %256s %u %u %u", buff, &laddr,
                          &lport, &n1) == 4) {
            if ((so = tcp_listen(slirp, INADDR_ANY, 0, htonl(laddr),
                                 htons(lport), SS_FACCEPTONCE)) == NULL) {
                return 1;
            }
            m->m_len = bptr - m->m_data; /* Adjust length */
            m->m_len += snprintf(bptr, m->m_size,
                                 "DCC MOVE %s %lu %u %u%c\n", buff,
                                 (unsigned long)ntohl(so->so_faddr.s_addr),
                                 ntohs(so->so_fport), n1, 1);
        }
        return 1;

    case EMU_REALAUDIO:
        /*
         * RealAudio emulation - JP. We must try to parse the incoming
         * data and try to find the two characters that contain the
         * port number. Then we redirect an udp port and replace the
         * number with the real port we got.
         *
         * The 1.0 beta versions of the player are not supported
         * any more.
         *
         * A typical packet for player version 1.0 (release version):
         *
         * 0000:50 4E 41 00 05
         * 0000:00 01 00 02 1B D7 00 00 67 E6 6C DC 63 00 12 50 ........g.l.c..P
         * 0010:4E 43 4C 49 45 4E 54 20 31 30 31 20 41 4C 50 48 NCLIENT 101 ALPH
         * 0020:41 6C 00 00 52 00 17 72 61 66 69 6C 65 73 2F 76 Al..R..rafiles/v
         * 0030:6F 61 2F 65 6E 67 6C 69 73 68 5F 2E 72 61 79 42 oa/english_.rayB
         *
         * Now the port number 0x1BD7 is found at offset 0x04 of the
         * second packet. This time we received five bytes first and
         * then the rest. You never know how many bytes you get.
         *
         * A typical packet for player version 2.0 (beta):
         *
         * 0000:50 4E 41 00 06 00 02 00 00 00 01 00 02 1B C1 00 PNA.............
         * 0010:00 67 75 78 F5 63 00 0A 57 69 6E 32 2E 30 2E 30 .gux.c..Win2.0.0
         * 0020:2E 35 6C 00 00 52 00 1C 72 61 66 69 6C 65 73 2F .5l..R..rafiles/
         * 0030:77 65 62 73 69 74 65 2F 32 30 72 65 6C 65 61 73 website/20releas
         * 0040:65 2E 72 61 79 53 00 00 06 36 42                e.rayS...6B
         *
         * Port number 0x1BC1 is found at offset 0x0d.
         *
         * This is just a horrible switch statement. Variable ra tells
         * us where we're going.
         */
        bptr = m->m_data;
        while (bptr < m->m_data + m->m_len) {
            uint16_t p;
            static int ra = 0;
            char ra_tbl[4];

            /* "PNA" magic that starts a RealAudio request */
            ra_tbl[0] = 0x50;
            ra_tbl[1] = 0x4e;
            ra_tbl[2] = 0x41;
            ra_tbl[3] = 0;

            switch (ra) {
            case 0:
            case 2:
            case 3:
                if (*bptr++ != ra_tbl[ra]) {
                    ra = 0;
                    continue;
                }
                break;

            case 1:
                /*
                 * We may get 0x50 several times, ignore them
                 */
                if (*bptr == 0x50) {
                    ra = 1;
                    bptr++;
                    continue;
                } else if (*bptr++ != ra_tbl[ra]) {
                    ra = 0;
                    continue;
                }
                break;

            case 4:
                /*
                 * skip version number
                 */
                bptr++;
                break;

            case 5:
                /*
                 * The difference between versions 1.0 and
                 * 2.0 is here. For future versions of
                 * the player this may need to be modified.
                 */
                if (*(bptr + 1) == 0x02)
                    bptr += 8;
                else
                    bptr += 4;
                break;

            case 6:
                /* This is the field containing the port
                 * number that RA-player is listening to.
                 */
                lport = (((uint8_t*)bptr)[0] << 8)
                        + ((uint8_t *)bptr)[1];
                if (lport < 6970)
                    lport += 256; /* don't know why */
                if (lport < 6970 || lport > 7170)
                    return 1; /* failed */

                /* try to get udp port between 6970 - 7170 */
                for (p = 6970; p < 7071; p++) {
                    if (udp_listen(slirp, INADDR_ANY, htons(p),
                                   so->so_laddr.s_addr, htons(lport),
                                   SS_FACCEPTONCE)) {
                        break;
                    }
                }
                if (p == 7071)
                    p = 0;
                /* patch the redirected port back into the stream */
                *(uint8_t *)bptr++ = (p >> 8) & 0xff;
                *(uint8_t *)bptr = p & 0xff;
                ra = 0;
                return 1; /* port redirected, we're done */
                break;

            default:
                ra = 0;
            }
            ra++;
        }
        return 1;

    default:
        /* Ooops, not emulated, won't call tcp_emu again */
        so->so_emu = 0;
        return 1;
    }
}

/*
 * Do misc. config of SLiRP while its running.
 * Return 0 if this connections is to be closed, 1 otherwise,
 * return 2 if this is a command-line connection
 */
int tcp_ctl(struct socket *so)
{
    Slirp *slirp = so->slirp;
    struct sbuf *sb = &so->so_snd;
    struct gfwd_list *ex_ptr;

    DEBUG_CALL("tcp_ctl");
    DEBUG_ARG("so = %p", so);

    /* TODO: IPv6 */
    if (so->so_faddr.s_addr != slirp->vhost_addr.s_addr) {
        /* Check if it's pty_exec */
        for (ex_ptr = slirp->guestfwd_list; ex_ptr; ex_ptr = ex_ptr->ex_next) {
            if (ex_ptr->ex_fport == so->so_fport &&
                so->so_faddr.s_addr == ex_ptr->ex_addr.s_addr) {
                if (ex_ptr->write_cb) {
                    /* guest forward handled by callback, no host fd */
                    so->s = -1;
                    so->guestfwd = ex_ptr;
                    return 1;
                }
                DEBUG_MISC(" executing %s", ex_ptr->ex_exec);
                return fork_exec(so, ex_ptr->ex_exec);
            }
        }
    }
    sb->sb_cc = snprintf(sb->sb_wptr,
                         sb->sb_datalen - (sb->sb_wptr - sb->sb_data),
                         "Error: No application configured.\r\n");
    sb->sb_wptr += sb->sb_cc;
    return 0;
}
pmp-tool/PMP
src/qemu/src-pmp/hw/dma/puv3_dma.c
<reponame>pmp-tool/PMP /* * DMA device simulation in PKUnity SoC * * Copyright (C) 2010-2012 <NAME> * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 as * published by the Free Software Foundation, or any later version. * See the COPYING file in the top-level directory. */ #include "qemu/osdep.h" #include "hw/hw.h" #include "hw/sysbus.h" #undef DEBUG_PUV3 #include "hw/unicore32/puv3.h" #define PUV3_DMA_CH_NR (6) #define PUV3_DMA_CH_MASK (0xff) #define PUV3_DMA_CH(offset) ((offset) >> 8) #define TYPE_PUV3_DMA "puv3_dma" #define PUV3_DMA(obj) OBJECT_CHECK(PUV3DMAState, (obj), TYPE_PUV3_DMA) typedef struct PUV3DMAState { SysBusDevice parent_obj; MemoryRegion iomem; uint32_t reg_CFG[PUV3_DMA_CH_NR]; } PUV3DMAState; static uint64_t puv3_dma_read(void *opaque, hwaddr offset, unsigned size) { PUV3DMAState *s = opaque; uint32_t ret = 0; assert(PUV3_DMA_CH(offset) < PUV3_DMA_CH_NR); switch (offset & PUV3_DMA_CH_MASK) { case 0x10: ret = s->reg_CFG[PUV3_DMA_CH(offset)]; break; default: DPRINTF("Bad offset 0x%x\n", offset); } DPRINTF("offset 0x%x, value 0x%x\n", offset, ret); return ret; } static void puv3_dma_write(void *opaque, hwaddr offset, uint64_t value, unsigned size) { PUV3DMAState *s = opaque; assert(PUV3_DMA_CH(offset) < PUV3_DMA_CH_NR); switch (offset & PUV3_DMA_CH_MASK) { case 0x10: s->reg_CFG[PUV3_DMA_CH(offset)] = value; break; default: DPRINTF("Bad offset 0x%x\n", offset); } DPRINTF("offset 0x%x, value 0x%x\n", offset, value); } static const MemoryRegionOps puv3_dma_ops = { .read = puv3_dma_read, .write = puv3_dma_write, .impl = { .min_access_size = 4, .max_access_size = 4, }, .endianness = DEVICE_NATIVE_ENDIAN, }; static void puv3_dma_realize(DeviceState *dev, Error **errp) { PUV3DMAState *s = PUV3_DMA(dev); int i; for (i = 0; i < PUV3_DMA_CH_NR; i++) { s->reg_CFG[i] = 0x0; } memory_region_init_io(&s->iomem, OBJECT(s), &puv3_dma_ops, s, "puv3_dma", PUV3_REGS_OFFSET); 
sysbus_init_mmio(SYS_BUS_DEVICE(dev), &s->iomem); } static void puv3_dma_class_init(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); dc->realize = puv3_dma_realize; } static const TypeInfo puv3_dma_info = { .name = TYPE_PUV3_DMA, .parent = TYPE_SYS_BUS_DEVICE, .instance_size = sizeof(PUV3DMAState), .class_init = puv3_dma_class_init, }; static void puv3_dma_register_type(void) { type_register_static(&puv3_dma_info); } type_init(puv3_dma_register_type)
pmp-tool/PMP
src/qemu/src-pmp/hw/usb/ccid-card-emulated.c
<reponame>pmp-tool/PMP /* * CCID Card Device. Emulated card. * * Copyright (c) 2011 Red Hat. * Written by <NAME>. * * This code is licensed under the GNU LGPL, version 2 or later. */ /* * It can be used to provide access to the local hardware in a non exclusive * way, or it can use certificates. It requires the usb-ccid bus. * * Usage 1: standard, mirror hardware reader+card: * qemu .. -usb -device usb-ccid -device ccid-card-emulated * * Usage 2: use certificates, no hardware required * one time: create the certificates: * for i in 1 2 3; do * certutil -d /etc/pki/nssdb -x -t "CT,CT,CT" -S -s "CN=user$i" -n user$i * done * qemu .. -usb -device usb-ccid \ * -device ccid-card-emulated,cert1=user1,cert2=user2,cert3=user3 * * If you use a non default db for the certificates you can specify it using * the db parameter. */ #include "qemu/osdep.h" #include <libcacard.h> #include "qemu/thread.h" #include "qemu/main-loop.h" #include "ccid.h" #include "qapi/error.h" #define DPRINTF(card, lvl, fmt, ...) 
\ do {\ if (lvl <= card->debug) {\ printf("ccid-card-emul: %s: " fmt , __func__, ## __VA_ARGS__);\ } \ } while (0) #define TYPE_EMULATED_CCID "ccid-card-emulated" #define EMULATED_CCID_CARD(obj) \ OBJECT_CHECK(EmulatedState, (obj), TYPE_EMULATED_CCID) #define BACKEND_NSS_EMULATED_NAME "nss-emulated" #define BACKEND_CERTIFICATES_NAME "certificates" enum { BACKEND_NSS_EMULATED = 1, BACKEND_CERTIFICATES }; #define DEFAULT_BACKEND BACKEND_NSS_EMULATED typedef struct EmulatedState EmulatedState; enum { EMUL_READER_INSERT = 0, EMUL_READER_REMOVE, EMUL_CARD_INSERT, EMUL_CARD_REMOVE, EMUL_GUEST_APDU, EMUL_RESPONSE_APDU, EMUL_ERROR, }; static const char *emul_event_to_string(uint32_t emul_event) { switch (emul_event) { case EMUL_READER_INSERT: return "EMUL_READER_INSERT"; case EMUL_READER_REMOVE: return "EMUL_READER_REMOVE"; case EMUL_CARD_INSERT: return "EMUL_CARD_INSERT"; case EMUL_CARD_REMOVE: return "EMUL_CARD_REMOVE"; case EMUL_GUEST_APDU: return "EMUL_GUEST_APDU"; case EMUL_RESPONSE_APDU: return "EMUL_RESPONSE_APDU"; case EMUL_ERROR: return "EMUL_ERROR"; } return "UNKNOWN"; } typedef struct EmulEvent { QSIMPLEQ_ENTRY(EmulEvent) entry; union { struct { uint32_t type; } gen; struct { uint32_t type; uint64_t code; } error; struct { uint32_t type; uint32_t len; uint8_t data[]; } data; } p; } EmulEvent; #define MAX_ATR_SIZE 40 struct EmulatedState { CCIDCardState base; uint8_t debug; char *backend_str; uint32_t backend; char *cert1; char *cert2; char *cert3; char *db; uint8_t atr[MAX_ATR_SIZE]; uint8_t atr_length; QSIMPLEQ_HEAD(, EmulEvent) event_list; QemuMutex event_list_mutex; QemuThread event_thread_id; VReader *reader; QSIMPLEQ_HEAD(, EmulEvent) guest_apdu_list; QemuMutex vreader_mutex; /* and guest_apdu_list mutex */ QemuMutex handle_apdu_mutex; QemuCond handle_apdu_cond; EventNotifier notifier; int quit_apdu_thread; QemuThread apdu_thread_id; }; static void emulated_apdu_from_guest(CCIDCardState *base, const uint8_t *apdu, uint32_t len) { EmulatedState *card = 
EMULATED_CCID_CARD(base); EmulEvent *event = (EmulEvent *)g_malloc(sizeof(EmulEvent) + len); assert(event); event->p.data.type = EMUL_GUEST_APDU; event->p.data.len = len; memcpy(event->p.data.data, apdu, len); qemu_mutex_lock(&card->vreader_mutex); QSIMPLEQ_INSERT_TAIL(&card->guest_apdu_list, event, entry); qemu_mutex_unlock(&card->vreader_mutex); qemu_mutex_lock(&card->handle_apdu_mutex); qemu_cond_signal(&card->handle_apdu_cond); qemu_mutex_unlock(&card->handle_apdu_mutex); } static const uint8_t *emulated_get_atr(CCIDCardState *base, uint32_t *len) { EmulatedState *card = EMULATED_CCID_CARD(base); *len = card->atr_length; return card->atr; } static void emulated_push_event(EmulatedState *card, EmulEvent *event) { qemu_mutex_lock(&card->event_list_mutex); QSIMPLEQ_INSERT_TAIL(&(card->event_list), event, entry); qemu_mutex_unlock(&card->event_list_mutex); event_notifier_set(&card->notifier); } static void emulated_push_type(EmulatedState *card, uint32_t type) { EmulEvent *event = g_new(EmulEvent, 1); assert(event); event->p.gen.type = type; emulated_push_event(card, event); } static void emulated_push_error(EmulatedState *card, uint64_t code) { EmulEvent *event = g_new(EmulEvent, 1); assert(event); event->p.error.type = EMUL_ERROR; event->p.error.code = code; emulated_push_event(card, event); } static void emulated_push_data_type(EmulatedState *card, uint32_t type, const uint8_t *data, uint32_t len) { EmulEvent *event = (EmulEvent *)g_malloc(sizeof(EmulEvent) + len); assert(event); event->p.data.type = type; event->p.data.len = len; memcpy(event->p.data.data, data, len); emulated_push_event(card, event); } static void emulated_push_reader_insert(EmulatedState *card) { emulated_push_type(card, EMUL_READER_INSERT); } static void emulated_push_reader_remove(EmulatedState *card) { emulated_push_type(card, EMUL_READER_REMOVE); } static void emulated_push_card_insert(EmulatedState *card, const uint8_t *atr, uint32_t len) { emulated_push_data_type(card, EMUL_CARD_INSERT, 
atr, len); } static void emulated_push_card_remove(EmulatedState *card) { emulated_push_type(card, EMUL_CARD_REMOVE); } static void emulated_push_response_apdu(EmulatedState *card, const uint8_t *apdu, uint32_t len) { emulated_push_data_type(card, EMUL_RESPONSE_APDU, apdu, len); } #define APDU_BUF_SIZE 270 static void *handle_apdu_thread(void* arg) { EmulatedState *card = arg; uint8_t recv_data[APDU_BUF_SIZE]; int recv_len; VReaderStatus reader_status; EmulEvent *event; while (1) { qemu_mutex_lock(&card->handle_apdu_mutex); qemu_cond_wait(&card->handle_apdu_cond, &card->handle_apdu_mutex); qemu_mutex_unlock(&card->handle_apdu_mutex); if (card->quit_apdu_thread) { card->quit_apdu_thread = 0; /* debugging */ break; } qemu_mutex_lock(&card->vreader_mutex); while (!QSIMPLEQ_EMPTY(&card->guest_apdu_list)) { event = QSIMPLEQ_FIRST(&card->guest_apdu_list); assert((unsigned long)event > 1000); QSIMPLEQ_REMOVE_HEAD(&card->guest_apdu_list, entry); if (event->p.data.type != EMUL_GUEST_APDU) { DPRINTF(card, 1, "unexpected message in handle_apdu_thread\n"); g_free(event); continue; } if (card->reader == NULL) { DPRINTF(card, 1, "reader is NULL\n"); g_free(event); continue; } recv_len = sizeof(recv_data); reader_status = vreader_xfr_bytes(card->reader, event->p.data.data, event->p.data.len, recv_data, &recv_len); DPRINTF(card, 2, "got back apdu of length %d\n", recv_len); if (reader_status == VREADER_OK) { emulated_push_response_apdu(card, recv_data, recv_len); } else { emulated_push_error(card, reader_status); } g_free(event); } qemu_mutex_unlock(&card->vreader_mutex); } return NULL; } static void *event_thread(void *arg) { int atr_len = MAX_ATR_SIZE; uint8_t atr[MAX_ATR_SIZE]; VEvent *event = NULL; EmulatedState *card = arg; while (1) { const char *reader_name; event = vevent_wait_next_vevent(); if (event == NULL || event->type == VEVENT_LAST) { break; } if (event->type != VEVENT_READER_INSERT) { if (card->reader == NULL && event->reader != NULL) { /* Happens after device_add 
followed by card remove or insert. * XXX: create synthetic add_reader events if vcard_emul_init * already called, which happens if device_del and device_add * are called */ card->reader = vreader_reference(event->reader); } else { if (event->reader != card->reader) { fprintf(stderr, "ERROR: wrong reader: quiting event_thread\n"); break; } } } switch (event->type) { case VEVENT_READER_INSERT: /* TODO: take a specific reader. i.e. track which reader * we are seeing here, check it is the one we want (the first, * or by a particular name), and ignore if we don't want it. */ reader_name = vreader_get_name(event->reader); if (card->reader != NULL) { DPRINTF(card, 2, "READER INSERT - replacing %s with %s\n", vreader_get_name(card->reader), reader_name); qemu_mutex_lock(&card->vreader_mutex); vreader_free(card->reader); qemu_mutex_unlock(&card->vreader_mutex); emulated_push_reader_remove(card); } qemu_mutex_lock(&card->vreader_mutex); DPRINTF(card, 2, "READER INSERT %s\n", reader_name); card->reader = vreader_reference(event->reader); qemu_mutex_unlock(&card->vreader_mutex); emulated_push_reader_insert(card); break; case VEVENT_READER_REMOVE: DPRINTF(card, 2, " READER REMOVE: %s\n", vreader_get_name(event->reader)); qemu_mutex_lock(&card->vreader_mutex); vreader_free(card->reader); card->reader = NULL; qemu_mutex_unlock(&card->vreader_mutex); emulated_push_reader_remove(card); break; case VEVENT_CARD_INSERT: /* get the ATR (intended as a response to a power on from the * reader */ atr_len = MAX_ATR_SIZE; vreader_power_on(event->reader, atr, &atr_len); card->atr_length = (uint8_t)atr_len; DPRINTF(card, 2, " CARD INSERT\n"); emulated_push_card_insert(card, atr, atr_len); break; case VEVENT_CARD_REMOVE: DPRINTF(card, 2, " CARD REMOVE\n"); emulated_push_card_remove(card); break; case VEVENT_LAST: /* quit */ vevent_delete(event); return NULL; break; default: break; } vevent_delete(event); } return NULL; } static void card_event_handler(EventNotifier *notifier) { EmulatedState 
*card = container_of(notifier, EmulatedState, notifier); EmulEvent *event, *next; event_notifier_test_and_clear(&card->notifier); qemu_mutex_lock(&card->event_list_mutex); QSIMPLEQ_FOREACH_SAFE(event, &card->event_list, entry, next) { DPRINTF(card, 2, "event %s\n", emul_event_to_string(event->p.gen.type)); switch (event->p.gen.type) { case EMUL_RESPONSE_APDU: ccid_card_send_apdu_to_guest(&card->base, event->p.data.data, event->p.data.len); break; case EMUL_READER_INSERT: ccid_card_ccid_attach(&card->base); break; case EMUL_READER_REMOVE: ccid_card_ccid_detach(&card->base); break; case EMUL_CARD_INSERT: assert(event->p.data.len <= MAX_ATR_SIZE); card->atr_length = event->p.data.len; memcpy(card->atr, event->p.data.data, card->atr_length); ccid_card_card_inserted(&card->base); break; case EMUL_CARD_REMOVE: ccid_card_card_removed(&card->base); break; case EMUL_ERROR: ccid_card_card_error(&card->base, event->p.error.code); break; default: DPRINTF(card, 2, "unexpected event\n"); break; } g_free(event); } QSIMPLEQ_INIT(&card->event_list); qemu_mutex_unlock(&card->event_list_mutex); } static int init_event_notifier(EmulatedState *card, Error **errp) { if (event_notifier_init(&card->notifier, false) < 0) { error_setg(errp, "ccid-card-emul: event notifier creation failed"); return -1; } event_notifier_set_handler(&card->notifier, card_event_handler); return 0; } static void clean_event_notifier(EmulatedState *card) { event_notifier_set_handler(&card->notifier, NULL); event_notifier_cleanup(&card->notifier); } #define CERTIFICATES_DEFAULT_DB "/etc/pki/nssdb" #define CERTIFICATES_ARGS_TEMPLATE\ "db=\"%s\" use_hw=no soft=(,Virtual Reader,CAC,,%s,%s,%s)" static int wrap_vcard_emul_init(VCardEmulOptions *options) { static int called; static int options_was_null; if (called) { if ((options == NULL) != options_was_null) { printf("%s: warning: running emulated with certificates" " and emulated side by side is not supported\n", __func__); return VCARD_EMUL_FAIL; } 
vcard_emul_replay_insertion_events(); return VCARD_EMUL_OK; } options_was_null = (options == NULL); called = 1; return vcard_emul_init(options); } static int emulated_initialize_vcard_from_certificates(EmulatedState *card) { char emul_args[200]; VCardEmulOptions *options = NULL; snprintf(emul_args, sizeof(emul_args) - 1, CERTIFICATES_ARGS_TEMPLATE, card->db ? card->db : CERTIFICATES_DEFAULT_DB, card->cert1, card->cert2, card->cert3); options = vcard_emul_options(emul_args); if (options == NULL) { printf("%s: warning: not using certificates due to" " initialization error\n", __func__); } return wrap_vcard_emul_init(options); } typedef struct EnumTable { const char *name; uint32_t value; } EnumTable; static const EnumTable backend_enum_table[] = { {BACKEND_NSS_EMULATED_NAME, BACKEND_NSS_EMULATED}, {BACKEND_CERTIFICATES_NAME, BACKEND_CERTIFICATES}, {NULL, 0}, }; static uint32_t parse_enumeration(char *str, const EnumTable *table, uint32_t not_found_value) { uint32_t ret = not_found_value; if (str == NULL) return 0; while (table->name != NULL) { if (strcmp(table->name, str) == 0) { ret = table->value; break; } table++; } return ret; } static void emulated_realize(CCIDCardState *base, Error **errp) { EmulatedState *card = EMULATED_CCID_CARD(base); VCardEmulError ret; const EnumTable *ptable; QSIMPLEQ_INIT(&card->event_list); QSIMPLEQ_INIT(&card->guest_apdu_list); qemu_mutex_init(&card->event_list_mutex); qemu_mutex_init(&card->vreader_mutex); qemu_mutex_init(&card->handle_apdu_mutex); qemu_cond_init(&card->handle_apdu_cond); card->reader = NULL; card->quit_apdu_thread = 0; if (init_event_notifier(card, errp) < 0) { goto out1; } card->backend = 0; if (card->backend_str) { card->backend = parse_enumeration(card->backend_str, backend_enum_table, 0); } if (card->backend == 0) { error_setg(errp, "backend must be one of:"); for (ptable = backend_enum_table; ptable->name != NULL; ++ptable) { error_append_hint(errp, "%s\n", ptable->name); } goto out2; } /* TODO: a passthru 
backened that works on local machine. third card type?*/ if (card->backend == BACKEND_CERTIFICATES) { if (card->cert1 != NULL && card->cert2 != NULL && card->cert3 != NULL) { ret = emulated_initialize_vcard_from_certificates(card); } else { error_setg(errp, "%s: you must provide all three certs for" " certificates backend", TYPE_EMULATED_CCID); goto out2; } } else { if (card->backend != BACKEND_NSS_EMULATED) { error_setg(errp, "%s: bad backend specified. The options are:%s" " (default), %s.", TYPE_EMULATED_CCID, BACKEND_NSS_EMULATED_NAME, BACKEND_CERTIFICATES_NAME); goto out2; } if (card->cert1 != NULL || card->cert2 != NULL || card->cert3 != NULL) { error_setg(errp, "%s: unexpected cert parameters to nss emulated " "backend", TYPE_EMULATED_CCID); goto out2; } /* default to mirroring the local hardware readers */ ret = wrap_vcard_emul_init(NULL); } if (ret != VCARD_EMUL_OK) { error_setg(errp, "%s: failed to initialize vcard", TYPE_EMULATED_CCID); goto out2; } qemu_thread_create(&card->event_thread_id, "ccid/event", event_thread, card, QEMU_THREAD_JOINABLE); qemu_thread_create(&card->apdu_thread_id, "ccid/apdu", handle_apdu_thread, card, QEMU_THREAD_JOINABLE); return; out2: clean_event_notifier(card); out1: qemu_cond_destroy(&card->handle_apdu_cond); qemu_mutex_destroy(&card->handle_apdu_mutex); qemu_mutex_destroy(&card->vreader_mutex); qemu_mutex_destroy(&card->event_list_mutex); } static void emulated_unrealize(CCIDCardState *base, Error **errp) { EmulatedState *card = EMULATED_CCID_CARD(base); VEvent *vevent = vevent_new(VEVENT_LAST, NULL, NULL); vevent_queue_vevent(vevent); /* stop vevent thread */ qemu_thread_join(&card->event_thread_id); card->quit_apdu_thread = 1; /* stop handle_apdu thread */ qemu_cond_signal(&card->handle_apdu_cond); qemu_thread_join(&card->apdu_thread_id); clean_event_notifier(card); /* threads exited, can destroy all condvars/mutexes */ qemu_cond_destroy(&card->handle_apdu_cond); qemu_mutex_destroy(&card->handle_apdu_mutex); 
qemu_mutex_destroy(&card->vreader_mutex); qemu_mutex_destroy(&card->event_list_mutex); } static Property emulated_card_properties[] = { DEFINE_PROP_STRING("backend", EmulatedState, backend_str), DEFINE_PROP_STRING("cert1", EmulatedState, cert1), DEFINE_PROP_STRING("cert2", EmulatedState, cert2), DEFINE_PROP_STRING("cert3", EmulatedState, cert3), DEFINE_PROP_STRING("db", EmulatedState, db), DEFINE_PROP_UINT8("debug", EmulatedState, debug, 0), DEFINE_PROP_END_OF_LIST(), }; static void emulated_class_initfn(ObjectClass *klass, void *data) { DeviceClass *dc = DEVICE_CLASS(klass); CCIDCardClass *cc = CCID_CARD_CLASS(klass); cc->realize = emulated_realize; cc->unrealize = emulated_unrealize; cc->get_atr = emulated_get_atr; cc->apdu_from_guest = emulated_apdu_from_guest; set_bit(DEVICE_CATEGORY_INPUT, dc->categories); dc->desc = "emulated smartcard"; dc->props = emulated_card_properties; } static const TypeInfo emulated_card_info = { .name = TYPE_EMULATED_CCID, .parent = TYPE_CCID_CARD, .instance_size = sizeof(EmulatedState), .class_init = emulated_class_initfn, }; static void ccid_card_emulated_register_types(void) { type_register_static(&emulated_card_info); } type_init(ccid_card_emulated_register_types)